From 537cb49eb241521bf58df8510c1c86aea60eb84f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sat, 31 Aug 2024 03:27:07 +0200 Subject: [PATCH 001/202] std.os.linux: Define the Stat struct for riscv32. The kernel does define the struct, it just doesn't use it. Yet both glibc and musl expose it directly as their public stat struct, and std.c takes it from std.os.linux. So just define it after all. --- lib/std/os/linux/riscv32.zig | 33 +++++++++++++++++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) diff --git a/lib/std/os/linux/riscv32.zig b/lib/std/os/linux/riscv32.zig index 08288d45e5bf..457d7e50b4b4 100644 --- a/lib/std/os/linux/riscv32.zig +++ b/lib/std/os/linux/riscv32.zig @@ -212,8 +212,37 @@ pub const msghdr_const = extern struct { flags: i32, }; -/// No `Stat` structure on this platform, only `Statx`. -pub const Stat = void; +// The `stat` definition used by the Linux kernel. +pub const Stat = extern struct { + dev: dev_t, + ino: ino_t, + mode: mode_t, + nlink: nlink_t, + uid: uid_t, + gid: gid_t, + rdev: dev_t, + __pad: usize, + size: off_t, + blksize: blksize_t, + __pad2: i32, + blocks: blkcnt_t, + atim: timespec, + mtim: timespec, + ctim: timespec, + __unused: [2]u32, + + pub fn atime(self: @This()) timespec { + return self.atim; + } + + pub fn mtime(self: @This()) timespec { + return self.mtim; + } + + pub fn ctime(self: @This()) timespec { + return self.ctim; + } +}; pub const Elf_Symndx = u32; From 6364995d3f40efa96bec905c43182e3d4bbaea08 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sat, 31 Aug 2024 03:25:48 +0200 Subject: [PATCH 002/202] std.os.linux: Also use kernel_timespec for riscv32 when libc is linked. Both glibc and musl use time64 as the base ABI for riscv32. This fixes the `sleep` test in `std.time` hanging forever due to the libc functions reading bogus values. 
--- lib/std/os/linux.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index df9d491a5e04..82593d61692d 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -7452,7 +7452,7 @@ pub const kernel_timespec = extern struct { }; // https://github.com/ziglang/zig/issues/4726#issuecomment-2190337877 -pub const timespec = if (!builtin.link_libc and native_arch == .riscv32) kernel_timespec else extern struct { +pub const timespec = if (native_arch == .riscv32) kernel_timespec else extern struct { sec: isize, nsec: isize, }; From 68bb788df57d6725c783adb2b9018cc7db789dd8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sat, 31 Aug 2024 03:25:08 +0200 Subject: [PATCH 003/202] std.os.linux: Make nanosleep() a compile error on riscv32. This should eventually be converted to the void/{} pattern along with the other syscalls that are compile errors for riscv32. --- lib/std/os/linux.zig | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index 82593d61692d..90298bc7cd00 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -1465,7 +1465,9 @@ pub fn settimeofday(tv: *const timeval, tz: *const timezone) usize { } pub fn nanosleep(req: *const timespec, rem: ?*timespec) usize { - return syscall2(.nanosleep, @intFromPtr(req), @intFromPtr(rem)); + if (native_arch == .riscv32) { + @compileError("No nanosleep syscall on this architecture."); + } else return syscall2(.nanosleep, @intFromPtr(req), @intFromPtr(rem)); } pub fn pause() usize { From f10b226c7703f08b39f21d8739768818c4d57d0e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Tue, 27 Aug 2024 08:03:35 +0200 Subject: [PATCH 004/202] Compilation: Pass -fPIC for assembly files too, not just C files. There are targets (e.g. MIPS) where PIC actually affects assembler behavior. 
--- src/Compilation.zig | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 3d40957a72f8..4d4f1527a23e 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -5636,10 +5636,6 @@ pub fn addCCArgs( try argv.append("-Werror=date-time"); } - if (target_util.supports_fpic(target) and mod.pic) { - try argv.append("-fPIC"); - } - if (mod.unwind_tables) { try argv.append("-funwind-tables"); } else { @@ -5730,6 +5726,10 @@ pub fn addCCArgs( }, } + if (target_util.supports_fpic(target) and mod.pic) { + try argv.append("-fPIC"); + } + try argv.ensureUnusedCapacity(2); switch (comp.config.debug_format) { .strip => {}, From 2de7296262b88f3d19ad9cb75f824bf69350c717 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Tue, 27 Aug 2024 08:09:47 +0200 Subject: [PATCH 005/202] Compilation: Pass -mthumb for assembly files too, not just C files. --- src/Compilation.zig | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 4d4f1527a23e..54fa1208569f 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -5536,10 +5536,6 @@ pub fn addCCArgs( else => {}, } - if (target.cpu.arch.isThumb()) { - try argv.append("-mthumb"); - } - { var san_arg: std.ArrayListUnmanaged(u8) = .{}; const prefix = "-fsanitize="; @@ -5726,6 +5722,10 @@ pub fn addCCArgs( }, } + if (target.cpu.arch.isThumb()) { + try argv.append("-mthumb"); + } + if (target_util.supports_fpic(target) and mod.pic) { try argv.append("-fPIC"); } From f021ad548fd8ad6c7d8d8ea7e7c409c695dce1e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Wed, 28 Aug 2024 10:27:29 +0200 Subject: [PATCH 006/202] musl: Build with -fno-builtin and -mimplicit-it=always (for thumb) like upstream. 
--- src/musl.zig | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/musl.zig b/src/musl.zig index 1c20c209687b..acf590203349 100644 --- a/src/musl.zig +++ b/src/musl.zig @@ -384,6 +384,7 @@ fn addCcArgs( try args.appendSlice(&[_][]const u8{ "-std=c99", "-ffreestanding", + "-fno-builtin", "-fexcess-precision=standard", "-frounding-math", "-fno-strict-aliasing", @@ -422,6 +423,10 @@ fn addCcArgs( "-Qunused-arguments", "-w", // disable all warnings }); + + if (target.cpu.arch.isThumb()) { + try args.append("-mimplicit-it=always"); + } } fn start_asm_path(comp: *Compilation, arena: Allocator, basename: []const u8) ![]const u8 { From 0ecc6332b4eb1ced547ffa38f57471134aaa4d13 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Thu, 29 Aug 2024 16:07:46 +0200 Subject: [PATCH 007/202] start: Fix arm stack alignment code to work for thumb too. --- lib/std/start.zig | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/std/start.zig b/lib/std/start.zig index ea6f347bd665..5c94a4b591b8 100644 --- a/lib/std/start.zig +++ b/lib/std/start.zig @@ -283,7 +283,9 @@ fn _start() callconv(.Naked) noreturn { \\ mov fp, #0 \\ mov lr, #0 \\ mov a1, sp - \\ and sp, #-16 + \\ mov ip, sp + \\ and ip, ip, #-16 + \\ mov sp, ip \\ b %[posixCallMainAndExit] , .csky => From 49ad51b2feacad394e05d7b5c87c5020c3bc0f5e Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 28 Apr 2024 13:13:40 -0400 Subject: [PATCH 008/202] Builder: add `indirectbr` llvm instruction --- src/codegen/llvm/Builder.zig | 97 ++++++++++++++++++++++++++++++------ src/codegen/llvm/ir.zig | 14 ++++++ 2 files changed, 96 insertions(+), 15 deletions(-) diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index 029b81dc3f75..d663f21a2129 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -4157,6 +4157,7 @@ pub const Function = struct { @"icmp ugt", @"icmp ule", @"icmp ult", + indirectbr, insertelement, insertvalue, inttoptr, @@ 
-4367,6 +4368,7 @@ pub const Function = struct { return switch (wip.instructions.items(.tag)[@intFromEnum(self)]) { .br, .br_cond, + .indirectbr, .ret, .@"ret void", .@"switch", @@ -4381,6 +4383,7 @@ pub const Function = struct { .br, .br_cond, .fence, + .indirectbr, .ret, .@"ret void", .store, @@ -4471,6 +4474,7 @@ pub const Function = struct { .br, .br_cond, .fence, + .indirectbr, .ret, .@"ret void", .store, @@ -4657,6 +4661,7 @@ pub const Function = struct { .br, .br_cond, .fence, + .indirectbr, .ret, .@"ret void", .store, @@ -4837,6 +4842,12 @@ pub const Function = struct { //case_blocks: [cases_len]Block.Index, }; + pub const IndirectBr = struct { + addr: Value, + targets_len: u32, + //targets: [targets_len]Block.Index, + }; + pub const Binary = struct { lhs: Value, rhs: Value, @@ -5294,10 +5305,27 @@ pub const WipFunction = struct { return .{ .index = 0, .instruction = instruction }; } + pub fn indirectbr( + self: *WipFunction, + addr: Value, + targets: []const Block.Index, + ) Allocator.Error!Instruction.Index { + try self.ensureUnusedExtraCapacity(1, Instruction.IndirectBr, targets.len); + const instruction = try self.addInst(null, .{ + .tag = .indirectbr, + .data = self.addExtraAssumeCapacity(Instruction.IndirectBr{ + .addr = addr, + .targets_len = @intCast(targets.len), + }), + }); + _ = self.extra.appendSliceAssumeCapacity(@ptrCast(targets)); + for (targets) |target| target.ptr(self).branches += 1; + return instruction; + } + pub fn @"unreachable"(self: *WipFunction) Allocator.Error!Instruction.Index { try self.ensureUnusedExtraCapacity(1, NoExtra, 0); - const instruction = try self.addInst(null, .{ .tag = .@"unreachable", .data = undefined }); - return instruction; + return try self.addInst(null, .{ .tag = .@"unreachable", .data = undefined }); } pub fn un( @@ -6299,8 +6327,7 @@ pub const WipFunction = struct { }); names[@intFromEnum(new_block_index)] = try wip_name.map(current_block.name, ""); for (current_block.instructions.items) 
|old_instruction_index| { - const new_instruction_index: Instruction.Index = - @enumFromInt(function.instructions.len); + const new_instruction_index: Instruction.Index = @enumFromInt(function.instructions.len); var instruction = self.instructions.get(@intFromEnum(old_instruction_index)); switch (instruction.tag) { .add, @@ -6509,6 +6536,15 @@ pub const WipFunction = struct { }); wip_extra.appendMappedValues(indices, instructions); }, + .indirectbr => { + var extra = self.extraDataTrail(Instruction.IndirectBr, instruction.data); + const targets = extra.trail.next(extra.data.targets_len, Block.Index, self); + instruction.data = wip_extra.addExtra(Instruction.IndirectBr{ + .addr = instructions.map(extra.data.addr), + .targets_len = extra.data.targets_len, + }); + wip_extra.appendSlice(targets); + }, .insertelement => { const extra = self.extraData(Instruction.InsertElement, instruction.data); instruction.data = wip_extra.addExtra(Instruction.InsertElement{ @@ -7555,10 +7591,10 @@ pub const Constant = enum(u32) { .blockaddress => |tag| { const extra = data.builder.constantExtraData(BlockAddress, item.data); const function = extra.function.ptrConst(data.builder); - try writer.print("{s}({}, %{d})", .{ + try writer.print("{s}({}, {})", .{ @tagName(tag), function.global.fmt(data.builder), - @intFromEnum(extra.block), // TODO + extra.block.toInst(function).fmt(extra.function, data.builder), }); }, .dso_local_equivalent, @@ -9902,6 +9938,23 @@ pub fn printUnbuffered( index.fmt(function_index, self), }); }, + .indirectbr => |tag| { + var extra = + function.extraDataTrail(Function.Instruction.IndirectBr, instruction.data); + const targets = + extra.trail.next(extra.data.targets_len, Function.Block.Index, &function); + try writer.print(" {s} {%}, [", .{ + @tagName(tag), + extra.data.addr.fmt(function_index, self), + }); + for (0.., targets) |target_index, target| { + if (target_index > 0) try writer.writeAll(", "); + try writer.print("{%}", .{ + 
target.toInst(&function).fmt(function_index, self), + }); + } + try writer.writeByte(']'); + }, .insertelement => |tag| { const extra = function.extraData(Function.Instruction.InsertElement, instruction.data); @@ -14777,15 +14830,6 @@ pub fn toBitcode(self: *Builder, allocator: Allocator) bitcode_writer.Error![]co .indices = indices, }); }, - .insertvalue => { - var extra = func.extraDataTrail(Function.Instruction.InsertValue, data); - const indices = extra.trail.next(extra.data.indices_len, u32, &func); - try function_block.writeAbbrev(FunctionBlock.InsertValue{ - .val = adapter.getOffsetValueIndex(extra.data.val), - .elem = adapter.getOffsetValueIndex(extra.data.elem), - .indices = indices, - }); - }, .extractelement => { const extra = func.extraData(Function.Instruction.ExtractElement, data); try function_block.writeAbbrev(FunctionBlock.ExtractElement{ @@ -14793,6 +14837,20 @@ pub fn toBitcode(self: *Builder, allocator: Allocator) bitcode_writer.Error![]co .index = adapter.getOffsetValueIndex(extra.index), }); }, + .indirectbr => { + var extra = + func.extraDataTrail(Function.Instruction.IndirectBr, datas[instr_index]); + const targets = + extra.trail.next(extra.data.targets_len, Function.Block.Index, &func); + try function_block.writeAbbrevAdapted( + FunctionBlock.IndirectBr{ + .ty = extra.data.addr.typeOf(@enumFromInt(func_index), self), + .addr = extra.data.addr, + .targets = targets, + }, + adapter, + ); + }, .insertelement => { const extra = func.extraData(Function.Instruction.InsertElement, data); try function_block.writeAbbrev(FunctionBlock.InsertElement{ @@ -14801,6 +14859,15 @@ pub fn toBitcode(self: *Builder, allocator: Allocator) bitcode_writer.Error![]co .index = adapter.getOffsetValueIndex(extra.index), }); }, + .insertvalue => { + var extra = func.extraDataTrail(Function.Instruction.InsertValue, datas[instr_index]); + const indices = extra.trail.next(extra.data.indices_len, u32, &func); + try function_block.writeAbbrev(FunctionBlock.InsertValue{ + 
.val = adapter.getOffsetValueIndex(extra.data.val), + .elem = adapter.getOffsetValueIndex(extra.data.elem), + .indices = indices, + }); + }, .select => { const extra = func.extraData(Function.Instruction.Select, data); try function_block.writeAbbrev(FunctionBlock.Select{ diff --git a/src/codegen/llvm/ir.zig b/src/codegen/llvm/ir.zig index 4d7effdaaf94..271e87c9959c 100644 --- a/src/codegen/llvm/ir.zig +++ b/src/codegen/llvm/ir.zig @@ -19,6 +19,7 @@ const LineAbbrev = AbbrevOp{ .vbr = 8 }; const ColumnAbbrev = AbbrevOp{ .vbr = 8 }; const BlockAbbrev = AbbrevOp{ .vbr = 6 }; +const BlockArrayAbbrev = AbbrevOp{ .array_vbr = 6 }; /// Unused tags are commented out so that they are omitted in the generated /// bitcode, which scans over this enum using reflection. @@ -1294,6 +1295,7 @@ pub const FunctionBlock = struct { DebugLoc, DebugLocAgain, ColdOperandBundle, + IndirectBr, }; pub const DeclareBlocks = struct { @@ -1813,6 +1815,18 @@ pub const FunctionBlock = struct { .{ .literal = 0 }, }; }; + + pub const IndirectBr = struct { + pub const ops = [_]AbbrevOp{ + .{ .literal = 31 }, + .{ .fixed_runtime = Builder.Type }, + ValueAbbrev, + BlockArrayAbbrev, + }; + ty: Builder.Type, + addr: Builder.Value, + targets: []const Builder.Function.Block.Index, + }; }; pub const FunctionValueSymbolTable = struct { From 1b000b90c9a7abde3aeacf29cef73a877da237e1 Mon Sep 17 00:00:00 2001 From: mlugg Date: Fri, 30 Aug 2024 20:29:27 +0100 Subject: [PATCH 009/202] Air: direct representation of ranges in switch cases This commit modifies the representation of the AIR `switch_br` instruction to represent ranges in cases. Previously, Sema emitted different AIR in the case of a range, where the `else` branch of the `switch_br` contained a simple `cond_br` for each such case which did a simple range check (`x > a and x < b`). Not only does this add complexity to Sema, which we would like to minimize, but it also gets in the way of the implementation of #8220. 
That proposal turns certain `switch` statements into a looping construct, and for optimization purposes, we want to lower this to AIR fairly directly (i.e. without involving a `loop` instruction). That means we would ideally like a single instruction to represent the entire `switch` statement, so that we can dispatch back to it with a different operand as in #8220. This is not really possible to do correctly under the status quo system. This commit implements lowering of this new `switch_br` usage in the LLVM and C backends. The C backend just turns any case containing ranges entirely into conditionals, as before. The LLVM backend is a little smarter, and puts scalar items into the `switch` instruction, only using conditionals for the range cases (which direct to the same bb). All remaining self-hosted backends are temporarily regressed in the presence of switch range cases. This functionality will be restored for at least the x86_64 backend before merge. --- src/Air.zig | 12 +- src/Air/types_resolved.zig | 4 + src/Sema.zig | 370 ++++++++++++++--------------------- src/arch/aarch64/CodeGen.zig | 2 + src/arch/arm/CodeGen.zig | 1 + src/arch/riscv64/CodeGen.zig | 2 + src/arch/wasm/CodeGen.zig | 2 + src/arch/x86_64/CodeGen.zig | 2 + src/codegen/c.zig | 58 ++++-- src/codegen/llvm.zig | 56 +++++- src/codegen/spirv.zig | 1 + src/print_air.zig | 6 + 12 files changed, 268 insertions(+), 248 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index d64fe2983d99..9db68da89f39 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1143,10 +1143,12 @@ pub const SwitchBr = struct { else_body_len: u32, /// Trailing: - /// * item: Inst.Ref // for each `items_len`. - /// * instruction index for each `body_len`. 
+ /// * item: Inst.Ref // for each `items_len` + /// * { range_start: Inst.Ref, range_end: Inst.Ref } // for each `ranges_len` + /// * body_inst: Inst.Index // for each `body_len` pub const Case = struct { items_len: u32, + ranges_len: u32, body_len: u32, }; }; @@ -1862,6 +1864,10 @@ pub const UnwrappedSwitch = struct { var extra_index = extra.end; const items: []const Inst.Ref = @ptrCast(it.air.extra[extra_index..][0..extra.data.items_len]); extra_index += items.len; + // TODO: ptrcast from []const Inst.Ref to []const [2]Inst.Ref when supported + const ranges_ptr: [*]const [2]Inst.Ref = @ptrCast(it.air.extra[extra_index..]); + const ranges: []const [2]Inst.Ref = ranges_ptr[0..extra.data.ranges_len]; + extra_index += ranges.len * 2; const body: []const Inst.Index = @ptrCast(it.air.extra[extra_index..][0..extra.data.body_len]); extra_index += body.len; it.extra_index = @intCast(extra_index); @@ -1869,6 +1875,7 @@ pub const UnwrappedSwitch = struct { return .{ .idx = idx, .items = items, + .ranges = ranges, .body = body, }; } @@ -1881,6 +1888,7 @@ pub const UnwrappedSwitch = struct { pub const Case = struct { idx: u32, items: []const Inst.Ref, + ranges: []const [2]Inst.Ref, body: []const Inst.Index, }; }; diff --git a/src/Air/types_resolved.zig b/src/Air/types_resolved.zig index e60f5ef3110e..3de5aeb3e37f 100644 --- a/src/Air/types_resolved.zig +++ b/src/Air/types_resolved.zig @@ -386,6 +386,10 @@ fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool { var it = switch_br.iterateCases(); while (it.next()) |case| { for (case.items) |item| if (!checkRef(item, zcu)) return false; + for (case.ranges) |range| { + if (!checkRef(range[0], zcu)) return false; + if (!checkRef(range[1], zcu)) return false; + } if (!checkBody(air, case.body, zcu)) return false; } if (!checkBody(air, it.elseBody(), zcu)) return false; diff --git a/src/Sema.zig b/src/Sema.zig index a72c749f2ed1..aafd605bc8bc 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -11353,9 +11353,14 @@ const 
SwitchProngAnalysis = struct { const coerced = try sema.coerce(&coerce_block, capture_ty, uncoerced, case_src); _ = try coerce_block.addBr(capture_block_inst, coerced); - try cases_extra.ensureUnusedCapacity(3 + coerce_block.instructions.items.len); - cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(coerce_block.instructions.items.len)); // body_len + try cases_extra.ensureUnusedCapacity(@typeInfo(Air.SwitchBr.Case).@"struct".fields.len + + 1 + // `item`, no ranges + coerce_block.instructions.items.len); + cases_extra.appendSliceAssumeCapacity(&payloadToExtraItems(Air.SwitchBr.Case{ + .items_len = 1, + .ranges_len = 0, + .body_len = @intCast(coerce_block.instructions.items.len), + })); cases_extra.appendAssumeCapacity(@intFromEnum(case_vals[idx])); // item cases_extra.appendSliceAssumeCapacity(@ptrCast(coerce_block.instructions.items)); // body } @@ -12578,21 +12583,18 @@ fn analyzeSwitchRuntimeBlock( }; try branch_hints.append(gpa, prong_hint); - try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); - cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len)); + try cases_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr.Case).@"struct".fields.len + + 1 + // `item`, no ranges + case_block.instructions.items.len); + cases_extra.appendSliceAssumeCapacity(&payloadToExtraItems(Air.SwitchBr.Case{ + .items_len = 1, + .ranges_len = 0, + .body_len = @intCast(case_block.instructions.items.len), + })); cases_extra.appendAssumeCapacity(@intFromEnum(item)); cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items)); } - var is_first = true; - var prev_cond_br: Air.Inst.Index = undefined; - var prev_hint: std.builtin.BranchHint = undefined; - var first_else_body: []const Air.Inst.Index = &.{}; - defer gpa.free(first_else_body); - var prev_then_body: []const Air.Inst.Index = &.{}; - defer 
gpa.free(prev_then_body); - var cases_len = scalar_cases_len; var case_val_idx: usize = scalar_cases_len; var multi_i: u32 = 0; @@ -12602,31 +12604,27 @@ fn analyzeSwitchRuntimeBlock( const ranges_len = sema.code.extra[extra_index]; extra_index += 1; const info: Zir.Inst.SwitchBlock.ProngInfo = @bitCast(sema.code.extra[extra_index]); - extra_index += 1 + items_len; + extra_index += 1 + items_len + 2 * ranges_len; const items = case_vals.items[case_val_idx..][0..items_len]; case_val_idx += items_len; + // TODO: @ptrCast slice once Sema supports it + const ranges: []const [2]Air.Inst.Ref = @as([*]const [2]Air.Inst.Ref, @ptrCast(case_vals.items[case_val_idx..]))[0..ranges_len]; + case_val_idx += ranges_len * 2; + + const body = sema.code.bodySlice(extra_index, info.body_len); + extra_index += info.body_len; case_block.instructions.shrinkRetainingCapacity(0); case_block.error_return_trace_index = child_block.error_return_trace_index; // Generate all possible cases as scalar prongs. if (info.is_inline) { - const body_start = extra_index + 2 * ranges_len; - const body = sema.code.bodySlice(body_start, info.body_len); var emit_bb = false; - var range_i: u32 = 0; - while (range_i < ranges_len) : (range_i += 1) { - const range_items = case_vals.items[case_val_idx..][0..2]; - extra_index += 2; - case_val_idx += 2; - - const item_first_ref = range_items[0]; - const item_last_ref = range_items[1]; - - var item = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item_first_ref, undefined) catch unreachable; - const item_last = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item_last_ref, undefined) catch unreachable; + for (ranges, 0..) 
|range_items, range_i| { + var item = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, range_items[0], undefined) catch unreachable; + const item_last = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, range_items[1], undefined) catch unreachable; while (item.compareScalar(.lte, item_last, operand_ty, zcu)) : ({ // Previous validation has resolved any possible lazy values. @@ -12664,9 +12662,14 @@ fn analyzeSwitchRuntimeBlock( ); try branch_hints.append(gpa, prong_hint); - try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); - cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len)); + try cases_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr.Case).@"struct".fields.len + + 1 + // `item`, no ranges + case_block.instructions.items.len); + cases_extra.appendSliceAssumeCapacity(&payloadToExtraItems(Air.SwitchBr.Case{ + .items_len = 1, + .ranges_len = 0, + .body_len = @intCast(case_block.instructions.items.len), + })); cases_extra.appendAssumeCapacity(@intFromEnum(item_ref)); cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items)); @@ -12713,134 +12716,39 @@ fn analyzeSwitchRuntimeBlock( }; try branch_hints.append(gpa, prong_hint); - try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); - cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len)); + try cases_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr.Case).@"struct".fields.len + + 1 + // `item`, no ranges + case_block.instructions.items.len); + cases_extra.appendSliceAssumeCapacity(&payloadToExtraItems(Air.SwitchBr.Case{ + .items_len = 1, + .ranges_len = 0, + .body_len = @intCast(case_block.instructions.items.len), + })); cases_extra.appendAssumeCapacity(@intFromEnum(item)); cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items)); } - 
extra_index += info.body_len; continue; } - var any_ok: Air.Inst.Ref = .none; - - // If there are any ranges, we have to put all the items into the - // else prong. Otherwise, we can take advantage of multiple items - // mapping to the same body. - if (ranges_len == 0) { - cases_len += 1; - - const analyze_body = if (union_originally) - for (items) |item| { - const item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item, undefined) catch unreachable; - const field_ty = maybe_union_ty.unionFieldType(item_val, zcu).?; - if (field_ty.zigTypeTag(zcu) != .noreturn) break true; - } else false - else - true; + cases_len += 1; - const body = sema.code.bodySlice(extra_index, info.body_len); - extra_index += info.body_len; - const prong_hint: std.builtin.BranchHint = if (err_set and - try sema.maybeErrorUnwrap(&case_block, body, operand, operand_src, allow_err_code_unwrap)) - h: { - // nothing to do here. weight against error branch - break :h .unlikely; - } else if (analyze_body) h: { - break :h try spa.analyzeProngRuntime( - &case_block, - .normal, - body, - info.capture, - child_block.src(.{ .switch_capture = .{ - .switch_node_offset = switch_node_offset, - .case_idx = .{ .kind = .multi, .index = @intCast(multi_i) }, - } }), - items, - .none, - false, - ); - } else h: { - _ = try case_block.addNoOp(.unreach); - break :h .none; - }; - - try branch_hints.append(gpa, prong_hint); - try cases_extra.ensureUnusedCapacity(gpa, 2 + items.len + - case_block.instructions.items.len); - - cases_extra.appendAssumeCapacity(@intCast(items.len)); - cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len)); - - for (items) |item| { - cases_extra.appendAssumeCapacity(@intFromEnum(item)); - } - - cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items)); - } else { + const analyze_body = if (union_originally) for (items) |item| { - const cmp_ok = try case_block.addBinOp(if (case_block.float_mode == .optimized) .cmp_eq_optimized else 
.cmp_eq, operand, item); - if (any_ok != .none) { - any_ok = try case_block.addBinOp(.bool_or, any_ok, cmp_ok); - } else { - any_ok = cmp_ok; - } - } - - var range_i: usize = 0; - while (range_i < ranges_len) : (range_i += 1) { - const range_items = case_vals.items[case_val_idx..][0..2]; - extra_index += 2; - case_val_idx += 2; - - const item_first = range_items[0]; - const item_last = range_items[1]; - - // operand >= first and operand <= last - const range_first_ok = try case_block.addBinOp( - if (case_block.float_mode == .optimized) .cmp_gte_optimized else .cmp_gte, - operand, - item_first, - ); - const range_last_ok = try case_block.addBinOp( - if (case_block.float_mode == .optimized) .cmp_lte_optimized else .cmp_lte, - operand, - item_last, - ); - const range_ok = try case_block.addBinOp( - .bool_and, - range_first_ok, - range_last_ok, - ); - if (any_ok != .none) { - any_ok = try case_block.addBinOp(.bool_or, any_ok, range_ok); - } else { - any_ok = range_ok; - } - } - - const new_cond_br = try case_block.addInstAsIndex(.{ .tag = .cond_br, .data = .{ - .pl_op = .{ - .operand = any_ok, - .payload = undefined, - }, - } }); - var cond_body = try case_block.instructions.toOwnedSlice(gpa); - defer gpa.free(cond_body); - - case_block.instructions.shrinkRetainingCapacity(0); - case_block.error_return_trace_index = child_block.error_return_trace_index; + const item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, item, undefined) catch unreachable; + const field_ty = maybe_union_ty.unionFieldType(item_val, zcu).?; + if (field_ty.zigTypeTag(zcu) != .noreturn) break true; + } else false + else + true; - const body = sema.code.bodySlice(extra_index, info.body_len); - extra_index += info.body_len; - const prong_hint: std.builtin.BranchHint = if (err_set and - try sema.maybeErrorUnwrap(&case_block, body, operand, operand_src, allow_err_code_unwrap)) - h: { - // nothing to do here. 
weight against error branch - break :h .unlikely; - } else try spa.analyzeProngRuntime( + const prong_hint: std.builtin.BranchHint = if (err_set and + try sema.maybeErrorUnwrap(&case_block, body, operand, operand_src, allow_err_code_unwrap)) + h: { + // nothing to do here. weight against error branch + break :h .unlikely; + } else if (analyze_body) h: { + break :h try spa.analyzeProngRuntime( &case_block, .normal, body, @@ -12853,40 +12761,36 @@ fn analyzeSwitchRuntimeBlock( .none, false, ); + } else h: { + _ = try case_block.addNoOp(.unreach); + break :h .none; + }; - if (is_first) { - is_first = false; - first_else_body = cond_body; - cond_body = &.{}; - } else { - try sema.air_extra.ensureUnusedCapacity( - gpa, - @typeInfo(Air.CondBr).@"struct".fields.len + prev_then_body.len + cond_body.len, - ); + try branch_hints.append(gpa, prong_hint); - sema.air_instructions.items(.data)[@intFromEnum(prev_cond_br)].pl_op.payload = sema.addExtraAssumeCapacity(Air.CondBr{ - .then_body_len = @intCast(prev_then_body.len), - .else_body_len = @intCast(cond_body.len), - .branch_hints = .{ - .true = prev_hint, - .false = .none, - // Code coverage is desired for error handling. 
- .then_cov = .poi, - .else_cov = .poi, - }, - }); - sema.air_extra.appendSliceAssumeCapacity(@ptrCast(prev_then_body)); - sema.air_extra.appendSliceAssumeCapacity(@ptrCast(cond_body)); - } - gpa.free(prev_then_body); - prev_then_body = try case_block.instructions.toOwnedSlice(gpa); - prev_cond_br = new_cond_br; - prev_hint = prong_hint; + try cases_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr.Case).@"struct".fields.len + + items.len + 2 * ranges_len + + case_block.instructions.items.len); + cases_extra.appendSliceAssumeCapacity(&payloadToExtraItems(Air.SwitchBr.Case{ + .items_len = @intCast(items.len), + .ranges_len = @intCast(ranges_len), + .body_len = @intCast(case_block.instructions.items.len), + })); + + for (items) |item| { + cases_extra.appendAssumeCapacity(@intFromEnum(item)); } + for (ranges) |range| { + cases_extra.appendSliceAssumeCapacity(&.{ + @intFromEnum(range[0]), + @intFromEnum(range[1]), + }); + } + + cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items)); } - var final_else_body: []const Air.Inst.Index = &.{}; - if (special.body.len != 0 or !is_first or case_block.wantSafety()) { + const else_body: []const Air.Inst.Index = if (special.body.len != 0 or case_block.wantSafety()) else_body: { var emit_bb = false; if (special.is_inline) switch (operand_ty.zigTypeTag(zcu)) { .@"enum" => { @@ -12933,9 +12837,14 @@ fn analyzeSwitchRuntimeBlock( }; try branch_hints.append(gpa, prong_hint); - try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); - cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len)); + try cases_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr.Case).@"struct".fields.len + + 1 + // `item`, no ranges + case_block.instructions.items.len); + cases_extra.appendSliceAssumeCapacity(&payloadToExtraItems(Air.SwitchBr.Case{ + .items_len = 1, + .ranges_len = 0, + .body_len = 
@intCast(case_block.instructions.items.len), + })); cases_extra.appendAssumeCapacity(@intFromEnum(item_ref)); cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items)); } @@ -12979,9 +12888,14 @@ fn analyzeSwitchRuntimeBlock( ); try branch_hints.append(gpa, prong_hint); - try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); - cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len)); + try cases_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr.Case).@"struct".fields.len + + 1 + // `item`, no ranges + case_block.instructions.items.len); + cases_extra.appendSliceAssumeCapacity(&payloadToExtraItems(Air.SwitchBr.Case{ + .items_len = 1, + .ranges_len = 0, + .body_len = @intCast(case_block.instructions.items.len), + })); cases_extra.appendAssumeCapacity(@intFromEnum(item_ref)); cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items)); } @@ -13014,9 +12928,14 @@ fn analyzeSwitchRuntimeBlock( ); try branch_hints.append(gpa, prong_hint); - try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); - cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len)); + try cases_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr.Case).@"struct".fields.len + + 1 + // `item`, no ranges + case_block.instructions.items.len); + cases_extra.appendSliceAssumeCapacity(&payloadToExtraItems(Air.SwitchBr.Case{ + .items_len = 1, + .ranges_len = 0, + .body_len = @intCast(case_block.instructions.items.len), + })); cases_extra.appendAssumeCapacity(@intFromEnum(item_ref)); cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items)); } @@ -13046,9 +12965,14 @@ fn analyzeSwitchRuntimeBlock( ); try branch_hints.append(gpa, prong_hint); - try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); - 
cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len)); + try cases_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr.Case).@"struct".fields.len + + 1 + // `item`, no ranges + case_block.instructions.items.len); + cases_extra.appendSliceAssumeCapacity(&payloadToExtraItems(Air.SwitchBr.Case{ + .items_len = 1, + .ranges_len = 0, + .body_len = @intCast(case_block.instructions.items.len), + })); cases_extra.appendAssumeCapacity(@intFromEnum(Air.Inst.Ref.bool_true)); cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items)); } @@ -13076,9 +13000,14 @@ fn analyzeSwitchRuntimeBlock( ); try branch_hints.append(gpa, prong_hint); - try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); - cases_extra.appendAssumeCapacity(1); // items_len - cases_extra.appendAssumeCapacity(@intCast(case_block.instructions.items.len)); + try cases_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr.Case).@"struct".fields.len + + 1 + // `item`, no ranges + case_block.instructions.items.len); + cases_extra.appendSliceAssumeCapacity(&payloadToExtraItems(Air.SwitchBr.Case{ + .items_len = 1, + .ranges_len = 0, + .body_len = @intCast(case_block.instructions.items.len), + })); cases_extra.appendAssumeCapacity(@intFromEnum(Air.Inst.Ref.bool_false)); cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items)); } @@ -13142,41 +13071,22 @@ fn analyzeSwitchRuntimeBlock( break :h .cold; }; - if (is_first) { - try branch_hints.append(gpa, else_hint); - final_else_body = case_block.instructions.items; - } else { - try branch_hints.append(gpa, .none); // we have the range conditionals first - try sema.air_extra.ensureUnusedCapacity(gpa, prev_then_body.len + - @typeInfo(Air.CondBr).@"struct".fields.len + case_block.instructions.items.len); - - sema.air_instructions.items(.data)[@intFromEnum(prev_cond_br)].pl_op.payload = sema.addExtraAssumeCapacity(Air.CondBr{ - 
.then_body_len = @intCast(prev_then_body.len), - .else_body_len = @intCast(case_block.instructions.items.len), - .branch_hints = .{ - .true = prev_hint, - .false = else_hint, - .then_cov = .poi, - .else_cov = .poi, - }, - }); - sema.air_extra.appendSliceAssumeCapacity(@ptrCast(prev_then_body)); - sema.air_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items)); - final_else_body = first_else_body; - } - } else { + try branch_hints.append(gpa, else_hint); + break :else_body case_block.instructions.items; + } else else_body: { try branch_hints.append(gpa, .none); - } + break :else_body &.{}; + }; assert(branch_hints.items.len == cases_len + 1); try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr).@"struct".fields.len + - cases_extra.items.len + final_else_body.len + + cases_extra.items.len + else_body.len + (std.math.divCeil(usize, branch_hints.items.len, 10) catch unreachable)); // branch hints const payload_index = sema.addExtraAssumeCapacity(Air.SwitchBr{ .cases_len = @intCast(cases_len), - .else_body_len = @intCast(final_else_body.len), + .else_body_len = @intCast(else_body.len), }); { @@ -13195,7 +13105,7 @@ fn analyzeSwitchRuntimeBlock( } } sema.air_extra.appendSliceAssumeCapacity(@ptrCast(cases_extra.items)); - sema.air_extra.appendSliceAssumeCapacity(@ptrCast(final_else_body)); + sema.air_extra.appendSliceAssumeCapacity(@ptrCast(else_body)); return try child_block.addInst(.{ .tag = .switch_br, @@ -37386,15 +37296,21 @@ pub fn addExtra(sema: *Sema, extra: anytype) Allocator.Error!u32 { } pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 { - const fields = std.meta.fields(@TypeOf(extra)); const result: u32 = @intCast(sema.air_extra.items.len); - inline for (fields) |field| { - sema.air_extra.appendAssumeCapacity(switch (field.type) { - u32 => @field(extra, field.name), - i32, Air.CondBr.BranchHints => @bitCast(@field(extra, field.name)), - Air.Inst.Ref, InternPool.Index => @intFromEnum(@field(extra, field.name)), + 
sema.air_extra.appendSliceAssumeCapacity(&payloadToExtraItems(extra)); + return result; +} + +fn payloadToExtraItems(data: anytype) [@typeInfo(@TypeOf(data)).@"struct".fields.len]u32 { + const fields = @typeInfo(@TypeOf(data)).@"struct".fields; + var result: [fields.len]u32 = undefined; + inline for (&result, fields) |*val, field| { + val.* = switch (field.type) { + u32 => @field(data, field.name), + i32, Air.CondBr.BranchHints => @bitCast(@field(data, field.name)), + Air.Inst.Ref, InternPool.Index => @intFromEnum(@field(data, field.name)), else => @compileError("bad field type: " ++ @typeName(field.type)), - }); + }; } return result; } diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index c61e540c4a53..fd5a1a536709 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -5105,6 +5105,8 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { var it = switch_br.iterateCases(); while (it.next()) |case| { + if (case.ranges.len > 0) return self.fail("TODO: switch with ranges", .{}); + // For every item, we compare it to condition and branch into // the prong if they are equal. After we compared to all // items, we branch into the next prong (or if no other prongs diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index d693c06ec9a7..d88029bd6c7b 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -5053,6 +5053,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { var it = switch_br.iterateCases(); while (it.next()) |case| { + if (case.ranges.len > 0) return self.fail("TODO: switch with ranges", .{}); // For every item, we compare it to condition and branch into // the prong if they are equal. 
After we compared to all // items, we branch into the next prong (or if no other prongs diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index a70618a394f3..d48a99c8d715 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -5681,6 +5681,8 @@ fn airSwitchBr(func: *Func, inst: Air.Inst.Index) !void { var it = switch_br.iterateCases(); while (it.next()) |case| { + if (case.ranges.len > 0) return func.fail("TODO: switch with ranges", .{}); + var relocs = try func.gpa.alloc(Mir.Inst.Index, case.items.len); defer func.gpa.free(relocs); diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index a3a51c72334f..7fe8676234d1 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -4064,6 +4064,8 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var it = switch_br.iterateCases(); while (it.next()) |case| { + if (case.ranges.len > 0) return func.fail("TODO: switch with ranges", .{}); + const values = try func.gpa.alloc(CaseValue, case.items.len); errdefer func.gpa.free(values); diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 1f13c8ae7d66..136ad3ca4c03 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -13695,6 +13695,8 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { var it = switch_br.iterateCases(); while (it.next()) |case| { + if (case.ranges.len > 0) return self.fail("TODO: switch with ranges", .{}); + var relocs = try self.gpa.alloc(Mir.Inst.Index, case.items.len); defer self.gpa.free(relocs); diff --git a/src/codegen/c.zig b/src/codegen/c.zig index f3b8c7e72a51..3c97d74cd451 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -5017,12 +5017,13 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { const liveness = try f.liveness.getSwitchBr(gpa, inst, switch_br.cases_len + 1); defer gpa.free(liveness.deaths); - // On the final iteration we do not need to fix any state. 
This is because, like in the `else` - // branch of a `cond_br`, our parent has to do it for this entire body anyway. - const last_case_i = switch_br.cases_len - @intFromBool(switch_br.else_body_len == 0); - + var any_range_cases = false; var it = switch_br.iterateCases(); while (it.next()) |case| { + if (case.ranges.len > 0) { + any_range_cases = true; + continue; + } for (case.items) |item| { try f.object.indent_writer.insertNewline(); try writer.writeAll("case "); @@ -5041,29 +5042,56 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { } try writer.writeByte(' '); - if (case.idx != last_case_i) { - try genBodyResolveState(f, inst, liveness.deaths[case.idx], case.body, false); - } else { - for (liveness.deaths[case.idx]) |death| { - try die(f, inst, death.toRef()); - } - try genBody(f, case.body); - } + try genBodyResolveState(f, inst, liveness.deaths[case.idx], case.body, false); // The case body must be noreturn so we don't need to insert a break. } const else_body = it.elseBody(); try f.object.indent_writer.insertNewline(); + + try writer.writeAll("default: "); + if (any_range_cases) { + // We will iterate the cases again to handle those with ranges, and generate + // code using conditions rather than switch cases for such cases. + it = switch_br.iterateCases(); + while (it.next()) |case| { + if (case.ranges.len == 0) continue; // handled above + + try writer.writeAll("if ("); + for (case.items, 0..) |item, item_i| { + if (item_i != 0) try writer.writeAll(" || "); + try f.writeCValue(writer, condition, .Other); + try writer.writeAll(" == "); + try f.object.dg.renderValue(writer, (try f.air.value(item, pt)).?, .Other); + } + for (case.ranges, 0..) 
|range, range_i| { + if (case.items.len != 0 or range_i != 0) try writer.writeAll(" || "); + // "(x >= lower && x <= upper)" + try writer.writeByte('('); + try f.writeCValue(writer, condition, .Other); + try writer.writeAll(" >= "); + try f.object.dg.renderValue(writer, (try f.air.value(range[0], pt)).?, .Other); + try writer.writeAll(" && "); + try f.writeCValue(writer, condition, .Other); + try writer.writeAll(" <= "); + try f.object.dg.renderValue(writer, (try f.air.value(range[1], pt)).?, .Other); + try writer.writeByte(')'); + } + try writer.writeAll(") "); + try genBodyResolveState(f, inst, liveness.deaths[case.idx], case.body, false); + } + } + if (else_body.len > 0) { - // Note that this must be the last case (i.e. the `last_case_i` case was not hit above) + // Note that this must be the last case, so we do not need to use `genBodyResolveState` since + // the parent block will do it (because the case body is noreturn). for (liveness.deaths[liveness.deaths.len - 1]) |death| { try die(f, inst, death.toRef()); } - try writer.writeAll("default: "); try genBody(f, else_body); } else { - try writer.writeAll("default: zig_unreachable();"); + try writer.writeAll("zig_unreachable();"); } try f.object.indent_writer.insertNewline(); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index dc8996afda08..25cf2e8dcc9c 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -6230,7 +6230,15 @@ pub const FuncGen = struct { const cond = try self.resolveInst(switch_br.operand); - const else_block = try self.wip.block(1, "Default"); + // This is not necessarily the actual `else` prong; it first contains conditionals + // for any range cases. It's just the `else` of the LLVM switch. + const llvm_else_block = try self.wip.block(1, "Default"); + + const case_blocks = try self.gpa.alloc(Builder.Function.Block.Index, switch_br.cases_len); + defer self.gpa.free(case_blocks); + // We set incoming as 0 for now, and increment it as we construct the switch. 
+ for (case_blocks) |*b| b.* = try self.wip.block(0, "Case"); + const llvm_usize = try o.lowerType(Type.usize); const cond_int = if (cond.typeOfWip(&self.wip).isPointer(&o.builder)) try self.wip.cast(.ptrtoint, cond, llvm_usize, "") @@ -6294,12 +6302,17 @@ pub const FuncGen = struct { break :weights @enumFromInt(@intFromEnum(tuple)); }; - var wip_switch = try self.wip.@"switch"(cond_int, else_block, llvm_cases_len, weights); + var wip_switch = try self.wip.@"switch"(cond_int, llvm_else_block, llvm_cases_len, weights); defer wip_switch.finish(&self.wip); var it = switch_br.iterateCases(); + var any_ranges = false; while (it.next()) |case| { - const case_block = try self.wip.block(@intCast(case.items.len), "Case"); + if (case.ranges.len > 0) any_ranges = true; + const case_block = case_blocks[case.idx]; + case_block.ptr(&self.wip).incoming += @intCast(case.items.len); + // Handle scalar items, and generate the block. + // We'll generate conditionals for the ranges later on. for (case.items) |item| { const llvm_item = (try self.resolveInst(item)).toConst().?; const llvm_int_item = if (llvm_item.typeOf(&o.builder).isPointer(&o.builder)) @@ -6314,7 +6327,42 @@ pub const FuncGen = struct { } const else_body = it.elseBody(); - self.wip.cursor = .{ .block = else_block }; + self.wip.cursor = .{ .block = llvm_else_block }; + if (any_ranges) { + const cond_ty = self.typeOf(switch_br.operand); + // Add conditionals for the ranges, directing to the relevant bb. + // We don't need to consider `cold` branch hints since that information is stored + // in the target bb body, but we do care about likely/unlikely/unpredictable. 
+ it = switch_br.iterateCases(); + while (it.next()) |case| { + if (case.ranges.len == 0) continue; + const case_block = case_blocks[case.idx]; + const hint = switch_br.getHint(case.idx); + case_block.ptr(&self.wip).incoming += 1; + const next_else_block = try self.wip.block(1, "Default"); + var range_cond: ?Builder.Value = null; + for (case.ranges) |range| { + const llvm_min = try self.resolveInst(range[0]); + const llvm_max = try self.resolveInst(range[1]); + const cond_part = try self.wip.bin( + .@"and", + try self.cmp(.normal, .gte, cond_ty, cond, llvm_min), + try self.cmp(.normal, .lte, cond_ty, cond, llvm_max), + "", + ); + if (range_cond) |prev| { + range_cond = try self.wip.bin(.@"or", prev, cond_part, ""); + } else range_cond = cond_part; + } + _ = try self.wip.brCond(range_cond.?, case_block, next_else_block, switch (hint) { + .none, .cold => .none, + .unpredictable => .unpredictable, + .likely => .then_likely, + .unlikely => .else_likely, + }); + self.wip.cursor = .{ .block = next_else_block }; + } + } if (switch_br.getElseHint() == .cold) _ = try self.wip.callIntrinsicAssumeCold(); if (else_body.len != 0) { try self.genBodyDebugScope(null, else_body, .poi); diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 345e80a23c4c..b9aa77778669 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -6211,6 +6211,7 @@ const NavGen = struct { var num_conditions: u32 = 0; var it = switch_br.iterateCases(); while (it.next()) |case| { + if (case.ranges.len > 0) return self.todo("switch with ranges", .{}); num_conditions += @intCast(case.items.len); } break :blk num_conditions; diff --git a/src/print_air.zig b/src/print_air.zig index 227f362c396f..acb5eda07e42 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -864,6 +864,12 @@ const Writer = struct { if (item_i != 0) try s.writeAll(", "); try w.writeInstRef(s, item, false); } + for (case.ranges, 0..) 
|range, range_i| { + if (range_i != 0 or case.items.len != 0) try s.writeAll(", "); + try w.writeInstRef(s, range[0], false); + try s.writeAll("..."); + try w.writeInstRef(s, range[1], false); + } try s.writeAll("] "); const hint = switch_br.getHint(case.idx); if (hint != .none) { From 5fb4a7df38deb705f77088d7788f0acc09da613d Mon Sep 17 00:00:00 2001 From: mlugg Date: Thu, 25 Apr 2024 03:46:10 +0100 Subject: [PATCH 010/202] Air: add explicit `repeat` instruction to repeat loops This commit introduces a new AIR instruction, `repeat`, which causes control flow to move back to the start of a given AIR loop. `loop` instructions will no longer automatically perform this operation after control flow reaches the end of the body. The motivation for making this change now was really just consistency with the upcoming implementation of #8220: it wouldn't make sense to have this feature work significantly differently. However, there were already some TODOs kicking around which wanted this feature. It's useful for two key reasons: * It allows loops over AIR instruction bodies to loop precisely until they reach a `noreturn` instruction. This allows for tail calling a few things, and avoiding a range check on each iteration of a hot path, plus gives a nice assertion that validates AIR structure a little. This is a very minor benefit, which this commit does apply to the LLVM and C backends. * It should allow for more compact ZIR and AIR to be emitted by having AstGen emit `repeat` instructions more often rather than having `continue` statements `break` to a `block` which is *followed* by a `repeat`. This is done in status quo because `repeat` instructions only ever cause the direct parent block to repeat. Now that AIR is more flexible, this flexibility can be pretty trivially extended to ZIR, and we can then emit better ZIR. This commit does not implement this. Support for this feature is currently regressed on all self-hosted native backends, including x86_64. 
This support will be added where necessary before this branch is merged. --- src/Air.zig | 15 ++-- src/Air/types_resolved.zig | 1 + src/Liveness.zig | 62 +++++++++++++++-- src/Liveness/Verify.zig | 38 ++++++++--- src/Sema.zig | 19 +++++- src/arch/aarch64/CodeGen.zig | 1 + src/arch/arm/CodeGen.zig | 1 + src/arch/riscv64/CodeGen.zig | 1 + src/arch/sparc64/CodeGen.zig | 1 + src/arch/wasm/CodeGen.zig | 1 + src/arch/x86_64/CodeGen.zig | 1 + src/codegen/c.zig | 94 ++++++++++++++----------- src/codegen/llvm.zig | 129 ++++++++++++++++++++--------------- src/codegen/spirv.zig | 1 + src/print_air.zig | 6 ++ 15 files changed, 257 insertions(+), 114 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index 9db68da89f39..539624bd4f81 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -274,13 +274,15 @@ pub const Inst = struct { /// is to encounter a `br` that targets this `block`. If the `block` type is `noreturn`, /// then there do not exist any `br` instructions targeting this `block`. block, - /// A labeled block of code that loops forever. At the end of the body it is implied - /// to repeat; no explicit "repeat" instruction terminates loop bodies. + /// A labeled block of code that loops forever. The body must be `noreturn`: loops + /// occur through an explicit `repeat` instruction pointing back to this one. /// Result type is always `noreturn`; no instructions in a block follow this one. - /// The body never ends with a `noreturn` instruction, so the "repeat" operation - /// is always statically reachable. + /// There is always at least one `repeat` instruction referencing the loop. /// Uses the `ty_pl` field. Payload is `Block`. loop, + /// Sends control flow back to the beginning of a parent `loop` body. + /// Uses the `repeat` field. + repeat, /// Return from a block with a result. /// Result type is always noreturn; no instructions in a block follow this one. /// Uses the `br` field. 
@@ -1045,6 +1047,9 @@ pub const Inst = struct { block_inst: Index, operand: Ref, }, + repeat: struct { + loop_inst: Index, + }, pl_op: struct { operand: Ref, payload: u32, @@ -1445,6 +1450,7 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool) => return datas[@intFromEnum(inst)].ty_op.ty.toType(), .loop, + .repeat, .br, .cond_br, .switch_br, @@ -1602,6 +1608,7 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool { .arg, .block, .loop, + .repeat, .br, .trap, .breakpoint, diff --git a/src/Air/types_resolved.zig b/src/Air/types_resolved.zig index 3de5aeb3e37f..c68971a28a9d 100644 --- a/src/Air/types_resolved.zig +++ b/src/Air/types_resolved.zig @@ -420,6 +420,7 @@ fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool { .dbg_stmt, .err_return_trace, .save_err_return_trace_index, + .repeat, => {}, } } diff --git a/src/Liveness.zig b/src/Liveness.zig index f26ea9a7a7d7..48ddca747aa5 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -70,7 +70,8 @@ pub const Block = struct { const LivenessPass = enum { /// In this pass, we perform some basic analysis of loops to gain information the main pass /// needs. In particular, for every `loop`, we track the following information: - /// * Every block which the loop body contains a `br` to. + /// * Every outer block which the loop body contains a `br` to. + /// * Every outer loop which the loop body contains a `repeat` to. /// * Every operand referenced within the loop body but created outside the loop. /// This gives the main analysis pass enough information to determine the full set of /// instructions which need to be alive when a loop repeats. This data is TEMPORARILY stored in @@ -89,7 +90,8 @@ fn LivenessPassData(comptime pass: LivenessPass) type { return switch (pass) { .loop_analysis => struct { /// The set of blocks which are exited with a `br` instruction at some point within this - /// body and which we are currently within. 
+ /// body and which we are currently within. Also includes `loop`s which are the target + /// of a `repeat` instruction. breaks: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}, /// The set of operands for which we have seen at least one usage but not their birth. @@ -102,7 +104,7 @@ fn LivenessPassData(comptime pass: LivenessPass) type { }, .main_analysis => struct { - /// Every `block` currently under analysis. + /// Every `block` and `loop` currently under analysis. block_scopes: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockScope) = .{}, /// The set of instructions currently alive in the current control @@ -114,7 +116,8 @@ fn LivenessPassData(comptime pass: LivenessPass) type { old_extra: std.ArrayListUnmanaged(u32) = .{}, const BlockScope = struct { - /// The set of instructions which are alive upon a `br` to this block. + /// If this is a `block`, these instructions are alive upon a `br` to this block. + /// If this is a `loop`, these instructions are alive upon a `repeat` to this block. 
live_set: std.AutoHashMapUnmanaged(Air.Inst.Index, void), }; @@ -326,6 +329,7 @@ pub fn categorizeOperand( .ret_ptr, .trap, .breakpoint, + .repeat, .dbg_stmt, .unreach, .ret_addr, @@ -1201,6 +1205,7 @@ fn analyzeInst( }, .br => return analyzeInstBr(a, pass, data, inst), + .repeat => return analyzeInstRepeat(a, pass, data, inst), .assembly => { const extra = a.air.extraData(Air.Asm, inst_datas[@intFromEnum(inst)].ty_pl.payload); @@ -1380,6 +1385,33 @@ fn analyzeInstBr( return analyzeOperands(a, pass, data, inst, .{ br.operand, .none, .none }); } +fn analyzeInstRepeat( + a: *Analysis, + comptime pass: LivenessPass, + data: *LivenessPassData(pass), + inst: Air.Inst.Index, +) !void { + const inst_datas = a.air.instructions.items(.data); + const repeat = inst_datas[@intFromEnum(inst)].repeat; + const gpa = a.gpa; + + switch (pass) { + .loop_analysis => { + try data.breaks.put(gpa, repeat.loop_inst, {}); + }, + + .main_analysis => { + const block_scope = data.block_scopes.get(repeat.loop_inst).?; // we should always be repeating an enclosing loop + + const new_live_set = try block_scope.live_set.clone(gpa); + data.live_set.deinit(gpa); + data.live_set = new_live_set; + }, + } + + return analyzeOperands(a, pass, data, inst, .{ .none, .none, .none }); +} + fn analyzeInstBlock( a: *Analysis, comptime pass: LivenessPass, @@ -1402,8 +1434,10 @@ fn analyzeInstBlock( .main_analysis => { log.debug("[{}] %{}: block live set is {}", .{ pass, inst, fmtInstSet(&data.live_set) }); + // We can move the live set because the body should have a noreturn + // instruction which overrides the set. try data.block_scopes.put(gpa, inst, .{ - .live_set = try data.live_set.clone(gpa), + .live_set = data.live_set.move(), }); defer { log.debug("[{}] %{}: popped block scope", .{ pass, inst }); @@ -1471,10 +1505,15 @@ fn analyzeInstLoop( try analyzeBody(a, pass, data, body); + // `loop`s are guaranteed to have at least one matching `repeat`. 
+ // However, we no longer care about repeats of this loop itself. + assert(data.breaks.remove(inst)); + + const extra_index: u32 = @intCast(a.extra.items.len); + const num_breaks = data.breaks.count(); try a.extra.ensureUnusedCapacity(gpa, 1 + num_breaks); - const extra_index = @as(u32, @intCast(a.extra.items.len)); a.extra.appendAssumeCapacity(num_breaks); var it = data.breaks.keyIterator(); @@ -1543,6 +1582,17 @@ fn analyzeInstLoop( } } + // Now, `data.live_set` is the operands which must be alive when the loop repeats. + // Move them into a block scope for corresponding `repeat` instructions to notice. + log.debug("[{}] %{}: loop live set is {}", .{ pass, inst, fmtInstSet(&data.live_set) }); + try data.block_scopes.putNoClobber(gpa, inst, .{ + .live_set = data.live_set.move(), + }); + defer { + log.debug("[{}] %{}: popped loop block scop", .{ pass, inst }); + var scope = data.block_scopes.fetchRemove(inst).?.value; + scope.live_set.deinit(gpa); + } try analyzeBody(a, pass, data, body); }, } diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig index bcd60d72c8d9..1ff3059417ca 100644 --- a/src/Liveness/Verify.zig +++ b/src/Liveness/Verify.zig @@ -1,28 +1,38 @@ -//! Verifies that liveness information is valid. +//! Verifies that Liveness information is valid. 
gpa: std.mem.Allocator, air: Air, liveness: Liveness, live: LiveMap = .{}, blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, LiveMap) = .{}, +loops: std.AutoHashMapUnmanaged(Air.Inst.Index, LiveMap) = .{}, intern_pool: *const InternPool, pub const Error = error{ LivenessInvalid, OutOfMemory }; pub fn deinit(self: *Verify) void { self.live.deinit(self.gpa); - var block_it = self.blocks.valueIterator(); - while (block_it.next()) |block| block.deinit(self.gpa); - self.blocks.deinit(self.gpa); + { + var it = self.blocks.valueIterator(); + while (it.next()) |block| block.deinit(self.gpa); + self.blocks.deinit(self.gpa); + } + { + var it = self.loops.valueIterator(); + while (it.next()) |block| block.deinit(self.gpa); + self.loops.deinit(self.gpa); + } self.* = undefined; } pub fn verify(self: *Verify) Error!void { self.live.clearRetainingCapacity(); self.blocks.clearRetainingCapacity(); + self.loops.clearRetainingCapacity(); try self.verifyBody(self.air.getMainBody()); // We don't care about `self.live` now, because the loop body was noreturn - everything being dead was checked on `ret` etc assert(self.blocks.count() == 0); + assert(self.loops.count() == 0); } const LiveMap = std.AutoHashMapUnmanaged(Air.Inst.Index, void); @@ -430,6 +440,13 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { } try self.verifyInst(inst); }, + .repeat => { + const repeat = data[@intFromEnum(inst)].repeat; + const expected_live = self.loops.get(repeat.loop_inst) orelse + return invalid("%{}: loop %{} not in scope", .{ @intFromEnum(inst), @intFromEnum(repeat.loop_inst) }); + + try self.verifyMatchingLiveness(repeat.loop_inst, expected_live); + }, .block, .dbg_inline_block => |tag| { const ty_pl = data[@intFromEnum(inst)].ty_pl; const block_ty = ty_pl.ty.toType(); @@ -475,14 +492,17 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { const extra = self.air.extraData(Air.Block, ty_pl.payload); const loop_body: []const Air.Inst.Index = 
@ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]); - var live = try self.live.clone(self.gpa); - defer live.deinit(self.gpa); + // The same stuff should be alive after the loop as before it. + const gop = try self.loops.getOrPut(self.gpa, inst); + defer { + var live = self.loops.fetchRemove(inst).?; + live.value.deinit(self.gpa); + } + if (gop.found_existing) return invalid("%{}: loop already exists", .{@intFromEnum(inst)}); + gop.value_ptr.* = try self.live.clone(self.gpa); try self.verifyBody(loop_body); - // The same stuff should be alive after the loop as before it - try self.verifyMatchingLiveness(inst, live); - try self.verifyInstOperands(inst, .{ .none, .none, .none }); }, .cond_br => { diff --git a/src/Sema.zig b/src/Sema.zig index aafd605bc8bc..a9287e21d1af 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1559,6 +1559,8 @@ fn analyzeBodyInner( // We are definitely called by `zirLoop`, which will treat the // fact that this body does not terminate `noreturn` as an // implicit repeat. + // TODO: since AIR has `repeat` now, we could change ZIR to generate + // more optimal code utilizing `repeat` instructions across blocks! break; } }, @@ -5811,17 +5813,30 @@ fn zirLoop(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileError // Use `analyzeBodyInner` directly to push any comptime control flow up the stack. try sema.analyzeBodyInner(&loop_block, body); + // TODO: since AIR has `repeat` now, we could change ZIR to generate + // more optimal code utilizing `repeat` instructions across blocks! + // For now, if the generated loop body does not terminate `noreturn`, + // then `analyzeBodyInner` is signalling that it ended with `repeat`. + const loop_block_len = loop_block.instructions.items.len; if (loop_block_len > 0 and sema.typeOf(loop_block.instructions.items[loop_block_len - 1].toRef()).isNoReturn(zcu)) { // If the loop ended with a noreturn terminator, then there is no way for it to loop, // so we can just use the block instead. 
try child_block.instructions.appendSlice(gpa, loop_block.instructions.items); } else { + _ = try loop_block.addInst(.{ + .tag = .repeat, + .data = .{ .repeat = .{ + .loop_inst = loop_inst, + } }, + }); + // Note that `loop_block_len` is now off by one. + try child_block.instructions.append(gpa, loop_inst); - try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).@"struct".fields.len + loop_block_len); + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).@"struct".fields.len + loop_block_len + 1); sema.air_instructions.items(.data)[@intFromEnum(loop_inst)].ty_pl.payload = sema.addExtraAssumeCapacity( - Air.Block{ .body_len = @intCast(loop_block_len) }, + Air.Block{ .body_len = @intCast(loop_block_len + 1) }, ); sema.air_extra.appendSliceAssumeCapacity(@ptrCast(loop_block.instructions.items)); } diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index fd5a1a536709..334787caf81c 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -734,6 +734,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .bitcast => try self.airBitCast(inst), .block => try self.airBlock(inst), .br => try self.airBr(inst), + .repeat => return self.fail("TODO implement `repeat`", .{}), .trap => try self.airTrap(), .breakpoint => try self.airBreakpoint(), .ret_addr => try self.airRetAddr(inst), diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index d88029bd6c7b..0128a2d9bb57 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -721,6 +721,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .bitcast => try self.airBitCast(inst), .block => try self.airBlock(inst), .br => try self.airBr(inst), + .repeat => return self.fail("TODO implement `repeat`", .{}), .trap => try self.airTrap(), .breakpoint => try self.airBreakpoint(), .ret_addr => try self.airRetAddr(inst), diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 
d48a99c8d715..413fab348bf0 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1579,6 +1579,7 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void { .bitcast => try func.airBitCast(inst), .block => try func.airBlock(inst), .br => try func.airBr(inst), + .repeat => return func.fail("TODO implement `repeat`", .{}), .trap => try func.airTrap(), .breakpoint => try func.airBreakpoint(), .ret_addr => try func.airRetAddr(inst), diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 2cb07fb25f57..a26182930623 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -576,6 +576,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .bitcast => try self.airBitCast(inst), .block => try self.airBlock(inst), .br => try self.airBr(inst), + .repeat => return self.fail("TODO implement `repeat`", .{}), .trap => try self.airTrap(), .breakpoint => try self.airBreakpoint(), .ret_addr => @panic("TODO try self.airRetAddr(inst)"), diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 7fe8676234d1..cbf243972148 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1903,6 +1903,7 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { .trap => func.airTrap(inst), .breakpoint => func.airBreakpoint(inst), .br => func.airBr(inst), + .repeat => return func.fail("TODO implement `repeat`", .{}), .int_from_bool => func.airIntFromBool(inst), .cond_br => func.airCondBr(inst), .intcast => func.airIntcast(inst), diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 136ad3ca4c03..c92d42ffb7a6 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2247,6 +2247,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .bitcast => try self.airBitCast(inst), .block => try self.airBlock(inst), .br => try self.airBr(inst), + .repeat => return self.fail("TODO implement 
`repeat`", .{}), .trap => try self.airTrap(), .breakpoint => try self.airBreakpoint(), .ret_addr => try self.airRetAddr(inst), diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 3c97d74cd451..91dec12279fd 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -3137,11 +3137,9 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, .arg => try airArg(f, inst), - .trap => try airTrap(f, f.object.writer()), .breakpoint => try airBreakpoint(f.object.writer()), .ret_addr => try airRetAddr(f, inst), .frame_addr => try airFrameAddress(f, inst), - .unreach => try airUnreach(f), .fence => try airFence(f, inst), .ptr_add => try airPtrAddSub(f, inst, '+'), @@ -3248,21 +3246,13 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, .alloc => try airAlloc(f, inst), .ret_ptr => try airRetPtr(f, inst), .assembly => try airAsm(f, inst), - .block => try airBlock(f, inst), .bitcast => try airBitcast(f, inst), .intcast => try airIntCast(f, inst), .trunc => try airTrunc(f, inst), .int_from_bool => try airIntFromBool(f, inst), .load => try airLoad(f, inst), - .ret => try airRet(f, inst, false), - .ret_safe => try airRet(f, inst, false), // TODO - .ret_load => try airRet(f, inst, true), .store => try airStore(f, inst, false), .store_safe => try airStore(f, inst, true), - .loop => try airLoop(f, inst), - .cond_br => try airCondBr(f, inst), - .br => try airBr(f, inst), - .switch_br => try airSwitchBr(f, inst), .struct_field_ptr => try airStructFieldPtr(f, inst), .array_to_slice => try airArrayToSlice(f, inst), .cmpxchg_weak => try airCmpxchg(f, inst, "weak"), @@ -3296,14 +3286,8 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, .try_ptr_cold => try airTryPtr(f, inst), .dbg_stmt => try airDbgStmt(f, inst), - .dbg_inline_block => try airDbgInlineBlock(f, inst), .dbg_var_ptr, .dbg_var_val, .dbg_arg_inline => try airDbgVar(f, inst), - .call => try airCall(f, inst, .auto), - .call_always_tail => 
.none, - .call_never_tail => try airCall(f, inst, .never_tail), - .call_never_inline => try airCall(f, inst, .never_inline), - .float_from_int, .int_from_float, .fptrunc, @@ -3390,6 +3374,39 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, .work_group_size, .work_group_id, => unreachable, + + // Instructions that are known to always be `noreturn` based on their tag. + .br => return airBr(f, inst), + .repeat => return airRepeat(f, inst), + .cond_br => return airCondBr(f, inst), + .switch_br => return airSwitchBr(f, inst), + .loop => return airLoop(f, inst), + .ret => return airRet(f, inst, false), + .ret_safe => return airRet(f, inst, false), // TODO + .ret_load => return airRet(f, inst, true), + .trap => return airTrap(f, f.object.writer()), + .unreach => return airUnreach(f), + + // Instructions which may be `noreturn`. + .block => res: { + const res = try airBlock(f, inst); + if (f.typeOfIndex(inst).isNoReturn(zcu)) return; + break :res res; + }, + .dbg_inline_block => res: { + const res = try airDbgInlineBlock(f, inst); + if (f.typeOfIndex(inst).isNoReturn(zcu)) return; + break :res res; + }, + // TODO: calls should be in this category! The AIR we emit for them is a bit weird. + // The instruction has type `noreturn`, but there are instructions (and maybe a safety + // check) following nonetheless. The `unreachable` or safety check should be emitted by + // backends instead. 
+ .call => try airCall(f, inst, .auto), + .call_always_tail => .none, + .call_never_tail => try airCall(f, inst, .never_tail), + .call_never_inline => try airCall(f, inst, .never_inline), + // zig fmt: on }; if (result_value == .new_local) { @@ -3401,6 +3418,7 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, else => result_value, }); } + unreachable; } fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: []const u8) !CValue { @@ -3718,7 +3736,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { return local; } -fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { +fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !void { const pt = f.object.dg.pt; const zcu = pt.zcu; const un_op = f.air.instructions.items(.data)[@intFromEnum(inst)].un_op; @@ -3769,7 +3787,6 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValue { // Not even allowed to return void in a naked function. if (!f.object.dg.is_naked_fn) try writer.writeAll("return;\n"); } - return .none; } fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue { @@ -4741,7 +4758,7 @@ fn lowerTry( return local; } -fn airBr(f: *Function, inst: Air.Inst.Index) !CValue { +fn airBr(f: *Function, inst: Air.Inst.Index) !void { const branch = f.air.instructions.items(.data)[@intFromEnum(inst)].br; const block = f.blocks.get(branch.block_inst).?; const result = block.result; @@ -4761,7 +4778,12 @@ fn airBr(f: *Function, inst: Air.Inst.Index) !CValue { } try writer.print("goto zig_block_{d};\n", .{block.block_id}); - return .none; +} + +fn airRepeat(f: *Function, inst: Air.Inst.Index) !void { + const repeat = f.air.instructions.items(.data)[@intFromEnum(inst)].repeat; + const writer = f.object.writer(); + try writer.print("goto zig_loop_{d};\n", .{@intFromEnum(repeat.loop_inst)}); } fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue { @@ -4889,12 +4911,10 @@ fn bitcast(f: *Function, dest_ty: Type, operand: 
CValue, operand_ty: Type) !CVal return local; } -fn airTrap(f: *Function, writer: anytype) !CValue { +fn airTrap(f: *Function, writer: anytype) !void { // Not even allowed to call trap in a naked function. - if (f.object.dg.is_naked_fn) return .none; - + if (f.object.dg.is_naked_fn) return; try writer.writeAll("zig_trap();\n"); - return .none; } fn airBreakpoint(writer: anytype) !CValue { @@ -4933,28 +4953,27 @@ fn airFence(f: *Function, inst: Air.Inst.Index) !CValue { return .none; } -fn airUnreach(f: *Function) !CValue { +fn airUnreach(f: *Function) !void { // Not even allowed to call unreachable in a naked function. - if (f.object.dg.is_naked_fn) return .none; - + if (f.object.dg.is_naked_fn) return; try f.object.writer().writeAll("zig_unreachable();\n"); - return .none; } -fn airLoop(f: *Function, inst: Air.Inst.Index) !CValue { +fn airLoop(f: *Function, inst: Air.Inst.Index) !void { const ty_pl = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const loop = f.air.extraData(Air.Block, ty_pl.payload); const body: []const Air.Inst.Index = @ptrCast(f.air.extra[loop.end..][0..loop.data.body_len]); const writer = f.object.writer(); - try writer.writeAll("for (;;) "); - try genBody(f, body); // no need to restore state, we're noreturn - try writer.writeByte('\n'); - - return .none; + // `repeat` instructions matching this loop will branch to + // this label. Since we need a label for arbitrary `repeat` + // anyway, there's actually no need to use a "real" looping + // construct at all! 
+ try writer.print("zig_loop_{d}:\n", .{@intFromEnum(inst)}); + try genBodyInner(f, body); // no need to restore state, we're noreturn } -fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue { +fn airCondBr(f: *Function, inst: Air.Inst.Index) !void { const pl_op = f.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const cond = try f.resolveInst(pl_op.operand); try reap(f, inst, &.{pl_op.operand}); @@ -4983,11 +5002,9 @@ fn airCondBr(f: *Function, inst: Air.Inst.Index) !CValue { // instance) `br` to a block (label). try genBodyInner(f, else_body); - - return .none; } -fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { +fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !void { const pt = f.object.dg.pt; const zcu = pt.zcu; const switch_br = f.air.unwrapSwitch(inst); @@ -5097,7 +5114,6 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue { f.object.indent_writer.popIndent(); try writer.writeAll("}\n"); - return .none; } fn asmInputNeedsLocal(f: *Function, constraint: []const u8, value: CValue) bool { diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 25cf2e8dcc9c..d1ec8eca9fad 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1720,6 +1720,7 @@ pub const Object = struct { .arg_inline_index = 0, .func_inst_table = .{}, .blocks = .{}, + .loops = .{}, .sync_scope = if (owner_mod.single_threaded) .singlethread else .system, .file = file, .scope = subprogram, @@ -4841,6 +4842,9 @@ pub const FuncGen = struct { breaks: *BreakList, }), + /// Maps `loop` instructions to the bb to branch to to repeat the loop. 
+ loops: std.AutoHashMapUnmanaged(Air.Inst.Index, Builder.Function.Block.Index), + sync_scope: Builder.SyncScope, const Fuzz = struct { @@ -4867,6 +4871,7 @@ pub const FuncGen = struct { self.wip.deinit(); self.func_inst_table.deinit(gpa); self.blocks.deinit(gpa); + self.loops.deinit(gpa); } fn todo(self: *FuncGen, comptime format: []const u8, args: anytype) Error { @@ -5058,14 +5063,9 @@ pub const FuncGen = struct { .arg => try self.airArg(inst), .bitcast => try self.airBitCast(inst), .int_from_bool => try self.airIntFromBool(inst), - .block => try self.airBlock(inst), - .br => try self.airBr(inst), - .switch_br => try self.airSwitchBr(inst), - .trap => try self.airTrap(inst), .breakpoint => try self.airBreakpoint(inst), .ret_addr => try self.airRetAddr(inst), .frame_addr => try self.airFrameAddress(inst), - .cond_br => try self.airCondBr(inst), .@"try" => try self.airTry(body[i..], false), .try_cold => try self.airTry(body[i..], true), .try_ptr => try self.airTryPtr(inst, false), @@ -5076,22 +5076,13 @@ pub const FuncGen = struct { .fpext => try self.airFpext(inst), .int_from_ptr => try self.airIntFromPtr(inst), .load => try self.airLoad(body[i..]), - .loop => try self.airLoop(inst), .not => try self.airNot(inst), - .ret => try self.airRet(inst, false), - .ret_safe => try self.airRet(inst, true), - .ret_load => try self.airRetLoad(inst), .store => try self.airStore(inst, false), .store_safe => try self.airStore(inst, true), .assembly => try self.airAssembly(inst), .slice_ptr => try self.airSliceField(inst, 0), .slice_len => try self.airSliceField(inst, 1), - .call => try self.airCall(inst, .auto), - .call_always_tail => try self.airCall(inst, .always_tail), - .call_never_tail => try self.airCall(inst, .never_tail), - .call_never_inline => try self.airCall(inst, .never_inline), - .ptr_slice_ptr_ptr => try self.airPtrSliceFieldPtr(inst, 0), .ptr_slice_len_ptr => try self.airPtrSliceFieldPtr(inst, 1), @@ -5176,9 +5167,7 @@ pub const FuncGen = struct { 
.inferred_alloc, .inferred_alloc_comptime => unreachable, - .unreach => try self.airUnreach(inst), .dbg_stmt => try self.airDbgStmt(inst), - .dbg_inline_block => try self.airDbgInlineBlock(inst), .dbg_var_ptr => try self.airDbgVarPtr(inst), .dbg_var_val => try self.airDbgVarVal(inst, false), .dbg_arg_inline => try self.airDbgVarVal(inst, true), @@ -5191,10 +5180,50 @@ pub const FuncGen = struct { .work_item_id => try self.airWorkItemId(inst), .work_group_size => try self.airWorkGroupSize(inst), .work_group_id => try self.airWorkGroupId(inst), + + // Instructions that are known to always be `noreturn` based on their tag. + .br => return self.airBr(inst), + .repeat => return self.airRepeat(inst), + .cond_br => return self.airCondBr(inst), + .switch_br => return self.airSwitchBr(inst), + .loop => return self.airLoop(inst), + .ret => return self.airRet(inst, false), + .ret_safe => return self.airRet(inst, true), + .ret_load => return self.airRetLoad(inst), + .trap => return self.airTrap(inst), + .unreach => return self.airUnreach(inst), + + // Instructions which may be `noreturn`. + .block => res: { + const res = try self.airBlock(inst); + if (self.typeOfIndex(inst).isNoReturn(zcu)) return; + break :res res; + }, + .dbg_inline_block => res: { + const res = try self.airDbgInlineBlock(inst); + if (self.typeOfIndex(inst).isNoReturn(zcu)) return; + break :res res; + }, + .call, .call_always_tail, .call_never_tail, .call_never_inline => |tag| res: { + const res = try self.airCall(inst, switch (tag) { + .call => .auto, + .call_always_tail => .always_tail, + .call_never_tail => .never_tail, + .call_never_inline => .never_inline, + else => unreachable, + }); + // TODO: the AIR we emit for calls is a bit weird - the instruction has + // type `noreturn`, but there are instructions (and maybe a safety check) following + // nonetheless. The `unreachable` or safety check should be emitted by backends instead. 
+ //if (self.typeOfIndex(inst).isNoReturn(mod)) return; + break :res res; + }, + // zig fmt: on }; if (val != .none) try self.func_inst_table.putNoClobber(self.gpa, inst.toRef(), val); } + unreachable; } fn genBodyDebugScope( @@ -5640,7 +5669,7 @@ pub const FuncGen = struct { _ = try fg.wip.@"unreachable"(); } - fn airRet(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value { + fn airRet(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !void { const o = self.ng.object; const pt = o.pt; const zcu = pt.zcu; @@ -5675,7 +5704,7 @@ pub const FuncGen = struct { try self.valgrindMarkUndef(self.ret_ptr, len); } _ = try self.wip.retVoid(); - return .none; + return; } const unwrapped_operand = operand.unwrap(); @@ -5684,12 +5713,12 @@ pub const FuncGen = struct { // Return value was stored previously if (unwrapped_operand == .instruction and unwrapped_ret == .instruction and unwrapped_operand.instruction == unwrapped_ret.instruction) { _ = try self.wip.retVoid(); - return .none; + return; } try self.store(self.ret_ptr, ptr_ty, operand, .none); _ = try self.wip.retVoid(); - return .none; + return; } const fn_info = zcu.typeToFunc(Type.fromInterned(ip.getNav(self.ng.nav_index).typeOf(ip))).?; if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) { @@ -5701,7 +5730,7 @@ pub const FuncGen = struct { } else { _ = try self.wip.retVoid(); } - return .none; + return; } const abi_ret_ty = try lowerFnRetTy(o, fn_info); @@ -5725,29 +5754,29 @@ pub const FuncGen = struct { try self.valgrindMarkUndef(rp, len); } _ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, rp, alignment, "")); - return .none; + return; } if (isByRef(ret_ty, zcu)) { // operand is a pointer however self.ret_ptr is null so that means // we need to return a value. 
_ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, operand, alignment, "")); - return .none; + return; } const llvm_ret_ty = operand.typeOfWip(&self.wip); if (abi_ret_ty == llvm_ret_ty) { _ = try self.wip.ret(operand); - return .none; + return; } const rp = try self.buildAlloca(llvm_ret_ty, alignment); _ = try self.wip.store(.normal, operand, rp, alignment); _ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, rp, alignment, "")); - return .none; + return; } - fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { + fn airRetLoad(self: *FuncGen, inst: Air.Inst.Index) !void { const o = self.ng.object; const pt = o.pt; const zcu = pt.zcu; @@ -5765,17 +5794,17 @@ pub const FuncGen = struct { } else { _ = try self.wip.retVoid(); } - return .none; + return; } if (self.ret_ptr != .none) { _ = try self.wip.retVoid(); - return .none; + return; } const ptr = try self.resolveInst(un_op); const abi_ret_ty = try lowerFnRetTy(o, fn_info); const alignment = ret_ty.abiAlignment(zcu).toLlvm(); _ = try self.wip.ret(try self.wip.load(.normal, abi_ret_ty, ptr, alignment, "")); - return .none; + return; } fn airCVaArg(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { @@ -6039,7 +6068,7 @@ pub const FuncGen = struct { } } - fn airBr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { + fn airBr(self: *FuncGen, inst: Air.Inst.Index) !void { const o = self.ng.object; const zcu = o.pt.zcu; const branch = self.air.instructions.items(.data)[@intFromEnum(inst)].br; @@ -6055,10 +6084,16 @@ pub const FuncGen = struct { try block.breaks.list.append(self.gpa, .{ .bb = self.wip.cursor.block, .val = val }); } else block.breaks.len += 1; _ = try self.wip.br(block.parent_bb); - return .none; } - fn airCondBr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { + fn airRepeat(self: *FuncGen, inst: Air.Inst.Index) !void { + const repeat = self.air.instructions.items(.data)[@intFromEnum(inst)].repeat; + const loop_bb = self.loops.get(repeat.loop_inst).?; + 
loop_bb.ptr(&self.wip).incoming += 1; + _ = try self.wip.br(loop_bb); + } + + fn airCondBr(self: *FuncGen, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const cond = try self.resolveInst(pl_op.operand); const extra = self.air.extraData(Air.CondBr, pl_op.payload); @@ -6117,7 +6152,6 @@ pub const FuncGen = struct { try self.genBodyDebugScope(null, else_body, extra.data.branch_hints.else_cov); // No need to reset the insert cursor since this instruction is noreturn. - return .none; } fn airTry(self: *FuncGen, body_tail: []const Air.Inst.Index, err_cold: bool) !Builder.Value { @@ -6223,7 +6257,7 @@ pub const FuncGen = struct { return fg.wip.extractValue(err_union, &.{offset}, ""); } - fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { + fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !void { const o = self.ng.object; const switch_br = self.air.unwrapSwitch(inst); @@ -6371,31 +6405,20 @@ pub const FuncGen = struct { } // No need to reset the insert cursor since this instruction is noreturn. 
- return .none; } - fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { - const o = self.ng.object; - const zcu = o.pt.zcu; + fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); const body: []const Air.Inst.Index = @ptrCast(self.air.extra[loop.end..][0..loop.data.body_len]); - const loop_block = try self.wip.block(2, "Loop"); + const loop_block = try self.wip.block(1, "Loop"); // `airRepeat` will increment incoming each time _ = try self.wip.br(loop_block); + try self.loops.putNoClobber(self.gpa, inst, loop_block); + defer assert(self.loops.remove(inst)); + self.wip.cursor = .{ .block = loop_block }; try self.genBodyDebugScope(null, body, .none); - - // TODO instead of this logic, change AIR to have the property that - // every block is guaranteed to end with a noreturn instruction. - // Then we can simply rely on the fact that a repeat or break instruction - // would have been emitted already. Also the main loop in genBody can - // be while(true) instead of for(body), which will eliminate 1 branch on - // a hot path. 
- if (body.len == 0 or !self.typeOfIndex(body[body.len - 1]).isNoReturn(zcu)) { - _ = try self.wip.br(loop_block); - } - return .none; } fn airArrayToSlice(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { @@ -6890,10 +6913,9 @@ pub const FuncGen = struct { return self.wip.not(operand, ""); } - fn airUnreach(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { + fn airUnreach(self: *FuncGen, inst: Air.Inst.Index) !void { _ = inst; _ = try self.wip.@"unreachable"(); - return .none; } fn airDbgStmt(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { @@ -9296,11 +9318,10 @@ pub const FuncGen = struct { return fg.load(ptr, ptr_ty); } - fn airTrap(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { + fn airTrap(self: *FuncGen, inst: Air.Inst.Index) !void { _ = inst; _ = try self.wip.callIntrinsic(.normal, .none, .trap, &.{}, &.{}, ""); _ = try self.wip.@"unreachable"(); - return .none; } fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value { diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index b9aa77778669..afc7641072f5 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -3340,6 +3340,7 @@ const NavGen = struct { .store, .store_safe => return self.airStore(inst), .br => return self.airBr(inst), + .repeat => return self.fail("TODO implement `repeat`", .{}), .breakpoint => return, .cond_br => return self.airCondBr(inst), .loop => return self.airLoop(inst), diff --git a/src/print_air.zig b/src/print_air.zig index acb5eda07e42..c9fde5282213 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -296,6 +296,7 @@ const Writer = struct { .aggregate_init => try w.writeAggregateInit(s, inst), .union_init => try w.writeUnionInit(s, inst), .br => try w.writeBr(s, inst), + .repeat => try w.writeRepeat(s, inst), .cond_br => try w.writeCondBr(s, inst), .@"try", .try_cold => try w.writeTry(s, inst), .try_ptr, .try_ptr_cold => try w.writeTryPtr(s, inst), @@ -708,6 +709,11 @@ const Writer = struct { try w.writeOperand(s, inst, 
0, br.operand); } + fn writeRepeat(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const repeat = w.air.instructions.items(.data)[@intFromEnum(inst)].repeat; + try w.writeInstIndex(s, repeat.loop_inst, false); + } + fn writeTry(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const pl_op = w.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const extra = w.air.extraData(Air.Try, pl_op.payload); From 5e12ca9fe3c77ce1d2a3ea1c22c4bcb6d9b2bb0c Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 28 Apr 2024 21:44:57 +0100 Subject: [PATCH 011/202] compiler: implement labeled switch/continue --- lib/std/zig/Ast.zig | 54 ++- lib/std/zig/AstGen.zig | 113 ++++++- lib/std/zig/Parse.zig | 26 +- lib/std/zig/Zir.zig | 14 +- src/Air.zig | 17 +- src/Air/types_resolved.zig | 4 +- src/Liveness.zig | 267 ++++++++++----- src/Liveness/Verify.zig | 31 +- src/Sema.zig | 620 ++++++++++++++++++++++++++-------- src/Value.zig | 1 + src/arch/aarch64/CodeGen.zig | 2 + src/arch/arm/CodeGen.zig | 2 + src/arch/riscv64/CodeGen.zig | 2 + src/arch/sparc64/CodeGen.zig | 2 + src/arch/wasm/CodeGen.zig | 2 + src/arch/x86_64/CodeGen.zig | 2 + src/codegen/c.zig | 127 +++++-- src/codegen/llvm.zig | 488 +++++++++++++++++++++----- src/print_air.zig | 3 +- src/print_zir.zig | 1 + test/behavior.zig | 1 + test/behavior/switch_loop.zig | 205 +++++++++++ 22 files changed, 1602 insertions(+), 382 deletions(-) create mode 100644 test/behavior/switch_loop.zig diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig index 1f734cef63f0..b6f4ad68ee01 100644 --- a/lib/std/zig/Ast.zig +++ b/lib/std/zig/Ast.zig @@ -1184,14 +1184,7 @@ pub fn lastToken(tree: Ast, node: Node.Index) TokenIndex { n = extra.sentinel; }, - .@"continue" => { - if (datas[n].lhs != 0) { - return datas[n].lhs + end_offset; - } else { - return main_tokens[n] + end_offset; - } - }, - .@"break" => { + .@"continue", .@"break" => { if (datas[n].rhs != 0) { n = datas[n].rhs; } else if (datas[n].lhs != 0) { @@ 
-1895,6 +1888,15 @@ pub fn taggedUnionEnumTag(tree: Ast, node: Node.Index) full.ContainerDecl { }); } +pub fn switchFull(tree: Ast, node: Node.Index) full.Switch { + const data = &tree.nodes.items(.data)[node]; + return tree.fullSwitchComponents(.{ + .switch_token = tree.nodes.items(.main_token)[node], + .condition = data.lhs, + .sub_range = data.rhs, + }); +} + pub fn switchCaseOne(tree: Ast, node: Node.Index) full.SwitchCase { const data = &tree.nodes.items(.data)[node]; const values: *[1]Node.Index = &data.lhs; @@ -2206,6 +2208,21 @@ fn fullContainerDeclComponents(tree: Ast, info: full.ContainerDecl.Components) f return result; } +fn fullSwitchComponents(tree: Ast, info: full.Switch.Components) full.Switch { + const token_tags = tree.tokens.items(.tag); + const tok_i = info.switch_token -| 1; + var result: full.Switch = .{ + .ast = info, + .label_token = null, + }; + if (token_tags[tok_i] == .colon and + token_tags[tok_i -| 1] == .identifier) + { + result.label_token = tok_i - 1; + } + return result; +} + fn fullSwitchCaseComponents(tree: Ast, info: full.SwitchCase.Components, node: Node.Index) full.SwitchCase { const token_tags = tree.tokens.items(.tag); const node_tags = tree.nodes.items(.tag); @@ -2477,6 +2494,13 @@ pub fn fullContainerDecl(tree: Ast, buffer: *[2]Ast.Node.Index, node: Node.Index }; } +pub fn fullSwitch(tree: Ast, node: Node.Index) ?full.Switch { + return switch (tree.nodes.items(.tag)[node]) { + .@"switch", .switch_comma => tree.switchFull(node), + else => null, + }; +} + pub fn fullSwitchCase(tree: Ast, node: Node.Index) ?full.SwitchCase { return switch (tree.nodes.items(.tag)[node]) { .switch_case_one, .switch_case_inline_one => tree.switchCaseOne(node), @@ -2829,6 +2853,17 @@ pub const full = struct { }; }; + pub const Switch = struct { + ast: Components, + label_token: ?TokenIndex, + + pub const Components = struct { + switch_token: TokenIndex, + condition: Node.Index, + sub_range: Node.Index, + }; + }; + pub const SwitchCase = struct { 
inline_token: ?TokenIndex, /// Points to the first token after the `|`. Will either be an identifier or @@ -3287,7 +3322,8 @@ pub const Node = struct { @"suspend", /// `resume lhs`. rhs is unused. @"resume", - /// `continue`. lhs is token index of label if any. rhs is unused. + /// `continue :lhs rhs` + /// both lhs and rhs may be omitted. @"continue", /// `break :lhs rhs` /// both lhs and rhs may be omitted. diff --git a/lib/std/zig/AstGen.zig b/lib/std/zig/AstGen.zig index 9c27d1e036bb..92da810b308e 100644 --- a/lib/std/zig/AstGen.zig +++ b/lib/std/zig/AstGen.zig @@ -1144,7 +1144,7 @@ fn expr(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) InnerE .error_set_decl => return errorSetDecl(gz, ri, node), .array_access => return arrayAccess(gz, scope, ri, node), .@"comptime" => return comptimeExprAst(gz, scope, ri, node), - .@"switch", .switch_comma => return switchExpr(gz, scope, ri.br(), node), + .@"switch", .switch_comma => return switchExpr(gz, scope, ri.br(), node, tree.fullSwitch(node).?), .@"nosuspend" => return nosuspendExpr(gz, scope, ri, node), .@"suspend" => return suspendExpr(gz, scope, node), @@ -2160,6 +2160,11 @@ fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) Inn if (break_label != 0) { if (block_gz.label) |*label| { if (try astgen.tokenIdentEql(label.token, break_label)) { + const maybe_switch_tag = astgen.instructions.items(.tag)[@intFromEnum(label.block_inst)]; + switch (maybe_switch_tag) { + .switch_block, .switch_block_ref => return astgen.failNode(node, "cannot break from switch", .{}), + else => {}, + } label.used = true; break :blk label.block_inst; } @@ -2234,6 +2239,11 @@ fn continueExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) const tree = astgen.tree; const node_datas = tree.nodes.items(.data); const break_label = node_datas[node].lhs; + const rhs = node_datas[node].rhs; + + if (break_label == 0 and rhs != 0) { + return astgen.failNode(node, "cannot continue with operand 
without label", .{}); + } // Look for the label in the scope. var scope = parent_scope; @@ -2258,6 +2268,15 @@ fn continueExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) if (break_label != 0) blk: { if (gen_zir.label) |*label| { if (try astgen.tokenIdentEql(label.token, break_label)) { + const maybe_switch_tag = astgen.instructions.items(.tag)[@intFromEnum(label.block_inst)]; + if (rhs != 0) switch (maybe_switch_tag) { + .switch_block, .switch_block_ref => {}, + else => return astgen.failNode(node, "cannot continue loop with operand", .{}), + } else switch (maybe_switch_tag) { + .switch_block, .switch_block_ref => return astgen.failNode(node, "cannot continue switch without operand", .{}), + else => {}, + } + label.used = true; break :blk; } @@ -2265,8 +2284,35 @@ fn continueExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) // found continue but either it has a different label, or no label scope = gen_zir.parent; continue; + } else if (gen_zir.label) |label| { + // This `continue` is unlabeled. If the gz we've found corresponds to a labeled + // `switch`, ignore it and continue to parent scopes. + switch (astgen.instructions.items(.tag)[@intFromEnum(label.block_inst)]) { + .switch_block, .switch_block_ref => { + scope = gen_zir.parent; + continue; + }, + else => {}, + } + } + + if (rhs != 0) { + // We need to figure out the result info to use. 
+ // The type should match + const operand = try reachableExpr(parent_gz, parent_scope, gen_zir.continue_result_info, rhs, node); + + try genDefers(parent_gz, scope, parent_scope, .normal_only); + + // As our last action before the continue, "pop" the error trace if needed + if (!gen_zir.is_comptime) + _ = try parent_gz.addRestoreErrRetIndex(.{ .block = continue_block }, .always, node); + + _ = try parent_gz.addBreakWithSrcNode(.switch_continue, continue_block, operand, rhs); + return Zir.Inst.Ref.unreachable_value; } + try genDefers(parent_gz, scope, parent_scope, .normal_only); + const break_tag: Zir.Inst.Tag = if (gen_zir.is_inline) .break_inline else @@ -2284,12 +2330,7 @@ fn continueExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) }, .local_val => scope = scope.cast(Scope.LocalVal).?.parent, .local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent, - .defer_normal => { - const defer_scope = scope.cast(Scope.Defer).?; - scope = defer_scope.parent; - try parent_gz.addDefer(defer_scope.index, defer_scope.len); - }, - .defer_error => scope = scope.cast(Scope.Defer).?.parent, + .defer_normal, .defer_error => scope = scope.cast(Scope.Defer).?.parent, .namespace => break, .top => unreachable, } @@ -2881,6 +2922,7 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As .panic, .trap, .check_comptime_control_flow, + .switch_continue, => { noreturn_src_node = statement; break :b true; @@ -7546,7 +7588,8 @@ fn switchExpr( parent_gz: *GenZir, scope: *Scope, ri: ResultInfo, - switch_node: Ast.Node.Index, + node: Ast.Node.Index, + switch_full: Ast.full.Switch, ) InnerError!Zir.Inst.Ref { const astgen = parent_gz.astgen; const gpa = astgen.gpa; @@ -7555,14 +7598,14 @@ fn switchExpr( const node_tags = tree.nodes.items(.tag); const main_tokens = tree.nodes.items(.main_token); const token_tags = tree.tokens.items(.tag); - const operand_node = node_datas[switch_node].lhs; - const extra = tree.extraData(node_datas[switch_node].rhs, 
Ast.Node.SubRange); + const operand_node = node_datas[node].lhs; + const extra = tree.extraData(node_datas[node].rhs, Ast.Node.SubRange); const case_nodes = tree.extra_data[extra.start..extra.end]; - const need_rl = astgen.nodes_need_rl.contains(switch_node); + const need_rl = astgen.nodes_need_rl.contains(node); const block_ri: ResultInfo = if (need_rl) ri else .{ .rl = switch (ri.rl) { - .ptr => .{ .ty = (try ri.rl.resultType(parent_gz, switch_node)).? }, + .ptr => .{ .ty = (try ri.rl.resultType(parent_gz, node)).? }, .inferred_ptr => .none, else => ri.rl, }, @@ -7573,11 +7616,16 @@ fn switchExpr( const LocTag = @typeInfo(ResultInfo.Loc).@"union".tag_type.?; const need_result_rvalue = @as(LocTag, block_ri.rl) != @as(LocTag, ri.rl); + if (switch_full.label_token) |label_token| { + try astgen.checkLabelRedefinition(scope, label_token); + } + // We perform two passes over the AST. This first pass is to collect information // for the following variables, make note of the special prong AST node index, // and bail out with a compile error if there are multiple special prongs present. var any_payload_is_ref = false; var any_has_tag_capture = false; + var any_non_inline_capture = false; var scalar_cases_len: u32 = 0; var multi_cases_len: u32 = 0; var inline_cases_len: u32 = 0; @@ -7595,6 +7643,15 @@ fn switchExpr( if (token_tags[ident + 1] == .comma) { any_has_tag_capture = true; } + + // If the first capture is ignored, then there is no runtime-known + // capture, as the tag capture must be for an inline prong. + // This check isn't perfect, because for things like enums, the + // first prong *is* comptime-known for inline prongs! But such + // knowledge requires semantic analysis. + if (!mem.eql(u8, tree.tokenSlice(ident), "_")) { + any_non_inline_capture = true; + } } // Check for else/`_` prong. 
if (case.ast.values.len == 0) { @@ -7614,7 +7671,7 @@ fn switchExpr( ); } else if (underscore_src) |some_underscore| { return astgen.failNodeNotes( - switch_node, + node, "else and '_' prong in switch expression", .{}, &[_]u32{ @@ -7655,7 +7712,7 @@ fn switchExpr( ); } else if (else_src) |some_else| { return astgen.failNodeNotes( - switch_node, + node, "else and '_' prong in switch expression", .{}, &[_]u32{ @@ -7704,6 +7761,12 @@ fn switchExpr( const raw_operand = try expr(parent_gz, scope, operand_ri, operand_node); const item_ri: ResultInfo = .{ .rl = .none }; + // If this switch is labeled, it will have `continue`s targeting it, and thus we need the operand type + // to provide a result type. + const raw_operand_ty_ref = if (switch_full.label_token != null) t: { + break :t try parent_gz.addUnNode(.typeof, raw_operand, operand_node); + } else undefined; + // This contains the data that goes into the `extra` array for the SwitchBlock/SwitchBlockMulti, // except the first cases_nodes.len slots are a table that indexes payloads later in the array, with // the special case index coming first, then scalar_case_len indexes, then multi_cases_len indexes @@ -7725,7 +7788,22 @@ fn switchExpr( try emitDbgStmtForceCurrentIndex(parent_gz, operand_lc); // This gets added to the parent block later, after the item expressions. 
const switch_tag: Zir.Inst.Tag = if (any_payload_is_ref) .switch_block_ref else .switch_block; - const switch_block = try parent_gz.makeBlockInst(switch_tag, switch_node); + const switch_block = try parent_gz.makeBlockInst(switch_tag, node); + + if (switch_full.label_token) |label_token| { + block_scope.continue_block = switch_block.toOptional(); + block_scope.continue_result_info = .{ + .rl = if (any_payload_is_ref) + .{ .ref_coerced_ty = raw_operand_ty_ref } + else + .{ .coerced_ty = raw_operand_ty_ref }, + }; + + block_scope.label = .{ + .token = label_token, + .block_inst = switch_block, + }; + } // We re-use this same scope for all cases, including the special prong, if any. var case_scope = parent_gz.makeSubBlock(&block_scope.base); @@ -7946,6 +8024,8 @@ fn switchExpr( .has_else = special_prong == .@"else", .has_under = special_prong == .under, .any_has_tag_capture = any_has_tag_capture, + .any_non_inline_capture = any_non_inline_capture, + .has_continue = switch_full.label_token != null, .scalar_cases_len = @intCast(scalar_cases_len), }, }); @@ -7982,7 +8062,7 @@ fn switchExpr( } if (need_result_rvalue) { - return rvalue(parent_gz, ri, switch_block.toRef(), switch_node); + return rvalue(parent_gz, ri, switch_block.toRef(), node); } else { return switch_block.toRef(); } @@ -11824,6 +11904,7 @@ const GenZir = struct { continue_block: Zir.Inst.OptionalIndex = .none, /// Only valid when setBreakResultInfo is called. break_result_info: AstGen.ResultInfo = undefined, + continue_result_info: AstGen.ResultInfo = undefined, suspend_node: Ast.Node.Index = 0, nosuspend_node: Ast.Node.Index = 0, diff --git a/lib/std/zig/Parse.zig b/lib/std/zig/Parse.zig index 6f557a0f5526..20e69845cb3c 100644 --- a/lib/std/zig/Parse.zig +++ b/lib/std/zig/Parse.zig @@ -924,7 +924,6 @@ fn expectContainerField(p: *Parse) !Node.Index { /// / KEYWORD_errdefer Payload? 
BlockExprStatement /// / IfStatement /// / LabeledStatement -/// / SwitchExpr /// / VarDeclExprStatement fn expectStatement(p: *Parse, allow_defer_var: bool) Error!Node.Index { if (p.eatToken(.keyword_comptime)) |comptime_token| { @@ -995,7 +994,6 @@ fn expectStatement(p: *Parse, allow_defer_var: bool) Error!Node.Index { .rhs = try p.expectBlockExprStatement(), }, }), - .keyword_switch => return p.expectSwitchExpr(), .keyword_if => return p.expectIfStatement(), .keyword_enum, .keyword_struct, .keyword_union => { const identifier = p.tok_i + 1; @@ -1238,7 +1236,7 @@ fn expectIfStatement(p: *Parse) !Node.Index { }); } -/// LabeledStatement <- BlockLabel? (Block / LoopStatement) +/// LabeledStatement <- BlockLabel? (Block / LoopStatement / SwitchExpr) fn parseLabeledStatement(p: *Parse) !Node.Index { const label_token = p.parseBlockLabel(); const block = try p.parseBlock(); @@ -1247,6 +1245,9 @@ fn parseLabeledStatement(p: *Parse) !Node.Index { const loop_stmt = try p.parseLoopStatement(); if (loop_stmt != 0) return loop_stmt; + const switch_expr = try p.parseSwitchExpr(); + if (switch_expr != 0) return switch_expr; + if (label_token != 0) { const after_colon = p.tok_i; const node = try p.parseTypeExpr(); @@ -2072,7 +2073,7 @@ fn expectTypeExpr(p: *Parse) Error!Node.Index { /// / KEYWORD_break BreakLabel? Expr? /// / KEYWORD_comptime Expr /// / KEYWORD_nosuspend Expr -/// / KEYWORD_continue BreakLabel? +/// / KEYWORD_continue BreakLabel? Expr? /// / KEYWORD_resume Expr /// / KEYWORD_return Expr? /// / BlockLabel? LoopExpr @@ -2098,7 +2099,7 @@ fn parsePrimaryExpr(p: *Parse) !Node.Index { .main_token = p.nextToken(), .data = .{ .lhs = try p.parseBreakLabel(), - .rhs = undefined, + .rhs = try p.parseExpr(), }, }); }, @@ -2627,7 +2628,6 @@ fn parseSuffixExpr(p: *Parse) !Node.Index { /// / KEYWORD_anyframe /// / KEYWORD_unreachable /// / STRINGLITERAL -/// / SwitchExpr /// /// ContainerDecl <- (KEYWORD_extern / KEYWORD_packed)? 
ContainerDeclAuto /// @@ -2647,6 +2647,7 @@ fn parseSuffixExpr(p: *Parse) !Node.Index { /// LabeledTypeExpr /// <- BlockLabel Block /// / BlockLabel? LoopTypeExpr +/// / BlockLabel? SwitchExpr /// /// LoopTypeExpr <- KEYWORD_inline? (ForTypeExpr / WhileTypeExpr) fn parsePrimaryTypeExpr(p: *Parse) !Node.Index { @@ -2753,6 +2754,10 @@ fn parsePrimaryTypeExpr(p: *Parse) !Node.Index { p.tok_i += 2; return p.parseWhileTypeExpr(); }, + .keyword_switch => { + p.tok_i += 2; + return p.expectSwitchExpr(); + }, .l_brace => { p.tok_i += 2; return p.parseBlock(); @@ -3029,8 +3034,17 @@ fn parseWhileTypeExpr(p: *Parse) !Node.Index { } /// SwitchExpr <- KEYWORD_switch LPAREN Expr RPAREN LBRACE SwitchProngList RBRACE +fn parseSwitchExpr(p: *Parse) !Node.Index { + const switch_token = p.eatToken(.keyword_switch) orelse return null_node; + return p.expectSwitchSuffix(switch_token); +} + fn expectSwitchExpr(p: *Parse) !Node.Index { const switch_token = p.assertToken(.keyword_switch); + return p.expectSwitchSuffix(switch_token); +} + +fn expectSwitchSuffix(p: *Parse, switch_token: TokenIndex) !Node.Index { _ = try p.expectToken(.l_paren); const expr_node = try p.expectExpr(); _ = try p.expectToken(.r_paren); diff --git a/lib/std/zig/Zir.zig b/lib/std/zig/Zir.zig index af4ddaad6afc..0186b45f74a7 100644 --- a/lib/std/zig/Zir.zig +++ b/lib/std/zig/Zir.zig @@ -314,6 +314,9 @@ pub const Inst = struct { /// break instruction in a block, and the target block is the parent. /// Uses the `break` union field. break_inline, + /// Branch from within a switch case to the case specified by the operand. + /// Uses the `break` union field. `block_inst` refers to a `switch_block` or `switch_block_ref`. + switch_continue, /// Checks that comptime control flow does not happen inside a runtime block. /// Uses the `un_node` union field. 
check_comptime_control_flow, @@ -1273,6 +1276,7 @@ pub const Inst = struct { .panic, .trap, .check_comptime_control_flow, + .switch_continue, => true, }; } @@ -1512,6 +1516,7 @@ pub const Inst = struct { .break_inline, .condbr, .condbr_inline, + .switch_continue, .compile_error, .ret_node, .ret_load, @@ -1597,6 +1602,7 @@ pub const Inst = struct { .bool_br_or = .pl_node, .@"break" = .@"break", .break_inline = .@"break", + .switch_continue = .@"break", .check_comptime_control_flow = .un_node, .for_len = .pl_node, .call = .pl_node, @@ -2288,6 +2294,7 @@ pub const Inst = struct { }, @"break": struct { operand: Ref, + /// Index of a `Break` payload. payload_index: u32, }, dbg_stmt: LineColumn, @@ -2945,9 +2952,13 @@ pub const Inst = struct { has_under: bool, /// If true, at least one prong has an inline tag capture. any_has_tag_capture: bool, + /// If true, at least one prong has a capture which may not + /// be comptime-known via `inline`. + any_non_inline_capture: bool, + has_continue: bool, scalar_cases_len: ScalarCasesLen, - pub const ScalarCasesLen = u28; + pub const ScalarCasesLen = u26; pub fn specialProng(bits: Bits) SpecialProng { const has_else: u2 = @intFromBool(bits.has_else); @@ -3750,6 +3761,7 @@ fn findDeclsInner( .bool_br_or, .@"break", .break_inline, + .switch_continue, .check_comptime_control_flow, .builtin_call, .cmp_lt, diff --git a/src/Air.zig b/src/Air.zig index 539624bd4f81..343694a906d6 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -429,6 +429,14 @@ pub const Inst = struct { /// Result type is always noreturn; no instructions in a block follow this one. /// Uses the `pl_op` field. Operand is the condition. Payload is `SwitchBr`. switch_br, + /// Switch branch which can dispatch back to itself with a different operand. + /// Result type is always noreturn; no instructions in a block follow this one. + /// Uses the `pl_op` field. Operand is the condition. Payload is `SwitchBr`. 
+ loop_switch_br, + /// Dispatches back to a branch of a parent `loop_switch_br`. + /// Result type is always noreturn; no instructions in a block follow this one. + /// Uses the `br` field. `block_inst` is a `loop_switch_br` instruction. + switch_dispatch, /// Given an operand which is an error union, splits control flow. In /// case of error, control flow goes into the block that is part of this /// instruction, which is guaranteed to end with a return instruction @@ -1454,6 +1462,8 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool) .br, .cond_br, .switch_br, + .loop_switch_br, + .switch_dispatch, .ret, .ret_safe, .ret_load, @@ -1618,6 +1628,8 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool { .call_never_inline, .cond_br, .switch_br, + .loop_switch_br, + .switch_dispatch, .@"try", .try_cold, .try_ptr, @@ -1903,7 +1915,10 @@ pub const UnwrappedSwitch = struct { pub fn unwrapSwitch(air: *const Air, switch_inst: Inst.Index) UnwrappedSwitch { const inst = air.instructions.get(@intFromEnum(switch_inst)); - assert(inst.tag == .switch_br); + switch (inst.tag) { + .switch_br, .loop_switch_br => {}, + else => unreachable, // assertion failure + } const pl_op = inst.data.pl_op; const extra = air.extraData(SwitchBr, pl_op.payload); const hint_bag_count = std.math.divCeil(usize, extra.data.cases_len + 1, 10) catch unreachable; diff --git a/src/Air/types_resolved.zig b/src/Air/types_resolved.zig index c68971a28a9d..717729ff1dbb 100644 --- a/src/Air/types_resolved.zig +++ b/src/Air/types_resolved.zig @@ -222,7 +222,7 @@ fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool { if (!checkRef(data.un_op, zcu)) return false; }, - .br => { + .br, .switch_dispatch => { if (!checkRef(data.br.operand, zcu)) return false; }, @@ -380,7 +380,7 @@ fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool { )) return false; }, - .switch_br => { + .switch_br, .loop_switch_br => { const switch_br = 
air.unwrapSwitch(inst); if (!checkRef(switch_br.operand, zcu)) return false; var it = switch_br.iterateCases(); diff --git a/src/Liveness.zig b/src/Liveness.zig index 48ddca747aa5..d90af6462aa3 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -31,6 +31,7 @@ tomb_bits: []usize, /// * `try`, `try_ptr` - points to a `CondBr` in `extra` at this index. The error path (the block /// in the instruction) is considered the "else" path, and the rest of the block the "then". /// * `switch_br` - points to a `SwitchBr` in `extra` at this index. +/// * `loop_switch_br` - points to a `SwitchBr` in `extra` at this index. /// * `block` - points to a `Block` in `extra` at this index. /// * `asm`, `call`, `aggregate_init` - the value is a set of bits which are the extra tomb /// bits of operands. @@ -68,8 +69,8 @@ pub const Block = struct { /// Liveness analysis runs in several passes. Each pass iterates backwards over instructions in /// bodies, and recurses into bodies. const LivenessPass = enum { - /// In this pass, we perform some basic analysis of loops to gain information the main pass - /// needs. In particular, for every `loop`, we track the following information: + /// In this pass, we perform some basic analysis of loops to gain information the main pass needs. + /// In particular, for every `loop` and `loop_switch_br`, we track the following information: /// * Every outer block which the loop body contains a `br` to. /// * Every outer loop which the loop body contains a `repeat` to. /// * Every operand referenced within the loop body but created outside the loop. @@ -91,7 +92,8 @@ fn LivenessPassData(comptime pass: LivenessPass) type { .loop_analysis => struct { /// The set of blocks which are exited with a `br` instruction at some point within this /// body and which we are currently within. Also includes `loop`s which are the target - /// of a `repeat` instruction. 
+ /// of a `repeat` instruction, and `loop_switch_br`s which are the target of a + /// `switch_dispatch` instruction. breaks: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}, /// The set of operands for which we have seen at least one usage but not their birth. @@ -330,6 +332,7 @@ pub fn categorizeOperand( .trap, .breakpoint, .repeat, + .switch_dispatch, .dbg_stmt, .unreach, .ret_addr, @@ -662,21 +665,17 @@ pub fn categorizeOperand( return .complex; }, - .@"try", .try_cold => { - return .complex; - }, - .try_ptr, .try_ptr_cold => { - return .complex; - }, - .loop => { - return .complex; - }, - .cond_br => { - return .complex; - }, - .switch_br => { - return .complex; - }, + + .@"try", + .try_cold, + .try_ptr, + .try_ptr_cold, + .loop, + .cond_br, + .switch_br, + .loop_switch_br, + => return .complex, + .wasm_memory_grow => { const pl_op = air_datas[@intFromEnum(inst)].pl_op; if (pl_op.operand == operand_ref) return matchOperandSmallIndex(l, inst, 0, .none); @@ -1206,6 +1205,7 @@ fn analyzeInst( .br => return analyzeInstBr(a, pass, data, inst), .repeat => return analyzeInstRepeat(a, pass, data, inst), + .switch_dispatch => return analyzeInstSwitchDispatch(a, pass, data, inst), .assembly => { const extra = a.air.extraData(Air.Asm, inst_datas[@intFromEnum(inst)].ty_pl.payload); @@ -1262,7 +1262,8 @@ fn analyzeInst( .@"try", .try_cold => return analyzeInstCondBr(a, pass, data, inst, .@"try"), .try_ptr, .try_ptr_cold => return analyzeInstCondBr(a, pass, data, inst, .try_ptr), .cond_br => return analyzeInstCondBr(a, pass, data, inst, .cond_br), - .switch_br => return analyzeInstSwitchBr(a, pass, data, inst), + .switch_br => return analyzeInstSwitchBr(a, pass, data, inst, false), + .loop_switch_br => return analyzeInstSwitchBr(a, pass, data, inst, true), .wasm_memory_grow => { const pl_op = inst_datas[@intFromEnum(inst)].pl_op; @@ -1412,6 +1413,35 @@ fn analyzeInstRepeat( return analyzeOperands(a, pass, data, inst, .{ .none, .none, .none }); } +fn 
analyzeInstSwitchDispatch( + a: *Analysis, + comptime pass: LivenessPass, + data: *LivenessPassData(pass), + inst: Air.Inst.Index, +) !void { + // This happens to be identical to `analyzeInstBr`, but is separated anyway for clarity. + + const inst_datas = a.air.instructions.items(.data); + const br = inst_datas[@intFromEnum(inst)].br; + const gpa = a.gpa; + + switch (pass) { + .loop_analysis => { + try data.breaks.put(gpa, br.block_inst, {}); + }, + + .main_analysis => { + const block_scope = data.block_scopes.get(br.block_inst).?; // we should always be repeating an enclosing loop + + const new_live_set = try block_scope.live_set.clone(gpa); + data.live_set.deinit(gpa); + data.live_set = new_live_set; + }, + } + + return analyzeOperands(a, pass, data, inst, .{ br.operand, .none, .none }); +} + fn analyzeInstBlock( a: *Analysis, comptime pass: LivenessPass, @@ -1482,109 +1512,133 @@ fn analyzeInstBlock( } } -fn analyzeInstLoop( +fn writeLoopInfo( a: *Analysis, - comptime pass: LivenessPass, - data: *LivenessPassData(pass), + data: *LivenessPassData(.loop_analysis), inst: Air.Inst.Index, + old_breaks: std.AutoHashMapUnmanaged(Air.Inst.Index, void), + old_live: std.AutoHashMapUnmanaged(Air.Inst.Index, void), ) !void { - const inst_datas = a.air.instructions.items(.data); - const extra = a.air.extraData(Air.Block, inst_datas[@intFromEnum(inst)].ty_pl.payload); - const body: []const Air.Inst.Index = @ptrCast(a.air.extra[extra.end..][0..extra.data.body_len]); const gpa = a.gpa; - try analyzeOperands(a, pass, data, inst, .{ .none, .none, .none }); + // `loop`s are guaranteed to have at least one matching `repeat`. + // Similarly, `loop_switch_br`s have a matching `switch_dispatch`. + // However, we no longer care about repeats of this loop for resolving + // which operands must live within it. 
+ assert(data.breaks.remove(inst)); - switch (pass) { - .loop_analysis => { - var old_breaks = data.breaks.move(); - defer old_breaks.deinit(gpa); + const extra_index: u32 = @intCast(a.extra.items.len); - var old_live = data.live_set.move(); - defer old_live.deinit(gpa); + const num_breaks = data.breaks.count(); + try a.extra.ensureUnusedCapacity(gpa, 1 + num_breaks); - try analyzeBody(a, pass, data, body); + a.extra.appendAssumeCapacity(num_breaks); - // `loop`s are guaranteed to have at least one matching `repeat`. - // However, we no longer care about repeats of this loop itself. - assert(data.breaks.remove(inst)); + var it = data.breaks.keyIterator(); + while (it.next()) |key| { + const block_inst = key.*; + a.extra.appendAssumeCapacity(@intFromEnum(block_inst)); + } + log.debug("[{}] %{}: includes breaks to {}", .{ LivenessPass.loop_analysis, inst, fmtInstSet(&data.breaks) }); - const extra_index: u32 = @intCast(a.extra.items.len); + // Now we put the live operands from the loop body in too + const num_live = data.live_set.count(); + try a.extra.ensureUnusedCapacity(gpa, 1 + num_live); - const num_breaks = data.breaks.count(); - try a.extra.ensureUnusedCapacity(gpa, 1 + num_breaks); + a.extra.appendAssumeCapacity(num_live); + it = data.live_set.keyIterator(); + while (it.next()) |key| { + const alive = key.*; + a.extra.appendAssumeCapacity(@intFromEnum(alive)); + } + log.debug("[{}] %{}: maintain liveness of {}", .{ LivenessPass.loop_analysis, inst, fmtInstSet(&data.live_set) }); - a.extra.appendAssumeCapacity(num_breaks); + try a.special.put(gpa, inst, extra_index); - var it = data.breaks.keyIterator(); - while (it.next()) |key| { - const block_inst = key.*; - a.extra.appendAssumeCapacity(@intFromEnum(block_inst)); - } - log.debug("[{}] %{}: includes breaks to {}", .{ pass, inst, fmtInstSet(&data.breaks) }); + // Add back operands which were previously alive + it = old_live.keyIterator(); + while (it.next()) |key| { + const alive = key.*; + try 
data.live_set.put(gpa, alive, {}); + } - // Now we put the live operands from the loop body in too - const num_live = data.live_set.count(); - try a.extra.ensureUnusedCapacity(gpa, 1 + num_live); + // And the same for breaks + it = old_breaks.keyIterator(); + while (it.next()) |key| { + const block_inst = key.*; + try data.breaks.put(gpa, block_inst, {}); + } +} - a.extra.appendAssumeCapacity(num_live); - it = data.live_set.keyIterator(); - while (it.next()) |key| { - const alive = key.*; - a.extra.appendAssumeCapacity(@intFromEnum(alive)); - } - log.debug("[{}] %{}: maintain liveness of {}", .{ pass, inst, fmtInstSet(&data.live_set) }); +/// When analyzing a loop in the main pass, sets up `data.live_set` to be the set +/// of operands known to be alive when the loop repeats. +fn resolveLoopLiveSet( + a: *Analysis, + data: *LivenessPassData(.main_analysis), + inst: Air.Inst.Index, +) !void { + const gpa = a.gpa; - try a.special.put(gpa, inst, extra_index); + const extra_idx = a.special.fetchRemove(inst).?.value; + const num_breaks = data.old_extra.items[extra_idx]; + const breaks: []const Air.Inst.Index = @ptrCast(data.old_extra.items[extra_idx + 1 ..][0..num_breaks]); - // Add back operands which were previously alive - it = old_live.keyIterator(); - while (it.next()) |key| { - const alive = key.*; - try data.live_set.put(gpa, alive, {}); - } + const num_loop_live = data.old_extra.items[extra_idx + num_breaks + 1]; + const loop_live: []const Air.Inst.Index = @ptrCast(data.old_extra.items[extra_idx + num_breaks + 2 ..][0..num_loop_live]); - // And the same for breaks - it = old_breaks.keyIterator(); - while (it.next()) |key| { - const block_inst = key.*; - try data.breaks.put(gpa, block_inst, {}); - } - }, + // This is necessarily not in the same control flow branch, because loops are noreturn + data.live_set.clearRetainingCapacity(); - .main_analysis => { - const extra_idx = a.special.fetchRemove(inst).?.value; // remove because this data does not exist after 
analysis + try data.live_set.ensureUnusedCapacity(gpa, @intCast(loop_live.len)); + for (loop_live) |alive| data.live_set.putAssumeCapacity(alive, {}); - const num_breaks = data.old_extra.items[extra_idx]; - const breaks: []const Air.Inst.Index = @ptrCast(data.old_extra.items[extra_idx + 1 ..][0..num_breaks]); + log.debug("[{}] %{}: block live set is {}", .{ LivenessPass.main_analysis, inst, fmtInstSet(&data.live_set) }); - const num_loop_live = data.old_extra.items[extra_idx + num_breaks + 1]; - const loop_live: []const Air.Inst.Index = @ptrCast(data.old_extra.items[extra_idx + num_breaks + 2 ..][0..num_loop_live]); + for (breaks) |block_inst| { + // We might break to this block, so include every operand that the block needs alive + const block_scope = data.block_scopes.get(block_inst).?; - // This is necessarily not in the same control flow branch, because loops are noreturn - data.live_set.clearRetainingCapacity(); + var it = block_scope.live_set.keyIterator(); + while (it.next()) |key| { + const alive = key.*; + try data.live_set.put(gpa, alive, {}); + } + } - try data.live_set.ensureUnusedCapacity(gpa, @intCast(loop_live.len)); - for (loop_live) |alive| { - data.live_set.putAssumeCapacity(alive, {}); - } + log.debug("[{}] %{}: loop live set is {}", .{ LivenessPass.main_analysis, inst, fmtInstSet(&data.live_set) }); +} - log.debug("[{}] %{}: block live set is {}", .{ pass, inst, fmtInstSet(&data.live_set) }); +fn analyzeInstLoop( + a: *Analysis, + comptime pass: LivenessPass, + data: *LivenessPassData(pass), + inst: Air.Inst.Index, +) !void { + const inst_datas = a.air.instructions.items(.data); + const extra = a.air.extraData(Air.Block, inst_datas[@intFromEnum(inst)].ty_pl.payload); + const body: []const Air.Inst.Index = @ptrCast(a.air.extra[extra.end..][0..extra.data.body_len]); + const gpa = a.gpa; - for (breaks) |block_inst| { - // We might break to this block, so include every operand that the block needs alive - const block_scope = 
data.block_scopes.get(block_inst).?; + try analyzeOperands(a, pass, data, inst, .{ .none, .none, .none }); - var it = block_scope.live_set.keyIterator(); - while (it.next()) |key| { - const alive = key.*; - try data.live_set.put(gpa, alive, {}); - } - } + switch (pass) { + .loop_analysis => { + var old_breaks = data.breaks.move(); + defer old_breaks.deinit(gpa); + + var old_live = data.live_set.move(); + defer old_live.deinit(gpa); + + try analyzeBody(a, pass, data, body); + + try writeLoopInfo(a, data, inst, old_breaks, old_live); + }, + + .main_analysis => { + try resolveLoopLiveSet(a, data, inst); // Now, `data.live_set` is the operands which must be alive when the loop repeats. // Move them into a block scope for corresponding `repeat` instructions to notice. - log.debug("[{}] %{}: loop live set is {}", .{ pass, inst, fmtInstSet(&data.live_set) }); try data.block_scopes.putNoClobber(gpa, inst, .{ .live_set = data.live_set.move(), }); @@ -1720,6 +1774,7 @@ fn analyzeInstSwitchBr( comptime pass: LivenessPass, data: *LivenessPassData(pass), inst: Air.Inst.Index, + is_dispatch_loop: bool, ) !void { const inst_datas = a.air.instructions.items(.data); const pl_op = inst_datas[@intFromEnum(inst)].pl_op; @@ -1730,6 +1785,17 @@ fn analyzeInstSwitchBr( switch (pass) { .loop_analysis => { + var old_breaks: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}; + defer old_breaks.deinit(gpa); + + var old_live: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}; + defer old_live.deinit(gpa); + + if (is_dispatch_loop) { + old_breaks = data.breaks.move(); + old_live = data.live_set.move(); + } + var it = switch_br.iterateCases(); while (it.next()) |case| { try analyzeBody(a, pass, data, case.body); @@ -1738,9 +1804,24 @@ fn analyzeInstSwitchBr( const else_body = it.elseBody(); try analyzeBody(a, pass, data, else_body); } + + if (is_dispatch_loop) { + try writeLoopInfo(a, data, inst, old_breaks, old_live); + } }, .main_analysis => { + if (is_dispatch_loop) { + try 
resolveLoopLiveSet(a, data, inst); + try data.block_scopes.putNoClobber(gpa, inst, .{ + .live_set = data.live_set.move(), + }); + } + defer if (is_dispatch_loop) { + log.debug("[{}] %{}: popped loop block scop", .{ pass, inst }); + var scope = data.block_scopes.fetchRemove(inst).?.value; + scope.live_set.deinit(gpa); + }; // This is, all in all, just a messier version of the `cond_br` logic. If you're trying // to understand it, I encourage looking at `analyzeInstCondBr` first. diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig index 1ff3059417ca..87ba6a3fc893 100644 --- a/src/Liveness/Verify.zig +++ b/src/Liveness/Verify.zig @@ -447,6 +447,16 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { try self.verifyMatchingLiveness(repeat.loop_inst, expected_live); }, + .switch_dispatch => { + const br = data[@intFromEnum(inst)].br; + + try self.verifyOperand(inst, br.operand, self.liveness.operandDies(inst, 0)); + + const expected_live = self.loops.get(br.block_inst) orelse + return invalid("%{}: loop %{} not in scope", .{ @intFromEnum(inst), @intFromEnum(br.block_inst) }); + + try self.verifyMatchingLiveness(br.block_inst, expected_live); + }, .block, .dbg_inline_block => |tag| { const ty_pl = data[@intFromEnum(inst)].ty_pl; const block_ty = ty_pl.ty.toType(); @@ -494,11 +504,11 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { // The same stuff should be alive after the loop as before it. 
const gop = try self.loops.getOrPut(self.gpa, inst); + if (gop.found_existing) return invalid("%{}: loop already exists", .{@intFromEnum(inst)}); defer { var live = self.loops.fetchRemove(inst).?; live.value.deinit(self.gpa); } - if (gop.found_existing) return invalid("%{}: loop already exists", .{@intFromEnum(inst)}); gop.value_ptr.* = try self.live.clone(self.gpa); try self.verifyBody(loop_body); @@ -528,7 +538,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { try self.verifyInst(inst); }, - .switch_br => { + .switch_br, .loop_switch_br => { const switch_br = self.air.unwrapSwitch(inst); const switch_br_liveness = try self.liveness.getSwitchBr( self.gpa, @@ -539,13 +549,22 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { try self.verifyOperand(inst, switch_br.operand, self.liveness.operandDies(inst, 0)); - var live = self.live.move(); - defer live.deinit(self.gpa); + // Excluding the operand (which we just handled), the same stuff should be alive + // after the loop as before it. 
+ { + const gop = try self.loops.getOrPut(self.gpa, inst); + if (gop.found_existing) return invalid("%{}: loop already exists", .{@intFromEnum(inst)}); + gop.value_ptr.* = self.live.move(); + } + defer { + var live = self.loops.fetchRemove(inst).?; + live.value.deinit(self.gpa); + } var it = switch_br.iterateCases(); while (it.next()) |case| { self.live.deinit(self.gpa); - self.live = try live.clone(self.gpa); + self.live = try self.loops.get(inst).?.clone(self.gpa); for (switch_br_liveness.deaths[case.idx]) |death| try self.verifyDeath(inst, death); try self.verifyBody(case.body); @@ -554,7 +573,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void { const else_body = it.elseBody(); if (else_body.len > 0) { self.live.deinit(self.gpa); - self.live = try live.clone(self.gpa); + self.live = try self.loops.get(inst).?.clone(self.gpa); for (switch_br_liveness.deaths[switch_br.cases_len]) |death| try self.verifyDeath(inst, death); try self.verifyBody(else_body); } diff --git a/src/Sema.zig b/src/Sema.zig index a9287e21d1af..57c33324f59c 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -503,11 +503,21 @@ pub const Block = struct { /// to enable more precise compile errors. /// Same indexes, capacity, length as `results`. src_locs: std.ArrayListUnmanaged(?LazySrcLoc), - - pub fn deinit(merges: *@This(), allocator: mem.Allocator) void { + /// Most blocks do not utilize this field. When it is used, its use is + /// contextual. The possible uses are as follows: + /// * for a `switch_block[_ref]`, this refers to dummy `br` instructions + /// which correspond to `switch_continue` ZIR. The switch logic will + /// rewrite these to appropriate AIR switch dispatches. + extra_insts: std.ArrayListUnmanaged(Air.Inst.Index) = .{}, + /// Same indexes, capacity, length as `extra_insts`. 
+ extra_src_locs: std.ArrayListUnmanaged(LazySrcLoc) = .{}, + + pub fn deinit(merges: *@This(), allocator: Allocator) void { merges.results.deinit(allocator); merges.br_list.deinit(allocator); merges.src_locs.deinit(allocator); + merges.extra_insts.deinit(allocator); + merges.extra_src_locs.deinit(allocator); } }; @@ -946,14 +956,21 @@ fn analyzeInlineBody( error.ComptimeBreak => {}, else => |e| return e, } - const break_inst = sema.comptime_break_inst; - const break_data = sema.code.instructions.items(.data)[@intFromEnum(break_inst)].@"break"; - const extra = sema.code.extraData(Zir.Inst.Break, break_data.payload_index).data; + const break_inst = sema.code.instructions.get(@intFromEnum(sema.comptime_break_inst)); + switch (break_inst.tag) { + .switch_continue => { + // This is handled by separate logic. + return error.ComptimeBreak; + }, + .break_inline, .@"break" => {}, + else => unreachable, + } + const extra = sema.code.extraData(Zir.Inst.Break, break_inst.data.@"break".payload_index).data; if (extra.block_inst != break_target) { // This control flow goes further up the stack. return error.ComptimeBreak; } - return try sema.resolveInst(break_data.operand); + return try sema.resolveInst(break_inst.data.@"break".operand); } /// Like `analyzeInlineBody`, but if the body does not break with a value, returns @@ -1571,6 +1588,13 @@ fn analyzeBodyInner( i = 0; continue; }, + .switch_continue => if (block.is_comptime) { + sema.comptime_break_inst = inst; + return error.ComptimeBreak; + } else { + try sema.zirSwitchContinue(block, inst); + break; + }, .loop => blk: { if (!block.is_comptime) break :blk try sema.zirLoop(block, inst); // Same as `block_inline`. 
TODO https://github.com/ziglang/zig/issues/8220 @@ -6531,6 +6555,56 @@ fn zirBreak(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) CompileError } } +fn zirSwitchContinue(sema: *Sema, start_block: *Block, inst: Zir.Inst.Index) CompileError!void { + const tracy = trace(@src()); + defer tracy.end(); + + const inst_data = sema.code.instructions.items(.data)[@intFromEnum(inst)].@"break"; + const extra = sema.code.extraData(Zir.Inst.Break, inst_data.payload_index).data; + assert(extra.operand_src_node != Zir.Inst.Break.no_src_node); + const operand_src = start_block.nodeOffset(extra.operand_src_node); + const uncoerced_operand = try sema.resolveInst(inst_data.operand); + const switch_inst = extra.block_inst; + + switch (sema.code.instructions.items(.tag)[@intFromEnum(switch_inst)]) { + .switch_block, .switch_block_ref => {}, + else => unreachable, // assertion failure + } + + const switch_payload_index = sema.code.instructions.items(.data)[@intFromEnum(switch_inst)].pl_node.payload_index; + const switch_operand_ref = sema.code.extraData(Zir.Inst.SwitchBlock, switch_payload_index).data.operand; + const switch_operand_ty = sema.typeOf(try sema.resolveInst(switch_operand_ref)); + + const operand = try sema.coerce(start_block, switch_operand_ty, uncoerced_operand, operand_src); + + try sema.validateRuntimeValue(start_block, operand_src, operand); + + // We want to generate a `switch_dispatch` instruction with the switch condition, + // possibly preceded by a store to the stack alloc containing the raw operand. + // However, to avoid too much special-case state in Sema, this is handled by the + // `switch` lowering logic. As such, we will find the `Block` corresponding to the + // parent `switch_block[_ref]` instruction, create a dummy `br`, and add a merge + // to signal to the switch logic to rewrite this into an appropriate dispatch. 
+ + var block = start_block; + while (true) { + if (block.label) |label| { + if (label.zir_block == switch_inst) { + const br_ref = try start_block.addBr(label.merges.block_inst, operand); + try label.merges.extra_insts.append(sema.gpa, br_ref.toIndex().?); + try label.merges.extra_src_locs.append(sema.gpa, operand_src); + block.runtime_index.increment(); + if (block.runtime_cond == null and block.runtime_loop == null) { + block.runtime_cond = start_block.runtime_cond orelse start_block.runtime_loop; + block.runtime_loop = start_block.runtime_loop; + } + return; + } + } + block = block.parent.?; + } +} + fn zirDbgStmt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void { if (block.is_comptime or block.ownerModule().strip) return; @@ -10940,12 +11014,7 @@ const SwitchProngAnalysis = struct { sema: *Sema, /// The block containing the `switch_block` itself. parent_block: *Block, - /// The raw switch operand value (*not* the condition). Always defined. - operand: Air.Inst.Ref, - /// May be `undefined` if no prong has a by-ref capture. - operand_ptr: Air.Inst.Ref, - /// The switch condition value. For unions, `operand` is the union and `cond` is its tag. - cond: Air.Inst.Ref, + operand: Operand, /// If this switch is on an error set, this is the type to assign to the /// `else` prong. If `null`, the prong should be unreachable. else_error_ty: ?Type, @@ -10955,6 +11024,34 @@ const SwitchProngAnalysis = struct { /// undefined if no prong has a tag capture. tag_capture_inst: Zir.Inst.Index, + const Operand = union(enum) { + /// This switch will be dispatched only once, with the given operand. + simple: struct { + /// The raw switch operand value. Always defined. + by_val: Air.Inst.Ref, + /// The switch operand *pointer*. Defined only if there is a prong + /// with a by-ref capture. + by_ref: Air.Inst.Ref, + /// The switch condition value. For unions, `operand` is the union + /// and `cond` is its enum tag value. 
+ cond: Air.Inst.Ref, + }, + /// This switch may be dispatched multiple times with `continue` syntax. + /// As such, the operand is stored in an alloc if needed. + loop: struct { + /// The `alloc` containing the `switch` operand for the active dispatch. + /// Each prong must load from this `alloc` to get captures. + /// If there are no captures, this may be undefined. + operand_alloc: Air.Inst.Ref, + /// Whether `operand_alloc` contains a by-val operand or a by-ref + /// operand. + operand_is_ref: bool, + /// The switch condition value for the *initial* dispatch. For + /// unions, this is the enum tag value. + init_cond: Air.Inst.Ref, + }, + }; + /// Resolve a switch prong which is determined at comptime to have no peers. /// Uses `resolveBlockBody`. Sets up captures as needed. fn resolveProngComptime( @@ -11086,7 +11183,15 @@ const SwitchProngAnalysis = struct { const sema = spa.sema; const pt = sema.pt; const zcu = pt.zcu; - const operand_ty = sema.typeOf(spa.operand); + const operand_ty = switch (spa.operand) { + .simple => |s| sema.typeOf(s.by_val), + .loop => |l| ty: { + const alloc_ty = sema.typeOf(l.operand_alloc); + const alloc_child = alloc_ty.childType(zcu); + if (l.operand_is_ref) break :ty alloc_child.childType(zcu); + break :ty alloc_child; + }, + }; if (operand_ty.zigTypeTag(zcu) != .@"union") { const tag_capture_src: LazySrcLoc = .{ .base_node_inst = capture_src.base_node_inst, @@ -11117,10 +11222,24 @@ const SwitchProngAnalysis = struct { const zir_datas = sema.code.instructions.items(.data); const switch_node_offset = zir_datas[@intFromEnum(spa.switch_block_inst)].pl_node.src_node; - const operand_ty = sema.typeOf(spa.operand); - const operand_ptr_ty = if (capture_byref) sema.typeOf(spa.operand_ptr) else undefined; const operand_src = block.src(.{ .node_offset_switch_operand = switch_node_offset }); + const operand_val, const operand_ptr = switch (spa.operand) { + .simple => |s| .{ s.by_val, s.by_ref }, + .loop => |l| op: { + const loaded = try 
sema.analyzeLoad(block, operand_src, l.operand_alloc, operand_src); + if (l.operand_is_ref) { + const by_val = try sema.analyzeLoad(block, operand_src, loaded, operand_src); + break :op .{ by_val, loaded }; + } else { + break :op .{ loaded, undefined }; + } + }, + }; + + const operand_ty = sema.typeOf(operand_val); + const operand_ptr_ty = if (capture_byref) sema.typeOf(operand_ptr) else undefined; + if (inline_case_capture != .none) { const item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, inline_case_capture, undefined) catch unreachable; if (operand_ty.zigTypeTag(zcu) == .@"union") { @@ -11136,16 +11255,16 @@ const SwitchProngAnalysis = struct { .address_space = operand_ptr_ty.ptrAddressSpace(zcu), }, }); - if (try sema.resolveDefinedValue(block, operand_src, spa.operand_ptr)) |union_ptr| { + if (try sema.resolveDefinedValue(block, operand_src, operand_ptr)) |union_ptr| { return Air.internedToRef((try union_ptr.ptrField(field_index, pt)).toIntern()); } - return block.addStructFieldPtr(spa.operand_ptr, field_index, ptr_field_ty); + return block.addStructFieldPtr(operand_ptr, field_index, ptr_field_ty); } else { - if (try sema.resolveDefinedValue(block, operand_src, spa.operand)) |union_val| { + if (try sema.resolveDefinedValue(block, operand_src, operand_val)) |union_val| { const tag_and_val = ip.indexToKey(union_val.toIntern()).un; return Air.internedToRef(tag_and_val.val); } - return block.addStructFieldVal(spa.operand, field_index, field_ty); + return block.addStructFieldVal(operand_val, field_index, field_ty); } } else if (capture_byref) { return sema.uavRef(item_val.toIntern()); @@ -11156,17 +11275,17 @@ const SwitchProngAnalysis = struct { if (is_special_prong) { if (capture_byref) { - return spa.operand_ptr; + return operand_ptr; } switch (operand_ty.zigTypeTag(zcu)) { .error_set => if (spa.else_error_ty) |ty| { - return sema.bitCast(block, ty, spa.operand, operand_src, null); + return sema.bitCast(block, ty, operand_val, operand_src, 
null); } else { try sema.analyzeUnreachable(block, operand_src, false); return .unreachable_value; }, - else => return spa.operand, + else => return operand_val, } } @@ -11265,19 +11384,19 @@ const SwitchProngAnalysis = struct { }; }; - if (try sema.resolveDefinedValue(block, operand_src, spa.operand_ptr)) |op_ptr_val| { + if (try sema.resolveDefinedValue(block, operand_src, operand_ptr)) |op_ptr_val| { if (op_ptr_val.isUndef(zcu)) return pt.undefRef(capture_ptr_ty); const field_ptr_val = try op_ptr_val.ptrField(first_field_index, pt); return Air.internedToRef((try pt.getCoerced(field_ptr_val, capture_ptr_ty)).toIntern()); } try sema.requireRuntimeBlock(block, operand_src, null); - return block.addStructFieldPtr(spa.operand_ptr, first_field_index, capture_ptr_ty); + return block.addStructFieldPtr(operand_ptr, first_field_index, capture_ptr_ty); } - if (try sema.resolveDefinedValue(block, operand_src, spa.operand)) |operand_val| { - if (operand_val.isUndef(zcu)) return pt.undefRef(capture_ty); - const union_val = ip.indexToKey(operand_val.toIntern()).un; + if (try sema.resolveDefinedValue(block, operand_src, operand_val)) |operand_val_val| { + if (operand_val_val.isUndef(zcu)) return pt.undefRef(capture_ty); + const union_val = ip.indexToKey(operand_val_val.toIntern()).un; if (Value.fromInterned(union_val.tag).isUndef(zcu)) return pt.undefRef(capture_ty); const uncoerced = Air.internedToRef(union_val.val); return sema.coerce(block, capture_ty, uncoerced, operand_src); @@ -11286,7 +11405,7 @@ const SwitchProngAnalysis = struct { try sema.requireRuntimeBlock(block, operand_src, null); if (same_types) { - return block.addStructFieldVal(spa.operand, first_field_index, capture_ty); + return block.addStructFieldVal(operand_val, first_field_index, capture_ty); } // We may have to emit a switch block which coerces the operand to the capture type. @@ -11300,7 +11419,7 @@ const SwitchProngAnalysis = struct { } // All fields are in-memory coercible to the resolved type! 
// Just take the first field and bitcast the result. - const uncoerced = try block.addStructFieldVal(spa.operand, first_field_index, first_field_ty); + const uncoerced = try block.addStructFieldVal(operand_val, first_field_index, first_field_ty); return block.addBitCast(capture_ty, uncoerced); }; @@ -11364,7 +11483,7 @@ const SwitchProngAnalysis = struct { const field_idx = field_indices[idx]; const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_idx]); - const uncoerced = try coerce_block.addStructFieldVal(spa.operand, field_idx, field_ty); + const uncoerced = try coerce_block.addStructFieldVal(operand_val, field_idx, field_ty); const coerced = try sema.coerce(&coerce_block, capture_ty, uncoerced, case_src); _ = try coerce_block.addBr(capture_block_inst, coerced); @@ -11388,7 +11507,7 @@ const SwitchProngAnalysis = struct { const first_imc_item_idx = in_mem_coercible.findFirstSet().?; const first_imc_field_idx = field_indices[first_imc_item_idx]; const first_imc_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[first_imc_field_idx]); - const uncoerced = try coerce_block.addStructFieldVal(spa.operand, first_imc_field_idx, first_imc_field_ty); + const uncoerced = try coerce_block.addStructFieldVal(operand_val, first_imc_field_idx, first_imc_field_ty); const coerced = try coerce_block.addBitCast(capture_ty, uncoerced); _ = try coerce_block.addBr(capture_block_inst, coerced); @@ -11404,21 +11523,47 @@ const SwitchProngAnalysis = struct { const switch_br_inst: u32 = @intCast(sema.air_instructions.len); try sema.air_instructions.append(sema.gpa, .{ .tag = .switch_br, - .data = .{ .pl_op = .{ - .operand = spa.cond, - .payload = sema.addExtraAssumeCapacity(Air.SwitchBr{ - .cases_len = @intCast(prong_count), - .else_body_len = @intCast(else_body_len), - }), - } }, + .data = .{ + .pl_op = .{ + .operand = undefined, // set by switch below + .payload = sema.addExtraAssumeCapacity(Air.SwitchBr{ + .cases_len = @intCast(prong_count), + .else_body_len = 
@intCast(else_body_len), + }), + }, + }, }); sema.air_extra.appendSliceAssumeCapacity(cases_extra.items); // Set up block body - sema.air_instructions.items(.data)[@intFromEnum(capture_block_inst)].ty_pl.payload = sema.addExtraAssumeCapacity(Air.Block{ - .body_len = 1, - }); - sema.air_extra.appendAssumeCapacity(switch_br_inst); + switch (spa.operand) { + .simple => |s| { + const air_datas = sema.air_instructions.items(.data); + air_datas[switch_br_inst].pl_op.operand = s.cond; + air_datas[@intFromEnum(capture_block_inst)].ty_pl.payload = sema.addExtraAssumeCapacity(Air.Block{ + .body_len = 1, + }); + sema.air_extra.appendAssumeCapacity(switch_br_inst); + }, + .loop => { + // The block must first extract the tag from the loaded union. + const tag_inst: Air.Inst.Index = @enumFromInt(sema.air_instructions.len); + try sema.air_instructions.append(sema.gpa, .{ + .tag = .get_union_tag, + .data = .{ .ty_op = .{ + .ty = Air.internedToRef(union_obj.enum_tag_ty), + .operand = operand_val, + } }, + }); + const air_datas = sema.air_instructions.items(.data); + air_datas[switch_br_inst].pl_op.operand = tag_inst.toRef(); + air_datas[@intFromEnum(capture_block_inst)].ty_pl.payload = sema.addExtraAssumeCapacity(Air.Block{ + .body_len = 2, + }); + sema.air_extra.appendAssumeCapacity(@intFromEnum(tag_inst)); + sema.air_extra.appendAssumeCapacity(switch_br_inst); + }, + } return capture_block_inst.toRef(); }, @@ -11435,7 +11580,7 @@ const SwitchProngAnalysis = struct { if (case_vals.len == 1) { const item_val = sema.resolveConstDefinedValue(block, LazySrcLoc.unneeded, case_vals[0], undefined) catch unreachable; const item_ty = try pt.singleErrorSetType(item_val.getErrorName(zcu).unwrap().?); - return sema.bitCast(block, item_ty, spa.operand, operand_src, null); + return sema.bitCast(block, item_ty, operand_val, operand_src, null); } var names: InferredErrorSet.NameMap = .{}; @@ -11445,15 +11590,15 @@ const SwitchProngAnalysis = struct { 
names.putAssumeCapacityNoClobber(err_val.getErrorName(zcu).unwrap().?, {}); } const error_ty = try pt.errorSetFromUnsortedNames(names.keys()); - return sema.bitCast(block, error_ty, spa.operand, operand_src, null); + return sema.bitCast(block, error_ty, operand_val, operand_src, null); }, else => { // In this case the capture value is just the passed-through value // of the switch condition. if (capture_byref) { - return spa.operand_ptr; + return operand_ptr; } else { - return spa.operand; + return operand_val; } }, } @@ -11686,9 +11831,13 @@ fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp var spa: SwitchProngAnalysis = .{ .sema = sema, .parent_block = block, - .operand = undefined, // must be set to the unwrapped error code before use - .operand_ptr = .none, - .cond = raw_operand_val, + .operand = .{ + .simple = .{ + .by_val = undefined, // must be set to the unwrapped error code before use + .by_ref = undefined, + .cond = raw_operand_val, + }, + }, .else_error_ty = else_error_ty, .switch_block_inst = inst, .tag_capture_inst = undefined, @@ -11709,13 +11858,13 @@ fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp .name = operand_val.getErrorName(zcu).unwrap().?, }, })); - spa.operand = if (extra.data.bits.payload_is_ref) + spa.operand.simple.by_val = if (extra.data.bits.payload_is_ref) try sema.analyzeErrUnionCodePtr(block, switch_operand_src, raw_operand_val) else try sema.analyzeErrUnionCode(block, switch_operand_src, raw_operand_val); if (extra.data.bits.any_uses_err_capture) { - sema.inst_map.putAssumeCapacity(err_capture_inst, spa.operand); + sema.inst_map.putAssumeCapacity(err_capture_inst, spa.operand.simple.by_val); } defer if (extra.data.bits.any_uses_err_capture) assert(sema.inst_map.remove(err_capture_inst)); @@ -11723,7 +11872,7 @@ fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp sema, spa, &child_block, - try sema.switchCond(block, switch_operand_src, 
spa.operand), + try sema.switchCond(block, switch_operand_src, spa.operand.simple.by_val), err_val, operand_err_set_ty, switch_src_node_offset, @@ -11777,20 +11926,20 @@ fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp const true_instructions = try sub_block.instructions.toOwnedSlice(gpa); defer gpa.free(true_instructions); - spa.operand = if (extra.data.bits.payload_is_ref) + spa.operand.simple.by_val = if (extra.data.bits.payload_is_ref) try sema.analyzeErrUnionCodePtr(&sub_block, switch_operand_src, raw_operand_val) else try sema.analyzeErrUnionCode(&sub_block, switch_operand_src, raw_operand_val); if (extra.data.bits.any_uses_err_capture) { - sema.inst_map.putAssumeCapacity(err_capture_inst, spa.operand); + sema.inst_map.putAssumeCapacity(err_capture_inst, spa.operand.simple.by_val); } defer if (extra.data.bits.any_uses_err_capture) assert(sema.inst_map.remove(err_capture_inst)); _ = try sema.analyzeSwitchRuntimeBlock( spa, &sub_block, switch_src, - try sema.switchCond(block, switch_operand_src, spa.operand), + try sema.switchCond(block, switch_operand_src, spa.operand.simple.by_val), operand_err_set_ty, switch_operand_src, case_vals, @@ -11859,17 +12008,63 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r const special_prong_src = block.src(.{ .node_offset_switch_special_prong = src_node_offset }); const extra = sema.code.extraData(Zir.Inst.SwitchBlock, inst_data.payload_index); - const raw_operand_val: Air.Inst.Ref, const raw_operand_ptr: Air.Inst.Ref = blk: { + const operand: SwitchProngAnalysis.Operand, const raw_operand_ty: Type = op: { const maybe_ptr = try sema.resolveInst(extra.data.operand); - if (operand_is_ref) { - const val = try sema.analyzeLoad(block, src, maybe_ptr, operand_src); - break :blk .{ val, maybe_ptr }; - } else { - break :blk .{ maybe_ptr, undefined }; + const val, const ref = if (operand_is_ref) + .{ try sema.analyzeLoad(block, src, maybe_ptr, operand_src), maybe_ptr } 
+ else + .{ maybe_ptr, undefined }; + + const init_cond = try sema.switchCond(block, operand_src, val); + + const operand_ty = sema.typeOf(val); + + if (extra.data.bits.has_continue and !block.is_comptime) { + // Even if the operand is comptime-known, this `switch` is runtime. + if (try operand_ty.comptimeOnlySema(pt)) { + return sema.failWithOwnedErrorMsg(block, msg: { + const msg = try sema.errMsg(operand_src, "operand of switch loop has comptime-only type '{}'", .{operand_ty.fmt(pt)}); + errdefer msg.destroy(gpa); + try sema.errNote(operand_src, msg, "switch loops are evaluated at runtime outside of comptime scopes", .{}); + break :msg msg; + }); + } + try sema.validateRuntimeValue(block, operand_src, maybe_ptr); + const operand_alloc = if (extra.data.bits.any_non_inline_capture) a: { + const operand_ptr_ty = try pt.singleMutPtrType(sema.typeOf(maybe_ptr)); + const operand_alloc = try block.addTy(.alloc, operand_ptr_ty); + _ = try block.addBinOp(.store, operand_alloc, maybe_ptr); + break :a operand_alloc; + } else undefined; + break :op .{ + .{ .loop = .{ + .operand_alloc = operand_alloc, + .operand_is_ref = operand_is_ref, + .init_cond = init_cond, + } }, + operand_ty, + }; + } + + // We always use `simple` in the comptime case, because as far as the dispatching logic + // is concerned, it really is dispatching a single prong. `resolveSwitchComptime` will + // be responsible for recursively resolving different prongs as needed.
+ break :op .{ + .{ .simple = .{ + .by_val = val, + .by_ref = ref, + .cond = init_cond, + } }, + operand_ty, + }; }; - const operand = try sema.switchCond(block, operand_src, raw_operand_val); + const union_originally = raw_operand_ty.zigTypeTag(zcu) == .@"union"; + const err_set = raw_operand_ty.zigTypeTag(zcu) == .error_set; + const cond_ty = switch (raw_operand_ty.zigTypeTag(zcu)) { + .@"union" => raw_operand_ty.unionTagType(zcu).?, // validated by `switchCond` above + else => raw_operand_ty, + }; // AstGen guarantees that the instruction immediately preceding // switch_block(_ref) is a dbg_stmt @@ -11919,9 +12114,6 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r }, }; - const maybe_union_ty = sema.typeOf(raw_operand_val); - const union_originally = maybe_union_ty.zigTypeTag(zcu) == .@"union"; - // Duplicate checking variables later also used for `inline else`. var seen_enum_fields: []?LazySrcLoc = &.{}; var seen_errors = SwitchErrorSet.init(gpa); @@ -11937,13 +12129,10 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r var empty_enum = false; - const operand_ty = sema.typeOf(operand); - const err_set = operand_ty.zigTypeTag(zcu) == .error_set; - var else_error_ty: ?Type = null; // Validate usage of '_' prongs. - if (special_prong == .under and (!operand_ty.isNonexhaustiveEnum(zcu) or union_originally)) { + if (special_prong == .under and !raw_operand_ty.isNonexhaustiveEnum(zcu)) { const msg = msg: { const msg = try sema.errMsg( src, @@ -11969,11 +12158,11 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r } // Validate for duplicate items, missing else prong, and invalid range. 
- switch (operand_ty.zigTypeTag(zcu)) { + switch (cond_ty.zigTypeTag(zcu)) { .@"union" => unreachable, // handled in `switchCond` .@"enum" => { - seen_enum_fields = try gpa.alloc(?LazySrcLoc, operand_ty.enumFieldCount(zcu)); - empty_enum = seen_enum_fields.len == 0 and !operand_ty.isNonexhaustiveEnum(zcu); + seen_enum_fields = try gpa.alloc(?LazySrcLoc, cond_ty.enumFieldCount(zcu)); + empty_enum = seen_enum_fields.len == 0 and !cond_ty.isNonexhaustiveEnum(zcu); @memset(seen_enum_fields, null); // `range_set` is used for non-exhaustive enum values that do not correspond to any tags. @@ -11991,7 +12180,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r seen_enum_fields, &range_set, item_ref, - operand_ty, + cond_ty, block.src(.{ .switch_case_item = .{ .switch_node_offset = src_node_offset, .case_idx = .{ .kind = .scalar, .index = @intCast(scalar_i) }, @@ -12019,7 +12208,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r seen_enum_fields, &range_set, item_ref, - operand_ty, + cond_ty, block.src(.{ .switch_case_item = .{ .switch_node_offset = src_node_offset, .case_idx = .{ .kind = .multi, .index = @intCast(multi_i) }, @@ -12028,7 +12217,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r )); } - try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset); + try sema.validateSwitchNoRange(block, ranges_len, cond_ty, src_node_offset); } } const all_tags_handled = for (seen_enum_fields) |seen_src| { @@ -12036,7 +12225,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r } else true; if (special_prong == .@"else") { - if (all_tags_handled and !operand_ty.isNonexhaustiveEnum(zcu)) return sema.fail( + if (all_tags_handled and !cond_ty.isNonexhaustiveEnum(zcu)) return sema.fail( block, special_prong_src, "unreachable else prong; all cases already handled", @@ -12053,9 +12242,9 @@ fn zirSwitchBlock(sema: *Sema, block: 
*Block, inst: Zir.Inst.Index, operand_is_r for (seen_enum_fields, 0..) |seen_src, i| { if (seen_src != null) continue; - const field_name = operand_ty.enumFieldName(i, zcu); + const field_name = cond_ty.enumFieldName(i, zcu); try sema.addFieldErrNote( - operand_ty, + cond_ty, i, msg, "unhandled enumeration value: '{}'", @@ -12063,15 +12252,15 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r ); } try sema.errNote( - operand_ty.srcLoc(zcu), + cond_ty.srcLoc(zcu), msg, "enum '{}' declared here", - .{operand_ty.fmt(pt)}, + .{cond_ty.fmt(pt)}, ); break :msg msg; }; return sema.failWithOwnedErrorMsg(block, msg); - } else if (special_prong == .none and operand_ty.isNonexhaustiveEnum(zcu) and !union_originally) { + } else if (special_prong == .none and cond_ty.isNonexhaustiveEnum(zcu) and !union_originally) { return sema.fail( block, src, @@ -12085,7 +12274,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r block, &seen_errors, &case_vals, - operand_ty, + cond_ty, inst_data, scalar_cases_len, multi_cases_len, @@ -12106,7 +12295,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r block, &range_set, item_ref, - operand_ty, + cond_ty, block.src(.{ .switch_case_item = .{ .switch_node_offset = src_node_offset, .case_idx = .{ .kind = .scalar, .index = @intCast(scalar_i) }, @@ -12133,7 +12322,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r block, &range_set, item_ref, - operand_ty, + cond_ty, block.src(.{ .switch_case_item = .{ .switch_node_offset = src_node_offset, .case_idx = .{ .kind = .multi, .index = @intCast(multi_i) }, @@ -12155,7 +12344,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r &range_set, item_first, item_last, - operand_ty, + cond_ty, block.src(.{ .switch_case_item = .{ .switch_node_offset = src_node_offset, .case_idx = .{ .kind = .multi, .index = @intCast(multi_i) }, @@ -12171,9 
+12360,9 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r } check_range: { - if (operand_ty.zigTypeTag(zcu) == .int) { - const min_int = try operand_ty.minInt(pt, operand_ty); - const max_int = try operand_ty.maxInt(pt, operand_ty); + if (cond_ty.zigTypeTag(zcu) == .int) { + const min_int = try cond_ty.minInt(pt, cond_ty); + const max_int = try cond_ty.maxInt(pt, cond_ty); if (try range_set.spans(min_int.toIntern(), max_int.toIntern())) { if (special_prong == .@"else") { return sema.fail( @@ -12246,7 +12435,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r )); } - try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset); + try sema.validateSwitchNoRange(block, ranges_len, cond_ty, src_node_offset); } } switch (special_prong) { @@ -12278,7 +12467,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r block, src, "else prong required when switching on type '{}'", - .{operand_ty.fmt(pt)}, + .{cond_ty.fmt(pt)}, ); } @@ -12299,7 +12488,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r block, &seen_values, item_ref, - operand_ty, + cond_ty, block.src(.{ .switch_case_item = .{ .switch_node_offset = src_node_offset, .case_idx = .{ .kind = .scalar, .index = @intCast(scalar_i) }, @@ -12326,7 +12515,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r block, &seen_values, item_ref, - operand_ty, + cond_ty, block.src(.{ .switch_case_item = .{ .switch_node_offset = src_node_offset, .case_idx = .{ .kind = .multi, .index = @intCast(multi_i) }, @@ -12335,7 +12524,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r )); } - try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset); + try sema.validateSwitchNoRange(block, ranges_len, cond_ty, src_node_offset); } } }, @@ -12354,16 +12543,14 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, 
inst: Zir.Inst.Index, operand_is_r .comptime_float, .float, => return sema.fail(block, operand_src, "invalid switch operand type '{}'", .{ - operand_ty.fmt(pt), + raw_operand_ty.fmt(pt), }), } const spa: SwitchProngAnalysis = .{ .sema = sema, .parent_block = block, - .operand = raw_operand_val, - .operand_ptr = raw_operand_ptr, - .cond = operand, + .operand = operand, .else_error_ty = else_error_ty, .switch_block_inst = inst, .tag_capture_inst = tag_capture_inst, @@ -12407,24 +12594,6 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r defer child_block.instructions.deinit(gpa); defer merges.deinit(gpa); - if (try sema.resolveDefinedValue(&child_block, src, operand)) |operand_val| { - return resolveSwitchComptime( - sema, - spa, - &child_block, - operand, - operand_val, - operand_ty, - src_node_offset, - special, - case_vals, - scalar_cases_len, - multi_cases_len, - err_set, - empty_enum, - ); - } - if (scalar_cases_len + multi_cases_len == 0 and !special.is_inline) { if (empty_enum) { return .void_value; @@ -12432,54 +12601,90 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r if (special_prong == .none) { return sema.fail(block, src, "switch must handle all possibilities", .{}); } - if (err_set and try sema.maybeErrorUnwrap(block, special.body, operand, operand_src, false)) { - return .unreachable_value; - } - if (zcu.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and operand_ty.zigTypeTag(zcu) == .@"enum" and - (!operand_ty.isNonexhaustiveEnum(zcu) or union_originally)) + const init_cond = switch (operand) { + .simple => |s| s.cond, + .loop => |l| l.init_cond, + }; + if (zcu.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and + raw_operand_ty.zigTypeTag(zcu) == .@"enum" and !raw_operand_ty.isNonexhaustiveEnum(zcu)) { try sema.zirDbgStmt(block, cond_dbg_node_index); - const ok = try block.addUnOp(.is_named_enum_value, operand); + const ok = try 
block.addUnOp(.is_named_enum_value, init_cond); try sema.addSafetyCheck(block, src, ok, .corrupt_switch); } + if (err_set and try sema.maybeErrorUnwrap(block, special.body, init_cond, operand_src, false)) { + return .unreachable_value; + } + } - return spa.resolveProngComptime( - &child_block, - .special, - special.body, - special.capture, - block.src(.{ .switch_capture = .{ - .switch_node_offset = src_node_offset, - .case_idx = LazySrcLoc.Offset.SwitchCaseIndex.special, - } }), - undefined, // case_vals may be undefined for special prongs - .none, - false, - merges, - ); + switch (operand) { + .loop => {}, // always runtime; evaluation in comptime scope uses `simple` + .simple => |s| { + if (try sema.resolveDefinedValue(&child_block, src, s.cond)) |cond_val| { + return resolveSwitchComptimeLoop( + sema, + spa, + &child_block, + if (operand_is_ref) + sema.typeOf(s.by_ref) + else + raw_operand_ty, + cond_ty, + cond_val, + src_node_offset, + special, + case_vals, + scalar_cases_len, + multi_cases_len, + err_set, + empty_enum, + operand_is_ref, + ); + } + + if (scalar_cases_len + multi_cases_len == 0 and !special.is_inline and !extra.data.bits.has_continue) { + return spa.resolveProngComptime( + &child_block, + .special, + special.body, + special.capture, + block.src(.{ .switch_capture = .{ + .switch_node_offset = src_node_offset, + .case_idx = LazySrcLoc.Offset.SwitchCaseIndex.special, + } }), + undefined, // case_vals may be undefined for special prongs + .none, + false, + merges, + ); + } + }, } if (child_block.is_comptime) { - _ = try sema.resolveConstDefinedValue(&child_block, operand_src, operand, .{ + _ = try sema.resolveConstDefinedValue(&child_block, operand_src, operand.simple.cond, .{ .needed_comptime_reason = "condition in comptime switch must be comptime-known", .block_comptime_reason = child_block.comptime_reason, }); unreachable; } - _ = try sema.analyzeSwitchRuntimeBlock( + const air_switch_ref = try sema.analyzeSwitchRuntimeBlock( spa, &child_block, 
src, - operand, - operand_ty, + switch (operand) { + .simple => |s| s.cond, + .loop => |l| l.init_cond, + }, + cond_ty, operand_src, case_vals, special, scalar_cases_len, multi_cases_len, union_originally, - maybe_union_ty, + raw_operand_ty, err_set, src_node_offset, special_prong_src, @@ -12492,6 +12697,67 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index, operand_is_r false, ); + for (merges.extra_insts.items, merges.extra_src_locs.items) |placeholder_inst, dispatch_src| { + var replacement_block = block.makeSubBlock(); + defer replacement_block.instructions.deinit(gpa); + + assert(sema.air_instructions.items(.tag)[@intFromEnum(placeholder_inst)] == .br); + const new_operand_maybe_ref = sema.air_instructions.items(.data)[@intFromEnum(placeholder_inst)].br.operand; + + if (extra.data.bits.any_non_inline_capture) { + _ = try replacement_block.addBinOp(.store, operand.loop.operand_alloc, new_operand_maybe_ref); + } + + const new_operand_val = if (operand_is_ref) + try sema.analyzeLoad(&replacement_block, dispatch_src, new_operand_maybe_ref, dispatch_src) + else + new_operand_maybe_ref; + + const new_cond = try sema.switchCond(&replacement_block, dispatch_src, new_operand_val); + + if (zcu.backendSupportsFeature(.is_named_enum_value) and block.wantSafety() and + cond_ty.zigTypeTag(zcu) == .@"enum" and !cond_ty.isNonexhaustiveEnum(zcu) and + !try sema.isComptimeKnown(new_cond)) + { + const ok = try replacement_block.addUnOp(.is_named_enum_value, new_cond); + try sema.addSafetyCheck(&replacement_block, src, ok, .corrupt_switch); + } + + _ = try replacement_block.addInst(.{ + .tag = .switch_dispatch, + .data = .{ .br = .{ + .block_inst = air_switch_ref.toIndex().?, + .operand = new_cond, + } }, + }); + + if (replacement_block.instructions.items.len == 1) { + // Optimization: we don't need a block! 
+ sema.air_instructions.set( + @intFromEnum(placeholder_inst), + sema.air_instructions.get(@intFromEnum(replacement_block.instructions.items[0])), + ); + continue; + } + + // Replace placeholder with a block. + // No `br` is needed as the block is a switch dispatch so necessarily `noreturn`. + try sema.air_extra.ensureUnusedCapacity( + gpa, + @typeInfo(Air.Block).@"struct".fields.len + replacement_block.instructions.items.len, + ); + sema.air_instructions.set(@intFromEnum(placeholder_inst), .{ + .tag = .block, + .data = .{ .ty_pl = .{ + .ty = .noreturn_type, + .payload = sema.addExtraAssumeCapacity(Air.Block{ + .body_len = @intCast(replacement_block.instructions.items.len), + }), + } }, + }); + sema.air_extra.appendSliceAssumeCapacity(@ptrCast(replacement_block.instructions.items)); + } + return sema.resolveAnalyzedBlock(block, src, &child_block, merges, false); } @@ -13123,7 +13389,7 @@ fn analyzeSwitchRuntimeBlock( sema.air_extra.appendSliceAssumeCapacity(@ptrCast(else_body)); return try child_block.addInst(.{ - .tag = .switch_br, + .tag = if (spa.operand == .loop) .loop_switch_br else .switch_br, .data = .{ .pl_op = .{ .operand = operand, .payload = payload_index, @@ -13131,6 +13397,77 @@ fn analyzeSwitchRuntimeBlock( }); } +fn resolveSwitchComptimeLoop( + sema: *Sema, + init_spa: SwitchProngAnalysis, + child_block: *Block, + maybe_ptr_operand_ty: Type, + cond_ty: Type, + init_cond_val: Value, + switch_node_offset: i32, + special: SpecialProng, + case_vals: std.ArrayListUnmanaged(Air.Inst.Ref), + scalar_cases_len: u32, + multi_cases_len: u32, + err_set: bool, + empty_enum: bool, + operand_is_ref: bool, +) CompileError!Air.Inst.Ref { + var spa = init_spa; + var cond_val = init_cond_val; + + while (true) { + if (resolveSwitchComptime( + sema, + spa, + child_block, + spa.operand.simple.cond, + cond_val, + cond_ty, + switch_node_offset, + special, + case_vals, + scalar_cases_len, + multi_cases_len, + err_set, + empty_enum, + )) |result| { + return result; + } else 
|err| switch (err) { + error.ComptimeBreak => { + const break_inst = sema.code.instructions.get(@intFromEnum(sema.comptime_break_inst)); + if (break_inst.tag != .switch_continue) return error.ComptimeBreak; + const extra = sema.code.extraData(Zir.Inst.Break, break_inst.data.@"break".payload_index).data; + if (extra.block_inst != spa.switch_block_inst) return error.ComptimeBreak; + // This is a `switch_continue` targeting this block. Change the operand and start over. + const src = child_block.nodeOffset(extra.operand_src_node); + const new_operand_uncoerced = try sema.resolveInst(break_inst.data.@"break".operand); + const new_operand = try sema.coerce(child_block, maybe_ptr_operand_ty, new_operand_uncoerced, src); + + try sema.emitBackwardBranch(child_block, src); + + const val, const ref = if (operand_is_ref) + .{ try sema.analyzeLoad(child_block, src, new_operand, src), new_operand } + else + .{ new_operand, undefined }; + + const cond_ref = try sema.switchCond(child_block, src, val); + + cond_val = try sema.resolveConstDefinedValue(child_block, src, cond_ref, .{ + .needed_comptime_reason = "condition in comptime switch must be comptime-known", + .block_comptime_reason = child_block.comptime_reason, + }); + spa.operand = .{ .simple = .{ + .by_val = val, + .by_ref = ref, + .cond = cond_ref, + } }; + }, + else => |e| return e, + } + } +} + fn resolveSwitchComptime( sema: *Sema, spa: SwitchProngAnalysis, @@ -13148,6 +13485,7 @@ fn resolveSwitchComptime( ) CompileError!Air.Inst.Ref { const merges = &child_block.label.?.merges; const resolved_operand_val = try sema.resolveLazyValue(operand_val); + var extra_index: usize = special.end; { var scalar_i: usize = 0; diff --git a/src/Value.zig b/src/Value.zig index e29e0338843c..575a84f10397 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -292,6 +292,7 @@ pub fn getUnsignedIntInner( .none => 0, else => |payload| Value.fromInterned(payload).getUnsignedIntInner(strat, zcu, tid), }, + .enum_tag => |enum_tag| return 
Value.fromInterned(enum_tag.int).getUnsignedIntInner(strat, zcu, tid), else => null, }, }; diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 334787caf81c..0d01b3d458ef 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -735,6 +735,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .block => try self.airBlock(inst), .br => try self.airBr(inst), .repeat => return self.fail("TODO implement `repeat`", .{}), + .switch_dispatch => return self.fail("TODO implement `switch_dispatch`", .{}), .trap => try self.airTrap(), .breakpoint => try self.airBreakpoint(), .ret_addr => try self.airRetAddr(inst), @@ -825,6 +826,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .field_parent_ptr => try self.airFieldParentPtr(inst), .switch_br => try self.airSwitch(inst), + .loop_switch_br => return self.fail("TODO implement `loop_switch_br`", .{}), .slice_ptr => try self.airSlicePtr(inst), .slice_len => try self.airSliceLen(inst), diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 0128a2d9bb57..5d2ebf52092f 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -722,6 +722,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .block => try self.airBlock(inst), .br => try self.airBr(inst), .repeat => return self.fail("TODO implement `repeat`", .{}), + .switch_dispatch => return self.fail("TODO implement `switch_dispatch`", .{}), .trap => try self.airTrap(), .breakpoint => try self.airBreakpoint(), .ret_addr => try self.airRetAddr(inst), @@ -812,6 +813,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .field_parent_ptr => try self.airFieldParentPtr(inst), .switch_br => try self.airSwitch(inst), + .loop_switch_br => return self.fail("TODO implement `loop_switch_br`", .{}), .slice_ptr => try self.airSlicePtr(inst), .slice_len => try self.airSliceLen(inst), diff --git a/src/arch/riscv64/CodeGen.zig 
b/src/arch/riscv64/CodeGen.zig index 413fab348bf0..e810c6ee30f8 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -1580,6 +1580,7 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void { .block => try func.airBlock(inst), .br => try func.airBr(inst), .repeat => return func.fail("TODO implement `repeat`", .{}), + .switch_dispatch => return func.fail("TODO implement `switch_dispatch`", .{}), .trap => try func.airTrap(), .breakpoint => try func.airBreakpoint(), .ret_addr => try func.airRetAddr(inst), @@ -1669,6 +1670,7 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void { .field_parent_ptr => try func.airFieldParentPtr(inst), .switch_br => try func.airSwitchBr(inst), + .loop_switch_br => return func.fail("TODO implement `loop_switch_br`", .{}), .ptr_slice_len_ptr => try func.airPtrSliceLenPtr(inst), .ptr_slice_ptr_ptr => try func.airPtrSlicePtrPtr(inst), diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index a26182930623..4f838b9c5289 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -577,6 +577,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .block => try self.airBlock(inst), .br => try self.airBr(inst), .repeat => return self.fail("TODO implement `repeat`", .{}), + .switch_dispatch => return self.fail("TODO implement `switch_dispatch`", .{}), .trap => try self.airTrap(), .breakpoint => try self.airBreakpoint(), .ret_addr => @panic("TODO try self.airRetAddr(inst)"), @@ -667,6 +668,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .field_parent_ptr => @panic("TODO try self.airFieldParentPtr(inst)"), .switch_br => try self.airSwitch(inst), + .loop_switch_br => return self.fail("TODO implement `loop_switch_br`", .{}), .slice_ptr => try self.airSlicePtr(inst), .slice_len => try self.airSliceLen(inst), diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 
cbf243972148..01edf87f3eb3 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -1904,6 +1904,7 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { .breakpoint => func.airBreakpoint(inst), .br => func.airBr(inst), .repeat => return func.fail("TODO implement `repeat`", .{}), + .switch_dispatch => return func.fail("TODO implement `switch_dispatch`", .{}), .int_from_bool => func.airIntFromBool(inst), .cond_br => func.airCondBr(inst), .intcast => func.airIntcast(inst), @@ -1985,6 +1986,7 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { .field_parent_ptr => func.airFieldParentPtr(inst), .switch_br => func.airSwitchBr(inst), + .loop_switch_br => return func.fail("TODO implement `loop_switch_br`", .{}), .trunc => func.airTrunc(inst), .unreach => func.airUnreachable(inst), diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index c92d42ffb7a6..e27e04782d4f 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2248,6 +2248,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .block => try self.airBlock(inst), .br => try self.airBr(inst), .repeat => return self.fail("TODO implement `repeat`", .{}), + .switch_dispatch => return self.fail("TODO implement `switch_dispatch`", .{}), .trap => try self.airTrap(), .breakpoint => try self.airBreakpoint(), .ret_addr => try self.airRetAddr(inst), @@ -2336,6 +2337,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .field_parent_ptr => try self.airFieldParentPtr(inst), .switch_br => try self.airSwitchBr(inst), + .loop_switch_br => return self.fail("TODO implement `loop_switch_br`", .{}), .slice_ptr => try self.airSlicePtr(inst), .slice_len => try self.airSliceLen(inst), diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 91dec12279fd..c761aa72258c 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -321,6 +321,9 @@ pub const Function = struct { /// by type alignment. 
/// The value is whether the alloc needs to be emitted in the header. allocs: std.AutoArrayHashMapUnmanaged(LocalIndex, bool) = .{}, + /// Maps from `loop_switch_br` instructions to the allocated local used + /// for the switch cond. Dispatches should set this local to the new cond. + loop_switch_conds: std.AutoHashMapUnmanaged(Air.Inst.Index, LocalIndex) = .{}, fn resolveInst(f: *Function, ref: Air.Inst.Ref) !CValue { const gop = try f.value_map.getOrPut(ref); @@ -531,6 +534,7 @@ pub const Function = struct { f.blocks.deinit(gpa); f.value_map.deinit(); f.lazy_fns.deinit(gpa); + f.loop_switch_conds.deinit(gpa); } fn typeOf(f: *Function, inst: Air.Inst.Ref) Type { @@ -3376,16 +3380,18 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, => unreachable, // Instructions that are known to always be `noreturn` based on their tag. - .br => return airBr(f, inst), - .repeat => return airRepeat(f, inst), - .cond_br => return airCondBr(f, inst), - .switch_br => return airSwitchBr(f, inst), - .loop => return airLoop(f, inst), - .ret => return airRet(f, inst, false), - .ret_safe => return airRet(f, inst, false), // TODO - .ret_load => return airRet(f, inst, true), - .trap => return airTrap(f, f.object.writer()), - .unreach => return airUnreach(f), + .br => return airBr(f, inst), + .repeat => return airRepeat(f, inst), + .switch_dispatch => return airSwitchDispatch(f, inst), + .cond_br => return airCondBr(f, inst), + .switch_br => return airSwitchBr(f, inst, false), + .loop_switch_br => return airSwitchBr(f, inst, true), + .loop => return airLoop(f, inst), + .ret => return airRet(f, inst, false), + .ret_safe => return airRet(f, inst, false), // TODO + .ret_load => return airRet(f, inst, true), + .trap => return airTrap(f, f.object.writer()), + .unreach => return airUnreach(f), // Instructions which may be `noreturn`. 
.block => res: { @@ -4786,6 +4792,46 @@ fn airRepeat(f: *Function, inst: Air.Inst.Index) !void { try writer.print("goto zig_loop_{d};\n", .{@intFromEnum(repeat.loop_inst)}); } +fn airSwitchDispatch(f: *Function, inst: Air.Inst.Index) !void { + const pt = f.object.dg.pt; + const zcu = pt.zcu; + const br = f.air.instructions.items(.data)[@intFromEnum(inst)].br; + const writer = f.object.writer(); + + if (try f.air.value(br.operand, pt)) |cond_val| { + // Comptime-known dispatch. Iterate the cases to find the correct + // one, and branch directly to the corresponding case. + const switch_br = f.air.unwrapSwitch(br.block_inst); + var it = switch_br.iterateCases(); + const target_case_idx: u32 = target: while (it.next()) |case| { + for (case.items) |item| { + const val = Value.fromInterned(item.toInterned().?); + if (cond_val.compareHetero(.eq, val, zcu)) break :target case.idx; + } + for (case.ranges) |range| { + const low = Value.fromInterned(range[0].toInterned().?); + const high = Value.fromInterned(range[1].toInterned().?); + if (cond_val.compareHetero(.gte, low, zcu) and + cond_val.compareHetero(.lte, high, zcu)) + { + break :target case.idx; + } + } + } else switch_br.cases_len; + try writer.print("goto zig_switch_{d}_dispatch_{d};\n", .{ @intFromEnum(br.block_inst), target_case_idx }); + return; + } + + // Runtime-known dispatch. Set the switch condition, and branch back. 
+ const cond = try f.resolveInst(br.operand); + const cond_local = f.loop_switch_conds.get(br.block_inst).?; + try f.writeCValue(writer, .{ .local = cond_local }, .Other); + try writer.writeAll(" = "); + try f.writeCValue(writer, cond, .Initializer); + try writer.writeAll(";\n"); + try writer.print("goto zig_switch_{d}_loop;", .{@intFromEnum(br.block_inst)}); +} + fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue { const ty_op = f.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; const inst_ty = f.typeOfIndex(inst); @@ -5004,15 +5050,34 @@ fn airCondBr(f: *Function, inst: Air.Inst.Index) !void { try genBodyInner(f, else_body); } -fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !void { +fn airSwitchBr(f: *Function, inst: Air.Inst.Index, is_dispatch_loop: bool) !void { const pt = f.object.dg.pt; const zcu = pt.zcu; + const gpa = f.object.dg.gpa; const switch_br = f.air.unwrapSwitch(inst); - const condition = try f.resolveInst(switch_br.operand); + const init_condition = try f.resolveInst(switch_br.operand); try reap(f, inst, &.{switch_br.operand}); const condition_ty = f.typeOf(switch_br.operand); const writer = f.object.writer(); + // For dispatches, we will create a local alloc to contain the condition value. + // This may not result in optimal codegen for switch loops, but it minimizes the + // amount of C code we generate, which is probably more desirable here (and is simpler). 
+ const condition = if (is_dispatch_loop) cond: { + const new_local = try f.allocLocal(inst, condition_ty); + try f.writeCValue(writer, new_local, .Other); + try writer.writeAll(" = "); + try f.writeCValue(writer, init_condition, .Initializer); + try writer.writeAll(";\n"); + try writer.print("zig_switch_{d}_loop:", .{@intFromEnum(inst)}); + try f.loop_switch_conds.put(gpa, inst, new_local.new_local); + break :cond new_local; + } else init_condition; + + defer if (is_dispatch_loop) { + assert(f.loop_switch_conds.remove(inst)); + }; + try writer.writeAll("switch ("); const lowered_condition_ty = if (condition_ty.toIntern() == .bool_type) @@ -5030,7 +5095,6 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !void { try writer.writeAll(") {"); f.object.indent_writer.pushIndent(); - const gpa = f.object.dg.gpa; const liveness = try f.liveness.getSwitchBr(gpa, inst, switch_br.cases_len + 1); defer gpa.free(liveness.deaths); @@ -5045,9 +5109,15 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !void { try f.object.indent_writer.insertNewline(); try writer.writeAll("case "); const item_value = try f.air.value(item, pt); - if (item_value.?.getUnsignedInt(zcu)) |item_int| try writer.print("{}\n", .{ - try f.fmtIntLiteral(try pt.intValue(lowered_condition_ty, item_int)), - }) else { + // If `item_value` is a pointer with a known integer address, print the address + // with no cast to avoid a warning. 
+ write_val: { + if (condition_ty.isPtrAtRuntime(zcu)) { + if (item_value.?.getUnsignedInt(zcu)) |item_int| { + try writer.print("{}", .{try f.fmtIntLiteral(try pt.intValue(lowered_condition_ty, item_int))}); + break :write_val; + } + } if (condition_ty.isPtrAtRuntime(zcu)) { try writer.writeByte('('); try f.renderType(writer, Type.usize); @@ -5057,9 +5127,14 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !void { } try writer.writeByte(':'); } - try writer.writeByte(' '); - - try genBodyResolveState(f, inst, liveness.deaths[case.idx], case.body, false); + try writer.writeAll(" {\n"); + f.object.indent_writer.pushIndent(); + if (is_dispatch_loop) { + try writer.print("zig_switch_{d}_dispatch_{d}: ", .{ @intFromEnum(inst), case.idx }); + } + try genBodyResolveState(f, inst, liveness.deaths[case.idx], case.body, true); + f.object.indent_writer.popIndent(); + try writer.writeByte('}'); // The case body must be noreturn so we don't need to insert a break. } @@ -5095,11 +5170,19 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !void { try f.object.dg.renderValue(writer, (try f.air.value(range[1], pt)).?, .Other); try writer.writeByte(')'); } - try writer.writeAll(") "); - try genBodyResolveState(f, inst, liveness.deaths[case.idx], case.body, false); + try writer.writeAll(") {\n"); + f.object.indent_writer.pushIndent(); + if (is_dispatch_loop) { + try writer.print("zig_switch_{d}_dispatch_{d}: ", .{ @intFromEnum(inst), case.idx }); + } + try genBodyResolveState(f, inst, liveness.deaths[case.idx], case.body, true); + f.object.indent_writer.popIndent(); + try writer.writeByte('}'); } } - + if (is_dispatch_loop) { + try writer.print("zig_switch_{d}_dispatch_{d}: ", .{ @intFromEnum(inst), switch_br.cases_len }); + } if (else_body.len > 0) { // Note that this must be the last case, so we do not need to use `genBodyResolveState` since // the parent block will do it (because the case body is noreturn). 
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index d1ec8eca9fad..e5e43bbbf808 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1721,6 +1721,7 @@ pub const Object = struct { .func_inst_table = .{}, .blocks = .{}, .loops = .{}, + .switch_dispatch_info = .{}, .sync_scope = if (owner_mod.single_threaded) .singlethread else .system, .file = file, .scope = subprogram, @@ -4845,6 +4846,10 @@ pub const FuncGen = struct { /// Maps `loop` instructions to the bb to branch to to repeat the loop. loops: std.AutoHashMapUnmanaged(Air.Inst.Index, Builder.Function.Block.Index), + /// Maps `loop_switch_br` instructions to the information required to lower + /// dispatches (`switch_dispatch` instructions). + switch_dispatch_info: std.AutoHashMapUnmanaged(Air.Inst.Index, SwitchDispatchInfo), + sync_scope: Builder.SyncScope, const Fuzz = struct { @@ -4857,6 +4862,33 @@ pub const FuncGen = struct { } }; + const SwitchDispatchInfo = struct { + /// These are the blocks corresponding to each switch case. + /// The final element corresponds to the `else` case. + /// Slices allocated into `gpa`. + case_blocks: []Builder.Function.Block.Index, + /// This is `.none` if `jmp_table` is set, since we won't use a `switch` instruction to dispatch. + switch_weights: Builder.Function.Instruction.BrCond.Weights, + /// If not `null`, we have manually constructed a jump table to reach the desired block. + /// `table` can be used if the value is between `min` and `max` inclusive. + /// We perform this lowering manually to avoid some questionable behavior from LLVM. + /// See `airSwitchBr` for details. + jmp_table: ?JmpTable, + + const JmpTable = struct { + min: Builder.Constant, + max: Builder.Constant, + in_bounds_hint: enum { none, unpredictable, likely, unlikely }, + /// Pointer to the jump table itself, to be used with `indirectbr`. + /// The index into the jump table is the dispatch condition minus `min`. 
+ /// The table values are `blockaddress` constants corresponding to blocks in `case_blocks`. + table: Builder.Constant, + /// `true` if `table` contains a reference to the `else` block. + /// In this case, the `indirectbr` must include the `else` block in its target list. + table_includes_else: bool, + }; + }; + const BreakList = union { list: std.MultiArrayList(struct { bb: Builder.Function.Block.Index, @@ -4872,6 +4904,11 @@ pub const FuncGen = struct { self.func_inst_table.deinit(gpa); self.blocks.deinit(gpa); self.loops.deinit(gpa); + var it = self.switch_dispatch_info.valueIterator(); + while (it.next()) |info| { + self.gpa.free(info.case_blocks); + } + self.switch_dispatch_info.deinit(gpa); } fn todo(self: *FuncGen, comptime format: []const u8, args: anytype) Error { @@ -5182,16 +5219,18 @@ pub const FuncGen = struct { .work_group_id => try self.airWorkGroupId(inst), // Instructions that are known to always be `noreturn` based on their tag. - .br => return self.airBr(inst), - .repeat => return self.airRepeat(inst), - .cond_br => return self.airCondBr(inst), - .switch_br => return self.airSwitchBr(inst), - .loop => return self.airLoop(inst), - .ret => return self.airRet(inst, false), - .ret_safe => return self.airRet(inst, true), - .ret_load => return self.airRetLoad(inst), - .trap => return self.airTrap(inst), - .unreach => return self.airUnreach(inst), + .br => return self.airBr(inst), + .repeat => return self.airRepeat(inst), + .switch_dispatch => return self.airSwitchDispatch(inst), + .cond_br => return self.airCondBr(inst), + .switch_br => return self.airSwitchBr(inst, false), + .loop_switch_br => return self.airSwitchBr(inst, true), + .loop => return self.airLoop(inst), + .ret => return self.airRet(inst, false), + .ret_safe => return self.airRet(inst, true), + .ret_load => return self.airRetLoad(inst), + .trap => return self.airTrap(inst), + .unreach => return self.airUnreach(inst), // Instructions which may be `noreturn`. 
.block => res: { @@ -6093,6 +6132,202 @@ pub const FuncGen = struct { _ = try self.wip.br(loop_bb); } + fn lowerSwitchDispatch( + self: *FuncGen, + switch_inst: Air.Inst.Index, + cond_ref: Air.Inst.Ref, + dispatch_info: SwitchDispatchInfo, + ) !void { + const o = self.ng.object; + const pt = o.pt; + const zcu = pt.zcu; + const cond_ty = self.typeOf(cond_ref); + const switch_br = self.air.unwrapSwitch(switch_inst); + + if (try self.air.value(cond_ref, pt)) |cond_val| { + // Comptime-known dispatch. Iterate the cases to find the correct + // one, and branch to the corresponding element of `case_blocks`. + var it = switch_br.iterateCases(); + const target_case_idx = target: while (it.next()) |case| { + for (case.items) |item| { + const val = Value.fromInterned(item.toInterned().?); + if (cond_val.compareHetero(.eq, val, zcu)) break :target case.idx; + } + for (case.ranges) |range| { + const low = Value.fromInterned(range[0].toInterned().?); + const high = Value.fromInterned(range[1].toInterned().?); + if (cond_val.compareHetero(.gte, low, zcu) and + cond_val.compareHetero(.lte, high, zcu)) + { + break :target case.idx; + } + } + } else dispatch_info.case_blocks.len - 1; + const target_block = dispatch_info.case_blocks[target_case_idx]; + target_block.ptr(&self.wip).incoming += 1; + _ = try self.wip.br(target_block); + return; + } + + // Runtime-known dispatch. + const cond = try self.resolveInst(cond_ref); + + if (dispatch_info.jmp_table) |jmp_table| { + // We should use the constructed jump table. + // First, check the bounds to branch to the `else` case if needed. 
+ const inbounds = try self.wip.bin( + .@"and", + try self.cmp(.normal, .gte, cond_ty, cond, jmp_table.min.toValue()), + try self.cmp(.normal, .lte, cond_ty, cond, jmp_table.max.toValue()), + "", + ); + const jmp_table_block = try self.wip.block(1, "Then"); + const else_block = dispatch_info.case_blocks[dispatch_info.case_blocks.len - 1]; + else_block.ptr(&self.wip).incoming += 1; + _ = try self.wip.brCond(inbounds, jmp_table_block, else_block, switch (jmp_table.in_bounds_hint) { + .none => .none, + .unpredictable => .unpredictable, + .likely => .then_likely, + .unlikely => .else_likely, + }); + + self.wip.cursor = .{ .block = jmp_table_block }; + + // Figure out the list of blocks we might branch to. + // This includes all case blocks, but it might not include the `else` block if + // the table is dense. + const target_blocks_len = dispatch_info.case_blocks.len - @intFromBool(!jmp_table.table_includes_else); + const target_blocks = dispatch_info.case_blocks[0..target_blocks_len]; + + // Make sure to cast the index to a usize so it's not treated as negative! + const table_index = try self.wip.cast( + .zext, + try self.wip.bin(.@"sub nuw", cond, jmp_table.min.toValue(), ""), + try o.lowerType(Type.usize), + "", + ); + const target_ptr_ptr = try self.wip.gep( + .inbounds, + .ptr, + jmp_table.table.toValue(), + &.{table_index}, + "", + ); + const target_ptr = try self.wip.load(.normal, .ptr, target_ptr_ptr, .default, ""); + + // Do the branch! + _ = try self.wip.indirectbr(target_ptr, target_blocks); + + // Mark all target blocks as having one more incoming branch. + for (target_blocks) |case_block| { + case_block.ptr(&self.wip).incoming += 1; + } + + return; + } + + // We must lower to an actual LLVM `switch` instruction. + // The switch prongs will correspond to our scalar cases. Ranges will + // be handled by conditional branches in the `else` prong. 
+ + const llvm_usize = try o.lowerType(Type.usize); + const cond_int = if (cond.typeOfWip(&self.wip).isPointer(&o.builder)) + try self.wip.cast(.ptrtoint, cond, llvm_usize, "") + else + cond; + + const llvm_cases_len, const last_range_case = info: { + var llvm_cases_len: u32 = 0; + var last_range_case: ?u32 = null; + var it = switch_br.iterateCases(); + while (it.next()) |case| { + if (case.ranges.len > 0) last_range_case = case.idx; + llvm_cases_len += @intCast(case.items.len); + } + break :info .{ llvm_cases_len, last_range_case }; + }; + + // The `else` of the LLVM `switch` is the actual `else` prong only + // if there are no ranges. Otherwise, the `else` will have a + // conditional chain before the "true" `else` prong. + const llvm_else_block = if (last_range_case == null) + dispatch_info.case_blocks[dispatch_info.case_blocks.len - 1] + else + try self.wip.block(0, "RangeTest"); + + llvm_else_block.ptr(&self.wip).incoming += 1; + + var wip_switch = try self.wip.@"switch"(cond_int, llvm_else_block, llvm_cases_len, dispatch_info.switch_weights); + defer wip_switch.finish(&self.wip); + + // Construct the actual cases. Set the cursor to the `else` block so + // we can construct ranges at the same time as scalar cases. + self.wip.cursor = .{ .block = llvm_else_block }; + + var it = switch_br.iterateCases(); + while (it.next()) |case| { + const case_block = dispatch_info.case_blocks[case.idx]; + + for (case.items) |item| { + const llvm_item = (try self.resolveInst(item)).toConst().?; + const llvm_int_item = if (llvm_item.typeOf(&o.builder).isPointer(&o.builder)) + try o.builder.castConst(.ptrtoint, llvm_item, llvm_usize) + else + llvm_item; + try wip_switch.addCase(llvm_int_item, case_block, &self.wip); + } + case_block.ptr(&self.wip).incoming += @intCast(case.items.len); + + if (case.ranges.len == 0) continue; + + // Add a conditional for the ranges, directing to the relevant bb. 
+ // We don't need to consider `cold` branch hints since that information is stored + // in the target bb body, but we do care about likely/unlikely/unpredictable. + + const hint = switch_br.getHint(case.idx); + + var range_cond: ?Builder.Value = null; + for (case.ranges) |range| { + const llvm_min = try self.resolveInst(range[0]); + const llvm_max = try self.resolveInst(range[1]); + const cond_part = try self.wip.bin( + .@"and", + try self.cmp(.normal, .gte, cond_ty, cond, llvm_min), + try self.cmp(.normal, .lte, cond_ty, cond, llvm_max), + "", + ); + if (range_cond) |prev| { + range_cond = try self.wip.bin(.@"or", prev, cond_part, ""); + } else range_cond = cond_part; + } + + // If the check fails, we either branch to the "true" `else` case, + // or to the next range condition. + const range_else_block = if (case.idx == last_range_case.?) + dispatch_info.case_blocks[dispatch_info.case_blocks.len - 1] + else + try self.wip.block(0, "RangeTest"); + + _ = try self.wip.brCond(range_cond.?, case_block, range_else_block, switch (hint) { + .none, .cold => .none, + .unpredictable => .unpredictable, + .likely => .then_likely, + .unlikely => .else_likely, + }); + case_block.ptr(&self.wip).incoming += 1; + range_else_block.ptr(&self.wip).incoming += 1; + + // Construct the next range conditional (if any) in the false branch. 
+ self.wip.cursor = .{ .block = range_else_block }; + } + } + + fn airSwitchDispatch(self: *FuncGen, inst: Air.Inst.Index) !void { + const br = self.air.instructions.items(.data)[@intFromEnum(inst)].br; + const dispatch_info = self.switch_dispatch_info.get(br.block_inst).?; + return self.lowerSwitchDispatch(br.block_inst, br.operand, dispatch_info); + } + fn airCondBr(self: *FuncGen, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op; const cond = try self.resolveInst(pl_op.operand); @@ -6257,36 +6492,123 @@ pub const FuncGen = struct { return fg.wip.extractValue(err_union, &.{offset}, ""); } - fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index) !void { + fn airSwitchBr(self: *FuncGen, inst: Air.Inst.Index, is_dispatch_loop: bool) !void { const o = self.ng.object; + const zcu = o.pt.zcu; const switch_br = self.air.unwrapSwitch(inst); - const cond = try self.resolveInst(switch_br.operand); + // For `loop_switch_br`, we need these BBs prepared ahead of time to generate dispatches. + // For `switch_br`, they allow us to sometimes generate better IR by sharing a BB between + // scalar and range cases in the same prong. + // +1 for `else` case. This is not the same as the LLVM `else` prong, as that may first contain + // conditionals to handle ranges. + const case_blocks = try self.gpa.alloc(Builder.Function.Block.Index, switch_br.cases_len + 1); + defer self.gpa.free(case_blocks); + // We set incoming as 0 for now, and increment it as we construct dispatches. + for (case_blocks[0 .. case_blocks.len - 1]) |*b| b.* = try self.wip.block(0, "Case"); + case_blocks[case_blocks.len - 1] = try self.wip.block(0, "Default"); + + // There's a special case here to manually generate a jump table in some cases. + // + // Labeled switch in Zig is intended to follow the "direct threading" pattern. 
We would ideally use a jump + // table, and each `continue` has its own indirect `jmp`, to allow the branch predictor to more accurately + // use data patterns to predict future dispatches. The problem, however, is that LLVM emits fascinatingly + // bad asm for this. Not only does it not share the jump table -- which we really need it to do to prevent + // destroying the cache -- but it also actually generates slightly different jump tables for each case, + // and *a separate conditional branch beforehand* to handle dispatching back to the case we're currently + // within(!!). + // + // This asm is really, really, not what we want. As such, we will construct the jump table manually where + // appropriate (the values are dense and relatively few), and use it when lowering dispatches. + + const jmp_table: ?SwitchDispatchInfo.JmpTable = jmp_table: { + if (!is_dispatch_loop) break :jmp_table null; + // On a 64-bit target, 1024 pointers in our jump table is about 8K of pointers. This seems just + // about acceptable - it won't fill L1d cache on most CPUs. + const max_table_len = 1024; - // This is not necessarily the actual `else` prong; it first contains conditionals - // for any range cases. It's just the `else` of the LLVM switch. - const llvm_else_block = try self.wip.block(1, "Default"); + const cond_ty = self.typeOf(switch_br.operand); + switch (cond_ty.zigTypeTag(zcu)) { + .bool, .pointer => break :jmp_table null, + .@"enum", .int, .error_set => {}, + else => unreachable, + } - const case_blocks = try self.gpa.alloc(Builder.Function.Block.Index, switch_br.cases_len); - defer self.gpa.free(case_blocks); - // We set incoming as 0 for now, and increment it as we construct the switch. 
- for (case_blocks) |*b| b.* = try self.wip.block(0, "Case"); + if (cond_ty.intInfo(zcu).signedness == .signed) break :jmp_table null; - const llvm_usize = try o.lowerType(Type.usize); - const cond_int = if (cond.typeOfWip(&self.wip).isPointer(&o.builder)) - try self.wip.cast(.ptrtoint, cond, llvm_usize, "") - else - cond; + // Don't worry about the size of the type -- it's irrelevant, because the prong values could be fairly dense. + // If they are, then we will construct a jump table. + const min, const max = self.switchCaseItemRange(switch_br); + const min_int = min.getUnsignedInt(zcu) orelse break :jmp_table null; + const max_int = max.getUnsignedInt(zcu) orelse break :jmp_table null; + const table_len = max_int - min_int + 1; + if (table_len > max_table_len) break :jmp_table null; + + const table_elems = try self.gpa.alloc(Builder.Constant, @intCast(table_len)); + defer self.gpa.free(table_elems); - const llvm_cases_len = llvm_cases_len: { - var len: u32 = 0; + // Set them all to the `else` branch, then iterate over the AIR switch + // and replace all values which correspond to other prongs. 
+ @memset(table_elems, try o.builder.blockAddrConst( + self.wip.function, + case_blocks[case_blocks.len - 1], + )); + var item_count: u32 = 0; var it = switch_br.iterateCases(); - while (it.next()) |case| len += @intCast(case.items.len); - break :llvm_cases_len len; + while (it.next()) |case| { + const case_block = case_blocks[case.idx]; + const case_block_addr = try o.builder.blockAddrConst( + self.wip.function, + case_block, + ); + for (case.items) |item| { + const val = Value.fromInterned(item.toInterned().?); + const table_idx = val.toUnsignedInt(zcu) - min_int; + table_elems[@intCast(table_idx)] = case_block_addr; + item_count += 1; + } + for (case.ranges) |range| { + const low = Value.fromInterned(range[0].toInterned().?); + const high = Value.fromInterned(range[1].toInterned().?); + const low_idx = low.toUnsignedInt(zcu) - min_int; + const high_idx = high.toUnsignedInt(zcu) - min_int; + @memset(table_elems[@intCast(low_idx)..@intCast(high_idx + 1)], case_block_addr); + item_count += @intCast(high_idx + 1 - low_idx); + } + } + + const table_llvm_ty = try o.builder.arrayType(table_elems.len, .ptr); + const table_val = try o.builder.arrayConst(table_llvm_ty, table_elems); + + const table_variable = try o.builder.addVariable( + try o.builder.strtabStringFmt("__jmptab_{d}", .{@intFromEnum(inst)}), + table_llvm_ty, + .default, + ); + try table_variable.setInitializer(table_val, &o.builder); + table_variable.setLinkage(.internal, &o.builder); + table_variable.setUnnamedAddr(.unnamed_addr, &o.builder); + + const table_includes_else = item_count != table_len; + + break :jmp_table .{ + .min = try o.lowerValue(min.toIntern()), + .max = try o.lowerValue(max.toIntern()), + .in_bounds_hint = if (table_includes_else) .none else switch (switch_br.getElseHint()) { + .none, .cold => .none, + .unpredictable => .unpredictable, + .likely => .likely, + .unlikely => .unlikely, + }, + .table = table_variable.toConst(&o.builder), + .table_includes_else = table_includes_else, + }; }; 
const weights: Builder.Function.Instruction.BrCond.Weights = weights: { + if (jmp_table != null) break :weights .none; // not used + // First pass. If any weights are `.unpredictable`, unpredictable. // If all are `.none` or `.cold`, none. var any_likely = false; @@ -6304,6 +6626,13 @@ pub const FuncGen = struct { } if (!any_likely) break :weights .none; + const llvm_cases_len = llvm_cases_len: { + var len: u32 = 0; + var it = switch_br.iterateCases(); + while (it.next()) |case| len += @intCast(case.items.len); + break :llvm_cases_len len; + }; + var weights = try self.gpa.alloc(Builder.Metadata, llvm_cases_len + 1); defer self.gpa.free(weights); @@ -6336,75 +6665,66 @@ pub const FuncGen = struct { break :weights @enumFromInt(@intFromEnum(tuple)); }; - var wip_switch = try self.wip.@"switch"(cond_int, llvm_else_block, llvm_cases_len, weights); - defer wip_switch.finish(&self.wip); + const dispatch_info: SwitchDispatchInfo = .{ + .case_blocks = case_blocks, + .switch_weights = weights, + .jmp_table = jmp_table, + }; + + if (is_dispatch_loop) { + try self.switch_dispatch_info.putNoClobber(self.gpa, inst, dispatch_info); + } + defer if (is_dispatch_loop) { + assert(self.switch_dispatch_info.remove(inst)); + }; + + // Generate the initial dispatch. + // If this is a simple `switch_br`, this is the only dispatch. + try self.lowerSwitchDispatch(inst, switch_br.operand, dispatch_info); + // Iterate the cases and generate their bodies. var it = switch_br.iterateCases(); - var any_ranges = false; while (it.next()) |case| { - if (case.ranges.len > 0) any_ranges = true; const case_block = case_blocks[case.idx]; - case_block.ptr(&self.wip).incoming += @intCast(case.items.len); - // Handle scalar items, and generate the block. - // We'll generate conditionals for the ranges later on. 
- for (case.items) |item| { - const llvm_item = (try self.resolveInst(item)).toConst().?; - const llvm_int_item = if (llvm_item.typeOf(&o.builder).isPointer(&o.builder)) - try o.builder.castConst(.ptrtoint, llvm_item, llvm_usize) - else - llvm_item; - try wip_switch.addCase(llvm_int_item, case_block, &self.wip); - } self.wip.cursor = .{ .block = case_block }; if (switch_br.getHint(case.idx) == .cold) _ = try self.wip.callIntrinsicAssumeCold(); - try self.genBodyDebugScope(null, case.body, .poi); + try self.genBodyDebugScope(null, case.body, .none); } - + self.wip.cursor = .{ .block = case_blocks[case_blocks.len - 1] }; const else_body = it.elseBody(); - self.wip.cursor = .{ .block = llvm_else_block }; - if (any_ranges) { - const cond_ty = self.typeOf(switch_br.operand); - // Add conditionals for the ranges, directing to the relevant bb. - // We don't need to consider `cold` branch hints since that information is stored - // in the target bb body, but we do care about likely/unlikely/unpredictable. 
- it = switch_br.iterateCases(); - while (it.next()) |case| { - if (case.ranges.len == 0) continue; - const case_block = case_blocks[case.idx]; - const hint = switch_br.getHint(case.idx); - case_block.ptr(&self.wip).incoming += 1; - const next_else_block = try self.wip.block(1, "Default"); - var range_cond: ?Builder.Value = null; - for (case.ranges) |range| { - const llvm_min = try self.resolveInst(range[0]); - const llvm_max = try self.resolveInst(range[1]); - const cond_part = try self.wip.bin( - .@"and", - try self.cmp(.normal, .gte, cond_ty, cond, llvm_min), - try self.cmp(.normal, .lte, cond_ty, cond, llvm_max), - "", - ); - if (range_cond) |prev| { - range_cond = try self.wip.bin(.@"or", prev, cond_part, ""); - } else range_cond = cond_part; - } - _ = try self.wip.brCond(range_cond.?, case_block, next_else_block, switch (hint) { - .none, .cold => .none, - .unpredictable => .unpredictable, - .likely => .then_likely, - .unlikely => .else_likely, - }); - self.wip.cursor = .{ .block = next_else_block }; - } - } if (switch_br.getElseHint() == .cold) _ = try self.wip.callIntrinsicAssumeCold(); - if (else_body.len != 0) { - try self.genBodyDebugScope(null, else_body, .poi); + if (else_body.len > 0) { + try self.genBodyDebugScope(null, it.elseBody(), .none); } else { _ = try self.wip.@"unreachable"(); } + } - // No need to reset the insert cursor since this instruction is noreturn. 
+ fn switchCaseItemRange(self: *FuncGen, switch_br: Air.UnwrappedSwitch) [2]Value { + const zcu = self.ng.object.pt.zcu; + var it = switch_br.iterateCases(); + var min: ?Value = null; + var max: ?Value = null; + while (it.next()) |case| { + for (case.items) |item| { + const val = Value.fromInterned(item.toInterned().?); + const low = if (min) |m| val.compareHetero(.lt, m, zcu) else true; + const high = if (max) |m| val.compareHetero(.gt, m, zcu) else true; + if (low) min = val; + if (high) max = val; + } + for (case.ranges) |range| { + const vals: [2]Value = .{ + Value.fromInterned(range[0].toInterned().?), + Value.fromInterned(range[1].toInterned().?), + }; + const low = if (min) |m| vals[0].compareHetero(.lt, m, zcu) else true; + const high = if (max) |m| vals[1].compareHetero(.gt, m, zcu) else true; + if (low) min = vals[0]; + if (high) max = vals[1]; + } + } + return .{ min.?, max.? }; } fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !void { diff --git a/src/print_air.zig b/src/print_air.zig index c9fde5282213..7ff727ff0cd7 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -296,11 +296,12 @@ const Writer = struct { .aggregate_init => try w.writeAggregateInit(s, inst), .union_init => try w.writeUnionInit(s, inst), .br => try w.writeBr(s, inst), + .switch_dispatch => try w.writeBr(s, inst), .repeat => try w.writeRepeat(s, inst), .cond_br => try w.writeCondBr(s, inst), .@"try", .try_cold => try w.writeTry(s, inst), .try_ptr, .try_ptr_cold => try w.writeTryPtr(s, inst), - .switch_br => try w.writeSwitchBr(s, inst), + .loop_switch_br, .switch_br => try w.writeSwitchBr(s, inst), .cmpxchg_weak, .cmpxchg_strong => try w.writeCmpxchg(s, inst), .fence => try w.writeFence(s, inst), .atomic_load => try w.writeAtomicLoad(s, inst), diff --git a/src/print_zir.zig b/src/print_zir.zig index 8d70af5f3cd1..8ae63df650e1 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -302,6 +302,7 @@ const Writer = struct { .@"break", .break_inline, + .switch_continue, => try 
self.writeBreak(stream, inst), .slice_start => try self.writeSliceStart(stream, inst), diff --git a/test/behavior.zig b/test/behavior.zig index 430c34bcc6d5..650be5a91afe 100644 --- a/test/behavior.zig +++ b/test/behavior.zig @@ -88,6 +88,7 @@ test { _ = @import("behavior/struct_contains_null_ptr_itself.zig"); _ = @import("behavior/struct_contains_slice_of_itself.zig"); _ = @import("behavior/switch.zig"); + _ = @import("behavior/switch_loop.zig"); _ = @import("behavior/switch_prong_err_enum.zig"); _ = @import("behavior/switch_prong_implicit_cast.zig"); _ = @import("behavior/switch_on_captured_error.zig"); diff --git a/test/behavior/switch_loop.zig b/test/behavior/switch_loop.zig new file mode 100644 index 000000000000..b88bdfe74f49 --- /dev/null +++ b/test/behavior/switch_loop.zig @@ -0,0 +1,205 @@ +const builtin = @import("builtin"); +const std = @import("std"); +const expect = std.testing.expect; + +test "simple switch loop" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + const S = struct { + fn doTheTest() !void { + var start: u32 = undefined; + start = 32; + const result: u32 = s: switch (start) { + 0 => 0, + 1 => 1, + 2 => 2, + 3 => 3, + else => |x| continue :s x / 2, + }; + try expect(result == 2); + } + }; + try S.doTheTest(); + try comptime S.doTheTest(); +} + +test "switch loop with ranges" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if 
(builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + const S = struct { + fn doTheTest() !void { + var start: u32 = undefined; + start = 32; + const result = s: switch (start) { + 0...3 => |x| x, + else => |x| continue :s x / 2, + }; + try expect(result == 2); + } + }; + try S.doTheTest(); + try comptime S.doTheTest(); +} + +test "switch loop on enum" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + const S = struct { + const E = enum { a, b, c }; + + fn doTheTest() !void { + var start: E = undefined; + start = .a; + const result: u32 = s: switch (start) { + .a => continue :s .b, + .b => continue :s .c, + .c => 123, + }; + try expect(result == 123); + } + }; + try S.doTheTest(); + try comptime S.doTheTest(); +} + +test "switch loop on tagged union" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + const S = struct { + const U = union(enum) { + a: u32, + b: f32, + c: f32, + }; + + fn doTheTest() !void { + var start: U = undefined; + start = .{ .a 
= 80 }; + const result = s: switch (start) { + .a => |x| switch (x) { + 0...49 => continue :s .{ .b = @floatFromInt(x) }, + 50 => continue :s .{ .c = @floatFromInt(x) }, + else => continue :s .{ .a = x / 2 }, + }, + .b => |x| x, + .c => return error.TestFailed, + }; + try expect(result == 40.0); + } + }; + try S.doTheTest(); + try comptime S.doTheTest(); +} + +test "switch loop dispatching instructions" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + const S = struct { + const Inst = union(enum) { + set: u32, + add: u32, + sub: u32, + end, + }; + + fn doTheTest() !void { + var insts: [5]Inst = undefined; + @memcpy(&insts, &[5]Inst{ + .{ .set = 123 }, + .{ .add = 100 }, + .{ .sub = 50 }, + .{ .sub = 10 }, + .end, + }); + var i: u32 = 0; + var cur: u32 = undefined; + eval: switch (insts[0]) { + .set => |x| { + cur = x; + i += 1; + continue :eval insts[i]; + }, + .add => |x| { + cur += x; + i += 1; + continue :eval insts[i]; + }, + .sub => |x| { + cur -= x; + i += 1; + continue :eval insts[i]; + }, + .end => {}, + } + try expect(cur == 163); + } + }; + try S.doTheTest(); + try comptime S.doTheTest(); +} + +test "switch loop with pointer capture" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if 
(builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + const S = struct { + const U = union(enum) { + a: u32, + b: u32, + c: u32, + }; + + fn doTheTest() !void { + var a: U = .{ .a = 100 }; + var b: U = .{ .b = 200 }; + var c: U = .{ .c = 300 }; + inc: switch (a) { + .a => |*x| { + x.* += 1; + continue :inc b; + }, + .b => |*x| { + x.* += 10; + continue :inc c; + }, + .c => |*x| { + x.* += 50; + }, + } + try expect(a.a == 101); + try expect(b.b == 210); + try expect(c.c == 350); + } + }; + try S.doTheTest(); + try comptime S.doTheTest(); +} From 0cc8435a830d9d3850add163be4f12e5bd4f2f5c Mon Sep 17 00:00:00 2001 From: mlugg Date: Mon, 29 Apr 2024 02:25:54 +0100 Subject: [PATCH 012/202] std.zig: resolve syntactic ambiguity The parse of `fn foo(a: switch (...) { ... })` was previously handled incorrectly; `a` was treated as both the parameter name and a label. The same issue exists for `for` and `while` expressions -- they should be fixed too, and the grammar amended appropriately. This commit does not do this: it only aims to avoid introducing regressions from labeled switch syntax. 
--- lib/std/zig/Ast.zig | 20 +++++++++++++++----- lib/std/zig/Parse.zig | 18 +++++++++--------- 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig index b6f4ad68ee01..d9080175c344 100644 --- a/lib/std/zig/Ast.zig +++ b/lib/std/zig/Ast.zig @@ -1890,11 +1890,20 @@ pub fn taggedUnionEnumTag(tree: Ast, node: Node.Index) full.ContainerDecl { pub fn switchFull(tree: Ast, node: Node.Index) full.Switch { const data = &tree.nodes.items(.data)[node]; - return tree.fullSwitchComponents(.{ - .switch_token = tree.nodes.items(.main_token)[node], - .condition = data.lhs, - .sub_range = data.rhs, - }); + const main_token = tree.nodes.items(.main_token)[node]; + const switch_token: TokenIndex, const label_token: ?TokenIndex = switch (tree.tokens.items(.tag)[main_token]) { + .identifier => .{ main_token + 2, main_token }, + .keyword_switch => .{ main_token, null }, + else => unreachable, + }; + return .{ + .ast = .{ + .switch_token = switch_token, + .condition = data.lhs, + .sub_range = data.rhs, + }, + .label_token = label_token, + }; } pub fn switchCaseOne(tree: Ast, node: Node.Index) full.SwitchCase { @@ -3278,6 +3287,7 @@ pub const Node = struct { /// main_token is the `(`. async_call_comma, /// `switch(lhs) {}`. `SubRange[rhs]`. + /// `main_token` is the identifier of a preceding label, if any; otherwise `switch`. 
@"switch", /// Same as switch except there is known to be a trailing comma /// before the final rbrace diff --git a/lib/std/zig/Parse.zig b/lib/std/zig/Parse.zig index 20e69845cb3c..677911d14ba9 100644 --- a/lib/std/zig/Parse.zig +++ b/lib/std/zig/Parse.zig @@ -1245,7 +1245,7 @@ fn parseLabeledStatement(p: *Parse) !Node.Index { const loop_stmt = try p.parseLoopStatement(); if (loop_stmt != 0) return loop_stmt; - const switch_expr = try p.parseSwitchExpr(); + const switch_expr = try p.parseSwitchExpr(label_token != 0); if (switch_expr != 0) return switch_expr; if (label_token != 0) { @@ -2699,7 +2699,7 @@ fn parsePrimaryTypeExpr(p: *Parse) !Node.Index { .builtin => return p.parseBuiltinCall(), .keyword_fn => return p.parseFnProto(), .keyword_if => return p.parseIf(expectTypeExpr), - .keyword_switch => return p.expectSwitchExpr(), + .keyword_switch => return p.expectSwitchExpr(false), .keyword_extern, .keyword_packed, @@ -2756,7 +2756,7 @@ fn parsePrimaryTypeExpr(p: *Parse) !Node.Index { }, .keyword_switch => { p.tok_i += 2; - return p.expectSwitchExpr(); + return p.expectSwitchExpr(true); }, .l_brace => { p.tok_i += 2; @@ -3034,17 +3034,17 @@ fn parseWhileTypeExpr(p: *Parse) !Node.Index { } /// SwitchExpr <- KEYWORD_switch LPAREN Expr RPAREN LBRACE SwitchProngList RBRACE -fn parseSwitchExpr(p: *Parse) !Node.Index { +fn parseSwitchExpr(p: *Parse, is_labeled: bool) !Node.Index { const switch_token = p.eatToken(.keyword_switch) orelse return null_node; - return p.expectSwitchSuffix(switch_token); + return p.expectSwitchSuffix(if (is_labeled) switch_token - 2 else switch_token); } -fn expectSwitchExpr(p: *Parse) !Node.Index { +fn expectSwitchExpr(p: *Parse, is_labeled: bool) !Node.Index { const switch_token = p.assertToken(.keyword_switch); - return p.expectSwitchSuffix(switch_token); + return p.expectSwitchSuffix(if (is_labeled) switch_token - 2 else switch_token); } -fn expectSwitchSuffix(p: *Parse, switch_token: TokenIndex) !Node.Index { +fn expectSwitchSuffix(p: 
*Parse, main_token: TokenIndex) !Node.Index { _ = try p.expectToken(.l_paren); const expr_node = try p.expectExpr(); _ = try p.expectToken(.r_paren); @@ -3055,7 +3055,7 @@ fn expectSwitchSuffix(p: *Parse, switch_token: TokenIndex) !Node.Index { return p.addNode(.{ .tag = if (trailing_comma) .switch_comma else .@"switch", - .main_token = switch_token, + .main_token = main_token, .data = .{ .lhs = expr_node, .rhs = try p.addExtra(Node.SubRange{ From 3b52e5a2217baca92f0328c0f9134e982bf15698 Mon Sep 17 00:00:00 2001 From: mlugg Date: Tue, 30 Apr 2024 19:20:03 +0100 Subject: [PATCH 013/202] std.zig.render: fix switch rendering --- lib/std/zig/Ast.zig | 5 ++-- lib/std/zig/AstGen.zig | 5 ++-- lib/std/zig/render.zig | 57 ++++++++++++++++++------------------------ 3 files changed, 29 insertions(+), 38 deletions(-) diff --git a/lib/std/zig/Ast.zig b/lib/std/zig/Ast.zig index d9080175c344..e6f456290da8 100644 --- a/lib/std/zig/Ast.zig +++ b/lib/std/zig/Ast.zig @@ -1896,11 +1896,12 @@ pub fn switchFull(tree: Ast, node: Node.Index) full.Switch { .keyword_switch => .{ main_token, null }, else => unreachable, }; + const extra = tree.extraData(data.rhs, Ast.Node.SubRange); return .{ .ast = .{ .switch_token = switch_token, .condition = data.lhs, - .sub_range = data.rhs, + .cases = tree.extra_data[extra.start..extra.end], }, .label_token = label_token, }; @@ -2869,7 +2870,7 @@ pub const full = struct { pub const Components = struct { switch_token: TokenIndex, condition: Node.Index, - sub_range: Node.Index, + cases: []const Node.Index, }; }; diff --git a/lib/std/zig/AstGen.zig b/lib/std/zig/AstGen.zig index 92da810b308e..807625b67175 100644 --- a/lib/std/zig/AstGen.zig +++ b/lib/std/zig/AstGen.zig @@ -7598,9 +7598,8 @@ fn switchExpr( const node_tags = tree.nodes.items(.tag); const main_tokens = tree.nodes.items(.main_token); const token_tags = tree.tokens.items(.tag); - const operand_node = node_datas[node].lhs; - const extra = tree.extraData(node_datas[node].rhs, Ast.Node.SubRange); 
- const case_nodes = tree.extra_data[extra.start..extra.end]; + const operand_node = switch_full.ast.condition; + const case_nodes = switch_full.ast.cases; const need_rl = astgen.nodes_need_rl.contains(node); const block_ri: ResultInfo = if (need_rl) ri else .{ diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig index 177bd948d81f..e5950429ee63 100644 --- a/lib/std/zig/render.zig +++ b/lib/std/zig/render.zig @@ -693,39 +693,27 @@ fn renderExpression(r: *Render, node: Ast.Node.Index, space: Space) Error!void { return renderToken(r, datas[node].rhs, space); }, - .@"break" => { + .@"break", .@"continue" => { const main_token = main_tokens[node]; const label_token = datas[node].lhs; const target = datas[node].rhs; if (label_token == 0 and target == 0) { - try renderToken(r, main_token, space); // break keyword + try renderToken(r, main_token, space); // break/continue } else if (label_token == 0 and target != 0) { - try renderToken(r, main_token, .space); // break keyword + try renderToken(r, main_token, .space); // break/continue try renderExpression(r, target, space); } else if (label_token != 0 and target == 0) { - try renderToken(r, main_token, .space); // break keyword - try renderToken(r, label_token - 1, .none); // colon + try renderToken(r, main_token, .space); // break/continue + try renderToken(r, label_token - 1, .none); // : try renderIdentifier(r, label_token, space, .eagerly_unquote); // identifier } else if (label_token != 0 and target != 0) { - try renderToken(r, main_token, .space); // break keyword - try renderToken(r, label_token - 1, .none); // colon + try renderToken(r, main_token, .space); // break/continue + try renderToken(r, label_token - 1, .none); // : try renderIdentifier(r, label_token, .space, .eagerly_unquote); // identifier try renderExpression(r, target, space); } }, - .@"continue" => { - const main_token = main_tokens[node]; - const label = datas[node].lhs; - if (label != 0) { - try renderToken(r, main_token, .space); // 
continue - try renderToken(r, label - 1, .none); // : - return renderIdentifier(r, label, space, .eagerly_unquote); // label - } else { - return renderToken(r, main_token, space); // continue - } - }, - .@"return" => { if (datas[node].lhs != 0) { try renderToken(r, main_tokens[node], .space); @@ -845,26 +833,29 @@ fn renderExpression(r: *Render, node: Ast.Node.Index, space: Space) Error!void { .@"switch", .switch_comma, => { - const switch_token = main_tokens[node]; - const condition = datas[node].lhs; - const extra = tree.extraData(datas[node].rhs, Ast.Node.SubRange); - const cases = tree.extra_data[extra.start..extra.end]; - const rparen = tree.lastToken(condition) + 1; + const full = tree.switchFull(node); - try renderToken(r, switch_token, .space); // switch keyword - try renderToken(r, switch_token + 1, .none); // lparen - try renderExpression(r, condition, .none); // condition expression - try renderToken(r, rparen, .space); // rparen + if (full.label_token) |label_token| { + try renderIdentifier(r, label_token, .none, .eagerly_unquote); // label + try renderToken(r, label_token + 1, .space); // : + } + + const rparen = tree.lastToken(full.ast.condition) + 1; + + try renderToken(r, full.ast.switch_token, .space); // switch + try renderToken(r, full.ast.switch_token + 1, .none); // ( + try renderExpression(r, full.ast.condition, .none); // condition expression + try renderToken(r, rparen, .space); // ) ais.pushIndentNextLine(); - if (cases.len == 0) { - try renderToken(r, rparen + 1, .none); // lbrace + if (full.ast.cases.len == 0) { + try renderToken(r, rparen + 1, .none); // { } else { - try renderToken(r, rparen + 1, .newline); // lbrace - try renderExpressions(r, cases, .comma); + try renderToken(r, rparen + 1, .newline); // { + try renderExpressions(r, full.ast.cases, .comma); } ais.popIndent(); - return renderToken(r, tree.lastToken(node), space); // rbrace + return renderToken(r, tree.lastToken(node), space); // } }, .switch_case_one, From 
cb68c0917ab6ef858a7a9a3ed9e85672304f7ab2 Mon Sep 17 00:00:00 2001 From: mlugg Date: Mon, 29 Apr 2024 23:41:08 +0100 Subject: [PATCH 014/202] wasm: un-regress `loop` and `switch_br` `.loop` is also a block, so the block_depth must be stored *after* block creation, ensuring a correct block_depth to jump back to when receiving `.repeat`. This also un-regresses `switch_br` which now correctly handles ranges within cases. It supports it for both jump tables as well as regular conditional branches. --- src/arch/wasm/CodeGen.zig | 111 ++++++++++++++++++++++++-------------- 1 file changed, 72 insertions(+), 39 deletions(-) diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 01edf87f3eb3..d78b4ae80e26 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -662,6 +662,8 @@ blocks: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, struct { label: u32, value: WValue, }) = .{}, +/// Maps `loop` instructions to their label. `br` to here repeats the loop. +loops: std.AutoHashMapUnmanaged(Air.Inst.Index, u32) = .{}, /// `bytes` contains the wasm bytecode belonging to the 'code' section. 
code: *ArrayList(u8), /// The index the next local generated will have @@ -751,6 +753,7 @@ pub fn deinit(func: *CodeGen) void { } func.branches.deinit(func.gpa); func.blocks.deinit(func.gpa); + func.loops.deinit(func.gpa); func.locals.deinit(func.gpa); func.simd_immediates.deinit(func.gpa); func.mir_instructions.deinit(func.gpa); @@ -1903,7 +1906,7 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { .trap => func.airTrap(inst), .breakpoint => func.airBreakpoint(inst), .br => func.airBr(inst), - .repeat => return func.fail("TODO implement `repeat`", .{}), + .repeat => func.airRepeat(inst), .switch_dispatch => return func.fail("TODO implement `switch_dispatch`", .{}), .int_from_bool => func.airIntFromBool(inst), .cond_br => func.airCondBr(inst), @@ -3537,10 +3540,11 @@ fn airLoop(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { // result type of loop is always 'noreturn', meaning we can always // emit the wasm type 'block_empty'. try func.startBlock(.loop, wasm.block_empty); - try func.genBody(body); - // breaking to the index of a loop block will continue the loop instead - try func.addLabel(.br, 0); + try func.loops.putNoClobber(func.gpa, inst, func.block_depth); + defer assert(func.loops.remove(inst)); + + try func.genBody(body); try func.endBlock(); return func.finishAir(inst, .none, &.{}); @@ -3737,6 +3741,16 @@ fn airBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { return func.finishAir(inst, .none, &.{br.operand}); } +fn airRepeat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { + const repeat = func.air.instructions.items(.data)[@intFromEnum(inst)].repeat; + const loop_label = func.loops.get(repeat.loop_inst).?; + + const idx: u32 = func.block_depth - loop_label; + try func.addLabel(.br, idx); + + return func.finishAir(inst, .none, &.{}); +} + fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; @@ -4053,7 +4067,10 @@ fn 
airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { defer func.gpa.free(liveness.deaths); // a list that maps each value with its value and body based on the order inside the list. - const CaseValue = struct { integer: i32, value: Value }; + const CaseValue = union(enum) { + singular: struct { integer: i32, value: Value }, + range: struct { min: i32, min_value: Value, max: i32, max_value: Value }, + }; var case_list = try std.ArrayList(struct { values: []const CaseValue, body: []const Air.Inst.Index, @@ -4064,12 +4081,9 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { var lowest_maybe: ?i32 = null; var highest_maybe: ?i32 = null; - var it = switch_br.iterateCases(); while (it.next()) |case| { - if (case.ranges.len > 0) return func.fail("TODO: switch with ranges", .{}); - - const values = try func.gpa.alloc(CaseValue, case.items.len); + const values = try func.gpa.alloc(CaseValue, case.items.len + case.ranges.len); errdefer func.gpa.free(values); for (case.items, 0..) |ref, i| { @@ -4081,7 +4095,30 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { if (highest_maybe == null or int_val > highest_maybe.?) { highest_maybe = int_val; } - values[i] = .{ .integer = int_val, .value = item_val }; + values[i] = .{ .singular = .{ .integer = int_val, .value = item_val } }; + } + + for (case.ranges, 0..) |range, i| { + const min_val = (try func.air.value(range[0], pt)).?; + const int_min_val = func.valueAsI32(min_val); + + if (lowest_maybe == null or int_min_val < lowest_maybe.?) { + lowest_maybe = int_min_val; + } + + const max_val = (try func.air.value(range[1], pt)).?; + const int_max_val = func.valueAsI32(max_val); + + if (highest_maybe == null or int_max_val > highest_maybe.?) 
{ + highest_maybe = int_max_val; + } + + values[i + case.items.len] = .{ .range = .{ + .min = int_min_val, + .min_value = min_val, + .max = int_max_val, + .max_value = max_val, + } }; } case_list.appendAssumeCapacity(.{ .values = values, .body = case.body }); @@ -4134,7 +4171,12 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { const idx = blk: { for (case_list.items, 0..) |case, idx| { for (case.values) |case_value| { - if (case_value.integer == value) break :blk @as(u32, @intCast(idx)); + switch (case_value) { + .singular => |val| if (val.integer == value) break :blk @as(u32, @intCast(idx)), + .range => |range_val| if (value >= range_val.min and value <= range_val.max) { + break :blk @as(u32, @intCast(idx)); + }, + } } } // error sets are almost always sparse so we use the default case @@ -4150,43 +4192,34 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void { try func.endBlock(); } - const signedness: std.builtin.Signedness = blk: { - // by default we tell the operand type is unsigned (i.e. bools and enum values) - if (target_ty.zigTypeTag(zcu) != .int) break :blk .unsigned; - - // incase of an actual integer, we emit the correct signedness - break :blk target_ty.intInfo(zcu).signedness; - }; - try func.branches.ensureUnusedCapacity(func.gpa, case_list.items.len + @intFromBool(has_else_body)); for (case_list.items, 0..) |case, index| { // when sparse, we use if/else-chain, so emit conditional checks if (is_sparse) { - // for single value prong we can emit a simple if - if (case.values.len == 1) { - try func.emitWValue(target); - const val = try func.lowerConstant(case.values[0].value, target_ty); - try func.emitWValue(val); - const opcode = buildOpcode(.{ - .valtype1 = typeToValtype(target_ty, pt, func.target.*), - .op = .ne, // not equal, because we want to jump out of this block if it does not match the condition. 
- .signedness = signedness, - }); - try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); + // for single value prong we can emit a simple condition + if (case.values.len == 1 and case.values[0] == .singular) { + const val = try func.lowerConstant(case.values[0].singular.value, target_ty); + // not equal, because we want to jump out of this block if it does not match the condition. + _ = try func.cmp(target, val, target_ty, .neq); try func.addLabel(.br_if, 0); } else { // in multi-value prongs we must check if any prongs match the target value. try func.startBlock(.block, blocktype); for (case.values) |value| { - try func.emitWValue(target); - const val = try func.lowerConstant(value.value, target_ty); - try func.emitWValue(val); - const opcode = buildOpcode(.{ - .valtype1 = typeToValtype(target_ty, pt, func.target.*), - .op = .eq, - .signedness = signedness, - }); - try func.addTag(Mir.Inst.Tag.fromOpcode(opcode)); + switch (value) { + .singular => |single_val| { + const val = try func.lowerConstant(single_val.value, target_ty); + _ = try func.cmp(target, val, target_ty, .eq); + }, + .range => |range| { + const min_val = try func.lowerConstant(range.min_value, target_ty); + const max_val = try func.lowerConstant(range.max_value, target_ty); + + const gte = try func.cmp(target, min_val, target_ty, .gte); + const lte = try func.cmp(target, max_val, target_ty, .lte); + _ = try func.binOp(gte, lte, Type.bool, .@"and"); + }, + } try func.addLabel(.br_if, 0); } // value did not match any of the prong values From fd70d9db9960a98fb97def91aa34f56c15499ebf Mon Sep 17 00:00:00 2001 From: mlugg Date: Fri, 30 Aug 2024 23:33:57 +0100 Subject: [PATCH 015/202] x86_64: un-regress `loop` and `switch_br` This does *not* yet implement the new `loop_switch_br` instruction. 
--- src/arch/x86_64/CodeGen.zig | 107 ++++++++++++++++++++++++++++++------ 1 file changed, 91 insertions(+), 16 deletions(-) diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index e27e04782d4f..dcf51c2514f9 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -105,6 +105,13 @@ frame_allocs: std.MultiArrayList(FrameAlloc) = .{}, free_frame_indices: std.AutoArrayHashMapUnmanaged(FrameIndex, void) = .{}, frame_locs: std.MultiArrayList(Mir.FrameLoc) = .{}, +loop_repeat_info: std.AutoHashMapUnmanaged(Air.Inst.Index, struct { + /// The state to restore before branching. + state: State, + /// The branch target. + jmp_target: Mir.Inst.Index, +}) = .{}, + /// Debug field, used to find bugs in the compiler. air_bookkeeping: @TypeOf(air_bookkeeping_init) = air_bookkeeping_init, @@ -815,6 +822,7 @@ pub fn generate( function.frame_allocs.deinit(gpa); function.free_frame_indices.deinit(gpa); function.frame_locs.deinit(gpa); + function.loop_repeat_info.deinit(gpa); var block_it = function.blocks.valueIterator(); while (block_it.next()) |block| block.deinit(gpa); function.blocks.deinit(gpa); @@ -2247,7 +2255,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .bitcast => try self.airBitCast(inst), .block => try self.airBlock(inst), .br => try self.airBr(inst), - .repeat => return self.fail("TODO implement `repeat`", .{}), + .repeat => try self.airRepeat(inst), .switch_dispatch => return self.fail("TODO implement `switch_dispatch`", .{}), .trap => try self.airTrap(), .breakpoint => try self.airBreakpoint(), @@ -13629,15 +13637,13 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void { self.scope_generation += 1; const state = try self.saveState(); - const jmp_target: Mir.Inst.Index = @intCast(self.mir_instructions.len); - try self.genBody(body); - try self.restoreState(state, &.{}, .{ - .emit_instructions = true, - .update_tracking = false, - .resurrect = false, - .close_scope = true, + try 
self.loop_repeat_info.putNoClobber(self.gpa, inst, .{ + .state = state, + .jmp_target = @intCast(self.mir_instructions.len), }); - _ = try self.asmJmpReloc(jmp_target); + defer assert(self.loop_repeat_info.remove(inst)); + + try self.genBody(body); self.finishAirBookkeeping(); } @@ -13680,12 +13686,19 @@ fn lowerBlock(self: *Self, inst: Air.Inst.Index, body: []const Air.Inst.Index) ! } fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { + const zcu = self.pt.zcu; const switch_br = self.air.unwrapSwitch(inst); const condition = try self.resolveInst(switch_br.operand); const condition_ty = self.typeOf(switch_br.operand); const liveness = try self.liveness.getSwitchBr(self.gpa, inst, switch_br.cases_len + 1); defer self.gpa.free(liveness.deaths); + const signedness = switch (condition_ty.zigTypeTag(zcu)) { + .bool, .pointer => .unsigned, + .int, .@"enum", .error_set => condition_ty.intInfo(zcu).signedness, + else => unreachable, + }; + // If the condition dies here in this switch instruction, process // that death now instead of later as this has an effect on // whether it needs to be spilled in the branches @@ -13698,13 +13711,11 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { var it = switch_br.iterateCases(); while (it.next()) |case| { - if (case.ranges.len > 0) return self.fail("TODO: switch with ranges", .{}); - - var relocs = try self.gpa.alloc(Mir.Inst.Index, case.items.len); + var relocs = try self.gpa.alloc(Mir.Inst.Index, case.items.len + case.ranges.len); defer self.gpa.free(relocs); try self.spillEflagsIfOccupied(); - for (case.items, relocs, 0..) 
|item, *reloc, i| { + for (case.items, relocs[0..case.items.len]) |item, *reloc| { const item_mcv = try self.resolveInst(item); const cc: Condition = switch (condition) { .eflags => |cc| switch (item_mcv.immediate) { @@ -13717,12 +13728,62 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { break :cc .e; }, }; - reloc.* = try self.asmJccReloc(if (i < relocs.len - 1) cc else cc.negate(), undefined); + reloc.* = try self.asmJccReloc(cc, undefined); } + for (case.ranges, relocs[case.items.len..]) |range, *reloc| { + const min_mcv = try self.resolveInst(range[0]); + const max_mcv = try self.resolveInst(range[1]); + // `null` means always false. + const lt_min: ?Condition = switch (condition) { + .eflags => |cc| switch (min_mcv.immediate) { + 0 => null, // condition never <0 + 1 => cc.negate(), + else => unreachable, + }, + else => cc: { + try self.genBinOpMir(.{ ._, .cmp }, condition_ty, condition, min_mcv); + break :cc switch (signedness) { + .unsigned => .b, + .signed => .l, + }; + }, + }; + const lt_min_reloc = if (lt_min) |cc| r: { + break :r try self.asmJccReloc(cc, undefined); + } else null; + // `null` means always true. + const lte_max: ?Condition = switch (condition) { + .eflags => |cc| switch (max_mcv.immediate) { + 0 => cc.negate(), + 1 => null, // condition always >=1 + else => unreachable, + }, + else => cc: { + try self.genBinOpMir(.{ ._, .cmp }, condition_ty, condition, max_mcv); + break :cc switch (signedness) { + .unsigned => .be, + .signed => .le, + }; + }, + }; + // "Success" case is in `reloc`.... + if (lte_max) |cc| { + reloc.* = try self.asmJccReloc(cc, undefined); + } else { + reloc.* = try self.asmJmpReloc(undefined); + } + // ...and "fail" case falls through to next checks. + if (lt_min_reloc) |r| self.performReloc(r); + } + + // The jump to skip this case if the conditions all failed. 
+ const skip_case_reloc = try self.asmJmpReloc(undefined); + for (liveness.deaths[case.idx]) |operand| try self.processDeath(operand); - for (relocs[0 .. relocs.len - 1]) |reloc| self.performReloc(reloc); + // Relocate all success cases to the body we're about to generate. + for (relocs) |reloc| self.performReloc(reloc); try self.genBody(case.body); try self.restoreState(state, &.{}, .{ .emit_instructions = false, @@ -13731,7 +13792,8 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { .close_scope = true, }); - self.performReloc(relocs[relocs.len - 1]); + // Relocate the "skip" branch to fall through to the next case. + self.performReloc(skip_case_reloc); } if (switch_br.else_body_len > 0) { @@ -13827,6 +13889,19 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void { self.finishAirBookkeeping(); } +fn airRepeat(self: *Self, inst: Air.Inst.Index) !void { + const loop_inst = self.air.instructions.items(.data)[@intFromEnum(inst)].repeat.loop_inst; + const repeat_info = self.loop_repeat_info.get(loop_inst).?; + try self.restoreState(repeat_info.state, &.{}, .{ + .emit_instructions = true, + .update_tracking = false, + .resurrect = false, + .close_scope = true, + }); + _ = try self.asmJmpReloc(repeat_info.jmp_target); + self.finishAirBookkeeping(); +} + fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const pt = self.pt; const zcu = pt.zcu; From b7a55cd6c3ca0c4c97f266b72f741b980416456a Mon Sep 17 00:00:00 2001 From: mlugg Date: Sat, 31 Aug 2024 00:33:45 +0100 Subject: [PATCH 016/202] AstGen: allow breaking from labeled switch Also, don't use the special switch lowering for errors if the switch is labeled; this isn't currently supported. Related: #20627. 
--- lib/std/zig/AstGen.zig | 35 ++++++++++++++--------------------- test/behavior/switch.zig | 24 ++++++++++++++++++++++++ 2 files changed, 38 insertions(+), 21 deletions(-) diff --git a/lib/std/zig/AstGen.zig b/lib/std/zig/AstGen.zig index 807625b67175..b280360535e4 100644 --- a/lib/std/zig/AstGen.zig +++ b/lib/std/zig/AstGen.zig @@ -857,13 +857,10 @@ fn expr(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) InnerE const if_full = tree.fullIf(node).?; no_switch_on_err: { const error_token = if_full.error_token orelse break :no_switch_on_err; - switch (node_tags[if_full.ast.else_expr]) { - .@"switch", .switch_comma => {}, - else => break :no_switch_on_err, - } - const switch_operand = node_datas[if_full.ast.else_expr].lhs; - if (node_tags[switch_operand] != .identifier) break :no_switch_on_err; - if (!mem.eql(u8, tree.tokenSlice(error_token), tree.tokenSlice(main_tokens[switch_operand]))) break :no_switch_on_err; + const full_switch = tree.fullSwitch(if_full.ast.else_expr) orelse break :no_switch_on_err; + if (full_switch.label_token != null) break :no_switch_on_err; + if (node_tags[full_switch.ast.condition] != .identifier) break :no_switch_on_err; + if (!mem.eql(u8, tree.tokenSlice(error_token), tree.tokenSlice(main_tokens[full_switch.ast.condition]))) break :no_switch_on_err; return switchExprErrUnion(gz, scope, ri.br(), node, .@"if"); } return ifExpr(gz, scope, ri.br(), node, if_full); @@ -1049,13 +1046,10 @@ fn expr(gz: *GenZir, scope: *Scope, ri: ResultInfo, node: Ast.Node.Index) InnerE null; no_switch_on_err: { const capture_token = payload_token orelse break :no_switch_on_err; - switch (node_tags[node_datas[node].rhs]) { - .@"switch", .switch_comma => {}, - else => break :no_switch_on_err, - } - const switch_operand = node_datas[node_datas[node].rhs].lhs; - if (node_tags[switch_operand] != .identifier) break :no_switch_on_err; - if (!mem.eql(u8, tree.tokenSlice(capture_token), tree.tokenSlice(main_tokens[switch_operand]))) break 
:no_switch_on_err; + const full_switch = tree.fullSwitch(node_datas[node].rhs) orelse break :no_switch_on_err; + if (full_switch.label_token != null) break :no_switch_on_err; + if (node_tags[full_switch.ast.condition] != .identifier) break :no_switch_on_err; + if (!mem.eql(u8, tree.tokenSlice(capture_token), tree.tokenSlice(main_tokens[full_switch.ast.condition]))) break :no_switch_on_err; return switchExprErrUnion(gz, scope, ri.br(), node, .@"catch"); } switch (ri.rl) { @@ -2160,11 +2154,6 @@ fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) Inn if (break_label != 0) { if (block_gz.label) |*label| { if (try astgen.tokenIdentEql(label.token, break_label)) { - const maybe_switch_tag = astgen.instructions.items(.tag)[@intFromEnum(label.block_inst)]; - switch (maybe_switch_tag) { - .switch_block, .switch_block_ref => return astgen.failNode(node, "cannot break from switch", .{}), - else => {}, - } label.used = true; break :blk label.block_inst; } @@ -2278,6 +2267,7 @@ fn continueExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) } label.used = true; + label.used_for_continue = true; break :blk; } } @@ -7760,7 +7750,7 @@ fn switchExpr( const raw_operand = try expr(parent_gz, scope, operand_ri, operand_node); const item_ri: ResultInfo = .{ .rl = .none }; - // If this switch is labeled, it will have `continue`s targeting it, and thus we need the operand type + // If this switch is labeled, it may have `continue`s targeting it, and thus we need the operand type // to provide a result type. 
const raw_operand_ty_ref = if (switch_full.label_token != null) t: { break :t try parent_gz.addUnNode(.typeof, raw_operand, operand_node); @@ -7790,7 +7780,9 @@ fn switchExpr( const switch_block = try parent_gz.makeBlockInst(switch_tag, node); if (switch_full.label_token) |label_token| { + block_scope.break_block = switch_block.toOptional(); block_scope.continue_block = switch_block.toOptional(); + // `break_result_info` already set above block_scope.continue_result_info = .{ .rl = if (any_payload_is_ref) .{ .ref_coerced_ty = raw_operand_ty_ref } @@ -8024,7 +8016,7 @@ fn switchExpr( .has_under = special_prong == .under, .any_has_tag_capture = any_has_tag_capture, .any_non_inline_capture = any_non_inline_capture, - .has_continue = switch_full.label_token != null, + .has_continue = switch_full.label_token != null and block_scope.label.?.used_for_continue, .scalar_cases_len = @intCast(scalar_cases_len), }, }); @@ -11963,6 +11955,7 @@ const GenZir = struct { token: Ast.TokenIndex, block_inst: Zir.Inst.Index, used: bool = false, + used_for_continue: bool = false, }; /// Assumes nothing stacked on `gz`. diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig index 1cec0dfad44b..f1ded573a029 100644 --- a/test/behavior/switch.zig +++ b/test/behavior/switch.zig @@ -961,3 +961,27 @@ test "block error return trace index is reset between prongs" { }; try result; } + +test "labeled switch with break" { + var six: u32 = undefined; + six = 6; + + const val = s: switch (six) { + 0...4 => break :s false, + 5 => break :s false, + 6...7 => break :s true, + else => break :s false, + }; + + try expect(val); + + // Make sure the switch is implicitly comptime! 
+ const comptime_val = s: switch (@as(u32, 6)) { + 0...4 => break :s false, + 5 => break :s false, + 6...7 => break :s true, + else => break :s false, + }; + + comptime assert(comptime_val); +} From 2b9af9e825fcbfd55f953e7c3bc80decb52a4b2b Mon Sep 17 00:00:00 2001 From: mlugg Date: Sat, 31 Aug 2024 00:40:59 +0100 Subject: [PATCH 017/202] AstGen: error on unused switch label --- lib/std/zig/AstGen.zig | 5 +++++ test/cases/compile_errors/duplicate-unused_labels.zig | 6 ++++++ 2 files changed, 11 insertions(+) diff --git a/lib/std/zig/AstGen.zig b/lib/std/zig/AstGen.zig index b280360535e4..aa1ea2498f42 100644 --- a/lib/std/zig/AstGen.zig +++ b/lib/std/zig/AstGen.zig @@ -7999,6 +7999,11 @@ fn switchExpr( appendBodyWithFixupsArrayList(astgen, payloads, case_slice); } } + + if (switch_full.label_token) |label_token| if (!block_scope.label.?.used) { + try astgen.appendErrorTok(label_token, "unused switch label", .{}); + }; + // Now that the item expressions are generated we can add this. try parent_gz.instructions.append(gpa, switch_block); diff --git a/test/cases/compile_errors/duplicate-unused_labels.zig b/test/cases/compile_errors/duplicate-unused_labels.zig index 301d273bde42..7b22edef2007 100644 --- a/test/cases/compile_errors/duplicate-unused_labels.zig +++ b/test/cases/compile_errors/duplicate-unused_labels.zig @@ -22,6 +22,11 @@ comptime { comptime { blk: for (@as([0]void, undefined)) |_| {} } +comptime { + blk: switch (true) { + else => {}, + } +} // error // target=native @@ -35,3 +40,4 @@ comptime { // :17:5: error: unused block label // :20:5: error: unused while loop label // :23:5: error: unused for loop label +// :26:5: error: unused switch label From d5b01df3c8f87621eaf32ef4b647a867b54628b2 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 31 Aug 2024 22:18:30 -0400 Subject: [PATCH 018/202] x86_64: implement `loop_switch_br` and `switch_dispatch` --- src/arch/x86_64/CodeGen.zig | 187 ++++++++++++++++++++++++++++------ test/behavior/switch_loop.zig | 6 
-- 2 files changed, 157 insertions(+), 36 deletions(-) diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index dcf51c2514f9..35f47ad882a4 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -105,7 +105,7 @@ frame_allocs: std.MultiArrayList(FrameAlloc) = .{}, free_frame_indices: std.AutoArrayHashMapUnmanaged(FrameIndex, void) = .{}, frame_locs: std.MultiArrayList(Mir.FrameLoc) = .{}, -loop_repeat_info: std.AutoHashMapUnmanaged(Air.Inst.Index, struct { +loops: std.AutoHashMapUnmanaged(Air.Inst.Index, struct { /// The state to restore before branching. state: State, /// The branch target. @@ -219,6 +219,38 @@ pub const MCValue = union(enum) { reserved_frame: FrameIndex, air_ref: Air.Inst.Ref, + fn isModifiable(mcv: MCValue) bool { + return switch (mcv) { + .none, + .unreach, + .dead, + .undef, + .immediate, + .register_offset, + .eflags, + .register_overflow, + .lea_symbol, + .lea_direct, + .lea_got, + .lea_tlv, + .lea_frame, + .elementwise_regs_then_frame, + .reserved_frame, + .air_ref, + => false, + .register, + .register_pair, + .memory, + .load_symbol, + .load_got, + .load_direct, + .load_tlv, + .indirect, + => true, + .load_frame => |frame_addr| !frame_addr.index.isNamed(), + }; + } + fn isMemory(mcv: MCValue) bool { return switch (mcv) { .memory, .indirect, .load_frame => true, @@ -822,7 +854,7 @@ pub fn generate( function.frame_allocs.deinit(gpa); function.free_frame_indices.deinit(gpa); function.frame_locs.deinit(gpa); - function.loop_repeat_info.deinit(gpa); + function.loops.deinit(gpa); var block_it = function.blocks.valueIterator(); while (block_it.next()) |block| block.deinit(gpa); function.blocks.deinit(gpa); @@ -2156,18 +2188,20 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { const air_tags = self.air.instructions.items(.tag); self.arg_index = 0; - for (body) |inst| { - wip_mir_log.debug("{}", .{self.fmtAir(inst)}); - verbose_tracking_log.debug("{}", .{self.fmtTracking()}); + for 
(body) |inst| switch (air_tags[@intFromEnum(inst)]) { + .arg => { + wip_mir_log.debug("{}", .{self.fmtAir(inst)}); + verbose_tracking_log.debug("{}", .{self.fmtTracking()}); - const old_air_bookkeeping = self.air_bookkeeping; - try self.inst_tracking.ensureUnusedCapacity(self.gpa, 1); - switch (air_tags[@intFromEnum(inst)]) { - .arg => try self.airArg(inst), - else => break, - } - self.checkInvariantsAfterAirInst(inst, old_air_bookkeeping); - } + const old_air_bookkeeping = self.air_bookkeeping; + try self.inst_tracking.ensureUnusedCapacity(self.gpa, 1); + + try self.airArg(inst); + + self.checkInvariantsAfterAirInst(inst, old_air_bookkeeping); + }, + else => break, + }; if (self.arg_index == 0) try self.airDbgVarArgs(); self.arg_index = 0; @@ -2256,7 +2290,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .block => try self.airBlock(inst), .br => try self.airBr(inst), .repeat => try self.airRepeat(inst), - .switch_dispatch => return self.fail("TODO implement `switch_dispatch`", .{}), + .switch_dispatch => try self.airSwitchDispatch(inst), .trap => try self.airTrap(), .breakpoint => try self.airBreakpoint(), .ret_addr => try self.airRetAddr(inst), @@ -2345,7 +2379,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .field_parent_ptr => try self.airFieldParentPtr(inst), .switch_br => try self.airSwitchBr(inst), - .loop_switch_br => return self.fail("TODO implement `loop_switch_br`", .{}), + .loop_switch_br => try self.airLoopSwitchBr(inst), .slice_ptr => try self.airSlicePtr(inst), .slice_len => try self.airSliceLen(inst), @@ -13637,14 +13671,13 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void { self.scope_generation += 1; const state = try self.saveState(); - try self.loop_repeat_info.putNoClobber(self.gpa, inst, .{ + try self.loops.putNoClobber(self.gpa, inst, .{ .state = state, .jmp_target = @intCast(self.mir_instructions.len), }); - defer assert(self.loop_repeat_info.remove(inst)); + defer 
assert(self.loops.remove(inst)); try self.genBody(body); - self.finishAirBookkeeping(); } @@ -13685,10 +13718,8 @@ fn lowerBlock(self: *Self, inst: Air.Inst.Index, body: []const Air.Inst.Index) ! self.finishAirBookkeeping(); } -fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { +fn lowerSwitchBr(self: *Self, inst: Air.Inst.Index, switch_br: Air.UnwrappedSwitch, condition: MCValue) !void { const zcu = self.pt.zcu; - const switch_br = self.air.unwrapSwitch(inst); - const condition = try self.resolveInst(switch_br.operand); const condition_ty = self.typeOf(switch_br.operand); const liveness = try self.liveness.getSwitchBr(self.gpa, inst, switch_br.cases_len + 1); defer self.gpa.free(liveness.deaths); @@ -13699,13 +13730,6 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { else => unreachable, }; - // If the condition dies here in this switch instruction, process - // that death now instead of later as this has an effect on - // whether it needs to be spilled in the branches - if (self.liveness.operandDies(inst, 0)) { - if (switch_br.operand.toIndex()) |op_inst| try self.processDeath(op_inst); - } - self.scope_generation += 1; const state = try self.saveState(); @@ -13810,11 +13834,111 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { .close_scope = true, }); } +} + +fn airSwitchBr(self: *Self, inst: Air.Inst.Index) !void { + const switch_br = self.air.unwrapSwitch(inst); + const condition = try self.resolveInst(switch_br.operand); + + // If the condition dies here in this switch instruction, process + // that death now instead of later as this has an effect on + // whether it needs to be spilled in the branches + if (self.liveness.operandDies(inst, 0)) { + if (switch_br.operand.toIndex()) |op_inst| try self.processDeath(op_inst); + } + + try self.lowerSwitchBr(inst, switch_br, condition); // We already took care of pl_op.operand earlier, so there's nothing left to do self.finishAirBookkeeping(); } +fn airLoopSwitchBr(self: *Self, inst: 
Air.Inst.Index) !void { + const switch_br = self.air.unwrapSwitch(inst); + const condition = try self.resolveInst(switch_br.operand); + + const mat_cond = if (condition.isModifiable() and + self.reuseOperand(inst, switch_br.operand, 0, condition)) + condition + else mat_cond: { + const mat_cond = try self.allocRegOrMem(inst, true); + try self.genCopy(self.typeOf(switch_br.operand), mat_cond, condition, .{}); + break :mat_cond mat_cond; + }; + self.inst_tracking.putAssumeCapacityNoClobber(inst, InstTracking.init(mat_cond)); + + // If the condition dies here in this switch instruction, process + // that death now instead of later as this has an effect on + // whether it needs to be spilled in the branches + if (self.liveness.operandDies(inst, 0)) { + if (switch_br.operand.toIndex()) |op_inst| try self.processDeath(op_inst); + } + + self.scope_generation += 1; + const state = try self.saveState(); + + try self.loops.putNoClobber(self.gpa, inst, .{ + .state = state, + .jmp_target = @intCast(self.mir_instructions.len), + }); + defer assert(self.loops.remove(inst)); + + // Stop tracking block result without forgetting tracking info + try self.freeValue(mat_cond); + + try self.lowerSwitchBr(inst, switch_br, mat_cond); + + try self.processDeath(inst); + self.finishAirBookkeeping(); +} + +fn airSwitchDispatch(self: *Self, inst: Air.Inst.Index) !void { + const br = self.air.instructions.items(.data)[@intFromEnum(inst)].br; + + const block_ty = self.typeOfIndex(br.block_inst); + const block_tracking = self.inst_tracking.getPtr(br.block_inst).?; + const loop_data = self.loops.getPtr(br.block_inst).?; + done: { + try self.getValue(block_tracking.short, null); + const src_mcv = try self.resolveInst(br.operand); + + if (self.reuseOperandAdvanced(inst, br.operand, 0, src_mcv, br.block_inst)) { + try self.getValue(block_tracking.short, br.block_inst); + // .long = .none to avoid merging operand and block result stack frames. 
+ const current_tracking: InstTracking = .{ .long = .none, .short = src_mcv }; + try current_tracking.materializeUnsafe(self, br.block_inst, block_tracking.*); + for (current_tracking.getRegs()) |src_reg| self.register_manager.freeReg(src_reg); + break :done; + } + + try self.getValue(block_tracking.short, br.block_inst); + const dst_mcv = block_tracking.short; + try self.genCopy(block_ty, dst_mcv, try self.resolveInst(br.operand), .{}); + break :done; + } + + // Process operand death so that it is properly accounted for in the State below. + if (self.liveness.operandDies(inst, 0)) { + if (br.operand.toIndex()) |op_inst| try self.processDeath(op_inst); + } + + try self.restoreState(loop_data.state, &.{}, .{ + .emit_instructions = true, + .update_tracking = false, + .resurrect = false, + .close_scope = false, + }); + + // Emit a jump with a relocation. It will be patched up after the block ends. + // Leave the jump offset undefined + _ = try self.asmJmpReloc(loop_data.jmp_target); + + // Stop tracking block result without forgetting tracking info + try self.freeValue(block_tracking.short); + + self.finishAirBookkeeping(); +} + fn performReloc(self: *Self, reloc: Mir.Inst.Index) void { const next_inst: u32 = @intCast(self.mir_instructions.len); switch (self.mir_instructions.items(.tag)[reloc]) { @@ -13891,7 +14015,7 @@ fn airBr(self: *Self, inst: Air.Inst.Index) !void { fn airRepeat(self: *Self, inst: Air.Inst.Index) !void { const loop_inst = self.air.instructions.items(.data)[@intFromEnum(inst)].repeat.loop_inst; - const repeat_info = self.loop_repeat_info.get(loop_inst).?; + const repeat_info = self.loops.get(loop_inst).?; try self.restoreState(repeat_info.state, &.{}, .{ .emit_instructions = true, .update_tracking = false, @@ -19578,7 +19702,10 @@ fn typeOf(self: *Self, inst: Air.Inst.Ref) Type { fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type { const pt = self.pt; const zcu = pt.zcu; - return self.air.typeOfIndex(inst, &zcu.intern_pool); + return switch 
(self.air.instructions.items(.tag)[@intFromEnum(inst)]) { + .loop_switch_br => self.typeOf(self.air.unwrapSwitch(inst).operand), + else => self.air.typeOfIndex(inst, &zcu.intern_pool), + }; } fn intCompilerRtAbiName(int_bits: u32) u8 { diff --git a/test/behavior/switch_loop.zig b/test/behavior/switch_loop.zig index b88bdfe74f49..e77e23cfd749 100644 --- a/test/behavior/switch_loop.zig +++ b/test/behavior/switch_loop.zig @@ -4,7 +4,6 @@ const expect = std.testing.expect; test "simple switch loop" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -30,7 +29,6 @@ test "simple switch loop" { test "switch loop with ranges" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -53,7 +51,6 @@ test "switch loop with ranges" { test "switch loop on enum" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -79,7 +76,6 @@ test "switch loop on enum" { test "switch loop on tagged union" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend 
== .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -113,7 +109,6 @@ test "switch loop on tagged union" { test "switch loop dispatching instructions" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO @@ -165,7 +160,6 @@ test "switch loop dispatching instructions" { test "switch loop with pointer capture" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO From 97ed2392033ba1713b6ab16d88d84dd019d6bb2e Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sun, 1 Sep 2024 01:42:17 -0700 Subject: [PATCH 019/202] riscv: implement `repeat` and the new `switch_br` --- src/arch/riscv64/CodeGen.zig | 97 ++++++++++++++++++++++++++---------- 1 file changed, 72 insertions(+), 25 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index e810c6ee30f8..e29b12e8a4c0 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -108,6 +108,13 @@ frame_allocs: std.MultiArrayList(FrameAlloc) = .{}, free_frame_indices: std.AutoArrayHashMapUnmanaged(FrameIndex, void) = .{}, frame_locs: std.MultiArrayList(Mir.FrameLoc) = .{}, +loop_repeat_info: 
std.AutoHashMapUnmanaged(Air.Inst.Index, struct { + /// The state to restore before branching. + state: State, + /// The branch target. + jmp_target: Mir.Inst.Index, +}) = .{}, + /// Debug field, used to find bugs in the compiler. air_bookkeeping: @TypeOf(air_bookkeeping_init) = air_bookkeeping_init, @@ -797,6 +804,7 @@ pub fn generate( function.frame_allocs.deinit(gpa); function.free_frame_indices.deinit(gpa); function.frame_locs.deinit(gpa); + function.loop_repeat_info.deinit(gpa); var block_it = function.blocks.valueIterator(); while (block_it.next()) |block| block.deinit(gpa); function.blocks.deinit(gpa); @@ -1579,7 +1587,7 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void { .bitcast => try func.airBitCast(inst), .block => try func.airBlock(inst), .br => try func.airBr(inst), - .repeat => return func.fail("TODO implement `repeat`", .{}), + .repeat => try func.airRepeat(inst), .switch_dispatch => return func.fail("TODO implement `switch_dispatch`", .{}), .trap => try func.airTrap(), .breakpoint => try func.airBreakpoint(), @@ -5602,15 +5610,13 @@ fn airLoop(func: *Func, inst: Air.Inst.Index) !void { func.scope_generation += 1; const state = try func.saveState(); - const jmp_target: Mir.Inst.Index = @intCast(func.mir_instructions.len); - try func.genBody(body); - try func.restoreState(state, &.{}, .{ - .emit_instructions = true, - .update_tracking = false, - .resurrect = false, - .close_scope = true, + try func.loop_repeat_info.putNoClobber(func.gpa, inst, .{ + .state = state, + .jmp_target = @intCast(func.mir_instructions.len), }); - _ = try func.jump(jmp_target); + defer assert(func.loop_repeat_info.remove(inst)); + + try func.genBody(body); func.finishAirBookkeeping(); } @@ -5684,12 +5690,10 @@ fn airSwitchBr(func: *Func, inst: Air.Inst.Index) !void { var it = switch_br.iterateCases(); while (it.next()) |case| { - if (case.ranges.len > 0) return func.fail("TODO: switch with ranges", .{}); - - var relocs = try 
func.gpa.alloc(Mir.Inst.Index, case.items.len); + var relocs = try func.gpa.alloc(Mir.Inst.Index, case.items.len + case.ranges.len); defer func.gpa.free(relocs); - for (case.items, relocs, 0..) |item, *reloc, i| { + for (case.items, relocs[0..case.items.len]) |item, *reloc| { const item_mcv = try func.resolveInst(item); const cond_lock = switch (condition) { @@ -5710,22 +5714,52 @@ fn airSwitchBr(func: *Func, inst: Air.Inst.Index) !void { cmp_reg, ); - if (!(i < relocs.len - 1)) { - _ = try func.addInst(.{ - .tag = .pseudo_not, - .data = .{ .rr = .{ - .rd = cmp_reg, - .rs = cmp_reg, - } }, - }); - } - reloc.* = try func.condBr(condition_ty, .{ .register = cmp_reg }); } + for (case.ranges, relocs[case.items.len..]) |range, *reloc| { + const min_mcv = try func.resolveInst(range[0]); + const max_mcv = try func.resolveInst(range[1]); + const cond_lock = switch (condition) { + .register => func.register_manager.lockRegAssumeUnused(condition.register), + else => null, + }; + defer if (cond_lock) |lock| func.register_manager.unlockReg(lock); + + const temp_cmp_reg, const temp_cmp_lock = try func.allocReg(.int); + defer func.register_manager.unlockReg(temp_cmp_lock); + + // is `condition` less than `min`? is "true", we've failed + try func.genBinOp( + .cmp_gte, + condition, + condition_ty, + min_mcv, + condition_ty, + temp_cmp_reg, + ); + + // if the compare was true, we will jump to the fail case and fall through + // to the next checks + const lt_fail_reloc = try func.condBr(condition_ty, .{ .register = temp_cmp_reg }); + try func.genBinOp( + .cmp_gt, + condition, + condition_ty, + max_mcv, + condition_ty, + temp_cmp_reg, + ); + + reloc.* = try func.condBr(condition_ty, .{ .register = temp_cmp_reg }); + func.performReloc(lt_fail_reloc); + } + + const skip_case_reloc = try func.jump(undefined); + for (liveness.deaths[case.idx]) |operand| try func.processDeath(operand); - for (relocs[0 .. 
relocs.len - 1]) |reloc| func.performReloc(reloc); + for (relocs) |reloc| func.performReloc(reloc); try func.genBody(case.body); try func.restoreState(state, &.{}, .{ .emit_instructions = false, @@ -5734,7 +5768,7 @@ fn airSwitchBr(func: *Func, inst: Air.Inst.Index) !void { .close_scope = true, }); - func.performReloc(relocs[relocs.len - 1]); + func.performReloc(skip_case_reloc); } if (switch_br.else_body_len > 0) { @@ -5831,6 +5865,19 @@ fn airBr(func: *Func, inst: Air.Inst.Index) !void { func.finishAirBookkeeping(); } +fn airRepeat(func: *Func, inst: Air.Inst.Index) !void { + const loop_inst = func.air.instructions.items(.data)[@intFromEnum(inst)].repeat.loop_inst; + const repeat_info = func.loop_repeat_info.get(loop_inst).?; + try func.restoreState(repeat_info.state, &.{}, .{ + .emit_instructions = true, + .update_tracking = false, + .resurrect = false, + .close_scope = true, + }); + _ = try func.jump(repeat_info.jmp_target); + func.finishAirBookkeeping(); +} + fn airBoolOp(func: *Func, inst: Air.Inst.Index) !void { const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op; const tag: Air.Inst.Tag = func.air.instructions.items(.tag)[@intFromEnum(inst)]; From 0d295d76358037c15d9dc1f56b7b43de29a94d8d Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sun, 1 Sep 2024 02:32:33 -0700 Subject: [PATCH 020/202] riscv: implement `switch_dispatch` & `loop_switch_br` --- src/arch/riscv64/CodeGen.zig | 129 ++++++++++++++++++++++++++++++---- test/behavior/switch_loop.zig | 1 + 2 files changed, 115 insertions(+), 15 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index e29b12e8a4c0..a30f764eb849 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -108,7 +108,7 @@ frame_allocs: std.MultiArrayList(FrameAlloc) = .{}, free_frame_indices: std.AutoArrayHashMapUnmanaged(FrameIndex, void) = .{}, frame_locs: std.MultiArrayList(Mir.FrameLoc) = .{}, -loop_repeat_info: 
std.AutoHashMapUnmanaged(Air.Inst.Index, struct { +loops: std.AutoHashMapUnmanaged(Air.Inst.Index, struct { /// The state to restore before branching. state: State, /// The branch target. @@ -232,11 +232,12 @@ const MCValue = union(enum) { .register, .register_pair, .register_offset, - .load_frame, .load_symbol, .load_tlv, .indirect, => true, + + .load_frame => |frame_addr| !frame_addr.index.isNamed(), }; } @@ -804,7 +805,7 @@ pub fn generate( function.frame_allocs.deinit(gpa); function.free_frame_indices.deinit(gpa); function.frame_locs.deinit(gpa); - function.loop_repeat_info.deinit(gpa); + function.loops.deinit(gpa); var block_it = function.blocks.valueIterator(); while (block_it.next()) |block| block.deinit(gpa); function.blocks.deinit(gpa); @@ -1588,7 +1589,7 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void { .block => try func.airBlock(inst), .br => try func.airBr(inst), .repeat => try func.airRepeat(inst), - .switch_dispatch => return func.fail("TODO implement `switch_dispatch`", .{}), + .switch_dispatch => try func.airSwitchDispatch(inst), .trap => try func.airTrap(), .breakpoint => try func.airBreakpoint(), .ret_addr => try func.airRetAddr(inst), @@ -1678,7 +1679,7 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void { .field_parent_ptr => try func.airFieldParentPtr(inst), .switch_br => try func.airSwitchBr(inst), - .loop_switch_br => return func.fail("TODO implement `loop_switch_br`", .{}), + .loop_switch_br => try func.airLoopSwitchBr(inst), .ptr_slice_len_ptr => try func.airPtrSliceLenPtr(inst), .ptr_slice_ptr_ptr => try func.airPtrSlicePtrPtr(inst), @@ -5610,11 +5611,11 @@ fn airLoop(func: *Func, inst: Air.Inst.Index) !void { func.scope_generation += 1; const state = try func.saveState(); - try func.loop_repeat_info.putNoClobber(func.gpa, inst, .{ + try func.loops.putNoClobber(func.gpa, inst, .{ .state = state, .jmp_target = @intCast(func.mir_instructions.len), }); - defer 
assert(func.loop_repeat_info.remove(inst)); + defer assert(func.loops.remove(inst)); try func.genBody(body); @@ -5671,12 +5672,7 @@ fn lowerBlock(func: *Func, inst: Air.Inst.Index, body: []const Air.Inst.Index) ! fn airSwitchBr(func: *Func, inst: Air.Inst.Index) !void { const switch_br = func.air.unwrapSwitch(inst); - - const liveness = try func.liveness.getSwitchBr(func.gpa, inst, switch_br.cases_len + 1); - defer func.gpa.free(liveness.deaths); - const condition = try func.resolveInst(switch_br.operand); - const condition_ty = func.typeOf(switch_br.operand); // If the condition dies here in this switch instruction, process // that death now instead of later as this has an effect on @@ -5685,6 +5681,22 @@ fn airSwitchBr(func: *Func, inst: Air.Inst.Index) !void { if (switch_br.operand.toIndex()) |op_inst| try func.processDeath(op_inst); } + try func.lowerSwitchBr(inst, switch_br, condition); + + // We already took care of pl_op.operand earlier, so there's nothing left to do + func.finishAirBookkeeping(); +} + +fn lowerSwitchBr( + func: *Func, + inst: Air.Inst.Index, + switch_br: Air.UnwrappedSwitch, + condition: MCValue, +) !void { + const condition_ty = func.typeOf(switch_br.operand); + const liveness = try func.liveness.getSwitchBr(func.gpa, inst, switch_br.cases_len + 1); + defer func.gpa.free(liveness.deaths); + func.scope_generation += 1; const state = try func.saveState(); @@ -5785,8 +5797,92 @@ fn airSwitchBr(func: *Func, inst: Air.Inst.Index) !void { .close_scope = true, }); } +} + +fn airLoopSwitchBr(func: *Func, inst: Air.Inst.Index) !void { + const switch_br = func.air.unwrapSwitch(inst); + const condition = try func.resolveInst(switch_br.operand); + + const mat_cond = if (condition.isMutable() and + func.reuseOperand(inst, switch_br.operand, 0, condition)) + condition + else mat_cond: { + const ty = func.typeOf(switch_br.operand); + const mat_cond = try func.allocRegOrMem(ty, inst, true); + try func.genCopy(ty, mat_cond, condition); + break :mat_cond 
mat_cond; + }; + func.inst_tracking.putAssumeCapacityNoClobber(inst, InstTracking.init(mat_cond)); + + // If the condition dies here in this switch instruction, process + // that death now instead of later as this has an effect on + // whether it needs to be spilled in the branches + if (func.liveness.operandDies(inst, 0)) { + if (switch_br.operand.toIndex()) |op_inst| try func.processDeath(op_inst); + } + + func.scope_generation += 1; + const state = try func.saveState(); + + try func.loops.putNoClobber(func.gpa, inst, .{ + .state = state, + .jmp_target = @intCast(func.mir_instructions.len), + }); + defer assert(func.loops.remove(inst)); + + // Stop tracking block result without forgetting tracking info + try func.freeValue(mat_cond); + + try func.lowerSwitchBr(inst, switch_br, mat_cond); + + try func.processDeath(inst); + func.finishAirBookkeeping(); +} + +fn airSwitchDispatch(func: *Func, inst: Air.Inst.Index) !void { + const br = func.air.instructions.items(.data)[@intFromEnum(inst)].br; + + const block_ty = func.typeOfIndex(br.block_inst); + const block_tracking = func.inst_tracking.getPtr(br.block_inst).?; + const loop_data = func.loops.getPtr(br.block_inst).?; + done: { + try func.getValue(block_tracking.short, null); + const src_mcv = try func.resolveInst(br.operand); + + if (func.reuseOperandAdvanced(inst, br.operand, 0, src_mcv, br.block_inst)) { + try func.getValue(block_tracking.short, br.block_inst); + // .long = .none to avoid merging operand and block result stack frames. 
+ const current_tracking: InstTracking = .{ .long = .none, .short = src_mcv }; + try current_tracking.materializeUnsafe(func, br.block_inst, block_tracking.*); + for (current_tracking.getRegs()) |src_reg| func.register_manager.freeReg(src_reg); + break :done; + } + + try func.getValue(block_tracking.short, br.block_inst); + const dst_mcv = block_tracking.short; + try func.genCopy(block_ty, dst_mcv, try func.resolveInst(br.operand)); + break :done; + } + + // Process operand death so that it is properly accounted for in the State below. + if (func.liveness.operandDies(inst, 0)) { + if (br.operand.toIndex()) |op_inst| try func.processDeath(op_inst); + } + + try func.restoreState(loop_data.state, &.{}, .{ + .emit_instructions = true, + .update_tracking = false, + .resurrect = false, + .close_scope = false, + }); + + // Emit a jump with a relocation. It will be patched up after the block ends. + // Leave the jump offset undefined + _ = try func.jump(loop_data.jmp_target); + + // Stop tracking block result without forgetting tracking info + try func.freeValue(block_tracking.short); - // We already took care of pl_op.operand earlier, so there's nothing left to do func.finishAirBookkeeping(); } @@ -5867,7 +5963,7 @@ fn airBr(func: *Func, inst: Air.Inst.Index) !void { fn airRepeat(func: *Func, inst: Air.Inst.Index) !void { const loop_inst = func.air.instructions.items(.data)[@intFromEnum(inst)].repeat.loop_inst; - const repeat_info = func.loop_repeat_info.get(loop_inst).?; + const repeat_info = func.loops.get(loop_inst).?; try func.restoreState(repeat_info.state, &.{}, .{ .emit_instructions = true, .update_tracking = false, @@ -8298,7 +8394,10 @@ fn typeOf(func: *Func, inst: Air.Inst.Ref) Type { fn typeOfIndex(func: *Func, inst: Air.Inst.Index) Type { const zcu = func.pt.zcu; - return func.air.typeOfIndex(inst, &zcu.intern_pool); + return switch (func.air.instructions.items(.tag)[@intFromEnum(inst)]) { + .loop_switch_br => func.typeOf(func.air.unwrapSwitch(inst).operand), 
+ else => func.air.typeOfIndex(inst, &zcu.intern_pool), + }; } fn hasFeature(func: *Func, feature: Target.riscv.Feature) bool { diff --git a/test/behavior/switch_loop.zig b/test/behavior/switch_loop.zig index e77e23cfd749..d35a4e16368d 100644 --- a/test/behavior/switch_loop.zig +++ b/test/behavior/switch_loop.zig @@ -80,6 +80,7 @@ test "switch loop on tagged union" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const U = union(enum) { From 289c704b60c3e4b65bc00be55266b3f1c3fc27a3 Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 1 Sep 2024 20:31:01 +0100 Subject: [PATCH 021/202] cbe: don't emit 'x = x' in switch dispatch loop --- src/codegen/c.zig | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/codegen/c.zig b/src/codegen/c.zig index c761aa72258c..0ec5513b6f35 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -5065,11 +5065,8 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index, is_dispatch_loop: bool) !void // amount of C code we generate, which is probably more desirable here (and is simpler). 
const condition = if (is_dispatch_loop) cond: { const new_local = try f.allocLocal(inst, condition_ty); - try f.writeCValue(writer, new_local, .Other); - try writer.writeAll(" = "); - try f.writeCValue(writer, init_condition, .Initializer); - try writer.writeAll(";\n"); - try writer.print("zig_switch_{d}_loop:", .{@intFromEnum(inst)}); + try f.copyCValue(try f.ctypeFromType(condition_ty, .complete), new_local, init_condition); + try writer.print("zig_switch_{d}_loop:\n", .{@intFromEnum(inst)}); + try f.loop_switch_conds.put(gpa, inst, new_local.new_local); break :cond new_local; } else init_condition; From 0df1d78d78f21ba27c4f35e02d26deb5af1af4d0 Mon Sep 17 00:00:00 2001 From: Lucas Santos Date: Mon, 2 Sep 2024 23:13:38 -0300 Subject: [PATCH 022/202] `std.equalRange`: Compute lower and upper bounds simultaneously. The current implementation of `equalRange` just calls `lowerBound` and `upperBound`, but a lot of the work done by these two functions can be shared. Specifically, each iteration gives information about whether the lower bound or the upper bound can be tightened. This leads to fewer iterations and, since there is one comparison per iteration, fewer comparisons. 
Implementation adapted from [GCC](https://github.com/gcc-mirror/gcc/blob/519ec1cfe9d2c6a1d06709c52cb103508d2c42a7/libstdc%2B%2B-v3/include/bits/stl_algo.h#L2063) This sample demonstrates the difference between the current implementation and mine: ```zig fn S(comptime T: type) type { return struct { needle: T, count: *usize, pub fn order(context: @This(), item: T) std.math.Order { context.count.* += 1; return std.math.order(item, context.needle); } pub fn orderLength(context: @This(), item: []const u8) std.math.Order { context.count.* += 1; return std.math.order(item.len, context.needle); } }; } pub fn main() !void { var count: usize = 0; try std.testing.expectEqual(.{ 0, 0 }, equalRange(i32, &[_]i32{}, S(i32){ .needle = 0, .count = &count }, S(i32).order)); try std.testing.expectEqual(.{ 0, 0 }, equalRange(i32, &[_]i32{ 2, 4, 8, 16, 32, 64 }, S(i32){ .needle = 0, .count = &count }, S(i32).order)); try std.testing.expectEqual(.{ 0, 1 }, equalRange(i32, &[_]i32{ 2, 4, 8, 16, 32, 64 }, S(i32){ .needle = 2, .count = &count }, S(i32).order)); try std.testing.expectEqual(.{ 2, 2 }, equalRange(i32, &[_]i32{ 2, 4, 8, 16, 32, 64 }, S(i32){ .needle = 5, .count = &count }, S(i32).order)); try std.testing.expectEqual(.{ 2, 3 }, equalRange(i32, &[_]i32{ 2, 4, 8, 16, 32, 64 }, S(i32){ .needle = 8, .count = &count }, S(i32).order)); try std.testing.expectEqual(.{ 5, 6 }, equalRange(i32, &[_]i32{ 2, 4, 8, 16, 32, 64 }, S(i32){ .needle = 64, .count = &count }, S(i32).order)); try std.testing.expectEqual(.{ 6, 6 }, equalRange(i32, &[_]i32{ 2, 4, 8, 16, 32, 64 }, S(i32){ .needle = 100, .count = &count }, S(i32).order)); try std.testing.expectEqual(.{ 2, 6 }, equalRange(i32, &[_]i32{ 2, 4, 8, 8, 8, 8, 15, 22 }, S(i32){ .needle = 8, .count = &count }, S(i32).order)); try std.testing.expectEqual(.{ 2, 2 }, equalRange(u32, &[_]u32{ 2, 4, 8, 16, 32, 64 }, S(u32){ .needle = 5, .count = &count }, S(u32).order)); try std.testing.expectEqual(.{ 1, 1 }, equalRange(f32, &[_]f32{ -54.2, -26.7, 
0.0, 56.55, 100.1, 322.0 }, S(f32){ .needle = -33.4, .count = &count }, S(f32).order)); try std.testing.expectEqual(.{ 3, 5 }, equalRange( []const u8, &[_][]const u8{ "Mars", "Venus", "Earth", "Saturn", "Uranus", "Mercury", "Jupiter", "Neptune" }, S(usize){ .needle = 6, .count = &count }, S(usize).orderLength, )); std.debug.print("Count: {}\n", .{count}); } ``` For each comparison, we bump the count. With the current implementation, we get 57 comparisons. With mine, we get 43. This optimization is orthogonal to left-bias proposed by [21278](https://github.com/ziglang/zig/pull/21278) --- lib/std/sort.zig | 36 ++++++++++++++++++++++++++++++++---- 1 file changed, 32 insertions(+), 4 deletions(-) diff --git a/lib/std/sort.zig b/lib/std/sort.zig index 9dad2949bfdf..640348141d6e 100644 --- a/lib/std/sort.zig +++ b/lib/std/sort.zig @@ -771,10 +771,38 @@ pub fn equalRange( context: anytype, comptime compareFn: fn (@TypeOf(context), T) std.math.Order, ) struct { usize, usize } { - return .{ - lowerBound(T, items, context, compareFn), - upperBound(T, items, context, compareFn), - }; + var low: usize = 0; + var high: usize = items.len; + + while (low < high) { + const mid = low + (high - low) / 2; + switch (compareFn(context, items[mid])) { + .lt => { + low = mid + 1; + }, + .gt => { + high = mid; + }, + .eq => { + return .{ + std.sort.lowerBound( + T, + items[0..mid], + context, + compareFn, + ), + mid + std.sort.upperBound( + T, + items[mid..], + context, + compareFn, + ), + }; + }, + } + } + + return .{ low, low }; } test equalRange { From 8f742ece1e8008617cc40a01d2feb8fa0152f8de Mon Sep 17 00:00:00 2001 From: Lucas Santos Date: Tue, 3 Sep 2024 18:52:47 -0300 Subject: [PATCH 023/202] Reuse precomputed bounds. when calling `lowerBound` and `upperBound`, the previous implementation was discarding information about low and high bounds that had already been computed. Thanks, @Olvilock. 
--- lib/std/sort.zig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/std/sort.zig b/lib/std/sort.zig index 640348141d6e..b925d8a4d8cd 100644 --- a/lib/std/sort.zig +++ b/lib/std/sort.zig @@ -785,15 +785,15 @@ pub fn equalRange( }, .eq => { return .{ - std.sort.lowerBound( + low + std.sort.lowerBound( T, - items[0..mid], + items[low..mid], context, compareFn, ), mid + std.sort.upperBound( T, - items[mid..], + items[mid..high], context, compareFn, ), From 7a4d69983a6ac7ab91f71910cec5886ed5bec745 Mon Sep 17 00:00:00 2001 From: Michael Dusan Date: Tue, 3 Sep 2024 14:37:15 -0400 Subject: [PATCH 024/202] AstGen: update @errorCast to maybe eval to err Consequently, `AstGen.ret()` now passes the error code to `.defer_error_code`. Previously, the error union value was passed. closes #20371 --- lib/std/zig/BuiltinFn.zig | 2 +- test/behavior/defer.zig | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/lib/std/zig/BuiltinFn.zig b/lib/std/zig/BuiltinFn.zig index 1da3ffb5a727..95c6c7be12a2 100644 --- a/lib/std/zig/BuiltinFn.zig +++ b/lib/std/zig/BuiltinFn.zig @@ -482,7 +482,7 @@ pub const list = list: { "@errorCast", .{ .tag = .error_cast, - .eval_to_error = .always, + .eval_to_error = .maybe, .param_count = 1, }, }, diff --git a/test/behavior/defer.zig b/test/behavior/defer.zig index 64bd1a5e0d4f..07519f16b5b8 100644 --- a/test/behavior/defer.zig +++ b/test/behavior/defer.zig @@ -197,3 +197,40 @@ const defer_assign = switch (block: { comptime { if (defer_assign != 0) @compileError("defer_assign failed!"); } + +test "errdefer capture" { + const S = struct { + fail: bool = undefined, + fn bar0(self: *@This()) error{a}!void { + self.fail = false; + errdefer |err| if (@TypeOf(err) != error{a}) { + self.fail = true; + }; + return error.a; + } + fn bar1(self: *@This()) error{a}!void { + self.fail = false; + errdefer |err| if (@TypeOf(err) != error{a}) { + self.fail = true; + }; + const rv: error{a}!void 
= @errorCast(@as(error{a}!void, error.a)); + return rv; + } + // https://github.com/ziglang/zig/issues/20371 + fn bar2(self: *@This()) error{a}!void { + self.fail = false; + errdefer |err| if (@TypeOf(err) != error{a}) { + self.fail = true; + }; + return @errorCast(@as(error{a}!void, error.a)); + } + }; + + var s: S = .{}; + s.bar0() catch {}; + if (s.fail) return error.TestExpectedError; + s.bar1() catch {}; + if (s.fail) return error.TestExpectedError; + s.bar2() catch {}; + if (s.fail) return error.TestExpectedError; +} From 290ccb160e2d884f70ac92d2eb16d56efd380533 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sat, 31 Aug 2024 19:26:16 +0200 Subject: [PATCH 025/202] glibc: Avoid building and linking stub libraries that were emptied in 2.34. Closes #20919. --- src/glibc.zig | 13 +++++++++---- src/link/Elf.zig | 12 ++++++++++++ 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/src/glibc.zig b/src/glibc.zig index 94c180f3d474..ee1b208cdd49 100644 --- a/src/glibc.zig +++ b/src/glibc.zig @@ -16,6 +16,7 @@ const Module = @import("Package/Module.zig"); pub const Lib = struct { name: []const u8, sover: u8, + removed_in: ?Version = null, }; pub const ABI = struct { @@ -34,12 +35,12 @@ pub const ABI = struct { // The order of the elements in this array defines the linking order. 
pub const libs = [_]Lib{ .{ .name = "m", .sover = 6 }, - .{ .name = "pthread", .sover = 0 }, + .{ .name = "pthread", .sover = 0, .removed_in = .{ .major = 2, .minor = 34, .patch = 0 } }, .{ .name = "c", .sover = 6 }, - .{ .name = "dl", .sover = 2 }, - .{ .name = "rt", .sover = 1 }, + .{ .name = "dl", .sover = 2, .removed_in = .{ .major = 2, .minor = 34, .patch = 0 } }, + .{ .name = "rt", .sover = 1, .removed_in = .{ .major = 2, .minor = 34, .patch = 0 } }, .{ .name = "ld", .sover = 2 }, - .{ .name = "util", .sover = 1 }, + .{ .name = "util", .sover = 1, .removed_in = .{ .major = 2, .minor = 34, .patch = 0 } }, .{ .name = "resolv", .sover = 2 }, }; @@ -797,6 +798,10 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !voi defer stubs_asm.deinit(); for (libs, 0..) |lib, lib_i| { + if (lib.removed_in) |rem_in| { + if (target_version.order(rem_in) != .lt) continue; + } + stubs_asm.shrinkRetainingCapacity(0); try stubs_asm.appendSlice(".text\n"); diff --git a/src/link/Elf.zig b/src/link/Elf.zig index e577b8d45aa7..194c1b8ad2ba 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -840,6 +840,10 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod } else if (target.isGnuLibC()) { try system_libs.ensureUnusedCapacity(glibc.libs.len + 1); for (glibc.libs) |lib| { + if (lib.removed_in) |rem_in| { + if (target.os.version_range.linux.glibc.order(rem_in) != .lt) continue; + } + const lib_path = try std.fmt.allocPrint(arena, "{s}{c}lib{s}.so.{d}", .{ comp.glibc_so_files.?.dir_path, fs.path.sep, lib.name, lib.sover, }); @@ -1286,6 +1290,10 @@ fn dumpArgv(self: *Elf, comp: *Compilation) !void { if (needs_grouping) try argv.append("--end-group"); } else if (target.isGnuLibC()) { for (glibc.libs) |lib| { + if (lib.removed_in) |rem_in| { + if (target.os.version_range.linux.glibc.order(rem_in) != .lt) continue; + } + const lib_path = try std.fmt.allocPrint(arena, "{s}{c}lib{s}.so.{d}", .{ comp.glibc_so_files.?.dir_path, 
fs.path.sep, lib.name, lib.sover, }); @@ -2288,6 +2296,10 @@ fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: s if (needs_grouping) try argv.append("--end-group"); } else if (target.isGnuLibC()) { for (glibc.libs) |lib| { + if (lib.removed_in) |rem_in| { + if (target.os.version_range.linux.glibc.order(rem_in) != .lt) continue; + } + const lib_path = try std.fmt.allocPrint(arena, "{s}{c}lib{s}.so.{d}", .{ comp.glibc_so_files.?.dir_path, fs.path.sep, lib.name, lib.sover, }); From e5ee9c1e43d74354f12fc6cf76f39f7a607558ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Tue, 20 Aug 2024 21:29:55 +0200 Subject: [PATCH 026/202] std.elf: Bring the EM enum up to date. Based on: * `include/elf/common.h` in binutils * `include/uapi/linux/elf-em.h` in Linux * https://www.sco.com/developers/gabi/latest/ch4.eheader.html I opted to use the tag naming of binutils because it seems to be by far the most complete and authoritative source at this point in time. 
--- lib/std/Target.zig | 2 +- lib/std/elf.zig | 427 ++++++++++++++++++--------------------------- 2 files changed, 168 insertions(+), 261 deletions(-) diff --git a/lib/std/Target.zig b/lib/std/Target.zig index 3580813aeced..d215e29fc88e 100644 --- a/lib/std/Target.zig +++ b/lib/std/Target.zig @@ -846,7 +846,7 @@ pub fn toElfMachine(target: Target) std.elf.EM { .avr => .AVR, .bpfel, .bpfeb => .BPF, .csky => .CSKY, - .hexagon => .HEXAGON, + .hexagon => .QDSP6, .kalimba => .CSR_KALIMBA, .lanai => .LANAI, .loongarch32, .loongarch64 => .LOONGARCH, diff --git a/lib/std/elf.zig b/lib/std/elf.zig index aefaca4bef83..d92973c314cb 100644 --- a/lib/std/elf.zig +++ b/lib/std/elf.zig @@ -1101,549 +1101,456 @@ pub const Half = u16; pub const EM = enum(u16) { /// No machine NONE = 0, - /// AT&T WE 32100 M32 = 1, - - /// SPARC + /// SUN SPARC SPARC = 2, - - /// Intel 386 + /// Intel 80386 @"386" = 3, - - /// Motorola 68000 + /// Motorola m68k family @"68K" = 4, - - /// Motorola 88000 + /// Motorola m88k family @"88K" = 5, - /// Intel MCU IAMCU = 6, - /// Intel 80860 @"860" = 7, - - /// MIPS R3000 + /// MIPS R3000 (officially, big-endian only) MIPS = 8, - /// IBM System/370 S370 = 9, - - /// MIPS RS3000 Little-endian + /// MIPS R3000 (and R4000) little-endian, Oct 4 1993 Draft (deprecated) MIPS_RS3_LE = 10, - + /// Old version of Sparc v9, from before the ABI (deprecated) + OLD_SPARCV9 = 11, /// SPU Mark II SPU_2 = 13, - - /// Hewlett-Packard PA-RISC + /// HPPA PARISC = 15, - - /// Fujitsu VPP500 + /// Fujitsu VPP500 (also old version of PowerPC; deprecated) VPP500 = 17, - - /// Enhanced instruction set SPARC + /// Sun's "v8plus" SPARC32PLUS = 18, - /// Intel 80960 @"960" = 19, - /// PowerPC PPC = 20, - - /// PowerPC64 + /// 64-bit PowerPC PPC64 = 21, - - /// IBM System/390 + /// IBM S/390 S390 = 22, - - /// IBM SPU/SPC + /// Sony/Toshiba/IBM SPU SPU = 23, - - /// NEC V800 + /// NEC V800 series V800 = 36, - /// Fujitsu FR20 FR20 = 37, - - /// TRW RH-32 + /// TRW RH32 RH32 = 38, - - 
/// Motorola RCE - RCE = 39, - + /// Motorola M*Core, aka RCE (also Fujitsu MMA) + MCORE = 39, /// ARM ARM = 40, - - /// DEC Alpha - ALPHA = 41, - - /// Hitachi SH + /// Digital Alpha + OLD_ALPHA = 41, + /// Renesas (formerly Hitachi) / SuperH SH SH = 42, - - /// SPARC V9 + /// SPARC v9 64-bit SPARCV9 = 43, - - /// Siemens TriCore + /// Siemens Tricore embedded processor TRICORE = 44, - - /// Argonaut RISC Core + /// ARC Cores ARC = 45, - - /// Hitachi H8/300 + /// Renesas (formerly Hitachi) H8/300 H8_300 = 46, - - /// Hitachi H8/300H + /// Renesas (formerly Hitachi) H8/300H H8_300H = 47, - - /// Hitachi H8S + /// Renesas (formerly Hitachi) H8S H8S = 48, - - /// Hitachi H8/500 + /// Renesas (formerly Hitachi) H8/500 H8_500 = 49, - - /// Intel IA-64 processor architecture + /// Intel IA-64 Processor IA_64 = 50, - /// Stanford MIPS-X MIPS_X = 51, - - /// Motorola ColdFire + /// Motorola Coldfire COLDFIRE = 52, - /// Motorola M68HC12 @"68HC12" = 53, - - /// Fujitsu MMA Multimedia Accelerator + /// Fujitsu Multimedia Accelerator MMA = 54, - /// Siemens PCP PCP = 55, - /// Sony nCPU embedded RISC processor NCPU = 56, - /// Denso NDR1 microprocessor NDR1 = 57, - /// Motorola Star*Core processor STARCORE = 58, - /// Toyota ME16 processor ME16 = 59, - /// STMicroelectronics ST100 processor ST100 = 60, - - /// Advanced Logic Corp. TinyJ embedded processor family + /// Advanced Logic Corp. TinyJ embedded processor TINYJ = 61, - - /// AMD x86-64 architecture + /// Advanced Micro Devices X86-64 processor X86_64 = 62, - /// Sony DSP Processor PDSP = 63, - /// Digital Equipment Corp. PDP-10 PDP10 = 64, - /// Digital Equipment Corp. 
PDP-11 PDP11 = 65, - /// Siemens FX66 microcontroller FX66 = 66, - /// STMicroelectronics ST9+ 8/16 bit microcontroller ST9PLUS = 67, - /// STMicroelectronics ST7 8-bit microcontroller ST7 = 68, - /// Motorola MC68HC16 Microcontroller @"68HC16" = 69, - /// Motorola MC68HC11 Microcontroller @"68HC11" = 70, - /// Motorola MC68HC08 Microcontroller @"68HC08" = 71, - /// Motorola MC68HC05 Microcontroller @"68HC05" = 72, - /// Silicon Graphics SVx SVX = 73, - - /// STMicroelectronics ST19 8-bit microcontroller + /// STMicroelectronics ST19 8-bit cpu ST19 = 74, - /// Digital VAX VAX = 75, - /// Axis Communications 32-bit embedded processor CRIS = 76, - - /// Infineon Technologies 32-bit embedded processor + /// Infineon Technologies 32-bit embedded cpu JAVELIN = 77, - - /// Element 14 64-bit DSP Processor + /// Element 14 64-bit DSP processor FIREPATH = 78, - - /// LSI Logic 16-bit DSP Processor + /// LSI Logic's 16-bit DSP processor ZSP = 79, - /// Donald Knuth's educational 64-bit processor MMIX = 80, - - /// Harvard University machine-independent object files + /// Harvard's machine-independent format HUANY = 81, - /// SiTera Prism PRISM = 82, - /// Atmel AVR 8-bit microcontroller AVR = 83, - /// Fujitsu FR30 FR30 = 84, - /// Mitsubishi D10V D10V = 85, - /// Mitsubishi D30V D30V = 86, - - /// NEC v850 + /// Renesas V850 (formerly NEC V850) V850 = 87, - - /// Mitsubishi M32R + /// Renesas M32R (formerly Mitsubishi M32R) M32R = 88, - /// Matsushita MN10300 MN10300 = 89, - /// Matsushita MN10200 MN10200 = 90, - /// picoJava PJ = 91, - - /// OpenRISC 32-bit embedded processor - OPENRISC = 92, - - /// ARC International ARCompact processor (old spelling/synonym: EM_ARC_A5) + /// OpenRISC 1000 32-bit embedded processor + OR1K = 92, + /// ARC International ARCompact processor ARC_COMPACT = 93, - /// Tensilica Xtensa Architecture XTENSA = 94, - - /// Alphamosaic VideoCore processor + /// Alphamosaic VideoCore processor (also old Sunplus S+core7 backend magic number) VIDEOCORE = 
95, - /// Thompson Multimedia General Purpose Processor TMM_GPP = 96, - /// National Semiconductor 32000 series NS32K = 97, - /// Tenor Network TPC processor TPC = 98, - - /// Trebia SNP 1000 processor + /// Trebia SNP 1000 processor (also old value for picoJava; deprecated) SNP1K = 99, - - /// STMicroelectronics (www.st.com) ST200 + /// STMicroelectronics ST200 microcontroller ST200 = 100, - - /// Ubicom IP2xxx microcontroller family + /// Ubicom IP2022 micro controller IP2K = 101, - /// MAX Processor MAX = 102, - - /// National Semiconductor CompactRISC microprocessor + /// National Semiconductor CompactRISC CR = 103, - /// Fujitsu F2MC16 F2MC16 = 104, - - /// Texas Instruments embedded microcontroller msp430 + /// TI msp430 micro controller MSP430 = 105, - - /// Analog Devices Blackfin (DSP) processor + /// ADI Blackfin BLACKFIN = 106, - /// S1C33 Family of Seiko Epson processors SE_C33 = 107, - /// Sharp embedded microprocessor SEP = 108, - /// Arca RISC Microprocessor ARCA = 109, - /// Microprocessor series from PKU-Unity Ltd. and MPRC of Peking University UNICORE = 110, - /// eXcess: 16/32/64-bit configurable embedded CPU EXCESS = 111, - /// Icera Semiconductor Inc. 
Deep Execution Processor DXP = 112, - /// Altera Nios II soft-core processor ALTERA_NIOS2 = 113, - - /// National Semiconductor CompactRISC CRX + /// National Semiconductor CRX CRX = 114, - - /// Motorola XGATE embedded processor + /// Motorola XGATE embedded processor (also old value for National Semiconductor CompactRISC; deprecated) XGATE = 115, - /// Infineon C16x/XC16x processor C166 = 116, - /// Renesas M16C series microprocessors M16C = 117, - /// Microchip Technology dsPIC30F Digital Signal Controller DSPIC30F = 118, - /// Freescale Communication Engine RISC core CE = 119, - /// Renesas M32C series microprocessors M32C = 120, - /// Altium TSK3000 core TSK3000 = 131, - /// Freescale RS08 embedded processor RS08 = 132, - /// Analog Devices SHARC family of 32-bit DSP processors SHARC = 133, - /// Cyan Technology eCOG2 microprocessor ECOG2 = 134, - - /// Sunplus S+core7 RISC processor - SCORE7 = 135, - + /// Sunplus S+core (and S+core7) RISC processor + SCORE = 135, /// New Japan Radio (NJR) 24-bit DSP Processor DSP24 = 136, - /// Broadcom VideoCore III processor VIDEOCORE3 = 137, - /// RISC processor for Lattice FPGA architecture LATTICEMICO32 = 138, - /// Seiko Epson C17 family SE_C17 = 139, - - /// The Texas Instruments TMS320C6000 DSP family + /// Texas Instruments TMS320C6000 DSP family TI_C6000 = 140, - - /// The Texas Instruments TMS320C2000 DSP family + /// Texas Instruments TMS320C2000 DSP family TI_C2000 = 141, - - /// The Texas Instruments TMS320C55x DSP family + /// Texas Instruments TMS320C55x DSP family TI_C5500 = 142, - + /// Texas Instruments Programmable Realtime Unit + TI_PRU = 144, /// STMicroelectronics 64bit VLIW Data Signal Processor MMDSP_PLUS = 160, - /// Cypress M8C microprocessor CYPRESS_M8C = 161, - /// Renesas R32C series microprocessors R32C = 162, - /// NXP Semiconductors TriMedia architecture family TRIMEDIA = 163, - - /// Qualcomm Hexagon processor - HEXAGON = 164, - + /// QUALCOMM DSP6 Processor + QDSP6 = 164, /// Intel 8051 and 
variants @"8051" = 165, - - /// STMicroelectronics STxP7x family of configurable and extensible RISC processors + /// STMicroelectronics STxP7x family STXP7X = 166, - /// Andes Technology compact code size embedded RISC processor family NDS32 = 167, - /// Cyan Technology eCOG1X family ECOG1X = 168, - /// Dallas Semiconductor MAXQ30 Core Micro-controllers MAXQ30 = 169, - /// New Japan Radio (NJR) 16-bit DSP Processor XIMO16 = 170, - /// M2000 Reconfigurable RISC Microprocessor MANIK = 171, - /// Cray Inc. NV2 vector architecture CRAYNV2 = 172, - /// Renesas RX family RX = 173, - - /// Imagination Technologies META processor architecture + /// Imagination Technologies Meta processor architecture METAG = 174, - /// MCST Elbrus general purpose hardware architecture MCST_ELBRUS = 175, - /// Cyan Technology eCOG16 family ECOG16 = 176, - - /// National Semiconductor CompactRISC CR16 16-bit microprocessor + /// National Semiconductor CompactRISC 16-bit processor CR16 = 177, - /// Freescale Extended Time Processing Unit ETPU = 178, - /// Infineon Technologies SLE9X core SLE9X = 179, - /// Intel L10M L10M = 180, - /// Intel K10M K10M = 181, - - /// ARM AArch64 + /// ARM 64-bit architecture AARCH64 = 183, - /// Atmel Corporation 32-bit microprocessor family AVR32 = 185, - /// STMicroeletronics STM8 8-bit microcontroller STM8 = 186, - /// Tilera TILE64 multicore architecture family TILE64 = 187, - /// Tilera TILEPro multicore architecture family TILEPRO = 188, - - /// Xilinx MicroBlaze + /// Xilinx MicroBlaze 32-bit RISC soft processor core MICROBLAZE = 189, - /// NVIDIA CUDA architecture CUDA = 190, - /// Tilera TILE-Gx multicore architecture family TILEGX = 191, - /// CloudShield architecture family CLOUDSHIELD = 192, - /// KIPO-KAIST Core-A 1st generation processor family COREA_1ST = 193, - /// KIPO-KAIST Core-A 2nd generation processor family COREA_2ND = 194, - /// Synopsys ARCompact V2 ARC_COMPACT2 = 195, - /// Open8 8-bit RISC soft processor core OPEN8 = 196, - /// 
Renesas RL78 family RL78 = 197, - /// Broadcom VideoCore V processor VIDEOCORE5 = 198, - - /// Renesas 78KOR family - @"78KOR" = 199, - + /// Renesas 78K0R + @"78K0R" = 199, /// Freescale 56800EX Digital Signal Controller (DSC) @"56800EX" = 200, - /// Beyond BA1 CPU architecture BA1 = 201, - /// Beyond BA2 CPU architecture BA2 = 202, - /// XMOS xCORE processor family XCORE = 203, - /// Microchip 8-bit PIC(r) family MCHP_PIC = 204, - - /// Reserved by Intel - INTEL205 = 205, - - /// Reserved by Intel - INTEL206 = 206, - - /// Reserved by Intel - INTEL207 = 207, - - /// Reserved by Intel - INTEL208 = 208, - - /// Reserved by Intel - INTEL209 = 209, - + /// Intel Graphics Technology + INTELGT = 205, /// KM211 KM32 32-bit processor KM32 = 210, - /// KM211 KMX32 32-bit processor KMX32 = 211, - /// KM211 KMX16 16-bit processor KMX16 = 212, - /// KM211 KMX8 8-bit processor KMX8 = 213, - /// KM211 KVARC processor KVARC = 214, - /// Paneve CDP architecture family CDP = 215, - /// Cognitive Smart Memory Processor COGE = 216, - - /// iCelero CoolEngine + /// Bluechip Systems CoolEngine COOL = 217, - /// Nanoradio Optimized RISC NORC = 218, - /// CSR Kalimba architecture family CSR_KALIMBA = 219, - + /// Zilog Z80 + Z80 = 220, + /// Controls and Data Services VISIUMcore processor + VISIUM = 221, + /// FTDI Chip FT32 high performance 32-bit RISC architecture + FT32 = 222, + /// Moxie processor family + MOXIE = 223, /// AMD GPU architecture AMDGPU = 224, - /// RISC-V RISCV = 243, - /// Lanai 32-bit processor LANAI = 244, - - /// Linux kernel bpf virtual machine + /// CEVA Processor Architecture Family + CEVA = 245, + /// CEVA X2 Processor Family + CEVA_X2 = 246, + /// Linux BPF - in-kernel virtual machine BPF = 247, - - /// C-SKY + /// Graphcore Intelligent Processing Unit + GRAPHCORE_IPU = 248, + /// Imagination Technologies + IMG1 = 249, + /// Netronome Flow Processor + NFP = 250, + /// NEC Vector Engine + VE = 251, + /// C-SKY processor family CSKY = 252, - + /// Synopsys 
ARCv2.3 64-bit + ARC_COMPACT3_64 = 253, + /// MOS Technology MCS 6502 processor + MCS6502 = 254, + /// Synopsys ARCv2.3 32-bit + ARC_COMPACT3 = 255, + /// Kalray VLIW core of the MPPA processor family + KVX = 256, + /// WDC 65816/65C816 + @"65816" = 257, /// LoongArch LOONGARCH = 258, - - /// Fujitsu FR-V - FRV = 0x5441, + /// ChipON KungFu32 + KF32 = 259, + /// LAPIS nX-U16/U8 + U16_U8CORE = 260, + /// Tachyum + TACHYUM = 261, + /// NXP 56800EF Digital Signal Controller (DSC) + @"56800EF" = 262, + /// AVR + AVR_OLD = 0x1057, + /// MSP430 + MSP430_OLD = 0x1059, + /// Morpho MT + MT = 0x2530, + /// FR30 + CYGNUS_FR30 = 0x3330, + /// WebAssembly (as used by LLVM) + WEBASSEMBLY = 0x4157, + /// Infineon Technologies 16-bit microcontroller with C166-V2 core + XC16X = 0x4688, + /// Freescale S12Z + S12Z = 0x4def, + /// DLX + DLX = 0x5aa5, + /// FRV + CYGNUS_FRV = 0x5441, + /// D10V + CYGNUS_D10V = 0x7650, + /// D30V + CYGNUS_D30V = 0x7676, + /// Ubicom IP2xxx + IP2K_OLD = 0x8217, + /// Cygnus PowerPC ELF + CYGNUS_POWERPC = 0x9025, + /// Alpha + ALPHA = 0x9026, + /// Cygnus M32R ELF + CYGNUS_M32R = 0x9041, + /// V850 + CYGNUS_V850 = 0x9080, + /// Old S/390 + S390_OLD = 0xa390, + /// Old unofficial value for Xtensa + XTENSA_OLD = 0xabc7, + /// Xstormy16 + XSTORMY16 = 0xad45, + /// MN10300 + CYGNUS_MN10300 = 0xbeef, + /// MN10200 + CYGNUS_MN10200 = 0xdead, + /// Renesas M32C and M16C + M32C_OLD = 0xfeb0, + /// Vitesse IQ2000 + IQ2000 = 0xfeba, + /// NIOS + NIOS32 = 0xfebb, + /// Toshiba MeP + CYGNUS_MEP = 0xf00d, + /// Old unofficial value for Moxie + MOXIE_OLD = 0xfeed, + /// Old MicroBlaze + MICROBLAZE_OLD = 0xbaab, + /// Adapteva's Epiphany architecture + ADAPTEVA_EPIPHANY = 0x1223, _, }; From f87dd43c1285d38d7a0f3092f6487bf1e1f4faa6 Mon Sep 17 00:00:00 2001 From: Arwalk Date: Wed, 4 Sep 2024 10:10:12 +0200 Subject: [PATCH 027/202] stdlib : base64 encode to writer (#20961) --- lib/std/base64.zig | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 
insertions(+) diff --git a/lib/std/base64.zig b/lib/std/base64.zig index 2627480295bd..243f20644511 100644 --- a/lib/std/base64.zig +++ b/lib/std/base64.zig @@ -5,6 +5,7 @@ const assert = std.debug.assert; const builtin = @import("builtin"); const testing = std.testing; const mem = std.mem; +const window = mem.window; pub const Error = error{ InvalidCharacter, @@ -98,6 +99,32 @@ pub const Base64Encoder = struct { } } + // dest must be compatible with std.io.Writer's writeAll interface + pub fn encodeWriter(encoder: *const Base64Encoder, dest: anytype, source: []const u8) !void { + var chunker = window(u8, source, 3, 3); + while (chunker.next()) |chunk| { + var temp: [5]u8 = undefined; + const s = encoder.encode(&temp, chunk); + try dest.writeAll(s); + } + } + + // destWriter must be compatible with std.io.Writer's writeAll interface + // sourceReader must be compatible with std.io.Reader's read interface + pub fn encodeFromReaderToWriter(encoder: *const Base64Encoder, destWriter: anytype, sourceReader: anytype) !void { + while (true) { + var tempSource: [3]u8 = undefined; + const bytesRead = try sourceReader.read(&tempSource); + if (bytesRead == 0) { + break; + } + + var temp: [5]u8 = undefined; + const s = encoder.encode(&temp, tempSource[0..bytesRead]); + try destWriter.writeAll(s); + } + } + /// dest.len must at least be what you get from ::calcSize. 
pub fn encode(encoder: *const Base64Encoder, dest: []u8, source: []const u8) []const u8 { const out_len = encoder.calcSize(source.len); @@ -477,9 +504,21 @@ fn testBase64UrlSafeNoPad() !void { fn testAllApis(codecs: Codecs, expected_decoded: []const u8, expected_encoded: []const u8) !void { // Base64Encoder { + // raw encode var buffer: [0x100]u8 = undefined; const encoded = codecs.Encoder.encode(&buffer, expected_decoded); try testing.expectEqualSlices(u8, expected_encoded, encoded); + + // stream encode + var list = try std.BoundedArray(u8, 0x100).init(0); + try codecs.Encoder.encodeWriter(list.writer(), expected_decoded); + try testing.expectEqualSlices(u8, expected_encoded, list.slice()); + + // reader to writer encode + var stream = std.io.fixedBufferStream(expected_decoded); + list = try std.BoundedArray(u8, 0x100).init(0); + try codecs.Encoder.encodeFromReaderToWriter(list.writer(), stream.reader()); + try testing.expectEqualSlices(u8, expected_encoded, list.slice()); } // Base64Decoder From b44dd599ad4a0f8a6684acd6e3c5a02f3c7b8f04 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Tue, 27 Aug 2024 12:53:05 +0200 Subject: [PATCH 028/202] elf: split Atom.allocate into Atom-independent parts --- src/link/Elf.zig | 77 +++++++++++++++++++++ src/link/Elf/Atom.zig | 134 ------------------------------------- src/link/Elf/ZigObject.zig | 66 ++++++++++++++++-- 3 files changed, 136 insertions(+), 141 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 194c1b8ad2ba..4957811e7a4e 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -675,6 +675,83 @@ pub fn markDirty(self: *Elf, shdr_index: u32) void { } } +const AllocateChunkResult = struct { + value: u64, + placement: Ref, +}; + +pub fn allocateChunk(self: *Elf, shndx: u32, size: u64, alignment: Atom.Alignment) !AllocateChunkResult { + const slice = self.sections.slice(); + const shdr = &slice.items(.shdr)[shndx]; + const free_list = &slice.items(.free_list)[shndx]; + const last_atom_ref = 
&slice.items(.last_atom)[shndx]; + const new_atom_ideal_capacity = padToIdeal(size); + + // First we look for an appropriately sized free list node. + // The list is unordered. We'll just take the first thing that works. + const res: AllocateChunkResult = blk: { + var i: usize = if (self.base.child_pid == null) 0 else free_list.items.len; + while (i < free_list.items.len) { + const big_atom_ref = free_list.items[i]; + const big_atom = self.atom(big_atom_ref).?; + // We now have a pointer to a live atom that has too much capacity. + // Is it enough that we could fit this new atom? + const cap = big_atom.capacity(self); + const ideal_capacity = padToIdeal(cap); + const ideal_capacity_end_vaddr = std.math.add(u64, @intCast(big_atom.value), ideal_capacity) catch ideal_capacity; + const capacity_end_vaddr = @as(u64, @intCast(big_atom.value)) + cap; + const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity; + const new_start_vaddr = alignment.backward(new_start_vaddr_unaligned); + if (new_start_vaddr < ideal_capacity_end_vaddr) { + // Additional bookkeeping here to notice if this free list node + // should be deleted because the block that it points to has grown to take up + // more of the extra capacity. + if (!big_atom.freeListEligible(self)) { + _ = free_list.swapRemove(i); + } else { + i += 1; + } + continue; + } + // At this point we know that we will place the new block here. But the + // remaining question is whether there is still yet enough capacity left + // over for there to still be a free list node. 
+ const remaining_capacity = new_start_vaddr - ideal_capacity_end_vaddr; + const keep_free_list_node = remaining_capacity >= min_text_capacity; + + if (!keep_free_list_node) { + _ = free_list.swapRemove(i); + } + break :blk .{ .value = new_start_vaddr, .placement = big_atom_ref }; + } else if (self.atom(last_atom_ref.*)) |last_atom| { + const ideal_capacity = padToIdeal(last_atom.size); + const ideal_capacity_end_vaddr = @as(u64, @intCast(last_atom.value)) + ideal_capacity; + const new_start_vaddr = alignment.forward(ideal_capacity_end_vaddr); + break :blk .{ .value = new_start_vaddr, .placement = last_atom.ref() }; + } else { + break :blk .{ .value = 0, .placement = .{} }; + } + }; + + log.debug("allocated chunk (size({x}),align({x})) at 0x{x} (file(0x{x}))", .{ + size, + alignment.toByteUnits().?, + shdr.sh_addr + res.value, + shdr.sh_offset + res.value, + }); + + const expand_section = if (self.atom(res.placement)) |placement_atom| + placement_atom.nextAtom(self) == null + else + true; + if (expand_section) { + const needed_size = res.value + size; + try self.growAllocSection(shndx, needed_size); + } + + return res; +} + pub fn flush(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void { const use_lld = build_options.have_llvm and self.base.comp.config.use_lld; if (use_lld) { diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig index ef2301f1cd74..3d1fe04fb507 100644 --- a/src/link/Elf/Atom.zig +++ b/src/link/Elf/Atom.zig @@ -123,140 +123,6 @@ pub fn freeListEligible(self: Atom, elf_file: *Elf) bool { return surplus >= Elf.min_text_capacity; } -pub fn allocate(self: *Atom, elf_file: *Elf) !void { - const slice = elf_file.sections.slice(); - const shdr = &slice.items(.shdr)[self.output_section_index]; - const free_list = &slice.items(.free_list)[self.output_section_index]; - const last_atom_ref = &slice.items(.last_atom)[self.output_section_index]; - const new_atom_ideal_capacity = 
Elf.padToIdeal(self.size); - - // We use these to indicate our intention to update metadata, placing the new atom, - // and possibly removing a free list node. - // It would be simpler to do it inside the for loop below, but that would cause a - // problem if an error was returned later in the function. So this action - // is actually carried out at the end of the function, when errors are no longer possible. - var atom_placement: ?Elf.Ref = null; - var free_list_removal: ?usize = null; - - // First we look for an appropriately sized free list node. - // The list is unordered. We'll just take the first thing that works. - self.value = blk: { - var i: usize = if (elf_file.base.child_pid == null) 0 else free_list.items.len; - while (i < free_list.items.len) { - const big_atom_ref = free_list.items[i]; - const big_atom = elf_file.atom(big_atom_ref).?; - // We now have a pointer to a live atom that has too much capacity. - // Is it enough that we could fit this new atom? - const cap = big_atom.capacity(elf_file); - const ideal_capacity = Elf.padToIdeal(cap); - const ideal_capacity_end_vaddr = std.math.add(u64, @intCast(big_atom.value), ideal_capacity) catch ideal_capacity; - const capacity_end_vaddr = @as(u64, @intCast(big_atom.value)) + cap; - const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity; - const new_start_vaddr = self.alignment.backward(new_start_vaddr_unaligned); - if (new_start_vaddr < ideal_capacity_end_vaddr) { - // Additional bookkeeping here to notice if this free list node - // should be deleted because the block that it points to has grown to take up - // more of the extra capacity. - if (!big_atom.freeListEligible(elf_file)) { - _ = free_list.swapRemove(i); - } else { - i += 1; - } - continue; - } - // At this point we know that we will place the new block here. But the - // remaining question is whether there is still yet enough capacity left - // over for there to still be a free list node. 
- const remaining_capacity = new_start_vaddr - ideal_capacity_end_vaddr; - const keep_free_list_node = remaining_capacity >= Elf.min_text_capacity; - - // Set up the metadata to be updated, after errors are no longer possible. - atom_placement = big_atom_ref; - if (!keep_free_list_node) { - free_list_removal = i; - } - break :blk @intCast(new_start_vaddr); - } else if (elf_file.atom(last_atom_ref.*)) |last_atom| { - const ideal_capacity = Elf.padToIdeal(last_atom.size); - const ideal_capacity_end_vaddr = @as(u64, @intCast(last_atom.value)) + ideal_capacity; - const new_start_vaddr = self.alignment.forward(ideal_capacity_end_vaddr); - // Set up the metadata to be updated, after errors are no longer possible. - atom_placement = last_atom.ref(); - break :blk @intCast(new_start_vaddr); - } else { - break :blk 0; - } - }; - - log.debug("allocated atom({}) : '{s}' at 0x{x} to 0x{x}", .{ - self.ref(), - self.name(elf_file), - self.address(elf_file), - self.address(elf_file) + @as(i64, @intCast(self.size)), - }); - - const expand_section = if (atom_placement) |placement_ref| - elf_file.atom(placement_ref).?.nextAtom(elf_file) == null - else - true; - if (expand_section) { - const needed_size: u64 = @intCast(self.value + @as(i64, @intCast(self.size))); - try elf_file.growAllocSection(self.output_section_index, needed_size); - last_atom_ref.* = self.ref(); - - switch (self.file(elf_file).?) { - .zig_object => |zo| if (zo.dwarf) |_| { - // The .debug_info section has `low_pc` and `high_pc` values which is the virtual address - // range of the compilation unit. When we expand the text section, this range changes, - // so the DW_TAG.compile_unit tag of the .debug_info section becomes dirty. - zo.debug_info_section_dirty = true; - // This becomes dirty for the same reason. We could potentially make this more - // fine-grained with the addition of support for more compilation units. It is planned to - // model each package as a different compilation unit. 
- zo.debug_aranges_section_dirty = true; - zo.debug_rnglists_section_dirty = true; - }, - else => {}, - } - } - shdr.sh_addralign = @max(shdr.sh_addralign, self.alignment.toByteUnits().?); - - // This function can also reallocate an atom. - // In this case we need to "unplug" it from its previous location before - // plugging it in to its new location. - if (self.prevAtom(elf_file)) |prev| { - prev.next_atom_ref = self.next_atom_ref; - } - if (self.nextAtom(elf_file)) |next| { - next.prev_atom_ref = self.prev_atom_ref; - } - - if (atom_placement) |big_atom_ref| { - const big_atom = elf_file.atom(big_atom_ref).?; - self.prev_atom_ref = big_atom_ref; - self.next_atom_ref = big_atom.next_atom_ref; - big_atom.next_atom_ref = self.ref(); - } else { - self.prev_atom_ref = .{ .index = 0, .file = 0 }; - self.next_atom_ref = .{ .index = 0, .file = 0 }; - } - if (free_list_removal) |i| { - _ = free_list.swapRemove(i); - } - - self.alive = true; -} - -pub fn shrink(self: *Atom, elf_file: *Elf) void { - _ = self; - _ = elf_file; -} - -pub fn grow(self: *Atom, elf_file: *Elf) !void { - if (!self.alignment.check(@intCast(self.value)) or self.size > self.capacity(elf_file)) - try self.allocate(elf_file); -} - pub fn free(self: *Atom, elf_file: *Elf) void { log.debug("freeAtom atom({}) ({s})", .{ self.ref(), self.name(elf_file) }); diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 549657800cf4..ce839a512b1e 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -1362,19 +1362,18 @@ fn updateNavCode( const capacity = atom_ptr.capacity(elf_file); const need_realloc = code.len > capacity or !required_alignment.check(@intCast(atom_ptr.value)); if (need_realloc) { - try atom_ptr.grow(elf_file); + try self.growAtom(atom_ptr, elf_file); log.debug("growing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), old_vaddr, atom_ptr.value }); if (old_vaddr != atom_ptr.value) { sym.value = 0; esym.st_value = 0; } } else if (code.len < old_size) { - 
atom_ptr.shrink(elf_file); + // TODO shrink section size } } else { - try atom_ptr.allocate(elf_file); + try self.allocateAtom(atom_ptr, elf_file); errdefer self.freeNavMetadata(elf_file, sym_index); - sym.value = 0; esym.st_value = 0; } @@ -1739,7 +1738,7 @@ fn updateLazySymbol( atom_ptr.size = code.len; atom_ptr.output_section_index = output_section_index; - try atom_ptr.allocate(elf_file); + try self.allocateAtom(atom_ptr, elf_file); errdefer self.freeNavMetadata(elf_file, symbol_index); local_sym.value = 0; @@ -1797,8 +1796,7 @@ fn lowerConst( atom_ptr.size = code.len; atom_ptr.output_section_index = output_section_index; - try atom_ptr.allocate(elf_file); - // TODO rename and re-audit this method + try self.allocateAtom(atom_ptr, elf_file); errdefer self.freeNavMetadata(elf_file, sym_index); const shdr = elf_file.sections.items(.shdr)[output_section_index]; @@ -1998,6 +1996,60 @@ fn writeTrampoline(tr_sym: Symbol, target: Symbol, elf_file: *Elf) !void { } } +fn allocateAtom(self: *ZigObject, atom_ptr: *Atom, elf_file: *Elf) !void { + const alloc_res = try elf_file.allocateChunk(atom_ptr.output_section_index, atom_ptr.size, atom_ptr.alignment); + atom_ptr.value = @intCast(alloc_res.value); + + const slice = elf_file.sections.slice(); + const shdr = &slice.items(.shdr)[atom_ptr.output_section_index]; + const last_atom_ref = &slice.items(.last_atom)[atom_ptr.output_section_index]; + + const expand_section = if (elf_file.atom(alloc_res.placement)) |placement_atom| + placement_atom.nextAtom(elf_file) == null + else + true; + if (expand_section) { + last_atom_ref.* = atom_ptr.ref(); + if (self.dwarf) |_| { + // The .debug_info section has `low_pc` and `high_pc` values which is the virtual address + // range of the compilation unit. When we expand the text section, this range changes, + // so the DW_TAG.compile_unit tag of the .debug_info section becomes dirty. + self.debug_info_section_dirty = true; + // This becomes dirty for the same reason. 
We could potentially make this more + // fine-grained with the addition of support for more compilation units. It is planned to + // model each package as a different compilation unit. + self.debug_aranges_section_dirty = true; + self.debug_rnglists_section_dirty = true; + } + } + shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits().?); + + // This function can also reallocate an atom. + // In this case we need to "unplug" it from its previous location before + // plugging it in to its new location. + if (atom_ptr.prevAtom(elf_file)) |prev| { + prev.next_atom_ref = atom_ptr.next_atom_ref; + } + if (atom_ptr.nextAtom(elf_file)) |next| { + next.prev_atom_ref = atom_ptr.prev_atom_ref; + } + + if (elf_file.atom(alloc_res.placement)) |big_atom| { + atom_ptr.prev_atom_ref = alloc_res.placement; + atom_ptr.next_atom_ref = big_atom.next_atom_ref; + big_atom.next_atom_ref = atom_ptr.ref(); + } else { + atom_ptr.prev_atom_ref = .{ .index = 0, .file = 0 }; + atom_ptr.next_atom_ref = .{ .index = 0, .file = 0 }; + } +} + +fn growAtom(self: *ZigObject, atom_ptr: *Atom, elf_file: *Elf) !void { + if (!atom_ptr.alignment.check(@intCast(atom_ptr.value)) or atom_ptr.size > atom_ptr.capacity(elf_file)) { + try self.allocateAtom(atom_ptr, elf_file); + } +} + pub fn asFile(self: *ZigObject) File { return .{ .zig_object = self }; } From d32af9ea2ade7c447ac8b882076dba337a10dc51 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Tue, 27 Aug 2024 15:22:33 +0200 Subject: [PATCH 029/202] elf: move initOutputSection into Elf from Object --- src/link/Elf.zig | 53 ++++++++++++++++++++++++++++ src/link/Elf/Object.zig | 76 +++++++++++------------------------------ 2 files changed, 73 insertions(+), 56 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 4957811e7a4e..1ef5a562a811 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -1827,6 +1827,59 @@ fn scanRelocs(self: *Elf) !void { } } +pub fn initOutputSection(self: *Elf, args: struct { + name: [:0]const 
u8, + flags: u64, + type: u32, +}) error{OutOfMemory}!u32 { + const name = blk: { + if (self.base.isRelocatable()) break :blk args.name; + if (args.flags & elf.SHF_MERGE != 0) break :blk args.name; + const name_prefixes: []const [:0]const u8 = &.{ + ".text", ".data.rel.ro", ".data", ".rodata", ".bss.rel.ro", ".bss", + ".init_array", ".fini_array", ".tbss", ".tdata", ".gcc_except_table", ".ctors", + ".dtors", ".gnu.warning", + }; + inline for (name_prefixes) |prefix| { + if (std.mem.eql(u8, args.name, prefix) or std.mem.startsWith(u8, args.name, prefix ++ ".")) { + break :blk prefix; + } + } + break :blk args.name; + }; + const @"type" = tt: { + if (self.getTarget().cpu.arch == .x86_64 and + args.type == elf.SHT_X86_64_UNWIND) break :tt elf.SHT_PROGBITS; + switch (args.type) { + elf.SHT_NULL => unreachable, + elf.SHT_PROGBITS => { + if (std.mem.eql(u8, args.name, ".init_array") or std.mem.startsWith(u8, args.name, ".init_array.")) + break :tt elf.SHT_INIT_ARRAY; + if (std.mem.eql(u8, args.name, ".fini_array") or std.mem.startsWith(u8, args.name, ".fini_array.")) + break :tt elf.SHT_FINI_ARRAY; + break :tt args.type; + }, + else => break :tt args.type, + } + }; + const flags = blk: { + var flags = args.flags; + if (!self.base.isRelocatable()) { + flags &= ~@as(u64, elf.SHF_COMPRESSED | elf.SHF_GROUP | elf.SHF_GNU_RETAIN); + } + break :blk switch (@"type") { + elf.SHT_INIT_ARRAY, elf.SHT_FINI_ARRAY => flags | elf.SHF_WRITE, + else => flags, + }; + }; + const out_shndx = self.sectionByName(name) orelse try self.addSection(.{ + .type = @"type", + .flags = flags, + .name = try self.insertShString(name), + }); + return out_shndx; +} + fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) !void { dev.check(.lld_linker); diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig index 40d09c8ec409..c4443bb67e4a 100644 --- a/src/link/Elf/Object.zig +++ b/src/link/Elf/Object.zig @@ -311,58 +311,6 @@ fn initAtoms(self: *Object, 
allocator: Allocator, handle: std.fs.File, elf_file: }; } -fn initOutputSection(self: Object, elf_file: *Elf, shdr: elf.Elf64_Shdr) error{OutOfMemory}!u32 { - const name = blk: { - const name = self.getString(shdr.sh_name); - if (elf_file.base.isRelocatable()) break :blk name; - if (shdr.sh_flags & elf.SHF_MERGE != 0) break :blk name; - const sh_name_prefixes: []const [:0]const u8 = &.{ - ".text", ".data.rel.ro", ".data", ".rodata", ".bss.rel.ro", ".bss", - ".init_array", ".fini_array", ".tbss", ".tdata", ".gcc_except_table", ".ctors", - ".dtors", ".gnu.warning", - }; - inline for (sh_name_prefixes) |prefix| { - if (std.mem.eql(u8, name, prefix) or std.mem.startsWith(u8, name, prefix ++ ".")) { - break :blk prefix; - } - } - break :blk name; - }; - const @"type" = tt: { - if (elf_file.getTarget().cpu.arch == .x86_64 and - shdr.sh_type == elf.SHT_X86_64_UNWIND) break :tt elf.SHT_PROGBITS; - - const @"type" = switch (shdr.sh_type) { - elf.SHT_NULL => unreachable, - elf.SHT_PROGBITS => blk: { - if (std.mem.eql(u8, name, ".init_array") or std.mem.startsWith(u8, name, ".init_array.")) - break :blk elf.SHT_INIT_ARRAY; - if (std.mem.eql(u8, name, ".fini_array") or std.mem.startsWith(u8, name, ".fini_array.")) - break :blk elf.SHT_FINI_ARRAY; - break :blk shdr.sh_type; - }, - else => shdr.sh_type, - }; - break :tt @"type"; - }; - const flags = blk: { - var flags = shdr.sh_flags; - if (!elf_file.base.isRelocatable()) { - flags &= ~@as(u64, elf.SHF_COMPRESSED | elf.SHF_GROUP | elf.SHF_GNU_RETAIN); - } - break :blk switch (@"type") { - elf.SHT_INIT_ARRAY, elf.SHT_FINI_ARRAY => flags | elf.SHF_WRITE, - else => flags, - }; - }; - const out_shndx = elf_file.sectionByName(name) orelse try elf_file.addSection(.{ - .type = @"type", - .flags = flags, - .name = try elf_file.insertShString(name), - }); - return out_shndx; -} - fn skipShdr(self: *Object, index: u32, elf_file: *Elf) bool { const comp = elf_file.base.comp; const shdr = self.shdrs.items[index]; @@ -985,7 +933,11 @@ pub fn 
initOutputSections(self: *Object, elf_file: *Elf) !void { const atom_ptr = self.atom(atom_index) orelse continue; if (!atom_ptr.alive) continue; const shdr = atom_ptr.inputShdr(elf_file); - _ = try self.initOutputSection(elf_file, shdr); + _ = try elf_file.initOutputSection(.{ + .name = self.getString(shdr.sh_name), + .flags = shdr.sh_flags, + .type = shdr.sh_type, + }); } } @@ -994,7 +946,11 @@ pub fn addAtomsToOutputSections(self: *Object, elf_file: *Elf) !void { const atom_ptr = self.atom(atom_index) orelse continue; if (!atom_ptr.alive) continue; const shdr = atom_ptr.inputShdr(elf_file); - atom_ptr.output_section_index = self.initOutputSection(elf_file, shdr) catch unreachable; + atom_ptr.output_section_index = elf_file.initOutputSection(.{ + .name = self.getString(shdr.sh_name), + .flags = shdr.sh_flags, + .type = shdr.sh_type, + }) catch unreachable; const comp = elf_file.base.comp; const gpa = comp.gpa; @@ -1009,7 +965,11 @@ pub fn initRelaSections(self: *Object, elf_file: *Elf) !void { if (!atom_ptr.alive) continue; const shndx = atom_ptr.relocsShndx() orelse continue; const shdr = self.shdrs.items[shndx]; - const out_shndx = try self.initOutputSection(elf_file, shdr); + const out_shndx = try elf_file.initOutputSection(.{ + .name = self.getString(shdr.sh_name), + .flags = shdr.sh_flags, + .type = shdr.sh_type, + }); const out_shdr = &elf_file.sections.items(.shdr)[out_shndx]; out_shdr.sh_type = elf.SHT_RELA; out_shdr.sh_addralign = @alignOf(elf.Elf64_Rela); @@ -1025,7 +985,11 @@ pub fn addAtomsToRelaSections(self: *Object, elf_file: *Elf) !void { const shndx = blk: { const shndx = atom_ptr.relocsShndx() orelse continue; const shdr = self.shdrs.items[shndx]; - break :blk self.initOutputSection(elf_file, shdr) catch unreachable; + break :blk elf_file.initOutputSection(.{ + .name = self.getString(shdr.sh_name), + .flags = shdr.sh_flags, + .type = shdr.sh_type, + }) catch unreachable; }; const slice = elf_file.sections.slice(); const shdr = 
&slice.items(.shdr)[shndx]; From 8f1ce3c85b6463206147119dd56ba415f6e4d28e Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Tue, 27 Aug 2024 15:34:13 +0200 Subject: [PATCH 030/202] elf: shuffle some stages to make it clear what needs what --- src/link/Elf.zig | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 1ef5a562a811..bfbc34503fa9 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -1049,14 +1049,16 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod try self.initSyntheticSections(); try self.initSpecialPhdrs(); try self.sortShdrs(); - for (self.objects.items) |index| { - try self.file(index).?.object.addAtomsToOutputSections(self); - } - try self.sortInitFini(); + try self.setDynamicSection(rpath_table.keys()); self.sortDynamicSymtab(); try self.setHashSections(); try self.setVersionSymtab(); + + for (self.objects.items) |index| { + try self.file(index).?.object.addAtomsToOutputSections(self); + } + try self.sortInitFini(); try self.updateMergeSectionSizes(); try self.updateSectionSizes(); From 37a1f0e7f2586b1fd3c88c3483f0c9f731f41a30 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 28 Aug 2024 06:57:55 +0200 Subject: [PATCH 031/202] elf: allocate .bss in ZigObject similarly to .eh_frame --- src/link/Elf.zig | 16 +++--- src/link/Elf/ZigObject.zig | 114 +++++++++++++++++-------------------- 2 files changed, 61 insertions(+), 69 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index bfbc34503fa9..5757e40ab88a 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -61,8 +61,6 @@ phdr_zig_load_re_index: ?u16 = null, phdr_zig_load_ro_index: ?u16 = null, /// The index into the program headers of a PT_LOAD program header with Write flag phdr_zig_load_rw_index: ?u16 = null, -/// The index into the program headers of a PT_LOAD program header with zerofill data. 
-phdr_zig_load_zerofill_index: ?u16 = null, /// Special program headers /// PT_PHDR @@ -129,7 +127,6 @@ comdat_group_sections: std.ArrayListUnmanaged(ComdatGroupSection) = .{}, zig_text_section_index: ?u32 = null, zig_data_rel_ro_section_index: ?u32 = null, zig_data_section_index: ?u32 = null, -zig_bss_section_index: ?u32 = null, debug_info_section_index: ?u32 = null, debug_abbrev_section_index: ?u32 = null, @@ -3367,7 +3364,6 @@ fn sortPhdrs(self: *Elf) error{OutOfMemory}!void { for (&[_]*?u16{ &self.phdr_zig_load_re_index, &self.phdr_zig_load_ro_index, - &self.phdr_zig_load_zerofill_index, &self.phdr_table_index, &self.phdr_table_load_index, &self.phdr_interp_index, @@ -3496,7 +3492,6 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void { &self.zig_text_section_index, &self.zig_data_rel_ro_section_index, &self.zig_data_section_index, - &self.zig_bss_section_index, &self.debug_info_section_index, &self.debug_abbrev_section_index, &self.debug_str_section_index, @@ -3591,9 +3586,17 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void { fn updateSectionSizes(self: *Elf) !void { const slice = self.sections.slice(); - for (slice.items(.shdr), slice.items(.atom_list)) |*shdr, atom_list| { + for (slice.items(.shdr), slice.items(.atom_list), 0..) 
|*shdr, atom_list, shndx| { if (atom_list.items.len == 0) continue; if (self.requiresThunks() and shdr.sh_flags & elf.SHF_EXECINSTR != 0) continue; + if (self.zigObjectPtr()) |zo| { + if (zo.bss_index) |sym_index| { + const atom_ptr = zo.symbol(sym_index).atom(self).?; + if (shndx == atom_ptr.output_section_index) { + shdr.sh_size = atom_ptr.size; + } + } + } for (atom_list.items) |ref| { const atom_ptr = self.atom(ref) orelse continue; if (!atom_ptr.alive) continue; @@ -4804,7 +4807,6 @@ pub fn isZigSection(self: Elf, shndx: u32) bool { self.zig_text_section_index, self.zig_data_rel_ro_section_index, self.zig_data_section_index, - self.zig_bss_section_index, }) |index| { if (index == shndx) return true; } diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index ce839a512b1e..594dd5f640dc 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -60,6 +60,7 @@ debug_line_str_index: ?Symbol.Index = null, debug_loclists_index: ?Symbol.Index = null, debug_rnglists_index: ?Symbol.Index = null, eh_frame_index: ?Symbol.Index = null, +bss_index: ?Symbol.Index = null, pub const global_symbol_bit: u32 = 0x80000000; pub const symbol_mask: u32 = 0x7fffffff; @@ -149,17 +150,6 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .flags = elf.PF_R | elf.PF_W, }); } - - if (elf_file.phdr_zig_load_zerofill_index == null) { - const alignment = elf_file.page_size; - elf_file.phdr_zig_load_zerofill_index = try elf_file.addPhdr(.{ - .type = elf.PT_LOAD, - .addr = if (ptr_size >= 4) 0x14000000 else 0xf000, - .memsz = 1024, - .@"align" = alignment, - .flags = elf.PF_R | elf.PF_W, - }); - } } if (elf_file.zig_text_section_index == null) { @@ -225,51 +215,11 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { } } - if (elf_file.zig_bss_section_index == null) { - elf_file.zig_bss_section_index = try elf_file.addSection(.{ - .name = try elf_file.insertShString(".bss.zig"), - .type = elf.SHT_NOBITS, - 
.addralign = ptr_size, - .flags = elf.SHF_ALLOC | elf.SHF_WRITE, - .offset = 0, - }); - const shdr = &elf_file.sections.items(.shdr)[elf_file.zig_bss_section_index.?]; - const phndx = &elf_file.sections.items(.phndx)[elf_file.zig_bss_section_index.?]; - if (elf_file.base.isRelocatable()) { - shdr.sh_size = 1024; - } else { - phndx.* = elf_file.phdr_zig_load_zerofill_index.?; - const phdr = elf_file.phdrs.items[phndx.*.?]; - shdr.sh_addr = phdr.p_vaddr; - shdr.sh_size = phdr.p_memsz; - } - } - switch (comp.config.debug_format) { .strip => {}, .dwarf => |v| { var dwarf = Dwarf.init(&elf_file.base, v); - const addSectionSymbol = struct { - fn addSectionSymbol( - zig_object: *ZigObject, - alloc: Allocator, - name: [:0]const u8, - alignment: Atom.Alignment, - shndx: u32, - ) !Symbol.Index { - const name_off = try zig_object.addString(alloc, name); - const index = try zig_object.newSymbolWithAtom(alloc, name_off); - const sym = zig_object.symbol(index); - const esym = &zig_object.symtab.items(.elf_sym)[sym.esym_index]; - esym.st_info |= elf.STT_SECTION; - const atom_ptr = zig_object.atom(sym.ref.index).?; - atom_ptr.alignment = alignment; - atom_ptr.output_section_index = shndx; - return index; - } - }.addSectionSymbol; - if (elf_file.debug_str_section_index == null) { elf_file.debug_str_section_index = try elf_file.addSection(.{ .name = try elf_file.insertShString(".debug_str"), @@ -279,7 +229,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 1, }); self.debug_str_section_dirty = true; - self.debug_str_index = try addSectionSymbol(self, gpa, ".debug_str", .@"1", elf_file.debug_str_section_index.?); + self.debug_str_index = try self.addSectionSymbol(gpa, ".debug_str", .@"1", elf_file.debug_str_section_index.?); } if (elf_file.debug_info_section_index == null) { @@ -289,7 +239,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 1, }); self.debug_info_section_dirty = true; - 
self.debug_info_index = try addSectionSymbol(self, gpa, ".debug_info", .@"1", elf_file.debug_info_section_index.?); + self.debug_info_index = try self.addSectionSymbol(gpa, ".debug_info", .@"1", elf_file.debug_info_section_index.?); } if (elf_file.debug_abbrev_section_index == null) { @@ -299,7 +249,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 1, }); self.debug_abbrev_section_dirty = true; - self.debug_abbrev_index = try addSectionSymbol(self, gpa, ".debug_abbrev", .@"1", elf_file.debug_abbrev_section_index.?); + self.debug_abbrev_index = try self.addSectionSymbol(gpa, ".debug_abbrev", .@"1", elf_file.debug_abbrev_section_index.?); } if (elf_file.debug_aranges_section_index == null) { @@ -309,7 +259,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 16, }); self.debug_aranges_section_dirty = true; - self.debug_aranges_index = try addSectionSymbol(self, gpa, ".debug_aranges", .@"16", elf_file.debug_aranges_section_index.?); + self.debug_aranges_index = try self.addSectionSymbol(gpa, ".debug_aranges", .@"16", elf_file.debug_aranges_section_index.?); } if (elf_file.debug_line_section_index == null) { @@ -319,7 +269,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 1, }); self.debug_line_section_dirty = true; - self.debug_line_index = try addSectionSymbol(self, gpa, ".debug_line", .@"1", elf_file.debug_line_section_index.?); + self.debug_line_index = try self.addSectionSymbol(gpa, ".debug_line", .@"1", elf_file.debug_line_section_index.?); } if (elf_file.debug_line_str_section_index == null) { @@ -331,7 +281,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 1, }); self.debug_line_str_section_dirty = true; - self.debug_line_str_index = try addSectionSymbol(self, gpa, ".debug_line_str", .@"1", elf_file.debug_line_str_section_index.?); + self.debug_line_str_index = try self.addSectionSymbol(gpa, 
".debug_line_str", .@"1", elf_file.debug_line_str_section_index.?); } if (elf_file.debug_loclists_section_index == null) { @@ -341,7 +291,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 1, }); self.debug_loclists_section_dirty = true; - self.debug_loclists_index = try addSectionSymbol(self, gpa, ".debug_loclists", .@"1", elf_file.debug_loclists_section_index.?); + self.debug_loclists_index = try self.addSectionSymbol(gpa, ".debug_loclists", .@"1", elf_file.debug_loclists_section_index.?); } if (elf_file.debug_rnglists_section_index == null) { @@ -351,7 +301,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 1, }); self.debug_rnglists_section_dirty = true; - self.debug_rnglists_index = try addSectionSymbol(self, gpa, ".debug_rnglists", .@"1", elf_file.debug_rnglists_section_index.?); + self.debug_rnglists_index = try self.addSectionSymbol(gpa, ".debug_rnglists", .@"1", elf_file.debug_rnglists_section_index.?); } if (elf_file.eh_frame_section_index == null) { @@ -365,7 +315,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = ptr_size, }); self.eh_frame_section_dirty = true; - self.eh_frame_index = try addSectionSymbol(self, gpa, ".eh_frame", Atom.Alignment.fromNonzeroByteUnits(ptr_size), elf_file.eh_frame_section_index.?); + self.eh_frame_index = try self.addSectionSymbol(gpa, ".eh_frame", Atom.Alignment.fromNonzeroByteUnits(ptr_size), elf_file.eh_frame_section_index.?); } try dwarf.initMetadata(); @@ -1270,6 +1220,24 @@ pub fn getOrCreateMetadataForNav( return gop.value_ptr.symbol_index; } +fn addSectionSymbol( + self: *ZigObject, + allocator: Allocator, + name: [:0]const u8, + alignment: Atom.Alignment, + shndx: u32, +) !Symbol.Index { + const name_off = try self.addString(allocator, name); + const index = try self.newSymbolWithAtom(allocator, name_off); + const sym = self.symbol(index); + const esym = 
&self.symtab.items(.elf_sym)[sym.esym_index]; + esym.st_info |= elf.STT_SECTION; + const atom_ptr = self.atom(sym.ref.index).?; + atom_ptr.alignment = alignment; + atom_ptr.output_section_index = shndx; + return index; +} + fn getNavShdrIndex( self: *ZigObject, elf_file: *Elf, @@ -1309,12 +1277,34 @@ fn getNavShdrIndex( if (nav_init != .none and Value.fromInterned(nav_init).isUndefDeep(zcu)) return switch (zcu.navFileScope(nav_index).mod.optimize_mode) { .Debug, .ReleaseSafe => elf_file.zig_data_section_index.?, - .ReleaseFast, .ReleaseSmall => elf_file.zig_bss_section_index.?, + .ReleaseFast, .ReleaseSmall => { + if (self.bss_index) |symbol_index| + return self.symbol(symbol_index).atom(elf_file).?.output_section_index; + const osec = try elf_file.addSection(.{ + .type = elf.SHT_NOBITS, + .flags = elf.SHF_ALLOC | elf.SHF_WRITE, + .name = try elf_file.insertShString(".bss"), + .addralign = 1, + }); + self.bss_index = try self.addSectionSymbol(elf_file.base.comp.gpa, ".bss", .@"1", osec); + return osec; + }, }; const is_bss = !has_relocs and for (code) |byte| { if (byte != 0) break false; } else true; - if (is_bss) return elf_file.zig_bss_section_index.?; + if (is_bss) { + if (self.bss_index) |symbol_index| + return self.symbol(symbol_index).atom(elf_file).?.output_section_index; + const osec = try elf_file.addSection(.{ + .type = elf.SHT_NOBITS, + .flags = elf.SHF_ALLOC | elf.SHF_WRITE, + .name = try elf_file.insertShString(".bss"), + .addralign = 1, + }); + self.bss_index = try self.addSectionSymbol(elf_file.base.comp.gpa, ".bss", .@"1", osec); + return osec; + } return elf_file.zig_data_section_index.?; } From 0b92404ddf71c72664d2d4dd252e211a9678492d Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Thu, 29 Aug 2024 07:30:36 +0200 Subject: [PATCH 032/202] elf: allocate .data in ZigObject similarly to .eh_frame --- src/link/Elf.zig | 52 ++++++++++++++---------- src/link/Elf/ZigObject.zig | 81 +++++++++++++++++++------------------- 2 files changed, 72 insertions(+), 
61 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 5757e40ab88a..ae6933fdf7db 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -59,8 +59,6 @@ phdrs: std.ArrayListUnmanaged(elf.Elf64_Phdr) = .{}, phdr_zig_load_re_index: ?u16 = null, /// The index into the program headers of a PT_LOAD program header with Read flag phdr_zig_load_ro_index: ?u16 = null, -/// The index into the program headers of a PT_LOAD program header with Write flag -phdr_zig_load_rw_index: ?u16 = null, /// Special program headers /// PT_PHDR @@ -126,7 +124,6 @@ comdat_group_sections: std.ArrayListUnmanaged(ComdatGroupSection) = .{}, /// .rela.* sections are only used when emitting a relocatable object file. zig_text_section_index: ?u32 = null, zig_data_rel_ro_section_index: ?u32 = null, -zig_data_section_index: ?u32 = null, debug_info_section_index: ?u32 = null, debug_abbrev_section_index: ?u32 = null, @@ -3491,7 +3488,6 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void { &self.verneed_section_index, &self.zig_text_section_index, &self.zig_data_rel_ro_section_index, - &self.zig_data_section_index, &self.debug_info_section_index, &self.debug_abbrev_section_index, &self.debug_str_section_index, @@ -3589,12 +3585,16 @@ fn updateSectionSizes(self: *Elf) !void { for (slice.items(.shdr), slice.items(.atom_list), 0..) 
|*shdr, atom_list, shndx| { if (atom_list.items.len == 0) continue; if (self.requiresThunks() and shdr.sh_flags & elf.SHF_EXECINSTR != 0) continue; - if (self.zigObjectPtr()) |zo| { - if (zo.bss_index) |sym_index| { - const atom_ptr = zo.symbol(sym_index).atom(self).?; - if (shndx == atom_ptr.output_section_index) { - shdr.sh_size = atom_ptr.size; - } + if (self.zigObjectPtr()) |zo| blk: { + const sym_index = for ([_]?Symbol.Index{ + zo.data_index, + zo.bss_index, + }) |maybe_idx| { + if (maybe_idx) |idx| break idx; + } else break :blk; + const atom_ptr = zo.symbol(sym_index).atom(self).?; + if (shndx == atom_ptr.output_section_index) { + shdr.sh_size = atom_ptr.size; } } for (atom_list.items) |ref| { @@ -3929,10 +3929,16 @@ pub fn allocateAllocSections(self: *Elf) !void { } new_offset = alignment.@"align"(shndx, shdr.sh_addralign, new_offset); - if (shndx == self.eh_frame_section_index) eh_frame: { - const zo = self.zigObjectPtr() orelse break :eh_frame; - const sym = zo.symbol(zo.eh_frame_index orelse break :eh_frame); - const existing_size = sym.atom(self).?.size; + if (self.zigObjectPtr()) |zo| blk: { + const existing_size = for ([_]?Symbol.Index{ + zo.data_index, + zo.eh_frame_index, + }) |maybe_sym_index| { + const sect_sym_index = maybe_sym_index orelse continue; + const sect_atom_ptr = zo.symbol(sect_sym_index).atom(self).?; + if (sect_atom_ptr.output_section_index != shndx) continue; + break sect_atom_ptr.size; + } else break :blk; log.debug("moving {s} from 0x{x} to 0x{x}", .{ self.getShString(shdr.sh_name), shdr.sh_offset, @@ -4096,10 +4102,17 @@ fn writeAtoms(self: *Elf) !void { if (atom_ptr.output_section_index == shndx) break :base_offset atom_ptr.size; } break :base_offset 0; - } else if (@as(u32, @intCast(shndx)) == self.eh_frame_section_index) base_offset: { - const zo = self.zigObjectPtr() orelse break :base_offset 0; - const sym = zo.symbol(zo.eh_frame_index orelse break :base_offset 0); - break :base_offset sym.atom(self).?.size; + } else if 
(self.zigObjectPtr()) |zo| base_offset: { + const sym_index = for ([_]?Symbol.Index{ + zo.data_index, + zo.eh_frame_index, + }) |maybe_idx| { + if (maybe_idx) |idx| break idx; + } else break :base_offset 0; + const sym = zo.symbol(sym_index); + const atom_ptr = sym.atom(self).?; + if (atom_ptr.output_section_index == @as(u32, @intCast(shndx))) break :base_offset atom_ptr.size; + break :base_offset 0; } else 0; const sh_offset = shdr.sh_offset + base_offset; const sh_size = math.cast(usize, shdr.sh_size - base_offset) orelse return error.Overflow; @@ -4806,7 +4819,6 @@ pub fn isZigSection(self: Elf, shndx: u32) bool { inline for (&[_]?u32{ self.zig_text_section_index, self.zig_data_rel_ro_section_index, - self.zig_data_section_index, }) |index| { if (index == shndx) return true; } @@ -5507,7 +5519,7 @@ fn requiresThunks(self: Elf) bool { /// so that we reserve enough space for the program header table up-front. /// Bump these numbers when adding or deleting a Zig specific pre-allocated segment, or adding /// more special-purpose program headers. 
-pub const number_of_zig_segments = 4; +pub const number_of_zig_segments = 2; const max_number_of_object_segments = 9; const max_number_of_special_phdrs = 5; diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 594dd5f640dc..8cd234187315 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -61,6 +61,7 @@ debug_loclists_index: ?Symbol.Index = null, debug_rnglists_index: ?Symbol.Index = null, eh_frame_index: ?Symbol.Index = null, bss_index: ?Symbol.Index = null, +data_index: ?Symbol.Index = null, pub const global_symbol_bit: u32 = 0x80000000; pub const symbol_mask: u32 = 0x7fffffff; @@ -104,7 +105,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { } }.fillSection; - comptime assert(Elf.number_of_zig_segments == 4); + comptime assert(Elf.number_of_zig_segments == 2); if (!elf_file.base.isRelocatable()) { if (elf_file.phdr_zig_load_re_index == null) { @@ -135,21 +136,6 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .flags = elf.PF_R | elf.PF_W, }); } - - if (elf_file.phdr_zig_load_rw_index == null) { - const alignment = elf_file.page_size; - const filesz: u64 = 1024; - const off = try elf_file.findFreeSpace(filesz, alignment); - elf_file.phdr_zig_load_rw_index = try elf_file.addPhdr(.{ - .type = elf.PT_LOAD, - .offset = off, - .filesz = filesz, - .addr = if (ptr_size >= 4) 0x10000000 else 0xc000, - .memsz = filesz, - .@"align" = alignment, - .flags = elf.PF_R | elf.PF_W, - }); - } } if (elf_file.zig_text_section_index == null) { @@ -194,27 +180,6 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { } } - if (elf_file.zig_data_section_index == null) { - elf_file.zig_data_section_index = try elf_file.addSection(.{ - .name = try elf_file.insertShString(".data.zig"), - .type = elf.SHT_PROGBITS, - .addralign = ptr_size, - .flags = elf.SHF_ALLOC | elf.SHF_WRITE, - .offset = std.math.maxInt(u64), - }); - const shdr = 
&elf_file.sections.items(.shdr)[elf_file.zig_data_section_index.?]; - const phndx = &elf_file.sections.items(.phndx)[elf_file.zig_data_section_index.?]; - try fillSection(elf_file, shdr, 1024, elf_file.phdr_zig_load_rw_index); - if (elf_file.base.isRelocatable()) { - _ = try elf_file.addRelaShdr( - try elf_file.insertShString(".rela.data.zig"), - elf_file.zig_data_section_index.?, - ); - } else { - phndx.* = elf_file.phdr_zig_load_rw_index.?; - } - } - switch (comp.config.debug_format) { .strip => {}, .dwarf => |v| { @@ -1246,6 +1211,8 @@ fn getNavShdrIndex( sym_index: Symbol.Index, code: []const u8, ) error{OutOfMemory}!u32 { + const gpa = elf_file.base.comp.gpa; + const ptr_size = elf_file.ptrWidthBytes(); const ip = &zcu.intern_pool; const any_non_single_threaded = elf_file.base.comp.config.any_non_single_threaded; const nav_val = zcu.navValue(nav_index); @@ -1276,7 +1243,19 @@ fn getNavShdrIndex( if (is_const) return elf_file.zig_data_rel_ro_section_index.?; if (nav_init != .none and Value.fromInterned(nav_init).isUndefDeep(zcu)) return switch (zcu.navFileScope(nav_index).mod.optimize_mode) { - .Debug, .ReleaseSafe => elf_file.zig_data_section_index.?, + .Debug, .ReleaseSafe => { + if (self.data_index) |symbol_index| + return self.symbol(symbol_index).atom(elf_file).?.output_section_index; + const osec = try elf_file.addSection(.{ + .name = try elf_file.insertShString(".data"), + .type = elf.SHT_PROGBITS, + .addralign = ptr_size, + .flags = elf.SHF_ALLOC | elf.SHF_WRITE, + .offset = std.math.maxInt(u64), + }); + self.data_index = try self.addSectionSymbol(gpa, ".data", .@"1", osec); + return osec; + }, .ReleaseFast, .ReleaseSmall => { if (self.bss_index) |symbol_index| return self.symbol(symbol_index).atom(elf_file).?.output_section_index; @@ -1286,7 +1265,7 @@ fn getNavShdrIndex( .name = try elf_file.insertShString(".bss"), .addralign = 1, }); - self.bss_index = try self.addSectionSymbol(elf_file.base.comp.gpa, ".bss", .@"1", osec); + self.bss_index = try 
self.addSectionSymbol(gpa, ".bss", .@"1", osec); return osec; }, }; @@ -1302,10 +1281,20 @@ fn getNavShdrIndex( .name = try elf_file.insertShString(".bss"), .addralign = 1, }); - self.bss_index = try self.addSectionSymbol(elf_file.base.comp.gpa, ".bss", .@"1", osec); + self.bss_index = try self.addSectionSymbol(gpa, ".bss", .@"1", osec); return osec; } - return elf_file.zig_data_section_index.?; + if (self.data_index) |symbol_index| + return self.symbol(symbol_index).atom(elf_file).?.output_section_index; + const osec = try elf_file.addSection(.{ + .name = try elf_file.insertShString(".data"), + .type = elf.SHT_PROGBITS, + .addralign = ptr_size, + .flags = elf.SHF_ALLOC | elf.SHF_WRITE, + .offset = std.math.maxInt(u64), + }); + self.data_index = try self.addSectionSymbol(gpa, ".data", .@"1", osec); + return osec; } fn updateNavCode( @@ -2014,6 +2003,16 @@ fn allocateAtom(self: *ZigObject, atom_ptr: *Atom, elf_file: *Elf) !void { } shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits().?); + const sect_atom_ptr = for ([_]?Symbol.Index{self.data_index}) |maybe_sym_index| { + const sect_sym_index = maybe_sym_index orelse continue; + const sect_atom_ptr = self.symbol(sect_sym_index).atom(elf_file).?; + if (sect_atom_ptr.output_section_index == atom_ptr.output_section_index) break sect_atom_ptr; + } else null; + if (sect_atom_ptr) |sap| { + sap.size = shdr.sh_size; + sap.alignment = Atom.Alignment.fromNonzeroByteUnits(shdr.sh_addralign); + } + // This function can also reallocate an atom. // In this case we need to "unplug" it from its previous location before // plugging it in to its new location. 
From 848535535d88641a97c6f364cc78171743d7ae81 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Thu, 29 Aug 2024 20:56:12 +0200 Subject: [PATCH 033/202] elf: allocate .data.rel.ro and .rodata in ZigObject similarly to .eh_frame --- src/link/Elf.zig | 9 ++-- src/link/Elf/ZigObject.zig | 101 ++++++++++++++++++++++--------------- 2 files changed, 62 insertions(+), 48 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index ae6933fdf7db..018cdda88841 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -57,8 +57,6 @@ phdrs: std.ArrayListUnmanaged(elf.Elf64_Phdr) = .{}, /// Tracked loadable segments during incremental linking. /// The index into the program headers of a PT_LOAD program header with Read and Execute flags phdr_zig_load_re_index: ?u16 = null, -/// The index into the program headers of a PT_LOAD program header with Read flag -phdr_zig_load_ro_index: ?u16 = null, /// Special program headers /// PT_PHDR @@ -123,7 +121,6 @@ comdat_group_sections: std.ArrayListUnmanaged(ComdatGroupSection) = .{}, /// Tracked section headers with incremental updates to Zig object. /// .rela.* sections are only used when emitting a relocatable object file. 
zig_text_section_index: ?u32 = null, -zig_data_rel_ro_section_index: ?u32 = null, debug_info_section_index: ?u32 = null, debug_abbrev_section_index: ?u32 = null, @@ -3360,7 +3357,6 @@ fn sortPhdrs(self: *Elf) error{OutOfMemory}!void { for (&[_]*?u16{ &self.phdr_zig_load_re_index, - &self.phdr_zig_load_ro_index, &self.phdr_table_index, &self.phdr_table_load_index, &self.phdr_interp_index, @@ -3487,7 +3483,6 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void { &self.versym_section_index, &self.verneed_section_index, &self.zig_text_section_index, - &self.zig_data_rel_ro_section_index, &self.debug_info_section_index, &self.debug_abbrev_section_index, &self.debug_str_section_index, @@ -3588,6 +3583,7 @@ fn updateSectionSizes(self: *Elf) !void { if (self.zigObjectPtr()) |zo| blk: { const sym_index = for ([_]?Symbol.Index{ zo.data_index, + zo.data_relro_index, zo.bss_index, }) |maybe_idx| { if (maybe_idx) |idx| break idx; @@ -3932,6 +3928,7 @@ pub fn allocateAllocSections(self: *Elf) !void { if (self.zigObjectPtr()) |zo| blk: { const existing_size = for ([_]?Symbol.Index{ zo.data_index, + zo.data_relro_index, zo.eh_frame_index, }) |maybe_sym_index| { const sect_sym_index = maybe_sym_index orelse continue; @@ -4105,6 +4102,7 @@ fn writeAtoms(self: *Elf) !void { } else if (self.zigObjectPtr()) |zo| base_offset: { const sym_index = for ([_]?Symbol.Index{ zo.data_index, + zo.data_relro_index, zo.eh_frame_index, }) |maybe_idx| { if (maybe_idx) |idx| break idx; @@ -4818,7 +4816,6 @@ pub fn isEffectivelyDynLib(self: Elf) bool { pub fn isZigSection(self: Elf, shndx: u32) bool { inline for (&[_]?u32{ self.zig_text_section_index, - self.zig_data_rel_ro_section_index, }) |index| { if (index == shndx) return true; } diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 8cd234187315..2ebe82e8660e 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -62,6 +62,8 @@ debug_rnglists_index: ?Symbol.Index = null, eh_frame_index: 
?Symbol.Index = null, bss_index: ?Symbol.Index = null, data_index: ?Symbol.Index = null, +data_relro_index: ?Symbol.Index = null, +rodata_index: ?Symbol.Index = null, pub const global_symbol_bit: u32 = 0x80000000; pub const symbol_mask: u32 = 0x7fffffff; @@ -121,21 +123,6 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .flags = elf.PF_X | elf.PF_R | elf.PF_W, }); } - - if (elf_file.phdr_zig_load_ro_index == null) { - const alignment = elf_file.page_size; - const filesz: u64 = 1024; - const off = try elf_file.findFreeSpace(filesz, alignment); - elf_file.phdr_zig_load_ro_index = try elf_file.addPhdr(.{ - .type = elf.PT_LOAD, - .offset = off, - .filesz = filesz, - .addr = if (ptr_size >= 4) 0xc000000 else 0xa000, - .memsz = filesz, - .@"align" = alignment, - .flags = elf.PF_R | elf.PF_W, - }); - } } if (elf_file.zig_text_section_index == null) { @@ -159,27 +146,6 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { } } - if (elf_file.zig_data_rel_ro_section_index == null) { - elf_file.zig_data_rel_ro_section_index = try elf_file.addSection(.{ - .name = try elf_file.insertShString(".data.rel.ro.zig"), - .type = elf.SHT_PROGBITS, - .addralign = 1, - .flags = elf.SHF_ALLOC | elf.SHF_WRITE, - .offset = std.math.maxInt(u64), - }); - const shdr = &elf_file.sections.items(.shdr)[elf_file.zig_data_rel_ro_section_index.?]; - const phndx = &elf_file.sections.items(.phndx)[elf_file.zig_data_rel_ro_section_index.?]; - try fillSection(elf_file, shdr, 1024, elf_file.phdr_zig_load_ro_index); - if (elf_file.base.isRelocatable()) { - _ = try elf_file.addRelaShdr( - try elf_file.insertShString(".rela.data.rel.ro.zig"), - elf_file.zig_data_rel_ro_section_index.?, - ); - } else { - phndx.* = elf_file.phdr_zig_load_ro_index.?; - } - } - switch (comp.config.debug_format) { .strip => {}, .dwarf => |v| { @@ -1083,6 +1049,20 @@ pub fn lowerUav( return .{ .mcv = .{ .load_symbol = metadata.symbol_index } }; } + const osec = if 
(self.data_relro_index) |sym_index| + self.symbol(sym_index).atom(elf_file).?.output_section_index + else osec: { + const osec = try elf_file.addSection(.{ + .name = try elf_file.insertShString(".data.rel.ro"), + .type = elf.SHT_PROGBITS, + .addralign = 1, + .flags = elf.SHF_ALLOC | elf.SHF_WRITE, + .offset = std.math.maxInt(u64), + }); + self.data_relro_index = osec; + break :osec osec; + }; + var name_buf: [32]u8 = undefined; const name = std.fmt.bufPrint(&name_buf, "__anon_{d}", .{ @intFromEnum(uav), @@ -1093,7 +1073,7 @@ pub fn lowerUav( name, val, uav_alignment, - elf_file.zig_data_rel_ro_section_index.?, + osec, src_loc, ) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, @@ -1240,7 +1220,19 @@ fn getNavShdrIndex( .offset = std.math.maxInt(u64), }); } - if (is_const) return elf_file.zig_data_rel_ro_section_index.?; + if (is_const) { + if (self.data_relro_index) |symbol_index| + return self.symbol(symbol_index).atom(elf_file).?.output_section_index; + const osec = try elf_file.addSection(.{ + .name = try elf_file.insertShString(".data.rel.ro"), + .type = elf.SHT_PROGBITS, + .addralign = 1, + .flags = elf.SHF_ALLOC | elf.SHF_WRITE, + .offset = std.math.maxInt(u64), + }); + self.data_relro_index = try self.addSectionSymbol(gpa, ".data.rel.ro", .@"1", osec); + return osec; + } if (nav_init != .none and Value.fromInterned(nav_init).isUndefDeep(zcu)) return switch (zcu.navFileScope(nav_index).mod.optimize_mode) { .Debug, .ReleaseSafe => { @@ -1253,7 +1245,12 @@ fn getNavShdrIndex( .flags = elf.SHF_ALLOC | elf.SHF_WRITE, .offset = std.math.maxInt(u64), }); - self.data_index = try self.addSectionSymbol(gpa, ".data", .@"1", osec); + self.data_index = try self.addSectionSymbol( + gpa, + ".data", + Atom.Alignment.fromNonzeroByteUnits(ptr_size), + osec, + ); return osec; }, .ReleaseFast, .ReleaseSmall => { @@ -1293,7 +1290,12 @@ fn getNavShdrIndex( .flags = elf.SHF_ALLOC | elf.SHF_WRITE, .offset = std.math.maxInt(u64), }); - self.data_index = try 
self.addSectionSymbol(gpa, ".data", .@"1", osec); + self.data_index = try self.addSectionSymbol( + gpa, + ".data", + Atom.Alignment.fromNonzeroByteUnits(ptr_size), + osec, + ); return osec; } @@ -1702,7 +1704,19 @@ fn updateLazySymbol( const output_section_index = switch (sym.kind) { .code => elf_file.zig_text_section_index.?, - .const_data => elf_file.zig_data_rel_ro_section_index.?, + .const_data => if (self.rodata_index) |sym_index| + self.symbol(sym_index).atom(elf_file).?.output_section_index + else osec: { + const osec = try elf_file.addSection(.{ + .name = try elf_file.insertShString(".rodata"), + .type = elf.SHT_PROGBITS, + .addralign = 1, + .flags = elf.SHF_ALLOC, + .offset = std.math.maxInt(u64), + }); + self.rodata_index = try self.addSectionSymbol(gpa, ".rodata", .@"1", osec); + break :osec osec; + }, }; const local_sym = self.symbol(symbol_index); local_sym.name_offset = name_str_index; @@ -2003,7 +2017,10 @@ fn allocateAtom(self: *ZigObject, atom_ptr: *Atom, elf_file: *Elf) !void { } shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits().?); - const sect_atom_ptr = for ([_]?Symbol.Index{self.data_index}) |maybe_sym_index| { + const sect_atom_ptr = for ([_]?Symbol.Index{ + self.data_index, + self.data_relro_index, + }) |maybe_sym_index| { const sect_sym_index = maybe_sym_index orelse continue; const sect_atom_ptr = self.symbol(sect_sym_index).atom(elf_file).?; if (sect_atom_ptr.output_section_index == atom_ptr.output_section_index) break sect_atom_ptr; From 2d0f4fc9c0401a3c0fcf68dfc67c7948351c7f73 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Thu, 29 Aug 2024 21:14:46 +0200 Subject: [PATCH 034/202] elf: allocate .text in ZigObject similarly to .eh_frame --- src/link/Elf.zig | 23 +------- src/link/Elf/ZigObject.zig | 109 ++++++++++++++++--------------------- 2 files changed, 48 insertions(+), 84 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 018cdda88841..813d0c3afc6d 100644 --- a/src/link/Elf.zig +++ 
b/src/link/Elf.zig @@ -54,10 +54,6 @@ shdr_table_offset: ?u64 = null, /// Same order as in the file. phdrs: std.ArrayListUnmanaged(elf.Elf64_Phdr) = .{}, -/// Tracked loadable segments during incremental linking. -/// The index into the program headers of a PT_LOAD program header with Read and Execute flags -phdr_zig_load_re_index: ?u16 = null, - /// Special program headers /// PT_PHDR phdr_table_index: ?u16 = null, @@ -118,10 +114,6 @@ rela_plt: std.ArrayListUnmanaged(elf.Elf64_Rela) = .{}, /// Applies only to a relocatable. comdat_group_sections: std.ArrayListUnmanaged(ComdatGroupSection) = .{}, -/// Tracked section headers with incremental updates to Zig object. -/// .rela.* sections are only used when emitting a relocatable object file. -zig_text_section_index: ?u32 = null, - debug_info_section_index: ?u32 = null, debug_abbrev_section_index: ?u32 = null, debug_str_section_index: ?u32 = null, @@ -3356,7 +3348,6 @@ fn sortPhdrs(self: *Elf) error{OutOfMemory}!void { } for (&[_]*?u16{ - &self.phdr_zig_load_re_index, &self.phdr_table_index, &self.phdr_table_load_index, &self.phdr_interp_index, @@ -3482,7 +3473,6 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void { &self.copy_rel_section_index, &self.versym_section_index, &self.verneed_section_index, - &self.zig_text_section_index, &self.debug_info_section_index, &self.debug_abbrev_section_index, &self.debug_str_section_index, @@ -3734,10 +3724,9 @@ fn getMaxNumberOfPhdrs() u64 { /// We permit a maximum of 3**2 number of segments. fn calcNumberOfSegments(self: *Elf) usize { var covers: [9]bool = [_]bool{false} ** 9; - for (self.sections.items(.shdr), 0..) 
|shdr, shndx| { + for (self.sections.items(.shdr)) |shdr| { if (shdr.sh_type == elf.SHT_NULL) continue; if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue; - if (self.isZigSection(@intCast(shndx))) continue; const flags = shdrToPhdrFlags(shdr.sh_flags); covers[flags - 1] = true; } @@ -3835,7 +3824,6 @@ pub fn allocateAllocSections(self: *Elf) !void { for (slice.items(.shdr), 0..) |shdr, shndx| { if (shdr.sh_type == elf.SHT_NULL) continue; if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue; - if (self.isZigSection(@intCast(shndx))) continue; const flags = shdrToPhdrFlags(shdr.sh_flags); try covers[flags - 1].append(@intCast(shndx)); } @@ -4813,15 +4801,6 @@ pub fn isEffectivelyDynLib(self: Elf) bool { }; } -pub fn isZigSection(self: Elf, shndx: u32) bool { - inline for (&[_]?u32{ - self.zig_text_section_index, - }) |index| { - if (index == shndx) return true; - } - return false; -} - pub fn isDebugSection(self: Elf, shndx: u32) bool { inline for (&[_]?u32{ self.debug_info_section_index, diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 2ebe82e8660e..39df4b78cc21 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -51,6 +51,12 @@ debug_loclists_section_dirty: bool = false, debug_rnglists_section_dirty: bool = false, eh_frame_section_dirty: bool = false, +text_index: ?Symbol.Index = null, +data_relro_index: ?Symbol.Index = null, +rodata_index: ?Symbol.Index = null, +data_index: ?Symbol.Index = null, +bss_index: ?Symbol.Index = null, +eh_frame_index: ?Symbol.Index = null, debug_info_index: ?Symbol.Index = null, debug_abbrev_index: ?Symbol.Index = null, debug_aranges_index: ?Symbol.Index = null, @@ -59,11 +65,6 @@ debug_line_index: ?Symbol.Index = null, debug_line_str_index: ?Symbol.Index = null, debug_loclists_index: ?Symbol.Index = null, debug_rnglists_index: ?Symbol.Index = null, -eh_frame_index: ?Symbol.Index = null, -bss_index: ?Symbol.Index = null, -data_index: ?Symbol.Index = null, -data_relro_index: ?Symbol.Index = 
null, -rodata_index: ?Symbol.Index = null, pub const global_symbol_bit: u32 = 0x80000000; pub const symbol_mask: u32 = 0x7fffffff; @@ -75,6 +76,7 @@ const InitOptions = struct { }; pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { + _ = options; const comp = elf_file.base.comp; const gpa = comp.gpa; const ptr_size = elf_file.ptrWidthBytes(); @@ -92,60 +94,6 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { esym.st_shndx = elf.SHN_ABS; } - const fillSection = struct { - fn fillSection(ef: *Elf, shdr: *elf.Elf64_Shdr, size: u64, phndx: ?u16) !void { - if (ef.base.isRelocatable()) { - const off = try ef.findFreeSpace(size, shdr.sh_addralign); - shdr.sh_offset = off; - shdr.sh_size = size; - } else { - const phdr = ef.phdrs.items[phndx.?]; - shdr.sh_addr = phdr.p_vaddr; - shdr.sh_offset = phdr.p_offset; - shdr.sh_size = phdr.p_memsz; - } - } - }.fillSection; - - comptime assert(Elf.number_of_zig_segments == 2); - - if (!elf_file.base.isRelocatable()) { - if (elf_file.phdr_zig_load_re_index == null) { - const filesz = options.program_code_size_hint; - const off = try elf_file.findFreeSpace(filesz, elf_file.page_size); - elf_file.phdr_zig_load_re_index = try elf_file.addPhdr(.{ - .type = elf.PT_LOAD, - .offset = off, - .filesz = filesz, - .addr = if (ptr_size >= 4) 0x4000000 else 0x4000, - .memsz = filesz, - .@"align" = elf_file.page_size, - .flags = elf.PF_X | elf.PF_R | elf.PF_W, - }); - } - } - - if (elf_file.zig_text_section_index == null) { - elf_file.zig_text_section_index = try elf_file.addSection(.{ - .name = try elf_file.insertShString(".text.zig"), - .type = elf.SHT_PROGBITS, - .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR, - .addralign = 1, - .offset = std.math.maxInt(u64), - }); - const shdr = &elf_file.sections.items(.shdr)[elf_file.zig_text_section_index.?]; - const phndx = &elf_file.sections.items(.phndx)[elf_file.zig_text_section_index.?]; - try fillSection(elf_file, shdr, options.program_code_size_hint, 
elf_file.phdr_zig_load_re_index); - if (elf_file.base.isRelocatable()) { - _ = try elf_file.addRelaShdr( - try elf_file.insertShString(".rela.text.zig"), - elf_file.zig_text_section_index.?, - ); - } else { - phndx.* = elf_file.phdr_zig_load_re_index.?; - } - } - switch (comp.config.debug_format) { .strip => {}, .dwarf => |v| { @@ -1196,7 +1144,19 @@ fn getNavShdrIndex( const ip = &zcu.intern_pool; const any_non_single_threaded = elf_file.base.comp.config.any_non_single_threaded; const nav_val = zcu.navValue(nav_index); - if (ip.isFunctionType(nav_val.typeOf(zcu).toIntern())) return elf_file.zig_text_section_index.?; + if (ip.isFunctionType(nav_val.typeOf(zcu).toIntern())) { + if (self.text_index) |symbol_index| + return self.symbol(symbol_index).atom(elf_file).?.output_section_index; + const osec = try elf_file.addSection(.{ + .type = elf.SHT_PROGBITS, + .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR, + .name = try elf_file.insertShString(".text"), + .addralign = 1, + .offset = std.math.maxInt(u64), + }); + self.text_index = try self.addSectionSymbol(gpa, ".text", .@"1", osec); + return osec; + } const is_const, const is_threadlocal, const nav_init = switch (ip.indexToKey(nav_val.toIntern())) { .variable => |variable| .{ false, variable.is_threadlocal, variable.init }, .@"extern" => |@"extern"| .{ @"extern".is_const, @"extern".is_threadlocal, .none }, @@ -1538,6 +1498,19 @@ pub fn updateFunc( self.symbol(sym_index).name(elf_file), }); defer gpa.free(name); + const osec = if (self.text_index) |sect_sym_index| + self.symbol(sect_sym_index).atom(elf_file).?.output_section_index + else osec: { + const osec = try elf_file.addSection(.{ + .name = try elf_file.insertShString(".text"), + .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR, + .type = elf.SHT_PROGBITS, + .addralign = 1, + .offset = std.math.maxInt(u64), + }); + self.text_index = try self.addSectionSymbol(gpa, ".text", .@"1", osec); + break :osec osec; + }; const name_off = try self.addString(gpa, name); const tr_size = 
trampolineSize(elf_file.getTarget().cpu.arch); const tr_sym_index = try self.newSymbolWithAtom(gpa, name_off); @@ -1549,7 +1522,7 @@ pub fn updateFunc( tr_atom_ptr.value = old_rva; tr_atom_ptr.alive = true; tr_atom_ptr.alignment = old_alignment; - tr_atom_ptr.output_section_index = elf_file.zig_text_section_index.?; + tr_atom_ptr.output_section_index = osec; tr_atom_ptr.size = tr_size; const target_sym = self.symbol(sym_index); target_sym.addExtra(.{ .trampoline = tr_sym_index }, elf_file); @@ -1703,7 +1676,19 @@ fn updateLazySymbol( }; const output_section_index = switch (sym.kind) { - .code => elf_file.zig_text_section_index.?, + .code => if (self.text_index) |sym_index| + self.symbol(sym_index).atom(elf_file).?.output_section_index + else osec: { + const osec = try elf_file.addSection(.{ + .name = try elf_file.insertShString(".text"), + .type = elf.SHT_PROGBITS, + .addralign = 1, + .flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR, + .offset = std.math.maxInt(u64), + }); + self.text_index = try self.addSectionSymbol(gpa, ".text", .@"1", osec); + break :osec osec; + }, .const_data => if (self.rodata_index) |sym_index| self.symbol(sym_index).atom(elf_file).?.output_section_index else osec: { From ef6ee90ff8762624852ee9a71241bd5a926371ab Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Thu, 29 Aug 2024 21:19:16 +0200 Subject: [PATCH 035/202] elf: remove now unused number_of_zig_segments --- src/link/Elf.zig | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 813d0c3afc6d..180314918ff0 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -3708,13 +3708,11 @@ fn shdrToPhdrFlags(sh_flags: u64) u32 { /// (This is an upper bound so that we can reserve enough space for the header and progam header /// table without running out of space and being forced to move things around.) 
fn getMaxNumberOfPhdrs() u64 { - // First, assume we compile Zig's source incrementally, this gives us: - var num: u64 = number_of_zig_segments; - // Next, the estimated maximum number of segments the linker can emit for input sections are: - num += max_number_of_object_segments; - // Next, any other non-loadable program headers, including TLS, DYNAMIC, GNU_STACK, GNU_EH_FRAME, INTERP: + // The estimated maximum number of segments the linker can emit for input sections are: + var num: u64 = max_number_of_object_segments; + // Any other non-loadable program headers, including TLS, DYNAMIC, GNU_STACK, GNU_EH_FRAME, INTERP: num += max_number_of_special_phdrs; - // Finally, PHDR program header and corresponding read-only load segment: + // PHDR program header and corresponding read-only load segment: num += 2; return num; } @@ -5495,7 +5493,6 @@ fn requiresThunks(self: Elf) bool { /// so that we reserve enough space for the program header table up-front. /// Bump these numbers when adding or deleting a Zig specific pre-allocated segment, or adding /// more special-purpose program headers. 
-pub const number_of_zig_segments = 2; const max_number_of_object_segments = 9; const max_number_of_special_phdrs = 5; From bc39bddd5fd66037bc524bb7c824230fbb249e01 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Thu, 29 Aug 2024 22:08:36 +0200 Subject: [PATCH 036/202] elf: remove isDebugSection helper --- src/link/Elf.zig | 97 +++++++++++++++--------------------- src/link/Elf/ZigObject.zig | 4 +- src/link/Elf/relocatable.zig | 24 ++++----- 3 files changed, 54 insertions(+), 71 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 180314918ff0..d44892836cf1 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -3572,8 +3572,10 @@ fn updateSectionSizes(self: *Elf) !void { if (self.requiresThunks() and shdr.sh_flags & elf.SHF_EXECINSTR != 0) continue; if (self.zigObjectPtr()) |zo| blk: { const sym_index = for ([_]?Symbol.Index{ - zo.data_index, + zo.text_index, + zo.rodata_index, zo.data_relro_index, + zo.data_index, zo.bss_index, }) |maybe_idx| { if (maybe_idx) |idx| break idx; @@ -3913,8 +3915,10 @@ pub fn allocateAllocSections(self: *Elf) !void { if (self.zigObjectPtr()) |zo| blk: { const existing_size = for ([_]?Symbol.Index{ - zo.data_index, + zo.text_index, + zo.rodata_index, zo.data_relro_index, + zo.data_index, zo.eh_frame_index, }) |maybe_sym_index| { const sect_sym_index = maybe_sym_index orelse continue; @@ -3954,27 +3958,27 @@ pub fn allocateNonAllocSections(self: *Elf) !void { shdr.sh_size = 0; const new_offset = try self.findFreeSpace(needed_size, shdr.sh_addralign); - if (self.isDebugSection(@intCast(shndx))) { + if (self.zigObjectPtr()) |zo| blk: { + const existing_size = for ([_]?Symbol.Index{ + zo.debug_info_index, + zo.debug_abbrev_index, + zo.debug_aranges_index, + zo.debug_str_index, + zo.debug_line_index, + zo.debug_line_str_index, + zo.debug_loclists_index, + zo.debug_rnglists_index, + }) |maybe_sym_index| { + const sym_index = maybe_sym_index orelse continue; + const sym = zo.symbol(sym_index); + const atom_ptr = 
sym.atom(self).?; + if (atom_ptr.output_section_index == shndx) break atom_ptr.size; + } else break :blk; log.debug("moving {s} from 0x{x} to 0x{x}", .{ self.getShString(shdr.sh_name), shdr.sh_offset, new_offset, }); - const zo = self.zigObjectPtr().?; - const existing_size = for ([_]Symbol.Index{ - zo.debug_info_index.?, - zo.debug_abbrev_index.?, - zo.debug_aranges_index.?, - zo.debug_str_index.?, - zo.debug_line_index.?, - zo.debug_line_str_index.?, - zo.debug_loclists_index.?, - zo.debug_rnglists_index.?, - }) |sym_index| { - const sym = zo.symbol(sym_index); - const atom_ptr = sym.atom(self).?; - if (atom_ptr.output_section_index == shndx) break atom_ptr.size; - } else 0; const amt = try self.base.file.?.copyRangeAll( shdr.sh_offset, self.base.file.?, @@ -4068,35 +4072,28 @@ fn writeAtoms(self: *Elf) !void { log.debug("writing atoms in '{s}' section", .{self.getShString(shdr.sh_name)}); // TODO really, really handle debug section separately - const base_offset = if (self.isDebugSection(@intCast(shndx))) base_offset: { - const zo = self.zigObjectPtr().?; - for ([_]Symbol.Index{ - zo.debug_info_index.?, - zo.debug_abbrev_index.?, - zo.debug_aranges_index.?, - zo.debug_str_index.?, - zo.debug_line_index.?, - zo.debug_line_str_index.?, - zo.debug_loclists_index.?, - zo.debug_rnglists_index.?, - }) |sym_index| { + const base_offset = if (self.zigObjectPtr()) |zo| base_offset: { + for ([_]?Symbol.Index{ + zo.text_index, + zo.rodata_index, + zo.data_relro_index, + zo.data_index, + zo.eh_frame_index, + zo.debug_info_index, + zo.debug_abbrev_index, + zo.debug_aranges_index, + zo.debug_str_index, + zo.debug_line_index, + zo.debug_line_str_index, + zo.debug_loclists_index, + zo.debug_rnglists_index, + }) |maybe_sym_index| { + const sym_index = maybe_sym_index orelse continue; const sym = zo.symbol(sym_index); const atom_ptr = sym.atom(self).?; if (atom_ptr.output_section_index == shndx) break :base_offset atom_ptr.size; } break :base_offset 0; - } else if 
(self.zigObjectPtr()) |zo| base_offset: { - const sym_index = for ([_]?Symbol.Index{ - zo.data_index, - zo.data_relro_index, - zo.eh_frame_index, - }) |maybe_idx| { - if (maybe_idx) |idx| break idx; - } else break :base_offset 0; - const sym = zo.symbol(sym_index); - const atom_ptr = sym.atom(self).?; - if (atom_ptr.output_section_index == @as(u32, @intCast(shndx))) break :base_offset atom_ptr.size; - break :base_offset 0; } else 0; const sh_offset = shdr.sh_offset + base_offset; const sh_size = math.cast(usize, shdr.sh_size - base_offset) orelse return error.Overflow; @@ -4799,22 +4796,6 @@ pub fn isEffectivelyDynLib(self: Elf) bool { }; } -pub fn isDebugSection(self: Elf, shndx: u32) bool { - inline for (&[_]?u32{ - self.debug_info_section_index, - self.debug_abbrev_section_index, - self.debug_str_section_index, - self.debug_aranges_section_index, - self.debug_line_section_index, - self.debug_line_str_section_index, - self.debug_loclists_section_index, - self.debug_rnglists_section_index, - }) |index| { - if (index == shndx) return true; - } - return false; -} - pub fn addPhdr(self: *Elf, opts: struct { type: u32 = 0, flags: u32 = 0, diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 39df4b78cc21..0173ffb40b3f 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -52,8 +52,8 @@ debug_rnglists_section_dirty: bool = false, eh_frame_section_dirty: bool = false, text_index: ?Symbol.Index = null, -data_relro_index: ?Symbol.Index = null, rodata_index: ?Symbol.Index = null, +data_relro_index: ?Symbol.Index = null, data_index: ?Symbol.Index = null, bss_index: ?Symbol.Index = null, eh_frame_index: ?Symbol.Index = null, @@ -2003,6 +2003,8 @@ fn allocateAtom(self: *ZigObject, atom_ptr: *Atom, elf_file: *Elf) !void { shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits().?); const sect_atom_ptr = for ([_]?Symbol.Index{ + self.text_index, + self.rodata_index, self.data_index, self.data_relro_index, }) 
|maybe_sym_index| { diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig index 8f5ea8e25b5d..4397bc2ca309 100644 --- a/src/link/Elf/relocatable.zig +++ b/src/link/Elf/relocatable.zig @@ -436,18 +436,18 @@ fn writeAtoms(elf_file: *Elf) !void { log.debug("writing atoms in '{s}' section", .{elf_file.getShString(shdr.sh_name)}); // TODO really, really handle debug section separately - const base_offset = if (elf_file.isDebugSection(@intCast(shndx))) blk: { - const zo = elf_file.zigObjectPtr().?; - break :blk for ([_]Symbol.Index{ - zo.debug_info_index.?, - zo.debug_abbrev_index.?, - zo.debug_aranges_index.?, - zo.debug_str_index.?, - zo.debug_line_index.?, - zo.debug_line_str_index.?, - zo.debug_loclists_index.?, - zo.debug_rnglists_index.?, - }) |sym_index| { + const base_offset = if (elf_file.zigObjectPtr()) |zo| blk: { + break :blk for ([_]?Symbol.Index{ + zo.debug_info_index, + zo.debug_abbrev_index, + zo.debug_aranges_index, + zo.debug_str_index, + zo.debug_line_index, + zo.debug_line_str_index, + zo.debug_loclists_index, + zo.debug_rnglists_index, + }) |maybe_sym_index| { + const sym_index = maybe_sym_index orelse continue; const sym = zo.symbol(sym_index); const atom_ptr = sym.atom(elf_file).?; if (atom_ptr.output_section_index == shndx) break atom_ptr.size; From 25fa092bb1940ccd88f413a6ebe0a5d4d3befa71 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Thu, 29 Aug 2024 22:41:57 +0200 Subject: [PATCH 037/202] elf: fix a typo in setting ZigObject.data_relro_index --- src/link/Elf/ZigObject.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 0173ffb40b3f..9e42a53ccba3 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -1007,7 +1007,7 @@ pub fn lowerUav( .flags = elf.SHF_ALLOC | elf.SHF_WRITE, .offset = std.math.maxInt(u64), }); - self.data_relro_index = osec; + self.data_relro_index = try self.addSectionSymbol(gpa, ".data.rel.ro", .@"1", 
osec); break :osec osec; }; From acb91f4b30e6cbb14bb81406ca6ca71241dc3a98 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Thu, 29 Aug 2024 23:23:39 +0200 Subject: [PATCH 038/202] elf: fix emitting correct .rela. sections in -r mode --- src/link/Elf/ZigObject.zig | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 9e42a53ccba3..27fb121b04e6 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -494,14 +494,6 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi } } } - - if (elf_file.base.isRelocatable() and relocs.items.len > 0) { - const rela_sect_name = try std.fmt.allocPrintZ(gpa, ".rela{s}", .{elf_file.getShString(shdr.sh_name)}); - defer gpa.free(rela_sect_name); - if (elf_file.sectionByName(rela_sect_name) == null) { - _ = try elf_file.addRelaShdr(try elf_file.insertShString(rela_sect_name), shndx); - } - } } self.debug_abbrev_section_dirty = false; @@ -820,6 +812,7 @@ pub fn writeAr(self: ZigObject, writer: anytype) !void { } pub fn addAtomsToRelaSections(self: *ZigObject, elf_file: *Elf) !void { + const gpa = elf_file.base.comp.gpa; for (self.atoms_indexes.items) |atom_index| { const atom_ptr = self.atom(atom_index) orelse continue; if (!atom_ptr.alive) continue; @@ -829,11 +822,18 @@ pub fn addAtomsToRelaSections(self: *ZigObject, elf_file: *Elf) !void { const out_shndx = atom_ptr.output_section_index; const out_shdr = elf_file.sections.items(.shdr)[out_shndx]; if (out_shdr.sh_type == elf.SHT_NOBITS) continue; - const out_rela_shndx = for (elf_file.sections.items(.shdr), 0..) 
|out_rela_shdr, out_rela_shndx| { - if (out_rela_shdr.sh_type == elf.SHT_RELA and out_rela_shdr.sh_info == out_shndx) break out_rela_shndx; - } else unreachable; + const rela_sect_name = try std.fmt.allocPrintZ(gpa, ".rela{s}", .{ + elf_file.getShString(out_shdr.sh_name), + }); + defer gpa.free(rela_sect_name); + const out_rela_shndx = if (elf_file.sectionByName(rela_sect_name)) |out_rela_shndx| + out_rela_shndx + else + try elf_file.addRelaShdr(try elf_file.insertShString(rela_sect_name), out_shndx); + const out_rela_shdr = &elf_file.sections.items(.shdr)[out_rela_shndx]; + out_rela_shdr.sh_info = out_shndx; + out_rela_shdr.sh_link = elf_file.symtab_section_index.?; const atom_list = &elf_file.sections.items(.atom_list)[out_rela_shndx]; - const gpa = elf_file.base.comp.gpa; try atom_list.append(gpa, .{ .index = atom_index, .file = self.index }); } } From da60159d85f2b23e35b62f0626163d798413916f Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Fri, 30 Aug 2024 11:34:21 +0200 Subject: [PATCH 039/202] elf+dwarf: refer sections via section symbols --- src/link/Dwarf.zig | 85 +++++++++++++++++++++----------------- src/link/Elf.zig | 61 +++++++++++---------------- src/link/Elf/ZigObject.zig | 60 ++++++++++++--------------- 3 files changed, 100 insertions(+), 106 deletions(-) diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index c3197f765144..332c79e0fceb 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -375,12 +375,17 @@ pub const Section = struct { fn resize(sec: *Section, dwarf: *Dwarf, len: u64) UpdateError!void { if (len <= sec.len) return; if (dwarf.bin_file.cast(.elf)) |elf_file| { + const zo = elf_file.zigObjectPtr().?; + const atom = zo.symbol(sec.index).atom(elf_file).?; + const shndx = atom.output_section_index; if (sec == &dwarf.debug_frame.section) - try elf_file.growAllocSection(sec.index, len) + try elf_file.growAllocSection(shndx, len) else - try elf_file.growNonAllocSection(sec.index, len, @intCast(sec.alignment.toByteUnits().?), true); - 
const shdr = &elf_file.sections.items(.shdr)[sec.index]; - sec.off = shdr.sh_offset; + try elf_file.growNonAllocSection(shndx, len, @intCast(sec.alignment.toByteUnits().?), true); + const shdr = elf_file.sections.items(.shdr)[shndx]; + atom.size = shdr.sh_size; + atom.alignment = InternPool.Alignment.fromNonzeroByteUnits(shdr.sh_addralign); + sec.off = shdr.sh_offset + @as(u64, @intCast(atom.value)); sec.len = shdr.sh_size; } else if (dwarf.bin_file.cast(.macho)) |macho_file| { const header = if (macho_file.d_sym) |*d_sym| header: { @@ -402,7 +407,11 @@ pub const Section = struct { sec.off += len; sec.len -= len; if (dwarf.bin_file.cast(.elf)) |elf_file| { - const shdr = &elf_file.sections.items(.shdr)[sec.index]; + const zo = elf_file.zigObjectPtr().?; + const atom = zo.symbol(sec.index).atom(elf_file).?; + const shndx = atom.output_section_index; + const shdr = &elf_file.sections.items(.shdr)[shndx]; + atom.size = sec.len; shdr.sh_offset = sec.off; shdr.sh_size = sec.len; } else if (dwarf.bin_file.cast(.macho)) |macho_file| { @@ -891,9 +900,11 @@ const Entry = struct { if (std.debug.runtime_safety) { log.err("missing {} from {s}", .{ @as(Entry.Index, @enumFromInt(entry - unit.entries.items.ptr)), - std.mem.sliceTo(if (dwarf.bin_file.cast(.elf)) |elf_file| - elf_file.shstrtab.items[elf_file.sections.items(.shdr)[sec.index].sh_name..] 
- else if (dwarf.bin_file.cast(.macho)) |macho_file| + std.mem.sliceTo(if (dwarf.bin_file.cast(.elf)) |elf_file| sh_name: { + const zo = elf_file.zigObjectPtr().?; + const shndx = zo.symbol(sec.index).atom(elf_file).?.output_section_index; + break :sh_name elf_file.shstrtab.items[elf_file.sections.items(.shdr)[shndx].sh_name..]; + } else if (dwarf.bin_file.cast(.macho)) |macho_file| if (macho_file.d_sym) |*d_sym| &d_sym.sections.items[sec.index].segname else @@ -961,7 +972,8 @@ const Entry = struct { .none, .debug_frame => {}, .eh_frame => return if (dwarf.bin_file.cast(.elf)) |elf_file| { const zo = elf_file.zigObjectPtr().?; - const entry_addr: i64 = @intCast(entry_off - sec.off + elf_file.shdrs.items[sec.index].sh_addr); + const shndx = zo.symbol(sec.index).atom(elf_file).?.output_section_index; + const entry_addr: i64 = @intCast(entry_off - sec.off + elf_file.shdrs.items[shndx].sh_addr); for (entry.external_relocs.items) |reloc| { const symbol = zo.symbol(reloc.target_sym); try dwarf.resolveReloc( @@ -1877,34 +1889,7 @@ pub fn init(lf: *link.File, format: DW.Format) Dwarf { } pub fn reloadSectionMetadata(dwarf: *Dwarf) void { - if (dwarf.bin_file.cast(.elf)) |elf_file| { - for ([_]*Section{ - &dwarf.debug_abbrev.section, - &dwarf.debug_aranges.section, - &dwarf.debug_frame.section, - &dwarf.debug_info.section, - &dwarf.debug_line.section, - &dwarf.debug_line_str.section, - &dwarf.debug_loclists.section, - &dwarf.debug_rnglists.section, - &dwarf.debug_str.section, - }, [_]u32{ - elf_file.debug_abbrev_section_index.?, - elf_file.debug_aranges_section_index.?, - elf_file.eh_frame_section_index.?, - elf_file.debug_info_section_index.?, - elf_file.debug_line_section_index.?, - elf_file.debug_line_str_section_index.?, - elf_file.debug_loclists_section_index.?, - elf_file.debug_rnglists_section_index.?, - elf_file.debug_str_section_index.?, - }) |sec, section_index| { - const shdr = &elf_file.sections.items(.shdr)[section_index]; - sec.index = section_index; - sec.off 
= shdr.sh_offset; - sec.len = shdr.sh_size; - } - } else if (dwarf.bin_file.cast(.macho)) |macho_file| { + if (dwarf.bin_file.cast(.macho)) |macho_file| { if (macho_file.d_sym) |*d_sym| { for ([_]*Section{ &dwarf.debug_abbrev.section, @@ -1960,6 +1945,32 @@ pub fn reloadSectionMetadata(dwarf: *Dwarf) void { } pub fn initMetadata(dwarf: *Dwarf) UpdateError!void { + if (dwarf.bin_file.cast(.elf)) |elf_file| { + const zo = elf_file.zigObjectPtr().?; + for ([_]*Section{ + &dwarf.debug_abbrev.section, + &dwarf.debug_aranges.section, + &dwarf.debug_frame.section, + &dwarf.debug_info.section, + &dwarf.debug_line.section, + &dwarf.debug_line_str.section, + &dwarf.debug_loclists.section, + &dwarf.debug_rnglists.section, + &dwarf.debug_str.section, + }, [_]u32{ + zo.debug_abbrev_index.?, + zo.debug_aranges_index.?, + zo.eh_frame_index.?, + zo.debug_info_index.?, + zo.debug_line_index.?, + zo.debug_line_str_index.?, + zo.debug_loclists_index.?, + zo.debug_rnglists_index.?, + zo.debug_str_index.?, + }) |sec, sym_index| { + sec.index = sym_index; + } + } dwarf.reloadSectionMetadata(); dwarf.debug_abbrev.section.pad_to_ideal = false; diff --git a/src/link/Elf.zig b/src/link/Elf.zig index d44892836cf1..29407590097b 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -114,15 +114,6 @@ rela_plt: std.ArrayListUnmanaged(elf.Elf64_Rela) = .{}, /// Applies only to a relocatable. 
comdat_group_sections: std.ArrayListUnmanaged(ComdatGroupSection) = .{}, -debug_info_section_index: ?u32 = null, -debug_abbrev_section_index: ?u32 = null, -debug_str_section_index: ?u32 = null, -debug_aranges_section_index: ?u32 = null, -debug_line_section_index: ?u32 = null, -debug_line_str_section_index: ?u32 = null, -debug_loclists_section_index: ?u32 = null, -debug_rnglists_section_index: ?u32 = null, - copy_rel_section_index: ?u32 = null, dynamic_section_index: ?u32 = null, dynstrtab_section_index: ?u32 = null, @@ -636,24 +627,31 @@ pub fn growNonAllocSection( } pub fn markDirty(self: *Elf, shdr_index: u32) void { - const zig_object = self.zigObjectPtr().?; - if (zig_object.dwarf) |_| { - if (self.debug_info_section_index.? == shdr_index) { - zig_object.debug_info_section_dirty = true; - } else if (self.debug_abbrev_section_index.? == shdr_index) { - zig_object.debug_abbrev_section_dirty = true; - } else if (self.debug_str_section_index.? == shdr_index) { - zig_object.debug_str_section_dirty = true; - } else if (self.debug_aranges_section_index.? == shdr_index) { - zig_object.debug_aranges_section_dirty = true; - } else if (self.debug_line_section_index.? == shdr_index) { - zig_object.debug_line_section_dirty = true; - } else if (self.debug_line_str_section_index.? == shdr_index) { - zig_object.debug_line_str_section_dirty = true; - } else if (self.debug_loclists_section_index.? == shdr_index) { - zig_object.debug_loclists_section_dirty = true; - } else if (self.debug_rnglists_section_index.? 
== shdr_index) { - zig_object.debug_rnglists_section_dirty = true; + if (self.zigObjectPtr()) |zo| { + for ([_]?Symbol.Index{ + zo.debug_info_index, + zo.debug_abbrev_index, + zo.debug_aranges_index, + zo.debug_str_index, + zo.debug_line_index, + zo.debug_line_str_index, + zo.debug_loclists_index, + zo.debug_rnglists_index, + }, [_]*bool{ + &zo.debug_info_section_dirty, + &zo.debug_abbrev_section_dirty, + &zo.debug_aranges_section_dirty, + &zo.debug_str_section_dirty, + &zo.debug_line_section_dirty, + &zo.debug_line_str_section_dirty, + &zo.debug_loclists_section_dirty, + &zo.debug_rnglists_section_dirty, + }) |maybe_sym_index, dirty| { + const sym_index = maybe_sym_index orelse continue; + if (zo.symbol(sym_index).atom(self).?.output_section_index == shdr_index) { + dirty.* = true; + break; + } } } } @@ -3473,14 +3471,6 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void { &self.copy_rel_section_index, &self.versym_section_index, &self.verneed_section_index, - &self.debug_info_section_index, - &self.debug_abbrev_section_index, - &self.debug_str_section_index, - &self.debug_aranges_section_index, - &self.debug_line_section_index, - &self.debug_line_str_section_index, - &self.debug_loclists_section_index, - &self.debug_rnglists_section_index, }) |maybe_index| { if (maybe_index.*) |*index| { index.* = backlinks[index.*]; @@ -3505,7 +3495,6 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void { const atom_ptr = zo.atom(atom_index) orelse continue; atom_ptr.output_section_index = backlinks[atom_ptr.output_section_index]; } - if (zo.dwarf) |*dwarf| dwarf.reloadSectionMetadata(); } for (self.comdat_group_sections.items) |*cg| { diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 27fb121b04e6..8de8551463b8 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -99,8 +99,8 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .dwarf => |v| { var dwarf = Dwarf.init(&elf_file.base, 
v); - if (elf_file.debug_str_section_index == null) { - elf_file.debug_str_section_index = try elf_file.addSection(.{ + if (self.debug_str_index == null) { + const osec = try elf_file.addSection(.{ .name = try elf_file.insertShString(".debug_str"), .flags = elf.SHF_MERGE | elf.SHF_STRINGS, .entsize = 1, @@ -108,51 +108,51 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 1, }); self.debug_str_section_dirty = true; - self.debug_str_index = try self.addSectionSymbol(gpa, ".debug_str", .@"1", elf_file.debug_str_section_index.?); + self.debug_str_index = try self.addSectionSymbol(gpa, ".debug_str", .@"1", osec); } - if (elf_file.debug_info_section_index == null) { - elf_file.debug_info_section_index = try elf_file.addSection(.{ + if (self.debug_info_index == null) { + const osec = try elf_file.addSection(.{ .name = try elf_file.insertShString(".debug_info"), .type = elf.SHT_PROGBITS, .addralign = 1, }); self.debug_info_section_dirty = true; - self.debug_info_index = try self.addSectionSymbol(gpa, ".debug_info", .@"1", elf_file.debug_info_section_index.?); + self.debug_info_index = try self.addSectionSymbol(gpa, ".debug_info", .@"1", osec); } - if (elf_file.debug_abbrev_section_index == null) { - elf_file.debug_abbrev_section_index = try elf_file.addSection(.{ + if (self.debug_abbrev_index == null) { + const osec = try elf_file.addSection(.{ .name = try elf_file.insertShString(".debug_abbrev"), .type = elf.SHT_PROGBITS, .addralign = 1, }); self.debug_abbrev_section_dirty = true; - self.debug_abbrev_index = try self.addSectionSymbol(gpa, ".debug_abbrev", .@"1", elf_file.debug_abbrev_section_index.?); + self.debug_abbrev_index = try self.addSectionSymbol(gpa, ".debug_abbrev", .@"1", osec); } - if (elf_file.debug_aranges_section_index == null) { - elf_file.debug_aranges_section_index = try elf_file.addSection(.{ + if (self.debug_aranges_index == null) { + const osec = try elf_file.addSection(.{ .name = try 
elf_file.insertShString(".debug_aranges"), .type = elf.SHT_PROGBITS, .addralign = 16, }); self.debug_aranges_section_dirty = true; - self.debug_aranges_index = try self.addSectionSymbol(gpa, ".debug_aranges", .@"16", elf_file.debug_aranges_section_index.?); + self.debug_aranges_index = try self.addSectionSymbol(gpa, ".debug_aranges", .@"16", osec); } - if (elf_file.debug_line_section_index == null) { - elf_file.debug_line_section_index = try elf_file.addSection(.{ + if (self.debug_line_index == null) { + const osec = try elf_file.addSection(.{ .name = try elf_file.insertShString(".debug_line"), .type = elf.SHT_PROGBITS, .addralign = 1, }); self.debug_line_section_dirty = true; - self.debug_line_index = try self.addSectionSymbol(gpa, ".debug_line", .@"1", elf_file.debug_line_section_index.?); + self.debug_line_index = try self.addSectionSymbol(gpa, ".debug_line", .@"1", osec); } - if (elf_file.debug_line_str_section_index == null) { - elf_file.debug_line_str_section_index = try elf_file.addSection(.{ + if (self.debug_line_str_index == null) { + const osec = try elf_file.addSection(.{ .name = try elf_file.insertShString(".debug_line_str"), .flags = elf.SHF_MERGE | elf.SHF_STRINGS, .entsize = 1, @@ -160,31 +160,31 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = 1, }); self.debug_line_str_section_dirty = true; - self.debug_line_str_index = try self.addSectionSymbol(gpa, ".debug_line_str", .@"1", elf_file.debug_line_str_section_index.?); + self.debug_line_str_index = try self.addSectionSymbol(gpa, ".debug_line_str", .@"1", osec); } - if (elf_file.debug_loclists_section_index == null) { - elf_file.debug_loclists_section_index = try elf_file.addSection(.{ + if (self.debug_loclists_index == null) { + const osec = try elf_file.addSection(.{ .name = try elf_file.insertShString(".debug_loclists"), .type = elf.SHT_PROGBITS, .addralign = 1, }); self.debug_loclists_section_dirty = true; - self.debug_loclists_index = try 
self.addSectionSymbol(gpa, ".debug_loclists", .@"1", elf_file.debug_loclists_section_index.?); + self.debug_loclists_index = try self.addSectionSymbol(gpa, ".debug_loclists", .@"1", osec); } - if (elf_file.debug_rnglists_section_index == null) { - elf_file.debug_rnglists_section_index = try elf_file.addSection(.{ + if (self.debug_rnglists_index == null) { + const osec = try elf_file.addSection(.{ .name = try elf_file.insertShString(".debug_rnglists"), .type = elf.SHT_PROGBITS, .addralign = 1, }); self.debug_rnglists_section_dirty = true; - self.debug_rnglists_index = try self.addSectionSymbol(gpa, ".debug_rnglists", .@"1", elf_file.debug_rnglists_section_index.?); + self.debug_rnglists_index = try self.addSectionSymbol(gpa, ".debug_rnglists", .@"1", osec); } - if (elf_file.eh_frame_section_index == null) { - elf_file.eh_frame_section_index = try elf_file.addSection(.{ + if (self.eh_frame_index == null) { + const osec = try elf_file.addSection(.{ .name = try elf_file.insertShString(".eh_frame"), .type = if (elf_file.getTarget().cpu.arch == .x86_64) elf.SHT_X86_64_UNWIND @@ -194,7 +194,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { .addralign = ptr_size, }); self.eh_frame_section_dirty = true; - self.eh_frame_index = try self.addSectionSymbol(gpa, ".eh_frame", Atom.Alignment.fromNonzeroByteUnits(ptr_size), elf_file.eh_frame_section_index.?); + self.eh_frame_index = try self.addSectionSymbol(gpa, ".eh_frame", Atom.Alignment.fromNonzeroByteUnits(ptr_size), osec); } try dwarf.initMetadata(); @@ -328,12 +328,6 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi const sym = self.symbol(sym_index); const atom_ptr = self.atom(sym.ref.index).?; if (!atom_ptr.alive) continue; - const shndx = sym.outputShndx(elf_file).?; - const shdr = elf_file.sections.items(.shdr)[shndx]; - const esym = &self.symtab.items(.elf_sym)[sym.esym_index]; - esym.st_size = shdr.sh_size; - atom_ptr.size = shdr.sh_size; - 
atom_ptr.alignment = Atom.Alignment.fromNonzeroByteUnits(shdr.sh_addralign); log.debug("parsing relocs in {s}", .{sym.name(elf_file)}); From 3e100c5daba0f64695eab0bb4216b9d242229ba6 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Fri, 30 Aug 2024 15:11:10 +0200 Subject: [PATCH 040/202] dwarf: make Section.off a function --- src/link/Dwarf.zig | 63 +++++++++++++++++++++----------------- src/link/Elf.zig | 2 +- src/link/Elf/Atom.zig | 25 +++++++++------ src/link/Elf/ZigObject.zig | 15 +++------ 4 files changed, 56 insertions(+), 49 deletions(-) diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 332c79e0fceb..97bc15ced8b1 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -261,7 +261,6 @@ pub const Section = struct { index: u32, first: Unit.Index.Optional, last: Unit.Index.Optional, - off: u64, len: u64, units: std.ArrayListUnmanaged(Unit), @@ -284,9 +283,8 @@ pub const Section = struct { .index = std.math.maxInt(u32), .first = .none, .last = .none, - .off = 0, - .len = 0, .units = .{}, + .len = 0, }; fn deinit(sec: *Section, gpa: std.mem.Allocator) void { @@ -295,6 +293,20 @@ pub const Section = struct { sec.* = undefined; } + fn off(sec: Section, dwarf: *Dwarf) u64 { + if (dwarf.bin_file.cast(.elf)) |elf_file| { + const zo = elf_file.zigObjectPtr().?; + const atom = zo.symbol(sec.index).atom(elf_file).?; + return atom.offset(elf_file); + } else if (dwarf.bin_file.cast(.macho)) |macho_file| { + const header = if (macho_file.d_sym) |d_sym| + d_sym.sections.items[sec.index] + else + macho_file.sections.items(.header)[sec.index]; + return header.offset; + } else unreachable; + } + fn addUnit(sec: *Section, header_len: u32, trailer_len: u32, dwarf: *Dwarf) UpdateError!Unit.Index { const unit: Unit.Index = @enumFromInt(sec.units.items.len); const unit_ptr = try sec.units.addOne(dwarf.gpa); @@ -306,9 +318,9 @@ pub const Section = struct { .next = .none, .first = .none, .last = .none, - .off = 0, .header_len = aligned_header_len, .trailer_len = 
aligned_trailer_len, + .off = 0, .len = aligned_header_len + aligned_trailer_len, .entries = .{}, .cross_unit_relocs = .{}, @@ -385,7 +397,6 @@ pub const Section = struct { const shdr = elf_file.sections.items(.shdr)[shndx]; atom.size = shdr.sh_size; atom.alignment = InternPool.Alignment.fromNonzeroByteUnits(shdr.sh_addralign); - sec.off = shdr.sh_offset + @as(u64, @intCast(atom.value)); sec.len = shdr.sh_size; } else if (dwarf.bin_file.cast(.macho)) |macho_file| { const header = if (macho_file.d_sym) |*d_sym| header: { @@ -395,7 +406,6 @@ pub const Section = struct { try macho_file.growSection(@intCast(sec.index), len); break :header &macho_file.sections.items(.header)[sec.index]; }; - sec.off = header.offset; sec.len = header.size; } } @@ -404,7 +414,6 @@ pub const Section = struct { const len = sec.getUnit(sec.first.unwrap() orelse return).off; if (len == 0) return; for (sec.units.items) |*unit| unit.off -= len; - sec.off += len; sec.len -= len; if (dwarf.bin_file.cast(.elf)) |elf_file| { const zo = elf_file.zigObjectPtr().?; @@ -412,14 +421,14 @@ pub const Section = struct { const shndx = atom.output_section_index; const shdr = &elf_file.sections.items(.shdr)[shndx]; atom.size = sec.len; - shdr.sh_offset = sec.off; + shdr.sh_offset += len; shdr.sh_size = sec.len; } else if (dwarf.bin_file.cast(.macho)) |macho_file| { const header = if (macho_file.d_sym) |*d_sym| &d_sym.sections.items[sec.index] else &macho_file.sections.items(.header)[sec.index]; - header.offset = @intCast(sec.off); + header.offset += @intCast(len); header.size = sec.len; } } @@ -548,9 +557,9 @@ const Unit = struct { fn move(unit: *Unit, sec: *Section, dwarf: *Dwarf, new_off: u32) UpdateError!void { if (unit.off == new_off) return; if (try dwarf.getFile().?.copyRangeAll( - sec.off + unit.off, + sec.off(dwarf) + unit.off, dwarf.getFile().?, - sec.off + new_off, + sec.off(dwarf) + new_off, unit.len, ) != unit.len) return error.InputOutput; unit.off = new_off; @@ -582,7 +591,7 @@ const Unit = 
struct { fn replaceHeader(unit: *Unit, sec: *Section, dwarf: *Dwarf, contents: []const u8) UpdateError!void { assert(contents.len == unit.header_len); - try dwarf.getFile().?.pwriteAll(contents, sec.off + unit.off); + try dwarf.getFile().?.pwriteAll(contents, sec.off(dwarf) + unit.off); } fn writeTrailer(unit: *Unit, sec: *Section, dwarf: *Dwarf) UpdateError!void { @@ -614,7 +623,7 @@ const Unit = struct { assert(fbs.pos == extended_op_bytes + op_len_bytes); writer.writeByte(DW.LNE.padding) catch unreachable; assert(fbs.pos >= unit.trailer_len and fbs.pos <= len); - return dwarf.getFile().?.pwriteAll(fbs.getWritten(), sec.off + start); + return dwarf.getFile().?.pwriteAll(fbs.getWritten(), sec.off(dwarf) + start); } var trailer = try std.ArrayList(u8).initCapacity(dwarf.gpa, len); defer trailer.deinit(); @@ -673,11 +682,11 @@ const Unit = struct { assert(trailer.items.len == unit.trailer_len); trailer.appendNTimesAssumeCapacity(fill_byte, len - unit.trailer_len); assert(trailer.items.len == len); - try dwarf.getFile().?.pwriteAll(trailer.items, sec.off + start); + try dwarf.getFile().?.pwriteAll(trailer.items, sec.off(dwarf) + start); } fn resolveRelocs(unit: *Unit, sec: *Section, dwarf: *Dwarf) RelocError!void { - const unit_off = sec.off + unit.off; + const unit_off = sec.off(dwarf) + unit.off; for (unit.cross_unit_relocs.items) |reloc| { const target_unit = sec.getUnit(reloc.target_unit); try dwarf.resolveReloc( @@ -764,12 +773,12 @@ const Entry = struct { dwarf.writeInt(unit_len[0..dwarf.sectionOffsetBytes()], len - dwarf.unitLengthBytes()); try dwarf.getFile().?.pwriteAll( unit_len[0..dwarf.sectionOffsetBytes()], - sec.off + unit.off + unit.header_len + entry.off, + sec.off(dwarf) + unit.off + unit.header_len + entry.off, ); const buf = try dwarf.gpa.alloc(u8, len - entry.len); defer dwarf.gpa.free(buf); @memset(buf, DW.CFA.nop); - try dwarf.getFile().?.pwriteAll(buf, sec.off + unit.off + unit.header_len + start); + try dwarf.getFile().?.pwriteAll(buf, 
sec.off(dwarf) + unit.off + unit.header_len + start); return; } const len = unit.getEntry(entry.next.unwrap() orelse return).off - start; @@ -825,7 +834,7 @@ const Entry = struct { }, } else assert(!sec.pad_to_ideal and len == 0); assert(fbs.pos <= len); - try dwarf.getFile().?.pwriteAll(fbs.getWritten(), sec.off + unit.off + unit.header_len + start); + try dwarf.getFile().?.pwriteAll(fbs.getWritten(), sec.off(dwarf) + unit.off + unit.header_len + start); } fn resize(entry_ptr: *Entry, unit: *Unit, sec: *Section, dwarf: *Dwarf, len: u32) UpdateError!void { @@ -860,15 +869,15 @@ const Entry = struct { fn replace(entry_ptr: *Entry, unit: *Unit, sec: *Section, dwarf: *Dwarf, contents: []const u8) UpdateError!void { assert(contents.len == entry_ptr.len); - try dwarf.getFile().?.pwriteAll(contents, sec.off + unit.off + unit.header_len + entry_ptr.off); + try dwarf.getFile().?.pwriteAll(contents, sec.off(dwarf) + unit.off + unit.header_len + entry_ptr.off); if (false) { const buf = try dwarf.gpa.alloc(u8, sec.len); defer dwarf.gpa.free(buf); - _ = try dwarf.getFile().?.preadAll(buf, sec.off); + _ = try dwarf.getFile().?.preadAll(buf, sec.off(dwarf)); log.info("Section{{ .first = {}, .last = {}, .off = 0x{x}, .len = 0x{x} }}", .{ @intFromEnum(sec.first), @intFromEnum(sec.last), - sec.off, + sec.off(dwarf), sec.len, }); for (sec.units.items) |*unit_ptr| { @@ -935,7 +944,7 @@ const Entry = struct { } fn resolveRelocs(entry: *Entry, unit: *Unit, sec: *Section, dwarf: *Dwarf) RelocError!void { - const entry_off = sec.off + unit.off + unit.header_len + entry.off; + const entry_off = sec.off(dwarf) + unit.off + unit.header_len + entry.off; for (entry.cross_entry_relocs.items) |reloc| { try dwarf.resolveReloc( entry_off + reloc.source_off, @@ -973,7 +982,7 @@ const Entry = struct { .eh_frame => return if (dwarf.bin_file.cast(.elf)) |elf_file| { const zo = elf_file.zigObjectPtr().?; const shndx = zo.symbol(sec.index).atom(elf_file).?.output_section_index; - const entry_addr: i64 
= @intCast(entry_off - sec.off + elf_file.shdrs.items[shndx].sh_addr); + const entry_addr: i64 = @intCast(entry_off - sec.off(dwarf) + elf_file.shdrs.items[shndx].sh_addr); for (entry.external_relocs.items) |reloc| { const symbol = zo.symbol(reloc.target_sym); try dwarf.resolveReloc( @@ -1912,7 +1921,6 @@ pub fn reloadSectionMetadata(dwarf: *Dwarf) void { }) |sec, sect_index| { const header = &d_sym.sections.items[sect_index]; sec.index = sect_index; - sec.off = header.offset; sec.len = header.size; } } else { @@ -1937,7 +1945,6 @@ pub fn reloadSectionMetadata(dwarf: *Dwarf) void { }) |sec, sect_index| { const header = &macho_file.sections.items(.header)[sect_index]; sec.index = sect_index; - sec.off = header.offset; sec.len = header.size; } } @@ -2534,7 +2541,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool var abbrev_code_buf: [AbbrevCode.decl_bytes]u8 = undefined; if (try dwarf.getFile().?.preadAll( &abbrev_code_buf, - dwarf.debug_info.section.off + unit_ptr.off + unit_ptr.header_len + entry_ptr.off, + dwarf.debug_info.section.off(dwarf) + unit_ptr.off + unit_ptr.header_len + entry_ptr.off, ) != abbrev_code_buf.len) return error.InputOutput; var abbrev_code_fbs = std.io.fixedBufferStream(&abbrev_code_buf); const abbrev_code: AbbrevCode = @enumFromInt( @@ -3945,7 +3952,7 @@ pub fn flushModule(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void { if (dwarf.debug_str.section.dirty) { const contents = dwarf.debug_str.contents.items; try dwarf.debug_str.section.resize(dwarf, contents.len); - try dwarf.getFile().?.pwriteAll(contents, dwarf.debug_str.section.off); + try dwarf.getFile().?.pwriteAll(contents, dwarf.debug_str.section.off(dwarf)); dwarf.debug_str.section.dirty = false; } if (dwarf.debug_line.section.dirty) { @@ -4051,7 +4058,7 @@ pub fn flushModule(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void { if (dwarf.debug_line_str.section.dirty) { const contents = dwarf.debug_line_str.contents.items; try 
dwarf.debug_line_str.section.resize(dwarf, contents.len); - try dwarf.getFile().?.pwriteAll(contents, dwarf.debug_line_str.section.off); + try dwarf.getFile().?.pwriteAll(contents, dwarf.debug_line_str.section.off(dwarf)); dwarf.debug_line_str.section.dirty = false; } if (dwarf.debug_loclists.section.dirty) { diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 29407590097b..f182a4a86837 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -1070,7 +1070,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod if (shdr.sh_type == elf.SHT_NOBITS) continue; const code = try zo.codeAlloc(self, atom_index); defer gpa.free(code); - const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value)); + const file_offset = atom_ptr.offset(self); atom_ptr.resolveRelocsAlloc(self, code) catch |err| switch (err) { error.RelocFailure, error.RelaxFailure => has_reloc_errors = true, error.UnsupportedCpuArch => { diff --git a/src/link/Elf/Atom.zig b/src/link/Elf/Atom.zig index 3d1fe04fb507..ab0e98440b90 100644 --- a/src/link/Elf/Atom.zig +++ b/src/link/Elf/Atom.zig @@ -51,6 +51,11 @@ pub fn address(self: Atom, elf_file: *Elf) i64 { return @as(i64, @intCast(shdr.sh_addr)) + self.value; } +pub fn offset(self: Atom, elf_file: *Elf) u64 { + const shdr = elf_file.sections.items(.shdr)[self.output_section_index]; + return shdr.sh_offset + @as(u64, @intCast(self.value)); +} + pub fn ref(self: Atom) Elf.Ref { return .{ .index = self.atom_index, .file = self.file_index }; } @@ -1673,7 +1678,7 @@ const aarch64 = struct { => { // TODO: NC means no overflow check const taddr = @as(u64, @intCast(S + A)); - const offset: u12 = switch (r_type) { + const off: u12 = switch (r_type) { .LDST8_ABS_LO12_NC => @truncate(taddr), .LDST16_ABS_LO12_NC => @divExact(@as(u12, @truncate(taddr)), 2), .LDST32_ABS_LO12_NC => @divExact(@as(u12, @truncate(taddr)), 4), @@ -1681,7 +1686,7 @@ const aarch64 = struct { .LDST128_ABS_LO12_NC => @divExact(@as(u12, @truncate(taddr)), 
16), else => unreachable, }; - aarch64_util.writeLoadStoreRegInst(offset, code); + aarch64_util.writeLoadStoreRegInst(off, code); }, .TLSLE_ADD_TPREL_HI12 => { @@ -1705,8 +1710,8 @@ const aarch64 = struct { .TLSIE_LD64_GOTTPREL_LO12_NC => { const S_ = target.gotTpAddress(elf_file); relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A }); - const offset: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8); - aarch64_util.writeLoadStoreRegInst(offset, code); + const off: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8); + aarch64_util.writeLoadStoreRegInst(off, code); }, .TLSGD_ADR_PAGE21 => { @@ -1719,8 +1724,8 @@ const aarch64 = struct { .TLSGD_ADD_LO12_NC => { const S_ = target.tlsGdAddress(elf_file); relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A }); - const offset: u12 = @truncate(@as(u64, @bitCast(S_ + A))); - aarch64_util.writeAddImmInst(offset, code); + const off: u12 = @truncate(@as(u64, @bitCast(S_ + A))); + aarch64_util.writeAddImmInst(off, code); }, .TLSDESC_ADR_PAGE21 => { @@ -1739,8 +1744,8 @@ const aarch64 = struct { if (target.flags.has_tlsdesc) { const S_ = target.tlsDescAddress(elf_file); relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A }); - const offset: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8); - aarch64_util.writeLoadStoreRegInst(offset, code); + const off: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8); + aarch64_util.writeLoadStoreRegInst(off, code); } else { relocs_log.debug(" relaxing ldr => nop", .{}); mem.writeInt(u32, code, Instruction.nop().toU32(), .little); @@ -1751,8 +1756,8 @@ const aarch64 = struct { if (target.flags.has_tlsdesc) { const S_ = target.tlsDescAddress(elf_file); relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A }); - const offset: u12 = @truncate(@as(u64, @bitCast(S_ + A))); - aarch64_util.writeAddImmInst(offset, code); + const off: u12 = @truncate(@as(u64, @bitCast(S_ + A))); + aarch64_util.writeAddImmInst(off, code); } else { const 
old_inst = Instruction{ .add_subtract_immediate = mem.bytesToValue(std.meta.TagPayload( diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 8de8551463b8..8f16672591d3 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -906,7 +906,7 @@ pub fn codeAlloc(self: *ZigObject, elf_file: *Elf, atom_index: Atom.Index) ![]u8 return code; } - const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value)); + const file_offset = atom_ptr.offset(elf_file); const size = std.math.cast(usize, atom_ptr.size) orelse return error.Overflow; const code = try gpa.alloc(u8, size); errdefer gpa.free(code); @@ -1338,7 +1338,7 @@ fn updateNavCode( const shdr = elf_file.sections.items(.shdr)[shdr_index]; if (shdr.sh_type != elf.SHT_NOBITS) { - const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value)); + const file_offset = atom_ptr.offset(elf_file); try elf_file.base.file.?.pwriteAll(code, file_offset); log.debug("writing {} from 0x{x} to 0x{x}", .{ nav.fqn.fmt(ip), file_offset, file_offset + code.len }); } @@ -1716,9 +1716,7 @@ fn updateLazySymbol( local_sym.value = 0; local_esym.st_value = 0; - const shdr = elf_file.sections.items(.shdr)[output_section_index]; - const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value)); - try elf_file.base.file.?.pwriteAll(code, file_offset); + try elf_file.base.file.?.pwriteAll(code, atom_ptr.offset(elf_file)); } const LowerConstResult = union(enum) { @@ -1771,9 +1769,7 @@ fn lowerConst( try self.allocateAtom(atom_ptr, elf_file); errdefer self.freeNavMetadata(elf_file, sym_index); - const shdr = elf_file.sections.items(.shdr)[output_section_index]; - const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value)); - try elf_file.base.file.?.pwriteAll(code, file_offset); + try elf_file.base.file.?.pwriteAll(code, atom_ptr.offset(elf_file)); return .{ .ok = sym_index }; } @@ -1935,8 +1931,7 @@ fn trampolineSize(cpu_arch: std.Target.Cpu.Arch) u64 { fn 
writeTrampoline(tr_sym: Symbol, target: Symbol, elf_file: *Elf) !void { const atom_ptr = tr_sym.atom(elf_file).?; - const shdr = elf_file.sections.items(.shdr)[atom_ptr.output_section_index]; - const fileoff = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value)); + const fileoff = atom_ptr.offset(elf_file); const source_addr = tr_sym.address(.{}, elf_file); const target_addr = target.address(.{ .trampoline = false }, elf_file); var buf: [max_trampoline_len]u8 = undefined; From 6ec5df3898d594dd97d1b557aa7bdde38a9998b6 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Fri, 30 Aug 2024 22:40:27 +0200 Subject: [PATCH 041/202] elf: allocate .tdata and .tbss using allocateAtom mechanics --- src/link/Elf.zig | 19 ++-------- src/link/Elf/ZigObject.zig | 78 +++++++++++++++++++------------------- 2 files changed, 43 insertions(+), 54 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index f182a4a86837..2c1bd34879f4 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -3556,24 +3556,9 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void { fn updateSectionSizes(self: *Elf) !void { const slice = self.sections.slice(); - for (slice.items(.shdr), slice.items(.atom_list), 0..) 
|*shdr, atom_list, shndx| { + for (slice.items(.shdr), slice.items(.atom_list)) |*shdr, atom_list| { if (atom_list.items.len == 0) continue; if (self.requiresThunks() and shdr.sh_flags & elf.SHF_EXECINSTR != 0) continue; - if (self.zigObjectPtr()) |zo| blk: { - const sym_index = for ([_]?Symbol.Index{ - zo.text_index, - zo.rodata_index, - zo.data_relro_index, - zo.data_index, - zo.bss_index, - }) |maybe_idx| { - if (maybe_idx) |idx| break idx; - } else break :blk; - const atom_ptr = zo.symbol(sym_index).atom(self).?; - if (shndx == atom_ptr.output_section_index) { - shdr.sh_size = atom_ptr.size; - } - } for (atom_list.items) |ref| { const atom_ptr = self.atom(ref) orelse continue; if (!atom_ptr.alive) continue; @@ -3908,6 +3893,7 @@ pub fn allocateAllocSections(self: *Elf) !void { zo.rodata_index, zo.data_relro_index, zo.data_index, + zo.tdata_index, zo.eh_frame_index, }) |maybe_sym_index| { const sect_sym_index = maybe_sym_index orelse continue; @@ -4067,6 +4053,7 @@ fn writeAtoms(self: *Elf) !void { zo.rodata_index, zo.data_relro_index, zo.data_index, + zo.tdata_index, zo.eh_frame_index, zo.debug_info_index, zo.debug_abbrev_index, diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 8f16672591d3..2d20895d2e33 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -56,6 +56,8 @@ rodata_index: ?Symbol.Index = null, data_relro_index: ?Symbol.Index = null, data_index: ?Symbol.Index = null, bss_index: ?Symbol.Index = null, +tdata_index: ?Symbol.Index = null, +tbss_index: ?Symbol.Index = null, eh_frame_index: ?Symbol.Index = null, debug_info_index: ?Symbol.Index = null, debug_abbrev_index: ?Symbol.Index = null, @@ -233,10 +235,6 @@ pub fn deinit(self: *ZigObject, allocator: Allocator) void { meta.exports.deinit(allocator); } self.uavs.deinit(allocator); - - for (self.tls_variables.values()) |*tlv| { - tlv.deinit(allocator); - } self.tls_variables.deinit(allocator); if (self.dwarf) |*dwarf| { @@ -898,14 +896,6 @@ pub fn 
writeSymtab(self: ZigObject, elf_file: *Elf) void { pub fn codeAlloc(self: *ZigObject, elf_file: *Elf, atom_index: Atom.Index) ![]u8 { const gpa = elf_file.base.comp.gpa; const atom_ptr = self.atom(atom_index).?; - const shdr = &elf_file.sections.items(.shdr)[atom_ptr.output_section_index]; - - if (shdr.sh_flags & elf.SHF_TLS != 0) { - const tlv = self.tls_variables.get(atom_index).?; - const code = try gpa.dupe(u8, tlv.code); - return code; - } - const file_offset = atom_ptr.offset(elf_file); const size = std.math.cast(usize, atom_ptr.size) orelse return error.Overflow; const code = try gpa.alloc(u8, size); @@ -1161,18 +1151,29 @@ fn getNavShdrIndex( const is_bss = !has_relocs and for (code) |byte| { if (byte != 0) break false; } else true; - if (is_bss) return elf_file.sectionByName(".tbss") orelse try elf_file.addSection(.{ - .type = elf.SHT_NOBITS, - .flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS, - .name = try elf_file.insertShString(".tbss"), - .offset = std.math.maxInt(u64), - }); - return elf_file.sectionByName(".tdata") orelse try elf_file.addSection(.{ + if (is_bss) { + if (self.tbss_index) |symbol_index| + return self.symbol(symbol_index).atom(elf_file).?.output_section_index; + const osec = try elf_file.addSection(.{ + .name = try elf_file.insertShString(".tbss"), + .flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS, + .type = elf.SHT_NOBITS, + .addralign = 1, + }); + self.tbss_index = try self.addSectionSymbol(gpa, ".tbss", .@"1", osec); + return osec; + } + if (self.tdata_index) |symbol_index| + return self.symbol(symbol_index).atom(elf_file).?.output_section_index; + const osec = try elf_file.addSection(.{ .type = elf.SHT_PROGBITS, .flags = elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_TLS, .name = try elf_file.insertShString(".tdata"), + .addralign = 1, .offset = std.math.maxInt(u64), }); + self.tdata_index = try self.addSectionSymbol(gpa, ".tdata", .@"1", osec); + return osec; } if (is_const) { if (self.data_relro_index) |symbol_index| @@ 
-1367,15 +1368,11 @@ fn updateTlv( const atom_ptr = sym.atom(elf_file).?; const name_offset = try self.strtab.insert(gpa, nav.fqn.toSlice(ip)); - sym.value = 0; - sym.name_offset = name_offset; - - atom_ptr.output_section_index = shndx; atom_ptr.alive = true; atom_ptr.name_offset = name_offset; + atom_ptr.output_section_index = shndx; sym.name_offset = name_offset; - esym.st_value = 0; esym.st_name = name_offset; esym.st_info = elf.STT_TLS; esym.st_size = code.len; @@ -1383,21 +1380,25 @@ fn updateTlv( atom_ptr.alignment = required_alignment; atom_ptr.size = code.len; - self.navs.getPtr(nav_index).?.allocated = true; + const gop = try self.tls_variables.getOrPut(gpa, atom_ptr.atom_index); + assert(!gop.found_existing); // TODO incremental updates - { - const gop = try self.tls_variables.getOrPut(gpa, atom_ptr.atom_index); - assert(!gop.found_existing); // TODO incremental updates - gop.value_ptr.* = .{ .symbol_index = sym_index }; + try self.allocateAtom(atom_ptr, elf_file); + sym.value = 0; + esym.st_value = 0; - // We only store the data for the TLV if it's non-zerofill. 
- if (elf_file.sections.items(.shdr)[shndx].sh_type != elf.SHT_NOBITS) { - gop.value_ptr.code = try gpa.dupe(u8, code); - } - } + self.navs.getPtr(nav_index).?.allocated = true; - const atom_list = &elf_file.sections.items(.atom_list)[atom_ptr.output_section_index]; - try atom_list.append(gpa, .{ .index = atom_ptr.atom_index, .file = self.index }); + const shdr = elf_file.sections.items(.shdr)[shndx]; + if (shdr.sh_type != elf.SHT_NOBITS) { + const file_offset = atom_ptr.offset(elf_file); + try elf_file.base.file.?.pwriteAll(code, file_offset); + log.debug("writing TLV {s} from 0x{x} to 0x{x}", .{ + atom_ptr.name(elf_file), + file_offset, + file_offset + code.len, + }); + } } pub fn updateFunc( @@ -1994,8 +1995,9 @@ fn allocateAtom(self: *ZigObject, atom_ptr: *Atom, elf_file: *Elf) !void { const sect_atom_ptr = for ([_]?Symbol.Index{ self.text_index, self.rodata_index, - self.data_index, self.data_relro_index, + self.data_index, + self.tdata_index, }) |maybe_sym_index| { const sect_sym_index = maybe_sym_index orelse continue; const sect_atom_ptr = self.symbol(sect_sym_index).atom(elf_file).?; @@ -2305,7 +2307,7 @@ const AtomList = std.ArrayListUnmanaged(Atom.Index); const NavTable = std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, AvMetadata); const UavTable = std.AutoArrayHashMapUnmanaged(InternPool.Index, AvMetadata); const LazySymbolTable = std.AutoArrayHashMapUnmanaged(InternPool.Index, LazySymbolMetadata); -const TlsTable = std.AutoArrayHashMapUnmanaged(Atom.Index, TlsVariable); +const TlsTable = std.AutoArrayHashMapUnmanaged(Atom.Index, void); const x86_64 = struct { fn writeTrampolineCode(source_addr: i64, target_addr: i64, buf: *[max_trampoline_len]u8) ![]u8 { From 1ef96f05eb7806d7123919d30f4b672e82a64d75 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Sun, 1 Sep 2024 13:51:08 +0200 Subject: [PATCH 042/202] elf: introduce SectionChunk - a container of atoms per object file --- src/link/Elf.zig | 7 ++- src/link/Elf/Object.zig | 114 
+++++++++++++++++++++++++++++++++++++++- 2 files changed, 119 insertions(+), 2 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 2c1bd34879f4..cb5db6dec9e0 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -3555,6 +3555,10 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void { } fn updateSectionSizes(self: *Elf) !void { + for (self.objects.items) |index| { + try self.file(index).?.object.allocateAtoms(self); + } + const slice = self.sections.slice(); for (slice.items(.shdr), slice.items(.atom_list)) |*shdr, atom_list| { if (atom_list.items.len == 0) continue; @@ -5334,8 +5338,9 @@ fn fmtDumpState( try writer.print("object({d}) : {}", .{ index, object.fmtPath() }); if (!object.alive) try writer.writeAll(" : [*]"); try writer.writeByte('\n'); - try writer.print("{}{}{}{}{}\n", .{ + try writer.print("{}{}{}{}{}{}\n", .{ object.fmtAtoms(self), + object.fmtSectionChunks(self), object.fmtCies(self), object.fmtFdes(self), object.fmtSymtab(self), diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig index c4443bb67e4a..3337bfb1a226 100644 --- a/src/link/Elf/Object.zig +++ b/src/link/Elf/Object.zig @@ -17,6 +17,7 @@ relocs: std.ArrayListUnmanaged(elf.Elf64_Rela) = .{}, atoms: std.ArrayListUnmanaged(Atom) = .{}, atoms_indexes: std.ArrayListUnmanaged(Atom.Index) = .{}, atoms_extra: std.ArrayListUnmanaged(u32) = .{}, +section_chunks: std.ArrayListUnmanaged(SectionChunk) = .{}, comdat_groups: std.ArrayListUnmanaged(Elf.ComdatGroup) = .{}, comdat_group_data: std.ArrayListUnmanaged(u32) = .{}, @@ -58,6 +59,10 @@ pub fn deinit(self: *Object, allocator: Allocator) void { self.atoms.deinit(allocator); self.atoms_indexes.deinit(allocator); self.atoms_extra.deinit(allocator); + for (self.section_chunks.items) |*chunk| { + chunk.deinit(allocator); + } + self.section_chunks.deinit(allocator); self.comdat_groups.deinit(allocator); self.comdat_group_data.deinit(allocator); self.relocs.deinit(allocator); @@ -933,11 +938,26 @@ pub fn 
initOutputSections(self: *Object, elf_file: *Elf) !void { const atom_ptr = self.atom(atom_index) orelse continue; if (!atom_ptr.alive) continue; const shdr = atom_ptr.inputShdr(elf_file); - _ = try elf_file.initOutputSection(.{ + const osec = try elf_file.initOutputSection(.{ .name = self.getString(shdr.sh_name), .flags = shdr.sh_flags, .type = shdr.sh_type, }); + const chunk = for (self.section_chunks.items) |*chunk| { + if (chunk.output_section_index == osec) break chunk; + } else blk: { + const chunk = try self.section_chunks.addOne(elf_file.base.comp.gpa); + chunk.* = .{ .output_section_index = osec }; + break :blk chunk; + }; + try chunk.atoms.append(elf_file.base.comp.gpa, atom_index); + } +} + +pub fn allocateAtoms(self: *Object, elf_file: *Elf) !void { + _ = elf_file; + for (self.section_chunks.items) |*chunk| { + chunk.updateSize(self); } } @@ -1427,6 +1447,29 @@ fn formatAtoms( } } +pub fn fmtSectionChunks(self: *Object, elf_file: *Elf) std.fmt.Formatter(formatSectionChunks) { + return .{ .data = .{ + .object = self, + .elf_file = elf_file, + } }; +} + +fn formatSectionChunks( + ctx: FormatContext, + comptime unused_fmt_string: []const u8, + options: std.fmt.FormatOptions, + writer: anytype, +) !void { + _ = unused_fmt_string; + _ = options; + const object = ctx.object; + const elf_file = ctx.elf_file; + try writer.writeAll(" section chunks\n"); + for (object.section_chunks.items) |chunk| { + try writer.print(" {}\n", .{chunk.fmt(elf_file)}); + } +} + pub fn fmtCies(self: *Object, elf_file: *Elf) std.fmt.Formatter(formatCies) { return .{ .data = .{ .object = self, @@ -1528,6 +1571,75 @@ const InArchive = struct { size: u32, }; +const SectionChunk = struct { + value: i64 = 0, + size: u64 = 0, + alignment: Atom.Alignment = .@"1", + output_section_index: u32 = 0, + atoms: std.ArrayListUnmanaged(Atom.Index) = .{}, + + fn deinit(chunk: *SectionChunk, allocator: Allocator) void { + chunk.atoms.deinit(allocator); + } + + fn address(chunk: SectionChunk, elf_file: 
*Elf) i64 { + const shdr = elf_file.sections.items(.shdr)[chunk.output_section_index]; + return @as(i64, @intCast(shdr.sh_addr)) + chunk.value; + } + + fn updateSize(chunk: *SectionChunk, object: *Object) void { + for (chunk.atoms.items) |atom_index| { + const atom_ptr = object.atom(atom_index).?; + assert(atom_ptr.alive); + const offset = atom_ptr.alignment.forward(chunk.size); + const padding = offset - chunk.size; + atom_ptr.value = @intCast(offset); + chunk.size += padding + atom_ptr.size; + chunk.alignment = chunk.alignment.max(atom_ptr.alignment); + } + } + + pub fn format( + chunk: SectionChunk, + comptime unused_fmt_string: []const u8, + options: std.fmt.FormatOptions, + writer: anytype, + ) !void { + _ = chunk; + _ = unused_fmt_string; + _ = options; + _ = writer; + @compileError("do not format SectionChunk directly"); + } + + const FormatCtx = struct { SectionChunk, *Elf }; + + pub fn fmt(chunk: SectionChunk, elf_file: *Elf) std.fmt.Formatter(format2) { + return .{ .data = .{ chunk, elf_file } }; + } + + fn format2( + ctx: FormatCtx, + comptime unused_fmt_string: []const u8, + options: std.fmt.FormatOptions, + writer: anytype, + ) !void { + _ = unused_fmt_string; + _ = options; + const chunk, const elf_file = ctx; + try writer.print("chunk : @{x} : shdr({d}) : align({x}) : size({x})", .{ + chunk.address(elf_file), chunk.output_section_index, + chunk.alignment.toByteUnits() orelse 0, chunk.size, + }); + try writer.writeAll(" : atoms{ "); + for (chunk.atoms.items, 0..) 
|atom_index, i| { + try writer.print("{d}", .{atom_index}); + if (i < chunk.atoms.items.len - 1) try writer.writeAll(", "); + } + try writer.writeAll(" }"); + } +}; + const Object = @This(); const std = @import("std"); From 45e46f0fb9f9f8a314802c544e92d1ee865119ef Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Sun, 1 Sep 2024 15:13:09 +0200 Subject: [PATCH 043/202] elf: allocate atom chunks using allocateChunk mechanics in objects --- src/link/Elf.zig | 54 ++++++++++++++++++------------------ src/link/Elf/Object.zig | 41 ++++++++++++++++----------- src/link/Elf/relocatable.zig | 1 - 3 files changed, 52 insertions(+), 44 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index cb5db6dec9e0..897deb86c5c8 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -727,7 +727,10 @@ pub fn allocateChunk(self: *Elf, shndx: u32, size: u64, alignment: Atom.Alignmen true; if (expand_section) { const needed_size = res.value + size; - try self.growAllocSection(shndx, needed_size); + if (shdr.sh_flags & elf.SHF_ALLOC != 0) + try self.growAllocSection(shndx, needed_size) + else + try self.growNonAllocSection(shndx, needed_size, @intCast(alignment.toByteUnits().?), true); } return res; @@ -1036,9 +1039,6 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod try self.setHashSections(); try self.setVersionSymtab(); - for (self.objects.items) |index| { - try self.file(index).?.object.addAtomsToOutputSections(self); - } try self.sortInitFini(); try self.updateMergeSectionSizes(); try self.updateSectionSizes(); @@ -3560,29 +3560,29 @@ fn updateSectionSizes(self: *Elf) !void { } const slice = self.sections.slice(); - for (slice.items(.shdr), slice.items(.atom_list)) |*shdr, atom_list| { - if (atom_list.items.len == 0) continue; - if (self.requiresThunks() and shdr.sh_flags & elf.SHF_EXECINSTR != 0) continue; - for (atom_list.items) |ref| { - const atom_ptr = self.atom(ref) orelse continue; - if (!atom_ptr.alive) continue; - const offset = 
atom_ptr.alignment.forward(shdr.sh_size); - const padding = offset - shdr.sh_size; - atom_ptr.value = @intCast(offset); - shdr.sh_size += padding + atom_ptr.size; - shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits() orelse 1); - } - } - - if (self.requiresThunks()) { - for (slice.items(.shdr), slice.items(.atom_list), 0..) |*shdr, atom_list, shndx| { - if (shdr.sh_flags & elf.SHF_EXECINSTR == 0) continue; - if (atom_list.items.len == 0) continue; - - // Create jump/branch range extenders if needed. - try self.createThunks(shdr, @intCast(shndx)); - } - } + // for (slice.items(.shdr), slice.items(.atom_list)) |*shdr, atom_list| { + // if (atom_list.items.len == 0) continue; + // if (self.requiresThunks() and shdr.sh_flags & elf.SHF_EXECINSTR != 0) continue; + // for (atom_list.items) |ref| { + // const atom_ptr = self.atom(ref) orelse continue; + // if (!atom_ptr.alive) continue; + // const offset = atom_ptr.alignment.forward(shdr.sh_size); + // const padding = offset - shdr.sh_size; + // atom_ptr.value = @intCast(offset); + // shdr.sh_size += padding + atom_ptr.size; + // shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits() orelse 1); + // } + // } + + // if (self.requiresThunks()) { + // for (slice.items(.shdr), slice.items(.atom_list), 0..) |*shdr, atom_list, shndx| { + // if (shdr.sh_flags & elf.SHF_EXECINSTR == 0) continue; + // if (atom_list.items.len == 0) continue; + + // // Create jump/branch range extenders if needed. 
+ // try self.createThunks(shdr, @intCast(shndx)); + // } + // } const shdrs = slice.items(.shdr); if (self.eh_frame_section_index) |index| { diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig index 3337bfb1a226..26c2f2585b0e 100644 --- a/src/link/Elf/Object.zig +++ b/src/link/Elf/Object.zig @@ -955,27 +955,26 @@ pub fn initOutputSections(self: *Object, elf_file: *Elf) !void { } pub fn allocateAtoms(self: *Object, elf_file: *Elf) !void { - _ = elf_file; for (self.section_chunks.items) |*chunk| { chunk.updateSize(self); } -} -pub fn addAtomsToOutputSections(self: *Object, elf_file: *Elf) !void { - for (self.atoms_indexes.items) |atom_index| { - const atom_ptr = self.atom(atom_index) orelse continue; - if (!atom_ptr.alive) continue; - const shdr = atom_ptr.inputShdr(elf_file); - atom_ptr.output_section_index = elf_file.initOutputSection(.{ - .name = self.getString(shdr.sh_name), - .flags = shdr.sh_flags, - .type = shdr.sh_type, - }) catch unreachable; + for (self.section_chunks.items) |*chunk| { + const alloc_res = try elf_file.allocateChunk(chunk.output_section_index, chunk.size, chunk.alignment); + chunk.value = @intCast(alloc_res.value); - const comp = elf_file.base.comp; - const gpa = comp.gpa; - const atom_list = &elf_file.sections.items(.atom_list)[atom_ptr.output_section_index]; - try atom_list.append(gpa, .{ .index = atom_index, .file = self.index }); + const slice = elf_file.sections.slice(); + const shdr = &slice.items(.shdr)[chunk.output_section_index]; + const last_atom_ref = &slice.items(.last_atom)[chunk.output_section_index]; + + const expand_section = if (elf_file.atom(alloc_res.placement)) |placement_atom| + placement_atom.nextAtom(elf_file) == null + else + true; + if (expand_section) last_atom_ref.* = chunk.lastAtom(self).ref(); + shdr.sh_addralign = @max(shdr.sh_addralign, chunk.alignment.toByteUnits().?); + + // TODO create back and forward links } } @@ -1599,6 +1598,16 @@ const SectionChunk = struct { } } + fn firstAtom(chunk: 
SectionChunk, object: *Object) *Atom { + assert(chunk.atoms.items.len > 0); + return object.atom(chunk.atoms.items[0]).?; + } + + fn lastAtom(chunk: SectionChunk, object: *Object) *Atom { + assert(chunk.atoms.items.len > 0); + return object.atom(chunk.atoms.items[chunk.atoms.items.len - 1]).?; + } + pub fn format( chunk: SectionChunk, comptime unused_fmt_string: []const u8, diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig index 4397bc2ca309..71d28abb32d8 100644 --- a/src/link/Elf/relocatable.zig +++ b/src/link/Elf/relocatable.zig @@ -208,7 +208,6 @@ pub fn flushObject(elf_file: *Elf, comp: *Compilation, module_obj_path: ?[]const } for (elf_file.objects.items) |index| { const object = elf_file.file(index).?.object; - try object.addAtomsToOutputSections(elf_file); try object.addAtomsToRelaSections(elf_file); } try elf_file.updateMergeSectionSizes(); From 874ef6308e6e438985a141310b94028de33286dc Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Mon, 2 Sep 2024 07:57:27 +0200 Subject: [PATCH 044/202] elf: do not create .eh_frame section if ZigObject already did so --- src/link/Dwarf.zig | 4 ++-- src/link/Elf.zig | 44 +++++++++++++++++++++++++------------------- 2 files changed, 27 insertions(+), 21 deletions(-) diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 97bc15ced8b1..bff33ecf140c 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -391,9 +391,9 @@ pub const Section = struct { const atom = zo.symbol(sec.index).atom(elf_file).?; const shndx = atom.output_section_index; if (sec == &dwarf.debug_frame.section) - try elf_file.growAllocSection(shndx, len) + try elf_file.growAllocSection(shndx, len, sec.alignment.toByteUnits().?) 
else - try elf_file.growNonAllocSection(shndx, len, @intCast(sec.alignment.toByteUnits().?), true); + try elf_file.growNonAllocSection(shndx, len, sec.alignment.toByteUnits().?, true); const shdr = elf_file.sections.items(.shdr)[shndx]; atom.size = shdr.sh_size; atom.alignment = InternPool.Alignment.fromNonzeroByteUnits(shdr.sh_addralign); diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 897deb86c5c8..bbacf8737cc3 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -528,7 +528,7 @@ pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u64) !u64 { return start; } -pub fn growAllocSection(self: *Elf, shdr_index: u32, needed_size: u64) !void { +pub fn growAllocSection(self: *Elf, shdr_index: u32, needed_size: u64, min_alignment: u64) !void { const slice = self.sections.slice(); const shdr = &slice.items(.shdr)[shdr_index]; assert(shdr.sh_flags & elf.SHF_ALLOC != 0); @@ -547,8 +547,7 @@ pub fn growAllocSection(self: *Elf, shdr_index: u32, needed_size: u64) !void { const existing_size = shdr.sh_size; shdr.sh_size = 0; // Must move the entire section. - const alignment = if (maybe_phdr) |phdr| phdr.p_align else shdr.sh_addralign; - const new_offset = try self.findFreeSpace(needed_size, alignment); + const new_offset = try self.findFreeSpace(needed_size, min_alignment); log.debug("new '{s}' file offset 0x{x} to 0x{x}", .{ self.getShString(shdr.sh_name), @@ -588,7 +587,7 @@ pub fn growNonAllocSection( self: *Elf, shdr_index: u32, needed_size: u64, - min_alignment: u32, + min_alignment: u64, requires_file_copy: bool, ) !void { const shdr = &self.sections.items(.shdr)[shdr_index]; @@ -728,9 +727,9 @@ pub fn allocateChunk(self: *Elf, shndx: u32, size: u64, alignment: Atom.Alignmen if (expand_section) { const needed_size = res.value + size; if (shdr.sh_flags & elf.SHF_ALLOC != 0) - try self.growAllocSection(shndx, needed_size) + try self.growAllocSection(shndx, needed_size, alignment.toByteUnits().?) 
else - try self.growNonAllocSection(shndx, needed_size, @intCast(alignment.toByteUnits().?), true); + try self.growNonAllocSection(shndx, needed_size, alignment.toByteUnits().?, true); } return res; @@ -1831,8 +1830,8 @@ pub fn initOutputSection(self: *Elf, args: struct { break :blk args.name; }; const @"type" = tt: { - if (self.getTarget().cpu.arch == .x86_64 and - args.type == elf.SHT_X86_64_UNWIND) break :tt elf.SHT_PROGBITS; + if (self.getTarget().cpu.arch == .x86_64 and args.type == elf.SHT_X86_64_UNWIND) + break :tt elf.SHT_PROGBITS; switch (args.type) { elf.SHT_NULL => unreachable, elf.SHT_PROGBITS => { @@ -2896,21 +2895,28 @@ fn initSyntheticSections(self: *Elf) !void { const target = self.getTarget(); const ptr_size = self.ptrWidthBytes(); - const needs_eh_frame = for (self.objects.items) |index| { + const needs_eh_frame = if (self.zigObjectPtr()) |zo| + zo.eh_frame_index != null + else for (self.objects.items) |index| { if (self.file(index).?.object.cies.items.len > 0) break true; } else false; if (needs_eh_frame) { if (self.eh_frame_section_index == null) { - self.eh_frame_section_index = try self.addSection(.{ - .name = try self.insertShString(".eh_frame"), - .type = if (target.cpu.arch == .x86_64) - elf.SHT_X86_64_UNWIND - else - elf.SHT_PROGBITS, - .flags = elf.SHF_ALLOC, - .addralign = ptr_size, - .offset = std.math.maxInt(u64), - }); + self.eh_frame_section_index = blk: { + if (self.zigObjectPtr()) |zo| { + if (zo.eh_frame_index) |idx| break :blk zo.symbol(idx).atom(self).?.output_section_index; + } + break :blk try self.addSection(.{ + .name = try self.insertShString(".eh_frame"), + .type = if (target.cpu.arch == .x86_64) + elf.SHT_X86_64_UNWIND + else + elf.SHT_PROGBITS, + .flags = elf.SHF_ALLOC, + .addralign = ptr_size, + .offset = std.math.maxInt(u64), + }); + }; } if (comp.link_eh_frame_hdr) { self.eh_frame_hdr_section_index = try self.addSection(.{ From f87a7251a3f9ae7259c64f458823117370071943 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: 
Mon, 2 Sep 2024 08:42:47 +0200 Subject: [PATCH 045/202] elf: actually write allocated atoms in object files --- src/link/Elf.zig | 102 ++-------------------------------------- src/link/Elf/Object.zig | 77 ++++++++++++++++++++++++++++-- 2 files changed, 77 insertions(+), 102 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index bbacf8737cc3..8963da645fb5 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -4037,107 +4037,19 @@ fn allocateSpecialPhdrs(self: *Elf) void { } fn writeAtoms(self: *Elf) !void { - const gpa = self.base.comp.gpa; - - var undefs = std.AutoArrayHashMap(SymbolResolver.Index, std.ArrayList(Ref)).init(gpa); - defer { - for (undefs.values()) |*refs| { - refs.deinit(); - } - undefs.deinit(); - } - - var has_reloc_errors = false; - const slice = self.sections.slice(); - for (slice.items(.shdr), slice.items(.atom_list), 0..) |shdr, atom_list, shndx| { - if (shdr.sh_type == elf.SHT_NULL) continue; - if (shdr.sh_type == elf.SHT_NOBITS) continue; - if (atom_list.items.len == 0) continue; - - log.debug("writing atoms in '{s}' section", .{self.getShString(shdr.sh_name)}); - - // TODO really, really handle debug section separately - const base_offset = if (self.zigObjectPtr()) |zo| base_offset: { - for ([_]?Symbol.Index{ - zo.text_index, - zo.rodata_index, - zo.data_relro_index, - zo.data_index, - zo.tdata_index, - zo.eh_frame_index, - zo.debug_info_index, - zo.debug_abbrev_index, - zo.debug_aranges_index, - zo.debug_str_index, - zo.debug_line_index, - zo.debug_line_str_index, - zo.debug_loclists_index, - zo.debug_rnglists_index, - }) |maybe_sym_index| { - const sym_index = maybe_sym_index orelse continue; - const sym = zo.symbol(sym_index); - const atom_ptr = sym.atom(self).?; - if (atom_ptr.output_section_index == shndx) break :base_offset atom_ptr.size; - } - break :base_offset 0; - } else 0; - const sh_offset = shdr.sh_offset + base_offset; - const sh_size = math.cast(usize, shdr.sh_size - base_offset) orelse return error.Overflow; - 
- const buffer = try gpa.alloc(u8, sh_size); - defer gpa.free(buffer); - const padding_byte: u8 = if (shdr.sh_type == elf.SHT_PROGBITS and - shdr.sh_flags & elf.SHF_EXECINSTR != 0 and self.getTarget().cpu.arch == .x86_64) - 0xcc // int3 - else - 0; - @memset(buffer, padding_byte); - - for (atom_list.items) |ref| { - const atom_ptr = self.atom(ref).?; - assert(atom_ptr.alive); - - const offset = math.cast(usize, atom_ptr.value - @as(i64, @intCast(base_offset))) orelse - return error.Overflow; - const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow; - - log.debug("writing atom({}) at 0x{x}", .{ ref, sh_offset + offset }); - - // TODO decompress directly into provided buffer - const out_code = buffer[offset..][0..size]; - const in_code = switch (atom_ptr.file(self).?) { - .object => |x| try x.codeDecompressAlloc(self, ref.index), - .zig_object => |x| try x.codeAlloc(self, ref.index), - else => unreachable, - }; - defer gpa.free(in_code); - @memcpy(out_code, in_code); - - const res = if (shdr.sh_flags & elf.SHF_ALLOC == 0) - atom_ptr.resolveRelocsNonAlloc(self, out_code, &undefs) - else - atom_ptr.resolveRelocsAlloc(self, out_code); - _ = res catch |err| switch (err) { - error.UnsupportedCpuArch => { - try self.reportUnsupportedCpuArch(); - return error.FlushFailure; - }, - error.RelocFailure, error.RelaxFailure => has_reloc_errors = true, - else => |e| return e, - }; - } - - try self.base.file.?.pwriteAll(buffer, sh_offset); + for (self.objects.items) |index| { + try self.file(index).?.object.writeAtoms(self); } if (self.requiresThunks()) { + const gpa = self.base.comp.gpa; var buffer = std.ArrayList(u8).init(gpa); defer buffer.deinit(); for (self.thunks.items) |th| { const thunk_size = th.size(self); try buffer.ensureUnusedCapacity(thunk_size); - const shdr = slice.items(.shdr)[th.output_section_index]; + const shdr = self.sections.items(.shdr)[th.output_section_index]; const offset = @as(u64, @intCast(th.value)) + shdr.sh_offset; try 
th.write(self, buffer.writer()); assert(buffer.items.len == thunk_size); @@ -4145,10 +4057,6 @@ fn writeAtoms(self: *Elf) !void { buffer.clearRetainingCapacity(); } } - - try self.reportUndefinedSymbols(&undefs); - - if (has_reloc_errors) return error.FlushFailure; } pub fn updateSymtabSize(self: *Elf) !void { @@ -5089,7 +4997,7 @@ pub fn insertDynString(self: *Elf, name: []const u8) error{OutOfMemory}!u32 { return off; } -fn reportUndefinedSymbols(self: *Elf, undefs: anytype) !void { +pub fn reportUndefinedSymbols(self: *Elf, undefs: anytype) !void { const gpa = self.base.comp.gpa; const max_notes = 4; diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig index 26c2f2585b0e..590f7b6762a6 100644 --- a/src/link/Elf/Object.zig +++ b/src/link/Elf/Object.zig @@ -978,6 +978,68 @@ pub fn allocateAtoms(self: *Object, elf_file: *Elf) !void { } } +pub fn writeAtoms(self: *Object, elf_file: *Elf) !void { + const gpa = elf_file.base.comp.gpa; + + var undefs = std.AutoArrayHashMap(Elf.SymbolResolver.Index, std.ArrayList(Elf.Ref)).init(gpa); + defer { + for (undefs.values()) |*refs| { + refs.deinit(); + } + undefs.deinit(); + } + + var buffer = std.ArrayList(u8).init(gpa); + defer buffer.deinit(); + + log.debug("writing atoms in {}", .{self.fmtPath()}); + + var has_reloc_errors = false; + for (self.section_chunks.items) |chunk| { + const osec = elf_file.sections.items(.shdr)[chunk.output_section_index]; + if (osec.sh_type == elf.SHT_NOBITS) continue; + + log.debug(" in section '{s}'", .{elf_file.getShString(osec.sh_name)}); + + try buffer.ensureUnusedCapacity(chunk.size); + buffer.appendNTimesAssumeCapacity(0, chunk.size); + + for (chunk.atoms.items) |atom_index| { + const atom_ptr = self.atom(atom_index).?; + assert(atom_ptr.alive); + + const offset = math.cast(usize, atom_ptr.value) orelse return error.Overflow; + const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow; + + log.debug(" * atom({d}) at 0x{x}", .{ atom_index, chunk.offset(elf_file) + 
offset }); + + const code = try self.codeDecompressAlloc(elf_file, atom_index); + defer gpa.free(code); + const out_code = buffer.items[offset..][0..size]; + @memcpy(out_code, code); + + const res = if (osec.sh_flags & elf.SHF_ALLOC == 0) + atom_ptr.resolveRelocsNonAlloc(elf_file, out_code, &undefs) + else + atom_ptr.resolveRelocsAlloc(elf_file, out_code); + _ = res catch |err| switch (err) { + error.UnsupportedCpuArch => { + try elf_file.reportUnsupportedCpuArch(); + return error.FlushFailure; + }, + error.RelocFailure, error.RelaxFailure => has_reloc_errors = true, + else => |e| return e, + }; + } + + try elf_file.base.file.?.pwriteAll(buffer.items, chunk.offset(elf_file)); + buffer.clearRetainingCapacity(); + } + + try elf_file.reportUndefinedSymbols(&undefs); + if (has_reloc_errors) return error.FlushFailure; +} + pub fn initRelaSections(self: *Object, elf_file: *Elf) !void { for (self.atoms_indexes.items) |atom_index| { const atom_ptr = self.atom(atom_index) orelse continue; @@ -1544,12 +1606,12 @@ fn formatComdatGroups( } } -pub fn fmtPath(self: *Object) std.fmt.Formatter(formatPath) { +pub fn fmtPath(self: Object) std.fmt.Formatter(formatPath) { return .{ .data = self }; } fn formatPath( - object: *Object, + object: Object, comptime unused_fmt_string: []const u8, options: std.fmt.FormatOptions, writer: anytype, @@ -1586,13 +1648,18 @@ const SectionChunk = struct { return @as(i64, @intCast(shdr.sh_addr)) + chunk.value; } + fn offset(chunk: SectionChunk, elf_file: *Elf) u64 { + const shdr = elf_file.sections.items(.shdr)[chunk.output_section_index]; + return shdr.sh_offset + @as(u64, @intCast(chunk.value)); + } + fn updateSize(chunk: *SectionChunk, object: *Object) void { for (chunk.atoms.items) |atom_index| { const atom_ptr = object.atom(atom_index).?; assert(atom_ptr.alive); - const offset = atom_ptr.alignment.forward(chunk.size); - const padding = offset - chunk.size; - atom_ptr.value = @intCast(offset); + const off = atom_ptr.alignment.forward(chunk.size); 
+ const padding = off - chunk.size; + atom_ptr.value = @intCast(off); chunk.size += padding + atom_ptr.size; chunk.alignment = chunk.alignment.max(atom_ptr.alignment); } From 6a50a0f0ed3902b46afeda9c4a5b7f10cec60dba Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Mon, 2 Sep 2024 08:58:49 +0200 Subject: [PATCH 046/202] elf: update osec index for section chunks in objects --- src/link/Elf.zig | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 8963da645fb5..752d0082c87c 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -3411,6 +3411,7 @@ fn shdrRank(self: *Elf, shndx: u32) u8 { return 0xf9; } }, + elf.SHT_X86_64_UNWIND => return 0xf0, elf.SHT_NOBITS => return if (flags & elf.SHF_TLS != 0) 0xf5 else 0xf7, elf.SHT_SYMTAB => return 0xfa, @@ -3503,6 +3504,12 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void { } } + for (self.objects.items) |index| { + for (self.file(index).?.object.section_chunks.items) |*chunk| { + chunk.output_section_index = backlinks[chunk.output_section_index]; + } + } + for (self.comdat_group_sections.items) |*cg| { cg.shndx = backlinks[cg.shndx]; } From 6b53dc946117110907867ee67425585ae3729750 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Mon, 2 Sep 2024 09:08:37 +0200 Subject: [PATCH 047/202] elf: actually allocate atoms within each section chunk --- src/link/Elf.zig | 6 +++++- src/link/Elf/Object.zig | 8 +++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 752d0082c87c..b6efa6624741 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -3505,8 +3505,12 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void { } for (self.objects.items) |index| { - for (self.file(index).?.object.section_chunks.items) |*chunk| { + const object = self.file(index).?.object; + for (object.section_chunks.items) |*chunk| { chunk.output_section_index = backlinks[chunk.output_section_index]; + for (chunk.atoms.items) |atom_index| { 
+ object.atom(atom_index).?.output_section_index = chunk.output_section_index; + } } } diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig index 590f7b6762a6..dd58727202e8 100644 --- a/src/link/Elf/Object.zig +++ b/src/link/Elf/Object.zig @@ -975,6 +975,12 @@ pub fn allocateAtoms(self: *Object, elf_file: *Elf) !void { shdr.sh_addralign = @max(shdr.sh_addralign, chunk.alignment.toByteUnits().?); // TODO create back and forward links + // TODO if we had a link from Atom to parent Chunk we would not need to update Atom's value or osec index + for (chunk.atoms.items) |atom_index| { + const atom_ptr = self.atom(atom_index).?; + atom_ptr.output_section_index = chunk.output_section_index; + atom_ptr.value += chunk.value; + } } } @@ -1008,7 +1014,7 @@ pub fn writeAtoms(self: *Object, elf_file: *Elf) !void { const atom_ptr = self.atom(atom_index).?; assert(atom_ptr.alive); - const offset = math.cast(usize, atom_ptr.value) orelse return error.Overflow; + const offset = math.cast(usize, atom_ptr.value - chunk.value) orelse return error.Overflow; const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow; log.debug(" * atom({d}) at 0x{x}", .{ atom_index, chunk.offset(elf_file) + offset }); From 5cb51c10debd3e9542e35e22648bca2a8b59259e Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Mon, 2 Sep 2024 11:42:47 +0200 Subject: [PATCH 048/202] elf: fix relocatable mode --- src/link/Elf.zig | 32 +++------- src/link/Elf/Object.zig | 39 ++++++++++++- src/link/Elf/ZigObject.zig | 2 +- src/link/Elf/eh_frame.zig | 2 +- src/link/Elf/relocatable.zig | 109 ++++++----------------------------- 5 files changed, 67 insertions(+), 117 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index b6efa6624741..cdc43b2d1a13 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -3577,29 +3577,15 @@ fn updateSectionSizes(self: *Elf) !void { } const slice = self.sections.slice(); - // for (slice.items(.shdr), slice.items(.atom_list)) |*shdr, atom_list| { - // if 
(atom_list.items.len == 0) continue; - // if (self.requiresThunks() and shdr.sh_flags & elf.SHF_EXECINSTR != 0) continue; - // for (atom_list.items) |ref| { - // const atom_ptr = self.atom(ref) orelse continue; - // if (!atom_ptr.alive) continue; - // const offset = atom_ptr.alignment.forward(shdr.sh_size); - // const padding = offset - shdr.sh_size; - // atom_ptr.value = @intCast(offset); - // shdr.sh_size += padding + atom_ptr.size; - // shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits() orelse 1); - // } - // } - - // if (self.requiresThunks()) { - // for (slice.items(.shdr), slice.items(.atom_list), 0..) |*shdr, atom_list, shndx| { - // if (shdr.sh_flags & elf.SHF_EXECINSTR == 0) continue; - // if (atom_list.items.len == 0) continue; - - // // Create jump/branch range extenders if needed. - // try self.createThunks(shdr, @intCast(shndx)); - // } - // } + if (self.requiresThunks()) { + for (slice.items(.shdr), slice.items(.atom_list), 0..) |*shdr, atom_list, shndx| { + if (shdr.sh_flags & elf.SHF_EXECINSTR == 0) continue; + if (atom_list.items.len == 0) continue; + + // Create jump/branch range extenders if needed. + try self.createThunks(shdr, @intCast(shndx)); + } + } const shdrs = slice.items(.shdr); if (self.eh_frame_section_index) |index| { diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig index dd58727202e8..92c8c1e23fff 100644 --- a/src/link/Elf/Object.zig +++ b/src/link/Elf/Object.zig @@ -575,7 +575,7 @@ pub fn claimUnresolved(self: *Object, elf_file: *Elf) void { } } -pub fn claimUnresolvedObject(self: *Object, elf_file: *Elf) void { +pub fn claimUnresolvedRelocatable(self: *Object, elf_file: *Elf) void { const first_global = self.first_global orelse return; for (self.globals(), 0..) 
|*sym, i| { const esym_index = @as(u32, @intCast(first_global + i)); @@ -1046,6 +1046,43 @@ pub fn writeAtoms(self: *Object, elf_file: *Elf) !void { if (has_reloc_errors) return error.FlushFailure; } +pub fn writeAtomsRelocatable(self: *Object, elf_file: *Elf) !void { + const gpa = elf_file.base.comp.gpa; + + var buffer = std.ArrayList(u8).init(gpa); + defer buffer.deinit(); + + log.debug("writing atoms in {}", .{self.fmtPath()}); + + for (self.section_chunks.items) |chunk| { + const osec = elf_file.sections.items(.shdr)[chunk.output_section_index]; + if (osec.sh_type == elf.SHT_NOBITS) continue; + + log.debug(" in section '{s}'", .{elf_file.getShString(osec.sh_name)}); + + try buffer.ensureUnusedCapacity(chunk.size); + buffer.appendNTimesAssumeCapacity(0, chunk.size); + + for (chunk.atoms.items) |atom_index| { + const atom_ptr = self.atom(atom_index).?; + assert(atom_ptr.alive); + + const offset = math.cast(usize, atom_ptr.value - chunk.value) orelse return error.Overflow; + const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow; + + log.debug(" * atom({d}) at 0x{x}", .{ atom_index, chunk.offset(elf_file) + offset }); + + const code = try self.codeDecompressAlloc(elf_file, atom_index); + defer gpa.free(code); + const out_code = buffer.items[offset..][0..size]; + @memcpy(out_code, code); + } + + try elf_file.base.file.?.pwriteAll(buffer.items, chunk.offset(elf_file)); + buffer.clearRetainingCapacity(); + } +} + pub fn initRelaSections(self: *Object, elf_file: *Elf) !void { for (self.atoms_indexes.items) |atom_index| { const atom_ptr = self.atom(atom_index) orelse continue; diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 2d20895d2e33..b62c0beae4ec 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -648,7 +648,7 @@ pub fn claimUnresolved(self: *ZigObject, elf_file: *Elf) void { } } -pub fn claimUnresolvedObject(self: ZigObject, elf_file: *Elf) void { +pub fn claimUnresolvedRelocatable(self: 
ZigObject, elf_file: *Elf) void { for (self.global_symbols.items, 0..) |index, i| { const global = &self.symbols.items[index]; const esym = self.symtab.items(.elf_sym)[index]; diff --git a/src/link/Elf/eh_frame.zig b/src/link/Elf/eh_frame.zig index d6604860104b..8431a9e572d9 100644 --- a/src/link/Elf/eh_frame.zig +++ b/src/link/Elf/eh_frame.zig @@ -386,7 +386,7 @@ pub fn writeEhFrame(elf_file: *Elf, writer: anytype) !void { if (has_reloc_errors) return error.RelocFailure; } -pub fn writeEhFrameObject(elf_file: *Elf, writer: anytype) !void { +pub fn writeEhFrameRelocatable(elf_file: *Elf, writer: anytype) !void { for (elf_file.objects.items) |index| { const object = elf_file.file(index).?.object; diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig index 71d28abb32d8..0ff2943ffa29 100644 --- a/src/link/Elf/relocatable.zig +++ b/src/link/Elf/relocatable.zig @@ -40,7 +40,7 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation, module_obj_path: ?[]co try zig_object.resolveSymbols(elf_file); try elf_file.addCommentString(); try elf_file.finalizeMergeSections(); - zig_object.claimUnresolvedObject(elf_file); + zig_object.claimUnresolvedRelocatable(elf_file); for (elf_file.merge_sections.items) |*msec| { if (msec.finalized_subsections.items.len == 0) continue; @@ -280,10 +280,10 @@ fn parseArchive(elf_file: *Elf, path: []const u8) Elf.ParseError!void { fn claimUnresolved(elf_file: *Elf) void { if (elf_file.zigObjectPtr()) |zig_object| { - zig_object.claimUnresolvedObject(elf_file); + zig_object.claimUnresolvedRelocatable(elf_file); } for (elf_file.objects.items) |index| { - elf_file.file(index).?.object.claimUnresolvedObject(elf_file); + elf_file.file(index).?.object.claimUnresolvedRelocatable(elf_file); } } @@ -349,29 +349,22 @@ fn initComdatGroups(elf_file: *Elf) !void { } fn updateSectionSizes(elf_file: *Elf) !void { + for (elf_file.objects.items) |index| { + try elf_file.file(index).?.object.allocateAtoms(elf_file); + } + const slice = 
elf_file.sections.slice(); for (slice.items(.shdr), 0..) |*shdr, shndx| { const atom_list = slice.items(.atom_list)[shndx]; - if (shdr.sh_type != elf.SHT_RELA) { - for (atom_list.items) |ref| { - const atom_ptr = elf_file.atom(ref) orelse continue; - if (!atom_ptr.alive) continue; - const offset = atom_ptr.alignment.forward(shdr.sh_size); - const padding = offset - shdr.sh_size; - atom_ptr.value = @intCast(offset); - shdr.sh_size += padding + atom_ptr.size; - shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits() orelse 1); - } - } else { - for (atom_list.items) |ref| { - const atom_ptr = elf_file.atom(ref) orelse continue; - if (!atom_ptr.alive) continue; - const relocs = atom_ptr.relocs(elf_file); - shdr.sh_size += shdr.sh_entsize * relocs.len; - } - - if (shdr.sh_size == 0) shdr.sh_offset = 0; + if (shdr.sh_type != elf.SHT_RELA) continue; + for (atom_list.items) |ref| { + const atom_ptr = elf_file.atom(ref) orelse continue; + if (!atom_ptr.alive) continue; + const relocs = atom_ptr.relocs(elf_file); + shdr.sh_size += shdr.sh_entsize * relocs.len; } + + if (shdr.sh_size == 0) shdr.sh_offset = 0; } if (elf_file.eh_frame_section_index) |index| { @@ -422,74 +415,8 @@ fn allocateAllocSections(elf_file: *Elf) !void { } fn writeAtoms(elf_file: *Elf) !void { - const gpa = elf_file.base.comp.gpa; - const slice = elf_file.sections.slice(); - - // TODO iterate over `output_sections` directly - for (slice.items(.shdr), slice.items(.atom_list), 0..) 
|shdr, atom_list, shndx| { - if (shdr.sh_type == elf.SHT_NULL) continue; - if (shdr.sh_type == elf.SHT_NOBITS) continue; - if (shdr.sh_type == elf.SHT_RELA) continue; - if (atom_list.items.len == 0) continue; - - log.debug("writing atoms in '{s}' section", .{elf_file.getShString(shdr.sh_name)}); - - // TODO really, really handle debug section separately - const base_offset = if (elf_file.zigObjectPtr()) |zo| blk: { - break :blk for ([_]?Symbol.Index{ - zo.debug_info_index, - zo.debug_abbrev_index, - zo.debug_aranges_index, - zo.debug_str_index, - zo.debug_line_index, - zo.debug_line_str_index, - zo.debug_loclists_index, - zo.debug_rnglists_index, - }) |maybe_sym_index| { - const sym_index = maybe_sym_index orelse continue; - const sym = zo.symbol(sym_index); - const atom_ptr = sym.atom(elf_file).?; - if (atom_ptr.output_section_index == shndx) break atom_ptr.size; - } else 0; - } else 0; - const sh_offset = shdr.sh_offset + base_offset; - const sh_size = math.cast(usize, shdr.sh_size - base_offset) orelse return error.Overflow; - - const buffer = try gpa.alloc(u8, sh_size); - defer gpa.free(buffer); - const padding_byte: u8 = if (shdr.sh_type == elf.SHT_PROGBITS and - shdr.sh_flags & elf.SHF_EXECINSTR != 0) - 0xcc // int3 - else - 0; - @memset(buffer, padding_byte); - - for (atom_list.items) |ref| { - const atom_ptr = elf_file.atom(ref).?; - assert(atom_ptr.alive); - - const offset = math.cast(usize, atom_ptr.value - @as(i64, @intCast(shdr.sh_addr - base_offset))) orelse - return error.Overflow; - const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow; - - log.debug("writing atom({}) from 0x{x} to 0x{x}", .{ - ref, - sh_offset + offset, - sh_offset + offset + size, - }); - - // TODO decompress directly into provided buffer - const out_code = buffer[offset..][0..size]; - const in_code = switch (atom_ptr.file(elf_file).?) 
{ - .object => |x| try x.codeDecompressAlloc(elf_file, ref.index), - .zig_object => |x| try x.codeAlloc(elf_file, ref.index), - else => unreachable, - }; - defer gpa.free(in_code); - @memcpy(out_code, in_code); - } - - try elf_file.base.file.?.pwriteAll(buffer, sh_offset); + for (elf_file.objects.items) |index| { + try elf_file.file(index).?.object.writeAtomsRelocatable(elf_file); } } @@ -541,7 +468,7 @@ fn writeSyntheticSections(elf_file: *Elf) !void { const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow; var buffer = try std.ArrayList(u8).initCapacity(gpa, @intCast(sh_size - existing_size)); defer buffer.deinit(); - try eh_frame.writeEhFrameObject(elf_file, buffer.writer()); + try eh_frame.writeEhFrameRelocatable(elf_file, buffer.writer()); log.debug("writing .eh_frame from 0x{x} to 0x{x}", .{ shdr.sh_offset + existing_size, shdr.sh_offset + sh_size, From 2ef3e30e2d04f16510d0953e6d266a97bcb4eb49 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Tue, 3 Sep 2024 13:28:01 +0200 Subject: [PATCH 049/202] elf: emit relocs for self-hosted generated .eh_frame section --- src/link/Elf.zig | 24 +++++++++++++----------- src/link/Elf/Object.zig | 28 +++++++++++++++++++--------- src/link/Elf/ZigObject.zig | 1 + src/link/Elf/eh_frame.zig | 30 ++++++++++++++++++++++++++---- src/link/Elf/relocatable.zig | 3 ++- 5 files changed, 61 insertions(+), 25 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index cdc43b2d1a13..d660c48b4bbf 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -3388,34 +3388,36 @@ fn shdrRank(self: *Elf, shndx: u32) u8 { elf.SHT_PREINIT_ARRAY, elf.SHT_INIT_ARRAY, elf.SHT_FINI_ARRAY, - => return 0xf2, + => return 0xf1, - elf.SHT_DYNAMIC => return 0xf3, + elf.SHT_DYNAMIC => return 0xf2, elf.SHT_RELA, elf.SHT_GROUP => return 0xf, elf.SHT_PROGBITS => if (flags & elf.SHF_ALLOC != 0) { if (flags & elf.SHF_EXECINSTR != 0) { - return 0xf1; + return 0xf0; } else if (flags & elf.SHF_WRITE != 0) { - return if (flags & elf.SHF_TLS 
!= 0) 0xf4 else 0xf6; + return if (flags & elf.SHF_TLS != 0) 0xf3 else 0xf5; } else if (mem.eql(u8, name, ".interp")) { return 1; + } else if (mem.startsWith(u8, name, ".eh_frame")) { + return 0xe1; } else { - return 0xf0; + return 0xe0; } } else { if (mem.startsWith(u8, name, ".debug")) { - return 0xf8; + return 0xf7; } else { - return 0xf9; + return 0xf8; } }, - elf.SHT_X86_64_UNWIND => return 0xf0, + elf.SHT_X86_64_UNWIND => return 0xe1, - elf.SHT_NOBITS => return if (flags & elf.SHF_TLS != 0) 0xf5 else 0xf7, - elf.SHT_SYMTAB => return 0xfa, - elf.SHT_STRTAB => return if (mem.eql(u8, name, ".dynstr")) 0x4 else 0xfb, + elf.SHT_NOBITS => return if (flags & elf.SHF_TLS != 0) 0xf4 else 0xf6, + elf.SHT_SYMTAB => return 0xf9, + elf.SHT_STRTAB => return if (mem.eql(u8, name, ".dynstr")) 0x4 else 0xfa, else => return 0xff, } } diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig index 92c8c1e23fff..a72ac697ddc8 100644 --- a/src/link/Elf/Object.zig +++ b/src/link/Elf/Object.zig @@ -391,15 +391,24 @@ fn parseEhFrame(self: *Object, allocator: Allocator, handle: std.fs.File, shndx: .input_section_index = shndx, .file_index = self.index, }), - .fde => try self.fdes.append(allocator, .{ - .offset = data_start + rec.offset, - .size = rec.size, - .cie_index = undefined, - .rel_index = rel_start + @as(u32, @intCast(rel_range.start)), - .rel_num = @as(u32, @intCast(rel_range.len)), - .input_section_index = shndx, - .file_index = self.index, - }), + .fde => { + if (rel_range.len == 0) { + // No relocs for an FDE means we cannot associate this FDE to an Atom + // so we skip it. According to mold source code + // (https://github.com/rui314/mold/blob/a3e69502b0eaf1126d6093e8ea5e6fdb95219811/src/input-files.cc#L525-L528) + // this can happen for object files built with -r flag by the linker. 
+ continue; + } + try self.fdes.append(allocator, .{ + .offset = data_start + rec.offset, + .size = rec.size, + .cie_index = undefined, + .rel_index = rel_start + @as(u32, @intCast(rel_range.start)), + .rel_num = @as(u32, @intCast(rel_range.len)), + .input_section_index = shndx, + .file_index = self.index, + }); + }, } } @@ -1106,6 +1115,7 @@ pub fn addAtomsToRelaSections(self: *Object, elf_file: *Elf) !void { for (self.atoms_indexes.items) |atom_index| { const atom_ptr = self.atom(atom_index) orelse continue; if (!atom_ptr.alive) continue; + if (atom_ptr.output_section_index == elf_file.eh_frame_section_index) continue; const shndx = blk: { const shndx = atom_ptr.relocsShndx() orelse continue; const shdr = self.shdrs.items[shndx]; diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index b62c0beae4ec..6d2325fb7e0f 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -808,6 +808,7 @@ pub fn addAtomsToRelaSections(self: *ZigObject, elf_file: *Elf) !void { for (self.atoms_indexes.items) |atom_index| { const atom_ptr = self.atom(atom_index) orelse continue; if (!atom_ptr.alive) continue; + if (atom_ptr.output_section_index == elf_file.eh_frame_section_index) continue; const rela_shndx = atom_ptr.relocsShndx() orelse continue; // TODO this check will become obsolete when we rework our relocs mechanism at the ZigObject level if (self.relocs.items[rela_shndx].items.len == 0) continue; diff --git a/src/link/Elf/eh_frame.zig b/src/link/Elf/eh_frame.zig index 8431a9e572d9..54024fa1bb49 100644 --- a/src/link/Elf/eh_frame.zig +++ b/src/link/Elf/eh_frame.zig @@ -288,6 +288,13 @@ pub fn calcEhFrameHdrSize(elf_file: *Elf) usize { pub fn calcEhFrameRelocs(elf_file: *Elf) usize { var count: usize = 0; + if (elf_file.zigObjectPtr()) |zo| zo: { + const sym_index = zo.eh_frame_index orelse break :zo; + const sym = zo.symbol(sym_index); + const atom_ptr = zo.atom(sym.ref.index).?; + if (!atom_ptr.alive) break :zo; + count += 
atom_ptr.relocs(elf_file).len; + } for (elf_file.objects.items) |index| { const object = elf_file.file(index).?.object; for (object.cies.items) |cie| { @@ -416,9 +423,9 @@ pub fn writeEhFrameRelocatable(elf_file: *Elf, writer: anytype) !void { } } -fn emitReloc(elf_file: *Elf, rec: anytype, sym: *const Symbol, rel: elf.Elf64_Rela) elf.Elf64_Rela { +fn emitReloc(elf_file: *Elf, base_offset: u64, sym: *const Symbol, rel: elf.Elf64_Rela) elf.Elf64_Rela { const cpu_arch = elf_file.getTarget().cpu.arch; - const r_offset = rec.address(elf_file) + rel.r_offset - rec.offset; + const r_offset = base_offset + rel.r_offset; const r_type = rel.r_type(); var r_addend = rel.r_addend; var r_sym: u32 = 0; @@ -452,6 +459,19 @@ pub fn writeEhFrameRelocs(elf_file: *Elf, writer: anytype) !void { elf_file.sections.items(.shdr)[elf_file.eh_frame_section_index.?].sh_addr, }); + if (elf_file.zigObjectPtr()) |zo| zo: { + const sym_index = zo.eh_frame_index orelse break :zo; + const sym = zo.symbol(sym_index); + const atom_ptr = zo.atom(sym.ref.index).?; + if (!atom_ptr.alive) break :zo; + for (atom_ptr.relocs(elf_file)) |rel| { + const ref = zo.resolveSymbol(rel.r_sym(), elf_file); + const target = elf_file.symbol(ref).?; + const out_rel = emitReloc(elf_file, 0, target, rel); + try writer.writeStruct(out_rel); + } + } + for (elf_file.objects.items) |index| { const object = elf_file.file(index).?.object; @@ -460,7 +480,8 @@ pub fn writeEhFrameRelocs(elf_file: *Elf, writer: anytype) !void { for (cie.relocs(elf_file)) |rel| { const ref = object.resolveSymbol(rel.r_sym(), elf_file); const sym = elf_file.symbol(ref).?; - const out_rel = emitReloc(elf_file, cie, sym, rel); + const offset = cie.address(elf_file) - cie.offset; + const out_rel = emitReloc(elf_file, offset, sym, rel); try writer.writeStruct(out_rel); } } @@ -470,7 +491,8 @@ pub fn writeEhFrameRelocs(elf_file: *Elf, writer: anytype) !void { for (fde.relocs(elf_file)) |rel| { const ref = object.resolveSymbol(rel.r_sym(), elf_file); 
const sym = elf_file.symbol(ref).?; - const out_rel = emitReloc(elf_file, fde, sym, rel); + const offset = fde.address(elf_file) - fde.offset; + const out_rel = emitReloc(elf_file, offset, sym, rel); try writer.writeStruct(out_rel); } } diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig index 0ff2943ffa29..b4a7f08f6eee 100644 --- a/src/link/Elf/relocatable.zig +++ b/src/link/Elf/relocatable.zig @@ -424,8 +424,9 @@ fn writeSyntheticSections(elf_file: *Elf) !void { const gpa = elf_file.base.comp.gpa; const slice = elf_file.sections.slice(); - for (slice.items(.shdr), slice.items(.atom_list)) |shdr, atom_list| { + for (slice.items(.shdr), slice.items(.atom_list), 0..) |shdr, atom_list, shndx| { if (shdr.sh_type != elf.SHT_RELA) continue; + if (@as(u32, @intCast(shndx)) == elf_file.eh_frame_rela_section_index) continue; if (atom_list.items.len == 0) continue; const num_relocs = math.cast(usize, @divExact(shdr.sh_size, shdr.sh_entsize)) orelse From 88e0d49febf5839654e4a5f3dc02ad6b2a82e242 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Tue, 3 Sep 2024 13:49:14 +0200 Subject: [PATCH 050/202] elf: init rela sections in a separate pass for ZigObject --- src/link/Elf/ZigObject.zig | 26 ++++++++++++++++++++++---- src/link/Elf/relocatable.zig | 3 +++ 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 6d2325fb7e0f..8eef1a84e406 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -803,7 +803,7 @@ pub fn writeAr(self: ZigObject, writer: anytype) !void { try writer.writeAll(self.data.items); } -pub fn addAtomsToRelaSections(self: *ZigObject, elf_file: *Elf) !void { +pub fn initRelaSections(self: *ZigObject, elf_file: *Elf) !void { const gpa = elf_file.base.comp.gpa; for (self.atoms_indexes.items) |atom_index| { const atom_ptr = self.atom(atom_index) orelse continue; @@ -819,10 +819,28 @@ pub fn addAtomsToRelaSections(self: *ZigObject, elf_file: *Elf) !void { 
elf_file.getShString(out_shdr.sh_name), }); defer gpa.free(rela_sect_name); - const out_rela_shndx = if (elf_file.sectionByName(rela_sect_name)) |out_rela_shndx| - out_rela_shndx - else + _ = elf_file.sectionByName(rela_sect_name) orelse try elf_file.addRelaShdr(try elf_file.insertShString(rela_sect_name), out_shndx); + } +} + +pub fn addAtomsToRelaSections(self: *ZigObject, elf_file: *Elf) !void { + const gpa = elf_file.base.comp.gpa; + for (self.atoms_indexes.items) |atom_index| { + const atom_ptr = self.atom(atom_index) orelse continue; + if (!atom_ptr.alive) continue; + if (atom_ptr.output_section_index == elf_file.eh_frame_section_index) continue; + const rela_shndx = atom_ptr.relocsShndx() orelse continue; + // TODO this check will become obsolete when we rework our relocs mechanism at the ZigObject level + if (self.relocs.items[rela_shndx].items.len == 0) continue; + const out_shndx = atom_ptr.output_section_index; + const out_shdr = elf_file.sections.items(.shdr)[out_shndx]; + if (out_shdr.sh_type == elf.SHT_NOBITS) continue; + const rela_sect_name = try std.fmt.allocPrintZ(gpa, ".rela{s}", .{ + elf_file.getShString(out_shdr.sh_name), + }); + defer gpa.free(rela_sect_name); + const out_rela_shndx = elf_file.sectionByName(rela_sect_name).?; const out_rela_shdr = &elf_file.sections.items(.shdr)[out_rela_shndx]; out_rela_shdr.sh_info = out_shndx; out_rela_shdr.sh_link = elf_file.symtab_section_index.?; diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig index b4a7f08f6eee..6499bc207b1b 100644 --- a/src/link/Elf/relocatable.zig +++ b/src/link/Elf/relocatable.zig @@ -288,6 +288,9 @@ fn claimUnresolved(elf_file: *Elf) void { } fn initSections(elf_file: *Elf) !void { + if (elf_file.zigObjectPtr()) |zo| { + try zo.initRelaSections(elf_file); + } for (elf_file.objects.items) |index| { const object = elf_file.file(index).?.object; try object.initOutputSections(elf_file); From 5cdad186fe1cb83c6d79642433bfbe330436914a Mon Sep 17 00:00:00 2001 From: 
Jakub Konka Date: Tue, 3 Sep 2024 14:45:59 +0200 Subject: [PATCH 051/202] elf: do not create .eh_frame section if ZigObject already did so in relocatable mode --- src/link/Elf/relocatable.zig | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig index 6499bc207b1b..ea710e7b2acb 100644 --- a/src/link/Elf/relocatable.zig +++ b/src/link/Elf/relocatable.zig @@ -302,21 +302,28 @@ fn initSections(elf_file: *Elf) !void { try msec.initOutputSection(elf_file); } - const needs_eh_frame = for (elf_file.objects.items) |index| { + const needs_eh_frame = if (elf_file.zigObjectPtr()) |zo| + zo.eh_frame_index != null + else for (elf_file.objects.items) |index| { if (elf_file.file(index).?.object.cies.items.len > 0) break true; } else false; if (needs_eh_frame) { if (elf_file.eh_frame_section_index == null) { - elf_file.eh_frame_section_index = try elf_file.addSection(.{ - .name = try elf_file.insertShString(".eh_frame"), - .type = if (elf_file.getTarget().cpu.arch == .x86_64) - elf.SHT_X86_64_UNWIND - else - elf.SHT_PROGBITS, - .flags = elf.SHF_ALLOC, - .addralign = elf_file.ptrWidthBytes(), - .offset = std.math.maxInt(u64), - }); + elf_file.eh_frame_section_index = blk: { + if (elf_file.zigObjectPtr()) |zo| { + if (zo.eh_frame_index) |idx| break :blk zo.symbol(idx).atom(elf_file).?.output_section_index; + } + break :blk try elf_file.addSection(.{ + .name = try elf_file.insertShString(".eh_frame"), + .type = if (elf_file.getTarget().cpu.arch == .x86_64) + elf.SHT_X86_64_UNWIND + else + elf.SHT_PROGBITS, + .flags = elf.SHF_ALLOC, + .addralign = elf_file.ptrWidthBytes(), + .offset = std.math.maxInt(u64), + }); + }; } elf_file.eh_frame_rela_section_index = try elf_file.addRelaShdr( try elf_file.insertShString(".rela.eh_frame"), From fca92fd7c0a2e1431338ef62440751870563a0e3 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Tue, 3 Sep 2024 15:07:30 +0200 Subject: [PATCH 052/202] elf: 
copy existing data when allocating other alloc sections in relocatable mode --- src/link/Elf/relocatable.zig | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig index ea710e7b2acb..e66ae99ad217 100644 --- a/src/link/Elf/relocatable.zig +++ b/src/link/Elf/relocatable.zig @@ -407,7 +407,7 @@ fn updateComdatGroupsSizes(elf_file: *Elf) void { /// Allocates alloc sections when merging relocatable objects files together. fn allocateAllocSections(elf_file: *Elf) !void { - for (elf_file.sections.items(.shdr)) |*shdr| { + for (elf_file.sections.items(.shdr), 0..) |*shdr, shndx| { if (shdr.sh_type == elf.SHT_NULL) continue; if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue; if (shdr.sh_type == elf.SHT_NOBITS) { @@ -418,6 +418,34 @@ fn allocateAllocSections(elf_file: *Elf) !void { if (needed_size > elf_file.allocatedSize(shdr.sh_offset)) { shdr.sh_size = 0; const new_offset = try elf_file.findFreeSpace(needed_size, shdr.sh_addralign); + + if (elf_file.zigObjectPtr()) |zo| blk: { + const existing_size = for ([_]?Symbol.Index{ + zo.text_index, + zo.rodata_index, + zo.data_relro_index, + zo.data_index, + zo.tdata_index, + zo.eh_frame_index, + }) |maybe_sym_index| { + const sect_sym_index = maybe_sym_index orelse continue; + const sect_atom_ptr = zo.symbol(sect_sym_index).atom(elf_file).?; + if (sect_atom_ptr.output_section_index == shndx) break sect_atom_ptr.size; + } else break :blk; + log.debug("moving {s} from 0x{x} to 0x{x}", .{ + elf_file.getShString(shdr.sh_name), + shdr.sh_offset, + new_offset, + }); + const amt = try elf_file.base.file.?.copyRangeAll( + shdr.sh_offset, + elf_file.base.file.?, + new_offset, + existing_size, + ); + if (amt != existing_size) return error.InputOutput; + } + shdr.sh_offset = new_offset; shdr.sh_size = needed_size; } From eeec50d2515c5a3b65cab697ba3e28d89cc3b14c Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Tue, 3 Sep 2024 21:01:12 +0200 
Subject: [PATCH 053/202] elf: misc .eh_frame management fixes --- src/link/Elf.zig | 44 +++++++++++++------------------ src/link/Elf/Object.zig | 1 + src/link/Elf/ZigObject.zig | 12 +++++++++ src/link/Elf/eh_frame.zig | 18 +++++++------ src/link/Elf/relocatable.zig | 50 +++++++++++++++++------------------- 5 files changed, 64 insertions(+), 61 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index d660c48b4bbf..99ba66e92dac 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -621,7 +621,6 @@ pub fn growNonAllocSection( try self.base.file.?.setEndPos(shdr.sh_offset + needed_size); } shdr.sh_size = needed_size; - self.markDirty(shdr_index); } @@ -2895,28 +2894,25 @@ fn initSyntheticSections(self: *Elf) !void { const target = self.getTarget(); const ptr_size = self.ptrWidthBytes(); - const needs_eh_frame = if (self.zigObjectPtr()) |zo| - zo.eh_frame_index != null - else for (self.objects.items) |index| { - if (self.file(index).?.object.cies.items.len > 0) break true; - } else false; + const needs_eh_frame = blk: { + if (self.zigObjectPtr()) |zo| + if (zo.eh_frame_index != null) break :blk true; + break :blk for (self.objects.items) |index| { + if (self.file(index).?.object.cies.items.len > 0) break true; + } else false; + }; if (needs_eh_frame) { if (self.eh_frame_section_index == null) { - self.eh_frame_section_index = blk: { - if (self.zigObjectPtr()) |zo| { - if (zo.eh_frame_index) |idx| break :blk zo.symbol(idx).atom(self).?.output_section_index; - } - break :blk try self.addSection(.{ - .name = try self.insertShString(".eh_frame"), - .type = if (target.cpu.arch == .x86_64) - elf.SHT_X86_64_UNWIND - else - elf.SHT_PROGBITS, - .flags = elf.SHF_ALLOC, - .addralign = ptr_size, - .offset = std.math.maxInt(u64), - }); - }; + self.eh_frame_section_index = self.sectionByName(".eh_frame") orelse try self.addSection(.{ + .name = try self.insertShString(".eh_frame"), + .type = if (target.cpu.arch == .x86_64) + elf.SHT_X86_64_UNWIND + else + 
elf.SHT_PROGBITS, + .flags = elf.SHF_ALLOC, + .addralign = ptr_size, + .offset = std.math.maxInt(u64), + }); } if (comp.link_eh_frame_hdr) { self.eh_frame_hdr_section_index = try self.addSection(.{ @@ -3591,11 +3587,7 @@ fn updateSectionSizes(self: *Elf) !void { const shdrs = slice.items(.shdr); if (self.eh_frame_section_index) |index| { - shdrs[index].sh_size = existing_size: { - const zo = self.zigObjectPtr() orelse break :existing_size 0; - const sym = zo.symbol(zo.eh_frame_index orelse break :existing_size 0); - break :existing_size sym.atom(self).?.size; - } + try eh_frame.calcEhFrameSize(self); + shdrs[index].sh_size = try eh_frame.calcEhFrameSize(self); } if (self.eh_frame_hdr_section_index) |index| { diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig index a72ac697ddc8..884826d82b29 100644 --- a/src/link/Elf/Object.zig +++ b/src/link/Elf/Object.zig @@ -1096,6 +1096,7 @@ pub fn initRelaSections(self: *Object, elf_file: *Elf) !void { for (self.atoms_indexes.items) |atom_index| { const atom_ptr = self.atom(atom_index) orelse continue; if (!atom_ptr.alive) continue; + if (atom_ptr.output_section_index == elf_file.eh_frame_section_index) continue; const shndx = atom_ptr.relocsShndx() orelse continue; const shdr = self.shdrs.items[shndx]; const out_shndx = try elf_file.initOutputSection(.{ diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 8eef1a84e406..416ed0612614 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -111,6 +111,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { }); self.debug_str_section_dirty = true; self.debug_str_index = try self.addSectionSymbol(gpa, ".debug_str", .@"1", osec); + elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_str_index.?).ref; } if (self.debug_info_index == null) { @@ -121,6 +122,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { }); self.debug_info_section_dirty = true; 
self.debug_info_index = try self.addSectionSymbol(gpa, ".debug_info", .@"1", osec); + elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_info_index.?).ref; } if (self.debug_abbrev_index == null) { @@ -131,6 +133,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { }); self.debug_abbrev_section_dirty = true; self.debug_abbrev_index = try self.addSectionSymbol(gpa, ".debug_abbrev", .@"1", osec); + elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_abbrev_index.?).ref; } if (self.debug_aranges_index == null) { @@ -141,6 +144,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { }); self.debug_aranges_section_dirty = true; self.debug_aranges_index = try self.addSectionSymbol(gpa, ".debug_aranges", .@"16", osec); + elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_aranges_index.?).ref; } if (self.debug_line_index == null) { @@ -151,6 +155,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { }); self.debug_line_section_dirty = true; self.debug_line_index = try self.addSectionSymbol(gpa, ".debug_line", .@"1", osec); + elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_line_index.?).ref; } if (self.debug_line_str_index == null) { @@ -163,6 +168,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { }); self.debug_line_str_section_dirty = true; self.debug_line_str_index = try self.addSectionSymbol(gpa, ".debug_line_str", .@"1", osec); + elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_line_str_index.?).ref; } if (self.debug_loclists_index == null) { @@ -173,6 +179,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { }); self.debug_loclists_section_dirty = true; self.debug_loclists_index = try self.addSectionSymbol(gpa, ".debug_loclists", .@"1", osec); + elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_loclists_index.?).ref; } if 
(self.debug_rnglists_index == null) { @@ -183,6 +190,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { }); self.debug_rnglists_section_dirty = true; self.debug_rnglists_index = try self.addSectionSymbol(gpa, ".debug_rnglists", .@"1", osec); + elf_file.sections.items(.last_atom)[osec] = self.symbol(self.debug_rnglists_index.?).ref; } if (self.eh_frame_index == null) { @@ -197,6 +205,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void { }); self.eh_frame_section_dirty = true; self.eh_frame_index = try self.addSectionSymbol(gpa, ".eh_frame", Atom.Alignment.fromNonzeroByteUnits(ptr_size), osec); + elf_file.sections.items(.last_atom)[osec] = self.symbol(self.eh_frame_index.?).ref; } try dwarf.initMetadata(); @@ -1116,6 +1125,9 @@ pub fn getOrCreateMetadataForNav( return gop.value_ptr.symbol_index; } +// FIXME: we always create an atom to basically store size and alignment, however, this is only true for +// sections that have a single atom like the debug sections. It would be a better solution to decouple this +// concept from the atom, maybe. 
fn addSectionSymbol( self: *ZigObject, allocator: Allocator, diff --git a/src/link/Elf/eh_frame.zig b/src/link/Elf/eh_frame.zig index 54024fa1bb49..a9e3e618cb48 100644 --- a/src/link/Elf/eh_frame.zig +++ b/src/link/Elf/eh_frame.zig @@ -233,7 +233,10 @@ pub fn calcEhFrameSize(elf_file: *Elf) !usize { const comp = elf_file.base.comp; const gpa = comp.gpa; - var offset: usize = 0; + var offset: usize = if (elf_file.zigObjectPtr()) |zo| blk: { + const sym = zo.symbol(zo.eh_frame_index orelse break :blk 0); + break :blk sym.atom(elf_file).?.size; + } else 0; var cies = std.ArrayList(Cie).init(gpa); defer cies.deinit(); @@ -423,9 +426,8 @@ pub fn writeEhFrameRelocatable(elf_file: *Elf, writer: anytype) !void { } } -fn emitReloc(elf_file: *Elf, base_offset: u64, sym: *const Symbol, rel: elf.Elf64_Rela) elf.Elf64_Rela { +fn emitReloc(elf_file: *Elf, r_offset: u64, sym: *const Symbol, rel: elf.Elf64_Rela) elf.Elf64_Rela { const cpu_arch = elf_file.getTarget().cpu.arch; - const r_offset = base_offset + rel.r_offset; const r_type = rel.r_type(); var r_addend = rel.r_addend; var r_sym: u32 = 0; @@ -467,7 +469,7 @@ pub fn writeEhFrameRelocs(elf_file: *Elf, writer: anytype) !void { for (atom_ptr.relocs(elf_file)) |rel| { const ref = zo.resolveSymbol(rel.r_sym(), elf_file); const target = elf_file.symbol(ref).?; - const out_rel = emitReloc(elf_file, 0, target, rel); + const out_rel = emitReloc(elf_file, rel.r_offset, target, rel); try writer.writeStruct(out_rel); } } @@ -480,8 +482,8 @@ pub fn writeEhFrameRelocs(elf_file: *Elf, writer: anytype) !void { for (cie.relocs(elf_file)) |rel| { const ref = object.resolveSymbol(rel.r_sym(), elf_file); const sym = elf_file.symbol(ref).?; - const offset = cie.address(elf_file) - cie.offset; - const out_rel = emitReloc(elf_file, offset, sym, rel); + const r_offset = cie.address(elf_file) + rel.r_offset - cie.offset; + const out_rel = emitReloc(elf_file, r_offset, sym, rel); try writer.writeStruct(out_rel); } } @@ -491,8 +493,8 @@ pub fn 
writeEhFrameRelocs(elf_file: *Elf, writer: anytype) !void { for (fde.relocs(elf_file)) |rel| { const ref = object.resolveSymbol(rel.r_sym(), elf_file); const sym = elf_file.symbol(ref).?; - const offset = fde.address(elf_file) - fde.offset; - const out_rel = emitReloc(elf_file, offset, sym, rel); + const r_offset = fde.address(elf_file) + rel.r_offset - fde.offset; + const out_rel = emitReloc(elf_file, r_offset, sym, rel); try writer.writeStruct(out_rel); } } diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig index e66ae99ad217..9182c0694617 100644 --- a/src/link/Elf/relocatable.zig +++ b/src/link/Elf/relocatable.zig @@ -302,30 +302,29 @@ fn initSections(elf_file: *Elf) !void { try msec.initOutputSection(elf_file); } - const needs_eh_frame = if (elf_file.zigObjectPtr()) |zo| - zo.eh_frame_index != null - else for (elf_file.objects.items) |index| { - if (elf_file.file(index).?.object.cies.items.len > 0) break true; - } else false; + const needs_eh_frame = blk: { + if (elf_file.zigObjectPtr()) |zo| + if (zo.eh_frame_index != null) break :blk true; + break :blk for (elf_file.objects.items) |index| { + if (elf_file.file(index).?.object.cies.items.len > 0) break true; + } else false; + }; if (needs_eh_frame) { if (elf_file.eh_frame_section_index == null) { - elf_file.eh_frame_section_index = blk: { - if (elf_file.zigObjectPtr()) |zo| { - if (zo.eh_frame_index) |idx| break :blk zo.symbol(idx).atom(elf_file).?.output_section_index; - } - break :blk try elf_file.addSection(.{ - .name = try elf_file.insertShString(".eh_frame"), - .type = if (elf_file.getTarget().cpu.arch == .x86_64) - elf.SHT_X86_64_UNWIND - else - elf.SHT_PROGBITS, - .flags = elf.SHF_ALLOC, - .addralign = elf_file.ptrWidthBytes(), - .offset = std.math.maxInt(u64), - }); - }; + elf_file.eh_frame_section_index = elf_file.sectionByName(".eh_frame") orelse + try elf_file.addSection(.{ + .name = try elf_file.insertShString(".eh_frame"), + .type = if (elf_file.getTarget().cpu.arch == 
.x86_64) + elf.SHT_X86_64_UNWIND + else + elf.SHT_PROGBITS, + .flags = elf.SHF_ALLOC, + .addralign = elf_file.ptrWidthBytes(), + .offset = std.math.maxInt(u64), + }); } - elf_file.eh_frame_rela_section_index = try elf_file.addRelaShdr( + elf_file.eh_frame_rela_section_index = elf_file.sectionByName(".rela.eh_frame") orelse + try elf_file.addRelaShdr( try elf_file.insertShString(".rela.eh_frame"), elf_file.eh_frame_section_index.?, ); @@ -367,6 +366,7 @@ fn updateSectionSizes(elf_file: *Elf) !void { for (slice.items(.shdr), 0..) |*shdr, shndx| { const atom_list = slice.items(.atom_list)[shndx]; if (shdr.sh_type != elf.SHT_RELA) continue; + if (@as(u32, @intCast(shndx)) == elf_file.eh_frame_section_index) continue; for (atom_list.items) |ref| { const atom_ptr = elf_file.atom(ref) orelse continue; if (!atom_ptr.alive) continue; @@ -378,11 +378,7 @@ fn updateSectionSizes(elf_file: *Elf) !void { } if (elf_file.eh_frame_section_index) |index| { - slice.items(.shdr)[index].sh_size = existing_size: { - const zo = elf_file.zigObjectPtr() orelse break :existing_size 0; - const sym = zo.symbol(zo.eh_frame_index orelse break :existing_size 0); - break :existing_size sym.atom(elf_file).?.size; - } + try eh_frame.calcEhFrameSize(elf_file); + slice.items(.shdr)[index].sh_size = try eh_frame.calcEhFrameSize(elf_file); } if (elf_file.eh_frame_rela_section_index) |index| { const shdr = &slice.items(.shdr)[index]; @@ -464,8 +460,8 @@ fn writeSyntheticSections(elf_file: *Elf) !void { for (slice.items(.shdr), slice.items(.atom_list), 0..) 
|shdr, atom_list, shndx| { if (shdr.sh_type != elf.SHT_RELA) continue; - if (@as(u32, @intCast(shndx)) == elf_file.eh_frame_rela_section_index) continue; if (atom_list.items.len == 0) continue; + if (@as(u32, @intCast(shndx)) == elf_file.eh_frame_section_index) continue; const num_relocs = math.cast(usize, @divExact(shdr.sh_size, shdr.sh_entsize)) orelse return error.Overflow; From 801f038c2ced9c0d7147f4304834ac6ae68e4cb0 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 4 Sep 2024 06:43:33 +0200 Subject: [PATCH 054/202] elf: do not pad placeholders coming from input object files This is currently not entirely accurate since no padding will affect the last-most atom of ZigObject that should be padded. --- src/link/Elf.zig | 33 +++++++++++++++++++-------------- src/link/Elf/Object.zig | 7 ++++++- src/link/Elf/ZigObject.zig | 6 +++++- 3 files changed, 30 insertions(+), 16 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 99ba66e92dac..12f564edcf2c 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -659,12 +659,17 @@ const AllocateChunkResult = struct { placement: Ref, }; -pub fn allocateChunk(self: *Elf, shndx: u32, size: u64, alignment: Atom.Alignment) !AllocateChunkResult { +pub fn allocateChunk(self: *Elf, args: struct { + size: u64, + shndx: u32, + alignment: Atom.Alignment, + requires_padding: bool = true, +}) !AllocateChunkResult { const slice = self.sections.slice(); - const shdr = &slice.items(.shdr)[shndx]; - const free_list = &slice.items(.free_list)[shndx]; - const last_atom_ref = &slice.items(.last_atom)[shndx]; - const new_atom_ideal_capacity = padToIdeal(size); + const shdr = &slice.items(.shdr)[args.shndx]; + const free_list = &slice.items(.free_list)[args.shndx]; + const last_atom_ref = &slice.items(.last_atom)[args.shndx]; + const new_atom_ideal_capacity = if (args.requires_padding) padToIdeal(args.size) else args.size; // First we look for an appropriately sized free list node. // The list is unordered. 
We'll just take the first thing that works. @@ -676,11 +681,11 @@ pub fn allocateChunk(self: *Elf, shndx: u32, size: u64, alignment: Atom.Alignmen // We now have a pointer to a live atom that has too much capacity. // Is it enough that we could fit this new atom? const cap = big_atom.capacity(self); - const ideal_capacity = padToIdeal(cap); + const ideal_capacity = if (args.requires_padding) padToIdeal(cap) else cap; const ideal_capacity_end_vaddr = std.math.add(u64, @intCast(big_atom.value), ideal_capacity) catch ideal_capacity; const capacity_end_vaddr = @as(u64, @intCast(big_atom.value)) + cap; const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity; - const new_start_vaddr = alignment.backward(new_start_vaddr_unaligned); + const new_start_vaddr = args.alignment.backward(new_start_vaddr_unaligned); if (new_start_vaddr < ideal_capacity_end_vaddr) { // Additional bookkeeping here to notice if this free list node // should be deleted because the block that it points to has grown to take up @@ -703,9 +708,9 @@ pub fn allocateChunk(self: *Elf, shndx: u32, size: u64, alignment: Atom.Alignmen } break :blk .{ .value = new_start_vaddr, .placement = big_atom_ref }; } else if (self.atom(last_atom_ref.*)) |last_atom| { - const ideal_capacity = padToIdeal(last_atom.size); + const ideal_capacity = if (args.requires_padding) padToIdeal(last_atom.size) else last_atom.size; const ideal_capacity_end_vaddr = @as(u64, @intCast(last_atom.value)) + ideal_capacity; - const new_start_vaddr = alignment.forward(ideal_capacity_end_vaddr); + const new_start_vaddr = args.alignment.forward(ideal_capacity_end_vaddr); break :blk .{ .value = new_start_vaddr, .placement = last_atom.ref() }; } else { break :blk .{ .value = 0, .placement = .{} }; @@ -713,8 +718,8 @@ pub fn allocateChunk(self: *Elf, shndx: u32, size: u64, alignment: Atom.Alignmen }; log.debug("allocated chunk (size({x}),align({x})) at 0x{x} (file(0x{x}))", .{ - size, - alignment.toByteUnits().?, + args.size, 
+ args.alignment.toByteUnits().?, shdr.sh_addr + res.value, shdr.sh_offset + res.value, }); @@ -724,11 +729,11 @@ pub fn allocateChunk(self: *Elf, shndx: u32, size: u64, alignment: Atom.Alignmen else true; if (expand_section) { - const needed_size = res.value + size; + const needed_size = res.value + args.size; if (shdr.sh_flags & elf.SHF_ALLOC != 0) - try self.growAllocSection(shndx, needed_size, alignment.toByteUnits().?) + try self.growAllocSection(args.shndx, needed_size, args.alignment.toByteUnits().?) else - try self.growNonAllocSection(shndx, needed_size, alignment.toByteUnits().?, true); + try self.growNonAllocSection(args.shndx, needed_size, args.alignment.toByteUnits().?, true); } return res; diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig index 884826d82b29..4a1ca685c523 100644 --- a/src/link/Elf/Object.zig +++ b/src/link/Elf/Object.zig @@ -969,7 +969,12 @@ pub fn allocateAtoms(self: *Object, elf_file: *Elf) !void { } for (self.section_chunks.items) |*chunk| { - const alloc_res = try elf_file.allocateChunk(chunk.output_section_index, chunk.size, chunk.alignment); + const alloc_res = try elf_file.allocateChunk(.{ + .shndx = chunk.output_section_index, + .size = chunk.size, + .alignment = chunk.alignment, + .requires_padding = false, + }); chunk.value = @intCast(alloc_res.value); const slice = elf_file.sections.slice(); diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 416ed0612614..0dc4bd9dae5f 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -1996,7 +1996,11 @@ fn writeTrampoline(tr_sym: Symbol, target: Symbol, elf_file: *Elf) !void { } fn allocateAtom(self: *ZigObject, atom_ptr: *Atom, elf_file: *Elf) !void { - const alloc_res = try elf_file.allocateChunk(atom_ptr.output_section_index, atom_ptr.size, atom_ptr.alignment); + const alloc_res = try elf_file.allocateChunk(.{ + .shndx = atom_ptr.output_section_index, + .size = atom_ptr.size, + .alignment = atom_ptr.alignment, + }); 
atom_ptr.value = @intCast(alloc_res.value); const slice = elf_file.sections.slice(); From 8c76a61ef541cf285a82fcc928face4082606c03 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 4 Sep 2024 06:46:57 +0200 Subject: [PATCH 055/202] test/link/elf: test --gc-sections on Zig input with LLVM too --- test/link/elf.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/test/link/elf.zig b/test/link/elf.zig index 159703bbb3f2..34bc4bb1283f 100644 --- a/test/link/elf.zig +++ b/test/link/elf.zig @@ -66,6 +66,7 @@ pub fn testAll(b: *Build, build_opts: BuildOptions) *Step { elf_step.dependOn(testEmptyObject(b, .{ .target = musl_target })); elf_step.dependOn(testEntryPoint(b, .{ .target = musl_target })); elf_step.dependOn(testGcSections(b, .{ .target = musl_target })); + elf_step.dependOn(testGcSectionsZig(b, .{ .target = musl_target })); elf_step.dependOn(testImageBase(b, .{ .target = musl_target })); elf_step.dependOn(testInitArrayOrder(b, .{ .target = musl_target })); elf_step.dependOn(testLargeAlignmentExe(b, .{ .target = musl_target })); From 6ec8b15918dcd692f88875843a51a0514e315d9d Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 4 Sep 2024 07:19:25 +0200 Subject: [PATCH 056/202] elf: fix emitting static lib when ZigObject is present --- src/link/Elf/relocatable.zig | 21 ++++++++------------- test/link/elf.zig | 1 + 2 files changed, 9 insertions(+), 13 deletions(-) diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig index 9182c0694617..5df6bb994649 100644 --- a/src/link/Elf/relocatable.zig +++ b/src/link/Elf/relocatable.zig @@ -18,7 +18,7 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation, module_obj_path: ?[]co } for (positionals.items) |obj| { - parsePositional(elf_file, obj.path) catch |err| switch (err) { + parsePositionalStaticLib(elf_file, obj.path) catch |err| switch (err) { error.MalformedObject, error.MalformedArchive, error.InvalidMachineType, @@ -38,17 +38,12 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation, 
module_obj_path: ?[]co // First, we flush relocatable object file generated with our backends. if (elf_file.zigObjectPtr()) |zig_object| { try zig_object.resolveSymbols(elf_file); + elf_file.markEhFrameAtomsDead(); try elf_file.addCommentString(); try elf_file.finalizeMergeSections(); zig_object.claimUnresolvedRelocatable(elf_file); - for (elf_file.merge_sections.items) |*msec| { - if (msec.finalized_subsections.items.len == 0) continue; - try msec.initOutputSection(elf_file); - } - - try elf_file.initSymtab(); - try elf_file.initShStrtab(); + try initSections(elf_file); try elf_file.sortShdrs(); try zig_object.addAtomsToRelaSections(elf_file); try elf_file.updateMergeSectionSizes(); @@ -229,17 +224,17 @@ pub fn flushObject(elf_file: *Elf, comp: *Compilation, module_obj_path: ?[]const if (elf_file.base.hasErrors()) return error.FlushFailure; } -fn parsePositional(elf_file: *Elf, path: []const u8) Elf.ParseError!void { +fn parsePositionalStaticLib(elf_file: *Elf, path: []const u8) Elf.ParseError!void { if (try Object.isObject(path)) { - try parseObject(elf_file, path); + try parseObjectStaticLib(elf_file, path); } else if (try Archive.isArchive(path)) { - try parseArchive(elf_file, path); + try parseArchiveStaticLib(elf_file, path); } else return error.UnknownFileType; // TODO: should we check for LD script? // Actually, should we even unpack an archive? 
} -fn parseObject(elf_file: *Elf, path: []const u8) Elf.ParseError!void { +fn parseObjectStaticLib(elf_file: *Elf, path: []const u8) Elf.ParseError!void { const gpa = elf_file.base.comp.gpa; const handle = try std.fs.cwd().openFile(path, .{}); const fh = try elf_file.addFileHandle(handle); @@ -256,7 +251,7 @@ fn parseObject(elf_file: *Elf, path: []const u8) Elf.ParseError!void { try object.parseAr(elf_file); } -fn parseArchive(elf_file: *Elf, path: []const u8) Elf.ParseError!void { +fn parseArchiveStaticLib(elf_file: *Elf, path: []const u8) Elf.ParseError!void { const gpa = elf_file.base.comp.gpa; const handle = try std.fs.cwd().openFile(path, .{}); const fh = try elf_file.addFileHandle(handle); diff --git a/test/link/elf.zig b/test/link/elf.zig index 34bc4bb1283f..9d169faf1d10 100644 --- a/test/link/elf.zig +++ b/test/link/elf.zig @@ -55,6 +55,7 @@ pub fn testAll(b: *Build, build_opts: BuildOptions) *Step { // Exercise linker in ar mode elf_step.dependOn(testEmitStaticLib(b, .{ .target = musl_target })); + elf_step.dependOn(testEmitStaticLibZig(b, .{ .target = musl_target })); // Exercise linker with LLVM backend // musl tests From 64ad6eff16aac1f6154efa12406818efed57bf73 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 4 Sep 2024 08:10:19 +0200 Subject: [PATCH 057/202] elf: create back/forward links for atoms within section chunks --- src/link/Elf/Object.zig | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig index 4a1ca685c523..0d063927ce75 100644 --- a/src/link/Elf/Object.zig +++ b/src/link/Elf/Object.zig @@ -988,7 +988,25 @@ pub fn allocateAtoms(self: *Object, elf_file: *Elf) !void { if (expand_section) last_atom_ref.* = chunk.lastAtom(self).ref(); shdr.sh_addralign = @max(shdr.sh_addralign, chunk.alignment.toByteUnits().?); - // TODO create back and forward links + { + var idx: usize = 0; + while (idx < chunk.atoms.items.len) : (idx += 1) { + const curr_atom_ptr = 
self.atom(chunk.atoms.items[idx]).?; + if (idx > 0) { + curr_atom_ptr.prev_atom_ref = .{ .index = chunk.atoms.items[idx - 1], .file = self.index }; + } + if (idx + 1 < chunk.atoms.items.len) { + curr_atom_ptr.next_atom_ref = .{ .index = chunk.atoms.items[idx + 1], .file = self.index }; + } + } + } + + if (elf_file.atom(alloc_res.placement)) |placement_atom| { + chunk.firstAtom(self).prev_atom_ref = placement_atom.ref(); + chunk.lastAtom(self).next_atom_ref = placement_atom.next_atom_ref; + placement_atom.next_atom_ref = chunk.firstAtom(self).ref(); + } + // TODO if we had a link from Atom to parent Chunk we would not need to update Atom's value or osec index for (chunk.atoms.items) |atom_index| { const atom_ptr = self.atom(atom_index).?; From d302a1068effe4edcd36bd01f77ac2e1b1048e16 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 4 Sep 2024 11:40:56 +0200 Subject: [PATCH 058/202] elf: rename SectionChunk into AtomList and store as part of Section --- src/link/Elf.zig | 133 ++++++++++------- src/link/Elf/Object.zig | 276 +---------------------------------- src/link/Elf/relocatable.zig | 20 ++- 3 files changed, 103 insertions(+), 326 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 12f564edcf2c..67969b7fb794 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -393,7 +393,8 @@ pub fn deinit(self: *Elf) void { self.objects.deinit(gpa); self.shared_objects.deinit(gpa); - for (self.sections.items(.atom_list), self.sections.items(.free_list)) |*atoms, *free_list| { + for (self.sections.items(.atom_list_2), self.sections.items(.atom_list), self.sections.items(.free_list)) |*atom_list, *atoms, *free_list| { + atom_list.deinit(gpa); atoms.deinit(gpa); free_list.deinit(gpa); } @@ -3204,8 +3205,9 @@ fn sortInitFini(self: *Elf) !void { } }; - for (slice.items(.shdr), slice.items(.atom_list)) |shdr, *atom_list| { + for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, *atom_list| { if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue; + if 
(atom_list.atoms.items.len == 0) continue; var is_init_fini = false; var is_ctor_dtor = false; @@ -3219,15 +3221,13 @@ fn sortInitFini(self: *Elf) !void { is_ctor_dtor = mem.indexOf(u8, name, ".ctors") != null or mem.indexOf(u8, name, ".dtors") != null; }, } - if (!is_init_fini and !is_ctor_dtor) continue; - if (atom_list.items.len == 0) continue; var entries = std.ArrayList(Entry).init(gpa); - try entries.ensureTotalCapacityPrecise(atom_list.items.len); + try entries.ensureTotalCapacityPrecise(atom_list.atoms.items.len); defer entries.deinit(); - for (atom_list.items) |ref| { + for (atom_list.atoms.items) |ref| { const atom_ptr = self.atom(ref).?; const object = atom_ptr.file(self).?.object; const priority = blk: { @@ -3246,9 +3246,9 @@ fn sortInitFini(self: *Elf) !void { mem.sort(Entry, entries.items, self, Entry.lessThan); - atom_list.clearRetainingCapacity(); + atom_list.atoms.clearRetainingCapacity(); for (entries.items) |entry| { - atom_list.appendAssumeCapacity(entry.atom_ref); + atom_list.atoms.appendAssumeCapacity(entry.atom_ref); } } } @@ -3491,13 +3491,19 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void { msec.output_section_index = backlinks[msec.output_section_index]; } - for (self.sections.items(.shdr)) |*shdr| { - if (shdr.sh_type != elf.SHT_RELA) continue; - // FIXME:JK we should spin up .symtab potentially earlier, or set all non-dynamic RELA sections - // to point at symtab - // shdr.sh_link = backlinks[shdr.sh_link]; - shdr.sh_link = self.symtab_section_index.?; - shdr.sh_info = backlinks[shdr.sh_info]; + const slice = self.sections.slice(); + for (slice.items(.shdr), slice.items(.atom_list_2)) |*shdr, *atom_list| { + atom_list.output_section_index = backlinks[atom_list.output_section_index]; + for (atom_list.atoms.items) |ref| { + self.atom(ref).?.output_section_index = atom_list.output_section_index; + } + if (shdr.sh_type == elf.SHT_RELA) { + // FIXME:JK we should spin up .symtab potentially earlier, or set all non-dynamic RELA 
sections + // to point at symtab + // shdr.sh_link = backlinks[shdr.sh_link]; + shdr.sh_link = self.symtab_section_index.?; + shdr.sh_info = backlinks[shdr.sh_info]; + } } if (self.zigObjectPtr()) |zo| { @@ -3507,79 +3513,71 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void { } } - for (self.objects.items) |index| { - const object = self.file(index).?.object; - for (object.section_chunks.items) |*chunk| { - chunk.output_section_index = backlinks[chunk.output_section_index]; - for (chunk.atoms.items) |atom_index| { - object.atom(atom_index).?.output_section_index = chunk.output_section_index; - } - } - } - for (self.comdat_group_sections.items) |*cg| { cg.shndx = backlinks[cg.shndx]; } if (self.symtab_section_index) |index| { - const shdr = &self.sections.items(.shdr)[index]; + const shdr = &slice.items(.shdr)[index]; shdr.sh_link = self.strtab_section_index.?; } if (self.dynamic_section_index) |index| { - const shdr = &self.sections.items(.shdr)[index]; + const shdr = &slice.items(.shdr)[index]; shdr.sh_link = self.dynstrtab_section_index.?; } if (self.dynsymtab_section_index) |index| { - const shdr = &self.sections.items(.shdr)[index]; + const shdr = &slice.items(.shdr)[index]; shdr.sh_link = self.dynstrtab_section_index.?; } if (self.hash_section_index) |index| { - const shdr = &self.sections.items(.shdr)[index]; + const shdr = &slice.items(.shdr)[index]; shdr.sh_link = self.dynsymtab_section_index.?; } if (self.gnu_hash_section_index) |index| { - const shdr = &self.sections.items(.shdr)[index]; + const shdr = &slice.items(.shdr)[index]; shdr.sh_link = self.dynsymtab_section_index.?; } if (self.versym_section_index) |index| { - const shdr = &self.sections.items(.shdr)[index]; + const shdr = &slice.items(.shdr)[index]; shdr.sh_link = self.dynsymtab_section_index.?; } if (self.verneed_section_index) |index| { - const shdr = &self.sections.items(.shdr)[index]; + const shdr = &slice.items(.shdr)[index]; shdr.sh_link = self.dynstrtab_section_index.?; } 
if (self.rela_dyn_section_index) |index| { - const shdr = &self.sections.items(.shdr)[index]; + const shdr = &slice.items(.shdr)[index]; shdr.sh_link = self.dynsymtab_section_index orelse 0; } if (self.rela_plt_section_index) |index| { - const shdr = &self.sections.items(.shdr)[index]; + const shdr = &slice.items(.shdr)[index]; shdr.sh_link = self.dynsymtab_section_index.?; shdr.sh_info = self.plt_section_index.?; } if (self.eh_frame_rela_section_index) |index| { - const shdr = &self.sections.items(.shdr)[index]; + const shdr = &slice.items(.shdr)[index]; shdr.sh_link = self.symtab_section_index.?; shdr.sh_info = self.eh_frame_section_index.?; } } fn updateSectionSizes(self: *Elf) !void { - for (self.objects.items) |index| { - try self.file(index).?.object.allocateAtoms(self); + const slice = self.sections.slice(); + for (slice.items(.atom_list_2)) |*atom_list| { + if (atom_list.atoms.items.len == 0) continue; + atom_list.updateSize(self); + try atom_list.allocate(self); } - const slice = self.sections.slice(); if (self.requiresThunks()) { for (slice.items(.shdr), slice.items(.atom_list), 0..) 
|*shdr, atom_list, shndx| { if (shdr.sh_flags & elf.SHF_EXECINSTR == 0) continue; @@ -4033,15 +4031,38 @@ fn allocateSpecialPhdrs(self: *Elf) void { } fn writeAtoms(self: *Elf) !void { - for (self.objects.items) |index| { - try self.file(index).?.object.writeAtoms(self); + const gpa = self.base.comp.gpa; + + var undefs = std.AutoArrayHashMap(SymbolResolver.Index, std.ArrayList(Ref)).init(gpa); + defer { + for (undefs.values()) |*refs| { + refs.deinit(); + } + undefs.deinit(); } - if (self.requiresThunks()) { - const gpa = self.base.comp.gpa; - var buffer = std.ArrayList(u8).init(gpa); - defer buffer.deinit(); + var buffer = std.ArrayList(u8).init(gpa); + defer buffer.deinit(); + + const slice = self.sections.slice(); + var has_reloc_errors = false; + for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, atom_list| { + if (shdr.sh_type == elf.SHT_NOBITS) continue; + if (atom_list.atoms.items.len == 0) continue; + atom_list.write(&buffer, &undefs, self) catch |err| switch (err) { + error.UnsupportedCpuArch => { + try self.reportUnsupportedCpuArch(); + return error.FlushFailure; + }, + error.RelocFailure, error.RelaxFailure => has_reloc_errors = true, + else => |e| return e, + }; + } + + try self.reportUndefinedSymbols(&undefs); + if (has_reloc_errors) return error.FlushFailure; + if (self.requiresThunks()) { for (self.thunks.items) |th| { const thunk_size = th.size(self); try buffer.ensureUnusedCapacity(thunk_size); @@ -4993,7 +5014,7 @@ pub fn insertDynString(self: *Elf, name: []const u8) error{OutOfMemory}!u32 { return off; } -pub fn reportUndefinedSymbols(self: *Elf, undefs: anytype) !void { +fn reportUndefinedSymbols(self: *Elf, undefs: anytype) !void { const gpa = self.base.comp.gpa; const max_notes = 4; @@ -5061,7 +5082,7 @@ fn reportMissingLibraryError( } } -pub fn reportUnsupportedCpuArch(self: *Elf) error{OutOfMemory}!void { +fn reportUnsupportedCpuArch(self: *Elf) error{OutOfMemory}!void { var err = try self.base.addErrorWithNotes(0); try 
err.addMsg("fatal linker error: unsupported CPU architecture {s}", .{ @tagName(self.getTarget().cpu.arch), @@ -5248,9 +5269,8 @@ fn fmtDumpState( try writer.print("object({d}) : {}", .{ index, object.fmtPath() }); if (!object.alive) try writer.writeAll(" : [*]"); try writer.writeByte('\n'); - try writer.print("{}{}{}{}{}{}\n", .{ + try writer.print("{}{}{}{}{}\n", .{ object.fmtAtoms(self), - object.fmtSectionChunks(self), object.fmtCies(self), object.fmtFdes(self), object.fmtSymtab(self), @@ -5274,6 +5294,14 @@ fn fmtDumpState( try writer.print("{}\n", .{linker_defined.fmtSymtab(self)}); } + const slice = self.sections.slice(); + { + try writer.writeAll("atom lists\n"); + for (slice.items(.shdr), slice.items(.atom_list_2), 0..) |shdr, atom_list, shndx| { + try writer.print("shdr({d}) : {s} : {}", .{ shndx, self.getShString(shdr.sh_name), atom_list.fmt(self) }); + } + } + if (self.requiresThunks()) { try writer.writeAll("thunks\n"); for (self.thunks.items, 0..) |th, index| { @@ -5295,7 +5323,7 @@ fn fmtDumpState( } try writer.writeAll("\nOutput shdrs\n"); - for (self.sections.items(.shdr), self.sections.items(.phndx), 0..) |shdr, phndx, shndx| { + for (slice.items(.shdr), slice.items(.phndx), 0..) |shdr, phndx, shndx| { try writer.print(" shdr({d}) : phdr({?d}) : {}\n", .{ shndx, phndx, @@ -5549,8 +5577,14 @@ const Section = struct { phndx: ?u32 = null, /// List of atoms contributing to this section. + /// TODO currently this is only used for relocations tracking in relocatable mode + /// but will be merged with atom_list_2. atom_list: std.ArrayListUnmanaged(Ref) = .{}, + /// List of atoms contributing to this section. + /// This can be used by sections that require special handling such as init/fini array, etc. + atom_list_2: AtomList = .{}, + /// Index of the last allocated atom in this section. 
last_atom: Ref = .{ .index = 0, .file = 0 }, @@ -5690,6 +5724,7 @@ const Air = @import("../Air.zig"); const Allocator = std.mem.Allocator; const Archive = @import("Elf/Archive.zig"); pub const Atom = @import("Elf/Atom.zig"); +const AtomList = @import("Elf/AtomList.zig"); const Cache = std.Build.Cache; const Path = Cache.Path; const Compilation = @import("../Compilation.zig"); diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig index 0d063927ce75..18c7a91c8f0e 100644 --- a/src/link/Elf/Object.zig +++ b/src/link/Elf/Object.zig @@ -17,7 +17,6 @@ relocs: std.ArrayListUnmanaged(elf.Elf64_Rela) = .{}, atoms: std.ArrayListUnmanaged(Atom) = .{}, atoms_indexes: std.ArrayListUnmanaged(Atom.Index) = .{}, atoms_extra: std.ArrayListUnmanaged(u32) = .{}, -section_chunks: std.ArrayListUnmanaged(SectionChunk) = .{}, comdat_groups: std.ArrayListUnmanaged(Elf.ComdatGroup) = .{}, comdat_group_data: std.ArrayListUnmanaged(u32) = .{}, @@ -59,10 +58,6 @@ pub fn deinit(self: *Object, allocator: Allocator) void { self.atoms.deinit(allocator); self.atoms_indexes.deinit(allocator); self.atoms_extra.deinit(allocator); - for (self.section_chunks.items) |*chunk| { - chunk.deinit(allocator); - } - self.section_chunks.deinit(allocator); self.comdat_groups.deinit(allocator); self.comdat_group_data.deinit(allocator); self.relocs.deinit(allocator); @@ -952,166 +947,9 @@ pub fn initOutputSections(self: *Object, elf_file: *Elf) !void { .flags = shdr.sh_flags, .type = shdr.sh_type, }); - const chunk = for (self.section_chunks.items) |*chunk| { - if (chunk.output_section_index == osec) break chunk; - } else blk: { - const chunk = try self.section_chunks.addOne(elf_file.base.comp.gpa); - chunk.* = .{ .output_section_index = osec }; - break :blk chunk; - }; - try chunk.atoms.append(elf_file.base.comp.gpa, atom_index); - } -} - -pub fn allocateAtoms(self: *Object, elf_file: *Elf) !void { - for (self.section_chunks.items) |*chunk| { - chunk.updateSize(self); - } - - for 
(self.section_chunks.items) |*chunk| { - const alloc_res = try elf_file.allocateChunk(.{ - .shndx = chunk.output_section_index, - .size = chunk.size, - .alignment = chunk.alignment, - .requires_padding = false, - }); - chunk.value = @intCast(alloc_res.value); - - const slice = elf_file.sections.slice(); - const shdr = &slice.items(.shdr)[chunk.output_section_index]; - const last_atom_ref = &slice.items(.last_atom)[chunk.output_section_index]; - - const expand_section = if (elf_file.atom(alloc_res.placement)) |placement_atom| - placement_atom.nextAtom(elf_file) == null - else - true; - if (expand_section) last_atom_ref.* = chunk.lastAtom(self).ref(); - shdr.sh_addralign = @max(shdr.sh_addralign, chunk.alignment.toByteUnits().?); - - { - var idx: usize = 0; - while (idx < chunk.atoms.items.len) : (idx += 1) { - const curr_atom_ptr = self.atom(chunk.atoms.items[idx]).?; - if (idx > 0) { - curr_atom_ptr.prev_atom_ref = .{ .index = chunk.atoms.items[idx - 1], .file = self.index }; - } - if (idx + 1 < chunk.atoms.items.len) { - curr_atom_ptr.next_atom_ref = .{ .index = chunk.atoms.items[idx + 1], .file = self.index }; - } - } - } - - if (elf_file.atom(alloc_res.placement)) |placement_atom| { - chunk.firstAtom(self).prev_atom_ref = placement_atom.ref(); - chunk.lastAtom(self).next_atom_ref = placement_atom.next_atom_ref; - placement_atom.next_atom_ref = chunk.firstAtom(self).ref(); - } - - // TODO if we had a link from Atom to parent Chunk we would not need to update Atom's value or osec index - for (chunk.atoms.items) |atom_index| { - const atom_ptr = self.atom(atom_index).?; - atom_ptr.output_section_index = chunk.output_section_index; - atom_ptr.value += chunk.value; - } - } -} - -pub fn writeAtoms(self: *Object, elf_file: *Elf) !void { - const gpa = elf_file.base.comp.gpa; - - var undefs = std.AutoArrayHashMap(Elf.SymbolResolver.Index, std.ArrayList(Elf.Ref)).init(gpa); - defer { - for (undefs.values()) |*refs| { - refs.deinit(); - } - undefs.deinit(); - } - - var 
buffer = std.ArrayList(u8).init(gpa); - defer buffer.deinit(); - - log.debug("writing atoms in {}", .{self.fmtPath()}); - - var has_reloc_errors = false; - for (self.section_chunks.items) |chunk| { - const osec = elf_file.sections.items(.shdr)[chunk.output_section_index]; - if (osec.sh_type == elf.SHT_NOBITS) continue; - - log.debug(" in section '{s}'", .{elf_file.getShString(osec.sh_name)}); - - try buffer.ensureUnusedCapacity(chunk.size); - buffer.appendNTimesAssumeCapacity(0, chunk.size); - - for (chunk.atoms.items) |atom_index| { - const atom_ptr = self.atom(atom_index).?; - assert(atom_ptr.alive); - - const offset = math.cast(usize, atom_ptr.value - chunk.value) orelse return error.Overflow; - const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow; - - log.debug(" * atom({d}) at 0x{x}", .{ atom_index, chunk.offset(elf_file) + offset }); - - const code = try self.codeDecompressAlloc(elf_file, atom_index); - defer gpa.free(code); - const out_code = buffer.items[offset..][0..size]; - @memcpy(out_code, code); - - const res = if (osec.sh_flags & elf.SHF_ALLOC == 0) - atom_ptr.resolveRelocsNonAlloc(elf_file, out_code, &undefs) - else - atom_ptr.resolveRelocsAlloc(elf_file, out_code); - _ = res catch |err| switch (err) { - error.UnsupportedCpuArch => { - try elf_file.reportUnsupportedCpuArch(); - return error.FlushFailure; - }, - error.RelocFailure, error.RelaxFailure => has_reloc_errors = true, - else => |e| return e, - }; - } - - try elf_file.base.file.?.pwriteAll(buffer.items, chunk.offset(elf_file)); - buffer.clearRetainingCapacity(); - } - - try elf_file.reportUndefinedSymbols(&undefs); - if (has_reloc_errors) return error.FlushFailure; -} - -pub fn writeAtomsRelocatable(self: *Object, elf_file: *Elf) !void { - const gpa = elf_file.base.comp.gpa; - - var buffer = std.ArrayList(u8).init(gpa); - defer buffer.deinit(); - - log.debug("writing atoms in {}", .{self.fmtPath()}); - - for (self.section_chunks.items) |chunk| { - const osec = 
elf_file.sections.items(.shdr)[chunk.output_section_index]; - if (osec.sh_type == elf.SHT_NOBITS) continue; - - log.debug(" in section '{s}'", .{elf_file.getShString(osec.sh_name)}); - - try buffer.ensureUnusedCapacity(chunk.size); - buffer.appendNTimesAssumeCapacity(0, chunk.size); - - for (chunk.atoms.items) |atom_index| { - const atom_ptr = self.atom(atom_index).?; - assert(atom_ptr.alive); - - const offset = math.cast(usize, atom_ptr.value - chunk.value) orelse return error.Overflow; - const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow; - - log.debug(" * atom({d}) at 0x{x}", .{ atom_index, chunk.offset(elf_file) + offset }); - - const code = try self.codeDecompressAlloc(elf_file, atom_index); - defer gpa.free(code); - const out_code = buffer.items[offset..][0..size]; - @memcpy(out_code, code); - } - - try elf_file.base.file.?.pwriteAll(buffer.items, chunk.offset(elf_file)); - buffer.clearRetainingCapacity(); + const atom_list = &elf_file.sections.items(.atom_list_2)[osec]; + atom_list.output_section_index = osec; + try atom_list.atoms.append(elf_file.base.comp.gpa, atom_ptr.ref()); } } @@ -1585,29 +1423,6 @@ fn formatAtoms( } } -pub fn fmtSectionChunks(self: *Object, elf_file: *Elf) std.fmt.Formatter(formatSectionChunks) { - return .{ .data = .{ - .object = self, - .elf_file = elf_file, - } }; -} - -fn formatSectionChunks( - ctx: FormatContext, - comptime unused_fmt_string: []const u8, - options: std.fmt.FormatOptions, - writer: anytype, -) !void { - _ = unused_fmt_string; - _ = options; - const object = ctx.object; - const elf_file = ctx.elf_file; - try writer.writeAll(" section chunks\n"); - for (object.section_chunks.items) |chunk| { - try writer.print(" {}\n", .{chunk.fmt(elf_file)}); - } -} - pub fn fmtCies(self: *Object, elf_file: *Elf) std.fmt.Formatter(formatCies) { return .{ .data = .{ .object = self, @@ -1709,90 +1524,6 @@ const InArchive = struct { size: u32, }; -const SectionChunk = struct { - value: i64 = 0, - size: u64 = 0, 
- alignment: Atom.Alignment = .@"1", - output_section_index: u32 = 0, - atoms: std.ArrayListUnmanaged(Atom.Index) = .{}, - - fn deinit(chunk: *SectionChunk, allocator: Allocator) void { - chunk.atoms.deinit(allocator); - } - - fn address(chunk: SectionChunk, elf_file: *Elf) i64 { - const shdr = elf_file.sections.items(.shdr)[chunk.output_section_index]; - return @as(i64, @intCast(shdr.sh_addr)) + chunk.value; - } - - fn offset(chunk: SectionChunk, elf_file: *Elf) u64 { - const shdr = elf_file.sections.items(.shdr)[chunk.output_section_index]; - return shdr.sh_offset + @as(u64, @intCast(chunk.value)); - } - - fn updateSize(chunk: *SectionChunk, object: *Object) void { - for (chunk.atoms.items) |atom_index| { - const atom_ptr = object.atom(atom_index).?; - assert(atom_ptr.alive); - const off = atom_ptr.alignment.forward(chunk.size); - const padding = off - chunk.size; - atom_ptr.value = @intCast(off); - chunk.size += padding + atom_ptr.size; - chunk.alignment = chunk.alignment.max(atom_ptr.alignment); - } - } - - fn firstAtom(chunk: SectionChunk, object: *Object) *Atom { - assert(chunk.atoms.items.len > 0); - return object.atom(chunk.atoms.items[0]).?; - } - - fn lastAtom(chunk: SectionChunk, object: *Object) *Atom { - assert(chunk.atoms.items.len > 0); - return object.atom(chunk.atoms.items[chunk.atoms.items.len - 1]).?; - } - - pub fn format( - chunk: SectionChunk, - comptime unused_fmt_string: []const u8, - options: std.fmt.FormatOptions, - writer: anytype, - ) !void { - _ = chunk; - _ = unused_fmt_string; - _ = options; - _ = writer; - @compileError("do not format SectionChunk directly"); - } - - const FormatCtx = struct { SectionChunk, *Elf }; - - pub fn fmt(chunk: SectionChunk, elf_file: *Elf) std.fmt.Formatter(format2) { - return .{ .data = .{ chunk, elf_file } }; - } - - fn format2( - ctx: FormatCtx, - comptime unused_fmt_string: []const u8, - options: std.fmt.FormatOptions, - writer: anytype, - ) !void { - _ = unused_fmt_string; - _ = options; - const chunk, 
const elf_file = ctx; - try writer.print("chunk : @{x} : shdr({d}) : align({x}) : size({x})", .{ - chunk.address(elf_file), chunk.output_section_index, - chunk.alignment.toByteUnits() orelse 0, chunk.size, - }); - try writer.writeAll(" : atoms{ "); - for (chunk.atoms.items, 0..) |atom_index, i| { - try writer.print("{d}", .{atom_index}); - if (i < chunk.atoms.items.len - 1) try writer.writeAll(", "); - } - try writer.writeAll(" }"); - } -}; - const Object = @This(); const std = @import("std"); @@ -1807,6 +1538,7 @@ const mem = std.mem; const Allocator = mem.Allocator; const Archive = @import("Archive.zig"); const Atom = @import("Atom.zig"); +const AtomList = @import("AtomList.zig"); const Cie = eh_frame.Cie; const Elf = @import("../Elf.zig"); const Fde = eh_frame.Fde; diff --git a/src/link/Elf/relocatable.zig b/src/link/Elf/relocatable.zig index 5df6bb994649..5fc0d5790b3b 100644 --- a/src/link/Elf/relocatable.zig +++ b/src/link/Elf/relocatable.zig @@ -353,11 +353,13 @@ fn initComdatGroups(elf_file: *Elf) !void { } fn updateSectionSizes(elf_file: *Elf) !void { - for (elf_file.objects.items) |index| { - try elf_file.file(index).?.object.allocateAtoms(elf_file); + const slice = elf_file.sections.slice(); + for (slice.items(.atom_list_2)) |*atom_list| { + if (atom_list.atoms.items.len == 0) continue; + atom_list.updateSize(elf_file); + try atom_list.allocate(elf_file); } - const slice = elf_file.sections.slice(); for (slice.items(.shdr), 0..) 
|*shdr, shndx| { const atom_list = slice.items(.atom_list)[shndx]; if (shdr.sh_type != elf.SHT_RELA) continue; @@ -444,8 +446,16 @@ fn allocateAllocSections(elf_file: *Elf) !void { } fn writeAtoms(elf_file: *Elf) !void { - for (elf_file.objects.items) |index| { - try elf_file.file(index).?.object.writeAtomsRelocatable(elf_file); + const gpa = elf_file.base.comp.gpa; + + var buffer = std.ArrayList(u8).init(gpa); + defer buffer.deinit(); + + const slice = elf_file.sections.slice(); + for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, atom_list| { + if (shdr.sh_type == elf.SHT_NOBITS) continue; + if (atom_list.atoms.items.len == 0) continue; + try atom_list.writeRelocatable(&buffer, elf_file); } } From f3d527c0822bd24bcd01fbcf3d670a7633a92468 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 4 Sep 2024 12:11:04 +0200 Subject: [PATCH 059/202] elf: migrate thunks to the new mechanism (AtomList) --- src/link/Elf.zig | 61 +++++++++++++++++++++++++++--------------------- 1 file changed, 35 insertions(+), 26 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 67969b7fb794..333501b29f47 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -3572,19 +3572,26 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void { fn updateSectionSizes(self: *Elf) !void { const slice = self.sections.slice(); - for (slice.items(.atom_list_2)) |*atom_list| { + for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, *atom_list| { if (atom_list.atoms.items.len == 0) continue; + if (self.requiresThunks() and shdr.sh_flags & elf.SHF_EXECINSTR != 0) continue; atom_list.updateSize(self); try atom_list.allocate(self); } if (self.requiresThunks()) { - for (slice.items(.shdr), slice.items(.atom_list), 0..) 
|*shdr, atom_list, shndx| { + for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, *atom_list| { if (shdr.sh_flags & elf.SHF_EXECINSTR == 0) continue; - if (atom_list.items.len == 0) continue; + if (atom_list.atoms.items.len == 0) continue; // Create jump/branch range extenders if needed. - try self.createThunks(shdr, @intCast(shndx)); + try self.createThunks(atom_list); + try atom_list.allocate(self); + } + + // FIXME:JK this will hopefully not be needed once we create a link from Atom/Thunk to AtomList. + for (self.thunks.items) |*th| { + th.value += slice.items(.atom_list_2)[th.output_section_index].value; } } @@ -4066,7 +4073,7 @@ fn writeAtoms(self: *Elf) !void { for (self.thunks.items) |th| { const thunk_size = th.size(self); try buffer.ensureUnusedCapacity(thunk_size); - const shdr = self.sections.items(.shdr)[th.output_section_index]; + const shdr = slice.items(.shdr)[th.output_section_index]; const offset = @as(u64, @intCast(th.value)) + shdr.sh_offset; try th.write(self, buffer.writer()); assert(buffer.items.len == thunk_size); @@ -5613,9 +5620,10 @@ fn defaultEntrySymbolName(cpu_arch: std.Target.Cpu.Arch) []const u8 { }; } -fn createThunks(elf_file: *Elf, shdr: *elf.Elf64_Shdr, shndx: u32) !void { +fn createThunks(elf_file: *Elf, atom_list: *AtomList) !void { const gpa = elf_file.base.comp.gpa; const cpu_arch = elf_file.getTarget().cpu.arch; + // A branch will need an extender if its target is larger than // `2^(jump_bits - 1) - margin` where margin is some arbitrary number. 
const max_distance = switch (cpu_arch) { @@ -5623,36 +5631,44 @@ fn createThunks(elf_file: *Elf, shdr: *elf.Elf64_Shdr, shndx: u32) !void { .x86_64, .riscv64 => unreachable, else => @panic("unhandled arch"), }; - const atoms = elf_file.sections.items(.atom_list)[shndx].items; - assert(atoms.len > 0); - for (atoms) |ref| { + const advance = struct { + fn advance(list: *AtomList, size: u64, alignment: Atom.Alignment) !i64 { + const offset = alignment.forward(list.size); + const padding = offset - list.size; + list.size += padding + size; + list.alignment = list.alignment.max(alignment); + return @intCast(offset); + } + }.advance; + + for (atom_list.atoms.items) |ref| { elf_file.atom(ref).?.value = -1; } var i: usize = 0; - while (i < atoms.len) { + while (i < atom_list.atoms.items.len) { const start = i; - const start_atom = elf_file.atom(atoms[start]).?; + const start_atom = elf_file.atom(atom_list.atoms.items[start]).?; assert(start_atom.alive); - start_atom.value = try advanceSection(shdr, start_atom.size, start_atom.alignment); + start_atom.value = try advance(atom_list, start_atom.size, start_atom.alignment); i += 1; - while (i < atoms.len) : (i += 1) { - const atom_ptr = elf_file.atom(atoms[i]).?; + while (i < atom_list.atoms.items.len) : (i += 1) { + const atom_ptr = elf_file.atom(atom_list.atoms.items[i]).?; assert(atom_ptr.alive); - if (@as(i64, @intCast(atom_ptr.alignment.forward(shdr.sh_size))) - start_atom.value >= max_distance) + if (@as(i64, @intCast(atom_ptr.alignment.forward(atom_list.size))) - start_atom.value >= max_distance) break; - atom_ptr.value = try advanceSection(shdr, atom_ptr.size, atom_ptr.alignment); + atom_ptr.value = try advance(atom_list, atom_ptr.size, atom_ptr.alignment); } // Insert a thunk at the group end const thunk_index = try elf_file.addThunk(); const thunk_ptr = elf_file.thunk(thunk_index); - thunk_ptr.output_section_index = shndx; + thunk_ptr.output_section_index = atom_list.output_section_index; // Scan relocs in the group 
and create trampolines for any unreachable callsite - for (atoms[start..i]) |ref| { + for (atom_list.atoms.items[start..i]) |ref| { const atom_ptr = elf_file.atom(ref).?; const file_ptr = atom_ptr.file(elf_file).?; log.debug("atom({}) {s}", .{ ref, atom_ptr.name(elf_file) }); @@ -5682,18 +5698,11 @@ fn createThunks(elf_file: *Elf, shdr: *elf.Elf64_Shdr, shndx: u32) !void { atom_ptr.addExtra(.{ .thunk = thunk_index }, elf_file); } - thunk_ptr.value = try advanceSection(shdr, thunk_ptr.size(elf_file), Atom.Alignment.fromNonzeroByteUnits(2)); + thunk_ptr.value = try advance(atom_list, thunk_ptr.size(elf_file), Atom.Alignment.fromNonzeroByteUnits(2)); log.debug("thunk({d}) : {}", .{ thunk_index, thunk_ptr.fmt(elf_file) }); } } -fn advanceSection(shdr: *elf.Elf64_Shdr, adv_size: u64, alignment: Atom.Alignment) !i64 { - const offset = alignment.forward(shdr.sh_size); - const padding = offset - shdr.sh_size; - shdr.sh_size += padding + adv_size; - shdr.sh_addralign = @max(shdr.sh_addralign, alignment.toByteUnits() orelse 1); - return @intCast(offset); -} const std = @import("std"); const build_options = @import("build_options"); From b6caab63cb85d195230f939c4fc408b680866e22 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 4 Sep 2024 13:44:06 +0200 Subject: [PATCH 060/202] elf: actually commit AtomList.zig --- src/link/Elf/AtomList.zig | 206 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 206 insertions(+) create mode 100644 src/link/Elf/AtomList.zig diff --git a/src/link/Elf/AtomList.zig b/src/link/Elf/AtomList.zig new file mode 100644 index 000000000000..890f1872b526 --- /dev/null +++ b/src/link/Elf/AtomList.zig @@ -0,0 +1,206 @@ +value: i64 = 0, +size: u64 = 0, +alignment: Atom.Alignment = .@"1", +output_section_index: u32 = 0, +atoms: std.ArrayListUnmanaged(Elf.Ref) = .{}, + +pub fn deinit(list: *AtomList, allocator: Allocator) void { + list.atoms.deinit(allocator); +} + +pub fn address(list: AtomList, elf_file: *Elf) i64 { + const shdr = 
elf_file.sections.items(.shdr)[list.output_section_index]; + return @as(i64, @intCast(shdr.sh_addr)) + list.value; +} + +pub fn offset(list: AtomList, elf_file: *Elf) u64 { + const shdr = elf_file.sections.items(.shdr)[list.output_section_index]; + return shdr.sh_offset + @as(u64, @intCast(list.value)); +} + +pub fn updateSize(list: *AtomList, elf_file: *Elf) void { + for (list.atoms.items) |ref| { + const atom_ptr = elf_file.atom(ref).?; + assert(atom_ptr.alive); + const off = atom_ptr.alignment.forward(list.size); + const padding = off - list.size; + atom_ptr.value = @intCast(off); + list.size += padding + atom_ptr.size; + list.alignment = list.alignment.max(atom_ptr.alignment); + } +} + +pub fn allocate(list: *AtomList, elf_file: *Elf) !void { + const alloc_res = try elf_file.allocateChunk(.{ + .shndx = list.output_section_index, + .size = list.size, + .alignment = list.alignment, + .requires_padding = false, + }); + list.value = @intCast(alloc_res.value); + + const slice = elf_file.sections.slice(); + const shdr = &slice.items(.shdr)[list.output_section_index]; + const last_atom_ref = &slice.items(.last_atom)[list.output_section_index]; + + const expand_section = if (elf_file.atom(alloc_res.placement)) |placement_atom| + placement_atom.nextAtom(elf_file) == null + else + true; + if (expand_section) last_atom_ref.* = list.lastAtom(elf_file).ref(); + shdr.sh_addralign = @max(shdr.sh_addralign, list.alignment.toByteUnits().?); + + // FIXME:JK this currently ignores Thunks as valid chunks. 
+ { + var idx: usize = 0; + while (idx < list.atoms.items.len) : (idx += 1) { + const curr_atom_ptr = elf_file.atom(list.atoms.items[idx]).?; + if (idx > 0) { + curr_atom_ptr.prev_atom_ref = list.atoms.items[idx - 1]; + } + if (idx + 1 < list.atoms.items.len) { + curr_atom_ptr.next_atom_ref = list.atoms.items[idx + 1]; + } + } + } + + if (elf_file.atom(alloc_res.placement)) |placement_atom| { + list.firstAtom(elf_file).prev_atom_ref = placement_atom.ref(); + list.lastAtom(elf_file).next_atom_ref = placement_atom.next_atom_ref; + placement_atom.next_atom_ref = list.firstAtom(elf_file).ref(); + } + + // FIXME:JK if we had a link from Atom to parent AtomList we would not need to update Atom's value or osec index + for (list.atoms.items) |ref| { + const atom_ptr = elf_file.atom(ref).?; + atom_ptr.output_section_index = list.output_section_index; + atom_ptr.value += list.value; + } +} + +pub fn write(list: AtomList, buffer: *std.ArrayList(u8), undefs: anytype, elf_file: *Elf) !void { + const gpa = elf_file.base.comp.gpa; + const osec = elf_file.sections.items(.shdr)[list.output_section_index]; + assert(osec.sh_type != elf.SHT_NOBITS); + + log.debug("writing atoms in section '{s}'", .{elf_file.getShString(osec.sh_name)}); + + try buffer.ensureUnusedCapacity(list.size); + buffer.appendNTimesAssumeCapacity(0, list.size); + + for (list.atoms.items) |ref| { + const atom_ptr = elf_file.atom(ref).?; + assert(atom_ptr.alive); + + const off = math.cast(usize, atom_ptr.value - list.value) orelse return error.Overflow; + const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow; + + log.debug(" atom({}) at 0x{x}", .{ ref, list.offset(elf_file) + off }); + + const object = atom_ptr.file(elf_file).?.object; + const code = try object.codeDecompressAlloc(elf_file, ref.index); + defer gpa.free(code); + const out_code = buffer.items[off..][0..size]; + @memcpy(out_code, code); + + if (osec.sh_flags & elf.SHF_ALLOC == 0) + try atom_ptr.resolveRelocsNonAlloc(elf_file, 
out_code, undefs) + else + try atom_ptr.resolveRelocsAlloc(elf_file, out_code); + } + + try elf_file.base.file.?.pwriteAll(buffer.items, list.offset(elf_file)); + buffer.clearRetainingCapacity(); +} + +pub fn writeRelocatable(list: AtomList, buffer: *std.ArrayList(u8), elf_file: *Elf) !void { + const gpa = elf_file.base.comp.gpa; + const osec = elf_file.sections.items(.shdr)[list.output_section_index]; + assert(osec.sh_type != elf.SHT_NOBITS); + + log.debug("writing atoms in section '{s}'", .{elf_file.getShString(osec.sh_name)}); + + try buffer.ensureUnusedCapacity(list.size); + buffer.appendNTimesAssumeCapacity(0, list.size); + + for (list.atoms.items) |ref| { + const atom_ptr = elf_file.atom(ref).?; + assert(atom_ptr.alive); + + const off = math.cast(usize, atom_ptr.value - list.value) orelse return error.Overflow; + const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow; + + log.debug(" atom({}) at 0x{x}", .{ ref, list.offset(elf_file) + off }); + + const object = atom_ptr.file(elf_file).?.object; + const code = try object.codeDecompressAlloc(elf_file, ref.index); + defer gpa.free(code); + const out_code = buffer.items[off..][0..size]; + @memcpy(out_code, code); + } + + try elf_file.base.file.?.pwriteAll(buffer.items, list.offset(elf_file)); + buffer.clearRetainingCapacity(); +} + +pub fn firstAtom(list: AtomList, elf_file: *Elf) *Atom { + assert(list.atoms.items.len > 0); + return elf_file.atom(list.atoms.items[0]).?; +} + +pub fn lastAtom(list: AtomList, elf_file: *Elf) *Atom { + assert(list.atoms.items.len > 0); + return elf_file.atom(list.atoms.items[list.atoms.items.len - 1]).?; +} + +pub fn format( + list: AtomList, + comptime unused_fmt_string: []const u8, + options: std.fmt.FormatOptions, + writer: anytype, +) !void { + _ = list; + _ = unused_fmt_string; + _ = options; + _ = writer; + @compileError("do not format AtomList directly"); +} + +const FormatCtx = struct { AtomList, *Elf }; + +pub fn fmt(list: AtomList, elf_file: *Elf) 
std.fmt.Formatter(format2) { + return .{ .data = .{ list, elf_file } }; +} + +fn format2( + ctx: FormatCtx, + comptime unused_fmt_string: []const u8, + options: std.fmt.FormatOptions, + writer: anytype, +) !void { + _ = unused_fmt_string; + _ = options; + const list, const elf_file = ctx; + try writer.print("list : @{x} : shdr({d}) : align({x}) : size({x})", .{ + list.address(elf_file), list.output_section_index, + list.alignment.toByteUnits() orelse 0, list.size, + }); + try writer.writeAll(" : atoms{ "); + for (list.atoms.items, 0..) |ref, i| { + try writer.print("{}", .{ref}); + if (i < list.atoms.items.len - 1) try writer.writeAll(", "); + } + try writer.writeAll(" }"); +} + +const assert = std.debug.assert; +const elf = std.elf; +const log = std.log.scoped(.link); +const math = std.math; +const std = @import("std"); + +const Allocator = std.mem.Allocator; +const Atom = @import("Atom.zig"); +const AtomList = @This(); +const Elf = @import("../Elf.zig"); +const Object = @import("Object.zig"); From 516955dbdb6cb1a54a00e94dff516d5bfbe6dfc4 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 4 Sep 2024 13:45:16 +0200 Subject: [PATCH 061/202] elf: add AtomList.zig to CMakeLists.txt --- CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8348b45cfd41..781076ef6159 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -600,6 +600,7 @@ set(ZIG_STAGE2_SOURCES src/link/Elf.zig src/link/Elf/Archive.zig src/link/Elf/Atom.zig + src/link/Elf/AtomList.zig src/link/Elf/LdScript.zig src/link/Elf/LinkerDefined.zig src/link/Elf/Object.zig From e1d5bb365b3b8d645fbdfc4ffb6a14ef3bb0e766 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 4 Sep 2024 15:55:38 +0200 Subject: [PATCH 062/202] elf: fix 32bit build --- src/link/Elf/AtomList.zig | 10 ++++++---- src/link/Elf/eh_frame.zig | 2 +- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/link/Elf/AtomList.zig b/src/link/Elf/AtomList.zig index 890f1872b526..51407ca6d977 
100644 --- a/src/link/Elf/AtomList.zig +++ b/src/link/Elf/AtomList.zig @@ -85,8 +85,9 @@ pub fn write(list: AtomList, buffer: *std.ArrayList(u8), undefs: anytype, elf_fi log.debug("writing atoms in section '{s}'", .{elf_file.getShString(osec.sh_name)}); - try buffer.ensureUnusedCapacity(list.size); - buffer.appendNTimesAssumeCapacity(0, list.size); + const list_size = math.cast(usize, list.size) orelse return error.Overflow; + try buffer.ensureUnusedCapacity(list_size); + buffer.appendNTimesAssumeCapacity(0, list_size); for (list.atoms.items) |ref| { const atom_ptr = elf_file.atom(ref).?; @@ -120,8 +121,9 @@ pub fn writeRelocatable(list: AtomList, buffer: *std.ArrayList(u8), elf_file: *E log.debug("writing atoms in section '{s}'", .{elf_file.getShString(osec.sh_name)}); - try buffer.ensureUnusedCapacity(list.size); - buffer.appendNTimesAssumeCapacity(0, list.size); + const list_size = math.cast(usize, list.size) orelse return error.Overflow; + try buffer.ensureUnusedCapacity(list_size); + buffer.appendNTimesAssumeCapacity(0, list_size); for (list.atoms.items) |ref| { const atom_ptr = elf_file.atom(ref).?; diff --git a/src/link/Elf/eh_frame.zig b/src/link/Elf/eh_frame.zig index a9e3e618cb48..b520c94aaf14 100644 --- a/src/link/Elf/eh_frame.zig +++ b/src/link/Elf/eh_frame.zig @@ -235,7 +235,7 @@ pub fn calcEhFrameSize(elf_file: *Elf) !usize { var offset: usize = if (elf_file.zigObjectPtr()) |zo| blk: { const sym = zo.symbol(zo.eh_frame_index orelse break :blk 0); - break :blk sym.atom(elf_file).?.size; + break :blk math.cast(usize, sym.atom(elf_file).?.size) orelse return error.Overflow; } else 0; var cies = std.ArrayList(Cie).init(gpa); From 19895834b9c00caff29ad8b9173d1845314104c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Thu, 5 Sep 2024 01:12:53 +0200 Subject: [PATCH 063/202] compiler: Force ELFv2 for powerpc64. LLD does not support ELFv1. 
By forcing ELFv2, we can at least build working binaries for triples like `powerpc64-linux-none`. --- src/target.zig | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/target.zig b/src/target.zig index bc2ba4c8316f..221c2029baf9 100644 --- a/src/target.zig +++ b/src/target.zig @@ -381,6 +381,14 @@ pub fn addrSpaceCastIsValid( } pub fn llvmMachineAbi(target: std.Target) ?[:0]const u8 { + // LLD does not support ELFv1. Rather than having LLVM produce ELFv1 code and then linking it + // into a broken ELFv2 binary, just force LLVM to use ELFv2 as well. This will break when glibc + // is linked as glibc only supports ELFv2 for little endian, but there's nothing we can do about + // that. With this hack, `powerpc64-linux-none` will at least work. + // + // Once our self-hosted linker can handle both ABIs, this hack should go away. + if (target.cpu.arch == .powerpc64) return "elfv2"; + const have_float = switch (target.abi) { .gnueabihf, .musleabihf, .eabihf => true, else => false, @@ -409,7 +417,6 @@ pub fn llvmMachineAbi(target: std.Target) ?[:0]const u8 { return "ilp32"; } }, - //TODO add ARM, Mips, and PowerPC else => return null, } } From 2a6eecff3e218015bb425737dabc27760f29fb2d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Thu, 5 Sep 2024 01:14:03 +0200 Subject: [PATCH 064/202] test: Disable `reinterpret packed union` for powerpc64 too. 
https://github.com/ziglang/zig/issues/21050 --- test/behavior/union.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/behavior/union.zig b/test/behavior/union.zig index a952e9b9d31a..9938c3c04578 100644 --- a/test/behavior/union.zig +++ b/test/behavior/union.zig @@ -1891,7 +1891,7 @@ test "reinterpret packed union" { try comptime S.doTheTest(); if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO - if (builtin.cpu.arch.isPowerPC32()) return error.SkipZigTest; // TODO + if (builtin.cpu.arch.isPowerPC()) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21050 if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21050 if (builtin.cpu.arch.isWasm()) return error.SkipZigTest; // TODO try S.doTheTest(); From 40ee6825170ed52ebd707262d1a88f237de54d6f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Thu, 5 Sep 2024 01:14:38 +0200 Subject: [PATCH 065/202] test: Disable `store vector with memset` on powerpc64. This will be re-enabled with LLVM 19. --- test/behavior/vector.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index 08c81fd28bb0..cee9f168f193 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -1450,6 +1450,7 @@ test "store vector with memset" { .mips64el, .riscv64, .powerpc, + .powerpc64, => { // LLVM 16 ERROR: "Converting bits to bytes lost precision" // https://github.com/ziglang/zig/issues/16177 From ee3efe8007402c496a6e589fc279e07578e8280f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Thu, 5 Sep 2024 01:15:04 +0200 Subject: [PATCH 066/202] test: Add `powerpc64-linux-(none,musl)` triples to module tests. 
--- test/tests.zig | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/test/tests.zig b/test/tests.zig index 73934950db34..7d5be7bbf74c 100644 --- a/test/tests.zig +++ b/test/tests.zig @@ -438,6 +438,30 @@ const test_targets = blk: { // .link_libc = true, //}, + .{ + .target = .{ + .cpu_arch = .powerpc64, + .os_tag = .linux, + .abi = .none, + }, + }, + .{ + .target = .{ + .cpu_arch = .powerpc64, + .os_tag = .linux, + .abi = .musl, + }, + .link_libc = true, + }, + // Requires ELFv1 linker support. + // .{ + // .target = .{ + // .cpu_arch = .powerpc64, + // .os_tag = .linux, + // .abi = .gnu, + // }, + // .link_libc = true, + // }, .{ .target = .{ .cpu_arch = .powerpc64le, From c852992c7eb07907091a35b2ef4cc87cf9651c6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sat, 24 Aug 2024 05:14:55 +0200 Subject: [PATCH 067/202] glibc: Also pass `-Wno-unsupported-floating-point-opt` when building libc_nonshared.a. Apparently 3fb6e46f6e4231b9569193a15a4357a2ae11fb0f wasn't enough. --- src/glibc.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/src/glibc.zig b/src/glibc.zig index ee1b208cdd49..e34e12119e9d 100644 --- a/src/glibc.zig +++ b/src/glibc.zig @@ -369,6 +369,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: std.Progre "-fgnu89-inline", "-fmerge-all-constants", "-frounding-math", + "-Wno-unsupported-floating-point-opt", // For targets that don't support -frounding-math. "-fno-stack-protector", "-fno-common", "-fmath-errno", From e492e7232fa03d5921ec5f024b0f2a7a87956bfb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sat, 24 Aug 2024 05:05:31 +0200 Subject: [PATCH 068/202] glibc: Set asm and include paths for s390x. 
--- src/glibc.zig | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/glibc.zig b/src/glibc.zig index e34e12119e9d..926a5e297229 100644 --- a/src/glibc.zig +++ b/src/glibc.zig @@ -468,6 +468,8 @@ fn start_asm_path(comp: *Compilation, arena: Allocator, basename: []const u8) ![ } else { try result.appendSlice("powerpc" ++ s ++ "powerpc32"); } + } else if (arch == .s390x) { + try result.appendSlice("s390" ++ s ++ "s390-64"); } else if (arch.isLoongArch()) { try result.appendSlice("loongarch"); } @@ -658,6 +660,16 @@ fn add_include_dirs_arch( try args.append("-I"); try args.append(try path.join(arena, &[_][]const u8{ dir, "riscv" })); } + } else if (arch == .s390x) { + if (opt_nptl) |nptl| { + try args.append("-I"); + try args.append(try path.join(arena, &[_][]const u8{ dir, "s390", nptl })); + } else { + try args.append("-I"); + try args.append(try path.join(arena, &[_][]const u8{ dir, "s390" ++ s ++ "s390-64" })); + try args.append("-I"); + try args.append(try path.join(arena, &[_][]const u8{ dir, "s390" })); + } } else if (arch.isLoongArch()) { try args.append("-I"); try args.append(try path.join(arena, &[_][]const u8{ dir, "loongarch" })); From 0bbfa199e9728df1a05a9da97aca3eee246ffc47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sat, 24 Aug 2024 15:41:41 +0200 Subject: [PATCH 069/202] glibc: Add include path for gnux32. 
--- src/glibc.zig | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/glibc.zig b/src/glibc.zig index 926a5e297229..9e29fe1b67fe 100644 --- a/src/glibc.zig +++ b/src/glibc.zig @@ -572,6 +572,10 @@ fn add_include_dirs_arch( try args.append("-I"); try args.append(try path.join(arena, &[_][]const u8{ dir, "x86_64", nptl })); } else { + if (target.abi == .gnux32) { + try args.append("-I"); + try args.append(try path.join(arena, &[_][]const u8{ dir, "x86_64", "x32" })); + } try args.append("-I"); try args.append(try path.join(arena, &[_][]const u8{ dir, "x86_64" })); } From 9eb66ab3fb796daf92bd38d548aadf44cceaa592 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sat, 24 Aug 2024 15:50:47 +0200 Subject: [PATCH 070/202] glibc: Set asm and include paths for m68k. --- src/glibc.zig | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/glibc.zig b/src/glibc.zig index 9e29fe1b67fe..01c7b2221461 100644 --- a/src/glibc.zig +++ b/src/glibc.zig @@ -472,6 +472,8 @@ fn start_asm_path(comp: *Compilation, arena: Allocator, basename: []const u8) ![ try result.appendSlice("s390" ++ s ++ "s390-64"); } else if (arch.isLoongArch()) { try result.appendSlice("loongarch"); + } else if (arch == .m68k) { + try result.appendSlice("m68k"); } try result.appendSlice(s); @@ -677,6 +679,17 @@ fn add_include_dirs_arch( } else if (arch.isLoongArch()) { try args.append("-I"); try args.append(try path.join(arena, &[_][]const u8{ dir, "loongarch" })); + } else if (arch == .m68k) { + if (opt_nptl) |nptl| { + try args.append("-I"); + try args.append(try path.join(arena, &[_][]const u8{ dir, "m68k", nptl })); + } else { + // coldfire ABI support requires: https://github.com/ziglang/zig/issues/20690 + try args.append("-I"); + try args.append(try path.join(arena, &[_][]const u8{ dir, "m68k" ++ s ++ "m680x0" })); + try args.append("-I"); + try args.append(try path.join(arena, &[_][]const u8{ dir, "m68k" })); + } } } From 06945d5eb7db3cb95adbf2abb5e5ae39f47e5c8f 
Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sat, 24 Aug 2024 17:09:13 +0200 Subject: [PATCH 071/202] glibc: Add arc start files. --- lib/libc/glibc/sysdeps/arc/bits/endianness.h | 15 ++++ lib/libc/glibc/sysdeps/arc/entry.h | 5 ++ lib/libc/glibc/sysdeps/arc/start-2.33.S | 74 ++++++++++++++++ lib/libc/glibc/sysdeps/arc/start.S | 90 ++++++++++++++++++++ 4 files changed, 184 insertions(+) create mode 100644 lib/libc/glibc/sysdeps/arc/bits/endianness.h create mode 100644 lib/libc/glibc/sysdeps/arc/entry.h create mode 100644 lib/libc/glibc/sysdeps/arc/start-2.33.S create mode 100644 lib/libc/glibc/sysdeps/arc/start.S diff --git a/lib/libc/glibc/sysdeps/arc/bits/endianness.h b/lib/libc/glibc/sysdeps/arc/bits/endianness.h new file mode 100644 index 000000000000..8f17ca84b485 --- /dev/null +++ b/lib/libc/glibc/sysdeps/arc/bits/endianness.h @@ -0,0 +1,15 @@ +#ifndef _BITS_ENDIANNESS_H +#define _BITS_ENDIANNESS_H 1 + +#ifndef _BITS_ENDIAN_H +# error "Never use directly; include instead." +#endif + +/* ARC has selectable endianness. */ +#ifdef __BIG_ENDIAN__ +# define __BYTE_ORDER __BIG_ENDIAN +#else +# define __BYTE_ORDER __LITTLE_ENDIAN +#endif + +#endif /* bits/endianness.h */ diff --git a/lib/libc/glibc/sysdeps/arc/entry.h b/lib/libc/glibc/sysdeps/arc/entry.h new file mode 100644 index 000000000000..adb01d981afd --- /dev/null +++ b/lib/libc/glibc/sysdeps/arc/entry.h @@ -0,0 +1,5 @@ +#ifndef __ASSEMBLY__ +extern void __start (void) attribute_hidden; +#endif + +#define ENTRY_POINT __start diff --git a/lib/libc/glibc/sysdeps/arc/start-2.33.S b/lib/libc/glibc/sysdeps/arc/start-2.33.S new file mode 100644 index 000000000000..dbec87e6bb78 --- /dev/null +++ b/lib/libc/glibc/sysdeps/arc/start-2.33.S @@ -0,0 +1,74 @@ +/* Startup code for ARC. + Copyright (C) 2020-2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. 
+ + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + . */ + +#define __ASSEMBLY__ 1 +#include +#include + +#ifndef ENTRY_POINT +# error ENTRY_POINT needs to be defined for ARC +#endif + +/* When we enter this piece of code, the program stack looks like this: + argc argument counter (integer) + argv[0] program name (pointer) + argv[1...N] program args (pointers) + argv[argc-1] end of args (integer) + NULL + env[0...N] environment variables (pointers) + NULL. */ + +ENTRY (ENTRY_POINT) + + /* Needed to make gdb backtraces stop here. */ + .cfi_label .Ldummy + cfi_undefined (blink) + + mov fp, 0 + ld_s r1, [sp] /* argc. */ + + mov_s r5, r0 /* rltd_fini. */ + add_s r2, sp, 4 /* argv. */ + and sp, sp, -8 + mov r6, sp + + /* __libc_start_main (main, argc, argv, init, fini, rtld_fini, stack_end). */ + +#ifdef SHARED + ld r0, [pcl, @main@gotpc] + ld r3, [pcl, @__libc_csu_init@gotpc] + ld r4, [pcl, @__libc_csu_fini@gotpc] + bl __libc_start_main@plt +#else + mov_s r0, main + mov_s r3, __libc_csu_init + mov r4, __libc_csu_fini + bl __libc_start_main +#endif + + /* Should never get here. */ + flag 1 +END (ENTRY_POINT) + +/* Define a symbol for the first piece of initialized data. 
*/ + .data + .globl __data_start +__data_start: + .long 0 + .weak data_start + data_start = __data_start diff --git a/lib/libc/glibc/sysdeps/arc/start.S b/lib/libc/glibc/sysdeps/arc/start.S new file mode 100644 index 000000000000..03ee8a0f2e66 --- /dev/null +++ b/lib/libc/glibc/sysdeps/arc/start.S @@ -0,0 +1,90 @@ +/* Startup code for ARC. + Copyright (C) 2020-2024 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + In addition to the permissions in the GNU Lesser General Public + License, the Free Software Foundation gives you unlimited + permission to link the compiled version of this file with other + programs, and to distribute those programs without any restriction + coming from the use of this file. (The GNU Lesser General Public + License restrictions do apply in other respects; for example, they + cover modification of the file, and distribution when not linked + into another program.) + + Note that people who make modified versions of this file are not + obligated to grant this special exception for their modified + versions; it is their choice whether to do so. The GNU Lesser + General Public License gives permission to release a modified + version without this exception; this exception also makes it + possible to release a modified version which carries forward this + exception. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . 
*/ + +#define __ASSEMBLY__ 1 +#include +#include + +#ifndef ENTRY_POINT +# error ENTRY_POINT needs to be defined for ARC +#endif + +/* When we enter this piece of code, the program stack looks like this: + argc argument counter (integer) + argv[0] program name (pointer) + argv[1...N] program args (pointers) + argv[argc-1] end of args (integer) + NULL + env[0...N] environment variables (pointers) + NULL. */ + +ENTRY (ENTRY_POINT) + + /* Needed to make gdb backtraces stop here. */ + .cfi_label .Ldummy + cfi_undefined (blink) + + mov fp, 0 + ld_s r1, [sp] /* argc. */ + + mov_s r5, r0 /* rltd_fini. */ + add_s r2, sp, 4 /* argv. */ + and sp, sp, -8 + mov r6, sp + + /* __libc_start_main (main, argc, argv, init, fini, rtld_fini, stack_end). */ + + mov_s r3, 0 /* Used to be init. */ + mov r4, 0 /* Used to be fini. */ + +#ifdef SHARED + ld r0, [pcl, @main@gotpc] + bl __libc_start_main@plt +#else + mov_s r0, main + bl __libc_start_main +#endif + + /* Should never get here. */ + flag 1 +END (ENTRY_POINT) + +/* Define a symbol for the first piece of initialized data. */ + .data + .globl __data_start +__data_start: + .long 0 + .weak data_start + data_start = __data_start From 747460025e9b4349139a310ee93a8c52145285fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sat, 24 Aug 2024 17:09:21 +0200 Subject: [PATCH 072/202] glibc: Add csky start files. 
--- .../glibc/sysdeps/csky/abiv2/start-2.33.S | 112 ++++++++++++++++++ lib/libc/glibc/sysdeps/csky/abiv2/start.S | 103 ++++++++++++++++ lib/libc/glibc/sysdeps/csky/bits/endianness.h | 14 +++ 3 files changed, 229 insertions(+) create mode 100644 lib/libc/glibc/sysdeps/csky/abiv2/start-2.33.S create mode 100644 lib/libc/glibc/sysdeps/csky/abiv2/start.S create mode 100644 lib/libc/glibc/sysdeps/csky/bits/endianness.h diff --git a/lib/libc/glibc/sysdeps/csky/abiv2/start-2.33.S b/lib/libc/glibc/sysdeps/csky/abiv2/start-2.33.S new file mode 100644 index 000000000000..d65e37e61b0e --- /dev/null +++ b/lib/libc/glibc/sysdeps/csky/abiv2/start-2.33.S @@ -0,0 +1,112 @@ +/* Startup code compliant to the ELF C-SKY ABIV2. + Copyright (C) 2018-2021 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + In addition to the permissions in the GNU Lesser General Public + License, the Free Software Foundation gives you unlimited + permission to link the compiled version of this file with other + programs, and to distribute those programs without any restriction + coming from the use of this file. (The GNU Lesser General Public + License restrictions do apply in other respects; for example, they + cover modification of the file, and distribution when not linked + into another program.) + + Note that people who make modified versions of this file are not + obligated to grant this special exception for their modified + versions; it is their choice whether to do so. The GNU Lesser + General Public License gives permission to release a modified + version without this exception; this exception also makes it + possible to release a modified version which carries forward this + exception. 
+ + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + . */ + +/* We need to call: + __libc_start_main (int (*main) (int, char **, char **), int argc, + char **argv, void (*init) (void), void (*fini) (void), + void (*rtld_fini) (void), void *stack_end) + */ + +#include + + .text + .globl _start; + .type _start,@function; + .align 4; +_start: + cfi_startproc + .cfi_label .Ldummy + cfi_undefined (lr) + subi sp, 8 + /* Clear the link register since this is the outermost frame. */ + movi lr, 0 + /* Pop argc off the stack and save a pointer to argv. */ + ldw a1, (sp, 8) /* Init argc for __libc_start_main. */ + addi a2, sp, 12 /* Init argv for __libc_start_main. */ + + /* Push stack limit. */ + stw a2, (sp, 8) + /* Push rtld_fini. */ + stw a0, (sp, 4) + +#ifdef SHARED + grs t0, .Lgetpc +.Lgetpc: + lrw gb, .Lgetpc@GOTPC + addu gb, t0 + lrw a3, __libc_csu_fini@GOT + ldr.w a3, (gb, a3 << 0) + stw a3, (sp, 0) + + lrw a3, __libc_csu_init@GOT + addu a3, gb + ldw a3, (a3, 0) + + lrw t0, main@GOT + addu t0, gb + ldw a0, (t0, 0) + lrw t1, __libc_start_main@PLT + ldr.w t1, (gb, t1 << 0) + jsr t1 + + lrw t1, abort@PLT + ldr.w t1, (gb, t1 << 0) + jsr t1 +#else + /* Fetch address of __libc_csu_fini. */ + lrw a0, __libc_csu_fini + /* Push __libc_csu_fini */ + stw a0, (sp, 0) + + /* Set up the other arguments in registers. */ + lrw a0, main + lrw a3, __libc_csu_init + /* Let the libc call main and exit with its return code. */ + jsri __libc_start_main + + /* Should never get here. */ + jsri abort +#endif /* !SHARED */ + cfi_endproc + .size _start,.-_start + + + /* Define a symbol for the first piece of initialized data. 
*/ + .data + .globl __data_start +__data_start: + .long 0 + .weak data_start + data_start = __data_start diff --git a/lib/libc/glibc/sysdeps/csky/abiv2/start.S b/lib/libc/glibc/sysdeps/csky/abiv2/start.S new file mode 100644 index 000000000000..4c03a5533558 --- /dev/null +++ b/lib/libc/glibc/sysdeps/csky/abiv2/start.S @@ -0,0 +1,103 @@ +/* Startup code compliant to the ELF C-SKY ABIV2. + Copyright (C) 2018-2024 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + In addition to the permissions in the GNU Lesser General Public + License, the Free Software Foundation gives you unlimited + permission to link the compiled version of this file with other + programs, and to distribute those programs without any restriction + coming from the use of this file. (The GNU Lesser General Public + License restrictions do apply in other respects; for example, they + cover modification of the file, and distribution when not linked + into another program.) + + Note that people who make modified versions of this file are not + obligated to grant this special exception for their modified + versions; it is their choice whether to do so. The GNU Lesser + General Public License gives permission to release a modified + version without this exception; this exception also makes it + possible to release a modified version which carries forward this + exception. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. 
+ + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + . */ + +/* We need to call: + __libc_start_main (int (*main) (int, char **, char **), int argc, + char **argv, void (*init) (void), void (*fini) (void), + void (*rtld_fini) (void), void *stack_end) + */ + +#include + + .text + .globl _start; + .type _start,@function; + .align 4; +_start: + cfi_startproc + .cfi_label .Ldummy + cfi_undefined (lr) + subi sp, 8 + /* Clear the link register since this is the outermost frame. */ + movi lr, 0 + /* Pop argc off the stack and save a pointer to argv. */ + ldw a1, (sp, 8) /* Init argc for __libc_start_main. */ + addi a2, sp, 12 /* Init argv for __libc_start_main. */ + + /* Push stack limit. */ + stw a2, (sp, 8) + /* Push rtld_fini. */ + stw a0, (sp, 4) + +#ifdef SHARED + grs t0, .Lgetpc +.Lgetpc: + lrw gb, .Lgetpc@GOTPC + addu gb, t0 + + movi a3, 0 /* Used to be init. */ + stw a3, (sp, 0) /* Used to be fini. */ + + lrw t0, main@GOT + addu t0, gb + ldw a0, (t0, 0) + lrw t1, __libc_start_main@PLT + ldr.w t1, (gb, t1 << 0) + jsr t1 + + lrw t1, abort@PLT + ldr.w t1, (gb, t1 << 0) + jsr t1 +#else + movi a3, 0 /* Used to be init. */ + stw a3, (sp, 0) /* Used to be fini. */ + lrw a0, main + /* Let the libc call main and exit with its return code. */ + jsri __libc_start_main + + /* Should never get here. */ + jsri abort +#endif /* !SHARED */ + cfi_endproc + .size _start,.-_start + + + /* Define a symbol for the first piece of initialized data. 
*/ + .data + .globl __data_start +__data_start: + .long 0 + .weak data_start + data_start = __data_start diff --git a/lib/libc/glibc/sysdeps/csky/bits/endianness.h b/lib/libc/glibc/sysdeps/csky/bits/endianness.h new file mode 100644 index 000000000000..ad20cf64e0bb --- /dev/null +++ b/lib/libc/glibc/sysdeps/csky/bits/endianness.h @@ -0,0 +1,14 @@ +#ifndef _BITS_ENDIANNESS_H +#define _BITS_ENDIANNESS_H 1 + +#ifndef _BITS_ENDIAN_H +# error "Never use directly; include instead." +#endif + +#ifdef __CSKYBE__ +# error "Big endian not supported for C-SKY." +#else +# define __BYTE_ORDER __LITTLE_ENDIAN +#endif + +#endif /* bits/endianness.h */ From 4b4fce3e8909cf803efed1f6cabfa14124ff786f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sat, 24 Aug 2024 17:14:12 +0200 Subject: [PATCH 073/202] glibc: Set asm and include paths for arc. --- src/glibc.zig | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/glibc.zig b/src/glibc.zig index 01c7b2221461..600f77991975 100644 --- a/src/glibc.zig +++ b/src/glibc.zig @@ -474,6 +474,8 @@ fn start_asm_path(comp: *Compilation, arena: Allocator, basename: []const u8) ![ try result.appendSlice("loongarch"); } else if (arch == .m68k) { try result.appendSlice("m68k"); + } else if (arch == .arc) { + try result.appendSlice("arc"); } try result.appendSlice(s); @@ -690,6 +692,9 @@ fn add_include_dirs_arch( try args.append("-I"); try args.append(try path.join(arena, &[_][]const u8{ dir, "m68k" })); } + } else if (arch == .arc) { + try args.append("-I"); + try args.append(try path.join(arena, &[_][]const u8{ dir, "arc" })); } } From 1773a88ab1a0dca05e6f8a19f873efec9ecfa858 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sat, 24 Aug 2024 17:15:56 +0200 Subject: [PATCH 074/202] glibc: Set asm and include paths for csky. 
--- src/glibc.zig | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/glibc.zig b/src/glibc.zig index 600f77991975..2849883e043d 100644 --- a/src/glibc.zig +++ b/src/glibc.zig @@ -476,6 +476,8 @@ fn start_asm_path(comp: *Compilation, arena: Allocator, basename: []const u8) ![ try result.appendSlice("m68k"); } else if (arch == .arc) { try result.appendSlice("arc"); + } else if (arch == .csky) { + try result.appendSlice("csky" ++ s ++ "abiv2"); } try result.appendSlice(s); @@ -695,6 +697,9 @@ fn add_include_dirs_arch( } else if (arch == .arc) { try args.append("-I"); try args.append(try path.join(arena, &[_][]const u8{ dir, "arc" })); + } else if (arch == .csky) { + try args.append("-I"); + try args.append(try path.join(arena, &[_][]const u8{ dir, "csky" })); } } From 27c72c555a3e5cb170703e3820a1f0ce25fc3e2d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sat, 24 Aug 2024 22:42:57 +0200 Subject: [PATCH 075/202] glibc: Fix an edge case leading to duplicate stub symbols. Closes #20376. Closes #21076. --- src/glibc.zig | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/src/glibc.zig b/src/glibc.zig index 2849883e043d..7042f9c102b9 100644 --- a/src/glibc.zig +++ b/src/glibc.zig @@ -854,6 +854,23 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !voi var opt_symbol_name: ?[]const u8 = null; var versions_buffer: [32]u8 = undefined; var versions_len: usize = undefined; + + // There can be situations where there are multiple inclusions for the same symbol with + // partially overlapping versions, due to different target lists. For example: + // + // lgammal: + // library: libm.so + // versions: 2.4 2.23 + // targets: ... 
powerpc64-linux-gnu s390x-linux-gnu + // lgammal: + // library: libm.so + // versions: 2.2 2.23 + // targets: sparc64-linux-gnu s390x-linux-gnu + // + // If we don't handle this, we end up writing the default `lgammal` symbol for version 2.33 + // twice, which causes a "duplicate symbol" assembler error. + var versions_written = std.AutoArrayHashMap(Version, void).init(arena); + while (sym_i < fn_inclusions_len) : (sym_i += 1) { const sym_name = opt_symbol_name orelse n: { const name = mem.sliceTo(metadata.inclusions[inc_i..], 0); @@ -907,6 +924,10 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !voi } } } + + versions_written.clearRetainingCapacity(); + try versions_written.ensureTotalCapacity(versions_len); + { var ver_buf_i: u8 = 0; while (ver_buf_i < versions_len) : (ver_buf_i += 1) { @@ -917,6 +938,9 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !voi // _Exit_2_2_5: const ver_index = versions_buffer[ver_buf_i]; const ver = metadata.all_versions[ver_index]; + + if (versions_written.getOrPutAssumeCapacity(ver).found_existing) continue; + // Default symbol version definition vs normal symbol version definition const want_default = chosen_def_ver_index != 255 and ver_index == chosen_def_ver_index; const at_sign_str: []const u8 = if (want_default) "@@" else "@"; @@ -1066,6 +1090,10 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !voi } } } + + versions_written.clearRetainingCapacity(); + try versions_written.ensureTotalCapacity(versions_len); + { var ver_buf_i: u8 = 0; while (ver_buf_i < versions_len) : (ver_buf_i += 1) { @@ -1077,6 +1105,9 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !voi // environ_2_2_5: const ver_index = versions_buffer[ver_buf_i]; const ver = metadata.all_versions[ver_index]; + + if (versions_written.getOrPutAssumeCapacity(ver).found_existing) continue; + // Default symbol version definition vs normal symbol version 
definition const want_default = chosen_def_ver_index != 255 and ver_index == chosen_def_ver_index; const at_sign_str: []const u8 = if (want_default) "@@" else "@"; From 3543f283208951514c0fdb45b0dde7393e5e40c7 Mon Sep 17 00:00:00 2001 From: Ian Johnson Date: Thu, 5 Sep 2024 23:05:21 -0400 Subject: [PATCH 076/202] std.math.big.int: fix shiftRight sign handling Closes #21311 The sign of the result `r` needs to be initialized before the correction `r.addScalar(r.toConst(), -1)`, or the intended end result could be off by 2 (depending on the original sign of `r`). --- lib/std/math/big/int.zig | 2 +- lib/std/math/big/int_test.zig | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig index 2c656033e91b..2d38517661a8 100644 --- a/lib/std/math/big/int.zig +++ b/lib/std/math/big/int.zig @@ -1202,6 +1202,7 @@ pub const Mutable = struct { llshr(r.limbs[0..], a.limbs[0..a.limbs.len], shift); r.len = a.limbs.len - full_limbs_shifted_out; + r.positive = a.positive; if (nonzero_negative_shiftout) { if (full_limbs_shifted_out > 0) { r.limbs[a.limbs.len - full_limbs_shifted_out] = 0; @@ -1210,7 +1211,6 @@ pub const Mutable = struct { r.addScalar(r.toConst(), -1); } r.normalize(r.len); - r.positive = a.positive; } /// r = ~a under 2s complement wrapping semantics. 
diff --git a/lib/std/math/big/int_test.zig b/lib/std/math/big/int_test.zig index 17652179f599..2e0ccc96c17b 100644 --- a/lib/std/math/big/int_test.zig +++ b/lib/std/math/big/int_test.zig @@ -2083,6 +2083,15 @@ test "shift-right negative" { try a.shiftRight(&a, 1); a.setSign(true); try testing.expect(try a.to(u64) == 0x8000000000000000); + + var arg7 = try Managed.initSet(testing.allocator, -32767); + defer arg7.deinit(); + a.setSign(false); + try a.shiftRight(&arg7, 4); + try testing.expect(try a.to(i16) == -2048); + a.setSign(true); + try a.shiftRight(&arg7, 4); + try testing.expect(try a.to(i16) == -2048); } test "sat shift-left simple unsigned" { From 204107c499f78a3deca1b446e3e093384214b821 Mon Sep 17 00:00:00 2001 From: Travis Staloch <1562827+travisstaloch@users.noreply.github.com> Date: Thu, 5 Sep 2024 15:24:38 -0700 Subject: [PATCH 077/202] package-manager: add application/x-tar-gz mime type Closes #21314 This allows the package manger to download tar.gz bitbucket urls. --- src/Package/Fetch.zig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig index 2540cf992247..81f5cce819b7 100644 --- a/src/Package/Fetch.zig +++ b/src/Package/Fetch.zig @@ -1067,7 +1067,8 @@ fn unpackResource( if (ascii.eqlIgnoreCase(mime_type, "application/gzip") or ascii.eqlIgnoreCase(mime_type, "application/x-gzip") or - ascii.eqlIgnoreCase(mime_type, "application/tar+gzip")) + ascii.eqlIgnoreCase(mime_type, "application/tar+gzip") or + ascii.eqlIgnoreCase(mime_type, "application/x-tar-gz")) { break :ft .@"tar.gz"; } From b230e4f598bd18b47f3f1c981869c597a06c7452 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Thu, 5 Sep 2024 05:50:26 +0200 Subject: [PATCH 078/202] glibc: Patch to work around missing features in LLVM's s390x assembler. Revert this with LLVM 20. 
--- lib/libc/glibc/sysdeps/s390/s390-32/start-2.33.S | 3 ++- lib/libc/glibc/sysdeps/s390/s390-32/start.S | 3 ++- lib/libc/glibc/sysdeps/s390/s390-64/crti.S | 3 ++- lib/libc/glibc/sysdeps/s390/s390-64/start-2.33.S | 6 ++++-- lib/libc/glibc/sysdeps/s390/s390-64/start.S | 9 ++++++--- 5 files changed, 16 insertions(+), 8 deletions(-) diff --git a/lib/libc/glibc/sysdeps/s390/s390-32/start-2.33.S b/lib/libc/glibc/sysdeps/s390/s390-32/start-2.33.S index af9dccfb9d12..e60c298d8472 100644 --- a/lib/libc/glibc/sysdeps/s390/s390-32/start-2.33.S +++ b/lib/libc/glibc/sysdeps/s390/s390-32/start-2.33.S @@ -61,7 +61,8 @@ _start: cfi_startproc /* Mark r14 as undefined in order to stop unwinding here! */ - cfi_undefined (r14) + /* zig patch: r14 -> %r14. revert with llvm 20. */ + cfi_undefined (%r14) /* Check if the kernel provides highgprs facility if needed by the binary. */ diff --git a/lib/libc/glibc/sysdeps/s390/s390-32/start.S b/lib/libc/glibc/sysdeps/s390/s390-32/start.S index 24f3d85d5c78..b5d11ad9f764 100644 --- a/lib/libc/glibc/sysdeps/s390/s390-32/start.S +++ b/lib/libc/glibc/sysdeps/s390/s390-32/start.S @@ -60,7 +60,8 @@ _start: cfi_startproc /* Mark r14 as undefined in order to stop unwinding here! */ - cfi_undefined (r14) + /* zig patch: r14 -> %r14. revert with llvm 20. */ + cfi_undefined (%r14) /* Check if the kernel provides highgprs facility if needed by the binary. */ diff --git a/lib/libc/glibc/sysdeps/s390/s390-64/crti.S b/lib/libc/glibc/sysdeps/s390/s390-64/crti.S index 81ebb0454df3..6323b753beb8 100644 --- a/lib/libc/glibc/sysdeps/s390/s390-64/crti.S +++ b/lib/libc/glibc/sysdeps/s390/s390-64/crti.S @@ -67,7 +67,8 @@ _init: stg %r1,0(%r15) larl %r12,_GLOBAL_OFFSET_TABLE_ #if PREINIT_FUNCTION_WEAK - larl %r1,PREINIT_FUNCTION@GOTENT + /* zig patch: GOTENT -> GOT. revert with llvm 20. 
*/ + larl %r1,PREINIT_FUNCTION@GOT lg %r1,0(%r1) ltgr %r1,%r1 je 1f diff --git a/lib/libc/glibc/sysdeps/s390/s390-64/start-2.33.S b/lib/libc/glibc/sysdeps/s390/s390-64/start-2.33.S index 02ed4aad5127..37503dd1dc87 100644 --- a/lib/libc/glibc/sysdeps/s390/s390-64/start-2.33.S +++ b/lib/libc/glibc/sysdeps/s390/s390-64/start-2.33.S @@ -61,7 +61,8 @@ _start: cfi_startproc /* Mark r14 as undefined in order to stop unwinding here! */ - cfi_undefined (r14) + /* zig patch: r14 -> %r14. revert with llvm 20. */ + cfi_undefined (%r14) /* Load argc and argv from stack. */ la %r4,8(%r15) # get argv lg %r3,0(%r15) # get argc @@ -85,7 +86,8 @@ _start: /* Ok, now branch to the libc main routine. */ #ifdef PIC - larl %r2,main@GOTENT # load pointer to main + /* zig patch: GOTENT -> GOT. revert with llvm 20. */ + larl %r2,main@GOT # load pointer to main lg %r2,0(%r2) brasl %r14,__libc_start_main@plt #else diff --git a/lib/libc/glibc/sysdeps/s390/s390-64/start.S b/lib/libc/glibc/sysdeps/s390/s390-64/start.S index 59eeb7e99822..6e9d66be7943 100644 --- a/lib/libc/glibc/sysdeps/s390/s390-64/start.S +++ b/lib/libc/glibc/sysdeps/s390/s390-64/start.S @@ -60,7 +60,8 @@ _start: cfi_startproc /* Mark r14 as undefined in order to stop unwinding here! */ - cfi_undefined (r14) + /* zig patch: r14 -> %r14. revert with llvm 20. */ + cfi_undefined (%r14) /* Load argc and argv from stack. */ la %r4,8(%r15) # get argv lg %r3,0(%r15) # get argc @@ -87,7 +88,8 @@ _start: # ifdef SHARED /* Used for dynamic linked position independent executable. => Scrt1.o */ - larl %r2,main@GOTENT # load pointer to main + /* zig patch: GOTENT -> GOT. revert with llvm 20. */ + larl %r2,main@GOT # load pointer to main lg %r2,0(%r2) # else /* Used for dynamic linked position dependent executable. @@ -119,7 +121,8 @@ _start: use of GOT relocations before __libc_start_main is called. */ __wrap_main: cfi_startproc - larl %r1,main@GOTENT # load pointer to main + /* zig patch: GOTENT -> GOT. revert with llvm 20. 
*/ + larl %r1,main@GOT # load pointer to main lg %r1,0(%r1) br %r1 cfi_endproc From 804319799586961052a9e4f283ad94d5018ce5fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sat, 31 Aug 2024 03:24:55 +0200 Subject: [PATCH 079/202] std.os.linux: Add clock_nanosleep() syscall wrapper. --- lib/std/os/linux.zig | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index 90298bc7cd00..9039acedee9e 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -1456,6 +1456,16 @@ pub fn clock_settime(clk_id: i32, tp: *const timespec) usize { return syscall2(.clock_settime, @as(usize, @bitCast(@as(isize, clk_id))), @intFromPtr(tp)); } +pub fn clock_nanosleep(clockid: clockid_t, flags: TIMER, request: *const timespec, remain: ?*timespec) usize { + return syscall4( + .clock_nanosleep, + @intFromEnum(clockid), + @as(u32, @bitCast(flags)), + @intFromPtr(request), + @intFromPtr(remain), + ); +} + pub fn gettimeofday(tv: ?*timeval, tz: ?*timezone) usize { return syscall2(.gettimeofday, @intFromPtr(tv), @intFromPtr(tz)); } @@ -4527,6 +4537,11 @@ pub const clockid_t = enum(u32) { _, }; +pub const TIMER = packed struct(u32) { + ABSTIME: bool, + _: u31 = 0, +}; + pub const CSIGNAL = 0x000000ff; pub const CLONE = struct { From f35015575ef7ef5b493cf217f5e9c7152d6b6658 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sat, 31 Aug 2024 03:28:50 +0200 Subject: [PATCH 080/202] std.time: Use clock_nanosleep() to implement sleep() on Linux. This fixes the function for riscv32 where the old nanosleep() is not available. clock_nanosleep() has been available since Linux 2.6 and glibc 2.1 anyway. 
--- lib/std/time.zig | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/lib/std/time.zig b/lib/std/time.zig index 34e544a28b19..a80a2477acc7 100644 --- a/lib/std/time.zig +++ b/lib/std/time.zig @@ -50,6 +50,34 @@ pub fn sleep(nanoseconds: u64) void { const s = nanoseconds / ns_per_s; const ns = nanoseconds % ns_per_s; + + // Newer kernel ports don't have old `nanosleep()` and `clock_nanosleep()` has been around + // since Linux 2.6 and glibc 2.1 anyway. + if (builtin.os.tag == .linux) { + const linux = std.os.linux; + + var req: linux.timespec = .{ + .sec = std.math.cast(linux.time_t, s) orelse std.math.maxInt(linux.time_t), + .nsec = std.math.cast(linux.time_t, ns) orelse std.math.maxInt(linux.time_t), + }; + var rem: linux.timespec = undefined; + + while (true) { + switch (linux.E.init(linux.clock_nanosleep(.MONOTONIC, .{ .ABSTIME = false }, &req, &rem))) { + .SUCCESS => return, + .INTR => { + req = rem; + continue; + }, + .FAULT, + .INVAL, + .OPNOTSUPP, + => unreachable, + else => return, + } + } + } + posix.nanosleep(s, ns); } From 65a6e9eee5f0c59622e97c6fd0f283bf2594d6c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sat, 31 Aug 2024 03:28:28 +0200 Subject: [PATCH 081/202] std.posix: Skip a couple of tests that use fstat()/fstatat() on riscv32. --- lib/std/posix/test.zig | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/std/posix/test.zig b/lib/std/posix/test.zig index 474de28f6dec..e62ffa39ca54 100644 --- a/lib/std/posix/test.zig +++ b/lib/std/posix/test.zig @@ -349,6 +349,7 @@ test "linkat with different directories" { } test "fstatat" { + if (builtin.cpu.arch == .riscv32 and builtin.os.tag == .linux and !builtin.link_libc) return error.SkipZigTest; // No `fstatat()`. 
// enable when `fstat` and `fstatat` are implemented on Windows if (native_os == .windows) return error.SkipZigTest; @@ -1264,6 +1265,9 @@ test "fchmodat smoke test" { 0o644, ); posix.close(fd); + + if (builtin.cpu.arch == .riscv32 and builtin.os.tag == .linux and !builtin.link_libc) return error.SkipZigTest; // No `fstatat()`. + try posix.symlinkat("regfile", tmp.dir.fd, "symlink"); const sym_mode = blk: { const st = try posix.fstatat(tmp.dir.fd, "symlink", posix.AT.SYMLINK_NOFOLLOW); From a0205fff98f1e3df24d28b78b86d6e8f385d350f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sat, 31 Aug 2024 03:24:19 +0200 Subject: [PATCH 082/202] std.DynLib: Prefer std.fs.File.stat() over posix.fstat(). This is necessary for riscv32-linux. --- lib/std/dynamic_library.zig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/std/dynamic_library.zig b/lib/std/dynamic_library.zig index 110393d91a45..7fd231aba7fb 100644 --- a/lib/std/dynamic_library.zig +++ b/lib/std/dynamic_library.zig @@ -215,7 +215,8 @@ pub const ElfDynLib = struct { const fd = try resolveFromName(path); defer posix.close(fd); - const stat = try posix.fstat(fd); + const file: std.fs.File = .{ .handle = fd }; + const stat = try file.stat(); const size = std.math.cast(usize, stat.size) orelse return error.FileTooBig; // This one is to read the ELF info. We do more mmapping later From ae10adb6ef7182352c6176a135e181ba70c81212 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sat, 31 Aug 2024 03:29:46 +0200 Subject: [PATCH 083/202] llvm: Don't lower to f16 for riscv32. This causes so many test failures that I doubt this has been tested at all. 
--- src/codegen/llvm.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index dc8996afda08..69e32ad2d257 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -11990,6 +11990,7 @@ fn backendSupportsF16(target: std.Target) bool { .mipsel, .mips64, .mips64el, + .riscv32, .s390x, => false, .aarch64, From c1a70acc916b7278a3f7702d6712b20ecbbb62a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sun, 25 Aug 2024 00:53:24 +0200 Subject: [PATCH 084/202] std.zig.target: Split `mips(el)-linux-musl` triples into `mips(el)-linux-musleabi(hf)`. Closes #21184. --- lib/std/zig/target.zig | 6 ++++-- test/tests.zig | 6 +++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/lib/std/zig/target.zig b/lib/std/zig/target.zig index d0d68acd70fd..f28904a65c1f 100644 --- a/lib/std/zig/target.zig +++ b/lib/std/zig/target.zig @@ -46,10 +46,12 @@ pub const available_libcs = [_]ArchOsAbi{ .{ .arch = .mips64, .os = .linux, .abi = .musl }, .{ .arch = .mipsel, .os = .linux, .abi = .gnueabi }, .{ .arch = .mipsel, .os = .linux, .abi = .gnueabihf }, - .{ .arch = .mipsel, .os = .linux, .abi = .musl }, + .{ .arch = .mipsel, .os = .linux, .abi = .musleabi }, + .{ .arch = .mipsel, .os = .linux, .abi = .musleabihf }, .{ .arch = .mips, .os = .linux, .abi = .gnueabi }, .{ .arch = .mips, .os = .linux, .abi = .gnueabihf }, - .{ .arch = .mips, .os = .linux, .abi = .musl }, + .{ .arch = .mips, .os = .linux, .abi = .musleabi }, + .{ .arch = .mips, .os = .linux, .abi = .musleabihf }, .{ .arch = .powerpc64le, .os = .linux, .abi = .gnu, .glibc_min = .{ .major = 2, .minor = 19, .patch = 0 } }, .{ .arch = .powerpc64le, .os = .linux, .abi = .musl }, .{ .arch = .powerpc64, .os = .linux, .abi = .gnu }, diff --git a/test/tests.zig b/test/tests.zig index 7d5be7bbf74c..6733c52de614 100644 --- a/test/tests.zig +++ b/test/tests.zig @@ -323,7 +323,7 @@ const test_targets = blk: { .target = .{ .cpu_arch = .mips, .os_tag = 
.linux, - .abi = .musl, + .abi = .musleabihf, }, .link_libc = true, .slow_backend = true, @@ -350,7 +350,7 @@ const test_targets = blk: { .target = .{ .cpu_arch = .mipsel, .os_tag = .linux, - .abi = .musl, + .abi = .musleabihf, }, .link_libc = true, .slow_backend = true, @@ -661,7 +661,7 @@ const c_abi_targets = [_]CAbiTarget{ .target = .{ .cpu_arch = .mips, .os_tag = .linux, - .abi = .musl, + .abi = .musleabihf, }, }, .{ From 17f54e827499058e281abeab5d6634bf415c3580 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sun, 25 Aug 2024 00:54:06 +0200 Subject: [PATCH 085/202] std.zig.target: Split `powerpc-linux-musl` triple into `powerpc-linux-musleabi(hf)`. --- lib/std/zig/target.zig | 3 ++- test/tests.zig | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/std/zig/target.zig b/lib/std/zig/target.zig index f28904a65c1f..b60586da8519 100644 --- a/lib/std/zig/target.zig +++ b/lib/std/zig/target.zig @@ -58,7 +58,8 @@ pub const available_libcs = [_]ArchOsAbi{ .{ .arch = .powerpc64, .os = .linux, .abi = .musl }, .{ .arch = .powerpc, .os = .linux, .abi = .gnueabi }, .{ .arch = .powerpc, .os = .linux, .abi = .gnueabihf }, - .{ .arch = .powerpc, .os = .linux, .abi = .musl }, + .{ .arch = .powerpc, .os = .linux, .abi = .musleabi }, + .{ .arch = .powerpc, .os = .linux, .abi = .musleabihf }, .{ .arch = .riscv32, .os = .linux, .abi = .gnu, .glibc_min = .{ .major = 2, .minor = 33, .patch = 0 } }, .{ .arch = .riscv32, .os = .linux, .abi = .musl }, .{ .arch = .riscv64, .os = .linux, .abi = .gnu, .glibc_min = .{ .major = 2, .minor = 27, .patch = 0 } }, diff --git a/test/tests.zig b/test/tests.zig index 6733c52de614..b87113c12dc2 100644 --- a/test/tests.zig +++ b/test/tests.zig @@ -424,7 +424,7 @@ const test_targets = blk: { .target = .{ .cpu_arch = .powerpc, .os_tag = .linux, - .abi = .musl, + .abi = .musleabihf, }, .link_libc = true, }, @@ -682,7 +682,7 @@ const c_abi_targets = [_]CAbiTarget{ .target = .{ .cpu_arch = .powerpc, 
.os_tag = .linux, - .abi = .musl, + .abi = .musleabihf, }, }, .{ From af370a69cd16305e2cd14b584187e84e30bd065f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sun, 25 Aug 2024 00:44:35 +0200 Subject: [PATCH 086/202] std.Target: Make Abi.floatAbi() more accurate. Also rename Target.getFloatAbi() to floatAbi(). --- lib/std/Target.zig | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/lib/std/Target.zig b/lib/std/Target.zig index d215e29fc88e..60f75e25fad0 100644 --- a/lib/std/Target.zig +++ b/lib/std/Target.zig @@ -765,12 +765,13 @@ pub const Abi = enum { pub inline fn floatAbi(abi: Abi) FloatAbi { return switch (abi) { - .gnueabihf, - .eabihf, - .musleabihf, - => .hard, - .ohos => .soft, - else => .soft, + .eabi, + .gnueabi, + .musleabi, + .gnusf, + .ohos, + => .soft, + else => .hard, }; } }; @@ -1645,7 +1646,7 @@ pub const FloatAbi = enum { soft, }; -pub inline fn getFloatAbi(target: Target) FloatAbi { +pub inline fn floatAbi(target: Target) FloatAbi { return target.abi.floatAbi(); } From 92517fbd625b1f6d6162ce2875ef167701b99733 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sun, 25 Aug 2024 00:45:38 +0200 Subject: [PATCH 087/202] llvm: Set float ABI based on std.Target.floatAbi(). 
--- src/codegen/llvm.zig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index a46d875b342b..59df12df610d 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1284,8 +1284,7 @@ pub const Object = struct { .large => .Large, }; - // TODO handle float ABI better- it should depend on the ABI portion of std.Target - const float_abi: llvm.ABIType = .Default; + const float_abi: llvm.ABIType = if (comp.root_mod.resolved_target.result.floatAbi() == .hard) .Hard else .Soft; var target_machine = llvm.TargetMachine.create( target, From 5c128a899057aadbb175c5a22ec840ee2276b0d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Thu, 5 Sep 2024 09:37:29 +0200 Subject: [PATCH 088/202] test: Re-enable `vector shift operators` for all LLVM targets. Closes #4951. --- test/behavior/vector.zig | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index cee9f168f193..76134255e26e 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -743,23 +743,6 @@ test "vector shift operators" { } }; - switch (builtin.target.cpu.arch) { - .aarch64_be, - .armeb, - .thumb, - .thumbeb, - .mips, - .mips64, - .mips64el, - .sparc64, - => { - // LLVM miscompiles on this architecture - // https://github.com/ziglang/zig/issues/4951 - return error.SkipZigTest; - }, - else => {}, - } - try S.doTheTest(); try comptime S.doTheTest(); } From ccf852c31e04e17cfe3b5c017e37479de3439c19 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Thu, 5 Sep 2024 09:42:28 +0200 Subject: [PATCH 089/202] test: Re-enable `vector reduce operation` for most LLVM targets. mips64 failure is tracked in #21091. Closes #7138. 
--- test/behavior/vector.zig | 36 ++++++++++-------------------------- 1 file changed, 10 insertions(+), 26 deletions(-) diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index 76134255e26e..6539a5f6c5db 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -805,14 +805,8 @@ test "vector reduce operation" { try testReduce(.Min, [4]u16{ 1, 2, 3, 4 }, @as(u16, 1)); try testReduce(.Min, [4]i32{ 1234567, -386, 0, 3 }, @as(i32, -386)); try testReduce(.Min, [4]u32{ 99, 9999, 9, 99999 }, @as(u32, 9)); - - // LLVM 11 ERROR: Cannot select type - // https://github.com/ziglang/zig/issues/7138 - if (builtin.zig_backend != .stage2_llvm or builtin.target.cpu.arch != .aarch64) { - try testReduce(.Min, [4]i64{ 1234567, -386, 0, 3 }, @as(i64, -386)); - try testReduce(.Min, [4]u64{ 99, 9999, 9, 99999 }, @as(u64, 9)); - } - + try testReduce(.Min, [4]i64{ 1234567, -386, 0, 3 }, @as(i64, -386)); + try testReduce(.Min, [4]u64{ 99, 9999, 9, 99999 }, @as(u64, 9)); try testReduce(.Min, [4]i128{ 1234567, -386, 0, 3 }, @as(i128, -386)); try testReduce(.Min, [4]u128{ 99, 9999, 9, 99999 }, @as(u128, 9)); try testReduce(.Min, [4]f16{ -10.3, 10.0e9, 13.0, -100.0 }, @as(f16, -100.0)); @@ -823,14 +817,8 @@ test "vector reduce operation" { try testReduce(.Max, [4]u16{ 1, 2, 3, 4 }, @as(u16, 4)); try testReduce(.Max, [4]i32{ 1234567, -386, 0, 3 }, @as(i32, 1234567)); try testReduce(.Max, [4]u32{ 99, 9999, 9, 99999 }, @as(u32, 99999)); - - // LLVM 11 ERROR: Cannot select type - // https://github.com/ziglang/zig/issues/7138 - if (builtin.zig_backend != .stage2_llvm or builtin.target.cpu.arch != .aarch64) { - try testReduce(.Max, [4]i64{ 1234567, -386, 0, 3 }, @as(i64, 1234567)); - try testReduce(.Max, [4]u64{ 99, 9999, 9, 99999 }, @as(u64, 99999)); - } - + try testReduce(.Max, [4]i64{ 1234567, -386, 0, 3 }, @as(i64, 1234567)); + try testReduce(.Max, [4]u64{ 99, 9999, 9, 99999 }, @as(u64, 99999)); try testReduce(.Max, [4]i128{ 1234567, -386, 0, 3 }, @as(i128, 1234567)); 
try testReduce(.Max, [4]u128{ 99, 9999, 9, 99999 }, @as(u128, 99999)); try testReduce(.Max, [4]f16{ -10.3, 10.0e9, 13.0, -100.0 }, @as(f16, 10.0e9)); @@ -872,17 +860,13 @@ test "vector reduce operation" { try testReduce(.Add, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, f32_nan); try testReduce(.Add, [4]f64{ -1.9, 5.1, f64_nan, 100.0 }, f64_nan); - // LLVM 11 ERROR: Cannot select type - // https://github.com/ziglang/zig/issues/7138 - if (builtin.zig_backend != .stage2_llvm) { - try testReduce(.Min, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, @as(f16, -1.9)); - try testReduce(.Min, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, @as(f32, -1.9)); - try testReduce(.Min, [4]f64{ -1.9, 5.1, f64_nan, 100.0 }, @as(f64, -1.9)); + try testReduce(.Min, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, @as(f16, -1.9)); + try testReduce(.Min, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, @as(f32, -1.9)); + try testReduce(.Min, [4]f64{ -1.9, 5.1, f64_nan, 100.0 }, @as(f64, -1.9)); - try testReduce(.Max, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, @as(f16, 100.0)); - try testReduce(.Max, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, @as(f32, 100.0)); - try testReduce(.Max, [4]f64{ -1.9, 5.1, f64_nan, 100.0 }, @as(f64, 100.0)); - } + try testReduce(.Max, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, @as(f16, 100.0)); + try testReduce(.Max, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, @as(f32, 100.0)); + try testReduce(.Max, [4]f64{ -1.9, 5.1, f64_nan, 100.0 }, @as(f64, 100.0)); try testReduce(.Mul, [4]f16{ -1.9, 5.1, f16_nan, 100.0 }, f16_nan); try testReduce(.Mul, [4]f32{ -1.9, 5.1, f32_nan, 100.0 }, f32_nan); From c97db8e4974b912c7ae97df878d04d2d71480b9a Mon Sep 17 00:00:00 2001 From: pfg Date: Sat, 7 Sep 2024 11:06:41 -0400 Subject: [PATCH 090/202] Support stringifying non-exhaustive enum to json (#21228) --- lib/std/json/stringify.zig | 16 +++++++++++++++- lib/std/json/stringify_test.zig | 9 +++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/lib/std/json/stringify.zig b/lib/std/json/stringify.zig index 11e1dcf710bf..9bc0e564a1ad 
100644 --- a/lib/std/json/stringify.zig +++ b/lib/std/json/stringify.zig @@ -483,6 +483,7 @@ pub fn WriteStream( /// * If the union declares a method `pub fn jsonStringify(self: *@This(), jw: anytype) !void`, it is called to do the serialization instead of the default behavior. The given `jw` is a pointer to this `WriteStream`. /// * Zig `enum` -> JSON string naming the active tag. /// * If the enum declares a method `pub fn jsonStringify(self: *@This(), jw: anytype) !void`, it is called to do the serialization instead of the default behavior. The given `jw` is a pointer to this `WriteStream`. + /// * If the enum is non-exhaustive, unnamed values are rendered as integers. /// * Zig untyped enum literal -> JSON string naming the active tag. /// * Zig error -> JSON string naming the error. /// * Zig `*T` -> the rendering of `T`. Note there is no guard against circular-reference infinite recursion. @@ -540,11 +541,24 @@ pub fn WriteStream( return try self.write(null); } }, - .@"enum", .enum_literal => { + .@"enum" => |enum_info| { if (std.meta.hasFn(T, "jsonStringify")) { return value.jsonStringify(self); } + if (!enum_info.is_exhaustive) { + inline for (enum_info.fields) |field| { + if (value == @field(T, field.name)) { + break; + } + } else { + return self.write(@intFromEnum(value)); + } + } + + return self.stringValue(@tagName(value)); + }, + .enum_literal => { return self.stringValue(@tagName(value)); }, .@"union" => { diff --git a/lib/std/json/stringify_test.zig b/lib/std/json/stringify_test.zig index c0003b87dcc5..52e38d1e30ad 100644 --- a/lib/std/json/stringify_test.zig +++ b/lib/std/json/stringify_test.zig @@ -172,6 +172,15 @@ test "stringify enums" { try testStringify("\"bar\"", E.bar, .{}); } +test "stringify non-exhaustive enum" { + const E = enum(u8) { + foo = 0, + _, + }; + try testStringify("\"foo\"", E.foo, .{}); + try testStringify("1", @as(E, @enumFromInt(1)), .{}); +} + test "stringify enum literals" { try testStringify("\"foo\"", .foo, .{}); try 
testStringify("\"bar\"", .bar, .{}); From af04404b49d78f2ba7c3b6ba8c082b4fbeec7859 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Thu, 5 Sep 2024 09:37:09 +0200 Subject: [PATCH 091/202] std: Fix assembler comment syntax for sparc. --- lib/std/Thread.zig | 8 ++++---- lib/std/os/linux/sparc64.zig | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index 60021346e448..446b629cc6ec 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -1221,12 +1221,12 @@ const LinuxThreadImpl = struct { \\ ba 1b \\ restore \\ 2: - \\ mov 73, %%g1 # SYS_munmap + \\ mov 73, %%g1 // SYS_munmap \\ mov %[ptr], %%o0 \\ mov %[len], %%o1 \\ t 0x3 # ST_FLUSH_WINDOWS \\ t 0x10 - \\ mov 1, %%g1 # SYS_exit + \\ mov 1, %%g1 // SYS_exit \\ mov 0, %%o0 \\ t 0x10 : @@ -1246,14 +1246,14 @@ const LinuxThreadImpl = struct { \\ ba 1b \\ restore \\ 2: - \\ mov 73, %%g1 # SYS_munmap + \\ mov 73, %%g1 // SYS_munmap \\ mov %[ptr], %%o0 \\ mov %[len], %%o1 \\ # Flush register window contents to prevent background \\ # memory access before unmapping the stack. 
\\ flushw \\ t 0x6d - \\ mov 1, %%g1 # SYS_exit + \\ mov 1, %%g1 // SYS_exit \\ mov 0, %%o0 \\ t 0x6d : diff --git a/lib/std/os/linux/sparc64.zig b/lib/std/os/linux/sparc64.zig index b30f00100070..c146ed17cf2a 100644 --- a/lib/std/os/linux/sparc64.zig +++ b/lib/std/os/linux/sparc64.zig @@ -190,7 +190,7 @@ pub fn clone() callconv(.Naked) usize { \\ mov %%i0, %%g2 \\ mov %%i3, %%g3 \\ # Shuffle the arguments - \\ mov 217, %%g1 # SYS_clone + \\ mov 217, %%g1 // SYS_clone \\ mov %%i2, %%o0 \\ # Add some extra space for the initial frame \\ sub %%i1, 176 + 2047, %%o1 @@ -214,7 +214,7 @@ pub fn clone() callconv(.Naked) usize { \\ call %%g2 \\ mov %%g3, %%o0 \\ # Exit - \\ mov 1, %%g1 # SYS_exit + \\ mov 1, %%g1 // SYS_exit \\ t 0x6d \\2: \\ # The syscall failed From fb0028a0d7b43a2a5dd05f075ded22746f92faf6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20=27vesim=27=20Kuli=C5=84ski?= Date: Sat, 7 Sep 2024 17:29:43 +0200 Subject: [PATCH 092/202] mips: fix C ABI compatibility --- src/arch/mips/abi.zig | 86 +++++++++++++++++++++++++++++++++++++++++++ src/codegen/llvm.zig | 22 +++++++++-- test/c_abi/cfuncs.c | 14 +++---- test/c_abi/main.zig | 66 ++++++++++++++++----------------- 4 files changed, 145 insertions(+), 43 deletions(-) create mode 100644 src/arch/mips/abi.zig diff --git a/src/arch/mips/abi.zig b/src/arch/mips/abi.zig new file mode 100644 index 000000000000..8f3cebd21cea --- /dev/null +++ b/src/arch/mips/abi.zig @@ -0,0 +1,86 @@ +const std = @import("std"); +const Type = @import("../../Type.zig"); +const Zcu = @import("../../Zcu.zig"); +const assert = std.debug.assert; + +pub const Class = union(enum) { + memory, + byval, + i32_array: u8, +}; + +pub const Context = enum { ret, arg }; + +pub fn classifyType(ty: Type, zcu: *Zcu, ctx: Context) Class { + const target = zcu.getTarget(); + std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(zcu)); + + const max_direct_size = target.ptrBitWidth() * 2; + switch (ty.zigTypeTag(zcu)) { + .@"struct" => { + const bit_size = 
ty.bitSize(zcu); + if (ty.containerLayout(zcu) == .@"packed") { + if (bit_size > max_direct_size) return .memory; + return .byval; + } + if (bit_size > max_direct_size) return .memory; + // TODO: for bit_size <= 32 using byval is more correct, but that needs inreg argument attribute + const count = @as(u8, @intCast(std.mem.alignForward(u64, bit_size, 32) / 32)); + return .{ .i32_array = count }; + }, + .@"union" => { + const bit_size = ty.bitSize(zcu); + if (ty.containerLayout(zcu) == .@"packed") { + if (bit_size > max_direct_size) return .memory; + return .byval; + } + if (bit_size > max_direct_size) return .memory; + + return .byval; + }, + .bool => return .byval, + .float => return .byval, + .int, .@"enum", .error_set => { + const bit_size = ty.bitSize(zcu); + if (bit_size > max_direct_size) return .memory; + return .byval; + }, + .vector => { + const elem_type = ty.elemType2(zcu); + switch (elem_type.zigTypeTag(zcu)) { + .bool, .int => { + const bit_size = ty.bitSize(zcu); + if (ctx == .ret and bit_size > 128) return .memory; + if (bit_size > 512) return .memory; + // TODO: byval vector arguments with non power of 2 size need inreg attribute + return .byval; + }, + .float => return .memory, + else => unreachable, + } + }, + .optional => { + std.debug.assert(ty.isPtrLikeOptional(zcu)); + return .byval; + }, + .pointer => { + std.debug.assert(!ty.isSlice(zcu)); + return .byval; + }, + .error_union, + .frame, + .@"anyframe", + .noreturn, + .void, + .type, + .comptime_float, + .comptime_int, + .undefined, + .null, + .@"fn", + .@"opaque", + .enum_literal, + .array, + => unreachable, + } +} diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index dbaaa92597ae..2f9f9e096ba4 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -26,6 +26,7 @@ const wasm_c_abi = @import("../arch/wasm/abi.zig"); const aarch64_c_abi = @import("../arch/aarch64/abi.zig"); const arm_c_abi = @import("../arch/arm/abi.zig"); const riscv_c_abi = 
@import("../arch/riscv64/abi.zig"); +const mips_c_abi = @import("../arch/mips/abi.zig"); const dev = @import("../dev.zig"); const target_util = @import("../target.zig"); @@ -11681,7 +11682,10 @@ fn firstParamSRet(fn_info: InternPool.Key.FuncType, zcu: *Zcu, target: std.Targe return switch (fn_info.cc) { .Unspecified, .Inline => returnTypeByRef(zcu, target, return_type), .C => switch (target.cpu.arch) { - .mips, .mipsel => false, + .mips, .mipsel => switch (mips_c_abi.classifyType(return_type, zcu, .ret)) { + .memory, .i32_array => true, + .byval => false, + }, .x86 => isByRef(return_type, zcu), .x86_64 => switch (target.os.tag) { .windows => x86_64_abi.classifyWindows(return_type, zcu) == .memory, @@ -11732,7 +11736,12 @@ fn lowerFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.Error!Bu .C => { switch (target.cpu.arch) { - .mips, .mipsel => return o.lowerType(return_type), + .mips, .mipsel => { + switch (mips_c_abi.classifyType(return_type, zcu, .ret)) { + .memory, .i32_array => return .void, + .byval => return o.lowerType(return_type), + } + }, .x86 => return if (isByRef(return_type, zcu)) .void else o.lowerType(return_type), .x86_64 => switch (target.os.tag) { .windows => return lowerWin64FnRetTy(o, fn_info), @@ -11978,7 +11987,14 @@ const ParamTypeIterator = struct { .mips, .mipsel => { it.zig_index += 1; it.llvm_index += 1; - return .byval; + switch (mips_c_abi.classifyType(ty, zcu, .arg)) { + .memory => { + it.byval_attr = true; + return .byref; + }, + .byval => return .byval, + .i32_array => |size| return Lowering{ .i32_array = size }, + } }, .x86_64 => switch (target.os.tag) { .windows => return it.nextWin64(ty), diff --git a/test/c_abi/cfuncs.c b/test/c_abi/cfuncs.c index 028f6e06d27e..92f95c339c5b 100644 --- a/test/c_abi/cfuncs.c +++ b/test/c_abi/cfuncs.c @@ -2657,7 +2657,7 @@ void run_c_tests(void) { } #endif -#if !defined(__mips__) && !defined(ZIG_PPC32) +#if !defined(ZIG_PPC32) { struct Struct_u64_u64 s = zig_ret_struct_u64_u64(); 
assert_or_panic(s.a == 1); @@ -2708,7 +2708,7 @@ void run_c_tests(void) { #endif #if !defined __i386__ && !defined __arm__ && !defined __aarch64__ && \ - !defined __mips__ && !defined __powerpc__ && !defined ZIG_RISCV64 + !defined __powerpc__ && !defined ZIG_RISCV64 { struct SmallStructInts s = {1, 2, 3, 4}; zig_small_struct_ints(s); @@ -2716,7 +2716,7 @@ void run_c_tests(void) { #endif #if !defined __arm__ && !defined __aarch64__ && \ - !defined __mips__ && !defined __powerpc__ && !defined ZIG_RISCV64 + !defined __powerpc__ && !defined ZIG_RISCV64 { struct MedStructInts s = {1, 2, 3}; zig_med_struct_ints(s); @@ -2741,7 +2741,7 @@ void run_c_tests(void) { zig_small_packed_struct(s); } -#if !defined __i386__ && !defined __arm__ && !defined __mips__ && \ +#if !defined __i386__ && !defined __arm__ && \ !defined ZIG_PPC32 && !defined _ARCH_PPC64 { struct SplitStructInts s = {1234, 100, 1337}; @@ -2756,7 +2756,7 @@ void run_c_tests(void) { } #endif -#if !defined __i386__ && !defined __arm__ && !defined __mips__ && \ +#if !defined __i386__ && !defined __arm__ && \ !defined ZIG_PPC32 && !defined _ARCH_PPC64 { struct SplitStructMixed s = {1234, 100, 1337.0f}; @@ -2764,7 +2764,7 @@ void run_c_tests(void) { } #endif -#if !defined __mips__ && !defined ZIG_PPC32 +#if !defined ZIG_PPC32 { struct BigStruct s = {30, 31, 32, 33, 34}; struct BigStruct res = zig_big_struct_both(s); @@ -2784,7 +2784,7 @@ void run_c_tests(void) { } #endif -#if !defined __mips__ && !defined ZIG_PPC32 +#if !defined ZIG_PPC32 { struct FloatRect r1 = {1, 21, 16, 4}; struct FloatRect r2 = {178, 189, 21, 15}; diff --git a/test/c_abi/main.zig b/test/c_abi/main.zig index 86b9584a6ee7..09b50116653b 100644 --- a/test/c_abi/main.zig +++ b/test/c_abi/main.zig @@ -322,7 +322,7 @@ extern fn c_struct_u64_u64_7(usize, usize, usize, usize, usize, usize, usize, St extern fn c_struct_u64_u64_8(usize, usize, usize, usize, usize, usize, usize, usize, Struct_u64_u64) void; test "C ABI struct u64 u64" { - if 
(builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC32()) return error.SkipZigTest; const s = c_ret_struct_u64_u64(); @@ -359,7 +359,7 @@ extern fn c_ret_struct_f32f32_f32() Struct_f32f32_f32; extern fn c_struct_f32f32_f32(Struct_f32f32_f32) void; test "C ABI struct {f32,f32} f32" { - if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC32()) return error.SkipZigTest; const s = c_ret_struct_f32f32_f32(); @@ -389,7 +389,7 @@ extern fn c_ret_struct_f32_f32f32() Struct_f32_f32f32; extern fn c_struct_f32_f32f32(Struct_f32_f32f32) void; test "C ABI struct f32 {f32,f32}" { - if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC32()) return error.SkipZigTest; const s = c_ret_struct_f32_f32f32(); @@ -424,7 +424,7 @@ extern fn c_ret_struct_u32_union_u32_u32u32() Struct_u32_Union_u32_u32u32; extern fn c_struct_u32_union_u32_u32u32(Struct_u32_Union_u32_u32u32) void; test "C ABI struct{u32,union{u32,struct{u32,u32}}}" { - if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC()) return error.SkipZigTest; const s = c_ret_struct_u32_union_u32_u32u32(); @@ -444,7 +444,7 @@ const BigStruct = extern struct { extern fn c_big_struct(BigStruct) void; test "C ABI big struct" { - if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC32()) return error.SkipZigTest; const s = BigStruct{ @@ -503,7 +503,7 @@ extern fn c_med_struct_mixed(MedStructMixed) void; extern fn c_ret_med_struct_mixed() MedStructMixed; test "C ABI medium struct of ints and floats" { - if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if 
(builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC()) return error.SkipZigTest; const s = MedStructMixed{ @@ -535,7 +535,7 @@ extern fn c_ret_small_struct_ints() SmallStructInts; test "C ABI small struct of ints" { if (builtin.cpu.arch == .x86) return error.SkipZigTest; - if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC()) return error.SkipZigTest; const s = SmallStructInts{ @@ -568,7 +568,7 @@ extern fn c_med_struct_ints(MedStructInts) void; extern fn c_ret_med_struct_ints() MedStructInts; test "C ABI medium struct of ints" { - if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC()) return error.SkipZigTest; const s = MedStructInts{ @@ -646,7 +646,7 @@ extern fn c_split_struct_ints(SplitStructInt) void; test "C ABI split struct of ints" { if (builtin.cpu.arch == .x86) return error.SkipZigTest; - if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC()) return error.SkipZigTest; const s = SplitStructInt{ @@ -673,7 +673,7 @@ extern fn c_ret_split_struct_mixed() SplitStructMixed; test "C ABI split struct of ints and floats" { if (builtin.cpu.arch == .x86) return error.SkipZigTest; - if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC()) return error.SkipZigTest; const s = SplitStructMixed{ @@ -700,7 +700,7 @@ extern fn c_multiple_struct_ints(Rect, Rect) void; extern fn c_multiple_struct_floats(FloatRect, FloatRect) void; test "C ABI sret and byval together" { - if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC32()) return error.SkipZigTest; const s = 
BigStruct{ @@ -752,7 +752,7 @@ const Vector5 = extern struct { extern fn c_big_struct_floats(Vector5) void; test "C ABI structs of floats as parameter" { - if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC()) return error.SkipZigTest; const v3 = Vector3{ @@ -828,7 +828,7 @@ export fn zig_multiple_struct_floats(x: FloatRect, y: FloatRect) void { } test "C ABI structs of floats as multiple parameters" { - if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC32()) return error.SkipZigTest; const r1 = FloatRect{ @@ -941,7 +941,7 @@ extern fn c_ret_struct_with_array() StructWithArray; test "Struct with array as padding." { if (builtin.cpu.arch == .x86) return error.SkipZigTest; - if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC()) return error.SkipZigTest; c_struct_with_array(.{ .a = 1, .padding = undefined, .b = 2 }); @@ -966,7 +966,7 @@ extern fn c_float_array_struct(FloatArrayStruct) void; extern fn c_ret_float_array_struct() FloatArrayStruct; test "Float array like struct" { - if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC32()) return error.SkipZigTest; c_float_array_struct(.{ @@ -1031,7 +1031,7 @@ extern fn c_ret_big_vec() BigVec; test "big simd vector" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; - if (builtin.cpu.arch.isMIPS() and builtin.mode != .Debug) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64() and builtin.mode != .Debug) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC64()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .x86_64 and builtin.os.tag == .macos and builtin.mode != 
.Debug) return error.SkipZigTest; @@ -5340,7 +5340,7 @@ extern fn c_ptr_size_float_struct(Vector2) void; extern fn c_ret_ptr_size_float_struct() Vector2; test "C ABI pointer sized float struct" { - if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isRISCV()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC32()) return error.SkipZigTest; @@ -5362,25 +5362,25 @@ pub inline fn expectOk(c_err: c_int) !void { /// Tests for Double + Char struct const DC = extern struct { v1: f64, v2: u8 }; test "DC: Zig passes to C" { - if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isRISCV()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC()) return error.SkipZigTest; try expectOk(c_assert_DC(.{ .v1 = -0.25, .v2 = 15 })); } test "DC: Zig returns to C" { - if (builtin.cpu.arch.isMIPS() and builtin.mode != .Debug) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64() and builtin.mode != .Debug) return error.SkipZigTest; if (builtin.cpu.arch.isRISCV()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC()) return error.SkipZigTest; try expectOk(c_assert_ret_DC()); } test "DC: C passes to Zig" { - if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isRISCV()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC()) return error.SkipZigTest; try expectOk(c_send_DC()); } test "DC: C returns to Zig" { - if (builtin.cpu.arch.isMIPS() and builtin.mode != .Debug) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64() and builtin.mode != .Debug) return error.SkipZigTest; if (builtin.cpu.arch.isRISCV()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC()) return error.SkipZigTest; try expectEqual(DC{ .v1 = -0.25, .v2 = 15 }, c_ret_DC()); @@ -5406,12 +5406,12 @@ const CFF = extern struct { v1: 
u8, v2: f32, v3: f32 }; test "CFF: Zig passes to C" { if (builtin.target.cpu.arch == .x86) return error.SkipZigTest; - if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC()) return error.SkipZigTest; try expectOk(c_assert_CFF(.{ .v1 = 39, .v2 = 0.875, .v3 = 1.0 })); } test "CFF: Zig returns to C" { - if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC()) return error.SkipZigTest; try expectOk(c_assert_ret_CFF()); } @@ -5419,7 +5419,7 @@ test "CFF: C passes to Zig" { if (builtin.target.cpu.arch == .x86) return error.SkipZigTest; if (builtin.cpu.arch.isRISCV() and builtin.mode != .Debug) return error.SkipZigTest; if (builtin.cpu.arch == .aarch64 and builtin.mode != .Debug) return error.SkipZigTest; - if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC()) return error.SkipZigTest; try expectOk(c_send_CFF()); @@ -5427,7 +5427,7 @@ test "CFF: C passes to Zig" { test "CFF: C returns to Zig" { if (builtin.cpu.arch == .aarch64 and builtin.mode != .Debug) return error.SkipZigTest; if (builtin.cpu.arch.isRISCV() and builtin.mode != .Debug) return error.SkipZigTest; - if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC()) return error.SkipZigTest; try expectEqual(CFF{ .v1 = 39, .v2 = 0.875, .v3 = 1.0 }, c_ret_CFF()); } @@ -5451,22 +5451,22 @@ pub export fn zig_ret_CFF() CFF { const PD = extern struct { v1: ?*anyopaque, v2: f64 }; test "PD: Zig passes to C" { - if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC()) return error.SkipZigTest; try expectOk(c_assert_PD(.{ .v1 = null, .v2 = 0.5 })); } test 
"PD: Zig returns to C" { - if (builtin.cpu.arch.isMIPS() and builtin.mode != .Debug) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64() and builtin.mode != .Debug) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC()) return error.SkipZigTest; try expectOk(c_assert_ret_PD()); } test "PD: C passes to Zig" { - if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC()) return error.SkipZigTest; try expectOk(c_send_PD()); } test "PD: C returns to Zig" { - if (builtin.cpu.arch.isMIPS() and builtin.mode != .Debug) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64() and builtin.mode != .Debug) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC()) return error.SkipZigTest; try expectEqual(PD{ .v1 = null, .v2 = 0.5 }, c_ret_PD()); } @@ -5520,7 +5520,7 @@ const ByVal = extern struct { extern fn c_func_ptr_byval(*anyopaque, *anyopaque, ByVal, c_ulong, *anyopaque, c_ulong) void; test "C function that takes byval struct called via function pointer" { - if (builtin.cpu.arch.isMIPS() and builtin.mode != .Debug) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64() and builtin.mode != .Debug) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC32()) return error.SkipZigTest; var fn_ptr = &c_func_ptr_byval; @@ -5551,7 +5551,7 @@ const f16_struct = extern struct { }; extern fn c_f16_struct(f16_struct) f16_struct; test "f16 struct" { - if (builtin.target.cpu.arch.isMIPS()) return error.SkipZigTest; + if (builtin.target.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.target.cpu.arch.isPowerPC32()) return error.SkipZigTest; if (builtin.cpu.arch.isARM() and builtin.mode != .Debug) return error.SkipZigTest; @@ -5666,7 +5666,7 @@ const Coord2 = extern struct { extern fn stdcall_coord2(Coord2, Coord2, Coord2) callconv(stdcall_callconv) Coord2; test "Stdcall ABI structs" { - if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if 
(builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC()) return error.SkipZigTest; const res = stdcall_coord2( @@ -5751,7 +5751,7 @@ const byval_tail_callsite_attr = struct { }; test "byval tail callsite attribute" { - if (builtin.cpu.arch.isMIPS()) return error.SkipZigTest; + if (builtin.cpu.arch.isMIPS64()) return error.SkipZigTest; if (builtin.cpu.arch.isPowerPC32()) return error.SkipZigTest; // Originally reported at https://github.com/ziglang/zig/issues/16290 From 54b668f8a33e8466e4771e12f50069a068244640 Mon Sep 17 00:00:00 2001 From: Linus Groh Date: Tue, 3 Sep 2024 22:16:11 +0100 Subject: [PATCH 093/202] std.fmt: Update casing of a few functions to match naming style guide --- lib/std/fmt.zig | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig index dad39025b658..12504b687257 100644 --- a/lib/std/fmt.zig +++ b/lib/std/fmt.zig @@ -807,11 +807,11 @@ test { pub const Case = enum { lower, upper }; -fn formatSliceHexImpl(comptime case: Case) type { +fn SliceHex(comptime case: Case) type { const charset = "0123456789" ++ if (case == .upper) "ABCDEF" else "abcdef"; return struct { - pub fn formatSliceHexImpl( + pub fn format( bytes: []const u8, comptime fmt: []const u8, options: std.fmt.FormatOptions, @@ -830,8 +830,8 @@ fn formatSliceHexImpl(comptime case: Case) type { }; } -const formatSliceHexLower = formatSliceHexImpl(.lower).formatSliceHexImpl; -const formatSliceHexUpper = formatSliceHexImpl(.upper).formatSliceHexImpl; +const formatSliceHexLower = SliceHex(.lower).format; +const formatSliceHexUpper = SliceHex(.upper).format; /// Return a Formatter for a []const u8 where every byte is formatted as a pair /// of lowercase hexadecimal digits. 
@@ -845,11 +845,11 @@ pub fn fmtSliceHexUpper(bytes: []const u8) std.fmt.Formatter(formatSliceHexUpper return .{ .data = bytes }; } -fn formatSliceEscapeImpl(comptime case: Case) type { +fn SliceEscape(comptime case: Case) type { const charset = "0123456789" ++ if (case == .upper) "ABCDEF" else "abcdef"; return struct { - pub fn formatSliceEscapeImpl( + pub fn format( bytes: []const u8, comptime fmt: []const u8, options: std.fmt.FormatOptions, @@ -875,8 +875,8 @@ fn formatSliceEscapeImpl(comptime case: Case) type { }; } -const formatSliceEscapeLower = formatSliceEscapeImpl(.lower).formatSliceEscapeImpl; -const formatSliceEscapeUpper = formatSliceEscapeImpl(.upper).formatSliceEscapeImpl; +const formatSliceEscapeLower = SliceEscape(.lower).format; +const formatSliceEscapeUpper = SliceEscape(.upper).format; /// Return a Formatter for a []const u8 where every non-printable ASCII /// character is escaped as \xNN, where NN is the character in lowercase @@ -892,9 +892,9 @@ pub fn fmtSliceEscapeUpper(bytes: []const u8) std.fmt.Formatter(formatSliceEscap return .{ .data = bytes }; } -fn formatSizeImpl(comptime base: comptime_int) type { +fn Size(comptime base: comptime_int) type { return struct { - fn formatSizeImpl( + fn format( value: u64, comptime fmt: []const u8, options: FormatOptions, @@ -950,8 +950,8 @@ fn formatSizeImpl(comptime base: comptime_int) type { } }; } -const formatSizeDec = formatSizeImpl(1000).formatSizeImpl; -const formatSizeBin = formatSizeImpl(1024).formatSizeImpl; +const formatSizeDec = Size(1000).format; +const formatSizeBin = Size(1024).format; /// Return a Formatter for a u64 value representing a file size. 
/// This formatter represents the number as multiple of 1000 and uses the SI @@ -1480,8 +1480,8 @@ pub const ParseIntError = error{ /// writer: anytype, /// ) !void; /// -pub fn Formatter(comptime format_fn: anytype) type { - const Data = @typeInfo(@TypeOf(format_fn)).@"fn".params[0].type.?; +pub fn Formatter(comptime formatFn: anytype) type { + const Data = @typeInfo(@TypeOf(formatFn)).@"fn".params[0].type.?; return struct { data: Data, pub fn format( @@ -1490,7 +1490,7 @@ pub fn Formatter(comptime format_fn: anytype) type { options: std.fmt.FormatOptions, writer: anytype, ) @TypeOf(writer).Error!void { - try format_fn(self.data, fmt, options, writer); + try formatFn(self.data, fmt, options, writer); } }; } From 9e6d167bb7878682e99482bfaef12e45376665f9 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sun, 8 Sep 2024 21:27:35 -0700 Subject: [PATCH 094/202] comp: `populateTestFunctions` shouldn't bubble up `AnalysisFail` `ensureCauAnalyzed` adds the anal_unit to the transitive failures, so we don't need to do anything here. The errors will be handled after this function. 
--- src/Zcu/PerThread.zig | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 0a581773e9ae..17c5413b30f0 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -2429,7 +2429,7 @@ fn processExportsInner( pub fn populateTestFunctions( pt: Zcu.PerThread, main_progress_node: std.Progress.Node, -) !void { +) Allocator.Error!void { const zcu = pt.zcu; const gpa = zcu.gpa; const ip = &zcu.intern_pool; @@ -2454,7 +2454,10 @@ pub fn populateTestFunctions( zcu.sema_prog_node = std.Progress.Node.none; } const cau_index = ip.getNav(nav_index).analysis_owner.unwrap().?; - try pt.ensureCauAnalyzed(cau_index); + pt.ensureCauAnalyzed(cau_index) catch |err| switch (err) { + error.AnalysisFail => return, + error.OutOfMemory => return error.OutOfMemory, + }; } const test_fns_val = zcu.navValue(nav_index); From de8cece6e7be570a7e622c54c0cbbc7c99308a08 Mon Sep 17 00:00:00 2001 From: Veikka Tuominen Date: Sun, 8 Sep 2024 14:23:03 +0300 Subject: [PATCH 095/202] sync Aro dependency ref: adfd13c6ffb563b1379052b92f6ae4148b91cc12 --- lib/compiler/aro/aro.zig | 1 + lib/compiler/aro/aro/Attribute.zig | 144 ++- lib/compiler/aro/aro/Attribute/names.zig | 3 +- lib/compiler/aro/aro/Builtins.zig | 4 +- lib/compiler/aro/aro/Builtins/Builtin.zig | 3 +- lib/compiler/aro/aro/Builtins/eval.zig | 86 ++ lib/compiler/aro/aro/Compilation.zig | 90 +- lib/compiler/aro/aro/Diagnostics.zig | 15 +- lib/compiler/aro/aro/Diagnostics/messages.zig | 28 +- lib/compiler/aro/aro/Driver.zig | 32 +- lib/compiler/aro/aro/Driver/Filesystem.zig | 4 +- lib/compiler/aro/aro/Hideset.zig | 49 +- lib/compiler/aro/aro/Parser.zig | 833 ++++++++++++------ lib/compiler/aro/aro/Preprocessor.zig | 200 ++++- lib/compiler/aro/aro/Source.zig | 12 +- lib/compiler/aro/aro/SymbolStack.zig | 15 +- lib/compiler/aro/aro/Tokenizer.zig | 56 +- lib/compiler/aro/aro/Tree.zig | 248 ++++-- lib/compiler/aro/aro/Tree/number_affixes.zig | 13 +- 
lib/compiler/aro/aro/Type.zig | 340 +++---- lib/compiler/aro/aro/Value.zig | 411 +++++++-- lib/compiler/aro/aro/annex_g.zig | 118 +++ lib/compiler/aro/aro/features.zig | 4 +- lib/compiler/aro/aro/record_layout.zig | 90 +- lib/compiler/aro/aro/target.zig | 20 +- lib/compiler/aro/aro/text_literal.zig | 4 +- lib/compiler/aro/aro/toolchains/Linux.zig | 2 +- lib/compiler/aro/backend/Interner.zig | 226 +++++ lib/compiler/aro/backend/Ir.zig | 1 + lib/compiler/aro/backend/Object.zig | 10 +- lib/compiler/aro_translate_c.zig | 3 +- lib/compiler/resinator/main.zig | 2 +- lib/compiler/resinator/preprocess.zig | 2 +- src/mingw.zig | 4 +- src/translate_c.zig | 2 +- 35 files changed, 2281 insertions(+), 794 deletions(-) create mode 100644 lib/compiler/aro/aro/Builtins/eval.zig create mode 100644 lib/compiler/aro/aro/annex_g.zig diff --git a/lib/compiler/aro/aro.zig b/lib/compiler/aro/aro.zig index c39972f5c933..8e3da2aa9763 100644 --- a/lib/compiler/aro/aro.zig +++ b/lib/compiler/aro/aro.zig @@ -23,6 +23,7 @@ pub const version_str = backend.version_str; pub const version = backend.version; test { + _ = @import("aro/annex_g.zig"); _ = @import("aro/Builtins.zig"); _ = @import("aro/char_info.zig"); _ = @import("aro/Compilation.zig"); diff --git a/lib/compiler/aro/aro/Attribute.zig b/lib/compiler/aro/aro/Attribute.zig index 4671028b8fa5..a5b78b8463a4 100644 --- a/lib/compiler/aro/aro/Attribute.zig +++ b/lib/compiler/aro/aro/Attribute.zig @@ -38,12 +38,64 @@ pub const Kind = enum { } }; +pub const Iterator = struct { + source: union(enum) { + ty: Type, + slice: []const Attribute, + }, + index: usize, + + pub fn initSlice(slice: ?[]const Attribute) Iterator { + return .{ .source = .{ .slice = slice orelse &.{} }, .index = 0 }; + } + + pub fn initType(ty: Type) Iterator { + return .{ .source = .{ .ty = ty }, .index = 0 }; + } + + /// returns the next attribute as well as its index within the slice or current type + /// The index can be used to determine when a nested type has been 
recursed into + pub fn next(self: *Iterator) ?struct { Attribute, usize } { + switch (self.source) { + .slice => |slice| { + if (self.index < slice.len) { + defer self.index += 1; + return .{ slice[self.index], self.index }; + } + }, + .ty => |ty| { + switch (ty.specifier) { + .typeof_type => { + self.* = .{ .source = .{ .ty = ty.data.sub_type.* }, .index = 0 }; + return self.next(); + }, + .typeof_expr => { + self.* = .{ .source = .{ .ty = ty.data.expr.ty }, .index = 0 }; + return self.next(); + }, + .attributed => { + if (self.index < ty.data.attributed.attributes.len) { + defer self.index += 1; + return .{ ty.data.attributed.attributes[self.index], self.index }; + } + self.* = .{ .source = .{ .ty = ty.data.attributed.base }, .index = 0 }; + return self.next(); + }, + else => {}, + } + }, + } + return null; + } +}; + pub const ArgumentType = enum { string, identifier, int, alignment, float, + complex_float, expression, nullptr_t, @@ -54,6 +106,7 @@ pub const ArgumentType = enum { .int, .alignment => "an integer constant", .nullptr_t => "nullptr", .float => "a floating point number", + .complex_float => "a complex floating point number", .expression => "an expression", }; } @@ -65,7 +118,7 @@ pub fn requiredArgCount(attr: Tag) u32 { inline else => |tag| { comptime var needed = 0; comptime { - const fields = std.meta.fields(@field(attributes, @tagName(tag))); + const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; for (fields) |arg_field| { if (!mem.eql(u8, arg_field.name, "__name_tok") and @typeInfo(arg_field.type) != .optional) needed += 1; } @@ -81,7 +134,7 @@ pub fn maxArgCount(attr: Tag) u32 { inline else => |tag| { comptime var max = 0; comptime { - const fields = std.meta.fields(@field(attributes, @tagName(tag))); + const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; for (fields) |arg_field| { if (!mem.eql(u8, arg_field.name, "__name_tok")) max += 1; } @@ -106,7 +159,7 @@ pub const Formatting = struct { 
switch (attr) { .calling_convention => unreachable, inline else => |tag| { - const fields = std.meta.fields(@field(attributes, @tagName(tag))); + const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; if (fields.len == 0) unreachable; const Unwrapped = UnwrapOptional(fields[0].type); @@ -123,14 +176,13 @@ pub const Formatting = struct { switch (attr) { .calling_convention => unreachable, inline else => |tag| { - const fields = std.meta.fields(@field(attributes, @tagName(tag))); + const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; if (fields.len == 0) unreachable; const Unwrapped = UnwrapOptional(fields[0].type); if (@typeInfo(Unwrapped) != .@"enum") unreachable; const enum_fields = @typeInfo(Unwrapped).@"enum".fields; - @setEvalBranchQuota(3000); const quote = comptime quoteChar(@enumFromInt(@intFromEnum(tag))); comptime var values: []const u8 = quote ++ enum_fields[0].name ++ quote; inline for (enum_fields[1..]) |enum_field| { @@ -148,7 +200,7 @@ pub fn wantsIdentEnum(attr: Tag) bool { switch (attr) { .calling_convention => return false, inline else => |tag| { - const fields = std.meta.fields(@field(attributes, @tagName(tag))); + const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; if (fields.len == 0) return false; const Unwrapped = UnwrapOptional(fields[0].type); @@ -162,7 +214,7 @@ pub fn wantsIdentEnum(attr: Tag) bool { pub fn diagnoseIdent(attr: Tag, arguments: *Arguments, ident: []const u8) ?Diagnostics.Message { switch (attr) { inline else => |tag| { - const fields = std.meta.fields(@field(attributes, @tagName(tag))); + const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; if (fields.len == 0) unreachable; const Unwrapped = UnwrapOptional(fields[0].type); if (@typeInfo(Unwrapped) != .@"enum") unreachable; @@ -181,7 +233,7 @@ pub fn diagnoseIdent(attr: Tag, arguments: *Arguments, ident: []const u8) ?Diagn pub fn wantsAlignment(attr: Tag, idx: usize) bool { 
switch (attr) { inline else => |tag| { - const fields = std.meta.fields(@field(attributes, @tagName(tag))); + const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; if (fields.len == 0) return false; return switch (idx) { @@ -195,7 +247,7 @@ pub fn wantsAlignment(attr: Tag, idx: usize) bool { pub fn diagnoseAlignment(attr: Tag, arguments: *Arguments, arg_idx: u32, res: Parser.Result, p: *Parser) !?Diagnostics.Message { switch (attr) { inline else => |tag| { - const arg_fields = std.meta.fields(@field(attributes, @tagName(tag))); + const arg_fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields; if (arg_fields.len == 0) unreachable; switch (arg_idx) { @@ -249,8 +301,7 @@ fn diagnoseField( }, .bytes => |bytes| { if (Wanted == Value) { - std.debug.assert(node.tag == .string_literal_expr); - if (!node.ty.elemType().is(.char) and !node.ty.elemType().is(.uchar)) { + if (node.tag != .string_literal_expr or (!node.ty.elemType().is(.char) and !node.ty.elemType().is(.uchar))) { return .{ .tag = .attribute_requires_string, .extra = .{ .str = decl.name }, @@ -264,7 +315,6 @@ fn diagnoseField( @field(@field(arguments, decl.name), field.name) = enum_val; return null; } else { - @setEvalBranchQuota(3000); return .{ .tag = .unknown_attr_enum, .extra = .{ .attr_enum = .{ .tag = std.meta.stringToEnum(Tag, decl.name).? 
} }, @@ -278,8 +328,19 @@ fn diagnoseField( .int => .int, .bytes => .string, .float => .float, + .complex => .complex_float, .null => .nullptr_t, - else => unreachable, + .int_ty, + .float_ty, + .complex_ty, + .ptr_ty, + .noreturn_ty, + .void_ty, + .func_ty, + .array_ty, + .vector_ty, + .record_ty, + => unreachable, }); } @@ -309,7 +370,7 @@ pub fn diagnose(attr: Tag, arguments: *Arguments, arg_idx: u32, res: Parser.Resu .tag = .attribute_too_many_args, .extra = .{ .attr_arg_count = .{ .attribute = attr, .expected = max_arg_count } }, }; - const arg_fields = std.meta.fields(@field(attributes, decl.name)); + const arg_fields = @typeInfo(@field(attributes, decl.name)).@"struct".fields; switch (arg_idx) { inline 0...arg_fields.len - 1 => |arg_i| { return diagnoseField(decl, arg_fields[arg_i], UnwrapOptional(arg_fields[arg_i].type), arguments, res, node, p); @@ -645,7 +706,7 @@ pub const Arguments = blk: { var union_fields: [decls.len]ZigType.UnionField = undefined; for (decls, &union_fields) |decl, *field| { field.* = .{ - .name = decl.name ++ "", + .name = decl.name, .type = @field(attributes, decl.name), .alignment = 0, }; @@ -730,7 +791,6 @@ pub fn applyVariableAttributes(p: *Parser, ty: Type, attr_buf_start: usize, tag: const toks = p.attr_buf.items(.tok)[attr_buf_start..]; p.attr_application_buf.items.len = 0; var base_ty = ty; - if (base_ty.specifier == .attributed) base_ty = base_ty.data.attributed.base; var common = false; var nocommon = false; for (attrs, toks) |attr, tok| switch (attr.tag) { @@ -772,15 +832,10 @@ pub fn applyVariableAttributes(p: *Parser, ty: Type, attr_buf_start: usize, tag: .copy, .tls_model, .visibility, - => std.debug.panic("apply variable attribute {s}", .{@tagName(attr.tag)}), + => |t| try p.errExtra(.attribute_todo, tok, .{ .attribute_todo = .{ .tag = t, .kind = .variables } }), else => try ignoredAttrErr(p, tok, attr.tag, "variables"), }; - const existing = ty.getAttributes(); - if (existing.len == 0 and 
p.attr_application_buf.items.len == 0) return base_ty; - if (existing.len == 0) return base_ty.withAttributes(p.arena, p.attr_application_buf.items); - - const attributed_type = try Type.Attributed.create(p.arena, base_ty, existing, p.attr_application_buf.items); - return Type{ .specifier = .attributed, .data = .{ .attributed = attributed_type } }; + return base_ty.withAttributes(p.arena, p.attr_application_buf.items); } pub fn applyFieldAttributes(p: *Parser, field_ty: *Type, attr_buf_start: usize) ![]const Attribute { @@ -789,7 +844,7 @@ pub fn applyFieldAttributes(p: *Parser, field_ty: *Type, attr_buf_start: usize) p.attr_application_buf.items.len = 0; for (attrs, toks) |attr, tok| switch (attr.tag) { // zig fmt: off - .@"packed", .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned, .mode, + .@"packed", .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned, .mode, .warn_unused_result, .nodiscard, => try p.attr_application_buf.append(p.gpa, attr), // zig fmt: on .vector_size => try attr.applyVectorSize(p, tok, field_ty), @@ -805,7 +860,6 @@ pub fn applyTypeAttributes(p: *Parser, ty: Type, attr_buf_start: usize, tag: ?Di const toks = p.attr_buf.items(.tok)[attr_buf_start..]; p.attr_application_buf.items.len = 0; var base_ty = ty; - if (base_ty.specifier == .attributed) base_ty = base_ty.data.attributed.base; for (attrs, toks) |attr, tok| switch (attr.tag) { // zig fmt: off .@"packed", .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned, .mode, @@ -823,22 +877,10 @@ pub fn applyTypeAttributes(p: *Parser, ty: Type, attr_buf_start: usize, tag: ?Di .copy, .scalar_storage_order, .nonstring, - => std.debug.panic("apply type attribute {s}", .{@tagName(attr.tag)}), + => |t| try p.errExtra(.attribute_todo, tok, .{ .attribute_todo = .{ .tag = t, .kind = .types } }), else => try ignoredAttrErr(p, tok, attr.tag, "types"), }; - - const existing = ty.getAttributes(); - // TODO: the alignment annotation on a type should override - 
// the decl it refers to. This might not be true for others. Maybe bug. - - // if there are annotations on this type def use those. - if (p.attr_application_buf.items.len > 0) { - return try base_ty.withAttributes(p.arena, p.attr_application_buf.items); - } else if (existing.len > 0) { - // else use the ones on the typedef decl we were refering to. - return try base_ty.withAttributes(p.arena, existing); - } - return base_ty; + return base_ty.withAttributes(p.arena, p.attr_application_buf.items); } pub fn applyFunctionAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Type { @@ -846,7 +888,6 @@ pub fn applyFunctionAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Typ const toks = p.attr_buf.items(.tok)[attr_buf_start..]; p.attr_application_buf.items.len = 0; var base_ty = ty; - if (base_ty.specifier == .attributed) base_ty = base_ty.data.attributed.base; var hot = false; var cold = false; var @"noinline" = false; @@ -896,6 +937,13 @@ pub fn applyFunctionAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Typ else => try p.errStr(.callconv_not_supported, tok, p.tok_ids[tok].lexeme().?), }, }, + .malloc => { + if (base_ty.returnType().isPtr()) { + try p.attr_application_buf.append(p.gpa, attr); + } else { + try ignoredAttrErr(p, tok, attr.tag, "functions that do not return pointers"); + } + }, .access, .alloc_align, .alloc_size, @@ -908,7 +956,6 @@ pub fn applyFunctionAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Typ .ifunc, .interrupt, .interrupt_handler, - .malloc, .no_address_safety_analysis, .no_icf, .no_instrument_function, @@ -937,7 +984,7 @@ pub fn applyFunctionAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Typ .visibility, .weakref, .zero_call_used_regs, - => std.debug.panic("apply type attribute {s}", .{@tagName(attr.tag)}), + => |t| try p.errExtra(.attribute_todo, tok, .{ .attribute_todo = .{ .tag = t, .kind = .functions } }), else => try ignoredAttrErr(p, tok, attr.tag, "functions"), }; return 
ty.withAttributes(p.arena, p.attr_application_buf.items); @@ -1043,11 +1090,14 @@ fn applyTransparentUnion(attr: Attribute, p: *Parser, tok: TokenIndex, ty: Type) } fn applyVectorSize(attr: Attribute, p: *Parser, tok: TokenIndex, ty: *Type) !void { - if (!(ty.isInt() or ty.isFloat()) or !ty.isReal()) { - const orig_ty = try p.typeStr(ty.*); - ty.* = Type.invalid; - return p.errStr(.invalid_vec_elem_ty, tok, orig_ty); + const base = ty.base(); + const is_enum = ty.is(.@"enum"); + if (!(ty.isInt() or ty.isFloat()) or !ty.isReal() or (is_enum and p.comp.langopts.emulate == .gcc)) { + try p.errStr(.invalid_vec_elem_ty, tok, try p.typeStr(ty.*)); + return error.ParsingFailed; } + if (is_enum) return; + const vec_bytes = attr.args.vector_size.bytes; const ty_size = ty.sizeof(p.comp).?; if (vec_bytes % ty_size != 0) { @@ -1057,7 +1107,7 @@ fn applyVectorSize(attr: Attribute, p: *Parser, tok: TokenIndex, ty: *Type) !voi const arr_ty = try p.arena.create(Type.Array); arr_ty.* = .{ .elem = ty.*, .len = vec_size }; - ty.* = Type{ + base.* = .{ .specifier = .vector, .data = .{ .array = arr_ty }, }; diff --git a/lib/compiler/aro/aro/Attribute/names.zig b/lib/compiler/aro/aro/Attribute/names.zig index d31538906664..c0732b6118be 100644 --- a/lib/compiler/aro/aro/Attribute/names.zig +++ b/lib/compiler/aro/aro/Attribute/names.zig @@ -69,6 +69,7 @@ pub const longest_name = 30; /// If found, returns the index of the node within the `dafsa` array. /// Otherwise, returns `null`. 
pub fn findInList(first_child_index: u16, char: u8) ?u16 { + @setEvalBranchQuota(206); var index = first_child_index; while (true) { if (dafsa[index].char == char) return index; @@ -787,7 +788,7 @@ const dafsa = [_]Node{ .{ .char = 'i', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 215 }, }; pub const data = blk: { - @setEvalBranchQuota(103); + @setEvalBranchQuota(721); break :blk [_]@This(){ // access .{ .tag = @enumFromInt(0), .properties = .{ .tag = .access, .gnu = true } }, diff --git a/lib/compiler/aro/aro/Builtins.zig b/lib/compiler/aro/aro/Builtins.zig index be24a3ff6091..fa92de328a2d 100644 --- a/lib/compiler/aro/aro/Builtins.zig +++ b/lib/compiler/aro/aro/Builtins.zig @@ -350,7 +350,7 @@ test Iterator { } test "All builtins" { - var comp = Compilation.init(std.testing.allocator); + var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); defer comp.deinit(); _ = try comp.generateBuiltinMacros(.include_system_defines); var arena = std.heap.ArenaAllocator.init(std.testing.allocator); @@ -373,7 +373,7 @@ test "All builtins" { test "Allocation failures" { const Test = struct { fn testOne(allocator: std.mem.Allocator) !void { - var comp = Compilation.init(allocator); + var comp = Compilation.init(allocator, std.fs.cwd()); defer comp.deinit(); _ = try comp.generateBuiltinMacros(.include_system_defines); var arena = std.heap.ArenaAllocator.init(comp.gpa); diff --git a/lib/compiler/aro/aro/Builtins/Builtin.zig b/lib/compiler/aro/aro/Builtins/Builtin.zig index c5cf98608b41..6e5217b4da32 100644 --- a/lib/compiler/aro/aro/Builtins/Builtin.zig +++ b/lib/compiler/aro/aro/Builtins/Builtin.zig @@ -71,6 +71,7 @@ pub const longest_name = 43; /// If found, returns the index of the node within the `dafsa` array. /// Otherwise, returns `null`. 
pub fn findInList(first_child_index: u16, char: u8) ?u16 { + @setEvalBranchQuota(7972); var index = first_child_index; while (true) { if (dafsa[index].char == char) return index; @@ -5165,7 +5166,7 @@ const dafsa = [_]Node{ .{ .char = 'e', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 4913 }, }; pub const data = blk: { - @setEvalBranchQuota(30_000); + @setEvalBranchQuota(27902); break :blk [_]@This(){ // _Block_object_assign .{ .tag = @enumFromInt(0), .properties = .{ .param_str = "vv*vC*iC", .header = .blocks, .attributes = .{ .lib_function_without_prefix = true } } }, diff --git a/lib/compiler/aro/aro/Builtins/eval.zig b/lib/compiler/aro/aro/Builtins/eval.zig new file mode 100644 index 000000000000..008da152d4a9 --- /dev/null +++ b/lib/compiler/aro/aro/Builtins/eval.zig @@ -0,0 +1,86 @@ +const std = @import("std"); +const backend = @import("../../backend.zig"); +const Interner = backend.Interner; +const Builtins = @import("../Builtins.zig"); +const Builtin = Builtins.Builtin; +const Parser = @import("../Parser.zig"); +const Tree = @import("../Tree.zig"); +const NodeIndex = Tree.NodeIndex; +const Type = @import("../Type.zig"); +const Value = @import("../Value.zig"); + +fn makeNan(comptime T: type, str: []const u8) T { + const UnsignedSameSize = std.meta.Int(.unsigned, @bitSizeOf(T)); + const parsed = std.fmt.parseUnsigned(UnsignedSameSize, str[0 .. 
str.len - 1], 0) catch 0; + const bits: switch (T) { + f32 => u23, + f64 => u52, + f80 => u63, + f128 => u112, + else => @compileError("Invalid type for makeNan"), + } = @truncate(parsed); + return @bitCast(@as(UnsignedSameSize, bits) | @as(UnsignedSameSize, @bitCast(std.math.nan(T)))); +} + +pub fn eval(tag: Builtin.Tag, p: *Parser, args: []const NodeIndex) !Value { + const builtin = Builtin.fromTag(tag); + if (!builtin.properties.attributes.const_evaluable) return .{}; + + switch (tag) { + Builtin.tagFromName("__builtin_inff").?, + Builtin.tagFromName("__builtin_inf").?, + Builtin.tagFromName("__builtin_infl").?, + => { + const ty: Type = switch (tag) { + Builtin.tagFromName("__builtin_inff").? => .{ .specifier = .float }, + Builtin.tagFromName("__builtin_inf").? => .{ .specifier = .double }, + Builtin.tagFromName("__builtin_infl").? => .{ .specifier = .long_double }, + else => unreachable, + }; + const f: Interner.Key.Float = switch (ty.bitSizeof(p.comp).?) { + 32 => .{ .f32 = std.math.inf(f32) }, + 64 => .{ .f64 = std.math.inf(f64) }, + 80 => .{ .f80 = std.math.inf(f80) }, + 128 => .{ .f128 = std.math.inf(f128) }, + else => unreachable, + }; + return Value.intern(p.comp, .{ .float = f }); + }, + Builtin.tagFromName("__builtin_isinf").? => blk: { + if (args.len == 0) break :blk; + const val = p.value_map.get(args[0]) orelse break :blk; + return Value.fromBool(val.isInf(p.comp)); + }, + Builtin.tagFromName("__builtin_isinf_sign").? => blk: { + if (args.len == 0) break :blk; + const val = p.value_map.get(args[0]) orelse break :blk; + switch (val.isInfSign(p.comp)) { + .unknown => {}, + .finite => return Value.zero, + .positive => return Value.one, + .negative => return Value.int(@as(i64, -1), p.comp), + } + }, + Builtin.tagFromName("__builtin_isnan").? => blk: { + if (args.len == 0) break :blk; + const val = p.value_map.get(args[0]) orelse break :blk; + return Value.fromBool(val.isNan(p.comp)); + }, + Builtin.tagFromName("__builtin_nan").? 
=> blk: { + if (args.len == 0) break :blk; + const val = p.getDecayedStringLiteral(args[0]) orelse break :blk; + const bytes = p.comp.interner.get(val.ref()).bytes; + + const f: Interner.Key.Float = switch ((Type{ .specifier = .double }).bitSizeof(p.comp).?) { + 32 => .{ .f32 = makeNan(f32, bytes) }, + 64 => .{ .f64 = makeNan(f64, bytes) }, + 80 => .{ .f80 = makeNan(f80, bytes) }, + 128 => .{ .f128 = makeNan(f128, bytes) }, + else => unreachable, + }; + return Value.intern(p.comp, .{ .float = f }); + }, + else => {}, + } + return .{}; +} diff --git a/lib/compiler/aro/aro/Compilation.zig b/lib/compiler/aro/aro/Compilation.zig index f04df5001ef1..6093bdc509f3 100644 --- a/lib/compiler/aro/aro/Compilation.zig +++ b/lib/compiler/aro/aro/Compilation.zig @@ -127,22 +127,27 @@ types: struct { } = .{}, string_interner: StrInt = .{}, interner: Interner = .{}, +/// If this is not null, the directory containing the specified Source will be searched for includes +/// Used by MS extensions which allow searching for includes relative to the directory of the main source file. ms_cwd_source_id: ?Source.Id = null, +cwd: std.fs.Dir, -pub fn init(gpa: Allocator) Compilation { +pub fn init(gpa: Allocator, cwd: std.fs.Dir) Compilation { return .{ .gpa = gpa, .diagnostics = Diagnostics.init(gpa), + .cwd = cwd, }; } /// Initialize Compilation with default environment, /// pragma handlers and emulation mode set to target. 
-pub fn initDefault(gpa: Allocator) !Compilation { +pub fn initDefault(gpa: Allocator, cwd: std.fs.Dir) !Compilation { var comp: Compilation = .{ .gpa = gpa, .environment = try Environment.loadAll(gpa), .diagnostics = Diagnostics.init(gpa), + .cwd = cwd, }; errdefer comp.deinit(); try comp.addDefaultPragmaHandlers(); @@ -534,7 +539,7 @@ pub fn generateBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefi if (system_defines_mode == .include_system_defines) { try buf.appendSlice( \\#define __VERSION__ "Aro - ++ @import("../backend.zig").version_str ++ "\"\n" ++ + ++ " " ++ @import("../backend.zig").version_str ++ "\"\n" ++ \\#define __Aro__ \\ ); @@ -550,6 +555,9 @@ pub fn generateBuiltinMacros(comp: *Compilation, system_defines_mode: SystemDefi \\#define __STDC_NO_VLA__ 1 \\#define __STDC_UTF_16__ 1 \\#define __STDC_UTF_32__ 1 + \\#define __STDC_EMBED_NOT_FOUND__ 0 + \\#define __STDC_EMBED_FOUND__ 1 + \\#define __STDC_EMBED_EMPTY__ 2 \\ ); if (comp.langopts.standard.StdCVersionMacro()) |stdc_version| { @@ -719,8 +727,13 @@ fn generateBuiltinTypes(comp: *Compilation) !void { try comp.generateNsConstantStringType(); } +pub fn float80Type(comp: *const Compilation) ?Type { + if (comp.langopts.emulate != .gcc) return null; + return target_util.float80Type(comp.target); +} + /// Smallest integer type with at least N bits -fn intLeastN(comp: *const Compilation, bits: usize, signedness: std.builtin.Signedness) Type { +pub fn intLeastN(comp: *const Compilation, bits: usize, signedness: std.builtin.Signedness) Type { if (bits == 64 and (comp.target.isDarwin() or comp.target.isWasm())) { // WebAssembly and Darwin use `long long` for `int_least64_t` and `int_fast64_t`. 
return .{ .specifier = if (signedness == .signed) .long_long else .ulong_long }; @@ -903,7 +916,7 @@ fn generateNsConstantStringType(comp: *Compilation) !void { comp.types.ns_constant_string.fields[2] = .{ .name = try StrInt.intern(comp, "str"), .ty = const_char_ptr }; comp.types.ns_constant_string.fields[3] = .{ .name = try StrInt.intern(comp, "length"), .ty = .{ .specifier = .long } }; comp.types.ns_constant_string.ty = .{ .specifier = .@"struct", .data = .{ .record = &comp.types.ns_constant_string.record } }; - record_layout.compute(&comp.types.ns_constant_string.record, comp.types.ns_constant_string.ty, comp, null); + record_layout.compute(&comp.types.ns_constant_string.record, comp.types.ns_constant_string.ty, comp, null) catch unreachable; } fn generateVaListType(comp: *Compilation) !Type { @@ -911,12 +924,12 @@ fn generateVaListType(comp: *Compilation) !Type { const kind: Kind = switch (comp.target.cpu.arch) { .aarch64 => switch (comp.target.os.tag) { .windows => @as(Kind, .char_ptr), - .ios, .macos, .tvos, .watchos, .visionos => .char_ptr, + .ios, .macos, .tvos, .watchos => .char_ptr, else => .aarch64_va_list, }, .sparc, .wasm32, .wasm64, .bpfel, .bpfeb, .riscv32, .riscv64, .avr, .spirv32, .spirv64 => .void_ptr, .powerpc => switch (comp.target.os.tag) { - .ios, .macos, .tvos, .watchos, .visionos, .aix => @as(Kind, .char_ptr), + .ios, .macos, .tvos, .watchos, .aix => @as(Kind, .char_ptr), else => return Type{ .specifier = .void }, // unknown }, .x86, .msp430 => .char_ptr, @@ -951,7 +964,7 @@ fn generateVaListType(comp: *Compilation) !Type { record_ty.fields[3] = .{ .name = try StrInt.intern(comp, "__gr_offs"), .ty = .{ .specifier = .int } }; record_ty.fields[4] = .{ .name = try StrInt.intern(comp, "__vr_offs"), .ty = .{ .specifier = .int } }; ty = .{ .specifier = .@"struct", .data = .{ .record = record_ty } }; - record_layout.compute(record_ty, ty, comp, null); + record_layout.compute(record_ty, ty, comp, null) catch unreachable; }, .x86_64_va_list => { 
const record_ty = try arena.create(Type.Record); @@ -969,7 +982,7 @@ fn generateVaListType(comp: *Compilation) !Type { record_ty.fields[2] = .{ .name = try StrInt.intern(comp, "overflow_arg_area"), .ty = void_ptr }; record_ty.fields[3] = .{ .name = try StrInt.intern(comp, "reg_save_area"), .ty = void_ptr }; ty = .{ .specifier = .@"struct", .data = .{ .record = record_ty } }; - record_layout.compute(record_ty, ty, comp, null); + record_layout.compute(record_ty, ty, comp, null) catch unreachable; }, } if (kind == .char_ptr or kind == .void_ptr) { @@ -988,13 +1001,28 @@ fn generateVaListType(comp: *Compilation) !Type { fn generateIntMax(comp: *const Compilation, w: anytype, name: []const u8, ty: Type) !void { const bit_count: u8 = @intCast(ty.sizeof(comp).? * 8); const unsigned = ty.isUnsignedInt(comp); - const max = if (bit_count == 128) - @as(u128, if (unsigned) std.math.maxInt(u128) else std.math.maxInt(u128)) - else - ty.maxInt(comp); + const max: u128 = switch (bit_count) { + 8 => if (unsigned) std.math.maxInt(u8) else std.math.maxInt(i8), + 16 => if (unsigned) std.math.maxInt(u16) else std.math.maxInt(i16), + 32 => if (unsigned) std.math.maxInt(u32) else std.math.maxInt(i32), + 64 => if (unsigned) std.math.maxInt(u64) else std.math.maxInt(i64), + 128 => if (unsigned) std.math.maxInt(u128) else std.math.maxInt(i128), + else => unreachable, + }; try w.print("#define __{s}_MAX__ {d}{s}\n", .{ name, max, ty.intValueSuffix(comp) }); } +/// Largest value that can be stored in wchar_t +pub fn wcharMax(comp: *const Compilation) u32 { + const unsigned = comp.types.wchar.isUnsignedInt(comp); + return switch (comp.types.wchar.bitSizeof(comp).?) 
{ + 8 => if (unsigned) std.math.maxInt(u8) else std.math.maxInt(i8), + 16 => if (unsigned) std.math.maxInt(u16) else std.math.maxInt(i16), + 32 => if (unsigned) std.math.maxInt(u32) else std.math.maxInt(i32), + else => unreachable, + }; +} + fn generateExactWidthIntMax(comp: *const Compilation, w: anytype, specifier: Type.Specifier) !void { var ty = Type{ .specifier = specifier }; const bit_count: u8 = @intCast(ty.sizeof(comp).? * 8); @@ -1039,6 +1067,12 @@ pub fn nextLargestIntSameSign(comp: *const Compilation, ty: Type) ?Type { return null; } +/// Maximum size of an array, in bytes +pub fn maxArrayBytes(comp: *const Compilation) u64 { + const max_bits = @min(61, comp.target.ptrBitWidth()); + return (@as(u64, 1) << @truncate(max_bits)) - 1; +} + /// If `enum E { ... }` syntax has a fixed underlying integer type regardless of the presence of /// __attribute__((packed)) or the range of values of the corresponding enumerator constants, /// specify it here. @@ -1060,7 +1094,7 @@ pub fn getCharSignedness(comp: *const Compilation) std.builtin.Signedness { pub fn addBuiltinIncludeDir(comp: *Compilation, aro_dir: []const u8) !void { var search_path = aro_dir; while (std.fs.path.dirname(search_path)) |dirname| : (search_path = dirname) { - var base_dir = std.fs.cwd().openDir(dirname, .{}) catch continue; + var base_dir = comp.cwd.openDir(dirname, .{}) catch continue; defer base_dir.close(); base_dir.access("include/stddef.h", .{}) catch continue; @@ -1266,7 +1300,7 @@ fn addSourceFromPathExtra(comp: *Compilation, path: []const u8, kind: Source.Kin return error.FileNotFound; } - const file = try std.fs.cwd().openFile(path, .{}); + const file = try comp.cwd.openFile(path, .{}); defer file.close(); const contents = file.readToEndAlloc(comp.gpa, std.math.maxInt(u32)) catch |err| switch (err) { @@ -1349,10 +1383,9 @@ pub fn hasInclude( return false; } - const cwd = std.fs.cwd(); if (std.fs.path.isAbsolute(filename)) { if (which == .next) return false; - return 
!std.meta.isError(cwd.access(filename, .{})); + return !std.meta.isError(comp.cwd.access(filename, .{})); } const cwd_source_id = switch (include_type) { @@ -1372,7 +1405,7 @@ pub fn hasInclude( while (try it.nextWithFile(filename, sf_allocator)) |found| { defer sf_allocator.free(found.path); - if (!std.meta.isError(cwd.access(found.path, .{}))) return true; + if (!std.meta.isError(comp.cwd.access(found.path, .{}))) return true; } return false; } @@ -1392,7 +1425,7 @@ fn getFileContents(comp: *Compilation, path: []const u8, limit: ?u32) ![]const u return error.FileNotFound; } - const file = try std.fs.cwd().openFile(path, .{}); + const file = try comp.cwd.openFile(path, .{}); defer file.close(); var buf = std.ArrayList(u8).init(comp.gpa); @@ -1571,6 +1604,17 @@ pub fn hasBuiltinFunction(comp: *const Compilation, builtin: Builtin) bool { } } +pub fn locSlice(comp: *const Compilation, loc: Source.Location) []const u8 { + var tmp_tokenizer = Tokenizer{ + .buf = comp.getSource(loc.id).buf, + .langopts = comp.langopts, + .index = loc.byte_offset, + .source = .generated, + }; + const tok = tmp_tokenizer.next(); + return tmp_tokenizer.buf[tok.start..tok.end]; +} + pub const CharUnitSize = enum(u32) { @"1" = 1, @"2" = 2, @@ -1590,7 +1634,7 @@ pub const addDiagnostic = Diagnostics.add; test "addSourceFromReader" { const Test = struct { fn addSourceFromReader(str: []const u8, expected: []const u8, warning_count: u32, splices: []const u32) !void { - var comp = Compilation.init(std.testing.allocator); + var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); defer comp.deinit(); var buf_reader = std.io.fixedBufferStream(str); @@ -1602,7 +1646,7 @@ test "addSourceFromReader" { } fn withAllocationFailures(allocator: std.mem.Allocator) !void { - var comp = Compilation.init(allocator); + var comp = Compilation.init(allocator, std.fs.cwd()); defer comp.deinit(); _ = try comp.addSourceFromBuffer("path", "spliced\\\nbuffer\n"); @@ -1644,7 +1688,7 @@ test 
"addSourceFromReader - exhaustive check for carriage return elimination" { const alen = alphabet.len; var buf: [alphabet.len]u8 = [1]u8{alphabet[0]} ** alen; - var comp = Compilation.init(std.testing.allocator); + var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); defer comp.deinit(); var source_count: u32 = 0; @@ -1672,7 +1716,7 @@ test "ignore BOM at beginning of file" { const Test = struct { fn run(buf: []const u8) !void { - var comp = Compilation.init(std.testing.allocator); + var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); defer comp.deinit(); var buf_reader = std.io.fixedBufferStream(buf); diff --git a/lib/compiler/aro/aro/Diagnostics.zig b/lib/compiler/aro/aro/Diagnostics.zig index 8f80e4393dc8..3039a45ef821 100644 --- a/lib/compiler/aro/aro/Diagnostics.zig +++ b/lib/compiler/aro/aro/Diagnostics.zig @@ -47,6 +47,10 @@ pub const Message = struct { tag: Attribute.Tag, specifier: enum { @"struct", @"union", @"enum" }, }, + attribute_todo: struct { + tag: Attribute.Tag, + kind: enum { variables, fields, types, functions }, + }, builtin_with_header: struct { builtin: Builtin.Tag, header: Header, @@ -210,6 +214,9 @@ pub const Options = struct { normalized: Kind = .default, @"shift-count-negative": Kind = .default, @"shift-count-overflow": Kind = .default, + @"constant-conversion": Kind = .default, + @"sign-conversion": Kind = .default, + nonnull: Kind = .default, }; const Diagnostics = @This(); @@ -222,14 +229,14 @@ errors: u32 = 0, macro_backtrace_limit: u32 = 6, pub fn warningExists(name: []const u8) bool { - inline for (std.meta.fields(Options)) |f| { + inline for (@typeInfo(Options).@"struct".fields) |f| { if (mem.eql(u8, f.name, name)) return true; } return false; } pub fn set(d: *Diagnostics, name: []const u8, to: Kind) !void { - inline for (std.meta.fields(Options)) |f| { + inline for (@typeInfo(Options).@"struct".fields) |f| { if (mem.eql(u8, f.name, name)) { @field(d.options, f.name) = to; return; @@ -422,6 +429,10 @@ pub 
fn renderMessage(comp: *Compilation, m: anytype, msg: Message) void { @tagName(msg.extra.ignored_record_attr.tag), @tagName(msg.extra.ignored_record_attr.specifier), }), + .attribute_todo => printRt(m, prop.msg, .{ "{s}", "{s}" }, .{ + @tagName(msg.extra.attribute_todo.tag), + @tagName(msg.extra.attribute_todo.kind), + }), .builtin_with_header => printRt(m, prop.msg, .{ "{s}", "{s}" }, .{ @tagName(msg.extra.builtin_with_header.header), Builtin.nameFromTag(msg.extra.builtin_with_header.builtin).span(), diff --git a/lib/compiler/aro/aro/Diagnostics/messages.zig b/lib/compiler/aro/aro/Diagnostics/messages.zig index acc5fd562c27..c56641a4614d 100644 --- a/lib/compiler/aro/aro/Diagnostics/messages.zig +++ b/lib/compiler/aro/aro/Diagnostics/messages.zig @@ -107,6 +107,9 @@ pub const Tag = enum { multiple_default, previous_case, expected_arguments, + callee_with_static_array, + array_argument_too_small, + non_null_argument, expected_arguments_old, expected_at_least_arguments, invalid_static_star, @@ -214,6 +217,7 @@ pub const Tag = enum { pre_c23_compat, unbound_vla, array_too_large, + record_too_large, incompatible_ptr_init, incompatible_ptr_init_sign, incompatible_ptr_assign, @@ -349,6 +353,8 @@ pub const Tag = enum { non_standard_escape_char, invalid_pp_stringify_escape, vla, + int_value_changed, + sign_conversion, float_overflow_conversion, float_out_of_range, float_zero_conversion, @@ -425,7 +431,8 @@ pub const Tag = enum { bit_int, unsigned_bit_int_too_small, signed_bit_int_too_small, - bit_int_too_big, + unsigned_bit_int_too_big, + signed_bit_int_too_big, keyword_macro, ptr_arithmetic_incomplete, callconv_not_supported, @@ -509,6 +516,9 @@ pub const Tag = enum { complex_conj, overflow_builtin_requires_int, overflow_result_requires_ptr, + attribute_todo, + invalid_type_underlying_enum, + auto_type_self_initialized, pub fn property(tag: Tag) Properties { return named_data[@intFromEnum(tag)]; @@ -613,6 +623,9 @@ pub const Tag = enum { .{ .msg = "multiple default cases 
in the same switch", .kind = .@"error" }, .{ .msg = "previous case defined here", .kind = .note }, .{ .msg = expected_arguments, .extra = .arguments, .kind = .@"error" }, + .{ .msg = "callee declares array parameter as static here", .kind = .note }, + .{ .msg = "array argument is too small; contains {d} elements, callee requires at least {d}", .extra = .arguments, .kind = .warning, .opt = W("array-bounds") }, + .{ .msg = "null passed to a callee that requires a non-null argument", .kind = .warning, .opt = W("nonnull") }, .{ .msg = expected_arguments, .extra = .arguments, .kind = .warning }, .{ .msg = "expected at least {d} argument(s) got {d}", .extra = .arguments, .kind = .warning }, .{ .msg = "'static' may not be used with an unspecified variable length array size", .kind = .@"error" }, @@ -720,6 +733,7 @@ pub const Tag = enum { .{ .msg = "{s} is incompatible with C standards before C23", .extra = .str, .kind = .off, .suppress_unless_version = .c23, .opt = W("pre-c23-compat") }, .{ .msg = "variable length array must be bound in function definition", .kind = .@"error" }, .{ .msg = "array is too large", .kind = .@"error" }, + .{ .msg = "type '{s}' is too large", .kind = .@"error", .extra = .str }, .{ .msg = "incompatible pointer types initializing {s}", .extra = .str, .opt = W("incompatible-pointer-types"), .kind = .warning }, .{ .msg = "incompatible pointer types initializing {s}" ++ pointer_sign_message, .extra = .str, .opt = W("pointer-sign"), .kind = .warning }, .{ .msg = "incompatible pointer types assigning to {s}", .extra = .str, .opt = W("incompatible-pointer-types"), .kind = .warning }, @@ -855,6 +869,8 @@ pub const Tag = enum { .{ .msg = "use of non-standard escape character '\\{s}'", .kind = .off, .opt = W("pedantic"), .extra = .invalid_escape }, .{ .msg = "invalid string literal, ignoring final '\\'", .kind = .warning }, .{ .msg = "variable length array used", .kind = .off, .opt = W("vla") }, + .{ .msg = "implicit conversion from {s}", .extra = .str, 
.kind = .warning, .opt = W("constant-conversion") }, + .{ .msg = "implicit conversion changes signedness: {s}", .extra = .str, .kind = .off, .opt = W("sign-conversion") }, .{ .msg = "implicit conversion of non-finite value from {s} is undefined", .extra = .str, .kind = .off, .opt = W("float-overflow-conversion") }, .{ .msg = "implicit conversion of out of range value from {s} is undefined", .extra = .str, .kind = .warning, .opt = W("literal-conversion") }, .{ .msg = "implicit conversion from {s}", .extra = .str, .kind = .off, .opt = W("float-zero-conversion") }, @@ -929,9 +945,10 @@ pub const Tag = enum { .{ .msg = "this declarator", .kind = .note }, .{ .msg = "{s} is not supported on this target", .extra = .str, .kind = .@"error" }, .{ .msg = "'_BitInt' in C17 and earlier is a Clang extension'", .kind = .off, .pedantic = true, .opt = W("bit-int-extension"), .suppress_version = .c23 }, - .{ .msg = "{s} must have a bit size of at least 1", .extra = .str, .kind = .@"error" }, - .{ .msg = "{s} must have a bit size of at least 2", .extra = .str, .kind = .@"error" }, - .{ .msg = "{s} of bit sizes greater than " ++ std.fmt.comptimePrint("{d}", .{Properties.max_bits}) ++ " not supported", .extra = .str, .kind = .@"error" }, + .{ .msg = "{s}unsigned _BitInt must have a bit size of at least 1", .extra = .str, .kind = .@"error" }, + .{ .msg = "{s}signed _BitInt must have a bit size of at least 2", .extra = .str, .kind = .@"error" }, + .{ .msg = "{s}unsigned _BitInt of bit sizes greater than " ++ std.fmt.comptimePrint("{d}", .{Properties.max_bits}) ++ " not supported", .extra = .str, .kind = .@"error" }, + .{ .msg = "{s}signed _BitInt of bit sizes greater than " ++ std.fmt.comptimePrint("{d}", .{Properties.max_bits}) ++ " not supported", .extra = .str, .kind = .@"error" }, .{ .msg = "keyword is hidden by macro definition", .kind = .off, .pedantic = true, .opt = W("keyword-macro") }, .{ .msg = "arithmetic on a pointer to an incomplete type '{s}'", .extra = .str, .kind = 
.@"error" }, .{ .msg = "'{s}' calling convention is not supported for this target", .extra = .str, .opt = W("ignored-attributes"), .kind = .warning }, @@ -1015,6 +1032,9 @@ pub const Tag = enum { .{ .msg = "ISO C does not support '~' for complex conjugation of '{s}'", .opt = W("pedantic"), .extra = .str, .kind = .off }, .{ .msg = "operand argument to overflow builtin must be an integer ('{s}' invalid)", .extra = .str, .kind = .@"error" }, .{ .msg = "result argument to overflow builtin must be a pointer to a non-const integer ('{s}' invalid)", .extra = .str, .kind = .@"error" }, + .{ .msg = "TODO: implement '{s}' attribute for {s}", .extra = .attribute_todo, .kind = .@"error" }, + .{ .msg = "non-integral type '{s}' is an invalid underlying type", .extra = .str, .kind = .@"error" }, + .{ .msg = "variable '{s}' declared with deduced type '__auto_type' cannot appear in its own initializer", .extra = .str, .kind = .@"error" }, }; }; }; diff --git a/lib/compiler/aro/aro/Driver.zig b/lib/compiler/aro/aro/Driver.zig index 6876395b8ac5..7bdfd2c81e6e 100644 --- a/lib/compiler/aro/aro/Driver.zig +++ b/lib/compiler/aro/aro/Driver.zig @@ -47,6 +47,20 @@ color: ?bool = null, nobuiltininc: bool = false, nostdinc: bool = false, nostdlibinc: bool = false, +debug_dump_letters: packed struct(u3) { + d: bool = false, + m: bool = false, + n: bool = false, + + /// According to GCC, specifying letters whose behavior conflicts is undefined. 
+ /// We follow clang in that `-dM` always takes precedence over `-dD` + pub fn getPreprocessorDumpMode(self: @This()) Preprocessor.DumpMode { + if (self.m) return .macros_only; + if (self.d) return .macros_and_result; + if (self.n) return .macro_names_and_result; + return .result_only; + } +} = .{}, /// Full path to the aro executable aro_name: []const u8 = "", @@ -92,6 +106,9 @@ pub const usage = \\ \\Compile options: \\ -c, --compile Only run preprocess, compile, and assemble steps + \\ -dM Output #define directives for all the macros defined during the execution of the preprocessor + \\ -dD Like -dM except that it outputs both the #define directives and the result of preprocessing + \\ -dN Like -dD, but emit only the macro names, not their expansions. \\ -D = Define to (defaults to 1) \\ -E Only run the preprocessor \\ -fchar8_t Enable char8_t (enabled by default in C23 and later) @@ -234,6 +251,12 @@ pub fn parseArgs( d.system_defines = .no_system_defines; } else if (mem.eql(u8, arg, "-c") or mem.eql(u8, arg, "--compile")) { d.only_compile = true; + } else if (mem.eql(u8, arg, "-dD")) { + d.debug_dump_letters.d = true; + } else if (mem.eql(u8, arg, "-dM")) { + d.debug_dump_letters.m = true; + } else if (mem.eql(u8, arg, "-dN")) { + d.debug_dump_letters.n = true; } else if (mem.eql(u8, arg, "-E")) { d.only_preprocess = true; } else if (mem.eql(u8, arg, "-P") or mem.eql(u8, arg, "--no-line-commands")) { @@ -636,13 +659,17 @@ fn processSource( if (d.comp.langopts.ms_extensions) { d.comp.ms_cwd_source_id = source.id; } - + const dump_mode = d.debug_dump_letters.getPreprocessorDumpMode(); if (d.verbose_pp) pp.verbose = true; if (d.only_preprocess) { pp.preserve_whitespace = true; if (d.line_commands) { pp.linemarkers = if (d.use_line_directives) .line_directives else .numeric_directives; } + switch (dump_mode) { + .macros_and_result, .macro_names_and_result => pp.store_macro_tokens = true, + .result_only, .macros_only => {}, + } } try pp.preprocessSources(&.{ 
source, builtin, user_macros }); @@ -663,7 +690,8 @@ fn processSource( defer if (d.output_name != null) file.close(); var buf_w = std.io.bufferedWriter(file.writer()); - pp.prettyPrintTokens(buf_w.writer()) catch |er| + + pp.prettyPrintTokens(buf_w.writer(), dump_mode) catch |er| return d.fatal("unable to write result: {s}", .{errorDescription(er)}); buf_w.flush() catch |er| diff --git a/lib/compiler/aro/aro/Driver/Filesystem.zig b/lib/compiler/aro/aro/Driver/Filesystem.zig index a81f31375306..07cbeac03c72 100644 --- a/lib/compiler/aro/aro/Driver/Filesystem.zig +++ b/lib/compiler/aro/aro/Driver/Filesystem.zig @@ -56,7 +56,7 @@ fn existsFake(entries: []const Filesystem.Entry, path: []const u8) bool { } fn canExecutePosix(path: []const u8) bool { - std.os.access(path, std.os.X_OK) catch return false; + std.posix.access(path, std.posix.X_OK) catch return false; // Todo: ensure path is not a directory return true; } @@ -173,7 +173,7 @@ pub const Filesystem = union(enum) { pub fn exists(fs: Filesystem, path: []const u8) bool { switch (fs) { .real => { - std.os.access(path, std.os.F_OK) catch return false; + std.fs.cwd().access(path, .{}) catch return false; return true; }, .fake => |paths| return existsFake(paths, path), diff --git a/lib/compiler/aro/aro/Hideset.zig b/lib/compiler/aro/aro/Hideset.zig index 433be9f39324..ad8a089ae629 100644 --- a/lib/compiler/aro/aro/Hideset.zig +++ b/lib/compiler/aro/aro/Hideset.zig @@ -46,15 +46,15 @@ const Item = struct { const List = std.MultiArrayList(Item); }; -const Index = enum(u32) { +pub const Index = enum(u32) { none = std.math.maxInt(u32), _, }; map: std.AutoHashMapUnmanaged(Identifier, Index) = .{}, -/// Used for computing intersection of two lists; stored here so that allocations can be retained +/// Used for computing union/intersection of two lists; stored here so that allocations can be retained /// until hideset is deinit'ed -intersection_map: std.AutoHashMapUnmanaged(Identifier, void) = .{}, +tmp_map: 
std.AutoHashMapUnmanaged(Identifier, void) = .{}, linked_list: Item.List = .{}, comp: *const Compilation, @@ -72,7 +72,7 @@ const Iterator = struct { pub fn deinit(self: *Hideset) void { self.map.deinit(self.comp.gpa); - self.intersection_map.deinit(self.comp.gpa); + self.tmp_map.deinit(self.comp.gpa); self.linked_list.deinit(self.comp.gpa); } @@ -83,7 +83,7 @@ pub fn clearRetainingCapacity(self: *Hideset) void { pub fn clearAndFree(self: *Hideset) void { self.map.clearAndFree(self.comp.gpa); - self.intersection_map.clearAndFree(self.comp.gpa); + self.tmp_map.clearAndFree(self.comp.gpa); self.linked_list.shrinkAndFree(self.comp.gpa, 0); } @@ -109,8 +109,13 @@ fn ensureUnusedCapacity(self: *Hideset, new_size: usize) !void { /// Creates a one-item list with contents `identifier` fn createNodeAssumeCapacity(self: *Hideset, identifier: Identifier) Index { + return self.createNodeAssumeCapacityExtra(identifier, .none); +} + +/// Creates a one-item list with contents `identifier` +fn createNodeAssumeCapacityExtra(self: *Hideset, identifier: Identifier, next: Index) Index { const next_idx = self.linked_list.len; - self.linked_list.appendAssumeCapacity(.{ .identifier = identifier }); + self.linked_list.appendAssumeCapacity(.{ .identifier = identifier, .next = next }); return @enumFromInt(next_idx); } @@ -121,24 +126,24 @@ pub fn prepend(self: *Hideset, loc: Source.Location, tail: Index) !Index { return @enumFromInt(new_idx); } -/// Copy a, then attach b at the end +/// Attach elements of `b` to the front of `a` (if they're not in `a`) pub fn @"union"(self: *Hideset, a: Index, b: Index) !Index { - var cur: Index = .none; + if (a == .none) return b; + if (b == .none) return a; + self.tmp_map.clearRetainingCapacity(); + + var it = self.iterator(b); + while (it.next()) |identifier| { + try self.tmp_map.put(self.comp.gpa, identifier, {}); + } + var head: Index = b; try self.ensureUnusedCapacity(self.len(a)); - var it = self.iterator(a); + it = self.iterator(a); while 
(it.next()) |identifier| { - const new_idx = self.createNodeAssumeCapacity(identifier); - if (head == b) { - head = new_idx; + if (!self.tmp_map.contains(identifier)) { + head = self.createNodeAssumeCapacityExtra(identifier, head); } - if (cur != .none) { - self.linked_list.items(.next)[@intFromEnum(cur)] = new_idx; - } - cur = new_idx; - } - if (cur != .none) { - self.linked_list.items(.next)[@intFromEnum(cur)] = b; } return head; } @@ -163,20 +168,20 @@ fn len(self: *const Hideset, list: Index) usize { pub fn intersection(self: *Hideset, a: Index, b: Index) !Index { if (a == .none or b == .none) return .none; - self.intersection_map.clearRetainingCapacity(); + self.tmp_map.clearRetainingCapacity(); var cur: Index = .none; var head: Index = .none; var it = self.iterator(a); var a_len: usize = 0; while (it.next()) |identifier| : (a_len += 1) { - try self.intersection_map.put(self.comp.gpa, identifier, {}); + try self.tmp_map.put(self.comp.gpa, identifier, {}); } try self.ensureUnusedCapacity(@min(a_len, self.len(b))); it = self.iterator(b); while (it.next()) |identifier| { - if (self.intersection_map.contains(identifier)) { + if (self.tmp_map.contains(identifier)) { const new_idx = self.createNodeAssumeCapacity(identifier); if (head == .none) { head = new_idx; diff --git a/lib/compiler/aro/aro/Parser.zig b/lib/compiler/aro/aro/Parser.zig index d8d6f9d71c7b..0a8907b23a74 100644 --- a/lib/compiler/aro/aro/Parser.zig +++ b/lib/compiler/aro/aro/Parser.zig @@ -28,6 +28,7 @@ const StrInt = @import("StringInterner.zig"); const StringId = StrInt.StringId; const Builtins = @import("Builtins.zig"); const Builtin = Builtins.Builtin; +const evalBuiltin = @import("Builtins/eval.zig").eval; const target_util = @import("target.zig"); const Switch = struct { @@ -100,7 +101,7 @@ value_map: Tree.ValueMap, // buffers used during compilation syms: SymbolStack = .{}, -strings: std.ArrayList(u8), +strings: std.ArrayListAligned(u8, 4), labels: std.ArrayList(Label), list_buf: NodeList, 
decl_buf: NodeList, @@ -130,6 +131,10 @@ const_decl_folding: ConstDeclFoldingMode = .fold_const_decls, /// address-of-label expression (tracked with contains_address_of_label) computed_goto_tok: ?TokenIndex = null, +/// __auto_type may only be used with a single declarator. Keep track of the name +/// so that it is not used in its own initializer. +auto_type_decl_name: StringId = .empty, + /// Various variables that are different for each function. func: struct { /// null if not in function, will always be plain func, var_args_func or old_style_func @@ -160,7 +165,7 @@ record: struct { } fn addFieldsFromAnonymous(r: @This(), p: *Parser, ty: Type) Error!void { - for (ty.data.record.fields) |f| { + for (ty.getRecord().?.fields) |f| { if (f.isAnonymousRecord()) { try r.addFieldsFromAnonymous(p, f.ty.canonicalize(.standard)); } else if (f.name_tok != 0) { @@ -470,7 +475,7 @@ pub fn typePairStrExtra(p: *Parser, a: Type, msg: []const u8, b: Type) ![]const return try p.comp.diagnostics.arena.allocator().dupe(u8, p.strings.items[strings_top..]); } -pub fn floatValueChangedStr(p: *Parser, res: *Result, old_value: Value, int_ty: Type) ![]const u8 { +pub fn valueChangedStr(p: *Parser, res: *Result, old_value: Value, int_ty: Type) ![]const u8 { const strings_top = p.strings.items.len; defer p.strings.items.len = strings_top; @@ -572,6 +577,14 @@ fn nodeIs(p: *Parser, node: NodeIndex, tag: Tree.Tag) bool { return p.getNode(node, tag) != null; } +pub fn getDecayedStringLiteral(p: *Parser, node: NodeIndex) ?Value { + const cast_node = p.getNode(node, .implicit_cast) orelse return null; + const data = p.nodes.items(.data)[@intFromEnum(cast_node)]; + if (data.cast.kind != .array_to_pointer) return null; + const literal_node = p.getNode(data.cast.operand, .string_literal_expr) orelse return null; + return p.value_map.get(literal_node); +} + fn getNode(p: *Parser, node: NodeIndex, tag: Tree.Tag) ?NodeIndex { var cur = node; const tags = p.nodes.items(.tag); @@ -680,7 +693,7 @@ pub fn 
parse(pp: *Preprocessor) Compilation.Error!Tree { .gpa = pp.comp.gpa, .arena = arena.allocator(), .tok_ids = pp.tokens.items(.id), - .strings = std.ArrayList(u8).init(pp.comp.gpa), + .strings = std.ArrayListAligned(u8, 4).init(pp.comp.gpa), .value_map = Tree.ValueMap.init(pp.comp.gpa), .data = NodeList.init(pp.comp.gpa), .labels = std.ArrayList(Label).init(pp.comp.gpa), @@ -725,7 +738,7 @@ pub fn parse(pp: *Preprocessor) Compilation.Error!Tree { defer p.syms.popScope(); // NodeIndex 0 must be invalid - _ = try p.addNode(.{ .tag = .invalid, .ty = undefined, .data = undefined }); + _ = try p.addNode(.{ .tag = .invalid, .ty = undefined, .data = undefined, .loc = undefined }); { if (p.comp.langopts.hasChar8_T()) { @@ -747,6 +760,10 @@ pub fn parse(pp: *Preprocessor) Compilation.Error!Tree { if (ty.isArray()) ty.decayArray(); try p.syms.defineTypedef(&p, try StrInt.intern(p.comp, "__NSConstantString"), pp.comp.types.ns_constant_string.ty, 0, .none); + + if (p.comp.float80Type()) |float80_ty| { + try p.syms.defineTypedef(&p, try StrInt.intern(p.comp, "__float80"), float80_ty, 0, .none); + } } while (p.eatToken(.eof) == null) { @@ -862,6 +879,8 @@ fn nextExternDecl(p: *Parser) void { .keyword_int, .keyword_long, .keyword_signed, + .keyword_signed1, + .keyword_signed2, .keyword_unsigned, .keyword_float, .keyword_double, @@ -1018,10 +1037,8 @@ fn decl(p: *Parser) Error!bool { // Collect old style parameter declarations. 
if (init_d.d.old_style_func != null) { - const attrs = init_d.d.ty.getAttributes(); - var base_ty = if (init_d.d.ty.specifier == .attributed) init_d.d.ty.data.attributed.base else init_d.d.ty; + var base_ty = init_d.d.ty.base(); base_ty.specifier = .func; - init_d.d.ty = try base_ty.withAttributes(p.arena, attrs); const param_buf_top = p.param_buf.items.len; defer p.param_buf.items.len = param_buf_top; @@ -1116,6 +1133,7 @@ fn decl(p: *Parser) Error!bool { .ty = init_d.d.ty, .tag = try decl_spec.validateFnDef(p), .data = .{ .decl = .{ .name = init_d.d.name, .node = body } }, + .loc = @enumFromInt(init_d.d.name), }); try p.decl_buf.append(node); @@ -1142,9 +1160,18 @@ fn decl(p: *Parser) Error!bool { if (init_d.d.old_style_func) |tok_i| try p.errTok(.invalid_old_style_params, tok_i); const tag = try decl_spec.validate(p, &init_d.d.ty, init_d.initializer.node != .none); - const node = try p.addNode(.{ .ty = init_d.d.ty, .tag = tag, .data = .{ - .decl = .{ .name = init_d.d.name, .node = init_d.initializer.node }, - } }); + const tok = switch (decl_spec.storage_class) { + .auto, .@"extern", .register, .static, .typedef => |tok| tok, + .none => init_d.d.name, + }; + const node = try p.addNode(.{ + .ty = init_d.d.ty, + .tag = tag, + .data = .{ + .decl = .{ .name = init_d.d.name, .node = init_d.initializer.node }, + }, + .loc = @enumFromInt(tok), + }); try p.decl_buf.append(node); const interned_name = try StrInt.intern(p.comp, p.tokSlice(init_d.d.name)); @@ -1287,6 +1314,7 @@ fn staticAssert(p: *Parser) Error!bool { .lhs = res.node, .rhs = str.node, } }, + .loc = @enumFromInt(static_assert), }); try p.decl_buf.append(node); return true; @@ -1407,6 +1435,8 @@ fn typeof(p: *Parser) Error!?Type { const l_paren = try p.expectToken(.l_paren); if (try p.typeName()) |ty| { try p.expectClosing(l_paren, .r_paren); + if (ty.is(.invalid)) return null; + const typeof_ty = try p.arena.create(Type); typeof_ty.* = .{ .data = ty.data, @@ -1428,6 +1458,8 @@ fn typeof(p: *Parser) 
Error!?Type { .specifier = .nullptr_t, .qual = if (unqual) .{} else typeof_expr.ty.qual.inheritFromTypeof(), }; + } else if (typeof_expr.ty.is(.invalid)) { + return null; } const inner = try p.arena.create(Type.Expr); @@ -1774,6 +1806,8 @@ fn initDeclarator(p: *Parser, decl_spec: *DeclSpec, attr_buf_top: usize) Error!? } else { apply_var_attributes = true; } + const c23_auto = init_d.d.ty.is(.c23_auto); + const auto_type = init_d.d.ty.is(.auto_type); if (p.eatToken(.equal)) |eq| init: { if (decl_spec.storage_class == .typedef or @@ -1801,19 +1835,21 @@ fn initDeclarator(p: *Parser, decl_spec: *DeclSpec, attr_buf_top: usize) Error!? const interned_name = try StrInt.intern(p.comp, p.tokSlice(init_d.d.name)); try p.syms.declareSymbol(p, interned_name, init_d.d.ty, init_d.d.name, .none); + if (c23_auto or auto_type) { + p.auto_type_decl_name = interned_name; + } + defer p.auto_type_decl_name = .empty; + var init_list_expr = try p.initializer(init_d.d.ty); init_d.initializer = init_list_expr; if (!init_list_expr.ty.isArray()) break :init; - if (init_d.d.ty.specifier == .incomplete_array) { - // Modifying .data is exceptionally allowed for .incomplete_array. - init_d.d.ty.data.array.len = init_list_expr.ty.arrayLen() orelse break :init; - init_d.d.ty.specifier = .array; + if (init_d.d.ty.is(.incomplete_array)) { + init_d.d.ty.setIncompleteArrayLen(init_list_expr.ty.arrayLen() orelse break :init); } } const name = init_d.d.name; - const c23_auto = init_d.d.ty.is(.c23_auto); - if (init_d.d.ty.is(.auto_type) or c23_auto) { + if (auto_type or c23_auto) { if (init_d.initializer.node == .none) { init_d.d.ty = Type.invalid; if (c23_auto) { @@ -1872,6 +1908,8 @@ fn initDeclarator(p: *Parser, decl_spec: *DeclSpec, attr_buf_top: usize) Error!? 
/// | keyword_float /// | keyword_double /// | keyword_signed +/// | keyword_signed1 +/// | keyword_signed2 /// | keyword_unsigned /// | keyword_bool /// | keyword_c23_bool @@ -1911,14 +1949,13 @@ fn typeSpec(p: *Parser, ty: *Type.Builder) Error!bool { .keyword_long => try ty.combine(p, .long, p.tok_i), .keyword_int64, .keyword_int64_2 => try ty.combine(p, .long_long, p.tok_i), .keyword_int128 => try ty.combine(p, .int128, p.tok_i), - .keyword_signed => try ty.combine(p, .signed, p.tok_i), + .keyword_signed, .keyword_signed1, .keyword_signed2 => try ty.combine(p, .signed, p.tok_i), .keyword_unsigned => try ty.combine(p, .unsigned, p.tok_i), .keyword_fp16 => try ty.combine(p, .fp16, p.tok_i), .keyword_float16 => try ty.combine(p, .float16, p.tok_i), .keyword_float => try ty.combine(p, .float, p.tok_i), .keyword_double => try ty.combine(p, .double, p.tok_i), .keyword_complex => try ty.combine(p, .complex, p.tok_i), - .keyword_float80 => try ty.combine(p, .float80, p.tok_i), .keyword_float128_1, .keyword_float128_2 => { if (!p.comp.hasFloat128()) { try p.errStr(.type_not_supported_on_target, p.tok_i, p.tok_ids[p.tok_i].lexeme().?); @@ -2128,6 +2165,7 @@ fn recordSpec(p: *Parser) Error!Type { .tag = if (is_struct) .struct_forward_decl else .union_forward_decl, .ty = ty, .data = .{ .decl_ref = ident }, + .loc = @enumFromInt(ident), })); return ty; } @@ -2248,19 +2286,22 @@ fn recordSpec(p: *Parser) Error!Type { // TODO: msvc considers `#pragma pack` on a per-field basis .msvc => p.pragma_pack, }; - record_layout.compute(record_ty, ty, p.comp, pragma_pack_value); + record_layout.compute(record_ty, ty, p.comp, pragma_pack_value) catch |er| switch (er) { + error.Overflow => try p.errStr(.record_too_large, maybe_ident orelse kind_tok, try p.typeStr(ty)), + }; } // finish by creating a node var node: Tree.Node = .{ .tag = if (is_struct) .struct_decl_two else .union_decl_two, .ty = ty, - .data = .{ .bin = .{ .lhs = .none, .rhs = .none } }, + .data = .{ .two = .{ .none, .none 
} }, + .loc = @enumFromInt(maybe_ident orelse kind_tok), }; switch (record_decls.len) { 0 => {}, - 1 => node.data = .{ .bin = .{ .lhs = record_decls[0], .rhs = .none } }, - 2 => node.data = .{ .bin = .{ .lhs = record_decls[0], .rhs = record_decls[1] } }, + 1 => node.data = .{ .two = .{ record_decls[0], .none } }, + 2 => node.data = .{ .two = .{ record_decls[0], record_decls[1] } }, else => { node.tag = if (is_struct) .struct_decl else .union_decl; node.data = .{ .range = try p.addList(record_decls) }; @@ -2383,6 +2424,7 @@ fn recordDeclarator(p: *Parser) Error!bool { .tag = .indirect_record_field_decl, .ty = ty, .data = undefined, + .loc = @enumFromInt(first_tok), }); try p.decl_buf.append(node); try p.record.addFieldsFromAnonymous(p, ty); @@ -2402,6 +2444,7 @@ fn recordDeclarator(p: *Parser) Error!bool { .tag = .record_field_decl, .ty = ty, .data = .{ .decl = .{ .name = name_tok, .node = bits_node } }, + .loc = @enumFromInt(if (name_tok != 0) name_tok else first_tok), }); try p.decl_buf.append(node); } @@ -2461,7 +2504,8 @@ fn enumSpec(p: *Parser) Error!Type { const maybe_ident = try p.eatIdentifier(); const fixed_ty = if (p.eatToken(.colon)) |colon| fixed: { - const fixed = (try p.typeName()) orelse { + const ty_start = p.tok_i; + const fixed = (try p.specQual()) orelse { if (p.record.kind != .invalid) { // This is a bit field. 
p.tok_i -= 1; @@ -2471,6 +2515,12 @@ fn enumSpec(p: *Parser) Error!Type { try p.errTok(.enum_fixed, colon); break :fixed null; }; + + if (!fixed.isInt() or fixed.is(.@"enum")) { + try p.errStr(.invalid_type_underlying_enum, ty_start, try p.typeStr(fixed)); + break :fixed Type.int; + } + try p.errTok(.enum_fixed, colon); break :fixed fixed; } else null; @@ -2505,6 +2555,7 @@ fn enumSpec(p: *Parser) Error!Type { .tag = .enum_forward_decl, .ty = ty, .data = .{ .decl_ref = ident }, + .loc = @enumFromInt(ident), })); return ty; } @@ -2587,7 +2638,7 @@ fn enumSpec(p: *Parser) Error!Type { continue; const symbol = p.syms.getPtr(field.name, .vars); - try symbol.val.intCast(dest_ty, p.comp); + _ = try symbol.val.intCast(dest_ty, p.comp); symbol.ty = dest_ty; p.nodes.items(.ty)[@intFromEnum(field_nodes[i])] = dest_ty; field.ty = dest_ty; @@ -2615,13 +2666,18 @@ fn enumSpec(p: *Parser) Error!Type { } // finish by creating a node - var node: Tree.Node = .{ .tag = .enum_decl_two, .ty = ty, .data = .{ - .bin = .{ .lhs = .none, .rhs = .none }, - } }; + var node: Tree.Node = .{ + .tag = .enum_decl_two, + .ty = ty, + .data = .{ + .two = .{ .none, .none }, + }, + .loc = @enumFromInt(maybe_ident orelse enum_tok), + }; switch (field_nodes.len) { 0 => {}, - 1 => node.data = .{ .bin = .{ .lhs = field_nodes[0], .rhs = .none } }, - 2 => node.data = .{ .bin = .{ .lhs = field_nodes[0], .rhs = field_nodes[1] } }, + 1 => node.data = .{ .two = .{ field_nodes[0], .none } }, + 2 => node.data = .{ .two = .{ field_nodes[0], field_nodes[1] } }, else => { node.tag = .enum_decl; node.data = .{ .range = try p.addList(field_nodes) }; @@ -2679,8 +2735,6 @@ const Enumerator = struct { return; } if (try e.res.val.add(e.res.val, Value.one, e.res.ty, p.comp)) { - const byte_size = e.res.ty.sizeof(p.comp).?; - const bit_size: u8 = @intCast(if (e.res.ty.isUnsignedInt(p.comp)) byte_size * 8 else byte_size * 8 - 1); if (e.fixed) { try p.errStr(.enum_not_representable_fixed, tok, try p.typeStr(e.res.ty)); 
return; @@ -2689,6 +2743,8 @@ const Enumerator = struct { try p.errTok(.enumerator_overflow, tok); break :blk larger; } else blk: { + const signed = !e.res.ty.isUnsignedInt(p.comp); + const bit_size: u8 = @intCast(e.res.ty.bitSizeof(p.comp).? - @intFromBool(signed)); try p.errExtra(.enum_not_representable, tok, .{ .pow_2_as_string = bit_size }); break :blk Type{ .specifier = .ulong_long }; }; @@ -2792,14 +2848,12 @@ fn enumerator(p: *Parser, e: *Enumerator) Error!?EnumFieldAndNode { if (err_start == p.comp.diagnostics.list.items.len) { // only do these warnings if we didn't already warn about overflow or non-representable values if (e.res.val.compare(.lt, Value.zero, p.comp)) { - const min_int = (Type{ .specifier = .int }).minInt(p.comp); - const min_val = try Value.int(min_int, p.comp); + const min_val = try Value.minInt(Type.int, p.comp); if (e.res.val.compare(.lt, min_val, p.comp)) { try p.errStr(.enumerator_too_small, name_tok, try e.res.str(p)); } } else { - const max_int = (Type{ .specifier = .int }).maxInt(p.comp); - const max_val = try Value.int(max_int, p.comp); + const max_val = try Value.maxInt(Type.int, p.comp); if (e.res.val.compare(.gt, max_val, p.comp)) { try p.errStr(.enumerator_too_large, name_tok, try e.res.str(p)); } @@ -2815,6 +2869,7 @@ fn enumerator(p: *Parser, e: *Enumerator) Error!?EnumFieldAndNode { .name = name_tok, .node = res.node, } }, + .loc = @enumFromInt(name_tok), }); try p.value_map.put(node, e.res.val); return EnumFieldAndNode{ .field = .{ @@ -2991,15 +3046,12 @@ fn directDeclarator(p: *Parser, base_type: Type, d: *Declarator, kind: Declarato } const outer = try p.directDeclarator(base_type, d, kind); - var max_bits = p.comp.target.ptrBitWidth(); - if (max_bits > 61) max_bits = 61; - const max_bytes = (@as(u64, 1) << @truncate(max_bits)) - 1; if (!size.ty.isInt()) { try p.errStr(.array_size_non_int, size_tok, try p.typeStr(size.ty)); return error.ParsingFailed; } - if (base_type.is(.c23_auto)) { + if (base_type.is(.c23_auto) or 
outer.is(.invalid)) { // issue error later return Type.invalid; } else if (size.val.opt_ref == .none) { @@ -3030,7 +3082,7 @@ fn directDeclarator(p: *Parser, base_type: Type, d: *Declarator, kind: Declarato } else { // `outer` is validated later so it may be invalid here const outer_size = outer.sizeof(p.comp); - const max_elems = max_bytes / @max(1, outer_size orelse 1); + const max_elems = p.comp.maxArrayBytes() / @max(1, outer_size orelse 1); var size_val = size.val; if (size_val.isZero(p.comp)) { @@ -3047,7 +3099,7 @@ fn directDeclarator(p: *Parser, base_type: Type, d: *Declarator, kind: Declarato arr_ty.len = max_elems; } res_ty.data = .{ .array = arr_ty }; - res_ty.specifier = .array; + res_ty.specifier = if (static != null) .static_array else .array; } try res_ty.combine(outer); @@ -3120,12 +3172,14 @@ fn directDeclarator(p: *Parser, base_type: Type, d: *Declarator, kind: Declarato fn pointer(p: *Parser, base_ty: Type) Error!Type { var ty = base_ty; while (p.eatToken(.asterisk)) |_| { - const elem_ty = try p.arena.create(Type); - elem_ty.* = ty; - ty = Type{ - .specifier = .pointer, - .data = .{ .sub_type = elem_ty }, - }; + if (!ty.is(.invalid)) { + const elem_ty = try p.arena.create(Type); + elem_ty.* = ty; + ty = Type{ + .specifier = .pointer, + .data = .{ .sub_type = elem_ty }, + }; + } var quals = Type.Qualifiers.Builder{}; _ = try p.typeQual(&quals); try quals.finish(p, &ty); @@ -3237,6 +3291,75 @@ fn typeName(p: *Parser) Error!?Type { return try Attribute.applyTypeAttributes(p, ty, attr_buf_top, .align_ignored); } +fn complexInitializer(p: *Parser, init_ty: Type) Error!Result { + assert(p.tok_ids[p.tok_i] == .l_brace); + assert(init_ty.isComplex()); + + const real_ty = init_ty.makeReal(); + if (real_ty.isInt()) { + return p.todo("Complex integer initializers"); + } + const l_brace = p.tok_i; + p.tok_i += 1; + try p.errTok(.complex_component_init, l_brace); + + const first_tok = p.tok_i; + var first = try p.assignExpr(); + try first.expect(p); + try 
p.coerceInit(&first, first_tok, real_ty); + + var second: Result = .{ + .ty = real_ty, + .val = Value.zero, + }; + if (p.eatToken(.comma)) |_| { + const second_tok = p.tok_i; + const maybe_second = try p.assignExpr(); + if (!maybe_second.empty(p)) { + second = maybe_second; + try p.coerceInit(&second, second_tok, real_ty); + } + } + + // Eat excess initializers + var extra_tok: ?TokenIndex = null; + while (p.eatToken(.comma)) |_| { + if (p.tok_ids[p.tok_i] == .r_brace) break; + extra_tok = p.tok_i; + const extra = try p.assignExpr(); + if (extra.empty(p)) { + try p.errTok(.expected_expr, p.tok_i); + p.skipTo(.r_brace); + return error.ParsingFailed; + } + } + try p.expectClosing(l_brace, .r_brace); + if (extra_tok) |tok| { + try p.errTok(.excess_scalar_init, tok); + } + + const arr_init_node: Tree.Node = .{ + .tag = .array_init_expr_two, + .ty = init_ty, + .data = .{ .two = .{ first.node, second.node } }, + .loc = @enumFromInt(l_brace), + }; + var res: Result = .{ + .node = try p.addNode(arr_init_node), + .ty = init_ty, + }; + if (first.val.opt_ref != .none and second.val.opt_ref != .none) { + res.val = try Value.intern(p.comp, switch (real_ty.bitSizeof(p.comp).?) 
{ + 32 => .{ .complex = .{ .cf32 = .{ first.val.toFloat(f32, p.comp), second.val.toFloat(f32, p.comp) } } }, + 64 => .{ .complex = .{ .cf64 = .{ first.val.toFloat(f64, p.comp), second.val.toFloat(f64, p.comp) } } }, + 80 => .{ .complex = .{ .cf80 = .{ first.val.toFloat(f80, p.comp), second.val.toFloat(f80, p.comp) } } }, + 128 => .{ .complex = .{ .cf128 = .{ first.val.toFloat(f128, p.comp), second.val.toFloat(f128, p.comp) } } }, + else => unreachable, + }); + } + return res; +} + /// initializer /// : assignExpr /// | '{' initializerItems '}' @@ -3255,6 +3378,9 @@ fn initializer(p: *Parser, init_ty: Type) Error!Result { return error.ParsingFailed; } + if (init_ty.isComplex()) { + return p.complexInitializer(init_ty); + } var il: InitList = .{}; defer il.deinit(p.gpa); @@ -3754,9 +3880,15 @@ fn convertInitList(p: *Parser, il: InitList, init_ty: Type) Error!NodeIndex { var arr_init_node: Tree.Node = .{ .tag = .array_init_expr_two, .ty = init_ty, - .data = .{ .bin = .{ .lhs = .none, .rhs = .none } }, + .data = .{ .two = .{ .none, .none } }, }; + const max_elems = p.comp.maxArrayBytes() / (@max(1, elem_ty.sizeof(p.comp) orelse 1)); + if (start > max_elems) { + try p.errTok(.array_too_large, il.tok); + start = max_elems; + } + if (init_ty.specifier == .incomplete_array) { arr_init_node.ty.specifier = .array; arr_init_node.ty.data.array.len = start; @@ -3767,8 +3899,6 @@ fn convertInitList(p: *Parser, il: InitList, init_ty: Type) Error!NodeIndex { .specifier = .array, .data = .{ .array = arr_ty }, }; - const attrs = init_ty.getAttributes(); - arr_init_node.ty = try arr_init_node.ty.withAttributes(p.arena, attrs); } else if (start < max_items) { const elem = try p.addNode(.{ .tag = .array_filler_expr, @@ -3781,8 +3911,8 @@ fn convertInitList(p: *Parser, il: InitList, init_ty: Type) Error!NodeIndex { const items = p.list_buf.items[list_buf_top..]; switch (items.len) { 0 => {}, - 1 => arr_init_node.data.bin.lhs = items[0], - 2 => arr_init_node.data.bin = .{ .lhs = 
items[0], .rhs = items[1] }, + 1 => arr_init_node.data.two[0] = items[0], + 2 => arr_init_node.data.two = .{ items[0], items[1] }, else => { arr_init_node.tag = .array_init_expr; arr_init_node.data = .{ .range = try p.addList(items) }; @@ -3813,13 +3943,13 @@ fn convertInitList(p: *Parser, il: InitList, init_ty: Type) Error!NodeIndex { var struct_init_node: Tree.Node = .{ .tag = .struct_init_expr_two, .ty = init_ty, - .data = .{ .bin = .{ .lhs = .none, .rhs = .none } }, + .data = .{ .two = .{ .none, .none } }, }; const items = p.list_buf.items[list_buf_top..]; switch (items.len) { 0 => {}, - 1 => struct_init_node.data.bin.lhs = items[0], - 2 => struct_init_node.data.bin = .{ .lhs = items[0], .rhs = items[1] }, + 1 => struct_init_node.data.two[0] = items[0], + 2 => struct_init_node.data.two = .{ items[0], items[1] }, else => { struct_init_node.tag = .struct_init_expr; struct_init_node.data = .{ .range = try p.addList(items) }; @@ -3894,7 +4024,7 @@ fn asmOperand(p: *Parser, names: *std.ArrayList(?TokenIndex), constraints: *Node /// | asmStr ':' asmOperand* ':' asmOperand* /// | asmStr ':' asmOperand* ':' asmOperand* : asmStr? (',' asmStr)* /// | asmStr ':' asmOperand* ':' asmOperand* : asmStr? 
(',' asmStr)* : IDENTIFIER (',' IDENTIFIER)* -fn gnuAsmStmt(p: *Parser, quals: Tree.GNUAssemblyQualifiers, l_paren: TokenIndex) Error!NodeIndex { +fn gnuAsmStmt(p: *Parser, quals: Tree.GNUAssemblyQualifiers, asm_tok: TokenIndex, l_paren: TokenIndex) Error!NodeIndex { const asm_str = try p.asmStr(); try p.checkAsmStr(asm_str.val, l_paren); @@ -3903,6 +4033,7 @@ fn gnuAsmStmt(p: *Parser, quals: Tree.GNUAssemblyQualifiers, l_paren: TokenIndex .tag = .gnu_asm_simple, .ty = .{ .specifier = .void }, .data = .{ .un = asm_str.node }, + .loc = @enumFromInt(asm_tok), }); } @@ -4007,6 +4138,7 @@ fn gnuAsmStmt(p: *Parser, quals: Tree.GNUAssemblyQualifiers, l_paren: TokenIndex .tag = .addr_of_label, .data = .{ .decl_ref = label }, .ty = result_ty, + .loc = @enumFromInt(ident), }); try exprs.append(label_addr_node); @@ -4088,9 +4220,10 @@ fn assembly(p: *Parser, kind: enum { global, decl_label, stmt }) Error!?NodeInde .tag = .file_scope_asm, .ty = .{ .specifier = .void }, .data = .{ .decl = .{ .name = asm_tok, .node = asm_str.node } }, + .loc = @enumFromInt(asm_tok), }); }, - .stmt => result_node = try p.gnuAsmStmt(quals, l_paren), + .stmt => result_node = try p.gnuAsmStmt(quals, asm_tok, l_paren), } try p.expectClosing(l_paren, .r_paren); @@ -4141,7 +4274,7 @@ fn asmStr(p: *Parser) Error!Result { fn stmt(p: *Parser) Error!NodeIndex { if (try p.labeledStmt()) |some| return some; if (try p.compoundStmt(false, null)) |some| return some; - if (p.eatToken(.keyword_if)) |_| { + if (p.eatToken(.keyword_if)) |kw_if| { const l_paren = try p.expectToken(.l_paren); const cond_tok = p.tok_i; var cond = try p.expr(); @@ -4160,14 +4293,16 @@ fn stmt(p: *Parser) Error!NodeIndex { return try p.addNode(.{ .tag = .if_then_else_stmt, .data = .{ .if3 = .{ .cond = cond.node, .body = (try p.addList(&.{ then, @"else" })).start } }, + .loc = @enumFromInt(kw_if), }) else return try p.addNode(.{ .tag = .if_then_stmt, .data = .{ .bin = .{ .lhs = cond.node, .rhs = then } }, + .loc = @enumFromInt(kw_if), 
}); } - if (p.eatToken(.keyword_switch)) |_| { + if (p.eatToken(.keyword_switch)) |kw_switch| { const l_paren = try p.expectToken(.l_paren); const cond_tok = p.tok_i; var cond = try p.expr(); @@ -4197,9 +4332,10 @@ fn stmt(p: *Parser) Error!NodeIndex { return try p.addNode(.{ .tag = .switch_stmt, .data = .{ .bin = .{ .lhs = cond.node, .rhs = body } }, + .loc = @enumFromInt(kw_switch), }); } - if (p.eatToken(.keyword_while)) |_| { + if (p.eatToken(.keyword_while)) |kw_while| { const l_paren = try p.expectToken(.l_paren); const cond_tok = p.tok_i; var cond = try p.expr(); @@ -4221,9 +4357,10 @@ fn stmt(p: *Parser) Error!NodeIndex { return try p.addNode(.{ .tag = .while_stmt, .data = .{ .bin = .{ .lhs = cond.node, .rhs = body } }, + .loc = @enumFromInt(kw_while), }); } - if (p.eatToken(.keyword_do)) |_| { + if (p.eatToken(.keyword_do)) |kw_do| { const body = body: { const old_loop = p.in_loop; p.in_loop = true; @@ -4248,9 +4385,10 @@ fn stmt(p: *Parser) Error!NodeIndex { return try p.addNode(.{ .tag = .do_while_stmt, .data = .{ .bin = .{ .lhs = cond.node, .rhs = body } }, + .loc = @enumFromInt(kw_do), }); } - if (p.eatToken(.keyword_for)) |_| { + if (p.eatToken(.keyword_for)) |kw_for| { try p.syms.pushScope(p); defer p.syms.popScope(); const decl_buf_top = p.decl_buf.items.len; @@ -4301,16 +4439,22 @@ fn stmt(p: *Parser) Error!NodeIndex { return try p.addNode(.{ .tag = .for_decl_stmt, .data = .{ .range = .{ .start = start, .end = end } }, + .loc = @enumFromInt(kw_for), }); } else if (init.node == .none and cond.node == .none and incr.node == .none) { return try p.addNode(.{ .tag = .forever_stmt, .data = .{ .un = body }, + .loc = @enumFromInt(kw_for), }); - } else return try p.addNode(.{ .tag = .for_stmt, .data = .{ .if3 = .{ - .cond = body, - .body = (try p.addList(&.{ init.node, cond.node, incr.node })).start, - } } }); + } else return try p.addNode(.{ + .tag = .for_stmt, + .data = .{ .if3 = .{ + .cond = body, + .body = (try p.addList(&.{ init.node, cond.node, 
incr.node })).start, + } }, + .loc = @enumFromInt(kw_for), + }); } if (p.eatToken(.keyword_goto)) |goto_tok| { if (p.eatToken(.asterisk)) |_| { @@ -4338,7 +4482,7 @@ fn stmt(p: *Parser) Error!NodeIndex { } } - try e.un(p, .computed_goto_stmt); + try e.un(p, .computed_goto_stmt, goto_tok); _ = try p.expectToken(.semicolon); return e.node; } @@ -4351,17 +4495,18 @@ fn stmt(p: *Parser) Error!NodeIndex { return try p.addNode(.{ .tag = .goto_stmt, .data = .{ .decl_ref = name_tok }, + .loc = @enumFromInt(goto_tok), }); } if (p.eatToken(.keyword_continue)) |cont| { if (!p.in_loop) try p.errTok(.continue_not_in_loop, cont); _ = try p.expectToken(.semicolon); - return try p.addNode(.{ .tag = .continue_stmt, .data = undefined }); + return try p.addNode(.{ .tag = .continue_stmt, .data = undefined, .loc = @enumFromInt(cont) }); } if (p.eatToken(.keyword_break)) |br| { if (!p.in_loop and p.@"switch" == null) try p.errTok(.break_not_in_loop_or_switch, br); _ = try p.expectToken(.semicolon); - return try p.addNode(.{ .tag = .break_stmt, .data = undefined }); + return try p.addNode(.{ .tag = .break_stmt, .data = undefined, .loc = @enumFromInt(br) }); } if (try p.returnStmt()) |some| return some; if (try p.assembly(.stmt)) |some| return some; @@ -4380,8 +4525,8 @@ fn stmt(p: *Parser) Error!NodeIndex { defer p.attr_buf.len = attr_buf_top; try p.attributeSpecifier(); - if (p.eatToken(.semicolon)) |_| { - var null_node: Tree.Node = .{ .tag = .null_stmt, .data = undefined }; + if (p.eatToken(.semicolon)) |semicolon| { + var null_node: Tree.Node = .{ .tag = .null_stmt, .data = undefined, .loc = @enumFromInt(semicolon) }; null_node.ty = try Attribute.applyStatementAttributes(p, null_node.ty, expr_start, attr_buf_top); return p.addNode(null_node); } @@ -4422,6 +4567,7 @@ fn labeledStmt(p: *Parser) Error!?NodeIndex { var labeled_stmt = Tree.Node{ .tag = .labeled_stmt, .data = .{ .decl = .{ .name = name_tok, .node = try p.labelableStmt() } }, + .loc = @enumFromInt(name_tok), }; 
labeled_stmt.ty = try Attribute.applyLabelAttributes(p, labeled_stmt.ty, attr_buf_top); return try p.addNode(labeled_stmt); @@ -4464,9 +4610,11 @@ fn labeledStmt(p: *Parser) Error!?NodeIndex { if (second_item) |some| return try p.addNode(.{ .tag = .case_range_stmt, .data = .{ .if3 = .{ .cond = s, .body = (try p.addList(&.{ first_item.node, some.node })).start } }, + .loc = @enumFromInt(case), }) else return try p.addNode(.{ .tag = .case_stmt, .data = .{ .bin = .{ .lhs = first_item.node, .rhs = s } }, + .loc = @enumFromInt(case), }); } else if (p.eatToken(.keyword_default)) |default| { _ = try p.expectToken(.colon); @@ -4474,6 +4622,7 @@ fn labeledStmt(p: *Parser) Error!?NodeIndex { const node = try p.addNode(.{ .tag = .default_stmt, .data = .{ .un = s }, + .loc = @enumFromInt(default), }); const @"switch" = p.@"switch" orelse { try p.errStr(.case_not_in_switch, default, "default"); @@ -4492,7 +4641,7 @@ fn labeledStmt(p: *Parser) Error!?NodeIndex { fn labelableStmt(p: *Parser) Error!NodeIndex { if (p.tok_ids[p.tok_i] == .r_brace) { try p.err(.label_compound_end); - return p.addNode(.{ .tag = .null_stmt, .data = undefined }); + return p.addNode(.{ .tag = .null_stmt, .data = undefined, .loc = @enumFromInt(p.tok_i) }); } return p.stmt(); } @@ -4557,6 +4706,7 @@ fn compoundStmt(p: *Parser, is_fn_body: bool, stmt_expr_state: ?*StmtExprState) else => {}, } } + const r_brace = p.tok_i - 1; if (noreturn_index) |some| { // if new labels were defined we cannot be certain that the code is unreachable @@ -4580,7 +4730,7 @@ fn compoundStmt(p: *Parser, is_fn_body: bool, stmt_expr_state: ?*StmtExprState) try p.errStr(.func_does_not_return, p.tok_i - 1, func_name); } } - try p.decl_buf.append(try p.addNode(.{ .tag = .implicit_return, .ty = p.func.ty.?.returnType(), .data = .{ .return_zero = return_zero } })); + try p.decl_buf.append(try p.addNode(.{ .tag = .implicit_return, .ty = p.func.ty.?.returnType(), .data = .{ .return_zero = return_zero }, .loc = @enumFromInt(r_brace) })); } 
if (p.func.ident) |some| try p.decl_buf.insert(decl_buf_top, some.node); if (p.func.pretty_ident) |some| try p.decl_buf.insert(decl_buf_top, some.node); @@ -4588,13 +4738,14 @@ fn compoundStmt(p: *Parser, is_fn_body: bool, stmt_expr_state: ?*StmtExprState) var node: Tree.Node = .{ .tag = .compound_stmt_two, - .data = .{ .bin = .{ .lhs = .none, .rhs = .none } }, + .data = .{ .two = .{ .none, .none } }, + .loc = @enumFromInt(l_brace), }; const statements = p.decl_buf.items[decl_buf_top..]; switch (statements.len) { 0 => {}, - 1 => node.data = .{ .bin = .{ .lhs = statements[0], .rhs = .none } }, - 2 => node.data = .{ .bin = .{ .lhs = statements[0], .rhs = statements[1] } }, + 1 => node.data = .{ .two = .{ statements[0], .none } }, + 2 => node.data = .{ .two = .{ statements[0], statements[1] } }, else => { node.tag = .compound_stmt; node.data = .{ .range = try p.addList(statements) }; @@ -4618,8 +4769,8 @@ fn nodeIsNoreturn(p: *Parser, node: NodeIndex) NoreturnKind { }, .compound_stmt_two => { const data = p.nodes.items(.data)[@intFromEnum(node)]; - const lhs_type = if (data.bin.lhs != .none) p.nodeIsNoreturn(data.bin.lhs) else .no; - const rhs_type = if (data.bin.rhs != .none) p.nodeIsNoreturn(data.bin.rhs) else .no; + const lhs_type = if (data.two[0] != .none) p.nodeIsNoreturn(data.two[0]) else .no; + const rhs_type = if (data.two[1] != .none) p.nodeIsNoreturn(data.two[1]) else .no; if (lhs_type == .complex or rhs_type == .complex) return .complex; if (lhs_type == .yes or rhs_type == .yes) return .yes; return .no; @@ -4704,6 +4855,8 @@ fn nextStmt(p: *Parser, l_brace: TokenIndex) !void { .keyword_int, .keyword_long, .keyword_signed, + .keyword_signed1, + .keyword_signed2, .keyword_unsigned, .keyword_float, .keyword_double, @@ -4743,17 +4896,17 @@ fn returnStmt(p: *Parser) Error!?NodeIndex { if (e.node == .none) { if (!ret_ty.is(.void)) try p.errStr(.func_should_return, ret_tok, p.tokSlice(p.func.name)); - return try p.addNode(.{ .tag = .return_stmt, .data = .{ .un = 
e.node } }); + return try p.addNode(.{ .tag = .return_stmt, .data = .{ .un = e.node }, .loc = @enumFromInt(ret_tok) }); } else if (ret_ty.is(.void)) { try p.errStr(.void_func_returns_value, e_tok, p.tokSlice(p.func.name)); - return try p.addNode(.{ .tag = .return_stmt, .data = .{ .un = e.node } }); + return try p.addNode(.{ .tag = .return_stmt, .data = .{ .un = e.node }, .loc = @enumFromInt(ret_tok) }); } try e.lvalConversion(p); try e.coerce(p, ret_ty, e_tok, .ret); try e.saveValue(p); - return try p.addNode(.{ .tag = .return_stmt, .data = .{ .un = e.node } }); + return try p.addNode(.{ .tag = .return_stmt, .data = .{ .un = e.node }, .loc = @enumFromInt(ret_tok) }); } // ====== expressions ====== @@ -4802,7 +4955,6 @@ const CallExpr = union(enum) { } fn shouldPromoteVarArg(self: CallExpr, arg_idx: u32) bool { - @setEvalBranchQuota(2000); return switch (self) { .standard => true, .builtin => |builtin| switch (builtin.tag) { @@ -4810,10 +4962,13 @@ const CallExpr = union(enum) { Builtin.tagFromName("__va_start").?, Builtin.tagFromName("va_start").?, => arg_idx != 1, - Builtin.tagFromName("__builtin_complex").?, Builtin.tagFromName("__builtin_add_overflow").?, - Builtin.tagFromName("__builtin_sub_overflow").?, + Builtin.tagFromName("__builtin_complex").?, + Builtin.tagFromName("__builtin_isinf").?, + Builtin.tagFromName("__builtin_isinf_sign").?, Builtin.tagFromName("__builtin_mul_overflow").?, + Builtin.tagFromName("__builtin_isnan").?, + Builtin.tagFromName("__builtin_sub_overflow").?, => false, else => true, }, @@ -4827,7 +4982,6 @@ const CallExpr = union(enum) { } fn checkVarArg(self: CallExpr, p: *Parser, first_after: TokenIndex, param_tok: TokenIndex, arg: *Result, arg_idx: u32) !void { - @setEvalBranchQuota(10_000); if (self == .standard) return; const builtin_tok = p.nodes.items(.data)[@intFromEnum(self.builtin.node)].decl.name; @@ -4852,13 +5006,15 @@ const CallExpr = union(enum) { /// of arguments, `paramCountOverride` is used to tell us how many arguments 
we should actually expect to see for /// these custom-typechecked functions. fn paramCountOverride(self: CallExpr) ?u32 { - @setEvalBranchQuota(10_000); return switch (self) { .standard => null, .builtin => |builtin| switch (builtin.tag) { Builtin.tagFromName("__c11_atomic_thread_fence").?, Builtin.tagFromName("__c11_atomic_signal_fence").?, Builtin.tagFromName("__c11_atomic_is_lock_free").?, + Builtin.tagFromName("__builtin_isinf").?, + Builtin.tagFromName("__builtin_isinf_sign").?, + Builtin.tagFromName("__builtin_isnan").?, => 1, Builtin.tagFromName("__builtin_complex").?, @@ -4903,7 +5059,6 @@ const CallExpr = union(enum) { } fn returnType(self: CallExpr, p: *Parser, callable_ty: Type) Type { - @setEvalBranchQuota(6000); return switch (self) { .standard => callable_ty.returnType(), .builtin => |builtin| switch (builtin.tag) { @@ -4977,12 +5132,12 @@ const CallExpr = union(enum) { var call_node: Tree.Node = .{ .tag = .call_expr_one, .ty = ret_ty, - .data = .{ .bin = .{ .lhs = func_node, .rhs = .none } }, + .data = .{ .two = .{ func_node, .none } }, }; const args = p.list_buf.items[list_buf_top..]; switch (arg_count) { 0 => {}, - 1 => call_node.data.bin.rhs = args[1], // args[0] == func.node + 1 => call_node.data.two[1] = args[1], // args[0] == func.node else => { call_node.tag = .call_expr; call_node.data = .{ .range = try p.addList(args) }; @@ -5005,7 +5160,8 @@ const CallExpr = union(enum) { call_node.data = .{ .range = try p.addList(args) }; }, } - return Result{ .node = builtin.node, .ty = ret_ty }; + const val = try evalBuiltin(builtin.tag, p, args[1..]); + return Result{ .node = builtin.node, .ty = ret_ty, .val = val }; }, } } @@ -5016,6 +5172,8 @@ pub const Result = struct { ty: Type = .{ .specifier = .int }, val: Value = .{}, + const invalid: Result = .{ .ty = Type.invalid }; + pub fn str(res: Result, p: *Parser) ![]const u8 { switch (res.val.opt_ref) { .none => return "(none)", @@ -5073,30 +5231,21 @@ pub const Result = struct { .post_inc_expr, 
.post_dec_expr, => return, - .call_expr_one => { - const fn_ptr = p.nodes.items(.data)[@intFromEnum(cur_node)].bin.lhs; - const fn_ty = p.nodes.items(.ty)[@intFromEnum(fn_ptr)].elemType(); - const cast_info = p.nodes.items(.data)[@intFromEnum(fn_ptr)].cast.operand; - const decl_ref = p.nodes.items(.data)[@intFromEnum(cast_info)].decl_ref; - if (fn_ty.hasAttribute(.nodiscard)) try p.errStr(.nodiscard_unused, expr_start, p.tokSlice(decl_ref)); - if (fn_ty.hasAttribute(.warn_unused_result)) try p.errStr(.warn_unused_result, expr_start, p.tokSlice(decl_ref)); - return; - }, - .call_expr => { - const fn_ptr = p.data.items[p.nodes.items(.data)[@intFromEnum(cur_node)].range.start]; - const fn_ty = p.nodes.items(.ty)[@intFromEnum(fn_ptr)].elemType(); - const cast_info = p.nodes.items(.data)[@intFromEnum(fn_ptr)].cast.operand; - const decl_ref = p.nodes.items(.data)[@intFromEnum(cast_info)].decl_ref; - if (fn_ty.hasAttribute(.nodiscard)) try p.errStr(.nodiscard_unused, expr_start, p.tokSlice(decl_ref)); - if (fn_ty.hasAttribute(.warn_unused_result)) try p.errStr(.warn_unused_result, expr_start, p.tokSlice(decl_ref)); + .call_expr, .call_expr_one => { + const tmp_tree = p.tmpTree(); + const child_nodes = tmp_tree.childNodes(cur_node); + const fn_ptr = child_nodes[0]; + const call_info = tmp_tree.callableResultUsage(fn_ptr) orelse return; + if (call_info.nodiscard) try p.errStr(.nodiscard_unused, expr_start, p.tokSlice(call_info.tok)); + if (call_info.warn_unused_result) try p.errStr(.warn_unused_result, expr_start, p.tokSlice(call_info.tok)); return; }, .stmt_expr => { const body = p.nodes.items(.data)[@intFromEnum(cur_node)].un; switch (p.nodes.items(.tag)[@intFromEnum(body)]) { .compound_stmt_two => { - const body_stmt = p.nodes.items(.data)[@intFromEnum(body)].bin; - cur_node = if (body_stmt.rhs != .none) body_stmt.rhs else body_stmt.lhs; + const body_stmt = p.nodes.items(.data)[@intFromEnum(body)].two; + cur_node = if (body_stmt[1] != .none) body_stmt[1] else 
body_stmt[0]; }, .compound_stmt => { const data = p.nodes.items(.data)[@intFromEnum(body)]; @@ -5112,29 +5261,31 @@ pub const Result = struct { try p.errTok(.unused_value, expr_start); } - fn boolRes(lhs: *Result, p: *Parser, tag: Tree.Tag, rhs: Result) !void { + fn boolRes(lhs: *Result, p: *Parser, tag: Tree.Tag, rhs: Result, tok_i: TokenIndex) !void { if (lhs.val.opt_ref == .null) { lhs.val = Value.zero; } if (lhs.ty.specifier != .invalid) { lhs.ty = Type.int; } - return lhs.bin(p, tag, rhs); + return lhs.bin(p, tag, rhs, tok_i); } - fn bin(lhs: *Result, p: *Parser, tag: Tree.Tag, rhs: Result) !void { + fn bin(lhs: *Result, p: *Parser, tag: Tree.Tag, rhs: Result, tok_i: TokenIndex) !void { lhs.node = try p.addNode(.{ .tag = tag, .ty = lhs.ty, .data = .{ .bin = .{ .lhs = lhs.node, .rhs = rhs.node } }, + .loc = @enumFromInt(tok_i), }); } - fn un(operand: *Result, p: *Parser, tag: Tree.Tag) Error!void { + fn un(operand: *Result, p: *Parser, tag: Tree.Tag, tok_i: TokenIndex) Error!void { operand.node = try p.addNode(.{ .tag = tag, .ty = operand.ty, .data = .{ .un = operand.node }, + .loc = @enumFromInt(tok_i), }); } @@ -5368,10 +5519,14 @@ pub const Result = struct { fn lvalConversion(res: *Result, p: *Parser) Error!void { if (res.ty.isFunc()) { - const elem_ty = try p.arena.create(Type); - elem_ty.* = res.ty; - res.ty.specifier = .pointer; - res.ty.data = .{ .sub_type = elem_ty }; + if (res.ty.isInvalidFunc()) { + res.ty = .{ .specifier = .invalid }; + } else { + const elem_ty = try p.arena.create(Type); + elem_ty.* = res.ty; + res.ty.specifier = .pointer; + res.ty.data = .{ .sub_type = elem_ty }; + } try res.implicitCast(p, .function_to_pointer); } else if (res.ty.isArray()) { res.val = .{}; @@ -5455,7 +5610,14 @@ pub const Result = struct { try res.implicitCast(p, .complex_float_to_complex_int); } } else if (!res.ty.eql(int_ty, p.comp, true)) { - try res.val.intCast(int_ty, p.comp); + const old_val = res.val; + const value_change_kind = try res.val.intCast(int_ty, 
p.comp); + switch (value_change_kind) { + .none => {}, + .truncated => try p.errStr(.int_value_changed, tok, try p.valueChangedStr(res, old_val, int_ty)), + .sign_changed => try p.errStr(.sign_conversion, tok, try p.typePairStrExtra(res.ty, " to ", int_ty)), + } + const old_real = res.ty.isReal(); const new_real = int_ty.isReal(); if (old_real and new_real) { @@ -5486,8 +5648,8 @@ pub const Result = struct { .none => return p.errStr(.float_to_int, tok, try p.typePairStrExtra(res.ty, " to ", int_ty)), .out_of_range => return p.errStr(.float_out_of_range, tok, try p.typePairStrExtra(res.ty, " to ", int_ty)), .overflow => return p.errStr(.float_overflow_conversion, tok, try p.typePairStrExtra(res.ty, " to ", int_ty)), - .nonzero_to_zero => return p.errStr(.float_zero_conversion, tok, try p.floatValueChangedStr(res, old_value, int_ty)), - .value_changed => return p.errStr(.float_value_changed, tok, try p.floatValueChangedStr(res, old_value, int_ty)), + .nonzero_to_zero => return p.errStr(.float_zero_conversion, tok, try p.valueChangedStr(res, old_value, int_ty)), + .value_changed => return p.errStr(.float_value_changed, tok, try p.valueChangedStr(res, old_value, int_ty)), } } @@ -5555,7 +5717,7 @@ pub const Result = struct { res.ty = ptr_ty; try res.implicitCast(p, .bool_to_pointer); } else if (res.ty.isInt()) { - try res.val.intCast(ptr_ty, p.comp); + _ = try res.val.intCast(ptr_ty, p.comp); res.ty = ptr_ty; try res.implicitCast(p, .int_to_pointer); } @@ -5620,16 +5782,14 @@ pub const Result = struct { // if either is a float cast to that type if (a.ty.isFloat() or b.ty.isFloat()) { - const float_types = [7][2]Type.Specifier{ + const float_types = [6][2]Type.Specifier{ .{ .complex_long_double, .long_double }, .{ .complex_float128, .float128 }, - .{ .complex_float80, .float80 }, .{ .complex_double, .double }, .{ .complex_float, .float }, // No `_Complex __fp16` type .{ .invalid, .fp16 }, - // No `_Complex _Float16` - .{ .invalid, .float16 }, + .{ .complex_float16, 
.float16 }, }; const a_spec = a.ty.canonicalize(.standard).specifier; const b_spec = b.ty.canonicalize(.standard).specifier; @@ -5647,7 +5807,7 @@ pub const Result = struct { if (try a.floatConversion(b, a_spec, b_spec, p, float_types[3])) return; if (try a.floatConversion(b, a_spec, b_spec, p, float_types[4])) return; if (try a.floatConversion(b, a_spec, b_spec, p, float_types[5])) return; - if (try a.floatConversion(b, a_spec, b_spec, p, float_types[6])) return; + unreachable; } if (a.ty.eql(b.ty, p.comp, true)) { @@ -5875,6 +6035,10 @@ pub const Result = struct { if (to.is(.bool)) { res.val.boolCast(p.comp); } else if (old_float and new_int) { + if (to.hasIncompleteSize()) { + try p.errStr(.cast_to_incomplete_type, l_paren, try p.typeStr(to)); + return error.ParsingFailed; + } // Explicit cast, no conversion warning _ = try res.val.floatToInt(to, p.comp); } else if (new_float and old_int) { @@ -5886,7 +6050,7 @@ pub const Result = struct { try p.errStr(.cast_to_incomplete_type, l_paren, try p.typeStr(to)); return error.ParsingFailed; } - try res.val.intCast(to, p.comp); + _ = try res.val.intCast(to, p.comp); } } else if (to.get(.@"union")) |union_ty| { if (union_ty.data.record.hasFieldOfType(res.ty, p.comp)) { @@ -5918,12 +6082,13 @@ pub const Result = struct { .tag = .explicit_cast, .ty = res.ty, .data = .{ .cast = .{ .operand = res.node, .kind = cast_kind } }, + .loc = @enumFromInt(l_paren), }); } fn intFitsInType(res: Result, p: *Parser, ty: Type) !bool { - const max_int = try Value.int(ty.maxInt(p.comp), p.comp); - const min_int = try Value.int(ty.minInt(p.comp), p.comp); + const max_int = try Value.maxInt(ty, p.comp); + const min_int = try Value.minInt(ty, p.comp); return res.val.compare(.lte, max_int, p.comp) and (res.ty.isUnsignedInt(p.comp) or res.val.compare(.gte, min_int, p.comp)); } @@ -6091,7 +6256,7 @@ fn expr(p: *Parser) Error!Result { var err_start = p.comp.diagnostics.list.items.len; var lhs = try p.assignExpr(); if (p.tok_ids[p.tok_i] == .comma) 
try lhs.expect(p); - while (p.eatToken(.comma)) |_| { + while (p.eatToken(.comma)) |comma| { try lhs.maybeWarnUnused(p, expr_start, err_start); expr_start = p.tok_i; err_start = p.comp.diagnostics.list.items.len; @@ -6101,7 +6266,7 @@ fn expr(p: *Parser) Error!Result { try rhs.lvalConversion(p); lhs.val = rhs.val; lhs.ty = rhs.ty; - try lhs.bin(p, .comma_expr, rhs); + try lhs.bin(p, .comma_expr, rhs, comma); } return lhs; } @@ -6183,7 +6348,7 @@ fn assignExpr(p: *Parser) Error!Result { } } _ = try lhs_copy.adjustTypes(tok, &rhs, p, if (tag == .mod_assign_expr) .integer else .arithmetic); - try lhs.bin(p, tag, rhs); + try lhs.bin(p, tag, rhs, bit_or.?); return lhs; }, .sub_assign_expr, @@ -6194,7 +6359,7 @@ fn assignExpr(p: *Parser) Error!Result { } else { _ = try lhs_copy.adjustTypes(tok, &rhs, p, .arithmetic); } - try lhs.bin(p, tag, rhs); + try lhs.bin(p, tag, rhs, bit_or.?); return lhs; }, .shl_assign_expr, @@ -6204,7 +6369,7 @@ fn assignExpr(p: *Parser) Error!Result { .bit_or_assign_expr, => { _ = try lhs_copy.adjustTypes(tok, &rhs, p, .integer); - try lhs.bin(p, tag, rhs); + try lhs.bin(p, tag, rhs, bit_or.?); return lhs; }, else => unreachable, @@ -6212,7 +6377,7 @@ fn assignExpr(p: *Parser) Error!Result { try rhs.coerce(p, lhs.ty, tok, .assign); - try lhs.bin(p, tag, rhs); + try lhs.bin(p, tag, rhs, bit_or.?); return lhs; } @@ -6280,6 +6445,7 @@ fn condExpr(p: *Parser) Error!Result { .tag = .binary_cond_expr, .ty = cond.ty, .data = .{ .if3 = .{ .cond = cond.node, .body = (try p.addList(&.{ cond_then.node, then_expr.node })).start } }, + .loc = @enumFromInt(cond_tok), }); return cond; } @@ -6305,6 +6471,7 @@ fn condExpr(p: *Parser) Error!Result { .tag = .cond_expr, .ty = cond.ty, .data = .{ .if3 = .{ .cond = cond.node, .body = (try p.addList(&.{ then_expr.node, else_expr.node })).start } }, + .loc = @enumFromInt(cond_tok), }); return cond; } @@ -6324,8 +6491,10 @@ fn lorExpr(p: *Parser) Error!Result { if (try lhs.adjustTypes(tok, &rhs, p, .boolean_logic)) { 
const res = lhs.val.toBool(p.comp) or rhs.val.toBool(p.comp); lhs.val = Value.fromBool(res); + } else { + lhs.val.boolCast(p.comp); } - try lhs.boolRes(p, .bool_or_expr, rhs); + try lhs.boolRes(p, .bool_or_expr, rhs, tok); } return lhs; } @@ -6345,8 +6514,10 @@ fn landExpr(p: *Parser) Error!Result { if (try lhs.adjustTypes(tok, &rhs, p, .boolean_logic)) { const res = lhs.val.toBool(p.comp) and rhs.val.toBool(p.comp); lhs.val = Value.fromBool(res); + } else { + lhs.val.boolCast(p.comp); } - try lhs.boolRes(p, .bool_and_expr, rhs); + try lhs.boolRes(p, .bool_and_expr, rhs, tok); } return lhs; } @@ -6362,7 +6533,7 @@ fn orExpr(p: *Parser) Error!Result { if (try lhs.adjustTypes(tok, &rhs, p, .integer)) { lhs.val = try lhs.val.bitOr(rhs.val, p.comp); } - try lhs.bin(p, .bit_or_expr, rhs); + try lhs.bin(p, .bit_or_expr, rhs, tok); } return lhs; } @@ -6378,7 +6549,7 @@ fn xorExpr(p: *Parser) Error!Result { if (try lhs.adjustTypes(tok, &rhs, p, .integer)) { lhs.val = try lhs.val.bitXor(rhs.val, p.comp); } - try lhs.bin(p, .bit_xor_expr, rhs); + try lhs.bin(p, .bit_xor_expr, rhs, tok); } return lhs; } @@ -6394,7 +6565,7 @@ fn andExpr(p: *Parser) Error!Result { if (try lhs.adjustTypes(tok, &rhs, p, .integer)) { lhs.val = try lhs.val.bitAnd(rhs.val, p.comp); } - try lhs.bin(p, .bit_and_expr, rhs); + try lhs.bin(p, .bit_and_expr, rhs, tok); } return lhs; } @@ -6414,8 +6585,10 @@ fn eqExpr(p: *Parser) Error!Result { const op: std.math.CompareOperator = if (tag == .equal_expr) .eq else .neq; const res = lhs.val.compare(op, rhs.val, p.comp); lhs.val = Value.fromBool(res); + } else { + lhs.val.boolCast(p.comp); } - try lhs.boolRes(p, tag, rhs); + try lhs.boolRes(p, tag, rhs, ne.?); } return lhs; } @@ -6443,8 +6616,10 @@ fn compExpr(p: *Parser) Error!Result { }; const res = lhs.val.compare(op, rhs.val, p.comp); lhs.val = Value.fromBool(res); + } else { + lhs.val.boolCast(p.comp); } - try lhs.boolRes(p, tag, rhs); + try lhs.boolRes(p, tag, rhs, ge.?); } return lhs; } @@ -6474,7 
+6649,7 @@ fn shiftExpr(p: *Parser) Error!Result { lhs.val = try lhs.val.shr(rhs.val, lhs.ty, p.comp); } } - try lhs.bin(p, tag, rhs); + try lhs.bin(p, tag, rhs, shr.?); } return lhs; } @@ -6504,7 +6679,7 @@ fn addExpr(p: *Parser) Error!Result { try p.errStr(.ptr_arithmetic_incomplete, minus.?, try p.typeStr(lhs_ty.elemType())); lhs.ty = Type.invalid; } - try lhs.bin(p, tag, rhs); + try lhs.bin(p, tag, rhs, minus.?); } return lhs; } @@ -6538,7 +6713,7 @@ fn mulExpr(p: *Parser) Error!Result { lhs.ty.signedness(p.comp) != .unsigned) try p.errOverflow(mul.?, lhs); } else if (div != null) { if (try lhs.val.div(lhs.val, rhs.val, lhs.ty, p.comp) and - lhs.ty.signedness(p.comp) != .unsigned) try p.errOverflow(mul.?, lhs); + lhs.ty.signedness(p.comp) != .unsigned) try p.errOverflow(div.?, lhs); } else { var res = try Value.rem(lhs.val, rhs.val, lhs.ty, p.comp); if (res.opt_ref == .none) { @@ -6554,7 +6729,7 @@ fn mulExpr(p: *Parser) Error!Result { } } - try lhs.bin(p, tag, rhs); + try lhs.bin(p, tag, rhs, percent.?); } return lhs; } @@ -6573,7 +6748,7 @@ fn removeUnusedWarningForTok(p: *Parser, last_expr_tok: TokenIndex) void { } /// castExpr -/// : '(' compoundStmt ')' +/// : '(' compoundStmt ')' suffixExpr* /// | '(' typeName ')' castExpr /// | '(' typeName ')' '{' initializerItems '}' /// | __builtin_choose_expr '(' integerConstExpr ',' assignExpr ',' assignExpr ')' @@ -6584,6 +6759,7 @@ fn removeUnusedWarningForTok(p: *Parser, last_expr_tok: TokenIndex) void { fn castExpr(p: *Parser) Error!Result { if (p.eatToken(.l_paren)) |l_paren| cast_expr: { if (p.tok_ids[p.tok_i] == .l_brace) { + const tok = p.tok_i; try p.err(.gnu_statement_expression); if (p.func.ty == null) { try p.err(.stmt_expr_not_allowed_file_scope); @@ -6599,7 +6775,12 @@ fn castExpr(p: *Parser) Error!Result { .val = stmt_expr_state.last_expr_res.val, }; try p.expectClosing(l_paren, .r_paren); - try res.un(p, .stmt_expr); + try res.un(p, .stmt_expr, tok); + while (true) { + const suffix = try 
p.suffixExpr(res); + if (suffix.empty(p)) break; + res = suffix; + } return res; } const ty = (try p.typeName()) orelse { @@ -6634,23 +6815,26 @@ fn castExpr(p: *Parser) Error!Result { } fn typesCompatible(p: *Parser) Error!Result { + const builtin_tok = p.tok_i; p.tok_i += 1; const l_paren = try p.expectToken(.l_paren); + const first_tok = p.tok_i; const first = (try p.typeName()) orelse { try p.err(.expected_type); p.skipTo(.r_paren); return error.ParsingFailed; }; - const lhs = try p.addNode(.{ .tag = .invalid, .ty = first, .data = undefined }); + const lhs = try p.addNode(.{ .tag = .invalid, .ty = first, .data = undefined, .loc = @enumFromInt(first_tok) }); _ = try p.expectToken(.comma); + const second_tok = p.tok_i; const second = (try p.typeName()) orelse { try p.err(.expected_type); p.skipTo(.r_paren); return error.ParsingFailed; }; - const rhs = try p.addNode(.{ .tag = .invalid, .ty = second, .data = undefined }); + const rhs = try p.addNode(.{ .tag = .invalid, .ty = second, .data = undefined, .loc = @enumFromInt(second_tok) }); try p.expectClosing(l_paren, .r_paren); @@ -6665,10 +6849,15 @@ fn typesCompatible(p: *Parser) Error!Result { const res = Result{ .val = Value.fromBool(compatible), - .node = try p.addNode(.{ .tag = .builtin_types_compatible_p, .ty = Type.int, .data = .{ .bin = .{ - .lhs = lhs, - .rhs = rhs, - } } }), + .node = try p.addNode(.{ + .tag = .builtin_types_compatible_p, + .ty = Type.int, + .data = .{ .bin = .{ + .lhs = lhs, + .rhs = rhs, + } }, + .loc = @enumFromInt(builtin_tok), + }), }; try p.value_map.put(res.node, res.val); return res; @@ -6786,11 +6975,11 @@ fn offsetofMemberDesignator(p: *Parser, base_ty: Type, want_bits: bool) Error!Re errdefer p.skipTo(.r_paren); const base_field_name_tok = try p.expectIdentifier(); const base_field_name = try StrInt.intern(p.comp, p.tokSlice(base_field_name_tok)); - try p.validateFieldAccess(base_ty, base_ty, base_field_name_tok, base_field_name); + const base_record_ty = base_ty.getRecord().?; 
+ try p.validateFieldAccess(base_record_ty, base_ty, base_field_name_tok, base_field_name); const base_node = try p.addNode(.{ .tag = .default_init_expr, .ty = base_ty, .data = undefined }); var cur_offset: u64 = 0; - const base_record_ty = base_ty.canonicalize(.standard); var lhs = try p.fieldAccessExtra(base_node, base_record_ty, base_field_name, false, &cur_offset); var total_offset = cur_offset; @@ -6800,13 +6989,12 @@ fn offsetofMemberDesignator(p: *Parser, base_ty: Type, want_bits: bool) Error!Re const field_name_tok = try p.expectIdentifier(); const field_name = try StrInt.intern(p.comp, p.tokSlice(field_name_tok)); - if (!lhs.ty.isRecord()) { + const lhs_record_ty = lhs.ty.getRecord() orelse { try p.errStr(.offsetof_ty, field_name_tok, try p.typeStr(lhs.ty)); return error.ParsingFailed; - } - try p.validateFieldAccess(lhs.ty, lhs.ty, field_name_tok, field_name); - const record_ty = lhs.ty.canonicalize(.standard); - lhs = try p.fieldAccessExtra(lhs.node, record_ty, field_name, false, &cur_offset); + }; + try p.validateFieldAccess(lhs_record_ty, lhs.ty, field_name_tok, field_name); + lhs = try p.fieldAccessExtra(lhs.node, lhs_record_ty, field_name, false, &cur_offset); total_offset += cur_offset; }, .l_bracket => { @@ -6824,11 +7012,14 @@ fn offsetofMemberDesignator(p: *Parser, base_ty: Type, want_bits: bool) Error!Re try ptr.lvalConversion(p); try index.lvalConversion(p); - if (!index.ty.isInt()) try p.errTok(.invalid_index, l_bracket_tok); - try p.checkArrayBounds(index, lhs, l_bracket_tok); + if (index.ty.isInt()) { + try p.checkArrayBounds(index, lhs, l_bracket_tok); + } else { + try p.errTok(.invalid_index, l_bracket_tok); + } try index.saveValue(p); - try ptr.bin(p, .array_access_expr, index); + try ptr.bin(p, .array_access_expr, index, l_bracket_tok); lhs = ptr; }, else => break, @@ -6867,6 +7058,7 @@ fn unExpr(p: *Parser) Error!Result { .tag = .addr_of_label, .data = .{ .decl_ref = name_tok }, .ty = result_ty, + .loc = @enumFromInt(address_tok), }), 
.ty = result_ty, }; @@ -6886,19 +7078,21 @@ fn unExpr(p: *Parser) Error!Result { { if (tree.isBitfield(member_node)) try p.errTok(.addr_of_bitfield, tok); } - if (!tree.isLval(operand.node)) { + if (!tree.isLval(operand.node) and !operand.ty.is(.invalid)) { try p.errTok(.addr_of_rvalue, tok); } if (operand.ty.qual.register) try p.errTok(.addr_of_register, tok); - const elem_ty = try p.arena.create(Type); - elem_ty.* = operand.ty; - operand.ty = Type{ - .specifier = .pointer, - .data = .{ .sub_type = elem_ty }, - }; + if (!operand.ty.is(.invalid)) { + const elem_ty = try p.arena.create(Type); + elem_ty.* = operand.ty; + operand.ty = Type{ + .specifier = .pointer, + .data = .{ .sub_type = elem_ty }, + }; + } try operand.saveValue(p); - try operand.un(p, .addr_of_expr); + try operand.un(p, .addr_of_expr, tok); return operand; }, .asterisk => { @@ -6917,7 +7111,7 @@ fn unExpr(p: *Parser) Error!Result { try p.errStr(.deref_incomplete_ty_ptr, asterisk_loc, try p.typeStr(operand.ty)); } operand.ty.qual = .{}; - try operand.un(p, .deref_expr); + try operand.un(p, .deref_expr, tok); return operand; }, .plus => { @@ -6943,12 +7137,12 @@ fn unExpr(p: *Parser) Error!Result { try p.errStr(.invalid_argument_un, tok, try p.typeStr(operand.ty)); try operand.usualUnaryConversion(p, tok); - if (operand.val.is(.int, p.comp) or operand.val.is(.float, p.comp)) { + if (operand.val.isArithmetic(p.comp)) { _ = try operand.val.sub(Value.zero, operand.val, operand.ty, p.comp); } else { operand.val = .{}; } - try operand.un(p, .negate_expr); + try operand.un(p, .negate_expr, tok); return operand; }, .plus_plus => { @@ -6974,7 +7168,7 @@ fn unExpr(p: *Parser) Error!Result { operand.val = .{}; } - try operand.un(p, .pre_inc_expr); + try operand.un(p, .pre_inc_expr, tok); return operand; }, .minus_minus => { @@ -7000,7 +7194,7 @@ fn unExpr(p: *Parser) Error!Result { operand.val = .{}; } - try operand.un(p, .pre_dec_expr); + try operand.un(p, .pre_dec_expr, tok); return operand; }, .tilde => { 
@@ -7016,11 +7210,14 @@ fn unExpr(p: *Parser) Error!Result { } } else if (operand.ty.isComplex()) { try p.errStr(.complex_conj, tok, try p.typeStr(operand.ty)); + if (operand.val.is(.complex, p.comp)) { + operand.val = try operand.val.complexConj(operand.ty, p.comp); + } } else { try p.errStr(.invalid_argument_un, tok, try p.typeStr(operand.ty)); operand.val = .{}; } - try operand.un(p, .bit_not_expr); + try operand.un(p, .bit_not_expr, tok); return operand; }, .bang => { @@ -7045,7 +7242,7 @@ fn unExpr(p: *Parser) Error!Result { } } operand.ty = .{ .specifier = .int }; - try operand.un(p, .bool_not_expr); + try operand.un(p, .bool_not_expr, tok); return operand; }, .keyword_sizeof => { @@ -7089,7 +7286,7 @@ fn unExpr(p: *Parser) Error!Result { res.ty = p.comp.types.size; } } - try res.un(p, .sizeof_expr); + try res.un(p, .sizeof_expr, tok); return res; }, .keyword_alignof, @@ -7127,7 +7324,7 @@ fn unExpr(p: *Parser) Error!Result { try p.errStr(.invalid_alignof, expected_paren, try p.typeStr(res.ty)); res.ty = Type.invalid; } - try res.un(p, .alignof_expr); + try res.un(p, .alignof_expr, tok); return res; }, .keyword_extension => { @@ -7147,15 +7344,18 @@ fn unExpr(p: *Parser) Error!Result { var operand = try p.castExpr(); try operand.expect(p); try operand.lvalConversion(p); + if (operand.ty.is(.invalid)) return Result.invalid; if (!operand.ty.isInt() and !operand.ty.isFloat()) { try p.errStr(.invalid_imag, imag_tok, try p.typeStr(operand.ty)); } - if (operand.ty.isReal()) { + if (operand.ty.isComplex()) { + operand.val = try operand.val.imaginaryPart(p.comp); + } else if (operand.ty.isReal()) { switch (p.comp.langopts.emulate) { .msvc => {}, // Doesn't support `_Complex` or `__imag` in the first place .gcc => operand.val = Value.zero, .clang => { - if (operand.val.is(.int, p.comp)) { + if (operand.val.is(.int, p.comp) or operand.val.is(.float, p.comp)) { operand.val = Value.zero; } else { operand.val = .{}; @@ -7165,7 +7365,7 @@ fn unExpr(p: *Parser) Error!Result 
{ } // convert _Complex T to T operand.ty = operand.ty.makeReal(); - try operand.un(p, .imag_expr); + try operand.un(p, .imag_expr, tok); return operand; }, .keyword_real1, .keyword_real2 => { @@ -7175,12 +7375,14 @@ fn unExpr(p: *Parser) Error!Result { var operand = try p.castExpr(); try operand.expect(p); try operand.lvalConversion(p); + if (operand.ty.is(.invalid)) return Result.invalid; if (!operand.ty.isInt() and !operand.ty.isFloat()) { try p.errStr(.invalid_real, real_tok, try p.typeStr(operand.ty)); } // convert _Complex T to T operand.ty = operand.ty.makeReal(); - try operand.un(p, .real_expr); + operand.val = try operand.val.realPart(p.comp); + try operand.un(p, .real_expr, tok); return operand; }, else => { @@ -7253,7 +7455,7 @@ fn compoundLiteral(p: *Parser) Error!Result { if (d.constexpr) |_| { // TODO error if not constexpr } - try init_list_expr.un(p, tag); + try init_list_expr.un(p, tag, l_paren); return init_list_expr; } @@ -7284,7 +7486,7 @@ fn suffixExpr(p: *Parser, lhs: Result) Error!Result { } try operand.usualUnaryConversion(p, p.tok_i); - try operand.un(p, .post_inc_expr); + try operand.un(p, .post_inc_expr, p.tok_i); return operand; }, .minus_minus => { @@ -7302,7 +7504,7 @@ fn suffixExpr(p: *Parser, lhs: Result) Error!Result { } try operand.usualUnaryConversion(p, p.tok_i); - try operand.un(p, .post_dec_expr); + try operand.un(p, .post_dec_expr, p.tok_i); return operand; }, .l_bracket => { @@ -7319,12 +7521,18 @@ fn suffixExpr(p: *Parser, lhs: Result) Error!Result { try index.lvalConversion(p); if (ptr.ty.isPtr()) { ptr.ty = ptr.ty.elemType(); - if (!index.ty.isInt()) try p.errTok(.invalid_index, l_bracket); - try p.checkArrayBounds(index_before_conversion, array_before_conversion, l_bracket); + if (index.ty.isInt()) { + try p.checkArrayBounds(index_before_conversion, array_before_conversion, l_bracket); + } else { + try p.errTok(.invalid_index, l_bracket); + } } else if (index.ty.isPtr()) { index.ty = index.ty.elemType(); - if 
(!ptr.ty.isInt()) try p.errTok(.invalid_index, l_bracket); - try p.checkArrayBounds(array_before_conversion, index_before_conversion, l_bracket); + if (ptr.ty.isInt()) { + try p.checkArrayBounds(array_before_conversion, index_before_conversion, l_bracket); + } else { + try p.errTok(.invalid_index, l_bracket); + } std.mem.swap(Result, &ptr, &index); } else { try p.errTok(.invalid_subscript, l_bracket); @@ -7332,7 +7540,7 @@ fn suffixExpr(p: *Parser, lhs: Result) Error!Result { try ptr.saveValue(p); try index.saveValue(p); - try ptr.bin(p, .array_access_expr, index); + try ptr.bin(p, .array_access_expr, index, l_bracket); return ptr; }, .period => { @@ -7364,16 +7572,12 @@ fn fieldAccess( const expr_ty = lhs.ty; const is_ptr = expr_ty.isPtr(); const expr_base_ty = if (is_ptr) expr_ty.elemType() else expr_ty; - const record_ty = expr_base_ty.canonicalize(.standard); + const record_ty = expr_base_ty.getRecord() orelse { + try p.errStr(.expected_record_ty, field_name_tok, try p.typeStr(expr_ty)); + return error.ParsingFailed; + }; - switch (record_ty.specifier) { - .@"struct", .@"union" => {}, - else => { - try p.errStr(.expected_record_ty, field_name_tok, try p.typeStr(expr_ty)); - return error.ParsingFailed; - }, - } - if (record_ty.hasIncompleteSize()) { + if (record_ty.isIncomplete()) { try p.errStr(.deref_incomplete_ty_ptr, field_name_tok - 2, try p.typeStr(expr_base_ty)); return error.ParsingFailed; } @@ -7386,7 +7590,7 @@ fn fieldAccess( return p.fieldAccessExtra(lhs.node, record_ty, field_name, is_arrow, &discard); } -fn validateFieldAccess(p: *Parser, record_ty: Type, expr_ty: Type, field_name_tok: TokenIndex, field_name: StringId) Error!void { +fn validateFieldAccess(p: *Parser, record_ty: *const Type.Record, expr_ty: Type, field_name_tok: TokenIndex, field_name: StringId) Error!void { if (record_ty.hasField(field_name)) return; p.strings.items.len = 0; @@ -7401,8 +7605,8 @@ fn validateFieldAccess(p: *Parser, record_ty: Type, expr_ty: Type, field_name_to 
return error.ParsingFailed; } -fn fieldAccessExtra(p: *Parser, lhs: NodeIndex, record_ty: Type, field_name: StringId, is_arrow: bool, offset_bits: *u64) Error!Result { - for (record_ty.data.record.fields, 0..) |f, i| { +fn fieldAccessExtra(p: *Parser, lhs: NodeIndex, record_ty: *const Type.Record, field_name: StringId, is_arrow: bool, offset_bits: *u64) Error!Result { + for (record_ty.fields, 0..) |f, i| { if (f.isAnonymousRecord()) { if (!f.ty.hasField(field_name)) continue; const inner = try p.addNode(.{ @@ -7410,7 +7614,7 @@ fn fieldAccessExtra(p: *Parser, lhs: NodeIndex, record_ty: Type, field_name: Str .ty = f.ty, .data = .{ .member = .{ .lhs = lhs, .index = @intCast(i) } }, }); - const ret = p.fieldAccessExtra(inner, f.ty, field_name, false, offset_bits); + const ret = p.fieldAccessExtra(inner, f.ty.getRecord().?, field_name, false, offset_bits); offset_bits.* = f.layout.offset_bits; return ret; } @@ -7527,6 +7731,23 @@ fn callExpr(p: *Parser, lhs: Result) Error!Result { continue; } const p_ty = params[arg_count].ty; + if (p_ty.specifier == .static_array) { + const arg_array_len: u64 = arg.ty.arrayLen() orelse std.math.maxInt(u64); + const param_array_len: u64 = p_ty.arrayLen().?; + if (arg_array_len < param_array_len) { + const extra = Diagnostics.Message.Extra{ .arguments = .{ + .expected = @intCast(arg_array_len), + .actual = @intCast(param_array_len), + } }; + try p.errExtra(.array_argument_too_small, param_tok, extra); + try p.errTok(.callee_with_static_array, params[arg_count].name_tok); + } + if (arg.val.isZero(p.comp)) { + try p.errTok(.non_null_argument, param_tok); + try p.errTok(.callee_with_static_array, params[arg_count].name_tok); + } + } + if (call_expr.shouldCoerceArg(arg_count)) { try arg.coerce(p, p_ty, param_tok, .{ .arg = params[arg_count].name_tok }); } @@ -7618,7 +7839,7 @@ fn primaryExpr(p: *Parser) Error!Result { var e = try p.expr(); try e.expect(p); try p.expectClosing(l_paren, .r_paren); - try e.un(p, .paren_expr); + try e.un(p, 
.paren_expr, l_paren); return e; } switch (p.tok_ids[p.tok_i]) { @@ -7626,6 +7847,10 @@ fn primaryExpr(p: *Parser) Error!Result { const name_tok = try p.expectIdentifier(); const name = p.tokSlice(name_tok); const interned_name = try StrInt.intern(p.comp, name); + if (interned_name == p.auto_type_decl_name) { + try p.errStr(.auto_type_self_initialized, name_tok, name); + return error.ParsingFailed; + } if (p.syms.findSymbol(interned_name)) |sym| { try p.checkDeprecatedUnavailable(sym.ty, name_tok, sym.tok); if (sym.kind == .constexpr) { @@ -7636,6 +7861,7 @@ fn primaryExpr(p: *Parser) Error!Result { .tag = .decl_ref_expr, .ty = sym.ty, .data = .{ .decl_ref = name_tok }, + .loc = @enumFromInt(name_tok), }), }; } @@ -7653,6 +7879,7 @@ fn primaryExpr(p: *Parser) Error!Result { .tag = if (sym.kind == .enumeration) .enumeration_ref else .decl_ref_expr, .ty = sym.ty, .data = .{ .decl_ref = name_tok }, + .loc = @enumFromInt(name_tok), }), }; } @@ -7679,6 +7906,7 @@ fn primaryExpr(p: *Parser) Error!Result { .tag = .builtin_call_expr_one, .ty = some.ty, .data = .{ .decl = .{ .name = name_tok, .node = .none } }, + .loc = @enumFromInt(name_tok), }), }; } @@ -7696,6 +7924,7 @@ fn primaryExpr(p: *Parser) Error!Result { .ty = ty, .tag = .fn_proto, .data = .{ .decl = .{ .name = name_tok } }, + .loc = @enumFromInt(name_tok), }); try p.decl_buf.append(node); @@ -7707,6 +7936,7 @@ fn primaryExpr(p: *Parser) Error!Result { .tag = .decl_ref_expr, .ty = ty, .data = .{ .decl_ref = name_tok }, + .loc = @enumFromInt(name_tok), }), }; } @@ -7714,11 +7944,12 @@ fn primaryExpr(p: *Parser) Error!Result { return error.ParsingFailed; }, .keyword_true, .keyword_false => |id| { + const tok_i = p.tok_i; p.tok_i += 1; const res = Result{ .val = Value.fromBool(id == .keyword_true), .ty = .{ .specifier = .bool }, - .node = try p.addNode(.{ .tag = .bool_literal, .ty = .{ .specifier = .bool }, .data = undefined }), + .node = try p.addNode(.{ .tag = .bool_literal, .ty = .{ .specifier = .bool }, .data = 
undefined, .loc = @enumFromInt(tok_i) }), }; std.debug.assert(!p.in_macro); // Should have been replaced with .one / .zero try p.value_map.put(res.node, res.val); @@ -7734,6 +7965,7 @@ fn primaryExpr(p: *Parser) Error!Result { .tag = .nullptr_literal, .ty = .{ .specifier = .nullptr_t }, .data = undefined, + .loc = @enumFromInt(p.tok_i), }), }; }, @@ -7770,6 +8002,7 @@ fn primaryExpr(p: *Parser) Error!Result { .tag = .decl_ref_expr, .ty = ty, .data = .{ .decl_ref = tok }, + .loc = @enumFromInt(tok), }), }; }, @@ -7805,6 +8038,7 @@ fn primaryExpr(p: *Parser) Error!Result { .tag = .decl_ref_expr, .ty = ty, .data = .{ .decl_ref = p.tok_i }, + .loc = @enumFromInt(p.tok_i), }), }; }, @@ -7824,16 +8058,16 @@ fn primaryExpr(p: *Parser) Error!Result { .unterminated_char_literal, => return p.charLiteral(), .zero => { - p.tok_i += 1; + defer p.tok_i += 1; var res: Result = .{ .val = Value.zero, .ty = if (p.in_macro) p.comp.types.intmax else Type.int }; - res.node = try p.addNode(.{ .tag = .int_literal, .ty = res.ty, .data = undefined }); + res.node = try p.addNode(.{ .tag = .int_literal, .ty = res.ty, .data = undefined, .loc = @enumFromInt(p.tok_i) }); if (!p.in_macro) try p.value_map.put(res.node, res.val); return res; }, .one => { - p.tok_i += 1; + defer p.tok_i += 1; var res: Result = .{ .val = Value.one, .ty = if (p.in_macro) p.comp.types.intmax else Type.int }; - res.node = try p.addNode(.{ .tag = .int_literal, .ty = res.ty, .data = undefined }); + res.node = try p.addNode(.{ .tag = .int_literal, .ty = res.ty, .data = undefined, .loc = @enumFromInt(p.tok_i) }); if (!p.in_macro) try p.value_map.put(res.node, res.val); return res; }, @@ -7841,7 +8075,7 @@ fn primaryExpr(p: *Parser) Error!Result { .embed_byte => { assert(!p.in_macro); const loc = p.pp.tokens.items(.loc)[p.tok_i]; - p.tok_i += 1; + defer p.tok_i += 1; const buf = p.comp.getSource(.generated).buf[loc.byte_offset..]; var byte: u8 = buf[0] - '0'; for (buf[1..]) |c| { @@ -7850,7 +8084,7 @@ fn primaryExpr(p: 
*Parser) Error!Result { byte += c - '0'; } var res: Result = .{ .val = try Value.int(byte, p.comp) }; - res.node = try p.addNode(.{ .tag = .int_literal, .ty = res.ty, .data = undefined }); + res.node = try p.addNode(.{ .tag = .int_literal, .ty = res.ty, .data = undefined, .loc = @enumFromInt(p.tok_i) }); try p.value_map.put(res.node, res.val); return res; }, @@ -7869,17 +8103,19 @@ fn makePredefinedIdentifier(p: *Parser, strings_top: usize) !Result { const slice = p.strings.items[strings_top..]; const val = try Value.intern(p.comp, .{ .bytes = slice }); - const str_lit = try p.addNode(.{ .tag = .string_literal_expr, .ty = ty, .data = undefined }); + const str_lit = try p.addNode(.{ .tag = .string_literal_expr, .ty = ty, .data = undefined, .loc = @enumFromInt(p.tok_i) }); if (!p.in_macro) try p.value_map.put(str_lit, val); return Result{ .ty = ty, .node = try p.addNode(.{ .tag = .implicit_static_var, .ty = ty, .data = .{ .decl = .{ .name = p.tok_i, .node = str_lit } }, + .loc = @enumFromInt(p.tok_i), }) }; } fn stringLiteral(p: *Parser) Error!Result { + const string_start = p.tok_i; var string_end = p.tok_i; var string_kind: text_literal.Kind = .char; while (text_literal.Kind.classify(p.tok_ids[string_end], .string_literal)) |next| : (string_end += 1) { @@ -7894,13 +8130,17 @@ fn stringLiteral(p: *Parser) Error!Result { return error.ParsingFailed; } } - assert(string_end > p.tok_i); + const count = string_end - p.tok_i; + assert(count > 0); const char_width = string_kind.charUnitSize(p.comp); const strings_top = p.strings.items.len; defer p.strings.items.len = strings_top; + const literal_start = mem.alignForward(usize, strings_top, @intFromEnum(char_width)); + try p.strings.resize(literal_start); + while (p.tok_i < string_end) : (p.tok_i += 1) { const this_kind = text_literal.Kind.classify(p.tok_ids[p.tok_i], .string_literal).?; const slice = this_kind.contentSlice(p.tokSlice(p.tok_i)); @@ -7940,12 +8180,18 @@ fn stringLiteral(p: *Parser) Error!Result { }, } }, - 
.improperly_encoded => |bytes| p.strings.appendSliceAssumeCapacity(bytes), + .improperly_encoded => |bytes| { + if (count > 1) { + try p.errTok(.illegal_char_encoding_error, p.tok_i); + return error.ParsingFailed; + } + p.strings.appendSliceAssumeCapacity(bytes); + }, .utf8_text => |view| { switch (char_width) { .@"1" => p.strings.appendSliceAssumeCapacity(view.bytes), .@"2" => { - const capacity_slice: []align(@alignOf(u16)) u8 = @alignCast(p.strings.unusedCapacitySlice()); + const capacity_slice: []align(@alignOf(u16)) u8 = @alignCast(p.strings.allocatedSlice()[literal_start..]); const dest_len = std.mem.alignBackward(usize, capacity_slice.len, 2); const dest = std.mem.bytesAsSlice(u16, capacity_slice[0..dest_len]); const words_written = std.unicode.utf8ToUtf16Le(dest, view.bytes) catch unreachable; @@ -7966,7 +8212,7 @@ fn stringLiteral(p: *Parser) Error!Result { } } p.strings.appendNTimesAssumeCapacity(0, @intFromEnum(char_width)); - const slice = p.strings.items[strings_top..]; + const slice = p.strings.items[literal_start..]; // TODO this won't do anything if there is a cache hit const interned_align = mem.alignForward( @@ -7987,7 +8233,7 @@ fn stringLiteral(p: *Parser) Error!Result { }, .val = val, }; - res.node = try p.addNode(.{ .tag = .string_literal_expr, .ty = res.ty, .data = undefined }); + res.node = try p.addNode(.{ .tag = .string_literal_expr, .ty = res.ty, .data = undefined, .loc = @enumFromInt(string_start) }); if (!p.in_macro) try p.value_map.put(res.node, res.val); return res; } @@ -8004,7 +8250,7 @@ fn charLiteral(p: *Parser) Error!Result { return .{ .ty = Type.int, .val = Value.zero, - .node = try p.addNode(.{ .tag = .char_literal, .ty = Type.int, .data = undefined }), + .node = try p.addNode(.{ .tag = .char_literal, .ty = Type.int, .data = undefined, .loc = @enumFromInt(p.tok_i) }), }; }; if (char_kind == .utf_8) try p.err(.u8_char_lit); @@ -8013,7 +8259,7 @@ fn charLiteral(p: *Parser) Error!Result { const slice = 
char_kind.contentSlice(p.tokSlice(p.tok_i)); var is_multichar = false; - if (slice.len == 1 and std.ascii.isAscii(slice[0])) { + if (slice.len == 1 and std.ascii.isASCII(slice[0])) { // fast path: single unescaped ASCII char val = slice[0]; } else { @@ -8096,25 +8342,25 @@ fn charLiteral(p: *Parser) Error!Result { // > that of the single character or escape sequence is converted to type int. // This conversion only matters if `char` is signed and has a high-order bit of `1` if (char_kind == .char and !is_multichar and val > 0x7F and p.comp.getCharSignedness() == .signed) { - try value.intCast(.{ .specifier = .char }, p.comp); + _ = try value.intCast(.{ .specifier = .char }, p.comp); } const res = Result{ .ty = if (p.in_macro) macro_ty else ty, .val = value, - .node = try p.addNode(.{ .tag = .char_literal, .ty = ty, .data = undefined }), + .node = try p.addNode(.{ .tag = .char_literal, .ty = ty, .data = undefined, .loc = @enumFromInt(p.tok_i) }), }; if (!p.in_macro) try p.value_map.put(res.node, res.val); return res; } -fn parseFloat(p: *Parser, buf: []const u8, suffix: NumberSuffix) !Result { +fn parseFloat(p: *Parser, buf: []const u8, suffix: NumberSuffix, tok_i: TokenIndex) !Result { const ty = Type{ .specifier = switch (suffix) { .None, .I => .double, .F, .IF => .float, - .F16 => .float16, + .F16, .IF16 => .float16, .L, .IL => .long_double, - .W, .IW => .float80, + .W, .IW => p.comp.float80Type().?.specifier, .Q, .IQ, .F128, .IF128 => .float128, else => unreachable, } }; @@ -8140,21 +8386,29 @@ fn parseFloat(p: *Parser, buf: []const u8, suffix: NumberSuffix) !Result { }); var res = Result{ .ty = ty, - .node = try p.addNode(.{ .tag = .float_literal, .ty = ty, .data = undefined }), + .node = try p.addNode(.{ .tag = .float_literal, .ty = ty, .data = undefined, .loc = @enumFromInt(tok_i) }), .val = val, }; if (suffix.isImaginary()) { try p.err(.gnu_imaginary_constant); res.ty = .{ .specifier = switch (suffix) { .I => .complex_double, + .IF16 => .complex_float16, .IF 
=> .complex_float, .IL => .complex_long_double, - .IW => .complex_float80, + .IW => p.comp.float80Type().?.makeComplex().specifier, .IQ, .IF128 => .complex_float128, else => unreachable, } }; - res.val = .{}; // TODO add complex values - try res.un(p, .imaginary_literal); + res.val = try Value.intern(p.comp, switch (res.ty.bitSizeof(p.comp).?) { + 32 => .{ .complex = .{ .cf16 = .{ 0.0, val.toFloat(f16, p.comp) } } }, + 64 => .{ .complex = .{ .cf32 = .{ 0.0, val.toFloat(f32, p.comp) } } }, + 128 => .{ .complex = .{ .cf64 = .{ 0.0, val.toFloat(f64, p.comp) } } }, + 160 => .{ .complex = .{ .cf80 = .{ 0.0, val.toFloat(f80, p.comp) } } }, + 256 => .{ .complex = .{ .cf128 = .{ 0.0, val.toFloat(f128, p.comp) } } }, + else => unreachable, + }); + try res.un(p, .imaginary_literal, tok_i); } return res; } @@ -8233,12 +8487,14 @@ fn fixedSizeInt(p: *Parser, base: u8, buf: []const u8, suffix: NumberSuffix, tok if (overflow) { try p.errTok(.int_literal_too_big, tok_i); res.ty = .{ .specifier = .ulong_long }; - res.node = try p.addNode(.{ .tag = .int_literal, .ty = res.ty, .data = undefined }); + res.node = try p.addNode(.{ .tag = .int_literal, .ty = res.ty, .data = undefined, .loc = @enumFromInt(tok_i) }); if (!p.in_macro) try p.value_map.put(res.node, res.val); return res; } + const interned_val = try Value.int(val, p.comp); if (suffix.isSignedInteger()) { - if (val > p.comp.types.intmax.maxInt(p.comp)) { + const max_int = try Value.maxInt(p.comp.types.intmax, p.comp); + if (interned_val.compare(.gt, max_int, p.comp)) { try p.errTok(.implicitly_unsigned_literal, tok_i); } } @@ -8266,13 +8522,23 @@ fn fixedSizeInt(p: *Parser, base: u8, buf: []const u8, suffix: NumberSuffix, tok for (specs) |spec| { res.ty = Type{ .specifier = spec }; if (res.ty.compareIntegerRanks(suffix_ty, p.comp).compare(.lt)) continue; - const max_int = res.ty.maxInt(p.comp); - if (val <= max_int) break; + const max_int = try Value.maxInt(res.ty, p.comp); + if (interned_val.compare(.lte, max_int, p.comp)) 
break; } else { - res.ty = .{ .specifier = .ulong_long }; + res.ty = .{ .specifier = spec: { + if (p.comp.langopts.emulate == .gcc) { + if (target_util.hasInt128(p.comp.target)) { + break :spec .int128; + } else { + break :spec .long_long; + } + } else { + break :spec .ulong_long; + } + } }; } - res.node = try p.addNode(.{ .tag = .int_literal, .ty = res.ty, .data = undefined }); + res.node = try p.addNode(.{ .tag = .int_literal, .ty = res.ty, .data = undefined, .loc = @enumFromInt(tok_i) }); if (!p.in_macro) try p.value_map.put(res.node, res.val); return res; } @@ -8291,7 +8557,7 @@ fn parseInt(p: *Parser, prefix: NumberPrefix, buf: []const u8, suffix: NumberSuf try p.errTok(.gnu_imaginary_constant, tok_i); res.ty = res.ty.makeComplex(); res.val = .{}; - try res.un(p, .imaginary_literal); + try res.un(p, .imaginary_literal, tok_i); } return res; } @@ -8326,17 +8592,6 @@ fn bitInt(p: *Parser, base: u8, buf: []const u8, suffix: NumberSuffix, tok_i: To // value of the constant is positive or was specified in hexadecimal or octal notation. 
const sign_bits = @intFromBool(suffix.isSignedInteger()); const bits_needed = count + sign_bits; - if (bits_needed > Compilation.bit_int_max_bits) { - const specifier: Type.Builder.Specifier = switch (suffix) { - .WB => .{ .bit_int = 0 }, - .UWB => .{ .ubit_int = 0 }, - .IWB => .{ .complex_bit_int = 0 }, - .IUWB => .{ .complex_ubit_int = 0 }, - else => unreachable, - }; - try p.errStr(.bit_int_too_big, tok_i, specifier.str(p.comp.langopts).?); - return error.ParsingFailed; - } break :blk @intCast(bits_needed); }; @@ -8347,7 +8602,7 @@ fn bitInt(p: *Parser, base: u8, buf: []const u8, suffix: NumberSuffix, tok_i: To .data = .{ .int = .{ .bits = bits_needed, .signedness = suffix.signedness() } }, }, }; - res.node = try p.addNode(.{ .tag = .int_literal, .ty = res.ty, .data = undefined }); + res.node = try p.addNode(.{ .tag = .int_literal, .ty = res.ty, .data = undefined, .loc = @enumFromInt(tok_i) }); if (!p.in_macro) try p.value_map.put(res.node, res.val); return res; } @@ -8420,6 +8675,10 @@ pub fn parseNumberToken(p: *Parser, tok_i: TokenIndex) !Result { } return error.ParsingFailed; }; + if (suffix.isFloat80() and p.comp.float80Type() == null) { + try p.errStr(.invalid_float_suffix, tok_i, suffix_str); + return error.ParsingFailed; + } if (is_float) { assert(prefix == .hex or prefix == .decimal); @@ -8428,7 +8687,7 @@ pub fn parseNumberToken(p: *Parser, tok_i: TokenIndex) !Result { return error.ParsingFailed; } const number = buf[0 .. 
buf.len - suffix_str.len]; - return p.parseFloat(number, suffix); + return p.parseFloat(number, suffix, tok_i); } else { return p.parseInt(prefix, int_part, suffix, tok_i); } @@ -8444,7 +8703,6 @@ fn ppNum(p: *Parser) Error!Result { } res.ty = if (res.ty.isUnsignedInt(p.comp)) p.comp.types.intmax.makeIntegerUnsigned() else p.comp.types.intmax; } else if (res.val.opt_ref != .none) { - // TODO add complex values try p.value_map.put(res.node, res.val); } return res; @@ -8465,6 +8723,7 @@ fn parseNoEval(p: *Parser, comptime func: fn (*Parser) Error!Result) Error!Resul /// : typeName ':' assignExpr /// | keyword_default ':' assignExpr fn genericSelection(p: *Parser) Error!Result { + const kw_generic = p.tok_i; p.tok_i += 1; const l_paren = try p.expectToken(.l_paren); const controlling_tok = p.tok_i; @@ -8508,17 +8767,23 @@ fn genericSelection(p: *Parser) Error!Result { try p.errStr(.generic_duplicate, start, try p.typeStr(ty)); try p.errStr(.generic_duplicate_here, chosen_tok, try p.typeStr(ty)); } - for (p.list_buf.items[list_buf_top + 1 ..], p.decl_buf.items[decl_buf_top..]) |item, prev_tok| { - const prev_ty = p.nodes.items(.ty)[@intFromEnum(item)]; - if (prev_ty.eql(ty, p.comp, true)) { - try p.errStr(.generic_duplicate, start, try p.typeStr(ty)); - try p.errStr(.generic_duplicate_here, @intFromEnum(prev_tok), try p.typeStr(ty)); + const list_buf = p.list_buf.items[list_buf_top + 1 ..]; + const decl_buf = p.decl_buf.items[decl_buf_top..]; + if (list_buf.len == decl_buf.len) { + // If these do not have the same length, there is already an error + for (list_buf, decl_buf) |item, prev_tok| { + const prev_ty = p.nodes.items(.ty)[@intFromEnum(item)]; + if (prev_ty.eql(ty, p.comp, true)) { + try p.errStr(.generic_duplicate, start, try p.typeStr(ty)); + try p.errStr(.generic_duplicate_here, @intFromEnum(prev_tok), try p.typeStr(ty)); + } } } try p.list_buf.append(try p.addNode(.{ .tag = .generic_association_expr, .ty = ty, .data = .{ .un = node.node }, + .loc = 
@enumFromInt(start), })); try p.decl_buf.append(@enumFromInt(start)); } else if (p.eatToken(.keyword_default)) |tok| { @@ -8542,10 +8807,12 @@ fn genericSelection(p: *Parser) Error!Result { try p.expectClosing(l_paren, .r_paren); if (chosen.node == .none) { - if (default_tok != null) { + if (default_tok) |tok| { try p.list_buf.insert(list_buf_top + 1, try p.addNode(.{ .tag = .generic_default_expr, .data = .{ .un = default.node }, + .ty = default.ty, + .loc = @enumFromInt(tok), })); chosen = default; } else { @@ -8556,11 +8823,15 @@ fn genericSelection(p: *Parser) Error!Result { try p.list_buf.insert(list_buf_top + 1, try p.addNode(.{ .tag = .generic_association_expr, .data = .{ .un = chosen.node }, + .ty = chosen.ty, + .loc = @enumFromInt(chosen_tok), })); - if (default_tok != null) { + if (default_tok) |tok| { try p.list_buf.append(try p.addNode(.{ .tag = .generic_default_expr, - .data = .{ .un = chosen.node }, + .data = .{ .un = default.node }, + .ty = default.ty, + .loc = @enumFromInt(tok), })); } } @@ -8568,7 +8839,8 @@ fn genericSelection(p: *Parser) Error!Result { var generic_node: Tree.Node = .{ .tag = .generic_expr_one, .ty = chosen.ty, - .data = .{ .bin = .{ .lhs = controlling.node, .rhs = chosen.node } }, + .data = .{ .two = .{ controlling.node, chosen.node } }, + .loc = @enumFromInt(kw_generic), }; const associations = p.list_buf.items[list_buf_top..]; if (associations.len > 2) { // associations[0] == controlling.node @@ -8578,3 +8850,42 @@ fn genericSelection(p: *Parser) Error!Result { chosen.node = try p.addNode(generic_node); return chosen; } + +test "Node locations" { + var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); + defer comp.deinit(); + + const file = try comp.addSourceFromBuffer("file.c", + \\int foo = 5; + \\int bar = 10; + \\int main(void) {} + \\ + ); + + const builtin_macros = try comp.generateBuiltinMacros(.no_system_defines); + + var pp = Preprocessor.init(&comp); + defer pp.deinit(); + try pp.addBuiltinMacros(); + + _ = 
try pp.preprocess(builtin_macros); + + const eof = try pp.preprocess(file); + try pp.addToken(eof); + + var tree = try Parser.parse(&pp); + defer tree.deinit(); + + try std.testing.expectEqual(0, comp.diagnostics.list.items.len); + for (tree.root_decls, 0..) |node, i| { + const tok_i = tree.nodeTok(node).?; + const slice = tree.tokSlice(tok_i); + const expected = switch (i) { + 0 => "foo", + 1 => "bar", + 2 => "main", + else => unreachable, + }; + try std.testing.expectEqualStrings(expected, slice); + } +} diff --git a/lib/compiler/aro/aro/Preprocessor.zig b/lib/compiler/aro/aro/Preprocessor.zig index aa0a64c3e721..63bf0858369d 100644 --- a/lib/compiler/aro/aro/Preprocessor.zig +++ b/lib/compiler/aro/aro/Preprocessor.zig @@ -97,6 +97,11 @@ poisoned_identifiers: std.StringHashMap(void), /// Map from Source.Id to macro name in the `#ifndef` condition which guards the source, if any include_guards: std.AutoHashMapUnmanaged(Source.Id, []const u8) = .{}, +/// Store `keyword_define` and `keyword_undef` tokens. +/// Used to implement preprocessor debug dump options +/// Must be false unless in -E mode (parser does not handle those token types) +store_macro_tokens: bool = false, + /// Memory is retained to avoid allocation on every single token. 
top_expansion_buf: ExpandBuf, @@ -622,9 +627,12 @@ fn preprocessExtra(pp: *Preprocessor, source: Source) MacroError!TokenWithExpans } if_level -= 1; }, - .keyword_define => try pp.define(&tokenizer), + .keyword_define => try pp.define(&tokenizer, directive), .keyword_undef => { const macro_name = (try pp.expectMacroName(&tokenizer)) orelse continue; + if (pp.store_macro_tokens) { + try pp.addToken(tokFromRaw(directive)); + } _ = pp.defines.remove(macro_name); try pp.expectNl(&tokenizer); @@ -975,7 +983,7 @@ fn expr(pp: *Preprocessor, tokenizer: *Tokenizer) MacroError!bool { .tok_i = @intCast(token_state.tokens_len), .arena = pp.arena.allocator(), .in_macro = true, - .strings = std.ArrayList(u8).init(pp.comp.gpa), + .strings = std.ArrayListAligned(u8, 4).init(pp.comp.gpa), .data = undefined, .value_map = undefined, @@ -1328,19 +1336,41 @@ fn stringify(pp: *Preprocessor, tokens: []const TokenWithExpansionLocs) !void { try pp.char_buf.append(c); } } - if (pp.char_buf.items[pp.char_buf.items.len - 1] == '\\') { + try pp.char_buf.ensureUnusedCapacity(2); + if (pp.char_buf.items[pp.char_buf.items.len - 1] != '\\') { + pp.char_buf.appendSliceAssumeCapacity("\"\n"); + return; + } + pp.char_buf.appendAssumeCapacity('"'); + var tokenizer: Tokenizer = .{ + .buf = pp.char_buf.items, + .index = 0, + .source = .generated, + .langopts = pp.comp.langopts, + .line = 0, + }; + const item = tokenizer.next(); + if (item.id == .unterminated_string_literal) { const tok = tokens[tokens.len - 1]; try pp.comp.addDiagnostic(.{ .tag = .invalid_pp_stringify_escape, .loc = tok.loc, }, tok.expansionSlice()); - pp.char_buf.items.len -= 1; + pp.char_buf.items.len -= 2; // erase unpaired backslash and appended end quote + pp.char_buf.appendAssumeCapacity('"'); } - try pp.char_buf.appendSlice("\"\n"); + pp.char_buf.appendAssumeCapacity('\n'); } fn reconstructIncludeString(pp: *Preprocessor, param_toks: []const TokenWithExpansionLocs, embed_args: ?*[]const TokenWithExpansionLocs, first: 
TokenWithExpansionLocs) !?[]const u8 { - assert(param_toks.len != 0); + if (param_toks.len == 0) { + try pp.comp.addDiagnostic(.{ + .tag = .expected_filename, + .loc = first.loc, + }, first.expansionSlice()); + return null; + } + const char_top = pp.char_buf.items.len; defer pp.char_buf.items.len = char_top; @@ -1539,11 +1569,13 @@ fn getPasteArgs(args: []const TokenWithExpansionLocs) []const TokenWithExpansion fn expandFuncMacro( pp: *Preprocessor, - loc: Source.Location, + macro_tok: TokenWithExpansionLocs, func_macro: *const Macro, args: *const MacroArguments, expanded_args: *const MacroArguments, + hideset_arg: Hideset.Index, ) MacroError!ExpandBuf { + var hideset = hideset_arg; var buf = ExpandBuf.init(pp.gpa); try buf.ensureTotalCapacity(func_macro.tokens.len); errdefer buf.deinit(); @@ -1594,16 +1626,21 @@ fn expandFuncMacro( }, else => &[1]TokenWithExpansionLocs{tokFromRaw(raw_next)}, }; - try pp.pasteTokens(&buf, next); if (next.len != 0) break; }, .macro_param_no_expand => { + if (tok_i + 1 < func_macro.tokens.len and func_macro.tokens[tok_i + 1].id == .hash_hash) { + hideset = pp.hideset.get(tokFromRaw(func_macro.tokens[tok_i + 1]).loc); + } const slice = getPasteArgs(args.items[raw.end]); const raw_loc = Source.Location{ .id = raw.source, .byte_offset = raw.start, .line = raw.line }; try bufCopyTokens(&buf, slice, &.{raw_loc}); }, .macro_param => { + if (tok_i + 1 < func_macro.tokens.len and func_macro.tokens[tok_i + 1].id == .hash_hash) { + hideset = pp.hideset.get(tokFromRaw(func_macro.tokens[tok_i + 1]).loc); + } const arg = expanded_args.items[raw.end]; const raw_loc = Source.Location{ .id = raw.source, .byte_offset = raw.start, .line = raw.line }; try bufCopyTokens(&buf, arg, &.{raw_loc}); @@ -1642,9 +1679,9 @@ fn expandFuncMacro( const arg = expanded_args.items[0]; const result = if (arg.len == 0) blk: { const extra = Diagnostics.Message.Extra{ .arguments = .{ .expected = 1, .actual = 0 } }; - try pp.comp.addDiagnostic(.{ .tag = 
.expected_arguments, .loc = loc, .extra = extra }, &.{}); + try pp.comp.addDiagnostic(.{ .tag = .expected_arguments, .loc = macro_tok.loc, .extra = extra }, &.{}); break :blk false; - } else try pp.handleBuiltinMacro(raw.id, arg, loc); + } else try pp.handleBuiltinMacro(raw.id, arg, macro_tok.loc); const start = pp.comp.generated_buf.items.len; const w = pp.comp.generated_buf.writer(pp.gpa); try w.print("{}\n", .{@intFromBool(result)}); @@ -1655,7 +1692,7 @@ fn expandFuncMacro( const not_found = "0\n"; const result = if (arg.len == 0) blk: { const extra = Diagnostics.Message.Extra{ .arguments = .{ .expected = 1, .actual = 0 } }; - try pp.comp.addDiagnostic(.{ .tag = .expected_arguments, .loc = loc, .extra = extra }, &.{}); + try pp.comp.addDiagnostic(.{ .tag = .expected_arguments, .loc = macro_tok.loc, .extra = extra }, &.{}); break :blk not_found; } else res: { var invalid: ?TokenWithExpansionLocs = null; @@ -1687,7 +1724,7 @@ fn expandFuncMacro( if (vendor_ident != null and attr_ident == null) { invalid = vendor_ident; } else if (attr_ident == null and invalid == null) { - invalid = .{ .id = .eof, .loc = loc }; + invalid = .{ .id = .eof, .loc = macro_tok.loc }; } if (invalid) |some| { try pp.comp.addDiagnostic( @@ -1731,7 +1768,7 @@ fn expandFuncMacro( const not_found = "0\n"; const result = if (arg.len == 0) blk: { const extra = Diagnostics.Message.Extra{ .arguments = .{ .expected = 1, .actual = 0 } }; - try pp.comp.addDiagnostic(.{ .tag = .expected_arguments, .loc = loc, .extra = extra }, &.{}); + try pp.comp.addDiagnostic(.{ .tag = .expected_arguments, .loc = macro_tok.loc, .extra = extra }, &.{}); break :blk not_found; } else res: { var embed_args: []const TokenWithExpansionLocs = &.{}; @@ -1877,11 +1914,11 @@ fn expandFuncMacro( break; }, }; - if (string == null and invalid == null) invalid = .{ .loc = loc, .id = .eof }; + if (string == null and invalid == null) invalid = .{ .loc = macro_tok.loc, .id = .eof }; if (invalid) |some| try pp.comp.addDiagnostic( 
.{ .tag = .pragma_operator_string_literal, .loc = some.loc }, some.expansionSlice(), - ) else try pp.pragmaOperator(string.?, loc); + ) else try pp.pragmaOperator(string.?, macro_tok.loc); }, .comma => { if (tok_i + 2 < func_macro.tokens.len and func_macro.tokens[tok_i + 1].id == .hash_hash) { @@ -1930,6 +1967,15 @@ fn expandFuncMacro( } removePlacemarkers(&buf); + const macro_expansion_locs = macro_tok.expansionSlice(); + for (buf.items) |*tok| { + try tok.addExpansionLocation(pp.gpa, &.{macro_tok.loc}); + try tok.addExpansionLocation(pp.gpa, macro_expansion_locs); + const tok_hidelist = pp.hideset.get(tok.loc); + const new_hidelist = try pp.hideset.@"union"(tok_hidelist, hideset); + try pp.hideset.put(tok.loc, new_hidelist); + } + return buf; } @@ -2207,8 +2253,10 @@ fn expandMacroExhaustive( else => |e| return e, }; assert(r_paren.id == .r_paren); + var free_arg_expansion_locs = false; defer { for (args.items) |item| { + if (free_arg_expansion_locs) for (item) |tok| TokenWithExpansionLocs.free(tok.expansion_locs, pp.gpa); pp.gpa.free(item); } args.deinit(); @@ -2234,6 +2282,7 @@ fn expandMacroExhaustive( .arguments = .{ .expected = @intCast(macro.params.len), .actual = args_count }, }; if (macro.var_args and args_count < macro.params.len) { + free_arg_expansion_locs = true; try pp.comp.addDiagnostic( .{ .tag = .expected_at_least_arguments, .loc = buf.items[idx].loc, .extra = extra }, buf.items[idx].expansionSlice(), @@ -2243,6 +2292,7 @@ fn expandMacroExhaustive( continue; } if (!macro.var_args and args_count != macro.params.len) { + free_arg_expansion_locs = true; try pp.comp.addDiagnostic( .{ .tag = .expected_arguments, .loc = buf.items[idx].loc, .extra = extra }, buf.items[idx].expansionSlice(), @@ -2264,19 +2314,9 @@ fn expandMacroExhaustive( expanded_args.appendAssumeCapacity(try expand_buf.toOwnedSlice()); } - var res = try pp.expandFuncMacro(macro_tok.loc, macro, &args, &expanded_args); + var res = try pp.expandFuncMacro(macro_tok, macro, &args, 
&expanded_args, hs); defer res.deinit(); const tokens_added = res.items.len; - - const macro_expansion_locs = macro_tok.expansionSlice(); - for (res.items) |*tok| { - try tok.addExpansionLocation(pp.gpa, &.{macro_tok.loc}); - try tok.addExpansionLocation(pp.gpa, macro_expansion_locs); - const tok_hidelist = pp.hideset.get(tok.loc); - const new_hidelist = try pp.hideset.@"union"(tok_hidelist, hs); - try pp.hideset.put(tok.loc, new_hidelist); - } - const tokens_removed = macro_scan_idx - idx + 1; for (buf.items[idx .. idx + tokens_removed]) |tok| TokenWithExpansionLocs.free(tok.expansion_locs, pp.gpa); try buf.replaceRange(idx, tokens_removed, res.items); @@ -2476,7 +2516,7 @@ fn makeGeneratedToken(pp: *Preprocessor, start: usize, id: Token.Id, source: Tok } /// Defines a new macro and warns if it is a duplicate -fn defineMacro(pp: *Preprocessor, name_tok: RawToken, macro: Macro) Error!void { +fn defineMacro(pp: *Preprocessor, define_tok: RawToken, name_tok: RawToken, macro: Macro) Error!void { const name_str = pp.tokSlice(name_tok); const gop = try pp.defines.getOrPut(pp.gpa, name_str); if (gop.found_existing and !gop.value_ptr.eql(macro, pp)) { @@ -2497,11 +2537,14 @@ fn defineMacro(pp: *Preprocessor, name_tok: RawToken, macro: Macro) Error!void { if (pp.verbose) { pp.verboseLog(name_tok, "macro {s} defined", .{name_str}); } + if (pp.store_macro_tokens) { + try pp.addToken(tokFromRaw(define_tok)); + } gop.value_ptr.* = macro; } /// Handle a #define directive. -fn define(pp: *Preprocessor, tokenizer: *Tokenizer) Error!void { +fn define(pp: *Preprocessor, tokenizer: *Tokenizer, define_tok: RawToken) Error!void { // Get macro name and validate it. const macro_name = tokenizer.nextNoWS(); if (macro_name.id == .keyword_defined) { @@ -2524,7 +2567,7 @@ fn define(pp: *Preprocessor, tokenizer: *Tokenizer) Error!void { // Check for function macros and empty defines. 
var first = tokenizer.next(); switch (first.id) { - .nl, .eof => return pp.defineMacro(macro_name, .{ + .nl, .eof => return pp.defineMacro(define_tok, macro_name, .{ .params = &.{}, .tokens = &.{}, .var_args = false, @@ -2532,7 +2575,7 @@ fn define(pp: *Preprocessor, tokenizer: *Tokenizer) Error!void { .is_func = false, }), .whitespace => first = tokenizer.next(), - .l_paren => return pp.defineFn(tokenizer, macro_name, first), + .l_paren => return pp.defineFn(tokenizer, define_tok, macro_name, first), else => try pp.err(first, .whitespace_after_macro_name), } if (first.id == .hash_hash) { @@ -2591,7 +2634,7 @@ fn define(pp: *Preprocessor, tokenizer: *Tokenizer) Error!void { } const list = try pp.arena.allocator().dupe(RawToken, pp.token_buf.items); - try pp.defineMacro(macro_name, .{ + try pp.defineMacro(define_tok, macro_name, .{ .loc = tokFromRaw(macro_name).loc, .tokens = list, .params = undefined, @@ -2601,7 +2644,7 @@ fn define(pp: *Preprocessor, tokenizer: *Tokenizer) Error!void { } /// Handle a function like #define directive. -fn defineFn(pp: *Preprocessor, tokenizer: *Tokenizer, macro_name: RawToken, l_paren: RawToken) Error!void { +fn defineFn(pp: *Preprocessor, tokenizer: *Tokenizer, define_tok: RawToken, macro_name: RawToken, l_paren: RawToken) Error!void { assert(macro_name.id.isMacroIdentifier()); var params = std.ArrayList([]const u8).init(pp.gpa); defer params.deinit(); @@ -2778,7 +2821,7 @@ fn defineFn(pp: *Preprocessor, tokenizer: *Tokenizer, macro_name: RawToken, l_pa const param_list = try pp.arena.allocator().dupe([]const u8, params.items); const token_list = try pp.arena.allocator().dupe(RawToken, pp.token_buf.items); - try pp.defineMacro(macro_name, .{ + try pp.defineMacro(define_tok, macro_name, .{ .is_func = true, .params = param_list, .var_args = var_args or gnu_var_args.len != 0, @@ -3241,8 +3284,78 @@ fn printLinemarker( // After how many empty lines are needed to replace them with linemarkers. 
const collapse_newlines = 8; +pub const DumpMode = enum { + /// Standard preprocessor output; no macros + result_only, + /// Output only #define directives for all the macros defined during the execution of the preprocessor + /// Only macros which are still defined at the end of preprocessing are printed. + /// Only the most recent definition is printed + /// Defines are printed in arbitrary order + macros_only, + /// Standard preprocessor output; but additionally output #define's and #undef's for macros as they are encountered + macros_and_result, + /// Same as macros_and_result, except only the macro name is printed for #define's + macro_names_and_result, +}; + +/// Pretty-print the macro define or undef at location `loc`. +/// We re-tokenize the directive because we are printing a macro that may have the same name as one in +/// `pp.defines` but a different definition (due to being #undef'ed and then redefined) +fn prettyPrintMacro(pp: *Preprocessor, w: anytype, loc: Source.Location, parts: enum { name_only, name_and_body }) !void { + const source = pp.comp.getSource(loc.id); + var tokenizer: Tokenizer = .{ + .buf = source.buf, + .langopts = pp.comp.langopts, + .source = source.id, + .index = loc.byte_offset, + }; + var prev_ws = false; // avoid printing multiple whitespace if /* */ comments are within the macro def + var saw_name = false; // do not print comments before the name token is seen. 
+ while (true) { + const tok = tokenizer.next(); + switch (tok.id) { + .comment => { + if (saw_name) { + prev_ws = false; + try w.print("{s}", .{pp.tokSlice(tok)}); + } + }, + .nl, .eof => break, + .whitespace => { + if (!prev_ws) { + try w.writeByte(' '); + prev_ws = true; + } + }, + else => { + prev_ws = false; + try w.print("{s}", .{pp.tokSlice(tok)}); + }, + } + if (tok.id == .identifier or tok.id == .extended_identifier) { + if (parts == .name_only) break; + saw_name = true; + } + } +} + +fn prettyPrintMacrosOnly(pp: *Preprocessor, w: anytype) !void { + var it = pp.defines.valueIterator(); + while (it.next()) |macro| { + if (macro.is_builtin) continue; + + try w.writeAll("#define "); + try pp.prettyPrintMacro(w, macro.loc, .name_and_body); + try w.writeByte('\n'); + } +} + /// Pretty print tokens and try to preserve whitespace. -pub fn prettyPrintTokens(pp: *Preprocessor, w: anytype) !void { +pub fn prettyPrintTokens(pp: *Preprocessor, w: anytype, macro_dump_mode: DumpMode) !void { + if (macro_dump_mode == .macros_only) { + return pp.prettyPrintMacrosOnly(w); + } + const tok_ids = pp.tokens.items(.id); var i: u32 = 0; @@ -3334,6 +3447,17 @@ pub fn prettyPrintTokens(pp: *Preprocessor, w: anytype) !void { try pp.printLinemarker(w, line_col.line_no, source, .@"resume"); last_nl = true; }, + .keyword_define, .keyword_undef => { + switch (macro_dump_mode) { + .macros_and_result, .macro_names_and_result => { + try w.writeByte('#'); + try pp.prettyPrintMacro(w, cur.loc, if (macro_dump_mode == .macros_and_result) .name_and_body else .name_only); + last_nl = false; + }, + .result_only => unreachable, // `pp.store_macro_tokens` should be false for standard preprocessor output + .macros_only => unreachable, // handled by prettyPrintMacrosOnly + } + }, else => { const slice = pp.expandedSlice(cur); try w.writeAll(slice); @@ -3350,7 +3474,7 @@ test "Preserve pragma tokens sometimes" { var buf = std.ArrayList(u8).init(allocator); defer buf.deinit(); - var comp = 
Compilation.init(allocator); + var comp = Compilation.init(allocator, std.fs.cwd()); defer comp.deinit(); try comp.addDefaultPragmaHandlers(); @@ -3364,7 +3488,7 @@ test "Preserve pragma tokens sometimes" { const test_runner_macros = try comp.addSourceFromBuffer("", source_text); const eof = try pp.preprocess(test_runner_macros); try pp.addToken(eof); - try pp.prettyPrintTokens(buf.writer()); + try pp.prettyPrintTokens(buf.writer(), .result_only); return allocator.dupe(u8, buf.items); } @@ -3410,7 +3534,7 @@ test "destringify" { try std.testing.expectEqualStrings(destringified, pp.char_buf.items); } }; - var comp = Compilation.init(allocator); + var comp = Compilation.init(allocator, std.fs.cwd()); defer comp.deinit(); var pp = Preprocessor.init(&comp); defer pp.deinit(); @@ -3468,7 +3592,7 @@ test "Include guards" { } fn testIncludeGuard(allocator: std.mem.Allocator, comptime template: []const u8, tok_id: RawToken.Id, expected_guards: u32) !void { - var comp = Compilation.init(allocator); + var comp = Compilation.init(allocator, std.fs.cwd()); defer comp.deinit(); var pp = Preprocessor.init(&comp); defer pp.deinit(); diff --git a/lib/compiler/aro/aro/Source.zig b/lib/compiler/aro/aro/Source.zig index 06e58ecb1615..20788af21c3e 100644 --- a/lib/compiler/aro/aro/Source.zig +++ b/lib/compiler/aro/aro/Source.zig @@ -75,7 +75,17 @@ pub fn lineCol(source: Source, loc: Location) LineCol { i += 1; continue; }; - const cp = std.unicode.utf8Decode(source.buf[i..][0..len]) catch { + const slice = source.buf[i..]; + if (len > slice.len) { + break; + } + const cp = switch (len) { + 1 => slice[0], + 2 => std.unicode.utf8Decode2(slice[0..2].*), + 3 => std.unicode.utf8Decode3(slice[0..3].*), + 4 => std.unicode.utf8Decode4(slice[0..4].*), + else => unreachable, + } catch { i += 1; continue; }; diff --git a/lib/compiler/aro/aro/SymbolStack.zig b/lib/compiler/aro/aro/SymbolStack.zig index dba722344701..be2ee20cb03d 100644 --- a/lib/compiler/aro/aro/SymbolStack.zig +++ 
b/lib/compiler/aro/aro/SymbolStack.zig @@ -178,9 +178,11 @@ pub fn defineTypedef( if (s.get(name, .vars)) |prev| { switch (prev.kind) { .typedef => { - if (!ty.eql(prev.ty, p.comp, true)) { - try p.errStr(.redefinition_of_typedef, tok, try p.typePairStrExtra(ty, " vs ", prev.ty)); - if (prev.tok != 0) try p.errTok(.previous_definition, prev.tok); + if (!prev.ty.is(.invalid)) { + if (!ty.eql(prev.ty, p.comp, true)) { + try p.errStr(.redefinition_of_typedef, tok, try p.typePairStrExtra(ty, " vs ", prev.ty)); + if (prev.tok != 0) try p.errTok(.previous_definition, prev.tok); + } } }, .enumeration, .decl, .def, .constexpr => { @@ -194,7 +196,12 @@ pub fn defineTypedef( .kind = .typedef, .name = name, .tok = tok, - .ty = ty, + .ty = .{ + .name = name, + .specifier = ty.specifier, + .qual = ty.qual, + .data = ty.data, + }, .node = node, .val = .{}, }); diff --git a/lib/compiler/aro/aro/Tokenizer.zig b/lib/compiler/aro/aro/Tokenizer.zig index c5a84b8cc0b5..f703940fd8ea 100644 --- a/lib/compiler/aro/aro/Tokenizer.zig +++ b/lib/compiler/aro/aro/Tokenizer.zig @@ -178,6 +178,8 @@ pub const Token = struct { keyword_return, keyword_short, keyword_signed, + keyword_signed1, + keyword_signed2, keyword_sizeof, keyword_static, keyword_struct, @@ -258,7 +260,6 @@ pub const Token = struct { keyword_asm, keyword_asm1, keyword_asm2, - keyword_float80, /// _Float128 keyword_float128_1, /// __float128 @@ -369,6 +370,8 @@ pub const Token = struct { .keyword_return, .keyword_short, .keyword_signed, + .keyword_signed1, + .keyword_signed2, .keyword_sizeof, .keyword_static, .keyword_struct, @@ -417,7 +420,6 @@ pub const Token = struct { .keyword_asm, .keyword_asm1, .keyword_asm2, - .keyword_float80, .keyword_float128_1, .keyword_float128_2, .keyword_int128, @@ -627,6 +629,8 @@ pub const Token = struct { .keyword_return => "return", .keyword_short => "short", .keyword_signed => "signed", + .keyword_signed1 => "__signed", + .keyword_signed2 => "__signed__", .keyword_sizeof => "sizeof", 
.keyword_static => "static", .keyword_struct => "struct", @@ -702,7 +706,6 @@ pub const Token = struct { .keyword_asm => "asm", .keyword_asm1 => "__asm", .keyword_asm2 => "__asm__", - .keyword_float80 => "__float80", .keyword_float128_1 => "_Float128", .keyword_float128_2 => "__float128", .keyword_int128 => "__int128", @@ -732,7 +735,8 @@ pub const Token = struct { pub fn symbol(id: Id) []const u8 { return switch (id) { - .macro_string, .invalid => unreachable, + .macro_string => unreachable, + .invalid => "invalid bytes", .identifier, .extended_identifier, .macro_func, @@ -873,10 +877,7 @@ pub const Token = struct { } const all_kws = std.StaticStringMap(Id).initComptime(.{ - .{ "auto", auto: { - @setEvalBranchQuota(3000); - break :auto .keyword_auto; - } }, + .{ "auto", .keyword_auto }, .{ "break", .keyword_break }, .{ "case", .keyword_case }, .{ "char", .keyword_char }, @@ -898,6 +899,8 @@ pub const Token = struct { .{ "return", .keyword_return }, .{ "short", .keyword_short }, .{ "signed", .keyword_signed }, + .{ "__signed", .keyword_signed1 }, + .{ "__signed__", .keyword_signed2 }, .{ "sizeof", .keyword_sizeof }, .{ "static", .keyword_static }, .{ "struct", .keyword_struct }, @@ -982,7 +985,6 @@ pub const Token = struct { .{ "asm", .keyword_asm }, .{ "__asm", .keyword_asm1 }, .{ "__asm__", .keyword_asm2 }, - .{ "__float80", .keyword_float80 }, .{ "_Float128", .keyword_float128_1 }, .{ "__float128", .keyword_float128_2 }, .{ "__int128", .keyword_int128 }, @@ -1300,11 +1302,17 @@ pub fn next(self: *Tokenizer) Token { else => {}, }, .char_escape_sequence => switch (c) { - '\r', '\n' => unreachable, // removed by line splicing + '\r', '\n' => { + id = .unterminated_char_literal; + break; + }, else => state = .char_literal, }, .string_escape_sequence => switch (c) { - '\r', '\n' => unreachable, // removed by line splicing + '\r', '\n' => { + id = .unterminated_string_literal; + break; + }, else => state = .string_literal, }, .identifier, .extended_identifier => 
switch (c) { @@ -1792,7 +1800,7 @@ pub fn nextNoWSComments(self: *Tokenizer) Token { /// Try to tokenize a '::' even if not supported by the current language standard. pub fn colonColon(self: *Tokenizer) Token { var tok = self.nextNoWS(); - if (tok.id == .colon and self.buf[self.index] == ':') { + if (tok.id == .colon and self.index < self.buf.len and self.buf[self.index] == ':') { self.index += 1; tok.id = .colon_colon; } @@ -2142,8 +2150,30 @@ test "C23 keywords" { }, .c23); } +test "Tokenizer fuzz test" { + var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); + defer comp.deinit(); + + const input_bytes = std.testing.fuzzInput(.{}); + if (input_bytes.len == 0) return; + + const source = try comp.addSourceFromBuffer("fuzz.c", input_bytes); + + var tokenizer: Tokenizer = .{ + .buf = source.buf, + .source = source.id, + .langopts = comp.langopts, + }; + while (true) { + const prev_index = tokenizer.index; + const tok = tokenizer.next(); + if (tok.id == .eof) break; + try std.testing.expect(prev_index < tokenizer.index); // ensure that the tokenizer always makes progress + } +} + fn expectTokensExtra(contents: []const u8, expected_tokens: []const Token.Id, standard: ?LangOpts.Standard) !void { - var comp = Compilation.init(std.testing.allocator); + var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); defer comp.deinit(); if (standard) |provided| { comp.langopts.standard = provided; diff --git a/lib/compiler/aro/aro/Tree.zig b/lib/compiler/aro/aro/Tree.zig index e353beaebc62..a1b15bd66916 100644 --- a/lib/compiler/aro/aro/Tree.zig +++ b/lib/compiler/aro/aro/Tree.zig @@ -137,15 +137,22 @@ pub const Node = struct { tag: Tag, ty: Type = .{ .specifier = .void }, data: Data, + loc: Loc = .none, pub const Range = struct { start: u32, end: u32 }; + pub const Loc = enum(u32) { + none = std.math.maxInt(u32), + _, + }; + pub const Data = union { decl: struct { name: TokenIndex, node: NodeIndex = .none, }, decl_ref: TokenIndex, + two: [2]NodeIndex, 
range: Range, if3: struct { cond: NodeIndex, @@ -277,7 +284,8 @@ pub const Tag = enum(u8) { // ====== Decl ====== - // _Static_assert + /// _Static_assert + /// loc is token index of _Static_assert static_assert, // function prototype @@ -303,17 +311,18 @@ pub const Tag = enum(u8) { threadlocal_static_var, /// __asm__("...") at file scope + /// loc is token index of __asm__ keyword file_scope_asm, // typedef declaration typedef, // container declarations - /// { lhs; rhs; } + /// { two[0]; two[1]; } struct_decl_two, - /// { lhs; rhs; } + /// { two[0]; two[1]; } union_decl_two, - /// { lhs, rhs, } + /// { two[0], two[1], } enum_decl_two, /// { range } struct_decl, @@ -339,7 +348,7 @@ pub const Tag = enum(u8) { // ====== Stmt ====== labeled_stmt, - /// { first; second; } first and second may be null + /// { two[0]; two[1]; } first and second may be null compound_stmt_two, /// { data } compound_stmt, @@ -476,7 +485,7 @@ pub const Tag = enum(u8) { real_expr, /// lhs[rhs] lhs is pointer/array type, rhs is integer type array_access_expr, - /// first(second) second may be 0 + /// two[0](two[1]) two[1] may be 0 call_expr_one, /// data[0](data[1..]) call_expr, @@ -515,7 +524,7 @@ pub const Tag = enum(u8) { sizeof_expr, /// _Alignof(un?) 
alignof_expr, - /// _Generic(controlling lhs, chosen rhs) + /// _Generic(controlling two[0], chosen two[1]) generic_expr_one, /// _Generic(controlling range[0], chosen range[1], rest range[2..]) generic_expr, @@ -534,28 +543,34 @@ pub const Tag = enum(u8) { // ====== Initializer expressions ====== - /// { lhs, rhs } + /// { two[0], two[1] } array_init_expr_two, /// { range } array_init_expr, - /// { lhs, rhs } + /// { two[0], two[1] } struct_init_expr_two, /// { range } struct_init_expr, /// { union_init } union_init_expr, + /// (ty){ un } + /// loc is token index of l_paren compound_literal_expr, /// (static ty){ un } + /// loc is token index of l_paren static_compound_literal_expr, /// (thread_local ty){ un } + /// loc is token index of l_paren thread_local_compound_literal_expr, /// (static thread_local ty){ un } + /// loc is token index of l_paren static_thread_local_compound_literal_expr, /// Inserted at the end of a function body if no return stmt is found. /// ty is the functions return type /// data is return_zero which is true if the function is called "main" and ty is compatible with int + /// loc is token index of closing r_brace of function implicit_return, /// Inserted in array_init_expr to represent unspecified elements. 
@@ -608,6 +623,57 @@ pub fn bitfieldWidth(tree: *const Tree, node: NodeIndex, inspect_lval: bool) ?u3 } } +const CallableResultUsage = struct { + /// name token of the thing being called, for diagnostics + tok: TokenIndex, + /// true if `nodiscard` attribute present + nodiscard: bool, + /// true if `warn_unused_result` attribute present + warn_unused_result: bool, +}; + +pub fn callableResultUsage(tree: *const Tree, node: NodeIndex) ?CallableResultUsage { + const data = tree.nodes.items(.data); + + var cur_node = node; + while (true) switch (tree.nodes.items(.tag)[@intFromEnum(cur_node)]) { + .decl_ref_expr => { + const tok = data[@intFromEnum(cur_node)].decl_ref; + const fn_ty = tree.nodes.items(.ty)[@intFromEnum(node)].elemType(); + return .{ + .tok = tok, + .nodiscard = fn_ty.hasAttribute(.nodiscard), + .warn_unused_result = fn_ty.hasAttribute(.warn_unused_result), + }; + }, + .paren_expr => cur_node = data[@intFromEnum(cur_node)].un, + .comma_expr => cur_node = data[@intFromEnum(cur_node)].bin.rhs, + + .explicit_cast, .implicit_cast => cur_node = data[@intFromEnum(cur_node)].cast.operand, + .addr_of_expr, .deref_expr => cur_node = data[@intFromEnum(cur_node)].un, + .call_expr_one => cur_node = data[@intFromEnum(cur_node)].two[0], + .call_expr => cur_node = tree.data[data[@intFromEnum(cur_node)].range.start], + .member_access_expr, .member_access_ptr_expr => { + const member = data[@intFromEnum(cur_node)].member; + var ty = tree.nodes.items(.ty)[@intFromEnum(member.lhs)]; + if (ty.isPtr()) ty = ty.elemType(); + const record = ty.getRecord().?; + const field = record.fields[member.index]; + const attributes = if (record.field_attributes) |attrs| attrs[member.index] else &.{}; + return .{ + .tok = field.name_tok, + .nodiscard = for (attributes) |attr| { + if (attr.tag == .nodiscard) break true; + } else false, + .warn_unused_result = for (attributes) |attr| { + if (attr.tag == .warn_unused_result) break true; + } else false, + }; + }, + else => return null, + }; 
+} + pub fn isLval(tree: *const Tree, node: NodeIndex) bool { var is_const: bool = undefined; return tree.isLvalExtra(node, &is_const); @@ -672,17 +738,66 @@ pub fn isLvalExtra(tree: *const Tree, node: NodeIndex, is_const: *bool) bool { } } +/// This should only be used for node tags that represent AST nodes which have an arbitrary number of children +/// In particular it should *not* be used for nodes with .un or .bin data types +/// +/// For call expressions, child_nodes[0] is the function pointer being called and child_nodes[1..] +/// are the arguments +/// +/// For generic selection expressions, child_nodes[0] is the controlling expression, +/// child_nodes[1] is the chosen expression (it is a syntax error for there to be no chosen expression), +/// and child_nodes[2..] are the remaining expressions. +pub fn childNodes(tree: *const Tree, node: NodeIndex) []const NodeIndex { + const tags = tree.nodes.items(.tag); + const data = tree.nodes.items(.data); + switch (tags[@intFromEnum(node)]) { + .compound_stmt_two, + .array_init_expr_two, + .struct_init_expr_two, + .enum_decl_two, + .struct_decl_two, + .union_decl_two, + .call_expr_one, + .generic_expr_one, + => { + const index: u32 = @intFromEnum(node); + const end = std.mem.indexOfScalar(NodeIndex, &data[index].two, .none) orelse 2; + return data[index].two[0..end]; + }, + .compound_stmt, + .array_init_expr, + .struct_init_expr, + .enum_decl, + .struct_decl, + .union_decl, + .call_expr, + .generic_expr, + => { + const range = data[@intFromEnum(node)].range; + return tree.data[range.start..range.end]; + }, + else => unreachable, + } +} + pub fn tokSlice(tree: *const Tree, tok_i: TokenIndex) []const u8 { if (tree.tokens.items(.id)[tok_i].lexeme()) |some| return some; const loc = tree.tokens.items(.loc)[tok_i]; - var tmp_tokenizer = Tokenizer{ - .buf = tree.comp.getSource(loc.id).buf, - .langopts = tree.comp.langopts, - .index = loc.byte_offset, - .source = .generated, + return tree.comp.locSlice(loc); +} + +pub fn 
nodeTok(tree: *const Tree, node: NodeIndex) ?TokenIndex { + std.debug.assert(node != .none); + const loc = tree.nodes.items(.loc)[@intFromEnum(node)]; + return switch (loc) { + .none => null, + else => |tok_i| @intFromEnum(tok_i), }; - const tok = tmp_tokenizer.next(); - return tmp_tokenizer.buf[tok.start..tok.end]; +} + +pub fn nodeLoc(tree: *const Tree, node: NodeIndex) ?Source.Location { + const tok_i = tree.nodeTok(node) orelse return null; + return tree.tokens.items(.loc)[@intFromEnum(tok_i)]; } pub fn dump(tree: *const Tree, config: std.io.tty.Config, writer: anytype) !void { @@ -766,6 +881,10 @@ fn dumpNode( } try config.setColor(w, TYPE); try w.writeByte('\''); + const name = ty.getName(); + if (name != .empty) { + try w.print("{s}': '", .{mapper.lookup(name)}); + } try ty.dump(mapper, tree.comp.langopts, w); try w.writeByte('\''); @@ -794,7 +913,9 @@ fn dumpNode( if (ty.specifier == .attributed) { try config.setColor(w, ATTRIBUTE); - for (ty.data.attributed.attributes) |attr| { + var it = Attribute.Iterator.initType(ty); + while (it.next()) |item| { + const attr, _ = item; try w.writeByteNTimes(' ', level + half); try w.print("attr: {s}", .{@tagName(attr.tag)}); try tree.dumpAttribute(attr, w); @@ -900,9 +1021,16 @@ fn dumpNode( .enum_decl, .struct_decl, .union_decl, + .compound_stmt_two, + .array_init_expr_two, + .struct_init_expr_two, + .enum_decl_two, + .struct_decl_two, + .union_decl_two, => { + const child_nodes = tree.childNodes(node); const maybe_field_attributes = if (ty.getRecord()) |record| record.field_attributes else null; - for (tree.data[data.range.start..data.range.end], 0..) |stmt, i| { + for (child_nodes, 0..) 
|stmt, i| { if (i != 0) try w.writeByte('\n'); try tree.dumpNode(stmt, level + delta, mapper, config, w); if (maybe_field_attributes) |field_attributes| { @@ -914,33 +1042,6 @@ fn dumpNode( } } }, - .compound_stmt_two, - .array_init_expr_two, - .struct_init_expr_two, - .enum_decl_two, - .struct_decl_two, - .union_decl_two, - => { - var attr_array = [2][]const Attribute{ &.{}, &.{} }; - const empty: [][]const Attribute = &attr_array; - const field_attributes = if (ty.getRecord()) |record| (record.field_attributes orelse empty.ptr) else empty.ptr; - if (data.bin.lhs != .none) { - try tree.dumpNode(data.bin.lhs, level + delta, mapper, config, w); - if (field_attributes[0].len > 0) { - try config.setColor(w, ATTRIBUTE); - try tree.dumpFieldAttributes(field_attributes[0], level + delta + half, w); - try config.setColor(w, .reset); - } - } - if (data.bin.rhs != .none) { - try tree.dumpNode(data.bin.rhs, level + delta, mapper, config, w); - if (field_attributes[1].len > 0) { - try config.setColor(w, ATTRIBUTE); - try tree.dumpFieldAttributes(field_attributes[1], level + delta + half, w); - try config.setColor(w, .reset); - } - } - }, .union_init_expr => { try w.writeByteNTimes(' ', level + half); try w.writeAll("field index: "); @@ -1130,23 +1231,21 @@ fn dumpNode( try tree.dumpNode(data.un, level + delta, mapper, config, w); } }, - .call_expr => { - try w.writeByteNTimes(' ', level + half); - try w.writeAll("lhs:\n"); - try tree.dumpNode(tree.data[data.range.start], level + delta, mapper, config, w); + .call_expr, .call_expr_one => { + const child_nodes = tree.childNodes(node); + const fn_ptr = child_nodes[0]; + const args = child_nodes[1..]; - try w.writeByteNTimes(' ', level + half); - try w.writeAll("args:\n"); - for (tree.data[data.range.start + 1 .. 
data.range.end]) |arg| try tree.dumpNode(arg, level + delta, mapper, config, w); - }, - .call_expr_one => { try w.writeByteNTimes(' ', level + half); try w.writeAll("lhs:\n"); - try tree.dumpNode(data.bin.lhs, level + delta, mapper, config, w); - if (data.bin.rhs != .none) { + try tree.dumpNode(fn_ptr, level + delta, mapper, config, w); + + if (args.len > 0) { try w.writeByteNTimes(' ', level + half); - try w.writeAll("arg:\n"); - try tree.dumpNode(data.bin.rhs, level + delta, mapper, config, w); + try w.writeAll("args:\n"); + for (args) |arg| { + try tree.dumpNode(arg, level + delta, mapper, config, w); + } } }, .builtin_call_expr => { @@ -1295,28 +1394,25 @@ fn dumpNode( try tree.dumpNode(data.un, level + delta, mapper, config, w); } }, - .generic_expr_one => { - try w.writeByteNTimes(' ', level + 1); - try w.writeAll("controlling:\n"); - try tree.dumpNode(data.bin.lhs, level + delta, mapper, config, w); - try w.writeByteNTimes(' ', level + 1); - if (data.bin.rhs != .none) { - try w.writeAll("chosen:\n"); - try tree.dumpNode(data.bin.rhs, level + delta, mapper, config, w); - } - }, - .generic_expr => { - const nodes = tree.data[data.range.start..data.range.end]; + .generic_expr, .generic_expr_one => { + const child_nodes = tree.childNodes(node); + const controlling = child_nodes[0]; + const chosen = child_nodes[1]; + const rest = child_nodes[2..]; + try w.writeByteNTimes(' ', level + 1); try w.writeAll("controlling:\n"); - try tree.dumpNode(nodes[0], level + delta, mapper, config, w); + try tree.dumpNode(controlling, level + delta, mapper, config, w); try w.writeByteNTimes(' ', level + 1); try w.writeAll("chosen:\n"); - try tree.dumpNode(nodes[1], level + delta, mapper, config, w); - try w.writeByteNTimes(' ', level + 1); - try w.writeAll("rest:\n"); - for (nodes[2..]) |expr| { - try tree.dumpNode(expr, level + delta, mapper, config, w); + try tree.dumpNode(chosen, level + delta, mapper, config, w); + + if (rest.len > 0) { + try w.writeByteNTimes(' ', level + 1); 
+ try w.writeAll("rest:\n"); + for (rest) |expr| { + try tree.dumpNode(expr, level + delta, mapper, config, w); + } } }, .generic_association_expr, .generic_default_expr, .stmt_expr, .imaginary_literal => { diff --git a/lib/compiler/aro/aro/Tree/number_affixes.zig b/lib/compiler/aro/aro/Tree/number_affixes.zig index 7f01e9f2e7ef..38ef6b8a5667 100644 --- a/lib/compiler/aro/aro/Tree/number_affixes.zig +++ b/lib/compiler/aro/aro/Tree/number_affixes.zig @@ -74,8 +74,8 @@ pub const Suffix = enum { // float and imaginary float F, IF, - // _Float16 - F16, + // _Float16 and imaginary _Float16 + F16, IF16, // __float80 W, @@ -129,6 +129,7 @@ pub const Suffix = enum { .{ .I, &.{"I"} }, .{ .IL, &.{ "I", "L" } }, + .{ .IF16, &.{ "I", "F16" } }, .{ .IF, &.{ "I", "F" } }, .{ .IW, &.{ "I", "W" } }, .{ .IF128, &.{ "I", "F128" } }, @@ -161,7 +162,7 @@ pub const Suffix = enum { pub fn isImaginary(suffix: Suffix) bool { return switch (suffix) { - .I, .IL, .IF, .IU, .IUL, .ILL, .IULL, .IWB, .IUWB, .IF128, .IQ, .IW => true, + .I, .IL, .IF, .IU, .IUL, .ILL, .IULL, .IWB, .IUWB, .IF128, .IQ, .IW, .IF16 => true, .None, .L, .F16, .F, .U, .UL, .LL, .ULL, .WB, .UWB, .F128, .Q, .W => false, }; } @@ -170,7 +171,7 @@ pub const Suffix = enum { return switch (suffix) { .None, .L, .LL, .I, .IL, .ILL, .WB, .IWB => true, .U, .UL, .ULL, .IU, .IUL, .IULL, .UWB, .IUWB => false, - .F, .IF, .F16, .F128, .IF128, .Q, .IQ, .W, .IW => unreachable, + .F, .IF, .F16, .F128, .IF128, .Q, .IQ, .W, .IW, .IF16 => unreachable, }; } @@ -184,4 +185,8 @@ pub const Suffix = enum { else => false, }; } + + pub fn isFloat80(suffix: Suffix) bool { + return suffix == .W or suffix == .IW; + } }; diff --git a/lib/compiler/aro/aro/Type.zig b/lib/compiler/aro/aro/Type.zig index 13fa8ce2e204..8ab2d3164a77 100644 --- a/lib/compiler/aro/aro/Type.zig +++ b/lib/compiler/aro/aro/Type.zig @@ -146,17 +146,14 @@ pub const Attributed = struct { attributes: []Attribute, base: Type, - pub fn create(allocator: std.mem.Allocator, base: Type, 
existing_attributes: []const Attribute, attributes: []const Attribute) !*Attributed { + pub fn create(allocator: std.mem.Allocator, base_ty: Type, attributes: []const Attribute) !*Attributed { const attributed_type = try allocator.create(Attributed); errdefer allocator.destroy(attributed_type); - - const all_attrs = try allocator.alloc(Attribute, existing_attributes.len + attributes.len); - @memcpy(all_attrs[0..existing_attributes.len], existing_attributes); - @memcpy(all_attrs[existing_attributes.len..], attributes); + const duped = try allocator.dupe(Attribute, attributes); attributed_type.* = .{ - .attributes = all_attrs, - .base = base, + .attributes = duped, + .base = base_ty, }; return attributed_type; } @@ -190,13 +187,10 @@ pub const Enum = struct { } }; -// might not need all 4 of these when finished, -// but currently it helps having all 4 when diff-ing -// the rust code. pub const TypeLayout = struct { /// The size of the type in bits. /// - /// This is the value returned by `sizeof` and C and `std::mem::size_of` in Rust + /// This is the value returned by `sizeof` in C /// (but in bits instead of bytes). This is a multiple of `pointer_alignment_bits`. size_bits: u64, /// The alignment of the type, in bits, when used as a field in a record. @@ -205,9 +199,7 @@ pub const TypeLayout = struct { /// cases in GCC where `_Alignof` returns a smaller value. field_alignment_bits: u32, /// The alignment, in bits, of valid pointers to this type. - /// - /// This is the value returned by `std::mem::align_of` in Rust - /// (but in bits instead of bytes). `size_bits` is a multiple of this value. + /// `size_bits` is a multiple of this value. pointer_alignment_bits: u32, /// The required alignment of the type in bits. 
/// @@ -301,6 +293,15 @@ pub const Record = struct { } return false; } + + pub fn hasField(self: *const Record, name: StringId) bool { + std.debug.assert(!self.isIncomplete()); + for (self.fields) |f| { + if (f.isAnonymousRecord() and f.ty.getRecord().?.hasField(name)) return true; + if (name == f.name) return true; + } + return false; + } }; pub const Specifier = enum { @@ -354,12 +355,11 @@ pub const Specifier = enum { float, double, long_double, - float80, float128, + complex_float16, complex_float, complex_double, complex_long_double, - complex_float80, complex_float128, // data.sub_type @@ -422,6 +422,8 @@ data: union { specifier: Specifier, qual: Qualifiers = .{}, decayed: bool = false, +/// typedef name, if any +name: StringId = .empty, pub const int = Type{ .specifier = .int }; pub const invalid = Type{ .specifier = .invalid }; @@ -435,8 +437,8 @@ pub fn is(ty: Type, specifier: Specifier) bool { pub fn withAttributes(self: Type, allocator: std.mem.Allocator, attributes: []const Attribute) !Type { if (attributes.len == 0) return self; - const attributed_type = try Type.Attributed.create(allocator, self, self.getAttributes(), attributes); - return Type{ .specifier = .attributed, .data = .{ .attributed = attributed_type }, .decayed = self.decayed }; + const attributed_type = try Type.Attributed.create(allocator, self, attributes); + return .{ .specifier = .attributed, .data = .{ .attributed = attributed_type }, .decayed = self.decayed }; } pub fn isCallable(ty: Type) ?Type { @@ -470,6 +472,23 @@ pub fn isArray(ty: Type) bool { }; } +/// Must only be used to set the length of an incomplete array as determined by its initializer +pub fn setIncompleteArrayLen(ty: *Type, len: u64) void { + switch (ty.specifier) { + .incomplete_array => { + // Modifying .data is exceptionally allowed for .incomplete_array. 
+ ty.data.array.len = len; + ty.specifier = .array; + }, + + .typeof_type => ty.data.sub_type.setIncompleteArrayLen(len), + .typeof_expr => ty.data.expr.ty.setIncompleteArrayLen(len), + .attributed => ty.data.attributed.base.setIncompleteArrayLen(len), + + else => unreachable, + } +} + /// Whether the type is promoted if used as a variadic argument or as an argument to a function with no prototype fn undergoesDefaultArgPromotion(ty: Type, comp: *const Compilation) bool { return switch (ty.specifier) { @@ -536,7 +555,7 @@ pub fn isFloat(ty: Type) bool { return switch (ty.specifier) { // zig fmt: off .float, .double, .long_double, .complex_float, .complex_double, .complex_long_double, - .fp16, .float16, .float80, .float128, .complex_float80, .complex_float128 => true, + .fp16, .float16, .float128, .complex_float128, .complex_float16 => true, // zig fmt: on .typeof_type => ty.data.sub_type.isFloat(), .typeof_expr => ty.data.expr.ty.isFloat(), @@ -548,11 +567,11 @@ pub fn isFloat(ty: Type) bool { pub fn isReal(ty: Type) bool { return switch (ty.specifier) { // zig fmt: off - .complex_float, .complex_double, .complex_long_double, .complex_float80, + .complex_float, .complex_double, .complex_long_double, .complex_float128, .complex_char, .complex_schar, .complex_uchar, .complex_short, .complex_ushort, .complex_int, .complex_uint, .complex_long, .complex_ulong, .complex_long_long, .complex_ulong_long, .complex_int128, .complex_uint128, - .complex_bit_int => false, + .complex_bit_int, .complex_float16 => false, // zig fmt: on .typeof_type => ty.data.sub_type.isReal(), .typeof_expr => ty.data.expr.ty.isReal(), @@ -564,11 +583,11 @@ pub fn isReal(ty: Type) bool { pub fn isComplex(ty: Type) bool { return switch (ty.specifier) { // zig fmt: off - .complex_float, .complex_double, .complex_long_double, .complex_float80, + .complex_float, .complex_double, .complex_long_double, .complex_float128, .complex_char, .complex_schar, .complex_uchar, .complex_short, .complex_ushort, 
.complex_int, .complex_uint, .complex_long, .complex_ulong, .complex_long_long, .complex_ulong_long, .complex_int128, .complex_uint128, - .complex_bit_int => true, + .complex_bit_int, .complex_float16 => true, // zig fmt: on .typeof_type => ty.data.sub_type.isComplex(), .typeof_expr => ty.data.expr.ty.isComplex(), @@ -671,11 +690,11 @@ pub fn elemType(ty: Type) Type { .attributed => ty.data.attributed.base.elemType(), .invalid => Type.invalid, // zig fmt: off - .complex_float, .complex_double, .complex_long_double, .complex_float80, + .complex_float, .complex_double, .complex_long_double, .complex_float128, .complex_char, .complex_schar, .complex_uchar, .complex_short, .complex_ushort, .complex_int, .complex_uint, .complex_long, .complex_ulong, .complex_long_long, .complex_ulong_long, .complex_int128, .complex_uint128, - .complex_bit_int => ty.makeReal(), + .complex_bit_int, .complex_float16 => ty.makeReal(), // zig fmt: on else => unreachable, }; @@ -703,6 +722,16 @@ pub fn params(ty: Type) []Func.Param { }; } +/// Returns true if the return value or any param of `ty` is `.invalid` +/// Asserts that ty is a function type +pub fn isInvalidFunc(ty: Type) bool { + if (ty.returnType().is(.invalid)) return true; + for (ty.params()) |param| { + if (param.ty.is(.invalid)) return true; + } + return false; +} + pub fn arrayLen(ty: Type) ?u64 { return switch (ty.specifier) { .array, .static_array => ty.data.array.len, @@ -726,15 +755,6 @@ pub fn anyQual(ty: Type) bool { }; } -pub fn getAttributes(ty: Type) []const Attribute { - return switch (ty.specifier) { - .attributed => ty.data.attributed.attributes, - .typeof_type => ty.data.sub_type.getAttributes(), - .typeof_expr => ty.data.expr.ty.getAttributes(), - else => &.{}, - }; -} - pub fn getRecord(ty: Type) ?*const Type.Record { return switch (ty.specifier) { .attributed => ty.data.attributed.base.getRecord(), @@ -795,8 +815,8 @@ fn realIntegerConversion(a: Type, b: Type, comp: *const Compilation) Type { pub fn 
makeIntegerUnsigned(ty: Type) Type { // TODO discards attributed/typeof - var base = ty.canonicalize(.standard); - switch (base.specifier) { + var base_ty = ty.canonicalize(.standard); + switch (base_ty.specifier) { // zig fmt: off .uchar, .ushort, .uint, .ulong, .ulong_long, .uint128, .complex_uchar, .complex_ushort, .complex_uint, .complex_ulong, .complex_ulong_long, .complex_uint128, @@ -804,21 +824,21 @@ pub fn makeIntegerUnsigned(ty: Type) Type { // zig fmt: on .char, .complex_char => { - base.specifier = @enumFromInt(@intFromEnum(base.specifier) + 2); - return base; + base_ty.specifier = @enumFromInt(@intFromEnum(base_ty.specifier) + 2); + return base_ty; }, // zig fmt: off .schar, .short, .int, .long, .long_long, .int128, .complex_schar, .complex_short, .complex_int, .complex_long, .complex_long_long, .complex_int128 => { - base.specifier = @enumFromInt(@intFromEnum(base.specifier) + 1); - return base; + base_ty.specifier = @enumFromInt(@intFromEnum(base_ty.specifier) + 1); + return base_ty; }, // zig fmt: on .bit_int, .complex_bit_int => { - base.data.int.signedness = .unsigned; - return base; + base_ty.data.int.signedness = .unsigned; + return base_ty; }, else => unreachable, } @@ -837,6 +857,8 @@ pub fn integerPromotion(ty: Type, comp: *Compilation) Type { switch (specifier) { .@"enum" => { if (ty.hasIncompleteSize()) return .{ .specifier = .int }; + if (ty.data.@"enum".fixed) return ty.data.@"enum".tag_ty.integerPromotion(comp); + specifier = ty.data.@"enum".tag_ty.specifier; }, .bit_int, .complex_bit_int => return .{ .specifier = specifier, .data = ty.data }, @@ -915,53 +937,7 @@ pub fn hasUnboundVLA(ty: Type) bool { } pub fn hasField(ty: Type, name: StringId) bool { - switch (ty.specifier) { - .@"struct" => { - std.debug.assert(!ty.data.record.isIncomplete()); - for (ty.data.record.fields) |f| { - if (f.isAnonymousRecord() and f.ty.hasField(name)) return true; - if (name == f.name) return true; - } - }, - .@"union" => { - 
std.debug.assert(!ty.data.record.isIncomplete()); - for (ty.data.record.fields) |f| { - if (f.isAnonymousRecord() and f.ty.hasField(name)) return true; - if (name == f.name) return true; - } - }, - .typeof_type => return ty.data.sub_type.hasField(name), - .typeof_expr => return ty.data.expr.ty.hasField(name), - .attributed => return ty.data.attributed.base.hasField(name), - .invalid => return false, - else => unreachable, - } - return false; -} - -// TODO handle bitints -pub fn minInt(ty: Type, comp: *const Compilation) i64 { - std.debug.assert(ty.isInt()); - if (ty.isUnsignedInt(comp)) return 0; - return switch (ty.sizeof(comp).?) { - 1 => std.math.minInt(i8), - 2 => std.math.minInt(i16), - 4 => std.math.minInt(i32), - 8 => std.math.minInt(i64), - else => unreachable, - }; -} - -// TODO handle bitints -pub fn maxInt(ty: Type, comp: *const Compilation) u64 { - std.debug.assert(ty.isInt()); - return switch (ty.sizeof(comp).?) { - 1 => if (ty.isUnsignedInt(comp)) @as(u64, std.math.maxInt(u8)) else std.math.maxInt(i8), - 2 => if (ty.isUnsignedInt(comp)) @as(u64, std.math.maxInt(u16)) else std.math.maxInt(i16), - 4 => if (ty.isUnsignedInt(comp)) @as(u64, std.math.maxInt(u32)) else std.math.maxInt(i32), - 8 => if (ty.isUnsignedInt(comp)) @as(u64, std.math.maxInt(u64)) else std.math.maxInt(i64), - else => unreachable, - }; + return ty.getRecord().?.hasField(name); } const TypeSizeOrder = enum { @@ -1004,16 +980,15 @@ pub fn sizeof(ty: Type, comp: *const Compilation) ?u64 { .fp16, .float16 => 2, .float => comp.target.cTypeByteSize(.float), .double => comp.target.cTypeByteSize(.double), - .float80 => 16, .float128 => 16, .bit_int => { - return std.mem.alignForward(u64, (ty.data.int.bits + 7) / 8, ty.alignof(comp)); + return std.mem.alignForward(u64, (@as(u32, ty.data.int.bits) + 7) / 8, ty.alignof(comp)); }, // zig fmt: off .complex_char, .complex_schar, .complex_uchar, .complex_short, .complex_ushort, .complex_int, .complex_uint, .complex_long, .complex_ulong, 
.complex_long_long, .complex_ulong_long, .complex_int128, .complex_uint128, .complex_float, .complex_double, - .complex_long_double, .complex_float80, .complex_float128, .complex_bit_int, + .complex_long_double, .complex_float128, .complex_bit_int, .complex_float16, => return 2 * ty.makeReal().sizeof(comp).?, // zig fmt: on .pointer => unreachable, @@ -1050,7 +1025,6 @@ pub fn bitSizeof(ty: Type, comp: *const Compilation) ?u64 { .attributed => ty.data.attributed.base.bitSizeof(comp), .bit_int => return ty.data.int.bits, .long_double => comp.target.cTypeBitSize(.longdouble), - .float80 => return 80, else => 8 * (ty.sizeof(comp) orelse return null), }; } @@ -1100,7 +1074,7 @@ pub fn alignof(ty: Type, comp: *const Compilation) u29 { .complex_char, .complex_schar, .complex_uchar, .complex_short, .complex_ushort, .complex_int, .complex_uint, .complex_long, .complex_ulong, .complex_long_long, .complex_ulong_long, .complex_int128, .complex_uint128, .complex_float, .complex_double, - .complex_long_double, .complex_float80, .complex_float128, .complex_bit_int, + .complex_long_double, .complex_float128, .complex_bit_int, .complex_float16, => return ty.makeReal().alignof(comp), // zig fmt: on @@ -1114,10 +1088,15 @@ pub fn alignof(ty: Type, comp: *const Compilation) u29 { .long_long => comp.target.cTypeAlignment(.longlong), .ulong_long => comp.target.cTypeAlignment(.ulonglong), - .bit_int => @min( - std.math.ceilPowerOfTwoPromote(u16, (ty.data.int.bits + 7) / 8), - 16, // comp.target.maxIntAlignment(), please use your own logic for this value as it is implementation-defined - ), + .bit_int => { + // https://www.open-std.org/jtc1/sc22/wg14/www/docs/n2709.pdf + // _BitInt(N) types align with existing calling conventions. They have the same size and alignment as the + // smallest basic type that can contain them. Types that are larger than __int64_t are conceptually treated + // as struct of register size chunks. 
The number of chunks is the smallest number that can contain the type. + if (ty.data.int.bits > 64) return 8; + const basic_type = comp.intLeastN(ty.data.int.bits, ty.data.int.signedness); + return basic_type.alignof(comp); + }, .float => comp.target.cTypeAlignment(.float), .double => comp.target.cTypeAlignment(.double), @@ -1126,7 +1105,7 @@ pub fn alignof(ty: Type, comp: *const Compilation) u29 { .int128, .uint128 => if (comp.target.cpu.arch == .s390x and comp.target.os.tag == .linux and comp.target.isGnu()) 8 else 16, .fp16, .float16 => 2, - .float80, .float128 => 16, + .float128 => 16, .pointer, .static_array, .nullptr_t, @@ -1142,7 +1121,11 @@ pub fn alignof(ty: Type, comp: *const Compilation) u29 { }; } -pub const QualHandling = enum { standard, preserve_quals }; +// This enum should be kept public because it is used by the downstream zig translate-c +pub const QualHandling = enum { + standard, + preserve_quals, +}; /// Canonicalize a possibly-typeof() type. If the type is not a typeof() type, simply /// return it. Otherwise, determine the actual qualified type. @@ -1151,17 +1134,12 @@ pub const QualHandling = enum { standard, preserve_quals }; /// arrays and pointers. 
pub fn canonicalize(ty: Type, qual_handling: QualHandling) Type { var cur = ty; - if (cur.specifier == .attributed) { - cur = cur.data.attributed.base; - cur.decayed = ty.decayed; - } - if (!cur.isTypeof()) return cur; - var qual = cur.qual; while (true) { switch (cur.specifier) { .typeof_type => cur = cur.data.sub_type.*, .typeof_expr => cur = cur.data.expr.ty, + .attributed => cur = cur.data.attributed.base, else => break, } qual = qual.mergeAll(cur.qual); @@ -1189,7 +1167,7 @@ pub fn requestedAlignment(ty: Type, comp: *const Compilation) ?u29 { return switch (ty.specifier) { .typeof_type => ty.data.sub_type.requestedAlignment(comp), .typeof_expr => ty.data.expr.ty.requestedAlignment(comp), - .attributed => annotationAlignment(comp, ty.data.attributed.attributes), + .attributed => annotationAlignment(comp, Attribute.Iterator.initType(ty)), else => null, }; } @@ -1199,12 +1177,27 @@ pub fn enumIsPacked(ty: Type, comp: *const Compilation) bool { return comp.langopts.short_enums or target_util.packAllEnums(comp.target) or ty.hasAttribute(.@"packed"); } -pub fn annotationAlignment(comp: *const Compilation, attrs: ?[]const Attribute) ?u29 { - const a = attrs orelse return null; +pub fn getName(ty: Type) StringId { + return switch (ty.specifier) { + .typeof_type => if (ty.name == .empty) ty.data.sub_type.getName() else ty.name, + .typeof_expr => if (ty.name == .empty) ty.data.expr.ty.getName() else ty.name, + .attributed => if (ty.name == .empty) ty.data.attributed.base.getName() else ty.name, + else => ty.name, + }; +} +pub fn annotationAlignment(comp: *const Compilation, attrs: Attribute.Iterator) ?u29 { + var it = attrs; var max_requested: ?u29 = null; - for (a) |attribute| { + var last_aligned_index: ?usize = null; + while (it.next()) |item| { + const attribute, const index = item; if (attribute.tag != .aligned) continue; + if (last_aligned_index) |aligned_index| { + // once we recurse into a new type, after an `aligned` attribute was found, we're done + if (index 
<= aligned_index) break; + } + last_aligned_index = index; const requested = if (attribute.args.aligned.alignment) |alignment| alignment.requested else target_util.defaultAlignment(comp.target); if (max_requested == null or max_requested.? < requested) { max_requested = requested; @@ -1225,6 +1218,10 @@ pub fn eql(a_param: Type, b_param: Type, comp: *const Compilation, check_qualifi if (!b.isFunc()) return false; } else if (a.isArray()) { if (!b.isArray()) return false; + } else if (a.specifier == .@"enum" and b.specifier != .@"enum") { + return a.data.@"enum".tag_ty.eql(b, comp, check_qualifiers); + } else if (b.specifier == .@"enum" and a.specifier != .@"enum") { + return a.eql(b.data.@"enum".tag_ty, comp, check_qualifiers); } else if (a.specifier != b.specifier) return false; if (a.qual.atomic != b.qual.atomic) return false; @@ -1315,6 +1312,12 @@ pub fn integerRank(ty: Type, comp: *const Compilation) usize { .long_long, .ulong_long => 6 + (ty.bitSizeof(comp).? << 3), .int128, .uint128 => 7 + (ty.bitSizeof(comp).? 
<< 3), + .typeof_type => ty.data.sub_type.integerRank(comp), + .typeof_expr => ty.data.expr.ty.integerRank(comp), + .attributed => ty.data.attributed.base.integerRank(comp), + + .@"enum" => real.data.@"enum".tag_ty.integerRank(comp), + else => unreachable, }); } @@ -1322,25 +1325,26 @@ pub fn integerRank(ty: Type, comp: *const Compilation) usize { /// Returns true if `a` and `b` are integer types that differ only in sign pub fn sameRankDifferentSign(a: Type, b: Type, comp: *const Compilation) bool { if (!a.isInt() or !b.isInt()) return false; + if (a.hasIncompleteSize() or b.hasIncompleteSize()) return false; if (a.integerRank(comp) != b.integerRank(comp)) return false; return a.isUnsignedInt(comp) != b.isUnsignedInt(comp); } pub fn makeReal(ty: Type) Type { // TODO discards attributed/typeof - var base = ty.canonicalize(.standard); - switch (base.specifier) { - .complex_float, .complex_double, .complex_long_double, .complex_float80, .complex_float128 => { - base.specifier = @enumFromInt(@intFromEnum(base.specifier) - 5); - return base; + var base_ty = ty.canonicalize(.standard); + switch (base_ty.specifier) { + .complex_float16, .complex_float, .complex_double, .complex_long_double, .complex_float128 => { + base_ty.specifier = @enumFromInt(@intFromEnum(base_ty.specifier) - 5); + return base_ty; }, .complex_char, .complex_schar, .complex_uchar, .complex_short, .complex_ushort, .complex_int, .complex_uint, .complex_long, .complex_ulong, .complex_long_long, .complex_ulong_long, .complex_int128, .complex_uint128 => { - base.specifier = @enumFromInt(@intFromEnum(base.specifier) - 13); - return base; + base_ty.specifier = @enumFromInt(@intFromEnum(base_ty.specifier) - 13); + return base_ty; }, .complex_bit_int => { - base.specifier = .bit_int; - return base; + base_ty.specifier = .bit_int; + return base_ty; }, else => return ty, } @@ -1348,19 +1352,19 @@ pub fn makeReal(ty: Type) Type { pub fn makeComplex(ty: Type) Type { // TODO discards attributed/typeof - var base = 
ty.canonicalize(.standard); - switch (base.specifier) { - .float, .double, .long_double, .float80, .float128 => { - base.specifier = @enumFromInt(@intFromEnum(base.specifier) + 5); - return base; + var base_ty = ty.canonicalize(.standard); + switch (base_ty.specifier) { + .float, .double, .long_double, .float128 => { + base_ty.specifier = @enumFromInt(@intFromEnum(base_ty.specifier) + 5); + return base_ty; }, .char, .schar, .uchar, .short, .ushort, .int, .uint, .long, .ulong, .long_long, .ulong_long, .int128, .uint128 => { - base.specifier = @enumFromInt(@intFromEnum(base.specifier) + 13); - return base; + base_ty.specifier = @enumFromInt(@intFromEnum(base_ty.specifier) + 13); + return base_ty; }, .bit_int => { - base.specifier = .complex_bit_int; - return base; + base_ty.specifier = .complex_bit_int; + return base_ty; }, else => return ty, } @@ -1541,13 +1545,12 @@ pub const Builder = struct { float, double, long_double, - float80, float128, complex, + complex_float16, complex_float, complex_double, complex_long_double, - complex_float80, complex_float128, pointer: *Type, @@ -1613,9 +1616,6 @@ pub const Builder = struct { .int128 => "__int128", .sint128 => "signed __int128", .uint128 => "unsigned __int128", - .bit_int => "_BitInt", - .sbit_int => "signed _BitInt", - .ubit_int => "unsigned _BitInt", .complex_char => "_Complex char", .complex_schar => "_Complex signed char", .complex_uchar => "_Complex unsigned char", @@ -1645,22 +1645,18 @@ pub const Builder = struct { .complex_int128 => "_Complex __int128", .complex_sint128 => "_Complex signed __int128", .complex_uint128 => "_Complex unsigned __int128", - .complex_bit_int => "_Complex _BitInt", - .complex_sbit_int => "_Complex signed _BitInt", - .complex_ubit_int => "_Complex unsigned _BitInt", .fp16 => "__fp16", .float16 => "_Float16", .float => "float", .double => "double", .long_double => "long double", - .float80 => "__float80", .float128 => "__float128", .complex => "_Complex", + .complex_float16 => "_Complex 
_Float16", .complex_float => "_Complex float", .complex_double => "_Complex double", .complex_long_double => "_Complex long double", - .complex_float80 => "_Complex __float80", .complex_float128 => "_Complex __float128", .attributed => |attributed| Builder.fromType(attributed.base).str(langopts), @@ -1757,19 +1753,20 @@ pub const Builder = struct { .complex_uint128 => ty.specifier = .complex_uint128, .bit_int, .sbit_int, .ubit_int, .complex_bit_int, .complex_ubit_int, .complex_sbit_int => |bits| { const unsigned = b.specifier == .ubit_int or b.specifier == .complex_ubit_int; + const complex_str = if (b.complex_tok != null) "_Complex " else ""; if (unsigned) { if (bits < 1) { - try p.errStr(.unsigned_bit_int_too_small, b.bit_int_tok.?, b.specifier.str(p.comp.langopts).?); + try p.errStr(.unsigned_bit_int_too_small, b.bit_int_tok.?, complex_str); return Type.invalid; } } else { if (bits < 2) { - try p.errStr(.signed_bit_int_too_small, b.bit_int_tok.?, b.specifier.str(p.comp.langopts).?); + try p.errStr(.signed_bit_int_too_small, b.bit_int_tok.?, complex_str); return Type.invalid; } } if (bits > Compilation.bit_int_max_bits) { - try p.errStr(.bit_int_too_big, b.bit_int_tok.?, b.specifier.str(p.comp.langopts).?); + try p.errStr(if (unsigned) .unsigned_bit_int_too_big else .signed_bit_int_too_big, b.bit_int_tok.?, complex_str); return Type.invalid; } ty.specifier = if (b.complex_tok != null) .complex_bit_int else .bit_int; @@ -1784,12 +1781,11 @@ pub const Builder = struct { .float => ty.specifier = .float, .double => ty.specifier = .double, .long_double => ty.specifier = .long_double, - .float80 => ty.specifier = .float80, .float128 => ty.specifier = .float128, + .complex_float16 => ty.specifier = .complex_float16, .complex_float => ty.specifier = .complex_float, .complex_double => ty.specifier = .complex_double, .complex_long_double => ty.specifier = .complex_long_double, - .complex_float80 => ty.specifier = .complex_float80, .complex_float128 => ty.specifier = 
.complex_float128, .complex => { try p.errTok(.plain_complex, p.tok_i - 1); @@ -1907,6 +1903,7 @@ pub const Builder = struct { /// Try to combine type from typedef, returns true if successful. pub fn combineTypedef(b: *Builder, p: *Parser, typedef_ty: Type, name_tok: TokenIndex) bool { + if (typedef_ty.is(.invalid)) return false; b.error_on_invalid = true; defer b.error_on_invalid = false; @@ -2094,6 +2091,7 @@ pub const Builder = struct { }, .long => b.specifier = switch (b.specifier) { .none => .long, + .double => .long_double, .long => .long_long, .unsigned => .ulong, .signed => .long, @@ -2106,6 +2104,7 @@ pub const Builder = struct { .complex_long => .complex_long_long, .complex_slong => .complex_slong_long, .complex_ulong => .complex_ulong_long, + .complex_double => .complex_long_double, else => return b.cannotCombine(p, source_tok), }, .int128 => b.specifier = switch (b.specifier) { @@ -2140,6 +2139,7 @@ pub const Builder = struct { }, .float16 => b.specifier = switch (b.specifier) { .none => .float16, + .complex => .complex_float16, else => return b.cannotCombine(p, source_tok), }, .float => b.specifier = switch (b.specifier) { @@ -2154,11 +2154,6 @@ pub const Builder = struct { .complex => .complex_double, else => return b.cannotCombine(p, source_tok), }, - .float80 => b.specifier = switch (b.specifier) { - .none => .float80, - .complex => .complex_float80, - else => return b.cannotCombine(p, source_tok), - }, .float128 => b.specifier = switch (b.specifier) { .none => .float128, .complex => .complex_float128, @@ -2166,10 +2161,10 @@ pub const Builder = struct { }, .complex => b.specifier = switch (b.specifier) { .none => .complex, + .float16 => .complex_float16, .float => .complex_float, .double => .complex_double, .long_double => .complex_long_double, - .float80 => .complex_float80, .float128 => .complex_float128, .char => .complex_char, .schar => .complex_schar, @@ -2207,7 +2202,6 @@ pub const Builder = struct { .complex_float, .complex_double, 
.complex_long_double, - .complex_float80, .complex_float128, .complex_char, .complex_schar, @@ -2294,13 +2288,12 @@ pub const Builder = struct { .float16 => .float16, .float => .float, .double => .double, - .float80 => .float80, .float128 => .float128, .long_double => .long_double, + .complex_float16 => .complex_float16, .complex_float => .complex_float, .complex_double => .complex_double, .complex_long_double => .complex_long_double, - .complex_float80 => .complex_float80, .complex_float128 => .complex_float128, .pointer => .{ .pointer = ty.data.sub_type }, @@ -2350,22 +2343,30 @@ pub const Builder = struct { } }; +/// Use with caution +pub fn base(ty: *Type) *Type { + return switch (ty.specifier) { + .typeof_type => ty.data.sub_type.base(), + .typeof_expr => ty.data.expr.ty.base(), + .attributed => ty.data.attributed.base.base(), + else => ty, + }; +} + pub fn getAttribute(ty: Type, comptime tag: Attribute.Tag) ?Attribute.ArgumentsForTag(tag) { - switch (ty.specifier) { - .typeof_type => return ty.data.sub_type.getAttribute(tag), - .typeof_expr => return ty.data.expr.ty.getAttribute(tag), - .attributed => { - for (ty.data.attributed.attributes) |attribute| { - if (attribute.tag == tag) return @field(attribute.args, @tagName(tag)); - } - return null; - }, - else => return null, + if (tag == .aligned) @compileError("use requestedAlignment"); + var it = Attribute.Iterator.initType(ty); + while (it.next()) |item| { + const attribute, _ = item; + if (attribute.tag == tag) return @field(attribute.args, @tagName(tag)); } + return null; } pub fn hasAttribute(ty: Type, tag: Attribute.Tag) bool { - for (ty.getAttributes()) |attr| { + var it = Attribute.Iterator.initType(ty); + while (it.next()) |item| { + const attr, _ = item; if (attr.tag == tag) return true; } return false; @@ -2489,6 +2490,8 @@ fn printPrologue(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts _ = try elem_ty.printPrologue(mapper, langopts, w); try w.writeAll("' values)"); }, + .bit_int => 
try w.print("{s} _BitInt({d})", .{ @tagName(ty.data.int.signedness), ty.data.int.bits }), + .complex_bit_int => try w.print("_Complex {s} _BitInt({d})", .{ @tagName(ty.data.int.signedness), ty.data.int.bits }), else => try w.writeAll(Builder.fromType(ty).str(langopts).?), } return true; @@ -2644,15 +2647,12 @@ pub fn dump(ty: Type, mapper: StringInterner.TypeMapper, langopts: LangOpts, w: .attributed => { if (ty.isDecayed()) try w.writeAll("*d:"); try w.writeAll("attributed("); - try ty.data.attributed.base.dump(mapper, langopts, w); + try ty.data.attributed.base.canonicalize(.standard).dump(mapper, langopts, w); try w.writeAll(")"); }, - else => { - try w.writeAll(Builder.fromType(ty).str(langopts).?); - if (ty.specifier == .bit_int or ty.specifier == .complex_bit_int) { - try w.print("({d})", .{ty.data.int.bits}); - } - }, + .bit_int => try w.print("{s} _BitInt({d})", .{ @tagName(ty.data.int.signedness), ty.data.int.bits }), + .complex_bit_int => try w.print("_Complex {s} _BitInt({d})", .{ @tagName(ty.data.int.signedness), ty.data.int.bits }), + else => try w.writeAll(Builder.fromType(ty).str(langopts).?), } } diff --git a/lib/compiler/aro/aro/Value.zig b/lib/compiler/aro/aro/Value.zig index 2dd9a86abe24..892a09b1d67d 100644 --- a/lib/compiler/aro/aro/Value.zig +++ b/lib/compiler/aro/aro/Value.zig @@ -8,6 +8,7 @@ const BigIntSpace = Interner.Tag.Int.BigIntSpace; const Compilation = @import("Compilation.zig"); const Type = @import("Type.zig"); const target_util = @import("target.zig"); +const annex_g = @import("annex_g.zig"); const Value = @This(); @@ -41,6 +42,14 @@ pub fn is(v: Value, tag: std.meta.Tag(Interner.Key), comp: *const Compilation) b return comp.interner.get(v.ref()) == tag; } +pub fn isArithmetic(v: Value, comp: *const Compilation) bool { + if (v.opt_ref == .none) return false; + return switch (comp.interner.get(v.ref())) { + .int, .float, .complex => true, + else => false, + }; +} + /// Number of bits needed to hold `v`. 
/// Asserts that `v` is not negative pub fn minUnsignedBits(v: Value, comp: *const Compilation) usize { @@ -58,7 +67,7 @@ test "minUnsignedBits" { } }; - var comp = Compilation.init(std.testing.allocator); + var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); defer comp.deinit(); const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" }); comp.target = try std.zig.system.resolveTargetQuery(target_query); @@ -93,7 +102,7 @@ test "minSignedBits" { } }; - var comp = Compilation.init(std.testing.allocator); + var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); defer comp.deinit(); const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" }); comp.target = try std.zig.system.resolveTargetQuery(target_query); @@ -134,7 +143,7 @@ pub fn floatToInt(v: *Value, dest_ty: Type, comp: *Compilation) !FloatToIntChang v.* = fromBool(!was_zero); if (was_zero or was_one) return .none; return .value_changed; - } else if (dest_ty.isUnsignedInt(comp) and v.compare(.lt, zero, comp)) { + } else if (dest_ty.isUnsignedInt(comp) and float_val < 0) { v.* = zero; return .out_of_range; } @@ -154,7 +163,7 @@ pub fn floatToInt(v: *Value, dest_ty: Type, comp: *Compilation) !FloatToIntChang }; // The float is reduced in rational.setFloat, so we assert that denominator is equal to one - const big_one = std.math.big.int.Const{ .limbs = &.{1}, .positive = true }; + const big_one = BigIntConst{ .limbs = &.{1}, .positive = true }; assert(rational.q.toConst().eqlAbs(big_one)); if (is_negative) { @@ -179,6 +188,20 @@ pub fn floatToInt(v: *Value, dest_ty: Type, comp: *Compilation) !FloatToIntChang /// `.none` value remains unchanged. 
pub fn intToFloat(v: *Value, dest_ty: Type, comp: *Compilation) !void { if (v.opt_ref == .none) return; + + if (dest_ty.isComplex()) { + const bits = dest_ty.bitSizeof(comp).?; + const cf: Interner.Key.Complex = switch (bits) { + 32 => .{ .cf16 = .{ v.toFloat(f16, comp), 0 } }, + 64 => .{ .cf32 = .{ v.toFloat(f32, comp), 0 } }, + 128 => .{ .cf64 = .{ v.toFloat(f64, comp), 0 } }, + 160 => .{ .cf80 = .{ v.toFloat(f80, comp), 0 } }, + 256 => .{ .cf128 = .{ v.toFloat(f128, comp), 0 } }, + else => unreachable, + }; + v.* = try intern(comp, .{ .complex = cf }); + return; + } const bits = dest_ty.bitSizeof(comp).?; return switch (comp.interner.get(v.ref()).int) { inline .u64, .i64 => |data| { @@ -207,40 +230,89 @@ pub fn intToFloat(v: *Value, dest_ty: Type, comp: *Compilation) !void { }; } +pub const IntCastChangeKind = enum { + /// value did not change + none, + /// Truncation occurred (e.g., i32 to i16) + truncated, + /// Sign conversion occurred (e.g., i32 to u32) + sign_changed, +}; + /// Truncates or extends bits based on type. /// `.none` value remains unchanged. -pub fn intCast(v: *Value, dest_ty: Type, comp: *Compilation) !void { - if (v.opt_ref == .none) return; - const bits: usize = @intCast(dest_ty.bitSizeof(comp).?); +pub fn intCast(v: *Value, dest_ty: Type, comp: *Compilation) !IntCastChangeKind { + if (v.opt_ref == .none) return .none; + + const dest_bits: usize = @intCast(dest_ty.bitSizeof(comp).?); + const dest_signed = dest_ty.signedness(comp) == .signed; + var space: BigIntSpace = undefined; const big = v.toBigInt(&space, comp); + const value_bits = big.bitCountTwosComp(); + + // if big is negative, then is signed. 
+ const src_signed = !big.positive; + const sign_change = src_signed != dest_signed; const limbs = try comp.gpa.alloc( std.math.big.Limb, - std.math.big.int.calcTwosCompLimbCount(@max(big.bitCountTwosComp(), bits)), + std.math.big.int.calcTwosCompLimbCount(@max(value_bits, dest_bits)), ); defer comp.gpa.free(limbs); - var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; - result_bigint.truncate(big, dest_ty.signedness(comp), bits); + + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + result_bigint.truncate(big, dest_ty.signedness(comp), dest_bits); v.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); + + const truncation_occurred = value_bits > dest_bits; + if (truncation_occurred) { + return .truncated; + } else if (sign_change) { + return .sign_changed; + } else { + return .none; + } } /// Converts the stored value to a float of the specified type /// `.none` value remains unchanged. 
pub fn floatCast(v: *Value, dest_ty: Type, comp: *Compilation) !void { if (v.opt_ref == .none) return; - // TODO complex values - const bits = dest_ty.makeReal().bitSizeof(comp).?; - const f: Interner.Key.Float = switch (bits) { - 16 => .{ .f16 = v.toFloat(f16, comp) }, - 32 => .{ .f32 = v.toFloat(f32, comp) }, - 64 => .{ .f64 = v.toFloat(f64, comp) }, - 80 => .{ .f80 = v.toFloat(f80, comp) }, - 128 => .{ .f128 = v.toFloat(f128, comp) }, + const bits = dest_ty.bitSizeof(comp).?; + if (dest_ty.isComplex()) { + const cf: Interner.Key.Complex = switch (bits) { + 32 => .{ .cf16 = .{ v.toFloat(f16, comp), v.imag(f16, comp) } }, + 64 => .{ .cf32 = .{ v.toFloat(f32, comp), v.imag(f32, comp) } }, + 128 => .{ .cf64 = .{ v.toFloat(f64, comp), v.imag(f64, comp) } }, + 160 => .{ .cf80 = .{ v.toFloat(f80, comp), v.imag(f80, comp) } }, + 256 => .{ .cf128 = .{ v.toFloat(f128, comp), v.imag(f128, comp) } }, + else => unreachable, + }; + v.* = try intern(comp, .{ .complex = cf }); + } else { + const f: Interner.Key.Float = switch (bits) { + 16 => .{ .f16 = v.toFloat(f16, comp) }, + 32 => .{ .f32 = v.toFloat(f32, comp) }, + 64 => .{ .f64 = v.toFloat(f64, comp) }, + 80 => .{ .f80 = v.toFloat(f80, comp) }, + 128 => .{ .f128 = v.toFloat(f128, comp) }, + else => unreachable, + }; + v.* = try intern(comp, .{ .float = f }); + } +} + +pub fn imag(v: Value, comptime T: type, comp: *const Compilation) T { + return switch (comp.interner.get(v.ref())) { + .int => 0.0, + .float => 0.0, + .complex => |repr| switch (repr) { + inline else => |components| return @floatCast(components[1]), + }, else => unreachable, }; - v.* = try intern(comp, .{ .float = f }); } pub fn toFloat(v: Value, comptime T: type, comp: *const Compilation) T { @@ -252,6 +324,39 @@ pub fn toFloat(v: Value, comptime T: type, comp: *const Compilation) T { .float => |repr| switch (repr) { inline else => |data| @floatCast(data), }, + .complex => |repr| switch (repr) { + inline else => |components| @floatCast(components[0]), + }, + 
else => unreachable, + }; +} + +pub fn realPart(v: Value, comp: *Compilation) !Value { + if (v.opt_ref == .none) return v; + return switch (comp.interner.get(v.ref())) { + .int, .float => v, + .complex => |repr| Value.intern(comp, switch (repr) { + .cf16 => |components| .{ .float = .{ .f16 = components[0] } }, + .cf32 => |components| .{ .float = .{ .f32 = components[0] } }, + .cf64 => |components| .{ .float = .{ .f64 = components[0] } }, + .cf80 => |components| .{ .float = .{ .f80 = components[0] } }, + .cf128 => |components| .{ .float = .{ .f128 = components[0] } }, + }), + else => unreachable, + }; +} + +pub fn imaginaryPart(v: Value, comp: *Compilation) !Value { + if (v.opt_ref == .none) return v; + return switch (comp.interner.get(v.ref())) { + .int, .float => Value.zero, + .complex => |repr| Value.intern(comp, switch (repr) { + .cf16 => |components| .{ .float = .{ .f16 = components[1] } }, + .cf32 => |components| .{ .float = .{ .f32 = components[1] } }, + .cf64 => |components| .{ .float = .{ .f64 = components[1] } }, + .cf80 => |components| .{ .float = .{ .f80 = components[1] } }, + .cf128 => |components| .{ .float = .{ .f128 = components[1] } }, + }), else => unreachable, }; } @@ -298,11 +403,56 @@ pub fn isZero(v: Value, comp: *const Compilation) bool { inline .i64, .u64 => |data| return data == 0, .big_int => |data| return data.eqlZero(), }, + .complex => |repr| switch (repr) { + inline else => |data| return data[0] == 0.0 and data[1] == 0.0, + }, .bytes => return false, else => unreachable, } } +const IsInfKind = enum(i32) { + negative = -1, + finite = 0, + positive = 1, + unknown = std.math.maxInt(i32), +}; + +pub fn isInfSign(v: Value, comp: *const Compilation) IsInfKind { + if (v.opt_ref == .none) return .unknown; + return switch (comp.interner.get(v.ref())) { + .float => |repr| switch (repr) { + inline else => |data| if (std.math.isPositiveInf(data)) .positive else if (std.math.isNegativeInf(data)) .negative else .finite, + }, + else => .unknown, + }; 
+} +pub fn isInf(v: Value, comp: *const Compilation) bool { + if (v.opt_ref == .none) return false; + return switch (comp.interner.get(v.ref())) { + .float => |repr| switch (repr) { + inline else => |data| std.math.isInf(data), + }, + .complex => |repr| switch (repr) { + inline else => |components| std.math.isInf(components[0]) or std.math.isInf(components[1]), + }, + else => false, + }; +} + +pub fn isNan(v: Value, comp: *const Compilation) bool { + if (v.opt_ref == .none) return false; + return switch (comp.interner.get(v.ref())) { + .float => |repr| switch (repr) { + inline else => |data| std.math.isNan(data), + }, + .complex => |repr| switch (repr) { + inline else => |components| std.math.isNan(components[0]) or std.math.isNan(components[1]), + }, + else => false, + }; +} + /// Converts value to zero or one; /// `.none` value remains unchanged. pub fn boolCast(v: *Value, comp: *const Compilation) void { @@ -326,9 +476,45 @@ pub fn toInt(v: Value, comptime T: type, comp: *const Compilation) ?T { return big_int.to(T) catch null; } +const ComplexOp = enum { + add, + sub, +}; + +fn complexAddSub(lhs: Value, rhs: Value, comptime T: type, op: ComplexOp, comp: *Compilation) !Value { + const res_re = switch (op) { + .add => lhs.toFloat(T, comp) + rhs.toFloat(T, comp), + .sub => lhs.toFloat(T, comp) - rhs.toFloat(T, comp), + }; + const res_im = switch (op) { + .add => lhs.imag(T, comp) + rhs.imag(T, comp), + .sub => lhs.imag(T, comp) - rhs.imag(T, comp), + }; + + return switch (T) { + f16 => intern(comp, .{ .complex = .{ .cf16 = .{ res_re, res_im } } }), + f32 => intern(comp, .{ .complex = .{ .cf32 = .{ res_re, res_im } } }), + f64 => intern(comp, .{ .complex = .{ .cf64 = .{ res_re, res_im } } }), + f80 => intern(comp, .{ .complex = .{ .cf80 = .{ res_re, res_im } } }), + f128 => intern(comp, .{ .complex = .{ .cf128 = .{ res_re, res_im } } }), + else => unreachable, + }; +} + pub fn add(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool { const 
bits: usize = @intCast(ty.bitSizeof(comp).?); if (ty.isFloat()) { + if (ty.isComplex()) { + res.* = switch (bits) { + 32 => try complexAddSub(lhs, rhs, f16, .add, comp), + 64 => try complexAddSub(lhs, rhs, f32, .add, comp), + 128 => try complexAddSub(lhs, rhs, f64, .add, comp), + 160 => try complexAddSub(lhs, rhs, f80, .add, comp), + 256 => try complexAddSub(lhs, rhs, f128, .add, comp), + else => unreachable, + }; + return false; + } const f: Interner.Key.Float = switch (bits) { 16 => .{ .f16 = lhs.toFloat(f16, comp) + rhs.toFloat(f16, comp) }, 32 => .{ .f32 = lhs.toFloat(f32, comp) + rhs.toFloat(f32, comp) }, @@ -350,7 +536,7 @@ pub fn add(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b std.math.big.int.calcTwosCompLimbCount(bits), ); defer comp.gpa.free(limbs); - var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, ty.signedness(comp), bits); res.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); @@ -361,6 +547,17 @@ pub fn add(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b pub fn sub(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool { const bits: usize = @intCast(ty.bitSizeof(comp).?); if (ty.isFloat()) { + if (ty.isComplex()) { + res.* = switch (bits) { + 32 => try complexAddSub(lhs, rhs, f16, .sub, comp), + 64 => try complexAddSub(lhs, rhs, f32, .sub, comp), + 128 => try complexAddSub(lhs, rhs, f64, .sub, comp), + 160 => try complexAddSub(lhs, rhs, f80, .sub, comp), + 256 => try complexAddSub(lhs, rhs, f128, .sub, comp), + else => unreachable, + }; + return false; + } const f: Interner.Key.Float = switch (bits) { 16 => .{ .f16 = lhs.toFloat(f16, comp) - rhs.toFloat(f16, comp) }, 32 => .{ .f32 = lhs.toFloat(f32, comp) - rhs.toFloat(f32, comp) }, @@ -382,7 +579,7 
@@ pub fn sub(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b std.math.big.int.calcTwosCompLimbCount(bits), ); defer comp.gpa.free(limbs); - var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, ty.signedness(comp), bits); res.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); @@ -393,6 +590,18 @@ pub fn sub(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b pub fn mul(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool { const bits: usize = @intCast(ty.bitSizeof(comp).?); if (ty.isFloat()) { + if (ty.isComplex()) { + const cf: Interner.Key.Complex = switch (bits) { + 32 => .{ .cf16 = annex_g.complexFloatMul(f16, lhs.toFloat(f16, comp), lhs.imag(f16, comp), rhs.toFloat(f16, comp), rhs.imag(f16, comp)) }, + 64 => .{ .cf32 = annex_g.complexFloatMul(f32, lhs.toFloat(f32, comp), lhs.imag(f32, comp), rhs.toFloat(f32, comp), rhs.imag(f32, comp)) }, + 128 => .{ .cf64 = annex_g.complexFloatMul(f64, lhs.toFloat(f64, comp), lhs.imag(f64, comp), rhs.toFloat(f64, comp), rhs.imag(f64, comp)) }, + 160 => .{ .cf80 = annex_g.complexFloatMul(f80, lhs.toFloat(f80, comp), lhs.imag(f80, comp), rhs.toFloat(f80, comp), rhs.imag(f80, comp)) }, + 256 => .{ .cf128 = annex_g.complexFloatMul(f128, lhs.toFloat(f128, comp), lhs.imag(f128, comp), rhs.toFloat(f128, comp), rhs.imag(f128, comp)) }, + else => unreachable, + }; + res.* = try intern(comp, .{ .complex = cf }); + return false; + } const f: Interner.Key.Float = switch (bits) { 16 => .{ .f16 = lhs.toFloat(f16, comp) * rhs.toFloat(f16, comp) }, 32 => .{ .f32 = lhs.toFloat(f32, comp) * rhs.toFloat(f32, comp) }, @@ -438,6 +647,18 @@ pub fn mul(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b pub fn div(res: *Value, lhs: Value, 
rhs: Value, ty: Type, comp: *Compilation) !bool { const bits: usize = @intCast(ty.bitSizeof(comp).?); if (ty.isFloat()) { + if (ty.isComplex()) { + const cf: Interner.Key.Complex = switch (bits) { + 32 => .{ .cf16 = annex_g.complexFloatDiv(f16, lhs.toFloat(f16, comp), lhs.imag(f16, comp), rhs.toFloat(f16, comp), rhs.imag(f16, comp)) }, + 64 => .{ .cf32 = annex_g.complexFloatDiv(f32, lhs.toFloat(f32, comp), lhs.imag(f32, comp), rhs.toFloat(f32, comp), rhs.imag(f32, comp)) }, + 128 => .{ .cf64 = annex_g.complexFloatDiv(f64, lhs.toFloat(f64, comp), lhs.imag(f64, comp), rhs.toFloat(f64, comp), rhs.imag(f64, comp)) }, + 160 => .{ .cf80 = annex_g.complexFloatDiv(f80, lhs.toFloat(f80, comp), lhs.imag(f80, comp), rhs.toFloat(f80, comp), rhs.imag(f80, comp)) }, + 256 => .{ .cf128 = annex_g.complexFloatDiv(f128, lhs.toFloat(f128, comp), lhs.imag(f128, comp), rhs.toFloat(f128, comp), rhs.imag(f128, comp)) }, + else => unreachable, + }; + res.* = try intern(comp, .{ .complex = cf }); + return false; + } const f: Interner.Key.Float = switch (bits) { 16 => .{ .f16 = lhs.toFloat(f16, comp) / rhs.toFloat(f16, comp) }, 32 => .{ .f32 = lhs.toFloat(f32, comp) / rhs.toFloat(f32, comp) }, @@ -491,11 +712,11 @@ pub fn rem(lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !Value { const signedness = ty.signedness(comp); if (signedness == .signed) { - var spaces: [3]BigIntSpace = undefined; - const min_val = BigIntMutable.init(&spaces[0].limbs, ty.minInt(comp)).toConst(); - const negative = BigIntMutable.init(&spaces[1].limbs, -1).toConst(); - const big_one = BigIntMutable.init(&spaces[2].limbs, 1).toConst(); - if (lhs_bigint.eql(min_val) and rhs_bigint.eql(negative)) { + var spaces: [2]BigIntSpace = undefined; + const min_val = try Value.minInt(ty, comp); + const negative = BigIntMutable.init(&spaces[0].limbs, -1).toConst(); + const big_one = BigIntMutable.init(&spaces[1].limbs, 1).toConst(); + if (lhs.compare(.eq, min_val, comp) and rhs_bigint.eql(negative)) { return .{}; } else if 
(rhs_bigint.order(big_one).compare(.lt)) { // lhs - @divTrunc(lhs, rhs) * rhs @@ -542,7 +763,7 @@ pub fn bitOr(lhs: Value, rhs: Value, comp: *Compilation) !Value { @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len), ); defer comp.gpa.free(limbs); - var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitOr(lhs_bigint, rhs_bigint); return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); @@ -554,12 +775,13 @@ pub fn bitXor(lhs: Value, rhs: Value, comp: *Compilation) !Value { const lhs_bigint = lhs.toBigInt(&lhs_space, comp); const rhs_bigint = rhs.toBigInt(&rhs_space, comp); + const extra = @intFromBool(lhs_bigint.positive != rhs_bigint.positive); const limbs = try comp.gpa.alloc( std.math.big.Limb, - @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len), + @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + extra, ); defer comp.gpa.free(limbs); - var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitXor(lhs_bigint, rhs_bigint); return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); @@ -571,12 +793,18 @@ pub fn bitAnd(lhs: Value, rhs: Value, comp: *Compilation) !Value { const lhs_bigint = lhs.toBigInt(&lhs_space, comp); const rhs_bigint = rhs.toBigInt(&rhs_space, comp); - const limbs = try comp.gpa.alloc( - std.math.big.Limb, - @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len), - ); + const limb_count = if (lhs_bigint.positive and rhs_bigint.positive) + @min(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + else if (lhs_bigint.positive) + lhs_bigint.limbs.len + else if (rhs_bigint.positive) + rhs_bigint.limbs.len + else + @max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1; + + const limbs = try 
comp.gpa.alloc(std.math.big.Limb, limb_count); defer comp.gpa.free(limbs); - var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitAnd(lhs_bigint, rhs_bigint); return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); @@ -592,7 +820,7 @@ pub fn bitNot(val: Value, ty: Type, comp: *Compilation) !Value { std.math.big.int.calcTwosCompLimbCount(bits), ); defer comp.gpa.free(limbs); - var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.bitNotWrap(val_bigint, ty.signedness(comp), bits); return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); @@ -606,9 +834,9 @@ pub fn shl(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b const bits: usize = @intCast(ty.bitSizeof(comp).?); if (shift > bits) { if (lhs_bigint.positive) { - res.* = try intern(comp, .{ .int = .{ .u64 = ty.maxInt(comp) } }); + res.* = try Value.maxInt(ty, comp); } else { - res.* = try intern(comp, .{ .int = .{ .i64 = ty.minInt(comp) } }); + res.* = try Value.minInt(ty, comp); } return true; } @@ -618,7 +846,7 @@ pub fn shl(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1, ); defer comp.gpa.free(limbs); - var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.shiftLeft(lhs_bigint, shift); const signedness = ty.signedness(comp); @@ -652,12 +880,25 @@ pub fn shr(lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !Value { std.math.big.int.calcTwosCompLimbCount(bits), ); defer 
comp.gpa.free(limbs); - var result_bigint = std.math.big.int.Mutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; result_bigint.shiftRight(lhs_bigint, shift); return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); } +pub fn complexConj(val: Value, ty: Type, comp: *Compilation) !Value { + const bits = ty.bitSizeof(comp).?; + const cf: Interner.Key.Complex = switch (bits) { + 32 => .{ .cf16 = .{ val.toFloat(f16, comp), -val.imag(f16, comp) } }, + 64 => .{ .cf32 = .{ val.toFloat(f32, comp), -val.imag(f32, comp) } }, + 128 => .{ .cf64 = .{ val.toFloat(f64, comp), -val.imag(f64, comp) } }, + 160 => .{ .cf80 = .{ val.toFloat(f80, comp), -val.imag(f80, comp) } }, + 256 => .{ .cf128 = .{ val.toFloat(f128, comp), -val.imag(f128, comp) } }, + else => unreachable, + }; + return intern(comp, .{ .complex = cf }); +} + pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, comp: *const Compilation) bool { if (op == .eq) { return lhs.opt_ref == rhs.opt_ref; @@ -672,6 +913,12 @@ pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, comp: *cons const rhs_f128 = rhs.toFloat(f128, comp); return std.math.compare(lhs_f128, op, rhs_f128); } + if (lhs_key == .complex or rhs_key == .complex) { + assert(op == .neq); + const real_equal = std.math.compare(lhs.toFloat(f128, comp), .eq, rhs.toFloat(f128, comp)); + const imag_equal = std.math.compare(lhs.imag(f128, comp), .eq, rhs.imag(f128, comp)); + return !real_equal or !imag_equal; + } var lhs_bigint_space: BigIntSpace = undefined; var rhs_bigint_space: BigIntSpace = undefined; @@ -680,6 +927,42 @@ pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, comp: *cons return lhs_bigint.order(rhs_bigint).compare(op); } +fn twosCompIntLimit(limit: std.math.big.int.TwosCompIntLimit, ty: Type, comp: *Compilation) !Value { + const signedness = ty.signedness(comp); + if (limit == 
.min and signedness == .unsigned) return Value.zero; + const mag_bits: usize = @intCast(ty.bitSizeof(comp).?); + switch (mag_bits) { + inline 8, 16, 32, 64 => |bits| { + if (limit == .min) return Value.int(@as(i64, std.math.minInt(std.meta.Int(.signed, bits))), comp); + return switch (signedness) { + inline else => |sign| Value.int(std.math.maxInt(std.meta.Int(sign, bits)), comp), + }; + }, + else => {}, + } + + const sign_bits = @intFromBool(signedness == .signed); + const total_bits = mag_bits + sign_bits; + + const limbs = try comp.gpa.alloc( + std.math.big.Limb, + std.math.big.int.calcTwosCompLimbCount(total_bits), + ); + defer comp.gpa.free(limbs); + + var result_bigint: BigIntMutable = .{ .limbs = limbs, .positive = undefined, .len = undefined }; + result_bigint.setTwosCompIntLimit(limit, signedness, mag_bits); + return Value.intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } }); +} + +pub fn minInt(ty: Type, comp: *Compilation) !Value { + return twosCompIntLimit(.min, ty, comp); +} + +pub fn maxInt(ty: Type, comp: *Compilation) !Value { + return twosCompIntLimit(.max, ty, comp); +} + pub fn print(v: Value, ty: Type, comp: *const Compilation, w: anytype) @TypeOf(w).Error!void { if (ty.is(.bool)) { return w.writeAll(if (v.isZero(comp)) "false" else "true"); @@ -696,6 +979,10 @@ pub fn print(v: Value, ty: Type, comp: *const Compilation, w: anytype) @TypeOf(w inline else => |x| return w.print("{d}", .{@as(f64, @floatCast(x))}), }, .bytes => |b| return printString(b, ty, comp, w), + .complex => |repr| switch (repr) { + .cf32 => |components| return w.print("{d} + {d}i", .{ @round(@as(f64, @floatCast(components[0])) * 1000000) / 1000000, @round(@as(f64, @floatCast(components[1])) * 1000000) / 1000000 }), + inline else => |components| return w.print("{d} + {d}i", .{ @as(f64, @floatCast(components[0])), @as(f64, @floatCast(components[1])) }), + }, else => unreachable, // not a value } } @@ -703,26 +990,44 @@ pub fn print(v: Value, ty: Type, comp: *const 
Compilation, w: anytype) @TypeOf(w pub fn printString(bytes: []const u8, ty: Type, comp: *const Compilation, w: anytype) @TypeOf(w).Error!void { const size: Compilation.CharUnitSize = @enumFromInt(ty.elemType().sizeof(comp).?); const without_null = bytes[0 .. bytes.len - @intFromEnum(size)]; + try w.writeByte('"'); switch (size) { - inline .@"1", .@"2" => |sz| { - const data_slice: []const sz.Type() = @alignCast(std.mem.bytesAsSlice(sz.Type(), without_null)); - const formatter = if (sz == .@"1") std.zig.fmtEscapes(data_slice) else std.unicode.fmtUtf16Le(data_slice); - try w.print("\"{}\"", .{formatter}); + .@"1" => try w.print("{}", .{std.zig.fmtEscapes(without_null)}), + .@"2" => { + var items: [2]u16 = undefined; + var i: usize = 0; + while (i < without_null.len) { + @memcpy(std.mem.sliceAsBytes(items[0..1]), without_null[i..][0..2]); + i += 2; + const is_surrogate = std.unicode.utf16IsHighSurrogate(items[0]); + if (is_surrogate and i < without_null.len) { + @memcpy(std.mem.sliceAsBytes(items[1..2]), without_null[i..][0..2]); + if (std.unicode.utf16DecodeSurrogatePair(&items)) |decoded| { + i += 2; + try w.print("{u}", .{decoded}); + } else |_| { + try w.print("\\x{x}", .{items[0]}); + } + } else if (is_surrogate) { + try w.print("\\x{x}", .{items[0]}); + } else { + try w.print("{u}", .{items[0]}); + } + } }, .@"4" => { - try w.writeByte('"'); - const data_slice = std.mem.bytesAsSlice(u32, without_null); - var buf: [4]u8 = undefined; - for (data_slice) |item| { - if (item <= std.math.maxInt(u21) and std.unicode.utf8ValidCodepoint(@intCast(item))) { - const codepoint: u21 = @intCast(item); - const written = std.unicode.utf8Encode(codepoint, &buf) catch unreachable; - try w.print("{s}", .{buf[0..written]}); + var item: [1]u32 = undefined; + const data_slice = std.mem.sliceAsBytes(item[0..1]); + for (0..@divExact(without_null.len, 4)) |n| { + @memcpy(data_slice, without_null[n * 4 ..][0..4]); + if (item[0] <= std.math.maxInt(u21) and 
std.unicode.utf8ValidCodepoint(@intCast(item[0]))) { + const codepoint: u21 = @intCast(item[0]); + try w.print("{u}", .{codepoint}); } else { - try w.print("\\x{x}", .{item}); + try w.print("\\x{x}", .{item[0]}); } } - try w.writeByte('"'); }, } + try w.writeByte('"'); } diff --git a/lib/compiler/aro/aro/annex_g.zig b/lib/compiler/aro/aro/annex_g.zig new file mode 100644 index 000000000000..56765ee3530f --- /dev/null +++ b/lib/compiler/aro/aro/annex_g.zig @@ -0,0 +1,118 @@ +//! Complex arithmetic algorithms from C99 Annex G + +const std = @import("std"); +const copysign = std.math.copysign; +const ilogb = std.math.ilogb; +const inf = std.math.inf; +const isFinite = std.math.isFinite; +const isInf = std.math.isInf; +const isNan = std.math.isNan; +const isPositiveZero = std.math.isPositiveZero; +const scalbn = std.math.scalbn; + +/// computes floating point z*w where a_param, b_param are real, imaginary parts of z and c_param, d_param are real, imaginary parts of w +pub fn complexFloatMul(comptime T: type, a_param: T, b_param: T, c_param: T, d_param: T) [2]T { + var a = a_param; + var b = b_param; + var c = c_param; + var d = d_param; + + const ac = a * c; + const bd = b * d; + const ad = a * d; + const bc = b * c; + var x = ac - bd; + var y = ad + bc; + if (isNan(x) and isNan(y)) { + var recalc = false; + if (isInf(a) or isInf(b)) { + // lhs infinite + // Box the infinity and change NaNs in the other factor to 0 + a = copysign(if (isInf(a)) @as(T, 1.0) else @as(T, 0.0), a); + b = copysign(if (isInf(b)) @as(T, 1.0) else @as(T, 0.0), b); + if (isNan(c)) c = copysign(@as(T, 0.0), c); + if (isNan(d)) d = copysign(@as(T, 0.0), d); + recalc = true; + } + if (isInf(c) or isInf(d)) { + // rhs infinite + // Box the infinity and change NaNs in the other factor to 0 + c = copysign(if (isInf(c)) @as(T, 1.0) else @as(T, 0.0), c); + d = copysign(if (isInf(d)) @as(T, 1.0) else @as(T, 0.0), d); + if (isNan(a)) a = copysign(@as(T, 0.0), a); + if (isNan(b)) b = copysign(@as(T, 0.0), 
b); + recalc = true; + } + if (!recalc and (isInf(ac) or isInf(bd) or isInf(ad) or isInf(bc))) { + // Recover infinities from overflow by changing NaN's to 0 + if (isNan(a)) a = copysign(@as(T, 0.0), a); + if (isNan(b)) b = copysign(@as(T, 0.0), b); + if (isNan(c)) c = copysign(@as(T, 0.0), c); + if (isNan(d)) d = copysign(@as(T, 0.0), d); + } + if (recalc) { + x = inf(T) * (a * c - b * d); + y = inf(T) * (a * d + b * c); + } + } + return .{ x, y }; +} + +/// computes floating point z / w where a_param, b_param are real, imaginary parts of z and c_param, d_param are real, imaginary parts of w +pub fn complexFloatDiv(comptime T: type, a_param: T, b_param: T, c_param: T, d_param: T) [2]T { + var a = a_param; + var b = b_param; + var c = c_param; + var d = d_param; + var denom_logb: i32 = 0; + const max_cd = @max(@abs(c), @abs(d)); + if (isFinite(max_cd)) { + if (max_cd == 0) { + denom_logb = std.math.minInt(i32) + 1; + c = 0; + d = 0; + } else { + denom_logb = ilogb(max_cd); + c = scalbn(c, -denom_logb); + d = scalbn(d, -denom_logb); + } + } + const denom = c * c + d * d; + var x = scalbn((a * c + b * d) / denom, -denom_logb); + var y = scalbn((b * c - a * d) / denom, -denom_logb); + if (isNan(x) and isNan(y)) { + if (isPositiveZero(denom) and (!isNan(a) or !isNan(b))) { + x = copysign(inf(T), c) * a; + y = copysign(inf(T), c) * b; + } else if ((isInf(a) or isInf(b)) and isFinite(c) and isFinite(d)) { + a = copysign(if (isInf(a)) @as(T, 1.0) else @as(T, 0.0), a); + b = copysign(if (isInf(b)) @as(T, 1.0) else @as(T, 0.0), b); + x = inf(T) * (a * c + b * d); + y = inf(T) * (b * c - a * d); + } else if (isInf(max_cd) and isFinite(a) and isFinite(b)) { + c = copysign(if (isInf(c)) @as(T, 1.0) else @as(T, 0.0), c); + d = copysign(if (isInf(d)) @as(T, 1.0) else @as(T, 0.0), d); + x = 0.0 * (a * c + b * d); + y = 0.0 * (b * c - a * d); + } + } + return .{ x, y }; +} + +test complexFloatMul { + // Naive algorithm would produce NaN + NaNi instead of inf + NaNi + const result 
= complexFloatMul(f64, inf(f64), std.math.nan(f64), 2, 0); + try std.testing.expect(isInf(result[0])); + try std.testing.expect(isNan(result[1])); +} + +test complexFloatDiv { + // Naive algorithm would produce NaN + NaNi instead of inf + NaNi + var result = complexFloatDiv(f64, inf(f64), std.math.nan(f64), 2, 0); + try std.testing.expect(isInf(result[0])); + try std.testing.expect(isNan(result[1])); + + result = complexFloatDiv(f64, 2.0, 2.0, 0.0, 0.0); + try std.testing.expect(isInf(result[0])); + try std.testing.expect(isInf(result[1])); +} diff --git a/lib/compiler/aro/aro/features.zig b/lib/compiler/aro/aro/features.zig index d66ba7cabc81..fdc49b722bea 100644 --- a/lib/compiler/aro/aro/features.zig +++ b/lib/compiler/aro/aro/features.zig @@ -45,7 +45,7 @@ pub fn hasFeature(comp: *Compilation, ext: []const u8) bool { .c_static_assert = comp.langopts.standard.atLeast(.c11), .c_thread_local = comp.langopts.standard.atLeast(.c11) and target_util.isTlsSupported(comp.target), }; - inline for (std.meta.fields(@TypeOf(list))) |f| { + inline for (@typeInfo(@TypeOf(list)).@"struct".fields) |f| { if (std.mem.eql(u8, f.name, ext)) return @field(list, f.name); } return false; @@ -69,7 +69,7 @@ pub fn hasExtension(comp: *Compilation, ext: []const u8) bool { .matrix_types = false, // TODO .matrix_types_scalar_division = false, // TODO }; - inline for (std.meta.fields(@TypeOf(list))) |f| { + inline for (@typeInfo(@TypeOf(list)).@"struct".fields) |f| { if (std.mem.eql(u8, f.name, ext)) return @field(list, f.name); } return false; diff --git a/lib/compiler/aro/aro/record_layout.zig b/lib/compiler/aro/aro/record_layout.zig index 2009a29bc9ec..da0517d9fc5b 100644 --- a/lib/compiler/aro/aro/record_layout.zig +++ b/lib/compiler/aro/aro/record_layout.zig @@ -19,6 +19,13 @@ const OngoingBitfield = struct { unused_size_bits: u64, }; +pub const Error = error{Overflow}; + +fn alignForward(addr: u64, alignment: u64) !u64 { + const forward_addr = try std.math.add(u64, addr, alignment - 
1); + return std.mem.alignBackward(u64, forward_addr, alignment); +} + const SysVContext = struct { /// Does the record have an __attribute__((packed)) annotation. attr_packed: bool, @@ -36,14 +43,8 @@ const SysVContext = struct { comp: *const Compilation, fn init(ty: Type, comp: *const Compilation, pragma_pack: ?u8) SysVContext { - var pack_value: ?u64 = null; - if (pragma_pack) |pak| { - pack_value = pak * BITS_PER_BYTE; - } - var req_align: u29 = BITS_PER_BYTE; - if (ty.requestedAlignment(comp)) |aln| { - req_align = aln * BITS_PER_BYTE; - } + const pack_value: ?u64 = if (pragma_pack) |pak| @as(u64, pak) * BITS_PER_BYTE else null; + const req_align = @as(u32, (ty.requestedAlignment(comp) orelse 1)) * BITS_PER_BYTE; return SysVContext{ .attr_packed = ty.hasAttribute(.@"packed"), .max_field_align_bits = pack_value, @@ -55,7 +56,7 @@ const SysVContext = struct { }; } - fn layoutFields(self: *SysVContext, rec: *const Record) void { + fn layoutFields(self: *SysVContext, rec: *const Record) !void { for (rec.fields, 0..) 
|*fld, fld_indx| { if (fld.ty.specifier == .invalid) continue; const type_layout = computeLayout(fld.ty, self.comp); @@ -65,12 +66,12 @@ const SysVContext = struct { field_attrs = attrs[fld_indx]; } if (self.comp.target.isMinGW()) { - fld.layout = self.layoutMinGWField(fld, field_attrs, type_layout); + fld.layout = try self.layoutMinGWField(fld, field_attrs, type_layout); } else { if (fld.isRegularField()) { - fld.layout = self.layoutRegularField(field_attrs, type_layout); + fld.layout = try self.layoutRegularField(field_attrs, type_layout); } else { - fld.layout = self.layoutBitField(field_attrs, type_layout, fld.isNamed(), fld.specifiedBitWidth()); + fld.layout = try self.layoutBitField(field_attrs, type_layout, fld.isNamed(), fld.specifiedBitWidth()); } } } @@ -99,8 +100,8 @@ const SysVContext = struct { field: *const Field, field_attrs: ?[]const Attribute, field_layout: TypeLayout, - ) FieldLayout { - const annotation_alignment_bits = BITS_PER_BYTE * (Type.annotationAlignment(self.comp, field_attrs) orelse 1); + ) !FieldLayout { + const annotation_alignment_bits = BITS_PER_BYTE * @as(u32, (Type.annotationAlignment(self.comp, Attribute.Iterator.initSlice(field_attrs)) orelse 1)); const is_attr_packed = self.attr_packed or isPacked(field_attrs); const ignore_type_alignment = ignoreTypeAlignment(is_attr_packed, field.bit_width, self.ongoing_bitfield, field_layout); @@ -157,7 +158,7 @@ const SysVContext = struct { field_alignment_bits: u64, is_named: bool, width: u64, - ) FieldLayout { + ) !FieldLayout { std.debug.assert(width <= ty_size_bits); // validated in parser // In a union, the size of the underlying type does not affect the size of the union. 
@@ -194,8 +195,8 @@ const SysVContext = struct { .unused_size_bits = ty_size_bits - width, }; } - const offset_bits = std.mem.alignForward(u64, self.size_bits, field_alignment_bits); - self.size_bits = if (width == 0) offset_bits else offset_bits + ty_size_bits; + const offset_bits = try alignForward(self.size_bits, field_alignment_bits); + self.size_bits = if (width == 0) offset_bits else try std.math.add(u64, offset_bits, ty_size_bits); if (!is_named) return .{}; return .{ .offset_bits = offset_bits, @@ -207,16 +208,16 @@ const SysVContext = struct { self: *SysVContext, ty_size_bits: u64, field_alignment_bits: u64, - ) FieldLayout { + ) !FieldLayout { self.ongoing_bitfield = null; // A struct field starts at the next offset in the struct that is properly // aligned with respect to the start of the struct. See test case 0033. // A union field always starts at offset 0. - const offset_bits = if (self.is_union) 0 else std.mem.alignForward(u64, self.size_bits, field_alignment_bits); + const offset_bits = if (self.is_union) 0 else try alignForward(self.size_bits, field_alignment_bits); // Set the size of the record to the maximum of the current size and the end of // the field. See test case 0034. - self.size_bits = @max(self.size_bits, offset_bits + ty_size_bits); + self.size_bits = @max(self.size_bits, try std.math.add(u64, offset_bits, ty_size_bits)); return .{ .offset_bits = offset_bits, @@ -228,7 +229,7 @@ const SysVContext = struct { self: *SysVContext, fld_attrs: ?[]const Attribute, fld_layout: TypeLayout, - ) FieldLayout { + ) !FieldLayout { var fld_align_bits = fld_layout.field_alignment_bits; // If the struct or the field is packed, then the alignment of the underlying type is @@ -239,8 +240,8 @@ const SysVContext = struct { // The field alignment can be increased by __attribute__((aligned)) annotations on the // field. See test case 0085. 
- if (Type.annotationAlignment(self.comp, fld_attrs)) |anno| { - fld_align_bits = @max(fld_align_bits, anno * BITS_PER_BYTE); + if (Type.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| { + fld_align_bits = @max(fld_align_bits, @as(u32, anno) * BITS_PER_BYTE); } // #pragma pack takes precedence over all other attributes. See test cases 0084 and @@ -251,12 +252,12 @@ const SysVContext = struct { // A struct field starts at the next offset in the struct that is properly // aligned with respect to the start of the struct. - const offset_bits = if (self.is_union) 0 else std.mem.alignForward(u64, self.size_bits, fld_align_bits); + const offset_bits = if (self.is_union) 0 else try alignForward(self.size_bits, fld_align_bits); const size_bits = fld_layout.size_bits; // The alignment of a record is the maximum of its field alignments. See test cases // 0084, 0085, 0086. - self.size_bits = @max(self.size_bits, offset_bits + size_bits); + self.size_bits = @max(self.size_bits, try std.math.add(u64, offset_bits, size_bits)); self.aligned_bits = @max(self.aligned_bits, fld_align_bits); return .{ @@ -271,7 +272,7 @@ const SysVContext = struct { fld_layout: TypeLayout, is_named: bool, bit_width: u64, - ) FieldLayout { + ) !FieldLayout { const ty_size_bits = fld_layout.size_bits; var ty_fld_algn_bits: u32 = fld_layout.field_alignment_bits; @@ -301,7 +302,7 @@ const SysVContext = struct { const attr_packed = self.attr_packed or isPacked(fld_attrs); const has_packing_annotation = attr_packed or self.max_field_align_bits != null; - const annotation_alignment: u32 = if (Type.annotationAlignment(self.comp, fld_attrs)) |anno| anno * BITS_PER_BYTE else 1; + const annotation_alignment = if (Type.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| @as(u32, anno) * BITS_PER_BYTE else 1; const first_unused_bit: u64 = if (self.is_union) 0 else self.size_bits; var field_align_bits: u64 = 1; @@ -322,7 +323,7 @@ const SysVContext = 
struct { // - the alignment of the type is larger than its size, // then it is aligned to the type's field alignment. See test case 0083. if (!has_packing_annotation) { - const start_bit = std.mem.alignForward(u64, first_unused_bit, field_align_bits); + const start_bit = try alignForward(first_unused_bit, field_align_bits); const does_field_cross_boundary = start_bit % ty_fld_algn_bits + bit_width > ty_size_bits; @@ -349,8 +350,8 @@ const SysVContext = struct { } } - const offset_bits = std.mem.alignForward(u64, first_unused_bit, field_align_bits); - self.size_bits = @max(self.size_bits, offset_bits + bit_width); + const offset_bits = try alignForward(first_unused_bit, field_align_bits); + self.size_bits = @max(self.size_bits, try std.math.add(u64, offset_bits, bit_width)); // Unnamed fields do not contribute to the record alignment except on a few targets. // See test case 0079. @@ -419,10 +420,7 @@ const MsvcContext = struct { // The required alignment can be increased by adding a __declspec(align) // annotation. See test case 0023. - var must_align: u29 = BITS_PER_BYTE; - if (ty.requestedAlignment(comp)) |req_align| { - must_align = req_align * BITS_PER_BYTE; - } + const must_align = @as(u32, (ty.requestedAlignment(comp) orelse 1)) * BITS_PER_BYTE; return MsvcContext{ .req_align_bits = must_align, .pointer_align_bits = must_align, @@ -436,15 +434,15 @@ const MsvcContext = struct { }; } - fn layoutField(self: *MsvcContext, fld: *const Field, fld_attrs: ?[]const Attribute) FieldLayout { + fn layoutField(self: *MsvcContext, fld: *const Field, fld_attrs: ?[]const Attribute) !FieldLayout { const type_layout = computeLayout(fld.ty, self.comp); // The required alignment of the field is the maximum of the required alignment of the // underlying type and the __declspec(align) annotation on the field itself. // See test case 0028. 
var req_align = type_layout.required_alignment_bits; - if (Type.annotationAlignment(self.comp, fld_attrs)) |anno| { - req_align = @max(anno * BITS_PER_BYTE, req_align); + if (Type.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| { + req_align = @max(@as(u32, anno) * BITS_PER_BYTE, req_align); } // The required alignment of a record is the maximum of the required alignments of its @@ -480,7 +478,7 @@ const MsvcContext = struct { } } - fn layoutBitField(self: *MsvcContext, ty_size_bits: u64, field_align: u32, bit_width: u32) FieldLayout { + fn layoutBitField(self: *MsvcContext, ty_size_bits: u64, field_align: u32, bit_width: u32) !FieldLayout { if (bit_width == 0) { // A zero-sized bit-field that does not follow a non-zero-sized bit-field does not affect // the overall layout of the record. Even in a union where the order would otherwise @@ -522,7 +520,7 @@ const MsvcContext = struct { self.pointer_align_bits = @max(self.pointer_align_bits, p_align); self.field_align_bits = @max(self.field_align_bits, field_align); - const offset_bits = std.mem.alignForward(u64, self.size_bits, field_align); + const offset_bits = try alignForward(self.size_bits, field_align); self.size_bits = if (bit_width == 0) offset_bits else offset_bits + ty_size_bits; break :bits offset_bits; @@ -534,7 +532,7 @@ const MsvcContext = struct { return .{ .offset_bits = offset_bits, .size_bits = bit_width }; } - fn layoutRegularField(self: *MsvcContext, size_bits: u64, field_align: u32) FieldLayout { + fn layoutRegularField(self: *MsvcContext, size_bits: u64, field_align: u32) !FieldLayout { self.contains_non_bitfield = true; self.ongoing_bitfield = null; // The alignment of the field affects both the pointer alignment and the field @@ -543,7 +541,7 @@ const MsvcContext = struct { self.field_align_bits = @max(self.field_align_bits, field_align); const offset_bits = switch (self.is_union) { true => 0, - false => std.mem.alignForward(u64, self.size_bits, field_align), + 
false => try alignForward(self.size_bits, field_align), }; self.size_bits = @max(self.size_bits, offset_bits + size_bits); return .{ .offset_bits = offset_bits, .size_bits = size_bits }; @@ -569,14 +567,14 @@ const MsvcContext = struct { } }; -pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pack: ?u8) void { +pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pack: ?u8) Error!void { switch (comp.langopts.emulate) { .gcc, .clang => { var context = SysVContext.init(ty, comp, pragma_pack); - context.layoutFields(rec); + try context.layoutFields(rec); - context.size_bits = std.mem.alignForward(u64, context.size_bits, context.aligned_bits); + context.size_bits = try alignForward(context.size_bits, context.aligned_bits); rec.type_layout = .{ .size_bits = context.size_bits, @@ -594,7 +592,7 @@ pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pac field_attrs = attrs[fld_indx]; } - fld.layout = context.layoutField(fld, field_attrs); + fld.layout = try context.layoutField(fld, field_attrs); } if (context.size_bits == 0) { // As an extension, MSVC allows records that only contain zero-sized bitfields and empty @@ -602,7 +600,7 @@ pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pac // ensure that there are no zero-sized records. 
context.handleZeroSizedRecord(); } - context.size_bits = std.mem.alignForward(u64, context.size_bits, context.pointer_align_bits); + context.size_bits = try alignForward(context.size_bits, context.pointer_align_bits); rec.type_layout = .{ .size_bits = context.size_bits, .field_alignment_bits = context.field_align_bits, diff --git a/lib/compiler/aro/aro/target.zig b/lib/compiler/aro/aro/target.zig index 407ef4dfd01b..7b2e1576bfe1 100644 --- a/lib/compiler/aro/aro/target.zig +++ b/lib/compiler/aro/aro/target.zig @@ -35,10 +35,7 @@ pub fn intMaxType(target: std.Target) Type { /// intptr_t for this target pub fn intPtrType(target: std.Target) Type { - switch (target.os.tag) { - .haiku => return .{ .specifier = .long }, - else => {}, - } + if (target.os.tag == .haiku) return .{ .specifier = .long }; switch (target.cpu.arch) { .aarch64, .aarch64_be => switch (target.os.tag) { @@ -127,6 +124,14 @@ pub fn int64Type(target: std.Target) Type { return .{ .specifier = .long_long }; } +pub fn float80Type(target: std.Target) ?Type { + switch (target.cpu.arch) { + .x86, .x86_64 => return .{ .specifier = .long_double }, + else => {}, + } + return null; +} + /// This function returns 1 if function alignment is not observable or settable. 
pub fn defaultFunctionAlignment(target: std.Target) u8 { return switch (target.cpu.arch) { @@ -474,6 +479,7 @@ pub fn get32BitArchVariant(target: std.Target) ?std.Target { .kalimba, .lanai, .wasm32, + .spirv, .spirv32, .loongarch32, .dxil, @@ -544,6 +550,7 @@ pub fn get64BitArchVariant(target: std.Target) ?std.Target { .powerpcle => copy.cpu.arch = .powerpc64le, .riscv32 => copy.cpu.arch = .riscv64, .sparc => copy.cpu.arch = .sparc64, + .spirv => copy.cpu.arch = .spirv64, .spirv32 => copy.cpu.arch = .spirv64, .thumb => copy.cpu.arch = .aarch64, .thumbeb => copy.cpu.arch = .aarch64_be, @@ -599,6 +606,7 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 { .xtensa => "xtensa", .nvptx => "nvptx", .nvptx64 => "nvptx64", + .spirv => "spirv", .spirv32 => "spirv32", .spirv64 => "spirv64", .kalimba => "kalimba", @@ -646,9 +654,10 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 { .ios => "ios", .tvos => "tvos", .watchos => "watchos", - .visionos => "xros", .driverkit => "driverkit", .shadermodel => "shadermodel", + .visionos => "xros", + .serenity => "serenity", .opencl, .opengl, .vulkan, @@ -707,6 +716,7 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 { .callable => "callable", .mesh => "mesh", .amplification => "amplification", + .ohos => "openhos", }; writer.writeAll(llvm_abi) catch unreachable; return stream.getWritten(); diff --git a/lib/compiler/aro/aro/text_literal.zig b/lib/compiler/aro/aro/text_literal.zig index 1c5d59298234..d9f6b2a88bc8 100644 --- a/lib/compiler/aro/aro/text_literal.zig +++ b/lib/compiler/aro/aro/text_literal.zig @@ -71,7 +71,7 @@ pub const Kind = enum { pub fn maxCodepoint(kind: Kind, comp: *const Compilation) u21 { return @intCast(switch (kind) { .char => std.math.maxInt(u7), - .wide => @min(0x10FFFF, comp.types.wchar.maxInt(comp)), + .wide => @min(0x10FFFF, comp.wcharMax()), .utf_8 => std.math.maxInt(u7), .utf_16 => std.math.maxInt(u16), .utf_32 => 0x10FFFF, @@ -83,7 +83,7 @@ pub const Kind = enum 
{ pub fn maxInt(kind: Kind, comp: *const Compilation) u32 { return @intCast(switch (kind) { .char, .utf_8 => std.math.maxInt(u8), - .wide => comp.types.wchar.maxInt(comp), + .wide => comp.wcharMax(), .utf_16 => std.math.maxInt(u16), .utf_32 => std.math.maxInt(u32), .unterminated => unreachable, diff --git a/lib/compiler/aro/aro/toolchains/Linux.zig b/lib/compiler/aro/aro/toolchains/Linux.zig index 36ab916b10fb..a7d8c71bef89 100644 --- a/lib/compiler/aro/aro/toolchains/Linux.zig +++ b/lib/compiler/aro/aro/toolchains/Linux.zig @@ -423,7 +423,7 @@ test Linux { defer arena_instance.deinit(); const arena = arena_instance.allocator(); - var comp = Compilation.init(std.testing.allocator); + var comp = Compilation.init(std.testing.allocator, std.fs.cwd()); defer comp.deinit(); comp.environment = .{ .path = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", diff --git a/lib/compiler/aro/backend/Interner.zig b/lib/compiler/aro/backend/Interner.zig index 45b6e51fd950..631ec8ee16be 100644 --- a/lib/compiler/aro/backend/Interner.zig +++ b/lib/compiler/aro/backend/Interner.zig @@ -34,6 +34,7 @@ const KeyAdapter = struct { pub const Key = union(enum) { int_ty: u16, float_ty: u16, + complex_ty: u16, ptr_ty, noreturn_ty, void_ty, @@ -62,6 +63,7 @@ pub const Key = union(enum) { } }, float: Float, + complex: Complex, bytes: []const u8, pub const Float = union(enum) { @@ -71,6 +73,13 @@ pub const Key = union(enum) { f80: f80, f128: f128, }; + pub const Complex = union(enum) { + cf16: [2]f16, + cf32: [2]f32, + cf64: [2]f64, + cf80: [2]f80, + cf128: [2]f128, + }; pub fn hash(key: Key) u32 { var hasher = Hash.init(0); @@ -89,6 +98,12 @@ pub const Key = union(enum) { @as(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(data))), @bitCast(data)), ), }, + .complex => |repr| switch (repr) { + inline else => |data| std.hash.autoHash( + &hasher, + @as(std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(data))), @bitCast(data)), + ), + }, .int => |repr| { var space: Tag.Int.BigIntSpace = 
undefined; const big = repr.toBigInt(&space); @@ -154,6 +169,14 @@ pub const Key = union(enum) { 128 => return .f128, else => unreachable, }, + .complex_ty => |bits| switch (bits) { + 16 => return .cf16, + 32 => return .cf32, + 64 => return .cf64, + 80 => return .cf80, + 128 => return .cf128, + else => unreachable, + }, .ptr_ty => return .ptr, .func_ty => return .func, .noreturn_ty => return .noreturn, @@ -199,6 +222,11 @@ pub const Ref = enum(u32) { zero = max - 16, one = max - 17, null = max - 18, + cf16 = max - 19, + cf32 = max - 20, + cf64 = max - 21, + cf80 = max - 22, + cf128 = max - 23, _, }; @@ -224,6 +252,11 @@ pub const OptRef = enum(u32) { zero = max - 16, one = max - 17, null = max - 18, + cf16 = max - 19, + cf32 = max - 20, + cf64 = max - 21, + cf80 = max - 22, + cf128 = max - 23, _, }; @@ -232,6 +265,8 @@ pub const Tag = enum(u8) { int_ty, /// `data` is `u16` float_ty, + /// `data` is `u16` + complex_ty, /// `data` is index to `Array` array_ty, /// `data` is index to `Vector` @@ -254,6 +289,16 @@ pub const Tag = enum(u8) { f80, /// `data` is `F128` f128, + /// `data` is `CF16` + cf16, + /// `data` is `CF32` + cf32, + /// `data` is `CF64` + cf64, + /// `data` is `CF80` + cf80, + /// `data` is `CF128` + cf128, /// `data` is `Bytes` bytes, /// `data` is `Record` @@ -354,6 +399,134 @@ pub const Tag = enum(u8) { } }; + pub const CF16 = struct { + piece0: u32, + + pub fn get(self: CF16) [2]f16 { + const real: f16 = @bitCast(@as(u16, @truncate(self.piece0 >> 16))); + const imag: f16 = @bitCast(@as(u16, @truncate(self.piece0))); + return .{ + real, + imag, + }; + } + + fn pack(val: [2]f16) CF16 { + const real: u16 = @bitCast(val[0]); + const imag: u16 = @bitCast(val[1]); + return .{ + .piece0 = (@as(u32, real) << 16) | @as(u32, imag), + }; + } + }; + + pub const CF32 = struct { + piece0: u32, + piece1: u32, + + pub fn get(self: CF32) [2]f32 { + return .{ + @bitCast(self.piece0), + @bitCast(self.piece1), + }; + } + + fn pack(val: [2]f32) CF32 { + return .{ + 
.piece0 = @bitCast(val[0]), + .piece1 = @bitCast(val[1]), + }; + } + }; + + pub const CF64 = struct { + piece0: u32, + piece1: u32, + piece2: u32, + piece3: u32, + + pub fn get(self: CF64) [2]f64 { + return .{ + (F64{ .piece0 = self.piece0, .piece1 = self.piece1 }).get(), + (F64{ .piece0 = self.piece2, .piece1 = self.piece3 }).get(), + }; + } + + fn pack(val: [2]f64) CF64 { + const real = F64.pack(val[0]); + const imag = F64.pack(val[1]); + return .{ + .piece0 = real.piece0, + .piece1 = real.piece1, + .piece2 = imag.piece0, + .piece3 = imag.piece1, + }; + } + }; + + /// TODO pack into 5 pieces + pub const CF80 = struct { + piece0: u32, + piece1: u32, + piece2: u32, // u16 part, top bits + piece3: u32, + piece4: u32, + piece5: u32, // u16 part, top bits + + pub fn get(self: CF80) [2]f80 { + return .{ + (F80{ .piece0 = self.piece0, .piece1 = self.piece1, .piece2 = self.piece2 }).get(), + (F80{ .piece0 = self.piece3, .piece1 = self.piece4, .piece2 = self.piece5 }).get(), + }; + } + + fn pack(val: [2]f80) CF80 { + const real = F80.pack(val[0]); + const imag = F80.pack(val[1]); + return .{ + .piece0 = real.piece0, + .piece1 = real.piece1, + .piece2 = real.piece2, + .piece3 = imag.piece0, + .piece4 = imag.piece1, + .piece5 = imag.piece2, + }; + } + }; + + pub const CF128 = struct { + piece0: u32, + piece1: u32, + piece2: u32, + piece3: u32, + piece4: u32, + piece5: u32, + piece6: u32, + piece7: u32, + + pub fn get(self: CF128) [2]f128 { + return .{ + (F128{ .piece0 = self.piece0, .piece1 = self.piece1, .piece2 = self.piece2, .piece3 = self.piece3 }).get(), + (F128{ .piece0 = self.piece4, .piece1 = self.piece5, .piece2 = self.piece6, .piece3 = self.piece7 }).get(), + }; + } + + fn pack(val: [2]f128) CF128 { + const real = F128.pack(val[0]); + const imag = F128.pack(val[1]); + return .{ + .piece0 = real.piece0, + .piece1 = real.piece1, + .piece2 = real.piece2, + .piece3 = real.piece3, + .piece4 = imag.piece0, + .piece5 = imag.piece1, + .piece6 = imag.piece2, + .piece7 = 
imag.piece3, + }; + } + }; + pub const Bytes = struct { strings_index: u32, len: u32, @@ -407,6 +580,12 @@ pub fn put(i: *Interner, gpa: Allocator, key: Key) !Ref { .data = bits, }); }, + .complex_ty => |bits| { + i.items.appendAssumeCapacity(.{ + .tag = .complex_ty, + .data = bits, + }); + }, .array_ty => |info| { const split_len = PackedU64.init(info.len); i.items.appendAssumeCapacity(.{ @@ -493,6 +672,28 @@ pub fn put(i: *Interner, gpa: Allocator, key: Key) !Ref { .data = try i.addExtra(gpa, Tag.F128.pack(data)), }), }, + .complex => |repr| switch (repr) { + .cf16 => |data| i.items.appendAssumeCapacity(.{ + .tag = .cf16, + .data = try i.addExtra(gpa, Tag.CF16.pack(data)), + }), + .cf32 => |data| i.items.appendAssumeCapacity(.{ + .tag = .cf32, + .data = try i.addExtra(gpa, Tag.CF32.pack(data)), + }), + .cf64 => |data| i.items.appendAssumeCapacity(.{ + .tag = .cf64, + .data = try i.addExtra(gpa, Tag.CF64.pack(data)), + }), + .cf80 => |data| i.items.appendAssumeCapacity(.{ + .tag = .cf80, + .data = try i.addExtra(gpa, Tag.CF80.pack(data)), + }), + .cf128 => |data| i.items.appendAssumeCapacity(.{ + .tag = .cf128, + .data = try i.addExtra(gpa, Tag.CF128.pack(data)), + }), + }, .bytes => |bytes| { const strings_index: u32 = @intCast(i.strings.items.len); try i.strings.appendSlice(gpa, bytes); @@ -564,6 +765,10 @@ pub fn get(i: *const Interner, ref: Ref) Key { .zero => return .{ .int = .{ .u64 = 0 } }, .one => return .{ .int = .{ .u64 = 1 } }, .null => return .null, + .cf16 => return .{ .complex_ty = 16 }, + .cf32 => return .{ .complex_ty = 32 }, + .cf64 => return .{ .complex_ty = 64 }, + .cf80 => return .{ .complex_ty = 80 }, else => {}, } @@ -572,6 +777,7 @@ pub fn get(i: *const Interner, ref: Ref) Key { return switch (item.tag) { .int_ty => .{ .int_ty = @intCast(data) }, .float_ty => .{ .float_ty = @intCast(data) }, + .complex_ty => .{ .complex_ty = @intCast(data) }, .array_ty => { const array_ty = i.extraData(Tag.Array, data); return .{ .array_ty = .{ @@ -612,6 
+818,26 @@ pub fn get(i: *const Interner, ref: Ref) Key { const float = i.extraData(Tag.F128, data); return .{ .float = .{ .f128 = float.get() } }; }, + .cf16 => { + const components = i.extraData(Tag.CF16, data); + return .{ .complex = .{ .cf16 = components.get() } }; + }, + .cf32 => { + const components = i.extraData(Tag.CF32, data); + return .{ .complex = .{ .cf32 = components.get() } }; + }, + .cf64 => { + const components = i.extraData(Tag.CF64, data); + return .{ .complex = .{ .cf64 = components.get() } }; + }, + .cf80 => { + const components = i.extraData(Tag.CF80, data); + return .{ .complex = .{ .cf80 = components.get() } }; + }, + .cf128 => { + const components = i.extraData(Tag.CF128, data); + return .{ .complex = .{ .cf128 = components.get() } }; + }, .bytes => { const bytes = i.extraData(Tag.Bytes, data); return .{ .bytes = i.strings.items[bytes.strings_index..][0..bytes.len] }; diff --git a/lib/compiler/aro/backend/Ir.zig b/lib/compiler/aro/backend/Ir.zig index 15c153e8f178..e694a23c9ae1 100644 --- a/lib/compiler/aro/backend/Ir.zig +++ b/lib/compiler/aro/backend/Ir.zig @@ -37,6 +37,7 @@ pub const Builder = struct { for (b.decls.values()) |*decl| { decl.deinit(b.gpa); } + b.decls.deinit(b.gpa); b.arena.deinit(); b.instructions.deinit(b.gpa); b.body.deinit(b.gpa); diff --git a/lib/compiler/aro/backend/Object.zig b/lib/compiler/aro/backend/Object.zig index b42ad4bdcbb7..98355e88b6a0 100644 --- a/lib/compiler/aro/backend/Object.zig +++ b/lib/compiler/aro/backend/Object.zig @@ -16,7 +16,7 @@ pub fn create(gpa: Allocator, target: std.Target) !*Object { pub fn deinit(obj: *Object) void { switch (obj.format) { - .elf => @as(*Elf, @fieldParentPtr("obj", obj)).deinit(), + .elf => @as(*Elf, @alignCast(@fieldParentPtr("obj", obj))).deinit(), else => unreachable, } } @@ -32,7 +32,7 @@ pub const Section = union(enum) { pub fn getSection(obj: *Object, section: Section) !*std.ArrayList(u8) { switch (obj.format) { - .elf => return @as(*Elf, @fieldParentPtr("obj", 
obj)).getSection(section), + .elf => return @as(*Elf, @alignCast(@fieldParentPtr("obj", obj))).getSection(section), else => unreachable, } } @@ -53,21 +53,21 @@ pub fn declareSymbol( size: u64, ) ![]const u8 { switch (obj.format) { - .elf => return @as(*Elf, @fieldParentPtr("obj", obj)).declareSymbol(section, name, linkage, @"type", offset, size), + .elf => return @as(*Elf, @alignCast(@fieldParentPtr("obj", obj))).declareSymbol(section, name, linkage, @"type", offset, size), else => unreachable, } } pub fn addRelocation(obj: *Object, name: []const u8, section: Section, address: u64, addend: i64) !void { switch (obj.format) { - .elf => return @as(*Elf, @fieldParentPtr("obj", obj)).addRelocation(name, section, address, addend), + .elf => return @as(*Elf, @alignCast(@fieldParentPtr("obj", obj))).addRelocation(name, section, address, addend), else => unreachable, } } pub fn finish(obj: *Object, file: std.fs.File) !void { switch (obj.format) { - .elf => return @as(*Elf, @fieldParentPtr("obj", obj)).finish(file), + .elf => return @as(*Elf, @alignCast(@fieldParentPtr("obj", obj))).finish(file), else => unreachable, } } diff --git a/lib/compiler/aro_translate_c.zig b/lib/compiler/aro_translate_c.zig index 692786c2bead..4255989416df 100644 --- a/lib/compiler/aro_translate_c.zig +++ b/lib/compiler/aro_translate_c.zig @@ -731,7 +731,6 @@ fn transType(c: *Context, scope: *Scope, raw_ty: Type, qual_handling: Type.QualH .float => return ZigTag.type.create(c.arena, "f32"), .double => return ZigTag.type.create(c.arena, "f64"), .long_double => return ZigTag.type.create(c.arena, "c_longdouble"), - .float80 => return ZigTag.type.create(c.arena, "f80"), .float128 => return ZigTag.type.create(c.arena, "f128"), .@"enum" => { const enum_decl = ty.data.@"enum"; @@ -1799,7 +1798,7 @@ pub fn main() !void { const args = try std.process.argsAlloc(arena); - var aro_comp = aro.Compilation.init(gpa); + var aro_comp = aro.Compilation.init(gpa, std.fs.cwd()); defer aro_comp.deinit(); var tree = 
translate(gpa, &aro_comp, args) catch |err| switch (err) { diff --git a/lib/compiler/resinator/main.zig b/lib/compiler/resinator/main.zig index c09801096cc2..4159ad03e30f 100644 --- a/lib/compiler/resinator/main.zig +++ b/lib/compiler/resinator/main.zig @@ -126,7 +126,7 @@ pub fn main() !void { defer aro_arena_state.deinit(); const aro_arena = aro_arena_state.allocator(); - var comp = aro.Compilation.init(aro_arena); + var comp = aro.Compilation.init(aro_arena, std.fs.cwd()); defer comp.deinit(); var argv = std.ArrayList([]const u8).init(comp.gpa); diff --git a/lib/compiler/resinator/preprocess.zig b/lib/compiler/resinator/preprocess.zig index 3d6912a7c095..c5b4b1edea54 100644 --- a/lib/compiler/resinator/preprocess.zig +++ b/lib/compiler/resinator/preprocess.zig @@ -59,7 +59,7 @@ pub fn preprocess( if (hasAnyErrors(comp)) return error.PreprocessError; - try pp.prettyPrintTokens(writer); + try pp.prettyPrintTokens(writer, .result_only); if (maybe_dependencies_list) |dependencies_list| { for (comp.sources.values()) |comp_source| { diff --git a/src/mingw.zig b/src/mingw.zig index 114c16876552..ab5f4f26dbb3 100644 --- a/src/mingw.zig +++ b/src/mingw.zig @@ -230,7 +230,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { }; const aro = @import("aro"); - var aro_comp = aro.Compilation.init(comp.gpa); + var aro_comp = aro.Compilation.init(comp.gpa, std.fs.cwd()); defer aro_comp.deinit(); const include_dir = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "mingw", "def-include" }); @@ -268,7 +268,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void { // new scope to ensure definition file is written before passing the path to WriteImportLibrary const def_final_file = try o_dir.createFile(final_def_basename, .{ .truncate = true }); defer def_final_file.close(); - try pp.prettyPrintTokens(def_final_file.writer()); + try pp.prettyPrintTokens(def_final_file.writer(), .result_only); } const lib_final_path = try 
comp.global_cache_directory.join(comp.gpa, &[_][]const u8{ diff --git a/src/translate_c.zig b/src/translate_c.zig index ef81a5dfdacf..9e974fc237c6 100644 --- a/src/translate_c.zig +++ b/src/translate_c.zig @@ -5258,7 +5258,7 @@ fn getMacroText(unit: *const clang.ASTUnit, c: *const Context, macro: *const cla const end_c = c.source_manager.getCharacterData(end_loc); const slice_len = @intFromPtr(end_c) - @intFromPtr(begin_c); - var comp = aro.Compilation.init(c.gpa); + var comp = aro.Compilation.init(c.gpa, std.fs.cwd()); defer comp.deinit(); const result = comp.addSourceFromBuffer("", begin_c[0..slice_len]) catch return error.OutOfMemory; From 95bb53653d11e62e3bfb9de6b3668ac0c1c466f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sun, 8 Sep 2024 01:38:44 +0200 Subject: [PATCH 096/202] zig cc: Support `-rtlib=none` for disabling compiler-rt. --- src/clang_options_data.zig | 4 ++-- src/main.zig | 14 ++++++++++++++ tools/update_clang_options.zig | 8 ++++++++ 3 files changed, 24 insertions(+), 2 deletions(-) diff --git a/src/clang_options_data.zig b/src/clang_options_data.zig index f2f3708d4dee..ad07b45500e8 100644 --- a/src/clang_options_data.zig +++ b/src/clang_options_data.zig @@ -1984,7 +1984,7 @@ flagpsl("MT"), .{ .name = "rtlib", .syntax = .separate, - .zig_equivalent = .other, + .zig_equivalent = .rtlib, .pd1 = false, .pd2 = true, .psl = false, @@ -7331,7 +7331,7 @@ jspd1("iquote"), .{ .name = "rtlib=", .syntax = .joined, - .zig_equivalent = .other, + .zig_equivalent = .rtlib, .pd1 = true, .pd2 = true, .psl = false, diff --git a/src/main.zig b/src/main.zig index 1db2be6f2548..0e7a801b4617 100644 --- a/src/main.zig +++ b/src/main.zig @@ -2180,6 +2180,19 @@ fn buildOutputType( fatal("unsupported -undefined option '{s}'", .{it.only_arg}); } }, + .rtlib => { + // Unlike Clang, we support `none` for explicitly omitting compiler-rt. 
+ if (mem.eql(u8, "none", it.only_arg)) { + want_compiler_rt = false; + } else if (mem.eql(u8, "compiler-rt", it.only_arg) or + mem.eql(u8, "libgcc", it.only_arg)) + { + want_compiler_rt = true; + } else { + // Note that we don't support `platform`. + fatal("unsupported -rtlib option '{s}'", .{it.only_arg}); + } + }, } } // Parse linker args. @@ -5810,6 +5823,7 @@ pub const ClangArgIterator = struct { san_cov_trace_pc_guard, san_cov, no_san_cov, + rtlib, }; const Args = struct { diff --git a/tools/update_clang_options.zig b/tools/update_clang_options.zig index 69f4d589746d..f21993fbd6fc 100644 --- a/tools/update_clang_options.zig +++ b/tools/update_clang_options.zig @@ -548,6 +548,14 @@ const known_options = [_]KnownOpt{ .name = "fno-sanitize-coverage", .ident = "no_san_cov", }, + .{ + .name = "rtlib", + .ident = "rtlib", + }, + .{ + .name = "rtlib=", + .ident = "rtlib", + }, }; const blacklisted_options = [_][]const u8{}; From 6b1f50942212def19b094016a00dbba5387c2d58 Mon Sep 17 00:00:00 2001 From: Des-Nerger Date: Tue, 10 Sep 2024 03:45:18 +1000 Subject: [PATCH 097/202] std.zig.render: replace magic number `4` with `indent_delta`. 
--- lib/std/zig/render.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig index e5950429ee63..cd17b6963956 100644 --- a/lib/std/zig/render.zig +++ b/lib/std/zig/render.zig @@ -3207,7 +3207,7 @@ fn anythingBetween(tree: Ast, start_token: Ast.TokenIndex, end_token: Ast.TokenI fn writeFixingWhitespace(writer: std.ArrayList(u8).Writer, slice: []const u8) Error!void { for (slice) |byte| switch (byte) { - '\t' => try writer.writeAll(" " ** 4), + '\t' => try writer.writeAll(" " ** indent_delta), '\r' => {}, else => try writer.writeByte(byte), }; From d6d09f4ea7cc590035bf8af6617f93767423c691 Mon Sep 17 00:00:00 2001 From: xdBronch <51252236+xdBronch@users.noreply.github.com> Date: Mon, 9 Sep 2024 00:49:03 -0400 Subject: [PATCH 098/202] add error for discarding if/while pointer capture --- lib/std/zig/AstGen.zig | 16 +++++++++--- .../compile_errors/capture_by_ref_discard.zig | 26 +++++++++++++++++++ 2 files changed, 38 insertions(+), 4 deletions(-) create mode 100644 test/cases/compile_errors/capture_by_ref_discard.zig diff --git a/lib/std/zig/AstGen.zig b/lib/std/zig/AstGen.zig index 44af61048dc2..c15a995e6010 100644 --- a/lib/std/zig/AstGen.zig +++ b/lib/std/zig/AstGen.zig @@ -6333,8 +6333,10 @@ fn ifExpr( const token_name_index = payload_token + @intFromBool(payload_is_ref); const ident_name = try astgen.identAsString(token_name_index); const token_name_str = tree.tokenSlice(token_name_index); - if (mem.eql(u8, "_", token_name_str)) + if (mem.eql(u8, "_", token_name_str)) { + if (payload_is_ref) return astgen.failTok(payload_token, "pointer modifier invalid on discard", .{}); break :s &then_scope.base; + } try astgen.detectLocalShadowing(&then_scope.base, ident_name, token_name_index, token_name_str, .capture); payload_val_scope = .{ .parent = &then_scope.base, @@ -6357,8 +6359,10 @@ fn ifExpr( else .optional_payload_unsafe; const ident_bytes = tree.tokenSlice(ident_token); - if (mem.eql(u8, "_", 
ident_bytes)) + if (mem.eql(u8, "_", ident_bytes)) { + if (payload_is_ref) return astgen.failTok(payload_token, "pointer modifier invalid on discard", .{}); break :s &then_scope.base; + } const payload_inst = try then_scope.addUnNode(tag, cond.inst, then_node); const ident_name = try astgen.identAsString(ident_token); try astgen.detectLocalShadowing(&then_scope.base, ident_name, ident_token, ident_bytes, .capture); @@ -6581,8 +6585,10 @@ fn whileExpr( opt_payload_inst = payload_inst.toOptional(); const ident_token = payload_token + @intFromBool(payload_is_ref); const ident_bytes = tree.tokenSlice(ident_token); - if (mem.eql(u8, "_", ident_bytes)) + if (mem.eql(u8, "_", ident_bytes)) { + if (payload_is_ref) return astgen.failTok(payload_token, "pointer modifier invalid on discard", .{}); break :s &then_scope.base; + } const ident_name = try astgen.identAsString(ident_token); try astgen.detectLocalShadowing(&then_scope.base, ident_name, ident_token, ident_bytes, .capture); payload_val_scope = .{ @@ -6611,8 +6617,10 @@ fn whileExpr( opt_payload_inst = payload_inst.toOptional(); const ident_name = try astgen.identAsString(ident_token); const ident_bytes = tree.tokenSlice(ident_token); - if (mem.eql(u8, "_", ident_bytes)) + if (mem.eql(u8, "_", ident_bytes)) { + if (payload_is_ref) return astgen.failTok(payload_token, "pointer modifier invalid on discard", .{}); break :s &then_scope.base; + } try astgen.detectLocalShadowing(&then_scope.base, ident_name, ident_token, ident_bytes, .capture); payload_val_scope = .{ .parent = &then_scope.base, diff --git a/test/cases/compile_errors/capture_by_ref_discard.zig b/test/cases/compile_errors/capture_by_ref_discard.zig new file mode 100644 index 000000000000..a80415119c0d --- /dev/null +++ b/test/cases/compile_errors/capture_by_ref_discard.zig @@ -0,0 +1,26 @@ +export fn a() void { + for (.{}) |*_| {} +} + +export fn b() void { + switch (0) { + else => |*_| {}, + } +} + +export fn c() void { + if (null) |*_| {} +} + +export fn d() 
void { + while (null) |*_| {} +} + +// error +// backend=stage2 +// target=native +// +// :2:16: error: pointer modifier invalid on discard +// :7:18: error: pointer modifier invalid on discard +// :12:16: error: pointer modifier invalid on discard +// :16:19: error: pointer modifier invalid on discard From 68367999351663421939d17c87af8269c824c1b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sun, 25 Aug 2024 06:56:41 +0200 Subject: [PATCH 099/202] llvm: Set use-soft-float and noimplicitfloat on functions for soft float. Closes #10961. --- src/codegen/llvm.zig | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 59df12df610d..d7d1be6a5074 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -3108,6 +3108,30 @@ pub const Object = struct { .value = .empty, } }, &o.builder); } + if (target.floatAbi() == .soft) { + // `use-soft-float` means "use software routines for floating point computations". In + // other words, it configures how LLVM lowers basic float instructions like `fcmp`, + // `fadd`, etc. The float calling convention is configured on `TargetMachine` and is + // mostly an orthogonal concept, although obviously we do need hardware float operations + // to actually be able to pass float values in float registers. + // + // Ideally, we would support something akin to the `-mfloat-abi=softfp` option that GCC + // and Clang support for Arm32 and CSKY. We don't currently expose such an option in + // Zig, and using CPU features as the source of truth for this makes for a miserable + // user experience since people expect e.g. `arm-linux-gnueabi` to mean full soft float + // unless the compiler has explicitly been told otherwise. (And note that our baseline + // CPU models almost all include FPU features!) + // + // Revisit this at some point. 
+ try attributes.addFnAttr(.{ .string = .{ + .kind = try o.builder.string("use-soft-float"), + .value = try o.builder.string("true"), + } }, &o.builder); + + // This prevents LLVM from using FPU/SIMD code for things like `memcpy`. As for the + // above, this should be revisited if `softfp` support is added. + try attributes.addFnAttr(.noimplicitfloat, &o.builder); + } } fn resolveGlobalUav( From 70c92331c79018f8c5cc845f1015af7fa941f3a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Wed, 28 Aug 2024 21:32:06 +0200 Subject: [PATCH 100/202] llvm: Limit f16/f128 lowering on arm to fp_armv8 and soft float. --- src/codegen/llvm.zig | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index d7d1be6a5074..5ba80e3c4e34 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -12423,6 +12423,11 @@ fn backendSupportsF16(target: std.Target) bool { .mips64el, .s390x, => false, + .arm, + .armeb, + .thumb, + .thumbeb, + => target.floatAbi() == .soft or std.Target.arm.featureSetHas(target.cpu.features, .fp_armv8), .aarch64, .aarch64_be, => std.Target.aarch64.featureSetHas(target.cpu.features, .fp_armv8), @@ -12445,6 +12450,11 @@ fn backendSupportsF128(target: std.Target) bool { .powerpc64, .powerpc64le, => target.os.tag != .aix, + .arm, + .armeb, + .thumb, + .thumbeb, + => target.floatAbi() == .soft or std.Target.arm.featureSetHas(target.cpu.features, .fp_armv8), .aarch64, .aarch64_be, => std.Target.aarch64.featureSetHas(target.cpu.features, .fp_armv8), From 77c8f4b6713c5209f0cde62001b5995641f8bf60 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sun, 25 Aug 2024 07:18:29 +0200 Subject: [PATCH 101/202] Compilation: Pass hard/soft float flags to Clang as appropriate. 
--- src/Compilation.zig | 22 ++++++++++++++++++---- src/target.zig | 42 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 4 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 54fa1208569f..ffd004b93e2a 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -5484,6 +5484,9 @@ pub fn addCCArgs( const is_enabled = target.cpu.features.isEnabled(index); if (feature.llvm_name) |llvm_name| { + // We communicate float ABI to Clang through the dedicated options further down. + if (std.mem.eql(u8, llvm_name, "soft-float")) continue; + argv.appendSliceAssumeCapacity(&[_][]const u8{ "-Xclang", "-target-feature", "-Xclang" }); const plus_or_minus = "-+"[@intFromBool(is_enabled)]; const arg = try std.fmt.allocPrint(arena, "{c}{s}", .{ plus_or_minus, llvm_name }); @@ -5705,10 +5708,6 @@ pub fn addCCArgs( if (target.cpu.model.llvm_name) |llvm_name| { try argv.append(try std.fmt.allocPrint(arena, "-march={s}", .{llvm_name})); } - - if (std.Target.mips.featureSetHas(target.cpu.features, .soft_float)) { - try argv.append("-msoft-float"); - } }, else => { // TODO @@ -5751,6 +5750,21 @@ pub fn addCCArgs( try argv.append(try std.fmt.allocPrint(arena, "-mabi={s}", .{mabi})); } + // We might want to support -mfloat-abi=softfp for Arm and CSKY here in the future. + if (target_util.clangSupportsFloatAbiArg(target)) { + const fabi = @tagName(target.floatAbi()); + + try argv.append(switch (target.cpu.arch) { + // For whatever reason, Clang doesn't support `-mfloat-abi` for s390x. 
+ .s390x => try std.fmt.allocPrint(arena, "-m{s}-float", .{fabi}), + else => try std.fmt.allocPrint(arena, "-mfloat-abi={s}", .{fabi}), + }); + } + + if (target_util.clangSupportsNoImplicitFloatArg(target) and target.floatAbi() == .soft) { + try argv.append("-mno-implicit-float"); + } + if (out_dep_path) |p| { try argv.appendSlice(&[_][]const u8{ "-MD", "-MV", "-MF", p }); } diff --git a/src/target.zig b/src/target.zig index 221c2029baf9..59c151bd6e64 100644 --- a/src/target.zig +++ b/src/target.zig @@ -339,6 +339,48 @@ pub fn clangAssemblerSupportsMcpuArg(target: std.Target) bool { }; } +pub fn clangSupportsFloatAbiArg(target: std.Target) bool { + return switch (target.cpu.arch) { + .arm, + .armeb, + .thumb, + .thumbeb, + .csky, + .mips, + .mipsel, + .mips64, + .mips64el, + .powerpc, + .powerpcle, + .powerpc64, + .powerpc64le, + .s390x, + .sparc, + .sparc64, + => true, + // We use the target triple for LoongArch. + .loongarch32, .loongarch64 => false, + else => false, + }; +} + +pub fn clangSupportsNoImplicitFloatArg(target: std.Target) bool { + return switch (target.cpu.arch) { + .aarch64, + .aarch64_be, + .arm, + .armeb, + .thumb, + .thumbeb, + .riscv32, + .riscv64, + .x86, + .x86_64, + => true, + else => false, + }; +} + pub fn needUnwindTables(target: std.Target) bool { return target.os.tag == .windows or target.isDarwin() or std.debug.Dwarf.abi.supportsUnwinding(target); } From f9455511faba40ad719fa0e6897ea09ac5fb9d23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sun, 25 Aug 2024 06:57:18 +0200 Subject: [PATCH 102/202] Compilation: Work around llvm/llvm-project#105972 by defining the macros. 
https://github.com/llvm/llvm-project/issues/105972 --- src/Compilation.zig | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/Compilation.zig b/src/Compilation.zig index ffd004b93e2a..bd93b2061b48 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -5765,6 +5765,13 @@ pub fn addCCArgs( try argv.append("-mno-implicit-float"); } + // https://github.com/llvm/llvm-project/issues/105972 + if (target.cpu.arch.isPowerPC() and target.floatAbi() == .soft) { + try argv.append("-D__NO_FPRS__"); + try argv.append("-D_SOFT_FLOAT"); + try argv.append("-D_SOFT_DOUBLE"); + } + if (out_dep_path) |p| { try argv.appendSlice(&[_][]const u8{ "-MD", "-MV", "-MF", p }); } From 65d36be4a80c45897b344e10cc7e63daafcab9fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Tue, 27 Aug 2024 02:46:54 +0200 Subject: [PATCH 103/202] std.zig.system: Work around llvm/llvm-project#105978 by disabling vfp2. https://github.com/llvm/llvm-project/issues/105978 --- lib/std/zig/system.zig | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/std/zig/system.zig b/lib/std/zig/system.zig index 4de1e2065671..046bd3854e45 100644 --- a/lib/std/zig/system.zig +++ b/lib/std/zig/system.zig @@ -384,6 +384,12 @@ pub fn resolveTargetQuery(query: Target.Query) DetectError!Target { query.cpu_features_add, query.cpu_features_sub, ); + + // https://github.com/llvm/llvm-project/issues/105978 + if (result.cpu.arch.isArmOrThumb() and result.floatAbi() == .soft) { + result.cpu.features.removeFeature(@intFromEnum(Target.arm.Feature.vfp2)); + } + return result; } From 4fcd3e00fad48b15c19f6ed4771fc73adae30f89 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Wed, 28 Aug 2024 17:37:23 +0200 Subject: [PATCH 104/202] musl: Build with -ffp-contract=off. 
https://www.openwall.com/lists/musl/2024/08/28/1 --- src/musl.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/src/musl.zig b/src/musl.zig index acf590203349..5ddbcb66522c 100644 --- a/src/musl.zig +++ b/src/musl.zig @@ -387,6 +387,7 @@ fn addCcArgs( "-fno-builtin", "-fexcess-precision=standard", "-frounding-math", + "-ffp-contract=off", "-fno-strict-aliasing", "-Wa,--noexecstack", "-D_XOPEN_SOURCE=700", From 5285f41267f20150d58a40a96e4380a3b611955d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Wed, 28 Aug 2024 10:00:04 +0200 Subject: [PATCH 105/202] test: Disable `store vector with memset` on soft float arm. https://github.com/ziglang/zig/issues/16177 --- test/behavior/vector.zig | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index cee9f168f193..9baba8485085 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -1442,7 +1442,14 @@ test "store vector with memset" { if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm) { + // LLVM 16 ERROR: "Converting bits to bytes lost precision" + // https://github.com/ziglang/zig/issues/16177 switch (builtin.target.cpu.arch) { + .arm, + .armeb, + .thumb, + .thumbeb, + => if (builtin.target.floatAbi() == .soft) return error.SkipZigTest, .wasm32, .mips, .mipsel, @@ -1451,11 +1458,7 @@ test "store vector with memset" { .riscv64, .powerpc, .powerpc64, - => { - // LLVM 16 ERROR: "Converting bits to bytes lost precision" - // https://github.com/ziglang/zig/issues/16177 - return error.SkipZigTest; - }, + => return error.SkipZigTest, else => {}, } } From 26119bd98d2f867eb06eeea9431883931d806dd5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Wed, 28 Aug 2024 21:04:45 +0200 Subject: [PATCH 106/202] test: Skip some floating point tests that fail on `arm-linux-(gnu,musl)eabi`. 
https://github.com/ziglang/zig/issues/21234 --- lib/std/math/gamma.zig | 3 +++ test/behavior/floatop.zig | 2 ++ test/behavior/math.zig | 1 + 3 files changed, 6 insertions(+) diff --git a/lib/std/math/gamma.zig b/lib/std/math/gamma.zig index ce37db496215..5ac041e195df 100644 --- a/lib/std/math/gamma.zig +++ b/lib/std/math/gamma.zig @@ -3,6 +3,7 @@ // // https://git.musl-libc.org/cgit/musl/tree/src/math/tgamma.c +const builtin = @import("builtin"); const std = @import("../std.zig"); /// Returns the gamma function of x, @@ -262,6 +263,8 @@ test gamma { } test "gamma.special" { + if (builtin.cpu.arch.isArmOrThumb() and builtin.target.floatAbi() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234 + inline for (&.{ f32, f64 }) |T| { try expect(std.math.isNan(gamma(T, -std.math.nan(T)))); try expect(std.math.isNan(gamma(T, std.math.nan(T)))); diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig index f9f2579b0aca..d799c9133119 100644 --- a/test/behavior/floatop.zig +++ b/test/behavior/floatop.zig @@ -126,6 +126,7 @@ test "cmp f16" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.cpu.arch.isArmOrThumb() and builtin.target.floatAbi() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234 try testCmp(f16); try comptime testCmp(f16); @@ -134,6 +135,7 @@ test "cmp f16" { test "cmp f32/f64" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.cpu.arch.isArmOrThumb() and builtin.target.floatAbi() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234 try testCmp(f32); try 
comptime testCmp(f32); diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 7837ffb74b4b..c658ea3081df 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -1597,6 +1597,7 @@ test "NaN comparison" { if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.cpu.arch.isArmOrThumb() and builtin.target.floatAbi() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234 try testNanEqNan(f16); try testNanEqNan(f32); From a872b6102167f8588704877f8143d58f3386b78c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Wed, 28 Aug 2024 21:31:40 +0200 Subject: [PATCH 107/202] test: Add arm, mips, and powerpc soft float targets to module tests. Prefer `eabi` and `eabihf` over the ambiguous `none` when not using libc. 
--- test/tests.zig | 95 +++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 91 insertions(+), 4 deletions(-) diff --git a/test/tests.zig b/test/tests.zig index b87113c12dc2..cf5fd93df817 100644 --- a/test/tests.zig +++ b/test/tests.zig @@ -292,10 +292,23 @@ const test_targets = blk: { .{ .target = std.Target.Query.parse(.{ - .arch_os_abi = "arm-linux-none", + .arch_os_abi = "arm-linux-eabi", .cpu_features = "generic+v8a", }) catch unreachable, }, + .{ + .target = std.Target.Query.parse(.{ + .arch_os_abi = "arm-linux-eabihf", + .cpu_features = "generic+v8a", + }) catch unreachable, + }, + .{ + .target = std.Target.Query.parse(.{ + .arch_os_abi = "arm-linux-musleabi", + .cpu_features = "generic+v8a", + }) catch unreachable, + .link_libc = true, + }, .{ .target = std.Target.Query.parse(.{ .arch_os_abi = "arm-linux-musleabihf", @@ -303,6 +316,13 @@ const test_targets = blk: { }) catch unreachable, .link_libc = true, }, + .{ + .target = std.Target.Query.parse(.{ + .arch_os_abi = "arm-linux-gnueabi", + .cpu_features = "generic+v8a", + }) catch unreachable, + .link_libc = true, + }, .{ .target = std.Target.Query.parse(.{ .arch_os_abi = "arm-linux-gnueabihf", @@ -315,8 +335,25 @@ const test_targets = blk: { .target = .{ .cpu_arch = .mips, .os_tag = .linux, - .abi = .none, + .abi = .eabi, + }, + .slow_backend = true, + }, + .{ + .target = .{ + .cpu_arch = .mips, + .os_tag = .linux, + .abi = .eabihf, + }, + .slow_backend = true, + }, + .{ + .target = .{ + .cpu_arch = .mips, + .os_tag = .linux, + .abi = .musleabi, }, + .link_libc = true, .slow_backend = true, }, .{ @@ -328,6 +365,15 @@ const test_targets = blk: { .link_libc = true, .slow_backend = true, }, + .{ + .target = .{ + .cpu_arch = .mips, + .os_tag = .linux, + .abi = .gnueabi, + }, + .link_libc = true, + .slow_backend = true, + }, .{ .target = .{ .cpu_arch = .mips, @@ -342,8 +388,25 @@ const test_targets = blk: { .target = .{ .cpu_arch = .mipsel, .os_tag = .linux, - .abi = .none, + .abi = .eabi, + }, + 
.slow_backend = true, + }, + .{ + .target = .{ + .cpu_arch = .mipsel, + .os_tag = .linux, + .abi = .eabihf, + }, + .slow_backend = true, + }, + .{ + .target = .{ + .cpu_arch = .mipsel, + .os_tag = .linux, + .abi = .musleabi, }, + .link_libc = true, .slow_backend = true, }, .{ @@ -355,6 +418,15 @@ const test_targets = blk: { .link_libc = true, .slow_backend = true, }, + .{ + .target = .{ + .cpu_arch = .mipsel, + .os_tag = .linux, + .abi = .gnueabi, + }, + .link_libc = true, + .slow_backend = true, + }, .{ .target = .{ .cpu_arch = .mipsel, @@ -417,9 +489,24 @@ const test_targets = blk: { .target = .{ .cpu_arch = .powerpc, .os_tag = .linux, - .abi = .none, + .abi = .eabi, }, }, + .{ + .target = .{ + .cpu_arch = .powerpc, + .os_tag = .linux, + .abi = .eabihf, + }, + }, + .{ + .target = .{ + .cpu_arch = .powerpc, + .os_tag = .linux, + .abi = .musleabi, + }, + .link_libc = true, + }, .{ .target = .{ .cpu_arch = .powerpc, From 75983c692fad81120a7c641a54c2eb90bc467def Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Wed, 28 Aug 2024 21:42:34 +0200 Subject: [PATCH 108/202] test: Switch all `arm-linux-*` triples for module tests from v8a to v7a. Broadly speaking, versions 6, 7, and 8 are the ones that are in common use. Of these, v7 is what you'll typically see for 32-bit Arm today. So let's actually make sure that that's what we're testing. 
--- test/tests.zig | 54 ++++++++++++++++++++++++++++---------------------- 1 file changed, 30 insertions(+), 24 deletions(-) diff --git a/test/tests.zig b/test/tests.zig index cf5fd93df817..f07f67c2e1c6 100644 --- a/test/tests.zig +++ b/test/tests.zig @@ -291,43 +291,49 @@ const test_targets = blk: { }, .{ - .target = std.Target.Query.parse(.{ - .arch_os_abi = "arm-linux-eabi", - .cpu_features = "generic+v8a", - }) catch unreachable, + .target = .{ + .cpu_arch = .arm, + .os_tag = .linux, + .abi = .eabi, + }, }, .{ - .target = std.Target.Query.parse(.{ - .arch_os_abi = "arm-linux-eabihf", - .cpu_features = "generic+v8a", - }) catch unreachable, + .target = .{ + .cpu_arch = .arm, + .os_tag = .linux, + .abi = .eabihf, + }, }, .{ - .target = std.Target.Query.parse(.{ - .arch_os_abi = "arm-linux-musleabi", - .cpu_features = "generic+v8a", - }) catch unreachable, + .target = .{ + .cpu_arch = .arm, + .os_tag = .linux, + .abi = .musleabi, + }, .link_libc = true, }, .{ - .target = std.Target.Query.parse(.{ - .arch_os_abi = "arm-linux-musleabihf", - .cpu_features = "generic+v8a", - }) catch unreachable, + .target = .{ + .cpu_arch = .arm, + .os_tag = .linux, + .abi = .musleabihf, + }, .link_libc = true, }, .{ - .target = std.Target.Query.parse(.{ - .arch_os_abi = "arm-linux-gnueabi", - .cpu_features = "generic+v8a", - }) catch unreachable, + .target = .{ + .cpu_arch = .arm, + .os_tag = .linux, + .abi = .gnueabi, + }, .link_libc = true, }, .{ - .target = std.Target.Query.parse(.{ - .arch_os_abi = "arm-linux-gnueabihf", - .cpu_features = "generic+v8a", - }) catch unreachable, + .target = .{ + .cpu_arch = .arm, + .os_tag = .linux, + .abi = .gnueabihf, + }, .link_libc = true, }, From f1645772c1881a233ffcfc5072710040c4881deb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sun, 1 Sep 2024 10:40:23 +0200 Subject: [PATCH 109/202] compiler_rt: Export __truncdfhf2() for AEABI too. Similar to __truncsfhf2() and __extendhfsf2(). 
--- lib/compiler_rt/truncdfhf2.zig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/compiler_rt/truncdfhf2.zig b/lib/compiler_rt/truncdfhf2.zig index e15a2202a165..10f13d504788 100644 --- a/lib/compiler_rt/truncdfhf2.zig +++ b/lib/compiler_rt/truncdfhf2.zig @@ -6,9 +6,8 @@ pub const panic = common.panic; comptime { if (common.want_aeabi) { @export(&__aeabi_d2h, .{ .name = "__aeabi_d2h", .linkage = common.linkage, .visibility = common.visibility }); - } else { - @export(&__truncdfhf2, .{ .name = "__truncdfhf2", .linkage = common.linkage, .visibility = common.visibility }); } + @export(&__truncdfhf2, .{ .name = "__truncdfhf2", .linkage = common.linkage, .visibility = common.visibility }); } pub fn __truncdfhf2(a: f64) callconv(.C) common.F16T(f64) { From 90075345519f165a0164c52b62b78453068c6ce6 Mon Sep 17 00:00:00 2001 From: Ian Johnson Date: Mon, 9 Sep 2024 22:23:45 -0400 Subject: [PATCH 110/202] std.zig.tokenizer: simplify line-based tokens Closes #21358 Closes #21360 This commit modifies the `multiline_string_literal_line`, `doc_comment`, and `container_doc_comment` tokens to no longer include the line ending as part of the token. This makes it easier to handle line endings (which may be LF, CRLF, or in edge cases possibly nonexistent) consistently. In the two issues linked above, Autodoc was already assuming this for doc comments, and yielding incorrect results when handling files with CRLF line endings (both in Markdown parsing and source rendering). Applying the same simplification for multiline string literals also brings `zig fmt` into conformance with https://github.com/ziglang/zig-spec/issues/38 regarding formatting of multiline strings with CRLF line endings: the spec says that `zig fmt` should remove the CR from such line endings, but this was not previously the case. 
--- lib/std/zig/AstGen.zig | 6 ++---- lib/std/zig/parser_test.zig | 38 +++++++++++++++++++++++++++++++++++++ lib/std/zig/render.zig | 3 --- lib/std/zig/tokenizer.zig | 4 ---- 4 files changed, 40 insertions(+), 11 deletions(-) diff --git a/lib/std/zig/AstGen.zig b/lib/std/zig/AstGen.zig index c15a995e6010..675fe095a26e 100644 --- a/lib/std/zig/AstGen.zig +++ b/lib/std/zig/AstGen.zig @@ -11721,16 +11721,14 @@ fn strLitNodeAsString(astgen: *AstGen, node: Ast.Node.Index) !IndexSlice { var tok_i = start; { const slice = tree.tokenSlice(tok_i); - const carriage_return_ending: usize = if (slice[slice.len - 2] == '\r') 2 else 1; - const line_bytes = slice[2 .. slice.len - carriage_return_ending]; + const line_bytes = slice[2..]; try string_bytes.appendSlice(gpa, line_bytes); tok_i += 1; } // Following lines: each line prepends a newline. while (tok_i <= end) : (tok_i += 1) { const slice = tree.tokenSlice(tok_i); - const carriage_return_ending: usize = if (slice[slice.len - 2] == '\r') 2 else 1; - const line_bytes = slice[2 .. slice.len - carriage_return_ending]; + const line_bytes = slice[2..]; try string_bytes.ensureUnusedCapacity(gpa, line_bytes.len + 1); string_bytes.appendAssumeCapacity('\n'); string_bytes.appendSliceAssumeCapacity(line_bytes); diff --git a/lib/std/zig/parser_test.zig b/lib/std/zig/parser_test.zig index d399c58c9cd6..e2c9b034ed7e 100644 --- a/lib/std/zig/parser_test.zig +++ b/lib/std/zig/parser_test.zig @@ -3087,6 +3087,22 @@ test "zig fmt: multiline string" { ); } +test "zig fmt: multiline string with CRLF line endings" { + try testTransform("" ++ + "const s =\r\n" ++ + " \\\\one\r\n" ++ + " \\\\two)\r\n" ++ + " \\\\three\r\n" ++ + ";\r\n", + \\const s = + \\ \\one + \\ \\two) + \\ \\three + \\; + \\ + ); +} + test "zig fmt: values" { try testCanonical( \\test "values" { @@ -4404,6 +4420,28 @@ test "zig fmt: invalid doc comments on comptime and test blocks" { }); } +test "zig fmt: comments with CRLF line endings" { + try testTransform("" ++ + "//! 
Top-level doc comment\r\n" ++ + "//! Continuing to another line\r\n" ++ + "\r\n" ++ + "/// Regular doc comment\r\n" ++ + "const S = struct {\r\n" ++ + " // Regular comment\r\n" ++ + " // More content\r\n" ++ + "};\r\n", + \\//! Top-level doc comment + \\//! Continuing to another line + \\ + \\/// Regular doc comment + \\const S = struct { + \\ // Regular comment + \\ // More content + \\}; + \\ + ); +} + test "zig fmt: else comptime expr" { try testCanonical( \\comptime { diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig index cd17b6963956..c0391b4faff4 100644 --- a/lib/std/zig/render.zig +++ b/lib/std/zig/render.zig @@ -3170,9 +3170,6 @@ fn discardAllParams(r: *Render, fn_proto_node: Ast.Node.Index) Error!void { fn tokenSliceForRender(tree: Ast, token_index: Ast.TokenIndex) []const u8 { var ret = tree.tokenSlice(token_index); switch (tree.tokens.items(.tag)[token_index]) { - .multiline_string_literal_line => { - if (ret[ret.len - 1] == '\n') ret.len -= 1; - }, .container_doc_comment, .doc_comment => { ret = mem.trimRight(u8, ret, &std.ascii.whitespace); }, diff --git a/lib/std/zig/tokenizer.zig b/lib/std/zig/tokenizer.zig index b63bde563385..05c0f8ed89cd 100644 --- a/lib/std/zig/tokenizer.zig +++ b/lib/std/zig/tokenizer.zig @@ -847,12 +847,10 @@ pub const Tokenizer = struct { break; }, '\n' => { - self.index += 1; break; }, '\r' => { if (self.buffer[self.index + 1] == '\n') { - self.index += 2; break; } else { state = .invalid; @@ -1117,7 +1115,6 @@ pub const Tokenizer = struct { }, '\r' => { if (self.buffer[self.index + 1] == '\n') { - self.index += 1; result.tag = .doc_comment; break; } else { @@ -1167,7 +1164,6 @@ pub const Tokenizer = struct { }, '\r' => { if (self.buffer[self.index + 1] == '\n') { - self.index += 1; break; } else { state = .invalid; From 778519bb0d883eed7b2a4e89ae7db68c15271ee3 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Wed, 4 Sep 2024 08:47:17 -0400 Subject: [PATCH 111/202] Dwarf: fix missing padding before incrementally 
updated entries --- src/link/Dwarf.zig | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index bff33ecf140c..26aafd8092e2 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -847,10 +847,11 @@ const Entry = struct { unit.len -| (unit.header_len + unit.trailer_len); if (entry_ptr.off + len > end) { if (entry_ptr.next.unwrap()) |next_entry| { - if (entry_ptr.prev.unwrap()) |prev_entry| - unit.getEntry(prev_entry).next = entry_ptr.next - else - unit.first = entry_ptr.next; + if (entry_ptr.prev.unwrap()) |prev_entry| { + const prev_entry_ptr = unit.getEntry(prev_entry); + prev_entry_ptr.next = entry_ptr.next; + try prev_entry_ptr.pad(unit, sec, dwarf); + } else unit.first = entry_ptr.next; const next_entry_ptr = unit.getEntry(next_entry); const entry = next_entry_ptr.prev; next_entry_ptr.prev = entry_ptr.prev; @@ -860,6 +861,7 @@ const Entry = struct { entry_ptr.next = .none; entry_ptr.off = last_entry_ptr.off + sec.padToIdeal(last_entry_ptr.len); unit.last = entry; + try last_entry_ptr.pad(unit, sec, dwarf); } try unit.resize(sec, dwarf, 0, @intCast(unit.header_len + entry_ptr.off + sec.padToIdeal(len) + unit.trailer_len)); } From f34b19825133ff13ea6e9faa576aa253f4d1252c Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Thu, 5 Sep 2024 15:58:24 -0400 Subject: [PATCH 112/202] Dwarf: implement and test decls --- ci/x86_64-linux-debug.sh | 2 +- ci/x86_64-linux-release.sh | 2 +- src/link/Dwarf.zig | 747 +++++++++++++++++-------------------- test/src/Debugger.zig | 67 +++- 4 files changed, 405 insertions(+), 413 deletions(-) diff --git a/ci/x86_64-linux-debug.sh b/ci/x86_64-linux-debug.sh index 8e9929bc22f9..370100e68e03 100755 --- a/ci/x86_64-linux-debug.sh +++ b/ci/x86_64-linux-debug.sh @@ -64,7 +64,7 @@ stage3-debug/bin/zig build \ stage3-debug/bin/zig build test docs \ --maxrss 21000000000 \ - -Dlldb=$HOME/deps/lldb-zig/Debug-befcd57a8/bin/lldb \ + 
-Dlldb=$HOME/deps/lldb-zig/Debug-4a44163df/bin/lldb \ -fqemu \ -fwasmtime \ -Dstatic-llvm \ diff --git a/ci/x86_64-linux-release.sh b/ci/x86_64-linux-release.sh index 346e942602c8..8ee7f7941266 100755 --- a/ci/x86_64-linux-release.sh +++ b/ci/x86_64-linux-release.sh @@ -64,7 +64,7 @@ stage3-release/bin/zig build \ stage3-release/bin/zig build test docs \ --maxrss 21000000000 \ - -Dlldb=$HOME/deps/lldb-zig/Release-befcd57a8/bin/lldb \ + -Dlldb=$HOME/deps/lldb-zig/Release-4a44163df/bin/lldb \ -fqemu \ -fwasmtime \ -Dstatic-llvm \ diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 26aafd8092e2..2a9fd8f8f933 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -2509,6 +2509,19 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool return; } + const parent_type, const accessibility: u8 = if (nav.analysis_owner.unwrap()) |cau| parent: { + const parent_namespace_ptr = ip.namespacePtr(ip.getCau(cau).namespace); + break :parent .{ + parent_namespace_ptr.owner_type, + if (parent_namespace_ptr.pub_decls.containsContext(nav_index, .{ .zcu = zcu })) + DW.ACCESS.public + else if (parent_namespace_ptr.priv_decls.containsContext(nav_index, .{ .zcu = zcu })) + DW.ACCESS.private + else + unreachable, + }; + } else .{ zcu.fileRootType(inst_info.file), DW.ACCESS.private }; + const tree = try file.getTree(dwarf.gpa); const loc = tree.tokenLocation(0, tree.nodes.items(.main_token)[decl_inst.data.declaration.src_node]); assert(loc.line == zcu.navSrcLine(nav_index)); @@ -2534,459 +2547,380 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool const nav_gop = try dwarf.navs.getOrPut(dwarf.gpa, nav_index); errdefer _ = dwarf.navs.pop(); - switch (ip.indexToKey(nav_val.toIntern())) { - .func => |func| { - if (nav_gop.found_existing) { - const unit_ptr = dwarf.debug_info.section.getUnit(wip_nav.unit); - const entry_ptr = unit_ptr.getEntry(nav_gop.value_ptr.*); - if (entry_ptr.len >= AbbrevCode.decl_bytes) { - var 
abbrev_code_buf: [AbbrevCode.decl_bytes]u8 = undefined; - if (try dwarf.getFile().?.preadAll( - &abbrev_code_buf, - dwarf.debug_info.section.off(dwarf) + unit_ptr.off + unit_ptr.header_len + entry_ptr.off, - ) != abbrev_code_buf.len) return error.InputOutput; - var abbrev_code_fbs = std.io.fixedBufferStream(&abbrev_code_buf); - const abbrev_code: AbbrevCode = @enumFromInt( - std.leb.readUleb128(@typeInfo(AbbrevCode).@"enum".tag_type, abbrev_code_fbs.reader()) catch unreachable, - ); - switch (abbrev_code) { - else => unreachable, - .decl_func, .decl_empty_func => return, - .decl_func_generic, .decl_empty_func_generic => {}, - } - } - entry_ptr.clear(); - } else nav_gop.value_ptr.* = try dwarf.addCommonEntry(wip_nav.unit); - wip_nav.entry = nav_gop.value_ptr.*; - - const parent_type, const accessibility: u8 = if (nav.analysis_owner.unwrap()) |cau| parent: { - const parent_namespace_ptr = ip.namespacePtr(ip.getCau(cau).namespace); - break :parent .{ - parent_namespace_ptr.owner_type, - if (parent_namespace_ptr.pub_decls.containsContext(nav_index, .{ .zcu = zcu })) - DW.ACCESS.public - else if (parent_namespace_ptr.priv_decls.containsContext(nav_index, .{ .zcu = zcu })) - DW.ACCESS.private - else - unreachable, - }; - } else .{ zcu.fileRootType(inst_info.file), DW.ACCESS.private }; - const func_type = ip.indexToKey(func.ty).func_type; - const diw = wip_nav.debug_info.writer(dwarf.gpa); - try wip_nav.abbrevCode(if (func_type.param_types.len > 0 or func_type.is_var_args) - .decl_func_generic - else - .decl_empty_func_generic); - try wip_nav.refType(Type.fromInterned(parent_type)); - assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf)); - try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian); - try uleb128(diw, loc.column + 1); - try diw.writeByte(accessibility); - try wip_nav.strp(nav.name.toSlice(ip)); - try wip_nav.refType(Type.fromInterned(func_type.return_type)); - if (func_type.param_types.len > 0 or func_type.is_var_args) { - for 
(0..func_type.param_types.len) |param_index| { - try wip_nav.abbrevCode(.func_type_param); - try wip_nav.refType(Type.fromInterned(func_type.param_types.get(ip)[param_index])); - } - if (func_type.is_var_args) try wip_nav.abbrevCode(.is_var_args); - try uleb128(diw, @intFromEnum(AbbrevCode.null)); - } - }, - .struct_type => done: { + const tag: enum { done, decl_alias } = switch (ip.indexToKey(nav_val.toIntern())) { + .int_type, + .ptr_type, + .array_type, + .vector_type, + .opt_type, + .anyframe_type, + .error_union_type, + .simple_type, + .anon_struct_type, + .func_type, + .error_set_type, + .inferred_error_set_type, + => .decl_alias, + .struct_type => tag: { const loaded_struct = ip.loadStructType(nav_val.toIntern()); - - const parent_type, const accessibility: u8 = if (nav.analysis_owner.unwrap()) |cau| parent: { - const parent_namespace_ptr = ip.namespacePtr(ip.getCau(cau).namespace); - break :parent .{ - parent_namespace_ptr.owner_type, - if (parent_namespace_ptr.pub_decls.containsContext(nav_index, .{ .zcu = zcu })) - DW.ACCESS.public - else if (parent_namespace_ptr.priv_decls.containsContext(nav_index, .{ .zcu = zcu })) - DW.ACCESS.private - else - unreachable, + if (loaded_struct.zir_index == .none) break :tag .decl_alias; + + const type_inst_info = loaded_struct.zir_index.unwrap().?.resolveFull(ip).?; + if (type_inst_info.file != inst_info.file) break :tag .decl_alias; + + const value_inst = value_inst: { + const decl_value_body = decl_extra.data.getBodies(@intCast(decl_extra.end), file.zir).value_body; + const break_inst = file.zir.instructions.get(@intFromEnum(decl_value_body[decl_value_body.len - 1])); + if (break_inst.tag != .break_inline) break :value_inst null; + assert(file.zir.extraData(Zir.Inst.Break, break_inst.data.@"break".payload_index).data.block_inst == inst_info.inst); + var value_inst = break_inst.data.@"break".operand.toIndex(); + while (value_inst) |value_inst_index| switch 
(file.zir.instructions.items(.tag)[@intFromEnum(value_inst_index)]) { + else => break, + .as_node => value_inst = file.zir.extraData( + Zir.Inst.As, + file.zir.instructions.items(.data)[@intFromEnum(value_inst_index)].pl_node.payload_index, + ).data.operand.toIndex(), }; - } else .{ zcu.fileRootType(inst_info.file), DW.ACCESS.private }; - - decl_struct: { - if (loaded_struct.zir_index == .none) break :decl_struct; - - const type_inst_info = loaded_struct.zir_index.unwrap().?.resolveFull(ip).?; - if (type_inst_info.file != inst_info.file) break :decl_struct; - - const value_inst = value_inst: { - const decl_value_body = decl_extra.data.getBodies(@intCast(decl_extra.end), file.zir).value_body; - const break_inst = file.zir.instructions.get(@intFromEnum(decl_value_body[decl_value_body.len - 1])); - if (break_inst.tag != .break_inline) break :value_inst null; - assert(file.zir.extraData(Zir.Inst.Break, break_inst.data.@"break".payload_index).data.block_inst == inst_info.inst); - var value_inst = break_inst.data.@"break".operand.toIndex(); - while (value_inst) |value_inst_index| switch (file.zir.instructions.items(.tag)[@intFromEnum(value_inst_index)]) { - else => break, - .as_node => value_inst = file.zir.extraData( - Zir.Inst.As, - file.zir.instructions.items(.data)[@intFromEnum(value_inst_index)].pl_node.payload_index, - ).data.operand.toIndex(), - }; - break :value_inst value_inst; - }; - if (type_inst_info.inst != value_inst) break :decl_struct; + break :value_inst value_inst; + }; + if (type_inst_info.inst != value_inst) break :tag .decl_alias; - const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern()); - if (type_gop.found_existing) { - dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(type_gop.value_ptr.*).clear(); - nav_gop.value_ptr.* = type_gop.value_ptr.*; - } else { - if (nav_gop.found_existing) - dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(nav_gop.value_ptr.*).clear() - else - nav_gop.value_ptr.* = try 
dwarf.addCommonEntry(wip_nav.unit); - type_gop.value_ptr.* = nav_gop.value_ptr.*; - } - wip_nav.entry = nav_gop.value_ptr.*; + const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern()); + if (type_gop.found_existing) { + dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(type_gop.value_ptr.*).clear(); + nav_gop.value_ptr.* = type_gop.value_ptr.*; + } else { + if (nav_gop.found_existing) + dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(nav_gop.value_ptr.*).clear() + else + nav_gop.value_ptr.* = try dwarf.addCommonEntry(wip_nav.unit); + type_gop.value_ptr.* = nav_gop.value_ptr.*; + } + wip_nav.entry = nav_gop.value_ptr.*; - const diw = wip_nav.debug_info.writer(dwarf.gpa); + const diw = wip_nav.debug_info.writer(dwarf.gpa); - switch (loaded_struct.layout) { - .auto, .@"extern" => { - try wip_nav.abbrevCode(if (loaded_struct.field_types.len == 0) .decl_namespace_struct else .decl_struct); - try wip_nav.refType(Type.fromInterned(parent_type)); - assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf)); - try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian); - try uleb128(diw, loc.column + 1); - try diw.writeByte(accessibility); - try wip_nav.strp(nav.name.toSlice(ip)); - if (loaded_struct.field_types.len == 0) try diw.writeByte(@intFromBool(false)) else { - try uleb128(diw, nav_val.toType().abiSize(zcu)); - try uleb128(diw, nav_val.toType().abiAlignment(zcu).toByteUnits().?); - for (0..loaded_struct.field_types.len) |field_index| { - const is_comptime = loaded_struct.fieldIsComptime(ip, field_index); - try wip_nav.abbrevCode(if (is_comptime) .struct_field_comptime else .struct_field); - if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name| try wip_nav.strp(field_name.toSlice(ip)) else { - const field_name = try std.fmt.allocPrint(dwarf.gpa, "{d}", .{field_index}); - defer dwarf.gpa.free(field_name); - try wip_nav.strp(field_name); - } - const field_type = 
Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); - try wip_nav.refType(field_type); - if (!is_comptime) { - try uleb128(diw, loaded_struct.offsets.get(ip)[field_index]); - try uleb128(diw, loaded_struct.fieldAlign(ip, field_index).toByteUnits() orelse - field_type.abiAlignment(zcu).toByteUnits().?); - } - } - try uleb128(diw, @intFromEnum(AbbrevCode.null)); - } - }, - .@"packed" => { - try wip_nav.abbrevCode(.decl_packed_struct); - try wip_nav.refType(Type.fromInterned(parent_type)); - assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf)); - try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian); - try uleb128(diw, loc.column + 1); - try diw.writeByte(accessibility); - try wip_nav.strp(nav.name.toSlice(ip)); - try wip_nav.refType(Type.fromInterned(loaded_struct.backingIntTypeUnordered(ip))); - var field_bit_offset: u16 = 0; + switch (loaded_struct.layout) { + .auto, .@"extern" => { + try wip_nav.abbrevCode(if (loaded_struct.field_types.len == 0) .decl_namespace_struct else .decl_struct); + try wip_nav.refType(Type.fromInterned(parent_type)); + assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf)); + try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian); + try uleb128(diw, loc.column + 1); + try diw.writeByte(accessibility); + try wip_nav.strp(nav.name.toSlice(ip)); + if (loaded_struct.field_types.len == 0) try diw.writeByte(@intFromBool(false)) else { + try uleb128(diw, nav_val.toType().abiSize(zcu)); + try uleb128(diw, nav_val.toType().abiAlignment(zcu).toByteUnits().?); for (0..loaded_struct.field_types.len) |field_index| { - try wip_nav.abbrevCode(.packed_struct_field); - try wip_nav.strp(loaded_struct.fieldName(ip, field_index).unwrap().?.toSlice(ip)); + const is_comptime = loaded_struct.fieldIsComptime(ip, field_index); + try wip_nav.abbrevCode(if (is_comptime) .struct_field_comptime else .struct_field); + if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name| try 
wip_nav.strp(field_name.toSlice(ip)) else { + const field_name = try std.fmt.allocPrint(dwarf.gpa, "{d}", .{field_index}); + defer dwarf.gpa.free(field_name); + try wip_nav.strp(field_name); + } const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); try wip_nav.refType(field_type); - try uleb128(diw, field_bit_offset); - field_bit_offset += @intCast(field_type.bitSize(zcu)); + if (!is_comptime) { + try uleb128(diw, loaded_struct.offsets.get(ip)[field_index]); + try uleb128(diw, loaded_struct.fieldAlign(ip, field_index).toByteUnits() orelse + field_type.abiAlignment(zcu).toByteUnits().?); + } } try uleb128(diw, @intFromEnum(AbbrevCode.null)); - }, - } - break :done; + } + }, + .@"packed" => { + try wip_nav.abbrevCode(.decl_packed_struct); + try wip_nav.refType(Type.fromInterned(parent_type)); + assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf)); + try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian); + try uleb128(diw, loc.column + 1); + try diw.writeByte(accessibility); + try wip_nav.strp(nav.name.toSlice(ip)); + try wip_nav.refType(Type.fromInterned(loaded_struct.backingIntTypeUnordered(ip))); + var field_bit_offset: u16 = 0; + for (0..loaded_struct.field_types.len) |field_index| { + try wip_nav.abbrevCode(.packed_struct_field); + try wip_nav.strp(loaded_struct.fieldName(ip, field_index).unwrap().?.toSlice(ip)); + const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); + try wip_nav.refType(field_type); + try uleb128(diw, field_bit_offset); + field_bit_offset += @intCast(field_type.bitSize(zcu)); + } + try uleb128(diw, @intFromEnum(AbbrevCode.null)); + }, } + break :tag .done; + }, + .enum_type => tag: { + const loaded_enum = ip.loadEnumType(nav_val.toIntern()); + if (loaded_enum.zir_index == .none) break :tag .decl_alias; + + const type_inst_info = loaded_enum.zir_index.unwrap().?.resolveFull(ip).?; + if (type_inst_info.file != inst_info.file) break :tag .decl_alias; + + 
const value_inst = value_inst: { + const decl_value_body = decl_extra.data.getBodies(@intCast(decl_extra.end), file.zir).value_body; + const break_inst = file.zir.instructions.get(@intFromEnum(decl_value_body[decl_value_body.len - 1])); + if (break_inst.tag != .break_inline) break :value_inst null; + assert(file.zir.extraData(Zir.Inst.Break, break_inst.data.@"break".payload_index).data.block_inst == inst_info.inst); + var value_inst = break_inst.data.@"break".operand.toIndex(); + while (value_inst) |value_inst_index| switch (file.zir.instructions.items(.tag)[@intFromEnum(value_inst_index)]) { + else => break, + .as_node => value_inst = file.zir.extraData( + Zir.Inst.As, + file.zir.instructions.items(.data)[@intFromEnum(value_inst_index)].pl_node.payload_index, + ).data.operand.toIndex(), + }; + break :value_inst value_inst; + }; + if (type_inst_info.inst != value_inst) break :tag .decl_alias; - if (nav_gop.found_existing) - dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(nav_gop.value_ptr.*).clear() - else - nav_gop.value_ptr.* = try dwarf.addCommonEntry(wip_nav.unit); + const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern()); + if (type_gop.found_existing) { + dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(type_gop.value_ptr.*).clear(); + nav_gop.value_ptr.* = type_gop.value_ptr.*; + } else { + if (nav_gop.found_existing) + dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(nav_gop.value_ptr.*).clear() + else + nav_gop.value_ptr.* = try dwarf.addCommonEntry(wip_nav.unit); + type_gop.value_ptr.* = nav_gop.value_ptr.*; + } wip_nav.entry = nav_gop.value_ptr.*; const diw = wip_nav.debug_info.writer(dwarf.gpa); - try wip_nav.abbrevCode(.decl_alias); + try wip_nav.abbrevCode(if (loaded_enum.names.len > 0) .decl_enum else .decl_empty_enum); try wip_nav.refType(Type.fromInterned(parent_type)); assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf)); try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian); try 
uleb128(diw, loc.column + 1); try diw.writeByte(accessibility); try wip_nav.strp(nav.name.toSlice(ip)); - try wip_nav.refType(nav_val.toType()); + try wip_nav.refType(Type.fromInterned(loaded_enum.tag_ty)); + for (0..loaded_enum.names.len) |field_index| { + try wip_nav.enumConstValue(loaded_enum, .{ + .sdata = .signed_enum_field, + .udata = .unsigned_enum_field, + .block = .big_enum_field, + }, field_index); + try wip_nav.strp(loaded_enum.names.get(ip)[field_index].toSlice(ip)); + } + if (loaded_enum.names.len > 0) try uleb128(diw, @intFromEnum(AbbrevCode.null)); + break :tag .done; }, - .enum_type => done: { - const loaded_enum = ip.loadEnumType(nav_val.toIntern()); - - const parent_type, const accessibility: u8 = if (nav.analysis_owner.unwrap()) |cau| parent: { - const parent_namespace_ptr = ip.namespacePtr(ip.getCau(cau).namespace); - break :parent .{ - parent_namespace_ptr.owner_type, - if (parent_namespace_ptr.pub_decls.containsContext(nav_index, .{ .zcu = zcu })) - DW.ACCESS.public - else if (parent_namespace_ptr.priv_decls.containsContext(nav_index, .{ .zcu = zcu })) - DW.ACCESS.private - else - unreachable, - }; - } else .{ zcu.fileRootType(inst_info.file), DW.ACCESS.private }; + .union_type => tag: { + const loaded_union = ip.loadUnionType(nav_val.toIntern()); - decl_enum: { - if (loaded_enum.zir_index == .none) break :decl_enum; - - const type_inst_info = loaded_enum.zir_index.unwrap().?.resolveFull(ip).?; - if (type_inst_info.file != inst_info.file) break :decl_enum; - - const value_inst = value_inst: { - const decl_value_body = decl_extra.data.getBodies(@intCast(decl_extra.end), file.zir).value_body; - const break_inst = file.zir.instructions.get(@intFromEnum(decl_value_body[decl_value_body.len - 1])); - if (break_inst.tag != .break_inline) break :value_inst null; - assert(file.zir.extraData(Zir.Inst.Break, break_inst.data.@"break".payload_index).data.block_inst == inst_info.inst); - var value_inst = break_inst.data.@"break".operand.toIndex(); - while 
(value_inst) |value_inst_index| switch (file.zir.instructions.items(.tag)[@intFromEnum(value_inst_index)]) { - else => break, - .as_node => value_inst = file.zir.extraData( - Zir.Inst.As, - file.zir.instructions.items(.data)[@intFromEnum(value_inst_index)].pl_node.payload_index, - ).data.operand.toIndex(), - }; - break :value_inst value_inst; + const type_inst_info = loaded_union.zir_index.resolveFull(ip).?; + if (type_inst_info.file != inst_info.file) break :tag .decl_alias; + + const value_inst = value_inst: { + const decl_value_body = decl_extra.data.getBodies(@intCast(decl_extra.end), file.zir).value_body; + const break_inst = file.zir.instructions.get(@intFromEnum(decl_value_body[decl_value_body.len - 1])); + if (break_inst.tag != .break_inline) break :value_inst null; + assert(file.zir.extraData(Zir.Inst.Break, break_inst.data.@"break".payload_index).data.block_inst == inst_info.inst); + var value_inst = break_inst.data.@"break".operand.toIndex(); + while (value_inst) |value_inst_index| switch (file.zir.instructions.items(.tag)[@intFromEnum(value_inst_index)]) { + else => break, + .as_node => value_inst = file.zir.extraData( + Zir.Inst.As, + file.zir.instructions.items(.data)[@intFromEnum(value_inst_index)].pl_node.payload_index, + ).data.operand.toIndex(), }; - if (type_inst_info.inst != value_inst) break :decl_enum; + break :value_inst value_inst; + }; + if (type_inst_info.inst != value_inst) break :tag .decl_alias; - const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern()); - if (type_gop.found_existing) { - dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(type_gop.value_ptr.*).clear(); - nav_gop.value_ptr.* = type_gop.value_ptr.*; - } else { - if (nav_gop.found_existing) - dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(nav_gop.value_ptr.*).clear() - else - nav_gop.value_ptr.* = try dwarf.addCommonEntry(wip_nav.unit); - type_gop.value_ptr.* = nav_gop.value_ptr.*; - } - wip_nav.entry = nav_gop.value_ptr.*; - const diw = 
wip_nav.debug_info.writer(dwarf.gpa); - try wip_nav.abbrevCode(if (loaded_enum.names.len > 0) .decl_enum else .decl_empty_enum); - try wip_nav.refType(Type.fromInterned(parent_type)); - assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf)); - try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian); - try uleb128(diw, loc.column + 1); - try diw.writeByte(accessibility); - try wip_nav.strp(nav.name.toSlice(ip)); - try wip_nav.refType(Type.fromInterned(loaded_enum.tag_ty)); - for (0..loaded_enum.names.len) |field_index| { - try wip_nav.enumConstValue(loaded_enum, .{ - .sdata = .signed_enum_field, - .udata = .unsigned_enum_field, - .block = .big_enum_field, - }, field_index); - try wip_nav.strp(loaded_enum.names.get(ip)[field_index].toSlice(ip)); - } - if (loaded_enum.names.len > 0) try uleb128(diw, @intFromEnum(AbbrevCode.null)); - break :done; + const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern()); + if (type_gop.found_existing) { + dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(type_gop.value_ptr.*).clear(); + nav_gop.value_ptr.* = type_gop.value_ptr.*; + } else { + if (nav_gop.found_existing) + dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(nav_gop.value_ptr.*).clear() + else + nav_gop.value_ptr.* = try dwarf.addCommonEntry(wip_nav.unit); + type_gop.value_ptr.* = nav_gop.value_ptr.*; } - - if (nav_gop.found_existing) - dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(nav_gop.value_ptr.*).clear() - else - nav_gop.value_ptr.* = try dwarf.addCommonEntry(wip_nav.unit); wip_nav.entry = nav_gop.value_ptr.*; const diw = wip_nav.debug_info.writer(dwarf.gpa); - try wip_nav.abbrevCode(.decl_alias); + try wip_nav.abbrevCode(.decl_union); try wip_nav.refType(Type.fromInterned(parent_type)); assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf)); try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian); try uleb128(diw, loc.column + 1); try diw.writeByte(accessibility); try 
wip_nav.strp(nav.name.toSlice(ip)); - try wip_nav.refType(nav_val.toType()); - }, - .union_type => done: { - const loaded_union = ip.loadUnionType(nav_val.toIntern()); - - const parent_type, const accessibility: u8 = if (nav.analysis_owner.unwrap()) |cau| parent: { - const parent_namespace_ptr = ip.namespacePtr(ip.getCau(cau).namespace); - break :parent .{ - parent_namespace_ptr.owner_type, - if (parent_namespace_ptr.pub_decls.containsContext(nav_index, .{ .zcu = zcu })) - DW.ACCESS.public - else if (parent_namespace_ptr.priv_decls.containsContext(nav_index, .{ .zcu = zcu })) - DW.ACCESS.private - else - unreachable, - }; - } else .{ zcu.fileRootType(inst_info.file), DW.ACCESS.private }; - - decl_union: { - const type_inst_info = loaded_union.zir_index.resolveFull(ip).?; - if (type_inst_info.file != inst_info.file) break :decl_union; - - const value_inst = value_inst: { - const decl_value_body = decl_extra.data.getBodies(@intCast(decl_extra.end), file.zir).value_body; - const break_inst = file.zir.instructions.get(@intFromEnum(decl_value_body[decl_value_body.len - 1])); - if (break_inst.tag != .break_inline) break :value_inst null; - assert(file.zir.extraData(Zir.Inst.Break, break_inst.data.@"break".payload_index).data.block_inst == inst_info.inst); - var value_inst = break_inst.data.@"break".operand.toIndex(); - while (value_inst) |value_inst_index| switch (file.zir.instructions.items(.tag)[@intFromEnum(value_inst_index)]) { - else => break, - .as_node => value_inst = file.zir.extraData( - Zir.Inst.As, - file.zir.instructions.items(.data)[@intFromEnum(value_inst_index)].pl_node.payload_index, - ).data.operand.toIndex(), - }; - break :value_inst value_inst; - }; - if (type_inst_info.inst != value_inst) break :decl_union; - - const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern()); - if (type_gop.found_existing) { - dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(type_gop.value_ptr.*).clear(); - nav_gop.value_ptr.* = type_gop.value_ptr.*; - 
} else { - if (nav_gop.found_existing) - dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(nav_gop.value_ptr.*).clear() - else - nav_gop.value_ptr.* = try dwarf.addCommonEntry(wip_nav.unit); - type_gop.value_ptr.* = nav_gop.value_ptr.*; - } - wip_nav.entry = nav_gop.value_ptr.*; - const diw = wip_nav.debug_info.writer(dwarf.gpa); - try wip_nav.abbrevCode(.decl_union); - try wip_nav.refType(Type.fromInterned(parent_type)); - assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf)); - try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian); - try uleb128(diw, loc.column + 1); - try diw.writeByte(accessibility); - try wip_nav.strp(nav.name.toSlice(ip)); - const union_layout = Type.getUnionLayout(loaded_union, zcu); - try uleb128(diw, union_layout.abi_size); - try uleb128(diw, union_layout.abi_align.toByteUnits().?); - const loaded_tag = loaded_union.loadTagType(ip); - if (loaded_union.hasTag(ip)) { - try wip_nav.abbrevCode(.tagged_union); - try wip_nav.infoSectionOffset( - .debug_info, - wip_nav.unit, - wip_nav.entry, - @intCast(wip_nav.debug_info.items.len + dwarf.sectionOffsetBytes()), - ); - { - try wip_nav.abbrevCode(.generated_field); - try wip_nav.strp("tag"); - try wip_nav.refType(Type.fromInterned(loaded_union.enum_tag_ty)); - try uleb128(diw, union_layout.tagOffset()); - - for (0..loaded_union.field_types.len) |field_index| { - try wip_nav.enumConstValue(loaded_tag, .{ - .sdata = .signed_tagged_union_field, - .udata = .unsigned_tagged_union_field, - .block = .big_tagged_union_field, - }, field_index); - { - try wip_nav.abbrevCode(.struct_field); - try wip_nav.strp(loaded_tag.names.get(ip)[field_index].toSlice(ip)); - const field_type = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]); - try wip_nav.refType(field_type); - try uleb128(diw, union_layout.payloadOffset()); - try uleb128(diw, loaded_union.fieldAlign(ip, field_index).toByteUnits() orelse - if (field_type.isNoReturn(zcu)) 1 else 
field_type.abiAlignment(zcu).toByteUnits().?); - } - try uleb128(diw, @intFromEnum(AbbrevCode.null)); + const union_layout = Type.getUnionLayout(loaded_union, zcu); + try uleb128(diw, union_layout.abi_size); + try uleb128(diw, union_layout.abi_align.toByteUnits().?); + const loaded_tag = loaded_union.loadTagType(ip); + if (loaded_union.hasTag(ip)) { + try wip_nav.abbrevCode(.tagged_union); + try wip_nav.infoSectionOffset( + .debug_info, + wip_nav.unit, + wip_nav.entry, + @intCast(wip_nav.debug_info.items.len + dwarf.sectionOffsetBytes()), + ); + { + try wip_nav.abbrevCode(.generated_field); + try wip_nav.strp("tag"); + try wip_nav.refType(Type.fromInterned(loaded_union.enum_tag_ty)); + try uleb128(diw, union_layout.tagOffset()); + + for (0..loaded_union.field_types.len) |field_index| { + try wip_nav.enumConstValue(loaded_tag, .{ + .sdata = .signed_tagged_union_field, + .udata = .unsigned_tagged_union_field, + .block = .big_tagged_union_field, + }, field_index); + { + try wip_nav.abbrevCode(.struct_field); + try wip_nav.strp(loaded_tag.names.get(ip)[field_index].toSlice(ip)); + const field_type = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]); + try wip_nav.refType(field_type); + try uleb128(diw, union_layout.payloadOffset()); + try uleb128(diw, loaded_union.fieldAlign(ip, field_index).toByteUnits() orelse + if (field_type.isNoReturn(zcu)) 1 else field_type.abiAlignment(zcu).toByteUnits().?); } + try uleb128(diw, @intFromEnum(AbbrevCode.null)); } - try uleb128(diw, @intFromEnum(AbbrevCode.null)); - } else for (0..loaded_union.field_types.len) |field_index| { - try wip_nav.abbrevCode(.untagged_union_field); - try wip_nav.strp(loaded_tag.names.get(ip)[field_index].toSlice(ip)); - const field_type = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]); - try wip_nav.refType(field_type); - try uleb128(diw, loaded_union.fieldAlign(ip, field_index).toByteUnits() orelse - field_type.abiAlignment(zcu).toByteUnits().?); } try uleb128(diw, 
@intFromEnum(AbbrevCode.null)); - break :done; + } else for (0..loaded_union.field_types.len) |field_index| { + try wip_nav.abbrevCode(.untagged_union_field); + try wip_nav.strp(loaded_tag.names.get(ip)[field_index].toSlice(ip)); + const field_type = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]); + try wip_nav.refType(field_type); + try uleb128(diw, loaded_union.fieldAlign(ip, field_index).toByteUnits() orelse + field_type.abiAlignment(zcu).toByteUnits().?); } + try uleb128(diw, @intFromEnum(AbbrevCode.null)); + break :tag .done; + }, + .opaque_type => tag: { + const loaded_opaque = ip.loadOpaqueType(nav_val.toIntern()); - if (nav_gop.found_existing) - dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(nav_gop.value_ptr.*).clear() - else - nav_gop.value_ptr.* = try dwarf.addCommonEntry(wip_nav.unit); + const type_inst_info = loaded_opaque.zir_index.resolveFull(ip).?; + if (type_inst_info.file != inst_info.file) break :tag .decl_alias; + + const value_inst = value_inst: { + const decl_value_body = decl_extra.data.getBodies(@intCast(decl_extra.end), file.zir).value_body; + const break_inst = file.zir.instructions.get(@intFromEnum(decl_value_body[decl_value_body.len - 1])); + if (break_inst.tag != .break_inline) break :value_inst null; + assert(file.zir.extraData(Zir.Inst.Break, break_inst.data.@"break".payload_index).data.block_inst == inst_info.inst); + var value_inst = break_inst.data.@"break".operand.toIndex(); + while (value_inst) |value_inst_index| switch (file.zir.instructions.items(.tag)[@intFromEnum(value_inst_index)]) { + else => break, + .as_node => value_inst = file.zir.extraData( + Zir.Inst.As, + file.zir.instructions.items(.data)[@intFromEnum(value_inst_index)].pl_node.payload_index, + ).data.operand.toIndex(), + }; + break :value_inst value_inst; + }; + if (type_inst_info.inst != value_inst) break :tag .decl_alias; + + const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern()); + if (type_gop.found_existing) { + 
dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(type_gop.value_ptr.*).clear(); + nav_gop.value_ptr.* = type_gop.value_ptr.*; + } else { + if (nav_gop.found_existing) + dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(nav_gop.value_ptr.*).clear() + else + nav_gop.value_ptr.* = try dwarf.addCommonEntry(wip_nav.unit); + type_gop.value_ptr.* = nav_gop.value_ptr.*; + } wip_nav.entry = nav_gop.value_ptr.*; const diw = wip_nav.debug_info.writer(dwarf.gpa); - try wip_nav.abbrevCode(.decl_alias); + try wip_nav.abbrevCode(.decl_namespace_struct); try wip_nav.refType(Type.fromInterned(parent_type)); assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf)); try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian); try uleb128(diw, loc.column + 1); try diw.writeByte(accessibility); try wip_nav.strp(nav.name.toSlice(ip)); - try wip_nav.refType(nav_val.toType()); + try diw.writeByte(@intFromBool(false)); + break :tag .done; }, - .opaque_type => done: { - const loaded_opaque = ip.loadOpaqueType(nav_val.toIntern()); - - const parent_type, const accessibility: u8 = if (nav.analysis_owner.unwrap()) |cau| parent: { - const parent_namespace_ptr = ip.namespacePtr(ip.getCau(cau).namespace); - break :parent .{ - parent_namespace_ptr.owner_type, - if (parent_namespace_ptr.pub_decls.containsContext(nav_index, .{ .zcu = zcu })) - DW.ACCESS.public - else if (parent_namespace_ptr.priv_decls.containsContext(nav_index, .{ .zcu = zcu })) - DW.ACCESS.private - else - unreachable, - }; - } else .{ zcu.fileRootType(inst_info.file), DW.ACCESS.private }; - - decl_opaque: { - const type_inst_info = loaded_opaque.zir_index.resolveFull(ip).?; - if (type_inst_info.file != inst_info.file) break :decl_opaque; - - const value_inst = value_inst: { - const decl_value_body = decl_extra.data.getBodies(@intCast(decl_extra.end), file.zir).value_body; - const break_inst = file.zir.instructions.get(@intFromEnum(decl_value_body[decl_value_body.len - 1])); - if (break_inst.tag != 
.break_inline) break :value_inst null; - assert(file.zir.extraData(Zir.Inst.Break, break_inst.data.@"break".payload_index).data.block_inst == inst_info.inst); - var value_inst = break_inst.data.@"break".operand.toIndex(); - while (value_inst) |value_inst_index| switch (file.zir.instructions.items(.tag)[@intFromEnum(value_inst_index)]) { - else => break, - .as_node => value_inst = file.zir.extraData( - Zir.Inst.As, - file.zir.instructions.items(.data)[@intFromEnum(value_inst_index)].pl_node.payload_index, - ).data.operand.toIndex(), - }; - break :value_inst value_inst; - }; - if (type_inst_info.inst != value_inst) break :decl_opaque; + .undef, + .simple_value, + .variable, + .@"extern", + .int, + .err, + .error_union, + .enum_literal, + .enum_tag, + .empty_enum_value, + .float, + .ptr, + .slice, + .opt, + .aggregate, + .un, + => { + _ = dwarf.navs.pop(); + return; + }, + .func => |func| tag: { + if (nav_gop.found_existing) { + const unit_ptr = dwarf.debug_info.section.getUnit(wip_nav.unit); + const entry_ptr = unit_ptr.getEntry(nav_gop.value_ptr.*); + if (entry_ptr.len >= AbbrevCode.decl_bytes) { + var abbrev_code_buf: [AbbrevCode.decl_bytes]u8 = undefined; + if (try dwarf.getFile().?.preadAll( + &abbrev_code_buf, + dwarf.debug_info.section.off(dwarf) + unit_ptr.off + unit_ptr.header_len + entry_ptr.off, + ) != abbrev_code_buf.len) return error.InputOutput; + var abbrev_code_fbs = std.io.fixedBufferStream(&abbrev_code_buf); + const abbrev_code: AbbrevCode = @enumFromInt( + std.leb.readUleb128(@typeInfo(AbbrevCode).@"enum".tag_type, abbrev_code_fbs.reader()) catch unreachable, + ); + switch (abbrev_code) { + else => unreachable, + .decl_func, .decl_empty_func => return, + .decl_func_generic, .decl_empty_func_generic => {}, + } + } + entry_ptr.clear(); + } else nav_gop.value_ptr.* = try dwarf.addCommonEntry(wip_nav.unit); + wip_nav.entry = nav_gop.value_ptr.*; - const type_gop = try dwarf.types.getOrPut(dwarf.gpa, nav_val.toIntern()); - if (type_gop.found_existing) { 
- dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(type_gop.value_ptr.*).clear(); - nav_gop.value_ptr.* = type_gop.value_ptr.*; - } else { - if (nav_gop.found_existing) - dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(nav_gop.value_ptr.*).clear() - else - nav_gop.value_ptr.* = try dwarf.addCommonEntry(wip_nav.unit); - type_gop.value_ptr.* = nav_gop.value_ptr.*; + const func_type = ip.indexToKey(func.ty).func_type; + const diw = wip_nav.debug_info.writer(dwarf.gpa); + try wip_nav.abbrevCode(if (func_type.param_types.len > 0 or func_type.is_var_args) + .decl_func_generic + else + .decl_empty_func_generic); + try wip_nav.refType(Type.fromInterned(parent_type)); + assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf)); + try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian); + try uleb128(diw, loc.column + 1); + try diw.writeByte(accessibility); + try wip_nav.strp(nav.name.toSlice(ip)); + try wip_nav.refType(Type.fromInterned(func_type.return_type)); + if (func_type.param_types.len > 0 or func_type.is_var_args) { + for (0..func_type.param_types.len) |param_index| { + try wip_nav.abbrevCode(.func_type_param); + try wip_nav.refType(Type.fromInterned(func_type.param_types.get(ip)[param_index])); } - wip_nav.entry = nav_gop.value_ptr.*; - const diw = wip_nav.debug_info.writer(dwarf.gpa); - try wip_nav.abbrevCode(.decl_namespace_struct); - try wip_nav.refType(Type.fromInterned(parent_type)); - assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf)); - try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian); - try uleb128(diw, loc.column + 1); - try diw.writeByte(accessibility); - try wip_nav.strp(nav.name.toSlice(ip)); - try diw.writeByte(@intFromBool(false)); - break :done; + if (func_type.is_var_args) try wip_nav.abbrevCode(.is_var_args); + try uleb128(diw, @intFromEnum(AbbrevCode.null)); } - + break :tag .done; + }, + // memoization, not types + .memoized_call => unreachable, + }; + switch (tag) { + .done => 
{}, + .decl_alias => { if (nav_gop.found_existing) dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(nav_gop.value_ptr.*).clear() else @@ -3002,10 +2936,6 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool try wip_nav.strp(nav.name.toSlice(ip)); try wip_nav.refType(nav_val.toType()); }, - else => { - _ = dwarf.navs.pop(); - return; - }, } try dwarf.debug_info.section.replaceEntry(wip_nav.unit, wip_nav.entry, dwarf, wip_nav.debug_info.items); try wip_nav.flush(); @@ -3734,12 +3664,15 @@ fn refAbbrevCode(dwarf: *Dwarf, abbrev_code: AbbrevCode) UpdateError!@typeInfo(A pub fn flushModule(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void { const zcu = pt.zcu; const ip = &zcu.intern_pool; - if (dwarf.types.get(.anyerror_type)) |entry| { + + { + const type_gop = try dwarf.types.getOrPut(dwarf.gpa, .anyerror_type); + if (!type_gop.found_existing) type_gop.value_ptr.* = try dwarf.addCommonEntry(.main); var wip_nav: WipNav = .{ .dwarf = dwarf, .pt = pt, .unit = .main, - .entry = entry, + .entry = type_gop.value_ptr.*, .any_children = false, .func = .none, .func_sym_index = undefined, diff --git a/test/src/Debugger.zig b/test/src/Debugger.zig index afa4f6d803c8..ca1348d13fad 100644 --- a/test/src/Debugger.zig +++ b/test/src/Debugger.zig @@ -411,10 +411,27 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { }, \\breakpoint set --file enums.zig --source-pattern-regexp '_ = enums;' \\process launch + \\expression --show-types -- Enums \\frame variable --show-types enums \\breakpoint delete --force 1 , &.{ + \\(lldb) expression --show-types -- Enums + \\(type) Enums = struct { + \\ (type) Zero = enum {} + \\ (type) One = enum { + \\ (root.enums.Enums.One) first = .first + \\ } + \\ (type) Two = enum { + \\ (root.enums.Enums.Two) first = .first + \\ (root.enums.Enums.Two) second = .second + \\ } + \\ (type) Three = enum { + \\ (root.enums.Enums.Three) first = .first + \\ (root.enums.Enums.Three) second = .second + \\ 
(root.enums.Enums.Three) third = .third + \\ } + \\} \\(lldb) frame variable --show-types enums \\(root.enums.Enums) enums = { \\ (root.enums.Enums.Zero) zero = @enumFromInt(13) @@ -434,12 +451,17 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { .path = "errors.zig", .source = \\const Errors = struct { - \\ one: error{One} = error.One, - \\ two: error{One,Two} = error.Two, - \\ three: error{One,Two,Three} = error.Three, + \\ const Zero = error{}; + \\ const One = Zero || error{One}; + \\ const Two = One || error{Two}; + \\ const Three = Two || error{Three}; + \\ + \\ one: One = error.One, + \\ two: Two = error.Two, + \\ three: Three = error.Three, \\ any: anyerror = error.Any, \\ any_void: anyerror!void = error.NotVoid, - \\ any_u32: error{One}!u32 = 42, + \\ any_u32: One!u32 = 42, \\}; \\fn testErrors(errors: Errors) void { \\ _ = errors; @@ -453,10 +475,27 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { }, \\breakpoint set --file errors.zig --source-pattern-regexp '_ = errors;' \\process launch + \\expression --show-types -- Errors \\frame variable --show-types errors \\breakpoint delete --force 1 , &.{ + \\(lldb) expression --show-types -- Errors + \\(type) Errors = struct { + \\ (type) Zero = error {} + \\ (type) One = error { + \\ (error{One}) One = error.One + \\ } + \\ (type) Two = error { + \\ (error{One,Two}) One = error.One + \\ (error{One,Two}) Two = error.Two + \\ } + \\ (type) Three = error { + \\ (error{One,Two,Three}) One = error.One + \\ (error{One,Two,Three}) Two = error.Two + \\ (error{One,Two,Three}) Three = error.Three + \\ } + \\} \\(lldb) frame variable --show-types errors \\(root.errors.Errors) errors = { \\ (error{One}) one = error.One @@ -565,10 +604,30 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { }, \\breakpoint set --file unions.zig --source-pattern-regexp '_ = unions;' \\process launch + \\expression --show-types -- Unions \\frame variable --show-types unions \\breakpoint delete 
--force 1 , &.{ + \\(lldb) expression --show-types -- Unions + \\(type) Unions = struct { + \\ (type) Untagged = union {} + \\ (type) SafetyTagged = union(enum) { + \\ (@typeInfo(unions.Unions.SafetyTagged).@"union".tag_type.?) void = .void + \\ (@typeInfo(unions.Unions.SafetyTagged).@"union".tag_type.?) en = .en + \\ (@typeInfo(unions.Unions.SafetyTagged).@"union".tag_type.?) eu = .eu + \\ } + \\ (type) Enum = enum { + \\ (root.unions.Unions.Enum) first = .first + \\ (root.unions.Unions.Enum) second = .second + \\ (root.unions.Unions.Enum) third = .third + \\ } + \\ (type) Tagged = union(enum) { + \\ (@typeInfo(unions.Unions.Tagged).@"union".tag_type.?) void = .void + \\ (@typeInfo(unions.Unions.Tagged).@"union".tag_type.?) en = .en + \\ (@typeInfo(unions.Unions.Tagged).@"union".tag_type.?) eu = .eu + \\ } + \\} \\(lldb) frame variable --show-types unions \\(root.unions.Unions) unions = { \\ (root.unions.Unions.Untagged) untagged = { From 6459212ebe20da1607ea3b56bff37b7ace019343 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sat, 7 Sep 2024 10:48:11 -0400 Subject: [PATCH 113/202] Dwarf: implement and test segmented list --- test/src/Debugger.zig | 182 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 182 insertions(+) diff --git a/test/src/Debugger.zig b/test/src/Debugger.zig index ca1348d13fad..977bd4a776df 100644 --- a/test/src/Debugger.zig +++ b/test/src/Debugger.zig @@ -1299,6 +1299,7 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { \\ x = fabsf(x); \\ _ = &x; \\} + \\ , }, }, @@ -1324,6 +1325,187 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { \\1 breakpoints deleted; 0 breakpoint locations disabled. 
}, ); + db.addLldbTest( + "segmented_list", + target, + &.{ + .{ + .path = "main.zig", + .source = + \\const std = @import("std"); + \\fn testSegmentedList() void {} + \\pub fn main() !void { + \\ var list0: std.SegmentedList(usize, 0) = .{}; + \\ defer list0.deinit(std.heap.page_allocator); + \\ + \\ var list1: std.SegmentedList(usize, 1) = .{}; + \\ defer list1.deinit(std.heap.page_allocator); + \\ + \\ var list2: std.SegmentedList(usize, 2) = .{}; + \\ defer list2.deinit(std.heap.page_allocator); + \\ + \\ var list4: std.SegmentedList(usize, 4) = .{}; + \\ defer list4.deinit(std.heap.page_allocator); + \\ + \\ for (0..32) |i| { + \\ try list0.append(std.heap.page_allocator, i); + \\ try list1.append(std.heap.page_allocator, i); + \\ try list2.append(std.heap.page_allocator, i); + \\ try list4.append(std.heap.page_allocator, i); + \\ } + \\ testSegmentedList(); + \\} + \\ + , + }, + }, + \\breakpoint set --file main.zig --source-pattern-regexp 'testSegmentedList\(\);' + \\process launch + \\frame variable list0 list1 list2 list4 + \\breakpoint delete --force 1 + , + &.{ + \\(lldb) frame variable list0 list1 list2 list4 + \\(std.segmented_list.SegmentedList(usize,0)) list0 = len=32 { + \\ [0] = 0 + \\ [1] = 1 + \\ [2] = 2 + \\ [3] = 3 + \\ [4] = 4 + \\ [5] = 5 + \\ [6] = 6 + \\ [7] = 7 + \\ [8] = 8 + \\ [9] = 9 + \\ [10] = 10 + \\ [11] = 11 + \\ [12] = 12 + \\ [13] = 13 + \\ [14] = 14 + \\ [15] = 15 + \\ [16] = 16 + \\ [17] = 17 + \\ [18] = 18 + \\ [19] = 19 + \\ [20] = 20 + \\ [21] = 21 + \\ [22] = 22 + \\ [23] = 23 + \\ [24] = 24 + \\ [25] = 25 + \\ [26] = 26 + \\ [27] = 27 + \\ [28] = 28 + \\ [29] = 29 + \\ [30] = 30 + \\ [31] = 31 + \\} + \\(std.segmented_list.SegmentedList(usize,1)) list1 = len=32 { + \\ [0] = 0 + \\ [1] = 1 + \\ [2] = 2 + \\ [3] = 3 + \\ [4] = 4 + \\ [5] = 5 + \\ [6] = 6 + \\ [7] = 7 + \\ [8] = 8 + \\ [9] = 9 + \\ [10] = 10 + \\ [11] = 11 + \\ [12] = 12 + \\ [13] = 13 + \\ [14] = 14 + \\ [15] = 15 + \\ [16] = 16 + \\ [17] = 17 + \\ [18] = 18 
+ \\ [19] = 19 + \\ [20] = 20 + \\ [21] = 21 + \\ [22] = 22 + \\ [23] = 23 + \\ [24] = 24 + \\ [25] = 25 + \\ [26] = 26 + \\ [27] = 27 + \\ [28] = 28 + \\ [29] = 29 + \\ [30] = 30 + \\ [31] = 31 + \\} + \\(std.segmented_list.SegmentedList(usize,2)) list2 = len=32 { + \\ [0] = 0 + \\ [1] = 1 + \\ [2] = 2 + \\ [3] = 3 + \\ [4] = 4 + \\ [5] = 5 + \\ [6] = 6 + \\ [7] = 7 + \\ [8] = 8 + \\ [9] = 9 + \\ [10] = 10 + \\ [11] = 11 + \\ [12] = 12 + \\ [13] = 13 + \\ [14] = 14 + \\ [15] = 15 + \\ [16] = 16 + \\ [17] = 17 + \\ [18] = 18 + \\ [19] = 19 + \\ [20] = 20 + \\ [21] = 21 + \\ [22] = 22 + \\ [23] = 23 + \\ [24] = 24 + \\ [25] = 25 + \\ [26] = 26 + \\ [27] = 27 + \\ [28] = 28 + \\ [29] = 29 + \\ [30] = 30 + \\ [31] = 31 + \\} + \\(std.segmented_list.SegmentedList(usize,4)) list4 = len=32 { + \\ [0] = 0 + \\ [1] = 1 + \\ [2] = 2 + \\ [3] = 3 + \\ [4] = 4 + \\ [5] = 5 + \\ [6] = 6 + \\ [7] = 7 + \\ [8] = 8 + \\ [9] = 9 + \\ [10] = 10 + \\ [11] = 11 + \\ [12] = 12 + \\ [13] = 13 + \\ [14] = 14 + \\ [15] = 15 + \\ [16] = 16 + \\ [17] = 17 + \\ [18] = 18 + \\ [19] = 19 + \\ [20] = 20 + \\ [21] = 21 + \\ [22] = 22 + \\ [23] = 23 + \\ [24] = 24 + \\ [25] = 25 + \\ [26] = 26 + \\ [27] = 27 + \\ [28] = 28 + \\ [29] = 29 + \\ [30] = 30 + \\ [31] = 31 + \\} + \\(lldb) breakpoint delete --force 1 + \\1 breakpoints deleted; 0 breakpoint locations disabled. 
+ }, + ); } const File = struct { import: ?[]const u8 = null, path: []const u8, source: []const u8 }; From e0469773542e49c9a76df6746afa10f22d44dae4 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Sun, 8 Sep 2024 20:08:44 -0400 Subject: [PATCH 114/202] codegen: implement output to the `.debug_info` section --- src/arch/aarch64/CodeGen.zig | 5 +- src/arch/aarch64/Emit.zig | 3 +- src/arch/arm/CodeGen.zig | 5 +- src/arch/arm/Emit.zig | 3 +- src/arch/riscv64/CodeGen.zig | 7 +- src/arch/riscv64/Emit.zig | 3 +- src/arch/sparc64/CodeGen.zig | 5 +- src/arch/sparc64/Emit.zig | 3 +- src/arch/wasm/CodeGen.zig | 4 +- src/arch/wasm/Emit.zig | 2 +- src/arch/x86_64/CodeGen.zig | 7 +- src/arch/x86_64/Emit.zig | 3 +- src/codegen.zig | 94 ++++++----------- src/link.zig | 12 ++- src/link/Coff.zig | 19 ++-- src/link/Dwarf.zig | 198 +++++++++++++++++++++++------------ src/link/Elf/ZigObject.zig | 63 +++++++---- src/link/MachO/ZigObject.zig | 96 +++++++++++------ src/link/Plan9.zig | 35 +++---- src/link/Wasm/ZigObject.zig | 20 ++-- 20 files changed, 339 insertions(+), 248 deletions(-) diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 0d01b3d458ef..463fdde84447 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -25,7 +25,6 @@ const Alignment = InternPool.Alignment; const CodeGenError = codegen.CodeGenError; const Result = codegen.Result; -const DebugInfoOutput = codegen.DebugInfoOutput; const bits = @import("bits.zig"); const abi = @import("abi.zig"); @@ -48,7 +47,7 @@ pt: Zcu.PerThread, air: Air, liveness: Liveness, bin_file: *link.File, -debug_output: DebugInfoOutput, +debug_output: link.File.DebugInfoOutput, target: *const std.Target, func_index: InternPool.Index, owner_nav: InternPool.Nav.Index, @@ -327,7 +326,7 @@ pub fn generate( air: Air, liveness: Liveness, code: *std.ArrayList(u8), - debug_output: DebugInfoOutput, + debug_output: link.File.DebugInfoOutput, ) CodeGenError!Result { const zcu = pt.zcu; const gpa = zcu.gpa; 
diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig index 6c5630718365..860a264e72ec 100644 --- a/src/arch/aarch64/Emit.zig +++ b/src/arch/aarch64/Emit.zig @@ -13,11 +13,10 @@ const assert = std.debug.assert; const Instruction = bits.Instruction; const Register = bits.Register; const log = std.log.scoped(.aarch64_emit); -const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; mir: Mir, bin_file: *link.File, -debug_output: DebugInfoOutput, +debug_output: link.File.DebugInfoOutput, target: *const std.Target, err_msg: ?*ErrorMsg = null, src_loc: Zcu.LazySrcLoc, diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 5d2ebf52092f..a0b529b75eb9 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -25,7 +25,6 @@ const Alignment = InternPool.Alignment; const Result = codegen.Result; const CodeGenError = codegen.CodeGenError; -const DebugInfoOutput = codegen.DebugInfoOutput; const bits = @import("bits.zig"); const abi = @import("abi.zig"); @@ -49,7 +48,7 @@ pt: Zcu.PerThread, air: Air, liveness: Liveness, bin_file: *link.File, -debug_output: DebugInfoOutput, +debug_output: link.File.DebugInfoOutput, target: *const std.Target, func_index: InternPool.Index, err_msg: ?*ErrorMsg, @@ -335,7 +334,7 @@ pub fn generate( air: Air, liveness: Liveness, code: *std.ArrayList(u8), - debug_output: DebugInfoOutput, + debug_output: link.File.DebugInfoOutput, ) CodeGenError!Result { const zcu = pt.zcu; const gpa = zcu.gpa; diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig index c1b5baad578f..9ccef5a29973 100644 --- a/src/arch/arm/Emit.zig +++ b/src/arch/arm/Emit.zig @@ -16,12 +16,11 @@ const assert = std.debug.assert; const Instruction = bits.Instruction; const Register = bits.Register; const log = std.log.scoped(.aarch32_emit); -const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; const CodeGen = @import("CodeGen.zig"); mir: Mir, bin_file: *link.File, -debug_output: DebugInfoOutput, +debug_output: 
link.File.DebugInfoOutput, target: *const std.Target, err_msg: ?*ErrorMsg = null, src_loc: Zcu.LazySrcLoc, diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 1207eed88dbf..262dad6d2448 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -32,7 +32,6 @@ const Alignment = InternPool.Alignment; const CodeGenError = codegen.CodeGenError; const Result = codegen.Result; -const DebugInfoOutput = codegen.DebugInfoOutput; const bits = @import("bits.zig"); const abi = @import("abi.zig"); @@ -61,7 +60,7 @@ gpa: Allocator, mod: *Package.Module, target: *const std.Target, -debug_output: DebugInfoOutput, +debug_output: link.File.DebugInfoOutput, err_msg: ?*ErrorMsg, args: []MCValue, ret_mcv: InstTracking, @@ -760,7 +759,7 @@ pub fn generate( air: Air, liveness: Liveness, code: *std.ArrayList(u8), - debug_output: DebugInfoOutput, + debug_output: link.File.DebugInfoOutput, ) CodeGenError!Result { const zcu = pt.zcu; const comp = zcu.comp; @@ -928,7 +927,7 @@ pub fn generateLazy( src_loc: Zcu.LazySrcLoc, lazy_sym: link.File.LazySymbol, code: *std.ArrayList(u8), - debug_output: DebugInfoOutput, + debug_output: link.File.DebugInfoOutput, ) CodeGenError!Result { const comp = bin_file.comp; const gpa = comp.gpa; diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig index 258941f19d9a..8ee566c7edf2 100644 --- a/src/arch/riscv64/Emit.zig +++ b/src/arch/riscv64/Emit.zig @@ -2,7 +2,7 @@ bin_file: *link.File, lower: Lower, -debug_output: DebugInfoOutput, +debug_output: link.File.DebugInfoOutput, code: *std.ArrayList(u8), prev_di_line: u32, @@ -216,7 +216,6 @@ const log = std.log.scoped(.emit); const mem = std.mem; const std = @import("std"); -const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; const Emit = @This(); const Lower = @import("Lower.zig"); const Mir = @import("Mir.zig"); diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 4f838b9c5289..589e9978a256 100644 --- 
a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -22,7 +22,6 @@ const Liveness = @import("../../Liveness.zig"); const Type = @import("../../Type.zig"); const CodeGenError = codegen.CodeGenError; const Result = @import("../../codegen.zig").Result; -const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; const Endian = std.builtin.Endian; const Alignment = InternPool.Alignment; @@ -57,7 +56,7 @@ bin_file: *link.File, target: *const std.Target, func_index: InternPool.Index, code: *std.ArrayList(u8), -debug_output: DebugInfoOutput, +debug_output: link.File.DebugInfoOutput, err_msg: ?*ErrorMsg, args: []MCValue, ret_mcv: MCValue, @@ -268,7 +267,7 @@ pub fn generate( air: Air, liveness: Liveness, code: *std.ArrayList(u8), - debug_output: DebugInfoOutput, + debug_output: link.File.DebugInfoOutput, ) CodeGenError!Result { const zcu = pt.zcu; const gpa = zcu.gpa; diff --git a/src/arch/sparc64/Emit.zig b/src/arch/sparc64/Emit.zig index 4e49bcf5b42e..a87c9cd0ae3a 100644 --- a/src/arch/sparc64/Emit.zig +++ b/src/arch/sparc64/Emit.zig @@ -9,7 +9,6 @@ const Zcu = @import("../../Zcu.zig"); const ErrorMsg = Zcu.ErrorMsg; const Liveness = @import("../../Liveness.zig"); const log = std.log.scoped(.sparcv9_emit); -const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; const Emit = @This(); const Mir = @import("Mir.zig"); @@ -19,7 +18,7 @@ const Register = bits.Register; mir: Mir, bin_file: *link.File, -debug_output: DebugInfoOutput, +debug_output: link.File.DebugInfoOutput, target: *const std.Target, err_msg: ?*ErrorMsg = null, src_loc: Zcu.LazySrcLoc, diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index d78b4ae80e26..4c42cd4ad297 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -648,7 +648,7 @@ block_depth: u32 = 0, air: Air, liveness: Liveness, gpa: mem.Allocator, -debug_output: codegen.DebugInfoOutput, +debug_output: link.File.DebugInfoOutput, func_index: InternPool.Index, /// Contains a 
list of current branches. /// When we return from a branch, the branch will be popped from this list, @@ -1211,7 +1211,7 @@ pub fn generate( air: Air, liveness: Liveness, code: *std.ArrayList(u8), - debug_output: codegen.DebugInfoOutput, + debug_output: link.File.DebugInfoOutput, ) codegen.CodeGenError!codegen.Result { const zcu = pt.zcu; const gpa = zcu.gpa; diff --git a/src/arch/wasm/Emit.zig b/src/arch/wasm/Emit.zig index e02067f8ca72..648af47153a3 100644 --- a/src/arch/wasm/Emit.zig +++ b/src/arch/wasm/Emit.zig @@ -26,7 +26,7 @@ owner_nav: InternPool.Nav.Index, // Debug information /// Holds the debug information for this emission -dbg_output: codegen.DebugInfoOutput, +dbg_output: link.File.DebugInfoOutput, /// Previous debug info line prev_di_line: u32, /// Previous debug info column diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 35f47ad882a4..2d0b35a63680 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -17,7 +17,6 @@ const Air = @import("../../Air.zig"); const Allocator = mem.Allocator; const CodeGenError = codegen.CodeGenError; const Compilation = @import("../../Compilation.zig"); -const DebugInfoOutput = codegen.DebugInfoOutput; const ErrorMsg = Zcu.ErrorMsg; const Result = codegen.Result; const Emit = @import("Emit.zig"); @@ -53,7 +52,7 @@ pt: Zcu.PerThread, air: Air, liveness: Liveness, bin_file: *link.File, -debug_output: DebugInfoOutput, +debug_output: link.File.DebugInfoOutput, target: *const std.Target, owner: Owner, inline_func: InternPool.Index, @@ -819,7 +818,7 @@ pub fn generate( air: Air, liveness: Liveness, code: *std.ArrayList(u8), - debug_output: DebugInfoOutput, + debug_output: link.File.DebugInfoOutput, ) CodeGenError!Result { const zcu = pt.zcu; const comp = zcu.comp; @@ -1000,7 +999,7 @@ pub fn generateLazy( src_loc: Zcu.LazySrcLoc, lazy_sym: link.File.LazySymbol, code: *std.ArrayList(u8), - debug_output: DebugInfoOutput, + debug_output: link.File.DebugInfoOutput, ) 
CodeGenError!Result { const comp = bin_file.comp; const gpa = comp.gpa; diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig index da9fe303f17e..372a520e52a0 100644 --- a/src/arch/x86_64/Emit.zig +++ b/src/arch/x86_64/Emit.zig @@ -3,7 +3,7 @@ air: Air, lower: Lower, atom_index: u32, -debug_output: DebugInfoOutput, +debug_output: link.File.DebugInfoOutput, code: *std.ArrayList(u8), prev_di_line: u32, @@ -546,7 +546,6 @@ const log = std.log.scoped(.emit); const std = @import("std"); const Air = @import("../../Air.zig"); -const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; const Emit = @This(); const Lower = @import("Lower.zig"); const Mir = @import("Mir.zig"); diff --git a/src/codegen.zig b/src/codegen.zig index adc77c78edb8..b41fa3c2ef8c 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -38,12 +38,6 @@ pub const CodeGenError = error{ CodegenFail, } || link.File.UpdateDebugInfoError; -pub const DebugInfoOutput = union(enum) { - dwarf: *link.File.Dwarf.WipNav, - plan9: *link.File.Plan9.DebugInfoOutput, - none, -}; - fn devFeatureForBackend(comptime backend: std.builtin.CompilerBackend) dev.Feature { comptime assert(mem.startsWith(u8, @tagName(backend), "stage2_")); return @field(dev.Feature, @tagName(backend)["stage2_".len..] 
++ "_backend"); @@ -69,7 +63,7 @@ pub fn generateFunction( air: Air, liveness: Liveness, code: *std.ArrayList(u8), - debug_output: DebugInfoOutput, + debug_output: link.File.DebugInfoOutput, ) CodeGenError!Result { const zcu = pt.zcu; const func = zcu.funcInfo(func_index); @@ -95,7 +89,7 @@ pub fn generateLazyFunction( src_loc: Zcu.LazySrcLoc, lazy_sym: link.File.LazySymbol, code: *std.ArrayList(u8), - debug_output: DebugInfoOutput, + debug_output: link.File.DebugInfoOutput, ) CodeGenError!Result { const zcu = pt.zcu; const file = Type.fromInterned(lazy_sym.ty).typeDeclInstAllowGeneratedTag(zcu).?.resolveFile(&zcu.intern_pool); @@ -127,10 +121,10 @@ pub fn generateLazySymbol( // TODO don't use an "out" parameter like this; put it in the result instead alignment: *Alignment, code: *std.ArrayList(u8), - debug_output: DebugInfoOutput, - reloc_info: RelocInfo, + debug_output: link.File.DebugInfoOutput, + reloc_parent: link.File.RelocInfo.Parent, ) CodeGenError!Result { - _ = reloc_info; + _ = reloc_parent; const tracy = trace(@src()); defer tracy.end(); @@ -192,8 +186,7 @@ pub fn generateSymbol( src_loc: Zcu.LazySrcLoc, val: Value, code: *std.ArrayList(u8), - debug_output: DebugInfoOutput, - reloc_info: RelocInfo, + reloc_parent: link.File.RelocInfo.Parent, ) CodeGenError!Result { const tracy = trace(@src()); defer tracy.end(); @@ -290,7 +283,7 @@ pub fn generateSymbol( switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(switch (error_union.val) { .err_name => try pt.intern(.{ .undef = payload_ty.toIntern() }), .payload => |payload| payload, - }), code, debug_output, reloc_info)) { + }), code, reloc_parent)) { .ok => {}, .fail => |em| return .{ .fail = em }, } @@ -318,7 +311,7 @@ pub fn generateSymbol( }, .enum_tag => |enum_tag| { const int_tag_ty = ty.intTagType(zcu); - switch (try generateSymbol(bin_file, pt, src_loc, try pt.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), code, debug_output, reloc_info)) { + switch (try 
generateSymbol(bin_file, pt, src_loc, try pt.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), code, reloc_parent)) { .ok => {}, .fail => |em| return .{ .fail = em }, } @@ -334,16 +327,16 @@ pub fn generateSymbol( }, .f128 => |f128_val| writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(16)), }, - .ptr => switch (try lowerPtr(bin_file, pt, src_loc, val.toIntern(), code, debug_output, reloc_info, 0)) { + .ptr => switch (try lowerPtr(bin_file, pt, src_loc, val.toIntern(), code, reloc_parent, 0)) { .ok => {}, .fail => |em| return .{ .fail = em }, }, .slice => |slice| { - switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.ptr), code, debug_output, reloc_info)) { + switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.ptr), code, reloc_parent)) { .ok => {}, .fail => |em| return .{ .fail = em }, } - switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.len), code, debug_output, reloc_info)) { + switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(slice.len), code, reloc_parent)) { .ok => {}, .fail => |em| return .{ .fail = em }, } @@ -355,7 +348,7 @@ pub fn generateSymbol( if (ty.optionalReprIsPayload(zcu)) { if (payload_val) |value| { - switch (try generateSymbol(bin_file, pt, src_loc, value, code, debug_output, reloc_info)) { + switch (try generateSymbol(bin_file, pt, src_loc, value, code, reloc_parent)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } @@ -368,7 +361,7 @@ pub fn generateSymbol( const value = payload_val orelse Value.fromInterned(try pt.intern(.{ .undef = payload_type.toIntern(), })); - switch (try generateSymbol(bin_file, pt, src_loc, value, code, debug_output, reloc_info)) { + switch (try generateSymbol(bin_file, pt, src_loc, value, code, reloc_parent)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } @@ -390,7 +383,7 @@ pub fn generateSymbol( elem else array_type.sentinel, - }), code, debug_output, reloc_info)) { + }), code, 
reloc_parent)) { .ok => {}, .fail => |em| return .{ .fail = em }, } @@ -449,7 +442,7 @@ pub fn generateSymbol( math.cast(usize, index) orelse return error.Overflow ], .repeated_elem => |elem| elem, - }), code, debug_output, reloc_info)) { + }), code, reloc_parent)) { .ok => {}, .fail => |em| return .{ .fail = em }, } @@ -482,7 +475,7 @@ pub fn generateSymbol( .repeated_elem => |elem| elem, }; - switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, debug_output, reloc_info)) { + switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, reloc_parent)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } @@ -524,7 +517,7 @@ pub fn generateSymbol( return error.Overflow; var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size); defer tmp_list.deinit(); - switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), &tmp_list, debug_output, reloc_info)) { + switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), &tmp_list, reloc_parent)) { .ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items), .fail => |em| return Result{ .fail = em }, } @@ -559,7 +552,7 @@ pub fn generateSymbol( ) orelse return error.Overflow; if (padding > 0) try code.appendNTimes(0, padding); - switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, debug_output, reloc_info)) { + switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(field_val), code, reloc_parent)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } @@ -583,12 +576,12 @@ pub fn generateSymbol( const layout = ty.unionGetLayout(zcu); if (layout.payload_size == 0) { - return generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info); + return generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, reloc_parent); } // Check if we should store the tag first. 
if (layout.tag_size > 0 and layout.tag_align.compare(.gte, layout.payload_align)) { - switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info)) { + switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, reloc_parent)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } @@ -601,7 +594,7 @@ pub fn generateSymbol( if (!field_ty.hasRuntimeBits(zcu)) { try code.appendNTimes(0xaa, math.cast(usize, layout.payload_size) orelse return error.Overflow); } else { - switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, debug_output, reloc_info)) { + switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, reloc_parent)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } @@ -612,14 +605,14 @@ pub fn generateSymbol( } } } else { - switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, debug_output, reloc_info)) { + switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.val), code, reloc_parent)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } } if (layout.tag_size > 0 and layout.tag_align.compare(.lt, layout.payload_align)) { - switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, debug_output, reloc_info)) { + switch (try generateSymbol(bin_file, pt, src_loc, Value.fromInterned(un.tag), code, reloc_parent)) { .ok => {}, .fail => |em| return Result{ .fail = em }, } @@ -640,40 +633,29 @@ fn lowerPtr( src_loc: Zcu.LazySrcLoc, ptr_val: InternPool.Index, code: *std.ArrayList(u8), - debug_output: DebugInfoOutput, - reloc_info: RelocInfo, + reloc_parent: link.File.RelocInfo.Parent, prev_offset: u64, ) CodeGenError!Result { const zcu = pt.zcu; const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr; const offset: u64 = prev_offset + ptr.byte_offset; return switch (ptr.base_addr) { - .nav => |nav| try lowerNavRef(bin_file, pt, src_loc, nav, code, 
debug_output, reloc_info, offset), - .uav => |uav| try lowerUavRef(bin_file, pt, src_loc, uav, code, debug_output, reloc_info, offset), - .int => try generateSymbol(bin_file, pt, src_loc, try pt.intValue(Type.usize, offset), code, debug_output, reloc_info), + .nav => |nav| try lowerNavRef(bin_file, pt, src_loc, nav, code, reloc_parent, offset), + .uav => |uav| try lowerUavRef(bin_file, pt, src_loc, uav, code, reloc_parent, offset), + .int => try generateSymbol(bin_file, pt, src_loc, try pt.intValue(Type.usize, offset), code, reloc_parent), .eu_payload => |eu_ptr| try lowerPtr( bin_file, pt, src_loc, eu_ptr, code, - debug_output, - reloc_info, + reloc_parent, offset + errUnionPayloadOffset( Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu).errorUnionPayload(zcu), zcu, ), ), - .opt_payload => |opt_ptr| try lowerPtr( - bin_file, - pt, - src_loc, - opt_ptr, - code, - debug_output, - reloc_info, - offset, - ), + .opt_payload => |opt_ptr| try lowerPtr(bin_file, pt, src_loc, opt_ptr, code, reloc_parent, offset), .field => |field| { const base_ptr = Value.fromInterned(field.base); const base_ty = base_ptr.typeOf(zcu).childType(zcu); @@ -692,27 +674,21 @@ fn lowerPtr( }, else => unreachable, }; - return lowerPtr(bin_file, pt, src_loc, field.base, code, debug_output, reloc_info, offset + field_off); + return lowerPtr(bin_file, pt, src_loc, field.base, code, reloc_parent, offset + field_off); }, .arr_elem, .comptime_field, .comptime_alloc => unreachable, }; } -const RelocInfo = struct { - parent_atom_index: u32, -}; - fn lowerUavRef( lf: *link.File, pt: Zcu.PerThread, src_loc: Zcu.LazySrcLoc, uav: InternPool.Key.Ptr.BaseAddr.Uav, code: *std.ArrayList(u8), - debug_output: DebugInfoOutput, - reloc_info: RelocInfo, + reloc_parent: link.File.RelocInfo.Parent, offset: u64, ) CodeGenError!Result { - _ = debug_output; const zcu = pt.zcu; const ip = &zcu.intern_pool; const target = lf.comp.root_mod.resolved_target.result; @@ -735,7 +711,7 @@ fn lowerUavRef( } const vaddr = try 
lf.getUavVAddr(uav_val, .{ - .parent_atom_index = reloc_info.parent_atom_index, + .parent = reloc_parent, .offset = code.items.len, .addend = @intCast(offset), }); @@ -756,12 +732,10 @@ fn lowerNavRef( src_loc: Zcu.LazySrcLoc, nav_index: InternPool.Nav.Index, code: *std.ArrayList(u8), - debug_output: DebugInfoOutput, - reloc_info: RelocInfo, + reloc_parent: link.File.RelocInfo.Parent, offset: u64, ) CodeGenError!Result { _ = src_loc; - _ = debug_output; const zcu = pt.zcu; const ip = &zcu.intern_pool; const target = zcu.navFileScope(nav_index).mod.resolved_target.result; @@ -775,7 +749,7 @@ fn lowerNavRef( } const vaddr = try lf.getNavVAddr(pt, nav_index, .{ - .parent_atom_index = reloc_info.parent_atom_index, + .parent = reloc_parent, .offset = code.items.len, .addend = @intCast(offset), }); diff --git a/src/link.zig b/src/link.zig index 508bc81352fc..2894302c115a 100644 --- a/src/link.zig +++ b/src/link.zig @@ -330,6 +330,11 @@ pub const File = struct { } } + pub const DebugInfoOutput = union(enum) { + dwarf: *Dwarf.WipNav, + plan9: *Plan9.DebugInfoOutput, + none, + }; pub const UpdateDebugInfoError = Dwarf.UpdateError; pub const FlushDebugInfoError = Dwarf.FlushError; @@ -673,9 +678,14 @@ pub const File = struct { } pub const RelocInfo = struct { - parent_atom_index: u32, + parent: Parent, offset: u64, addend: u32, + + pub const Parent = union(enum) { + atom_index: u32, + debug_output: DebugInfoOutput, + }; }; /// Get allocated `Nav`'s address in virtual memory. 
diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 6bf47e260200..f67c7d54d798 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1163,8 +1163,8 @@ fn lowerConst( try self.setSymbolName(sym, name); sym.section_number = @as(coff.SectionNumber, @enumFromInt(sect_id + 1)); - const res = try codegen.generateSymbol(&self.base, pt, src_loc, val, &code_buffer, .none, .{ - .parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?, + const res = try codegen.generateSymbol(&self.base, pt, src_loc, val, &code_buffer, .{ + .atom_index = self.getAtom(atom_index).getSymbolIndex().?, }); const code = switch (res) { .ok => code_buffer.items, @@ -1235,8 +1235,7 @@ pub fn updateNav( zcu.navSrcLoc(nav_index), nav_init, &code_buffer, - .none, - .{ .parent_atom_index = atom.getSymbolIndex().? }, + .{ .atom_index = atom.getSymbolIndex().? }, ); const code = switch (res) { .ok => code_buffer.items, @@ -1284,7 +1283,7 @@ fn updateLazySymbolAtom( &required_alignment, &code_buffer, .none, - .{ .parent_atom_index = local_sym_index }, + .{ .atom_index = local_sym_index }, ); const code = switch (res) { .ok => code_buffer.items, @@ -1823,7 +1822,10 @@ pub fn getNavVAddr( .@"extern" => |@"extern"| try self.getGlobalSymbol(nav.name.toSlice(ip), @"extern".lib_name.toSlice(ip)), else => self.getAtom(try self.getOrCreateAtomForNav(nav_index)).getSymbolIndex().?, }; - const atom_index = self.getAtomIndexForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file = null }).?; + const atom_index = self.getAtomIndexForSymbol(.{ + .sym_index = reloc_info.parent.atom_index, + .file = null, + }).?; const target = SymbolWithLoc{ .sym_index = sym_index, .file = null }; try Atom.addRelocation(self, atom_index, .{ .type = .direct, @@ -1901,7 +1903,10 @@ pub fn getUavVAddr( const this_atom_index = self.uavs.get(uav).?.atom; const sym_index = self.getAtom(this_atom_index).getSymbolIndex().?; - const atom_index = self.getAtomIndexForSymbol(.{ .sym_index = reloc_info.parent_atom_index, .file 
= null }).?; + const atom_index = self.getAtomIndexForSymbol(.{ + .sym_index = reloc_info.parent.atom_index, + .file = null, + }).?; const target = SymbolWithLoc{ .sym_index = sym_index, .file = null }; try Atom.addRelocation(self, atom_index, .{ .type = .direct, diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 2a9fd8f8f933..ab71e0ec64d2 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -19,8 +19,8 @@ debug_rnglists: DebugRngLists, debug_str: StringSection, pub const UpdateError = error{ + CodegenFail, ReinterpretDeclRef, - IllDefinedMemoryLayout, Unimplemented, OutOfMemory, EndOfStream, @@ -1584,6 +1584,18 @@ pub const WipNav = struct { wip_nav.func = func; } + fn externalReloc(wip_nav: *WipNav, sec: *Section, reloc: ExternalReloc) std.mem.Allocator.Error!void { + try sec.getUnit(wip_nav.unit).getEntry(wip_nav.entry).external_relocs.append(wip_nav.dwarf.gpa, reloc); + } + + pub fn infoExternalReloc(wip_nav: *WipNav, reloc: ExternalReloc) std.mem.Allocator.Error!void { + try wip_nav.externalReloc(&wip_nav.dwarf.debug_info.section, reloc); + } + + fn frameExternalReloc(wip_nav: *WipNav, reloc: ExternalReloc) std.mem.Allocator.Error!void { + try wip_nav.externalReloc(&wip_nav.dwarf.debug_frame.section, reloc); + } + fn abbrevCode(wip_nav: *WipNav, abbrev_code: AbbrevCode) UpdateError!void { try uleb128(wip_nav.debug_info.writer(wip_nav.dwarf.gpa), try wip_nav.dwarf.refAbbrevCode(abbrev_code)); } @@ -1660,12 +1672,11 @@ pub const WipNav = struct { } fn infoAddrSym(wip_nav: *WipNav, sym_index: u32) UpdateError!void { - const dwarf = wip_nav.dwarf; - try dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(wip_nav.entry).external_relocs.append(dwarf.gpa, .{ + try wip_nav.infoExternalReloc(.{ .source_off = @intCast(wip_nav.debug_info.items.len), .target_sym = sym_index, }); - try wip_nav.debug_info.appendNTimes(dwarf.gpa, 0, @intFromEnum(dwarf.address_size)); + try wip_nav.debug_info.appendNTimes(wip_nav.dwarf.gpa, 0, 
@intFromEnum(wip_nav.dwarf.address_size)); } fn frameExprloc(wip_nav: *WipNav, loc: Loc) UpdateError!void { @@ -1692,12 +1703,11 @@ pub const WipNav = struct { } fn frameAddrSym(wip_nav: *WipNav, sym_index: u32) UpdateError!void { - const dwarf = wip_nav.dwarf; - try dwarf.debug_frame.section.getUnit(wip_nav.unit).getEntry(wip_nav.entry).external_relocs.append(dwarf.gpa, .{ + try wip_nav.frameExternalReloc(.{ .source_off = @intCast(wip_nav.debug_frame.items.len), .target_sym = sym_index, }); - try wip_nav.debug_frame.appendNTimes(dwarf.gpa, 0, @intFromEnum(dwarf.address_size)); + try wip_nav.debug_frame.appendNTimes(wip_nav.dwarf.gpa, 0, @intFromEnum(wip_nav.dwarf.address_size)); } fn getTypeEntry(wip_nav: *WipNav, ty: Type) UpdateError!struct { Unit.Index, Entry.Index } { @@ -1749,6 +1759,27 @@ pub const WipNav = struct { reloc.target_off = @intCast(wip_nav.debug_info.items.len); } + fn blockValue(wip_nav: *WipNav, src_loc: Zcu.LazySrcLoc, val: Value) UpdateError!void { + const ty = val.typeOf(wip_nav.pt.zcu); + const diw = wip_nav.debug_info.writer(wip_nav.dwarf.gpa); + const bytes = ty.abiSize(wip_nav.pt.zcu); + try uleb128(diw, bytes); + if (bytes == 0) return; + var dim = wip_nav.debug_info.toManaged(wip_nav.dwarf.gpa); + defer wip_nav.debug_info = dim.moveToUnmanaged(); + switch (try codegen.generateSymbol( + wip_nav.dwarf.bin_file, + wip_nav.pt, + src_loc, + val, + &dim, + .{ .debug_output = .{ .dwarf = wip_nav } }, + )) { + .ok => assert(dim.items.len == wip_nav.debug_info.items.len + bytes), + .fail => unreachable, + } + } + fn enumConstValue( wip_nav: *WipNav, loaded_enum: InternPool.LoadedEnumType, @@ -1814,8 +1845,8 @@ pub const WipNav = struct { } } - fn flush(wip_nav: *WipNav) UpdateError!void { - while (wip_nav.pending_types.popOrNull()) |ty| try wip_nav.dwarf.updateType(wip_nav.pt, ty, &wip_nav.pending_types); + fn flush(wip_nav: *WipNav, src_loc: Zcu.LazySrcLoc) UpdateError!void { + while (wip_nav.pending_types.popOrNull()) |ty| try 
wip_nav.dwarf.updateType(wip_nav.pt, src_loc, ty, &wip_nav.pending_types); } }; @@ -2171,15 +2202,15 @@ pub fn initWipNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.In try diw.writeByte(accessibility); try wip_nav.strp(nav.name.toSlice(ip)); try wip_nav.strp(nav.fqn.toSlice(ip)); - const ty = nav_val.typeOf(zcu); - const ty_reloc_index = try wip_nav.refForward(); + const nav_ty = nav_val.typeOf(zcu); + const nav_ty_reloc_index = try wip_nav.refForward(); try wip_nav.exprloc(.{ .addr = .{ .sym = sym_index } }); try uleb128(diw, nav.status.resolved.alignment.toByteUnits() orelse - ty.abiAlignment(zcu).toByteUnits().?); + nav_ty.abiAlignment(zcu).toByteUnits().?); try diw.writeByte(@intFromBool(false)); - wip_nav.finishForward(ty_reloc_index); + wip_nav.finishForward(nav_ty_reloc_index); try wip_nav.abbrevCode(.is_const); - try wip_nav.refType(ty); + try wip_nav.refType(nav_ty); }, .variable => |variable| { assert(file.zir_loaded); @@ -2288,20 +2319,16 @@ pub fn initWipNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.In .source_off = @intCast(wip_nav.debug_frame.items.len), }); try dfw.writeByteNTimes(0, dwarf.sectionOffsetBytes()); - try entry.external_relocs.append(dwarf.gpa, .{ - .source_off = @intCast(wip_nav.debug_frame.items.len), - .target_sym = sym_index, - }); - try dfw.writeByteNTimes(0, @intFromEnum(dwarf.address_size)); + try wip_nav.frameAddrSym(sym_index); try dfw.writeByteNTimes(undefined, @intFromEnum(dwarf.address_size)); }, .eh_frame => { try dfw.writeInt(u32, undefined, dwarf.endian); - try entry.external_relocs.append(dwarf.gpa, .{ + try wip_nav.frameExternalReloc(.{ .source_off = @intCast(wip_nav.debug_frame.items.len), .target_sym = sym_index, }); - try dfw.writeByteNTimes(0, dwarf.sectionOffsetBytes()); + try dfw.writeInt(u32, 0, dwarf.endian); try dfw.writeInt(u32, undefined, dwarf.endian); try uleb128(dfw, 0); }, @@ -2481,12 +2508,13 @@ pub fn finishWipNav( } try 
dwarf.debug_loclists.section.replaceEntry(wip_nav.unit, wip_nav.entry, dwarf, wip_nav.debug_loclists.items); - try wip_nav.flush(); + try wip_nav.flush(zcu.navSrcLoc(nav_index)); } pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) UpdateError!void { const zcu = pt.zcu; const ip = &zcu.intern_pool; + const nav_src_loc = zcu.navSrcLoc(nav_index); const nav_val = zcu.navValue(nav_index); const nav = ip.getNav(nav_index); @@ -2548,7 +2576,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool const nav_gop = try dwarf.navs.getOrPut(dwarf.gpa, nav_index); errdefer _ = dwarf.navs.pop(); - const tag: enum { done, decl_alias } = switch (ip.indexToKey(nav_val.toIntern())) { + const tag: enum { done, decl_alias, decl_const } = switch (ip.indexToKey(nav_val.toIntern())) { .int_type, .ptr_type, .array_type, @@ -2623,7 +2651,10 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool } const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); try wip_nav.refType(field_type); - if (!is_comptime) { + if (is_comptime) try wip_nav.blockValue( + nav_src_loc, + Value.fromInterned(loaded_struct.fieldInit(ip, field_index)), + ) else { try uleb128(diw, loaded_struct.offsets.get(ip)[field_index]); try uleb128(diw, loaded_struct.fieldAlign(ip, field_index).toByteUnits() orelse field_type.abiAlignment(zcu).toByteUnits().?); @@ -2850,7 +2881,6 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool }, .undef, .simple_value, - .variable, .@"extern", .int, .err, @@ -2864,10 +2894,8 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool .opt, .aggregate, .un, - => { - _ = dwarf.navs.pop(); - return; - }, + => .decl_const, + .variable => unreachable, .func => |func| tag: { if (nav_gop.found_existing) { const unit_ptr = dwarf.debug_info.section.getUnit(wip_nav.unit); @@ -2918,14 +2946,16 @@ pub fn 
updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool // memoization, not types .memoized_call => unreachable, }; + if (tag != .done) { + if (nav_gop.found_existing) + dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(nav_gop.value_ptr.*).clear() + else + nav_gop.value_ptr.* = try dwarf.addCommonEntry(wip_nav.unit); + wip_nav.entry = nav_gop.value_ptr.*; + } switch (tag) { .done => {}, .decl_alias => { - if (nav_gop.found_existing) - dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(nav_gop.value_ptr.*).clear() - else - nav_gop.value_ptr.* = try dwarf.addCommonEntry(wip_nav.unit); - wip_nav.entry = nav_gop.value_ptr.*; const diw = wip_nav.debug_info.writer(dwarf.gpa); try wip_nav.abbrevCode(.decl_alias); try wip_nav.refType(Type.fromInterned(parent_type)); @@ -2936,14 +2966,35 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool try wip_nav.strp(nav.name.toSlice(ip)); try wip_nav.refType(nav_val.toType()); }, + .decl_const => { + const diw = wip_nav.debug_info.writer(dwarf.gpa); + try wip_nav.abbrevCode(.decl_const); + try wip_nav.refType(Type.fromInterned(parent_type)); + assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf)); + try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian); + try uleb128(diw, loc.column + 1); + try diw.writeByte(accessibility); + try wip_nav.strp(nav.name.toSlice(ip)); + try wip_nav.strp(nav.fqn.toSlice(ip)); + const nav_ty = nav_val.typeOf(zcu); + const nav_ty_reloc_index = try wip_nav.refForward(); + try wip_nav.blockValue(nav_src_loc, nav_val); + try uleb128(diw, nav.status.resolved.alignment.toByteUnits() orelse + nav_ty.abiAlignment(zcu).toByteUnits().?); + try diw.writeByte(@intFromBool(false)); + wip_nav.finishForward(nav_ty_reloc_index); + try wip_nav.abbrevCode(.is_const); + try wip_nav.refType(nav_ty); + }, } try dwarf.debug_info.section.replaceEntry(wip_nav.unit, wip_nav.entry, dwarf, wip_nav.debug_info.items); - try wip_nav.flush(); + try 
wip_nav.flush(nav_src_loc); } fn updateType( dwarf: *Dwarf, pt: Zcu.PerThread, + src_loc: Zcu.LazySrcLoc, type_index: InternPool.Index, pending_types: *std.ArrayListUnmanaged(InternPool.Index), ) UpdateError!void { @@ -3000,15 +3051,10 @@ fn updateType( const ptr_child_type = Type.fromInterned(ptr_type.child); try wip_nav.abbrevCode(if (ptr_type.sentinel == .none) .ptr_type else .ptr_sentinel_type); try wip_nav.strp(name); - if (ptr_type.sentinel != .none) { - const bytes = ptr_child_type.abiSize(zcu); - try uleb128(diw, bytes); - const mem = try wip_nav.debug_info.addManyAsSlice(dwarf.gpa, @intCast(bytes)); - Value.fromInterned(ptr_type.sentinel).writeToMemory(pt, mem) catch |err| switch (err) { - error.IllDefinedMemoryLayout => @memset(mem, 0), - else => |e| return e, - }; - } + if (ptr_type.sentinel != .none) try wip_nav.blockValue( + src_loc, + Value.fromInterned(ptr_type.sentinel), + ); try uleb128(diw, ptr_type.flags.alignment.toByteUnits() orelse ptr_child_type.abiAlignment(zcu).toByteUnits().?); try diw.writeByte(@intFromEnum(ptr_type.flags.address_space)); @@ -3054,15 +3100,10 @@ fn updateType( const array_child_type = Type.fromInterned(array_type.child); try wip_nav.abbrevCode(if (array_type.sentinel == .none) .array_type else .array_sentinel_type); try wip_nav.strp(name); - if (array_type.sentinel != .none) { - const bytes = array_child_type.abiSize(zcu); - try uleb128(diw, bytes); - const mem = try wip_nav.debug_info.addManyAsSlice(dwarf.gpa, @intCast(bytes)); - Value.fromInterned(array_type.sentinel).writeToMemory(pt, mem) catch |err| switch (err) { - error.IllDefinedMemoryLayout => @memset(mem, 0), - else => |e| return e, - }; - } + if (array_type.sentinel != .none) try wip_nav.blockValue( + src_loc, + Value.fromInterned(array_type.sentinel), + ); try wip_nav.refType(array_child_type); try wip_nav.abbrevCode(.array_index); try wip_nav.refType(Type.usize); @@ -3292,7 +3333,10 @@ fn updateType( } const field_type = 
Type.fromInterned(anon_struct_type.types.get(ip)[field_index]); try wip_nav.refType(field_type); - if (comptime_value == .none) { + if (comptime_value != .none) try wip_nav.blockValue( + src_loc, + Value.fromInterned(comptime_value), + ) else { const field_align = field_type.abiAlignment(zcu); field_byte_offset = field_align.forward(field_byte_offset); try uleb128(diw, field_byte_offset); @@ -3359,16 +3403,13 @@ fn updateType( } if (error_set_type.names.len > 0) try uleb128(diw, @intFromEnum(AbbrevCode.null)); }, - .inferred_error_set_type => |func| switch (ip.funcIesResolvedUnordered(func)) { - .none => { - try wip_nav.abbrevCode(.void_type); - try wip_nav.strp(name); - }, - else => |ies| { - try wip_nav.abbrevCode(.inferred_error_set_type); - try wip_nav.strp(name); - try wip_nav.refType(Type.fromInterned(ies)); - }, + .inferred_error_set_type => |func| { + try wip_nav.abbrevCode(.inferred_error_set_type); + try wip_nav.strp(name); + try wip_nav.refType(Type.fromInterned(switch (ip.funcIesResolvedUnordered(func)) { + .none => .anyerror_type, + else => |ies| ies, + })); }, // values, not types @@ -3400,6 +3441,7 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP const zcu = pt.zcu; const ip = &zcu.intern_pool; const ty = Type.fromInterned(type_index); + const ty_src_loc = ty.srcLoc(zcu); log.debug("updateContainerType({}({d}))", .{ ty.fmt(pt), @intFromEnum(type_index) }); const inst_info = ty.typeDeclInst(zcu).?.resolveFull(ip).?; @@ -3447,7 +3489,10 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP } const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); try wip_nav.refType(field_type); - if (!is_comptime) { + if (is_comptime) try wip_nav.blockValue( + ty_src_loc, + Value.fromInterned(loaded_struct.fieldInit(ip, field_index)), + ) else { try uleb128(diw, loaded_struct.offsets.get(ip)[field_index]); try uleb128(diw, loaded_struct.fieldAlign(ip, 
field_index).toByteUnits() orelse field_type.abiAlignment(zcu).toByteUnits().?); @@ -3457,7 +3502,7 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP } try dwarf.debug_info.section.replaceEntry(wip_nav.unit, wip_nav.entry, dwarf, wip_nav.debug_info.items); - try wip_nav.flush(); + try wip_nav.flush(ty_src_loc); } else { const decl_inst = file.zir.instructions.get(@intFromEnum(inst_info.inst)); assert(decl_inst.tag == .extended); @@ -3515,7 +3560,10 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP } const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); try wip_nav.refType(field_type); - if (!is_comptime) { + if (is_comptime) try wip_nav.blockValue( + ty_src_loc, + Value.fromInterned(loaded_struct.fieldInit(ip, field_index)), + ) else { try uleb128(diw, loaded_struct.offsets.get(ip)[field_index]); try uleb128(diw, loaded_struct.fieldAlign(ip, field_index).toByteUnits() orelse field_type.abiAlignment(zcu).toByteUnits().?); @@ -3616,7 +3664,7 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP } try dwarf.debug_info.section.replaceEntry(wip_nav.unit, wip_nav.entry, dwarf, wip_nav.debug_info.items); try dwarf.debug_loclists.section.replaceEntry(wip_nav.unit, wip_nav.entry, dwarf, wip_nav.debug_loclists.items); - try wip_nav.flush(); + try wip_nav.flush(ty_src_loc); } } @@ -4085,6 +4133,7 @@ const AbbrevCode = enum { decl_packed_struct, decl_union, decl_var, + decl_const, decl_func, decl_empty_func, decl_func_generic, @@ -4220,6 +4269,16 @@ const AbbrevCode = enum { .{ .external, .flag }, }, }, + .decl_const = .{ + .tag = .constant, + .attrs = decl_abbrev_common_attrs ++ .{ + .{ .linkage_name, .strp }, + .{ .type, .ref_addr }, + .{ .const_value, .block }, + .{ .alignment, .udata }, + .{ .external, .flag }, + }, + }, .decl_func = .{ .tag = .subprogram, .children = true, @@ -4339,9 +4398,10 @@ const AbbrevCode = enum { .struct_field_comptime 
= .{ .tag = .member, .attrs = &.{ + .{ .const_expr, .flag_present }, .{ .name, .strp }, .{ .type, .ref_addr }, - .{ .const_expr, .flag_present }, + .{ .default_value, .block }, }, }, .packed_struct_field = .{ diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 0dc4bd9dae5f..98449d6a5be2 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -957,13 +957,26 @@ pub fn getNavVAddr( }; const this_sym = self.symbol(this_sym_index); const vaddr = this_sym.address(.{}, elf_file); - const parent_atom = self.symbol(reloc_info.parent_atom_index).atom(elf_file).?; - const r_type = relocation.encode(.abs, elf_file.getTarget().cpu.arch); - try parent_atom.addReloc(elf_file.base.comp.gpa, .{ - .r_offset = reloc_info.offset, - .r_info = (@as(u64, @intCast(this_sym_index)) << 32) | r_type, - .r_addend = reloc_info.addend, - }, self); + switch (reloc_info.parent) { + .atom_index => |atom_index| { + const parent_atom = self.symbol(atom_index).atom(elf_file).?; + const r_type = relocation.encode(.abs, elf_file.getTarget().cpu.arch); + try parent_atom.addReloc(elf_file.base.comp.gpa, .{ + .r_offset = reloc_info.offset, + .r_info = (@as(u64, @intCast(this_sym_index)) << 32) | r_type, + .r_addend = reloc_info.addend, + }, self); + }, + .debug_output => |debug_output| switch (debug_output) { + .dwarf => |wip_nav| try wip_nav.infoExternalReloc(.{ + .source_off = @intCast(reloc_info.offset), + .target_sym = this_sym_index, + .target_off = reloc_info.addend, + }), + .plan9 => unreachable, + .none => unreachable, + }, + } return @intCast(vaddr); } @@ -976,13 +989,26 @@ pub fn getUavVAddr( const sym_index = self.uavs.get(uav).?.symbol_index; const sym = self.symbol(sym_index); const vaddr = sym.address(.{}, elf_file); - const parent_atom = self.symbol(reloc_info.parent_atom_index).atom(elf_file).?; - const r_type = relocation.encode(.abs, elf_file.getTarget().cpu.arch); - try parent_atom.addReloc(elf_file.base.comp.gpa, .{ - .r_offset = 
reloc_info.offset, - .r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type, - .r_addend = reloc_info.addend, - }, self); + switch (reloc_info.parent) { + .atom_index => |atom_index| { + const parent_atom = self.symbol(atom_index).atom(elf_file).?; + const r_type = relocation.encode(.abs, elf_file.getTarget().cpu.arch); + try parent_atom.addReloc(elf_file.base.comp.gpa, .{ + .r_offset = reloc_info.offset, + .r_info = (@as(u64, @intCast(sym_index)) << 32) | r_type, + .r_addend = reloc_info.addend, + }, self); + }, + .debug_output => |debug_output| switch (debug_output) { + .dwarf => |wip_nav| try wip_nav.infoExternalReloc(.{ + .source_off = @intCast(reloc_info.offset), + .target_sym = sym_index, + .target_off = reloc_info.addend, + }), + .plan9 => unreachable, + .none => unreachable, + }, + } return @intCast(vaddr); } @@ -1600,15 +1626,13 @@ pub fn updateNav( var debug_wip_nav = if (self.dwarf) |*dwarf| try dwarf.initWipNav(pt, nav_index, sym_index) else null; defer if (debug_wip_nav) |*wip_nav| wip_nav.deinit(); - // TODO implement .debug_info for global variables const res = try codegen.generateSymbol( &elf_file.base, pt, zcu.navSrcLoc(nav_index), Value.fromInterned(nav_init), &code_buffer, - if (debug_wip_nav) |*wip_nav| .{ .dwarf = wip_nav } else .none, - .{ .parent_atom_index = sym_index }, + .{ .atom_index = sym_index }, ); const code = switch (res) { @@ -1691,7 +1715,7 @@ fn updateLazySymbol( &required_alignment, &code_buffer, .none, - .{ .parent_atom_index = symbol_index }, + .{ .atom_index = symbol_index }, ); const code = switch (res) { .ok => code_buffer.items, @@ -1780,8 +1804,7 @@ fn lowerConst( src_loc, val, &code_buffer, - .{ .none = {} }, - .{ .parent_atom_index = sym_index }, + .{ .atom_index = sym_index }, ); const code = switch (res) { .ok => code_buffer.items, diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig index 6a771a3b8452..3ffa9c474581 100644 --- a/src/link/MachO/ZigObject.zig +++ b/src/link/MachO/ZigObject.zig @@ 
-633,20 +633,33 @@ pub fn getNavVAddr( }; const sym = self.symbols.items[sym_index]; const vaddr = sym.getAddress(.{}, macho_file); - const parent_atom = self.symbols.items[reloc_info.parent_atom_index].getAtom(macho_file).?; - try parent_atom.addReloc(macho_file, .{ - .tag = .@"extern", - .offset = @intCast(reloc_info.offset), - .target = sym_index, - .addend = reloc_info.addend, - .type = .unsigned, - .meta = .{ - .pcrel = false, - .has_subtractor = false, - .length = 3, - .symbolnum = @intCast(sym.nlist_idx), + switch (reloc_info.parent) { + .atom_index => |atom_index| { + const parent_atom = self.symbols.items[atom_index].getAtom(macho_file).?; + try parent_atom.addReloc(macho_file, .{ + .tag = .@"extern", + .offset = @intCast(reloc_info.offset), + .target = sym_index, + .addend = reloc_info.addend, + .type = .unsigned, + .meta = .{ + .pcrel = false, + .has_subtractor = false, + .length = 3, + .symbolnum = @intCast(sym.nlist_idx), + }, + }); }, - }); + .debug_output => |debug_output| switch (debug_output) { + .dwarf => |wip_nav| try wip_nav.infoExternalReloc(.{ + .source_off = @intCast(reloc_info.offset), + .target_sym = sym_index, + .target_off = reloc_info.addend, + }), + .plan9 => unreachable, + .none => unreachable, + }, + } return vaddr; } @@ -659,20 +672,33 @@ pub fn getUavVAddr( const sym_index = self.uavs.get(uav).?.symbol_index; const sym = self.symbols.items[sym_index]; const vaddr = sym.getAddress(.{}, macho_file); - const parent_atom = self.symbols.items[reloc_info.parent_atom_index].getAtom(macho_file).?; - try parent_atom.addReloc(macho_file, .{ - .tag = .@"extern", - .offset = @intCast(reloc_info.offset), - .target = sym_index, - .addend = reloc_info.addend, - .type = .unsigned, - .meta = .{ - .pcrel = false, - .has_subtractor = false, - .length = 3, - .symbolnum = @intCast(sym.nlist_idx), + switch (reloc_info.parent) { + .atom_index => |atom_index| { + const parent_atom = self.symbols.items[atom_index].getAtom(macho_file).?; + try 
parent_atom.addReloc(macho_file, .{ + .tag = .@"extern", + .offset = @intCast(reloc_info.offset), + .target = sym_index, + .addend = reloc_info.addend, + .type = .unsigned, + .meta = .{ + .pcrel = false, + .has_subtractor = false, + .length = 3, + .symbolnum = @intCast(sym.nlist_idx), + }, + }); }, - }); + .debug_output => |debug_output| switch (debug_output) { + .dwarf => |wip_nav| try wip_nav.infoExternalReloc(.{ + .source_off = @intCast(reloc_info.offset), + .target_sym = sym_index, + .target_off = reloc_info.addend, + }), + .plan9 => unreachable, + .none => unreachable, + }, + } return vaddr; } @@ -903,8 +929,7 @@ pub fn updateNav( zcu.navSrcLoc(nav_index), Value.fromInterned(nav_init), &code_buffer, - if (debug_wip_nav) |*wip_nav| .{ .dwarf = wip_nav } else .none, - .{ .parent_atom_index = sym_index }, + .{ .atom_index = sym_index }, ); const code = switch (res) { @@ -1212,11 +1237,14 @@ fn lowerConst( const name_str = try self.addString(gpa, name); const sym_index = try self.newSymbolWithAtom(gpa, name_str, macho_file); - const res = try codegen.generateSymbol(&macho_file.base, pt, src_loc, val, &code_buffer, .{ - .none = {}, - }, .{ - .parent_atom_index = sym_index, - }); + const res = try codegen.generateSymbol( + &macho_file.base, + pt, + src_loc, + val, + &code_buffer, + .{ .atom_index = sym_index }, + ); const code = switch (res) { .ok => code_buffer.items, .fail => |em| return .{ .fail = em }, @@ -1378,7 +1406,7 @@ fn updateLazySymbol( &required_alignment, &code_buffer, .none, - .{ .parent_atom_index = symbol_index }, + .{ .atom_index = symbol_index }, ); const code = switch (res) { .ok => code_buffer.items, diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 080b6853e71f..7737c22d05fa 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -422,10 +422,7 @@ pub fn updateFunc(self: *Plan9, pt: Zcu.PerThread, func_index: InternPool.Index, ); const code = switch (res) { .ok => try code_buffer.toOwnedSlice(), - .fail => |em| { - try 
zcu.failed_codegen.put(gpa, func.owner_nav, em); - return; - }, + .fail => |em| return zcu.failed_codegen.put(gpa, func.owner_nav, em), }; self.getAtomPtr(atom_idx).code = .{ .code_ptr = null, @@ -463,15 +460,17 @@ pub fn updateNav(self: *Plan9, pt: Zcu.PerThread, nav_index: InternPool.Nav.Inde var code_buffer = std.ArrayList(u8).init(gpa); defer code_buffer.deinit(); // TODO we need the symbol index for symbol in the table of locals for the containing atom - const res = try codegen.generateSymbol(&self.base, pt, zcu.navSrcLoc(nav_index), nav_init, &code_buffer, .none, .{ - .parent_atom_index = @intCast(atom_idx), - }); + const res = try codegen.generateSymbol( + &self.base, + pt, + zcu.navSrcLoc(nav_index), + nav_init, + &code_buffer, + .{ .atom_index = @intCast(atom_idx) }, + ); const code = switch (res) { .ok => code_buffer.items, - .fail => |em| { - try zcu.failed_codegen.put(gpa, nav_index, em); - return; - }, + .fail => |em| return zcu.failed_codegen.put(gpa, nav_index, em), }; try self.data_nav_table.ensureUnusedCapacity(gpa, 1); const duped_code = try gpa.dupe(u8, code); @@ -1116,7 +1115,7 @@ fn updateLazySymbolAtom(self: *Plan9, pt: Zcu.PerThread, sym: File.LazySymbol, a &required_alignment, &code_buffer, .none, - .{ .parent_atom_index = @as(Atom.Index, @intCast(atom_index)) }, + .{ .atom_index = @intCast(atom_index) }, ); const code = switch (res) { .ok => code_buffer.items, @@ -1373,21 +1372,21 @@ pub fn getNavVAddr( log.debug("getDeclVAddr for {}", .{nav.name.fmt(ip)}); if (ip.indexToKey(nav.status.resolved.val) == .@"extern") { if (nav.name.eqlSlice("etext", ip)) { - try self.addReloc(reloc_info.parent_atom_index, .{ + try self.addReloc(reloc_info.parent.atom_index, .{ .target = undefined, .offset = reloc_info.offset, .addend = reloc_info.addend, .type = .special_etext, }); } else if (nav.name.eqlSlice("edata", ip)) { - try self.addReloc(reloc_info.parent_atom_index, .{ + try self.addReloc(reloc_info.parent.atom_index, .{ .target = undefined, .offset = 
reloc_info.offset, .addend = reloc_info.addend, .type = .special_edata, }); } else if (nav.name.eqlSlice("end", ip)) { - try self.addReloc(reloc_info.parent_atom_index, .{ + try self.addReloc(reloc_info.parent.atom_index, .{ .target = undefined, .offset = reloc_info.offset, .addend = reloc_info.addend, @@ -1400,7 +1399,7 @@ pub fn getNavVAddr( // otherwise, we just add a relocation const atom_index = try self.seeNav(pt, nav_index); // the parent_atom_index in this case is just the decl_index of the parent - try self.addReloc(reloc_info.parent_atom_index, .{ + try self.addReloc(reloc_info.parent.atom_index, .{ .target = atom_index, .offset = reloc_info.offset, .addend = reloc_info.addend, @@ -1435,7 +1434,7 @@ pub fn lowerUav( gop.value_ptr.* = index; // we need to free name latex var code_buffer = std.ArrayList(u8).init(gpa); - const res = try codegen.generateSymbol(&self.base, pt, src_loc, val, &code_buffer, .{ .none = {} }, .{ .parent_atom_index = index }); + const res = try codegen.generateSymbol(&self.base, pt, src_loc, val, &code_buffer, .{ .atom_index = index }); const code = switch (res) { .ok => code_buffer.items, .fail => |em| return .{ .fail = em }, @@ -1459,7 +1458,7 @@ pub fn lowerUav( pub fn getUavVAddr(self: *Plan9, uav: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 { const atom_index = self.uavs.get(uav).?; - try self.addReloc(reloc_info.parent_atom_index, .{ + try self.addReloc(reloc_info.parent.atom_index, .{ .target = atom_index, .offset = reloc_info.offset, .addend = reloc_info.addend, diff --git a/src/link/Wasm/ZigObject.zig b/src/link/Wasm/ZigObject.zig index c0d531582ffd..afb0216fd77c 100644 --- a/src/link/Wasm/ZigObject.zig +++ b/src/link/Wasm/ZigObject.zig @@ -277,8 +277,7 @@ pub fn updateNav( zcu.navSrcLoc(nav_index), nav_init, &code_writer, - .none, - .{ .parent_atom_index = @intFromEnum(atom.sym_index) }, + .{ .atom_index = @intFromEnum(atom.sym_index) }, ); const code = switch (res) { @@ -520,10 +519,7 @@ fn lowerConst( 
src_loc, val, &value_bytes, - .none, - .{ - .parent_atom_index = @intFromEnum(atom.sym_index), - }, + .{ .atom_index = @intFromEnum(atom.sym_index) }, ); break :code switch (result) { .ok => value_bytes.items, @@ -762,8 +758,11 @@ pub fn getNavVAddr( else => {}, } - std.debug.assert(reloc_info.parent_atom_index != 0); - const atom_index = wasm_file.symbol_atom.get(.{ .file = zig_object.index, .index = @enumFromInt(reloc_info.parent_atom_index) }).?; + std.debug.assert(reloc_info.parent.atom_index != 0); + const atom_index = wasm_file.symbol_atom.get(.{ + .file = zig_object.index, + .index = @enumFromInt(reloc_info.parent.atom_index), + }).?; const atom = wasm_file.getAtomPtr(atom_index); const is_wasm32 = target.cpu.arch == .wasm32; if (ip.isFunctionType(ip.getNav(nav_index).typeOf(ip))) { @@ -800,7 +799,10 @@ pub fn getUavVAddr( const atom_index = zig_object.uavs.get(uav).?; const target_symbol_index = @intFromEnum(wasm_file.getAtom(atom_index).sym_index); - const parent_atom_index = wasm_file.symbol_atom.get(.{ .file = zig_object.index, .index = @enumFromInt(reloc_info.parent_atom_index) }).?; + const parent_atom_index = wasm_file.symbol_atom.get(.{ + .file = zig_object.index, + .index = @enumFromInt(reloc_info.parent.atom_index), + }).?; const parent_atom = wasm_file.getAtomPtr(parent_atom_index); const is_wasm32 = target.cpu.arch == .wasm32; const zcu = wasm_file.base.comp.zcu.?; From e048e788862881a42304bff91c2f832a924d77c7 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 9 Sep 2024 03:27:52 -0400 Subject: [PATCH 115/202] Dwarf: implement and test multi array list --- lib/std/hash_map.zig | 2 +- lib/std/multi_array_list.zig | 2 +- src/InternPool.zig | 2 +- src/link/Dwarf.zig | 2 +- test/src/Debugger.zig | 143 +++++++++++++++++++++++++++++++++++ 5 files changed, 147 insertions(+), 4 deletions(-) diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig index a8127601725f..e9ec5c8149c9 100644 --- a/lib/std/hash_map.zig +++ b/lib/std/hash_map.zig @@ -1767,7 
+1767,7 @@ pub fn HashMapUnmanaged( } comptime { - if (!builtin.strip_debug_info) { + if (builtin.zig_backend == .stage2_llvm and !builtin.strip_debug_info) { _ = &dbHelper; } } diff --git a/lib/std/multi_array_list.zig b/lib/std/multi_array_list.zig index 0d9272d4929d..eedc540db27b 100644 --- a/lib/std/multi_array_list.zig +++ b/lib/std/multi_array_list.zig @@ -573,7 +573,7 @@ pub fn MultiArrayList(comptime T: type) type { } comptime { - if (!builtin.strip_debug_info) { + if (builtin.zig_backend == .stage2_llvm and !builtin.strip_debug_info) { _ = &dbHelper; _ = &Slice.dbHelper; } diff --git a/src/InternPool.zig b/src/InternPool.zig index e09576c5aaa4..fbfd29369f5e 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -4723,7 +4723,7 @@ pub const Index = enum(u32) { } comptime { - if (!builtin.strip_debug_info) { + if (builtin.zig_backend == .stage2_llvm and !builtin.strip_debug_info) { _ = &dbHelper; } } diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index ab71e0ec64d2..8fda9a38a328 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -1762,7 +1762,7 @@ pub const WipNav = struct { fn blockValue(wip_nav: *WipNav, src_loc: Zcu.LazySrcLoc, val: Value) UpdateError!void { const ty = val.typeOf(wip_nav.pt.zcu); const diw = wip_nav.debug_info.writer(wip_nav.dwarf.gpa); - const bytes = ty.abiSize(wip_nav.pt.zcu); + const bytes = if (ty.hasRuntimeBits(wip_nav.pt.zcu)) ty.abiSize(wip_nav.pt.zcu) else 0; try uleb128(diw, bytes); if (bytes == 0) return; var dim = wip_nav.debug_info.toManaged(wip_nav.dwarf.gpa); diff --git a/test/src/Debugger.zig b/test/src/Debugger.zig index 977bd4a776df..7df3dedc0c18 100644 --- a/test/src/Debugger.zig +++ b/test/src/Debugger.zig @@ -1325,6 +1325,149 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { \\1 breakpoints deleted; 0 breakpoint locations disabled. 
}, ); + db.addLldbTest( + "multi_array_list", + target, + &.{ + .{ + .path = "main.zig", + .source = + \\const std = @import("std"); + \\fn testMultiArrayList() void {} + \\pub fn main() !void { + \\ const Elem0 = struct { u32, u8, u16 }; + \\ var list0: std.MultiArrayList(Elem0) = .{}; + \\ defer list0.deinit(std.heap.page_allocator); + \\ try list0.setCapacity(std.heap.page_allocator, 8); + \\ list0.appendAssumeCapacity(.{ 1, 2, 3 }); + \\ list0.appendAssumeCapacity(.{ 4, 5, 6 }); + \\ list0.appendAssumeCapacity(.{ 7, 8, 9 }); + \\ + \\ const Elem1 = struct { a: u32, b: u8, c: u16 }; + \\ var list1: std.MultiArrayList(Elem1) = .{}; + \\ defer list1.deinit(std.heap.page_allocator); + \\ try list1.setCapacity(std.heap.page_allocator, 12); + \\ list1.appendAssumeCapacity(.{ .a = 1, .b = 2, .c = 3 }); + \\ list1.appendAssumeCapacity(.{ .a = 4, .b = 5, .c = 6 }); + \\ list1.appendAssumeCapacity(.{ .a = 7, .b = 8, .c = 9 }); + \\ + \\ testMultiArrayList(); + \\} + \\ + , + }, + }, + \\breakpoint set --file main.zig --source-pattern-regexp 'testMultiArrayList\(\);' + \\process launch + \\frame variable --show-types -- list0 list0.len list0.capacity list0[0] list0[1] list0[2] list0.0 list0.1 list0.2 + \\frame variable --show-types -- list1 list1.len list1.capacity list1[0] list1[1] list1[2] list1.a list1.b list1.c + \\breakpoint delete --force 1 + , + &.{ + \\(lldb) frame variable --show-types -- list0 list0.len list0.capacity list0[0] list0[1] list0[2] list0.0 list0.1 list0.2 + \\(std.multi_array_list.MultiArrayList(main.main.Elem0)) list0 = len=3 capacity=8 { + \\ (root.main.main.Elem0) [0] = { + \\ (u32) 0 = 1 + \\ (u8) 1 = 2 + \\ (u16) 2 = 3 + \\ } + \\ (root.main.main.Elem0) [1] = { + \\ (u32) 0 = 4 + \\ (u8) 1 = 5 + \\ (u16) 2 = 6 + \\ } + \\ (root.main.main.Elem0) [2] = { + \\ (u32) 0 = 7 + \\ (u8) 1 = 8 + \\ (u16) 2 = 9 + \\ } + \\} + \\(usize) list0.len = 3 + \\(usize) list0.capacity = 8 + \\(root.main.main.Elem0) list0[0] = { + \\ (u32) 0 = 1 + \\ (u8) 1 = 2 + 
\\ (u16) 2 = 3 + \\} + \\(root.main.main.Elem0) list0[1] = { + \\ (u32) 0 = 4 + \\ (u8) 1 = 5 + \\ (u16) 2 = 6 + \\} + \\(root.main.main.Elem0) list0[2] = { + \\ (u32) 0 = 7 + \\ (u8) 1 = 8 + \\ (u16) 2 = 9 + \\} + \\([3]u32) list0.0 = { + \\ (u32) [0] = 1 + \\ (u32) [1] = 4 + \\ (u32) [2] = 7 + \\} + \\([3]u8) list0.1 = { + \\ (u8) [0] = 2 + \\ (u8) [1] = 5 + \\ (u8) [2] = 8 + \\} + \\([3]u16) list0.2 = { + \\ (u16) [0] = 3 + \\ (u16) [1] = 6 + \\ (u16) [2] = 9 + \\} + \\(lldb) frame variable --show-types -- list1 list1.len list1.capacity list1[0] list1[1] list1[2] list1.a list1.b list1.c + \\(std.multi_array_list.MultiArrayList(main.main.Elem1)) list1 = len=3 capacity=12 { + \\ (root.main.main.Elem1) [0] = { + \\ (u32) a = 1 + \\ (u8) b = 2 + \\ (u16) c = 3 + \\ } + \\ (root.main.main.Elem1) [1] = { + \\ (u32) a = 4 + \\ (u8) b = 5 + \\ (u16) c = 6 + \\ } + \\ (root.main.main.Elem1) [2] = { + \\ (u32) a = 7 + \\ (u8) b = 8 + \\ (u16) c = 9 + \\ } + \\} + \\(usize) list1.len = 3 + \\(usize) list1.capacity = 12 + \\(root.main.main.Elem1) list1[0] = { + \\ (u32) a = 1 + \\ (u8) b = 2 + \\ (u16) c = 3 + \\} + \\(root.main.main.Elem1) list1[1] = { + \\ (u32) a = 4 + \\ (u8) b = 5 + \\ (u16) c = 6 + \\} + \\(root.main.main.Elem1) list1[2] = { + \\ (u32) a = 7 + \\ (u8) b = 8 + \\ (u16) c = 9 + \\} + \\([3]u32) list1.a = { + \\ (u32) [0] = 1 + \\ (u32) [1] = 4 + \\ (u32) [2] = 7 + \\} + \\([3]u8) list1.b = { + \\ (u8) [0] = 2 + \\ (u8) [1] = 5 + \\ (u8) [2] = 8 + \\} + \\([3]u16) list1.c = { + \\ (u16) [0] = 3 + \\ (u16) [1] = 6 + \\ (u16) [2] = 9 + \\} + \\(lldb) breakpoint delete --force 1 + \\1 breakpoints deleted; 0 breakpoint locations disabled. 
+ }, + ); db.addLldbTest( "segmented_list", target, From cdaf3154eee467da887198d15558cb7d83296258 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 9 Sep 2024 10:25:59 -0400 Subject: [PATCH 116/202] Dwarf: implement variables without runtime bits --- src/link/Dwarf.zig | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 8fda9a38a328..1f780affde20 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -2576,7 +2576,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool const nav_gop = try dwarf.navs.getOrPut(dwarf.gpa, nav_index); errdefer _ = dwarf.navs.pop(); - const tag: enum { done, decl_alias, decl_const } = switch (ip.indexToKey(nav_val.toIntern())) { + const tag: enum { done, decl_alias, decl_var, decl_const } = switch (ip.indexToKey(nav_val.toIntern())) { .int_type, .ptr_type, .array_type, @@ -2895,7 +2895,7 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool .aggregate, .un, => .decl_const, - .variable => unreachable, + .variable => .decl_var, .func => |func| tag: { if (nav_gop.found_existing) { const unit_ptr = dwarf.debug_info.section.getUnit(wip_nav.unit); @@ -2966,6 +2966,23 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool try wip_nav.strp(nav.name.toSlice(ip)); try wip_nav.refType(nav_val.toType()); }, + .decl_var => { + const diw = wip_nav.debug_info.writer(dwarf.gpa); + try wip_nav.abbrevCode(.decl_var); + try wip_nav.refType(Type.fromInterned(parent_type)); + assert(wip_nav.debug_info.items.len == DebugInfo.declEntryLineOff(dwarf)); + try diw.writeInt(u32, @intCast(loc.line + 1), dwarf.endian); + try uleb128(diw, loc.column + 1); + try diw.writeByte(accessibility); + try wip_nav.strp(nav.name.toSlice(ip)); + try wip_nav.strp(nav.fqn.toSlice(ip)); + const nav_ty = nav_val.typeOf(zcu); + try wip_nav.refType(nav_ty); + try wip_nav.blockValue(nav_src_loc, nav_val); + 
try uleb128(diw, nav.status.resolved.alignment.toByteUnits() orelse + nav_ty.abiAlignment(zcu).toByteUnits().?); + try diw.writeByte(@intFromBool(false)); + }, .decl_const => { const diw = wip_nav.debug_info.writer(dwarf.gpa); try wip_nav.abbrevCode(.decl_const); From 0f0142527a0f2adc8f2348f0c97db80b0a24d330 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 9 Sep 2024 11:17:12 -0400 Subject: [PATCH 117/202] Dwarf: implement default field values --- src/link/Dwarf.zig | 58 +++++++++++++++++++++++++++++++++------------- 1 file changed, 42 insertions(+), 16 deletions(-) diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 1f780affde20..6efa708d0510 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -2643,7 +2643,14 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool try uleb128(diw, nav_val.toType().abiAlignment(zcu).toByteUnits().?); for (0..loaded_struct.field_types.len) |field_index| { const is_comptime = loaded_struct.fieldIsComptime(ip, field_index); - try wip_nav.abbrevCode(if (is_comptime) .struct_field_comptime else .struct_field); + const field_init = loaded_struct.fieldInit(ip, field_index); + assert(!(is_comptime and field_init == .none)); + try wip_nav.abbrevCode(if (is_comptime) + .struct_field_comptime + else if (field_init != .none) + .struct_field_default + else + .struct_field); if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name| try wip_nav.strp(field_name.toSlice(ip)) else { const field_name = try std.fmt.allocPrint(dwarf.gpa, "{d}", .{field_index}); defer dwarf.gpa.free(field_name); @@ -2651,14 +2658,12 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool } const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); try wip_nav.refType(field_type); - if (is_comptime) try wip_nav.blockValue( - nav_src_loc, - Value.fromInterned(loaded_struct.fieldInit(ip, field_index)), - ) else { + if (!is_comptime) { try uleb128(diw, 
loaded_struct.offsets.get(ip)[field_index]); try uleb128(diw, loaded_struct.fieldAlign(ip, field_index).toByteUnits() orelse field_type.abiAlignment(zcu).toByteUnits().?); } + if (field_init != .none) try wip_nav.blockValue(nav_src_loc, Value.fromInterned(field_init)); } try uleb128(diw, @intFromEnum(AbbrevCode.null)); } @@ -3498,7 +3503,14 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP try uleb128(diw, ty.abiAlignment(zcu).toByteUnits().?); for (0..loaded_struct.field_types.len) |field_index| { const is_comptime = loaded_struct.fieldIsComptime(ip, field_index); - try wip_nav.abbrevCode(if (is_comptime) .struct_field_comptime else .struct_field); + const field_init = loaded_struct.fieldInit(ip, field_index); + assert(!(is_comptime and field_init == .none)); + try wip_nav.abbrevCode(if (is_comptime) + .struct_field_comptime + else if (field_init != .none) + .struct_field_default + else + .struct_field); if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name| try wip_nav.strp(field_name.toSlice(ip)) else { const field_name = try std.fmt.allocPrint(dwarf.gpa, "{d}", .{field_index}); defer dwarf.gpa.free(field_name); @@ -3506,14 +3518,12 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP } const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); try wip_nav.refType(field_type); - if (is_comptime) try wip_nav.blockValue( - ty_src_loc, - Value.fromInterned(loaded_struct.fieldInit(ip, field_index)), - ) else { + if (!is_comptime) { try uleb128(diw, loaded_struct.offsets.get(ip)[field_index]); try uleb128(diw, loaded_struct.fieldAlign(ip, field_index).toByteUnits() orelse field_type.abiAlignment(zcu).toByteUnits().?); } + if (field_init != .none) try wip_nav.blockValue(ty_src_loc, Value.fromInterned(field_init)); } try uleb128(diw, @intFromEnum(AbbrevCode.null)); } @@ -3569,7 +3579,14 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: 
InternP try uleb128(diw, ty.abiAlignment(zcu).toByteUnits().?); for (0..loaded_struct.field_types.len) |field_index| { const is_comptime = loaded_struct.fieldIsComptime(ip, field_index); - try wip_nav.abbrevCode(if (is_comptime) .struct_field_comptime else .struct_field); + const field_init = loaded_struct.fieldInit(ip, field_index); + assert(!(is_comptime and field_init == .none)); + try wip_nav.abbrevCode(if (is_comptime) + .struct_field_comptime + else if (field_init != .none) + .struct_field_default + else + .struct_field); if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name| try wip_nav.strp(field_name.toSlice(ip)) else { const field_name = try std.fmt.allocPrint(dwarf.gpa, "{d}", .{field_index}); defer dwarf.gpa.free(field_name); @@ -3577,14 +3594,12 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP } const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); try wip_nav.refType(field_type); - if (is_comptime) try wip_nav.blockValue( - ty_src_loc, - Value.fromInterned(loaded_struct.fieldInit(ip, field_index)), - ) else { + if (!is_comptime) { try uleb128(diw, loaded_struct.offsets.get(ip)[field_index]); try uleb128(diw, loaded_struct.fieldAlign(ip, field_index).toByteUnits() orelse field_type.abiAlignment(zcu).toByteUnits().?); } + if (field_init != .none) try wip_nav.blockValue(ty_src_loc, Value.fromInterned(field_init)); } try uleb128(diw, @intFromEnum(AbbrevCode.null)); } @@ -4165,6 +4180,7 @@ const AbbrevCode = enum { big_enum_field, generated_field, struct_field, + struct_field_default, struct_field_comptime, packed_struct_field, untagged_union_field, @@ -4412,13 +4428,23 @@ const AbbrevCode = enum { .{ .alignment, .udata }, }, }, + .struct_field_default = .{ + .tag = .member, + .attrs = &.{ + .{ .name, .strp }, + .{ .type, .ref_addr }, + .{ .data_member_location, .udata }, + .{ .alignment, .udata }, + .{ .default_value, .block }, + }, + }, .struct_field_comptime = .{ .tag = 
.member, .attrs = &.{ .{ .const_expr, .flag_present }, .{ .name, .strp }, .{ .type, .ref_addr }, - .{ .default_value, .block }, + .{ .const_value, .block }, }, }, .packed_struct_field = .{ From faafc4132731e854a471ad4c4bb231efb525ea9a Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Mon, 9 Sep 2024 08:14:27 -0400 Subject: [PATCH 118/202] Dwarf: prevent crash on missing field inits Workaround for #21362 --- src/link/Dwarf.zig | 78 ++++++++++++++++++++++++++++++---------------- 1 file changed, 51 insertions(+), 27 deletions(-) diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 6efa708d0510..9ec0fa301288 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -2643,8 +2643,10 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool try uleb128(diw, nav_val.toType().abiAlignment(zcu).toByteUnits().?); for (0..loaded_struct.field_types.len) |field_index| { const is_comptime = loaded_struct.fieldIsComptime(ip, field_index); - const field_init = loaded_struct.fieldInit(ip, field_index); - assert(!(is_comptime and field_init == .none)); + const field_init = if (loaded_struct.haveFieldInits(ip)) + loaded_struct.fieldInit(ip, field_index) + else + .none; try wip_nav.abbrevCode(if (is_comptime) .struct_field_comptime else if (field_init != .none) @@ -2656,14 +2658,20 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool defer dwarf.gpa.free(field_name); try wip_nav.strp(field_name); } - const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); - try wip_nav.refType(field_type); - if (!is_comptime) { - try uleb128(diw, loaded_struct.offsets.get(ip)[field_index]); - try uleb128(diw, loaded_struct.fieldAlign(ip, field_index).toByteUnits() orelse - field_type.abiAlignment(zcu).toByteUnits().?); + if (is_comptime and field_init == .none) { + // workaround frontend bug + try wip_nav.refType(Type.void); + try wip_nav.blockValue(nav_src_loc, Value.void); + } else { + const field_type = 
Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); + try wip_nav.refType(field_type); + if (!is_comptime) { + try uleb128(diw, loaded_struct.offsets.get(ip)[field_index]); + try uleb128(diw, loaded_struct.fieldAlign(ip, field_index).toByteUnits() orelse + field_type.abiAlignment(zcu).toByteUnits().?); + } + if (field_init != .none) try wip_nav.blockValue(nav_src_loc, Value.fromInterned(field_init)); } - if (field_init != .none) try wip_nav.blockValue(nav_src_loc, Value.fromInterned(field_init)); } try uleb128(diw, @intFromEnum(AbbrevCode.null)); } @@ -3503,8 +3511,10 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP try uleb128(diw, ty.abiAlignment(zcu).toByteUnits().?); for (0..loaded_struct.field_types.len) |field_index| { const is_comptime = loaded_struct.fieldIsComptime(ip, field_index); - const field_init = loaded_struct.fieldInit(ip, field_index); - assert(!(is_comptime and field_init == .none)); + const field_init = if (loaded_struct.haveFieldInits(ip)) + loaded_struct.fieldInit(ip, field_index) + else + .none; try wip_nav.abbrevCode(if (is_comptime) .struct_field_comptime else if (field_init != .none) @@ -3516,14 +3526,20 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP defer dwarf.gpa.free(field_name); try wip_nav.strp(field_name); } - const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); - try wip_nav.refType(field_type); - if (!is_comptime) { - try uleb128(diw, loaded_struct.offsets.get(ip)[field_index]); - try uleb128(diw, loaded_struct.fieldAlign(ip, field_index).toByteUnits() orelse - field_type.abiAlignment(zcu).toByteUnits().?); + if (is_comptime and field_init == .none) { + // workaround frontend bug + try wip_nav.refType(Type.void); + try wip_nav.blockValue(ty_src_loc, Value.void); + } else { + const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); + try wip_nav.refType(field_type); + if (!is_comptime) 
{ + try uleb128(diw, loaded_struct.offsets.get(ip)[field_index]); + try uleb128(diw, loaded_struct.fieldAlign(ip, field_index).toByteUnits() orelse + field_type.abiAlignment(zcu).toByteUnits().?); + } + if (field_init != .none) try wip_nav.blockValue(ty_src_loc, Value.fromInterned(field_init)); } - if (field_init != .none) try wip_nav.blockValue(ty_src_loc, Value.fromInterned(field_init)); } try uleb128(diw, @intFromEnum(AbbrevCode.null)); } @@ -3579,8 +3595,10 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP try uleb128(diw, ty.abiAlignment(zcu).toByteUnits().?); for (0..loaded_struct.field_types.len) |field_index| { const is_comptime = loaded_struct.fieldIsComptime(ip, field_index); - const field_init = loaded_struct.fieldInit(ip, field_index); - assert(!(is_comptime and field_init == .none)); + const field_init = if (loaded_struct.haveFieldInits(ip)) + loaded_struct.fieldInit(ip, field_index) + else + .none; try wip_nav.abbrevCode(if (is_comptime) .struct_field_comptime else if (field_init != .none) @@ -3592,14 +3610,20 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP defer dwarf.gpa.free(field_name); try wip_nav.strp(field_name); } - const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); - try wip_nav.refType(field_type); - if (!is_comptime) { - try uleb128(diw, loaded_struct.offsets.get(ip)[field_index]); - try uleb128(diw, loaded_struct.fieldAlign(ip, field_index).toByteUnits() orelse - field_type.abiAlignment(zcu).toByteUnits().?); + if (is_comptime and field_init == .none) { + // workaround frontend bug + try wip_nav.refType(Type.void); + try wip_nav.blockValue(ty_src_loc, Value.void); + } else { + const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); + try wip_nav.refType(field_type); + if (!is_comptime) { + try uleb128(diw, loaded_struct.offsets.get(ip)[field_index]); + try uleb128(diw, loaded_struct.fieldAlign(ip, 
field_index).toByteUnits() orelse + field_type.abiAlignment(zcu).toByteUnits().?); + } + if (field_init != .none) try wip_nav.blockValue(ty_src_loc, Value.fromInterned(field_init)); } - if (field_init != .none) try wip_nav.blockValue(ty_src_loc, Value.fromInterned(field_init)); } try uleb128(diw, @intFromEnum(AbbrevCode.null)); } From d5a7fcfc13b8aa65a56cfa12c235b207ab6ae949 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 10 Sep 2024 08:26:59 -0400 Subject: [PATCH 119/202] Dwarf: implement and test multi array list slices --- test/src/Debugger.zig | 151 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 131 insertions(+), 20 deletions(-) diff --git a/test/src/Debugger.zig b/test/src/Debugger.zig index 7df3dedc0c18..c83e6f81a424 100644 --- a/test/src/Debugger.zig +++ b/test/src/Debugger.zig @@ -538,7 +538,7 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { \\frame variable null_u32 maybe_u32 nonnull_u32 \\breakpoint delete --force 1 \\ - \\breakpoint set --file optionals.zig --source-pattern-regexp '_ = .{ &null_u32, &nonnull_u32 };' + \\breakpoint set --file optionals.zig --source-pattern-regexp '_ = \.{ &null_u32, &nonnull_u32 };' \\process continue \\frame variable --show-types null_u32 maybe_u32 nonnull_u32 \\breakpoint delete --force 2 @@ -1333,50 +1333,61 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { .path = "main.zig", .source = \\const std = @import("std"); - \\fn testMultiArrayList() void {} + \\const Elem0 = struct { u32, u8, u16 }; + \\const Elem1 = struct { a: u32, b: u8, c: u16 }; + \\fn testMultiArrayList( + \\ list0: std.MultiArrayList(Elem0), + \\ slice0: std.MultiArrayList(Elem0).Slice, + \\ list1: std.MultiArrayList(Elem1), + \\ slice1: std.MultiArrayList(Elem1).Slice, + \\) void { + \\ _ = .{ list0, slice0, list1, slice1 }; + \\} \\pub fn main() !void { - \\ const Elem0 = struct { u32, u8, u16 }; \\ var list0: std.MultiArrayList(Elem0) = .{}; \\ defer list0.deinit(std.heap.page_allocator); \\ try 
list0.setCapacity(std.heap.page_allocator, 8); \\ list0.appendAssumeCapacity(.{ 1, 2, 3 }); \\ list0.appendAssumeCapacity(.{ 4, 5, 6 }); \\ list0.appendAssumeCapacity(.{ 7, 8, 9 }); + \\ const slice0 = list0.slice(); \\ - \\ const Elem1 = struct { a: u32, b: u8, c: u16 }; \\ var list1: std.MultiArrayList(Elem1) = .{}; \\ defer list1.deinit(std.heap.page_allocator); \\ try list1.setCapacity(std.heap.page_allocator, 12); \\ list1.appendAssumeCapacity(.{ .a = 1, .b = 2, .c = 3 }); \\ list1.appendAssumeCapacity(.{ .a = 4, .b = 5, .c = 6 }); \\ list1.appendAssumeCapacity(.{ .a = 7, .b = 8, .c = 9 }); + \\ const slice1 = list1.slice(); \\ - \\ testMultiArrayList(); + \\ testMultiArrayList(list0, slice0, list1, slice1); \\} \\ , }, }, - \\breakpoint set --file main.zig --source-pattern-regexp 'testMultiArrayList\(\);' + \\breakpoint set --file main.zig --source-pattern-regexp '_ = \.{ list0, slice0, list1, slice1 };' \\process launch \\frame variable --show-types -- list0 list0.len list0.capacity list0[0] list0[1] list0[2] list0.0 list0.1 list0.2 + \\frame variable --show-types -- slice0 slice0.len slice0.capacity slice0[0] slice0[1] slice0[2] slice0.0 slice0.1 slice0.2 \\frame variable --show-types -- list1 list1.len list1.capacity list1[0] list1[1] list1[2] list1.a list1.b list1.c + \\frame variable --show-types -- slice1 slice1.len slice1.capacity slice1[0] slice1[1] slice1[2] slice1.a slice1.b slice1.c \\breakpoint delete --force 1 , &.{ \\(lldb) frame variable --show-types -- list0 list0.len list0.capacity list0[0] list0[1] list0[2] list0.0 list0.1 list0.2 - \\(std.multi_array_list.MultiArrayList(main.main.Elem0)) list0 = len=3 capacity=8 { - \\ (root.main.main.Elem0) [0] = { + \\(std.multi_array_list.MultiArrayList(main.Elem0)) list0 = len=3 capacity=8 { + \\ (root.main.Elem0) [0] = { \\ (u32) 0 = 1 \\ (u8) 1 = 2 \\ (u16) 2 = 3 \\ } - \\ (root.main.main.Elem0) [1] = { + \\ (root.main.Elem0) [1] = { \\ (u32) 0 = 4 \\ (u8) 1 = 5 \\ (u16) 2 = 6 \\ } - \\ 
(root.main.main.Elem0) [2] = { + \\ (root.main.Elem0) [2] = { \\ (u32) 0 = 7 \\ (u8) 1 = 8 \\ (u16) 2 = 9 @@ -1384,17 +1395,17 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { \\} \\(usize) list0.len = 3 \\(usize) list0.capacity = 8 - \\(root.main.main.Elem0) list0[0] = { + \\(root.main.Elem0) list0[0] = { \\ (u32) 0 = 1 \\ (u8) 1 = 2 \\ (u16) 2 = 3 \\} - \\(root.main.main.Elem0) list0[1] = { + \\(root.main.Elem0) list0[1] = { \\ (u32) 0 = 4 \\ (u8) 1 = 5 \\ (u16) 2 = 6 \\} - \\(root.main.main.Elem0) list0[2] = { + \\(root.main.Elem0) list0[2] = { \\ (u32) 0 = 7 \\ (u8) 1 = 8 \\ (u16) 2 = 9 @@ -1414,19 +1425,69 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { \\ (u16) [1] = 6 \\ (u16) [2] = 9 \\} + \\(lldb) frame variable --show-types -- slice0 slice0.len slice0.capacity slice0[0] slice0[1] slice0[2] slice0.0 slice0.1 slice0.2 + \\(std.multi_array_list.MultiArrayList(main.Elem0).Slice) slice0 = len=3 capacity=8 { + \\ (root.main.Elem0) [0] = { + \\ (u32) 0 = 1 + \\ (u8) 1 = 2 + \\ (u16) 2 = 3 + \\ } + \\ (root.main.Elem0) [1] = { + \\ (u32) 0 = 4 + \\ (u8) 1 = 5 + \\ (u16) 2 = 6 + \\ } + \\ (root.main.Elem0) [2] = { + \\ (u32) 0 = 7 + \\ (u8) 1 = 8 + \\ (u16) 2 = 9 + \\ } + \\} + \\(usize) slice0.len = 3 + \\(usize) slice0.capacity = 8 + \\(root.main.Elem0) slice0[0] = { + \\ (u32) 0 = 1 + \\ (u8) 1 = 2 + \\ (u16) 2 = 3 + \\} + \\(root.main.Elem0) slice0[1] = { + \\ (u32) 0 = 4 + \\ (u8) 1 = 5 + \\ (u16) 2 = 6 + \\} + \\(root.main.Elem0) slice0[2] = { + \\ (u32) 0 = 7 + \\ (u8) 1 = 8 + \\ (u16) 2 = 9 + \\} + \\([3]u32) slice0.0 = { + \\ (u32) [0] = 1 + \\ (u32) [1] = 4 + \\ (u32) [2] = 7 + \\} + \\([3]u8) slice0.1 = { + \\ (u8) [0] = 2 + \\ (u8) [1] = 5 + \\ (u8) [2] = 8 + \\} + \\([3]u16) slice0.2 = { + \\ (u16) [0] = 3 + \\ (u16) [1] = 6 + \\ (u16) [2] = 9 + \\} \\(lldb) frame variable --show-types -- list1 list1.len list1.capacity list1[0] list1[1] list1[2] list1.a list1.b list1.c - 
\\(std.multi_array_list.MultiArrayList(main.main.Elem1)) list1 = len=3 capacity=12 { - \\ (root.main.main.Elem1) [0] = { + \\(std.multi_array_list.MultiArrayList(main.Elem1)) list1 = len=3 capacity=12 { + \\ (root.main.Elem1) [0] = { \\ (u32) a = 1 \\ (u8) b = 2 \\ (u16) c = 3 \\ } - \\ (root.main.main.Elem1) [1] = { + \\ (root.main.Elem1) [1] = { \\ (u32) a = 4 \\ (u8) b = 5 \\ (u16) c = 6 \\ } - \\ (root.main.main.Elem1) [2] = { + \\ (root.main.Elem1) [2] = { \\ (u32) a = 7 \\ (u8) b = 8 \\ (u16) c = 9 @@ -1434,17 +1495,17 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { \\} \\(usize) list1.len = 3 \\(usize) list1.capacity = 12 - \\(root.main.main.Elem1) list1[0] = { + \\(root.main.Elem1) list1[0] = { \\ (u32) a = 1 \\ (u8) b = 2 \\ (u16) c = 3 \\} - \\(root.main.main.Elem1) list1[1] = { + \\(root.main.Elem1) list1[1] = { \\ (u32) a = 4 \\ (u8) b = 5 \\ (u16) c = 6 \\} - \\(root.main.main.Elem1) list1[2] = { + \\(root.main.Elem1) list1[2] = { \\ (u32) a = 7 \\ (u8) b = 8 \\ (u16) c = 9 @@ -1464,6 +1525,56 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { \\ (u16) [1] = 6 \\ (u16) [2] = 9 \\} + \\(lldb) frame variable --show-types -- slice1 slice1.len slice1.capacity slice1[0] slice1[1] slice1[2] slice1.a slice1.b slice1.c + \\(std.multi_array_list.MultiArrayList(main.Elem1).Slice) slice1 = len=3 capacity=12 { + \\ (root.main.Elem1) [0] = { + \\ (u32) a = 1 + \\ (u8) b = 2 + \\ (u16) c = 3 + \\ } + \\ (root.main.Elem1) [1] = { + \\ (u32) a = 4 + \\ (u8) b = 5 + \\ (u16) c = 6 + \\ } + \\ (root.main.Elem1) [2] = { + \\ (u32) a = 7 + \\ (u8) b = 8 + \\ (u16) c = 9 + \\ } + \\} + \\(usize) slice1.len = 3 + \\(usize) slice1.capacity = 12 + \\(root.main.Elem1) slice1[0] = { + \\ (u32) a = 1 + \\ (u8) b = 2 + \\ (u16) c = 3 + \\} + \\(root.main.Elem1) slice1[1] = { + \\ (u32) a = 4 + \\ (u8) b = 5 + \\ (u16) c = 6 + \\} + \\(root.main.Elem1) slice1[2] = { + \\ (u32) a = 7 + \\ (u8) b = 8 + \\ (u16) c = 9 + \\} + \\([3]u32) slice1.a = { + 
\\ (u32) [0] = 1 + \\ (u32) [1] = 4 + \\ (u32) [2] = 7 + \\} + \\([3]u8) slice1.b = { + \\ (u8) [0] = 2 + \\ (u8) [1] = 5 + \\ (u8) [2] = 8 + \\} + \\([3]u16) slice1.c = { + \\ (u16) [0] = 3 + \\ (u16) [1] = 6 + \\ (u16) [2] = 9 + \\} \\(lldb) breakpoint delete --force 1 \\1 breakpoints deleted; 0 breakpoint locations disabled. }, From 6aa6d088d93174767d3a8387d536f4083c39e14f Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Tue, 10 Sep 2024 12:15:51 -0400 Subject: [PATCH 120/202] Dwarf: implement and test hash maps --- lib/std/hash_map.zig | 8 ++- test/src/Debugger.zig | 129 ++++++++++++++++++++++++++++++++---------- 2 files changed, 104 insertions(+), 33 deletions(-) diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig index e9ec5c8149c9..54b85e4b3d85 100644 --- a/lib/std/hash_map.zig +++ b/lib/std/hash_map.zig @@ -1767,9 +1767,11 @@ pub fn HashMapUnmanaged( } comptime { - if (builtin.zig_backend == .stage2_llvm and !builtin.strip_debug_info) { - _ = &dbHelper; - } + if (!builtin.strip_debug_info) _ = switch (builtin.zig_backend) { + .stage2_llvm => &dbHelper, + .stage2_x86_64 => KV, + else => {}, + }; } }; } diff --git a/test/src/Debugger.zig b/test/src/Debugger.zig index c83e6f81a424..6a930bf9dd32 100644 --- a/test/src/Debugger.zig +++ b/test/src/Debugger.zig @@ -107,11 +107,11 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { }, \\breakpoint set --file basic.zig --source-pattern-regexp '_ = basic;' \\process launch - \\frame variable --show-types basic + \\frame variable --show-types -- basic \\breakpoint delete --force 1 , &.{ - \\(lldb) frame variable --show-types basic + \\(lldb) frame variable --show-types -- basic \\(root.basic.Basic) basic = { \\ (void) void = {} \\ (bool) bool_false = false @@ -243,11 +243,11 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { }, \\breakpoint set --file pointers.zig --source-pattern-regexp '_ = pointers;' \\process launch - \\frame variable --show-types pointers + \\frame variable 
--show-types -- pointers \\breakpoint delete --force 1 , &.{ - \\(lldb) frame variable --show-types pointers + \\(lldb) frame variable --show-types -- pointers \\(root.pointers.Pointers) pointers = { \\ (*u32) single = 0x0000000000001010 \\ (*const u32) single_const = 0x0000000000001014 @@ -330,13 +330,13 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { }, \\breakpoint set --file strings.zig --source-pattern-regexp '_ = strings;' \\process launch - \\frame variable --show-types strings.slice - \\frame variable --show-types --format character strings.slice - \\frame variable --show-types --format c-string strings + \\frame variable --show-types -- strings.slice + \\frame variable --show-types --format character -- strings.slice + \\frame variable --show-types --format c-string -- strings \\breakpoint delete --force 1 , &.{ - \\(lldb) frame variable --show-types strings.slice + \\(lldb) frame variable --show-types -- strings.slice \\([:0]const u8) strings.slice = len=9 { \\ (u8) [0] = 115 \\ (u8) [1] = 108 @@ -348,7 +348,7 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { \\ (u8) [7] = 92 \\ (u8) [8] = 0 \\} - \\(lldb) frame variable --show-types --format character strings.slice + \\(lldb) frame variable --show-types --format character -- strings.slice \\([:0]const u8) strings.slice = len=9 { \\ (u8) [0] = 's' \\ (u8) [1] = 'l' @@ -360,7 +360,7 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { \\ (u8) [7] = '\\' \\ (u8) [8] = '\x00' \\} - \\(lldb) frame variable --show-types --format c-string strings + \\(lldb) frame variable --show-types --format c-string -- strings \\(root.strings.Strings) strings = { \\ ([*c]const u8) c_ptr = "c_ptr\x07\x08\t" \\ ([*:0]const u8) many_ptr = "many_ptr\n\x0b\x0c" @@ -412,7 +412,7 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { \\breakpoint set --file enums.zig --source-pattern-regexp '_ = enums;' \\process launch \\expression --show-types -- Enums - \\frame variable 
--show-types enums + \\frame variable --show-types -- enums \\breakpoint delete --force 1 , &.{ @@ -432,7 +432,7 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { \\ (root.enums.Enums.Three) third = .third \\ } \\} - \\(lldb) frame variable --show-types enums + \\(lldb) frame variable --show-types -- enums \\(root.enums.Enums) enums = { \\ (root.enums.Enums.Zero) zero = @enumFromInt(13) \\ (root.enums.Enums.One) one = .first @@ -476,7 +476,7 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { \\breakpoint set --file errors.zig --source-pattern-regexp '_ = errors;' \\process launch \\expression --show-types -- Errors - \\frame variable --show-types errors + \\frame variable --show-types -- errors \\breakpoint delete --force 1 , &.{ @@ -496,7 +496,7 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { \\ (error{One,Two,Three}) Three = error.Three \\ } \\} - \\(lldb) frame variable --show-types errors + \\(lldb) frame variable --show-types -- errors \\(root.errors.Errors) errors = { \\ (error{One}) one = error.One \\ (error{One,Two}) two = error.Two @@ -535,23 +535,23 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { }, \\breakpoint set --file optionals.zig --source-pattern-regexp 'maybe_u32 = 123;' \\process launch - \\frame variable null_u32 maybe_u32 nonnull_u32 + \\frame variable -- null_u32 maybe_u32 nonnull_u32 \\breakpoint delete --force 1 \\ \\breakpoint set --file optionals.zig --source-pattern-regexp '_ = \.{ &null_u32, &nonnull_u32 };' \\process continue - \\frame variable --show-types null_u32 maybe_u32 nonnull_u32 + \\frame variable --show-types -- null_u32 maybe_u32 nonnull_u32 \\breakpoint delete --force 2 , &.{ - \\(lldb) frame variable null_u32 maybe_u32 nonnull_u32 + \\(lldb) frame variable -- null_u32 maybe_u32 nonnull_u32 \\(?u32) null_u32 = null \\(?u32) maybe_u32 = null \\(?u32) nonnull_u32 = (nonnull_u32.? 
= 456) \\(lldb) breakpoint delete --force 1 \\1 breakpoints deleted; 0 breakpoint locations disabled. , - \\(lldb) frame variable --show-types null_u32 maybe_u32 nonnull_u32 + \\(lldb) frame variable --show-types -- null_u32 maybe_u32 nonnull_u32 \\(?u32) null_u32 = null \\(?u32) maybe_u32 = { \\ (u32) maybe_u32.? = 123 @@ -605,7 +605,7 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { \\breakpoint set --file unions.zig --source-pattern-regexp '_ = unions;' \\process launch \\expression --show-types -- Unions - \\frame variable --show-types unions + \\frame variable --show-types -- unions \\breakpoint delete --force 1 , &.{ @@ -628,7 +628,7 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { \\ (@typeInfo(unions.Unions.Tagged).@"union".tag_type.?) eu = .eu \\ } \\} - \\(lldb) frame variable --show-types unions + \\(lldb) frame variable --show-types -- unions \\(root.unions.Unions) unions = { \\ (root.unions.Unions.Untagged) untagged = { \\ (u32) u32 = 3217031168 @@ -694,17 +694,17 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { }, \\breakpoint set --file storage.zig --source-pattern-regexp 'local_var = local_var;' \\process launch - \\target variable --show-types --format hex global_const global_var global_threadlocal1 global_threadlocal2 - \\frame variable --show-types --format hex param1 param2 param3 param4 param5 param6 param7 param8 local_comptime_val local_comptime_ptr.0 local_const local_var + \\target variable --show-types --format hex -- global_const global_var global_threadlocal1 global_threadlocal2 + \\frame variable --show-types --format hex -- param1 param2 param3 param4 param5 param6 param7 param8 local_comptime_val local_comptime_ptr.0 local_const local_var \\breakpoint delete --force 1 , &.{ - \\(lldb) target variable --show-types --format hex global_const global_var global_threadlocal1 global_threadlocal2 + \\(lldb) target variable --show-types --format hex -- global_const global_var 
global_threadlocal1 global_threadlocal2 \\(u64) global_const = 0x19e50dc8d6002077 \\(u64) global_var = 0xcc423cec08622e32 \\(u64) global_threadlocal1 = 0xb4d643528c042121 \\(u64) global_threadlocal2 = 0x43faea1cf5ad7a22 - \\(lldb) frame variable --show-types --format hex param1 param2 param3 param4 param5 param6 param7 param8 local_comptime_val local_comptime_ptr.0 local_const local_var + \\(lldb) frame variable --show-types --format hex -- param1 param2 param3 param4 param5 param6 param7 param8 local_comptime_val local_comptime_ptr.0 local_const local_var \\(u64) param1 = 0x6a607e08125c7e00 \\(u64) param2 = 0x98944cb2a45a8b51 \\(u64) param3 = 0xa320cf10601ee6fb @@ -1305,26 +1305,95 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { }, \\breakpoint set --file main.zig --source-pattern-regexp 'x = fabsf\(x\);' \\process launch - \\frame variable x + \\frame variable -- x \\breakpoint delete --force 1 \\ \\breakpoint set --file main.zig --source-pattern-regexp '_ = &x;' \\process continue - \\frame variable x + \\frame variable -- x \\breakpoint delete --force 2 , &.{ - \\(lldb) frame variable x + \\(lldb) frame variable -- x \\(f32) x = -1234.5 \\(lldb) breakpoint delete --force 1 \\1 breakpoints deleted; 0 breakpoint locations disabled. , - \\(lldb) frame variable x + \\(lldb) frame variable -- x \\(f32) x = 1234.5 \\(lldb) breakpoint delete --force 2 \\1 breakpoints deleted; 0 breakpoint locations disabled. 
}, ); + db.addLldbTest( + "hash_map", + target, + &.{ + .{ + .path = "main.zig", + .source = + \\const std = @import("std"); + \\const Context = struct { + \\ pub fn hash(_: Context, key: u32) Map.Hash { + \\ return key; + \\ } + \\ pub fn eql(_: Context, lhs: u32, rhs: u32) bool { + \\ return lhs == rhs; + \\ } + \\}; + \\const Map = std.HashMap(u32, u32, Context, 63); + \\fn testHashMap(map: Map) void { + \\ _ = map; + \\} + \\pub fn main() !void { + \\ var map = Map.init(std.heap.page_allocator); + \\ defer map.deinit(); + \\ try map.ensureTotalCapacity(10); + \\ map.putAssumeCapacity(0, 1); + \\ map.putAssumeCapacity(2, 3); + \\ map.putAssumeCapacity(4, 5); + \\ map.putAssumeCapacity(6, 7); + \\ map.putAssumeCapacity(8, 9); + \\ + \\ testHashMap(map); + \\} + \\ + , + }, + }, + \\breakpoint set --file main.zig --source-pattern-regexp '_ = map;' + \\process launch + \\frame variable --show-types -- map.unmanaged + \\breakpoint delete --force 1 + , + &.{ + \\(lldb) frame variable --show-types -- map.unmanaged + \\(std.hash_map.HashMapUnmanaged(u32,u32,main.Context,63)) map.unmanaged = len=5 capacity=16 { + \\ (std.hash_map.HashMapUnmanaged(u32,u32,main.Context,63).KV) [0] = { + \\ (u32) key = 0 + \\ (u32) value = 1 + \\ } + \\ (std.hash_map.HashMapUnmanaged(u32,u32,main.Context,63).KV) [1] = { + \\ (u32) key = 2 + \\ (u32) value = 3 + \\ } + \\ (std.hash_map.HashMapUnmanaged(u32,u32,main.Context,63).KV) [2] = { + \\ (u32) key = 4 + \\ (u32) value = 5 + \\ } + \\ (std.hash_map.HashMapUnmanaged(u32,u32,main.Context,63).KV) [3] = { + \\ (u32) key = 6 + \\ (u32) value = 7 + \\ } + \\ (std.hash_map.HashMapUnmanaged(u32,u32,main.Context,63).KV) [4] = { + \\ (u32) key = 8 + \\ (u32) value = 9 + \\ } + \\} + \\(lldb) breakpoint delete --force 1 + \\1 breakpoints deleted; 0 breakpoint locations disabled. 
+ }, + ); db.addLldbTest( "multi_array_list", target, @@ -1615,11 +1684,11 @@ pub fn addTestsForTarget(db: *Debugger, target: Target) void { }, \\breakpoint set --file main.zig --source-pattern-regexp 'testSegmentedList\(\);' \\process launch - \\frame variable list0 list1 list2 list4 + \\frame variable -- list0 list1 list2 list4 \\breakpoint delete --force 1 , &.{ - \\(lldb) frame variable list0 list1 list2 list4 + \\(lldb) frame variable -- list0 list1 list2 list4 \\(std.segmented_list.SegmentedList(usize,0)) list0 = len=32 { \\ [0] = 0 \\ [1] = 1 From 36b89101df52121ffbdc08d6a77b939d05821080 Mon Sep 17 00:00:00 2001 From: Eric Petersen Date: Tue, 10 Sep 2024 16:07:11 -0700 Subject: [PATCH 121/202] tokenizer: use labeled switch statements --- lib/std/zig/tokenizer.zig | 1014 ++++++++++++++++--------------------- 1 file changed, 450 insertions(+), 564 deletions(-) diff --git a/lib/std/zig/tokenizer.zig b/lib/std/zig/tokenizer.zig index 05c0f8ed89cd..06c6b859ac68 100644 --- a/lib/std/zig/tokenizer.zig +++ b/lib/std/zig/tokenizer.zig @@ -402,7 +402,6 @@ pub const Tokenizer = struct { /// After this returns invalid, it will reset on the next newline, returning tokens starting from there. /// An eof token will always be returned at the end. 
pub fn next(self: *Tokenizer) Token { - var state: State = .start; var result: Token = .{ .tag = undefined, .loc = .{ @@ -410,676 +409,557 @@ pub const Tokenizer = struct { .end = undefined, }, }; - while (true) : (self.index += 1) { - const c = self.buffer[self.index]; - switch (state) { - .start => switch (c) { - 0 => { - if (self.index == self.buffer.len) return .{ + state: switch (State.start) { + .start => switch (self.buffer[self.index]) { + 0 => { + if (self.index == self.buffer.len) { + return .{ .tag = .eof, .loc = .{ .start = self.index, .end = self.index, }, }; - state = .invalid; - }, - ' ', '\n', '\t', '\r' => { - result.loc.start = self.index + 1; - }, - '"' => { - state = .string_literal; - result.tag = .string_literal; - }, - '\'' => { - state = .char_literal; - result.tag = .char_literal; - }, - 'a'...'z', 'A'...'Z', '_' => { - state = .identifier; - result.tag = .identifier; - }, - '@' => { - state = .saw_at_sign; - }, - '=' => { - state = .equal; - }, - '!' => { - state = .bang; - }, - '|' => { - state = .pipe; - }, - '(' => { - result.tag = .l_paren; - self.index += 1; - break; - }, - ')' => { - result.tag = .r_paren; - self.index += 1; - break; - }, - '[' => { - result.tag = .l_bracket; - self.index += 1; - break; - }, - ']' => { - result.tag = .r_bracket; - self.index += 1; - break; - }, - ';' => { - result.tag = .semicolon; - self.index += 1; - break; - }, - ',' => { - result.tag = .comma; - self.index += 1; - break; - }, - '?' 
=> { - result.tag = .question_mark; - self.index += 1; - break; - }, - ':' => { - result.tag = .colon; - self.index += 1; - break; - }, - '%' => { - state = .percent; - }, - '*' => { - state = .asterisk; - }, - '+' => { - state = .plus; - }, - '<' => { - state = .angle_bracket_left; - }, - '>' => { - state = .angle_bracket_right; - }, - '^' => { - state = .caret; - }, - '\\' => { - state = .backslash; - result.tag = .multiline_string_literal_line; - }, - '{' => { - result.tag = .l_brace; - self.index += 1; - break; - }, - '}' => { - result.tag = .r_brace; - self.index += 1; - break; - }, - '~' => { - result.tag = .tilde; - self.index += 1; - break; - }, - '.' => { - state = .period; - }, - '-' => { - state = .minus; - }, - '/' => { - state = .slash; - }, - '&' => { - state = .ampersand; - }, - '0'...'9' => { - state = .int; - result.tag = .number_literal; - }, - else => { - state = .invalid; - }, + } else { + continue :state .invalid; + } + }, + ' ', '\n', '\t', '\r' => { + self.index += 1; + result.loc.start = self.index; + continue :state .start; + }, + '"' => { + result.tag = .string_literal; + continue :state .string_literal; + }, + '\'' => { + result.tag = .char_literal; + continue :state .char_literal; }, + 'a'...'z', 'A'...'Z', '_' => { + result.tag = .identifier; + continue :state .identifier; + }, + '@' => continue :state .saw_at_sign, + '=' => continue :state .equal, + '!' => continue :state .bang, + '|' => continue :state .pipe, + '(' => { + result.tag = .l_paren; + self.index += 1; + }, + ')' => { + result.tag = .r_paren; + self.index += 1; + }, + '[' => { + result.tag = .l_bracket; + self.index += 1; + }, + ']' => { + result.tag = .r_bracket; + self.index += 1; + }, + ';' => { + result.tag = .semicolon; + self.index += 1; + }, + ',' => { + result.tag = .comma; + self.index += 1; + }, + '?' 
=> { + result.tag = .question_mark; + self.index += 1; + }, + ':' => { + result.tag = .colon; + self.index += 1; + }, + '%' => continue :state .percent, + '*' => continue :state .asterisk, + '+' => continue :state .plus, + '<' => continue :state .angle_bracket_left, + '>' => continue :state .angle_bracket_right, + '^' => continue :state .caret, + '\\' => { + result.tag = .multiline_string_literal_line; + continue :state .backslash; + }, + '{' => { + result.tag = .l_brace; + self.index += 1; + }, + '}' => { + result.tag = .r_brace; + self.index += 1; + }, + '~' => { + result.tag = .tilde; + self.index += 1; + }, + '.' => continue :state .period, + '-' => continue :state .minus, + '/' => continue :state .slash, + '&' => continue :state .ampersand, + '0'...'9' => { + result.tag = .number_literal; + self.index += 1; + continue :state .int; + }, + else => continue :state .invalid, + }, - .expect_newline => switch (c) { + .expect_newline => { + self.index += 1; + switch (self.buffer[self.index]) { 0 => { if (self.index == self.buffer.len) { result.tag = .invalid; - break; + } else { + continue :state .invalid; } - state = .invalid; }, '\n' => { - result.loc.start = self.index + 1; - state = .start; - }, - else => { - state = .invalid; + self.index += 1; + result.loc.start = self.index; + continue :state .start; }, - }, + else => continue :state .invalid, + } + }, - .invalid => switch (c) { + .invalid => { + self.index += 1; + switch (self.buffer[self.index]) { 0 => if (self.index == self.buffer.len) { result.tag = .invalid; - break; - }, - '\n' => { - result.tag = .invalid; - break; }, - else => continue, - }, + '\n' => result.tag = .invalid, + else => continue :state .invalid, + } + }, - .saw_at_sign => switch (c) { - 0, '\n' => { - result.tag = .invalid; - break; - }, + .saw_at_sign => { + self.index += 1; + switch (self.buffer[self.index]) { + 0, '\n' => result.tag = .invalid, '"' => { result.tag = .identifier; - state = .string_literal; + continue :state 
.string_literal; }, 'a'...'z', 'A'...'Z', '_' => { - state = .builtin; result.tag = .builtin; + continue :state .builtin; }, - else => { - state = .invalid; - }, - }, + else => continue :state .invalid, + } + }, - .ampersand => switch (c) { + .ampersand => { + self.index += 1; + switch (self.buffer[self.index]) { '=' => { result.tag = .ampersand_equal; self.index += 1; - break; - }, - else => { - result.tag = .ampersand; - break; }, - }, + else => result.tag = .ampersand, + } + }, - .asterisk => switch (c) { + .asterisk => { + self.index += 1; + switch (self.buffer[self.index]) { '=' => { result.tag = .asterisk_equal; self.index += 1; - break; }, '*' => { result.tag = .asterisk_asterisk; self.index += 1; - break; - }, - '%' => { - state = .asterisk_percent; - }, - '|' => { - state = .asterisk_pipe; }, - else => { - result.tag = .asterisk; - break; - }, - }, + '%' => continue :state .asterisk_percent, + '|' => continue :state .asterisk_pipe, + else => result.tag = .asterisk, + } + }, - .asterisk_percent => switch (c) { + .asterisk_percent => { + self.index += 1; + switch (self.buffer[self.index]) { '=' => { result.tag = .asterisk_percent_equal; self.index += 1; - break; - }, - else => { - result.tag = .asterisk_percent; - break; }, - }, + else => result.tag = .asterisk_percent, + } + }, - .asterisk_pipe => switch (c) { + .asterisk_pipe => { + self.index += 1; + switch (self.buffer[self.index]) { '=' => { result.tag = .asterisk_pipe_equal; self.index += 1; - break; - }, - else => { - result.tag = .asterisk_pipe; - break; }, - }, + else => result.tag = .asterisk_pipe, + } + }, - .percent => switch (c) { + .percent => { + self.index += 1; + switch (self.buffer[self.index]) { '=' => { result.tag = .percent_equal; self.index += 1; - break; }, - else => { - result.tag = .percent; - break; - }, - }, + else => result.tag = .percent, + } + }, - .plus => switch (c) { + .plus => { + self.index += 1; + switch (self.buffer[self.index]) { '=' => { result.tag = .plus_equal; 
self.index += 1; - break; }, '+' => { result.tag = .plus_plus; self.index += 1; - break; - }, - '%' => { - state = .plus_percent; - }, - '|' => { - state = .plus_pipe; - }, - else => { - result.tag = .plus; - break; }, - }, + '%' => continue :state .plus_percent, + '|' => continue :state .plus_pipe, + else => result.tag = .plus, + } + }, - .plus_percent => switch (c) { + .plus_percent => { + self.index += 1; + switch (self.buffer[self.index]) { '=' => { result.tag = .plus_percent_equal; self.index += 1; - break; - }, - else => { - result.tag = .plus_percent; - break; }, - }, + else => result.tag = .plus_percent, + } + }, - .plus_pipe => switch (c) { + .plus_pipe => { + self.index += 1; + switch (self.buffer[self.index]) { '=' => { result.tag = .plus_pipe_equal; self.index += 1; - break; }, - else => { - result.tag = .plus_pipe; - break; - }, - }, + else => result.tag = .plus_pipe, + } + }, - .caret => switch (c) { + .caret => { + self.index += 1; + switch (self.buffer[self.index]) { '=' => { result.tag = .caret_equal; self.index += 1; - break; - }, - else => { - result.tag = .caret; - break; }, - }, + else => result.tag = .caret, + } + }, - .identifier => switch (c) { - 'a'...'z', 'A'...'Z', '_', '0'...'9' => continue, + .identifier => { + self.index += 1; + switch (self.buffer[self.index]) { + 'a'...'z', 'A'...'Z', '_', '0'...'9' => continue :state .identifier, else => { - if (Token.getKeyword(self.buffer[result.loc.start..self.index])) |tag| { + const ident = self.buffer[result.loc.start..self.index]; + if (Token.getKeyword(ident)) |tag| { result.tag = tag; } - break; - }, - }, - .builtin => switch (c) { - 'a'...'z', 'A'...'Z', '_', '0'...'9' => continue, - else => break, - }, - .backslash => switch (c) { - 0 => { - result.tag = .invalid; - break; }, - '\\' => { - state = .multiline_string_literal_line; - }, - '\n' => { - result.tag = .invalid; - break; - }, - else => { - state = .invalid; - }, - }, - .string_literal => switch (c) { + } + }, + .builtin => { + 
self.index += 1; + switch (self.buffer[self.index]) { + 'a'...'z', 'A'...'Z', '_', '0'...'9' => continue :state .builtin, + else => {}, + } + }, + .backslash => { + self.index += 1; + switch (self.buffer[self.index]) { + 0 => result.tag = .invalid, + '\\' => continue :state .multiline_string_literal_line, + '\n' => result.tag = .invalid, + else => continue :state .invalid, + } + }, + .string_literal => { + self.index += 1; + switch (self.buffer[self.index]) { 0 => { if (self.index != self.buffer.len) { - state = .invalid; - continue; + continue :state .invalid; + } else { + result.tag = .invalid; } - result.tag = .invalid; - break; - }, - '\n' => { - result.tag = .invalid; - break; - }, - '\\' => { - state = .string_literal_backslash; - }, - '"' => { - self.index += 1; - break; }, + '\n' => result.tag = .invalid, + '\\' => continue :state .string_literal_backslash, + '"' => self.index += 1, 0x01...0x09, 0x0b...0x1f, 0x7f => { - state = .invalid; + continue :state .invalid; }, - else => continue, - }, + else => continue :state .string_literal, + } + }, - .string_literal_backslash => switch (c) { - 0, '\n' => { - result.tag = .invalid; - break; - }, - else => { - state = .string_literal; - }, - }, + .string_literal_backslash => { + self.index += 1; + switch (self.buffer[self.index]) { + 0, '\n' => result.tag = .invalid, + else => continue :state .string_literal, + } + }, - .char_literal => switch (c) { + .char_literal => { + self.index += 1; + switch (self.buffer[self.index]) { 0 => { if (self.index != self.buffer.len) { - state = .invalid; - continue; + continue :state .invalid; + } else { + result.tag = .invalid; } - result.tag = .invalid; - break; - }, - '\n' => { - result.tag = .invalid; - break; - }, - '\\' => { - state = .char_literal_backslash; - }, - '\'' => { - self.index += 1; - break; }, + '\n' => result.tag = .invalid, + '\\' => continue :state .char_literal_backslash, + '\'' => self.index += 1, 0x01...0x09, 0x0b...0x1f, 0x7f => { - state = .invalid; + 
continue :state .invalid; }, - else => continue, - }, + else => continue :state .char_literal, + } + }, - .char_literal_backslash => switch (c) { + .char_literal_backslash => { + self.index += 1; + switch (self.buffer[self.index]) { 0 => { if (self.index != self.buffer.len) { - state = .invalid; - continue; + continue :state .invalid; + } else { + result.tag = .invalid; } - result.tag = .invalid; - break; - }, - '\n' => { - result.tag = .invalid; - break; }, + '\n' => result.tag = .invalid, 0x01...0x09, 0x0b...0x1f, 0x7f => { - state = .invalid; + continue :state .invalid; }, - else => { - state = .char_literal; - }, - }, + else => continue :state .char_literal, + } + }, - .multiline_string_literal_line => switch (c) { - 0 => { - if (self.index != self.buffer.len) { - state = .invalid; - continue; - } - break; - }, - '\n' => { - break; - }, - '\r' => { - if (self.buffer[self.index + 1] == '\n') { - break; - } else { - state = .invalid; - } + .multiline_string_literal_line => { + self.index += 1; + switch (self.buffer[self.index]) { + 0 => if (self.index != self.buffer.len) { + continue :state .invalid; }, - 0x01...0x09, 0x0b...0x0c, 0x0e...0x1f, 0x7f => { - state = .invalid; + '\n' => {}, + '\r' => if (self.buffer[self.index + 1] != '\n') { + continue :state .invalid; }, - else => continue, - }, + 0x01...0x09, 0x0b...0x0c, 0x0e...0x1f, 0x7f => continue :state .invalid, + else => continue :state .multiline_string_literal_line, + } + }, - .bang => switch (c) { + .bang => { + self.index += 1; + switch (self.buffer[self.index]) { '=' => { result.tag = .bang_equal; self.index += 1; - break; }, - else => { - result.tag = .bang; - break; - }, - }, + else => result.tag = .bang, + } + }, - .pipe => switch (c) { + .pipe => { + self.index += 1; + switch (self.buffer[self.index]) { '=' => { result.tag = .pipe_equal; self.index += 1; - break; }, '|' => { result.tag = .pipe_pipe; self.index += 1; - break; - }, - else => { - result.tag = .pipe; - break; }, - }, + else => 
result.tag = .pipe, + } + }, - .equal => switch (c) { + .equal => { + self.index += 1; + switch (self.buffer[self.index]) { '=' => { result.tag = .equal_equal; self.index += 1; - break; }, '>' => { result.tag = .equal_angle_bracket_right; self.index += 1; - break; }, - else => { - result.tag = .equal; - break; - }, - }, + else => result.tag = .equal, + } + }, - .minus => switch (c) { + .minus => { + self.index += 1; + switch (self.buffer[self.index]) { '>' => { result.tag = .arrow; self.index += 1; - break; }, '=' => { result.tag = .minus_equal; self.index += 1; - break; - }, - '%' => { - state = .minus_percent; }, - '|' => { - state = .minus_pipe; - }, - else => { - result.tag = .minus; - break; - }, - }, + '%' => continue :state .minus_percent, + '|' => continue :state .minus_pipe, + else => result.tag = .minus, + } + }, - .minus_percent => switch (c) { + .minus_percent => { + self.index += 1; + switch (self.buffer[self.index]) { '=' => { result.tag = .minus_percent_equal; self.index += 1; - break; - }, - else => { - result.tag = .minus_percent; - break; }, - }, - .minus_pipe => switch (c) { + else => result.tag = .minus_percent, + } + }, + .minus_pipe => { + self.index += 1; + switch (self.buffer[self.index]) { '=' => { result.tag = .minus_pipe_equal; self.index += 1; - break; }, - else => { - result.tag = .minus_pipe; - break; - }, - }, + else => result.tag = .minus_pipe, + } + }, - .angle_bracket_left => switch (c) { - '<' => { - state = .angle_bracket_angle_bracket_left; - }, + .angle_bracket_left => { + self.index += 1; + switch (self.buffer[self.index]) { + '<' => continue :state .angle_bracket_angle_bracket_left, '=' => { result.tag = .angle_bracket_left_equal; self.index += 1; - break; }, - else => { - result.tag = .angle_bracket_left; - break; - }, - }, + else => result.tag = .angle_bracket_left, + } + }, - .angle_bracket_angle_bracket_left => switch (c) { + .angle_bracket_angle_bracket_left => { + self.index += 1; + switch (self.buffer[self.index]) { 
'=' => { result.tag = .angle_bracket_angle_bracket_left_equal; self.index += 1; - break; - }, - '|' => { - state = .angle_bracket_angle_bracket_left_pipe; - }, - else => { - result.tag = .angle_bracket_angle_bracket_left; - break; }, - }, + '|' => continue :state .angle_bracket_angle_bracket_left_pipe, + else => result.tag = .angle_bracket_angle_bracket_left, + } + }, - .angle_bracket_angle_bracket_left_pipe => switch (c) { + .angle_bracket_angle_bracket_left_pipe => { + self.index += 1; + switch (self.buffer[self.index]) { '=' => { result.tag = .angle_bracket_angle_bracket_left_pipe_equal; self.index += 1; - break; }, - else => { - result.tag = .angle_bracket_angle_bracket_left_pipe; - break; - }, - }, + else => result.tag = .angle_bracket_angle_bracket_left_pipe, + } + }, - .angle_bracket_right => switch (c) { - '>' => { - state = .angle_bracket_angle_bracket_right; - }, + .angle_bracket_right => { + self.index += 1; + switch (self.buffer[self.index]) { + '>' => continue :state .angle_bracket_angle_bracket_right, '=' => { result.tag = .angle_bracket_right_equal; self.index += 1; - break; - }, - else => { - result.tag = .angle_bracket_right; - break; }, - }, + else => result.tag = .angle_bracket_right, + } + }, - .angle_bracket_angle_bracket_right => switch (c) { + .angle_bracket_angle_bracket_right => { + self.index += 1; + switch (self.buffer[self.index]) { '=' => { result.tag = .angle_bracket_angle_bracket_right_equal; self.index += 1; - break; }, - else => { - result.tag = .angle_bracket_angle_bracket_right; - break; - }, - }, + else => result.tag = .angle_bracket_angle_bracket_right, + } + }, - .period => switch (c) { - '.' => { - state = .period_2; - }, - '*' => { - state = .period_asterisk; - }, - else => { - result.tag = .period; - break; - }, - }, + .period => { + self.index += 1; + switch (self.buffer[self.index]) { + '.' 
=> continue :state .period_2, + '*' => continue :state .period_asterisk, + else => result.tag = .period, + } + }, - .period_2 => switch (c) { + .period_2 => { + self.index += 1; + switch (self.buffer[self.index]) { '.' => { result.tag = .ellipsis3; self.index += 1; - break; }, - else => { - result.tag = .ellipsis2; - break; - }, - }, + else => result.tag = .ellipsis2, + } + }, - .period_asterisk => switch (c) { - '*' => { - result.tag = .invalid_periodasterisks; - break; - }, - else => { - result.tag = .period_asterisk; - break; - }, - }, + .period_asterisk => { + self.index += 1; + switch (self.buffer[self.index]) { + '*' => result.tag = .invalid_periodasterisks, + else => result.tag = .period_asterisk, + } + }, - .slash => switch (c) { - '/' => { - state = .line_comment_start; - }, + .slash => { + self.index += 1; + switch (self.buffer[self.index]) { + '/' => continue :state .line_comment_start, '=' => { result.tag = .slash_equal; self.index += 1; - break; - }, - else => { - result.tag = .slash; - break; }, - }, - .line_comment_start => switch (c) { + else => result.tag = .slash, + } + }, + .line_comment_start => { + self.index += 1; + switch (self.buffer[self.index]) { 0 => { if (self.index != self.buffer.len) { - state = .invalid; - continue; - } - return .{ + continue :state .invalid; + } else return .{ .tag = .eof, .loc = .{ .start = self.index, @@ -1087,58 +967,51 @@ pub const Tokenizer = struct { }, }; }, - '/' => { - state = .doc_comment_start; - }, '!' 
=> { result.tag = .container_doc_comment; - state = .doc_comment; - }, - '\r' => { - state = .expect_newline; + continue :state .doc_comment; }, '\n' => { - state = .start; - result.loc.start = self.index + 1; + self.index += 1; + result.loc.start = self.index; + continue :state .start; }, + '/' => continue :state .doc_comment_start, + '\r' => continue :state .expect_newline, 0x01...0x09, 0x0b...0x0c, 0x0e...0x1f, 0x7f => { - state = .invalid; - }, - else => { - state = .line_comment; - }, - }, - .doc_comment_start => switch (c) { - 0, '\n' => { - result.tag = .doc_comment; - break; + continue :state .invalid; }, + else => continue :state .line_comment, + } + }, + .doc_comment_start => { + self.index += 1; + switch (self.buffer[self.index]) { + 0, '\n' => result.tag = .doc_comment, '\r' => { if (self.buffer[self.index + 1] == '\n') { result.tag = .doc_comment; - break; } else { - state = .invalid; + continue :state .invalid; } }, - '/' => { - state = .line_comment; - }, + '/' => continue :state .line_comment, 0x01...0x09, 0x0b...0x0c, 0x0e...0x1f, 0x7f => { - state = .invalid; + continue :state .invalid; }, else => { - state = .doc_comment; result.tag = .doc_comment; + continue :state .doc_comment; }, - }, - .line_comment => switch (c) { + } + }, + .line_comment => { + self.index += 1; + switch (self.buffer[self.index]) { 0 => { if (self.index != self.buffer.len) { - state = .invalid; - continue; - } - return .{ + continue :state .invalid; + } else return .{ .tag = .eof, .loc = .{ .start = self.index, @@ -1146,72 +1019,85 @@ pub const Tokenizer = struct { }, }; }, - '\r' => { - state = .expect_newline; - }, '\n' => { - state = .start; - result.loc.start = self.index + 1; + self.index += 1; + result.loc.start = self.index; + continue :state .start; }, + '\r' => continue :state .expect_newline, 0x01...0x09, 0x0b...0x0c, 0x0e...0x1f, 0x7f => { - state = .invalid; + continue :state .invalid; }, - else => continue, - }, - .doc_comment => switch (c) { - 0, '\n' => { - 
break; - }, - '\r' => { - if (self.buffer[self.index + 1] == '\n') { - break; - } else { - state = .invalid; - } + else => continue :state .line_comment, + } + }, + .doc_comment => { + self.index += 1; + switch (self.buffer[self.index]) { + 0, '\n' => {}, + '\r' => if (self.buffer[self.index + 1] != '\n') { + continue :state .invalid; }, 0x01...0x09, 0x0b...0x0c, 0x0e...0x1f, 0x7f => { - state = .invalid; + continue :state .invalid; }, - else => continue, + else => continue :state .doc_comment, + } + }, + .int => switch (self.buffer[self.index]) { + '.' => continue :state .int_period, + '_', 'a'...'d', 'f'...'o', 'q'...'z', 'A'...'D', 'F'...'O', 'Q'...'Z', '0'...'9' => { + self.index += 1; + continue :state .int; }, - .int => switch (c) { - '.' => state = .int_period, - '_', 'a'...'d', 'f'...'o', 'q'...'z', 'A'...'D', 'F'...'O', 'Q'...'Z', '0'...'9' => continue, - 'e', 'E', 'p', 'P' => state = .int_exponent, - else => break, + 'e', 'E', 'p', 'P' => { + continue :state .int_exponent; }, - .int_exponent => switch (c) { + else => {}, + }, + .int_exponent => { + self.index += 1; + switch (self.buffer[self.index]) { '-', '+' => { - state = .float; - }, - else => { - self.index -= 1; - state = .int; + self.index += 1; + continue :state .float; }, - }, - .int_period => switch (c) { + else => continue :state .int, + } + }, + .int_period => { + self.index += 1; + switch (self.buffer[self.index]) { '_', 'a'...'d', 'f'...'o', 'q'...'z', 'A'...'D', 'F'...'O', 'Q'...'Z', '0'...'9' => { - state = .float; + self.index += 1; + continue :state .float; }, - 'e', 'E', 'p', 'P' => state = .float_exponent, - else => { - self.index -= 1; - break; + 'e', 'E', 'p', 'P' => { + continue :state .float_exponent; }, + else => self.index -= 1, + } + }, + .float => switch (self.buffer[self.index]) { + '_', 'a'...'d', 'f'...'o', 'q'...'z', 'A'...'D', 'F'...'O', 'Q'...'Z', '0'...'9' => { + self.index += 1; + continue :state .float; }, - .float => switch (c) { - '_', 'a'...'d', 'f'...'o', 
'q'...'z', 'A'...'D', 'F'...'O', 'Q'...'Z', '0'...'9' => continue, - 'e', 'E', 'p', 'P' => state = .float_exponent, - else => break, + 'e', 'E', 'p', 'P' => { + continue :state .float_exponent; }, - .float_exponent => switch (c) { - '-', '+' => state = .float, - else => { - self.index -= 1; - state = .float; + else => {}, + }, + .float_exponent => { + self.index += 1; + switch (self.buffer[self.index]) { + '-', '+' => { + self.index += 1; + continue :state .float; }, - }, - } + else => continue :state .float, + } + }, } result.loc.end = self.index; From 2fc1f9b9716f8561c94ecdeb8196ac4ce582f318 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Tue, 10 Sep 2024 05:00:02 +0200 Subject: [PATCH 122/202] llvm: Don't use the optimized jump table construction logic for wasm. --- src/codegen/llvm.zig | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 2f9f9e096ba4..c8eb37b30adf 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -6544,6 +6544,12 @@ pub const FuncGen = struct { const jmp_table: ?SwitchDispatchInfo.JmpTable = jmp_table: { if (!is_dispatch_loop) break :jmp_table null; + + // Workaround for: + // * https://github.com/llvm/llvm-project/blob/56905dab7da50bccfcceaeb496b206ff476127e1/llvm/lib/MC/WasmObjectWriter.cpp#L560 + // * https://github.com/llvm/llvm-project/blob/56905dab7da50bccfcceaeb496b206ff476127e1/llvm/test/MC/WebAssembly/blockaddress.ll + if (zcu.comp.getTarget().isWasm()) break :jmp_table null; + // On a 64-bit target, 1024 pointers in our jump table is about 8K of pointers. This seems just // about acceptable - it won't fill L1d cache on most CPUs. const max_table_len = 1024; From 0e84fedbe040bca2993115c5ac198e8695a506a1 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 10 Sep 2024 16:13:33 -0700 Subject: [PATCH 123/202] update zig1.wasm Notably, contains the implementation of labeled switch expressions and decl literals. 
--- stage1/zig1.wasm | Bin 2786306 -> 2803404 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/stage1/zig1.wasm b/stage1/zig1.wasm index 32add10535ccc486c8bf70a4a5d3c7b77ec18151..8b9423708f95aaa0d9f764053382cee38b3b1c29 100644 GIT binary patch literal 2803404 zcmc${3t(MEwg12OKF{QwlkD{Upk?nv(G(h5o`n{*yIRoe+j~WPQMiIiit<#i_bNmP z7_?}BAVGo#EfBG2#Ny5WS}|zV2t@)GjZh(K!2(4BR*l*#zt4AOpI7>@2E6~Y$;_TT zk2PzpS+i!%n%T~2=YPO;9LN1q`rW6dF4B*4s;57RQq;ZZRF~(ZeWzqn%oAZ+smksu zHGU$NvFW_9m6DW=lT6(gUg(@!S4tqoamq!=3IOhfrVX;CF1nB(LB${Ux>K;Vz5 z)1ctvr>4ndDB>9udLK7*B`%7Z!27t^NUuL`_(9FioY-vq?8ICRF1#;hX3hA>Y3HB0 z`uq=`cFy_lKl_5!=bU%OhtE9YBZoJfcHaBWUw!_W|9QswtKWCV1x{iw`LZV8zH&6Y z_jJ{I{spI9u+N&NR^>bW%=1=%@U#z{;iUJ{Y~4BMo&BEGXZ**R7dXkiWNA9%gC9Qg zyt6;JADzgRvhJ%NS>SumS!bVrhEunf77OHf@9BTHqZzWDcgAU_JDI)I)2MpRJnQeH zOf{W<#u;mzqxMo$?wr#uct4F9T-NU;({z*RJ%Dt<8LPqk8SlB^?DIalchuA9IUhOi z%nQzN!oAcut-1r}pK*a9fpfrKatpCA2ng9*eRT%Hv(Gu>gU-==sigkA(>{3m*&o;^ zht<=N+M$noZA!q{&h3plO$L}6`sDWll*R0MXMEu75AP3VeN*#?K6uudA6#P?z)2@= zn0Ng=-}5}zODB`bgqQZ*jO+V;(odx`e!}xIb@fgrUGI2_w3qR`l&fxk=AWyI!4}N;t`sn@yyWR7l>8o92I>|1b4CC*^pVlrm1Ka}ycYolZ@Ogv)=| zRr@J7mGZoVn@ZQE94Dz39JemPAJ=sfDdkNi-MW-FC81nuB*Bkh&!pZd&g^g_lsX>PaH8)J@Ta1O=URT}Bl+seGEO;7LEk4Y&z|t5hltHng zoqkdauS6ND0uOab2SgNw959$lWK=wn@n)qm=?wIMs-BmloI2@39`iHXcX^jeB-28p z>o%52L_U+1KLQtID{e43;L_yx^$QQ$(+ zq~I2%1=hSwsy>mS8nUFPOr_~%%e@Bpf}d2{o6lHG!eVK!4rJEV(^$TkQg=LW9>W2i zpglnJ92#XjI8HuMH?IIo)#ttAr=;sNj2y@G6#$4r3iB`PzRbyF;NW_g7Tk)-rcR|( z>2x-cW&~4+(FcP7zUyV(Y@Yewc&}9n4jTsoSi3mVz zMDoKMaeNx?MJ_OC#FK^DDJKKA=t#OQ0rJv90V&GL-&tPWJX(Wx&@q?I=CbJn-Q>co z1H?uGkuKiVa3T}GseA$+HUIgONy3rHN&>N}NAKu=rrr^df+_w(1&RR!Qj7;-R80bp zI+RFgPyj|Qk!NV7giC=0&Q+}gj>$XSs{>Xi!+?asAYOyjOQz?*x0ysX%V5d^0RMnL z2U=h;vbouU>*Sm)ve?UJQb{;84THjzlJS(H?qpu^GPo9S^_|v@M0ZnlfSAfUo5la=wDY-}t*U}Tc0R9BaWpt)>DBP*LToHq6487ITg zOU)6^(}%)=PMV}tx)Ij*vP@uw95@q?2^IQDJ6VkoUh7XvPxJ=Cko4-4pj@O*ra`Hp zY?7%?!eUMxGCf;pqz*bJ21#a_B{CXeIa>7+g=|tKlX;j3zG1NNDggs&5)m6^<}j(5 
z9CV1ug4|+ui4%UZnOqLi)Ol$LRe*b-UY;8BOcj9Alxbp0$U03>*UQuYdS?-kr?VPq zz@fgBeuTXRvu4e5oq9E#5?pj9mjqMEWF{-Ak`3yX!fIPDfxe#;uphk&py9a@1vWNCqHeXw~OZ`6*K}j1Tcz9nc~9(HUF^YEZhx zL%n%vqN&cwf)=E(rV_?12x6+y=uM;wj7JJ+nuF~#g}}-3O5B@BFGw|dS-1klOTtC+ zC6{A_N^a#KD)KSw)nyCWYyn^snxQgCb(qnUj1yS(%-)P3rUf9Xvw~1^BavxDW6LrL zrt(SGNxG5rF{xkt2(s!Qu?nJA^PW2=(FlebS0=M?elDeP!Dvioov98~5Q>s+j(QN4 zAP&vN$>-^erV^M13Z>Jzz!SX5LtptS$wTQ(jtgI9ZFvCDKtst8GtijGVk-6wLqg(Y zMg%l%DUZzn7p-#Z(_$2o4r~+)Kr?g>q#0>-5ThU>s*A`t^gd8@0CN`H9uvI@X5sA# zj5DS;`VDc74g)JUdWkGVKsf_Z2(&uGd2T|Yw6TyYY$VbQ~_w#1j1n&{kjQfA?hX?AcG8D zk-u)v0=$op=QJ`qp<^Pk814K)SQrfJkL&&mz|N+K>H^Uj&7g+X z0uj6em$IpO7|fPzN_w!)R3m-Jl}k#lH>MAP)6han16Cw+rwSZXUM-k9VU2`X$NXa$ zkq;Fsm1TlUi)6|!84te_W1J(?QphkpWwUjR1+a<)OBHxF3Yx0rZMu#>s@AllB)H4A zZvHa97!qQeL_I8z$^^mCj=X$UQ#YuEr;H}S*rqY0HcE0K3AH3^k{Wx-B&{j{Fd>Ff zY#OB<5UE%EWb&{h5_SP{v_2=v$K)#|y$s=N6Uxp{zR3iML`y2sT=eF{C8&pbV>B@n z)@AB52Om7`b+3Pe`^J~Nq~T3(KJ1{k9M&*@`uyqB8xo1R{48Fam{vD!+O&V2mY6o} z-wvMJFk{J*x8`Rae$o+dYnU-p|LYnW8eVc_!?Yt4Cm%j-noZMRy=$29_L)bR9ErK` zc~)v;;T>rhv);>tDN-_CZYrWJ4`&q8P7ax$eP^nmF;OVwQiY5r=lVReatd)l74OPt z>Sx0`2SQkM(M7rJ94Q72`E%S^v(PG>%abg-E^?!`wl+VPnd+rKz`FOmk0MJ~V-h*< z0%um;BG-xB+|0wSbid&Cy07ndKb81&;)}_9lGoSWTz7rl(Myj#Zu#+_NNuhEW&I;e zHa8wvzwO~`-0SDv+Me(_-HRG8JjjX0+%?6z1x`>Gb<@x>-h!ZRrL#0Ki-b-R7AAP` zgE}7l)?>UN^1_t2qV$i{4!4!RYu#4giJpHxac;qhZhi392mdq726YP)L3pJbKR4P! zL0z-g;TGNG3M#$QU7F~0Dc@si>Ey5L&kpJqyX)Lys*sGc^pU06>vcn3HG4g7s3QHQ zNT2>Tduv^Pj*1OXtX%V;vJJYeVSPTC^Hv0=mVVcF4WO%(Pq2iUxS3Jq-)^cMmF81E=vImdWQ;;hX! 
z^j>F^Uk&+$|N5nL*dW+D-M%@s^5$fElk`?3R;X9}0%4FTx+faUC4sW&s)0rTPh8Mi zOg4K-<)L?BDi@?y6$IXjHAT8a7a{%pV#?rD6+|OWtAAjS^aJ9}t$vtizBH*pPI-%Q z-WdNU7h#lU?_I@YO0YHEawrG};)<4l=vyQL{^(IyZ8Z zGXqvq@L!HhDOfDXLYiVysDmIuBLmeDve$&UAS)Kkk_{G2LFf8bP|Jwe3r>qkKcpK= zTBEHuT;Wr#p+~fZhlL)Z04$5Yqo6fP_zPlE79d%(2PZari%2p&6Qz#vlF;#z8`_a7 zfF0#qqqL1>TcdUNN!TcPS~=Nojm93o*d*5haH6#cH!LcNk;ICU)Y+d9G#IJkdxkNK zNl?WwJdr=kPCVD}9*jhuX87Cmr&#YFEV8|t;SL`ygq^O0`a!+_5}2sRm#_lhOys`4 zkc>vJzrtroyUkv|B0+uhRNJL~ROi1ma?dR!%<#DPiOb38kprQW36^F&7(9-Fe`k&DZ1*_1m~Pux1PfqMS3iF&fOW}DJ~ z6M15oZOZtsNr~afn^Hf&{_--+Hs$We$dj}66il8w9x9hwh0M)F-U)@dqJa+urilX4 zz5h!`oBWr~$94wJWVF$w-1sh#H3T3=we7IXg}2a(9eSz>LSa)@I1X`zsWFCIie-aaT^D4)9Plp*fWatzUTu zc`cnLXf5XbS89FSz?`Qvf;+c?~=&|Bs_5kEhv5WGDVKktacr{nNq;Y6TXEE9q6n^Yk>JG(LScH`rGoA?1lP^cda^=?B&<)0WYMZ=MRnqie9Bz!mfjHa{ zhqn`o2}W`z^u`)(*p-h#-p-3?jM@#P<9Zru1UAdh->O76RH6eFh`(#1hWGDoYz>Zv z1Eb^#w(w?^DDhY~;w|oQr(f=6M(Fwv?VW@MIL0K_zc3*Ai_nM^)iwy#)~$Pl>Z{DU}=5n;m{@@q9YC$+09I zY$`z$ZLUO}<<2Fg@&iqAMVp939aGQ}OCz9jiXBbSXj5yny{eOev(W39veX-Ni}1B9<8>?J&&aMW)NPa$k0d^zDZ(hn!>%D0Bc5svWuuY@H* zzIvIn6-gANqr|z(oFBu7Q8yTHqD2;cFvn&giiX!5;|(dQIq?{8)0AlRf4cp}u2Y1!APAQQ^TT6; zS>e)PZg^xcB|MDPM=~gci-ToQw7jzUfxc z^-m_cooL_&`M}dcF&iYHd9hK8y_6407Q17o6kTcgRJQ#T6a>N@r?7BBY#TAqxtdyf zRf`jKK1zUI---=^qJ5>JV!$CW0O6n*k#HlSM$QIN56101WvRDbaTe%(3Q>~Q5lRyD z>e)8eJtd!j)#9f%HQuIHQ&<^})C(5&Bg&pV#@nLMy^Cd3wDykc{8qv1ZB?CHrdZ8u zNsZ<;4K70SLIER^hn-NPq8b^ORcpsVQg5TmCThxN z8ER(HV->gEN;Xrnxi$KpN*b*#zqpzKFRWrkL-_bv%usp0s48Ab-D?cyNkrFO$3h}z z8*#u}Wv+NlR9Mqc=QlKXjZ>NmjZ*=|WUkVWl~?7f6l6+DKA_!h6Z*F?m>;_0(|+`m zYh3?Yub8%a>dO2pYuTjq?@z)IOF z{H5NQ7{FPW=w-pVT@81e?Axb^`&WsTF@c%+yc;_J@2%a;yy1_DyFwZ*oZ$ng>8Jy_A_GF&$eLCx3W!qG7UY^e_?16q zp}<%cA2SR(rV=s?+6klfsjdE{DJK`D?97=uq42UfsOEl!h3%>#UPHh&@!|m(qAho= z^`k}T{P};6D{qS{-zB{4nqn&#N_ORn2nyhXvOKoskqYi>QQxxs%lV&xTyK;zx7!dT z^LVZ}g1Be;RuuTtX7-_LlodaMW|)A-l_0jvp3|@4B=+C$dbvw3_d!{T{pFuGW|c=>z!)Z zu|4c^^GC$kUoY%49Qq6uY9&;^$lW*f1xTnH{(!7lP$Ok(*M|F$P@7bCQWENJm9!FS 
zwU|d(efZlL4(#H;G={(VQ8V)HC3?39QJ^4Va+*)o#;(_79qiuKA|t8rY|tD`m$|eUyh(e z8>@`n`r_pBD|0~;5>2RY|2@_SKW6CTf*Eo0HA?jk-04B@56{?r8MIjh4 z)MH1ram@H`lv)#g{59-Qcnmb}f~Xs-rm@?lq?5gVuH=VIrOdp$STD53!Z4{kfqNok zPn#ID_zfC|EPN7`NC1^W5lNkh6x3K@jBL~xVdM^ITnxnHVp2Af$L>Q49<`5gu~(Z( z&oCuD*-Y~9*?)HV)lDU-dz$)^in@d+t$W1P*G@t9XkIqtx=%*TSpJlT%C)zWRvIek zbL=sdDAS&b);`qjTbVfqNkpcdVwHP-hJ7oxx4F^g-(RataDDWfu8O(LjfN{PI_^Nu z#r8@!+ERJZeo4ht=0+ox7ysK1V2#$U~j6fC6J5%vdnpSs#bxL9^bs1(%Fg#j1tDKl!&rx!gJ^# z!}`N{RCDG1!?&>NVpJ}*)aF(A*>u&3#?z8)wAp4BFzudyepy%_kFYlN+%#0?CIfLt zbv?0uKouRH9T#gokn8xLGE*}Pnp6Q*V_|~z%r&Sw;YUi~edQYXJzoK@bI;Xw#?^N1 zRxKK%srGZK_CHG14n0sFMh4p6+G@M?h~ncFmndB46_54T7;G#Fj#Wyx!9-7NeP%%& z9D9s6Dyg{S7;mej;?@@;6|cFRNo`(*&G+Aue1}S+XBeqy2y^>=5GFQ?AL&`icKMeO zFH;RxOM-3iJ7tcGmj?aSIJwddJKf;Ga9R+C)6FnAAe<8%gko3>eS?{!!iM0bOfid5 z$Td6;3#SA}hI50Z;jG}8aDK2X41y!Vmf&hPyvhw0g)@Ug!v(=?^!S&ccsGZK1P6x; zgHtT0^&9+d@T&EKGOOMD-7=_#cmvgdYIMHZRx)5J*|_IQzP1Up*nI#~$>79Fj<=Nz zno2f#(Jm}C{)cyG{I#mc3SLvu&|WGUG8Jw1qQC8>q8?RLH4=ySQqi!fXp0xMv3%JR zM5Fh^+*L~QNOeUp?^ZATsF--f3;(k;AUdQq%_)lvFPhT|A{_+89ArF3!Q@5JFjFNv zxutcK9?iZKyUMZ7P>+B`&p?;ZDhmU1E=saSL#yARv4O_0U1MYW3mF@~)x7_GXEKB1 zw)-hsQW+2j^9|#^7Y3Ciyzxs+d$aAdx7?(B`hTHFLq+&(uR&xqsz)@jSoZ8d_V}OH znsv-BLq-juws}lgR?dyvB?vA5m!#6z)bLt2f0XnPFw;JbkSVrJq0!SS7LpFqqPA%- zWc~3hbKWE^BtKBuNv%KbQAxZ0V9qm09xh9s!V;6x{|FP%oC=aL**=xg;zHT)Q1)q+ zmsqKIxx~JFZo*4jGJbheU%4SOEN#ifw&eB+FKx;A<#P`LP?cmDuci8!rTTbG^<8DE zcTO|xFlMN}Q&hJL&#r0Zg=goq(!#U5LinDT@ZEbLe6I-K{Xz);%RfSbm+oV<>|^2i z$v;h&@K>m04dI`@m(g{A9bGM~hzjM2L3G}vo!fXn+>X85ww9I968)ael%&Fx7ft^P-A=HBuu?Mv#cB?qh;$(;@suV`r6)q?eF#e#ZiCao_Cv`_o(OV_t10G z%V%$*Pj;3s5&33yJT~pvZuZu6GivjeJN>m({$_1IZ~uG!lz#Q1_O;jab)EX!zgu5T zPrEeQE$^D4(^u1}Ab}l1oxs+*!E|O}qA#)3+c?dx(xUC`#j<>fHsFor`D-hNrm{l$ zjR^{6e{}=WWwD6Gl~5Vm<8YZ+v)}yPkYFM6pEb;B!2XO(1y#nSfqmC%hO=rNFtu*n zORWz``cx@*gZr-4E5SNwYTdM#S~s^Nf2-!ap?%kyDAhV-YTdk-THB=2TB&XbH(Xt7 zEl!iA8iy0bdSPKpqBy$>uUMo-2x<29_>ESX3q{4UF!;8xV+CnxP+Tqtq*xf@SjCP4 zPKDKiztd0{L6aSf{Ont%TgxPgG1thX|nPQF%61O^Uq} 
z`nfI{zE+s+O@^;C6I?u7Y}#1Mr+qc$RH84bzw49!6)9OPUcbzFa9YXMFs_9|on1IE z9cmm#b+WhCt*K=H*)%*h!;?zW-P5v9q|1v9tuoT=8t-CPnbK*t`NkVddAHW)RZhC0 zT&5SM=drcK?537rKuePer(z!Yxq;zrrOk!gu7u<5v|)I&e?qSpCi;_XS2dRMkBYM_ z8=3qAHTk1Dmi-8y=o@5XyVJ;_W^ZFs-p0fD8wh~|NjZJWFF|_#Hs#@YbfTOZnJA~0 z$%ZTnUoD?JyOcLD4A#J)CB+~x4>y%yzEh*RN>&V2=eKgL-&o)QZphGhbFzA9l^gzr z%#l|0Fbxk^S4810$?!t;a5x!WRBHT}m)ABvGNH`YWVl9UMv~#Fr84*3P^)^6R+q5= z_Zl-h^;z)D&=YWa_yMxsLCUxb+F9j zWI(F4Bm7cX>@(4~zC^JfJH{(i40DC{Yh!0e{6`R=O1<$hs5#xL;>Vek2S!P$68|ig zwr5JPJaZ?N+CCPRGmIAmXJEWhMgOZevFc?j+W2LpQq_vNa{}C5sjv+h(d>1m!cVwr zp$Ril$rz+MzbR1S*0e=3#k6#nQfpf3l3A-VeuE}?>XbmfNiQqPb}*y*<*j&aS74Nx z3X`_D3U3x?kKEMtE7-ECSD@#abg8XtA1woH#s)9{T&81(Ue=cUi+S0E6-`a6%eVaq zaTpu>)#d3X^_xnKU5zbrw|%diPc6}9_kpr}dQvYd+6G;&dU?Y)7>kvuK_$1AOMY1| zYfDyMK6W1rUa@WkP0O5tY`)pbqd(|nWyAzCN-t$2Y{EXM-u+lExK#wTsXte0iJ&Xn z*O}X&n@(F1g&@Qs#*pZ{lTj$?BJXQ^~5AaMRDqB|B8IVz3t*YJW?+C1HG)}!^+FGa^Sa1n^MV#$|bw>vLe3JX65CkOR2V4Sz9wcpDvgDtzI5n zdC9`tyj}{pXrVJ04tL z?Ra36T!XTkgv9dL6c$!gP!?Q}#eWNO>C=$WHDQl^sD>ai;@tyH@&mR;6 zFU1(=94z8?506&xg-=AeHQ^g*CD$6BTxz996#E*c|`40x@y|NA;K|Efr(1Koj2F?+8jUFGdykO zx_9gW866g$9PqSTcyho~Df7JAsUW55m;+9V95W~(@4jad*Hv?D_g?YSSuXP(!P8hv zoNfzGBk*)tcy!pTQsxo0QxVctXBq@gCEu8|^eT9C;9<}B=`F)^@x8ml(^r9KT?L*> zndj9`6+D%L6@sUd@3LP2cNJX+_6kpb8J>ROr^<;&LuA0hlLtQ=EIe|^tdzM*?Ns$! z?Ns=ulJ9#u(^W;+iRV4!imG!R6}}!U!*!GJ)KE*eA?1zBz~2qPx5>iS0DP6Q!)mvp zF-B^)0-rexVrnVz)wl#q)wtuOjwVQXx>NqoJU>}hlJC(jMrD4I0A8)L|kO=W~S>yhccJXRW>6Moc>1AcMQOVzx zOFpTWmDxrlth{_ch_7HqFP|xw{F7c*bRJb(dAU;tjM%#iNB%{iXewK(s|p9sk=e>y zfgqlW6N`M1WbSc1*OLLtcvN~E{iDFNHU4K=@yU?^3^AW}x<)A;OogZ@dS7fwr&5*jmo8@8D$yYNl*j%PdRZAYhCbz&<0BBSw&WFNa5jMpxgn_fPyRPt zRaWe0@J6X$RI1&8gkhEPztWql49?vXR8*B|b# zQP0I0zb(J){0=bNBaKE4;pO)7%g^X#MLkz6bh{}&Dd)qEr_haC z_Msc^)%kEhC2RIKe;}8dcoAs*3GTU(lrm1_8pGb&&U7^Yf@uDSBInhGWx!u``p~h5 z{(>{*tQ9%=S$35CRMo_5+8KsD@2E6ov_&SQUzewkb+^MeRigwlJz5?m*N?)hmFYt? 
z94)_GyZg&um0#X1DOOFxcDoukG`!weax`)_DqEq7)R)?91<|mlob6Y;_h4JO2j8B^ z2bbA83?JN3uH!dqs-ndk{6?L|I#{tlm-h(@EAz`_Y_dK|0`(K443ikI$C)iHZA$CS zW=#ZEmY5mLrhZ$g7O}CIca;sr=yjaV|TK!Yg}CR^V(6V9Jk`F88;K>df+C2nNZJ9s&mn@YJ0wN?p-TV zC4GNK8H9L4TTMJyUD)LNQMluM9i zCb`xqV0tNNqARTNFOsu99)an?492-^wWlm|?vmdr?co?>kgNJYw@s|zTifNL;4R<9 z3~7~s60NiqRm=I$mFMr@>BN5R{Qdj#%g;&eubo+TmS0|{8w;#jQxd0tD8GDGZuN63 z#sx;(AN3N^)@Tg3v0^n=Sy;r--YfOHVp@?n_s zTOWZI6&*|C{ZD1^p5C+@c;n@lu?t0M^{TBD+#}>0i+4oqJs-OamhzNb`Q*ZG2nsPCbmp$BHsKW^8Q@Y^W#B$%ekqSDkFAwj#}wFLs};r8cZzk%3g{Ma)E9@sMVfq;JDJLFpnp}R>ElO3sEjQ z3+cvn z4**OsnQm-UN!yM1XwDZNL(Y2sr)3f=G#N>K;}AWoO?pz&I8l?U?Pk==WKGhI-!!vg zbzMWcW~DaQ$m7(NswsH&*AdyZEslJJq}mq0@U7jFcF5jRn`_gr5TUhoZMtT+4n1Hm zDyatb?q{jK1S$;2aK1q*bg6pQTO@sULsc%D^lf_~j;+fk4a#4!R96G^K8)L)m@nx;efTx8IBP%B(*fN=PsjR65!wkb!1?z>83K>lrg^Z{#2tt-cG%-{z zZVzp4CZ9&|P>MAo#AV=djw{c3AzM>4ST2B*>?&p5lw!MukTtAJjRRze-)u}tS7yIF zB^P9Sm{{%C`&Uwe%coWYoLk7`>vI2em$XEPa|M3s2d^qD6_hhtgJyeF@nFng(on!` zzd|s(o-kU^I$2l9OzI%>mvtQKz0+ zku{`nf;aLoY0=ONISu(OZAu(e1L}UChTN*Mlk!IXwMyDW+dJcKR-bE{*iDU_%2}sL zk@M?vzMW)0A5_^%_48qswEcX?|4lz{m%HyI`#CacC*wydPR4InKV6y7I14okBa2Q{ z&7Z7vz!@w0oo=A|k#(ie2Yc=bs zytn_S6Mv$^tVJCWduz^IlNYa$GF)|6s&vOG*0r<0EK#jI?dR^oq*WI#|06R(X0Qvm)P8A)os;~$Gbs@c%S?PTQ z_M=#OuZ^X5hopDM3z6PW+>0DtIytfS<)2WtuBPmMO7E|$q?O)p78_zm+7x$fFz(vM z87~5=_wNALzp~EaBA*$p{?!$~tV89^hZi?2tF_0@Wb`*JoQ(%ppV0!=r~Ize9L5yW z>1Yh~RuN=tOpr+#_-+?!4zEFK?L4zr1K;DjBXwLu9`JU?y7k!fz;Ug#)nCph7~QBn>tAD( zp}rYdN*3$Ix*6CA2zzIQ1#g978+~i19`K84T^Hf>Y5?#6CB9|Uol9Pi3>IcCuWV8&bM4FqUlG)rjW}PTSaLR63Bm==<0g%! 
z(liiXP@!vPxS&Gk`JuS3`$N4;YU83rm?05n*brvO62@LbwlfnGW<-QJOwDbH2{T+K zOn*$6_4`4XzKMk4W|rq)!4h2*RoJ}#Gga@t&tv(D8; zSGOj$I9II>U9uNsFbfLngY0b=7OhwX6EM|xbd_`VzMRV!)#(bkvR^%IA3}tgS4G2QNAviQY^`BMwX;eN%9f&Zc5UkACD|jWA z4}w?av+8H}1fTPsWcYST?(Ssxj(jEuC4=;B?K!#EmTf~4V-jCAHlmDXbFP+(oeTU+ z-c)RyYsFpEVHz>^qLkJf(U7@c4o`HW#H~&G|K-8Ci`~tdIUKb=bV~T|WNf1(Mf4Ws zC0Q)!N+8bHs5~E3;ov1@RdTCJ0wYHPP6 zT-)l9E3BfiO1ZH_!O<|J^+q!;-wDayC6;GNwFU{#P~?t14dWI$KQJw<3E#+Si(&F6 zW##vs3}tFHBUg9f9O_@lmEM&zw3En(d-)dxEpW7M(j1#q6qruC%unIb1yKRF+2Sa7)h#9BxW+Y$zV% z>e3SH;ZcwAJf~aPnb0&taG4-)TDXG-sem2ah_(9H0MnyPcNM>o^GInq;XDLQR#Ttd ztOHz&_8pK*%iA>X3Wyj06UN?XMA076^Tv4Ue3LK)(A=^#p}V6A`LVdd-8R!*k8JcB zDv2<3S>UJk41Q`i@KXkS=50$s1j3FKBSKxLKbi!GaFvW-@a>U=Q}hk|63Lj;f|hL^~DL zM$<9gTB7>e1%KD#D;3+RH^nyv4%(Xn2Y4U9+34D((15Eoh4nx>4p4L@B{FHT+b>yT zEY|BzG4n2Cbp0tPOWG5Eg<-)K+|k5}Le%#f4Ofd}cF3B}ZFO4Fa1*6BjYDEY&ES~4 z-S;+G4)%57%RU(8UX`6u!({tuIe1(pt>s`5+DhT;;s@r%iulDwtr!gshfI1Dr{7sAu)(~@O7Rh{za`(wF3k>k7J-fa>7rMU z=w%iJ-R7oKPHUQ*Pt7#spNZy!;|J?8im}%x6AzSI$P{F6X4dyqM0*&7OKHWC8_<;$ z_PfO_bDZ*LqtD>-5aQUp`EH`&a#uNY=J!Aw{Snc~ zW_W<;K{i##cMq`9pAemAqxTYBVe@Vx%0L>-+Q%Q-$2;T4&GvC){P;5-i?)gfiN4B~ z`32EenrQG2EI|q2+D6jbEs0*oxKq*&l3r_)qDKgBX)Os6znCMDd;})U-H4ryz z&h?7L?c0a!+uO}!JjtVZ7+RbqT6C9av5Et#2`0FIp?!!s4jiISC-fOJaZbp>4Q;y3 z@&puCg)rCvY0{B&g52-cO{H*me2qBb_8++kTn&WiUdDiN!b*980?*s{eLcS?@%svX zBYvmxJDuMZ{LbNbD!;Gd*XQ?@{I29TW7a*K2_*hneqYD$+xdNm7DcJDf%V8-IJfHV zrkqv@=Dc2xgMh}dAQzUeZ$e6%8>y+mbYAw2$UOm!adUOz#PcLwnP)ltD(lI%7h>8Q z`Z2QW*cz!PCdQ!01!DGNO%R~!8w;1*T^4p6k7AZGH~dzA@hSQoyRrczumxvocKpso zG!!#}nXg@GBh&P$58DmRAZB!lpFSVtGPg$H0MS8Gsx)aJ$S@t)$#!d#3aIyJw@pP; z%#?QG94+{9M;;1Ya5AWS18X!*(xS92v|$gelEWaDduLrX9QW_q*)M$yG(3#OVUFFj z=s?UJ=U<1GiZ0K5CawZI<1=)O={W!MxVj>~Vxfv|O}dPKS#x!B!hxJBgoSU0$qn#` z&Y5VT8lDU&qA7i7%RF`SBpt}S6LGw@nbg3vx=x`SoM$ZcIuy54YY)+=ZRVbKR1voO z^HGi)H`CvJ-G3vIx|iDq?@?Jc(n|c4)GU+nAG9BYVq&8&d=iU01?kgX?urfolRF3R zS5>u_9xk9{^FB((w@g2$l6I1Q$^P#oZMlq{q-CP=OLCr3X+W=0X+#yH(kT7^w;3NG 
z*24^%G1GR6-!PL&FB2tMkLX)LQrwuHXNG3bt)GcVvY#i=G7X+Ev$M;seDEr|0(sl` zbeY0-w^%UgT1F}1KUmWee5{!BuV?wfx2JgOJ%z=stjsMA#?C2oX|IuWC26^UC+r36 z_Rnl(o`>nF2d1~0!v**lQvMuin9=t54PZ2TX8|L@`;muC)%31rQr~R)r}l2o>ddj` zAdaCD4rDp?O1Lr0u~EW1vmBo#9L#d|kP3%pazh#6rkRLhlpIrY)f6=n*KFp6Cgo(F zVBlbRo*4!Thi9@GTBQfGkgC#yak60dfSchppAF2^P<4Y@d=QTSWeDv<3NN3k(e=zS zxE9czWEqgvwb5!#den$C`pw|XlEA3aTexa9)?0!mGbL80$(ixAX{J~zDNU~dn=P7J z=W{qxAyfVig|QkStzes;OPT_Dz$1D_3D~L-4d7vgQ`o0fh+^>zg;SR~BMMPJex?xJ z<3WXUmN{D#qVGJQ5M^grA)3xl6r$$btI%KOY*vWkbhkpZrXhu>Og~aMo$Vim=vY5c zIBl7;Ng=x0_Z6b3eNQ1;+Fc4!(YT()HTs!ue<|q-EcPd_E3nCNg5`x`I$XtIuly5p zeajq3jlcLI3t>7~C0{vHX23rYYrxTnyQ139ZNnawoC#>TGHh%t zouG{+1KT#iw(W($_Q2OB3)@pFX<;+&3Oi=DMuXNZpeJS5w-B=nQKRABsj$A*(ArH} z)H!QUv+;g@sG;q-$!`m+SJ_E9#N4iu)*)trzN)p{Od-ASsQu!`I(XC#mIp8MFU3&7 zS2AF6t9I?2>1XE1&rA|>mNjjy=YtdDk6o$*I6X$-_s_BygRGwwE?344vvLyT(I6RP zRfk1Dr^d|7uw^J*jszssLCpanngQ9(ZUol-dze=A*k>Q_h#%1o_4dEbqXfKp{E_CM zO+ZgwQ3oFya*)h?+s4cg1G8HF8vu}xJ=y;F^hNO5PRZcIoH8q{r^G1pSV;>!9VM1G zr!pf-|C1S(MMBbQ$_Z?Zpauoj(P!%1Xf2;$G`q@cr28WO`RJrp0nB}LAI(kxjS|I( zujqfHV<&y z+*98~u%O-;XF6uvXdYeDMD;y3rU*J<+SxUsopGK4W>#o{nWZ^B>YQy+JYH$Hd$!?` zTAb2uJWjvtksJq-idJa`A;EE0eU=T$n3zEvxk_KJI90rW zACWO$x5QR9)iSXHS@UIa7Tm_*1?dcV&SEYePlPRYgv;6-CD*7;Z?jOkIUdfF5|j^b zK%_KQDac@TKfidVl6)%YK0L%*QPS_wt4}opv}d+ezxXI$aB8r|s$cK2N|shh zs6z3ZHCj5nNh_Cn|6)(7W3**<>&kX^$9wr;?5!8s<;M_Y)S?5lN1KQlHGL`v{}!x9QfWVI`qL^~~lQI!fL;TO)EXUhUsG zuQfQFQKgYN8#ayetkrvAlEwLV&8V*0YBS&4*ZU@R~Q>}I1y@(I8cn2!p{ zz7~r#r45~8bqayO8+IL00sa3dU48AcuRNn#l7U?s9c$z8Nl z8Ag8$Ck&%8lTbB`wwr|7VYD@*Qh0DgA;V~kLOYB`iAHTjJB&u>m4;D!aSy{t8?LN0 zi{T+cZU=?xqoW{o)M3{)(Bfdp5&MWWZH~7>84oh9qp#q!HDA$C6&s2aYHRCgW;sh1 z^C@)2O{~2ci;hhTPUQRCE=Ig$SfsjxwCAtKRQ$+yjr8b6t|p59VniEHcf*T`Fab zQ2&5UgT2{E{HK6X2w=E#4K>U|Amyg*X+H{&XtXJWQKv6_awA>n~`Mk_rtr1 zD{Vtn+IpqM--Xuz@z#-G*JOILVvT7;O}vV4z!9wAb8zvTemrwJGSx7AaC~J(w3b)!iIci}geROdjG#vfHnVlNelorVk=w`N&Qm+Ml^{w-`>8OtPH zVfWvSerTu>G~)+unxonH^bf9##%T@1i}b`GJ7K3=$A!$yx`!Q1S#{+67)T=R{o}`f 
z)~53T`&0$@YG>`%frWfc*}u1g82}7s)mwHyB*AxI;_qb}n(b!Wjm@gJRSroUCRx6| zxz!H{VOql4Nfvh^S+?MGhfhs{Kgph@iBYlpj%Lknqx#FTj19y^vw z!_C$%yBTVEI2Hv{aj@azcvC0PXlhKO+g9c;L}(dLYD}lrle%E26a-VFy!@!IG=9|k zNu=Hik+2rVM8dxbA_c&ij~%8>)c`iPIM)y0|HZZLNuk+6EZA%1MOyoeN&1zI7K zr5J2&Ugn5iL?0$v2eI(rV4`UaXEMoxv3)XnR_X_wl;E#S2k{AAKDXLnKeiciP}feQ z>`m&(ZkXZ&?9kVWSqUMtaMKl#MgR&F*I5AyY9 zcBwT0zV#=WS;iF|Z+-z+q+BhdVWxtP-8`7Gd;6yS4Jyf(-OLI+ue>GT$%$r zLRUP076wj$TUfWkH!+1O#6_-C;XARFDf}R|GKC+24;218q$iBp7Pg9Gmq>rDw|j8m zu`=u&5H5ikTb!|!*#w^}0Z4W8@Z{d5-U(rKYF?HN=jP|!_Q1Ed=3 zQ=`W+gDPCY464SlW@$z(t&B-AU+~4=WhPk5hcbtYWesC!6DsOm$ST>tQQqem+00T5 z{ZBHy@uUJnE1p@*M|8!G6fy20=!W(KzUE>KGA`7oe~tDF#k){8q}%n;mRZgj7=T4u zQoQ0$1!~tAkkpy}xHIctKxfvQ&V0#q#sjw#x&u4zUw6njsqTEqbfGF6%#r{9cHifkpB#PP(Yt?eC`+8by2&Th!*2FdMf@t1{q6=S* zXC=8dE-Y6)2r*Rk1`%Zg?A>4&FUPh9CkJoPo@TRmysB>Yjuro4$T2UB?y))OoFVTCZ!<`()?prXw! z>W_Un7{A=8uu!{Au!*#2W6KNJCb(DM4Qk$p)Y*qS-1wxwR`j>Z*NR>$qK~zdDYKI< zi89-T3tbq!9bCBDOMqb>)Av%Mgq$~wpcsnB)~0=oEhe>UK>?a;1;r-DR?h$O|Iye2 znkrGj=Z|Sw48d?QdjWl?NO05+ELol;Nw&nj9aiW>kK4&hUyQkr6tJW3*B~AjN|<3W zshCPGZs7=p+^|3=cBrXL{>HAUgfWv)Wz-or3A4&M7;*{PXcPa@%6$u$IiFB`s;Odh z5p}e~_<6!%M%X%|GOcadgH1;r!D~SN(1aZ=#&;>|rm)lzqv_K`&|qSnB?jnhY0a_z z0{&U7%Rt~X0-RpVktWR+hzT=W5bkBRs9}LB2B1JM^5!OE(5MlrCmR;CtB-xo5jc%w z$Sd$s#VOjrlDqS6BDOqJl&6l8W@UK6&RQ{8@y>(=3-~r4CTb4jZEm=vXj};RxO~cp zIaCt=R*23$PKOm;!QA4Em}%FOS;3emI~Ehd_U#IxW%co+wWLMui(CDhIncptW|raf zQg5eFIw9*Wp?BwDFF@AZ_!nf&^2y1XM^$!GTau5dWNAy1F7z3Mg4Sq(aqcujt6R81 zRV**3%60%^ZafK7Oq-_b6kJm6%8w|C54_U{A>+OqQ07L!a&yeP6c?Gz?=fS8J?>V&hl?jh?oW!<{u1#s3C;;g&& zby{>EdeQwVJSkcAbCs-;Rh)kt#q08Os+C_$@#m$G8cnvoX@tREeW?I}PG{S463McT|cG9BJ z!%MstwVPIHyF`bm>j=TouM56)8Fvyz?p8Ao4;-Nzhq374ZCKsngcdd*LGjH;SQed> zN5K91xc9y^_r2N+cB!Q0GPx&d((h#?4etRFBSOUR;V)n${m)-%Y_cQiZz?>gk`fxY#+Ve7I)c4PhoN6Sxq0Z)NehclPY@PkhDl*e%cRu{?Z=wL z`?AD1p3ySOYX_rsa4K_Fd|IgEAnj|;VU&P1l>M|_x=y^o-K>^doFTLpXTNta|0aj{ zpOkhLjovjPdN56H5e@!fS+?bfR{z#mmQ}htIX%9le}P3S(-`wMvUe#5^7gFom-T%4 
zz{IOhS*_y5-<}kH+ZQSMSWu88W=WWd`ef2zxg=bdXy|%~Rihl5lZJywwqkLpT0dpY z`f1=uyO7>OsKwJTp;k|u6(aYBls<5zJ*2MoHl?k@&tN))_EFoBd{>%q5LwGf_X*35 zbw^6fF|OnQv0hc;vD{~xs9j<7tC>EUneef)j%`$G(s4M{%szXg(mn z?I@Wr$A!Q*cr8QM7OEWyAh z&W^I=?>I`wG~?}QN1xm4J&Ho#HSQ-pL`Cs#g)mc>UaeDogk4AJv!7oxrD>At*H^0V zJBqyqI=b#C!(ILOax`Ex!)DJpk6Y|PB2L_~y_yIzg*vJmq`G?R*z!D1pxmDkH$D(I zzCn#|iW}b$H?C$k)2ya>+_313;ce4;G)${0W2)OiF^-7gL}eXOM^uDaTbZ|1Xh9bZ zEf_D*!tA_Ejuuts0ddkKs9B}8PqKzGKY%v&>o{v*9sQa$l#LZ?QS4G$Yky)Fur`4` zyV``wBSr&2TspBvWAlRMW<=ngn z=C+MaSRfiax-|@yh$u2XC7j$iP}JtupBdCSU+x$a{{{1d12v3i1oO?+I8AKr22IG? z;zD-KYCZ|*%6D0o=T&AaBo!QL4*NSN28+%u<_ei$x@L2m1 z9f+On(WCs|B*QCRU=3R3_tHWVCjgB}FPvs)+nEb+|G-a0_q^KSTB>*NA+8_?8YN)5 z<9hrglBskiTUVdU7aAJmCz3>Xj`sqfqK< z30;U{*M5eVCvKj}?Q8!uqf&2-W<;GvGn(eD2NI zG&Y5|phWpkFrsi`N6y<^DXDHv_}Q4T#Wdb~G`Mz_Io*US?-_V-A_G;p-sD?5T+IYaJ^N9+5<8n7r9ZGoAb*>65NY!V=L+r8weZuTZajt(`o7MoN{ z+o=HG7QCi-@Z46ypow<}&t2ocLA~S>lz0ME8-fE*)FvZpR0C7#(Xc1}0p{>Ugs;B= z&R-AHSifJ^wnvlN%waVC9N>T5V9$7~;0U3#jr2&QC^oGrVvW}$K#-(fK7Zi3T=%9% zAeaO(e+M7L7p%$ep{4*n-}+WUSbT&qLeg*1Gma|5dj5x5ewfPr+pyzL{5N3~oP;l( zMgarzAK+(K=Q+xpWegfGkf~#M(LrEP{#sNT?#8%3&A&5QVd+xqo>^=}+v#dBuj~WK z7#lkh86x!INiwDo5wk;R+^MWsG89Nd+d+ht|2GUVcfD2D%HXyXcP^oeOETKTz~*d1 zmKn@D9AkN_Bo}*3_>7yVisrP1td>gOJ?7rDmiFM5sBRHL*fjcGej@tO*awA%uj&2&6-0?Gp&yFqKerfPU-J}#}a@d6Bw}a$IGDf{hn@1XZ zs-?7>m3DwVltP-TeA}qVtkm01V#pqD@^%u9MyyY6NOZ`!AxTk;xlsQCx zPa!@HEzW1N;b{sV&xB_veWWRTuX*1To~5wUw?2A3et5X@J&_5IQ~1ktc)Y?#(&5V$ zKAH|+p>Q-Eu2A@^bQmf8bvj(B@Hgr3l?u0|!&jMlv-Sp#uB=Yivb&uQPgMALI()Ul z-=@RYDBPY7|3%>w>F~7*pG=3ZQ}|Rme7(XQX|Dbvd^#PzN#XC(`XG4=Hx7nxQFuir z{8xoL)8W5?x^Ib6lAU}n3cA}(^FbwwG;NtyU9liKYn~lTFLyVHdZ=3CX-*g_Pmdjl zZ&RgDr*uOcji$nPD10UrzEk02nebf-f1lD<%Uhi1QXw8i0Feq;EBr%BH@da(6`Sxq z3jdS}Pggje3g4&j&#CbJ3jdM{&lIxyGn~XA+>p_IZY|EUDc#Qok}~0^UW8=6p)D$tJNVD-WsP{*qh==;)TCLch}EJR#jY{z8v4OOH}8 zb$Den$W)w((A%guE5%+yIg)hQcZtrEmOA%?Z0a065*4V2^k#4KN%s8y@JU(~!c7@- z)L`>NG=pz}a_{&#@dYNX^DLVnWN--j3xo_>FnZ~E3HCIFhoZ7W_-I^Zs*Q3{o6go{ zB@?_@PITCmS*5PaY-b(Ky7UGO?69&AM$}`I2C9aCSm 
z(xA<^VR1QMG+>j~FRnn-Z-ZL1`ylC4K9O>{G=b@{-s7z$H_d~k?H zn^odK%Qu@84=vv`!yhVgFt@=HMkBm~In|1SVWsUwsuG<}T>ZjQKH z&qs21y27^`3A1;f!X#KX_n{Xt3UUK+y+j_K3^FZ98XRG34nn*5*GwrN;u~QWu zv&<1~UT)g^gW7wwi9Vy~YfSWaik@hqPb>O*6WyWc>rC`1MPF;8Pb&H^Ci(bg<*{dW`XRdlt9eo;}~h`7x8f}-y>(Hj)SMrb@( z9qujZQ3Cg+Y*T&Qr!uPWB^<0%*g?B%6*s#X*j3<>ftq(J1)wydRTpwd1_#BY|MbNtjPFk86dna&iQK6|A zGc?0`f_$@0v2(6T*<@2H?vJq-<3?4mftOIsWL#erUss9ibq{%i^AWIbLq2q>;9AxC zSxfrhU4{Y!Lg;5m>QL^hfp<)jafgHw?)5+ypo45z+BT(KXwxo2h%4<*4X+O^bJUBi zdcVfL{}eVn_+(JEpJAfkRrG>o&UX}^ijYS611H?bqj)#=y^U&j1F*zcRj{74 zpo^BqHCb)c+&0ewicM%h6B$}Iq5{TNhV2&jrHOJ>=pF+(M(s2*f8nb`IZywNp35&&eOk86DJFfN}3)CXCkJab3(r?WpNdJ6pMCKXX!! z8+~JJNglipZ-Ko!ZVaoWb=&|#Z+pV45@WajknWvhD{CFJ2U>EtJ03B>H`{sIRpqa=2T_r zA~&m(%Ft`i?A%(u2$fShi((?l7>B78c%*{=HHBp;>b?B)T4u|8C+e~B=x-d1#CDpC z)|FmL$l()g_^X`ldCfS{2J+}c6H~ilZ73^Dri!lrM2}V{Q5z2{+uFbWs1&hiyhBgh zuX>3d?o_^b@u%vprtkFLT;h~48@nyewNtcn5dd>^H4NcDTr%02O;&RrIq83m*YPE| zLTm@fA?PE2X&oSyvX{#W+E%k&kTxh@OD_6u_Sf-~UR{ct+Ya_p+z<=3&3uI5%d4aH zJzYNXRC~JG{l7(o z9-T#F12G`Z_R0l#5hPYn8~1L@3pq^=T29Nlj`_J{#9pV~3!pwC(chlJaSGiza{RB^ zd;YY7^kfuj0bbL4dyfkwqyW=F?cECTu}U_8ELUQ3=gTNre*-vY4=8p}&^8clDyeU^ zJtZx+(3KdJTTjJkIpWVDh!Q;eB!!`%(2I|xnG?FYdV1CA0FKY z6*V}G#XcErNa0^@zfb@+)Aks#OD=H{9}Ea`T=8Wu2Sadq3C>yN3OVh!@I@yKGrJ9A zW2OZ|&u~p|F8chHd=83}`pr(xKNJ`4f0mt&(`XBH;R}*OSXe7;tOW-;sy>|1q4;LV zZ!S6Uuk|4Mo}s7D{}c$dUHqck{a)l6*{z(fVKd*HPV~PnkNetfbnNf72?*@kgxz+$ z5uG(T*#9=%*AHK_zo?B4pMwLUIF8-J*q_>g7Kg$V91cCuk--}#4FmW_t4E$4~;=PKfH zoj<;s_}#XiCscnNzqFHh+}4KARpJk+-MH*ul^(;nS^4iZ^#pZhzqQ{D>bOdymA|^L zx{hrummSQy#qO!a6hpFJNhcSR3f_goNWzR-?N^Y{tUrs2y0DG!8E9FZ!-d zwu42KrQ6K}iDr*!d;#eTI3VPewSLt_g>*_ZNf_w%tNnWsxBsm6XETvNQ!h&BLsD%X z)O`MV*ZEP{rru! 
z1uPnJ?eXk=ONrcKi+FowF)z?I@=FuwOu{*HBCTuALo66F9bL{ z z6&(_xxQ`?hFu-d*zGg6PzFH>s^h_z4^gP4Pe0>jw_7=M+ zBAV)rYgOP$9RWS4P3F=p6Xg&n^;?Ri)hOhEb9HG0Xn~9MaV_I7lFD7`damttJeCa0 z#^Y%gfCd^*?C5*`Q9)v1A}AW9fn*~;R%q-kcj>JeQW{G>Ny(RUPlOp@(+Ki^e>@#u zW-BwEV`hR4{g-I~=Zl`iCD4n+S^pIwGnd9XgWt>j84uQ)K7c0;-$WM57-3+!g_6=Q zl77TeSJg=ZkVIZ+=|)>_xWeq98P4Liy{$aMP#X5okcE8QM^iW5#LLP6VJlrJKo=B~ z2F@fEo7_6oo)pkQF~u8rNQ|rR^2Q@71{DcpoapqHELk5CYZ;<3Y~}>G)c|5(R_7%p ztA-aG3(u6~py3&~0?2J;mf*ShBveL$M1r}tC`inOzs|{>R3<}Qo$mWUjKMl~TsRAy z(i$&MO^{mvI`Q=~j^R}P_&-f)qSvThz_ltS6T0W_TJ~$oO5PoPcmuKByIOO9m)g~616t9RG6J_ zo3<2$%M0(VUS9X&>w2%eX8KhEy_G*|JJJ*a1c*{0*osk08>Pjlg>T?S7^D}1RE=67 zKxxxjv_jEvshayD((n88JZqn`&&(uEYw>b@*&lQEkG0lb>sin9tmoHy7I3rl-~a;{ z>|wN7b$!y5-KJ{&Z~xZD$ZG9{B&ummJ6f@?=o?Uujl{`ZxiG|JVMC9BwMy5Bpa5~K?UBIq zWrkfzz)iJkB{y|rmQu8&I$an5=#U}u17{SXap%j(&4JvDj!>g-#WgaDsPHd>kj5#0rL69oHPpGCp5TEo{ zXCE>_2tji!a#+c9C38T#eQ_TVjpF)@FM{zovvppMe zttVK6#?#x+ZZAaYQKg$`zVaJwz3f3fS#;&v9=*$A#5dhug$hq+f4V0lK|HfH%CoF8 zclVp_>{VWqCvyO3^h)rH#;@gi=lpG~#!4t6omfz#2_+}xiFi~*c9|%@iSxtq^t$xI zuB{Dme$c9ko&&%IlqkIr-foJz@k>>uISC>BZNChD;Js1ziXO3e9<#=PikWo8qd?## zy{ODo7-y=rwvyj?`P%loBYv5(>a4+`3LF@Ch8Rl;fkieL`(UgxVg_`>p%uO#3nF>v zrQ%HvGa?}?YyP6yNq|e99jS$HPZy)a^ zJ{=}S*kr(2m`qIpKwQ2fn#lrgR$OpV4c3w{Z!`kb+aHUf#*K7slVr77Kx3RVmPW!<5glFC{29v+=hO-pRinyqDUN;<`b3A?S6g9bQxH+A zXn24=>N20VQ<-Qu;xKiFvXWesDUH`57p9Uj>lF>L{DGG3*E==U3V3L@5?oBOpztt3 zFg00uR<;=W_p%_PapglqP6ZqcG#7~g1msv5w-_WZ=B^Ta>BFE%t^NI#;%kY%hDsB@ z25}W%vubLMmfP)DKr+5q2h|8LNQS<$SG^b}4I{o7?F!@twbZTR056knz&9JSqzudl zE@HLM)v6pBYe(70`B;mY z0LMZ=i5#I!ysjuw7O>*9kgi}i5~heT;zy#D=F<|W*d<$$N}`p2Du`BEZWykKQNkGp z%IU~m8>M8atUBPRiS(qOkK?{Rz_QP<67`So*a)Z~7^d*BFwdGG@ZEJWbO4FKrk$C_ zKqZ6#Rgr!||6NTniGVS10JCzf^~Hf3d|?Puc>xrGZJ^<<&Aw|2=I=*QGC-qzvRlo+ zzK>9Qkl+7>`6<5@sdg&bK$fqh#22gt@j~tu2w{=&9lw;t10-n6=%hV%i`x6vdvWU9oU1acFqZpUWHMJ8oQ*wg>EFfSw8dKk>%&1$j*opM5t(o zp`4MreOF5?)+aWaBmwwz5f$D>uriwzyD`ylMK%eK7c)wAx(z4v9i(bO;P+^ycG31O zyUg!GX}(M!tMtG;5z=V7jf#(2uaTm7rdkAbMNny#J;=(WtOrzu^)=_x%Qd9OBX*?l 
z^s%T5P&fmU)VBJa>5b^;iqR_OF!;s@>&*|vm92*>H}s~msjZJz-uUzUxt<5}%6Osk ztE!r=T@vW3>*JRYV&Q)`8LXh-}#zlbH`buPFN} z2ePX3^m+bH?XK8O=ceOJb--r=GeN>8UDXKlp^*aD?rPcGI&QPEfL(!CTVXo*- zWovoLwm0O?UN&{-cECBc^@mAPdmi87>{5NAH*s0kxQrB5)IE=ofF~>4FXyN7qsmlA zJJ~wgFb$RPdR=OlQmRb&Mho)(>@%H0pJC}|+Bgr6PkENSa!=HchW8_Uzc=B76eG2Y z4OP@4KH)vh=cCsl7~A)e71S5a=a^RKhDQN*tbdXuGCi$jT`gIpGn^y#PUj?ps!e5U zZW5RedzTRvlW0X5Knt?NREmU8$Ir-CVK*usAJO9@JYJElOwZP?qWeDh-qbBTOtSzW znWfvw$kCHt(VM!Hg>gi3tmghkv7|8Zpky>ho}s}M*rDawh)qH3GN#R^V2u9R6tuz= z&~oNO1P8GGYP9@WGzIe_5DWt+)Z-Hj&`t!^M=@m>Wc6tubYo)$`xNw~x!b`Z^E4^^ zYC39Se}=cz)N|oxh5*OZx>goYTqLv0bRF1f&)jiP8x8J8w$QC+0I2K7==V}Fni&vM zRGjQ&R|_mEn<55D)VvdFK0CeAI7GWU;=t@q4Pe>KZ|t@}oH9U3*@UntY!a6U(;#R= z436aJr`>vdqoLXev@2w97^ImYuw6M94b%;RS|})3%NE1=Qvx}dCIIlKfVQA3=ca_w zjtCkWG9Cp(5^ixG6Ay)KI-d%#k-?kws5}p?0>c3}p>m-2dF5{MI!D15TQ%zrNA^#cq zghfYfpQvMCx|I_)8zVoi?9ddT8&4qisw|hsq!-XYpawmf>Bc~m3*&uAQzDC2nJi?n z8;}L+l)x9rVq`!T9i|I$z{nyQkcDI?o2#Tm7Dfx2FSi9t1S}@1;uEzmkOdQF09i^F z`gEBr=(-UEmICn|oG%9cOC9{o1>nCzav96v8k{IE9x#ywVaLJ}CUQuR5A~S{GEYrG zUE)-01ouhtqshRJWGFFP1Hkb~!PaW#i4d-21T?cH!BB&Fiq#o6MYlP`mnOxRhz_13 z*`EppzSJC=fab|2K+BL07ZJ6z2NS?t`UId-+0sDE*{I=@O+Z_h{8=;s`x(6W(p>@q zLc%E~0CG3Z6wt&3x@MrVz@kuG_yQbFihjGWpqel?P5bNvGg}g*LE>hK&goUij_4KD z{1WjpL6kXNA73ErE%aI{lRwTjzM@S2C^se-3eOpW3v_!liakqac&{Gsjb=%kpmoBr zBYZl4^r6y*jt98$W>sh*md*C9QB)m>)|o{3$Sr*~Vd8mxqqWp!ARzq>#k5DiHdfN~ z=OMjhBLhN<6WS^#GqfOEnb6fN&t^S_6+sAdXiH4WtWiDqE?f+n-H|A+Au%VgN7|&7 zm!j84lt=VN?-ov=;r%EQ6w)zgjDP#D-jm8&$kT7?7y@vjmN&NdVCa$5SGc9Uu5X?H z#2a^T0uW2lb;!Tl@9s5r-idp3`<2*0KmO!*PJDYJ(-RuK3cr zNSS~9#+Qt`39kR(#q_djMG`AAa|H=TWK*Hnn47fJzsB_^LaM6L?W8r8&cifuSw1Vn zC6T>PxJph6eL7~v1*-x&ts6oo&W=_|b4a#POaNDuezn%tMod0aO@I6B_Lcn(m>w=4 z3MQAL8B^obcIW_#yO0@eQl0ZLAVR%2xgFL_>uu@eRzQj4W%+=b+EK&kV~7 zUyNrK|4Y*{BdbD2hVF{G zhzDsTDo*JD2QrEoOP3eJTdSh~V$+_qDFxPHvO%y8xX4dx-ZRir$W`_ErXS{FUJVXH zLiS3Xf;{>TGFbxoqHk&l7!f<|{Kvw*={NzeUDk;6g9yABDnN8Y0MTNv=EfS@<#}!3 zs`XU_M*iRW2vmc`=m<6M9=MtVn63YG(|mpFlQ+#vhN>WO6H6xnaqAuP^AYapt(Ei@ 
zSVnk-B)B87}TqYdWy3GQKa4O{mL`CJpBhzc< zlXGL|O-xoNCs$5J@#JK^(VU!|9O3`gWV^%9=;Y+s;F=5sgp`<|=(BQ%OiI46{(|^ar)P+M;R$t5^k^;ys^J9fgQ6NLKfnA)f

oUEpW`Pce?WDBmfiUa5z2=>u@H`A* zZTe$a<%4IhZs+XQAvo(N{jlS8>(Fah-RRi*INg%aE#->3);mTJEtpy3=Vfct7Z&Xi z_%I#+R4?vE2|d!yYkE-jPf-WC@_IdZHER@a@+TBD`35d*sK+tNf$#zjkW12kuVGID z7{h<#E^-4z7|O3?uu?j@QGR|`EK>l*jE25qcLq0j9QLuIU+{&tR$4(YUo})L{SQtc z4iF%8zASPI#vDvSFHT>k{;ck?PRFe7nZD}tlUaMT9d4N*jdBvE$Pc+ciWll$KPHo{ zMs6Z4Er;ZNh=fG%2y5G603p_njOkMjwipMEWphliJctuSV+kw<#ZeozHnT!$O}ek5 zq;0t@J^6@w6pOJ6Y*sg4sE9vvBZ~kqO}`?cG2&Lw@d~^kx`MaZSm@8YqNqL)dFh1Q zEIrn|B9;a@H@E_LXiJS)qT~xOM8R|(1OiAU=K>L_eiBn995-w>RX=O^q9J})Wrd)B z1w(afKR|ZL7vJJ_arMSH#QVU&;~C)_ol1lqWp3{DHYdnc<|X z)-^?8a6&HJTp*Ev1yp_`(g2OG%2p1gQiNY;B+p}Y%I9&D0vCl#ap(#@e{Sq(&OAz= zyM~6eW)A!w@xYUupFktZcOHSq{Lb0!k0wz|6R6+>#M!t%gL3KsH?&j)eYvCnpiZ<9 zMY%{FBwABxP^PNaV3uVz*I5n&rg5^EM#bs(YiaUl<_sweS*)VAbFkLiV6~P=Q_=%G zJkmL*_+h>?s*-cOlKzCSD?oSSDTzv-Y@|>RP6?h_zC&;#@(>G|Od*cpo(zT5JuPYm|%bs)l1= z3J6|JUoGT77%rW`qTd2UAO*rHcmqHHPqIGFKmwsTtY51~`^P~9=3bF$74!HJ?-}8{ zr70*{Eka@#J$^dB@%5I{35SUuTuuNT$b{2as9@?qbfx*t^V&<-UR;UW^N~v?2#}`Z zEHmSA`k!zlgS6>+AdSs?1eo{$i=IuOi~4|fH5Lk6q!iYNVu%DNldbgb5-Qrjo^P+f^e!h_%)NPx_C$j{g66L0F$^L`~7cVQ2x(reLh`j2Hy+GsBaaA0v&rQ=?%l^4^<9 zzc+CrmP+$K31U8#HVTGH6)KQKnVp(MD(GRciI#g@CwN8~GuO>TYbW{zdW*RVvWAS5 zk_Cpryjb3v&(w5|$VGZ|!bl=;;oSaw~yo zF1lGX(!cm%8Ap$8J~r4V6C3TO2-LLY`nxt<*2eGda9QJr=e5S+8w^zr2&ASPHBhdR z?TRr%K<3UYPpRx0UdIYAno?#D;$AwJ%(**|HAq6c;2oWT;pWcvku&0PcO7eqlRl@_ zUgm0iN04~m;R-z=hAW`Ms2#^&l#%cjDZa!f>n|0OMId1-{f|(;3Cf_W_(L0clvNAD z9|1k5wUGY=NRNS^RSN3m=b~OqTDYa@Bm zLvv${h~oK@gh&V*mc(zL;xWu!T)BiETq5s5aWZa;Wou5U)*9kq1jvd0VI-UuPJ9m> z5OByAr@%QuxeWt}=uoOte%Nj3;QeCmA%y!2?5&PG8zT7aqIPm;< z!1E)Pvzk2lnsYV$lF%@huR_q^@OiGT1_`*9V`L=jAdbTyWoBKdmF&vbWpE-&$+Ae%!>2h2BRnyes)Iz zt2+G&5idvY(7{?1>>g91zF7_EgEgPPaQjJp$CX#EfW9q2z@$|6~!gCQ-Dc}dJ`J;FAw#}`&k~jMr=z*lYxtR z7&u-MT(erbG;ot#!mx?`5Q!30VB<7GT{1LeGNOT^?I5`JD$v!Y4h{@;N3y!06Cq?N z>H+e^rMC>#4%p9<2@expOniUBZKhxD^Uj2~=p_?A8Ya9`9=3!xhRz)5*fPRrmtFPTxprCxm&r8H8MD9f`q29VW&K1~!v3mzR& z)n=bq1b4n-zU2#3TFpfTtiZFfWA&N$5=3}*Dc@$PO#5PpE2d$H!TQwai%U^LOiz?1 
zf@isfSq79CQ2!Mr>Nk{l2GoCrP@+Ywx>G;Aw}cjjfHU;zp-zFjbhL~XCnbN4eMDn- zQNCCaAYvaX_BMuU>w+Ogd30tjlpq;60NpJ(fGWcQ1ff%KfS6TV^Nzs|To9QUE||D< z@6fZh7JGz^tEzm^5?zsf7vG+i3nm!rtE6PdlFMWPw$2l?D|$p5C& zlOYP&wb9jx`M8S|86Yzt*keuTC2}`)J3R|+N!jl5(;;Fe8`+X%)FUK=Ll>!6R&6hE z)z-d8tU*wEv3$u)6+R=A!LpGD>bzPfsK<7m6x|ANgWUzVnd{#Cj_flLbYDpgC9`4J z`Tc~lM(Jxb*q@3#Lt9$jqfE*UoFoV2*wQAP-0OB{4+c6ODgoSC6{qsBc zu3BX;0@*ym&1+?kzn(^SQC1y0LC0csEI_3nHi1D!;CHy(5)I5$gk&kNy`je(UIc@N zs(?kEok@V=ja+StUTzr4Y7NqnkV9`V&%@3EidYVMk$GxFTd(pjT+I&+;;})xl9z*U zFz~gWKN^}B4ixQQGt~ZTDRqoeU^g&7#aHu97gunj$li?aL?6c=qoqwP@NSnn%FCVn zsspd-wK(lB-27Az0dpSHnZ!$zasa!1IK7D?7@&Ii(Cuo<^hb!$hTZF=pEcLcI#w#m ztww}2_Xw$Gta#c}JPY}0VfnnDEEIdZvKGdWi+UQl5bBXlKykbAhCWIc8%xGKfz>Z(!ne_!#XdcKeEarQ4o zqq+1xM|h+##&UqMcqLqFhg3t+X2T=t4{&%S{Q(*hs@vv<%4ldo4K4UbOCb5@v~O~T z40z+&n0bqeVfK4B-06PrhC4kB{z0dmuGB$#4rLjF**rN!m>kw5mI)ZbGy5`(%+blf z<**l4Z;C#ucmcm?3o8{U@A$AEe2{&B@P-yYT6y@fz{|kmaPt{!dp>Tt9FB)Lzp&>m zDRlh{7$g;Dy(W)%$l+oHqhRQa^G(~@?Atd^*bi=i4FDn#A!H(Vos5f0uO-ztJiVv!CN2oQ*Ju{nWwesy!up| z&VP0gQ^V4vHJR06uqKC1=clSvi*#r8WA3E=L68f=Vm_pj8XuraFY8>9Gva0d}L zLmQ(`C-uj#$<+K^O8tR^3RZ&8Rc#Xccpi23Wg`LJ$?!liz-FeIX9b6~=u z@_ccx_$Ffn<@r(&gXW17Bb$po#VD-6J*!@c)T!wRQ&cZ-uMwt5zU7ji*i0Onq5|cx zC>hgnXP~B4Ju4i3T&}|t$l!hD)vi#eZls_{qrQ$Kk5gYXpg#BZ32_5kEAW$6uU9~X zxpv%lRzEY916ORq7=c zW4Qx-(%joev8dN_yq0UJZdu}l1*HmYg~q^BwA!P0x{4~|H1d49Ly@=dw1*;kYBEfQ zV&-x>CJ~2`NV+MH1Ph5wf0j35Fp5&)XLl6x_MP@9M0HJ3ovn02M#drYEaQ-Qwmc5X z#Xcj!+cy|Ysm7@WgIz)1K3Ez?6Cq~v*+EPK6WGTdxTztkQ|AyU$Y5iflmSAa3?$v& z8#9a?mxD_fxm_{6hL?*L)A7u#Ah-q0;}tx6zQEeF+XA}*><PESJbR%o zR%+F%bP-jYZDGb;hC*rg6!gjZKQyCwGmPI3%_ux5HiVV=YLsY#-nfBQJJ@;?cOqdJ zWiFaaj;3Z5J8>WsIruSr&nasfCo5b=wj4X-2eD&F#~n)-b;VdKJSO zmq*G)a}UDTm>1156zfdMAsoDMi)VRriDJVnKW&QLtXXc$8)p<7ym3!OvYK}56eLSW z%QJmalGWH78apJ*XY@xShwyx;?gxf_D{6>&*60eNHIc}?aZa^#x1d^8hH6#q6jU3t zrb_1BY{^J#cpjuXiI)=vupRn0FAyuAd3sfHUPi;L9Ws382gpv?kI(+_vlX8`_6#*< z>OD=3`P!1ke5&YdEBCzuS z>`*#rEB)_*qDNUfq@E9yZQ+tD+nCv067MVA#2uoG%lBklrO>;HJM5rS@uE-;c`|B} 
zXx;Q~Np1umg0`$Hrg0&EFROPKMhqei8=kAe^>EfS-Lg$xp=L6LcBPfa#0@T|j(1p1 zMDw<%*{E}xC(*TiBS{Onz20X>T6PB-dI>s(h92S#PhUe10f#5i&~5Z(?ffYsz)wp% zH`Sz=0OsPA3KO;I8qAXgT=lhc-fMe5_#o{ZosVe}i_;{zcce5)(#`|;L)hmt(9W%o zh3H5huUGa+!<<6<;l9xJMqtx@?R-?oNM~6il_+D>wVN?%H-%=-jc_7p<|Vanq!^Si zM+>;Jaws*&Kr4i-D=xRM z7%Fj6KQ7mT9y5>2CBPQV$kc;+AD1D)eweK*Ka%Lnak(;-f#+iQjc!{a(QpOgd?Y1F zMTcnlQRIlQx}>R}{!oTA^`{z&=u6DQrrbv&=@^VKWvVn1Jf+gq+wy!2Mo}vK?2ckc znR%*Fh!XMR8gp3`m+Qi9X&jV`dp09MQy&aQ!X6_Jp;srJlAx(SeQfmTyz6n@xH{eq zk@`g1nlnI{0d8LFDEeBrDP5zuT*H4N{!Gs!@$3Lsi@4l6n5=w6By(*n8<$&Hm4fgs zbkh{CaIJPc#O0c)i6l<89_u6==r*XhB=SJTRlq!oht3wTLf}pzDU9+&_@q@pOTi$U z%s_j$xZJ`b3nne<&Cr*J`sDpAk6a_RMGXRmz|h`7(e2XD=tFa{_Kt!t?Va3EdY_F9 zY463*(B8E?ZHL^Ew4}X{NP7=;VS=u`xAfjsJSW$J<2ls=GS}X(T{7WeqKk>|Pq4$}KIT0l4a;Pc_j<7*Io8uG!*R zFdT_qcIu-|_0=%HW|(SI`^H%ztWasHLy?nBwJG-=@d~diDfgNtbp_wT;Il&=S|QG} zSmk5L5wp#T1AfzltTcYehGpX?5;-XMtW-WkkIO}GU!>fR@nJ(_cvw^Bq#`cYoJZg; z8Z^S|RUU_AjR9nJS)V3kr)3I$(`P>{bguqs_XVgWAx?gxvZ}un5uRPj2N{NGUktGv zm)o~sE}}Q6N=#3bCM+i>2TISON+TpvrInw*;J&12`&-0j>fS+$K=N&Ug6{ha1 zMT51NsAc$m*+NhU+yPKo<-100Hda`@e{JzyR~F!#r^ML-}4d51jre{uP=Ber}1j`0OePi&O+@ z$)#<(|7ITe7dHfvC>AiWbH)ffvw0{9TiamAu;n2!Ohjy&X)@@ySXa{jOA@qDT zfiM}_8Q6OpPU>j+NgbRgYbSMZ$OdP1aLNYu^p2T+3B5`Dv<}-nf%rS!YiX+j?z^l< zElhW2pu@Z=3o^z)86-ytp7F=z8Ni~Qb#y>SXK*}6chZMI3>k_cNf*~-qqI8edCxt`AeR&sQ8vQ^ti?MD&^&z*b)Evyb5Z_%u*d#W};ku$Y* zqkEPpN8x7^oTPk3cU^Xd;E0+&*ISdVyK_5++I4m$$6A_iWh>QgZ^dO$F3ktaq6kFjvrG#-D+)oB*hcD^oHb>O+_tTe5hr;bSs7>Tr!gU(}Pf_ayY7 zw}L+KH38i!wpCF|^$PDRp(l1I$8a||Dq2=3TDN&nr&_bwS^0~$Ygr`A{Tx$(Wfb$M zO#vP=%{hv9$9p9V!>nybY>YeNF;DE3%SmOg+@lRr*{{8}nC^E*5*a%Z-KyCLr z+cA1ONGBMO@IoN13_v=8mTzYXXpkfuSIe0;Spio$AJh-)VSkT+!P7iQO1(J7Ho5M2 z3A#k{$)1eE&Y(LU&eyP7la5w85>w5N^9LlB{TLgCeG?vvND7baLtz!-I*C?u>qaT+=5YTnk0Xco67r;lZ4mRDcvr{z+7%DW4 z`2jKILNLg$*A9FA_fDxW(FuLCbz{Hd_%_dj?IkxLwGcV&fAC=P zyOZTxnaw6agU(gGk+m491MUhCqIYF4(MPRUKJBzAL>`mZD&wPAYB_UHopS3CdvxiX zhi56Zpy&i|pd-H`1DRPpBmD`OPhZ6cDuu7lf3t)%`E+rtM%wKvXSL^1Hvy$vd_!A4 
zYlMO3asN@wqbiw&$#P$ms`4lSLgX(j9yNKS<1_QL|0vyVx$^U@|0LV4Tx5b$VnodL zOm>=1h4dT1nIw;8%NaL-#R)?j)Qeq!xjGA!6`OXdDh3y5%-}Lcd>cV)Ep6Q_cCRKy z&crOvl`7P?f^OE7S1bqL;Uaaok$$xIfGHc9wm zJY%JM39q#QRlALt>sC5X`2D7++Dmjk*v3j7A&!kSET{avdu!{4Uc&-enaazR&>&%z zX>bG{DB0+tz7zn#muS+`Tn>lOnn9_LQ?&S}o&j z*!%lRO;{o)yzl$f*@JRf+P*2?Sic#m?>$s8(WgNcu#U5DR$tX<6|(4Uv^VzW6A02C z%QUdcnT_=4nUh7y_TL=$&%CfyyHp5AFtg?L_RsC=Z+Xpc<3<~kEWInxRm$7wS~?H@ z#ezMjDFQ3Y&r9Et(KI)y;7ww|Oyw`Rz}>tJG(1`hBKFbCZHQK+mz1M8ILY<8`*P znt4RzVnZxPYK(B?2KS$+%-QW4S?;6!;o?@F&?x^xMSGQn;Q59NBPDN*@+~}4o>w`a zl|jpbMf&fV2vW5A=PT{A_^f95W#RBfMFl)p*w&tJ#v?8ShLkK!4FTPpA6KLxo@5Se zV6p+Rdvc~O)x!?UB{)#g@aN-(Apq zad&Ndvhvw7g{yO-d&HfJ0bIm$#hvQ(=KPK~lkh;9T8id;{_h)}u1lQ_P>8%F7w|*7 z91$%7+rSU2(=;LhX^UZIi0i^LZ)e)FgD`v&b&I1Dg{~mjdVwa-)jDQyS*;^2c~9;O z&%vNq0+IY?R5Ces#M;K*Y!R!3CkEt}dnnqL)w?PUZ?vARR`i)RTvjf(L&h}19%&j$ zMRbnR*W$+(fi#7rp4&(><%Ps>(jN$==*6N!bjP#&khXtTFoDr(O;-DLgqYPj4G`vt zr%m6}e2Hl&7h4FUMeaZnH3Y2Ydo2yc{SnF86?|hE*(C~S^LXv5wc@v;ckx`U5f-FV z)!4P>k(m+zb!nat>bk}Smz<%De-5I99U#ycqh|H&>WBsl1K~&PxBYujKfiO4XDUVm1s9M6 zJ3XB$lhBtTuPDjPX_nC;a=bz8nW>_iwx-8y;SCGAp4f7yb)6l&Jf=Yv1cva1!3VT~ zR}IGL&5?e5KfaxZ9ljzK?#pzRwBJ{Y+S>O{t|RTcwjI8mg5wT$`kpkisBLC6gLo%w z-oh=iu2%^WxT{&6xdxe;i4hNA)I`}LBEwIPF0&Q9ba^ro+P*8HVCb z7>43ahUP=zi#8u2d~qQ%Fd?lL5jg`lM&USH-P?V=^ zk9|0tQ~zdTaqaGUYyV+ypL+RGyIij7mqOL|hN``SXf>)GM8IMc={57`&E@gS=u>?z zkFG+G@=_Y4)$>BDIaw!;EA)=KN2@%{S1M@^jJBDR(^=ilzcT0*01_W#1w2RWb1-_1 z=}5v;vFK1ki&`w0QbpIU`|PzmGZgD`NYQ0kj}?{H3h2lA+pT-OiS*4;ccVHjAkLF; z_p)r`*7_?jsql#2K3vtA&>Z4cjVhJMbli31x6(J&mHzh@itmm2d>qkybk{AJka1C?HfHGyqLDntmJe0J%=TngAB^CVJlNLNm!RK0E zzTkaPw?4ldGlE$;GkPARcFwi=IsYA-S$B=q{rX@WUn`G;Gt-QrA!5=m)T#&hoM!&3 zR?5UzBMy#_D@da^DQa1BxiFJ?rJB#=qgG`=_`3`E*9?CCT7cIfQKj$dvkxTQHB5CN z7?~95!#WMlBo+6uKh){=P&?nvN(A z{K&e6zBTv&Efy{P$d%Av>2r2hG^dVubIvj`H7@$#poBza|KNW6ptxt~mkrL_>7Hu{ z>T|$zM5=oef7s1q2}XJK_1x3La__6@n>5ZDgp4H(8@4yn%ot714MK+$kHL@-|&ekjwp4bOG zq^X!HBa@{*@c2m2K6Gq^s##PP|1uaO*+1tq{U<|76{N!h!?gTfihlq 
zX7ddyhuWZ$$n(eS2r5L;{MPrH;`lZecPj{^H90iN)wO#1AN>1t9JZq5tbjmH2n_fp zTNcrn^Bnh+@5Et|ozb zdyZXxS(iP#{E{xuvdb^(@*KN7qRT0}{DLmm+2!YTDW2D3kJ0_ypwt42TGqJD%7vb|zdn!}5i%n*r7x#$LLsxO}%? zp3UVi`K83qU+_zbpS~zc0o_=+sfv4#d4zwBGY5C!E}Y;OVJ@FCgGQ478R(bfH*@OH z7yMMSB>%vu9zA4DIw~fcDqpF-pz>Q}K9{7!#)~BXAPy1k^GB%qMmLWrIh!Eyo|d6+ z!sQ-aG6nC}FR;5szbmZNZY~9uJ&;R5*1MHV-wkfJ>2mDi%H2x0zVhPAUHY9w#LzFP zB@XL7UL!<~MERZC>D!I;FJb5m`644(j_cW&z9Y&{%-@`{^^)fobkESw=I-hMdq%Ve zXN@^15IvCszVJ71qA(2$e_lyhUd-S{d!g#-a$Bl9y)g}EWA@KcN=4j?S zQa#E~jAKWc_v_squ1|!b^J#CxmgKl_=lBHYQ~lFh)BHf3Zt9;~m>+oL&1rsCe%lA% zOZ9x7ZB?~&bN@B1Q-6MiUagqTKRW-mG~ZIH&h!0(Q~FEMqB=WOXD4<3UUhz8xXyv) z>+DvY-PHNOFHmRWaGiza>+DgTJ=A$Xb_7Wl8<=1zm5Y_%rWWNzrAGlwi43<9-ff!U+}P?Q;-9$x4_-+{i(lU&IPM8>++%R;kMsUd?;1h`Nr$^^oO_1GDFL#KAP=kFfTzhVJ%U@` zmXUq$y(QhK?9~U9`KiVK$}Nh)1=+G<0W;VavGlnKSH&cO(R*F$`=b)_wesY<{c<&z zxA>*IF>_p22RJ$amP)w6N+;Ot)O#`Cck2C$0S<)seMb8mdS7Ci z4X6Y*qs_JHFZWqzsMTk+A62b0%dIMQV@dhF>V;VCUo0ulzR0EJ_o=*C?c*xnF1K%^ z>a*43dcShX`#xLU^~uGf>MZxY1$|E}^>&pX8V||Pm-equ@qe(SJi9rUmhW?b&#U~> z@$@;sBYNLhGM+wVd`<65EHKPRpVA-K`!WT@&fuWI`yGew6Fwa-;nRHqpAI`O)2tst zK=?bg)EV>y;x}HV_lJ3ZLhmP+yg#D%M|l5>_wv4kPoe%%y+6wPeR{v9T;Ey--`XYk zg<6m4-DA|cSG7vZ3aQhP-3Is;${$ntW0Ze*N%_`t<@;jMcT|47+$$S(pUU3xKY3rm zkMO-dmEEEDL!%-HXleiYRQHD}KQx}&a^?H<_Rx~@Y!hADexKe>sQl9L^yzEYr+8n& z$1t8geeKcvRZHN}r>_s{{m?9*v0Q)p^z|u~AA;ZN<;wTz>!Bs(NpH1uynXsQuJTLA z)2FW;;}P`UST5TzcYUQsMFK~qOYSR`g$PH*HNRdJ|uoo zEtLd5rFVy4_eZ{0@6!R~YsQa{8b5w_pC5->{qg_4YE6`Di61X1--pMiRlWp|&|V)N zf2#K-c!c+Tc>I~(5BQC8@Yuq}$vfZCtG})E*ET!Lbrd@#i{ahly?Qv8SmMDG4z9-q zx?{Q~2Exiv?dJe0QObCg`KY6%d}7fADc>u!t3Q zcgKRqFcCzECc;UB+^r9Pi*og%ZT+hKTUtdxp4zWks9iNbc&ExL z%C1JuThwM9i6*ae_sx&k9bebo{yXdr1z9EE^H=oD5P8o81~OM`cW~LVOR2NBz*41^ z&g+-sAWmq0(3Ow#t2w+&&)wd0n=X59AkbxIm+#c2n||J*OE=rRU6*dQ*`-T22)#|0 zZV-B_E>~F#Z_(wMcDYlRj%oDacE7uQ0{q!$*Xxsg6Vi9*wXjP(dcS73q`qtsblu5%{k2O4Pj#vb)f8teO2zO-yq zvsKHL->dQxqduYX)8+Q3mMgzc z2r+k}tSSx|dLzG8Y_8Nh*qn@1);B3DPu{`%!zGSB;Pzrzww#JdChvU;Yb1gW)Ajxiwo|H1$9E~F+YNpDI^Dj@rq2L%vWxzok%Q+%(o5+d 
zv5ZY>E2I$%IF54M0Y4O1wjQqB(A$u0SZ2?MdS^z;L}s;IJuw4T*!EHNZ67sSYQB_Q zfrO}YSlg(D6?3YQ0!`Qi;u$39`U?D>bbVh3A1vzv?2#~dh0?BMDz4wo=(*?LTDU>e zTulLNxvN+ZN4521w6yi(OdrV^`e92!vPPO|b_N4I!~GnJ7dk`X;COQ?%Av!zbhUy; z+5Rz3`N@Q&RoNtAGK7<#G1&ew2FisnnTc!#H;N~rnQ9j!ncB#J>}s4gS(cqm6#fR^ z0|55TVJqIi4v;DU{o$Q=TJ~fZm2Fq3U6!@N4iF`Sv4DDXYum0sI|d9I$DT)ERZ9?pO!x!ffxec0%8^Gs;H2*ns>Gc zqt~RA`jMTjAi!Fd*#!9ufa6<2SZU_4y%PvH+goi@!FlxZ4C0NM9QMZSmzCszd1LnX zy)g%eyfG`0jHcrQE&2u72veCR+dG02KTI_HEpqmbuq7euuyR~d-vNSvy+MzH}f`BfU&V()Qhyk{^vrKl!G&A~WmpE|E zj6Ur!Agf{3SOhbO?SEWoX$kU_#1PsCo>XE_h5?@FOzxRuIp){~LGB#vk-`YH8Nydbn;31nxK$f8Q&!K5GHK&4jBGydT6hb+HgdS!u%o?wOe19iN zO-^*Sxu_y)HB$w>i4z9Sx>r=O&!}Q=pDONhs$k=pO%4#JQ*(K8UAiX%Avi3v+8L7< zRs6K2*hv}7LW>J(@sPE+K#T9?TbrW$yhUFPqRU1(JHasESKPQe9Up1|AgmTG1igIU zgYQ9>H_sWWBy@Hw`DJ*!t1U6`Hq8$E!WQ&R*5iZHXYOrlak$O?9&FF1zp6sO>Dzkw z$b&>de{s;o!@5k?S8i)>seCzH-_G@4hU>ey{(88+hwE=~ou9C_58K}_lEAA+WHBS# z(J_B+7#TZb5YY)Z2+mvLK}Ayd%9ywH@rm}9xC3#rbD}y*p+1#glaxzxXy2kv~tcy}OT&YnZ`>u}L((WbWh$wqL`P zE}Q#$D)csN)iGJ)SBoWtS(8W5oI@tCLu^ESGr9xgY?Rcdy8g^jp_n-yTaw1=XcHUm z&~wJRf7~2*Acr`WJdpeO24cIp5{oW}?$goGWf%Cp%fheyXkNZ<+x7_1kAzuOSzv*( zZ6c6wuq7T;UE%_-=F7XliMkxPz^g$e!UbL(xWKCe7dVo*DKxmit3l(z1zsJvz^mp0 zHwQTST@oDN)vWITN7l9$j;fk#QaHdZuG$^o)$BYgfCIdm$pLQe?`m*=R|lT%{+5YD zlr*n+Y!w;3`@Aha92a=C@O{IQHn5dY0KyKZaP~Uq5;^|W1O8<^6H{S-iZo07tmx|y zp?uI6u|w!9>Ca^A|puSsHL z!K-vfq~yDVje?C8ZG)(~eP6g6lTK=fj32)aId81o(-AjwAuf_APTaqha0Vo~)D&a& zg2P5DS;h8mX;B6~v@AM4wnAsVzB!7b8@9$ebC5PH-mbE-a&tsY`VjJLPe^>XpQ zI#+EP?@F(dpF1};8aJCsvsrC6Yqe;k!e_`PJyx&rPrcq~Z*9`d2>-YEzs>(u{;%#o(#$JO?S?GESB z1$y@fcGqm@e_qk;7wopt&JS8U^S`K??0?*(mk-(<_L$q)J&^DH2fM`}^HJ-^Fb(?N z9sZ7K(D%Oh*KbxyGmFLfXMW#GG6`z)3o?XhZ<^J6;(yvZh}!o^C@h3{u?|uGwmX7l z$!0>}WQEYlK6VV?Bf#TEYvCR5xrz3c!p@&$5yUq))^?*^z0n+LF>b;CZyz%#Fzz;Y zzx@7U-0aB6?|IZl%DDCJ)81po-Qw=E4ok*8%H5OqSSyTsjJrQ^$TDt~{Bwr_DbDeaL33KDl`NA0Fj4#mlxbqy}X#AUkK-`%U z#vlRS2UO$Ed0z7E|7dqT!9U60cf?9|g$D#v2@4UY{M|o%&fcBtZQki$ILF_;`xv+t zcg}9-4|wbA{Q0fk`dV-Ax4q69@5iloFrRVftag6Ke*4a9Z~2I~aHi%f$=~BGoZ)qD 
z`n>f4uKD#ZT5UL{#r* z)W7=+m0!#u-&z-z>O1ehT_sNs_nuj)KE|?RNJe<4=Tl+q^!n$0--eBjxBJAS;?4%Y zGoBcCzRzdp8;W0xJJ0jGzx->vTknwm-~Mje?~HrK9cV$&eC#_0HE2PL+TllR{-FiU z`0pJw$UzIb`v6X}t z+T8t;(-5?vyFYZ=gC5l86Q8k?(1Y%blgAzC;h^Iov~b|}48sF0eCQ9XJhX84x9vG} zpf4KtHt9jVc|@Elmh_-ITM>*Fg#9}nFjxdyxJMk$XhD7Z&R^O)XhHS9?6d_f=x#xr zF-8fs@7c-~Yvt0{ZC#0dwe$)F2J*ed8{>;7rJxE6Q ziyhV%=;4o?OF#?na@au&nnzo|Ol)}5hpZ;FpmyH%TUHZVxak|3mBqBMX_-y#k{Kiy z+!3*aE!yV(Zm`%lQzIC}Q!H|n?jbs2z35fbx3b5|R5}aVN5Ga?&CbUyiQ@UuG0_at zP2GxQVtdS&^7lPLprMS)eoB#9auBW4x3vdPe0Oxw4BGEI5^b-GZEeje?Ws!sWq_2w z`IehGu8Z%oCor$(oB_JlU4K!!x&4SOPps7WN4^2-oOB6(W~A=5`UJoJ^a);573VG% zbc|>bWIn4NifFaG-}M=YFVO-fvF3@tV=5CZU=jI;7)rkZphT8UqO z`aydK(W+;k^)EoQ>e)fRgK+iklYR&Bs@_dv>PZLURhwV;HjRMe{A2gq7g)&D>p$^# z5U~34CBI`aGf4Oy1gyKi^g9+a-F^Cijll?*cehxZ5U|?3T?<_z0@l}$e-x7Gi=LnT zfZeg6>D}kPWpIIjB_O@!H?1BEnp!!kz~MvzWE$-+iNH+s)ZM2S>>UKGl3zMz?;v2k zyI;$5f?37heL`%^L{Gi@l;5$a>F#&L`b_kcSa(3|Svx80-NRbEmWW{Qmzas%uIvj_ zm1G8Dc(kI73sCfI{`^@;T2Fok(t5OV4cqjt&ENiKZ%$`DzcvIFlZAs%Xmdw})D1<= zP1MXcdwvy69!|ia#ifm$I}7ci?T*jR=H09^*QY_ADPKzaKom#LN7xE1J`!_==q{`{ z==>r~S$rzC<)4cxOa%s@-QSF2OOmRzj4vX8{;Zx&-`=*QA^zS@b+KZ+&%g1j?59wG zi|hHY7qpr@%I{s+Lqjm7bfGbLXCL4@GLqf&KHG`0sVi>9py_7R{(F<-Bc|j2uWA}k z&MaUBI>B^qtTV(4eAYiK(alsE&Ft-Ol}aE%NjP`!KiE6)O1QJ%OR_NN-Te~7`YO%; z`jFKFuSCN^Z2_-J6m1JcoPWsG7Vt{%f)c|bp)Ujl2fWg|583w?i}vfC)n^!DFimq~ zET7Ry#jzVq%UIEx?Vv~^XGc;bzZwbP$};`-WVFUK-|n@xcZo1C*HRnQ#{OD(Gn&Dhp)@2zt|c1O>9|71<^4+7wJ)w< zxRLhEL5bi4<|5WfeUx2j38P|nI3TaIS#UVvMy?EuS+t2gT3Vo~ceh69%NojbFS$%; z$S#1F+6KW22CyzZ2sCaS2xR}Wwp+degWWWHiL5PF*P=P~gHMq%g(;bC-&>8FHx%l8ORKi`ln-sJWE|h9 zv`{VCbDLS4-f083RaxCz; z!ns&mv$tH&E~w;Y+4Z)Lz9nni+)GqE**ZaSE`bNnx#R)Ei?^Y+vsLW zU*Ah^rY%*s?__lqs9NK^tm-AT^^)55GO?HX)NoB20*3Zdr)u|WCaM|hrM`Q8kC#^S zE78rpn!d~+7@-#_*RSczI8mr&!&>zg+MppCXfpJhdn2mNHYwF^4*Ff?yB~Tgq7m$r5jA;GHx(lJX6!Hdob<0E2s&o(xcS zM0~UVnBxnZFi9M5BzR;JFu$8Quu&XMWJem32})1(Zrs&OTg$}bvd=^G2k0DMpxe2@d!^g##(vZpZ66QS^EnDF?WOS7T1X!+s+p3eKRLcpqkUnp|2gPAk=&Dc(cjVDBC%tgnP&vkTG3Y40;j 
zVM4ft=l?5)#_ajQ%rPqylyY4!*4E=t7gnjnClSjs@3sDsc1Io1WSdtdVo7BA3LwUj z0WCl)`izjq7z3{XZZ9T|$9y7XR1)n-dfZHYjpMem8? z`VEmHkD$Gmnp_1l9+_RwVnD}DNH<~Kgmmr^&Lxo2q)F%qkqsnKL_i6E;%XA5ks1X; ziRqH+O*k&Z<|K%e=r3GBrds^P;{q0*$*{NyK-nk_IZM zRRFML+*2Fa6QwvmjvlI{Fc()~7>u}jUFnet8q%kbP-eRTQyNR0*JsU;=!wmgrK4{m zOxi!tH+8zbAgpYFq?mLLZTs2-r3$%B#z0pO!qYFrh zNX9r9^w}rRg#nV=gc(ord7?yU%C#xG?QvU#xk4E8ncf8m<@MAoSM61~q4EmUUHrZF^o zm{+zE4lo~WWypjR&*aHAGFYw6YCELGtxt~3+?3T`v6fHLfE=46+bn|>1D8$bY%EWp zB5+}>nwE%8YYpILu6FRuRgTE>#Xk#K?-nCq462AB>LQrKZB&YYINC)1fSl~$^j_UsCxaPGKwrJ*)?3o_ z0~Fd6y`-mXLF#vUR|H5|;ie?$*5F9$4`WhWS16~+2A=YWt@2vcSL>_N44J75W7|7h zLn~z3O2<<>QKmi&RK_q$n$RfOIqRYkZ9s%m{J;-0fteYeqB{mq%X*=rhYc_POJQon z0Oddr@Ns0@SRuKd5^U!2)}1Gw{E@9Y=jZ7uxVsj%Ur%45VRwKMYr;Ro32a~83G9QlMtnlEChE26}QRs$*nMyJlyF%8h>w98r zOqWaD07ZO-oHzR61%?A!N1oFAp3bi)Pp5HL+PaGB%Il~ZwxitH=EV%n(M7V>5~yuS zZY;Mjw{>zC)yQtI86Vv~*X0`d3}ix3Lps!oFp^J_b5pogV{seV6s;p3A0a^7=~;Bp zO(og!ML#=PLH6A9`K)HFO%^lcgLctv)N0Eti$xDBv!v5bt!4_Ip%_S7>TXM_pcw;6 zc%qo$P1`*6s0QU}Ld9_)QgS&mDVw&Dd}FTH5)dfDOk;0rIr~Lm7wmu);;GI2R2)jg=XSD`_h==gB3^ou1}{r#m7L&!27$PgN?Pl-7hpp`z$QnN36VFmg?L51K7b zfXnT}IqN#>E}6@kG9myes`tnVK;~FTQ*AX78fa^et)UlLq9w$vDBo=lHV+;sMREjq zjXKp9h?cRPjf;(y+u-S{b$~(@<)RKik1)#X$rF`+y?mcN;c(`DOL7hQ-CkPDNutZ& zd4eSegV7Sapyaw)Osa06$!P+yk;*p`lI=1XM`bc&GNNu3=!*?v8nsoahK+$oEJhkD zZ^$8U_}Jr(43EWgS(>TQ9D2?J0!F+^4}OM!VwuaIkIJ0IX44}tM<$FkkC9$nGI(); zFkWkurYd#7;7MXU;bt?uSn?l#Sb#1Q3!8)=@}nhKjF&$f!kZ2zFB4nT&FWeuTw}o$ zYr=6a+flBmsjjsiB9<}b>Gi&;qvAhweN%L)GkX@xm(UeHHFIHn1)pIFmWwaeN(LR( z)9Y3GmHpDM^3pHAFus~X9Iz~>IQUzI6(hpQRl)J7NP+@?+ z*z-l5ou;#w&{+u7`x{K3>eWknk!-kJT@nVtJurn${$#g-O0F)Nk#huSGi&h;@eD&? 
zdDl`l0y{w?H$@3&S0tf?{+%c!QIx1Oxv=^{ht{u=UTXyG zGE@X9NEJ&Q1LYdBEXF81>f$LiyXhhe;$TW`E~BM$$(*C=*@9|k3n0IIJa&b>kQCw> z`&VZw0BiaIawu3Hy%w;G0ZHQLt<<$&*Ge{qV3@y1B5=?GHdTJ)1(7)=E@IgOsV{&l z@v|A82_v|K-cbWa>y3PfMmAO^gEB&_5-+75$p0aNgku8$HIN0e;c}WR0cKGIc6zlg zywX$9x(y_C_>3hNX03WbWO|2fGXSy!QOb=(C=;@LSZKdfKWhY7!nk?H{v}jFOyBrbzXRWm))6#2#71K z`qFCqRgFcxRPErBY7&G5w`D(kSS=j!7W|;{{DCDcEdB6NReQ{Nd&^{UH zF;zTnMbn=xjmTb}(90dMhLfJn<*v|V-n`paCZen*sVU$2zrp%bm zm|^XmFgJScr)ywcrtmyFj*=N+F?z^|obIxQE=1ShnbR>7qO)eejOo}=o0rRecmCAh zO-u$y4ytL&Vdc`?yN3Bw_#N1M2luuGD8dn7z5-3)yiB5`_*8UGn+GhcTIahlr8V$4 zp9#~5oQvq7361kiS0B;UWVl*1TXAk1=1lbhs)UTFP2feJw1OA>-UAwhy~-Uy-;bM^ z*o!4PLQ1QpX;$eA4{UXX1Jf&(LE)yt9;C^sRTuo2+`6rT+7jmYjy+LpiZ;&?X0yS* z*oHD--bwnu*0l%Gho>^h*&r;);*X=K5tL|CSy`N%A)643U{&*NCn_MUE;ku5ivolwX#)k6_9O-=F}b0)aBeh7B(&b4W@4OteMQVC7afFCvx$q8uc zcx-v{kW0a0ml;3Sibs9iL25+!j0Xag&16$EiTouMx@j|sG>_)Yjm@Mm6aa|4nN*oj zd^aG*kYF|t78@n@w6Rfh%fxHe&3EHVnA!k2^kWIy;Qav)Oy#CAqIP=h`y8RIHfJg3 zl~mya9KM!*wZ=AVfU+~{wX%sj0f4hFC}MkW3>JphM393Vq~e5NCeh1iHep9&HH-X% zU>zv~uvnM`V7nR<`QmOg6~Qo8ngNQm)Pkzi0`u8Tn3Q(x+?!sm8K55EL2i)h&WV-k zg48z5dGC&WpEJl}(MOJ7-WQG*$>>R2yt3$s`S*Huv>$<2%X9>gp1dQ}qa#wKC+@^@ z46xuCf5jrMl~{5wj(s5()-@9>(As5CF$)_^U8jZNe5Hg;E}A@+23Fy!MDb!=z}m&0 zB1nw0Hm}KRKO<&Z4am=VJOQsVuAD=Ru8(K7V409{C4&ld=8g&K7$7(z^IGPh`Z`E~ zqbDx4_(Ke3WP#lRYzEym2M~u7f;aFIbx_s?I=!gxww+K;c0_&DFRV=0s<8BpUaXU2 z?7W7=_rt;&;21=~VcV3VA=gOTs7~{gu#7}hpcM)9hC1-s+IC4e#Ft4*GwcDs)vX&b zFxZDVxP^PBflg@uWZ&n&d|1~bg?eBl^En|TQe9kb)HX>&qh5Z-Y;G$)V_4tWY;7HD zSvQ_OHci}#XP?T|-gvMVGU!$@Xp$&FXlgqQ83fS)T8id?{{*F`f)UR!&sZsDnqg>Y zIdy1gt)fMT$@sE(`ZPg+l@Z>oL$RzAkOW54GD)D=_DRCQ2HK1Bx}$qgwUE^xs}&)Y zyW?PO4A{+Tw>ltwj|0S5wsek|u)iUuVOzbqoWT(8tQG1pHmftI8j?P=U~b@s_tUl% z%F7S%EudejIXv`ZQO~_bAR-EOpJU?Vf`eVdVAGE;Tf~-Dxr^bL;&PPNT)}^+V{Do) zCt3+&ou!qL>5rd_h|q;?=mmxyDYx>b*fZIt2vkm3EQDPh5x+JYU#sFQlo!UYwc^Kc zbg2!oX!|lIU{#QU&r`%k)Agyvlk54RUvJQlMLX@(*U=`>FaY7a+Cf(G6$QK5(gvkl zz1kb&OeHmTb-%IKP#Z%{8%C4%O&mGFI``=-HCqO?rBJ-U-|x`}5z?W3iE)US=_h!! 
zDSA9I4h2JD3V@E@BgF{{uQ}?ty4a0@L1DG5e7h7wTQn)dxV2^baHE6Eg2J zTw65fiQUn12*4a%i1R$4_X&;x;OCn}5=4F2{)O>Q^>Q+|oaCLg7gu)a{+QL-NlN`s zhOhPF^xlY1$4fq|3Emycs{SkSauSkIXL$+9!emTBYLq3U>QhKa4THME%1$w^q^Mf^#P^W& z7;{8KA2ucqo(wi+0rI4yKMfi3g*Y}`Hg@v_Fy@mL;>-j$`--o3aCXguq*t2~gLnH! z-5K3?*5Hp3b+O8(f6dorm8c>ZQ^~0Tqpz%w#+slGk&_*lASMN#PVb7`$C4WPl0fK?! zKvW-Jvn7GEZLIv1#2*HIbyvgGLPpk}e5>3qz%Bd| zHV88#fu9=2@_{x}a2?pHF-oZpy>WAW5TCat5x>;$e*M;b-P@&WJ|W@Jl17fX%vrFzu*2zlX`99`@f&9(9P~h ztFhwj_Jvy{11TB=q&_4-fJ&;(&vy8<79hcfzlT_3J{fghDo)ISPmjW?w4PG$sJNCF z;(!SidWj0Lemy=Y^w^-#V_xVg6&k&`@&srTS~?~X+*>*Z^Pr^{tB@vV?|0sn>cN%8 zgMHz_6?(Ad;>w9ZGsg$b9QS4}N8m!!)?pY#JO%wrMhFHV;d)~wl|r8{aMBv%$DW9W z^eGK~H|BLae6+x^e2+b0$+f|Qnp*4TSx9$EXc^z$8xiE2qiyz|ubiCZ6ba-N`X}$L z1ME1LVVK7`x0O*{rb3@aRk-bpeyKfNhEjXZuu_*wY(xS`DjI=)7>{h9YuQkA=4!UN zpKt&4&Z0TJA-4R&co(Oo>ZaKv-g0NSsY=e-5pFPgA;WN!?_Q(vH#Kzqd^%EuHD)X5 z`Mx#MMd!vE=RJpirlyd?^aTeZx`*!2zV$=nGFH#fMm;?gt?A5ISu13{c1p4OJt8g4 z9f7Q#jl0_O?>Q}u&Kxr#qXg*uF?+HT8Kxlk0Q#fI6OyS;=y&{u@o~u)^NFTzk74g| zq>RtdDCvVfN8X-`Y1oiu{N@**o0V zJ)p_qjFN}5*}N)|*nJkJ1TX5)|D%4u^LL4eV5IZN|2yql4h5qkiZG0m%T1lFv_eiS zTkE*E9WOTX#AmgL04c2~QizPnE>qCMn8z*tKxO3^I(1p5;i(FmC(5--^wpw8Wx~Hp zgJpHvr-AM^kneML0SPfDFE7{u|SOIn(aaV*Q7`P8=27f}6~ zpiewNpJhg1;PTOO23Pt=kuAd<#B&=15`_2goA0U4b}ljElrIe>EUd)PFlJ3ra*}=) zULf&-JBS_9;1!gDYf`Jjrs(#nhp-ON7oSSC18{haTZ7lwNnn~u`o|*c{SYLAuqlLn zYio%b&FpHRJ+K&%`F?w{3=!_rs4!F9t6wH-LB9p@FR%#w5ko$tN9b82mKo~^hLl79x@d!%h*o=?qyo^ zc_~(l($t*1td^is!F=7tmD^!6n!~-uZW^7{IbmVA%&)eo$c1 z^4~y@J(4A-$ z8XfKv(C8Ej9gOR{fxc>X83g*CMBj5b>JIiH^xZ9uVs~@4^Zoxnaqj~k*LBtVoZlc;roH!(D>th-_Aq@?X@WQ>lkMJI!M?Z2tpVx%E()&K)MkP+- zg4+t}7L}SJz|;a<5a70IF-3q&Rcg2b+@ewg0$ecE1@Uk}fJ@%@xAs0~&PXHINy^3f zM03tQ`_I~IueJ8tYp=c6|62HM0bO9J4ygM&@ac7^627Bj>gX69-T5Cg{w=b-)Y}G9$@g|by`7-9uae?dW+8>*(w&y~_1(aS zN#c}BSFttqg1x}flieh6mlYYfnU@S8rPHbcW<(PVL;RjJk{!5AKF`g_MorXN_Qv!6R7epH>k~{X5#BkW{Y25S*&2Y zX<|~r%qoCYF8O_euxicPCx4S!B4+YM(5iPV9!Z?Q?WtW48@H6A9)^x{svj|dQZ%Js zzm6T_Ii0rLPIsT`&sHj@`#Y{wZdONLflh0+pwh6_HKST)h%_5=JrHXkbVxH4=*nkQ 
z(Oy(h>rO&<{s52(8G@tPy?xs#Hopw`xn;n6U}x3Li&28EymGmsv@|91YLpeP)#SRN z1o8y|d`r5^cX%HLu8T^VwH{so)wU6F_N zNw^TlS?C9fSw5{B+Gh@t$AgbG^?a69)nkss0Oii@{df*f_Ze}zZ;iQK{^^r{-QEf<^md)O3Cq@n7C&pCG1&0VK0N2~p zg-NTpU=mCxETscwjH`y*>B1+HOV$VW#7s=RCYn}54)W91=Jxn@b#JPbeoCch3f-8> zyK$y-Hw?w<##yTp(+J&|RW;*Q&8+Lj%hZhvmQw7-yrp#L#+(7ddUDmTCtq>>VAIZb zWKm7Ij$F1@x5xLWBMYtcODaEdO=~m+|#NB zaHjxwTGfnNHPa5>I|-- z?Kc4TvT6a`1;AZYH8WPtq61eG;$Sqk*-lRC@d%GK4zTceRGMbAf1uGV^}WvJbsSsa zICB^Vy0!w98v8;zYgl66yDF|>(3wL+h~LuIT6|LbEZX*! zZ>h}A?wn4$dDymUHZAL@uu6XI5t`41yiPirhb^PK?FA^c?#l{N@nMq8R)IiuTk zG}Iwhml8udxrAT}n*|x6ObB6TBwmFIA1mi5JDJeL%mWOb<}{-jo!leQr5p(^3@G{?Ga$A3l-&gbDYcLZF;3Upnq z$nznWVQA@rLvSuE#ODk{qYDm1a&XdttMn>64}ek8d@fByYzyH=Q*`%SX4u5T*2(FB5ya_sOjtH3yYRoio#kt!@61 z$F`d7TjS&0gR<@VfD-hoU_dt-fq*#go^R{7^P27OC|kQwDQv|UuU*YT%2)_WrGM>~ zrfExQsWAmJaK;%^!t!T8?^zxsTV|CoWd&!qUKIs$>BcI}*!w^9WFukZakSSNg5Zf*lawxnUj7a98)Vl*E?75A z*D^uwbOBfKwW>H5tm^dCb&dmjlRq}@J9S-0ZuBb7qU3axq?xuuZ@2%jNznu_`so{sO!PIEmOJHU-wWMDK#?N?l zUO@j2y}0aqv8aT3E4b);p=EX$&rL(8l;BP&^N7T2qDHbqYq9Wg9r zyx@odC$)J}U0LMzEgKypcDH@#oef_2?sxhP?DWYd1Z|DV9<+#A5Mk`>L0p#Z0?wMJ zsFOY~{4bJH9(U}dG#YUUPUaRgt}Nh(ORmaheblHgNX=NaJG!Kpu6|L~RYnarnjloI zeLcCV^|iZ;Q#h`SzN(JG*BbGK+}ATYYsc3aPETev_toUmsP>4xZC{D2oqt``XB5QO zGydyL@oQq!tPO)9Hfa4AL*LrX>CAHUB?KiR?<=tr8e$0f%o(ov z7GmZdV(LEE#X_zNty~veuIrTRkooa1uFBnaQ!f2cdP}~fy)o(LFPN zZsnSGxwa_RywyJIFRFpY1>Q`uJDyYi3-)!6{C_I&Ze&};szeh2yoFYk3$9A;?z3D3 z$n+KAOWVq|=yI7{r>=wIaBB{$D$l+X#JN{jwR}#;i5?CLR_EAO{zI$r$XEUe`#MH` zVQ)uxVs#&<4AUX7XjOy7)nKtz^`xs>3szOrOuWNzA1=x=aS;jKiWBENgJ#MV5gWD_ zAwfk5gKk9#o}Lv^uyZ=V0#KdHTX_h>OtLrlHZ(meIl6`t+$3~Fzi#|S*I3E*luYNX zbF=MTpP}n>YUzS~om(yE9oPAbtDQ&KB44^}ymo|_SNF+5joZu)(ukOO5rmAi8XR#A zu3G}?xA$pRp57huFdl9dIpm70C4RDP>%H|s*?p9GOqpowXe;xW%WPc8dE286a%#bH zNALk2)1wV-GTR;%UzKEET}J=aj*5V$OLSX3xIhmUTF72-$mXhqa1f>3+*INBbK+#F zZQ|l-6L52miaH6s@W%F5V9XWZ$gE9M<8({Y*0D8iCT>kyQD8uR zbd8Ah=hw6(B(ulQuDLV#u$8pQWm1pyVtkD}=ug-O=AgTRlRTzVR>KL9=En5Y8nngG 
z{M&9$Z^~ze2e>jF7l}V>xqpl7+t-ttM$;LquPB;kEoF&l(nLA!0h?NLTXas(ETrcI z)4Y{%`$uy?=60FSuj#0~TdOg$DqhN^ee6} zEV9;QK|!-yT-rSqQ)d>`JfiXNTGx?7{HCK;-gV^A+N>i-)?TqAM^yvkHM$nkP%T7T znJT1q~ z%re|eTS{T*Gu%w6N8skHei`Ih^=htwo7o&U=Q`s?!;><|=T!qB&w-nH)iPzZ%wHXD z)RA<-N?h=``ST^Xxt!x>f$9-nZMf+X+-)`#acVk-H%2J=O9bcKVW)Y#TjxraC zX`~Jy{LNfslK|!-|9(clN6bUV%ti#Ac4niozGY@3+5)~KeOex;M}3*hY;>ZpBZ;Yu zHya)6Ycm@i<%IRad}LB6nvIML+IwtuOw%6ZU0{qUOX)E9lj`$}St1o2k5>R=MXMBY zPAgZVn2sP+5`telvTN$8@yhMoEze^q*6WaI8c#GJ_{lixZLNWj?EW z8JPdvz1h?QLfI{SlFeM8(LO9utevH>oZ0Ck6Sx&;E;l>9=z%Ta$OP^&pFb10C!>x6 z*J#Mw>7zD*dyH^JS0`{)hZgozni7cna!S8`@s-^~pw6y%6M>6EGE#lqZA{MsdOJza zSV{+yo>o&YhNQ17C^SAD=*pS5Uf7Pp)d-5KmMBX}I<279g96j)g^FoI)SO{mba_Rz zE5mq02kSpE6qh-z9zLn_>ZR zKkZp+<~p6vJJ5e0UJn%u0M5zo)oEJ##@DU-mTyV!UbD8GMY_KCo335I&PTf*y#5t? z_uhHet6%e43nBcf9XoHjdDkts-gd|C+rMEC_QXJX(_M88{R+KdZ&B}eE>Gtkuc-Hb z&v#5@fn;yEsVm($^6Rf_*ByH)QH4}rw+k4--5Ejee_LA4mCEw!E`(l+5OadLzG_d; z^KaGDD*Q$P1D~o=iQV4+s)W^rg#WTq^_cNgsw%x8tG?9x9ZR!!>IMCEy>Rew4^!{M z5*(=8@=L-+!Y%_dD)YNYMs8aiS~#T58X2owjv%{?hP7>=E74Rn;@LW_E1u|?#Ak%~ ztoHtI>L15`m5Jtp-fvd|XHoNnhnDsr!zT=;OjUvVpi21!s^Mm>GRiW{3Sz2BlRlhjy@B6im^1gfgxZ0RAY|`WlF)nn`5Kp>3M=#ah2d=7C z-Jv7;Sh?2TH!Q6Ppx&?1ND8CzdL?k=NcF--hHDMmeM`}hk%-H?lqbGv zX?C#IJA7--+a_JqVP4CUm%jwSxEv+mnZr;6{~j6Yp`cjI0!bmny(8hnABLCkgT%s0OD zx)Fw*>)yM(ZpSW~*D~?O`AaX}Y=#myreZ$q5T5)I=We{pXJIYh*J7?UN zmPcJ{eSb~iJ1n63?H*I=KcV_LZN0QS){s_t4)!no{_^5?P6g-Ref`;YtDobsOF7qz z5u5Ry@S>n0vkwL8tTmH*KPp8a&-+H^$pnFXp|Zc zb&%fZdxU1Z&O#5GXaoYbZRiYGVRg5#!YZ~P)Vy*ufY^|r&p*=(!?wf37JcSSUXX5; zd};654Ty$9NiPs;-#~*LZbpVLFOumc^Z+_U1iw0Tq!(+_@DPgobq+X4ah6!xC{o;x z@=8V1$Vor*;M-ooXqL`jz0gX~h~|2pk-A3exL4!SOBcnikeJA&2r61tbfPB>;>xo%2}eBIo+>5vrx zt3-!9Qz!5$MC!GBALNPtHgo11h!{sIh-gUVr5Ou8s z7&|>#C}(&09gfiYDaEaQlzNv+h3%#6s8fb|TGLAJ45fT)Rt}rPvetCMNDA*E3@{$K z3>DWix(i(+EcA=Axy{p;7SzsCRDw26le8`rY}Vk0ao?baxsc2kZki=&B0gJ>|C zZ(kO0a3)+*)O*P!KU{)=V6eOf_+CgtP4jCDX+|yep_j8QUkeBXKnEbMUs^De0mYf9 zo)d}_2h2n-Us^8f^#+&MRs`Et%CymSEW4~4ifl&>z6qVX(<FYYfK#TirouiqP_pTUadvAe}}v4jJUWU 
z2V(vaTfvQ%5ELC(3TTZhtrQZi6krQ>IN~PPH1F?|#Grsi2qI=@fjN+C|4OYD16!(O zX%i!KZpqvPrDKY_MOIab1GhvDdbe`uSqNz>%>pG;R}^;%{AkItuTSxz00kADA_Wc* zt`IXKLtRP@u6o)2$L*0@I#Xh@w(5uDotUfm7I245*I5gdl!KvFcV9Y{DN4Axm1P9z zCpxnO;1FPfjay(4e(XZ1VO={e@dq%zD1ZZ%{AL0z_)4}9pZ8Tmw~dh-)GBJ5#_ioU zZZ|S)BO`TJEeOA-fc)1B_dvvC$5M^Wc@v-Hknr-5P=p{XX3+KE(0`Tl;2>B$IBcA& zTy_&=Y612bWgA%M`;HRH^rgpSIaIT)&) z-eNlpsxVms#w zk}S}sE>Er})L7@Uw;*8Tljc*p6@PK3mdb`uubLHsJ-8K$QMoVn#IjQCq2&TE1C}e$ z1dMT?kmoHISzmQ2sZ28I%)ujVi{a_w`&9m--0GkF!i8GXrzQFIft{ADW${hw)MXJ`d17>cx8N%ZX|poKR49UZhl@_OIPu;y_Vj<&tNSjDj4cn zx`m(XYw1>gUS3PDqsT}nSn1T7At(g z*KdU{`1h^w1)shZzA!hh@CA3g6~1W3&2)rzsPUj%;fvPKcd9%v5c0$R~ z%E?Q1Zi1Y?xyZW##I@TIvuVf-jUsvfK!XpBFs5(yNat^)T{c3Y7$A|XO+DRDXtrAI zlT~iu4s`=JVSf}WmfA-YjA+pXU?Pw&Qk!Fa#9%_JcVTZs1vW8z!zyc}_dY|U5BlSx z_Tf%tL4V{9ivKd3SBm|!XLnrZNqbn5^%i?pfq?(}|H0_3K1#F!WhG@i(EN#e_x8BHn>rW^9n@lOswm_rMhex3`hQ38q|#W6KPgv8 zZL76PsqzN2PXHxD?OQp2r6}z99WcX!J*{-?DVvcP+iqclBZ@>uvP0G~h%mWbCQP;z zsyg=@0UC}A$={Sw^etpvpT(MQOS3cRU?NW5C=9R~v=p|K(FuowO7h>7oVa9NW#__7}g0+zOrNA0q4XlWsW*7ITyU1Z4TJ5HT z-8(}fw-%r!AD%G6F82hrqqk7vN+9=JL=TBHh)QBaL6C%X#nGyLr&IH?5C%e*d?KB? 
z?Iimq<5YroK|unU5ek4p_=@3C z5Of+=FD`L2AQw7S&W?9IruH6&3kN9n`(5X3hl-`39cfo)$Va7dbPQ;d7fSQa8Hiqg_&dz-;l*alK3=QCc~vl07OJ zxmFN~2huV&dY4J(oo{qC4N?UW=4EmmguDm`sp05n5Z!8j6;F-=F(pEJ`nozjr3MuE zEPAY2v58p2vy&?6$S&&INf70%BaDdO{o`bh29#qK8c>E1gG)A`jIsuF&29~-#RPAXOgRD{X76TYPBZ40LewT!YMMFItxhUYUxv7gz zEVZotP1GsJkPIpFtvHdj?vYZnijB{>3zpd>!NhGWh+E7ZPl1b^wcJdG_^N4m(Ugqc zE9shys2U|V+A?37O@(>#w&*S}$lPws6La&D{Q5ApItHK`&?yA+w9R1@IE6g!*&Ez# zn8wp>(B;@ElMN2HuKskIO|$nEn$cFfmlbZ@Q3l%sH%AeohZ8bWfYnn3)s+gu*i~-${O)56Wl_AHZ+}} zHMbNefO|-DJj4cq=PRZW&rPZv6&BtX5`FzhbGgW7qFK`28+MHdG-W~G5>}f)ofnkI zJ@FMRD6;T$g1oFVFkl0w*AVrjx`XAZea&!y3k1;FnO^JRKvPSSM3M|f%L$9EW6Fr4 zws{XHYgpZeqI|`?+~|ViU^3%~7&Vf#3r2WqKU>qP06R@;6H6U^lD10&u4SePwJZ)Z zTNx0caSH}|A(0NXih_Yi}d_`!(lP?8bhtX2$DtZ%yXll*dqr8J!dpXxWH?0e=rhXI_vW%ldn?mV9 zW-g3JD}ZPlFbt3sLkKKi$mUlnw1aIq%~YFruoW6(d7XJq>);AG4HY&$s!mfFwUgtJ zW}|JWP<&ImVyL(e@!8R4W|h0DE08xh)!L1?Vni*VV0(;)k|a>1BSh$_ zRC^U9>4P)F1ZHYP{tO_nUBu>i0AN_6QEDK0bM0@hA_QzV94;QFxp&%fI@T1aFGra?`rCUZ^_(ILHRtNTX6Eu!hTLBHB zeJb-IU|EnW1Z;bOIxs&{t?7u%JGi=}&-N-Bt88mCV!m|VD6 z4@kv=aK5B)v0Q)V!^MIuCawP70Us z%6&8Oe{i_i7Pw*%W>dBA`O(6~E`!~!7h&~+{Ob@dwh}|F8jWS)VzpCiw=WsH(gqQ| zr9x4_hl@4Rffp(r!o^BE)eNllyeul>2tvc~EII34C~|_PMy5Y*4Ot;vY^&F{v(VOS z3m3br*Tyh@#IOTtwwFnuM_{~&;KNJ9#agF(z&iwt6(z6RcljlTi$%YvC0&bohDM@4wiuh3dznVGA?q&N2sW!? 
zE#qKBrV|WwQfZk*8383UG#7JSBU@(K;cM1PI4@CQd3UVv@xhLu`_AAR@^J~l-|56U zD$e&ZEwMO`aEy-K1om^GQ-FQ=bQ!D3SY^%OA!MrZvv&|FZg<4T7MKyW7+0z@S=LSF z6VC0c5vmWWc|E(dBG{as(WMor#dLzzNF*t9qYBcRH8@Dx;BeXUN#fRIzjR^GYoGG4 zf=k;61d`6M2GJgjiiS(Z$eBZ5dfw&{3W|{xecd+3Pr3^Ghjm#2`|EU(|ipvf5-~W zcNc_FGbpPSlix57&)CInJ*8)y=bKcqO;tz_uF&5mlYHBKOlDD*C3Ba`8`5|3R6&i^ z5%0j#nt?meV6!@3Q5)Eqb&6SLP0+|7ItpvrH(+JO9;zAWQ=@lj zfQTep{W1^9O~>q4C{%8_2Vo~)hR8Lx7Ri@JU{{eKPf{&m#~%%&@PEX<_A>Yjt#*Ym zen5*s(4MmFt;c&``&_g1Ouh8|&C-8Aj;Y~*mbxsD_6Fafro%=BKBf%gV6)cXrQU-mghZQhm8hMCY?H48W3Un z;JwOLYvrjH^Ta-ns`WMYw6b;;v-Uo!J(U&$9|l&A$$4Z5#bRZL5vlfrnH&b-?NQx$ zG?tz$o~-?!j)Iqp8)F$j@q@&Z%rB9U!LmU&8j*xKMc@ubTbePeO)Y&x52@Z&$V9bYn2nV+J(F5b!dWQtlcVvMTO-ju4xQ!6h^q^Vu8zFQ<8zIoCHbS&CVnt&cA!G}i zvfKKNkjU9o+Y8hhjEz>I5U$P86?n^UguL*sV3fG}u1FtZ(pOh(iv++l3SRgWdt$y! zrDm75NLsxpWd8d0f?4_Md%?oE$#$9z$n&vm_w++sA@oC=C&hk{FLEQ|H&0rfDU`T! zXN*K&6dSC2VQk<{X`7PETWWt+ipoz#+T4Zn#1z`TM1_+WG_j}@8N>La5k)YQA_g{h zqA2}nMM@F$5BqH!3eLv1>B!V?*0NqGaP0ZRN%A*gw5MJ+c?u$7crAFC!NfHf)@%~) zIi4<}Os81?nG257-0ChiA8igzx^NXuZN)(;m|p^x%y$HP`0*w?N(ele-|`OV-GPe| zD$SBR+Uz|3R{9SxR+-JHx2rd(_JozVR*t$c=!UD%W%$3u{}uj^ z`M=EnU9t@8*5B^^k{YJLQ397ev7G>#~_xcY!)>2gOI`*sD@c z3`k9{S5{9xQjU7vDq?gb!aisF7p5<4Nv|7DuX_wfpNeKNS$ES*zvaCaqnc>2E)8VU zqZkR&gE3o#;z{=0bbO?#E!JiUb4D7nsVc#XExdfUW`4VnADz){#+$gzDhkq(cv!It zAmu~UBBO{R-NsC#x*OPbY_($O)`u?k58f=Ywch!%hGyq z;6u-H8z$}r&%gFD+p0G_XcSc)gRRc%>wQfWpTi^Q+g#1xf1E{?%;^9fr4koBXvd;o zbA&uGN~?PUBs8ts>Wfb@F>2}n6z=vnS;4lXFiit}$ zNaI?i@ykq-{1ZZ4?lyf~Z>p=qm9NxLiCVKf(1<^I8@{q*AN< zYX3EeAqhjli>;r*GuJTvi2g9bL|UfRHi-h5iV@$;iPM1%Uh>|pwP#zl18vGs1SK>U zBdBy!)dFrLwvzs#a zjRn?&nt#y7OHC~7*dJlce2QT9uZqj~jC-vJxW?ozPR?Dly5$U~-g0~JUxrfB(e%iu zB8?uoCocU4IpAVI_BTrL+0|qkAcdpzBEzBJHoUb+f5fE^MsH+cXYE(ie#JX#0j2-X zePk#>9baYoq9bqw4$2(O@oSc9Yt0e^FALA)yR+V!HVkyEbGkMVV%eQJ_Tg7f4d|Wj*U5O-mecws^1Px|dl(oo66^ zXXbPEpVcp|UV629vB(;_0iHLa>>AzP%j*m18PdzGASn*(yyTu$A*14m@!pe>YH`H+ z{YW7K`UwI_kK}tqk2pCN$`1)D!*jB(0r7)W%?3%!r-X~P%5?B7Ja=Z`C^DHV5_~i){-#Dz8+c2 
z;MX2UZA)zor53*m8p(;!llz-sHJW=={D`B~VD#PX6yH72>~k2+7=skTu838QJ^LAd zAP{@jYv>xC8T4)wP;_2FD0j&ZDKq_a57es%&4Ywz7$F)tCP^-%a_lmmfN0p9 zDxo{XnjL^bU0D_fCb zJbz`BlT(cGz$1_dgagHNJBq<0(kLb=;tyfZK=$E^mkc-YWE0J3_|~92HP;bc(^;sN zbcS&dRrEnGWR^9Wt!LjsT&{9@$vX&WG?QeR3iRT$tV}P=((6o>g}VyGBAmNsU0@<> zwhf>)=HFB7Y)`yLa~ve2mBcD65V-t225YFta+6b!?F2EhAnT%iC3{WT)uJaD_{`>pU>bpB?ko6o1;JVu5I9Xj?COvEd6~?^y0NhM` zs4(I~#jYSe+DJfBLnJ`YJyH0Sn&b@=WsSuvk>U`dw)-XG(&EbYh}^kzI8|FM*ncq3 z+Qu>~DD1SHx+?3~F*`*+bU2J&^;Ah#&XLct5@3%^+gIton(eZA9VS1yZ0=VpX|!L^ z#Oc4;M;dRwywN*7vvSRr2xWQqz zIDOmF51)(E@OvAIv(sqsf0H~SxaoZLRcQ&&zFP%v{OV}XU&ba_v3oa@5%e23JK<3 zzXY~w#~*A}JH~VUX0B6z&`F+E($lM05_+8*X0zKC%`NR6qlsqjI9)7jaHNk8MwiNs zvRNuB>2Jgu3`dL*KqWotyjzL+xLGP^C7sIMT}kV<13eiz98%9l=@~zV_(^`5p-+$S zBvPv&4U=DSJ_s2CSNhEx20gDDR>Vm=U6M7l_#nftOZms#1#gS|;6b)mgf{*le%gJk zEF0b2S^)ibtGEMxD3}U4sZ(zBrJp-JA{S8B^zfR}Sz3k%l(qZbX4e2Y4*un{N&3Ob zgD4`HJJvEy@DcCeu5{CVDbX39NZ+Mv7w&$%Z&eyMYU$_y`Z=8ms7R#+hss?!(0_fk z+MoRkqn@5xjkmbJx0lG$KE1DRU6<~lmM(qtA%59C9pjg)Ss!G6;dksov_}ieb`lh0 z6p!bWkLx<=xSZ&WymjgOm2w<#PVf2d!L%2|gccNCHRh68w>3wu2KH`qDm8k!vUY z^4~lwDxw8=Pg^Pkml$O@ustA;_}RlJol(&o0PSYOGA4!^cMLU_Gt@DjYFhH@1Ub%} z5PHS$b3?X4BU2|g$kVQ`4s|jQ_BdZCc4wv6VKzo}G5a1q7WMArh(U9gBe&ZLDECxO z@7<2xyK{QiQm2GbM!X@$F3DNBn!L-Z*d@xqsI7^OONRuzonyi6U(dD*;j$z>n+lCN zg$s)4r*x`m{X)NzoZ<#iySPErrF-2@Le~ZA7p02aCrKuQQ9qUD#B9_rN!whPd=~y$W%Aq+c)iu|HrH^yA&=1Xv}9&R)OvQrZp18t5Hagl z#H?QtGrtoC@1EJGGe~00ub}GND*T8FkMm=(1C!E%)ayx#H{AYeN4S|Jy&m~mDmKPUrk{A z67TLIGz@;H9>U4cdVPu()N89UBQd&7)fg0$ktk!+#RogFDLt#zUqoM)c0w(A0NETe z0Bxtl)Y>t@g1C1I#k9wlGNG5j)0>d0bTQ$F&@5Gc;ATy0>%`lAb~400t@CG5`s7oy zfwb17r&dorgx!a^G?U&Nc)_`*u^OsF4{y`HWSA+*jmT6y{pn>3fl3~+9YP+y+e9w7 zJPq#C-?{jVB&r@6xl{Um$Dcn-(iD%POA{mXMs%5;(P)AZqp&niet{w05uTOYi|Clm zA(DS*bO$c4A=8J^mV<|dqfA5w&%=r}4XiTti)LB4oL3VknK==9zI6uDt zQsq_x*ZL*3j{P&A!D#=YwQpTI2qmQpA6Az}M7DHAWUQ`Wr_Cfj_T8v;SbIS3T5s&eu1mPZzTK=8E+#cr4ZXhP6=zqAqwu#WHyqo;X4& z+V$4X4Yq*j@!EO?H$=6&oLV1{cCK$arN~-$FnZ9D%{@(GZyDLVzM=cQr8)>9$}80& 
z1L7~N^WsN-06a;1NdM@AhB7@q^YiMh=@78JX{MW;E|E&0IWku*B~NIVAF{=% zRtzlr0cr6e6nTCxJ!BL-CIVvbHF;a46`MOqRf0eB0%H$KZIHswSEp_PE&@^b)-o#8 z5=fk#2DQdeI{$G~ebb=6dZ59rGR`=I>Zb)i)(O97c*MfdG+w1e+GyC2>&b>?PVVM<1SlpJQp?Tfg?PT64$Ub*=tc^gj-cZFNj2i66jw z3w+@Wv-gMC*&Ju7`g2c-AHX=yGk#i|=J87lLaCvjHzo=_vJy<+`Psw%gKpT2(^*?q zy6-F<>uaG%n8+m@pVt`8AX?<9G=o$5xSrSA>h^>tm9wfnomO~W-abOuj3nJKygdkZ zV{<_`Spu7;B(O=!wVsy1c3+l2OBQ)UEXj zoY1ECV02pM16qVm9KP^$!ib|Tq_yNDR&JX0x$G8XM^hC|N?Sm?b_0k6XApkZSyE(> zV4ZNvY$DBWkviKHyQ~qAhDLt-FkTbR+Mv45l#?L_6va3AoMtF+q>Jq4%yITo z%m6Z)3$f@FHjU@1&6{_}lMm9im*vw7KbWL_$qqCB(W>m*M}Ogc7Rm{)`<+?3=Ka_d zZL>|xs#w@6#?&L(lAaOvlBYr7j_^WNQ%`bJ)@3n|0KVt}zT^PDD1aAuCgyQffDT6U z_GNp#;KB94f}k^hN4q_(cB>h%#QRwZSeEXeSFtv5LD)gIKOsH`)MW?EWdSo{A3&H; zHqz}-241FBOIO}_*7Qhsho-CP?{sSO2x_TtaF}174(l4YQG4DVkG5L+^xxA`Z{89H zbu1?|bI|Owk4X^v@u;SA%1hri*OHrL(1_D77a0Cx&hXB9b~c95@2)il$#=}GIiMUG z8OJdXMr+=DoZvrcjOe+!;C*OxjEM81hr33MDenQSv2HQ;SeGNs^rXafdJwDoh0h)o zT+bLSOeIAgf(=Z7o`l~Y0I_p&UccRH=wkUR1rYkz&+U9=^7RkOP(>b(r$^?!VEbx!S2YM_^T9@5)By?CBFmt5M6-n_mrq@~|6g+G zeA(qiC8xDagi*1{D61KxhX9eYp|LrSTnZ#bS&dBn1s6PClRV4C7RhtE-6Bq(F#}`5 zb-jA~!Tb%u*D|=?5x{N|z_vtX7_AD)j4xGu(V^gPTaSO^AN#X8v1fBl-^IVfN(AW+rGt8W8+-kLcy$mjzI5i1^nZA%Yx@uZLIt`&0 z=zeaa=>Eip;q>2+G6!UIe{Q4Ey=%@{w=v{ErgIzF>P2c&tWe8kqP;T(5oA6}XK)y% zo?o)LB%Po;+lK3=GF{CZV7hbwLyXveN^R_R-@N;^aA z;2Xc*>3!M8$nh+(;I$841=oeM*w0@ngG0S7R zKpE*8vqPD#ap|V7iN}ST&?}Lkjtp$)Wu8mX%^fcz`{}L}w}&E#G-6d}7RA4ezls#@V+l6TDlNw(Up zrWbkKW*O@{2~?^&!3a^Ns`Z;tUX+`Cv^#_M=)RnZKk%2xj_BGd90b!zQoM#(O)ufo zE5^Qsz~;)powDg=(zFgUIbKcAm4-DT0GEEw3e?%Sm^7n;lX+4)(Rv)uA60ft8AnFm zkq`yTrlx92+^|CB)4;CR(`w{QTf0*#JDpdT&H-q^%p=9MnqDaBZKu7cf@AV6I8n~( z)`WLN*cdN!$LSBeqt*XuUPP=?UaOg_J8$x}(r%RZba%sYjD%t<7nSQ$C2!>%-_I6n zKa z&D1a9k6JnP5LTTYbkduTv!OE`wrIDxZP59!nwHEKjLo#{uA-!|hm%hK5i)W*wBhMa zu?~sq+sh~~>O(a>%;JIaT5e5$o?;_zRGnd%4m+coEm^oM;NvEC&pp)a!6#Yk*y%4E zPRyyG8z*%+l6mc)+Ux==!0%3P!;;Adtq5B+j_BmauUUO80DIhxc251yGx^$lFus$@ z23P7}Ba+=bHM^Z>Q3hx7(Qp|z*L(7%;b~h)AW`AmnYhuT-|5(a;-8j}`;K 
z?%-*ztB08T@$76}2VxS;tUw%RS(lurBRcI>QBOIxg13;?v>F2G_on{I4w?LwW#kqh zmoZ_rTVa>mWn5o>+2aNcTdC3z8DQqQroLJa94=1JGa;|0W?Nra z%?<=Sy97Yhrj2o7oDt*)iqR;W@;><>Va#Riv4xr&7aM%HVht|mW7a&hu+<*LZr&X%*M z9#qkhn{|50;lqau%JH#yQ=MCOOpKhw8{7$g92|7b3}Qw0%d*7&37p1Rb;6Neir5`L z2bmbQ@!H#b>Fr<6@A^}-}?$FXL^-O+-)y%x>hFc)U-;y7nB5yk0zA;ltO zq!;p_zE;CUYB;MJ&RGqY3i)-=hb{d=%0){#7-d9fH5`hVGgSko5%Q1P5dcj|`0iTD zSRsW_o#YwMYw#0sIn5`dayrID;vEH4$!P4?7G=$lnn08~(w0dBxQGKvHo7ydjGC5O zwb=w8w=xoFA*=DivW)YEjEAxbkEVHSuW^y{8Z40FLoqWo!>#90o)797Qot( zqij~1A+cFmHgG1xvNjIbCUBc!MvU`HMf3kBWDN_CpSC)Og1=^k!s{`6l~sn-w9k3k zefl|{bKVvFdH?!J_w|T>{giwCS^qlYUXT0NPrKKjvDfXI!eyhBL9|~x2cM1>>N#Zfi1$va@TpAI zwE8YkFkL|2x%zPI3Qo{edOp+0POA(teA0_8jm(_ZKB#O9S+-WHsx9ps|RcN7Y#n?pz_5;rjonY*>R#XL zUVqBJ{;+%ffPeiF_xgVS`X2ZCWB&Es?)82C^@w{N<27BpSt6Th4WgG8B`A1W(9ZwZ zE&9eep2D|ecRYFv*#_lwk1BTt&v<}FDyZRx z#U0@XaKan0bc(rtztg||p?iIYfBh^BmVVA!rE2K^-TuS-rH9AqQTsGtg5yu%emM?G z-ANA_n%LvX@RVLKq~6|2n6QMwJi&AA5o=+4yocjy9#K2N8MqfN>+Bi0$LK8k2h*M| ze#e3Ru3H7dZ#fWV4Xcy6!H+E`YmvJfGN62uqt@hLbcmJ2oYgqUq{iCSo)Jg1G~=Hi zpiu*W7+9{+a~_5<_j=f4=zZ?>z5eyb-0ORIP3H^_TqScj=-DF>iSOhD6hH23{WDkY zIjd$z@Fxu2A^-dZo`1ks|K~2-F?-z}ouY;@YDh;639Ol=V2PH~MH`v3g_H$LVSStB z0nw~^cgGh*oPSCUFUt;Wr=tcs#?Kb$JAy9)$Pc=f<3$L3%8%P2Vu(!o=Ql&m9&dl( zX6hgDpku+4Wc^Y9`p*>Sa+jgO`e0W3 z8RvT$>~{&^Jqz>1C`b$vdOg9wh^g4Ju6Dg)QUmJ`TCE$iQO;Tso^W z59&;-E2Z>{w+<%+axAB3rQjn6$@Aq~GbO(V z>F-x=`{C7Y8QRucY!&LQ9Cfq^JV_o*bg!o94Am5Bz2I&pIQd(x#bw%L&+2ss zOmyNK5kU5=Hre0eIlbh{%OB4Gd`ZHgMn4Bj^WH+oCcQCR?uH#;z;-1V z*shDEcnxA1?BS6eTs?AoQab)gdbLGUpcfnUE%r2aHy_9NsEy;H;FviprjyED1j)Jv zxbcMYPgs7Ys*IiT&dxDsF^?2CqO?M{3FPMCblh);rWe&fRNs>PFqqyEUbzx@xylEh9k1BRFl+ez9Kl&`LAYTIoefpHS(8 zzimKmw9>Bp;e9%s-c}yCXx~a6+=uGN)uV_k;$R_7(5cv!tEAX+lqSQ~nbzZ3nG{X6 zl7(vv{Kz`IjALH3i)$Uf4;0gf2Tix zuRja=^AG%?T{GLDQ1Un;5P;<7;p9T-@QDMw@Y#WcTZ6LB*?It4i1wW{jKE_>6qwul zTy`f)oSwBMc|+>{ta}vQO$&;xEy~{t-P?!g(MmT-9n-^wXA6m?r zbHa=Y%vb^3J^W?f(u3s(!)if~5+w|^Y*X-fYc#~CEEm|HG7p(_Uaf3w!5F6}&ENo> 
z8wD=QmGpcDt~+9B%Zd^!mw4`OzOsi#FiG#E1JxpZq!*ne!YD3wq+xsKS(>ZvAgrZdIgFpZ zyA}k9M&T+zGveAcjQvA1EcRIpghRoR{fz-@o9EH}%?)zYA^-Y`_w47s6glwhqvw@f5Eb?$gnECLfuZNDQtOmHSGs|}Rhs;he+;qU>7>%#&hWe> zRoHR%>d1vt!B@$gnZ5Z@Q2yA1&T5HS+CG_cFF{o>oJL$@fCxm-Z~;}y$&YYhh4WFu zomTM@Rtl%G!u?{N=~wBh6V1wPW?HUy!@Y^*Q};75@S?NPwK@lTV~!niYmJ>zPu?9h z@NF_ft{s?N10X$hF5)A%&IGPmdf=U5;*KVb5`Ga|O`mi`cx_8V*2s&>cqC$x)b&3n8*_(pT%PF1;>^qco zK@=`^u%R3Rq(L8!8)#1`;&EfEC_B!t&VU5B><2;y`qRj#N_{Kn)^#Sj{nIofaoL2v zaYnRttp}Qv(sY!*pi4>DV~ujJ8D*vP?Odkx^=UqFyyhlwruRN633ccj>)hDA(ii&h zhMY6_owP}?)fQ}17iaQ4qhR!TKtY0o6W|mO5aV8v! z_gggJ_@T(g+@{E-9S7{fj2;ARvqzF&GNOsUq;tsX!?hcG&UBi7FSq8kghH4kaPRe)J zi5rO+lA;A9I>ZcF!rFFnh8qj@YG<1F+2|X<`p9_d{HnV@Lqp{oz)F}kU^REw`w==t zfY$@8$&adFCVR7|zPC-uLLC#X$(rVI(NjqiqQpq;VsQ7lV7WIcEr646^HL_ zN8Hsex4DFzVqzdN;02apC&LFTOV^TUKx0%g9bP8I^BF;AgQtywQrq%c6{L@i;onG^$owvCGMM-@~Y)v7{& z=y73om{IK-0dTIuodIvFXT?uFO{RF|Oh^F7iC?Rq_~ojf=tdUH#7--sJY=U8 z1N!ZcD1wSXQ=d%jx~iV4+SObYoNj6j)AZYVeMgTT{F)s_#YQBi$DVPX4rY^@QOb#8 z%B)gO7E|Vxa=MsuK`Cd7DGN$DTTB`K9a1h8Qw}NRVlibzDd&qRN3)lyd%^js>}^hO z=d!m&y-lj@W%c?Te;Dd1XDo&Eg`>jUKt>RsU--n#;8F_ng6-MHW8Xx*Q+#Bcc~F7gNTRa-^6- z`*XA}I48*7XMoKE*p-;|s`=2%8?d_Bq1iL$ z3llMPU{YN?=DTPNGo_Rh#gu8KoGhlCR|>lULK{Eh>io;uO7*`p{H8~<0u!&dzE7x3 zQBZO8W&VFOVmG*2G1`LtnmYPYdhWR_tYbRwACpB0U`L2*5Nb*&j3+nLnU>JB>pewu zgl(}+CGE6W3==mO70UEz)rou^)y&|K$m-HrsY2#`frZDeE=ek-YuNX=pDyAqc;WRT zG%B?|U7$}QH0N2(e2^2Gx0^^SqM@?C<5k-ztP7(cLiZ^!v=*zp|Ef_>Hm!IA2zSh>O-EYvD4DLPng!w>i!sX%VkK$o^kUwsAQSV==C`M^5 z?cu<0^1UstSzbRivEjPf0J>JAAT{g9v|90JK!h(#e4iy=tG0hbCEW6z++%1sB#!&@ zBd%*7%h-k+iCoHMMt$YZT9k3NY=e4Wx39LW&h?cPynd(PtuN|J8yRR^cNie4cPC%5d&c|UZePx{7-s!U1v9$}{ybV9Wjc+&56qTt=+cV+)=9?$TV|GQvbxg! 
zBw6ijO=h)QwcYjgbg>QKcn=^~piQCx5HpYh8N@vvV(x2`%H&T6v7W5=DqB04>%EeP zyRL(TyBth&)Ri=wA<_Fp%6T35b(X9>)V*$0c?tam2F#oT+k(m3gxd%ow zoUJl87YTPbEesAc)400PX7h}=@5d{>TAe34ia2B>%R+7cDK~YbA&lT_!M$<(egd`v) zG=ho+MpbZdycC>Lh+EWw$sV;Ck~t>g^ueuIv}DFqAQfy?7!h>Es9m5SswK3}%rlTa zJu$*v-CPSE=13y?2Y`k^r9VuKIwKXdjvUdVM<8pz;q)WVA)@i@GiN-HK&> zSddStY~ipHWfRI;)Y28p`p74rSzcD*WGzH$UY2tbksAr$;|=K_QZ9LVYwb5*bdNuM z{Kfb9k5%?X_V`a#wxr`vS*M=%eSo~KcGMXC^siLWnin`^&UBOhTM-Dmd;8^34wC*U z=sA2#9kq%3e-zYjFbHl7RMTHtLheo)r(duHmP5{&lr1)U z`nJ~PlCXRCf&NXQE{O>8e{m2gAr5RmbJPID%_M_(%o6q-$P;vOD^$GHJOwP0!m21w zp;IH^Qkv5Vhv}W}BaY_kDk`PreX}}vBNIWWeH{AgHdX(#fiRhefy?^9FMY`2+-YDU z;o1KVhp`^oxB_wZ2!66h5aS>9qrS^frdsv>X7_p`N`soNcOdp2u7F7@0PG+IT zeg?VqI+nLA+Efi;oNlxAY_Dde}%?t5Fps*AgCJzSVFsW)eYTXcc0(Ps}*L5?fjqy+sw1k zC6Y+IAhqnWd&V)CV+mxz)Wu}P&nB&WZS_D;ocP6&i<59Y$xg4Mi?skX9>Je$nz(Oi zCc#_ZWiqzurd7%o8`gSKXF7I`z{K3u?^h?G4KhRvkc^(U_To)%DGY6vqAkS-91J+* zaFWt`idU)cp6)xx<3w+VBRVM2BA@2;+4sHOgtiA%vo65E)P;g$%B*AlP*XIV{8*H) zOwbz0Z9GW>wCZa0usFQZl-y(}pS(XS$to-FEq6A$-iD63spyy+3jC?rJTDB|Sd;X$VG#EQ6s{y<}-m2e-mz$6AFHVR2n1AEp|@LkDy zWyVtO%oD^aZ2dxhQiX6H;I1a9HxUtQBv8kAUB~U>z;SbHyvzA`a}qNoT?}xMX-nBD zmk!CiK=17{A^@9}Whe<$Wg)T29n?umKFIW^@S4&#zW>98b5&29;f=$Q^lQ$s7>e5+1 z9!bt{G9;4N!MQkU^h=xeoGi7pbZ6MZkMwMQ`n6v}M|F*TZKqQfoLYlSpnE~W4b4(5 z`P1u!M40U4o9Q4Q5A$eJUdJEQZoL+}l;yHEAs@Pa|B)p5`OwKJDb{6%hl`+H?0QYn z7R^p+Xdu+k&;I6<{{7M6Ms8l#` zRpo1h-Fau7dc&AoY-@MTWsIVP()a$5Vv;00I4!>RioBdDRC^JtLZsP~syc2gBP+;! 
zWuDLC`ibN)MXn-il9;gDv$abD}dbGaN<6JQ(f35b5N!0aJtP3_r74K9U z9m|54TOX^t;|F-*=V6*!Kq3Y@s%R*4xj3t+8#6;YIv(n1dh&h3>68AL5|4+lQW0lC zsi<&3I-oUAinC^;6eE5CiU2i|w$uwk+zBQ*;LC(Ycd)Kn%mT8>ucYL4%X^?o>U_P6 zP7t}84DN<#kwGF*NAly0yY3m$$z7>5zGnmre1sQ%-b7x@M)NM448G!KChBHa@&V|9 z3HB!<30T90BJ@b#Lb>InW`>cpWD!%+d4u0vd^BvD2ELPm9(%KP!c8JhkZ{PYBlZr4 z>e-ghG^|-n?812b1D=c85?ucNm^rtyDPc5Lx z!f1%Q#O%vaFCr*(d?p4S*O{#2xONHU8qY$a-@OlV<&7cDL5fqO*&k8*>SQXuAsaEb$(Ez zOYk9t*2j<`I%db?ak{XyegVe87mY<$e-SWGvlj|9OD%9B305KWjH0uQA7l_u zP83mlvNdblzHqE9NKaI(T^B_vowPSke%7vLHnN?p3@2!eV87~Sh+Y@;;04SHOM1=G zv>-j@TKAmFp_~rG0o`YU2~WBu_#?uTq`T_k1d#gpvqH^0B_fCRi}u(mW(7E5j$r6& z6*$D9a#w(#k0MvJs@WFkmN_RVlAmH$X%}gMyccXL)r^P+mTt2OnGs25Qc8Z#W+LXU zOdylb7JwW>6Rbavc;s%JuqjEA6+-vyK2BxzG(zBayK8bJ^s)WQ5uoQd+S8=FM5m|t z)z1l$n}-G6EmAEa{Pbf45k-;CGz)M>zD`|X?ncGQ=kD5Om0|ifFMn^9r4H#IE#=26 zHBy>rz-db@jpk>zMC)C+!^w(8@xo}m=YU(w%l|mOJ(3p+hkvCz#mR>vEK5@PPY9jbb@{18U*h?x2aU>u- zI7){d2;>W1rJx4u_COY)_S#DZi^50-DULpSlkH`jegmf6is?7~aFf*-CzC8CcZ3h% zzbKDttUHhm(B7jzuTOr-zCNt4s&5+(U%;R6KcR1!MFcf*ODM_wxJFtN#deaao-h&( zid8h*tVw>^)~t5~Z{S4nhp_m1FA^^N3Zb(wr$ z8gdJDSnv5B?q_>~b&8$`8f!`J10*>y_J|zT@xbkpk2S7|_tbL?TL@K@cR^{+UqB(`C@OCLuqn56lDnnVV+VzGG6OvoBtY#AbackFD(hgD$9FG> zNC9Bvs0K{b%v-BDlQ8Ctr?}U9*}P7t5eXk^4(tuT|GytUU`&&ISrLfCw{XkQ^UrrZ zMmL)3q-lpzy+kRS+@;%3Hn!LpmF`mqzfSNoAm@s6SKgIYMm*6{_S^*^KF&Qb*OAsmCdR$(VIYYo2g)8Kd$l-v}YS542{7Z2yU`Ed76pP7WfjLE$}5a zmx7-QOm&boW2QkIe z6U_@1JFpMa5QkIN7ad&C=4p>D^BWMU<{la98x|0(pD9n%pD94eMfX2ElTgxuGlh7}X91 zV=nG7&evS9XZzmy!rE4?>bA{&cBwD|)3`vhS^=S!wuE*wZdp#dCT;i(DiBaPzq|x$ z?V=insVA35vJ2SQ!07jKL4ULdJapGF6Uq7JC6F@5?nI$EbuoRCfVZ_jo8W{YK^&z8 zNdstM=>?E46R$&ym-60v;HIlqjW2+ZokaD7o6h8e*p~~N>3k4d4NT>OnDdLDyC*e> ztO%L;4E5Zx&*M4?*m_qZHhj}&j&E|dnwsaW)yN!gORJw%^(@Cth{9rB8I{PC3%c0^ zk>t_S@8(Q(1dY+vFTZie0j($PhtZo;&25D(;3 z->O{#yj8md_|n>Q0iF+l<}Vunr@A;2Z&fK0Z&fJ?y|l_)Li;Jc1*+ke@~zrL-mTgs zr0Bw*!P^(=5~MlPj5B<_(r~Ydiwp`O?^IO<*yh zPVv=N-QVbQW_Nv&6*SM^BMX0?v5$<%atqCBG)b)Qn)^@pXbd;jXIv*CoqSv~DMp5E 
zXtA|xY^BKsR9eFKHUoeQ*vk0MTJ4>oGoQ~AM#`3&S{TU^vEA**Vu-M(L6&S03!~ej zyZGgDMizN?jxo|C5v6wxCqGW@+A)YQp{YzjqncXm=uGx*nR91=fIZlts+^Im!dUYs zq9}B6srR5^72DF+l$Kq9%oVwO zxt%AjuF&dE#@ysrBc?~KTG_w3y;*X4`@vu;H`Kj>&101NX72 z=@#4BBxZ0GG3PFR0E(^ZI=9WZW(C0Q27s_902m(+fNT*<*r7 z*3$k;Fzq=2rv8z}9i|ivSYyZ7-e}%kVR8zkhgSl8(x1@I5c9gF5z^g&@fmV~xHQmM zo+WU?nIMUdQT*b@LoBVE*f zAK`_K)i?43ZXV{RSDPm`)KGaj`2^Gj#WVzf+SZanM1)b_PZyDPL6FO4PR!T0*qydV zx_QM{)-<1E5TFy*z-89AKrS}E8dZQfyS;*Ws~|6Wu~-zVb)o2p_B%6()V40ac!1qz zv+-nhZ(23LPL`YW6J)8o_s}xu&*WGRIa7h)scAmkITSo3-z5 z&vu-K16K6M1-5+RQPd8MQBO#j)u7ZXQZlsldSk#yeX~ec5lkUI#)rYt-ls=ynGf3a z34la9Mufu>rt0)eY8oY+p$d!I0SD}_Yj9fS%~7Fk1g%QiceIeMe^lFTPd;Wl0rL=$ zxRc)VGbd`)-BFMoKw>mxb~KFqx_voZ&cBQx%L`w)g(@qfl~3WziPo3#_Alq`GHh#y z-I!vTb$Mkbh8PjoKGylHmFf1nPPXcrK;Qik_PZhxW7IXOp-axHoZKQhrgrSO>{d*Z z+wVj5Y9C5Fyo`(Xcr(kor~#%lT{+|qAOK;$?vDeue5tG08q`(?-I?iAyCOg&!)Tpd zIVTr&nB1P2=J?eLp8fD5Qq~b4w>xhDBGypBhj`C zB*#z5$qJF7NGcM^|AyoNyHlcETJ@a1q}5TG{l|;|vwe!A_O_(eYzwCmibYOyEJbWU zd#-MTthM=@P6l~pwhe_=jKez|Q;jcu&&}7k=8u*&-zOMI4S`TB+OE zMsze3?Vh~@Tq(q37N}c(t1tsA6+;DCZ97Vxd=n6{u5LF5Yadr50N64KoA6Un{4U}r z(g4SQTcly1A~D-E9-t87LrX7bp>FG!aW;QS2^ZgpYn>S*&=p~>=2BbD*0U9>4wBzu zfC-Ob7YG>D%VMC-_H1^>cfs$9W0~Fy%+50XMPwr4e_L?8_FcFvq+uiU1Riucb-|WX zc8p5?B0=iT9^73IiMAIAgp=Xj`pGV!;QR$v+Q%AVjoSZ_J4HCn1-A3TNRmgIOJrtF zS=e%|%y(7TA&9Gq_h2q&on&3`~5>}tLYh61~Y8E(BJ z;CyCa@-7XDOIE5q62?#AJK)@+WAQgrptGfyRhapRQBk{^N%@1+ktd3>9Z2MBW!@X0 z08T=6%*InwD-(U0sBocg!2Cf}iOofnO#WF~1U?!H;t4UBlf*(8m7PDsPNo5>uz9xv zsfus`-W>Hr==F-Y*g-6Er;6kU;V-j%Nzwo~tl`hR!8ZP^8|pClu9}|#GbAnW=b))- zsKlgHd8yJ8af@S^Na}+7j0Qjox?3v+d4@J^V&rk@c%V#hrrQK*@@9iZw?6Cy8*CiO z7l3i~gzt3X_eW95sYmqKYe$&j4Dp&8)s2vDG7wdpgBfyjO@VGSFj9Br8YIO%0>8~L zc~963+~!HB&%$twUW^z{fA&2iDC)R@?oPmml2myCke z)nbmS$zL9N8)^jTaZazJ`{3VRhkWT0!)cvJ-cd72*7{C1@5+8Y>wr4~D8{ye$uBcw z2r98!J*<7A9QKH+rO%2+xyMkV6W0AbyC~#g&Yfruk!!{y&>%l=Y~l|l2?Hf#(|7t% zB#^MmaF9T==?+f5n)O6J60!wB1*ImkgEA1Zoh#eSFY7)xk60m2@`hj>mm@k2DSUVezGhclZl&tm)9OC`;1pdW#x4*WX0y0+Y~ z^@_k_G6K7eY24IFObUr01{5Md$r$Q3ZV<#I 
zrXfKAPpT#k%$~i8k#%Gn$TP9v?7hFsTJKu#WxcocF3H*aB&5Ke2^o8kM}m~&mL3-N1KQ&pt5B`7->@xh9HP)FEwLiH0{UQh z7l=yRuYE-RCnSe&LM zZF$DlqW2`o@K`x%tSc}rw8A3vp;(@nNcq|yqJO?oU5K|RC?mVVmMX-f#2xs>PUqVI z4q$p58Z0zPJAk4mM8!vBEWvncqfPJyWyyYkU^3L~D`i2w3EpiC2qVv%py(AXAxIQt zWS~m9r#Zfywl7#p*kkq;<}#g!ppRu>>a9`tZtj!c%sbVh4B;gOJlG7wOZuRcTu#ed zqr0UDMw*AaV?IF&gSnSjdc9Vfqzixnw}@O6uRlE$r_kKbjHA)kR{ro{7+*OsN8SHG zV6RxOAin>`JOaQ!M>=cBiH1CDLrfY&Tp=vqz7SYe*vEf>T2?&XAF1?}CJu}^0s^GD zmPAwIMhb&D7-U>KwihJNE{D!+AaIy{d{vv=wUv#>93& zKS-f}DYN~-%d1ty>||?|Kft3^Bo5TcH?1PEh=gHmnBjuwqKl~l$wbqGziB5uU;9t7 zPf#+H$4KUdfmLsbRybAAH(Oag9Hb9uW&IZhlF03C8FVb?wo4-pB@Jd|%spWH3(cDI zY8IGt2n;q2?uw2BEbp^N1KzWAdNWJyPfNL73i^8A(%Cn8x|eNWB#*=1_V-HOE zi|bQ>i&e2WYF2@oGsm_KNjsSyLM(Gz-nxepse6f?AgB2XeTG(!$J{z$u3WVeW--i6 z*~}wkeT5d!+E+ygEOiiE_3xIxD1-W4>NMp;ZTA+=a_X}zV`Wu2|KXbGg~VD$uFAmr z_3`SL$w#XW9%j922f6u$)GD+v2s$pY$n`I#p`oqOf2zMeTz~z~_1FJWfBl2{>m&8o z|5|_j!}{x&>aVBkuV1deex>}X6K)t1wNYUcWgrkLunjYxh@G3}AVJm^geFV7SliMx zi9Mi)7BN!QoN^L|LvzyD9i4)Lu1mQpF%9KT*)(R4W^zlmS;(*s#guL5kxHD)zfksM z_rnsI^CZQx+gij}zbIS!@0nI{c-!@BJxp_(lW_il#cQ5OlQgg}_HDI|XV2zj4{CBU zM^6tC>tM~{o9o{Q%112H&iaLELPZ1XmmxEG*(vLI&g}U&2~3PVOqf&b&xhVA zdRd(0D_Cc-mM6n5BD|%JZoyv^^Dn&t@7;FV+itZlXwm#&d7j8Aeg*F#1~r%wBHZ

X;}wc(7ycT_3;87fm1rUgZ1Hv^Uz)(D`0I#iZ#i?R5N!KxMnI~y4r z<)^FVz!YE+%ai&lGX~03ov*L*@1tXzJp)zCJ1H+2ozTLx<%2P6H{dK2hiPdnEg#wK z)GcjZ$?oF_cQlae2A`CUX}Vas7Rf5rWMi$WJ_(Z|!_ph!601=;v+$NNLT%F0Wq4WL zLBlOx1!2Hw)%FI3)vFW+qn>K-5PJD z5L8L-eC2*6Lzo!5!%1i2FB1|749l-Y(+r%a2E8IL26M{;#L#HBY7{P3qoZDVB{U1u zi=vsuGI50q(n_touQetgTHO#)cFIQv*h3?Gc*kObv_b%=9YPYEY8c79{$}cS#&I-z zcgqpv*SIT)b-D;w8W5WUNu#YGX7Hg~p**V7j-*UQ%(h8b4WfrZ#0e)cJnJ}sMUqD_C!?yu7vHi zG%ZA^Z3m96+CFVw7F4vx&Ig~j8ffL1+r(8P53tTl`A567;TrwOkQ$%8d$?V_r|R(a zlv&k>s^`$=QMGwAwCPZy=7Do1jG$R!Y)F(uqbU2yJi49ZT?P+!;z6)F;iO~^PD+a2 z%VnC%VzcZ*-atGwsu;gQCAH92(AlYCoR&^>gr=7&CM$7L64%2ZTcgCw6!-kHqij+@=%3k$!DMn9F|KVysgo_~+S2VM%S%5SU#W>h?w&3xD#Ujk zA*VcAYy~0}VY`FO4NM8y5pJ*v1~d%wfle~Gfg5ckJ4U0Sj5dJ9(9?!zm|yir(Y(+( zycjW$#Uq^9GU!z4wms9;S~~s$dw+Bkt{G0j4;hz9KPw-NswS;uG?LQ98#NlMV+CyD zQU>Fpt9meKSYTEK!wnf?B#*h>VfT+4n8?dCS4gsrR_K$m1JyMs_jzgi@Yfhh3lg) zqDq771G_1>K3Z~pbc}CgvN9kMwtcJ{^``ehp9&a<<+3+TxTIyYZQ1Ika77|hRvlX| zrOFIpePG-t-)?Dw>X%X_xxIea)k0?85ax|&3ZpdVcn1#_ya^mSL++n&DkObx4}J4> zw<`Nqevf|1MymjbsmUqHj++NeO{%W1RorEDb~cJNDJ|KbtP?Fth`~+~Z`li~_pM}G z6t_Gd!cm|bs90QaDfv%RN{k;hgq(|C1jhD0cJ1?6PZf2EUWgTyxVNCUv2MI;#6ShV z2@A1kqG9l5)XMwiSh^aEJUIKqNI0vc2|ChY3D60~ASC1cIdKbAEubUa9_P2eOJG!Q z#mZS=oXkB(@d*G=8piz-5E8Jb83Uz;S)WJPn80nGgz40Vkwmj*467$MD6n$=_+*nr zJs;pRag4J3leVj=kR(yU+LN{b!5TX|s(e4-IsS7Uulidmy3{1fRMyel>lRdO~i>*K`!k?Y9) zFbMgSpp?GHO`tS~np~{VNdQej?TqL(SK7aHrUE*$oi_Tx}7 zMk;cN<>1{*;5P{tnZ0~)dNT+P+O~4-XF;{hl=DZR9FY}4*9bJ+NqA1qB0TMU=l$YI zm!ScJ4W^WU)a>#~&gmploWbCxk_#CIs9A<7H#M1($9gKcXG|%7;?i7M^#fPZcXJTS za;1jT)o?r-T@D7`YC_NIumtolg)1+CF@dmTOuYa}w)W{Ei#&<5s=@)6owqfNpb zgEwq0`9#hW!mRleLTDq&zPX{SdH3w%n{THA-1{~T{;F~~5Fv;+Nu%2>qgmn}ZCYtP z5ms@!lkP|=3wLnCbC^-(hp!eT)WEs=>=zyV(&coQKaKfQo&ADHK}$o)CIylj7>ZG2 z=US*Qdpm+m0STrDMXt<#V>-2hNT)-m&fbE>B@LSpt5q=|ixyvoa!u6mEi}bVnG26c zzO5Wjob;2R7ODvqmf$&ZE<=}*cS-IC9Bu093c5U?w%E|p>=UJZsXoU*W$SUI6EO8& z+d)OmUi=!Vw%7<7I~%T#;?XM}MB0R}r|Pfx70auSm0x31CRQzZ;?A>#ZL%LrbmLMh 
z@tvGZ2Lq|=%u`adv%5DA>Jlw-PDGaQ6?|QFC{AmL`L8cHQi|SDy)#PK|vuY(Z1->37)4&FfX>>THtl?EWj!tP6I)e zO3GZov0!-hVBulp#!Y~<7~TI_rKK63J+cY}Vp`V=0gegF$e`405jUtbJjrdJNkFqV z^k+y4WqG|O6=;artLfzK*xfJWEw~(FQTPx+3+9}yJ)>6k%bI>2&%s*f^CjJ&;#gM8 z?8_L7xN66-Mhg-)stn|!h68QX6#I#2rr7A5_fgL!HNi(qNC=%Kh#@08+rNneF?-)0 zmw8bnjS8h`x20e^Ock@kpyt8<&Lpudi7bh8n4QZZLC=~&HnF=;fdnJw|^lk z?*Gr61^CMw2frV@T&Hns0emuNXc{qbc(D+E3S!20SOH6}p-`GL(8dPD1(9G2e#vEd zYxW@HQ?=L*q5==iI;UY&aEsrDDS?#bKLOhS3=!FzpMpH3ziIiBy{Pq!Iq+>5EPR{z z#r2$bu@|E3)AWRKAk1Pe7|qdU%3OzAH^L3vx`|lrsH-`iO|A*7(4J;YW^KSPD8WFk z;8r~_g<|Caa}b$mxg8OWj1V|wkVoQ*?KnNYHSt-~vtu z$mTWjf`5129n3GnYz6KPCTDNF$OIQ{(!f$4RW>ZNu zj*ikwV%}_bU}v9Y>5TY=$oW{>Ukex4^wvc^j%pbH}0Q4wD znTho>gJw1(wF3gg-#2hI$+MZUU z0z_>q>s&THw!Ce^nc6Ng!1v%BD+?u$>4N-@SdVGfs#2$!<`n96y3UvXTO zM3d}!mfWtzf=)tYhF-{SsQkZTuae}LP7Z(4ic%Z1tfaOW1n)9hVaG@IvLe)Ncau|=L>5rwrlli1kRNs2a z?EC4JW&9B9S+Q?y#$X_h>;sTC9HVRi7pfQ|8Vt%Odi4`Me}Y`4jD6XQD-BB3VxpPJ z!pba%iGFD{(dIi+7fIb{aKK{PUKLN->r*;kj8cXrqTUv}^XXL2^H-DNAU)jkhwsUX zol_-zG*%qH9D?sWR({+NgX3(LmDZEy9p?Q)q z*b{bX+2m*4X?i=xPNGDU26Cj~YpWt1Q0U{oR6Y%P zErmEwE-_tOxmegx+Q<5%YW|~Maci-V*5p7n;hnOgO4(zh&vsUCsCySyvr zb0>*TTH*6IwUc(MTq;(a!;5Ex(AOHG?;IeaA__ zt;ioxC5W+Dls7vt$D2(k>Do{dqg0YEyDOfP{Rm2MXq$P!h6~Gh6BEl~3pMa$q{2Fj zI^9WsniJ7Y==G+$*Vjx#9BGEZ(NQxOe6Ts1-Z|qef;+Ct?*MM~fvo?V6L&*%v zqTm`@`jM~;d14SZv=#%E8ddd+DwiUxHB*#XQVvTBpR zU^LsrrKJ&+Hlt{1;MoJ75Q6;=vw2Dv)Me(hNusNwN%(D*Xk@Bn6~g;h#Ufba|CMg1 zgSwfA6$Su5tuVF;agWJ}`|<>?X6-3?QM?Z!7B5iYlq8ls^VLeRQo?IRKl`#JZtI6_ zkt;X$j@VKh6<;zRNt{!)Cf-+F5Z8e%_NBJzKo1h<(WBB4Z87X;^9Yq~aj3Hd)5-ST z5?wYE8w7YmCTplTUHu3+rZt4hBufIcPO6QPp@eX9 zQMQ{$O}Uh5k>94V6d#W$*Gt0HWj9RglZVbma>BKXZ|Kt+X%}B%*SyS#gy3u8c~;3@ zJZ%ijFM>E-U~9WueD-8{F_T6h5Xj*caRjb%JmhJwVD zp2&xr(E#e{<`Zuibs!ub$-6(ne6p8H&uCbZGmJ!p6qb>5;Ya|Rr}+h#2)6S;6Vp@S zUi1kXmP})@1Z6Uko`(b)3?Asj6G|gKC4NMc&@DylC0GEUGX2U$@@8&*6mD+WmP##? 
zR#yjUtrUJ18A-k4A@fk*PsY=Gxh~TJ{McGOffA+_j07bX>|SChv1NNgC`rPyom9(q z>^nHrZ>&Xf*^U$~LxSfM66Ne?f7AbNYcxq;TBrAGH^J=^p`nNx)9dkmTWmbA!3F_6 z1otTdLDX5Tgg2#=!txNa$sQwiDU`G={RM_)8k7>~D};ezL@QGM4l9FI$@2+(ssmw7 zg{_}tAUw2qAY2?FD{Ua!FbZ=h)z+%LGZKk8ABl3PmK@1_EA2y&U)!{J}MEsrjFc-LWA1)#5n~JB^-gT|(XS z@{K7vroLG$1_jfMC0JbnlbG3nH*d}p8-EHDk6^d#k6@cvTqWak_#ZU?GY8osMg-0#rSwQVW@%hQ@7Fv|5N3Rabw(dVvz!{<$!7y zH@PJIWK$ZQpyQj~EY%Gyir5A^EO^v27$C9?WwsNo`sBsQDvu}0MzDY%(@ivo0d!#h zH#Lr&>|nlo{oan%!N->B@@UmehqrB3^C3$Xgowb_AgMtn zVw23*FQtBK=pHpPQri{n&IeUfN+Q+HnB9A&nsk^jNV#hUi?C4&A{#~vmhcQ&Hvcm8 z5pvJMNl5Hn(Ok1tUK~)*-PnKz+bHp-QH@WI;K8!#ko7?LbknE{=r%j>_txmSZ1Ujp z7Qd`Qm_wh;zNWvA`|YQsdsrD7=#1I@RU{lM3ut1xbWE@nuZpSc1&b-ITTts>xj&4r z+S?&u1*&e9c@Vk^%LBClA8JNc`HGOt*_to}|4Xb12^o)c=h`w&!FOXy&+WCBc&nYIaN5MZ!_A zk}Two$Gpn)Pc(LsC|MRoNA%m{BxxqClQ&;TX8PfnY0CZUW$iRkp>U^`$y`*G~PJZU^qM!-(zLmoIf>^tR^Bv<8zYa*a8n|2BJ51l3&9}^d_oWi+l%2 zA)|lF3!4a=YEI2sk*7o%luWLCKBdzi*6tszHY#1C?G4oRK+85K05cPdiEB}^#y3Ds z=JtYb)o}5$SBdKq2nd&JYGDF9BJQSO8*#H(q?C9;-q@BTBbIToca0bsF|PCD7jESk z7i^*DF_;|A=EyZ-Qwlhm1=+{G4m^ze*$XB1oY26V`#NyYibXlxUiY0*7Yz-Y&fFsq zct=|99p#zw_AUjbeP2gk_vIczffTzg;M!XJEJAodbcy&1;fCp(9Pc2!?+BMQsWoe| zIczz37&6X<+Lt^OCt~Yz`13dbcKDz1uN>)+kXzI|VA{AEt8*xdXN( zH;G%Y><#xr)f%yOQ)vwOWQ2(ZSQp_=cn=>q{Q4(H;sf z=_B4B+Y-{C2+2G#8n5`4kj2cur@{P25f{Mxdqfd^Z5c7P3^47$VX zriqHjhamyky0K&DtYO+m?kE|Y5S9WB+N77v)gw}IKA*}_>t)iD`DcL)pzNp$K{*kG zOOz#EPBo8pd=PTD8SCS&vMJl1B;C+yUpypBWy$FRr}7qm2NeJsdOUS>6i+Kri>_m1sTOq4W6&-y+5c zekPWXZ&{ZKlwx%yBx#ApL>d@o7i7ZGwc0a3gJ)96)g|?Q$owME)~@It+DoxUa4EIO zRzxt`6SY3V<7noBOQq6ds8oilS78cJ#09DF4IdOYd6JhZ<5i6eR@1WJqQBwQ!|bk& zl;gLmyzZwqTrey`SxOqEam8y~0g0&d}5))^9l3|p2m zkH|mIZ3D&EzeZ5~k^khI0{-7#nNxX`%29F{(DvqJ-42Xv`vxM!S}1Kqt|+VDQuTU0 z#Rs5klA%MJYfLHQJ0?eG#I09?>i~&nUdu0S>jEskAw#oonCy51{6~mKqyE{EMY3%muz`()gOPkF z>^IA*p}70Dgq61>o*S@QOhA&u#~KDGF16G-+D)1%i9YW7652dO$%VVcQ{iZ=(MTmsQy%1Dpbq-%0< z2rfCf%;WHGx(-K|4(o!VyreBKRSQp)E#O%R=pn#oGkvZWs`n2kV{|8^QfLj=+b?zE zVJH(qc>^;EVSl))Ay+YhG+hWAmQC=b^D^bl$A<&4l|CH4ZosW#N=Bb`7B>!;N;eKS 
zMv_o7#-`ECFqu#cPa?H<9Mg4}KpPkCG zfwBEqv4TUDl}e!_IiL7iJCu2dIF>t{}=;xOr zp?Rk*hqrya-%pt|i zB(PNMgn0xcObUQr+gh#q=Iqgg3S~{mDv@3{ zvkJT*aqJ*eLW{SfUCsER7p>yR{5)q#?|@Mo0y+^^?39xhAim+ue#bZr__xo13tR4luG0(y*lW-lT4CWU=96H@XTR*C2%W zn8T0VQVt=1*==C&&!QV}>$Upxn&2#fvi!D{sljIlN_wr*FjCtTrXDSzxHF8S{G z?b6Y&+qxs40DIzt(rjMw+jWga2=BMxUdKl{AEFyZpvfA*12(*Ce0aP05YNccVur(T zNdEg%g7_N6t;~GzE-Fz47W7RCEH3HhxVHmVm#9ZDQ7bEs`0Y=NEHnxBj+UY{oB&oo z?X<{4uLYBQyiR@@S-c}w8`Ke7ZvCq*$@odGOkNLm<=KI#QQ^>E%JOArTji!N^m40T zM)q6SG>jg9wk;>q0ruoDP7yIMYv@*A5`}he_7@4sXBO;XNRk7Q3F;Q!EP4}(N|QN=A5&WvuwIy9t<&?8ZVV0tD4tUW0u?7V_Z2H6PhsOp?DG)mZ-JJj zYx|&Okp(ba3=#R0=@gb!lxx`y!M1{?ZcguEYt)YPzFg5WUb!zPRrD7)|KhT2TQyHyzlV5Yl6}Do z0wX!W&yMr~+9)32jR*Bc(s%=Y6o*S@7k5$U5nE|{*&KK}n$IwWSemL5O&f=3<71zD zXQuEblN1%dR{nTrUi`d0Tue~eGtOq2U*?b~Wga^MJK)QGAwJD0UKU~vw@b_#y20!= z!)&+-o9Nvl0+n8|UicI>=B<(gw<|X?8ob2K!H{Qe>MjF-xZGhu@}MdAKm}3vvSaCt zDZrLso|HShPhrPZ#@yc|aaUTE!kOlZ(TtD;z2Y6-t=qF_%Me)U{%+oTHHd_8w63Ab zKI!i6Z=&-`LYg6Q1rbqxZO6wcBb`4a^ZOHfFfq8*-0LmFoJ)W+D3`9El_jXJ8x5l@G|H_(V%Cj;;IaZd*DiZ@?h?q7Fd2jZ3BZpgS zscsBMaiCYS!mlUso_UTHwK<@DI+8{Ls z)1$uUTc>kC!on?^9SzCK_20+~aF9?5NAKIV${FU5gEVtT=2 zk*rt-x(QDCWR+hEDM^mvq8}Te?hO>3#3!JB8UZ#9N=xTh+umOjXxUB1?~y zQRcDJ-on^vFCRF(9y_%t=puMIW6f%1<{hROWsyjaLMP!}>^F~{qJnv9xnKtY3uC9T zs{rLVLN{^z^Nr8i-DSr4HK*Hq~zf3FJZP zqe}UIreG5(+nE#|kfeJ%lfrYgbn?SEbny2x0iD=arj@a8IdhhCD4&F%NudZjqQ%an zuzde<$Yh8#Ck&%RQViT7fV8&^as^wBb$mUw2}HIcTHPuQM_r{Y+{BjtTM5KPQfL*~ zO1k(uvIL9(CYlVrI+gsD9GcMnuGQjY4Q2{D9<5m}BVfu@L|(IpJtH7+++@jKQs+}b z#7hby?ya#fKe-4EDKx?4xD;wMOw^bax(T#L_VK#`0pg&H>~{=ui|hmWcx1ni^k!T< zJ}4vm6~LBKfIJA1eFuy}6Nal%o5{d4MC?v^6Q(SX%q1w*HFBskgEg{bWZ%$P;&PRt z(PR$hz*AW^0>W(J5N8Z$@yz+AC^7Gn5qm_rymS)pyyDqSVFCh$dN{9QrG-le7~=%G zuyiaPh97I(J8WWY*|o#CTVoiHyt4v0$R2sO5c(pJah|&&3L3P0C{Z9RA9|VP!z1s@ zE+2{IU>K%Caz#E>V~SBPbIT*~9rO=52P_hAra4rCW6(qJEO(_AQDXyO|yPvNk|WwhD3x zg2Z71TwyxXE0w@%I@67=OYI43ReyTK?m$MEz-h=-IVBh9Oq&?Hd%614`GmA1x%E^@ zIILY0(wRhI3`je5l<;Bz)sb7n>8#Il-J0AentX`Emg?EP3fUnC30Bu*V*qCNwcX~r 
z8Q-8TX_N@ntk}-NX*;lD{?utfMU~bvn#-a3*`;lOi!GL8gP1PH9-3*zRoS*PvM=u zzSccBqKYt>bkQ+2Kwd8!Z&n(9EWoIPZ#Z~rqRGpGZ6XSvY!zj4SGssq}OUa14QvjoIZhRpct zY0vI972TApi_)H@4p`c=5${06J6V#p5wT>}5++p=Bea}sym$P@4U$prGoEjP+t4WA zrxEzR!S^Y(lAjS*#|74b@0p6 zpQU?ZS{TVG{kh{s^v7T5%dcjdp0@sgVZ|mQ`*$J3sp=P>=Wr^WukW4&fRiVI(U@{z zB0MB>n?C4O^y`r{9)6lfLZUJ97cM!^kyQL-X@3DTzI@uM8+6u$70DED$4p3rt_Re3 z<6HwG!9Z!yMju9`VaHZ7_f&|qG`4RqjcpRwYHFl*zGXUtoCOB=Ig^}&!A+vD8QfNn z-j=H^rNx0bQBFnqb6jiMqI;IPtd!5C4eqhFD}%dYqe^L_jprZ;CO zH@KIE7bk!l++EX{Fu1$Yn8Lf1DX6%=le~iyG)^ z+8sg{XPhl%cj(rZkO6GT{95#s0bJW1A@z4+%RHQ) z(6P80nZ}53Zsim4*B3GoUiG=S|Bp%E)F4#T`K>>bTjn z#lz{|ne}{9^?K&g6hBSV9CXEwaML9+%Pt~ugQU0o5>1YpSzBzO#NcX8<&;gf#h@|s zNY<+ML$V3OoslzQ1V@7G10)qF-E9Gc`KSTXZvY-^NTcuTj=ZrfW3i- zo?x-PD0^#KJZpck=6bl^VY0-V__hkFF4vn-u{jjlI0g4^8BHm*gDGLhXfO*J?uEs) z6`m2@@h_j+8a??z5hOv2`Vb%F8&B^@=VL?4%xXiG_kT$q=c}^=coG!4xD%Q>6wi>p zQXIjTI;CWzAbc<~HYd02NDqk&m?Zul;_vG0ZM?fR`ns~)+r>dn1)fs=F7Bhs5f47C zk$I!L$GS*~c7d;+lh!(RfCRHU3n`pYVa`ifT$NOdpLD9=s5OniR35p%ja#O4=9(_! 
zcY$yha!mK-lT!m$^>B_`_KtMq+XQ*IdNrO@u7(CMnH_{#g!5A^za8X9QW5urI8DRf%Ctqz@ic->zcn|W9O0HQzq)Ip%>>IANK*>I?Tc+fMB0u|V z7pbw1y{y#a+2o<5G5sVdH>dwVe6IOtUt*a51E?)eHvjqf{O4i*A%nrpB}^126dMX2 zSQ-UX{|YF8ca}f_q`njiaMl*#)fo2wZlYj#P84K+j@_MoLx0Zb&tLEdu{fpt?2s`; zg0V5lp3rbB`X|Jr{ z4l^5-~G&e~}O8!RH*%?EOilzreya>S1{zE-6S6qBt4-P%;TrD-XMMe}&5oB*Z9 zfPYnhKR8HMmEhW4i#=N}%6I@P2DBO9AY#ald8o-@Qv_CMAK^K=xD3jM)>h>go3x>C zwEh7)6z2vwo`O_#liSuZv?94~^3qeXW;$%VMm=0^0XRR73izvnv#aD_@JO8Rs+Q(f zPHvN{Twu7HBzRq!i$~if8<}@|Ij(xt_F{Gw{xq%%j*>$CfO-P7a6ZD?7AP&05 zcrcx4;k^M$f)`)qI*!V2OysL&3{Vk%KAxGDZFZj_`Pe+53>7#hv(xIy}$l?gv!lNa116EqyeI*?~=p+aW0yllD+rs0(ZlM z%ExTWeTzLULxSC&50h`_ff3$UK!gum(LA>~J#PN#6aLl-?frU4ZylmE@T43@e5a^I z@qtHwF5|q6ZKXq(z>ylE%4F%$)=Qey;yA&8PIhPI`_&c!`oc1ML@nz!feH%#zpHx0 zG*nS=IvaZtzW9nw38IP$zd-nG|rNss9EF>CS&f1~W5 zrg_9a?Al}Ox+G+g{j(H>8$*Y@W?9JT%m9NOb9J9Q=mGG1)C>^ZPtKXhwbVw5W?aIc}rjCqUawBjjaR>0F>#uni%8`5yCKA zvcuK9C-^IH^T=ia;3xDp0CQ}xUOpp$&+6M*0RH^(J8dszAU?LKTMAl~O%{fVApx{_zfxo@634=~Z!}am&VV?9l_H=X z^0F*r7&*}avQv8CRA(_pv{yzLA;W9|MjquA4e()oGK*^Fw3<0hGruM7T3us=Bn3Nl zQS=Z*9|O+$E0f3dxoTJR4&B$bb~u5r4J;yStZ{k3+r>L940Vvd_JNCf;V z*XNgr?xBnTzjiI(s}}h@8i2S-VuKOa;@Uazc`$gza~g02<4j@7YM#>vF6cSKr4afX!?|(0fNoxp;RmFGP8gj(Znt_6GZwi zcqZ_-wDU`$URz+`Jlih* zRRzl=b;D7vjpeV78;=245Km1FZ<^!EkU+>H&^PL%yDH$UQ^rJzfN3{Z(AJBvT zd}o4PC7{h*mMJ>3vbYK~#YkP*nw@zyg%#AYEShIqXDJZ3&%z z6=nY_Rh|N{kr|?TjE&q7%`hy(HLD4IoUn)q2h9&y*cyEz@fhLbOpop$pHunSwexc< zL|8`a!YyXo%38Cak%r$@^lq7RcsvxnOb|LCO40vsgSXkFN>1Us=d7q2F5DbaS%~m; zQ~J(H#wYDW3lb^MR#QYy$l$?>Jjix}kuTA)QAp4Ss0ig9A1)6wpr@n6{D=9JqBA;7 zrb{mtH6$q@6fPz0(<6qH8oA;V*G*#JXltGaPxv(i8r-h|#3~=(w@Jzat`+hb2GJbE zO361*?{(y`OcEOtjB-Wua~#MjCI9IIk`k|)N5xad=CKDB6*47oIvP(P&XlH1QNP%d7Z5> z9TYwbOY`2MkhGmrv1~|M^y59N24`@48|^40t;&`bKfeoAunbENJuJPBTv?}!GW@4V zTq7(^V3Nscgr$whv^7`?brtwBm#usxGn8^u4!Fo~wAf`k{zC7ZVd)DURg$l4REjfFj#S|=R4$5g^EHur48V^4+1BzETpo-CK84W{H5QZTsjM5yESkUn_9-giO zFZRZ{#)Ye1x%y7EW*195n<&0us$Dnj1~$swjELmav`i<8kSbXy-0l z5b05A4g*(s0X=3{gjRs_M+`th1QEok^5=NY;i0Z>T0B}C`Nos<>*k#YtQQ1K4J?4d 
z*h9Se;t0KhY3hWKd)%dMkG?!h_KZyBh3L7 z35XC5$xdoSn6)fONm4GpTbRqp_4DJ9fF;OHGQpDZ7siaV&nR5=fEin zdIwJN_t<`mg0VcOIC>--syOmGU$tdM$ZN9XJ&X(Opkp9x{7{J}s2ZBIWLhg0O*Qn= zcA>spWx52ryqmA(X^&P@qk%yE>cZ2}Q)$mXtaAuf7td0WH*?PB7pXQKo&*-{E0b61 zn{wNkO1>2v_mZr0RlJ+88}y2_02wOVWoa#M5g{DSB9b+=*doHH7`KSb)N&1$vb)*Q zRN{Z!9(ol?#h6L%>^wiSsI!DwB%T;Ii$tLAJ#Lsp24QUyjVQ5iFuGCXFlvYxMcA_@ z=#=V#xievY6uf2ve@;eECm-&LGF_m`_*Kzro?E*&7m79ic9zp7j$m`I045Q+Mly-) zsFQVWxiafgHqFb_m`*AFfDge`(+S`69kn?7*VQCqB~80gIW~!xnj30fD01^sw(WGo zyp;XaoT~?onHSBKbzg!aVQORnAb=3&#_S#DN&-n!c&X~sFjA@@vo%Xe(AfvfoW94c zS{^_d$X%;ST;PbDvx~{nWZN8>^|C)_YoUkCTd?}1PAQx zF|WLQrIx*o(b|Qg%UNepDK_o3!kc`<^=-8PyD*NjoOHLeWD!nm$?Wu+mck?f_@uRc*+k zQ+khY*8^8n52)tuDE>I^t!U4#sD=1s&&GZ&A8CQoVzMTcgH-+p`SW?usxH`)n~HvQ zT0&BE?_lr8h@W`x)z2NpOQ|o61Vj9y!s4*esgIGGx|m-)co82wu#OL^PUuhw)U+WV z@Tn?NAkGN7gz}my8`!%lx|9plXQwT>jixoP{c=f4tpNYvTCaat?&T8S$)RLYiw;$W z6DJ$np~bI>7&Oj)r=xh8pPs%67_YyeR=)YPwDPS*t^8nB(Xl!?o~1`4(DpYU0g(bm zK;;|8%f_)#1hSk2W-ZpnBe}^w+M0>^t8wz+4T$ zjoJ>D$S0v96bhYHPA}#yd1EpMKyvHjuTeg4q~W*B^~HU;zAU--hRGOswJS2Q>Ajvw zgSEzhB<%HJ+sKt20OYHq0c(z((yP5~&m`P{G>XLez^y|u0bKqtPoKPC$WfA>;*6_? 
zRW;+Fw>jHX4RP}c%0*+Dk$E=mPdyLbxm6 zbo#U{>BCzutdc74t#C;{$i~fcS5oySo|z>*4b@gl&Ru3nw_%<4l78lw8pUnDi?Yrq z>U${b9Du)lvhFFt{XLa+=YafsDeJaW8ih{5R?Yid5;V;|gh*APcEaAeZo0%3OFTTw zZIviRgJ`X8qfIP)>Aj?RghOyi;y{T&&QeStdEFcFYW79cQR&58$D|`wM5hyNjb2l% zl}pS5gL;CEv;N3-sV}BY@cb>Fy?~l+iCkP-5(u}nB|9sHAiTAzQ(JVCxGu5yQVN~Y-A4`db&MwB`ztt<=!8A2 z^HE6d(jyOBzYo&y?~>w@7N@Pc!K=Nx0R_54bpvU=rCzFOwTf)aEDE=&ccZlU{Neem z=#q-!HQ}Y7(?Rwttr=D@*ZJ~j@S+M+;Z);Y5vx=~;|0%X>R6rGw(!>Ah6)--k{)r@ zkV@e>E@Rm&X4~ZPz9P!r==ccAIp&TqYjiS!lF1aF>eVY}j86H9CXr z4S=L!?nqy6z_Abh%H(Ic$B|RT?+LhxCUC{`ymuyoY=QFCd426DA%z4Rg^1bCUHtV| zczLq{p`)!Z>>Va$~N#^_?o}BK!sWgq^ge~XXe&~MP1_ec*U*GUwwjChB)SZB)Y#tfO z{RaM4wTq9HU9rb>!Dj{Op`lrVC)qk6L7VbN{p@qSfM>K-p4gnSAqToRsz5mSEB^AX zca9Gq(w^|62>RLf=DWOgT5o~T;_d&Ex8UN&ePQ-JMizjI{IZ!aP9_p-ErirpyJd4xHCyCHVC9ju7yH>V=An(0M_N*EB8$anklG9LY6aw zB-G3x3rX5iho`MWq-6H_a&7_-1U~A>GRI3t5JP#S#z$xLCIfrITC+UT$GqV)YWS>f zpS6Y`qhaz$9X#WcKuz;*j&Wd8goVE=dbh&-^9ioH21#-0-MAo6m5@y`6?tTYmCB=m zL*(a?sKrZFrQ^QOAIZoj)672xoL9$r^x?M)yCt6R*7i#3Au+g|60(9dC1msB6}9i1 z(<8}fvOyMyMHW*vf>r(vE>NBZB}^=DfxJkf!gXiGF}>3u3Z#mm&tw$yN!=RSV71fS zBG-L(mBI+$D1|X0g;Av4zLuq~b=_^Y%z#h)$oQbb^TkKWdd7<*!Ej&w^&{M( zb@qr_pfgS{vd$h-D|GU>emi?Urmsj+&G;GJK4YCcL?=hQ6L=k01fzyeD>`H`tcHCj zY!`-o5MOjXR%(TGbZ0)0>LLB6`@8g;F``SI_3jTTjq8YRAE5;ZmhRuH?oHf~dd09? 
z*rIuN@V`=#s33Jp)Q9ipt1uzOl<+*nd2&5?eX%JP_3!d|Il;)N{DfZMSeN<%c~5xfGp+hk`Hl{)<e!{SCm(Tukp=>Q>Zi(8#8Sf9E%>~DK4lm| z;3%OCD*H5@s~C{3Y_Th!(v_WlWq19I6-(w0(OSLt0Wr3@Z-25f1I)UUk=85Eo{H^^ zP_y`C@BR1A$g0$u-f94uqON*Qw3(;-m{LJ%=R*d2@Kk-sqS|Msyjrq^71+bTcsU%) zWl|@ZW+g>3k98gBn%+NR2^^GX(tE-N>abJuz!yG(ZkG_84Q zX3Tx|Qv;+oW$onyo)i%JXTE9euEtmB^;F{VdMY{y8qy^qh0C!_gy5@+*pe~BxAg^3 zQj-|5OKn`$Qkh_Q(1hk22eFEHD)-VH#DW3a(dxpH?NnAOW(AJe!skhA_^Ca3o+?`= z$YD!b<1v+#)q#puOT||OglO}H;S3X}u%|8Ld?*MbPd7MP`Dk=D%}Pe2L>p-ljK=a- zfK3e77J17^v_-`RWAWQ?HcdT{#=}qVK;jh+1O$!s$o6#H!&rzye zcnL{r^>`X@oNGK5$kN%{mP=HU{S0bN%h)wf24WYS?@2M6o!&6W{MVY|*WNXsW$)WK zc<-<*!;ZMrVThc7R{nEFn1B&1b(6FtA~Kfzwm1*D<%fbas9pqa6rY9~>m?iT8~9XI zu?O*!0S%imJp(_TNVXjtc6LQQc+M+4rR0k%vs98r1=gX`ef;z~>w!woL8H`U4gC?2 zDq}BIf{toBD}iLHJdCDamx3wtL4zPtij&BZM1FDFFx191erqY5sgg2g;=(*uNHF}g zW0&fI>7J$Cq44Yp`ZBDd^DHl9%A&gKs2pPmRGq0N0ux6e6x0(QRXEG@EJmyEX8AUW5&#p> zjzc7iaA*(v5m*7(Nk#@6n*f9|!_S2{L|U%4sxg?Q4FoFJOYM&}+_#L4CeVm|xV9Z= z5B&o3vR^_zfl?yK$@vq}aC*%csH9Yn&+a5H7y~#TaDo>BaR&@7JJ3d>sBUO%qAaTG z<@1DcUNy7^d?pBf`4t<9d@8CNxX>g_gV{^oHimyvh70&ImXpBj&mu70@bnFT8ukK? 
zfC)UsZBk=(VHyxtGaX2YOCDSTVV1>bOc%2F7W5&ms~YzXn@C%h>okT5jln&r7WXv7 zg={{{#a#wub8u(@2#iNcU&(kHFN~*U;~b3Vh?3Z;GAUS+SGV-&4fa+?EO_=tu1J6y z(Tt8i;av=@4^2VA7))U$3nn-HT}rY!0JI;Jbc@|+++0HVG87n}Q#H{#MKNw@R64S0 zN_q;fsd8)Jg~4yfaa-9I929HG7P{SIu}EHlY{!ZwOz~#aWc?}w5IV?G7NN8;1PD*u z6v(7}g42{_R>-H$-OH3dLuSP!IydP-;FpgLzJnG$jfrDKG*%wxTehM^2}}{X;N*83 zIS|{>l@7%0E8Y)PwFaLXCX*iTIZnLCC=K5_O(Ko3KXRm33_t>SNRhFjwp@#}q{rj} zK%N0Tj(1lcslK@!aB&F7x}vRa{?8%?+NB5xq+d(vUtKHvi`AY)Y7hJA2qkIO=mbXM zT}fWPQodHFZklJiT~H~M%Xqh~$|I%F8hcs}$D;}9NhraGNQKJzF_FqU!X_;Co6)e0 z1lfmxBoDA@=(gmx3%RlD?yEj=aobK`T*+1*fWJqepM$1C-`nW?0Su9;yf58Ik;BUD zSj9*L#cJp{oMMs5Q3JWjh#F+~AajKluh|cKwjd!sA`cD?jdhgw&;M_j=2M4J>IIl% z-F2wXieSJHMKpl2`N%o9Pel0cFWgBMG1YrT^2Tm#spmEKq3A2yRq>!?WOGYe;PB-2 zCw_y-%8zLoSf?k%k_ZK5-|}buJ_h+=x^U1v-QAW5F;uSMDQZ@3S;qN+h!Tq>n~FF0 zfC9D``vaZVTnH`E-j=|A__1HERA@@4tg#fSGv9gXTd$6OoKq6M!+)#Zded!!Yism+ z%u%>bImSrjgFd0K5G5!4`+B~L%Z#;$l#Fn>j1PkOvb9UWJ)3>tR51EOmQ#GiXoy%+ zL5nQMGts>a!8hdOED0nY+pcN$yNE$N7nkl`a_}`&tb>3Nwl#usTe- zy^iatW|_f)+JWNKH|kkOKdY3&^W0-)(7l^@O+k0fCyu4;Zkvl+N3j8pl^g)ai8W*- zfN_guYZhoHnwWyX(xzw?8V$10#JUT8c7<|j@@3O}b_(IYX;P9W5v77h$K6c1n_ zG%Z1oXg!nLNz=*LgiNjTws**$Z$VEn;`~4NW-84L#h*FP{1~=!SBa@R>M4WYqQQAp)8?3xp%g>LYu##7n+Z<58{)Q z@9>&Y8*$MRNpq)@{Sr}&ZSjqE)u)Uu&91;Y+M}KwOGXULgnMH71(l*G&CWz=CA~}$ zX-iDE6zFdN15kTsm_-4DjQF=HNH?XBQYm_RUc8dG2QRBaFercxCC}zbN(0AKFi%e0 zu2IrrcCTzsp+}@1rg(6RCRh$~qW&Nf{h0}}0g2AAQlKp6fIGUVUxbiEXI!!=FE2r7 zTrxTZwKmB}of zKmq)SMYS|N)X4d5ts=j{?k4@n;VX11&4aMUa*t7zl+${tUJ+9yK2jfh_KO<_Cu1kS zC@U=Ls2@ox{iUaxH?BA!h za+3}LuiceT#+S*ZcqC^=_&kgmA||+xm(-KUu~pndCJt~iVw)E(1<8L@9Xz0C#Y?4r zLaN#WxzeV&dE9~@UyMEa!C;J;uAa|#psOKZoe7vyB&!CaeG5uNA{=8>A#Ty+wLJSm zB8!77wj0MX;XJ%KB`IY+FA)m-v$vCxMXG!JrwJ2&RsL)viq0;QJ?H==9?bvX&aNI zwTmELr;yhx_2|t`Jgz!%;*dX2?#8HRgpvJ(Xv=OFM-RR$a}n+!>=Zy)Cd~QvnZ8Z^ z44{P~NUxLBYrCDc5wr)&{_QgR!Q~C7dli zb%Y48ybc!pn)~HE064w$Vp5-aKMwTg{T8!`zx;ztsTgOLpJ=+KT7(Gz4d#o%hTM^c z%`mXFo(78&Goqh?Vb{=Y3yx3TsjiokOiQi3t*Mbi`^``nv9li;bPo)`CXzvMSzb93K`l$k&wc1_2{& 
zZMe>*))JJJv?M>mP>uy*IL2fjs+de8*Gd&Q*I9+7_*abnw%5F2s;BKTVN!LP3xBp` zziyAcVWeddS;Mle$x(x>%zC;w9+?{BBXpwqP^@R^vBu`7`pK})u5~pC8R9aO0C+0t zz?{=&dPnv^sdvyT7qgGB4Cw^0Id!gnkIf^?((KJK6Ac$@GnUDDr8K560Pg1f*aqu_ zkCu7FTM`?J24ug1FjtT^FTNA)9}#rfq^H&^fJ*;zRG*`NEdW|%@cDshtzI~O)QKYT zG`dNRDlQ^JR%@H>uT;K?!YLxBUeO^(QKWDM#3YpZskYqI*WH+t{*BGI#bfU~T*}y% zBuAJqiITF8qgv%f%{K?+YAc%Ntbb*)GjC~PIl(q8NYWWj)1b-e8~G86K8~^-$?Nzj ze^cliyzJpU7LV6+1Lu-vhooVZ>S+}x`IOU>+)#v*vT9KL<2$ndSXvICjZ#FE?!tny z51z_C$|3iBJXi2IMp4oYqZs8ZAKAx1j1|KPRxTps`}&ooE%5 z*3=kSHbCU&_X;oEWS4Wm3vLq)%W!m~ zC@SouDVs6`3$+d>=RIdYB?7-myo77|XnbBo2V%5=la20Lvv5uf_2H#`d>bcBDXA#| z=7L+I4>7)z53qX$AJi$>&c`Y_jhL*>BqQ%o#*8V{%G-BchzqkC{GqD!D-Ul4(p}MR ziL&!0FOBfDrQ*M(PD65y3PsdbJ*zeo+jWU2 z>}m+;c#c*x8ZPy+)rsiWykS;m3(2{E!oeD7WDH9LMAWrsIZbeWj+dz zbg2X1txM`7gl|8nF11FFby>WGaqDWPf=lp`Guf8d-iZfrbKzj(Q>Za?RX zYa5T$FRs(oZRdP(XXBCj#UIku7jle?T(v_yw5csa>4G%b$(g*3KZ016+l|j7reYDBOcDExK-Ut5n4)AI+K@_+AqxKLSZ6Q0W$xOl%b| z_KB)U<@pQ`B5cTQ8|4c%%5?AWxjstmENOBGd9CNiCaFfq3 zKCQr91IEec`~HHKDHr|gCpQD{rwuh*1iF8H!2s9ZjK-oQ7>+|`%Hljhb_CkCNknBL z{e(--&jRlS4jy%%pAVA>JW$wa%-tAMkGNINo)~RpjD}ZGg4r^$B^EYof)cv1Dv1aQ zCdp?7O~!?Z&rO`*-(oh57`_u-KbZ_SGPMv|j#jHGm%J(@1gpo8z^JAE4mX`+8ZXP0 z*J<9$dMa7d2E*r^AA!Ck)M*A2&yj$`;Ue(Hh&2dCQ88+TQHIk)t9XIv2_hV)?HB1P z;E!K@o~}X@t>U@*_WbetRVR|RXtH`_vmSoFz9HSlJKn6T&-1Gd%p3O0rk>AAkVH`w zGxe?F($HcPsPf4ddZ*Kku1ZcVA9=ou%FDtdmK9e)P%=oX*r5A?ZAxj|Dn4msJdP4T zx>yf`UV&6Ue37n>%NL%d62`DOItv>?U4lH-iZwaPS=rn>ZQapukp9mm22Uqa(X}t4 zq*3D%twZ9H#F+RwNut@3*-E;u5$bx8VIs;Gy%x#79Hsv`a71|-)6-e@y2(w1`+2bz zNYG}zQeI^A%wm}|cneZpH#P%-fy#Lofkp{RUlVVw;uAJRW~s$C2u&F9G@qzmceMR> z^O+i3587p5ZeYOsUjW5!K@nrcF3|_F=Ov0uI_sdLH8`XOBd`uJ3=^f;WY%|}iIUnd zWCKmKimMuQP=ox#VX&>@CHe;Jt>UHnmMwdsH+T$YV-OQ2&lYYA>D_!O^-0BR`CP+76wueZ@aEuJ~lj_y+T*DNQ}vbUAnqq z;nf|wivBYG>>6D~7aG61URN(&c=Zxph118MU9GDeOFVux)76W|u0mR2&|-#q@?(3o zoR?P7Z!}n6rJ|%Y*eY~l{P3a$O*f@pVd7vRBesgk226FsQ=vKhSNb+CXKIMAg)jQo z-DZMx#NtDlp-7?>1wUTUy82R-{uaiSL~m}IZ=?>s}x{KOJ4Hq2c{sA 
zbeD2q>IyEd5ir`0g_Nb?IvP6_U3VKFU$q}+_+eY}(&B|amtV5$rPg|b9{s(r`KvFj z?^D%}+jOto7}nCgzo_r=>&xL+>xSEIlSWzmae4a{lNVbGRt+txj%!d&C~cO2$6l`C zwaXQNv|Ga`h-v`&QG;jD)&O#^0Uy6A;0)R!dvqI} zgN2$Wvd?A4mFsNDq4my$V8X1e<)+MTR+ILM$u&VeP3Y1R#kGN?L+_}wzNioT z48*mAg;TSfAv_Z9moS8>NwvTbo{~miTBYI-Tk^7so5&sshA_S(QXPZN473no|N3e4 z*zi)G0yOy}(pF|{85SlOhJn7oI9bRo;_rprT273o!lBl{xga+6sUTqC%QTj_X-8e# zcG+zpj4Mq60D(eavO>_oV!Z1G`DEBP**o$0ZjDx53-EZEsN98Gg{ON6?3OTSuSi-5 zFBN@{%?CaKiGhyDDOjfpSZgwEUf3;w%-TDUOIwGnFimR864W{(QlBK@*V@I);Df?Q z)}uU$xmC?IEx2itaxwH{O-i>6;PQymkK9d6e{Cjfj7Zy_FlT8bznaE@!5`&X^T_xy)4C(Oq1N`j=fg! z^dICPGQbk#l{Gp zLje|NZF&nQ?21kHx;8L%3}pS zBWJ=0$gki*X>9E>p_5c;plIHZ}AgA?&g>ie@GQ6qgW7rksUD&jo5 zy?D##5;jol^*o~!Ze5RY>`9*@l*Dr9;*4{AfaF$ou)<0Kl&P>Zc<1?ACMRmP za#Hp;_U?_UxPU){7RXN*Q$LrF=Uo+VLUQ6S&*?Q0t$n+LZdGQQYY61=!eyb;wTz#@ zjK)uAWDunkt<$<_Nzt)tEZ&$$#e-!WE8+*ih?C9V?3I3q-=GSr7;5cMWm=LUNxO$* zR%hSoz!C=ItN0+Q`AQzq$?ABp?X8;(SaPET?Rbh_Hbm~*yFbat$9VZQybSY^#>fR? zVtU8ZrVOpbE$esw5s&;P@4Sw82-UsT-XXBJ+&ei6A;Ah|`t^A>&Ax?;-e#|Edlh(c zS}>nqR$_^1-T|IiGUa8%*wv<^i&?x`fC|=r+LO61)O0C9I3@bslFfJPoUa_$p`h2T z?9CLg<*}i914XxpRw9O@nR7H<#|a9CF~MAcTPG80H!5c=Ob7|Mh}1HxiHO<2Cx*DM zB(OEtvCgvqG%1x(N}Ro=8^}>9OB~G=(bk1fMHmv3qkiMJ(jZmN8S%o!R#MXt-9l`RTT3iqdgF_ zhCf|$Q4_eVT>NJmHgd{xQks&HDoDCT9i)0j3@PP=*hIch&qy|s$8N%rqoQ<)Q1()i zP7|W}|A3y6cs#1t;K956HOiqa_u2vhX>7GnKtfWQX5}g%Ybx>Bt;J&(@rd?#F~&2V z;ieR<#jKF6b=O+S%T|gt3W=b7RTn5;qhTX5@%0XX|-NhMIkHywkx_fRy2l(U$+eat{!!SCbIf z5`%isPW|(RN6wDCIQj41D}}6+uQ4Qln|u*FB2&-E51oT%RtQR1VUbvoDpVsGP$KJ7 zCU;r3u;2yYL!OcXH!C^Cta&n--mW7rSjy6^R&fqZJ8Bn^TsPL_X*CVFRQEHz;bA^y z>OHE@$J7I#ECIGIZB8GHN9&XT%%ZmLZZ$igrDRV3Wbm?E$q%(O%CfbO6Gf$Z1~H!4 zg2)Y9Ar>z3gL+-liascpw3M8k#-hgRpcqR-Fcg)pM*Y&OVi7lcATx`RIv*`83P$<({g@Xkz6caMR#2 zO_JYbr6BGY*jQzZaKj9@l*}F*Ji!)-A(^RHB7|D)PIsa| z7*0;D%Jy8&f7f4k?KOYz>Z13O-B<1Up`AOfeDM`8`oSOAzU_rux4hu_n`fW*+~;h1 z_Gl)bd+85v+_+)GH2*#8vP;)ra`8p$)~;Et|3ry05)QueT_H)C-}BXv;g5(?gIUc7 
zb1hOC>Y^Ko!y804q`w(yl0ErrmKJ$Sy~${3kIW{oLwUBnLX?fvE;Zw0vy_X-w3}ZK0WQbf&iq;OdakK1j-;5b5sz#z6pUn@;HwmJ#MA#Po?!h z;ELN#gsWo(c-+p3hS#fI(+!*L9*FCV&!Im<4wI2~$h=Ffz@f74%L~C;Kgj4*oY6)} zCRFz6bcC$B-7rlbOiZBDw$uE;=0qln61^Fk&&fKQ6SDTnX+PJ*T&DpX6>!Y~92*;e zJ#*i}0Vtbni2<-!Gk~K$fO~8J?C8O+dD1aRFey2`!s`Ym0c1J&wzw_A>$L*^_(Jd> z90T8;VL=gG$72l?i)-&f{GMP{6X_a)S#_8WXnA;zI(TSd2dLvfG@hM*xOQJ~2*ok2HaLv;yky2`F2)nn?Tl7*O^M z>xE!?Y}rwtX?Ae7>fma1@WjGV&$BDN9<`p~bV@ocrs$b0A(7;vVma475?0O#s?V8fy!qYaDo)uhovGCI-( z`Dg|5oIrkjA;@RPK(=R|;2D5@oIB+Jvfouc#_`mb?t4=Kv#{IacA9RW-V^qSsQ0Dn z_9SnP&D1GGN{vT*<{_S;+mqZG@Ae3OIx*&*w)uErw?{0MK6HE39#OYHtZt9V!`DR6 zJ(LZryR~QT^-UVbxHH}@hr?U9rxgt}Y-#ws6}DCq+}FCmXe- zIH_}FeY?aOIZb|>c*D=xk(eDUCCn*NBBeX7ve(&#?N^u+6EOJa?e;Dl%zg*gE06z> zpnhy2)L$Qi`l-TsA<6Xv`2l0o-zCVqSEvN#EUY>ocA?egQx84OrypT)+Pvz;f- z=Iw^ zm?6Hy6u>^zSScsjMN_%582(!_Q-c1AC6&%WSS&?AZ(A{kDuFH z(EF;pOqqfhCo=V}ia#S*YA0b1by{LRxbFYs?rp&2y3RV!y7yL9*GIQh`m$xUWLH&` zqzw`lU@T)iLDv(@&NwDvCd?Qn3>jdCAbFSwJbBn>P23?$Xo!HGMYCy$vIr1S0}5#8 z5sMhK(6AyXtO(6SBnl&f01YTXFq(`u(SQ=~@BhB%+`3iWEm=;o%OF4QTld~`&&T_H z-t(UK?EB7%nkLJuhl;Q}zB|D(q~>10quYM$J0G2}Y)gY`;41By?@eent0G!`O}1Bv z-p=paV}>)ieYE&xhUY{zpo59ER*5afP#h!Qx9OA)lm+0~QotyPas;voD;fbB9=1sT zk#tG7p%6{V!El zF|e;}=hMGIs_(IU9N!PlA>8lzFTQu0Cs0yewlqI4DzGWV0K6vd})vd2ku@fPRn9cN&v|j zm4Fn|A!?TKc<}B$C=zII1(kpr6)K=KVFBU04fsV{MNG+?wz#(1`Cuj)u)T8ZZs-Sf zeYYsWNL9Rg7)bzCLsv}8J(icX8Tx>L)b|ZcJ(wPwjroQXkUnFvaf;j&u(mNdl3Z{( zWxa1=8uEEV<{&+r!WImAqOb+czou?1wFOVzT_WGp)>$cE!qv0*uzVWxbU-LKz@%a0`A;u;yQ2W^{hnbkylB2J4es zOw3Dk)B!STgo4i8oyCBbt6NMBcI!{@iAy3-YC<^%vq*~Nr&;32Dv5S{pwGA1ajsc@ zksro}m$o>*6Nx=cP-}=DUf9z4yE9Jx?JF;~B^xeRRW4hR-SFrV<(xIP8&tf5fa6)uUR zH0LlAzMwPDMyOOgvK9r#U$5+*q`6+wweqlFQP|C{-wgp$V^10iFDq{b{k*7tcK)L* z^c((YD!syyl~DnXcoA-FE&+M}@Q`PK63jXZrI|96W{XdV%?GfXMxbND4Bp zbC$eB2K8&J=9NjK=4k){El%#~oRe%&IDp9kq>6HoA#)CG#z>@^xy?+84gL$JF~C>`?-pFT?J5^jT`7NS2?lPnJl46Xj1%6`u$$+bah*do0JS{0!V5*sP!qQh;q{ zJl>N9s}`>ca;r&5#w0qB4v%V}g>o}zO3ff)iEQYmG|=QKm(>R{*#gYULe+YPKwMJi 
z*>audiaH6hXSAbeKRJZ-$4{SxD{<3|^gtLdWY1msGt6YV)9&$`G{o@qO4(iR1(#bU8j;5wJ1Scxb& z>;a1|iMg(bq;>hceX?YVc)t9} z#p07M(kPFNj&gozlwTB#!XrE#og$V-2K{Y>#)Bf$jM~QN1Y)CniKZM`evIarUPq&E ziAX$F?&^u*uAa1SmuR4qP9oxMn%i@jU|GdE_(QK_3+xABR#x+XSs(j5AeUnQ>QLjLqpch z&`jbzK8eq7P_5Yh_az=?HwI&J&bpj*KCGw5=&ZR!PuNo=@)B&ED0lpno#7yp@@4Ay z!O@P-4Ru^)u%nA^6G3^?j{hYo9ppJk+&nuJ`u`F2^pxE&CB0L;JPO@Ycb1@g+KLpN zJQJ!puWByajgWlFO1XSY_Xhch9YhG{%Kq(9qv3M7-K#~rFBNc3-!X#KGk27+YGuw+ z1`wU*jcC;DxsR%!Ghw*b?^tfQGv#Jyi)ObPF>M%W_WFi$vsUH?WoY&qZ-$#aNq+>K zBg-~>@{SUe7p#b&{1P>LWVG4&p=PbjF>m(Bl4dXSLd~Atu-s@bZzwf;)rzRuzM4J9 zV)zhqpXWp*WGLLORUIpHL2<{pZFw_1+PTo|b@LjH($rkJ*~3M%JvF<3wAq=VX06Nt zZ+8EZX3y|KqdmTCvuCt)U#wwS5selv88+j(kY^}@%%4nQxY#Msrzc8`IG<4;4e?Gag#@3HDN=?<2 zbi>8A?mnf=nNn_nz5(T-B^qFv-2#;|1>O*{65Pg3a(03eOw>x8T5-E!=W&SFzsG4kVEBx94Q9E?ni{KA}j>Q7{PL) zQzmjNbBZzy=QwZp;-r5Oi5%Tb^LlaFlAn)q^-Q_(*`jeaS)$)ok2Joxy4<*xIZqiH zKg%1wIOkss8b3mB^y1L6jUOS8RiQkNSrLg8wqep;b} ze=%r$f!^rFiPg)gkcHKy#?M$0HO^K|6y|8-GeeDAnOSdqzc+qhwDAi(P~&G;E!X&k zRi(x+SrIkPUJCTr#K_2xOq54%WsXsXkssy_U(EX#gOOk7fnHpiSg!Hw6Q#zdi^kb@ z0oWDrRwlSBoY6b5P$(0m5DAEEdVHB|jV=k6OpkkE!;yJl!a5^a$$VN_jcW}|hqauj zQwZ>S1>qL_0lHXLaUGEYhUpyn*bqHDr;}j`ae6}8ko2EmvVWpa0CuWv24UgA@?o%8 zs3|$qB_uiV(TW81N?fEru5VvmUM{p%J6oNg@wh5M7irj>Jz83$39tPxHQ^Tk4+46U zx{dPL!7|Jb8Cn9uwW9-vPemIfVfaz_pBREayMRaFf1G5Bqwo)cy1dLEPDY^Pe7}*o zeDGUnKE;!5eJ!&nEZ_VULkqb`K0!7=sjk`Zk%+&;nt{Xn?0A5-dBM+&10GWA$q5o6 zzH2Yvh4gyDngiUB^~zCz2QR*_j$g1AmuR{R<0bmNWFdQkCA)kOG^6l4JOn?=2!8S& zPo;+n(4pW0Jfe;>8}Nv=AG|m#8h}U2-8oithb^WE-_h#U3pSer5f&|GUrs#*awH&mGz?54Ky4a=Jwf_3&6 z-LdVoS6d_9xz;Lo$I4983Sha)8wPOQzmS+?BK)7Qk+)ao+Kd3PMM1;qjMmTtV~;F^ zTrPL!ss%BMY}i`mt2l$zoYWc0lD3)`3@|yHNh*Qt=7fi%?^u_&qX&-|-0cpd`d_D702#a}s+Xr#KN9#v;+&w8!NB z#Hmk&M2;vn@x)dwlR0sJx}Rl}&(jIgymdINoS+-EaJ=P9FvnUJLqYt5a%Ip2wyL=^ zF=p(Q$u~&6uv)>Y;vUGnp-bv!s(kkZk(lS6dRRrY7O7FrnI%gs=UXMwb+IKSCK@!= zd#U(RL3^X-?xpgVSBfvC5KKo&fk>(Y6@;`N$%ZzqQa&#PUtn{D?=CjWd`H9yb^~RA 
z)dk)-^a)e4{LgN?t@m%U`j1mfROlOjOccIfleD{U-D?X7dOR&d{E_X1-uCGc9tB>)DadSMCj25)HfW;>^_(M(MOJtOvw&&oR1;wUCkSRDp7L09e zQ_#a+sXCU~YNMDu-7L|-S*uetVDMQC?Tv`8#quZTi%(zz@W~kyLm<64RtBk+xkMR_ zi#HLxoj(#34l!;5xr(V=I_wG2M>AU8a;-YfGlT#5XD~P4p^zcRlBP$7y2g^=|hoQ7vI2mkp z@a$-x7Ki$zY%uE8S)Ps>iZ@&BvF%R-&6t)XG|@mq_qT5uS-IVCp#9^2|`nD#IM5z{OG{!Yt1N z)_4K^V3m*d<3PC|2aA4~M7uy_sWJN!roB)OF1=L56q~#|g6iQbkN;k@SaRF2f`t{{ zj4;dPa_~7P-Uo$7Wh-LVh%rS&83h!0?w0Yq^_F-RtS{c9ePO>6_NH-`N5*ch404CF z*AJ$nfuux!ykK=*7vzsG)hck8Y!P=-AHuNOxyZ~$T_i~_RL&3f`j|5C?J@-FBhwk3 z8=2ZmnB`lcBsP*LQ#l`4j4(&x;l@%+a)K!x09S+~7xtCNmfxYye|xd1&H9e|Mu$k_ zd?2%n;xr2B942v*MT$cgdd;TJKhB6zov3gQhdrp49smwD2lANF`{T(r zLCOwD+pZa*N=Z9&iD{!|i_A=8DFlu%qCJAXJtul$sae~u_|A6teWXR2;TT%V{9QW& zL-Yner~1`N$ber(L`v!OvTT=(#cWqP{9lM`89Y@`WZyFolJMeZII3v_|Eksc%Ks`W zys8%e#`zZt)%d5M@j5nEuaAvYtE<`ysP;WmED+~a9KCq zt2}LLgzzOPiE=|CJleC&L+(qyc|ppvd%)%)%XLkSnNw`}m-%zwwMPba=z}RCGmqmF z1*cN~=Qi};FAv>jVaxRfV2nbLvuJ?9cHAWj!Mu@~23>)6eohv}wwO?+vLwlmFDOSJ)W>ORzCy zqROd%s=4AWz)_sOkVYMKBaJGtP&IByq0zU#WBRSykP@2BhMQe}R15_)01S4vGZV(w z+5Qh3l2x4kg$xzNASU#Oj6k{&Lmqr7aZ*37$LnPvYc6kh zZ&vA>PH%O(Yl$Qtmdl@)J9dz;tx%InfM(LXJwCblu1!eei3!Q%+xZve?s!XItxK~V zFP&QVvX`$}ySfvvFJ1T2bz4_$U3F*2E3S`?H`k9hUednz-VNj9)mnFahyR5a=?cJYq5- zlR5OUVkw_8CrZ0quB0y`M-rWr=f~Oh(rtOGXlc9zGJ&dava9)U>7}SxG9-DdWq)j9 zv$8U;RB6X6HQVNeRBHXSN~eY^r6=luw~EXkX*0f#qAY4yT}!=^jBh18j0nG|b>W;*=dbj?;c(nsx~LRgiu#S=`HiAiOl6~q~5 zc+tm^YeWE_8+7`<9c}l-9g*}cOGtF#(9Uht!MShFKBIN84F5v=v38|8ShkLXV95-g znz&PIXBz08N})UQvKUV&!@W7|oe?DvMn2f!#Hr#!5&$xe3ll&|1sQb_Y>8$^HduS= z{n()2Bh_+vizH5|T8De70#1*p|)bl_E1TTjnJHT4vjO!J_GVyiFjpnTbJDNNb7?dimEk^wv4nmHb5{0L;5ia z6;E&m)_;#oz}5VtL~F2&T4g`5JH!7io7nNs|m)5F(am+R;*?kH6i$l)YZUhub@)CrnfKw4ZMzO{L$t%G5&c7V?Ja zv|S98t?Z3H$&qGu22nB5*PD&~XsvRiO)J4|lyS00CCO6I%nRF8gp|XCm2}dhZa*5c z+QuUCVMmI{2@596$(mMb{c@CKCVF zUV6d44?RsUamR1u5&LuLAS);7p3a}pf@SXdI|T@(P}6uLKm>jRhqB5p2LVmc1%aE; zK-FnLiPn}u`4XZy3yA9x_6VN*oIQw160u*j2PKiH?chT~bHCQu5#pGtaW6mLk?NAsFJY3`LdHr} 
zxHEm0Ux>(O#6K{sP8NRJe|MF->P`HHX}&v%^3e4fQk>G-_GfGp!bQ3~*Hc_cs*I{g zfob29Ns~tp=#XQY%PWsPz+sMbFNjY?6thZ7U(?RVDJaLJdP8^umu(<)ph;rpZuLP1 zTF;VnRD0}bRsP~(pezB~62Ow=i%*uQ76Z@&1|1uI0VJ4TFhRQ*m`s^WYg7h^sOB6H z4{&lNZxBWX2M#uMW#WdnhQ@qluY*F10eb5&5*`JgQNU=W5IV#xk=?oVMhikjfm&X| zm}ZX$AF1Tm-+HjqdGD6?`+=p%a-oU~Eb951+TPj|j};1D{FY*wO3ja#Nwkii%$z}E z`48g_8n0ByXPwHmU#mqt+2W2gJ`6FU@#~H!Nqa*V=3{;x)+r?~YqGyx$8S(_Bu&gzP0KcFi(nbDQSko~%HSmv~@uJPxjW}x#B*WIOGq8?UY z@{=@1@4y656`9WS8H}GE)91v@IATt4Ckc2;*C`>WnqQ{l9HUN5$Q98onJq49?=MIv zg)p#;3oZ3YiMR^Jr=|t^#Yj}yW^ZZSE}7bgl<3Ju^+a<;(G^s|X~0?}u=8sbxe(W? zs$y}vLdBhrs0*0Z9B#`q!W&_H1DlA2q-speN+QD!n`P^{R+oxW3zVARQ6VuRQDx_82G;RMiV1x)BaTcRCo0cT4a|Drdtd_#VP1 zr_hrw&S)V&B-E(}t6fR2WScu8DR-O%YtV`zKI-;>_yB)HCIpZRrYr%pICx=I_gBT?KwbL7YAKVm@uc3RFN zEf2Vcf!vg9kWxcXF1V(kc`=?i1M*N>M`I4rv(cw9#|$UxEW;HTgPeQJyCJ&0VueMw z?C&#?PwzR9X@qW%^Js)_=Xf+iw}*MOEZxqV#4s?-(#(sa+m+}?s4D1pgtiAbxgElu z&u&fFg$!DTYy-I}-L9dEo<~%{4zuK`6A|R60oPJQwfk5^LAzu@yKqEWRp#1}YT9}( zeTy`S&lvNW{bkE0s*Xt6KzgcQSU2z1DF~I2CIzLBREYx2qR`bDbpMA%2tIY#3QI`l z<3gt@%dmSdi`^_uu!w@P9~cDjBn}w_B(|yI7~oRaCPI0k(1L*?s#SPL!inWUToC_d z5AF|nkPyQ(zYTa%Sd>4jE872nt_ZamT~RUnbw%||>k1RykjiIE!!=zofv)O00akG* zply)uz`vMs_N%yn%=bf5=I3}*td-^WKZYsFsa%@P3gtGNMc3H`2Hj_~S@3;K@3Cuu zAy>>9HU~2i{mZ~q1YRSuh5_y}41lx2I0-fd8`F>@m}#E zWkYkpjQUqB7s8?JXkn+%E#fjtCd}_v@=L*l`PK1s+(~b`7K|9PUh?Cztx3{1_)pZa zg87o`5aL>vq?Ky)Q$S&o*|Hp$vs&csM9G;C%iMZqla!PtY9A6B=-6PSU}@M<_NR3P zQ46|)IH?+QOXvzENo0zGj#Jvzd_Q%Nl3p(LdO48A+E)rbw;R-xMQgY)T1`jGsSz|? 
zG?9bi5u;z>3*vi&T41Bobv4eOqpP}(vDj!*{Ic#K_Di~=vM=hY)ZMyP^Eqm%<`=Mb zJ|Xpqta!W&h(c<2WtB02wn_jk^~IcynDjOvc7O#V?EGvVby{#3W%DML<3$5 zzk}v1a#IZ1RwAbSI9#e~O7OwZH}yD-c>ppm`582JA|z}^ZVsrrj09qas$pJ*#Nf^s zIFwsP;qdPlqZI5s7T7Bmw(Ldv{6z9F-F7@AtOYbp9X}>qvC+YKZ6SF5|qvmDtIL9lu&`k z&#=QpZODMeZv~2$F`(xmv0y->N(@D-S3nUjg(53eDTBWNxPgD70RO0vi7c>s8L+1T z>}Ml74Y}vI)AY=NDpP@AMo?;&&+1zsp~N{+XcUYwijTk`SP?7XVa9@Enb%;W$QBGi zdCYuxz+?=#6qG&GvCfCE!sTf?>z<}+K3$XP@~(P1_FVQB!MxHBi6+st47b?Ei~3>6+9&G>os}QtC}N9)!F+lb2ilnKvbXYt&r!62+NB0zg6-|K6RswC#>6igaoS7 zo&D`zQ>w0LaI==3GLFsifBF3~@)W0Cf+@(^+iN^x?jtJL zp(6AHnJp@*6a3jrdE4ZF9dC&4M1ks?G#G6CjA$!uwg3>Bc5cflDbx<(1dk)ePciD2 zoyAz(+OZXbw>n!&z?T)M{q!tjl85cLm4ZGJO2B|wj0IK7#Ak`K60j?q1MG?bK&uEq zAOms&ne9@-6onkxhts5HYe0lY$z@sC)Pb!4pKSQBVq^XcTe}W10{{w*AAGCkqYE?N z@cF>lF1Q#ISk6I{Y^9mPdfI^hN`^2!6s6fXqahr8a<8TWbUT|=HPBb8x(5sg=j25M z2fOyl_%##OP93115^H9|vY)3iWW=&*Wbi<7!mpDB!2a5bM8`~ulbSit)tf!%J)&26~?Q(boyc+Ib&xonpEv(mdGm*N;0%Z>{ z%vr7jBH7+7w_KnE&cP~XFU)2P#l-`gm@tWo4@3!Xa7vgq@9F{0FdH8KFtHo2czm#V z#lwUmu!2a$%wCg8+t6#=IU5;Z6yrSv7Lnm4~N(|nd6?K6WOQ)6IVxHq90p**u$IsmgJWo8yO z>&J$)SwGUoX8p|JY}RjX(DaZZk0FAQnS|8c>2!}sQYeBXZNd(!&WitKnsL=@P3%R6 zHf2A-l>INbE!K{20)wHI)BnF=fDBs-g~6Z~!k{&H4LS~9Yp!@-%xN(Mf#?8|e;Jz5 z^!$Yp!xG#tfsDr$6qBbR+=F8fYD}{%4Ip5LXuv01hxLh#f7rY1hNW3rVauOiRJH^`QB-@`uBE8Q~=8_FoFt4|}Cp7|UbxSw}~+Ba$kCCSZ==+YEl)pI)J2Kv8K(&C5&9Y3ez#N z-d&j}dTKLAQkz@11GJEE;|?@5f?#ZWvCwrUt%8MA2*;4Dz{M%jtm^x;j@QGIBBp>X zDY`fA#!$?VdKEVm@pGylml3sVa#bVYUxk0ZZc^~Ncpy@Cv=3!eXlm*Uh(_Sz6BJ}2 z=b7o5;4d^mRZdSz6f5Yg(bR7NfUmdH{=Bs1D+8kn>9BE&O zXhmko{b7!WXsh`t+R#}BLPc2Tvvr+*u^xlBPc=c!Cv??#%}sPtpDULG7fet7;{NGe zg1$Aa!byT)kWEk;5Y~;&k4uBzwbv}F5?olO9Otu9FU5`(R!bh#Xbmo1A#iw&F9~!m zgtUYZj+`sj_(UDTiY>-vx)VwYzmDNUyQkg3AWf_BqfrggXvrZ+r02Ge3cYdtfCk;h19eMCa(b$jJhs7di9i@Td!SxO z$j*KPFA^+**X(!&HjOQ{2*s6#=mzSvMq*_#}}0C2@(^=q5@Jadg=fV7@FHz^2Wbxr4Xx3 zif4GxOTe-Oa59}sF;|i73;+857xPBEkTEImM%*`4NmAKJmS&18`M+s9lotY;?(cP}=im6B>#vs^bgG&gS zV1}wHFEAS=mMGE?|S3G zI14w^_~I%K2+gw|fI9U3erU=rCgFdF|AW}I2nc`uPjC9y_OF8LQ|CrE;=$2t)~;K> 
z;f^~u-nD6RGdrBSz5YwKzVu};pSt&pzIfaAFS&2Wmwwro-~Sa4?0oPmANs0?{~lGm z;;VP%kNo{tKDzs{$6xgio_O_>U-O#Re(gW}N3Z+3fBa9L`ucD9r?3CUfA&pp_~+mJ zE#LYt{^hs5@!R+8efnR0$G_H|yDWd-pZowI9}jmQc{iu5rQ_ex<^;AZ|A629E5AGb z+ux^mPy2Tt`yIR6>+gQW@AmlJte5H@IAC}G((iuggI4Fi z@Vf)vvv2iU{=jSb7Jv7FL)Oo4_PgKme*Sa6`-{)ML;ZY%-+jpMzRB<2@uMQJ@qgxZ ze&*eF_l*ez46^@>%|qUe`{q>#*NF;9sBgZ@$9s ze$e0D-_C!@pMSZ3GtU}4wD4u^yh(j;y}`MI>G+rGEJJGl3C=o9$9MRz~`Njz2&6+t%`GzkAnt>-(y9-lkW-`Dwc&HLCjffw#X^ zVb9DI%n`8!G~)3=&#$mMmvA|Pug=%#1!3p=Ev+g`vz3% z%%566I1zIdckgrx<3!Bg_zCMn=CHW1>qZ;G;}#EouAj8cl_D}hBo@JnXOyxV*&5fbGjqLvIMPZgMf*Vp4FJ{jUC zK2gL`JjR)@iW*4BL<3+F46lT9S#ew^MI6Prt(q0L7FIEM+mh*4;fSe9O%vCLD$1CJ zMFoWP@TJ_ve^5C}m~@g}}eJS8Rq z=UGJ;+bB-FO?xh(9?-iBk2B~M4g?{EOe@l2u!?Le&V;p)CqNmVnH3QI7u*yqPsd=3 z#ma7i>y!%HfU|)P0x|Ly$k!xr_?R`uJ0l!KQl-($3RKutExMBA_H20P%5(q8(9zHd zBOe01L_UglX<4X|yeht;WQ5h|H?=)H-36#{>L}m|>sJJ;0le4FPm({+OmE;Ms9Nd) z(Q?QKfMZ{FYALo%QDr&@P^noo8L_n_z*9hF=Wfm(Ebi`)$sSd%c$(?yfa=CYb#-W) z4e6!E@g3w$fl95XyRfpEDcq`ktqw#i(E|`dKZhP5ps0$lf{|_!Zd33EaY7VR-v%KD zk{Wzyk+}{AyjG)?tJRji&B`ikLTurVvnYkGyh3rHSt> z9IfD@$w*=BUe^-J4ycaM4RIDIcMcw=J-F7a>+6->4^=Gs3E2YbA7Cd{LrxJ0^HWJ$ z^WcSP2s5l;try$DHIP%}nLwA>Bt(fcWI`tt)s=GrKvrob$*i#CL@x3cM&lj(L3yT8zu# zmL-0)r5MKjo{Tt1+rpu7lgKcA%OkYHpoch+^>!EFc=D<_{p(g=qf}XfRlG%Ez;*&d zpR>N1&5);z9c~5YMW36fToYh1zkSyjKXsd%iSDU?9&T$Q$V{ zb*`g44>iG>xp)kdG4U|&Oa!cXLkCMVYp^CM`2f}=|1emK9+_whjLfI+=Yk_(8y6J7 z3iNnODlDMlR9MC_U}*$wP40y5Ymx_`=f9+&C(#ZYp=S*A>{^PPLc87yITla4GIGRF zD`RE|F+Jc^l9i-;F={ljPhWaDzc`?CIEcd8LR-suibH2I|Y_3kFSiPG~Sa_TgI@PZ*DB6c) zZFvau5VIn*=zUevTDC&9Y;7Tz)bG$yKmZ#m``%bS@r070bjQfPM?P@F&;-gWPsBB0 zJ4Z1N3t4TDpU;l4M#M#>N85%uiK@CARbXydKn0(LpPPX&R66B5pSZW&YOTTN)U|8rYW>>T=HA_h$M@t(b5`T`Qvlab; zu5kskQDMjROnAe%d6f_t|KmX;b_I0nq~Nrg5GP*EKWuT|T7D<}klV<}UCc~uM$^OA0MU$VJhmr0{p zh^54=_G=O_@gshr47!?O>Q6)8eLdYm{W1WB090g0nd5tk3j$>;7oKKixCWYVnwhWy66f6$CxVFn0+N%mC!7a5k%KcDGv2k_H*eYbjF z!rWW8nBtUSduiJFMm=u$V=*$IR`nVuQ-cofQB44J*mrLt%A1_2P`5$j%!$^#YEsfP 
zZ@z|e=aD<9%N@?&F?6uN-3cJ&=jMJg3F!H?Q6EkATF?;-1-r=ytMyvf2bnhnutwn>8>ut5~fr& z9YXsWhE=n)waR%_`heu+TIZJ0Ati!64y{RGfD?r+-ri_9)BLtZLt+|>MqxD1X*&^p zlXPS-N{~<<49RDt)5mm{?qDFL!k^!PB)2&b5ZJGa%s|WYSs2P!B{r0=O0;}i!pnV0 z>Ue=jhY?fy;%4J9Pt7>QVRP0<_VRL7;dQtuYyY>JhvqLvAr@&^QkEmx0He5pguJ3@ zZMY^r!)dxcpy|xCGs3JKI~8Odt(dIqd=j!77=rvHwLOXA^so!H^ScWD5<|Cp0)ABc z6#)oqYhWOa*F}ExNB$QPW1vFXK)Ef`I)ac$Y5q*@2_4uKr=}d!W*#@OQw?^4>ztj& zfLPLHhL`T5LmU&=9dm}NX!z_{vlNBgB(w-@4dJ98vsHUDba8t zXz1;g^>R~E1dlkcuy+#CCoMlz*=G7wFJt60Kc^-odei*FEVkD;H(ejJ?%})Jw0r*BN_LN8&!md5t1_8CUhwfA;TYIDJ*XTh zfv`r1$qq_!XF(18j#4C@EX*Bj9Bq1c8%M=rEG1=1w$h-Cstq;905gh~OePM*ePJe( zK?K^mg~{~CThcWHFG+CTVP#5^;vOP;zsP9CQ0#Ob(*kwTs;miyH!!AZPuj_ruuP+K zE$ibtaAV3oX=AZulC0Xxa2g{o0k+6hV~f`^a7g-f{l+99=GUuxS{zCC5f#l{v@si# z{RwPTo;)68Hcwi}>WW!9X}1mcr}#*a5?ymqt@2Ixq%q(3C=-+J>hH>vCp^zA%&?WN zY?XIAuHO)_Q{SsBt-gb8`) zo9JF4ztB3tGzCJE6@}i`I>?H8TUe6vV%epnK$)n@m7MJe9g+kTdz7%jPeY-hXn-_F zkO~UVpjh(@a66vKWagN>DmYUJVz-X3pgV5(V*yn-)BLB~25V1FkQg`MBVZ@sBbb~_ z1D5OVQ*`$VuMDuEzi%2C*v~SRagusEJmelO7Ywj1ZH{Rs^(q6Q^?QP!b^0zt07G|` z+X5|t6a|4TfPpSs03VcW0k**~CM~@{B6ez>Mmd4!&u++VUktSMT{-hYym+p`r~!(r z`xf@9?ZjLLA^)F@P*y`rR)eWPWoG71jvn#&t0vOh9V6b9&+8cLlSm9GEMk#+tpbUX z%UG0^BB>Iv$H@~+jbvD89|U7F!-C&#STHDa&I2aPk%4GRSsn=GQpZV{QVvE_`r>Az!MUJ(2`_>mF|S33Skuk~y^!{>7t++2 zjXae;<5c=4HwB(1ZHMy!7Ma+4bs02kjX;gX*YR@=y?Y@4;ydZ1tgIrlO zO7^+920+QMIwx6=Xt>q254!kYHLW-Kjfy7^Oi=OL_;3~6i7H1_td3>u|UHCQkG4M!t$)8(=S2PB2oaZpZS+sr9@N|GOxw+fU$1HJ23dgglQ3`(+)~nzEEMAjD`O@@3(^=!gB?*C7-_sHX|O}2(a1YN8pyN)ZzKI# zA`XgPLB#a5OdczW0}Z~Z!Mool4jqBuVyv&ae~0f>Po1Cc!5#cc7Fj-{Rq)#dBbSH{y- zKE)-dtL)s7l(6YhlbV~ds-W#0^(bqG{8Q$p&kAd*Dwn3O187ae3^de$*8JiOVjM34 z58~9GHIk;y6G#XsgBfcX^?pwiAw(zWxzyK ziwnrt1`vbK9BNp2-l-h}gYx12s$dk~4{N5;7H&q4KV`To_@KzibS zb%>;}t{7ckm|`^Y=t}&EWbiCw@22tALK;+&+r4^c%8sofBg!r|(F(fA>h|^^=%Ula zKT6CibdSDEC}EJs0gR36Hy~f`FIrt5HS}A#zfAX8_d&lAsPFnsYjX%V`xdPd^_!A` z0dXm6I>0_1UFs1H71b-EbiE=3Cy=%S4pPoFLp~VcRNBsPYPV)17dV~L zyEUfGAk&h#5|OH|p^7IgRJ7b_S77x(i!24Z0@J=cBvj~mP+&ObysP#WaX|Q(H)2Sg 
zg6b^wCJKxIaVaqG7zu#_tpJRuz(QqBZSy5t0r5?s6vuy8(W8dufM1tZ_#vEU-Crt? z?UdsirhJ-%@_di#s!0vn)!PfkqNbta5L#-18NAQ)3ugs%+q}a+lFvLD`hM3_tj) zY+z56O=M4f40}dpvjTfY^no{tT%#Za_x8$GN1uGEqv!(`OqWJK=&tyIc-GwGAFum9 z!4M0{tcW8mkprw-*c!ss3{zaHQ|e4>ZoEx9pE7;271~36bXjVmciTWnoK*P=bHXRK z)a80Ak)B#%>CJq1N;m@l>{R}UZpD5L`R-g{L{BO$zmBvb=Bu-H2>I%`ufv(RvUZb! z-&}O57UO~+F0A{)ECPTWaLcqg?4I&Q%MLg>YD_t4$qm`*eg~y=QPVA0x8ZIEQ_PyZ z?4H1MEjic}@EezZy7Z4|AH)LcQgrKck}q`sMBz=%xy_ssh`1ZL#|ZBn>Y?6zK6{97 zTIyX(Xafia=#Ak{b|05SjLiiUJP>?bz-p*x#WHF@DoeN?mLMaA>^Nc;NF_R___-0P z0m@d3UPWKJo)PU{1kGaRyzL=+@#o<#CVLp_Tl}0jSd1p4o z4HqU7Q|V&bTbJ%i#qwwBwwEM(6ssgv7K#>^DFqQ_oxhGjEB=x+HpJfl2W5iNIzqMc zH`+t5Y#-Im4cyXPKBGV^gr+&)tk|HwBtLF5*$#5br7oCDmHN+X)%U|Ssz0NZCi&_5 zp58`!P9j{hsg7_`AL|EG>G7J)RV|Du$*)lCF6zI^kwNRZ9_PA&>wd1AxE^TY>f}nA zIZW#`pUHGyYnq?nx|Sw>*pY+9TX6*l+uJJ6g$hDGUAb14Qd@%u^>TVKhD3c;G@3w*MsAX}q= zGdxl@;)1dX;}Csi$C>Y=K#6apvWu}|&y&#$VUXY2Zwi$*A<+^X{|%-kwJ&~;#HhfRoXBZqgb0ZIhk!|P?A>XlRe!#V)K^K zS(A6Qc&EV}CO!H>;{D+-l8y;wkgnsMPr^+1C3DR_vKJ!=LARo4T_OnC8tYl&#Ep8?fBrnX`VrI@JXLF%g$|JM_awzG1 zHj|ZeQNO$6o%c(o*?-!mdWeNw68UDym}_`DVTV$`OH>Inc{$ng>CC@eWKF{T9oRzQ z+3zRawLHr-H}jsFR7(aDEE$+`iHyUFV!-6ULMA;(c$`h<_;!~Lct`w?bF8sRxvaaX z^Spp_1nIUBBXn8;9Q1h};PLrXdW8M5=q_e-3e>~yJEzGQUS7}NrBDi+NN}5jv@0j3pAR~ALD9g_Arr@T`c1Sqp`84b3OAY zNAOjEp+0~>T@X-Kct+30Qb$%*$g-iHq0PH&cpBVU21m!I(le#Oi8jvTZNt|$mEI@@ zS(d;Td99eIVk|g0G|Fs|?U5*9rcSPRENX_Z%+B`)a8Qu*H*9e9#K~p70HcYV6!m&R z#SVbejqtW;&@<@lm$n5uS5yI(A$<(cA(%G50(3q-1RcQv?Z)EMfG!L&hVwFTl-f^- zLxi)@iPR83|DzpqEs4Vpgk~d$wXuBro9)zttM|N3yVk9~%}22#`x&Sg3t$ZJ7xBd3 zDJFfI!$rj_oj#QwKnDkX%~)-pgz0g+0VJm4?FP;d+TZ&mCbfazg!HXTNoi_?>(ES< z%OdkD#!~bz+tYblAoW9LI?=Hknkkt@mF7CsMFE}f$Dr6=ISv}N3hda$j2R%-@Db$- z#y8Ul0!+b&PrAfK_=1EE$&g#jxME)~R%J3$*F|-svtMz1)B}Q?<`(b0>yLu}hCSAZ72bs_Rnweucq>Ft#xNzOM_2?*HWum!EAqV7<`)~@AW z(jsMNPv;%fqu5xxte13=$jJ}ay>B(^*oB(lFQL+T9{EgTHA4)zd_9SoNy#XJV@3f znvtAih-20umZAeeR|pI~lWeF4wE)$aaF_Bi;V$tdb}0#Wl@Xth%64X!lI;?0rie|p zuO)nQ6e}9d!!{bzb~f|E=4aU~zgQlO5go%pmQJOYU(9eSREeNNC9FIgm04~$(oqaY 
zB6={KbqYGe&mvSLtPX1JV%bNQ%vQ6lje`l+41*h*FHGBmfqx@-F0M;h9pYV@Ux&6B z{MCBr`>Nf^?UhR2m3rk3aIq~xF4tY9n-o1I#e7O|3?`2NP$=st{=nxz{4EcFpsR(} zfg5W=UOE6Ew=@8-7!)lUFwGhn?HpYn8ttq-STfofjdot+%x8SG(>B^P|G#V}_^n5L zMm6BK`@)PE>tvY`Tdr#*S}x&t8mSgjD%%8@W7&968nGv+=Ge6~pQUh`U!{bIy9$0q z?ufG*-Y`>f>o9{X_8S>X%2U|rJh=xF^bQ{G{MpCQH)Hw#{3L!in7_l48sq^|+LJ~f z_!bchf%i5OF`~8d9m?Ufgic^!&!IT6o*I14N=w4gIbp3tbvaz=s<5Erw zIWT01FH-Yr*xXhq#0?TXGa?8)iF~@fa;_#5&<$8fZ;d2OtuQ=|O3zC_oL3)9R2rm? zB3ufo6Q=-?O-H=~V8^9S+5E>~hd<>~6MxDYHAU5L-J7o8nI1rbaUNVs(vJQ1)7FcW z54_UXA-dD{FjvwwJ5={sdUKF@gUJxNL`i9xXPr07YC4qkTPB7jLM8^*?!>^5m{9-B z=$UXvYRTx8_YJrw^05N+#C@!oW-_iagseeZEWP)%_}Lh2nZ8S`|~n zg1ZPBf$E>#XG`z8srwBD>E$>RFs*7Mp@;U3OZP>O!l;YQd?j$JSs4>CpEWHRRCtj+F$dTAy^myD=3cGGR>5z8NL zro9^A>y3Lf&t!z-)-8f)EWjEWPe0%#Eu&dnj1FeNl`Ot8Swvm(XKK+CJW$oWnN5r& z{QX!5YOwjO?I_b+3QJiRjD?+a%RNKId6{ft_`Q+yKdu!)iTjK9p z=V!OH-;*SX_Cg&dcY-Yv!4U$b<^;O-Drw1aDMEhO=J~ZG^Lf6QmZF;HL(s2QJO~qQ za}iWJO!Rp;A8h4w;VDyno}SYNT*@9uy@Gd|sXS{g^}|YHyBg?ROpKy6j?TZUZpxnn zjY;P)`(uwf#$`w&K!tHBQ|1=ceQphrY^Ps0G<{86AFy}kz&Cn9nBzvqI_Zu*@|813 z3BdYJIsP^5yrCL_C3A+b>0VFA!&7+%7=(y{QaYG{ffN&FrSs;#SQsP*RrjvM40SYt zIuOmbe>y3)@?~Hx6G`fWzLT0T?E6@7T! z?qdj3RK;o?1Ul7&`gzZmn4e(Qt+CpQw1fhu4RYL|WP%2ld7!{oIGK<~&X*Dyk=7>p zP*ezQhEB0M2%>EC2&6~-E7m;yE8wE9in9KdCi(_JDowPz5GvcuGp%HDenb1;cn&+o zC;+0=q^P!oT67R4^7|1DA|@b$DtmvDYr7wmVt{x!Z|QL5%+6tDn4&$SOw`*$m1nz1 z3KeftizNWapaTihpG&;iWjF&e%+g))KWB;t=L;v*w32ChYu0dLl+PEb~U*7||=?b$H`^bK3L5@zLeMD`%doyiz;HDC~lMXObo^`9r2ZxZ$CS;ztJ*Nf?m0mS6LxI!YuoFUc186rMHu)KnRAw`_HiV{Lifl2fz zJz)D=9B>8Jh1N2yI6c!ft%0W<&Vi+q9VpUztHc4^Z|-N<5yD|SLPGcYNMzCe=88%) zBpXKvW@1R=O7pCZZewtlG6?YuaV6QMBZ2cpU<&bZqGEtBxO&8WAI9VX&Ky%yKB})YtT|ZN zwxZ4|!owXiDo1rdCa7>dmqpI*`SvFhw)yjdm2`lm&opDPZqdGWK0Af=Vc&*FEA`Sm ztZAZi%`+7mQhq~K(Nq2jbTglH_8}rdje4lbUa|+J+wK8tg;cKRf?wYD96NYIycMCb z1{3domal#@Gs%a@ZzkEf%5RipEXybSMl{C~q2JtTH#ocWjq)uL!Rt47v@axn{^Tx{ zf#Nsl7_T*E{WPP(4I1^GK6ebq`;vihjSs5W zVO>CV>QKV9&*b;~8x9+>LBpaxZst;5=PfkR)}3jWPe^07K$@+IH+Plf>VBT^Zre_! 
z@{$fRzfiUFPFm!RYm{FQO}Y@Fe9nR`ZA*}Zo|y0g-vup--yPv$`E?u36r4;;+^ly} z+-f%=PoVl2h%$(TNU_fS205L?Yb@ZP5j>4X1~6zJEoAJhW^yvIN;rWZ`!&K!Ujd*k zxW{ZpPU2lz;Np*Ih_Bd1z7w3dA?gOf%rp;q~07N%yq75c>cqB&YZ$& zvi1rgk)bd+&#GQk0)FJsvr!iurk_C+!Q!@FLrMt$&$U*zchy){<6k8)9VB}nXqh#a zb66_&fLU~3;W46k?2QiDY5Lgvo;CZ85~hgMvMjd8MOgx0$nGVg4S18tQlAZJc5^fF z`*mJXlZp(T%U9HOgm+(PtzWiOMniHEaUfix&t)MDCfRJF_+v>-Ws3L&?dF@c1duZ0 zn0sIj)Vk8>gf})gSC}fWP}@f>0Vo!~j7G$&d_Bwu_93L(vNyH&1h%)0jIib&Y*p6n zyXm9fIgzA<_`~buZF`JMc;HekNF;QD2Mhp;WY{evuDKTx0Sw%iq{bvR&N-JAjw@{GYwccuYO= zKvgWTd^SAoR-g80D#WjBeZ{ColR2cgbI*?rYS?6!t}~njnn_*jzb#8MH*;nigc-Oo zNKOseC1#D#OhMo{ax96?L?Qs7L=<4ndC;#yTGd_!kOd(fD+CC!fyIxN&iBws&MlK# zH^OJOtG)VE`W6+UrJO~JM@+E*dBCa(Z0F^|#YEz^v$p&xR6cv(epN%OpelM&0AC#y zWg#&-!U)9sOwOl<1}F0iBFb&!YMsGZLFHpINSpX(^1_}9>H@>W^rNamS3@@=Kp;8w zv#-tHv8Mu;K}1^v=mH_D1g2Y7G%zV5L(MAsTK^b|Vvq%@*@nbRNU3pyEMi4UjbBz0 zK1*MTvN9vRKm(LB-Vf6>ly{D!oq_bno>2;rTnFa))|Ig$Ml99V6%P;va>5UPLsRyW7mr@?(aokeO}=1|-C`4F=UH`NJxj zmv|+@fmg4F4!M*w)5VM|FD5fxHDqmx?6E>-IvW<^0Dl2o?xqd_YF)&X@Nv-`NA>m( zJf3cGZeAdE*exARp8CC=SqGZrvJsl^hE*ZWo4J)I2*KBRkCY3G8T`dq34B|%)p9$^ zGCq-s__(Tk10}ChzIRmK4FpoZ+4=Wb6d<;iTm6#caMG`90!l+@GY{Wyc1|O=?@P{+ z|BZluOslgAfxKMLBt+`c0Tq>xnBSa)DAC64V$$E#`QNOYUR6?bQ=K|bjNN5@al^lU zL$}%aMQdk2`OMUg5>MuH9C;d`$OCR{U^C_#-paCuf(<;@0G6ueI_b!V7b-iUPV`mrJetE7VRiEwa zZBBQgp^y_et=;Twf>G|xBs?ZRYi)Ac?CT{T?v5RQcZT;m@jG$}eq7c5pED~S~QTuN1_3FISiX#ivhpW9_ zcP(G{LG<4K-4OJEbnaoOkWQC6*$Hi=bH}6IMj)Mgx_3C~+!08p+ucA#1Jb#}NvGbu zV}x`>DrydeX{2&@K`NyVsXGntPH4Y>$H2jF-!=4|rW$uRt>?j~Kj?;IVU^k<*%oAp z&S}Z$Yq1iKOv30B>Dtnaf^d`+)gdnM2(XxC1D0z$%dxLjEEiB?ZY&5mTNl8 zk?R3`8wyZn1>8hH#&Y!lx3#vCO7|Q+P@1t^eT3yqOCXRW^(Ci!6D2rmY~*^1@dXpG zoJmAj6&(^N#LiPzE9A0BBo<-aATj%u&QD0MNP<>|#V0$E?Eq!bq6Ff87WmhT-fBek z@dX;A#jU8i<&zteqsQRTdl7`tm$BJ&)5PMCKH+nOwHkI_Cb^DLBs?8JP0%kURPSgR z-oQtPUqh+lWkb@0A%i7hM}Q?4*~e}jl76Jm8TqvL^j8gJA9w`O%&|+tPaM3#K;$Ic zUlrJ*kbmiG5SsKEphmeTDAtXEL{&t9h_H7v zF3%Vwry~PIr$_KhENSZfLy(g_-7!gjq&c@UV}Ncv2QxAsbkGHLmxbrFdnZYrDY*!G 
z*F~%Cy5as}T9`xmReN%N@I-18ErxY*S+6rk(x-UIiRBEFcpc~ z2))fTe-8#VjdQ7meFT8$Gi!o*&YD>ObX{YOh8QS55@zyR1-=e=f1U1G7-hOm)zrYMkF zM==P6lt>2nO~7xZTUc0Bd9i9py=+zLQ?K&Mi>^{mo*CJ@Z2Z};tJI~gc;Rm_I#K88 zY-f7Sek&^Ifs|Mw>~o9d!3br!<*1G8AW=duZd?M^EMo7F8patj$2CVx_60tONpw6{ z!`RgspfU~#Gj+JR@r@!;NrA8~=qWtxWyAwHoZO4=}l7L*cbPr-b#%z@+z%sbdjH43E;IcC;*H zqGMQ$)b+eh>f$t|07`uJFN0t{yqX^&~KJinws|PMAq; zPB%I*qYy8y=WD%T($e9}{jVSZ8EN&FjzStf!UE&lrgxy3OtZMs*6)n!gB#$jGv& zFV}Wv)0zTc_3;<4jC_ss`&it`$nTR)CD1gPb)uQG5<^;B%tZ@^YuG^MZH8Tlgeb-i z+a8Sd25mOH8x#%$g`cx=oVbl~)L13catFOR4n&#!B9Z7)2#WLhA2S%wzjkZ5iug3) zG0k`pDw8g_per7<^SUDI&*_S_u&66S?5wWnjx)N_i_={5OU5RcicqlY0%GM$*1-$q zz1SJt;)Z}rbm*FXBQUS%3Q^rqgIuq26&d3lK&g;qP>xRj55QRNYTjR~X`-JfL9Z5n zMw3^=dO;kZ-QOM6i`)2oIssyt_~AM7=XA>f8b%%?J(0XBQG^+`MT58r8Q3UM|sp`htA*M};XM97yKdpvJ7 z^KdrwYyCuf^+gYuwyZ@Iz+rOgYm1=yp$gl}K%-54u0T2btTK6(KEF(jX2V2?^kwl3 zV4~KUBlFA-O2I=qW<7#k)Y|sSk2O78_5Sb%zHfRiM<8h%s!2#Ony}hYq-`V$qzNh6 za7hvjuq5|^t!tGaBP7srejTn_8#Z}EHqOxU^NN-EE61rbbOCrrQKp3~eikU=fkd)# zxuKE=8R$W$C<%?PAn&HLrCwls8h(_p#=3kne&w3SzWPRsI;F$RsJP1l+6;2`T}o>ND}3}ltE0oz|Wb(Ft>gwD7C z>94y>SkN08BG$L)Bn%WPLDq=im>=)ywcRKAw_t>Hzdxa@JQlI9#6aAUCEjdC5_8|i z7*T0o_%TZK-U5q57*qh_I7nuEY|mg51CH!&gKXSPfnF%`5V1b6h&9}^bR&u|37CtL zB-Llc8E0d1Lw=ucgv`_?C2q^6f;A?@-gfw`WVEeZ8!?UtHXk1>Uu!KjD&yEM`GE(i z4pGK%rc$&UXP83ffwqa5t2Tohtq}Ey+fNMOdbz6?K;SVyM`e?ji&MP>DG8Oypa~zoRTi?dOL@+hM z0f307>89BYQs8KLac@Z0jkJQ8`fbl}9miBE{Dm8nl3m}%n8f6k?%Fnwg;6O-3)oVE zmhz}9-L+whHM?PSmA<&?(BvVxwmkjnWUYOGb~JS;exw&hz^rqXj7(AVs6dE>Y-GGz zl*|8$x?p`AQ?TETIoVtHA=o*AL;lJ@#HdEMk3`C-E|GE&@+E_S?cybaKrVeV2rvbh z5tk*Vys;&51xr~`c2y0VCs*uv@usJ^wzS&{FS4OxtgV)g@#aI9THV#^V3nMFa>!mm zwi!9tkTq0Oqb)`(j$*_iJzQNQ7Q^lf5X(pbxX{X47oGMLZY^82J}Muk zD7`?b7!&x3i-YJdq0|Qdl>HPaEMV zJ}ozL=+=t(gn-TBg6SX>adibY7Y);fnj{*j^UsRAbY^S0oenqZWP%o@$XeBE2O9;rS??WD>JbA1OuywDXg2mADPgQUfVXjUy~M`3rY z{Y+x}!*Luy}9LoywvW@8xY|S80VpI}~F0 z?j)&ck&Y><#0EQpEUM8C*fSYWm7^11nR%F5zU8N#x$K4k{TA=BUyJdZvz!}>*BEnS z=(t#AoRE~{W!3B2tuvkEn1mPkR>%J+U#CUXx)JnJRhGTaLSlghYa0ZibUw{GJyh_v 
zN4oD&kRH-tupuV_l8GElQem(^- z4_la3=0S5ubtJUKTY5f8*`$gu(NWf1s{QN$%lUGKVJRo zd$iMU=yq!Ow#(OS?4AEOahJY-fDsxSez#(>EV0p>D1!`170w4YsP~-{3YNF^CJ&Nl z{jhUj%=+E0fY~%>{~Xj%>HH!k`9ZPiM4!RN8X5KL#I)N(MwgHE<-9#JAw4Uo88Ws>b% zH;832wiR;)Gxr`ma~FZWlJnT*yXZR=Ea>D-(JhpH?+$ggnEhX%`T z9dH;LLfuM;xLG77aH`TxI$E2fX5lRcOvP3jlI$ zASHsjP#G|45cCcU6{dc_hwVwGTtG&3tFehp3%)WDO)mt{&X$}7Uv&n?&@u=gemWBj?@MMdP9TSkEJm_|=5{dm z&lwMd^X>?KV``=FKRe28+>Bs0&M5o|ko7DB(-qDFH8|ln z7MwVH#%7yHqjA_=LW#yn?n2WJ2NR${)U*D|xCm4?8{C^G>uyJD78q9#q?aH)MHye< zDjm7V8qs-vYbcD-^27>oKI5SM+DIj~3LA?@KweqCK_X+c?bc?pjsQ^_D={Fy=)SE@ z!`M(CK+w7>Be>=jt}8WOQ}3Y0{q$QSxDiHhl@VOW>L$k6hr!?LaL_RkJJn)S%XNca z)m#;}kI9{kX+CXKeH^O(gJul5RS7igS38H8n%Hv2 zqZ8nBd*uSUgV`4EC0jp}X;W326M+cX7!F8Q?(hB&Bfm=yX#0{VIP?bNkO($OPnDAN zGh8kEm%b%sm|`;^-6P~b1G#HBu0~A#5@lSS>TT7K4v+Qj*7eX>?;Z*uQz?NkhN(5j zcgab_%^FFVEnKj_2*fd&)P;I_$R$FKnXGbS8x5&@Ch2zdsmXXCtmRELyqaIWb7Lb` zni#-Z0e~h3F?Y7)3l*2STCA0XV}|*D2El^xn3zVSpy_?=o`&*pz!pDWsVoShX_w4H z71DF7?l-9pAT!%Rh+Aq>uBSjS4{7Ha=E@7&1A7V&gVm&kmaW~FK%Q)!frz-h3zq3` zGF>U!V+oc=%nc@sM;GmpT!QgY_b&T&UUidKjlU!$CED<3RjHOjjbzX!bmEQr^K{^` z8*D|D(4R`LH!RhYR!7%$@9Ewh3hZxM0l7Dj*j$Kc)J0QXzB)_1q^`<`sw=Cz)Mgm2$Bzlgdo8_7x zx2{6Sa2^2$5aWqZJ+TnhkM3$_7)~_iW2W0O_(4smXF>HKQBUc9EkSI$kGkO`^`aZx zFtHPSSGzbkL&F?)7u4!mH42;og4UB*hAp)Sog<2E4&*LQ%aH_nsG`7Lev!KYdM|TV z;6GrNg9^M>*jV1@${E?oDUkl};| zTH3f5zL=4W+TVsRWbw=rzg^;sd2Z8uu5HdOiD}v5vu$#~>#O7TR%ly?rfLRSJJAL& zMwG|fI{Pd?NwG3%pYj4yXOKRZ1{BUk^9!(fP+N|<&MH8X>&bR6lOUp|!`2|Y>!De) z;~BqIPr*6*gw$(?ewRubB z(YuiB(c(lkJIRLqzAgGxQg3DDrm4ELo9{A*#1U-7^&8ErpQe_tXFId=xbS&%!%n=A z#$ng`q@5-Lu&5v5s6Ec~Xt5mxJipJADRQzmsmEvIV_SdY!I{#7)l_{F7~H}2dd3D3 zsG`XRiZQN$%9i441Ru%#Z*(_FiMx7+$`t0;>{T;VT9*wClOi30+|6LM&7Kp=9c9*p z`tdj(RIGX%A#pOeeF4CXb}3Qjq?v%zt%51??yyV_*YjqAey%iOyMS9aC+p8IiLI(qGXxBKO- zV;gsy7WN$++t>Dudv}42hesSPmGM;ZN2(@M$*7qc%+2I-s|Y^17Dik;L;*nrbP|#D zqy@A=kPZ=~fddE-&_mu2_k6*CF#WT`To}0`|KmhHrzW?b?s{HefHk# z^;^I9`mNt;wyI{|=xE}U)wmnQfYeM1+SreT9LSuk$@w2wm6)x{M?vxd7ro>xU2$;w 
zCu?Q5HlKovr-+quOqHb_P|7~QASyn%RD7`9_fxj-Q78Q3oMq_8(>KT+!giWXPdK@y z=NrG82kxElx#Iv#VGS6zNqaO+i@#oZ<9*s&}<8p@Yo5GCBCbq z4YI#W+TcE67g&XB=5Tb3t&0Sj#a=o0n&E&$s{I&NvTnUv5~sWEZcxhgJ5jC;S)dkC z`6|j^Q~Nl9?N!u08}4dz#sD zoLli6zpSNO;7TGh!SY9ELo9!mn#>uIpeTK?ixy4hjMSoB*y6z!1EtA=jh}^R3Y9uq zU_9wAR6$))t;N*&4*EnB5~Z@O+3`dxMEElwt@}6`UPA}j+up_&;UjDj_UZ|^!y8Bl z;JlSXcg!A;@uCK7&Jgt3CsnFeF2zb}@<9%P$-kFJMiQm#k&&aNfba%XF(@NhE-|CB zQZI6qm|6n7R3tu536P*{V?S+WD_Z(d!7N0-atN=#LR>&?LSmtS=3e0*5Fjo@TaAQ!t;-R{#FO|GE zhb()5yzIUPXr7U#d#D^EylM=D>d^ktMS_8YqI_UqGUrEn9+|r>?RjMWmg_e^I@I&X z3T{#Tfu66duKlvY5U+9`aIs~>oH^4yOm+S`AClv{2&PqR@g>m^PV`IWfZsTdJ!(h* z7<37L0hZ#=*!~4w>E@Bvu!J#`+r}G1$`jeC2T-YP$Cxba+An3-o_zB9M?{;F5d@i# zHD7*2JHd+j*TrSix;CWQ0c9xGz;;R@t}X}uc}DvUiA_pX(9sc$1t=n9)MakZ ziBs(~NoK4FuE7iVO7iuq-U>x?Ef}Z(NhsR==xNI=u?P;Hu z=P=i~rwCP= z?5cQ6)?-uaJNaXE(!qsOu~O|uwJ0MD#Uii@>p&Yyt1c?gB9UR3#LJ>uDqhw_G?|I0 zL(yd?IkD`9%E2On^M=uKf+>C(uYmZNlK@xUe-FcygM|eWB?MIz1|pbWoC{)7g4j|> z=EoW+topx;fr5|TtC!cukzQJpq1pt4$hw82>Fw(gxB zHj@=Ud+eH==*mL8j;iNq?r18j3XwTLF?0i=-REPurxqUR96~*9q-0 z<=U1|wJtT>Sxad#YS%ssMBO2$DsrQ5f6C-BGv-uNso*`+kM!N}#Kyeo8{7|MEOSok zUctOiHHcvYdj58#`R#!X-v$~>sp4Jtke@0h44X1RKOO30{SpG`j;9WtPIOD8V zX6bwxfG*cGE)ezeYrMuA+=Z?M0k->yHM zvf8LsCI?KR&NDQ>dvo)7uJo${5N7;f| z6bM>u!?Q3ee_BVIjZQZ5Z2S$6C)rLTSgG3M?9R!F)bz?8kUM;28mAEc()&+xHHyJU z%U1Gy3MbeDRcQW&diwO^G#$_uEPBUTMeFgsk9_ah5AJ*a#&5E2w7uy4sUM;DtQej3 zy+7@HKTYpvss=BIg4+6B^#7T_p`F+cmc-{lM*<^@<V0Ah`}C z+uVBw-L7p(9h$qI>SU*9I%UTd%vaH;ILfL!kK+41rDOyn{S=m-uc+G-zoM>Vd|Z8U zADe8ECgKKu>Y1w+>YerDJMG6e&6sC2KK9!C%4vd8I{)~&T5F)&;msmlk)bemdOb%) z>JvNuo?lFs(2*GM45Ugb^4=`XRzMx!eBPCgOtP)oEujw_&))w5jj6 z(|w^!iCSv1g&>jvWIoG#{qCUCq_sc!yEb{JFKDPdTGa4y+ML&wDQ5dUt&s1Ilzl7u zMkk3ugZxEuoJc-o&K>-SJx~t$q-O$2rs7shmsMQ31Qg~1@6eAcz!F_o#yznD#8@g< z^(B1cSdI1llDRc>^%~$-)X>OHfR;R7b%GC%69UzKrTn{f7D|u5fnPIUAu0Q#?$OS% z>{rgdj?w9TGN$qA4pwjzp;1cjTu*P(C21}@yDixMnl@O{@AC}Klf=w$>%qbCE2NTQb;5kg&hG@!4BdLEfzAyWYiZJT~jKXY$x|kMwegu7`d}JiCo31ETk+Zj 
zWyyXaTZtCct@W#Yu0v~m5MY>cIC%0YlWvPmJ_!XlRlCrX`&SE6BZ&n2kET$kfl_!4 zaA81%+e+$L3>!n=N@akQdrSb!SwnBcHcv4R(uJqET8si$A(fnf%5B=CNtVNr%-uVO z>yZ650*hyp*{nV4+VyFlx#ec$eKGf=k+nl2aXKoY38|Y&ieen(U65kO8;H#Xvlt&LkwI4N>yqV zMFC7~B^Vf# z7q4Xe^n+GI1T2InxX~r-!az25pr0qYf$9DKv3K_jC~AN|~o zh7is?yWN(eY1D65I@OwptJa zE=&~Qb%iXw4f#Bb&4h4PDwc2ZCvm)9aTLn@C&FF ze-a5=l(b|jxYt7GOP#`JiCwD$N@&iMf4+L&=C2?qH2pG6EMG0spBd*dT^4s%gv(uMk4uc16UK-f z6~qFU@oD+z-#(ROQ=X7sz-I>K;xG=!af{9Y}WCSY7o2HduW8>Mts|XDd#OCZEm7K1QA5LZu z_J1o3qGMbA4~%Wh!n}mQB%?R_bM(Z}K-kyU7x(p5`ud`3P%MT;%zJh{XRm~WJPZz2 z+#88Ag4CD3X!p z(UBqOTAKyf;ZDCN!Uq8cbu$FpZL;Uic&`*txON{8**KiL9P*y%?%@ z*zl6rlwZKHJj=b=`u^Dkp3Uig4;xeTjuqq6_RZbFc`Dbp=iU2wQdeB)({?3{6*+P$ z1~r!62|rkJ(Zsc~IiSe*vN<3*MX)ufLgqm)Z$!J~(^u3c@rnbwH2teM-D_5CI^ZAJ zFqY{uesb$Fesg=KjSL;#vx|Mzcl(OJ;!C$-t;t4(jIz1rSn^4o-zI-E^oRC<)laT_ z0gDjW(9XBEPQ<)*t;Zz$;sq;I9Q}t|qmq0|^`R$Rwxq%LnMNYoUm>iKk5(}Zn_#y1 zXkG{IgD{gNLQVKl)H4X0GIRL>bllH}`)y0zeUG#vQu89nz!w&Oz!r;5MT}*SCZEm+ zV4Joo*{4Gt1@P66jwji3%nsDJG}3oyw&LV*E}}DQ89BfY0Sv6DB=6+it>J2|6#)(4 zUjY@7PDgNe2>x!sRz&E7aYfE6E3#6zB07Y@zF)RI;Wk9=WzBJ-7egdWPA~?KFcRmo zdLRS4po*c>k&t4H)v1OB!Q=6Bxgz;!d#d#O`rJA+hY#ffD4^cXl9UzOX@J9Z+!xJs zM~*ncW8wL68JUFbF%S(5xF(-L2ludITb|Xr2qLgA#%k^0b<$ahhAnsz2Aar68)7#3 zIC#Grnoa5EhTv8D|RIMzywMF^4e2uHvlO z&*d#^4JFL*eI}_^lMF=MLNbAvW;RYSQ_i$?QlhkAYQ+h%0eNu{)-YDgS&9c&6NTHL3IEng7op zr?^h)$dOXZi;Uo0EysL~{RpF0k8|QM7%&yrq?*zDt6cr8Y8r^U)^a#kiWxxWYOTO* zQOQZ-Tnsc!>yVokH?3ypY?{`st3zJ))5B_ZCEpfV%~tYT*MHg&<*NTZ{RVfe-@IE_ z|AD&tZz9}vO{a#Cp4qUsD|ie+9qKx_oa#sDN)DBUJxL|2K9Mm!GzKH3{fJT`@1+c} z%B?~8rDvz1(sh`G^{mh2>vep1jpq`tWtK>UkU^Y;k)fbQR9ZtICVE=HfMhC!uK8%TdNfufd{Tg&-`n1%lLhjrPHw z69;Gkg5l{5i#gqc80>cm)y+v==_ZE*(6PG%4%G;=CY-}D$~ilxM)!1fs)-!|*oZ@Q z!3%ZgXcU*=vCTV-1K`jZobTJXk~xx)2-v}Nb7YSl4pg`4!rD8lYC-uK*VK#!sdAsq z!jr%M*c+3qUZB6YlVQK|ZGk#{DLNYREf)Qzb*a~g4TxP}5YN#Q`+aOr-xwb`ARbJqg#Y7ouFRKoC#wA^^zdJZJ;C^3GBM2np{(LVX zpUvR-1JA8R@p6YXrw6ufi&>aeZ6L@zSMFv9gu>jjlW~A0sxUi8l9~2jA?mvrzeTq* zb~-Gm3QYz2wQi~PJXdKn?5)ZX^rH3cJhi^{)13QvU-GutBNWwwOa&y=t7X+gFBf%1 
zPg3tLs?Lj)UsA8SNU=q)1fENqyKsrZQI|$~+x7ku1IiWO{EBOSMa{3OzEx`YVh5az z3O0ZhstM@Mrf~H52W;IgURMP~)r^4?YpP&;uuI)*136Ai3~rhy)uP!L$L2YSRw!sp zN;?EPr}P`Dl=e8SXVZhvG*?DAnh=q*;}|C?G;<0acZ>NDi3>BW7%iUlJ(zJV{D zwLD8DpFBkbc%=&vn-KE2kHm1pT6_%kgME9BAGRSXX)73XZCB!PK5^OlJ9Mg9@vJ@F{tDotB?{BuB`a~sl}Nf6&y*ZN^Ox0Q zuHG;O*)l%rW`XLdHn_Doc>_D%qVEN}`J=U)uOR z!otVL>hNLI9w#$8z5UXXt{~Z+*Bpl(GWKLXoUeF^DI?8fd@oTMn_wj|0@K*+pTz_=;7Lx#Inur$CW**`MLanO2Oa}ph8 z&9^S0;wCdN41SlrXa-wEwN9fdeKyfp^!YaO~g$!0|>&u89K

1R3#K%b;^8GZx7`Lbr>l2)J%o>PI3r+^} zO_01nFaeY-aHt$SLFMHl0(|l<$;^>YiKp6UtnFzqxQZgaK)CzblSNE&+ z{bMq4_{Ifu=> z?Rm-`$_{wy9%hPzakLGe+vQl3Ci7%uujngr3brT00>)safJ`1LAc{mTIg3<^I?Vq7 z+_B$R*fj+Tm{&@&QCQA7TzuET7oz8Ugb7b@a=zFm6T4e;?A%an(KqaY{H_GTNR`9z z;JoH6d{x&KR+1K|7kooj9%JQda$G5Wyr+a$tGPxc15OMXd8DS1#f96d*AaHO%PNn$ znLKO^JoUViHd6872Y?Kz=6A(5@%{8YL@Mb^s=Qtvsh`^kRcJPL)9Ge4KXg&FHks_M(9S zln8QLk$X?r$N!Wf5Q87mFS3I~9%V-e8_aIT!=3Gdt!9U?h_gLVQKq!ZS*iqrId_Dj z{4LKz4LnwdBseBSDk&xj&Nezdp@P5(+?zCr^k30*Na_{&TvN4#Mkv?0UH zxD1@v>3N_;cGV(Qq(BAAFNpl)L6Ce+sEpr{Th=$i?4;f512dWj0kr#_rD5FJ;?rwC1CSa8dsYfQZBvd#|=IDw8dH{BOx;lclgmlj}EbB zOb6~2&AOxB0*ka@RpTEj{r&$??FN#>ZUxwpg9gcJIp}B3CzQDi+#3x@7U(yG*1BgZ zXhWzsuL|-9OrBN6nXKsswGsm`hME ze%3-cG#U&Rxi7MQhcQPtkutQxwgOj{d~5xhH&)-ro^Dj|=TQ#6t%2a(*$i7jUV{NVy|= zm>Z>Gs%8rTlvK;RDd_U>E-eW@!%$w~n)q3(6kG9*=ocu0Wz=;#7K4EufTHG4ES_1< zUgIixrB#9;xg&Ztm0*?^u0wZ}FTOCQu-5;-x{w)CrAC@k63Bi;QMHaWDclO7#zZGJ zr@$wxz9RXb`9TPx<(gF{T35)iO*tO|!GMP|idwZK;g5$jlZAQArQ@JXJv`#D7N?1W z8Lh_rSFP5n{1@{-f8$Cea2FmBC1bDRY7|zH(GhV!75+6cI!9}-50BS=S^-1~7P5z< z!Rx|o_z^uzMxe;3zn>vyO+Nm3z5jGM2|27Cf^}nsxhDUP&Jpa03wtnt+YxafWOr#Y zTb37Aix3s1Ca-Rtn%uSm$kbPem`hDwwXCw;)Z~_vT)~DQj^oU z15Wy&Du~-C&<8dlM@K_KNw->n$TO7Z44-__3SO+EssMip4Mk&1JBRQ32V!29fjsK} zPUPetIo%CGMgyg#Q{6-msx=fBX0gu{7iHs<2MO`33!6TZYV;XFHd!lqlN|y4v>*L*z*qgO|2y7+k|lZ}>5$ zwP6yt-9Gjh)7dyqs(ht326V;Y0BL)enX&O&A4rug;5X2Gz5k*0o`y@#A>A!B}y2V8!Z zi}PCj0zl#vEx1lCd9y#Y8gTIpKFV0%!r`AnZ|T}($l@@b|ATk1N4n;|X(UtiWD>LH z){qpS5dCYj5bBArsh|%e3j4q;-s6=zOZL^=b9oJgVWfHIJ`?7Q?f;9bl*#j9w*AD= z@W{wWEvS#cKZl3n%5Iwz28Z<>CU4cyZqm@~w{1svL!V_UmR4nXf1?6zFEiX`cKcZJ zHj41Q1Kn9+4z3*zaOL}2@*BY5Nc;kp;kX=Q&6)#od0_`41CnYoE!z*=XO&MynPdx^ zLs+90WCOnlp{*gov&t{Mifv>fNU`^N?NGgO=ujVhw|;_PL~G&W;$$BB)t9`T2cy9% zF1`~(wZVh@H^hD`CJFJ+3J#{O-k;?lGu|KIzu_S^lS@t!%te*$F#D8gmPCD7fFdWN zVBt`TN){#X3ALh0{2Q;IVh?jQxfQ^Ss0qHmtnc`q&@6p#3c`xMU*Y?&kMliiOnjeq zn;Z>JhFPp*t*nM~Fa*^=T#agAO9^Z1+-K}J$$tCS{GyR3$P~O&?a1Zi9VCl(WSUYE zte;m6ZgS2$2C5Ne9UMj~Gf#bP87MoC=~YYKSx-&@yj#NsgwY_CXydCIV4ad>A6{I_ 
zU##(h(G}@+p&5I!6ylAjen9wJX{d_=-%TlVEsQ{0`1%;e83G&EIDAakDdnSxvqQgV61s9C zJM@2M!3&TKmTpOV_zzMYcrq_2IOS;80fe;)7Ow@6bA-a3di@)0earVtMJ{xUbX*Za zb0HZOS)|BeL_v-LhqlCvL$2*h-HJ#oBAAe}gXL;fL?V*xNVg*TJ3v~f9F3Os@v@Qk zZyW8)WMmq|@J#Q+v<1Q7!PRom|sK}x*j1?>R zuKGulzpY9|$v?TQPq6CXNcP3=h!xy5_S`TaG6tq|o1uHr!|X{yGyhXM{GW~=`{i`| z@!+NV&z#7rY{*fH^zGvh;Xu_8EKy_?$xpsm?K<6V=h?KjKa>tTyYgj^u#0Lt2i))= zofxMPw><$b*LLS@aUIw3@xP0wCo#Dw+4yAem=}y4?q8Nu{8d`@xUi~Q&|kc8O?PV* zmZpjO$og2z^yGb=pIk&zLNe##sUt6OWQSPmuCjGi*5+~6uJ&RHAy>xs%2Ed9s$jL@ zz9`#>$D7qlk{W&(2%D z@3PayRqvwe+4*ndj$TslvVJpXs`sLvU9@^T)O)5|Z!~bATkj=(h6W8*TjfGMRlVk` zyymLhD`fn4;i~<@W`!HN7i3Cc8DAeO7XaLkwn!T2rc#tN%$fm+3uR7_jl zzovwJ)0bZ-(*z-ps{FgOk2?%=a1d4$aON!cTss}EAy zC>35ZC>0y@yC{XwQ7jSu1|9_mE^)6csy&_Avn5~O<0`t`t!UX*M9ykJq>5IksE4m7 z1LW5;BWt$F`)T49pek-tf_3K!D4)?ic*QPY7JZ1!&1q^~a(MC37=PEV*I%tLA{GGj zdrW;;Ib>VK;^e5ck&%*C?+=aThdbL2R0h|5B6huah>(G%AK*MegYR`Bh8|E88G>*N zJ;0yj0Xw0}V0W!aw;b%&bj$Q9+HsxxY@KLF)3~Ij!PXwVzZHO48Rq7G)4`}xPjfbW z5_tH!+Pa0B6vju;AuNx;hLT98B>au#o~}Yk{^7)`(u7Z9hmsY{>1K)<^OghU?3q$k zJB6DpN9qIHm&_6&EY1~D!f3B^@1!vn>zYo(xYPuftVlCDhCy+`t+++rO_%)(Vx~Se z#0rU>A20J}Ew6N~Tj>ss(BbMITEejQpzxWDS_2Me*JLhj##vc@;OqirJgf+4608Vm zf)j9WaYAKXWV;_yua1g@HstSC#3F(!Vi^JCS4Avy_2O*9XfT@_c7XpPS}a*KhF!-m z46EZPa7n9aY((DSVvOul-=vjQv|qfEoisi!qo$US^_7Wrj2%}O4crNe^hHLUf z-OQF-!>I&?6r09o8_L0{*vAy$-wxctuGTnOmnAb zu9mz{)}}jRSg9a#q&y3g@PMAtEd-eWpIPyaS?u$z0G*qvIUDw%KHsfs&Q&$1s^(SI zJXMK3pl*uIWeytOuO06MKf@w{P1R{K!~3pG>lUW5gYq-nXEQr`yCQ23#p}I098?Um zn5aHMhktQ=%;_9BbNbvo6`)c;shs<*CQIk4`|OSaK01_yDSqz`=WO96xHz|~=+}&H z>CGpLL=Uc)tZ;^3h_!w(te%BOK{rw76{61-SYw+E=^1u#r@ zr>*U#t*vQ0sn;i&wu=goEh!UvKDA3hSmr|D?=*xn>`m6`U36uawb*3EjS%u&mk9?n z^Xkzl_a2I)`f`tKiXEDLzh>|NR^aY%n!IL&L;wxZtX7j*YKn6epCOEc*PbHQof@$& zHKHvhLh_Vsv}{N~bHdwl7BmsO4}pvrocHnjEr%D!U!E4l?&_c8ue?z7c>y&NIOhUI-@$y}kK3b&}}TDPJ#SJ9d( zT31EuR8%I3oo+SH6h+npE7|ydjrkJ%oAyyczDGwm8exBT%;T7LCpm>vO`YQE=WJ59 zu>1WqHLcgv`^U21l7~fc*&bVSQa-uA+;7n(m7VQYHsdOrQDw8LY?jL2xxrnNSJn#z zUsRuMtyRoKt1 
z;WN0%4rftDl4SY1>HM}Ju+gx&=z8-);O9K4X7RDNxnk1gU9#@^uF@&rJvvJFG_LjQ zokOaf=D|XyOGB&je`zJ2YNbj?w&fHU;CDMDA35u}S}x6lfJrn!48R>6^cgBF%?Oh9 z2Darg_1nZFn{+P6rnbi>-idE1PidP>1=%_u8nFqgX(ExU@ARndwnTA`v6(hlx0mzD7q9D6xm z(JezgNU3T0`=$@}SPzEg42U5-7H8K#l2vuYsSV<4x9wHe_Nv-mQ`>7B2C*$=ih15U z2x(qGW1`!gbyv~4DjHXJ#_3LZ5Zk0GIPhx<5kRIvaEyFuCVPZL?U9!GK-JROZc8(| zWp*|2S+z7vOSYtz^DLoRHg`}sko#0M?H$h0RO)f*hlcd$4@#LP&46h+oDd`dShp26 zC2ZjvHNAjC)5CmTw^o zEw60^()JwVL698Zouf{bDR`@61NYv@FJP% z4wK0~Bju_jOLjs~Q1cX|dq zCtW8eHA&N|cA9F_63}-Vz|fKQN!5zL203CQwA)r>gx-Uaj=4D7s>KeY8CB2q^r76! zs${3>l!xN;wa3X#)QolYEyYlC!FR6P*IC!sS@m^JeVxk@D#vBW7zRY7o!1Br7&$7O z1QopfZYQmiH5d%GsXB)f$^&Fpw~&dVT;}APn&XJzvUFu{#uhZLRUDyzx1xDh(Yz|^ zsG<%Pm1&$#FpI3&=C<93jLw$!X0fzA>7Tz|F-f(SpQUXwbX66DIV;_^)>Z$?-m&Bl z3-}E)QxF3_oV_E%BHF?baLZ5Q*HL7`B*wV?T}E3Al6m@oeSIAw92-?gJ8>O`!FAbt z5J{AnL^oCfq7RGF!y8g#&v%Q>x?-~`c2334Z7gPrn8UjZ0J2Qe#=jZq!j5QnBV4e&Xd%I&ZLds1?4PAS3-r1A$vL`#T z8#@?$WwILFlC0!&Vr&HBX}&V$t&|H!q82v{4wwN2{o(e_g*{F}=YYXvUR6_Euc()d8sS(^8B{EeFE_}I3}*~jry?UzrCQ} zI3_y!J+WOZZGM~ncDCh?iIdwMI>Au3Yx{yZqt1%Q;0~RXeD#hBP1AWjcLHk8*ibgN z9mvjepUrO{%YIEO3ytnHS(F&>n5I^}v;Zd%Y1X^dbzF5FRkx_>7OAd`hpsAVGm|!s zRVFe>&KE4oS9z&h<&vv%NmVYZ%4Mo7wvu{8Qn8io^p2~ui_5N(%L0yXx$G***CVcW zTU&Llt*W&(wYElUWwBxzPaUg}*YT;wA+9+JDjM1Vqj6Mi5At=_#JZYjO9O9jaDh>_ zFSR^h2HvS|(Fxsx0`i_ss^}y|H^Ragw!_@-*V19}62k_g3-~?B!Z*8jppB|R{;kE; zEwSD{5Zn{|@ooqo>)-0UvTH2)s4OvNXnD70$Ry8hHS~+1P{9}E-yYML4BpcOj{+s69tkE4l?GcLTOn6PQbK+Ff>$UfbQniInr3VVD}>Mk`LFMKp1uJ^6@BE>htPF4Aj! 
zBQ0;qe|tf{QI0zLebSVx`8}G~&YqkTP40!*SlnU97N_TdQQ~A%`QqdhWlG}p`O+}h zsA6b$xw|s5>{dqDKaCkGDaGFCUbNE^AHewTAc0vJPE9mZF8$*1{PSBJiPbbas+dtN z2MH1u_id3CVqj+_huy@@m`KiMhG}?KK$rz3%QKL}3b6RPlq8%T+(-+ynH0zYu8z{X zLI-A7d)@}yD>cjPO2JE`g6v)_t1*K%)7rVzGsELU)*sz>Fe3zSIQ()PKh#AM-!4LIzLlBWDQ`0l9r9wA7>%u1hre(@DkFz$J9m?{h zFWt&~5VBCwRJWoD-NH!~_&KSH$k^M%=i!o8=5)8rv@0{MGBYYOLzy042w!Ef)fQg+ z_JfSw#aMol`dKI{^o-16;LJ!F`BA|@NaT?Zlsq5mpXK+MQ(JSiwL#U**)?e|@?tBl z*owMH<_q`^#d7p*nt|`t6?}~^^0bgIphj`1z!yP^+#q?CWExRV=2c}!Rdxs~BxbEp z4_Km-N@(TC-jSDEa^;p(j=UhqZMyQebFVKiaY&m4*_kpIm)9WNG}jILWf}Lm#TA(S zg#kaa7ihkOUsC-H5R2sAmkH$-@;+9#&siHPRSBbs3H8M?!NA7aE-h@V3b7oHk-$X* zUKWmB*0?US6rOh+7HZlRFuDp>`68>j1=tEc;2cY6o8W^U%z%egL#oVNh-^Eex4VSmHq=lFT5TWnIdV6x%?rd5oDA;si3G%949oXjtB{;uqG za_Zc#rGh5i12Br&05HU*V|l{hVR1d6hFP8+0VY!Syy~k#W@fVt8U9yy8%2R zGtKbGupXlAvmq6U+Z>%gkue!wqNy+Qed6#e4^-%uk5M86dP4~Fp)p4lBHw(8Mo~S zd8pcip3}D}^AIXW8~Cbchr1D!skCJ|E+!i-g3(s}YDl&bcr^%Fo;tJ}BC%|tg#9ut z7BXK2Dg7CiL6kKg+%KJV`1tV85IuRXq9-dpdJ^lon#WltJSBXSn5&nj$wT|)v$I;? 
zj@Ob61Sj$tnrw_Ex8)P0MI!oFOztMcu|2kq^jbc)j18u z@$OkS##xPVj{9s*sqON-L*7`lm|I1Q+sF6_SgPvydnFk?Z}+2w`Qn5lPZr5gjBQZe zqSA+Oowqe87u|oJj(-viy*0eRwh>tc*%Ge<9L#wX`Q`k@x|LfozkmzoUb81;y3=Y_ zjMgZ#Xo>xDtLgfqq`X*g>&5|Lbq|E&FS~QVxkT0 z$~1_4lm%Ojobyp&?KaSM4Ybw3gc_Kjf!n&ydZ`-pOUc`v{mGiC$;l~#u}g9ieaK)r zSt)BI3tMR<+J3f`Mlxx8$g2JSyW1l~=HHgvBt-II)V5;p5}J&K7!pXz41h$+45@u; zayP4H{v@Wiv+M2UE{8W;ipUfL_Sx){N}g|UWw0aZ?qWw$g-w0H9ZN|mD3}RxQ$`6& zKarfWZEt>Cl7fOM`o1uXb!tGdA4zI`i!6|2CSZS%oWZINI&elSyOE$ksfb*_bQfAo zf2%Gcjkp_IAY=f3ZxLptltKDsCnQK6+9I5Yd_6_a8 z7cu2~_T1)Ao(p$41x34Z?R-_f%(N7>C|$(sj!ol?B@-TA$-Ve-2& ziEKL9H_o|o^b+`UP{oS@j7ST1b|QIKU$Nhirz4Ch?tgB9sjgvAlkbx!cIfbc0fU}` z5ZR*vma8A2=*Mn|4zhEODev^*v!=rqap4lUbLi1tesf8gk`7Anb*3MhJ;8WdOt zDQg@Lvow9y8LAvQG4=$7UQ6NjnV&!L#N%g(_#)ZKi}$yNGQ1D3d8E~!x+J;-$qi}a z*6;OH8ixG%VaP?LLG?*aA+Y+ zuM=muDZP%IW$`*Gsx^)_nXsFhogqMc9g||e-259(7~(PS4LkL{0j-gPM3R+5|Ms~G z_yO}bJo01;#@L;eGlXU5TJj%-Z!ho}Hn@GL)!aXFNDrUqAM*Qpy35v3v{|0NXe6bH z^W?hP!<9{FBO_edwl>n@DiYhz6+txuQd_azd3NKGn|X49^eG3_(;GdCi9NmtkSw!1L3l?Rcg~J3%8uKh~LEV!NOZA|28l zT@)NJ6buc;(*d3<_{FgB7?)!w#Kim4z4ijMeAFRkN~G(GEKmg208YW5?hcMo5{O`J1Bn6a5bS28 z$sn`_&1#t)Z|E?N@7Yw?%Qx>zr6-IaVJ-djv&6$ME97%G6zTIX9w0i6_78xg?(W9? zfvvssYJZV*+J5wAq&7zS3fhH?evV61JD3~kv+j%^=5n)=OCVJ4z_G!p1WGPph>x;B z9?cNuLXXzeBUs64PBV4DpAlU^T&Fak7TU3{G;j^TsD*W1X@WeEjkJ$&VXhg^FN!)x z@QLe6YwS|A>tbv(tY(hHOEGA9EEOuW22q8?E38v1`kGqUl1R!eJjeRoC{*l*sn|sV z-#({2sO4iPT06P0aj~@{Tl(Q|Bnm3+zE6pAz3S^6=Ln>97n6cFO4Y-K(X-VInl|i3 zujymE(qecfmn4!tdD8nHx|L?==#)YuPCnn$80B)d(i+Y_y{Y6*GNd)KOEC#JK1(K) zZmQZ+-3%U;-g<|&t=^$;W1VxKp;2f5sJz`f@r4v+hC$V4@)e+QB$|j@&pIR11Kw=g z?R7*Uf(Qag#37W=Eagz2w(Ul+!6k;d^nVf38(J4iN$S#|b2)u@75z(lfW3U^9{Ii#F5DiQ=SLUKhD zj<~2oP!itshB8ERFtz<=&uAm%@B+4%f^Z<1-fJge!?uMwxuin3h8!Mky<;D46<*iS zPC~NTH?q_8MWA0K?q7&?Zn7_B|&? z7jglx&sOn$(O=;dy9f=1FY7m)Ts!<$?4C0Ws=_VO##1K;q#Vq?a~bT?(!ZZgK$~@} zg%x%Jl5Dp7Ttt^+Wou{lJ(W}xR__-D{_V*AL0`xrU3`JoV3Z<|EPzWFp_k2As6K#& z>`JupuxU}_Sk*YLa-Utrw^L-TF6y>9V!IIGDN6n% z_t{A{+r9VK=p3lm!{RZshVbWuXBzMDt9qcHeK^m#yO(uL@}2fz@WKA+8*J! 
z4)I%^{7E&ZYc_}W0F|6`eeDENQxBDoU|t>j{FihhEgL7T-)vPh*l^sf`p>3Cy!dv1 zBwns*Zs#<&B=yg}sxwa%SyzGp$7Jx9{fez&B_8g+(cFR+2eYqlDz%5L4O`*C&g>id zR$kHGdcc4k**7=UWBtD3$FWjFm?F&woXl_9XxKf8KSLvA6J&N(2cu})g{$3J`C21q z8n5|k*e5A4uu~FrQ9-snWEV7zCA4!~nwR3y&1_zr&%)DvxAmSnPePW48oGiSa*g6b zhyCHeeteZWOTbha-05jE`D3HzQ?Oyv9;j%}SA;KE70sxkv#RJU6{K zb+_I+T;d4B+56=sF9AMC6I3^IWHU2(j&I8`Mg691&MUHy=Kgg*CE{~(5(_RQ0yU-V zER9N<;!A93W%P6d9S>AtuyEGVU>au4@6$Nmrs7gVnp<3lrZeDs4% z(3cvg%msZ5tLV5g9hF(MdzKU$UPKRPA5z!5Xv&n728~~`chCU%X$Or7bV+S!Z;Y;t z^P(H4khdSgx-SzvE^al-D=I;cSFD#A?o4K#`wVAf_O5SloE$W0z*vT4$JlGKlWfo9=E2Ftm7T9 zOlPWo9qxmAZS}8_z+{yZdT2VIF9*j?AKsg^um9%4*<`NJ6!(y57OMt*PO>a^zNEi@ zPPEtMgKF1Yx;^$k<9f7%`VQjf0l<-rUVvx;V>|6`yZmqUbh{Fhk)`#8{KAF!Ft(q3 z7Ni!(foMGgLNB~Kn9&bSj-@m^C`XPWP#_vP#NiMJ-Nu;8wx-!p*rL^x)>l~Dk+#NP zW%=k!-cRU;X=x2)ca;qxRUCk{{PI>n7wq<7ylD9WtyT&8XEoF61C8o{YB^CKmah-3 z@n^B`#X|}9jhJ@Wu!@jn3(xD3=;!W?Qr-Ey#g>OFyHO^(ymy$uRch9jA+iHzGYC$S z66*Y=k?Xk1X5Yt^TsYY7{N{QLtcYpO{d%9c1S_MCzHP+f?X#?HB!13p) z6M$DE#4t>I)^X3+ zs;_6O_G}){{$72_gzBkOYk!NroceE3rc+my5}qvpS+UuUuH;XgtEjZlK}Bc$(_<|P zFIEktz-MP`7!caId)q8c>aYyTz^6$KiZrR!itsCQcy$+f`s?t6s!kI~2a<~-b-H;t zIvrDWWAw!c+V+>`)%|6s(d)FBY!r-b-cGYNCu{v$m=fJ}m~I>k;) z)II|*s7^q+^P_c}?`elNXZ^+(&eH7Z@*w@}rw7}}(L2N;1my7?rVq-TN~jagygI>r zXjVASEzBL)JU9=S2KXtT2BEYmhE>3Y18ze68X_EY57+~d3yevb?d=)$KHbQ)o{XFG zmq=XLZ;4npH&w1YZ_qVg>bEurz_Ec!AH#yn>uiN#H=2Hph5!VX0?7MH12n15A|UM>UPGE_TTY!9jmeHOC^f929ft5F+b-45z? 
zCXAzl>xq#5kT3AHJ%9C4`3r%AGN3 zZzX-mVXIA&FfXK-2~iR}CSOI+rODmFYtovS3zR-*xq3%~c||%v4B(UbJ?%66pwtqM z*|+-oTtyiqXZ83j2=62;KP;_!7QZ6-Ga9%xoC{kGGH6&G^L(tLXHoRdh1gGaeoI2M zQVlL-6oh6aQMCYPG|_(`g#!AVj!Tx)M+9e-02*vU+s3u*h(IlAomcgJ*!T9PV~ri* zsbw!Dsjta&N>-9Tr)ye%ZfcaKk^AnO5FALpW?jFet^+joR)=!G8!%T`e4z~K4AXrM zhP1qDZIL*TY}eP5{S7U44 zY7IYdy3@4;=1f<9GQwv}CwN`o*v?7P;*z_}6js`6i2M=q!%ngjZQ8{QCf5x*?y0EM8-@fZkNLQe*&>B4hgm2N5o zwT@4zg#}swIqdHgCFz54#KSO3v3nNgNkh*h6v)Qm2J2NL_VzNnO8CP|lNXb5Xs-!NhD+GADI%A~{m$SuuJ5x-%zr8@)ih z9Cb-O!aQX*DRaPq5mLWmZ7D~nqel*s6{%lgXj_uHi-M7j%35D#gl*npL^`m{Afr_{ zJC+s8!A0nxpwmS@MvdiUDVI46dxL8B2y5?A7E)3F%nJm+&EVVvSrOTL97MU|}ou@pypEVt>1c9l3B-QH?q zd+E8+Dy}uV$F*ivnK=$TvTy%2PM^JafeT}?mV6r&x0RHCg#~tPNN=uXSX z-S>i?J1B~oq=MTWs}#FJaml7g$Da_ki=@0LMkMY+7hmQ^;P}yK@YNW|z$Oj%WC$C3 zGUmjGqVm5g*|lQD_l_ms=|1XqKnj1D{B`%0Yv{7TK)ccyELA>hH#`z2>pCjkpp>~% z##2}Sm{IQ_uNrax24&`jX@}z{{BB_AI);s9!FIY4wa2B;~k6 zn%<#hP!ovRuUuvhwcXLTB6S36=^aH&D7JANGa83Jr)mNTjVYgXgFr*FC_)penQi$m zRnH-Iuz^a-Kq!6J2JoCKRcnHh5iGlEmsM>pF^h>Z6;1f43X?DLDY{+NnN*Kv7l`oI zs4>|kokfssO*P>ob`JGHCwcSYOBDYJ+!f|}OvXnJ%1Ci0ooat0)e`i8~?oY&a&)qiM@g+Y$1zp|+lpfVu}O94<&wP18(}{7tDkC&|R$ zR13APxZ03Y@H5qh2fWqQIIjAhn6lP?elKll1Chk0vm4)6ZNG@WT#b5i^h354Ur~I zwV?|{v8ZX3Cx}7aL9fZ0uo~$@IR~+C{6xy6QkO+QW>nE-bp{Q169Zvbc5aByYL__Y zr$?lX4CK|)o_I~o1aBB{+72jK0!*J;K$-5r6n!si;i>t@g!qTA_L{GD(p8JOfrY1P z*Gr6EXK%}@XNhD>A5iZ9fhu>);jlaR@U?65%{TgAj&D8}5J+#_?Fz>nW7Z1)3$akh)&Mh@{$$By*zFB+l2sQ+aIhw4-cT)&m< zS9Ls~`A2DoJngU?B?1gNb!>pwPq6xA+g!5-zn+A%`*Rr3XBP1dpsU2(^!F14Nf$wp z_Z;khe?#K;T-Aq@3Pk))<)b!sMJF&QT7$_)`JVNVDj~?oR*q8qbvmjF=02bs zp0G33J=iAvxz5wbzIfG@hUD=Hu%m65C$IXTRG$y&E8fY zb8DR^H7qzsNDEgbndL7Q9|&*9v+6I5GR~GbV)jju&cOm1jh9mmI#3`UA%^otktEO1 z7&!dGJ(KpJgH&c}i3}FqzRllK@8wGJt7haFXjJybX>E;6Mq&pfT-PJ4Jwk) zy97d<3$afHIl^1;SeFwxI00>vFfk6V+Xz77_B< zr)q+SRnM=5o?jPwetoXz*XMhFeWB;qpZ5IvQu)_m4yFoVpaL^H)hs+$gII zQIDq(2^aGrsjHBOVS?Q{{IJ_Uu%{sGXz;rnCFH;&$^$IaD!}kVMtvX=k$gqXGBGP) zIVcqiLR55^V@!e!POYx-h~RLxN(b3Vd^&P#8`+5}#_>pXbZqM~Rf0uqrM|2TBvgje 
zfxnlm%2>P2(@iCf@|R>bCEvlCB&Z@5$>RnwJb~C8!n(sVg|H5FEuEQ8_$)BiVRC_+ zIhIzmp3Zo|G*|~~k`9|M3#)1-5ItMrKt@3P2BWgtJ{`9y;)d)1LdFdh!P!w09rI9C<9#OJDL}Op4Jh{<7@$^g!AEJd!GT{$kM53w+xhkoz zOYsPovkpW;r6)Y-xE~GvsCYgae4%vz`O^JoO81{C-G8!l|B2Flr?|)7e{*zPeVbuI z=-d7TF+sze{On(JN-u!_Xz-D#=&1-=uU}Vw?I~TOdN-re;QRb6ndDM|SqjJ!hX`da zmUt!%2Gr;nYAkrvI2kcNlzM!Mb7Qiq7WF(+;re>o$VP02Wg_rdXBbXn1eOwHVstKM zyoo)lsce>^3YT?1QMzCBcx~NTweDOgW9n7Dg+WPgYpTaSrf>nT0LZV}UP(oK@^^l(dDg=N$nkBml^jl3?o9wi%yJQJtL zsr3rq7&hjT%t(puuf91#qUW_T=+R)Iz?jkC)1~{*75CZi`}r-448xZXGQahFezyet zXmGBmXEgX&>Hed|ebyFL{JEQ)Yd~N3#t&B5&iR5Cs^Wz9=y9FlDh=ye#b$E0ABmHk z68}Z`0LDpq(>M-Jr|KwMP7vG~$YD$(9Ag%xN80cq#OOYPG(aO2Aa}%H67%ZRKw)l@ zyi<)V8naQYrW1DbvsV0^*Ux70^L?w|{9XLOOOgj^-5$fNz@6AE^+~LCwxV366GE&N zIMPFeB_ilFE3boE0PHn&k-Y2ri*$7CF1wMNs~mIiH}vV&@LU3WM%Apgb(myPdvAw?b-OO4<3YA!E;G|TEee6@z8OnPriJrC7suD%^>9xjcJ2#N(@t9@-rGqdz(fg z27{DkR9YxQCxnQzUun1h2c4&d9m^eS3fZ$pY{>>v1&A41fRp2N6e>kr=Uo+%>JUHE zN=S6@KfxDN$BShOI#oq-!&`@1Y|2$qsseL=Nrhl-^Fpt$w+`c*~Jo>f409{em@iGG9f>>5kx>UTxxa$!*%07FJiF69K*+2wD$beMNZe*7~Ue$q0 zRvHss+rm`+e#_>7-~awWCAnXP*>6j}VTdNR1L4ZD9m`s|@-25Bn~m6_$Ho|q$!$`^ zgvZ53$Cv@FAt1^?WM+a;?&P@xd)099Gy799ACBJH^o%{Fo1SrU!KP;%RGtzfHBS3{QGI02dG-emV<%r~{8PuzNx8{mqQ)y-!4>vVHwmnD`vxuh;A7qt6vws2G$H0vpf>FuQ`;kIJASjLB<{=z8# zTEbF{=m?5n0o&6ONtt_SiQg%S6}z-liH?;iDKa_hXlbaRCCP@Nf|fc%&=Ore8hlb1 zOYRJ!?z9EpTBiN~aWH~DhR9^%sUvH?0?HZ3;2Xrdq9Az!1S*nnjCh|alGJ=EvE zU=i_wK^AR#2?u|J1HXNpC7d#&MF_`a7@7yhVl98a@)Tr+cCYp|{C7GRhEfr(J zBzzm?=P!-@t|2^4VszV6pdGqML7ph>a>*CQ4_qYthHfgt(O2rr`kG2rC;;nT`JG_X zOZ`r>WjWo_UIc_bnY3IH%RMZ6LHWuR*NgeWdb3oD%LZ;`vcq9VXPpV0UHb^jus_ZG zFz!{9DW*s&sAlS!8F1gm;e1IvPHT^X`(;i_Vx>Th%gupui9Yi(HMEWfXSlG5e^wI~ z+e`DP$^=Pq_-uKb;{E`@+_sThaxF%`=eM<(7o5CL#$^hi&5~p zafB5wZ_$eYsV2mNb0ZY|v{^9(f5i&5+#G5im}*UqVGGRh+39FD{Xo6r?~cy|PCWIgyOqVyHw;5=J4|mLS3URjY+YN^D#66vZ{RvmFAw?;&nnWpcJXTMzJzE$A<|u`5S{eb7 z^_V~3F!JXEbT6GMI{Y)>Pi#36FsO~te%*#D6Z#e%Spu=m|9YgaNHWV>iH3^#bxtkI zjw21Tjmz3rAFNBPRnj`)wbml`z_t;Fh*G}|b4KzZN?`Rdm`@z2p6 
z!8>8%QWoE@D{_mYFPVtZ;ElK+f7|YF4NlhX31;m6NbsiGox$(e{b80^?g-xFzkdhc zzfbqLx)-a=uRYJ+Df<+N@+(`U7fiAJU5jPUwZcjOfaw4Is;jTs0XSwP9w ziv8w>JgXqjqEXz1YKwpZ%Y#HT96F0yG(M?t5-jGwQnsd0*oy#i=nthC(Xr+lXJ4lU zPb0~9Bt*Y!O@{KX0e2yviU!v4IQur++?}V5ULOg*UcEEm5La-8rv92UHngYLmB3ig zs-gv(`K3|~uL%f8#N>;R-{=^%&ztYn#=2lLgU1eha<|nJN+JYbP3arh{2TEtw@qI+;8uOl$CzL@mET7NaOeGy3mH}#NL+ep)_CLxI*Nob74TV)9;8> zC)i;fXX9*Cx6trQQLC9;F}6QdhoJzD%q=e)3z_C?`))b%UvV;cWe_uwu=0x$YJPTR zrLBG9e}jOuBC~k|5VfoLoGU^P>xz_M*wA5+UGc=m?bTjVWw0AbRrK5VDNm}#)V3y7 zR`qjI9nj3K{@A3tnv*I^^<7dW7R8b3T9;I*dn;01+eoT(_eiv|nN+purA(?<2*Gr? z>0%ThosFd05UCOi0v~`v2Ow2#@hFihNYy1(_Td@v3DZQX>|}sMX#!G*R2g78@SrDE zRZ4x3DqlY%sn*J*YRa%9)haB;lWNVADtPNj6*Yy(E+f@dBh^_brJh|>FirASaF9)l zo>b2ose&=WDfW06&JYQw5I8N(Lq0aS`g;Xi3q^s`LXDPDd>k1gwBfSuh0*X;mtRm% z4ht3{))~5xH-0)2v6^I@am0%A6dqn8)`A|{Lk@obCPb(%i8ND4(=Gh=l4eb$S&>E+{q|F`9ceH>2rv$Sqrp%M9|nMqsFz%$jK-}eDV%$DWe*N zjKaY0kx^|eV2S^cQ=*Ib6R;eUQ4^IyMyYQ4;$&2#kWmvAC!=t-IvIs*KSCVbCK-kI zrw8%JiTrkm->%qNjsM_SPp(VHfcg;TiFYhdJUm z)2bmpfy4}(e>vi-QYWMM+7Q3`G%~7MMtqi)av9YJ;#ZxFs%}Dj6k=kc4e{INVI$BA zltAjh_Z{L-d&EE0BcqrSkd>fk{xa{3Dm<};j5>)-LPqi3CK&}<$aCn$!(l17LVKdR z@FKgWS)0w#esUw)BQN;U$j?x`uGCHi5um1M+YLaNFzP)SZ4gT+i**^lW_Zyo)5$gr zbP&sjqK=RRCAq4&4f6oY5r$xyvqcLe`Wz3k)97s;*~l}evQpTHD!RZL5R0}7YeVQH&})=7Fr;=wHXELt_5X7dD=mgS`b%~ zQqwb2j@deea$j$$;Lx+-lQ`w@)U ztKX4!_30y`GIb$5II`egiq# zIAhD(pa3yfSMokbj!v}01%+9BdQM63P;t1OW2;{bI~PMWwVJG9!3YWUq*Spgr7q#H z?o)ozyf4fyYXkTRK0No`GIlp-V6|Far$PKWx1ZZc?6=5vNn!kQIf=x5~Gz;wvsl3saaEEc-Y{~M zPq#5K77uOmO!EV#+WDFiQzgt36+GL>+j);RFU{52P(NFrNhG(%`4Foe!8{k`s)aDVG-N-Gx0zFUDra0@8TEl&~s4LH{FL>E#7d6G& zf*5Xagviv(NyZ|+ylkN7g&QD468c`eL3$499OPxggKbd6%f@ZtTBGu(bFHb6dAdLx zM|1c%BR17_s5pxiH5X}a7N}D!9?=xwAI{$W15Aj8AUSLpr=15s0?mho$pb$Lww(RT zu`)tL+AC&jvamYsj}?l@GSJ_(7{-Uv16BhDd{Hnc+a^GbxoTObtJ-~xVURNXTp4z8jOFhSZfK=OlZ?JvkSNtV2= zAt%%^$Vr&zVCEUx?#K=pE1hKvvAHa{b$Y{ue3pJ}gR})XA5sS}}%S~Q2tJe6w|sEM29d9Q_lwo#oDsoK~pb-{QDf5q9rw^>@z 
zUiD;7Wc6si(!xFei%(#ZDjLD{1Z`BSmSh3Ne0Y6L-%26_6z^aYWN*SrFGhO+G!|4kvI0za+5?k;?Gapc0;bOAKQ=}l zk~_nRE&LK-(jlxjRfVk0>ygVFuAX5RrJ)W$$|fNMgXEBbaRt647{NwD&$y`VBDHBP z;_t;RcA7b$I=L0S51>zPB|v3GwzY+&(b$Ip7}lX`Y{CTg_#bMT8e>?uI^V!+Y=UTT z8MnGsegWfFpi>*>MS5CyhPjF9PQq3$o2zp&#-VO`&Pr!|x3sDlHqb4N3XW!s3=UVP zHPo6K)2r2k!4hz@)!jlpv7Qh}wdKxWn3Ahm8~miMa%Y;a%A?LUQWAA_9T#{WXPRHO zwhSrdB@V+OG<${Bfzj)q;-laASzrL zq8ThPIXtxz%53p=tFc79?-_?IK<;KD9O)?glg)-r5D!9_GT0ywCQ1c-=x(CQd;%v! z6|YX4l)X~Jg*0g3@eQBe^fpJk05R+<67hLlL<5fKMDtL=2We zeg`)@Vli5s$}@v8p+q&-M!ddsEPq}bNf;{#Ec)@{mDrdVNBj|g#0n@roD+bddR3?% zdQ=a)s4l0HW9+U!6938=m4dkh7D@ZVzPl+yXc*~mwlp@OSo5MV04GrhT*@7W1;C-* z8lhd%-|3??Nw|_V4)q$YCocCEiiG?@?8{eLEg}lgGy5YgE)?Ui5{H)JpTzV^^6dS9 zfCVYeHupdTuy_vjeT|9}8=ljsDqyQf8u(f=E&3=Sh(Sp@E)U8`znaz+=+qrhsqhbH zKxoy^X+R>shRBaJJ1*T6Lvs10IL#&M84~4Kc|2+MPE_fOD~*dS9tg@Hv(CEd^#5Sc zc6fxP7?ec@z(<_3!~!>!wp=Vxsq12gmwn0$$AF2GQvx_Gj``Xq470Tj!=TU=lcCVn zRyyP>&iND#fP4&sBwp_4vL$wlULrF;BA27_+Larynq50(Vp3?lVK@|J%mf|WC$zJU zi^)HXaikDC0ch3DI{-aaVlS)Zi4#!h7js+Inpy=9!oi05A|wgEDW=_;qosTb)B~FWdL?>+ql&m}3RoBwDe0yF9-%3) zpS@GCv{ekm8HZ7W?<>I{19e`zRg2O~BRw6i>~&qH!{YvYkoOqD-Fob)f5>%kh;bFU z=epfgno=+&>2tA_t~VH-{G5)FdEIBBE9$Ysmei(vjeHt9Em@XOW~G6-!zDY59>#d) zgsow>YDkQdY4M5-{_>fU@UWax{7f-PVh(-Va^_&f;&?vghii`xJMvMfrSRY;P0_%Vg z(G1`04BlVI5NVk$d4pM2#tyR5@#gbP-h<4gGnU5j2hrp?Sz)W3$QsAQR+^uEk|gfi zykjbk|1mdjB5;^KnqZ-ns3jW&{UnZ-I@skQceSa2(h;?tBmQo#GsQ7mWGS_PF(EG{ zH^FWU6E!T&G zDc&;f^~e7Nyrc|l=YzB`@Tqj%RVkcuEloW)@L|GLS5b`7Xm6orI(vZ@&BUj#W#ZG7 zwyh_ap=q^2dm7Dbg*KZyjgB94HU`+e&VAERGOQ>ub1HS|do|)6S)Nvy6RN-3oP>NQ zf%aMFgeFwWRp!K(`u}Q9o>_5D7|S)x38kL%oIGQ@WK za7VBJO=VwS;`H93jI@<#(c^VF0BS_*f7WN2)16GCp67iIosVfJ>@+*{D-(u*V0V}u z$bTf%UR@S*YREhRZgvS)TDhSH=W`b1s%9wjk4{rF#r@e)gV#p#S3-=u3~0f+pD>6r zsgkCpOR6kco>X;*>fsDqU~q|4QIceVFOe$5GjGN%+2Q#{#M~eMaSf|gFTFC}tKo(Q zd{xY#d81Z%1Pv2$;Tki7m26lhSmHYDpvE48twOLHv}g#_%q{(5CZ_WR06uHBb+AO* z<;jquXo@?7C+gz6!l!c%pRijBp9mk!@rm3(C47=SbR21sZzqaxm)y6+CGhEm?xz>^ 
z>34;S9DV0ev9QPwIk(#oF-&2?@%7*dm43kdwOEKp$23I%PnXG2xV)hRxE99TjiVht#bB)iC*94I={)(a6LkwF-B5o>ETG) zXHjhILyTmdq@RUh5B$pQZ{bMN*l+6>B+%biLlD8R=anuQqGEEVf+@)RZETJQ+<D- z(8*D6;+{8XL%cxyZFQy`Q{mZB^9N)nHv51B!>_5$q8-*OYP_s2S?2-xsB04;-`2$V zKwXc=L%Lq2S1h2eSBE#t86cYFiAf3DNj`ZCU(qI3O_^EsSZj3!JCF7GAcuHNZI;bu zJ6AJCn~}OG3t?fBHRmVK_)lCAdB-V5d1^VM?xXZq3WxeF_p>F!zQy*5>--9RD6Lw| z@B|QArKp)|{B{qZr$+(0;nRsx1&!0u>>^+ZmA^PGd8CJV1WHI+q0q3p8!F+7@A7Ji zW6%*)i_cWG%m}z$ihzH^$;7OTl-C`O~6-8>e*qagJ`^$b1M z!IknezWj7ve!8dpfXcIpQ2w6mQYi1J+y{`su253ASwFMN%Xtp=ZW$}_%!#ghMiOK- zK4=KLxJfXZ-6WPL0o0tkFl8tXb|JEIz7xIyjYVtmI|wom^_cZB%>KPvLo`#LJP#LV z#!DrHljj|UH2yW}&K78Ri0eF8K7W(`JQ{zRR(6Hwwx$i>35o*f8x@nm7E%98LE^Yqpqhb^7Gn~ib09t66U1g7GCtUnr!Ua z#NJ`KHTLt3v({NvI2!Ze+wm4X!(Y_!Arj`x4}Ti+((o7E@ZX?19`?gOIl6rKY+&X6 zG<-(j&j0Dzu`NEj1F~fcVTLdU>>+*Url@sox6Vx=mS>QRzrzUGgtW!8UG5K}Ryq4^ zL$3~O1xtBgLC;C!ZgvYO>LxKSfDgR!Mg?02jH9^9!2vwN@w<$Wj&9Y$M};VB(#2sf z18A@~7U-$B>-YzdB7vkO)m#Pxc_8FyW6P>M5IDVM?6m+)OqH@-;U@y&Wrvcg=_1zP z2Xd{0{4Drem>h*-Y7|I#KJo_a5)`DrTB)sFP)uzd5x2U`N-bEh-5_ap9V6_L1y>7u zGRnDfu>}@b({0E@8$B9I+s)DlEQhbuvZ=P9Rt8APzA2qdzr<{*)k!GT1E zhF*|7Q_Q2fG!Pm}J_EC9pH*p#&2i zTn3Z0DLjz?AOz(gL)PDXNTqfMp_ z?C2J&RpC$zT0Erv`9)-Ya~8t%hB78Un=daU0O)dliNxiRVa6uq0nwEa@#{NoMbHxC z_CyQjObG@D_LKW&&--Q_ z#yW)zdoavS8z6?fIAc+XZJS?{+=`WGoMV^Vu24QZ5CR`30xLYT37$ZoLJLLWn+VKm z8mE1}3w>S$?t>fCk3#(AHw-A^p9QuX;90mpYZoklOmwpO_;b7@B8+JSwg+ErFLo$WR?GuM%>xz*h?|0LqKlv zQ4B=e1&&BwM5|iX8MO_N&9gs__lQ2oUjxOeeIKRQBW+J(#(=9qCHaWTQoh@UShS!?nuC#%GTm{I@LxDLduLNS~N;z>$~d+ zWnp&7^hMJ2huHqto9Be0pVjI+<{6!bc~P(dO=^+7&&C!p8@#C|Iv zOWc2(0a%b;u0w%dNM zdV45FuqQQ@ld9p=pa4-#du6$Rmf-Z{R78$+t|)1MDJEX%#~53e?x22W1AqxQ-sYWa zd4)X;m#%;w>e{H{d&^c0aUsd#Ma)jxFm5lScC{R}b9$3|Bdx@V=-5ImN)Z9m;&#+Z zMXk^lt*kI!`XMOBUYYl+p%B2Gt- zVS$qA;LS+dVsH9q61NO9Y^HJ&cYjyn_R$h~_dJoEu5??RuZYN8o*3FYQ*neg4D4&k zuw+&5n*?uiue8{5+OPB0ETvW}sadd)g1%RGrBW6*=amMxB?ZHSOY*VBOel~C!m^{< z_@AwBCbPj`oq#4+)GGY!_R}nY zg+X&-t6q{9^AT8s9pZN%2+`9(w?^w4>nwa+ZSqs{A*NM2%uQPA^`LrfVHv*9c&XLf 
z!QS3>yNJzJ^!Q-XfVo@xC7n>FfJPS~qNhYG1??dB(_5RiKbVJI3kF39cIB0L{hS?DTadC667q@7x+U|=#{;J6WFf+g~6XQ?mYZq z5UUu8{p^K*IfjA|3;{o{-426wu|cZNfN$Ng2I+0CY1b`yqUh|f?<}!n>g=%X=i<^|KL)i=L<} zdcugrM^E&$(5=!Y%2ZHm^x3FH9UoN3oog>=Y~Z^(44sra+@%vH$@v{Uq0py-Q1oHn zvlEI7nn0-U22HH?^E2&&CRTih2wV=DaKRF+cvwqWQ<}*-qXI1UZV%!bxS7lp*$hh5 z@vprv=C?`^)z5>|ADPgDlmol2pD2lh zfc&QGS^Qx4W?#p#r}OT#@M> z7m)d*@CMkt<3(`u@2`uxIy}G|#Jq2kR7_YtQudY1`<1;>>7R-Gph&s(Y^tYcC-_zL z+P7#9g=t;@D*igv59ffUZ*~hzpgJ~KtyH2A0*|u8i2hxyJIJpt!bxFWj%%jiIZ;z; zUW@TCbD=7q-`qj_1D!Uz0M5nD9i#+yUTu~WlEyc8w&{9$vz(D;@!xL0m19-!Ng+#J zG8pt*1d?oH8invReoryW4QI~9bhX0Ey665Yy~L5=>ct<_ozQHNU){-G;@5JJ&P%I3 zY%axlUZZ)!S-fXu%v>lWk;T-ZQ5+6I^U`~01d=PdL&zk9d2cZo?dYb!)Uy^~mI{4T zltg*iHrZfo8?GMXz7LHbC6T37GF^kLzG1S)X@^i-E!hqcvIuAy@5QeylsN?yD^GMb z)q?~l_*Ec?XE>()!pZqmW(z~HO*x8fMQ9a@MNu`hjhn?prJ)Y8HEb~<5c1h3M3`U7FAa-V(h` z@vC5to&`LaXQvChC})d@;Q~(v?#|1DD|>6jmfsfvSHka#j=9l!DL`c(Hhq%t#RH4 z>(@BH7sy#;i#1Nbc1xT$dK>hwpBjXV8oQgUoPc=XZgbPrqR`K~G9{=GU^*JZ7EEfz zuLA>t!&>BuQmrvjNF^>x;x%rq=9AYJ#`K)|xMt%oDQ%JLV&X%Lug4xHe8kLSjZTeb zYaWRI7d?=j2rvqpL9_VZD3gRlXbbU>Nf9rh#miB)n+O2LceA90n+ zD#OGuaz&!WDe=r27h*Jnwg1L!oR~Q1jg;YpzihJb7n1FxkI}~c7R(~!xFAvDigaT{Hl*)x^AZs zov{O#e5|pEUhnXxOHMZPC1 zQ`@2lCn~nOQT1twIpk81_+x8zw`%yhulnrrs+(7>8b{NF!=cR_SjohbTG!bcx3!Oi<}{M`IgjgeEmtJPS~%lz?w)~=EbrZ z4a!D&j;4HQk3UF!ah5>Rt?6K!qe>@;2R$DZXXnVG3EHszT6K>Vqyb{mh#17)a< zx6trU497IBF8+z9In+LH0UVh&-#~glF3(8;5lHFE5RuMs!ccM1=}ymW5{cW2pZNaJ zjs#>y*RyR8*p5(>{~Q;9m<;p|2iv01w&TpE=CRvdP{XI;)IR@PP+dOR;uV_At4_L` zY)4U43$ZRjewx3ty5il}YArOF0$SwZw);u$Ng)Si!JeH2zq|)mZMhfS9B`MRn|3SK zfs_0XEouJMF7}CeV>>8eAGYh=_=Kw6t>OrY+A5B|%2shfjW*4z2%9@7zH_n5K_zHN z?e>Uv2}TEB7JujByMg5MO_J0WQ&`5ntI{K$pv?$sw#ZCCk79BBoxn% z;!sya=)lab%kgCejJ08w*^~_vjAfY1Fr|f92cMIaS|n?vsjbGCJMRnm?>P)NVwWyFY%p*hns;`RXd zkScN!o4SOhjV_kT{p0r7v_$bOLu@bwZYKISa8y`qQo#YgeDil_H1MCSCp^17)aHgC63m0wnhd1DidHB1L=&Hx)t z6VOLfTh%o*jCLt?iDmPkKeQ>3k)SZjVP;ufh)JK-K_@u$iY7!p$Hj|@-Rqe`*4fCBnRdWv)EQQ@ghM2_cZ+q 
z4uTI+Jzu!la)O4+sA2}|%@MPRuqM3n>Ru82w&)d3{x-5sNcN<-O_s@_Y!Mp(VIJ8< zuAEbn9kqnOP(4u>6DYYA$qO>fA+LeMsN5f9>Xbg-Is$!#@rR7$cLsBu5lcB+YY}~E zmc#gdxrC~=9mT&U$x!!IK)hraPv%9@Dgc8j=#Gq)dPp;URJ}ynzcctnTh8+p*HjIM zs;;XNvqOp5bV|Lw1nfVI0bzv?FgUFbT35;G$6Xvn(@0-;0F4V_i~I zvo;fZFUMTt8Utt9n!uN0lB4l z>X$4k)FaKFztv@2Y@Txo+_K!4ja#zQq%S+j*G!uH2{kPACeH&2&K<>k#vy1`P+g}L z$_#p|QMfaRVsS-i5px=%mIGED4_NkWXP@r@i<9L6>$mJ*Ob&E09+ii4E%$`=-;5u7 z|KO?{!)R@_R;f3deeM2%!FZ?@h-*n&4wM52r&;`rp#b7C8;0-ZL>_mJx&^yix;31g z(_%x@mj%1uH4yi2QhgB#m^RT{f*C+}jRiR{{z%>4x|UQ6BALlIwfMaAQ4B^yt4D={ zd^pcrD{7HfunE(dWqarBQt6$Ty#_Y*uWD!C`q10rY@N$h<%hxN;(m0IzwYK z9gEbL?|{m?$wv$?-?2EpRc(^ua|>u^top19LclZh0qn7Hk! ziT<}rIfgbXJ|)@7tzu{#If&tkb21s?m#aEy$si&H(nDnSY7kMN6YJM=hRCczwt*bS^*+`D?o&! zlO06z%$R)$gf`p5guF>BKqjLFK23N1&Fj(fCy5Wg9yjo8IH(M z;Bn;O)u^U+oe;s^0FE5L3?kHbRftgLIv}F9R)9!*1&H+L5aDD;gUF>WL~w*(8AlKk zt_w%7$N-V2E`td5T@@mfxekb^trZ|LumVI-%^i;5pA549`fc`DT0u=eUS}cs8&gJS%!g$EmxUYiSgI7LQEjwHsX(8HnY@s)tRNtbI=E}08RT4^=oq-KfnW(sv% zQ(Vxt6mYtepmVdq`{8^$quaOW!%n-I6+@-3QADQsKuFUoYtu8~oq_6&_M3qQ)!G}M z>>Ug&%3cJZvV*m;_;JX1 zTl6AyY16WKb(>ycB1wnhPui5;?mU0D$?1;CjJFt4aP57vj?c{yeFrB5Q_Hr_2A;I| zzr|7{Yl-l4KWHA63G;7NMgm;!<|^EPDb70zhefa0Wi=U&CVBEmx;CfCVMmdANH?ss zMX?j9uzxU$0ux+8B$HNA>@3OxiIRvGDzy+-nbUNfWj?G?nR~ZINk<|FnZAeO4}&e} z_ z`EuR%?e4VL3@n=#+aY1oQYlT#_%?vUv|RGjqJ%I&~w3r{7X_4@*kChYYqRKG& znieE^o0i4p)ACCiU|$y)HNKE^{_NB8VSRCp)AFk-TbhAc91^yrae)KU%5636_*3l6bGBBQ6D79HLgGf~@ieo^O1oLn;o9$IBzPA$ z?fV`gy(f0}wd@PvhE{Y=E)mS~ZPBp88cz;TH% zz$R&mfqKnlajsr-S@80j%YtCnT$ZKen#+Pv z*IZUY>b1f*y!Nt^pm3{w%o1Qlx;-iX3^Ry-NOU7dYWftm>@7OKt%Awg^0)_h>;?}vleltVI8+a$^1+;eLyzjA|{Cg zgtT%k*KfB-X^-1~Lq-%xUg zU*e>mY{u2bytI(qktZ0{ETby>11b`hkLoyC)~X4v*~Bmbi5^%Bs(68Fn4HR3;^ko}{%bFFkBo(Q8N(vZqDs7GFNim+H_7b$}&nTGx$t1ebK(2~~16w}*c}$2Gb2CDAEe>FA7} zj}NGWPg{$epzS+&fWzVQW)5=nsK$Fp*R_mWSLE4A|NJOd(d=Qa(9(bnWZtShXnzk6 zVmm0K| zPuOS2`P&~a8Td{Nrf#2<=Ly0Tfk|KA@{(wQtAUBD#yO|wZ04Hrzo&IQ$O+F(kJZij z(H=~PhPs_O%z4o2#z8uRfce39d7#rv8#GqS73BOO@vPo=coD) 
z+UfKctp1DC?{NMQDa(u>YZ%-!0RQy_S7SK3GLZ8+w^|QQ>P9SR+HP2D?5u0yznyKJ z={RY4ll|pMK9pm5Q<^?1W%}YyosuW4 z(|f=5nZ0|WQ~VNSXLO~FlU%botM#P)ouTe;8!?_yS71^qHt&m{jm6Hf2E?o--w^vW zlsW$%jaIa+8zIfCZcJXZSD@3=c0=9@cbY47xuCYsS*vI5ug-S$Q@U7c{5<90ys>f2 zB`bM89?LGi{|R~nC#=VRVEDYW3>adt5Y%})Tl^*NjB!nbUmSGScy`IEn|IB_XdNFK z*NMq|PV_JyABF}k!jWxBWVzwsXsjBte_*n>Be);a1$&UGjirXaFl@Lir|0nf#IU4v za8S_EItn4(@7J7P)ai2o_kcQbfR7sSce*PQZ#`YwCXUj)YJ($f4U7rUn(^8G{8mrI zZ*&zKa}osiEtWSHP_~1zlDLI5JD$f19JLz+X?De%@@y-~MfMnQgc0+|vc*DU;0Tp1 zBdJpWtzgvlT5ZW+D(7E%>g2I%zU9{>LtAZF(t%#a4PKnTB*vxh)a!&@0&{q3VO`~j+ zHeT45dYt#0vHvaA?+WMOtX6^~bA!SLls57aV$rlS6(AoQn-&Q-wmCnxGuI2O*=4|* z^%b5jfW_I#&jx-27ORBgl(x`FZVKbSv(cP@SK4qFmi2nUH*)TJdp*Cb*Ym!@v)A5h zj6FjR!pDbX?lP;jLVCYwyDgorb%7s}HC-Q+4Z$ZP$=76-NhQ_2VtzQ?JX%|_U%-Akw6&@+(0NL;= zm?JLiD&enCr|MB><~4&j?@3?bv1{))lm4C@-tUPtPf`&r zAbe%)u@olzfse5dgokX05J7z05zOD{@FE4;pp2RLpj}tIEC`+5RR&#lTV3CDjV~Ma zxUcX)F>J8zD$})CtOH{DCLzw7Sk0;c-a4%UVR@#1PsK#<6Shc6=>Cq;%+n>UVrJN- z0)m82lEr~kKw_<8y2|KS9{Za*PV2zmlplP|DknlI&MR-}7b=q0yaS-S80Bf;$@Va=1I8C5JnB z0SR0Y?%LN?MupcnK)v=l;jS%KgqS<2np>uP)1HPoj4#>$^>OR`f$3ii^Bd)SLQyo|fdcxG_wdb!K= zGVU_%D?C|n7fW@uvJ~gU2x-|fEzAQFQH^o11sVAg!FGWB3v9sS_UM4$fX5gBCmZm* z;EL_feHFwXg{@*dkX)_hbK0#j)5Be>QDu8{VwjdpaX86)h=_ZdUG0Mn||m+RyI z5Sinw>P&mbW!gie3$jnuO{OD|D@*+rNSS+S<4O|7F}({%D8 zKS;U4q6+5Ql9-XR6nToim{?KIMiIgB33K8!tIN)y->yU$uuvd^tzl;(msqAP!VF}% z&`qL9)brq-mJlfUcp_HiWsfjySBH2*O?AaVmTk%Ud1) zE!@C`01DYBhft^$>M-<^rtz=0ahu6QB3~-xMNvq#FNO0f!D$qHkQ!)%zX*J{PykiW ztQ9Q2(`ugZR_g}cvCgpxh>j4aLF0zt!&)XutUQb_)!A628q1VNYb=X8%>L4+C*t@g z1j=0@hP=WcF^=6Dmy=u=tkT#l?ssJ_=KYev#SEb=!?qmI($dE@jnEUZ|8)9RI^(q0 zp+pdpaPW6Vv8QBO2@*NyM|JMWzCa@+h+)uWMuVf$(Q#@!qUkZ<+UR7jWeTz|5k$ln zVw0uBDsuU{e5HZU2m1#bePP%P6?e}l!tAE~gOy5Hh4RDf#rw19K8`2lY!`|1VK$^Z z#bI{K-Tb-j?uS^Q!|a6|qu9=n1N+;JtZR>&=ulx2Sl9#&KNWH zJE6Og7YLqBegmK7rmm;8s!$s26^DQ02|G+wJ;)CEABI#%KZ*^-b+HlOW)oy+PrV>B zZ-n&O=T&c1syDOmF8e}61#xA+r}E}Ijei8_T-R8Z&Oev2)R1aQV+p%sA-}78Q+j4* z+0yu)Sb2PxeUXpvL%rim)(e*L3(OxZW24qi;kE*o5;PIeh*+A9U{cQICS 
zv?(szwNcsHsam03N>t;o8a_%im;X{)jv8U{)A=2)2k~Em;n_4H24Q?y$gspM%z>R! z0elvgmo3_p+0v5*dot5~0;UGjO*R=caW|jBX!BH<45MC<57CCApfsLjGiHPm6o<2u zY+kXW#91TI;Na@EoL!{uo$)#K>cW>k``u`Fa8S-A#&A^EZ~Ym&={_h9iizeTN#zdN z{jT62ns)~8w)^eDcbj{HpSSyM!Q1+F2k-Oc-@$p4AMoX;DgVp<`KPJp!`!Qt&-iEm ziDw_N`)$F==I-EE?0$PNL)}mM`&sTk=I`gY|5P8-HCxm6Az(T@#tLVKyUv^Ey~&!$KSPnVPR{D?Z~hu zLm;hVX*2sP>kKOiM>Gr)>BPB)(cO|Ox(kPNGSL?YwWv~t%^q(;DNcXjNF4t#eyEQQr<(SrjG)(|a$iWOkBHy?u2Mp%i}kz}oh{bB+8bHJW4X_O22eeiaO7my~V z1Mlw;@nHv}y15V!2G~eF=0~u(_;}PEezaGudt;w?wTCEHzLp@e6PJqI)3k+UjWq;+ z!VrhF@M3Dn!;|^UD`c<+O_Knq7fG00asvf4f#jLChvbdnjry-1-Wc*f`@_#0E1|`c z72kH@d86PNBsU7472tVe37+CXf+rCRO)Zb@fgwduxcj>X>I%Kk!4wu&k%e!BPqMhO z37Eo4qvJo~6MI5Hg?|DJ%x7h*9Fi0n|KfitX810eZQlKmF0wXdrG z>V4Hd{%3#qS&jNI$hnRPhy|VX2`{uvgBNr+>66If=}gj|@S-L-I2Kbi7Go|=y3%2Y zjGv{84!+T+!v>h7A4J%0az#Tpsw+Y=3*VqvT)$mdz({3jbnCKcC$OOH!4%B;OdV!@ zu8#Do7+0hIYsjg$5JCC7FwVKpD(bZ z^WHs?{HMG?7IeIbd-#yV|435@{$?Ni?4cf6u99uj*a+i||6FUlG*EzA$-bx?)L#Xy z8speuOIE@;OrSD3*R89yWJqz~I?7{?G}mcJY%$_5mu6>&scufO8%480AlU|H^Dge7 zaVTVj!V+}FxsYeVZ2MTY)s9zWZKVBVr->QbWQvx}*vO$eZ#U?JrUXkNG<8(_QBlq* zBiWT3b_*1B0jX1riI&i&5TM4;i{e5rQeL-^T&!80S+!y>ySy*r3!OrB?eZi}rdh}~!vjnEYdSlcV>(px!qz7nK*u`h_f#jB(`mVFE!b&Co# ze@2o1*}MPn9q|}RY)hfkw{~t3_oR)!pHz=tova?QV8L`1d--1ZQNuUvPIj}sVj zY$%@3y!M4@x)S-BaAM{Ktb1kba$~ zWyjTd%gn@ySg9;^AclQv9qSzt%#Q|p3S~R#dBH2jvbU%+g>KLq%QXY>N+xVZ1|BrL z?7KY^hM(5SIaw_9wxUi5N?I~7!J!aF&j0;^SikNFo)AWbS@n_9Gq^836HB-5^B1OB zs8L$XD$)d+XQBNEu=wU#{=uBUQ3$Z_Ee! 
z+2iF8VK%0#I>3uh?+o7VSkTVk@h&fVoVB*g@8Xk0Q;Ie1KxlVd3^}rb06$mpywk*( zb9RX6nleYMI9@6EugOlc%@Td_Xyheac~yft=`Qhl?9X7Gh}ZD;_eZmiAC=-ZfA)Az{mCZ zPHN{lE*YOptJI{S^t199&UpR?r<##AFY^;~lFcBU_r>F&|8^CyOu9Wb~Mv)Q69YD*^>t2Rp1qjH<&OBRp(QEV9B>?LvW= zpC0Z|_0Hhrutf2Yv3|KKP7r7@WEV>5mN_ylO%>1(3|#D3%UvE1D<-c#1*YyYXGj!P znRdKptVIrd+NjeSCY?qYvXF?VAUiC7uaGfB-B-|stx(Kv2WpeMwhq%#C;~D7sVy?> z{_$74VFaot4@a;9N`HN?Q)V;?dUih*pLItLvV6s#XTTXA7sDWRK!OmpV$#O^j6fkh zyjbqb=+Du_74=#Js^w}fgF?;KNY*1Va571u;?_*Xg>Or-4RRG%Y-^l_SPRnJZ$%0> zt+r>PD0-~==1oDC>`yDB0lWvZIZ_u%FTK^GTs#z-@2M2bAYupLYN%hXKDH*f!Bf$+FG?Z6W_hJaF*#d!% zQ``5_ckFBBi;&hHQeko-Cp2%;=Bw`r*0BIIe_|k{n;c8kl*|r9T)zW7FjbbSlKP`* z?O&yYY?CL+(8GNI7GA z60nC9r;N0^kO~B;8m4XI{K|J_#ghA4=G(YIgmn&juomQwSUj7{6N&`vnY;sS199&D9i$j&&-= z;i9bZN=K9ZmT#&dlqnx#8ogY8^&kO1;&0dZ)J#V(5PrWlsp_aQ@g}7Sq)c^Z5XJvm zkDKnX$hN6g`%@tw^agRU*h=L<02ojw^w0K**->!Maz=rYZ6Qd1njn1q1wJm8+l>;h z*kZ^orHo#(jRGb?GJ7RvWpjpTH2w}5_(_?sGdS(M0(F45{%{S_wmpdC1plTihkfK{ z6__r-RJcA)%`t^7fMhex)910h!6)&dqd&){<(iZ(M` z^H7xdU+0oC6icK}$Hb%=iA$m19SLWo3mqCdOQSoAQsE!$D8lD53Zm#ZLIUh!Lfp|A zjKihGhoJxv0X3fy9i3q_g1M}BN=jc`X)tV?+#M-`$OMrb+dI&2)p$x0wU2~h=($P+ zAwjR^DhkA(vv(gJkFnVmThT&L^|y#LmzkB?K!3QI+2oyQ8suVWw<5N1io-vxnp&lr zaFX%uI;~N~w=9v7Bkw&$r&ubqZekbsn_8m90t`igm4XV{OL(xmiu>+m#dmHn#PGAK zxITzn9jAz2KuzoX>TGp}cw&pje<2F%9wcfVAzBz#(f9Y-(u--D3fxK#oV1ARU^#lD z(U5B_s6hMXqo;NYyJ|>Ou%o1`|9tKa6{}|$yS^QH2IQn^Mhj@A4$!Vm9({AqjK>+i zs|W!Cl7*$*``3ycPEivH@&Nf&1jU6lt8Ut;YSrWeO+Hg8E5Vv-_H*2*h#InC$=Kpn zCI?sWFzLt==T7x#BRmFTu({g4=A-`HOpr~Me4t=+DSY$4JvM!) 
z8ns<7#?iw(M!CsM2ZbI)bML8OTcZLmpP8U0Hzsr<(=9^OT!$FMP zzqsqg#^@1br-I@EzixLw)-f7{NPTuk@LD>!VBsX3m4J~0$H^-s7`?vhfBh-R`GgB- zjIWc`?S8&F&o_dBQJo<3XUJB&!bjaPiX4H80Sr$E;$CP{-jL)qgzh)xRyH$LQpRAk zEqXvp{`TnpJyF6+JGdv>p#^t)CT zwWL-202C}w#r)egtlL4bDnQHP_ybz=J(go=+wTo+FYMtO;x$J@%F$*Cm%3QyvFcnD z;$nq@j#X0Shy&i%e~F(P^eL#|(6W-P(r*wU|1AT5(M<@Fy+W>%P>I8ZY{2EY)lzC7 z>I@2_pTdaHt3(`vjzw6ANTb=>p6Eq9-Me2f$vtq^nWyv4OuNn?C4C>AIoIuskkFQG zjFFdh=2i4VL0RSAv~LaI7m~4Ty^#(ELq^2X1aV=-}+U>W7%m^&%Hc#Y26is7_ zSzU$(S-|iy=@q%Q-hbrsp;ZrVqO= zz$L}*Y{>~pfdfCNldyv<><}>t+P~ijTN?OMVB)aa=>8LSc_~;B>ob{D0x^QSJ2$J|Jpswe}a4bLw@6g+7|82f`t3N z3adcUr_cwIz|sO41cbr1_dfo8xx?f|@zG`tbzF9Ogrk~Un5rUUP@&sBna1xgx_2)< z6N=#yfp57EzOLKB2edVrgKtUV?jLHaGE>2}ZW}(gb^6Gb_XP2UlNITsq--Zs>jWMr zQ?tKUHY%IADEfJco{{J;3Nu}+sREkfP zUot^R(z|b<3d&x~u)xdSVacGSdYz8_eUFbrPJvQ)%J@w>K229XRror}Qze~2>JbJx z4p=7aiuIcld@TQj4oPF4TAG%mp3hS&X_w}y7?9YDDOx6SXvP_nW(+PO9Q6Dprw!6T z@sL04LL%=5UFH-}&n%2{p;NKG8!HqwI}3{HTMEQPp86NjjgM6%ZO!yMMgkYmI5>et zNlw>^WX7TEjkNAE+OP?LoHnWW66yT8GV`i9csBVP7p-JUz=hsr9$qVDR`*Z<* zY~&+&oy74_UdY#wGL_>s!;GvyRe~w$qxh!FVVZN|in<7Yw;o$qOE>u`>&svigqS6a z6(68hrG%r|?^QH;QN{vCTY;qp*%rMX?#$*?6=$hCNO5sYP21s6ytq6V=BcW7H6}Pu zc2u#h8Z;^oqq2iRZP#^VakVy}qhza%h)p1jAGAx?OLiiM0bdSE4%4#@?wPif3@YSZ z&qmxcA|(@lJI}2zxxlTf2yaaE230zxj&q&me8@8V1XVlcrxE{QJ=*L3f!&r!3G!E} z11?|#j)C%QyZkK%Fyc_Y^Yl*xglD~Cf3aYDb=?}?O*K}y~UuDNS+O7B3Q zgUlhm04q16D_>73cGcczByQk6_t_<>QLcLjaT}EO`b60g9?bqxvH8amFCVql4yPv9 zeJP=px-yyzs*D|;Q~JHwAf+fb8OFtV&>`>Lq@Gb|R@Y)D(YSui(2m$qbckR4o>C8w z>&p=`4Jqcj;V~E9_rrCb#JG-x0fnK*=cGxRvzam9-JsW zP^AYn4z9EQI_a+$)W@TF?@#bFo63Kk)GwTW|6Ny@CkLz`EDw5 z%;R#b4f5ztoHDz4d`=m#8NlKVByuNGzbrH$w*Nk*~>%mu?SaRL+}r4>X^9Pu^;sgXd=-S5RfvBYgQj%-Rc96~CB_zP6FD?I47z>{8VyuY)am#82o z)iS+hL%Wfa^^5*lZ=q~@#*wAgn`~0jF2xVSDT4SUw*)o!r|a!BJMTlJk|!MlCVr68 z70-Y#)Y`T{kOj*lv7w!x)pMHF3D5ym+2K73@?Y~X&#HA=1$cJeu6KrC(>w5WUcaHa zGrFE+CZH&K2ec-`G`54ivT1vK#u6c^9af?OSGPsxi)KbXm+_vpo*5%rl1^fTT9as1y56DG+qf_R}~OePy_ZOYhg(NMU$?~dz$#UOVVURH3cG@j2% 
zxwjt3XBD!?98oSs<^GE=8qhGUeMvXUuCoK#q9V5h>qcWNJ9RSOeaVAUJUH^_?$}D> zdGkCtQ!a6y2UDd7g6Kjmh2)MkKxVGf{(9P97r168^G~Mq>rmdrN&PyVznjyq$^6$j z{c`jqcvj>8Zi{R|Qw)``ntmVw$wHp}vfCW$&)B7_b@%i5$sm^OkSNkp)t@y14hKLT~4g5XKCcqDZAG^Y%B3Qm8t`>_juNc#LpjBcgWRYOo0?&90Gr?D@ilF-V zPiezu6Dpk+mD<*W*SJo)MkiGkw!>f1_IhnQpxF~Ni`A1#RV-cqByxZ|CBsM^xoFr$ zaPWTWGEO4qqXpb|$~x$IAKmgDq^opLBU?1Sz_x)lu#(}L&MY+~?~76;$7?(d5vtZc zps+n!U>M*cAVGic2*rY=daD1k%M9F7|QIj@oV z`KUw?)KnuQi((`%2n8+}Ng?CK3oBzkFhi9mm@PXB?jSH@t!!qS6z72#>Zv4pA+n2| zHd?tzh56(g`_~K@3fK`TnulYWe!2uRyKMT$#jg!lis=VISBUxR2eh33<*GKVu)Rr4GFB9j-u#h+N7f%8CcN`8F0a98a2&_ zb2=1)5Kl9~?*c#Ul#?(a#9Fby`6(wW;RZA*+yF&9Zb+bqb^b*@m%v)$j+mvxmmY*L z!GJmr$a8T3vycOhMVyuw>}w@klIj-9RA@qhN=+&KKnJ6_bhTkDrL0m!C6yW9aI&sg^$+wdU-hLB zDb zLfMcJNFi8|C>esx&jyN$(*y3nJH0QBQUR)o*c9%7zw?jmBm8(4*{k0 zO)brSP+(Uzb4!G2+}DQnwAtVN||uIL}Tma>;Lz!gpC^~B?m@T_TIb3iS5{?QN2&XxwD-!#puHBht;%u-- zslGq{dM=9zW`a#g8rMh9rgmCTVv~=AmWgeGDU0jBl2>b8ENVsKccsGkw_zYQQ6SS? z-bKn-XA8M}2s%y+sH&x& zjyQnCD=ElciePWCzC=ax$Hw7#YiDci8O|=|!RB+;=EyQL0@t>^7HX0`1c4dan-O;9F?Sne75 z!!q_tLAO1w5FfY$o!$}LlLcaE&NX5Q#~U%mY(gIS*cY^qQ#HCyR+V~&5f!N66O7O2 zMNK4QfCwCQe}Wy|`Z4urCQ2*3uFH-kAQGS(#lP3{VdIYAUPqJbWa^$3KqMW)J3t(# z^B-$kHGDqVG&>GT&S^ zH=RWR{ydFn0p>Rt;+;OJs`m)!jss}#v@RlM=u*OLlM7S?gT2sy-0M(?&wjfrRlB%K!!p>Mx9uXf@QX5q(lr~aj81?U~JihPZGnKc!ng9}on&JH+b}eb#89t9o{myVB7kGdI-bi4qC_XnW{S=mu zSndn{;EF$u{JAT9t!?@iWN;2c%F-Z^85D4Za{~bV+Qw*&^S&P*>W1;8*nwEB=`A>s zU;y&ohEdt!O|c=t3xXtRiRHm2%uZ$gL^f`2sfn0y2irPaZO7l#SiYq{OZxL| z{s6f(0M>lM6ReEC!}?*1DBKFe0b!tKWnC4|D39cs(0ecIa?6ODtLuUW6Jn6i#uvtd zEv*9Aj4~33iZ|<)Obry(7a1XM)ONZ^C=}Teq&k4Wl&|YB_s2BRlFNW;LSV)zr8m+u zw$Q*yRuRo(QMXJ5UeF$YaPG0K5SK(hAeee~2q=v^N9=bl;Eykt0~L zGrX}5*CnVk&5Vfe$k}-VZ!gfG13-q)Rvto)3~!Cw2^=Li?um}6+v{+C>k63Uj-}9S zl?9K)%Ju9B!B(P4VjVCo;|aytgJ_=}<0}3-!F7;1s_Kq7b({M5)A=f4lSEh{OK_Co zesytcG$7kE5=VvYpfX0IdW&nV%MvHha9LYp%5Sx6-PylojqN{yIw+NEl{YLuW(a(@@1L0kNHTp~lbC$jxAr#bz zPBp487xgW@Ab*wmN!~_07OLk0z4(I}w=LVR_Hx-@vT{3vwjO}-$564)O|DHG7PA^tAs%Q@gky>4|bxjL6r=S9_dk@9!OKUeBQ 
zfBZR+Uu%uDqclL)l>^gFNOrE_*vApBrc{BUz}Ubmeuaax8h-#X+7&LK%NPPY-RHhJ z$M#zLhINNimYR;cPhkMC&&8=R7ghY{9OU3WKR_}DL6Kt{N}o^R^OHtH3_Y4ymR2)yl;J$ShP8_y8RE< zWd;XW=9r%xEVI=%lKgmNdm@toQ~`jri0Cr_ELj$vsx}}fR~GP$n0jB!FlEJ`V;|f8 zw1Fg@uwQH`%;SK`mL8_Th%Ktad_hPilJ-*YfQntni&4N!FA)MBM4poKC&*;@7*w<} zY(@Vp*qZpc0MvHD)6dua z(Au?M`vqI7x@EnvfKKfN)UhU(W8nug5)^C#hOagfN?kq@9;QJlikYEk2VFiC?U{2! z(MG!!hw==2@B-dVqomTC zt{{3HU!gwGSuH9kVex;W*skyaViJ@AB^3L7y?E_fIl&*xPLuBswL%h?g;B0`VU$_| z+mL#C!#-KVmu`OBtDHB;_wo&pC+AU`)eNbT7H@bG(uv<$#?YWt;FD(H1+v%~zxU+Bvd;W6WW(hvw)0a-FZ1XeoP zkWlTJov{5tv8UT_OTz-OpE9f;76)ubhiY1nKTt~ob7CN(DEf!D=VC}Own%G=-7dcNo$9QVWBsWX+b&bL`&oX~n%*c|k!@cnO0+k$KNy7-8BMf< zuEx-7>LOD~B=ue|nZq8NsrDUm(+@tYr4H8poq^1!m4fwZada>qpi!7?lO5hSj7^XTd7Qw4dL%DN8C1FjTE7z89pvR4*de3o(ju zGphV*1Z)8h`+nGu9x*>=cE^(H^7~_5kcESli)jI3%l;*R2{O!dAzW#VlZJ>f**{nf zn@w>uz$Rhe8 z^SSK_eT9+CP6%l)^2t-7`g(`5ia{Rz*u=GCGQ)OaB*iaV$CWqGCz#Y{`XUwm{BH%= zLy(fC>+)K6Vgdm~R=svo3AueEWbfKgRK~}ftZpXYDbbbd?#H9Tx+%Q3ZK~1TmR83Q zyuN%cnJKA#RE61C)K@bGE5s@?(1DAP3T%0BeY4Wke^I4b;eYmrpUpn)#Ig$dsX(GU zcuBG`+h;U5Qfbs0`mb7R)cBun`G3=%3gSve!372>BoGD*36!qFP?A^dYLav$YJ;kj zyTT^YBHy}iZ8n!Z)0Em5LVr!^klVdv`RoGj-wDo&7;bacGo=wjECO2c9C6_Y9DpQt z_L#{gFuDENx*yjX=U54EDb{4g9OpZnjv~0Rc(o%7+iaPwSH`+ZwnORyfDP^$sAQJ6 zjp*!DEbc8-}23Lp|iBHI&`XK2QdwU4f*s zaUPz>pLnOT2W+qqpoZw-N9bhp$4A&Xo#J@f zkztq*DR;9Jzg3b5-Do%I_h9!o!yXkr-4Sf$0*+!)^08`KcH=vPn|J{3BwTpVe`nC4 zhBagDl-;`?8(`pCR$!3AabXOVamRkp(QUUw5zh zWl3<^l;77r?8&d|3ouzei1t$Be|-;&^wv^V0VzC1eu605Y?kQ$w2}?vdv7Bx3)WFL z2Y|t5Lq;`bvy!bTRz1Tux`%FVZAt-zYW`M@-cV{3DD<>zIKDC4Wb#xcOP{qup&-2O z@O!DSTB=Zn3jJ zNn->Cf*}+3I>$Ct_XRY@@3xzFws^J^<*E}DWA^FtMLgCquRY5G2Wr56K-qYe0h_Ka z{YQYYS#p=;aLaaY8+}JAqUXMf^qWL$9(l-6^G297Pfh0E;DOs_4S6|#$cT{A&2=Ri zvAc0`jJ^AzJg%(Y+(5eOWS>+wr#s0zkZzWob#IzvpZLZcKuMXapR8^wGQEJ{wqfZ?EI$z#P1#0du-~Ghl7wmy*NeZ?*dIJ~auGevS-tOpCdZacFI4o%|0AV zQcgV|OV_|>YnlvTT1}pB?^Y$dlvjVFeSL#{%Nf~8%j&HqwbZ^ zdXck^?X$JXYTk^c>(m|VC|rL{a-+SxF@ew68_+}@UG1y3!LSk1O4Y52Av$;ieH%+} 
zDB4jmwOdVA*@snf`Oz{^0DS#O)tmfn*YjcP%&@_BEL~fSN8?lb)VV5I>)+UT*wF1OX3R}AR5zZ6ACERdvpJgX&FA2yYpjU$j9S=cpdXI)ejGB`{BTyiwc}?l9}gAsUw3{4)MulgylUoWi1}G#ExY;A zc<3JE>CTUX}tb1WFDWj{pqGq-1>xy9Flve37fkUX9v4#Z{|TtG;Z;ENe(6 z;^KZ$m%J&(eQf!PZfkvoZzQf{d!00aDAA`yl5~d{gmQTR>(yQ*ZrUA4f3qAadS5&t zP6jQH=f#sbd%|=7gq1!QC1UC#eLPv`$2#lH{uh|;h4{BnQDQHmpSd$3N{>uV;wnK& zW?YkkD3e4^&dQLcOv3VJB!*;0h#MtJV?INa=Cn%b(cFE$+I~w|wlYavlzMeL5DT;0 z)L-iZX^maR5#GrlqT~taQaWIPOJK4a`vigN?Gnw*Mis^<%tB>J0@2ty(ig2)T$nvj zgkE4y{8SWG%ZZ?z%<8%!Rcm=S&_%5q73&5@j4DASvcb?BNpD3rkSNs$_0Xb1t;aQW zP(yTnNb3r)k!~lA(CMs5q}?X6=6=&`oQ&TIk*^a=bj7J3$k9^XkXQA&U@RYbQ+LV{ z$*#>f+H2s>KB412k{(eW#fIK{9ROLhn<5P*TB#hAGb;OnKJ;ge@I(n8`T4&G?Vk(t z#9QfpDW4aiBe1Ej%t=k{uaP`NM zo&K>t?8Lh`!sSPJ((TWV!r+3P3lc!><2!569@5C{nj5- zW$ z_2cIw{xM-Gqy*OSSD`~Tj|Kz59$1Jmfb#8+mFcV^OC)^_vqxR-i%b+x8qAq@a`v() zM(*a?EB)z+?D>2+JE&JaX>?U(5;EvxzOXyg+^;};Iy+P#f)=4XrX|%WWiToz*)29` z4kCS&sq3Yr&-{^QuZy`55nU-TSJTwR+%hG7O&@Z~k6~_^lIZEQ2FHxiHSqw#s@fNe zXgKH#1D^p5@Iu!f?k@-41Q;=5@bWEWWHdt+-9bkpu}9Ep**6x_g&c|^`^+=p%N0}F zK)oIxk5s<(B&!S&#SdqsIMRhj+RM&>hFg&OgTUBTCXJw5$;P4*v@-VC%1DQ(q@;Cb zLpL+LDvWPWYGkBOtyXA7?hK}>^(2#I_|s&av0IWtQr;nq^@!n(qy|}_!7`t6fvQXu2KkL;96s;0%Uv_zMj`k-j#34pLrJzBcg^peXW@?@>GqnO= zqsOuqb62@MhIJAc zYz!lx7AqKJo_tr*6Be%jNkKRqD`0%YeW2AA?CyjN*RrX}id&utO`EOb|F*Om!Ct5m zCT2^(e(ZbiqLYNAT&hF+2~5aYydk>ZdXm_%^D z_aQOQBIn+Rv72tK>uyzLL;%u^^K?uxuyv1h$TN71y4kSZU^)#@CUS_ELn-m*aEn2= z@r70gZY|oW2n#LcaR{fvT^9)vAQqGPW>|@WR{)}lmaOn8wv-EyAh_H_4D_j_@zk5s zre)@cEEP0fV7Jc!))Cq|*s%n()uj0(Y?Ud%>I{dZ99Q`;kt+qQz$Qg9uT9x)9o$qH z$PcvfGr@li0~3h~bo$3JKT)q3=ohrkqr;^wyIMwQog+jsX-SpPBnj zw1DU*5oNKe+vW=vR;9DDjy1%nbW5ND^*R=9EzsYQRIiOitK~deWrU(YJ#Rc`(IR`X zYIHFKRk3I=P3T^dp$LqA02xZXiugmbd_*AydINB#pKl!0iOK(W7Jn~*v({JW$z4Kk zxosoiCH>Y#h!0^&jxzd!%=Uv#RgsWl%a`$II<&IDM8E^`M2%L>t;*VL;--yXi(ge< zWRpHC@xExU;7Q7CW1={BZ65%TU8AN+-Dt60bd=`ul@C}r(NVIOW0xQs{e)eaPlAy~ z`_{x$H?=qFJ&8I0UcqqLuj#Tb`*|+$@tfLj04z3TYTX?4h~v^69TNnK)(r4W6>QUPxgF?j{>rDS4x^^$#Aa9_K{^Gtwz$F 
zy6nLCQDtbri8t==s@4v^Q@I9BaFuVJGHTY`X%aJX0akb_eNp6reh6(FpRpy2cGyMW zg<2rc37>;r*eL0RUy~AY?Rp`fdcE-7XP!lxed4`Qw8tH&u#ghQ<|Nk$_~I;1e=L4~ zEybKf_AW^L4$=lyOu|F5cKxs-5rNBoDcIJ5x4k`1nj`6; zoB#xkbJ`;@2IG3k{Sx;Fb)Q$C61t}XZ{pcNcFygR!$5_=x;uKH`c6fYT$6KT? zCF-8vM22ej%pJk|^io??n(;ezdkj?Mgb5r^okW+{c19~eJ@<(-+6~F5q}c>}Q62r4 z32LTWI9Sfj(F-lXWoNf8{T)^Y!t6t29j+5$s`R!SyNo4$N%9hy%UgejKaav`#-C&n zZnL!8Y#mMMXM>}@w4t}dGvLMFrOsH^+y{v#&HrQnQ;$ChV2E6O8Uy9flIsi%z-;GQvX7sf81Eg z-K7*%=-NQChv*rgyD`}_wgYw){Ljqo&B-1Oh7Ryq74w~ay#_=8AMDYM?$L|*vLp0+ zQF3E^@Bho(`#{HiRQaCWzhAf1QtMCKmTlRx$*&tH$TKlE1H>U=f-8pvI2j)7+c|G} z=giriv-8f`IW}j{IAoc(Z)YcBaDo_2A_fx#F+oF|SOF&^5+?{SL4XMgW56(mD8VEG z3?ogrbv@2_^zR&TOgO<*jVtYp&UDGOz}XL37VLy-Fqnn%==TudEFF929uJn;q$ zo^_D{!&MC5s@fK)87?68!o`whH07P+W+4$AT^^3meyztk`C2e!<+@Z(46w%o>W1=q zH_Mu*s6dfAe6^TvPjb19oWK)m68yF))qI^@DPXQlssK}~a%~q-OVz3b->u@ZTRm{? z3G-l5^bS`zNSNNtJfO4#l(z^3wukINwwTp8 z$;KL9s=qidCxv&v^LUkKzRxoi4ViTMY&F4J=CB@vtqV1oTs(Nq_^{OSgty44+bFYL z58jt6T0?X-%dcX_(x*v+QN@?z(Ne#n39mfFE|O1a?L!&ftIw2~31xU{@Tkh*{J_eX zU1^mEUe%ku=67`+ieZT9l4PLr+S*n7k@eOgJis`xLx#n^N&*EEyDWbd)bw$3Iy zFH)wHT|;Y=y%Z)pHOoXdo$PRKNcoJ(enjk;ySmeaZo0O;x~g^Z+a$Ja`zq`$4gD~Q z$y%~Xb4Og+^FTEq3LpGQ8LdbI93nJa#Ok3RK~}GNP~9$s@e(wN;&M86tZ|=vKpR$1 zwp0zx^`)$}1W=mSR^)ZH9(gnfY!<9pBVbGGH9cibGYS#ICRK8oSaomma*M7NM_wB^vO7hD6}GyJ|6j=e z{jx7&#VjNL>+SSgx`8wS!U`qNi=tVj~t zs7E?|co-uMH+mTVi+aLgSmF>wG^NxLP8m129G-o((w8O9;aS8h0}$u%gi?l#;Kvz?;9BY8~p zSy;>NbJ%(@{J^1KpX!v*cF7gF?+OUQDTdlUz8@04*O{z=tk&qqeh&NA_~Kym5>+R2 z^8l}hQ-J+dtxKdRkkGe$O>IM1{BQ$;SlPY!irfUzRGb}Gi-@!P6k^poMriVT*^S}U z;TjcuGc$tVFIfjXG0iJ%qp9f_OhET zBRyB}FiTbVRRO3s>d&869w%vYeovR48E`X94rYZGbQ>dzJ;|z3c=c>vdtRSCL37e zv05SQ&-LfORGu)b;c%v89ce)mW`{PZ^}02e;>3Wo2}}F`CT%40N~BH*f_+%1@OY?D zX)_c(jp_}m%LyU`$1uC~>o(z#IwU8CiF~TF(WjGqt1l-MGNI=PO~P>}aO`xA%|pt* zl5JJb4)=d(tH~6HNY|^%%c0d5F|5qZk$b}2T+5E|Md{pJtGPLTckA3_-i5g-rgz5N z)QV7B>*pr-$ju%cxBqn#qlLNg#=LyN6}bh0PbiMA@ZVDo$i(^gPFWYP!IMTh9Uab} znA6po`Y_kYMvU-eCwzhMM5pE=W&r##94K*%;SesAB>e+5?xOvs(|ui&w98$E>n%Ag 
zw03{nnY@5yQ!}-6osIZu~2h|n%>AFT;8Kf)j8^A6q?zF-S zZG4tmu0joo8W*t5l%~ns&vW7*>BL{2L`@;3Lt8mHHB(%j`5{^L!?|U^><<0{_2k88 zJvWu{L#^qVHVP*cO}4w^D#~qNe|DIYIuCs~O@WO?Y?vuHNWsDF?t3bj8ZSf)VrsmQ z^m$H_fQwDJ?{(rP`<*C2H6;e44W0rL?TdjP8B&jM^gwu8iHMsUDm&T9=khslC6PtH zmXkiHr3B7B_lG-_AT`FBxs=mjXizjO4{~w1MrC~zZd&vBZ494}K?D&yQWotuUCAqe zwvlD#^G#VWW!8#RL0r>v0-vo^z74BECK6~b)K|YPhcA@8`a*3LO*e^OOx8yzoko&= zt&{vo#Y}oucG%+YX6TcCUW{ljv!H^GH&A>-QfpuW`BG%2CDgw{rd&%$8q4nvfb0gb zMzU*@t}2!*G`2RmNV?K)v@f=Hht<5UD-8xrL1xqdEQ1@&#yE<3tCl2fU>pWYH(U?? zZGh?nBeN$|^)K`T^TKpshH%kw3I!XF$@*gs69Vx&QBJi1bA0;^ryTX`GIer^)MVhe zOW0nkuadCM_yXCE+FB`LDzpYPoJU}*tuI5pQUdQQYZC2Z)Tr~c3}=S?H?N<-(bXt# ziwzYusc?C3>#_UoPdtIwXNI2+tf5#BXJ(V6)7WSO_uB3mmBJDsEft?Ji2`1&mCNk*Tl8(b# z%|*yZgH^}+>1@Fi>ojLrM|eJ=_Q3kAdNfM~7mD>yNTa{<0LMV%yq=_lUrO?xB}bTvr?M<3^>CTacRxSYsyZ(yLHOq z6|Fw-Bcez~?|>*YZ;KqR(==`)nkQ^rd~9ElQ5?5#9#gTYc9sF;8O9&n%-WKU<9M+? zj&Opr51?c(L2}49cu);a>Kh=?>x62o0YK4KJL#;bRXCWqQ1Do(=Do09SUq+Gu!K z-=KPzrEQFd+s?t3^6nQ8GRDPpjECFKkE(h0MGf;B3`ZlOVcv(aF&;g4!;d~dkLwNd z{7cFfmo%LY_=fkX;qlP$zR>Xaxf?$8wRdD1=KYiNYWR?E_@Ek|3=JO)4NsoC;RnB% z9b+D7Ij@F!j8Uk@!wd5`G<-NTJbmtlPpRQLs;AGh;p4txo|0i6hlY8l$HtiVP|m*Q z<=tazul z`jX(hOv}vGk*pPHOURBUBwmwJdpGq0RcafV*vre?T9(vJ5Z$=m1UtyQV_RH6Q zgm_c^he=ps{fE-5)Zb72E2#gZufHp;|J1*?)PItMZ2fN6Z!!t=!tKr`oJg}zU{vW& zr?H`uTNZk>hwStSIo$VXns`%>rn2>cF*lP

ZwKgvm5wEgnr{wAe|DZS--DG+N{| z)3>{2eJuj@nAM#cN(!4_ocDW$8K6YL>et3&ifYUi8u9eE-e(O!VR6y zfS?wKvk9|lc587VjnQJB)<(!$)n}hXS>NJe(tL}KlN=I7i-)pxo|_gArdgpC;qipE zp*#J){?Xs1^@qAgNNB?0cs4;jn#d*$rxEMXSQ?{8G5UsfdNoNJy=tddD208iCrPXK zipMLgS0~cKDK1ZE6V&pqr!%k}Pk*(RPo*(hZlj+?THZ}|Mi0AR-|7_crqP?s)+Y)* znoUrPGuecRG-54Ir!iVA)7mU?`dOx*94Yt~_dL_m;%*X}u)8mtpcW5g6LzH$YjJNH zGkRI(4icw-m?Qeu8r&*nPG_YJ)#v$@%y5oMH#B4+oa!T^s+GeYU+Y*aCmBtdK{`$o z*#z}rvMC{rSRcmI7<~|LY^P;mZ;EtUW#L=yK54w26ExU877-qdIXc@kc(W-sQZN>> z2?Aqi$iHT0e0nDR)xelfW5%$LXlLgR|IgJ`ZcxXGuiA+&`S3pq5Ail5rxk!H^NCf` zy>_|-s9jcBj_vN1($ui$T;*;8bK!i(b?5-Ji^N@xL?x|qLm@$G8CDY1a~MuaB_rc6 zC2;~l68H5E?0mXTwd0I0IkRWyj?}MpZh*s3U#I5)7^YRjeG|xrG2paJ9=CIBx{W zJv}OiTbVLZ4Pgw%Ul!MTjkts*1tP8I;pI(I7`%GQxTP$_jTC%-U@kZ#xq$DX($;t?d}B-(k4Z_eT!)Vc5gLf)94*=T!g6=gzhZ$F&pbcmnFz~y za8gjCXY}1zZjP9GyeiF)n)=(5MFua%f{d9M>Xzbmxje@D+{P#i680VUu|P8`K2-BxWrGwS+lyMeb*D!zfQ(#^fUOOSce= zEavg{P(rK1hP+eK?Ov?kJ-_t7Fc3L@{!aF13YKIJ53M#z8?Q)HdYbAhOMeR%K9gbr zKn~@hY}JwA0%VZWZozw?DNj{g$<6=4&E6mFs zk^bcixm*sv<@%?1t_&Yr1v6&-mKlm_ra?s5Rf2p7j!|)!idPU@;1Lv~r216g!97wS zqB>E}SPCOvPnorpqnQ+@7p0gY^*l^3QYJDfyemWsWRSL%Fd=4`1aqtb`qtxzN5l6R z-(ryPDML`ZA@Z}aNEC7ckq^hlrV(n5fV0SI+tIVCSSS=Qw8;|`BKub;#4+B2N)#g~ z5P+jLV>zJ8A8XW+uEC*Yi0g$o_7<9{DTnQs>)+upF6Evc$0R+j3y)qL5BSR3ePx|i z8KWTKi9zjUgg-`Kj9aH$3!--y!%4PUCH~`%?M{R8BYzGq%LDZu8tE}bpzEnTS)|oY zz1F%LyN;`$!A1+`zF^M{$%`v4b@e?jr@uqtTZgb5baz`5INPO5x86Kdam(wwD)+qy zhb!)uZ|=qfz?86D!M>KsL@a-USAz7UyK>-@*~mgSlT1?JN8rVn%Gk|kkr#180#yHS z-pp{o0Q(^`UP!TfexWdkEuHJXjx&tX4er*Vq0&yX8?rI5Y1AV;b!nDF7v%>~%iX0l z(@&lD9X%`B*B95XZ(Cnr`gGX8Qfa*(w)KLVh08tk^I_cD1kr^>&D}@BM(>%X)Ag~{ zZ6oL1X=1vH(c6KvirJ69y@LI*JXzYh=w?W$SFt=)aWqZLRKcYJ55*n#w^VT(31?Js zOPZLeVoh4b)Ke`TnIxfJMcR>xG%-^}KPPCO?zw2!^9SAhD->6!|Jqjf@Q+w;Z>7>% zZtp6EU%EP0r)|Hh75ulSkP0O!27bE;28=n~2im7TqDKNP7^bAvhJFA z#fD_+zY%_qnjOb65o-?P7`M)e>VE_Lkfs@a3uj_^JVKN(BwU11T<3bbTzF zpdq+3n~;R=-JT|9U^BIWx;F|?O_O&d?TH#mNq0C+%v2#gfalaba7C@Oo}1bNvFy5T z*l5yogBu$w?d*=(QNy;njOUU;3|^D1J#N!V#R3(^=evwuogHCYVdL}oBI9j4Xm$B3 
zDQJQ>9nmkQi5YltmDdJ{H=_=Ou35h3zCEljWQ81%X(1qnjH1JSWF86p5;|+t%gTd? z_okHxH}H+sM&G!Uw^fLhMdZyArxkJ$4qGTzOTQYMkjcn#du4>JqI^aFggi0{y+}bx zA~F)pRT3~FncQ^}ld$zu*>V&lh9WZ>Ie3B93nPKV{SSUc7G+;UnPt~;KVmK9y~^DS zaUuS;QjBH_TD&a9c=;sIjHbS@Y55v76Da20;%6CLUX;NkcL3@q$YL&r<4l@uJvB;_ zj@+1PIs?BLi!se~;iDnYfKtv@y=NC=V$^^PU6-IwAOM*hol3RtxTPRROBS|`H1Bam-=I*XbWz?6l`d&@mK{f|FHd<(`yeeqtlLi5;4ZMLOMK z27Eoj;-EMTp?A>ZZhTAaBD+?yQ)S4*MlIYkk`FmJF(mJVxZJfa!AG3u}tSx>#zdV;|$IkGPGI0) z7Y99xeR?GapFO09W1b%EN!VibxX;)|a=>zW9mO@;t@V)8x63|YIHq0JZaCs@eYU%w z+3Syd{M*R8q%c(?CNd-6w{C?>1OA#Nfm_U>2{>zQ)$N1OAv zuw%qH55I<;jWUt@Xbij7_91fj@JV)kO7lcL`eZ-dxO=JNtL;H#;gt)Obw@HO2QB51 zdI~okc|E5)MKU)MmN!LpWkgntXtZj&Iw3N#Yo7~j#{J!psyb*eJp(5;lJ=!T|FMs zxqq=)YUXhU9ZgFA4|UW*$237 zIGO$N5&z?+940!!820rPB%gcwslU@|WWI&b@+oQmEJ9A%C-qv&&)$Wljlccc&GnN_ zxzQDp9kB?;2_r9etw?LVt{cr9n(VJXCaJE`L*<(66+^d;a$M+;)n*qKoc>k94?d7e zfjYDJ%;OeAairMcgNmtdP{kNSo|*yb%5JbFf)eD`ttR*PSYBh3mGB$bs3H>{_ut18 zzGoBGSh5D|opv{ya4Wa61-Q&Dj-26fM`<>Zz&J|-ZkBtkLkpZv z;Y=Rvx2Y;U{1m{>Apr)kxw%8fkx?4IwVTb<*?`X(A*7N`OR2K~e@(Z+&L~t8b>kJX z`Nd)xbM2xISB!F}V{N@`tjNVc^C*NgIien2)jV9NEfbaN{V~Rv+bftJ6Ghe`sz(|& zASU=X8uAH!)D1Ouw=lVlSP1%i2*{$5Z1X4qdyq^?&Bw!K&>1d+#$5K*xrXk6n(8#Y z35uOETm#jg+pM#TcF8bDxl^U722XmJc>!l&suoK2z9ZS9sOz510gM_2}WzSioHn{5^C2q z1-lE++x8(yRW@@%H)gcNzpq|4m$AyVS zDX|8K+@ZC&uLC2=T=>~ z;oi4(OJY+)ft90o34fmT^1{1*=5GjOJewl5Fnw6dri-S7pKMr5C?WnHh{}=H8mHdAR|F7cz7Y4Jw4tARzs1%@@RvULGatOs2d|E|*?P8u+nd@_bjYef zj_12deuP9c=IoXh#GIqEud?znQ}Ax5M$0F4ys>WGv4Pi;%2@n{wm%f!gXFZ0$%R`)V>iI+nr)ZC%G+QS%un^NV4CIgIpvQFNH)TRpT>WM8(R`L@tAP;HYikh8ZY3l&`&{da=a+ z1i6KKNga+Wj3{9{v~fdC3jO$FWZskIdt-usuhbu#I`MVH=xk!#4KThV7#Y!8S(V zhHVVL4ck)+!8VrYhHWg-4ciA4g6;7(unis4mG1iF#N^$cKCsDoUf6cy%ntW5tqL;n z>(&(Q_EU7j^M%|GiM)F+6uFpTGn&oFooojgAorj`Ncn(5NO@c#q&%h&AY-yDpBTm~rjWZ! 
zA;>)~B!S!og&=ocA;>+c5agav2y$l?f?O`W8*+~+1i42PLb=mOuV+xM>}vFA$MZ_L zwdV`D4+^>AVDJ3d!UzbI6?S8(Hd+&2%3lkv3P4I+O zMtoQhl0fb$g&_B&LXbPB5aiA(1i8l*g4|;YLGDq7Aa`0JlzUhqlzT`alskzCcn0N8 z()RPt7A{7RU6UdhNdY4>lN_){1D3iGnqTN|+4Zgp?}=|-%NqshI**T_4tEsy4;i$K zBG(BmdlZ6}-3md=utLx>q!6^6rmWC{s}4iUyh6}|dk#a(35B5LxI)k})42#*X5@O} zIde;7tL!?zHCeA4M##K~qJyU1yW$PrEP`jDc33{*Ot5oOUoC-j1T{N5P)k1GWIGYUcfQH7u%2QY^I!wR9wDTSbaQXy1% zP$B3)pb+$r%bes4F^jgJZ}cO_=`NQYhR8wVdY;SUG1i@Kg_Ok|=u_N^%B_YyIi>)A zm1%RUYaQe#n$hhzCd0R$uz_5VrW3$w2 z6GYstR#xZ?; z1lRcy?7zSfyiG`*%3aV9K7QUq_?^Xua0)c*zF~F_4vYF2!d(g(!qXZGhHyb4LpZOH zAv~#&Av~dwA)Hmn5Y8xM2#+ab2#+Xa4o>U+)YdsTP210X2(dfMvOt{MY5?0`!wgL4 z)_LtGf7Q(O+%wahfQH+u=ag+GvTyUG^JGNRdZfDv$uj}T#{!a%CeAw#f7Aw#fRAww{{*it|Iye#$T=CX8-_SpH|c-{SulKdXhlFVb@C8(cdcd)fp z{WQd5Xc&oX7jF35P(SU7o77LE$~Q`mjG#12ny`0VA=o>n5bPaQ2==BGg1y5E!QLT- zU~f_(*qcxY_6{fnd;1iEy|EZbTh&ivv|ZO$o|i)8Z0e_xxOQpv60N@&#%#jhFzP4a z&TkQ$(CIfe4Rqo^42xCNx*nZ^+iUYUwe2vAJETf<cObe>WOI_C?EpmUzKp9?x+ zw=LSd*+Oj)V;RtSyinU%y-cRMLZ{!icy#hEi1gmN)Qeucl@@Wif>xSyhz>bW$sq4Y zv56LsD&J9ZWN2|z`W|Qz#}c&)Ol88TM0IOiha{#Pbx2=P4A=Qz)LNP&`kec%DM>JcZ(U3dQpjisva5 z&s&VlFr0c$m_mWsLS{To@CVf^g{f0(!<22UPsIhm>QMFe_te; z8s%?eLLP{kXlg?FCdiSYDPBc1Or25)rcNpZQ*#Qz)T}}WknbXN4f{0l&w!b)k zU^T}h2)L1x-EgnlVrPkw!`9baY)E}?)-4BD6XI=dLZK{DGLJD$lIA8j1j%!FbsMs^ai!nhvD~WmD=LX~#2Z z$3ohCCT&h>Tp72Q8!$UKLR{ttvkGl(h)%6eQq#hofod&z1*d6zRFW!&p4j*VvqO?( z^$35n6?+v2To{ncZy!gfCs*64A768;y@3+TJ2uVko})wTB6Y&Y?y#3VrVhL^cV}^O zP;&ojnKWa*JTxjwifcw@jt0vLCXeziQ-7pz_FG?K+s*tiYWlv+Mw)->_Gl&DsGAn@Q3fj)c&gS5)fbZc5TO2%PkG= z=PitQcSX3jiTH4wxnpiCv0+*l#f4&9d#R%nP((>dBgQcQ_XAt=EP#>w1q&TkP6OLF=5t%~IZ{qq$_FzkxbPy+ zKcp#LFDk6yzDfsGMQD5F(o5g|(KFiaTYBjeyXp7g<{?|L^x}_+ z2|m(XeEHIg-=WsO*j#+s(u?1EfUdXDTV?6R@41uW2%Q;X?_GN7$5r~pO{II5UizMQ z)AKD&rMs72`ruu(eQ8r^BsrdLkr3%$VOy{JAXt}SrnpKMM{1=_xmZ^6$aVUICHGpl8Y)$gsb_}U=Twxjr?@Mhh56A2rzc&7=TvQ3v6K0+y<7&A%}!=pTFBm*GL zm`x_qO|5DoV=6&%fn_}zbl<1H@-ViUoAU?O)Orwj5P!LNlO%}3GK0tZFBq+}BHXtI 
z>pHCt!BR@^$Q-hkCiE8rB7|#p%&YxFLWkFchlHA?t6*HH7l|mb2KPyXht|-)erFEx0a20z$5jWW|~{Au|5TyQ%fVGbb8H0ite;cuX#Vcn&I4aPB`tl4o_u_3MSy)9pD`f@Sa|? zH5$=3aDIR=z&YFur)I#SfKB*9F|Eofj{r$SB6-8MOg7Mq-V&bjt6dkoUOT5W)I zR3S`cHoH+h8xCghBX z)FZl&TiKrv7X@wF;3iMLq)ZUuvaVg&G=pu^FWEgM)2g7jPXx0&t$RE}(k}6uc3nki zcX(v%kiCS+MsmoMQBqCW?aSMB`Ehmj@?-t>WlLF8VNr@ZNAAJq+n3yO0i9Cya5d3B zQB3&+Ab10oF+jRGf3&7c09=f#anM0HUGqmGX-T`tfo|jgz)^ZgI<#^BP1W=fglR84 z9=bq}eJAz`9QLP;uGTwY*I}BGaL$fMyBj)UpS!rM!i9t9JZ=Xw{TQ}>xMB5a;>>a5 zI@coOHqkt8GatT?ar=czwbv1K=D49AJagO@c!E^pb*k#eZN9qrxapo5rFW}e;a-`I z+wL%Kx*n&Czx8o@T;QB-+_;E#_HjF-Bdy~$l8)Qb=9-bN`}`B56`j#R0gKV^SfJm% zfqq9tzm0~=R-TQ4?jT}irbp7&${itC2}hw!19y~*6qhA0ue{VfLUX+8Bb~4I3^yGkbAr zpm4NM>%I;!R`Z`;1+2U+s;zkg3H-LFz+sALj>kb^lF91A-*Q8g4e@H4;j&eFWvQ^N zQ`86XDRVKos@!lR5oW=f%2y&W@4eMp7k7nV1EW<8ygUGMBw*j1e0J%etV~netTNq{ zs)h5L)k4u3H>D!I2D69dB9e6r-aV~OJv+rObjZ$CtuvVEAnUf{<|8MI1Wfa^F)r%P zSx@qc>u9kGUVExtnvDc8+jHh&{;k`YMeM&{@%pShWhW<0csi(Ms>^k~x&F+yp1bp1 z)Y!hlcEe%vrpG60qm((e;?OF-!43&^F-#ZJTMw_u5&0Y`BS;Tc%9fY*|1<7bs|inm zs%}-NE0XqJbhjov8}?AQs@k5^F3aV}hJf)3?LGg^>{z9H)YpT2XhHPu`%;k$QyB zZT|BZpGc9UCNER`o_HrZ!V5v(?^N(;3-a&@ zXyBa+;LQuX`R4%MCnhcsc~7XIPVF+tlN$gb?{t7}LC`Hc2j~_)eu2>4u0xEO7Uapd zfxz395Hc}_2;ER};S=M17Yg2_3hLx9gFGy;47%L`x?w>#{2ZYB?7bI=yzi-?)Y=*7 zOgIB~djfPLf^OtFK=-&#JT7EjoL0e>Su3{<8ri)8x=}$l`W&EpuTHct1iFV*@DNG_hy=;KVF zpubq96?W)?JXfg;vZAdrL}sUs-!EV+DTgyp@ys22vYElc>jt5B~7 z(gw<=m|D!MeQuCcFC(4Lw^Ziwt@S>M;$8r!g5{7Y2+BG=$fKKj4rqZ|DuG1?## z7j8G{G0U(QjedStq96U{N5A>s62(jj21eT`df)rFfYAKj zE+y+Ku#7ztJ7X)y2_F4$2URfR*w$?1a+UYx@$xO(I^bJvP`KFr8dul^KPRZ%yS`lU z=>_+@W4kJqAFV6@>P1oejrqR?#;NulwOIH9f-1H`njLM-Du~Kj9So*~Tg;ZMonIc! z>8F?A0XV%*_6_=n%!EQ6k*6kLIm?MLV2y>vU)84 zNuz|M@cIDr$A6FNds5vW=U#N|#(0qIa~O0bF`S{jhUa4b_5LTU`0~|Av>FF`<6?X4 zD9zsy{&vdcQM@@n)?SOCnV5Gd+N+)X$c#V4?w}g+%^rbA3V6>0`x)OnFB<#i`4s@T zM51Z|kiQ<31co@#t}(!3w7rTn!I)l|$3jb64N-JEgxrNMcI!IE>DKk}^Y6M|Ki^TuxlZK^zaL%qpe(WLc)_=>+v0q? 
zuE#?k`Tgj+2W5#}$7^I09oR|{OGwaKSNW1=21^2Wm^@!p2yZWK)%7M=Eg~_Cn4IQT zri;j5RuD(ILA)?lJ{hA^lCM^)YC2SPIA%@I+IZ_h)TGlt!eSaoK2GY+fe>}~d&j|8 zFsQLbiLnNBjm4z8%cgreNP>wTJA%T0YRS1n4 zD-hQctVLUSSh)Pk(}h0J5Wqb$!zK5~&eErBj~e|UH;({SzWn<^Wge6zQkiw#pPTSD zSL{weHnE%GZ(CBtP9)IH$!5AaVRU29Xhne(*g&L2BJX{%J53!>0CXmaWkHp5F(8RI z<@WhY>DTIeEU&ko)?RykEkbTRrCfS7ALHYAS_p?sGlgiah-rN|C-9ngv za{$$tfJ5eWP;=Z?lZ5~j{h{6AAiC{3f6;9hqT5%0KgQXEvcz#VLCrJMDfJq!jQpMA zZxb^;){M3zMe!|E(XsqHV6;;CnotJ{xe8+>GNTX@;U(!=)yvcK#tyj-njw+-=4$7x zT2#oePuz|*DB-lKcvebSP_AZ5;Oc6uXXLHzQ?%WngxzgTXG*g!&O zYojux9WF~6p58zQJ6g@w)0F3Wa9A#P*Wc8%iLr|(NODrFHLXm?o4PSWZRyH1Yv1*i z3IBgf=;1NtY925g1D@~7bd-L4{r6+)c~F)(^*}CMstKc~mV`EZ0=A;<L{aW?qVBFa~GTW6_`CQjAdwgx_JefGG?#&DAqCpYtWp)H9)LOyf!Cx z!y+1Uf=7F@E7%@mPe_WWd%V`i-`6v(j|LN0f+z@sA~15__@-9`u4l1vc{ll6%uZr= zzk=zU``6$6eWh}st~cZrknjGT=?z2q*xm7vFJDhM`Td)-ISRJ3k*e{(okJJ;(9JC> zP40{(xyt9-Q)`_T73gP_pH~mkpBKF*+)#)91sdTb$Ny5V)*_dCojK9qK@_QWrKRJC zTn&z%=oumy59Ui3?cfJeQ0%j%(wheSI*`OWP|aD6()O}nqlMvfs(SBg?H+wYTDR9I z(9SihcMm9Y+midXur?}r4eoq_`x|I*OHy54&iMlSF!pY;tjuGjbVY6}Uzj~fjjTH) zTqNf;jmx6flHB1BNG^;1)FP%WuXSD)z0smZqBVN^C(_g=o#3eCW>No&F9*v8sd`aX?XdqEzefc!@~eD*Kn1 z`@#D=vdKK)yI9NkDwcON{*01=3p0ONIT#v0s0L~4fN%JKfWT2NdETr)e}zpD!Jz(J z>hB+yIpPVfms!prN9ST z3z)UgVnq-T!6ZdKq#_)GG^zwCoH8{d1r#6ly~Fu1hG?P3hX~yiKo!_`W~#6;IqD1G zg&2iHC@`%8M;MNeozX1Vxf}(ChGcnD>t_rS=Lxg99kn9VSk!X~89HGxH#!;Q7j!2@ zKVsAN?zG0MD7Vual45!kxeo0@(ey8Uv51ctx3DYMpdv@y5eaw54o|kjO3=OJSCh%p z{wpo}E9o8u-L@V0YhY&8+QQWHb!^HdR)dPZv0n7GRus6Pddl)}nCq! 
zetTuSKHXTzLZ!_k4k--fYXw<;!doJRCAUBYnUPL8!n}SmJQWH zye%n!1&vs>O#=n9g4R2P8UuPayg;iU?y!Z9T@erui$4rUAX$UbKnDV@mBCMlmBFR9 z%8aPY9+lY>;X9;JrW^4SqS+2K1)Z!xVj*eUQES_ACjZlt9c9KdG=%Gsw4sCg zEV+ZKb5cL!dBi;piM3S5=;Od7Ej8s!O{ox*Mr1<&)zPLz;n;k{pFmNUmvyB>Oe2p`z5w9yHcbpoV@hSvlXip@M zbYyEoXA2t?odNL)(YzlwJXr}h$7AZB)%=)cWp7?*LLu~FX`wk^2uD237ZpNFr@l1U zXDD;JQ3eg08o~s@baKoE8fxXB`2>iwov~Rnj99qHtQitJfI(tLONK7Z8k-?TPhFm# zN;Y2t7qPyhd@Jdk4~3lI*(2u+IQS7`^zS-Jao1SX3~Kbm1zt&4ds=Sp|6S9PCWMO% zXab_kv)WNRS!-HeC8nZtQfmj)8j35{PCzc{g}}4_>_zp84QpHe~m7qi2 z#41HcuLYGaB%H@1oyG1wx8B5wyK7@k-Iw-t7JxsD_#C&6BZP4{x%F<}(szXlPaqNy0pQMiGq37cEjJ zf$7#FDT@s>=I4fuOk0vqmt~H1j4N3e@BPO>DHPk%_GA zJP~W6q>Vc@O7E({h-5}ni(Lm#-@dLp(i9_4%+;2j0#8fnVYt|JU)#;HTGd(_^H|hc zaRepARNM5zq$=0laaA6wjzZN1HLYsnVqBos$Z92c(X~83#S?qEYgvXOJwyZpsT?b+ zl0N-Ocpyg4g61uvWqo^%6Uju;b$$~p*Prxdhd70m+b+ACn4Eg5;*9zdE)=Krk%zj( zbH%svZ&k~Lc9|#xLG6qZive22x=3X~L3|-Vn@Ux{Ii1k@4hH~X@l^emD({%*%CuuY zLPe+qpaBK9}1eXx-Y^4S1;Cg1skHa6eOMl@#3fa(b80af=KFr&!6MOly$sN`ATX}HwzV+p@vgD9VDz7Ww9;NCWhPe$s1A&P;j@&5 z2#XD`EA2o$Ks>3uw$`;D<(A1YHwydUV+X}FWp|WoojU=u>*`kw@2{32PEfS{QV`mH zO;Wh#dV97|E73E#ov$It{RbQ@t5*3Lq;BGRelh@ZC)kBtiB!QNApTg= zqM;TwgTPIqc8zp_4fiAkFJ^&p4`P=<)M5+X(MG9XDoMNyyo_G3ztm8-d0f82^mr0O zG}S6s)gs%-+cdCL$XaX#Qcx8A6HtohmDdTuk^A}A;qi(4s)7MGK_Kns>5hDx%ru91 z&5fDm^3^!nM*iS(znx2SY~uT_1&p=V)=Jky7#h%HKj3y;zVZ}8B(p8a$t^tjUbS7t z^>lR&wf}ST@pttB*tmkP+v~RXH?a(yWC8mi?Fjc zcY0(`rE(DVh%K^~$?!b5XKi}cEo8S!H|KMRa7Bl%t*PL(1H9^e3k+F?(yWoN3~YC7 zk4Kn;&1Z1@&4XDgk<0*-+mstJtDko6^k(wjeuBIyM$)ld&`Y4AD6PgtxShn&#-85!Otl@9eQpL`_!Ai5(uK49yM*^ zef6PA<%9k(NiQH;P9D#+9PZMV1lEvC+)qb*kuTSxet*a@5~FGYZtc*?GVCv7=G~{w zGzsrhg-y9(Bg{>?OQjx3|I-ifXB*r^vNG?UfyD2w?_zuEU(YNZq5vQWBX5&=!NH+d( zJ;LG&bpYyJ96j%J<|zt=0^x>yo?3bS5EB%4~~U`Z|B_7BaV-;#~f;?3Fk(~TBQ z#_@;{^r6zwjbGLC`EL9)8>iAAX5&xRBa#uQ=#E%xw$)e%97${^8%<-EczX^*AW_Hi zX@#|c$U>`u$i_7g+4vpx2=xko?g1OvQ0x65DOCHddd@H`*|>%!8-KVS(R|Ol_j%&$ z6r_73I}rXu%wwW-60&<|zA9Cbi3I$zKkfW`L=_&ak7vP}h*PM3Rc|O@IU5(2v+*bE 
z5!NP`=aQ675(A)CuFRU85{B=#_qNbPHcrJiXX8&dDuyP`tklNRrCl&enP?FFb7(3b zDy_0Vwsu{v@{ar))gB75&=As>$)48ZMshme9?JGa^pcH>Ub68!>X9&U-;2ohmMYdh zjN5ySTG;ySq)8X*WaA>5Z2aMRgh>GB@xx<1x;pnC(-FT=a6Nf>X2-cw?@nZ6 z+ylq?C~uqoGJ7YH>Vqj`(?krhfXai7G5b{=)RnHm5GaJ`jeXHvvDCe8-KDM(Sxf{;fG@lq?WNTY#)L|^{pwt z+<1&_SR%J#M-R>d_!q~ySpOml+ID0G3x>ujGMdwGH}z#K-&fw8^9kL%;U*nC|71 z=mxGQ&cA5q0KZ7shbQ#!sCBy@|8w^Z?h8NHH2f-dS4?)vxI22H zoahH+eNfbLdxKhZ%?(7FfMVj68e4C#)WvTY5Z2S|LOj)b{ui0dyeS8uJFoHe@+DEm z2I98cT1YD!7ineVPd5-HFpQc|F7B-L&@Fn>F43+eksj56A>xYMtyowkVv-VhJ6>l9 zv{@nY(|oFfZF~2~h9LACeW%dke-P{+36l^dOm}25|Ct>N;@G#cfJ4Mz&BjFm+4#ft z2+a%0XQ?}#Aoc8?y6(%ysr&A1{Em7=q*~NCX>jR1L&T8kAWZmE^1F>m#%|4aW+(W% z6kk18zio(rv!L)9PqT5l@xyHV$$CT_GIZl%>qd_{#I~bLz@F0M5OE?Ii}vquc{FE;ew-@m zaPzxChLufN<9m7Ldqw8EFY~=J{VuyF0k#SZI;7#&jBs9dvz8cOmi#1^*Ts^F0HrlF zj~Hc{-1o@o8Oqvg2hal{%qhwA-l6UwB$p6Yc?2lloZ}&zIyyd(qJv!lKrlF)a-%{C zaEM;plC@Vy{}C;$e&qi$M}9Fs`jH)pNz7HTQQ5%GPXous%qI0zHqMxx z%*IdDBYv{m?&V9Om1%>}Xk{7EM=V-hWqefX0qvewg<1Q>2FdOnZEk%}Hcsoiv+-T^ zi2A(BGoLT2Pt{J~;bEV?>#Lk4K?+yq>7HC!YMyblEAFCGp`7Ch+7=ay&p|aK7QtM2OTgk{B zoLcYJ7&9=Q7DWU0$#0svZgV3OXEMHgOHoQKs1$}}ZmPMICM{Ia32}&kI0(_Bga+NE zf>nkE98L<(4Zt}_IH*UX)@`qRs~B2_=rT26r&#N(yea9lGYC6pk#keulUmt4A|_?{ zDu_Oe^Fz)Tz`AeTH}^7{>um8`_)6M*S>5SI&b$^fvxP$rB+@rj43z+r3%KfMJZwh) z^DnU);0cEmojM*d&^*4RL8?WKH(~J=vNcX9%GvF&RkYc^M2pjn)`Ei+9Ng}Hr-G@& z!<=`zA?D-h00IfG-(Ks91q}JChE++~qCc%Y1s5gdfohkTf4g`7ip#Dr0NQ6 zwcCiNQ}V>}9X*|S`<5}q>b38=WZq@Ye&%bCmLf}dTM|V%joj8 zt(1uArrGZ{lj3VLB=!AWXi4d47-KkY1Ho$d4TKqAwRGtt*l zHapwB-;#Q%EF$TIA#!hkai3uHsMHhIfV)@Vj_Kzy6!9Hy4`+poOnO!1#_5A0^KA28 zcnnUB#Tr?iDA$VB6o3N)pp^w+eBR+OIj`n9W8BhsNNLGTU{n*5r;RRv34Gx~nZToq zFag87$Mg+!NzZ(VoOtrTD|dEbK7w(UCJzSYfjCnH`}`_|_H!AH8i6 z6xt@)INn-$VHNr`8R4jx*;Zrg>spsw4ESv-9u{eJxM1AhIu)lN;}{-Qzc52yjMti{ z;@`oAZ7SAt)j3&9+3Zvl?>56VUn)$+_UyuYG{A_BdO8!PJ-A0T6)|InquEsCWK2`h zpWEB|i*Fjgh`QtSAz}eGOCrC@X#w_mYq)5Ty@=_~o2H+w{+=pl)o9eR0?(=S+2=6* zp3yk+KFbA6zxz&6^g`C)gDQCD8jPE!y5u45WoLM)8?4#Ln=!y*I3Y6%k-=uI7)&Ex 
zU)UCNrpeJ?@GXnMmHC$C5Z3H4rIALw)NYGReR3>Y*qp}|7CT(dn)=MzSjHDM8-=u3 zLJ7?KWpJTCC6)Zo)~2@wGCSN{trQ$-8BFb5q{8gO4peo%D$lcA(N!3-S&ptZTaL7) zL0UqJYi>zmAU0FnHZZdwZ61`NiZTFl3HBg%^^i8qZ7gu6ZN~V|-Oi~eJ(h*AWIFXi z`SAFWa~!fta^B*PN}q}lol_euoMkm|4Brx7BF@oKF<}xX`xJZ{7v&TPyHRU_p_YxR_8sJV4Qv% zqq#ZXg*PdTcg*(CgVe?>W8FUc!Xd-VTEAoU~4Y~N9kE{?i&v@TTi3KxLRB;V}`@-S44x6!M@F(W%6V)Yu90O5xKC$9{|E!SBE2}y@^3)V{emO^{|U&%N9J4_0?q2 zOE0)Cs@)7uV7h?KJds^Wz>G+geJcjmX`{aR{?I5>QjLo2`oRoAg6fd5ZMuMpxMP-I znr!aGQ+4O?Id`0hCk&B-=303(mqi<*ZP@IvzJ-P3#w050XC{5+o{lmON{y!2b=>!5 zPX!l_z3-!-{Qn$#-)Htdf7m<4PO9Mok?yo^8yh=DLt;MMiZE$r5yyj~$$w(|r4=c& z3i9GBUI)_zMm(${kw*A|GxM|MT_itVkRW*Xi8J#fxh;~vpZv&k+74oOf^%Bp=9Dqe zK%4srKUF5`x0Z`GC&eos%je@T@nz@7BRTbKV<@`4ltK*Z;#r|r*?ZuyH`(S z%gPYAe&UCZ6IKc-3zDxO=C&=Oi z9|-uKGxK|bKZBxU_api5grZl%rnQpuCurQ5t>u^GVy3v5xGeYigZAgRneM|CiP`#` zunJ+Dn}?RlgUs+a)h&Ro@^4{VtF+~yB(jSmOuVJ1bEU+_S_n$N7qAgLHBOv?k zcVKlU)s4@Q$XnUtoHtJ$)JplGX?42i)ot=Vpp z%P{Y^rF#=zO&C#cV{Hp+i4turwkyzU$(P=4XX%51JWQtm$a*b9a-SMzRiP=h(4D>ZDC8ShLYUD$g zr>twCM5$`Pc46Bhy0BVuqDwLCvAa1QkX9BHiQ$iC<182>+4yihB9X*I2_zp`4Z=q& zKc?Bu`B&OlF>qJr*I6#XcDW>|SXaH?9meH4Kl)xGfY&%+AE#PqcExBj9J)LGu){+g>CP6W=u z{_(r^*nR~U+DroBwEy*-*Bv$?sK`31wc`r$_-dA?1oVNTiz}n(3NZmG8$W zBJ3O!Lw=R9>yK;{F^K1$qu`T3uQ^hAPthodmoOQFzV3(vE=hU}kI}|u5)&D3PxkzP za{#;gD2$0%%J}vuiwa`DY`-5HB@9rju^Zd>{z__mAolZY|1of{+S&8sVqb}W9UZyO z&ismK#fow{?&4oz*|Ki__4Ks$_V)Glt!Q7qyi!@whlRtZBO05&hcmmVi|DA@o_OGW ziY_m^9%A$VX0c^u*G_Ecw8bjYO2_Wl(-vdbJ%#Y@rk}BB4`+so-F};@=`OqD|7c%W zX*{WHdp=@OE_FVt==_&0+9}mGj;DTXu?|TPvD@_%iy>>Q&~HDr7_!F2#72H?F=UN( z#Kzwtgh|#IB(~?h7Q^=MyJ}*_uqm-(C9#pO*^h`7>cjYq#Skl$)4ZUBy`1gV5=4wb z`6};6o85;wuZAXSVvNaKz`BTPXgN}jmywrBE;buR`Sm$dh7BP_|gFbZWZflNR=EygC<&PfD(G%e@VI z{sy<}k&0af!#W!(XP6kPgez5;I1dK!$u;sQ;A5Gs?N<)+7sq8Xh;i*H`U_YVc~SXN z77CZAo!*@{xTpVkXW_=H-yt{8&+JI6^ac>RG*q_BJb+ZWe|`C5c}g}@v4_@-vpS-v zpnrK_aECrRj1O|3R}*|kPqEb2*2YitFmiuj`%nh@j$gimd=fn${egW!3SWQ9zWDXa zcOoRpCxm-s8o|+SZbz<)QBB$X`WenowEVl<*&|h@CJFd8#cR5S!Yt?Cn#AYGL{Ev& 
zw-%$KSAWdc*AyLB3{p;mkzw+(_=T@ban3PZ-4#hO&@&Xski-aSLZ4}lcI+c z3pw;*OZw+GCApibj9R^?%eYBE2hl|3g@1HY4!fPg_Lt`Xk3Ux@H}7IS(NA!?qS8qs zF0TWM?ZRjn`5LNwL$GRN*_Av(5nUF2i7K*+xAS>*iLweSH}9feTQ_Q@Y6q-QY95UJ zqc`Y`&z2A<5~AIRa;AfRC46N%M~d&=7LjHDc79AsmJJ$L&hNkb&J*u^@`F#_BBc1N zsL7&3=5d&SK;oCD@hvxjpSIw+ zLNEYr6|xreh;t?-w9`NB6_|qUp?@qmlCosbgI*?us3^$m16YR!cP63rH>!0!#6w6P z#Z0}@4oU$ev)jJRJ01&0si~#}O;%bV@Po0qxhW9{^4+~RHPe&F2Qd2XtMBt#EN?1b zZPQCkV+c8_tA@_PE|ukWB#~X#OSoYS`_TB;udWd{*^Il0Num5edsGhYlaK>-;ZCZ; zDeuv~4c2zDol6DO&aqZ|QrS-*cjry@pWm@lr@~xa<3$WPEU~sFV|OJL)9iJ7DH_yw zfN7}O^qjR$WiPoKB~7>Ma0#8~wjH(h%5B(N*>6dQj_*p@8mTLROu4`p@n3U=V|X@F>v`COp=XVn+F#9ZF8dtsyOaY!8SJ?oP@MF{1*_%RR|+hxB;vdu18hf zwmE;2nm`=)61jmMZ3Rya09i^{el)e4AQTtm+GoWe9gI#;gXuX{HCQzGiWG{xCQWjS z-MrLx9$86-a!5xEfCiqLbvGNtnXO^B?_JWakw-E!QC%u-WxtX0AKrVD<}a6pxBxo? zYnt)otVu-(ncXOYkk+u)X*Y~{AH*!YP22;NG;64qvv*Y+C7oKxsfyNPe@WRO@psvc zlAuN-2$-awMmdO}Gmu^+wY<$1c35qq(#{OMKt8=>4fMhjEGfOnHY2nn^yC&H9f^me zi-vmG=S|-Nqhf166Gwg1V|(1Q5N>Ph#Havq1%o{!uhJB5%Gu~P^+2@${yomx6yzd6-M&$9Ys$zlMr|A8&%(%`@czxOY9eq;VO7j2CbQZrF> z8`-Fa9b1#}AHBa4?$zOYsGURviVeB_;!t zEj)-hr-Vu7!VW*U{w){;5o0HTNMVszd6LrQe%j`4o7!d(`bt~1 z=x$7ybh#Vcjc=*;2I0FKD5N9g-Ui+}w5DJ*$o*~k5x~`mxe3dw*K#c9uQ>O3>#{Ot zClp9ZSLE_$h3;i&&Ww%E*dinN5*CDS0e%tKad&5n0VjQLMdB%`8Hr&nEo7^PQ^}Yu z3p-czK{K2MZe!COLXMvq7a@+{z?N3ND#nyeWGE+W08SSlb(5+SD`M!dC# zFAOG<&x(&bG9~iJ!0+N-)UWpNIix@$h#@%Vk!Wlq{SuiqELW@Shv;lE59kw4)CS9tW|L0a#KPp)!M8gY z23-`A5*-I_&Equ)YQlIG%m^w76>!ANP>Hjv-lj&_ano?NBc1U!hfXgL8Ojz8<2$v0 zXx%7;UgUi*m<%3l`;|U-DEpbfCozKp0==vgNhBd361pgO2b(cq@>PKf6=(D`c98_d zbUedAMhv7@Vp=W{h}#p`(#S+*+r|PRk-lZH7N1Jdjq$cHTsECREFxOorjs5wHrG9x z7h$|I6AD`dP}ybtvUFwM9`FfV;fx6-Ys01q1$+fp2@{HjGQ*TMk*!-ZiuSU@bk-TA zF2w4BKW&j#R2^WVc!)ao^g4d)p0l+aJ|L1;Jd@C0FqR{*sF(VRAyq=1L ztC&yu`V&hDTS9T4Jkqvb)XM)?{eZ{x+ALhQG^&PmrvH+Bh#Xk?skro@oP05bcGwY% z=3;tAit>RH`jb>X0sjd<_@ehv&N{W z1~J|RG%y&wOuFpB zXbT+5DvqI@kmI%5e-B1~0&)o6SSyB8g2Cw3VNi-v9S7N9AU!hB0CY4mWUMQYwSpl= zih?)MsQl&n?tlpSL&^C*p;L!aiHa=)!p0~;YLirNvQpAk-jqsa96{v0;EGNRdxNVX 
zNUXv+Dft0oZ)Cc^JlYlYddftQ)KFyvt7w}St=fQH?%lAvm1xx_(W)3kt75Zg70a9; zA5f*|^S}{fhwZPvDggn+4T&5X7%_B7;9`>sc~UpfLXNN6X-rMrKpRMp&o)rI!v>|a zT_RS=ty2_YV)CXEKobmUlLki*M#qAqkE@GO+TGEu3jBxhx>8+0snjei%N4De3Vy)yF zAqb^2S*AWqWZ_-#Ty44BH)RE{6s-ED+A_=kpSZjSm0>c$5eUydVDOFQron*>7g@!- zP?AWZvx-^8y|7g$Ds(hK*wSmDohEFRM!{Oo0C5e`G?Wi1=Cbf)wUXv}P|5Ms_g zSkE$D9>}r*DJjYhy{&7SHsY_1X{EeOD=%x+rV| zouN#LkW8kq@kn|a1A~mUw47z}YR-=B2!uVStpqF=ECnWIr7I+QJPCHJi?D1KJEXB& zV{aBPQS};zyccnh6s+>(T8?}toWV2$Yz>%9psU4IY(DLyFVF@vQMj@q;{EzYJ zPLM0oN`c=j7S@lm5~Ht6x{;UAx+U7<4MuZ`ELp`>)sWdjP ziifMLmC(DU_(`-0m0wYno1lzNoBUA|O&zG#@@mZO<`;y5^c+bm?jX{aiBFaHM?u17 z`HL(6USP7gj%uD>`+~*Ff^<%SnoUbvpjK0VpwS3g5U>`$lHALqOSi^MS1N-kgaa91 z@Rj>&`A%Yf<7lR=a~Z&|G8{?1{N!r1(`Ilw+ZRspRkIB7Of>AlHlBX(S;29-=}H;O zgv8+$ggUdu*FK+aeudBxrK-Bv?W2Ur6S5|Z_O9T!%T0hp`z-<)RjIuzJP+T(N(&0w zq(ftrf|Re%L3w-j2g*`haTYBegT@zm5*#y?R5F$vKkg2HJdTmocrw?1Cwn}BPF=;f_xL(|w)eF-{_@;9WG)_{XN28l> zd>-cW^y6mWD36btUPnnfd8}^p$ip@y9QsHjLBl#h8w%&t7S$g2@#vW1r>IbCN6*&i zkm4t(&3Zkp*g>jNEILJuI7H6sbU+a%_q-*YdqxBqJ4y-tx-ZZ-77^%zH(>mxw=kIO>E9rM%di>fh*dm!FUss#N7miaQXlO zZLd>#4=xsME;SVv@GS%1sNxV+P`e0h10E z^jo%M``n8HEwGRPV;R{oeLVg}0TU|an&J85GhXN@%b`dO(wOCQAH1-?jHS_uTH5u#G+M4Z?lS zJ!kK;*Is+=wbov1?X@S@jAbWFn9ok|z+>a#HNugNSZ8fT1U!XRcEO2#TEj{kA zo*3swwWij%>oHyL+a1mM-%DJxSz6qoLpa`{Unl%<#S{GE_dC|`-D!6}q5BzkKZ@D$ z=$Z(&tAFbC4Th@=!(&pXv!%5Kr5Rityf`f0d|g{U7q+_Nd!quMP2LN!9pz?*I^zzkV@L47I(sU_ zp3@hZ?%=RmOdU2VMkgJYnqNzY=hhhq@4ywHu61L{yK>@CJiCCNU&~2uVoT^-^Hx(G z*&Ur)&wGT~NnMS*Fmrf65}h1wNobz6Z=U9x?=F8+Lv~T$8M--I(su#%f_~#VS=8_2 z#-~_g8vjfV@~!Rh@%8N?Qw<&E0-Pnkl+$o=tzbXJ-=QPPPe?*)v<=ULXDQHve(VUo zyVeF!44FKz1v3TRb+8>&j@(IIMyT&I#b&u*!-mJ7L&a=)r10FQF8A zsJO5hO0WbNiDrk}HL11T*(y2U-#B$`77jHc#%4mpD^&4=T<%hgic~Pj&Vqf2pH481mJ3b``_+^%`DvqZl)36T(KJ9P!ojhd!vd+0sqWc~ z4fhmqF}qYzQ0j_2eJ>#F43Dkld=dHLU=+g=mTV58gTk{l<7DX*>REOQj^%VF)iM^G zRbQt;osQeZcCIIiCoGjtlKUZ!_)vF!pN z-9j%J8A2$VS28Ep!?RrSSqY{CE>upLf4coQ^SkQ_;XvUVp5dMxz?JQLB3LJkD~+R3 zh{2j9*JO3Ra_MhU+V~6OO#aPm&`ayQNrdiq)^o5cb4{4YAM?Pl+<~;9 
zoq*+FKId3xIdp5m)i%2ff>pBy5NsM^^T%OkYXfg&HEU4K8J;x&v@(;B5bErYL0A=POLSF`NZnMD{*Ox^I!^={6YVhCN;nXym=d>R#;*rEd7Pt zKpt-vqcEBET5ufY8!9sM|d*?NNOx!>ZW;NtzykCOZ9N^m}lj}n(J zW6I)ea~lq@p8MhL923&sygR;=A6#CQ4v%mQO=}INr+G}LKsXWVZ>n{ls4tm`bJYoi z_U#ORrCkSe78CN?Hq`zasuG*VU$);Tcc}>Hd)1+yM$&hsl-53FfFIG&3*nTWV0+Xa8w1FN1hEojZ2unV?$!p>Bl6N-~R z;Y-2s5JfD+3~(XJ76_9``)}Vuj|c$DLhV-37PZ4+R#O5aOUJ&6^Im_Sb?@ChB_uTG~*xpF{L@`F&=JQ!j7(Z1bgcOd*-kI}jNHm!?C+Yn7eI&zMK zoCuP-KH>mQij;{CpxFxct@lv%JJhW_PRFgQ<~FO)ezR7acs(79!+zoraRL`jyIZ&v#Us- z*;lPwoKH;HHYan$)oKO91eVg(FH24zHj@f{r6v5kX=GnOSl z9=~!6Ne036bQQzX(3oV*ZLI-jn+O5w*$7dzla~!PC{AWZ?dxX6#gL{6j!C|NNu^G| z2TbZ_Ed~F!aVli}iu{hS3un(#nD^CaX4yvio~>`fr}CKr6_Of&)5aWG#>w&i0Rl8} z>`07kOj*vGmPy6{03l-1ArNCTBhmjlWh;^{Hlzr&%Y2h}I(Y2B5C5w(OEBxQ86NB} zKj7fA>>@@wukQ3^7xEudk$|#q@OO!M2vQ(({~4Zf?24XkbkS40uRq{Hftbh(ub(v56SkvM*Q;=7GZu3SK}1T!N!B*YRoBV5r;{z-XB+}2vTaJqWUHbm zl$#v^fw1?qD4)UXyuXm6{Cp@@T-GRapd(c8Q*^C47Kr`Xw}H_Td~h$o z!;yiK^M^clVL4^={kT9M0!MPPj6PTidoD`oGhWc)fCX@u$uK9S z)yZ(MhYZa%;smWo7+@vr92sAN6cO3#OG=8qgq-#oet<^v6#*Iov=3S#(anJ8Qv218wntkb=(ls`ln1S(*WM zy4gk91Z(;@tqVx95T=Zzg5g+>DV9IljnJ-3q)B{A?&$Ek;(JY-V zDcKUQx^K@K98D92?TcA@dH`Y!`~ZDz9f2vl`PYR>I@NK~MVo4)+tfr{Vo} zWfqe|PPeRZrC2V-g+f2#6na)aB4LzUQ%*QGb)`~|AapvZ@jFGps93-8hRukf zK$G>R0BM%O$|z8AJ7V+Y8jZtW>e9KDy|F#W!Fx6r!5>MBrEC+;!4zosYjcLJSzRY7 z4U(R!()VP@)oFyT1|)iU0vFrROcaN_d{H=InM*)d#cwj&p}rPB#YFmv>P5P`bpdM_ zy6@Ko0s4|hnNv+K->e4Jlvg-`jdK|Y$-U+5OGL2jnc9U}bDgBj zj^G70Y~ufXI>pL$6Bq2iaX{lPSYgF5-Xv#*=_z9ZOW)hL0&=upU=EM+6@hO}`~ znrb>@3*&D}51cJ*(S3vxgn~FB=%$>wnl+^u&_?o4)%>+!+-1AJ09U=b)qGvt+onaC z^4RXRJ=my?oKaPzztB}39X=i6RrDWiA;ZWQ%K)?eX zl#$Eiimlz~3eTu){M`(4PH-=v%lxy+R69iXif$$(;KL2)x$o)N#NfZ{MW(_v4SFJLnlrto@2 z3!IADE4f0&n+dQI%K?nn;-Q)=?B=ACVBnqan9>U1Rhh^CAat&RV)K``@JFMm97x%x zW#dmSnuatCAT$ zkK*Fa#f8lW1b!2$wce!{#AlT!rQs-5BwSv>XLymiZr>t>pCZ3AjF5HMhGNmg7{SW* zL?PjQk;aKXymFnXQaVeAwM@@M@Y|CHmX?1s%BHG)-!t z*dA>|pA_qkTRL<^z7KpQ)#gVo6V@#}4YUUVX;9{mhS_HZWt^yZBTc0+(%62vSVLlX 
z!jDq~bz^_2$+5HEdHiwo8r&=D7KA;ZRhdua2A3J!_RI$qfs7%ny;6>jedV!sF za={WRcW*mTytl8*4sq40wonD~=wP=K%bM>FgYO692}hOJ+L|FT_C2LhRdkSa3d0m- z7+qEpmmWc)h>32}2x2_b2(o>yX9OdSAtF^FW(?%8Df+S*E99F!U$XIe$ON?@OPsG- zmPsrJJt5yH5PlGuGyccQLFAnau13U^Mlj#Y*Yn{-oL&L?1=$T-$hW3I1J%IouVe3- z5H;2w^pIm~GwWmys=cLnb6Lpu8fJ#WrGoL%Z5&|QnIF%Wg zE6q9?=`=H_;sT{tP%(pY$yvv{rUI^Q*3nXF(DF8VKF+kG!SbM8a@wg61L}iwwyPuR z0h7mpEJGF(Plm@8AVc{sY=}`1Xh4|HJXL1qsoG^`o*O9KM)wpRGKJ^+fuh%!cqQAr z5Bi2mAPV-@N)=%bX6~UkdHnz6dxg9rVd=_5q4AMymuXya$>e2A%Shf|?Ov}~RwsT+ z!$BM?=pj-_gG6#N*2Kl{tYC$UAHFR5jGT*qh*_Imn|<Ip|Ply@gm=4 zW+b6qHi9;_T{g=*rPUD{b|ra@I~Bc#1hAe_Wk*m;{-Yib*fu+QQExS*CLF4`IBf_n z`6a0t5P9;)=#PTq6E(4R+jBn%?(LuH61MlAQ+aIfJt3Oj;SFgjbhW1}Fiiuo$g(Fs5GG7|YI z8QJILocIBY!pK%5a>DdG;V5cRx(zznTVY7aqs$6nYaq$=*n2ur5(= zOke<;<`A9HY%X^Otx498elmNPI#Me9^mc@RlqWST;-jqTY*-u%r7PlsbVXdS+!e}2 zZd{m-y#rIKak>5=Ok|goGXTSa=G45e)`;8u~RN zZquEdGo|f<`DmzZ8x8g=YSIjX(n5Q*zPZE-ZZzN$at~X8a_?wZAgNQ*Fi4b#0{^aS zq36>qENHGg6aX%=^^{}@7aTo_7c@NdKFb6erUO=r0HJ2+$<~S&6#{uk3JN1IgmPc# zkFs&QQ66g(wcvIQ`j4@-QoKdNOKxte*79{hkN&96FH=h-Vzk`8?3fMpjG0z!%cqbZ zH=9J_8@6xI31`hB$3F6bD<~YI7IzS#Q_1UABFd3`-Wcn?)HE6q>#s>ivuRY;BA3sj z;`6S_at|&!kJOKb^hl=>=DlDxIG~;Ct2`5KCY6D<6_ivHKb3$npGvCCR8qCeOeLpB zn$I;~KRZ3Lid|a$UiH`Hn*0%vY>)43_vb!Z-uS@nU6i?Dj6ax}`eoB3E7>U8C7WYz zU}h0St+T6YGFp=0CGFpBEzzJ_N1wixIp`XYfgc+QCVa%1?9*{Fk1o%?rxu^v5nO97 z4~&F=tzC+A4qMCb)XKMFSh-n~7G&0>Ga#H^=(+C5St|uy^-aCgbimJyz42e4u6z!5 zjtFttPd1-}J4HmyPF6j||BGswsF%#=a6dXLY9-=?%c}VtT!}jw2}-;oL+gA(`5a&_ z&Zrkbj=52;mHqZyDQ2T^Fq*dp!J`5F^HeWl37iA%+?)Xi2y(C;{fQ63LKeV5~_b>iEzv z7n>1mheS@eG$vk_`vhcS(B^W{4aWKPq6yu|bj^h`jEFK8VTL(H@mrP5Ci3Pesyng7 zMvMwlN%Y)`vy&fxUt)(I@!Or;BO^6#f`p*9<`KMv77}!yF($t^f)4I&w)^)1LGzZE zjI&n~x@6*vT?;+--NpgY9JUvYBEdONE7*4jm*C$k!@YUgb5x~~oSP`9+OiMl78BHl zW;QjMt`+V@IEZdBkhVhSif33P800It1ry*c;+Kw$TSDi6rAC$eQN!_KG5zL#NdYAZ zg5l3p$(_{YDgMq*VHx0)4sRgESe|+;q@XDZS6B%Y6$#xwN|EKLYnXIKi9PgKDQDuF=nmVxe6 zN){@hAyJUR0r-MB|9L!2N~wKgSuCYEA`VEQxLVpfy0qypypnuL1wp9m-byg?FWv^* 
z>jG>z(8u_KK?_-lpW;Fg#kpXxfHc%b2yEcs{FU=V8j5{6|7&PVE^)h z=_Lv_mly0&$zoC1x%03LXzN0Qlr0?i}>$N(nX&eN6 zMqACbaFj{Vp6O-&aT{unGeej(5t?#>DX3reL(NZk*CLio+o;6oQ#(pNQ=h(e`duE(R1mcTHMn7YYv^oJcmB4Q6LF&1(Hfrn&G{<*ImLNkPl zh_FQRP_B|d0h=& zBRNSZDrX@@?X7r8Bl%$dVXdX5w=Qc>7ESw#STF5O<>lo}7aZbp9Ith-%E4E(4*aDl z?IVK?06K>H#QO@d1|Wx7s)=nA807%!)C9&!tAr;oR>gI86G*LKA9Cl~9Ipgn6tlH~ z{JvvMeVF7vtbDTU(cS>(Jj;ryDe7NG<<~HW%e^+EL&RtGEfJ>G)xtN=4B)u+34CBC0(T@fL5$*E3zhWAR*T!9cIrZq=HwobxCtbJuy!ncWZv0mre zQd@iduQaNliUKt?UolUbXw?jsvd)}`9E%WFCCcM>5ds@Pj{MF;WShq;PObGxkW!|z?8@n{CAgAo75U=X-U;cMg^-U>;ELcBD5d{-YEw0~# z(-Dq_-N2cw>s^zz(~nuiiDRit(u#ff#4g%bgn%^Is@4qYgMc{eko>v z-&GU;=*gH`pqn((b_MVAc8aHy-mx6{tF@_h1W_yeRfqr+l4LS(Gludy1+GD2BhlSb zd6}mzz9@h6lls_i<{x8Tx{ona&_W#xdlFC`Sn(m41$}NBSe68V{UV}gFiUj7kAYU> zCuyMlsMwb1OYU9acJr1oXU>HVv{^}uK-DG!3VzntJRKG%Udnx>1os8uF=}W?Hhv00 z;8=FKJ~U{U5Azf@++JI(E6`oles^6F$08^GNdr^DZ8O%k0eC*i)rU z*G?szu23)x%Z}(Je1MI^1el9iF0rzxWb94O>bHbhnsQ+2x!{eroC-Qe{*S#MdkbM0 znoh6W0$NFqL!>6OuGGK8VT@(JzK=jIwEGT>8_I|6Ers~fne%vPG%X3)rVy5Kcp3i^ z_XEv^j}BP6rBkvQpzK_1@>Cm(X|VAz+J7T(bdJa>huPPTQb5aNE#*E;AgRjJp{?r8 zX0~lxw@$I2TBsw8o-bFlC7ZvbmHM{%QLyvj-l<`HATmX^yVhDR=@|PqEmy1A{njUM z;al>P;=Y1SnFCTzU*(a-u`ich@;LmNB%&@#ZK6Qw*pSqZ z{#NlN7jp3UBs363$98aQ;P7amfU z%9Z#G!Xqj}CDJw{b7MF_B({1G^ZI8T5Yae*tmy?rTR(dMA$azgM#(^S7-hC$JF;3E zQtdcsZOjgT)}n(m_Gw19w>H7Ih8efvbc1bkDrjLNflN+%nC_EPgmOFK`F4aOdf|+- z_e=9(=pBE9Mzn$ut=wxCw{(*h0bY*r{h69=q0ue~=|E{+(yVz8VRX`mnqD7bb8+s% zmpc^#&~+KT368{DTF zuz!nlw8#Kd2!4d^L9|#FWCSU zNnKfi4BM2M8!2-@3>!>e^~@P3r9#+%qmdSET3ny@1vZ5xY2(I*8y+F3X&xaq2)zh_ zR*>J?!RZmggH;j2^Y)-bJqU5zD)|)^7?E$uVZ?c*y3nb(-aZp35m3@URzV3(7AWZw zqDKh|r$-4~-=s{5Mx5?hLJ1F6prk!0Dv^?4C+}>Q`I4Qc@Kd+bv_IV}kpiti117IZ z1Lk%}mJm|yr4c4K3-!y>iJK@meMlAI?5NUKu&diLUyaEt4n^9wxEvMQZqBP=CkwI} zy&r!ZxE(W_h5=5h&D3ro|Q)<1Cas##9ND4>pa1$tc1|T7CW^ zdOu`tb2R%s!Aakk`=U)Z%N1z`2I*073%G!NG5G0U1jM`v4S`xk&U$&zm7FEEeE2U; zhuh`MN0Gyg)+V}R(WhGFMIaghzuh|4TqVeTEpGEgqLANRD=JN*)*~$0V)6k@qUwW= 
z4{5z4)xUXtp+QO!v;{QF+CxCApsYaS4^2}XSkwilOEzChwY0oE{#x!eRgJ(_&20^x z23liDd(1Dtb_$zsYDVk2F4(#v^0nSZu2_94pcUQ+*;e*qBbLV1-pnr8bHV-pg+IS; zDj-~|32Bp*LW&mKCJX~3Jh)DqTX7FNt+B6HSFB1`6pz+?gRKcJQx*XM+nP@F>=O|1 z&Fk9z(ihQ}n0yRB^k2)hM0WO}Z<_vytl>2(BLNkgnWIb`X^brRO1dIDt5QRYENe=G zWlHxN8d~FQ_N|1#VC>_T)fSykAq}EsGR6T!i@D>^Tp~10YveVu`hwVEx{=15t$}Y8un3Zx zgTzD79<=WLdkuwEtf``y(4d1g>x6GG(dqEy!5|1+Bu9#J8#D)>D36pef%w&4g8kTe zprL03DunrI84Qncr%~CkkdVNfE)CG8Lc$z_uorYH4)IPlW<#z0fX1Mp@&J~wA=ic> zwb`!9^Wspd94jFqEkbF~Md<*O)=(OFl=5ZM(pXFhpCS)yyceH|2wh9OIt0h2$u4%a zEKJp|!!EIqz-^IwdjioAlx5#+2uh;y_u4SRrKgkzLkp@S6|E?He2*F) z(>vS@5a+Vc(qaZc6Y@k>ohMRjc?HxK{KaN~+UgFYk%Oo9R7~(vF2q z^F$7qCsGDHB;)>SXs46sMv}-)16yZ1V)qi4oL){Mx0QTW!ndO68_8r#v~N$m={Duv z=wALH#N*^QoL0k6jH&>lIU^CvAKcEx#}GjkWo5`8U{uh_g(^GG+Ln)yb^%M~BPgQU zkn9wPN;WBmkXOp4_|*ka+KEm9u+K)Z)b)kr8QbB3Jk1d=M4{~2H>o;@Os|@CKzKh zIpDGl@t{zKD%pZPD72xxONG6q=#Vw%5*+k2$20+uk?7atzZHtL*3x(vZFZt%ZI0W6 z6>To6dyCZWwkcza%dns}-Kp_vg*K&JfoG?j<=SD~yilA#C&z@-(MS@hal1v()j3l| zLBV!3X#XXS2DO=D(U0{uOK#0lv8;waHETeMg|r`rT}+BYJ)CwV28rIh+-UO(CvL!kWPj7lj@;TK>KMbW!v+EmK&6ccJMcr~z1D z7Y>**KwTwc6_beovgox-0VKV|0VH-UL#JYA6VI^lH^{f4&W10SLy#WXHUJp)%hVtE z&OO=se2L7&vMl#Zh7?OBXi(bDQRRZvdKZ~qe)kHJSbZA>+A>s%_Cy@ zOttjkNkDXW9yB26tePUe+$tO|E{D9q`;rG}W=F79eiuGNhG8JT!~MYbM_2WOFhcGm zL{S$i?#xjHp*9qvvA}5%?5=qv!;N7@{z$$@s)yZ>K|W}#(27GW=@#O$_^j>fv0Sz* zKFdZO+pHLCJ|mZe!&togb@DU-!|P}YuvnbrE!x`B=tNDL6!4VF95V

30 zyBOz}xOF{2DJC-%UBVYx+Am_LR~W2KN}yy8f@Z;C;Tb{R)cWj;`to0FZQfRV%Ei8+ zpOoZbzNcT?)=~2#=nYJ~kLdx6;-|iv1$Q)OYGG45@D*|$+j@Vr%jKC8P-iK46by^U zq0rx>Y_r&eFA+m(v6xCc%FGLse@2C5M|~b$M|5qw4>PbH-sB(hyTJs2XKVL7gQ2|~ z+~3;L-a_JW2ALYHA~6~RRfQ9b7(|Z3MoJIz)5UTzRQqnborzNAh`^UCZZ7v1_iaJZ zr3RKFj8I7j)xT9yDjV-?XVD;IbHFw(fL^1hpjZrpq2cR&$(sOnM_}hGD0-M*MbEUm z0RAsn%N=aE6ns5BZ*0K>CMXaW%o~09{yHvs*KzbEry|Bu{OT^Kw6y}1H$PLL?61Sg zJSb@yl+BWKarS~0pgizQf$|ph-h;BM42m{o@LZ|^rv@OhWWkc0$oT@|bt{oS1`n||4K$yeYqfpa= zzsd|6C3dkifqLz5=gW3Dw=!YU|41u?A<3qf=B=PZ>_*}&}xGmvGPo^CCsV2Y~~StO&Z z97UEf4^q_$=um(miur*|Dlhkx-&c+tXJOD_pdy)_^2UH2=$Q2YWXShVkn3zuZGG^} z4t*$RcD0<@ea-JL0LNl(F!_63#F?Ua^i%6;?pUFB;80r`Cv%C`(K*bM?`pV`rtd-< zVcEZ?!=D2Lh}k~q0I708E3;MiAxRX(cEnP-5dBv~4a%aTC_zOHDtfSw>v67i*puh9 zn6C*I6rTl1wF~kbPsL22u;XsXpk#lWlvQxHN^(lhE}_l>3N02!@3jVTCU69TE5Wdl z9D%?;7yNC~%L4n{@7que=!-*U*nBK>fiNDKSSKvc5d*y^m=1--Jl7#zb9u91W*kw% zcjz5*klrfD3+YZzVbPw8#q^ovTH=wQLrWdUWL-im+2`SlTq@90Z$d1%FmGK*1W2~< zY$hQ5@B%1;G-w$FUZD%*p<^JYLw-oj{*ZuL24GnLqOzlHMe!PnLJ3*~CPPEoMvnT(K+>q~yW>>)I9U-Rx&Lc?>%&N1TY5klh{E=IMFVhmH_N>!dzkh% zNv(>MCt0eEZjW)h(cO|P*`?!*Qog<|%W5g>b zS1`QbbGGWvp^vgTK18!5(6#v*W%J6q?9QWdO`Ya}d#8fOsYxy+8bT;VC)FSkeY0G< zrvs7XDL`WlGm#6sVI)K&B>{$3$X&w~6R#3%>Mh~g8W2T`PWO#T(y6Oc#jT!9LIgGj zoO57_K1`oRS4%$M-wKhOqHXqC;V+4@!+p3FxW(w@$RAL!aq2i0^a6r79F8W0=ZS_- zQZSAmXJ*c#_MdwQ#~6RoC-^A~uZn|1mSt`tfK`J) zW+-qtMK$I1l>VeVpYVE0BMg~EIt&A~t3{(2W+DZ`Fp#S3K9yXhaiuF3 zB)c8ocWXPJ6F90LWd|{oz&bn}N)pLaA!fg>+jmEkx<=apZg+G*HJJQz{J#TuGyzWN zJtQv|qUar;qY=7rpjk1?)l5?*a+0lFa~GMC{4q)?f(xN`t(NSSXtexKi48_GZ2{Ul z)K?E{QCz8_SU-jrIr)42`H}wogZ?b)&wu9+?G}@IPOXEK?u0(TuvRSqlbOW}khKuP z-oL;s-x(ev#}{l|ikx^0vw#Dm3GzW^h&e#XUSeoagXG@`Bz2ge&{PzS@>+BgBy*^z za*>MLT&r(FjCofu=SU+`WM^{T04Dnzxtqm5CnF~*v7DcPEMettr<#(J;1-Ec*||uZ$G{O24-O`qm`~kg);-0d`L-aCo*Q!OBbz#$b^*ltW|y zzia`Ihc^cFBa8Ne7<3dq=ncehiM$|!La_zchboDaK_%66UQ$iFPloJge4PJr{FTbG z%2rwAgD+dnPUj}rs>aImxa@4f?!*ekO-0eOHeU~L@P_7+o*3g4g%?khNH@w3nZ&f2 z2kVMXi$g-m$8?2aXLN<4NwIGfZ7dtsJ*D56D)tk6c3k(2Ip-`y+5Q;%aub;ebJ3{= 
z`(t{;^odl(Q$PAx{Fg}96SmZ6rXUieSv(>hXo6>^n#}R?SXEzR-XLUD zCixQuwJ7SyeR7KLcuXkoUsWLi`Mk~!k_Zz=3rFHGi`6PgZrfYa&8#6sUOd=eSte61 zQsP-;W^$FjE1c*vlRUUu4-WbVQ&tCz`#Mzx{{fdF=P0Gg;Ius= zp3lHKCcfbCf`*p&hpiv@f^mv%%NtI0U@)PL!v0d18Wy8522_CPvcMQ+7wOl-A1%&h z1bO^WWgVD-3?ar5e{goK+{{sSmeQzLx;?|K2jw(iR5i_Ty`QGX^`UW+BmCLyfh3E18|1s>Do(y=Q=s0KvI3k+AP6uwh8Of5 zhUdJ#!S@g8Hw%G9UHxv)sc=7{Tn`{!H8^Sa+vCZ8VIkKpvaf!h{;umbFCKp$oeXNG zD_<>mEzi{j({#{rvU$}jfvv?z*5J?+g8_NKRfy|Is{nCJwNxmZvQIPS*-J6Tfb!ZZ z_~P`3-hNCQ?Af&XaZDdR2IA7>?IlDH3PUrTO*NLhu!Md+JwXB@_qH9N%^|gM=Qxzk zvN)ixBOK6@dS;;VEn8Y)CZs(I^PYkzE-?SGY+~VU?(j>o?YHwo>+?W^zrUK#8vDqV zk%)@ZYnbu>+lrxF6|{|#B5|;cnjo(2wBtgZAuQ;N{aw^ok}u_K06`5oHN;0CMwpIi zrd`EL%Bv?yNUN=tEQ&As@(IS8Gm0P~0vG#Q&>$(DFg%u&w!E&l34~i(F;H^BYm7GS zzA%;VZMW~(cJIh*T4@L+RG7Z3y0Ni1INOAn!IsoVC)w8aLa2ENgvpl-z7k}pTg=_% z_2%+KSAg|w^Zb(K%%#vXDc0M$oA^ z6UcFqVAObqyhgOphYkk(`~JRH6rkk^w|W@V-O--k(0GKb4sV5_#e%e zrA;DvNr$;7fL-t=`6SDD)9WzefRS6$x0>>jK1W{xVHetiAo_=nNIo?aDxxu?^zJ@GQS=O{0To z&p;{sh0D(IcxQN=>7x7tFrylSg*tppYBc7G;1)xsW{us^rW?n!A?GI&rC|aCXOWv* zYMzyxFs=E~=G1O2!8Hfd=iUrrqm)M`1DJ5U5HbHLB{aP}!Ery`*A6t1xIK)wCk+gJ zm6!oMF3MDbu|*{gn0YZ^h}e zSZ81axPrDJhAWs@%$F-@T$$8mOHnW}kG5eGVSI-i1%&=0NcB8orD-mB`G29+?eQGC zf398W-D_Ejx~KrAC#jvK%RAkAD4)#TieebjAvH?;W$k#6OC!CY5I4vKBKsAJb1@~5 zt0{fpexB_@rVz@aHkABljmNk+Ki5vf@vMvea72DD^~=hlS4x0cwx%7AN|mB!XDTf|EBv>0U}1^U!MgYX8z43e*D91l6oFJ$1<7UPoq5{)mf zYD+OzYE(_2A(g{3zcx9kZ!EB850n3Z&%Gg%T>oMIsyCSUJwMx#)no1^V; zD2f8p#aW_J2us(W^=F}V7Tb*6B4K$V(^8E9`>%-627k=(WCuR|KEJuyY--L*i~WxA z;(J(=l0T#{N6TPnHEyJa0$LGBw1Dvld>pU3e5QV7D6T}PTFqxOg})@9&AkCOb7VJ7 z10eY-AD0wr8R1R#!H$rGhZVBRH=I3iee$Q2+7TShyyuJR+<#}DVvakj?rJ=`eX{1m4gCId6a3FY)ZGpX6?hn23`tpzWMi|wf8cNSei zAhHbTV%~veI9cT}gJBD2y4{KG#M)9-*r*UruZZxHg+?dyVxUnP3+yukTrfDw9r!%) zCx_wWl<?4ebdCn-5i->u(BaZBBQunhuC$djHXs_Ig5uu*WyY*){vJ+TNkTAndF2 zzbLNPs})ig$h#~7nEVLLxcvJN;cwSagb-srZ%L|4uv7llf_9x^EJNN_5tLaIE6L}v zFXqLF&wvnVK#{{yw#^lTbSlY-QZduiM|jGJe*mVnBe+?r60{<>qmd8?E?{fZgKH(u z2u;Io0Uj)!3V^~D=`9~{T;on=5DKG!BJ#?>X}jP=Q!qr;9WbJF=% 
zmv@5$N{W8Sx~;J(^@A^GQ=y0>Za#Pw35z51JXOl%(QcSiXpKUyqm)aEkt5_hk(M^> zgvnbgS1K`H9F6LTL=Nu7Kutu7Fh@24lW7hQ`R(cMQyFLve!~n>ZN-aF5Oad9+B<#VCSZUQ@8UJlX4e*@Fv49!=UpgZid8n%5ZmoFyNQ_-)Y08Mopeukv$-v@ zsK`j}t%wx9iM$*;!~F#C333sG#UqRK+Suf@CwWO)hdM5Ippgk%NjT-xZv8m`M6jyH z0B(0gZC^I-?0a==Vyp&ggknb)N?n1ym^2!2Sv=$s#kWM=CZd>tZDq1hl!kk#Qvn!t z!Umd>;F44OVJ*T$M=WL>URUKxP-z=kopq26?}hAlh6f_oBiUXiRFtc1ySjc(pTNr@ z8*i>UaS}2AP!*y!Zzj1{NwfOaGE1PdC-_5^sb!XUXHP(`3jG>T8jd}I*@A1@>aAZa zq~N}+ENfZ1Z_A$G4_w)bd;4jRaemxu9VMK!`A48gbQub9P1lH%Xj_x_A3^s%% z^!eDq+#<^2AFPsA4!l0Bg43l+hgC^-^m<+icet2>S+}{Kq2oy+cNPopN_&mguAYBr zw@}VB2Sump8Z9OLvu&uk5J9oupP%_&ez^yvZk{g@EkS;$W3^=5Mt@PGZ-_e|LGOI> z6rA!Q?YlN|#H(P0djSw^076aZ3lRN*dJdg&XyRZy8rcrr=*NdUl)OD>ew8n!-FqcG!3K9=uO~%|F&@pt#xPpXAX0O7Y!cn`2?`Q zIq2Ry=w5c(ND9JcwmXu>v@qxEpU@R#&qsKx^`#S(CZ;0oPadqd`q2Wg-w4ij1q})! zVV?Pj-L1+!HWf>vsUoZgdy8Z$@zK8zd+z>Aqvwa51+V;Ww1ycb#qq-!Yaa@m#|b+g zpmjY^AVtPP5Kt}4oLq=ZpSQg4t;D>LWlj#y>?|!Y`?3>wrQ+mnef=C?!BW9Qm=E1r z<1D#TWmr3Ht@6=5+zKyj;8MA1ed(yNOKt!@d1pSBLs2EN;naGeq>!4I)i^XuG~yPJ zP4iMYj7KBfElk8_r4(daByr6&rA7Di##%%*j#V;cV-CIsmJ6|TEy=o$3Tg-<%`zPw z#mt{83Z95fLtu7K^9K18&#I+Ja|+ux&$x%fxmhQw;P=Dy?Oj-3)$9mQ(*{Ya14ci1=a|E%DMw4N6C>>@gr3&{`Ori&y_HulSDIFCc$UqN}0p|e) zBoCYzaSoi2a5$E(jrA#J08z+9s>Bm~*n5m*#TgjOF7n&caeyy^6QzRgAz|pI#hFqh z3DFrvK`v;v@=Z)@o4J6#{7fn4lslCf3xqp*+T;S#(SgU9u|%mR?>0U$4WEF7A^TJG z(+;DS=J?^r9Q-UBqE;9|;;1Evq1*!vC8NXHH05)44i#FGx>x#;8GoVl9)goq!?Zqs zy4ur0a1huNBRDJgln8DCf;;$(5S-NWpON4kaR1vAoG|gT5gZ(@A|gJ`th2>2M!6TI z4H*AhZS35FDqglIUo1$n(0%456^#e7e!UD2gw^%%zz7K;E@ON!c%o!!1)maGE<%>? zc}B=mD$LJFmJYaOWU1BlGrg;C7o|Sz{n;On+&9@$G$$s>Xo|>Dp-|GF;mh8da#;(qm zdN)RF%iiI?F>__K=DygL%bR>8wd6m6lYSr9wP-eY4~|uJ2&a;|bJ@(LhDN3=--I5` z)LIS6lWlD_lzuyIHJN}se!mo5o}5PVf>)cD_EXLzf?Npslu<8puQT@P*doES!JGGG zBP_nNH!4?*a!N==eCa;ihZvVwkdilSU{CM@?q%GPFYkriBj-4ZY)|m0Hh2ZuXHCtu z72Jz}_d3PPXftrTKbwdxMy8hiDFHli7oyg(zwF+k+lv15^lROx{}_*fn@~@G)V=+? 
z?k&>y6isLIpPNkNG)ry_zpmdnk5GcS?g?1Y+3!cVrg%E{wHteaZ~2Gc*0s~w&qP9j zuBox_s$7kOX!itj{`Q4?g0ueiMSFs;ax0|{Hv30a5KlyRhe%&EgIYE#>tlAp47O4X zYS~G9a<=FXQ@Xj9tAZ z6|udRWl@3A^Ek6i4XI`4`o?bA-|2LwzJ&q&KH2kX*;yda#%pfRaBF<1mYwcaI>qhV zzjc6RCuLGb`6EhL0E1-*7Ggj@I2wba_x1!I)(^72>}!)I=tKGeGJjiF@&)Z{b4cy) z=tm#6oHlEuBl-cUOmWQ~I@h6zd1dTc1dg@c4t!LHy}7}kX72%C5wO3*Q^rxZAJ(m6 z;cD53x@D&*OY?gA!S2%!@YHe`VI<}jico=vx&?lVr*_UvEqh=0=_F5qRAv8b_vw3i zY6l_KvV%PJ;7sV&Ve}o{vTvs>ebyHq>^?ofQ&6g>`znGbT|iYa#+SJJV?Ws zM}lI#3U>oA(w1u}+|ouDEL~cv^<)gT8-E7CgA=hiXhiuJ5Ta4Y?kvF%7wXv_>L4u0#Q^)AeADoG4s> zw??)TCiI~yaq(OaR+XM&do*7q;U3?es1iL-zt2(u(5a`EB?9lbmEHL(OF#^tRT6HL=UWU*F{qIX#^(t?f2O)sAyzg zeV^c(wev5G)Q)#=Pq8=4PtB8sW7Zx636=8yU5fD8EY=(};H8SF$ihVSKj`u_3K%bC*sbm@MfbU$CZKU2CtRl1)m z-A@_(*N5*C{7mr!y3VZ=JS5;WlOy2UmyNRx94=$bT%6}gq&JmP}1D`V!&p*w^Q9=obKLEb`kPveL-P& zwd^yxb*%arGt=|^8J>3TKVTWpYS~d(72TJc|{JTkiorW0oy$N@X_UpO-P6wiu(PP>OhJaM^a;ogCh#BsDfvd;W+eLH_ z8%JTT)@(OUHZ{!Y<0c1YH|DJN;f*c1ZmV{ZkR|y03RM-BgmrTy!fY6grj*FOvT-c= zj&?MeaT5fHw>c$sPK}+Tu`u~Kf@EiS!GAtGV-TE&Sw+C9N_areZNwA4keMc zjHWg@Om2@RH^KMuz+g7h74AFERa3>91_T3$=QPftl{BbccQ!dpk8i?khAD}m_zB{W z;J!@&2dEg#oc;zNhCDfI#gvgQay`^Ck6%It9;F9{7sHa_Pncp|Yk0HXf>WOHV4QA* z*{PCh%7(8wR8xbO`(xZsst-6gw{FLvtO54B;Bj|d|`lYG+X>bx6xl~;Vz$(X)b z)S0w#0S}7Igv&NhVWz?b!~`f1+HaP;%Us!;#}XDn*~})b==z@0hHpi%^lte6RSxoC zcbq?Q9g&Kl6>2g2v0X+2+WB#@#@G>LTK{0h2lpA8(AS;RRg@@lcII-ND`;}x1w%?CzHL{=P|9Fdi-gdCkg{iQO zNmrR3Fu5~VLSE0{2rx6{I2maD2EpT2jtm8s^~rqWmc0f{TO(Fkc4}=Abd@7tIXfzo zG{GY}$+na&Ceo}qEWH%J%@o&csy2q}B1q2bY*}M$GNNgNi{_!*sd$#o-Z5zBiFH4~ zEWX4C)SpwdX_LykPpTm8&$iRl#O@2oTn?r<&157lfgJ4b^vkX~gT+u*grzm0QJ6e&8 zI+P2=bL%LZ<7JqU2dH>5YfqMn>`zRK?a}Lzf3%P-0LnVP$$lO2x58znNd#O1lUV<6 zaQa8yF<@IVXEk1Gk(V9TqGI>Mi#l|YTg3(DBKL13_EA^3l4a}p0etI(_iF1>#$W(AG}x{ zvyhbRSZ&N(hJ?mhUh|h_pvx}97}*m>s(5nBo-nZa6U~(wyk%7}L_LhNgvQRWbdtB9mq?|W3ybO0SS<2;++vK?S8eH#JLDi&;&XHC z+TiWaqZE)poI=z0N?d>h;OpxPRjgVQsxwbVsZTGZ%$%XQ>^zTpHk=mQQqSo%QLowD zpBIiK@WKyU)h+5ZADhyL(WMm$EamQx=#qXP<##?X`}OW-|yS)bIWNdPXoF%6oTE 
zy_n8_P3adp>=$%}!+oBsgw+XqP9oWQG|C%z9oWY!$zFkP_DoIcXbY8w^CV<=3jKm| z>_F%#clkMsyUnst&Fe!@%ULzf{v7_E`4>g5Vs4y~NB|_5UvS8jfJJ=lBaDd~!x-U$}3dNGuRB z+$9jf%6hKMUj+EK`g1v8W6tiNhf4#L9q7*i$rcQo#yFG`k>bi;b-_?hgos_;b}y0P zLDUhmfaq$RgEc$%HpE=BWBFieD;d8TEVzfhGD#RGQLfo_jt&)-3qzuH?MNutvLg2f zWF?49&yKQjdr%~SgFskIsU@yIP|JSLsG>s!g+&ar?)#z2FhNdp)u9S}PLaVD`&y=e zi^jFVC@k<16=2g_YbY8u_MzjLEVj#H&}_Q^b48G225BUJL3wtdG+@Dm*b4PXhFWr_ z2l;2{qHgC4y2AMNH5N z!l%}aMmDw(>;!tL>OTW=f+EIzV84J#_kEh-4svJ1Uo-PZTA zS&Qw49Jzm(-5uYdS41*2n8=D#!xIB%+7X;+&=ZUHp2hqW$XjE-&Wh2neDD0)8{8xT z!i#lI@6e>thDtEYN!x~6SPxu4!2{WOSjJiM!4oF~1wiPS3ag~o;QnM6Amy~;0|Uul z8c?SkP&_%=D8e5v)?11hn$K%NY4&+k#qV0JbE-v~`qF%(&1AJ*BV3sRirnM5_VJKc zLqwJQRY7e6baD`5GVt-n_0Noo@r2^i&@%WtG06Ek_VvTf?ugA0jLQ}?`#26Tw#2l8 zcr*>b;;>^>r`Tef%qx`?G$uX=jVba^`pc|s6oml6{1@tO+?V7%c*1s8u^JiJ(-6>PL z!uRL&O}J)9Eh2jNi{=WRj&Xd zPB`^d6Yz%|@%;f`>?r<)L;n8vxPQODpXPpw`)r0>;39im7%;b45!Tl=m5w&y8Df|_ z!(#)?0^#>KWie!-Tz}7|$HV!HS;D%Frm@9rgIoWNy*}Yzv$K`6Unnp~vHt9%|MEbR zH4=%qo#9DeMrfWJ(8k$Qf{^QJL3i3dj87%`+hS@7k~0Q1U&u}kjCp9z=FrSjNpOT7 zj&lVZ^8*^tY=Hv$3Vt29&6mG;f0AuTKF6D#;raX{i`KIAqJBPhl4LMUcz|3Oupj6H ze`g)I;3^b_D6;5^K#HnH-r(f>J-{zn^ECV!Jjzwy@Ws60j<`lolS|8*+&^gJQ52%d z&Y)}SC6{P&oR9V)b-{^tnG^NpzCqQaTJ0DsLf{US{l0+bU!ormMN(qU=quhG=K-gNEBy{bv2l z9&1P_%FG+7-1!Dq0Jy+aO=$bmqMkXC$E4$a5$O>mKP*OAlSk`pv{xKuwo+YZ1{U`X zm>xV~QAEylLFkq&5YqBhotm0P2C$)>*96VM7O;(ojfixNe!C&}gK^|zNlU!e7F!(& z8j(}g@m(=tH_$*xm|f3AWN?w%O(`LW6BHOE4#Pr9R#QVzb)2=qZ)F&V0wjF&MG@CB zpa>ixT0;NAXS6^Z&Y9_aO=QOm9fpVd)t(%qISsY@e2$(2M8r!h z6OPk_BM+s0+GSTu9u<>e&6s>!H!Mvt6&fqh8n z!7^~yx*bF0yU-_tF*dynSSX27e}L7vD`XDZ6Ov@)V#h=Up|%HT)vQWtorTOelA0&u ze_M(XTPe)46zs8Dx0OuZj^z zjq;FcGo}n%Fb2}_Hl>vZmoWd?HX1YA5!cJe$lmLwawz16A@aX?Hf74)SGzgWSsn%ETZ!`x6hlZ0iYuBycuyNDo zE3VwK^{T6{Nn7n_ZTq=rKWAk7bDwwZj_2>(^@10^=(-nQzx#%l?78u#pTBwZrHnv| zanS9mvXE;eS<(4BdF$|_{ZpIAvImYMtI1S?%6zA$x%lsBV$S5&>Ns0!%XRF(bP9~5Y&mOa=tAr z`(C&8QD1#Z)!D|iqV<;+)u&Ztnj(*NtLFqZqj#9@S5&{bsD4~Uj#K0--Rd{{)@Su@ zc9rTsUsRt{kvWQ->Q?`GU;UKcVQ62`y6yBb25?42$dr_QOLXc`cayI^uXppSRKKxk 
zeL+PQDDrl--fyFRqp!ZGcZ;i3-&0gyQV}**Ws}|Ndwlf{oG$CIZ)y2Fc}Y=yTt(QQ zm3{8hwBE4RU*fCVhOCKIsx!IraaVQijQUcy`VGGNl-^CPQhj$(T^qNuX^MRRQ?%Z+ z)_42r+V&-{%L?GHFRE)dmohqLANqZ&4_Ni)i;L=WDl$ircdPoKRe!Os zZr-<3t5mcfPYa@eL+RoW|Mtf)m`6Tm$OvtNub?T}p}+ixt&hP*g{QF#RF>r(;xisC$90KB0HYfVP}$;JW#^ ztH>lp-lNus4RyPG^(noZTBZ8VqWZLo$QHU^)sdMfP)r)pr!t=Tu~lBA@Th_Z_}Eah}ZgRjNz>a{^79_q6jYyU?wEt*<_> zck`=Me_m02K}8lQ@@TjE^L+J1y+h?+f%)ea)t6KRe_r<8F1LBEuinv$p~EJ-sbVwNrWW{~^@5zNfzWl-`lvVMX<8it5uUGEI^1bjSS~UwuaJNTs-<`qf4C<0^8T zBF8lDj_R)V)o1l?c9rT^71ifdWR4<#tJWRnm1|Y%>nXh>IAulaTZ`)Qp=t@3{i9lU zyl$(nKCgH4t5n}oR9{dLmXg^?Ro`F(x5ZbN<2GAdr8@iRJm!~FWQij0(R@#>x{@yl z_>LB*9bAgb=lc~!^>Gy$r^x3-b=$4_6~6j}-c77heREM=DUFoSDEpp(cT~68SD(^5 z7Ue5iSH5P)DyLP1EEw4z2>7e5^-aF|jNZ+xQhj4lU5;Wop|YQdXg6B*jlTM<-p#I3 z9Y2U~eNIL2TW258xVzRj`098m8244GV`cKy%{7dFDEkMY?ka12y{|s6>R1_8^qsf_ zUwuJE@ZV)`Q|k_O>wNV^y~DkboP_wZ3{sEBy|BC;oIRL7>^Tc1{u zX^On_UufN_Mx6Cw)fFL-&8$)#n}e^;rUk~GBA&03fJN)vWb@9^fYs4fGItG=Wn`2Mp`cKhCtjb?Ogs4ri$`_FailvT*UaysC- z|D1NLXX6z4`#Eso^gLOg1dWnPWD~1Y2LxYzQbi^y^3k_Y-3=s;aYqMwH?>N2DA!k? 
zRuKgtyh+s^5nE~%TA$IonN_O8@O<^-Dsr46|M)=Bx^pQKjjDHes8;kHIq9pZn(~`WY1=`&f4W!J>5>j#gdq2ig28)zQj*^#v7KpvZ;& zZ^|*R=q#fHy<1$RI*U199V0Y!K#_BgG6S8=lj+NQjcxp}sw-P+_E+y9$6U$y5@Oj3 zlix3*&AMNmR26*v+3W#Vg{dK1nm*UjJF>m5z-UkaRiuHP7d07Mpt91WS{UL3VXr05 zccmU-D3Bp`To$tRT_aKV!*CwL3RZL^Sk{ql1w@prO5JV<4m-;yZr)s+?%T~GBon$t zQRVm*dS!JVagKNTVjyq?VyBv$`V_^KHNZ_Kim&{hbo$l7%j>9Yo`Y4Q_27A zOJj0E#5)2zxJu6mPSvr)l)}Pc76a|>X10^2Js#m5$0o+v2(Tzo6age0LQ;97+_N|E z+|v8Vd}8}G_?$cu5UG7=lmbfO6JyaLmlP&&>{c zbv--)~t06H7`N%$X++CT5za4@?sI6Q)yu$=r#d5u#~B*>h~<4Bed1A{x$d z!eMX@+n8j^-5_su54Ls0bn-6@*QyA|^Cz4U$B%W{eZ$kBf{1Jj2g0fUW7HGu_O=M=# z34ihx$D&lW1p-|Y?v*%>fCUdFmVO9bel`v-c`%W`CXIf0H zTTojs6{(>^r6^O`5k$#<)MFR#1wt!q)w0h==4uMbV25LYD0n%T1_gxvS;`-FItF+P z*Su^C_J5LG{Q}uSwR1J33mlUwPaMhF_-5i~6SrHH=+?qY!AIubm^Y#zW<9YWOk%dE21=q^QBFWNMIP){i#$;kB5pqdXl}t=hbE_fqG*h z*K|hYOdybGtK6AtN;&pfyt1bwrNU2dN5UuB5eBf$#u6Kr$74S(wzV^@-WZCHm&Rph zNUAN33x2QOfhpCvTz|;UM5-Z(!(=c7Wn}LTMfIX=jY&Y!G{O1GLfHJ52;N|M&_>~f z2qz}8XLEKaAK}$<<1#A-+EuX31l1vmFh~ekK|CZKS7tKH7pkKjb#kLNeCzH8?sT9d z!YED%{URxjzO_BRq3sV@d^MlUTTCdq756j~KJC*+6&l^fVbq$$f~nni1oXH(Fn!%T zFo0CcLE0W|RXmZlv)sf5;kJv~(Ambum!MG<(8;iBguAqE7e&=+3$eKh36iWD2s zj^qltupw$V4N6XNh7(&p>Y`~14A_W@4fQhX<|}QSfuWjA>4aU|*burZgK$J6qcHiB zPTnL983+5pG`ZZ+ZYW*24V?|-wVuN^NZ#G5QaXZBva44Cg_iaz5W;Lj&rO8FXpH{v zw6P`S5E9YOO{5jBGN!P2h7&KD@@Yrvls0eatS$qpf;@M}ibH360u9V!@Ve6X6k{mV z!31oYjP6xl2qtAu2k8RBoJS^*NR7ZSSWgiaJA!9Bfpo2VEETuzhPy&mb5!EC4 z>PWsp?oIMF00Jps1Dd2{Id_t;Um!>i(zvqfkmO^Rnj|0K(+4N{C??6rdG=27u@^wd z>#fi%B%dV{;HbE=@btqKA`T-HCw%|gf1a<%H zM4IT>Q;75~Xf+l!vN5Zd8VQLe%7@+SB$_ZSY=HAHj28DXDI7&EV;5+`BG4fM4pG~b zd_gKGEaX-w+OrFB3e-&Zv#n_Ef@R&Rcd*lYIjxUvt>Hf9d|%c?&$nHBWx#E+X<~$e zof~2&2llla)`5O?z&>0|D9bQ0l-hgkeO?7#tT3igrXgH{j0N|$LkSd4z;=_-)X=^b z{Eyj)BB??EpfTs7qZFlGf`?1au>LJ1GO)2A7oWxg<*+msF~e;mLHuQ(hNG^~srIn+ zhP6i+3!6^M@Pvg?6NY>xD(vQ%4aGLH;^#vFi-_}|y+g50>|?Do5j-cg(l~%A^ovUG zhliEo0DDUp=;3%&T-8Vjgp5%u9p1+(LXg)zVw;I-sOEw2eJ*44ryHlWHc?O{xcSuAV8SUBF6@8 
z(QuhrR35HjFKrV)To``TZ#S09Uy{0*Y{o$o2IrEZSS-9Lt&j@~3mp@So~?sck=HKs zHu($-9l7F74L6`4Cf`@UYw80fwIj-fOp?QK%)L`MlxfZZ+Eyl8Me~);)aMNp#6H6F zXeKjl)v3I1w!GYHZC{9g&%ihZ=i{VGcbYD0Xxto+ql!~aI|Pc#=7v(wdIg?o2WrJS z=L9rbG#xiU02)MP7z`Q}UI0P$$Q1H>zq)q)!kt_JwnN0xNc2OUbWPF;YA%;128v z9*Ma%;9l)Ma;JR+1YRHA#S6BPWs8DDfr7qqMwxXnO9nxokr3i;>VV|ea$yni8ZI;f zAOAlY%b0=U4>6JaMa$>!KiJ>(3OG*Q8|Pp3VEj6@>C5Z6#EKxhKKeCo6eyM-<>k(C z?pU7fO7mBvurNJ&+b8RXl65GSUW#o_5IReuS)3|`#mN$WM=pOf9XP_OmffqeHMLkl zLndw~o~Tm&HEYp-T24(cQ!+USz1cCH-PIdc3C(iWR}p6k^;vmw56sM8Q3VeB>Y=db z7DnMv3m;KvkUdB~Dh%g;$BCn?mw9Ty>?w+RLWBlM^pCk}K(V~)g4yekGjz)FdEbxQ zT|aht8iv-}GPFh7&&xW24?WC?LM)n2Ohy`XVdN0_p5R*0u}j9Y2dZIJX#{4XJl3?k zShS_AEfOwhfv&|qvGvIy$m;zkqFJl7_(>F7TKpia*DJMpwQ3HaP!#Jj<14bBj3Ziu zVv8{Uj%!a=_HOR`Fi&c4#mtXW`bbJyNYj7B+%%=P+jz^e$-hO~5u}dvq(t&&&@$p< z!m*cMw3vNVLZBm%XJ6KdZwIQlnSK7hvAyE?$p{=Sl8Hik^>KB?;S=-V>gjZ1HD{*~c}P&>TsKYEp}ExJ)`>3Xs|xBfe6c%+ zl*tk`w!)S5lg|cpjGbDhh2hfixcnfLoOcrtCLJ!m$F z$IWYTkNi%Q7t7YlWZLXyAh+Av6&>cpU79g->=ZO6Q1x~lI;Hka!v+i93eBtpY*H>0<*rq+ z_O!tbNbNvtK9fWSI4e5=E+y-!LT;X#p9%85GRC3xGPW@|V*L`}U{lE&3~VvtY7Vhc zy~nj*wrWRXxDv@Cd)qW35@lM$d%r)BTgjY(wM+GITBG}ZhOl3B#TK={Lj<%($SnB;wq(I46BsshJpIjpuAM z^(_>ZNHMd~W?95Y5~SV0$ZPo$iUr%X;wsLyvCCJc>6_X^7_bNEKSZtl-J(5DXA6!qZuxm;ZNH(B)i zb#obBgd=K4xG#-h<}kE-+mY7IzAoNG)e2>Rq;oK5EelpzHzWU@YHk=a{Tw#o5hrOB z-O~y=^#&VJY|_HP{Vja&KgmY752Gd)uhsCbL}-cM4`O? 
z782c%LWFKed)MD13~4M5=N5A8>32VS*WV)&GwzTE$g{MFLx9-#h5O>eWGAJuGSE$1 zf7d)Iy7hNc%T7VAEzyG9lvO6E=8HhJww*$I-9&_t&1Op@ON4aPHozGS24*@e56Yq!{ZsCYcX>QIAZBeI{bLdHoqAXgg zjH1m;vGc(Z*ts*tYhw1S$4fI)>r-KyDy8-Kw$jXGs5Fc=QF~{mnNjLm?b^)jU61E& z^n9GDNrPp`FF7^Uhd%Ye38B0POt3P@*c1GEybLn)!L14W>;%GmcB(S7Q`IgrJKcI5 zw~$*f8wJ_$ZF9pXWGo*(iI9~Q(o0s*Xpini(A*zIwenJn`G`od42j`faFsO;v?itF za^D1JX}_}OnU2N>lTWZ(llw4&;bKA+fue@hTeirrwCj3okk|}T3MhTUDRgc{sJ4@< zMEMSpvf7!7X%93?T4<%U<~KJ-11?SV1gAQAcc$dJ`u^pxEPL~N&8C~Oe#PI!K?T@g zmp0AJ2gue+ph1iUt%mYfRmf;o(MeB+%|A2OI_h*q!lhgaQ>D^}St<7X!0;$#C)8Le zoc{WjO0iYPN_`9HtejR}pqkiheet|bW+^R~O_rTz5X6tzRl@O(oRJ26NKsDlDH5tx zEumWeK(4i_B~*(^s758E5u0UDec7CEws@Ygx90yP?){_ex~@9Uckg|_U;Vg}l|&s0 zxb;TElo@Lz8prZDPDfXh*v`Zu8YU~tGwq)C%*tAwCHW7_aeKL0CNA4$ryxoqI%-f7 z1u8*AA}xcuTY!>?P=FzVF)IQLsEJ7zjhNA5l%OO|5THc!`R;x0k5~2d!zK{P*1PY% z^JAZV_OG+|KIf$GX(Rq*ttq|)CKOm@Wj2Zvjnz!%AyJ8=YKAikbg~y4Xqvh9q2lA` z;4W?3j2R3VK4h0rSa|MWVkdTuT%Og`4h{5QbwU?cVAz`l%eon^6-70*NrO`O79d}H zBQoI}uOraeY`mBtx%F@~sVtG>$p+35FdEP`RY|t?@C^3g4ECF@sKVw__6)o%*`;|F zraFH46svEPrr%*@JIL{p7R6W@A=U>p$X&ZDnjl)hZ@A9}Gm+^5Yo}}18sXY=Uxr!j z=<~8k+B!^pub-#hoQX<7B&n(9T567SM-;cEgWY-SVJrh0$k^wsa|Z+)rS)c7>PBC+ zz=%x0W*V&X{nQKR;0`ZObYAWKxS+j!pQce-G z-;aq1*jCK)kFm@}x(Vwqm>-#MXY~o;F6)ykS~y^lyMF9GAgjw>7^d$+#5j$xhaAw1 zpSNk-Yohc$$RuuTh=(y4u!GwW8MBfY$^)5`@niLEF7+D2}Ca?|hm#{I^&=NP0N z25mqM!Mi3yeJmv68W;++>s5eIhyVGAD6^>iABRX?_tBfvnY?<8!Om=(On(gdxr3p- z2B4WtvFwH!QmSeA8BR$`p4^c=ou%_y`8DNMECtGO=uJv}al4FQB#4-(2M=;UoOlq9z|_)Yf@MvDe%D%G{H!xZyL*^= z!j(2bx}MHT;N~)$E^8SBf(INM9Kr!;hE(T(zhny@R=cQfE(vaKPE3q<*@`?Iac{y5 zqGZcp)6TT|1wZ;exgcqZoqArMFo;R|eqc5#EbBcGJ)E!KBS0SpxCi03cHVgyPs|&6 zc)Yo1!-M(U+2ld)qNX_bbbexs8UzHqQW>}@-)n@voY+yYP3_Qw-J05p20k>2Xz7xt zp5vyif`pGb%}R%zPPQcrqDq}LpvL=7x2IqqI7w5>K^;XGtTGN!Sb;DuFg92?!v=TMlM1r3jtU%V z9nmL=Nei47%Z@?kGTojKaS_=wcCM%;3&3l3a|c3mC7Z<~YwQ7Ig_cB!)LECW*JNESR7Y8tTBEEBZ74W>#Ke&tuG}MNn)GCh8i%u`Uf}8vm@;RX z5!g9;rpD1yn=p$uud=Y3ZQ#pJAI=)@%AfhmqnA7P4;%TkeSPom{>PtrL|^UK!K?>* 
zNKp=f9>4_xVl_xInQWGY8Qn41mqb0qV<^d%f^sFoYAj?j%OHc6ETwtaen?qZCgjID z!y|duNKW)D<8T~Fp&pfGjJKQh4~-XAF|KIvge>#pO8DKZ**$jSViRK#9fV^X>8&(e zGUpJ+y-aB+W-q_-4upLFU3v882l)X_m(JmFaqnC35|+UQtjuN+RhS?~MolImeH>=r z+(?HJybX?k&0ar&j)feF8)%j2BMLR19A9I;s)2kPsqxhK8c&xsR6ZoeUhYKwFeA)e zF(V+r=0pawjmY^MK{z($O&;1X8QN+&l5h;~*DFdn5L2 z6Vbk7vIQJMzE1?B(nOFww8ti*SD%PW9WB=a_H|k~q8FgcNpQG8DGuWFo2+`61YMvB z0Gx5=YXX#nT(S((Pke+L>;=wWbR}LF!6GlNQdJbHu-tduczOrqz?W#)l`Thxn(QFU zquVV%W^CRDouNyA5WB)gkbh9JX|;%7M?UrFvg> z!HrDQP6~d^s}o33Sol_f1)r8W7SK3EGiq4i#aJxxe%Mvu11#Vo6B?A#R-i%NB1eN9 zMs?6s+IRG*H9k0uOL#Nezg3CLez&Q(i75!z*E;Mh#fP zEOT}h`AdBfsj?UmyF53CeYq6cer_&n8s=Qi&wz}E+${JaCEZ^ZO?V!a*+w|Ev!2Iv zB)&vfT0e)-7~WPoDX<_MNCXUiYZA{<^lpT5h)yE&m#JjVv05T?BR_BJ30p)LG9fdG z(0d56DmmQ?LCfA~(9)tH+SftsvXdgGh31O9*ALs0!@MlwIDBWBq*PtVXTuDz>YS+w z;cU;$Boaaq=~gzlW75`8nH}|+^(3t)CBijnL)}e+oQSijLQWKYwl{dVn0b_cZLX_O zeFz5!gxR#%jxzL%$&7CpmE5kdGtk_Mw+JM;U+`GE@FxXjRuIRcgcB`{_yEN*onp~D~>`B|GPS{hn7 z-3KDfYc+20R~7HNf^^XUO8FwF@4#p+$X24PZ(@$^GTdD$J?H|4jRghD{<=V!G1dsm zf`F+|CIK@JWn${S1dM0X3TF~91cIf*lGb=B#MLNmC}&n5d1MhA5({C5Ny(!|#&dHx;q*obMz~XM zqy=N@^k(Adr;9cSSaRct`-6jQH>L|w1DPtVm#HJh|KQ=E5J|=ktP`m^=Zp8~%205k=8+}=jL(VzG z?)2Co2WJnkPhne<8D9Cv1d_)&ZV!?XliPvo8cqxflRIMr9LN|8aNv@-gcXSra};5&~w0tniE2TkoI(g|88^*QYiR1{Sc~UOSI8|Mc;j7W1PCaTl5S` z*}Q^yUo>2!^O1D}y*Q+i&m)0#w>A5C(MJv4=q+9{DZ|$0nsUS;rCmPUCFw<)Qu$3} zrjS`~%WT_DpMJ(B?er@>+F>dbKPsyYFi4~Ht3C8elAk5k(oZ?f9XbC1Gs4G1+C5^H z^*e(6jyDWDAe`UcS-rtS3R94*#lXybZc8&_hNfRLmMNvY@oj0>hal z6WAjmT0$N?;DC%w-Eq`-B;jr|V7}Z$3wKXGp15Box9zi55P0P%)g?g=U?2^1OZ3RF zZBS^Cq_uBu{4g7Sc>hj+KO4UTtxwO-(z9?GXQ}P@(PZgwg)@eUFc1d-I$9{_5yZpH zzGm!<+9HNt1=r39z4b(@zJ*qH@{@qFES7-%0<|^+9jz&ODrSK1F#y=CXqx!0y%B7_ zyJNDCeASx8lRCI5O6Z?ds)jF6TEa7?xI}MQEG9WYo^gOF+jK8K85#p%`=UyX9|Tju zRN7m-<^_&k5!6WPl`JU1EI0vBX5*wUBS^e0a<6(BUzl$BE#5TQGxsT+Cjs5^@yKRk z*2Acn^H}4Md!LU^!JXf!;Ya~b$Z!ZLS!g6G)s3oV?P zpOHi@tRkBiu3Zi0yf%U{5OjD)yC@KvZ{kT45T>LPNNi+E%#)xgDdys)LZV1TLZeKd zNV!N~;_-Hp<{34noYC|^bUIluoiUfv<&g%?Tr0M{IfWKNs~jfnI(6dfuv|)X)`poW 
zZ7^lwzqHpNbrqb z|477dw(ZSwX2Q(KJP+I6>SMXSZEsf_P(#b%#wn9*htMpKSnA50gzgs$(ee5LeK-LoBGEago$K=PNLYEU?t-9=GHDF*v z0IUyK8I>jvB{?PHhql|ylNehXv+XYpT!~Wi0ow0M2xJXdjAL(UV;5vohGyH0wwhaa*0|N? z`1-kZr{O6-JoHffv`T$S7cWWyQN|K^26M=5EV@4V;s0dkUPa@ zS;H!3*-RoWS^~!caKW*v49A)m=E@vf%r8qe4cQJ|vi*11#T<_S9E7N3heOD5vDE=# z2NC1cummv{1@|&#wofmz6QsmH=FnYZ85e||%xu8aSO-$0hm{1bT3Sx7VYPu5muS1~ zy;VVhv1psN+Jo2mBCm9J6kBf zHd0gO)qOsSNbaKYdYJjU9|m32WnMtadi)pUedUcGw3QQ+Ji}VGNY3I^6D>*P;TEN^ zdLgSZxY_DAH1_% z->w3tSTp&oa<0m2un0WrBpax}mcSVW&a(uLz(x9Z%z^w5o@veFqeE6L2L1ivr7dgt z615<0N)Cz%LXuhR3*+O>EE#K-Q0MGMffy;`(&Ddq8XZ1%IZsR@o$S3MQ_$3 zl*#&1a_ptI_IImVL&G|E0bo2}`DM;cnwF!YK3N+3z2(?<^2Dvs-0bnU+MF>;UvQ8# zx3v1Y!=klpos@KXsh~Ke#~PUtea(7yt7vkcx5qlHo&Hpt$?N5N6Hx;wRtn4JhR!Ym z18-AwUc+~S?brNaC5Y!DT z71Xy0$ihnn`SS)oa6q~V66Cv`6*D`dz#5b|#ji4WFn2s)WIi#A?N2#1P8&5sIMYR_ zsJQ#B$>})4-^@CY%VBE0Y~5kRqoGlLvr(F5myMk9qogZV(*6+4r$CkuguRRRyDL%R z3qYMdyUBJ?9$wIsY%T84g)}_yYz2oMVRBNoHY~&!a=j#$IHyKdx9yd&AQG7NJxhYr zgGiB%Ni@XDsuR+Ry?Q(n3-ZBY|i_At;Otmp=v4obx~$lun;@q&W-DR7m5+SfufO*pW8~ zqWs4E-EL= zp$-jOVsZ;0RB}@yLh_Qvs{lgs7Trre!9-(4G}FH>#kge+`TO)680d?68LWTu!E6PsFLgmXMu zLi&Vh6olnv1uAuPSb&&aPIZ{!X36*_0Rg(m=t6H0PDuC7jUn*>Uf=Go3Akfz(c^K@ z&p<$!A4!SMTkWD$u$ozN_0p`=i}=NPWFjb*x&V`6;di%Vc$R;Xee!W%s**zTag}IAc`I(wj`lsy+TW?7IS;( z^i4Qe5kXPb%NvWFlh-{K@sajPkN|DYNgxxeGZb6K3mT?Zu2#f^1VZ~QBg3c_{x%Oo zp2_PTMk)KtZF~$w5}$!c+{2pXKujyxoM4ZSPD-uD)kf1(lJ{|xn6V3o24gx@3?<%} zV*w?JP&30CS>rXlnR%u0&P>geG8PU7e}@H|EGMI{MxgITN#C|nx-audc}h~&B6CR8 zOe0z6($l)g)|Qnd?I_t~4|z3GtHGLTD4=NeRblhR6x#m92_9ab>s>rUmz=67zP8-f-x81S|)&js?Y-jPMf%Ai=`u z+0u;(WCn)`P)08k6P_ld??biU!{%%s8_NWVLx!jGPJ}T8nZmK!aPID0s~g<8%^B1( zg%Mz)06346DfLmrU0lsFEd6MBhE0Xj7+oQW5^$Qxl&C07W}p0-bZul-T9M*`Ra6wr z`+}NEDl%4hz}3M013)z@Vr!bHNZDKxhekyb{hT-^{#lbq$tym^R%eb>6AejprdoHs zqTB*rOjkKB;@%q1B=kdZcO8Qqjg>8s<}nRpZF1-tI5V$e8)^z=fJOAfW=ei{`azeA z7ujjDf__-0y}}%{!HHZzqp3G6vlQ1ndcPwuao{9y)&~#qF#H%2(q$1-yJ$niBkL|W zkr_5MoOHq{?N^GIPUJ8+k)II|PDR^4?r_?|frjNsmSG+o$<`PP9Csv7ao%CpmanAP 
zKR)6}*5-LYlDw1XDNW?DH1Q81S)hwi*&x=7=QjWsRi z=Dn(PG3t1k(go^JF24thfJNbFC|=v7hqjMA>+l15Mb_S_jDG+N5J@KeQ%)e zFJCEO;AwZ11qlPdg@jRM1c21}RV0kNpC=I;HL76jqi^Jp78`*B%u7gL0gP8_Tj4tG zj}Ya{jwG=GCuBU>o7A?SN4$!3iqYS;^}u;G*k_Fvf7Jkij%4UA^k%Q8wV;yKY=Zj@3qGhDna5ZZ$FI? z_Xf;VvR{@)zzBI(4TcKB1x7e?Y-{PkFQ;OnJ|x3yH~pi2QQ-@%;*NlDVjesj=bvj> zq)m2=9N6yHmjBoi@-RvEky)pe3hSIxLaqkRqT~ytYiY~BZ|}ebt@5@AB*&>Mipm>z zl@|n{3T|uMOf$5XMN1>X$I!s+S7sLn@~9#{BHR4q46Ii3>A-?&m+0N{1UhbxKeD|!P2)Er5-v@!4D?a5y@x<=u)-UUCi67;cOD6T~7a5D28|x)! zC*>RU>N_dqqR|SqSz6GF66P`{;BEZMxsUL>{mR1FUf;uOImyFoX^GHhb)36Z0RQU7 zEE5x$wD}GyE8j0vW;$b??#PMv$$H6enz}JbwR9q9~a} zF|)Co<{Rwe(g}5x5oy!SBb7vkD6Kaey29OYP$kJ=N;CLR^ACRP$u#}=cMZYVwowN9!<0*yID2C?{tQzIyZ-r>qwqNsH1KT2I-FTu$Wa)N;2`%PFEN{$vGD(Itw z+tS}7kbYybb=bEp+0$>=I&;SJ`4N*`6gey+LzWgvr3@dkZYn_7-7}1o`FkTo6`vQI#M0YGmuE^`p|za>@9HevIkNphHWyO#r5O~c=7fmzNgAb5yKQcp!;;oy96bGr-^p9cg<`enQo z7K_oxPAR3$jeA`f8ZO)ES1AFji7qIzj=G%HM=S#DNpfvPhO0j&BKQD!yqhSW>}DQ| z@IXhqX0$m|>-}LQDCi#qpnG|4Xv&X^;sts7@mM>mEj$E&*e}c%rR}vO6klde&mXoH5Ky==-pM6sK)c0tC9aznXW!RZNlL4U# z(J4+rCP0V*8v&;yIB$)JgH|aHT5^q>5NJsp80*^wC^ivxHHrxfSf)F4`&0TPrAKY{n$!vK=P7x$%BP_Z|!`1o>Q~1&iB@GxUT@PnuJ{ zH|BD;7>sh7IX5lC$+8E~WJqt-@RL~qYUp7ZG~Pg=Ld_t=BQ(i?Gux%7*ikXQ(gxrH z%P(u}s-)D5;-uSkD88)H z$t7413|!QN^aX3I2yK-|{z@zT`M&Ig&5cd8WK%}D*}OFq=imJBeCoT1oB*i@gSP_J zr8+;XnV<#oHidutsmCd-^l`Gb^Y?#3RndC(+n>)rrEk)cbOA=NK78flLa9N^m@DiL zk-73tezdI^Qt9{D7syk5{SMxPeGdJVIWz2}zknMKzaJ*dJBQt1!bsUMS&~kUHtEdg zAlpQ_B;8=Uyv@Q($i}t6su?jNT7p!^XAfvEb2x5U%IVhUQD2T)q@vTGp$^7|d*)Y@w;vl0y#*HzjWym5# z*z1zLLW29;SwB(+mVyRn;PC{?S_3e1uDF6{hrzhTC?E-N z5!qTHGgUvq3;n#Zln|@mNa4^-fJ(&vHA~t|WY;bSGC2sD86$e~5Ww_;q0`-fS~Zqv zgg5;S+6}`yLn8uA$MbmZlTKO!UBu$^xn)uy5j8kZAIiVg*z=&i^z!f7lXo6^FpoHY z2+1$+)!P`i9Yp@cq_}NNrhH4nUehWO0iN;;Ck~|fi~N&*P<%+hKUJJl`b_F5lxy9X zbkh%o0vxD-W+kKIyP25RyQh`ZNi7aCDv3p*Ma(9(b`ymySy0AOOI&t0mvq5p_dt{_ z&8u{<9$_pkv|6%&(vOI-l)pyL_Y|xpdkJ@uceRz0Sd%R&eq$;+d&5xM-=K2yf^{o{ zsarFcWC`DBHa{seVtR6TZi#o&GHmCd7*q}*z&KQZ2UPNt-NkhBu@l`bgA?Wu;pl^u 
zd}om|^OEg(fams7?;7?Jz^pTTo1V4|LLB?IHC_$xpc9zb+Zq6mQDZ(wH)97sSKf8H z4|QhQi4q8iR%`-=NRYP@^m|VpPy%X_?$WsEGl{J`gYJAjc%m%50Q`B2ljZM0y@+OR-MAsxJlZT1ZH*}{~G&2!GzP!DR z=kQngh4<&*wR;MOf-HXW#Y`DJ!V}GIg?+i3psg*Fg!$WM3Gg0l!Aj}f`dHwAC5`0$ z)-MOmMRE^V^;@+-w#xi_6EO&@-$_5x0ToK~Ob^ zIK)z**}hHF^A(gB)#;DxR!awwg^PFL1_#o}$QL*T7YkYpVr&5BxhfA_YUOhvink*$ zVZ5lgS)brnaVab%aPpCkx~y;|(2B0DE$N}6&ln1$X+py|Jse{sD499phj+kXS?AKA zVQ@J@b6mluO)yzRluUM?4>=zBfc7vzEX<^l?32gZoeckN{*7OK!ki4|bJ&o-|D$vE z^TiqoowHP9w&EABA-aMC5w}#` zlrMkrC)2=Mq!bn`Xhoq?3a3Bamn2c?b+a5=rJF)fe#D|)PhRtFjcu%b{@ET#x|x3$ zSs4YR6rbpr@3EVuc2BK|q9|#!k|d7frv1VEZsH`Oh4pumOHYe={N_C%$*vT5aI!7C zyOV$4o&GAh^TElM3{oqZyp>Z|nQVkC_GP|)Hy};EImcoCQ2ykF#-2AmIBAs2ijJ^= zXhq$)qtb#b9HD>+1C6=%9iIQ#6KMs$cL+Bn4Y@$`DN1}0N+B|j6*tYdU;nIqqjr~X zCr)ASi5(?jd+8!%bIRm8mry9 z$pqeO>wK_(4}zY6jXa2LsjLNST%2T5gWot{jZNd?%8S;_t-I2XNprQ-J4f3^!$Gyw zFT1^u@=B6uON3{Gz=@a#HRUxp42FHRKd31$G5U@1{WBUj`eLuTMtjW(GVUBOq38ex zV1X+@2&@z~2t`_tU_bIPC)rr2F{~=7G3|piE4@@>TBECdExae}!tS_qgRA z86biYCXh27BToFkQI~cy*|H7HGM05P?B%WWXVBbRqML1xH{F&1X+#RRLWBM5}N;}>z6YoSc3?5*&@q!m_pE}hFwIvPW zeqW~X5ZxN9Sg~RMFj#R`>5sAscHY4VU!56x>^d|{@0ftWks5$900Xdq6#xWO0My8i zDBQ;9F>@q1vT4%%V{#@kgEfq6dfZE!IKPmVY}Z~@jDi{vqu3&49VAl_gvenj><0SG z-}CD2uXlp80usWh{hhda8z(P5jF8&kp0O)ly-obSx@sGTTEb$CrbGE zPTjlx5{9BI$Lk&nsQ~|omu|m=Q7Fmrx<|oOeTQ${{yKSsvTL+~iUYvSuKe!&4-UCq z`5V*mTK7h6C%Pfivihr)(u9t~_kAIov9hjk4UC;6rcNx+z;T)8=tG6|*n5P|#QqEm zcD|c4xLCmR-8{+@*mW_oer6JSw!3#@+p$)`>Mu8@&{!iy=odKx@Sb%Had08KR&Wj_DcnY-Ln6J{ zU&Y3S)quKI%!H~eWa{a%n7GOsD1?vr@3`DxLB5U{`N6y&P)g_Je z(c0O_B+W>oV<3})xnW`X(ZtqN@ifPAvSt%xVzS1_12OUcr+b#|Ssx~>9>r90{ z-+nc?jkvK8GW*;yA>{?neT3(OZuxnutz$SdKQ8qbnu}-@oOFDNd+cWo~C# z3UoJv(U@g#`d1Zb;Y?==ZKV(bf7S|dSz@h_!5_~E79^v@g6jwLWh8h(52!-&-6otOr@yO5L|EfyUDT>jt z7E{`cEq9r`;0@&|Ux!`@=fRHLSjjzp=G0slEIGbgY$(ggQkNA6o;b4`8IAYA&+A6L zLCR7XB%exGAXokP*UD@Ya^(+|oK*-sB*~6mb!p1l()7jvJqyr;I}`y8a2MGz5GV)m z>%29dZVzztJ(y?OJ0K8k?||kI_&eyCD68Me*hc^3%SL)MWCZIFA!YXRB?5)AjwU0c 
zS272H9#v|Aa*bNPnVxhe>1P@^ub9(!<;V{B!c7s8IUar(g|R{xFt0eUema>N0))?#5R|q|DYpBO}_jYhr~x_oW+H#DXreh8HWlq#MRB!p}CjW3KyY&m1y=^3Lfr>_A&_!lI4dy_r|#dp{z6gzq`) z+4#KRd%oi=nlV`*-sd~ILYS25#UOUx$1el}hzo3eR~`|MpRlkKVG8Gcq_6S5jS0`3 zmQW}JXN+s|{;kgYNLR)3IYyrOl&e&$w;c)|I8(v(HjZ1x%iphS-THPRk?HoWW5uB) z%V_Hy&wj1e;g}zgPuE+&1{IIC&Y|bmY8|tk?Tbb0BeY}Iz<-IkCX5XMQAPw zN}YDD{;vz@XlmQ!75_}xpS6uBo=%#rcBk7*?N5I&eZ$0>LH?q3sHcnmln(Ms{xm(v z=e{OzCZ-1YygyA2@&$jI8035Xi8D6#`BQ(8@AoIJSUKQN-9diPpE`ql(Vs|1KIBiW zL4L%anuGkPKM^fC=1)XQj{6fxi)72lsK6S8&MZ$t=AZ4=U~~78-};cJq>4DO8N;Es zw=l2&vE$~}BDa^D%Oum*kifza8`L|Crbo992}j`V#msYOa+2{Igw|A0EJ=S+z6ku* z`GLzHOo2pJ0+)R7#eLB6c8RLH{n&mkSF{z=69ZI!*=?h_O66Oq%bNj@Riw*k)9c*I1qLI}T+{2;Tm$;~Lr$ zaj@+bE9&|vR&DSlhkDSBASPMMIvKJm7UU>y?%NRyu$mcdz+zCV}`b*%DAk# z>yKXnaI25*bTK+|1TcWZ=$_{$#1*4s4Pe$grQRUe(8uoUQnn=DZUt^WZf5n!$7y-r z`Mhg5V2c4WNH;~mqDC`k1Qo^_q(Xs#gLJL{$z5jwr1Oy_7t|5XYR1=_x_l0)J-`KH zNu7K3N#^f@KCzHF-P!Vn=k&?yBBbH`#Jr3 z1vac}A)7pC(IgEkF~sn)kA0i*cC-a*gFO(UOrS8bKY$_QbE}>Xa0%ogF6FG|vOYQj z&yMi0JwYVK!l}XfhM218EML@R?}PWm#dabN5ksHSg>4Orx%6#RDoSrmFcPeXE+!AV zSO?PNBMYwSaX@i`hWLxnV z8<*BBO)@x|+D>ei00XG)Zc(B;1|p&;JfTyN zGhj@=Y`1(f_*(tbg%)>$2wLb49`cn@R?L8wsp+rVDzXTLfc1@Rg~z+5HIfdyq){?A zc%KHWlkg=Jo3xBHKm1l$CiWXQnJVCyn9FfMe5ZBfNiNOI(nACy4s3BE{A)jil*{9<+u8L)Mu((s5>V zBsB|-tfHlH@9e1l*z%jIb~fsK$kIBq<-M^KgcLhoKh-WQRHe=_>RhqF$PtSTEQA%D zCHV_b1)}pmZD^ZPgT#x7zck4Z63|5xx{j9*?c+Ai#`2~ieN)d`v!-9`mhHw$c}a8t zq5sv`xna0zUmX-+s&~yUvS!j~pvGwvUq!wyoh##kZA)Py%P64ZawC0_RjtyzlF61D zU(Xn0Y40d~0(Z!kh|Dce4%??+?^^ygbcKkE6kCtd%fzxNkIFX`iomq95Ia@rUgj>5 zFfeMC$5oAM4T~1UyNHXstMg`TuWJ5P5fooYx}*1R7+l=yCupTZ7&Id?zX>#FI_vMZBujRQhfn%eCU~d=p^7ZM{gsBvC5T``>KPkU!@(jF^c)rol8kFo< zF9ThaK&gRPiNsMl4_G%Q+lO{5G!xR}d@!*@ln-iSS00YhfpOj|sl4&E(K;puqN7{B zr!~&Q#Uy9Ak6*?d2z;(iwu}tCG5NH@_$l&)iqmE>f62J>_@rN!h zeBwxk5zP)!MI3g~APq^Ys07RxFK*TgR7kF)C$o|@{tDI*Rr2x!@*>CgT|;Wkh0oENN`ID;5z|Oh+)45VNzl|Bk%WU|su|qGvR9wIe#+M=#D%MSBY>zeX z+{?l=>IRssKylC-7`m+00&gLq&`-E=Uj3FFrKbG7!@sul{d))>=YQDPjqC2gH#XH` zlDBtH-C(EhwY>E0Z>o?qJOkX~zZh 
zAYEpHL0+3xa`u%Bz%XZqE0v=3|1-fsVpBK&n4Oyj+w6wz;eP6y-V?%gz?k+BnSldh zg>VpNf6F9hJkGsBoDB!s-Q9}1-A01MkXuy7jkX_2r?OAwPB7zdlz}EBkf4Xg0DVxi zAEv1OOc>T%W|CxlC;2Pyq%jK`TXFf6OjwbIh^!)rs_N<2nn6KnBNH=Y0+f#3k`f4b zvkIZ=w%04u^JC>H7`Y2=DO=xYJ*crf-VLE?_2Ykqxx2F_k|-p1X0cWPY_yfUTMt<; z!hxIk#HkSRvEJkeG@#uAQ1y+;tmK{9N4M@$!XqzLS5XaYV_ry1e(bS)QHSkHovgis zJJ5-Yn5yIhZH?@#H@RI+GFofsE;XdJ86A#k=xw|-TYygpY-LmTs43F}wxOGOZ>1Mu z*DcZgD>ab3B=6XUxo_QSXWP#_mVbN%>PLAwXNzAGpDR0?i`1Lso#979mgG2u?CTIu z&@fgRlflEy=BpK_rJqUMwQZp$6kz%*>~OKulZ`jEj{Nb6bDPoFne!M8K<*|^AUL3v z85D2Vn4lk1GBwSMRYr%w{28N%%+8lgV@9DQ3EXvjNDcFW>0juz^^8p;lBgW~Y!ca? zL=XDWIUun+#UaDNQ;4(tPY`CU{MAZDZatJgKDU&Si;M)r*c9swTLl*)s|0$!^d|?* z8%0hx(f{z*onXTPu5$g&NKy7RCWem5n=T+{(c%Zn7*4b`!zqD=UQK%^ zSr$U!vW6M4jezB6(H`wX`0L%`%)FzmqY=k$o_e` zaIM68x)8{0oPlSHOHpp##WW5=3K1rO&#LS&;DeZU-&cr*_t2y3qIbJPp3csU4whPxe%O$!_l##E}Biu&%zldyoEyN_#ZOo zj4h@U&#R?~BBZ+M2W(ZI-)@Ih7GV&1D_}dfi$o#jSP0F4C_1pPYC|1Jmr2|r@(LV& zpyf=WC91qW{AFi)CVIvTy$cR$Z(4EDV?h3YFJWnVprPGW?+WtA?t9 zQOX-gX+Vj?)Xu0)!$s}PYOfleY6GCxZcsbJ7ru43e6u#QvO~7M38HJ^=Fii&n#@Eo zi85FgWbtm`8g_P*r0z3LNsI4_cv!&uOEd0nqN`=I^lQnkh+x$#l)|c3ekyBJi(^DTAA#t1%HqMgZe}G41Sn-0S zwB1A@L#GNz?^KGUb?>C<&3CJTNl70-6%7?Dt;#twn#u#epLZX3QtBYi+f!NZshJ+OIYL#YP}6uers=E!X_KX3JWHt^Ou4vjrL@v~h6&DSigC%_ z(tub)QGGX+SYt?N>nSV{v+hVkgwOjy%E-TE=o&2m86rWRd1}Wmocj82e(vadKC@T| zxY)Utt|=q>lAZ-m2j|9?OKpNjZrg8xZ4W0f;_bEymcFcz)REw(5o zmu1Ur+wphOllG4EZI(2@20`j%6;tYpZ(%(CRI><&%t24SFBZ#`t#Jy_i8F8)6{y5s z|F0eGfm3ExBL;7X{MSZo84Ec>OcZ7Q+|H;&=*uMLM|#rc`L;GnOBJLq;F)#jpCwgU zBi~y#vbAhvR>gg9n)bok_B45UB-GO{l#Li1Y;xfgC|ChxKoRZycTjX~SDP>nM-oAp zan5QW{avM4je|FV9k2~@C zWJacw)Dz^|N%wcseKKtc{!WCGgzU}+ixV^DG8ru#xvrqftAENEY>;gBY61;}8Tb>) zTT749v@nV>kDvRalZZC))23bdd-k)x^(VgabgBell)wfKUw|Dpq+5{QYX|wep(KvN zyh90Prl&C$@;QQ`Iy2_Cpt8-4lTh34smZw4OQKGfB#Ed?=!-7SHlKm1|s%{I{I zK@3k9)xeyPq@})PIHaGbT1Y0=g%;QY=esQBIQ-;ewhPr7Qbqt3S^I|+19AfRzMyz@ z)lfWf0}ZkJEPh>dWMxx}YJxGcCC-{;i=3J~rY6}E=UEGu1(6=9nv5Wm6)>Hmuc7Evw!+wMAAye(C@D 
zF%HpLNhYE@7I+^ln+vpJQGRbtfqmMzpw<-h&o9#2jgSkZpGv3@y>gJI|Giqm9?JU!dW+Slw8z(18q|%(s zkxIeuiv~HVyPfny2$Ri?6HU23MKv?|3<8S%w0CffO5VAHOS={^UTr28Nq0=33kGXt z5Aodsd5!e*QOq=NZd_{N3@@jh%b#1=xOBYKvRo2J&*$Z19YxPwc!p&d7Lz;2)-BOk z9AZB~`6If~IABo=F8;WmLX+Bu)E&(xBDq&m$Z1iC?UPq_tf1}@8o&j^0u}w6 zZ{3q;CM(EA>(z+mvUahK4o$>@cfKT+W4%#gIo^X<)cpy%uM^92#ShE;0OMJ z_GYO!LM$h+{7xfTL@ej{6dEq-JM+|I>@Vuwi&pb=FA$53n{s>mCEDlo2r~LqS>QQe z=#QekKaN;bkajpWrI@~b-p)R?b3i{Ipq&x4w)gB7x2k((Ckvf+@dKvCfl-^+rGa;bl^_~A4^Kn)X>Y&d0aj!leA z_YsbnUV=w8-Dd)?p3`^OdjjCj>fKqZxx}UylMQCB$$&)L>gNmg^LhSlr@v(Qyf~5D zO-Kc{e}SE;D+#*LktY#x0lUZahzT)C$0sEDPH@v}oma1h-RG)ymb{%MwR1}CoT8l? zb`@bXLVKaAbK2{iR-H4dbA~#9*QhiK=njIP6M5Kf2WwAeQ$NHoCUt0@wRfRvZ`s>h zR(t2w-g(+vVSJDS;4%Xun@7h5G+gwm7gcpms&9_F3ce{&&_W{i0uY_g_u?ccdPa}3 zanlL!#YvrLQW`Rf)A@l#nlK$EBv=ki?jneWcfgJw-KYMT{{?-ztY1{Ag~?s%FPCNr z`x&htR+BvZ$Z&F}+?iG?|0IW>7X>w_o=F7N|BRFgg3HkJqG0z9&gl`6A_lmipD(}w zBj{Gdn)KWlCsAV9W}l*jrDsk^Oe}~_y}66t+(k7vFPfa6*p=T@O`ZegP#mnP?b9Rn zjVS$qsvV$OfoUUi&VSL;jKH2r?8h;09+82t*B5&xHWV-o*xw5iti-;y>?qq|sn9e_ zh^AmuXjO^GlGoqUDuQNcb9-6AAV(zv!?frTWhQBOgy;MS(r^T8GzbIr#rvh~!xUj# zk+vZ99j6I6H9kGr7m5c$um0i<=nKUirjYe@cQP_Bz7QdNFIRw^c0f)GkQW5V3jneL z-^HvS)c76#eR>4nnx+G4^#HS8;JZCA7`l+GqZ~AKd_!9!e_DvYlV7OMsV*eoo-H)U zSw>sK`K0#bI>0Yh0H1S!&k5iQ0{Fs8%wdVl#s_+emZJ-F3J;VXv#Kl7Z_9~li4=6u z>_s)o=OsHOggx@KdkJXT>1Ro4AZ3wNfpg`>l`sYf_X;77n>jtg>%z@~eqJbWQ>9}F*4b91EnI+!9k{($W$wGNfy5Tu2lo8_4vV+j#ALHH7yz9zSPk=YXD?hm$ zqpcQnX`Ur5+4Ntky1L+9T~Jp9#aIi`RW*;mQ8wh8esEBnQ5`;CQ27_|Y_!v#x4l4` z)Xe}@&^uAeVo1w+L^-OTbEmm1TOtEz=9KObNj=EG*Lz3~HsZNQk4EpEN3PL+XINIo>kOD!)#?&)yuZ-e`q& zsI@I#kcE2Gem%u?!Z*tq&6_{BH{ZlXtZ4G)2D2nm$1}7{1!Ytn(I;`kLuvrcO9G$g zquck}8vn1pCma><+Qy%Cpj>ceZO(OerN3gXqs@);9pSi|8wps(Lh8HSUm~g|8hXK6 zLbIsl7ipxNC`P}Tjx2Re#5Z5f)J0VSVI5F6rNc_zQoY5wHBmVoS5;ly%L>_?mz7^5x5&T}mA>|CY&Ni=@)@ z5=+7$hjB`RhGR{MMJUA+c~3lmetNO?bK}+fv7q!R&~yK9kwttYpQdBIwSNNU0aDFw8Bs>dUNc2 zW@;tN=AqCUw`mS-PJrTIRc)UhA^8^74yf7zsujWjHNpZLgQccKpD2~6;#j(43{2OR 
zC~NGM(_D!3i(oE*drps#xm*A*3ZXCJ0{FoDMm;LxqF}at32W(6F&mfIH*u^mS&%>y zp=KJ*nTw^HU#wg#^YWz7P`;4t(qxt{mV?C)$M}J9*IX<|>u&{(Or zPxbau?{lhGb9G&#-T~D+K)v4${mJJ|PgCymUem}wh|0~`or|+0x&Pt8L&eT)(teaZ zU^s+p@K`h3q%4r&myso@Ks_U6d%7aoc=#19RAz%6Wkqz0z|v{;*=LNNNaB%lxxdrj zYbsKg5Q0+|gLiw{$G*h+B#ak}|Es_1x{+_tsp4$IYqsrAlU;5ZQlD*aOAS8xdRm|G zl!DwtP3nT};b4G+^ir4K2p`WyGgg9Wr0;kdt^1#vx{pR7)C*V!8g*jFixE?z-z(fs zqA197B;eeE{2=bJUpoQ#3L6F3Yk9ngtkeCo0cm02vywAoe2 z#1P12y7`6eXwjbZ{kRN>VRiZKs%U zXSp1$!|B&ZNag%r|8XZjB8q^W;7;rCXVVvzu1Y9LacJx1K1$eR6K={{znXqs1#q;N z1s%8(Agzi@(SkmryUI%LxBv4Ef1P=EJ`wi z?S1VJCMKt+(ls}%UH6Jtf;tEUVDrYrt9Vw*vJ^43IT5vyM-#8q+i2xmf+1xgexHje zKw%;rjr?(asea})@d}92j>4c_83D-OVe6m8nv<4qDEpp0pem0Qh!|g1wxr53lX=2s z-O&bzh&eJ~C{xUvPdY`@>uIqmuXza~^3E+47>d!`8786&!C+Zp5={y@H1d`l+E=K^ zAsu*MuUOII?V)i#)OdmSmVB*9b#GwRk1XU@bUC!s3(?#Pf*a7UB_C7y5@ACfEMVwH zPXc!VR^FED;#zGnq1V?I9-M2nCF&S$Y!o8RGLQlovhrGuJ#pQQvFu&d7`%tb=XG;D z#vp=#sxSeVDgq4Qc&)a^z%{-tq|>$98i%p*jUi~S)z~m513*y3#NI618v?{=IdAnN|hHIvRO<=NWz;cVC90* zoE{V>ugG2bKS%Ro1)MCmfnWy#Fa{1k=u$SBL7w%5EtLM<2Kmq_V0W+qKLP$M3PunV_0O4J zETG5C0@6|h>Uk%YPve=V)t7XsF4M(Fs|oN-+BAn zt1N^3E0-I)?I-B^1LP*zvl8^$J6^MJcqo&r|ImX5w6%Bn^7NC#<_;EI`qRA!M>T2_ z3x=#TKxprz9h&B(ZmdZ)k8kLO-UG~irf#zm%Ba2Q>CTZw2h`VracSYDnbK0=*8Qm4w|`5%iq zFTH%~{6*XpC3$xKrv`tVLdTNlFOCrBFOHDraVO05D;olBI${3O7_xNy*t8{29yW<# zB>8xaBrlJW#3v!$zAz>^*(g3qO_pMHiB)xD!8R=@0oSVDUF*^KK zohX0pTY)GM7rQH>q=zv?`8`jTB$-G(zNd3_k&F+S=RQc%KO)@s*WnxCwe=5x^uGIVoeZ@JvA zmgKDTQ2RF~l);<4xv{S-)9&=Twn8=Of@M8#jVzQab5+9&f-XzHrFCUj2OxQCL}G*= zTQWaOHGX||bglk>M+n%Op2AmlTjNaROLT#v%&39j#;VCWXe+~d`lMFKvsF(SJ)x$o zr{`39tBjoIip9AVzqKm9mEs&7^wGb=G4r}~mIDE5k2p`LyPe!vPLk>zwp~mn4d%M8 zlk&L>ktdG#A0|%@p^b;`nH{fry3i$abC1o7QAo+X7;)I4)X){mXhisWb@X9Bi~dzk zTtu0;yC2S@cS$3%1K~lc;u~n?uh}Ku9_eME6*=UI?PMC`nb$=!@$em#ldzq!;uG>w zk=;6M`LJ})IsRj5Uc^vyxBk7@@o~NC;e5`=6+N?KcBIEJE(xM)WaCQJM)oR=jQxrD zXPQ1T1S|Ujo|kv3*1Kmrok@P z%7_R#f#UcmOFS?m<3<--Bl5DYV{h3U8+&kUNGpfMV~*AIZ?fp$?AZPzS&Wk3a5!&0 
zjMFBI=XVk_?IzpJFwF2>Mp}|be!s=rt#B6a+<@+nwRs?7^*wJC?J*m^nn!LhxV^M) z-S>v`PPRS%eiCG5lur<@lLke{jjQm1hq${dLP2e=UEc6=e; z7UNDjpOZ*9v5H(oi5J6iL8>in+X%?nRhs#xkwNP^k#z2W|i4p zQt1Z>C@QPoDlW-28s~V6NiFhcy1OMl#&dp>e88nc^PD@JZh5s+kc>XD2VT1CY~ZiI zKGFcZCr$x+5pmPE=x7Lsy!WGmWU+)q{-L9S1@ChH3okAhEMYJXz;Q2&Bix~uM`uAb z7*oFd1gi`D%_B9!nD>`srpA8N(0EU(4{F$W;~kt!b3B0y6)xeFNXf#v>_PT9^SyEB zo}nGIS7I>0J?F4MTg>FH{5f-sPw&A%+4>+2{4qv4M)8~2(aVG@_@~xoT~ng@Fou4T zPh-0xiB$b}fr@gw^W{%+<&|NA```AltgtIneE6af0%i05ZC(Hv5@ zRPTUFQe&Ujz@HqD09vHs=aQ0Ys6m~_X(`DMzQ>kpKThwgr$niE5bW9?Yc)*bLA7Sc zpofEGN#sv|$d`FxB4iT-M3_4w6JhS0*r{{`ur!0lGhzAyf_LZX}}V`@LgN z+Hx|CHD{{w>D`=nNOKnmu>4#1(vY?Z;O9Bdf(y@CZ?f@P*{|5>2Rd>4F}KhQmi#^j z%Hq>lQcUGdi6sNuy#nwrh$Xux5z7@^7ojQDIU zXFlt|6N7T##hD^I4Q)JU2B;5}L#KvF%7(pyGdtS|RKc0Jr8ykZIg`bj@g|etya|04 zs66J-*eI3~coQMHD?cw1;~Ms{yopy=o3Qx(&HMvrMq)~<_a%lPU6@~VrUVH!riA1R zDuFm9TY$pkjg23SjAu$wy_hn{kwxLb#t%lslzT^*Qht{>e;num9)mGira5OS^cgr)T5c8{ zR_DwM#+gSt37qNcm3{2%Gj;jjC)z~hym_9gbVu}6v>7;68Efano4sEdc6T)X*MiH$ zhZIIX1~#UOG-nPhqrBP6djD5ZD~!?PYv4@__RbgM98s5kg{t`!8c!Bh=Ecj6|K+-y z8{yLKj+=G*9K_nn%BDL{YFIxstnq7@y^8NjoRr)*mS2OoGVarW#IJ`mFot_oeuenM zlv6`|HjZDpUK_v=bOj>1^AJ(%jiy5puhcS;7x>gryd(yX;8pl|SH2e)WB2ftm^CQU z?vClV(?@e*rdw*z_rd6OVjVm$h{0Or$yr)3p8dOId z)Ky}|sq}z$dkLHx22$fx%(oHUzlu}o}FI*Z<`b1K_1(4i#v zv>t0JBE4AfDYELHMl39e$iuOPHmw=BP|ivNmvv*wuB!}O%8xg2$s|iZST}LQXy-~I zH6tTv;^&x?5uWYrVAm>E?voXvvMzjTerGX3*KBTtXFEDB(iE$>w9=_#cvL1Fqq;he z8ehh_@u-O~Y8a0)yuUw>u5ljS@%jaXYIjHUG!wRxF%6HSjCl~oWT02jq8)K$Z^!LV z;|OT7hJps%=Iw#!e!QgLm&w0nb!4Qr!sAJ>kxd()T6 zwe2g|yEilF#%0clSmxM9~r-m%HD=XeG08^NT6KX7} zhVZe%Q2wrMF@RNycd0R-wV{cuC+OMhYxh7ne8Er;*gid?eqVsgdspQ0p6gwBxVsY+ z!R>c>kOk1H@J)kgKS77fz2d6ZjH9DZuDJqK&FlbgPC#93MlCxBK6WjNBAuz_d?wJI%P!Y9|Byc@5=)XbW^- znMI9^Qve!z$$?U~FH+{_T_ns9EoaT8=ca8%$)*o$Z~76ijyJHaJ5l+1wD|gEj+$n^ z`I;YNTX!dYGSU6<9Q2p8$@;M7?Y!H#NEk^11jy zT`)?@JU_6>h<{t@SNL&DG)J$P0Jv=huQrpY%A-2bEkJAAqXc3UK*Z&a_t9e43-a*n z$FEKLBpaH)Nsn8i1Ab;Y&SEubyJ#5pL5f;Jo~kTb6O>IxoFEEDXLNM 
z#$=J}>g7$s-)}pf5!e30g7ZfrWsT{D1IveFV~NU!Rc+kC*-zRq2aM@cF8@yYEM1yI zKAf;__ijh&w`>5XqhVx_aK2U;0H8m|AnqnLh!8>@8)W?z*svYbXKiTgn-17GH!>`3 zhY}HJ@APbZHey9fF0oZPKZ1>gxQ-2~QSip(LLD1(aWQy1WP@qH5;pc%M1>(Hk~%2) z%_oPNgN@G$8;d+Lnf9-87z=D1itE_00RZ|UNgsEUH`DR`5htx0geq4HEnWtZ8sW)U zM15RcE>T23YJ$=^@USIX8W|+MZ|ZDr{CbRQ{-WqD#+9tZJ$YOlMz0BQ54lEsN`c&u8t?68qzb6GKX#G1!A_K?R=Tm+HlHG z*!IvHljUSM&8E>Ln^vM7J6+}5GLkJiAV=HaILE&(3bd7BKUmOBx13Q{aLT+Mf&N9@ z8Vlk1Qj$Nq2Olb5!#?zRlq5Xf+!e&eL8Q}Yf6DjY=}jGg=Q!oT!bFy)u`RhPw=;xn zAB9nalD+}V(quK)n(3oPKArT50V}BjzUQEw%@pAG(>BdMn%VeF%-t`~2Vtm$c`Y2R?K#6fZkRwgWVWxUHH**G2xh^G9-*5`7%q+w z44oLkD673B82lik1hcPvVMEenNM!NBfbPnbk97}fF5US?OR>i&W^}~n0$m|%n?t9k z(IW=eE3DN=U1?L8D>n26^m~_}u?#Tg#`1Ulh0!yiF$Yfp775ILib`V(*U@IBQS+k! z(Miv+X-?!@VDgvh|PZ4vxdhzF=YiF zO&jD(Tw|);q-)t>IdcQP7!rW3$0h!CPP;KVb0v(JKZS`TMh#gv_+ePb1CUWyZ)$2${# zY*(OP$+~u+W4mX)TL`_N$r}&(D(}rOW zv7kMc1LOOi_+GTgJwOoxKs&_|0DGYhT{5yDDnT;N>pN(j(|1mH;NSDsIcG)$0kFS4 z?AfS#qC<@;XZM(PBnhy6Fq`dEf-0WXJ21GU(;9h{4dwK5UcXS{9L%6Dj1At~)BI&O zJo;!hJ;%fJ+;9yWfwl2j^D1HpsJG2OIv@l2Tw?o3YlK0}PH{8Mb}U^o>So_k;HNo? z+c3<^@yWRaF;|mgW<@bMW`Y%ygW^=qXER&Fo^}nzt*&{^71ugS%bTv+675~pi?%OO zj2%y*Fm@|Ix#_ikb@6Tux8Rbz{ON4XyJw~irL%1HTLrSSeJ2o7upN;O;RUlW4)~)2 z{@4ic$48)%eUhIru-R@H3fF-@xw?1__!AC1(+c=MGLtINhA(<*ghOq(#Kh1a1gn-1 z@Fvi*uE=}kDr16|w#o>0UnShaSW5|LeIV~eeLB5_e=GiH>r4BJ0)Iz?h0-{ozhrzH zq(0&`Al0(cO$Pf5CwM2nMe&lKm!eZoT$~@}`pb&^1mJ9Zgm)GBm8#~N2&lkVIoTTb zv9?x;AHO4B<7eAn=!fk@N|ESFB`L3!k5Vxe3aa?j3Y+jNXR9@$B#C?Mxxj{FX5=Yu zq&iVV>l{|K41T!o@GwQqkt30Dm9Q2~*viNCmCs}PTxV7rR?-Q5L4&%u_T~TNJT!Z(*6fK|v&XGj4t}xYN_7y+lC{dgFPP(MaC75Kms5fu zlO$iZ-%i$kyQtqLg?H_F+bg`VVT9G-oE{mLQc4!o?n1A20F+-7#MlfK+7PP~pvt$_ z*uknX5)#zdJ~gI2>zHgwet-sT$Ea6(<77|sA8>mn;FhJ5HTEJk0%Yg($ZjZYdu9u? 
zsE0r-8A0rR{&;ahRnexFn#5(5Q$yxWjHV2{=5*%Uo=Ag74wuumky*1oY_Cs7 zd{Sm`lNZ-b4}#e((QH+6lg@)PP;W0Gc1*D4*+bjdXiUA%6*uG+Jq9L)Of-ajtaM>mfRw@l z(*3Y2niep$ZBT@NfO7CujOsQwMq=&@F$@lj67z>bJ-ez73ZdQB5QE-R6w{yVRs48d zu0w9X(RMPJo{~xdDL8LQ4FzsGiRi<7VJNWeg-+n71QAHnZY7Rxv$VvK31YXIO%TqG z+6>XCaCug>y&*gnY6x!yOCB{uvRR$5fkO>7(|<1kqm)K-RwO!muEZxqbp*eJ?PsL< zc?KPtzv#{5m%yOnb&ktoC7naAMHlsXq#+y?MMDdEWHJ@i#+i}GO_Bf9@4Uy-rmAW_ z=yMF(1fmE)uo+F_@fbn#BG<$f^L&t5KBQ*pmDOp{E3P^9%F4_fag3T?>wM4(Xgx(4QRfcde>)0?aes`Wdy( zx|KDpV z=VCYtbctz+^%;=(xAkNZ`$au1T3f7*(*x2zmdSgJcR{!fsS95jhi*%bs0Oj5I)C1n z;DK@V83rctiI)bCftYli7G&n_N1IS}BAgFmD&$#S;DRYR?Nm~cz_YkNa7!E5L>>y< z;^x*%oHHD&p{Fx==*N)&{_WKyaK372*&A9`L+8{Gj+qrCP_RLPmFParPXiG}NpGCZ(9_f)`l}7P?c*C+U_tN)Dn) zG?lhIfuG6)Q4mfPM~nIKC&oOe&jq$dFA5+g=!75uz-RRxbn5JgCB2hdf$3uTXuOC) z3!U?BjG=K#6pnjb|62nF! z=Rpmvssq$4%Zr9CdP5i05UcbgpT}sbqm?Q*CZ}4%b;@b5slv`}x$3gf?PUQ}F;pR8 z_qD4r>}B!3Uv1O%p{nZxdIaw>+gQwiHM(9YVZ%5=pT*2N^rnHd0@BRxr*cW`q4WH- z!FHlNU=rHnXfVSJGQhBJ(mc z0?~RS7uCp|3*^M5hyx{hK%sinB_#CRZ2cu1%j*% zfIm{T$H~KnWwm#R=lswp2&@sU%F)U=Uv%SqQO5ZZp7SFZ=HIKs$O6wg$8rh~hfr7V z<&b1J{|k@wj}&sod`0P?s3e7eXY$-Mg%EUWeG zLa!5?bk$FPvxDOh)*v9#CmT%A^oPw2?9vcGed$HQF#T|$S#!g%ZJB2}f{Ig@0m4N( z8w)sTtXmdir-xe2Id^QLBLkY0Q8MRrvJ){AZS&)hhM4tLYJrZQHoC_xiTMD+PS zr7tw@ueGUTyI3VKRyBgTLEA9q%7KbUjx)9>y;lbL@jm>icN#CBn;XB>PydB}IT?Q8 zBIZ8+JfI--Xg*?amHtb)AmBD(i3Raf&=yB{^f~rf7}?)95Do?$QVBu#&{8Cl6%$meakVjV zKZRDb+JV(_a>e!Je=Bn3N>d}(=p~TrKxZC8u4nrk?v#$FOK7rVG%4B0u%|dg&8U(c zGl4b>VIul6VZs3|POMhNNj8yyIn#%)@q9>_NQ}r`XekGV$o)Tzd zpEVMh-T|yhEH}AfW-81IAq34zzdkJDUgCP@q%R-S z4SN`dm`j&R%a7T1{wlDg{%ul-M>-E#afLW!oiZuM`$(O3m=xr1T6`Uyl;eE~XJ5y4 zvsQgZ2nBi{nh{{}#?caVSOP?N((Lb^@+5HHdV$Zgxb6k6+LH<+}cZ+v(!j%sQ*drmT}1<2A4`P^Buh2c5QbB;g)A|ya*K~NR(oFvYDAzftSc$kLXD0Y0%SuX8b1@SkTWAgo7xS? 
zr9Fx@hF7dFGoxmgM!RK?av6%-V-dRy>vrg3gkxB|y%_9JtXd3AQA=UZ(w@9Pe#0*Y zoY;Nk5LervEl@k&2DM)*8}vvt)&^Bj<~I;HvOyUu_`n2JlQKbtJRX_NW3q<%ag6yX zw2v`A5jGkDS0La|(+s#;A28wew^HlPpw^q?)VhPP$_iyAVM`mcLiw^bkrldDR_Ge& z4zva<6jf1Kp_9OYA!d`S+?C0i71|F<60PG(68#?0RL#nf6{~ieKW0N-GqV>Sh~yP+A`cm|Ed)|6rn* zI2g%WvqG7k!U`p8!-g}~3bpw`>Pw1LW2)7-+L+7=Jz`cUspsP5muiJxVRF}0R_I!{ zcUIFS6pU#NO%5l_d>hDoGfT{DQB5UMltmsyScnox&56~j$Rw?nxmq@xcqyz<`#RKZ z?xiey-mFmK^$*MWD#9+cWxbnDR%_q;th3bwpqp)>R1y(AxlP;Zi3v@TY(kkj#geXIreJ%HVJ4Pz{79-tV5Yk5 zS?@>ORCeH2eT@CjC&))k0*>P2O{{6b&znS;mOiLQs zvLxF*4*_E$A?1f-X=Q`evsf|(S-dGiIa^dM`ztE=C%N{AjDP;IzjE*pY!Ja1WicR( z2orE3ZB`}<5TXPJ1~8xyC5T{x00EQ4AtG@=1QV1*3BS+x+s}1^ z7<>xn9bs1z<1rmx-X`p%MvmpUS+7S(cd$6T2si`J9fk~h#q|&j*e?A^U(;s|bu$bb ze1<(5Q_b@U32n&D)+k;-*60^l(J&&nOrF;5y|O##K-B_lM&XfqKYBTaM#ZAwE5Jgz zMraq&p2v4aY%RHCWwqL?vxuFVoYP0M=)X;0dl$pZuw()3bUIWmsWAQ0+RnXk zI3%<@*jtq7a_n*ti1r@(b$n`(@pV5LgC7#^G8N@yYs;`~gk**?o3~DRPe22f{5OtB zJDGm%%*@Oybn9SUL$iqeNN{WE3sjFgqX7HG? zX{yGy6pi6lBcB-aqmkTjjT-(=^MDRpq}u0JtF?_5s(pd0b{nqSl)z29Bkv#QGJ?Y& zD7>%)J@?_kA@utMJ>#G^|3Z}i?MGjerti75^Rq5-veNg}m9Ix#itt@q%~o-oZ}NvH z;OZrZ`C9q`MT`(`B+7B0zFYoixK`h!l(bTg1tR83WdY(0VFj08g5P(} zkLd`^Chiq3!43O0$u`<-Kxsf)XFhmfQT}zv1m8VVDsDahI*0GtNgts)xym6reBBN} z^3|yVeTyvjF{Pp8x>`~hhbOvUqY_sH4#eAVsZo(hpAL+kdhrWO@~0EjPPPnN#dtJNf)z6}-3CyQRG)CAz({N+_KI~2Jd2#g~= zMG6dtO~mxm7SR-DrGz0o{Ip4nztw#rDfVQz?nFN0UBRm+s4t5Tueb&%rJU^4 zGV5b%4+km!KE~gMb-Gf~8DEhnr@_;Y#Y+%H)GKjn>R>ffpq=j&o@;0!GjzY3QF)@1 zy^#~^`O2*kN#pA|z9WZqrPv`AYvVVd--<=UVc7m+Me;p9J=|k@^=M6J27m+!w+;h> z14l2W^A@IaUYlfv21Im|%{z$3;adk2Pbg8}#XI#v(`;;zMzxRwd zKGDoqi_PhgT9YhsSz(QF<5>RXgs-{i#e}MkO@A!eHuwZ`n^OpoG;m45-HTzU4|D7S5w)~=@3 zF1L~HKEJMKX~28*_eH+fJ?+c@!QG_T^Ht2~wkbNo2}GIq@cknhr-its3GD&#T$$8X>rS1;K&@qeJAXAy_40wD5`u%RtT*wi;yP%1#~F$Wgk zrp$?h2+{X+hba{*VOHrOa|uTU?qxq6M2elyV8zG@5{i*JM8$Bmvn)Q)=t3-DxRoQF zYq2ak>x#~*D2|M@NYN_T&I@U@GLD)=fc;=u3E{_TAHPSOB2>u{Dlwm%FqUR}bp7Y} zXO9ulu7;Cq7*5?XoN|BLqt}S_`Fpg8w#!jS43Q%`M|!ZL{bfaaT}69U5pITz1{GDi zrDsPHC${Mr10eId1!_^E!nmg-X^Ayl 
zElRHC+r7D{;iaxs)o_y<9b{WQR(MdNWwbgD$c}^Ka7F=@^3@-ttXEJNnb@FcKv;AI zHtgnG$tGBg)pNY7nQvkfM_>~_5K*R08vG*qGM%6bFQK}RdiGp9UWp?DspPL zGxNipIbjbLbmq8fJkGn}vGjnPEde?0068r{a0iEjdB&$i{cu%faLDSNZ`IrZ0Zu%O zw@F6850qIrCGp0#s-~s>fHl3%=OdFBEb3WdP?L28a3nT9W^MWdvGeOVRW_E9)C9*V zj7Ok<`e{}plUk?3ZhLb!Ho{-)2pKHZ54 z`RYi68t*D}f0@9jWJH@6R>$SkLB>znmKAR0>;<8Aa5G%jxIV$wzs#jPLsCK0ujk`C z1&ap~1`I@5a`P^B@X03Huxz4cm<}05VPrqe5K6in$2n|g&i>Obm!vBu(PbTJ36b+| zkC;tVv@VJPS=v*-HHer;qUT9zLOkA?KhuFxLCJz1RIuUEK)H=oG-PC5mJU$VxDXAO z>l4thnXZZUc9WkbpEO4;exdNvX^)ev^>8sKskXI?q^o7RabzCT>sgD~#*qC;tBJX@2sH`_l9` zFYSEWkG1-PQpsR5eL0mGtYoog2t2935cv`&gK~wxsN21ejl4sF*y+weY*k~XtbjIb z6`FO-{q*OcB6pr#AQ8v$Y4kmTjgH8fZ^lS4gChbn^epFHgElHV958k@fs*3zBM=) zIN*q(vQw=<0s6!rKO~HCOH+nbfeqtgi2_YzmXKbp-CY|qjiA$RIj|+6 zt_N->N~P1eC7j78@~g3mX&Hh(_@ovthwl6H&%gIiPXFWOo0E*Em7l8yf8vVEXygaK z33)q<7$lp%`64FnJ@_|y{J*_Mo2LK5_wAGoJ42>V1cb{p2@%#6TbjNFg&+FVu_iWm z`w{(R63YkyrOCn>>j)ijhK=5ArpF{=F5l*7-{J%89G8{>>Hy_9U-SKSXF3zl&7#hk z*ctlfg5^aTw`LF2Al8Ag4?s}6r!=~C*&D=gFhiJ~g%_Mm_EPuaC5*Ne1?kV#y|hRM z;epM`4OSUl2(}4SOYTdX%zk3$G_YChZ8G)z5@+gAs21ag~+=!aFB>QHe^sF)lL=QLqs`zd;2=QwmRE%0hDf?+Z2 zDsc#(Y~=6Ii7L7dnhS}sF1M;gF0lueG>0=9H~p0W@_46IkfIvVh{l3vhWY)B3=*($ z4PGGi0wti(7yEmRzs>ZsVndjzkJrTQSh2(ROnMGWHqcwLOr<@17>{uj6SA@RENa?pVYGW9GK3Kkb@ zGg>ihCI9wE?@sgO=m2hW)o2{Mqn*^{79|j4>_^0*DM|~MMfA0nANhm3(!860{zD|J zAI-n@IexX%udeOt2s~16u&=#}NOSNoYc>BLigRHd)?&#v z(r*_rlGH8(^o^U7SMe6rF8}a52V;(9vrq{dgb=MkA_N!=S-C1b+F*0hW-`)+*MyLNB zXZegIRDw>iK8nOl;_FDHMZGdr{vg`DgG=-uZlM8OpJ>H1#LPBgSZhszf?_R-1cI&Y zLIR3dgEKW@;#t<%5HUNAmN9WMVG^l4VZ!)DB7YhxP*!_>Kas-4>a{h(Yfvhkh6bFx z5RHxc)gV(*f~{uMfW{Uk%&*7p)CyKtn-Eq(oW6&g25t`p=EDwS#L7vhXf{IjQ};+* z6~C|-ff43?Ub}54E$})voimI)TvW>m5vpba0Mx>T$ zHq7*Al}i4chDBUOD~;atk$HHBlEV`(50XByX*+8%_7dmns7HL^_S-x)E?Pg`W@kd@ zehOboK>{n{#LdK1tgOMdUac|`w7pI<$p*xjn7SFIeiWv91FDepo`T6VM?nqLa{taM&6 zx32!II4&Zs#)k#{VB$@IHmH0KRn|Zw+Ma7DNS#74MRlI4q+A-dWu6g0lIc7LPR%fY zX^bLPM5NA7^VRiT=vhy3bVN|(*$V+8w`9J8;9ZTBxeEAa7X@wp5 
z2?Tx9Eb@iTCq;v~X)Bj}={;>0HEL;2&+y;r%U^4|`ID@GMxayLJUAdYXvczheWN;i zfX+V`9o{f!R2C+B=Y_?1G}cA&Hn1fjgn12j`+l?UPJFTTkE4Cfh-NvYv?l$&WdDZk z7qUtzn-q)SZ9<)TEB@jzNI#$yddE|`A#4Zi+o_?C`&E0g7V3(tb$P67$C~(MmD?Zem-+0(4e~VipjKX~v5X3%*75&aAYw&~~D>;m8t}+1_|uej$sd{&<)WBesdBQ3v+meSdINNamFWFofacRTphgus~NYoi>|J{&2BeDEBnMIzSaMj=;M5Qul=|p?rx6J8TlT` zv``fZuu~If{|!N7so(T_>0_YME1&HiLXnI2w(``C_AUx)R!scD+p>b3sV?oV6!~ z2LXrm=RMZhs3V!%4W|Iy;(D6@;PTyo!rvWaVR7(p2AYPfU=1yv<@lMKFrsrG&R4Fw zWtG=wqsDrFMws=Ka3kfBs58KHM8Gy>L!$3emAtg5L_V#KoL)f@3CTqz$n}hU*AMfA z4YMtRI)*`1q2a}tYFu2IgBn>&iIQ!+mE2l8G#VkT?kp~Sv{B}9+-K~>b*SrRyy>DcU`FMYl?m<5r3h;T6*0)ajI zzy9`gXGhaqVi2q2)?&4-0a}f*g&MA|jnR-+&m4lZC?zg)%07KtKYAWZUv2xE&W2$B zLLeV5{H)p-wCw7bW@&z1=UY9_FE)NQkENnbV~o?*2)|aLDC!fHx|T;7^1t;4#X^SziI2svgw%YVj@{b zOP;|t3oZLPIg`r0hZC`>&gzm!5O;FGU|WIZlD!K3d@4UV`0Qt`8FYkp{-;J}Cb;m6 zsgtYEUqI7)1b2gtwHKF09$!DJ#Y^}E@LuNHxRFYAcrp!2fR%}?EwPoqU!k7Qw};FD zt{z#})`)+T5cjJ1W>@2@_ysco12=K!aPbPOn8|e&MAOb6vx=FX>BFI75}O4Kg!*P` zu0oh_sPXoq#yj<}tz_D4A?w3EOpdnFj;CKJ>y(&gShZx*jr1EW-w*g>c!g=9Z4O9n z))Ba_?iQ zTe7yCqgVLV1mwZ&+e+%i-%UY5f&}fs6w{JLb zvc_Wyaz9y1PY5;I%_ZnekG@*#*Eo8Td+7&k0UHkPME}&FrJHdTr_2a4{oIbp|_ITL#heOF`*2Szn<@=GhDa^c^c)UQ53j3^n!*HedS&YokVscp*KVwK?YIaPadlWthAAAq><2V$gz#n1<#nnh-65LoI<}f`g$C1 zOSaL(#|>Lw2dJ+D?S|+Rv87NEbs8-xeSA94E4?Ju24x?z?vg2wtEhr}X}r}Oh>yBH zvm%a`Qvuy z-xmw5Gj;%nS$mBoYcZEHY7ziTvpjH|nF_7;PjzRi>`7#G*vV;HCL4NVAo3L|8L-#Q5QlZQ*-ez*iNlZQOYAQ3CJttf>hfk#_F z%QalDSx^GR8=T5R2=B7eC(A}3c8zkX0i&<@URBNZhtp*OUZar4wx^6$C66=tj%>D+#H5Daos3-uTnLv|sPJ z2X2DeK(J)V=?mcTsnvA(KaF?1tmugR#G3h|>VEL$YT`kIG*?hiOaK;R-j+_rA}Hmq zF$@S`7HF0;OOx3wt)Sj{SM0W9WDwQtmb4}cVk(;LQ44M}Uxx;wMV7fQCj$7ZoC^$a z^V+qQc9p?81*q#|s7#IqO|*w{O~pWfIa8XF=*&uUF*C+h@s->;QE_%m7pEtgC5>R4-Qz3_F^gOdA(QI6}Cs+YC|o5H1I7n$T?};U{x} zrn+fQ{g%>p2e{U9*FdAuU^OI1WLYQuay!73c>df<|GaSRck(MXkn7+i*Y0Lt1+CS0 zQ(+3N^e;Fe+=KZdtDo2ep}1jYbE)s?S3(nbl4~}H3SL%JAnlxf-fR^>$eIAAo%0v1 zy{rLl!n86_dhm4*$(2P#Q?8o-^l7 z#|B6B3XbF$1)zt@LhuO{x?hECsX;=|2@$x512qJzSq$=h59w6QOq^h}!T}l^Yp>`x 
zttZqf^8f>gc|dLKv5GfDxYHvz+{!ygT{Eu188rw;YuIr?4QWj2S3?KkDT6I)syoZ8 z8dLQ!>`)^ta)fOXoku95gkA6nrUXl`(=fH=vva5|9NFzF3BF#eOnQDUcI{2T(x3ZZKS<+V$I5nE{^NT^2Q~b~l>2!~4JU{a^drU;e}|-eJcZ4L+D3{sMP- zZ;YH=VYX_eUzFVqVLF>+q~s=Fb*f?;QLgamZ;YT-2siaLYcIWQwRK;!BKn}zOS5`M zhw9UpQUfQ{p0M5FmyIO_-<8y`Scbx!#PYwrzx`DFQ1;`N`$s-$*kYY zr&n=pb4|I9aBXuPRpG3^!d`Z=751{rRWEyHx+HOzbOKh@drPH5!_%9w3VGTc+8-75(&p@P{7XV?B`m^GS2a;(Alpq1Hnxy}8gf8FH>TZT2 zu86M`(Ybv(L!O~B?IZai`@_5M*l|m5gX)))TlDQwZrP;$uCADKwr`ETZ;$d-w{4C7 z-rc6$e$U;uxqWPOdV=P^J-U;O-r(sfqgVEOwnqO`N+dORuZj-Yt<>7+o&G5V{a$}N zaaDAnJH3Kx-r3!muT(WZC3GLrR{g@cdbm5ykmiT^Hr2OR>f58?TiOt|-9$;|y3_E9 zd@g+ZECbJMOvk&^+KWEUtG257KkOBxfF9PJX0GKYcx4>nC(BnSyVJ;C`N{BVQ(57B zcNz-L=R<`X%TlMh)7sTP6-qs`eD!E|dKs@C4X-wo6`t-+U&gD`p~CfLsk7Z_+1Ag7 zQa@3an(0kn#H*Q}@7FWRQg`>Jh zulDz*$9c6sym}h1@`F7$Y=%lV6q*9Fz3Gg%vwUkj>#6z{8y()0Fsk1?`n^wn`q1k? zJj+qg(ezh7cl@sVzWnvqynH%JzpcYIkMzRK(=}P6sWy|mCFW;OT z*DJ0^{dEq8B_;wdfr&aMQgHnHf&CIiX5< zwios%ZkJrLYW2lTG%<`ClZ1Me`2$X?Y&xz@dGGQ@9$dUJ*M!@Y5Z0?Wy*^bk=di*d zOKy#RMOXG0-=!pY-<`9!q^l~AAH9b-ukice)|t@#*#Yp`_$0y(`t+D(B9S2C+*g}#E<#gRA-D~ ztl|?8EmtRuTAF`xeY4{AxP=mQ&O1?hj5v^15S_5t5$=&Fil)Hx87pOYru5cm#wIE~ zsl(&DXi0HyXBe6p5Kh20If<8<%Pj9=f-D5|eM9t6l0Km4GYMfH(wOEFD1hsOiMik> z*YzfWj|XR*VapnpEwqm!yO(6SddF!4QNCUwCe$qzH==SqmM$%Sl6+I9l*7=(CaGxl z6;o-9Z=FYYto>%lbe^`ZNcJR#WY>AlJH zU%ctPkG$b)_kHaiO`1^LtYiAJO>vdc-W)$QiptaP8=p<4Kk<&Q{*O=p?w5|8h$_W1 z8RhlmDvDDEyt%GuQmW@^mo{B>5@HTjK06c_)FqwUu@uqk@UhV`5J&3=5lk7d$TP z5Bhp;3In4Z^a~vrbZ1}9tIC1V{FOj1C(bI-Gb(7T6~uGUM2UDg6)#HZ+0uw-Qrj>% z>c=VOJh6V{0e@=0`GXb4&vPt zzr@?M+_S_*q^7jxlkAnYe12S4md}sr%Cb49E6e6bb!FN7h_0Me_WQbyZ;c+*mBU9r ztZR2`^n1GYwno3JE2%b)=*luoS`8_gl7sQm*RAAWGVkbCa_}GFB=`Vxh$BJrRT3xH zcnDAG8y5Jl=*j~BWnF>rOS%Ff$ATDyU(^){|5R5X{DQ7P_^_@(_<3D{@K1CF!q4dn zgq&ez5I$m8gYd81tswk{yA_0g=^;EGAe3q;9CFZzLHJ!=f$;Bi1wxLLH3&JO(IEV` zu0Z%LU4ig#bp^slbp^t|(G>{)T2~`Q!C_NoSu{bpV1%00T$l{e`MvhUUva(JVz zbmffG5_v-^1ey^@H{2pCt z_*ZqM;aOd2_}#kF@UQ4f!|&3ShWG1A!|&9UhVRprhTowp4Rffbp@xGX18RQL-3m4D 
zbGJgx10F)oVR3Ybd}^copuPdZ59CA`jui`UVI;uPYG# ziLOBSIbDH}vtA9t&*}<x7+tswlWhwx~CaHWUvQGEl1f1@i9{()7Abdnu zAp8qmf$-0D1;Rhm6$s~b#rV?hFTW<@oEn=pXYFd>{=nVZY;d=Rv-R@Ze{biCi=d+O}YJhciZOnd%olIbUYJz<7aP%l)P=n)4KcmE%MUJ6S;EY zqH4CPI9BEs`xWo#jV9YN4lqW&&t;iOB#_%##><_A=q)b<u$j$AP50>=4k2u7Sw(qjTPS)q z+oDcck`Z4ADm5wdFe+?AP|IgR+faqA$Q+=R9{^nRkYxcJEdnV}vg?VS?edtKIIKmS z>@BKAoK%s@f`3je;*?ut(P<^&^t|Ld-&<6TI9;r+r?k4NP=3@^AmtdPIaF}Ex46`M zyr|)*YEUWLou;hRY&4lM55mmY;u7zCQNsz>J^h@#Do*nQRCNoN%KVf)+0zZ_JD|Xh5JL~`nam&jVqoGk9dsS%MuFn+Te#Zk zvlCVUpqri0Ay2=gRjZfW3wii*(eP*$X?ujU*?a!sU7!BDFW>n^uy}Cz!R+vZeWQz6 zTm;5pY=spI02WMOko8t`$q=Cjnm=#+yBqgH#zQ)XzRpLC5VJclGA<`Ihj3SzVq;*Y1X~8nxT0^;WJgt_|~6 z$FMpZv#l-xMdearWw=p8rdP4Pz8xag`ua9qSzq6(D=Qq~uC~s;MOW6@|E??R>@Vwz zguy0%*kKR#N{=LH_XREJ^;%&@2>{ZvWLf{K^4QrVm{jdCI z6f%Z&#In*}dePd|Yc9EX)ku5U@)av>k8H!l<)hE+T+PS;ajb<&JZUO6!SLQz$fBr4B!E3Ts>iagkueZJOI@d12{%ipVsB5()0f(yb z=VyA)5m4r(+uq@RlwY!Wr`hAde|8DJs$qMsc&Xpl4E@$#SXp@MVBxKmmn!`4hJLfn zQxw*FZKITxT{T!4dmWXRKUKcm(X$_}cdCZ%jLMh95i=Av9(u|CMdjtQU>I$gYo%D~ zx}(RV>i6tn6yK{n8yCgz9lf6%{Ftqa;-x-jzk=TZ?;FdPJ9_rRf#rX=5wRcIxrFRe z3s%{vTlV`L1?uLNRLy=z(bSIKGxa0XW=c@0I>Hw-<%lb@NJ< zxLD-L8(p{KxyT+9iGKA&S3QBw9I@>N8n#$?u!&U~`rEXlw_ev213Z^Hdz@Ts13(X9 z(t2o%fU=gh*cj{C`b8=Xw#F#kWrxL5M>0Hx{~?SGTE*5 zb=#vvrgqwEPa#Zesfb#zof1B>v~t@3MVZ}DYA)0E2DN~YTY%$ zm(Qqv<-D0_71tAe)#5Aqtv##&fZI>7w+4W9e*gdqu(QQW$3aB)Y-jNU7nBbOjqG57 z8r=NI=Pw-UZb$E_4i82eTJ_a0Cj;PvWu5gqI`~fl6zTW11$6mkPrG!{>efA>>ej4& zD#naOpnJ*_s_x>8)}g#EQeFQERkvdKQa88+GZ8Z`wC8|0ygTX z;J3>P7<+P8^R!p37AsSDTKN{sj(bXyUgW0_iLzx&Q9n@5S7w|eazIeYe4&S|{u9MR zh0q(D5+M+lxx&^PSO@+b!EpsfqX&+q$mfHpG8sLdkVIpvWgC!pyyT4S+-s6aV$_tW z$4R4(sHgY#TP{Gbrn*A=#tx+4C3Dmj$@RFDaM?CFrhN{V)Mf0zN7BcXR|+3M0cKy^ zz!%h3mp5iM_R;iv`hd-Z;)9p!Oo5^?iB~m=w+v%wt4*J2i=JwSg&fP^KQv`w{_cIs zdxIt8y>G|@fZXeh*9Hhq>b-0{{l4{Mi+j0gl7KCv4bHl|%Seh*9Xnvr&$MP&4Hlz1 zcEArDqv@%KAym0(Vx?be)mROT;b4_3Y)~V&=pi}a)1(!`FM8M(_+^t0PKG2cSDBu; zpgjG=3}qha9bld|VSEkq6OR&P%dES-pNM$0d8dD{b4D_*$s4b(pf2*MdJaut)+aeEIFTL=^&GH 
zkt4_$ZyYO>s^JENO37MU0HD*Ls{rg*<_iE|W5xh@)B`ZD-v^0I5CHQ90Eb9EVgMYp z7=FS-qR!+fQUR8l!i74LIZA8FDD>Uwf_aCM@v&aV);_-6(IXF_2B2n`XJI7xz1awW z*4rtYgFDLiQl@DvjyD>2-_W@sjU4#)->}+)_Ndo*xi2=!z2X;4T^xqE6wBN z^yDTWQ`F*C(U*D9z_=9`50rAE`7C**`hG$0Bmh<`vN3kgwR4a)D#Q zNkr00B`#~yaNCV1$Dv7vz%_e2kraV)kp`JOJ^VhIb8`AjpU#M)f9pH$;7(%dd`8u- zA{Y{G>}Y3D$18hKWvI25jo_ARq-Y1|x#U;q)3PtsE&37Sw-fec9zwW9&O|1~WEUgT z>Ir+PG-V0RcqSp1UvNrN$Zt+w0f<CVb1i+(aP zftfp+!&ZKv^VW;wo(m%`PO1jA--$0{NF1Bv`Z+xz@!sQF(OG7f7c1D(7A>_o}V?`B@`7-44yLYi*Z*aw~23sgv*Ll zO^v3IsyS7|PBk3v8L(^pZ*s6Uxy!a$@*(RYym>=3&8gZvUgsVou+T%p1PL|exE|Y) z+FF_XDiQL@*+-4`hye`#h1wfE&bWP<@ZYrd`1Mi-h2JehdTTo|%ZkOw5Q-Er!-680K|`q7-0Jca zTBOO0#^fMj7}2ni;-8ZnHbggT^TEz=xCu9QiNBN7r>KexsOs-RRmcSD%}tssn39}0 zbKVercR@8Is-(9a11at>T{{eyK*K&N(&Y}4b7_Sj7@AH?M6M#MsL>yj#6NAU$-ose zN<0xJcy%r14qTHTL&)qI1waKhuA>^|c=6-S-3U4uhK7&x%)0_bL&yCXL&vcNNF^PY;!wdy@Tt%<`=GLN-g5gts`X#;zT!Yz=rs& z)+fz{eEh{18g^pjPyx`Wpd%pwlI5awrJ@>>!9xWL=sZvZLB(07f_n@3lQG*qK~9*tasrz{8R=jIlt|nTm_CV1|^OAx*YvhBFc@wM`8N zp$HldiB!uDFcx+O;}D)ejhis`aYj&-bQp#L1uY9h0W0Mk758&+D7J}pFOxxrv1nJu zffWxAwL0)rD#k8DV^yFz%~J$xB%~DIl+tOQ4)ReCdCfsSItVK5>GJ`AJC1fT=80&; z0fvU@hj?c{MO=G6QdZ_G8`Uv^=ttDWNVGI^6t&Uu!xF>x92I8a{PHokTp&g`t?*m} zW&%+$U<0h&PaZ#LHhsh~x>IlrrjJt2+ri(9Paww?dg`z{SlmYKrZ8h%c^Y(DfCC((HQA5$XvhGwQEGWx_It4|Os>rfRA($W&XF=#to+8Oz4Ko)n2KM{Ti#5i)txw0Y@Nrc$_ zYru{;gur;oDlA+cHXD}oiJlhrKd06HTjRrJnB zNB~?PsEBB9N7e)ZP|?HHiq;KOwANSDQXD_ErQal|CBU=ZXpukbC1sYdY#FFR(YYdP z!Sb2!kkXkWEJ25PPzm__&ab^a4Z!DTzZ!n1j72QFyp0W|GEhu19n^$ZnzcF7GFTKi zDL>*P_5Qd;nP*(C=OcWPI3eJr+_fwVAMxXMtU0QlQv6!-A4Q|r^4GoN4KBTpBGTn6 z?i0%4o6pIqH)TX%&(zB7SC+)ug;bLXpw0}i0!Sq=h*x1&;BtFENxu;Xq0U$Vwde>w zd5-bH9;~d&xwcohV3u1*Ie$?v(|7QbRNZ`RY26(7ACyW9;AlrB{1`+AI&g^J${Rn) zRXIx#Fd9k%Mf|IE*$dYZ?^3iR0ehOna-&k~m- zvB0yk@dFIN;abi_%*G{~U>92!F~}AMHj@4=8oI&7hIiIg25}5)R#oxT@`}sICMJ)i zIqTW*rtjiR4fjyX0Rc&3qL#$6Qr>b6x6Zmi9s1&N#?T@&7UD7;e@e_Rrfnz)HpGcAsm zK%k~nh57x!#n9P{e18CvBm_F2BA(?c8RVd7TUHoDs_Owi+23#|LhXMX%2Ea^8#LlC 
zp?FaiA`U6@$e|eyDHy#2#~`IPN!CU2HZ#{U5gtEjwu1R4;FV&_^}OrzX0IiI0|p-L zv)fiR7C<46)!XC@?quh}Y8Y8dD))0h1+jV*1{Ug0W6q>iJE6vV- zk6qT43lQwU9OPRS>{uGW91lZQN`7Ofk*GHyJd8?sKuE3a#%Q=e>VrTzk<$b}GcESK z;Kz@}1-}*wn8f(LN^FXWYsE!gFHP2T~BB=fQzU_eBrw$Fq6u~M>alBhd1scK`$HT_k;p%T$2 z4mbeF7&eG&=G$v7wG?o{exYTrp7mXdU7i#QnjJS_+?G@H@NQDHz}ot&#lUn{0I*?% zu_Yyx$#@LQG-k*~2^uEfUg=p9x7V!M`BQN;J;Q(LosH?JBe7ci6TrUGazx{gb$0`) zzi#6}uD9l!jI}UFWkyC~<}Q2Q8rWcSa3dj3AG3qpwN6wt&j6%a)H0I(zt)oyx^>z~ zu!x2U|B&Ct^fMaqy9x-3>>vV_`kpoAcwswN(lGoFs0+KJ|6TABDa-N+_72tpjK<9P zW_B>KT09vR0eFfx*9fcRP%@aq@&QI)Aco2s1a3Xd{KlwbC_oz%l%_i?2VE_Tl#3>j zfgQC6K>>#l_yxOiI+YwFpO~L`s9F7FhK=>`rU{_=h>p48q_T<2peT_rK5 zjvFd{NV?B<;&nO;Qz+~?al5u(M^pj=ZEK=38eNHzg_LWOZWB-Me9AYqMZSsLv)V3+ z$uSHVK%RgQ1C&1&VpwdX?*@zA4;d$#q5m5SF;5&T$-hk@=6y9BJP_mtr!7)FlT>ca zTu6(vaPBsXxr^8U(-@e$<-BD@W?5VK1+%$Pesuw}`y(D+($LHLXEwbWB9Y8%f8Sy$ z!z!dC`pjnq(y%xoNdPO0AWNY3U$n5h2UwuRHA;i#Zk2#A8}S4ZX2t))Hi zOm5e%k;K7XJtqZlV{3lQN)K<%8g7{v;7Ba$zb(%O8n#hj0TS$tGjcvLHaT|=M1KDq zh-BijuvQSsgGCXk8Q#(w3?kVR27mmD3ruQ@#9>k!9p}R256FD-qeW!^eKAx5%P=Y* zJO?VzoCB4tz8cVyCV8+ZDz!|T)pbOxJBVb$6^N9<$05=!f+G1vNfNu%Vraa_Lk|ZS zq{zk~Map)uI7O26tfWXgp$4kN&J0~5fy#Fp_$8gF!pcbF!DPzSAYp!Kyw(y&I-IyE zj^Nt{D_rW%FcY!gADofUR}^Z%eDtof(7pDt%;e8&56^Br_QOqAF+6 zSVzThzUxI3X68#juEV&g66IwEhZsw*3|F^0_R;{F73M#ejg|Xt5W!WLz$v{Kn_?~f z2)xgX(8g0_U;j8fOFNuXu3|nnJGxx2>==7hiAdN7^JaB-ZJFJ1&KSMf+E%5<3>^<) zAs?*T5F?mnR+r+#fj$hex=N~AYOvFGpeENzLaKR!52II?g}uaQ1~<}S_$^A?lS&_@ zRmZy5!2;4NiY|G+IN+1BWJ5nWwjTIv!WnEG*|J0?U1xjP2F_KU-!Z8b1RWAeL|wJG zg*7ZXMsufyNao!sVH$(uysm1~og|w{^=9V-_p|)6Tl35QpFFyLjrHs?eHacDI>`YE zzXKIq6%*ss$VvQhR#ygyzh`*)34Tt2ZFWzzGW17b{(@R_&RH?*<*`r(>EN7^Ia#J^ z?UT)L^#>V|4KAIttm5piVD%kDJTaj>Sp@NB{HDhHRin`zQo@o#ja?veLUOnjk&iAy{lYNybPAfBV^=7Kear%_Xk*ig`60gYP?VQd-1QHppt?Ueu2*o79 z?MIWu`RzanqAI}C`ojLP`xj8n^bZS*NQRgUnSNg-cJ%1z@;aNpB=7uZgz;@o3tQzP7wu^YSu1N#%tt@nPn8v zuWJe+totdXbufb_80v*H2`)+vnn@T)yKdsxZc8zfqKM+!s90QLZk@}O{n7`I*g9=I#MAoqHHmh)Wcwbh}a zyVchBan1ZAv?Vk=MoymT{^ppaT|L4v0~mT%%PaWeF>0_M`yFg{sK!y1B_Vx)j=7Q_ 
z{gbk(=~J!E?4$D$4L!s)Am>5<>=9jQ{CjGgMqk3uIV!N05IexT`kt>l+fnafvqe+! zP8HBoID5E#w7lhHnDCVKFWho!On!d_!hD_whbs^C;jD)S9%oRIw$q`6Yra2#2dkv* z@C$B9Pp*!>uPamI@41RTND65^grSWqP_Ms?>sdJF=bK0zauyO*Q!VMQAFRM3=HY?X zw1DMM4V%-tQvIEE_xmuvEtr$v)*QdB3;OPZ0l0hh)iPs&Jf)>5ztUk8+}}k}BI$nc zgYnJrDHY?{9;^Q9+Us;>>lNH59C zKAzuCJ!$&dglP9%X3wEV#G&aC3F!E{UJSuaAYx;9;Cy`&s3#l0B3?@d7i;2yEYb!@ z_PRUC5M-(c-zp*ws^p-*-|M(T3uL2k^O@wxg6X7va)SJ8NEk2A?|7wv65*=9#w@L_~VKnHw2*xUUOb+kM%I286tJ;&mZKDeUsA=kCshNKzMpY~^pw?`!ukCp zaIbyto^;%dp2nHH;OT=3_F}cB1wQUiIO5a60!5=U`u1*8d5`Ba{GB+Et~RO~!RIVCBcK+&Qp>LsMZt&A!i49qCP$Qht?9xT3`t7Omp5+n zM6xCu<8Tv&(C?mHkw+ZNj5y2{eRpysVXhHzYC$`8W(H0^t5B*(ATsfeW;UkP3<}5O zH%5&L@MWG5Fc;PV5qf^m9=Mgl9*}abSW9dw7`IPIt~ai=m%-wL(kCOZ6-lgr0*(#r9zI3wY0 z^b8txy-9zcZfvX_Q>_}oPf#66@5ZGMoX`~k68V`ri-fO0^H+-T$!NZ;U?HxH|d*_c@t@9?>&! za6;F}c;!j^%*F+hp4W$`XjaWXWX+#w?o2;sy?vCGj1khS!+HfqoBb&Lc*ier(5T*V z=;?dxILAe%1qruSxOMPu^o=TRNNDA7liy*u@!&lut~6&<`XEkT4<33)ienSWNEzdY z#GFJil`)><=qEqMN3E2bnSI{dvi)hb+x_0^r!|0tHM?*v$l#G`|K-U>QfF}8U8q_~z zAKvXh6w>#Q*Dvh$aqcFxxgmNCb}4pfgG^ByWL-7LeFpZVek^0%v)E|_0R48;5y5_* zU!oc@!pC0^nDgge59B6St_T{CuqNQbMx(N)q@T3aVQ_7DF&>!(0Owd6^x5NbetYn}nRlI5e~x(fvj@B@>yW>>v|4%CT|;P{2Wq z1WvX}4)R@VZis`-fsx|?qs|`FmA*gXf6wdcIml_dcbFjT(uiwDbVGk1(>upO&e&&x zgPhfecQSj#3g)f($6A4d?9n_zdF9nHy_$;eY%j?{R2iJ|z4ofHVgdIw%D>98jtm(t_+a@$GiLuURt} zHFV>6z9D*F&@Uw&yEuy+uWE zK}PIzHY+Rc(prs&cgccxQY&3KhQw@LWx4VlOm661HWtdUTnxX%jT9-mFvo{4CEbhG zIEDvn1S1cl<0|Um?{Q~^PslXP;Z$6r$>l*-4FhXfjxEQE4c~HX+nN|F4!DE`(DPB* zv_#ksu~(zzwQU%L^q@ow?K2=W^|9~h_^E8qbBHeR1Y!FlvPLrAu2w;O)J&;XpRZb2 zVp~>&V^yPdGjOhdr>sA7rLi?X%@aeC*;|jG&&{iCN}koVFkT86%Uk95Ddw%FGhk+p zR5Or11~E0<5`n6?Z8<-?^RXj(ZE&7dkiB{b#x-pj#$qPFydax%-C(hTZ(gRN3k zx^R|htKJpeuc9&KXnT{jjZDT%@yxs7Rt_CPd5n2c`xa7K-2Ox@{R}&PXeI=msEia9 zpje;Pq!4?{sbYal?U3cPCe_WcCORiYj-PdVvn;?3ew12v%1T>26#*a~# zu;!ZQEx@oY4FIhneOUqYjeLQprKPo^Mg|h+5P6hd$!WQaJJ4+kE+LS>>$8wM2S_{1SK^SSv0=a zg0+}YvR7Lz{xq^dJ_G50 zrmqmLkw-6ZS)+9MzrZt{&u}IAhsBKI}e<$ATZYmhL0Fo4W8w6`CiXyQ#Kfwd!R9B2 
zl#a4flpU#j{G@k8F~jHG5eAG*4TU3;8yC4B(G~d~RlCA>u^ShK;12YsQsP|wK_1cO zett(s5&)Bk^$aY+*^DJc`C~SRkvN~N{5w|~nMc~j7p`&7Qof@&ZSxuaBhWU=EikyXmv$S&TT4Dbh9=41^3oT&k!vHjbbb2R@LgbQNq{xnfGRP9zo^nzl=V1C1x z(*HYy%VYY7v{^zW7a#~!(>5)6zb(w`@+vusP;k;6Ml~`Xia{ZKIf>NeSY(^Zkf zuo_xMs7N84dW_xeA}JGt%&Yy=JxdCRyRouDIo8F8MX_#g6+=NlL9N`CaEDOgakfst9y0CjRZo5s&d5HeXbHhpOu@11m97yd{%V< zryPcugmkyU?pz>AM)qN`2qTTUwu&K%W=SHAon2)Fg#3BV!mqQWonYi8BKolx`I(`m z>hyb9r8d@5rtT$zsXl2-tAs3DMU9Kt20DUQ-4--?7Aj9J;$7TVj(P5o?-Q+eZoiux zP7%N55H%A^WHMgXPpm}=aMB=Lo~((TyhMi>1ZN%KFqgFTVBlnr0}kmt&l?<*S75Ef z;D}AAmiud#HV%88#1F)Dg}@VhfG$uVSt!>iX}t5(Oh#pNRbf1@aO7)xC(J)}unIdvPh%@mx|J)I=Hv zWH05LGqKLe&B*P_cECLRryvl+loXFE6Stji|K4eWn72{wdF@!9qLgrqeMUL_{n zhM*}ub#gQldQ_-nxWnRN!lQ|9H7`8YN?oZG9kE<>M#2ACer;yC0GqEkTO~j`>lM97s=w zB}YqZW}`AFIntd2977K z#W>!CPUng!Y(}f0}uCw-D=coAfqS`;Wx%9)`Spvf+7WB4$@;rLz%OVn~6m+8ZTHCQw(JmZ~N?}<+ zAav?1lX&6a`#}#xP!7HgI)iVzE`#58gD>6Ki9PbXcL}zdx`C)gj z`&fQUfeGi4p<#cKdbmou4Xb~vI_AZ~t3-h!IkRI9Ks6x+Mftvpe!EOh{tPkp&OfBaiiYK2VKp2BOLF{s33Pl^90235b zaLFJ8c~rTAn4%`1Mulhs)bIsd;Z+r`VC3<{xWcPM9&P1-Gp+z+RT3#+8fbq2GS~vp zR!L+@wjdNVgaQ|891y_dcKtQSl#GBTG-@%vY8Mezo%ZQD0{{okpu%tl6*~uK2xM`_ z$YR{#(`JAESoKjy%mnR-t`qU5t+f&Kf4KwKxOWnwH_Dc}K~HR%cju=aNa2^IV@Mod zRQ^<>h}|YHXfinf2(X7Th6UM%6J*IYGpxl10W`TV0hA0*3U250FQGiSO)izI=jEoYcljBafqhv{f3BQj-J{N%I6OqYWN zpwh=NU0!I9lt}qPdL(8oRXE6?z$fCOLkne4z8f@O4u1UPcLqE81$9Pjg`F5^p^)4r0r zIw7-N2w8(dCOWxe-RR^bf)%ljdLh$Dw(jK05yu7H+KW4i`#ETfGqKSmF~zPSnicCq zwUL3^U5nL8h19@0%v49zFi{PEB%}t?urhW{b;8nBdO4Yp1_!ad*IX+k#S&s9MVDt+ zM^O<)b6cPN9=6n6S-dq=Dw(AJK|YX#%4Wf0lHwBcZQ+n8b89&y$u`)&285mRjz+~L zjc^9K0|-SX1{u=^S!&;`BsR@7BKEnH|7=8FB;84iJ|MG5XhiJi;E0$+XVz*AG^IpG z3(8e%Eeurc=IapPPJA`sIcyXm&S*+}!4d^u%$X1ldfU=)q90?43=$NLB+d^Aaq2umvNHwuuu;IzCD)bG z+<6b2vTKzCNAH`(;91YbMB1fnG9;c=Nge7Ia}mW;$gvn3y;p?^RF0mha)YC10+-Hn zXYia)&5{$UsNOYM?ZJ{0O8sc6A1+a9kg=E+4(QZ?hCLQCRyn1Rv3%NHH>ZFx%qbOS zPN~>A=9H7M)aeDIhS(*NUi2OES7$)tVpo9Ws48)3KWo4m+xj6(T-KnuBAiR&f{OvD 
z%)L*N#C7QLB`&v{{$q*DZx;Vp;xcvv^W+~?At#0V~Oj0v==WfpNGU%CG{Vk#8qu&K%o1v z#I-=;G9%*;(Z+Ql5|`p$>#{JJa7J84#Wc24+q!rz*p4rX&2mEUt6rjdgTLGu+d=aG z9CL;p=M`6OY}R!om*wTveybK$Ime+ln2v5HC3JPS!)aEg!uZ50YmQ36iH_gq`hZ?P zn#EWZ&Dj&bhnU%b&!_o4aDb{d1n@O>t}uO$t-<9J_spKl$}0;VTJeooMheduOyJrT zXB%0W%h8;i`vZHowl@-_0RjH6*au;AN87=qW0g%}8Cu8~!np0*DwNVYuugb|QV9x{ zNgH0VyT9Dj`&8<^j;4@$>^H*XtJycbSXLXUlM$ZqruDZoE9N%CFg&76^o4zWVrtfn z8DT8akQ-6~mciTY8}lC#7A+LWW0Bn{hsBXWEXomMba*RlF`!)KzWD*r9Jp0%pi$bk zh4v&+bm$>lLj&7kk+9%#K!OVyIocXG)W~u>>_+r{^+;W0Sj+7&(&rXNZ!7D!!(=xt zw!;=Ra+tGfz<1>F-OM77Tl0|cxUG;ER&fO#;!Pu$4^bHkVmTKUlGO6Y+gDxg_fnTE~`$wsxk*4Cbh3>1o@?PH)orI)d9pmQ=Gh*Em8sz_zE zA}+{`?JwWq(E(yw?#wL^6L@sr6vVU)E5>q9Ojx5FG3iu#5@m-eDZMtBV@6DwuTqke z5M+#W;R@FF09>5k20T?H;0-P`Ku_rHZr3d9$mXZ+kNWOYLfyyw@&9CJAwg5n{4BTt&$5Bn`}QymczIW zG98&fQkg*P@Bw2c22fq>);b)J5UPYK0|wLi$S%3?-a1X zcYwD_ElctpA;R|C;5%&W1zQlD$Mtx4zpcu72KU>D(JEPw3j6)GbFdyqA(%ysm4$T* z1PncrQf}BfuETF-RnZI(zrk^jfRB;RZkHa1+?JM!Puo4tcTb@tR6+$N54dTD zOV0NC9r>~My(1O(Vrk)!Mb$6o#rDn)lju2SVv;!EjAFr2O!GQb*vY$AhOm><)nG&m zb*aOpw1a}1RERdg60tR}_W5!ishpc*K z;=K_i-eY4*iT8Nn0u0HjqaO+|B-o5?4lv=u8gu7bIgp6#A+o=Tz*MWt(MNJ4g)Ty5 zE`b;u5*khmM?-HcBHUP=W=^;8$72h2j-8pX*Q2XGCSH*ZLLvmCnA(0(pJLJ85Iv=5 zQ7IO5M!UpnQM*G0KD#_QTn3SqN%>su*oAQrU=0j`hXfE>q@z+-Vm0?aL_dVDAtM6$ zDl+#|4p9Q?9!S_X@^}gRR%ub73ZF=F6(>=E6%UFhY8f{IGzzpNwii&`o%Od5ir5mM zlVC4qNl{-d;ILu3Jfl~V#Nndp(Aky@# zYCzkdpV)u$%jGOfy#>k;Wov4@Fy|X^6#Qf%V&9Mp+0EkE;~9 zIP_D)qh(;=BRN}GY_!e(emoS7=u&eI1tgjji4N)Z!-+3ZhZK`&kZoXvVDbyV%Q?pD zwn;=G5El!j)rqd$gNsh|gJE{rL}%89sTJmSQ|%N#tn<_rvtOp9vDR|q<}lNX$n0~| zTnN^-nQq)}6;5!2AyX;wn#2EXM+#VEjYp{K6B4`Nag&IbTJLbk-$soQb}2%`JJx z^`bSm+(JkZcPn@@qgTsygHn+#)6I;%OxN5($^p2-s=yUmVlPZ+&$4XOHL#u*?8WFg zUqQMZ(J;JbPLl}9guioy8yU7hu*9XfR z<%&001BuqJ(JFGBN6BS3f0$5&^L@{U!V-Z2}rQ z*AX}fyFWEp8_T44HRc(x@uN-!E9^IdF=HSMH=2Z1{5v zJWV;`evQ1@8*9(?iDgrsa`2D-tWTc>r&7%uqHSmtj9inh4ZYXTE9sQA5ohlFH{#Z* zn$HU$YCucYATk;#b+S6b5U4gg50fV&mZJlxl+9pNOIGCa>SPsdKj8*RhXEc5dMv!!Wjk*AYhbFtK 
z*o-MiaVJ$fwfy4z^KYI>)B7&%yub}<<}@ZtpRcNtlms(qbP54q8?dgknfN^s(ps{_ z+0m9#hAbt%uYFa1=pI*l@!)rSbN?K>+~oi2qwSJg)2AqR!G#`eN4cK^1+W`U91*uI z*LpAoCUHhy$A#w^^T3X_KBabqGw%4Naz0)8P2yuimpGr!y2TzAB+7);Qlx1ZXXcce zDA-@quJhq$t>X`?KtYbT#oMwtCxDaBm8d~cYcQ?|$jab!4jqKyLN(0Fx3w`a$xgrm zgQn~}Me!riPq+r(;PDx|NF_eiW9i>!Y3~|3pi&opE?XBSGB96ubVl)(!m`uieDzA)=bKrz4 z$b-Zgps_s4_!_*#STZ=!&SKB`jHK>Nb1bzAn{2qO!h;36lPtz!(ehlE+HS-3dl%V}MM0XY-0^ zwM9@W)6_SkUnU`vVp$su5(#M5pUVbSvx_8Kua*{=q;OA+J)d-T`)ThdfJiSmAm*K~L~D%et5cv_G`W_J@{hf5-xo2$*rx6PZJ#>jGx#Sg>*C z)Dmh1vbsDw75pL}bMt}+v2tk5nC+>uNZ8I$H+++bEeK`kCcN*D3U5ikTN3bw%&aOJ z`Jd9;#;(aRZ4$L?&jsXo$JZgq926dg0r(uuRU{be5?zy9gtVZDxCA|h^V{o%BdqtgfQVI?N%$gu>4+!p#>I=R&@1AUUvhT}ixDv(1aY5e$C;KZMuPG}^QOUi}<^kUGl{hP3S=+@~scFI?iIK=^6op(|5iOUn2ys&{ZjBnf zxQH+2qIi5GPqz-C zu}yD`o~=XNSvI%8gSlMk95XgS+=#Z<2sZ8f&_I%Y*8-d-WNqQgR0X8KYBl7Jm0~@! z&BfQR^@|_hO<8n(95@0?N=N<;yQxM~0j1>XT53B#mhjYs5yxPW;)FD;C(`yokXCTa zmY4;UtfG5$)Z@Xp`ZVVIBxns&zj{e=yh7Jb6_=JX#gQZVm5>A3noXHZENyf68N;2# z)*AU$ZWEohBqQwoGKT|jwyx8isO5N^f1;@tb;L@(*K*`G(ofYGjF4_^Tac8V%&U}@ zV`ds1Z%3oGc3i(aYcyNQq;>@@t;Rl(MHXc=AlXFolL9ThPn|%OO_WWU4m#>XU7ByI zPaDzEc{(X|YA=b;1DXmXX{FnTxg2%2w16#0Xu6iD3}IfCI9ItYJVh`2bSbqRR)> zWy&71<5;ZD{RYv#Za@$p3TzuKs@gZwr>dbEsCe%PF%p_?03%xr7ZlU1{`@t1ww>J_ z{pGP5(_Es=ArqSrSd5(Y5@s7tbkFvkD@Ql)W6PylZS-v(+9M z?R0x%}PUw&O?-CM*B>DZ&nz5rOJ#Lk%38-pD97Ar6pxeq;j)=8fM-=ak5Y5D^j@X$w-H>VQ&VLfqRK_SczXH8 z#@gPdHkbODny9D0w`hT<8S%I;vp`4Cgz<{>4WrY^FZA2f$%}iD*g@7lp#21f3v?M!!j=0 z?$Y%dn*q=7UC#A2Wc|vxek^)nZ?YJBM-Wl2H?>`PavO8{<-7c-fEchmqe^Mm6VJc^ z2{1r|9o!5jn3QfRWh%co{iRXI^vhzWKU45k##LqzSzhe(UidsJKR;7`UJg){&-`Ig zi-tk-bwAThxvC+7?=)=}kE0VjZCCmSjO=5mi?v-v7oS>=J*Qy2anWh9uJTQ7iEo}# z`6gcCn||e+WQlJkE8o2p7F0FjiUgDdzm2Wyr ze6yzbCd&7eL%6zloa7@*6jj!#FpQP8S=paFJJYhBegeL>}j)YlgF4+Laha-whfVd zVj-9A1TLMbYSxUml>A)+0{QH1yHFaEezAZcg=?j=6}FbZBE%$M4$>phSG_D4M%6Dv zfcKWRXFZld3ULzYMQ3{EJ;Pm8n#Hy^0jk8T3-FJ2NkbFtPy*&vs%udxj12;~U{;HZ|YW5)xx_nSBL zR>NqmFbCIgm!D+tll+88z_X!8lD-(5r~@{nYBoeCc?Qpc$KZc?__O=)zXbJLqZ6&^ 
zT2_1Db)vcacuOI_!Y_(irCKM+ok_}bfUVlrG)=4#bGsrrmGp6%&)P5M_YG>?_D>n7 z_D;JqFmV1TVFjU3`5xAj0a%NXrP3KpBw2GhdEIpT7hcV&3sIMK-Fkm7*28<@=0C-F zV=sUiTg~_K*M>^Wc%bG|Ah1EhUeJ8CDntlKY*FICEGddQC=LD3>T%tk8>puuG^pE$ z>p-V^kQzcg$5hX8t49Z#{r}v(4U}D1b?13M-dDX>Rl54Jg(T3cXT>gKDH0vU_Q($C z*79dC3-{_QmV3r)mZz83f|0x!Y&34qw8kzY*)BkU5|lYyY_tWM-aNt{OMc9KdX zKoBK}Acz2A1QwtK0Tm)Z2@0OyfA4eeyRTHGn1t>%(-zdbALpKrefHU3XYYLqL6~#U zDkJSn8P=cUMLi~2aJ0`^92=zZYnwVhUEU0vh{5{{7Px*-ZUFFU2((wL3`H1)X${XdE6b>VpUYu_onp8oNc>W?{jjAP?zlg~U6iMeV zV!WRswfT#{^{k=#{6*k^R-`e15jdO`Y0h87xE+0I&0oZL9z`T)kZ8xx~o=FiJ)o<*3k91&{RVTpr1zi*&QVj6-!XIvbdKrmacldSm2on1`f_a_@3)Pd>SF4l#u#F%iKrDgn#BUo zEj3cr)*X5m8ojy=P+Whw&TgkhHq&rX^9>gBrj13p8ks~{Sce-tS1aRmQ^LdoC{;HGXeq?u`IXiCSoiOaOUD;Hqd+d0WYn{W0B0A8NZm5A z3%kn7D`O#r+XyXWpA;{~?wiUHC@ITxU!DjX%#G!Nu%fb7QXMwenSUE6a?IF=hg79M zVe5*);=;jUp)x-+`)F|=iSwDieqRRcs0+h%-h!1<8E)`4ZboB@jku~Df^Z0!wY;ScGO^I@2L>5{Wy%Uli9615$KViv;oF}5CS#)`U0 z=lgJ7_jKCj9=q^II2|(dUtsDs>!0y;o&GMCB=+x&h09vDzlA#h7aOBD@q?HCef+Rp zc|SjFojp+96qHb7j#F&ygoQ#n*E~2+CBbfMtPebPf@E89`82PW^N5eL(9>X;s;YS&%5Lm-GN&o=Q zoWk!(@&4*UwJn<689We2F+z{vgi_g)w}ax?eQ(N<$6~exOv@ID)(iRI)_pSYY>`D} zloST?>{%cmowl(j68C7k(st$_aVI8Rg!~o5`bP&Pd6m--h}6XKWzb=%Xndfdfi8Hr zDkE}VOM!))yIq2x8L%9dtlK@yJgCIhMLau^0o7r#=@uN89_hO&H9>=pI@pw(QuqSa zuCg2l(dGSsn-ZZYo5vP_g^`qoS#Z?8DwftN%)+c9R`;=J?kNQ6$zrQXqx=+ z@-7;Zf6YQRQwF??3K-&NqiK>iPw8DW0M{^q<~#DkO(5%Sp*avC3RZkX{P+mY6#{3< zArG`4LT39z(E%%3cF6S_=mK*fB7`0-f&rTQ6ax$)^w{lvMD~{AmJ+aZAbO;Wcct8h zyl-;h!=htuL+o>Tl z>*9HVop#BWVHTc%*z#WyvI}+#tVeOy7d&uGSJdMAG}``?3;hVX{eT* z(2_=%f!r*h>c}i~J>Z^cmgXGKTAVG#k$0QVsy){Kk9*_WEx)AB7kEB=52%K1_g*Gx?$ z+xhKIO~l)Gu-i?L$kW-cnkoF+^i5JR?c@>^vTMq|+t|f0hYd_v@M{_>rgbT9cb4T8 zQ8SA>Yd5+<$UD#3^5ZC~|EQ+mP70bwc^0~3bM%_tJCY3hFr|IgsZFOi_S8~?9gZ@|ZboE>6YH_m%74-UYsY`EiNyTRIzMB%Yh`4>UE< z3GY~COk+weDxXy}o)MD&K{Qr)8}^S74JJr<3hY1B zfm5?d4f3iU?zXG!-w>)9*uSqs*#G4xi$xbEYmQ|nrdUn$V!F#5{2%-l%M;Fv{|}4t z4XlO&S;Lh5Pz#SKvxF|T@aC2P2HG0vh4-w2gY&OE4c5~74oU#5Js+Eg=vrFeVmK@X 
z_l6FO4-uePeENn3;3B}hi_b*>pDM;_7eNFtDGTSLt~5jo@STLXidpOcX~m04E`WQk zNdv4_69ahWcFC>o*h?lC%%N~(HiE^%aaFA_aV8!%ZjOV)qU?k>$T=aI+I?ero{%)nI%-Catd|*c=;|)2UyeT^%Hcg!9TaX9@O$*!Xycn}FZX%J?amgVO ziM24LMsri}9IX zMfNKROF|KRmIoy@N=inlNk%RE4K}*0R?LOc{%Fb`Rj0SA&l3(X|ICJ!kX({Z?7kD2%oS6^PGOkSRvhxs@BD$H^o2hSDfs9Q@lpkhH>K*T(5@45wn5p zZ_`_y&st~Z!4TPskU^~U`svn=^2rXW85zDpGn0(f zK&y5h;RDc|zf4o59p!Vh5i$5$=%fr#mVC&X+9i-d#>%$9;_SDIwWkbF7UFH-B`c9P z`Re45K05@YZG}?b0X<*}4(gX20%2LuU*#`iU2K99_&nQB3#SU0K-k)d@qOO>swfi9 zNfttZDzZXu{>dZ~3U=OAkB7ANu&Y+Yf3qW8T{v20V=YG&X-g(7GkTua6d9p)0eqwf z)J!+9+<$%xm3#|N_dUNSv*a=mT&?R=6JqL-omvl&F{cCM1hFFfZ4dx*L0q31+IJ?3 zL{p}DPz>jcwoH47fo7~kM z)fKQ)%w1xw1$5-Q=#Xf$k$Z**T~_bORD(x6?__LB0+R1ddc)GWEb-?aGnCY!zVzF) z8y%a(2B)0eHF+9po58-Eh7mQ4AAmTP$C}V-k@%D9EkprFc#9~L!x7zvkBh1qM+n5m5EN}$Yl*o7Nl@!e zRCaYbm2#Yv>8rtp*+BhY5k&2>szV>5@e+MOin~7>yGyim?GQI)H@C;q=*_SQ@BktNF!+Hn!bR0 zii{xy#O95cqIkdG;sScc%FzSuW9k9|r@qzm(R@V1T1rfPp5-JNU7=f&>G;}dx`e{a zosPA3j8ZYcb85aa;29wlr&k3;KF5`f_<0-g1x|xyH07$hbjarkAp?c0W4@>+1_n*O zDj)Sl<<**R)M`7<{*%T%1-cH5n`|;++>3I{RAl2`k}sZr+-H-3v7TRrK{y2#8L}&r z0G%8Q>uxBSyUDSJyz`-;eHu{kg&>L^#|S0^k36>zlPE<-3KFGwL?u{as+6n(2w(c) z^b`ay8;0XjVB0O+ z9^hb->3**qE0c2{SLu)wr>-Wj)XPCoR|P9 zyIGy-GN6wMTWmX7dV~gtB*2f%eddAjG)JPlkQrc}u)=A**SuAbn+Wl4$)?%=pq4|5 z>rkXWT&tOn-HG#fGyjw5&JpE`Ra^;@CfmhyQ_kuWtF(6HSM1Mp*mg9rSPyZjDjzm@ zxKs~CyNZ(fTu05>hDq@d;7q(s59OgW@d{lbX)ysH{RVA(N5tu1BTy6+4D?mekTy~n zc$EE@))=fvSOw-7PV~^w2-wFFbLh$5(9`Jqa^1BwIX=g2*frwabFJ(TK*)9R5|izj zvmoK&Rt^bM1{GA#?$G27=`ucFQy}Fsle_qC{UzTCC3|ce-k=)RjIn{xt!>Rmgp*Wn zb0%yuIkLn;$=Q!cp2Qv|pVB{x9_Y~(uwFd^d4-?>J#WNnfv)mFt<}c0=^cWU9Iqyp z>k5H0npmbQ$pjNu=t}%!VhL9=XgJnBk)S2!uhALO#XqT`<7)hsq(w`Vkk8Yp_G;Kx z(+m<`=eqa;JWkx@-u8 zxd;QosfH8kVVQu@63l~C8)1R%R^VXvF6g@~8Li$T1JKNiui1XL_SPfsLac1ru8icZE&0C_{^2^nJm*tidY=nY1O+fG;M92C3bijWl_Y=9p5{H^ktb1p&TeYR z4hpOg60pzDCWu{eV%`g>XfPU}a6k*e+z-)ew)9#f+H_QsFDx6NK=vyw+SA$-3~5&| z6k(V(03>g*6JboO6E5}XfwVV=n*^I86a)pT+II|vriNZ-bJ}7Poi1V8X7MVRmwG%K z(x3|j)Oi6%_GGocp%n~PP@T6k&;##Pb&x|~A4nhtY8aM;u+1iHF;+Xs 
zaByn`hSwjJ?RJELurs2KBJIzz4_o>~ApDuQ=Lugrkl1qnTpKZ@PEn&{EilqnW;_Qh z#ry>~OEcol`9!n0FEgt{<*YtErlYRQiA_Y>7%n_mJ3)ztt8D~3?HSsX1}}-ZyQ0Yx zP_?ySrb~q^t9$6g<+K`c4KFmUzLz&Q(rPgpCG4U0nv+DOVG%FY8xFPeMh2RPZd0H- zKISyX5W>tdrQK0&erQfT)e3n6$^BA#N|wAqiQB zphK9dCAyikgZ!k=)F88(*h~lSmoUW%Q>+4WmAWU9$P1@vz7ysy*Wb&|UhBaAin7oulS$4s@HEROrUX&W{ zoVzB_`~JXeA0#YGmWUJgur3R~3D%`=ny{B7zlnL*Wo6PBswS_xWg@<4%!Cw~)DcU* zxfx6o3f=5dkj!}q4B%+Vj*yS@zms^NJ$Y4XI?b)6_9-)l6rV8|S*56WyCr>$u%XZv z*r@5@Pc6GfvmiCCufqmS)D>#Ww`rdsQlr_MNNrZ>;8<|0@1`Ihx+{ZeWvTYMSP>WB zJBb}MTPKn)9xDV>(^2;Gh_(W;Ibo!o4{Ry~!F`M1(h@5)jvE^`M*CvK<)Rk#> zH|WZAjq7z~`o~Y|$~3wSx-#wLCv;_+S{e#x!GYOSCQ7~K~-lW z6G)WXnhYf@-G!<7;Q=W>q8y4-2x-ID=>g`Q@_}WcFrE=UAx{B~jtA5+z? z(E}aTSvgnn#U0tQCo~~Bw3aGU#u?|Ra82_8>*QLL;u1yVA z$OSF1UC5hHSKnG2iiC94UBUdWWGiIfm1xD`xxTDOx`|A!5ewH=WI5s?M0R8J&QI$rJ8je1MqT+4Oo&c?txm!LL`l!L>@sGzA3&B zp-!>?W(ClwCbpw8AI9{07v({KTWI15*P=J^gxc@BDZWF)uISVE>;9(rXDC~EHnrHb zK}>DBKFxf{kx4Oq)WQCk9ktLZk6LJHZ({&X(`G|djGxaC(R{7^S@IVND1^USHPhhE zIh+^r8D0q7nwE21E1amA{YilN%mTGdu%C!Sb^5-$P#wQy{?79_zp${F!Sh^Ir|`l; zRksN4nrzZP4d@X#ViA0%@gd(~OP$;%YY+F^dhTSX#QTOgPx33B3Hu4A%2tl~(FV}U zcW~8UIzSkJy))^hu40&)CMXyLzQNR-0Sft9;8u*BL$UfJ=i_s}vkFp5J<+%0$yw5{ zQOBj2fLV4vR5&Sc)8KJ67?9zZO~JZ&1|#4JU03npygj%nKCKTp*&qzc zE!#3_lb;A)(%33fDJKF*f*1OF8iR*?d-$e!x4uZTFQYo5ZETEwQ@UX4CPcK;R?ZGc z7z!Ie+K8kphc4*~VRsBheWEvPptLNtKiI3U*<{s%KF(J%CxE;nR)>=t(vvEAnlh~E zO34e#y@*kTQg1O-^6=^_?;hdYCp`f5h%g39nxZ}WCF8uA%03{_vHMjb$ZoE+G-@)? zIVAU|RY(%2N{#z?RjYF#TxlnB@KS25rHcJ7;^d)tbUHK^Aj3>`w zHp=I-+o_pcn?#x|LsHs0rnVqIvf>?8`%31N2Q%JrJpi{)sF1?SwGdkQ(G;1wf@UYR zkz^obVJcO<#zSwm>Ij)Ls)OIN`Zd7ehW;&8I1?(=^%<@OxS>Aa&JPS$6U2dmgMiaR z0c=t$u0~+}1=Y1P>^`zGfE-)DKyFv9It%t)F^TWvW1JUZIrBnJ4` z@TeX`i__b9IN5seAz9D|>H+~uPM}`H$p16D#ogKfCm?w)2elvO1*jr?%%3Ab*HP>+ zSAiu6@9RArR%rxx>e{f-3skRb!fsJwaA(vx)&^13t^9F+ zc2dt6^9-BIr*&OjkmvOPv^u8`E`$NFH0!hIz8OS`LkB^LI~GU(1e{`RbW-P;E>TGAfh3yiO#RNu+SM8@>9O!N?X?_7imlS{RQ@oh=b4? 
zvrW%de#**UXl~B_P$!3BEfnBRw`@Qdh`t3JG}KcqIs%D9`!15n@pP{x7c!e7U_GO< z{GR1EpP}Mn>7L6;}(k*k_k=@;rC@nxRG+xj8rh61+ghgkkT z5hkW=FTeSI(%LzG0Cn#7a^J}Qn6ct6F=Pqn+2tFoMYiBE?TmD_8!RHhkM#J1yLH@3Qn#d3IuN}wd}9%63Pcb^+l5N00{Hqz^N3=NlE>OkNM)oT&Tq>IiRL>YpLXY zJ2AkEIjLzY77*arH?VFshSOO-d2~-EM`fU6blzu1%XMS)wTutX;*nacs?%Jvs`72( zu@IpZK?FOO^*T09fm~R#Snh0^&H~{X)ilFK#;}$&@smamI{k{q7NncHHNy&k_I--? z*Wid&-u20O)p4=XoAcfq_B~t>oLpG(0su7D3!|Wb=whY=(snGg?I{BXwe66G6x?LY zBwiz;(T$l}N9BF#@x|>qI^VB>}z6(k{WzN*iK-2U0D4O{gY0e@Zv} zIsuVeCw+pD;OkZN3Q{893mzDmo}$AwS)-7pCl|4x*bbY$7gLO!X|ECkEMaPvRpca! z{xq<;VQ*p!0l*s(@WoFksSR9&&vOC5HaOB)j6!%6JZqRy!4Jx&gW*8g7ZzNUhGuauB}F21KXh=keZHmU=8Q1jCaRy zpYiU%<*H=73`=nUQzAYv-ktiLlE<7Wi1{wc`ncQK@3C1SP7GN=Z&B)5fqAN9Tzyc) zk>5ij47}YP4T3Y|xtnc_KFHhr@JOFD9OPOIH|sOp=SFnIIJ1x4T*TYD97N`k-ePT+ zwlIVCd$ANe(69@xFdga#FsywXc>-5BGJnO>Bb>MvK+8(`Xv_AC4pEU2d@Z=em;p4l zE6~mKyO6-h(LS|je`jGy@4?v*C zn`g1=?k>I))Z@Vt_{zdQBY=c(hP%QGXm9q@HoJ?oK3QjpBnYSS zZc?d|d8A~%lXRk@CT#c~hs}ZZcZ)eNd77y`t-gY$r})j!6u=GxJjHz$gpIQ!gTjW* zB(=OG`ohGW89@?yX>{hCb;F?y>dr$<&awDOr7nLDHj~E|f}%9DcbGE@eoTA$oMjBH zWvfuXQ~K2+=pkLcSNn&%$&4-vpy{0AdX}WPXSCwMR|OjREO}*!E4qDNaC_3ViW!^t zd?&RD(@GWP0-hDN@D`e_&8J8m1pfQg7dYEKb&wJ5)xD$F-cj80d=BTs52y}6ru~>54l!-j6bnW65ar~vpf?dofL8A=G>;0Ce~MW! z9fUenn>+?t>N1v!kHHBr2gn=+O7D_Q70$3T{O-|n`0rlb9|*QqxY`2q=yTcxr2@DI z%+$)-Eah0kc31+e(j4!civ`{5xL6UoEo3mI0(Hrlj`6yADv%al_yeP zV?9s|7WX4z?r@Mz4uagyo2$Bzaa<_;A;Ad=*EPnt9q z&$Ien95@wOSld)9jwpo3)=$G9k(Uj0uR0VYcyr>)3gKmeWEh;QB*Tmpx@|!z@$ajU z3*a5miQq1>ba@nQps>$!59$@m@%Wr{YhKgx znsX%`V7WpE7-?gORP(6OPxxKyVfJEh|7_mQvmGXSq&{$FJ>6Vx`%Zb(2xN4T+!L;M zH-la2mR^i^fPz3##K@xVXPPA1{EU?}cy*Lt07po0k7sR1zKGpuEzzji^YzaETzX32 ztAKkiGGcxWg3@xB3)||%E(yEwHs6eg!p<7Iz@!g@ZR1h>Dvw&6eH?ge*(CDa&_ZOi zn@4=io!)g0rSs}7IfGS?>OgCzHip{Ssh+!bJ0#}AZO=q^+~Z#IX%7Avt0$Y_dh*S{ zVt2h{M{(4KG7ugq->b}g|Lou#+uN>@Svkv+R(mK*<1E8w03fy1jL;)LfBCHbG|%wg zMG?d><{cQ;h5_vZ+5E&O-ft|G_Y&tVN-MSAX~)=9_`SIGbBJ**KNjLWj{tIq}$pdB|)pI&}B=Vn6?uhlqGKj!;YX`7pM@mIXjjj7fn? 
z8~q-y*PqKBNfjkj98+d;$q0~E36eB$Xaok~WD_<(xrumc+d%-HaRA!Ebf|P{p72Tj z%I~1xJl+NkrUz#&0T7{G{%1nPwb8rP3rKYpkaQ!mWf%a9Pig@%P=t9i3!va!EKx%a ziR>jp(45wqNvD?}g29V$50>IlO*#$){5{IwexHw#;l=ul8=mPFboS3LopkydCS9RV z;AjY3jBw)^92LxRU{qpSqC^pJ;Uio1_@G_#%-p{#XlA^>j=NJ%XZR$$tu73SR}47az5VBLx=Tu@J5~*?B1UAt%ieRru!!wgNQamb{Q;S?-*0x# zEiTRvJI*GFyzoqWyh`kxlG{*= zDg0+XSoUEt=WuQ%mM|27T7gVfpNW#%Dd~gjtnobG{?;*uG#-vIWEmNLldBrMzd1Xp zIf6wIBG2b*qo2~k7!qCK`7`f+N0z_)(>t@So=jqL7nZ|TY|!RL#q{8Tc|n5)0TV_l zzbq5UKt)FSS#q!R(Hzyn;u5_AVTdEklq1VjstVF%ld8bI=bby=>9oqLfL2K{f~vy+ zIoOs`dsBjtBz53@4o_^en5^z+RUzbaWta3rnwcJZ(B?HI_FyY%FG2tUJ=cOy=*R(m zXE`a9*kYe|3)iRM4t0o5b)lZ0D@HX!XY~&XY5rtJzziXWZl#{rJb3FvOj=Pp$74`? z_5ufV$f?Ot$D6>d2_(_8)95>x%FuN_Uo^tJ%NL=jWu{-t*-*mPwfe4L0Rz-8Cm}?>=ZKDlb&$`w81K_E`c$kYnml_S!sE^ z4Gty6RJY!TUG_%F3&1ax*0~=>?6NNuz#_<`iKCgYBYCoXb zapKB9F}<_8DkQKWr`YMQiYe*)FFNx=fHb2jjEhAruSF>WHhCXMEbMc+^_z3~(>a6t z_<)G2(wvQxuu=#VcETdKoYD+v4}N3xiT=DC_jy4Q;zStDiyR$W`6qCue7+#feXeJM zS4AzLt*8*hyy`>}V=MpNf!>lcXkhw&cc8=iU~Tm5B^^G}@9+ov9X{+GKCBK;tHaaQ z;SbK;Ve4R}!zK|1$+76L89AELAmk(P6jTDS9kIgdeTtRzMZ>2T5Vw(d3#yEHfnI+( z$xicDmsc5R&!~G)-Whcbs88Agwe&w{z2{^iz?(>~FIo#{==qllP}^p#?EF{R=2_q% zl3NQfmn|8bk6R*+yed|*Vj{XCZ&~SjxMeVZSVc@K3(*{17Ty>laNU_KD$uxFhRA?eM;_(C7RSpD>JH~fFnK1v;?*>UJV#*|w~8?{oa@dG z7QcR2LkeT1M(Jd@Yv?sB*eQlh<{_lVNNld2bB?VY&Cc9Caprd(sr0o5htoJ5VZ{-~+Px6aj zMOKkoQ+#eL)4to^Z+VZmyhknX<67eF>`zA^NcP>c{qGL=cL-yuz!TYXc;hw9|R+4{34`dh(f8@95`^-ExsfkZb%qMPgDZKM|9 zZY@N{3_aNX5w+DjfVJCNUK>5iyN%Igze`izr73j@!M`clp1+gGYG+DhO0>6#L@ynB zNQ*({fm!`1x>`b9Y>4Wl|8{Gg>DX1^^&x$q*eccvnNUp|CTDLg*`%x%!}-hP2y8Z) zNstK@IHL@P(NxqyR?zHSC<-EBI+zUEH6`B0C`iP;%AjzH79qAOzX$g#%X=*%Wp6Mm zz3-D=_EZ-cTGvY@e@`6}sB{X3n`6l!W%}pciD_;b{W0Rlh#tE%c5Ks___$2$4Deq}pnixVT26F8bb>33Ik@;Y}P`{JbDX+w{@cYNLM#xR_IafhC>^E|}! 
zXZ&uJfAg8&v3IZN;q|?|laF#Y?e(oBX%=^ndwnasC- zyyJ1Tx2%(ssUY8f>IvOl={^2{-(Atkk9f;Vz0_ykW8WNjxCEww97>o7pQ$J_#*rd|C6Mi@1@4o9L@rls8@A+NFTYs0g z-u9BeU2Jnpr`g zzOXl><2-=ED0`o!=Cd4%aMlB0c%&D}!#70_A> zf&}04q4P=G0pRZD&Ds7TjJL2bvvraUL6-Ki!z^~Uw@29zv?qe6EES8ZSzqttEq#4J zHJiZnGrE$^?fT?RT);#4iv=nBQ&P;yeusqyfYdv$aH##mFYL(jdj7E|9?$Yd{_bDr zu9kgaW#`RR@rY4N$JC!%z86nkIznojc`R_>d5X#DhrJdSxELvjO3LpCtZ{F4Vk!Yj zJ3K~4N$iqvAe3ppI(#~h#w=+GU@*B1MLnSivXXvYzoLgzCUTQW4_*PhtI(9pS2Gar zxO1fsKFRle4Yu0Ock`3Iu(JRBXZ1YGUxe%G`8eH^K}MX2vHy8-at>QiNgb4x^;3di zfx;i&$#_!^g>I3`qKG5uF^Rc!OGX5kbvG&-sHnq=qf6VIJcAn-`Fd;{5CU2;TS3#=a`(=fk`0A_}wd4!$}&+WOzJ`6-N6#0_|$s=uhL zIXN?;JUO1pCT7nQkEAW;*R=djh%c}C=)|8qMb^0NacM$qjE1LY^mMfU^whWC$07kJ zU$kuKZ2NUO50`@yNy5w4GP&BylIWmyD?PYH-;{$8*h%{;fR`6LXEW64Ou=O(=KCSS|pHQ?;h6PipGNUqfr}HA`ZvS^O*-t`0Si4u^gaj zigM_B)WA9fI6_N4*fJ)3CYNR8`%pY83re}d5{z1|Hoc^pE=s0lN$T)yzmjyYUU^ya z*m&8_zfXMkmIYN`9XVy_)?R1Ok%zX1i8~Z{Xzv72{HPNWPIOU2H;+eAKKc0Dc09t& zA1z2tJC&+cBPb-Fps+^y&t6?ZnfDMf%F=X^cvhnw2W;6{#6zJe8>c)hJswe(wut* zE?;a{*&+uJrHuA41yO%bprN-E{E4Zyx+bj;(bAAbMt)wGet*ey5W!syA-nD60T1#+O zP?tr<*7%3x+^zQy#y4kQY5ND3b$}02%Q}#K)ka0)5L^F>fl_UB3!{?Qu-ozrZS-N&BE}_?&2E@TFt&@136ecd{$+{oDo+VDtfloYoeuE zMmEmKwlZh!!)cW#|BfX0s=oNak!kGc0R+5qKl)I8htH$i#Wm_VlwtCp|uA zSd$;IdjHDmB?Q&#Rctg>M%mYGzz50vrRD_b18^MTmhpqY0HAPmO!Njq8TkL&Y9Ycn zA=H+QV5;Vm8Dmmu{vCD#&w!yqms9!$Urq{RWnmVkoMbBvQ~^=;)Oy24VJ9ayZD)UH zSb~U;u8G||&KGBs&7Nnh!H_PG+Uk5+Zi(#c)|-2)*rMo|`~yt>13ns3Y2WW1xsMmx z%TzxQrb~2qi_ig}R;y=6lIim5w^ge*;D484vcGB2BKjNqg^hGlHOn-iUl}KgMX}4W z2{3|0>V!sD!8{B*WmB99Q$c(_V6AVA4yr!vID+&ZOO|5I?bBn7I@TOUp|ZI{)|^RX zY7Swx(i|2nnsa1$ZlF0npn=1&3Eb0_Ug5k_`d*lY3-t5eOnYx~{O_?K*%D;4(t3v( ziRli#!OcswLpm@gx+q+3W0sqJj(Ryw>6!&h@kCGu*OB2^j)N!tBG*Tg-&FpXKtLwk& zs8P+5KteZZ5>bhK4`Xscw=o^5RX?%~PYH>{tJ$!OgcZhJJ2I$cuQi2UN2O5`u&9Ez z=FW*<=nS^qG~WjSHEB4_{{}=QJ3?x1`TB*^-vK(f1E+2LF$aNWLwL3F94w-{3MVyE zTk8%#?M8}%!uRV@m3MXPTQb7gL6i$AgBN;;+_$l_do=DtlFrlo+Xc~Q{=^83@Z*MT zgdd(EP!)u~-;d*i{g8}gp3rP}j2I19zL7ZpyMmXUVcUt$j3=j)F>&Uz_PSu$=Y-07 
z%&rnx?hN~Ucu2Pr!@UdkO~J4)7T@5c%Qs&$eBGg;s-h`-_ziowGdzUzr`VQROZL(w zu-)a$T`9=R7K4Fmj}>udzPBv0hy7tg4ddmu{Sz?O*62Qclowome~R#^q1w|vSGTk~ z#Y%EYj!02s*CH&F5^Iqn?6KhX9K4tvY-8*AyjWnt5q)B!1iv7B_Dvh_iK4c^-jDOC zW_qTWY0OhL%V4CC-RC7>*2Gtyppz%%W#|t9O z*|#^W0gHvbpzYy7bdi!}BRQh)@#2}*XGgtk)X*Q_c{M`iI6(OLaC->SCciUfE9~SC zz)KUaYS`7c@v*BP1WtaZm?;;;5ITKjias#DU16p)KBEzg4@Us&0$croHuD?kf(XK3 z%jH4@FSR`cr9UN^1a%DYvmlU$m(;=InPP?Rb!ihEaN>xB2MgK)cWSK2z6D}ko1P49 zmDNClSP(R0T8vZ{aS}_a6lXIgnvyz6Tnc3jrpFNXKnR44`)s9u)5`2u83+`CMot|L z*vlEi=7Z(S15O4G*~`DNm(P|j4~f9Boo*v_`T4oT{L!_uUeJ^yD$f*-C({WQAaW~O z#>8&xp@+J)hrDON{?Ap}4L8Nd_?0kxRQE>{OJ{%zyfONW9+0r&gbFav_#dzmlg*v% z8^w;6R7YfiB7tv$>}Z^jJyrUs6CtSX9>n^LJ3 z087`R3|7kls|ybdo!tyW9r=uHwm({`$5Laliy+-f&4dR_^P~~vwn8S+uB(lk6I=7w z+!R|lv`#@nierD?e~4n^Ek_xBBZQ||M`B6M5@Y~1U8IC*=U2#j5@$bXVB7oU*sW0y zuILQg&|bbi-_`RgtB*c0eHQ3lP29S{KhSrmT@Ar6${3nqMh#A!Ho?Kh!`^}k3`pUBs7Pf z#~fcz+fVB#m0 zgia0|PU2ra0AXorxFY#zZ1^YOm7-r}cN;%Y&PS*Qlj>G}*lV`#>S&Vl)*B8`svwMw zT&kO`jU@jVGqG2-(H|YE#9p>d2HVx_NQBDJ4a>prD|AA*IipZ7p4B&WgWY=T2YMfw zBg3}``4;XBSkT%k9nt7=^sAyHN9hPUn04eZ9XXP!r-4>eC;z%&3m{RKXXJelX zcg)6K{Q?Ja>=_*J)-*Wm(l35;)^*gFFd-Z)ROy$&L3!*d6M~Nv|8&F*-5`;A(2vt0|C}y5b8Gt60!wcaRdKhL>B?}U< zUxE|fEO1Uj%nlgbjNp=2-R2;bg3^NnDOU+35hJFYD!3XLxy+@=2zH(lqKfRn%xkZJru06=17y|&+4U=0j3uLvVtv+hR@=C zAooK6c~L2|l6;541oNW0DH-0>Av0L#JM}VDw`(BD8Nxet?8LP}926?1$j6eRo@|KF zsU0lf#WV^PPJw|!1)R4W70&1a(**;ilFA}IYl{>)Dam(R7njNE$guX*tE($Dq)j&Y zKaNol_U3!7-@cd+)`bh2M3XB-=iwa&)`BfyF8&yM?9F_H%@Z3rE2!@4eP3N*A|pec z1)O+PWuUpw*89;eHIa3S9jZefT52!G^qaldKzm9g{``@?zKUb|_(hp6q7U)F)g(jp z>YK&drD+xi84x)o@{yydZ_S^Yj*9p!B#4K6oJtr68G#LHsT@(&B7AB}>+F~=LhibJ zng&?+l)W}RZV{l&A@2C0s?+_dSo>s|^LbJYAJy{H#aV%sUvkKmRh=!WGFov_-_ex| z`lTL{(6Q1zDmFjQXHGMAuF^W&B+6tpd*{<$Z(`@q!*MW=AaT$ia)&!o z2yFD!U#HNyP^!7;T?X)_1Jalrp(#hl2Nn5j)HZ(8R)ijEu z%}HPUK4R3wm(ht!UPFsz8vuC6QeP`aQ(r4bQd=w1K#Mszl~&h^njcS2rnXjSw2#r! 
z{QDSvMsfl(f0|#z=ySS2hcq-qb1hL|#`1~++&)H6Ldio_fD|p%s>1Q!F?xrwZByM6 z%l=tl)08FJ6j}(N=#qxf=#qxfyY-oLPc4MqZa{!4?H|}()yML$FJ?#>{j4ewrU|EX z&A9Lu2bDr)7!k|i9JW3POj7kvs5p~>4G>O}hYkhLm#v23?L15_rlbpiw zHJb5ZHlUIm@69;>L6n_0&bq8hl%1U|VPuKMT?jh)Uh!(*HU#bWaYCS^6tia!^Baug zM0xzd9vzD_mgcAat6#NKTF&>|_X|VcOmeb4k5t)4{YV)E9-Nv50@c1QzcMDMuxpYH#>^ zvIxf_S9fX8E6#sh+nPnOsACov^YFFTvgDgE1g4z*%KI{pEf7@h>b8){h%{CAttV0! zr~3u)%_&|ZZ_Nn+ z6O3A>yeNN@hG@{ZW6pl74;&h2t!bA4L~@s%TbapA;DO=&p)Tn^A2N}4C=l?(LXU$s z3ZC4}l#{>aF1r1FaOfVuv0kxPiD*~$cT$I;#S!*3bHXwn%;ew4XMBAx<#6z)@kH?$ zGD6QwN~KD}KT`MMwR52$b7I=&HQQX5du=C*gs;sqr;Kpc5AS5+KfR6~qq?RLd!zsDNoDf=Z>6`!+fuV63Fc{$aU;ujkafZZjm4VCw z2D(#@bB@>lJ)rahphPSTVrCyo&|ewY1f`vEA51$T#Q zKqUJP$+MhZfk$cf-L~j1smbFlI%LFW!R<|;Xk{FAuYAd80Br`*YK zt=vMj%SnTuSbdwBV{;5f7v`-{2q~uvbUTL{eyI6AoXgx7S@1 z3sL6jNii;d@nST}cNhyWVB7 zofq2R6pBq&wFCe^7TK`WcO0e1hLGYRz9w_)r|d%yRJ@Ym==^ZZSf_Bd-MofCf%Z>Z z*9?kuCaf-VD2J($Z_~RD;piJ=TKV((ToSrsioa0Y5XNl!8)7Uyt;5;n_>;;hzwq!{ zIpAl^I!@hZQ}aHZU9ip5)m87V!h)z#cKMjz;j7n*?tgfj>@Heo_ZgVePm+8D}U`1gkQ%2&l=5h|;)r z3$X&kOlCJ{`Rwo7-C_PH=ar-&tNMC=Hry0%@T%8a)mR@Z;yE>W)q;o<58EPV>8Uq2 z@`K;qE!5-NR&q`i7J?0TRk0z(+XM3@M#%?PK*)gE9p=OxS?V4mlma>Q2z33WnyciS z@I#lQJ4$au=PD0pk1K`aV*J1J$PQ~HV*0g$Jg5C2i&3Ua&IbTE#PNc1iGKF0jt=4^ zP0(i9Ru=N|hX; zl#4LD_$vR=`RbEz188fI8nG;m^CQxt#5x?Qjf#yWIjQ!Iv)lE25zAs=EIpp%z`vE0 z;CfHe_q_J-LzO{wtpM57Zhnwu&=`0(MmIJHSm+djWzK2y!l%DN9+VzHs!&X!+V3zP zVu-XFEMWgcItd$PSQok^blPBe#xdK`;u|93J){DS@yf?E%mbEJBVV!k+9nH!_m_|L29u%fc%aQ;(XRgbE;Q&>A!Y%#AEI4zCct)=nD*?-|vo?r&b zzcmZ+&L!XA}H~j~2U}+)}cKQ{e2IH+(tpCOQk3Vso z+JAAJB&}vMj_dVWz1AQRlTE6IV4wDqd?{Ldhe&=giq>sd7?S+D+bx(92`yiMU1iH? 
z=eq0GtQj7D3afyWg*qHn6)au4xTs*USFn_r$4UjSeC2i5U3ulT*Isw+ z6+g~YRt{1p1Xl5pm9pdG*Is+oRo7f|?KQnMvtC_ujb2`T_4w6SUnkL`^;YZQ)_m8; z$r#8C;}ZSKfftARFRmSUalHRxyp#Wj-9#_ll#MfrO06^fGFK10*!8DnbHwfr^uF62 zBk+N9AXg;>^A5aPwW`bE4zGAc_Z2CTyz^f%o zmM*Q=7cXAAxJIP$aZM>P#+Qc{VaMKAgU15vKLN;dL5@gdgPgF}Ah&q&X!x&xl_Y7p zm|J_rMXcnMa&qhejw0u1?C>LenYJ&5AA!u%bbc22MzXS}4$;&gN}Tgtg%{9QUNFrR z{wcU-qJ)_P;A*!!{5Lez8A=F<9(Xl0)NZr>TkV$8XjUfXP#=8>l&&=Vn$4hLZ7XAh z2Q7g28D3)TX3Y}!mLKnbNK7@u?)I@}sX?9KYE79Q8y!?EdZBWNcdG8f|ttjl&nn63`b6+#ohh^5Y_@#}Mbuc(m%{&{77|fH-^Ub90 z0ZE4x)WkEkDx?`8kNa){Q-wGz4Oa&QS`f25s1W?d;i{?RRdx#$6OGVxL%N<`IOP*J(|VP z>ZHnJLgBB}%+}M{ay7i0U=cGiNG|EMo@V)t=Sj|{j_mn#e<3k{3L3i>1&$q1oIA`q zo=WrEr~b5VDEei64E?2Np^x;o=%WU>$JhZ|gz_n3lSMBv3z}?+5T(trRMtlYj-wmU z!nMi0c9lOS=`)`AjZgo^r^%VVl6UlF89#`7OS+UrT+KgB{mLZT$VZTJ@@20Dg^)g< znmoNgDbS(2dah+88zniwm;#9ip-8`7iwe5#<~5uN{6O%bt08=U*hT?~{c%cBB;p ztgPZ;x!d@&$0a|R$K!QKmT1Z42Z+0bw&x2r=ds6}$JzR#?A}@uKWfF%c90;&r9=Ie zu)z-Rwi8EXuOEx5tL~*-YsX<+i?bFlO=J^5JjYK6+A*qRob^#=sx{Q^4392YFgluL z3l}b0v|z!K#f!6ri^*ItB%W|iEQeNCpK$JLgkic;hQk6#5>S0YmVCHU^@;2hMEO;|f=iRoxBA2w zSF8Gj;73l#u0Bx=5olMb2Q-hsuF4ZUe`Y;tbtdrQlZW1H*fcs5I_kCFnLyV7gD1%T z0Rm5u_5=Kl?80ih=ngwzwk#2Y^^5CB&msQ#KF6ffrM-oNhwcKq3lww~<{&fA62OrCnF_!`tO0VGga)v;@Msx?VYPk9 z&1$u=y~v&r+Mv7=Cn&iKMX*q9D4SX%5b|&h7HUq8%bpmb7}k-ZD>qiUg0(iy-rJ-A zq{g~pnNb+rP4TPCPHnPI4Q57p_2%sF5$HthijgJRC#XsFn3V*P`pwt>Pp`MlDTo`$c!6y-EgRJpRO zQu8U=B}O7(57Y{>^L<*5MaaWYb|SBYp&(i%pe*X0{@0NDdXB2)6Huk*naJHPjNk(ll|7IQ z2sfnNZsTc|pC$?5F`sUd9f<)ZzH-@Z~Pdu z_lj_v1c7C|cy$~o;xfqpZ4vLY?I!oQ#jxh1 zz#-fy@->AxP?Ah$1iZ_3!v0A@#Au8jo?DirkEM^YI;Yzeb>W1#I4})oS9tVXaSMKl zu;cV_y&1P9tFm6eRoTCF)!J;&Yy|RY&Og*JYd6H+JRvcom%sodk6I3nd6ftU+7I_v z>dY}yJ{BN2#L-alidE@~+KMhY8|WbYl*f}iw?5Pw1N1V0BCjKb7Dsb1rs1`$Y9A}& z=0ybxPI5#`nqTDLl_dKkg}K17CfhpqU{BpPAN&uXPfx}4h8^! 
zpi*l@E~c!m9c{oyZJg5pC!C_Lk?Yj^cPJ=r&T0-9)l9@=4XaAh@Xj_>?xe~j+u3IA zn;N&PXhKib|1;R0)BM-lvP5aFG=S*+ZFSG+&EB>Q)81=d#lOO<(`F~%*Vc&EMtj<* z$%KCGYKO-2vXLHmY0cm{6~^J^s9LY=v^CjIf4dE!UvpdSvHZ{{_GAhWyHxop{$`s4pCM@|F-HG4L#I@U5E15;3}Emdio=#avnbW-m4A;VoL zp61j2S~&5;5aAe`WfuhH3x*%>9AfL%zXY?Oj?pj%J|=lrL&<%vF6z?Pwd}vNE-5`; zX|6<7dbg)IyLxrM`a#6KhIRnG`R#EK^JBL^v#r;-d!*<6+=DHE1AP##@SsIq=> zGjz=)F?NKqVX7&>Q!EgwJ&n2lQH|zmS}`~>Y-?&a`m@i7VgE&|{(@Ei3x?htnq9Z* z*$2w}dr<14PT|wR*-#c{;dqk0y*-isKfT68`i7B63m>*3sSNTi>TZhmPN4~BWb;=? zX%_?72gA1(NHvlT13{q6d^R6=27@L1W$d<*RbIhC!!xVkV5NfTfeL_tRdA^J09g$E z!(;mJxP5rcKKw-++sT0s1sgokF|a)I=tOhdM9LAP`kv102K?un`5u-r-8L*q^S`(H z&a-{gCkB7dCS89revHq7Rk7XS^h#J2=4e<2idt={aYC`WIw@wHQ#GymG)$3}cd%kr zJ*hxNO~iW`Prj@2l}HC``jW3evDO$(aH6dC6OjY5cZ>Fnutr(0cx62EB(wO~6Wh9t z?c2CdUq$8pM>5lv2zCf~gp9f3Ai2n#-w$6^DYoZNi^qIQv3=#eCb+EaK>65<9V+iJ zSoez^E+2cb>GEFFRn~U2eC)-Jm-oY~%3{V(2-SX%C?%f;j^bLS*2^nq6_w68Ptljp zDk`;do}zsidIzMp&QlcQcNmfvm1;UqQ4H||MWy@BQ}kuCib}nmr|7a-MWsK_Q}oJN zMWrIoQ&if2g{7NbJWo-q0@b3I<^Hk@%rDz4)|Rk?QC6N%tr>36d~O?Q<|jdtVu90W zf58r8JAlresfi8Liw?za)KO2X*v?|P&dm^X4qtu{S0T8mGcd$OR^N^w&=PnkKD(=} zym%(#RGHdAb3{e_hpAn{YcN$9X$sLq0i1!lo9Mv7=~nxbFn(2E84p&CFdk7A|Mn#e zN5+u(0W8DF{BO-j;$qwY=MIBh;K@P+n4MAJSg+JDX4TimF9aQ2j46 z2sHk(f;hjZW9O2GpV4=y=$H9U3cZ}eJrAn0+?TX6JCB*KV zqT*(RZc=*`B?jjyT#=NoFhzC%`=ptS)hD8{AvbN}nE}@)T$_jlzF`0UnTZJVd$<0^ zeAQcfq(u1!%!<}wH4~He-*ohZO09o#Tk+t<*KETZq(KpC?x?+WR~SK;$E1q?5uejNsq+6 zyS1VRR9;Up-pibBBAn^Vyr&M??c9pMr|XsrbMSBBRC-XCi`TEPs)?e&90tM57pc`e z+e!ij#@}mZe}$`w47{&Ub1QqT6^QTWzC`j(VhL9i*@2|?a*Dd`<|nb4fYqJxvO8U_ zG;kTkWDm2EtaD2_)vK_~U!WmKTLiuJYPmFVE4!^MyVc6# z!L2a)nr%yYS(Rj;h~@g)(t)MEI@092$37^SqwiM!{W9=NUIEGp8?ST9M|C6aK%_6c7IpC2wf;jVtKpao#%#I zuEyN}%2juaRcelER^5xJ(O%cR8POmh%~ruhxKr#%GO1|jHDH0LueAO^P_;jz(vyf8 zI^uvO^z%c(u$boUkTs72J7SHHLb$xkXlh|4C=tjqnhEd;+xRY21>1KlE8k^(vCT~? 
z(Z;7-LHVmVENxNu>e{5Xj_l*~JxhB)W}{FoyQpQg8dlNgBUbIPn8A=rO(C;@!s?yo zuUEM?Ive9dCd48AsMVuW5+8aVL?&ZKCf(_Dm#$mm~$vXs+)o18H!Fk*Z+>(2kOKjeMe3NOLLQ7 zJ#cnL<5rsx)Z!GaPTPsv0kzSP`Jm{)v{y8(ijJxxQrcMKZf7PPg1B2}b4dt-C!5jO zNJ40~otS$wU5+VbjBK%Z2?15CuEJ>BeGJ<$zlH^NZ8U0TLA^y*wM@R*AF)T0IGy0Z? zB}?juPH&-h*BEooY@rrLWHwSF= z9i$d>-8N4KsN83Uup`H2N9O!v*adXIi@uKcX#wy1bBC#t-nG)hjbWLEP|i zWOn^;WuCzRILkqd-CIV7BKwbxxtg@Edj1?sR#kjLOPyl7*+x zuLZ~I4CN1Dv>1d0bkg|}9@+1#d5ZgZ%Fe1Eb9I(QQ`hmxHiy_^ixKy1+Kx%QN4S?N zhYxYyRc0kaq0>R|FPpK0r|Nd_R5`O?L{XohQP#FL(viBTTqSUx>4^dg|ut#*ff) zS_idvmVy;5(ZAfQW0vy2;3xaa%Ff>>aZ|chaCk^q*An62XGGnn5;_Pie0VeHq-VBQ zSa4jKR<-pnA(}1w2ePX~%?axecLt*LoVr4a=;EVIl;5<)xU99L@BTF%gUATZ(021T zGKmfzrp?vWBKFSe)*wn{BvGLHn;{AOY>?5G6kni;Rh&YrLy0>)?BTB5x^s80zQXM3 zl)fb#2wY#$ZG}tr)3g$GvO72irVRDVG%IDiKrx!Y7{hP`B+ZnMNfO|=+inDTJiwHI zTznFS30G-!cEx=Uj^N%{@R$4?$8e0r#zi(sQjoBGvlvG!u0{M;<5XW&rhuPBR8-;* zP;&U1vLM*oD|(_=chEGPzXR0;t7U9)K~SQFdpLA9v)5{t2|TH!+t}{6hNN*J0Z~yn zS7l+Hx7anXiKF6^O8+(6d(FFn8>BB}2=0NmFxPERR<%V|tsMY>I?ZM4f1fRg`Nlj0 zNb-wE5khqug@=fNPe@4RQ7%)R{&P^wzH|SW{B0lOLv+`Heehf7Y$<&4tG_sKiic=~ z+5*icIp?X6vFXmn7fvoLuE-GMPSlQ>ANu!?&!XPc+O0a+sfY`Dz+{{5< z*cbXZAUyDYLKDIu*$tz=rn3WV82x12vWJ%%8SeK;K!- zoumm-=QC(*f36+xa)wqlUB7$y$gvkb{itTTX!qZ)C7Ah=Bpa|16@ zq}!e920Z{op5QAVDLH&7#bRqr(W$UO+2zq(iaBG|eA z{wTN4&Pr@M{dogPtEoHO-6L0PuQiyNsb5X|Gqi*D)15IiXXFNu4~heC)7+=8CDAH8 z&~n`ys^J`5%W()s-U~v8(REBs7Q_ACy2sLC+?B=Fu$k>(|7VK|%~4SFMU|F|Di(Lp z$+ggn6HPjRWdrGHIB*{>SFTW(#v&$3%cYkZS_`F{^tdk9A&^?+E56NzIWR-~RH&-U zlx|SgijC>yLRDq8gl~Ic!D~Srr19mpuKjD!1r76{d{(tQ&5M|%&3Bk>oE*5=CBnuzmmw<%Y-#+J>`@X@1uJo)q+w>QT2t!-gwmI_&x&X(?U@M;&mmt{=Yvsp_S3qq_|S@w1ps;n-g zDoFrgpTj_`1Dgn&5c$u^?*s+`8h+reNTPJgj>Y2_DW|6jAtrT7AsVVz8Vsw($Hcki zl3Hmw&VJc!E9Qh;>--y+3lYMYDuJCZ<(LKeQE9mQ=IoPA*{Cp7i)o9;;nHB8##N9# zQ@$_?Vgv(b9UF4oCh^n6uMeaiST4CA8%F?M53?39l!tU3V+yIK>df@mC@~^UfJQ*` zZM`7?2e;8FwQ5E4$r%ql$P*rtxG$0naU})#8Rw?~Lh(jYyl!j)dZZ@la!aRITDNA` zp#bU1r|T=9f+A}QV%o)WurA)9R;|ZKAd+!zq|-Qej&?_~Uoi`>jRw(y=cPc7@-2a! 
zL77Temr@dEpT($xQrIIeB-17xMp3fz^Cm@;K&gqdPuJX#-pxZ{jbvwJH~+TP>tJgI zHHKs@!}8HZ1C!nR8rhmTd7io8840VD&}vvmg5hSenXEo0QPgu+4u*oUJ@4edAC8%P zSK`KYu)-%y1Xo8V>D*w2^e`4E41zmdYGTp{)2sxYcykYIctD9W*FQwFe@TbQrWHS! zx9>!kW<7OVkO8T`RN`O7>{i?k!Tx*J%}w(gOo6iunY4BRFySoQq32UuzHV;|SkAg<`$yw+WwOq0IFUA|Y>w zEu)K~STP<$#edC4iy*zX*{7IC^6&kYD#2cxpYtnB6({|Q6HoubuL#FE=2uu8NHdrJ zgWnOCqq}eW9nm?uoASD2(*|>hgwbh3Rmg?W3g3xBmjuYHsQab{rO*b{@ss-YNVoci)xlpjp`j2es{}ipc!Q~&I;ifbp7ljN<5mwW&qgL9zh+A>_7Sr5v=5xnAq+XcmX`aELQx3zB-9ulBnm8< z$Sw)TU@gm%0iWYd_8Z#A1Egv~9L?mASYNgD&aQ^DcOiJ{S;kc!OA=-;n{Q%K{BbB1 zbI7c5vjz#vCd5)eA@Gn5hK_3Dpl23HA%OuH$Ng#H{?ciogBv5yg<-w~Vxp1Fj_nu3 znjJY8P*Hz&lsijMy<~R8B+XYr)no?Ml!@VEW+znBpPg9Ld(PQWJ>xbz)YDZx#zi#| zKxl2WG<(LNwl=z=yt-0X+J<17{!^eNg}`Nd0zeLyElQEZzR4q6l^dWq{t+ZMR&Ki< z0z70)MoJ%(ct;99?tk#5@WE=^16dob_YcH(QwO!93cG~LHgIg2ialQTU=O9&;+x94##(DSXL^LNci=IWbDE`b#dFZ^;S0e92LbFS$fZ&In?t zroZGAzd!eqQ$4=qs3$BrWZVCjy|;n0>niL0&pv0~=bX%*$s}!)R?az8|EV-L2!+;C zAsa|3BFOcRUSCl0iZ3_qy;9^)QBej75V301q5*>p5N&`Kf0Zg;i=w9sIK`kbbUw0g7!&r$G5N77a>t7_|KCs%K#pr62rzi(xX;>`5(o7^Ow!4Q5ikSg2{8!9=tC)My8OtrIWWlefdq*flQXW;d$l zkUpY0+ce=Pl<-Mcer20{BTx>Nf^d3u$wtLoMS=IDiuo1a9Cc+`;+s_` z9A+zGY!q|Z*6sv~Y6NFOGn@aNqO`I)0V3?Rf;C$eTTqG;w1TmMc(x(%(0&Ch@NBF_ zE!RL`qKRVsru+l4v2D`*bg8+bbU|k#F8l>{>e`QV#Eky31|=5B(h{vG^j4Xh#5N^6waQKEj5J2Dj2#>{Fls= zxz^@I@d}{TpEM|RRU69;SaMHyaB}J?LOr7eu;Nx=KD6$cqv#~76VUi;-J$g&9M*gnpQgv>sB^NKp?N@2v4CG}^C-_iKcG-dqW6puC)-cZ)xjp}ht4#| zDnzIM8bu%)f$u>TK9jhmq$@0y$zp~lp8qLIL1?>lLA0^BeQ{3T5Z7#nEnf1M6qJyk z)V^6RjCUvWs@i=^s&D`C{rhLTkN4B2EaxbqY##wBAR){f7fWX&CZ%sLLAUh0p zr-33d{w$kb`QJqsO;58!>!O)XXXb(FvDw+nniJ!zXJ%%;m4CDPwcOMU>R<(XkI3g$j3GsY6HwIJ^KdRNff6nv?w->(9BeJ{s`SM4GFgY=?FTOR z_BOfIdf4#0b3h!?flLW6cgvRtCxT*?OJ;(Cm8k_JU}%B$Sk6T`@nkF-e*{|63(EQP zXxrrrsY_XuX%_E@Z|?5z-UA?3l3LrHT4S5h7`|DC+8hoGPGJwp*+^Kaag-+sh`n%S zCS`Af3CZ8Qnz^&IV)jI^o_A$aN?{I^!|VsR*`>66PZ9dq0k5h9UhC`Nve000>>x!4 zj0d(dE#KbVgB9aqPxEIiB* z2~!7pVq;i*L9;Q-Etuk*0tT~Jb8cDr!kP4fwuiaL9!+W+*uYQh_A22hnbUSP3uoVE 
zqshE^8^>^&t|<~zzECm*jyUrRqzStNpd(c{ys)h3n2#@zVw_$e;V`_wHnFmYlKm^6 zsY1V6?Dajpm&qOuFSA#ye7OXVWMPB1ulE9}QQ-x2>7_4lmYH8T(|du_&%z5NN|rB_ z;Bi9A67aV5xXiZ1Wk{Cne_4y0V^*p!dr|G^z0A2Rp=akTdJXs52=r`p?6Op24Z4Bd zy#{ur2E0o_d~exCv=+4p^0$a!mA@xiS>ahrJg=hgI0=~NS%EYb_)m2eh7Qa1ul&yQ zu{-&tw3?Uiu56rvNTAcJ!-a9lSC%(Q$@#h(m7~0=r)upch)v=@7UX(5(?*Pm*p7Uz z95k)$?i-+eJm1d<7ZQH8ke-62p-?)EM5;KEBFA?}>znbrV(SrYk+r9|7?M)Prh`ei zQ@U(>%HNs<839Z1tY{I8m762^e&z}u__ozb#qNzn-+ z0>;@^h%8W4c|%((2;tpGVoR80kY>r2ImzD7_f0BUEn~9u!HQ(1=ZDvW;h*L^X%vte z)1+7n$mmzHn{hEK3K{T>ofgT(V!Sg3?0QK<42!5c!6a(lIjKl{lvaglA;4RbRa^)s z#V(nSqq;I3M|1@cwp@5RI4jcE)B0VMuUzniB5!;t=VMA}uDLzdQWQp&@NUKEO+xvTYo&%EV8wYY5Y60v^s|bHk|- z*wYnIj}#rRh269zjy?|^(X}RUo`XJ309eY)4P{9V1aP$>Sc$FzR5J)HZ(UPWA?b3t$jyIvLbhlkx)A z080v%J~B^WZV&x~n-+c&SblrF*P79@$18Wf1b?voBcwJR_}ETu$=0rE8Z|439~XE# zcHGzLe({feyauwm03m)7rx7)84?Aw)MhsGQ4Z^quwC4peqkYe{r0Etl|7Us|jKeC^9>e`c^PF65FdAc#vrB^p(j zPACh2_lGS);1VT&3X))j(!*Zr=s8dt5 zWo=nFoDA4GX^MgFyiir@98)h4ocK8e$Bgrvd8Y4r>KDN*OI`Fhg>a+s!&I@<^)}AF zCXlL%F2_Y>^mt4p&n^py`n{`>ybeBH*Ih-FL^OPz z^$wQbHSJYi6D?>j=p9;=*Qv@lLn38V&yF$=Q%o~s^!l-;67+kFyhvPQ0P@={GX%}p zAZ3MF8=x*4w3m2YkbPFB*o=oVWnM|!XBdD*N_6Xu71S1%Y8c&m*l6K9ZoQ@21=fid z4_nz&g9kve8^o;_UkFUt$|3fXW~$Dn)bs*dQ^O04nqj1>Ab& z3nh5lM8`eJYVbI7I$`f@dKtG~|I0%6t^F@UxttBwqp9tQrf}=UPI_&K!pSleIyjq} zQ6|C1$J81Ofj=oHM~yrL6UDwcOH(J2?^`IyHA^9*H&BsINn&99_&d;7&XRc_$x zYYA%n5AMP4gKrM*fk3;jxCeyrO~*Y1^EWT|z*svU_ZU?u4t0&7S1`;`UGfjuwY5_R zmt(&uB1TwegG_?x$Fw;SLT)^-KnlrWL>gSfZA3P!b}6H#j49+MeNa&~EWrGOeKy;u zLT=&aNPY+fN`{3)ZatL^`wSmP3c1;@%7$|(k2oz`L7@v;T&HO+rOip*RT!<2m37Cp z+fd$qYBt6|(rgI15ld6Z&HL4(gOSl_kjfZ%ew~>?VpzS;Xn93W9rxD1z5gR$opyO$N%m z3OfSC$mjL2NwcaySH1jTAt&CWi&Fzp2rT+>8gWHdb} zeyOe2;FAe-Xj$W7avjgbn@&Z-XoI685)_Z_yZO| z03{9&$qWX!9NEFl-)|2ib%L=s*~86@viqcwAA*?^lVx(C12U#r*l;Boix<#TM~+=P z3uSCSQe|KZWu~WA+mKeJQ4C^sxaaSI=g?)_RnW^W@t|_wY(;tH=N*Qmh&+$GSPs7je49IwZimqP}E=987wymc_`6SqD z7RIa)W)FmK35e|s-!6O=m+RIG8rsXs>9n(ft&UWf5;>O|k%Su~jSy?&uxf#^l!7biSFJ(!6jG64sS;ZHFsJQ#r3!BvvNB~|R9K}dgq;4Khv!_S5cl!+iQ 
zCwkBW`!XW$f{4U$P-VEdsshKwRg%1zqhPtucuP1hYRXmF!-hJ)AT(5#MLFYSTWg8%P_(qzHkCvVfM~rf^0rQ( zVu5OBIrf2nV4G(AAv$ND%q`9w(CCP*)DHx-ea*~vpf3Unl)r#Jb!t7{5v;bjay;m!( z$VFW?Ip$*>OG*}vR^F;0Z&d1krK`wVF%sOFGUUoeW%6*9$fm$e<%`o7 z0aR8U4SCF1wY?`4p9y2Orc@cOH{)9wV2j9*qKP}WDwubxt_1hC>q^qywv1sDgjR~9 z3t<1*>N-DctnJGn1W0I}J*hIW__#LRxtX#YFw49oeVX#kPInlo=_^|7TO_w|yBgI~hJ!~*6))$aObaH^6 zUMF(eo9IyTMAjzt?xLAVjt~Y7cEA%Szja`TR^LAP} zzutN|tYss$4YLL?lz=wMq9#H8&PUJjZ$8^5OM;WYfKi7MjI!X1*K7;tZc+>e`-c~F zXmI0VUcAP$ZBykckUVOigtHJhO6=;!v5ZNQjl-Qocq@8$7F0lkU-1pl%;pW2u{r=xE27GyS}i+kXVrq3rlfS>ZI{eeRF{~+X1(muVyQLtst1m z1F{2)li?%IsNzKU$Znbq^~Jhz&Y}>q0YMj5(5SMdE6Cmh?u0!RhaHh$0yA6_S=&EM}(lgch0EtJ~!ev$@U>DkvqT3viUn1N@m3tDvnb*=d5 zVn${vw~lA{t+<}Zx;Zn(r<2U)`tGy{L{l5`LOHPl^m)f)dwhL+8r~8k@4Uq06FlDP zJ-Vv*=mhp~cvR0!^`4omJ~I@b;e0Ug0nkEMSGg|bJnmsOK%{(Jqs6>OPT>I@PaA8s z{72iPD(z=DMqPh@EdRu6G1Bmjq0BR``~%=e=-dUp)KF(VmBh4#3?BxuitYEFH<8W` zx_kyNjQZg64;r^BA|ysGf50r{DpoYvo^_SOa6~9MzGF zxOS18zJwx5npnd_&_pXg(oYjJktSM^CP=+EP4InyCO(vCg1I*>s9!DaHF>yoA}eQn zW_@>cnDOIj#%I=-Gdi)pjpZ4qXjOZ;X_=;870s9P(rQol&5Md;g{f%epIjXn4W#cu z@&m;Z+LLnQ8P(y}PmvqlhL;RSrVA$!O7dgEq$VM)-5qKtPVh(J32A~yi|yRejCR*n z=Cpg!)r~oMY*4AzDnDkteUUDf7Wfl;4VM6ZLc5-n(EDafsZpunrk5(UJVS}J_?N_7 zYQ?e06zp%7k5X(g)1GdXJdJfr<@#$B;_}aVX8!4NqP5Gzvp=e=+rz?2>f2nyMn2Pp zw9G$~-U_F_>Zi}9pVZ;9?Z6C|{hd|OWQ^RDB1r`>(W|P-!5M<;2&3$({zt%QJlKYQc;z|mlL=^T%0Cm{1;LFQ0xxWbo~OdFPP!W^BB<;|Xj7XGVVo?;D;n2?HmAxq zN7d5PLQB_x<t&dsUqoN(-p+r&j~|+A!0>V;p4yQ89mi{TJym9WqxPHV>hH4^Z9Sb-> zrB+lb6jEdEA;!9hQI%n|aP>h1$mP;*V|}QYktC45>m~Q`;u*?Cq^hJtS_qm0tK(2t zZ-0#Vy2iroX4qB7vaUhG#o0gj4O>E}>rxjp6!&3iTq9AL^(!!cl!n23pidg&aL(aW zo3afqTW>cZq_DB}?WiC8{NV626_!|7?%r9k!;JXHP3hudvSa=n>)XXRMMo6QpmA|Z z;l;Z-E|+-Vpemszs9bz(Q!sXcNM*Vk`CqJ-fhG^A;98C$C!a5GDMNY#8(l}=+LID2V5mETAI9KBJF zfYP0b?IxLpP9Xjw<`I}}IgXISa2;B80FMZ)5!;D6tEWe!B8IC20zAemCqacg2AznQ zRV+jqEUu(HBi!)+2;)PVwpVTKPPJ!lMvE3toaV$Xh*O0v#qhKaGtq>q?5i%`5PQ_% zkSHDKfV|Ki5xb=Lk9ci+m3fIaGH!s;x@HaL@i6-gsZ<&t0*(@o;hFAgycg-`gwbYu 
zH5fH^^?1Oj_KLDho=rI!h@TSXOTp^{r4Psecoh{w0VF5`dJ+vE#T{22#a)ew31PzJ zM(QFeD_WV5A^|0WDhbIH^x!~G5K=I46Cq7iu_v1s z3^KR!98bymnGhLC9%+k1cb3}7K_q2MNJs;dEQ*uVMg;9~{U}BX8>} z&IUvwOZBuyA_{c;4`j;xgfY9?7LSv#9(s&4M3#IQY3YC@GrhHA`PH#WiMHHrjYxF$v`3FMIM<9P*$iUF~L4q`k;i%0C|;4 zSR>1G86jgr$biexO+p2{B8)w3Q6JUVmkbwKA957LNMJO8n;4B=hSBJ?b1<4d%FrSb zWiax{OyVz-2<{Qo5cZ{LwToG4XABNmnI)?1R@3CLWmx>M$ptERbrumO4~ErjA7Zo{ zPRT|{cEgdGdMhYL@c=oQL~NB2^$wu{lzIT8wRXhqW-v&{2eqSNYe(o;Sd#c2O6^Fm zuoS@9;60>yaTQKrv?HpxT@?=XDUM^>q3vv}JskCj-=CHa`*|t|aR1a_mn%85dQ|Eu zzLhB{q{BUlUzEIpNdQnrwCwRB4QXepQ5vm0QuO_)9UyDSe8%U2(s51HPx$y)G%5-| z)tDQ;(O;YIXlte-+e7TR)RAM-r)3$Dh|lI2Lj_*5pksv7CYR~^2Kp8be2`A;{b{}4 zZ{i48G{cWkzX2*oHHZcm@RTsfw}eiZ3@vMJ5~Uoo-FE;99x5<}8X-U@<%;smOQeYa z?ms|#@M#fF%I@$Y)FCZ=)3$#t$;b1x0%5?gk2Mo~_ZgVdw87_SA3oE3CR|b@9J^J* z)xjExl@PdLc6F4CxIhn+QcbETM1!w+F171g#~WvyDB3fu`FJthTQCyZN z<0C_eGSN2j651AT0CR!1se~Sn&IvlXsVMZiq5iXlb?2M98K>(wWU-;1s4AmApvtOe zY0R?eVKuHU!UBSLyNt4vnVwMx(Q7ph9f^2VJj+oOoBB%aGAgJQ%_3n}YP^+TB{G)O z+M>4!=DB$@CbX2`u~PM;ghWt+Yk@Jz(OorMtQlAZi~$KK-8H^TI83^0jalNpmZpo+1q((qiuB+| zQ4gGOi0;EUB3jsw9l8l~1~(2|X8Acoxz(Jtsv};NsAI}Xr)I200pIp2jauuXDUBJ6-QLE%6zrT`KmPBY~$AM zo3EfN4l!#Jl2A!kbk<$hx+0MCM$i>4H#M&U73DbTiY37G>N!JC3_x+hx#^+Hu1Y2} z>4|QL)JKh0-7iDpqoC*w%nA!B=qq7w zv9*>s$lgMdLZ-pRIf-;oSxr1(709b8YHYEi>T;4Q=^j{4K}kFRa?(IXG>}j~HVuPC z5#SC=T0h)rK|$mmcUn=>DdURL%d99VY3Eo`K}qwpFhpG^VYtK`aA}4u%GNe@iv)9g zOCyskSgAR6Hg>+|vdQlY1-CgqA{1(sBPS;wgxgBTH*#r)Gd0AS;Gcvj8u)a$1-fr! znX6(<#*I&{rb$=eopcAqn*0-&PCl(#jaSQ_TD-KB9A~kVZ$kmoO#Ci*<%^M<_~OHI zwib=8oSCE35#^DG8tj;tZR^gSvTTW9v`lwqMw4GQ*dK6E3nfIdxEo5q=byom7#h8~ zTD>;r&Q{WSdEG}_!o*aL1m*kTI>JS@VK3&wic(m7R+#)4c0lyY_sSNlevad1f-Lv| zpTsbvD$QkJwox3dDHWDIMGX{b39_ezn{Trk>3r4FQ3L8tJbHPI>~* z^o!82h7jF(lSLPw-RP=%BZL2r6vKLz2V~~46vI2{TykrUOZ{^8c)ZoY@h|5mE(O<{ z3@<2xF857{RzSr-0#Zhbx%$nl(LPXQ_>B~!>Xf-J9~@iZ!STTd+bcXcS$R-;e5|ookWlr4#)r`T}u3@t_2MUk>W4`0eTl*r8kxi+o>W}czV^diyBJvkYi6DsPv%{I! zuuAcY+q_DzY6;q$NMpS5aX#ZluwcjwW0nr#>D06nwDY6(`o?ZX|`a? 
z5@BxQ9fKm6TG-TBIA;>vpxaxOM){Oi)misz!dXT`j~6+H%x1H8R6;Q~j4z$3gApIB zcb1qI8;noPp5@%;f^%1Fm9e=js<$d&AZQG83Jp%aEei)}gre#7&0*0qIh?YKbP4fG zGUS8}_<>Fu=VpEu|r_q3efO-d6 zh~Wb9=;P%WK&Rlp#uL^f=+L6|rV2B9xzeQ=?=ZMP*U*^i1iUm8v;$dgeZ0b3yp2Dj zOL&PsM6B7fAp{i2?AfFu6oA{t%%n@!Jx*U1;S=@F6WDex$7rB3#-Z8J%wN(T(NqjU zgUD!f8re`epmJ$S2jX}GMxygk0TmLG4HKzLHD)xU@L$gb&;%OuNF==EmIZvb5FS)J zpykHUR2*^-GP2LhIS*tRJ!UpYKsPHY5a&pkkfF&>UQ!IwbgV$Uk}&wXVI zLCSQCSwocE^=T9vYc~~*8&^$?H~2R+Jkl6tf1nvnL+V65{~AnX6UU@wOf?$FBeF3i zz@wu)BGW2FyO;&5a28mf_zya;MQr3mZsP;IuqkU-+aSHhZCn}JfZ>=sc|!_2O~hkZ zX5#u<;$nQ-YXHJaML)VehSXRnW48ACk6cyMw#~p=eJX?CeV&RSm zP2#HXm89_k!LG_(Qqt5g#ckTg57k>fi`R8icCg+Z>$NY6*;resOO>OA19haPI1T&_ zTP~<{GMIv9>iM=gV-)EPW{A=_CO1lqdZsAA?C;0p@V#s8MX=2(maUpYYP$F(TF?$o6yU z!p%k90vbq0M3XH35f2&y*aDWo$IRL_n1^V=q zjwHk!8U&i|53*T5Vu2SA>%~i$NhwNzu>eAj3i|;eM|67!urRAWRP6|D2YSA3^!y0f z0sE*mTa!WJ)5uP~IAObl1Sssd;0mzJ>!(wM)kSH$4AcB* zg61^PY-vrv-YrQr2X0B_k?%s6V|uOfO4>qNS5U*}>c)1d{yV77C_A|1Mc(wv^Wjjb zV@OC*#dfZ7BJ;1(`ljqSNB??mLYD>}jGh(5^m<0%&hb{GIAdVX0;~k~6^*&E?u7le zACVJOTr6k^`Z>1zYpl_#Xzfs)bzkBH;O#NNs)v2e8W!A!JnPl4J1@P?&J8z%Cu?(rX*AuK_XQ~KLy|x^;3z! z6Z%#n@RYvI(>|(<0beWu-%TIq1AJbhTzk_%;NHn90{07W5O_dW38eV_Fjs+k2X7wK z=K$k^Zf}<*SnM}63yd{P1h~g~yO?%1) z+am%sar>mD6C7!m$dvVTf<-EWa8ZkOH`Va9{O|ILTBKq`7|0$dQl{D<{L&W$zC(zn z5Z z5go?!PH{3>O*LuZr-NwF+F|lvCX`d_5xc;(1XcTyyWy}<3Uj8y4hxm+#aeU$~ z=4n%QagV@~#jFQI!6)(eUAO*p99Sv$D zt{T2>YyK}Sb^bk7R3#dYsJemq5P7oKlvz7{c&735&l~!c?74WAcU+n(dXTRR)MuqUJgnsJMCI(&uVI~+&s9<=-J(yiVV)F|HjnVtE@8pyr5b0Y+N$GWYZ zo46jB+1mN-zCotamIeyu@mH@;vJ3q+BkIruG{1De;T(9POkcSJdhgw@Z zkK}q6-?g*;4EKMS>nW~}<$9Fsk8nN2m5k?Nkt-?A#X+t=#`P?~{y5hYT%X4EFxQ{p zx*y;l$Mp>DUd#0q*GF(&uCma1J~Oa+<)b|pWmOs^)7z@Uashookwv! zOn;B(x^2zY&UbU&!}p)$x{K@6^*h&};(CU5ewynJzWIDqnO_N>r3kUv-i{&>hG>UKYP^X{W$R6m0kAM`ri5*>u;!kGW$gK=Imdxf6mTi|1

  • >sncvae@f%eK}3Av>LYHT(PQ&iWno*VkWHe=QER+w0%*$Mrv|@2MyRp zsQ$wGuKMrSf3N=1`fc^!t^ZE_x9dCV@63Ly{-b}AeIZ-SR@JillU|>_E_-eE_N?(6 z^;_$2$nMO3f*i|Y}fZ8_T)P-&<~M^~G&6XbxkzACRi`qv`I-cs|Ny4G?T z?43~#rPLuLdu=1m%`G_d;qn~FgxK)kxpiCfJ0{f7p&1%dJ?D+J56^@LVc*4fD*tK7 zJCWD&7GDSaf;W;_Il@os6@UqwbM2%}y{?Gul5{9CEu=Rd2s!Siml*lO2BV@GxTx4V_4dlL*jJ^tXdLuB&Qi1(< z6V_XQ$1p@?XZxt^u1ICfV4yNl4)e}mEuJu+fyz!zLpy3k%jV=XjtV^s6*BTbXH2K) zY*^LD`lpcBJcWM^kQX>>`0rRfy;bA6GFh^^7qqY-wK!tdQn|YCqAF!8&Dm+Tero}F z{Riv-a7k$kv;AQ5n!!BTqcoKJ9nofe6vUX<<-hOtcdLDxGIIFLI@9PE*9a}g>0Icz)M(Up=}qj;#)Z_`EV)xPQ(TBk8p*pH4cfQxG%~|tXmzh z&=ULK?gb&fjc)>E9G(fliXZoIaA-#TiqJ7CrnH2P0KJEqzm|#JlpUK2v$wJKnW-(U zCva~bn#A9u8O6aU&aba?V##5-TL_HY*&ccr)cEN3izs)pxISQAE0E_|t@+ND4#DGG z_vR^gkEE*N;>+h+d?I6U2=-PO1vqB3{`|csW&7C9%Ot%jOc{sVjwwLqdfU`FmJDLN zMy3q(@L)M-9R=`~DmF~UBK~CHVB&y1c|e>3#iH_pnqqbTwE&&>U9G68Zr)o_Q(95G z0Yi`<6SF3n*X5qPE2DE4Btkp`qL>ZWB* zTJ|((L8BEGgoo8gyTyv#`{9Z&Z`I+Yo3fp30un=}9?I&@EetSmPd=@cfaE|J(=Q5` z9~4^9z~~7n8rqgJG@SoID>}F=HaG=O@>;WYMprn!CAHLLwAP-mmV5}pI-n9^O*rUP zaPkorspQ$+!l!kkjn~$saTK5dWvdUbPAurqc*HOj$b{iY09Go*xU|d!ucl}&V%X^C zcVnI|`Y_M$UZW7p-kFGfX(Itc68PiGVE;6O4cI?9ZS14)##j2We;5@m$W2ZQTHhre z0TT#qqqQNM2JtT)h-99)oG~2ydXe;23o~fW`bBp*C%OSKoK24fp{*DcvwdcLu~#JX zQa^)cSwbR>UAn(w@jK%zo8gC9w&E{(g2|VPf0$)a)0WmN<1ByWoU{40d!)m!5A0Nn}U^-Bn#dS~)gtfLO|-&4;*hg5V4*2O^6s@HPP1(t1V2={aR) zGDu6DCgB;;vs8Fi6;UazRg8g`B<#qje+B&bipHRV`d3jw{i3P@lW z#RKUtu3>E?op-_OZc%dOv}MpDe<$-7FJr=^3I_B zNS$j>ImgxOLo43vBY#ChK(NxSNNSy1uOTxbOUP%s=D-!n zT9IENoX@H))?#D;!R(JfMt}h=0xAA8Q~+eR0hmG?V#i!&=W4{9QtG)s=dA(Kk?UCO z{D2Qw7}F07iW&At9ddekyZ&e;cTQaG{Rgn&a{qxIZscDYi78NM=UU74{PQE7u@LtP zY;JhKHX&1Ey>SI};&50|LDxqVvWZNJycJ}GBdVSU1i|EACV42FBmiAla9X=5)?V)y zf|VsbN}^bD^@XiF^H*AVv9Tp3Oss>Mkg-LI7&8*gmclB)hWfsBB!+Pp`8(k)_3XSf zfyo5obr~dJ5A1T*TE3rW$Bx~hi-u*$8{+XA2 ziA|__^9$k-(PI?`q~)mQ8JMBO8)O5f`8gg9{25MN93m)_|M_H`Zquh+;|Rtx_c{-A&%0NmLgjy%vrGtVrbcounN^6HOy;ktuypjlTS5T;2dom3M8SSW(*BST7Ab+rO65w7eeO~q zXGk0ayxw{yd|Otbh;WY}xrI}nbp;k#B-#rJ6zub+XN`A{HqU!DWQ@pWCwxpY2YbHA 
zz=jO5^hiSmQx;+;!PJ)44Lqal@`oy6eJX!-z4>{q#eR(2ts#-w!#L74Vtg9{$=HM} zrz?n!}OHJT08hK7bRvWZLF!@2Jj?qNURez$QiVOqt!d_5P6d7ixonBRR* zF^{~7h*zG2>GcGwNIO*gWs%v@juxe_Y`_7vfs6}5xWi)A8R0DKQsmmpBUY&AFW(xD z7FRw*61=2PC|w`g(I>?#!Y5=B{nWHUtDF+V&%K%wWENBMN6StL6h!~`=VJJydpad@ zISwuc3A+E|lM@7JREi`|$yy+y!R|UfRhj$P_1!wkkZ@e(?V^NZn8L&G~C1%7sY(gN!Fn-My3d_L8n(gk{H&J@;|Py zL#R2j!>WdBze-?)(y*(H@VpwX(NJBmCxB`RtBg=ah=DCaDc)!(QE~s+^fDirCrnh# zGpsDJVJ*4NbCTAJFO^j$6kF?sB};>S&OK3eL2pIZJ#Max6E?|TJ4b{~@my3V{-Tm0 ze{Tm~N9D#^pIu4*`bH^#KkK&LFf!B<&u;%X)Gv+%F?Tje8=%_X_t|CgA?H%QEcyUxvQ+o?`x<%Ftk%Mj6^OTeVq=IqDgA3$F!t3tobSx)7#d=KE7o^2~XNM0@_D%BP%BSauA4+C$D_El0r-z{sC0X!$N1tqQ^b1O} zkyRPd6dd0vxM5+)6xqd+{6fjWyRmkc%Bb6ua++AZBOvy`VJ6u#q+OTxtJyV#BH;dl zXXq#x4|ByFmA;BlfNe|bW877|0~8jA@Gcie_=8jReYNY?H$Qs5$&zge#z2DbB}*+T zV0)!Dg`@~DfC3iKLX5?SL@-YLc!C+@Kpq-drgES1ixnE7mCvXM(eo_Ixhc@2%(yEtXTOpM*gKJiC%5YakBJ_~8RXc_ zMeT8F*E|^0fDykTj9A_sBWe*n9W(;yQ*kDSx(n$48jt+OX0FW!*U3=U!9TS%sWZ$hq5oMeKkFE0w$BWUM3yKu-l=4vy z3YM}drO9wCkz7p$LLE2D7b&HP`zsi6!Sq4#t?XA|T8Z$*{2Rsf6i1XTp!gku(jZXq z?x3yz)p&B7Yk;y9IO?*pMIo69x*M6Ro&yjToVhC)CwQ{gxP`yE+d_K~94O$az3W0` zL%o}4h*5EZ4DG--@;CWSSe4$?bW*YmcM>%mc=zOV{wJ-{(<=idI=0XZUmscnly+(7N)RtxbN1G1+meBUBhY zD@M!I`}J!b1&$8c(bbOz`Fu|J2$VCM+O@Jr+jpJPa6@sh3iJ_)>GA%O#jrj`?V zHhINr9QP_k-aJVu=k+#Nuv`JpOw#VSh>fW_B#4l95i&y=A;zeRPy#Pc10~*ueXJM- zd4j9;EP5voO{%O+~ z6zkM8m1QJ5jupQty9;V#6@Y2Yx@KFmeoC{hQJuboaz}@meH)|a3ChS-3H>2{Mcn(u z;iMx<8KM-sbw>Rz@xga-tGGI>SOjWv!X}nkU~iY_e+n3mRmd@Bm5O~yg6vXOplD-L z<^Vk89Bgo*B+3~@IrF!epONHAkUqxf?GCqSs`VZ{y*mFm7+D%Zht*qmm!jJ!>uKBDibeN2oPjH zm>`voi?b`}4kc((?Um{g33n-YykhV^m8NOrd{FwN77m-DE1)uzFz*DV^uHdI7B3+# zm@-wAJ|ZV8Nf@yBl<}hs|LA9%(Q;65iATocebNlleew)M(~{lF7)ol${5rAjQY|PM zb_h6s7YeI2hYBlRZ!o>~B1dLrSv#->kvOt4`Gr?$0Ll1}vMY}4DREdVnY~(b@IndV zU{hdL<@+lfdB?Iwh?lP>G%VI+gkZ~^pCj8}Dz!IpWCwG_ku??K$ht--BgE2nupA60 z5F;s>6ho?kV#=6o4$WdXEoX_K&6ar!^#bI|5`hS0iNLyQutm+041P9)FnSa{s-&0= z$fr!#358A;GQv|56!&3RUHX94HS)JR_3w(t$>}7@LL70#p%ax^#myjI8PF%~u8KZs 
zyjA2VNy*}>iX31z9Kc1NP5KlX7(SF}bTp+f#0Vl4%LqVG!6bzbNlIjvuI7Oz1lbE8 z+Sy#@u$j3M)Yo~RWLBfdzr*}ua2E!vzzgz|cb8ubdL&9gQ6e@mlj#9Z1m8)0@mdY$ zT3V}Gh(TSBYc(iW-rr(Rg%^dUS|X;Gz^AN8A>I-QUKBPsgG$r6tn%Q<2svO;yx3D5 zxte5=){n_hG%E`Tq-FU5Bh98fC)ua{FF1^iH2JXZ6_Yg?1l==f$4gPdis`Yh`sO}i zno@+~KvGI$pCAp+w>}X9VR)=?YKS9$=dz-NAcIFK=gi$2V0h>X{rh^Q=~0X7_iinBuY)NG?jVZdzjmW zS$`>~ckkMMZ|5f?tn#jzReJd;doeY$;-qUg%-bf5he?NKb*^Py9Hlh>M=`~{K}FE- zT^kM8z$it;WBI!QKMYOQlbtsj-Wc}IIOuw-^qxQ_X>R+G54{8+KT-t}x&*)GWC8Inwp6tGjTU9T zIG{~A4=cKlFd@gH3CSFl_z4)x-=hs8nvyVK%+V2sB$D@#U@R-M3(HT#Mcs$Mu!Hp= zM}fi8VDYAiN;c@vmC=ST7e;Y$+~(sY5+IJ+k1Y>7_`_D7uugtCc#lQr(MQPyDkV?8 z&AD^=D6J+(1yo`OlsSbBSgrL$R9drhE}2BfUSenMqOM|v4ShxWOh7a4v;4gkRr4IF1`_H)$4HH#pjE3T8vUlcLR01(MY#{d zb6Z+(t2=k&R82*T3TBZ^qjbs7e~pyNW=yFhXJNPnhcsU&C7jn;Q}(bqUyum^7s;}^ zRhE^W-avG~E*O!!_&J_dxy$=85xHn zVh71$vbCKTrootO9emu%1bAPChv|q67E0;CT3C0Vd9Xllg_sp@LvpgL zv@<$5=9P!(X98~+&JS8mS=6>OO?C`AjKo~>lZgm6)=rek{8AlJCXTq_XH0pM_YP7Ig!Zj@%&wYwkc!#ZD4F0Ypnp=6l4sr00NdAIdk$Vw`K6WVG%(w9 z(W$KD9;Mp(tc|kJ192*g%WgCOz_Roj%+#<{&OxuiKs^sN%2wAJX2dveF*NBj(jL)N z#|*`0?jzcR@yNWVQ|THa7m~e{(;S&8vr?fy2?dREr5zZ7*(zt2twq|A(4sOce0PmV zB>42l2}x7s(Hv{r+U`W+2zC<%YGSzBNfIfFM1jC%HrD=RMx_H3?E;}uS1(^r`J8g54~7li?6LyH59)l`gJaSk8VR4`@W!8+OE)RpqD_e zO3y}@L->C1ey3NUFumf$fnLePrO*E}yz-vVD)9<%CGKR}<8O~cEevodc(D!4KR1UW zo+FQ|8Kt(;!4$p8kJ2_2N0-RY2Egvmy1M;RO_zinXdracGHRo6KRaNoeyd&CGLg(A$ z;|w~$+&Y9-_oL@u|{EbuP>B{uU#tY$ z;blSNaf9|@K(j4(q2BqVbfS1!>6CFTV@X1!f;`!x!EIOHEK0adu1rhl;PwM;IIMe{ z3d2@vYooF`lEJgerKJ(VWuOzos)Bs+o&3+SQYS-5)Yf5nAKw>~1TiVF`WMUM;m;!; zK6Jh*^42qTNB1%otI-h%xEHd3LrE(wojM@LIz_)TqB01M2FO3MEO4~zNjd~@y__5P z-^wO~^I`A>1EHfGlt-}CVoMrQtP(YenKIxNe?I6c$*f=y%#?X*vS`%C1<>C z1saskrIPd@7Yj$<;8qD!o|4JTo0i~L6WZfDWhau`V~ajZDrpr z=z*4vAPd74(L;#NjO^h5&FC?gjuIkKPak^nPlD5>wDZA;oc9U67RIs$$#z+h(@2{M zxgC;3@)qt>wiXm)i!|kS=y$PbzT16Pkw6?u#f)LYM3*cbDvZ5>;~fn0W-qqDg{;>h zKtCl%_i%pKXkhQVR(B^=WzTNP$>Ng~W5hG*G0?FJ_6Q&epO}|qQ#DvmvYN_m{==MO zK+hOW5~J9L$uns-VQsMVOv1B}31Y0GY}hvq9jh1}j(IM>%|Cc1VX2T}P7B^tetxHp 
z_EAbHUOzHl4b0%o}LEH(Je_9JhNGDcXFXvi4yO6QrBx& z+W7>UE7o-BgJhebH0MvXgio=Ud4a*)!+DYA_L=*2UNp*k zIWGaoZ`iz`E+Xw^oNvJKhpzQEI#=EWFtGqpvVJWTsv<#qk8~H6$N*P#cVI}hvsE1qc$9<;dZFez{*>B5`u(T@q zOzY*icU(X$aU?&gx@4^fG+(QEb3*vq;=mo$jb9PmD;l-@xMkU3r1@Z!WeevUt;83w zZg94j^sIHksvUd6fBku^1}SIBh}ssh|K$8n1|e2Q_M)U!t#XONexdFxk-? z>PPFvRa=YguJ#|Kqa`GL$`H+eZ&YWl4j0X5p~jx3#A-40>}Q(PNZe=l{z6u7Ebw=T z!TeJG&mkub_nr+4%IlwA9Vd!$`nO+t%QbNJ<{r-;1R8$)P8@&D+x?r$+W%7iPtdKr zi@uEysJ8W?S?#+ry9+_X4chS1{A0gx%rE?-Uf>w-_`+gsE7MNx9SIR$_(8w$M}7et z0@NOU8~Yushpvb>w3Uz%j(}tMJ})!>+;Wt(rS%id_w*WHc`CZY8*3^n*&g1M4dpwT z(~zdHrS;>@^HSmY)|}O*idRE46kI|<2f9nvgS{F%Tt$`DFEcBww$cs3tbab4o~1G^ z*am$`WBC`1i@~B%?~beL9lR3*?<=C11E_8K$3Wo{zr^{RHDO>fi=1Z#(c(q%xJoYK zEIOsF4d;I}!M$iXz-`8R1NGYhU#g!b%`ql8S1qo0FM|c6a0N`fD`Mj9ytgt6q!ka? zS%civE}3O#Hu?aDTkn)QD%MnvFwE`>pz*kEoVVRu(C#y$TUrm+dPm^9gA}Q5&e0o&vpkDJV1tFGR2bL5fS&+gt+tM=?@_V%k_F0_P!8>+_K^>gelQd+IzvxIgJyq+@dyT;DsZO zYNk7B&N>}5TNVRvPBG7HPKKXX}&`A61P1j zhtXInD$J7F()xpXbC(_pULn->IZ!1R{LXX50G1JdgSl*8@QJf%p0$bmw0w`zU!d>e zH1gq?^Mn@o-d`;)k3HlTKmiS)f zt1?oOuNoxC*;M+f`Cj!^+p*vCB~!0$IHVxp-%7iUStGr2@ryIEy%fN0o;r}tJvGTdCN0f+7}nZm>pok4>YnV!?PRvhpR9t%0oS2 z$&TvQm;K^A3FMimu~2z=A_*3r2simVukiaRF#4hY%M>k*r;1Jre4^VO96e(PNz4Y*ER;19JwnPf!Qa6IZ z9$b&GfMHk!P2QvZH%wgq<%%E0I9}p^Fp-#BvctqAQ1WW@ zpWt&XL;|KR(lWkm-dWjv<8-pMm2|STJ&`itkGwk~MY6}0Mx*Z}>v20Br6KHBW~{kn z#$fdK=Zr~<_{zWc83RaW>?{9XX3Xvr_+p9{5;8z-20HJd$9?{NlTE3)JrQ+s^|`q4 z#@a9&z_Hnf*w!tk;NlGr<~UiU0e%OuL3Q>J_ySQ~#YcVqb0y#r^jon2r3eRh4euA} z6g1;4-rSU%Ua*^)((Lw2`OfdsHT3(uu85Mk)#SeF-G}lF>&vWOET3oWBjfi4v8b!q z?H=ztcb5BJ+x+g_+J~W$$NO5Rdf#%D-z9ui%1taer46Wx{BKKsDj6skn3{PjH>cN; zz4Os#9exIu;Zht7_Dh`( zhOvR)>v&D6XdMa^uz7ic8WUT>80V}bcsd=tg*FoeQ^ZOQ!LAVFg`>HRKdj%7uI)1o4<_*_AP*WPW|9qDIQLoN`W=z zwlBql->oZGBwN4V00)qGIq6jWvXoXWOW)L6LC~n#;EIr1yEge&(hN@Gva@VkA3!eWUXXo)p}y(jrzIYRKB^SK0DDx)k$s5?QHM6`+} zf}~loG6D?-*KDsIh^m2jhI;|QBu8WlFjwTW6g{%06Ml@7#_JKp6 zOsB1QfU_FS?>69JJI*TQzKwJ4%fVmpyVTNRHWq&3zLh5K+l$Tb&IVBUGb~35`cbrG 
z>oLXi!nUvwrVsv*pw89uzqLB&-L93)g*QhMeB<4n2ys1eOL0Rv6+N6Z5%ytm(<;lF zzyeE_$al29zKIIQd}yN5!4zX0J}U8`v>xfm<&=pjx;XqxU>lBsP4EaL%p+++PH4Vf z!UkhW7gWae6i=Ac5ih@HmfJVCE(H_XV(!PAzjk*KF-VaTIi*~JFDx@qQAnV$k`6U2 zky(s;Kxb0P1Dz!@?4z?kNpzOzXeBzc9()cu^9s5!Ix{x+u7k?%Hl4jG(%GHoM=yd- zGcQ8~x>}jeFioK|qc?1&9idE&-o&dHsRt$#DT<`j$OJ?#fuiJdLlD(1XrZaN^O#Ho zmP#b4i72TcsdyV|QWJrXqypmhOW6(<1A*|U87+c)u?TJnGl?xR$s^4dgAiq0?bUn+ zL!ZUYq+K{$oQg~XAIjiKAU5=aTrz(N1QopvF#XD*9K)?Q0&L(T(ytcR*EeH!21Llr zdKMh@uJ?TeEdpI&ta!n#b3+_8CN9uR&(wtA;SlY@qViMPKp3POEm|2y`r zk_+rAZSAFRoN{aB6xv7xPiexo=s<>~T@j2ZP#J=gx8%F}<9E^KP-m8@cL<+7Gsth{ zUo{ULG=~|tUwM7s6nI_GJ5b-PsW5+LDymUPZ58x7tf&aRsgTQgU@G3wHx={ibvlP9 zL$q|SCnIFGnJzuO`Rk!*$6doj2-&y-JWT=JR$dhJjfNajr|$t_b{S7N!!TxF=gH92 z*H%U*QJc8fw9j};>yb?{3$O1LC!mZdYTb{tA6V@B*v?!k5-zw0vh<4seCM7wP-shq zrRmQ-B71|%>de(2k=?CP6y5J&}Saq#We;iES^#AhnQFVoS(9LI&A!g{vSxe z__t-jc-9Iqi&>a;brPo-GiE;mdwba9TCc5Jkq;b2D_}*$T}Vc}rMN_TR6YU#$p^SX z@YA4`akJ=6%KzRgJ?zrGE`X=icDKMA^w(L?YC)ws6vZ{DA(FmPn(4d!b(=lxfY%=@qOZ!uX@gdQ+@^xL{lS3sjwL?e(bE zcrA>p4V_n@CKVo8oNiFgX2~He5o@?7-{%!lMa%PzB9- zAiLLha1f}pUS}E^L`KLMS?8TG?&m!BoW|z*X3Ay%!uPRuJQ{9Wo6v$gjg@q{mfDS) zLT3*v=|{rK`vy1EpP{y&75AZYWXy2u4(ejIm1~G4$+0n7(@Kb}wJ%C0tlWq^rGC3DcxjK?KzL0<6rN1RAWX4Naj6CLxp}YpLotA zgDwOzZD0k#iw?wKla0jct88MD$aUziBtn8!!!7;10V^k%5^#oz&PZc(domQc)h6Lu zptMoxo=IDj!y2{1G^J3a6t?(!Awvg3?$bHNI7q2&+Xh22ciwLsUjiXvnnrxbLL-{ zHW(6>r1GF5_oqw2#QxQVMq4kaH@87v(u&nt*;UN^RV<~UsO~{DY z+U|GbJp9L$tl65cw|}%a$ZwHAeiVu+4)fhSwbp)Wap=S4&8O<-5x&QpKa$i>PIPAO zenhvU0xGJkwI8cm7FTe~xUdoBz5+pkw0UQW!_Z`LSWcsSdX{R#`?p{l-mC4VAH9;*JY zZ9pzjURvZ zJQ}znW2FW@q;K=QsXED0x@`ysek3h6DBW@k;yG)!e=Pr}R{O`=qloPfPJ-rPbZ(iW zgN7o+VRJ@wZSCR*+D($T#rQ*Bqi9UQwF3zLF}Az@&sO`9?M@u?BS8dqUPl*Y=Lp^~ z#i&8Nw6Kcc-6Bh**yi*bh`zK6CdAlp<#=7zN~itE*dqS-(2my#RAhudFM85uHcc_dJsSvl~G-9^p}uf)$@v35R18 z$wo_*c0jF+PSzZUMAaDFwfGV&%0Cs0eDc@O1246&;?MQ#xjp777d-Rpskm=&Ji77x z3*+%nV`)9wG!uIMsLH>Go?ZHvItroZOY8XJB zAvrjJvB`4|U=#}|A?1rb(2~9#Ca1qm&DS~F91&Dz7D^h_a;|L(6N5s>yjL*8E+#iU 
z^t|G)IUg6LP*d1L@G!qk+Nq1J;p8|czXg?(m6M-kjcX~e^DEdGL0O~3is%b&p%NwH zXL71*mgXoNE?aFjC-qXl?_vhLwZH|Wb_;YV17Qt~*)eqpUEPn(C0lXI`2DLFru#f-wa zZ}O3f1f=}^i)q(>(z0>ZGiDXe9(TcKOhHh=96lJ16=J`7gi|!t_1Ouf_@jtE0?a1Y z$g4RjVnwQ++*GUiVVJpe-A7j&^`oD$9RrV=sKZ>Xekgm=kq;QxA^iwr zJ02f85x<|-ckVAXtBfuAq(x+RHN}ylPg;!aOcNvGHL^<1`j#+xs2M|bcQx5;4Wr?E z%2b_0i3!hdYpJQ2sCu>;na}p}{xuK|(^0(hy)VG%yp@j(1)i+w{w=t*tS> z%Gu1J4|4HZI+nxhZ9cv+j$IQrvty3xNpQ^c&5JJz%|peW_@vzO621ASEZO#^W;fRE zXl;=a#%p`@8iHtW#_c5;i8EWg2Z`%cItCnni`&2Bw_rw)`FCFnGQTrg${9#P6EwpXqwr;=~2C#RI_33-$(`8~o{kQU}~ zaHe}Hr%q1y$*IYeW z3zZGdWUj~txULm_ai6;-8&1IFBpZ&2*~v7m%7&x*5!rAuK7`NL-_Pnhvf&i0uBY@? zWy9Q{Y}h8*@M_71?Y;%OoNHEg`EBPuoL$1_Rv!5N=xL1Xfq>oj-x&^$*nvEZvqeV|Kl>6g=E^j4J4{yUOknCA-y25GC`HazT{r(XV&J_dS_A^~*dRyfpCq z)W(MT9nDgd>??abYJfC6PUUg=7Pf5b9f?FTpqf!SU~R>B287DN)WOEu0!J>PGVs~~ zy{0s9eDqaPSw^VX$~&h}Q3m%zf(e-7Da^i&t=*c_;B=hVG+#=Ixp>_hZy|3F=%ePYt>38*7It z2@ro$H-{!c_RBz0c*>R~SWSYs^!_-Hc=Q}AjWqwEj!#++N3yZ+K^R3i>Qpv6c!U2( z-V)tG=l4=-*e*hCr9ztu@KzfM60x92HL*aNg_=&8WxZ+t0w0Kj zVEr*S4bKhlix1lPdy^Q=L z@bh%DjEOe1oHjn#jdtz_FCC6DAJ&a$%SAlB{i+d6E&L!CZ4D7sRN%Gc$J5GA_v9BR zoJ>IZvgB4OPWhMKz8HsF?*d85Bcz<_h1sihn3k=mTQ9QslAB2}62=dmVUGPTFC!cR z4Ws&i+eP70I95jC#@b6lo(KYrSGkW-hkixse?9--ewq6HdYKBe;icm68(x-!RVMki z)!MwIU1Si!WbuL86IਾffP!H}<&E4xh@w!j^BJKd{Yw&USMora;+T&UcskoOz zxomw-zt!8b{2(X3wLAu*_Zsp}rMS90#2$@wPR@eMFH?j008Mao7IC(?&T!PD2ZSwbeObV-}a#jjhTJ7PzADlylcv1O?2HX==7@7)e@(?5WHKCj)_ z$RY3I03jA>w6kiP?S^Q=+AD_GznKb&MxGP-1(Ub2_N(h!**ak;1r}i>YB8<@f{5p6 z1U^6$uU-eWO?H|njA0$HJxj1Sz_OKuTq%zw2MaEuM1=CVhVVBNa|2H`lG?YhHhAqH zOMu3wi80C+{OXy-LWZXUCuuk!gwCvR7GEL+=s@E{a5Jjhs3Qdm-itC(^m`iTIYwob zAb`{6m_A(WH?-?uou48wT0>Fy)UP*mSe0Hp_%tAjpcV6my&0Ln*P6$hA$M`mSo_hC z)*_O!q4+9|tYFhNaMFqvHrF@zi_!6Oh*4CN!Hv;_*^ACn>cyE;EQMSP26&w$R!>76 z6~CEuEQQ72oha)0r<0=)KjFp@Tlrg}EyYfv8abbSrW#xM8N_lmw(`@}*ve1qW+k@r zQ~j}(pVIBV*vivGbmZyWkOp3B|710`@{@XgFt#G2U5dBK!_qxSp2r5MqcBbSl*dos zE%_0GXy484E=w$)WUj!@cN6|JY=?x_A30x;5b@V=}GnHG1f>3}8tVPhqdzr$^xx-~eo-^gHy1P1*6W7-)M`TqeChn>X!ltR0hP+-ax# 
z4xy>rwCG7}7>l;gicckTsKiZ8*mz947I33)!FTy`Sx`DhH8V0W2jr6g#OGP%8xFIF zH&xo63xH^f$>9pNP9qf;a0py6B^(n@l7S2zc&j=p;zoqvl~k@@9X-P-_r3IJwGkEv zn(`nN$ZqSLf&nm7Moi3~4hk|zXMM16bjn&xrIrDthQ){L(aMTx)uJh@%Qzs1^XIQh zQ3`(V)SU7KkzC%bYX!=lYjgId7P^)dZzA=ev|lC4>KuEmskG0Ka7Dei(u)t;1tvT} zjpA3nN2R}IWSEsnXZg2(8@(B#5#xb<3|o~~5VyJ_!BoB2tmHDz@v0ZsY53jd699!{ zy_g-`XV0&9N4{6E#ht0<0x}^1@)~S#<|==|DwsRkfBBI7W@cJpU5(`1Je2)wVGkB7 z`{D`DJ~4onuIn~=-Mizjq2Jk?;sTAG+E*=ir|M^aast#MFRxJ^3yYsINYFR+RUkjS zew~4HJ2GXX{RE~J?RPn8#wlJ>X94g-&m1!wd8!+ED%4J92BM@@W+s7XKne??6)G7{93Dk;7!{4)0( z_UH@*QP(e#wpqYNJ@R2OZVgO$P2d%o@)0L@p2uYui^UwxU)HVB!B6)^?!8}Rt2Pr8 zGeg@ruTuPyFMfV+@pDn}b9wZJ@To-cqjB+Xr+D%2o}e06&dya|IpK3~920I=*%{zM zcx@H7L;9U>W={T+5I+b0lke3@X|h|GO6j4Vtk_@A>C6vbf`^d4&YYRA73UA9!8QtK zGX61XFB~hd)9)>wDO;z-guLb<`iPXZx^456Qdrae)hx&(RG6qe#;|Rsst)FnL0RvuJ>ZTE zbOKL6##Jc&e0 z9(~ALwu7^9GueeU!}#JkaJS~ns(9Ox3-y)~=R38`rwOzUpeE2H zDQ`AAp-3ByVmPVan<}V55wqB>b}@PJ48TNr#K&;Nl`aZ)#VTFougxK zEo*d&z+e%bHiW08dZ@@VnG!a=HZM}F(XpnAEoND?XkIq8UJ(0ps+503wp!PDqKnxt zq$7agi|#@lypX}4N@AqDGY_}_k73$-g)NEKHxyk=pkc%Vw01*7@*x`%+O}svdZcD$ zuR0#|x1bIhjxL5nbTP)|JTwSz;8U002xl*b6IgLEjoU&DV?4!Ku8gq);%;CRb&in{ z8sk{*4L;O9lA1QQ{NP5ir#X_wf3ABZyAmVG^wR2V`N>k>v%}}JRoUYnxfeGEz?I`%^T)gnMypZG6}>nn2TaHwT{c>Aoy@RW{dG8`j1va zrycCoX(KnU;@wQrBu>sG!6xU)l0e(%2{;%=^eeUZl~k{v6=b(r0;QJB;SP!GXfFk# zU^~5s^{8{{X{IbR+zCAwg1ZS?g#O7k#cQ!(dw>ZrGmhga&P4}$G4sx3?$JC_Pz!iH z5NZBJ*&X8kf{qUD${{|YiKZ;Kz%w12E`_2lQ=@1{ESEdEaR1Bv{ zVWg15Tai?-&^X_xI(q4bYx7$`UeF}}$W{36bxd>E?$xIGjm_8QYz;orK37HR{~mJ{ zN{G2GQ;n@h8YKvQSt+8C{Wvzri!C#Ek)e8;J!NivUHpoy>ppA4XI)b&7pKb#T`-j4 zYFkz@xdh3D8~YK`mI#72v*g^-i%c8p<6oXt7KsL9oscQalZ}cd;=*0nfK%y>+e8d> zIA;WjU{P6>T(52Pf)nMM;0gMz37qZr4tyln%aU+ZHw#7DygLW0p|%NRiM zMN!gMwrN}%jYkqJSnCt5@2F|A2=!eNBBwLk9pTaDl~A@q+E#;9JWWScPz*)0m=Hz1W7=F^gfcHzcC9^q?`?E26!kb}bowRh>z*H=L#~ zis6_ss-4v3;O%q(K|GG0;9YT7p^}2fG&kcJ3taFJv@18%1@&}d&%yC8G3MyaR_bo* zvb!pq%OEY&k}^r+EXTzXdZ8$7`e!l6N)1kNoY5aK=oJsm-pm}^P?=&r7xanv(dDNR z=vq{8BL<^0pgLt#yPKx-NH?2EKsR#Cv~Q<^uOX)VdO)gM!mCAK 
zK5xZxuARlnHu(2oaR(@tMA&0VFz?lPN5H?%m@;*9tQMi8^GW-19U;3iMcuq}M& z` ztt&JU!z#~CrG3+e}{SuX4Qd&BIN-qbN()KwNb%cs<(V^*&uPeB1- zDAU*xp~5+6uOvaefIUMw+B~i_Y>-1#2-oLtt#CPDH;cHg>ekqAc-)NNLm$BxZIm|s zcQ{m`+k6#UzRfn&Cd;?1&D?9GHgC7Jc_(gj%joaMZQijqi|@Lui8iy&*lcr^HA$1U zW5xa3Tq~Z>RzOG#;Y!0bax~jqDMoE>usUgNUZ;GGheVcSk#F;c|IV=s#coOXaMJ@y zSE9%qRDh3*91O_EZDMXIkCEYK(G1A#xNOmkpc(TBdMAe7E*c^20-X-Ol#=1*RkR9{ z&Xu_9N*u;oG#DnehpBp)aKygt;0@DE7>>%Qx$mxxd>H8rZ%*iV^2^8s8zYy@vr$R^ z_sPi8kGjev9mN6j&%NSGBA5vRR5Y)ho5X9Qns!^AmvJO|<(#7#v(7vI3*H~eQdsbF zvDsaVzyH_xdn)n$7vk?fAAe5=BXR!!8h;;&zo!!4e?I>HbMg0d@bhv0k@)-1#@|zk z?>`rR*DiVWpYZ>$aemG_^X;p~-&2Y2KNEk~@l-DVH&Veb#06AifS-y1Hj@B99RvJi z1Q68X>^~KM)i!^&1$pJ5Cp;JYzqYt(`tncWN+%K}ht={{gG1N$*xc#iCZu)^8(dv* z6@wd#;m&$E)9ar$xG}+f8%BjcJ%&R!4Bl=vkE+DC8Dve6|7MUFJHBRo5gu|xkZ(1} zsv!T>AS*E>X1|9t6RKj66+!MABMD7&0A0t_bp<4RTqK-!e#yVP9q{hFlWlKN;krApgN2@o;#^bnxG7 zgv`^iV{r3=+cvn_7%t`E%#-qu1~)6XZyKEZCee%-weoPTn+7)~xW6^HS`3#C ze#ud!;Qrp=YJ&S4gR91HDGxUyxW6;Fs^Gq1aFrPDuHcugykWs@8C*qhn}WmEI>PV2 zwmkDkgsUz2Scr!I_*eFEhoe=okA`|#fT@2v^+Y)*#_87wSmrs+1If$u&bigkXYb+x zNq^rpS7Oj^v?3eXNbOzFo zI&5@(6KXJjxA|KP{=pZ|kzf^JgY%rfvV)4jUmVFitTT_R9NH@pgWG2m7-2Ka%Fsxf zzRrasxH!2=tSd4Pkxg9=@x9)N{olkDng@ZBI-ck|+>uKagFgQ2>gn$7&mKx=I{Oqi zwu8DDAxJzlxUhe;5bmb0@b414n5REC7J#F)&9U!!1T8+yyg7X6%CkR(r#lzcKApPU zeKy6pr|1)5mGchzNa;qqI?PVzqZ>zh%U!sO2ITe;?b|2??;t#{WB-@^Le2H)&*zN* z#$+e*@~+F_Ur!7N#F&1Va?Yj4)8ztMDf*rYvwU?x<62N9Xwz6aSm(UybLmMAurhEX z27#YVO)wrhuh7vB&~<=<4|dTFIxM-Z^eIE6Gx#>)GAeqOp<7aumh>U;=q@+W{pI(8 zZqB2-vKQUez347S=r;C2cdZ@W)vKYqo9xX62l)M57Tz+X-Oi+?fkDSg3gK-8?()rE;nH!cy{ zUC9oeh?_Y=$WQ46DyvdXz#3I5BZpKoZCok0`mFiqDW$B1PLj$LuPbHtDLj5c-`1@~ zBlfpWh`npkMr(^u_;rRE$gEQqj#MH8hgd(6id(?{q?)n;cly*K# z9D_bcnB>Aut8RsYyP z&FYbeqp>)@U9%bo)q@wEp(2o*)K-80_Tvwq&upqxI&FjWbY@L*1-@rxuG`-YO2&@N zoyp=*c83Ev_G{E&+!s#43fnVm>-vTrLliPYsu}_|n=Fw^(oK`vLbcxWz3$(SDn-oH zl)By>=7%{bMbi940rhReA*q;B48Dk;nQKz4Pp(Ou0z2>ITE@=cbF}A7b~Wcw?rd_a zn7QeuDBLy(-B;tABL&TMrCI_M4pTT4;0IPReoc?WbiN%4kM|5^F_rT~*Dxh5FT6fE 
zCG+k4)uyCSAmc1fg?R~zr%D0NjaOdMgw(TK3QR~V5budc1CK%*vxd(UOdMePlYzSb z=bt_ug#BJzj~3bqP(B6&92sx1V*8qd(H9M*FLs9FG!BbOA%4#Fu<@BC)udQb$|M<4AM?DX zXVw{31qT942%KfTbBu`DdxdVCZ@!VwiZ^oxAI=uw5i9yShILLS8|KTnPTXV~FSd_$ zSfWys3J6XRoz!m*(wk5Uv#JX*IURE0gcVlDC*cai=g>;MD2Oy4at85r%1ZSFEuvj` z$t|HG5$#|5`$`aIy{yC;(bb%RE+3#x(N1QLc2G>X7>&ZE76QEu zkVJd?{I=37xC-zfm}r-m!5D@72S#b1|E;$MVV9@0iOzQ1RC=A^oJ7inQ-<&(e?+bwzTU;o@_#mRy-NON@@w>0mBL0=RG!FEDP%g{<(`R-fa_T_jB&e$ z;ByKQun->F-6b5B4U-1W7)LQvT}g_u_CkJa=`&iNgip=t_kVtMW=1I;JWU$zSYZoR z*fcxwTw%NIg(b;vhq6*VhxU+Pg^hQf{NDa^3~{8Y4R9@pOrpph&b}|gU2p#kyW=*;w;~pzulKZM+n}m_`AH$z!o)h zA3#MaJ)#CRU;DVd;1MAW237XQ?>2*xQpNqX~faBO@z) z+7OAj*j!+=vn(y$6U+yUZ>|O=#5e3iWUxU+%Yi4Sjlh#rJFm1_bf25}>RQ14SH_IC z0#82s!F%%Cr7v|WB!5ls5&0E7`BnNoGrCIt5$=i;6^toGoQx`kDRHQrk>9vd$dPM= z?0HJ5^k!0#BL}029JzNAy;&-te>pJCQxJi?2@10ILXK1D@oLA0j<8ihFr{s! zkR#i-n3^0KlM8pPN4Mg_W(V2|OOoRZN`ITN-o_cC#wlLeYG z(ef?W@j%|F)_M$EO9wnPaXdjnBNw>##)*y-GI0T*{iw-GUp7P{YFZmGYTB}fI`QVH zX@{eELAy#|vI$Ew|DmR>0Z&cU=>E9&(N!&;IyevpEyWkXPf^EFPaSNccbe5z^0SH6 zsA5tnR579QU`kU;p^8oaJf{> zfhy)8e&N5vdQDZoACS5R3UJ7vK_fdTuOo}Rsho-LMv3XN+Agpzo>l`(wdN*!juB#x(Xv$HsOv}N>IrvRsw@P*$o?LVa%igz zk4`85a4syo$U=!#6vx(bCOHitN-S#?IPWxVhXZ zgNx9eo6CJe<}tcRSQ(T+apop6ACbaMf?vQj@yOHc%GJ8@Ds?GeM|l9R#jZjg7)AlfbnoR}#2W#LZ@}E>nyC;Mv z_|%ndTA2&h^7^e25?gza5aJjk+bV;fo9y=9QGU4du2TAUjvEB@JYp%#lvE**oU=xr zaOYO$@vy!vu|<<9WHoc}h41I(V=WEGQdLDu9F7?~x_agB>XpCi3H)7Yw~-_<8M{>Y z$#^n8{VTB8u875+OgaY%>A|!Cv^g2cOm!zC7JD)|cLffMJ$aFU-o#{F0Z5aPkg&+l zY_3Nuy6^|LsNAMLaJ<$W2(EvJZxa@k+Z?Yon3&i0MZESlKj8aS ztL%(ZjdG0FDn&w$l4@+mm5S&3=P9K~(n(VJlf46lP_w`F+oG}Bye-Kl zt-0d4C1r-^-Ue}~An{zK;<+kpuB1i}TVY#qVe_T7!diH4Njz64!iEdvRu6swR)lAu zWPYT0jX`6-YY?WwzwOt5DhNCM*jI*sk)h<+3!F8dA$UMy=!bzh zaOi6kUL-ogmKaz&3%JvKNIf$?WGs2kzPpx$+dQD9*x%(L`O6(k-5pA@RF2GDNZ~PJ zkAL<`nBkybI0CGC$T{V=hq$fEIpuq^ES#}w()$<~ql1-gKAg_H-Z~CuQg;6A@v^=v zaB;Tde0H`B?0i|vKNd{mSTi+@m9^2zIr2>kgq7Q^#Bi39`@$Cme?teIOAp^(?lU%N z?lP&|(DCg#PQ1r-)DFA==IH|bGm{R$( 
zYx+=svC1E@zqJ=fqG962$$?CWhDqg-VyTqru<4)YNJXu)N|`n@N@+;_#VDDA*|i!b z?e8@8I1y2KNph$X^$fp+JovLmNt^!c5uVh9D(UIW?u+3JF!z_J1;h5Y`jW_>J@yjc zL+x%(Dlva{ul(5saw3Q(DNllEQ{{vGGN}a7qTcg;-oKww%4DBb3I_DH{I2t7??foK`?Di?G~ zUm6YZ=1Y@OATrLT?oeh#%1aR`Do7$lDRi@?!e;Nd%->ylgMp3s~+CX|HILG)+tY0hnrfF zspe*)Xg%vi)XO+-sD9eL?(u0ToqUmS7`BodF%nZRI-t$dDvPDTS*5gP@~~%%PM;S+m<#$g?AW?tF#n>c1FA1hPWMho4`Hafh7k%0)s!{Z zUaymyz9JAf>&$B?c~*OboAVIWz|F(i*Z96__uq5yV@}IByUIO}1X<*VI&DEIvv6J? zsswM4%(&9$Nj!~=d{2`8b@5nY8K?R^qJJX06PmMl#q6gjt;i1;-d_z9L=*X}<}M`} zre&NLY*Tk8JBx#tJjYwSloW@8Kbu<6&X|P$G!M*vcC0mg$zc+?Csr>RFO#xt4&A?C z?WRdvw!CZEHd~SMoZlrbW#-;iYMBOZKWZ^me$H@-ydI19 z4iV_mbjAwXwI;SJ6W9gaUXnLju(iV0_hP#iVY}Iat+NX@+Av)|08_J(wpuXVz1NI^ zn5-Kty*J6)LI5mR?Y#QhxnMY&_k8FAZo>Mj>?mixd`@c5?yHDWXETq^2;y<|0a4I> z`ZNEkrd82+pxk+psPnvwrc(t-ob&oSe%^%&joDM}ylF(MRPKr)hUmPrOd98t8V#0- zLWK0^1Q97Dn0kb1oOhPV>iMK)BD>_0+hkxC2Ij_zC`zcGtqqulE@Wr`LXcah&qWbM zwg774I_QNP=bfb@X2s+B`OATp6ga};d>JcU9YE7{v4@UEJsC-2?f>n=!jhHKD&2P| zRcIeCpHz&D>Zn-fnK;~{d>H zy1zl+)~u$g{jJ>>&Gp88CU{z^GUF;C^VgLfnZKbFnLl+uDKfuCDw$u^Gv6ypk@?%c z>NTp~Ql9Di)qk#$am_RLU!`XD{M>IrXtO@66aOc@_@BMsGsu$>Ew?nFITUCDyI>_= zwWySiS@2lSE3I6xk+n{lzd!1f;R{zmP1;2Df_3eMp_7zRwK6axm|L-K-GWctq2Va7 zDK-~zw8B%ik4-!o?p>om>A@qU;mC!{-L_nIEy-o zY~=~VEcX$$cL6*4p*^vhj9{9R12{hbb@O}3X0e*q^^?}F@e5Iy<|C<6A3>aheywKm zf**rxwAs_^jIh)&sD;kUpl(oXkG|l6nO90X=9Hd9-4Y~yv!oOXA99?QYb8j{_=%rP{&_&(4aQ8t+7{KMcdLr+4T6Du7o39eV|ognD6rn z1Bd&_OA_ru%^+`dbrV@eAFyq|ktM*J+5=_{^on;vJnWVjiEK*kODu~V*iSbreqR}Yl?z)C1=JmB*rS(nkLNv0C4%W0ZFJt0{Tai5 z`Jrg&#~wat=*1Kj+yd8nSWM9`ix4(QG;niS1j!jGKxQLJ8~0s3F5GaYPLT?yNu_e8 z*7af9AvEriI*I27GY^|~-$LQ-MbD}PnmwnKZrJtDOQf<#7L{W3EGVT3(==gTFe9I^ zZkTno&Z}1E@Ti5?u5~n|gcJ@Y(x#(zoF_GJjFfo5&_#Z_dPZF`M&e_KCqqvmv1zw& zL(T2lC%0=Cb;4|BRX5CNSGB?J8>C`Jt9sA(W&eIbDGKboQt**;Kp>Uptl_4E%?lmZ z?BPeQ(rmctBUXPk%G~ONtv=%060Z@pw8d7b%sPWDEjP9x$?2n@Fs!gzAP5TiW!>zd0w+9=zTZvPN8hjO% zhX!j^fmZD*zoc=4_H5u4W^wO7aN3-W%kCoxevbTz#O!5GVnCqZn_2r0>X6@6*5EV! 
z?1qlCg}{R_r@GCiV;95tqm%~it&>T;*-+;Cf(`YiL%%^$zBeZxJz%nHkTB35b!wl> zPOw*ElapFM`KXk{X|AQUXwYC^)jN>fwn3j(IkweB0Rpr3#1H&FBcCJk?T|N*=N`2c z*_L6kMQ|%*UfG&MOG&tZ9*tZ;kCuaWy&~x@~!!0Yx88Vyh_qsLmXSHDF-ECWR)3rLt>Ty}WqFb|U$_`&7WolHE zGMKv>T&B#9QqW*3kMPM03ifs?e!I#W`Qb%pwNKi>+Z2?#-f0WB93Padw!PE-d-HYjVgz50hl9&pdlbc@Fis|kFnA!kV zAvxW z{E6iVNZd+i$-Mxy(E%^&XMN1j+2o*6Z&%py<{_=YI6u_lS!c~HoW+{!3h%lSmL3}# zboN^&vBFKbW{d$rdM=!3kAr6wG=$EP@^SQ>Wg*otq-;(upN({!^|J?9JW^Rz0CRmq zxv_XQl`;*d?sU()ssr}0trXD4S*D0!=I&JUH>|!bs&W#3{>}rKexj-c6^#Y{f4omC`X2!X7N^N;j?e1?!snxd^qbtE0Ag4ik1zcKkzF z%NASkEh|SXyO&b2J-%+syx+Q7K^Qi)F&SU5c^8?rUyf9Adq=75m(z#J%hijU=4{-$ zE25R*mmk186NW_fN*TBc-BT0HnpIh~zqOY~-P5>^ zD%X`AePx@psSIzb0vfhK$~=9!DN;LD{JQb6<6tG)*@fw{M0^cs~Wue3g3{c zsKFX5+h%t+$Uv(b9?5m3&}>7PP2DZbhOLrDjN0_wS4C~=ZbNN`yiG~essjIf-mgl{ z-hF@yoFJt+Fy!KvyKUs=4I@;+NCTd~+qd;98nCr>(Q-N!N88=C!EfXq*Z5_10+lS@ z%@#jN#kENlqrn-y=Kw~cnqVaNI7VW5_o}U9_egE6-*auX)ex^! zOsus|>H&<}mDZ_yEF82&>9FNzD`J{l&9MlCCBQ>R% zTlb)bj_TX$E7j&5L$dbDh>F%;dB8lf4nzkXzgOj;?!OZLr80@pUssC8*igB3R@UIp zR&FCMcj8`K-Q2(PP(8%3ar<*7AOHm}Hl&gcb*0Sq zyJR@%+q$7QVt?yMKBu~D+;zab&o@2x>vu_=N2${ubq>}{#7CgqOY8PHuNo=Ecg6m$5;80~#bV#Fqy{Nvz?ZnCJlh7nrN3A&fSRj-Lr0GpwfT); z%Eh+QW8l~sc>w6gU6p`3Fv4Q8u&65SLvyTXb-6Yubat(R3O8{(XZh%0a$Pjhz%k{~ z^iZnK_s>(LnpTra(FM3U(OOWG2`89Mt7pvqatV&_{4K5&g%|lHT-8I^!XV1BR*9S2 z+qvt_r12a+e@&FSeE&$}F;Yq6ntwm6l-esw9TjaW>pbT-L=h0jqq-N4Cj$mt-Sfw7 z9xZx!CxZLDy101U({KCQw(?5ujxY&Cv8x`P6{RRd%cSAx3E{A99k3iDF?M1v5~C-~ z`yzwgwP9S73(>Zw@@f8rQg~Ti?>DX7ar;|85jAeBrQ8HPZIg#SKB0Ds)5;dzvCOeW zf5>y%ofE#j?S`Z2s_?S8lMbJ%QrbPNRNYS+E)XLUAiuN%1Vt^Y<8Pmka6T<=X%XLO9tI9z^0dh=wIsok~IYp$hUFQa2cDzD4g z+;KeNGAHI|Vt*^-v(59a}GN@FtPLF$b>NNwo4g*seE zI^rYgIs#5&IM#Z61!h_=87?k#Ql+l~jKfH1Zbmrw@vzmzxlys>gFc)VgW2vil^|b= zeOKX53(jPQ|3^4i`jSm_*7|O*4*yOdyhA?-;aCzUOCRo2=k>+@t(uG`VK~VLKN%g(jOAJ3XfE%~K}_K>BT^EWYl)M?E4zxb2e4S#F{#)g|wdT<$CXmsVxS zw=a=@Rrs)*b4TSw$0}5c^1^NF6TP^l6t!VfDQsY)1++t;>n*P`desm#la%ACjf3wh zg@ex>a?c}5>4+++6sC$&Y_~qd=divl9@4n2TGJ|rq6xipC^?~RF2ET^0%b<|K7`a$ 
zLH~u_`byyr%PMR%P+?0}*m7LhSkP8jk~@qdySP9L4T)I6Q{ofh$Nvl;?_hA6*W7ut zo8=I(Z`dJXJ6R46`#RWgvkhm16}2CXe~0;ZoPTBh-FbT|tZ`XO{uGWne=OKr&S$wd zh086noYa8MpT<79#sw~Cvbr%Nud7oOzb2<2p7?Oce(HcR&Slq;VRun3hqj={aA8JC zr-eng1MunrUg7h+4^YauhlKp!$4kEbp!@bXh}caWxUQT=tpFv?^Y#v^&Qc&XtiQxo zO!@fnj#-&MLPE#Afif{?Ur)jrVfradix*Nq=DrW>%uZ0KkcKt6bsfZLS`(n(UAmVi zR3vS89WX~c%j^p2U&a+jG2E=c@PL)-KkW1+oMGIUFq$6f%HZej2wEP>eslS@(%~@urpv+m zkCZ@m&Yf_DF5!{Qhm!_An|8|}!XtksGo8kC0|D-lC0IH_3!l)pzn!|m@(J_ha=CPQ zXpjbEu9VNATn&^4eM9Z^Yy-rbBfx?Hb!sO&slIop%+KR*hR21e$A<1CABUE`iQD?@ zddu#iGa$c6ThSRoMmo*BMuO{AILIp>x&yhobmASo$mv=I>+9l`@=1D|{bRx3aQze? zHFj&tu|@-XxeOcD81_A8x>e!XQm=0B;6{!Htt<(wq+V-qzT5BjQevK)i!TQspwq&- z?@wL)*c@dAP3Eims4?40f+C$b_tX-{{9E_37 z<#2&ay2ZoX<)ge<;ss?}e}vm~&EjVg5WBocT)~?UD`c7ZLxVuNTPamuwcNv-563dx z3*x@k`AKJq{3ifhPS!ugi^Pq*c^&1tsquhqaXMi>T!=I4as^HZV=ChuvXrZN5F`3Y zO>_R$Y51T2`FczkMJuMcmXFzjuhl>OxII0qr+m1n`wdqz;t+4Jk=dlmH2ocS(z?C4 zr}<(yD~Od;^Ubb-EH&S##T?{C8_Y?GmDfYSMszzVQ+AN7|&>_N@u{)Q62)Z^q{ux=EJ!R zhkcQ5F|j#`nREEF<@+I7ej7#U)=}7mNU@DVJt^Jq=k-9Tq*+C52wjL@2`g|;wa!s% zZ}9FzjyX|AjKzkrnrZrJfb*YvSXTI!-SBdK_2$oH=d+NLbXBGir7~5Uy&Z1Y8{|Ml z$Xdx-0VCP3%Sz9nDJX^eJbpg2Dr9wpnd>4ZW4Xi|=3@DN(hZxFw_Gm4-gsOB@Eq(U z_>ytC?JOK=-%cxON)v$S=JL6i2{K5WrItUN+R6@{Q-^J4hg6R{mlK{!x9~)*cHssC zQ|+)=(d!-3)Rns#p5)-+da1y{(^>e6syQpo_QL zoR0R3{9vnMeh6cqS7N}|oGupNlIdm~Bt7zB)gKP;80lZZ4 z#j1StC^wyB%U>s zkp+OqS z^Ya|i$6Kl!gV~M>g5MMTuG?=M*(lbN_FEi+ATX82Z3PiB+$IqH+ypPoy;SK31QK)R z2YF&wWz>1{1iY65m1|3dA-_1$impMk!#48uzMp!3)Bm34H|MGuZa)GmS1t~^V*=&v zNJHdH@UObQg)<7Jlq2N|M)mx5(-!fD7v`F?*Cv!>9v?0Y3Gq3qiA(u6qy2@bY-9S* zt5{-J3U0LL3Pako;F!dszn#FfhAS*26_%{BrGj=SXbNJ@4Zt@}G|{=vMp+QOO%aX( zgf~RJ=Ii;a)w5#t=+g_Uht4w+$Dt4ZL*0Lz7*D&X7s7u+8iZ+vVg!Fn*XF~2$?8IA zX9@iyZ_qO07iM`g?jn(OjX$!ip%jIXa}qHrQT>!MO?Ek7dDDKMOZ`*!RjF_2_qhwH zZKX(_Ev4w1JESUaSI?z$snn}d|DabDf7YP&N-mhz>CF(*usf9ygDSmmYwVI*g?r-x z(j@cxiY|dLONdsxIf_10#|Elm{7Qp+kBs2*h17pndMAsWO3`4(m7>9nDMf=BB@HJG zHw3cvm=5uD)&=ripPlYvMgsRu@M4s|GSgDwA~)=W=@Q#ooaINMrsjne&ehd}t_XMn 
zGw{!Sdna$8z4TN{u~}tlPE@U#$mPH^{W6woFqtjKF_O=(AHkne&C9{X_mpK_Nw`9O zXi%e}P%bE{+*NuoEL;iab(aW%#p*|HiIj&$1pQUIdq&-P1@5I3Dw;38y%vRNrf;iSy#f|C*9H7KMWO2sAZ8#iCsVD1(u@g(`V+ApV+C7Zc(z zo#K$zM|iJtE0-q3Joq#PqSMGYW+$lhmHM5ww%>e;DUg^QM}f@2s{#rGk*eyv;n^P*7kxMB6ZyCK+Zr7mu)KKbPNXkX|-tjV03Q)v-X9W zNA-@#JQAf(smgKm_TJzZjxbqlW75+5-oa_^+;YKfg`V=4MfsqnJ7V(TQJ5wfOZXh7 zmFAGroVuv8xEGMWQ}&nJ6SSHHKdf-28Vz}VdY^R*^uEht?{E%w;dNuXQp{0&LgaD5 z#trC}rm%6avI#iJODoX{H!k~9lQ zx})!_l0oLWyiWFY-Bbv(S~bIvF}?0gVhZAIHM=7S=Qmz}Tra+=4i<6b{NA86E{4xK zuC2Rv#_XnzO~L)O!EFfcYX&D5hG@NqmO8j~!TpuNH3avU2DfT(J;6JytTn-W-C$M) z^A`rQ9GCU7;GG7yA~@WJ;I=Hdb%R@s;qLHoOM?5F!7U2z&jrW$=oXN#y5Bp4{Qfh` zA31Y`^RHMv3(CK4`RA3tVQ{l?J*i;a;N}GP=LR<`xUU%8bPSjF<;@80s|JUmlkfRc zgPTmix$>q2_h$w-DY*Y7IHH1&@_UWngzD#ez8wAdl9f`&FQR3M0hS@=i}5mq79DgQ z2%a6S9N&WSa?53wq9W%%mqUctzEB>WMydxcadXphLOZGt(p{^)N~8p_#9e5+ARVnE zOB9yVcNxa;l=}%4g)eeHaS??twx7n=<+lP}jzIy->8AG!dM1atym(&IGub%F-@b1v zjn+UW^#I42_o z6d4*DyjPcU6S=`G$3-h2;b|^=4O87igXWV8E|R;Ex^nT2T9-JRdNh3XmGJaqwn*Xc zTr^GAsjsNw!LMfF1<0EbT{>o(R8<#>Jz|wW$0kQqdLho#Eaq;`xrX zJYKZbDP-H63WFg_yOf98BV`DBWI@X#2&y2co|-JWj+-u;tCmTE6jqsM@BwqRc&F4W zBOLl#I;}{}`Pw_(E%BYw=T4P6qsyz0!{v;$A;##I_)fbeUU8UyT`5v_>FCaIKBcuK zr$IQ0Y;#b{HR!KebLkhom-IjiA_&@^aBCL(Vw*p~xH(Ge9XdBz%NnLq5o**KmaaJE z6_d~?L`~)rBm%xBI>O3_#sd6n{o$uly62T~GlfHUy5~cVPknors9>66LC=?J8t2D= zXd=o0Pj&?FFyN210f%pMbwGFXR7<&phE%(V&axUy{TEWtiQb=6=#jY&LwDOYQb;jP zuDLAFSw9RBgXPORmtN$GWpXY#T&y?Lx&;+S|CJ91qlVHl^@c0yOTFwNM#5Q7ulNRF zG;xw(hwDY>a$E&mz3B3E(C%nci~l9Lq--k?9d}3LAGiL}{TYc~<7VYVubC|Smao*% zgTS(lsMo%=2bid5-cew)yO@Zjt_!JuR+ay3-Ng#A>t6Zk*;9BSmF|YHo?_g^EU*kk zLWkpSWc4&sEpNG;2sv@8<#8Ki3kr{#=yXOhMR&2G-yj`SqCA~Ol>MwOn0gToGc=wQ zQ6qIE1UDEqF=ZWglr`gX9aoMi0+H1zj6B?=Yj9{p9+9LZUv9ei_cWKFEjY z0*-}0bR{fa8oZGDm_C0jcexuH_v5dwDmsR;6gR;Xn}`}1I~@Yz1~S1=M&@8Bqs>uV zZs|j;JmM4MK3w7>{!KR6YWQqc)yYb{0BgKCHC^1F>ms} z7!-92MI?IYfA}7HuWEm9)I(2+vyP;x|0xSWFh_WS8*knt=F9?xO49wpiVow)mQZbX z3Ya^LDI`Tj8dEnRC1|V_^v2`ntFjOaTx&gVkjNJkOa<>dBrveV8`%ey$+6FEzVw&9 
zaBCv=LRawgp{qm?r9S&57k@a#H!2Ihh*HNQN5N*VZmWUQ>o1(E5d3sUI79c@AoAgJ z1-XV%CIrSkH$^r z^skyJIW9N?Z!q`91h;B%wHPkt;YJ1bWrM2;?vD+w8o>qcveHHb^(BL<3hIl3@&@o1 z-0!W!ykD{0l_)m`@VhNP*T&^~|Jd?Zl>d(mZinUk2x|won*r{N2Dg&|?hh07eBSbG z#d&fHSgCM%>XqS zdAKFPaR3=Tz9_iQ8r*ygmk!=zjam@g9~j)c;67t;x)sp%a>~PTCt$Ak_YH1VaGy3f zIh`EbOa0}iGlC=BOD+S!EgIZp40p!EO$qL^2FJ~X`JUf1IK?wrdBLw4b`yg7j6u}} z^(jFi9z-{vbib#M@q5Aj-stD|?^;O&Ney)|V|6n%Htf)on!WtfM%ClEDYkl^C>NL> z1x;1l6ol<{@Ib;*;x0Im^xEDDo7_e3o4`(zt{};A+(le+A%ImBzdt2&?=3C$Npuis31Qx_zl^ zpLL4OJg*|3!>k7KPMjM`Z^H>9aNRxChYp}?nhW7HH$Z)q{^5vbHPI+$QHT1lE6&HZ zVox+w0Qwo1PT8n&301*@x>O+YzIboy&(K5S?k|MPl>7UXXaO`Fx?c##AoRFD>pfH^ zM_^9v8}D&S&R9=*U?|`0%>nCAcgeBqA$srHQa*so0i8NHVEJQz6^~I6zqTa39 z3=<73@(W)JzaYrqXPABG(hXrE#Tz`qNA2YfaN7zI6 zW1jBE_ow^unC{ixbx-$9rai4-JcbBA9>TlTibLAnF@Yk-$~T*dG|M*mN$>Jg9OyN? z;e%!=hEmqQPx8hqX82+5EKv#wHJTpJ$6++rc6 z5LVNdv3|7&M|DryJ0il&BGwx%x4^!L#*vN88+G)Lvdb&2$oagcp%l3jZxpi?BZ0ZG zkUz2yYL z#H|9g*nA^|=Lw|Pz~r)BK2JUqF^`bgX1tQ=;2)tM1z8Xt3q_#9i85$!kdeR!Bud@1n@qf)0Oxaw)rI}=73yYV6qD) z@?rV$;e6AmjNXjxJsLW$6CNpS#|>J=J3R}b3RCgWr!S04j1x179mHg-bQlh1HyU5k z{L2OZWHP=+L@nt1%9TM`2;OBZxX>7Si)+&?$op(K#@p&qwlVy-6Urp7cZzh>TDT6~Riqyqtw_CPrXy-_5pQ4~s^A#NcWsNX>M3oEHGr zu=utb6)(roDjeu-G45u6uC!EOSytmL>JYLlM-?>o0PFKAowBYLab2_Vwo-qS2UUem zlgYsGxX|12AT?h3=g7aP{EHp}87i197A%bAv@lk1 z8)d{m<-&05Vr<^S-M|Z57t2LDs^2tx3mgR@d&CHdz~j6`xfiBrxyRwP=nA766WCcz zsSZA^iVuzB1u0l41GI-Hlg-@pOg*=U7{HnZ0Jhss5b?r-h_`IqwxK*)x^wsA1H;5Y zY+-xYj-tF=#ISZ=d?TaK6w>CY)GL zDEIp5qe;-|XZj52Cs?XY+gT6W89W2|oJ-FGqp>$u4CVD24U2+3e=z4|FMU@gG-N@I zb}ZivxTtN020nO0qv(sq2`{>ehQO*{OrWZ%SM;c|Ab@vPXz&)|5E`0VdABVA3Kj+| zh&RB%I;(sq-L=x~H5xrU!$Or7CC#GIZUo0rj4gx#xwAc!TVnIg9xe?yJ9w(4oNRYwn@uXPQ-;5r z_4v!V4lrTd%IpOeW3|-(1dD&n@Y2Bd55gNdBkPT*E3)^?DO+V-nop1mW8_LfE7eU` z?h|ITTbVYtJf^9|6dG5Ga*m(Zo(Vj+!I)IaCbbErOzXP7?vi!UOd;7# zrl4#AVq6<9?%}hkwDuPLZ2(z`8sr+)DOm7(+r(f_k6sA%6Wm-Z-wjR02Dgzn&%Ra& zfJQd;2D9Knl`TE$^q37s$B)7;qagcZN24JFmdJbeV_3~`2JE)p$@%UmE7BgeS&eX- 
z*GNmdOnZ=v--8P!G$H8aPjI9EGEHM8g&a0J^{IvvJGOY^BR*pOAzZ_0IzXHr-X#vG z4v@Qwt>sY%%vjSaQ3vFF2h8SeUvrn+&91W0Wb#9%pdnxveFrS4jWY1&q94op;yPen z?^Y81c#y2~l(V7FcDVQ~+Pka)uH(+DqQ9&@0Y&6?h z1;oSfB3?!@svH3^i`G1kXWip#nmc}b`9icgHmlV$&zza?B0FA?m?y*{5!F0IBvI!^ z(?p%yTp;S4(!=gIg}ZP4u{X*ttqYPBBK|769GWM5<)@89P7y29tjMGQO`K$>CUG#R zqkyNwQ~a7H%b#P%qDbt`f?_o1eKf*sNf=P?qKW{+1wA8-=SdmZqG{-gutkBjzbq?w zd{&AUqKd;y96Q2i8GKfYvAf4Oj&Ttptl=KwBAuCs?RzKQF3KJ0ROv;m#&^C$d+oX6 z`D4xm=uB3iX@GZ5_mPrWHfuH5z;|vGUo}CD!QuP`5Mm!JfAS`Nf9l`;X z^3Lg0I_1SnfGH%UiuQl}`y?Fpjro@mzkaYcH42;38zIB>7C;Y`k_-nqV!# zu(0W6SZJnCsy#GxMk%y0EkyKTPA^nT8B0$N z58Fa9blBq;ipRCw_9!RNTQP_ zShLQurrymh)NXN|Zwd!?3+rg2oOUOvPM`pB4R$gRuv&^!y|ca4`-!y5e;mRj`60Jo zj>>0Hq*F$>(onQ;k6ILbg?VS}E&Ou&F|y)~#J&DydDY0BDgh(du;U4y1km$Q;m>%e z4OeM_R8Fe}ly#+ccUx8Sd#Wmybgz@I>Qe7q+B)|YzDHCZ;!$ORTSpXo+h9#I4oE0V z&7-#<;Q-;@5pIfc%9Yz8SME1E$=4BnvlCCjT4$+;a(0t(<+*KMIi53 z%5yqO zb5#8V$$k6V1vb-7^B7Ja<)jtuGwL>kur5tCzqRL??5xmZnx zb9IC(zSla8A9hj^D(g(uXRM8Me{|(QzhTu+pV&m`roQy;?QY>$+zuttJrd z!d>EQ&c&Vk?#2VZFb*bJblhSHIKqPyI81tNX&k^8I930p|!A6*KNPjq~<$FbDmuByhk3QbaFZu!t0BFsSP z0hHpmWT(`ssWdrI8`BH}kJCIt>C@?+Igl?XPqEY^^H@B@vqzTFfbRB8s-YhHxb@f? zJ=XIPGWuCgT|-$n3RM_fNk=Yd9=DFl4iXAG0p{Ne^iXXE%)Dx7IL z#Nm=^JB&?j-BFS7l1)-QFAnm2i|WMy+0e^FtS%lVh5w9^3V2j0-)nl$cV$YZl_NZ} zQ*6h>Z+rMH!RyS*RH?^BbRbB!kCl5wIA}JRSq%h2&)`G`K!>w zx%Ap0@G+8EJv5Zz6O^$8pgh*|8VDx-G3d~GiisNVyK#)SixlyjePMd2Cqor1`vpl~ zbyO!SdK56AI>*pS%~wsW8w2GY^Hpnj<*Ux@2N$XC6fvKW_T4H~{` zkCLJ1Q3WZF9*BaT`fG7eluy46(xDk?#vNh(u;#GDc1Fw?ipd~ZP4ZrYfT;Pz^;gA%y9#E9%W(x>MqZ`m8T!)5o6-)3%t8qI5t9z7+0@w;XLX$#> zQqHC(j)}Qjht!WTx~hD~db&ZUAbS~UHlk2-<+!i8LVc>4B8hDrEI+BGBmjo{9RRC{ z#0_PDfuf<~QT$yN7>dM%R>e_qtCL|CI(1mg4A4C}d8|6ZWumW&O&<_*!Cu5ZAd?C! 
zaD`IQxYxWR75WOisF9sBID!0p&7wyw>I|}u64QDK8qnsTMbW~uPlZz)-3@L8-Yn}) z<0ix|>EeJk#9MFQ1cq6oKk>{B^$s2xj-&06KXlT~m2909nKFPxV(1Nx4eLD}MawD) zZ5>tTK>Y%&^=rj+{#KqSv_Yy>}JyH&y(d6*`kB zbeckUR4C&~XR0`Jr;GoEH7e57ShZR7Ufh-pB3+5G#;=`QFUv6jmMiK^-v!GzApsCO z*57X6#-LQl+L|^`rvDL1M}%&7DXS=g1;j50m(lv!bfb(itr&+5xz75Cb&~0{2UM} zxK2#yXCOc(*I_72>;~>EJi?W)NHj9jG+~)1G0WbLAYl`RF(e9KJRFX2P@lFcP};%~ zFFvDI0Kq0`V zLd}a~!6atHpCWZKQmLl(k`7u`D$(tMjm^P1jLq@gjFm*XC3sY-4G+Jr6!?bTBO>XT zrb@LdCau1ekaY7E{-|Et83-%QUK{p3t4WUpILw*Q!$ELpXWj%Var5~T+%dY>j8@;F=2h;`wSz!7SX@jql4IC=2 zKDyOp!e_}otbU;22@fEl5$eOVe!xUc;KTXY4e^;jjLudTb$hKf6o=FZpnR+BnpYIL ze#r^_@fgtS2J|I?Hpjxn&qLgQ2}!Y9FU7$4<9L9}LEka-KmbJ#1n?aGHC5YD)EbAV zL>()isM>VY#vm>QUo{8J`x%HZU_UunGM99ke7K^@-OTdpIR{U(zYg~;)aGmbkEgcj_w94@^LZL<$Rv4&*|5lp}r?0RxhJ&C%&du`(-T?lYrP;R8WF_z)ayqhzG z?M^For|Ny|4y=8Cwm%oRynfH8g?8|F85j+si`!EYr5 zXBCaGx!XLduW^&`Hx(-~Qx+pWdu=lX_Up|Qt{G?w351$?Jv>?-Mfkm;vH#-x*?C$V|yx zDrX=_(I)(2LTZ_U21Mi^_-$@Nd)EXkLlBet^zT1xMj}*(i4aU*&j1>kkwJR$cXbeB zBt^_EL+O>B>DF=U2q$fToz8<|R3E$%i&L7shWhbg8qsZ^T(h((Ba z*ug~R1mW{1vYGGb7!kzu!p3OkyEjI7P-12f{EpZF6!zrf(ei`yZA9-)JjnsWh~}lO zBZelWhD@900~ci*SCP9ze+~79lSrh!zz5EzemgKbzO%`MoYTrI1hS)h%N>qGAegnR z>oN}5gDYBT94_PGgBx;&XjkN?!Cb ze^|5F0`BZWvrFcVLAgmvojMW@;4Vz1Gonxwc5P&oZxlsJ2SYJ``B~*>)U)zoIZtms z_-hCIf@bhI`Cw zwFC4>8K_m+jux6J>W=G`;0St6FwHF!3SdLTRXg;R&I+j za+)~rX(WAjn67wmuPVBlP96w~ZR1`)*K&>o*rJd^fO+H$QIE826+HarZ9trqnPHOs zAgf+CPm6k=DaVJ|qfJd6%`eu}=Z{82YikVGa&JMQT3BGjjzs<$Ta1bN)5zHRUu$F@ zX-HKoSBy_K_@EatnTMszZ1hLH`5#3)&;YwU-H8HO`B%*m<#g1*qjiUM4{OM9wv5iH;7dq6fk{f5io=_kazVG>8BA7njCuwl>(v^DE`?5Hfe^5hN|)bY#FI*y)h zVFtMO^4+^%QeyQhHk5K&LhaEk_YB~J(S*-kB|yWxWAI`vSjIjy9m^)K88c3a*3ezJ ziAV^ynJpH|ohH@*X693mJU8YZi+~(+Xu7|aA{T>ZHLW+c5G@u?4~SY8In6p!eYAEE z>FP+M!V`U&497x&h5PUo#p1ij>z?o^OHCTeeDHf%_s{;{rpLNRHneC0(RD_P&iFH3 z8KsZXG+&%{J|P5cAnUQ>q%~4;2~X8F`P2nugr@z}bJL#57|bTrrzU0Vthk6p8BI&| zYo%#VY**zIib92&&=5U`h=|@6beXpvF+038ROiEJ%#KG)Yq=5PJ5@Sx z1u>(c$Ap%N)j@)KBl4pSM{#BcAKL522G^K8d!z<<%AC~@!nS9LFFGjF@6SO6tTAQ(+pd0jsXikZQ%;q(V+t21* 
z&1Un!aTKXESNL=!quj!s7MWuSh1S3lwgiro`7o*ABMG{hX0ds%FAlL1NhooXq=dT8 zuz{w$pHb!QBFt98=}Y;o3^k9WQ*uDEoCQr$&Uaqg%dW^K?22@PmeB5F#_h)K{k`&u z5I_LemKBhk^IA^vD%-(hXPo$k#!jowbKlXibDa1EjU7lbc60xm#!fK>>BuH*A0FwG z*BqW3pIXSL;G^Hs;ZcK<+TjgMEs$h*KKj3Dc-p2G!y#qnZwM~J`|m6aNnhN@!Iy2B zZ%ZKtygHlOWIGz4s;1nysrc(%6ab50JptY-7pXa)#Ft(8h_nxvdsOEb8gPvh-7G3f z@lZGz*f2!s36t_WMH#`z{*R^!(DQ88)FzB7`9}Voj4C9rj4DLb-e@f?o<;F+_{4+X zWHi}G>leVzrk)G5>$R1?Hg&3l`K!s^8{@C&R3HD24wloYUKoEhB)?ljCIv&fuv;nn zROCNnG94V&AuwTAsuA5ju8gMwMuDx5*F4UaOJrc zRkdm)G38XOxA@>V){K0$G9*!ufTHSJKf8jN7=*&P&un-Y;M^cS5l)6-S$YeWVfo1) z=uO3%%9df{c9ka#Oua^-Byx(2gnh_3N4dPE^yH7D1g1)yBJ@Lp7-0jrUg=76m{Bs&s1u*jh2qMEfy9J5fN zL5gc8&|tca@Am_`eIUvS7BH;<2f=&YGG$u+&Kx)2H2bkhg_i z`@J`um3SF@|H((vKR)!Nivl;Zc}(UmH|^GxI`i4&g5yg_2d{81h#WZGhuT=i_Zqh( zuk2^7vVbA$`LQx1fK@03}L;$T5sC8wTZm1|#(+f;OCxud?v8uuhV#A|BY3MoP_ zz>f3=v{s63jY!yOjVOVo#~9oYx)xDj_wMOHEH$ED2E`RbnIvUob7dRRY%kTPV;9m1Q#BDRs#$NvKu*>gv;Vba1T_orGbmmvV4rFI74UfP8cQk<5`%mSXIA8Ik|9>8Zi#GZTKz0 zng!7kRpQFBzyZ~!Q+>PmN7m(=Lc;2TPubL8_mB>Tujq{gOX%1t?Y$+aP;~PVd_>Eu*-O1F_o*GF!V{d>W1EL; zi`>wOk{6f~m$!ybadJOiQ8E0eIdJEGHRV?JshKq3qBPsbCb`_JJv$t(OnZC2U!g0C zkZNrvJ7ox-HX-5LdCZ)$kDCWpotWHCrJX%bJvQhrUIHb%dkwzLcygjnDbU_E9l@q+ z@?Hy8`S2A&E+4)p{TO|%<3ir@#!~LFH}-bu_&c`jdfCo9;aF+n|*C=J&-+}wh2pA zRU>A8UeYX$lUZ?xQ2;m37QoFFK^D)-X@cf-CeIdm%;)TOi_DwNqgV+S%*~#WT<>$~ zCZRxEU34`$F@l;j+-0s~C_=-li7~{`xh;~4BhpVqBg?-~zPfP>i3#|7=4my=D{r!w%9|fV$>B_f`p<4yzvL?3ZX^^G( z_$_*VkPq6TXTyA3G|VX~xGfrHNGw+pGH5P_y3qFI8WJ37(4CaATQtX^k(X84&{SLHB*efRRRiL_&g1tL9k zs2(;&J&rwywje}`I*|qhS#2B^`=%JJKSv9)#YF0V z%`;7hWzDYA&EUgTk!g4hw<0oC8QjiNA0|?-PHLqAc_8+Ur>D!xY!IT;ZOD)sHfYXq z?1#sY=p_1C7K&0VcCDb<{^4l1&>GDD`Frp{Vc$x(^J~GI|w?_2px0*;)F)q+MeH+ z2@ST1@B#Kysb#i1%Ebma5$*}Z3_>&Y6hs(sB95Pq?72tlU>hl^8DO!B(NntFM4zU` zQ;%`Xq1l{}GV`t$*_ZTE($1qVai?(7y|cKGscz{|27v4hz-chE5ras;M?_q* z*dNMmBv&x;Ax48_V}!OE_dK>9(g;mT3^7=;zTae?$|fWxNC!EYmu^o6!~kCyVzT|d zWn&FGrx1f!AEIFK8US@}K zRdd=XrRPwTz2$AZkI8@-r(6$RoXlqoyIG97Jy9z1*+!v$%<~^iM;n7bkdd6Yj>2gu 
zK4`@h1C)WbE#)RPw2o=J2iPT8A5;%WOF&>VbIjJmhW=dA0(YI#f)zOHJ_Or|z+zM~ zyB3d@ubthp+SU@cR_^LIQzH1h4J~WtMiI&0u+Bl{ifgoBi{K9}1EDTCqqWw~w7?hJ z-VeT!YyX37yl~o%O{#L!C!L`nX99d*JJ+xh5Q`-DA?BPjDc|(Gxbc#RkZN$uZB+DQ1hl3j^htL} zQaHh4&H#F$$J-&rK{R(eFoWY_2Zu_UxHtj0ZDOb>RVv~bE=5&0zR~5pJCX_4L1f0A zSxRryfnhmv*sw3$v_WR3^MvX20!~vm-I;;RdFyLdUBzq^vsSPqw`UO;;X(!1`eg*M zu>=4+LeQ}&#l2!cDs-1ktERosylFJ!lz1=Q`na_4vVQGjNDNWxekx|vD`HRLqWw+C@H_cw#ltiYR;KR z+e}mZnONI&R;@#aj?qCuS5-axld7lC=)X=hkc?xtZ5mW@7h1L##7niQPCKsQgVu(K8A=NgGfePOolPrmCSRKD1-VfW zC0UqG(-~??46Hc8OApF!SsTU|T3BNP|D@xk1Rm_+rPiy?a}zONq3d|*62oQW-NaMy zFwg;R*3me_MmQWnM3*X^2)-aMg|RDzZ?5%=5EONzZ_jjRXdVY(BjPEuZJ==w^S?|I z^9F79Cq|7*HsF35If^(*6@1)Jdo82-Qpv6uT~0bs(_yX!AD6U+5nlSr3?pn`&VCsfl>VY8{&89P(St zZ`|H~g@~k{5fR0LI+D+2k-kE)I!p>r#5s(jZ;@|(rbKIs7+R%-V9tVDg~Kv}9qPvT{TR8GI1Sk(jTAb4F(Nl00tg zB+#^kTto?FUg{>|c6VSV+?mb3>7d$X1YQ=PjK)iJ#Q3m0XsZNvp?Fl@VQ$$$(-;Ga ztvoCSb>K(gK(!>SWD&sxoNNrkhMX#K6HHAuKt*HsSm>5>EDMf<#b&W?oINectC2ss zkp-sOUMs(vu-h7cjL0@FL_p-jS!7}lqsELQ4x(*M{IR05`BP!*E9GQ8I@{b_ z*1XPy=xo_r#k=v7qO91eCB<05+5SRsHvZCGv{=f5v#|(ijkUOyuJ8%7lJg6f6nG8l z>(?wayVMYxt>n5|0;rI1Buk`aE?6)$4IGd)%|Qst3E9iR2@7v(AzhWRB`9_x+sKaC zE<@TI=>^-zcW;QJE=wQR5KU=JzaCezt zlZMYGM4PT8B8lxW!CI(QTiY&YrYi|b=4mo`giXxYy?v1jZHD@?5nlqZ3d)Csh0h+N zE^B@b9;5c4wQ$O5bCo49B@8d@pTLoVRs6q8nLD8j9!lDTt!=lagS%QZ5R|xJ8<)z$ z;5|d#@uamKv14^@yNiRKl>7`s(ZgI$AEw(hl zg&TX34SIuYAm14YiseRo4e-RGh$}ij5x$h$a0ov#AZc0 zP8cZ;ONzR#`Jc>jUYQ%Pc3GF!o(fW+BT;JNL|u{;_C^*-=OJLj-AZAm^I8lW5gx{k zy@mJ7eBp>Cy7klA3slQvU==rNE$1_>MUs7(x#Urw&U_}#Fv-cAGYwoswM~g5NKf;t zvMXGlESYS1l3KzfX=2iOrDp$YTngrrmOJoyB{vEilqp)yme$}R`l$J+!|TbuGJ_RA ziU>17m2^ynoN|Tl%DAFs zLQY_N)=c$L&erx!d=ZQ&GND^Jbd_UDZj|VVQ*Yu((I_K&+76^mw40n(G#(Ddi@aCD zWxKbD=mzNl^fEQJFWzD6FsaNDUkoCXW6^voP)~Zp>x48I=_@;vU&WchO$W;0r?lLX zmznG`)K!EH2Yhl?xdf55B?XhI9wx&B2SFS$4CO+&^~~sssGD-Hv9^6F?SBo53k%r! 
zgM%;@P8&Jxo!Rj1$#=0=0MORRr}#Xyt=f&e22ZvzXPpZ;E4<(?H%Y2K{@TxvxFjI0 zy^LSn+xIh=+Gs-hV{6@PH8X6rg*2k{pEPni&fC;afeg)1*w~ zCUA1AI7m2dCP52CwVa7T+$z^R(JT*CJ0eEF1l*!z^X8>+(u(AlibQABhhkw`?vgT& z?>MB2+w-Jo^P-|vx`DZy$tEJMuW~|%^px>b5Sr$%Kgpj@i$Mh9#)<6 z>%ohr^{Do1Z%DKr06(}X$(fD$+*Wm5S8s)gdG8SLR>$NT*29N8n^r^G-?QBrrN*5B zc@c$p(%sPuF;b)vH$|MR^b_|K2cwqIZJ{yV6Kp_6Qd@`pS_hdwG`#)wb=V&#xxcm! zWXj8}1M25~+ucl==SY*bthk<#mr%ZallA2HyKETU0VBelzqjz+9U(?F2(S>lhtrma zl4+9T2Xf!)=BCT>JB0w$K=*RG{6(qrpdb`l?iNdDmi{%mT6u0tf%V)@C*&T=^=Dzl~Htb@Rq^aQGDOd zz?>bk*3pdBHy+)`RdeyKN`R%afxkMFeX8DPMqiXnA-Tr83z zPm*7DJOLTI%Dsq;|@d{W_E-tOVM|NfE*g9c#Ol3p8hfFVWwLY&ZU;lDW>@knUMHeCp4QZjpllV0Xe6n)f-b>-yJBVk zE|d3_{Q1G8!36L^#}Aq%ziKDdAT2y>%JdyCVb22FEkHMeE9Ya;4l0bDt8X!85!F&|Y#KsvSLldDt+5M|cyq0)X@?7U z3I-csz^GUAi_(|BTI2bT>};4*s2FhR>XNa72R&_ z`8|qnY7sJ#H)+h}2t|J&ZpqtEW+5LHk$xH?&@d0tGf4q78}Pi40Kje!lp|zhEz!q8 z{vtSM72mtZaa*DRAvjlue)Tg4arrrfm5M{WMQ%=m0WEh_t&$z6+8JGM!oxq-lgaCYuhVH9%yvQ7{ziWaY{ZP*`^< z5OI!LRBlHosg|uUK)5Tn`;%8<0v8+x;0-^GY5v2oWmxS=zQyPqH-QI6qvGdP=0-+# zZ+*!*q*wPr3mMD#U6$(a7Nfv+ZhKu9oY_8zX3S;`$ku}02sxr7nh!#JR+wF*nlN>S z*u1~4TOJrf2yMv}Z8qNq+mT3E<3_0~$dCku&L^|*}?@g>L>7$fT&xjoVqrZxUNMy4si`gB2-x*j~C5)$?j zhVor5tZ898(eoR_b^-$&QAd(4<$mo{F2KQu)4`uk`(907@UP_W`ao?P78rS*M8ct$ zWkumB(wQD6<|SHJw88Y<8!j`+Kt$snNMEd#A!6T4P-`(uNum~?tYww+Msg47&?CYw z!Gj)d(q-EeeP^g{&~bps;dw z%-A0x=CSd<3T>yb^w2B0p_E*CGSW#OD1$nTi(c}(x2BRRYY*n0!l$PDDk$$Qk}K!s z;5u^Wd7)i9Iw+?V$hW8EQ5J}NR&4d}$wdTV^+UFrjIq^HRiH><7=_L#;Dfj}aHq~| z0+&0RC3nuw0f`Zgu_huek|0YFsQ;qoWKEE+?O z&k2W$%~)@RJ1W8bpT!<<_#zFuId@>kVFM5Z+e!A$q2A$n$}gDGE#eRtG*dO zQrnClX3SUKhnvW4CNf~TiB4t*t{KR~{tpyEs8mi@7ABH(N%-{Mpt{LlJKe&KX0Y$~ zDGvm+;7xMn$QX+lj=k))r~_}xB+Cyy6@($hy6BEY!?sADN&a5y{jjtcpfuF%H9@U@ zWFysk?ft0qcpEZdfIvIiV;Y8~I^>l9QR9^~8D-wbfGqmGuRMifEQr^cR4U4rlUx)MEq3Wf?bq6NXCXyN{~YBMF)bk2Dsr6aUXW$mO^d^iU&rqgFO*bpipSI#eWD@Fg@{%I!eJ)=hSx>8&s?@YZYxu7!MNPo82*E|7fY z4AE%Gwi}bsDWo8R3Q!R|00q3bz!qqoB_U=}-H>N4%bnoVsp>RjNqeeHRKkf|a_Hzx zx$-odXg`Z-{)ekN&4JL>^BX+lY5s?+KFxs@6_bCMqe 
z(yF3cku*d}r1wZ-;Y~gehy59Nm^gm&ogB8|wAo}+YqQ1Z8@5@Etv;>H?*ZE&x;V*o zL6rq`u~on`Th%jy7eNImB{me36%!*+`8$ENI_tYbb$T|<8i#gQOAlGg8VBK5);MTu zAtTrg^AjnFgmczNM3!zDrj&BTkk;fMnMr0Dp54~1|l5XR_hm9mPCL@wTJP`|zyYv4QGGv$H?P=vwGmhzT z!{7(mjc+({7uO6rCQN+oh4}{*_;#&;Nya@yI^nibx!T^*Nu==2y*>9HyK&dx&Z9>T zA3Av8_7~r_|DJ6xdd17{-tn@#_PuoPop-$ChU>Rqw{_2d8Mx(EHmtN9BpchYL|V9| z8#VSnr(@?Q64?is%>RsLlTOsX|3o^O|H6+`_3w3cR&+%8jI?XViO;WKgyKXsYMGR1 zSu1{!xlX4YSA{u1)-<5dVZb1uSuzKyT8L2sxz4t5xz1`|HQYAc`-6fT-)+zTp%rrk zfsj+i`H=S^3&UZtdy;}K6N?&sb!fV`J|wlBK9~ULL?(_eXLl2J9_UAgIwGqQei28X2;Z( zKx`Z&m!;6r$uKv4Y4JfKtiy=AdS03DS7Ar7U!TrWSX;ecg4jBG{{tgsapD{=4^)|B zlW4j}-mcBDX^cj{HH;Fr;+TQXo)fhmAp))nodsr5cf|f7MEYAL8FR^fT7G@H{=^eE zKPdE(4F^Zig~IFjMk0b_9x;{s8X+bj>`P)l>#imp0Z@)v4=!y_If(rY@j4j~CDcM< z5N;uhHb;FS^On4lQr$g&i+h7BnreZGj)mXugcQklSgmL`^y`-NQ1V@3E#g@_1!qLF z!^IZUmY!wN9c#DXe)7*?>z-Eb^$39+vFtZv*{{2-@I#y+Y7I3>yr%1fO(gdD9`2>< z0cLvGq}HmqZ*#45?kgxoTyPhB17Y&SIa<=g8Vr+)3WFj4hG0MY8?`TzGKMEpF~2F1 z12e))0&ONl4+?vV!!35rxZGV;D#lGO^c33_Ew2S)I{A@af&n&G(9n>K7Bq}eBN7%N zEoX8eDRYD!?e6M1tZ@c=g%(q2qv+E}T|UOI&a&Cjdzhxd0ZbNbCbn^)Cse`PMmvY1 z8OSiQUK>q3(UmGK2y4rtM#_d-&`4Pm;*dNS*5vqIjMuYlOS%g4Qt?N=A1hW;T#}{$ zvgzN=tx0a5BZwVI7{St3+y?u@2o|{mi)-1nrHAoHmXfp1M%RWk5cjD~EHb@-V|7c1 z&dwds6mViEiew5+mPjUAL1Ct`7Ltt62@Q#|U2>UuSEdCa>g1-D4hu)fEHopJN2X^H zs|(VAEa4RV=68l3WR*~Pc4i`(X)&|As#(O+;n|GfMNknqBY4szFIz4_)U=+p+V5q! 
znwRmEdkzTi#_SLFI@GQ+7?U@6zr+QZ)PFJYR;nbq9KS;Hz_hNsP=}t~LIINkt^cQ& z>sc-qTJMuB78@mQe4P;R(bu@ueUsyuO^4adxZ^B;??H`aZ0Byx*n}~J+n&i?f>Gn0S~2j(I-cU+c=mSsi0=ZWeR&(zwUboPdssin z2<$X(wzf3izQ)aF9%OW&H*^)&fN+x3CAGYxv7oY*Lq@d1@1u$T@P8Ytaa_!1GZDOw|);=C^!)dVl$)Z z!E^*OTD*IWd(d1)zf52$^LdyV%5zO^m8GDPaSvEFMU%&1ois2f5OGKTjF3HYof*!A(ETtA+HZ-sIp7n$fOqrOJGMV z$dHhpChL8YJH%yRBmG7J1$ZF*L9;u!@XbE7elaH{c|ySJ5oOD9Cni_(G~FL&Ff1OI z=Eg=bv3Cb@5y(SL&T}k7RITOb?=^YI-9(CMTXfvA8il1!+kcBJqa<_O zwlH8`HkG?rj>G!3e0m!$D5L!Xy7kk#jWq}J{-_)(BUDa)ip8W7eZY;XKavRmyR)Bh2cp&q^Iw+GCjCcm~hUf#R4x zQ{$9=3%=YS=i^1h56sY5q5I=DM8QIf?8nFzj#?Z z(PDA6e@}6u90fTjs?Q)R*YUk1(lmnGF=Z8%)Pl6sYMYiR3~9hF{sN_56Wha#{)?(c;-gw#iIB^Aw6 zl>+_B`&mHJ53-g&(95Z8Yj&ooE zX?&5G@U^cpP}w2Uz#)M+&;bp!NClrt)%lNb=sOuVaMXY`Tg%6b3o!%&%gfSCBbAS?LQg^1fZ$7_0v z42&?{uy`haTD*3ml~mZ&rb3pYR|!!~bXY(1(s+mlAU_2v(dW}^H0)kk|hVG?DD0V zSSbg04=^vB7h;K-tsNQ8Z)J2&vd9Z>Xd>mEE}{=jW^331<=PH_hmi3|J`Fg&&Kt#q zj79BqigD8-;WF$Fahq;V^|pvs<|~wTLz#{KwaIK@pT`N+ehw4*^oN6s`soj+6?8X0 zaSkbu#?pua?75{_!MnXZ9u0tlD%DP%K(3D@SNQoK2^lh;{z$ZO%k_;LRz_!xSQ#BZ z%8!L|#jr9Q&}&?pT#Y9iP`{1{oNcRdqi2AcO0g?yEUnZbjA*9e*%zKd5=s!9V8vs0 zTaX%Nwsr4@a!QK0F>DOwt7Eq(`DwaYjFA5k&MIA?EDAe*sRua^q%&rN`=H(JLP&Tc z6r5SgxV~`ETz|0ADTMAK5kFNbC86~IC@K{KCZH8>lhmyowH(wzXw@{ZZDJvdk|G{t zedYInUP=svxN?2vrUr3kaf_sd_O#>mitI5P*$kx6a_qbTumEg+0<^88X}XKO*P`(c zJSYmr_;+ah?kA3ZN7#&A9AVL5xs+M@au^+{of)(SNW?cfX3K*})B+P|p_oc*;+>pT z<_1c{6l6h4iIqvzV*f}i;UR~_hF#r^2{EKQ!-zU2SZ)LpBH^!LLT4nA0Ta6O6QMje zK}5&%QRzO37jN*v>WvPM-HM!3<xqbM7e2Y<40A6aBB5CMd;${>(nc&556|6{`5`_F!)%yDy7#v+0wh`zKX#QP;Is2 zIX~j9tr7#vAV43TV;#V?x%I*7pocsVha3qV;A$)b6zoz#jnfOyz)0B<7QWJne-i`^ zAWEziW=S32fh_&nURX|Sr;lv7&{9za+z<2(ujxk0$AK46A>u$098e-`=fi;qS}gV{ zG)GT});D9tGo}xNSEdg)*69PAwniate6V`6#$4TcJF*WH0>UT~!Ku#8R)x72p#(of zS~sH(*k>bkFn%;s2RzeHMI9ywg&*x{5~%|)Jt>ZuHg%ERnSlhz0-SjqxgvSc;CE>7 z7W^K}hRz<>NVUh+YKO*aJYj%!0)dnlj8Kt60C7ek^0mvV{Uj73U`HS52QC>qu-s`5 z@~?0+cO!sgxpm7rq*Vy9No?~9dB{0}r7Z$>qVJ<-a;j~ztj~L}&>cYU*9fHS_YeYN 
zGh{`lQbMFK!N29+m-BiixKbRx6k>{zK@!MW@F&4olYfOlIU_DLz$Wy#bstbmTMrjz zWq6_&J4u1SNz#Evi8B<^lA_rA5aAoR_hgtT50#IKzv6lMjC~znoXm7Mxt0gQ9Ut;Q zr=L3YjwEfXEC2UU+Ky!f5|fNEC>=aTubl!M3@K53N23 z-g^+#t^xFSsyk4d0=e{hULm0xiMbRsDKhOqpo0lV!nO@Goih2F8aXKBE~bxq zBI+CJ^vbF0UpCe$_Z-nQOc2n~c}pQ3>)=TItss*c7;C9wz(Tu_`G7a+T>^m8)1(uS z(5a5uZa&beR0wF|lDeUq(b%b*m8Ff!cBwRxuv?NX?Q1D>2|`D(YU1fph5!)lb`{n& zp&fGV-o-b5tAuZBFlD{{k32An9t}n1tDwv)csW-%$sWLD1~d{C)49KWi(D4J*~?;e z>}&Z?P6&qa>qv`FBy~uSf3@KHq0rWk)T?)-3x$)(NAZT3j6y%*g;~9Hk)Aq;k06s% zvJU;-Tlk5zRY?+0rNa5L$u0j)p zCzX>4`N(MSR7N-Yo{5o2 za1{}w%}9^9;cj`W0=Yp2i(%p&(z$ehOnbPF=#}COSCNV&AHLRC>BF0?((QGYQK_>G z3novSWjysNb)=J~5`B`X*-EFIC__3Gw;l*I*-{cy}+v=VVLK9!yqg)fk2s$wfM|>2Wqb^ErFQkTzMH;eJ4s2=2>ONDoE@|#|cu` z#)PaUzX69ujg*wMx3JpQTTBZkt3(c|?Mo~Mxxi|L(;`I!(cW8_E1FWz7e%T2kl&Od zfubL5)c`+T)D+5KhOG@BwT^^$>?{0P@krsf{Fxji#cJQ-!fC#xXIy8BzwBHryMIQo z46l(4&Uls1v<7Ea6b(o^+mTLFIs@%dr0Knd>Z?RQqTzUOX?#?hLZM7+S-z7HqpM8HH5u7XTxAkuiw+P zEB4@INbt5UiFuk*=C7)Z-Ea6IJPdF6nGaG@OhfWmSeLHk?cL!NGfr@juTMID@<4Sf zM#r0fy1MnJDqH!Iwp(^GM$EOOkN$TIa9Q(%>G21vWO=4S>+63KqM6+9SGsX-*K2h( zkrCOed^O47N3L};_>s-X;P-)9waSAdKTc@^6o3A*TjCZN# zaA{*r_7zd{lo_E`>}jUf(`c!xdssRvNuA_z?18t$)oZhk*ZP9omz7bMUQU&&y56v} zG7xHE6lBj$Xcu}0$6bF?^`&;8z#s?U9?*w(b_U`VN#%WHbG`Y&)E8l45r{NW*#q@W zF=VhSLQ(MbDHNY>Naw)?gwU%!t>?-o3O}rmdkfQL-9?D!+CNN|+3f8iOz08 zGbVPjCe$b-l%6&iPS!hL)r-i#$;<=1IKvB6KZuYStB7Vvbf@%FmC^&ylgAq17-x8O z>L;3uZ9`rUN@4imujyJCi@+x@l$Nlen&>lr(6R_p)70>;>#@oke%PzaiKjKDQJU$h zo`@Tl^dy9NMb9$O)vN?VEWlMHZ;WlFd@?zEjT5;Vn3`jZ1ZG|s3B=N#e^SRHq(j(Y zYBx@8ZYc1^8iADtOaw8+RIx8bLNeOGR8<3z;bcMDv@Yj=v+tBUrwt z_zEnd8e$951=)tY-7sH)|3Jsdk&R*_JXit%?`au~A8t8`8I<%>!QO8i$J*T{W?hm= ztjY$L`HkxkFv_4`C<&GCMi3TuDMV~o_LYuw1{lr?7olor^aN|C1=~5`78y7LNZ&JQ zJKr#F$psu>Y}bvUG!v%_^Bxl=-oQS!JuNfHy>koO2f{SdPRG>6DrLlm!XxDx7SYlKVCcS|3$jf zX{1*&jl>Q@7h2MT@t}?>9Wy^a$PIN2hnY?qEBqc6Qmj5tj{=`63#e7qLIY7CtqX)Znv{Y>ez0*CKh z_T8SI&Wdu;czZfem+k2&O?>N9#NW3i7dbD>CCDM^ZmwtH{XEed`$@=44Fm@!&@K{+ zFEtfUmt85i_@<0mcyl<4K%D@{v}?~6PbUAq 
zO)$(At4iG0Rq4%EWN+a!Qirx0`9J=l9?c1`Ileg_j|GbHL9oIr z1NGM51*o0hID}v%u)^dFf=U@l=Nr-jfeQ!%R84(a`o=miL zs%pR%C>t7~2HIFT0JiAYD|x^c$`P<7_)2}E)e0m|Dn3i*Y8SI=ab;GTx)rCm3SzcULx<4I5&$JwKUl<+5Q2Z2{}0_Ar_2em&~H zag{Hlnt7KFacH_enQBF&Tn94TbM11}@fV+%KG#rvtf>+s?ZC%~*#sZz!-zGj1W2T9 zuYV{Cerfw_(_bSZ>Gg`f@m*C+i4`8S>~!WY-{aU$L?DTFVU9d8;=k_$kc_3T-U}d; z_X?cVd!zf2X+C{#wHL`3$S3bLBH(_!c%;%U_v7?IjVWEY7qM}fcYK~c$maz$r9IxB zzOUnC^2D~({t+R?^ufIQvDWzzwM`=0(;@2F{Yd8%he#K!==_@dv6OD=euVvv9Wn)B zd!Gd?2@)#;3C~~z&0t250WN|Gz%?*6N(d1+$04U9=9*LHx@wt}U@h2^b>j{d5Lx1s z%AXzlkEP&$B=kuB=~CVQh|DXKKjben`ImI(4k!O3&*c+yPcBJ>RTaxKlyxD?5MjL? zbuzHT*9>#t`~(i4jFc~T=IN80y-3;6fXTp8XTaP`e!ZTdpN4#~Gh*&)Q!9e}GC~xX z-$}eXyV2DFxenBYt~yX}|EnRKqLDT&b~ORDMA^`Yfx6HYfV$|{m)&;UPD@=8sFkKx z1k|N2L*7O3BzWuU4M5Fz*Ma)Nmk73>58j205$6j-+_u>y2MO%Mxf?;nt+<8Y-q$l&GrPKF8K9Dw;h3+ z?}x*tX1F<&)v8feO9X1ZsTBdm*=m5A+d`BKGPtWZ05uJv%7FU0 z^AL$_Kb|FKrOtj0)HxG1HR73?P6AL04uihC?FiJlBmyXW4Y%LlD%31Qb)UnpBD1k)QmS%JyVy$1F_umX_ z2QsW}YQ^|E0Or!WkY!0wD20q$t`qI^oaM*>YuHXRoP^u}*eqp3BSw<*TLWP8e!Y|j zY<6n|Y@w+Y0doPn0Fts2=AvYTR>@3I*HpIEfoj0qbK9DLI#1cqh=Dr0EdVv^*B9J& zoUQZQB2c8^+AuVMGrJ8<1${VO2rXvg$uO?D5+k`t1+Y#@&7gp;IM3gb4yCta&2QgZ zxU}t+%%f#LNw}lerH?3J$}d*vb5(s_c2%h}&@1vQe!~WfHIi3)4XpPF3kB$9Dy^&1 z5Z4vtWWK&YXi>j<|#bzMM^2|d{-V?5JITE>9lzhhi9@L4oC zM8Sskz}39OJ*j&SdWd>%PtQ=VX&X$b-4U_^ZHjH9+w&yJt&CANYYSjUwugnC@arjq z78iDGdtBI4+w%ifD;lN-Jh`2=PP^)){$f1bo~&4pnA%t=KdRH)H7eZ>oSvax)2K$S z=a!UjLM2?-0U7trZw=z!ItS!NIeHhg#za9yHgSJ0@eH_(fQ!Cm-oSdfqsq4Lm zrm|M+duW)eBeSXgoiCv&wnqj&azhhPqm*q=)rj}d@C^Z|albyD2Ws?&2-HMVD+21& z4FQ=Z^#r`}8vvBg>7<~EF)UjY6IGM^uY%fZ;VJw{T$_8?C zDy;GiTsZFiaOQ^mCZc$LKTNTp1>KTsrZTOiJ$r*4bLq5~Saff1;oJ?}bT~s_oQI zwNmia^|d}&;8@2yPwz$6d1!d z>%SsKy~#O1a9n%D4#IDL@nDxz$^e&+MGS_jL{MeZar6R^G9 zZLLri(?O^KoIApP)tyFg1^2J$R&FQf&$K|Z5`Zc-&goZ%aN8-`i#Aa96|vfdE&v=EgV;qz5uY!UO#$` zQ<)l=k=9^GuvvqsBbh8b>*U_V#x2Anzu3<3kppgt-r%a)d8E)!X-iS8$leeFwV8nf zUw{l8h|M9Uaf5-w6oc-?i^-$TQ9uCzN+dI;6JBi{d8drw>`A_fdIOo@q7S2!MIf5V 
zQ={`_tSb06!*teHD%5s;W{sa5q?^;6|IZmuhIZ9=+IKz+yDmr0IJ_&*89RiST#^(U z@Xj6D6?WQ~Un}2^ZFhotWLMm2<4vvT=P?fKOzaA1dWvT{=_UZX&a&5YfPMB?(ITBo zN!#znCcuVoRNrdEfUQTsM*aGf+m0k)_{Ip>SW_zkZ1l$J&D?@;V*qV}XL`zwG=R3o zD>||{;2$36%tSV4>rIWOLpKRDHDb`#ZVYoa;@1;yJAyWJQv_}Fri}yil-@+_%uNBX zah~afn`i*+GOsegCVn5lGK2V9QfFRUBj=m`XHhi<-9ruqBu=NbEk()OIY?v7v zy*U6j#xosv6Agf^@+t%DV_$%tXWIAWoAb2PShf}*2sNO}HwVCmDgtcGKSsdTZjLiH zT-n$P@_c2tj^i;;yNHGCvYl?kZH#7ZB*QjMk2TfiU`WM^^VKAr$?!E!G67JC_ksv! z9o-15j&3d9FN1qJt{s#N^(%~@?^n@IpS<;2UzfMOkn5ry#Wo7^5zc;X%9~E6i*;f4 zR3*=)yr-wU@uqUZ?6luB2(v+aovaAyCMvtdA8KJ2GyD05a5JlC47-@y&;Bk|c1e2t zbW1Qg9LdYDa&??I^WJZK;RByv{oc)Ffj!#zeQm?S{E@*X6V<^JR`edI*>jmf)ku{eovZ+4G)aP@Px<;I}k!SNP>Jzd(b z8C&$@{S&iW@?Z=v;;wvtAk%9viau`Q?cPUqNg`%q}dYPgs)DjD= z#X`{b{kz35)?LH9v8niuvudI+$`H=?jxafz|ySG*xoLAY7lR^*423ca>7plt;Yh zZ?GeALD+#;l1AbYE?u{glIfa0fFMQV!jSWN!YgOg*{my{_urWVCzF4)>1Hkm*(_D( zRdj(uQRp{Z=Ui!?O2K}#SgVAI>wt2!FCW0Hn%!(~4QRz}fHKt5BL>5&va|G3t5jYeatYi4y_U<6U(fL9gtOx~@- zhC0FD`rzThdHjt5L95B(!XNW39cnvL__$u*$UN!E{7mV|C?-{a%AKyqGWbT0Iae<_$f;wDhcy#sj~+7? zO&rv!P8`ephG0ET%VSvF4px$I{>xB}0+nnj!v|R$(BR<7gq+y5$<#ZcQ91c4K9r!D zhTI&*$Uby9qR>L5VU28@J5HsU zCULlQ&!{n6x~l>Pmu>{g07hT|D*_0p0H~>7IVO({64f#L2cgL*JZckdM+bvCqcgXX zElOw2ncEqhx#U=qGqzQW` zvKt)wI@F@mGZmej(NawvM8~8saRTo@-_7Fe<8AITZ_K3~{p7%BjOF8UzD`frU8m`KL8-W7;#PMhtwOf&bi9rb!Z{ zp9@W@fvM&FzVZT2!8}*U17>~SA-8}D9?hdUXa~>f`IH;T;@Pvpqq(Ce6MUjqRhrP4 zo+BOwFu(7sA*nfCGQGrW2%XeLiZPU8DbmTkg|8i!s97Va$H5pOVj5aza@O7f~GkoS9h?v#>$i-Gg;LKdW_s_%V?qjn zynL(*S6@5afMshGeM9mcVoZV_WQ-*a6c2|mg(#g1oNnS!V}T3!`gHOLEiVdrB<1Dw zqHu_Oogg7vr@Nm;IoAz9F_wH?%AFygU6L2M-OgIO+IRU@cgxX%hfn$j%hCqwg&v6b z=;Uk+ppkayfe@|ef<v@tbzPfshcDV7eHlrmFy?w58T&u~e)d)+wb9)EPp3li%NB zjcvlVA1}&e{o_>qo=6L^LjrK-ji@&--b+o*1a)@P}|2?A`aJz{)p6! z@UsloQmSv#P>1d%VP`oZNf5&n+iUC~+3L0N$0B! 
z05;mQn(?u#JmZ4?InB5N@V?L5igs;0pw*jvxu-j7+>by?Q7^x=Us;B*;2(ARlyoA; zpqGw@-;QKDxogjHF1Sxeq!K9dSBH7U?718M_u6W=5mHh!}!B0uGB{Xuki{ zoBP?K+Amz|sP+pl2&%zzYf}8#AjPvNk{5-B=Zsz9BJ-{HOZ-b$BLcu{lP@^3^|BDF zD6BvvK8nZ?u#ThpK@y~z-XT^b?=MPo5yC|?Z81Y{HJ$IRRc@;q<=5d;3asV&zTm#s z)psfMz4nxSqk880wobOf2!+q=6C~8uGDvKRwr~TJjHv5X)netn4lZ%o8uvB{s|8gKLzN*sq5QM3}a??Yc+aRa= ztH_9j*r$2w3a5gx9RZS}=RlsW^wu13#-G_cgC%;yIkCyTKyrF7#?g92(a=kSe68c6 zU;Zi)MFUx=(@XPlEpB#%vY`PRbB10TL!JD3l6Dc-LuB&EOQU&tyr~r--`Go$c3;Y> zGp(n1rju?0u+;#+PcwYo;tf>_N zGrmvYjq*30aODP&F7rydi010iKO~YUJ1MrdFCWq9xmtI1HDZu1?~9Q{cL~yQ{}@49 z&p;ZvYhx<{Y4k3EG<=uUxiMF70O}I2GNAtbqq*a2t9Rwk>>H@duC7K5)TO%uQ0sm@ z>c0`F%Xei?gqLk>Mc}O8h2HnFuz({x)6th{Tn&J&@G1lB^mHyD?9$8ffk_5z)z#IA z0b6-l0Bp^#N5bELS*tILfUP&RBEZ&Oh9Y{mHlX-I#<;IW1o<$TTc$;FOG} zcyD3kZY?Mq;ck1WMkx~73eh%0cXNqYJ=$jMZr^R2wQ7rXv`}9ST=?Z{9T$H21;qsx z+$%bj3w4_En}$#)2rtV95x$$9i-x4Q{Ruv&)3wE(;8QJvw-&d5mTF)}9)-@SN(|DU zzMGr-;I=axilGvqNZejLf49y@A`;zQ-5T7o4&m>u^oJ1sS%W(6zao!WY|`A_c}66E zPC9=#SF0F8+MDI~S`a@5vZ^oW;smapV;gxzok%utxzSft<;?7=42sRW@iEGV z223PJUJ;1ogkMk5ZjJqpy&@9HQ%$YNQaAYu4R?aS>1kI6>^ew8_tZi9pMMSA&KU3T zJ=I+XX;%%T5z2-}4ARg&0i-d%o^;!hp^V%UK^kvrMIcSwBap`K@$uPHuHFFDS`Mgx zpUs_pSZ``B9kPz8Mhw(i1Zu>uC){=fYG{81YIOg`p$VK3tg2)CzaPxxSFUve@+&VK z0Wt26d-2Bj8(4x}KCp#}{a_@14i1v-uloMX4s#l;pB28IQmoi1*YZ{Ds7+mU9+dp5 z5BJJ_el;b9iEDjvb+GdV3Teg}(wpA=TC1~sz~A?Q{Ww>8l?7PGtv6E@uOcIV&$Yf( z?|I>t%A(oFpM2IGkW5loYHeEv95Yb6ukd% z;Sc#7GQhz1$w|#2B3z@qJB>-^y$q9FX-D!g)AZjJ@HD6H+T^hO;!V}v^S9I5%8 z?x{U>=JuWZ$)3qQd1mfrPg6EDU_NqcPhf6m{Q5lY*7)t|J&})0H?<;&&+K7p_Aqp& z=p4`Vyqf^*I@g_eNgc3{{C@5*$f=h!0X9k5(1-z>cu4?k%CFD4?a1IJUlIX3)6|N9 znSP1Dn|g_L%d@WD0MytWb)Xu~^o-xp1k?m&vA~33z{>9kKu!AfwA+qAP23TII^EQY zz?r-Qgt;Tk))}7Zw3}!EZ1m1LU=3$_#_r6Ef?mdP%7#WfTcdXdz)tz~8Mhq)8^1FG zHrdpQ06TRj7A=Z!h#-O(4Qf-r_x9>h=?^;aFhN+X z0{X}GM86o*lktrlC7mqs352Y{cIYC#Rl-N9(ytjo=^Q(B0PYr#zfK&ogI?@TrKw6l ziru|Brf|40Cg^1U&mBcz2*jJ>q))j~OJN+P^e(N0x}dZIG<$W32GmNq*G5!AM{M@A zP8@yf2|B{>fAqvx|7zi_@BPZx-~4~`D)~q7mcm15&pL>{r}d3*)=VzSQz59}1;|%=|*KOGj 
z9#P;mR=cCSZ>AWY3jQkQx=(hWM^dEty@lapy@j1zC&j&roO0Vqti~J$^9PkD*aAd6 z=#jpcSQW0DlV~!{#}HSMD%3^CQ6|60)vAw#ll)Kc--qM(Nv1)f$|*ewGg9P=PJk$y zMJGTew8T6W^^&#$hE^UG{~^P}^?VPdLl;{gI?^(s0wat-{>l)HOE?QV=jR-$;v*o& zBaVPzHZ0z!v28d6LV!y;?Mc|=5Xh7qBFJ9n6>>t%`Z;~aAyN@J+nWB{d&%FVDUtuD zH^!{b;w8zvm>!L`0kJ)72h9`6m7`Pty{whz0$oD0mNOo8FvPOuc z&b7CSEEt2!g!31YQ`gmEgn)uA0X1k{K2{+qay`a(iRvDb->2*Bla>ch zR`)45>=0_%o`EYMw4V2;C| zbkZPB7um;^5mQsHh{*#SCJWccW$PjML!_#Xq*Y_aB}b`pp}9$G7OMJ4dK~7sG5XXP zUuPkL)X}FX|3sR0$52xox&xKUYrPgb3S~AM*l5C3%51V#-c};A%0ZQ^d{L73*TUv~RPlnSvfO?7i6$tIKK( zx@zW1G@o`)24Dmhuw=qDh>DndpJhJ+BaN~X3cO@P+YanQR?j74Cb5ULgc3Tev&^ji z#&x#cA~QIwP_r}%2zv{c?=^~tqcVg9+zZv}OFpSAoBvA~+(?AjfSVxi&Jnb~HznP(kjq#v}_WJXLA}nZg zQq>^BrK`tI=2QD3%FKx(~w%bYF!U=)R+rh-}q4twy>pP5}Nl(S2$}x~Z{2W<>XuVn@Qqy;mUR zYD}nOQZ9rI@la;tVHbsyq6~zR;&%f%0uJ|pfD@VCaK=}FghaX`{sWPKpVOH-T@fSM zIQUV2sO^~%aJFEMfD^(2#gtTQBJ0#K;uxmRw-Oc_sS{~Sq1h~6XQe38R1+}YqUqyHPAi;=bB37P;himrEij+&cTWVD5Yls!i)PnT)ZVz1MP?0@sTV zN9kL9w%4H{gMG!8B1$YhV39c_4JYMf-jyg7mUhikq*S8*K1wI}s8!v?B7dMuTubNZ z#I^PoK6O}Gyeb2ai6=&G7*0=vK=eKrJ8Jx#Fxxej)Wp31;Z^3g^)paCK9g(sP9m#G0QQ$t+t z{>qKXN{=EH<9Pt|s0TRMuK*ygJz6WL7WVqgLP>^Gw1OWAKl#i8i#=();LL^0e9~tN zDiV8OvZv`zLL7%5eQ2P>y7%eOfQj!RQAW%7)Af(%>L2G0)ejBm#0CvtV7cSRWq#dw zXkb#J0*c2eJ)z3h%hA&FaCUEqiS8tc0(N6nHMpV(c7+M!o4kT!gYw{IAviUZi@vOy)Bw>{rU{A zYnJ%&+cFbwQ!84^Q@25qZ=?8>tDg4XoC&w75@wa#uD%KP{nxq)_x&%N3D*d4j)kh* z*v>aClPKT}>#8+A#~ROSjd{1A=LIT@HWRkT-`U%^ht3E3FEuqo3iR6)6SB9kavNqI zwol|>pgWtYc8yk7xKRR{P0WclQj820tSHg9iHdSNC!LeE<~D_PVx_o^piTpInfEf_ ztO~d_fD_Z*ntpQ_uMeH4tGvpR4y@niO9R7!IC}Hlfpk-gkoM^@wZF!&fmR=tLT~8B zfw1?Pp+neZhdcG+8eu@*mDJB@zs#Uu;}A20ChM#4V+V)|qb#`NSI z--w0S6`0wzGGs3>~V&_P&&4N>f6W!vv++dt;?RpWR>1Io>(%E?AGp?e(+6HS-sHs}LH@9MsaxK_rJ-9+ zrWflT#nGm9OD8O`1QUln66vJhG(Y3h4-$-`@DaYG2(eIapVakrkuS z9Cg^n6Z!t2au(vTN@I}AJd%xmz-T;?7tqFSZhOWYpY5ZZ%WxHsq!ezs_sV`j z1Gr-_c)V>`#t&&Vau^cde7oF>XpoMsKWwJ+i`d>tA1DT8xCsSH5LRN zXA{`WZ)km&`InoLXs!Cojw@cbjAB5%Uw8#JorlKQvh^OG9qyS z2PIVP;W83`gO$r=o9Bwf2xRAoDHA{=SKC!Gnqe 
zWGHb^vL&6)%*L!6!OX$40A&Tw?OA9YdlBi|xD`!Q1Ns_O-f}H{TREg}D~I%LErWjk(cmKAo+2WPlmYE0kVsMEf35OGn zo;iz}K)sA%*oXpYC$-k>W*Ho&>SmYe`o}96V~DY%JsV@<$EP&LFxG}3i72x-7-Rk$ zvbu~hJU21M1S9zD24l=R1CREHgP1ctcd~zH`;I*=kEl)|Mq`ZppBrnim8YWxL`nRZ z#e2!3rZJM(q{$KzJJQb?sx?|dGDv(ov2#0kRd9tSUXwU!=Y#WzTrM}uP3$#7hMm4w zm;SmSv!;bZiLv7d^GQHbk|bk3IV)raNi|r!+ApdOCV!+|0`AzaJ$~fm?p^XIXIA5w z!v-@`@GHY2m7UApUnkPyvsmQTnt#E5V4P{{U<9XuvY6Q?V6zz950iCdk#+#bqUVl- zwg>=n23K#Ys944#o$XQXIT`2+oM{X!a$X+FSR3E$jUnGtE;DFglsLQc&IjAosfE?- z01Fo4iL@>u3~A~`Cu54D1@UhR?k#-&sHk8%e7qLMF(g0&Mz3o{ICpkP-bJjK8k-5s?V_aDsY*Zt{tc>a^-eAfk9Ft*m zyq(eJ^(dx7dPu0Q%w-azJry4PH5xd1`-D zE`SVxIT4dFBil#Pk*T zMiYhX-AEyOYZS6KP{`guA$#i-61hYalGvs|Au({IBkaf8DPbl}QdDoCo47K}ewFIx zH4iy^p;)&jNwH(4gbX)t49@FE|J#YB8|$y3vY`OHN_CXtpx!e6~x#`Zg|s zS6xDemnZq)b6N=WuP2>oWd}^3aA!HvpeV6*VF+8uB*KIxk_qUW$wZ_r|3*3c5JY4G zr4#=AiI+*<12R8_OkpPVxC$aFW@KUxM53z51bZfu32<2%wQ6LdFPZW@6z|iD&*caN z-Qv7a&ajfjO0Pqp5>p$g1ZDG7Vtz9!0Stx+5t`mR=~vhRZEz%2lr2|i8S|KOhV^ci zGi;Tur<|G?*dQ}6=LiQQM3z=Wgm8Ng&7H2(_4g^iK*&10})}#@8ni2 zPtvtwWv^JLS4`6#Dal-J#Y9Os_S!I48C^G0W^*NFN3bwM^xIFrniXclE9TiraL|O+ zy1hu6c{v1G3$z8|iyY`94}0@c;WcI9xUUrJwv8*B_ zHm;sG2JlL2d7f6Uk^9v63^BLL@`40g_rkT-b9*r%(o;soH6fI&sY~(}43rlx*!LBp zRzg2VL-CA^lW?pyJ+KQ+Pd9>KXO;_(0TO$r%j21O-&zxf89b;#^$=LKTzDn?vKcc~ zog1RvxuoQ7j7{%QTj(epn2u}|5n7(hEBV_jVN0u4g!rcr8GO3Nv!A;|{e!#7S8Y$k z99_iABT#J33hJp%S`?q05r1KS3?kD^{kjsUfm_ez-sj|ZwS9U6!Mhf3dd#*#t5#HLol3gXB^@EiCfK&* zCdNwUMuz*d)JkJb)ln__cl&a4U2U3~D_Rv{+f{EW`F5`k1h-h6$TS+-&)4x`gbck} zTUwPmM_kBqzDrCj_XB_g4oDIa_Nw={GrhtjMxYpDQM{<1v}{Mp~>hv`%X#&&Mt&LQ~A(msu4~nDBe#4Inr)SxmfyNc}+IU!gilSi+6=9@>VKVZI`c*F~bS$`2`3-;;<$Pq) z(&ghPZ!J{X$eL5CmU{oHrP79QKy+TuUs9tjT`8S3O17obrIYFQ?M^YdWy0u zDm(A8%ME3(i)AmWEZ2fjc3EZr!etj5%5IBgmsIw%F1x6*pK;lRhO%2@*$XQBmoB@Y zvVX3!7#MfL61SLI-&nb&vL*RgOV6B}nQLgKC0X}S=GDw!xS2UM^J$lzX((HWWoK3P zGcG%$vY!fF&TQlJpSj+-5;3T^6w>9+2dX9Jm2_1SgT&p)f}7qYdl|FJT=GFCkE()^ z_`JIdZ0ztSoo(rXPpRrpB=2wID~4vi!&h%jo{)eTHMpPDt5@%CM_F>(rGWe@=B%ZZBPhvyTMx#&H`2 
z@w8@dfp8C2P}PG((mo;H$Ca@92*iuDWzjY^)}}|7eMuS#oiPxrH6YHnZX9WCW#(EH*a(*N1+@Uk2xQuYIN{N*tFv|%ZH5obXe;*d z68EgsR&S;?8|iRcwcpV8XVa-Q8(~m-j>hL0PfUJoxJ66pIPFM^(wQarVe+AET9*mX zWW%so+*4H4VlU~*_cLmcrpCD_2h`*!mWVP1G+0sVHp$R3=C7Y89*}9@4JJ~WL)x>39A#a}t z+>zmnmh}=9wajO!2X1ZN)EgQtu8gysdn;K{|rrB0AdN<_3FS(PM_G}7Fz0ao@=-U~72_2m00J<*^iR32?^OwUrfCI{}g zep~Dmf?PI?W1ZQwEp={KJN(Yf2m21N7HLPPL13X#FgY)-3%`N6pA*-e6{oFihiM|l z4W=9jN*_P+DW@ z`1a<1f2)4$=uSKN_W8^H*4~}6#OXIfy>cwct)Y}Gb*IGorbAzPv%aA?bKWn#_(gwf z4d1>Sz7@OEHh%lor`%>sciP3b)88JVp2V(IZhmb`=kRaiFB9@eK-*d^BiAzxt6OQ^ z!biG-{ON)QJ?)QWFZENFDO2e=tio%9l-j}8-p&)h##x^FZH28ot#@PYaE%U;PPTic z=yF_LdgjN}Wsq@n`|2iLo*twu!cErYa_n+_8>R8c&GOW5lYld$cUrAmFyCsnu(vQ-lv>r6GBNBa3XW}+^}&l1Ud(Ru z3+8D%N8Ae9kH|nWauTTE1k6;qbHEK``YLNPZJ^Tor^LQU>N9^a5q5W)mN>Rl1{w@Hl zXL?3x$4Z>jjI*Smq-A9H);=;}2H34f!>wKppoFRtZm^)A^sa76iHCr#BYLMC2Qu&;4={KxMv7{ z^2Boi93@xNdL_X5xowY#-5VG-Qs1~Xq=>#S14FJcB+l(vm}j<2oLlX(Y;H9+lr)cV z4rPsL^5t^H%v`vuQVhPgVuiKa#_!-m!b|Y7jRzPQY=%X--h7}5>QMo!UQa-n*ib~a zK(jlNKSj!D!L0B=Uu%KGLRWJIhPDWGv=g2c@~~B(Yz>4w&t(mgp)Jq*>qR|F>G4;I zM~TU>6>(eXt@MNPYvHG=)SW(YU$n5bKWvWC)4(w@hGCKJIicAT7;~QAP0pD`SfwD# zNgA88vIe07k|Ci&!o~(+BJj6JpMn&H^kca)?L@E;4av(Z8j++W3vXgNh&Bi{k>v!D zrcvfF%F{QTqFTC=++lG7x#L`;+~I@kk`BBW7CdNfc@V=T&nt)@NF!oUB3n2iGJq$<5&&|`4;V88 z)Asy@`9?5{Oxr@CMO%cw4O~y$mRUsnt}$)qS!ABK5>?+KB4OzRV^RNR410QKSO)%T z<#G(W#}oZzG*|2*zZoux`8V@DgBL+H-!pgs3V0hfgG8=#+Z947Ub;sj;~Qaecpn(g zb{QkY;EHmy!B_bdLR2mo9F4dWy)M$vJGk3qSsq6{jG>{bbZ9Y}R$r_Q+fL4gac5r4f@`d>s;V@+eavCNAXCWK5T;-9haksu3kC{@Q?Iu|pJ52crSyA7GkzSp5<2R($6S2*E@Go(DIwF05@9}!HTg3ON=sn4T3KvCa|pm|I|l)@0ia9$ z251iPobq7gMu6rAG-|VLOh_ZzMIbM%q9whR&m(NgZ+0q{k?L~;PXYAulL37(0=+Dt zX|QbZ(sq#GhVK2O?igd1|JXFD)no!>UKYB7NEh`CWMf%y28|dp9sIN4#JVt7)d&|D z^fWe$`-@xZlmnb63Pb29U5>plKx#YF57Exe#eK`^5OIo&ZZ%R5uqUG)toRQ?MS9>m=u5fY1xd23?CMtxbN>qqV32N&`g4$Xm zsI7sZwg$bUwXSzy(icH(t>D6d7@1^kvjGAPNMOAqI+lDq5vAcMP~DWY6Q8%I1;iw06WsMg$aw3Gzz+WqS5D z_8eIv6Rj0SL1`5G)EZ--dMXH)4ya*MO}Q+H)Zs 
zY}g9Vl%6Id-!B>>p)BGjayXkEVFKSG+jAyiMZn<&Nj-V795N&Ffz2wD&_&-Ui690tw|GdC$Pa~EwK%m_qNUP-f}#A9nq&$v%Ss_Iv}qtl~h#bC$V^Rieyfp@nvWy z^kXMK)a9UeY>CGUEN(AAC3+{AuLD4*BZb#@g2OiJyH?MqMSEDZD!W z{{M;ZgAi)5i($~H0ycsms)#|igR-OXuziKN77`_8b;sl~y%xwS%%p_dC>luW=}1;A z)FK)v^Yf&l~?wDoCZ4}bU`Zbb0%?!d>(hAZC4(m5WbMJ@@BXO36?!n7-d!=>Cp%oG0Fy3 z3K^rAUhB=+Y1cSeW~>TYu9V**OHTX=a7GzyrGGa=F@vcM9ihA>{mSA{l72^j-u~qw z@R-a2;9Xk8L_wM%Y`oPH%awMdT`74tcqOd-O|PTe9&n>z0SIF^#{sfTty5o^94ynR ze_5o%ddXDsPPPb#2xZFEDn-%rkK;1^5iJK!FXbfeBu$Q$x!`>8rV_?%Pyfdk7(?0i z0gc_j1^E-Fj1-Nyb7~77!is7GK+>ffrq>&Iz6IS(^R9iRnxKkxvSxTc5#jyA4Bn?t zI6M*??=!+7i+PFC=uH4pOR zagZN-=7TgYKhr^~lJ{Y4-QpnYm@RT7J<3w%1{}yRAR}30oG1V(Yp8!EB+yRIX5%wP z7zkS_`8(b=&&l~XC+D&`k%6mW*GG4X zoH}ZP0=5?9o<~<1mC_{VR9?7)N0fkS4AvE%`&YKfjKQU zA2&t50q3Hx<*W!DlCRA<*n}cr1~LK+W=0^RCl{e~)WSb>$ujzc=+jlQJZTaG(Ulya{@1rh)YMqoz>;^0*7O|(_ndlRq$61ZW%hgiK7E_!!AK=X&~o$%2Bks z5*UWbdtK_9>u=aOC@0eD6djTHQx~6l{7F1CY$1gJ25w3OjEYBjY*ES|=szza!X7i) znVYgVZ2n{KSnA-p8nv~RSm~mklv~#OT)B~5d}ogh@l@mMv78mdT)Ao0Ljjx8@061^ zsP>F@xpFf?9c&`!gx8c4Dyf`#XRh3u$i0HOWNoFK%$zD`HjpbfDcZsB!Lk_KfGTHN zvEtp@ZplFV6WJgJ5Tpxs$BUBgtX^M}Hy8S=L>4Wn?VNW zwlvYnZRrAbMNkg&YVi7A;GQ+13&$d^%eJ(vM~+?7)Ua)}rM*Tra`9PHBR4jrc<0IF zUAZkiQd7~DX*TQGc1;`BQrR^#<$M9s^^{vWn33lOvMoKT^k&^HEm@8HJ?SM7)` zo{B%GGqFWW!xNe?Xj^oo%+}hN8FY}sT8;lmKJ7|JftbgqMcBe$hP~ghQXCR+B@+I#!+KKT$3#7|gA2;J87>dOsw-R7+ zwTJQZS9SL&3~LXH-m~33$|)Tg8--q* zqax2g3XNUcb3gV1%{@mX+8=CmK0g0*{|ndl-2cK0GWTsV%Zx};ST<)Gfy%Z=v&_W= z0t&4shRFyO8KV=2n+d!LqHkB!GD0oD zT_24_bG5RwiM0>8g?6_Gmm1Y(_#Mz-Y!Zx?#o&g_GCN`eBVw6=#=RdweGy;FH6Sb8 z>JGRM^S47%(Z@)u5!g_=ntLHNg0>^Kos+FRUBs65E!02aUHP`QBiQ+NSmKga!#KqE zs;%gpp4lj9E5 zzh7ka9NW#$hUGbt?{{y8#;g5x@{O)kBj0QN&jq#8A0av|9^X*bMCE|q)3W~ak8wc zx_6&*_SyS;{J-ZG$tg+Kz*#>bx$p(0HDB~%VEL%j08*nS4=Cuiylf1SkMXc81j<-+ zu>q!pBR%?{{leB!8S|9lVS28r>5Dc;YiRd?9EO$6^d2JfgcTz}HFQ}jF zwgM+A8-%AcDFqM678sje6U1#gC=`g1$z?mKJM*7Sq1Z-p9W*R2<5W2qWjC3L<(u&0 z#u8{r-`eA5_5>yS4N{yJG?w`>;P*^9#^69FAE$f*nJVHyM+bP0E#>U(w7r5|lkFfm 
zRfNaSrHgQ;cooh0MnAwthi7PsOm!`fxep*)yct?04K)pr963Q=_p;!t ztd@8`L0esh(K|PpAgyf}k;MvMY8YZr_6*JJLr9%XnD;>IjLyvs5h~BNJS9TCS>Dbw zjWq@wS-gku;=0v98%rBc~Pdh!Lo-P~YPvXVY`;86MyHN=z)s4q^nVq>^K3cp>F07AUR`u<7jdu~gT%XU06IfF>HA^HZ2ejjhm5n@Gh z{3O9BhLL`5mmh{jMFCIR$b3{HDK$w~8lhy3OxHy8d3T)>`2| z3i*TRlp{t9X-CXIr*=V(8Yxu04C*ww?a`P6eyNkmd;5r1wU%Q`B6MVu+%65d=;8^5 zy>P!|p_ZhXSi`9Ziq&MVjJ+|Cti3T@CQ&EZou!-kNtsxYteBBOw=KZ1J!pW`x&HdaaX54eWVjUlw2ZP7x)i zl`tmZI2KSx-)e2qVVG2??5TWpIErr^DHq_yKIu2 zmM>bBS&$PMPO)E!oK)k_CE}D8$V-7yOBT^d60+ix;zupz&&ac2lTu|5<@40!(5#i_ zh|FY0kL=mjX{j54r5yjN7(ZSsXxg$(v8gl(hB__Etf;qiB9PRAtMTShmR{}?dvcc8 zXTflyLRKtTt9sLw+2S{Pk>mz9>*1Ak8;B&^!4*$0z|&TH0Z$rlE_F4)(cI0?)Y?b` zP7um;rWcx9@rtz>U%#CA;TH$_VL{4-7d6eti*zG5`Q8w7`28&fxW&RgGLmw@|74+6#Vc;hi+^QBj1?4?@?gBPO`r%X?)Q8J)u~Tt@i0R36`}0Ij2JE1YQARa;?i-g4@*Z4J#dX#$SRTHZ2K}j z(_aWK7*h=00i!;0543umA0v;aZnC0jtkl!6MnOVta1X|^-`gQw9)4rnK%3kZX*?>e zXGK1&ABAcAr$=rEWlXx(B1IvzKD-80JjD$tt>zBK^1P4(1+gG8AMSJd8u#g?^+$}p zVPxf+K_xWSM*(~Vz)q$eL_`LKx+&{$9Fw`75=iIsC?S;qyO5N>-yOyG|tpboxxw#}}d_f93p_F>2sQEiFX5L~g|a=OBecWgwGwq#-Vq885S2ZE5;iHmTAgX!EE5kkOUZKk3Eye&3NLXn-evTb**p5A3 zLTWj5$8H!g0#SGc4NF*Y9YeaQ8)Zm;{?_v^4Qknw=#9Dnq^U@AvcP8c^OA(Bbtx~E zb(=nvu3XoCW}J0fts`J|q$c_M<;Ro^G)0aN(gTJ$lk}7s%^E^VNizJjlv6|&vr?;~ ziqZrvT|!i`S1Gk85uc$86pDhB?jErumgv_XuvMh3O_sf{v1FhJ3S_^?ehL$wZqvgr zs1{~+aPY07SPH}Ay77FSI;f0S*PY@zf?YvVys0)DGD+9sjoE$+{-15PygYIiwz8ce z7Rz=)N>GC$yt18Fv<+~dOi^!%g;(;#-2@6^7PVFnN*q{QkLoIy)e5ib&k(og-}f0wu?>J*4zZ?BIX}t zLbnTMHowcn5>h=#hA(()aQ`j?{4Kp62!8JzZ+KBV_v79Dllo1~$Aa}Sc$y*8&r zno;PEfGcZoD3yR*{7x$C+m2xN5+M2Gc{f3fw1JLT>|(?F;lNr-#$T#Tj|)o1hxwI^ z4=>BZAW+fVshhwWCPJxsIL*whE#zQa>z=v0j&jEJY$WcpB{zWzd_j|$1Q^kWNN1jw z*@?eMXU7pL)|=-&CyV>*t=T#EeNhvx-p1(TM(fg{RMhW#ut05)u>@mjtXKQ{xF?Ag z%1)^GhNk0wHgj+y0Bg+CXOKzvdwpYF%{Aodqo?t(rUZVq!|ad|UuEwDUOEW1F)p7d zp5N~vxF8KP8+BgXIpypt0aH2qV_aP2>@!8oKV=M{Hkn}Tq&~=oBkbDQFn^y6e_laX zqj_0E{RZUYIoyDK(T?D{ZI)70Vy)f@hBtPC8#)OXDKaN3chPkB6RZntBJ15w!Ptvt zDaFuUWY^%pUSt{8+>0#3n1%NuD1q|XLeDgfo4=7@GQeO1WlXr3LVH0|hJn54vKQ1s 
zpm?6Y;XXj&YRP|b`=Id~)lt-y7VJaX5%z)SD8E=@32qw7$kL~<($J?Vecgp{&^jdb zH)JHIxENq^|6bYqQ6QqLzE+-%Rr_Aj&vWp3_i~Kz6LS@NL@;fk?1v zWaC>;VNeil2lHZFuUBx9N`vf&or9{vIHIEiX544hXnRZUjKJ8kGn7MBfTa~4gp1OCOUF*lwZkqW-qh-66?2)UfwJLqw4 zP8VUUohZK)MW}^&yi=vP;aIoxSj8i(p^SHZy~v>;_1$~XlOx>n43cZBmsY`U zdF>wt6nFQNxdCqZzhXX|RurzEfxEA9&D+dB&^oy%4&6wG88?;pDxO4{-6o!RubCK8 zlYe9n5jOLw{AUuEa<5BiMG|CX%7!CwE7(31@~ZUP7OHh18FyGSFANfII{9=M{rt(UxbfM3J=m?Sx; z4;c8c-{u>@sKvuD$T!r>Ffp*-1+*{#lcOio&s19b#x!iSrx1^D-mGdm82MB-aLx&1f@*p=5Q+22SBqQ3$7g-e{ zhn*fE8D>QUVHm2q`G6GC+>{&?nmTKQ@E$8Dt*e8W2F0!-mY@*$1O;VF)Bbtu0vyop zXjg7e&ChbyfH?B>HE3qJk|Wxm6Kbt9G@poSX#&C!ToBPS#T$|{-ReU_AESww%sdN7 zWO4{Yf9$ZxAMm{rsH+4Eec(XIAM%U5`}#%^j#3}UJrs+}rxvgLln+Uxk zNjrifr6t(%uj3`^6u-)xlV zbPw1q6cVjn%U3V)$cgmG9Ner@r_v+s4n+Ckb&5yY3#hp%7O73P%YA8Id9r?FE``yY zPvBiAe2=V>-O6Ksg;c4}j%E;6>1!$%YX7I2)BN^C;$Q1@I=h*o9+{qP2FLj9y0 z(OB{wIpPM~KX0*eptZVBc9pu80N_T;-)KWlW{DTJKCYiQ zWmohQ5bm1@iVEc(p6gPwq}y}pCp3b;&+yZbO+=5WmKvT2t{v=6g!FJIQo%wK={iya zD$Y^^3=eKr_)!bVHKt`t4OrIBPNc?Gg;lyp(j))exUkCR4^vve$^fH2*-~jdC5&Hj zxV%1M5N>T;Ebx9Zh?j9eiZ+wru%l0;!OPc)IaR&~CRWm0LOOCT%2&-`9PWutL2qun zBdG$5@vl7J?`Y_)bL+bpweYAo{$<&##2jS;5LVb6rG$%-_I=4bcj^UF-EmmVbFX=t zWSTHjBXXokCX)3eMWSQ|+9FyN&`v6^Q zNQ{8ND}G2?bb$1Es7s9@b<2A52{Rz@V&-vHAcF>q*I}xKU0c(=8>Uxue-gTva7=Ws zbS=}J1!GD`!~10~9BQwkdnL?>?zJXdg78ji$qSM^Pd0M2P{q$Q=zeKkx_7WA7)6wr zy-v!*yFj|H%l@=sauGVO0J`@Yrb(s=Gqu`1#^{)&=zgH=FVX!_cPW6C+fbRezJN^X#Gzs(JfauPoEP$xw)-2*hK(%D( zmh%z{52jk`S}fe_3QcdFisXNzf*;q%_(hK`AJ>dZeBYPSFXLr3KDKAKQ(yGL0k-$xx9wMtnnK5*P22i z>Z*xLJvAQwv8M(nj{L1B4#(EhhGI4CdJ4L-o;GO-0Uu5bQ(54HLE18zz{-vOx+V`V zuV^Y!j5w%b5P0b`?`crVsolXy+B~}^;;qFVtU5~@I$J4lVX28owFc^nM=N`_Qc9|P z-^qIvdaX7O+a+%uYrSR#G{+K}D*r^+b*k%pNT4*#&tJ(E7~qx5wE};ty&SLJ1nG2P zEAW@YG(e`FR$xdqI*KlZh%R%RhO7wr+ki!iFv$?fhJ83qsrTsIEEYu`OiEm;#ghEi zmIsbQr;$gU-wG#dn0?NB@o*zC#I?|Zm{yKz<*ztf1y^hbGT&?5DjN&A#56PsGJ}*m zH&^I0-ED%?-y&k5J4fWEtE0r2k@1|;kgB$qlXymnjoOwZm6LeChM`sZJDg#mzfd~; z!tcvp&Ip7ZB^{9?s~)Jd7Ovy{ 
zYw?x-tR47n2HDUH4=26wvxD^d%x+zk-I6w5f4APb?ruGlcI!B?h~*Yh%m;M?1A38_ zJ11s=N7KW09}@=B$ML_^<#(Jm_v@$d6EQi+<(*tV)l9uOCVVA-gFPb&CEERshIATw zvW}EyCzL1yLxTmnj5d=I;!-z3ub*q+L(0I`Nxe{>wS5HPU(fjPB-wfwhz~*i$u4?S z_u~Cd3;2Q$)08~DG*xSKPY;uh>FK@jR@x#uorwXzdncaK3M#G{jy{3DInnp4fLO;9Obt za#g4jka9<4WBAgt+fYtJr9pJi5l~*5;==h6Z(`a1b9w2 zJ-cg8hxrvME!;1O5gi)hX0#l$Z@!*WhDHk9_V-`C45RCosu5qa~sllQtiX`+6ej`pkB6YAEH9+w8kmg z4aT<_1v)5tDg;mw_H7hx2ojOV;?)<1<*R`(8sR4P9MayqIOewtsLGr4k2sROw= z5!=n-8t!MqTxBAX3Fwx-93LT)JC=I39caA{Z{V~bkzvs``pan#9#88?c#XV3H$DFtt`;mpw+1I0?mlgfQSY~opqQm1I#@#cIttvsYe zxLS}@PWq{0g@dbH>r3Md_>3Cr4j_%MrCY}lIS)r*O*Ki71)VE}zFhsm+hEMTo;R(6R;V?qKijbnh#F@M}nNasAsFZ#xU| zxzCN$=TLUEd{ATMW0=KC27#KsEOzwE*6b)|=F1olUD`Dt!QgsdTi#cyf13OHcJ@LI z0pGz-oC^H!u9ri*OWq>rE$tx`0?sxUA`q+mUrk7ovwf?Qb4X5}EPlcCd0h0V?L)A9 zMtgy0Xo=o2HaU@zwunU-qJ&sfnT_xE#m9EX#WF%9!l-da21Rp71eAQ4RXTMr;S#Jk zUA&=N{9;A^;uoD#roobG%BhV#`;&^)+I)ubQv7*Mnepaix$IWK1JZ9-WD=d*C2?<` z285_mMU{FgKdtf;ipnf4K6aEajQC1?+bteRP8xiNS`&709?C>jNN!orAYDbj+#bNE zT>G7(U7b6IPo(&z?I_nTj!pk!Ha-7VRKGnd{r2VMJ%6;QymU-;?)c@RkbC*$dOkrx zwfZj-#N!8FCt%<53W?+XUOnF;QAt%j&xZGG&HNPikOBCgY48eD_#kPq zh*>4;gIl?eGzzyeJLF#LPz%-1e5*6IuVqNFcF47P2on|UhjWg_`h#^n#nEzyWhF1pulimScc83Z&WX5Zw&y!i?_U;oSh#ATR(ZyYB_fvS zIRDNg__g$bWYy274_5d<=gn!G#QJtzP(?$UD(hBcUD3f z0z+|5W4Lpc)$Mv9xal5_b_{?KstuYLZ?JBX#@9B*5m(}Dd&nr=cqxk0Q9;W;7ST>Ha7Tx+5tIg@KrZ22?EiKfbG^>2}X_LMub-6F|ua?F${ zzuQ1@DzJsM(aErHm_XCNjYq2vZ&=z10VLEtsPBan5OAE6NYg{iBVk-{wZ)x1>Vw0t z_3&w9_*z`L+W=D}E>ho3615hW)exXXJTP|BykkuccAZY*l7w~9#U#kP9J>R3sYy@) z%Lv4#WIN@J1a11={O2zIq>;(3bF=;yx#KE5Gc~cj9lTE$&*vl{opDnyI~<27dnP(p ziOO{O87E{%a8aLr*+(TuAijBiO8U7lACcskGd?AQAEcncPY0EAniXSv|^f(zIM!rA{2a1{pU)a_jdu!dI+q^{`2~?#p^1e-syAm3OVYzvEkO! 
zLXr6cqsgC0)3Bkg@(^ziZ0SJhMB{(a&U z3?e}!kUlCkgb*;G%j{{C0LGR#G>d~Gh0TsQy$ z$IT=>ZyD4Fuj$cbgkr`4at9&cWdDn^k$9=*qN8<9bI~7MpmakAZfGtF{RS9kKPu^# z@Gd~x=p>6LUU8m~U_+#3tf-VGkhjs)8UTnH2LQ~8JXq80leGvo&+YuW4jvRWO$JjD zpT;n4$VD5U{|Jzvka_J631K~rG@T42_~R02{4tCKQROt}ll(z)Ay`ujvH=N}!ns5Q z%!Q5!gzpsumCijQUl=T}E<&et!zoMT!U*sP(dBTA37TC1 zzwj-(`Q|Hyx*sa*&-P^8f)8aqq#cjcw|j&Xxs{WJY5f2-7<6>&o)^i(cxGp)5yAjd zcKY|FHK3jHgfc2V(hXN)J=<}MXyE3^9SvtE=0S%ND(>}d<$Jg=#bdrro|i}Kzh7KF z*8GDnAFuze^PVvbvOW&qs6rA5wGW}-)a6o0{CC>S%&igR26>*|RidzXt-#bN^WX8R ziWaf!PpGT4i7G8lt~~#yy3O3COD%nzMTOgdn?M zLeu%m{i%15h?>ET=OtWyBXB_Yi6@I;a61MV#2-3nYK#{&EQ0u?An5+oKdhdwZh<<_u}4S zOM#-{|6kTU@c%mz2d%EsAL|tUtvaCzo(e#j6cwW&&%m)4M^bvX?4LrTDrsUz_C{_u?D#usuYmUN}?PZ54 zda+fU+^*em@M3VaEP|nMiCl1t=xRUg0or&j>RL8UiePule_z3j0 zn;-e$LrxtVas0<%3o(6S5B#Pm4~vXmyxp&#YH;N7$I;Bc&~J*?bN0-LVhTq$czaSUp+ zTC2!nUf!Lf^{$1$!(__Em1_q}nx0au^j>?IDrnskEq(=uI=+%U5v@PD2bc1CXMlqJ zs=)j3QQ0tx91rcNmkKLFPa(MRg5uNL0J)GWo|!#!EkJ0WKdI$qyWYClcFlG0fO8=~ z;y^t1&CWu+%1-z7KJr32E#4kOsRT-ot0EcfoA3eLe1=+VwHPRk@A zu2(AWLBzT`NcNxh-x+ZVf7Pm^;yj++MDlyoUl;5MI1nsI#JXYPWo2Hkhifl^LW1_`-ysK=TcXgwaF>}7jWjJcM zv8!+)*|ERAeQv~_T7V2$AnXZH^O3g2ONp@6pHxn86eQbPKK|*ui}Ifr{}df9@#D4- zq7a05-jc+Ju&!EHq!oaIRVnBFWd(pozrmYFo@=umMBA-7+Qvph@Ta*kQXp>An$|sY z)4;m(bk7Bm%5%s3L}XJsg_JcH2Z`U|RW*ZXrIR&B=*Cj@<`DNwv*FC_L_Y{1o6piD zU9FE;;}7VHJ?934}3kJClTWDOksB}HV!vtmsPY&vdO!9Ua{bqRu!z7h|!Cx;*^vCnJsm0FG!>4c$Hxv>EWPOe&dOWy=uNbTg=L+MI(k=?RN5)JZ6@tc)I z`?1X=+JFKT*<=OE@~u&zDd48pXsS<6VDpPOJGfJ+JSGNgx?b~(Gz{&i-OiqG@=|lK zYw;vs&>LU35z8qR<3$lIW8I&eJRPJ_U@ z2%l}#K$MTR$Ww#ywjY|Utv|+BB_|=I{N@3N{%cHDQsl&*loeAAy7R&5c$@cD8~mf2 zZm_vS6f6>ABJfDEvRUvkWikz{c;0`AQu}Z3r&fl~E$6iZE)C{O?UJ{L!3~_Gz0oDS zB>xH$u{UGWGxsvj)NW@}W*!A;d)B%M>}pk;z_kC)s#f!mvJH|Opf%>u?C`9q4C2g3 zcS{ifY9ImISmG@?6(XVB@XcHW^cgA7{^nwFpoJ9?59{3AzX>Y&t?leHp`vXgopwvq z-M@*0-};@@ZO}G$7z~yz>#F}mW*ew~#7U)TU^@%Kys!d~zkNkPfckVbmC*(uz>{o( z0Hk#YyFj?VJq!gf9zya{Mash3lcBSxXKpRe@OK690GK-%3JHL&IgnY_BFnqZsZmYC 
zLy}gE)F~d#2mm6FgV>ZzC-QWySlxw#7SyrCQY9J$3}*r$2SeZApqJG3N{4IZy{}wV#S|~?JL{+ zXF5psCJt>n6E;SmI&FP(i%^2DNlWWOR41)f1|M@vqDxxTcC%(FHVU+A^t!1}0a3pj z)<+G=(q=yp7z!u``Y}JxO#4{)H!;DRw4vRZH)yU^H;a|o>MS)`XQBG!A-pL~>1U^e zLr5^|%CpN6ZL?mC{Po8t8)byTd&D0_bT^e>sU+61ICY1KTG|7gi|j{Ka3Y2XuP>j8%Oeloj4-(be1Zt=3Wq* zmoPi7%%swBv80;!rD&HISe<%1!M%_ktSbBh?5O2RPuu5{Pkw{Zi$c7%Ju- z^aM4OLVKd9^^2K)IPLxGpV|A%x%WT(^xFIN%MRa|_WpHDwib+TTLN*>49XJNZ zYM&00WVi_o@pDlIgChE|HuFeavR`PQm(Cza3+RxN&J9p>UqjUkc`6Me+LOh*yY`gJ z;fhh`E4Jb_D(}@|$^mot$egkjca+k@{&YmCnuNmVPe0lU_6@q$m|7T>&tNJWU;squ_e|Q2us9c}U+?UF=>yjUmlA zLzZ+O*E7UjSW3dJ>^*xV*3L^NI@c(;deY{;j81 zznB+NFXEsm4_&h+1Vj_`d(#fReSNJLg=+^E#ZPDNlqi(av*J}CB;#CC6^cKI*U9Bg z>2!*CkT4-M4LYl&mI z=^rdgqN zHA%>4J$iam#Usn#Rwg3d2bMie6we#CP(1q@if7-ZiszoBc#hSIN7+T&H?eW#>rfv7 zyEYVEu-Wb05x|9=X4p3BJ|G)sSu2e#qe^)BKAgD3INL&e?z#;=x9!8{wtcW|mUTTp zv4L#kIDK2{Skh6CcQP2ZY1lYIqBQIt>%AU?hTqwxY$NF2hg8X{ zp)5(J$A#y&tCs;^Es)MQ#a9i}hg4wqLx?s36xge7Dp@!uigfP-t#=nb-Rq=16gn$y zS2c>$+hPU>TEANCSC+r69sb5gb;7rh|toZ1qy0eS`iE2OA)vwFbU9rdZ9PubrTNK5w4 zS(zo+Ij*KQ$V!pgV|FZQ&W5qJbHGZ;&cPIorO>J$n^kJ{DdAJ4SPLnhW+AR);W!FN z)@~T8TCG!jfp@L4doWt_u5u2kwl1eWlF3#XK2$B{QH#+&R!0$O|FC%gR?dw2)zjYi**jnIEyJ(92F1f24>n z>5@)YeZrnqF7(DcwLP0WPZ03z;r_x{|Fd13ja^Nm9J_7XSZiu(Dx!GG3zk2E?jXQl zZ&<7tC>ny+`>ybavEq;6WcP_5>n%jBixL>+^H7J2;cKy4eQh-V*KwO1-b{d59UX+X zD@f1furL3@sl}fVJDou)c5cRA#s2f+B!f{F*|LaKhGSZ!%4~tpWCn&;65cV>GSGHl zy(p_9pIDSn`waP*hOWlw$ydv!@?%PcN$RUfsLfZus$nv5HN%EpLf+A-FnHw&+QA#y z6l0z82J7W)JB&ta}MEEhcQW!pZmmyJx#K8#GC+0y9B zHQ*Yx09*G(q@y-@zHB;G_7TS3s{rwyTYATsxk$rjRZtEk-CUiP$KN@X*im%kgj0ya zG>-l!a_2_Q2ieGDJ#?E?0hKv3sJG-_7$A=Yl#|HAjf-)1RZE=w3YNn`A7Sh9rgr8_ zUex>~6IbVZ@h>+SBArEuw}*WPKi#*lwwEi?NxHncdXRKh>Z?CGcr_0jA|2f+`ejLt zK$V)8M8v9Ilb`m^MTL~*D7TOc&OQm7;Pg7t!fCczI*R;I0_UW`=S>`|yj_|f9j(Jf zGA}_M@AsHD~Gg1u`GacvKeaCo1e*qiQ+M%!zI@T5mQ&TV#7qn@lseRwYjt|k^t zt_qvUu?p+X?UDz&-Z2eU?ZH*RX6MDsi7mInW~UQ2bNiHp%^U|8JI9;s_ozP;Qk)#bXUDj4iTt5LI(GFmDVD$@b(I^8dO@H){FOK ze%yi9hj&yi`AVB2g?X@=&e)b>K7M63=-f^afFof>w9q{|JZ+VSpJc1X10w@_xn7jl 
z_(1(0b*8<3@-2_#?kZ~2Yt0J^pTIPc&UnD0iwVFT-QscMUmK^wX6i=CUbAmNZ>o?H za3A!DV*=iR-d+drX7gmf3imPO6ZFlg9`*#LN~oeKSp}o%Xm4f29}~yRSc>=KcY{pL zL^G33CUvg${E(k&^GQz?(?t=lVEuvCJ>+$u)`QQ{namWtsJqEtEB;il*T#j!f+Lz( z0<4$wI?FwWw5IyX!OeZ)kr&l1F@;d5{A)ITb+16GIFb|#pvx5Fyq*^L5+P3WvCgpE zR$n|ZKZ&ZW-u_8CM@;nTnaiH36v|*uaWa;`<9S)mE6*oAQ+hSldsAi(VwMJhY@x%N zGCxG%LUZ*sGC+Jyio`7NtwsrU7KIu+5Y)=Si=-8J#AF-_J*nOmNI$i+n3!ozx1gDX zyhVeJJ>qxmOx!TzZUE<<52o}>(}M|&p=@fa3Kno8t9j{jc(6?*J)tof&;Oz8VcmM4W+UGt;L!&peXB4Ffx)9&ld`XfjGtVFB!yTU?p)v{N5;|262X1S_-ZD^6VT4 z!ai7j+P5mU2_{DI0pe!iikv2JM(z$>OHo^45GTioM$f`E8N_jVWgfL!*ue!nbBTKf zai;-s21bHGm9tHrQ{fKj*UHDHzHs)jDd%8kIo$wpnuwyTOke=S#mnWR4(270*8y>i z0>l|rW)Nqu${G-d3RuXV-^$ry5U19+0pc($5{NsV9f}89?<^1=JX}pwY7nPzaDce+ z@$t%KHUPY;1wiZ@?ga1(5NBK^yIQ-j)Bs+AXe37T#^9vE#WW1jY`9|vaRwhkoVwQu z?sZP=kMhYNZqPN}ossbgh@0xH1L9B|MmyjacFaDW5j{hCjoNOSm3TEkjoDP8>K9R0U&2xEB_tC#Nt~Dv)pSNw(2KyWkx?M3@N&t% zuF;VZ)xC+j)H82ICMslNe^W+OathSf7*Xk(gs6l+M9!MeJ}}jvt&6F+fRvE0AZ{^m z?!X|X3dAdD>5Vhq73sw-2T&CNUgx48Qo* z4mOV)4y<)_xq2cQ@NiaaPs6g2>o+0T;|ZQ@ieOpe27+BRB_{;?3XysP!LreIhF-=; z+%fbBf#f@eKyr8gEAl7zv~3xO#;stp(u)z0mlrm_ZXH*qgpj0|q?xpIb{k;u1JDGU zhti#hKGv`qQLTyV=s=|$P7T85c!>;~*QP=3ZouZZqbq$+wTZ45s&bT#P z77xqasA#kU%sCpJGj1Q3Vh1kgjN2#pJ7?kxhdJt;Gj6%bK+?UU(UML49L%`Q(P-_V zM0E|9QA;$S7Gy<10HXGW%hO|Ws{LoGuUGf(*DYkH4thM7@OT=j%NaVw{eeY#S8myK zhH|Z34n1t#0L!P-tq-*Rc(36bIGjO9!D`6k)sPPfP6u5I9FMcfQw)Yi4U|{;xufCj z4A(fL8qUsE|BHlT;}|{&&FTA#YZCgGE;irxJSxk+PW{{>-*(8?i6BVTG*T&gJz0!6 z1cqSpy|QW8B(3QJzlU`q+IUhLSo$v3M&`)Q21dR=U5Kl4pHCc<3e!+^z}k`goM);* zEBn01m@6RUiSTA(=NP-m9qo@|nmC2z%@kYe*rz(!Y_MS*Jegw1c4m6^hQT-}t#bng z=RmW4nJwWtq<~x%xH8GVUELQUvE&Y;mLlV-rsJ*$wyUACTwhaB5h&{gwP7QCVh9F8 zGvOAP5?v#AVw{)${t7~$l(FVkO24|@9yp8`R?wDT@`UO{hXB#rw{H&+jX<#dl3l^o z;<39{0YcR|>THXZlM1xE=RhXeOB)!PTbs#%*H0d76l4TOmVe zJHBBY=z*gU9ILL^O~F`>e0fl=^z4jfmj}UfwQ7@EHQ53_b;AzOdexk1f-g>qChSHK zr!I9M+Q^cvIVpk1r+U#|<*-z=s}V6%0_||b%n7s+F{6Sg?k=$)1+q-{?>b_JRIjUt zHBdvsXt#@ozaqMbW}c(h8Em2LoSR}2m2;+OXqhM!BeMg-#|o^KOi$LLETne1Q};;+ 
z+uK6oE15@-_LS~ArPNy$rpX!0_&-zt=`2K+(t|vl>>%ASwmbPNZ?Vf)CY)XbYp-|` z(zvm0%SfXst5u{-KOeBwuD)xjjP3Z5^1&ljt}o3PkIM}kR)jFu5}_3xedse^qHboVp)fWXquRRu-ioO zO0^!Z!_I7F@fUL4%5g1+v11>y?$Bt?!Q7wZc?Vi&u!6C* zD~bUp*gDm$R*LW!JJ&wq6&t;hvJA-j_ZvpY zU>`U}L!1fAI{Dg;S(Jq+wWd=dxO*BXW)CPP(;WnnRWrKo&lz5_kuLmW__kFMjFg1*?%fe)-2rb~- z-58{M9VmX*_<-T=L6+p3dkp;X=&F+WnqD4QdyXm z2N2WMMjOO9z~gPq5Eb~6zE1E(4iWit>#RtiCXt04dmjR+MBee~8b}HFjF$2TQv{q~ z>RBCS#MAMU>~){yaQs4Dco*^@9KUH0e&3yGcTS*Ea$%UX(-ik!fU+Hy&&4nvJ6ISt z5zLD1P-xDu5l~gaq-|3udblu5r@tv- zQis%ut_@W6nJiclpKLTVX8={?ZWM-*A>nn4*+R1d zKB7O%pV_Ii<9(TS+5MTu)HP-9&i>wqk8+rplAG@AZ`1Fp_U^ZC=p1eHyRe_+F}p}^ zWVBI37Q2#75_rGx5#0gKgHnV6sKgT`Vkk*(sN({ES>Cc1lD*gybD(I~&5slxQEv|F z^|TL;db$ncH`t0wom_O0JoX{_bMG#!73Trm&{>(yJ=4^xUghp?d{56bKb={4PPDM+ z`4hAh`Uo_hOV)XX>txEPbt-XX-HC2n`|AF8%Y!HLZPovd#Z^N_9xA{~--P0$-Au2- zq*^VY^3bR4=#OF#;%2=83C>2DV}V(va!1TRfr}mG)K`}rf_ZznFmw2zFl!N&ix8%W z&yfO*0Qf7_>6v7Oo9Ic-ltg}!P(oU{Sx8ys3G0J`L`jfpvZQu|88tF85@r-{>r4)C z;vfejhfL){o86SW3+C$zgj@FL$QdC_A`byAL^Ik&;K=Z;0_!Qt-A(xG{*p@&ru3CvDvN?`U%kr-n`AIwF0G7<=KaCxSB zRY2hoS$OQ{fB?lbs z*24tO-$0i{;q%3ulakA$jjAd-qnVm`Q3$;vz0N)xXzbjT5g9@YBYG-LAC z$0{(X9xJ`{9+6|td(4gSCL&)7IuM6fQxVDSYN@X*%}PCmGuDlLl$nNIFNRQZ&7g(* zs$`P>pa>zSqgd6(zGH%HXuYW1i9e2O&5Pz zoeqdwoYW`wgCk-&8)ypSnuMq^f{ZwAUTP`3*jDd!7kZ89`r@5P6UknKVaW~_dlAie zedm6)m6Yv5$4EDqmk4FBe4c#G8RFBar6gT~VgL}E0eG*Vg^zA$HWSL?*U26h?FsdX z4ay}Z;-ST|RvTW|hjHsAKmUPt@iAy1b)Z&VL>zU6;*HkXui~d=(+{Lhe%+1Nc1}@? 
z{Ssj^ksUu!hmM>*n7kGGB@051TDf>PK)A(Rlop* zs@SoyVNx(s^j#2s=roi_H$i6Sp0?ZVD);mR3csS81^s~zSrQ6By9|Y>y={8(gS^*` z?bWJ(`cpfskYbkrv??xRN-8o-`bN4Xim>#B(%`Hc6wvl-%01VN^vjZ9>6n7wSFuE% z=z~A4jW|u9JEQ%uo)wu!rXg)z>FG|i8>OLcuG#@Na-B+KIZabaF2V>wXj{MN7A)H~ z*61WgR5}$_e6a#`XpmIqJ~C9!m^zeMEV!2sLgOgVh+HdZWa`toUce0=t;JMJ5#{b- z(yYzhW4j0X=jX_$X+AlFQsk4F`8Y_+Ys6G{=9PYbQF6U0=MrliXnlqZ1tCt}pO)%C zIjI0);dzNC=Hp zW7Do4lm6Ayj7h}+SCdEmyp32?i?fDBjqop%BK6aQLqD)FhyGx2=zn~ML!S;D>OH%9 z40`ry#-JkqtI42#-bM_n#aY9kScR7j>H6uxpd>nM1ayBO81(y|VbCuagC6XCNp#(~ zdYt*!PY2FC*y}}be)S5Qp9g!}<3StoqLw3g@nElrWZ=uBCB@KgY}hUz+n5QzKbY{Y zXPEE{$b>guZ6^HW(}M|Tt`-w++nfot9GMBXUsfi>iIbV|_cvz3{}fDk=QB+B1!Thi z;c7GC|MB!-LPsR8JUFdX?natW%aNHd0%MoU@rsisGvV)T%!L0jnDCBgnD7h8ga@uR z6DspQN_D@0QIUhasjH<4lgizQ3AG%V3FE}(%f*EFVKNiGe`6;6zF@*vJi~-vKqmZW zSDOj{&!-0yPF^i0Oe%LHCe(6dCY-sfa1>ulX2Me&GvQsqgttG#gkL}={O_+e6MpLH z!Gsf6iwTp;-G~Xb9GMBHFDnz`YspObyBjm%oxz0v`!h`V1!Tf6zS>Oqe?2{zaQtd9 zVN$spF`tCFsIu=h`9RJ!k6=E3~8@yn2=s-lG9 zG}ZrmUFM@qZJ(@}`xj`oHRl z8*!u7C%91s2wdVs)!X>>a(QaLSC5bA{i7Rmf+vzw%ptGrhU%}(B> z{Q>#=jg)fg0Jlgfr^rLKH0z|4Q^`VMZnGsfUEL{6cFr<6zOGhUCJkDpppjJGxtR?9 zTw*zT+qUu62zlirBhE=yT5u{-p)ATvJMnPTsZi4B3cyrPsHEuFD0VhgG(Ms?AxYmm zqh$Sb{+7Z7a~_aP>n!UI53|I2hyn)%G)38ZkI|2x=mTkdxR`b4?0WHpyhi3oktacYtkZ1 zKUELJcW#EPwEU`oAgUGWlD#M(AT6C*t48sFbJ<-yAfjAHD0kKo%HwNm2d21Kk7UuP z9q0|#4u~jMn4AVhQC+Y2%(_CEeEdy?vI`}s9q>sF!+n2q?Eq@geSc$VT$~=|15db0 zZLoacVDIZ=<3k?j4RP53`w;S&N+8i=lnwYu)Kos;tpBxT1I=Z2$$**HlK56Fb1=AC zTNLDgd(FHcNec7aB!vG%uI$^S2*4W`!u1bY+^}2xmDq7j6~L__UjG--Yc7mZ&9iGV zHmQnI<<2OK=mH*IxrPWw`xQG$M@?ElD^k{?^<|C!4r_RpnIb zFhG^*WbqJS=lN|8U7(EG^r+JOKL;_~*v|FZlyc7YRHY*_rJ&Kzzq9zSzAgPQqq#fN zS>bnttV*^A7+Zf*!lO0j`+JaYYSb;+Wgz_(bBw_*8&WfxD=| zBHS9X(ch}88kPgC^Rp`3(ZZag502H6WLA?DRT-$NK-Xo8=f_+V>l&|p)Q7QU=}cQw zCk@CMYrNq07%q)gTcgx2lhWJ$YM03^qhyGRdl)^-$^}M_rGg=vszm9oifX)ydJ}%v zaH+z{;_tQ>_w%?LM&>yo*Kx8`{mFU_^u1I;9mRfC^^nWDkN|28x~B6toNtwbI3DQy zt&%ekmtU!7e}p;>QvY4B|GcYj8qF?rOgk5)ei$kSl$xQR#>lGZnkmrUJ1TdCs}1NH 
z)w1l88w*gIS-zKxT^u_m)=Wm2WPo<|@7%p59~yjsb})HoIXqYhNUDUNszvijwP}x| zcpy7(w3%IqUWS) zGL39*|M5vGztUPsB^*|j_HHGqpn>%z)xQr(_3xgMR9i?Ysmy0E)s@9m|Ltib zslM~dORBGgpcX;A|9Bvk-e|3y!UDW-IRyo*FQ>jI^Re>r_93pQS~RLhVs7lf^+NO$#cyI(_@3tzsK_1y{DdZ~G|4n! ztok%b8BDga@DqL(gIsMa!cVxk%YIyX9M(RmNgef>t+y}RbHKRjb>W;_KoZ4A(8s&l3V>@vVJZ3w=SA`2qcHJLB z4(=c_ox5VJ(1YF0IWY3XmOMl*s=+DJj!;ueo!$sBe4SiFu{z%~gMgtekzRDeCOaV{ zbkp#-N!}nKG?~xnIFaw>8)S@S4lWeQ9C_Zf)0OOLU*EYs+}-YVqq*v9E#&`WzvA-l zh5QIA^}0s;p_-a3bfzCp5{LCayCcZysU48hd4W0jQqzP| z!t)X2L?fcHh*ZFvUC6;S-w?of{4M&vgUb*fj-$xO<+mJTb%O37P+SLN_<|-29U_^U z(tgMEjG=bKB3>|JpF0iS5$n#T*yhckC+McsH#&Encgx|UUE$Z%nzOU>a7rIG_>5jr zJ1wahn$BJyaRzh- zsi&`@!CF2v;oQEtYm>imo@JhV%wmN1SIedH z%afOlpXPvAhvnT;d_B64)2wim7UZK)u4qV{@yxE@>A{p*J`X4Jy*Ia z*eYwRZt*Dk_y6g%#Ot$58>oeBRQ@mwc16WK=Oj(-q zyeUJk zLgbtrMW85NNLYlM1T{#!HmO09>jnkP(?XrmP=JFvbyo|;vpo}L##1MFYP}R*nu-1l zlB;ul=Q{nVr5*IKFrr*%* zJO|mKK&J%+ND2}!dWzAfoX*Y% zedaya9y#=KtG8&4uAb$z$IIs$$RN-`Fv1#w0}$2HzVaG1B2UK+XiT;jFKCL-HDK{22pKqkGa++n3|g}%(@SjW za4u{q)&Zkda2Wh_SB|95@4hHF42h^`Fyy&AggV?Y+qVYQzRaM{Mna$UM?#OyK#ZvD zBQl8)ui9Nl*ry}$2NQ(P#*!W*RwRaywE`Y4-rk8pvg{y2HRZ2#8fP#t8O1|<5_A@g zb%RmameN`a=GU;9XznJx1>tNiQ<@URoE;+Crff0KS=gPMrD2dPcZG)e2KtX;Dg{R0 zM8MqjV8L`8X|wTb0dp4?_P&uAImZbGgs0=7*`NUzfIgKy3lV~s*IIc(u`HIXly_h>0B0V z5}Itr1cW3a#5WQg-5?e^1$X+}MG;c0HoJAmI#g1A@TZR!c&~@NAu`*@C8aRC>G_(YGmO{T+^j@7lz1AStfLXXgM1OJs+n!q@%EDWDl3 zIB^ENm1e3l-+2Y@`5giqPNG3%u5Ok*qohQ@i)~GLMwM)s@b__kN~Y?NrGX?w;alT) z1}~#L%67t_4EA{{3IFDcnm?VH!#GnA<4i#;l_<*=ZSvtNIY$roelw2!wj*PrHjSqc zYBR|qm3^xXPzJ|5^azSW`twQc+*S8m$VAQVefS3U1cO&O-s6>Txw|N5iZf2Nq3ylL zY2<|9>Ma28oKY)}8J?6pG-m=Kc9kqtKJ{yh zMe(B^gQ*<169F3mp1#Ju5<5=(!K1tVfv??Djvt=uz7P(pO+Kstt~|u!?UBw%YjhNj zq8In~bHWuff+WxGQ%%7XK3q%u&E@liEO$BSDvOt0k&PqgjuuT`+LSbj%OUx^MbYTJ zcFyCJD{cd8S8qCK?Vj2_w{;$;tWeu<$pCsRQR+}0r>B>u%B0ftm?+bWB}}5&d7S0( z*&2vqTvb5y#Ox&lQ4vMT+5|kuMj(;5!2c8B9KVERfAtkz_Q3y`&X>CEufF=rj^{U) zJyK}RmBni~99?YSZ{P#5L*yxqnnO?sI7$h8R6HY}NCRTYsWV=KE3%$D;HQM6bk{H` 
zM$4HJm_;aMu7gAJj578<@&PvK9q(r;^w9;lol+y`i>C(77`M}FuP9Bhik}xNR+C)A zkSU>hdGP3%e8x*bf0aAodHqiICE(ML)1a~l8;od-VM(YxJqCiUy_I0}38QIw z8muIa&4Qw{w{qTRX*l6g$agIKVr=l2CFcR%vim;O0gs|`fVX6HDC|68d`?fEt!4o(|M<)n8sF6M+LI_KLPr96Ckvr3gU@hhm5;satx!_& zE}nM72z3LI>6&6v-x)Eff6F|#BxkeC#Y?Kc$LS}kvq!Y*OTuHbP2UmRi zCMLmp>FxsIVhO-^Bid(7FO3$^c%0u1i52_FP&<<18+hI4a)ak>>2b>eoG#-)_`LDc zN%Y`%hsYVu&ViHQoU`-DAc%BR|iU0?uC+N;kxB<4n- z!l%MTd?|;We8irv3_ zggEqX#lt4@-O|Itq$7)bfm^D)Qpem0ee5UOoJTmEr}K&tBtA@;ccT95NH!D@2ICPG ziZjUCaj-~XlK??-57iIun=t9xgu=B6a-q0DAk@*S$1k{EA=fByifmVLN8NrAb>a#Z zHbcJSbe6{gT%)rV;}jGZHKYrqe->@icram?}&9 zJ1}@(4}mpQLn?wG*bzk#lk$){=1~_xK$fn3K~edT@8L2i<~C3Sp@^e|N}TPaJn3+b zJjT0YK#P%BY>Z|dA%n9d$7mrrem`mQJ7rs*bMW|tbxhN)&KXJ5cGgOF2Ioi|#O=}- zQN*z(S&F!^_tzsMAtMEk$8+#lHJEnhc-E*}osE(TD3CWb6Tsuzj3@N8Cz?;=3pttA zdzx(zf|SVZp`*ej$Q>-y)vU9R3fEavPOxcCNe*KU3eQKEF$-kxq{3(^f9bza%2W1(z zkweUi7KON#qP2o07PGE`4dpjkDNfYin4?S*CQ4Re=SZ~(w5dN9it6C|(2!EP+H`bc zTqy%b6eJB|%nbI_Dl6I?6Kjup` zi6=P0_L%M|@z)}Ex)v$cKwDNC7Z|`BA7|~Gha%YWY!!qy5caa);vm?3P`KW2*wHpG>1`&f9BsU6S~fY_?h$h4eNu(mnl#8| z1h+lnv79-L05==)Z{*l!@m{@)-69G1Z#;WPG%jKGZ}99pH(%oXk&Ii+e*pI%l?qgz z1Zu{468f1IH!ZZ?gr-TBxp)JMhr-!2;3hnIdCs^+B&_vLj46gF|0fd**X+h?Zr8yj ztoa+S=$a2Z4K8}k-+1-c9CzR6M<@4O{W%*HfbcE#!m{s`D+(t3wMFxZBpDC1-c1}h z0=QdI6jXOfGU$h|C`EnVyYs{A?M~Fo%9E{P+1lA(m!aAtwN*0M23+jjS+Y(FAR6V{ z$CD~L(0ahtv&td@CW4Cs_6A}@!=ke%lY99j*|fb@w%i7P%IJG!=Rqk;3o6nbPV%x& zUX&dhHt1>HL6{3AAt($uoE-}8OWT5=DP6zU*^##F0MneVC|lM(sxWK?2*yy=$2fi5 z=IvOI-*juGoRIydL$wt2EJP#qp8) z6V7hEzVj0P?x0~`qCfs6YMq2>^_QXA;h((bXulVbAGh9U`O9rGPQfq2&<-;}4Gu*q z$R|XMkuOBd;*j(@;8bgt4$C-GmALG#1T9FNv^~mKvJaCz>T@|$$jsHWG8m^TdsI5T zHzlYI4cJ1qQTpia@F)0c>nz%7UveG49ie^?`m?_)-+1+31MQA7RQjjF7T2uu3yS z_9c`Dr}|rd;&}NOXlk{2LZO_$rm@eK_#CLcF)_nKo z!a1-m7ZQERZEL3CUs<#FvPe`PXr^*IFxOQUJs|~R zWQ&-4NZ>o+V&i+r5O7f4S^S#E3GT6yygJ-N0$8SBKFoeUOVbnv5;C$9wX8q4Es21G zy>FSG!Owlc25Iu63jBv}6&Hx?Ph70}As-MHq%FLhG=tgfW+bvKp(JO6C|z)%842d4 z%{&I%kIMJtJ%k@s?k*v1b!G|@N76h#j|lAk%z4(<5K*4gbk7{C`T3dGlGyaHF6~+29dl(Gaj$O 
z$&wqVXdVYzZ>J!u9J`fdhO+%K3%!6T0_4ddB`6-I618^SsM|nsDmshdob#@gFwG}e zoG`kU)_O2mqZo~aVjPO?q?R*lOeLq|qIbAZKB#Oe9mnaqW&XWsR!QFWQj3dn#}@gm za2@NxasoT1j9$p=iHI1lbiLictQhrBRYKk=_U4542Bl(CKC9ytK}ta*_NAMZvb@BE z#?m9PKgCc~o$+tL9-kSm+%G<<0Z;fywCwmwTuA{vslp=4Kb7D z&Q$Jt^f94HG-xJ;X>!Kz~uH(6ye*ea9nvpdV!Lxl(-2iR_! z>Wo>QNSaU{WA4}Do&ts<9yAc_Bbh=7{r+BdJ|ZJrHr65?KI9C?gD25JaAb6qDak<5JVMlKWwCm8M|nze@+~aGjcv-*s#`AXVlzbTlBT+)GI*yP-j%ePF7D%b zH&A;!2%-LMxL&_mB4DY0Nu=XV7#~8lE`NeGV`G^jBgKb5h6g(ww%8|I5UGkI>x4Yg z2$McUGm234?$FqPB1A`3ico$S`(;^dDMCGNLBU5YA#dmYO{?T%e@w2#30#R;i#aTi zwAcygPEtkYj|n0BG&6CE?A$I<%dQ9!gGH|VCH**&b|#af$Z#%BJvU9P>@43?XX|CE zlyhwkRQY`%DsjJ( z=vh_~J}SNVacj{Ix*I*ZQD)yRvedKw#4JI7ZsDpMHu5*f=ws88sbyx$`yHIf7LG7I zy%8=uuRFOW6V#Ke%?)Ddhb^nxG>Jh*Mf1T%jX=XCNbE*t?=OeOj|3u&`#AABoZaw zj0i%tfhKSE0=8&wq@~9O8);%sn`z;i)-8A)X;%Eig=}i}Bg+wRQJZ^#d4rp6ZcH+5 zZRF2K)6KP8%br$_)vdPH*<_9F6?&_-7n5OHm1a2kM_TQceLBHvucqwFzKyMR#WTJ5 zT`7LDhN>H}*v7j7+|#_Me}|^8FFR!`s^G8hOzdd2QkBX8)g6QCq}i0dmZ|Qz{@&{S zHcwykhFXNIr8?=&w_Q=H8}>wBib(M!uK?8@=f<^AX>@n(hV>f=XR4X@fuv6Pr3eZk z9PqS+ze0Z#k&q$l$w6dfpF{lUHJJMo(_z}nD;4AI)gg)(-W<1~WG8i7{8aGI{A@{3 z$%zWnhkDgw$id?_Pqa9sBxlr zvnBWPZ1_OJ_tE_RP5yqR2royVuljqxs(mv!tcvhc*u=H!J~kUmAo~_+SUq*{s_;zM z>P1N_J{imsE~|~M3SWftG^}$L<1pJ4yO&dLBiMMxW8h5M| zKqZ>5ox=BkQ>-d9O<``CaEcg9nc0O9RVVf$wA~WYh|Jyt)SkTu47K+F#aFpb1x^h) z2hBQr7bPI1(`3!rOPQ^?<44K|k0`l%zD;8FY#4w-pWq`wudwn==@*cex1m}3-{(KZd7X20Ml_Pl&Pk&3`>HoPh;@AXiqYK5ysu30;dfyy;dt#>$ zrGlQRhfDZ@y2WejP6>1Y?$|6uWFaRMdpQ=t6TgAd9F69zOg(3T9zjs z9QBQutmR4m=B0*L!n1iWQw zfVd~zo?sdBNVcL@LOuo=BG^wE=%*`_l_n8J{&y0(Gz#Lj9Ym^{UE49mfU|rG*9nXB z69X!&s%}F7L*23ilE_3(NdQ~sEqUJAa+Fu=jDrJ9(((Oc59Ub3w+4-UqZUByK0Ua%!_@}h|+@dfwq?8mh3VSxc3EM#gQEQ2jT`98CtF7mojU6x!J zRyA@+F0=a+EEKdt=|J5IdU1H)_=^tI;mp;a1He?FbU^ zsui{nbH#Ws0ly=*`}>Nwn^vssj1>v4bZ5R^1OQ3vie0(33bkNe)9_G=urI2SF)U@m z{%zJ@JHcE2)d271x0X*=&T0a&m7{ejJ6caL^X6NJ;u?Bu`&3~hLylPGoELi1jv%Ld zvO$mR+t2HwruNNE?VkutWg9&uI_ha&E&I~~k=d(}P?fwImg#c7*$d3S_(Jgh#PTM0 
zw&X`LQb+J%-T4(r+0>DuS;;E1*1kLh1>j+A*bG%-A0Xi}`v9R@`v5~O2M!#N%NX~g zfnY*bHMjKgHF6%@Qfx7`afbWX3azgX$?&xmPe?f`h{{j0XE#nXloJcLK~p0SiwnmU z?5Vm4aiNP+OfQQIVt9^Y*mYxJf%B*q77L{6g^wpi^M-{5Q9NHS!kXvza0JPnQ8^z1 z+D^Nh{Ecm8%>&h}#wK3WKCM^@#@=+T)3dQ>tqCJBXP7#<7@Q$&V|IQ`+(k}F2EzkT ziWH-L6G_&ILjouKbi>#Kaja7j8rl@D4V{`@3=x0X!hfXLWiu}Dh@5(eaA#lEy@O(s zF=3{bCuz=-*KnlxX#|#-RY)Rr4oZb+HC=ZAGu%E&D)5DnOsp29k&mfRu&ZMp328e4K8e7eu?! zOQe5m4%vKMvY?W=>ACT*(hv3(4_L$#&qWYi7eMDq$adtUW&3ZXwQ0!q(31Ie&sMU% z+10|P5^@&uYQAm+)zCe>N-R0A>ur#l6ywJ7X4xLnPwj<$|CDW1Z&M3*4n!n#8PcE{5UD+OH zu@t6pUc%f$BnNxDuAFRFz(zQ9a7&YHM}DuB?f+BA_OGgByFfVICDht*W5G^PETR)@ ztgi^k@z!{N2ihGVNfq^R)PkRt6}FTy2YkY7!^{7A-zLnPGq!Mc|xL zLIM(cs6@#5-AccjX^m6#!?{o0(e@ZC4iTT)t>&^+JuCMG%}^DxN-l5d3G`%YPp6`~ zRUpBr1?8)SOrAg^8nKvVeesTlayYfI$8*IOH2u$F_AP@8dcPa6?W&-$HV5V4xHQTk zO$=F31s9lFhFy7*D2I1gIh?bZiQ>IZL5A}9<*5UNh}IUgCZ<;$A}fhNKG}wXf3&?H zkR>NKmHwP4eujA-Xx(4Xe$Pogif+Zg)7d9Lr34c67iGnx<_RY1x=)dyb)fZV(MyRu zn9Aw859&Ihf_Oj-Jh~6$C(D)&k_34G7p`%2&~F~U1-ossB7<&!7vxK$r`y!aaKxc- z2MJlhiCcQDbd8|&yICBKPa@;5q4^_HFE;n-K1yo_P9+8r*)BMvBdx;=PcE%4x>}v;*BtW#R zuKYgms#X*frrSd+DPdH5EHi()d_Zx|5H*Ae$0Dx>?zVd+{2!)uqPjIk6W=65lv zQ6mJd+u@EnE=e*bQAfGjx{}1ASrXNbNEAwee7muY)uxVso5xNfObI$n=_l3^?AfZ^ z3P=)fY?@@6FjLEpqAb0TBr4dHHz{D|>JCeiE6|{_ieIMm#C9!7lq8qf%pzc-I68`v zKo{4SvNZA?h4MiNh$%_*`dKD@WBY#f&V_^yv=(heD$AzsMhIIlZYrl)-jkqe3%Vagq5kH4IArSEc3&%El}5 zvUK>>qw$k|>Pt|iuvQAEYPU3$vTNPJ{l<2EWnoh;tzRicinN{E@~ z_ee#Qh4zHrMo=#*oxP-Kuu4)9xU42wbaF*1@!IULYYyzD(p0-O^f;*=$>;I(z(*So z#M#7yRFkR)8Vag(HJ|#p=MdMv=2swfOSRvmiq$e}rX<>qq zh3`xp<=glEg{b}0d!cqVi(fyhUjz3~l-+(8N0RVhIe+psMfn!~@kh3eCo5Rh4C~tg zE;Vc=o`m6m(@aZ9cGeKAT9q(N=mnb(qSs>sRw3bvpW6~v&Fg}IRnqQR7+w&piU8}D zu*#E^xR0r&Nitpjj!6dLM3EJsN>2K7m%k~f&Ghml%d$!2^~N^YHqa6tlf84juN8R& z4$eFgl0JCC(mvGL#ye`G6J$jX`kopN#f< zFKVAGtXoVe4XzL|GaZ?gxaSgIK-P0JjxmXcPV(Jk@o-~Roxq=ZFnhNfJ(#NqQh?+p zcZf2YHbt&;8?Y&uh3BNc7)76I2A-^CVtE{v#*&yRm6Hr)?vJPwGyn9FpyVRuPRw|3 zI*7wDE}tl#-|wJb0V(#y5&7cI{rCg3i3L(OW|BUl8#6ClyQPL)R(TUx!%o!!wKLEG 
zwWA)Xyn>usfJhcmzagb~j#g^?MeS3mq5Y6}YHS2!8ymsh%qfT{_v$`{VKQC3i$!VH zuYQBsuH%j9ILh_;ZUnRNoPrQ^Munc~jWE5k5z;OaN~o`DBar0-8_{JWI<`=rzu_i8 ztk^W6<5z35O4c+-NF- za$qWg5#f;xy`3F&Zn%QXO>@{kg|CjSl{}9!DEhD<-?1=u($`7NIEapi?hYSHya`h# zQ#98*(Y>+jDsHZGp^27Fu0y$LR{ZMf@W42-tEu6~z!dSWoYgbr z+8gYqF;MLU3FK}XCs@ig)lS3{Cj3X;8Tr#LU!=Km zT14)*JbbU_S%Kjckb^)Ky}?FU>*4$A*zj8}GOjy(^oflxn+kADwxDe>D=5&knEBod zg@w&U4bwg6)=My;;mm8vp(aD2UH;h<^GZgRp&$ZNLU&laP>IjPHgkb%YoM=$ZX20; ze-X6+t?pcEfn=(4t;5W+!;O#fV_pX%uTAg%s#vZ=TP_zCoDxpN*H}_^RvbGY=;Ej=j9xf7;B2SXHf{*Aa7t-#r4ZV&k6*aC|O0B18b7v87zN?G;DVBWTFiFwYkb+LyB-%@42bG z&L7Y^PgP`@dvu7huqlxfwhQ@byD*F>N>2$5Ry^p zq_$UR@NB0NxI_Rg>eWIlO2YfL(p2UnvLm>yj4-6-MDf0SY7Y0+4DKFr1&Fq7^7_u> zo$^a>Esu?ypK8Hz`zP!6Y^w#%|j-*6MUR?GCN;_REXyNu;kyu9^C3*lg_f za%tx9!Iw*(r#h7BoM$Ol7y3wXwyY&}*c5`dq5B+?jp%B;2)99|pXGUgn5@5&zhykg zN$wD%3$0A7Nmf|kJIXnkFs$j5co?cwrj#uJHVo~wU>8>ia-PR_byNPP%Q0D1%MOzE zjm<+hu39%~k9%o?l->>G8chIanz;h@;0j_g4bi-VgzXo$?6n}CbY5Oc{?=JtZVINK zQ%N|8xb%H!(sQ90Nk2+0oRm@Jw>*Kr%g$MMB?b?1&1!Uh97(8%PwM87mQms!@w(F* zVHA0V%uSJ)!|KB`>zsNmSrCqcy82t7BM1~iBk=xXpI=7tGaoWkHE?avo5nyjad6@|4-mRn@#7J5Xp=06(UFzGvmvxuF0Xk z+=T{jye?Fuu_}8cQA8YMqDuJ5lr*riw(Hd|sCL>R2fP_t1eQs>t`f&jQ!lpm4DDLq z(@?&?H`z~>#NZx4(!%!^woS39c`f6TX>1;&zA6y4gmPzvWR=KC>sVC8{pam94u1qO ze=GwL78;^>cf)t=o?+{kh$jCE9;s{)5*6OscdV)3fH9PP$B(5Hh48Ya1bi$4O)r%$ zGO08@Wm>-5Cd<-nI7*I3yTM?aFtO2xcZG%4WS)+)6>MU6!o*%LMFE9V6-{X2oscj# zJ@XQ+ch-Zafgq9zv~$}+4-zF3+7O#%j^zp2k88h4&Y}VBG(eZ#zu>~;rWITPm_NtcT!@CPC0)M`@VZXwern8L;q4HD336n;yiH4w0beR_t?DrqBBkzKB%M=%s8 z>EB@CL(bp%2WE^rE;0tsqLHTM5f_dZZ^U3Z;lz4yAR ztE;M|r$3f#$w{gU8B1UxdlPJ9XNaDN<$wdp4mnJi!|rhQaF!e?yBiy?lVMHVi4{i) z0(wBRG+6;n;u$GH1`PlBn>D*fPo%H13H5k@8|oy_q}@6f270? 
z8P+=Ps(SCf`+oO#fA{zQ{;oNKcP~aKsTXzhf3l=EdF$k{Xxq&I?oixQECy`iRjIJS zIDU~FC38R{5KEmG$$cUz{qH*bILjs2PjQw?R)1_Ba#nT{3|u1gliJt)iU;p$oLFta zez>YRPcCuALRiDs4K2e&WUa0c5Q4n~WeG^UUZmbwVKas`A8qy$(1O^0Y1OaQhA4Os-B8t0;r&=L>Y^2tcoTi%GvD!8oGTzbyR;9Bj> zG6=!z)5wtLjQp-F|D}k<6k+I6FEM{BxN4RhD-^J)jKz{gIw=y;*rwd77(FYG6&i`2 z@&4MmWj)^P)wd>i%s@}i-m(?WEjvOif=Od3+Fe#1&4YIYX`l+7wFAr`2e{JG1I)-d zMf}+{iMAFJt+`NhnPrvBY?%Yh7=fNHU1s)q@}7DSUnX!+2*ZjK7nx)mU^uSF30u5x zvJoghINkG@yp;2!&FSbqraqTLt^Q6`AdEVK2YkS zs3Al-P(xtcTPd3uJtdStkD06E;eA1^R7}RJADt=Wt-4+>dbMW^=H&R8)WB+#JTPs2 ztPY?9Y)V??rB`^YQau(YuDY5B%IC@D>b;a65bD0z0|J+}VUIxNt+-wIi?$b)QOD=Z zS*GOy!QX(l!foJjd>2!De@Ey$uT9vVjnAyJRZ!z!vs%*E+a&k=`4<*m{=t8O{A$I*ca5qh(&>Um4+Z7VbfS!6V&WlOtaMRZZHw-g-?If594^1= za6ucCCLx3++D0Ivqb8zx5fQL&Z~XB(If*!mD`ZU+Yww@5!Ys0z$WJia55m7tn<3kz z_$=Yp;v1qG?+|Z!j)jpR^P3`8977mHHLNehM$2_Aleu3(C>|zvDXeD+gqJD^|9mNg zNq3izX+E)D~4Yjo`v(VOw9k6q2 z;;>5Zs(hZx!Xzdi}jrk9ksz-FiRuH~9 zKLjYqvl}=qua-bBF)dRoDYhYOI2rGPYwFo+s(Z>)N_0ddM8AJ?1(UaMEQ0C$Y7T&_ z=0`9^rWWie2Wu(AkfXHt)Xf_It(`zHF}ibL)-0+TMu}+eK@=4#lsrTc5AC)>x6yAY4nExyi?{yz|*Fpfc%BLLcT)_LB>W7q15;~@~zCt@k)jl zO}qjGo<=b)3@l@w-XHuo-^3GDI?GJ-&=HAP*$l#vsm{|{!Rb1!e#&%~(v(@&>54NS zzlzPI(KHq;rP8Y_Gl>qptE3XQINNY76~8Q_W>PLe`_44gqlGi1TO5nJMYY*vcr41W zfCjD&or4O|U?30ue%kuGzbqkwdsQKH??moE9~78dWE6$0Yh__5CZpv7jj1>-(6Vx5Sfb!VLD%PIO`DMiaCy6#ICAG`XZFt6|FiwRmW zszhI#02{t7BFX>4OT$G$EXk=+)9T@HN5DZiWhzO``wlggh9?Tc%wSBpvwV z`ejNep#;*YRFrxNrAgb0LGL0yY0uKX7ukxNM$eK3h?t;Mx`3)urJEI7uT;8&>w;%# zW?l3wA%}JcPpGvdIQS^c_KE>+1#tULuT0@|UNOPe4i6pw&s-lw_=;niXXLZD2= zmfTO{Aq>7dtn?BXR6QR!+m3pii^>ZKSybi)Ve`wTD4{W_1VI=UOc?pQ*=L<#A()#& zDRF27qDG<+>eUJ_Xo^EyH5?jxuR0F#aF4K(@Tk8dqYRnkLtIC7ApoVWpM2~g-UZ#G zYzN0Tii4|JN?l+!DrI>$?Ph2fxti9MrZ+s~n2S_F>AXpS8&Bh%DOFljUiBGJ0Uhc@ zNO9g68Iks_CHG-C0b8aQg=EyNHC{O+A1uy|&NGVdqOQ;0;EZ!*cI*O$8sZ8-snYE*?eIk9*oV`8 zXijcOoN&h$YB6W&htfTg{+e>aYt*mO2cw;$fhcx>HsDUfgcC6wSyJv)3pI4A39omm z30IRK*O_p7RN8$U336Tc#Z9WUc4g68OG2d2nkGfJ>y?b)Vr!OqNG9P%PK=f!MmDInh2DvCGNv) zTvb7@`A(~PPri4`O_>a*#07l 
z0wLC-sn&Hk|5Y>b#fxZHAi8h71|cjt7LM6EDTb`PCq9*0;(QdYJ0MZU;hc{GN!GW# zEBjN!aS%@vBHO!+u?a7hodO&uZ@F=S6o{c=$bJd>nAZHF7tbWx#vrGubO`HF=mzz5@v^S!Imq=AFaa?~C+sUj2kf8i3D`D=VwiQPzTt_b>*dY3ed ziTu~XiS&4HDSbJ+In=$xXuX~Tw>i06G&BrARq)15*WB3leL!6p%aYakq{3CMU zvTm+XE-|p-AOy;QNr)+vb{Q7Q^0nL$xvgTOwOR_d)d%Jxuh;fWzmS2UY~&|BFogH> z8yX>yYYj~v`H^tBW)dP^6fTFaSDQA$dBnc>4(P;NkpZTrkFYs&u$9NHai93|9O8n! zs^V%5r4DOvL6lCr&G|wgJ?KW#CgFy~nLog{(Jkirh(~{0oxBf_VfK|O`HWQ&Se$E0 zL)II_7J{Q~(QM0SbDcOubMlqXn~c#$^I!i*oo(gYy};R)Qz-+?WoCQ9xkWEHN#a_w z{mS}mOOJSgquCFC%-|i%jOHsqyWdnFO)#W!c}tSB+gM~Ob|0%~Ik`wOV4g*%CI~ut z@Q?x_oay8y8jO}NROpoqqTod{k9Hn~e8m&_Z*W5!m}URVo9!jwepxva`A74x5-bWb zlG3aWY=x_9LxfZYi8%{oxMCXel~QGq^h<%=*p}`~&=0IgJCKP=ACri@<@KAcw^|fP z9i#s4_QK9Qp zNj5n9&GMwSeVG;kh^;b(Y#Iz4aRB^@{7J>$GGcGPS^9Sr0EmG(miV>!J!?k96ZxfS zF8FCqRtJD%@+d=Ed${Vg&o}{mY3(zqhSIo3{WQCnhsx5E%jvhmaW|kj1VxGyV-fO9iVwvnmx&@2L2vtKU!vZ&l9nyq*hR}67AUpG z(aa}1T{SG0s)kU^eukxEK^U-76(T1QJLE_~u^z1DB-&{+uHP$F0Q@o=ekGJ7rL4}W z!-aLB+_f+sCDrjLsaT(ix5ynUrJ4yziVwp>gy7cFT)>?HO)rl&$;9<+l3JD-+Uzx} ziwB#^x-6X4!wgA6tzueM+hIIzJCYe0Ca=(-aEOeDWd5*5ZH@?)Ua65!fVt)*3mefq ztcrxeA$uiSDDcFPhQNd17nUbAMkd`eHF@fy?MspkXW(ODPk@kD@r7`7-*vR7!V-uh ze`2bQc%n@J8E6O~^*D>CJTDO(Wjr_SWf3+VYBd-*G7?FSJ9)czd zb7xuQw{F7QqAxG?fkMj5wEU6h&_)TOnzwSKl0R{u;dlW|U~uO0X@hs}2ERoxgc2Yx z<+eO%kf({d6TqD^~9Ofc=Rk37t)a^p%IE8CaUe{#CQyfOwX1^swwEQV|aMXqMrQMLmY z8HEMc4mP^c|0;rE-&^Q7Oyy1ejhZC;2&>n?dbGo!e2pf&e1-SJ;UgjIYwh5U{Ch>B zWBofIj`nu8UhLRCN%X+UzYa3X*Brb7UURjOf4-J54qHnlNa#DCo z$i!W@YIA!;K!i+0R{F6^A`>|+y$W!vn2N3G)~P$fJ8MhB*bFNLbGnXPCy!_XK-Eq)<3F@kyuwK`}xY43P_LfD=BFt9sA@9%9A6WWHrA1>qYi(ap0GJ zBDZ!K91Yi-pXVw489L62{4;2uWwDxndOAP_<}5wMpVCMtl7)XxR94dRG`#cu^=5XS5YmpQ{ae>)az#% z_$CyO4K%ZfWjz%W~{+T={CdX*z-Ahjw-(^4Cy zg@ieXn?U44r#rN>-`2uL<+x^zBMH4=Se7QGH2M3KgLW-LEbu;C4HI!=oRhbagZNlC z*#Zb$NqAtdU%Qr}GHXN(_mZD<#tJ~+w5NFTw^6jepq)_TibQ|3vD<DM`l%s46CXfTJRC{LQ6C+XX_7$o*e4DQ>=tG4Ng)*y&}8G_ zE;m}ltVIoVhU0jkJY>8#@6`)PO6wprtuDyn_vj>|oOT3>RN+r3t<0;gg+CYNRU<3o zD^QIHRh449^X;}|l`P4;3Gb~T-WRtO=0h>U 
z*w+5*(f)+&cy=JeY-^7u9+kxzJz@7Ci`82Z6Ka>SB&tvyza4Q|DPcP(-V0EYmAP6q zN}psW-x4$X4U)0iMiX*6(50D&r1*PT^_Z7&8=3-s^{JOKTv9-}F;at3!O{EeJi*euRHULKz;Y+zJDJR>{h2^mQB406&vE;Lr8F>t1 zNidtoXjF)IhC=vbJ*($>JI7K5ia1G0vQ8s2e%bpx0%|JW z581(Yxhqx zKS9DrH)1R;7F?@ZOdIkuKou&M!Q%ow-tE=PzRXtdrF^Xvj#`no6~pc$(N^WWF7`vQ z5F97wx|f=wtudw=Htm!Z!OKlL+Ne@0BbUnjJ?~7Z>mcWuN^rgod&7E4M`#E6-Tb)9 zxCGBtt?(jzIMBD~0SStz`hS)1AbZG`5+|5J+o8amb0pLoixZ!C*adS^TOsj6Y;XL= zO%mIE$iA!|E)E%qUhaa(_wXVNH|1K-quygcap|WiT_Hzi}?o4 z5?J|u63!XU>AVSxZxha*Rrpf|!MGZZZtBcWCCIW_mN(-|t8CZEJQ7+Vd!b}W*RZGsT?TstA z5Hmq}Jh)ZEIW)ft~1a@70? zjyR7>#!y{zBN-A>W42fuD*SMVCN4yB7HUNwLbT^vMIT_M5=Sw?C`g}B^g#k$RVkJ~ z`X`$#P()3nxU=-SLqCIJ8`SZ4h#Kg+sZTEgk}fU(=YSQQuBxrU%=jU7={ zFO48vcy$;-3QP-*5}P1sr4rMIH$GeQD2^J7LIpx?2uqPpQd zDb}%3>Le)%kc)4~zj92fiY-4_oOp>W1_5mgv|bz$k*}Se>Pg zO``Ml)s3KIvs*R>=bVqn9L@?L473Os3RDOr1kfUoL5>0>=^?*sg@cqt>)5mFtYcsK z@{~^f(sXPoLBmxwQ7n8IUr$v)+-%S(bO7okhvD8bZLF$v@!KsLz^D6?LPGnnfW*V} zG^;lMq!$uGc_bxwM_5Q-OCQx1EK zwNwmE)T(&9EYXl1IZ=%7Cd9R2_d`eoUH+Ul5AZ!QBr_psC;w0QoDlx-^&NVfm|)ce z!RK0})Imw|r=7>W_zbtN3rPN&3dsSSm60qy?{R4wc~i)Ki~OMYdZC8hh#V$YoJ)33 zZtK^I!fl9_66Glv|*BMn!RLRi{$&}1;X)gC7Ob1d5X9S3A znnAKv`S(oOi;;d?K>F6_k*p11A^IJYwE691A*cPytuFa-BZSUMtDxt%N5g%w{C=4% zg%v5G9!t>)Q}&s1mhhI6_varFPnDS|dq2v-X>;R!Nlx-d5Vg(8uhr6k2Z{)Ve*9R` zFVD*HZR<*L1(kDHp>-2CdZF31cjM|@v-=^L!hya;=@u6GIaR4b z-kK_EcSRj$H&Fi)Unsbu&v6Qto-$yK&=H9@)(1wm9-& z>@l+?xJrp)b#Bz&z=Dw!;?w#i;}Vq!BY}RrvZ0v2Fuz=!)xRxJ%q@B*aQ5*?=45EK zE%?NXp{OuUjSoC{e5K|lJZPCR{*m!<-{a%D$HzGBorqMTLNOi!*8}Jy;?#m?b_X&Lm43SJbGRZWo0PB1s%f6OOCpU zY@|Q8>|{?{9TPa{^6u14GAfH?Y9iI-oNO2z*mZd0+z26voewq|0C{1HOQ&Y@;*wDP z9!Ppe;zJb7xaT&R;VW(tpb^y=@Qr(d>K`7yJbbu3IAKRZ-pWSN$VS5K2<R%H5A zV)0ugq5B)}P0nfcmd6vK#O%E{`J^sU7_js;n!uy-uk?~&8f!a3aIllOXTO#c$)fVF z+U!#PMQ%KT<2C5gd`GK9$q_fLo`a&J-%bZqk4`}d(Y32N^mxVoy+G=b`O1bSfODo&^)Gns{@xfbO&!lgJ z^CjRzbAKKVr@0SNs{UJk;I7PyOKtAO?4qC9N0=@<_YmsyoO@XozyFUq_rdUcL31yS zH3Q85K64+;iK@2JFaZ-bvzWUvznr^;SSW3fc`Wo#E9x-bwUMN|I*TrAq?GJk^!z&X 
zmB?iD-1+bvjb_Zd;K!&eYe@8apNVZoF;y18_QsEH3ek@m-6+$_8@0`X6`fK)MEd^# zG_t8zqGX(xK{%AhWfu=yW+{kYdeuqcifMQ??!6ArT(>{rIIlcn>RN${-{%N5_6rIe zw2AGF_iR#=ItO!|mFsaZo_(HFjQ=wKjHy^c>Ur=l7}T}+7n+-PI)%{=Pg2P|S&=eg z^Z|-s>i)0{G`&Es7X}vT-Z5!-p6tL&wFCI(HxGH%_9`rh) z@&SrM`tU<@$TqGJIAslp);c@l^ zX)8wVT)B?pDDZZ;OYrVRwaL3eoO2b7z0;mR2*Gme8Cn&ubjKeQviNe6bz6#z|-Y>JOL1i};(;Suqw*i&h4pqG*F^zEv>T+Lf? znk^qHQWD=HwF1pgyD*0JFg~e55JUu29R!`XcG`o#8IdZ1W8S;?ZgK43xo^Kb{Wq-nhLDy#B7ahj4OUl8@AA8ZZl>@)B z#^ZpbmMf9ObqWe)e92OfDT7l-oB@>{!|d~us)-k$Y4pFHV#{ten{L4|NRShQ z{=KVX=1T39LMvy9~QS{|JQ8Ok&mJ{p3_ngBi8jgN*RS>>bi<)h3f zlP?KLwj6&IBKvH3S4^)_at)*16u1?$0~T2(>+Z^|4%*s^o4hv18uVWcUZoGhS?0KA zT~WEBL!TjUlARA7RVC={ueez=B&TfeMz~oGc{ok0eQRPY9$TxwIvy|@eI7V$!=V&6 zi&iyX75e3XROfgln)&aWW*58iHV5k3$g?Vu1rQs`B@3{{#S<)9ieEK*H`OlZvmLJ7 zsP}<8do0BWT5;q1xf`Tp1r{S%426Kt|CUFb($}2as29%P5YI)h30^y2Fqf;z`$s}# z8XKM0wDKo$iSohT`H2T?D#t3<>ROsQa#xq=BiK>qwPAX8Ny zmPj;VMEADYDsCw)v@m_}jC@`i4NIELQ1Q+s;TSW0+s3A~(jp_hKpFVg)@Yaj@M`bi zTy-!R@cT+AVpa8#FeeghlnL*}LI|LktyRILX&!n5|#~vrgW6fiUA{li~>I)M(^Oy$FD=RHIlg zLX$oqRydD%4QETL(!-?unPPcRw;dI4-ZjmC3om*6wKiw$0Rpi`e((aYP~$;Th^Uc3 z*!sX@a=lJsSFZJ-jUCYj$8{-mwY~A==29P1QNBtW!+Bht1csoqTR6N{FGL%YsvA?h zf=*Z+2;jsg*%9>9aQ^L$0~;+kSRR90>EJ>`C)dzU-osmISDwv%^``q| zA1Y`#O_UxSuxoI=!s+^^Ee85-aYp_8y2>Hu3NJ5C01-#%&^QyIZ1%wmyXiI z1N6TmGEfISz`bv&dmOIRt;{7KJlOXiClLyDS8d3 z0$~^+^zuF3!A(HdkI^+s(K>xzXp+1j!scn}hlva@GI(3A=3Iw~*wPe+->YS>26Vk=2gU?&xX!F1*dxq%O%4H80mxwf-p7ruW@D(WzLy6NR?}zG&Ij4yb#*xq{$UrP8!bOH}~J@ zsS+VW8X{D=XCRQW26VN&%5e8V;Y##d4QEEUf~hojB<%5=+z=x~w`%$7MsWrzoDzCP zT`iqb-P9@749ni%io~?ynBa?^G;ifb2(u(N8wJiVRNGsHj{TblH{~C+dz)=Q%}HtK zwKGD>Z9sv45~s?4N`rLF*&|e;HZ9a}?wDs#QkIzMLnT#n*@Z2^1L)h|jcX zp(XE@=HVA7ae~1zs_%+SfDFCzudp;(V`1l@v)l51XX;krO4s|%-8=}cBl$qKkg!hG zYDT&|rI88OtY0w(jG4{hc^5qvWQ!sX2JOx`PRFaqAa4W`e__p|^ zE~z&1Qc9~CXo$m-LiLtWDd|+*&Y*Jp7?UujfGrL*SrJ=+Hj15UfDt*s#2bcWQAY_u zD3MqVB_x5~8q6%PGcqIS%Nky<6 zUi5fXt?|Od_Qt2P`zBY*mGNl!_yImxq97zE9*LKrasNZ=RF{i6z^l?~c=a5(X9wt7 
z+7b_J?xqp13;R0Ce#BAne1PEMNgiL7|Ivi|vu1VP@xoIWZSVhkPK@v57n%1R$%QyL z4UaT^%&H^8E1yS&#G~}XnnjiRe5FVTlk@bp3RCl$4^Bfe(bT*c`>G1+a@3EW$B@!9 zV4CCKj~S=WmQ6p#G+lz7snsQF^lB5f-fkzvNz@P@4kmY0{vZ70DU}%xg@MOKHT^a! zKdj=W{1=ARV^{aq!IZkP-o)y4c9q;Aj>cgLmL+!1>U!{P+klhn4b(H*xcx#!D0POv80Z^lnToZks&N;ZI zT-fSeE57EduGaS#6`x~CmL`WZSqM8t5gOJWYxHm0-WaoZkiF6BOIgc5hK8Zp^A2=o z51rq|AGK;3K{*k2TY1)W{7)RQ{0=Y|W>snvivOUKf1xtU&*~^!8|B|2$$@HeVhsLg zf2>@yE|rczfgI}7p|*8wE2(-3wt>MA0U>YaOb6aO#Hbu}a4A)pU*Sj~%5|XDbH7Vv zq?by1S|OrF2R-(~4+sfnu=FILE1PR40ZrdrGUY5V<^4C8OgYnOKcGkF{%~kEAnV(f zJvz)>-)`Ph8o3P#ilE4ZG=NpR0S=8qf|O{&KXhS)3X01mKQ6zHr$|U&eu|`ch1H@d zVphHIM3lY!PnVq_UIs{-pzOL6MBD355D<8R7&Y&1^VT>$R%n?X(HAR2m7#RBHh=o) z9#Zu4_cAb7RM8aaoMP=Eq(X7fMQH7&BigTZvrJ{=p4KGvWXmzklb--qE8R^j~ zq?pSPDSf={-Gda_t$AV(SLq`5uD;)T2PNYF;XV7FU>cU(`6ZE%b>C~c-o#~s=r7kdUX766bS+O*NQM}miFgq} z!~l*6AlPjL%s@tf0gZtRNWcw1;@7!knq%vgnXQXF{^R&{Vi=*81kn~tsc69&Hp5>b z&Y-zQ5V8KDo`)Sf_^;9zNO2OGnZHI>3Xq9PgWo8KEo1YC=SRk{C{_}SPmk` zSS-!rBKfjD?nhN<;DK~ClTS%E5i2^r+LB?cIL^xJ1mmB_0xMSML4P2)|4V` zTw+o8yGV=j`?@`j(j%0y$ZSmvCXz%`siU=zkrT%t+1`Rl7B+hqB1 z`_fGVbUrosbyVer8~W?`B5O|hzF0F@&fG2%ZEM@k{V7b7sg1UO`Y@DJ3RgosC;2QT z)(Ks^8G_O$38PLvpJwgd(!WJggm)do_xGz%RPVh z0L?yA+75I;5KR>QY$#%h{sK@Oydu`?b*k$Kpay9y=|9>GeaO{4;eCXWNWcsqh- z^s+Q0eR2A)_V>iWDP|i`rI@73Kzb$SR5W8y_&B<}vCC2zk1?uQOPIC-&PZK3-C8iR zhFFy__N-RyNjepyc-F*LmUcghkNWTw1>F@jrAM5MquoR*Mjn>4hiW5E1}oz1)ju;x z$7HY|Si(P1;}IXPM>ubt%x)NtD;Q{#J9=_30V=+=4A&z2t~m)=_usM$>e`m!75~?} z8u?!{tNPjA)%fRwX2l}lek`cipKPjaStIhdGcr=tQHP8TU==%I4xFIJ#GfX6;i--$;p<9o4X1i+RD{KYww|G6>wv<8lGu@xIcv6#s5!@iKT*zmw)S`a*g z64Qr^V+W>FEaEh%l`uf5l_D?c1d+o+^-?6woE@-`9iY`=cXv1c3p`utPNB?+3{mgb zl1{BwrdBJ>&V%u<)m!QXB!o0&%4_3hX=RzyBlb=1h<(2q2n-L)n0NPhtB1HD@M5Y5 z$ex1ppetrlE3=|6b$nE_3;{q!-jC;jW__z~lU>;{6j9rxin0ChgJcTCjI4mP*!spj z#n!zfgIPgLr65|XPo*?kRZJ`WKCSEbJZs0m&#&IIy+n?4c2$a>7)zBfcm{ z4c7qz7ul~`$FfsKa)FyCE%HZMtSao>vb~WA`>tRn-x4e6sMrr8{z-8sILw?rY1mNG z+10l_`j95^h#G}-U3tYAI6Nw(k#S(Q5$zf0z4*t#DF7$M7QW#H%m2h~E{?_y4PlZ; 
z)`ApGJpeo{50E669t4o%`VF~j;{ua^6TB`&!=fuo!MylxpLf-vISsGy?xSmwmg6Y4 z6H<`+ldoa0X0sY}p@>cm%ad_C-9^P0K%ot)1RaIW<~3d%Z~0$=>=@6P9L{{yoRl47 z1(0cYu95#~<(`9pZ-Hjp-5|X2pGlVj&=~BfX1Y8m$uSObC&gkS3^SuJ_^hhlVi;xx zgF2SVKLP`X9!pYVP+CPm;3BYtzG#nvJS&@U0U-C`Nh3;MmnX|6mDlnu<_LMi=be6{4d&v6v&RC$D1`o-fX$bjf#V7R3ZZV zh9irl=A+C_elz$08=#9R5IYcER9p4E_^VOi&j?2&+vhzQ7PO8|Yb>iXX^i&k!<-Jq zF<#Xe`F%#&POKznMB(rE7AMB(EkHPGr61bhC|3^ zBY4X5zh-e@ z-qy6|(1OrcjO_)2LhSr457B57^`p0@uSHh#@%p`L669|ta?b#!E5oa4nwzf$Jlej6YW7JtM3D!`oO2Yf2t8aEgq#7$hVU{%-$__E2i5UlDmMG=vv4mQSGdzC0CiWUC5CpO zh0xxU|5vdXFnXh0h+`-xTN>SyNt!&CytUo~2!Usw$eb~AT1iN(vG!ZbY2!RdW^!vH|}v^?Pet;K?!%8ho$IwwNbuemj1_l&>meb=1gnvpv|?m-W?8^LETf-BWIau!2L1#Bibt1yI$Jtje-?u+nTWQDCX8~(7|IU zo9wq{DQ@K#A2-wN<~=Zi4rX(^*s3Sab=mx;4c1yA?S4Z`|puk z9hLYEd~pBuccu@AZ{Ll@Lt)_W!QgpgxbnKoh`+Ozv%C!hUI&ENAjq1 z@xV?~7=iQp-7llf-D>!Gd7b|C2&XiGU4Xee|1miS+S{_7EHLT4 z2K5v-HKfz}w1>#ElAcN8v>@4vQ+S>~yG~HFd5k@+9`!VwLG&AnDduYzc*~wM<4kEynp|;qi3(AgK|Ipq}rGL2|o7RFY8Z2vNk?aQXbm;9s_XmP2z^ucGFOW${?Ic49(Lan`gW~-FEw8Rs_U!L(s{*BB;m(2xw+mD-C+Twfgl zjK}HxN(5}WH5^C@7&%7(wy#wOjCWLkO}CZ@j5ecy!7AIufgz4%;03A$#}pg!Ip3%o zS)J|Fzw`JZ9xu6Ld+(}VJMn*w@mA7?N4JYPHGsK31F>#zT*!)5oY9pf=80oNo;WYA zvfrZ>TbyrVK<4vGL}aW77%P&O9`M1n5E)HHGP9su1YSBOS)aehP&v+FY>>jBy*w5Gl>U$4?O^3-?aXDIDko?z>sE2eBWdOzB!opt zt&H7dW++#MCm6~!WYHfEDcy(P+q0rBu4a4kKQ<)rp(w4jmW#{4q^VP*fXp9K^2(o> z`mT=ECt2-zu@|OgF&zQ%W#+13KIIPD2t%a~1oRAP*KeDK{>t}oCK}!CL(A0w=x7>R z$CV=YAD;&_BQtJ)fWIj9`DH(GFg#Jr`kwUpV!ys3SeyCB&6E!{>s;Ayo}-B;aJE_T z<+*0XoR0eT3TQY!vIsR>+Z)Hs6l-WWiWrKV?t~!|Hiu9@Eo~l9Px_vro((g8qJ;X? z2-GHu(p4h;m<<&LzYEn%-+GVzzjFs)3_i~2C^qj(ugO0LgI(`Eau(W zA^)8lv%SPpx;WCKp}1tyrsw&wYRWH-Y085}MDhe%5$bwKV3U&Xe`!5K@0{1HT}X6x z<3#>>Rm#zqFRUXlp)WYUIyAtMaEXQj`l22{V_wkb)r6(z#Qg*49MX}*?|JU=qYu`5sVg%u{5Hlve_ z03S-Qn|Z~!E7Wm(y@!FS9ZjJa-p zMEh60IAQ^z{lZ&V9Q&Cca>5|jgXv&Gyqk_wa>CjUNbsqE#7RE60+ItQ@SXXJ6L5?! 
z{){79T;zh~but~S8}1BhmsN{q)hFb@UoDt*s)o#KkMde%1~|}3aD#Qj12)%bjp9vM z6Fl=|F+G`1v4E8ic1oQ=m?s1nyI4e^Trq$VSip(^0xAG%WXDnhGkl&%p?ewmL`dk8 zAV_0sy)9a<=4CdKZQ-y*rqAg`iWI;AxAfC8BufsBSYd6uGK3@*X+oB>GMy%)1&cB% z3l>uYGZ-F=@x^p8MIGkH8v^9ux7Ji{svWxMjDkD5Y}V74`0`e%ouBUNxXElXiA)U z!!_OxypAbtSAktV`luidYLzy1NraCEjV&6~V6|AoIA;M=8E)NM*)S=hm zcE}HU@1`AAt*MT?^$lbSN%-ZW^8L8*^cW8ZcMcNnXIDiIV~>dXJ;Uml!J1%p3r->nlLz&Ym_b7A#j8yZ+Jc znPd<9Mkug=w)YnCDDM_&g%zy^3iB<$qV1d(4Y7FS2_K|hK<`zlG+1u-@N#Qgt5s4; z{P(ONXGOh|*0?tZ4FsaynQ}JqWlAHo|3y2z;7^R{$5#tmIAA6yB>+(ao-|rb-#Mml z-d$RChDI1Mzoj+dO%Avve3c9us`o5rjIb_OkJs{ax`&W;0>92+^cPn3Q@AMa+b0<- z|2c1(99DTihYrNmE*5ylj}p>!o8pS~G}KhsY3PVft_tX=G>kJm{QCb9g3!u1o603S z(Y<$(dz@8F986wvd7h(h9GY3qH~!8nY0I;`6W;)x5rh{_Aoe&XLcVLw?U~;@;`$r*hzFdmOp)LO_3-4mI_QY?HLdZ_?z-)HiAxsDDGfX z&Dfd*^#`#o94zc@=^)LYBK#sX28axTl$++#j(VY^cG(eJs)JM}=gMwOz{#KzL?dsz zo8$#PUIqyAKXG72nF$16R1}%(j(!l`dU%iSYIK<*A|VK85c3xS6TG<32O72$7ot|6 z$DEvI&5&316=T32+m<>WVIKv=+efF7BLn1uF60+#-}w}^&q0cXhT|Ndlnje4#beS8 zEJ0A>`FuG4&t1kN%>{6YIUy8SihCGbrf8)jXt`3&gyvP*PIW}61`nLZh?WA622ia% zHee_jB`OxoOe`s&y|H7(7IkqWnbFiDNEAs$xfrL-GiVXpvi@MTR5s}h;HBJAx%#k? z&_@2EhwdG8>1iul(*A-4{u?jCp0Lc$*}9O;-daSi~7^c z|Ad^)9myL35Q^HGz5%6p3slwA5{d~`4HBw&ZlR&&(d^#L@&MVO4s#lC1MXR{`TRup z@3XWS(9R0Twlo5ZGn<3Xdok$~xFN@Wb9~6`-l4;nCd^jby)%VheD4RY-#cm^A;PkE z=B9ypqYGJBbB9+|v8?gEnte@{*M*Ni?JBsaNt1W9-_Vv_ptof(FR{X6KMDU6DC=*JS27t`(@b=q-rwSn^dLWF-VpW3N@HU`b05~! 
z7WMl&f_<$=imT5(_mT&-a+!GPo97yDlm$80g$A+$!C%ZyPjfziXbrhXtNqDv`vd_P zt`KlBi&q;-ck&1t!z-9PcwsVaZM&jRj3qn+L`6!>kRc+EVn%Ksnd0J@C}=0RuxmgK zZQoLdG}AMg0I+{1lR(udd9P;S9JeKFi>pLA7*PQFGCvI6yl&$^Do%g0cKakhd>i*t zw(HXPT~d}a*=ZQLhU7zfC%=#IyTs+R?j0F<`<%MRzQ6|Ba%z5X z`$5={N=_EpBTs$*;bKzm!9LzWF_NOe`%+PlU>${-S3Ai;13sE6Roj#Maebob^EMmF zsf3$(HMzYKZj7=NL@b)`5_tV2XovgI&WioxVt@Pm_e^U~ezeoi@Gp)J#z@eE3%;AZ zKYHic7J4P0U@-1&XV|C}A8P{{1;btu_W zr@vgjhlcpRBrt?`1V$S2QX=y6pYN7vhZZ#?oHaF~8M45Ogp{)4Tow>3JYDvci`kHm zT=kV_hi%{(>6z@V^a2CW_X~V~*5BE&nGMjwFU4P{(O=JpR@upH0G~YDf>^9DPQ#v& z;{C4G48{T}h%vI6bc~E!K}}-<=B#Gl1UM!WM%>JgY4nnlJP@?BbZ3T;m70P%2S*s8 zo{Oe#I^q@V?;QtcpiawlNI&l*a8|iR%2%PGAL|B58D6d5bdCZ-NBc#GVQvF8iVol3 z@c_ddV`rezv1~04y)c*?ekWQpr~_Uz-AMW2{pcQuQ41tg)=~f|3*PF;c>YOwMiO_gM{s-V&tUCXUzQDh8Ds^YY%(zsjvF-<^(sjWnxzJfgP;OWxD5=T;O;}Kd zS{x}$lls*lD^z`U4oxO@aupJmI($U}?8;b3quE%wdhGsk4i3*xbNXf&Vvx9be)L0?%Uc_jq25#+2sF&dD(2lO@QBoHAhCraKcB1CxS40w~| zy`*-~x}@#C@O@t2C2!=SuSs6XZ#j!rki0lqQ69LxyyV3f8zk?7+97G@^$8^ZMl;Co zY2YXkdS3T1k85w2R)NsrQYm?1B&Fm%GZd_+hF{U4N*Oypyn1~2l`!+D)-NRQk7;aB>yM)Djs9ab!h2Bu$CBn)5Wk9gX|3}J zYass_EnRc;StKPCh9;>6LU#6;%F%!z8w{~(4RpT0Js^+8@~m1S@W(4&t!muN->d8g zKkT>V@2hw>TRgQ=21+bRB)}{24WqlK@w#x^;#*6~rwUA^*oOdgo-hE_FlA{dyB&nX?-XnIT3?$vw*9XOAA&1vl90#6vRcwgG&FE3ICNzL2%@NVg ztDTl7)aU}Ev)(sACE?qNi9Pwhqba*p5xoe%2Fnt-FSO-&4|v4|msvyLVIIp0@Lbja zlDp)+-A%ziH5NYTmjv=L_~8)87f=NHaD(%F7Uw$%39-M^fQpX_Y%-25n$(-4#I~Ed z=)Do)Fvet!sOC6uN(RwFHRnBX;ma5-PSR(iRJncStgi%iF@Q_aO5u?=-IW<7=eZ5Q zMq`^($(UM7 zI526=J#mri)*(c~0^RGeLxu(VGj(iRp9i=2X5SrHeDHPAO{SAi)JqiBRCDLT=$6?gF$w<_R2xfD68gFmky* z2I~VaY=pgOtl0;rZeT^VmGp@9J&B^C{`o)2P+s~Ia^ZA1B0C`;eZUppH{kESPAnOb~C)KQmD(hz@FK9JrZy z7MZY0pJ&pLJ?x!2{Dx+iN_-Oo(_1dJV7s1T(he{orapZtO81SaImZmM6PSYZ3h|NE zII+eFgnSxpg;=I$Or$(Z!!`jP?6yGVGn!%;j*d8>;i0nOoErk+EL&lCl5+d;78TlX zSwWr}&s+yHR$UyWJ$0uP0ykTOf`rUtx_83R9qTx5L#Cwy_l9XnH6@u;&JGjp6> z;*%>EJFFOupYNCS{h~U8?c_(Sl}TONM&rSTVPv(M!INq$f`ymxcY`9Uy!I@wb!64B z4K^HR9d0e4lm3QUWL0R|mY(iRZQyYx;aPRYLZ9bTno$JG07hT|D*_0p0H~3j?}QaR 
z-w6tFM~c7t+{TPeajHRZP9Erp03{n5a*EME-})QEj5Lkxq+kiMn2#_>w!vw z&9jZXG_NQBtWK~`mjo+-jxtmDb#TT(`%r#QPRXdS2c;juB@!9-YQiWka2$q8e>y3Z zDk?ZB!}6p*$vQugpD*=%nO~CE1LRize!Z}nf56H<&*{}SKp%3-De5=uiC#1AN;IaT zp!K7-3+)VU$To44u}8qJGSd;^>4>4WSlF|2HTr#-(#$W!)q!uX)G*G%(U4zcRn}x3 z2}fRLvtza=g2)%gBSlgfLdbkcJYL?2`h(+HL3{8`+YKxp_kIvZqIihw8A&r3v%8X& zk2)}B&7fh0fFbD$elZY(6{g{4J2mdTimhnPxp#FIjrLjTFg92%)xkY7o;3Q%hvYhr z;3nX{rtUNdQ`7*qx>ErN5ki&<@%~?gn)RIY6}%)c)v;i>m5AmyR+ySICHA4rX~n>U zq(_Z-P(a_KBRYlg4E2NO@CYuJp*KY6#*-YiHC}v}rBot3_*=ltsKOTfEkd#QdJA;s zq9!?Fp8MgqJP5Hk0n6%`#zKD{nT8OiAS?ie5>?ZZp>svxK+c~0FG6`Vb!)nX98Y#z zuwX5&087NOU@;}MMH)a^;vbj*c3!%`K~Miw%H1RgQUqPP)WI`jPYMFh=pg-EbEDM> zF>8RO7AW^^lK&QAN#*0)uHMd@G^HA~BT|bRmT zfN+&Uh=5=>WT4h@u|2Z1e3YJ|s8b|m@p50^687T^nrx@cc0!Kh4~VIfUiUE6MD$A1 zsTF+)w<~+$uIxrW(1Lc}mG$}7B1)$%EG;Esv=n$1KLhLeSM^(tb*e)I}6ZF9l`V)6$s)GRhDsa%=Q%oOGu&kMX127otyfF6*3#VZU2w7MB0Zub%X#y|_ zp@+;xN@wY13tNLo z+QJ9d`#cF*G;-%pj#qE5= zA<23d*3G){_9Kb%C?UF?2rDu zk*9CpQylrTAJ2=sL)>H6+fYjKyRzE>rm?-z9*{P1H{bdAKhm9y`9%*GFZw@=#8K5f8>apb?aAwHJZw$7P-_ZKVZ&p)Md;`zJF79e*MDGHcnFMe`%lwDO zY2irr;qJGw2@sGBC5jQi#?%1pL8veS$|glLK;c*!H6k$XN{d1Uq6a}jyS1q!>uxW1 zMY5~Vhz($5oFRy5|yP2D;jj`h8dsLo9 zCbVq^#*@Msm3(G+hDifsQcy$^>BD%kN}}&>RyT}M`?JSbyAtk;M+WTVxG>o7Que}@ zfkd~5$i!`Le0Bam>GEyiGKrU%14f?rS19oGuY#yS?PSU1zr}!=eGMv1ueY%ZTXc%f z*bgw8-LEg#CeEutVpw@7k5`k;{U&f12|x!0m{cWUqE}9)QynImzX26BobQkh^Bokr z$E(Kb4lh>#JhBTF7fm-$xzNJvnLEC(W^ORgcQd_Y$VLX8{}|c1?AlWy+J}qBzHh3- zEFwQVlO&4tJGfULCWtN&x^R2V<%E_ePMAJ*krTRN&L{{58TM(GkSAj&y=bsCH7?YR zKG2$Z{%r!LjZ!C`-*D0|aX6$U3?>*Z!@*PL(MaAg8l|SxM}s)kA{$pK)o>)W z#$V=1cZ?)G(|3A z6*wP6psQ52SE*zo(=oA+SBexx`sjSJ3(KFQZ0eP|2N!*WDj7~SRP}>JuNdh&ysr*; z2cS*ciC||Q(APT;GEo{pPXh=!i~)R7)dRe~--m}R11~`gYsY;Z3NkL}67(Hh8Ge?v zs;9=`&fJ#?phnEU>oBX(Raj%!Vi>*6ss^XI$irN8>O<;gJ6YkH3D`Q>c@GXIl*v7C zt;4l!wGYc_5uR2DZE_U~3ys4X^HglX;wYyA7Qy**;pga7Y!Uld>kULGw6%EvQz+yV zhp2bF+Vx25K*RJPA!Tz>=ot4DPxV(nh<<7=fZQJdxlhBb7{Mxpu0SPYPR;d_3gB*_ zoj&VCG!g)V1$wRr?C|Uq$=(N~g+gb6QQ#UwRhDNAF~#h(Lg1eqY0XYqe@vL@mtvt8 
z*ov)W^7424pm^ zP{4z!(E0$vauMla7Lgvi#v%%uiDzqdl(l)BEwWWK>}BtrYV7GELAy?T=xFMlpQem| zXa$$)y@?K=L!S2dfO2GPV0z)r2-f6SP|3S%P~u@l(knq@{%oszB1<~|KeLejp15>n zA-#-s!LQq8A<0mZ2MPbdJfxUzjh!#^y8A3QS7fXvN-CP+)SWL2J zgEgdP-8f&gsaaLSHtII^*cmo0^-On{#qKf(#O}&om~nMa;<_s6|IU51B_mA^4D3jH zIAz1wDDqs~7L7Vv1N}z#N7H0NlYTqr5n>BC&s(OA3>i!_>;``mU(zNw?&RPSUA!Mh z#@E}>M?pH@ZjKlE4cn3sR#9}&I#dbKsG->@$>U6NK>skl5jbRgmx5V##z4g{rT_Co z_9asKHBO+%S`4-`J0H;>P~uxMJ!K`yaS?)LS{V23#)N2nUy(fU5clkb)iS^r#8Tm~ z6YL?*w+XH;UH}u#?D0DIPYx&IR5*u>ljkxc1uaZSkq-8i!OGjyu*2XhLMa*K5h(@s zJxV2+v>!=boMa8wu$_Q>hlrLzg+#-UK;W3yClKw=TtXnGjPFZQEfyA<3}>gfi-Dr! zXy*i`ABpB@Qev!VKkaq+p>gmZh#3Cff!tO_GK>g{Jj7H{RMzRxKGH@0ZxfbH_kWlblGPu~5W+dyA zIee`aa40J=gDnZ)Q{$1Of|K{DUAMh;(D5#a#{E-SzeDLQNnlps z+jKc`SQ{yTzJSrd^(R7zc5F=tZeu@)->IH>kSd!bawKpWv5Nkh$q`qaua~vzuA~|h3F9E( z?Hy!=H|3DtM&FWyj(Zc68eUM(${lE^78CS*Ph%PY`c3Z!P*h>RgcgZ`4oNfX=xYnP~Z45-6g0vgYy%=2|+&f@+3w@ww0TAHkh9N*gcK8*eQN`+TWWT(QgFm3{8GU z*Kq9f1`2ZcG8r4M3Q*`hY@Fop&b|5L9ccouchs9s-o#Y^euJLbmc90_?7O*urc32&C?nVI6JA=0huy8i<2Iba-9Up1zjRc#NP{sM4@paI}*8JpHgH8x?NPPAv2t6G$2^=kS&qgRri$zS&W_8!g)3~ z!i9=+aR6ots8S~G(i1N-_qD(>U`JI0vzO{OEI>>v{AS?ATqb_}Z`nE(oXcnlRJcPZ z&{F#3KuVxEH!EF?BWt8P8Py-6eL37*LQ;Fbiv=%!3}MIG#F=@rkzdfm;!^^%alt$t`EQRXPjws&eQV`=b^CC`{P7O^n+Fb>GE>RKa;>eE1!PJOTV0- zqI>03?OnBcyfKlCr(-XE$yBoirl@Y@RdfW8s5S`{3t>BfKoduaZ1sv2cu|LNnJ%<6 zrX8NQ#*~)-9BE)BnkpoTw2ugfCwd_x^$40#%F$@UvGo%!Jg!6ISP&V!KTFdFq99s9_|tUnm(k}t&4Zqny0=5a|Fs@U zxE?7EGAo_}&yT*fdVc(9?Qt{H#!~>lz47xs_Fq3FD0VddgNE~qJQa!C67BnypT>o_S(2`KEMjG@Rjk>&YD=><-k#`mdy}hH=QnKF zxMt0|bvLeCw|@OP{R#iwsN-fruOqAGYpTnn^1-L9vo`WiiEV=R1%&A#IS~$t`h@vq zVA)pd1Arl?Wg2ZAHrkQQ!Rhat{3=M(tA8y2@UqX`>ZuQnr%EbF;XzvG`MWy1vL6_< zcV*uZ3x~E3YyCmq3gvhGxO7LbafZ?h4Iru9`K9{lf{~TwV6l6DvHuscJUDLUhi2$*+*v zWClVe$n=#~HK)*5#8;Ew8x^s%)Gs>u)u`YF&pL|EZnAxO>kwuR^bU=?iyNP%@kDW+ zrYmwR&e2}s;c1RU8s|+l^5@(_7!LL8U|Cp|VW`RX65rK9tX9geF2dG2hUXs)`&7hnc+0fZG&qnv73;;Iis=RR2Bk#MIN_4AKHYOj57+dBja4|a@xZhcUPdSBi>1&zFY(!>)j(~(lQInZ+BdA$^ZCQ!6 zT|Hf67A(=~a&2O-#Xmyq)@tE<}^M|+_n 
z5!{AUr$38fJz29qxCv|4F0HfG zZdqt%iLr*8XhmbKv}+~z6&M_`u<2~nW}A_sR@!WE2L8R(w%c3%JzRM~e}%F$!B#_a zP8#j>MK6BIrfgH6-N9k@h}dgiXQ(6Kt zfcw0MqJ2YK8+h#mGq;jo;MRG*Q-Rz&XyC5iL9;EJ!%Y)LQN2XGE9%b2;y6M(`qHfh zGh-Y(AL)SR#qP7yo#;Ljx=waD<1hh-(Pq6g-XGt^>B14qO^S7buUkUufEIJpQ0gl&J`# zC3=t&PsM0kHn)_0v0TxEeua`wi&~>oSFfum4Txu3aSzK^4Idb#J2Yv0Ei+g)Y%SET z?x!9dv|fwJVS1az%WzIs&v}*+o-Qq>i1?J9>*ae=~bC+t)bc0T4J^> zs&7#aU={<(Z@}ovdteYi*kL^W#;1Dd=9q1}CtoalQca1-~E88EQ2bhC$@{E^U-Bgkl zf8+k1b$?+k|IK)PDzIjRsc}~Ia56+wkwkiGQ1Xjyi$mRd`rRu{FZFh!x*VFFtbS{jDpIl!yUSPD$1W3W+9udOoVH6$ zzFb;83-;=;@Ii@?%5D(9d{VYVv@Vfp>UF=Jd&>-G2v$DR*;I~)5l-7q9rkvkVhBrw z97cmjF*6*+0ltb&N|+P}Jerv>nvhC1lps7jW{$Y>OqE6n^cj(KP{TP$w^Ax%xF||D z3u)~cE@m~{fM-`x0ncZK5S})KvlA9`s+vHEDnJ;@(-=ZWyGrv0b~D7VH|t;|gOA9; z5w1FKBGN_R@n0la@Ypp76+7t~cWotIXVG9OmCU>ynE8;Ea|7-^{`jN)Hg%jA=`WEK z6#Yc?B86j<4dYN!pi7}Hx7R4EC5W_;lz4pni9y-5Sf^C(m1kI?rBXeiQok}3(?>Wv zHJo_4@^d&mnUkQ_1W78gn;*ZkF+qolLTp8mRfO3Vv1X9_=g03%)^eZoZ>%9%UQ3_?;eMTjujD$yEzc`>09iuOY*;Z*8xXve}9X=*jqmr>mP zY2wjJ>c|6nhm?#aKQzSReaMt4jS=<`TPL&tOW4lqm9&U9C$G#sjSq?uk|#nY zA{R*2;m6QXL#ybh0cyq~0{dX{#Gvb8bs3i42M+YI6LjU{kZ>1I^gEBnF^889ATwrO zvHSZ&g|UxRJ$_`QN6k!d0+pVjmK;PPcsxUma=+-D3)*(N$GS;VsY?riVv3U}KOrN; z9EjYgJ9pcA6Kvk%RCPZ!IW6~BAitcqty)cj$N1{iX}dGAI!jisw&9MH{0$ zdwBYi`%qEVvX3{^`w$D=K1>M_#$?UEQTPeQyo;-_rH#IxbuUkOsBfzGQo5tWYv}Hw z;ZW_7VnemrgU~1Mu-L;NSl0&Q!mX-HM&vp*gk8eC?i>lrpD$f|mPwd(KIX^SDsl{! zyLrjVvcXK$U)01KT5>m&7ltzO8NDj*ByX5JG>!D6!H+PUl1GKO*?FxdC8IFiUg5#A zxxkJkLxXQA^s&A1SqTW1he$hi4o+e*1m(vuNo=v;eJ)sCiDm;^XL*;*@~~w}rpKGc z$jQ~bP@IMA*T6O}8K6ImBO86Qa4r*3=`V$9nV}CvY8H`l^oe$l;|(jZDK@$UemMy> zH&1Gg0^!f8LsmV1b#%xwb7RjU6~q8ngO(3tEZv#yG&?EF_SMJBxh1e(T(iPjdPFZ! 
z?hU4G4PsR`G{o55KE+Px3B2^@1h^Jj4f6moGR^og%U5b?Ydf9Tz35Q%?jO(+OGa5E zfqrnF9NP(6Hc#cFP9G1ghB`=k0>|!$X%?FCz-uS{&8UfNsBb5gp1{+Cb zWK7Z_>ZBXA2GF5-jBKThDAcc6OBiGK18|G_7R0l=uOK0?b^gnb5J5NtPwF zEs`nphTh1|6wuO;Dgw@zeW( za=QKCeGg7q2jh!ryIC%HA`j8INz<@F7>lcAYE zMFX6QnUgM@Z6TlH4Nkt;v|;0>n>LO9cM}G5^gxB7mEOc->_)jD56-+jIC8zFIj*Ld z3v_9`%DI%MX?|SLm0k-#F8abCR%YXS2|?~H!1QJ-#keenBLLJ{DmJMI-XtZ7VyFYp zP2FuFSE422p)9x($6t$&$5&>_n?1`nRY&FTS1-t-L1E8ZyxX@SjZ?o&G{nV!SC0K;g#Bddyl^v z3{|hLU=6;zX#%(AkIJ*DJT=kZc?Hc76DWIPSNjLv>KsD+GamgxMt=Yv`1Q>9l%{HC zOg-y}Ku?w2W+Oe9j-A*u#Vp6|NyUp(iU^KNgLk{1C*4mN_KA^xO0Q4s=g1h)Y5sBX zl>MAiKd0T#saikhNBSvULb0FIWk2@+;5WPUzS0W^ZOm6cdUpD7|4Ma<`wP{No;^L> z-~VLo`RVFM&+Z-W&sLXsex~}-vrmWnN2*KQpR0cK9GmBUgSb9)vK-wioGaH~vT_rL z-D#4p+{E!}nq(_C!JXq5jji0oE)sfauG~a!5P{oTxrzM}G#OvHiIsDjv{!B-omVeX zO2i7NL#wmb^hAYlejlrT=oH)~3(IQiwfLfSioi`*WKFH0;9V5G24-0LxK5&8_zH(( z@pXISY3myoRlkeMg&d)XA>6b^6*gRZH9M|Pv{fPlRmyCR8}vl&Bt5938N-81TxIQ$ z;tda-=5f_tsd>cC=;5<|_zZu`#eSZ`V?#EA)>IKDyy+}Cv5lCXL4i5-pd)5ce%wP? z=2u2~s0E6}-7YobGDnxOX1QOMiUx-hkgvtQmipraRB4u@o~X9-u47}45`b5^@d5sp zp${d|4Z*j)~qJ9$n9?_9K5#ksopx!CT{v*@WfJ1JjD zc7@b0tPJS}a%l`u`NB^g;b;0907@S~!6UsW+r7E__N1ovO5x%bn?<^$Tcg~H4~wh# z)b^rBP4Z$v&bYBh9tw_eoDt(`D;sOIn%1htn#%%lDnOf{8jtaR8-A&-tld(H6Y%yv z$n|_RNV=0*F4v^IYQtrTYVp|pKr}r~kWJD zvwYbZy{@+#4lL%Y9HrG@GfOuPk5u zPc}M@u=?u)>@_>pD}>`L@RWE@O3fwa&B=Y{1JA}5K6=^|#jM@a_&R=3Gu~cQl}Ivc zR$WQBuWYaARq! 
z^b%+y82w4r@(S-CICF}u5xsiJ*azCA7oFgE1S7EjGflC3`9HYX20*Ssj`z_KF**6a zN|{z3Kyby~CRp!j{42eO$uUz@xLrOV?_Z_##GUCYxxm)&|Bt+PfwJo=?>x`rKC0?g zNk_6I6kE1$T?~{!LI%qZgabMrKVUF{>1Orv%&>asSxXg zqd+C7lM0BaLS$0uD5M|)7a(u}0xF3@SAxJrP?CZOQX~QjqQLzA-`?kS>*`@dC#??p zhN-kYc$o{DuFWE*jJ8c}&S@RyDym_Ooe%hxLKm7dy8Vfc{g~r~!bzZ=R4As+Oz%zo zqA55|Ow$jS_Z_o=0=a&#q>l#JGSWnHn+j3W0L+G(;>6czce}YGOv>&tgl)o^R*XAN z*#5||uH3Lrxy{8od7t9CWJn+Yy5Ffucl==+b7(iZz1_->lx+j$b1l@|8n+GFrrX5v zZ*%p-VRWFu4HNvxh8}012zS&0D%*5PxwipJpIKIs#qInyJXAS~ykT0Z^>bZq;vi_` z)@Jff-yDaO8T#zHc_1WB0mH4#8%Ax&(sr=qgOXWi=F~(o!BGHqvK?^%+gf4ZF|I^s0o zxHC{~unk!!V?nU>j|IKvPo-p&vo(Pr&X5ZYpv*B>3#dKC|GpL>Jd~$0j?hh!| z-9wD2neO8=PENq1syUY37HzVU-*sGFO0?yMrrmLEY?XY@2E^=5TcD6`5lBIOSca~2 z(RC+GKHoxvX=}Na(T-fR*>W!fCm_aH*nU$-2Z`jJ?;l{>ZP@OHMkZ zqJBNY1H}wn{%7tv_gg8*z-C2eBc!?rGF*QSYnoH4@Era0m9Bra(z+_wE+oQ^Bu|95 zh5v?dHxl7tYNlL^!tdk!ZKb2peRuE!neLXbhkbfFMTKxmV_}aF&DG2dwnwI%hFvLQ}5VncQpl7K0p(&Ihv3fL1#(lPt$^?N!iw5VS**{>yjdGCPSt!8@22FB6W zd|xD-b^>Pu_lz}M(-U%v2eTluTxR6s5Tp;3OW911`f4x9A<#}9Cux+p9PP7 z{i9Z2kzhA2V!xTEw%DKhVBSyTa+b_1^L`q6(Ag8|-#Jd}n28m@1vzDKW(#o6R=~*; z2t7E<1`Yw_-V$~N?Q%N@f6Y>#gH@!3NV_&=vBHGSGVM_hFr8BuO2jQLQ?}nbV!7NE7Vydxr@DB{5sbU7jtX?uS^4~Z`}JCK9zaS-x>RRjPu4n zNjqEP*@2F^!dRKyT{H_$H!YmnyVRDQj4p^Zp%Wt}e?jFP;faB=D;+$Mnil8x(VR%m`>)(!=xHzi*-oXv0S zY;@OwJj_iS8|{j;(b3%x4{zXyrWM#j9CNo)#lZya4g1=g>{?Z0zVfiwm4>~fVK0@3 zz3x22R;^1JwjZ#rR7B2u4%j*44A>Pq?||KWsiyhT5IHHdR~`mhEvFtAncz>@FrIYO zBZsu7x5{(PLrFezMCeViGlC5c^2%rd`r>YH;!Fp)4f^2iGcr~u=vS}iHxkn=GXeUF z0*dz5lSQNN3Ew29bYpssL*~!fF=f_&@)fSZvB`3dF%zvhQU5mJxDiGWVuiD-hLyP6 zu9ZnGE@z2H2qHw2nOHohI!;vX!LKc0Y(*Gb;UrDH_c+tNI@E1xKA8<&BTOG^X~^fm zug$vu(gyTUtE@(W$;5ulf9}^OI%8jpIM%MN-q}-!2R}jypQ%p!rhuK_I5T;jSh;#|l3rNr6 zw`jM(!Cj~Ioq4KAmFhqUygh4dF>mY$@tMNvhy1C^(QU6tik<(jJj}ipE6! 
z!8A9=3mf@fW379hFUGd0Wy%>NC!aHmdMfKo=-9+GPK}9y#hpyU_{M3ZIV+#`#ku2X zQ*Tz5b8b4$i5JZ-W~{s;G~r_JSfMgaaic`*34XcE=B7f~W%{jFQhMh6i`V5pniF`( z5XYwW6!=tVP04N^v~Q^Fsna;kV|0h^QY-10 z)mmj1_YalLq997geM!Ua*bvywrorPwQW(szU>=k;{LIUve~!~ z)1vA&so!a^^;I;zINkVVgENlz&^9}hw?;H$R#_IvvEl|@ zl_mF2APf<`QmM{{!-Tg-(ug3QnHz$x>$IEGyMG#_VsPZ0+UUXstq@1;2EeM zYyec0(}2Y%=!nz~T#<2pqd=AsAZb?zV*&(?bKPeErfnfbr)gS*412D4G2QU3z0p40 z#aVH2ahApGH;aA>jXJd>-?@ZOA%Z8EtPa6Jk5N334w?@-^(ChblhJO`h=tWnxMy~w z9>;`Gvp=2TCYyf4sTnzkQ|X5?PF1#ysx!S%MgIm32%_f}J$q!J*9BXf)=#<(tpQA)f&t(@9j%7UUzw#vQB_6(W5yJe9Uh=_o){31+K+IVw zQ~S~cqYw+a2Fy+kKt1J686XxqH1@|K7LZC}0ZkAKzSNSx%x(??x2EQF}2Nh1% zgarMqAaP&YJfR@7^XPAvh3&YlrErdg-IeP)kbcYp(F5ttO8>(a6L#m6bj9BPT3sMm zlNNiY8}pQopa&G_IHONE>?oh!+E43|ZW+VM+uv!{NZjQ2Tq$G8($917;FcBGK2U~v z*3f7j7_l@Kl=guZK=Iw|!^up?ENs2cO1u}=WP5166zGvhc+{=y3TcQG+KE4<0I*yV zt`Q})pu!gB!P_f`dGzy=!aUktqZqY(F^#Pre^-e?A zOnTEJr;c;0Hpx(l{QFHgYo_N6pi|*)I^pSLo}0}Wy+y^ucy@5Ul&GfJiIP3U*!6r} z$1$`E>!7cr^sS;{Bl#QP@phBRqtA|^7*j1fXVhy{TLe%VBR*q2k8Jqp$4u#oTcw&V zo~ZdLa(sw>C_yR{8NQ**^5vj`EL`iU?p8_&oi#a3xxYV%j3HLmn~&0CedOHwvQuu7 zB{5jf-amy9_FW;!NBJwhMTX?_?+C}P=&Dtgp?z0)8LF(P?N@W-*4+N?<_=s@cD*tn z#(7H4yrLqQii4^@Y4s=UuL)5O(V;7f{BDzKIN6G+N?-Cdo6{P{9J_+_I~`Mj2=dO> z)(CZ}nDA$2&6(1ZABWA6XH&RjT8_<}vJSc) z7Qiq=2xBG*PqC;Rfw8#~zB~h-fLZ1|93)Em$ zl)qX$;wyi=F#fFx<0*tOPgir@WNJ_K=c$?5lK#7fpmYz2fz4U;dRl_m5;uJ^g;>G_ zyWcbp4O=Lw&^KYwY&L_iSs6t-T3Fck;FD;3#$Z3T?tHKxC0v#srFI7Uz$L)G zzrc1E>@&7FjWVn#rCD`y+(3k7_>7Z}7+l9$=N3_&*45g{_TZE2MCbEhk43(9xn5aN zi>6G{lNv#;SLPK1tmu_Fiwa(J#&>aF_9A*^Zk_bXH~uB`$~VAXW!m-=t5-zlbWs#T z8L%-W(Aus@e{g-8xFm^LEGdrny4;jYewWr^rWMuZ`y$_`EiS$_nyE1r=X*L&Ejf8} zR(qPiT9-?{X}oq46I_yClm~{+3TD197WdF9J1wXBh;@pGguR|pM^o$V3KM47FAPul zm-D<_(94DOds3PkU6T6$vk{FVBjy@JN?)%gn7fO=B8(69wXwO9$W zxpb0){75H#eTHEM^~G6Ui;U3+{Q^ zUH7pO99BrUUyn@7(Z=46B{8}wikv?(8ntH6_E^nyeX1Tg=@z~s&yZ*uzKYTo5iiR+ z>`%V!EHDDHK?X^M$m!tLTW$XntR!bK&m$X&q{v$|u?k@bn^-Am*X~!evBMqej}dZg z^MLE(iLda3D|6EkwV}(cHt$JRD6V@IWebZIT!@U;b<-mLgd$~A)KKJcmQS86GK(ZM 
ze8~M`=A%g2qFOz~_o3v&>rx_*5&+#xJ6ngydAyA4$t+$rVDYjgRU&1JzsmBmco{IU zu|exgL?D;zME}T+wVfOdg%*#fS7LtFJ5U%*#2)CsMbCo!NL^TEvAaIZ5N+$lfbI(1 zLaeT>*kLJbMsH+e85T{4DQt!UQ~Xt-vjQ)mf|N;$Ds*N)=Ie?VZV^GwA39^XH4oQZ zFm$FR5*mWtK&HOIF&5gi(3yqP6xoXvrg7I;K(-2`S=u32%)uu8KEj3sHMqsoo`E`D zfxJHX-jGOtdzthqw>{XM@!YK%=u1GgW7fNcN3xt7mh0R|xlM~6KC(_E-X6S4wYc7r zyv>I0^9H|y^kFgnRLFi~pXm;FW(o8ZKjtvb4rQP zSwCrLd%TE?Fbh)_X5`wDTGbK+#2$3S{6O9{4$yF|4@G7}|y4L5gw;#e2#h zB)C}N|9SCVBRvO~Yi8ws%{ys0$1mf5!Nz$7o&Mx;F9(tWn57~G9td%-7Xakwf&xGm z-C2$dspzxPaHr5{5L zI0hw^;J-r;_I8&PlqgTwR|^}Eoq~?R`jg++fvcKtb7xG2%T}sf#>um6MFM=O(T29y zCx5ig-zP-xtV|Z7&;ED2xoIb zP$2G@ij?6z6Cq9;Y^u3@?ZdCM35n%iYkL47JF#n|GXVWjkmHiB&43n@SD^4{X4Gm7ZoiF8N1aj zQ_;wn$WXE|g^ZOY%-GxHi_+XHlYC6F1&Yg(rPZxQd(bS)`30+Gt6M6i;6184jk;I2 z2yJILESsR75@_ncOQ8YiK}9X(pn8ck-o-erE!hMwk*4HJD_bS??^d=rjl%Ae&z4@c zz>}<$Zsr5o$3}|z4~hoRVuIZ*Ig3w_i(F;#L1)=6#NNKhd0D%!z2QZuRc>z*qOct@ zIo)wp$l>|ijL`Km8J`$pj$m}E=;f(P;-1pls9E3(Y3TEE&lLFhxM}8I+~d~q&d)t= zZB(Vsv-Bf9kb7g7$vqC#e26@E0*8Pr{-pKGziEX&@+4I@&^!o1VK!N*TbPryK~AV*waN47KyM8k=u(R@2>50nj@43DGrfTwj+Ty4sL8ktYXiX04J zNPI*QcqSq}us2u1-s}aiCtuR}UuO9Xf0`XVHT=zjzfYW3ydB}rk2)vb8R`amqt3)z zh`3DgcA#Edyt&JU3?u#+@3NgUpBoH16IN!;8=4?!bb)6`+ZIg^#1#I2pEk?N$ev|@fO}S2ac?xkPugoV zmCdPO4h>-y^SX}LJ0QO8elS0Wz*`95)w5}Ay{&+{v79h{j2n&gi@7PG2z?{fgDo5S zmZg8_Cfkht^c?nr0|~SFo$G8x*kkA8$yMM=q{=ilhcZ(!PWudc?ZXC@%N4ar)2g01 z3UvhjiuLIgrx^Bm!$#3Gc!zV%q$TV zGp}h6h?xg`Xl4-gR_14nSxA|6?L_=MkZBb&U*Ho}z@ZD+T=BF0fS)1A7V4nBT}IPj zTZ>!#MvvzFJScuPJ|7f68)pyZ{5)9U=Rxr^s?@^99EK*ghM)b>VU2XOVaFpVra9gf zI`rxCN)#Q-qm4DDx8${3xO&Vg)P2a9Vt|#!>euMdPq%kwZmo;qxHOM%`O0%^#a9-n zoUau4dlyC|NPlvzO`x;<4ef|k`bH7;T_Eu}J+)R*8=||k(w7`pX2_y%P>;_aR32py zc`3PrXnBxj1Sw*Sw-)FkO(I&};UK~sYnf$bZNALnGbWZxPh7^tQt`x=U)Ea21XF9D zSj03wy;)HMqTg%OXttXETIL%f)$8Q$DoAa~EfX%+wMUA`!AfGR zG14T3E#GirNb6+T;=pOe?w5E+1Ydtyn4&Jsa@H0S&J1bnq?6=yP?4pf|eE!@X;si+ke%myA2{C-97S18Qnqo(i6s<;oN z$iM^f6&AyXqsBH=I1l-_lynv`{beN@C*QLbXQ7-lQtvPQxKoTZn5UjeSxGS-0MM>g 
z>i;gu#mVA?i$g|qV~n)hpLWr69LXbuzx6Tw1DG(VRy0qgq41pKxwdxXiw2sQ7W>DX-N zouPK-H+A}@mGgzOo`|u8Gxz~Aw?Q4vqb!&k$aC(`_yKwJWX*EU%gYx16=$qJXOMm| zjWJTcye6hG&?EQQ`{nLw5@PzYmi$x8pQ55E=xR{I%!-ycg-s=5g=0CVSjz0|#W`j1 z!kn^jxt#KYe=$z^!T$i9^23X93OqrEx96)-E-cP=>Q>mw0_?_CI0#6JSZNvF=}V6^ zvKT4mek*qo6Qa--niaj33VC%Jz9D(HJntnQAv5C}li#%y@8M?I894y68!HO2sc6bk zZDY%|;><>`{m^}1(GGV5!cRfxRCJE*n*JT|$$>k{lUnkoLBGl@0k4e8=IAKAi`hAp zdCATp{kG>xif$lzh&kRcd!8=G+S#7riz+&xFhz$Rp#Oe)7B(x^&M7*KQ(4jJl(w>> zi-m{W==DTMrs&Z7g`)F@FU}Ml){*G8->if;iE`O!ynvdr?M zPSRy@uKnV&R+RE$Cafcv*-uA^jnk~K zPQVVP`UgB|H52M>!>Xi30fX9lvdmjlD1C7Yy(;wW%-UI*r>krdR@?;!npk|Gl} z087Rrou5>Ki;z_7n%cNYWMMT)i7u>u2;=vpv)@EjqO(~ZqoAi76a?h+QV`0Lybd8$ z0@sFkp}xT*o&!ih4PIz(nT?BLL?a}!I&|NbWMs3KH|g}Z|Iq6Qv>Mwi-~hsegBB6M z2+DWy^A{K*$R_S$9(va+*MatFKdDt8<~mqadRU4$hdcUt@}73@aVI3U1DELPfsS29 z1xMBCK6f4}*@<;@u@uMeDJpvlzG6&TlMW)(w_EtOlh2Vo$(zMX+MUDkcDHc7i{%e# zYQc#bt{WYz$f_)B#mK2XOxaa*Apj*pbZR7LyC8t2Kf{&;9Ccj2Fcen{4^{w$sI<cF%yRP-o<$e*+lQl4qX_Y$Bt#_?qNpT^Ylun~*0O6U#B&lJf*9@E&mFcs{b-81 za;6ZLr|Ef^J}o^v{cw`5OBeY_zP_ckMnR8F>BQNwBzgDMtx{ylkB(q zbR2FBe$~u%CZuhSpfWO|M}y>D&07QHSb@)&8IL=H7WTEXScFMK0Rh0eJKOR9>ZOEi zD|Pbg0}SHFqlo@K4r3v0o;wR1*32={u2%8C*?=IKBRy;oz}QR9ZZqymk|bm=KN$+8 z+k?B)#2tVr3cIr&?X*jlsYt>k|5i+CNY<@1b-R_OZU>+m>%AS;+Y!E|qeHz=IYK9V z0SLO_r7os=kYDih8`a9z_@0~N*YliAL;mWrmwL*@FFJF8X2x9F6YNxOXTN+90Ye%S zUzy-46RNJ0H#-nBom57Yj@_{C4yZK!jr&>?Vfx^GWQXuGJ!HM^=WjE43z*?Zh1EUM z!66WLMD-cPv~P!lKaZ-78EZqgE!ESb_Lr-fl-f&c$9lFlLu<3DdYs~hWxmhmRmTS5 zA-KGro(MDXL=Lt_$3q0uF zX7Bplua(bOJuV?q$=`RsxHj$JVJp>erAFuNSKXER-K8rzez)Z7*{H=0|YdQ(HbQX#kQ`L~Vx5yN7#Cf=#DEXnz7(*NL zv#m3vbvMNc22FvK6NO)`u5OC%;TJieN;XN(xJ{<=xNBxWIVzY|Oj%m+e>_tq`_V-m~K+Rt*x_Zyf_>Uyp0NG@X!p5LK|96XhQc0Jwf z6vnStU+gCV+dDxW*SAFJ=9dpObwNW`c#h?*^hCU;BQ(!3#jYoZ2KcHaqtLYSdj?=b zus-VQDM(7QdsJj!Kb-1VaN=6;z%aw#tG#|zp z$&NzBUtys+4FE?GQA!@h>9UKpILN`?Zsx6e)5Ic~a&N^FrEXm0j`1s&%9RqJ0QAGT zW~0aywR28t+k*}^oy|$vC`PD&`-cClKb$7=+g5Q~xgy6xQt`QI__M>I)EG#9AbrzA zy31#1&@P12QSfywL{*R{@J$f9jv&tZo)sc$sa62%$;UYOnA4yV1>)%KHR+4)OE>FB z1^X 
zXH3U@D08l6#}*qwj#xYUEA42fRPKh7Y{Psby9BSl#i3ArkK88uJbYX5Z!}G6zLJU2 zX!yVAh|T70v+iMKEGGFSzPIJa(r@O2d55u=6{gMkvrK*zF1iD0nhox}pO4Ydj`gf? zg}KnhwPNBCF33Tb|0ic`rt$CWD0H-dRI{wRK9LW0^68lMThjr#Z?QZ8%DB5Gn5(_= zrs(xNWnTh+b+uk3ox+O;Y%Z9t=n>_0my$dx3nSZ|cl7?d-jZnU)zy8pyZGn&=SHs@ zv$YWYO-lj|uymW1ZuOI0;P7~VSiIX1R$~tu18uUE2wDRJK4xflQdo5*jL?vtfkD3Y z@eM>_xP$=}2H9@wgIxkppsVnWz>f-lGDKSh4q{!(7XwgiWv z(^hh-xWh|116c8q=q2dxNXLpoM?`&?!A8!(;(D$Yg2{_0`P(h6=R3j?ALzPE%(@?@ zF`}Z@6&5;3!#nS5y&`uk9Il-W)+7#_ZLtsudGZ+KD^7$LlC_AB-7#!v8m9Rj$hgr4 z3_$1*z>c|u>=m#Gd#$zP8gk50$~_Y8KzFi9vD}H@7PNMCL*Wqdi&!3cr#7Qxr)8|M z5}&YdzQdPQtETSwD8c*|gL%N8VlS8paopG8strwv_&|q&x)Sgo8g$daOT6f_V8#o7 zbF7NB=TO#&Gcg)N>zW(->X<`fFhHtpKX}L{CvLrLz0P3mBYDgUFct2;EfacxiIM0v zGRfFVD~FMLEe&aJ4mY@1b6L69sT?*m>+oJmNH9xxURryvS2=8J*4_h@khD5sj>HF6 z?u{yk1MaRm)zMoT2+6=)`JUfJYRZ0@iIPd0NWx__d{5psn3?Im22*+@pKTLf$z=Dd zl`kx%guAXZ>;6on{>=`QbR%}aUCfaoGb{c|l}1%NRgk^?fbqxOWdmDQ=kUwVl(o5y z&>T>^ffU`M%Q)~k`RlIa$hx~n-7zbV({t=f!Z&d0Q`J&1IGR%E z&QAl!wtSVY|E%10S#8H;#0}`6%@T)+5Ltq281EMAqqxlJ@YT?&pX&3TleDPN08qgy zphOZ$dOjfL%Z#Y)FYj3d*7P)U)i~7-CF4G{&}nz~WY(-3Ynxc9qj7c-W%fRF-4=#U zyS0nkefFT>141hmIF=0#V!~qTS?Dzz$f+_6PD*GAehVX?8&=6Squl=yGbFV_rcA zt};9&p4Xx8=o`gd(2`5yhcm_x?s`~yv+OoD+IA)q4 z3hiaPd;U8OM5yku+qV2XcQz^A zF18!H(a*eT9VRa=GFJeo~vdx7>SfKr2$#LYvBMLW_igew&%Rrm`n(222!@d*7_Sj%4Ny zW5Eva()v`A&VOZQqe1a*%e{L#_=$u^(qG_IT)Nj966OWyphfK$R5BpgX+Ap)pEy>i?t(&W^oL(ev!r{nW zKy_v0uuer5^EuxUx`1@zCkz7Cb0IB!L3)!G^-2SkG&8 zne5}iUYlyiFTGSZ+_@|q{3~Sv{O5JUkA@DHNT&JqLZN0mzupHTsCEt9rzX1e8gH)7 z-jwt=8iF@l(wYV9yXy$8gvUo92LTK%Cg!HP61|X~$>z#5_uSmMl8zBWwBA}H^kxfh z%lYKb6H;+Ro0G_Y{GrH6uNy#rDIOTRc%WJ7dnm1VAe_=^@v!PA^L#5D z1ovo#Oy&!@#Fs~h$FHN7x#MMS`bKrqHIR=l4>$b+P1OC;Ev5ep5wXx=n4Zj-u zst$@(=E_!M8;60^E%!A_%YDY7utP|k)Zv0fue%9}5oOf63Hs}%=rusZ)ie$;rV=+r z4qQOXCmFz~aor;@nTeQ1wbjf z&bOj03d?NP2bA5yDxxNCuDMlJVrzpPnHZ?k2M9H{+FcWdZ$T&(2^DYZn_KO+2wR+d zSO{*2+Fk42cqQ2iu0nX!L$fu7VfwnR()kjUtntEgXkZPR z$gErQ&H!^Dx;1YLq6sjKWdv^F<9c+z!tt;%3#@+C+g8$|LbuvFrLB5%D7G}DtwmkO 
z((?QM?0tX6$;nCth4D7?!L)^WCo8X{7VrUegikGew?6&6sNtk<5+BB`>^bRU5muHL zDl-Pc_m}byAc!C|%c1V|VgLHRdc2&XfnxtGyF=&v;voW#K@?{y@{~X%1%95N8P{@VBBJeR{b;yvGqQzvRfVBhvPUz z(XG*g!)6WHrmB`1q%GjJZg&0Bzd-{U=($FpvWuMAZ;t7hTM*QI8%B2e!JT=c4R9`_ z33Gn0KTr{6k=&rEI6aL-uj|lR+kJm+2jZYzfZXToVnn>9^VEhZGH#GEEpGq|@+_r5 z;b~wFC_%v63mGEAc=Sw$nz!OgP4zDa`;Yg;esby9r|46~{^OUAeMbEgfqmlnnN;nD z@5uk)FZ#Txa4Fn>V|07F>CZBMi6<01cOEf;JxgLj&?pxZdB}v9Is?gUPlB=Q1+}ky zF_-^v0db+|#Rvu*CN3mV0fF`a&1NLC91#QtZDIi`n@G!`_7<2sIP%zTcSm9rG`tKw z@zI_GXFnnWp@CmiPf+w?1Mk)oAI*mSqw5Jq{S(2d3YA_4_CMAW``iFVW1X5YDxNl8FE<~X+UX2j3WZjWQIECtHYV{Qpgrkoumwa#l{jS)?Wzg;8 zJ?SRhbcs?!(Wgqck6%9B81+vC-H5+s`W-`Oex$!5PVSW3=Es_nTpIaid~orCceA!c zG70#fO}EgDi)#smnMbwWmSi(G2DM z_na)Sfgw-|l{}}57y_oMB#YvD-5`$kv_;+Ko)b|H-KbDX2JZHpitJHMH_M(=oO@1| z^9{@aCGecUm%e45lcEhu%CgvgMy56cxNZ-B+WJE}B+K@Ad(0C!m6M3cGw)!b8`3_O z8}bn^k)Yr_RJ*VtACVpZL{H+;BZ@-ijQIyX^aboVMK30ptL^wFGV*oXapp`HaeVED zSjeZ4v~zsHb2s^znq`~SE=0cWU255h2$q*&M6*l}m`-z>jC?ka6ZtY7nS64f({QO# z{n{{{k#r}7VGdtc4oy?(Gp0d>)?s8&M?h+}9`03W;Cv*6H%o%YP;+0bnLKe`+FjHM zxdf_pJ0VYGH2cw=kb-9avXd3oZGIGd&-KK&H1I|7O`(fPNjJXdE+5~F^C!Z5;cLk4 zNb7eJh-NNInC4Tt$RHN>g(O4+Mol(I>652fs-+RdW8_oH>G%kX|a)01(S zmtCs&L(!+oKA*XK_F>dN5$vPXrOUwniJsWUYF;Y#Df(2gf8z47&!~SQu+N%nX3%`* zyiF98snBbppj8>~dEV;6d+6F*Ro3MLuBk`$dKg?LO+WiDK-16utD@=UNYv+gVjsWY zQYDI_PgSBmclp?dlYb&43K6*+?0>!|_H`7>CC;TO`c$$1`OC*XqyCA&zLvFs1SBvu zBd^-R8(LB{FJvAsMBd?~cB`}-tr55U$=XpV6LQe@r;vzA5M3)C=@oan4Pn=$A6a03 z7#qTDf7s@9_P;)2Km9hcFdci(+t>|}{@uIZmLz}Sw~^hPe(Ov8%xXO+ee`1_M)1?u zx-xz8)SHuZZTdMrRBNrG@1G3PH+|Wav){*qbk6^_&Yze5;*;*R{d{)1^yjXV9F}kY zCa_$ap5#NVzLqUE?8aj8+m^|g?`Q#onU7Xc_{f%R@a0&Laf{pUY4{EFZXMY|M$E2% zX;AwhwcO7ZspL1cNM%deb%&Q1r{C8|7-2S&4}Hn~{--FayQL$);Oqp2zBR?==0w3V z!W>?6ZSAAjY~Pv6G9kN;r`#jdOvbeZHHet&Z;ja_mo4Tn2M3U1R;xKVBOx&CmpwOh z0-_^?t_N-RA+xP*6tTGqg>JUa=u`dUM_xyU#rUAG(soHl+KlOw(YL@EE)*?NxwRYC zaNC`Lxtt=+iks;02Eu4ijEmYWd_E92RvQ3S0Y5Bh)kpAG%L{I3iBCaBL!Kjg*V%k5u% zxFp8y^h;$8b8Y&A^3NUVnJ+s1^W&l9qyKD_eDq%-C6n~yM;z&VZhe1pZcA%No}A<@ 
zibdGmW;yu}mmXySf$)88eQB+~_!H{pPSM&7=%qcZH(P19Vk!DKnKvZOB z;}gda@G(f3RwytMFHdW0VuKA}*X zH-so6UZVa;zj#2046?6=TsW6-r&r3JRUX%c+VNMmkZSQ^4n45l2CAjpS++@I>6JVr z)k$Kd;t`ygpkRPDYUKHg-uSr;A8uJEgyD}*@OvY`oTHyernH7gB#NYB!{p6qc2cS` z68^tfR_#={$1=7x1TZxkc4A>jIp3g>&-_lVEz4d(9R^prdsgJ?9wdJBdc4qse{g z`Zsjku1D?6xgY4I#W`5wRrK7t$?=eA6lmcuON`DM1FrZD={b^t!uuS)wIi_g9|Wd@ zj2*BP${~yE{5l_GTJ&^~xdi7sjd4JCI+{4h;7yGxZoMZA`@N6RhkGSLb~yM$nNNIh z>WSk+cG7_kGQZiD)**|f-H)x`L!`8WYxdI4;I4q9Mqx^_0NVq5?`zlhl52s#LAY9J z@;XbBMqh?4C%$Yh3@ym7>vL{MgVr0k%M|i#&sUOUf^|*y!Te5$gaU|Uj%OY2Mo?3- znX1r$z%L(iSC9dqJRwHmFkqs)M+An0U*VFX$TQn3Z1TWyVLK*~2GxApZ zy4!cSu*28&-!~LpY2zQ3OQuAfc8vzvQO*CaqD}UGpZJG&lJGT~20t?t zT$#?>FS7Z`sOLgso}6?~mU*)1o-FVrUHXcur7JvM4)44Qe_s0^TK^|#Ylqu|5M4LR zyV1}Ti_7nO8u|#NM4SboGJ!UBHXgVi(feWWa?#L=jc#_uz@v8l1X3#Ucq80q9q?kw zr0L_?VM*1_M3Ls$JRo&SXRhD1WCp5VwBd{IyB|dlFbZq+7;2O{2LT&YcYfee7FiaJo+HO^~cZJzx!71Ztw}u5f zvQLPQb_cm)HY`-gdrbNdR*K$+!R7I@rdBZ zyzoY?mXd#|H7BvJD@{(FCXkjo7df;2vHq}6sxI677%FiFJr^q566RORV6+>-z+NWb zAw6YBNJ_D32s?Z%YB3l|ls%*#Q-*(_gsn4?I!JffEg%-e4wEs~f$hkiq7jUpiELfd zvxbmzw=oJ(Y)l^EoQs;W{uxj@;D1tMRbMcX>kA~=+GUvDoP3`}QZ70}O08L5Sf2Kv zUNrXF*^Nr|rC%V}JgocRj6R$~FT%%K=M2*!-_CU!Q7plEE+4nNFl6GTifzsDMuc!3 zE%bv%$K3?WEeXKn;g5{yTzHi!9qq^#PW8&DgrCrYw7Hn7Ee@@ccSnJbhP?Z6FjjNN^T5 zpx}cYA&}sNA0>dl5u{fTOCqSXw@;tigvu=>YK7!{!!5f~x^X+MLrZd3Q3B!dZpa1J z5Jr*^YbtEz$I6;GyOe^8}CWe1GPFBY11^F|vc3}?4;*d->?y_sac!W zjLWk?KejVJT6MmUQp||w)N#5^8bWnwG)m6kXWOn1zTkggN6ou-2NeW@*-&;gT6 zKnV_W>XTHg4c**_qXvVk-4DRuZJVukH0ncsjo%x?3 zPbKqASVMFj}i--WqEi;oHek8C_PJYqaS?~rX+j|P= z&Tcutd;&jb1ZC8`%IqN;0F|^sb3C!C9AvSoOg1fWnj6jM_KcVRfFaxl6qMd9`vlCKXOGcX>2)FIYpb(Hsj!|I= zIKg!7hoAyFkXN$PCLc`hh_3Sfc4o6=$fxiqB+y>A$hNwNnTnW~LCbZ-tIC$aj@egM zSGbo6qz=u9g10ehN&g(yOl22XYo<3# zUpG_ka}R08iI2xo_CtC$eZG*gs&ViYIlJtJE*v{+_F6*@-J;mFbx>F`=K??HhYeu1 z@%%r0yzqtLy^P14y_d*;<=_OHi}?>3R1HuzlmWUk{r!G4#~uvTCe2P^%QA=Qm4erm zHCDw>_g+Q0s8jZmbvm#OIIZt?DL&fTX=>i1(OYV->?LzvrkrDz@k$RKwSyp>-!9|e z^#nUU4M5N8CU2{)lEl>ctLu6h3tm@@mf^s0Pn>+q?MN3#gz46jlBrsg$uO+vnl_eW 
z;8Mh-Nf1wkJj$9@6Ovrh)=hbu&Z4N!uJf9$p5(>I)1Xh%x?8h#a=%YSnYA5AK47) z0MqWFNbgB1VJohfKTao9BHiMfKgj+!o{mHl5q>ZZmeoh$1M(r&hal-UP$B!g6`aJ> zjp{r0@f-?K?%0a?U8+gp73X*1xN~FZkb;_C{%LCHu!2(!u*tw#RYGe!-C*6EwLxYx zt~Z*NWQv0Z&|Flf+J~l%P;OWUq*da3w{7RPy-IAg_Q)P(JGC3ugQ$!+YOdN9 z%;@Ja?e;P%=#)cd4mRzLb(CR;TOZJp&13}}>$GK9C z*%4ZT=u}qcOpyMq>-CI)0l)>)TjevEc22);|>&fV(z%4&P?CJU|55C%U$I&`v&3rhZGUFoo50gypX6 zLWilmxfFDFi6t-msDRsVWf+h2A+E9NBr5QOdQ~TV1-D=@v zl8P?Z64F3#@4qEdAKrh15{h!^ZgU^y!qwG|NdyItST6GIY~7}q9S)@Dl-a) zsX5LRu~FO&1Jmwt+*&unSfh+|gepB`h?)PG24g0#}`QF_+t zI@oDUTpnqH5=m8}f34v~eWDId^=u7o9qDKo-`c6XwPg{X=HQ`7CyjIl=#Jq?Ytb1_ zg(J235o}dNzwj7~=^mdB(yvy)J_6Vx-UPa3O5YhX#{d~Oj?$br z3DT1~fGBht(`;qz)QAjzKIa;0$P*}0j-lUe4s8!2*%bc*MB*G%h!lu5Y>Fhj!ry}Z zReS)`D|zvkLHd=-Seal!H%Un(b?M?yffj zDgz|KW=J6NW^iJEBOh^#(mdjp!GsG_9u(cURz49V+Q0{jkJUQ!!TCsx78KhWx3h?+N@9O3 zI_oArg49Gp6H?|yXGsX@o3p@!-F6$MSC`77(eo0J zUxAQh*<4MI`49^@XdtJ25yXPiphMYJh~~^22SUpgEgrxw`461 zo6K-7P0^iRDte{4fdyPORkvjx%D8!*pZjeoS7W$T+dK^XZgRIXpWKxiS1EJWd=uMN zH!(7-0h`}}^>g!uogE!T;t`^^O1R{##}M0@kJt3xnevRAQ*}9$)h(6*ya|rzU&JF)?cl1`|6EO4Qt(XKEiZmyh-*k`t+1kKFj8$+_S=>Z)8@fi5sqMY@(pyA z3;dtB8W+}1`_j@VE^M@W^*_@WGJnz@brx7MXD?2liRK8KF_?&nlWIp1@^h!nls{pL zBu@XBPc|JGANSpb1>}tNpJsOv2duw{4`k`Zd>}!yI=Uimr61L2)Ji|5Pq$FH8m8(K zdR;_`4RN+s7&b%w*Qf_sbTCdo&&&QWj@QKT04{)_jilz{bfW|Idt8JmlOx*_Ccgn5 zx5ksQ45Rkn?uUm5+ult0Q$C#JQ>9G}w51saH~+~3XWOu2((hWv^j@{xb{^>n7}1~_ zbCqbAq2j$wc_OYC;0o0cLSjHNyEe&h=c@Awz~+wV{*d!Q8@T|kDFSTN?FPfD$w9;X zXPXRiLcDX1Uwy~WS~^*@phvdg^V)0-T9vfeB?#Co4MF4g`Z3FC;7m;a^a20WJOc~n ztL%gR>8OyN-awxb7}y@{3k?%4wa(d9^9j}qabx)B&hIn#z(UNa0(dlNE&|7Zn3<=^ zES`8!u7fD?TNyfOXafIXN~lHA7-|U%TmZH15IA!U5}YHserwI=k4yf(KR(_`iU~|% zXStz=DE|6vW|T#sBe)bk)TC%#xXxIU06%AlXkC9c_On1@0WBStLAWzB-09A{3{8$@ zCojmd>%1sLUb+dgpcqT36vRU%KyhSi45Vk=i8wS;i3PgIXINa4bZnC4V?(E#sGvt( z&Y^UYgtQPhO*k(_+&G%P3xMCe;8-~bsjK`$u<7~5nR9kCKrW0#PWQ*}}7xAo|{ALuB(hGu>ZtW?$>2O7BJ9HBYoj0zK{C ziyp|F0a;z~pBY&14ZPYtQ&?5dJ2RUZEty$WU>1$M{EbC0WL@>ltgCNCwv}f(6;`8l 
zCe~JAWJ@138(|hU`I@z{PsUI(=j^LvTC=Y_R6AoXq-}~c7<)u`W`Ob0TM+#YTaU|(83aK5bu_|;qaS#JohTwJ2hxBk#W!DS#1cc|*N1w#>y^4Gzu}4}F9wN3_ua z>YF+txbATYI($~0FmpE@n3L zOyCN&ZVQI7t^t!EJGJ!TcKjIM;)ic*KhhrDw@>jZp}C}}ojQiWNgdu z>B4v3nIGrRP{6jfeS92eISN=@OPB4v29JoMsJS0x?7QVEj*qjy3gFzDZrrzpZER3O(R^*nnW$SGo!>;hhYo&$%72wa6h*A=uu?2Ox7NjmE((fJ#oqTGA3e z1Jjn9qHMvL7c=q#l3%kthvQ+9jhHA9;fO2@vq}^y>;J{NkZk(%6I@7Uy-dWn!LpA% zB08kw@tzhJ(kO^&FX#RE=KS~;^f_l^QVdOyoYtcyVaD!0;Ccg6CU%4?6-WfOVO``D zH7pLbHC_s#h@Oo`8Xd~bUQg;Z#G2465Hh7tmOLopxP}0VkfU5sKsc(`T6v7qqZyL` z!W^rP5Y&V(v9`AdvkDc0)EP^-fUm{bbGlZC%c#}NJPkAsgV0CVq7AD7EOB80!L@bd zJF)NQ!3V`yz^Vv3Tb9L-*|$FTO;Mf<^#uCByxqBM2s^267HrwY&Y3r+HXV_(GyJWs z%=vHFTL(kA-@?&ktl0RgUASzN02i@w5MlvvyZ9(}0>>RbZZ3qH4tXtAiy*7FY`d?* zb*QZTBy^Js8n@Un`knyD;+EF`=DKQUH_V~!Sd>YaZreFkQXaZh61K4y;M^37Rs-yq zD|aPc@+}3|Il)-kjTe>>dnj~m>wZq#5-z@SaJFs#lQc}bChgLXZi-wIfDRw^<#bU= zw)cz$s@KT2h)-%Z^Yewo$)!R^Ln|faR(cxfp|q~cWs~*dW;xTqV|5ql9A`ni3(8Cb zy3bXRvP9iMw%nM2Ro|oJ6MYDyBvSMNS-XPw*}YDT6R*ecI0_cPFQ@Jbo*XJ|-CmXt zz-lCamnC`k! 
z2!wCNgdM3C5~m$Hyle+&Kf^VUZ7o#;j6T^1{4TjF=73P|-lL zqX-y(H9d$l2*bkNx_(H~?aSRx2>}`<%Z=f3d&X8WD*h(sUu?QNwdP&2qFo-~@+M4x zS%bNWjerti_V<8`-Iwyu`9|4|TCG}RaA?gH&3bkbmLG>3!NalL@Q9}C*XgOw%B{Xp z>&_#nuv{sJxvJElW zjapfIcB2*xWbRTZyUY=Sz;fjqwYc%hW%_x;lwFo}5OJB489X6zPv`hsKyc#HF3ZXQ z>3vz&dk5u&++7xJt@g@}OSIv&r+8Ub)?M|oECfH->$F*1mPJ>>jIoc@Lv~lzomYu% z;Y}p~dCP+`fSD7C>oAJIIM#lkceP3w#=E&3(Q-+Ze^I?;$UHLgM?m8Oe+`X{{8Z(V zAvwX@gDv74D&l6j;3>7~_ET{(JFGF)uS-N(pF7%duqmLa9{D9>zmER+C zmETV3xx!)_95Ix}e{|A_vU6Ez@>RGq24#nFbVdg>ZZUMBYFNh`u7NJnq}nxER$C}U zzHX&6cOa%_Oh~WIW|NPwJc|a`j(~`%EKU^b2QP0u6lNEDuy$a+mDC%=LD6z{vGzmp zwU&$zT#334V&-P;2)`Pz33bs*%%Zqj=6|JZDhB26?Qi``7}dzgqaUuVY+lJ_H`4i&wGO_jU^7n_3u9Nda8Hyx zX*XBIw&H(m3*h~qv3V+8+B@|^d(_52Ylk+TN#nK`a16p|c)#vjQoCuNS_C=rKrq!3 z9-h0oey#OL=;(tj!2^xNr?z!B-mULUM$NL?T}o)qCxbf~_>*jt{Al2|OJ2z?35wmp z<6Z8_cS0V;rn=KL&M>`|{HN`~+B=;09|>hqmdR-6{nT0uvD@?mp8$vJ-32bSq_$c` z>zD+sr?>a6=_9SQ#(zw=T{&RE&#Xs5qY=W?@QTikz|Tjq9@>YPCOh)EeZO7P=SwgH z!;Mbg{_cmXh5Bp~>I)?l;zFx4Z+}y@P}44~^M%?JYVS$k^_QHSRCLEh$TpF5@QhGq zWKa66r>+7zU7N)yefg3{bLXY}c)H)AY8z9y2gLo{X z0=H1$mObgOALAbC5@Z;|G)a%@Zf~(`4v@Q+)Q^folt|i7|Lg~kRO(9bofw4hp}*nG zC1A8`t*^%TAvzl1QvM;G#Co`ibtgk^&b`;^SKsrnzU#W{ z^mm@HGL6Aat@Q92UEfZbjji;ZU(prnX^mU1)8Ab1->+z;-~YNVzrL0J@(=8LoeMD1 zkK5(!X}|8X{Kdbr=l!h4rH_Bwf9Ep!GavHbxk~2|A_L@^`g`>HFU@nk2t>b?g5Y(RWKq`;^ti zhiQ`Ji?E315)n!>bib6(7IDS2cJ1@*w+49?;h_c0dWJ)(XHgqk;eJ_=H;pW>2Yv`z z58WOPDZ6ZRz;2N7XU+8a&xVXl(&Q(l@dEC9?gcw1*`e+S4JpgO9d%%~aS5_;2UV&br19`|o+({|ya3mltc?TmD|p zza2X|+qbXXwr$&{M&oI_UNFpm>#x|b@s?}+e^>keV*lT@*Y@i_v}?WIKQeODjp0uv zTlC-N(ErzX&U2X2_m%#;;f5P-Zan?zO_E*u`@`qrwbhj=OH4mXK0{P!ggjQF(tI9> zNHyVeLVG}-2T#-n?~pDjIU)S6r^E%@cvWBk`fY0~J%#Pz9catBM$WKlq8;r`-nkwg zqQz0WxBGTgCdx?H^>iPfQSwCt;DG%d*S+lZ^pLeqrTw_fX^{L9wTKFn7B=)uskLUT zt$NI&NDX`@<)^HCLsm6r%Ra>$@&#yWM@TYR#C%emtWz}x|0f%~K@cThrq}Jk!P@Yk zJ`dDjH+ns1P>e(?@vt49cs6#(S&oGqw%gQ(h7kh9Zm^@)7PxFqs61t3SmH1owQ{;P z%!MIbJ6IDSBhjgvTG<|~)P}C1=C!IhqeEUY>>2oYEP!tqo2kpG-K}ybtsLIZB_Mpl zBmAUQf3d0`l}RIUQ^xS=_pakpMk 
zm$H?*HZWp|qW7<^ryV;m%a~Gl;~>FkcZoZ-#xbIvJaZZ^7tA?Q|F^4t@KWnrj$k=dkNcF9yz5deHaO~key+z zyN_GSeOPI&Ze@L#STc@V%Y9I)(#I_t$L_sFA8r&`-QDH7l&#dgnS>{&hNvWN+Cf_` zkWwBaOY4RS3vf{fiM&%~Lu5hFCcn0xAfHmV^we|EO@d3YgnaN-^m09W!7>!C0CuA) zE1;5B?#A5RWz1Rec`NS8lMVls18LG&1F@rx>CfthE<_`U3{x=5H+WmGF?1bAb)GKDLZcc`-NxXT&MiFa1{ z!E$8^S1La@7awtFO{Voa$*x2RJYy13Yb5PIz1?8Zk$2K)%&P;Zidps(iIR7r6WPGS z0&YPZnie%lltx4Dvvonu;?e2WYDBvfroCE+WikP;a{xtc-S_e=7fvu(INIqM7UW@f z>EP2b^A#A*n&~K$^{g3;XAW)^X3ng7ea62YF_Ja4Ye!~sxhXUGr&#lJ)|QF3S#*M< z{MmNLDHbgqP zon)*yTK#*HlMG{faNKr1@a0%PemuI&Z~>IF+Kins$XtKb9z6$_yFG;WM2!@xr|6`5uLB|2Epl3YW$9WRJqO!xf!mh0FS8p}R1^p0ZK{rT$M* z$~e*l++c;nHLyi13oqv4y~ri*dgqn@bGjtu*`m)0ctrF#ZoKk2kjy$z)3Nw`(;X2O z?bbiNS!i~e^iF3Kx9v+sXJj>)SJma$N>87*(%yA2A{DN~X|~M(B(OQ1g*@X%Dewj- z^=NCU)RuEHi?)`!+CpH7+R0VeZWF<($IV#NGo{wtP>R;hbhWmv3x9@BA2hC|*F2D3 z^B>dTC0c|4vlX3WVmnD!!t?~6QSv8-=_!xtL#r@-B*XMkgRo52qZh#ROnE$An4akx z57@aNrjM1{>c;f3uC~Ch$23@4MK@u3w$xfTrf0ia>%pl(7M`zS8q8KOjRutW&}n97 zr^7mvv&@60W7~ty9s|*$&e3e!I#fw!8l9HOS4(U|bB(=Bf0}4=e30R_X)rv<|Jt40 zYOlG=WP#h+A?#YVk5Pn9lFu4#N1Makl*a5!hokU$mW$S7$q`1vja(I$#JV76_Hu>@n&bhWrNkz{q8L|C>7XG z0k@-xquES0*yE;e(2i?rtqQS7M{}&;fu_Ikl|wtM5J@q6hAF!}ct^86c>nOA)`$J{ zhN$^oZ%(CkIlh@3cxji(rU&YswN4AHl{^~d4_bw?7C2bXv;az^<^&$&lb6d{o1k1t z85l8485k0#D9;HL$flH-Hr>5nD8zd^B9!7L9?~a?ixsxwIthj| zcABoR6J7M%c#m3PT)1549PQ#KanlFt?B#ci1r%DIyPw*<^omrZF}#LT4*Jn!V5_h2 zm7-EmXSLn}<|#s9L@yv*M%C$BSwl$s;TqT6=TxnwgQHEW54MK{6+tR%Lc*90{-~c9 zaDayhXY9jW5Oh=)*%?ad%oX^XcS1g2LHwuXNrCt|KIKNw>J!8t)2BoHr#MDNoPk?s z%}U=Ls}{?+oT~4u+wXy~f zVzvigr~!J*?xyD#Be*1@39d5H=(IvK3pq~v(C3#-`Oo`M=LsL;oFl|(FGVqsjVBPhfCZAP^P-a2tJstFiJKfq|XCKs(z*+*N9%X3s4Kq>TW z12~;k$}xDFz;JrpF6~!M?lr9M zke4tFf28djx#KaD~JBejcPpNDuWwb+u4r9JZy<)o{3#jHeupy(j4YGo;ll8^XSGxMwTB3cmD+1nd#YT!TN0)!u|X@Z+|gAWdZf}1D0j%O{jk}r)Vj)T z4<5BS5WWXzMs#&Y`dg*@J~e#CV$bF7?=3eE5NN9e^MVpfiUcNwc%UN^gT3)m{8Q7%@G`HlNW9V%Q3jk#W33OxAwXTN|GoN-} zwkWHEcjO4z-VBQ;=FX4~nli;={!MIj`~X@kys*P>$kKj~YHOS_SK^8uu$I9T3ycmZ 
z#qcqIcC62b=A>rw&cbA-pRt~^uXZ2O)LK3LO`Rw|W?v_5^N8h!Nm%+wJ!_L1ca+0O z(qn5aNc4pP)Ua)|i19`Bc1~~S?JdEd}0rT_rKgfrTOT_4<+incvbqA@=P{8ZSzlEdDWJyuer88+CLLB=i+?lE= zx%=tEpL$D@-jeJX4hCPWHpbzy?lul$T762J+sBlSMYLg?l_qH8vwui}+cT|=7paY@ z9&KpLl5Yb(5*(9iV_H@lA0z$F7amH|8?6m80>BK5qHSfY?2%aEzQ?Itm@;U-xfwOV zNKd$C5fhhdtzAzzQjMGA>9y))?Vfb}PjyQq>5S|w)X8AFcOpy=N>=KiQu-`I zJGs7GjAmxk%nZ%EPtAk|D;WqqVU?TO16JE{1+1qXtT>q*Tx#F9#yHDmSl{kH{z$-o|5#EYwfGH@3;$}R_&lI=3Mf1;KgxTGfZ1b4YNxJw?j=N!09J6JRG;I34` z{n~sL++Bc6CPoG>I7RF-a97IUQnCW>FaL24E?Eve!Bvh@5A2SLt-vnnO8Q0`6zg;L zeQP|h*)s#11Pkm^vI6eICv$K~nD9KCu`JuxdI37& z0cFyuZxaG^$^$x81@z5o{zV0#%13P%fVKtbbs3I?qVY{0Lcex7gSA~<20^Az`S4lq}Tqw>C@(kSNGPsnifct2n z!pK$W3GOJtupZ#9cyRaQ{nGu4z#X&iTjMceASEID?&ksb$^u*t47U6s4=#3mB)UBV zcdS;zu8tNitFQyoE{+*UTImVyL~n4%<)ot%4mohgYk)iH!PRbp61b258J%*W^j!^L>W zsOgFSxvqhD>r6n`@PE<`WL5*2x9?kHqOfKBe?h`n(#g-Gre!&K{NJN-zdD1Lz-|FA zMJxFKo|@rj^uH{4NA3I8c(h)@@Bb&my0w6J2jwl<%ES9Q z!Fz88?`XY*Uy4@XeY~*Itk>c-veZ%572@K9g2H*8V-7f0mGBz*t`(c~uSvn@s5p6^ z&ppqWwNO)THgh1=1U(#e&9W4OBWd5a#;nDZdFrDAv?LJQsp*c~abDl+HRcDhF|)i= z@DxQWV?I-inZy9x(JtTw5leE}m}eYtBoW&R9jb&ezwMiBBXOQG%hIrSk2X#hZOpqi zSlh!*fGdOUs{Ycq65t%i&n=YS3eBNtJ=`oNI7N4X)UCj*X~d?4%6YRRxzk zo0^vM=DFmJ!q|fu##YK0qi6+Ve_z-{l1F-S^%#r0Jt#b?5DxGj(Bc4|TmyKuv@M^0 zyiuX>S#|P6PyoDD6%<7A0FRo!+?Ol5@kWWr6s~~$5t%HfILN~239{DCdw@LQK|bO@ zo)E}W9%Pol%S`!Aanf)BFf-Narf%xr(pG3YvPl#uFLp<#v zKJFl9ogKH%Lp)oB_#M(u@NG6DdKM5{ILqUB+#o)XLp)oCn8Fo^A1{<0%e*}yCjGn@ zj^{kYryRs{f_TA0yikRBR)lK5Fg1QCJm^- zF@-A-f2H7K*0Uj27kI+rA;IBH%D4lML?Qe+zNciOrEi-?y_T^l5V;n%2*dD?j5S7>9SwZXD4YP&}p zXNoo!TpJ5&V@Yi+(Z*wH!@0Xk$!YQAJ8tolqMSwDCFNW3!FrnpNgzs;j}wK&vusA9W2*s=;adj)5jCx5Pu= z`#9EYeZlotQ`6QrJP%D94{>^lcT=bPN<2j23J*PcoW@YBxD+g|Vxqgi6HF%rhvT6c z2i}apo8x;r$9VtX02@`A64C~1t(fQ@ZJaLJn0IZ=tBpmqu_(NM13tXg+F)@NkE2H$ zWFlv9EV(w8)CMWZ(3&>>P;KB10|yJIP=&a4H|@wS&e|B&SE$}k8{{NI>;66Ie|tA$ zK?%|Zi=xO#k2a1JZIHXHHYU`@l-ii0jmbYGFn6o9!Gb3;(xZ*zMH|zujcK(pt2Sn7 zsQjRm#A^^wqkHm3d_+B%%F z+z9XXXk#BQJWh2QKc$w{#;D4Vw)Ujo{}Qxy^1@Og*lMk&%fX_JF@1#^E!vn+`3c%M 
z`b*%{!NGDEyxXIVqeUB&u8m2xF|G2`wDH(G(I2i2R`B569&MZ`+L&=|%&3hym7k-H zCxj1z9?*qVzb^NDp{qfUlzCM~H=TA3&a1&i`%YN6RneETkG?hGn8Vx?Q%zfoP8Pul zTOdqd9?SLRVymPt)g;O{pVpsLW7iw3tSLg3F7O1?IaPA{a!FsIy1-je`4z_d=*L+= z`2X2^A0WG~`p)zIdH?#o*WFiAOKw|jeBX^S)7o;6?BGa_a0OizmT~dN%q*LwwwM}r zH&w~Ds9oB)Jj|4_n^-1-aX{H|STQk^4iQKYnNcPhMi2#>Ama`ZpiKm5GD_%8L{KJz zh{ORwwDEqvzjN+=_dT`RGPbFtYOQj=_wJu_e&_e^{Lb(Ee!sJf{mp>MU4<-NrD{}F zm4AJn`u8(|g2SOO52+f&qOUEX8f!$s$l;$nsZP$20;rs#awFp$UV54T^WAOnDf^zE z>IzThy+0W`x5yI1*Hg<~VXgFw9R4uOfmvVJuTlt$^ceYBX3?;AqK`Gei~6>T^gwme zI&wvG{>>}jDRF&N?{p1KY|n!0tD)&U1IjN6fGU2@2--e=$XHc?1q8QxD5tdE0k<}s zd~tLLcPGgV5mwHFyJPk}KQ=0+<-5)~a=7CRHt*Elk9%O*=&vu!|~Wy3CFy?qR_@@ zV;|r1ePgrv8BIBEU#qS*7FKBEaKDZHu8sX_rfv!H1jrYHH)y3T>R|w{gU^ zaYSt_s*Oe3_{^U$791hqlx=6!{|arK?YFVy+E`K>XVk_S+F1H9V{s-ao6RVK723Gi zZ)4fDv8*;OsErG>@uA;D3|t#*rnCJv5wP>!R|C>5RrUE1y}7|~ zsG-{L#k*dFuY9Kz!$(S|uLid`t~x(KzjVcyesez+v8OsOrE~>w zSmZzsv2!~k##PJ}jkGgQ6r*B(wM|dtL#^}qC~COFN?d_OZNjIv!_TCkP|EmKN)X^5 z8~wA%JJ@Tq<9P`_Ab+1HNHMf`>^Jj>L?erd!X zKkHG7$g#HCs+?&Y2*{{$2Xo z+#LMuHVy@YZ^n57=VS5&Y5zX$I|LC#OIc;urUL1dg{huSTL1c*j>^bhh4S@)N^Z1N9V`L zdtjFH*Enkr%w%tgL2LVGzBkM>nt1yIf-4`KvfQV~}{CHOD( z;RiQN>jozQ63v|tZHwmh$&&&Qf?YQz@WV`Q0f0F*tpF@*fQ%f}Sm|^#uUQw$H53gF7F&Y#4LG^@cw;t_ z9PR3GuTeu9c>clrAN43{(G`)P$$#>Fd(zR}1tVXy8*QXiiVZ>x!~r5BB-)lTx~Nv#Cp+7iGWEMN!mlGD^5db2vpQG$0)>rzH-Pxw(KU;T?QW)|!x+BEH zt96v2T_LE%CJZqFfi?Uks-?Js4vgmGZNvnTYv@WYJ(QB`Ia!o}p%zX{lWp%NUOYkK z;*WarUgxH&F?~#AWbiOsM3$QsrDH1O3GG2ZR~e;2a!S=^n#v|oLK|O>>5FDouA631 z5vd?V7N3`PsO5k8r)URl?yjaC(0kXQ9co@XKs@ln-*YIx+VkO!A!UJ~{yay!@7zC} z(Xym~HSX?Brs!OPE60>LH|nZOtWoe?Hj@8xxI3@iO|`FRm-4_OS1@8sS+Qf?u+Cdb zlj)IQqdROkPF&ycwDdwbRASO~xF-y0a;$&XPzxD^9 zRnCu*Ri+qJi!l+*WV6#E6N(fI=1cMzKx~c|Z_PHFspy+gV%iJFOvMq|HYE=alU&Jh zu(ZN7YGagzRm%^YWt3Y=Q9&?UW?kRJC(9iWopI>hZ1QEQCKw*{gv5V-oC_A+Ij*0V zb(A34+c_9n{fTt*Px6G>WCOGHn!hjd7^sfPhdaELxlKUME_=-pp568LNQ1n6k++y~V9p>d&jW-*I+QFQo@;6Efz_ zKI*M;D)t|litYVl(~~`4bF5b@^eqI9(tl(IOMa-=H#}fOwEaJJq!BF}$tP^iS?XCX 
zrdX9xmC(OXO3wIFnvHrRJ**E&IS4bz7JL2KX}?`L)+^_te4kx8(!ZkF=}>Q5ythXS zJ9ZCX4~!DjCG1Y#Y3M!4+lJSDz2uWN&{I82etxkAO5na7Kmi;ldp#{t_6h`MH3O(rTp|4BtjRh>Ndz)=&?%V!F@dmZi+~Fp~i5{Hk;|%OgC>cL)paJ zt1#0+UWJubaxFhC;Z_K8Wge7ud_mSQf+{OnR&zy#QK`YK;$q%=u@E8pxzknXFmZmIXX z`nn#Ip5$MBeaY77gg!U13ezX*`Y;n3uJM6;Gh1eNmZaBFaZ`^h z89QB!GAosF9@oZhY;a6qvh7LenB_ldcT91|BF}M9&uNt;QL#bcdic@goq)_56&-66 z4Gw5YNe2JPmQ?UJ3oF3Ev<%WQO)aWa!Kb-EP_tlGE%pS^WuWkvO<+tQp@nrfLcaPqtOes+;>*II1~4X5)x*PQBsp74d9o`Rq2t>dg9||KW_6$wf0iZV2{loL>P> zpv)a-s^HI<&ZeQ3FE72$eM;+dEA6c_j|VP{l%hw`j@Y~sybY(>;;t4S{viKR z#XnLFd=4I5^}h!Ht?+=o7I5Rr-g%*biC1MWkf6~`r>QT(pr#7{4On3|VII|PcT;Oq z3CivIb4T*2E{h1};+f!F#>r$too*~=d3X2NPt*(bRGR# zAw$~N3QGO(sUA9Jt#RmvC(OD&VMc}sGvWzjHGd#TgBeq+eMgXC$gb%(&|TAis1aK?zr;GY~O!m?gx}(l#BX#{2f*8$G^mv@NYa)7KmA>(}*lt$jVmS6fmx z^K-q~d{$i4GUg;0xn!I7(&@@130Vlp+eZys~UrJ)b zXcd~?v{^?;F5YV9i(F%-eDJ6it;iYnqB4WY;KjcEk{@9gE?t`}nw|Od55dl?SzVz4 z;eZDDrBXN`)5V?_kjo|@!4Cl^q;(GM)eNzH>ezl$$@XhsY_?zf!m>T8?Rs$U{-rmf zqNa4XlQh%bY&L)T_xC5o4oH5X_%WYp?{ed4~@$oHFj z!kNTujq>Y8&r^?QFTTS`DsF(IG^hoDaOxJ0+MjujIsiB?y2_Q5RkX_0oK=Dn)v%(o zD$d`dE#2F$vMT!fWmwy)h}ezQC1}z~(9{oEf&lKCCk|x9{9NXmbe>2}8v~PvcY&K% z9Q}Q(%4EC)vr|VVvqMlgZsJ`22*sB~_bHlC4iMr5F>-X(ex{UOAhA&Ew}ETKfN+hF zhRSm~l!4HQfou4UxR&Tz+`kBH;U*3TMSLKTr!sX8sCwuCGwc8_gscRQ`D|6jBJwfe34 z2-io@xR7TGXJ@b(Pa#JHt66rNF=kP9AN}%2zx=P$&PG-?R9MVo4imwuHIhzdZG@9W z5B$bEI1q?{U_C-W;!U0W{ZA1AenT?NHjjr}+@fkinQ*h98raNN(TuvZdm2YXdM zZor1Z6}>L2S7jHk&?{o4uFH;}c<74(V9?lx#M%()6#VDF zkP|gS3u*J29y}EK&YKlff(qiJskHQql%yuz1x_k{!--bz6BXpLnIM=m z5nEUoakOBwc>I0?4M}S-I(I6^2v02r?C`1PlLDg#>e1_TRL_@U2|HuS)~K;4q_=9xzXP}};f07} zqt*$1Drt6}iA_p@8e-89)QZ{}fSPC-F2KeFsyRjA;l8LUUy8AD{KJ~eOFetMChe-{ z2Qmu?fzNQNxJ{k~kLVa0H&`7;OK^z`YL2q*cHr)h1+LB+I%L39`25H)xGJr(BDf2x zai)-*tAR^)&oFDOslbl7L4402zNCBB@?#l^cCgA63bNK%)CpMI*--#BrE0jkumY;U zV18;C>?OOrO=%S7KuuXrA6Zsxs9S9iX#5{j5&-fE=j=L6rvz3JaXYtAQg^&>_48hombj z^GDTv#|^;IYjN%(mtoN6;AOaBByWm6h)~rS^>kB*1Y?~@msJ{Rc3E7lbQ!anP-M^~ z70BCgH<}|J98Z4HE>!G=^LC++6F=f*@$@qH26=kKVY}JEnALIToW>};4r^6q;0)r` 
zYt<|4gGK38B_FwdWR+x!Qb4&fp}(bT0S{7K#&ljwbL9kK>*4|-lzRxaHXtLiftYnl(o6ncp{0GEVXArD;ovBH?hlb%g zG7Q(TtHHG(xE7u-T%!eCG^Xg}K~WtB=FHWASrjmf1Hd>Qf>Flq&uepR)H^mNkEz9P zoZl6>lP@})r53er#s-p^L-1RkxMxkjOGYJddY0nw9GGO`{Z;K@RUroCVDAu1dTE;% zyUmyE)|N2^?;GcPwdz4e_+L}Ob=YND1a?u6cHkBBIA&aM14xIueVwT4%UrE%b7^2K>}?DT zWqQStq`Zl@(J3IcitceUxNu%rbIh;ov}s!cvjWRc8$`Q}KghYzA`fP0nW5Pmht)nyj3YJ?15=J9c|``;&f!scj}S+tel! zVnUcjuyKgsn1xYEYLUdd`BIGL=h6`Z3N=ARbE23AgdP^*?8#zc0t*%s6Pt|mTE%^1 zS^6vPi*hXv2B4AJ{UFWeteQK7jRvP=rBznV_`8_|y}Z##^2;4^F{GGO4gPWL&@>?r zMxP{4J*rR6EZeJ3!qxWZ6KBl4KHDTh^Ql5PB~=4a)kW#FL93flSO`7yOfZ*e;akv| zT&9#rau!T&(86MVByZf`8zHE71iuX*oD_iJBcLd}VnChJCqSLlCqPk{#ekxei~+T% zPk^F03yP@<6in=F8pWTZmK-7S5R$}N-AM`f)lC6wl_xggCeG5Q1sN-&{PN+S!@F!sn4FsLYna&_{Byan0pRmVL*a!uR= zB&&0e(+R70k6+Qc_uPZZ5c3am50;fse@)y2P}jgcQUbFGn3=?b+~e1A1^3h{oX(1* z;2vSD;2uD`Chh^WYvCTGd=+WqQ06PRM=Dmf>vBUWea+6ll`r>4f}G^Y1{XLXWwu9< zeIz}|VZ3$3Qm@Ds)VhcMN^7>EEW(ObZJ=ME`T z_%1$z*IM3Un}%Ibb;dCRxTD>^64`kqHV`<$HHO&)6WT{&=Hi0(UL(GP)mD04l@#n3$BJfKWYg*@Qf0dKF$+8asInGf&X9t1XOI?JTpc*moKCmEo9rpAE+-XA;+>s)s)Iz2zR?-}a#!g%n zN9xH*QI}fQ+SIbQa9s%r#fp`{7n2CaX=9@W=ne|@60t;Fl*@ka9jkX8U z@#uA=DXnXNp=pfRxjBIG_$MKgrqt)$y;7GxQ>l8~{i$}e*75AApu)6lFn~G(CaW`> z*FoQpN5Rfh!Gqa`bi>Yb!Oy*&KM!zW{vgcJG0tJvgs?WB)IAM6x5-|erVRPcFi+Fk z&+LBUod=2W<>>3zY?Nyb-!Rx>@1@StE~Rwe19OQBeWj#A)goCIZ1DZj57eH0D^O zhih(3#4+*5N#j&#PhKqCH3=`Z;F(rBx{Cw>#8Irn36Qja*GYqMK1MwE=~`R0x1oj1 zQQRO4HB$K!sO=j}j*NgkIZix(V3hB4K+1adnYk4H{^>ma9)-^l4Xliv-J-$?Sy zQozY@twAy;yR3i40J-cx)kk!(Aw_2X8BZREutd$5E&FF6ApZVZrB`8 zx?Ja!>MyhhZ=e$3JNxDM-Nc}oZrB!20tu5>l>8x`*u1i#m{&~m1?Z56VDSw$w-hQ6 zje`0W`6accGfC_Lw`d#3i1<@`b(>q{q0Gjl8yk0~^(9{JYV@lO~q-)bVC~!$4E35Z`m|&x5}T zaa@tc@UUGn6qPix!!6!W8hR19M9?tSlD|Ua*i`qsseGN<$Wa^0RnV@4@T{*6m(_-0 z9N8|i*E+xT(rBHlu(2E?ylUKFlCzzn(TGQI zH_ZvtsMD_UII;6i2b8K{eoq!U*@$X0cxw`~c60QWZSgeyjN}|*!)sfkSJ*FHKxzao zlOh|*_xQVa*e{$wy32u<)Gue0Y$P+}oBN}`&9;*@WEf{s5BJtN8=&}$JXzTcCeqsM zSS^mipeDZs$eIZz#pa_@T$o+=xR2M0EfCYeXTH!!!J zvQi|*2xliFA~ijvwcM1pUHUrz6Hs>-#0GEcrx)vQF|lT^^trjwWEB 
zXHZ7`#txVd=~yML7Pyhd@-?xLbKiGBTTTs#Y!@xE{i%BLWlYE|AsIw5>vM{OEUW$i z`k9rMdIkUUdVBpg2NTx~QmSJB)oAJvtoP+qZ0}TzWt%}r z0(xcrLH5Kx%Rsh-r(^XGCK<*|z?p?6P#_&)Lw&Kf5<(#FF?|9AM}b78zaO zgtf;ZY4Io{%WV(MbqAg}tE}kpxOz0J-6kYut$RsD`EL(i%ex{q*)4#U?`fm&lP>~&q2mugUqNz>^c8QdLf`lWrmw_tb^6MVws{Mts?e8(K}p|e zh`!=084TFuXoM+gisYj(uJ{?l;|(tyq488A$e()=2rXcryC4gaYb3OgvEGM=M;f$jxGGD%-$JXAWv*l@eK)LWGOhf}1su?#IZF@Q#-k_SuD7 z$_wI&X;PR`cJi1+R)D*1?2_xmJ1j6JQdV=Q9wq{`M)FCX<>*7SwEET4N>=_6YwGbvyy48W{?hyj?j523la7=(xUO*oa91*Ui#kC7`9i5PW@fnjsTYCEHN`x4E*B)%$mJsIDqPOWQ(YSoBS8WjW(-wGfiSWo}H%nRgV)-=Am1E25$X{x5kX~z>M<_#&5dbjPo%JWGd2`aXxw{h4_KqwU9G9$w)J;#%Gca{#)_Sx<|CDG` z%f_)ELT^H29RILS)#Raf(y+S`%bA+w3H>bFT-<}{BF@G zW}j@xT9LE_;>59B0j*PbBDZ!Jus@Lq3}qLcW@|H3jqE zrHJws8W-$rKp~vT7fi|0ahQCu3o)wyTo;itcSZj-Gr=#zcImo&p8vGUVqwpmSc(G6 zO5=xSUk+G_DaxmOM7zTFOhr~k8>r`6x+Xu_&`hu0M;87R1cVpPkT8W!F%#S)Z}Xab ze;Tfi4=_ zlrNW>gBwPzLazIWCD-04Rl*^PvCTWMG;Kh-f63AQGeBWo=hLh-|bqJ~RH2i3Brq^+xww7xwE<*LL@ z*p2EDcV>c6Gmd;eVy}8H6rKiOu!^R}lA1mwc03clCXykb&_QO#vRfhAj&|JxV`fLfQ z7$f>{n&}{cDsQYdCM&3Tq@jfgp_pP<3mKMPd0yw+{Uu~Rr=Cas$ zvAYFcWGe&rBFC~CM_SRBs#fTxXa%9bUU7^XNmJm^Vn^F)3bzB{f0{Kc?3v0rW66Uk zNg`?;JKxQ%q8s9xobnlHzngaGvkHC}(2c`kBPlxBXx%wtY$Q@`4Fh#bwL3S6c8Z!1 z%yxemX4Q|8s4yvL)N{y|j9~z*bDnYPUv6vMDDoae=3%6&bc?8e+r*Nnttx06nLCUY zo$6q)>LuZ#geLznkwb!{B${jk`M%o#+EwWQEUs}J?sPZ=y7<)?iF^VI+_6(w1)JV_!5 zB8o6A*-~T`9k{BP6yy|KE4~A%8RE$0=ipo)= zPnaiORM0F}r4dlM4v-Z(tyOFRp2)$m;c7kk(73alF|mv+B*t^t#8(j zdSB!?^5o3nXfD6;{(H4n?r34ee#s5+1T*acEj6&t_eSifW6NXlJsql|Hyjfy)F$q9 zF-dIMo_rwcynh2R4Do|3$u5q#H5pbwOLLt4GVSG}Fy`lzd!-v{(#)?%&vJo90VC3$7!}#I4ud>$(sq&qEFjbHiL~>?&h(vo zw#7O=^Y+;^06RK_H~A3v6(%t@3NCUQp*~9T#|;%6)thc~wkNYKz7#rlhyTEg97Yiq zouTNwbUbGVDcXmM{aB9p7Un~RLu}F-jkp{ksJhgwF|_J#n43UsCk(yHK3GkQMX4P& zb%4{(P$yNuU1KS#Ymgbdxc##1I(Lm7m(+h%;JJVD7++mODRHoGQKR#qMAsIC8Z2iG zFL5035Le)U;o&*=k5<`$_!XB1#B$(-g&aJ|BF&I!&a_x76=9bhmbJ-;#X+x1eCE>vd2up$ zZsLJu&Z8PneuYRTvf4(n8eD5F{}xYcLCTgYGe(k=m`7zJ%E>E~njlt9+E;vEpDk&a97aV1EUQFR3~JS&FeEO1#;3Ex~C55-K> 
zicTGS5IgLU<>inU;$?*~t*+vHkD_OjPneuO-eAa0UeaJ2@|-M) zAOzSF7K1}B)*`le7kh)paFeJUp8T3?ldtmhmJpk-#?D@xAGU{uLUqE!2O7!$&A~YM z-`yjGMH$|7@38fERPP zmHdH?hm$sI_#t>6zb&saRs2cOKV+8fW4}E4U}UButqS*{6rrGyp$<S;ENnQK%a!P z^7k?GN|+MJIcbwCk2Z?P0a#`;(d?Y+#Cw_$ntnn!FM>YpVb+PDC~na9WrO^Z>&Vdd zv>=buBXoY2V8DmrA-!ohwr zI;z3zY)uW`sgtApswH2C>RZAk*CTlTd{fQkC%IY5vRrelE}F^b(Ob;)DKrZ6ms4Pt z)twRp7~3e$Y4!?iO{-idc{RAnMYGlwu1<0a2v9JeGmGd*aeYb&!K0=4g*ZQMp7^q7 zB8Z7|{4WGVv6!6etwAee@G&*9Fq1o+KMTyJ3Qu-zYH!Z(`~Ybpl(3iBQQQ`T$R_eL zA9qD>m>?VZsmc$25)HIwBNeBL#>}`fu=bNQAPmZR$<~1OCc)a{b(qGf;JP7yfgJ-G znSQ#C^UJ)qhBs@;A2J5ol{XLPjRvcbG9WJ^2-O*-dU-&DwREP5q1&-c4<25V zsXIxR7^IM=n$|#3SM2Y*W@NRt(C-J={w6fds`k z8?0(WZ709l!K$=5hD$Rz>&y&?3YgEJ2__9u?z(lFkSK4W+g?Z(8)-}1ye7`0@7KHC z4ztaH>;Xdt<~ig6H7Jew`P9sFGfQ62Ft}jmIo?*D0BV~q&8^{gUu2|nm)Phu`tfMe z8R{HCWvk<2y&PtBZ>$5rC7UNS4jz(;sjLLz*rNOnQ6ynGv?bgd5rEAw z4Y94{Y}Y7{-9ZIWp*QM*QAiwNGqG->IwJU%off`|vQJ6vMz9jARV+}C-hnj0nMly!8_@3Mhq>LtkqVO~p|O6z}w7kx{(;99S9@P)Qh zke#C}fsFf5Byiv#cYBb$qzbfqGUSgKb|W|@v#x)WTUY z>t9P9dxNwO4gX2}>To^R81Z3R+Y%mjFmYcaraAM$e=I_;XLzR7Ca*vge&Y+}-^kq? 
zMJ1&qU|MTMY7wqT0~;H@aeWC^@zbQwo#uJ zUMOWEdhVkKt}t0jvDkMGNG0ix?_%)eYXqgEdO`la4xDYvm^VnOv`SH*^CFjo072yM z)IleFhtNTHDtg@>%cc&Zy{B`yJ~i=RVqK%7%_=xD|3a1jsz|jRK(HVp@zN=aC*1CU#seMTbYRbaG zQzu+qs7t9z2Vj`!!nwK`*t(!j&(^i2jPbS=KS#Jzg__#i@OI$G)QpGJZ;hBYp}kYv zw4k}pWKq5;KM$ea3-V`>noYD28wOLS*X9l4QUJnb0;4-mFX zW7l!1BN0?9*jRv&+hJt|6oR43geih;a&KHGU{6K)XKf!j`T20?*uy)ce~PotY(B6U zqz-xpZaTi^Jho38|6R1drAmIfK0H@7JNbi*49LDyms+GlY_w*R^91rqZE>x|JtNw%7gI##%Ay*%hp-UVk9tG4gIsvc z*GPf+QB`>1R`^5Uh>l|3PBRa=^CW!JX;5#9U}$V(Qf(o57}jWl_>O zoZw8rG1=QVQ|~9c-2p_uTxxqb0|rC#$USaK;x&fG7QV6d8gVH8COtq8OaZ16lRxRE zYt-=?=Yy9~%w5>6ybmaW_DGyR3+dPy?$i{3=MilgzWrYv+-(zE^39E@605@d^0&>= z%>?H#+&8NjfD%3fKL0y3Y?CLjtu)nGNd%1?1>ADl_krGx422o#Rpx4tP=Gf^GqrQ{ zo$1sg^xNF)KG(S4>5UJQy+!+gm3CjVaku)$#IPyumnV+K8S*_#P}ORvwHHUUA?gzaQRR@61zmN zyBU%?_m?omc}6y%n_JLZdN;D2L*x?IC;{!ew9_4v!LU8}T3a2;6H;(yoQ{H-HNe=E zdkgMryrx7MubJ(tp)-gI23S$KG#cYLzr)LfpNnTA8EKSS%G7goVW* zEX8RGVUPqnB3#i1sB-d%ThG1Dvw>b_PYXXIWRA5#NsE?2u8mEbB{IctB0l3>7#^Hg zy`1ilj=pLme9E}cGOjE?)U9i5Y8oQ4HY`v*jOdF(v}|7?hJ|UfK()^*#%)8r1pR-v zc1A}F3j*xb-H@+E2z{%PkXYM1=#+c1fM0F;718j(XkjP4Zkt$k!IMuP<`xmzQ7qEy zYe7Em`4oI8$$l^cd3NDLa7zl(D$3i|>Ru}_hgtbX;kEJ*%(DTdZVL2v!d~po35+qF#L!k^bWh4`k*|?{akk<$YJ(x zRzuhps26mnjxo|sF@v^#ROo`Gp8S4Vn$c@hBCX?TVVKwS#tlsFb8X-SjG|iq$qr8< zHX{bD<9^(IjvgjiG)^l1J!lL($am`TPw0uU*x^KWTxE>eL(fC!zrZ|9JF+8~9f^6`=7v zST$3;^mb{bLWL8HVZ3BhRWII&qXwd|Vzb9D)n}96#^Rj`mK(+jSOI4h-G}&K`%qU0 zZ^*4d@XqYUoew?o07g$XrK1P0L*o#Xd;pFpPt39$YQ@Ezhb@fKXRlw z`|YXQZpUS9av;-|rIm~@T$vw}mNbX2r6h_lfxGR>n(4Q8Hpt1~lYq-jLFe016g#)$ z4u(QkEZ^J;U93dgHT(e)T3}1q&9uM3G~E(}$~L??t;rj3J1^B(P;ZTtx!) 
zU7}*m$E@8nc?EnTW z;l}8avte=h2U?vkhK^;6sD1G$uOUsB)NXrRkYSYBUYB{LK3>4&`FeuLrXiENQN;On{2>jS=D2vG<^xl;&u z%1}xu#|D8L?BBpVVq^f|5ow)jFw=w&X3{>%L?xVwKruLgM+s*-XWP0vY=bVynG$r; zlkMW82yBPV6+smjG?8_xAF&>av`64nt;AM#a;a)$+ei};SRyThIw|hq9yb+Jrx{+> z!2n%p8Cb{ye=5z$CN7$>T91`x6j>}yg;^fwkE4UC!8|!GeJ8!J0_Ev6Og@QfizSrg z&Dd{T=@<#yq6I#U7u=>uKg8!Te`8a&(K71oDJGbCH9(kNUJcWEgo|!c+ZLTyPYCZ` z>*;0TJN3yFL6(p8__Xdf35~+CgQ4W7ar@o-Vv)s#WU1B%2*rFIPrk}3d`q}LhD6ek zD(Vd6m?Ua0USNtak@8}dwu%6}MPiB0_cv~B0GoV-({gd0WHR=<`#qzL7#_b{K7i(= z`nB!sC2jCgQOfN=%k+L+Mgb(?@gkmtvNIY(^-|kiB8ZS1#9kSr>Y+1s<#>6#JfLhQ ze}jk%zNPVxDJ{zSXKyR%mEB@8Eg^ctT$EHM5>=Iu*-%P#|&AXL#_8%**kFwah+f0!_fSX>KG z&Q4l{M)?xvcbqS}-4mWZ*`{fkV+6_|4|K4kO_W$x)1$l~@*UxmD|i9mQTP3bzMtoN zzL%!Hj+kpb3vPHgOZ&DHi%-){r}XF<>-!XcQ&~^+zD)17FQ=!nw7-dxR;(v2w>L*; z+P#!Epy*Mxg}Ce!DCxEWBm>Wb3Q*cYGynmWOx&-dLZ*2X_TY108;&MG0Y|Z_R;y)E zwbw&Onq3MD5NYdHaJFWkAJMjFA1mwxU{h!(0PrSgSm!JhuY)3f6~(bZq~o7?01@}7 zZ>2*m@HHb(l+C9 zi6l(r?7h~UaFUBvW4R5C2WpfAi}M8pQ%@bE`i42lNBzkyPW#9M?2GHpO% zTNiuII-`)JW-(=mvrg9?hn=dEoo4xD%mbc-z8uO%&~ng4W!F*T^?n$7Xx?mvy4Y>G3f60ye^y@KMbl=u#8&HN}*c zWAfSZiy*HwBvLqM-M5}u(5wUB;*qw0e1rd#@Hy&Hx*V~J`;>59W^SBJ+yt>q0UBpR~0*=i5Z%AHLl>~#d{g1(Ud z6>mHe?|?<3`&8O1-ccGpT%;Z!GjixlJ7XI{vB@mtni0hZrr5SBs$6Yo0U_Sl>jQ4x zih|7;PCf4?=ey~8ZNKElh@iGJp6Ai8xEZ0Hr!C)Ld6)zGk#RYaweN9ZZc8HyH@_C8 z;EaPpIe_QqWE}jtsRfDCuxa3qh*(u8Kp{OP)1YvyR80fKrsGZr7qgD=6)WaCi>I$n zTSpZE4l6&q)zH@#$)68v30e1;9XMc9%x$ebrLo)64Sd}a{6Z|a2x<*RI_Sl$cFN$PLUzhZ$if&t@r&UbZ;`(?;#CdvTHu-P; zU_r}CGFS3tJ*g)y^F$daZOdyn(be6e3Pp?GNK-KQ9g*{vYK_8dXl`0oL~zo9Shl58 zo6K~{qDcwCce>e<*&;4vgfcJuf>(jA_N#A@%KvndYq`uqkqFC%zZfFPUqAo~97wkD z`w)zP(MI1YiN_Nt3jUr)T_ASJexC3QhTE;2Z!t8r^a~Zs5F|@hjFgF)SN)#we!f!E zO9*7)bx6RWw;$Ihr++@CPlm8hpVvEsSSG_>{RX0axMU9S-DyNV9q?$$kYffH?5mJ>IQ#< zgrHFW&)UuWU^gAPHof;x_60tYN-#e*R(^neCpl%D3SzaMM>sW*EDP!P5xmPdAdp!p zAEBHGUcTG=GdbIB@-Nm&PURF zi7nR4h5V%O$0}PVD$rOXRkcwJUgue&2+`Y8uAgPV=Oq{? 
z_=1}rPlZBixS$AZs>s^~k~v(W2j$T10%haff;A&pg?i-C)~j#O;0p6)_V$rnRsD zM2cj0@Rlw*M-laS1zfVH2l-rv0FrIxOkTp7q@P4X5Z|c)clt&EoAAfDo9Fe@nXc@o z7$qeyWIgbgn55Eb*M_rV@ZYdE&_#jmw z`5mRiDO403NgYzirINoQ>G!J<+wWaV@~%*rpF6QX(dGrrGlk*B7ND~E^UE-R?a|TF zeh-M+u&j$TDe{jTD5lc(^PR=0kiLNP-h|V3?zLLcSPy6uqo2%U;brT!PRtJxAE27o z<%A}ERIx&8vO+)vaOknIjTggX%q5H2pB11 z8KvSC3GqHnI}k=ScZ8Y8r1xd!a6Xzu{u<036=IMt_H>0 zX7;c#{{yUq20S7{oCDNYukRP39$U(>z_MwO0vS6fGcy~u1Yft+kGF8XQLcVgvTz_m zY2i3WhG_pB92v>QRUH_iIWX9D!#}m8!8qsA64PYWx%7iLFz8u0aXun5{K;E^BiSOy z?{HQ3g=}(jUwGTeq9lx zjtRg1d~d72{#-LF1@sH8mq~cchc$yVm?&vK%TAe}(G%*>}eS-_v)*>#{zf^t{Ba?{$dw^w+m&VC&&B@onFgGgJzr_y;`7;VS&H zKF{xQmf`}H#IWSsX5TE3vdt97MQXUTvVpZU6B4vBnU$hse!EitxSg8X{YnNYbNUKw zcztRqc?Ya(Oc9@wcR(GZHfDkkx1^5pW9B9}P)W%lb<%JpS~JgGrCB`si9&&sPf7HrZeapAM_DwBp3M}FD>X!xwW z)9M!9oC&_dSv4pgtS-(Vfn7&|4?7?FBK}$hpb=~pklt`7CyFcZ1M1o+st8%OD=Q%6 z%#{eaD1=-#c#vRw@gfNMYeUHV2#{&p7AP)P5OTFNJ_X~$3Rgvk-FFh;Y0m1hn{Jp* zC*1_|a#?(2wi@@jX;P6KlgZNt&mIR4n0n_($2JlJJPk}0KI#rL1H*pMLJqKyg{F&b zMMu;+uP%2lV}2uSr4fRVn%xj2d)hK`%56*fs|{^i`i1Q3;Vs@bf> zj#sP^Npe!)>LfW8Mmr2Ywd4uKmKOO_)D5UU#9Nfk#Imb7J0J6&rIFO$ye&zX(|^|H z^kVmulaQKooNSO^BrilB)Vk%v!7;J6JR>=jo(lP-fU`};S}=I8NVzG5KJ+2()s8GS zom^$Z(URIY$?AegTl-rp(tS>nM+V~?;EcAJ{7sSc^Tm*rqdiY7B#_s9LUFvyuB+}I z*J16%rS_b0En(%8+5_iVYLD6$U$Jl~QhQWf)OCd7c~aWPCHKU1JOGE%dt%E#+8nDs zpSuZmS+XQgwIIXG=kk}|rL|jrZg6ZLzidK}D2X z1JAhj!LaL02+l_CmYZ(Z#tggW|Kc%r3nEI3J;*vMPJW|sF*+Ebvd~Hf#3w?{uKD=f z#zBA|$N)W!37KL{#9_4`&f00`?HU80L$}<+wc&?R2`B|X)AE=Peo@YcIOqK3oOMt; zsEQ1VZV~X?gTMCjmuYho{cV*+Y!M}6nPMSg1ErdJ;mp{4huLQp5z$tNSji^@sjwe{ z2V?M><>~TDkbnC4o937SM(?{9>{l`#(H+%B%=B}yH^AwX*nTjpoo@cRyg9_mvDD*N zA=ymu?<3HA% z|BDmfB5hr=L|3br5-b+ncj_=`boPq4>%xtU;!Io zHOjpNLhExMX(-OtvxGa-k~037MlE$vwlRbTBb%cG&n?1HTCoCn9hp}-M3LU6BB)=$H>nRj#OXeq9P1crAwJ3nvFRoLzyL#TQ$q3#d`rkB%WnafWs_2NG zR4r(%6uuq7vqE4IkS{W4p@#ytoOt=T8HX5)5oVRFwzxCFP03N-$ea16AINLTx28IO z8WyLyEGpN+%(;lvC=2<*vP&*Bnef5f-Z1S*J-CBCSb)ssEh6cgWbh~Nv(qa$VcZ7< z5;H%-BqAR+OLF#+7<4+qJVDeI*%vaP6qU#q0;yFcI#wM3Ogzk9Z6W6>%8a#8z#+{N 
zCf_7E!*b37O&DDvKp}Iuone%^d2_T*Qj2QNz~h?Fh{w(eiUkk(;k=$g&%l~A3=s`pvOR}o;0vfssXJM7iq0A{aSm(iV72Y zp=aiwb!gr6z!J~<C~>i_kqvEIf~jKV`;rI$LQKS9BQd&_7jE0Gsub)E3bdpd9Oz_antTJ8?Ycm^MdQg5MLfuSRh$RJuH~Y!pg$(l^7tZfRrD_wp zCMjj%TbJ2x*b~m}$ildDI11yHkZ!(ruMmWH|K?z?Rr0{9HiDHYH25TYFKTl|S`I#G z`54#qq`5w>>B)&zdarU4d$xK~jN*8%*7?f+kFBeTr^nV+C@_fEsbReSTv#rc>rw?M zx?1wv9R!QWUIK4S5*5}h^V_kt_(uvdP|+(F|Lf$nc#7>wf@Lnci z_d}xjiv=%i;}nsC;yYu!nQzXIzVi_ac>mw{sen^}`&>X~SWW}ekUU`~;r?z2*`%$)}Y2y9+y z+OArW++ep6BER1r7g~UhoGRvc@?ZJ<%&Wor4?@M!SDO>!8+6kCEDUl#mxecK$T+u}Fr z+e|Q(QK0{gb~T#V7EuOAyphPg4RRF*A2ZR#HC9Eo%L=C*KweP*j*BLkDb)=9v6swn zAC7OpC5t6w4jE;Tjz!HztpkWP*>X5RYj)ImO~zAlm@ou2K;kx)FmRD2XTu(7khmJ} zaGmIW`G748$a)*gm`&#LHE+E~@?OQJ9+TG93++p^Z{hDXSqK0r_4vq)S2>h1rNK*1wZkm!(*e8}kW2w9^J z!4KXG$xx#62o5$fp&tf%FT(s`-HRT^H-crS*}DES`Q&c0#bJQ7K{;YY2Zhvfjw-3XG*Z5B_$iBeJ%U;?*ha{Bb(NR!IR?viq zrIis&BDt#tGb}2@(O|Z{bfyJ!r)aICIn-`JQ=*D)U>As{B4T|@M_ zjKzoU+L+f^#&M4oszovDFbVNnUA3HaZp(Pl z;;C~XiW)=43#$MXHi9nCX1rh^$cZL%p+@B~#h7oh@3#;;?yQMT#F~T3U#OTcw`AUr zrciTuv2Vh#rm^HnbE3g9sY}${%mmTq4uId5m#LBDY|1I74(V4guupmi3u5&qp4udp z!D*LL3*I(qJKMyWZd@(bPiC9YnoN$))oc?;Qe;Gm?gn^Gf=qoRXmiw6^n%cNGavA3s)x2&XY{H5jgk#|lE9_x?D3&l z8to=EqZ&)845f@ApdWptewqa^Cw2?k@1;T!rncDIy0Gt)85)+idCPBaGkyA1r?#bO z5=E$`r!ysWwp4##J1F0r3jmSX+hGm{NNT?T{&XHb;nzf(atdW z%c;(Bt;sOWc7R=OBZ!cKJ$h+_P@rohU@+N(9&rEChOpQgs|8^gRaFilFs2UV@J(BO zC1Iv9W1pm%+kKnNUJb$COnx3JTH09Mg*3r+mf9n{ksBs8gW52isuV#eqI2zGXv1bh znxUC6BMZnM71g*Y`D^4igAhgQH9%LgcT_;%Qe6sZQd7$}E4#TtzB9iWq@KIOCqovh zl&5Yl6pQQ}Ry_@~yE{`L6heLQ!|zV=4f(tEu`d6c54}4{E>86k9SG6?hnug+pOxz( zV6tOI|3t_)VOrlHxv{KsSFqiITugC)lNciTa+|NyS3GJ@-Nr3f^YAMOHw~iG-t6ew zPch#dyJEJ% zSw%Y>#(gLCaEwo`U}FeotTX+R<$(}jg%D>YD{n!&~8KIhL70tomyu7nI68d~{hB20u zr3s5z4jlog`UcKn0Dar!lX?bpjHFNcg8Q!40P~;}Fu(hR(VlqI+M)wdfoF_ZS$kWY z@;qB`KngY=w#a}tszeHG5K}spffC~p%;x7lT_k|>GDv&9j&LS6wYdBW2MXKbT)nLr z3Zc(-;pRSpz8T@4GkpZ0c4&&X`NHt&*@hr<7lR-sTai1A<^})sc}i*wCU69ksamvToMFqxvLEr2q;iLr z&FGsa2#j*IU*A@0h^tW!T2&}lm3Qd~Y-RHLzOy0)C9_rAU^NQz>@~|hR-WXZoWAcA 
zG&EZQ$=9}xB!4x7!ApK&F)%T$DEaVZ)@C|uJE~=J`$&ML5yotqcI$tDCcjCn(N;(t zHmhco4!#X44Pw#ljgNC&YBQ2|e^=3QP)H+HH7c;>>0Y?P@3eZ-U7EOr|8lG$+S|IHf+C%d>~1oLnP7c4SJ1x1pv+cvj-v&qaEppj0RqM1t||;Lr%1DyQFB%*V7KZO4@;Ck&U+b!P++SXtmiSLFWqw z&D;9R&FgPDN-mU%x8W!I0 z*FlBtJD44#?-9+U6{`1i#7j944Q6lvheFkAfsislQ#sm9OuG5nyHZI8pRV?wIavyX zI23%*P;kmx5cl~O1S~u|<{KFE4S1$FBOuRWML{!5E$AdC`W&&WyXUwoQZCz2bk~*E zfEtuWEC!HY_%9?_{GM?qaY+zVqd)>u#eDfujC&vuG;X)^i%}mLqT+wBA?%BV1bH58 z2;>^Kb49_lRi@rih3^Wip@qs7m52u86u1h4t%1}PWIxaHp7PmzpAOy;Z4EwR-)92K zcMwi=h%taxSh#H^n%h=CTdT9=t2Pu0HHXIF1_~o7L`>XzmYGmn$9z4SlXBr*^W(mzjxBxN^bxe7U#)uN7Hd+?Y)o}I3nb-4f}3u9!gV~xCv?;0Q#A!G!s);* z*=nnH2c?zhgezWBtBgL6@5;D?bo11`Yvm49@1oDBfB~b@QoX#U)cLl`R#aJanNk{i z8IsiEC5{=TUZwq-4!##}4Su!e=)%WJzakDi*N1>55H`3Mhb6vxaX93cAe2xM zW%)Sb&^!|yLD0E$DxRGQ2w}K)bVPE}%{S~aWruap+%+d|+{N6xVb>f=L*Dvg4m%_A zpT!3xt~>*v9r>XPCRo6H|r&JYw&YXn70k4Jf;cKCMywQhl6Sny{W;OM_>Uksz&q z33^Fm;q!z(eT&}$j+6Yh?0qLdiep0(`MZMnT&!nZ!*$nBc5THW@;ot(C9V~A4tXnA z2nys1d4E-0pCUKylHqqc_;Yamiw)sjxlS3z6=8+lLeCRD$m^6CtfHE8rz(s+gz6I@ z)uZ|(Up=b#drP~|*b9wf#zujPtX@x%B#As(w`2!+m8fH#V0)YBJO$VTb5jSwxhBJ; z@iwd~wpfIViw$j@l>|fT_nP%L z|7K)o{LX(?HAZ7UVc8s!kNO#6)Xi8U9M8h(Xu(}?t#Eiwg1YcpQ-LkgQ3AIZm9pf{ zQlqL|$a%!VBn?!`MW;q}l27#~yED2=Zlx)gGv!^&Y+OaT%r~pbMXJnF5m6=N%$T{! z6&(eIs;*p7Vi26r>>KA_Yq}B7c>ASyknv zSeo#eWi!AIY)Qh~p5xE*wNT}_zGT^Opskb(A4i&EfP&9CjH#XL_@z$rHR_+!yQ~~% z4Ik6JW5f5HU42~lj@!L=kCbmLvKKzb4b|H0B<~*KmogEGⅆ1HBOd|oyz3Dq{f!~ zb(Mtfze0_CWk8~jir22mZZg- zXLO%~7VQ3)FlGJF&o;I94ZoDP_^q{U@#3;xJV(l?nc4oSXP~Ta;Jof%;J(hGQVy8_ z(Ea=(Kg4fcD|c+`fa+wnua%7&jYp+mp)u*^s$hU#+}kqt;5>Ty@hg&7erGw#cV6TJ z7rg{WGGo%y7aZ>Fx0Ct8JJ}8Qg~^!*j!I>*LBk-xcZ_2>`81ruj_`m9p>=$IRFB!i zo#fBOILf^$`i8#?%R(v;Fi?{iJjx(ndoW#FW^hx80zUOODT6kAQ(`V1YVlFUDAoN! 
zOP=&sN1=0OT(xr#9s1U!)it*mYVscz@pA`H_yp_G7YTL!kLM`Qo7fot5- zk(L8%F500mfd{lc1W30HsEqyFXZgc^C+D}Y33vwwA4$MiH7dkci)ii6V@$E>?DosF z{kRFAy|%U~`@}bP1?6N-zAJB7osNKWI_vb~_X%(^=eoA5ckJem7SkOcI`vAO-2%ET z$ClXKMn0WKd60Ilh@9Hd0|jm;t)`tcZL_m~=l~H&Y>_%pts4_O5MD zRqku7;)paRxy@0DK>~+{Fi3~$9K=oHk-KC#dwmb45b6+rWSspeJ5@F% z{6Pg3wGj!=+>+H9H?h+&=}RTdwsCf|HXl>>eOhjNl?jWAjYvkV=?2tlx7d(W{$`>U z5Yc)+s6)sKq96l!<9|A1|3?}LswJQA6amlTFse)_D^;S6&T1&M zO>ro4#^775ckXoIjcjE(@%ypu!2~FwOR(XiP`W&i;Q%(ZS^51@nqnlA8cEIVQT=8- zSR2_L=!;muBT@d~t5tJC>g*09oN`>66j;5Ti?`4E({3ST)-2V^XyBTVyL9k%cY@f8 zueU0%8>|TR8T=@a>0t5*6q*SV=>jcFM$Hx7%HnCw<+a!|N4hKQ}mXKhK z(U>U_b|5r{vlGk-ESONp_2m1B3ju5=Evzx>{1RKt+MZq@%*W+!h5|bv!Uvrei+GNJiuGwJ zf;@(e$dWOypgsAekbzh>#pA_6cVs&uJ%pNAQ4HM{^D&xv{x^<#g@Q!j@R=aU)49BU zzdJu0D;bWAJMT}6BeNGq35VjEg|-mVh@5irqBrlmkjV~Nl;TRhK0*eA;i)Ph zb*S+oQ`nwd99!*4)EbW=eIv;!&51PE$mcPNn;HTzIO{>dkByn&cs(DH6TX?(?(Nx} z-mn}Z|In9sNJlwwOOhxVQmD$W==ex&hBj~pd%f5&7bVj`|H8aIk(L*981m${_-$f} z+nJ~uwyoM=b2`7>SNUbLWWV}^>CNZ_>J~Ku*>+zJvsNE9(pQxEQ9dXD@-RUOa;#0|7X?f~=Xv@boVRo$Us1Mdb2T|!oT`Cw!KjtHGD~=I{r6RD>Uv|3PC7@Fy z7wN!~I6(yYT9j-AP;eUBBn-J`f|HSO`;g(5RjdxLwx-9JD~Lfz^}Kuf z_s2wig7&+eOwNmEeI)pFKHiPQ~ovcV@mpAQfpb$;QX@4r5?xI6|fB3~} zTCy4{fUu#8ocx6r49$pP1wxHbegy4smC_+Ie8kMpS_56AS0cNXKd5N=NI z21N$6JYo%1sZ4M%g09OIOV&mi$Xv#}6JK4z1`cF$x6;n6L!cxNszIL;}MWG^HY3oNnH*D!(JC6vz?iyC1foUTB;(MFHBSWNTBxZK0Mr zoP`JFfEDTpazghWv9K2$uw_K1drWU1kEv$OX+rRSp&z;WnDL<_)d|6{5MskXSX{fX-_`=LDVnqJptitUhS9;5ccM zR`RQB#71=DZ{+7b$!^EjjNieulXMPa?~MWcxIuftFi=2C+2RU{9JCh&E#+XX?+Y@m zP!o1$tqBN5UTKDzd~l((W3hZqUtfaau6UOVD{kZ@>N$-I;~`_&cyc{3fiMn5Zwj*M zXraL|s0}+D9pz13i$*r#)Gej4{4<9etbDo0Iq7Qw6I~LfYx_zT=M^Y&N>+~@Ye%F$tiZ|QfwYWZ( znE{=4mYU`NPN`Zsoy}?AF)rf+>fefXfCUtqH1B9ShUbP(C1R3qow`*{rwUtRLhZN- zwOtX1eH@Dh|BQ)$+&p@c1J5*8+t*<@`rbiHci6&K)u~dA9CAj&A?;C9%oF(@sN`gN z?gUFA1zap@NwnCV?Z^!TOvl|)1;_YvdM!W!7Y$E#kf4w1b;E+gRpC}lnCKw)W_7mpR@a*%q7v+g?AnUT8WCBp*Fs?=RZ@n##^DqC} zo+QUAN&bwrx_LqiM%*Vxga!Ev%gFNsgpZ3+OgZmY;tAC_D7y!?rSInXvkLKNZ&Q2$g0` 
zc)GK?+Zycw3Tfxlf(9CTs0>06EpT}VffWdao0Am1(&uTNTVF5`tb}CgD3BorlGnz` zH3dh-K(S+>xX(bbj3dR31Jgt)HPoh&)M#mu&(p@RW^d(IUzrP~9W@gyw{3dirL&0dd3w{L@q=4KTp|)iuxx8_KR0c9kPv%g$kJ0gA~+{`3GGmrFP<366ECga2tj31k?Edh4qi zX+yztwjT5b)H2dKs0)HlAUm&XmE&1wJdCqDo!qkr78FMKb68 zgUNly#HeBnils!kqL+u{s(u1qkz>>8x0DVy34<)yeHi@19cQZDTwO*pRH(2?YjM(J zEG7#l1QSbXv8A+oOWE%2w04J@yhVTTRZ@4ZlkAjMc!f>p=7ZHrtX?sV; z>3#QEX>VJp-_dKc=w(H_OXi6Tc{CQX+C+FEqVy_rln>t0s)07e_6Rf8z1DJ*atX*D zjG&3I=n+-+N{?^iRKfQ@DRZCuzIiQd@-TU34IttLwD4%TE_qy)2kvF-HE>{+%?zvSdCM@+u=?)8?WisEnJ40^whbNTt2K=f6|Ov#b=Bwg!EE59r?|% zaQdCM?oXq8O~yGUCLg)KhXI3Yz`ja|Y&x_ncb2c!2zHd*`v!Uu1JbirgTIk??q{GF z#dZI!C5xK{==MN?xkT9n`E}^#+{t@%Kh91S{@R6CyOCce5%e;YxK~NYbzZcg-Nw*v z)6gz97#Y_n1bBa@!}^qYibGehF$Fj;u02;LfR+1MpcK`Y(;3H%u?6)W7oRv$ID1AYK#P$H@)J@q%?4$~6_6_gSGdF1GGe-?L&t>Ps%_8S{pYkOu4npQ72|gd1 zq_K>uizQ+jC%6jR=;}fCu_bt^30_6o$ej?LCDM~JrZ@4O57>&A*;_%Fl2714uv&9` zpI1Pu2wZ57n|TgSX(h)Ly6SFd2d`D0EfoNP^Q>^R)H`gEYttqg!Fx2(KDPOVMBbiR z)|dCb02*&gGw=p`YgSOi93VoZqwW&9oqdbb-tf8vo{=pzp;@ryg`*)mOFy3&$A)5* zQLng5NzOC(ZwY_8dfTW$bSgdkF+EI2Z_0lnIco;Nn}?qx zwG@B~f)2q#K&!u1OMcE)eD_r+5P@Mn7fxlhe%?~{^S~9)$sf$|y!PDNm^vIhdn&2E z&I_cavj#@l&d+1tdUs?1x0Ex=Ks;)yW^2;u*;wNl>XM@e=3(+qnF%DUWaIDi!?!)q|C4jHIl)Ny zZhkh_(&uWKc5(^c4@pU=F_LjQS^FM#elhdb6+3>cRnLMOwC_(7XGSGshA5f&m9A5s zvMAi={?Rk&22s;wA!GNNjc) zaPyQ$`l9w^OQn*aez&U9m% zx9t!wx;TubG;fF`)x7CFl3mL&DjJXJ;c#IXZAv31q0+TY`=hVhVt2=4j(0l~ou`1? 
zphk%lrm?trDn&oCHRAcLUxz?wPhp=Effy}&USUAC_*;Ltmnvc#od;pRw+1h6#&4S( z!{-m`^L+acWbJD`_b7}8P)WX7*new)r~6>i-~XuSCtX5+1nG+Un?(4iC>_7|srS7j z0w*~X`~fSExGB`+MH-4AtQ01SopzRReZ>Gisp0J`0^Y!A~ZyAkyNPJpR}d)MA7Xyo^Bz1WZjfM zn%fn8;uZML?p({#!Nfk`>>|9~jx6AZft=l`6KVSCpke4%8D8jQYQSNFIvw=2(UcQi z%xlmWBK;(RoAb0tzM6;Vkm}BAGa(xmElJX5ejFO{*d$JG1BhO0lF@O7QQCRbzNk&o#wFZLhWkR4V3(~3TqEE^uFG;X*o&-46Pz(A-egC>+JySw|AJj~ z?)Q_%L4sV{P3oa0CWcRP?pL}=6Y_>Q2X87YIga98vVz{UL2snwau|wnD?JFaP9gxD zNp#bM3bkOyIhK%YwX@7&RhyAia3eWKjTd4XpbuYh{oku#sesU z+N-~@nXK>&_^&4O=e)q-zybO~7I=$ltdgxrX{C1|{$HTvdt9bwm@ zq9qgy4>YP7wh{*+0f)IF#DhNZEcB}(oHjP|WO6X~o3yzB=|akGV+#Vrz}}*y*ew>u z$^*>A>EH+jZIg7ornrLw*XqKHqA@z>xTkUGA zCZqh_B)dI5WKUX6WeI@|>znuyng5iEf{l}EoP=c72|8n4!*ikiWduk(*{cCehFF=o zPJ{%~RFh%qhNH>DSEP&LF|}AuWknd$9yzPc{iNL$Z90?`D@l{%SOyS~fueokkx;RQ zv3nTiJ1H%)^?R=$7C?=RwaCX9!fWKelN_>%VtKD$IrI7{Baqw6TGpz{tb)B-Zy2cV zwA$!hHQub0GFndsAq2Ux-X>kN^x8;-UOPT1NyUpORkJifGNpKP^zJT=|Bn1>0IpVL zI+gYZD`w2MgL+r1-18f>-A)6%PE{H0a)CVb13+ln@Q_xMd0P)u5KfEZfG88Dx8H?( zVVTbgig&BVs_g})x2T;8GNCjKJlk7j)l#t-xNTl{!w?zTNW>dSOf3HfUk+2o;MkG2 z!|^fFPi;Piq>gHOwR98p(4_|AfqL$>X z=t-wS1S`CW4%VpBk~@%iNjQ23A1S$q;>2}#2X|N;M5V{X5zF8yxgzc$^fq%~X3dar zL3r?BayJ){^)`A?-RnD|F1gMr znqFNZ;`W%G!qtaR#!zlULJB>XB7R-WN^A?Z6E?wbVF*+TYg2+LaX9mLmzwTM1Y9Xi zxa(ve5bKeErYPv5i-awA{A%XsaMLWgU8ZE0-by@=m9h}^bEtCbxhUOc#&#S{X$Le_ zK<=u7+TmQt-+en-i|WZw-bU7NVa;3q$5FLkp&OK zjPiEyRsqHkvW3_(E_7%WnxGKr4fz{td<+F^Kv5(MPFb;&6f-5f}bzvS)~XlWtBLU zE<&jSxVe1Ti3&ySkYU(D&Xw&Diy2V1#D{ukPxqw>!#?+Jh$&9qqe~_ET`tiNws$;T zOr1ufnbSyUh(%cV0VZNZ1Ap0s;4NiaOyxJmBN5hNZDtNOi-aXXQ;sy}9PY!OZHuZI z#u*e5C$`AJd!p0QqtIH9*a8G_&>*aR{6(1)IkrHA+hp*-g;Zr{HH#im$}a6`Y+k*J zCn{xD^QL0xxEQh54^{W>yaolnna9Q#q+aU>z1Y_<+n9F`*urZ{W)a5Vyh@?gC#iZe zihzq`4W%-$`T;J)2@T0&!Xuj$Bn7>M3k&Ltxo8&62IL1Qua4BQ)DDDa$D5#8^tHqT zKrQ;rNrnYX$ziSeY7+qm4ES1+Jd$SB7XWMUFb++3A^Bp}h`Tvx)&$M=JRm7-r-*a7 zt1r0~d4$!iCfcZp?X^2V)@}{lB1gZ>%nTrF9+1ojxQ-|ly=DVH*P7r2PN<-0+C->u z+1}_<6eNK0QRfcUebfYCYw{G-#LCM88!g_}%;;iPNCxE7=b1%^`MiS<$bgBbPEc>x 
zru&+_agxNa14@8atUCRoSNj^><|t@us;49cq2_W-;Dwxq9RjcPe85}w;3YO*a6<_Z zl-a6iP`1aHGHpbcyo3v4(TlkNA$s*7#RS`$$=NmKh!JcV(WPO_$UJOo9=1N!-;YGH zjpWOYazF1W_r-4EQpAU?T*+qbBZ9T~mCLgvNFe1qQhj7{)YO!tKMn*)7uvuN^bl-% zcNBK|t%9s#Qgd;%q}bGnREM6SWY5xe`3d;pwzO`p)11t9W01|WtKsc zbW>=!PtqYF;EFpWB+nyzbx&bJHhEdb&{ORDvKNO5%5F7=QTUXLT&6Ed^l)0r){K3G zAkLNKK+>#`ac6A71Z~wHsO}Q(3ksk&@TCx_9DTumO-c*5+_s}N{b^vj?m25m>RG!j z-f~GfHSPSuQQ6wg46;*%pPq`nL7?3X0vnw{;39oTjPI0jU{2#0Fw=rml(}MR5*W;- z`S3OY7|ak$qTmFz(ZoP0wF;c#V+p+M5tmg!d&oiNDfh%)aoGq?tAQ!*HSi%6{?6z# zEoOn4N@L@t%mU;3SP_co75v@k%>pM0!FFbW6N>LPv%nb@g*e#F&M^17UeFus8)gCe z(S}*zGp2Y>R}lXN(~8UkiBc7q_23Thukm=e{qGbL5Vj+R7Q zi6wUN`A!84vsdJwwTVQ;>~ziXmUA0K273y}O8Su2YQDAORBe@y@@=}5lv3+zRokbr zM6^er1%uGdr4?%9HH^T@9}^ay;8tbyu&3~Gn$`7>O1~j~FoE?Fc zv}a1ac6s7uX-D;>{t%;((XV-4q5etRhQp{z$u$R(%CFjlXtTGmu9~D|x?$bagRKyh zl^yd%l)Zej#xtg@*~eR(ow8^#$vXfL`D*!)QDA;8cB{W{1|K+Ei=WY{5( zjDb8pq-<=D)j-8P!@0I$Qf?r`p_OL zbA34L4UUFa;+=wZ_JxniDI^>B2+T@2J;s$cxHN2Mr|!+)e3bVll8W<2k8J>7#0imd z@N?wOZ^;_&*9Wq=5mqR1O{*6zBcv(wA(Ddn*Qh-SmN;|Hf-_t&jSqp7d=3LXrph3^ zQS}b1i@9HXFMW^Ed+h8D>o29Pmm1;``NX-7$|r6T8|pfi7MXVxgw66HJ<}tg`8Un3 z!;-1>;eu^1)!e{Fea(NCp>_2vU|} z@lu`~-Nd{Y|AbFfypx9s9t{4s@SrF>i6Z=oO|<}PTh4i)-i)@vtA`+MsWs!KH2zV= zd0r2f^C2@RFVYWX2$bKPRag9k41wA&C9a8FqaUC2F{0aB1$PJy$NE;tyF#CwDzphV ztkdJQw(gYw4W9-qz_E=-(eTl=56N|IKTA76Z<0EP(xz|aD<<%t2%mL}J5UgnSvIgot zKr)a0rTNoX%Y=QmrRJuCd216O z%J>Eqyj&syooa>ak`aGez{BKCf>vv&k%q+T%Dgq4%>uuT+X(@-i^%0g;8R8Pb=;8^ zF&jFSAzpA4fh^4!c(ABq0{?y|bAbu`4y}DY{QWh7`*IG0h;t5jbEraonCYUT2-_un z$iD%aTD{ZOdO!En+!yXshz{ZkO>$T_`JiD~1G)b)NSM?CejB z`C_NQELI#n1rO8@JC)2|m?CSNiv|_f@-eXRiK4-wyqqp#1BKTMg7?U}lz|}UV#O@77zE~G z#W%8334p|kOeU{B2_Ie3#Y#|zT&y^AIF<;d55>f}SrI|xW-amT+^o8DZdT(h#ppeR z#t95C8#!8GH4IlGc;;v|Lr%`oY9=0Yw3^6|y5bzI<))(*vy~jJ=p0Dmk~S3G!9_YL z9Wbo7!%TK2Pk-5YKtJMRpC(yf0*tUQAzMq#*YnaOct!MkCH7RowQeI?+O`A-iUTpG zwl(^xU7v4!J2@k^%m%ODV{%P5vL6rnLZgAJ;E)A_(nm2n-2Sv^VE~*y5m-tDo%RTy z7N^t!F)lz<$JdH?2MOMOn4b+}Yx0$GcP5KH-kgk|bdS%>Axuw*ehbhquC<@-Axux; 
z*OuqP?2bSGhcFp5Zc4uXj6&nJ&l)sd`-=sQ54|dEJ5*9Yp_DK!bWj$8f;j~0T@qOq z{xf?WjGz;Yvz(xDMxY+sQ3%^+Tvkphnd4x%Lp!>Q)5Eid0l`GX++t#bEY2UWJ zqqD2K=enqO!_JMHHg93OBt0OGfWvfZiP@tbZWPFPDR$OB@kEroojCU3AgN{E@_USINr_H534?5~L4 zV&w$EeHm76=F6_sb0N-Ouz*I^d1n~q08eBd&Q0WRmux?B!IuacqU^5lf-JDMj0|YF z@?5CVE<6{y(h2Jl%CQWY$Wv<547av^OqFcuQG9~Qj(9HMCJh;@Tr?w%VbGPGk~INI z_&J@s6uC$hv!w>D#qQyZKa-I7qcn06R;(c~;eIK~Emd6<%eyEFh=E#@3cSagORsf@ zDlcmvcdX}(NO!U^@viem*~rfVT;eGNgITH7Don5ix8~#ko-RowHfp)69$Y8Pa6baH zuUPEWLcra$3cyiz4S@T~dI8sWMF8#wxmWRj3m`AITZ7F*%fbc}&e0PpM@CPw0%KNb zbVPrV5>$6eP&Zs2F5P*8y3xahy#o}lQ35tQ7SNK?c>s!by(tUb5_#p+zZ z&+-I?!2^Q2=@M{3_QK!>&S!@qE+i-dKD_LwEpV41C@E-^fS~BVCn!eb3Ce`LO1;pz zU9h3VE4e^Ak8)ubS=n!X?nmM>_w8DF9?CcLGqWW9ECZ(&#z)yT(9cW?r?oOZSk4u| z_;%#zXOf{nOGZEw%R}e#6od>71$}i91?`}X%PdJP6tr_q6r?uSMM3bCB_Z?dQ_wY@ zf=&rCkkFK_IU2&0vpyQq7O7hFwIyk28A!Fz5M|duLtjfFwN@J1c?n3drXzY>j@LEy z-1Q3&^&Ab&=4fbUdFWi8hGvY0zP^Zt>Z_ok#+qnIZLW)k;44=M4Q=%_bY75whR$(K z=J0&7%Ja8f$4YP@#T>aKd8GuPrR03}1N^WX(c+>5E9EW(JoaNyz{0=?lZyzpXL<>o z%VPltUS*tg5a$PSeOjK(tCY|3a&O^mjrGuQ^p{K0?)8_)kQUmd>}qM3Ry}?&FD9kP z;uVZF7BE)R1G77FV`0HOS38U*)0VB$;7Dfnz zf3;pHU4KPDDU~DHW9L>uT?_vj|7;huIHRX7#7gd`mxq^DQK_;{kB0xY$o<+h$l)oA zIPP|}cxgqtMQXWr1ss=NoZx)e)}vkk#t$Sl>i63xsc`pZ(9Kp(~BGuOLk=Cv^(7(m?2F#LIiYTl zKBbCLzjM(RS`E zhy`}12k|)esT~%~xQz%(TOMSWXLpAISuHxdB)eM%Y%T1LvTI;>XH&4PRn~&hEJ;+) zuB>&HK-MRa{WSxUkKR@&JqS*Hd&6lDvUqPevtE#~BM*>$dr6RK2k2!M^A?a%b`2o= z_Ig2f^%ViKE#iTQDzgf?QfPaR4yyS^k$Y~}VR?xDW=X(YwF zVq0IFW5y=c37UMUEimWkw0F^LQTH#F>c$9g)kx`51pXyW!_)X$1hv%Jf*))A&yGZ- z3$=@aw8+s}PX3L;`)s4?bm@77@;>#1xq+x#$+WwkhlokOnQe@gu%@3B9@d}MPejYq zmHUv`OB>=5$lI#G-B#O1QVrZl>$3YE!RN(E5ldnao9S0f+LT=_2BA~^?V4~X=kS84 zt^jVlQPADAc5Sd<3cTSf zrnQoM(sM3$Ql#GAQOteb+dCr5pE-WT<_Y7oE%s!SGUqpOxoqNMJi5~E;PMvU%S)Dn zW`94wBs_?ZTQRjzc3F7P6I0h17x0kt{(K4#hO;Is9$>fvc$p~!IPS?IY0x>QE2A9K zl~Iqc7fNijVa560l2Bp_R}3Y}t^rElTQ8JA`zruSJ=z3M{&QqVkp+>>xBlZ=M}KBbhxl_(k7YUB%OqmwWc|eA9guS{_^B9 zVF3F3MLh6l4;;0*E?#)000&3_5i;`{L?+h@kx7HdKP-Ys=PJmBvTGn0wYe^cAOc+h 
z#Zi((34X9a`G)UW@*5Nro)1=SuZl%B}&!{&Bq+M#?~DVzHk4>?&0?Q>r-8 z)I-NIh{6LV^(=nN)l8n5?%NQkCvn4EWCpw&1|5I(ZkU`0AjR`j`7JL~&-RuV96MJ4 zL@FI@S7m3!p29atv_P64In4GHzGXf}pNLQXuBBDsyj93-GG-Hse$*QxmF8_hFEKHd zot@af*-U%ctxy=7*>Wo@v(RN(VzKR6-Y{g%`;M?pi;j54oCbBVsl&`en5!+_yLLw{ zGmO#^_w}CF%LCaqBj(^?2Zz%Q&Mp2#lI}Vl8?h2 zii745AqW?eU6&)V-&|Gy1dQ^i^1pY1jw@!TlwB4>n8O8zceS~$1-o(yym)rmDM=Ev z#Ssjyn72i_ZkW2eg0~1nY=TtMzhyuK`sFOUpkNk{DSs8I0`XV`FFGzOYL5Qd@t#6&o(zY@=XcnXK3BMJo(DHt3B~cG7&!IM7zr|k!ITUWtd3Q z#>{FehStJFtdGkff|g%_bcqeF(flGBWk4$iK4q5${sJ1c7S~0gaH}OJ{n@2ZPQ23c zN)lU9smt(qA32wdO5djGCcVz^@>F^m-~g~;W2FDQB$ZlQD>U6ucC~=QSn$s&m9Ev` z4;%ZJ2bBQ|QkAPkQmSQLipw3(Oa_U;XDw&J;4fCr0?Q!LNM_&f@_BqAbmupw8J8SW zQ-&twEte&Zb0KQYOhP&ZJ=}*lN-ielyyD~ye4g6P-%0)?(zTVc11eowCF7lz%Ty-{ zC;=iws+3Wx#ntlwCp=__ds(`Uz{*-07iG~5lI@DlTCzg2XeB+CrFPNJe7WcyDUFnL zL}p%Eit2XA^rBK)VU$k0A!_GD@Q}?>rRanr)i5HvR8kCC3j@-Dgj%0E%BFXF@@YT4 z_V}-pFMsbbI~C*WA^EM8rlC(pY2;Z_StUV_a`A=9&G(ZjvyhNwYfqt&-1~5H4KI>f zJ+x^G7-UjC&|N02Oi*AH)G`nx|HcAD4)R_23Q9a;#%`AxmyRAxE}nWvq%#F|%yrlU zF(z?bU_|q^q)M7tofJ`g1!qKkwl#mYY@3K`jXd@0@w5{V&hsm4*Lh3M^GjiS3(i6a`hb@xko)@AjPlDxl zi{1zDl*Nl=p){49ENSKqeEFLwM_hJMCYCVz0qczLN9TqJ=yq_(?=WJQzJDKyEXV?q zCJaM#=#)UI$Y4=MT;|Z+aG!E>krqw}t>#2gd_w3zq#Q#&b(}Wi+3B6H;~1b@Nu0Ex zJldA>qIxJPmt1NjsC2oQ|BSoc_(jZMknqN%wkvBT?G&A{{FQYxFugzmw8C$qX5hB%QRfE~U{<8kFFE z=>BaSMqhbFGXK+`UjMLi8(GQ$BDJXjIWC!h=RlNHqmQ$^>ilBMIZkq7mrynNy&v%F z_Nb3Jq)mllDb`zzCGa3*!Rk>ZFGIN3d#KCi!Rka1 z$|fUdodSi{>U1kN2ZFDk0Fr8khKhEwhv}q@dPsGX#Z$PvaNGm>jfA_5T%#Ti^m&?F zj;`4W2r%$u^c($Aa-F6!dG8s{Ke)wiU#GWQ7Ti)JL2IO+ia<=7?C(s-U>yB$dq>cIBeulDw$zSp@`9g?&%0m;P^04$sqStt~J?8hb-%zG9i=KvnS2q zp|ecNQ$%H)8%acso$*H^+w-A6loDA2yAO_9!mv3w zA#_=B?BQ%Exnae2AHu#bm&AjPg9hT$p1q;6F(|!*18cSJ#89~oI7Dw)5&-S1003oI z3jkVO|MX+6z5=GdZ4q6NVal6R)pzLbF0$sfE82}6zc?AH5 z#X9AGFNC&FA^3_lkQ|I*l1?C%2ujhLI_nh~ans@tQjBwytvK>B#ZN9Oj$vNrC+P_s zK*8=CA~6`X_>{6I`%19@rj?Rt5kiNb*pY3u=nvW@Z208!8>9)e)c5?Xx|8%ESDp4` 
zDX;tX{n5MdSxZ#dQ@EX6>@cFfI<&Z_@LX+S;XHVUh8i?%WC`|6w{z^-@kYmyr4{&5}L@TCgm5`bj+RSGst;r|=N4a)&E}P|>?}28KcP6luV1GKK=u z|8lq&^TM81YL{@>=tSURD-#Ef5ffC5JML5A<}u-+TfayS$CSvsc{+%@G69WTZ=4oI zyE>~uSfFLOL+vJiOrQ|?H-PqY!!!0Rb9HY>qLi}b+<-VtK6?+y#+u04${6Dr#`jHT zvM#EcrE!kv)*Q}Z#mz*5?&!^na8CWJ7|wBy?=mR06KYwQ+_*P15AsF0L*X`;lPdUCSLnIUe>u89VN$jz!;DsPy$!I zAZdQ!CHhK+Ytop~RIU03bYP!}Uzw!8X0+>y z-m(bo)bvWxZY8WPMLUi|F|<>Q>q5I6pIaX7rotTBaa3)Fb~DBzP$bubb|m8fG=$Z( zpdHrH#b~G1N~$?ZkkXy6eR*n&-nyiCWT>;kC5p1kvWs+y0)Dncy)}iKab?)W8kQ&) zhm>Rd%Pmn_pkN~VfJo8NvQp2Jr;p=}o!Qao>5`gVo{2YEcr?5rM2yLc4RYzPRAeQz z041~NaAbMln2PE7xvgHlBdD5m#1oh?r?qK!AZiLI)X64me^#|SIUFQJCPWs=2)Vn1Udti z{kQM&4rC0Peu^^1$1z969RqRa0PCFWRK-MxGDg1zrtT?J1w?wL5ypDK9#cvngQauX z>5V!{f3M}q*~Z-o7wYhx1*ItjhcKI~e>!FzB~0Dc`Akv`=`i^)tZ~CO*=KZqnJPp2 z#{~F^5HCqGd6a$qW&_Ss!qWEu6D-68dfkEU9kt%xVsA%pcW&!vS-8I|O1@xCclLH9pK^CmM{?5L;X?Tn zch}vKOu4(Rj^wlMuCpUKNLxq|?kV)>cZt3ZD|pil5sCLXte`%e8O&ZNNK++V z3@VH(cNTVXVd35`RbQ@+&}g2^R>Or*$^WZ%GGS)uu0AQwTBJG}P1>R-?6e#(&0Hn! zAZ5#NoncJ~(*S86<-7|!LfJ8NMrXtEoT>pbbKn7->S}q}iVP-w5xN&)_M%fSL^P@rcr|Bhu==ijuJvE@Qp&~fu%V-AgT zBwuxH3l(?TNt-OM$j_`N4r#0zI}OhSX{{>0{#%+pvyuz>14*&`=v%+88=hGo;Xf<@ z3MI+vPs_len&Z$K=Vn)yGVyR={Xkqk5M5*nZVg_%U{Il$ngy!-^rzK_IRdkAe{%i7 zM~p_2|AzVC*P#x2^}ut(mx^kXkv&XLK@-n+ydL^_fNlWvhr~RYQRmvlKQc&!a@ko2hmoAVZyh`uyM+7nPrggPmYH}^ zOuklNlONpwf~CwT0uwF!#pK&uAr^HdX7E9>&2wLyV&r#@{FR^z4~hi!P+q2h>ikO` zm>6P(%9V@l6;5gsouG!kJT-__agvuI{4M${MfltD(`O9r_WaLxEczLja*%BHXB1+9iUIfA*|6+VA&5p<-A!vAtt;s4Uzp2C9- zHiy8wj8z$m($3S?&dc*;rFIqu3r|8t z91HB9CYb;;N!xEHAW<_^!yl|y>13V_@&{4VZ3Ov;!LVu){yRfPvspUUM1AK~J2Qs) zn&aILC2v0zmzb^#dc3HLxngY1rt4yJx(2Xd!N_dL4&*~&9`iJ1!kYoCX~WzZV7Ml4 zJCyvEYldtYK!tkL%wbYUn{EIv*)-B7if6$!RN*BF9Y2R9FF- zWK-n8FP@Wn?B}2IbC`VBB+v{w{x-a;@Xwk%kOQYV#5QkFeIY&?z(;~r{;~gv<4d0# zUhq$tdWl@d-2|m+XIGQbij6kIX*=4KW^K7z z4kd4L&79C98L){M>EsYT74F6KFk3JDQU@yU$poAve0P@h{-&o25vN$PB7 zAwkUq+~vIQ;yKkZd?*57HUi4Lx6EB;Ii|V3KgB=;D2yePEta>|2#g?mZ-!u2m z);OoO#SJ`;go2r-gNXa(!h!A^y1V$VvukTtN2yk>+I?4tdsop@z@IiL#QFzXaClVD 
zhEi|PQ||HqU2W!xtMAjM%H&V{lKFWwGT{YzC!3dF6N20* zpfIVJU~->5#%G)OOyVCG&Nxgtf@1ADZ}1<*EgwD3N4HQ~W+N_f^P}7J(N6ynhf(EP z8sH=3GLzKw)g8^RUZ}5j_^&u}C->EB`3lME(y~;;DQaFzPR6mTIH`eIg@%t;pfIQ5 zpkk+%vWjGzAnA_!t6nAPirwr?OPqcUjh@lXId|S}GF`FG!Y9v_8*Q11r;dls>^?=Z z_T%A1n6l=3y?k<_(seB_C-u@Z)RUD)vFmya&P+jHI9X{bMZA^Rgbj;J+$g4sF(&~# zGoN^eq%8Hlmh_tG&)#X#iaW`%9wd93BZAv@bY91#y2U^GA;D0R2>NY7iS@%p4KMjd zDo)Lp;Ytb4+D_7j(z}agWAy!gO1t!Q7~Z{7YnmiYX+A{XDpDhSP73*f&m% z7)f7_+DoSz9L*{kl@TmV#;hV!Mg`7eoMRdB+Q95}g5zmLHu+u&J>g$!91|?JCjqB; z?4-atrAoO17|)JQsrc2rn&Brtnc@>>EBSk*F=2w1($l&^K!zeK#|+J-6R(m&^o~vk zgh2xW&~dnK5R@S=1GYKCf^l1~Nk9(H*$=<%f0(cz$bZf7 zTE3XHA4t8dP(%`uRdQ{G_jO^-O*Y?8i|SEmCYX#|N>rwhksx|IwkeKYv(?z_7}y!~ z1)6Io>4EcyC|BO{g`YI0+ z(kta`L`Xj5g2lnYcV)3JpCPeQ+!wa|Ygr+y>F8l+w;$*(l}n|7^{tj{InYf^J8|h$ zCs%-28G@{PaGZ@`E%7^<=z`={aX8nclQ(5WptzwRqt+yBiT<%Y_m+{s$~91aaq8sx zkbsa>QaWNteSvYYR$ygp{Z!)wyXMT7k~U1&TMLqx$1x8t$1UU+7d#_b#gam}R~|XW zjclA)e!QHQX32v=2gl21XPHp3Oe{Z9&U+42FqxtN#V1@xlh=U}rQrZ&rkXdPz8SCN*e{1re-KuuMapB;eOMFBt{yP>`t;0?MIfC%B?c zvA4syGcPifrqDYlZu>@{GOwF{4~4l>6AJwv3jECOE?NzlU_in8I1IdHM&}hB$*`5? 
z&De%0iTcMkVwc1q#}zwzRICy{>Y;x`R|*WOK#>9?LWeQmks~>D7_*La`q?&6PD?D$ zKzX`|eFwVGOD_PO$qW9%11%kvz|F{Prr2aDi>NX3EnSwgl)|tePLXkw(7y7lrVy60 zA02lA8;!dN`mNk$5s^bbIXI@BXHo@DsEey_( zu)0yJj%aku3s#D-6mPjdo}S~H=esH?4e}V@olxWOQ<4^=^cRvZwG8B>-cI?J3H@By z@|3l#Kex5HB>U>H87v&?s8z>m$s}8;+$1kWbt-9-%y&}zZIac-nL_gQmVuDp zgh}=-Pb_YE%33DQyme8m<@m7BKHL{e_$0qkQ@g1k=pq7yWr+ioWV531Dwx-08LpV{ ztJU2^G)Tat!BhsGHQ@^E*J{F!jZBX7f)=WsglNT8NQkyYt)+r#t3dRdHZWtqr?Rv~ zL%KZ_z5`h1;1WS5$DgwrEX?1RkhL#}9;uO%y8xZ0QdLaz^pI~aF zJ!nE0Q_94*bZW|~L^u#DO2xe@T_~W8DqN-K@wnKGI%ppl5G6aVlrbpQhH)~q*%p79pW)l zBgyLzCBJzHHtr%_U|MTy$;_fz^+sRMWc6a+UaDgrJyV-Q)RDDQve8sW zOUYFT?WJO%EwHR;B%%tyn0%hq3bzt`f?QP+oX~N zpGAZ^S(HwY-0nZyZlA^a3@M)0a+^y_3e)DZm|YS+L~E-=TS~RK(?z%1&KM*`#Y`=9 z#oN#O*zXy$AFyDdRhm73c{g+a1Z1soZoA*i@TUB>#5aD z6qdmMAU}MT_Fl@>xoB;#c!phO<)jWjLE%ypWIwNXf|@fRckjX}u(>KGpQB3$-4wjc zb@U-R=zPB*V`_$F63q7t-zz$cTs8>)+pQ?GhE4Ne zj>3)@g-L(L*pw;ChSR11m}c#0+glwR>7G;zknRcJ(u5kYb%*Ojt^v3D!1DkS?Kg>& zEgX5`Y=>L>I!~)ETe-PT=jw9ncatbhFsH_t-fvOw-A;4seIPxG0*-!d4nwiDU&JU0 zr{n)JN z_kuIZeM|foYLDW-uzt9e$XRCd@n6s*#PopeI1}5IJza~t`Dx{VFjd<{TXmI+`TuKkiBD}A&ADbU zHrEmjoh>7s>&#A-_L7y5l^;IaVk)Hqtq!1yx+U`z*VLAQAhgJqq2IkFBUO@Fk5ZwY zc^wFlf>|p2xh8+X+Qnjq-OAQ3b4@MOVh`IAMn}+fwF;U(AW@pi%)C!!X5ODJB!4F9 zBvTLM2v}(J&#T+c_AhlHvA~iTG1SZJM0YGU{#waN-&4NuzM_+tEX(315}WiPS6?aM z3eI_$4r4DNu)s@wD;Z!*N(*a>cFlj;qaTk;`7j4T7-r`#BQ^N(|k?)?u z6cP2*88VP8Fg4vFu^GG~fI~Eqo4W^=m^v$n3)(8<13z^yzEn|*i0TU;6di`iUlo$S zZ0XsryLnTL9@C~6J>)8@TQPbIdTNoH5*6~yMIA>5L1K88Vr|`$)e=Z4KpG=P)LM6} zt*gI9Ie=#1BdYtCv9?0umB-rN%gzU)8eJ?oNamF`aB&aWwq8YD3P#7ME^h;&+hh^x zfF5}7V$nB-n_B`+Vm1zvn4>T3WP#>V(}{;z9_m-|NxYvu$_~*9T@2Yz>mHPez04{^ z(h;|PUG>X{t3(A!c-)jy-oBfn5_2K30h4lg1ml9DiBZ)LBxSNy!PevQwg@-* ztpVXy=BDWjPlpX{r*YE7G&^fqI9VrLNO#Bn4UUu)ux{84rVpZRpWY<)?+7&Y~Li(;3~L;oilBDME~*7}M3y z`EY*+7RQ>lVi}jY<)5*9$XA( zq06no01~SO?#aBL^(YzRDeJQ4rN7j`C}1uOp5}`Uwi3^(O^2i$-wE#V5unFa!3a8| zcVwGmh{Uvjc2zW9QfS=}<@GdCQfdTp_0}*;3pjOE#Q5H<0;T9%n+*hHhy|DLw@qrU zlH^qnC)e?U(l{iNiDU1GvKb!dL59HMU@5t}j_E_kld?eI<61y$ieeBftaK9+)51qQ 
zq#6ez2Hj#fui4Tx$S_Kgu}sE*rZ=< z6z_*1Wd*{_0>NPeI>7C`QO%yhVQdl)*L$Id>VK+l;MW>Ajp~6$S5iJ$-zbLl=2D%A zu01%9R5TJ%8%frFRpP6}s5$kXDV+D$i(J(t{Vg7dd$%zR8khKfUcb;*^x7?r9>}H~ z?gJ}0<@5e{(<4+5V)!8$iXh&7M|CP>2(vccm_ddaM|T#i39du3@lw;E*?JKVxq>?` z*yp@FlgU6u;}+|QHy(fljmXtqs-|2A9dMhc;=rJCm?{zij*|Irlk6&F_oy?AA?)*m zz|h9N;cKOrgfVFqM$|NoUSNDj{r!3FN9}$OU1eZ*7RD+XT13(&G4uLnP3d>ab;gq% zv*C@<g-@w|QnXF#y7{@`ab0P5!rCRS7Cb+p`<|pq z`$R0sDsLS5xB`}<-)CoscP%>>nhi~A0o?${Fx`r_Pjg4kybRmsI~kKHneG*2p@JnQ zx*WZ3i+)si1yw+yd3-lNyJ#Y%9h-D5C)1)oN82+ZNmfW?N+a#)Hx0R6`buWHvo6Yk zHw_4#;BtdPk{|x`i6}WX{#rIG>O@~iUT>$3MGW9&e*G3Bbpen%W*L^~#M6^;H1&W9 z?ALdPPq20P;>bW&+eLNi?+eM_W+;jW!F=j-)&ySnXy{T+*o3NE!l4w;tQly2PxQvr z^xDL+)-Ia-E0?*k6C?o7+U0xCPjAW`RW+V<$4(SVx+FVUq7BI67$0kuAHxYZu8Md! zq3a1FC&i60J>mYOJ$eIhxivUh&f)0dI{k2`RD}-~Q5cD)f3n;tN4KV6VOf?2^&HU1 zc55?N2BaLQ45t)Rmq#ImQ!X_NQ?#MjJU0uS(Hk!T9pS04q9bmPECSBF8*!;)GLysR zT)^)y<*;AMVMI3BGBGB;_)9tL=RppeOBrWVK{&TGlQRhy9ag2C0a@c{$9QKn^n@2s!KsO=}Gu=1NSMlf&k1$r!$r97fek z$zh`!1(MK8<*;LFnnnkZdrTJMex9z3+xtDthRI=wM~mdJe{yoz2^${LSSp8&XL8u& zrRA_Ms($1!3pYRxOGCr8{HQ-C=+7zLJ2?#3eI|#Uq`@FM`b#qm2_$b4i7Ycbd3RpaRk#KP3jsuT z6b=t2r2|OMrcjoPQl-(+8LBih1wNrC@u~f+EJY~VKu>RCOlMd->~kI))T#?XGaNCe zc4ODtt7wn=f48lpPCOA6_&@l+9h?Y&WT&867i+ zI*4XU#NO|0Xt+d323SkF%>+}TGnpd^i%LZ!mo9UmTZa<3Nw>%e`Xk3(pbHi7WphnG`a z4jS~Rz-2So-&gQjs*sIYm%BnPxIp1H4IFZ)^J)hYj~;$EHfkJY_| zknzUtFzB|+sIYm#6bfxT2=;aSXyvH+5fy6thsP}y#$RU2UB?=*iq7?~lnW{xUE0Ua-RB>EJn&X1bN|7d^)l|44e z01xW=C!zVp4E31WUE!R<6l~_&vzg=h&cZ|DGsL|dV8IUJ&iyQ4ND8FV7J(2+1EtU# z>{A>K!M_T#(&m_!I`&Z9#)zqfpV}lVlzG7sk_A{mp<}v8WzbvT=ng9_>R9kVsE4KrF8(TjOh%E7dmgZ2A z6>tT!*bik0b`*mzi#FnRm}nR3ZV3MjRHlPr%>E?-uhM|I<;&XsraSXQ@_!K|Yys$^hztZ@icl6iD3 zPXCcx3{Q?6K86t`=g(Gv9cGBTf=v{$Bx1oqA;gMz9DA4&Rsna#ZCt6sft(|cdd7QXFIKfY?)BXHUUTPVc8%Br+8hxECq z?c_-kxE7;Nh~dInEccz*4f4zbs3{@U^o5(;VLBpGo{+7DF-4`0;?czP=uf*PZ!;Jn z3)&F(@(D*LP%P*}$NM-2E6Yb%W`fEl;yZFG&H4;MZBqYw-)tEffiEt)aSU+aii)jI@lYliT-c{ zoiZJj^6)>E?|A%Qg&Q3(=Gs#B7VIj{b~mn;$Y6mX^`11E!(Ss!1<2g(P>e>pJy(n3 
zZm}~u((MtQ-xON|N3AH}KIs?t;Qj)OJd|B9K3CBuS*;%WWwd#VnI?S;NM6n?(5r`3 z#^P&mRaW#nu1dQe@z*ghDj8*d@wIEjTX;Oq<7TddT*W9V3Qse?WI4I+VA9Wjr31-z z{|~u_>sJ-`h1owX@~Cpn7w#_BGSVg>1DA$vooCxT`w%Nd@H-U#)RG?;h{|~p+hl5F zK{(J=mXtxt@%=;v^u&>|b$!koGU7rb7AHxTnXsrdz7>fopD3qaw3KIQB8Jt~)yWRD zig^+P1he0@L(%XbR)I?;8N@nK(H7-K5Iw2wdblv*26ijwr``IbcGOrgccPVQ=zunT zXyH*ONSSXn&*W>asiIC?_gQ^*o2yS|E!Z{w($n(U3(Kntgd!Hi)-5^@qE;I+~5uZZ_o`PJ()`Ca;p5)%cmRk2BI+19ri!e#(eRsU(Vt zsA%_f>y({Mr))l7v!5%0AwPS5%2XeT4E&T?n8V^Jt1X_geuUO^$_6TH8;DI=KOZkW zWj53?+UX|Nga{_6kO!2oig4Ib^$~kBQAuNQAZ&Ys(O+V^=vzT{W2o?WIj-so6?n5e zMaYy=i8tYcXya=9X>T||9uyKV_8>}C62h9!>$ggBybZEG$9;0HZGSTO^&>h_?N(?r z%~lRs5qw4gSVwqTA2Hx*UE6Ug=;|6M&bGyysb;oKqde8NKl-hXw3;rPj1|(B9iKAEPxmQ44F>!9t0v})CL>|n~&kVv|c`L8+~uH)G}7#Jr}Rj6wtnnL~R zm~5YdE3A1MLQiI@qAG1{k3L`|e5TsySj}*emr9POO;S}QnbzOFaHgtIeRss=>Z>C_ zilNr4u;7AttGYXDVx*+Wih`=k(Ff_Tn!3Be%6(p|E)RD!dO;|anR6rK?p$?$^dTGE zh3eXf4Z5+x&AP$O+2Guv&8r!lrC1d*8ZfYgsh{Nh+Qjt7#r+Y7ekTLSODw*wq1!a1 z+oK1>@rG))HDbkhXgM?-hdNmWJ*a82itfU7kR@K%ff}o=k>fE@QVYpSq!`uPtBrUw zVjW8f`6^8~K{9PBIoSq=F-s8fdp`*gw?o8ke!#X~Oadc_eiRr(8SjX0YyRdyOFN?6 zbsqg#yk~{X$`Fc;Clw_mcXB|=T>8*f0r4$x6=N=4`n|T_!WT=%5K3{k zm`fL8=n~8Ypz(u~n82Rs63pdrZE5B*N*JYhlqR4BuX0O)M3zNMf#H!BgMZLsu&H%1 z&>~=S%4aQeik7X#z}UZ(IxPUhOaQ_$_-U!x{hFevycN%{Ee0ms9X7HYO3o(R6j8>=ULe)o3lyR5GDb%UdPH#uZ5MVvA%G)=2+QbGbHqd(>0+O za%)YjP3UxT5z2`{!L}B$vdNT{!3>2elg7$s9V-(%`kWZc<{V=|dTvWu*(4~okdb99 z>06g(No^@hf){2iiN>B;mV`)=V@XVlWT=cKu|~F#4{;m~F2T;ut(%>_{tURZ^$K8TWCB9$ zGtxZA-*vIG&ZXGdT#lX5Kq{!2k?Nw@L_av@*xAL5oei)WubQ0!+;k{&j?477t%IGZ zI?v7qjfv#MvSFi(LH*TR{*)Xq8);jESf-ZP_~LoljOS%X+8i$%Zu=SVvTutcX*o@W zq!shBfBuEyWvf~K(=}7Z@6_p#zi-p~Hs_3hJE1%y6`t#69gIF9GTT!atytD2OyZb^ z06Fo1hO`7_LjtKWgJh|-)gZr#5*mo|s@6LtGVP%OEH(|r*Bk$iCE3qnx9f~XD9Hx1 zwnmpJvMicy(xNR8XT?ml%jb(g!!sj+6j|1EvpIFqh_g4%I%~$(xT|Tu^0#ByQ0&$l zPh9^`SL#*S=rn>eicef*K#6OX$A%CHH@nQ*ADcGwQaNZOH1&lqX>`dTH)%<(x-iuo z$4tQv3`}TpugtN$%ZYMcSDr8W=L=kuc?SyaE)eYaVbc>ym0_o1tax?|Q;Lf%J!h#r 
zwe*}@i=_wigIRjcUeeM-35J2bLX_n|RanJP&KrydP<)SV@Dy*eu`0zPU_4Drs>$<> zs^+=+u(O?NH+>a2R+C!}pseHmse*F4rBiqkb%#1rO}29?8K&l#BZ(xPE4+h>u8lq- z6&`>L0Yw35P*)@|@>d_0xO>Fu@Hi`?j~ZY`)}p|tGjs%`A5CXy&=mSnX6UKr49QNm z8r5sz;LISZARtC}T{gNgKf0rSbR3g%Ortx#aC8%E8J*}z@ITJzPNbtdYNI>RIyz1L zDn>`W$y}Rru;w`3uj7!CLkro}Jdn20w3Jl$C&gD_w?=6!zifZ>M;q`Ir$#&l%;#4|w)l^aE zX{gaDBZl+pbhxW@hpKl^a|cbKr4zmZRW>-s6Ng3+miP}uY)>5;C2r-=!1j+P(|?_M zGM)0TNoZ$2#(O2R&vsk*1P2T6(XsUJMX|yslD=gsOVNbh$;qWZrRcxvv677R;6l^` zN3Y+g1+El*8gJjN!Ss!I3-DK}^r;&$fSwfLm!kJkRB4$A{$Z{y_J>vM&cesc-!FeU zii&>NUI78)!tF$}CAqkxfq9JzsyMz|Nvvnc%GkG*p4jj*B);~*+d|BtFD_FMZL zcEP7C-#sGX!8#GQIGeQLy_)>7vLtKS+Xm?|mg(It1cSlD`F2}yft!i};<`i{_~szr zyMmYt_&{8U4yhhcFdT27AI@NJ2FkJY@q%N@d>ozP`FJs6hsn!MZPR`0DSFK=k%(IZ=KE4&NKRz8q6-065WJu($bZs5 zAMcY_fQq1*fn8kPI`9Oy0?xR8*5bn660NCSI6YyH7aCVAlqe}9i`VkQIvof`IzOx)GdiuHIkAL= zs!??qioW55mAv@j*;%hGq{(4;Ob~Z?Nn|(%0hwb|@mS8U90DGtDIwsfngq{YEjE&| z<`MmBtQk3bEEZREi^;_}L&1scR{Zyf>Vav@QIAk@j%T6bq^>-V+wXT4`uW|X;bUqN zHg$}jq@o|uGe6d0SBG_vtTe3Rhrn=M`;nF}=m!>$Azf)#>@OLsr@c~Bqr6hn6k?BdJ{y|UNNff9f-g%f?h3J>f@c&wDr|H_BmuhkrWVjxr_$JruxC%+y zJ(ASp8olp`w{MI75OCfa^f#Pw597HKLC^&AKT;QK^nwTj1!F~NAbsqltD?}9=lOcQ z!GgL)OKCZIcS8&h+5>ZS$XGdf(%wLO*_-#<8)z?m6K`jOW`pU`22EWzvtsIs?m4RK zRqXoL71QyUuDzV?sA~*M(3L?9a*)~Y`1=9w|G?k(bN>|g$wb5Q>hwW`+JzuG3@>u~ zVHE@}Cv;^onbh@K92L5&>CAKcOKv^BH?u9)nUy1*Ab;XeI5xV>=q#sNTABbcZm)_}v$1J3x3>hmY(~`wA-P63m7f9uf%ys(Cp?Z|`WZf+*Nv zmy=2YzBtbCD{-$vl0c9N21`Y&>}N%ZimCYc4p@A97dkqux`xlZFW!af@tLM^LO-p7 zfXL5Irb>;1Y74O@fo6{GsH^AjnBjYwb1y#)*nKKLUA%{1K+M5=`Q6J;A+Jo<1=A>; z9KLsd^x3R$s#|M^!}flAd9yBdW>COBay`5Lo!!$xJI5Q;S?>s*U3jZioPp zm8Y-49xvT&%_LNYdu&I;)?Q8^6RwpNZPCT%9V@v#D@i6Y3k~#w=xyvw=RLziR zUovIqH8D124MYq-X$mZFnX2~YRGrfYWpcLP(?Bl0c#l&S6XqcK=3gUn-k7(?6Ibed zi?HZ>T6~LwMlqfCkJ@b@oK7IVd02 z>3Dy#HMuJ8*#@gePusDdtq(}ZT8p*`%F*Y{#kaXW^q<)s0uzO;Y=We|*e6e%wC}xW@@S5=t<$gDbOnN>{BhF~W@Q zkzh}AmE?Kee%pmmj4sB#ShKROvF|H(I>3C)B7XzjII(ko(&sIZbnTd0k<86iuso_~ zY4^I))lclav+$I^|2X&W^Y5b7unpf}V(5nG&(`mN|GNByPdDD%Kuszdxqkr^FXGo7A#W7fg*oPR3 
z%zpyOxZVN5QTxp?qNBQJJsVRQY2RE0*Edj)INs~IwlJY0@251!$>AGY7*W4krlxtT z0sD$-h7rx_%Cx`fMr;Nu!~Gk%f6U*%f&0h({Sfy9{{Hpczvf1!N_iu$c6f3Ew}~MY zJZW6+2hP7U9>Id2S?}4)GA{9?jk$yUK^m0&~#y=Oq29 zCcl!Prg;y=6dkeOU~kVvPi#|xLknA>v>u+nF+~eO>XSF|3P?T8HHRjj_KzM-KII=h znoQ~unx3`Ec4%^nd!fl$y#q~7+iwm{W^@l)olzOk6`8>eAwTAiu;fF`%iNJQGY+l{e-`Nn){F41WIglAUJGVdAx`#8r*|X7E)QmnEO)a6h`5`l$ewmxlVR?28r+Yk3AEAdHr+>CxTngEn zc55N~n`M&*+216CG}z+>vA6n1kL+*pj~>|%>k(uh2M{SwOby`2^$uh=vpRSo;ml0! z;-UN|WS4P#5wf2$fDZ3!L3WwUh3x%Yh3xaH9b}i8UEO`_F0;?u{ad&8S9 zrw?;K=|0sXVTH=@K+Px2J8H<39Qz7vaBIp2@sBz-INp~-dWwm+~t2LNRGH-SFzfgVYUALQ`~>V-+z+(kNW#b?kD{H)7*dTX0WdtoVreUwZI^@$hPBcXRSIs zoskAl0`T8{N0dA#)8L()3YE0z(pYrua}ZXhnM>%>sY?-xo!yngbS5ps^k)-|E^UHy zhSuc#&HI@Lg*9|zjPc9V%_+5B5o6G*=ruCmX)SDq3oL9mv#@b*3)^n4TG(_Q+MN!} z7B+_2oi1##dZ;`~mBdZSl3Y?7mCr%e)Jm?OIzDGsh2}0xhx#pJb5BW#GY=6){`el1u zk1X2?6H0|fMH6Y+KCO2w+lo@slt^f_$R!qEqF8L%Ryfh3W&6BM@$p+)mhGczi7!XF zYS|vqGt0IDlhoZ0-Qwe*xc?yc|HZ%m0QXP&`*H5a{Qdj6e-HP`8PZHzTwH3wyEe1n z&EAsoW*35Uzmpn$2u{b94CgbqDA>bt3A&&^iIy-v;P7H6T%Kjzj&0XG+dju|y$17h zHC`Q@!_YRF?8N|2?Z!$ChSW6yd$5_M@g3M?Sj4(Ad!>s+%p7>Og%w++0#ofvxvcxb z7wXRgic;b+8NZuwc!G}D2yJ-?ct!LU4taBkoa`a?1Pr^Y>FQ~MoF?E(cOo7!vs zyP4W@d_D0wdq5+?U_PX4;jY35xat&`W4hvv8q<~a?WnF&x^yMJXGB-U0qKf4cbKct z@vxq^paih8Xin#}+Q5s>>xygWeq2Z@Vmz5B9CwdUO}ve}#|(g@(!alv`+j$C(WdiM zlT5+VoOu@`=PmoAFBu;>wddzt+?<%> zCWZUMd`Ef+0^~}vJPQgOi4_c(oJv_Ni@8PKZz2HLMcz-tXd$D8e3sM-=B1?epC^`U z(r6IhL`>r8G=kID=c_e zS6J|2U17mPy25(rv|WG*$n&~xjjujHDj(g#h0p4mE~$wBe4(iM_v%Z>W5#srCAvX( z&yE=wvb#4%-S6&=QO~PJa}dDu-MumD3*09kwkWlUyZ7%R6L_JZlDi7yckeAcMGeWM z{p!5wXAvqL8$U^G`!5z9FN2|z_ps}M>DYGw{BG_EgZN2mC83)IFP$ku9<~t4>2^73 z7l6c~pCdWhl<*28n!d;H!m{AtS)l_O_}P0Hp2Xr=4VX19j<>V&#r3$sOE9H%XV&y? 
zqA*?H9rMr(FuCH@(<{EKbkOg?BSzm@$_;8tIBq)yZ1;_#5X6}L&NucKq&PyCZ{4&sgf`ijdyGjIU33c7w$xCy$djUrJjwm2oZqRT0J}ExE>B^X<08ZUu}y*oMt(BK z%dRvYvI|SvJK?m9pyqcYoX@yV+Oj1SCnR&1viwuQxqH|eV)g?5vRJ^pI_lh@5E))3Sl{@MFfo*lONe*>X*;_ zL~EJ{?lpDHRh@xVzYy+SG}g$54~bmb1tXL333?!x*O-51S4`1%2(XdFt?oWxpJ)Ig z;>h6P;*sVT5m9QUzQ`}lf#{qbGyY1g7DK+k)kPc=6w1A%IO)cI8h)Zz3MNj6Ej8H< zdw9{X(97o--1)@9@0~=8H`4+eL&~9Ea87Bn}7{_aNnO-@< zYdt>U9*6zopnE)jv!l?n`iOx!_jezAM75K5T5l17Q^)sMUxPe=GkUwg^-UmXpJO{0 z`xJ~tEcY(;QFkMK4aDVBe3f{IIFZ7+*jRIj({_tP9O#bGH?w`{$_Vf+bv0e%!@C=8 zP3QQ~?xu76{LPK-rhEJ(!5QWr$K>Id-bQf^owtlzQCm^iG}fg{mkZlKHln9U+Wb9^ z>)s;RHfA>Y)0e;*Z7p)&iP37@)AwZX9Axj<|4-iAz{z!;_5Jg*J2N}`qN{z=+EVP9 zv1CiO6gf^}DRv0HjxCdhG-*rQv`t&u(n3~A|Jd~>A3pyNf4sp;A`%rA5TF!6H=vM> zqk#BSA={#a4N*W)(TX6$k_cEDK^IV90}3oukpK7hJm=oI^Rly&CAX!ulD#u`&OP^> z=REJvbDm?oih4=Aq;9gX_XClE$zbPpr?1$tGb@I91V6!-m56+9?}6A zFV~OV8wQ-<2uG`H0)`#zPt0tw7A2hy*l|MZ&vGb85;jU)65^fTng>`O;poog~Qbc_cNx8gR>-<68WuBLI zYn_v5%4ya)iKZy<)r_Z{;Fk~YHa+GUMsT)st$mSg(!SEl@AAk?e|&;yk0t3eSNd9q zdJy>Inq>)KmnADrE?Gjk*T@zawuL=^pD*;>ZeQqQyM3XLT4aR1!}s|@FW%=1eda#j z7CWzMmrVSFyE%q|>yPaoWX02ET`gpDUCxWaLiFe4q+?lCH6kqQMe$-^)=T>6%X(HH zSyt7Gkf+}YQ?RTYSktc}pH{>I;-;vA1;|>;PRtYy#alB@iDEA3I$BVv{d5`v!q{2OIrp>3B zgmWECH3UmZe^@kY87}l1Fs;U;k`ub}n>Nd8-$Lln3TE(GHZcc>1i#&^Z(k6}29+{& z&YnVlFhPSOcco~sB7%23NS;dgeDd8my3~5|gS;dL?gZNfc}9u-2;oIYK(xDPN&lKN zbZcaxG8!Ba?w`#maC$dPM?!&(2Vxs@J8R>z-3Bq~5-CJPGcBueO6)o{-b_=r0X55N zkdD?gE;3~`C}Bze>--wJl<84!H9bs+6>r616!sdzN-NT*5)1mkFw5PnFhB;j?Q$0< z^^Kfvr6hlKOh!u?r+g`=6DL%>sbX3228=?y^B{@8oMTW>g(r05JwWtv-@t!nJH$zW zXWVMHtua;mUW*I2G?ei&qICOE8Uo-LxUtMjI$hhYzN-YRB74*6y{Pi-3@)UEpXB zv-ao zh1t8cEhiX%`RI~8i5=HA&Mr@$y{9fg##T4etV2{WQ=(%x$JoI>YP&m1bnuw@;11Vi zHpqn>{YK&F83a_&x4!nfc)BSFCPDksGIBs>csA(fy?cT5z zj+IoJtk|=q%_@KyzQZUCOVer~+NgrLxM4LATg+sEAQ__~Gmqe`Rx<@5a0+oFigwW_ zQRn8FHY|w|{HCqQ`McUzq`h;#?_$dp=@wEW*IyO3nA)n~gJ>(9!ee1o_{5s<7hgp5 zTgl3pdS+or0i+jzTtHkOkN{?rp}|cce74khu|-VaAa70m2r{_pGFg1zq$NN^7TabM z3BzvnqHs|V_h)By{BjTBTPXxK)<}q`PJw*SrWw$g1iwv_-jul7BH(#xYh5Y2Ce46~ 
za=aS((?B99(V#Za41-le7z}cz@qO@66KQanOaUs)!yr!2Ij7^ugOF~(n5@f!shwU{ zfwECeCmqwLt(R3uI2@B`!c|Tcx6qTCRyqCb^{Xl^^{dX_o2_4kK=gT8FsC)5v#{H6 z@+QX#kelN)#{?#6c&7(d%D^%WsvL)fDg)*w-`WNp-gmiaDq=N?4*(*WV?qIaoW;_? zh}Fj*yjL1#P9J~N?&_mZM!C+a;a~=!@KoQ2YJ)^L%u)o)WaTC4(0b&s*e_Ulh^spN zUVR5EzlSTIOr=>BNn;`N7qy1JC^R7&v3g4up$j-HvgH~)N;ZkJyOSKGSKo?dZ-1l6 zhhlP#KBevuX5cG%JbLlseQD`44JRpmM!C!l2n2dh%eUVUtztlhfca&6v z&5LUA~ob}hxw#Amy9nt3eO`WRjcWh0s9kgxHHvsX zo@WzmsMuN7DEl#6WM&kpO%?qphoA1vZ*LT2RZ_Q8g^Z~zWR*t!j8!!DIz!%*t1(|2 z7Ntp#Y!awxtY?4@d|wlXKY2+7y)XLFBGOh>ICv_j)6K0zP)u$7acv5|U&|Ed3N&KC zdvZBRs+^xGs43>WBA-4+TlIUWS3(auOpyz_k997&A5A#LOUqAE@Pz(&nE&Zov2EKd z**oHEU-`;c%1=%=>^{Sfx4rEhjA(H18F~OyIM+^9Rr#gw+w@Y(c;x^U9kCZ&T_uT? z?A@h0YN5{1i@c0lqZ%B?o|77>^~z~;Z;pe&us6X-TLYm)F8Yr^os#c}u0e@OEeA08 zWS)cFF79MB zn%+Y>A%I>i0DgzYp8%Z)1S|cSYEYJg^0!uWgHFp&(hTJ>3^DF|2NuMjs(j7h9hg$zO*d)_=}u?y;3sYiO>Nm) z|Hj&#FS%oE^7he*+qUQ4@K65HKivF}-|`Lr?2Z58>;L&b{a63sU%vV8{r$eLef{6~ zn%6xs_`oZwuYUMG{@XM%JhXpj`0t+Z-`?=wu3bCy-%DTm(mi{26bi3=`D?!V(YyI? 
zN2&bKYoh~|#~=HuV)6d_Uv+QhKicnmFrVMO`(-cNvEz}yiGm}ALpWgDE)%f>r@fhnsj*3K+>DMM508>Eo3N*|~ku`$ubdmgIW6GGRxPfnskRZm7i; ztSVkwi2fV4aJ)9j!Hj56tA5V7Da*gTMM$#V5PSEcWKlurfZ^x21DEF#I-o>X`rCG@s@1 ze0(oJ{{m{IL+B_^i51YLI*(tbSKgO(hdQn0eM3oAS3tu|0}@AdV$&^@$~c@#|CHv|Rg$wjl`$RV0qA@UR49 z#hnoyBmPE^hco;9Sgo?H>oE=!15a$iCY2nd^XOFIFWQXknqwop7WL?N!Fc@HMM6n$ zud&=5vPOxqoFQd=s^gJD*GEp-Bi|(SpX+!e*Y%M(dqks;*9HZ`zUVhhP=-Z0GDbJ;d`I zF5X9B5&O6|H~et!7sAKQ59fY~b3iPXbB25I1-jCDW3Kawf?d%{$68+)(ps|$y~iIH z6ywqyR`fQX!<8_HKWLxBA9b07Klf1XijCIhup9=?9DXl+WDZ|U=5UEPiBt+)aWM2m zcR+M!G)9yue#Y}PX&i*lS0IJx_f-1a*vKku1gC|ar|sNq#qL}lN=_X$kL9Hyj(n79 zplK~RiN z$5%;C7R?rgy;qVi48>mAd#pUcf?T0vBNJ+4EpxOuUl3kHhGnbz&bLt&!j)h`Ny?`Z zY;?Lo3VkBMi%j%1M~K@5YsIZRUP-|(-gi!OFy2*M;A6aG@D|=~$CCbP)Jg#_KFYyvpUiB=*RpKiRQG0`LwT1-yYUV^qM+;#+6;?6|;9nZvvp_{F>EqIC@#V$>z z5T3|5%F(7$M|pyJ9ViU8PHM|ayiD&Lum?RhMngdM@=f)-JZZ&ukC|*6MeJn?&mPOe zA^`O9eB*YrdYGh+imy}G zoE5D>(i?T&0VwSOmHi>1|F}Mx_vemO!#J`*aGx`$dbNwsWrB9%VG3v<2Q8K7J z0X7_^24D$N90=)_R~fXa^w&X( zZSUX>vf#7&4;FkOW$cW>@^B_e#(AWhFhGsYA($z1TF#@;QkOr-pp&Swfn~qo*P?Sa@fE9_Z}XdRj-zZFS96;zwy|V{ z)Zrkcj^rEFq&_;iGBUyuD%CDR<<^Z*2|`BhWFE9g8w_`oHiXH0lRIeV(#GTr=6HtC zZMl&_Dd=DLl6DH=8PlghBj9@p_rs!!3;eO6@}Z_G_<}*z`NEYHZ$-u|?o3ji?8>b` z+D_4Q)lDvDID|`t2u5IbYqtPbJKjbr^=32mjWipDgm*!<#y-p?cnI(+hBPy3hPal1 zff0)QlcsI~bAg||XmBS29N4Er{U%@64hJ}o2ozqKN&$Jwx7M`H+8u+STy(WL-<#xB z9YXGAfY-);NyYhjVi1+u7{-4pun6Q zP3E6Et1DeBs*7_iT`bZ?p=%fCtP2K)Hq(5>{rU>EcX{i)J@PMG=F$gXG$mG9PP%yH z$+*u~d8yi*O9iu=xTuC1pa1dqN70{lOlhgADbYEuww5Qc+27Oi$qV-6Hw;?ObtNSz zUn7a%;hrSwh6B(-p94>rXA>{Up{jsK4xzHUX7~~x=C##iomd?6d&VUsBl&f7=xeg0(lh~MyII^sje*|LtPvQQq^4yh`7ZkKac`1;*Ej8jDHS0I}t{%Q;k=_`T~r zcg~*s7ec9(0Y}E*+Sls(2DU2$9*eIjw}7Kt+;^WFvZpvg72irj>$5VDTANpUJ+fvm zGP`C~kDw8>1q9Xxgy{cVqgwL*c|&5!URtMbYrfp8{#Y zyU}zNYW;DQa$Gt|o%{v?SpSK4^~5?uq4y?rNxg(L23&VNJ|J|D=jjLiF;Q*T=|@u% zWar6iJi2JbB(tc{lzGG4r#he zu@&3xWy^Y5{BFq~L&r9}U9JdknWd<~U$V+eWCMnml=YI-nMi=B^(A^VUbB}RN?(HI zcWs^dSo@g(d|tX1bcMEY%^39msXAY9a3w5=bqG{`W}R2qQ;RmFXHBLr=pF8EJlpG; 
zbM_`9^TqDNI%nYeCpKpin_5V5)oyf<;);l8hMwV9v}gu84pY#~R=f?O>K!|vBC5lO z3MC#=U>m2LP3Z^37!?Zqg`|ARY{l|C(O24+ChkX!#EZlg5N?AF0@s)6w8nJ~caOY% z$GD0{kIManW_ACAxew^$yn#Yy=21SS3^o@K{Ny@dG{~H?XAO+=-NAUu!1#{^#zlik zpr4NSoGo{!3VZaN9*w`pmUxkd=^~#mcZV!brPM2lwH_z2-Y;11=ai(BAkW2Kqgk?v z*sL$;jL%LPu~ZhVy~X0?yBkpg)dpu|nO-B|nyV|4S`mY5X3zj}8;Eeo6sfPppFx-pyQDP*;LjJfD5Kl3@EfPdnS3 zFSCVwP#H1tsqnN3gTe6h>2HwbaPLKii$F1^;gy~8pV>X>MB;Gt5}h) zO;*IWc*c>MPgm$Yu%dO4(Yikzz$R9t{MAmy?683iw$s6i{-MS<*GktRjj*Zjoapb) z)%7sD*ZYkY=bNxaPsqX*8Pb=i3hRK@dpaf7AflqL-l~GyzbHgi*odfja~n}GfF`ZTjPP;&q%AmZmz(VPWYpdk@|hmg+CFz6F$r z6)3wIYud(92~ zec>)vYdjGBRWE~rcKBWk!kuCo>1^}EoGYluznAn_>D?pku;_dDxM)58-K58I?;dG~ z2dHfIyl;5@y1oV6a`nDp4+yGPo=FnjlS*n0e%NsoQK zd!!u-L+>7Etj9MbJ^sQ5OG87fZBj3K5Y1X%MDw*tm*2ht{?L$&&)!|GT9>a+y4=}& z-n7F&?%m^x_4qfE9(VNak#_y@a_=6Ot;g3TJ&r|R>BW6$N35iGk4x6$*Cahgy?dk` z{H%A6=d8yku-wrEXV`4y&XULV=wk0j7wu76x|{o*V?BH|*55+!{uZpi$9&`P38Lrn zAkA^@i+%+szqvguuYWk~2}BrW^ZKL0w-JtD*tXXRA&o#j6DdxB3BxJ0Y(b)45;^0= z<&nm|eZ>`RP};YzaE-dZ$u?x%m3CaboG#`>GM9@vSJ*oFK^>n~r(mORskNxKoW@tx z9Bo}b8|{QObBClJyfx+cwC(EBX0bliUBO9`k3W;=kGxXU|9mu8-{-+iavrkV#q>yK zv)B?TD*5<6dhrx!5{1nTjgk!hEd1ZDKc7Ti-($@9c|c10TuZ%EqSv!we0i6GCo@tYMDm= z$-HCaUb9r~17bl4_k`sNyYwe%z5ogtS&2u<~uC)o|d&K>RDvH&d(Z60% zW8#|EGU%`IT_wBPNCIEl8o#M%$Si5as`Illu}@?{tEExx)!J29ob+`%KS_3lhQS_= zY-zy@dXk>bE3qA1J*V%G?V|O#hcP~s`{nR?na`gMpO^UjE8+8%%@5@+hR>Jz{5+py zWs4eRqxxDI*=8Xdl13BS%H~)x8B4<75ZadbX?@2J$R~Q00)4!U%?OieVk)ViH}eiy z-%dRAT(%`Ul7SzXX9;xT9sYhDqz>w6f6gFqq=2ArOJl zHiEyQNwu&WAb!gx^Be!`Cv*9hP3B!ESs7LT&Irr@nJYteVWHoOPe=hcj1o)s6mgW& zGeAj7?!uhGy5=$$w9HgKTQwY1!ZC;>7NTv=%30*ML%Mo0srnbzY9C+XSSFqHdfs9Q zqR0XrO;3uLkG&GksP_~s?XIgRTAQjh;BiK+SwozOKOPl!hdS-8Gz7A@(9q9It+~H- zr9?a0)U7C%XhrQIg{er5n#JHefmbONnbE^xT8FPiTtlwpa*LJgx7n{0tj`PU$nO*&5O3inW0~xu+EmrF}MMNndtqtrdM*4L`zbYxS6#roB&u`Mt!}?i=?3T!IXQx_KD``)^SwEBbFu9bh@$qa|P)j@fs|C|+0_cNO9| z=XoXUkL8dGK|FVWPsd6!X=Hm0D0+)s_p9%R0q#l}AU4Lhr2~u`y^JSiJPCwNN6!xg 
zfu>e9@N8xiH$oXtzFwomj8v=`O|f?ikHu5|8P3Yc#I%S!4=5|ZMv4JRs55zNQr=|GclxxwP{1r6a(!bP3}2iey0>-9X{eoopQKfq_xotU^jX}yWfajY-~l*i+8wH z>u8h56^vulIp0wL42V2XHxLsNYK%dh)LR&c=YI1TyS7gB`8Z}7zJ8hKyBN>hCQ^;X zkOt(prQWjc!=vZ=_7xZO%se5BftdJE;|}zutA)Tzn)qsi!65&1pO&gpzf+gKdL7@2 zo$ulz15tUWRQUxj$!jawzK3Z;GaZBU~aa}FBWMv0@qf*ZoAE_Bx;1U4?rQ&jg!$Uc^dO2-Z>pN&d z7bZS5fYVK7ZWxsYh!-dxq8c0vnv)aJkSHBa%lmvd+qTx(cW=XaNo^wf-a?%dNbKED z$Itwm`t6zlW1>2E?e-^uP5x;dTYJedBm|m&Qm)dx25GE~AR`ABpx4ycN-mOP4NV-$ z@<)g4t8JmC5G!P?j{T|OYol$d;wpu>@(vhBpWgDi_7@DO*uV8?L$guYJNYU(1kf2U zA4rT&M}3<{jmnpQGK~4Q8k{f3eyf(zHiw8*z--GuiH4QOVaIreRe6pF?73mx^;*Yr z!`WN)&U`~(017~r^w1Pu9j=xv$xoM!b@2GSq{*tD+wxWIb-lbZGEJ~0DvGZzYQ zB`6^)F0SxWXR@Z1;DUg94L>31T}7v75$8O_sihLn@ezQ%ju!##3|(;5x&S79!PEoX zgQj;RXkM4gFt25(4IUVl8o*37??yr3B5w&whLh#rfa;)-vWP(3>>_mX=yT6M_dGi} z&WmF>Cn+qExo3!7c*I<*JW5$aBa3Atd*M#>lg_658j%afNIY$;STD9qyGFzYG?jcd zo=a{?st%!Cm|77(`xn$)nMhTk?i{yGLy47$GfoEpFgnVccNI^S8vC>zE$}zAUP%R8 zh}(r)My`lNu|qZslkYLvh|D97D_Bi!7E@X?wb}O}HE>4)^AA1ovw<;E9~XqFT+jFE z=$FjggSY>bR4L42>4gRbaX=(bD-;UiIjC3hm*Dg9wp8*A=lDg(NU_9aNel^5&p&b% zkPJ^)=<@?XQlX|K$|0);xl-fZl|z{G(>U_8$|1px&K9ml%2ltW3_7B)Wla-721!@c zcawd1T#Uybh@OhhL#UU1cxh@r{E0BJEgXwcVR7^t#Tz(CX#&lyNpprlkE zxav6!LBvnclkAp0=hka_3YD#@93`@EKQ+lKL5DSq!_yd56Wz7hB&Q}wsY9Lu5*F4P zk|~m?IWrt*(o{7qA7xy9%{Kdrem1;n)_xvi`;K@6RW(;eAXo$?I9iIoFEx%N8cU7* zu;@*a7M%@%A{$F}b$x;6{G>eW=sl*iT@0;3HPwkjy|X5IHewq*{as31Ea6^y8uHeU zkQDQaJ4$3tP=%OWN6l9l4Ckd!xjg(7Pim#ZOF~>ozV2f`CG31*!jEK~m z^2|!{9dPJGy*=I1n>A8OR4M*|q5dg;PTQXGY_x#-)^@QuycUFCw0D}#jzpuoAE6uS z;k?%ne;l(#HjRwOP+gG-+DTmsvehC~HjAwhHbFSjhe+084>g<|=QnHkEh*cza0 zkDBl6!jv2u)W}Fyxt!5(w18PX8+9sPSI;gKP0yYMr^ecK7-JE_HjEE=gX=KI6BV1c z%)17T2s7M1A;lb(ya<_M5FHR{!;oqd2Vhcx$fdrZu?Bs5q1$UEScB>We+sWnHG5ur ztt8dS^y@-azaC~FdN(ht4deGhyB^UF9x0w00$BhjehM03ICKNU z>ct!&a=S6IH*W*{3gDuC(%!uIpaS`?v-5*Yj}1dK4yn@3w`zz*haoXGW1eQC2!;|Y zf+3CAVkFW)vUlpfX*`_xuYcGt3ss-wcPgli$h zsLwOPA_)NXrW;q2A9WjqQKdpEbxF}Me?`_PRQp!kXuPCph1Gn9!`T@ID-88(idhb; z{tjl%Hk)S!mt6-^H_L(FtA&>C8jA!c7Y3^9Im6_(7H 
z7Awr01g%z>4M#d>AM254J*J>2JPP=9(!t&q2CEn;^6NI8qb3WC?Tc!d9c*fc)kd(W z@VWRvhRJOZK{ogwT;kyC-6FGo^g7mL7^MS^Lj;=`W|V?Z$?&3EM;4xV0DUEDmx(-PZ##QqU<7#=s*&z<%c1m<{~AwZDJlb7s-t0Zi`fi zqhKwI41sKFkLwK`=CZVJy_7{V=GJa5Yv8wO;2t3nar<(ilbFGr9dNo(S|m45g2l|3 zQj1v+_OO_JeMYoCFA3|jEweu0;d;NJdgtUeC3wNS^Qvl>Q~HQ(L_u{9b$n~|C$hf9 zqqrNiCUfIK;-5C}7ygFn_}Z^q$$Q~sA->8Nx%>5{vma&agpNQS(r>81o`?uGMiGT6 zwN?i{W-*FbL7f#sIfT06Wee$iwn)4MO)#n0T51oygi`UM)T9KrY4()h);8i*J723l z+{;^#{HFNY)lH31-$KDN#Vv;1wNryyE75?ZmXJ-H<}sM8Ih3S z1beGkGK)`Mp(pJQTv1V0m1D}|%M~2S>c#ZALPf;ZK@f|QPU}O!ug!x*Ez84vVh9mK zV9^+?wJQK>FGXQ(6tE_+Y#mzb1}08sZWL0s*u71TKt2GWcTJG$w8`N#*qtN&M9KMxZm128`ZNx31-lx|tyP0HXB3y%&l++D?$Ex0R^C}n-W^IHsk z6nJK!+M^wh=Il{M!J7LTxA#R23C6g@j4Sb1t z+lFd|QLT`=zczZ8t=&-}U?|wUpvC7cvqZs_tkpgtrDz4b5nPKJs`kL5<^^Y9%SpAGXUlxXzK#;tASiT+}Y(mfN(|#^9puGPbQl>CeUNeqse0#fZbs zJ8)Z%Rdvfgqv`=|-(ZaL#ai2L;f*(1$XwUZ3=DW4=3wKQ{lXk7XL^XdYVcc$%AiZN z;(t-;0P!;$k`D%u<0`@ts91iQGOhCR!&HI1*Pu8m2B|%^u!MP71Bkb|ZbCxYq70U_ z9XVk@DzY7{DbA5mm3Frrn+n_G@xTb@#JT7RB%5ORmXqQz(*fs7D zP*0?-zSwIy;Op12mIJnyVb9D*6S|edPuWIxMvAr_Lp2&T2o-jIH`h{0Cv7V^kX^}v z%u1S%NH{2DGQwU)Fvua~Ny}Qb+q%*uj3!X}^)d${Br56@9Lz;ye|U(+C&k*qGUK%3s;g9_l!?iB4k`s1eh6wy zN7f~yHo&*6d<172&~yZmf*(rU7cHp1&jDqu91rr9Ehb#!vfv^iS5ycPXzH@!m9OQn zT_NOEr|3)=VY#B&&GSCY?21?k0}RYAnU`CoFG56L4Fr8VACL_ z-qd)G>Of8~rT8cHTr(%07Q@O2Yk$JTh^na;; zve_1VZOyjm)xxtajK^3`vRt5GwFIPLzAc!6&$2C;Va2j7N}5e)+J4#B$mD4J5vePY zoO|#f6Qo6<$hrbwOBWOpcZPP#`zu?5^rRmfv^Mri1UTrWlEfc&EeXbzsDS7q!`cuV zI?rf3oK=E7HU~!Wxia&!z&{Ep%({>|M+O3{EN3w>V_tI{s?W2`; z`$YIiX}3>FbrThy;$D2#tPZbr=WKM$+5c+f7FRu3UiG2yeMh)%sf{Uw(Aesh3F_U} za1_uMKGoz+sqd(5RazKI%hq^YchpLr|Cfl2FSw0zx46^!P4#{8A&&g^6Dm$2zj0^F2DVr7cfGOllWY&^|A4{rXz`=-g>$<%89E%sqtCZP zo9i#9@sb^LVyEMbxy5vGQ}n*EM6H7Pj!Bua;`9WZe@a8-`mu2R_-0IMMA1$(V`T~s z77geD^n`lxG5IS~{2o%(rF8(}%lZj;YP*LtRPAetFDWQLj^+?>fR3fPD)nCCeMU&B z>@Yoxf=i12SlE82LhPndA}P`5ZMF4;LXHY zT-&y_w)j>>US-tn60+In zTFDH7T3mX%whbD3D#pUE>72!gE3f~l&0C*JiHy_g@&|bPQ?k$~WmWtz z#%)3{^^ML%nh<~_3Bg|_!*aUD=>urFg-o+TYqNqi(~)lS5f`7<)2rchMdrC?C8)NV 
zG0|7sR=al^k|*FntEEkxl)yWXrxeW<@)3ThJi)7o5$C{&O6^|xYV|_uE)?8hbsx8H zpMJgF$DPf6{cOIhLhUvla@&xX3^~{t7!YSVY#A^t);R z{GfxKGiWswc5CJQ5!jnUw@L?|{8*K`{~7a% zil}!MYLEbz?QHe@?`!Xt-hPk#oCelQu-`G%W8dVsEQSklW3uwVrOk~yS@K~xTIReK ze($f{Nt&#bS+W}z#KlNKdtTq6*t5FgbXwH)rR>?`DhfZR@2uk_(8S;wWnPo@VP=(` zX057_da<^%MdUJhRG~5Iis~#Hx7=U`TWipvPgcX7V!^mbFTFKtw7GB0iEOVYg)yv-(kGc7zV_q((B z77h1ZGSX_Pk!F40(&19mtp$Bngg$UjP=4>Oqzmfmk;F9{f_q>b+(ne(RiCOxMk^64kbA z5XIUnY*8NQwlug6TJ=pkJS-Wol$QW69U{h}JfM+1;PORdK~sRu+CipU^ppE|T%Oh1 z(14u3=DXlN=338QZ+$Tk?d4!K`Z0>lkq1hy8tc?r9&GQ+Asl5hErbhI0 zR!o&yB_~)h>z5IME!Osi(?Jqc;w)Uru|2Q|i~QN6+W6gk^y;MYW}DBh>e-wU$P;81Pbx;I zIa`&Oc2n2u36d;2DUxi;0ps|*SxDQ^+mPN*#CM>#Dek0Ww5zVRdHk?}I{KaXldv^6 z^P({g2E}}H^odd7Mu=i6l89jtS{}CQ1fl7nQ8l#(HTsWXJ=*$_D`8}eF-zt{x<&g4 z&Pt4u>G~Z8w;*QZgppzjCdcS&pMOJP~^A}(Ksm)pEghex*yWmgK ztXJ=R_D$mH=C2x4mhX3^zD_y7EGR(dD^MMA%rOfFKuZyJxyd|M4J+I2YTstq>=H|k z2cqw8_DDb#YhC?KkOAf7F~)Hj(f4@pK=DO>H$YDYw;&mkxGu-!Nz*8!@70^}Koe0A zXLR#EIaZl1bUMm!QcOf_#!rVe_rUHfe1vcT5Y16%#CL0B%G-G9P2QDt) zG>0Z5?p2|T%B!r53i6ZmavAREL0ReZVpnVGGgXyek zt^*X(yUd@5n~nXvqUh*QeSfW3k6h1&1+@IM&wfHo8n-1*qXPt_8!0@aGA5qSpClC{ zI6Nrx9vlxjb4UYlkW)0MO(L}^iAX2K#38|rn)p}^K8795r)^3Z;A+|icSR8LX^-|k z&E~5$xkFau8QZ`j6n)Wum3v|ycT9xMik};SRIV2gQ!0R?eaMTS$sKHzECt5GbyfM9 zRc+{-rApyQ9*x~a{7h{W7ja0PSf_t{3 z?%X8rkc@+Vm1&9_ zpd>UtdN8PmBdi4zJi}h@$B93f>l1ptFMq@^()^8!&}P1@rF#J z6>h`ypboW{C8f3^etk#c@od#YJMrT7U|C`2bI7eW26`v9G0?PC_bwOM@2n9ENVb0F z*dVl+E|V;VQhYKm4}*LhjdFCMSS#(xowH4=rFfpKy-Av|wUnQw)|TQqnwR1;`NXkb zijUcy)6F|Tws0~}s1AiJwu3u`MhqwV>7m->o?K4v#S@%2Xl9{UP26n2GJ*4T!t~_# z2O)B+L5iIUNRH-x(6hI6(4WzuwORDV7_=R4D*97J zRPZhS-Zcc_gRR39dd|Gk(N^dxnUz6TIss9ubOy>xHbZT*+d}|6%vQb+P*}uDx{4uw z`}j;cLvx+PWP=kq72_*{;vMPmV5e2%eG^=9YzngzIqh#&3o);(myxg9iXuJ8<#ewW zwSAJ)Hs=-?pn@3;fV9ZHX80U9gNmw^jC@TrbtPZn8s&!bjm)yGzi3_I;t&TaKm6eq zs2GpwShQO);CPwM`lC+K!4an~U#~ykkkuhU#i9fzl7pX4aJ(gF#G|DsgBI_75$i(Q z`c(4k<0#WG-ECZ2nQa#Y?hT9gJqIivNHL^4EKsU$YViz*jhpqNv_41Xbtvr@MQPCn z14SU%K%qDjD*y8+?R`H09PdhpcSDqh#^f5YQ<@H*Xrr`lE!Ly7n_VnX8tlRRr0q;6 
z5#qgZ>Jy@`UuqNL9Rzl><0N{?F7_w27edOWk%Rmkj%W0~-r$FN;@RhgXIojw!KMPz zG4=${Nc7qW&q6oO(FeuR891BZJI5F7E#v6x5W$~maC_3hb;B~!n$7O=_h$IH)Mm50 z9Lp@$(aO)ewMejxnYQrrM5Zy1EI*%>rsw=zwC6I8C|v>tbl0e6P=LEBug&$ylIm$t z8)Tlf*_*!r|AQk^;wWr8DXE-=76Hkk+8R;_C1l{0=Q-mX3r7pjkS^N_scs{N_q(nwPW#C$))Pt*F#|roi%p(M37!kr&OqFt1#{hgO z*7Gb$ZaVsjp%(D=8&E_-boC5vuJOiFe2BjlFXv^hVb*4b;OGl~8H>)yt`$fNP|knV zvJW&e#>BUfeZbDp>BeS+-<<^Q-=VruBXRL<=A0cfqI^LE7)y{zD{u@!s}%gB;X11; zh2W0pis(PAEAHw;T;sW-2Ddz7rB+!W(&&bw=TyyVD7kB(8`8L}G80xqw4ese_Rae9 zmgpm>HOv!?-$K`t=nv)osHP_IAJt&@G_YlrRl*Y2$#e}iql^(VLf~}NGF93MJ^EV9 z<;$0&Jc7Mw-{O^eSiATEIOJkZRmfPmEA@ue%h4N|Q8IF#)+F&Dn*Y#y4t_=J+5L*^ zW09GZ5*k*riv6_d%-^JLzfKb?#Yd7sVcCLMK(rA3M;q5k#>EU@v|({7KwVKzK`K6M z!#Gi&M)h@eGGlj+*PB+`8mTU#x}(i|vfGZ-Dce-z{wm>BcxY<3mEu$MQPr~HLsS42 zXrF$OXT_Qq8>$l1AV#YtZ0PkYJln3QWV9? zlgy=e6AMyr3;y~VgCn!ZNX#H}yM7dXVSN=FV@BCqrRw0Ey7Ni6&h2E}@s+OP{5$aQ z8JZtr*z&F4`SepZD;i6T9j9!YxtM(a9o9pb>?g6x$&jjBX^mSsS*x`mMHbHv6?~xK z4Rj<)f(}PVYMWK!pOD6=LXAz!tKBwBvSyntf^DXh?2Ih8txT=5yju%Vvcx9A*6Wew z=Cx_sW)=BTVPdxusO?S~(>;-T=j#BE+`@CZlH+wLd|%Ra*(laz6W`ABZBlzySoX^= z9*zrzYquG?f&t4Zj_YKNqbihRa9OQSM<*GImAW(eN!&}nP}@%aA`VS%Y$o``&M~CA z`Hsl_;`X&Ve4p!Dy>9e;wO+qX-&gDPt*n@N9}Ax4I(xU|01eylGZI$l9c>81tU3kGjj$aZ)*`lpeq)Y`)Hn)*vPA-kU?8J{Mz3b1r_fA- zyJs_P=)fA^s2N+sa>`;swcV&GoFNypuTFCF8rr5girDgL<^&?u%-H;%+}q>+r*R`Z zX&OUZrZT#?VUdud0Q?jh2wzDUtBvY|Qpg6Mdg|f)pS}VtP!1w41M@Teu%X$Jq8y7_ z0QWS&h|6NWxF3Yyp2S4X!aL$?1Ud6hqVn4AikS60_znYtWrG{89xj>+r_8z$y~U*u zVrfD-Qv-U>)x7>5;{ZmS|DIasyo@)tp#^aQ(^eDBMsZD^-B^n?FPUtr_>qIPEz!Ht zcy|^afsD+>h4N^+6H3?4!eb4oZcM`v!|fnX5*P!1p1S26))}2fZEUu}V3cbBxFxU@ z^Uzh6x~F?psZ+8!`ggoEaA@`|`=f>0gr;Ion`O__4rlG;@uvl&4OI7;hVnFqQRv7Q zP5kvOirfnXV5`@^5o%k6^tF)Kky8MDLxCrqsYBp7`r&2#q#R8zO~qYzM^j=~_x2K6 z`czjhAxZ1qMjjs)^n!JHq+z-NHD|>~%q}|KNUh7$jp>YadAQMlrcX4OJWOp)S8Tvp zu4;5NTu*XsT9;?ks%c$*LSL~i=NsTgpAx^3VChTZxx3Q$@rFe;CWg?MZorw&Sx-nq zuJH`}+)v)64*+ML4;sM|zsQhW(N$&*_brK>>^nV-+@SU_DSGGLd714A?S4e!}F0p+aM1%_U34_YbsiK6Z z@Venao{Ini^^qyTA>#$UxvdqfldHhMOa~NOy5NA&pi7zfFn* 
z?xqB{Q)<2V2o5b5;34Z6M~w+^Ic&&8HX&zjfMX=8!?R5iW6z;|o8TRyC~=bx2+*wF zW#-TIkZ_R!uTR2X6bUPh(0O=w5(Ya>cCkiT$55LxnUF95y%Dgs>x<0u279oD!Gcc2 zeKFCb100R7o47}gQ(F(j@7dJkIJG9V)QKFAAytC`_-HUNIbL`=9zu+p#BLGMCOioR zxrko4t04q*weZYc$ZHGPupcEAvFE`_%^TE`;^)3K28sjWdT`U+Z^rjl5G4~qVtx}8 z<`UEE$+^1vHj%Aqe1e2aOv{L&)=DD3g2Y6XsWQEgBdw|xQ_KTWlcdBgU{~eKyUgpL zpfyNO5^kg^;lhO4I8KYiTDwaUi`j&DH*1!Jca@YK!n?q326tp40uF6&D4CPH$BE$u zN5JXrDJKTDGu!1?JfWzcIKQsq{9KLmhwDkM8P2a(V)b^_{$Ag{Sv;&8mDJ(*iR}vS zyC^{Q`|u9${JdJh{I?TYhQWnSc)K}+jg_+!7q)BXbDzwGcy32BGz_3I<*6(hO2zHWXR!_`YB_Z=F!;U9FrX~+u;C-b}u~K zQ6E*R0*uw@g`~edxm>OIl(J+M*Mhc&qe2H@=U0VIum`T|cJhQH7;eNA_P76OB& zuW8%#r9#-71m=B3fS7+dPvn2}4)Rr-yRC_F6;#U;8xPEO}&|Nz~|G{yx?ir>=ESQwQ5= zp`BF9D#2=x=XaQMH$J(YJS7ElrEB;$9*jIyLyKXSA4>UxVNid8|ztKzjuvGa!<>7^|kwoORuE^ zHb^+QZ%e^#f$weZNnX<9)*$4~sBn^q9khF^V~3o0QND$=w}eTHPce4LK}U8lh7dod0B znTcqaz|7S&KA#K|@?h}t4%4kD1Rw^|Mp@k?QxAG7#xpJ<$S6w{x(V6} zX^{9Y4V?1R8VW|P_>MQmf}d8KC5aLrdR$qTk)n6;sqJRdnTzYnW5HAVm^`((ydGmq zHrKVsDEHrL^BsQNbfm=YlLU9yZIk?3EWcLQE#(b`*Qo`V@zlEYm+|FtgDPitwAhEZ zZrQAtF&LS;Y4)iFnYeCM`pR9m*GYW`hD%B+?P(~PZ}j@?saA*V`WBiL40$GczE|F9 z_sM3R!CgC*GdSy$HI)gUtce5+M?=dkP-{H!bUdRCz1qZ^b<%1;+FC0JW_+^)dM5a0 zB{mN|uATMer)38IICzzMYEA8u9Vku^oiT9^53|DPn2BOZNI0voA_r?MZHv3m?`B<> zb~MnqjCY;dt{azkfG>&XHMJAlRuwy`O{)mB0Savk)?o#_w_+}dKh>fWLsZst>rfta zWHK=UkCALKPpJjWxjnNZQ6rb+fOz zJ+1cBwGs2(YP)@6TMAIyN%TunA+VXyeER?xsSp>pcL?Mb2sA?xJm!!|9&^LUz%JJp zAM#zivY9MuKS>7_ZL35pbs|#ky(K7>aGQ1mWmkp0TA>&uPZ^%@C89-xLmf_5Wo!ATHgNh%dJhaT?h`bWy6`H#O?v?Q(bbffIqZwKAzB-%idsV@xv9 z7STMm=kT^|GS%koHFj2Ubjl{V(q5yTGK)kG6|MMnwvNhoQ})xiIW>8tG4QxHGG5i@ z7lc^xiKk>1u@7%?1Xhq9V`+PwZsK(Ak7^^x4v?YS9w$h*Wz!eTojp#;F_zlnl#%YN)I%MiG6S35 zt<7~Y%6$g(hSV81hRrCj<7o0@w?BBHeR>{>XGHDd- zkEGavS8}z=ifnbXii)V`gDPhv=L6$x46_K=@n)40!pTaA=a+2@v6W!d_DMF}&)9}~ zv|`Pka;eBKr23;-&AKa>_ng1*26Q|$_w z;-R@m!w(AnNS7mBH&b!uc0Hwy(t4O4Rer`}@Ko>dK#-!y4YqPg-9r0m>hzokk5Ni+ z>Y(ni1X7MO{gj31IU@c_9!d-Wk7Uab5Qn*eM=VGpwsZY&p-717;>!HIfyW>lhgpC} zk9i=?*|rD#ga%i!#8iqh>&og~7`bD+GnOPZ<_ZVf#_L 
zhpx_#$ff}%7})G;7L`-d5%ySQ#C3;CrYLH zaLi?RkP?Hx%g|ee!~zF;=Z0P#iUU#rN!Y4w`9&2698QV@@+RQB1PcHZWz6SGD)7SH z&ksw@w?c4Uuj{w$My4>}aPpiU;(0F&c)>neVZdJxA1Ms@8%B>T?|JUU%N!`6G9!J_ zCGG5-4O3hCbUS;K{)YJpV0SrOlWafEA)_neQ8i)oB|SD`=Za& z2i%>+>%t=}m@x)WfNiWLuZPGf#pHeyHBUCOQX>JZN5Ot~UPL|=S1}%td=*4~D5=;| z)7>j5O2Y^z>f(V1T%eb*1leh&?5b$)#&x&}D+(2dbo8-Trdx*1(o_d-OBgItLyh`% zE)1v%99ki4#CH)H@JF^zMSm__twev39HFevBDl2fH zl8As~f(_4xs??ihWt)}!go#!ZmgbVnbV&=n+AAl{Y@sit3RNB^T_vJ+yOvQhs4Bqj zwA^}wO%uc-IUo_hfM~J-nV3fqTcuD~tyKzDowt1aoxD;*NGpY2p|V@sB*v0Sgxa)T zk0zNlrD<)X+UDT1UK_7R9G4s;clPAoI-I+mZN=z7gIa%m`&OJCAk15~4zj748B)`k zBo}cI(5D<*n3c(uDZI;E(#ZkOOXgX$2}-}<3DrEbx=MU*);y#{l3vNGt1h6Q*I@fw zLWN{wqApIWQhFK#1QNWuUfP%ua?Dk2w0S1>a89^x`0nW`gspkY%Akfxotkvd!H8`y zNPA)j$dkAgRGV)KZw}R`g+eO8ha!YuWsvEYm7{ODgDh>%D)gz|g0QiWRtu!IVugGd z^Mv%*v{gmb9nh$jfrhRexoy|0x=I1MjiM8dtt38&N-yX@6-TKfq0)-cGD;)i<~}b( zDJ(O@B(_7}(vwJxJU^Y}-(9?Cav!$I0i zJncQaD!okBZd2tFR-J-!;!IYGZ^V=pdSqstrXoJW7iI=mSDg;58kI9Cq1pg*)qaMi zyg)T%idmy98Y~awPlTk2`~D5970>V*|(56jCwpR8Ub$AUBruF8yM zSkkL>;&k~*FuET%oqo3ap;$ZWJsFW4gs8s5jO{^O z8;X@loMVDFHi&doIP0YRXjI_eb&QHJafq4f<~ja6hbL6RM`YP2W zQ8`i+qUKTZ`wrFiw;d(_DpBDDa7Ru*U~%nh<1 zjam>@YC&*bL%l=|h*K3Z#kS{yxEl}GiEs_4$%B7+PRK!HC(jd(m`@7~=R;XxRK!qf z*i33Fe(Q`G=bI*rEA?^J*ReDTuHhfkL{H?*7ct`aJ*sA&%k$_Uj20V!kTksn0rG*- z+Th;&ILG>_G6YR1Ri`HT=*PzNOU-^EWoj@c#Uw%s!~Nuh%*()}!A>=ZN0dFrS=h?H zYknfZDcmaz#>0ceyD5y7t8YmLtOVD^O>za4c`t(%8Zz{8it}h6vqxm54~!(K9W(l- z)Q$?jht!S&gHsxWej1DqSD=Ywe2$NC;%YJeweP_xa34;AaJndO8&o{@gqp(%=hT!W z0(%^#nny=0#bXe6J;Gc*JDv2$v4x~}cqdjI0&tas4m4~77ldRd*WNH(bdyZ1ughHA z*LmGDxb!2;Kqs{z&qzl-@;BRukbAtTCFYTR>OfNH58^!T4lRVGH8~R)N7Zz{ZqxK_xn3V> zgJTZ#$63IZM|ld)(XH5l>)N%cXx=!+`C_9_K!Uk4DBWt;*YfuN(Apk0(jwX z3UiGAE&8lbJ-w=+ReTWw#ww*lo2Z|sugoChGpj@+s(XU$+{v*GeME%QWl*A88onxq0?pBrea_M zY;jXf#ul$#DK(}HOjEY{{a}vaO9F(;^5-}n8AgR;CId96^L3CD-wNy<7z7i6$hJmx zx;_pqumxO))YS_yEET$_);Lk4NKcDaMYA2jLZK}9Ux&Qt&7_|FUQlha71dIC!3xgN zV=6D4No6_g`i#kh=oL=qRZf@_;?=UB=Iz@GXn-gq0Yv+dfZO;qi6Ab-MTeUegv;Z_ 
zhY_bx!_9~+M$Cal9RJ1wCS<$$iD3aT@IovwBA8M(@~)#Fmj8tt0xKf30a)dSbozpG z=B1`-SB~6}nY-?R)H#fC@olxD3NK4ROsIr{Ia?WM9+e z?JKSW@fWMGncjeQ6k}(YVv4n4XNrKrUQxDJjCfU5c6^&YEUlXDMPQFcE1g3f*w@;K zy+VAf{WdQWl7`#B;~>Q7cxmL>B*cQ)^8bPnbOIG$n$6hG`?FAbaLw9zj`GZT;`E^9 znG1DmY-u+xSq<{FPkzkdr)tHxiu)2udm`TSG&u%8tn*fb9SvpyfAlH&B^gD;US}4_ z&a&tIea3?50Tv`hM2ocsqO%o>KD76!B)y(vtf?qimLd%bqfm)M$jZ4jYSIUO6v9<) z_+B#NCe8Rf^%jc?2|SwA!>dy0JX?*gQD7TRa85W(vVT&&-`T;LF zNd;QCv{_Ra{ig9+7%Vx&XOg?Ap4%*|XhwGkxmdx*_?(ohmoWWI@JoEE>sdGGxr){# zfol86 z>w(3>_uy-5d{7u1{NZAK!{!W=O~ctd(Ql`b9a_Fwj_GRL*gHs`pQllQDYeh=PL1Bgu?h)6V31rrau$3^96oEi1gE#<{p znR+<)E;+|=TjC(XABR^ekDxUQV(Um>bj&ExqMCH5SZ2nT%vO$Yo`@YShhUV<^pmPI z!Kp?mb83W^$PiqwaDSfMbLIGb&&2PgF2B}xgta}e)!ytYq+q2W2mj8NM22wTR>mRC zCr!O_t4zzmUD{P*juW6!jkP03^TC-Hy+1iOWTOrlr9i;(DmcHry6Z-d$e(A^i@zxT zmmt`+jfbzfUBy+G20(c;gSO5riPCnp@fAmqMNPk@QPnfwO3jqhL&`JF#5+Y>o>pc; zNCyljF-|{^fcE}R0z@fj9b>th&HYEd$o zFH1=9;tR_H_e57KnWtBWa3Y1%g%v5J4lMyGoO3z0?hzBJQ=MIW!@;ol?%k?oVs!i3 z&)j-YRIKy`FwSi;2_N$)6@o|U8Gf-aOX_1!ZJR}lh*RiAR4nXOkRl&{*{Bz>kS?-H zBM0o}P3B2PPNmb^vVotD*y`pWF>^I^u}+WO&@17rQc%&;@P<{=u~%BhuD~JxeCrSo zyxGziLJmn(}mb)Z@pfCWPP%ExuR znn>{vQc1;vRpJ&QgU0+antLet*h>}{@mLXNoiTYLOIYhL`nJQ7j+GicDv7b-Jo36@!CN7y~08|0xvV2wWbN&H|_> zLkvsYPgsoVDM=Y>l>pTTWUVYU=FNwPu{ zxBU$IW;yBtS z)P(1CahzMfE|;|~+yA<+OOvf4j$!Xgi;4F@+kP0FG1;KJf!OF766NDm7x?Z{vIIfA zWrDHV#=9tWTk=&{eL-iCDg056lQ65Y0eWgqB4)j9Wg>GvMw$yi;I)Vnta@vKLD``n zy>vk_uVSR!homZk>nM5Oz~?$jqB`11&2^pMW{MRWCaTNDmjbDYWX9jnE}aq{rhEtSi&Qe(Vek`@^A3AV`u`+8rV^WhUbI;45*l7( z5}IhF=z3!bEilx&49vKg%#CDJD2i~0DE?KA;`Zz)x{6ACT*k+ItVnYc8@s zxy-bbE-%K_t3@I+HRNs#&6uqq?hI{hT+#|$)sAHchcR`STLMN@PwdnO3vc0f7mA9J zS~ep%WB2V;^bMo18%e|m9Er#yBCC^h5ip|+b}Hm7Ch-q$9^asDx>rZ&4bb(bnql=hBTXy=(=LZw+SbW!SpHj z$QcW2N_>vA6dfqi7jl+N9}jFpN}(qyg;O|!a%g5RE(?*BFJAnMXfu3)yV!xi>mO+E zw!fRgZFxSJbAdg$9SpPNL_2viY{ZE&Ya>p%ZZdB8Z1S`zvv~ee`_iZ_QEJhq6Lpp) zKJVj}KKo~Y#Wb}I>MrC+PA>o7-+G3pI zazI&?2Nm>!eMpMIp(DAw!8J0663;$LT|ptaYP|0Ne||o9;3+dYwJ`LK6X0{W=cU-n 
z`VIT_3?~5e&kE3ma8NbsFojL&tPXD@o_fB>QNT_{-#mpI4tQ0yg`+Z>)Aq9yHlPdt|E?g z1bVg=s!i!mc0i?eox4mjN_VdoUQozVf+VAa>UPEq{<)U@7cQ&Z?L1to28Ekpr*LlZ zvr0#30Xt!9)G*PJCIq6PBv`^$S}e{VhypGpWA3)|OF97bXsL9A`DkvC%q0@GI)zYENDb```W7^dxmZXY zA)#7L;>0mM*L{CT(!e?JM*5jk-&8LMe|8q$hIg-^Kr%quG96!g{#X(aBiWrVy9(k$ zlZau{x7b{kw@vhe@ul1oo*^1owFsE-G#!agAICK&vV`{-t6D)1W~K^K!ZD3m-E&}A zF$1|?XM_snhsbgT$7S+QI#bY)4wI}4yYeNN;Y%_kdzcnHN?vW!1QJ2L3uE@d)CQyF zs+MeJBAw{bY_CT$y-M`$$^AaVWoZfr->3oG5^4Ua8y=19;7Bo-m#5EZNr>^=)mm^e zh`My=zv+%i)8x(MdVr-TsDgabrH%}pmBq^{02LcGyC z9?Jd2q~UvRtyo||8jfZ%XrD7(Yp^t-)^37=^E&$ zk6IYPvZ(O^QvL=L=YyP>%Kc=1bODnTvqT$6ohBVZqMQ*DLinuy)y?zwQnH9;Ft8WE zP(Gv~G4ddShr%_SJgo5~GT6erQ`(Q^dZIQsA&Pgy#a&&4c^PpuEa8@vY7hO^ONQ=j z;0Lc7j;}h!O>AHp$xs2a@`FO}*s^4w-7O(ddVC9+d$}3AK4ZmuN>JxL})y z=o)c1YVHyF(rJf!M0N>?D<`_Wo9GBRxaR0z8*NBS66LVj%a|#@+O8_kn3U6BL`g0p z>c$WIVoKrUO;)g%M{T*N$J!)39&#Nn(G;Jx2NTWB#AxE-Rrn^X^Tfs;UNF5UW8355 z(cWvFs9l*&0?Asl>TEPn7Ij!_q6CRE6=EzIktK_`WQ$1BL*R^I5#KC;yd};aW!$ld z0lHxk6OB9o77QKpB}56(5~gG5OV%>zzohBI(y;sP^Y_`Fzn7>TL(@a5ok?(kJ+DnGhGp!|g15eMTS`3D2Z4^dU~M zHE2tjg^l(R|IcNDCoR!Jm@{Cdj zq+YnQ63ZyiZCBeKRi~@7(s{cT+J*zQ>P!@RLJ0tHyH$McK;JX*d6Y&eoQUKWR|#xn z5V6aNQc~{i3{4_qtREHVwp%#D>|2g-hXUbR|K4~8>DfcW_>~Z+V`@~5eqLPDHmevh zeu=#hY?r5>QcP*!xbQkVUvq;65?||BAnr-RmgCRQwS+5NzM(yIg<1&Kunh+t+@c8X z;&rHXjpz@lPIMUV8)Y7CgipPP`&SyS>LuMsUd9a#mmGLsCHAp(SpWlk{`JEs%oXyR zASphXXU>ENbrz!EG*f34^UDuhH3dt41%3qXawp7<)rVBT>n5S`W6DygznIVHsz9q!Nsot+q_z{CkOb@ zY=z(Is4mjI?za!~Y9kyt!vpclY6Ts8pSqhdu7});ml~0eUv3LN95w*iui%OpI#?qE z=4O}wCqxWN#(%MhK^~x{b@k|{a&5lsdv9E(RUH8MXH0~(ZMKj=TUU_j*d~cF72L}R zF(*DLVIsa%h?PD8IVQ76_Vsp8Xze#JvK)+tk79dzNNPX z2{qi5PzF|)P$g##P*9FfCZ_5mgGLx4#I3l1Dr|hf7gu#!b47U=iV2D^G(Fx>kFE5K zxcEx(Np2gDkZzYyDVwD>Xwg;E}!k^%5SPfWr*D@kg~?83*?^~J_W-YQmu z3gx^`6fIAF%G<4zU_3i-54N6od&$_q%}Kgh5w{MGe36K|(p4SitlG(_F6|E_vFS zgCgw@oxTd|Z0#^_4Oe_=qn{7!+RnWba`hk?h>6Z)%#m_=Xo0kpb}Vg{4b@vr(r76|o+Ow_#~hke#J zkT?>uX5@a$cN)4UbrHBVQgW|Y(?w8q7lEvB?MBDqC?yU7r8x1_;wDa**2HfP%b)-E 
zV7UsKzere~WEqe!oT>#J)6f(XwhP-dm0KVtzVEx3O_$*RUprgJ!G?yWB@U^#X=;V7 zzV?c;_L&r1K4(*KAv4^mxK-K48B(+w!`@w1A5437#*;UduS4(iz4FEzNSAa05)yK^ z)HPUn8sYN8W+%;Ctpvt%1-m@@y-}->E7s=8w0|t|^I20V39wi(10}O~z+52|37I(X zBiAc96W!-Pl75KIAh~SIzu}H}zvLw|i+%*M-;?`;vF2Qte%5v0Fju&u*sLduruP!P za#c>SV)UPFd?<~Tz(19f7=Nkd$E0!s4p8IaCe#`p!!*IB*I;d+c8Av}_Sybw8C(fu z;eknxqPvlb-Egn6%()&8XNfGY?%ble5RXjw9dOWe4aIG zL*nauq`CTm9yWT+l@U$Um-r8CiK*rSLGLQ65=lt75SK72ir>qT_bF(_Bm5nni~fXv z!MCD>BG#h`CR0fhFjLK0!HDok!?ZMALf^l2QZ%ZW~i{WZUimN6uTsDV_c>lMY z*Ii*=_=MV$^jqg8T6p9jK{%vo5}K}t?zk%`U0L&*@q1n{Z>u2Dgzbz8&WHT)Jgj z3)XR71v2X+9ywz3s+B@u7pp1F_aBsN8~^OU>+!sdnBCuqP-*mt+2&+yR^Nt;D0&CU zAu5;;Lx_Txq(A=cgjF+GO`-mt!^znrhn&Qf`X|$5#6=^#O2t5(%IX>r-;kjnlrk{u=Z|Ah4NCE5$DJ(6J{!Hlv~OU*@6@ zcgUZ$KFOcD9FZQq&ayvY+;h)A_x%6Mwo1Fo386R{+`ST<5KW5`{nN1b*}N;l3A0eX z0tpwrCaHcC3@rG-D@k;^b^q2ixP<(LO}%cz{aekvzBJjdEvmdee5w4vz*c)e^`0(3 zT{WQIX>-avx~f#z%!0hM%cCpy=pA}=IrHfK(Q_S3w`@&cqNYokrhB_IU9zUz)bv88 z=^x~xk92f7<&jWSYLZOVbr=I-#b^nWhhQX}WApRSqm($~66| zE=`xLshJKGK1s>++c{dPa}liVsJc}(nuMfxc4>Ofno`o9z0;YdJGwMow5HU2!uZTI z9qrO|!J2MT(^HwIo4PbTWlcxabUxGc7rG(LytP$X&v-7=cA!hQbJo-> zSyPiFvzeylE=^~xDJPeMv6-f&E=>n;RBpI}hno%+p$# z+1se&F?ga_=zk{vZ4~W$1Z8-84w&c2sWg++Ih1|EXueAK=<=nUb zfBgAYZp?g$NZ%kyM@8j1525@~UI#DB6gRfxxTCCHTzQl&EVaTn9{lLLKcu=TapetL zDekoC|L5-gqwKn_I?s3Sd-Y!ZuB)GxROR^ALqMrpg+#|#8rvOov-~S#10Kf1cujk{ z-LtaRWJ+E$mgAMlAMT`*C}}llpcs@;5J4A-8Hq%OqFV%|$3u!BK!5_3L`j5EpqdEK zG$IHhK+x)BKHu*-_r6!9k}TVC;x*QC-S_Uh_x#vr|K9uTGgUmZuZ?;s^N6(FZ=tYn zvb~+sGw#c+ZJxtcS56S`Z{b+>g4tgk;Jv)@oJPsurr(fe zVJZ~kS}_;qg}OFCERjC;Dy-B)K^qtCO1ER%)E>Z!Q4w1$R4w~AI2~ng1CNFidNI-W z`ejwF0+5um>Tz}edUu-hzJ^)@4(LA&YFoXRRiMF)e!IfT1f|P(L?xei;IV*vtp!pflGFIY~ZKsX>VA)d~ds<)sue4ym-Ga zy3FL`Y!pt`t7p$)k8!v?e{Pc&CZ{ewhaI(Bg&vG<(>N=l3WRLNn5>NncSuYnMCi;Z zZ~Ni9VcGE%Lb0IP4Onm3{ODPO~T?Jo@bA!Wyw| zq3$?CA;0;qS|2xh?@B<9Lgk`?Yzb8KUa0G0lGZEgVtsgoR=4L3ZGG7-&= zn@_iMX9B{>ex8MzNOu#P1x-y;S}9;ZoiGU$cuGXb~~>_Sx7 zu0O~E!ABtwYGJ=CZ8yJ<8)7{mq^K&E>+TWl>zEIfg0DY#XH5qJE5Wa!=lhP}bfV_^ z(*-lqgi0!1c6C 
zULgd8#}j;S1U%mAkHX^|5K({&B$gxac1kKTUc8#y$NOoSu|32E%;jcIvhHp!;G1aS z0X{O0d-#5eBsY(MWPXVzVJ#-~?)G3=Q|Z9%ITi)2#VX~y^;!`A^;Wm6G>JT#^vR~CsxXn^>WLjx*~*pB@=L4J@d?Qy)p5;G4Ssj>AEoW zX^w-h=B~{J%jXVB)Y|oG?yWHQ^`3kE26I zk}!;<E)($?_UHM#@qbU!8tO@l}3DK^e7%0!|=zMM__-?c==-{@UWe z!Q1vd1;c+w5ubjlNSpi7{$x$opA0U4(rGXqkC|TPE!xs~N)b|g zTc69UgvkqXd`GwTcelg$vG`rB>Gz!PU?`uqqCx6SS6*i(PxBtduJ20Buwd(c!<0QR zJ(>=6=vt}@5NWjgnE!ZBs(oQq;wL!St6gJeapdknU4lMSzxu)d!%LR;eNV7i;J4Wa zdTGTuY0#RuVn3mA#+S{nLs~bSid@TPt$setLP52Uw0lO!G5w&fh|oQw?WU9#$gXo| zz>0g)*prfi3c&I{DTy>46!%|T=QI-GChTo1m#tGWsZsFu(t1nWQ3`nymc~+5f2f z4}|+-AN>I;eF7xq!n1z?N(1otFZ|eJr#?Mtu=vE$SjZ+&uw>1fI2#xBrSJPjnbvTc z*3rEqbfqhxyUErtLuDZ5!n^7yd>&GR#gCvb;cD4^wa6 zH`FiH9q7xQE7)r#GQ)nzP6>gx1hzEvQa!iKUu|%YGPsL?(147idor#%qkE>EjFJSx zrzhe~z7KhHao!Xs@$gt`l~m=_F^2SX@y@rBr-H57FUXPAp4>Oom-B^LD3J0dz72)5 z1biSz-YREA9ZwkzaZ)ULm0%q z*eEDls3<9jgnlWa8!8FX#Ag&rBJm|1;!A7{33*G7R^n)hxR`1a>DwV8ke0X}D%bFR zL!7#*Loke^<0GV_$STpKh^RO{cf5~Bsga&j7*i7l_c@_EXP2Z{ggEwK z3e$GXl#={LZ*lA<3knRDzc4V4^U~|DjfK3d$Yag_A|bw{xnTa^{9Lr!^3PG?&}XbN zGO=3rG6s{SGZj+TC65v*l$~jGJ^Xc^2B{}EKMm1gujtU{tO_xih{P-txeJ0^wkA+$ zSQBV;`J!~aaiiJPv|WEtxT&*vBoMw%xQQDS{j|eW2aSA@US+k&B*3cchRr@Ax5CJg z9WYhoJo;Nt#LE@pbR@UCNrlR#iLe=-Es~Xp?I@Akk*So1??Ji-ng@JPeUIp&lJ5!` zi>n8oY%^cvyj>z0_2g}M3Mdu>Q)eyWIK-`mRYTAN3nr}|S>8RG5m68)R(2M?Ulb~u zsEb$eftBmuhht4sZfVM#5x~B_K!~kCh{{*j3`ws<NDnFr0p{1Gx(MC1tQC=L<8cLNB(wNP*`?}WPQbQpt~DTRZXFjhC`&wXa!90}{ZyU75G5LH_wX)mtzIAn zvC4r{4^Bxe%OZHx^(lDN>vHRSn)*pHWEGFu-EIX3;G+z*>L%j_oN9{Gf99J7?!Uq?udrL*6&9z&Zm(z~U^lLTLAE>cyRci7 zf0OZ|&kEp8@SC8_GW@POQuG4+Ha;1@>1%6S{+llR?pYnbi5UsML(B;LCMtEaaih=P zqPYmex$H!dx(36=4!B!kxFQDu!^yuEqWeHYVgnrvhrUACp~P^cf-u}3tr(5^FKLpB zu`Lk_sRMDl!e){-Q~=kIe+&xkEf!vw&Bl{bgAP}ymmxH=_tcBqytic|ffe{uPrZl# zYCS!PKcr+*lno)^Wm%Gx-Op6b^J)wTospe0K3mR=1>d5y*3TuyN&0o?+Lm|^duZ@@ z$>P*~KA{ejO+QN0aw#>kAMB%TILlqhW15aO-lX^4S^MYQJIBr~8+YYf53o#nSV53R zb_C^QpInsX&kykv!3_SM_Fr6?Ki4mo_i%@O*Sz6oviAK&@>bc-?)ee-tXcg4H;_{L 
zg^40*??_uD_w|22~aADW;d`o>H$4p$O8B4{jO`v5%5UPSF*3jt$rO)kH-Rpp5$YR>CrmYG9s zQj{nUpE(xN0XK#_@x)XyvA2zbU1NqPMScZ|mHe!q$Ht~g9#!MpR$N=MC~8vE7_L4s zDBCS^r)eg-Cx1V`N^el9jF$=Q&#wk-mWU2s9N0Yf5`g{rR|sq-`I-Q>{rBXJWWUu9 z=oMRcu#{j^fwBn3u`<|HZZX|-f>*A?t_ec4A^>o8j7Crk0s&yQ ztRR4n%BskGHv#li1fZ0FP^+>>1Yk?jQr-k$`5%aaAz)HuzoF$2+u@rQCs?e?#|U?R zTfbZE==Vjw$&hkvo@nEDn(C*ad~lzTtc}^UsK2eaN*YMAuQH72>}gp-1OH8B0tKdt zjP~on_J>eqA}SquB7LZx7NY)l-ao3Z1n07Y*vMMqsEymU`Q#vpS+-^t(ykDr_^tQ= zK63w?R863K&8U4+HHunsyR!D^?9STz zxfuk+uJrBU7AGk2(_P62bcK&j>k%5=pOpN3ypisyBN1TZ05q zzYis2^*8%yf1d70j&f?C%X>OLg@x^$zHRk6JbcfL_k&(IazdM)JlPy9yZ|uDr>2qV zwtaGFaZ^6PGqoqgE#?OJ`2ZbJG!5lQmf&UYsxJZ9DMOZ4i`xZ8Hf+--^egu!_N~Hc zUgYCyQ{6SQ&vW4NWbH(PLdT)J9Epe{M%(GZ+v5pd9uD-0RZ{cDC~2KLk|u@2dR0GI zEs%J_L6S^|cJV|7y>Pq{Tu`kkI#m`T?ge)(LX+^W%UVv0D~4nP*O;XBEOsgz3I$6e zZVgm3)Nml88@7pf)}(gYgTc@RP$`Uz^om%lP$KlD-cyxG6+R&u>z7zJcH7Du0V}!0R2!!PG0s!w1rFEjG{;$q2(ws4?(ZCnULHd>i_FTz4TQ{$4^O z_zObCu9KB+lwXq*l*16*P05QKTGE}p*+HN{}GTgxCr{YTm&N6ZRug1 zl1ur~DlH%m&BKgCW^i$s47eJmKyow{Jx}m5b{KwIS^r6?ccAP9>mS`e zB+UR)vWV1MgWv1}5YfSrlk5+h;XF;;3}8a~HjqIWq3aYe>?t9E{p>8l&EqevpQ04& zg^xw%L3hsmdP7PAR$45T_ZMP7n{2r-ptN{CnIfOUNl9s;?B?!YjJ(;$fhEVg29yJS z6X_x;k$0qL)%MxM4ma|(t9Q}*(Dz6L`vRhBN3s;?J}u7s39(N)RWs+3?9-%nP(?RB zqMPU4_C>XQF`0rAvM;ZLz~vi6908NnFdna6BJ}~@9dX`XJJZyFBw(~t9T2Hy94spY zK_kqzb6H50HB;~S$Nq)jw?17U0%3W&o4wXh6uf z$_%(D>Y^8EL4BneKx1H~v>+TIE6jkt$_${?RnUTzDFW_xnE}-GQFrv(Wd^YS-dYgQ z4`x6MD8ciPiqQtBp$}I!z@B!ESwb=7YrH<#+`M;pumQjt+E~Q~u>NAT9Wae)jl1=F z2+Pact^aqcMWmmBZc;m7!1e$K#icedtcG?Ji$~s-WgogVeEp8%l4A9fu9xx zNZ*MtLb-vL1I56qVY%;Mr|gzOpiRr0X}Adu@OW<x`3XK)+r9N}fwJ$HTP?1$O# zA}^*!kxJl07rvy%LHMYMF4pNdddF0)TT5V}=jtcs}YC)4f;#(~glJLDc}pwe9AefGpdr%VV}E?{T2CLFCm*`Wg2 z1bH;Gnn!@NGkG-~)mGP1gR53U`>J6t|5rJ$Rl7jSx<%2uoWx8sOj0*0h0?C|37OJe z&0rN zuHpy(ZQu!eE5jvbF$Uy4GSd zmM$zs!61>0FMIY1eU<2kAkKb8#-CJEnkO4J#%htTGVb+w1F1FPPv{(u8pftKMf!vq zr2%MQr%@wzBs<$0D_U%9jGHQ_jiLY*>ra+_ihGJ8$T%0VWA~LGiAt0oNs22DecJwq z-~*8pIYmb_7}{hnGY2)uexL5?U&)Orq(l_%H@_d^Aw*swY*0<7)g7xj3azd;O 
z%1Wxv*k$S((|+ucxzM|BQEm8kHe}_<8I<^-T zO7f7kq9qx}0AwpS$o%Kifn>bTwm4CpoB$0kd(#klIH}uHMc-8R3EK23DdU%%-|S-~ zUW6F!Dji@Mga%;KomXl0=H8U!L(G@~21v~M1`2|zVF4Pc<$S^`<3KVNS|jJUw-dUe z=pNSO>d)=ug=<6al|>DTy5Jdp7i zaJDYd9?{s;thrA}c?#>qJy7G62`j8BrX^4>-Z?HiML<=IUTYjS!;)w&v^tnU3J&1$#Gyf18wZ$Hu|o`W|*XvVsj$zRz;fC@$2evWuPp zv>YkBOcB1Yaxs=xUkprHFUCP|QAtDxbOogQbp`U%y0R!&^c>*7%r!2`)%cOSI8cOy znir3GQ&z*?juSM6JA*yAg|ttV@4IjG&e|c}!hq&49*&p1P}|bMUFj^v#d&|0_qF~S z6qV5C;3`1QL^+pE4WKeJi-<8zKPg)o&j*1iN+asZ_eHLH5u{kqt%K&U`h9Im&uy(v;$C<5Fl=5d=eibM6%@(`w{2kX1)I9nP8?v-|uorL)&*(Vd|&)X+z zVJjT2D2-QW6)3V)`uBpAsK4PP?Fz9{1)r}aT@fzjCNMU>Ko~8E3k9QFSofGI;_OKH z5QGyn_!7KawqoSGX~|$v<{!pBb+o6lKObbqoT)R`3DSDw9>&k65X81Ov_-}XFCeRd zK=kWe651~55HL;|8FaofNf<70R(aO{|tbaSdG1%L{4)q)e_(V1hn+lq$vkpRA-~>Cn_{NZ1WB+DQ1B;fN=>cf%@a3VH8oWU9@doGwc-=c?8A*fJhJ)p zQ?yV0K$MOJv<``ny2VH7?wmf_<^7y2z7oBk|2#~qWb_&hDh@^vBjgX0u#Qp7{+@dl zkii>{%-0TPxE{F$IVhgUm{*>M@H9%M|Fk_`DBeF@zSJbXFa_m z0)i|+31d^Kz6HAs@2<2UAfu$a-YEsw({Q}8@5q}4~sL>|rlfmn$?U1C5H5YAfX= z@6@v&>J#*>7_b1+!KDxL_aJ}k*}MCNU~_3lNf}huPNO)+&p=z;e?v|s?3 zjsu62@$)f0FZeT2ToA*)cw&O|d1vhuS1Gt5ABCaWBx^WIRnIOpqw5|!Dk;8O!Q}bh zo8?R^1NcJvMs7d%J}eJb5lwrX*v1<{^~qZAD2GHQ&;er_1p67%B8^8y<`f?x5&`{; zau(ZYbfB=d?3}ft*eI~;j1;nx)9e`Lv6{jNbO9i|{?eS(h?TIYa7kSTYZTEMIG7|& zN2#GqLLbgn6lJlkTHa>355Wa*v6dgChfY4#Y&XXf3@reY_v|Bvtt~3vFqSkhzq}{x zK0fifR5%*TDHaMkdxM2~D0MSxlvRqG<~newC#a5#lrgBhiXlG(El}z?{z5^?Y{ou% z^U7v+41Ly-H8S?1qR@(3byXuefDa`22{C>eto%i3}dZB9Z>1$e?23M$Hw!Fo+?5e+= z7BP0RU3F9OAwMR>Vq6( z$+GWAPF^z%PS!}qwZ~7~)&Z>~`!9y@>7;v=WF~~wXAXhz7>fx{5f!`_(8&2TX((wK zZ+eB40Rbb%fm5btkJM#aA#H!j`2{A>fn0+<^MR}s6l=Z7MHB#p&7O93Z!E+T83IC7 zA*$@F3dY?VB8etT^aD{R@u56@a1<_}dOKSz+ymhS*@l-;8P#04D{i4^WcO44WiOOU z-cM1}QKfd?_lliw51*-25=z3 zeA6p;*6s#TdT`_U*{?AlpMK#01EP5T5*{|G#({H^;sg}z>aK2_L9q>HnTs_Xan|UUs+d#JtJrydD{I_@bXgM zL5t9NC8=H!8*d{~xu}Y|5}&Mp?PQvP{i;3zZXfMtgbj62$6Y7`(&BNXV~GeoKYqymmRa2&m=8bb zf$*c=3O{NrE)(@zjSzYT$AhB9EHoPdzYB=){L&T=zzAtJ(jyqJFph;`pmKmkfWpe_ zFb)T!wSzmaKg|wm=KC~G4`fQ+>{?J1=2&8)ibc?{w~<>I`+v8NxzRFc`F+8e6v#+Z 
zXSS?H5Y0F>P|G6nSeFxqsu7_76(Gal+>S&Of(^O>RNxRt6!U|B0yN+NJHm*9UGq3M zpt%;LXv%}IdiQ$gR`qChg+Qbm3b_~hA*hEUpNbOf(EfWyBu#tJejIgXnMtdVP$Wv> z1JeqCl|v~EPB`T)>6(R*1IE&dMbh%7r&Eq15947J=(Bk>imisr*i@!bR5Op`0S#$i zbEzhFV~|OwFe?2~y;R%hR}te3U-G;7RuN0!%tC&+$A30|M$&{Ojm|B}yLxY%Dttk5 z7jTp8T|<#{T_|cN8jj&7PR{(i5$p{C|4Ok-fKhsQ4~K8@$CA4ek))5hOSue#&|<38(ORr~XteK5tfK zkARR|klKN-!?J^@-QOj*qC&x#p4k;tZi{K}9kFE3-p%$;G5%RvQ+qQ|=3M-IfX|2h zc@jO|_(-UL3a}QH4n^<{GeFXnxo@iAq!GSXJ>WZs}4T>re1oJL zU2psk%#;kt{(;m5aeQ07b?nA^J&{!**jw4HDM}b@oMMy%fE#PquI)i{t$7EKmM*Cc zQrZ_=nH@p#JN0`@h9lDzXw7$2xDg09P>RI-%6^Xh8 zdgNEwy50BBD5$d96J!~+KGE*a`~M{cOICY;dK8i$KNh+HG0Mqxb)%Cd5kh+I2gmN^ z*w)n^lF2H5_GVxOJ+#G}rMSwb1%Ya+7Jx&;t4ts^`%#A*rs2}?2R1ex8L!t7PC6D3 zuus-%_mb@-oAjRnXLfK)(n_p=_-Xcw$P;+tfXEtPZt?@X&Xcfy@o>gsl6;I|h@dgB zJA=Wde7qBDt%W)PAvbxl_CL|?fDwV#jUWSIzm+?4wH(HhT}2w~NS^4hHOoa7`+r@A zBIRINE`}=g97_ASMo?+QNSj0`Ec3932tPSj%OfZJknTan0`dCezM+k>zj)Z%JXJQM z$)rk)*f>q$dsDvj{_MR&u$QJb9ri8{Dm2XxlE|B9vX}4TKY579u%L2uWbuFMruy~mQ`z)T0G=$b zzTrjM3RS6bIuM#;R7?}V;@>Fqs*B5w{F3^Y`<8s@bHF&MR zKEVtK^1X!%ajiFV35M+B{47hU86q#j!t6Bp0pu|!9jr0WH$6$hF+LedFy&@s%C8Sh z8T~C}9l?HpmiU>{nj23^LA|@E-#5x#Zdg)3*qyx?NlwnPx^M}0UPt?sb|gdvC)suh zVo_NQee25&6clEI<`kGcVL36njm2%*g|UIh!)O#)3%WRUH`v`6SvNhz6bJ3^mauh? 
zYDqIEHJ)fwa22xTA-Y8467F?@2g;}tpMF`1PwnEzV&c=EHq-u?1R})~n(+h>+rpNV z9-{slHeo8|+DN5&th=-JL^U$2?;xfhCx1a31|&JIiQiDSz?FWW807J5$%i@k1t-f7u|%5s6H%Bb+9pq7xMf-A0`lWvWUdPjOj zty*CvhP>!5?nqDj=49;_JZX2<&c$z=z_~kXU*wxI>d)%BBrOGQ!X8;PluKyp2nLx) zlQmV)<+5SCUzd$AC4TFLDYx&~&BP z)r$*E72l%3+HN>N-cW#CJDzZ0d5@Bqvh7L^>kfWsq38gs)E+X+x^&xn9`<6>1)10R z5E`_Z%g}P^!!%)>cB0S1?`*yizI%S zPUl5Nm+tBOqnTaO|LeH#bz&e&SN3USvaPC~;&pe{4oV|s%^zX0fdz_pAipALkc4+5C9*=mWA7RO&|~I|F_B+=rgxj8Yh~gJ=SiwR3f3 z3z{J{q7fh(*I!9#l(rH4S-NFUKD2r??)GLTy8Rd0(}n&GF)jQe&nN`6Ij-t# zR->vqJD_h>XVdz2D&}zbw1f&=;dBZ}mzmCKdkZE)7*#QU3I}Nz)G-P@d$pa6rl*Tr z!C}o;5CZ$jAo1MU-s54hzov860z)Bfq+HB!!UBM2+Hp*fLiX3PPg(15Vh5aZ**PH9 z&lh}@1MCNuKMfX&-}Fj^tLC}sja%0D&enr1E8g$S`;#@HENiz)ui^9&nq79LN!4pO z#{}vtPe);^WRmW=?!TL`SIT#Vkk4bC>>bW(ZT?Hv)CVcsK0Ji=r2atotIS7hy}-3N z;1lZv-UUq25~v4!PMu$v;{gsCXq1U&vTN%c?^2)TlRk5POMQk<#k`wx)+B?t*dzB1 zPJGmj!TYHI2&CczpA)torFwk=wt^!ZF4&X(5a#EOz*JtoTj-o3+(!P{;d((yZF-859Nm*scxV`z9#N9#=14w}l`P|H96rbaO1Be0r) z{)pP2texhZ)xE>5k@ftyW{r+GAp+Dxi~M%jF)Hhy9-Rb>W! zq|eeSy&QBrgtW5Q%Y|jVoL$z-qQ1?bYHOunqFuerXq)UHY2^aEaXlKF%4? 
z0cJ-zIUMLz36StV-SJfk{aB*~rDJ*}3$pYR z^@-y;m?)xP*GzTP$5J0#GMTV89T777wDI-8;0NkB!cbV3e6_sz_ge?X0<$)DClwjFx|ztA*UX3|T-EDYuAN>_>08z7g1$|!DM@bu zI|syMS@y4K%gr7ox(>qv71kI#4ur?MW$nQ=-Llq4XCZ4(GbOJkCvMN`GlM#$D141ts|)J%5}l&b@fR0R_7#?NN5T=6U6kRo z#BizTfOh#*o>v!qKd7$3kjp#`ON!qvZIqq4U*8zkw4ObsqnykWr^9f{0W9j*MSjH; z_wgUFx>L7klsJ26`S>Qb4|1KyQ=9H7|8?M7Q*G})LQ?V@aU>60kMVXPR&;r+aG zUd7-^q(u-+eP@lcK>1DVT%cs+i@J)HbCtHRpzqV0%P}tydjy1H7@OPuVuTTV2MEI# z2Y8O!wjO=IEQ1k9Kx3ZXEDMZ7e^jaz#{CP-)`6)^%pI1kB==0GJ4lS+z3DuO&EG%$ZRw zCaEUfQg&u@kg}oRCoi2dtQ*okW;!t2n(9n0ipgV6Xi*TvA5)@jB*(oUANUSKKDh(imK;?A?xi;HVZ)H=*D#SA)1UtX`-=H~{p znB_RNc=p}gu%;fA9;Csrr88LtG<%t!dM3~XlsIOsVHRY*U`>IQ;Pa9{Bh-21uS5hg zjz_}Crgi0+D|&{iznAp;+(^0BOT%4!gsnoh0X!w(RA=t=+x&LCKI?c>&3PT35c$a#gSU zZ$*#NX8}#WosEscfNt&ddiGZ4raqT$m1FwfB*FDfQf$)^8+$sHK1fhkTm2Nci7ZB0 zCE5wWSI*Cp=iQ+h7(E}zP`TKX?$J3O%XT>6!&ttzCqg2k7EeDDcV}$j%Z^pp=VT-;TCW1nWwFx~8+`2=qW$as&@fY6m+YS`xvjw1UT~ zJlkY8N&BG<1ddglOHmT~9F=Mh(a})w%Z($Mhja>{f??xI&Gun{<7;@jfiBX>M;tfAncZHS z`}gI0htPB!iXhpC;^o_GNUaBGeodIF8trF-oaBPrKAUTdZ(z_)3L|rs5Yf7P!;yW& z4>tH5L>-0I!(lQs(;mJzZ^d&E*XQif*Exv8@f<|O1!)?p2b}uQuPXQs^F1I2tfr;z zAm{USG#?bF<^8MoR_HzU+8*}szgO$vS6g~jTEcJ`Uoo9L zC*Vpn!aA-EW*DcaFeZW_3VC=PI5`ugRUwe^Mi`(%hGoq&wB5LHiv4k{BvBRM*WRFG zB~=WF1!{$3C1ZxL0XopG?Hf-bIK#1$4TH0<15T-%24`OcXTJvD$ri3)sn9iHPVq%* zn>ztFLKZ`CDpyZo;GDeyh7mLs16R^N2&KswEXrVJb0}$z5o{&`25+U^(b$}h&^qzv z)kIhgAISr%u%_f_2$bTfD~<{c;ai{+;yBHGgOZdSSWmJ~kuV{6jc*7fjw?FAj7_AF z!G4bQzXBRiMT2nV{)G`Na7-07e0OV8Qcp;HpUb}h?7gh1QL?W@ zNj=HUn)w$GNV2gJb_%T**7qO>0>Kj^zG^A7B-+%dex)miH-&wIH~j3*nn;+i@@Sid ziVFsLK}Q;2sMQ|wONsMY(80h(`<@9smlbO0jEqUi!6gNY;k+N~PuB*{t3peyo8i10 z^Np)=UI45-I(*NO?X;B0pxvEC?;DdM&sQ*B)KbE*jK%U_thbBrva#~+z;^)&Cn}0w z9d8pG6gW#(3mfn7U9s_QzS|Vv1xhcbW zn%$6(1PmUDOd(2wfWtx<^H`Ab&f0GAST*ff0-Rv|avmExSJ%zZb;)2yRxsFRiOnOd zHV5IB2rItch6VyqNM;d>6}^*!l*<&l9ycYWRgB6ub~iWegGTx8LwdWp>BcT@ijz!C zflfu7inwq{Z?Cv%09-dWt#tq=o@LUnU7wp8YTnu`ZrTzz^@f@VLvvI06b9ZGxvAe; z?|`-7Avq8QwbHE0^`Vd=&B*`JQs_LFXW<%Fx)E|`8?#wy>qS{aEsE_5z&A 
ztaRh5taRgQtQ3U1mX$^@=8bMvDw&H9t(%pS$sl15PYSHm*g!H9t3m}WVS$;c!E-zO zF0xYrMa`22#@A2P_q8PD`7 ze6ui1SfcDixwd4ON<9{7ar6bp6HZS*Lk@+EIw$247Szt2>|bN0?now9wn|kGlRQag zvN9DM{u8_eDv8qNyO3okm&fh2C2qk5H^VdBgttB(-1U!!-yY^B9B{jK#^XtYm3W7O zl7*FuM9WkgS>7~2Vv@|2_moR(<8Q`U5eJ4T_U#I@I*3&OL+Nla0r}FTX@`Hz-ifFN zBhG)4Z$8|w&e&~5jcr1dO@uYz!VJh;&%H5!-+onRqp@I^X+|MOnd2%bxXGK3`OTV& zs%U*kn=+tS)F;D)AJ9x?B;1ic;g6&d-I3nIJ*vR)7kl%(Of@F0%1Q&FCg7t|MPsth z+j$Cs%3ozO52zj>9lwP$ZoRupsU`zQpqfx>bD*o#raUQ~wP$@v_j^xW31#b$E}b89 z+BO>G^rG7Lw7*sDr(!SY`Hzot2{i|eT2IsNz^*tU&n--oDTTQ)NnY^W1e7N+<2))Y z3?6~(kDGB(LWmLO<)t8h3Y$kX_ieHfS1ilKi!KYS*F@(i!Am7NHl_UO^_Qp58s?2x zN6%c3?8FCO_TqSG%a$gTrF-QxgJZL#8L(31rTnwsC}}1vf{Adauola|Vi*1Qn;!)+ zBK9uTJF*fEniaNMLM>tarHtU;hg?j||@&y4bHg9_k(UK^N?JIInaYGJkW*%X>$)W8j8Zl3|rV%q#SU(Ls>disiYRk zpRMs!1nqJw<34a{K%`$QauXrSEd-c5ybPdg+u4W23ecaWI5Y-6oy48Im|N9h&LpX# zKqx60l1ri}Bvte7VvI;-qRe;K4yKEH=)o1qvqM9ur4VT{o;YhXY#7ta%JRgo={RR^ zEorcIp8Rd2z-1*%ZhC^;uJt}A1@WOSWj26os`01h5e{YZp=0LzkbfaT+`sU6)P<6P zpW_hhU_;KdO5tvONGZ2s99&p7>CQq@pvEdXBhq0WSq}au>Ali=10J(ZQa)AXUq$qD#aZAc7jAKLd_lX6G8V9DH#6-^zN?4@L*G4R+c7(j zW;a=T3}t+;%gH4ViZ!rDDzD?B2+;Cbe~3k_rP}u{0~w%R0V^$l@>3OQlO7YsNh0y8 zgS@bOkdJGSMscZ2dR1`Ha5fd`+oBfwWH&tYuy6u4AT1R7mlmpx%o0v>T$MILrni5F zHjD^Qv~M9~t(yy0MxAypvWn{HN$Oy;jF6?ah-MU@>9znA>MK9~PS>K!ezqho7XUgGb<-0ln-QtrY*?)p}P1deB%bkV* zF(Edozy#r&IW?TrD5JRoo|YU&>kx-@HKH%R*gr43TQME-XK8{YxQHWU2KWwXc$SBB zAgsC6Z0E!iVJRRCuDbllxJPkb^-wYj|Mi#Q=sbj?FVo6~T(MfTj(}U#$fxiY=#^?8 zX)pS}ogKvwC-)E40dBxCvuVre$bOj)Aqsf~H@KrrsCvXzv7dH^IKcCe12r=}%m}9LsbkpBaS;K(O8L14Jdv@;EX^r+EeNJd#!&F%6I<2)^wiiUe zz1boT3vGtuVXu11s;~Sf&`zfv0ikmoM)4egek}G6lToyx(Q4I{ywUQ1Y`W*aUf6f9 zkGLRiTE1Q%Wws$Fp6x$WA*5{x!Psaw3S%q%7=y}JxIqg!k?h$N8Lm{5HL81aqldS| zhu5GrxUfvK_$P{6-{>9kNM|{d>&6YkzY3X12j^6&Vs+rw1H z(Qgb&WON<%M(K0Vdbzi%L{rJ0bEz2BM0z&tNuZ(*Elu-RnKLIzznx_0Jk|JES1*5o3^YYeF><8fdiauW9BYw)L!A;lOEi+%zje4D!rw7Jcn)oC|KdmF| zXLFNYMW=L=vEI#rdV3(K3IpZKD#CeC#uwQer@3k%GrB6`Lpp_iI#*XSsvAloMZMH( zmg?O&=!pp~dKUnkTjmR 
z#clOV^?=z?F!((yRdk*oQTZqdmu0ZMs^KuDD{66C8@jk&rA-h|ANK>H4r~uLx?wB9 zrUy#c9%vZ0Y{uxomL?-?53$3e3$~Vuz;+hBT2ymbS8`U)#qUSCTC3DG!1`JJblyug z>%Vg}9$cCiK$H%c+_u1^cY*bJ%&$wbTq({gV!``VP$Fm}7%ivR%+i^?l>kP#&?9wnQQa=G&AbVfPV=93&G9f?jCLXgsHu0` z{yDd-DZ>B&o+SfhNewNjp~!yO$O$@gSF-nle!b`x&-?EM{tjlp!+>`rmwLL<#_Go9 zPUpZPzT3=xm9QO|vdab$=7TcMAg-!AR(5)sYBX)-;i}saWUJMz@`!uIjQr*yG4NTx zaEQNy&*f|Ejb>s0!bCKS7VZMJ!E)#j5iPK=wt*PIezkMJ?d(@Oy#HqyCG0S`-utKn zybtRZMy3PfN^F)zjJ*O2W>22XmDdGx#S0{!jf=zvifj=nK4R3u0vRmwAy_71RjR~O zphKBLqBF|*I_9nnhg>l^C2%6bV@!^~byrMo?LHdM9n8E3LV8q=V-h&A5b*!b%>W&#gVQhfP?_;$GA=kcE4{E|JM5UN6RSD6FF$g@xI?zVc~ zZW>9$23I%ZEhc~(_oFt#)*RIF0Pt1Ffx+AK{kwXC7L-n-(ff>*U+7O;AM& zQ>rN8s>&2fxEXHMw3W0+RXcFNnLkmukt>NFVTMeX{_5F}Q>&gzAh;h@5PIcMp@)Vz zzUox4{7RJ=cmvT0*rL3Ez9|^UovGL?qZdHL@d4hG?bP)+R9LRzs*!OlPj|+mpbi7; z9*lgUs|`kzk%-5c;bH98m2pg~X9_lY1oB)oO3wvjU}G?YknZ9^z!*z_oK8BUz^GX{ z3XT$6eHd!O6&9RcNxo3_+jzil|4R!52jzfb$aq!v)asXXOww0X+st4wx>ij)`GNOS|M}4zK zMj7@>*fkuOCYQU8VaYfS)!R#QxP+7J&?mwLly6k50<(qv<4QlQ#n9LSH7b%lJkNsuPd&zle!`= z7jzB8bApf7v#2BLK`jIolDQtBv`}XifL0)C5wP|U!S-a2l?@$4#VN@UB~1P)bgm;^ z6P#(Z2UkUZwNgl_^8CBrn5Bj5zkywdzGi|!Uu)!pZ%9$nOgM8~3)C#5!9iLE*g;)m zUYDqq6lAk86sA?q0d1=r4TwWp_YwkhjSA&!^--y%B4-(3MOv@}xbUSJ@aNN+mEY&q5|ekLuD8G8FBmED$wzltv&HMsdizCuU2KDO|?W zb1)8>ZgEJm1n9OMbYl)2d15IHAATsDpZ+Ls`LLXDfFVb3D3Rb1?kSN#@;ZArMWoy@ z!urv6ZzoNn>Q5!0@7B{hG56!CiT9Xy-aA3ZL^J{SZ)L~rLAI8@A~mT(er4zrLpG@d zV&m*6tvrD;>roKf-GSHwMmuX~w~^ZzeGl&v0hEx)_G@u_G;F1=tVU}&50VTf+OBBr zb{#$>c~sIGLGdCjFuS^FEiCT#lGdPq(V74RF@)j7d?L+$H>9S41uj5jx5?Nvr`=*8 z#C1@?%IG?xH;~$@c`GJ0qLw1H)$&%*cI@;Eq$bZIqA(my6-A-*R;*)|S$gm7D;Hg9 zXX4UGVDj`-@>NLUa|U45jTI)8EJgsZjHLqtF?9upR&a71#LG|X1zq_T&k!U_a-b@E zvKH(oG^6D=U}-RnXxnj;jSehF!%%J~yuk^ceHlJr8ul)yWv!`bP~MuXy{_F4`(Q-< z#X9wgt^8cx7a4u7NrtpI4OuIsBiKlb)63r10^sQQ0Wu&cR<0C~mI9@3=f#%FdZ{Lx zK4JhVTHA6p?UszE_m=pFtg7-tloD5UsRMIVFwiNJAti)9-yAW}^oi=Y?``GSf7;b}z%zyxsx|a(S|g+hfNI1)~k! 
z#qF(nIj0TvQz+;ot+qmsRZg2VKX6-$;Cj#pf>?%v67A*Arns~$as`mD;~He|du@WB zaeV{eehrjy<;ZCuTkkh&50dMU6k8?5^$F%050^(6OclRb+dEmS67TI5h_mbXv8$v+u3d-p%O^C(aCddEEg&m&D)hu*p;NZhjBC{R5YBfB-J2PJVJgz z+(u0#s_O(I3dga_>{nEmi`=csnkvvX8w8&f-ol+|)nVbIFAL$ZjTb$%wc6fPXN zB8*vHI}4J5Pt?g?_BzcWMp!2X96eUn9bAkaVMio5>U6F%in#`}*;Qwm;^U}{lE*R6 z2|nKpbw*jY34<|qZTE0h*A2})`@klC!Yv%Cx&iL8$?XEk*71H_asf!nwO6UBI-@Pu zE$C&72+TP+MMCGICkN~-cW=Z! zAHlC-=j=l?sG!XsP+#c+D92@q3@nJuY=(O%>2_*} z>Pkuk5-RJA0Yw2r z)Bm2K2J03J$cgE11(#QHe=&bHn#2E58(-&AhTuqNy;;hS;Fqo^LjiSWo%7UFeAxBf zdW!E-Hq;RNM2A`-4Mf2p`r4VtzQ#&P;|)^eDuE=5y(waPy+`DE)Y5{3*Ly^$Ca;3P zxU=}K`-XaMx?}W@m8~Kq8Clq{$?@;T5L^76PT26Qt-@tC0#qzYMeZ0Ag?|HWxj;d> zY5sHl%oYfyvDJ%~7QEp6i$}%uo+c)OhkoTKN(e!HN@yVQU-1@z$wpHTP&fN&OS?Tg zZ4Cu*t*4|Yk>y}M56i>RD>67 zmU=oUkM!=kp@5&8xPPdr_&_Zurmsi=I!9k|B_x*d0Gha}3e}Wn%RDeL`suMl%}^uX z(N5V37DxxGROnO80g$$bv!4(>gUF)jx0Dq90~$H{J>Ku&7jK#;8@s*Rgx}fjmAzhi?Esn0fCKkd zLl_<=t+iie((uF1q@m!P$HJt4^7@meE)|mw&i0o*X?m_E-Ix7joHQHsHE9t5leU>~ zy-C}s;b}IZYTV!#WGGWj{vXVjkX6nWdm&AuAUT}*=@S@9=lZlYCXvhSQ`!@{ziz7d zyMRF2yWS`$;r2It-%+dGS#80!){B*(39;*z8fa4*bsaY-*iq%(Fra(fF_(!1GVsp9i!5M>lyXk z&`&4EEiZTCv@1PZ__$!r*d^BNyLM#RiL6ScAV}~fui`|8U&@Irby7grGI8K;?I9_#r==y{_Bb#jL*N$*idXQp8wP|#UqQ4>9W3< zFHdE zo=zN3c4*rR2@}#D9IC_HzUqaztZcV*-sDO<4${{~c-TfmnJIQo4r(t?)Y$#f%GkDE zimmG?a2Qf=_E*iennGmx^WgVemJZ6akuR?lEI>_JunD(cDsZP2wB^B9lHexd}woR^%2rxO4Ed_td(tEr=IicO&J zmD9r>(QUkeWoviPGFK)$P+B_nhrcpKy}b0nt7g>TFX>gNHO_)%Z zQ!^y30a>{@?exOg-gPtj;1F-fEQuB4%-sQJjxbdjxLxjq(qX;FDP@`g-UiwEw&xzA z3tOt;R7zoqJTLr31FC0#s$Ot3KMWs5ei?U-Q^5cjkt$!}-ebHZw{ve4Z{bHpfkJD+ z4_;UL6D>X-JsB%=iaakTYSe=J9;w+<1`+UxExeJ9U~jBt?_*hZaLi6GgGT?35*F9| zlTHCBT@pZ%B|q<&T(qHI`C%(BZYA->%19i}1ov<) z_9`IlglxC7U0HVc7tqqFSW+5qYc)1~J>w)kH>-4HS#okx+`hb1RF}L*1p~^+$Zl># zz5>bh3UvI1*edVLNzpc)1ow=sg#`j`JjHm(qOyYq(q)jvMUm@L_-xW8+p4!%a+_2k z_-V;J@-KBp5GPJl>;)&0&AC&0n!RZJJ;mRl=%+bn?gHNFsrFDuof+z=GZpoLub6&k z5?gTq0TyWE?}A1}?ZY@ak2H>&@reRGRz?l~?M2C9v|7F;k9Z=zfIrw=MEpfrcwAI_ 
zlBJjK71@lH9938ZuT`y)2d<5jJA8*)Yc^y17@5=!O~HXXtb%YSju?o@U}ly2vVUtp z3TFjOlI9{XgN59(u^0|tS?GXNOWO_7U?g`Sb$T^O(Z5FneCTb_~7i3Wm4AJUCX3w@codklKr$fJr|(1+l(J=yPUh-#wVT+aBI`ZBH~RJ&xQ<-h!B zl8(Q#M}g3#vAyj7-s`QSDf_Rd{(3IR^m9zmQ9T44HWCmED+Oaq!^dw3X_ z2N?&<(BdE&E`taL0_qtzFD0D zh72vsr15spbI|A2)Iq6P9%gXgKVV- zvZKWt#<$3EULwv<3<=$u1I1n3NAbIvRTfIVti|ONpb%?VWqBiV-Ee&&<{*1lUtr1J zSGKUHS1v5fByzY(BVf6(iu^}K^rjXTzgoGlEShqyRqaU2V2o@G6nBqr0XQ70q_slr z%`k=p=*ilDuRTL%120a1FMDY|^;%z=Pkob?rkf@ZI)f^6^NcrnnVYumdrF0N$=vxM zKTg(uV0a75pzaFV@FVL-Yrs9(Co3B*yKoHU}V16=Y0 z4NB0CkAZtc7qR6aG3zsHmJ5>3H|t&6P<1%Hmx*bH1!V<_ANtt-Oo7DmAN6MA@yXtE zHBj{M(7*d7JwzO-X!7mP{^))B#_s$4pEb?{|yJXPMZ#6_4<0=Jy^3ZeFD@IK=N zx#x|%49qQ44+L9=gIb|_U{MYu=VhF;;H~WUy4cjxaH$~37zd@hBDd3{YQ}1UG?i{O zLC;3|jhxTWl(TEgW z$^?vz>%jvkLNrnACt=X~^8zRC_F zeKt2hw+9lL0`19QF6ij7;bJ=6`;Kt$3S%8s&aLo4$N1z?KE{SlbRJmf zJaDRdARpeAKB-+KPdViW zw)I%R?)~e=Z_&Hc>&CxTz0$7ta)U>tSYWaW3dbO@NReSc$9S@OJ%&zpHKazZo&x4P}!ulz_ zr-2;qjOo()YIqkq4=i;)p5xi86mE(NE=DIeP9c1%8+cl;#z8-)LDR{JILK(}rDrvO z%d{eCou1Lt3pC+xOZs+@gVmnxZuD^Z^M0BvX8C!(`{$$O&olb@*$w5?)cIlEx?0UZ zw=S#y#bvin=@wZozo4g4jDDW0>ZL8{I}3I~-))9UKt8X#ry0fb-J>{Oj)KxQZshpJ z@bZ#grhFCWxtSfFVHfFm(JgwHC6zar#;eUzef~mVJoppP6)=v0(SMo7k zfj8&E8P1#&hrHe}MSdhg#K^pefE?y9j`8e;zR3TesS!Be$B7(ezuye3oRjP*%m(Pz zPnm~3zfrYLN{ao?Baq+tZ9KoYVf-IavW>$vRGF+&=UVwcQUpY$hT2dwQgBTyOvUHJ zYetphIKC-xPncqUn|L9^`7R_{@m=uMO~oy-aG4y0oICckS~;|$m9|s!+B%=SA*Hvt z!Z5!r-+;F3p8{PSxMiyS?fH7${1%~<-7quUoZu#WVrz-2L@(E%Yo%l#4)HX%$NzD@ zfmbm8HwaHit&7Ae26+kS=Ih-e2i3tApW86bEhe~rJZCUqjSyZ70#$AeZ+=T?XMMN~ z7Kb>$E}+qNuF^t=6teoq`R42k6ffS9%--TNS7?a3H#*EO2QxW!N(bG+lD{nu{#){I zk4$Q7K2c66&jUlCq=l>wMk7Q31+>`PwbDo77#j=C)gT*CIcnd2fB~U9xwgCiyNe9N{<9TbZAa@?|=Y_kx#S7O3@nMbD`p}iK ztgNH6OrbO>MvePj0;LOx z)pPxdv-eNx^q3k-UFri+vKfq>P+Eq&tG(9JYm&@7SNcjMS2gd8ns@de`{EC4Wq7?U z`!px{vbR#U?9)Z^R5srqySUB-SYhe9^QC@y{l^mRDQU0g#3S~sPi05zFWPV-cQFjy zoRx*f)5zpG_=!~)7E;v^IgwLE?+O!=LU2h}CU{vByvhVo%!^Ab>_5OTcO=teH(Dng z=M(7zaQ<89=mDX@RW150OldvH1W45Bw=(>29;F-ts^^Bv2+tSuJG4QRoLyZeY_45& 
zuL^nG!Fg5eACtI2%~N`h)|M?#*`L4o(&k%=yOnJXOR+;Fs%4hRi+;z8QaYD z1cGK`B{?k>E{VyK(n>Ki)E=1U#yCeX(CqanOt7Av0e;ko3Ji6Xj%?6VFXyX6z4d_Pitf9N}Z(j zQshHan~IIvoKQ=GTF!&vaWtcfEu7wTU34R{9oF)@o1C!(hQu)(;r5Q?EbsBdYVa1A zucsieLTD2A!xzCv33u3`P?ln?Rvk8RqLcInxOsMtvr}GBzLag6$=aDsfXIf&BAuJA z2xzU#tW)a_h~qz{qNts3?g7HOpqJgIO#5E4zLbX8VP2^lU}f+tQO` zsD+LRah#@Yg0or+93yc}ygqTQSQSZEuMlh1-oP4ZaTmu#yf}8oFch7Lh_hpJsr)zt zlEvNZ;Gp=eI-c1q)o-<(N3eNiBw|{wzb;7#Mei5x!j^+kdTgUqe2N0EHzUF8TOyk9 zL<`G+X*x~|jA`S2WFfQctU)jA7qXqGkS#db2JB=Dd%fm6C?{O+M!Dc*>wxq5)xb&r z9t}XG=e6`4!Df93Fs`NcF*z^Q-VkJD%y@yE=8V~}G$eBtHYC`|wi+(8x^~KT@I10v%9_H}!|bSl z+-pC(S^Fu7=0piTj2bMp9NBILP5@|CSz~STrK4g85wkB`i-y>oMmVAOw)EU);#d(g zFD}PS`v6-ZLiq~NaYKcU_5^~C;wGiR!F8IjtyLIUuaNr^R|SC4gHUDQGOg^_vyND6 z1qW)^+Lns9GFsuAykQxqgDWawcgpP^pz24hQ0{6=ut2)Tzw8OgoEbp`k$h0$buj6WuCq2OWSHPEA=n-iyu;V; zPZ5)AMH;teeektgJ|25IrYon{&g;6(M(^AfNDg-8ZjaI7$2N4px!UOIFQ|&q`z57! z{1!(ssh*hM2KwVvxK)>1K{rxo^;|@TP8em+NbM)Y# z%T&;CPlU)|&PmIhok3~|1fuIN{LXt|Mx;+-a5E`kFhK~ukz^SoLln{1@gy;@3|w)- zqP^pBn@F!D?X~#p#oHMsn%VPdu}=F3Qx&FEHbc={#KfvP#^g!a6~FSk@6C!&{nmS_ zu*IzfZe1?|r#2mfQ+7PvO~2&y54uymz28?IGK9~`DUZj=jh=3y%${k1&1&vxLaY&lNy}<8>iMuZ&&RZYJfh6=NDIVfwJDAvk4e z9d&_}N^s3;&I(bMuO=0sU)|L_L^;40UhAtF2KmieO%_E(+fAU;se^1PLCNi<_x4w_kVK{(8`L800q+6Of;gRm%b)(w2^|<-=s{zatvA3K7p;yL5+dX6arDOFqof zoqMelaTw$`a_KrOnar8jb5J+77pz&?K+p+kLjQ_VZm%!?oX^n_L5YK8A7?nDjU;cC zmPB$3etN3MQlU_`8~$Kz`b0NGx)Z4Z@P9KD>y25cSE@mAu|8Ft{6q|>L6*g3Zk*;u z6g9+mxpC^ZyPlxbf3e7o>Iv%gm(eZd7O0gaZp9cEfn(o#9%i&GhI!IADxpS76gs5v z60LOxu_M#zl#nt6D39wW3f0SuRu|J)M6ovt@E42Slc3mYakgs`O8!~Y`-gaG9X28^ zbN6#r{Xj4CjW!zWBHxJ7G{$puBA6n@T1f?{4t?xahR(Smo<7!y2@U5dLUB!EfWnyS zTsbJRO9m`Y7wbA0W}OoRNNlIVGqCJV5a4?Q*N#tkV!trIwr^GG3qE8&mAVZoxR!w; z%CW}SQ*oS+%R;QGwom<7Cj#|DBYccs(Z>O^Iaxw_9=JjhE7XIb2nfERUX4U!Xj3w7pPY;Z|eywd06{b~HO z(p(pGRq~duaI~}R$UmcJsu=18y(vP0EbgrRethnOr0etbtiP((u?yDY==K)U1y-QiKzS3L%1!S8)+shdtbM)4X|zd2l!p zpDr}VU#IK&rlZ4`0h4f^0{$Ue=TK`ZcV}0cp|``Wm(&}5wJ-s3$kI@l=^rXtxhsEu zo?o=!s~f`CO%%VDseh8+Hlf>uw3SoXhthXWyYV^wC99<6bQMGO4>!bo 
zeX~?zUafY&o>^?Q2NW+4Oo9+KDVCjRcQBFQ=E_d#hkE6&wUKF8@l`0BVk;cr#v@&G zDl5S8bEg7aYpT5k-w-{?mtITkN5&^W!M8hWkLUW0L`NE8D1^s0>>X~@YH2EcAkKhS ztCn#-F`TmK57GwiqQW{<5YRBi9&xXnkXwZ%KBlW?%dQ`1{PEkdEw~EXdQeyge0HkE zsTds>frk5zF%oYeDl|>V$Sih`yo`FBq$UFUX?Rx&G0_DRU*#)>sz=$!J3OV>x3(FD z5bDxq3Xl?iBt2|%U^RB##P<}?!spCRix7IgTs?hhb2yBl9NiW{9xF&XDMt^nvw}Rl z32%<}s?^y$!B5;IC&8K2*E3tA(V9lz{7bi5vps`<-ogaG2|9<;FD^8X<&%y z3Wn&DAu`*%#W|ls=&fN4?QARHPsI-cMqF+|vM3yo9ecBcn39qqq4JWkkVIsy`BnH> zhz#8r#I85BxCucsh-!Z0XG1#w8jy2MKPCX|SDG}mjWy~yD0U!dPabyyiZogH?1s5Kt3 zfL!#C7Z09Qp5qnGF?vV!1FN^Te(zH(*I+SD zS$-uzz7Ejqx&fU;SM;iBuEnXoAT_7^rm|n=#$@eupZ9DOi{Tf}xam`>EaRUxPAv8L zK-!7)nFr~?^A10p>l^PIR93iDjKjz(=_!Z$y|#z?mn*g}vYy^sBHDp6WO}ePW#~J> zb*ODZ1mMHoPeo#1q&cGpBoIs+u%r}9OsW!h1T_+ZCgV}aiqs@@wP92|nr<{44~a0U zB#EozMX{G8-fzKdRpO36TSG;?s>&Tg((1HBzQS+Z@+H>u_`^^-mq#7Rb>lD^wEO!< zIn_@i@t;QaOP%dfFFI{QaU%Il63MV828jY|Zmg+>1#dG8-=*LBr-o_p@^d*6LJ`ei*w3c2q_foI0A zr70ZS<9I;F{I7up#2b)l_ubhKsX7pI0gs!%*fLX@PQp%O2`I&u5H+!0T@hY$ik zgwCwLC7aWYMGWS6N5^H>tP8vB&^lBF#aoO9*yll_@~}>L`kL6-SDQsf)~0z{!kj*viUqnwF+qp4x8zc)P@RUxG0x5LSd+j62&2B z*EKD$J9Q&gjxKKj%8XbsVX^;@5ta{?Ls$_IJ86!WTRzs!{=_CPt`l0`_DbjoN2d^M zMj&bnJLxH;LFpzZdA5~0Mbo#uu@?X+h!m642*eFFE+vwV)I1^9L_~d;X=fWL-a5eT zh0?Ft2KJ~IzD9;jkrrJW9>vKT`?2*FeCLP`E3`Fg zIc*j zHhEFlicMTtuO{Yig#dtRHu=+rb{KGjqPfY;@Ow!M#AYc~Jf8}|hMZ>+OdgC^vCffk z^%nCIp`72KE;OS-d0iVTYJ0Me6CW@@k!7)a07G?&CmZ!s)9re2iA!9sr@5X@E)qLu z%{!V4l#BKjV(S$CGuj5a6*Oxikd91rX`*J z2&?I&jw*=pgNRdleKjHi4x>Tvh7%qU8P_xlL~&KPnrS(l-L`T?Q6Qi9xi5_se!#9y z=wVn<2ehJM6Z2kCa~nr1%Cn|pTEhj^C6eTpRY{)kSz3-=6ePd2h345U2};;v&1q;M z^I6&5I+@&&INTwO9C5hIqIzC@o9QxnI}f9`PLuvc6h+#;iD{M@9ZdL?ORuo|5en90 z3(pTMEo2z{SR|NOIjkb$2~p{6{g8mcOm#;L%AywMVo!qFFl-%%LJ_u(TZrF6<`D6Uqd17beuSWi z4md*8XA+#shbWL8mnT@S6!lJ9Mbvwc$X(xi8Y7@D1-ci5OGZs2?JKxlGQF&+%!32m zL&-7>6jV$*H!Mg8qdRQ)bgh{7a6PbNcpTGyg3;aOMJwz8>+I~o=JK2-q3m60pf(=A!QRNeg(6aenb#mO>nfTwJFT+= zto?6$KqvKiKG?I~0ORTfOHR9pTwkU*D>>8EVt_x-vYCrarK181jCM=|iHNCN1&wB?NwMPVDHPkVF{zY$>+e-A9= 
zMeC_G2OBL+>>jKuNjGP({sun?_d?KHX}>u;QOv7Yv# z_@c$=6qSjB;5m1Tc4^qx6(LmlL29Uf5ryVYw?@MSm~2=IEp061C6Fy&ykN+Ey|73& z@@h1)=i}_^5OTVCv63kfmQs|h2Fqu)Z26RCG&(zR+LJAni4kbfkpe>sNiAzummpvZ zBI95Qa!5gIo*E(2P7{7{n=&d-xKj2{I(E|ihf1aPG?|c=(mjI)nUJ`mS{@Fa!pDtk zOCp(Afyq~%G%)FBe^#jTM!1;=ofTy_QIa6qR`!R|Qz!y4ifajU{NLIB^}yA*t!lHX z>M2V->9>IM!zB00adRT)T}W&dY+x%;qJq+k6U4W)DPGGyn)MrsWD^+PS=BHqGj`@g zf>#`VY!mS(`{DJVN?j6b1|mLGsnj+0{+@WvAw%bGu8{1gmF{BHx{w6ulh`G&)Wt2{heG@@G#o_yU1Z8Z(=;qtu#D0)#4hMcxl|7zR!`p9L5>0SL50j!^>4>Qbm1HPci*p=;;EJZKST% zbz$VQ3O~T1r^xjB_x=_CiGH0;wyU<&4CbFq9vEPS-C^1?|L;&A4=Nk|c2usBp3rdP z_lDSi$vdy%xa!1(*Ol{}&Ekfxa=GCqF#JM^0f=E$!B^CF_@gh&sVitQ`=Wc=o0fVS zE4~lP;yQR7Vq_g*T8KydQ&-u)Ruu$_l&yz5W2h>v2;`un{NfxLV6sop2nUPGb(=%b z7O0Sf-dGtl!J^|amY;(Rub4H%G`~F+weYBzp(R zHL=00(MdhmRm~Wa!n_?NjsxyLs>i*)OVE-U~f>~0DW5T_0yG&GG(o#|ka+z&M( z1wb9^wN~tVk|)RvvYvkt13PyflcfW(LrPAdoFRd&ghoAa9isak z=H@+MLK+y#X+Ss0{kwhz<&|!f;@!|K0VJ;W`RZW5AHL+b1)eo^eu z+~}2X!5n+090$sz(il&o(ikRD5tS>20qUNJ6OjH*z(s^e-6K@c_18&SrOpZHa_2Tu zs&kDwz{rd<(CmVi5-{j`gkiHo)c^QlQ2(m@+D8NGKVPX-qks^mS`*E#n>&ZQHr(PL zC}-EN1d$Sha6^YD8|s4K53(L9^PX2K)ici*g+Ja?4r^F9Aew}fQF;@C_g_wO zWm#1zG3O9A+7Mcs0SzQ`+Pv77$8JK&ku!EH(|}Sl5eGvFkw7>5LdLCSLpeOAA1&~s zCrv_poiJ=P`N=82q2%T5Teq~&$^hDm%0QVik=@9Jy$`N%g_AW`!h@nNQWNkSr)uEf7(&F4c2xUvtZivV2q44lZPkmF?89yxX~uA9^&_#k#jCW46vHJ>=pS88 z8zUv#v*vut$YItxQ736hh-)h!a3|td!ZXl=fmZgZ$$ke;3>;kR7K{{a&3T78e;L(8 z9EJ(OL?RiP!zkM?TMICa7%HOmwo?P1(#EOF$EIS4JC_$1gDw`lU;(=`EIksJDXfAH z?)6=7T3%8}d}7Dngi|L*6;R40A7QKYrUBUOJPsq)cx+jXs;`f1 zX*Z(Q>s49A^M*dxDyq3h{fjpFH_{##VDTwQwKU2%msMUsZab?c)2?E#Ch@Av{`D4( zZ?3Yea8mFzdVdQ3Y3fl9J!)~}Pb3xUf6evxk#*bL$!dVHvOrm#3ysXXoiF;uFv8Yz zv=_=D&oNT>J;@_2795X5?2`RVJ=(Ge??>Ft_aicPhSx7JZ2oTR?hHN=!*vG(ATFkZ^;Vqc=5{?p`0aY1>*s9W z;c2HzW8ki*)&dNB*K48{1Ote=tvXK&=)K8<&ptKS@Ce8^31&C8V26V>c4tGv3Fp$N zK0CJcQPqbQ=8$C)Q~9*OP2aVmji?qTv7dr++Y}nO2Eo>3s>{1!DDJvVV#_ z6hjk;A_lRLzuw~FAZRh2226?I;RsvNf;5qU+-kOn#nm$+tmen~1x>r(+ltsVB9k0Z zoqe=KNYDymB)3GFv;f*8rv*^0%OHw2nVZ7e5G`RWnsnFLl%3F!5!bmBAx@fSn5A}7 
zzbqN1@&ugL1i%yWbFS3RPlC_uAJB3jQjVbbz=A1-Vr*8ZMrc0GKTrt;YuR5w1Gr7q z6jK$z=cE;MntwKdJ1{xfXA0y1v{ZhiM;z1|H(d3CfV%{6NG@y&`2|M)V_2wQ-!y8s zbH$Zdb@wFKREs&eg!$mAzC*fK)WUVO!1D^#IfcmxuHS?%y;z>@H=pI8e)Soyn(1eI z_gdCq%}9dm{&+126&Dd8z0r=gQy7@Vh_O8vi)qm9B zClQIHBH5-R*+x5=^|G69Rb2B}%rzU0dX&sw+Mj018;)3)W@Ec9`D`PP@^QO9&>q4T zeE0I=+AWvuPh9*{+Mjd>Dn^dDKbft0e-cICS3}zXMrp^LLo8^w9R~N#H zWZeW|R~gp*+H$Q0yjofV&n@xv2w0F~*u%2lUJnw;eZMai*04>t%+7^7F1P@Hh!~|} z^Dc%}x)?S%;Fv1cn7}zS@0L@!uYj)b^+-%4v;Wz2n(>Jb`Q=aPkIPihl@>$x$@M9` z;-1(M1I1=w#(7>nu<|4HmR&&CwG?Vb6vHl`P6N^mtO7x^WoDrf0K0@1iq2JCl(-x& z3Ybl9&>-h=(`gyAOB}n$A0vRYQSRmbhseGS#Xbx$+=k%KQ}818I+~vk%?pp!!A4f3 z_qxqvdtII6GVZ>QHH1TBt$fj~aB_<8YKtXBTi9OY2BxjW>R{7hjOCmAn?k~oX9k;e zNma{-jkGfsw=rzw*r5Ow-&M<_olzyO669iRfa2~F)}WUmQ~=mg1Wc$q$sT-4RZhE@@Ue-I21GOpo+%YPO+ii&G5Hh;RMpBk$D_7^ zWuLE`xDYGgnZa;_frH`Wl&KBQse2Z=u9Yt;>~1DIr#>6@@hT7;A8 zT_DfxM1iZrzHgpI3LmGPoz9UN8Mok>O>TD$)tL#m58bT+-JuUm^I;x$Ew5IRzM0&+ zrKNzZhnE+7b@~tksAs=f`5wSxgAL|23w=0gn8P9aD;C0=%6vm*L_Kd9Y`ClXX3EPF zI6k7e%CZW`@cj)I@_ex`PD)-d&Zo3xncF?dQsApnsq!i5x-?Wg6(z;*q;+?l z{Y_1Ej;~th79b9AqgK_QAYx3T*lfOXyHhjeK77JC%ieJzd>`1K^abLsbPb$ zieA#O%zIGENziN#PY#FzmzNp%Dr%IQoul_O{|UAkIQxakuyxJ3TVxd&%V=q^Z8hMK z&FrL?*_agJvLh*iQ9QyPY@-wl$MQo}0S^JOX z%D;&1&x`Rq2gA*oCF2HmEV>apW~@bfX*j+rg}JN-*sD%_GL9E2X;-?g$~wpirrq6W zE82(e%q(smiG>-;Z*Zj4Qa?kRt4ZpI8VROd~Zust$!b;qoShhqP2J!hu+6tHP zN!19$kect#!WghR!a*CcL+cblZk2rLp*EtGQn(E+UR$h)b(N-4)NIPaND1Af*>6P6 zf@83SbCUZ`|$ zik4>LwrhEPl)CWx}tf$6_=-RuylLBTD~O)kFO3=A!}`t!I*=Ha!rqL z#)$cwJazcF0@fJ1ltV6=%6=i@D`4!=0%OYVrAs$;`FY@ochiRP#*CHFrg9~$JVvQ; ztk9_d02xzgrN1y0#tt0`6H-;1vNFZA`S^$ML}ec_F<@)dc8}2xIGr_L*Vt%n^Ycu4 zyX2{U_&!tITvsnrcM$@p=>~HVoj55*=&mD#6)0p?V&v3;U46q~3aI*qxji}ti#d?H zAzY^}<=A(&unW|JlGd+xzm^vF-|ix$*9;;FVFMt^93tF~1ffq18p52MCs2bX3wjcP zgJ%BL-OquL?&h2BentwcX1}P(YJ{n}d(awX{TnVRR+kLD&f4FK#9drUZCD1a!;CFZ zcWVW8Z^K#|E~VzdJnq!YJ2>hZ;WoD{xkW?zBT_99%J_ye_iiy$^Rn0 z-;W|1SfSi@btcR!lW_|q*eVKSWQEs+b-R)<5z#yGE#e4zIh!=Fo7H5SZ{{7{FPh}1 
z{vf1Ppb(ro$(KfdL%#G<=7{J!{Ysz{d}X3QAuNyzT@on5fN@k!*<%i=;4~Gf-~pv1 z5qQCyT9-V3XL>*HSfci*{&GPtxV9b|v`6$Z?66NkEGP~DX^URy@Q$%d%-e@SaG;m= zIK2=ObGv63Zul@wfH%;g8I^N4LKc7&6$>yRu{Hr_^ST#gVqnB%1#vccJIsmM`hi z%|EW2(XVbRemu3v+gtOW)}xXC1dr@*w{`E({`d8e>lsR$P2P@ceI1wJh_|!C<{`;* zbbAip{`b=L_wjRAcdF~PR6f3(P4iDSbSN`>SLMF+f9!sx62>;q*i7c=o8^2Cq5&vM zWZ!2Np+dQQ6_`#)#6DJ68gRDMWLX7!2C=abBmVs#`BGI2yN##_z22;srV8vo>bfx@~8 zp(gX5gj)BxG)A$-VS6!);KYlgX~+=?Ek!_2h(KUNxZg>$C7RH9dy1_otln)~0U%6??lGxxu5f-O*5SE0jbwo%XP`n{Xb@Jm-8k)rCJ z7eE`3Pc7kw)rz28~6tkQSnSN1HUCaOhkL@%FCuvApZ)&0p*G@qqNl9R! z3pTSOooTGSeEI7PIj0dMCldRJnP4W<{)tmH?>>RCgt%6nta!`Tw-OTk6am5aQURfl zPwg7)_}RON$=*>x!1FH9*4!}vMgV-~VG5P(P;x*}`qA;y!UZ?cinF=iBT^P03&6Yq z_Mw<<%3z39z+84fha>2%kvqnh=k28PV}v^@kC)wJs7EfMivZQZo7BFhbikZ>TTsP! zxZ)F1_artIfg(oUlazxxpW?BF@Js=~pU5Oe_?(_$sqEEHvU@gcV0Os3_$eTr9?O9$ zm^34&V5Da%3`zUBhs7s}UHRO;JNsFVJy_Y03d*i8q++IX>ESLqi~>dEiv_^7neKZ8 z4pxA`bphgBh-Ds?R8xS5wB`VdqBmi{y&+FFoV>Q-HPX zk^z}~)>#M1co#vEPZQprdK7eUb#4TOBMP%C_6EHj#?g2)Y~Kmi?; z+m{B;F|jJ=z-%z&98iH#t>s@D+SRp&CN*koXi)ggCxZfkZfRfU-!xw_CnU#pU~ZT_ zx){z0sc}~gdodW=fg`)~LUY(lN*oP64W}FwV&9Z0SaU!~3S8_(-)MieQ24NZy<`jo zzxt9f5PYdoDEuSaCt)C{JTkfo~HaXcaA7vLFUhXjL z54((nMrBRw7iJy@GC=f0Gf%pTnFspCTz79_=85JyjH@#91b7TufYJs9ES$EL3N$V$ zI_cSbC>SvxL3%3m_TU_6o0Fxdthc9P^fvbpFe4>14SIVz(%a!3(%Xsp59S`-Xo9&X z`XMS}iP*T&+v{3z%mT&imi5`1!rr{@K^Rx9pO%d)Kzs8^qK2fvZ)}o~1AgVjnyrh@_i6cC9vm^~-dirqzL)BWAr%Ygi1!zbel)8xCN(BSE%nP=6f(%$UVa)TSo^2)w?| zsL;V(uJ6;xg!6^DIgZ5d0+W-$Rno>Ze^*sjT=6H#jy21U<{k-$LnKtZQpj~6mM{w& zkqwA0_-z_-#r-S3z~A*g970iq9x^P56K8BO+|-Fp`{@HZAYajmN=4EsW(!8P^R9H4 zjG0ukwt7C9$7qLC5~z%4<(B?X|ZC9Bm)e>DQxyo$WF! z!gaw(K-|ZqAwzEiXM_55!W`Ko6O3w=ytQY#0)kq_REda07rMz?h4IP?iGkcuh28=! 
z3Vkw|nCcn>oWhBYq)GRQKptrGEv${40MIMxTbhRg5XMaXiL8;pGxEpU^u4XlO$h}Hi;90z&MIdttdLQJ5dg!sG{pB$3@WN zO{y73*IP%VaqAx2vHxTrmY)NeaVCn4K5cVh082~dbH3OpOaov5AqD-6G`mt3xuXzB z2a;Z``C8-Oq=Oh!P?Ssv7=PN?x$b}?D$?5IQR^r`6f6a!z%;8AD#o8y@TZ$SX|b;s ztu_yOlkIAz|FE`#^IpFG0Zz88GpA!oKKGM*vz#!}?9U~Qi)+>MU;MLo6z}>IZ_#UH zrKW_L2c`9+x_{|vVA;6pqY}=@Y~(_ zQ*?sr{Cb)|edTl49zKxOr8{FgdBa!`C>e{~`*zAmlb3ri#3^Gs_{~aokXGegUOfti z+T&3$qz*HrZw{?K!K3qRza9MHoCAbPp#!vCDaP3!Zxs2FOUR%5L7Lpr+9nk74EvV>Hw{h!8qAaWZxJ;_%f7oam*Ht@LfK`tOPwRx=BpO|t;o&{wSmt-ShJ^TGIl5|iZ z(Ap^1X#s|D*=npw`C`Q<8JaqlOFhM2;kC_vkz5M1$%(Yby=4-)iR<_LF6mhtkuU+b zVScMrCa6FAq9F;OiJG+navc>8lTu*nhh@Gz!F6W%Fv786cJZS8Y;w%HdEv_d;-hF$ zNoY){mcelBuy?6;euCbLe!fEYqy!+@BwsZ26ZA$xa=_Ku4dO;28JAZodpwxgHP<*3 zYB)U9*lgxYDh3Huv)?x6&Q{mp0t|Va3uq1H=c&FX4la)2K;>tX3(IkE-Z*e$mkhEn zF#`EAAS3bUIsiV=!*p+wLB13~-ch+)ExU_oDJh}&ikml?T)P>pH>%5*&TMks=qk5% z&8>OejL4-rCjj@^S0v2OA%b4qC?eQDc2HB*U}80HQmA+r3(MmDrFlaqPfBJS-n=k3qM8k=<)&$0qBVs+gUa~&lwP0WwS$eco?9Wvqd>n7vpho~ z65G@`PsQ^>jfg3p=WFP2JedgHtVjkaL&^>Xu0GEz;_U0pmS%-*X}_KwseXvNoc}^y zQSBSG!TPAWh-UVx6wiI?Uw14t^Zk4>0p<$epiUzHKob(U4uesmSz|x2YOL5AG%YXG z)!>oXAonT21Ziq?#LeW$+mjytUJ@PrIj!ARJyu^WOtGaf#R)}}hkBw2Z4VmToavr8 zc&PCP9l_)30ZGSAaQR*c4HS7ib9`TC2F6`=5SS38^jg!s^yze0@o$7G-#d(?%{1n< z?~_o$G2K*WeNbQIyzM>E>jbQw5*!i%v?5rAd>;4bDLy;dZ!mxQnR^zR`C0wIEb9E& zCvoEQ>Yz$V$MDv=pvL&~fS&9>^Qy&SW|qY`SUcI_EyDgG4Wh$qnqx^BrBdSe1N`o(ns0QcIlssc zD95xN34gfgS{1QSgYu`w{l4P~xO%o2yl%%?9|Kk$)TuNjWqHg$Ow zpPaVklfwvd%|f;uE-STYqmm95(FIK(viNAHrUnsU#>^KP5-gH!7?{KQsloOCK;wg| z7krGJ}eTi}JsB9u1O$El!AG1luJi%@7Sv>}eMT#U+ zi$lM6RWWboSCDxXd5fpw?~3$K%C*l=`@^*v2{Q>W`7eo5yr8uk9MlSf)rgS8p1p6|+oQJ{o8W`@BA?-bMiVK&Uq1HOKa;p^vJCizw23UsX5^lv{inJf`7&XqMPe^NA<0PG2P;8>#TE-e@EkHaYZL zZ8T&HWJ55r6EN%TTRZfw09D)8{r(c6F8`rlRil7m8_uaTxY zuZEu`7<@-1`R^-`hXNGQ*81A~D9JaTB>tT~tBos&yYmC19^wMCqZeMtd`~un!R-a9eRNPO$61&^ip; z|7VDm1YcnyWxkg!dzrhL2=c8@j4;#HXBsEwz`c@z)k05fz!iCGJ@m5mdzcf`6Ack&y z;U<)|zL9-Y+P}C%z(FJ>?c)L`H?}3+5es$~7RBZ1B6U$MfRcSBpIoF0s5i(Y#>>k5xj;xKq&7X?eQP 
zK3W5gLr(BsTpx*}c%KdM%x>a6B-9ZtcQF)$f{vU?J;PK)lXqql?uVxoKBbOmNkvs~ zo1JyYH(hqGsxu_sobyPmUzh=?K(AMZ_1#O1iozljq} zgd+{?va$b=yCKD~!%nDM>3rNJtieY9+h4<-^D}?{3}+m&-=7k2Y_J{ey0Md=>IgM` zw(AabT{IfrZtqt$<=|5ek-_GE_AL2FcJg9#e^Os(g!pjo;Divn1oArh32eU~tOf+0 zB5|-rg!O*HeXz%$qv(CI;(b5cJLT>2ek$KGJ` zDCB0?_q=9=T;2L4>G5Wn1DTTr`nJqw5C@w_X9qWrpO)#|M!IjVR3vFR=@!%&`@nfY zwE3K}MO3 zNtQY0-R*(1PjnQv6yGx!uO=eF^Ugf)q7`jdlHhjj$(|b#l1>dpQyUm3zoVCnJq02( zGK48S0fao{*;mF#Exp71rl^rpSZpZ%sCLyR`Z9tDwO)@Ev7=SrM`DphjTR3gp=l)q zkeb;-XE;82tXw?aQ3e_DhA4+vbwBY>qpGb@RWx)ehU!$wKGuf&8*-7|lU%LH<8tL~ zgDw4A{>=bU2YN-^r_KUe6Lb^3kX!nj=X7kE!I28Kq&_jaAwOeW3knU8%ID>*%z736 zX|jyw?2426a39XL>2_oRUfh#)?Y>Fc&0ju^FRG(#NpVWUgm!Yymou%M>?m{S#7U(o z5mIN9|LFS@JI<0);sTm-@+TE@PD-WGq+dlV4708#N=Z{>rDijOt^L|&%7dlrK4L!_ zOa8|{2KV`(SkP2tL2SsKT&^x4=RHxHl5boaS}e1Mf-%#h(->XbyRcY%b zY%Taj8K1lu!zcIij$vG!g`73$QK(kp3qp8&VlVLDIN&-ty)N-Yd)L(L)7LmB`r1Ju zfgd87ckHSw7{y$5HyUI|!g=;ZV~u1l65>j`q>y^Ls(l$5zoT>o=hRH$i(~y((WwxY z@Tr4#lE0LEAi@Kjwn}Cm7Ci;kbErZZkyz!@Dpf94v39ajsM5)mY^IyN(|Zw~SN5kX zA?boB?TUYV2DrImf_y{S>2>j#o5xXvqja3xq0(QUeR`eXyuPB-+2mRkc>s8?R<%PY z)dKKth!vDS!7Uc)oLHuFa;Z?^MpcontNbe*0^wQyTvj>?2C$iSwYIt5Xp*FWOQ$9z zRMhuO)|*6#G+U(??RD$cwS7fGEYY2uoa|zwUiaGGt$h>BJ`Dox3+{veOj5Xh$+;Dr z-k65$w~Priu@c>HW5#Ba_i(Z1txy3sl_ilYPHc7CTSD8%)HcTOQrrBJA79q?@v*jP zfwq5vwzn*6`(@4H{ZF}n7LxksitSE*ZCU@~ z#YX^{G zNP@$$HVc53X!Eq%JY4`JZ$b&6Pa2@JW0ebl&QSTBDxVWT8@0^DkKoG}c!J@*aDQxkx6AK#Z%di?dWH%r&a8`kymt?qx(H;#L)i8zyU}O_;-0fo zYM!Ol7KnQ<7R%9)3V`M?YH*J+48Iit+CN@75cm98qQwd4{;#fC{-D zmFOtRen4cTi~8&5re9giQ)K)D(prCh{zaMCI+I95TIRoi2Fbc+=pDk zy~>Fgbmj4!(7}bpfz$2fp5hWE1!CBTCt);kgbum#UL5-SRe68gxF2r$l_Wt3=wcT| zlpJp_5W<16_6Rp|d-Jrnp!OCDU=EH~4ur5cRvEd$l@C(+kSZS%Fy?aLT?BBHw|XAY zuSdCVdBo*3GsozoObExun#U^+A$&}#Br*a;mrv%2c9{@>rc4OO0V`L06^k8d2SQM_G9jE=QH8tmxMI4pj1YXo6pI12r?Kw`NBRCvdF*0{^W4~49{YT! 
zJkMe5KiW|%<7DR65qs1+Te{@uoI9yYBE|EHJ({wsN3PZ38Fof20 zv!4-Pi8u=-tv;G#9*s&^{30i05D7aH32Q+lw8?5oXw)UTqA^|94AuBk8y)fj7wev) z6%@o32+&$S!&`QG8ktQ_$uTz4utNjq)EKt1PWCz9i{*$9sxZ5@hK5m`cbCh%m*cA6 z$v%r6ir9h98oMNe9N#6|-FzLknM{TkaK#@zWzsYz&S+giJ*TxU&p_v8fmGL}uR>_4skuwIE!Hz6O7vp2Swh9o9)rYnmg2=mTS#2z#w0G*WqGllwZ!GF z&X?*I64!+lUF8>Ju@h0^Qnm78y||(ZT1;Rr<;7~8d9i|gE!Imy&?OoxBj~alU$Iy( zj}Ua#B~%aE_NRDu z^hAf0V1C~H+qj+|X%6LypKppwi*(c~fz5@UCmxe8P>I{Hpw(c-S!j;af%f8*g0sm7 znp9Y4#sp>wdO(mjl-B8bsqFYx6Gn`>ZxmHmzePsEP*HN!q2K+2?bVr;-+m->W;I&QV#2Lw1x5raQzCslrurbdLd zY?4+8D1f)Di1uO28mEQKa|TRpqI?xV^8Ga5Bk2Wpa(+x1G-HH&oZ~G<1X`L+&II@+ z9D5un+hBk-ScuNV*j5v%f9%HO~8^{T(R7iOqM3xBN}0ZvrycRHp@Iy zi>f3I96|9lN#heQNle^=rn`66fmgotV+WG_U0|ZEu)uRGs+3_uU*A~V0 zALWDp3Z;NQdvDY!4oZXL zdsnTSw{o)nmG-V?P}(GE0`%Sp{G56vp1HKNR$+EQ2P!l+Ezb~Gz0%gb&jSQS*6CzR z^xzKE3m)28uCEc1jLaIxJ?9Z0D8|h%93G0R@W_~*+&qH0I>cM76Evd3BWC~O?9*pP zaOc(|`!ue~`kLlb2b=g@-hE`B7fkCMD_PyDNdx29$0@Aqy9!BxKYA8cHO(C9F-ADz zg9r$BE%P*=?^%h|B|9*@9iK5jre105Kf{?S58V2iiXj39CtO*`GE^3_pfD4#KoJ28 zlyl2KIq#1AQ5H(h7oe=I68+c96aVNX$P+cM2vfyGTDu8wuE`VgOO!SjT2{9cEhm?W z-3%+O2=&iD0My`X9l8(B`->fr9|XXQ7^{e)2w7W=@#xM*fGMsN z;9dygVBQ)8n%aXLyFu`eerKJu3TbWmvMjAF&5;$Igv*Fq`9ZEU#Ew>ZpvT$5Di>iP z?NsybgS|9g=OuLf6^Ef)m>XR+1LdQw zC;aEAo-}n3<@|`g)8HXBaEu1nmhwY1z};gsaC|u^EExCVS(r(W@jHqUGAC7j&?7$y zfTIafpPU)&V)uZPRz<&MgOKbTOX`~EX@PGc{)GLg|@{mjdetC&qe@G4FM$i zM5(v1y3W#@N}s2+mPdXFlxOlZU|Q@(3SCY)0yMv<_Q??>i_PH}EputC2Z}`!#Ddpl zx__&;f(YHz8x9&vX7ctPuZP9sBvJeipQg zJ;js8+hjN8f3-wHj9z|$M=3dZeBa3bG`|1eMq#tu3jr+?)BZ37Pn-;d?pS>VnpT}E z3IC`p_#w=dR4Bt#Nd(Z2rAn&C=+=Kq(|SmiM5MIZcjhyKMyfizMK*rLPR`IzRe{Jb zI7J6mbAk}@ToSTL7%Sn3Ur^X2tCbUfq{E1b%5;*NNJ1akB8`>ftO9%?RTIV}>=aZ`Y&w@6Z^nWlC% z1h+b?O<~h+4NT%OZ zC@VdY-pG-xA%R@;Fmo94zBk7sS(&$O@Cnb`y*xnbPR*3=2@oIxDFCOGQ+F6NK++gy z!UXPQ0@H|E^6Y_jFzh=ElshhNGd}7fRh@Y>GFMCY4!)Jli3w2l90iFgNt`gfYY$Xi z@BY>QGEAT|v1icZT`z`lqyOp;9yP+E!MSR~+%VS1g61+k~mC3{t}g{Z4_qpr@T{Ovep5+x99W^;Q_ zX7NC6JsaC=kQ*2{qTph#qPF3?CxXME4Q}!a3QRr~zB@=619&e#1zLLfS%N12lZa2z 
z{eXOSVlXwLzG3H>@Ov`d^C{c>ln9W{vB(p(1k{FcFm;&o5rG1y#woBzCbS7*L#%^H zZ+r^maurBZcNL%+%m^~TVW3hu0M;h~t)wBRffZW?;q8zp zeFTP1Ag%tbeuQA^dchk=|1Kt=RQSNAU~10)DsDP#WZ+hl?W-%;1)*&Tpb+eEVmE5Z z^psIt1!1HF7GL0ZU5-=QLRCfACx~`Ayp;{9%tvv1N^qoiq8w2qq}Zor=<~Mfu1H54 z3p$cPAf5pV6N%`Q{mm56(d+36WRs`+bq+@Xj}gWi^QEDJ!3j_G3u177L>oAnuc-FO zKnB~SZx5r#N7aW#wy&p>()+2B`Yly@Y_&>{YGJ*BN+H#7Pq{O5ktPw_`N^a2=Sl@8 za!2KjgAML+L%^sAa5nkhdDZhJ&lR4onI8wra>X~5DT1BJVX&!J8jY?>vPOQkX0oAGw^A2t4UKYj+sn`z+P=;_=yT z9OA*Te7J6!yXcBBo*0c$cQVH@GCAaZ@)(cCF&@#ApAYH%YdCLR0Xoy`0v+&<3{TH9 zPWm1nr&(M`;uOY-fU#_xOJn0yh1ujc7^mh((ex3j(|Q8+DLt8yGk%6`@r7_j4PaHS zC^2co7bPeaoN>^k4!Nnh_T)>ea!1KbFN8a)*z@u}z%buKbOHdKasSeCPFL?^UUNnU zE@cXw&c{Z&BTi>A#GI}gI9>I5kB!9hAU>NkWZ?)Td73Bv5(^*WdusKPrU3SIk>`En zq9Ui335Xn9J?EZg=|XYv4G8CVVR>XRs;tHxmH5_o*Xny0gA!?dH6#U&H zaiCv99zK&=c47CR_9RCo9h<3HfAPeV?8~bzJ>3mA?Dn5kFaf}@zrM%<$qMlUs}GW7 z5vJQip|U)27e%GXy` zq*pcLP%h`J*q9N-)=4Je^7LDEAZK0w*27NKXkx+Ht$Ql9KG{n3bY3^aJ}paSk$4wE z-Hyu1YI#AbM<;62$_h0kM19s^pIT8AisYz<>YT>zL7n0j?2b-%(7LVTCJ-P) zN!Bex#v97=nH1f2(XfU1%R<`qRA-;+7RPFHvhYzo$DfyBg0pBhfz)vIUO=u0yW22N&|H@ z;3X{`Ea9^5NYF9yyfVF>*sW9!$dL962FWpsY=pWIB)f+_nmGZ;?3EU&^zmofy@;#SiFA9Kb>vxJX`8B1}Ts3=z*K}PPd zIs_~Z?86))jR?{u3?*YAT^%#lT5`HJ-mV6s6eVGbt{WuT9X&+|EAg9UYI%m~P1}Of zTQW6Dx-_Ce%#_T6a5LXP{e#6aH=-El&=lb;gciaikA`$`tQpHxi?L)J89BGcM~3UT z)U&JT2!)X0oTvb?IO>3d6w6ApT?s~Gk#iKIila)$$0`NQ=2#pU>o-&atPTCrDuY3g z@5H`xZ?SR#3TK=#!GJJW7dNB*OyUS}VKdt2*bi?;d+R!vJ&!uk=ZTXK)VU!ib_4Mw zc3V+jDc-bKG4ut5twX9M|KH>=Q^0UTc_9L&s;??gmc_}DFqvI@sqbUg{+aeM(}5DX zMaC&nYc@Grw;@CR(J)QDY_HeUiTYr@Nn^bfDOc9{C+#kgw4I4hMu6nOXP;8qUkKvV zidv$BCFtO^CQ|34#G1(*nCp3(r`AcvX%1?0RBBnDJ}LzmY$_`tJU`|z_7Wn4Up@&N zs*~QBC7?xcwtiRjs)^{HsHt?T*;z2~x@FTUP$ zk&K(I>?^Ah?Tf7Ud}TQyzC`q%z_8$JBU-yPk{9_Hm+>2s*ItxKVu{f7O(O~unauvO z%T8>;Uk4&fyia$6dDEy3bk-{A`=GbPqS<7=_F7(%!qPR>Hm4_HeGAekcpkK7fzHm; z-b0j^Vm(M$_>o!!6dz)VlGV5xJA~2esH$?Z((3@Y#{ZDI9)FaoZSI3f`43bm2spa(`Gst{6&@MByeS_Hy!wh~ z?n}OhL=93yroK~I1EI;r?&-`WhwJDxYm@|z@NrI1W?eiQBxobwnZ(Dd9PY#N0Ez1G 
zp^WK#)#IWngyVw}T?(n?#@I-dhHd0&Fq1IKAE8V>>Yn81~{TYa-ekLW4mVT5JT?_c}{N%$`d@~i(G z3-YTg7o@WJ$S$XN96KUxo>O|C=pFknVV)zvm$l~EzeU+pSYoHgVwSs71DWnqLpoZ* z=y-;+P_v0VIn1PmaY*Vc)98@z$?V6QwtXq;SR|@WbO=^I3~udDy)FB&cQ1mCOFJQl z1ADI~)Yw%OP#lc_+jnPQ-y+C}4PI6}zcea##$TzI{hG&dc|~b0q)R}5MSyZ9md`7E zGBw#>yMt@1R=G-*>#A~t!Wxd0?CbRNgIET3R5vS)u+a=otDK9?e#qVpQls}U%2wxLYvtmcas7rgk2Ou%t7{&>Fb@pZS6w6D zoE>cy>#!N>^BTFMA94)`UjS26T5|8bIKrQBxBym`yZD?MiTR9<8_}=0^C70lCyCi7 zpcrj(;v*D=M+6GYKeA%E?N{-ng zG+>!k--4!w`u}DAvcE6*A?wPHSp|s{{4hFZ9W79D%sN`Y?uuG)SF9E-!08}G3$SE2 zfwxl}`HAkkCAt5@?xaIaquq&fPTrl4du6d(>B-MW^@Ob*X|d>Ky~mjj5#YQV-U&+d zNyyUgOave$tmGmUuQp1_1w_9MnYyI|UJN7adb*~{VuFrvKqo%y66%#UgHSK-8I~=) zZCJc;wP1#IcO@53s1`jzp_-gQ!)kDF0eOu>^BwWFpcRkoHrHWpB|9TluOg(6-Jh7S zrz`~MM3o_qI~RoSHwZ$`+=-wddc4>mL#MEf%ydCm4XpGuSS5fk+yrPb!_t~9Y&s^D z=AV`2YMr98;J25@_^nF8)ms<CD)Ljn%H`kUyQ7F~v`Tzt` zcih2M7^^5S8lzgBP4>0JV&B`A#n0#H5kD;--=KN5j2ViG`CJP=5{?2GN7gbIIq0s* z+YmAlPG*JkW?F~0!@qva``-@|C1lMBdI%aI;Z^#~e$)ftOgw2-Dp0b0&xK+6QpX!U znTemaxnB$9gaQwEp5)2hJZ<)E2?c~>K0Vz~z=H$>0voT;u#Eecc}hekj1)eZ-}tH= zaV?a(4i_K4!xtZ4RS|`7V)*taVW`moKp1BUaW+yaUeg3#5Sc>zc*Bw_2(ci$IZX*< zM|38*yxsxfVRDdliC|oX{ODmw>hdwzE%teFa-oL}fv(gOk0g_?zib#U8e*wx7 zy`aH&r|rP)@7jS4vXFRG1Ozcam{HsCYl;#X(^I=XDuk;IDX>N=ug~E6`ft37qope|D*T1!}GCcBH zuOxsx)1fhCfQTLinI7`9=6N=c?DJO+ep{Pi^c}+NM(K*eDmQM;3YFeNE$F29E!pov zA~=-BfD7wnz5_`Q`I-dVGG9{*O4Ok_9ph`Aa!%!?d@amLkQBUeK;;C4l(Hy6 z+Ea1(&npFg)A5PfKmKP_TwaXGL;%$4E2%6mPDJ90007XBOfjM_QP!Vpj^M`UowlUv z{fAOzAN6SO5xpu!3wsYKTI?4b-+MY`eiNweP^Z{v%3f~qDWzXo$=Q#MwE+o%s-v_) zh2!qJ#Mw_c4Ge68AE4J&Is4Ko?VY5($Ys~!>ZgWHu3N#?Pme8y0OF}mz0ZAWYjSnC zpH@_rD|E~yIcK;kmxO=d^(k>l1j*nt*5qBB&>4dl{eJlm+=}|xNO$SlftSqv@*lWW zS1-&Uwei?VWO@-=-U^}Rx&}jG_#-{bQM`^RC$a2WQM_o0isEG-QfN5`52WG;R^~lf zDTWC)hnQ<2*u2qw?iFb@p7o<9U#kjIhwx%n3b5CO1ReoI|b=B{C7X5PKU+78*kCyEo8IRK_tWt+{{@gJzoV#@%=%b5@hD}_fVl==ZaMGWgH}pJ7aYtpW$peXIFLCwK3tm`uRjY_c$^R*l8v{U~3;mC8=;o=GCg 
z=&te+e!^79;gI=BPKV6#-ak$)GrrQef)uS))157eH{kNw_4#tPD?7bfqk^efN4(O2hwo@*RsJ7{g~H=bG!oPpAL8qWYTv0s>ezL^Mfuy5H+T!_OMpNaF6%*5>?Q>Ntr zYyVg)Uw|AK*1M&MSDm7-nje$tA$$L=LI_FuP^@%-1p?zHw_-U6Q0zx0nUi_~=nXv~ znX~cz8J_Vh*OFezFi>^omuiktO8J85fxLUfF)M%xp)NCg3jN8~@0r}*n_QQz->`Ah z=37Xn`?86t*3{J2si|9Uozh46Z>zFa7HgE#Xj)jXoa{NUu5ljaKJW7)s`I8g50vs@ z1!I;;CcYhv?5J?r8(@4}(Uxl0vR`h2mhsZJmAV_U1o~OvE^{sDi|SjYoQ~3~(+mALJ^9YhX5mYw%eR$;kUux{wde4W%eC+2J3=ba!p@hBnB{x;?a8*VkfZg}vE z#rZnb933wNE8{E%_UMUqT>+jzgHWBd3{-0%KKCl}*;va1yR;z@y{c=;rKFHqCV_}07bsSQ7y3QPd)orY zgd3K{`@Af$NtLEV4Qo>7c?)~EC;6~yVV&YL)C$}M6Vp(`%IGb9dkX)XO^*1Guz2BZ z)y1}$$1EMg)VWG0dyn{}w4UaDPm8y36!GaFc&gvvuj+qGPw#}0=tc9Hr~0jDY$A)x z!E02IFz1-6!Wz|6Yc#$`4J~kw2Fe;m0%VOEbZMY*jh0evWfMYk8Mz%H7mUMXBZvSe z5TAK!1Z455yL?3z$baRKXp;OGTq&BiBqHS6w8GyLgHo}D-!5L$2*nnMe@{%8iX9og z4#kcQ|DM2%8COV>pBTOl#g>MDPrRa3?DX(;D0X)E_r$jhi&e@xgq&-%&wsuhx6^|9IuU(Y<)MG^(YZoo^ zZ4IlrcF`iia1_Jx*4jnMAv^{^ckQC&<}DY!sc@&{w`&Wp8@dS`z?0L>*abtgHrC{4 zlLva;{5(WbY_t4Y+I>hgi{N`n_iZWDZCJXM|093;dzAu-gC1+_L#SO0%tC}0s+1V5ju_>Wa+rcV3Dil2WXnp&e=Oz?Q#By5Z1 zJ~T4U=m|2&eQ0D{jPE&TR3HP7ED%}vRoX+gr;zp#`QaeKuQzE+G45it|12PcBnlsi zQ$2$81+_&rCM3jE>)VaGI?DGdjT240XrY?$Xu z_;nW}{2o(^M;#aRk}%P~tbk#a@lS~$R*)OY?oBX>Dp;)WHJLhSyh#f(1s6*{p2kXp z>L~Na+H+`I2xrQ5UlcTKfuR*O&_L|W&tmJ&MC?OUIfMRqXX2tPm)4z?4g7G6(1~Id zR4NVT;-t(f|2nONTnXG`7Y^$O2~Y~S%b!`9=ua{79VJ3AR7*(jhX?s3#X*t+I(pAJaZb;+p? 
z#p>%mecj9J{JNXr_NVN}Wp}EkyL98=zRI9II#WLE_YG9gf!%t}DcF8nN6P8<30(&O zbr`ro`!EM3!g0j*_=)oRZRz7&1I5jecs$g%1bl&pc_@Ho2qUxr7C{793IigsV?R!$ z_)PlbG+MB@0y3(C5){70**HDO%ql~aLKi%_y5y=AL}~4WTMziwzOVAO>9%I8%H4vGKpVW-2Ou%4ZKCbi#;u(^ z0F%e!fy=1dlk`CCY?QOhs5B^2|6qDZdH36{{w5kYd)%rO*iqCP8t&&zyH ziXQ3s(U@aHWd^+wIrhfM5psECdXl=3g-bNU zNj2C}@uMyx#$iwsSv19kFxK~LuTHlyMg5p^2HCBIcd*9aq?v)}!%)GVu9%-8l(nXl zi4)levFQcGziH45h}Ss+abvQC4HO`BSM?!E2lQ+5dn5F#q_7Jh!tUcdB6fQLyYwt5 zKxR(~*2#zVj72X;P;uILG@x0Lq*D}LPbUkm_>GG2ZDcWct z67inkVjklheKC#~-MR4@xd%1&my&biGpwPPhI8XbxI9o7xt<-e-niqpK$)VwwD9xq zx+l@9|JHkw?cuM!7iQ%x18!>F!gn6i>_T^oEkLDW5ZPDAhd%P#I{}~MJ*h&LPq;J^ zQliAyzupveX|Hit{|%2<-IIv>(edp?SM__6Z|6IYS$?4_-Ijd&?(FB+i3i_1vM_~I z1^H=q@c<1eRR;C!*t+QGF`dV|k{M;yW+sSRSx>|^_{wgP-mh`N9PXqH7OB{n*diBM z&aXPSigFoHm>r0fP|tou4vJ-g5t3>P91z~ce^0*EOWCZACGiUOFzx8s=Vb|VZ9|-9 ze?@ForTdjy*lfx1)!v=}TrEtKA`Dq;QhpIHvd^JavTF_Ra%Kz5w}9t2^F$BdVj0v4 z`HD)}7Zh-&4nfo(+O5(+Eoz4~EN><`oBHk9WJ@+T85)-qphQJ_Nzo_-*%lYhz*@nw zWF#?DHn3EpdO#6tfK(P4$9n>ygb;^~X_A~J`h-u+ONxTBf2B|LUNMv4d-*{QdUY>>K!b0{FANlXk(Rm{~)mATc8 z1v|A)cy8B|AfyVbi*NM+A0)vadG(3i1E^IDc{jw$D>)w9Q9{pnks2^0AJ?jTr zkaiUvD=xGi(q@;orU! zR{4L1Z&<+;;j?%n5bxJocHgY!$MkJSNwf zq7(fjkYnHuQdtq+F(e>ac0bxm8$pr0SY4s>wY>FdfhkfGmH-_{@EK}={LWrpT$DcP zWMbOZxpiDi%ky)}UoDAm3i_X`oN7WHMa^n(GZ#hkGe7tKOm`oru!sEZ4=dM`EZ1ZU z5Cb#MeGC2}{#4j$-Q^V~Jm{7Kmt0?LJ^Rhce)q7=P4C5BoF=| zeWjLvDtVp6@zF@6z8V#WL4_!B=t*#r*7|HRGY$8eNw-gT%$)?iG@~z*vNOLUsKncq z8WglZNl!}Et?1i!mhyIbr97diLWP1Nt@BE)f$1nS1ue6u_?|hX0E9fnB7&?L_!#xw z8ZocxjU{Kh(5MKOmaz(;51ES&DzSqntm(ut+pr>BOZX1Z9Ajq?cb03v&xo&7@=0}t z08T>B3JT}P*ALo(>oYbuxXxeE5I-NU=uK=JeO{>fm`XGBgy!0D=sjX&F7&#u4zqL_vo#GW(BgqD{KSF}s zQMoa8ldUWl1n~0Ca}3Joh_o1Wuvgb97iiIbBhx@>mLL|=a1(eaVn3s*!xR0OGx%G9 zBl)>6kmc;psVL?4Y&9;vqQZMO1>6?hEMJ#Mxhq>KRAjPqu&w`CrIJe0LPwTi2s)cb zFaM{wC6E zlPxfPn(=v?1ZeCewYY^h^Ut!E*Z0gONqz~xMCD~NchSsLziFm+DG3H(Z>(KdgTNQ? 
zObFD~0O#CD1dvjon&Rbc7T+_zU1^=yuh_WeLWu#1tEMU`->}d$W|K|gQn;oO8>Aen z=AYQ{Y0DcYN~BKX+7XTEI8K~W8hXzrN7HE}!izMfey#hD%P46_^5GOpLcV$p--Ib+ ztKv0PlT;T3BK#vks$)nRzY2WQ(uEbn?C|7JbW>&;L^q4^sHl8X4-J7NO0MLj%L4!2 ziBJJ#4P}4*aGE#S`X^1+^?X16PGU!jifVGElH94bBDHR2`USZq42vDvPraN?xKh{R zx(Fe(KoZiK&8{r_TDnVFkVC#{z6?XHvUgSPn|$WJ^n0}8@+Eqfz!C;f61E_cLeiQh z8#KbnG|mv;bfu&1)m=h%@vn~=fmQ}J4Hc4nCA6X6sQ1al-}&}gedGC$P$K7+@*g6< zwOamr<+_6 z!2t5TSKg83S@w~R#$D16>=!|v?<07(q8yxk+5MD6k5Ee1lsljriX*A+^w1P60wp0p zF}6_QVH4U8%8M8N8LxV) z5!eX(6le_hqD*7m>l?8(r*DT-ET(K3a0-@9IEL=8W*WrIOwb4noo!4iYE)~3Y9(DT zZiHbUV8|J0dl@VaW9>Rb$ysbfO@-JZDn-?^%lW0sJG-$TSdL4oWNqqYZ_Oox2^o!q^~jLnOBpJ zJCjj2<9ymOMK`Fev`{FzWL3DY|IWRLOo1+!tGjR|P&9}NvIYtiaPXk*R(J*zVlp&~ zj8F_fxHlm!A&0r{ct!?6gJyyUT{Rete4M5j(YVtoi!764;s zk+^Mk6U|g$4Co~=$<5kCw}mR=OC6tYiq|$=*Cr(j3CY+8xHw=+qG);QibE3+ayV_r zQiOgs6?Ty|W|&f*VRlBL(oQpoMk`3ZKkN;j@UYDXD~b&tEd20$uy+){YpeF}LA1UZ zMHB{))hNPYh6%#OO(;SVDSPHU$qaDfkK1+^rt&6cJFqB`UzpSf=z^hVUz`*|ca{;# z@<-8~NpBAx5w0?j&#Eb7KvR2LQ?})w?x^0q%j?Z1-wBGS@@&5?UlJY){*2sKTP>En zU*D{%4-Zn$p^Z;Qx4cLQb7CAYfM9i~!X)eml^01S2ECBy;r6?#;>OMBoxs4Z>3`!^ z0shb+&3M+ z3rJ4boNlMP7$xgvPdL}vw98eyZ2_@$R&CJ5i+;=B-qgqFlOW|tprE<7m^E-dmz;n zzeCE5Zf#O+i!Rn=Uf$VZ$YoOH53F<}=4E~Bg>{kP^$lEc*l@}G7aA5gRuhzpo>i+` z5kj4|(TqM}wm9;vm@T#qRSS|U*rJ^Ek)tRlG+l#;Y`PsJ&wfi3Zi_S9DCGv?egp5+ zLl%#iu~lLoezwzQIC$cr%sWswsEfOMpgs1hJ^t8lZCr~M>l=cuQX8^9=F`E1W{W?v zU#c+S6JnH1+FDGT`J#vYskT3TBS~}@ad~X#5m9na$NL&&(A#D`Q8+rQ_5Bcr_1VRm$H zaH`)P8i1!Tpxsi*AL{F{tSj|gi60M;8aqy)tAqnct8_rnzIe)=_aq0!slmj8o+5SU zG}d4bfDxJg116xUuq;{nd!}1KzPahrXqC*?iWR_jid)ip$|`1SMGV6MBw#M1+gK;k zZPkd8H6D2dIo(57fb)=Up;CTXYcIy8uO487LP6raUrR7)qe^FW%?JCXc$53MgXSre#O&7PYKLWR9VNF>?s!cL%z#Ln53EL`;I^Q_V~@nE!t(#msHVIgHDU9SaJn z8DvPo>GC=xFAP;w06pKI4jSLh3xsP$%1|PS=4g^j2q}$c@0)u2pyRMA0DDj#Mnw}5 z&7%mXsmF8jTpiAN5rQac>pO{NizVnL!8b8BRS+vw3ilYrei+P39;?FFn7b)7F~cSv z>95k~rXCOCCd)bE(DP5P*>fGx&UCQ=OqXc8+EHaI@~}*>qIOXF`0hct2qtcMcv`Uy+MOw@* z&hS8El);$T@|;w$A^{(kvbT!g>KlRph$Oz@ySK_tG+ou!q3Pc~XlgcKSPU1*8932; 
z`D?&|D`c3&!B^F-dV@AHlfLOeF1Ti!SDC2xsq@FwfdbYDd)hVy7y%@jqa~Ua~PZ24jA%MZEnGqpclYpIndw%>=+-~)emv{g3 z+%vVbIj>A7De%A8_e?F3w?%d!$He{r|DLILM}oqWpbh2)5E4E=CR09E?Le>g&MuRe zBvR3VxF>;AyR%;?j9glYQ0gPz?}+?_K5h2NKP6RoxY<>@ztrF=rmA?6LSA10JOkET zEvPJsgUlNDB-?-waKy~{Yrhl@n9UPm!z6A@(smyUJWyy$N?3M$MaO|F4!A=QBMb7D zw=5#MlKdbNtyG%nb^LLNKO|by86(#x77+6Sav1X?)Uitqm~bMGId*LBu;zQ4QsZMFJJYFmmWJNJE~Boau-5GOW{C+K>N zdWl&`j z+GL2{(!l+Fw7<8~vQFh`6^j-DWO&NHrro#EuDV2_$GD)ZzKaV;q>sw33liI@gp~gU zYy+UBNXtjyKw4v#aqJfK`C1QNleZrYiQX#Kv;J12PR+AIsZ)cvN_<3`S}_Y4q!bP6 zh|C!hmk|};SyYU1E{Qzwo+%GTM;{t=?=1m+2f#dQNI%fDNpMLgyv($5-ULh)gQ*oM zv-#NMe`-X~0wL~slcHMJn9t7gVE#z!_WT*UvnKVD`GdY*@aryqY5FML5;DSRq=C_l zRuX1$hpZu&*R-RIQoDEsmTC}B& zeLa0QWvKUfizBf&u~$odYYL25*)wU%LK3{einNYD%swHrBy=iz0M?{OeBuW8q|^)k zvOb8>qS`dJ4)hbNEMJ#Cf31O{T2Uw$f}E;>S;f6lZuN1bo-j}fd=Ct~eyn`@22ooP z72aMZUFq#WD^B@Z3`>}pSm};S+Vs&un~tP6i*Jw!Hv{&;3j8P;V^)W1ZeDs^GdkOu zJh;Bm4GZX0=aNJpg)=c^5hVk{T$-jua0Y3GBGSSW#03Rf6N0eK%QYk0FumfIxPv~| z%aZbD6{6Nf8CDFZ=QL@xdZpzV%-(zWlFv58L+PYcw>92v0-O+)_DMl(F_mRuX*q`# zK4pf1c+^|gb4cumZx~L-6n{wWCJO>QMp7^-_O?rly%n+B7PH5(fRkvD?DTf!Vmn8s zkC>wd+)qwsct;$Q1{*<7Wk8fDg;TgKIhlQa8c5|_9EBz=9npG_PWFD3b^-VT~hhAro(K@lDL^2x6;qDlOD9B2!x{jK{xy2wEE0(YnK$n zj<>-qG79_Cs;U1nSLryme0D{1hEz|~o7!z2&W z)=@p@5|MtpZe+`A z$+T1OgDON8#q%E3GSRGiFFfLf`Xj{_aV#K)>Q4%?Q?!MNnH>@(=wL5$XEvOSm@IH$AkxSrvA7E!U(L1g9`^}OD*dFVy1lGvn7v2hQZ zcj!t#FX%e)jC>c4cg>b&r^%LY&!eM!#0T=VE&3E1trLGlVUA%~(y*9}TjcaIx5)Wq zrhz$KX_3>+Mq0!<7)-frQRkN(aElto>&G5Wu>gs`73}SA^e2oxr=2OpAneWalwDDQ zr8wm8P<%Y$v^)-+AI3R6#JRpII0Mb{Ugeiy`MBx<%M0ooSU#pJeLbox?HpO!$0PLd zm)ytJC0NyjIiU)5=9mq8bDIR%sYZ3y8bc=%U@D&g8;rtYg6K?w=(Mgh@a$5Eo&}CP%|WgcK?( zqRqr3XYj$hf8=SzbpJlGKAUx+kezKB&PdcP4E_)_QXJw4i;_iX+Jc05dz71Wg8X3> zJjSjX?Lz0az_2FV}ghsS)g(CKwm)3J_HLkk_{LLL6@{cQQyLtQd| zfFo{mtDye4+Tn;6{vP*VCvVjggW#~12K%|V_7iADr?~SV>nqMp>PypjmhW(tr%yO7 z$)6NDBMDB&OBE6@j6(L`Krg$YcKXxyZbf*WCYt%QcEgJl2_!MG8(Ax$P~?7E?O`OI z(G`)s5190u!(N=O$Vo0}kv{Z=J7aoLMGqT>)9x6juxOPq$m6sn63N^0&Cz!kxdJf+ 
zv?rE83#VzFx#y1N;xn^)ui2`u*}hG(KdB>BMzqeyR}W!RFn0%@}BJCe9w6i za3GUu6o*eknH(M_JAm=$$S~m>)%7fO<1T5Q@I2I9WiL;j@fO;Jqw7w@{Wn^P*pwQw zk-|(cXH#oe`a!dY)okJ_(EW5;Bh0ZO)kVV72jF8HiUvZ-J;9<`L1vAV+>K%`=tWvM zcv6+&!13fGYr6zW0487oD**_o0H~fVwx!E&;4|yN>l9d5l^eBh1_EiGe)r7a@yyK~z>RpGNblvwaGxuHQ=$xAtJoKl?4u zn7teM!RwDjye-DOHe4fQy*?^yzM?z@vpO6~i3 zKE_VM{#Iq9TBOCQPJY4(ikE|E5tXqo`-Dgy!PDwbmJXgdd?!{$Oc@CP3n2Ehp!a#o+jFto$&}gJp!dmVX1SPRl7KVh}^Fm9?Wyq`0Qcka>Ny543t`B z)h;-3NP$%VJOqG|XHw>s&7;Xr9yWgHII)p3-8`S#+F7*0QUPp3b77sBuorOe4Xuj1?GzTg}2bMFex!XV@Nd zzs_=!SpnI!6WN4s)nq$1n6jM32R)ufzY@=fAVK5PBQ1c1!2pX&2nY^GcsL1+tT-Ps zA**H6C7}s+gx%hjhAwGQ%)4JKCC6_`tA0JN*Lr!Xqg;beHDXb%ve7$4XkMI8r~?lw zZk8jcgGmuCp^lhQ_=(Uf<;1iD*zO=LAV|gW1EozRatc6pEN55oJL1=Eltk0K^@?7h zPMI3VbO^5y>lp5@jv6Q+mU15?(s8a^@(NMx!T#8Agj6hfnDFf}M11rBk042}cIe3O z+tzOE^m1eKA)~J&KNINt##p(F%EIM#QMoQ`!ldYfDH-=!9&?A;Dree3nelR_$zRvT zUuWNx#vXP_dJ}Gs_$QLu07t-SP1m%{$kTDXCibR$99hqV((xUTaP98bibQlb;2U;n zb+Tv4B8n{wql^en6TY}9L> zJGaU1w5_g6{}%DDJw5KwQ@_6}9Ap0a(__g3slS#OPI6>4)m+lzX2l`oEDHnwLsq`F z)OX{xk(FB>RKt9ltC#BBA-<71Cl3|h;k&|H5cjyIl1D47QW7rz$~v89)zKxJzQk!Y zdjtUyJpK}TB1a%Lec3Mv2B+0b4E4FnrY|GN-`Ln!$BedKcTCW*B};FFhzJETwB?@mnBKXn2erD19ic1V7=v; zH0D~&EWVYe2MYg+1>@U>`${?cjxN4c_B6ywrQyxhq`)oP$=It36453aID zpiw;>Am{SVw+lTHKS=0$2TW^Xdr#a8(tH<9VO3_>1o?6UCqp9Wlf$O7&)PS>tKOd~ zK1-a2RW3iw>L!0Wr&N=+SG{q8*<`;tOeYoxp@WF8>}J9O%OY3Wx*M*O$$TYbM1CUp zP)G7k%6A@gLw?=5!`j);Sv@;bx4K`cHEI6r(TC&vzt}03aw?m0(#U5k3YCXht~MaC zgX&y(u16}e8of;UGUgGBb=d)IYgq4hm7$xFafN%gjNk;P z^d=I2rV>2Q@)X&FP7zT zt+I2O=3J#-M8l$8q8ysyW2MUaodM{O&Joy}hwVZByhP{PQZX&!!fR5$<<4eX0lu zm=Ax-rHzT~^!Yv}k<3|HrU1L1yZwCqXRrgTmaBbT$c*+-A)PQs`I2$^AGRcpT2sK$px)Gw9D}*E@AOcjqEXZOF~DJ`UPk~2+>;VAwrJpJ9>1C9xbRx zr~J`T|6Leh1iL#JMP;a+)J>$DuTM15ef{JJlE}&sJ-<0ua)xH9>nURMP8o8(r zhqWy%b=eUsxPkDr8(}lV(=Le~ntL8R&2n$k7kO$t&0;5oIK}$)qVMXI@R2@hQr0ed z7Xt@(NrUFW(#UWwGk=%jZXXu#Qrx)noHy?D~j@km^MxNc!Q9eWTbN2ra|yFH|R4;^O8N zTzIh=iT!2lcp_hA0a3=|_aQu&R4q?N)NgB4%Mk3lly*%QarUKP)P4fR>TF4q`IJ8> 
z<}1(BjGmgLB+Nt0g5dUzt?|!nh64v^Pi)cr*KtD@KQZJP=v8#HCUUceYGO@e3T#vez-NJJSyM(C=wCV-SCbeFMmpmh6R}*w|S4 zRNLvMV*`6Z@rVBE+nYK4;j#g(jrD6cCSz4gUYrEOs?jU@4*J?+yUfq&uTZLyB;cwM zLjrmezHS3U)tnp~@k%CRpI2Vr6Q%B>l*l$ZcdInojQ>}lVQL+k>sO291EpUT&qKqc z{lL)B(999SQSct5VkPg`XO6*N+8GaNb0f?}zfu~_VtKMnxIfF~h8R-@6+gjw38ZBB z%6tR%I`{yVhH+nKz5KO>nU}ON%fEkpx_0@EinOi;ZloGw&Xh znQMbh-tB7lx|EgsZVImGQhQa(TJUlZ)Obub3~L3S>8(YS5|Uv-Ra^EEaJPFm$UhSQ z@|WdnwS5DC--_dx!@iek)R6v4>ADeaw4B9&yf+I6);(+YNrVk6^L|DH_yhpdVS{y83TKB-!`(B5t(SNBqO4?0Z7dc7144kdRHf+!XY3>px@ zvfo396Gl$}+QxExGDbQ(b>t$&8NsCiYubCO$`U337O z!vS+nP*splfpdl1a~2XUa4i&<9^g~ZWuG!G&j>2pqNm$vST_w=s6~Q0IZr30;yrfJ9^XvkT_jQlFj8nBc8s}T%6@tj&^E(yG|fOnqlhWWzFX87m=#W92v zoj#Welf8lN$ZH*M!iYnr*r(L_4eQL$r(y-?o(xnx&QF<09`;>hYBN-cIf?|;NUg$O zKh|IEah;vr`rh7}e6`|$SkuTJoudhh=<3pCNu%dCOO#o=oE9(^M@-4hWIi5)UJ!j+ zVQQis@`}@fusy&v;Ip}lBg>c&9n9`A6l2Br@GKZW<1#Go$Y0#8J}9cmmvcDDwEz>i zU^p_swXG1~b?75)J0qQv4s1(%5nk~6Wztri^l{lPD|~)QQU|liab@O^0^q88k*kP) zu55y_5GN``&|$PYey(B+d`x91H*97$s_I-G<)}Xp!*wnR_p)+qkSTbsnu%eJE5FA-Cq+pTxeEdinEA_`hgyCa ztB7R0c6CL*bTSowtAJSyF>v0}D%@IOfTGN%i&1H8Y2DiIDD%m2zko$)V5} z-l==#k>kYn?RdVyVuff*Hn1W-vP5oXKRMykDYk|D;>%Uy2$o!Mj_rNUvaTWHvV3_6 ztp0NABXIGS1X^O}d*Nxx$-&x_b}>OTG+pgC$E|cDV>7c<&dENIp_*Hi*+;c3iMc~{ zM{a~^kbxzT0oEl*ao{bKJ{;m>QczOI{7CsxWr*Jq05#25<)vlAEC4q;A_Ci%zy>x_eDHt0ZI4G^bf0qdYI|Su)0O^l_MbwKr45&96+H`FK6KMXuVG z9VKEbh;rv07bifUKon_fHrh8s%B}Ig63r)YpWJsZsu9c#2vTjGl!?b+-w;WXYRicc zGU`^AIoj7i(@N?8X5^6;Jg5Z6>KAQg;G$hQoR3!`!9hKnYo{^nYRm*6ROaxarodTu zUDGJZkn`8yXFQ8gHJRha)Q|TEA4!&H24@* z4%zjrk|ifr!^-Bw(QLWV_~7diEAZN%LJ%v&zE{A1s=_p}JJPdO6%Jk^ZJcqkM~qsc z)*ARAN$aT8GY~2vQAYL3BrVhteS~s9NYX+S;d9ng$*I4rBxzY7Rg$!#Rq0w53?WBX zr;y$W3wdOcH_l%@4Cz^O7)jAuXv^kMik1|mL5kMuL5h~aOP~T&0#C})U&1{tZ{UAU z(NYC>IEqJ=8#!E53!rezDCP(CXv#W@N9EjcMztSY8pUIB^QA1p%cf}IB{6sJpjTjZ zVD!b}i!MB#t+uo=0uROB(LBWb>29x&rKtl0i$k{9fGZ zLoSa=#@0fK%kN`BKGdCrsG`M%a=?eYnzDNv_9gEkVDL51OVYuYe6)dNa9Lyp3Erp# zr40DGxQ%kX!`q>KX}@HCPipv>o3;EZUf6~$=-`?B2#-7jB&f6aU!;d 
za$FAF^wCt#(R1m~js=|8$4MXY-nZzx?v7Y`wm^K1V>uhc?be(zXY2iWRReB`9B6`T zuF@YRHGBC8nUdLWl;^YqIj-=CK-mL}3ZpoVKa(9rW>73sxuh8JPUd;DvRO|t+Z4XL ziS6$SM}5i`@PH_U5Ch^wanPZrG#tUicVGI+9)AM(bG%D@j}*KmN&K5@Pb>4rtnEH> zrA#AfH(m=Lf*sSqBf*=L|-` zeu3?_O$g-cr&8uvh{;zaXYhxR2-bR+OV z_HR*v{f19TZp`k1kN@CE!k0B;D%}r%JEV`~i4G(45TAsRNv2(Nprpibp&M-co4n;i z@Evg={OFab>k%3{&*{9<+?}Kbds5^?6M=1Y{tTvp4*~y;d+rQL6TR3DPAtAzfaPUB zed!V{w{gON|E%^q9ZM$T$&n;8m%^84bhqS?_kf-%2Q)^3Jr_@~-J@*!!4!iRr;!3! zH4{M7QZxJap73WzS~7Jkrk27N2lS^CmY_;Eyy%9j-br~%S9?6C-YiQCo7R86$GzFyB$lA>aB@te>sJ8=e#QxJ^q zzXs9TLq>{UPZ>o3j5OTD1JMXy^NsQvHrH+wuh$9Qa-=EcNr7@CB3a362fos+Od#@y zl(rNZ6y%&pMrZ#Slq|_W!+jilOdvc!G4d#sK4lC($ZrVQoSc=F?pb4N^&i|jnnF$HFA!%!CyeJMXRr|+mRK>-N;15=H5EPUOwWQTjisB&n^rPxm?tCl-Z{TA|T!%X%4OQU%9ae-c z+BZU_Ufb%`l0+Q%4i|6?I>VOYCxEQwe?!J9-tY;q)72_MJ0|ylJ^C#WIX?%3=vs6vl+&R_0HV z4$}z(>vY4^#n4C-72BH=M?}xlj%tz5qE$8wp->Pjcg}(5pOKl>5z*pmZT7n%qDI!j zEUIy-#u#S&?It@5MYw}E9C~!5tb2fd$6A=KT(l#G*7=6}j3VOP7Klzts5+cLGnffv zfJ2ER?LIq;_08_T}; zjMN|ox|!T73qp}urGhzT&C{3^^sGmaxcmoW)?(?65;UPa$Z*myp@Z=kdyl{2wQ(kc zj>}b`m=qu~s3Nx*HgNTiJ}~$QGmmwETI5+S4zithwHAMN@G<5dAJ@x|trWQ$iKa5A z6E#PiC_(@WfIqU}<)|NwE_hKQSz7RN*oj&6*j$8zBBbFh-F{coyEs8b3QtJ;DAVju zC@IW*(kCkX*3o+7?zH#NyO(5&6Q6?q62af6vjf{Xmuu#avNcX`n??kAl*eWa5>34@ z$+z9!T*FIrwA-j}+kyVN(1LgXw@)$)Mu*JGdBs!}+e1pJE@?N^gUX_Jl3Bcaa3HL< zdDWD+QA9X|$dZN?0(}gu4@B0xma77~jd3I$rRKEg?Rla^4vl4Iu)vIL5*8<{DR~`Ig%I>OK0s zx%T7r-oK*W-LChQgZ&@mfyaVkQ%22{d_$2x(tm?|dX3oOtaPPOKVM@5g;~co3|$*0 zP9u_rzq$5-I!{msp2=@Wd;Ho-zQXeH@DqG~`X50DMirMKLwa!4oA%K1qe< z$iz+fV2%%nbMjC>UeJ#@)pvq}dM%o>|3^F-(GEv`s=uwuk)!!@Pg3x(Mg!gBTU@p! 
zmV9)lwz_9tsa${CkRzK6f+LHy+4Pjnw6Sq`rzumMS*Q6h2~P>KZMP2~G`MpCJ1`2v zhF755vx7DP$m;Wn$>7hYChdzkZgoQSqbc<6D;e}q?=Xj^DOfY}=n5PfdLD4-l@M&{ zNi5MSYzQB~DYYOWR(R=m35-9%RR0wt)HDH7&plZpRcAqxo2aC*KKm^rHBW0K5^6ob zRn)3<6=UZ$4ofix2!;qW(RP=%g_*#JF(lI%V^^m_@+Sv{+HN`fFanXKl22&_$(Kjx z@Uf6Q#T6UQqH@@H#{M8rDVH>tMuVT`DXnqDK%+D1h)5Wf^fQoZx8pzo}G!o?IWo!nU(6PvcnGR`y&($X9k*Ta%PaVwPYeYjjBv4 zB=7@EN)Chtl*kZEb|T8C%Dd#M7a@e`t@b;67@PMMMSYU|R@(;e;Iiz_+S>sDO`fs4 z0llvGn)n$%zE8W>D)8G@pwzo!tB?}qSKKS!AhoyuGREqL?C)q*`s~2iJ*loCxO*?l zJCl7fH_&c3$O_|tHb?K}oYm0vX@w)U*zl`2WmY#HOEjaHw+yCOe+#Z{!U$d#kaSK| zWn3~}pGd}n*cFRFxA&aNhk2tBv#><(0vKX{8;Z_5`>)FTMg6hu*~iVipSwgccjSWZ z)=Tw^Nx2bymN^AK&NpN~KiOYTu#)smlI(p5UT+z(OoE_lPQ7DQ!~k$D^nKxl+9PQ7 zg04RafS5bpn4iHIFJAu6p0*TU_f9eZ;tZkIBgHN6bnv*?qy0EUsrYv9ti4~V0$%R- z=}Jo3tgaCE^B`M0u_zzj3|h}&5aWG#5^9&(;32N6>mb*1e$49~guKu1?yP+xT|b$w zzpD2coh7%N?Ka6!Xv8v4S${WVzokXDWd}kc1a0T#F{ma9nbp1lADh64ra$`KwS!B8jRA#YkgD zze1&$4E^QHql*QK??R<|w8c=NBs2k! zl?MJ;s!yh{Q*^Um2a`^cRe-C@!*Mczvftq86=*(jQ%Axk)gDBLJ5rWFY*_~l*3oOw z_VjAOS;EAvl}v{ol$0a(Ad%kIAVFoQwlD5LEGnv7-K{k-sc<1=L;#7W}_46Ni$P53UOl z);hb)e5JujEpHb>`w8^6;67H=w7ZoQw`X-_q)zD?BXyEnf8w35$kiB*R2nDpjng=h zK&X|T*Tnjwv7S~REEBHDy;3%3O5+q6xX;(sMUOREt2+Gy)m4Plg@D#&pDGcb)-}DT zT8+4IG%O`b^nU2_bAq?K5j4&PIgerN#XTga_p-`NX$^@6ecLW0ptb<;9grlF!5xQKy*1nkhPjJEq85zKzWS3OCmh5@K%wLHwrP#oi&CzdD5zo#j9P5`1oUDOr8wKpnX{eaW&R{nBS6?*xlY=rH%u70IY|%e-RUkW9-g@ZQfgbmMY6anTDu zFpZ!C;N)g&bSb?OGov}(}ac3{*6M5h~ z|86u+rGuqU_B@f!Vj!z}?%gsEA-Ua8^lyYI!`5-MJkfhKHSDvmjuI^U;OCj{c4}Ig zEx6r%ld1$>j2rR)@*=RLcpTL@oTDr`7L#%sN@$$#Qrb$|xo+t(JcAcXGVa9Vf}l7w ziBOPdiu)7)Lk`139#OiHeOBrQ@hNsPd%SdOMf4_Y%UP9_Z;Z<S;!bqqM zza;Y)61k5EsH5d3tgfKs*|cP7NPPX+AND0=6?TXFJ+fWuGgF zQB|c$uFS)VBy`GFNff~bE?eT&q9Z7L2OPEw9l^frjY$jOQJkJA{hcR4uJnmTAr`_* z2sW}4;-w@eWDD;gThQUzWzb3fdjcm<3OYS+rlH2|)AR;KawzJ7x_w>=A-uaZ0EIXk zxHkuyky3K6pg9bUYbUOIZeL4>a!D<9O~@sEvx?kRrJAv54tiOES922lHDR*0COHoL zXzVPGD|Pl(ctBr&RH4b;Ghw>nDf+wtKfxOKk8Y(;(HT?c&JO z`Zj80CE06LGqJAa`EA*W`b{J|zgE}cH%S`m-Ksv3Ox5Urud{@mv$#w;7caiIXui2s 
z-eR3J>NVW!S|-L`9~{7yeV%EW_Vii$R2x`~K`YQDB@@vs3sxx2t*;Gic-f)0^h>nV zDNC>(tWqNV8Y!WD=*5Xqkf|g660H&j3 z;Nlz0*K8MIjO~EouP%s2tldFvIvjP~!_rYYsFkf9%iHxvv(;uMd~Y%SMpn!TmpvW( zw{A!8mZa^jDP~)0Zw(ZjfA5>PzEFD;=g}jJwU6s~Dz-7_h=(@`u6*p7{x;eLoQ}Xj zr8PL+XZrWWQH~eHbMFJa0&RTljtb=6fIN!!-^jU-7at$?VNg)3WYon8{-)QHm3r*S zjGptD{w~jNL77}O7H?ri3M~~`JzIsRRW@+xwr@!p8d}L+5BEN;q z5>c;o9=)5OKHgpOh7)ILtb`hW`Ut$2gH&3+q)X}p%K_ch5?%~pr4U{aVUn^C7nNV0 zePKq-Y+J4|xbQd=P&k}&cLhej8(;u2#*%O+aziArb}C0J<8@K+FmDkcJ+N-@c~okD~12=4{7At zIc=g4xN4-NMWaxw^#ww-F4{3I8hLsK#k)X%rR)Qq>=+E4mZ9v4PVCiX+Q7?V88jPp zvyW5_&kshcPT1;V0}tAf*n$zTp{%2NMXlKI{A4Y(tJ6DCo$`E5z9#!XtUBqx{73se zWFvIHQd8tc6rMG2>W{63GG7j>c~gJWS_0H#`KCv_8&XEq+UkUTJiVwG&m#8g#(ccr zK^*XKr6%24afY$rHo+NQEzZD`A&2qnuDD-3?!p38Rk^*nUsQAX+si6>?10A zK%v_8gvrW^HcF&)$u2{20<$%LwJ{!GMHC4WNd6Z)@qn;rwi_YUnPNtODFmZ_B72mH z0p(c|nK7q5@R#xBjb6dbs6rVl*&aW?aeM!!ME9q6f`lwz$;L*Uj8^Z>fa~wcbC@MJ z(ulu@3MTSwhxU;&aiepF$?EGfECY|-_K)c|03D@9mfc~{QE|{1zgR<;`@Dd%3AL(o z*;e+adn;7z<;A-tfi%_ziI>Pzfuuz;kMMkz=1F`eDD{!VkS6UmCM{hVgFZV!D>1@+ zj;$O=8%l&RbPW6yH6q`;n~^c!6Hh8*9s{j#z6SbW47AN7 zQUc3?{-Ot3qcPcIbC>D7_huO^qhm^_Qe#SZSYrxw8dD>=kLvOaNzTjoGL5Mu@X8&G zYV?0RzfnAT95iS|5&2l}B6N{tP*WNQzMH0K8u6S@7zzdgX|qZ4DZSQU@Jjjj8ie7C z7{*n4$ynspeAr=#u@7QHrCER=SfRFa2P?C8GFXGQzGp~Xxj5c{y6CK_i@qPZ9Cd*j zs4GA-Sg{6JNSLl5p?8xw)e4U{i#qXn&LR(td&1xM|y?y}LzGso)7a|W zBX$ww*L$6=CwBI3*Y)_$9*JJ)-8*|ftm~1Ty&u$-?43LL=Jf+TcU9=QH|MXnf{TE$ zn%f!ZpAtD1MF^PJb34{wAz{P`1>;s)S23f;@*M34(Zz_cayw^Q(pNDd2&@a_E5&V? 
zLQhG)w+}q%FVEg!5D7A4=>#xswV#nxq}e zKFu9fMdi!0FX03MMnnV4enn$A|79JZI74)#Y{+ZNxfQW`ej@McAY2_&&tqXQI@LkT z1!6q(p})=xd$)voHmVZr)tgcA5Kk)DOXA_F95+ustrlc~fG}AkRfMq#gYq-5+!D&h z;EsIL^*)Yt({au)b4qgpc>><@Df%Ia*sY={um+NZOujbONa~h!5!PJY6j(bsqhwvV zYLbOi?`cM(J_hxjDLd{r>i_JirQ!?$Bt2XH=ozj?L7C}e0 z!Hp6NH~T8(H||fyOC4{Xe#!iv3;+5FEiphu{ zf*UKi>JU8NG@=*Nyr4sq`48rI$|im_GkY9!P`J#|T6z3tdN+~(Yn?pF8wFY+y&sli z=Jxy!eKU@neckpx%U1juPveu<4L0sIm8Q^mB24{|ZEb z-kv_ZFYj5l`Io&R8k@~}^~QWNuhK8QnvRjWF@L%0=tU~x=KPKE8D57D_fCo7?PekZuXi_y=iTZ z%koB^Ozr4xhNFryr4_&gdTaAc9M=9OW~1~2F(xxc+MW@X6T#BPjU1#U7Oq=#8_kha zrr%=%&5G@a>3~BRp|>}&jHatUV(g14@kvaqBp{>uwO*X18WH92s^6K>Z47dZ*;uYI z5c*8bY|~xq-3!y`X{_{1coy&Je}9bC_wy?Kimw=*ABeH~0bZqFdeuv_D!*5Ce1DA9 z55!olcqU(KwZ69vJg7pzWo7a`p>%C6`JrP|Y1a&$tr)3>2&;Ij_ zFxVDd97&%YPM;l&&kjcU!-3A<^~k&Y)2aNPRDLeX&qet?aS3O}DV!P8`OBH%$%%G1 zLtB`jx)78QEHIW~YiQbarAnzhcQ5xm6O1;p$MSo^6sBxh4vyHNhDimg;Cb{019Atv zfm%AUxu%m%HTz`VfyjUbE>G)I6z=rKX`1xCP0Tb=smQ%_EZt}!z#s8is(l7(Am~%6 zZyZ`i;#vrbT8JcyT3j}|F>D)}$HJqT&2L)EXB3q+ta&%tL)~R6OqarsahA_x7YunO z%rB}WM0H%aMhjm^I6oRVKN`qh7{$5fp42lsDepNNl7#8;={GN21Q>1y$8 z`e-qIbUHpd9imZzhreM--z-{eTchx*$_s+fhcHA=s#IB*iXBJ+*S`JLp2_k$%*3>dc>8s8zT&K=;U!o0<~ z`h={yL+2LE8`I8+Gu2699N$5?lJ=NJnm?G3aVg0>WUrl%;pMQ`V+>97W_u>{u}AxB za3EVD2>-fv4PymBaSrbvDrBu-Tj=b|C`T|RPBb6>sIhWv@TOjUQ%`SX4PL1#mTe8c zZjAn@G*Yj=VXKAVH`mcTHfp8$5-RtkRkW{ARde)BrT0rf9;u)L*a*lYJsUw%iOY}} z1ALD_InrXg=v)uT9KBsFR)K${&yCSymBFZwBB0WBL?+?5A;q?;_|8GbW@!6itmK*_4%>Nl0q96`;x0D|%w*K0WItU)xAuYSa+O%O)-P!RDO13@wYGVoN7C|9A-pvV zB_>MkI{_iL=};Sy?tpco%W}9^iqAWAfLgpeYXYAIiRhds)%VrziWFR5Y9v&L!A0S#k9G$=qnn zeU_N}IqY@?(9kiEIr1m6`~h3;^NwwbH>QC@4n^Yx1f*Ax>OhtZ3XsSmESU@dIWqm{s&bbVFW%xHYg~NYEsWHFd9dtY;k|F zi-fBETTb)TgIEeM_6Q4zF?{$`$rGQ%ZYPI+(h8a&$-b(X&@}_pnG#~y%hu5Qn_D;6 zU7jKdt%wgPER8&}+q}ow`bPS5zU_TU4=rdv^Lo)Ig#mD)sh!ZJFUm z@{$sorjq&=llWEgrFi!^TdffGC5eyc068j~m+H73fhtCjGYL9+LI@ftQ zdY$E?o7q85(kB{{@1Nwacq+TJWM6|Kw$yh;Lf5tN<)p3&sZosgRWcuEuP>>~R?$C0 z?xXZK?du#!noGG};6%AzdzBf*G}n$TN}1BuE(x!@%TM;{X8sCzWfz)+*ZnUFuY`54 
zh1dBjr+Oqe0@+Ji&$5}-EeeE`gw2XAFFtPk$Y7ehNE&CBT$?_e>!H~Yjcvmh`0dR} z6iED-81Mf6@)?)cn%oAaGkG_12Bm#GtzOE4_nd5MDx8U}6X4lw72qe;trPHDFKALh z3_4Cw#KCb0jFI+DXdKtv8XgwNS{ekTGxp$=N|?k!%knF3bofCUi0td0aEr~+L15P)?{on=6M;YfrtFCFmeY1ZiFK~#w>8I3sFSm zv3S_t)Hj1KFhSTZ$Uta;c65D=wL`(R3%-klr? z+4k~oJl!FvkS!Q;lb26>rRlV(tT@<7D5zXy21jH~R;;9xOc^A&3UhcT&VUec#M8Pm zb2(6?oVnhLLG>*PX=g!~=u%N^ki%BiF=E=K;f!&2E)Kt*iZo4DP!j0MeqR#xYHOxC z!x@aABkpnrA>^EJb8_9E^|mT0-Bl!Brn|}&3u)HMoN)0*G>2~9m)|KXW& zOZc=52ktJ~7BVU;y~I@%Nta%Th3CID3tjZLL_c8t*zr#Ce`pZpFZr zd-=IcxvE;Lx>xcBq5rh5FT=PX^A%r`Jvi;iq88$_kHMNY*UpduF8=m8CJf22<&b#& zrG^lKRID40>?QN+9|6-82xsd0hqnoE1e=W)P!a#w1`M+2i8539-#LE0Ouo3$b@(1p zqb!A&%e-Y4$fxCZb5Fscym{vtg@ox3QFgg>BN|^ql+tBA1SZSLvAq`moDW;@aTxm*2q%+eL7KwXXM7S zc)saO9zTFCu&SD+4d`{0(B$YE{>!hU)bzh}ao%+hT50VV{PRy%b|PS|q3o=~nOgzh zDL%0q>SoAE+nyLqrp)z#*;2MJNWom5kbJ^h$;>H}2P(ScbwFNG zR1->^;=!QA91o6s#U=XQScObV9v$NmWg>3{vy;95(_4QP@w0g0n7WB_kK_N1q8Bg+(T6y_O)*`>$E zz!BK7em&t|pU^K>RA~QYj%$J^s1tk_jhdTlL@q$JuCV* zfrUpQFN`IY5WxG!AIqm7CB02jb2F9`?^F{+S&SWZ+Fxa6 zuzVzwCsaSn%`*hz%52Z_{<3oVy(ApumdIXrbIdft28EN^IXJ+U`odU$*-ORop#BoI z;;zTWisd>yJi1`_^Gg}WXrnEpexGF7ba;=C(;v5rW7Sq9`<{s1-UW?jFTmVbxjbge z<)vZJ){HQ?x%PQyKfI*s?*42dN;8MGN?|}*byx6GFw;e!IGg|89XK`Sota-!i?JbD|0j`(fPe@Mx$Go!8g?Ujg!$08518 zow4EDf8$rX_}lh&ETP!LUV2(r_C5P%`}MWI(mnh6 zJg+M(_@b_8yBD~^RB*((ZT2QfpJh6cyXgZ+^_Hk4 z++D;R3x|gMcqH;?e`+7f$;bPR;tze6LSu>wUdxIqrH}t%y{A(b@mH-^kS2CZnux2| zJ}=e*370kgQR`2Mu8P-a#>>lPpB6^}u=60c7EyiqMbKI7UCEm&=6H|TJo^kP=~<*B zn@;^aB%_*py*tg4~rR*%MMM`zgsm^#A9a@Qdd7`w4#yHW;Me`UGH{1Z{!3uxKQ z8t_(T2ba@%?Q}m?W5Ts0cRJn@N-jOBKCBd6VKrvEE0pJ_kv1JXqDoEUEb6q?Ys)Tu z;UH50L3}T^96Xc2P#5>n#XVYD{F=1&-Df`cUXty++SnGLf%KrWsUPKJjb=XsGFY?w zWOPSw&pjE0CIN3kg78d$7%V0{d`zbzbF4Ag_IuJH*q0!(9o=;LI_FWEPnCT-q`6C< z4(b|x+OONgOxhtb*@rRd5xm-uuQ)n(WE`d;eu6IXQp7!)t!L~P)$*5Uc{pQ#*KDOa zW0`>`DJMR7LRV($g09fxFN?MmyX!OEo`}AxJgauPaS=&tp_c`EPk6C%vdbPG35!D2e_OF)=NQbZ52&&j81<$N^6`zie8BoPfJ6!%It#RXf~_R{!5u-2&PloKRCRWd)$h# 
zK&|J%)2f#E$K2BA;An!AdBhA@irlqg@*U}MY4b9+b6q6dWMXSif2Z4!mzHw}x`?*x4Rm;vSTZtLgzS?cE~5Jt>&_H&*Ld%x2_YUnrDey(dVw2!ttGzEJ5bkf`BzY9%@ zvY7|`Mf(Iv`?{gRr+q;H<~ujk1kz4^?1LFoYORE$_g1&=!m*jsU8HXG>^b>!gwG*F zlg|1)sH-wk-q6_3ZE?6n4fp%WllTWK%tyY{xRKs0gzTPxQPmY`LU21l1nmuiRm>$j z%So*Aac7IRUV%h>K+8w*gdgODnbMYPGKgjl)4?44>w1h%7bI39{|Xd>(z>B>pCH0{ zaG!qMLu2Sd0cv`tn8GK`E%lKTd(f1d%pnL==TGE~XR=?O$!X3<2tZxhpJaV)^0T)z zCgZe?!|lNd*8)h}IV(cMW&A4O43Dvf8>uC09UQvB=@~Kuw6i&jK`e#;nw4bfZ0pV zn7}uR2O3WLuAYuXi!IeB(uo3WeBOlP4jPjSL;omvuEBw=y4zfvtBWhEj$Le&tc}w7 z#r0_kH61dDa~fZ)*2^NjZL(U@ftu{UYTs~K>OR(GT!gO8n*ALK*4|D1w}FoXu18=| z$<4J7t7XzQ#cylg^FNM*W*d5to{rAp)C?((B1NA*dXW0{QqI-lkQ-yu=;GL+4?IduDerpzoFKJL+JcQYBL>)U1V#^l1mK)k_s)zC7eN%Ze)-xY1`{Uja7?Zy?4i`1_ z_7Ez0N!7E}G=|-R8dv}*v3l)D-N}9t#%c_TqMz0K=AAWmakd?!?VXi$a9mgVc|zAo z7PrSb2<`a?Q!rK8GGV9a>?1QGAoM*V1aUS*O{=^D^Gn(d&6@_#rv}gI$`CK62G1^O z@WPS?7isW-8`R-P5Ho)mqyYl8GUTQKjLQ((F5I#3zdlrpSY&Ah$$8(N64lSw?S^FS zoVqN{MyS5XQ*3O$6Uua+2A0wkhdyDgtFqnX;4mTlS2;DG=R0;%Z`f%|UTs-dF{@pn z(+5iiYQHK1Z}X}h%F)iwYHhEsw9anOBLl+;8`&?p>*O#l85rSPyG5s?3q6HkAqg(d zF04#dWBFi$iyiz*0+ME7fEE%!#}YtF)lvgTmH@=LhuKHdSme_-z}B&}Lu1AaoXQ;;4E4Yo;zgTv27SBbgEHqeo?I)9@$Zit6WK2VGPnZ=spsW&^YY zw?#pKesBrs%{)6`QHbWzW>z_u?pfDF!rZ7Bd3I!;i0U@izRJ-x0AJUELu_}$032Sz-7wc!kb7wtzarlNizp()+N;iUZyuqweR{$W-NL|r20hoXV ztOOvK2SD}gun)XL3bjnrGn&$>a${o|&hm8b?7Gscn0L{G9FpXa&-va`zerXo348S&>Er@v zSIKw0Y;2PB7?W`j^iQ#TGjb)ikzEK) z;C?z2+RzA;E`_bG^H7TeOj6R_92!2vRp9SKX*j57M}TsS%9w979ya??01Qs-V>Jl1 z)#88+8@ehB=T@?0F|DX#fGA0lV5~A=vH#-&x7ZBlD~Iz$vw9k+CLL+J&0g!u*gPL_ zqGdxBEm6goGS0TtIhu96#O6ukMW>D6yEj@+jWnd7q5S91CTyHZ*fuj0kOpyG1g--O9om5dumU8{(BA+uqX=C&Hg5x!QyFCDdj8 zjR+{BC+3B~cNqaCDHDwPBpVvKMLzQrY=zd zT(<$k!LpvZfP8D5u(ovvSXHnr`xsXD=Gr}aFA}*QTu^MQyoxwt3DX!jl#?_<*@uDa zLEfG6JHRO43=ScV9hj++V60eFLqI6OK>TP#e9DK>AR2`V8YwAnsq$&4zpPs@Jyg^$ z%i2IuTW$_&dcWc>&Fn8k<&e)?7346<*9jOWTEbfaG5u1z-jy5cR5}XJUMw6Y$u}eR zg5LvvN-X5^C$$o1Zjs@l_;Z=(j}YezR2RpfCE(X>FJ2w)BQ8u?DXL6`0r4**+aD6f ziTrFsYi%~yYQ5#f+2d@?>ZGX#lqK3)-e}f5;&~8Xnk}qu6aX~JS;zQa;Y+CFTk7k* 
z2oeLp1UJSpqH;kbacQQqp+Y&S7D%*N9R0PuS#gX%**=)yminfuGNLav5*uDeB%Ta- zx5ni3#u>#4zT#|Qc(^2p4y=M|6tj;_j=eC&Te{xfxQ$oZVg|sW$H)lKA$P@7q02y? zB*SJO7AGa%&JT@$_5$6OlP3iXHMBJFmZHw_Nc6kd2X_ROvpgixxLEeE8bAs7D&KFe z&DSMBF-0g<*V-fM+E>qhoJL4lsuwrE6B6y&1q2uWEfwYZ1+Mw{owd*FHwQcA%Znd+ zq!_;!VJ&Y*aYD5*H*mKU?*Rim!n;wN{!QP0p0~v8#MDUXtZm~h&PAB9{$(FZZ0^M1 zOyRp{`R~-4M5=_MAk~FNy)9B*gzvR*xe9i%J+nW;jMt!3vFIMMOOz?@O8cutlC9v* z7+NAp|9T5Lu-I}qG+Qc{^N0c)q#etJ$r94AQtqJPaAUST2V6hN?$AZ@Xl5U6Q7uC` z#^nxD&ovnQX=T4&hd^3e>h~<+t-Rc77~wl0wXLKKirwFDo0+u5Oc?W4@{=~WgIn?y zXYg^)5}vP4LFKKD%?FIZme>Lq{OEXjCi6mL)JsgCbjl?Ny-q~ZyFjGn*m`IK?68-x zkSXU#n%T^q5>Xoli1Iw>`+HFSC5)|s17BzNo_7Pw*}-nIl+yu4NV zy;64-#}P-+iFQ8pTr>ABoFvaW1*ZwwPsny@Y;~kOh(R9mrx^%_0_6%i)lt52*6-Ag z<;{AnWyXrX6jx?h;`mrB4rHcPo+8(z1vQJ+$TD%`HROzF-k|l(Ym%I$`ZgYW{@`6r zR`|JkzwU9hf{I4MlwgDzK>BtTXI0w4`FxsTKHMG0saZS&O&>3exJb>|o^xqxI1fKb zFX>a8-H=94UKN>z+`kc%wiaAot!44+b%0C8XfEFp(ik? zdt6xPn^f}i6{GuTqkp+Gi1(;0xfcB8Y`4J~#O%ghHM%*2xKnH>8e$@HnT#6UgENTb zOH=+W=fh`(02HbZI)e`58~c_D*4gN06gM_zdR-d!uvr$$d=T4? 
zV~5#jfLX=^Qh(RyouSdq9MPP>q4T;%oIc#q>B9_9(ha$jIv@#=4j~rgun0|y8J|ZD z5o~N;G+*?05bHpRHC;ktO6r2bREthbO$4@eN1`u+Ko$DpqbhyvHhn>?0g{N8g`)6^ zKb>lfM?`6%$yRUh+r(-WSx_h`nBMhp-J}~tc!!-H#5e{~rj8h~qfG3@q0o}C#*WVB zBqDQjV*01esvKJ4ByPo&TyO&y-hp=^du#lKvlNv#n-h1d9OOV)WQmW0>;ZM={}p&mgdzetg!j!s(7yKoj_ z)uiq&+lFSf-_DM1YxVXIinPnwk z!@r1P$w+pb)>MZR>If6G8CTT4H~SL_h0ez2J5(cx6BO$!z$KpSBl2eULB2Rv%?BW#W^19Y&@Y)*i3{5Cw+gR_@{4n}o+KB>knY9Qn^ zVV3MXgQ($9s^P9`4F{Q9FcQge7L3r7ZiLvJ?C6M5`5;C{F(`44;@Lk&wRF%8OdWOa z)J#FJDet1Xqh@u)f%#F_kCG^$HPkKs?rPkdC}i7@nM6?7aP&M1UJ@0^CpSQ>3@>yN z4U{m93Gsn6Oc!-Nwl?L3cn4olY%KdSGmd1)5f`A!krXKw37rkfGw|c7JTAiuyP=|5Kn=-mF#$@a8@R4z_pW`0jncgJ zbaR$*8i-Y)OUKe)$amSX-*dS<+BCkO#!V{W(O!N4jE8VtJ}O*E z5!}}#DouK^_c!QRxC*@G2-gaj?0FzC0WN{5{R|8yD|SCf0wh8fZ=m}Wi; z{!@)5o|G3W)WC>nR+2*(3`bq0J(NOi;QL`@WfLLdN;iuXPq}>}U80gFu>pGJT~ewM z>-u#UB_-1^;4IVZc%XL@Rg6AL;8K}}O7nnWi3|z3a#ZXsOO9m^pMe-fN_F77hu{@$ z@rtyH#JnOo5~L!8@8B0^8ZUdQ{-jK@*1((ys|W?bX4$gqRun*>GOOf72%NIrtdJ>H z2+a%b&aXjVXGWbnl-V@MRUvgq$V77zYOzU#zMNk?yfuC)A;oG1ZhYd)Z4P->?JEr zlc}^+g$$h~bvuYxdHo=8WAp859sXD@nUs}?f-0-6IDK@<0?Fi9p}4aOSl;DiVmk+5 zX-s2hQh2eeFe zXJLTj3p{wF|1wt0&>8tQdR<*ldljM*Z0tta>18Jr|E7!IbZ1R&6`3*OMhOUo?92VW z-0nQ0+Q|eb8{5l%5Gzf$y2$$2QvaK}(s(rD&$)WsBnp0MFC?u3CE>dsTY;ag_ioVd zb9Hvb;N zZ2F`sIOmM*F}z>EIbAsh?X0dOoG$80%oZu@MT}$%IE?(PuBiO;dX7=GPrvEbEZG## zalaX~=`f*u`RnWdUGHwbv-WqX%<~Nnann6x;aCYs{wZ-5*lz{Nk`k97*^x8iJ|&V* z4Q}=11S6Y+Q>6fCwY(ss<9fAEufWxAUBT6yuHb4H*J3YC1&(rXgJ5ju;O2Qjx40};g$8n5S$ulCZi2uQ641l_fLHd3#!mwhes7sQOexmH`l zT)6qDdjp$f^`{&h2u3vDf)n-GDV0@0G^&VG9gm>YVVafPOiPLmej6>D{~e;rt~Tsp zw>`Bj8Pd`*#4BJVY|(yyaRg(WTUH`k6j1)bg;Y4zmgwFGY@b>TYdN(u$@LLW(?xSo$f~PEQVoZ_y=Pa9BtiZo9(ucj>~>Fn`AvEF<)t^b z%5bTD-=#Y$eDF{VJqiyykPmO&M-k_W1(Uot6@ zi(l49Ln;O03>ziq@-nJN4Ptr~`03arA_C4b`_;0ymx$<$=CUjgjVh&cHGXOw-FI`@KX7C2Bo&C?P}( z$U@uAw=>sRUZS+R{xSztF1-A`518F3^>*2T>?6x%0hZ$ikl1%g+k<(bHPxk1cJg5M05PLUEIo@wjGVkHm8z)# zK`th+W_vMycfH8-*8jTWnfD*)D?&2<=Cwq=rp4wIA?TCbfX`xkvGbAa6^*T8;u`=L 
z2$&B!1|Fs%dOJY#Z-P=_TDJsf;)l>G;?V9ja)M!J`)n{};{r3F&b~oW59Jaw-$y$} zCxKnzG^*IqALlo$Xgt{_<4x>s@8`6sRx4`v2`Is%V%~i$`;eIsY(llOU8p<^tO`+e zU{nATql^mKgT1DVUwV>eJ|Y^W0*F8wYyObBk?v0;?3hQKM`n@6AGxnzfdwOj^yAx` z3Z4K>H~Tt9FVi^a5#$c1s>!!I*yAM3WxA1oVDf9k`pXUw*ee>~Zdv#QYAePVebag2 zcm*XrMR$>~A`e(~Z9nV|+-s@8G1WWV=A;$T$l*38NQRDz zPiBQi6tpL~plQCH0(|-~G{A_9s`yd>*Bm1ftKGT-j`EFq$x+gJl$wa75uBpkU|iv? zPCF>ZcW6Rq^PJ9efQWO0u`HgFiN_Yx61>c;`i{x|_#-_0Aw zDP{m_u^0;?S%7Gsm$hjw^10W#e5F}$ig&|1TkuW=$}rlqUtA7ML`jte#Z$h;!(=W2 z81jAaZdtqwhXRUOXlRt>{30oe6qzYM&+I!|nK)2!P5lD6Du=|Kb2H|Ay0)u3COuJ7#+w!NHrloL1@6aO4d4>k%14{TaKru`20Np0oMswW`HH&y9dn%EJ)$I1IsMjzd5-d7^;-^FMZN0H~T z*n9)k7=sL01%WD8LC^h{poew^18Cul9o)P>WsKYdJ2PU)T82q7QSj(lLl8}G&pvEw zKw)4KkgG#n<24!y2mjZ_&UU_EBY*6Yeffk~Aub_x1nDijekCF`eb%RqSIhu_QO}Sc z0IfTwLXuI8kx5rvtF3r2GPhMz*D!I6iv@?KqQ(>3{G;_O5ge1ZR!amUpzC4ovmipTRRJb-2@?{)J|nL( zQG%I>P{y( z+K$29rh{k|u-&R$C#E&?kj7Q&o?+8lWw+kP7qx2!W0oCJE=k5 zkB$?j#s!Ivr8%K(a_GbEy5bHP!*z!U9)D9ydww<_d*<$oFEroFD+zoS&Jil1lJV@D zazQGT@XV8wy_HhI%YO`N_D(AK)(cGz1g|xqsZO!`lY29m@tw8%CFM{D2kC6tpWP7V zn_FN>UMD4+&_e5(C*!nHzuQ~q;F^eOx_wb`>@4!+6TKCaEL>)yzw?=$_y#&wek}jE zXF9_2S3bZd45oE@vM=gVLYV7XwNnmgDj&jW)f5d&%a-$%2eQ4W8U%;H56Q$HqIuAI zPyk~LXzCbB)YFdCyUDw2*{`hDCnxlY2Jo!We;najtG%uk<6Qe}{dKiJ;Tr{G7br=N zw|^=6*BZIP1zxNM2 z4?2nAXt*-n9Ui{nB-BoEZ1{V*eq)i|zidi(WWwFAa|W0}&jG7;n#?}FnjNlTzP2ge z^2P|HS41$SVnXr0*4n`{S`w|M?#DE zo>tRlG<(NK!V!4)Rv1{)z+GAtCElRKkWvv>l9+_emfzsodYB5;R2`boidz zoA`0Bqr)p+VuwHVUF@(ZX=7t|o2Y{0q|XVo>e(%1UwpPWEg`!!Ap+^BZK%n z#zewp+SaIeiGUmrA>~(vl;?EnB~o5iV7^LHibM8%i5TfvDV)|xJd1IUg_IZbM1>ro z6%92{ATZ6o1S^9sCJOkO4>`}oxX9{x8T0bQPiUiM&Maf2pFo_dAHzeSJQwjoF zKalILR0J9$3WAvQn8m$WO%)B_7O8ydUo5G7=AVsJKJy(R6(qs+lgeC`RN!u-qyhtc zX{3S_GFR<&wGfHPXVES(*Pn#nlk=q%)kbFwZO!j30psB~CNp^-Y+$8u!FcvDu%l=O zf0fyyqX1CILe(PCvNfe`q)=VRW$}!&V+8si%0SCVQYX>AqdHV8=G4K47i*IbiUb|4 z9~FV*oM_{S+K^^L8>c9%PY=_`aouA$WlkkUn8k{8A5|fBg79kgYpboUv6q}GsDLCf zofJH@20@@)qE4Yzr{mc#tIOxPmDSp(C~?;Y3yEPAq$#zg;MJ0p#3+58RdYc1G*_)q 
zn8P6A$slisdnHv-3rSJ%E~PVZ6z#7LfLoC_pUEej|AQPDd#1Ot$?>5NSdl`<*$Y<^ z^HnqRO`rmih+-0go&nG;b@F$H7!nMys^fekUm-EgyX%Tsue`hd1W_m&pI1E!s<>kZ z_3Jow4RD3OXHtw0Y|z-~m_6I{n6df}jPZ6n`zf^V68I+(>%ByJ*7%AuUW-@OQ7rNk zqcKoSEcA+Bi%PrfWos?-Jc?mOSX$@2vXB9<+5lMVSQG+awuMn5 zVPtK9Ye+rYDnzf_Om;HtXv*3jxg3j?tS0Maec3$Kr|dK_RuUgRV2qdE4UH!uoaX!U zRXe5N6Llr`%hk3uW-FfH9YnP%U-?A7VkhiH1yI|G*l}6aN?K5Tk!&&W&H6@Af5s;==8ygo$xS11Q zf7+$Ue&D{Gd1>D56$i1PuG^b$=fR3R)9qUa{zMq5GCCAnl+i&h7&Y-*aeNdV8L4*T%Q@asTB%YZaBmVk7Ng2l$iW!7 z;RFG`y7j0KYD-~*92vxA4bHwXI5lA%3^sPr>lHR;;=-h?MKI3H^~Qt3?PDGr;;BtyqN2YZ+vLh#IPD5<@YGwHwRM~FaF`j zd477`e9NqtS}61HmLk9R^;0xlIA58L8TM_?PwD z5EO`FHyPeB8k-`Va0eeX@og=(qMve4)BZ0MVpVM zSi<^#yAn$nJ3!~czcNO4GE+>1d{sTkcbw} z4WkHS;a?axDsAdQD!dS#G}x& zf~?G=I<%zG%N5WnIh4cAm*Y^-Y;Y)v|8~it_-??VRL6_rP@wN>9LnOoCZEWVLz!Zh za41E8gzJ(h))Zg(`1`Y>Q+$Sth$0b7z4)7t{A5_7M!1LRS4{;fJ{`ALr z{;J~quf%iD{(kYk|09^efDN7cDm0iCpXQ>^w*}E@KLt~rMkx{3FF;Ei5u>2AXoCsy zFtl?X#UJLE{q=<7(ptGshbG?@^~|%BJ_6Hl%V4$Q`Iw$W-PWNHH?aMP? zrt}QVxAe|JCX`3jxNHp8lK_9DxTh0{*ORKOP5)Uqs^tgtZ3f|OWllRqKntEr(Z9(X zm@7cJrr7;KINB_KvcF$fK3!e>`CguWjX!#NaI7(w0_l)p7wmLJVXOgVf#NcLaLGux zRlng1Cv==lcSB=FTkx&XYvQVZ-O{g|UtHEvcs+I=)e9Tb34M9ZVmSdgcEN#9i4O#qxp;fj#xuKUk`>QS?^ce*B9+krn^`_ji-U z%=>pFJWzijRr<@uhMPuZ+8Lj{i8D_5lh$UXpxMX9-#l?WK6uHcYJbn=T(orh6R9Qd7I@&B{;Hb8b=)xGEW zxL!oRQ3Gkxh9?(SIaeJ8IQ-6 za>WgnZ2%o1(cwG!1MdBz0bYp z_LpQ?vK`6@_qpesefC~^?X}llYwfkxZt0+J&L5(5$1A-~LT}a!WD7i)CB+$&Ref^n zlI-EvP7VTF7aWpbty@j^h1LR@S+y;xRED#C*uWH-p=YLBQSS}n30e@Eva|0a<>;d1 zgpt8vHYW}BZErj}f>@;imt%4r-!qETs3H=5Amfb?5 z%w8c(dk8epS^cTC0=A$>ygPF+)Y$9*lvqRY>NmIfYp?+i0F5i ze5t0HhvwoI5!QLPh~UX*iwJ_gL^HQ|d@FM9R1@uGiw^^&ZNGMb#F@*MELirI7WF%( zMQ^Twr+lzM22l}<#06;xbRR32g9XNWpDk#yhed=0b)n7`irh1!$e6t1ATa)Fr#ec$ z+E3lc=jLJPNd~UIg3PioSzAJ>0;XQ9)sP?pYq5eV!AcuEzu*XFecKI8V8D3m#JC_K zaEZSn&>|#w92OxtAV{Q{KxPd0ze6~%RXoKpv9!4vM(bmwTJ2Rum^!5g@-?~ze#^Xt z;z~HsY1(-dO2fn29UziY@5idFc#&2-i7gfQd@1|N?{38}UiHB4tI4;^ve{vDK*c4C zVYHr+#5z%<o#?H2jzBK^&M#BWe<^v+l&0giUXurA%nl5N|y_j5tI 
zyN9Y-rX4^h1&{+1kUH`G9yWDq69dNO9z+3oTX`8kmpZM&fh7-ksxXDpWB{0!L;I7mjBY#o7pwG(jVTH8qWkBP5p{FAiPO5lTjq-7nP8L6x$=b>J0#_K$qI(ykjW!w#N z$7L~dne)`yw}I-CZ|O`1t*y3hSEpE2v(eE<2`X7bxtrDYu>M>> zk~~Fqk_r`!gs;7qG7Vif3qiMf%(OHq>dBFQF8~N1plYI@0IQA|Vu`2?R~Wb`;W3J< z@0a@3PM7GYa#(i_XLNrfPKF7i5e6QW02O+ER# zD2l%6EMWs2c1mN~r96t*z}YFW5{rp#_K2;oU}}Fyn3NZR$2@TJtlo?xwELdp6tgUrYCz(sCYw# zMbqZ0;9cQO!9Rxjy)xJtw%EGB-waW7lw}@EHaUV#s$-#XvRzZXVlRbuJS*v8qG|N& z@`bd1Y$bPkHu_KT7K*+b{{WV0G;8MEIYkZb)P4gads8S7Pyx3qHV_=|(v_J^G&1@A z9wwgT)mySx%bH=eo`Y(L4(`e1V?gKZFZlU=AmZ47sEAkyj{ba*TvCl_ef|&nBoCx^ zl)^v-{Lt=Eo$@;9$@bV^dY9w=16Qk_g9>7BKFTUo@xK1 z_>PaqWm=)eWeSKS@z6}0?! zd+D+4)!D0sfxn6b89*3-g5|YCy4f%UQuzzWz`XT@LCfcOi4avW&brMD%CPpHrKI_Gf8yR4UC-`W#~V-HJ0S>T;We z5P`_cUeh0PZoq?@UMl3vaRZ8Fk9Bmt3o#V#Srno1$xs%+_6GmY+^xjejnzmp z#rbzEzpzA%Zn$a8@RILXvQ#YA4z}c1F6iCT%Ppqz(rgZXQCJP&D3^V6@Qa41Uzp9o zUyVX1{^K5?ZMjf!){j}RWmzUOIjUkbQ!rSBW@lmUaXZYNk|D*?_*F>BM7H8tAhTF< zf-vkiwD*7Ceoa`_(!8UQ8(L+AQ%b*HMl8&3kZ>zQW48YzyAo400`B*4BeQgFX+&Hy zn<>US=UvejX>-wbp{oqKcS`IXh?7O-IL4i-KN^VR7+Dp{PqNnL{wvBo{I624)_Usx zfA|!|vMQk5geEGi3<%XmfQ~lcgUGt=Gz4mRK*B(_SZYO9!Wv%pMnu;$CSlNfRc=@; zJX?q2D^B<6Rzz(x6 ziIzu?azceA;n`!Ay={H{wcLeLN}bw?fxA1(g(q5Zc%Y`-K}k_Y<**T;AvW#U;H>Z5#_z6{}7;EU4D2x>g1!N%$1YFc4?t z9ox?+n_@^szAvQpisiq|-&0oO={WDpLEPc2KBEHA=*F3g-;Dv70}>XjEusoyp|~?B zmg3wh#b(l7ed&u$UwP%p@q<*>q5%t=6 zX%L6CLU@uNkg;;H6Ch=dPVAPWfsW#1RPg~8^1nh)Fd&wlEVwc_PJAq@ideCRpzQd} zA;lY-f6_q>iH=||C7)(>Rr=Z4luYa8>|brbiGc~RrI*VM3%_zc@8`f zTAK=sEJ$Mr|A==mEV>fM8)r*6`q&TCn683Po;us?2GgDj9XCj~+ut449MTh2o6de3 z8-*|gyET9e!EsDw%3mvaeR`E|ttdf_t>vNGL{RGIDauW48oq zVHtb0L;pcUr@KK|yOu$n%^lmZ_BqXL?Qs!*2=5{q0V<%zRhk9jlmK?avR{wJ|JciJ z2XuFkJ2y>rciJl7Z-2+xEO4}Q<^uX~+V??xXOuJbG%4{v-%d=8fHSfSfMPSp_|YNc+2^CkNtqPZ5#0iO+$^V{Cr}IEV|8NKZNH$bHC};V@SY z3XijW{LN9vA>kG*=}||mZaf7BQ19j|57`MlYNPI??%Jq3rMoe!{DeV0)rNXc{rpgm zVaFHLLfts$pLx_B63767&Dv1M3Eqaf{S-f}-=JdL!e34$=kEjud%N-5 zj;FFQ731YJGzaRoNe(e@`k6~GPOP)0zI>i%mXo9W%x73PXFbDiH}e@?M+1?Qz-89( 
z-=W=F1QF??GyIWXOb!1XL?XN>8ov354rPab&-sV`S?JP1Pq8^Gj|z5B%$0)4stqJy zM1-t?K0EtCt`*vOoq19q18iwDlyN;y9_LH>CRDT4w##E1!ARhp`!VFfDn{#C@=-)( zlcDu#DfxT6GgbyCjCOP1D|`)(L-;0?BRopMb@fZ;`rZuZJTCxzaINgaQr{r`na15@C`Wa; zeLZEXRw7ACxeYyDSa!X0ZKrO!PH%h?EO(K^yJgL@d`HU7qWWWRRyXNg*gv$5_cr*A?SNdHEdJ zfI=6opT1v#D-~|m)Qucn91ETA1p!G$TiWIFu{ZJ)1`gqX7y*~hn;Hc+m`pt!{Jee$ z={HK=i9(p6PSe^$*<)`ojXRFld&vyZY03eHg0H^dy#me_+i2FSXJe@a+(|8z zH^3$9*PmX}evw48Gg=s)n_>58K|C5ToG@GLMyhM(Ajoe9^CJ6H7^ENelY6NwZ#^Bn zRb3wB_Q15P@JW*seo5v;epR6!W_%zoAd^(Yubq>FaZKgMhXBmurp#frRmCQCdxB89 zhEp|{QWIwb5jxz{l^vYnIYXhv94*WxVS~HWgxD>|^opzNeM#%y3Bmf$24Vc6=Am@^p&F8ca_reu%N*~P&g@Bg^)zDfGmQUit zBS@J~o6p#9n&W)&>wm;9BtGJ>fonS z^oE^(aWm!QDXGGo#@eH82VI&bUCl-@KuM4=f>TYb{>FeFp#l8L_1|lT+{ub~^I76) z|M<0}=@z@t<16XVHma&xQU^RUk{^RH^E47A7b_lvE$Jj2AUA(n*<&80RnkoU*++1-Z{>|Kg^Km>u?@>#KL-~Gb`?_l0 zAT|O%AcaxD^!5`QT#`@d_;fHjro+X!?$%jeRm;T>vY|2X>mo35-hlThsT(7Q%}(c+ z8@?(;I1xZmRdTYsxj+keW*HI76Q#Rt%+kqEXSdDg);1|uFQ{#5C*LYz5Wgb*#ps9g z%`i^LosOyw0H2)d6c*z1%OF$~blkW;a<9m>lIXra!c z6+_eql!`qcDDn=19nNj2iM1BQDLL4s24;=?0YGb>k3<)DTzqR9v&DACht*15eB`RL zTCbYBR##nm)qJ(mDjO;auBn=ByQ)OG?TL&=z;pHxM0^N4Hc>gCOKI1b!6XaA9xXbc zoKPjZ_}Qyl&Z|_SO0QMCjhV#Dc0w<>m_1<&Mu~5H#hf=AV~2-XV2zU57vALSCDgyW zUP+M=qUGP3za0};nh*J!&|{3^BtL0k5-)>q^{ilJyCSWlbZBjE)-7?YUE`;1d_&2&MF#@*EVpko9nZ;&ZQJ#7>m_ zU#Xj?k@N%VP(XnbYPrE$G?a_mYL`8^c4$X9l-g+moPpc;S{zlJ2mdJgrk6;SoP;Gr zYNyk9oYJXNS3sd~fOw$DHgI8?U-^q(r%8deBU-O4*)tEVBSvvua1Uj;+)M2x^C|mP z3E+1#FOX_CHG8x~(FmS`>JEcOh7_`sycQh-(H)FH5N~9c;Ppw+Z`G*HHj^z1YU zNbjfZoR8d}_dk#g?*l4hV*MvsO%SSds)EC;ts6Te8kz&bt}0>=p^Frt~Hd8;YjXX%wUX zN#j=A3Of@oyzttm&R;u{(XWK<+7YQQy!QWo{@R(%$Xz+?8vR?eo~sqJrQq&o>8X8I zgTN&#F+)^>l0>#Pr?jazv8k%2rIbc_SKitz@>5(u<5U6AfcP_$-*G*_BTS>l+1qggq; z6GV*XD5SVD)c^2Hh$8zH>Vi*_t~~PpFYzl3c+x<`%_g)45-*+o+vF{T@NF5gTu@nR zB0q1r(!J5JneV&!x#H9{#M~zM$)-t&4PVk+t7l6$XXV#1gDeJtBYJ8X`*yA~$-Aov z%+f>Cp4ax_8qoR9N^`(Z&C)?Nl5IT*rx2qeb@H4zR%RR4EcaRV{gC2H+tNr5$Ps=> z09c_>VQ&|2YD6yR{eEnAt3n-i)75fAsz2bg`m^tX!I(H@D214W&16aq+_caXVBM;v 
zy*Gt*F4`#PVr-s;IS1Lg>B6dL=^V$$z+aJ#No0kLtb`QzyU2rGz{Z1}%Jq4l{RB|< z8}b3SD1fhBlNk$_<2_IhbaGR)Ted!`_pXlvimsyN9d?oqh%f+K1Dz1*fr5xAM6DpL z4MDxAMmUD6b+i%1LvWIxgUdkl;KI2*j1H|>U%qyY`gb4p4lWO{1o&`J>Xg8S)@s6F zA!^iQVmm0XoF!LPyPvN#uZX2PRVYT{3yu zGC^r9M0ZnW$}C7KEw4CVnzHwHHYV+xAeTn@^d96*0XAkQDV=0Jm80T#_tuo0AN?V? z_(}csZqk}Gl9LGY$}pC+stlT?hQnY$RH)lGDd{W>UN3X6o#9R)Kj=ejFA;sJzEEJf zF94R+Rc!qrnW1`@y}jPQWgvKt(xc0;jw-e-EXEPaJFl&xphx%D#3 z7800k1O`rXaG)#)lwku3U>sD#0;;_;2Mw}t<|YQt4b{TgPjlUdi%9x~oe>ea^L8Um z+U04~FW@iw4*6$Yh!ls;dUEV^MEL~@N-59T>24fDeq5^M#Zx?ZLH>Fr) z3p1Z4)jTGOK`j2iYlFE_`n7g2(6c~-tfMeqdIH zh!dIhWkDQM!f`y|*t%`}w^dK}(D@bdoH(}QTP_;MG$|Z|Y7MBG5i`Z)EF6PsXYuHu zm|VX_6wAoNl8+2!$sy-JF9fFSPYa;8NXiSM*AQsv^@wh>bD@`pWV>QXb{yMAyN9W$ z350MWPYbvBUX>l5BnKOr_-rj6%)VZ{y*Ar^pz!wJW*_?+iPk9m^F?_c#U1`6aJL|{ z(J-kk9T_Q!PUW^}YG7qXD8yp)2@prbHll&NWjD zICv%|vj~Mq!c8h?3c!vJa>W{>#Jn1fttbI@yckY zW^6Z#t)Og)v$0HKU$DfYG<$+(*>i%0NsgE=y9k&iG3l|G-5m^l_*tr9rtl#FLkCEL!w<(ie^7}tJ3o&zobWfksqc%cEh9&op+chbWy|fHX9~6_*~2& zX`NYUkYdZ<3&nlPbw%dO^LB+o7uA)wUuKF!qudD{yNu<4*!>(^wj z+L8@#kV2fCV0-FoGQ^d$F_;9iik~tq5n+KXoLmIDa?g~+lMWQ?t|A4{-Sb*5Agtea}h(s)pF`kOyb#8Sz&QmyVcJ?pY z5rAxga^poI1OPN;y2U)z&60j#`n< zkh056#kRN@?dpQvoplDEN4M9^E+0}NscMko6}yGe5-R)HKvVvUrchw@()Ob5G|S^1c}>fC z^6mbi;eW6o9Xa8NY0Jft9; z&vHixwrL+#+kyBG?c0Mbvw85cgM+9oMz8(Q4(U4x0huf@hQ{a&n8+srLP}9 zy0sTEO~|RW6lzCD74qVGfdfVhQp1p8+e&gVHu=q2{cIebB0^l_PzZ;RjAS8Lh$ z#(*BETDP^yD<6faUYU>eDo2t&G#lH9X|pa8yCqE{$Vv|;Tdy+l6eRqYyf^t5B6wBH zcdb~CBY7SA6)Nd6(cRRM#(R^0SlSAVHD;kU1HV1~6lo6aWC;nsOh8UlFXzdklx0QE z6#0qp+TxS19A<1gj7ioo9+}24pkv-mE@vyr`i}rSTzY!14#?n276LiIfWu z(cAP;$ynV#5#$d3L%+es%Su3dWaO!4gKWHJeXq+vZ(4gOd;D~;WhkxQ_Mj~^*jjg} zEd*TD>ec-6n;Z8~zkC_WQqR1?b3giyNuSygGj(S>-%8!tWp!j6rLFw1s!z^82p#-3 z`j!81@_WiefI7j3A9_nsK=xFI zjvovz`5Y&ACE3`gIm-BboK35oZOlj6eDiHK{#$o0#EAm!Fm%Dx$FUKecS304e4luXEZ48taX z5*G2vWXAl(wNc%>2UX{#Xmd%$7Ln67=9rwep-veW5Iv!7(dsmkh9hcJK-_^v=un)H zKI{rE=fXQkT<=uAc)gWdINQyx>{?fL4G)jd7X%)*72r*b^PRYR3RpQK?U9+(s>zLDO|od=E<$z$i~ 
zO_MoF&7(=b)?kbfX~SEM#JO(u_xp847}&>^U(C51NMFk@HbXj{GCq)LQjz~Q`9{A` z#~G>eYz0!;i4lVLI0o1*~|6Nr@_^Iyn3Y2)O+ zI6lW0$CT0l0-zHrdx$UN*xT`^)P2v-CiL|Y0&vTj0&w3S zJNi6Yu@vz1>j+z!yo8EQQB<7tELWbKsW28cVITqhW=oQRu&QvSOn#zct}Ci;1)mhX zC)$lq>WdTh#Uy{tdjiayq;Zq*9XG{$BIl+vyeFWY6BRri%zQ+Qu}##|0uTQN51N7L zK?XPvCIC|!FcmX|AKfs;of+wL#Spq!)T^p>@`Pki*m@@)#Mj31|`_iq~oLY~+Bw&{l@C_JM^u z@c}2r&H8{ed{C~u6^(i5d?-8vlOLG_;WiQ-S_`<=xE55D^oKZ!lhu&vO!1iWs@b-L zn+&_aHkH{CNoYWfC)i#Eb)H~3Bgsg%=gTmB(wP7cdGY3`{up!mMC8PR`TypeXE8gK z0eh8=tcgQX-)$VFWjtyO%9NzBJI-#@=w;X!mtptaB}`dS@;xH?`)V|=lpl|{tFCHJ;_7LLGwGXJ7@BY6>#6e7 z)L@(T*`y&wsqao2_uy1SM1}ThNp2H9`;e{rfKwP;vi z819AfXo?=?V_d2igR@6 z$w}2X6CJZ^1|4!w02>Q->(uOSnIRhzo0HlR?gG|6rqc0+ABU&%QXL%LDA1iUogzJKZciHhdU^z~W z9lSYM{svf7{=|Z?OwER+4Z<|Y5K>O+3P`6)eh!|bN<>UQdGu7%hBKYp&$PtZp^aRp zd0s~mG4`q{`VYpG&K7cul$4@{MdY+x@O08%#7coVo@|}}L$j9y-MUa?I|abA)uD)e zg2HgA%iHHbkq(;cph(TF4BoCMX1;UA3i!U>tx%x zh`~?YbH>{7k#JKN7oHC9*yRZLdhsslve)M zlb;z%c6FhImKlH@Gf+}$s9HS9v$QELknO;-EAf&)C@$D>Gbrk+`71*e$LrQq3Mqb`l^$E-F`25J(QEQ{~j);c_z<>{(tX)ihGhab1yIjtK~aY;GX0< zrmak?9H36=3Y#t?cAnI)Jxa@Ef1Md-7GWf!*5Xx7vCm%|Dt?{eDH2mxoj(HHfr4Eq(FZ1$tZO@J*0lu(J1M3sNIc!VmP#NjlF6BxWt(-^-k1Yr0D52n%L53g0H~gAkDYq8 zGd40=Yw8tMuHb;h>vHwVGDnDLO}(OWex=vh#rYm}saMe0Y&%p(nS#Yh{}nY{PW=Dl zOCA6J_~PMz*$aUGe|o9o|DRqw{9mPXu$P(sfBaI%|Bqih{IfLO4fp9YVfb1L_${Yg zM&<;%!#mY zqEG!CakU zH|PZygje@^DM*oVP6{Gy!bu+-C*UNnQE$h^MgVyA#opZ!7z|pdBYQj>ZoVl z4(7`53vqo<91`|~tvXoX=v_!b*7O_)I4KGTx%_uEa65y%mkmAi*x(EGtEXmdAM|1< zZ`rT#BGX{n=MC09c^rZ>Wrgf<+Tnw;tFfQuY~d4KRg;03E*`?X>9BIxAwHY@%Uz^x zDSmgP>pN2I^4(#+6J`-8m>DufT$x_&rNIO`p@_~gb;%^sEBqV9*jro)uX|hC4HmtR zq*u6KD9Tn}CB0e?d?pt;S%XmsWwUKbiTVI=a3m`u)<2wzKIwumrEP;Hsg_WZ(m*!H zx*&Ihb(CkCeoAfJsx=!7ld>C>>3|<92Fk-5Ac@;ica$!*mKc<2Uk*04a3s)X#$Eel z098tt01!!R+7RVZ)bWsM&Sp+aP#y=$Qo1;k0glsdMiMgIx?m_HEZfEe)=)D5)me-L zX9)S31_JR`S*5)u)$iK35BYg#8f_TT=sWgd z&~m5BBNwsH*6N5t>%o-4&O~q{B1kmTAWBC`mf_)iO|b(7x1)X&zSiFJpx)%1 zWtrpHgUu^gdg|F1gV&)Q*3+xadG1tjyV>-Z9&%j}yD@(Nzn^y*R7}8QM>je95*PH^ 
zU<#`*in_^Md>F3Yn_{BruygFDesRE3#;DC&COFq==K4S&{bnY&9M_t$qn^IBk0k3@ zcbJRpV9Vmf+&qFKkP@tP1Trlz^J1c`mjyvlELA3fFhNdT6`ob< zBR$i07ay}>5+(mxZ>Ig5VOau5I>vrCpvYLpR80O0^V%)r5NuoY3@uuL2AOJP{{y5n zM23a^yYwBU?(`k3CA8`t_|4*IKKvxmGRDq3vz>S;wk@hM*s|EdNwdI7vnJs3pae_C z7Pf)i94$L(Ln%G{>Lx5U!+c(jVo9B&VY8da25U(24e!X)*wFCqc&J$?yp~jl?Ei{K zyHtmS`&#RaU|J(B1PffRdXE1~sPx|40GLu7zJ|NH8!Zna?9s28|>YG2t5+AKO4K zGz4(~cn!ZWc8cCT2Bkk0QHK!p8U>RrkBGbyOX8s5(UHF(aLCo*nZp;vor=m(8Kwl$!ziOX`OWXnRs7 zT$e;$n6lIUoVpeP2VreeO&FF+y2w!&xnw+rv0foaNN#|b);y+hK1I5RRv^ER$Gn=XxlMUz^n=ytRUs#I!MMV!O^=zG4BiN0(lLqZd= z;?Jf}9Ld`1c9dLCNNS)DM#v&2e<-^((r6nbj#?@-MB0a0-W*$NBx zSD%HfA-vaOB>zVsm@(-Vo^p#k6Jb;mH!4cx+U6GU6lT4S-mJ<;a6kj2$))(X)^&SX zjctDAgQE8RpfJdg*jt!(I9qx1YjvQ!uWdm3F{4D=$nwg!i5IOKC(jilc%lMC5yA4D&5LXq zF_FPpbRWZ4Jjn^j^M!Z9B?Mq>krJk^<1Hh^=^ zs!1C_)Hgq?BnT7+u|0dc8em${XmJo1%{Q&olahMkyn-cZs2|J?2XkiCB3*q~#zgVD_KjOn5 z`7R{?j4gXb%r@V3Zkz8v_DKblWm!#2YIpMdb)7A~9u+z71|E&u<{hBXEaIItWqK5! z8_QPEd?Bx`xIe*EeOM)Z{SJ)fjzM z9W=9>pG?2tmI;?G&OG|qmmUpWEJdC$UG3o_mHQ$_=t-%h<1|Q!B4(FaW| z`#D_uaBw?=CE3r+9g6I=n0t!{x34<2vD@rE*BN*UB!Q}~*kylSAB?@Cl6skEi#XtRuo-&dm^4Por zRQ9dRnQRy2E$!{7f+_B*=Gqv6x+t@|A+ z{8zt}{pmBrO@-MrT-sm#a<)sieD#+lQheGG=Iw(OOkMK{Y~Zbot&5b;Q`c0pD>RtZ ztj}FIzb#f5MMRPrlfP#zQ1Y4OP^&j8R|q(CWh_@#A*G4(cVlv+I8j*BOO$ygCCg3d z8byR6N^KcT#&VgEWkN2{_t92Ko~{ahaXBugxxvHbUY_I9(CSIvt-wGeZpM(@3-`6^ z`IHrVs#uC=m!XCCmMh7!T-1H?n~i`#!4(myDtJ$a?&m@r!aaOlztI|{Z2^K4)xG!& zzuS^8;0hstH=(K6IR=Ii`6{^7(Ao^bmh(ZnW66A^!DEhA0tSeIXrrxqYw1TQLJ}93 zso8$7(NWfx^drx80c-Qr5`jEXY$pQ0PD{mH7YZ-Q_eHRFnQh#17h5t0v#rxD86U8} zo97DC4%-5;C6;;v@+{_}8D`8v8#QwI{9%7nd4SyU7^G>K{I#h6u~@0(NBcIn>P%o( zZpFT|SkG5;!(|PYHH*cuyjpdgr@gjy6|ozPmsJc-KAN-!Y?Xo9g47bVGiUTxXA!I& zN!~m#SAn|HmDD-Mo-GYBZ`IAHt4v9C%d}OuAP@hFX4kg{G=g=}6Mxy_n5y7v(+uLCvZEu^eg_gPxfL5(Of)tWNh)rn95U)hxKjr>i2>-f+*x#Bs+Y-Emru-oifZ;9YI@uNIc5S*T+gZ~~zRTmf98ggYikLr5)hL^_KSWQ0i z5+CF?m@nc`$H=G*wJ`(pk896i8+?wbh&Mcl2Dga6y=niJ2Vr!^E*2S;Jjm*hlSYel zxI|~wv)xXfsLN|ENxrp+M=2t&{nCURw!lOt=!!VKQF{{?KnpcRdb&d);H)<1nHkSz 
z&QqG$$K>}!6qr#{=fd-OAFM=WaD*TtP@MAq1#QHlxQlf~3jsY)k=a{XAqw@6pKa zLknIa+kP@Sggpb zQt}tiCQ2g;-X%B?R&P)6CvQJFV^4^R7!HCP0`U@tZ=5h~4)x^saPXzl4Z%OyZ#;Bq zV_k5%WXoGWo8zJ!kuiQMpm7rm-D6(VYFHRek5@ zQ60K8PW{~1B6@t5R7LxZS|zGOma0&~thNZiGOpN7 zlP6ehTfNECeuaok5SVTY0!lGg?-IeMI?{kdk)kx9vcX@6mn_E&DkQ zE3$RI2XI;Voq~cmJ{)oUF&d1M#hE)3PVk^W53;-o;~orJP?Y48E>e+Vu+WQO#JGY^-qRT{_StqF?<{F#5iS(r8y@uQjRpo;lsJQWwu|cO*-llO zyblC&ERUV-)}FUxDn$eUjl#`T8aF3=Uwp&kXRxgby&OpXiJXjvm;+Kn3_QHr>rg%Nj* z5_2*_;)O8Y7$Y>n&@BrKGD8*^rQ$?(bE!DlO_{+Ai%Z#p%#f~#1;PwHTg0~5VTNYB z2uvoDg5{Z^*Rdq`;N9glc>TV|pSK`u74D)ph2PesM6Ex$(B#yr)tR17IHvr<)3i+o zpt&NM+`!=WCoNmyvkiwkmNETv)2ox6~RcL!s)xDIY3Wf7R`VDRA zu(ha5Z+qf=&rh7`g(l9WoHo-;n?HI!(}sm(fpxyUZXlg_gW=Ny@P;b3R6?q;)uLx% zmNOaXX`O8IApZPC#ioi;RO*bL4yIR0wK-itj;7e0l8RsQ=M+^=)9LAgsKU|6hw)0BM;->9g;m?hqfXH1O+KbUCC!oK zs}&et^6deqrf`xg@~}%hKmXdTY_?ru&IMToJ~rQ=BC~4!=10KcF;5GAEYC)4EQPt~!5c!B6ADh*DgXY<5bFTK40tS)8iIe5kC2%E%Tquc~nQ6dw$D277krVHC(g!v z5M=dLRF0kJ4 z!yKiaFV}MzeWAFX_!&!OBN;19lPTl=0xtwg{yl$%;`^FH0Jatmzp_=z`VHnlG((K={2WL^?WHr(o`#G*ZKHiLBF%;G6U!0c5}0TmxPF3zrTKgq=G^u$TlxZi zObEgfYY=$xV;aVbWPvSUMho`Iyv(%uk(+2g^McJA+Bd}m5d-6JBl*nid2{h5S_bPS zH)mLOm@K6&nmHv5lyNrnP&V{`lWgWe@7o7TNXKgv=vmFMEHl;ERmy4JHJ{Ed@OOXo74aibKk{og%^M|Gl@&Yw_#fjM0j%hi{HoF zY-Mn&w=c{-r&I0eQ7C8X_5lIT>zQmvCb+82Y1*`%!Q)mBF12*wBh)htv!x32G8?T5 zczI<@&&4nMZ>$$vtHlhDJuZHi4@ax{hdYRg6X3|=CcNXKhDo6x^bGFJ9wn}pTqJ=y zzarX5l6n~&chOcoTcbbQs&-g1_}bmolAHhp;I67xqcP#zZRqS#1!GmB8M*GEqTH(K z2M@ROeJZlw+Ste6h7NksfOIv`;~Us-4II!X2UM%Vl4=em*h>grc~a(6yxhylcMKzJ z^fjbo$tps}qwKeI!+;6ggS{#hD*yzWsN(kAa3B&5?ge9*nh2to(TPl=ghHe8%jI9c z`%}_I2`Kt|P>$D2Bu8b3=s}2!h9j!OKnuC}I#hgRFoD=&2*!v>7a?9@i|bKc9o8nz zlRKLW`FVG|BFN;ce>gtTd&IwbNYDNdR=<@mRCQHZz9^t zRJC=@uWd3+T*GD_eaB8S_#`fivd^5?qE^zE7wH&qmactHtlp;kY*X5K{dSbqnt5!8 z2wo8uHiAjE~ zZfN^lR%E;1hRSKYpelyP7;pw-4No3Lm!jt5$HwqwyR6hs9gp_D_a^3P|4f+dW{_bV z913pK5$4nZ+o&})E64bZL!J~2hAFp)uurbz*bKcVt?Hi^d2l_YE3NNC?LF&%kMa97 
zzZudRb~%oO041#$mM{EeV`T1$^18BR1TN!Zry$yKT?etg>B`qb1bGS|27j!seF^c9Y@dB?4EYFCubO;@r=X(lnV1VN?I4+tU}&#`ue%CmMbSJj z%~*x(3uAY_fSk?V*zcKQ@hu!7N{$dbjO_#>aI3O0D#zR$9~uV@**L)VC2b}q!}|d} zKWLpeV1Hp9`+2r1JXoL2Fh5ndoqK(FNFN@u3Y`dWh3$>ivk9I>*-r?wOoa1Q;o(^o zWk;;c-x}YTsAvCka)LE=Rd{q($>Hco9kY~S2$w9$BU?zx%c?;Qjz9J!Z!&LzWA)Y* zhLJz>+C6!kgL z%?#f%N@EYonWN5j^}Oj5|USxJZ5(MEd&x6dj$9Bpe5 z_BDG1cThw32<~hY&!n*lysOa6j!rYXXO|qal5PZd;kx5N-!rQu)3-r-Z$i>Ef-c95 z8$n~YQNu!2vu4o^S|IypXo9 zkqc=Pjbd6z=caeac0jU&CLDM{nps3`aMi=A>WEc!n7=hIiyfV#o+H#_5pdZlVF3ZM zC;4+eXZ>#!g-}Ppry5pPcBW$#((cWq+WI0}G{^PhgjK>E19;NeQO#6Wqau~z=mgK0 zk_vexwoiL1Cw=23-}rP_Og1dq+57`I*N98eRcxxexU7a8GLPwRN;VeoP7^i=5a0P-nw)4J&6rdm629|a@fnR8p(@w=P>!Xh}H$&IY6fDe01*cT!sn~ zR<)fm6(wGD=8&x1apq_TyBOPJXqh?Y%mMkP?gnQLgi~BYsK`5W5Ojs8D}Qk><`P8O z5ra!z&K#w-Ge=3o0VWj~l6x0u(4AMaywVjknyXRXt68jJw2v$RjZcPQ^ceBftKF#p z;w3k3^PE9cCN(Tx+ZKX|8-=pP0=mybFqnP*`@q+meT<6^ z6CJe9l9=FU2RUT)3qkgAuI*DsUzvT$XKApM4zJ9fE#9upPW-_eJ4p|2$Uc8?tKGKR z7?ecGNsvkx-`a;7;q>`{=dW_`wl=6vN^dBV16EP$Y`X{`}Hi` zMBZ~8isQYaJo;C1A>O1PAbJULnIr=>DA=+qT<@|gtmPvmFVLZj`n8r{Yq&DvadJ%a zy`tW5$YiV53GHq-#Obc038U$nPp7clA9Y}`v}${S*9Caiy2HBhlUjz=%@Qu$%mpvp z)xZF{9Vi0Ifj8`d!nz`$)Q@2WTPd!nYC|c7l+#{y(Me>?FrOts*tdybqsVDE8q5VE zIc*n&5;RmiKn{YyutRW^BkSkxD{rXTt5Y0>oSBq3oUx>-2q^rKqMDmM$=0VCzkLAR``5@(oo5@+U|Y_>F9pM8j7 zZzsSR&OUq0tp$GJ3A4|?lLcll`!E-+FYV`F*Ym~M7yR@8-7T^3&1iX`iItM}MrQ)7 zA^riZ%b2y?#WrU_)4VcC+yhhiVyJuNI=VRataUno75kosKoPU2TgDUmLWJY|wtWYHxoB=b*Hj zBu4`rDQ;|Z%aGcfndB&L(wxg`)PFlhg_xRJf`S#sbwIELtWOPDsu(S1sj6*!*~=rR zU9Dh8=F1R>0Ej#`thjthu1mj`#g=o0N=*d9$JQ)gcTk_sABrtMKMg0JSfr2c&Vt*U z=37Z+4iK`GS=}z(9Qd}^aW(}LW##ehDW<-q)UFeOWv`C2X?&N}$-L@Qj1_ew>t*CD z*ZSCaWpUi;iSn6g$iafI*NyB3hgW0tabuadb(n4cO=##9+28#JmuhyBOY)OtjceV@ ztFy0s=8bxORF~fD(_E6DE^91uum3j4zW!!N_kFJ@Jo%HixF`QdJL6VG3yZQaIcY3f zpH?K@wGYfQ#kV4<5&{AFBnacgf#lm2D1-&S!eR^#-SUFyy4Y5f3gR8(_-rK?7p4$X zoV*~6`|MA&HD~^(t6r;IHfi;tY`k|1k^fZ=CuZOgjoo-DlZnIt)?6ikVh$}MW>t$% z_4OPuomL)}v5xc>QYL3)79w&p7g;^vh#E2;mJnRiTynX!r8CLk^JZX2a>*0Y 
z6eSn+VnuXwF1e7UN|MW600MJ!@EB+gN+5EeiS`8==P>-#B$zuSi48}06dTZSja5DKg6SO73@ZUCebn;@Fl4n@jk~33i-S9wV%;|E%}RgZ@0&@DRM;`DWo z--U}Ny|s0rYMG3lqHn80j(X_5xq0xS_)y5Z3*(QhKmX(gj91OtU+DP)O4Lb+Q6#8KJIcOzF zE#nBrjr>@6V>}MbIyI&_pzX41cBHGqEJzO0J$59zsuWtXNh|5fx0^mOqg-|rw|DXs zgO}qZs)zMxG$v_yLJgm!4!2z8eS4zs>Er|Kl9B9wW>qBn6kbk-&86{r5VcgzE6!{V7eLV`X%#W0;w%V!PxGxoO)J;!Y)Hu#CQP7HB7$e;;*L`HyiMu2+2 z8QwS|MC`*r5%}y= z0D4>=e1^bsve?n%pn-Hgg`H~-b+tN=u(Tdivm|gKu|9w& zv@rF%Di^Al)@3S0kRBj1LYpJi)G{8!Ova=18i#0_f^()ruVSg^f-d!{XrpW9kdcE! z92}JH<|BWIk*9bzVI@E+iJUrbCwRM2d0FK3Q_VQm%TJRUIIGH+1gtU;Z?u8n2uX{W z>SV@(h^TQepX^c%IR327^W#l7#%mFSu%G7mI&E;!P&RBbGkxC?Qvkw_)%cZ(tGY(Ck<1;KG6d(7WaTYNId`9O5RkK5UzDOf* zTF0z1I)}SzK&AHwo=(I~8>vH_vuh`0^`s5A?i`_tsy})j2{>mAN8?tX^`K9a948(( zp%5Y`1@|%g>VW+%e06-*S32&Tb&YvpW%ZpKz--DMFlYKm$i_Z6G&mKvL>PNXZr#o( z>DpI@4M>NvvmEg`FNhjSX>AMtw7`PMPU(8xx`4x@?fxA1a`ic@E9rX9=*kApX_X^> zlE2di$?4c!13==TQgIkSqG$jlA1vi`aL~ROv%ll~RhQbOV!0b}hR}#VLrA&gZMPjN zVM3AId^noGvduCt%NkOKmR&7j$_pgzABvPWmL84s$iLM&!u~1!B*zc@z<<0$Hx%0^ zCMUfH$|5lS&l+BSl_?(yN<(R880ZPL%H$)aAn6s|44R+?F-a#t9O;_rOm8EB33w2R zd(hJ;%rS9xidj{2$GVr#ADdd%qkMX4fOnLcQj#8JZ3GR=;K~@$5fhw@kWG@Cr^Bdq z8gUDIE$xUu>x*MF&(X$AY%A#n1Dgyd07!43$3}7(ag3eFIlFLzI>c2-+~F#6A3@dN z>kn zCjx33Z><>QMBVXn{pL3EcG&~CQ;s|Zl{v~8t+aZIwBsmeoY$Umys(AEnZ;M<>xf7! zS`K=rm?IGE#!MnFG74)7r)0cvOLpv%yskxdX@Zq}R`NwugK@;%;BbFqddr=~`IGsz zbh4`DGR%5k%VHVYVks>aIxJG3iBl)7ZGdxZ6^^b`$*6B9K93}`&#qN#r)=h&u4u2G zvBhJthEgoL%(h!OTY(#LZ{qb1Uh6i$eY`qj^ViW`RSDZpsH-O! zS*?mV&uP2gE!YBA`o|O&L7uT*Q$(BQ(|jV2qLcdclr<~gJE#w5Hn(2mcGvA#Wi6hv z7SHIjGip&=Zat1?<6s6-wt`kB>vz;M=GIx62!)Pr1T)_yh-ZFYxu`i>llkXm?K5XaFKeHJ|*0~tbJYv?en^qwa=HePqUm| znD*J05a!iB^AZ=XeVY4DD|_bs1^1nm*>8Q{9ZdDVIk$}Vkz;bkVM`QoT!EA5<6-h4 z%Yuczj7h}a_VUL3{Nyd6vwx!Onvs=3K++phZaM!AJ5Ve(7Y*&R4s%?JHu|XG?ks#b z>%CtFs>XO2mgI1Yn`lX1A}NJREi$ETWlw?gJaGe9s}m~aQ<;Uy*Lu8FvSSp0yf74L zgWpPE@pk*}T_s}t4QP%o;WU+epZ!AM?|2331jWD`H8D0|sSX5)x`~;~?zNe0Zm9o! 
zC5JBAL0-8l2VDYW8Ex@;Af}^mkXcq!IvWb~I*SlGb2cTh@0(jv?ep308Xpo9B4gQjcta@_5NtT@=&YZ3^5wIv)t)I zAP-lZv)2VTWB+#N(3gkO%4TybxOt?visKiqirm`y)uh47dN*?pt}~B=6m>43bMo9R zX%n0NFnWdBEY6pQpsFdSp!J)z(+LK#4t4qj-9CneB4+)q0(&HCm7Bbd>5eL=2L1o_*|&HOt57u1<4z z>*QlHpmAOY1IO<^hNhhM>EL6z$~qHje(-UF(Rv6!=w?4a;4^Onsf-%^_>yvnBsRFf zj+oVuVi#1#e4OyziL4?%cknUALUPsHd8{HnwiL|T-1Pxe(qIJniW#9x;o#$o=nltW zKBw;JU{kEJ>W-aAU{+4+ji^jVx8=QIJ3<6ldtFygoSm9!ioNvG_JpnC=hPF1u!Y4~ zJ@E&7(ia`g$;h}PJv)7&9Q%kG8Gqz)XJ`svEYu-l%5hT>kyb7tk@o3ycZhT&@$Jzp zY8((XLbswsPSwS}Ntb(cPB46y@Zy1Pha)qoV9|CEv(7p8-`bqyIUI!d?)==?0njza;9^HHC(p?+ z`aQ$gh)ai!g$_AxcPWtE!XGwlx$)S8Kv3Fp9$N zh+`Xy<%|+ZFU7XSA~cKbwtFa+>)j&TTg6wnFb<5Y)!$Vydu=pU1e$SxghC@BHXT^T z*<{NhSB#G!m!~Zwj9+6BR5-tis}pU2-RZ{ckhrt59bk1W5tMB4|Muy|zOstK{}3!_ zn$@z-=s?hOoNgRzy~q#A?YDJ8E0(!V&OcxmG+4K>|=+Sw=PE67KaVG-Lpl%3S zTtP(6CytvK67I-nOg?IJPM|w9YHaKI3FJ>Tp3z2E<*vaQBRBU9GOnjSFoUQ$?*a3` zfo!{j^MK4QlM7Y%lL!c7KZzU{gnc4s&OeEqWKz>;_4~E9&yNt;*?k#678*s5}fFi zDMoHi6?sJ=F#SOc{osqdJ!;jNBg6pT6gp>Q+-IVu@VVL=a;KH+Xre*n7b6@KCYT+~ z67#Q~HbzW3ZA+-RjFZaCnP6a514YIU15YihS`bSqjyQ-{0v#J%@&PwT0+1spaN+WO z$Bj@{1fvJ&gNo$(QITV@c08ZKndVQpmv3>^dchedwO(8>Dslt1lck4o(8Yg%GDJLC z8K!_MLoi^xx;1bky%^|t7)p3yXAV|K6Y|fhn4ddi%Z6t~js>-Zuw5#=XRZo2slxk; z72;W)jl!QK2ID#@-x@yN8qNe+V>ZfD@(HV`yc~Oy6L5JY3XViP54N)`RpmQbk^iEgFCb`FS94gwE9SV!cN|d7yX43Hf@%+9UI$4kwlWyQ1yJ7M zl-zCL?Ne)bCC&?;RS$T$DH?eD^}&A5K4jP4C{l{pgdU;_LQ}~{gd;g6_HdOS9&K@i z4WylXiel!;SQucV0eLcllk6ym=&9(DF!?wRtE<9eU0t=ZI7OTgEEJn$@B6r#K2C7y zZ!T4$r8Y=YdX5g-25H)ZH040Db{2b(PIWbdi%lgvJp-ia9HcX|%Obp-^B|oDq_b)o z_eOyvESbNGCqR%g9NobYqngmtL+)~Dy^}i@eFhzlQqfVP8g!AYeeqD?i$~oTk5KHa zeZkgsB|Bj7dLpOZLmr?50CZ3Q;egr@M9kDGSDU1q+BzcHP6}ShZAn)$94^_C{%$!x zk9rhdCf!@{2yoq!+=+4PAwGl?%Zyi={qO5j=Xq+}7cR6e zkTX8-(LHa$fds!D)OdLmB_rNZrIa<+1wz@H#kXmSh*u4A0dV=}BkP(UiA)B`=S=oNKCl_> zuvxNrs#YDZC_%`T89)(@G?`4?Z(?|eoMPVSg`7n3Z*w;Qtb^^3>W(5uxM~h!zxZ^B z5-Ao5RK9hw3!rm-5fRhjwihvjVNaNEIo=djA%{yQ2GJMP$_UUODaL-thLX=L*o=y! 
zK&UhVkqtWmWG+GyT^I30GMId;oSTn3#G06?41YXHckZ3ZC5Q_ZR*G=Tke;hd2@HYa z?22^(e!$=ce>sl;o-*x{&m+#gS2gjO6kZbB3ZeGZ3B zkl0-43;qKV-QuKG9p3vnBMsJ1NPVgj)@5Uoy@t>%2nowV(5h>|wPuIi%e6`Pwp_Om zypb5m#A04A78$}CDpvPJ>qAP03GhdNVkE=Q`q}@~(>uG*vZn(B;sS{0`o-)<=+-Yl zj&%^;fT$WP5JO}x!Kg^d6Q%461b?ZzO}F*oE~IwvO5Pu}2xQ(9YN;;W&U2n_M9OC> z!D8rTZt~GN#@r4;n5)8yw74ydy~5`}YFP`IepY67K|7m9>aR66|>)FJQ9hV>)b>c~_GRFz-9ozX*DpQ5!5Z%J5oJo;#Ch1X)>&WyF}n zp%nM%Bn;}e(z73f-w1M@SxMNS2ds0zNE}|FB6*61BjuDBR7u-ro6#W2y^Y9(aErdt zb;Q-lcFDpun2M{xeV#Qy6v8PMQ`%N{V9izRtn-17#)MrHPe!?oE)}L&q1y;+k(ohH zQw86TSYMSTg-Ak>d_#0M6i1FgmL?SV-NBBIatBM41o9q%VA<9Q+L#utwq(|gK&)aJf zoI@VA?+LdFd6?fzgeIp~qSo&*vjq$e4h#uq2f;)wJHp?dWIO$H(;rI4JJ$%ReNHA; z+Cvek68!P;bVgI(Foer{O)DsW_svz)4quaCUjs+RGR=3i_*K-amBCs?Xw($^R+I2T z(`%BymNk00iuIM;bQMv@Fj5{lO;sX3B7I%L1@66)6|qk~mVd_@=RRy!d<6`c(4q>Y z4p?L&EFY{4GL&ZZV??nA@USm!Y;J}K){-4zj!gL=`#BoR;XuKL6$eP#s~e3?GXx=I zHc39#r(q@(K@VzLpA`8~ljkBFA1wkuJJ2NoKxpF$$Zru!aM0a>KrNd2;E<@XrdX3! zCE2-@;tTM|eF5MSq;u?S8M{OlBQ;8I@>!qU0-;TLP>oHL^-Z ztx$46jUy`~JyP8DQ9e`j;DkOOxG@I`2Ym;U=;kmJg~&?4k-@g6DG(({UUc?g)LP`C z!XIxYm`4=#G&;KeajG|ygv^pGN>5WJeq1L#3~Wu4t#Y%3u46GWX(*T^7!`?UW-sCs zt7uYrqs4w~z!<`sw2|!8v0O71I&O#_-~uQ$=2!FxWq5>RXDvHPIhzspP{2(m?Df1b zd_818SsrwPJ>w11new%4lvVfRm6kJ48oE^ZD58djK}gNMu$}PRFjYIbF(75mHyer1d_kVxNbW}OE#N!TzWnJwlyCvMz8N;h2yemv*+{} zx&rK1kKH~b4UsWbZ)ryZTb0OtRwar{UZs=&bazx$BCXX-T3ewi z+cKc~oFmRsBO242LJD%^hV_#UFpR^|z0ze|cSDD1MrSG%p};N>!PW%`rZyqRb)|>s za)>V^Th$dB5#_4q2&6ff*dq?QJM;$&m4dO+d@VZ+Is9`!c=zIJFVAvN4a>BO(;lAc zQcut`B(6Zn3Zb7t6}sKVoMf^Gv@9i9lTJZJx;hFyColkRR5-a9)^W~Q$GcuytOIZ^ z9qSMtCrzYX32Vv4SC;NtE9jPCon_mZoJx&z`;ZJ6&&?8fAW{YuQ#+X^he#WujyLBM-vi9h~_1y33)_I;|J zC?y}1ifX2Nm6H}UoP45B)!bz{ekgS^LagS+ot6s++Z10QYYix;9_2P!IYdIqTIK9o z62oERR!)w4=@#E@yn)$XmH~#p62;15aLPN%*W;o8oPzrq3g$D*oy}lN-@*L19-y`f za!dvpLVEJ0B}$!B&L%7oL&f2HFgy4ohu|kdmjc0U!HY|~f11t~d*rh^Avr=1W0BAc z&a^y9Gh-Cr7=q0#Xd*TmM%_ykt}pLO*c-bk_<+*6q!h4=kVo9>&l9bYZdaYm7dGs# z%zpcu4yxZCK%}$;OJg&JU8=hv6cs$;fRZ_-Mgjz7KIoU6f~1msr#~M?n$aj2asnwv 
z3Jjs;haCer+Fy7DI&3hU8l>7mGBY1sTvKc~Cu) zfcxck@+7Eh8l`~wCIfRBD6=!_2Ai^f;u{bUmjbt;#FOo@ql9uoAVa3v(cgb>0zRuU z7`wb_*{|P;BE*KV*^;yeLa zn$O=|&_l0s64CHhmYERwA@7y;adzLfNRSE6Ofbynzr(R;!nzEG~{CkU@}Pe z*%GsE=m3yT%F>{3Ddt`NF4+&~*XG-)lEuW4iiWw<`{O6o0(MH6~ zsx+i|EY70zYa)5PBND>%-1=2zcIPc7B&%5NU`K&XJ0y=$q3 zU54c=`G&Q$x>bc;SQXbRpJPQU4S5Q+N3&)m`KA@R*%ca&*1{ibqq3g8>lA4%8_5T$ zZB=-yuLW*$ht%culyhr0vr!5XU$Gg1Z<^%094NIR`E+LY?#iWH*FD=aMZQRWj4Cdz zlo{vnp{B_&;shRP*4krBK524WB||SoNU_f(q09@1iAz{=?CVy*_xw_A`l*>($xfmU zT7`v%rz79+!k_Y1MqpJ4Qw=avu>f(8L#MVTPnw|{8>sE^L6svJq1m{Fx`DHGx+IWr z%=6#`LM0zdx}jLi3d{(S86j-KT$Cgd<1cO-wQP~ZB2q+17D91VIsX}VX4_&eIAXt* zQIaW{dhUVPqlZkTqP9C^JN5Oq_@378NCQ5q(>(0je}q*n@L!3*p~BO$h$AqZQs2J8 zHo_VGI146)-&2t#kY)hSMOq_y1V#+`V2Pdz?Iayy&Q%*vhHVYaO<&;B^ks^fi57s7 zb3>O?457M@=@tg?e|WByOenF~WHnl$;9K$my0 z&k-mWO)|G#nZ1jPatHpbt@M$JmpvP`3VXv+Es1Ga8cSdrX1$1+crqd?A9Ktc)#A9E zUzY4^n^<=cVRa%`Zk~xu>%QD-weCCNj(hbMTa`qncvZL<`I(0+BHJI6Ky*Dtz)z|| zH(Fz~srDU@NVf4Ei@KavFtJKTg)ZD$pP3mJOay+pr-8Jk@rGJqLcikB#&>w9x>a6v zXaf;Rfv@~3537_Y3W>;fnL&_B?U_CMUtl{Io7B|u5y?{e1^UhI5-?s8qx1~9%QBjo z3@<8%Z8JPELa=b1r*0(eyoP0Y_BY4euzbCSQdUyd+tci&RYgkbFktgb8Wy8$Y`-Tt zQvvUIJmR|vE={$e7<$Iapn^Hh2hanEimD(U(j>bJ;B$4142`QfL#8wW~+z{Q*1>(4e?qH>K7&c@(yjzMv zAsehGI|emCn}lx|D+@s6=K%haQZqT8M5KE8l`gW2Q#0axzQ9gocoYj^ zxh023+OI7nsRtqh7Qo*eg!j9hFy7*$g)@P8F>gtDSbWRubgBMo5=^a=WSp4M$^P?n zk`D~|^Yj!iUG+4XyQfsHo|1E3U7ww*u+URYCO6yIaJ?OzF~wx~x1J~OY&=>1`XoZRhkvR&zhUe}VD3YcOBdIms9ENdMD2U@MtgjkC=lO5UrQ(v`VtTF+5?<=KuGk2lYWJ6ISc5uoG=5MZF~h)6@MJPkOB05kM}fd)hn!6Xh* zKp{$^WE>*EnTW*T_xt~=&b_x=Eq?_Mat5`1&%Ni=soGVw_pbd_y9%wWCesgVggFvx z885-tYsm+Ob5~=bScou{vah`D?j(EHxA|C|d~tOLb9S^vRoj1BF0l@Z&nadAJ{Q@J z4NR5kk$S06EEWqT>O$)w2)dk&hRj~&JF;K@cKNoo4E?7Iq>ZJ7v&vMG-5@*c-WON} z{Fl9?VjA#ta5^VsXj-DCF(7cK^?`$iRFLLMR|T#V2n8FX1PE~@6N-||AkOd!;{Lr- zK{gl(+PS{l931;Y+<)g$Z^p&SsxGH3ztpdZt{Qq>K}K!zo3>B*0LX z%m9x;3s3@4$bSQjIcP#}gdtt7WeRgbhNOkwt)-%|Ioc|0orZxCArYzPXA@L;K z%jzvl5WUs1ts%CY<*=85d->ws-q_R2yr0Tq9Abo*#lI5UVo@AAzsME*!1#W=lxL=$ 
zBFF|pWX%X#G+#w{<&el8klBnQu}INFQ4m8c((<@jBZP7jL?Z3jlHwzAEIEbHxc?H80L&Ty5V| z$sHL7uxN+q;U233ubg`r14SVkvR8(H_#i}}*m@3a`eAjjbk7GP=T}}oOcFO_|4Bu7 zh!MvlhA6TRMGWj_T&l_6tl<%%wEOHg6#I&`u9u#&4r1n#LVFI%wTA2J?-_>6YO_tF zJVhs+O9;oc8EsT?rQNb(R+JqQK+tf#k`U$VV+yShZ&pMg(I|fJQW_DXs!4+zc=M{o zZ#oFdN^<^!?7!*@J-N2q_SS@+c2`J4lXynb_CVSIW+SIE@XSdn&+Rl z02zzp&0Ja%?rg~fqps&$n5P>li2Z|4aWQM4^32fNJ;gpbSG+cgoJGfs=d8VEls_Qb zN&7b{2+`g^C_D%bEi(|=FR>QJ;gK#jg~5dtj3{xhJjMu&+zt~@dL>ETZajdX`G&mT zB!?8}fSoLy2O%(`SH2q2f#ny5_fJ9xtfD=1$Qz#&RtXQdOmt-TS+BZbVohz5Zp)(& zr*B=(99!oxRB1EjXh>^KIk5 zY})}l{PPxo1ubsog7$wCmw?4LQcO5eHBMBu%0zG7>2ZQVjW|(_IPtM^kbRv~mBQfhsu8Z?MiLx}d=G?G6=yiq=6@~VZZH>v8) zJSOm0LPCV_V)|uYN@WnKts80WX(q>L2DOkbKG=bRoc8Fv{Y?;qaXuDtKHMe^f6ct`C)=K4Ctzz@x>a9`-q&Ot`H{Bq9 z*7xPllnRuc{44>AAzQMFW+w)JAJ2a%?DlB>!{LyAdH11(FYg@uy*>Y-muK=HR-^p# z!QL$cnH?DXy)XZvmuK@I*0=ofqrF?<<->!&=kp(Wc`pCqVVPsT)X!d*l{i^7<~L-Z zW}L{=98}E<$_J&suzHb_n3c&B_kg|zp@Tfihwf8#JXBhJBbMs z2tX$ZqYOv|%os%i{%Fs@5syt{y(_#NC052p441*aC}EU!JN!UU%r!$qPYD%@jS9*k zLf(WEIw0As?m)D%og&_8*0=DUr8`Lj`f5|arF_8%TTQ+#1+ffmAhc~6R1kZur4+UF z2{u|mq_y+9WydrG3&rUbL|m)u9sSuVVZWkI?mOfsH6 zmzLYF=&AWZXAdZ%IhM*UQ+(-?yOMWdP!MieX%z;-7mk%e#ue24ur~E{4>TF|mNAmW z$bQm6&7(BMs*sl$;<+`G*Atvf4ig>()RYHSjs}g6L~O*Upb)O^t93p&+)IUzKqQpG zt){#{6i`iUXThr>0?TV%|J8N!%H-f|;)V0%ob+mD9VPf(xgp~M7f`64oLDVw4MT)A z0@hB~7nTg|pS*Bbg!qBUyfOLZ2sm|l!kMGvL!x93WM z`!dRy50MfXj_`^_akP-+@7CnJRublTw=q&F;Fgn^nJbS-y*d%WKxjL-Rm)tGUQMl# z5Vd+s2m~p?w10L*uv5tnoIUiw+Y`(8!MbFV8}Vtt<#!ZaGSQRJQ|dM2ClRqM+&?U{ z4vosOCKZ4WhE!sI%t5@Vt8_CUTGTPES=jeoXHpQ6p)Yk;-Mlz*r-2?~6bL3Qb16yG z9>HI2m0H0DyK}L*gaMMP24jVAFsMOw)Cs6k%qp$RXR-|gx$r9N>Od^OjTuZTK(1|8 zOq%)$y6c;|3{T|y$4NVqd?V+ANIF}(NwCw(Q;A6eb|VjzH;r<%i8m&9 zhtfpGS|GU7jF&tLchF(TMvSf|Mz>vUGW~8F7>FZ~e}l$#rt3G5t`!l?6mum$3C0HR z2#$s5Nm-J`*|AUwo|L8RV6-GVm{}a;DJu00VQ*V7KGDu%=)lG65jaJpY;XSNz+l|G za4@QN+rrfuk{dbPW%wzE0ej9V!w}^@hT-~DJBPpNQE>0WXz2I{TP4`;ihPV(h+O3; zEr$A9tvaj-fLd3SmNrS@&44j$lDbLXPMHiC$}OC;(BwEbHJ#%eY^0BKn>|})IN2hc 
zqZ^o;2Jnq}mFw%Y_{NwI;9HpTfNw48+N2@=RvhBelyt18!Z_YI&WiZ#unh==ZHzY9 zrosBwVr;{F@;$*eTIyk&mQNqN6}Pa2FyyD18maVQc{PL7&|cLAc~dIm`xXTkOfcnG zvv1j&O(IKM0FV_pF8%|qi(t#nt-_Lnb3eSKy)fBjEsso!p0ZX)KOM^h4M_yZ!O>E( z)4^8d_Q`5R$6c0wv2-VHiv!hYHdI`wq&2u)7g!wypDPTxRqlX9iUV@ZBgjMK5)?Yc zeB}vOSS}Ji3)3BZdnySZ+auebRpW!ySWDg`)*jaVgSx9TF?2V1%S7_|iH?kO7-x3! zNW6Wv;!t4K@sLVCN@W$&vi zay8a5ulqXUrVv)VkYBjJ-=V{Lw0Y~xiW2q^ zPB*dMIOrH5tXEdcdJ_!ixx~~-6;IwOeBn+IlXd4XcSQb%LJ}S~f4sPl7E}&i21u;? z28lz5^5|imol{HR)`#g)o}F8MZ2;(219TMP#P--LX*D{6bfZe&oV-n~!*%#^t;!ap zcO-wPyOkbHh`oj+2Wm<{2_)Xyhv(~f1a_A1M+G9v$-O+)LCQO+=7gRHIqrzQ28;O& z&vl##pFS-qJIYBzVKc)6)m%|Q=G-UbNAL-X3J6<`ipsld!7!>#04)TmR$T5`MfHPA zML0|_d)TYciZCdO1nO!hUr^lXt653&C(2gDthFRMw37$oR%+Hu&T_}~#{(#}B_a(^ zIlNoxXFT&d0%n4|!blJYOWT^`{SvW>+}{W2Dg zT9V%20+b>y5VI#`F7B7vEU64-1(Ph&6(j=TN<(Q`Lutugo?()fO`a-}Im5Aq_t`GK zq3{zjS0d1ccVP~^dljV#ljf5cLB9)9^xwT!?`AsmhxdPa7!t|*d>yY~IAuVPu~`48 z)~ODsU|~_J%I?wwje+gpvH=eTdo8`Xftqn1DQrW#!$Mg(Pg`NbABIP{FLmO$Kw@NHaI+P|WGF8XuFlUjf(2T%$9DI>18uT)MgS05|jos7^G>oxZ zV2UN-!GERO+0+a$H|3?Z-V#nBpPZ^^bX#5+7gc{tUjAl*-pcmEzwYD;8@x4KUaMg} zpZtR@iGVcM(vB1vrD<-Ppko4qWM+bpo{`mMZEkApm><=P%K>U=w%=P0+MM=A_0&1$ zI%V}(2lo)JT9Xs{bc@Ls$61#H#|}CZPdBLu_#U6;QA;6|H2B@L3#OQ$ut?&(uJ0Z< z+d_Y{K!%JgPrk+BtKd8oE0-oI7v-z>R?h!lvJ&S)p9SiBfp-O+}&J_XEozP9GCOx^kj)#D;nzLACONX zh3(U8+}+6%!$wMf*V%&)`1^*!E_q_S*U~vxs9O$JX_yZnD1HpziA1V{a>J zg}8w4|K2a)Z~7iA;P-w%7VzIW0~hcEFNg*Ft%H%gJ!f}C=|9=0=6KYi+?$eVAaHi( zp@_!3NID7$vdlViaiTujJ+8%DQUzwR?ZYr6hrKrv^qzZ=fL`8{wCj?~g# zNJW7!Sq9-p`*Fk~(NQr3vK}eyyQW}lN!a99aqy}*H%dl`{6-Ii6|iXBeqmu9FtuK3 zO#v{2EC_FxV_vMiATTG*5+rY8AbOtTZca+Ut1>)iPTbAO|ApgjzWy}cxEE5~4Slgc zuKfawyZKNt*vwP=;}EMb?V)-VAd*tH^DShyI5*jtZ|*7LUOPFG|H{j<$pgNVeOQI- z1BIn(ELC2+Byev%tn#Y{%frJ8WLH>RzjjUF9{V$uC(XuL^5!XzdVbLTE3?0RgAckt zqRVLZSuXv9?xh+L)w4A@SZZIe*gpVMJ+TVIDUizUpsw58KnR_`Meo47veLOBlh1*% z>27l!qhtMweHWx)j$cdO4MM;yf@B3xqU6bL`w9ftHpY-jr!5_Q@LBa5gmiW`bmcX0@=CgD>(=gBX$7S>U6~<~t)1p=?fHhGv(%i`A)a`8q)JMwO4SKj z8dnT!c5xy9urAY`vqK~4D!gW@e-*E$9S-6tGUL0rMIu 
z>OgH)4wkS{oN_u6+6B;3Tto%+R2tVRaw{`?Yq@mSrYo=DfMafPf_ciX>CReb>+*YD z%--6RohIqo0!o?674HL(_NXnqZ@AM@ut-MNK9Ea+$Hp$fAK^x6b?7yHdvup=qbn7E&$8Dr?F8rJKP_a)3)}s?L;t`O+!A=8e*2Pv}RAq1?72R zxnkn_9ZMAyE#n0GtL>~tCF{l znSXaz|2$!aSYn6iDTq&6oOIQd-O5^OTn+QMviqX7+J~LK=(f%pYF)iJdh8_t9;F#4`C zgw&>WZ@I3OIJ8i1mbg$opA?9cDBq9r!$uf_=kAUS>>0DNHh^!yJP#g zGOrU6KX)$!{$=S{C&7eF4_9cI(s7&i#EDmMZ;U#~#5FB%G~Hsqq>zzDANO^CIuaXu zLcf9OBo4x~wH3D+bqD<5mkv!IVuX6FsNo8QhP&0ykczF&epxGmL5L4o@KioU>b{Np zYZ&18M5V$&SJZsE0s5jh6_BeHW;)CduXEBvr>;@dk^(X^#uzuDyPCO-Kz=|o(o2dM zr)gh4P5V4epY}9qFl*Y>f|hbMtFJLR&WPP|*k@AmUSVxf#VnCwd|(Q#{p2ar3KM5U zi`nora}q!wQNFlkz&hdiVD*gXAWRCV5QmWWsuYbQInFDLB4Y>_f04|8@v)|oM{*8m z{Q3{VJg?D1hH+f)!9AG1RzMYg%q-Kb9m}Tid1(%NZH*Bh!b?Joak|n+3_5GXpzli} z20{vm5kNCoN@u0TLAUyY(3ud281hsNWu>qV2w^R&*fd1dHO{c37~+)v zD&ial4{AhJwDU7Tz}5rwNYpTGx_oe1iaav79 zGci&#*uYKeLCUfmhS0PotF_sO!XZyw+E}q}?4N8W6@g>%tO7huNEQGCTWEa*l zVZ}&Rk|YW-fF{a5irbG!)0mYyt>;OO)Y~8`5aa;+r~({|n4|$U?!F<{h~UnK*aEnn z))GA=RvhH#t-OzwbQCLjTGt9OAmOzfza;R1L0!dYOjifG2_@t=+EOoq9@D}(cW9OJ z-=X*j5IWwL#v$Y%=rY5>u|CSkk9rjPi+abpqwiIC3@FuUo0BL#D|KN9voVS{G?l{| z8&;*jrV*+N)JO$DEKQ{i-y@_$>jmR06^239b7gy=iFW!!={kvAXMxZhKIEwP7G=48 zH8m$GZf!U-Lo9DdCH>LMx!Xy*dS@8EmlC29U*m3g1uZ(h!cc=C1!+*|_wY4ZEc8N0zWzAa-R0cS zP%KYZhY_LUfd|GBvAj07j4st#v87?aafC-e$=7=FIYUGsHL_iX&Jbi#i-o+TgPJq| z`#T#1c#W>G3PR~=?Mh_3tpI~+Hr;s%#0`+0)ZUg1C+vXC0*;$RdssgmE?? zg&_m?csX6I6|G++A{AiGE!zq$$vWC4Q!nFTCeNDdL6J-w$3x@Umkq91Yxz#&4GDs% zpIfD`9(v)FaEBWPsk=UoiJesq6SDaRLB;Bm#ao0zMxApEux#{fP#77!8uxb4zTV^%pZ#KjJy9GIAP! 
z)u@yy%sX=;kSNc$hDk%s2?yKhxj`?XXkLtkwL%4O08uqMk>*q@E|s(;Mh^@`Tbt_9 z8J0uCAT7Vzong4ceyexeG^Ig&r50ifx54LhKF*A;OytSXyOZwM%$TOrz0%o&F36z?LetvnQ6HjpcWT^1EGfc1+VB#gx!Tvq=#a|1UQeI7bUo zQ%lUzpk(&#E9hI;Lg32BqGW$u*Pi7kF86)I5OqAJ?Q?z0(Aj~32O4UUS2gKw*vqoa zT$vM#mqB2lCUG2PSMS!N(hft3x~At;2B`sifGFjtmDA(?3IfkFh6NFuP1?zKqa%7!;0semgm zC#21+06v7*B~kD?8=LjI5+idsQfA+_-y`FzWI>V*@d%GUkLhch%+p#V)TSy-M>`|E z7L^=FEovP>@=+0J3Fc_UiAXSca=>d~V=&Vi3a@N&`g#TIcjW5Zhf!HvKvlU9JV!Oi zdknp(#|F7*%?#&yXy41S-z_j0%b;Y8K=i2C?!?#0(x}&m!s}(r-}TIju;ETZn?qSe z{2>llIrP{g#V`uw8X%JcFdahOF6Dhgp@{E8}8b+iT))gNT;4Z2ubvQK9QW4ZWriezOWMwp90@V;B ziH(B-?M^GaGIE6^MME_*0F{QFo_I*rMIeb#T!HL1dd=;^<-9xqR0~Ts(DqZy!gdL^FC^und!Iy|xAfS;E6;@^rO^r!m9G1uHNmn?> z^exjUWYzFGqdXp(PO@r54A3&Gh!oiAB!q%EG_x45d9%Xc%f!&f+8nXDxqB_t0zjQ#|)wHGNus=8P{=c zjp}`7`3e@eWxUVV{C&nqIN|vgvP7DLW&y4xnS@)*^ggSBKyCx_3WQLj2`o{dGV?GO zA+2mt#-<+90huP7229?l9YQ*hcL2AQjt>l2j-N(~@qlzlbytE!2}s1(Ya%H`x)CV| z7HlAC63R!epn(#vh~?>M#OM$`Vx;37s>pA|PFm^E)%uN{9-F}++p+0C2pUWunpZ%& z$WF$A7Bwmi3COKk7q0^8Fd{q+JEAfD=fhHfp)hq>xx7`^FIJ>$_Kd^=d z_FVsuIsEw2DHtjY7XmSuET2Dlv22Eqfc!=`qJTRL@;}r8>VNxH>F*veK`Yg5v`u{TE0M=jYpA0CQ+s*U!ZA62moBdN#QiQJ~#~<9}lmy zG7Gh#OC|@h(VBv$6!0?=KCf$m1w6)C>pKf{lCJ`mXp*qLS3vs~D|B21cu%9K$z>J5 zyaS3$qJbbM!s`==)U*y=s`VQ9I08Fx&O}q<1g|p}tFt)Z#(6lzgEeXeQU7WQD_{wqIhJ^? 
z%^qZN61$nmu|*=w81`UTTY$24r4}fYZ!3SFRs^4?foTDfmfnxkKraQHMIP2A^>T$7 zN*u$uO00;h&rmw9Ua{h&XLr_wVfJP+Pn2n?C!*qVV62A0n6Z3ij^qj~lmd;$!itXg zqJ}krqx4WvP6Q~8@dBSANSiF zU-Pkn&8u=@xcVY)z9!U{@(g;OH(^tc*`!V5AaVQbKkVP_B%hxoW<4Z^mw@EA9`Sr72y z_m@$E*7J9ya&!6b)JR}_Jt*l+Wh;WSFV~T@d-)cMzpq~|9 z(FgR)TDQ}s$Wp{1GI#falNpJ!V?Ctwh2n~xBQ)0O{hd9f*2|eP>Aj^XLBkgn!%l$^ zN33oozoLDc#B%M<6X@g>u2=ZzI@xKotsd=DVYS}4S|bYw?2esWK9C3vL}nCX{SEhd z6e14XsZ*GQm@;K?_M8yE9|}^h;Q)%M82GkC@*9GpmF1)?bD)A#F&yNFx}y3t00e~=eT+f9RDA#e(D2BbUVZ^j`KoQt+hpZyWVMiluFo9?1Kbq(xX8ysDT5~w3949O_1Lq-INUP zA>9-yq7+Q>=L&qQl@M{1aH`SIcm_NnuN5YI1@C)vn=^jWR>+eBovOGf?WsBsjoj6U3)5A~2Gwywy(V zf+-+wC=zKo368hzG9il8@gWiG*i!wzFTL+eWIqFeMb6l(B(NDPX^4%(F%5S?^TF;^ zCGG}p@2N~{-+uzyeufYbaJ(Q1==;tA-*ARZAaML(0-wtgcF!3yfxz*D34AUS`1&(s z0)gWP6Zl*vaN8L&fxz*D34AUSc=s7Hfxz*D34AUS__{M>0)gWP6Zl*v@R!e!2?UNG zOyF~wz?m~-0)gWP6Zl*v@X6&e@O=L*VZiZ&34AUS_}w#P0)gWNIe{O4rncy3X^EEM z^_dx?zw_*8(3RUB&6WVio^$BQ>Fv?wXKIfIl&79VA_@kwZkJ_`R+5eaPKfWPJ$kk6 zq**b0=ZD#IIbX=W@S$Jz2~-DUsCJfiWyma(Y$MOZ$$6G?VaO`pttul*NT}hx8l-u} z)=o~8ib6-pIwC(%{^B?<5@}dgm|s9@i}y{a%Ub+VyiZ zg7N~Dl_|tWBWxcQ6qgQ~^Yx0%`B(7@Ien(&5-2MLO*?EH)ni-%!8a509zUPHnMG-j z!&#>aon=^(zt%SEr&yC(!8=3D&I0dDivN-rLmZ)}rv#_5B=aa4dhF}SGafJY%mgS* z3t@xw5($fm9T=d6kbUxLXaVP`d^1@5uj3*r{@(j$-dqZ~mN*tgi5kE#lIjqfVTT?4 znuk<1fJ4fUhAYi@EWbrOkcZJ3UzFN~R6#3AEVfePN}!#H0(_T)qhKGAVq$U5?eA?w zoV43Qvxx_0Yx0RT^1(q5csYRJ>lHPP&qEFz=b%;7sngHaUANO=tNez-IZL(Yss`Fi z2ii-wCZAM$ybXGQ(HAe?MD1Z$9w6_c=8Q=YFWFx=)r#&0^Q?ZQ6ppo=>K&`%w+ESk z>nSNGPtGEscpbO^Gf*O3hH6706~WuLB{D6+H%d6SyiW?RVhOx0>L^q;_Du7hGEDy* zd%(%UQ;_Sg5d5}KwY*Q|>;h5x4{hC0AR!Uer)2IrPn5l}(7CAaa=w9Aa1`ZN+eGzh zdOaH;nc^elsD^D{Shy&(MFV^xKsLnidt*w8IiLz)V8a8kRe*mHkSft-^@?uw!h#qN zmy6QI&!rC6FA3}(tVLFX_zvj8o{67h+KXBBj%iBMCovnJrc&D9-W`vN$`A2zvqFw| zoQyfG`1IiT!bdf}^c*iOCE=MKTMet0ByAbP9nJAPjw?D>`_|FsV ze~A5J>qnnIkaE?`(+*3v90o37H+aMAlO6MN&g&1#VoLI(+I6aAp-6{#yaK;m2EL=roP}OG{P+!WSEV?>lh3!iipfNYKQV`dCkdU!&x?*5UM2CcW_UMJe3SAIVLB0ndu;r$7{O- z$Aa}5yq1ET_syp=#`$2dknL7TAKl~}42IJ^D`jk-8E}&`Q)HvPffxt|;rtRQw&W(i 
z)qN`6+y^ee2}}VvproXMQ?JOMrpvAP{!7tW-a8m<$-K1ok`vV@XZccjX$e9Z$P}7q z+0@3}rDlS?juPHdL(Or2y@5H+OgQP;vz-YJ1N+P8b0(my=ROk#?tkV?z}<6NngkON zck9Vl*=6bMjrkK&ev2{sB<7emT|9d+Dg7)w&l9*gOyH)coj}Szds5;A(Z6~=Cs0H3 zoJq;R{m-00vr|MD)Rb?KIvm)Q;gGeloF6FL&F=kc4&|<8ySV6_-_Zg~(_A6@&wul} zB)R|W_K1FuWncTPyZy6wJn((La*tknI~To{^#rpJLKH*5pnFyq%Thaw$=|eD{a6x7 z@(!D4DJN^6mP=>BcSC1q-#i%F{pFr6&!Vk5&EWI6BXU8Dv4#*I&dy-T5Gzr{kq$Xp z$2YHE<>W7f4Fwo0(MOXxV^tn+$+J3fIHax?hXPXa+9-)P@kir6ahT|$A=sv)i->g+ z+e}qR2Smc-l~$C{OEt)Uy*(FliNJ?#*EJuvXA|4 z`29TROZ%E8?##-U_O$3Y!a5icy+r=f>e|Ap8#q1xE2eUQV5s8Hm<992_ zN4@jxeA+?K9Yq_+VTvmRZN-T|XaY-x`aTquki+Cj_A19mB;J8?d{ly-%(J7}ygz@A zL@1TBp3IGOgI2Aby9ggqN6gFVUZKk+ftznj4X&!zJi#_Izim}3>y0o zdqUY7`(p(ipz5LDBbbz;w4UuD3F2cMWkI^&dzlBF`_~Pb6N6-|ZPu;5juT~A4K~7D zQ37-}&wI(|PL-i45GFJ^T02F+_QTz@Ex8`+g~sOBmQgothX`6$38w9Qq-mbQs(0y1 zm}!^>71BKu)-c35P0 zu5_yjyKmU3^%R>Hr_V<#LYr#C^ zYp3z1#XQNn8`sg4nev*~li7MliGR_0Awe>mBi85l*@h9DC-ZnMQ_gXhU&#-&eNvX! z((wR;h*AGJF(GjsCE0!gSBK5~&KpZv!s3)ed;zLhrbPusI$2SxjI zWwjW$E-l1=|9jf(e_x`O-oyVW(x8D*JIOYZ|H3-^lCsDQG-)u4ir(cwh;Tn)9p}Cd zLnB#FZdFA$B{3tsXR3X4aT{<6a;jDE>E;)}QJVE$T*$+tq&S_~gGp|iY zt{pC&mA#QK|M%W$Oo-t&bn)bq>?_Q>3(oxewc;aGP@l%!u{C?$YkyJz0cra_hE08- zKR`UtcM3Ft_>$t?*D^fiX(|K?V7Tq_%hUyc4Der4y89O|Dc<=}CFBlcr5zQeAkSdp z&GQtSQjQ7pG1HS1+vtCPpBs=_UuP9@BmnzeP{=p2>!c=}L1FKg4IJVzmGvnDY$v5` z%C@z5)Y4x^D3_FvPe_Tu8d3g&Ba+#t*ft@Z(llWva z)$?Q%)f;`sNdKjGgyOufl6s!%V={R^q@ZuUw>tT*D|c^rBOZ1jW5$TihARbk0q}(b z5oE0BABey&A43ohMDWmYvOvfI&Zqmxfe1OHd7NiD5Wxd?s|W6=?Ff(eONNm<>5?~x zVY|`8Cd&rakzUC-IPM&^VgljN^yGIZC=|{~5U6-Q@^cal>r^-=!QXXG!gfj6yr^>$ zX85ji5+3rsa8ANbeGe9+CjiNQobyYwDml+N3FJHn9u%jlcUNF@In+BR0Rd_rD@H+9 zD=p3xJDi`BU~&1=h3wbf6U1dNitDrYeRILjKOX$KDf{T}qE4}7{zECZaAB-Q`+*Xf zr(_rD>@0xwwobqw4m+%83PA`_Zk`8pHYwc z(J2Rlm!jcm(ShLJpTVxcMf)>+Ao%I_X8@9xr%7ph+MNMzMS~L_y0}S0crQ1d*H7Uv zqu>L<$Uh418U|E)y=*>?Gbty8S?8QI~jC(Pv?LRGtGs61kTgj;xEs0Dn zh#*AaxSkJUoc8JjLAVIiWDw#L+2mjY+SQXI7lL41?qL)*;`y*FF$->xvG5AGievb1;o`K23s0N+GYNPc`m%kd!OW6)|n9_ArfV&AYK#zO*RKE&aW 
z97S%~Sez*?086%rNOTw=v&T&MvWAv*HV5Tz9dvoRG(g=F43nJBpj8B_=DdJ#zL(O8 zv!Hkw5T^?w`0aVde;?Y@-ct+>SbvPq_s1m)+ZYf!z0Xwc`2Xy6Om2a z0m!&llGkC=ko#C?GHJ?lN2@jqqEO~r3R`kxR<%MKdSk)E9$8kz_^KA2>@0MTZ0O1i z+u|MBN7LJ0eY-R)IW{8^6~$Mo$sg$iLE4?n+YPFu2`%{!0cwkMNkHonD;z=x=&2(< zHtz*6R$q?YeNJO_etn;*ToIA+1D46G3!4Xo!PhuiJH$rK*dKGL+Vgo0)BA8 zGn0*8SXj*ko}!d($!Z}szLcx>^&y-_4FHfDDxGUK~ zhh;nc;CVd17`u{~e-pMI4!t2)!c@0q9_Muh9*2E~Gl;umIwP(rX5N+Qwn{ukcHr~7 z$*3gBNPj8$C1PO0umTgRr6RH_EOFSe2`DI>p-cXdW+tIKR6*@z98prp?n<&i-^_0g zXLxz(CVt#1MvFkxl#JR`kU{b#hi$RgHFeZnUI#-nS-Og72Um`5@p@Hz0g5)s+6!bY zRe9ui1Fk@l9O5NlE(5dJxM8ND9?VGQwC6CCg)nbd_CN!@tq9Z@oO7^QubWahBG`!k z9DvtgaCL+r83+$*_*ifmmhOtqN-g3|r-W!qTyJX0v}Kx8V53#rC7y844T*bxr+N+m zvZ~JL*f&+qPkf}RohBt6t_<0j)L3T)kA-Nn0zF{GK7C!Ww@Vt5J$%hLbBs?dgH4(D zbs{skaiF*qay-CYj_*_rRC-X?m3zD7MS4hIL;Z8A^kG+b(4P;N=*ykJT zdnHtHgOr;ZeI{lHB$TcU2`lafLG^P*@8<~!QJq#Q5mF+%2#0R;{+^ZR`b9#A>*M_2AYDD0-t`Bjj%p2ec8;kQxR`oD! z?T{)sVVpVSYQDO%yW)~pVZbG6*&Q;P!9uw4hSzuVPX2og2SABOLO>KldX9aXiFv$O zuwU<^}QpQaLn9OWH(=nsnPG6pqbip+t@Q?Bn1s?H;> z^DtfcBRP8+8II|zbyxLMgaC`D^SFv1cJbqUu96W75FQl>N`IwN-4*@lrxpF^C#ivs zLDeXqTRL3RVK%ZxJ)}Be>&FD*sta=P3~B@$0ChUJfdy=`jsu0kRd0_dPZ8H+H9S*G z1vuemRhR(r0i?0hnj%9QgG1?XY#FJF!rB$)$(M$x+PSXEJH4iIa$w|UtvBMU& zV0<+&++ki6`xZx-A(E$;c@x1rQX>0v^2aeMMss&=`FJg#a(Q1A7p71MB3qcUc{LHn zdY-Xf*&pk&%6bUJG#SFjR5TyLm2n7x!%#7BKtT2<+K$8I;bPcG%LaMF-%dHxn@OFk zg>*l9Hy-L;?(dmDfMsRGB>B|p=<_A~<#h~N$>VyT=C)hVCC0Js4QA;*!G8aFC9rk7qd{M*40$c{EN(oQ%j-o^~=;EHxPt z3>Htuilrxmc2AiKQ{>Z4hg_mdP6w%M;PU25ZF%RJz~iNE6~2H0MXN+jIk1`=3dab0 z6{;~?M`2EnnK}8r;rQOk1@DPmHxwT4zjcDQp3qw}Wq6?MGz|_PrM>OtLFKZp@IiB< zZRHN!3@?1STk5pRaH^1`KIhImbOy&xDr;st^x5OiyUN|NJ79#fGl^<;Lb(w-0RQTvNd9Wer!P-xT$F2xD!>v3taXI2B(=l z=FiZa`FG)GIF!q5J6-Ymy>9{MIR`t~pw zS_sH6ni5L^D%cYq)_cpTNPRrIvRhrND@W?=Pu^qU=`cBH`v@h(uv6SPK-Gsh79wE6 zk;Mg75C4;UopaASEe-Oc6|$0ZeWcP|7UHDyBb<1E$c!+mP;QbPD;mUj$B6=#3hX`XwID;cSaDwe;&U*mzZL3V zICY`^X7O~1Xr3T6XzS`fzwKj@PQ}I^5?nbnQH7swciVqr$>`{KO}h%Jm3Uz zpBuGXS0t-ly3$L$WC1U9J>n+h(Sv}3UyS)bT3tL%tdUh{Q8OSa$&U3w>r0i#tAnyo 
zLwJb*@|fB=L3tsF?E0pVW4ikocSn+Upx$jPK8`QWQ{vbdz04^iB#f7_nEv&u=` zEND;tS9>#B%C@oIEPX~;I{)x#4nlo_Si9-Q3)0H<#*tDj z>}ANjx(iom;cOqN4hmTM_b2~*-$Gu-^4m_ z{0DOv*&`LD%q_GK<&u;Za`uG<=`6e`ZzXT7qqR+HEv#GSNNNF(o2*?XP{~|sHu<1r zI>HU6`f^gpdMdexOqsHHS?}4x+ri!heap*M&1SLPBwr%U@W$e`r@S5vdQjSRs_81X zE-y$WdjyjX@r(->Ni|xU>ig{&A#hq;>xHnb4{Fidn(u+e_V6U$N~qc~nh4<|eU~5bPPk&mX5z_D``)IR9m9ub6UHBL2zYp#*p= zA5JtUcshqZhrpW5HE-m*XN!Xw-&7N%)qABgKs43595Ur4GMJ(kQTVgG! zf#6FU7-GTAKuW=@Cz?nO=wu5Z+*rH?BUf1)>;6UE2kF$^fo^MK`>CCPOnaRuiz8@b zJUzoWAvUpOKZJ4e>n+uf&+14M_v5Ak>s~|FS+DhhMcAw9C>nTJ2q}j~d-QM$3I5Bm zHu>xsj;{dtyiJ&B$wOXFeih5nZCObyv67vDtFlKc`8@oG;~KSZxKlz$Dh54Sh2S+Z zvKAB=)EFu=v*P|U@Mu<+KrV+sW<*zl&5&}ocErnMx-BQ)z)5_k6Ll9*haw1?@OxTN zb*NGeTvEP=HdJ6|NiyGW*NTn1h>twL#?BB~l1Jh=f`k)7!V-iDg>((J!9cZx`_COu z($*GZ0JcGE5~eTjyVswM#YPuJX1d$Xs;@y%$LP#)HEDBgvIb0ZTt~%yuSDV08|=wj ztToNw&y%T%qG^*QnqBQJnSt3S%ssor*&QqOHf{{|UU@Qk)5HFm&Ahd)TX)K5iT7K0 z)uvPyqzemo2ykuBbRf+wz|Bo1hn|{7DX1z|D#NakyiJ*gX?lxxbo$%W`K8zD+lInb ztZepa=mi`&$+yV{f^Go=DW`~TBo+=KiqrRX- z$TYr1+W^V655mxbqTRXTL8;z>KBBL+Ahe{7wGPn2Fi!0Zw0_I+!@y_7*8opALk(@` zG8vPrGA3(Faq_7(APQeJbo@x>*XSE+IbXcd8kS+46HwS>hV(W)SI=Av5#H26AQCqj zIBn3!0(F)K$b+`qV4~sbN>vQly?o7wd6xiOIL!EW@^PN^*LoM}&rN;D#it^GizV4S(fR!LT|YPLj{Z>S;moNH?+| zse1>si?98rV%T-Z_HV}$YZov=Ndt6JJY@E*J-?h}pDGL-bI%#~Cr>%!o*-e?!0e&- zQo1@&7*e79eAwOqqF{n`{e-*`$((&WmOo1&*4m`q-cc*oMZBkE8v$Y|ev0%`FIkxf zh&L70CB~z5DwOI2jj=XJ!<)E=80st`LP>~M7{nUYBiu{7OAzjq2$#9gY+NkD-NJX0 z0Wjg^*I>Q}ImYyCvQNM3y2VrJSNOAle%E)Q%Il~MQoWW9G37ht&n5GNk(g(qM4pM7 zX%=c08S{cARACDo7zAte=$Hu4nTTLHNpC1zXT022G=AZ~jD&nuAmlB9kXvCugybS< zlZ6Zncl|;N)jkzz2bMiR+92n(CT;lytad=LRLoJU4RHd*%}BX5_(YA)R`B&PkWeFE zqbx;bnP@Ul=S_SI)Cu8w&wf>Mu&I+R`912q zIZ`Klpl`l7Cp+*}lz)QExX66*4gBPylCZBeC@(3C>QxAkcgaquy{l5)W{O=tSJPw# zwNLAx$&aN)#W`Yg9p5P7pW|ZQVRve9nMun-;quOWX)Wp2UWJVWcx&;W zltIRV9g;7bRg%5DyP+_oay)KywYpnA*2xqF^qx?@kk=stiKV>o2nHMqX<2vLmUr|S zRm|H&D{l_AFYvQa`X!BTrCZ8MG;s+y1TJNp9G{Hn3RsdezFJ6riDzX%6OveP*Zo#< 
zPc=LAJ8#EaqpTd`mAm|lITL^rbcxDwsw)x6YG+A`a=n>W&&u>0zt1#n%8veCn5OrQ zmPUeNeiUh;PLwN)k>gFE5wTcrn5-lp7=<#l0Ze0)HZCgqJP(c-mGzEGkeJqWrDxY( zlRfd|lP|hW!c$lJ_mjoj*iG|D;TC+0K461`Vl<_aQ3eaMUQ*NrrTueMsyL)KX10^R z92Ybj3aeS(xW~?jbu^4_l5)W;=~Gy=iw|%57i--iL#widbSQvZn(F_FaMc%qXTTJ? z2)rPJa$WeFb{OtJ&g+#4VYA$2OW=QpN3gRZ1fffI975h>B3MjvgaN1>3i^{;RL5-5 zT`px>z@xrryS}hHp&4l)b=}{jkq@ruNj1!46T%cfHfa<<^Wo$JtFCg}Y+Ni{$ zQBySS=ttP?7Y?h1(FW~IdMo5z=(dvIZ*_xI0Q71&G+XR7Pz()>#0G*eNFhWM?DDPT zNgaI+j5lh%t7pXLBYeHFCIJrky4og8H`u5Zi2*@z0d}`~*sW>gm!?str5czK!NFPe z0SKb2hKA&mQ^%Ul2a^|cE2lt>AU_m#^uqYf$rcXx?xiqQz1bxcWn9O4c+PNM2HVu( zmn`*@r5ivw#r^Xub`6I%Vr@2b3uwauiaA4<=qV!x67Mory=>11cPGLN>0_KTOh*yU zp?Q`}IjYbXCQDPDVelB+fH=F3_~e(+rv~R}>gZKZ1G`GX3=dnv7Ph9#e9|muCTWR| zg0)OjxcD{U>Qz{sB|5Xbs8x;UGEFT(gYz@nR z)c&0A`&>Y4jyf0N=wlmJ#fZ)YCR(CFkLnNT!9mk1f^P^wVK?_f3L+B5;{TV42O-Rn zS!NoK@(y<%kzEL}Uf(h7ad@!mg6@+C*_@$)KDYd65#N}$Dvn;S+Hzhqr;_jnKIizf z>z!5)J9LT81rruQCn%O#DI=I>FzBS;u~QJzbgKTHTbfto9W0yB4YkJ9pkvet!OLMb z{IM@U9pR@Zz^{(q>po2K8tf_GCAA{#l3EF5h}~Kz<7iulrgC(NlyD6$!7Q}k3>ySy zWDW_fidr%E(1b}q7C!a(N8OQu7|F~u6WkS~MzygX2ham^EMl^G$1*$m3qig3S0np9|ugL2z3#JG?g zip+H2GulRF(Bl|P$P!3pUank57h9KkUr3EKdC(cKTl|a3?ogqn$apO4G1(oQSp;q# zToLCjcsCU{A|w)z5J-q2#9kXrwX1om&Z40rmJB4;+fWz*kk;h@Qn}ol%&L@KbNw%BKtI#4)MI6h#rVgyY;1jj(?M0wjxzCD@QZt~mO-`C4% zU{(^UT`!th$J)(BXM#lmV!dKX*ul~4G(d6oOluj35g3&7D8&vAXOqCHrHBisl4w&V z#y|EMqYJVm4?qGOQ{7=lXSDl*qT?p9sSygL5Q2tChjf>sw>*RZO-pu!dOQ`(KW_jm zVok2+0)M`ai$+swZ9Z~RMt7QP$#H`4gMz~LLO?$KsbGx8PAzsC%b>h`VHL-^g%oS# zYiJ_<=F!&3wTLznZxuw%bRrij#hgMfb2LoemoVA5=f{_ddbKF7*iILB9d+JN`E;rB z6Wvyb(dIHqJIt2ae8;%nSiks<4TYC7F^hXuvlVri2byf9M-l1_Uu$OyypoMBQ;pL9D!tBq1QA=*vpU5rT_07VkWz7$%w- z)wh>UVO}A8^TNV*u6pmaHq{c+V-u{RNHE4KiiDbA9!jst#Cb*>rXjW)dw}wy9w^)% z^j0L=;y@$60dv8)Tgv{3l~Oo<1ccpK+~c}g-$LDRQ=^KQbODRxXNF@Mo6x=^ePH{< zEt<{s5Rz5RHx|y^L+V_Zx_!l1F#)r5Y>BxmN#G4wg1s}x63E^nU{ji=9S?a#zLSjU zPHC$Dr!GBAs~ZZRl>Lb9hSTQzxF5*AaaYsLYS;b@Z@gO z#fei&t32WUh@p=Qy2n`&v1Y`+qNix-IVm`TF7t#6Jwe57rQ)VexiIm+1Cl+)`96S? 
zZhMC4?XV+bM`^H|d5|YwdR|wgzZ_DXI*MbDY$X)XhqCrYL=>*8&pNx6i6REG$_5Hk z?IET^#K@W*De*8*`=o9C70P==D+yP}jMDEzXfTiJ-oCrCy?3Rh4G=3RM8=Avx_gYa zAs@ayTFwdcxMx{vl?jviir*aBMxBoZ%L{nRgRZ;BTbP{z-a>>xpSMVSH*bMc4U}1$|G=Y~*)Bp2w0I3C zf56X;#oeW@yihi)Sqoq|c9o>M>o3*rux3UeuoU>f_>X{;_2i?_bikq6a<>5vx%PXh zJr;uol8?KIS)xeXgg2R~-x~9KOI^YP_XsiefVHh2xrj_4mwkFdVy6dmCD`Y&_}g1e zc=JJii)_4KCBz+vtuY567lcRpHUngC7z?L@xB7#wppZ9}) z_e#o@hV4W$XUIXndmUPFCLHwZ{j8g^k9`@{niN0ZD)w@chjb*tQV{fHClCoKtz(W0 z#LO{krP*+})Gi}b2&@DJa_fXWLI{%}#oc8sm^I87p^PmLwP<;eYY6R29$mSIlHX#C zqI-o&M+<}ZqsR;2V2}=Q63G)5BD+Rj`($`b)xzgQ-SNvBd1`>)G^okOCf;F7{mGvJig=r!Wp*zORV z9SCVQTJkCB%$g|8#Q;9|9r1`L_Of~+1lj6IG@LA*j*LS_()T0wM&eKa~tKbqT$SR zhzXFGfw*)nUgo=G8HN?zs0$2HhyUEfyTp0&dDNda1SIdJtLK()hSVg!E{$H0tAlmaDEDr8PkqD#}AAJ|HpDZ1qii4^WEm>ST zcrk2%MTs&*Qd0-3fcLqIL$QiEs+d<5Xr^uD*b5fI5UCeU!{=we^T#A5eo^*DE(&k> zsSuMiwKc{hv7|6$q|rqOqNAIJ4^LLVpjNS>VNw>QJ)gKWD#cIsodK92qYAIJ4^LLVpfF{h7teH_-u zqxzWD$3A@=(8oc2%;;mgK6dJ3Hy>*6$9viE6_*DE^ikza22c8`Os#_2s2X^8d-rvFMPamnwLl4PnZ09@ZU^}~8mlfG#>i>y}_IFA+ zKkUUKMoZP3y&!9`0Y|Q>vZ7KrxQO5DFa`w^H+FfV;=NM<=K}P0pLjt53T1{Sb6%hw z-eM}+ZXcqP1rlN?k@w;TieK`{1Bf?6cIiUvI#3Q;PgO`K51882J{GQZ2>bUrFMSGS zqeFyq2OJo-)VTvRkai1b`kTU!s zg*6nTMuDMVH6;?Mz|?0?yz`A_YiaFjds9G`M)KMiYJwE*H`+uCF*(V9w80T>N*MD0 z<=8+3To(T8x{b9R{J7w1FE&!f7VT6T(q&8Uc9&ouma$S<^sa<(-~l;BjDmH-Gt`b< zcYs{c-Q>mgP0)_q4eQdOUR7x`7(mx;9m1ri6bA5a*-_xwB@KdtC1kSlu;`9FbS<8d z>qrv3M%^*VlY_O|tU637_LNbfqT^+?fp`W-MsEgD2+?O_@%nBH*sI>&ikp5z;b)Wo zp!-|GeFlc{@mOM=DOxD{-+(Tk9|pk>ORUa{aw)kN?2jZ_F%v}~-LfmzGS-c(dCgi0 z2&BPf@eXss2y2}!G63eEz#(DU0b&6nMF)oo%O@hAH2Sw-(lFs5#Bom_9lA@$Nbnu%zk+WP0^;=4bDYbkM{l@jm>#6_(35;# zST)R_#4u9VBa9yJqt!&0@I`o;W?PSv_(*rD3UcQ^D4~_MMTo;WnVKNm0jB~%mih!) 
zG@1*SBXi7A)_7c6i^G_2ugwZ6-$xmSRyxWQGB||D1td-sy%@XQ5h>QwmLo6l5v55$ z=~@q^p+s=qA?1oJSDr3-7eNwL*a^ktQ8T)W^Vd5LB3k$Y`~JwVrnfLZ=+cBI3AvwB zSD3?R-W6@wb-asaKJ-NF7!os+9_Wc9IV#;DG?#&13>5y}_JZ93&%75RNHVA91R46F zQ=I6>MNsB~8G%&8j6kdlr)1!br|OK891Qgqjc1OHNR~oEgzSLma7#z>c1FTgf+hv- z2q=0hE=hjf($4Xp5C;=-GB`Bs6X>86cZl&-OmWJAhePS|+q%olc*iz+)lwL@*)Jng zT(}O8rTAS6yy_RyrV%(0eM%rqT$$Gs=NVUGND{mz|41kV&E(=jEMk0;>=4+tydaht z)@%80WR2IbpTXhc@+vQaCRQ3+UI0^obFk;?u^CPs=tVtrym=e@=CtCp)N$+>1f{d; z&`>Lo=qRj&=YdcybPRS4w-Fn-i?@N`H7(??>DI#zG?RBLt!Z?Mh`l@)ms1qVsZ<-6 zQrOQa6CNizpZMN{d!}y<^Uj2`>r4!9C{Fn3V7KFBGY51x9o^a`C{5EXN*+Vp=#K9- z8$We$8A46y5K~QI#b=?LhgnR?4j8BUdYE56)sl=D-2NhvPjxJEx~aZi9UHeTsg*Vd z=2^oPnyrh!hGrK4#MS{o8SF4i75H6aww0k?2VZYx%|aGakIDbx9>Xp)%T5KPF8V3O zTPm&=J=f}+wn9mutRt6#kO4|U7K}?>GLsRWK7FNvyJKYxvMNy1og{5QjYVGd>_=u@ zf>D!LG7P0Vr(R6e4_YDc`d+Nc&=lW0Nq%u=PX4kD#(7N{H!+(li~Miv_cc zDAA|>O#{?#lz0x*zey;;Nl20UB`BQ=&Vr^F00r&>=pb60mi*QCmiivdn0e1(0HLdG zch&2Hkz%lSmc}H-MGgSCIS0_oAc;okDL6nt1(63}7!@|zx;c1f{Z`2?ZA%uA2YMAO zlIY}sWoYo=9&KGzRNyQOd58;?iy9kNWeNvsow23i5SSL7QI&yE$U>K0CxOqCJGhJYu*CV$6b3-P<1M7J zP^I}?9Qa(FGh8VHYfPj~Avn^j4s@vWrBXdv6|iW_1yvru_wSx}Fy5OJ?{tr=$^Tox^yxDny{5Av#=w;iYjCkaXfv znOgWD)ML4R=b>eoUKP50m4KTkAnvK!>&3|d+QZuVz;#YL0Y*0vQltTf$tt?2oh}FS zt7IcnWGEqm$*(t91xE!Qu}5LkPk@5m7)GUunNb^VAmRmmzkwK0uTq|Wol13g>bq^ zk$}DnoODp|=RZ1iK}m@TWj5QcYv`MTY$i#6=)Nf%phIr>B*rHB1`#?$UY5IIvF+u^ zc_uw&j`mS^HbGPnS6Mda^_%;1Tm^>sQv_p=zQW^h3?24e#)4}`G#Q_Yv|$W@D8>wh zgn*|9BWH%_f8x06VYwrW*oIFh&YooQ4|)E!SDY{C^wpVnfDkNmGK{uV;WpTQs;?YM zr7ea?7m6ZRZcy5R!$&sQlJA4z9Jqn)((E!Xkv3#)!W!Gk&clQm?NGqovS+{VH!*|- zH6(KiPn#V1-lb00J;{JPVFi*F(J1lcD z1ede%m|KzDNiz5~)+YDX)n16bpn=TWg?~D1kj@Y4(Ns|$Khp6PB@EZxh9tPu5KJ2Tr9fX)<5*2Y#J@rj&jsCP7Y8$kValhM${RdO z-AA<-_Go!)$8ke-N0aIn?KP5z>&!53dSlVcOL!jdAt(EBo~JQl;e^U6=O;dBNIVZm zd4lYQjJg6f%FMXKQHb<(ILbb*+}WeO2E>QzJ8_+e2r9Q5OvP7DRqvLkq^w?X=t`wK zkS43w_>A7z;WuXZbeb$}X@MoMd|`98>$KI6M`5RIv}NyL=GRITjEoq%%O?Kx5H$C7A(;nn-jm&z0k zmsLOtlca4DF(4&upJQx`Aio1rSkIxBDCC5YC(p5lPPs^$AvBEm18e}A{?E%2tmN_f 
zSxJ5iU3NoZzQ5sBVZ-s9TE+ryX+N31J)C>YU}DnaxRr!CLVf`u!K7XmD5DT6V6ua= z4?8r;b_Xa_21d%EtHo?ly>iYJ<#IQ4OQbmi3e*orjmB^8srntsK_L}5u9G)9)xA-i zL_=L1VlI)~ttlnATP);>Rsnp}T1>nB-$(iFG{(sXgos*1^?D`wIz+djaIaJTnp-a# zl!vWsXashbrFKk8*cgZuiy*3Xf%vy8;>zCRr@ayO>*g{2dNf3YE)OmZ@BeSG`-yB9 z7xAc1lmkDU!~NBOn~@FF;SRyTAV#Hu0u}MS|H(*cDs6~EO8P*PiLd|yJA0)dvZygr zj2Bu{51%eb6F@3Xi2`lfgpENc*yVwESHjKeqI83+nfHC(`1EPO1WH1jjOGmj+@+W01;{6Iw-gL(mha#?h(f79P!>( z=D`mZ5lt_|1#Ev7vAib;(JqmTrtBuHV6#Jq9o7xgMAEqsQV|G$k39 zH?fQ*5|m&?Qd>jU5pC~etYwL28BkOgX_ar~f*yYhMcHG8{m0y#kW0Gu5p%}vY=HqD zFi9PN9|F`iEF;WK;($cu4ql6eWw@Px=1r zBCU8;@oXv?bbRGruIdK*zD4w1x)SiVGd|y;D;gW07Fu=&<&AlKq&P^fVI80xX zd-LyQd;!2mMI`DTre?{aUYHdW8q}fppbpI_Y?M|NxNRQN%I=@!4Mv}x zr9u2rvubb$>cB27B|1B#l&#|%TQ`!wAUt(r5jArF<@AbmNDMf#`7_#R*IriD^sH)u zcu@QX0Nf`4<=yfg41`i=aPYw&NMZ&Qbu@y=(H{fzibvldJ_v|tm@VEVX9Q*e!u13! zxUC>s9ry9K1Tzz{cgpLi{&-U;G|vkf+CZj<7M}~kOu73{f>P?_JeveD*D_a>lno%R z78vPSv|X17=&i$XqDq=5C@W?ow*_6|4u*7X?6F&|AeS2W#oIl0bbP@8lWkd{X0(4y z@w{A*lAjqFxa!GQx_b#ggAC`yf;}%)!?E0okno2LcV1!$iKN%;Gu%G3onNRf`p6!v2-LMn1iPB zFNh_4&lwv_SWDisFqTj*=I=!;A+CvDMp9T3(>0h@Rf6!Tv4nqD3@RLmYJR)QuUSxjXvy-E zl%L$1J*e_yF*wk=@zZtpQ+nf>cK44|_*r!KxXRZCz}E@mh2*=ZqzH`NJ)!aoyGz!K zB~l8;?*7`_DZik*$4hzaF9F3tMUQd2;D>eeporScp|D+|*OYt7k$S|POgC0c_aO`@6$+sgQ&LPPy(kNFq;ei%%E_( z5^&mvt{ONkGZ*;aQ1ta76g>od2b@+Udcf(5oI>T~FPz#)3(z90CO1!x)!qSSOfpu} z);dc8xhVy#u4Z9t&mycQY*kq8DD;b2%<-%R%ti$lW(%Bv+4>eRTL2m`^<@&RkIN>k zD5v1IGZqctcF3|L+$L3uFH2-B!fc|9MW}1zGLT5(=YmZ|`4b0FtzW|74nehk zo{|38nTPbpzE4PhccomutN4q>JJEpWu!b7%!48>k5qcHr@JI=~!J%RMjRTlrn0Tqk zLx`=0AsR;#)2ys`CV#R7iC((8Y` zIPY_z{PuA*1N$eLyeoK{39(;6l2?F9owwt^yIwD^jsT#I`7$RQx{-5f>;le?qBb%N zU=`(W13;4^MBgP_aaWP+e3Au$PuYQS^}swH!qxL?BzQBcgRFPrRLvf%$P1HVp8*PE(wo3vQJAZ?2~{&3*nyoBCasKh@N>FB!a8*h&V@Ey zm6U=Doh-#VGSpsCecz|=dsG#+&g4NN$h^IpJka|Hdh90=>NSglwJK{9pO5l6Bzc8q zaVJZ`%USzSUtsas;H)mh8MxEumgb5IM3%(zfGQIO3b3II2q2&Wpn7&l zerT?b=s?G4F;S$IWEW2T5b_R~PQ2bxIe3!}`x5wjCAXK5P*HO)>o_JnY^IZfd2 
zY6962dg}Bk%F6EX)68Bt@s$|?kv3iKvSrjYwkSkh;hRT+nJuL7$AglTj5-{Y?gPVD}!PPiuvm`o~R z!BGNAjNuh^B=|h4$?PbP!_u@)%bE5&S?;Z{?FcDB&yhC8Qei5(YzjhWz!GJorhpb2 z$4?fMKOc?0W36Acy};^iJ2=Ud9)Z$KMd*hQy-5Ck6=EwUC!cvS-6;l}5vGLLe>r(1 zR!o-G%P3YVO<~7@l0-XYLX0+L#3W2zrxw3`N)d2^S}ns!uw5KKA97I zVS}ga^fc(zx03wDDDN;dTR;miO^X%QtasK*jkx$O+D1E6nxYgIT<>#Z8|?>`9gRt4 zpa(NMuvIo!iy>o)^qL-4H%R~&d)I71RIM4AEN!NHR#~nM(-1U+b*vcdUFC^117^?} zkhLr%e}5LVEM*+fCkNLzSic$M^^~5P=0iJ@ytvWsmZ*%YNEhyiUZ0gRVpJFE;%F^X zEJM~>(4j=$oH3iy zpI+I@VP#_rglaAKS2j!mg9a}-^(3hS@UsJj# zE!3Uz7+c}%-~KBJiTpM$ehij^nAu0Zz}rhyzKO@1wq}R_l75u?Z%c1tE<+2fg$VMT zobav>QprTWR9;|C1z0SzufCaT*qxjMQApn3Gxp_$=T&%~!gD(o7S0O@|2Ug&n>sWA zQvP^~$5YeU{C9w5z_^${-lEz`3%|HW`EyWI3?cib-e1uNHGh61&za_I<}}UUoHaV- z?7lCYdY0s%j(O&NXYGi|<)=sfU7=ViSE{voquFW?4Udc_%f^ZgC^x_{Ts+6>LEKYqQ=#JsMZojl^RGk>O?HM#%k@aw1B*)RW@ z@BdUN|KFbQX_}lPGDN8d!>?m85#y~{(%FCX!Hm$kFC zJbTZZe7fhW!+Xbm&m}KyXDfL2m1A!G--gy7|Eix|5}wV3_t+^eSibNf*EtoQefWs$ zd{yYt8~)I+yfU=+@c_$@hm!9PEnFN*{@MR<$&13TW3z7W!gjWt7C!!KuJaY`Y>Z$3 z?|*i0H-~3)A2gg7gkSFtzs?Vk{ZWAP%hbYtx3D3=`TukGK0tO=b-wRD z`~2-br@MD|I;2TAW}iclW+oXjZ!(Z!P_rlr;SO`QQx>gnC6FkKxWm zX#^n(Vi*xJh!8Ov#sSSJC?aZ5)G&iX6i{Z28YL z{@vR@Ya`{kr;03hPN-g$KKWaiu&n&24^zxUj0w{WhHSx)q4Jy z-Bz;fG2l~=+Fk6Sbkz;kvy!j#M!OSJD~mAcr}M2Tqxa|W5(}Tc{i3uyN?^eyq)Pr{ z(r;)bWtyQgawvWw+ThHUZLZhGr%PU~QkFtbj5;-C+u`h*%p2$>%2Z?l^AcsM^C(lB zM;Y@j&OkJy9eaiy`yuU_oeYLN4Gh9271%_9ULK`e01FU{#4(FY=@#IOT+zAReT&@6 zT@IpKV5K!bt)Hgi0g-6meIvZd>x99gqD_PscACG(hs(Rq{^n@1Tl z((8j$F9MFwqaF+P^AhzOHjjE1&7;i3JjxtCk1~trQD$-;Wv1p)=7@QedD%S5ESX1{ zbRK2e^C;7qN0}q%QD*5p${aP1GB2M;nWN`XW_liFj+sZ9Kbl9GKb}XKW%DSrd>&=~ zWFBRXoky7!^CfL$WQkAv60=#n z-mcs~pF*FRPodAwr_krDP#*=j{dJ4Vcord3?@2An-=n{i+V!CCQKE4WbXVPo0x327Zpxcp z{1+9U7hpe1+J0|8MHRxqO-eL$brv!(!YZyI?d?GFtp)AD{3Wq;P!RC+2T5CAXp~R6 zpoBzuC`Ix(7o_q8$1LH%k6#CgX3ob1?&9-hbAP>SY~^#FSVD;CM#04sW#@ipifZhf$_DzeS3}E%DVWTpn9gdzYGcO?OKD*eL%;us+2}woszMPf!<=kRl zeEGaD=g=2{3YjTZ0zVxo4NBp;7ca$P+CR)*G{IaH77OPa&`A**z_39kcric6(M;2L 
z#b*OV8>`g72L|)v)8NDas!RS5D*Ial2|89xI&hFH#w#@6*0_Kn|@m%#h*j;p5>9FikRMT0Vpkw#X!&HQRG%w9!{ce`k4PvjtmIr(U)g{x=+<+Lwi5nJhnt6 zrfz32@FDBhMRc`E4JSnZGync#zC;wu-viX5ELi~OyOtqm1_PjO6_BM@xmT&0|Ebsu ztfS@88x;JIzMeWsgtk|$nN6n{&h8RpXn0RBh z=C4-K>#QpZ@949?;aA64$m&+;2?%76Rf1$(1TDqTR(*`|B7YI;GADz{(>7b%ywRdU zO=!jYymvwwB`7J2CiqZ4^6l9XI;1ZBfEvlmvHp&cSOi$-!MvQ{P?KzVa1KED&Y zgO80C{Xt71SqbF8973r?22B>5JYQw9zQJ0h;5VvOO+l#@yx*us>h5Jceoa+GMCjrD zfZcjX{&d+q#jAzd%cGqb4KW^d4O34LZSD(*u%#-6nTQb3Zc2z?)Wyj&t~k|z7N!cr|Aton;vweOSF%asjZ&;Q%3N8)?70k z(-LuVqVN5Hy=Rl*Ael-OJy*{AVmTwDa;r*F>IW*4=W~p~dP7|% zAjh#@lF6H54#lu*Y*nXysGzMHXYy6@94?P;E1#UAQ91SeY(ha`CEF65u%l;{Qy5ZV ze|PXVn~b|jr)8tI{CAq)X@U~L6j}DIGImFLWZ)GE%Z67c!4D}`|Ho){*W-_!b z7@4u{vzE=(^^29v&u#`wry4d!su$6L5gQuv1-P)G%_W zxV#gzUBf}JGq@d_u|e}jNG?0wTft)ZHcb;?^U`ihls9|E8f=@lnWyg4FeOt>#Fk)f z)MOoeF1DLkSzW}JHTSe)&YfdFVaOHsp=MlA4gNLRwa_U%8r>k)d)Ht-AKU2^R@jM} z8T4CJ8Jav>)@xb-0s!5ISNArBgI$|KtrrlkVXo`P#Amh#sf5LPF&9{)^XL%X{=Q$< zMc-TaGv7t}N=fUY#&(W+N#<6}>M~R7L2olJT{mDa^T(Jjb~G{NFBMUSa{^{&p3Sep zREY*)izh>Cs758p)3XCxffc61;wP}@+L$~wkfpsI7{-~Eu|ghcfQEXQ&z}%+Bq}&h z5NWw4eg;-DxW3b zu#V*&Qz-G}R*=TEsXZsl(me#_3-0zuWVKn$_M zEVV%mD?WeUvYOy*LrpNn^k${QQway9at#k9fz5jGbpq&a7>ea-UsGH$Piause}vp3 zZ|B2fORKxgh6tY&(k@b)3kCNg@IT#No}^ zywTC2X5JI^WbUD!Qy40zh2F7(&|$E|P%yK8uw({{cjIpB-lkaFefADcv?l(G?tHzu zIESfDb7@`(wzVc3cm86IK+*T$U(Mp@6aw@K?Lg;s=ri0Vn@<)>LQX?AvbB7~tN57Q_|1U8bNm?wt3U@FP|Y#uU%XGgs` z5Bx~?vXRCvT1)&zwJ9HQ>r_5aKf+G6*bnyq>UJ_MLAm^pG<=!{QhsFJN(v(rw8F?V zUFGEdg<9wp*z+8G|DT_Ry@)OqtHEr}Eq>w`Nb6Kqa=X*GAdRpHl2jvUQWjNGVG|#N zCi`?q3|OwE#C1T9CI*lur(?;@#CeIMr68LbPzkN3nT5U)W_b}Ik>U^7Q?@W#_$5qr zcTO>?8o+oP4nZl8baXN_L+m3LWVHjcDfWgn@o_38e&RFnK| zg^ea&9W!z&HKc1U^%+=E`F~iO7<2-un!UnZ)-ozskxqS*Iq<;JhAnX!f>etMn`JkC?z)aoC&io_ zUTMCV`52}uEOAK8F|l$ayQr-rJJM-zwA}no3BM@I_Qj$S+Nx*m*e{K00G7lIv1_d7 z6jVyK5~FfEG)Bgx?h-)-|RFtv^f)>^cS19TiOV|T(>O3#r@>8a`O2@ zC90Gl5go%RT%!N16R!lS^cV>0M@eEaMlJe+N>gyrsldV;(ecuHPC_syK%17l{~3<0 z6(dS5bwplfk*jQE#ey0-JAkqG1=PwSsR%w40!YaL9puW(qbWwD@r2U`KJ|XOV{ 
z9a6d|q@uwXa>lL1ud(_aO7bWMLGf5MhD*u`LNIC~?EE4ITtfo^0B7b}4GA#ZP3Z;x z+>uEN83d^WG#jhBNVj^DepHf2r9dzk;nJk~QqoN(4-bo8oY<*=>U2&7V-=4d8SaV( zc@boOVvT_`gD2Cgxje><%lJcF@^B#+m_rVv5G1A2yt5n+cpo~bO-AC#rDHn-$;Um*#ln@AaHW>x3Or{4R|Yo#p_bu_at3hzMxh{{kiJ?v1+fs$gG838 z&Yb6bkS8)R&`qJIK+gebNE~rVgGZLhQblB7kN|{Qw`F{=%Dl-iONaM}RVdV;9`rWU zqlZFsh-?L<6^3gxtO^C^VYAnR$_bHne`dMzm(fDzk|UFPpp%D+x<$ILX>;|RGgAHK za;ed!sZdIEcuM^l#Y~v008Ghkh>^Texa#3>&_P0Il>-N=oK=93irQhy$-fU2zvHL@ zMb_%G1a*GjI8pFrrGUyD(QL8Uj#6=drtwrRjE`|YinKdeMEh;>dC(rZUSZC zprEX{)Mf?C_O#nfXTTk<*)SQG1e&4QZvq_%zPKR?3Czn_?c&VYZ8r{$SapUMBJEkb zEmlfsmuaLQ9Z5C-9cVQu%<UnvmU8qi|#qxsyzfMI|ZxJpRKyF7!r+xGAqn)~rJ%Q@i5k19y=uuoRMB7sKj^r)1_MkY7U+teCDsqd7=wMD~^7X2^cYjcg zweWUnarXg#XWlO4ZTUVv9<>(UE@QlV_Q&{m)LMAEEa2`-PvUV@Yniu8)jj`xmsLp@#cX+$#?k)ZfZx@ozyPtdzAC_7RZ2-%@OByJ?uR#9op`$} z=I*DD;89g;;q9XDp4)A`#@l6-yDvXwcX+#~pLf2|`iZxT-o5u>dxy7+T7TkuRtx$) zjo}-FxO@C9)*jw28pGTDcX+#~@Iz$xt1=Ti-b|g zo9{sbCRKbv1!Ne3gb9>!4Ts52dwR^OLmUwR1F6x>ESX%Ex6thq`H8~&6J1k^urdVhklQ8HY3pC7^ z^~X+zQ%Jv}xWrBs)zg2H1lNc^5P8$8>o6AoT^MSeL(yGg&pwrl1mHlW{~})tM`cvmAVh3TKkpEQ@m%Kg%|k4q2SD_*wl~oTm_o`}kR0po#eG zD!P?O<3VF*UB($!auxHfU%_q?_$R6|4|Mhmp-14eL_6)^>`%!)eKQnEgtyxDK)1L$ zik-z4V8?2XN`zeW86*OEtcpPb34JIn6xi>GgHJTaJUgHVvPd1+m8dF3kfARUy>4#; zM*?6>bm~TzEJ_K;3cote1KUE#gfzt$0F=-JL2FkPi6W%aI`{02|Bxi#7PH2oKMnd1 zYi1b*R;UzTYOs>z(bTH$AFxIV0f z^*7U;aOMWuxwy~Jsh#i;(TRSDk$=?sMgyG}G@@4W^@k=I^ z7UNBqUMghdrSH8oaetH<%xOo=^Htlz^%hSdLgpPRT=3E^!JAROKLq3is5r6CeOToe z6)IQf=c+%Ye2Pa}fBgy>*wQrl94}r!|`oJS5k1vF+ z1;PqR%R1r*auHNHHa}pl|CiQIhwKfmF&hvh%_P9t1rbD%0QqZ)QB=N$&QP+q2FE4B z3Eb*duQ)m*+r2Lb*?spdTnv$JBv%cbRKB7;j7H-1Mi{H7gLov!i`iXDIw7!~ye=zA z)mGuf+Pz`H)l&8N@&p%Sd_<^>?MaR&fMzzp*Yf!z$ZgaTI8D9*?A}0XKwxVIHp#FN z9qjStl(@+u684FJZ-AkSk%5poHIZIan5&X_V1*g6U0bZ(8JOaCRQL=@jhBR5k zj=0bK4>ensR8VI%QCqXmlY!l0-uZ{ZN;3y+$i!-}yN{Fb%oxXz13;zOj}7H8f;-Kb z&tizeZ*`G9LUk>V?kq`Dg>^$DrMM?cn7W;5WNF+anW-JweH{FfXenD6G#Z7?n??ag z77gF0sN5;Zg_a;Ihiyw~DQM+>(}L>>b?+ntR`NiwjLC+EH4RA#^l`t%U_}Zun+1Wt 
z*v*-e9Urm-vnB=&6`(8(<)WSWYdz`3n6r`cveRbSVygfO&F;7mf6dbH;-!Su)JydU`btUvVBn? z?>VzD6r25n?xY&4MT~EVSkB&O?X8S+WcwhiRssB~fIyoqVmE6I$WBST)NorVs`ey= zsww*ju-)dU=Wreq|+#ne2L>S_FPAlf%`;$14MKu7gBLy- zUm138n#I!UL}#%Cz`Pmj6NU~AL7YP(KU?iV1k2)Ir2#VhmmM0Lq!yCjuUb{Hi^#oa zB}hkFsc>lGdBr6wpHxjD7@)YEV6}16{Ou+2m7Gg4)Z@L3fiTiVPoUUwjXNw)0s&7o z0#6E=OpG$9rRHD-a!aoVOPx9_23>{+$1p|n&itFL*R%Q@V=qYsF>BRtX8$;BO!@VM zRV56#PV`Z7c+4~~N^Q@wH=iu$JWD>@J= zCc zbfPEo2&{472|X6@fbQHTWk(->J35Pa` zD~0Xz>bf9jpQbX=nHpH1&o)V5wzRG46QM zpvH%(^PvLg64Y>_kf4S&F-}iB8E5&45Sd+OJ=w;LYF406MK}To?xzuzWsn3Qnz?D1 z%Nqh1Js`=6ZD`ea09QIM7xVZq0HWI8a73?G0F>z9&AzgrL{#)MC3_r zST>e8UvpWCApu+oxVvTuKG3)em_N|pc#}+wqK&0hgF99~gxmy|t-_Z_vbD6JxpTCN z7+r?5740_PgW*a(UqhaS3ZB|Gx=Su-lfsM2BFWq6gJjnA4a9QCghXRxd6wkJ!A*&V zG0?(1E9+vGAn_ESj&V;ntMq)3~!_}|}dKOqO5=-86=^M$f?^w^R& zLM#79JvDQI6<}BYJkABtdJ&?vUT8vI@$FJaMbEHDwr)zAT`j=UeZPM9=!0ccbUUGYc)I;v`9=AUx+`jaPrV+Ps_x$_4G1il4Dsxq?ox8+bE}KU# zm47g`%POf|f{}`$w^(hovMsn1bU}1~uhidC;!E2gCdgwx{J^6|+xj)LFazW4f3x*K zFO@?@9g=QDC|OH557D1vOfMo#Gi6*7i43r6ZzV`s@};TffAy#2y)1MQ5sa0!(l8fN zhT{&0YS@^}Mk~1$V{8b3KT0rxJ~L<>j&Lj_nFlf4E_wOUT}(mCeM6VTJ;X)iCD5Mh zKg`p`?m4}zyQjj-FI2J#QS|SkkRCpPLyGrvHtD*INo2wwYpLpxBDv-qSszT?twVgL z;c22R+T>=-7z+l$7`_HfhOg&>Gjz+;uth z1=nTY7g_yq+zA_cT|yY^k$TKT;k%3mc4;y9AAQ0OC#6qOCNxASJ0+5*S4@(P9FZYk zuIT@AnT>`|rp15Og+1Z1qNdhL3-;oZfbn3*pvMUtO5wYW})0~s(}Ad4@+1`%Lb_mMmIDBtZjU=6IkW2&JK68 z#GDY`ZUQT+Fjzx#UP1x?qreh!ygrDKb4~@kL7S6{P9k%i>mWn2_o-CAJ z6vVXCYt9cwT%aGDIp+VPs1(O0vwDKX;Q+ckBJm5w(n??2O1A6_lyY36DkvbK{Hb7S z_R)NTeaeoMoP|+nl{pM@k5o{`O!~%5^N|q7Y}xP z5(mHOgp;eLxSk&%jhEp7`Q}PyYPJKNwa$?KBrgkR))Pm{fTjA9JEVUy;zii&^Rf&k@0JXKG#Bch<+cf8H=l#0Y(fN73sg9xkL1l& zbu*dljUioE@?$y%cdoWN#>C#PqRi}8wF=y=b$Vfa8npHsw5;K74H<{Fl%RMRj4{_p z!X#3tgI$cT6En>$kXsqwv89VUmWM;R-)oX8)LA~9%{4#T7**p%Ub0}nH_jnhFv%&V zu@E3HEZ~p>Oe8kYO7g$v79fZi8{qv}&Eo>?RwWpe?4bFBL#sl1>frm>?^A{JRM^XY z=PD#SDIDfL`96PAcs_Tr38p$_tq3GbS`c0Zrc@!4vLYbn*HW%bdYt@tAqxQJ0JFR< zxij@2Uvfg1)Res=!Y zHn62g{vxrGa-(3aOIs#kuGmSlK-l;7ElJopVEMVxs+6!dSPK36ga66zWAH!utq$H0 
z;VAc`hdm;ML)0SorW-g!z2S)FrDaqyW}5+K=nUy;CZyhDh~*mMq|E(NBbYL+mJmYZ zaw)$sb76}}-Y0Syfh(5gW}qu@)!9)3S1AC5F_w}Ct8HNn2Fv4}2*MBZ(vm%jXe8%X zk0%rop1>d%?5k1P(3Z(u*zVw2lqpGD6LQ5VD3vFT3?q<6nn*j5b`Unxa`I=w8klYh zcMKWr_k<`TU-MPS)&h!oEaJpz5^az$h!C`|^h!^2scNJ(!@^SY5KkkSX2`}s^h|p& zZ{cEsphxiT9{PX!X-<=a?z_5-xW~98&rdbOl#RJHa*kR@Y6vkAgb&Ll!S^%9JK4MJm`%5(9XHOwh$sTYc@Bo(}NrN}J>hg7wM zWmWae)WQ~zn<>J90l^r>%yoqc_NRB=M5YKVm6=xLbr#sb+GsOxDlNs8q9&4ZDl#1r z{9;p0`ov5&JAQ=8kHI3`@$fjpJUL8^$pM&|PuH=@wZ*X&eWgFoXH2SXD{YuMUc zQ6sr|#H;0*Ve_0esA;Pna2C#$au+dH&ikA0io=SbtzSSo*3w z|CR|et`X)?D;2Rrrb_UtWX-gYN{mX@Y35ev>0}Aj-2Lt?31b}e=^5(|a8u0YAJM^0 zC1=6b>!y-b|6ugH{Ue#241Kc7l%Oo@C#mJoaQK${o3atZj?h#rRLd31jBtlZ?O5ad zz7qvzc{Ko*9l8g(={_*sdY5Jbv8ftIuTf3@Ig<^@a;K)jdAMPoKusiOu%4Rzk`_PD zaIH;#Vt@`dO!+{NaHlf&s=J{RqD^$=7!-TSe?FG4<<1Vs{n9bZl%;x)(=A}f-i z9TP1+j`4U`Dep{I9PnATfNZ>H|K`J`y~1#>mSIxgaIfG0~qW}vCMjnt&Wx)>?na#lzn3^b2@zJm{|TRg{I?#z<(8$ zM~??x;5f(}#T%WhFSxLTeKeNJ=pb7d$R@h0vYy+b*VLGN0G7!gnoP;iV5Y5W4e-r= zLM*7xJN#;YtRzQ(GunKh0n%*4Ofi-=pyM&)FuqE3#6+UTU_lLe?R742@GX!pEhTr4 zQmXlA$!l3s+g#$+;N`R0y|LX$nP5ix=JUd(p;GPn5yeuKTG zl$&XZaWWZ^ZBiZnPKZ83OPNvRBd1K7$6+vsFg8f<;dZcg7WScA0<2n;LG+$;OK=o0 z(A3V3@phe-D>$?oRBZ#MUGs$U>@e)SX1HSGY2H)vsLvz`^)>nr3@rwp?Bh27GwMSd z607-$?M%*UFxG&f84{cC%4Z^UVkLHZb+tt~ZEW_lx~{2C+r9P@z2+X`!*Db@J3E># z+7_D4jfP9f2`ia83Na*(k8375z{kGtpOjIV8EA-sk?a6Q;Lq7+F}#7{l;r1fxmF^p zHIp1}tVkyPF~zY23Enh=6S9~8a1Nee=1n4Sq8A9Pr4mcLEV++<2DwjFav$81f-!-_ z0Bbu1u^k=qt)+Z})Tc_%Qxn_KS|;_eA}=B$g)LrYA`fukG^zk-t2HQzee!Pk9(u8l zciB+4bf#B2N-nE^+hWon^8rw@v+)uitvGN|NIdJIR2klSAUKR5{K-{WAG*`n*<1bP zS}x*h-;xIjRmuAY1rQ&2tLi?@f}Qejz*|OMG3f~1ojsWn?&Q-c zW0qG_FUBmQh4f z6~n8=kI|T)At0yu5;eASEvIh~h9P%=YdEdY#A-XOh=IjAL@cXK@QMDkV!!bO(rKl{ zSNUnh!0i;II|(RA~}BK+Uc2+Bl2J#5?Hr2kSzqW6I~~d}Il( z*noqI?Ut?J&Ef8HUFz=nTfm#ArVh0nX_qjg3dZ&8Lw=ufq+P!+Q{;_)2hnNHXs}Ct z;(EVSWu_z6JP7*($+xpO&t$!$&?izT^9NFlDJvMBhdyCF^n^Y!eKQE~gRpNQk?<@V zL=^s~!L(?Ta=-X2C^25}zM9kZD!6vN?Q;}v6$<-p?%CnYb`XBast>MR?^5{%#qu!j 
zFXGzuVU^EwC9%g!1`s75&b$FKcYDa7KrFVs@zehpkashBwsACTQ!^SsS{|VB1=kT zlRao~Wu~96suo3A2i8RSeu)PxXer1t5i@VZninUu&z0JQUxkWcs_38m5aN|G`J*_E zMW@_xac4}GK-cgNZr$GI@tO{K1=*M{30WSMgcy_tdC#^}K!Etd>9=3q0c&kDF~?5pNeql1mI z5Zo2%-ur#M7@SkX=gaO#~X?=ZGrG-{4-Gj>;jH*0&;?vDVFwf!5WKB1h6i zqzzaPKB6f|zBoFk)q{@!t?2?(I7(3?LWn|*FvAM2E(0#PU)hUdPk17=`7w-2w6K^(iA#lX~)J;UG$bGb|YQ*=yz zNzl}f1^4EyEK)BHgoGosGEQ7VL3j20w&2Gfug?B?=^)l{m8e`UaqQ+4Mmz~z=7RXn zM1BlP!XpCGAX4EJcQpq<-s|BRj5X#2%Lr#07PHunC*W)5HmFL)cWkD-yAD|Bx|u^Y zQ09DFbjYyqoz%a@pgc!Kuu$GfQGASF6J7YKz#s$Ou$n^4W#yS&Z7oen)rJ)z9^)K)F~Kvm z`h;xoF9hI}=x_-}d4RLLMmI}2D%y<@SwA~6)aEiI2Sr=rDVM{2gib^zB|6>`qetS_ z6QUKg#=}41!oy>A9z)hp+0bs$8b*R{Cf?A|hOFz*$>)m-iI^f@bGN3`yO0o!r7ft?^FOMx<)EP0NF(N^g zS`cRP_lYbdD8Dz^lf8u}qCib)Yp0{=I0FQ(IA@rWSpA7M92fmVNnKkWo!1#b@>Pmg z6Fhy1$Qi$yl3H{|K%mRYioHTGQ-EpiDxf}5#IvTst`HjC2UG@+@A`looYXfklO zwgeUs6x|Hj4el&Wqd*=WJFMq-@^=nkJm#gYh=&9X)-5gWmTRlvve&+x;jhWHzsxgp zJ;_OaCfN3pOD0FEJSGHznx<{92qm+An5LWT!Y4#em`jzB92P5nC{4GY>W7m%Mi6x& z;IM^-n+$Sji{h>~*zsO+!3G}1YzWx#8p#Q`>n1%anZ<})c+7tXkpanhPl%rN&0%$f z09gVi^=dpEd)H{QxB$Uj<2!6Wv*$FJ+f>X{O^>lj@UY@-F;DeP`fbbg(P(tpemm1AX9kNkRW@tm!!h~gt-7B z)A1&eWtw0Uz)bZ30T>UI)}*a;XpQ{#5iVYcw&7J9$fN*P+j6*0t6&RFG~IRjn~>pw z>bLY%k93)-zA=`knHDV7RP$^Rw$N@;HS$?)x|<5sY>iF&9WcgMb8C!9L#ZM}Fy15k z1>|1IHOkSV-8^6SM|fBUs-kWOs^WNan-G9k@6{DbvfV5k_%>6U? 
zmT}o^K=v-Kmk-2EFZJCGk+&2zqUN5?V*{7mj2E+*(y|l>6K$L*6?PEkTnIUwDwh^c zFkBk#L6tlLJt3Eu5wxHoqel#Cse}2k$FI0x7Jd%(AG~TO7Sc+m(5tZsY>I&!ZlN91u;h>?{0*8cGZb2UO1q zEDoon-%z9kxE0|sbAh`th~5xYmHQ&A6EA0b0&U5k+-pzZ76OH3UWjRMe0gh$9yF$- zYfHyPAGP1hBRR{S5Zx`_%_uAt?zGRh@;4(^bmjh1mz(V+R*d#Mk4602=t&m8b_SA# zGp8WO#UhWw4fXC|_?~KJhVGE8k{RNvQSo@U(VTMH)Os926bg(7brBps8m|-jxNEtx zO}ESC)j+=ND{rKjJIsKY1Z+&3o>|$e0aJm=lR)%+?B8j5BRk3H26Dedr$9%DKop&F zfgql3=nK@F$c_00HrrmKQl{u37c8< zY^T*BN>rTL9%?3bFB?3wsEtR~()`@V&zAqW6An_76`-@gPcfHWu@qK0rC}G8po@7E z&&RwiIH`1TdjVSjYU{Gsxn-@nQ4iy0K!kEY1Rg)8HR^Vou4-kjtMWX$&fMyZH?uK< z#EEByjd6KISV)c%u=a=8MEIu%H?0ct@87wJ@K66&H4%R1579*UnctI%@JuOR4K)s3 zHOLM&;cTOOh9QP#5?p2X(IHD#B07;x$Gg!bU9YgDEMSqTDzdzP88Y`OoxruoF%9tx z+`r%aC~`CToTAovdZA8)G8&e`sd-J=%e8k@LRi99&HNx$?1c zyt?=g3h@|}teRYp1uimF*V;&z_QPVj%7y0}uPKMKI=K?cwOS?Oz?57{a6P_*5|beY ziRoXKc9t#RfsU~DsC3t=m}f?S;Dr3Md;X%#GqzDvL?U^g;urh8y_MAA&q~oNO1o4g z68FaODh=t81s9FJL8V~_Lg~~K;;i(9N>f%}?kXQ&ej?Peliz#!EhhRV-by*SgEsf; zz2pMN#e4MA180W!?lz9c6}JbVR2*4aLU@DQQLFn&gSEd+$_farSZjT$%T_JI6?m=A)LI3g2yuV|ODX;w=oI{DfmDv7J5$N*n03a5?a;Za7pfG)s z^d#fd-HwO0WO3B@mrg>gbjKBFqACh9pd&uhLWNktr={glW3s0Yw^<+9_^fn?BMy*0 z2>Ne9g?2|;HK$&AzZU=T{T9^qQCJE@x!uTK`eI9=mb@_8mR^XZu%G!znVFetz7*_H z?aoy(&$6Z9pMCT%$~=pkN1_oUFh6Tq_>T^%c6#OYCq)noh(PV6S6$huzcH=94y7*k zH2-5eCI&y4q)*7>;EOosTyV$5$Zn&HDJ#1T`|py=PFk_)@-~Vq3Kbq-Zss|hNM9jC zRZl@|{aKf^%542@0(~^I1P`N&w*C-z){>d9{zB<#zy7k)sY-|Sw~63ow}o0}_`Oxc zg7vq@o-GGbtYZE4>+dH0^y_b@N4QiFo4IH)Ao$SBSC=xw@<$iSbDA?QlGYB|P*5;k_Q}4s2s$}OTS38erOK1b+Y~_N!=5QTQE}o^ zc$qFW_x#OX;hF2vAKS(AY4?6z7P=d_NX@e_0&XSud2B=RQoKhdqHjDR(+!o}4cjPv zwEN+^xs1EVxD-Bm^A%ia2Nauqv*&p7NPm)Kq_ct5pKE4-9uJ;i*U=j`?s&Ctrp>y zYur8Z0HKk~3b#DK-7mgDnC5beaLamk^L+$LF1HA`T<31hV z;rH=8b^ol(kh_(OXhBYte)<{v`axaB-G{mKyng%VdERmRALKIPzQ#qB{})ZHy8Wdu z3%uF%F?F!T%v?Ew@AiYfZ+kYB?!?U+iWtMtpN9L!3lP^S_bFXs_i-+2(ALcn?xE|v z9pA6-5OU9A7SHe68|EIe%t+*DO0`s`o?=lYq0mfnfi*P!sVUZ8${Qx1x)1$3lk#%+ zW?dGzrypZ(AR77WFJ8y<33s6zJUw7w_Y(|Y|f_egjhcCxKE@$^Xd{5!ZL 
z?z>!)O-E!wga_T%K8Bscl-tQAc_vSqceLB{0{cBE9-?jWn-gHDWJo}}deK$M{wihpV&s@pVMQ#rl`BH@Ef8>5~ z1BfJeCx>oVqU<)Q)p`grxx-v?fO^tXyvw#HKTi?{pf$?<5bD93X5 z6cMA{fcrL=9z41mHU})+tjhxT{5yI5=CEKFx}W?9w;0~$mECeJ#g2B*Z00iV_Ht3_ z{Po>344QV==(5oLEKs9glzsA{{lN*R149?rsqn*bpTWK=RP zmjLo}1UWM6CV4=8iKDtUIrFb<;SGXLeJ1Xnf@H*anj7=6;i21#28iZ(0xHg-onO=Z zg6pQ(Qut=q*yszSBHrGTO3l1rfgO&+k%W8$X5oyaxaxl3e-6@oL(!fYpKM7tF(}zP z&q#KHk>JH#WMUC-l#1wlq^He1Q#Oa*%#T&aA4Cs(4IJ+_1#z19N}x{FsUCp=hr zue|Xv`p{tTX*pT?9E;jxW0E=Kb*i7EoSa^z6WgOcVWqi~D++y`eeFF;N}HRly61q( zU)cs!rHl_IWKWig2WWt1BvG`Gjy)lID3GBtny|a)NN`+y2Etc@r14h@Wkj-Lw9TtCk1H7|&dOnNU?C~Z ziHdR@*D-8(ZMXMz+pQG$!K%^vd)5xuRmq-VNCQGr?n(lMp!g<^R zHUvIr^NV9DDweatyEAz*5;Ps@sMm9BXe9_7w8B!Mt#mJmoYFpAzfVGLA%`}ltBS{zNvLGGRWfTJs zE<-7*$aU>uui-%Vi$q{3!x~~~HX$u!Ps*b2G^KK5_S*{}dBTaIbOAv-4Z78IcwI-% zyBTU=6tGYFWZ?#B#xvyDYdT-U4pN!>*N^F0K85sqi+5ca_LxGlau7tyAc!~S1N>4$ z{3P>vjw;W_ozZ|RAcW!d(=s5^TJO%C?oiV3xw4xJ$t=G)$xYTNX7{K~x2h5J3!g!l zJTOKuxwvtZg0=#ZbksLtFkpEhN`~`|wK|FeI$&OD7)LITB9~%GMRn{}vDTkZ@^YrX z`+-&=<^q27GoT)|Wq3JLUMIXv0$x@ModgZcVsSuy-Go@yV~f7<*m8Bb2di@YWvVvI ziaTIF<-y;qg;|(yE%2qcI}p|?ySH!YI==MyOKR@|UaB~gS_Ur#e(+L1Me9?ud_*2L zEOf-2Md&!htNa}wgym)?5iLZ^?h;C3i?g-dK1{?ZUM=#(mz`n4wejAvoeh~w8unlm zYFb+Ml2MrCMhF{rfAo+kR`d)cfMGQ-f$uNlU&;qt%j$@=ZcC6~LWoA!U!tp-XyG!U zWlEVY%Fu1hN-7#=Gd&F9UVb_fuEZ>!<(A_t!^V42IBRXm4rV8k^`^Vm{(_kQIH(@) ztYGd%m(f87>eF<}GcOe>V#mW(hFtR%489b^6EN>6OQWf5^PO6ISU>`isMjW}nhpuU z6!D#wX;10oLFNn=0CYW+K~rHUx=VA7+TuZ7mk@|q%2Mvp3ZQ2>E`Cf`sQ)9n`USRI zMX$3_?4nIfv_KFn$LZ*nieHgbF9j!=kn-Nm7%g>PKGhHfHUF_4Dwjmc%rEOu&|O# zhdZsX&LY(oXmOZ7vvo0mio|WLdT`vWs~+6R5AjO3>*_Z~WWCwWxDD~TwZZ@q-C_@# z`@?0>yH_>ci*z?5PN9To;7%S(eGb=^{Fu4M?x6}m1+QJa$^p6)_DcXYxa_J5E`yp+ zKR5XJsKzdsPsMiAdYWPI>5-)dUmfPKLL<*rx(dTTOfI z@4kvLoOJBjN-?vJ)yQN%4W=B>M>86UTgtBkeDXMGaBS)F>g-RP+ygZ#KAqt$qk4Ws zMX#m3VTkqQdI{oL4_j>B zjod27hBay;`3FX`L{&4nQ@kCb6cIx4g3yvNxl{3K=p=TpjXc)_=K(rUPPT-k9h8ud zC?|#Nqm)ljhcuF(p$Ipdu)0*3O3>nKB|?#oF~rz=BoWvl@bn#AkJ4L2< 
z!HCqwe|RrI+}jDqT~UJ;jvISvMY(u_qB?+J@j_M=aW8TnL-C`fTOXk?;qPw#T2yZOus8JKnD{a8;0;>KmQjo4NwrF3 zAZwjoSfB7-qx}Zd>RB?l$XEpu&Gkve2vP`bZCFgYF0!SCw97Wc=0S=qnI+4>nVg(< znImHsO-=Bgo+!q)0JGA({Bi=Lsk10vwmBJ)JH!*uqLDSd+p^+`=Qs=UWb>vZfps}d zG?E+mg|NB9OB6pH*k*|4o8{8lJEttf9FV`!WEQ8gJCSr}_4?%Jb55c{dgX1u$Rx6{ zvcU`A4gw18WJw(&rJ8Nbse8`eED?d8;Ut&lYpk2vw!CZZ-J? zOU`^p4)*Qx=w^#AZClIj>Yd+Pw(2Jx-==HN?y^HqXzez;9{T-7C+flEz?>+p_;d52 zO)B=1l&61#IqZ_3}Eg`tGy`1cDVyzep)sTe8B&hhlYv*3Y z3h|wgr+f0tg=mQ+0?YWW7wm!Yl)jdk!&E}7rNfL&93r(K?lk?9Y-%ELU$dXv@zz&x zsa3$({3pO+nFO_YwtDMUO7C#7g7sL+yA>(`k7rK)W~h4uxg!)Xp~PCJtLl=~$T3k( z1GW|PN&;Pr1AWLt_rKvV_lFm{{|$$UN=?gXOKz!ha`9{*`gyT^zq>@KZ_6XMvJx!t zNfrkK^bxE1o5sn#><ppA7n+D?~IK!YcsV|LUIT1qdECxAOa(m$g z&JtcPSE}97>+x+KKWSg|hS%}uVjj$FN6V&k_jagJFj--#Voh2^@47tU8z+@qnpXb% z`a7=Jj)Mxg^VgFMb7Y;ncteLUrjlop>+DDxhY2{gbRu!y#13GDi6JeBW}Jx85p-Jw z&Jj`O$Zfya!I92(aD|va$pzu0kWnSq7C?Nz&**)#st_3 z7p2Z-Y{^Gth-mlRhJ0GZ;0xFj6NBMvVX?waZ`vjLJETU@f(~ zk>Vb_@D6rGh^Zyt@j)+PHNYx^ONBd$SCg>*gw9$*;Eh?h2*1AvJsu&r;U^j1z!2$Q@yO*r8Ac$P*$|HSRNrI21|xw~p> z;r8Y3raj*1!3s~iAMf!ipN27b)kMs77b_h z|4Is@r=|62iG$Ka^Zz0hlbhi`!gVoy3UnA>$TFhPsi+qt7qbe-BM zPp)tC#FZ}O1=6=kMaQ(S<&p}jHuo+Gna>gD$4h$|ia!HD4 zH%^Z{vQ*}En!aqo_{)|w+e^#o;VJePhb`g1&eG1pB}|rU28k?vK{}{oVI3pY{GIEkT5)7)n!vFd%s=k93&G+2g21sh(D7wI-gr z?<-P|2_>Xcidt4`X(+`4&FfxTtb6tdj#S;#g}S5Aqv_&Tl$*M2`c3#Og9!(Xs70xsuZ#pDrp)`D zeRX27M^25IZB{q$n(6zi$|2-Qw#g8tfHTdduRNivi}CWGG@#o8&>Q#(7R&w!YN zJ%h}d#oDcyWKn&!pH-}#Vm-A_s&*pn^!NSjV(nIp;12z@pVL=N;dHDMMRYv3*a>Ru z=>*pPmRYaA6Xz9cw_^B9_jiJb{oPr#V%`o03@moyqP}8SarxTVSo5WGabK|+HfI}> z47fA!&yTDeVGIjxrrD~KXAyPOg!^nv#3{&JhMa>ZdG zk0~r}+U@D@g!(H}GONExUCGr@=!cc_hBLF)H!kb5%PHVTUw?~{YoZqQ_4H$r`hkqE zzaLwB>bG*+vT{YZ;qO)Ghn2f6D<^Eo;7dg00z4?!(~rZ{kK42F1dr@H;u{s}w{l49 ze1D7m*wIrz<$CI0sQTslmcu)%|IVKJt=!#NxgtHiyQhB2_0&)P?3z1A}d$y?-M=!rQM$Xj;g;;X5SUp_me&KTe4)F;`KVl)2SEpt8awS3{Q)2)Np5IuEZJ_6k@evr|L5R^QOn{qw< z$4aAS0do4rU5t?vnKeg|=|@gqvsgHN-`x}!*SVFmcoi8RnZcFdIfZp8xqq>K#Qrt6 ztvH_SyKO!7TRBD6uznQrX61_g-`QX8{;Ztn 
zR)#R=vYGp9byq6mck_a{Zk#j-gzDJxbe3gaoUAtRH$l%*y?mh9ztc1~_2 z?-*0LrQL8-zLexai@jJ#R!JyBr;bO!f)yCXwkIGr@I5g*(2P1Esq<%D>x(3X9_6~- zAwpQ_j^cjF|4xD>k})inm1Rjn5LR1q;}}9iv-$#{dP@WfO_wLIb52}v0f5?h`~Nzs zvZYb;XK_4mNm=n#QMbr{0rAW-z*OF-U#Xu&{Uk%vq!%YjDwD~|kn~WHv6B}*l%*Ju zmd1#~YkSa3DTh=A&V#yZVJXYqxWsdEP(j_Sps9!=k*6`nm08a5vMhQ^{GmQcw+Qth z+bj$UErXP-jg-9{Lp5cV!mE6o%kc3CVJ++<0wglSm-QX3E<2uaNsgol+*H^hy(qWL zhzXujx-I;wuQvzJ1Eo`m00A8Q%c{K$zHw1i*y6=)b!Jc!Y!ak+p&GVSlCmM|YjHs+ zXO47+wvR?@q;f2KJ>wDjsFd7CHtKvP!O!GPCN#S%_Mt)Y^g5D>(QQ^U(y*8f4{(yA zjY|s^_oWY9DfiY(k;C~Qa_XN+{(Lv(&?i}LS%PC^N?Ae`fe+Y_XHj5PCQeJCsd*?% zl^U~}B-UJ?Y{LV^$Lv?*iX$!~2luTK;=VEwD$G_%6%b9<8j>VJ_nlJm8pfIGqtp0^ zMQT~J+++W=C6VB8&7^%W9lu&tX+&%NOaeTG{-N(oFZO0+1(K2nM35&UpB5u&dU5Kg z0W~Flgygo#waw-$@*pqfoTCr*7BId-Jt9J$v=hvPK{WYS{6+;Pi_j7`C1OQ1&KC{1 z3&v~c&<-?C{pJ;6i}|9waUIf5Kt_ZMj}iErZsZnu`emu2VxrVAKOK~-!y`%r(^WzYJL(3 zoKIdaM3zGi%h0c$u*mb(BbA_*3>TqfDkQvZM0gHbRVDtGUd2|yyT^oH%TQ#hK6p*v zJ|q>0)6O(2l~)k+<6^pCU@G|BYNCL-FwBe`fWEboyt^Feh1Hu|GtALrp|O$p9VNd8 z*q(i$BSMP+RtOZiO~7o?D1L>tMdT7qV@PkxegkEN2BcM^0jOQV7yQb;5K*CujHpC0 zl5^WJhZq(_6WMtx!i3_6+NG;1PXm-7_oe>iJ!4vSHS2F0H=pZK5I2^pMrN(69WA*gFps1h>ua6_a`{6@v=EcJ&b{EK|PF@TB>LKs8rW?7N(2P7T1BHw8e)}GtMWb2^~2(9SGXu!-yhi%Da)E z30=VV^an8MN9z=37eyO^c*TjRUdRqw&UNWR>h`+NtRpKa$vhM~qHYZMZiMDLBVFaO zRf7t(&U(P_!*?{(-gWz-0FeakR@0cz({#t6>XpZMMo2FkHBo1o*4TV<4gOF&!6No5 zO}LC?`&egDL3upTt31}=nw`N7=xII3hUqhCZXj!RL8eX^$l&5k!S0RzwOAU$Acd|n z4XD{7(1ql-v9L}Ik_wc|V#L8mI^}u+uHr3Ib-=?F4$y4DL~CI?Gz+E@q)O8-bM*u* z(==n`5NYNFFKcSt%(Uc8(vo3TLr#<_7dWk1`sPK@EP^GPzPSMj^qe53570vN&5#G$ z(n2@?fkwJW`sPMK-)!J*FPf!sCO9p&<%@bW&P>_*jvk_MhH$XTASNE-L+N0>*ngJ%x*1CciF#(1PZKfD{r5K~Yaw6A7*F8th zf`=3Y#dw?Zt!FpZKh_1V5vPe-Zb`V zX7dXkfZm1X@L?ugrdAu-n}y{l!I3PmMAlR30D}QFS{phwfMv@?um^mUhrjiR!PN9J zhlo;$5o!Lw2f|@a{>&8^6?z5=I!x9wa0}>$?j7(BQyn}py=!gFnqE+dUj(Bt%AI1ZpI$8N!+{kG(!aIh zEYLo*AR(fumEaBjKoXa`rdd_ypV>mh<`>%RzY6C@gX@O}@>n<^r*UYUuXLUQ9bf~e zn21PBnp|l3DgL8Fh(IF7E64i^UIzXSyDC0eMnSoYkP>S`)c&?N$fbuZ2A+E 
zc}7jFOJ)WN52&w9UMA%PD!YgE0a1kt@5K)&;gq8s@q={H17~g=5`KY0 ziEs#f0;=x%Z_vw$ZZ8>+U>h*(1A0r?stjQ?T8=Ya3=73`47*n!6fjI5{3=r+F2D~O zDdul&Dn6-ReLCunLzDIE%>`udkj=$q-=v#_s>`4)qqXi_yj5=tbHUr*xd_GcxwuIm z5aX**T{h@>t@iF*K(P~yC_Y(fu-}#tliBKdR;TpB^8?S zj+_8K12=0-%w+RbaVgJpYkAgmo4xvA-WNjLQ@ra6!ne%(9 zm!yX8yCq#NpX!ss$>zRpo*mrh@BZq2%78CzoXT~aA z4?@6<_u+iqcnvHm1rXm!Dlfj86NEePj<=XoY&!VY(nP4KYI0>6X@R&p3fd=4O#CX| zPAl2O&*}Jd2bVn^zqT_t2?x)w(_q)RtPR5F!O0F{I!RLOm{}LI&71;6&l=dG;=X$HVD+)u$Ok2sMEwojCR}7_S9$^d2QOn)D#I6xdalMX}w_J(DqLfG9+WxzZU=zxl4{M={PNXKb70w9_q~@ z=wY)6!A1r>nSx9HZUPAaNE#g4XHjGP&9B~`i3}Hk)?!X=(sYzd{gPnT#=8@Uy)2f_ z9MV}B!kZ+#(o^bUjqtxAFM%cr+LEERA+c=6>5@m{xQoOk!zeTUn*(K#B_JW8t&3cQ zaKK`x5a38)ms7r%!wIF3Rw*QfN2w8^R9ZFKfw{i2bmVA7?z@sm2;xy`ED$y+ZA$<} z4Baz?Bd~>`oQPh{L~ye{7;Ll0N<>FPtWvpXlvo`47gm7>CaF%k0KE91!-;>z;b@W- zIwa&bQs+#prPHw%txc?rrY7kvkw5;rXC#HfzvYoAHqWmzG5LD3q=aE+Py=gblnkmk z7OgZtGpx1VsVRK%Vgn+Q7Geb>If#)!(#CHCNrxexLeh=+G8uux@urK7a%`%zL>W+; zkSWe^v30D`bf}sqbj(i4@4wmBgsp=|Q2QXQ3r#E9u9kt(T7n{Y#0z>V>9SC9EQPn5`mx%v+80MOIN^x<z$+sVa)Y(FfQ8b=7)Eo#((aEyC)2~c zXnN4d$UsDgG47Ueho~Jh0Gl3mp}x5R6*-2x*fo;9Q_cU!+e4)xXS-wxT>~J_YW}DP z;sP3ulbb9PZ_+MN2OyhpcA3Im(W|~nVXf<4ADMB=U*~Orv~se9ag<14K0tB+`&;Um+R-2-pDG-(s*)BoesEHXWM z6b0ZfyI+tj-A?KR=P$c!-fMT5PCb4(G40tz=(JxY$Yd(bEhXP|Ogy{@1QL-5;L@}l z&$h}>kIx!c(6vYjg7t~a1f-CIMYwwmrHR&gWi<_Cks@uvmZlZy@uv8OhDB=3-cE=H zWV8g5>(nb{-M~akyPO%HT%;khN5-q&ZSTk6i}sVR>)-*!gm}t$M=qvmq%%<&b>Asz z*?l)UMFbV&u=ScjXhb_=%P*a*@{$;2Z}=#@#`si~x@yb>x!q>tjJ_)jD?zbLmF&H6@#nuHBI(jZre5d_~LN z?Ig3~0pZazoCJtK6o&dCRa9J@i#>?7Gyt&{F?a{uMaNQW97q#65!fD6Ue&3Gyzh9- z*2Pe<|J6^D5AV#(>dTsJOYAjwP^Nqt$!OF1^wi`UDS<4nWAJM zC6`r$H5ZSuRjbHi6I(TebIZCG&OTF_Y}HJ9Hw}M7?+^`iZPnNnjZ=UKiF4*~Dsto@ z+Ha(06$aiXGK)Z2nUuA#vrLv2d@Cm#=VL4>2H>Uvc-d%=L+3n?ezV zG+v`gXAY=N9kGcRT+^vd@~z;dQK0S(u_q{a9QD$Fh~E}n*_N&wX}+oH`V-qI3nN<*mfDR z%vz~hE|p5me9dh&)jD?-{)+&!)ZiL1SW!u`uB7S;FYA5<$TSXI%N^>GdN9o%t7)@p znaM*`M~O*Icn|Q%VCLhd@+ch;GS{&bMBHiSu{HY|LKq`WvrAdB;gR=VN1^ZR#rfjKvyLdE|Ij> 
z3cq0D*w;cFZB?_wxTn9kB?;Ry2@A5DX`q+N8i^f5Bawf=%2QaU*lOetVm$y<(`HPV zYSkBjrYUj1WJlR*GBV6$fnqLDWR0CgZzNDuh_18gp!e;Sl!h&_0$r*bT0*F*Zf~Ts zgaa8;}-wsdZB`3Z~)ewV@KoC0jl^RS_S_t4E+)KHe?y%e9x@R@ew z<tV=BVlq7}bh35W< zFCI5!z>%z`*<#_AbPPAUAXYEbOKb{FX2#4_C(ooFG?XimpR{_kQF=*fpWOW}L8a4se5uiC8<98G%pOp)B=?P_u@P7P zcn<{bW&F;c?4BDGS8`9_NGV>})Mo5@?8-9j`WyCKo}MMi%JS%(c1dkR%fy(dXUeky z9v)vliwir_$>+P6cuwb9A*pcx#w%v=EV9lCDz<@5^PwtMwdW?Eg6ONO>aR#S@KJ(G z?&)fF&si*1Z^M0Vt{!`K_P_cVGt%BN5llerc1Ge0faYp>@|3u@ezSK{6`SsKWXC#F zE_zEz%}Xd%7DZ$P4>w8+#h5aL8WYShytD!1;)*fccn)lD4{CC!;4ZmLAaCU|&Nj;> z5s%6$YMZXJ(Kgf43fV4r7_}4kod>t?uC*7sH-7czul&#UVeZG@{@CN>AaM75@00KQ zyY{GqMYTDI45BrgD3rMZB5c2LX`U-djem4M>wUTomb&BLJlpq%PlhjW$=}kC zQKYmNxbeU3@MD}D8*1RG*&o!is(#$}CLK(hR#iP+2q$J$HAx$i)-P}qf7@Av*tlaw z4*%OueO*@1%p*NRkc0{Ou{HkF*-Z>d7N%}J)AxpDLonuncRx!Niy3XS41>Q4o&3Y3OykXe3E9gwLph;v;q06_?*`|{b~wFwO6E^E1F*R? z0)-vm49PEEa(>r_J*5^?Toij595 z)Xok!jUG&!O(^^E!@9f{yR8^>`@+Y!(f*XN=Tx}K#~ZFG$~BwSN<1`O36!}nLME}; zffz7g2*eg^##_Rqy+(pK)bxXZscE2u^;k!0vD$N zL9Us6sHF!9Ilj1zYxEc|(JC(jcG{bkZ??fk>YXx!bw+#4Ma}kNWGFDNq}jnqDjiAg zY_%u(IhKyq^*ZpM zskJP|;2bW8!3}Ei&$7P7=TZku!qp{i2Xxf6@ah9P6}nx|A;?^cL^QMqJ0Vf=jLO2Q zUd#t9s*AXT+t^6=rH~oWnV7!>qeAlEiDWqlvybd~5_5ZWzT1n}4C&=1t1bUqz| zCz^MM`T&0NP=zVPZ>cH4=>T8Y<*6)TQoJF*LUf-T2Q02fk7X((x%79$0SgiF&lL<>=f z5(HU_fCW_8|EDV17KLu80!vWCk|?l%08tdOfS?N~WB~!&Mju!}fqs88=XtsJ?&>9$ z?f5T$yw7u==RD`knKNf*&YYP!=tTYTnn&f@P^=Ewh!SLrdbK64Dylc&Q4Vgo$lQUr z3f76VgBcO6%lIiW_YOU>d(CTB6}PuNVB&0Vdw`vPTiXN8xwZ$RtiszKFlomb8O~og z5Mx}PEWubX0UQ{&Usx63kNi4#(0;!yx-~959^dhFG!|_=IDQcB#?RZIKG?EaN%{SB zG~`n@8XRZ$-gH$)npuEBhvnvHdJ+sU_sgF3wlBK%fzs=shOG+UAW~57so{%0dH2Qa zUgW8Exy*YOQBiRFCy?^ zQLVBYI=IKrLfygL3`HEtthf9S2jL#wRBt(r&fdZ$Mj?DW(_qwkE71eXwzuftWQ)j^ z;69Y7hVT2F?XwE?uy@!&{5OB=`^Y~rkZc>TV$Sq{a}WD1zd=k+PMIY0ket9a+S@pl z1Ri3Wk~s@GD+`zXdW-*h3IQvVl$LsSutVcRmKoz|&cp�sB@>bAPYdZD%?(PL8zC zZmqTGT~9g3YuorPi$d8R#ZfDTV!2%FM{F14Z6wM?P(+yR&|Xn7gi->z*;yt5v`!P{ zFHWeaqJR?V&yL3Hl)K|F`AkV3#DmGA%>MXaBHu-{OkqA^3RD_DNoNqz@N$dp1&HnT 
z%nt|Egh1|bCY`#B@|}F_!~cEXgNWnI-*g!Z*Suj$~4s02ZbDC zNNwwbl78j-ut%Ui>U|(JS81?c56*@@EEdyF&?lcd-ick-2_!Y0xZOLkOwGYUPbWAU z)jDxLbb=kbi|T}uTeWq9gjIBcP4CbN0u50`3+&3poDn28jXi;15@kjKGiaC`W8pL) zouwMjl8E7FZ$rrWp&pJ6R6{#TAw#ngv}F*U3Xow4J}JnQhOG^<1t4Qqb=D~aod$+8 z2@J|VSB=TK)>!C-0q70pOO)q)B!^k~&Z^47)eW=yXfDPnR+a23gLzUhpOC~u4`Q99 zD5k@9m`Og>33GzCBX9-`n+}7ls+?Q`VRyl7W`4^x72kv4)(G2N>D@aH(}ew(8t=mB zjp3%cvoLLTLNIcyBxWLYp`G521EJHK?L|B(?7iOUjZ{xM>W&99CDn$p5ocve%SubT zjo&dUCu5Vk#Gs;W!?2zdC|6AYX!Ziwju zPW@Zh?C$h$S+9iTg!m0R3(JBA!N9qsgbZ_{VfcMsznRKF>n;@i90sRQMG>jF;%h*_ z6MB_HMXqdZ&%mM(E%0D5VXe1xb}3qa$&>ky#lL(75%Qhe>yP^?6_1NvUSk5J!vBa# z2_v)s(U9YnNr;`adTj=fxa3`%p^dPA!xQtW_}OBqX{k`c|an-64(PfYe{%@!9M z|7oPvdEOsAf}(d9WY}V|Vf*q1eTAjvR^vxTEUiSVW{!pJc|UCZq$bdrxMcAT1>1)N z-rD;j-9b|Ig9;$Tq=t2@;f=n7G!&0i@i>lCc8BC*XXVVsY&ttmD0r)@57v%MGa z8+LVoVk?0SN~vuSd!85ccRqo*%cf1r1F}U|yYjr$O`C+msL*_~ctA({1EvNdPNqaZ zlAb%t=DX=QrU5ZVnFeGHW1n)37juRYkr;|>jyt%ZXGC1s-kX48JOvK+Yck02Z8~V> zU`S5~&1MKC;FvaI`YmEMrxTPKhBGd)2*7nHcpKamzw@)M9EAtl;~xtTUX~hUdZ14` zWuzECgnn6W{2Xg%`CG_*z#EGeiqn~4#eEut%rpf{0bqOU^fjFmawPE@Y)*V)Yw=Ez zlu+W?nctk-EE;$+3(wuO-#aAV7l=8OMa3BB=$l=9t8RH2?|i|iFtH$fBy zmwRMc5Q~B$nY%oR{KXFZL-1Qg<&i6UnPrR_3hL;nndJ(2?r#l4^^h9(kLa?q@Tgy; zRP!smn@CITO7HGT@Ai1v*T_#O$~wpjJ~g@}hK*ZzmW-ly2h6FWEXIK-5>)`K%=dkb zkKBft1=A%8fjCJgrJ!_J;F=YOOF5F@DfEOjiY9b5AZr`U1Jr0^!*=np2=gm_J=&yY zKEirqUJ~|tTWeGJdO%ylFon z+_{tta9DpK7ZbnYL3WS*P)ZNW_}l}ylvcD%8SO=vljSx%W}?J$TVo2E#Jju>?UG}w z^{hKuo(9jMsH#kMX8Thk7%$9v2fP|!%T$I3tK7a#!e^CX*PyH>j%Y{is3ppl85Aqa zwA7-VT8w%1Y6Z)Y=%uI4=Lg6^#t!8(wQh~lh_|WnK`D^2*H(#MGP>@PI6z)!om;QF zB5mA(pu5Rz(;NSGA^t6!X^)NXF1*coii>f0%v-VHn-4{& zC@FP{k|FIx(PC$6ibP6FdUQv>bf5*rK-e)sfL={beX^o$z>0ANG^{w?-0v* zO*1mi(`@Rpx7pI=(dM1HJl5Qyk^{{bK|Qi74f}yX!(omMYmTw;`r6^mYpHZmqU-ogHIns)8Rpdn||HYxIuS)JnrWPZvA*X@H9rE63dny;|F7& zL@lm-6g9aL8x%#@i|$a#VLrKCH^_!)yXx=dW}9xR+>Gm{kDIN!sY`x92%w==*OU*_ z4Vy-OYL3`kQ$LuRVgSp^KV)Hl!(>e|{~*H)`*#BhSCvX*fZWzvB< z2Qz-M9uoo92{gPmPNMUjbDYG3DWrf2tbPLvS`%2>Epm>s;Wv~};greA?g&_;*97aH 
zPO$ph!Lr0?T6q{)agwl@4G6wZMqw$JwAwZOU`5!D8kVzhFDXnRuPSB5Q!>&~X;dQMdNUc!B$gc|)EmN){uuRMR9U`rsLO|iB zfvcdvPvz8`OinGc4brjXxo$ZnHFnZA0RN&=8gQR??d6$Al9m;{+Hq;B7}}GRTyvoA zEANrnH7epjEB{&a$)c3p@Sj9RVuoc`t0<4ulG*-Bws9r9p9Rg8WSON$qu6%1G?Y*o zH%Hk%<;kRa%#zAa+XCIiV{}#QBdlK=AH7X^G9N*=*LogIKV<2cP-Dw7X_m2(Ha>P6 z!z{r_ozfU{Edsx{37NP~D~+Be&F*PU;vV}t>M_^Vw81yfAaB(m?xb&IOw{06w6HSi z%=TqX(jp^Rk(MsmDPonPoNs`IomM`2g&Nm5(+MqQWzNQKZTSnf>!P}7QSfKa<_IwBs6b6NXsM9u!p5B)$R9>boM9&TF>J8t;gX~(VXJ8rDp zq$!oP*QPZBckH*dh7?1If>j(>}JOHDI&I}8G8qUf*@@0bKKu3-}SY;ca= zB{u@5feN7t7&yup!S7&Id&mGu8B?q5y0ja{AZ3*_Q%hNfgto_&)e;Mm8Pc8_t4Dur z>r*B{ur0X;X@p#$gs;-1I^*d?_f!SW9dv==vJoa!mo-#hqH3oP83cNb(U6lVNK41{ z{)da2P8f9HO#li+srkrkAe4bAbZ|?$h}>oS45R?PlmRml0kWzuc+dgJ$CHPK;Mg)^ z!V7^@-vDZjcg*cz%1p2=vyHy8kiq(EQ`3*kHmNl$tLz30qrnVyd;B*{=f-z!R}@S| zR-w$~YTfU!lxm^va-Z~Z894=h*sd4nQ%d-b4$+O71)oA|(kkW4j>@FN1rTt=*%&%2}9M}sXTN-bNAXYfg4-mv*;Y1_4_DPY&D7c2b^Q_+N+40?`P_jxVrevLkhq8fh#-h~Qnny|n4qeWo@wxa^+Q4S{K zRf|1)gLVtJn<%|mOC`H&q(|2R22LBh3ty)fgelk$mJ$|6U_XdG0ElLhezH0F^;}qa z;bo!AQz|HoSyZWl_@jNo6i^98Z{qnO@K*oERK};p)Q~RqNDqVanfdQy4euAi5)It!C(tW4@ZbR)^`R3$I3hTN zr0hwI@+jNwX7%8niLKX(Q&Sl2TK(_BVGbxWps}^{$iKD%xexeMW6b)l@$uWF zFr#o7MJP1VbW;Y;VuHnC*yz9+3F6{VW4UFGk!-poM?&s5L(~jqzAn%bb{9+W!fz5$ zU-m%Iu*0agraOFBuOFr{h5uJAbL`eII4P4N4D=^%+{!QBb`Uf5phN>GFMLywB7mBN zNM4UGG#k$%Rq=Zni;-l+IJ#CW*7z_Jr>b6a-SLP%sS#`qFnA@mf$KVg6$b?2n(s2|5|YlR5EzIdUa z5I;J8jPJE|B>!~5^Es51Xh;i772Ot!avYP`pM4%e6ZCr2PibOhEe^(mC@U`u zRS!Mp!r~p`W-z6OnmB@)f53Fx#^F~p*DPGPw3iHZ4$iim8ZpGrV6Lg|V!Ie!ReG@RM3ah4!hi_a_B2i*HsRWZ-KC+x+ zKa7@v#jCI7B+6IT+qX*`vIwgivmaspse%=D)U~m~5JRP!Ef0#Z}sZ z`zZ4EO+1pJsbMktZ`CGx>1nwq*dX!g0JbH(CCpA_of|q3_w3cMY>SBBDcBs7L-##< z6)aJ;V2K^YHOM>BUHNk8Lsu z^{ratR~Yn((p(Y$cCIIj{g@Q?{B^jj>-1wT2$gI7DSk+eozp|O+DZMMf|V#?;7PP`Y`%mHP1Rbv&FCBIlG_O) zUT8E>GazLFe)z%iwCa1EP4Lt-m)sT0pSBoG{caWOH`p01z z4^%ZeSkXZt@Gd0$sWbFTd?31Q9442h{59vqC9r~CQzC+T5l`3XPA!obYF{E<)LZT| z52$5fafL}!L_w;=i`?BAwHnh#{+HUOKR(wG=1Q^oB7k19eS000#g7Y~T+QNo9*Az= 
z*V;+6L+fP<_`J9Ak#1K*)f6w|xvF?w(~V%4=nrT#c=lU#G>#_N)^$z}CE3jQQ{3=$ z%UnCUnO+C?1Uz(cNmcF?<4{)dxiY$)+swtI#-6D({FO02pGfx zW)_0sxS^VMa4wk}^3pJ>9}`x1yfEAvmFOI%JeW`{V_SdMX;SI{W(k%uR9A`T$%JEV z#qT*3A34O3MlV8)Tr$8<_Zr|}{Zcv64>bhn8>SPjQ$4vGdSL{Kh|E~7vA2b!HQJm( zIUqx-)Fg0p=Ej$)fXgLzvJyIkpceBtuDnXSRpa*&vA+EZN21NkH`b=u z0^Sn6;^XKEu+DztnDGLGR}1`7MIxeVx$J0-1@4JlfshDTgV~P7J&j)+X>CdFXHP$G~Al+!3HPSs= zaZz}^6~kov5-q#AJ1*k5RBPU$7!vqEicD{#C$w=EY)V2^EwRQJCDEO2Jmn@T?U2^D zHT!kh3V{wPFBb)-fOiZ8yo=G_Svbbdu;RgxaDE6k8Bq+7!L6?ZOY0FDa>qAGPbhnaQS%?gH&Qv_<7D4nRZc13Ky&@bZ( z>H0JK!d9J%j!8^wL)^+cd`G;3)kAi*t9&(Ro2{cLN7{5Hdyd#{^FFReN!Gb7$BmkaR(q0ONvmxJO%cb|UnxVmhN06?kvg+LXXD~)rET+qTOEH! zo(0C&l)^tN?6tUmlR&hCz#42zsgrfuc2_Ml3WW*5=yG224sW?~v}HWHcYkzmRNt?( z)3!zDwnZDAWKfby5O@V3G<8du2|5@j#;`4z+;$9u#;R{&?eY z&(rR;cRPDtJv@E!xY1$y1xjYJ*}KI+;mrnWoN|&t&o91KG5KE-1FUsCm8GtFL=;*i zfi@};#c?SvMz@cn@SymBe2OM&LK8qeNuS6}ixfE&v-01GNFu#Q|7&@~=bWY)l{gm? zaoQifEEpT?q2Af^lFwBcA^1+i$}-1{02&7NzEy>m!u?uby)qs?5Z%uoUl<($IdV}L zMJ+<)XJf7KeFYWhXtC_6sWk=PtY#a*^xdUlMQsNX42*yUM+?*Kege21*>|vHm{j_W zezmLR367LvE@Zf{>kxm5v=Pg5V~AV6zjJ!Yea7ay`;ajLuTGWlL%p;ST7~dTYmid>eN(KMUXrdswD3$02Q&5k6U62h;=lZ}F28`|q646(XHHHL+FhJ6P+$xf`s}xy}by^iuRk6-SV4cUR zty}GjWIEp}9xI*@MXG#?_aKfWCeAuj^JfOAnL$;xVy2HMc7s7%)le{K->Ki^4!WYj zf!fw|WzdfDT!S{L_Ye9YCxdsHl69y7O1m(OrN^e3g*Np9D$~XY>@NFjN3aH2N$v#G z%bihc5Li|3Yu*I~u=*CWlu;q-ccSP`>lPV1GhEyv?BTOxLGvV`vDr(0Bi0U@A|5_~JVe-P1`iA{>A><# zm5_x>JezNincoxIIazJVA2(~pP(CD-^YJYH2B(BZax=}DTo%u-#B=z-loX}zV+NcE ze%O}8yN+Lw?BLK`W=5VPSk=m#f=n@+<1GnFfB;&}gLq4T?>Km9gU`@_A|D0@Ag8N} zv%kgEaUj_wNH%H51xQFFAxOl|@{m|1#*`A;G$CCTY@4DxG=cl0okj%{>(BxVJl3Rv z7TN~Ms-fck=z(><^R?ZE-`O0R&tpN7Hz4A4wIveG1}Z=tO{l*UP6VP2#)l=GIf`KJ zhY&QDxg{fAe2MHyBdqLKlUR9l2HN#yr0oBV|Kn>q8dX_pyw-BUz^MpGX}%1w4xhRMA7xebL=mv&cutlH#mm*}#-l~y@ZDao6djFg#W&TASH9dwSXSr#zmfEMxwvR4V#UHb;R`gXtyN&7k2I0bf^5?$3VL@lS)u&am zazSrLr`+~*^eSCR2NISts|H@Olvz`sjAc%gF0)xTqN(ANDZXMQhr*)gut+E=I$29s zJ%avvRy|E2uIXKOBXOdvQ!R&z?Q4!pMcJaeEwCnC{q%5fx=_=O)+^mlfHYq2!GSK2 
zOBXNy4VeQA=w!tP1gLa|e099!t`+Xl6)*e`Hcxs>EIX3_NGGIiX6?cCNFOvO zAvl{{I(p(A$|Z)*$_7^KiHO(-#(G5_XR@Gx#DbE7)qLlfui2Y41z64(+KToSiPE-H z!QW9>(lDuEYrY0nh)NP|?Vv)Brlq2eQUyVYYln8$@CQ(H$}}S=k#jtgkrhX0ZT%Xe zc$_5HR~zOq^Ph?!)iL3C7(Orr5=sSXu?3AhWFA3|>U?vXyksG01=)nAjkhV^tASZ^Xw?0$Eje)g8fn6JDRH~xyYla|SO;S1X@D7(y{86@Y#79(YmJ@9fFtB)N zad!c~Odgm~jVm2q-x?CULKUd5H8++Q)FmRe;=i_jTl|1F%jviI7~_aFu0zn^|y2jQPnQJMFAPF%?U!swtkJkJ|xE=+H+ccLm zEqatfVr+n=UvqN`!OaAKScGvIAj2G9Q3DJfHW1wsk%xfAVmB0Q&>|fU;>kNQC(*{iNs0uo z7BVaYJFe|^u$h`;!nAoa^vi<;!Swynsy2>2_B`0IpFnVrXj#LgADW>Gb zWtpI)LUWJ*s|uDwo+tX5zU4l=O>#^*6+}bz`Clt?h^cuCPi{4zA7d&Ut#nmCANFC~ zBLqu9vahf8Y)!+5D%}8|Tt-{+PYRSpLaaLgwHDv5d8IpId@w~+G-Y(!+rlu^!NYA> zHf4LQX{~-4-Gx0ITi`F*y>EnG7~^T!2rW{i7B7vlAYWAx{76>ET3bj9GX{Cq;<+&` z@2~u4miO}8O}7rU9=3cgB-nZtbqYQ-F?yy7hZSU5P5l%d;62>t&Y7*P(`(T^Xw-#z8BvGUnpZSXd|am{5Sfd zjTCPj!N`jZccC*U6`yA-us2S_QXR^?0xwc960z*Prvek2S*y4zx<8=?6t4^;(z#cFTLya zFZ#zXe%ZZ+H{A1*ub#NOx^I8wYu@zGqhEF3TlWsW>dU_VONYN?boXNqzoq|Q{L9)c zTgJBDdfWK6?YG|%HCtbF=Rf+VJO4@Fj&jd8?)^94{BK{)_7+YgNPRBk-7zf%`G(YN zxl7x-Im7pHag%GZK=TC=;|Z(~CIsaf11rRTW6DkD!K?#O!guEjb#}!;ulK+FlqHy<}_-kI7;Ob zf1eXB29DBcEdtPzDn=KHJ>A4@@H;}>rhZ3i4asbzlF^z|S}FdclTg}=p)@yGu1{&5 zvXTUNaz|tkgNu!gy)ifjt6xqwULDzW2HqnA3Jy`_8N>k8h2e&HWB@bVGO@v``vZg3 zzJUHkN54cm`MBJfg;{`uu9*2`>EXh+2H}8>fCmT(Ga8GC(@}zm?d%%H=;Ri6X%B4jzs1VgpNuSiLIugq;24c23WFW5CVkp;L7OMB5h>=}7mO=gJ z3$Go|oSM~r>piA7w0VX_tP%KNG@dMtg+uTSV}D#7AXJFG22=1O=vu)R*nCXQu9)6! 
zmizPpdN;((N`?N-(97Oze|``|S5dqzWf?6p-kGm6=+|x{U*Y_85^GO31zA66+s;|h zmW=<5MO%#j1QM2pA~G~~50gxph5W^hS%?Y~W^qz(`{J*%Qo~k^TX;zySamj71DHoz zpz}3ErBF-R1HuLZpQxa1i&qrM+y0!=LLJ^)gz}bzi|oDUg6p>=L_AhzMTXlKIaq4- zof*UJKEKGRkq>ouVOjjXpRGS#&3K`)fy)RU>I;#Oc<2~#hEEn`Ga;#)wJUATV|KNt zicQk2@Pfm{#*C`A17tQ3DWYSc+-Vtf0!d)MOzi$aN9gJpwB!l!+)ugfC&#?VA-_8NFR9XWjmoSC#rW5l1}0cA{K&8ybfs&-!qA3|kZ%P}(l`Ik&^yb%sm*%!O}V-r6#p@!!4KG9!xbf<9~_)i_k1i( zX@t-cPCdP)wGO8q9GKMjXWU9_Q0stLGyJ>pZWHb>xJxLp zI>xkuzu+=~cr`-t;Y&3_CQLvH;?JOVY1LuiCt0=U3eh59eG@prz@|uzyAoiX z2eUWWcLr^P;e7k`4(HpicQ`3{mEru8b%*m?Zn#I^a@~XZAFer=R^o;S^IL){azkS| z(m9r3>juY?5tB&@Z}JBVvpw*0$hQaS(fBbPQIN*!c>>R5A&Qw#XDZ0^_!kLKvdBK= z?!;Kf8Jld7%R3$NgGvh0Esb1UVlK%%!*eMiEKN|HObJO%A|fmk^Cbz)w#Ty7p-SZB zrN4e`@HJYv*#_>rjV;gO0V-*3!GgdL|Mz0!$86bqcX^iJ=fc3ZTe-Q;a&uX^IV&eY zRWk?gw)#Gpm7BM6hpn7kv)ju3WL9p$%DLE(hiWL7GObb5_*<>;Qm*jK$6PZcN;@mOWQC1;#ml+EDbtNVloejK!c6yCys~cD zpUTRvSlM@|tUSe&?qv|qXJuEdtR|g#J||^Qr2zkQR(8$GicHNdJt@1ImOY)7ovdhV z|AWfP$v7#S_3eLVWsg|df3LD~@a-=9k*w^LmHl@rD{tBEvOk-Zowl-XQ`wny%PwSP zXRPeMRoU5f%l=$ecGk*%v&zn`TlS+_**PowZ&Y@E-LfCc%FbKar&V@g-Lhx0vI|!B zn^bmj-LfCg$}U>jgDQJ&-LgNQl|5%=52)627vQt*}>r^(lcx9~d*D#?(1C5h`Jx*KEr&KgJ$Yn*( zwHKYSqF<|`!M80d`s?jQXRYYJR#6<+5@2l!5dW|CqH|XCU#Vzt#L8;@jrOARR`ks( zD&MJq0y};0$}U>jH>vEoj_-*s2x#rz*DAQOr@(-}vvL8*5hdt5kFn%TG5J>uJ%+ej)Ln zsOXV(iw>kkk66(+sOZ$XMF-QOQ&#jVRdjmYqC;uXX)F2_Dmt@n(M^q$K`_r)(SNL> zv+EWeO^eQ2(buc!+`2`7FQbh)EBoatJHKw(|DBbcx3XWRvI`w$N#i${P^OjoM=G`0 zS!!!qYSBtPsZ!_irBKQ~n9!pY`BD{GYAaGmi!51@FHw;rgSoMeH#dGF)N*7{WBkP` zGSyaOG%Ye^MP8>O(``j=NsCNdk=LroOk0sHX^|N#@(&eaE>*JS=L>G?boi(Q$B$fo zkWInygCk2rC|ot#PhFhOw^>mC7Z+86K<-u(>oGZ{N#K`f&W0 z9NJXodKRbX7=_>B-U+h{W_+AY*v3!_Mh0aI9)(-5(|(Z!G9Q#X7Ps z#h$Yuy^arJD|k@eUVU-xIzHHA1rG}09Q=13AAF$|JXmk_X-rwnXr^?m&3+7Rjw8$u_>@PpZ{(VrRf zohCO*#->9H!eHWM%ODh-9b>iSh6gyTuO%9+`BgSGo`kYyRw^Ru|7HYpo?>s+n2gmI zWhcQxYlhgwNZ44fJegxhJ~RI% zX70+s!+3(rnZrou8SY^Ipms1nQMf@{U#Am>_h6$f%qPTgh*s5*)f>NV2DBad15&Jj zojPLHiik6h`GS2*O?RF@#AVNZzL#O|9ySxQ!F;r(^k5in6tLoNE(#sPT6t 
z1R)+?6L1Xn~hUP-#__;JgAG_Czb zb8O%f-kR?itaac)3w+#9waV6FVzeNq^4xr|oo%H%l8gsF@}2~XE7?xu_RY?o=F{VR zMVM33xTMmGSoy^Edc0_nH|$kqWv5B7_cWjo`z+u^f$@w!OxUYUUFWSQ+P;6HSM!9; z7c>(ZoiV?@u+fPi$@Qm1Z!{TaRc}0uDv%gtNo z-hsVgAEi>l*-!qKO3fnAKgKK%btQ;{3&oaYwl4U=PS(PFC};`5l7gp;t+L^c z@&-*^Y-*Q_CzUwJIA8BC99ASO%e zQ^DaQm2>V?Dr8o)F$MlSq?!m9n^#D!>%#Ep4 z$hdJNp^fPo#*|a_Y)qHKm~#3af;cqFeBdrOrYn93Vi;4^r^YW*&t=sqw;x?oEL~D< zYpN|4C%i;t+=omzyV6xrJXtnOsKPbUmZiL8vMgkv*Rz1LB0UIs@7~dJp1Q4wNS2wR zT8=QOkBKLUWOWtE>N;!H2()9|8D^blo+Jmj@fFi|RG;dZRbCy{nRaVVwalxQcsqA< z_{+G;bGoa7c)=R$7~lo#6H5=6jJ+p$TIkc+wmw-A;6A7Jfn^a`&Z~9$bWvB{E$g~s z)u>PBxy$v5z`R_aRG;cup`OdCGwsu=YFSe)gxSEI(mq}3svw?ZL%)k?Cwnwbtd1Bb zdtZv@6up{*<7vmPun1`7+X zQv2Lr&sCu(8Zd90yte`l9|=EjZ-u}Ql)2sXJOsG$17>l;3A+3gOmR#(!8Lpppd#-G zAgBHcE=#R1tO+4GLy4U4e$r;q={9s&5pp|dXoS~!Jn++5v|zPh0D$7G7I`LmEoa+m zu_EWPS{C!QoU>Z~fofT@S{SrY%Z0XDtjKa!%Tm6UWvk^8)v{u>Xs^y;xztvR6z&9hA_0vlP8WQJ^(q4i5zxWi(u=*qh_T_-UO z`6BTW73QY(VNM6nrnTzJN%$jH=_JuJiIgW%gzu}SDLsNb;Y-QXAVw*co!QVv!l&A*RWC$~c?=}Y}V+b>rOydEN#c=@VH}E9Y690@E`d_x*JJmMSR%YJ8B%Ulsee+iP{ia5V23Z_MV#peu=7jd@;3SAhS@2VhPa#&Q+IkBc>el8g}uMjxP z8-R1Mtu__4$pAQ)+Q3;6II9NEi@94#!MWU3LA;iMvziBI&A?%8yTJ(Z23lbgqnvl; zK4K5LT)8J@{XNDzW~l9ZDC}w=nC+(f@>5;)az=Ka>Ho*AaN^LN8RcgB^0Qf==%eE} zft{3pAv)fyuIc!kQ2TSLEEdZ;(U<6Wr@AVN=dDd+R&yaWoRRdrZOFV_-ClM(ew;c8 z1iRwJ{ybPJYU9JKywsluXN6z!sw#>5-7jn(Z0v5;%wJ z#`La+;6vZa`u8Pjt$q(?H~ z9p3T`Rj`G9{J&sjYR@`jwl4(_bj6mjI#9R)?IbRY#heS{MrKW5uEnPUDpL*?h+)1B z`zkt7I_20u9X?6?Dw5^sLL1Wbi9R|F_Rs1b>@Vt?V*d=k;&X=mTe&-vV*h+sMe$OG z{d0NjFB$e_OLTURL2X-Z2xb*8FU~($;OOcXA+1JXTG-W$4|7@-TVw zO(dA9$%+!8a)ecwx;=?I?oqACknzWL<=wQdGpY`Q8&)gsax(q|k~R~_!>WE2NI(OL8%>I$Me>bvx4Az?yO)y6uVPsSg;~Jac|ID&Jc^6i8(8R zZnCKMfn@<$&I$(lbU|0%E$O;!)u>Nrxy$wGVpn}=KRT@pw&CBTLMxfWm)bfa5Y+Sv zOJx8;{mWw-%fl%oBo_o*Oiqg4Icyy?+k0GA6GSJlBY3Zn(b=XK@zIWxM> z4>rYkmjVDx814Y0me}?N^AzA}CO*gUk34wyF&T~h~KHhgyLfXhZ6FBN&LSdol8F13+|ia?-F9;=~fF-)k-ZM5M< zSDiN2tY}-9$CWm+@X~8O?2`%=2elc~b%V;`q1J%S*Z~=84O}SZ2>Sxkua7F?RUAlH 
zo=@9;!Q_w;=Zsg$o1@(2DCtCZop7&N)yX)W40WE?mFM$T=Zud3fT5&yp7c5g5?!>r zPT*NoolyMQQ0E0*dA?+IE(}>@5+NsnW1jUo8==mNLwSmKbS)1x8IRoRNFJv*hvIv( z5FjG9Qu`JP%M~jpBVd!~@tHf>TrAVloB5)A|-$g@Ko73MgA9yf)>KmxuBkQbhtYbwX>isuSFv40WE? zmFM$T=ZqYgq3g8HlU`>^*TZ=l^sX%^vE8#ngBx&N+HIr3>kJvn(V!dQ0@wXiLUPZV zQJ%d^hP~WwlwTYcz1=N(I~VYsBck)ed6KhI;uIP^#{L{{iEj#HcqxqGimt2a*pcL1 z^pL0;mpMhyc&BM_NsVi&k-Dygx{hk!gqDtQg~*-8rbt8LTHF<{Gm*l_$Z=tH+NPq_ zDf;AF)p=ZX!H-Ys%JW&PbE;u9Hka0UoIBu5XzUcHR#}~9G*_JjHtU#-=aj~1_h!=A zA6=8io+(?KH+*)pwRsMqEeuSE%oeOjPh3g#-$tIstOz5&sL=!G3*@>y8%FSguA)C( zm(!6u>m!%A>Rjxq5B;}cTx!L?>0*4B*7;<`31qe7lZ56jx6ubsvjBag$OCe1JlEoo z=G(|*eQ`*_?Z-2Fpi9J@Qs@#f$DZ0G;)7w!P!mea-gHHIvo;FW4AI?`vu2cYmndgy zQ^#uah&||9ZBFWJkZImwJL0d_E1B)+_@+F$sa_&yHklNivBIUyvKZFoCpP7I7N0QI z1rZcW#ZAUWX2X}An&Qb#c`l;H^wBH{Oiu%&G`o3SLFy@8IV+M)n>m5Mpc>`z5zlQ( zT))nA)fO*W!?w^`;9Q^7LbA9?^X z7>;q56L=>%pg9vHYFqWpQqL*X${`+eAEar5B!7S-2#h&YkvBYy;x{D=p^g!1q=SGa!mpVR4SmK3sK3N*^hQea$0*<)|$C=6Zy3!?RHQ{@4oJ}rnBy3d)avmKu#1aoqoPiCE9<~lw}25Um z3`pnp)|s|3R8chg&8BQE`X??bgP3X?K9d3xRGV=Q!u49B@*LNANnhfkBuRO0b0R4( zbbTBz2f(RhiFA#ra4U3W0Hh@)xB;XG4Wd}HAY{_g`+niS}tX7nQ zD*@ukQCdM+n9`N!N4b(4*{eK8`d(KWCb=`E!86KCkLEd_>eN@q@W(#E_rk=CuBj4! zT(!@tws;$N$2n2jJDm>nl5q1 zKgkd2Vq5zvt@3B+$a!@DnR?DUQbF-Ot6nTwFJ8jk*|ZlIx@w7+y%(x&i8DU4v9*re zp6kfU1|7N7Rw-W?Xe)qrIRj}`AgvilFXe6(kbIl|O4rx%x)fcz-jW$n#7b>@&@1!&CTXnLn*H&aM>-B8D z*K^kEan&+!MM_;8;HTSaQ&CaaJR@+%M?gzI24O*iuxOy{;%*@wgtJ{0#OE?_7W3eo zGjMJbI7=IVbD^y^6*YnfoQrMXEDM|!1Lx)3EvMjI>Z%}K&A?g7gR^Sj+$wO^HUQ^J zTWu;TaMl3laGOnFl3(!=W#EhN<8BhyVV^ZeN!yhzhODhAOVnqx=E#;DiB1v7QW)3@ ziOR?SET%@whjdmJLisQ2Ffq?CmPRxvo`EQgeZH6{tf5yBKGR^Q8*vK28{V7(ZYe6`+?PsZNB1{JIbs<7FTai6r!B(D1! 
z!t+XdlK-+069&NoEK7>^AnkLEM>laQ;CRy$I=PIqdA^h+`pP%0B+|%PMWe7|5>nz5 zeQ0-!Ntz!f0}*O}2WYvl6z)T`3XNgW4?!$MtCR@z;Haa$gT$RMp`uND2YU#0Q|26E zGzq2&srn(s^&2MQ&Ac`oO`+lATLLN2((IOTWYVU__mrBXN3wKyPsfM8saXP)3ax{k zsPBLdfNH@53rej@4VLqQ5U5c!FP9#Hc{j!IQwod4;<4`QZc4I*8i78Lx_X*!A<_Q8ey>5fjV(}1vxQ{tdXb2tT2b2F@W<)a1-|nV;n4Q(MmYPB+u=r@g|*BR4nNV zwHG_8v-x@nu2m3ILiR@9!y4pLAn#H}-o=8*o89XG2R>nKRHR@l%*~v6$DwKpv>LIF zk7yi}BRyo|0X-bZX9s5(nQaCRQk@&26WJ7Pc2et0NewR8ry~Fe=!mMjp*cTGXrR=~ z#wo>$gu7NnhKeYwaFJnW=ZEFG1f|MsUfFK!T$RkdsMo?9_jQZON^Kj9+^Dy%*-yO;kxTo=93bqeW z?u)DSiFj~S;9-&Lpp$Fk=8)^qxO3W>EltkI*0de$qDv^4LusH%CWIJZ-@VKY9E^(x z6pd)vTN9xx`a&@uW>5!2tPPg7eBvZ)f_Rom3mJkw2#}e$Df!tRdN@0#mz+lM%}NSt z{1p^;HPcqVx0wgO4(LYf9Jm$@0vjjm)h*gS=>`tJ#RH)eoagH>6OF$y%oQb2<0EDn zi5>-D^$BEc_?YBynw=%M3h%}8mF3hXo!ZZZN?whLjEsBKD5J93M+HWiaSP%ene{pq zj)+Z?fN5Yfxpdk}Ys>pva9-u>OR%N(ZBPJ3b4W;|xQs_y6Qdm=9(CTP4+@-PQ}VNY zP*%spRxA@LqcW(|6O%S1;;fCf$`RKzm&^AT7>%Jowq*OYNRc9V) zZHxW_rC^oQk3mJX^Mhqc2tds*f+j8aw4qcLcObIWStkxy084x5EdE3>{%ubxP68*Q zg4a)1S@BF7RnNE}?U-|U+1>H{6;kb~(Tywp!L{nLGCLHdYmoGoL#I&wv%C>JSKbKA zubG;C<`VV07~g9U11u_4jAu1H4Mi9pJtGeCvi9%dzq9 z-r1I({IUc`%BC@II7)DSgV{86PFn%yq*I-T-x*c11omTx4{O~z*j`|; zt3dr)1^N=URBBvIGDC|Rvy87sD)^?ul6y26FiprY0`h0&_uUmOX_$Oh|Hi`dSRsY= z*~ySb-N(bTnw!(z2XaaJt%FsAzddvRR(qpGx5>ha0C#WI6o{hM2MOS;mh*rsPfJCB zU(Yw#OpH#nCQBm=9XYh|xHkxL)CK_AQ3UHGM)5?gxuN8UbFlW#S4ZE@K?K&H(mFl0 zGYX7{Y}J`?SH}G8qlMo6tvbhEF%o1GV)Z-JG_@|`{?ZVW>5p~vB(T+$+9c;VU)Nzh zI#MdEuQ^i4?i!41q8ECy0Zw-EGobWIe3?@#lTYGH$&ZG3&(*5rE2!5WM-w!l7%$tRry!2qs z6pqP(fc6F#Z2SSWD#J=^7@cmlaHXG&8O8V=jO^c`%Uvw&h0hlrKhQ#p8sMD6I^jc& zKOI5h*5kuPonT}s7)-#%M4rc|&*ZRjD)}*6(j=){iS_X|E40y8^#GoTO2NNSQs040 zpCK*kvGM|-1SwBBh&aP4K%pwHx4G~vEL(&0bP(yaqmz|(~{a@AM^19^;aKykl=Vx z;oYUzJXl&*C#6Z*Be*7<;EX;anax6Y_J$T)_QduZZeLH#m2QzFEB5Ga5{f)nnzyn$ z3m+^pb$1s&8GinR|NPDH^OXN2h#dmbQkN|nk_S-nPCnUP_{C7}H@W|%aQ_MJe>vQr z=bq$aR{l4*{}t}-z*ADsFi=$v4^GJCLfJN8FgJ+a$SiFfZbRxZKTD&*wE{;3i;9-Y 
zm<;L`TyNFKFGnk4!X~Lq4)JoiED}@O%A02UD-g-W&!KHiDm?JYELIdnbk?Yi_KjNd;|0sD0L83yc+!p|^Z7xj~+_+fkOBfc2! zAtKTT7=0blBM4*7hG?P$tz6M9E09TEiKfokS59Z=?HP`WTt$}C_HJk4(;VRaTjBnX zxc}{N|0(W&C){7;{zAC_L+*dK2SzudgB3A1WoH*Ne7K<0mC+zh&Mszvh%~q3R>AmT z4iZgy5>UD%yk3ka=Tt(4Juqenrhg}AB8unY$iPx{f~8%r^61f24PB9oPL60lI2~2b z4^DS@!jT>P03OhZWL^)Rtk@euDMYHwdnVQtt*&}Mj3l|Yc&xY8aNhIG`r8QgD=!o9 zO%dM?qnBxwgxk1;CAyS%H_QWfAu%@41N|V(yNxjz@ z35>@HPYiH#fOMfZgBjI?z0lL!_#ms=13K^0X9deVe7n^P%pbFa9s!n;kpmQ}1!qM2 zB3u*b83SdL!Z0_Nbl-Ruhsp{13G?M)p_adoUnuHj<4P)c- z^)x}-mrFUeg6wp*!i)7)pro-uY3b~~3?GO_`D3b5Nb-(nNf0m_F|8YKmJK-167M9m zK5w}3YMGn`j;rOMWLOvaVXw6RkCyTWxI%zZ|2+{@4JtLQN5`--M>zW@ReIQE6VK5Y zQ{JL|R7%+)HYme$WrfHc2$Zs=kbI5G{A8f2&Ne8(wmdJCj9)GMMm#QxgH91xv~Ri7 z01QdOGZNNtn1!E9Y?ehG1zQ$$+VO4}n60kkiFJNyn*E`uLOm36vz}urV&6oguj*A9 z3mqA|giBe3bx?5q?dc)B71mEA(nC1~E1ZV%;Q?@1(TTNE81nrzhjC1UIytDU%zcy^ z&8JW^Y_PDB?S@jES?q2i6+f|8xU$B-MNFP}+MupASvY|T9p8W;^jkuzgg>xd zsB^+}Z#Wj1rn7mKurKAau-9(VfrIoTS=!3C~F9kt;_SBhSnk zIsFzi@r+m+8&@5>9J09h{Kf`biPsq8tr$yDYDc-C|JbzeG1gRBXPTEfr+K+;n!8P# zlw3^nFw^D(Jv8>WdYI>C$n0d5*|F*CA9n7n(#{=c5H=_S&eINhnwnB4lj?>(U`MnD zi79&`kn7}i7}W2SixpAwa=}2?G(^21PmK>QB&RuCw22rvMaB^9c=LoIFl%kQhT&~| zYD5R8qq7ed;?p0UR+JJ$R?zX-`j5G)O*t}J?jPzYmP$o2%*Ws`!#eSs>n}2+jhbpQ z*9xM)9&wP7Qk?nCypWNBHb&J3oq-(|6eU434GzRkUT+JpS$anFgtuECDD9v;yI=Qn z;rT8uSdG-lo_GghDCvgz_|n89UdKe~p(eR8bMP1Q@E0Fr20Q%41b(=>%((%T>X25o zk8;7bxrgRtf`M~MQ6Q5zUea!=<*HfB#@FXRcR671^(7_LK_);DNCll-5 z;v3t}s9!EHv>a0Z>m}#4R~cOAKE>-8Uq_4W(}eMbGaK%igf=PwtQAv&`?jH(1iKoV zY~+%mVQIVW(D1x#XkZiCRLBNJl|JMs1xYu6QkcQ#gwh5wiGhA)Jb(NL8jbJTUjJmt zC(s@a@pgFz0+>I}MbYOEWw`BeVVa5IFn4f4F>x%VsRtROsbc(ct0s=i-FlNvfNTtU znsTVXLxQrciiyE@nLLrTRV3qbQj-yRB3^3DGrx|NJ}+CXb!e7Z%HneYizH&Itcg{S zw-vtxRoUetE<_r8UT>z%kzz53Sn#U4C7;Xj@0!Fmcog*Oa+>$9`|!k6pWnVA-E{+r*QUE}P^L{P zT153aO0G-AN*Jtg1f%pAW(x!?5Rszp*VAcB*DvgXPW#>-r}$MwMju1NuBq)|OWh;KzH7KGZ8ee!^4*&uQ~XLe zKEc#nb7z8}@SCY&^(MtW3$>_UN7zwnG#~Knr8p-3=~lnixVA<>c8TtN;~gUgE_G0> zvO}ZV%SEd%J_#{C2~3$x-R-+;)?Thcn89*)m{JaF 
z@UdV@v6CO#n((%EILKSQr1eM>urIzRX%!i#euRd1<>QX9LVCmG`lI1{)NoY|i+@=A zjM4sPZ`SaNIupPDQ2e7hwXb+!oGCWYc(0-4Ev=&U$&6DBL%~*t@CmAZ%Cv_*_A{hD z?D0v8-eZ&0Mj!t7S>q7{t)h0OnVbxEP@%LqP_;}{(?8x7MAWYREmvQ$(_s4*Sr~1L zkCBnb$jH_~9jhf|3ci&Uu_ySW$cR$gI@eV2Bgn|f1c;=yX(WB0I>9L)?#R<0IA!CF zEZpD}HXG#z(~%i4jQLJUxGT1q6#o~w%J&J8)OXKDh@`#=Lu4i1B|Ls5>qLe_Bwn;b zT#Hh$o#zOLMT_Gz&dRFCrmO^!R5{@ZAj9I**@&<#{l5N?gjHIrS3{8&o!{5^NMaiC zQNrQ@Nupio!x58npy<~4kN z&zWoX*SHDgo|Sjyk_lxl}c??4Yd2UCHR%=@ONbV%Bk|EC3Pfh{a~@Aei7 zDBKvV2n*S-Ia`by`*kKrF<#;OBJzd8nmuLpN04pB!uBJZCAkVNxpo#XXi0)#B#?sn zdx2z4tcf{QMYcUiUFuN&wHEli@|XG zqsNFJxRv;UL?MXEGVVV@VMzTU%E9;V=NEiRGIBx=6^tx&(W*4aDu-m7XbtGJ+gufD zPXad&Q>*@9xxq8im_=MDF(K9C=&%I3S3`-{lsFMI{-M8`Zg9lUz2Hil>4{ULo&%70KWy3#b3iZ1#TVc|Ktukne#tOn>v*;IHS+sWjxenPIc z+KokWbavws(ZCl_!ceuxQ60H;cPQ;L{Pp!eiZIOLoy3cl@w5XN5_upr?)`@hz{(noQzC%S# zwXj&#w8b8Dma%VE={vuBq#9xl&_M%yhlA_;VNVq*iVq1g0*Uu)uwg{|>#hC+D60dn ze$|opH|21J?y>KlLZMZAEGj>CaIi>~@+jc+pvSoszCB z3POSyBHZ1W;%Wo)l~jyKE!mJoQmZ+ELK`=ULGWpIHjovt59$mmv}U)pV85AT@N%^S zeypF=Ev?0VAOs;${?fy^K$9#I_i3k9u*ovY!``@1-(9Fh03VE7PQWZkdLAzHL4{-i z)OWBS|Imb7j+uV=eJmS}Zm<7fsZ{Y{K=QeQYg|SK$-rY;eWvk7WZN9gm<^m(lV3Aj za}S;bf{pD|;`d%=k}A<-%4?EI>g(ual9DgRCTUhIOhE=AKS`b1+a~FHB`gX-h`<%g z*pgV9)&YFE1K9W_a6#F&_IYaRzT_V`W4byx(&;USQ>Uvc&FX!ok^e~g+RXknvN#4_?+TrPPwJK@vp^o-< zt=s+tCGm8rnsS1n7*Cg~JY7baIiY{ZO)PZsbTR9?d%6IBr>9F(Gj3b6<)&oNw^itC zjXrkJISePWz$zO*y~U9hEX>=&^){gpOY@dEu{3Xyl)bGU-j76Ee02<9yxH2^#sgmp z!?)OslTEYr5(F4OxI78a7tmp=cAAlN4Mw7It9tk5R?T`>b3nu)-h}ec<&DjLK9K5b zpRq^*T#8=F)qkbGIURRy!LUYWbQ=7Ny zvZJXm4R|qRE@?IfHtNx>#slR(%@S=3`>&g$+e}2knd5u*wqOyr$W}l{(FK?$sjZ;3 zmG!}6QOyT>Q?$A9=Oaz}Ak4#by|08l1;p8^0q^r7p(hwZh7*(XMz|T)FjX|khN5i- z(Ps+UN2{`ulO7q=^dYT#z{(t0k><8hbvAD7<<=DEq~m<7io>`|MI*#|$$uNQzR=nn z>YFOIhOIUArhDWJI;sOBM~k>;@NEwONE9|}(mAW~v3wgA<`0ZeeFL^!6`)p)X>qEjMsjj{(4bn5&ZIiq zf%KN)+bV1Jj+&I9^s(W_Uh>^3Kjw*nI^5r?Lr+)fR_IGL{u6|{;ftY~><>6IW4{(Y zU`{OqPB76+?UC8Vh9Wi7TWo1hi{WLUFw{vuk&$DcCu!VdD?KZO4n-WAq)Q*8hMOTw 
z$qF*#xXPbUP}`wP@k`@R!_UXzNBF(bElyn$7rYjR0~$3tbZWi`55QCP`UgmEhk(f} zmAhkACpsz4Ex0=}aHF_5?k@Zo4)MG-2S=_mWwy5(e!}*8h;5SP-K5fk-d3} z#CIc|M5ld{JO?D(-)u+^NnEX(FIIPDVJ$G32 zL;Nd+=5QFjyNOJUwnetUia%4>dk|JUR4NopT3Qclh5gH1VU1$3$Dr!y4g{$57o92( zhlBQqBSu>%(g${X+>g^SiS~IgUktm$ggobcg^DfA<5T)rLM1P?;!?ZoFH_ypsC z>l9|XGE(GHLn^OSB7PKg4nA3zY%*(SW+cG;PDL0%Br1{SxRaCcwej6HKeoE zoIF}YpHJn{S*?OTPneoH!VZ|mHr;2=L#)AUtvN$4QMDOYHa@}xInmeRq;zmV9+wwH z_iQa|3pkws z27;NIm3(6NSTnl9l!EOem=fGr@Ipa%UDgYP(vC>$N9k2IP1^k8Uud+WA@*fHioVPO zW?aCA!44&^=&4`o3#MNtcNio$Ti7P5$wz6&3+%zk^kds)SXO?aus@a|=;sTs9sfYI z^#g?TDxQ~N#d3d!nGr7<&0>j(<-yGookQN-VsRE`@j{Zvfmir=FEYK)YvyA$Kc{{! zrTq-1^K)rOFT7AY8ZW1v`r`sS6>eAII7e9h#TRU+pvCBxP6N#gLuX1z@D`Z`oAs!r zP@iT9^VM8$SlJf>_=%?_JR@F zvJ)1}=U|}5Mo>i3lHrKV`UR{(Y%`Dh-FBQr*t zGjSoHw$!T%p`8IvsR73Y?+NWR#yytx>Nq=CF4bqRZ>tCTKfzXGT^W)z zo(IMmWo1@5^!UL#$pCNBxNkP2s~Xmvhp=dxJNb}`79awpW`YRT*UWDm(U;^O*LtoH zE$W@?m&fIWFD#9^Ysd^fLy2ohizEj)DQpT(3WwyhP)j0Qnl-b5P^7~ZWTL|rBU;xL zW2ZwXx?oE@Ysll%56ilo^5EpzNKAI_Cf`xs?!GWfShGp zqk{EGmdg1gJ6TCIVs<6_=yTYlB#@@n&mL;X$x^VDCYEV7DnBNl4SC*>iG{0n2lb)t zbNRN+JfX$Or}Sn*9t!}5+bi1{IfsfSd-!Y%u|km-M7E=f6iP#l0ecaH8-frlPn7Vx zPB%WE3XfAq6g=K~898qab6d~8SUF(KX@~@#w2+OsT-axkog@e<5!Xnb2FTuczo6&i zvi`CgYVgIxh`9L&&jvBk&kaPQo%y7d_)`T^+yI!Wn`1l1QQ1W&!d(}n-Y`aYe zE=d@Oa~Z!G zMS90L$E(S~*6A>lOye00vwNzn(1Ut&D{ZVM?b(*JO<3QOzWj`u5o5z)RwK1$Eih?q zqq#obBAz99Ib3&0;zJF{Za)ZpvxHZmTR5@8S?>izpVUt$x8}buE6yTY4 z7upHH%2vVAbHUxrfXeiLfRg^70F}Lfqoyz3!p3RhO7c{AjDwIwh;2KpkoTje2m;?7 z9>9ey-5m~dFSK zoo<<{mJgrRZ!(Nr;;Kc$s($xtt=k&HuR*2KOet6N4gykC_eH(ALQeXCJ+^86Rs8F$ zE0~|o=#NzsR&!I)C>I7HQF?$=wQm~(TKw)q@w>Vyo6Tb@c=OO|l@_=6m0zmdIX6MKZF8ySb7|l?noFWCjfZjdC5P1CX^k@~UM;xmF#1WFKiM_&NC7=U_&scoEsn^FKRa#%bio)33GW^MNE#;w}>IO*BQq89Ow#KlKGU z0gj}AK9?x69dlOkQcmz~;t-a^x9PbDMn(iA3F3mRXZcmevp%guB#ZeX)yJE@^$t#2 zwVZE(JVH2B2oaX~DfKDRqVSvyS%487X|OB->+^vl+ILS932YoKGGyRJ5*7r{)p!95 zrb=_J4@*8aa&t>Qvm?Tp$amATM@-6Bc|5ttoZV;YTIhYpq4>Rdb=dXt6ZO<7=DdP+ zlQk0is;M>VS?{MA@J*G+WKLwKTNY;On57{Dpx4^Z_2`qp>&)YM2G`e{vvqe)0X{`c 
z$;kw7F<4B2@exbLf}Hn0R#*|sRVJ7`OKhc=W-+Ju^Vg?-t%TJlt&MGMi2F<1+K{!P z)`r><&}uMQBX+F{MVoxfy0BL&1CGntu9Af@*=jKvX{(7GP1`7Hyk)AhOQOdmz5mc>I6|UtSiE98g$fYy83TihhlgpIvG7 zLl`EHw4Z!jOFg25>ln$VwcMNL@Su}&K-|hEr;nNOLvhlhDh%g6qJlUe+B&gr>%@T& zc)4OU$(m6Ph~aaKR(w9ih7CHaciI1?DcRt6x)eU|15C)Ce0E4Hcz{@PPtMD}4+xdV z4w-9$$IgY%FSfK)Z}zr(;VcVedEp3VSKuzzQ!gCwoEf=M;8No^3S9OA<5K!Ufs3ii z_e|~RZd`i)1{1FI(o|KJXi8>NHiRJr9qCtS{P5<8 zoF{jeYjTuP{wZqtNKdKiJ_1pao{DTRAr84v-h=R<3J&PNB&gjtY2^N&9XlGb6_Mex z+*43K7xFNoA`&Zv(pw^Rk^eyb%6=g9N&ZSMPJ0JP+*sbqT4V9Hkk{j}L5nuBa%lTh zs7AFhuZUN|taP?1#G)_x)Ac1~d91u~IXe|nP$?iPiUlWIDm&nI%2_?NONnVIwF+ws zkkF9Wl(VfT?Z78m>ibn1)khwcUCENnvbYq0zF*aY5nYAga>`iMYk9CNrCyq+q}PI@ z4IiX&3W@ZTrKDx{IDJ-E!XPTOyj}8l!t9oNz^KEdHNtE3mtJTygd}60#>$#cbI3B= zEPmS(iga{ENNnuH0xJm90o`R)_@4<%#>Mu))u*b5V z1R^9slMc6Hv;zT#?5yvrl4>uZD8oL;!Iz<`Lt*fhw_G&JQDb|*@HAX_S`1*pzsdiN z&>bY+NVfgwh5bYIVx>|kRm!zWr3!6U;+9T{t;Bcvaj+Liz)Bh_CP{qaH>MkOQb;Sr z$N3?bf>MFtJ$rpL|N3y(>*@UKsjk;^`PZ{uuTSS+pXz$On18*{{yK*n@JcQ~K}^P3 zWvgNgqv{jm*z`&1!!%A%tfwHB^r~(6rK%n|;^8wBFi6fGjBFg@)31YSEAj8+ldYc> zI~q3rx$I{#8z!p;Kg5CtOlif;wW`37d6prlRFx1f?2tZ9BBl7j{hC+6ZNENQ*l$~2 z_5i0XQD~9uf|eAiWRSm>4g^5szb$Yml^P7l;MhoW@xf1rxzp<5*izM=6#uN!ZUmHA zUCP3db(+_MbjQ!gOFQ8SfkYd4mM}jjuaHip@y$3M8ij+S-; zD<3YRc+4Im8?Q{^FE|o^noHw9Zm)lLNc75#M65$!N|>(Gd-38^(j!oZ;0ig0YJSxe zQrCEAhB8aeUMJz7QWugR#-EGpv-5#q5qIYqGi~Fn{gPrjuNdzJ*C{LyjAg7WTaU{c zY8qxR8zdK#fJK%t6LoST$Wo$cmvdG;$V?UYF{jzGr=mu+DeWkP@#qQH|k2@-X+L9R49wD>TAC{-*E!M!!o{{Sub=51X8=KJ~H^FY%lyMhcjIUCAX=<@kg!7Xo!;Gh^essIs$gu_$%A+*`EC z#ZimJ*5+3izXFNB`Te{ZbYtdYgE~p9U#iun_@(>f&k{S)>f6gxwww0pphRBx?Qe0W ztIl86p+oU5Wjy4rh#C6KG*@d^lv{xdmYHBRbvq#&#G>N{9Br%15AfXd>Rx&WYzd(jdyHJ zDajjA%(78Tqo5UEZ)LDr>+7!%4iDfi!aBnZNg5)Z+le7*^Mhp#V6^$c(uuF;eVWg! 
zB16(ez4CFyB(EXhP$SmuA0)S7d?&UfKO{%XGQ%*3{BgXr9r9O^Ey=!TGlx%WW=rxh zw8#YWefzkqiP8ba$+U^GCQ2zbQ8%t1R6(^NsO9*tz5`HiZwFPNal8`D&CVt41{n>c z1LU%TDY`)xR&P4U-**&{?|8OA*0j3mApfL5R)*pK$KKn(*;Q3{|M%Q`=OuIRBq!vB zm+*4u0x==4DgyE%IjIRCD5z-FO0_MmnxHjE(N=^D1Q;;DsHqMb?Fi9~5_FWPqedMn zF9SpvG3uzOQ;T-Al#Vs(v_*NozqQV}_neuV0e!Ij|39C{$lQDGIs13kUVH7e)?Rz< zy;B15^l@1o_-QA3L?kc%Ly?TBY{SJe1AxxXOG9)OqkHez9Ciii?)orBbR#3`HVzK5 zGg*U+FEr(g+UFqPe4$j5NiK91qP?H3Q5fqQwQ}{-kJ=UYTDbb=R|E{7)E6GM{@tvv z59`j9zK$PP@8hGok25!Vo;1V6dD7F-1Ue}Cwb(VM&Ms_+hU%l-susiAthnL%S@@6(@*jPBKZ%dY2o|-l$1TD zifAwOdWU*SQr3L~#k8Y(z1Y(!WsDyh_4gsdAF!jnQ+3e>8}+(8B^GS|gR+3Q%F{2k zJ4Esy%-9lhdHPBIg-GtIH;y@sPoIBK(0CKqcj!DhqKa0S<4M$A`jDZG`K>;%XPV`v zg}$x3J!!U*Cr!a;{bM$JZt4}=_#sjJ@}AAoU7z)dJ8@z)aB6C)+$DB10D8RqfQ^1+YFEXW$;d zypy6+amI8WP2*I{an38xmFMfR343f_c!iy>vM;C>)^&2GM8(2u^Gp&M#E2a|#G~6a zhZH`=#-@tI>Cvd0N8ol|K_taJIb+m*>CJDjb(3{8#;}Ula}~aSprWIFR0WYEyJ0a&|y4Z?ZMo91~BcV`NK!UM}hh zgd)Ymb)7{G?AjG_Sh2oZijKHQn=XPmbRL_D^11sw_B6NP;`n)tnqw{yv>y|JbQOuO zrVrGWbj=a+WQj7!heTy2Y*f^fTjR0F=GPR6cc2O^ATlfZp>5^e8a=8?60QkrbScjB zRwXI7BKO#N9MT-WWHNrKxq?tPj<@L@p5={vmn5sqqx3O?mKGP`>{xrXoIo2Pt4rhj z@z*-54il0Ph94lJpehTL;kJDjs13*)poCI{pf1RV`HELCY*Q?Rs*=u3-4%^Xz-Zhj zo@m=tnR6j&90W!+v8c?&+3f$`Cg++(UU?=s8SpAH&4OGsTY;BysYz75h}e+LrTU?d zXfNA0y4t~kO!cE6?N=Q`LmG;0|6DDHKhIa|MOvdDFs(%MX!uSf!H&U*yJEZ_wdBc1 z>u4?Us=g<6ht54OA|O7g$%WSZ6It^)Dvp}q7tuq!Z`LA?T~%*UT$_o798oSr#}2UL zW~Jc!QX3z_Z_S4qk}xa|B+lrUoXshR>eCv-*MIg-Y@sJJ20C#k4wpGQ}L^ zF+pY!Zg_%(n?tiUZ@(4+Auye%#!f6kb9-Q0f5lqM=xjovow)NPcg<*ZvEnYJY~hYu9em^Tub6Dr!#-72)ttJ5;3c zoc1>7rW_{nSG)%Z>@UPoGUBT(091vWKNGANl(pS+dbT+ zTp5oW(O}w{-U+jPcFS?9Mx}%JH@#@ixJtJ80LO(`h&YNzI)ywA3? 
za@c^J-Cm@zQJv638O_;g(!WAfxrI$zJmBxssL?DYaZ&vx3xmeLF)JOEd{wKomLHkZ z5iLH;rg!H+JgP!Ymp;vq%$0qZbz;u6Xg~d6X(7j6AXKRDysibz=FAq|+D9&l<^s2| zi%Y8kYEg%)CEYLl;6fC%(Z%k2MlOMFHw8NMZq-Iq4Vt7-Dp&ONH3^EC$VXiQ>;r%;fH9INZX$+4*!9#s)POx1?N^~ zQ7^WOz$65!Vs*i}TB!YEZ3JxwVHC@f#JthjYC_`9czHrCu`sLswoAN4T z7}+Mq`s@v^Fx0u>uqL2Voy4Z7nwZIdRH$nn|IKF(SeFvVe4bddz)@|}0`wBd5ueo8 zUOdn&`2C9|+g@88yrB?7M9*ZDt!6GMA*5ukZ0PFbg&cv97R{t#XUm$?OtEOP*G5A$ zV}VYDv$P&*jfFGplK`0HR%S~w{Rh00mXg)i7L2B4v*NM|eJ+~_l;jl8WB5F)Y24Ty zQDutQ!8oXV`b6*d7P}8i>;@3KN=EDk60xH@MT0JO^Uu{9zo#U2A4crjZ<``>Os-TG zqGT}G4K_&{O!}mWG!V;5AHO|$FK32P?NicU9U>zsm9)-wg_)WLsmxx7Bhq=A?Nl@e z?}Vcs)_x=Rjj^Xa^30E>j#DPB}<-$);h2JKp3!}pQEwzROC?P*#$ zABkV+iG*)wYP!7)*2$wksmL*KMBt%W(Xy@9!|vps%%jK@oGFn(}WrF>c^vMtSJt_JxP6VzUg zyj(UpW2o0Qv0X>=EZBSw8i|$0r`@*p)T^0BBLX*n?JsgPIUUk1BgQd!xd!vhNnQ=d zPwn1}#$LdM@hs3Zmi$z9!@X^D*MfAp!YhhvbbStw=g-{)bm1LX+e=?&jS0k+fSxMQ zpnEfAc_5HU<4xW1CSd&X4z~Z8w>ICc-NDvAPfzE&H&+*`^n$g8D>tt#yri=&VPCFt zV1-+kROg*@+a*<+k!KR|xN7q`)%HbQ2d>r1SH3JnC$p*7qOOC=Z7m|=$;ioB3?k~m z@kX5@M5AA4}5mKJKkCDgDo|J)0!?Tn!qE1?tly)UQ&yZB_Tql zzngo!L{WJ8@o~b*=0h*C7BR5!rwn`{H>t+_oS$&!gb&QRV4_D9# z{paZ|G<`fcYV~R2Tj(z;tiYyeCpm1%SRQXgXUe%1o_VcP8u^-iUUbP%MCM)I2Sg}reBQ}Lg20a@Xg?3_HIXdbL?O+sa z?3x30rOF&tjl58~1|>|0mt}iG>KLuY(r5t2c}X^m6_I}26%}VuKKA-ds(#CNb7$IN zgKT5A6xPgJd-s-`KK;n>yKcEVcL}XKFgA4gl~-@+UHy*gyv=KCmtV2DyQgoccG;D* zlz6A{?Y!evZLheXtH%{w-5vP(5f%-2nlmMp4T7n`~`G}$FMsBt*a%M)0sc>7g_=Tu@kmX-XpC)oIM=>4xeog^6I0fA+e-L?1y=29W<0*`WMbq3MN1Wwwb1I%ZS#(sf*LZI1K0xoM* z(GhN62v-pqGRYeBTwhpXyVg?=NAnipLpWbE0wyI zFj$01=-~`4^@8G>wYf4UJ;QWPJB64W|JD-HZoh5q>Z>=SG-4}Nt6g>#=E+2)2iAFoV&u*x{0Xj{#(~Xnz+NGXgFb`?d=8EzqHDkqKUOfq zF$?9g3(ap9aYD~87a_CqZ{QRC#sR`>4)tDl@N?Za`XT+!oFa?CDL{c#>9BT6VNyIk z?{P2eW+jgOdcqtbwj{u2Y9$?HgA^@T@P)T1HpSyCzrRIMw#?6&B?v_wS?9Oo0E?}f z<%PZ+!H$UMtTr-zkISpoSdL&DaA`Th!p3rhS?t%!55#LSzmYA+3`4R+frr?1Q+3pD zlBy)A7f-AdM7NHc!@E!oATAgdt5N^w$Pp(9?pmb zy%SW()l-}<6?7lSD?n#*&S7cx$h;k?`~7y`XRIayrO&FZ&0q}ac@DBfN!WTNMYRc4 
zrdkV;V=xrM>M72jpt?&7gNAB7VmQ7{cO6oKZ1Tvk9#MI+29>aQWg%$4j3h$V=;$C_ zYEAC16b@CD8{=uCN%kO93_1c*S&*EIt&B+KMPaHkvW_nt1l@5OmU0Wp<|XJvW+8GL zD5rxUOaT?7c&IY)^f{j#lJM8bnRyarVEI?n0s}NZPN0%+3rM@bXa{n+$yKB%nRc)! z0n~z=V55S%^_88LY;QYk1*+GUIowh^ZeiCp{QyN#hpbfnHrbV=v`7+phI5=5m#M{B zqt_vNiwe$~(l$7wi*Oo`(uR5z@y~177Fo;~Iwd*t4RftrS^B@2GfPdb^jhw~4~t)D(t(akph*%I zuQ1{pUc6%dq<$-jyNK1XEOVK>@a{u2F>+|NWgR0$YxrXJP0kA2?~JL&Be1um=58>v zyId1dEy}S6;#jQ=4Afeh=puj1h-1Nt5_xBDii)SPevJGG(W9BNCHd7l>PFi}Ko5Fd zavm@%gX%@MTj;%}Kvvu_G)6}(wrc#!?2F{F$8&C&a*H1L5L1Rq1#odP)vnmhc0G>D z>C#d0F^oJElgpwSg45eAiC#&BQ^~Zj_RKW? z+i!_!SF$vUrhaG8mSSNPtEOODA?w?b;`GVQC^7@wRA7X9)ZN!f|HS99h4?iKuiix&dUtO<|bUlT-byBnn=%i9Rr!}!=7wAz-vR4-I515-RF?yO658E^< zV79=b%g&8<^KaBB+04Fv4c&lEB;Qh&7cv@vPfolx7 z^rW0mmc{H8Q#ue}T_ah`+>uB~o-p{Fy*rlnIlLiEe1= zZJCzR13aGJ%Hw@P{e4=CVp608At2y~NsE~Vo~GTNtouC`7Mk3%eC|%M1NIDwaM-g) z*RW?;*CQ$RFek$=OufWCj@21?lU7^JW^kkSxLG9*cqoSV#C#kzKFVs*F4l?0^&~zk z1NxTmev|%3r{hgD@W#`46A2z#!GP|r16jL`*?kQvY?Md#hxv*rir!pzkI^$HQ)9Hw z2|Ui#^SN@Zqu$O~%{FmzbI;!Ft!&sHbKopr#UJ-q920Z3{8Z>J={(q}!|2hH3{yo- zMbb8nn2Lyz3uOMRPxzT@kAHl1p7mgOVl0tsKT6{h;#k_#yvWKk_bIF=FdhyzG0wR? 
z5P#d{Unk)uGvE|}f!)x#BlIyO4sfupomQp(XlwCKyfxnYPG)wSd8 z18me)C~89m7FqbZxOQI`_r8{3miRCL;pUOX)c6kB@=k5!5U(0!cgHWa0Sg0IHfjfKE6bWd0|RHzcXHtSYKe(t$kVFum|Ex`8wmupL^>_1s|BLz zlk8ThWl_GK6lsrod8r7Ao<(!Fsf(%JI$(}>p{Nw-+W*U}VR#t|3?H(Z_W`)+N)36y z4R2>#+&F7MDiV8-`KH=@l&c-#u5y)Z6FVASQ5jaHQn|dX^*28wr^ptL2u&YN&K7N0 zMnrvLw(V%jcpKkD9tv)QJ!Rg?Y#VR0n~LgRxTv0^QeK1}hEb3~)F*sD%_Ba>J7VyC zwek#TIql(ktood$1|(f#XFNo!!=qv# zE77LH43#dk?vWBm?H-!^gEqQvbmGR@#as}uYJqVA1rV}Ce2Iu@BHKh*3uJw>n`;8r zd~t&xY;k*JKd3S3r|}k!b+M;s^_noV|A7xNEPs!#Xa-AaFZM2u)mI-noWO7K$U%Ny zYji9viR{WcuBn!eFT7S)=9?9w$F||HDsRVECtEb(F;+l)@oU$F(f9e2)N}nJ973pF$Q&W6K+;MKSpS@Ogtr+buF%Au>e!T1m=ko-ZuWa6Ia*Wv+7YFbwW@WCiBichrA{XU z1$AnAC~B)tC+2_-yE@*^g+;!W)6*b3z|F_9CYwO1i4tqlE*U*p9b8UGyH12rXa`4v zd2lrme0=^CYTDHec+9LOQLDPuJl*1J?V6gWjee2Oms~rscDoG+A|Lh@yJl-;Fjc4) z(bd`-yE1!+!a_ylOg|S0WmWa#Cf}u^eIu$`H@le5Tl?JZ9cb`ES8IL^5i-@Z+P++~ zm+LC6b}mdF&zvwNi<(d4BljYknC=9aF2oqL3ZEdv_oIln(ZEwuh^iQsMSv7Ia|ZmT za8D6V6En!QMD)1AY4kOQjsOSi7ik?iUh77D8pYNs9h=yRQnMO*o*@C<8e7E!+9rlY z@fw*DFg&x`x`-LfTA)Sp^4k3EVhIGuS{n(mDNS&Evy38SD&~mwU!>Z|LNPydVooOy zBa1M7ff_f6ZrDmrLJ(cYNBEw@N*n7-vspu*UjuEq@UyLEF{u(x($c(TNadp+)>JTD zmD~#yeQfqHmEl(Oqxktl7y!N`bj;ny_AwaDKo{@@xiPI-{= zGqg?Lc*jn08wdvs_?^_jXWHzFAbmu>(XVrOVn!-5D6!Ioe%GrcL=e;Tw#sr zQ25K_Fh6Kfylt}08+fr<$$WI_S+q5LhMNeMyild*s_}|cw8aY8&PJ^^PhmT5Yd;0- z{f_YVd9ks;x+M$MD}ilf!U0%0^Q(`lH$q(BN&t;tKcrGv6(G~jy-&8VaSw`(n6w8! 
zy=mI*S?YttoNRs!CYalX89xRv-rP2e=Z_;YOH7WIDA)Uo#}A3D5`A#kUCer zch|X^Z^BhNeiN?dummx$)I%g{i6U3dzP83uZ+Y5Y7xPksLJh;!{inRvawx11S6WH~ zMFRGD1?+1mZgoWM?u1fI_Sm?LZ)G)c;!o1I>U*WodFTW8&7+SkVU9%625Dl6b^INV zs!*vE^|6}1=H1~S+x-POgVAN)#fPRu#CFU?!E;Chlp3~!q#Mi<^E5m;+pn~Fq7+*d zPuboWzc@gU?Kf;p{1cf5MpC8I$fyu$Tule}NMTVlo+S++Kd&eo`g}eZIjqq?S*U2Q z9Dj}5@28S0a&L|!?4tGc`g1|}WzM?y!3(qXJc~n6RHQ!_ei%>9G$7x6>IBbjNq!Ap znm*654#k(#8yE=73<&59F-M~^ID&E(d==xwORKHnu7+qaY~$C23Y2q9LIb=|C(plW zBiFZ4dvwD^b##tB2tPfKZ3zLZY0QGetU(V#h!GHtauOL&)H{#;3_*<=L96tn>vDiv zN_&HRv99`MWF(;4_^y&@9x{ZO(>sn0XH?w8tIJ|Hvq%O8clvwB;1F=0Z zS8}I|Y%_)->gC!|D4|<@NR=ir!lsKZWCIx}6g_x=Y9}17z{6Bj z@+j$&i`A~xm`7HaZ96sNa1kqOax|MspIi=P=O_7myUPwti)Vec#(&QzZX%FG-DF4A zKQQ`A;Fix?E$xs_H-FW+=B_GMbz`YIYf~5gwRxsYKO(Ee4f04Cp0y~f1!RYKBZm~7 z@*<7b%n4-_Y9S$cqj({byhHJVh{PPk)j_CGsbIYWsixk9cX*Rk9cq{!C?c@(8X(m? zR~s=#zRly(9d_A+h&mYcjBgp+`qK@zll?7KREU zIxD%6FoD9AwyS}uJr8EH1)Mwa?RvN?FXH4f8b2$6c8tN#Q=`&+#5|l99u2Pr%XT5N z!M9D+t5Pv%m_a7hh;A=Hi?L$;<Tk-$?ZahTQ||S$|l1kifM#lGR&x|EtX-W zc{K)AV0Ta&R-|Rvoag2AU^MR%q;iRlbwY;q4!p;LWLR%<875SW47;~ckZiWZVyxa# zRsU5rpbN#sS`Zs8NiVJj?KnbhIb9qJrp4NjD|H)b0!v)6$R8K2vBw2_r3Uk1uM`-D z4~w?eus~jyjX8LX3h8HzpelkqN`7;CsG|KexhU9JQJOtql$VQX z55G81dPAYgg4Z+lunY3A6Ur3IjXi8>MJ5A|auy=$ax4zzMMZ7Bhlj0|bD~`ipB%w@ zG86d*QsW4gk2P|VbJ=Q?UZHdZykZFUt)!c%m*rHwa4*YQgVL@-B3)^v*y=JxIH;}^ zp_F8|lt8uC1&ZpJKq(caRMlWOB~aAS1gd=W2EY)CW~?q%b*$ikx;^nv7}ia z*AR&YMYPbKh?xdqG7lzzG(x3}-i*wRK&K%jPeHdLcWPoah^turz|MK0elQg~mv%&u zga-ZgXf&^xianPQnEHT8Wq_2(OiLU!>@+7wcZ=0Lnu{O!-fmn`MZ^MC356_Pq8pVe zilcIQJ`n|iHc~WtL07(E9|bAhl6U(U)a|3zvPGAvBP(P`DSW!vJ{D5;5vhA>_OY&8 z0@+8?Ewq(%i;u`N_R(|;_R)2VSx1}Cwh05`x()S_w4O7>+f@>!*aghUomTo|X1&zT zxCJ!D$(qPl!&;dA>~_Da|!0RRqZfOEwJBvP|g+KG3$DpX>vn`G_J{p~7Z1EXuEeT3b~| z1p)ZdJhZ+hOR!}&Dw>~+iZt+o2MmIwgH=~Lr#_06`5J!V+hC5QfzT}Bmm4@RP-F~g zyux|9{2k(NCk$g9dmr?y3ic44pl^y24S|Zben;edMT0*D6pPWrxkTgvTADxicvF3Z zB=y>O0MbdDZL&ML<%sH$ykQ z@d7@Wc4j=LA>8?}Kre9jS;IIuSX6E-?FquK zB%3R#va@)gy{i=}Ta_r1L=BT_p~!e&OYGG;(E0Hg?;?{RkS|C>5`s)a%7oC13C}cz 
z)aZjnI3;IF38qv#XkVW1h?_UE33j!#QiS9};yPfg^-BwFpNPPql%0CZ5=}O?hquz3 ztwGiBcE)_tpqlwAYMLq*Bx}(AfFvWez2*&*HYLj1gg}cUDYEMFXWKUzIh+g$fmNMh zTPLw2MWQC_o5*5X3^gsB;U8LJCjL#Yu6n>;>QI;85v$Nxwz{Mq6oyq@5*O;#HS;x< zeR_xu85(#sE{fWTBF(;T-%bKiOyRlXq<{-TLVuu1eW4D%iR?UE4|r=vyC16W$K4X` z;z9!hTDH+QC<;u}M>d|bU4D6Q-Z}t5Ka7iB(woP3!4J?fOFZY0Qf0(K-G#!+ACpZb zt}xr;Rwhjt4Y^yccYx702Op-lrKNakD|{ZWyU{V^y}}H}Rq0qCv}=RRBvz>o&qF0y zb#-hOb}%qKj)T+rB#<*qU7&54DPzSeB?v~bLF4Fo7qs^7aPdyb31sh2OqzEJ#Z`8t zTU8m;sK#uolDJ&x7ajd}*A<=5Eock~wD@n_ZTFR1>VUAf%>yvq1%c9LRm~XcC?$Eb z)_y$5`m;QSg5T8d-Sx2kQG+^fUP{ww6@z0vm34nI8j3Wq1(+4e3ly65))Uiyw{|lp z5vla>@*Jqd*V5MTn&Bb9rv5y8L)QTel=+zLP4=*wj2_QgRBOt;-gJBGO;_N27?X&W z@Ta!Hb&gd}#+Z3TGJc_8xMc)*P)#+QH$~*8HE2WP>J9odh=FwDqrASZEXJ*&C$nNO zff$TQ494uPi-BgKq-&oBDkO~C5S2O=y(TyPHvQo?9+TTwAd^y$_SoIJFq$6}53HCU zYWP7Uv*Y1##up>PB;w>I6K&85ht)*dG6qas2aT^JrCQOK7Aa;>)2d*qjchbbP6L}5 zY)%@1Z5z`FQ%&L&1oO{MMnqJV%>0C%&PB++~HOX+c!G>tC0X5h_s=-Kzq64d!#TBvZB1TiSX-R718?|WpMN%7^ z)3AohS3=Ec)E&hRECU;r%h8ex6A$UVAwrnTm`mEqn&;^fS#_L;(;OJJI*w;KFa`%k z#DS3%2k1B{arhtV=py3rG`*8XQMZYFy^Ga~?9r)5CsOpH@3UO1a~s;T%*JfS%&se` zlq<&L7z3Q|Mn5-yd}o%3?GVu;{`S~i>*tW582%tXSLPm<-=&c$C_J>Q$OiWF@SkyB zZ1HV5Y3PxnhD~jnc9zpjbR{lSpg`%KjwAzMEJ>YbbJ0X8(rWRBBJgi}^_Gm&kl3l=*KCuObprxb~`}N_Ap5#Y{ZWflX z>;m>z(kbD%2WPS#zA~Gs7RxSB?d2fNien)bVzH8pg3$$%B!ae(rWvtzEu?nJ$WggS zL#$SLxmr4nX7eF&herbN_H5j%NPuyUJUT@J<|@i){Dw%61|;lOZA#TL^@zEv1>5pY z#nn#ZszuA^piGUa>?&q~q1j@jXm7TN6Ay&#owmZ@FQe-8+twUkILBN%dmGb!fybC++s;+Q%x!Xte63TB6^X5B zpdiZ?(wG;KhR3gCJg8|vX$bQb;+?-39U^CDh0-w-^-RHfSz z!m&S(IoGr*!)r_q`=#jG47F-mg#3Ar{b_XuqZE*1zc8R};Aqn<2(TpEBATXB zBUBMFs)Fr{)d>>b8f~Ks)*0bhujHeq8A0x*?d4#JxXElOTlJ+24su~>?CF9-6daHU zGgvkXU?W2+%lv0RiF%z?`fA|mZp7p~0ofEh8JS{MPI7nlqUi7q(GhP74@Q1St7Ca5 z0?6q7q+p3UFHKy;KjNd^;xH7R5Ta_sDlTF*^6)I{raZ6?5 zU?d`BIXiie5~n9^55llyW+2H0Kq zp)?tI+7AsJCkIi$-mLJT&G<6Kl{?KzW38Vf%UI>*ZtIf0yZFFlKmw%!*WGwl7DbH) zr#EugiG_)vi~CiIh%|BMZJI7MN_mwh8G%h9f`Xcn;-yxnVmMf=&$oHa=sR{(@kx44 zJqK6S))U51)r1Lc90}x6;caRjlwO?=vUB-qwP#IB#&!i2>j`I;fmNutCJHhj)2mW= 
z9`8`j9Mc?M!u|DG_0ro7TvaR+UFD;ggjO}16U#7xe9K6I-PNLSkob6Y9)oJ6Ua@(~ zhCHU$6vL|Y0_p>EV!uiqBp-FWiRiYey!oZ?2SJOyv#gb1yjtNCwtbNCP)|v|h4pXk z{*CZGEodm9x^P>!qnv=?CkH1PMO6e)d+kA%^-4*!LUJP0l7cK6pttd(snV(i(bN$9 zVzjGs9jpuute;|YQAD;XB`z+gkJ-ECmj(+#hZ5Dsawf8Q>L%SnE3h*M0x9P* zxOju)hTk~Agf^FNM`zMp^L(d>hYr0UJW^91hgMtZHJVRlj#M&3dyB*tR*+q$b2V$XM|9!z&&e?=5o4>4B;#cjgE@U>!`?l_`p2I zg%T|UgIyh5RVHNec5CODp9jTNf^SDv+ZJ^}sTRRI+qkx7sqhxQYoJN$A&WB+ELrnP ztK4JLYpSbVFkKcsal|I;gsTE}^EhIYezz{8UM%6)nnz6qf-VYqsb&q6V0(dijAoQ0 zKxjbzEJ=hRX=zAi=@t9Fi1?FZ;M9n@;wNoEpk~i7AQyAOPT}$T0Y^3CaQ$tj zFT>4rkhQrI;-D2_|2uW3N>#|Ux<66S5)!W5bQk`|YLAophpj!*mu>apCTlrMZSZU= zB>~~Lr5oIIYtpvf;C7Ig&BkRKT#Fkd?x_Z+ZLu5ETbgd_Mn`q_#QfeMvAI&-fy^*n z1ABy(JHpVS({T-188eG6|;E~e?G9i=UG%5V|o2$7;}{lO-eFEx2KBT_By zkcmWDn5pI!bK7#ZeF4>!(x1-ZxF7JU;w32} zj;GmmgXJ5hm(AP})d_5}t70i__%Axz5Gay>%J~-bGmo(n5}83~>-14JNR@Qa@0F(% z7btZ>wxTz@hylFM=Hv)q|6a9Wk@@db8(9h)2bU}@M6}7#gEqN}>B?Z_gLteG<}SO%R@lw}`9mu#y0 zz2@iQ3o#m_aV)q;N4#W{IrTwBK8sx--n_M}h>EnijU8J{|6EULh1NDHO=R4b?DEBO zbu14Mty|C4nVtuzw3s*#u#wA}DAwwTyxlD1_d4&GX^TpdK7oY7RGmRhKh`9~-g zXUu7_$%r@?QA1Y-r$w@OtOqm2-%4zdD4_927`kVM&MK);pV&ocNa5Nf>Z$4z%oRCC zS3JzqubwAcP_$3Hj@7@cAB|F1{|25!NaJ-?C4PkWhlY$+tQN=P8xN^};@SZ60TlgK5vRZHpO9Pd_(+lt6WiZkH_wIA^oW!({NQHLp}bG%M4!8id9 z!|anmWZH|LbQo!Hrno_z_WR6ds1sQjqOEjWkSr&oA6v6VEM&0IgXZwvuj&gnSD83r z9Af>{8iEX?cW!>CAIMbCCS@O1g$`tA8I7pF^3EXqUXrnCG(g*6-Y6XeuL7_l47~MF z;X{~`9u>!mev=Dt;4HaRyy)uti}{m$pR5r!MYm{LL~as+Rmm3}#tmfGT>q4D;zp5) z6+UvMu4Q;&q+4|)-9(M0@hRn&90|F$|Bh`&8E8i~I>E1HPEJv*x1FRa?o^;|wb&*zDJZaWl3 zePP#PzVQM*QI=&TxnmADd36FtOp}ehsVP>6Qv?@CWtvvOEt zkwnxptlgYd;{W~o{|g6b!ezERnkdGe;F{ou;Ktz6w_k0a@A$i`?DLYpy^>#--WXi& zA6M{E!~z2(A9Z~p7c-t@)`Hf{#w&uC>craVHvCz9CwI-mJ!2848)CR8J@F@`#q54YZ~1 z8RQnd0 zmW;uGw7jN&JZmejA$WgKey1XAs`xfxP;pg}e6FQkr|>&3MhQ>8pVRE`F|O`i3|GG; zq{*dnXHb*qc9|Y=2|kl{u#|Q%9w{bkq@-!L!Z3EXY7gyVqkzN(aYX!xMsK_>r0G{B z6{e8Vu8fcdLldZ7A{{6(5dhf8=2v0^42iA~8+5E#+(J$~_P;sts@ep`r>(sA_u$Mo z|8F>Bg@0GhP^bxK5~vwD^L}wgdm+Z0u|nFSt0`w{Vr>oc+*baE+j-4)$lNEh9XRa_ 
z2N_&je?+9Li4iGlRw`!HAY6mX)t8#K*3CE2@Er}B$Qm$-V6Eox!h}%!5xhP0YPGvW zUd<-)x1O&4@_=ern1V!tUP}Dg%`dO?SE}@oSLu8L%~Yv*&1|4c43-CMWvqox>E=F< ztPvNYi|ohq!uMK-xpO%k<{PR-|1xoQXnCf?3@+!em}UHq&`N!UUTUk9mzC;Up;A37 zGo`v$qKa1VI|5Frj=0qBrAetF&>&}NrqsYvbdI_RO;cONC!uRocLaV=ck(;jM=6B& z)p>11j2>z8ax}-iVrZOd zOO^J9e^=L4tKCAj9yx{@AY0?ANj@4s8rd4-w`lB`4bk@}uMVS9L$)Y1Te0+TdtKQl zIJ;N`7kmd@Iigj-YWnfm0XPCe?I;b zx{^+Cjk|aK1#(WOyg-I`K@%=)k(ls;Vb`p~(yZP57Im+h;RTrDsV^`E&r7Two4jam z%0 zQIg)>3V?UEOz*ZH_mFz`s0m?{9%pWXi+*d|Pfhf#6xyIU#pWB}xNMi$&_B@;-al=S zk?NlYL%WzDyYEDX{>t#*Z2xF8>=F(cc6|b6?PrFa4x6&-hJ>Plpe9z`926QBo32(= z`)52YD~;P@caboBxnlS=>#2`Rnd!b1$K*=v!784TzC`Ka$vw<4EWE1T0}TlQus5}Y z57gV38(vIKSK6iEhNx-@A51@M-CjROy^I!PTDD2+B((G{H6ary$4Ka*DO4bQ`=>&F zN|5TO`=g>(-VaMBOKN*gt86QeAeA;B9aiSL8LbN-vRq|R!JVCIJr5sMJFtCz?UU;$ z^5ITN^!Z)F99X4hD2F4kUpyaJ#eMbK!6QY=Ft&1^4WuNgF&0fXlK5OYKaXxK@E!5DJ81;$5#qDC>R_gaEq=i%kAshQ=c@YD>d zM_Fa=2biWHlh8Z>{x&g^=0?%2ty8Hj=sL({gEA|Q#F&aj4@j*pEsPwKa`5;55ZOsDSvP&UZFLltKvjunTS2m z5Tj;87me3YSs0yIjE>BTkrj7!Uo=_Ah8<-zxQ2Dff-%MLF>o}>kVgU<;$Vu|V?bMQ zemeywL)X2k%CqLr^J%`kM9Dn*%A)Y5w)`6XQ>=e%F%k3Gt-;mtKh1=hVzoC&zSUP5 z6U1J{ZoS8p3GW%i+ScT*=^2_c{6^g?G1bTrz`93k%C+GJtCynPctZ52uN4!Yo!`}y zT2r2lVGMGzhDTi##u=DXesQT0%<4Rw7kl5%YkU^>FSB+gm^1sT==J?nbMG-!QwU<` z$T2pM@oF~hO?1p@6{U2YF&RD2jj1&?TB)g6pB)@(Buk~wQLA8A_CxOWWvSNhTZW00 z+KO8x>XW~>x85`+a%j|P6mz3#)A0LLg88W)Q8Z0Ft(lBsxBuy;G1X@j%s3#B<=jtQ zEj%g&HBIk|SVgO@a#PdJvb_eq-K?T{MLcIzD~nS)3#0}+_sNORu(JtwpvIcBiE5`g z&o(>QJochlCDn$}w-FxO7Vq>7)#9M9b@;EvZ14_WTNZWG)FfgSqqg<@3D?ICESymV zVpq!oHO3r|M>Gl4)Z-Df9a1Nq0ZBBc_Kz{GLH^)b{6y#LD&*J5toK6y(2XSP8s=158vzt^0iww1gEQ4{vjGtK?`|x zir`1rY{gM)X*ggMk{qRnc7}tA)6XikBGJM&&eUD;M&nGZl#lI9IH!=9cqvmzbWmC0 z^w_Q`8PYzZ$aB1Rr3b;;RvL{wGN*i2?A3Mflv5j9EhA1VsLh*FK?;nz5OkI7s2O6x7sG(HUO2p@+K*s|0o zOALOZRf81Gk0@FWH<7!G>0ZU?)J@T;>lg8X3)#y4Xk-DN6exV#2*+XAzS;1)_K-tJ zx@v_{;2`<&OxO%BMvTZfNHpxa&bx#1p2XdF`nYnkapkU=xpM#i4p;7f3S7CS}> z93hPnR%h0OuvHm8wnXm`ma*9iCl0&q^iebi@O8!^=`!tw+Sta}-U2}SWWC3$SwqBE 
zCV7tRdLEDTG3=L**^tk^BdhFGvi(S(d+y?y?G_q1BK!ntH*Br)kMJ|> z68IjplA^h*%LYz5jC@6IFKIZNM3|hwUL%*&QBv_4LzN>hLY_xVQ=bU*k?5ZhVUFO1 zPIct15NDW;>0x+cE~`cz#7df?PsZrujy^8*eL~+yfhkjR;$!(f?-Khrds1>JkW>@J z&Ru5UcCYb_ZVgwbJ4-f7MX8zXjE9fzhQuyHr5JwOdb7DA2Iir_CZ^gV4(KlwmAmiq z{?emBD;`G}p$A?7DKRgr-9iTz8w4ikv zamSS*m-}TR+m5CtR))o^8kW|=Dyg?H4SPJ%1e)4IV$DYxPS1+m7go?WwedHE@NJO~(;_!?_JUKQt)uP=QMh{+4|UEje?_X(n5jVE z1UW0J{Uo2Uzd0jglVRU>&ClOsLTHR74wq(Z?+QAuV*9GO;zx>9)y&E^B2`wr+oEF) zUDQpGwnvs{f^i8A#%&LIh$9onK@-F^fm-ctj)XBuv`N5MfBIGHhT&{B68xn42TIIchgY?4}xKRWP~?m3&9;3>M)G zai((@Yi+t_<+UcKofS!y(i;eH=Fh-7c0{M1St;Abxca5@6c{fB{W!Q%(35-;&01z! z*?k6t8$;`%xy>478jnVe6-aQJ#goXcO_|WosTbT}{3QpKdd2~g?TUUIf=@=`>825S z8CLbFjN-V<#b-5De7MGQjH?T6^u!zx9eHuI$J@AD(#76g{ec~Vy>Ro&y7*@O{fX|S3`NxL*FtJmURi%eLncO6~7UrmaRrPjd+{96pGFGldchbwdr}T0w z-eYs6Fh+TA2olF%tw1^(^S6)6AHh0|>l3N#w-NvN%9NF8Nm~d*b^YGNLi9<}2P}@% zp)O!=99Y2^)Le;)xE0Wa5S!@zee%i;x7Xqm2sF=i)K$bbSti{V`MpFXhE-+RSQP!# z`CY>qjwxv92>rYwH+oFep~V+0mZ`Jg#9W_31&O{Ctn15qLfpE}+XL4JQD018n*8l) z^7cYh809s0@iDkMOE94r9>t)>Z@J z8lBooq?~UXI?m{P$HSXM16thLN_k)oJ(D;sa-R0;eRL0NQBn{U8&+h6Db5wiJQFmS z&{w5Gux{aZ8)F?q_Rv@p-KAiRH2I@?3vWYSVvTNc(C}RiMp9i4-#KdsiEv#yQ0Nft zz}$L9XSw`ISGecH)%~j6Dhs|m%$ZWOiGytevpXbWPwrN%iixDRW)!Uz_a4%4d%Z*S zOIX#TJs2fQNHad)kPs&*|St#GDc(QN$U~^n@X{%vC*NGLIePA zVgt;@VJw!mB#g#jN6DmC0%Mt72#jWcmKMe|56&Ku;q9Fq1+cF3*0>YTn74J$l=M`H zl7tbQ=C5SF7=AB_dvzMc>dF!!&0y=3uAE8Mx_q4Z@U}+Wb0{d^nsB*gCW%P`ZKrJOKj7(Bc%PjgtqSi5vDOjn?`Pz!&o_0!{%DeV>Ofh6VZDTC>#=WVnYB=yjO`Gjwa(`tUdVL(f%Zv)Gu(pgtL(2$R7k33` z9X$MjneRb$Z#^q#t8W>AXj#)q=6BlgnvT6|spdjrpAMpXS0pPrBwW$nE!j*Y0NII) zgId5tpK(BEoIh)CN+%0_S0!~oXXAl*N{aDKI`VAwfi_cL&}suV({8THq?V>7xmuy& z10_^VI5>15zYiTKsrllQNsW|=nl2tk=>(7)Ucz!B}sd3qj@NVf#fCz zI@3G}+Kekw#=cB?SIR(Jq0#*{&xo35#G2;|-aO~q@XCdFMxCCq-d_~+-8qF9^6kOcIYH`iY#Iforf{}P@@qp5_w4t~QF_|So>djmTZ(tK?OZ-jc zV_E5$G=W>?td6&-Tq{XS(rk@nVzcSGqb{ukUXwg&5k(c!z0Qi$)}}3~ZC!&qZkz#; zp8n;AcVG<5R4CDxo`vSwR{N=*Pt|F-<0cYK_%tSw7&DRhyoPfR^ZIuZUW{M9%rJuTak-|%_GBG&j 
zNz#^$a!p+IWIU1aBfJi#Oqy`Mp0**C;t3?J>5M@u?VYTNrnOlvsSPACBNSE)Nr@Mc zq%-vF7W!$%LB!OlMhdxpqK8g>kwmT83uYWsTstMlw*CUS>FJ)r4aOKvUohhWkdJGH zNh;;F{sMfesV_hs#y&VLkm`(-F}rQHUZW#980H$pc3*z#lzCH4<2XkeozHM4_t*I* z)cGc?^9|DZ@^Y)sS6V&uxdbm5=N{!h&d?|8Ju=BwuN}>p*k|80I`(B?9lXf3yp6?@ zQI&bvasGSw{*oJ*Wp3}su ztpu&Kg%8p(R|OAi?vaPI21|4Pjr_^vT_(o7i;N(>3#U0=2O~fCLG_c=VpA)GZ_0z3 z*0oq0?;5mOSUJ7&AMr^~vHcrk=g$h!a}flsQl%7vOEBn%3#+HBJ@uN(DOZKtY_`zS zsr0lAn6eVdNcC7L2292Hd&=6}OBYS=L1+|n;;0B#PZgmj!!#k6ocRtl~yXDcaho&O`rVH&%0loqV3j2b-2W ziw}0(vjs}*OY(?6{&BBS9$^3B=DUm)&*o{wIC*-j)A>n%n!SIw$3NU@Pd}%NW-P3T z7dHDC3H`>jdTJ@F3EE6*S~^h?^y!|e6_vXnv7|>@MBQd!FiiZW`I>2&NrORi5p9TP zKA#JUEu1MkXtkNrpoNXI3vX>{-AQegcCHlGjBx4x#jR~bA)DW4i}beg$GMwEXHKXb zx^U*#cYaJ=w}w8`ugaBJk-K6!NhcOVQmkC+iCI_vikV|Js^24JK14|=hG8<9y9vQZ z-OIHr8n06cW1{C_sNCK;Hu0x&io~eg2<4O^p!@*Jeay;byN(etF$L-&gW8vdngB|P z0#RQYYD&4gp=(v}sLCmhCKARemn0@YLjV3qs1d+H!PU9&1#HKvV6$f25~vzs&H<4y zU?e!ymH5S!htt&l9-Vj#?n0NdcYH=xa9syC{f<}tg~`XF^4F0g!crRoQl&ihs=1&X~6g4?+~qX%8qI| z{27$23Wn6EpM$I=`Q1#pp_@s%WZJHkoFaZ3Tv08$^JOxP1;^Ow6o%N0THeryg9;sR zDMK)A4Q=NN&^PNDL?n#q1Yn)v04QfiD6*u@KFxmvuRZ(`lf~vIux}a;1G~WSI*me& z9?Ho);p{O?{^w-!gLCQ1A9fh3zGoRV#_THFg4IKw=i~!?nz+7DWxL{f|3G!4G2=#) zETvJ?iVXO~%Dd5o(k+iJjZj)`+k!Xpzubq|?ndNGUF)d_0bG$AK89Sa!lnrA^dV#D zmSehjYDs?cF=TO6#UX+kaHxuxPicmeO6ay9*v%T29a9g(Fp+VLCP|sGKZeR5UpXN< z*0v@+D#~xD9L*n0#)-LH<*3Tht8%%M_(qcTiMjq~@Tq)_#e9;o@~A#x&$vEur5@o^ zt9xvF3uJA0F&w8NCinR8;g?L6?pwL8lUC&n9a0L}agsb0NrmW{SV=AUt`Gq)8Wb>} zLjoRR%~P1<@yaIA6>Qjz@Ud-1eeOha+kI+;WJZ11(#ev9FPl@>eZ8-bt}?cy;t5|k z>iNs~w}vA+NPnwSS2pSzbm_Qny`SCXY@7^%8VYD2faFCwYS{K+XH7Bx)2{kHDKyTW z)TivA4mZkVl}^m{*pP8);q}Mc0z<6?lX1^SZ71f2bdSKc%{AfEmr_f-t#)Hey!Pp& zGj6CVN~2^CxQrjpEsZ=Gc{D^EpZFj2KDD9d>1~K&ftn);qH{frvgwGYg~Q%9S0xd) z$L6C^OFY%txB*LL51F!@Xe9-o#({Mg>s#?mh-zKGsLENcq&>1xbNDG-wX!Ql>TUUc zjBa?H?5%jaWmRl8HI^xbdpQoR{0U>va8-(;J-XsQuFx9MT=lI$F;c2cK6V~Nb{pdB<3a{^BLc_CS zynS8ZeL{FO!<*>J)OWm#AyOB2C%aT%{3X5*$9RSCC(bir+$M}M?!dDw#xNM8!WcCS 
z&FsRn7$YBJ>=MRKVGJ3@Ko+APj6q=x8iu<<-7!Wk#@H&1TZPeQ7(H2xZZLX<(Q6p) z7IhzCyb1rsYO?hR5I$}Q{Eh>4M1~vP?9|+Qgbgj zTa>BQ*djJ)TEy=Yi&U#V!x)M&@-fC9VaS+8qlPh@#TWu(L>ME6VcIt8i!p*2W2Z1o zrwXxvdvi%!(7h_xEN-SU_NMEF__^j%n&dmf*CQG z4;V~e4D-W}U7f(}6wEfk3>r*-7N!rF0l^Fy%=?`&(_yv<<^zK1HJI)!ObwVG!Soo+ zOD56-Y<=wRxj-9ASS7F&9~J{g3aLb%DQVhH_Nggy`k zgfL(T@AIm}2>)g@Y!O1Q)xr?Evj{a1dW6tp2=6t7F-|s-2kZ!s9t6VsgfPkPG;aNL zE)4h5{MbP>uC`R2VDHkl^4}Xmw>{ZpcA=&xYxd+;uP-5N&5@+Gf%)EhSQE(>BJmy$ zI}haje{VdnaCiTK87}r62oDtQ9dwBGY$8~}DDj%j@qFRAlA#w*Kz7|-NCEKtpkR7qtm*; zI6!?La|vGnMvqa(9VC1~rfr5ezI;UKJ|mXybAwYBW8`9tox-@$=rD}_EJhy~1Hu?E zjOz_!gj{F2a$o&ER_ou5l!tXr)D40e=O(POnNj4Q7n&eNhvHYFy$X>`2_XMHrKo*FQcl!Q)>F>*1+9$~yw7^8+UoW&RdV?-DuhHQWUkjmYgvEpjGYT5*BWEF2R(B$wHcWUW9Rbw*j)AhYYb%|M!_SG zQFaTZ+xTiIgISaTP=81ZT#T|!DAx$3-%xt9C_SL`38l|at}>Jf#<6Oh zJRZN-=>bDTlBC`HlZa{#)+cw|76bFw^|1CIn$Ga$)M!DP>KLJ4xX>}kDtd`Ome zMUGCg_2N3Xqs%LugmE52vNG>rQ90}*KpBnA5&JHcGd4IUoryQVsa|wcE6g2+|4Dym zy9`*S!b4X05Nqb$kam`VFBsgOKk5p-%{XhVXVFOw5j16(c-88-z=^ z(QOEmv(q%>CuYMWA=C`v?}RWEBaG4bh~%xG+pGyEAL;E8+S_?(l!vHS7HtT$k=gt{ z!g=0p<-ZkLUyQbkzSiqZK8ZG-IbrKg z7xCr|YIoYP}VwgJxvx&PS1~Zg}83bln zWrq#sul2PzhEcx=e{HmE7s#8rH((%rS&&{p`c<;uK;ERUlO0|q&B4VWn+5XM-0Lxr zS{7uogY9`$vfDu3sIQ|j$d7))z``kcqyvyQac_cqaHS*56+p&2_8{gE5F^ z4`Yx!1@cDjjTp#K7Gw~RVSx-A$VMZ|K@v$u%SE>;pu=gS$u?)gyns@DhS`(F>;|(} zn7xL%L73ySGQC5XzwFLn?jwJ!b0d$|40B>urn2L+=qE(uqsduPMsE=2@N~?<&!XOn zbvpM7(^$u&a3G604CbgXM-B7!!t9@p`He@>O^v$m66PCtbjUCVvY7o~4hnP7FkdIk z?&+BQzwJ(AZWZS1d9=?kd$O3_VD<{L*D(J|nBzL6Bt48Fe(@lfjk@n^2lI72S~JXv z)E1D8qQ=|dr<0WHE=q92MrMVV-aN zoR0aeuVv`mCCtC1)R18gWHI}}92DlDVg7~jb2{b&w=mdJh&A4ZPnhRZs?RWcvY6dq z_6oDtF#p{6**1-zw{6eVeIIMbI{$)FHN%`}%gSdP{8U7EvQ7N_GvnuU%)9npo~CoJ zF#nuVV?2s{W-*7s92MrMVZPS*IUVyVQWsKn-zCgHqtuXL4rDR=!BnD6G-#NwF@8?R z{MC2NzSsFF%-2$?&oFzknB8FZ3bWTR|J3-|>MHSFyXZU(M`2$pK(FD^ngLC;W|`Ou z6I;1E*(xTUXH1NN{%Ifsv{yiX%A;f4hjUq=VSq*jG-^PvHqOOBw>+Ey+9ja#cy!2s z2C_i?01XOg(18BL*cAiaJDLI7Dxg>MXrBS~WP!Q?>J?D00lmt2)Z#q)WY(G5*8vo?lm!|FXjDL>2J{N!Q4FLk 
zj#R7d63{DobjW}PvOxU+4cc^!0sXP@$blLrq!|79`>-q3MTPkaF#8O%CyUt)X0I@N z4fEy3F7`drxayi~up5o(lznXG*ZIdhS~H*tOA~DPR~*PQsqE58CQ#eTFEe(8JU*6Bl2laCHexerk^6nUD%a3dd=PS2L3nWuG zLVFp{_ZeDG7Ofk!UZM3G+DlcHv4S)A=6h)Q26G=JN3io;?oVQ14( zgog$Vtv`#_2ikzp1`N%%2du?tH{E1t^0(?}TZHyvp6@lZ?krjjv>u`L7#jQO!MMCL z?gt+*G&x>%v`6!xox?+uJOuahS?=ZG9wALTlc}xzBI90+wj!J~Uv!TU&gPj>Lm18? z41q8rgps`X|3c$cjIb<5*eQe;@eF5K@wY#V&eOC->Ed?Zl8 z;ec)6Q1WDh1|YtWpT_ORXppJZNWdUk#W?jIn$8p=hobPeNp#kyw}$LC52eS`{oq*M z)}Y~>QL^3qEW?)6VkL)M5Z;4AjWawf2VJS1G##;k57J6(ceETz&(H+=$a2ezc_ToESuxBF;GSqCydm--~0gM;~Wb* zy`3RLxD=JcRLVNl;L=2>C??LKd6ok~QpxyAipiZGCq+8F;#H-UR3lK$ z7wCOT7VBNb4Q0L1SNYo@hL!T9=de+5s0(U+AU z{|xr<8+h&Y^1LN3e+YCaizQRwm)GIp|M=TissO1VkVV#t?O372ENV_-R(*eNSc3{; z5o9XQRa~1X%{A_YJVn|ITNxUU)5~oHFyo2??Q%gC^lnpQe&hU&k zD0(=)Ve+DLX=osBo7ThD)prT8!ssU5O|3u?q@S2jN^Lx)U)6xk8iHG7>ux^URAIrE zjsS5K>aWIc5s5a@fBYIdJFBty@N^a@h$Lh@bw~`#M@Ww*NcsdxXU(uG0Eq=Dy{8qu z%wN6CUp;n=6(GjUtNAUeu`c5i3R$L>GpHt7Ij|xoS7mugdbve~jKT)3D$r*(Po1C* znf-6|4i-IF+8tQ74LFt2>H{sQ(1HqaU$gH^^4n8uVujUfSkzo{nL=Z+u~Wt;-$-9- zt1h z<(DKHBv}b->xEAfv6zC)62i1LU#|^lqsJd+nL$f_&HwxN|78x4zs&lX2ZNliplNHL z)ln*U&Yly_Jz(Da1q%;6=-@*Z9eUW|M^w71M;>+b;%6*bdd#xrD^{*r{mf@Q``G6k zcl-&@J@KTIPkG*{YofJ(^!#<}PdojL7o2(43tx2hIWK<6OV54T%m4TluYA>?y!yO9 zea&nC?9cz={J;FG*S-D?8#Z3>#y9=-n>StfmW%%8;p!4<)k z!RFv9cKW|2=;rXv>wvnnYH(+8S8#W5PcR(Z8{8M%AM6Rf7CaC#iTbUO^&P8rPY{q_>}RkOQ4ahN-}6wDnCGsNXA zE`RNpY(5&z<8tQ?EBod6t`an%m;F%eZl4p#{uowPff4Sq@qyia%*(RS-tOLZja8dv zYIe8R@3QPOQVfD}Y_n(de#GhhESHMeaED#-?9OI?-!3ji`gPy1ySK-eKX7J#noC<* z)l&WxUu}g}OL>5=b1C!i9V{JcQF^RhKIWHXx7y{PU)~m9-shL(mPRd9j1|#t-%8SN6yGsI@J-quiyCfSd zV*B&xNB?5Tq;^HB?vMY$?vkfvcOUahlCG@mJDF~QDl)60kcYqIm!w2RbGUrKFUe)H zHo5&?<10x>kurlQN9ItZ)FZxuuuP8N_ zB*jDug7cN!mPj#nzLKI6DTL40ZoX=UfUl&BSW!(va7o@rq&W`0lDuJeG>yU~Ss2Dz z&D3y7E`>!f`ARl~-O;=dmn0NeIZZurNoIoG)$A6RmpS2E7!`1JX~}|Ne0zs?ggd;8 zT;dJoHqZ+C9~>Z6A6cd?+|XjT7P=jDT5?rmP~yGa=nx8a@d9DmTe?O*uSd%tY={@m%f%O&y8{0$%YwB38HKe^klUK5vl#NPg= 
zPVvotb)IwY*FUj)uXdjN*027=uRiE+f0e(l->+WjRR7#n#w+~opLLD#$1X&-k6PVd z?zR0lFZVK+l}~=k=sdR+z58;Lrk6V26aQ*0{1U(V;kOLo#V&=v?zemAI92a*dvdnF z`Sx!b-i!RI*Db;ez1-vX8E4P(d+&5@bEe*9g+vlzIJTLd1Us^vt#jo!FnO&XiSG&J%S10+^hyT&8PIQ#7x#T?8 z>+nTF+0c1{U)}3h$2(sp|I>JVoJ;3-Tm!qukCvSIJmiyKFt~GgC=Jgx${(6kx`}t3;jh4Dh zZ~2wgVTsrM7vAfi;r08~^@g(8Wn%n0_T{q5Wl*OtZ}Xm_8xGP796*f4|1vd(N*^3f!=c-`?_5%^e4aRR}1{fK7Vq))46M( zq0I9qZ}h7J9Ob&NTaD&Am-^hZ37wz+;ks;&U)}ZJ_T+58`s$Yqv(shbgDyQ~zjwud z8D*?km%jb2w{3^h^0@cSS>AI7K4&QHj`FLW_J%g6?AJDQ(;{&Bhad8GXz_dRy}>wG z^crm$Gn9h2(GT1XvBq0Q{l6bKOst$d+Y!50vM}9c>?<4H@XakODvxp=Ya^uSV_c1F zLoKtqh!O0P{QN2_me&(uWA{$&%~0OR9Rg;A4=zj}Hy()(6p1Sip9e;0>OzzZ#nvrT}eacl`Q9zJbAv6;Bnr@Ew0bzH~uOh51! z)`g#CcH84O{GQ?5&M7=lIe0^$qTO5FRwQaBq8R>`Xhk$pQJ{+GVK#W8kn(s%(G1&` zLNK*13Z*B7#w!F^cqZD+9bRBD?r5?Cdn>FEVwGuEcqe($>@)gDE`i3qgc2lge|!HG zC7SGe!hEUT=#-Acls1{U8a7HTa`JrW8LLd8bQnrUMd>5lsZ%=Ip!AVSLMeug*iRX2 zXCa`d+t#&quBEU-fs5fcts2i!(eQ)>`Au^%(U)}w`@C=!#T`Y{HeNh>3dKLA;+)1w z@d)80+!TaQEaYm9U*-Jj)XdfMOpN!-N9Li7ZVog0KkU5^a9!73@4L@`Nk_7GEZcEo zr`bAjT*;1OL({}{lE&MYB<-a@U^-0Sy}fUG-|L&{*qt{x4VezNG@@}*!E=k+rXawy zYH@2(l3qRVoe|(BE_I7&4Hd;LhPtgNhNs61Tonvg7Yuoy?{BTW_c}*MvYj*`FvJs` zbM{_;erv7Y|KD$|Wi(&1iDmn2L~u+4@zE@Z4o|WL-nhVgM1-s%3bWHM3$DPqdzXb5 zK&tpx32Zwwyvw0sJ{8|ko!~a^yBJT2<&3{J$bh&B!VPcqT7KP`4ak1ovC4p2+V`sv zn^7DsGLBZshe+)7puM~Ulli;Uj?Z!i+W3g7P3}T3z{P!A5 z#%kr7wbdz@hBLOsh!bXj`s?o|&pMgG9XZfS_uw|2I&wgLpbC9{~k0+}n2%tUnDXJNv6hADn)t6QeYErZw z&DOvn=J&4suO2&Yi&RPa4%{4ZLBu^t^PUF=N;J*!a_O_tq5JrVzW)K*mk48JARm3{ zzb`unz!s1sh-|VrK#4Pb)D8Gi0ZkHhX5&ukoR!Xs^riiRC6VrtmL5E?s}#{o6LKr3 zWp>F65leTJ;N9XY>;Z!G9TyKHgPJWkiv^ZAfJX<;@}xbm)OlXt9ki&nKxuep5mxeT zt$I>d=Ye*dy2(MjFpxpCnlUpw9rN#0 zhd>`wh)^l%R3jUGY_Zm&)T zk}l=5Wlz7M@-T4#ef28RPwQ?2Fm`igvin17dQCBJduX9ua`Qy9mP_tW6u|6R1sEra z`$brB@ns>0tTy#1wCCnW;)3M}S@@m#>*8fJ@K+EKK^3FMG2 z6@|l7n_@`DHtj@Bz`|YYK_E;i*a3pv*o z=zKdZ$R7F-NpMaPcbQEka?4;S3Gwdyg`zb7-HIB;=5 zbxvWTv8s969H?kna+R5c=QFa|`fnBLxcOR9QDklMyCIC4UPT?&tb60o^XdV2)fPpo 
zoOFIZHB~k|>HN98Qu|V*bN2pWO&DfjhN7>sC73WsI;l#%(5i8GmbTM6vjyv%G}W0- zLJl;gnVPbhdgUreuT%ydXT^uZeiUWPL|Pg9Z>8bOVn3R+<4DpxL;Fg#nZvWvH<&}| z9k`8jB}ulLZP>5kt6{$|8K!6Mi%v{dU173JG$vAn#uFHpH`mZK)Awj$HWmwW>k63c z+cLd@wmS6&ankhCpVL-gh*3xSUpJLq=QDZr*lE$6*0jbAg}>UjJ)Gri*%F$5 zvn{-2IlCRn@=DD!R?cc+WVnbb`{Uyc)YS@V z*5<($7B;p_^k`wnY+=7E=Z;sGoBiFka#x?5{qAei7ce&)4)*+#gWrw2Mz;Bjm75J+ zd3JVn)Bj!1arz&5L8o7%d0x5MkG#m!|6H+u_j8>7!!PLcYc$Vm`VYUz)4%&N3P)Ay zhP&GbQP9ha~&4cTnn6lc|ionMtl#n)NM zb0V2I!%i0`98A*BO34V6d4m||X5okDNt+L3zC$gMW|oP(P#$?$R`STm)3WT!9@QCL zNzYi)sd-CwYTnLOFaph!azX^TqR|PZT;o0dHxE#bqe5tu}(v1lg? zb{XMHWO&>W^xgXSK`B{SvA|_MwkHI z{}#J}Gu!EA+hZnZr9VDR08TrQ_-i}S;t#G>&clhDTwkg@eUoj112&3DT1{c0`xEek zVha?rE0CFn*~|el+u;D&Ufzc--5=bW?r&(~wyoSldqH-z90a38$Yf;ehS%a6&%-<1 zYr!_kO(LAZjEjQ?yK1woLfC}O4yX= z(_`1)s7pC1mQT+a`L}tk>6yiW&6P*_^rQ=1{Scqc=SZ!b>oP~yx+xps9G&0hT24=y z!clDO4b;YPj%K&{lmq6-oFswV-oarE#AO&GZu#6Wwm$4J4)Hrj*-!225y`jmAd8bpg zo%dXsy@5O-=~U-Try4fR^(|x@tjf3)kDozR)!`Gx{Rg_I0Q8e{UYKh82R+BhaMQ2? z7n*SdOR?)us`1}&NWS^b9GSKmKGG5|Yxt}feNgQVUE6!nGjuUSNX1q$gv$Gr?6xx+ zza=f-_kgkq!cD40os0vpy9o}YvRJtq~GsjF2-k zeB7*rWV4;xV)k&=HZt43jAv|DL#I}<9A<>&Glu1_pr8=^EiDjF*K@6smuDLv*??;z zw!roI?ZWj@Ed=b}91m|V0?ki*}@cfd{noU7{z#TBup%%wS`-G zZ8+kxjNAft!DF=oH!yMu*IdaQ-PU|$*_w}N%|&ryMaFU3=}qIz=*>|~hRa58yw2!t zW>@)gxV$~XWm=+L4X6W`4IH^lq>wop?F;Br-GOon1IfK%XoHgFbuc47TNvdEs|YcN zy4Spn^oH#UJY5S>T!A+X#Y<-_^b0jHy%HHri43NU47Pb@Z*h5V`{> z$-qvsa+KZ0cVRw~74#1ECaf|}3H}uHWO@g$g{RUxkgwf|!{*8_Pe@F|zB~xm#z0m6 za{5TyHH}$A$cAnWtr|oBdU)UC@G>$MEFcAcyUp9nMdXwAnnD2@RK~dEVM3QYOxPt4 zH+hVPB9!mH8eg}J*X&YHln}XS)s0wvA2a47!lTC9$n0?`E8_J!S&NiCh=LDTw3pG; zX@+N)Y@@L&LESdVph~NwE(esHSBMujVwljQZV2s$sH;{oQUrA+GdT6bT?p=S2)LhW zqjVS3epGyQq|JDAU(Q^Q==va4lsYe@MGgf>d(yu)A+XCNVRxjpChWkN5M{Sv9D2nc z3^iFMuP|(ro}eI?MmfRSJ1GU>_q)n@C*pA z9QCDom~{7m25ehXF}e~BEU~>HU9t;Ew&rZxbPpga)Do|CtcduIpIZV1H+7cxd0^U@ zI84!)I9kSvOkCrhzdmH*=dKT#cy_ddF>wv-N+==Y{KmvpX?02fPGjQ0iCN)<4T#!} z5>{p6roUYsJ*>dQ1@kIQoY$9S;x<=ZnD{mZSf_b0@ntxDm3%yFs}q^k_f;YDwq<-= 
zO@J2yba^_{x$A}ei}spAAs;_?J)3WY;tPi2H+mE&6Z)$7xK~uvjVy4i*qo2!QIejN z@o|2_zLSRI;|=j~O|Mfy+W7cZ9$yLewy7Yhv^w^}pD&1?!N=#fd+bH_v|+DWq1@pe zl#`EqX?UK`R?~0$gLEL@f)TLyD$xRD?mY%F zUc=*bwwCwlA2zTyVQD~DB|r=%erxT zOPHIo*{5PBVd7i9I>JN`%9g`cG>|fy(7BZ;bXFudYb40YUeKP)l>%*m?!}if8uxUY zl$lyt%4lF$LZ@v~MwM2l(<_lOY6UvIIw_+{t5fR=QbrY5kuto#tdvm`P@!3Jq286b zG&@ZNm4}Y59GkO-Ix*QdbmjUICq#mJq4!)(fE4`gB4VaY#GJC%6bePm6e4Cu_&sCz zEoI*@ywyN|KqOw7pplt?`sB9{i9d{++q@u-@ol=2b7EUr%GCrfC0ud~8FZSI{FT~1 z$qvdeS#&Eqa1@=H*n(npM+xSoU0uDeHpJ{D0%g;>((pozWB9)82z?%28Qta=3hH!X~;=ZE@3+r4gBD zIHBAQBKA*lm#C(OO<;DI9y(ukfF9SA^Fn5gy8aSf?~Ep2VRyjZCpWG3DBm-Hs1Mpd zzpYFAvg5k7UswCORTb2+<!wsrD(SvaVm%s*jg$NA*bG znf@2>zJ6$Pf3HB2om>~X?u-_;b5s2QLzhPz95uA^iiuseWC1UW2%Nelq)BpL*oPxW z5o)1NYnC0^Xp^a`4fDC+<0j|y3C3&hl${$&7<1X>={M6Rb~@x$?A!9~T=eZMxONuQ z&LZD+RA}0{Z_B*byc*W(J7RYCoc}dzf5=@jZ~jLc1FXOg+pc7Wg3@t|b`ijZamZuL zP(`5J(oBTR=z)EJ2h+9OSo{O19F3h3o1pD?>3gNV5MSZT6-q-5MwbGmd3TPhl$vB-EybU0xo zyu><-pxsJ;yMEl0?opAh_i0b;uv@XP6Pq#3_BkhZJj`)huycbNb(P3Mm`tvw@G;oYsO9NPR2keJgXW zl{vL?POWf5dis%{W@Um*{;ZWEDc`s9jBjP$wKA_(7SzfDt%O?m2o}bv=zA@vT$i&o z;&0dYex&oWR2>xoQ~S|ZqfZ1W^-t05GraKt{8~aJnCzXyPD&wIiZ>p)cl&@2#D&ka6I8# zn{=&Bs{>NC@M!j|n(|u>hhr<74ZE%qOxm5tJ>_rcZD<22T}U0?)J*2gI@ycYSt zA|(G;1mw^5Ge`cm{aOnE6JqO>^NAkw@vU7Yjx{jJJ-wq4UqkQKU4Ar5B zd6li*QOctOsQ2C4zUbRtaBVNB?M1cCZRN{YwD4Skh=LJ#2oFnkYKdJdOKQcA_#Q=u zcCu%dS$gZ&vy%`kcBu6k4%cVcuaC|L>T)&wfYCF`3g_7us>!HRO`2#)T`Os_b&Pyl zrLnA%v`_JHlpBsHT6<`3G$ziIg$sofS#al=f>DxWr%axcS`wHRgzGUaBrlb>WcEs~ z8y{A}lqxFas#yEXzhY@uCgWV%)p!>9lwTv{1!`g(@#3tZ!o!l~`oKHX|BeHJv-`%R z({@rTn)#7%=0^g`m>3yyV)2gB(JhH7{o`A1#}wH{4Ocl6sOeF=Gn)9cx1+FnM4390 z9oo1;d&(P9nMax~AR`=5xx2xX)9Ia-&L)Bo5unS8&)Fv8jOqQ3#WO7VXNXcRWeye> zu9H}tQsxvuhX9-v1_`uTVX6&M7>dj3MtPiWw+y|8=jh?o%7Q14EAdLjU8WJneAw9- zDju0eT63s^g)_`*TU6DBqnKvP_WZ7z?KuhOK-n2G5#JZkq;JLaT3YF<*M@9Y<>lw( zJf}o9&p5|8|7>sB+h|hgPmZ92t|^zw?CSRLQ;sNmn?Ai&+sn})OeD02XG!k%;aRj( z%D^P~%gUwC{4w#Ef9jLmI;t3v$NdRnL}Eh;12A2dX?enFk(1IQrI{)Xaha}iqlu7g 
zf)^P|!K{Ssv}1?g)2`(zGIX^k4OQpYx5*>Xl9ovW?W8^0IWJgghx9`+BXnAHDD^=? zUHWY?{y-BGTxtvEy&NJ-8Wlunw3KN-S-z9OXHX$@yFq2#Vt+wSs zP6U%d+iJ(6nVMF$Z!*y$n@lU{65?%Zh2>E@vKnRYE&cq@#mQaqQ4`yi7-?@YuM5S< zvKsYx)VWTIKt-{!B2$9sB1x!U&JU8}xkTv?v_6tvTB6^)O<=%McATxoO* z&qs}WlNgwLc?BF!@0D|*IP)buc`9+O^S4Wy#h8Y}^q9bM-X%Gskdlt;O1_1@FEu8A zXTkZ!gl)_);Ysi--!Cg?Nx_6Ig00$cAhl7frtEB=kt!xONnk_l4%XL3N2;q8m0a-a z#}if%XtOoxWWot(T|*8OyvDb(s$(-m=A^t2{!ovP_u)s7S!Apj`O+EPYh?ik;0G?1 z2zk?x$BvbD=vXYF6FUU#(#uwuuEm$}cJXC8bb!2z715z($B^*HZKP)g?QAU$X|W6& z)e}WBqBbMPWnYv3l2%VK^~q1 zNtY_Ks4nsereEqf9Z=a%3oi=;*Yg_hyp7kK$A(@+J~Z?zyyD3n3U>87ijKUXdn%Ol z`@Hs49C?xVR5)~7HTk&J+!@sq{?zco^Wl&$3zRhFWb+YT$0f2zu5k%Vp;(Da7>6eq z3#;+odBj=Bp;Q}|Walfuk|uUK!UehADK}ywmpl^@m5PFx*iM!&u**~^95-A^@DX$aOae>m0g?yb$RbL|la%PK~n1=l>jqIB91XUCZT zSYjH#A%C6!xXpVkk*MSWy9`OuB>8GXd{u4m^@Q|7@KyRDil6o*F(4E5nXA%_TNR%) z-0EW7D$g5i4I>ZJ7B5&9c~A}j4KcDt{B=ySoy2Q&{1sPGnYp03Shz`^aH-%j#H3Dp z#mH4VzmcQXk<>_y?e|+)!Re_e+#E*Mh%d6dQae@6f(q#I=nbZ(#$Pne&vpz+dhk#? zhD=RIKQ(QnNMf_(2T6n(pZtLKjJlN>=O^U|@~zB}{9rpi)Vb8Vx%R{|wW5?0S*GDN zQ+j^RBCTd8SVqAD`-9FHoVK$^MG)&$%v^UzWmxJlPCNci6e^q#R-@E~8mdf9le*BJ z$X%?+Qlm^Uw)7~vFiQ-n9PGP6fEv6-?y%p8gt5U3!+idtdc@~g%yedx;KAYx4P9+S zn}+(hnNcoBpUk@yxz(~L5@&>*GL|?}dB^Qut@65|GwZTYqU8;W)4FIUbIOVoxv;iH zhu2zH9k?>8tO?mJjw`}%1NVoT$sIC5Qp)wd4x?&QrLKo=+e6K2!3`P82!;&D*N}w@ zvo<3X3SUoe!e1|bbjtYAbrclDC)m|7zJtduQ3l-{2(pnsBS%ri71YkUrR^3Ct`607 zW@R#PFCk1C1W#(XV)EWF!#B1CK(F$Ss(T8k9JA}0hJ zsAKs?=8l$cjAxG62$96^3)&RwgvmZxFRVgGFSWG)02ECW9QJG|7o@9`r6d2H8~F@V zq>#!m=a7o^=B6q%PIq82qA{DYGjbp>vnoEj3s^aZj`s5@b1qvkwJuX;b{bCSV9NSC zrmTPEDKj4cQ#S79t@#E(me40a?QwRwbxv7-fQp(j^&*B6YW1Y1Et|6bB8f+5$#R$} z@o~c9;7r;+j#@Ad1ff|LolCE2W9cCvlh_tpW9dy=7}+?9;dhF{+2WthmtLnM$)j6% zA=MhPhMWP}Met>dWE6W8vKj@SDch0{bHpqGY;k-1pZ2v^UuW#=Mwbs^xBz~LfRLN0 zd`1!d486QBiG4I231C+^rw`kKh)FNUltLwn)hWw(+{4o9W=47y~a14av`uE;Ym z{|V4#z7rI@Za!?=YL+g-O>tt!7L-q$d!uvP85893Be(nAB-;;|ZzpZ+ji#VksXS5< z#JigTQ`C@%a@vSD1S?oE__cGa2$*xytDY8kHvpN|5c#ZEuReN{jr!P#dI^EoJ%PL9 
zTQy4NjK7_zr#GOQg(T(A5VSD-wvc4!ITWfhW~maR{EcHv!HA*1ZKPlbjQ`=Z>S*FuA2$vQ_S(oa;bEf5S>E%qznCa&VTrx_&r{nnl zI(&oT1D2gay=Np}#mr64YHzpGeYL@SSz1FauXfCLr&uSQOsG3WEz?Q5JE^#55VEhD zH_xr=65E!J<(mxU>JL!3u7`j3TslbEu7gF7OrIj1P17YAr>AU zVHrcT=q7DlHoB?uZk-K$i%M#Jy&HARxjV>()Aj_XLQcWVCxjY zVvl~8)DG=hBmq{h_T;usmpbO5Y9!}aEB&AMw4+!LrHLkBGVonVMs4$FQ{)!)+x6tG zcfu^9pQD#-EDR* zia`o{9iv>Bx`_Jt2OTLA+zrl~e3k+d)yNp@VNaq;vfv467AQ_PGYsMDXa|`R z;BiWuNrmiELMXFFCx}UEG7QVE%yrJqi{+w=-fsI8^oH7 zJ+aU7P!R|-;8uzyNfghT|9?q%vjpBucdX1N*1DGYO;BfxyI`1|+;@Xzp8kev25!zx zwx*gg(L0h3CuzZYHMhdy8y%LIX4;{Nw(+=4B3|lf+@TA}w{umosjMw1wQUa7mNRL? zn|C!1Y&ni*E*%Pp*LS>XAS+~In_S#cxGWq7Rm^~4X2NTdHQR-+?OFzHEEW#{=+FT! ze$Lzie=Of*e{D{ICp}-0szy;93Aio9S7zWVovO;lysLhdAZ@3wZEj>o1H0{s0QPfg z+mxhYPeeQHYEVr9HKz%L99vB>&F0nU53V@&U09HcF1*_h2Y>}&018*pPyQL+$RIrB zdg%)4F+a^+BTt{yd27qs>4eh^oe)AX4`#ulE`TU&j3*P~*k;Vii0nq}4y$Y_gBy`W z=@Z6%P8IS;G^vRHxDagIxX+;P)|?~WS;9*sVc{fMmcz5&78K-SKT$T{uu3^5;t%BF ziBv2N$WHpDWu=4ggqhB^dt(xHsBZ&mS#9D%)L+z%W$l@}nf9d1Q)W`Ar^zytv3q+8 zD}60L62il^s%QFVm6{nG->*9OD8MW7skB3C4Pb_8K-pk#1}K2jfhqzi^D7&Hb*2GP zNB&9+R4Edu6kYi(#fQSptJe93(6u|S<(=Ok*X}wkAn|rXdA~)s4@PhkjjuI-5Q}aH zFJ*Ee%j&WS02h<28YgtNFpX&`jUm~xpq1zuQN*`5%M{v6MAhRtdh2a@!dcGy&M6}eE6NOAt<*{s8zCRyyT7^Nf>Q1b0e)Z8d zbst%6l~&z+^^M2c>A!tKAlGgaVjP8Gbmb_WSUt#e)Z^J4qqC`yvL@u~#yXa_QA}zf zI(NI?hw2)Q&BxnY*;}x88^kn@ckMsfm3i`J)}oRcY_9Wy6V<|51r?7l_0es>9!;uRw!G76I&fYD%e74xHMKM&lDGFv!ZO~rkIERcIITxM<{w!+}tEA4T$j~ z8PiVYluiGI?XFjGXo|w4Z9}bTgJ~yo%9p^|_Z`eBTZA31#Zi3%hD5ITttB{hvIEtT zVkQNr9)B=b2Yg%Sh4Bn&#$i7CVvyXlye;Xh1*7wAtg5UmZ$mmZu(C=8qw_6Ym2$Gq zwDpYvqw}q-s*-dvx^*z`D1N+{7m!vr%4H|Jp_cC|&D-N#9kKHgHPh8qsC8TaU}53NRc_p{kgM?8AP&9Mjc4Lr#+7Ln`_jGK8O?7?D=zz7q%3?yv+at^;h_}{Cl{QZx;@-$toe3o z82tdNlH0;_81)`T!dSAvq2>JR@svRxI%8apDw7um4zttnG9xNiRsv(nnDh>%?^yC1 zRz&*$Ewvk{>QNzOz{CPCQrmcL$d4{;HpgGJ*ApK`p_65n4N#A*gbheon>umG+e&zpRn2m-k0>`9jZuIUZlAk;dTs@7>3 z8nX!kxG}^o*xRRN$_S-f(twC-a=kR^HKK%b781ugV%a`)t(}0-=e@XS)AHOBfVns!benl!Ol4}2O9Ycqtg2@Y~-+)%h9l94EbJCr^|v67kxv< 
zR!DoNoA@EWljl;<$r1?%1?D%|Rh8{fJFJue58vo5%CC?YU-=dI z;{6hI6?)>0qFsrv!uO%4;NYrSRcBJ<1``;DRDsa&Ev>3rU68{ous^ewCM8${G3MJ^ zRpaW+Kz40~#;b2-Rb4AIr8X4KQP#E@f8iR#r3!1O71)0&`e6{u_USTZtL}aZ_&%3o zN(qHkLmSpk+y1!R_L4FMnp?IELdSeKJTpD5bAOyn$BfwI3oR4?Z|QVb(R>I^YFePL zids;zR>K|7_r3B{V2_?N6|1Tnoo+LUL32x@T|j=a#)|vZ*oh zY@Zy&z9IW|H`QG=$zWno6X5h(f;LdqiX)_IKuCI;*>B^-Gu+|BmV&NVSDS(kCP*z6 zxZU#ad4tXp?9IC-sb%Ws=#fojaD}t_;q%bk=M7 zvkuN#!D;cWIW)J{Kev&gcU>0)N*L zt%kI4w<>7S2x`8|my({9IvQl5=cBN083)q8V4;2_PxuzcU5n#tQP`tBPVFNRfIJuY zVVtEa_yc;1J>s~N?MqRjN9*jW>I^;sU0anPOy5MGmQ@1jvvVbCOns@%FToe9Xnap zI3c-!LSKu2l0+YpLwCBc|02&=aT0K}LCh;zWOyHpzr-BwDw)YWyL5)Hl zC5^&%>zS$MtuyShW!ts~R>aZ)6wJ|5Zo93tAy0ENZ#xIp8ki?w_nm0!2 zd~~o?-wKCJ`=P7!f?a6KmuKut{M&Vz86)L4-Cjv3Bg9OT1Mx2^|Ej*LbaSgjS`Ax4 zhB;D%$j@!%*C%xyUGqk&)zh;PB=TDOdQM;8z_Z!tK>R1yp?H+k#7O0ht-Cla<=JP~ zz7gWj`r1vwJJr{2B|pX2sju&8m4F(F0yJD*jRR~rEvHd(*ZySe_;9)Bn#gXbt&f!N z#B8ajD8O&xgS%?)r`@}5_kb8o4m?0{FaaDeX&kQ|8qml6LpasaSP*Q<_$IzK{a$Zo zG^cqm;E{6w?fRBJd|R30iuQlC#g{Y8BNX^o!o46phdcyYAz&V$9_@x0fUofHRVzTg zCM(c}{iYZL!_+R(o1zGqg7zI_FoU6x1;c7_3m`3hSUJ9r8NtftSjckGbQ4ck1D zVoI*dT+7w4XKrj`MxX9Ld(3g#z-t-w+TT9t^mFZKKfr8v3Yd^XYb}-um;q&Z1U$l2 zX+XZLx6_tZo+Nci@Em6tuxIjklAbQDv%wULAk5P#T8kEO95d~S zLfIM_KVHX5NP8MJ?WqAF>0k01S%feo?70WviyY}wbwX}JLJn`|P7{7IAM9iwepx-l z;nAIGbRW0eOEjLmh3|Mg4z8(`%Mttn(Kms(BRlpMPFoVOlU=woD!<)sB+!#(r_kBH z3eAKPc%A3Q4p+5_Fozvj7L3VmIkD?8vV})`AyJ102u^>|bP)@r>XGwgx!99G+Q?8> zEKcjI-IpTWIq7_S67fZEuw$MKr3f$=SHM_izk%gkdgyk}pJ&Hk^FjmH@z=yqUEU6= zaGP4F%3!{C<`u@Mo-24S3m{Ba@AK^tyQRdE^S?ri#Knp%!U2xLw0ebutxxiamf8{M zoFPz`*_>PFoY#u+`;~Crxg8a*-c0){n4X3ay{lx7G7Fgz}*xjNN!&=oLA<)JHtzOH3txNCU$osybW+jgk z+O4qKR5zpynYrjdD%tj9YA_}-jCS;OJ$s`X_qGN$w_*e_-w*O8;b$#=KNur9s?9~n z*~B!vH?-EV5ui1|R3`%mDP!2b&S+$~{Q4o0)Fcs~pZpdArkZTo8Ab8CL=nesf}HfT zwLblaNGFc1OE@a8bdX2sKl&50l69nz!5@J40ln6A=>XNFfxgo7i zv&GDA!qNEbvyDS7AgS#0GpU?uO{1Sd#$GXdec!=kux&Cmm88oZ9I7M)B8JP~la?Q1 z=vwuX@x0M8zV2;|8}0ZkpHg||^f~#B2@<6*Cd;@*y6aG~ 
zz`6x({a=IItD>$hY#+jHtU~~X)+wY|-bczb_3`{xl0g;eUniW0-+glHsSFiPsBzKCNb7_idF>&bOLU;{>)9Z?pEq?^koVNZkG3u1dd&VS-? zsO{ck{#t>&E?FPHQa%Qptp{2a-(GSZ`5PMYAhv>X^AWwn2jU6l-VZgeR|Br}ro9oU zYxC&_6${M<(yv9z{dSH&@cI+ z+qML=tCkOaZ4;D?z8@+oOP8>7ADMmaXyM7YW`A+J#zNv!NAMNZjTgo2TP%S#&Wi46 zV^*JFh+?sG(pBal=RX%Y7ussB{&76p*bd5*6J&WZ(C?^z)6SCmr`^Z(n_(!wZ`{6f zDj_GR&2oaC`oY5f%lD8uP-wNzje&x}YXvvHkY$;g@esm}UKjl2JN=#Z{cA%MPbQ?N zoZ`FEU0KBUReU1BK)<<$wF!B{P@EVmHD*>z`n8hS5Ydpb&-SduImxdz&M9K&d&W5{ zo^$RI<;nbkCk~hI1R=qdrsxjK*fG~z#V?1;yT#98J9Z=sL$DLbm_myJzy#!QArfHj z=C&cMwISqeAHv)y(2eV>b4>E?Odr;jgU2w7ZhMYG+Y)y7*P6yf>QEx^B3mgfgocpmo+YbnyAMSF%HG zr*o%sbW^fH+XdXwi zjCbS3U;bblFDA{^yrBG*TP$bA1H`x*9C4T_GEV;1vaJR%u~mK0A?TJUK!sJ^%t4_NHKqwwj%&4JWU*FpY!~WHf2kFO5W1O5q*}KuBNSElND3&E}hT{u21xP zT2=5QNKB8ZI0*#&ou->&1qaH=yKFXNb1aY6v^*4{x zIWwzJwQvt8>hO7UI-J+;c%~Oi_9o#{dYWQ4#EW(GVpARGrE(6WcOObW6TR(@(go8x znL`0TkKy(e*NhG*pqX^htUWv!0Wal96uB#Kj3ty9=d_8tjx*%1W^A2`Yj}YylpSl? z;2$END7`!WnOq?R5u9YE04sl3rWTX2Bh z$~LGj+t^ZIg4x1t%s_KWd#8PSIxe0kGivV?-*tTYvh5t0t?N24NC`7wjAI;55afy| z&hiyBJA4uMwrZPE$|F|BX`=khG^#fbNj~D;cS_Y;TG;S7N6e1O+z;h_$+c-211ebu z|4#3+l;Gj=n^5I=AZ_bD$k+GXyBYgy5)j25%yl8)*trXQXEY59D)7pCFM& zJS(7Psnkh6zRr1HXU^4`Q=N0Fa}H>soC7R$B_JPP?HONf-c_4dwFOmMpsP;u=}qwI zyk)q27R&x@B`rJcK@eZ^%`CcR7S+s>npvWmPJOtK=Y5>YB<8T*7}2PFO)Q3X7q!t> zpC_=RDl6`)PjEmCWrFYN1Qj}UV*G}K!dRyan~3g0U~E6%8;wFD`aGh~nx)1icc$@! z55U7jJ%>%ZtOA8fEs_PI&rACBrSEX1FDgOg;x(uPKG?ekCpR&(oXtDc)tA0$?6n#0p5v?%TdkA}= zea`hOL`(;?(iCY$nj(OpD>@s&G96ZdwyptnpHXwphxvyZ1pppn{?wK1QJ!Ft7N=S) zLw6}?v z?2#J*Nf$HRQ98`m^ze-;n%tP&93Rbf?h?Oy(oz0<#{)?tK8tzwa%n?4BS{oJw@U%w zO7nyDQvG4#@N>lBD_bLV=>8?#h|=GZ=g;g{?uI~-tRbSXE3bK-oEi1B_Xjp3Hi4~z zvQ~lCk`jM>46c%tvTp>flS+KLH>qGDdQr;Pt%yzn-F!=Ej_)-9Q*P`~xw4Nbt=&5S zEhHs|DnS=I41!aV);8lB+_RiK=4$%u{k2N*c5x_~jv8$$XO+)5sj%o9%kuiuR9 zFUMaV93qyU$dmG5`taL_YPfky8BC2ND!~U%}WF`B25TDmvFo%{3y}&v!z3? 
z8K=EN<-ybyz<%@HDCkN250fcZjxb8!60~G#WCffkt zRbrZL>IJze&IY$1dw1|_%fW}EK2Pu)ZS$YaJ!c68bKf9paflRYhQUg;N;=wx+1Cv| zRl%<_f0*rj=5qvTCZ**t%g!KWL?=#H6lvNTH9uS`l|>Z054Ors3B6keN{_xt_WHRp z5WpUMB2t+6R!yJ1;m{o=xskS@yrlC8eb|ZqtUnOXBJq{85}C;l)(_G4&PeyU=!t`Z zT0I9mJmR(@s`v}NLx3lLEZ{`VpQx0p50~Esg!^#W@Cd3HDIYG~UQsd#HL5N{rn;-t z->Rcp;<)L7x;R$gw(`v+ijX0*hf3A-R@j>E#I52ezO8&KpTi>6kLq~PdU;<`9jWYT z^%My*c}L_mYKWGk(xVI+`r6v6o0HpMY4se3bGQ_lp7s=(>PUIZ5JaRDnLGJl{&oXH z&+Q%*0p-9O_CW1%KxrJqP%0n0oz*X9LsLY2ISqsU2wlhlYHI>Z(Ol51G@D-gaG5f- zB2$KFkTPY62U(^JU&P1aZ`EOQVns|Ic``{Y{ziR>AZIn*caZt6H~+X=stQF$i9hx7 zq*wfMyre^)IN~E|7z90RAq0ZH_>szW`NMvuwO83rI>;#gv))#I9Y~Zk?xqZm0@u%NWa%uFYobrA`QQ|MvdHR zGK3XSBUXsTbd6|}Hx2D(Q`VU^uBkn0Dwu%pvB^4(1=SaSO(PSb^va}aaR?cL?IW7w z8BEh=dOlLnil2{{wP8MZum~CUQ@j>`f*IWzT`YsA6kOtY1`2>Iig7f6=65|bOXYlI ziXp408`_a;G_=<*8yf5*Z_vi}+F)$=`>_e1)R2Q4W*fiv8YV~cH(C+<`8EQzgS-)? z-$C;JOXa`iXd$ilCbd<$$B22Z7=_M>WN! z;y7wW9JK~hfZ6v9Rbdv!Q7ew4R`~5Ws=hgnY6{0MiJ#WM<%;-eFyVPOF%_V`ipbq2 zoSx8+0)EPbXDNpr6J8NNB^c}YX~j)8(RxjG2GSn;=J=^Vi+|}_jbRw6z{hT>Kd>sz zaX!_?+XHTDlEtRlc(vwkdEOyxRC$NYt&W&r9mP0n60OEKYcIU)`q)cRc75#HmyqoG zV5wA+>`H>v+l&p?;tMf&O1|s*R@u0J`HoVLSaN-56!V~txL{ogreYt&f57uyr7f+J z6)mYKJU&u1p{G#TDY{8Id#-Akk|R7ttEnS|pui9GWE40`u&V|MVhuPsp4QmY|BpY< zc>+Dzw_u%Pz#vs*PkAKRKGYWpvt@V zk=QZB6^>ayF7{>S4$|w-*2~)Ix9BWT8sEV%=;75@(KL2O>k&oDr!8y5youoowG>wm zBp~2XZW6WqDt^_U+@Cyl`1XppkmoD=C@i5;gD~JNNlo&kj98+n*TPvy2D`2NwxlM` z@(!xi(u+zLeye?5($@!hwqPx6IuL(cSYKx4Y8-Ui>MdA$-r@ZeKX1h)rFzBG>GLfu z$wHP)0;bhl@R;6!scV2}z@}j_tas89Yxg67f>z}qGTjyOJrk>kuTmK5x;OI~+Ia-T}3NGWXYS*_%q&d$m!l(F3(5_h`y zP;+sxTz9(GTnz!L=ExrUM z56ir1=Tj^Ha)@8^?5j@BZLj6rR#Rd~B8nCQ86ELiYxrhJ2Ng8_sov~_xJs2NtELO6 zz%uaq*%o79W>>WW%_STqWcro$S3?8nqioQXsu8wSt5@Yfu|aL+^>@4e>Ae#Fzw2e- zeRr#2e4hK{s!D03qn**McJN z>9#_^RQrj*($*fhDb6v51M9HXG^Q%BQ|lwh(4EmcWC2uD-5x6oRO5mCMf{kR9Jw%t z&Y>lED}i%;)neiz)mO&NTf!t!)wtiBHFwhfo+=%P-ve~JN@q*q zBoXbkvCW~$o>Ln_Q4ZkaYdUW=&+`{VNzd_Fi9f8v=jdUcd*={ks?8j1<^3dW^vpSYD6SG6SpWwAJ1s25v3~KlM`}m7nip`buAVl+IZ3WA=Bd 
zoDJ<{IUJg%RDH~_n5T+%POJW`)jZ8#4fGT(JS{bzfz6tMhC`F7O306K;2B;Y?;{5Z z3BA_r54`&twwbv#g0~)LVV91g|I#SBOA!UqdNPbL%sU1;Rwd_a@nNQVSLumrNEQM{ z=yI&u;w;~t(GKXhVI*+xW+N1BU+pLoJZssX}c_!QT?nC!( zmiSUrOSs_^#`N7Wfr~uE=}LNp%FPmot)>^P=g}JXBJg%s=}fJo!$`WU%thLpmC2}E zzpLq-;p9>N*5XGE4d&bMS%q-tmxp+lF64zP@vm6n#V&;xNblNTdF+PT2}rY>b5+*J`@aKF-&4!s2%u^sLB2G_Fd8ED~~08hkxw27XX5BrXq@j5@*!XthMxn z{hiK7cV=00hGuB~a{|Lr$XV4tXEo3AR|qx7XTjdjRYX&^yptvc4%=U@AC14w%sBR{aB2z%`C-DhQ+qBhug1`DTMGM&^PMZu7Ba+#IdXzDsrV{~@6D^`scc6we z9Ww%-f9yCVQC|ocb#SGEU*G~3s`5bYcmCd~KUhVMA-y3)9%I{f4ZiC`;;$vrFw zA?hDC0~uRAnN~-=9`WSN2pzZ=c36Xt8DFo*Pbn4z>yCdJ-5)Z>+Dj^-T}WPiW*1_3 z;GW?R8nQDB?<98u29&a@$THZ%p3FRalQR#U1;*A{O^PfaN@4`IuViw$l`K7dLEmyym z5YK9B`Mj3RAbXLP?H`vf8?Xn<#y~rkMP|NtymTip7V_!|3xoNIm6s!M^mL~!z+Bbs zqG3E|I^w2{2GF%FN#W$h7KW-Nxt|Lq!L8-w^_1k@E{>9Xyv%HQ#A8kOhsd4YT!WFH zzARl@9F60TUEjRcZ2ojGee~~$QuU<&@VAHK_>Z?V-(Svsldxvc+{qOTNGYY##Z(bF z<9s?^M6!C~+z2J<OP%miXYE;!7c;HeN2EW*o(15~Ywg_q4ssbcJAm84f+4bJ&Wsy}E*CS62% zns}y#fO;hvn^bSFh`;sTUP<_<6w3%cuEzD08DL!R>Xs32^B=u{%b0OJ)5flL8F4oE z0NsnejABaJG8SB~Ua+~3iOn_ApW{Pp?nC8_SQYKC6b=+VB{;#5Qua_(c1G6_&uDRf zou1$e4s~Doj$kdDG@GEyYV)njfu}8j4cI zS%L+aUoG!bo0)G@c@bpCJZ>Ay(Gj5!Y2FZ{9B{@^T{;EthI(jgJ`XLqnGmf(BbozQ zG4MbcH}dJ~E2D*U(ek0UQv6iOpWH|fJxQOuBc^9iB(dVSW%^t|p{{w6n{{j6IJ;hC zNeDMcwS)G(M-Ly#VPfnxaa*T^n&S7(>w zzp-QI)$GG>cP*zjc@^l#441qShh<0tFd%56L=&!xfMdZ(3G0!zqfA((MACqKm^`V7 zLF@(|i05V}QMAJx#-pspf9jXLVlNf#2mNm4L#;`~?~U&CMQ^pDC6Y^%s+O=~W@4A| z-mjUhB2N`{QKEp^=jHf)O;et941&{qY$gp*13Lbu{LY%)`0tn<@hG+{-)GQS%(F{h zwu)_iVTl%90p=BT#r$MlxtTM71d9jAK(idtqs=I!Gw?=&z zS)+KadyKA@_hQZjvLzC~>C^GTPnKQMoZ@wa;A2AJA2HaO4sIv!#=tfYMAbbAf zp?<5*AC}6cJguoCpjUo4W$NK6>E`#Xc@JpX6g)#XCr>I?%rqaPGTR`);rsI@>% zJ-7cJe)RDDVp}keE_F}Q4h58uX~{z;UZ8;;Pe+$3@uOSrv2h)L>fc4t=(GRPM)dUe zME2j}v;WxI5=_ zZZ_S2&p@9I%^teqTt890ck@uSpB*mNA=v=0v5K{)TRk4JqUb-n%Eg2KJ!{}0*T92( zkoG|XGW2nNek%v>9XWVEMj`S*C?whBdeISm`y(Nv>w>!W;ODob(QC6#-sL)ZCm)xr zKFA!|^4UUa4gbSt80u|Ij$UEMm-K^PyfXQ@Mz!&9>Bm8FA^3bX)3ed=I8s2&xP{t+ 
z?SCuxPE<@sg5#*k@eQ%0F&a9U&Ont;HX4>R<^L=zcKuHIyL@fBJhgp`Ctiy_G(OmVALr04DyC$v2>H`#b7A>d`jp0F3q^Q<2M3b%+`lqTS3i=(1)zV8JY&HIUt5;h$ z*lWO}-pz<5zL8$LIjPHGOWw|DG}h!A2`Ms;JPY+(Pk38$;Z} zqs=o`aLq?vU7?`3zt-lk3Zn8=)5BK!V>oz6D)Er=12!FaWhojO0D;&HP(s3&q}TOx z5#j&X%y%h@Bb7vdhU4Ga!m#;)Dq(3Q{jxPmsLC`)O1=k5 z2_#lzfdL8tOcHR-Ln!9s=TD&O>dp6?32rLZ#vO%Rb@Rb@kaIrYy@#*Cno zfaA|^xf+xuEg(nPUh1xbv?!7}()Pu_z2%C~7R2yq+tY!z%57hcwzJ*QmS#Yb0BsxJ zNwhWW-D5_1jBDp`L))Xz18t99Iof{og+bd-yp+&Z z!2GK~+tF3HZOti7II>n(nrc#-oVmVCX~L;Z&|OLsp`ptwO&)FQfw~lb;Y!iA9#0bO z*n*EdV7T46Z5EhH_?2Q{4Ge5YKk9bJKvPoN<^9wP!#IEW#lyft zO*LxiBJTyvzY50rFF;NGlr3wokm2&{n|w8`{Dfzr#+FZrt`ap9k9h z=9Q!E$rlD~KmAfdTLJTLX#3)z?Ps0`+J5HB(e}4q7_|NDO9^cS%)g=Si-WdLJrA^f z>dMjf=@$lVPra1TR>1tLK-*uaRw_hNy2Mfbd0hhWlUuGNP#`~m+38uJpz*%$HkiBs zmJ5}+0D6vI)(0TVnVq1{2M~Ws=Jj*(=yx;Lr`wEmay(v&{`rsAUsC@0@{h|!$|`ia zJ6pY3Of>0LpzYd+bO2U~gu3)+N5J_wi$46O&O@T6kCJzxP)P@bcRLCa!equTn ztzr-Sj~;qzh#+t;AsSj9y<5$XxD?dwFPmH0o8A9z(wEu&*~z5w(L7gS{_eLZ zj7a!svc(@?X#VN{gI$l;Wi7gxbj7nx|+O6I$r5` zD2cemF?&$ft_g~Bupj~O^q9&I#=EXl?wlna$CQV3U!9)$8MZTh==x@h10%`FBvFF^QU4t7K4fekKp7P}7!q_LL}dpN&8IrR^l-JUyr*Pi!}nAh#`6)%bsM9~7r8 zuql=FwVe@3TuK8blZqprwvsBuQ--23jF;k{<`gL%L;i4j=2JfxE9n~@+z#8ZOnFq= zCNqsNAOUD}@rFn4r_sExzT@%udWKRV3#6%*l?!fJGnM8&D;@Zvb+D8}?F3-5l%>y^ zqWYHQLT}DzM}@ z);8x-O=mZ0Yc!3#*_}zShql}~n{Tpj**Tlqs;rcs`2Fisw&t?HV%Mz!%Qv;lXxTMk zSol`Tk#fjiav_ree;VH#wL2GirZTMovI|qUBWfVn5vAOrk@EfARNxNdX8Zwm@wpj)Yc}IQ%vm9{;qt92DFu-$uF4*;V%r5S!g$iU0@QyD3EkYl8cD%%rnjkduS-2jUVLABaE3n0A%+*;4bq zFX@e6sa(q*zf~Dj)8$;uWx`XwbGYDbw4uWQhu_An;3V~$D9~cqUbTLikoJS6(+vk& zw~Drt^|^0sUF>z=wjR*x>Ft&S*><*wvQ?gjT-_vjKET!*# zoKwVKmj3kze=3guc8l8wI+*_V=|^oh;}5x$cHz@TeP9^$uS046LdVcCcy@9c=VWj_ zWU+A&Lw>@SW2n}(Qu$m$Te*jb5$(OJp`T(QZ5CjH;^~jQVRtKK;ab%4> z?G|hL1EmJEAEu8K*Y+pKAyRbvxe)<l}4|>89llGt}p=~pVxoA||sz&)#(?`?|pG*3U0KBO8j_{YXmCCW; zaZ2%isRTVQs^nSjlXHLrS)C1CJX@@DrrDaLZ&&FYNA(JRE!SE2-Mr>x-dEvULu;pP z(~nbRSLuQ;K3tx!v+1EPJ;4drY8?*C>bOEP4c{hnJjVXB;qnP3*=RN&9WYE80ygmo zwr@S74wqj3E 
zjpI~Jtc4SHm>yPHwWe)EHD7DSjY6q=eLJHQIttp=oQ7ZlNj3Q#uNzLCqsLx%bdYSf zzI2?%vImlDJTdss+211^(DUa!d|#NK^I!9 zXZW;9kB@EAyp1=8c)hFiIN8JG01L^Bf0Q)p`S>PnnktaJ`0Y`hS)SJb`MjXs7Qk^5 ze*;2YD7CnL{TaPnv>kT^tuB^Y*MMt!^$hiTA6hszV5Yp)SK4sOZax*0|QWAUwYWqn$9#u$Y(>(kqD7U|3edwBuF>+_RHl9R9kMs9RQ|>uJYn#{r9jU2Y!FIK z3ue(E1AGE*qhTQ`xq6qaUB9U@p7eDzkly$cAfzxSKF$~JTz1tRU{BW$?2ID7-~ZWY z|7keh&gdzYg)0IFZQSaEF+oyG>aifLV2%ubL%0o1Y(!$A;aIJxh%ldCm4L*xvyyMj zlON=Xx*MeD8ag!Fu)_}mMqzW1e%245QVig?!T}h```(P(hu{=bFV-3k!HNcB)UJn& z7a7wF!4x-Vo~B{K;@fU5a7P(~bX*VE8FjgHEo>MXSvHJvmtn9cp#W|u&Sn=J=LS32 zhw-CuSOzP2W2_>aRKuJCL3B%CIH`si2m>R63^M|Va?Ge`TW7E1UB+kwU0}^J;l0*9 zLHR5pZgADbUo;_cXL#~EDqbJkp{xiy5m7SX8af51C!X4rfF&c9eW;Y$N7be)_{Ia( z!vi2aTK!>c`u^HZQnz-bajOjKtGaw`M~8NnI?i{fhk|W-0ehNwP)PlB6uNEG3)ub6 zgQA4=f;WL2s0DANMQH`Xcs?I=A4l~Q0-NB|D+h1po6`;6!8gQ<%!|iWZj#m=~E4|;edOC!+mCV6UyarGs9auQcx`h>U*EaI0<5Sek(lm5x?zK4v{ z^1kAqVYD_J)gtYOtZ}L3P*3`_m1E0{{rXM!i|s&qH{*VBB=Y1J)SB#LJ^#8r7eL7O zX^oT(aVPZI|76m12ZNMn^xF=AXAg*>D70h3+gZdgv5WOqL(F6tiZg8n>*NyE_Tx~Q z77ZGa!nM?t5w7E4CfoSURDF^$YnVbUw1>gMCeAxQC0cPBg2;GKRxzDVG;~iY>Vv`9 z8a$3!;OyeTcDuN5M0&s=jj-w`G!iVKQR!@hw`ZWCKrs^@WT}2)w5IiBU}}_0?6K14 zCQLK=a#mNxglu5W&WP&~3u+hD68a&#w*|e8@V)XK_u>u z?+zlFmxM0kC_|+fF1bi9~%{0tp0spCu5@zS!6FXbKoQ_=A? 
zVaHCys$+CO8Dw-^jh#X`CUvDOTlayXwN6dc4^%jpC=f{Y=)Bf^m*g)ZdK z*9?&>L{^Rt&RQ_ctnOQmmmGPUmHGQYB({|q;w*@vUV-O0dzp>yjVQVX#6BI-MqcF~ zR7F@CXDFXFmcIdrH53{sS##OzCpTqBN0*|C>bfp-K~i0dYr33uS5L8SA*BikHQ0|T z5JS5FfnvyCx8NLf1$u~4?u>r5p0KgH#I{Z$eQsV+%_h-J>i^JbrtYA?4609Ug2b)Y z5FCwuO%yfQ7}%GIohdC2Ox0?(xY|~`95R5W1eOvq__Hbg2UE&V3jt(2O&ZLn>KQM_ zbg(Bg^_-Zg2gjFeV*%8*fhs$Y8?$xfhp{nw9f<$yCQsR#kb(HC>-#yLxp@7R_4(__ zy=LQjU;0h!@k{js==SE=5YA12b7vVA(-{43{f_AC_8qkV$jLcq4X{O1kHkr|(HHw$ zKY4tfPdOvvzj8(c3OtaYHvsR$Z}&?fMmCULd^NN|b{c%ZJ0xnJOcpdtW;0zVO!s*J zdq!P61K3_wzj|UY1=6*!A~CRGKI>(5VyHw>!R50mIm-8Ec)RQkKxhf!d!nuEbw~_o zOFH7Ko?ic8di{@TE4m~CqTFN&Di$XgGy?$xpUMfNp?)C{#N@hQf*fCR^f3;W?uCce3hPggrJv$Bi>_kfqjMh zuh~v9O;Le^7tg5aV~X6{Tay0dj^%8HbzmuPHPKpqv6r z8|O_$Q48l9xmry@snzlaQuuI>*HQV7+Ye_a&%Lg5hK8o4RwG-q)MGte)#`b5jkA!y z8cU{DU!?QPtJSiGNANvnYV{FyFST0WNv+lxJ(wP3Q?U(9=cjaFYLrXzZt}?i1TTPE zy(#w|pW*AxAm; z;86Ut>#<^K;F(5z9`%HiuN%z$sQtI^5#Y!)TnscAkki^Aie<21S@q_Ym0+393l(a+ zd1oY{Mc!TEn67&`(0aSV0Rv%BGAz91^+?kDqz;vNgj;Nq^@k73+Be-2&d_da!x-AF zag^g@T6X7BhX-oz1UukKIrh$e*pJxnjY`r z?11K>sJyTEM~oI8DMQ)p4;M&hlr) z07v)y*ew~oW+L|LEI?VK?^LN#nh#j86-x()o-!*MZY2rI0o; z%(P72Wf0QNtz8WURjq_3(<>7m7Y&S`d*B5SA!rYn0qFq-v=%N60~3H-{Q#;Fp5#PQ zeV)eWU?FU}r&*4=Q;b`-y9K(v0ccTFi*Z+6jQesYe)cLG!Y|n|EkX0iq|doiDtmmO zkN(3M)&%i{l~K&a4~3~}fj?uo!gfL3py7U0*nETpZo>obQ-1Q5oQ2w}Uxr#k;0;e;JTt9N!YCY;IB)%~F} z1qn`h9Xs2r6P#$Pt_TK$ep9PyCvPs29jIB_8U11zhXWz=BVq?|xW`MTf>>??IU<{ov1K?HPBl4hTA;7c zL37taUspJ}R}HR7Vm&XZC|rVjg>eOxl56xkLZh?xkQaODkeTMPAu6KSU~cvjr#CEe z;_DA}>=C>N6-FrmI`LSmugia{T7DhmqsiwIPy+gRZ5+S8)xX{2-WnC?cGM5`sV+`= z>VlR&g`>_`q*oYNksY-OD%0Q|8-uBK;8O2*Qixao~bNQ?HrN_YqlhbTSGMf*RmX%^Pmp?Y6_L-|^lnl*XN!PX$? 
zFzA?NYlChL=7%I%Yy0h$eaWB^>INSdZEehs^BPRgRt_Y=PqeD^6*9XAQ|z8}n|)O( z(uF#Yc!NDXsK=xM;KIdmtxqzO9&B+0;=F$Fxm4#IGq|M2^D3Ru#Sb#!IIhjU){C!= zoso_qy)Nm|v*yqcXmJ>{Sa0}%>(D~XLnd-Thrv|l6=gYNaRbAU3q@QrN;Ybin6JTv z0Taj{Ovjt3=jqsh7v6)!vzx6R5M*$;Jl=$R4N9V1h|%HXm0U~J!E~+Wp?({kpw|dMpP^s4T zlkw1YYQy1A1CGdPL7;5EJp;+W^PmEh2M<62Pve@8At*icmSxHQc{yUuZr)vhCt5lPBotZU(*ZDzTyTqXPsWK(+FbF5?7RtZGa0t}Jt zIVds}{kE=+PZ*1lBlIzmi!J%!* z;Lhk$C4J4Ibo;$SuRy!sSlOqgx8>iMv~e`v8vNk%h(0%_ z$q$e~8P)i9!=YwtWN(E1ZkUK_ZD=~lXL|SE2y@du@i;iNMKRK&Y1wjS5e#y>O8m>L zIu5%=>t!aNUZ!avkN~j1+89beg6qM|*2^WS9BEw_e^SV5zmo&Wb@BW)bO5R(x62IA zP-JDqRFWh!17fm(SA&?8ma+a@LQf@0J_q!4d6F4=W_gm|N%YKt)EMW5oP2_$6I z=1m}WT{?7yV(B)aB)`y0cao+1>`Q&=9FYHvO9%Zx>yz+8vEtUt_=L~4Zb&x77v)uh zR_ArQEcE%UI2|~^Efu!BvhX)3GYC9Owmz9`@IhL9NrNOAO3&g(+(E)hn^E8O@TV@2 zclf@q9O1TU9-V%nmR}<{&C;K~lBK_a!W{zTMWI*+;ET2FVtaCK5yZ_eN+}_XR-og1 zTQ_v2;Ik}wiKdRAy_Di#GCqE`jgKSPz>}{k1V1R7s37{a^m2h*4TXzOJ}X;fIi zZIU}oz3^b8^l~h?uC1Fwoz3iELL%nr9>`z|9xi>NC!YNO*?SuxJFe>9x4X~xIrGty zM)Hg_lKQj<7=eV8i)`sB4pF=O#lr->`&_sDs$8#JuXy!bGp>~Fs9di56g=3%h#)`# z0m4!60LqmxGUz4_5g@_<1_TIO0Xb1X5W!V0;$jn#=YT?#@P7ZbclSAGG_qwIjO$jc z^7J`wB%ewyrNB!l3I?PVm!pgcAB+*N6G^LFH3@Ozh*=^ilfJhTyCESYI7b zUzsY-gpQ>5ZwMacb9NMvE#yin6-o^|Kknxnr!UmIj}H|dk4H!A?Rm_`#~cS`11!j% zAb0&0g~f7GP4e})x{-y#Ik0TrW5x@z6ZABg#POMd9HN~$6QM#=wWU~)9d4MV$^Mvm z2D5xEP+!AjIi3M$k<4d}llV(Y_Co(Utb=Y8=?de&7{mZam!TQuqx=LD0ll)3mRJM7 zU_6IyS#CFV3D{lFrH+}-EcZ8XxD^&ng8epSc~HCfmAR89hPTdr-pED<)Z+eVBVaMJ z2lmH_9D`;fiPuTZ-^E~LAnnk#4}XMSns#4EC`$)Whiy12%4;T6%Gpyoi=(TZ{w%%| zSju4Px7LZ*=u49rt*$tgj_Qg_>4>i2&tYBhDVSL^{oC2pZyS;zVr9&jFlR>w*|uhN z#RpsDZM;*!mAF?KWp_fy0-eyY8_n9}p+}W^{5ck%_`*PHctAQZhK%^$pokps!nv9v z;nM?oP)@3ObaG%_>1dNI4`}v8Q#9pXHT-JI(03NDjLAocMNi4T!=B{tIjqVr2<0%y zIG3XZy}HjF&S8sgJHJArUm0SW1LEZ^l!w%{?YUgPk z(#HzA$%f!F4M;+2^23dZ*o^QQhmy%9&juxD3?)T73EqJ-&_R4~P{@@%)gd}j#1FBH zSBGco-5APivfjIAV_fX1I%&|tSeqI77|cv|V;FyI&{VFvO<|`^&?fe7KB{WUYEp;*3W-RchL( zZx`P^F+&sk^sOagPSeYyJ?i%ih@6d}cMVKrWf63t92ub<)Z-BJ0rwgNeJM2M 
zzKEcqutNih%D}f@#taS&Or%4}JfsnErNL|<8C^1wEX8}!)9!!~_#QEq0Z-u5`obB} zyLH7`<~We&KFhGfbw8z>^pdP(d}IfAvguz0+0-urBlfh;U@(9Q=b2aLppq;u!wJFIT)oOp9~02tuQs ze6BCK!vt^E+{~RacM|)R)MLCx*}&#I5LwwE7d7>otbg+z97a3Uv=N?#+`c@_a*o0m zW{pykuxpep2Fbx9^<>^0kAl=akJ=9Jxx%+;K94-fV*=KX^(W(XR#+|px9l9y=)sUcWap0w$p z1Hg7w^d8zerj2^~aGaBOHlujiBN<0}os;ANT9Q11mdTs#sQpfQFlG}u9605MwmbGi zlZ4ogMR3MCc*=EWfIV!J`Um`mCKF+n7MikmPC#fzhJN;kzFUJ z;9YM1JNhR0?L@=1YHg_FV-y zvw&$v9+adeuObM|S80AlIy&0K=8@P%qWOU$a=k>|j&`KoAq^r$$35M1LShz3K zc7X7JzSMOtR!x6Oexz=E7ZS&VPH1(B<59P4yCr%M$J4qFn$+5(kE15b{-o~3@oof> ziQ|0;g%GiD2|A**>Wk74_vw@_CHF`@K#1N=92Le%ic%g2Xp<`^9YDu%zC|@BNaF_+ z6z9=i<31nFvABs75{L<}+&hiY3mf6D7d9|}Dc!A^uo;1=>bDyDw&I9q-4{d%e14aE zP9g;EzEj_LWCvF*hm%CZPPHrnk!t+S28!-7jZnz0RT{!c+Lw{Og@}>=JWPuhOcy+$ zMn`zVi41UipPKIx8C67va73=~{e8xmS@ZZS!-x)ry{t#76cwUl86e9=2Tbgs`hZFw zcJDsSyJE2>0ZnEkNQEkLVlkZm5j|}320+c|3-abrpX2VXfkQNaRH!ENTyo}u?1v4= z`ZoiiZ19`Glafy8H$RfJTD&1Rtt%qnl-(z?pzrYA6S~s+aa|Dv$8?q1l-AjL!6sn{rd#9Kp8HPT_Sb#09$3{9N}=AAX(rf+VIQ7^W-Z_YkIxalX>nAMziU$n-k z?j{--nnc5Rbf#g_1MW{MC!2e_*Dqqh^-qh=5xR%N_-!>J9n#w#=4Gwb0X0oyCODkF zFX@j=Q8*6xN$^ROu#zG!mg)XM>(2(hJIHCM@;```;&2>@o1*0 zjHdLmZr08k%604{O{5=f|{8PFxLQp@2^$OrnQ8>v|jYGc!@mGiQ=U6@0 z63I4nr*3ALt@x`OfC)9bmzx&uckODMh4n%%s36CBjTE#xBIE_F4s$hH6dOJ4kzbkx zU#I#y5(CYGJBYDDzhLc-sSy_BsIKT2X8;zOtC~7WQ|K4^_*7q^UjVyPdKTGtQddyy zgs!NI$GJ+sctY3XY-IqN`iU=n=oe>n8x5Dgr)k>s3k)rmSqs|l)*Tlp8v`oc>%Lp0 zf&P@k9mUQ|*qMQR;it@CJJ@Ub9Y9w)OQ$$%`n7J$+~)of<3}A0<7{7{VVpA=1!YY2 z&!u4iFVir9muVPMmb```bRj}lXc))yp-BiHmeK~kk4H!P#m`LF>(VfwDZ9xeE}&sJ z1xuZDYZz!)9`R4w{GEo?C4Hr1N&Os;PFTmi8jZ98BxJXSv9n7bbsEO@4otf=3@7I) znlClTi+R&9oYJL%b~FsMEkDdNHq2bTDwG6iU{Y{N4(uMLDMHRJM$|w~mxyVcPiewX zDZ@^8k%sL)9WFkd1>baU4h)bMGb|T^8J9emDO6U&$+f44DBl4D_ngub1| z`5Z%xr}0vM?fH1A;iebkr6$EZOjc}8=CQYUNwWM7MRZ?##~uB$i-_hB`cXVbQJbp# zm-_l&Wb^Ua`F0m%_>ANCE!4V+gFjD~x0|={@8k;*wG_G*F7NnD&D0tDFsNRKHxs)U zhc}P)I=mx1z~)bQ=|B|f0QC#vj1!;28e!UVL7Mo;#l*IZYccB=Jif*7UZ_<)FNVds zyv($c3sLMZ0s3EmaYO&>zfR~E_R^w(C}KAV_(vL4{DnF`KVvg+m7i3T5tq{aA1R|i 
z;<>p~U`Mhp$kTsj%31VWDTgtT^&dZ~gy&yHHb+uwFEa1;^W=?gkS{P5EfP<&n43I5 zma)?#5uSNno;yc-?8Qw}kNrAn3W6X@>Zcb&7|(eDSnr=g7)M{!%TNsR1tts=_7=2_ zq(d#*jIKoVH$jbBwj>$7g8&qf=sVi&WLRYohmDoN2!fVbpwsh;gyWZIUweY0jUQUt zda@FgT-mK@wrf;LF(qvmWq^GMD*UIt?Nl8LZ~-VgenBtf{06m%L>C9E1r)2=ac>oBW1 zR9R1twZoj1tJPJ(%3|ZL%xb;$W%hFsqEe|xHVY^YKE(v^o{<^8Hw)r7Ua1X5z|KF6 z0j`}m2g@lQ*jxA_N38AQx7cgQ))P&Y4HHUWCaYXyB^6rVHcsLpBRAT%NUFEy-n1Nl znk0SaC<_@#SlWo{R#v><@-A;qUmMdS5*(p(7QJM;UJ;|vlwpX#~ z#wtXynM@U;&z^f2S5g%wk=r)CUP8I7$&rc^FVO4RRAtw2QJctIFeZt zqL2>K6|CSETO^p2mMIC(2}k`3;=W?n22gTy+4l5vvE^kSK9HB-V0+I)Ta!w$S4$hU z)bg~Y@n7!J^>xy>uZt&a4}@)zk`fpXfqMJ8Dqzq84_W$uSBk1G5&LLR)X2Py(OcEU zwC=aEkjb4F%5>%M$@|7Hn%Bu3O{*vju1=jbUnSFRElYTEB~Oyb&q|Y8qsgY?I zK^nrmz&1m$-)JRBXEidf=Yofiev;-oKN1_18=tCoYD^5y24yuDarnU}HiAW}FGyd) zU|m$N6qXV32S97-E!UHV`2^0`jq`Io*$%2)PaICv4>3%E9cf6ACw#{lhy!w^vt$_I z|JtGyta8Ow_(UyRkwkwBphJ?DW>ys|(msz5K?PRSQY^1^I?8vgZN%{BRw-XdJ}S?r z zaH_?*mM7M0NSebq7|3M}%a^YkY*Oo1r?=j9Q$bWU8n7U8RPcbU+Xc(E2Av z>H@5l*6s$R6speSR5Y2^xL5N62c}CNh}^J!Z~*%D`vZvsk4Lc_*hAdcmL@MT*iwtk z&=0Ei$wlgPO<>Fsq+eWE!;o;Qk;NW=aL9|Jnux?d{Fij0e#g{PB2Gv*DdGNjUz0eD zK0FzN$>fs?el^W|C9i;_<6JB@O^W+Dw>?C`>8UM58`kh7gj>_*x0=x4Elf;%SQ<8} zMm`et%6=7%J>S8otzpy);pyvppVm{Wg6ou)Zg9Cu734O#vD^TgQ|cGb5GYf@)_AA# zoan8lGE8Zgh3t`b*>!M}+ShKZlx@HwIe{y_$6mZeXepAiZEJc{ zu)CECLA5RiBn$pqui z*Nj>468jeU%ldtEfMY8r9QS`1 zKSs%F-w0=sa7;UAfEgn@3II-M9w%s`9#5$ZpX*I%9Dm3#*!nm**EmmJ zXq>##J5D&EjW9d>Eh-PO5xH*Au)h4Z5U>HA!*=LI@nK7vGt4+m&r$Y|@PL>A7;}1} zr&1DK`MI+P0A(fqYLVAym}5$8sANTlu}oIAea>IMr&vF?`^W3OJW0(To_tI9N&`8z zlw0K`KPg*dN=(`+N1b^^p6Fl-l0|OCv|iL!MbLM)^SMIUQVz6r#&{pXk)D&1(=B4c z$jqOL5snwycD~O?Vd-voTas%DCiwtV=$ zO^*!=9Q-;2%MP`jTO}`|TCGHSQrGL}zt>+;G`jOjrBZ_A5V6;ghUz|D%O~8kp9dXQ z1p({Mcl;t>>i+H=jGcCZC<-x0dfwrC&pWJ%cAEk&A@FJDu{sI((^|M7xB|9`wt(1*W2 zZ|MJu_0T6@e0u1Uza~BO2QLhl`Oyo8=Rfjzj_TN}&_6#eg9Roo18e1|g6G0eULZg} z@qz*Ri5DrLA4L#L_mFc#b`@SF%(zGzoGHxc7pAErpv;mpZ6^Mbv_iBe3q2#K%^E{x ziFpbetX>pjFz+2d$OvN-rCDN;OagnIos{NB$X}Q3vsO>7(H{(;di+h^w349Ou3oe4 
z>ZYr^;h|QClUe0XJXCB&-eo(WJhYqhUB7RmKsTGwNw3YA(Ni3DCwBdBb_$T8gF^w5w)1+6<%k?`>9lg4G7W>ZeKJ_{ zsRQe(!PMDPB37}mVrT#=i=A8VpdsNho}N)2vVG%8hVuVyDP zOwB*7FLKHnnxn*!N#2y-(dj^d5omag-|!(wQFTb*knV3u|F}7Ls-BQp2ID-}42t_; z4{2EJRn&(&G=g(eN?ElXt+~hm6^`hLT3hu6itJ@@?J_^O;aKc z-jS44Ed$IoH#;d*eGDArFxQqj3?G#hj*g77*)s(SYq;}U7|u$bU%nx@l?&fcLRI}7 z^;Xl9hQ-%s>dBqzt4Z1cWM7&sQmIqHwz{)ro_f+5XH#{Ow?J zGB<8vfEBq*e2tNcobjq?Q^~f&&+Zw?R9}fMZ%O+)&Og;65pm*HuL#!sr6yC-C-)!y z;`gQsC3LQ*(!P%Sudn0&Gmanx*ECi@D7dSDqqrXXb}Vi|=F&Qqa4AOU2FtkQInxjs zD}|Kq8PU`zAHzV|$`axMl;=~vz&=;WSw|=#66B*a4&co(nwQaJ^`Pc*31W5$>&=yG zgyYxeV@<8kN-D%VQou0k0jj{l9hO1}xh1oYg$Y%Z3*$26(+R|Xa zyl1FV_qBUWCjZ|~}Pr~nM>H$$)vj4u{od9J$EX+RgU8YG;))$+i_S1f*zcTyjN8R*~^hXOECqCeH zJSBP?XJd82u+LIdN4isAw_{bhle|fsbEHkE3)l{c%fH^?X~Z5FDzDrPftM$VYh8IX zcw@RaS$yTG;GW;-&lYY>?Zp$QEsf>{e*-w}0J}|&IO;s{mxyRqzcS2{r1JkvK6D?k zQ5vkjZVBPhL2dH1EPk9Xm28MwW?OELFO`hYfGK)r`+ykXPQ1ce3`Ct64D%!0z{BuU zu^A{R{{q!XvXGE_Gdo?OWrBzdi`XB-XVg=Bb@|;Ecx7?PqKB^ zhfLEKPXD7beOgE?v5v}APv&1E;+0HD7qK;=u5mM&qVPFxL-^4pyjieB`Zs*Ljn@Y! 
zRtACFp1qKlBX`=M+@4K&dt32zxlNFc_B1k1te8xPlf`>)2gr-B{9icIXM|C}ztv1S z@9Ly+HEEK(J;LsWUj(;bt4$8SbBA13ZsZQYw3W23jvEB)k)a{SA#I%Qn%HBYiZ$zx|))m-(ir& z5D}8z0GtUGxT2-%#FPWztghV18><3onfBIA3<#9dWK09a=?;t(hVyG^ctT1;=s3C+ z&Vx19lw>daMnYThG+>A_Bk@eIP!PUZ5U!at7x3jh<}tBEzB%GCnyMv>*OhMONwjFT z1H+9BcJ0NEF$AeLN{9s5v(y3~1jMEXBI_}U3Js2m6Ep>R9d}gL0yvcIPl$^~cN=Wk z*AWYK8wOokWz|%x(!i@4#jf|A|*}Bs%SgwCKhNH>c`p-$r3ZCbr~o5K}INV=_gbU,J3`cZLYYO3w zl289?Hvv)o3kLHsPz+=mmyP12v-tt4@fY&QX(O=>d`8P%-5EL0*(2%28c$#vi|p$I zCgfq(bdZh2kB_!~YgQS>T#QEXZz&Sa0f=CajYn%3Zy#!-7d!EsM6D+(QQev5CrQu1 zLIKb80!VC!S)$Zz2qb>e4FURbQnJ*I%aqEK+_zOT#h%%WL%)(o{c(B8s+PKn9UwJ~ z6DC2nJb43&vA(T*9E^ZX&IVvH`921YTyLKgYaipz5x<)8o4fSY@w`mwjbg0;Ty}uW z56p8@d0jQ6B7ud^-vD_w!aq&-U&#dwxPgXI%H3!*E;mBP zBMn+ZBN8H=3&-`FO{=}0jZe(%26S6atkpA7r?MQ@lN5QxsC+Ihw<))B848AJ&ZDap9cx+g%W>b4iu-uch_!RPa6gr7R74iI>?Cp zyC!}UPxVfmPBj2CLHg*UJ{ZjG6X-g#DYMI5TO_0MqaH}7H6W(a-f}*p^EMHZn8Lq@E5yG`?yos zDy*(ane{+KNv@hxS)jsS_gWaKOe^^}Ij%jey`CGgTVdt~OmL3ZRL7O^%!& z6xyrG8siw@7ik#j;9fW;(f(UA4tpa1HW_r@s`9)hQ?c|suZxrroaS3LFCIYnuLxjO zjIQGmf=+H^ZEbj}AZ;JZq*{&MDkm)~scy`?7B>Y5=!|lu>J7PVMa)vzAX!u`7_T6h ze~ODwoK-9++AMiv6#EVy?Wx;1msVo^I<+IH=VwO;V&hG}1h8kNG6O7UXTh?>`JlSt zf^oOLsvJ^_ANU+n02<++?9x-r15Eev+ehT9g$n{zCsbKI)f9tSNVd+-7nJ~7uN5>z z03KKI5yi6+zimloPBN`tSJtS@bEue?%-O36Z$QlEiO!Q9*4hA`J~fn{ucK#V+f@k4 zw=K~)H{#0UvYrLB1Qz4QO7~6->pC7X<$*XkD=prNqE~@4<$=AdHRd?%WUYN3%Il$* z@_Ohc7b{>5BZvN&36Vp4G`ik{yGu}}D?npnn6XPBcidXgm?fr4vvdv!Faz1LDT+^tO*2e_Y(RP>wSZZZs+qoc7o+FQ0W%{yM$aXyP^2bPemNQQK6zlf! 
z0=_9;z|)EsXp|OJ@MBcuK#tPck%KKAfoMr<*3DMB(WvgSY;ubjcqXN8cbK04s{z)@ z|2ctmU9TYo7ZGM5a>SC%QO;uFnfJ>kAEm-mTtK)0Il?D%S{{qS*4=taW#?$4+(vVy zDaX+R+gBkgy~ZIMi1eCzO8dnCb>IX3G<~vYqJ2WKZbzng3?@-ppTPK$%1PM+ahWd- z8EXfNEuvwx2!B*{dBZM8n|ycD*M5iA@*@QKj_s*I)Wt?sJb`HNL>gk>i_lbJG(i zkC@bHeI$%3j)~*`mFlWo>9lF05Y=FBT7eP~>v)DHlhW_HV8H8eO~BPg@xYm51kyuz z0X%sI=-h!13PwHD;z|qAOiM5lEg<}&KKU943g^MWPOG-~)e9=A80GlLP@5=_G#tiN zX?^T@57m}ieKU>9>i`yP9`q8p!eYrwQp8vQMpK_aZP0qzAgiXhYlv<}8ZQi2YO@>f z*LugxeZU!ia)~aklO&pS-$O2~heLAoO@R9BO5d$5LEV-hdJRzIw8g!XZMHQfJP#KF z5rK!b7-b>Bc6}tuSm2FCwYoGzb3YfVV8FE90V1-sXn}M6pf~P%AS_8LLY`V1>kwPF z1qO-vSvF(zA{aHJ_2XvL3e+04KK-b_ELfM6r4Ouy=Pa7QCgI~&ryamNWKa=1#aBqf zBp4c5hCntoRztVoz5ixf7RZ@O4Mj;;jKqHvF-v<-McK{`lYcYK&22Y8M5+ytP&ZMk z-D@xm`WD|17sUN4myhVcBJqdHQNNoA=JMcJ5P=_blFk>tB=h&CRctU}s-{ugy{2EY zlgpcdiaBZO(6B=zMi>}_Oj{G}*qIp&OD33T1MUnx=p&GZZHa+f1GJ~2)i%lb#ys-} z-3c|Cm7pcfzC905sc_W3Y2IeR3@zY?_03?BLrXyne)5piPgTzL;L`7^$WS%V zo_ELYiswR7`7+&*7`p5x z-Xw!U6VbO~A}u!&x#vLweRFT+NT7-6z-mJY{X{TE7udJjU}La7?6)|!Gqnz%nP z$)IJwC;Zx!JE9^sCMI6jfLPkH#lnmh%y>6c)?|#O0(q5W6n#{gEUYqTi*ndH$u z8y08iLIS=Qvf8}I;sG8L;5MRb`fvxxK#8i7Y$#hJK!Di`4i-*r(G8I4Z9Zq|fM~XJ zI%3>D)>LgeM&2u+6r4hZloMd)R`v1I}E8=^LG+}WarVkx)@D}* z3=`S>1A*t|%mloR?_Y!i(Dnod(q8SR^8x11vvkpBOZw6aJgbvsCBC(kK}OEj)LlAD zf;yw5o@D{22}iA&(E0Z~D1t0h%dvkuSNUK^8h|VBB2)mtDK%}8k*B{Bp)$N^c0j%bTj|%a1M)X}O+4W^OBa8KBQnpAbm4iB zC17kF-1|yc(2n{3MJ&)T1h3QM@hIdOAczKV4o={*9|1gG|2X?X;5-HFya8_~MCT<@ zzoPo^JbgB$K$Oorq~?Yk7^&=3dn0;{w%1zzuiPTIsSu^|;Z#-i8g9=}=w}#*i9hg7 z58$|R+zY=1jylg!9-kLAooqe8M$i^IrYT`MnURz#9b7cV9do@>OKw;+LvU%H`)Rjq zM9`gW^9WBnPs$4VQ&k;OuX>$K=Q#3pK$B}_e?{DOT+OUOPaxlb8pcq+cnfx}?)Wc# zJPW&BuioV%;k<9TZ}a|E`lhbnN=Qmm#$H4gweX+CZyiZmtHY~O?dC#BRQ#fZaV8&0 z*(_~YK;q{pEcB;xgBy)4XVe!)p^yU0@yLwM2syYN1H0XiHHdUCZH)LQn_G)mOoV2z zW}NSvif-O-jjM?ffm;T_T6}28Csy{$I;FxLFX@P0agyM6`d|)K4T({3YVqbQx-BiXTTwE4D@kjUtT(qfG^juJ=#h#PuY;BnKpEi!H@NJ(S*%>YQjw zM+-_|kB1UPGV$AbD9z8IbhCpJbuFp_-2@2-4p=O%6}V_w0R;8b%5bhJ&=S9AP^)ok 
zCmL3lyBwI`0?!?jx`Ps6f*OTV;;M8Szd*JvVqr4&IJz8An%Is{9PNNNw?!D=lpvvON)2pq0i+A;F%V=;Y~f3a!u6a{-N()ykMBTa~j1l>iR;Xmq!6N`b>szHCnW z+hk)1MU60OtS62U#!OL;w<#uN%XmY;9UR4r9(?@4 z$Nw|MfbA--#Jp4lLfS6FX8nZTqP>- zhzm}9k3vRfUr!ZZ`R&qc`19&($Vd|iM0a*-@86InQrD3TN=;N8Kpd^?^Ubs?S|kG{ z>2g?$aafBY6|z76-e6a5X0hyU#mfRj(r-&F{+UvfR!eLPYi-2;aEUQ`nY!$s7EN+L zo)K-Gq?49ysAj2+j?3|XGv;tA?D)0oeNEveVoLrx;%?IJdEq49S3B)Pd2XzXOICAp zV(OYO5ht?ZC=sfT!TcFh3K3Z1!d=Wll+GP^OTjFlcxt5z6woDqXBhPM5x?TFjD1ng zGX9f%ivUyN48ZDWTjdT>cSUn_{6oB}i+_~GwYu5Eem#nKo5db%Kz?w>9&A8jM)`m| za~RMSW)brP+yiMudetZ_*~NBX+puqCKSN$qk#XKSu91QGRiZ9r>o^?Bop5VkV2G%~ zqqIZ7AwS_Z=PqU~ckvzb)OGPC3!@;N175}ew7et*MqG@i=Y(5fd7m*;ioz0I64H_$ zVeCjU6T0*A#H0y^l78edgrQvGLU1`H;`lSUBqbWER`j6O^nmRkzuPHj1piB@G&tOB zfs{)RiW?t|;#)UZO2WXEyi2@zoD0l^IgauB*hDLp&2AQ) zc(MXrAhaM*a@hx8DdnEKlLV*gi~$}49+JkwENvQ?%|x3_Q>;tltQc3zqfHbXB6!Zz zjF*>?GHFw6m`^xtxlgiV%BBSVDHI+*&QBS;2|!7l+VKO82o+^ju)|G^x3**UV;@_p zA|)+omk~o3+J(kk5adc`8;Z8Ky19`usxY6VsIWdNKmOF2Z#)ugl(j?ps4GVTfBFU2xOBu-ti|?|) zCMn1Q1*`x3iqdS8S~9mu2GXjVrz=WhkEFG|BHJv=knO!X3CrCnN`sJ=QURVmb77io z-Ymmq(z}lpZ?cwNjKUlaMXrN!iHj^u11Bfn)!9W{40%e<+2^zF2eXF`ygAOQ*`ILH zrh_6I|L2Fv#$T3wi%a}Zqb`U4)!F^uIpv@bQ`@ zt#$~!=N0B?l|pV(r}{0k75-`(6N42w@+G0*@=yEy)R_6)<4FbE>~6U#?WpQNva#P- zR5u)S47dQHPO}Kb1(4w11}yJH&?W$7%8|W;2))d$2k+l(y+={eLf@^B-yZ8xR=f>YfYrF#78N4?0N_ zuI79_H~uoRI)EAo!>JY zU#-fo-h4On&S*`!lJpr;dL#SAf~{6`zms+dJjrz@Sz^Q3p}sXPOvHRV+jS2`!p4It1Rl-HM(^b2_O6l4 zv+zgx_9gtd{LZe?OxtKMcn7knS)-v$TQM5de49NQ)MGcAeSGlH*{^#uJS^z+jAlPK z=W{l^;Bvvy%osbzr$k>9#J?9pz~GD-aU4i80UANAlaTO`aEs7xvM@&~>0~|7MAJz~ zkJeh^w#=uZ<-EZU{CdXC=_hX}hQEm)R7)Zs{a_AD?cxDFXhu=rr7Qa7(g>W~a|Ipy*QYOqZVn!`$C8n37jgIH**VxOcQpEu>?yvOZL&iYFrpK6 z_>0=yBuJl?NY0&zk2lh#aNv?FF1OTaW;~TlB+K)Lk`V+Zg*o_T<&|dXj>x~Ceke1ccmx<>vOKfkE$V;XXhC;ixL25wmu72+U5Um|c$5#sidHY%h`2s<%1E zx6Lft%VD9*rJy$3%eeIt?_+EwFDabwi#ah2=FDuJ^Zlg&$;phwAjk>8*7tRE2q(A& zmvNZ9G#7d=gSn3;OOr%`W7R~uLUJd4rSZ3y8Ql1pCoPbadZA-yUD$TdrPnGuei~ToEyZe%z(2beB#VWA5>lp}FS9A0vMJ__-Nv!2?8dQ^t8sws`G;x!!oiSx 
zbakwKhko0KUu7BB^-2tTdGfMkC0}-Mb0phS7Wt2|NXzex{}8-e9qwb7s@Cly2i)JH znf=xn?Y}9E|8anoq&p57rX};$CL^nY2g~hc($6lkSsa~$QrhMM*wSQK{H_rXP9U=+ zNd?F@mmG?Aqktl>q4H_gyiFtH(RJW{ysy;VM!Q&ywwhU%*p03>^xi0MlpsXV5ob8K zA(C2bEh+}S_)0N&0PY_Z&2~TPPU`eOGad$#F1BYDYlYRM5s^M#WvyE-hmmS~In;+x zNI1t15y7AKVkbEmsx+>2RL-zhFw2KcT;`Mj!@Y26z5_%pz^=MkMi0Tf!qf>vR5T4Em?L^e1oKG#0o@}fn8VVU$9Ui{ z9RIlx47%5d^c|L@hfH>Oa@{T}m4dM~|FBEHIP%!0d%NKPtYQ273(o zE{c?GWT0?L+aMvrV^#2h(kwI{7Ye4yc!9=t!=s?J02+5%m=lfd72_$^j9N2+W(-Ab zvB_J4^sQ(LB-VL(l-5-|wN$OttbF`pAD*l86!Jqzl?6g4 zB?hUmI8iNVv7QZ1>QEU1$$7(jykW*Wd#w2{%80lf!*m`d6MO}R&Htm2{rqI~ntsKC zm#zu|#1`wv8*Y7)(fDgM0rlaE?Z10wuvwMfc^(4~@D_(FYLc@!2w=sX=Iok6cI&wr z>-&)XJ!m5@Lsbzr|T%ah2Ay5W|)jGLL5TFG++u*^!Du z3I%^Le)5!hXA|-i$Mi@qIH&Js$>;_%-L9+SGiUYN4)Pha_=w5Sc_0)>6&9d2XJ>7a z+pGE!*B=I!m&ZG*lpeV#d2A!cdpwfuq$}$vm&b{gZ5J&S^7viVc5-p@*aL(HVP>4M zB4##((hTM~xEf0w63^l2n}U(q4T5{pYJ>uk$Hm*?A6_2MZo`n+opk2cNMx8)@W?Hbl&eE1VweJQ!vfV7yrSS(yi-7}B(ZV@l4Sn|O_XD<&`|YXe zL0LNos_B}&jD-HzD57ChJ9>3Lgqh9o+yaz+Gkiq5hwVlYh~R7~`{`g6%pS=Os@Xhw zVX1kV*db>@YhKp;akw4PFcxhHw$p(Cc1&39_|;MUw3Ae22!y6kdaG8(O{i9THXI@r zdd%Vp>V9XfUDrqnC0I%K=T6VoLugzVpmU1fY-)Z)@PnFcGf{l1qI8`g9c? z0EQ2&ra{D=+QaSP%(fk=2T0ARfRfz{oLdbSA4sjL8+l{<2=e&G_Hp#^vuVsUuAas? 
zV>xf^IHxE-o5sv(xln12CX5}CWFC`+kGGiB4(-)d!Kx#owsI;o_$)54osU9geb)6X z8ia6sN~(uL>2p#lcCgz(*SS&p94S$!6hjM1yXg^7jb}saxmGON0i}@$tAckmgwjQI z|0N_quwJB<6^msz@JdRU)bC{21%$8Zy;7c{|O6DHIQ z>$36S(z6}~AO>lV^7h`vNs>jjvQm#{u|L>ezUl?nbCbI`XM)YrjOd)}?QHZS$l(rx z9G!(AhlCg)$RiyDnc<_30Dl?;nW2@_4nZF2L6Bq53PEzq-!CuZyejzX*$5Il1c?g- zu^S#itOa;pr-eBY#9lE3v1SN0bs-3C{4x>bk*7ltmj42(h9Wh&dllKcT{DualpTxc z3(9)QW$`h7S{?3a;Z7dOIAdI^Wq-Hhd?kM_enh4B&>xPsQqvo}4^&*mi}O=zo%6)` zdy2n$>5GUB-21KcH*AlNbDQ$YkoPF!)s_Qd$Xh1M<-?;rg-2YeWE@TlmZS@^%Iiri zzyV6TmP@yQs=kc(Tq|8JV{kk=+tQok(NnEF?*RTJKGj0AmgvRpeO7W8Kt84Kzwzqm zjJ|{3Pw4xaz3F1o)btTZozgYo=y~ctwDDk?+f+8r(jsU$D$+v~FJ-2vbg$7S*dbIq zZ50|#7Mp{$o-CDvvff^*^sMCx?iOl#iCi@k;DneTbw1VjJ^73llZ*_`o|T**k>_S2 zWh4xGRGWp&H}q6Xt{qxc=x9W4U>i6Cg|pc7WWq>YQB-Bb3>y1JRKsC_>wCHHbyv%L zb{FkkGZAI3^OPd9>U$3UbDkCTtoe^|YEGK&5loUm2P9^YPKUI@Fq(t8P96=fFz6|zWOw$qXNa}iD6N-=Hg(fTaWWQx&(^R=y9taRHs zv@j>5|#z{$@lSrQ2OCOWAT>N^f z;o_$}bIP9at_>7BZV{}xB;GOLcNScf-c7co9k&iEq&>s9gY5IuF+wn`Cn*2?teWwo zzzxqyLyc@eCk^EVq%S5YV8f}&sgZ|?sh54d#PXQ?xgt;z|2bGZQ5o+i`WL%G2mz$I8*D`A_%B>O1D-e6kVyV$OJXO}G( z#h-~EvV2w~kthQ#_}N+zpFN*sF%8GRFhAVB=Cc7Dp0=6j5(ir<{j)v=f3n*9@uU6GOwdm+o+`DKpt z6V#I3jjzoGNS$`diPw7LVkT57`;oull%2NpoHuK1Bx? 
z=kc)~ptk3S>zVq^5fv2uH|_Y7OY!1VO&f&7l>9>yC8+f%DB_#086%jwSR7exg~nz# z$Z!1`O1*c?$170L`1^UO6kDuU_<3eoU)C9R@fG=Ep|NCk9?TKd$cR41m=)rx-X zo@GIo-2qcns$@g5m`mCqx*~Q+TFX@lUm%L+L&;)iP!dCF*#3>>>UJFc1SEvua#!gn zV`w*pluR&z>c}HR{4}#=w@z+OmfeBtiNwaNdFuoy%s+8j>6GGy)}f=qWeh;A3O&kM z^EMju4=Yo>c$gwNj7|@0xveo^Ehg^H3%Je>3b1x-Is2xpIUn==qu2SeQ;EV)*CmQ% z*k%LURfTj_WXr{#MpQ28oM;3u>sb-VsUZ7rL86_U%yY?xU`orq6&*+({Iku8_~%CD z+gg?{evwwU#01Q!b#ZpO)(Jp)bXm^gqeHorc*UqESRV?gOoGZ?emNvfn-$7=r<5_6 z+4A;cT-`i2nG#OFw%tx}pb2pv29_z{^>MVt;7Ac5D;(2e6f+j^JjCA=wplo(N!LA0 z{Zp|_MU)Qnd%;xMz1eCXXqhA> z^INyAi=)h^&9b?CTW9p=ZtAm|hnVAb?&RTlG-iGCP60i!1HW{eE3DB4j}_17`#DCx z9A@0%i#saMDsZgT&vu&Eo3?XDHPgU*NJ||@bp>zF#Rrbbv@u4cF~f*bf)+3$7l${H zK5gLG-o|t+-d-UTy)I`pvzswilZw?S5^Xx1tq9HZT8~d3LYAch+9XTCaS(GU<5Y%A zN9z+t7hz>f>PRqlNLdl4X%)bGGbVUj5sPKGdE09wx5OZp8<+DzX}-WmbhHFk1?g+c zL_Cug3jlKN1@c0~td?7!%OwT)K1?@ym5*1^an37NC9BoQ%aT`$vdHq7XurfMi}0c~ zMBr_>&PGB-IZqeV?-S|EXs^x7kfNoZ*=8Nu%Y#BXV6&StyK+6UNBF4?C8gw$G6NEt zQ8$l>vXs*Yo$C>dESVET33)zRR zyPSyWosBBAR1oohVZs+j#MZ3b9?~WXGPLZEA{*?&*&m&^Az}dm7C<9lGZ-jkp{tDx zWq%ZH=)FHd-RsjNT)WLV zq*_25c#VW{?w@wlUy;=(2P#21WM`K4XKtKK{F9H7G|0XNHa*^bP5ExCY_DxHxLm+J zGMzYiglo1dnqWg5Tc+eBDQVkL-oQ~BxGb6pubkq)cq>n^vSjggo?v$tU2u%*MfZUy zC}mqgIX}p^QEKM0XnR!jq3tte@wbwY9Tr)it{f9C{&(=4TV3x=$sOnS$<=_hCpci|HX<(FhNlB#03N}Q1x?dF}lC)|LvqJyv zG()`$9b{oGuz5~3EkmnN6hFC8-LC0u_jJ%ww=1>JU&mtehag5NTdO~86aFmYas?|? z@W-7NY@-gvXaIJNQ!(zAhE>w($6MU0UB#vPYE3iK))1>Tsdd*GOblT=tI=tMsRyQZ z0Skt*O?yqX1Z)HWwolda3Pdv{Uo2Tz25d=6Wyiit2TdE=tYrl<;mw@Ug*Az?{`P8H zHC+Lmj;k?S@N^Q|zBD*3R27Q^Q~h%-J#tEz<@*>m}!QJBEv z++fmrYAlW7zrdt2lg6Gin*-peo-bWT@h`OXhQj!ARA4Hog4fO5;oD6~HQzeU@zr?h zO1HS*H*=x!D^fN=vJ;7}RM|>NwNumP{8ZIe4&ewLpSie> zsWD(++zNoLtiV5fcH=sg0I+GY-JhPGpa3jSZG)t%s#3sTHty;$a*ko!7Nrd%rQ(d_ zIp%8XE@V#{oaqujS#Hyv=8f>gy5W!!T}PR>?gQT$u3F23{&y1N>^_^0tEvb%t@|f! 
zKuSy!AMx`(6&bEQ%^;Z3$w&jRlzNrL&d%QdeoFYqj{!L)%ljD>)H+9F1#)CdIkOz_ zB#l=`XZ{x{c!t7`_iOLU1_Tm2>5Q3)H6IGesiHEH|OP);xSESWReZ{s_+ zzJMdQU@f%x+0Zq)kv+O&+~s9EPQH|tRhF?L#84G%Age0arR>MSM6*?Uv%2H2%-;5)`)ot!mSQKjrehj1W9m(D*37nYk=}7nIjyyR-1_2@_etwa8%S@4waF4rk%skJg04E}-E$_~P9OncWRMa5J&!?|u`(PWnV5r1f z7xN>ew$JunNO1T@SDHL%bAt_>?IsN)0IHI~R?)uMrkn+yke09(#%Sa2KERUg<-T(L z6=}oJtsyy!+G9L({pFgWNpqE(uG-eosytc1HGlK^*>8HGf!dY7z@R`OO@m2n;^a69 zcaEjW_d>W%J@rr|9pP}%Id(rG(Y z@r!!==EZun!L+I0RN0&~Uu%QW6WwhI<2g>isyZ>iqM034sMCBTRyCylL` zQUhN(;F&bV>m2iNM97nHRGN3XUnTyr!r82AeHL2UP++HhJnL{8mLg_o2qk=vg>lk9 z4HZq3Too*0plXf^8u7<@Lp8%LpaTQQVN-zxe@6V_D#t%q4bB-Xt!|EoJk%g_WY24% zE25kaj$!5uQAcMq`N5rnpi$>!&~$8X%!(}mG^ko)5zFeP8R-8l&`G*^3UCSLltkqCeF>Pz6|HTTq~Z_C8fN z6A!KR^3d75u;#hw^%kDBv<>Qzd6WW?HZf9FZPmWEoEM=~!$tEqIfcOnh-u*76tCi0 zr+_=|#n8k?0SoJSOV0}=UngQjD@r$~gW{o$q@QYIFk1T}v)dK(q?&BbP*D%IFfLf< zU0h6tK?t|;nG5ECb{q0no4kw{#M^++iqce=s@>VvnLjrM_IyN27;HssTKrkcx{8;v z^Q)RLI5z3q@7r~?ZK_s%%1i>!osV^KXWuXlnwBsAFFld(PpCm^(45wl)7nny>O=tk zupA{%^6|3hXvnVLYPR3>N;%3928Hr}AnFjutixw6i}pv@Ov2L3l3IeHL-_H|WusQc^c>>EeAe!V>V+mAWG7VeCeL(htv6*}Rw=4P|ryf8NdF2>EeUqHF+TomFRU)^EP#N{k{+*kd z+yD7-vv*g!OM^P=%W*4w4zdQhsN$wfOzQHQ8lY;(NUwWsqly(~$bU!YkUFBgI2Fnk zM-%-})r$D*YT_ERD5YW^esAr>Cp?{0#H$OTPZ6p;xzLBhkG3%)(OVGp75UJRVy^L( zmX(2$ATbJQ<2*VvO0jeRv%GSx^OOR1x$@EE!7}z}C0U!ILr|liAg@mZWDE z8#`8y(S}*SRO-2kR~_rPju+fngi>95Ufwj<%h%fpf@Y}5P&FeK~5?!3o=hRO-s$4?<7DmdtzJd19=J|_=va`u0TWFKgj~5d26p*U(**9@w~aaTf|p zSqaVgx|H3ZMCmY;D$*V~PE>3<&emPCA^1PJhgpq;x1VJV4Oz^_sO$UTfNV#axn zVW8HSnnQRwxrdYAakG5n#3eOy=DwRFp=|PKn>GPoQ4V!yv z*+mdH7S3oLE)Y_Nmb0u5`y@FUj@;og6b4tp%j^>$yD!cb#j6)f@iaE%6g}6yN z#j=ZzIJVn@!KU%4R5pE$kuGbMh&2|$8V`FGWq4SmG%y5TDjS1IWhp7-Yo{nWSnI47 zKNJnh4Nvx_28E`jL1`Z21k$*yAqP%)sF-|Fl+yH1VWZa(jWVq$C2A42Is+mAImTJh zD*ntF9Ewm-_EX^rVjN-s5T+_+vj?GU0SV6dGUN1B+1o$ujzTTW&ol4Fs+^a7hf7h4 zGR@xm@I7Y!+gt0hsn5E%TFy8v#XlHrbttXdFCgO{#}<(qrzfK*v<<^6asXyC8{mwLJR=|Mho!2stO)fYpq$_q~ZY*Zg?3`Po4Xc^)D2KXqizjr?&> z*OF9kMUr@h@Kj9xus`aS=ScIX;^0y$5srgG(J8Uy?oe<}R!LRoMgg 
zy|n|cS7tx_x`UU^>6O{8N4vhhD*NnP+}A6MDH;T&ros@JOZ?^~azqr7GGcTC#OS&s zW!*4*dG^jPxfb7A#h-8a6AJi2Q-G@!e{dL|sp!X6CO%0ubddyWp=0Yk%_##F1XcB8 zol_1WT#;6IDX=3E@DJz|PVTD&^(IGrj^x}qZfu&;_2MTwryT0@bm&fg!jdwVR2Ll@Vp}2Ml1@XH1Ip;I)S*MF zfx3$IWSc;Ta*G*@=)jB_(?2OsfV#SC(Vd$+p;~u{c@u9iq5#jFLJ%ki$bS{!1{v1a zqVT%{4jf`mY``C^YK&~95_A%I^x$nN%!mk)7^f@4U9oz*0+|PA7u%DaJ35hrP|R)2 zZwfG5o1y9+mNsM8Czy+^fpJM!>=>%Cg8@i!E6W|D7;EmE`_nIWk=~l@Q=U+M#ZrEE zj`%O0GirAKnxwq3z*X7z{5lMZqdXR5r@!L(=Z`C4O~po!5)^Yf0L*;K^1Z~ItVB7A zDps}JwQ|}JtL!xCko!$a-Py-laj z>IEh$b}n_tbxjeS8PzJ3J==g&yj@IBhDTYY_UKtX+8Mw6Jr(3-Fv?j!IW#)Ft}F}5 zM8sYCwj#Pg#!op@DM%R&WsB2(jzKwm&65Xm6L3Z(Wx4Hn2O{Zm49-* zjHu3weA?L7xJj`4e3;OV#u>P+3^_{S#VKba77WrjamU18aPN@4_DIP4(RnY6F=0dH zN>4lcq}Q*07KK%zKS5NPoJb7h8O8Pik-{q=cw}~-lJih^VQef^IbO=3wr`zQVQ)+_ zJ7$kp?YJUl7-h>Rhy{bA*k*PXUB$nT$5X7-xTx^G5&tV%qlHkd8w&GN#S&9ZaZ}A7 z(xDIe8_{fE$7VB)D5<^ac@<>Y6% zwQ0klLF;`Xog0>Lux~+N45ZEx^|XLh8R>MsKptRF4QdKWvlc3;CHQm_-6B58Gkh!C zk%}O;8;mjrxEu3An@|~V>pdfpNsUE}!bplwY72B^q=v~RD95w!f4)m$X=mrU4Y4Hq zcK6qpXOI7-d-zkKGbLancBZ0tas(07n*G6tv1n>t)(@u7C)`#%$at2i^lm-8xa(ml zTtWQL1G%l0KhG3VqqC3e`ImG*kL(HJv*$fO%=5z&*n}UCVxHq(+6-T{}3cUc0Z1ejhMPY3>!+H@)V>q@(L z#ZHo`Df`=b%{WZ)5XbpSth-YUi7VULWC(KE@I;ns@}TYG${o(6v8`D8yicF^s7#4NTIo+kwaia+ewr!rr zFl=Y=jM5e?937D3*enu8Omdy}h!unM17>Jx(+`sYr+o}mGdyC09Vdu}=UOH^-P5cT zRDz@;7VZ|lHFJs30VI_S*u~lO<=h}09-K)`^3y)1b@SWGyZ_V(DXUrc)Wn4EPG69v+NrN&4V3i9+acL3z~Yi8%Tr=&j%9y+h_m{J@?bI zfC4bE?F~3OIjbQ|V`j4QNyGD2K8tq17A@ofwnfU!$5()v-?dC&&p68?^DE(3zJL`V zx{lqd=Jq}ol_mpJJ*Y9RI!mE+xg5MwBXfJ-QvDdW?xCbI6McegY<(;%(7XYTCdzhf zo~foa8U4T_nOIRWC7H#ru*N1Wi-kIFSH6}Yf@BXrd04T~Vp2?7Axe}PWRC``kD(u)Sp?dq1r^+>Xqki&Q1g`9at zaqPwG5A<+Gb0DXL^2F*Ym)E=l|_I&uGG6682Ha``g{V?6|l&TcCUd+wfM(nSOF-|m4}?llKcp{N)n7UbTuXZs&Ub@e%o)qVY<5CsIlQu zNuBMbGZ{Mm$6=P!GEgm-z6vbB^b?T6?Q%Ur<+qsyj0r_GHeOXOhL}Vp{tjKO4#!*Mmo<61qqUF!0MID=K9xX4}L zR6-(sncqat9!=I@qGAr%qFuAGNd|b3T=tF4dXpbgfK|p191afpsrVlc-_ZFDF~Cb& z4RFR%AWxc)v)uZGY?+Stz{w{0UHeqt16#w6km7^#j%)EuKQ4j7PnNr_EIpNWDA>{E}@S%3CXF50kS8ZJiUK^K}bmH%&o?6B9+ 
z?fbDHd(UUw?>~y%DQahkgmfoBF{#8S98n{u3c1cp+L&ov`R4nYaj zO1Hb?U0EB6CCB~^I*uw7Naq;alI==bR~n}wIDn6-k56b1AV653mM!sU4sRuC0tyIN zXzwb9RMw_XX+!7ON`kv@3yzcP_!*hRUzm}cpxSxII^1Ba3NGS>lTCKSmj86t1Rj|%0Pi`xuf zNe8&K_BOgx>l?7NhGZzn;I`D^wu*NOo(*jTs2JMh$3#+c(}V+TN2&=IJ0z)0aHpZ= z5FW$5d2=*v+MBsDjCdL5%qWTu4H2(th=~jRNU@CnneXD#=e;n77RU<;8C4B>Lv6Tqq0Ao?R>bQ^jbM1=p~&HaoZWJB)jE zxXG%=2Ck|y0BmsL7Dbu{>}{$9+{7!;F@N!N@|V3|@1iGVf2bUtf02xhhUKZddC|zt z@dGxsyJkH$KWSVRZGK~WzGeapx9}n;g~nrpWy8}}ePqZ}y2jbP|5my^ng5^n0s)Rf zx>NQmN?+1?%@UZGJj_D5Z9Ho#QNuD#8kXY*)-g_WqAuWShJq799tfg(Hlz->ttjd| z31ZUrZJVlHcXLvYER(v4LVg1v7#xjkS~_q1NyM%yuQY78SVQu?8UJ@79sqD##nhiR zG4U7Eg}(*-4&t0Q02(>ibJU&ih{5{@LJvVKlvxjsbLcKxH|%Zk+nmm61b+#uZ#4$p z;+vk|uq^+E>?Ah?xArLbMFx-UIcsF6{18e!r)~EPL>AaXZ)9eFwovdq-J@f&@Q%r7 z(&KCZkT5nNv$jtNRA;PD0j&0r>-@<})Xq~q?b!U!+M`d{U%l3p0m`H2WT}9NK|CrC1>CX2QKcGy!Qia)B-1HMX%{5 zWvIcPrm~sUd9bl5&BrP}bQo9O?G^lhUa6fb{8>8@-_rmZ1cf(x<;r;mCEA_#SduW2 zPAvT-ivRc$4S}*A#**@=Rw=7&2yWyVELu3J0Rg67k~`-vZu9GHZk%LZ-^?ecbKbyD zcuM%Iuv|&fN9HXZV9)pVQ1AgDvrl}0d5ha3>AzNU_MQ*e;8F}?&wovQYxcLA16=3LYtFM9^XIpk z72ckHi@wU$`!2ms1dW7RBN(7o$fNk{zLXnpRV?EQkZ34&J-LOqZb7aZmyHoxv*hI zmcb0C63TEi)+zp*o&G!WYAf+uc}}$q%n>0e=X`f?m7(}AY@c7^e*>JwZ=>0+2PyCK zzNTJghqt^KuzlVdOC<^9VKG*n^LV@@c6$xCQQWnck3VRHe{M~zwI=isM{;;i81_zr zq@1C`7e=)vaf(BauSHSy#e0~#rl*tHCtotbo2L(yjlMQ zf%&WWoyTChW=%i#04)D{-shf}z3j1)*$l?=Wy#SAevrHqnz)%KNRHsI3=Q<49Zxvrs}e@NRN}R$~U7o^(#N%cm}cTcqnf4 zp$hv$CK7%~Cfb1zn7C-jP{|uWQ%n-7T;aso%?EDnQ3!UQ`@Bn^_qfjuKJK!Q*c@!g zUspPi*um#vb^y=mr$2~uqT2Da1~jz~KUXL6iTjwp0YMLYCBFo(1ULX-m`f<5Ly(B~Wo`s{(g$5}v#30}nV#U=^5I@$R)K!6tyE{6} z^s?*=Cmb{V*C?kHA^}op9ihlih>}c>rPQ@?5i9N-@l^CrK9iy9{H&(0#vdkkw;e!5 zD2?N3-FfB|HbOF3*obr{i+OnWZ(RW+ILS~N=%E#)xuG;`$;C5aJ_&D<`K!+8Q~i_v zHHN$E!`zZz#F&Ee#-uE>1fu}biae&WhY1Lh5K0G08bHTH#ZaPOCH%$5C|{X%VV@~s zqj?fN%Hu-7Q=?@;w1rX%4ThpOJ76C@aiIXHh@GiZ2%oX)6RJ7T(I;^XwG>FfQI-W9 z%+ypxB@(|k`=#0Ee$pk{R%HKl#(nih$g=Fu|M6asF?*1Uc3+!7T%EmR+HKNa59E!9 
z1*?KJQbVICV5+QSqCX@yT{~g9BEy=EeX;{wTN}v7&=XT(8cE$oiD!Cb4*^=ozmqMXK1sIdwf$@_Pnw(g{LVpf*(T1ODQZa5;#6|` z9hQ47sJ7yFpu8BQ;~i3CsF4tByMcWI`k;+gROa%Oo)pbkaRnEGCgeeZItd!WJlho9 ztqS!KRIa3#HK~-oysVj|)#YU8@th151q$Pj%Ya6ZsB>I9txf>C=v!kmr;4k+))EQO z_a6GTL~>_Ew>mpHTAdKIFqcI0-C>)n$mEb31Bnl4bggxmKbo!^bdp)EJZE6)1EN}Q^A6L4{i@KL97tYL%_&+fDpvnVf)g+P}Hb?N$x%aL=NQji2Tlm&b#w{R>`(wIaO=HT`T*a<`5 zM1`hEx(97dssU{kRZ&swji8cUhKeSwLcSgZFPPOPdu{C}oH=D@d7I}c{ z*^vBOggF>P+OQ}(0S{NMpk(p8JUPL(Ow4+j)zZtwd*8!(D_=V$v@FtHJl@I|2HtC< z=EA76%TU@Ae7+ib;8LS8h^2(fSC_4|f`O(f$-cl@_xM6YGm6xz;P+W5x1f-KkTV~M z>G?CD0cvI^Ye1Ik>6$DsVVYzOZLT23z5t{}pn#aUT7TD#& z`A#gykm03LZQ3CRo?i^W8OC!0_(Q*9fNS*63*ZmE-~i`9l5#fxntmfMM+VzOtylNM zBIFYk6A&3?VfclR6ANA$REZScGQ;`q@_U^a(b}9iHE);pjN$j*Y^T z?x?OBzt`mpysjS&Bum6w>KP}s2rIH9-`EAe_UMJ4#jnLs zHNjQES0Nj9t;|02$k>?k0oJo)MrsZYREA@^$I48OoY@SMhNI{3o5CWN;+Ri#pnbd|GtK}D)?y=fsQ{w3Ue|a6`502 z3YI^mmf^|#ohszacF7rOIa>*M8~fW<$Q!YB$(w^*d1=37jA!SFD}by9VdwnS*(@LH zVuA-}NgbCTYLdb;d~64+=lWUZ;mdKI^1bC-*L5mYa7QM`c643NJ+d{9WP30I z<{acib_AJV5ecvpK?D(r5(W@h1_di1gF-|YK!5-x3`7J$1iruj+PnMfZ)Rj5H&jyB zg6XfjckjLS+H0@1_F8MN?e{GSG;IEPMVP*sU?{VjTocr-XJjn- zMzIHeMYipMt_NxMMMZFRO03LgzULmiC`d7x-0D=zAbAfP?E_im&ECO0Nj}rtTwav1 zhznN;Y&s*%^6aDA9C(j}#pV_t$K{khCaKh$Q0!gU!bXBP!4Z&rX$fO!0csI*8$!tg zqqMORGruXx;&njaG+40eQhRX$g$!o$#(ut>9l7>TKH_c?(+t4S3=Q9Fx;o0Qv(v~s zVvxbk@xurd#q-zHDv~(KE|Wnfa2h3}q$Nh_4uW%CswLm-@uY3NFqX<&3-2Oqm4urt zdRmIyA}9hwBoJtCepQh-{AE>P@nSwt)NatzRgdesI*FatIJao4081E|G{dHhWwQrU zml1CskKB55f-I{p1e)&h0m#V6Am6Rwo0iXd^D3Fw z2BV;Xqc^V9>a{9v+=k4GtQd$Jz6J)X%wb`2YDp`Wc7tKj=H3mw!N|>+dE@5hoq%|=HbTF!E~4QIlrY$~3!4JKc~|7ds(*kW?CaZ1OE zd>92=*hKw^H4zEk+Zy(&I-ADgpxsb@21&p=0r3_9Dm%JoWk$}b+5O8xA%U$b*eN$> zPaHxu<>Oj5c)b>Su5Pz3t;=*8JKmK{4z=tcIcXtJhmef6@WI^gvP0Xd@kX@Et*E^~ z39{g>w@{fJW@bv|FN&a+mbD?Jtc#r6j32`CtDsl4xcHlzerG_tLRsZyK3U zhhj-&`j5P>6en-QsN!&f;oKlZs7Ef4(sk+ao{{*Wv~eFkEUY7!1b4LKYwrepR7iv2 zb#lF_)tPdu=+HekQv`c!^0ge4e)H&hR6Ww;h!z|Q8(bhqQ-)<WEqo7%E)6~wT^W2s&w%zYyV6dFBUq3@H1w~SDOqll 
zzXXMs1jop-1uAupyl`Dg1j0j*JWZ*s$8;YL8eOypz5^fET$L;#?{QV?K8*a})Kmge%dN zVF=oT@6Zg`D$${X$Cc;3gfYz-sd9_F?QtS;TT4H~`|!LtUd0ru$8bVQH(#U4>XEXU z=ADuDtnnyL5JE&+HtFLj{B!<640UB<~o? zw#pSun)AL^OZvk3ro2=-(7j!?w0<&qAZ{AhF>R5q)?H4k_Zj#I8??GCI9pqf>TJUA zmyDEI*e0#Pf*vNWbW%{9v@Z@&5@JlmB%Y2|;kA&y4&T=g*abi>IOx4kw%uyE6BV@1<e1zSfI0eh1|q=6XObRK^G?slBMCRzOe(>fjZ6GxhtVSbQu2=iJH$xyUQJ*z;QfS3;cZ9wf`x~ArFb!A?i1GX zqB|vtcbSbd&UW&Fiay_~Rv`?S$!-;xWO~&Oo8>`VA+_!2Ym7{+< zlY3kSTKB~cthcGKu!GXYh;l#9y>Psx?Z-Bo;7Mn^CTi`L_~e`&f4!Iw{Ajeq=u~-L z*?o4ntRD|1&6tAY9U$WZc?VWm0Jp#6{6*aPNwc$Fo-{1m>DffxO`09`HVhdWMsZOz zLS7}}98$myp~T)v7Cb+!{3lJwoRy*>LSs8*oqj1;$O! z`1?a##oAau8b3XttLy206**^p+-HCH;|_Ej^?bc-LZO#sj(SdMv_$NAMPsCULB~Nj zw$OrDTen)$5Er))?b&v$OrX;KE+)!_MlTJ=ySBzzRp4zjtUy4XqVtBT_JreOj`=ob4u8T z_;d%Z8Nqedc0m)>YF+#M0FTZYkf-@8G4xl7W9VZ&W!jXuK=*0j9o6h>w8;Z!w7KXy zEhmu3G7I{Hx{%Hys8HmTZSQf6Xd86uZ8(m=cl8)YIolj<6MsJ(?S@YPcQ%6(Ab;S8G+gQTl1xo7OkA`?KZpg!R!U6~eHSYvK~xy|0FD z8dCo#ukI2=L832#f1067ObOK8v<>8$rg2^y7P#uE<_5--w;E=mFOnw!(?TK?A{GTB z=^LCGT3VD8Ne+ECY~u44D6nG3{uc}P{lqV&b-z#miW1b~u5|%xjzPBu8uYNyt__(A zXnD~KJg4x^>={Z2)#9L<5UY@`1+(qE&-J%CcN*r_ii%nF!_fe zFf8POEvsV>X}}>FR(l#6_&C$vEg!eE+Vla(?M-To zX#Rlpq$){A`X5Tu2WhiD?yd{mecIl(w>?0UfRqlra>xQm+5^7V11>>^IU9spT6L&h ztd`jy5=*~eaQd_89-M&S)#C5&FbwO1%~Z})P7%vqrjy86Wo-f;E@DlLC|?;SiHv&Z z5MeUBR4qK&q4Ysy;~z=Xt6TZR*EmoR9-N`!;GG*}G1qt95Phl+wNY@Uo2eWd z>^t9DkZ2`c3ao<)K{X>4{-sxTAZ<1EUM}_1!%gyU*>j2ERD7Xa`kbcAt+sV z(-i?kUriT^eYkZn6n6)jISYMQESE|P;DuVo-t*LIYax~)ZfmB_8$a+u@Ss*W2gUpI zMa6X_);5AzL=%0g2o*0o)DaK4{5;2}g{%_&L^&Dru60vRLSW-O z12bY6Ea%xXgW0M#tx>=0oGRK50J)-q*pbK_rxza59qP~oo_*`R6*a-X)4(3*FJ7?Z zlcr4VninOzRfDu1RSE$6bfqslLCqfh_L`51ov~VX+uupN^A065v+FIR#)LFDC~Rj-UX%53Gt^k{p5x?V{a z6kYF#3;!vTC>zS-j8w_R*a52yR z?9u{4_jE=rAl`|6QJaxL(eVzwTgM$V)+`#K$i)d%lLgRyDyk8N7Pw6&WW>1#{5T=9 zP(nm#mQJk$PpYWaH_|@+cF4sgFUnylWrM*1FDXK4lxQ2o*DG5K3^wjPLurBkw> zlBgKH#vV{h_`tgvz?zoNZ1d2*6HOz}tab^;-7+U+yX-Ha*p=iN_yn6Eh>qbSFBn*- z!5dkjXcGE2) 
zbj-Hp(sCtD(-wKV*#kr9^^0ZhXvwYOEr;~Kh94zeB3EnMyeICBR-$IrL=<5$PX99CG7Z~@w&CHiKgUkl1%~yICLw` zSBDM29-)>2>4%lV2%$B+cqNEI^`Kt%p#9NM+r^;D9b~LwiW0(dUZ%A-9ZdGJqF5VG zUBEBEcM^?EKhKjztEPW5n0vKM61mY`mOcRpPzcN5Hnzs_D^`G5+S=}7#L#C36(ja@ z&l2)p#r+|F|HEYodGEh_mXHHavH$p&voS780`@-bKekOIV=RcYJI+pD#xbcaa^kDZ z3#A6Z&8;x8HWmt2<04`v=5l^fXfYR_U(UA*D^a$-V#nHT=1D|dxD{z``_(u%SRtG@ z@R6MmuX6_Vlb9ogCDm+qwUQ7f-@+B3_$KXDyP@5+Rn34=dXAc6QTQtlalTXSbp()@ zFxJ;9+gFM#eG4vGDP_+@)c@^%~Loegtf1l~J}l=FZk7 z@3-3=8{sDJBvVDqv;HCe6&`3CF_|B6^Wp$LN9bL)e;0G9FMEeBNjAYHdBE>^U6lRF zpSW<#<6#s!@4sePtQustR&=El9Bvc=K-XdNk`6+tb~B5OKKr4K8<<^I22-fUK2}&t ztl8l*>a2}P`CtOuMeQO#0L}+!T1p%88MyftXM`#tk+i*cJ#}z5%w2^8x)tYWtz5=) zO(pHNW z3`bBj771Mssr-sL``O=DnRB|-71_4WvfVhzzVjSmvT!qt#622hhkRObA9}kq8{h6u zoAM8qWRLtl4<5a!`8%;ozoRTkb;hPnm$O4`4Di%aSAq%D1C7!u-eUP885>V+1R-?R zNRBgZh!YjF;L0_WJU}Jt7J&%%cv#%Sei7Bo7F)FHV-aCsb&?q|KHx<8qd9KG$`Hu1 zQ^quyO%@P6)f8wn{umG4yxl-bI(xKR?^J3`>xFkPZ6SsEv|;B@9ORmmIeyz=eub9B+Nd-10?&K^_;m-)-C0lzeXs2c|`UowY*MymTnoR=G3W@R+EN2#C`QUlK&o-XgDl40`CwQ}M z&IV8gqHb4(tm&Y`g5*?_YB^EVa?ZZN+3aT($IaLioXzE04pYl%)p8oof0(>)B^Fxc zxl_Xcifxn1{o*)ypF3Yzm4)KObyM_RHdq+$Y+1$iKLH8?m3wgjrPU8-!7FUb zLo{;rL+Q$qaC|$CEN7yinsD3>tr0HBv%|KCH2Upy@UB59vx|)f4q}1^o(= zH?C@7vexG}`|^Ak+>ou_OcI8aFG$AG%TW3@!G=2XD%egh|J?W{f9hWw$59_JNKy+n z0sE|YZ=+3Yt@YDW+3rw1kyzp{kvjzqNQ_k!-diTIadx+I@R9585)ZN5x~VANoo~(6 zw1xIy{vLCB+PHBx>f$>Icct@^*k`!rdc!F_V;MUpmj_j!_Xt|9xVUq=L2iueC0H8^ zm9+U)?M1j$^lFeBTM6?PC5GO#3H#el>FgZFk1%420;;8)zn9a9KqN;#(RpHpl7k+P zR7AW>1nFpSl4)M6pe zVNb01wGSsBT(@Hht4?35jGc_5tzI zIeBCg@FeNyc_SGqE-6T$F;8+*kii8=;dv(_HcA_`R>Qa$7u<=z3ZNDgA}N?@Z0GP8 z1Po#%H!+;ZLZ{o*L3P;}3{U-p-Im%iE!EoE6>o@%tTwVbpoSQzDXtp0L%M?NgQ#@} z{kx-j7QH+=tSi+V(er}_++i5#o12P^x8&4@h&BtXKx+wY&@uCPMV)sJGo_Pn+D~pmbS|7#>xr(ZKULb%Q#AdQcExgtcI??S}1K~rYhC+rE5As-Q88HOAm6Z9P! 
zWk@Af3!{lgs-m>ISh+=ab+LQZB`GxqsD+er19YORt|_alEUQ=<@y$H4ir}hB#!88p z0XuS&5Cce+d-j{Hg*CKNH(Q+{n7JOCKoe6XjwqBzjd42{xk-93GJC5Vu#>W%+DnTB z)m>F=LOYXm!*!agM%^aj3G9V`$C`xvGgx9|d_r5*dxhWSwA-&ewxK-OcAD6)vO-LU`&jEDKEIgjY6JWt#&=bFC|%iJv8BDz1uhS5X|K|+)oE)>`=WH^gCkcz zJQCn_U8(1uTqIsTV6vYExsDM|2`nTSTc%V7g6yF z`-QB*W4ofYIvr}Y%+hwT85VKr!gN8Af)S@R4%}~NVkzWhOjn+t)BDvU!T*LyQ9m!X zpJ(-Rh5g*2nfN;>Z}(=A3jXII9Ehqcf5sPjHm(X+Q{w7}+d~hgL-#qEc`-HSPufE- zXAM9tULM`fvlZ6nZ|JRcYmX|(yTBt=dQi`(^nfZ^%C=-(sdOJ?CHl+8SpVxlm;KCp z)VFM!pu6{bd7^KR!J?q3Y=-xm+vmF8>&TCK@@Jj$N>6=QZNQo(_jiSZk)N&;9ti+q(nm;e#jNKYFzKj}E6nE-MC}OK zc{^64&`M39MUDBUt)s`Z*N>iRERIo^q}H^qlsu`c<7HT!Ue&IX&ee(#o1m)>Uvrx3 zaosvfJH^lJ81bW=eF^=%xhZwpsypKf<=@QY-?R~`r**^itiC?OcaEt_^g84-P53=` z2sF;~41vh9Lx;kJ%%GH&#LrRV=@1_*m6Vuw>{v2AU_C&ft++V`Ct=pZ{z!9X!<^p_SwNHw%j<4D3U|a(gX9CRY=vZ zF;0dGUovkQL|$5B9JEyjKB%(Fazq()KoDPG;$SJGU)sCqr>lf89hH5d*Ozg>C_|G6 zR^p%s4^{p+-iZ1`w%TGjB8R$mXe4g+tC|ggT2_d}Sub40Wzb{ba@XT)XzD88+dt|f zsUdPfy;WVQ!|`u{LLgD&fh&3QkDraM?b~(>v)@xO5Bzu zgObfp$7$nABZ$bk(w@4%*=|brVL2p{tI8^?_pLr_K=!mmhsFN*`It>hK}3Cqhtu!x z#=~^^{zP#jvS1pPw@#tfF;Z9>pSs^AW_6(%;iz-5p)e6NL|P5neQHa)H|^V!Hg5$& zYj*4d-OZjWTH^goapSs8kJiwmVc0yHh87RKP(+pCWU2rDWmY4%mv^WDs;FW!%?FqT zL^KS2=p#!{|8qYW_QT6y2Q?A?oD%`OlZX^G8bh<>)Rk3=wa6Nsrl*lH7g4?ABC5-l zjuCf3U=DrAe-)k!VnS?Ei8ZYUD?jNpfDf}0APGK>Auj7tJ!IJiM^q&r3p4m%bs7M& zv2Y9pQWxZ#kYnC(oz|K^=VK~AB3!c-UjBft(--274IH0?b5Nv)Yy979Y}Z{0*OhEI zi!krZ!ZiU|!gcI$9XniOJC+Gu&Z5ik#$9f|hWRB=1%Aelun$L=CRFGGn6`0`;6$(o zlds}rR0?l%DNo6^EwV4{H<wm%}$)4kq{L8B57hjYo zqMYdQ9!|+b`K{YH4<(g9j=J@)`KLW!q+unKNxc#F^!LWeV5M3cXqqe18v$=uj7^$- zFUY=uqw2l~&NVf@y5uDhc^%#GKN;5Mz%X#0#E{ zD|UFiXEgiD*XSszweu?2wk&fXzRS`RePKIj*g2E^qrQl`zOZOy#oU}_-G*3Awl6Jz zZMlcyb1J}^Et}dw1)K;8oXf>-_QXgogH4IM=v97yC{CU?5zTHUUKAY~(+zz&rF<3o zUGC85f6TzH>VOBK`HhfL%|B4f0podBx6bLiOAIOnZ-J?Hlnrkhu;V~cukOC-^e0Z-T7t+rtu?hB@^K<4vyqKL-_#lTtaw^rA9$}+ke2fkd7~NZT~`@SYq|# zvo=tk)D;j|1{o-mx;ju;HFbmX1A$T^<%N7IM=?>6U7+w>pzJDx@*{m)24z7X$D(*N zfk63c`6eZvmb-Prqito(PBH~R853D{eSxZz&X^D)ib 
zF=p;eHvs*d6vrF01#x<7jzxi9#{StkUqPYF+G+{&{+j~p-=F)9DE1T(S-KTB-?uEwp(2~EYB;3#hYFrS4J>-|SukbO%_e?MtmZ#-je}mm&j6)9o!9{Ayy>s3Y&F z7Jce>*bz@8x-qi_@reo{%B$Wb)ZjG8)^#A~eHn@Q=IHEcqCq^
    u9s5rw~cMesTJ z+ggkT)Z^#dC4oO!c9>-dg*Le5W!}pZVu&Z|quH)gNTQw&Bvz@z#I(vxQ|5zjp-iPy zhRts_UTh7sD+!qQwpgF!gEvqbylA8Qr-gJeh?QughEdbpHFa7|ou;Ytvn1;xmQblS zX~EF02)= zn6vkEqlEh0obvRc+St;YeQ4Jkx|n#0;B8m%KFU0Fx?54ed|sMR`&HF`ihWO2mB^tp zGY_f=0c`bT8itV_gf^JkXeb2Obq1tUeoE!1=4lz|8r{?Rh>9MeXlXiIA22W-*Sq6o zLWI^aIoK}Me9*>JXp9QJ@MU^gQlR>^nPL|67+2x(Qfq$lYY?d0$bYBG>?~T%*@Mos zD$hu>SeEA48Vj=qBSlsd>O=Y%6BPAp>S1k$%agcZznBlqE3aLfd|0~Y#M{Q?BhOi@ zjy5bWHd_PnomAx{6V?tTS9P6SZAdJ~eJs_Dy zj{Wpg+1X13GOeQ16rDwqD%vcS!=7yjJZIN@8rIxe7$Y}Ysijyrjj6k%{)y?pE>2fV z+QoOi+D)*=e=x_MS0(4E#Brt^c{*&?%XFzg>NxUxLdqLPCQHenPD=$J(?hQh=_ij0 z>H0_87?YAdBtfC{3Xm5%V@DtAP=enHk?)AsjuF4r(Q3v41#N@4hj%lx`ix!LmOG`w z@G2vT3k92aCNcYH3x-g*@L^$*ZcOP4m^js#d4@pwvP}O}WqP+?@5roCZm0)?I&bri zlkUrk7VC}iw*gaWx`^*U^ilF83mS}4`qP)~{^Q3K40;5qvpz>RCA;wUR@LN|5thEd z5!jSuqEby+h1ry(mz^uE1H>dauqAcCvm71tqyX=0LRm>7RI|VP5T`H|5aB5AK;%(W zjO2Iqv4co<6hV6gX`Qi+Bm4f3x>QXPaT(86nJJZ-qRdBCrXy;^d$E*SJqJ|=Q+)O} zKc-BFTEszY_Gvg&26KG&pob&J97aIAF%6DuF;N}6O~d(&)fz^7OtKkqvJxY<&-@9- zs$gu(jdu%~TYSvg-OvmU*L!G&lVYxR+7j>YblX}i_oZk)nW?q9h+>)+v9oe~FkF zqYEQ23%+3sW5H1#{kN#5j!us(#fEtgAf3j^QDbSHW7O-n09Mc5I`<=t<)yOzSOjo| z(Y#tVn&(3>qam1K2o61iYE)W~Sa#43xe{5`)%khbMvl|#a{GMbCdZic?J*V|vqiI` zkeOIU_)c@#f>Dhi5}cBoc*+? 
z5=EU5_6pA&qm&n1v5y#g%)%ZgkJX({XA-e;NpShK7N)}otUB6Qb~g>QtJP|SfA(Lc zQVIQkl}a4>|IpXmntDTgJ2hj-$^Ioki-$qkE1Fe+g|X3^c9NKhx1xigl1GTe5liFG zVux%C-t6!t1|{XAd-;e6!R+^>!NM}A8+OvERM8r7@@aFm8Tb_$^Y!KH`0_MgJ}pa4 zhavSx+2_wPLw22dP{F)J9LLuwr2~5?Fhjkor!zaOmOFWcVIljJxAzpU(PtP6k^vZ- zk2-)W%BlqTr#ptCNMgk|i9+1z#76f?-qu^ZP{`Y&sKiU&P3x;?=JF_XnpT~qraQvL zc&CMN2hE40Fw*gs9E$^qpb>>|B&Lka=TKMjd>=k09uoydyGWsV0{Q>}h@8_M?k&$5 z46jcYZI+Nt#%u!eR>2E55!VXSK*Y;EOG%mTZ8n?PUg^b??=RA4q}NHS3b;}gw5$jy zqO=LP<)?Pw0VdHgt_kGhIr|FY=Tj?8j$pI`wXc~HZX(UhCPHH=VP%qk#?+#jK)Cjm z;K7HPsQ47v37y^5xPO=FbNOigPWf?&iq9tI~x)h*r0#aC^#}+$lo_)wm@S*@8L-NUAtaN!j z6Y=}F`AAr9qo_r#kr()_R3f%lt_0|)r1NN2!{3_w%_RXcMArFoHG<_)w*0xR5(djV zD|zzNNw(;i?71NCaIRIL>@X4W8oYcl&ODsh{8>Jl?S9xa(N{19;eI~K+D&ELz1l=3 z_F|vm>(yD~c6Knhg2yOpOm%aXW-a4y@-@K8GzPXfsm&RdCf=i;k>nqTvG+y%jm6NFp}Zu7I+_* zTibDRXk~H~W~Jp3%=*%ByPaBM$il8o{1Fyn^qJ(M9J||#aG5~CBwNbp#UgGI`u>Z& zD_E;7lt1r_8>V?G63fLT4Tt-FNpSI9*s^jYz8>s|8@>+^gse0TF^J?425OQ)R_OS27#6S<+YulYJ{IpqMkfj3Z2(VQ$_ zzBxAdDK;5DnbV|f@#Ma`l~<@K*<>#5Q7B5+*h-lYE`29y=mfLYb^ z(Z{~mgK467dTa}?NhAmrlB*~R}iQCcf`Bo>mqn;o(iV9}V$^qrpx@*hWY*AO1No%Tl<3Q9`2npxpfo${H#E|hp6pD1 z)6fcA@1YV4aS^KU!Z^7jPS;~YObMV7m+cWA=P8`><}&uNfMNJSCHdyUv`6QJ_jX{0 zoXPlfML=?C<$<~kg^ecy7Q-FXwcNFbRa9nxTt?d`HwJclZ zL9QfUF$k_HLqLBRh=jyB)1(XnhK=psS%35DH;CfFT`&F?nwv{YQJSddcqFBXICq z(IN9j(Ey9d^H2addd_e}B46pZFWiI?3NAR;O!hLUcZz+$SPCx<4^x56{x4jE(JZ6& zI+bJ$Nb-a^MG!XE#?u4s1Z%GDh*&&HC}Xx`P(%PP>d7!Vp4Y1$`4!oQrS6=g8-F;> zNnndmXB?iG18I+T&(5eY_&%e8(Cum6GjH+hHM0X`(daSHAj5@(f3%r@t~JX~>I0u@ zoO+W}3xE320a;J8vn0v}?@9;wBj!mxHXtw=M&i4S#D#{qXKj2W@{;$0g;@WHJ3@vw z&u}XuSatF;i#pUz+qy1}9Kxi6=^KT2!0=$gkbs&vVDOrLy`-wNVt-8aZqKS+0_*eGpqaM)e$MEDx41Th^$DOWDQ!loGsf_*c6*qMauDD>@?kvRes; zXpS7@!Ft`-ZskY?cYn+Z)NjRX=pM}2q1x43aoILUKFSvpmHS4bTl$xCJH_o}0+j7)LtO(Fc3Jsw#Mr*Mir89zXyzBL@%@02(L)Jl?9*-2x?Gp0yW-cN1! 
z#MGL-750ouYvKLGf}rDDpZEyzz2P0Do-0mBKasXQjJimlni3;ZaWnAdo zdLaH30_SqBl89W$0`cG?W$C!4?F>2%Nen8&@4k^`K(Dwa;>NP52qto-*g11y_5+lD zQiy;MhIDLZ^|qUbcnAASzE2*nOT$~bPGnG(s^&H>RFfwn39}!n8p|%@zxxe+2e)!3 zIglAbJjVu5#vtbha z^wazoFN!hvzyXB`ei`c4IOtO#Ma3zgPV>&LD<`PZ=YB{4&c0V39ePi)9XWO{ggnwG zINYr&S4SOrb%cp9Zxb8W@|+oA#37Ok&K%hFd^5*A?wUDKY386m*rYK_a=?6&cm*-& zpPflVuPCPpjAxZj#Uy z2L!X-8khKdO1WyCNhR*2r)nxYl^prO$W{6UEd(Q1>KDG*VB|mP7yiXyrz@XRQ2uIi)jqfKMdP@mlq@!#FE z816!(ae|K*X>HCC#4Dmw9OFk~HIrqQTnDJn2nBKS*@a9kcGMBx22R114K=`=Qu;De zy(`IX9$y-ADu*;r-!By<`KS)Ps9gfnrFvdsx`0QD2|6Ld9<^MIR69f^tHu9Z+7g3? zI*kU=kYUlv{$#{i^<*a-haEkdI%G=EFG0|>BaNKO*i<)xa_FXhsayCE=TOTddd z^^F+yY%$*TKy^Bk$3n5ckWVkpPM>89(_1cTzOl0yk7Yg6Vi|b}N8zW_mizcosKv3= z;;VQmi9v9Ew2@z!9UlH3O7Q}e;);{0v}AHKFw_T{@1T_2+~~f~^^{6U%NrZyfhOVD zpl+G+wicBIUl3oogb6kkvMhZl@3)iRPLh>$L>MkX)aRBm>R76Imn{-C29=^4kXWFHAEZA?@R zhz3zcQwaXcdWK7kXWYH?y2?h8Xf=B_xQ@Rk0^0;v#q6e}*UXOqV$T{acNP{?Jv(=t zEjezB9$RpV8`UC{WH$a>>HS!7!>r9np#uj~AFxPACh#IsiEyu26hcrnrL`V_N!~cr zN|;m8CNf~c9quCSLH()L63W@0-H3lkM@p*>ZN zZbH?dn4X2=%r&bS3uQ|aQ?*ybrW=fVblqM3EVuPFJNQC> z=S^@hSYFRfJSgf;N}mB-zs~v_C^z2IRe)v(pa=~> zh)PQzu9aZPA?+;{rcW2F2sg$&6_vm$S5$6bu+#u@OgL<4UG0`XuJs>C$vUgUfHF+#CtWk-o@M7BtgEBSE@k#O#7Z zXGmJxfOU15oM8!gbh1s;P zm_Q7|WN{;oGMJY+SUjf8{^Eu*z)QkqI+ne(6{-{JfQh0UFU?XvrHtgy-P}@M0h4Dj ze4j&G1M{c8z$~YSSots!yq3TM+sx_QS&T`83NUa-846yIeAy0oR2M?b*fHE^r1DUk zRnp6Xfh}!Z(h?T%i2w4G;?)9PvW@Gi@$Fkg6WUKMV=(PJ%Uavz4}+j(r$!1j)KCPY zU~R>0Z;UFKlpCrXT`NX6^6?#yz330H(-`N}!jWWrSa#BFFyEHrT#>ZSBoVNC}cL(%pVEE%B zXmq+Xp3dU6>W@=CPT94ik7d%!-$K&xDw_vd!3##O!9-qz@g+lt%YVHFqxCd%0hT8f za6RyFyN)|kVeD=Vcj%WXK*)?JG_XKKcY)EkJ+sKQk4d+Xalriiyk)@@bCU!(Ai)kDlT1h^& zf;(eJkD$~!s&;Gwa-_SBHY%urBZLbLa0sbFwd|7XOed^KCyIkm<~)`Wa6y}jO{YVt zSKVJCR&D)+G7;p=F7$JCP*LjQZ)}h4y`LP9T*U~&|2*W+7iBGI=^dm;*)Jw05hs;v zHOtxA$~+L0khF_N5M~@aEhktq%Hb2nC7jcNuE4E~frvGTe{v(1X*2v9VgTsBSPKRy zF-i5o_@gO;*-JOI*;&Ub@RxZp@}OV@O2QKD)N;w#u(=;I=hljL>wCT{E}6X~`Gch$ zF2q_*fIv@zTE7@67=lmJ&TRSLq3oLd`Co^Os%6GDnhOdi 
zk+Lx5pF(p`Q(EDOR+ehbgj8q=1PhQk}SO-HjRHfY&Kp1n-_N^?P_6@ z4g60%gEgFgrfk6+7^kYx zASt5gGifftm88CVdlmPp(d7g_2y265r0WQ#jO6zzxy>@ac}<$%OC*VmHDQ0aDZAVgQ?)?LkX$uf)56mTd0CWB#W*TN3ZXaIl)@z3s)_{=4^VV7f+29%w~c-` zx8+DB2fk3q0t%6%&=|x4q80-p)Fx!gjz4U8?-ev@yQhP0Q2|iI*^{3SZuFDF&~6wt zt97Y)4B)eyckSJrR)$*0J+SZax)E^LU=)JaSYs@KmxE)b9$QVIk^R+>Zf!MW7HlzN zcNDwv-U$n5`A8*dxS4(|F#3!f;@TW_4ux}eMmx@?pWl^84#8^JLaQ5y5E3SsBfr6Q zDOy&C{Sho_MT==Pd=B}LiqCcox1%MN(_F{{cnLS6x4DRKb3|!)7t>N&jbggFyOFOo znRKWYmGPtW z*}~)!SqtOh7vKQ)Y%xxw;^~!mBY0-Ox7&ivPp3W)l^RX}+)pQ<^&s&)Aaz>r7A@(s zo=rwtnnnPZ_6i#C+{-@)jpGRP*QUCIMzR{nG$G}4l8prV^m9cighJiF8Ye3cMMAa~ z7yKzfA8CK9&lX*Q7G9Bv7!~~;ta7?*qAY2c=D@oCDQuH)>g#-rGLE` zrG)k%3)DBE1%Eg%ZT5;SGO}*drMPuICvh-SKsH;`wbS~I=5>PnDtv(qHwB2{X*WE5 z^<)*z{2t8MB1ar+*ONRdlYo_8$xps@djYk!5iO!>%D_jvsPkmHiCCAg++;fWXO1!C zFKmd#S`96M%XGz@ln7S(esJXTf&XImt+zauByYT^`8P2l*3N1lbIxfi2|9|{7F1b$ zXC}rGEvCletT7Y51l>0m(qHV-Y1u5z=wd@!C7Q@BzL=@WRA{V~?s6ohrll#(!(7;F zw8o?$3m44MZb@~sewyVvx`k(cCB;0`(u-vL{9xos!7m6bxc&YAj2-tXRzk2F787D| zHLXbS7Kp|F2HzkiZ|6dTINeOhNHOQs_GBkOBVpPN^JV%P!- z#-X_I**oSmG`=%E^k z?8wG1YIUJtqD6j^V+eOEKwLAsUW?)pYG?tj=uiVAyunaIe2}r>4~7mm2+{_q5ibLl zVqh8?n5qp-4h*p%4P-j$ysTvQv?Dw&53uo(LJAo|CC{!BA}n7yDCnQ({LwRlxahb5S!PDX39uB~Hy z5kvCrt>>8+1v)*Y4ofyqUUvNZMDrdw!)_$km2BB{?RY4os+5cEE~<;VOI9>KC$X1y zRN2WBu%%DIU9xt`mc|>V@vRso5Fj^;%*2H;$tdbWK*`~Vz8RWzt1eA2_SUTcKstu4 zg0;H&;HbPRDxZ&6iT&W;UaSUB z;6ch>B$KXn8SIe)s_Pn8msAKIGK9m?LCl!y9R5JbY{g zP>qXyjomQ-Ze(`JLuS@WA2YTuj5caw0SHy^W6_J&skkv0_$i&29-D{1DjDDPn6hR$ zwz#pHqF;QtsMCjXky^D#7z@{80hQI5i7rwL3mWjM3S2xEEoQeV0jOFds95oU`Oxfo zKq^ZDQA;I|3!tj>fCCEcM8>))AqRy^RF@A3hik7459pqO-*3tW(x#)0xQGTjiltCV zut-ZK!N+VW2{)R!xdK9!gdZ>tT4WQnP?}Sdiq;SzO_|>HZ0L3oX$5H-2|euyYfY>v zubDJvQv~hX#G>crFY=j^%%0++5mFH-Kq!*m4fCx?_&hAW!udQ(u@!lA0yI{*I<&rwy{m&?a&=&%&0{UV^LNB! 
zwY9v6w!ol)3J4bR?ZW2lC|;wCZHQAUWiLXM{aBxCUv}rcSTx&9#G(~Af%8Fu6Q~D# zl*dn84uo<*umqSDIUy$P>KbaE@ew7c03GGoMbFHmI=ioo^Vs3UOvTQmlJ>>Cx8Iz6>pOc0u1JVQ#Ao-xIE2C0;u zL2EpNiX0;V9v^O|HF2)nGttrQvkgwSM_EKAb}T~zXqHkR%~t9a#%v%k_OKUQ{WH(Ov60S$jlk$3zT70HA*!bW(y<4o|J3jxq8pajVN4{5x{4d zT|l7?GYfamM*T3`Ez5olGed5}+!-#SGfPV`8!iEgUNb9{@ZV-lcH-X!ix$TGldcqf)$T84Q#+lV=AMG|9a};xDl+5x6+XUCsiHc&r;rBngZZJC zU{MsQWr?=&Cz!9Z2?Q=+Gn09W4wbLE|68#!mKi5)f|>cHc~_J)Ndvath^3~mRkAWm zY-u)93glvxN)DAKxGYyT!{1hMwhK6dx8Ow^%l~NN=|HD?#4H$%w2d7pU{i09ykpaRXyS?rX2IeS*u-YUnTPNVl`3 z)pA$a+m>ks(Mp%W|0&XJT?94DtE6{T6o+^in%^}xhH2q4X3#1(nNohr0EK#yy$PcW zR@v6Y*$)3}4e!R{(bkK(vbU=Bl9VvT*a#M3v{C_robRHMpvhJGEVJ ztXlj1>WZ3_4tlu^f~!(VN0L!Ix>Sw`M1?z-aJg2;AQ=nIuERvXKDMZMZ5B$7nm)b| zhdJH3h-jmjpo0E%<+W+$F4j#1Ta;(f+}{r9+0W6HTAEL&_HWS6f`<0=1m4RJtR#^q zzez9u)q;Yk2No|#7OjYvFI%#-Z*U=xS1KcX)eM|i!>oI31CEnVa-iF#;SFuPNVPuB zbq7F5Ep%t77;C7Dk z#_b$!EmE1~Epp-#x*F*S>t^!ULd9mEjL4Cm2A2f?7rRJ^d$S1dg}feH1YY@Ig$C)2 zxoI_EIBYh=6ABpF&CIA~6C_ooz^N$tO?7NfgOSBdoCVjXy?3|T+1tbGTr?!7ZpbQ% zr*Vesa5NpUTIRaD(eB5c#jnXm8^7gzgJLm*D^j+YF)B7T%xT4=LqzrLK^?Q#hp)$X z2aNFRw4%<*F$GTL;vm_Sh28-4V7dsjJNisMhT;eM^rtWTy36wU3-+Zf?Z+|7$q&G! 
z6eq||cJ5hMs!Q6=&HBo4+DJZWwe5>(kqY;)dCNK$w_c>{?zpv@!Yu3v#2b!w#cf&| zZKocHq|F)jI<{bO84z`m9%1#!r<6|KSj@pe)SWKi;GlYn<&t`Lk=gd>N=A)ICmTkm zmn^L9lRf^E$^4T^KKX0^$pAnPrN3aJ1TT4eLH2z-?b3^_qj0{4oG?%g>C&R)Q!8^S zrMarIW1^lz>Iu7Gt2%|OE&q$(1#5Nup?MP)rfD**IeU@fdE0ll7MCYr`XceMt$?zc zF{L?bhQee}-Y@g-izH>CvnT7&8JvhCN$VB;nL)qQgooS;0 zrWdrCz8)yyS6~T5DJ5~ThB4rQIaa9Lr zsHT5 zz)Ot6n7)ekU^10cAEIVR z4pp6tG6|oOF=e-|jMOf!8An@=3lo@~_WAogx{~13nr8msR;C;51#Tw0NIWX$D9&_g zc*=o8M^}S=b;`P|jHL|TFH-(1atKoL3?(JAU_Tga7r6CjpppI9?lbOcVcAp7n(w@?he?rbu#!>%y^+Yd)1~&%kqPxT1B}PT^ zM+Nf8Hz8YS4ORy=bVtAbBPqjSdj@aex$0aXKa6zCZ!U$caT}X-3*t{!U;9`IM3JlXIZ~ zl6?)hC1wc4En>;Qy{n^*G{G?ODTESqLLupbD}sx;Tx>Co5}p@1r!MAx_Q@A- z>B*|m7P}q$H6!|&5zLR?6ob(w&JQxjxQ$m$<8_f6FA}Tg<0V#Hp6&9H=JQ^hPcxJU zlG{!eyxI4DkGzYT5W(>D&J+D(v;*=cJ11yVm6$tTFoYBRl%d(Sb@W?D)tmr zn0z7kgK?{Us({=JT`=&7B;wF)8HNMaLCKMW)_pO7qW2_gsP``u)pK~{@;-KRSLHbu zmUC^i;H9N_i>rIzY?MbNuJG#dS)|))aM7%VdP;-m5 zNft(0eDnAE<~Ww*(%9;o-?eW}gnY9|n}H|A+x}94Y;U z4h*MbWHbYE-M8QCzU;J69C^xFXiD6RJ;o&%_ zpix{zWi4;e67RC$B$b7tvc$>Hk%HE=Ji#>_?YPi~+v%4+97Cwvhf1Yux}N)RJBMiz zx(V|n(XNKa^egk4GMqb*KK(>C09g$sqbXhelHgujQ4uh#e<||-gqHhC4{E4w)uu1O zGN7}x7}6BU7Y3#uj{yQ2IxwII2?nUl>20Rzu+M<0TSo-AG!XPdbb zAH7acn@Y1vK`VT^xdr1wB$+RDH!Y^PzR4dl%mPMtnEfYltml4JEmGmSPwJJeZb%Ey zZ!z@jk-b>oUyAkJi|Zjid@k*RDrG9qCYc;P!f~i2PtXHQ5 zqes`Wnv^FHsH4cOHFBF+Yt!O0h--4FIDJ-E*V!@i*)Eo9* zalElpr$iyPv{2jl41pqfyKxfJsdR=@zvZ8LJ#=OnaF{N+X5$G3K&099!{$t`6Vca^ z>!Nrs%f~eI(ko0=@v>Z+g`X9Y--zmx{5EGD%CD2(41naf7u;i&E6bAGVcCi8_EK1_ z*e0qD8?xNYVRmWA+DoNp!sTtpNp;)_zE>y48oT{I1@t2JkQ?|3p+aYRS=5WTzi^e{+ z+^i90tF@))oM#OVF(l16?|kdUgy!f#i)BX#;xlADH%9fOxDwQMNcYHx!vIRQbY~TE zw4#MaH?8ahAg~NwJ6iD?$3(rQIIu42aH}kOZMF{1Gsf1z2%E5gATeIJk={dxq-%7G9>9C6fv*A_2;Ru6*5Fox}rKs+efn(lj)VLATNt!GQHVYBqr0V z0b(VY_lu8KjTTJC1G8VmdI(}f(`3FlV5UR@rlK6GU4Xl(eN^UIOm2;>PF%ZMiCAlB zb#ONH8MEdt)QK#%8Wm-OHyBBiB=|qhmsI_OmWRV0*GrE%d@3&;mLB(8h7Kzolpgam 
zFFja#+|<5f*JQQyxYsrmTP2m!;~v=(UFm1al{aMTbI*UL42vB~;+_wes;S9x>e*lo-exZ@&*_zN$GJc{$iz*2RgQI&1RO^IdZvMiB{WMX{fDYxlh(2@P8dqK%9>j$zbg%dE#jfCzxODGPZk*D%5K z)PZ8IW?2+imWe@Y*JcBjI=NjCrkj&-iwdjEipi2|neu?4av%SQ<%us|ahH4HOS&&~Hp z5|ccb*gvLhCk(>;MxyLPr zsGdE+L{+37t9K&7Se*ckf&>iUK33D-KPnS|{G$rtDZD~JEYD0YhOZKMOLoQ*VAv%is>-^>o) z&IjAWz{e6r+Z31(Y%!zm^>WPp!Pe~UY*sQwGfoGSM-=uzNY8q#l`7<-bN?XdQ}2-O z@uu>wKt&KwVMQp)szT(93rq7iLcC2aPQqnN1^5H;+9?3yrRfOqww4aFw<$KP&*2;E zmkSgZ>sb>tUt)ZO8IXndWasVBc5wslqs`eWQs&=%4+GZxyGmHqs3;y*fmWehyT*Vl zDI*N2!{nzi`%sl$?@u-T;Z&)AW(4WxB8@0p&6-|pEksbxH-U0#&5{+{NGIw`{EEU-6%-CA5ossPg<@|I>fE!$~aPk1zNF9ihuPtg>PAHP{&Z>p^I!`Sr?OkQHFsumu z?ut;`*}5a~WEqFM!D64^d^O^H)}|8yrjX&;RU*e2e(lnL*RWW&E$~@zgETt`Qu6?k zk_SgP;1>N35cn*_-{{f;O)J!wygTGC8~ioaCWk!Yg#Et-8=- zS74UlHQzrzPwMRs&3e2O{8(;<9WNi3F3X9fNxP6E+M3FMDC$ok&T5-D^jJKyfQn;c z6wyVPHAj5GUpZ9f!@(U_hN~#Y7%0G*Ai>X(N?0Qw!)U#ClD@2sQe%1=UOKZoEVln^ z=WC=nT*=<|t_Ksh@72v9npR#mr8)}8_TS9jv{MDND0}z8?fMlpvz^b4=@;4Rzxb|) z_=RM0`y2^4lS078aU(m$-N3h3@j$!k)CtR%L6-@tr@zUiJTMmVXftZMR2@nW3iAAf z){zRAG2^))kajQg+JTlqQ8HO+4WOkML#*+LRaj3tuvd7+MYT!vA&$is+=0D!DRlvi zYZI0$y+nQtjQl4K8AK|uTWu>`miMFW{&n*76ER?(Zw5`=A6LcoT!L4bH(ct0r%4lq zq&Lz)Xj!5J4t;8v_kDM;HvQS)Eq4t9@;XE3vpM>(uo+xR%k1LwcvaB)WQHP#Z&1&i@r8{5gMPTNvF#&^`< zx=svQpK)mETAtU?v>iv~`3wQXB|hVhTL&63k>ojp{+ij9NvWd1M$x_XSG?!)_Mmo@ zfQhxyZ5v|pPMev?QqHG?d!fA``D`gqHRCMgnfQQbZ~|{i7~kz&ViW>8_&R>D_dFZ> zDhKfBygnDTC+1k#f_v@l2Gq%JZ3qaR~OvU-cCDo@fU1Mf0of_BIq#nrrrw)cx zEBTH~C`_?rxscF`nmMan;jj;#n#D`m^2ox>-WMfm;`gP&igx|VKsLFi5a3*`mNNk@ zST6#-@DJ=OYXyew>ULx<5l*;2LSw=Uv|I#A9+Q%49zE_J5(Zxp48|M9xGkm(bwJz< z3a(rd46^@V;4Z$SjF=sfIH7uI;xGBzMZA_>z!p3%3No#d-63M?7@@)>U52TT^yDds z_LJk@BA_KY%_<^}>BMUvxyM=PKFzsN+FfU7$bwow zUZqqBvSJuNtca!rnx}HQwKgLCkj@x^fG(q-#GYCR9j};#3dYwFBH26N!q`?B+wLuh zHnbpsLS#pYlkGk86;Pd83MgKV&_)6Mad4`TxvJ5a!7MaOgHi}To4dxBsRUVeXO&?7 zP=M*0_d!#OC#YuzI#i%uZPa{4#%zf1reXiA5t! 
zG{YM|wS%aZJlLN+ODAw@g>4;61&8(%EkBmT4m9yt+%vZj8UCq^8B(B8x%chxk}?`6 zI2(JPAfvle8W}KoRQDC_mHD;ki2Wu56pqzrfrvYs;wZqegAV!!p-5ayvm8^@_3{RcGcF8jFOO($mh>hp*3}%WvzwHrTlDJt|E|(fvXnFeZ@e110tl$_L+ui$fz$`_dG} zjSpA7r-&yvMOPMe#S6eN%_HMJrN8#6>3t@n`j8ur#XzxAxTUe-j6DA%KZA;%hG}qV z7$jd<)G~Rh#SBTWuI*mV*o0BWD#FxRS}~SU@}_0E0CIEz7nNS4pk@l$qUcTJ!!NRB z*(%5SsA{y|iM$`5U)iEL@h$XfFlD-nK-E$J?LvvEBmv!uKr5I+$eQw`g6mwt)Cwx{ zc$g1{>Fd{AUnQb!S#!cmsC;u)4vd6Q4h|5|odN)d!#Oa#>CIHFb58w^st{@77JEVR zc)jz~>Jfado;e`?@C|-A@_Ga-T~fwf_la~Yfv;WXmU;KtsU5}xyE57Sq!p{J; z7fF7MW@H?^O(`qt>I>T{>n6Aw$wS(6Dyimsbj%liois@DHy9`!xPoKYik@|qI>nft z-#d$|SxcLx4%Q_%_*doVcIHtf;-J85B0R?Edw>-7GqVrAwI1uU`nXL#lbtZ z7kYDU%9cipC0oOredAr*SXG)L$jp@Co%EyII7m|fcmBb109`@%GDcL$S^5^v*@R1y z6Z!Ecs6wX!XEWcjXBZ|P{DEpv1K)f`&#+F)c7i&gWTi+K*+tpAZKf72$?3#7--$6% z756*1>Z9|nQwT8BbiG3m@fY!xoXnje#w2r*;1(Y$=}-*uB{(AIQTnVM;=&Ahh&CY) zVTC(WUrVSBbL!SSUlxPuGM?ovX)uHN^hXddXE0~OV`bzBrrs9AWvt*Vfdd52I}npCpgqv zF@`9N=jX0db7+rwXr~;sQ-b!WzCQ}IIy-I7veEqbohgZGpZxwZk)$o|nZ)8H*>NK_ zmhjtIx=Q1Ri=2qcb^{x?lN1yBDJ~t1LNjv7y2PZmB04$|sMt@}ydUpG@k?5U7=XYGq$Mn&mQb9)wb&%(0FxowEinYi!6e_$(ls z38%z^MWp-f_@hUPMdk#HOo`E4h>Cl_tw1{IkG%LSIaIvtN`>%ZrAS}=N|8KB(qx?Y zW3rI7=mOGjoto$fzjazn)@2EmlNWQnmuD;r1xYUU*gJI zd*%&~PR1a}eCM!~i z*{7tiYwl3!R!b^fq2v?yf52+1QGgRPY#nF)6?6E zc5hbKWEE3}BX=gMoqB4XL@RqoQTBE#i`A@Ske&q|%tuW}PQCVs0efn5OHt3ARu6NF z#C^Pzq1m;mZon%MIhE!E6=x_=6isDK;09cOZP3xhYB#Y!gSGdH0^E((-bU;nH}Fb2-5;pIj;XNU%*(Ex zFBJAg0SVkMRL@I0+O>|iPz^DVV6GNjaTg_@S?X0?HceT5>k7D1gUi_@WalLgyinHN zE@k`Rs^SC8z|`TE`!dhY>4&rv!C)k#ecxuIg#@b2A4E~j`9)KI79(Q#geh*&bC~?P zkpnIsQnyR&WoU#Jk9m9?ukqI?nCmpp)4nAVi3SpFCnhjl#Cp^J=&?Ov1HhtQ1Y z!<&>;0kbLpMatjVvlj-aoi~|K!G%Xk4vE6IpjXx%D9u$0?WR4(W@KcLfd`$B({PoZ zt4dryAT7rkh_ez^Ry$Jd_wfjCXuSD+FBzKM{J>x2Oj~L=0c6S0(P#pi zOZuM|MT5@JrGS(w!+|ZYBCL}-7rbp%rgZm@DjUkjK!H9yn;?YaN`s8(mh->=Ces7c z$}4>m&9t7J$YnbP?UO#+^q{DV1jE_aSTC_!00xlJ2_*0d`SQ(87#7NhGT5q-0s3;}wGgbE7auAqG2Dz3f8tUF7Gg zHm7NtA3fZxSBZb6r}(gBJCJY1qRRU7v9vuqeGDIqhy^zeF$84AeYFz74@6N2iTZ6* 
z*6wh0bEi4Z*&TuiY3NpcHPjklKj%w)SUd=KX$XWSTQ@a|jHlfEux_P6rz%LWk6<9)=)@}G8MVh#Bv>IL4zzWD&}{LD zgMEbzW%#m~`GCRx*L6q9&VQEfXw#l~-J9%TaJ`XHhTfP_rqmn44>iXu=X<9orNaNl zJyAse&(agcA(3#>2Il$L_eD-w`XYa9*T`HNMs8$qY<3S#>5CWY5W7q3TYW&s2&l1%Qx1}LKJy%eG(1ds$=Dt1g^dR%m>%hlzn@gJ@l zBc-Myw=0ukO#G6_i2`C;1kh5bX4M!k4UATF>d zb{6Lw1h0SJ1YQ#RUAleHhV9@sc-PS9)bRzxu4Hray~4Cij$I{PW_q)2#W}Vo&hhqG z63$JJ7{ql2%fG(0n;owOp#uBt;JW?r2kotGX!pY#lvCmDmCzNUkekUOmzIgMgAMtT~MeYzgxxv+-k;awfV1d2jV;ygEqHYy>mIIP%nv#%jAHrqwzYI*yJbzfbz zw~=|ECK3l1i)pYK0`9pj;6}fW1edgv$YUvXQt(7)wQRAIinYQVC>LwnZLFV<*9=%%Fid9u}8OZA9hrK-HKHFu(RefmXZ{+@xzGdx7fdXbo*aa>c;WvGO z2NQ!dInjhVJC@iV1z>j<>JS6rZtV46LbN5&6_<8>DEyIQzbHoq^vVec0B_Z`6-|ak zGA?MZw&ba3t2U1}76vz+mvKT8(a9Q9&X@!SrUS?Gu=$)96Eti|;Lac;QtwoTma}h4 zR?!)=^D?F%=M4*)pI6RJyYv~1HI=?d|5^WJZ_1fm^o+i!o}YBjtmwVjJ86ZzUhmWw zT}Q8B#7#M%i+)K@amVj=$C&+2Ug1nB&XLl?9b3dOI|Yo!Io-3HZ2vpnbfWV|0-S|7bJO^-+2( z2%EvYEO<*jNxY?6n8I7iaW`gJuFPcb*u80G+FF^KpWXS(sdKVt)cKm^1O8Obg44Mp zF3~b*&r|pIhFo4sO^-)Y+;lFF%fTp`mhsAHE^AG_LMZSF1=l8BeZ`VqNWGqqUeBx7 zbu~SL3ZP@R>(#ZL0*<|qR>&M|!%JA~p;yP2&=252u304|W3Z&)WGQVe`J)od~51p4S>G#F?_*wtVXR9pFgKKJS6JzMpD3G zJ~(ECX5g7qAwGta&mx`5kFx^n8kuk&f7jiiqyu@1tqp$QTzzDvCLUU@<>cJVgD<%k zM-i3dFzk}$AbBWRap$3=e>#8|&|VDD}HuLsHcoJWWaX)2t_dvH-_6q=MuLRo@H zX8Nzq97A8uFZ21GQs(71dgFuAA3ZGzVrJS&55jwDlNm zU88Miv<*gc#=%sohKYoxS5nhU(e#pPjCG0Uc>4bVMf>q3$D_A6vZ>se_g1U_~7qR|i!07_Hu9 zCje3ctHmpU9gX7!2J5HO04gU!wF{ zG&NQEI2uqME=siID>N-5f>ZOfWNr07RHRmVovqjLOGU3kfR~X#W?dv_NYy>rQ6%td zDb?}r6b%Z6I-D@qqT&y%zxMZqSiw1Us6o?oW&YC0oERz??NtvhCXslJMBFkT$~=>Iv) zK^LLi7}Fz_up8-*suXCU3kg!aw{m-k!|Yude0R8e8+-%Zi8a9G-VOJ)dX$n))5^U= zsp)PGG6FF2MZOFwXWW@lVdzb0lU*UQs|b+QpUG_rE;w0rFZq!;cw`8v;wU!Iz1btY z!CV^Y&NWSo^s_Vw0HV;0pH@x)GQ<-x#G@V}8BSdrPS&7nn_&eJo-s0gl5TJRCuhZ` z&8o4mpGJIc%bLmFBgeGVGDR{cpq!|B&KdxC-eA=Xwszei=QP-{IxkP z>dIKoznbVo-3wGVI4W>h983skX)@50stlS6Fr4ZPPz@#` zIFkm#hSC71H2+l%DjUj+G)V)r)bHM_we;n#*V3HhNW^TPN-9kF>{x=TAH6~P z84q1(4x1z9<%iHN84?4hSVS2nd~?1`?+5rhvCo_&YRoMQ15A+o3QY_?=ln{RSfVK3 
zV7MbAnvt|9c%_?>HtB9in^bdb8mCQmoi@3t1zlw`-j9U^J=&?l`-(Fz{zc-m>CjWK z=tSr8VZcJ?%E0@P1;sy6A;Z&>&QuZd#OIPu1#aPXkzZ2h0?+##5=F#E_!Y=vw4>98 zV$*?Vu|(&4e^qw_`{KfsBOtd{JOgK=G)Li^>hwRVQdn|;Az$TDRZalDGIeTSJMvTK zJtW!+=Gp&LQ}h8}*UjQ`$fIJh9_hHp9qCKFJ`xd$1jMZQ%;mF6IbjN#L|@6{_~CR% z6eC7>UMH5y;67iIIr1|-pDWfNsN*M;`}o|f4;YL8!SP%3_s1!QqDTPQ9*PDx`uG4^ z49#5ls{dni>Lb5fQDwegSL6BjHKXmNRA8WN`^oTTu()r@=j26gDTi*M-T%FD#Pl4;a3LP^=ZWrVsU;#(Q_fFDa&$+ z+8k-sC~4Zv&qP2U3d7?G6;{K}s3i(4HI;1&4mJrrcN}^q`A5R5+kbbso*lL?8qO1< z%=%xt6}H&QpJyZX`~Rz8U&(UMfzp)S+c3WP>iH~rSNVb{J!~;sn$zo%(GNooTWh7; zyE8>VO}2ws?GUn+q!}80{tVdC`WyOyqvig_tHk8W5eNWiRO-U+Rk0leN*tC}zBmqU zLSm}*F2=|5Q2bL^f_IWpz#!B`;k2~tSG08S83)N^&r+_4pwJmPIB`xp@WeeN6-)`V zWI~%`{5zXcos(|_zp=k-QcHlZtO+zv9o&(y!NA!2JZ1U3S z(U1I@T;;gZV<-)K?v5;1e zstei1==Zkwg0r$+mJcca;G$y8Z7tGFxqQMqAePHV_Gp6#wLHv08Fnm>%w#AwoyKm- zbyEi~E}oX%Fh+2|yIK$u@Pa;FFfIBzU3~2;rj3Xm0Sg2(st0j&flL?{k{NU(guX2h zNuZCU0X}H`30;2AV%ua%2}|weM0}>qL=QtACW8uc)y!3eh8|3Qnjeuus_80(q;35- zJux62Zc_-_io9|Q_(ViL!aR$uZ(wR@P6`%&Yo}%!tFBohUW%|yui2c}OtDPYUZM`A z(%5*9^q{CebDA4yZ6=mWMCL}-S&rRs+tPLjdUmj#B?e{kSDDNg3Fwl`9k*%7Sx4y3v`?L?!D0}8yCi%# z3u0GMYUox`YDNE4G|Br~xg!*eD{@EXD5wCB8E?>0Sv$pg87QSUG@Uu}jm{?fdS;{5 z75jnO9U%rbSyfC%I!tvkb7E5zUe*Qkqlr`={+3Uq`Snah+ZIKt*tW4Qig_{>WsX#* z!gJ)ur7bN1KblXa#a8p77OK3~_^aaIwbLcuRZ}CHV(g2hqR{fT_$&(Hi8EOc>#DBp z;PdHhzSs>)D?yc5tfJ>e2-TitXnMN{Rps1n)TI*2%D7%>CHV76E5V0ok47QUBg+7Z zPNi*;@pkT2C=*NY(`qNgMTsrRcv8$4_|h_+Et)A^B0U8al_D%XGsNdL;`3u_$QAE( zOpbRAaUxr6un<#FjX^L5Tfkez!8~)fQ0y@a2@y~*zkf(sHPZ$tP)^PMgn+aQw^M0N z^Ci5hqV_%Xmpq5-7p)LC^VFLfAw1G=XVt-5J+DEm%xS0R40gfvzE z%HvL#(S%2HoQpZa6MB+H0cce8hN6gpustZr!N@+Ds7T;>F0BMZ#1u@L)m1YugJ~4*Kmnl}G${EBKwfnRvWC$qC zH^~{$j7OZo1o91XhGi279>l~MQg+Q0SYsfJ0S9XU;0YqBXAQzaTUdxQ(ghCK0Wsx8 zPnpac>CtS1v00+mg||0x2OuuEL&y<4yr61x?obehacRQ9sE`04%}WFOt1HdD(s*1r zX;uyff}XPd?FdvPM5 zc`vBo7J^+V)5^s94K5AK8r(6qhuYa%Ar&TS^gr2TEr`~3k1Z7qK>b3h@z!eiyb#~a zEb$=f(^8l`tSjNrT@8}Wce1ekr=J?O-rV>f2U-3FIe|_362^FRb!LXpsJzw;d4J=v 
zJ@P7LI^5s?9BG(b+{O>I{sF%HGg`a&kRN?A+CLHPALn=6GB}`c)Spoe z79QbgTvYT%#sN|}3xCg`JOAU|q9(&R4sdafhsBo1N=(_27ndl~<{-^0RuyK54v3?X zHEjxFeq@CjI&q+CN(dswzUfC{iWFiYTdb981KkzmXc~;$2=yeQS2XWpAFck&+cn0G zBbQ@5D6-R;ACzh6KTi5+l5PEF9Gj8vt*!H`#dO|U1=4mnusj^F>@GcsC6rIZFOMo4tSy=h?@qX8A%?f=^wg(1pDG#NQ9qZi?b zdpcP<+@oTVivx-07XDT)^t4f7yxJODVMB88Zp z%LU;EU2R1*%H^wUiDAc^qJZD)5P*$S^C!bwE>0cz)b{bkD#b(~YRMX|_7q`3{a%1U zdLBNb>WRw!NOY0re;2bgdw@9pTUy(>P5x!8!eL69`Cv%`sy7_#DyHCweOlBfMpd~2 z>1INvQzIO(Vgs;as!RHaVu(6EhAE72fYDS}vQx8?Yz^o&&Ev^f#)r}>!cQxUs4`y! z%=jBYK9hH>A&0SxCDcJxtsvCQ3jS#`4kr?gmKPggAch89p_2ENjMD_;X$}_UO+<6~ znZ>|CUxW=1?XEF{Z&Tb+l|sjh1!o9>C^9DnRY7I+hUX;iVcb4j=8}dx8+ReS8u^Xt z{VtD&h){i4UC5$$#QQSv*S;-2@~tj25LN7Ka9lx;yq<^kP5}j``h{k*BhITI&~&h` z_(v&`AuwVP%ZAjs9cPQ zy8`bjT@U9}jeyV`zX`LBuQsJFMh#rzi2dQbx!QabgNcVZ>2f0vqwhty-sxxyD!S9% z*|>X2(K+HwjG?+ibn@1T3Ih(1^H7lxt6oz+G2uvPTW8av=@U~;#nK)AW&)xLw|$iK zU$8%4R?orc)M;Iezk69e6eikGP>7jP#bGLS3RH9Jxd(O~2czT}r~7tQhz^WJ5PVjp zi5%hPFREbrjN;4~fj3R~aC4&4k~=)ac0)TR{K?}Ej#GqNNXr2dHZHBmDGZ7u?`)jp zYF@>^Pc<d&2&Kp|a#j&A=b9yYWM3OS7c?Y8uQwBd z)(KY|L6KANfHx2JO3Fpp@GKqQ1`Z@2l^>Wad9OW#F?tfqgC__tSJx==UIAmaTqVs% zk_dt*(pV?c8BkY95EzZD$`7^AU<;5%+Z~YXcrfD4gk4;X&XMO)o?q?cq!l|{P0nT@W_6nC=$}kik4M@p z>!l!ttLV_0j8*6QO}8x3sMQ=tqf0JKwx~m6?K8kb2Cd;)4R}@s&l>OZHDwHcIps@4 z>!1-Q`fK9llu_2B_QT{Lp7g|e);B|E@(Fd?FP`6DQC51o_dsZ9&K(cH;zja^U`Ch$My&bb0Yi3!lfTtTR5*q?$6Qmxd3dU!q!7hj z%8VyB-F;u{5%4o7V}(e$>QYFuxO!YL&YFb1D1+D0ldg}K)0}#D1+RY+m|9Q5;iG5@ zu`Y3UV0vZ>CfEp1q5D;*^G=uepa6+kWE9xzrJ6i1G^hGF?mmu`oZ0hI@zWIvfz(`D zW2C*+&PUcydRE-H_8GA?o)I@bsaKb7f~~0?OmW1GYg(tid6I&>&W@^i$c2Tj1&p%^ zjMV^RRa)ShhFAlP!KfUB8Ay>vIjZ=>1jts=1Q1b`%0C!SHA!QpN#=er4Z0pp5v-MJ zJ|47&4+tpf^sej_6R&w{YT~ue0K=681MyRVk2J1S@F}yeYFk%V_$vH}9VHXG*qtSH zYIC9nppk#%+l$%^tKHJ4Qsg?pKibH$W{$8%8P_80DYK~jE-bS-shl!WtRShpNhIQs zM4*#<_#i3gP_OxlnX$i|*J>XQkM@0bUCR7Wkt8k<{tDXouI6Au-HfAla~OrF%?m`) z(C@i*6wRMEw-0+uGc6JsfV!REp<}*HCm;;Bi*?!*ze9%r+d2fOaBf>wy%^?htA_># 
zO|1Wmc)1j)UQkUFH@jQPp^@h8VlBWX_^tM%v`w7ihRW@i?$?XD02QzHd;%DO0>RJ0QP?&ETjfZHD#XA4M}!vH5Vn zdr>4J#}Ld61dv^8(hZ7wLsx0zQPJHE#K9OHcI@j7?A96SMOW!5jgDVH>9$vS*ux%8 z!ybuY;oYuf+RoXfqZ3wF>1nQ*^gVl3AonUydt7|>p6>A&7s=rhcoarv-0~pRReCb* zJu2F}evsCbhy=Tdy`+;S>?C4C`<%=me+9CE1zJp0eWvOC2so8>Om#_l7>>+eIC_;> zP9e$I(x1Ln8gA5RUL^J7TWjW6biyfr+-+Dpm?K1 z6n~YcJ?4cpCN_q~#LB=p)|lw>c_oiq!H$|i#k_4BnklLeP0&UF{;!fTVkf}q(wbl! zB+hsxbC7Mbeh;=p>z9j@CIaxzOvm|_@xk$nIofh7DNI|<)?k9+6;v4URL%oxwuf8H zmfE4vTT9g!s1QnT<>|QSQL9x0;S?o%5m1I8I^bqQ@D7IoSE?WbPEcrdiaVzk+Nj>F zIKhv%WL?5<=z^|Dh$P5>BeVL+xR$I9I^mYfw8OeZ6qnf}u8qubjTf0#Z5!>ldQ5^PxD1KJqM}{2N907V_x}Z{< zkwbV~VSuqDd|ly`L7_VEYG^IAI_ij3(a|bSzp)y02DArw&&A$SK8q#M zuT=(VfSlqs!|T*_FE+vWt1qA0aasWoh?3!um2{VBbFNnLT%aQ`b@xVbc=j&QDZhc`Gf zwEa<$zLS?FLcpK zp?S<8^#vE;+E(GS0LkDbPys4|2cUqbc^&WKMiyeb_=$a95<2P>l1_>o-M-o>oaUCZ z7dxTkaWVKEoHABMR_iJ(#4XbxwmpXy>+yjq(5jW5if{q0_u zKl@v-FfC#P<|*d)GGQynhk{*J7jwjiB~sZI+R`T=LVJn`O+}rNt0PfmUOhd%xGpUAzGpZWDIE9K>1d`Ij zSnxMDnq;`9P`Ww*H)^g+*hq2tU$Y5=mefm~V=mljxD#a&+hmb0gz z-K8F2KhJkIlVE1MoH3;X4yjuF3mtHTgVa{#G^&B+47~C1l0L(fbUKxLxe;{$UD`2Q z330{|T1*8TZVhYVDh)j3nFIn?eQW4b8hTYjuhPo|>g(|}8hW-gpRQ%(m($2=G4h&5 zKBtk-F>+Zc;wnAO6_aL}#*XAMxk(c7T0kNjY2foQ@Ocetd7z0*Wc-(FdY+A73vny3sEOEIqKpw! 
z;dvbxj;E*LF&{un8#lQcVkWG7S8kf+nIdUvo$$KC#4SWcDwiZ_vAK&yj$;LStzFp5 z={zam!j6;7SMEv|Y9jEIqDd`zZk5)Rwun-<;)X8Ai`5ht3KCH1D-khP1&F(Zc-r9@ zbUpSGJA|~$o^iHxu(Lo0*NAmxt+3dbQ@*i9J#tf?zxoTjPrB7b=_|@M!)hY8EnLHz zjXbfy{&2OFKWs4W#q(`-Rp{j%)@0RPr~hkYa_on75U3u&0`p9P8alg0e;(0?MnMnE ze0NC?m2RcAw&X5;!b@S#*W*e$dz?V764-E*@E7Mw2!Wa~ic{n$TCCfYo)jMCU{pku zV_GyTwfpI&6El$;YBVH;y)AZ%)51+w)+C=Pi(A*^fPtUo@^4$9u?{M7Jl4a;r#LZK zIrb|jm^)z~us*K9EXNKRAU{stGBqg14!-IsYcj1!*3qL%sp7#y(+?h|AB~RcqZ5%l zbhKFwIJh@CA6jy&6w4}C2j3hiPso}~(_LXk>Z&@0$KYiacJW2nyX*`{KI1guoMg!+ zDtM~F#dGyXY2E)T>Yqi#7p-ydeApKaDWU?D&fLjjR1nZrsZ3~Gcb3TiTXKLR`K{Rh z$o)Y<|S$jS~Ull-yHpe#=J=YLmgHBO`Z!8%bb1lACRE`yMrf z{H(x9Mr^Ty%J6>2hOR6D!7h0fMuNbJb_~ewt*$`| zc3}*gC8&qe@qKc-4^or8)Lt=~^ncq6eroDT(cw#xk5?iIR#Mf^C%_~3E_L1iow{3V zvfXB;!9>eGLHw)YoN$X+*3y$`E@Y&y2GR>|1o8l6^Fj>eG4SXao*X?(UmwwQ6|)u4 zf43;yFrtAf+hG;KINF25x0Th9<^dZ+^Z4hTBI))pnnwrC<0{O?C5ohhQ6$8samj3` zR76z9kQCV{*yu>3GHi_C$2Q@N`+#>ao6sm!rg2kdLkR(wP#iH53hju4bRx}3;7CsDs&DyY|HB;k809 znB4s=4>ifkey@S}xT5y{20NOgmfpqaQx3xD8v`~FbRUe0k!4K;Tv0_w8$t?dA=E~O4eoT}y3PIlKYU%W@7`2+ ziZDm-%#`w~4Y+$B^ZH}ui@1pD3i%?M+O^1iUC9-VIpmQQ zQd9j=vFP=z~+v1yWE@db^=jW>t&>Ifux zYkC@Km;XE6j`?~##FL3~peSK~I z(K|d1puc>Bojy>UKqeo=AfJrBPO2~RK^*hTzwnvoqApz!x-3}2)ybrDb2Mx>UybJD zvhDL9V8*IrS+h?xM9h`Vv{^cdhd{~p5GdI~D$>bHHqDuB=^LpTA|lsmp#G0bRYFZZqbftIJ{O;ZlDL5AQ zHK}(3oUSVb?K6R`Qsok&`_I=GXp1}vX&Qc~y|NNHD9Q?_O7tK+8{__Sfx2Qa}THk%dMw1a75k0cHI*(#%_`ti4QS zZOgk-EGX(Mvf~BLx*6_5K*-Lj7Z4PCS>Q*bYa|P9pKuhV15-uTAEUx^t-e`GG!ZW$ z3*Iu-5LQnAZyMv)cSlttX=Q3!r0G1OOX!ucBAra!7Pne(4W7bz6oJdfhjW(U6ai3v zp*Cvy?>&ktKE}@g$fY9E#BeaWw-5I$X6C}6OOgmnj=aAiAx87aCZ`{HLxm6v$)|Zi zlDrcj9W6$cb7FSka4CHy4&T)EJijzQSlH8aRBMbI+bc;tY8^U>Gf_W_nRQjl#D%c%oq<=?x|&6mpwJcBTmlb4%-=zBltwpUvJ`-gJ@rbmnkL6{=ob0p z^d5kfl}f*e;YxbXSH)FTi@B!t*eIzER^4+8d$NBh0+BFuQjgen42IN>q#jYliUq-? 
zw$Y%7$j4N*cyxmuEML@#Fz$b?odznzfWjaOg5;kVZ}Y&S)~Q@E!{7uTV30tk6(YTo z;4{I6b(K$QUM%hzHKVy?IL%L~`KsSjhIDcYCk4(3f1bi3dqy6!EoSk5PtF4BO<;(Q zwbd?Qb6;dw$tXCK>Y~XQgHW%GRR%QOBVTwD83u<89tJnLM8?pvBodRMRGo3dSpZE6 zA?))2AzDA8T7jJ!BA{Yj zcsrTqOIh`fP=TWA&O>Vo-XSzX{`@x9h&2YYu%gzwC+j>Otz-a{@_3lgDlIR5Dd`|J zt!pM-^OVPMJ`ZEm=JWij#!v>qbZc({Ym@c7AePISJM>n;!RJgXhyNUZ6|yW)c2V^^%V z(ymwsKCAANUf7ubuI6$jkzllE_;qT}$SI?pXHN)YtSO+Xy|RW)UNH|wy=YI@*uHpb^G2s(^-c$6!&Jt*t}pOp2}-%@AwRy6 z%_xfE+p2eT+-bjscU10tJi*E8yJy1NpM-OHRGqO>NpDAxXj#~cZ=Y(ZaJsj;p_GA3 z+lO>zQCM$MB#UsF z7V7$Gc@QxJmX+IKJEZ7~PdKuYdy*T2#*b#(BH)q3E_?N24bdFwvaSGFjMO#aD^#F6%)`KWC#!wxqp26GOyE8*^5o;0ykwlXGTyJP{;L z*vQ{`-qt3xTt$Czb~)Q^MG&=Ww*Sv>4M zpIsk2B_|oIRd@xn;)2lQ*d>4Q1_pbWWX6`-GL_S_;xjeRjCv|+%GW#;Hh0kxe5Upp zcz}Fyx`AOsuSfzJU$3g&tAP0_*(r5$)03l>tsSJ0ynK>A_Qy^5;rN9U3;i!npKzuZ ztykbtg_UZgkC8&pwxbwPb`@>L?WtPn|u4>Ju(tjL`x#9+}zQTgO zs`-j57m#Z~i%9Aunyti>wp~14x28!=3N&oCrc)i&tf`-o#gJgjUr;UWh%Us==319^stX*Xo2abyQD1x!s2v^cXF2x9!G{R*~OZ@p`)?q5jVbA^n$CqEaWR`*Jv z-PCjgxN)MciJBA>IFUwJi4n*HhNv~d@oOAmrjC~uy`N6KD^r+0$rXmm)jLVNn{63> zt#18NbFDB)tGC)jV$smf$zauZjp$zLCH|nzcR{GJNf>Ol3yLAmrXf~i2;56-G!3!F z5Sy;R*bv$UwO)i`42nj08s}n+a~eZ#q4A;-ljQBzM=ClNrVFvMsy6 z>J}(Quwlyx)l+3hG{TW<9HDj#6usL_rgxdm^f_)W*QygmOB<)6?r3QdSu7DZa>o2Rar_fB?P{KDj?(K#_ACgUv}ctKSVhX~XUny)#v{RMMu3;{Z3gp>R!%$?|}O|AG~eYy1(&JC7-wY|EldrgVJ0MKtZs`yqj$CmCxD~V_Beo!xOUC3V#K--m_(bFih<_rse>9kTN~9{Z?a1y* z{N(}g_1bIy+xI=?liGMXnH*IGjjyN4%HvW?2=sI-ZsAD@WX`R^|Dc1HI~(UHQTsN< z;}tF(XdQ6^CocxsH?0x7bUQ3t9>I@U^$K19?RyjL0XT-U;3%~dkOL9Xlneq=!paH z+@eYol0l+1X*BN|w)#g&8sw(ksmC?`%Sctpz_d}m+I5i^Vs1ILNF<~YOZ>$}6d&Bp5IA7A~cP< zt{g=V1C#L?b4r5c%lyZ&rQ4`w9gYXKx(eFbDnDzip7!5WuQ3+8E1;q`Fg39PSv?cz z(P(FJoG5}Pv%9ioxPXybhOFv^&71p%RH$UMyjsljEarwfe08it#bm;Pb9wU~1<>U$jsGu2Q9{efUb{;c5h=_F})lrh#=S< ztE0k)TRm)bU`xyV_b#U^i?{0L>mPGVTisO`w;J!i;(}q0 zSwT0tZ6SAboR)9!2~?TRNlqX8;4OTAT|NU=`eOJI*5ya=8CevvjgD>$zm(KN@8S;% z|BB!RGgPKgzUDDDxV`)Rvz6~v?oDh(JRm=$cJpOBJPy*r%YEq#6!_-Xzj9XuH}kj@ 
z@Lv-Amo=x$_(86=a?%`l6ax;Mt`oHir2^}NixocV35MPh)eGS#&HfMHyreZR!ME-f z#S_>wXxxHmS+8gTs^@S}S~!&}@?XCy{nE{~W1$%Hg*2vWRWK5DD&Uj|TSeURBaHi3 zUrtegpksM_D)3us6dHj_X9)h4=xarNsdR?QWn2ZLbTJNlH5jnjc&dq^@mv!FeM94& zg6|u4YG72oS+-9`cppi-SfZD>duCKv;B4x6H73HzW;7B_WbLXG8IfWnS8X*9i+gs^ zD`~iMG2A%~cV5GtpPa)q?jy}{XXByV*La7>26r_f)LqTBxUF;|jJx2Z znUjdx5hp68*lAd^5yY%lj3AF;i|$@UEtdJR#mC9udA~7DMQ^(1$=>( zr7w5f8IEIk$2r{WCviN34P4xXVAhU1MZvDMWPaHx4c?r)2+NQeCYQ3SjeT6Rm2;vd zGC3l>!v9h8kbUX}sC8vp3m=Iyut4-iScCYmD;Zd&Pkwp?#3;s#&!1@Zl~ASilCzpQ zTvj*BGPjm7$b8PPFeDp=AqfPIMKF(Wm?3M2g0hZ0%Ww8rhQStgxO_fqQjc=UT* z{mMEqeW`xT!EIp*T60>K&#N(nN~c@{HW0e>hWA0XbQm#m%Dzh-yx3 zcGLqBM|As~tS(MVzCI4fazPTsQfkiVR%~a~!f(YHT^)bSU;YA-^Eit8mdO*cs^-i{ zr90IJfqfn%PU-4+DFs%G;5&zG-#*!e4%y;qeH6NN1uh-z4RJK-ru3x38>=Vf$FF@v znc8v?*^`?YiE%QQYI6IjXG_5)pX?>aNL8mhIY~-nR?UYO4H%!SgYgNKVR{Eo%W>IJ z{tmb~)ZEK$1J-}JO7UPb@8WArjx>*knp;e3uvb=SR0WApp-ZdV&y;3Ps;vcaJcM$e z>oh2}KE9VbP@4lTbZ+Ng`?brj{SamL^3NT3uJ7tO%kOQYt+iC54+gJuG~CVl!UsRZ zuTov9iJ2g3%54f=Y4*?PloRjszm}(9@3vO6q5D|{G=9IL^9t~Jkqc4GK&MqaT4#RK z$2m+~Jl-rGw=gcoQA~wNRL3dOrVAthf4H}f(}G|hwB z9}U?m#S3L$-yOlN2!@O%W&BKX3Dcp3GY_ky6}@(JcErx>Yv3<80)OH!eeoAn8rA83 zmL;VVvMKJU`sR!RxI>4dLSRasId6R*D0U5iH_=!{_DJH{tl&sTRejPn1>bM+PTjq4 zoaKyF0Vz2^r&31V1G@@{;v{Gc-2TDV*3B-4#|?BhX+IH@Vy+K0Kk`L-DJ zl=z3w2UaG3dhFm7Bb1Zp@tAxqv#3zf)tSYpwo7G$Xgvr*p@{r^uTVt9EyjuxY^jL2 zBmu+}8OP)zS4$*tjz}ROOIE!9GGL3(RxjU-iFbx+~f=Z^v@YN~^E(|ax z)o~#_67|kX;Ryt9of4i|)4?{lGT4wNyJK(vd|4=hB`%LA%SWJidX%ab5!N(-)T#}* z&aS9XKQ~!>K@}H-YHq8vR!4 z_PoGjDv<)0(0UZ&9UFuFwpo#s>8`I@4rLtrtL#x0|M60wS5`xm8GH1vw`JV5<}>20 zi9IUU?55%zav7qF5E()wgQD4E!_iUffpmd8_=yV@uqklLF3lk}^!6Zexcv_d=JtHN z@%E=2@r}a(GMZiTq`_~?&41AUHzhUSJw}^55@|*%yKZa8yfiJ2P6br#cyANY>Sm_` zLK@Hd{^{aG!qe3>4)?GO#h9w9)cj=RJ-|$$JtyZQE?`?ciNJumYtk#+8=DFNQ^v?e zGIv5auGkH^R5TJ;7)9xM$G)e9LJbSR%r$~P*z7o}ab->0}JW}9U;Mxx6&m(62JIVQIY>*wos8( zx65_&WN@)!woXIY<&$y~QEk~2%4_VnppGNe=OzTH^R&aBmOKFqF%CxmCmI$Xs- z5WK1j$wh2oNV>V|46FG&rkZPG+iC_6rR8r;68DNBjcQ%C{X*(a7R_QMUE9v3b;tr% 
zVI6!`1dIcTFH?otR)FmEm*XHO_{y>wL|+woN#l$5sjhFJ>{$yG_|%I=6wc|0U3 zPaw~$a8pRp4jh-IRS#$i{KY0GdxhyEr6>DY^Akw*YCq`otyRhGq6f5rkV7@Od^X66 zXXSveeFo-8hM+Cbb9%+$APbaiL6(r-%d!^JtsyjP%g7B}i$uIqKkL!Yy878rKO6M( z+H-ISC7?_R8_=eQ0ww1P!8$!0&x9i4XE&P=#!2COyQFS|`DO55-ke|7u%TM<_!Mo^%;;C<;xRCAEKOUgE z)*QbU*IVlS=z7c9(e;+*3z`|%lEBQl-jcn!>GhT%$tA#C9BfplG*C1UitHeui@PT3 zkh}b-{AJa>J<#=*ic8SYMhy)o3z%=oRdvD~Por{vAEyrpMCN+1#&C0wDBR09=THOc zstc4qDMt6?lGZo5gA$W_W(xY)s&MEk{O?5V(dLqZtXG*>oy6bZlkJJYkDI^M^U1P# zzFhFP+6kcLc!}xOS5EJuE&_`V=Q^|>nzjijS5{) zG8Feef&SWOfC1gx^A?h2urqQJE|CgSHmp2Qa5*@s?q_PwPaNMf@z@LOIWm2R@U6I@Wb3S)et>fzRtvL z+|b<8`N?m^PJrdfV<*NS6rZYnhK+JUHnI#oz1rq+#4(&izo?u@sd1aN zhS|FH$!L9290z6_4k{gbsSS17^n*dV>1s4xRZ|!wo<11lfg+i;X77#arUfQQFjV&P zW+1{&Jr3+t6FjGJ(04%$mNzbSo!1S~phwYx4yIU7cj0n2*=QKy@oSdm5`>jFCm{@rmlRzggBE zA5XV2r_^DaDkC8LAcwrG>l7iWdnV%OTU6H(^L&V9$C2<>AP^5dQ>cxTMwn>|R)v3` zC(F`6Q+zXyPKu?zT}e0OFnZT$fjj+=%!$Zc^!{;v5)bY4f63XvELT~vhj+JB!&~IS zl*R>%^F(q`aDamjw5aTQHg4S0{O(B}ssW=uPsw=2a--hlwXocLF4g`_>hg-W%AHwa z!?^NS!j*qK@pWlOI9W@+{?asGhr{s*EV8US^YIFgJmoCe>6QVd#xon#@Lci{{Ba( zHn$CyUDJAmkD|mu%|lb@sC@=7w&as#LAuRaZ^gJaRpZU_?BPWu%i{~m3aj6q-u&|_ z8nj7D=#sY%3 z^8V(3sZ7mH6^JUe;?}L5Lv@~4=l^W&!Ua;%z8poh0NAFSslSu|htIsX-~aj7&->%I z=Rf}+p7Y1|6RlU}@_u<(z1-y}sMp2KOpT?cJrqtULWwSGtf_7}9QH&nzw+q={Uz3M zS96BQ0p|={}I0v1X?P8xNWIv_q2Lj$Y1==YM$*b{y{$Yms|aR z(i3EaRi38tC3V?;*Hh6q*XZ`zjaJrZG&@{0m%r~nsSC1n@>gH=J96cIthb-{ zuZp-Zo-jfNqlmOG8TrLl>FG|%Af6bpGAQ;iIgK%|j+4gWVq|ovbQ(TxgW16HBlS_y zz#RxldmG6cB-X_NPaH35Fk0YxkOkuSQO{Utwr~~w_szHRflGoPWKnGKyqFv3W)Atw+w8Atfh|l8BHi^M(2u#}~@8(^~WRTPJ z1xBMKQp5?1&S>ECW%lon-5j@?!!7Q>hdTdPO;Bq~@@~^Q$jkD^L+xktv%mCD``Y`U z?k>6muRHj(WA8j;54(GGSoG2x-~Y%1VitNx>0;gQG8Hx6+?-)G+=W&!f6v2Y)sDIX zMLK+1^w{x(^Im73k}}1gVu~WDCVz_0X^oh?0wupe*4Md(y?Zbc91Oo{Yr}s&l!UQh95^$tAK1B0n zR*%%&{&QW??#{-6CaB7FJ@5}M&E^eJYZGrQy~`cC+$^nftW$YR=a(plAzk#*A$R(* zl8@qoNtzqJ7(>Mx>)Q7PF$ji)?QBch7@M+M;UnDq5)?LgaZirj4QVg-Kw*tiJ!Ob= zH>BL7OAB&7K)?%NwLr*VyRSSRENJ#nM^kAXOLM%kS$IgK5gCe$gE>AECwZ1l*WUM? 
zS!2(+#(Rd>4X)dBq4B;S;?FaDv3!gzKJPFZVIwlB2aCq*yAw8X{-Y+&>;d6}?*F{+ z#Sa_<`_#bvzT40qw(lKMGs;MGIn1Z`=FjZ!f4lIqt4R)0PtX|AY-1<`&ACA`T5mAV zb6{7QX+4f#`vXftcwHAesQw1GHw?O0=JjRwMF9@H%ipDY0;D2pqGVEff;}dPU9gYu zIyo!IF}-|Y-%vNwGp(6qG0e1f59XB`87c6p;#ty9*Y6%0^zLdjv=4#Vel&H#TM*p8 zc&p~|4w1&g4=xh;A{4gILs3ORQ)sJjWS6BRwY#C)MwzIRz7^5XrPvSq!tJH&26J{B zOHkTxEPs829=d}$K=LOciR&vOh-?4B`e^G6qIVco& zf8$}TtSl)bc&RN+X%OLptvAq6fk}j!oZ(E69!OlizrjB9O7OTa>kp53zdj;#?xrB`V zC%VHLMM;4ippbzF3gBZqvU~bxW{{o117s<71}x>9cu_Dz@=}|N`PLQ3D-SUA2B#tj|CnJ7^0TcqDA|; zktc6tacQBu)3|h2;)o4Ig32eg8eD)n7`9ml`ncV}{`pdl^6mUF2#zVGnKjCp_4e+B z8@$Dr*{ZC=9ds&a??|ch5qp4+e2p)=o%<5L{5m!cr6zVY?-`mw@rg|Y6eGei~OT<(=&<;gID$N9cb-w=qSE zxvt*98GEVAuPqfVd6btXRdw=@H{S8)=3_LFq-8W`cm4r)vM8q@0D|H3{YI$*86syA zP>;t!D|m*p`E)@mBA1*Up#jxtbftF*rpAD==>fE=`~b33*-z!~?hEC~edC{1pM?_T z3u#Agy`l~H%z_exo8H&~o;Yu#A?XJMg_A9{gl~Je6KS}W7!JM69$VjHJ32vE3<7< zb%L-_#jfYv97iyGI@VO1UuPLpL-9w^#d;d;T#R;3qrHL-#IF=49v$zEP&#J7i>{{{ zh``x1H)!kMvjnH0Y|_$AdfBmLo-$avp64)Tn9z=lwDfQ!rp>8YZm~HLUYl)BwUhzU zp<|zSD=)V;ZyesNh%?0EQe@hU+R5D|fWEW!_|oeO>h>n23#*R+#X&QZfV5oirYg&D znyokCa87N)wMQ$Qb!tY&FpadgyZz7IS}q#jv1Z#jjA@J(z{lfzmXl7ZJ4IP})`q~6+m8u_qY$|_3~`YR7} z1~s(TY`5S#*c-N`zp_fpJ@R7<{Z+ai)I0-Oquoe<^*a?VLwVf>C+Q8gp^#a7MMEmJ z*JILbPa|MH!MssL0 z9SWo&MCJyI(!)Y`y#-CO`Sf1))6%VjZ{tH8DU`u7@M-%6RK2A)46f6Vs`oU!ZSaO; z`@#7etk~U7zwN;`D|Xw-hf9jx?St*1*ljNqy8sE$Vt~PdWu>J2fu6KE_3fx1}5rl}RZQ9Dit8a|b&ZOi4*+ZCfmXHRBEl8m)~~2sC3<9BIaHkY?N!aat=DQ5NX4 zRy>oCx4=X&I*h#D2zd)YW33po##*t~n@J?hx$s3xn4_)~gyfP{u#n!~rA`HtD3sQV zX99Ta+x7#C;;S@VmD&uw$eggDDqY}d6f3?q>4+onnMKj@F|JrQEkd8#JXREav|XMC zh=V6o(?DhlBCQvOi8@2CA2UPfytdhMXJEKj(?EDQy*6%l;TuyY%>d!kq#TIPa84AZn8HjGSfYyyGE>Kd7O!!Q!# z8gyi37t847V+w#ezq8ROk}{c@za~exJkY_m zL0XpdN3&JAlcAlACN*F&Ve6s@7sL{KkHVr1d`?>@eKU45+ zd1I%LCw4VjP`P!~9hXgcLAeM^-JVzBn038dM`{0I)ifVtrdRLPLR(e8>F89(uH~&Q zQ7c5$`o>+tw$Ll$iaOv1MF$+^!-A;5k$HYK z*u_y)-B6gU`3V0m-8Mn8nx6KUZ~;iv2ofkojUe_IOENCXuMUPlMXHze=r-S)H8TL1 zQwf;k0gMX^QEUit127XZehQk&DbtKn`QU$Sx1VX%@Aj$IX)e(VbV&h)w5a2hk$FYr 
z-5KE<4OHr(#VgZ#IZbOVrnM%zJEv)#V_NqskGbp_6KQH<&9S~EWGKVh3~qcu30J|8 zFE*FvuEuAlyRzFrWC#r$)+>8lp~^M~=6N`0;vF)f9NeVDYMR0shS3M)YkoT@0T3$^NbCTLLIi;yyOCCu_6Kov4@Ocdb z8>et+uqq8D)}_x7sfJ(CcPp4|kZmX&~IMX*q$xRsv(mY@0(Y-8+@*|qfg5@mWD19@{x_3U(~o?%bHBn)Xb zNDIcVFzmVooNDbEolP@ZjTup-7-pdvQ9d|Mp?{q-`X({Fp5Qqb@K7ijh9Y=g0iKV1 zLm0kb44X0w1^D9Ek70VLVmRzQwj`B~O%j)|x@3jT7FL)xYEkzXeT(K5alWV!N)5-I zWPS88u}P++_BA7Xs-+2lOTrBQdT{G?aciz9YvmUu39pXunqH~1PO`=XQC~rs5Ypt- zLt-UUwV>eZs<@^09LTA`&XiAO3OhA&IV<&JJls?@He)@dSLFYy$pV?F&)C0WSZn%@ zV#NHV&ha(r$BvZFF*|9)vfU|#VnY{4KA!N9ST+`&3V=g!Hc{WQKXGb#^zg3yU;kC*;k)v$q>qm@n@V~e z6gVQv80z9eZwX})~3)?1W;H5s+pP|{eTH=h z;X70&GAEOiEF64J+#uZKGsz0d?9p2s!-|tSO|AbYx8{@+P!}8s8+N!W<+azgKgv=T zW8KYzDnF>Kx6ZNtts3jC!xo#Hg;L*e>vfHW))7*93tlo-T8Gb^ zbM*-QcJ51n7^VP^XluX(aKNN_kRGL%M@-dDe!RV)(+mQkzZ;7cV8VB13&MDoKdZcu ztnh%|uEQY8LBaY^W(qM)0ya=&BmV^RR<;a!21sws-=vizXYU0v@w5CaFa0mw9vRVh z=70PLK{@}^IBm$UiSKTKy(eiUvhIHtjM~Gf?R`7DJM)fFa9sN-5@N{Y|FDwHiNG$& zVaQ4XL}UJmGT}PoGYrjb8>~;3ANotqtu^6Qk?s_$O(P{Dm0uN2NFUQKI-J^Qsu`nu zrZ>k-o9S0~-2dY3^FL5duE%pq`~$Eu&#@1)`>#*QRbsJ zu=_ibG)ThY3;Zkn)1(%9qVs!drxgxnZCMDSlF;bm-Y!ZJzfGtzg;W=6fLR%^A zIua22#|SQJIapgTkZPuy%lHc&_);dUQ@w>!A)S5Bx3l`xpm_59`xD!QC8T932Ful{ zmYpw#&Hnpa`5X7M7_WN}%=I25*$$6wB9cBEL`wjPL@lxEk2mrY=*^&Me*bFsSpP$_ zO1ZxM0j-<4$d9zLR;1~QQ5cR*8HK69jDoGHZVN_1+BGNnnJ4MwJJ?{bG%<=S7)8eC z#3=MOF^WvN5J2^Lu?m^n*#xU7M!TCfu!^i?6^uqo)flTlY~MMy$f%87j@?p(Pd~n1wbOzn08GB+TcMS$IR5S+s&#v_{OLNLZ7JS}+R` zWM+XQC`-(uJ^wq+Air}g1(Gy-Hrowi;PFsF{+kV%^UeHE8xQjLKQ>*On=1aa3;8Cz(xr zf`s)+egQr~kv|&mpecR-tNtX7U4oVuworTiBdy}BX=tD97o_J5yhuWH$RQiR93{mg zE5;Lxh$qgY+GsnAJ`H zCT>JpJDXw9E;EC^E5no#WU#LUVH*gsc$B5!tahCWN$DNHm2x_kx-D{m!fR)SUdqdn zBG7h5%|k31p$&TT9`3*FP#o$dW3ua&ia~~hkDe4cBUoQiM9$eJIr)n0d~iV{WbhJV zSDW2Bv(OZoO7y@}d=i>tJyNWc8(3%n5B;7(s~%+Y|1=BPyqjD(sEpQvQ1Y$6rmP0Q za{#zQdN$7guI|(`5Q`CVy6$9n9j* zO~CXx3?lgqj(x5|O!{`GT!y?Ydu~9 zOr>s3;vU~+3T6z&a%!_5*sih27KPapp|_&CuVHX%K54a%(u6@b#^aV@cM zPtGhezLFIMkJz(84w5hvWT1Nq=Kv|Nq58y7zLx>|?!Pwr>2^pK7?cj)?Z1D1X_mc0 
zy1!Eaisl1;r}`?5zzM&)1LgFwJ6$YN89($gGnnHyG$lea%mS&n)HD^MYZ6BpA{XDq z&JG=N*t<>4!ouo8c_vFiha`2#{@0slat_>`WW*&$hj@@s@mc z-?l7Mdmrs1figY!yCMnrPX7}#z)W*O`z#WN;&x<@%iv}Y@z?WPk!$8C#!)*bC27E? zog%|l|Icm^)Wl<&_cl{8X3n6F22nOmrQcIN!4<|mgU0xunh_%#d+w~Cs{pVe04^E; z(x=t1fAXdn`l5#hedAp;CZ=#)J)Ce4$N7sAKc{nqoLVml-kinUrlGEPHcrHAmRRR2`4PVGH0=ZqsczhO-0IY;e`!=nPh`l_YL&mJxZ~GDtM0rlA{A+T|H@T zIt6kUANYMowmWSZrSE)w+DZXAj+aPZ~RP3Pk9vrY`bmmeDF7eMZ>Hk~&IaYI*`OudBZc;?x~x#|Lk{d6 zr2xSrI&Juv6r)jJynAGcVT8N>uQ#>O;$qRMDO~_NzrR6hT&m0l!S{c89-*d# z%j&V{VAS=NU^Zb^TKwNmV-K_b88`wva}o$4l6*Mm@JExh85y0Pq12L0I?$Is1@Ya~ z-xc}Y%mjV#^0wkrp$zI=HRV;V=;50y57PtB|4I-E%&&MJPYK*R|A{r8M|mVmDP!dU z=NHDn0SIw>{o%4&4zbbM-IHS{Mf8&zo5TIZ&vGYj1?#7$V3mclIjmpQ^JB1nHN#Z) z!6~$u;G8&!4nP#_d5zu3}ZE z9J=Pc*;|lnq@79^(#dere*+u=79j3OfU&g&nC7wr(&FWA8~FC!(w*3h_HCHQqXVwO zhqH>=nR20Z!Yb~JtYYtR>0YvmC8WeIc(EF(6XuY|dy|U<<0SM1`58^~uWSQP@S3(U z&4$s0*u{sTopTtn|3o?5^d?3?zI6Z7i<~mFp2qZHA7U_s_|orRIf@gEi)nzm|LRS? zyCiIz?);xMo2{zj))b0z%u?aCDE^@lpuaYftERJUww>v}R3%LcqlaC8_|06Ox8cnKG(brl_yTX#hBdNKwxCDe>%?c z|LaTdD~3e-(5@VZLTb7lh3)xdBfc^J=ttknWo`M-^V0w9?W6DCr|+-N|NNIHzW4Lp z{1?;n|0Tol@>Y>6BkdI}jxsVu_fQ86y8lZmfURvJS+v0SIu*?K?{7hIA}M|Nmp8N2 zvOp;0%g(rEIQtK^w1u5EGnoA$Z9!Qh@ts`h#k*VLoluO9h@z#tG47!AJ7Dl7>gZ3u z{aE`}y0Nlk=6wqUQW%?7q#Z~oo>WL9PIXA(v`35=$UuetPck!E2|ZIGCxmZdvHAIK zD?w}~w$+P%=AnsaoZ*UIv$L`L0T=9+A}vUmJ9m3&q6tE=95_g*`06~oE<@}IQz;u2 z0^ixtk;fJqCqu3^=cS52ku^hyU}Gs{Bd)vD+yhB76-ZHwVaIb;q9}X;O>i`5zXwej z@3^k{k^X4~RGJ1IN${a0i?@HK)Km^9O+|biX5rChGuALXT*W4eS3``R5pIKSmUY@q zX`xUzJ+m(Te|2+`nPmTHdfKmkzVjfbB-)G8E5O+S#G=Mx!Ivc=mz#SRdU0Y~vVVbA z{5k^@gBuibUYid!sQ6+0_&VtfMGs9jzj@!bxfYMybQAaY%rFJ!`nE+W(clc8L@Zdk<||~( z&azu*VP}1J-?r9#r-OgDC9aFPb0@^FWr@hmImj-5g;a2~KK?HF#*J<)&IQuHX|ZsO z9n2u>J@y-~uk?nqOBri|z`u@mki$h0ul5shUX0la-JOt72KCE&<;g3064JY*aZkZI zpa+kBi4^d#;ZM&nEHML(b&7dOQWR9jB=%bH!}BCjp}KTxpNDmG)UyvRUT=&D)-=b5 z7zCcgUup3#Az*a_VJ{wh1ScbKiBJHOMN~*lFYpkK^DJD)ih#1=f=_Yx-~Wi_tc3rBdIqp+AI zG#T*7F_QOLZw5=0aawQb?-as!m3R38vNbySX}2SVDJ}oOE&yx~o^t?sTxB|m+hn4G 
zES*YAi}0kno(W2A2jJ4h3K!HyxiVZMv#9F)LI1dtc4Y5dN{pGuF~rgm9>9p@$&x%w z7d*X;5f_V?1?2$Yh-9D~Wc5tmT~IldG>VvrpfgS5Mb@YA7Qssx6#kM1eO`mIJ*}mb zLnrB#Em1fNH?OSqURRAdE1}bPT z%*kbs`JmhRX^p zlqmp73sI~oBK$1w9w6Q;i~qJ=$i5qi2qrfvtdbo_fj89xtCgRXiJl>$IOG zVLKN1-A&g+IT3@#{4aT6SMzduRFgc-$a?ryMqAeVK*|*j!8ri_UU7S}))L@nTOO!7=QUuI z=#2H1sV%_iQG5zLHyD(mVMwy~n2FP+i{VJvMjNqMIOM;|kq$hqODyPTP<){aNsDax zj6Tw3d$kIUq&ZpzTLFj+bc8X&opS^$x*(0KAWphEBFxK3D^N_<8fvRSA$KjI__9C| zQ6wB0idO~2_ykD_@dTHp>*%;0NOV`6s4b8v6$`br0>hfwK!LF_;z&rCCG5T)&QuvI<&@#=Vtzf%3o_(!Ur50@XrTAay4eJ}0cM|ok^)H9*3 z+&j?tAJcY<8UCM@8EA&p&zfP`DbHUjKd8OCekU(8LLs)Tjr*q=8C?Rk5@mt+mw4Z^ z1=Q{T^38+Zt|rJYFI~9B9o^YlZgSKXBt3%+=jGi?bAcgYhXQA_f2nLp-DV%5x zlUGuKPZ&Gwz`|DV?Pz?cd2fTXUQ$FD{t%^X8;yI(U?Wezrz#qQZPNI8uRI@70SBE8 zUG++cTlDz0V@q>_=?LbzyclJZ!WfQ%P&OPX2jv;zJ&GRzI`9PE|jjEI;T}VNW(Y>Wa44Y-}$v6tRBnO|0IY#2#e;*idRB9_<9kdU;bFeU+)jb!M@X28K1jEhroeP0)Gzk0~ z6_Ef0iVF5y@KTBzMlh~1zzJvCo0IXu!*X7##fHH)_yF@+#D2WebJn|X{@mEPo{F27(->K@eu3HO*aisISvpJWh0dOIG`k`&xwS#oL_ zZM|64-h+cK`GUkBQd%SQ_@uTYYDcTtYT;PxGwpOS@d6)5pp-yb_jM9^(gZA<>{7q~U*r{>B z(ZyABnRC6Rer!_VvAugwV`*DVT^_=o3r&br`~PB2pyCLufU33M+4y`9FD*cgKmSK) ze!H3*Q_nNZa2o|InbLZ1xP6%q+uuVnCtcmydYqHP0QgwQ+DD@TZoxxFYnCat1@C4= z5@%R0bYCl)ulAM{*P0D-y3Fs}?cHQiw5VzAF85H^hEOk_Nl3+<@(RxehPYaA2G*|J zIk@|KhTD(Px4VId%;Ad=sQF=9jrFb5zJPw%8&=}Dmp@OE+FXK=k4I=CKrF3T0Q{tk zk9LLRI?nq$TPys{PrB7K5uKV3BI@PG4e<#!YYj1-C#-K`BGTZ<4=Z$T9%5ojv!2pm z+cO}&|IFLX5)vm?a0Z4TdNcA84XtY)>x?|wOtcanC5o-}UeF^OJiM3n zOeYaRnNgqz6ryaj83hYaf`a$+ecrd~*6nT^{39&MS%kapty}fJ&-=X3`}}^Nmo?qu zg1UxbladH*f&k~=&|~CznM0lm>&;y{Db-CIdH@W1E#0E{PRewdlIqIO%pRmkGz(Ij zZuQb)Yz#dh4iTv72LiNDQPQgbh>L6`Re_)x_6Rtk?oQBMJ-%cqRE9LKeR4FtfOlGn zdrdTK>U>VakcHzn1a2bHveC1=JaC^VU=7JvVWJGOZ{YedUN}pic2gXsK)kap!}>|V zb&6+d=}G$w9^8_fBU!OW09v9ON~c=PeNNFxLki1iVbF0-(dUSO`rCPz={z@O%s8q| z7wAg5Q{LI-zRmHWgFpNH$XGw-AQ>%5#(?Om3#-bc4Lgu%(SFnFUMgH!Dy2Hl`0+e!5?R!(QIM;nJF zaF19EhwX2Umg&A09B(_)wv}+AU7&1MJ1_QO^wcu1&bDEgL;Dqcw$N`>f{5t*3@>~9 z7$-T??y{`TwlgZThtBd)hKJJb!Qrq{sYIoa|5f-HaA8ELd+ObdwP=Q7uB>op)SlBI 
z=WTfB>~B8E3-o4$M&hNJOM=mH_W5?VI@yi4h5NzUkKkE^HyEs@^+ZX0+$#u|7q?MO z;>T>U^1Ow39c)l$J$`q?f+R{eP*)vxpukb|3%Qpi0uy{utwI|O(X^0Xo*b~&u7LFc zOaR>?P671+j;{G=uD6Sde-+gN>`|5JM_`?3Q*bgUV86bhEDUO?i2c}l2sM}zpumNu z2HCe}85g1a*jbR8^Nz-(a}OrfABR@uMKVbu4Q9}(FX_R--fU9K$O1A!o=rSPZ#^z5 zeMqdwy8z7gg-|n=S9o#}zb)m8x2>T3g*lrZV>ChPWdciW4moAA_RydL>sx}1v=qc7 zO9>vP?0WUgmg+#P%YTCC-6yV$RuTxNpJHIf3lq)1SQpiub7~&vR0D?Rsm*;Ux*}T(T z?wHc}D%E<8*=q=|f-`d`k%OCWCFSrr3+3O}!o=Vt00rQ=$HpJ|r222*TQQUPat^8? zD_tmlzmR{CFNQtHTY`sOi*JRF8`4VSKdKTs&+UeR*M*~cR*Rw>e{x{C-x}tY0=MyV z*u+8(hz1=TDQc+B#!o)$2;s;evWW!e-zlt128*M@st+E9O3w@M^Xk)^Tq8=Hcu}~p z=0NKO`Ce76FZaI@cxt3_>xAZjW46Lw+k$P-mdXNSz5dGTq_HWyR&c_N$_dA8TY(y| za7k_+t_ws@GfZ3!yKT&c?anr|>=5}#8tLECXqh6f8v`a=6fECL7ddr9pp`cT?bn5r zABQZ;REQTbJM9syv(%eOOOXybKR1Zi1-?bUhX>XF#^4Bb-$>UcL?D_KG2LJu8tE6I zKkGQ}lkP-k!?&}C%#j-xMtMDaUGSqK23z86{z`bV#f4rjJLN0m9#-3wA2tR54==TO zrtP={cmA3?awiO_-~S$FV-3<3y76pw!*%o`Rwig?TVl?{U=GqfM_tH(*`1Wbrf+r+ zh+O3!%Vu{n%4YWfceB|&T5yuN=2SP%t$gak+3eyk%V+lpg#q2{5)>!%CvtQ9^EA2a z+*0^0o7-tL)V{gJ^R@ikGR>M>*u~={)Ac6xSY!iE=`H;@0itYH(~t6HQvgP$qx6LP zAxjX5Dz+kpt-eWB92Jdv>7@R2w0KfK$enyrKVXHwMZkViKb+lgp7Iwlspk3P`BORP zqD|QIZ1MX{{zX0oOe(A&G79)N;5~g%yzC-8eJ3>ML>T#dg1=4KP7QhZ&TAHUb6!`~ zVZALn!$+Kb4YXaAD+b}S=IlGGHsrT!q-X5!S?BCyja-bnss+K)5a|gTb1P;9Hj@*W zUdNR(VZCg*94-}6pytT;Ey1oD=}(k& z>IsAP6A(FbzL;GJ;_Cb0F`7_PjM!_J^D%H82RqZ8Jy$=_MU&S`Q^ zO;W1T@z@(gv(<=+n(tJ7kSNo@&)~3958}f_P3@inpR*`F@Fes4*jNA_uUtbOAw~kx z3WgYO98}E5#%m)SMmZtO9;Qw98c!8f3n7>$kVH!Qj{Zz@%t6L_s2E);E zwoAci`*4{O%`n)MfIB6ti&HeUr)O<~|cU zz#EyuCeW*To;w&w(Y-Y)S14;@A@|(5)m^$zX^UXqP!3Y)88uS=I+uQ!0eua}YAT;q z!#rD#*@^tF<3yvNY7O5U>yn6JovL~ozKtDfvo^`5<4x=tJavF}uwGo)4z%~yArT~0 z|MR#Fc>c(e=jEy6C@1xFsNNl!Kb^Tc`QF zp)|{1g{MLOGMM)6E#}R@JIykHnwMa_;NBXwDF@?CjEfA5SDT=Top9+@8thGf`7xfr zu0>Rs|Kyqo@j8!yYmsvGM`F@M;uHIb1gL&IcA{h zwCdQ06Vhf#GHvE6vVmY6+qy!mFL{~e2dw^qyd@wwokNhGH7xi=QqtTqHTO{j3Yh!~ zpXVb4%7UohC+zV?KH1LM(>LAcHJS_7?0Ng^?0zO^VaeodC|wX0Kmi>xL96!}>A0a@ zQ8PHM_i}E`aS}M}w5%GFn#Us*4kM=)jOY$gvP?xYR3n%->qtiT^A<;ZV<#O41ippWR 
zjHk#P)+f6o@)Xta6lo=# z${`+1Mvi*{ca|ow!1&n_2mT5ZDULXzQ*EP;Ivk4av+_r1m))ooEgKgLsoC=%1qMK8 z1Vvoi2^cdw@xd1QyfV(+8*=b85|b_Txi4c z7mbKg=ZP|!_XxYaFuqKFU+cq=*%5A>IIfE0ykz6l07(QSx@>8Abj6WrNJ6o}^m^XB zBQfL$E1B=nj)`>~<)D94Kub0GUp4lCuGf*|0M3Eacx@d4?? z)*3~*glEQSyGugn3dG6c^VC?ophm&s!fEDtq9vQ49_)rrDYkBKIpBjC!zT+@;L`(q z5~pyT8R&1NC1R=k#kppnpF;E7Pq?>CeM0H52H|z>G3LS{fPbg~Tpq|l3 z2n0Ou`X>R3z$`aX_U&BJ73*S7SKeXUE(kR`nX$hlax0gR0p19EYb*AHah)^1>&%7s z$a*<%H!zNDtR()(eeHI{iH<)uC|u;POB`!raI@g~{n>JsO{havtLp-oPUUIhh}s_a z!!RD@>F3mq%&sOzXY9?Ys(gY@!*shOyXIuJ^Crf$Wqg9)5AKb670t}lCN{rVkn>+clS-e{dNc@am zwsu{ljT3aBObC;sA&%Q-{6X#CKvX1)rKp1_^d+s;XAH%@3Qs990Z$$W>chn1rJ`bB~1aDdy}@$V#c2t5&f% zvIx)z*?LuykZC(?S&h4hGmP39nyN<0%V6U{+G1sPiIZ6>5@Jk3wz`RZYow-X>t0K= z?>p*xtN=1TXq{If_KN6aJyn(BXW6D-FX7O);sCvBB96`pB-w=F;NO7L9i;=VA$Dc% z{+jm#+x*d`&Aj0rlI%eFN!uY_xwi@p*nVGaK7n?lysk+sP6utE0gIH`9*6)+5S9rS zQqU>5lHQ_hTqw;y!|b4*lM{XG;-UdM+e2X^xa;O_vr|Vis55(3R7JZFAXFfgj;)F+ z-L`SHR?iLJ(jGdtJ#tWn$VZiQXpNc~9Poq5LXkuz2^;Hr1I@8RiN9y~TVqd;&;y5) zEO%R&7WJroOpplzATM}AQi#%W^fsZ_^bQ3IPdY*)(z-B9`O-sEAzIL%KmtdHJe8TCvV zjeWR=drpzqh!>O4N|e5)3+nw32BgM{d=zkDPV1w6Y(*qhzwR)m%~la>C`{^B83z)OGF|g(i2OvvHZ!#d zZo$SM7dHiVr-I}#iJcW-VkoxblSNh-it&MO_a+PCwrD>mg|YJ?yh1eFaX`Mw~~HbFG*eU zK>~SWfGH=^Vy6o&QY!&sl@k-LK>GB`yZA{u5ta*gDEESUw6qhEQgv*_;JPfBuktz* z#NWCjC$g29plnh&rh%cakH~N?&Q3;c#IQ6`v_OV(PCD8(z@jY1wliWvV^R3Qh>5~! 
z0fg$t&yjdJog>QJzAbi!p~bizJmj5+dPa%nU0A*+jHPo;yvUO+8XUHcGjmx{*(iV+H@Go#cj=Nfl+ZT+G<<`j)Sgkd`4)dg2U^DW$uc_lyQZfOd07(w>+o~mSTCBs4^GyRaznF zGY$wGlDe8AK2R!eyKL6&Y&}v*v_wXQO*zm>&ngfpf;{`hlPf3B+v4Iarja*z2PMcc zv!E&Wat4!?v=HeTFb1aGQUPp(lj{!bPZ(zQ3UOR%vjYAug1Uml4WeFbxGV>pYdy44AsA+M7t7@K66q^e%jov> zGhcY3S26k4^lKj}d|j2EI8i8|JepqlxchojU!6iTTjHM|XF9CdKpa4dVY(*${U@lY z`|odZU9CxyJ-O##EHT7i6DXllA|Cw zdDPe`+i$zs>jLH>004>Br0Gj+W)r=WE5(A6#ZnU+Veq@<^sOZF z5YPz^@RL14mY{ew+0z0-TUyzgl|;K$-poVBO;yQ26Xg-x3fi0wvnr0h(u+1Yb{nSB z!LW~a%g(Tn3@`6aO8+Oz6u2nISfcs?3kj^cQzJu(jD$FXAr!prbh9D0D2+#Oqk4AB z_h>*&h(=JA9B(oPu}#egBIMj^QML43QFZedUI0QohYrFN#^vGDq85C>D^{IBXtRKE zLD`L@d@qO+0|8MYfl!lBVq~>97htPMq4wo!G=N25KGP2-z(Y&%+_)|_1Aw&KeGj=b zf6OX>7)1esp}A~mj&`b4W>P-B8!pEueM3FcpGUd`#?`Qa>e_VxkFz+mOBVQ=P1KU+ zE8G6POWVI!aENd`+1sapwIH{jaT<7%7#3Q#;lh!$V@Tpl8U-eRJ2T-oq9}ZYTm~@wFRc{!-2-ush=D-R4dNUP~4OdY_9GBCS**tq+jhZ^k~QR)>10{)gzxTQ|li z>Pgd%8YHzvCu9dQM|G(IVnfYQ1G+e_Ze0x;Pf~9>kr*TaPr|lYV@~Mj1prty$9L_H ze{Xg3RG1Zyc6DSJvpwfLOqgCQ3$W(8b2p)VA(Aypj<0Mw&!o^em{)Zwff_%9C8Cu? 
zUsQ%$&6Gy23W+JpRH$KDB^6Aar<2gq0dNY?re-`ljS`R7xz+Ay0Ll&}NJE)?616x| z{yH>6*@5e8Z=kn#Yw!_AM&(FTXo#}k^=m`1Yx0Ce8l*A{B4gB5#$ldd+3SujG#nEV z@a>E)YCI+vaCgAE=r-T1=G=69nJEvPGz-mxHeT8ES1?%cX^H=0MS@PA@Do{eqk>Nh zsr)2j%7D=OySd?b>=3`Fe-xze{y4RpG7;EKpSnm5LcZqZfxq%Jh0p%S^jEnQN!@=)R5n52@_^w8KL_?#~K)8`BSQ>BH*Z zy8F-g`=1ZPnlxM1QysaZ7;4BWM;Cx6*Wk(SoLxAA7N}0u)*^8H0;zsa7embUktNN0 zy6jm)Inn8+EJYEPP6FW~Ih-*MD;@;rW}Qh)8O_4(c?R0%4UsHV%5oLqvbs1;!ga@T z<=|u40=`k-MP89eN8Fk8tZ+Kb|D4Zr$hbmPp2J+0k7P_2)!q&H&_V|?7ddan@0BWP zWdi9qpzzbXCI~*I5h|v>{Sj5r?&fFJ~9c(Vyu4_ou(ar7(Bn>Et?|gv0n&+WGW!m`9kXxI= zxEF4+;2z^40~f*7!a2#VZd5MwTSH4whgQ1h7#Bx>1{gce1lJwz=4KwjE1r0utYoR}53Q%{;H*QH9*(ya!C=VCfblR;Xf4cJ z^{RJQq9Nz1ql#q7>|fZDEYHflV&OtTnP9gph1W*Gw)x<(ZI^;cWkYamV96~UR;L-2 zW86sK_bL_Os+Er%XZk9;cRkHZkMZ*4@&5AV6%vb7O&hVmGRKZVwcEcZezi`FU2kVN zFbN4$iB)TSB@!vf9ln2$>=QmJwcmeHT@n%wm}P}$;SRXHBz^LN=f_p`HIBAwT&j7F zC1M6F78E2&b-?IwaDmxXCgfzM)OOtx&=PAO4aY{} zp6B#ofsM1;jGAUVUt3G?c)ASYl$|KUJtmmVx$dwvWGLruSxawHOj`mxiL*1`$+Fq@ zPp6JUaeI}NDN!!_z(Kal$!uv`(nHK_5>usp4OiTf##~K9XToQt*I~t(;~pZ_WO89D zfe^$YuM||2j?y(%PK;T;g>FxwwMpR&XB8h9Io=KAgwDtzapb^BNV~g`5-$Rwed17S zCSEiBzexu&KT5gzj#AWgwCLqcBxDV8+KDtVj^ly5K92YqJg(IYo7ZfSTd8q!b5GMG zN_{t)t$ysVvqDJI6ncXbOh6tWxm(*#0ekKLff%&s=edAN?rb)FlkEwT3XL#p(>_GW z-vo;@1ShU0ht{dwTJSB#riX+$TfTKg`$|Ygl9>XcwrEM{M6wfUJ*2j`S#fCml7Yuo4q7rZQLL>j2azrvS{NoKOK`GGL3dV%klfw2N?eQZExki}&V9h~&E8OBf3i9b9Y zNa>j12`iVh7uk*sq+=I!R9kVj%IqVU1R7U(NimUfd=vP@-UVTZCI-(cgN-`<7)$Q+ zrLZEvRAm~-K#eF8Xha%B-=$g0*}zzlBl^Y(UE<`Bzrcpa#J}9uNg^;zSJBfo;f^Yr z5bx15Dsk!?v4f2iATl3XEqs7G0n4rF~fhx4&o;MO}ijj0V!KYByK&#Gl;_zG%Tbc21^n@Q|gGH zCv~4uK5$#~wj89l@*6dy`Kver$0fR05g4LUSbrZ7EWK%<)Bune)XdJA0DAub)%UG7 z>(!EFVnA3tRhU{$tVN|YgI4~@>>fW>)gT@vA%P2ntPrr4VWL@(92ny>DyIm<1^jSY zTO1I=-4d+l*XT~wUsb3ep$TT&mo#3-km7GC@JvX3b62rGR*T0Oadu5PNsLrWR*?y+ z6wA@tn{PJ=N*nV7^Ig__tJ1QYZz7u6e3xxPRXEjAAHC^hva$rJuRLq`(C+whx&WqK zUV{dlkpRs-=tv-XOaLnw>5*kksWO^1SW+G+JscyZ>yjWS1#AA+yg$@#7NjAGrfLdS ztgQB+ zdK)=$N%WZ1r}D<&Ns*9E(?d!1X}f6=7m^R?x)Q(3S|d16XpVSQuQ`a9#-MN-`$&Id 
zx@mDvKWx|5slK)ZMGo4G+G@)5(Y9U3^U9)No?+g(&tx#fpJkh~hB6oWC5erECz@Xp zv5g+Ah_X4aQfNf~;_o^BE}F{u{M~fHo{`F_y2HE=*)6cRD0jk3R4_4X%msCz^55nr zj|A_4k)A|q&_26h)~kNVC#5J2Jf;AI#xNNXjicHDkpD_rX<_I?}DU%@|2&uchZBhOnNtsU+lpJITjCpl2}?HXESSP%1-L*2sdc1Gkqb z;R1PV3D~BAPy`#gqDQbNBSX+>eIy5u(1m` zF5GZ+H5#D#kN#O6jBv6MdW(Mz?+>9T>5>907p^6#>U@_z1{cmltlkxMDsHkY!pbEQK$~CRa`k<50c{ToH_2%OFRNWRN4(5e2Hk-j~Bq-V9UY0fH=Q z0xGM%;f6O$Sv#W*#^+8--x$nD1$6@~n09?YYB!3<7VR4H3QcR!7Yam>H9Ws;!vzH5 zN66+(TDlvx(aGHZ5v_i_tcq^)6V9+Vm2L&5qK#ZtJ>-JnB0acS+8Vv&e@tFOvY;t{ z?akXJ`QO!>sf2q|Qr{S?K+moxHy`g3g*i2mmS80H^c0e)?Gqo!9$q;Z5$J4|!oj#c z&=?wOwcG955Tz*fez%3N+1^H=!;30{G-+;ck1!n#6x@x0*%lafMia7@(d4Y{`d&$? zrg)N)=Y<`Am@~(}VFox82kzY(Y^H54?Pl8I(rTuITu{F23LTxEpv^e|uBWFB>>KR; zOTitcJL4guj`}{pM&7i-rO`~QT2ad*gcp*Qnbk6vlM0?6mgLSqdoA2XYCha`*KjRud471F2TXHEt9dH zvTe5T3)<;mI>`QJhLCV(jQ!+BMe48L^IK1c^ovL>)~eDZFo?QT4Qx_lWnFfjxKcv( z6bS>xyRYA#PK?6RX!qdI&|tgO9B9;Q)k?V(g_au8a10__>MrxAo3M$ink2m?uV_GW zXEdu6wpofz}ts_E(UngKyc=g#P)n&G%^W1T|uNj}%p z8A@&b^=kRoPFH(sp0!bbZFH(??ZgK&j|3@^KhGX{KaNR38X{mb7p?Rm;Z7No)B>^SzUVMiDsgm>B4w_qoajEuMk6I z3EG=o*3Dn`pkiZ1oHT9OToss3bA^u9vXfH;K8fT)^+M%bgO>nGKp|ZdKO2h@POa_c zhZ2lxY4hXBwVjo^&aUmO5Ia@RjdrBTOjej6X(TI^Dj*bjgZ_X5hA{Z zYLnTrK;6GY^VCR26&E(|W-D7vmixG?dy`hwtH8Fx(a`PMxxXvvLNPY;e~ENL`Zo2*I3<7bT>O^+9eaE%_F<g{~4 zCu8O#pHX+KY#aYEL+!zFXp41rbR3pWqPetDh1|uC`cxr#KVdfo4m56XvRs!@kS=H_ z7l1)=lXy;)-d_^L1o1_CAbSi?x&^0Kxmzy+Q?k~i_wzn;PWuV86m(vuJ0aGHk z*=D0*sk+JWbToyXAWQ7*_h7gdbZzGuEKobjP)M*=#r{CVhSJQqwpWvfBaY4;94_P9 zP_5_$a6u}bN+NCDH&Q{!K-)~Eaa;{)g)K6r*5j=(b@G((eVRia;a$iD`C*WnI?rFfqRGU9IgRtIW&sw9 z2f_(vEJgby^rBVSVn&*hqY1L(W?4l`m3_KSPcZMR1z9~kHX$!4BEqCu zs4jtxWRw|E4Dphqx}v2V(Y3uHI>n~-Ln5J%eePqQ``1i`-kH{oSy2<$Lq=OiCU(WY z0fISyaiZr+Foqh)<*9A4UaT|L*$LrAT4TqQR2xx19wK(4B#fA-P@m~uN?`34bS`pd zArnn#k{s7&&4o3d)56_N-ak>2C4?$p3!@+)&RP^y%P3-59KB!A;5>~#lLGZgRQM06z)zeGBCB2oy0N?NSjNqzu;P&n)>=9wA5cVPFz7N{I z%9fk7{K($lLe8WMzxP2q$qjU%>aiMfq!v{@`F$PvGWs1-zko68Vj`KUQ-m>JcQQF(p>{prE&y{icCZ*0hgs_ 
zsCUqvP*F=jNdtBt>{YdD_!29AWAKM-V2m6cn*(~D*UBVmQQvG_8YbeGx9NV~G@0{j z?j;#n-edJr!;qnGG^Jy zvxql)esl7Z{bkTj7r#@7=M3L{zjC~I7Cp=4rnBJN47t_2#7Cl?$lZKk%kYLJhK!gQ zYwJ6LbGir@bsc7k-+^YIX}S>p;mnF{iA zYh>q2y*49gdX3~1k`q9)ZYW{{PAICs#@YCe-d;xY`4~-{n_rC1W~@txA-aSGQZ8hT zthbjtBFNovBZ2qwfQKMyVyV3jt-a8C!MnFCDYllb8EDVToS89Xjl2iuY86%DcR9(a zen|&`G**z!Fg&D@d_>;kI^rhk`Gva{RyxNU;Dt}8_{0R*U;fBZ|NEv|E{ql3FM4ZZ zaAC~WNP0Pc-M#1<>3e-@&C=H=*GQFEsx_cWEJk6`nPM3Bm@i6yP`jk9Rj zXaud0MIk=G*Qt?R=>6pFTH@8@h|{n;>=b;u)8+RIE1Z3No$H#ygx#EURYyJd6lsQ} z_A2-0Fy3o80V`-kDF}XENZ7EMp*nzkDzJkQwkGf^$p>0rBT_5YM@L7H16+@c$n04k zO^ypq?Am$mZqi~ufD=pR9=`=LGQ57}I4h_eheJ;|IQG3_q~ z_9qwuRsd5(TkPkSX(g0x41RS)<$=iydxWxRy>_6EQ)=h5wQ#Dxozo-T*xHGE?fm9Q z2bSDOn~xxyY0s{GoY>ErM&BDcjQ*n|1xEkzkv>L$V#E%|NmjfvEkR}|zdy7`wZwHn zQ%nZY>`A+)d%asdLtwZV*@10x%Q8zTf5NIF&Ixv@8$tnNux zlGP;d2x$vOS(6AYM_KO0gT%Ww2B%lpi!6#p`oyDq_6i#Ls_1wxQ)^Cwb3B>O^SYSj zxfL9+j7LYadPP56^*>y(AGCPC(|?SMNK1s>rR+g{ES0f<+{cQ}Z*(6i8mpDW^dC~< zCmkP@ckVF3tkCJ?=|Mijo5pmyARiW1 zZ096Ke!PzJQ`ZYC%ut!m`&g5#@@>zI_IrBt{xRS7G;PbBadkyE(RF^s|00c1r4~?YU8GS(W;5W}uNbvDMKQL86432pUCA+yLJ& z7k+P$Aecs)IV$)Um36rU#OhuwrOV~#&;vsi>dyhL&;!u(IdUKiK4%EJ(1#$`!g-IN zc_DvT&;oR>Q@ljr)A=w$<^Ae^HxVa`ZP zWjd~C+9Bnun)Yi2Ag}P5)s&O)jnSgR)U|e9fZ7~~wmNa}31n;_kwM>YFyx{Y^-||w zaYKQ70CQT?Kgl(nTG?B9lYFkH2Ucb)@6gKP%G2xA<5&r3U|?z`K*8Bx?z|I=?RT%_ zQp*!kQO)6D8^IAjg2NiY(ZUE0+X#;2BRJMK0=>@3LmELJ;*V-9tnHUO7NW5)w+Z<^ z9Hpt;d`@XI8Zc9LLIa*H40y%{d?Fw4sg*@abFH1s2W;GgMrSo(G>0Ok*>9pWr(>GE z{!%GTaVdd2=jZ9Pz&=v|`?P^QmxFz-Z=Uoz=OeA$j;jS|&j?x+mY0iLb7OeR=OuC} zQ$ajimILVJg7gY3pjUuyonra2X8aW(8uZ0no2N2^H7Y_l|&L3&{B;X5n@tW z3SL_M!+rGl4wS>kckETnBt5iBFO6Yc48-rjZ%y6FRf}yg@}pMC(nyc^hp7(ZA3nOO zTY*;uNkxJhEIEUO1E>SrhlR2%O9JKHUNuc0Zz*9oB8dWXW);H0K68-Gs@qGK6=A&2 z!?w4Fn9-_CJ-EKgs|OWd2`tRI1+xU@a#I3}hQ)-kw;E;7OkS0KmJ&yQQ+WL6?K5RN zSKnSb%jfh0@B04EuFm?quzFE{=*(`IOoQ|hnE+6`H`rT>KeVu!!u_3_&4omhv%}7U z?h(A-exjV(v**nf%drA-}W!5rYLVNyn0X~_z(c{kiaKylU_w;a=)Pi}ze4b`9HTj%1IS=SgO};o>q-?DSk;#Q&*Twl^qSP3}WWR7Wym^)8 
zhPy-lOS(W~+7QiN5KsG;`kIr^gE088A^Wm_m_v;G!&ip8En6s*dD230R>@`VvSeB^ zW?bB&-xY%yElW)tS;iuT5*`7fo{flxbycIjs{Z1uRr!a3;tb6$HYY*QH9dMQdlb?g z@U~*BXe+)T1T=n@>b}dljTXUbtNfTI2p}PL^3-nl&8tKt8<3BC8LWC!a z@36$Q&1yhB5k@{67o4;f*d~1u!FZl6V#ZA&I5riuOMj&}H+@OhuWK+G2rs_9}+4)k|( zl29Cc$x~MJ!V?ip%bp5f@G1Ev(rY#j1%Z^4%hdSPhN(UVz@uKmLmJoO4&&Sy92my` zO|eGdL&4v}A_JU~hjhgVJSaju0wIz>VSf*?Mk!Z|^f}tsx|5kXdpTn4MP2%kvdG{% zRosLOA9==|@vdR#&Ty`x;VhPJHOcGgQ^VfBA%$tcu&$zfq>bd!F(?k_zvYP%Xpz^? zg`f$%SfD)E%ieLwWzC zXEQE(B3DC)&9LVvws| zF3HDxnXO;T^>USZ_O2J&=ebL*WqUakFYRS2#{cGMpLKf86&7BE@gnkidb{(Gkh>-D zPU$MoiE_CfPddj7Rv^g+r*(UdH3}Z=?=1XAjudjq;ujFnbGSMK9lU}eqo^X8!-a;W z6Dr^Gs9K-4b{&0c{&4)nj7!ufrfi~jn&xI=THwWW#$Gte-*#;IG~*o{M9QnS$3+^< zH)Fd-3`wUeb%waYxt0xj_#AH*mG&$tSIqHdmT9-OQQ6+?)?|4q5zQaEdb+M92qe^j z&vYX;Kyo0F&MR|kdYh%~fIA+^9{kN3^o!v0xnpx|>cB-yN4^`fYjD_eS_tV(Y=XCx z+cQ?ozU?;fu|DAV+)-L}JN|^7hKbLJK14-}dFUnT`x3=Zp51A_+&m+0={;&-CEM|;s-n1vMIbi=oc zKR+AWQ0^r7yci&WQN3+(md28FI#uQ6=<#BDvIE zVU#E;TTstN?+Tl+D38|C)fRd{)9@t}0GQNFYh{H?_y9EyEDsKj5l0H;Y5^j9q=b#VXeLPB-N*N1@s_^RCjz=mX(DRYwP8A9S`-(v$Y2N&Y#*@ibEO4{fQO z>g&=1W4+X?leSa@hi{f1tXQ^B1>uWE0+lesGHFM=Fdh-7AM6Y{7RT^89u{jfA9g*} zA+fPfQ$MiyybO>yS(n1PbS2Tu`~>@!q|>`y7F=OcY=|(6Dcc<2HdiQU;5H<1b+=fr zem_Hgi&6JBSGcDbv)>t!KV=p>zp}$Xld4WF+1b5dx4_)#3GY*IO9F%2Ho@dU4mAEZ zx2-&%eX^rgsA|u@k$c8FBWk+QQDj0y%ubk__t++er7HXT`pPH5Ox>JV#Utxm1znQR zDg&2&WVIkOKH*_G`?!b-sa4TXuzG|c&J|7%&pmT zhc-rA6FYIc_*av>{6B3~rSRtN!B#1<|5;UE5RAY`FvZ!SI);8A@j-x)htb!xt(C1! 
zY-y=hYdWBjUsb%&9^#u7@U(NjqxwK5ni(X;qwL;i7wsflB-cle3OR+H+UO{;dub{xZn~sbOuxJI3Y)mu zDb6MHH)XT9g-iLLCw^A@5L(V}y+G&n#yd^`qY&Bh7W%>(Y zPcJfv^M2_6ywZsCC{M9-e8jdvSbLuRmT2wb&Bnyx!eMvFRSoRrxO37+S>YMVNRlh7 z4PBI4-V*5OMJxQH6h$QHw62^WeT1uiKcw#ra8g${+jg!eyN-F_nhj_&-+_E4Dh^0+ z3URifnGwy=jy5%?94@Dzf(?R0>L3|wP1~n?gSZeq&miz`XM=!>Jy2I7b|IGVq8Ytc zq#zjFSJnJg`eVV{jVTH^ke^w`!lGEC3P_TG%Rr$#JAN$moS?Vm8LZ4nqXjG4@~nF1 z@QP{s2zRt^Giq_6B%EBavGg`MUn=ZxEi0&GIJu(N!N~%`{jy$WhqUBe`L!%gzsBk+ zsBld`UbmL7@wbPc>l`b$>?F%I$Kk=6WV-P9ISAaG&8O4au|n!SRH>FCPYsda&C}_j zvPTs+X3A;JP-RDz6;Y*1mn5GupF-Qb#8DA#H`Fg^ThJA>UC`B`ZB8Fm;ve0UkLeUe z3|Sboz)tpZwv+u}rAS9+F+oR%wdER|O|h&G@i{$I*`?gTeAQ$e3-g!9>YbL$S66X* z_$+7b$&m*nlqiCa$TB;sW~L2>qx|h*ce>(_J81Wo5GR$|1QI-;yiRw3n1YtMiV%V< zwRF;2n&EGYy#;W-(-d^2Ph^mP7r0JU#EjUbRe_T=%vXjzUs3TF6H5@W-my{-=hP|{ z>ko>3A=xkTU6{R~E7WmbSIq8tUET6L%g6K(${_2^z?rXPXgyplqBUEdM`>H|9HQML z>gPz+(SX+O88z(iI;V!`D!ZDRJy;VfSa6aWPQeE#K}kKN?JF^b*nmF);c>^b<&>uj z|3~q-mJ@VODW^1g(*SUijOagCjLR2QCC+cqH&rmT#Ej*_nC6#WB4Z~I3k`It^}KBA{I z&9tV*DrdM>7R1*VqWlrFg?Z$PR-}96vP2t|-LqCq<>QfpC{??dUKC6CWnV0TN|$BS zxU>HSw@1Y(VM(|#z$h@c3Y#hQll362^kGxP!P&jzR@Bvg=%`~-11Gq!W~sTOZ=(PP zv#02Qyc3#qH1d7&Y%5jLm36H+gA0y^Q%MXr|IQi{)AirWPAh#!DXYu8*=d)krPMIg zI;AJNmtwr0p33Put9PdIKjn1&E(w-(d}1F8p2YZXaOZadSR~5Vdg?kq$Nz?ODi=hq z6C%3?7v_SM9l|Z8qZc6P(QcrJHbgCa4UFQp=oT(0jR|iyZntVfFcm=nQLIBOsV@;p zBDtb-hS0WHhhSm?4LUKl958$e3I04!YzmWZP!^A&HfH>TVvo!_4|lB-`#h$dKfKLb z>mS~gu2*d}Z@3a`w2dr@-mToN$355M*^(tLb}wiE5M8^a(DiHu=raDmhbzf~m10ByLJdZc0=GW8E&R zwZ*6Pd|D@RqKo+1wM+yOl5&4;B2@mU+RE+?{vNERP2jmpL9vU7vr>gjT*Bp7>4eQCJdxYU(B{l@Z3edII@(r zB(%JY!GHDti@|>tOull1zZ_AV+X{HO3X&k35`qWJ4mWF>YC9!OvFgHASU>b#w<&}8 zm@*Lv<{t2_8aF*OJ!{uL@%h%<>q?&#KvB;vb~k!?0J6kGFTvaVY}(Lg2$ zl5Xud1iU_tu)%9?2WNfQDIbu}1h%1(k4m$#%r*l{?RF1{Kytu|W(Cq60!d?758vUk znI)~dkdOooNjEZkO{>Q(My)HkE!8NW6051mVIRhkvV{rn(h9V4Wh!0AlE(V~LRD)TaSh)Tu8nWLo{QlcPAObxKv74HUmUKjCYpfK zIMzD`x-7OOIEvVo;BeSRR}<37hY_|bJ#52Zg>7aq!!}igs!BOlxi(N+ZGN>BRb4eo zl|pjpc7hhf$|D8WkydRr12Y+n(4zpYls_>yRiyRIE5V*(`b-GNjsWogHey 
zTCO@%M8!zgn#*c?IzRN}v`b_a+( z%NHl+HL$JzmSo`HYzu!x59tnVD&i+3TRT??!sD0w{^O=G!VU$CYVFf=8t>zfl*vcc z6jDG!{#te%{rB_3{${6v#(?%tsDBUt?SOIPIr)BdWEAD0dKcs06yDKGB`V8HE!o={ za2-B|g%MF+_AyeAn*Z7~)HHfT$pgqr_VA52VbP{X$l=0dzL9T;*t@xC5^js$z^6!? zMFBLBRb_Ke8Y3+9UUrszpjqvo-TqGFnv76I>|Fk>lW6XL; z!}QW8Y3=s|0waB8oWt}NRo;0b{dc~+v%SAM{hQCb>d>E$TpdhLldM${33h!)*blaS z#!M9^Kxolwb7gZq!Pc~|5=2RUm7p=vr{f%(!ZMB2cBR*~^S1PeQ+Otd)|_V5oR^N4 z>-1MmKNco+s_gQUZ#AL8ThyxP3oSCOJc4!u$ydgmiFj+#|D-mhP~CF7l~BAx`(WyN zSo`>?b3+wNA|vsaSBkI{{fMW;Bi)>9_JL@tbItMT4o4U~`UW1|5{OuwcTNXriGcCI z#h7H?w6!h(uVs1u<@Xe$V%DVELNqE4uCiTlN*x{O(x2qOMQz0Qz6;!S zF$i})hy^57H$%gvSnUuh8y7Lyiguht`#My8f%(@c z1GO7JD@8&E($}bcGGa>HoP9@)D$Zy;PttIcG^?p>CwHo{wb zn-ExAXi^2;$KT#%R2u`Ogh|bA?o`{9>SfhA5R7%Z%+xX zumks1f{n06LcOe(zQtcB3%Z49N5TF?)zz-oF_Ko=&$yn9%c=qrDwM|wqQ6$7f3z{a zxhmgPQHT?qhFd$1RL?=8X#~*vFuoU(7$mncgo}xpD*Q(&U~6R^9iSRjF<#J*tX8iM zAVsNAj!V@sUnm;w%>q4eB!n~hwwuDj&b6{V4DXcOv%@`M7x{xp&!n+Ztd!NCXi8Nu zi#cdY{9{Q1k1_f5jA9)Dm-I9G3l)7P*fGvnJ`>z!r7hrC@)|*4RXaIQiKu+sOprDH zqftKTP(dqASXEhCcT-##q3|F-s9I}P5>XZY--x@I%l&wbLMzgMsk%nvqX0SUjfv0t z(vg1Pe|_{Y+I=-Wg0I{TLN!F9@~Dc!-MC%DNP^=K?fh4knX#+wpo16h#)s?F8J50I z)o}%)s~dy+*s^HjnXy(83&mF7f_qxU7N-ksM!roB#?0CT{$iUC0BzI>tz8|SNJ=9h zzz=xN9fhKg>Ks;tmZpynKxXeA<3;9%9~*EzvAd)`9zV6pwS|VCKMViLTF73bakztj zt>IoXO0v)miX#UQf-6xbwTXD!Q{tnHZ^{OLorxheM&}bcSq4Q;sb;L^dp6X1`R|pm zE@6gurzT-$7?;9WO2fBz`D0omd<-iI+gDN)I^xeBXRW}M(YC~#Lnw_464@B5F-dC= z2neFN;@?S?HKZeQff`<)gedDWgK+%P2iAf1#EO8eBd}R>VuHHuV8Z&WiPi7a86jkp zs&I}OWxHs|%Lr*OMpy9fs^itLb=PCbjYM{PJ`(7aUNltv5xrh;CGPCv2}FHVN+Ije zwd+Ue$6OdJ(^c6mY=8l3UU3Ksi!b34(5Y2k12>)+y+7Eug)@v74O9o_fV(h!p$V49 z=rjh8CRP*R(3C@vYFMbSJt+hMl9j=rR&b}ams61WrbzTIWt=EG4o+#}7C|g=OBXpl z>D3h?f;Y1lQz#yFw`5v^)EoyM(XD$MbNxi2_3`*6*xa5v>kA06g0RX1y8ByUHrA28>z?OnIZQoI} zI;l6Gu4d=CFXZRBbB>n#=eTqw{d6Ec9|rLE$SHN^uaq$a!^Ct;;j}F+ZSz-ZlBAAt zN}IjGkI$o{#=eia9xW&C8vzKOWb#H9ew=DrSFT7c7$V!D% zvxLl_40c;Fj?#Lajan??SYV+fP4f2KPMParjkr8>|cu_PAkG4W3kAC#|{n zG@9{32v1|o(lWrt98Y-8VWsJHcVloyqNz(73|3OY<*XGsf(_VGC$$}iy#+pX5Nav@ 
zrkS{hqMj&kiD=5{QN?&uCV>4Yk-9c?TYYQ#J_f2?p>DSO^LbWQF1^o0>YY)aFtc|{ zaEfa>>%+R~QBLzP2}1xAL^;H2?%W6hcHfO0?BMpF{Qvc-T%Sr5 zH!|gk0()MaVuQ`vCi)bSMqv@;zH;gmdW6&|CM$_{$|in3UCQ*4m#j;@<#}}}^d2pZ zYWhODlr$mu^-`6pmT6PS&!RSkhSsM|nHqI(c`F3X52i*Tl1v+t67>^3C8|$%${xvI z>OBj-GCk`RSC}TN?w~V*7hYj9#w#VKD?!UR@Qfm{C9QQ>BdGEE)SkVW;=^_z@ytOU z!!mG>A}BdQoAEn2ZmJI^k4bhIKp@S8tYYLFxzfmUQh_BjGi8V(ToT@cTv|!ngg3qD zmh^apw>*ue70*|Fwe5U!w0w$wJ@3QNLK< z$+d`{zSGkfq&cB6=xwwK_M&O5!ho>iU8p!!u!7ylVct8%ccm_HA-T)J>pMX+Lkn?A z6=Qw7Q$1z*8}4NWF$HVst)SXEU2v_T9hECXyQwX>yg|FyRWj{RiV51`S}sf+hwp3& zXj!)s*a7$kN?pW+4f{of(IDtLKsxU7Y6nl~Bu?Pq=@#H|Dpber)b}_A@YM#{fI|yl z?35O@P9SXwXcu%kRhF&<*?KOJjRAX$yY=b}SP$hg+HK7R_L|L06=z~EEHbgnwh+vO zG&8&A%x7UdGqqt<=5hDGDDHx#y(M^KZoFt0ob%bm{GY7W1e*}2{mFbUJKR5;wi2+V4# z<7)g9&gdn3ttz3)U(8Z<`6yGwg=9I;rxl?!R(*`4h73}(du7kFSy79f>rtJWfz53# zVV(i6X3s3&eN+t$BVbPF3RY^kotfgz10>%$#;SSkGkp(+7!iij3)U`4b;WiSXqyLq zV(Vre%@^Po%5KP2BImSB%U{W`` zH#d4kddAy%P8yRj{QVSt^sgB2?!Dsq_bji-ABe^$J|>G7T`jYC(PyfrB05P0FP2W2 zo4luF@(M8-sTAEIE`Whw-70gM1*>|%;UivTsloE?#7?nZ)ObM_iu=J(#SpnjgEW5U zxpBc%^ip!E!IoQTJn?pp-n4+ms`UIHxwFn+6=>BH5IOmuhjSovW!&&yo^hi#UR1`7Qtw|N;|5s4 zpq#~EN1q6ZSde;A#*IrYA>-ziLI2mkYv}*_KQ8qDT$FExfc8sUVIh_XEb?dS#0ZJ0 zA1wF63{seS|LhQ^bYk~lGmNTL3Ut;R0~MTi0q1tJL-Qc}4L61Q!eJlp&A)KE%%S`X zXUx1W|3V}UJ!q2ujHR;~XrGdT)5G}_xP6vBfiq(1O|+M#H=Ux@-+@AhQoeN5xH;us z7ighcZ01v?u#}z5Sb;k@pCBGIFlCt-GCyDd7Ik5zk*y)s5D$*2F&vqiMQr#Dm`Tpc485dNm)iHVf=OKWESH_KctJp;UaK3J#9mD5M_lu6BUb! 
z_zpG({{rm-hV=m@qBv}z^YFYhN}NId^ASFyA#tA_x=2c=C6TCTFvy5U`1e#J^3Ip4 zMiwlx;aaeWG0W+SCW=n9+Yz|S1VvGB$%NA4G(%^uKY* zb27d!edbU3+H00{jITeToW|egqQ`zf5$3>c1iY&Q<~sp)9M&7-#8(u^3dGof`kKEG z=3IgeQ_)sKwf^7%Pz=oy3{gp+#65@w?hFrRinYO>}EoaM>wwAkFNZv5px| zqg~G70;dEmoIU4aw+ZbKaIHc!^iqpY@s19hQ_2b6ewm2yGt8>>(K=#kmC8n3&3^jZU*|KQafDENYze&<*P+XuUcsL?!FeuS@Na_ zCeuH&2yE{_d(K9Iy95vkTaA^dVwav}{1#dk(-Y1*GI%;V=o2L07TwP`v9QcwCX+=~ zG>G;+*lpWRn6Q`UZ+0_`u&Q)-*;8_ZD3FK?lhC2+nC;`zzSHbwetVcl#6hZd{-_zx z-V}fS(_6u}KfTqgSa`?XED_?2WPXb5SuM;%H%NyRb6bt~554}m=bpm&SKf=9s*ryO zQT{dtAzS;li!zmO*IW^#Gw(ajPigt>#0b*T!;duopq6Fe&69zb%a)8h*?1w(ea1TR z%sIgU88(#XRFo@JoU$yzO8mZ+;-jdAUPKcm65DQZb_Uc{N^jbi-t@21;Ee<*IKKIH z;RNj{%`T00bUDdvoVzZ(Lo0G)b<5jjzU)x=laKajc-LMl)N+ng;q)wjXt%n{hP88C zlp0$4fVICaZ~rc}|7zM_$3`uhRCE#t)0RLkeUntYCWiyixH_oh?W+fMLl2cjJw#G6 zu-DV*F4&IbwL>jQax;J6PE=SQ8Z6LFhOG5H&87DVnAQuapBU>7xb6`PJCgF=?P&I7 z0+tDv8x&Bm5|&h)xLhukLZIczyG(QUw>fc+VyFf<@p1`XVRD(A(2@xTjy|pWd!#Z+Fjb;io%zR$5AiGqO4{c6U++OB=uqQ1MqD;O@Ae z42R1__u(?8D}cVV$#f%E=L2AKbUuq7encD&j*5C*!`Y#YZ6KP2n4QiiPE|uVjZEI)gKCAum zh!lN3t~y=~36lKSFS)~VkjcIp17YktC%;BnGYOx~5^P9@4CGEpniLVkBhvGXDE^8_ zq~Vi!(3!3G0l&XIVpF!}v>|x{C(!Xk0P*@F5M`IJ1ps$s0Fc{wo4{2F^$onNeA7M% zBvY1xaJM{JQ0Y<-AXweR8<|12O61WnnW6lXM3Sx{YKh`wV}h|yU8YeZVDvK~$kb&U zD;YG5tN6b9i9g4|X`8|adX!eS4Tq$aZaYH@8-st&(MHC+8YY{<1O*QW$k&u&NiNYh z);hF4sOMNe4cq(NL*d){WGBCG3?AMNf3gXDNbZAuydw?VBhH4aoR_6|do=Guns^2a zBEev(<*-betB~oeDQTc#N;owaGCyj=yuM1=^NLwRyyz(CQLzY;qNYOMIEO6af&4wO z=X{4KrfxfIy!H+E=$My2UlZOaeWFC#sP@8-mMWF7#Gg`$a~>o6e*ij4z6Ci} zD3l}-f$$8KurB5p#UI7tE2fz}C^4fo8Xq4ZiXXlm@xo;fLY#~u3k_ukLCMx{{3lHs zfC?fmP99~9POoU*v!?OmAXp6sGUvmXnmxdfh6aN&RV>q^+|2!jpZy3r{pxg9mqvPw zOZ;~e%@2mk8~WVNXq?&k~X7oAsZ({V6;__;Eg21 zNz9N^-b_R3c>I$M!^ZL7Brm=4fCg6j6rr2yzYwp$krn@xoz}%C`}Xb$&fCdVoIk9x zeZx-bm9{N)Mp~;-=p+?BtcsLP6Sn2}&&L3xz?>a$QeHN*B+R*m>2<4g^jq-!O6>6m zcBNPK*z2{`mS68sWHyt^lthSdA!M&h(uhf!yvbg?yImGH%HIhql?bMRMtzq&4WDL1 z;Km?e{y;|h+r4ik#}M=P0yF@GM&WtB6`luYw7CO{$Wd?~kT@TJRY{27HJ%MyA)+eu 
zl0kmIhWH~4t&)Dh24IQaydY8e&;6r3@}RUOB4+=xH@W%*zd`_OlVezAZso|PV!JAe z@bRuT!_Q{rt@NOXQ8|7}u|jEy84M5c)8)cXZ3rTJ?O}dnsZ=bRvK)Uv`Y66crVI~* z%2uGSZYae>T_7b3UAc!KP^CTFuL>scP`%rXpPFE?;KQ?@1~^mUL(taUx)qS`^(M_1 zs*yf1Q2yf^I=$7-CxfW&9)uEZHr{0G$e*$$m^f zgd+>`;$Z;a7`zEK^h?m0TnkdCq+Dr)bLPAFzM|O;#o{`KYs5EJXaRFx4O4*D@lSIP z2UZH)z%ePD6EG<_j-KKV%2uejAzN;=V8m~lbD@ih!gBR0M6b0P!~LIoj!Y))S_|)m zEOx~oTGi$7D>hGvKU#T=zev0XjGvbu{MII6;1@bvXEwQs!~#reFIrHmZDnqB*2t zd^Sj*KKst1_hWVXmmfNy0L+)T$onz9I%+xRz$EO-`~AyZ^mzjCEFrcVWJ?Mqzpji~ z@XE$Bu^lYo0MBH4=p}xZ7@?tx2kn)HM2GRAApWWac<+Ntvs#D> z3%T$}zTE9k;|s@movjjKw6#P$hn0&BMpKGzi!XPi_a`!XBsP%b>$t3Ql@ruo83Q$B zHy@2^#jIzo305UHmQWG3U?lyDa7r?Yzf|G}w2$%rZe5bgAO_rx!T%orsP5wz(0$VK zSk^W&?LyS@i+`-Bv)WG~2(y4Z5|ky6uT37zVJf7`!f3pify0tWOV=pi?TQq5{REk5 zpV+EsFep~w5tkv!kXIS+KyIBJqkOM`G9cQ)0TGfu35qTmXJ8H-BW-16DR+d1<$74A z&-?jF3F3wj-|5@@%+Tq6ozWS^KZU@js-b3LA3;^C$y%jrjP;}e-{s(9qK|>}C{6{% z#@QIiXF-k0oqQVr6eh&Or@$|OS%erczZ>w;dAEjJ-syQesKU-SG&Q5zoqX+wP`|vA z5TX$ZgCs{6FzB&2fFg`iCjh|@08-5uDXMTlc_*K69Tjk=1Qo5H3~AzzHfdTHi9ITM zje8R~!Oj{eQoOh!vS?qSk$xUcSy|(qHe;cb zsLu2v-PVJv5wlU#j$JgGQ4L=v-I9$Z!v+gn0_unS}7FAT_0U>r>f6o{&@Ohx}MZ8*`398Nxqb_z8x%k5{ti97k3v(dO2?$(u1 z*BeRU;a7Mh?A&=#6O%6(J7riVDr1owgqe9AE2lUXEhRTINX!1IDZKGQ1Hw4MU*H@j zMx+C;0FjQPOXiGAC+8XwwnT)G>i`7XA|=;YS924q01<}wk5Vm0hzQl9WI=?ua-;Tw zqt23~jWV;mw2`188xFz~_T*|8-bz}Tx9B&7>LbWA#})a-ccDu3A`h`3_^4}}{D1l@ zuAT-!8%c+Pop-w8yj>J2I;&~`F0$dkC%w$UYro6shS131rP#Qnqth`NbfS_Qh=xbH z;ZEe8f0q@cx0E8u?_xNDy~q~b(s}_tkxv0N+IxHeZozuC36P7vty$2@X(9u=6!+4R z2R-y;vYi>w@(`AISlep6yuZIN1FU|{Ky~R1+%KzS$h_>xEM-M4gZ{i>&`)PYpvk_3 za>|}^h|Zt}H#5i^9&+9=Z;nd^$U$YFpr2!#0s)*&0c7ETl2+knm7GyDg?ps!aC(`> z%<<61KN9Uhbd%+_%(z#~6jwyTM7UY5CefGU-1&@1nlD}y*{o<-B4K&_yk@6$@tmdf zd$bj%MT^|aPsbc=l$)1M%Q6dNv!(^tjb9irkms2i;Bk#JHFQ{n{&`o1#!%H5SQ**{ z?MDE8JCK_irPvxiFRcxMoK1}E8hgtx4j{}HhhAoJ=(QJE91$`{Q{r}llyC%SpRITt z@@Wst3@0y>t4FZ3utn-FlWIkJ@*928(VNmwzth5)zs^O9yea&1LHbj_)VEVTOuzh_ zxK3_L&v4OmKON>~QkC{CLo;N3$eU4#FU3qZQMl1w`lI&YNYmF*byd|na8DB@YQ>)< 
zP$j3Wv|MC9wN*rDfy_6l+b|fpBze@%@S~kzTfSw%_U6bCHxtZamK)NUbMG*7#lF5O zo%%T!A@9o4=vtn=w^vV@r<0ISZZUbT|funK@FUH zzF3;%FaC}~ub~-1#;cv^`anC-(dq0@|hAu1887}cRCYnF<8gj?1ILtz2cQ=NttdP4KT2pT%}x z+2suP^CcQNU&3I}kKDMx4P7TI{BW#7v1P84Tt}1P_*zw}LqZPDu?mX}&%;M5@|v!| zeB`kUC4eC&(6Kh2PKKXWMZw#nOX`Hp<*Y_MyrWxUVi$Dt-IqYXdp~y&S1GqH>ODU= z3f09a#p(DJ6oz5GX5e&ph?ZGd_}2O6lqgQGnXk{L5$bVhO-#8Q8Bf3qI8&F~4yEAKZQ;41$f3QeOw+-x|}v*5ec6a*MfOoL>mNjkO?~ zT8O0}hLpHI>X{S+__qARYH++%`kAB(>EDWe+AoQI+U(=`_Vi<=M0xJz(uo81yGJMS zZx^V<(}sr2#H!*7A2y3)byaC1OxNi%YuT4!9dcJmDI8&AR^nwsk2JBxaEQypW!{ej ztzM-;(*GLVB^^^OQu5bs-69JiPOrnfsmPLf09p2S!&OWy1w3)tZsa~fHW8REBEBqX z$KM5>pgM(aT;OAz9;v7+X)XvRyF-OpufZUs><$HMvpa6~*vFz4VD$W*(=%**fuqNN z%t23L^QILh=!}IC;z?>!zy{4qab33{)`{YjCoj23H?Wgjlj2we>ot?vJi!>GPw~cy zig#D>`6NC~GRUH>NgF&7F9_QBfszrSkQ@tH0UnVnGt^=ep?>Y_K#fvbapv)G{4oZl zwC}28#ba8nGN-5#9LwE6L18A5ymuXERS?la26vF?xrhcSluXsA9FnN^8-J5Re*lS$ zn9GIqk0?Ki!m*~skd1}=ni3g^eX84Lb0&SEAhh8S{cGrq=3=QxHgG&>CXx*tn=KU| z1{{Y-+;WT>(X8{DMIW|LYeK%dgk5OjN-9i=n7UlqMt7;wi#~yTnazA!=W9}`HcBSob)VCiYR`AY6CfWP?^DRuRC56IE8qB@DdnwPDc5m1nNqG2OkPU7 z1f^X0->!_MS0X8W);}Gk9i}d!{`d4~&U`y8zWQx7pO-vA0C5;T_^t2pY0AM|GFg8g z7Sxi-G9P=kl2}yOnI^_;#SJwA%M7X7IUDslGuu$H4$do5VV}`^Qe4mb(sxWQ1bepc zZ8vo+Ch3s&P+H&7tr3mn#_#;}Q|KxLg;ZeOH&ET3MsIG{cnevd4_2PcN z$(ktTOs~2KJ}FJQXu0ltpUAp)YdlcX^mwa6%>zs!u1ldGecj z4#s6_9_d$Z6(}tUtDB_TLm@;k1B0-gW3ha@$Fw#uj*seUN4Z2N{@$0%QI^Yd-}}qO zfw^S4d{=mz(oz#w+W=X&G5DE?q#Zn)%FdWut7JucLX{r4kAoqe(`P*trzfRKv&$g; z=r^gXJrG|)0o@dyqWUY8!PDNpWWv#Uae16(#?#aG5;ckctbr3=)~xmDo|ed!!&MGP zOG)Dp0&Pza2VSTsz2!hiMxi2Cv8d__GapYdVW46({a{JWG1*c86a0VX8x2M$L5u=eQ~y7zG*fqhZ{y7J5i^pTRK z|10Y2mf&4{vr>QpjRXDI7Ibxm8m0Dvh6~Z!BDL_DD1z3flvsc?Md{|)Ig1~avKYyD z`g^|z$61k{;*!TaUYCCDPYavy?o5Bxe;pPTh7?mvKg5Q!_`ppr!1TvK`rhAgfhZqK zXr)&^NFeH&ApKdc@wf7zOqhP|w{Vi*oSx)T2-bw@p`!yi12zD; zpcGXU*gfG6E}-6liyN>jODR$^DV<-P*wgE~HINag@&}|zz~BxHe_R5#2}=aJo@Qiv zs%Wr2xl;-JeyI4J)CIWyVX$>j5@VFizVg?jQCe0HnP7wOxI1~?p_6<$R}4UrJdRId1A|AYL+bq*Qg!eK 
zEt~(tZLz(BHw?ARX#ul(u7EhU$t^-0eG3lor5&~E(BTMz5ymR4Y}lB17k?6J6jQ3l z{2R>NW}7*FuaL!xdxNEjKUm5dRD;`Ar&1>F^Lne}j^n75IX4hkq6tvz(a!AjrbUVE z8LHvVxwpd(zyN05%MflRmd%xmh&ZOtI+qni;p9Q!Y`J64rBZCm2vTkxzYx1sw2)1wgktnxQuf`21wkl5M?cao84hOVz~_ zuNhqKumISxC<+Icj%FZFRB-{94JaN1A6{~Nz&p!CKEub@grL1n#=GQIV;n6rp?Zaj zYktp>?HK`Z<1VZ@|M&!Rj@Q(7)>`f0nr*LjJ7O*Iwda?1m2yH0+E?OpYXA-QW!p$Z z$yt-Ph@#gyu9(0p1*U#|2lZ8q?1_Ey+KzAyLni+1$D z2&@!4tkd*3ZH3AT5!+sa{LMEme$$tRvbC?RQU;g}gkjv*ZCXB#%N;hQ*lh)7#U{-= znL3}xs#V?Fb}Rixtb%>9=nP~e+1^*_H4gzGZ11gHlzruxhnu4Zc~F__j=S5U^Sfh@ zg++zDsA4nfX^B}aQZxtD>#vtw_ITg!YWjhrh$X92D~zCB)lxN$$0~~m0VDnk ze=rKLN(SV3+S8l4$5i;~lP|ki_X_+I$k)$?&vp6Fj>7Ka2kip$+l%dJ9b9jDy?&)x zYbGhmY!lk02sVse&BXPtn1`annwWhS39^^j{IZKr7NX{ zz7p9q+9753zIzMB&*q7W2~};d5Cm?ZHXfDB;s$zZ{NYt#2_WBQ%AtLH$aF(K;{L<9 z+Ct3)I`#FvHU_J>M?UsD5Dk9TXK2i~G3!6l20I)IZKy2Ox2r{^d3U=KJ{`G@T3bod zg;xV#054N?8%}|;&qpR%o5JS7r2D4~SXu{Z!$jxul96^yMX*w~v2Nb%goD~P8S zuOKmeq|dVveiihO>0JMCQsWv3LvPCu$!vMj4*T<0J|E3uI7#aNDQ5^as~BQ~IWhs=29de-vX@mhG)8kQxr9M)aN zw6OpI!U!yb3?ekXB2t133OJyELKI>^BnEVYNt8qZK^(3EB2j_?k%)MH|8MWAQ>WE3 zCSxuz)^b;!s$Ki>?Qehkd++nyy&v(3Bkku!?zp_1N#YcKe&@Hnqptm2?%sL0;x&AU zyH8%L{ftks+y7yN=F8ma>S0^Yb?^P04~={*joYH2nPr8xbXXQ7Zh?>5j%qzcT5?`f zLxDMwa{Eg`DV2V#pLUcDJB-9v%bJj&!9(B@A-02`R90s*S)c^E36r{bNWi7#N2Xkm zYS8Nwn-QF-%X%em@C?6A&Uhl6U9=q~xHXw4)0-84j*mxptY|=6)N%NBSKfDoh0|Hs)lEjo=+;4qVgs^vCvn(M&j#mK9HcnEj4gV zl_H}VZ{Ah}YymZ-d3*~(zGyMv;L%K@rehRCihxsS7HcpU=HA0JwZx9+XT{y?>D+$+ z2PrPi{&@kNWUvKlZih%}>#QynowfNgB{2;^js3Y%D$SAy=O9r<@yLpkx zAg$FKzzm2jWR7^Vw1|RlHBc@Mw9i>hO=Ub_Y%dyV_gl45)~_KNTL#-Exe)LZ`cK4@ zmi?-fXj9@3dhNXZe-hR?UjdA}*eb}IV9hk=Eu(`zKw*`86JhXZeG3RwTB7@~W0K}M z1R0T07@)9y#I5Wbi*s9j%3ZFdCui}u5f0?l_C&w0@5Xy9MLDZZma-T>vKJ94r9kcf z1f0<)C>{#O6zK;QkWP15kS0>ey?~;xR3hM!{swEINlHIPDvtx-EIPuCI<%eM*BT1f7G3_ zWsGgqT@XwaCX5EyQe@~67BOfHjH%YF6@nYTjwh&j1Vwv9EXX<@k@_Jdz(O1Z&4X$+ z6(H&ThBnyh+RYguYL1#XZ_*Afph;6X)(zBne|s1OT?>jt4y&}d;^*n3DgTi^ zd0ubsClp=9S`k7#|13IyBHZVaNPKPr1#XjQ1%R)Jo-ki>4 
zvX@F?O_>%N`ZCLeUXe_GFv|qq(YEj`LS6w-{fa0itYLbV+_E~oy&Tq^ye9yPzi{FFRi2QdJ`_r^O*NVXQ|`W`9Q?T$BxY z8UI;@hbx7VL@9s2x7Sz7D*-A_EXdzQe zBP|(Q@ce!MzmUiAU|G?PysE$Cz4<6;KC6(sq9b4$@AFJ@w8C&S* zU(3L&uWVPl?o(W{?}ex}&PP4{r8$9K-+qvC7q~y=BHQ7&t9A}g%pe%g*zwa_W#y;? zo3oFf4$UYh5(+VIu9v|1F8r`nCA%Ut>Fl2c7mV45!7&% z3q^SqFT*96s7$HwV32Ld#_nwz?59L?q5FU@lnO+wCdy4yaFf#{`-2X^LvNAwb-hM~ zO#nv)DQhEYODoR?PI;AHI$vqFhuqU%z!K64MqXPgMGD(u(39uuv%6Vh67iRbwSu$+ zoeL{T0zCVJyoa-WFa}=F;Fkbh0V&7L4CR+`h}7SQzNTHI-aSV^=}mJ ziQqt;f6_VY-)M%j9R3F8&?ilY(UGY%0a_5p9yA3uN8aF2?z|U4XptVEz0cSFjpq-A zdl2p1-~V_?)5SejjieFtsemsv3U;7;)*Z8F0nU~*-Hdf}lBVk*HTapRaBW!WzHm1# z2yOQ%E}|+yhWuCkEQH1h_wkQ;v2bf3jmUc;QBZ~E?|F+A`B`KT+>f_T&RwevF=cOogjG5w}p?t>{TkPb_w2o}Ns5 zR&idCoSdQVD)k?zcedi#sfC#^d(!2CU> zSH}1zs1&$Q)KKhxAhwAwrn3Kz;IuNnRIjy0n=BxD5WDv(djJ^;NAr{|ha!t9q5>vM zYrB!~V%wgXms(W|byJ*X40nisk&+tn`I^uHSi-AmWe<9fbzJ#*qwoAx;`N)#?AC)X zPJNE1gytMtU#6EvsB;8i7IdCKz(?!kLJ<=O0Y^Kh3X z&w@*mltd!9CVf0UBkB#k)Lfsdqd%&`dcQVcA{wr-*y5srxsN)x`K^#Ot2t)h6fz-7=jqz$ zf3s<|I;5-X2_j*2?4nv*I?C?EoWju9YZkg@^gX;*DjWG8`I-C4`)=^8z`(GqP@xj#@H9zY0Z+YLxc^Vx+gBJ#@w@@DwzA7}vwa#D)w zPkaDA=@;E+xs?9GZFl-14wpFqYy9SMjsz|yHd3r$- ztvwRm)mxM=TKjnP+E?@EP9Dtc!Jgj{FQmo4n<{f#l5%2)Z9&}x%M!15t+lbs^ZLuD zAA0>BSO}K*izgwWWAae)*^0C)F5Ix*-MKM)wjal_xHqis(96d-JJ2>)5lC59An*xA zk?{G=zgF=BPE1q;`FIw8bWox$ii=g`A$6Z#&+O4rp?gjKmlL2fdlK|WTV-#E_DGDT zND1v+WX@*4*N=?ud4tY4UnJ=oK@GD3)?p~g-H(vdIB7wy^TGa=-R^E|8J zB?1!@u|8kOJ4j|HKaY9(rLBA5qBbc2I9Q`7lxz-R#A4X^ZG9%>9Upz~#0i!x2)#8v zWr7UPdc!H`>VO9xCIy<)>7UFw%oiv|QbI8pY>7!onn*3(#H+qrOsNWKK_{*?4SKa1 zTW`@G@J{?0KqXU_s6DKkz|Vxo)16+*fc#J_`61XMpb=3GL$zrnrrHr^>B(s0wOpc? 
zY@+^-9~cAGP5W9N;n;XZgj1YZ$MA1xI-DBFYsi{cGpu=oo*MIgCuTr&!M?-!k*q}U zo2m}}RuMTs&2&zN61=7{Qgs@S6)A6)PyVenA#Fc!AW@5eY&p!SmQ=w#zBu7IAlS4k>w5ch|$SDfnG@v>n`#Om3(0yYKeOdz+ALPpi_2N-#;cw4>xLQ;>A7< ze(NO`O+yP@=*cshyaqJpJy9fmm{RLmKoxpv*Y@bsQ#v!jRd58Z;BpK2`1c+#P5E*9 zG=0N7*A$=>VTR&sCcATh-YHO4C7B5`Xnq+`>v)q6R~ z=*}HRYcHUi)I$AM+@z*nZ7q8QbEwO0R0)L5iF%Q({w@-#bmi7o&k)L*Zdz;#(=L)` zm(=&EnV-Kpoe}nORcKMAe{#?~yK^*v4N2-R-w{b}M&Jy!G-Qn-!3`gu` zf3z^f!ZF|M$3kNU80B^VXJoF28th2Yy=m>QB^)FbEktP3D!@ zf=J4glI;&Z0uxysY81PLmT<+dBil2`{HL=>Bm@hCbTF5Qpj01Har?J?jSFD*_XKLf zxJekx?pY^aj3)T-_qC1=w$7kMwj>K*|8cq1*WH&{+zVNZ@k4pjXBJCIXtmse=|meu z(X~=#&d4c0rPYRZ`#q^D^qQYY?N31&$+x1|o4S@O+41>}zQrF8Q`b6DjFCSXrj7`b zZ4;(gJwd32>sttL2yiEUZ&Kc-PqZ_>I7`77KaGBZzGLAuIExcaQ-thgs+1XU~v&gvlPa~ zqB7ukBvlbp1}w=KpdgMK7c*KkxM?X?qYT)CNoQJ!rIE?fyDS@Ru;K-6sEg1b)$d%L zOaxwaqHFMUa1>$)L81D+-Jgxi0B@YVmlWvGo%Xv z-uZb7E{+jb;0d!z6WquDXoJt4-Z5x-Fp zKXxkix~Z;3Ci8gI2cZwyycS<~*q=J#>q~ozMvw9$YZG6uVy_+T;$|`=Lo{a?+aW!w z7w^-n++i%JU0%Wj;0i;kP)yBqsGVmjHwxNOKpYLhAR5}-e{8+4(UKUFW)RMrLAgiy zgy+Y(WcRn#)OBF_5GaY7b2z9Z$FH%kWCa!vGMs&KTqW08Ngvj`6U1#$qH#2i5eo8Q9oL#~u>d&AV5<{20 zfj+N{_Y%!XUjmZi)?>Ze4K>-PkJ$IisRznGfPW-f{H$+QL=WSSg50Pf?dRiz`WS|u zzX$oNU#Oe^`vRReb13djnikHRrmc0`K50G5ClzAjm&mZ?dXQXHU>^wKyMVY;>9)$P zh>prC&w)XdXK0dxB)0jgsF)`w=vm}z-J3`+GGaaBn9c0X<0?WFQv$m}4he&(9FlU_ z_Oe5rjz=jm!lt|uo-jxEq)PcRh6hWc!$|zc!s6;%IkRvQ)pAbyom`zBNb*UeSj_UImVD>LG8?QDldoP*KyufRtNi6)L$U(1ZCyySIa&u_l+vLKO%)IlMblU-hu+_ z;fTmj$Z<=2*oL=30BoXe29wUDRq0ZkE<`k`;fHs^S~;fZU_CDzwKCX7clG zvStlF5)Wt`Jv0(0z(Niv0ssjNlRqDCy?jT9@pzfQS2iwce7l%e7oP ztq>VL+L(x_yO7~hD?1XaD_^J!`)|vhxAT%Ju8P*;lrC5oxgX>8<`!O(H zpgTgM+N0dN{tjnvmAFyKs9-~Wc!KXBYWumm{S)hjquFCdq#qwwr<;t# zXe$(L$ekWv?}e=)T~4W?Q|g%Hjb`?>f{={V!MCRMduHO?{ieS&B)r|Kwp9(smQni0 z>FGb5&o%|p$qCA(LAxfD$dX?B7Qxy(FJHhWZM zk6W`x`8z4zoMunZtTE@-yWV7? 
zIv6@jw?YT$Djn1;@1KAj;(B0$bsw~aR4Kh+7v@}uTTvLsFd3>Gfago?G7mpbXYh&e z{Jdtu_;gKQapZ5oH5yjfDmZM^R8gVB*_k#2QDV!In3IOE^arewCrbT2R)dq}dbHNX z>SjC2xT?}K(3=#;^XwdflC#b|yXc30q$Uz;JM=J}{cOwT8c;r=$FPMbMemsV6T0HI z_`Q_SQ{|XSN(S(hiYmG1kbB(zo`48?2Jp;4?+x{5$OaiU555VT$Ci3^lx^{*0Gr{= zmn~vi{jZ2kF6^r-wv*BR_Q`P5e-&L zbqC2ozdou(qeR}!lixh-OkZ}&P}&X^EqLYq6qBcRpaH-)+(AM7)?y(T+@c~7n_OAM zT7W!Z%}a0yD~6@06qrU6STRI)zg0^)5!PxkA1xQNGG4GdSjG!)i8<0=(IO|66`iPT zb-f;&Z@tnB>l5&y{RS;-%4}_NWvxn3Vex8t$*C(vivYRCY>~h&$*Bv#?VZ5KK-MOJ zYB?}aa%-;zAhy(ZL(t{lWj`QI*o*uLWm6bDrZ5nDs{nHwR(@Aj5mS~y8RYC#Nbf*e za7X_?10g%X~$&uU;(by{v+Vg)1;hS0 zSfd>W_#GA#=v1Bud!Qb+5m*!}q3;ln3NZ=pWMH+>Jebh0%YURAab?uDR6mDMrG5_x zRrK6%K!tEI&q1Raoh6ai4yazKp<)L-VqIs4gAUT6qOwvpJ6^BKW-7NLI#vSeD8qsP z3RI8tbrn=6is7|NFrBcrtUISv8Ulz+XJqsVK^%lbVyN57dAKoH-2r|Z*(kx#d&u-BA5N0=H=8) zR`H~r0|c~IjyB{f#a8fGf(LFmhk)nDhljj=_>vNxUP(6+ErMK zdasC1d%N6T&hVPxSb-m!q;-ITlF&ofq2ECcf)FAV^(i)ANiNo;D_uHrG0kjiSBbyD zZ@U#VS!HWG`+~3x;B80s77YU!s~zzRztwzvy>>D;+m__VPS!U20GfN05^WI(ig9#1 z8LdcUCCSqObywc`b^u{DiQ2`%8d1QU|A>(ehj8^AYYl-w9ZFkXnTwJ=^KuzM> z-S!*l%xJeM9HeW0e=|t_-=wCdoS$#sa>0oel+yMV&uu0LZH^$~0b0hYJ^3SRI7JQqdo(#HZN{?Knq>UM zkNlF}03p74Olp&C`(&?U(*){;9*+(7R+HtwVA>}H0F18oz4Y50A1{8r<1HQrrLgqX z0wtBi8R#Jy0!bPfLu*4Fs^N%-t#>-{jR!10jmreS4>pbN3|l;?+Q#b@ia7rqQQMg; zOb&+YVwSc@Qr?Q>#&LqSK(~w+v$Sp8VhD!Z7OI~!Q^wY4&ARO-9@8B^Pg}=DFqxqp zPwVTQ<<~Y#+sAE|)FLWkYT4y$QNpMJOexzZm5o;-Giw@&SsHtZW-YjivsMI3rUoOU zv75j-@g$B#CZ`^mq688T=g?R?D6X}RL4&<%pFs36)>5_rhqkHR&1B8V1MuO-aG@h* z5wSxU{egvRPbF8w(W%Akp*#s^;b^z`o9K$@VderEFP+sD7GVE4^uQK%H_)r80nAHrk;IzzV5O_eEV1j@aeQT<^YkT+GWFk zXI!y71k`1INDW4<kc?OpN}1jwlmkk?(-m#+^?U8gPmW1g?Akc9X8-b_I$PmrspYL>pmX> z3lxn$vJCM^3vU<%XGfDoYD7eK6sc)SZSeCF?@vOaw(Q;u@)6~aDJ^~$@Wf8HPSq^K zeGEGo^;rylK4MvAB6wmnd^#Fk7LE~kTv&MgQ}BoXA^V-y6#Vl!D(R_vs5~Vrnh+1Sq+ozy4ds>|Cw~>DA9{<$j;FU} zk6;BbGf**t+=D?D{{RS9u>h%f>dMIb<;#+m64{$4@lxU|EDqG}UD5w7{V&>JYNnE# zY+B}pXymr_Lu$=l&Db`CfHo5sf}bXjFXgf3fM^Gn$N%pAzRfK)ID)1x2HGH&m;mTQ 
zb~l^M1z}HM9_1_=Y~n^e>?_(}ffH(n+8>Tq=CjWKQ^o_;mX%o&K!YM4FG*r~MyW#h zFIlos^T^Nn??V$i{;yX zW;~(qtrKf+8F=NbOQ;HvvRnbfvYtdJv{^)Qh?3l5*a-Dw=3(v72BhxKSzYOhXiRn^ zIVwjZS4O|4J6v|<7-DyXhAQ_W_ap)7)vkzM=AXl~lMXPEJU)FR8%+k1_9?ApmE+P&EDo{8QO!e|Ib%MN)*-nH3SrEbBo&cjm~KBN*a~o>ZiX-g4oPv2<2Ix zZjH~;EbC+5W?37ldDdFG1n%V}uy4qmmb@NMC&)S6zj=pAM2m@jEbYKVYF>ECU{0z} zTo2pTGF{_dW0|mF63sIpHJ<=$EBnC$KdK}BQ9Y_#rtT3Kq@&^fAoq{i{R(P^E?yBG zQH|K(kJw+_1+q^W*pHtJ03rhOz+J=l-gQli3 zdI(qX6O`4Y@cJ@CTJbY%#%?>r)jI_XUet0Vaevg1517GmyD>L4@Yv(sPwKC*{!#M( zOplH=qoKH&)Sh)wx86u|PI&d;-icQE7?HN_I9*EJF?B3^2U)&ET0!fEc6UuP6OE?W z7_jFV21H#=#i2F9-jU@R`i}n4HH(08 zx4_Js?Q}@j)1#)q7Z|KkiPU-R26hkkE7wd49@hvSBB^_u!@m$9f_lUtaz?WcMD~#t zTM(Ji6-b>?Iq1Tv{@i%DXwE%EwuDY>1uR$6io z3(su|vM(D&!fT*|6;9(gsdt))dHOO{=`A=tPl@ODORS8yH)J^Kzo)d7`=3) zTTWR)F7+m!mL~P0jW3@L(+FTR`Kna>KvA|rTGcVS7Uv1Mid^@l>L0?4J3o6 zKKkgI>ovr9x*X!g{keBh_U3u9d0C}Qe2y~Du9Qj6Q3iEd00t}Z9A&zdGPQG*$tz{* z=O}YgrA*@-Wu8?j(>zC+3oB(%1I!DbrIj+BbCj7Z%S3K#e+tPCP=aLW9G_lLmKoU9 z7nhHXA;|uS2%mGV*z?Q619dEt*4jQN_rn>n4WdD&)^UfTYew2U+1~Fhl8Z0gnOpb> zg}Qx~Byu&I9K#V=Pj5*!}IaGi6gWR`@O;E5PKRdj4?8&a76X{%OOrPDsA(vVE|LMXl}wZRdl%0 zRg9Oa@^J~A=OFb0vABY*Dk!c%S8)ZpiYo}uiPxm}NrSqHb;-|b~LkGJ~9x6sQV zGyuk9a!*xOzxld4xXEbkHI!O zM+34P)0NrT)#l3VoC?oRat%myT#v9{^7p8976WZSqTLnKsCBgp+S#^13%rR?kg`5_ z$@nljA3PV9S0CM<-s-Z*@v|9~QO%+(zhTiytin69&7?7#1&$CxuSFLo$;}*ME%Qff zt|)?P+!q?ep^*ZI)R0j8sDK8FAJbJpR0jma@cbm#0*HFF4-N?+8Yquri}5RQKk+Mq zfaN|=a>*(X_rYD6BXfyUm5m>Vzto+J*kfcp_e+fBDb#lUtpxkpe62}V8el657*e{z zR$yYVV%bx(6H;~;yiA4WqVz5wS4OHF2vup)vGbNXl9n-7oh2o&Ip>Z1birc(gKl8xL1R_l-F{K`2`p zzl%c%b=^5;IU3wN{ejnk$twC7Um4Zu9M#OyEAB#V&PM5X3Gj~~0!crt5%W3)2n!(h8C zx-r+1K?}!oHA)zfS~QpRp>z#e^-+az%uc79P6;HA69+BqIPr!~;_eP>PYys! 
z1T;0~`#!8ZQZRQ0UCzT*pH&-HgGk_~vlgn23nZM2DrQsL#R9c(JFrT=xJ2zKi~7!* zG_?&kiyeubL_v(+InRC>5_?>3hbJ-)F&u)m-w|9^#Y{ua3v0V$(|~> zfc$h%4~f{uG$k0z2u#HGNPr)n2_U2*ws{{Swp%YEc77A^XMS`_p8=9X`d*@E`M8E` z=w?C2f~6AufpGiPOIEXY$;Jp`5!zVd1y3Th*(!kOG;?QIHC$koO7f5hNk|I?WTwC} z<*ijiH}sN87F<9%;ZFd&l`rD5p)A$`TvGF7($9wPO%aXlMPeK%0In2bl(UtBH^PG4 z(>H7!+3OjE()kEwb+?qBaUo+XFfw?`X{>?2CJuj!ves#imMxJ|IRayry;VBtdob9} zHqnxU2D};WwI5nd4dHYjC@{7;9So6ufd*H`FXMDPB}W1kIWkRysh*L<&kek!>$m#q z)?Jp|LNRp9{8d<8!(rYNk3d}Wh4l{4YyTA-063bE3KVOFDE;7Wi&hVoL0B%dW>os?qM6!_M~X-cxX*PJO*?g*QZ=h=o66cq^_JL zwGW`{_ZfSRE>-2==p@+aw&5O^8^sfPy`RmOvnZ^+Q)NpJr#7yF^HhWT)9me&VVA`I z3Te<;ttV39Mu9Y1u&ZG6<(FuP%q)l06HU<)&MBffk1M?n*utu!>O-_vjZ(4B6Nm#( zj;jV&tt2Hq0gy8O7r}2#G}X!b)ppQ`4{#XMObP%;dADQ_URT*`Q?rS}3g!54XCG@9 zo4o+Q2!F_N7 z-msVy@+Yhpk$PP_t}9pnp~UP0mOxustr)$;O`J|H6E6a5OdFqxEwQ$LuQK!h6VG!M z1Y&fIyy?xcv82@=8cXA`G1Cdapk7|r=j1KIhQa+Ry$Rm_>Zfw6TOFv(Zln%H)glB| z>R4B;Lwx#*=s!{i{4)Kf25)#bg@o^}@#Q}6UtimRyp#Qfw0|Q)PQKf02~`S_o(H!F z_JWZyX_2c`wjD|-I~V7k(x3%#!~#iblL!hmBrPdVM=krypknlLZzZ|Dy55(liU#vo zRk9r5sf{qC7G_%jkZ7|eRfBp;FYQPFQS=n*U?q`1+fFR@Oou1X*P2J$)PinH;m;7W z6zS-|A@f5*^I`%Zs?vu1K6HR0UE(o-u)KDmp6svs+S5wg0=E>Nr!;~mAZ=ziD^cJH z5)j!4;OWiVn)&6hs1Y%#}OF5B1HQ5-bm~&-ppxzP8^dMl0uL=O=A$ z{`vO@V0WQA^bwL3kGlO_DllE*?)}i+dj78;>Db1K~!k zIjJop4~%k}67{i^7l1_v*urF6Vs~hlkhyL+1JhC^9G?<$KF7U_i;&H-`906w_m})E zGG<@sKJ|kElzz7R`WMK3o4GG?$sW3}IGniU9)9w*q=b)$rZ{gclMf4^a44jO`uKmS?b z4X=K|&1goLu`or?a(jOWqA^A9i~V`MWJmU{MUm_lNVD?V;4%uC|K*KrD~Y>sJ+HcC zbkS7w|ABh4zDZcs!+T0=Y=o!PJcl-~;qIdlN5Q|p1C(d~46tL(5_q#jTw!xx6y}qrVeJoq* zj^AH-zta82JN$c6N5>Le^T9(0lmS4r-#z#Tkj{xp5_=>+C~Zt4`)`%{bQl%mi$dc2 zkLjBQ1K+^I%#$cT9Lw@~m2Xs!kg-85ac@5Amr6-iAN#zK)!))(%)OI~==o3FO?T(O ze*H6Y*!<40SF^qUloJ;J#Nq$WpGEk;^JfPBFJK94MdH7Vb24fKhaFSY~d2vLnxOZ4mdj#JB{&9Z`c`f`c;)RzWW zFf>|-6YfIy{vcJ@3Vx3J#)Cc$^QY@}>S6NgQdG3|eLL^G{qlq=*mJFl-731X8nkMM(? 
z->M+uWcHU*s?ZiKUV5L3=EziTpVjkVe=KNJm9Fvmv9avS)7-K~qWiCV`(tKhu~cBC zA)~j_9#S^j4zn(!y|pLKo`)}tQI%qRkgu{@hb^$yQQp=~MORnmBu32PZ^ad^M(>#f0}?Nw@9EmL-DZFPD$^l66s zL+Z&Sx^R28f*P)fj_4k`2>y`%sX(RUDyiHfb$5`i)ZHOs6A=o@EJ=QmAQhs2*(%Yb z6br$=I%44%zF2oRyqZ_x2;Sn1pl-6S@)e<7mhy(w8~fyKB1FZwWB6$%lcrDqAmJ0G zzP%kr1C4ZAypebpdDDM!8gxd9g;Wg-=62(9n7tN5jm}IN)Ht9Sx1a`T$h`)M)4+ut zX8umYSIg%6Di867MUUy7ljc_T8vabFdtc(9tkC6kJVT|*Unywq7~b$6!4n+0g=Y$D zG8>J0brTeY3z32A$?itqIeNq7Zxor9*%YPx@?^v6MDYy1j;+$n6K23)g)yMF{V z4kO|FU2%07fUNjzR-dut_Ud|?%!$hK>xW`J#x+TAW-;!(dl6sJtC?8gAF>ze*Q=CJ zMMfl@t+fR~Zlg{RK_LrlNF=6@&0@16q8;-oS{c`RL)kHkzOpBssGr+lS)#H-;oHM8 zi_PpxnCynKC(*(e6ZNR7Rggl%0v6iY55Wr3IGsI8>#w(FS#R+#SM`3rW!oydeMR&* zKM5*`A!ArTtakLLND`Qyd?*BMoMiMi73PlHM?S+QWkoEMPBQHH7>Mu2xIe^qLB*lC zS_9fd`lgN=rVh@PL}P_h0e6_SIl)d;!>exanJ_%!dRMQd;1==)neD7*W(*>;)FG== zD@cb5Sik6uA&aSbYa|G$ljFwn>U;aFoEy|8(xU&&DuD>FqIhhpF%T$%ygs@l_rXsjijRqz)QNBWA%o4p z3Z)~}1-z_$?H898u#Pwi3s@`DaOLuHf!^9*phoc3>*VIu$!&DN*TGV?pdx=N{hal= zt%AAXd;isT;UWuZr?s~=;ATNZLZ=Bz2P-Lvsl_i-kKbC{Lh0M>-~qe6%`)e6yCUK& z6?E16)WCj2b{4rIJLydo_fvFSE8Uq!os=;b0$t|AlJ*P34_Qv`2WoLcBGG;(x(R~g z_vC4{QF5IErEV~ZLC?8JdQLXKAOaP|m%@tVfFAte56Zk&_PP-W7>7$t^*XXx;F+P= zWcG8*Wgn0Shl%Wq7x0qI;(INr91~wudWjBlSO&kqCGYA(FrcY;+*bDbk-XEj?FPFD zi45#u1;cx(9_=WtreO{A=KwhfSH@TM+Csa$t!}VTcxV}QH5|V`UCnpo#O1G%(ele1 z=YDCMK~p$r=i-G3%v!pU99?nh$u^O)vpn^cb+<%rriP1uGV zA+N_eJ&MPT@-dn#O0}*BGf8;x za@Rp3*{pLRczn@D{ApzE<)aqI*XYRkR#61f?Tdg)$~Mqhzn;{)5*0u)4qKX;NjACT z2nYBR%(T3Cuz|j2ci>Dn(-H+{8^^e4l-7+Ahhd_R`7}beHg_@j`2kVzS-%Q6s2Wq` zukt01gA%=5TRxFgpzi8qg|QnLk59xvFO-j=H*^P+l>pY(G^bWeOcU7Dtvi5Vj z5LWPl445`3tV~cvA*_t&6A%6ygcal+2`i)21l1f6R+L|ymwjPn$1^OfSoQ0y?RkWi z(0mY9-v09-th_1lhB{22;?oM=!F#2cyrPX2Y327Isb+02SbTA+c$XG$Oe$HsT6~G| zAYnVCkclq$Sd#0uc0sW;VH;z^g$p{W1LN3Yv+%J#bxTZCzkwyv-0c1!-)P=PFT$6WIakeMsja;J7X%U|nlGgwV>C+A4f&rk$}JWN!u{MbZ-BP~({U6dm|Ra_ z`(1%*-K)6!2JA0>9Cb$5?c*Xl)O~4jkV9z_MCJJmngk#CSyXX7@Y7LoA+CddIyk`- zKe@OtS#=$p07cY6^1|n0vMzLge;Uj6u=^^P?1`nFA9axHZ^)EaJ;)WLL_huE@_5`i 
z`t09P`GpmIwq*UMILP(0DnD?L>lpRBPe<-^e}WAit$a{ukGX&P#OpHGbD!5`$o(M~ zS^mQ7arZ}`heEN0%h!Ri7ZsomlGt}MV_V@CE8aJO+;|$wwT{#`9HF5;dtEf9( zo@C^*(`GE~1Ntf~;dYGflb+6*0B!gX z_JHh|!3fZ?u>i9mDro}KRu5lekBUx0#*dJo zzSG=D4DYIQzDCDB73<<=YVo)x(b^8Jso>>?vud>Y6y8;9yk&KIJKvt62A{%uDtQ^Q zIUofMwO!r7E+Hiv6S1W-5x$qgojX@2hJA*2K|KqqW?$hOp$n3?p~!ZoNI0gtw{njl zW$3cQqswl7>UcH&?hNR%%cBb`D2SzKV{xn4fFxtgnpj=@1ygDyq91!u7u3CAI7 zefC;H!67|-_*=EOSxDL?i?TiLwMD34APhFdO@JfDYqYBpH~fsPHWVqL0Tv-wIF&F)tP28`Ttq+Eh`#yT<%6H*(oG8Qfga zK^^xXQi-$=lR9pvq(ITJtrv`6X}Pl1|in zY6DV31|-l1wDM=83ON2By+fVJ8AqF0c~5OXQW9{{PZ^MgFcldk_SzCj-dZx@jR|dc zz0%`I*-{XlP3)caqTbzGlb0gcne2>wcbcujL_j^?2>0Vrs}<`D0R7s*>2bt#RK_$Ea> zKNM~15tgHc6fL1NcStUhpmm&?jVy26!5vFgl5<~9R=5u`{Y*l`Ru6j@a`{XA!K4OR z#A{#0wIe7HTK-r?vd9C<8J}%VLJ*MsSIOS-9h*>=pwD05ZA<1MhDXA@0r$yYp+j`e z|9m-k*-<~7Amx6wlkKfl9^r>REipnZE zBeHi*XlQ6CN?2@g^>{)Mka5dbS-m+U$gJ zYh<74bFl;(6l^2xURYwA#fsgg&15RM#dvSeA`)nVz3Mxxl$9i!s~m1-wNUs~q_)Cu}`2=?+Z-Fx=tNO(-PwxWMW)z)$A6A}Z&ma`)(eLHS_ zL(NIw>>v(ySf)*G8dMQ)6wJ&~1L9?l8g)^3E6nK~IBHm7NFu_Hkv^4cz8S(-FWoU_ zRgl9Bs1?Eo5itU%f~^N2Z+5V`)wo*)O*gsQN3iIC1Z^3b)od zK`xY;;eNlW#5dxkJ;OHw4~s*(_miEvQXTj_-L@I`}V6pXpbIHG*Cp+7o@iM~kC!0v7mshUOAz-Az0v`04= zv3-!Uw?OVBt6OTc;&OTa?vvjm~+S&mA-r_J@JugVipbL8 zY57LIxa!=4yjx1RCUS?j-hO{MZ@p;B&MU!~kF|7>FX!q^kjZS(<7|L~0s9%3tD(Si zTn|>^JY$X$xAt1LL7IHd(l5S8oYa13+xX28=7u6K`74cGoDV4(jj!O&>JDoa_>Isa zjM*Tm&`wCj-NcSog)7&7X8U4L8%VTA)uKPI%D!bZ$pGXoHrvJu_E+B#@3|o7A(ACn zTVVODevAG#Z9_$P0W$TCZDYc^L^Ya>y*&qOqh5Nq#1uqp_#0b2^0p(`R#O&2+i0dZ6(o-Ef8 zaFjg)I}ZdHjDWiP!y|nRFFQiNHl}PDA2Key*&sfxX$8{JiU5WPa*1AE5lxB2(|vR$ zn#$}XDCO-wZyrk)^)m?95D3!jdlwK+rLnYnco!`6_t?SM3b>~`tsV-D+~4Eop~fk% z?7-t{w=QiKTe5~oc|AwCMajCX&!#d8m;}XeWxqMB#AWh?qO;?##8Ip}LGlU)(baj1 ze5!*Eg)Y!EVm9jvJf#4Vq!sW|=-dLVI)SvU|F56ywPBh4nUbd6U`wPvT%kCLRL~Cj zMC|B;mtv46id#B(orHq9ne;gr0h9}hlTR_ax2M&+oP;;2I{2cIdhU-oCBKj01FSHb zZ|Y&5&+D(?Qr|GGS6;xD5D!!I3NAcXGaw5MksC-q7_X{F6F9B@>C*Bs%Ao2M(bWKr 
z3!A(yHwn(l!)(7^#xM0w=bT-|z0i@Lmn90eQ^F6aN%0D^NISzTd#m6rH6=)zyKnUp1i3{2=#}$OK`7f02-iVg3gm{0DmgS%_5F*qwYbUox{t0{~8-dvgPzqHq3KGBl|+KLVNZ zjtPJPa&nU)@K6dng&TsJt5^^U?#sTjfICCkNDzfRaLUTqN?1WQ3u= z<{O6sWE#gfe#q*ePa#Eosshks<>asyZ}ziuD;IpyYUW!uoV|`krUq|Uf0PlMNsxB1 zW|C6P2)LChx+QhtRXX?w-DD4SOTxnF@nz9fRtJOuFF&gm5yu+8)Bl{&XlV^m$F4iV zYdp@Z7)nZbO}V;tObKp3F3%H?=T-gW32x4(A5iV?!aTK{ElrjbC0ok3&$^rL>R7@N zPjh>=-zRngRnMYx)mA>LTFZkJ`j148l;bM*N3DS7GZ;~n0HVSFNxq0C&ZmN|Bw}61 ztmf`wT2#&~3R!MFRioE%agZpsFT@~wAX~2Rh~3E?y$}Z;g(JXf zmUrC$y5M?JgxK;j0HGy>H1j(5Lg)GUrA>SHXe#hgS=k6#S=Qwz6 zJwi!KtZtZgLSKPtkE@hSq&4?=QMO#d_;2Of#I%~*${nHYLVsH9VSb}9EYT~GziyO5 z-&X5=iTX(Q*Hc@@n5p03A3HBZH00r40GxIuE zf>&~`G{X*fZZq*rJ6HbP#R%R1jEmp*|9=<0|Lad!^nS-rQS^T2&x7b)2CKgN&(Nti z-P?4Ta^Lu}4_39nxRvgoPWrh0kX_&(XDKx5zNX6s?lWAne=POuvDbRSW9Gt>O z?|q+r^j2M#xbK9I-k*BWzlc-t22YclVMxl>Kf z#MyU|A&sr+ha1QwUC8h|xrdiC34XUk^dPlMy!O5Y60(uuy^0+n`+(Hg60#53X85fz zO*(T+8k$1N#^zc|*${FDWrEIGQnus~_(%!cTsSf;0eQr4hpnnT$swC=-Yb z&USlNgI(g|2{l#!*PrEAaVJb;n1+ zkXP_47YOgv3How&C)V+qI7V}p=y&2})(K2Fz86G$B2>$kaEtH720o(~w{f8txB9jj z)6M>l>Ta@L95t-Oe+=gq^&?gt3h@djVqH^u$iz%_93Ug3yYCbl?V?jXzO*9h3zpyY z*`Ue5DYGzqJ@e|K>n5*^5A_3W zkExAgR?}=#*5c5{aXmj?JeRvTC}Z`E%1VqDyO+J)u5j~X|5L<&|HtIz2{orOl(2Rm zR}DJz-W1qk z(3SUOh_)zSWLFe77Qd@E*#5mK%AVRMW*&z)@K%x-b+6_8tn4vA3#igorum1V*VScr{)HTd3o{XJC8yEk41l`uQ`?Ntl65RM}U zQ4?~xO~`8+6R68narTU(&e07Yi)as^?#PHCbcPm&ZAJwrtmVsN3Y-gxZEW$a_aFFBETbvGh6RU{J1%Grlp$ z&09#27T={Ud06C}eZHHzs2itiz^y3ZO`Z}uwpMx$L&hGdq~ors0<< zIwRNy!X^Aezbm(R$aL4Uhh$M_i}vv3sh|nI5tx>sp6k`K_e`={iW1U}FH~8bLwwE3;8Q&JLO?QvM|N*P}~gdCu>DC&8&2lVu=)|Ad?53 zM7f6wi-T49MFM-Ntm<~ms%oSs+Pz@Qvd@gkRgp#wXHob}F@HYSx>O!i#8yyMCh6zv zM!Y^mhADO#O>T<;DqD9;(<8~q=m-0XHf1>z8>sXf0@=U|T@hVZBhwh3xB`xLq7w z3v&4p>qAVWJ!sZ#+;iDLaW5h1Lv~4Kt)a8u`IzHA3|zQa2weK~c4UK=1sJGF{{h4+ zd~j!fnA>9NeGqZX%CffW?m(gsxP-Zor%$-wTxAu=1_2FMN%sYGWAkX%fYB+ zxg*TE+LrSa*u#)=E+fjG8R6Z>oI#ETTs;`ZE|%yrURZu;?XfM{VR1)U&*D*FtCXCn z^wP>q_+OMmRsbM)ZN01O%q_rx1F$BOm0-zJwRAcRmYxi>MoS_sbRps$!M1rieTO_r 
zUaN&_E2NtkEwqEle9H(&p5E9IE#L;tY3WSo2|ij>r0+Nj_%^BhMHJ00N)JR~>88&V zFXgUIC%2FFS!|yf<_w!P=txrJJS{}aLSm9^!&)}GzL#wWjsHl1@}G~hcgt4_8W3<) zVj8cYTSylR^Cm}_0kQSiXA1d$RUfNt2SqGio=mMK>|7FB5A;*RiHt&`QG6frB>+N1 zJmV_7Ye8Jv%&(H&uW*mQQFkk%HMU#I4j5DsB$4Kk%;VREoEmhIFOlEWSsd#ns<8nx zfhZ&Tb=*n~S{Ys!VOcR)V4o7PLRP;GV%3-4u4HnkL4N?d?ld}W;qME8n7u4U zB(|OocitJps8K3KGme#@M!B90;JwCrZFw~XUdyAoHmy5M?C9C0T?gs%#(MTt*@)%! zCx$`zNF_se@Ih0)OW>d{jG#D8b;1&{O9WP0-VC-_6@u*QU{29nvJ5)jo78EM#02J%iU;{~i{Q}`ni?ePbx-%II2BBChr&K~yP zLHvRLGpbaiAz`AHCzq~H*6{5bb_gys8w-DIUJ+2jd`=~6WY>L}pW7QtR|=sHo)pZ! zPgb)cLalJ_j4f0LMrcLf!Uanm8VXTUpTuTv#un59MqquSn<8P$=DrH5CCcJ^YTlW4 zyXG<}mjW`rO0QQ$H(EL@R>+)v3pY9xe`-kK3q*uD%(d8odRxV1>7g*NRz3)Cee;2#gbIFb_?VLyo zPJp;t&*_zMwZ@;OKGiN<)^M+mQ3@1oL8w!oYBz;#LqJg?WuIzScRtl_3en2$BaT2v z$cdx9yjMxIn<`_W!gs~eLi3ENvqZbJw5*2@1`}vSlxP=2x=Xa?1{3Y3KG80gc=;ow z+9X!d7qa2{)Y3l5ZjoC0QtKLB(io;-!nmx0(@H!?0mTg|c8%dm`fPj(a--gzN{fRQl>KsQd-_gw4s!Xby{yFz*Et1-E@pq9D!-0eYD3THZ<{3?;iZN|7k?zi;j27rF0-p8+yM?%>0y zLdM(&xRkL9x%>WyD_=dwo%pJU=SD9_;@4kN7b0|_BSA;f3hTg#4I4bshf$CjM)3e( zRj`IwQ6_MHrz18{0%(`VK!O)(_Q7$Tah4i}l5Wbnz1SW3V1?>F*KOP8e+C(S%b)Jd z+;;wCA1dd3p?inYSWmduDvdSrd>C=pegBU%i|)_3==tAh>#ivU*Aa{dbtavEE25j& zOI{?ODKRT-v8!-702M@H*!nqwN54crVn|~T;-yo87#kv-{qnDJ*fqUEis2RVm+j*E zYB|o>63tlLmdGAfTTz~9ZuOXUj{3U2;`w z_l4cJ&f|euyu3s%I`;CC@KPE*e~c8fNGC7%zl z5%g4F=R^Rc*rEm^09d^rAU%~-E#Y*71f1DNFLC?uq1y>sb zWy&L9d>u6iDB_*%S%y?5IIh#rvO72WV#P~*{VFeSqGg1Da8!Y`AvNQ+(T>B>OG&<_ zm!b=m7y}&nT|mpR3Gm;mxHmz*L}c?o|h z5iRH=k-4NO%)St8pkRauab?gwKf|H&o)m!f8zwiPbBEo%x>rLtVg0>LtynLw^UWK2 z7*t@iZ}D%7F0C8r(vALu;^oZ)FK-g6t?8;2>$XrOHB{lbP$kuzD%+oCnZ$hZGGv+P zv*EcY!LrTDWs9DRiLZ4&*h+@II4fjm8du6flGxi|*Q%pns(~75$VJJcv^&z#=8t`R z)r*;Xc2N9zWu+k37(%*S^AvAtH!?k%*q-8Mi0IbBWm*Nq9N+~wj#83G=FZo~Ps*=! 
zbbF#eQW;W=gTgHN-%{GZKWj1JwOke=uA@yO>=0`on%lg61K!+dICbCQp7dwhX&N|r z%g}+8fqds>cxnd6rpoqm9-yRwIAZlQ`BOFIoPj7|Q#C8^Wo!dcZd748g&7AClsGC5 zxx2_t#+jP5Lu>`DKvnXgX`ZQ0^3LW%DRpdxxDPpD*6Lq(zkUB-5vAo zONkrW#q>Zpy7dHvJzoe0qrf26c(T2dAc_Vvf{jr_J}^=V8GarH;rY@Z4lx(oQm`skapnOjF^7wF7HpE9wS5FwVqBH;#z zsY)wp`!$^i1V`NPszd2O27b5!^* zCl54*`7jt4=?x1J4VBloaQNsnYIVZHn6xYiy#GG7N@~R6G zq3{VlUJ+df`&&4~<6<)1_3jf89UnpAUFAs+ZtT^+#NJDa7?-p7l=-Qp)ScyCQRQ2- zjOHcs)lFU-NPqiAh|nf4wnkL?M{NKFQv=y&6VP74b}+C3D5=WRpq8>GK`d|gJWe4L zi9WGsvPWbu1WirXQWHfq1|jPOUd^7U9Q%h3m#i6$PJ}Z&g>VX4K%a!GkIO7&qQFv&dZ}CYH~i8@nbdMgWm~j$mUHi6q+&GER#m&d>|C zJ^ILVDuXgeHwkEl2mwf@S45Fpg+qTwUNwM>wOuSS_NB$ZAE^L)fz0S~+vXwpYh@kF zH5bAp)K=QRs0|rZyb-ZOh^hz^3~q@3QNc1l#xaMDuT57C4Du}ZBPR23~=TdLn~_<-yJ6cE;U`=n_-XicGV+2ETAK1!B{{ z1xLnyITSE2(6CjSnikweyE?7d-ZZDLVAt(zP>zB5r@&LHfCK@SxTC8|n@!@4n1vx` zK_vJUa0aR^QEiD$sNd1Tgu=(rDPIj3EYeE{r@+NX;o6ZH9tN!n-3noV|3Hae;yW0F zy%0Jw_pnB>oeKf61rL~deE_em3kajgBo-5!<6!T-G@B1~ejP=Ox56##r8)|>u)`&0 zhfC;(D4}Om!96j*9INNgaMvE-r`km3)b{m(hx}kxr#rOIrzVriW?$=!gDvzp$sUI2 za*pNt&LgGeLxjQ%>JCHjGGUq9ItKDd_==XN$47ev@N1hz(ShmFZ5tCQLBfaoa0@Az z({1``ovnF#R0B!iN>=s7L<%oYb5#W*!;`otA4mCvvaMf4k^wVl#z$YxoErF;olkZ$ zx8-m7x;^J>X)PD3bLaYcim%c7G~C_zc!x9W< zdVfbcY$BYzz~?@Vu!T5~Zk+7A{~4S`&1W%1%&1M$Zoz513a6(r(UoaBpznX|NiL`9 z_s{zL=X09=GVob>k<*1$KHS6S;rJ*^3t(&2GpVTi;@WQiyC2B1Z5MXFloY2UtU*mEW=o{S zls}%d41;T)v~UIhuca=0XeSQCZK&t;i%f7PES@yzpmi;n@%&wbY`0FacqA>HO3&8N=Hk%y0&=^FF>M{5YZOjLH#C1U}<4!&Fq7d zN=#UXcd*f?_II8d2>W9-u~4-;7w}1Ib;-`562wh>0$9}3PR)KMx`AlXWejZ@$q!vD zCid+2m+<})y|-hKd9Phd4h=j?ZH#(!6_2jcqZ*S6(}>-l! 
zt&DG~I?Je03wRb$BWPE{1W*R)RN!qWlwgLd&i%%Jz>eK$Gf?o+YW7Y7W$k#gHeu=* zp=O1wNo@(rZMA|60qN^!YS)XimrqRBbgr3%ct9pz&;nw5u*~Q^_FHdt*p37f$e=1D zX15`~s!-;4tjumJ6BlLJvaPq*Xb%U@IIv|OK=!b6H)8h!xaDT{htkRyVKH|2gzu0K zo`D<6K5TXGtN1-wU;ubJmB@!ySu(W52+4s3Ww}|d$m}D85O_a-Um?RSLYa>S@EvW1uD7bClgD?I_;BW-8GR#s|ObKVWw?x~Tn0x&@#@ zEAIPjis%^RJ0qfr8TWMy%wd%ugFD!i`={?=jC|{boj*yuVgyp_`(XPQl4n6`xfQ7$ zps-T#$-^89o2;435i1q}^64FnfuY>MaR*JO_+)Cm``UdEn2EYyz&0hIAGzaVOt}W0 zR7`>~rDa{rh%u$fAX6$81Ya68DoW&Xn}cnY|Dz_4{au9w+d^Mq4&V5@i+-=7pMdca zE+(WF!%lPhBmPuOfQ>+u9D!Z|A52;zCe`G|X7)g70oo!JK)WtdS~DFcC3~sg1VFDY;*L*5 z+3>EFm`^^=GF%zY#+Ge|b(BlOx>gHco>`0+~>J56Y)(&!M(q43^&qY;gbHMeY!;B{nttpJuZH4i<%vpu&KmW=`lR zIH60_m79FaOU^^?aWvLA!`lAqk1x}f_&O?B5gmZ2ndwKm0X}^g^R-tFDDJ0?%*=L5 zW>qiuN--c&amdys$^@urwl@t>e;OkX-KxFDytG+Z($_qynTBCGtciu!=%6m|jTjqp z>Y{trK`<;?k&RP>$w&5N_byQHA2E__NGb+TU1JP~;_UY)Jnq^U-bGdgt8x!#YbR{| zBi@TE8gEAa%s)Pwcfwo|Od6|yKw4Y9)FLn^vR0UyK@w>b4IsRgs!1x#iTAdgajU7gCi$WRIju85Y3qT6KFeW*tx zS)<>?7lB5~h@-7ww$>ZoBO@X$F&8XKeHIeYutwwQ4(k1GoDZ-4rLB9q9XM{*@%l?5 z=JZ8*qC=>t7CPykj5c1&C3?vw>Q{;<_8UN>n^fWpf|xu}OpoR97WFDyr`nLN5-{g# z$x~H`mI9&tS5kWnD{oiLsso84Rn1q%n}d-Zc4tc`sZyRfT-j!X@m2^cq zAfwFL7AfZFfOkQz%dJUh221)%&p(zdsfCasU#4y$t3#9{abxSN*n9NfXztr}EvmHr4Hf^_$8wUkm-A zqU~Q!q$8t`hTmM(!`Coy`%=pebOETAPf4KyRLr6Rk%{8fDl=Tl-bk&~VHJ_NrbJIs zB1wM|5$lGcM!ERdFp6zc?le-4vL$e3_Pl2t8a5D%U+R*;Vn|2{sYY~(v-h$0dC~#s z&1KQDJ4@$|tl~%2(&T7RXw85?33d@!!1>x&^XE>3saAsxmDWtcp>Gl) zl~ZaBR+G>_;nq99&=(Y1n=XtgkXaKj}<@6tY4=Quc3zqid1Tr?6Z)4?Mw!Gu-?^}@Qya_ zhOHNR(y78#T&C}b_eH{e>T@vBz)r~=H>XsH;3A9)*b+2N< zqO5<$jkKY)=x4^SpA??Bp_lr6VzDP$YOxcVo4=*(PZxCA;qx{#(c_S`*KmjgI3gb$ zG`ggqq>I`tukkzS|4ZHbK-*Q-_r7bb{rBvB{;UK@0ta-RUFr?JQ1I3Qjep2GuL=}< z4VKH`_VvC;-|ZN6_`Krz1Y&R@q-Le?JIHPBtCO8fAe0 zsVBn11hhY9A{3fG5iMylU)e-XRbIpR`IQtid3me}X(lg^wbT}v$tg~NJ_zuI;RJEB zjFTa70-Q6J!jSChoXxc^>oyz1Ma|HHkkCVm6@zH{&|(ieCRF-^8{f&bDy-mC$Rxy& zO+TUY(A>GyEPbEcX+-kAG|YWP^ee2I!I#G@87&hc_n+LFwwD7m0^z$S8|13fa--mx zXh4yXj045mK$mDw?u$)k-|)dQFA>fJ;7Yz&?DyXX$xRY&-0_O`i=zuHW}Iulv{>=v 
zbmm#4eb~MPPuP~0*Ro5&P$A9GZR!LUq(XJ*`!YdB9YVa2pawH5yI}j-#2q~3v)kKR zBwlbuC@7LiB_>{x?TTln{WMMl11O6n;NYh838x_I)CCtOb!zt|B1oc%_!qVKCibPB zOMjZG_*o<^?I4oJBsUu{^?*G}0^0d}@I0gHg{b4wy~eO!0a??{%Q%zpaS29r&lod; z%dNy!5@^MYY&@$vHN4<76w)bs{#3(*&VR)THDT2|?;8~x=#^=d%6Ru2KOMX?&7sWA zM_`hG?I)#8$+S^!mnUo>Llh{0kMMO>p@XzDsc9S8MrcA+bl@~m!836XZQnSh(i214@VF6}&GEUPvBe932=-vBi3E9y!;vBcTB1q$M|yvtOa|Bq(~cd6!o z&PTfCy!Lc8TtqHMS0i_~@wmTk#?6W0S}m?76+;xt3%Q@^>?lkLH_~3axvp{sY zpAmF^=bWCC{Exq+Y%07)X6aN*Bao84H0HPX5rhGXJuOf?4ESxm0VfjSZOB^jj$bUZ zAunUZnE^?Q9+J-|@9T#Q+$7_`1IRyTl8$ApbS|t{mI0{^EGU+%)lh$4zC&<67Izu2GlxxMCPU)@1 zkqvu9l}Cj=-32pA9}+7ky)9N-rzO)lk}86t{_%z*!YKvmRm=8o-UDLonHGzvoECGP zcW6X*Ye2o}-7Y4Oy;Evtu@kYddivMw58)9aheA@TvMj$@75aQY=n6$4VT8T{LcfmD zLI0)m5)_c6pU^fUpA_S@?#9#h&EdqZjV=Zlip(2lKjK^X*zx{veBr}?bNq)dzO0h* zwDgmhz4A{5{sEHhfB)Vzzg+tP{YL5A@jOo7hJws(t=rlk<-VIjsnb%Q01JUvteZc8 zPfWh$30)arVbiVjcacG$VZypv@JPS&@RdwpqpCC}99QgZ>fiDL3c^pH9>}O(U5V`* zPOj`Ac8KxvwU@Bbq)3tGBy9Wv$`ZEHhA@rar3(gtJMy6Kg0JVg?lFy+s=QM*;aaw7 zT=-a9+`<)vYner0)f|v}vUQ~*VhXagRiPzoB*@lPjSR(+$=3WJy}yJkY@U*>m3!F9 z)~;zAg_qu8hC#Oesi)w?>gS(=r0RK7kW|eUXH#%eHNTQ#4ieqm93)jQIEO;2egTs} zJzB=YpvMCuFRqnqK(G_3HII@!ZQY{nFjQC>n+H;GK!rWDgRC^LIvYs;5oQr2WN;$t zbuIT}f(UE)c+&(Eqj|6!)D(shpaWw7vj#j!Cs}8I5V-18$(llG~RMD-k)z~b{ zXDj{C2+Btw$H*1DNp3h@c*?M}c`C6+Q>otb$KDR#7E|qa-IAt~#&GFyVHtda#F$DP z#g)}2{Q_YpUPY(v6?d;e$r+_0PNWKj;!ubUqA3;VZwV;01&omv9uLjYjc<*D9G&L$ zn;(4)i{-&kT56YG@p9)@?#_W?klLMXkdT4Fh0<%zJYIf`O!%4C^uIton0_~*mK zjTu>=qAwF|u!Er*N*^8I0O?JrJoBpF!Sqvg|6EMOzhKXRmEo4G!#UrUk5!YEEFBJp zs4q05pu&;%haD0y>AXk4-P-iaXB2x66mENu(O(%;I_%6#jwR|3hc+Wk#x4yJ6A47C zLyw_my+;lCD;#Rvu5YN}&}|)xfFE7r@H4M7HI%y)Ln8n5F-X5Z35H)ZO5oa5)(oR~ zs8!3ROJ)aB|GShz`aEAgD4(`Arb`Ybz75^lQed6{Uy1qjh6z|_C>Wj}F>NnT?#C>) zBkq+a*M_50C(HI%Fe|TXTE~F-MUs^X?g)O76l1`J>uFIb)aNDSSc-q6I#S2kQpD{= zbhk@(oZYHP=D7SNNQ3iwVHwP88TxlEJNma73cZ3!q4YPvm8k)U_sF5Z zhK4+2o-G%+vaZRJR76eorq*FMAiGJ;SV<%oB0hGndCeYHkF@O47tg!Y1 z5+7-lVAs9wheXxP0zBB@Wo^SZ>Nf7r)n2=sy-W)wy1WePDE4IHXD^+y4A$+@7gd0m%1H;}< 
zW@V{EIp24uZd&O7Xu1%%kG(VD$R?>U0Bw#$C1}Vj-?b8XG<|2GKwe;6vMc+igHnw1 zRLWYIlui&Dv}i(D_P?z#Lx<3%&@(HOKg^{0d-vaw=JovTe3atf+xdw%aaH9={%w6M z%|FjadPi{v8qME7OCVA`eYC93$dGeTOLUEwlHy468Jw@qk5>8E2Y z-JkxS+|pjly_3U>qBy}}vA(3Sv^mf^??vqwlRy|BBmsea*YX17kNy?s*8#{e8bpNF zJ9co)g(=pqm8*vv?P2~K92)M`=cFRBeInJ68KQ&*hw>ULz#-wJI-4&6(t1eHC z^V;q8>BFPccu}uC*eX{lihfjed3uViMUBn)#*~Il*)1*Yn@?k@CJ5Zd?JMmJY#U?G z!H_GZ4T)!}jCMq-Yc;Z8jnOPxm3{i^CO@OyYbKGm)jom}9#wNhIa-aUeU0?ef zQPsfiz~AH4*sB@@afSw`wdGD%SLaWC7N8Ie!Rxbae9Ms3G)zJDq9_Yv97z`~7EJSx zkm1!6z`KbE$sk%*Wi|bjdRS`PsW?sV#i#XRiMkfmkXBUA=l7x5M6Z@kF5%@_dP(5G z4ZI++MgHMQcWC6*nf}#{yqe|JQPO9!*J!wB_*ZY>m8nb~VkW7*!uiD-d?T;KT+_!! z`G$5tkR^#K=OHV%$d}vFgWHtgHf3hBB&!`w#gA8@# zz1*#m%xG7y#&*@%ZZ&pM<8jq6A_XLSMJmB-qZ{C*RV&4@ zQTa0M>_gC$wJ#tX0SJL=zIP5_5bdixDWdZeyu>lfahQF0OsM_c6^*uA31%?HZ%?+C zo*c3#)1@b~_GEkS3BylLV0+soi12+&g+$2q9nWB->An8@RHcKwKf|y5sI{?+zkWy` zBL9o$ITuC8l$OpAeVoELE><*WE#s4FiX%wZ_SdE{eQB4RjCMjzAGfAYR92^-6)gY+ z#1VH?uqL}&qFt8go;%rqiNo=b5?xK{-4yS3+|4^wMle59$Oz7y=!)yvfio<|7U=ha zKY$^_QP5Vk!)5LH%x2n=zA+l)2J=8*+j*T0Y)k)%+}_HLoo^?1K`q~fl61euY!~pl zmUg?MpL|eRz$vraO3s9mcCRi2KcHf+hLUUN`>YsQ*uTQ+gmF4|nsv&<;$hrBY*eI|L1U|~j>%42P5s{>(v zN}=BdQnl?y+|?56qHN#26;`7IUnpWSS%ExPT3NL7DN64X{%XC_b)69$np8v=FfWKE z4chm-RRy{7H~+$O?0ybU92B)`7z3w!R*BZv11k4`YtSPdG?_Y)(TP|+6jV`WHlyES zwXp=$fJ@@Nmu4#I$61s_I@DkQJwP0wR95@NWFIyyxrt<(h?F>_swDH~?;$dOf36RJ z(Bu)lKXSUs_8LE`az~l#58n*jBzQ(T6bDco*X!c~>o<7KRuGV&700qYsrM&~Zg4`; z?$jfG74zuSM${$6+O2Io!5K*9AONKVy;FUu#j0XS#Wx|$7~OXeIBqqoz};>BkII%v!nI-A%-i|y zKXLh%{Lfkcb6Z>(z0mU{D_H^@zSkgNTU?9uRXP=$v}*glsZm@m5L@8-EvqYMIF|r8 zhz!m2b8XzdwD{c7%$!i|XoeM0gObCZ9kgX>OY)cI2eIkmj`?kgA1Hj3X;}v^(@D+C z)W>Ktv1O<5ULg4%N5K4#cCgT3td0jz!3h#4$Y9|V-{F+xta$kYzI-oJUlG~=k%%-}^bcY5pC26j}1J{J4sRRF*Hz z|K)46)=Qe#$&Y-6UOM?he5lqVJuEGFi2T$PWQj~1W(8b+3~>BO@^4geXptERfPmBRH8jqb63-8g$OuF?y0bB72*x9XxYhdZH&DhaVX~ zbmAlH7KbN<(kl5<4Egw15utN6@lOyAUu8z?Jv$d_rYC99Eq3EM(rK8ZfUgX{R(Yir zAj5Yjz6NTSbjvK*<3lVki`dejLb7s!y%xA#!=M0RGr<2!JQX8s3! 
zh+I;N)9g9<4?t16u{bR*$-jaLEdROu)^LIRdX)r$kK>@6jj@uqk{;#s@bV+LtoA}CCj1>xS~6XH8^<8Tq-Hj<+NnWzd@l|kd&NLBb80kuNT9)sTj`hQ2CfOC$J^A(^>M#PUC-z`0sxo zipU(BxgY;UKce=EXe~9E(Hr^TG{rb%6}O7DhdRw)Rz>~yo33*2!G8@6taA z{B1P#^c%(SZY_rQ`xN5bP9b=|??s_8<57Cox#)s5K9;UMc>lS0dxlOnxK7sdf%cX9 z5L{%)Gc*c=zdkC?0dVvRU2&`*^rB-wi|uDvY@Uem0x{A^60X9`lM&@Bp;C+sQ7l(K ztg?=?#_r7WX;=bO%NC+wsC@J_?GYAL{-r4Y;j>OV=I<_hyLRDLzR`U=^C%2^s#tJ@ z^!y30)BMl*DAY4&J9kMjUJ|%qp^uI){aq z;Zf*(a^$?A{3F=bv8fQr@;`9W)B4rQxV+OGSJs`8>(QU#u%M{Rj&od*vjl;c6>aa$ zZ7Jz|l`*)ga>@sc`g=}3VB+iS9^5Xp zA(_SG7+msjOUa|YAZU}aF38{VOlh0HG@rW5{d{?`2^h_pOailmmA+DrJUFbI-9mF~ zrU%Xup=wIdK^z2&GxBdBGvs6WLlPYznZJ&m?-;6D?@MF*6MU0xiakzFLimr-5Qw-V z>D1&!WO-5Dxq)31;XU2SK5KR^LnBhLORYDEhq1$$L$oppS-VXzWM3r*hVVnSWH!C? zL4}VS{(}$j{O9w}@S#~0%;1p+dHyr`2l&wQqPZ`@Twpbi>*K=w zKCv2dOI;t_sKC6*6o8#24EeIquhAv%gmc)lWM>xd+|a3SvlmjOYc<1qv#D($0E|E) z4){^~2XRt&7tYge-Td4YSnW71J0brn$V?V1gl?+3)6zJh*0x0d(!P7TqIU+CC7dv` z_6EXPl!mW*O{b;)l_sKjO^3KinW72p5|IwN0kz=ykSm5iAg&LIa;$q?Q4tg~u7oC> zWh{bCRP*Rc=(751+Gvn(4K`x{K(eE6)6ygHA>*kd2@*6WUWnN-GRr|EUngDdAi=c# zVC)hWxQS;bpjgylgIUpyj^A8irsXNYgg7sa+fN0@V;K(cR=(Qx#so_faS;l#WH5Y9 z1XPLs4QfmPsQ&6&t$vP_>IHb1r@q=}IoS*lnKc7Au4dqzI^5t1!Tw++(1RD}fJKnB zE0Tn@=I|KPojS()VLLoQh|_kFN~BVPqm|*Gw%J0(U8%-8lEEhMT!c}qHpxM27cq_I zB%rDN^myItIVS*fNw>*}S46jZ)utwZ{2^ajB9%?{C$&Imnd3JjS3+SEZ(O@?9q1K+ zRZ#6e3s&Wa`3yK5$3*P@yZzds_#7VGekX;1mk79$f0Kc-M3$j-Shh~K9mqkKC9)pO z>yA-3*S$RYO%aJQr;X*AO?VUp)kL1)kGdd^+5WaBS-V9|=R&8lI!7)g8?p?%>(x zo2=A3HdxOb=}xpEU}y;;Js|2|6iNQ1Kgkl_3aJ+9`7@liZC|vbsU1Z_{Vee*=~Of* zp=0uM@DtK={TlZWi)3kWWeVI-YzbG34Od#eJZQW z`M}0)<)r>TGw03Fejm<@8!U$lSZWxSl2wA9P`v$z{vtM^c)Iu@NHgx{*Z@P~%b8;> zC{t&)ELqQfP`N=^lKT!}8Jdxy7}hD)hBJs*8!9EqMgt%`tE~5&PCi2vA8T_v8|3)$qYOZimc*vMV1M$tSASZ#*fv0rAAlZcKF+;Q{o(y)Q!V;jIbjp^SPI=< zPSMMM+%y@Kbt~=ywTn->X^Mu`Eok!7MB4$af9(7uy}c%4pCQjPG9i>i|Hf~K@;R8` z2bND&?4X8Q>l@NsP2Zg`^zLQ( zy|JKmQS`OCB15WJkfh2Am5-?M5v%+WRW_JknjF>RV|96|aCex!{-hS4!~1c)eVV6S z4Q1~>Hmn{_;K)oFsQqd9>PZzk$!k#!To@7TBctSKvCbXYc4;!7B(;Xfqzj%-yq~NP 
zpJsGive)P|hXw$wp7pnYN-d0JR-MdZl$l=&-5Ob#YD%%d0X^NQ_|Rkg9AV?Q#1WvM z4t@$U;MR?*Fr;^ITQ|RjM!Iv6CU(C~EZ?rVl8-x0|Evd#2dJzJ30;U4IbO>}t!=Lt zvL9KSG|JruuLtP%8+0cz|9W-}@{V+Aqw^v_!J@z~j`V*9MvUKNw= zj@I_8HI0nc_R-n_6+4c1u_uO4Yv1GD3G^}t_3ofGabRvwqOE)q6%BA>1*SM6P-|L& z#bdm}Es(stEJ7BIORxRvA+?6Rw%pTl?wuAy1;As(n-P_y#Jw?R9U8@ghYRiwc8ebH zh}}2WMje)R=?$OQIZ5P#k*^_m2vQs$TVvtwI8;QZ}Ag3PPyuMpiPZ;#Ar(uwKr>KvkYDJtFvTNYpElO;R!7wh0wl~0V z{o?448^}=g{735kqUcX7_J9rZ<uFFdj^;5ZcY8+|$9N zw8^nSD)7YZZ*kb)Fnp1p*}$2_!^r^3?7nTTZJQD-w`IE*?;30PZiGPoYpF6KCJ1Ph z{hrnoBWdZ7vqg-=7iCuH#}$NtQS2sZFJeR#8%mx`+ytK>1+}s_$K3shCOAhRnpkHl zl3Lcbpkd|$9q42Yxr)FkLi2fHDYmP!@TJvG0@Azca{n z=3+s7J}Ac&VQMgOp8CL77gclywPY4&Eq)JX{HSJUYMN)QqX%#{$mIr${k$(_tpH6e z)WK|I^#qmq=qv_QY9#G75|tWZNiF8AVW4AJDPUaN8O)d6k`3nF=irOYTh`j#?HAvc zty{a6uB6e0iOCZnbL*cE*r(V)a}FC2_>D#zomR#*8bh8cl76ZXjwJVp#;GB0;bb3A z6fmrg6Ka4lbeA}?sEaJbQ;{>vWHFo#s|CM2YJr|=R;x@Qkx__hi#Ei!jW!~qjU`4K zJMS7>QdtSpYA&mTX&N6k=vVol4r-5)O5r zA9blOVFJhO!UcN+2Np|8-#Xxmh~1bIoA?Fdk-aSRRsXe30XOejJ~Kyt;0! 
z?N8uhxIx0uO08CfTx`sT)TCVG8tI3`C$%wvaF3(-7U^?rA#j)w(IVAD`Pc(5icp2J zwp`1gUy;OG1maT2%j`21Exq`bwJ&EK;FM+a6Jw0?rB23n4YMw&fZEfq3&kqYaj7Sc z4GZ4l~Ei9CFLK>eJu-wVY#KU&N>IHHrd0P>|vYibYss?Hd`zm8iOY4m-?Jp zYPARQakDg5k77c6M63UW%`)_0vx5)#*=cTB4OW1*LYOUBf%J-w6-e`FtU#NGO?Xxl zmb+sk{e#9Qkcaxw8|j+4fL^U`NdTZX&XzFIXkzEoHq)VU4Ks*^BkI^h#;BXu>>NK8 zZ)Az!Pda%S6Q{!hy^-<}>I@}Td!RqMotLz=t^8#J5=4~O;9DebxgARe^rCujH zOUEzYlr0?x0vpGdZ<^Fs@-=K6UuI9c-4i1l$D5lbC&dJ&qJ^G z%cmwMZ3<|I-6{rVK<$V@?GLr}H&_2c97&jzYr)bN4ntI^m5tc-fh0E=dtkQQ3^~>S zW#HFYqEW|FViGs+oSJ-PBv#u8=B3#Z2W3-Wi=J3SkuEj(381{-V9Ma;_L?|bsu7*D zse^*059DVHRZbC4fTHvxuo`eA?!NOw!h z-5H7kqdh9JmZHSma2x4+!IGl+mg?h@7fj}M^Y)T#snNPZMbhs|8;{6{*Ac!S4pX|j z&%tG>#?-^qW%DsbV-8b{w6q@XQXJF^TMu<9cNQE|=7g%=`Vg0Q13PX$etF3q-Nn{1P#WrLX@t)}#ER@U}tlU|HrfP1(_!?eAvE z!0i?3i)P#5A{!KQrY~yE(HF6WUM_tRidBC&*;;XXBr?-feYKgSoLQ>(cY{6MkTuE% ztFW`q|D>g@@QGGQ{}SPigd9DX@ZU(1)Q0q>4LwBTGx8M}>}Wtvp@FBf8=I8-``t#t zp5Dm!>uik5kv(3UIr&U)0Sfas^V)E|F+AL?w(}q8K=vQ(K<0&hx#_b{a1zjg59xc& zC>-9H2R?5&t-XjLJ!(TD>V;Dr_t1Di?T`e%OAIjIVbj||(y@!# zTz66QXY3d?Ioiz z&8n&@>J35d1w?j^rE^*uP)i9$$DNxpa{8s>`>K<$ANWR@S6!AE2h^Pz9&_}9s}8JtjiQybI!5DEpUeFiCnKAYpg|NqU=L`RcJnPV}-4qQFGEA5`` zW_c*kLm;ptq8#n8Ere)5EB=iqO$EkIPGtkzQU0de@(R(Z>)h>tXhft~<18;PU#JT; z+KCF59X_8%TOxZSK-IDQu)xpfW(CDz=bO?(L&Y_zPn0-aJ@2wCj7f7kqr9iLbc)-V z{QBIY;C714caZQ7RX7XE4h-&0+!%jpfr(-=T4PAtrR zyUOoC;*~W@krS=-{uW;epI~`-?I(7TZKGOs4-kQA)3%(E5buiYhRV+l75Zh8Rad9Nu9x^k3{6WDjQBK#W-CjK;JF(7@ zCbUniRxFn_vp6(EmzIoFTpolk71W^$# zvk2|LVxB2O;vmi!HUTFVG6C#cHUTW11-jWxG=cwBK=ngkQ~;!J{6gVRiMP(;^8A^7 zwrGkgMwx#z%-;!H%pYwJ>L=UMQm4B`5pK(WMxT!3?BlB`wqe&OwtaE*w~pr=Mn`Uo z&+@+GCk;0^R7YQ8y9hrH*2iQ5mR`M8QUq%+ap{d4IrauE&Ys%bFKw<0sDn>wJ6-UA zbu=vIc1{hRBrBm1IBG_M^i?o7lsVeHyoT3pNGJm%P;LC~m}c|h6{thq?~|G+n#>dW zB>%Ch$AqcJ&m4^M|F_-a{^ZKVHg`G2n+)j+R5yw z4eZjRGLJjRPQ%@nCxxEQ|;XXT$ywQ?f-4^eER*LkR-|>=!8(0=YaCu-~t)f&FG(0-!xeLmI~3 zrcYphP@i7z(^sc3Kfu>~D~eC(&qjKH^xh8k_YIU`uL#-_?6=CZP0_QVp=otHO+&5p zEd%EikMGyx?biPN{59JTc-=8jrfW5m@3KNF+P?!`P}w!$$+SOHMb)~`vlY=vI>{i= 
zed-JsSkm)_sqWENH`N)voos2Ud#taSfiTsnR(Yxnr##h3nZ^|h92(l(qKVBk)G`ZZ zBj18|M%kiQ?49?t%6q3Z0a0zC!L796v2mYU^d@+3wN~$K80qYy*_p!qgg(y&tEz`Y zF{97)vdV6aWp^mFUB7mPU(@<^%qD!F{he-w2_Ns9Fas-3_?S(Y2Iv$39*>0^eGV|5!*eVm5LHEqZE|B2v)uEvS&a`@4YMU!ndOhX!Birh!E zBAVqhTVxT64Mivp5}-VPAgLROHvPVVpy97`ADtW8Cf(8p7D}!~ zS`p2t&-9fs0f3MSePW(2^*L88i46#vpiR%8MFj6;MKo<~kNbZJ+G$SQlGWDA;l?;sSIyv0O@J)dmsD3v=k1z7Hk{u~(rX5lZ?7d>A7*Jm z8QX~F1)%hY`_M8$UygGSCVQYfX^IDguI#Wa37XO;VdNA&ZsT!iyR33TlanZvf5!2<~GIN;u+}m*$wW`*XISZl{)iD>q z3K*)PJq$IwKFHN8lLh@Dvst@*Ufpg#Rezp4+z|-alKSp&*U@;O?M}>d?{G(mSl4MW z2kvmk)~zqP!@b1>beS~X4A|Z}LwYVP)`0GCmpRVe;m)>PBquE2;Xd>{ceqPzMxar! zz#typI&SIvzttV?a{9(-7w&M^L{?+hlI4N}x{y^*pBP1W;tqHCqBCwg*9-Ipq$5)? zrpW)?;r_(G@(%ZbY@s{c>DP@!Cbt-zs)Ogd!(DRUoIBi`KiM7bLz1LM609`P5k_$g zcuY_Z?z$@eba%K9nL8);$+4KKN`URF>JE1es&t3DT(EfflilGS5F)}dSlAuze>^G^ zsj;COp)Brv4|ljPT>dW!iU*WCn9xsxFaLF%XnF&^0Sb;U3%~6IYDmrULL50 zG}!@}OZ^?d;qLPRmO1V=e_3zeus}qOSd!Ef=!N_m&^K6uqBya>u1&ulg4>BU zDHL695(#_{9cI+N2aosL^U7Ey|IR<&m5wD^r2qPV+W#hoUm~ZkR$KXE(ibUL&Z5O< zoO#xgrOTqT&l!#Uf9IZ8W2enO^yMjf9Xz+4zvBV>Hrmdg`WO3lPCKtMu7h9IxJdhw z*Z4L4qe=Z*=6*foej%>cdH3ueY}C6Y?YzOS-A{J)>nyU#^XsGT*O{)~JHKP^&T#cM ze%pR6cE3LGr1h}Kweq#E*{`&nw`k>V_m1c~{Acq!+^^wwKFF_k9==ok4Yl(De$7tW zuR%A5y?<)0z)Llw_k3iNO19ju=k{930XNPk3325ez6@%!>&Dq==fnK^;+yZ#I}F;I z(bOTEZOyf!$b8)i#k9=9Ju-TZ8G#|)c z)%Kr_CL=DHXcxsy6-v<=2X{ntj&;a`T__L2fVYy|4SK^F|V9C7n4Gr0f=zNno#*&b0bOJ%FnxAh+ zbnmo)JSMZfJK)M}pV?^ff(HU}mX_j`4GOI`XDPLdxUrl7%l0!Zf0T;++O z02;rcB-J-`25e}WJaZ!IGyN}mewBO9tl1Gu;N{r#e=??k0H*3*f&`130ZTa?2P`mL zmv=QO@~$Qo7@WZyfSM0&kBzV?7(-DOT@9v+RSRU4!3@NO4q(%V4LKb-dq4BYJJ)BFLaXOt95zHUe4ZI=@n7_cz59^H{hYV(YCmF@Vif=dh zhy3&iL55mqc=B!HBCGD||&b8(LgGorrAO(tWj-mH8tSWxV!k|PDSM&3Er-8KkZ#@m9 zsy4WCnydMy{j5O2kV8Q`paAaW#vWjg zl0-}dN@2BUR#jsO`HT5Va$!g~$-nf`FMhPf`p@OzCgv$B9x>Vs%gYwih!#`LFQ!^= zF)0|^QKjp!H(@$iuZ1$Ko^N%FV#pT75V!#bBLN2OY}kgV>A*a6kxydlM{1e2qt2H4 zAA}$^^;SmABOY*BT{nODwUgA~0~Cvi+cU})Pp%_Su-yAA4C$Qa-%p#4Cu&og zL!)p>%!JF6z)Jaz$wMo(RR)Q;<^c4I;Sq=4&SE8fM(+$h+cMdZRUMP5M0`Ob){fNM 
zEa5yN*fg^Wj%+#k_zhI*B?ylV^g=2Qiyo_NZotx)X=r8GKF(VUrYUXa7t&5x#nfX5 zY_7vk(J}A&Lvc-_M^dMK19F4xn=;e21)`%0Zjyf1@cA%nBpJ>nZq9V=CTZ{GxXby< z&C0KDCWb~t3CGuDG3;kRLybAO!mUi@h*hW8v;Tlt@IFTJE;&_TL7Uqlo1N^I>VIV zK;Y^gHc4}1QW*Mhh^U0kVl){08DSZlsO%wO<&T$vcQM&-R>NH}5^B0b$UPmVi?g5T zcgP9D%J!ocL6wS&MHxnKfMZ+7AaM|e1=+b#f;*#_4pAXsYyy=@J2Zy#w;E8_IPAvY zv_@6Ku}7}c-kW$Mm+IcW@>3((tzAQOdG(5o`74GbBzVc82j(E`vNXi1v|I#jV{r?i z`;vvx{k-e=WH8sGB4A@E4ra+p7wP395$bVLtyb#LBi>5U zglIO(&z;b8>{Ca;{K5%|3ySsRcB*KM?{WKvDC{<_LHWfFqco_m{G+NSU8>C4h zN7DyJu^S_+5}jnc2TH`3%L7B+0*DwG_w(gol#er4rx*wfGk;7)?)E>I=Z}BXCI4|X@_)Ruo90jPCw<>}?X#?v{L8wVrIA0uNBS;b zaY_FDx4P6Xk14H&`NWYBTp^U`p4W{j6!YAhGF(K@ofwNNoDD&jvh*N(P(-Ir0E@>k zez~GNxt}Wz+J1==l=h~h5Lc;|dF6k(TEuwhzCi+>6ldgr=~b+JH$ccQ6jGA%3*EA- zb}oS?8W$r1T2jJ6{qpa#e0-MS=4bOAkGn+EKCOYz)$;E1+JAsIUB8Ni6{&!J#=sPa z_Evnh)XaP!VJXhJCNnS-VE7uUihVkU6gcyfSz0>}gH4}0ODRCK0Z2(;Kw*c-D%;Yn z9J?=$f1`J))eV;>H&V{hQs^2h7}9uS{di?cIT0v&lP{{Sbc21>vJ1=S0s$ML@3*iZ z@bQKY=XXV*^)AOz-YbghERMgwM6qqSZQ}z(tGD(BaEmT|m-t2ot905&O&dQV&&Z7~ zGdx}%qya+9j8Cy{3Ne`;m1Ta(S{|&3MsiCviX{WdD!==$?@CuAQocMI&TH)sEH58e zEig20c1ic*RC|Whm+i}8d5-ho1P56GO7Q<0_$P>6&6&x4%LmAIE>weKi?nWUIwLvD z@t#@IhAT;t_?P0V2cbwlnh3#i9KeY;fgbW>%KE@p@<}DAirkOgxREQ~2#O0~Cs!q(c6k0L2wj|$zZ3b4Xm0MCkPiZumvpUl7 zC;9!1XgK|XzgB1rwkx_$Bz+&zO!=JECx%zjhq4A&M92ImiNWpn3B9Kr>BFtuaXlxi zD1S*94y1>LXMkc;PfwvK(6o-gGP?{$-w*Tsq^hGBKT4yJ+@!{N64N2%Slo@o^lduZ z^>TY1I+dY1|)sHBZL`y1Q~>I{DA1)Y6_oJl=1MDOB8Y?LQ`VzI|jy-Ug7|n78A?+8crAxDEC#34ryo?nqh)1 zDX(UCbCaw2INGf0nJ=Wwe;sK?%(BbYK?+d9_ z7zaOWCR5e?>@noCG5o2Ld={(aoW({`UmndfjbLO4l%%j_9`HPd#dPjm;-v@8#PXyM z5#@BAgd*Ao&KO27f)7DmwRF=G&2AP;raq70;;GN0l}<$_JiK=$H0DTZS!IH+c0)8^ z+9Iz^&fVCJe+R*zn?U%|X<*VMrOwzHg5*1@V`u8~7z}XD7=EihgG%#tEJ@3afDPI} zNY-Hc#0qWqL2Hv5TB$MMkNM9^+^Yld_p^Qu@8l$^LLUEcAACtWM@pU1Z|Ov zqG_6PZ3a*w5hC@Sei`FBD5lcMr^xiB+jqPSXG2oDdgNfxfQ*{RXX!q5O{Y)O{r&#? 
zF}_dRcS84B2rTOD1k4a8p=egt*w|SD>33?xUnKbsn;zNx^F0s}52o=cdjt(2Zsc6s zQ=q@p?~)bKBWi!~%i|e+o<%sfKIwP2^^02=biPO5NANUU5#i5=q+w3uxgzCtV`gt= zBB|cg@WB_!L1SbI(b^`Cu7dzkP}@mM<;uVUwoph@unM4S>6>*K52h9)Q&wo1-YuuN z);wtGv-atQ z*~wNly*03t`svuo0)4P7NE`q?Ur^bOr^hZe#IxP{++)iLHyplmtpgd4?*=ay$9rof z8lB&!Usj|!XLnNN<;k8Jbj9aPjce^M4rpOpCMQk$3y_hWnShlcse~${DdpoRlTfZK z3uPknTf{#4kfW+&s)Om3K*+`gfEO9ftSzYYmea+y0Jkn)KW0zN2lg~cJ- z*E)s=o2aa<7AaUCku>xY=v0>Gg6tlSC{u5NuMFhRl9@gIKyh%{s;BMzt$gUIm+0&H zH@U1M{b2Ecd(i!Z2NNg29>)e+Lq+iCx(Wp(Ic&<;znJce zl5jfHdEt@8#-fH~>rfg2UT%)_7dwp@PDeK&`tel$N2~05ESD?KZnLW8J5XHNr7d|3 zJS4MKQ@gwb)R8X^CqU}vTbTnyY)U*ahGlAW)J4T zUp4$C5<6cUAB{T^=%}X`inY6vq3gdTyE~6)D7{ecj!PMVW5NAxZsv&<95tQzq*~gF z=Km8hx|EzCat-o;+=mV05>L)YN;sbpG4M{)2R1cG)LbD^GsT?5CeK&quSD-lm2|U&W!wd^q@ff`ic^(hNAAkvd&VhL6x>SB{OG zm-1)t<*o+ZA`n|5WXIo&Vh2(!E#D0pHskQ7rHv6Hn>W~$lETbv9t~j zL>;DZ{ZP!N)ZT_v2C_9$S@T2nDOKmbu(?B!F&9{Umn11l$c9fFq*F5Af=XK%P_%p> z`LmTq*m5);nS@=^sU3O^X>%ZLl0Qk5Kw}ViGrhO*is&J|MplN3!+@Y*xtBg9#q5k; zLxTLBfe`1yTWVKFDG)l2dkmliZ^Vr7dW&9f>4%Xi{ETuoN^b;5YD?J!-gVz>1F60Y zQI*!4%X!1avc>HW4*x?5XLa8A=9p~EgV&9sAdz0pe9Nm9TO_RPs_;;C5^@-!_cH3; z;}9#%JG^CBWUT7RRlZlvP<>)eO+P;jEu!VVJ>UPa_oeb?WcRtxV2vNL5Xk=B9usg0 zO~U=j@TU~sp_8j9qdZ+W*+Sc24zz6x#qpZo;2oNSPK!@+g~NEN${B>GHwp*JF;^ws zP%H~-aQF)MUW9e)GWR*E9MfLmgYF#uW`&^HekvSd?+#--s`uOsG!O7H#%P-u2mV^H zN64+c&2+9F-jF`v2@nU3Dsw}50SGo+xs-oG*Q)5E6c1ff)`S5Z*8t#wb8)~@k~|xV zanOg*Xso!8BR0*lXVxdx3Ktb&IK-zKy6!=ci=M!46Fb$i!it^hOuG`giiaavkb@K-u{yqe zKg7s|EjAuDNr`&e!&tiqSv`W_0ezx7#$P_k_u~7I@#c4LE<6}^DV6^X+@MDXJm~Kia*h2nF z>D`1joYGn5q^heBll3xk&z>6LDnnqOnu4C}?v0I|K@xIuGDZT_!G}(Gs$UeA;&hPk z7f@1pHQAM;*W*pjCwhGS*PO(Eq%z7Dmu$jAh@EaD4yn?pRr0?et>*sU)!UeilIlT` zh-m${PX5cgfArN0HxT8v4Gi9XB{m8eM8#g-`~HtVvh7_zyJ7t2iIyUnlG69fok}y$ z&-tAW-c7gQEVuPfxuSh+fIObf{G6N67gROU0nJ!ZTGM*X4~?4>IK-t_WXx^@uiu*X^Vr`e-$r$j_B8>(`5wGVf$veiD2z`P~n=Tj_Zi-?9#g zYA3a?-$}vocs)4WxxPK--1&`A*(i|<@b57(7qhE2U)BmYwaa;HXgrx@(b}(J1xen1 zXEvl8Ywd|lK#5F*!`aAf=xb})$f`ByhQvvs6pqXJt4LHuD+Gt^+(hM#t7KS>caA^t 
z%)1W0dGAX$jK9>dTe$|bg7^%?mjOmDs{C1StHpaZx6*s`;s{+8uA$aA4y+Kq2Zz%FlgJKd%^lFwdtjq!zR9OAme1(Qa|6sELNVJFF z%!*$^@yk_O*hMd;v=YRl&`{>EoA$7~+*(ash{OW8-M8z<;naMq`<)`n{#9+M3y+~)Uf+d%x#nABO) z)<9UgTL%n~Y0Da<%w=VEl{uz&|m)I|JJGBoYf~*=U+~6QUg-j0qg*aP}3M|mLqV98%6Tl=-QL|Muc*6rMyXq5Ra2Z&)Nz&bcCgX!4cz0t;jQp}Xt$ zT`TkLpO5T2JURb%!@dG^lG@wXMEK4Jt{H8kGlL!x0k^h4(m1zefq4vxst};gNM!YBa*^jnoHx9+C!m zNW=p=_0f)H&}12D~+;`_@!6|J7N?fj+H=#p|Kt@Y+E)& zL}b-hQZuE_%BqMr{VDG#S0Uw{)Xl{8k@Bv=SP8jeg_8143ll_=jmvJZ;T&Tr@0bne zOQpO+=ERFLtCD8GLd@V2Pzr-YlkLYn;KywPph1`J&S~VBjGC}$o`;a~oel|S0!fQV zzR0-xQ%&D4HWzaq@1V|V+VX^nk@mrO8R21Xk)lU-2k{Vfg#O%3 zz2UK>QM5@P)WwZ5PB3UTe%hfRuZW0#D|YGdu+&oCLC-t}mrpPmGuo*AeBKvysBKY*#cIx5;2wsJ5Ss@fgS#G2m`@^0|+E?dywvT`YtJ#+l%KANkJ5zlpwz2N-*J zaCGCJer<{U9@g)N9{=ab4=46}NWVXH&mY}=p8Xuu&nLd}=+o{ky^o)I?BmlvYcE^+ zCDvuX2J{QoWj}CJ85iraAFPLQSeJdr<}?oLvhM(R9M)yuvE__^yJ24|eEp_-3Z(fl zaQ$ezysQ^W)y}pxNfdQi+as+g6Hq)KV+W)GDhEDh z+)mU}vvaPiC7z~=^Uk~-0)f>kws5;EBob?bxIAk+dS23UY0(daJJwxzC@h=>XBuo> zmC=&IdKrf~KAH;Rlt@7|s7|GE0_T#r@_hfq*|-WGm-mU`CO7v*^@ceNMDoFt6C8d3 z@)sh|o#86|z%qB6H3W1OV!vGvZQrYko#s5S&6L|xkl^CwU2^Q|a{VL=2MXo`;*q+n zwBfbO^@-t1gX`VZ0HaNv5p%-IRBewYtg0ZgA(X4p-df zO38~k_Gmi?oEokTiyF6Hn!J{}HdsZ1QAYK+%~X4)smn@)bg6Gu-kG{}4Ads{|zP z9w3XF5yL-lkS>5Qt5G@F;FoS2>0X2>Tz=HXto^;8zj6AUO@Uj+V?!wZ zp?(SsUJgR9&1 zw+xT%xbyVL%A)px#}=*)FW`ZWuOA*Lb{B@n17U@HS9zSYAod_ZX}r5_VUWiGyx2G% zu)j~2;IS8lWDg!T1qMHZ&hE81b~SYBshP))Ei<$r=r+?+fKqcjoM?p}RF|`~te2wG zbb6_%WwsAGczl@f^9^l;E26KMY1i2z;By~xw9NMW#U;q&^H3=(*fJ@6VM4D;@a$o(2bCR+d0G|a zeJ$hj1c$XAKB-7i97%N-3#S63ZybL-`jiBemW~z=;2xa2kl51qNTE(moO?#>0`?K} zpLe>cz7Dw3n>uzsyJux$9S?}R!8&&E=~>48`pWq4fjCGOX5a6t=Ihsx=w79j+^d$~ zZ-2LAqjzj$4_-+9{fQH;a@BX!)OCD~cS=WP_i|sA+iA=AmC=KxP-(cEwuEh9neT@z z@t;ZG)W!=Zj1QMFQ1=zl&~ZbwW6Y#uOU9a#nH28C2lO?I~lKq z-RB!Otf4TOqw&Y47y#KUWdZxZl+X=ea2>NTcJ&)CIQ|}^F)#oTxT)XMM+6$UXNg;> zL&BGewLNWhw-%&x|GZkulW9jXaz=y7=%!3EGkqjOH%2lz74%33*Ry_-*;PESNeLZd zTy9EN>Tn0h z_2|vqE%%l;d}RIg?oK{kXs_0a>0KNj!;?gIVAzn~@6f`ahqB|c@q(kcMB$)j*WJ?t 
zYg5vQ69VueW3|}oTM<2@LO6@@_XO@jP9m79_wpz%v+a|ek7vgIIHZaFK|R%ij-edx zPPctNZ~%PN>~{NN3=+(iCtK@dZB8;_qLu?4FpT^@DbDyDzM9Jh7@C0I?yY>$Vq5&c zc#K#8Nnr@Yg88W%SO7j~L;#!rncZs7)6F)kX}roD3wWU3ODE=Ll@r^A|C2#%7o6~@ zzC#s9^vUnpYFGrZrx(1dti+VCM;D+**b@G-GtkeDbonyUC0$s&{`Jg3coi{t(Bn~5 z%l8^Sxk$!Cdta?@fta-7FttU+@3Z3M?(xN^DNf6NIjJ;~AJ2Nok)^*tjtOD`IVf*r zy{jpu5eq5c>VEYjM=Kq>wOf(?YhEvQLkfF}4M&C9rWlkHz#f=ioamSWwov1-D!^E@ zVQj=ex;(*uYVMHzviF4KB}lk~e{hBhi6)q3{@ciPX z!5?yi2dxbMwlPy5@5}6T7U!Q80^kW)-fbbpjT9 zy}4}|Eu=?6Y8Y;CvOa!;*Yeefr4E{~lNVkYUd^R4uGt$Y#_2Y{z*JrCxT8sS1}>|- zp(yo6wV4fUV@Ty8+)8DcFzuLJ8iI}&m=qW8o0q=tu1+KzlSAirAP}JBiN(PdK=i}F z&cA&?u#>U0{dmZ6DD6Q(ZS9?%>Q z&^Glz*JnlX#$q{~cc67Yp1(uuXWc)V+6tgCJZP-X6TQZctFhzO*on|s-MFga!;ba} z&Z?k0_@fs{zolNK`()T0I~vOG%|JO_X+H7@I{4wqG_M*4!kpO! z_yQrl*cpNodiwUIbcZwz!yuKIF$`L>&QVY*G^q|7ur=2<^{b6#M(wq5{#0W+wcCL` zeJT)~{Erx$mEG>kPOB_h0{(8N<8Ozu%ugrZxs;aTgQ+As)7#^z8gGzmlMe zDN$NBUwNM@>nsCi>Gi0fD;g3+yL#89;lKPA3FHl;;?AfBOr3szHLfb;Qa(T49AHbK z2|yCqY5ulvFrJ-8o)t3lislwSNV^;d;VRO@Uh_< z%E)@mc_R4Ucq7446y0Vd@NO?dEvfiIA%BDVJO6NFf1v@X(L$k&p)11dop86~=CvYO48HBYXhR`gN}6>Cd?D z)vMP6sA|64{e0Q$oPoqe0ST~pE>wGMzIsRwUPY%@QGf$t{z19(_lCxi1?6S_fU}AH z(Lga`)7pd)hsy8pcthh1k%qU+X&RT-FFr@uL8)aZ%^U9gNyJ2>#Q9ETmG*Bg=%*AJ zjePQ3^9aPelmfh>qI2)p^oT0RKBRGRYL!gTUL2X<8y1UT^J{ z;o2cCEzYyCgwvz$55HO(eQYh}hlVYVN54bo3t1Qwso^aqm7)8vNpl=y?NoYRXbIc3 zF7uIl8PEI8l&}%=YhvuZQfAT-S>S}mS{us}?}Mh$N+o?l6nc^75aTZY{=WjRXXf8M z$ww#u;m*l4ojxzTZsl*+>ootcUcV^+^xN*R*LPMO4vrTK)iDl-gb(})izX|m7~%_& zK_2LzxsTTRmT@THm))K3bKcTrw7DMqlDp?wT3EbL7a5>nxgGF$?X{pC6TR(MuzvPz zd#+JWh?D9ST9{wJFVFA)cY4JcZ+-#4H0Spkr+>ddhmeT;cu(KqRTR*wJ9gS0S5X3u z#sU)!u)L8H66Jm%6#nc&qkaP=kjwIi@1;azp%OPxf|Eqgjn$S3GM=rIClf~oJ$~)r zS@7#8-+O18|I9)Sdqkg}56vu)%iGi9!l_tK0RVQqEAl=9<}p(U6>LA zq6B`l^jrK7Ak)X0yvn$iK4i}%57~Zm~x`{>s ze^|%Z&ZuoBwb0j;YUs>RmGeycuml+{#0alBYE*NM3P?6oU9Q8NHMZ9`HqDWBMrAmN z@E4gpeMC(VAce9kMMqq{C^{Dh4;?V8!z1L=^uyVqmT2=#R8sotrarAbK+;^*q`61c z7Rh6+PZa850!U(^29j9SBr*Jslhx^7)gYm1c-_?&-esqH>a#!uX3FccZE(vFNPA<< 
z!Wc)=MT-T|e3O(O4*l;o?F7VBNx!4^l{lUf_*cMe?eRb8F-g|3m_si}?!YTQNefaC zO)lXj3CVGZy{vL0FT}7uJPA8;2Od&*+p9P5N}NieTee**k+*y0vmWH{eu!D5HjBmn z)veS*eHEpTk=HkI>|!SM;5kj$Zk=XQ!vy(#Mbsp_#9nK2jk^g8A|v|z`xb36WXHK6Us zcR=D^JI1gi`Z`uZUmagU$DinRj5mnEle1DFgQI+veU!f(p3;3nm#rHx((Mrw2Q5Ho z#Sq}r�IYEFfKGktNnJhn>*yl*o_c6Ms4Vq>rnHmNJCRDGuUaX8t=ZDnJ%{ReCR_ z9o2kYrIDhov?jv25}NZ=w=Yh=sPSN3;V;JrV-6xb3?TeHQF?-ngeO>UiZV0yWUBWB zV(0LRk23aXeLv3k?e-nRFNV(h{dbItoFu3DWmy847cKcAeaN5TY+3Ku9XbXNg8!Mq zzKUinR)eXKPZwk+r(z`{iuBjk2B`s{B^%x$3`cez)=|QISwoYvDl|*dfjxI3yDl<< z5~Q?*j_MtD!+gixyc_D#ilEG~`Dwk^l}A9?)Ihx)!b2+JSPdSgz>rpEHal?|Twsz9 zucrV~*4&o<)95c%wup&LVNU=Um}&M4Z5&xbQDteQ=qDfirOLEQZncurp(Gk-8}=?0 zbA4lhguz*{U7;8%f$(~_HHf+Phjc!pvNBJ?UPD=(hkR4}LfL02dq8FPS=j@jEXN#> zvkZGiHUB$a87Dup6Qyu9Bg(_7beK1eD8Eyjpw+S)@2rZ&j#dj)|fuKYvxzjp0UQ#VUjJRb3kTs%h&hy3L&wF{tz-9RdAdh;rt~ zFDBcu0u%~K3>2(Kt11J<=lXyLE$-EO888;0#a`1yJ%hJepDsf4^gSQYYn1T4fUrW# z#qwgk>HXoN6An^>L4Z1R9?+(_j>bjUx)N)dDQecMM+paq&AS9{l0sJE}0;R{_s9>O9<-U&o(Ee&joG z`T{Q|_d5bt^S^q^IRg9ValYjTTBG@9KGLVo4UWU->tN9~*$Z^=0Z|BqQ`TP5y zC;9PBR{rn#NWXb*dzAwS37BHA&TRb_hqv`^f9A7n!4^9ZUIT2Y(!!Z<40VevCNz+= z|H2>W6cKOIz8Oe*g1&Zb9}yqu_A01OC)JWf9-~@QNrX7L3$Q+symV#|w=sW0UWSNx zR(gntx34ZDp0Bnc5;j~_*sw?&*&Q+w8K)*wYIXQ{cotTOhi9Q!&AsA(>a>W5r%mbM zl|@Ceo|oAa56=wvTS-4^yv;|$*IYEbE9bgB#wv4-C>Gx2C0lLNvMUv_@MJ|xRxbRH zI*LkR@v!^)Sa?*ijIx6Z%K4bW&u16X&)}_C#KPnJW7s8^r=;uovGBz6_G00!p~ee} zg-7E}O=96&iiHo|ypeCZut&>MmvX>;Zsk!cYbh4K1y^YGV&S|(*m zbpk8JX0x*k5^_KY#gMn_r-_9RMV3)L{xP7QRJj=#?HB zlO=Nw=?DZSoly0BCm_E%kVbGGe<~U^jeFmI8&Jg z!Bfsg*gbO@Yl&FFyJF$%^JC%d&E>o~T`YXj9Fy&jg(vbv5e8Z)+Khw;9}6EQ@CMq| zp|99a4Fln=JZaFO6*-2S!;%uVTc7Oy^J3uvtHr_tYdf)k9~TP`tt%GZ;ZL#fp_8j< z4DQC?xv}uhDtl@28@%JtYAK*gvGDadvG5Ay#>y($!wvM+*igx7L)k?=}9?x8K_oS7?<;LX0SGKz!`O>=OA z{LvgQ7zrOrafD>uGk`f5kR_^2W@?P{Um1(^8Ni1f52j#le|4O4Oa@mj0pQmk39oB& zupZ3;AbUU<&oUaC69>-%^>Of<`z-(3+O#!{)fQ&cKxjeBf7tWb1PaSP-(>kGc-7n- zIsO6vlI1_t;Y5L6I-C}Rvgnsp+*|&u(!^EEmVYOQli|lK|Dh#z+^o;&y^2~QQrl=t zHgAoE&5lbb`Yit?`Bx$q 
z`2kF;_H=Imr4!`#*rW;GZ|D62HvjZkah9kR4Z^KW(m1Wfzf=qK`(f->t=)99fW6~1S^E*CX5vn>-`ej{HqY8$HS2n9p0(dS$0uT*wI9Y})_%Zm z)_&8nz2Qf9Oo=FL68mvbFTv`W=fv@2d%T2Lf>*XX|g7t=~p6qxMkXVe5w?tg#sy zn``U0vfF*xX_bZB@prDRzqa7G9TdYca*m~ceigwJA>e?zb5-}LN7?zoDWeN^ej8O+ z@8Ifvc7Cwn?flop@doBy$*-`eUwz^JLbj~P{7yqQL3`Y(ZbEQ?U%Si9CkbR@EStvQ z{2Y1A`#3{g*KGGU$}&--Q)4lW%E;;)En1>gSEN&&nj8~mt5Z4)^*8$87^xm*)eV%W zY*pQBUqy_|oReXu{9o1jYuxY#& zbk026G|7zGla-{g!IDpNv}>Oq;SzxfXpmh?=i$n)a}G*YjWVP`oquG?sr4BNXt8jh%*(7LlCHd7O228xy7^zsDG~J^;@krwZ4CW>)WF->K6K*!IJd zlM1f@RhY$CtFkhT{s#7i7I_kwLKaWnU}v0~+^WSE>%>U8RuD6!&vd02LT$rs022{` zs7329rpZuNB)37GZpsFHlT|gznABu!JSV-b*W|#Yf&8|tF(tx)bx?ObNz?F95$xE@ zbftiu+S~Hti2*;iZ8--<;lc>2ZO7sEZA!SVtj4D}vmv#0$=YElBD%*d$TQA|Zpq<# zsFOusk^gzJK?c&idm<*s!8wpyjHJhfjamHLw6ScpRjJ;rihF^6PD0)nugRC*GO<=o zbi2u#JiTS@C)-sqKex$1Al7@udK*Le*@;upWN#!O-j8G_Lvh_Qpr2c9 zYcI6ZyfgnEyUfJS@t19yWI#K4k!&2lWYgqBZCu3^(Fid@l|;KTIEkEgFUVZjBs<4n zQVi~d8(d;Qflf)#QM{Zv#V9rOgoDPo3n$H}9c4)vWRgFHVB#}d5G<=4I7;iwH$Uu| zh+&_i3gc#|tzAbz^ek!_76BvJm*g-O0oeFQduP~x2>OfG& zX6BXbO_&584v95d0wKRggl7Iyn<>E}1n}e9GZ%AroKOtF_Z^M{22s^z7U@DGFa*um z{=0o691?NcA@T1ZNv50`Eq(${Yx8^ob5$0iF2bs^)QqW})Ci$Rzi@M|`PC-YssYB`UQMc+FdMov)+An4Q7R&10Bd)?GH#LrD-K&g z70#dN_7bf&gQ57)7>sR9$!TI6gEqJr^RBd=E*^gTrR=Hj&yj%90#9rMmbuE6sA$zt zuG@Xi!7tiK9Rq~eKa2x3R`UN~Xhg)MSBP*98tFG)?U$d}szoYNRVSnJ@{?^C|D(4h zagA`{EGn%zxTcumgqT*hyQex~_k-j^>$FYfrxYEZce86C)=GW{CkFO)?Q#^>ch$-) zy|jix47=_6*3`*{GI+p}trT;7kQ@~V6=O;MJ%*Ss<&F;}*IBA`LJFl*A|QrPUCS;= z4u^`yz>cXgh@s$EbxR&yxt8(Vk=g3E2z!mD7z9JeNLS(qF|;nXHVHe?&Mf$)>*yj` zvzG2EYuaB-k}8xehhQo{)~ws-7|zrcw;7iB3RjwmZqvGr$A4~Wn<5!~onD2R z=z8AQ9jGjM2q$fUiWxWxoQag*hK=6fWtmlq&Y*nqq8ZRs-Vf**8a2q(Y@Z;TS0~;^ zGMM~j0$nlg#>PP6%}E5K`c7me8zLgbAoK7R^fivuRZTa9CQfe-^9T9cx%)KId3Zr1{Yi`&NF*--V6(=%?V&K(~@Nv@5X!#=t!xVh^ zD*`3YM3YFWHI{x5ML3ZHX7UWkxx~0(C$cFc_QNFg2Nw`TK7MHQJ857Q&{D(om5QHZ87{g3*nI$jLVraE*~mzdG;J! 
za^f*eHa*On1s1`UAHVSEeg#Z6i;4W?c$BCGO%#}9Yyp!dEd8XI)Z%vO(iX&|Cj8?u z2^b7VO^-#uAdUGv9QG#Ok3o8YOR%jARL?cm%C1<#VrC+Nq&2@z+$MJ7ew=#L?G%-9j?&TiD>(hJvOIm;(bZEK#hcnPDvlNNa|XWPHrc?4#Hx_Z zil4@@*cy#v*=A?Uz1O)GW%-xECsY?(Q)Rz487UZryCo(xnMCxHtj*`cs3xyv3AzLU zcHp_ovkxYdl)J+LuxpGHp6p>Uck<$&iQY-cP{26Ax3V!z))+sl+M`#JnePCxb0Y_= zdlCfrAoDdP^No*4E_>IG2S@5Twg-stmg&%O}%A|!LD~laoS4htK#RAm<9mX(L zPu%8baPLVB;9L0TjHGOi@v06ntpwie9KS+`dZ-3FiSEETZ;%#|LYNwGkoY|W8h{0>S)Z#G zRvEFs*!h5=ajp!<*?D3#DJn17wBBQT=F}Bvj!LNCXqp?y*h5X4FQ#aK`dyCa(pGsm zAxr|C-|JMib;x>gGqS4Sp~*>ry=Evm2f)27tK8P%I;jPKLZ)306e%|Z3zkQ zi*|cm;f0`>ogNsZdK0OL&~5e{BQy{|8q!qGo4p-H%CjGf33o20i+ZMsykpb-?HAIm zJGxj(OxG8{B9+#9mJF=TwOs;lIE}@kKi|(uDGJQE57+f-k1UxqOpk$ArpOsA`8~QC zUnwytdj04&DH5WaIkA1uGWrTb5JZx5U9iobtUibh%KW93Rv(Y2vid4QDV?+sMik$v zcUNpejr03i^80EYP|`fUJ4%*{Bul=hc*;1BwGWLU3I^f=aAL1>p^Shvm&n?{0`hb! z27_|NM2Yr5#Pc}abZ)z=^JpGRcn(S_j$S7uk3vw|!c2UY+KYu=Rl)(cT(fx1!+G={ z`FT5|i5|}4hp&>DOC8;^XdhRS&aQa-E0Rk4-rAX|{Z*D6DjV{KaT?bsB>n|K%{WTI zCY>-Cn5N@?w2J%{HXB#IUMw_m%knSwsi6qTno5&8B27+`DMETY7<%-dZ89b{T&G9B zra#w?I^i8glvPa5ft@fbD|Wpe{wH6|TJ056;x#iR&uc9`?!sS0ISu6uVsoIO&=ODXTu*sp`NPuh}^%8kUNb_Oj$!ZY|J} zqT3}wio~`JN}G@aX%)F@ROr!n-k_k2u2dan&NAZs!~oov7$GQ&uN}R4RJSYE+y9jr z&Bsv~zQa-Z*R|v+Rr!7#l}lV-Oy1#iZTn$I6u`NXtX!zO@`iZ*oDESV(YJe+DTV|# zCF;0#E%d0dPFEn7u}hLASvCfvu2$PdSh8eE7KUNc zE>%~zTK!a2b?a?6NH!)z2-xOfhY2k~Vur*w49ORhU|TX=2n0eLF3gaSNi=~ZkmQ2N zV=&;@^ZT#8djlvgW&E-RaEK2z_;lL%dU zVRU!FZy`)R%})00%OHsHCWn?RgGidLUBimU4sJE`hBE^}o*W(G%NI)XLfC?3pwl$YXQMm{KJZ{jI~fvFAE5SCmemvLur!Kumw_U8VsJ(4TWFeC|< z&couRKNkgoBuNZQ^KMAe!^mEI8kz#7+~(9AE*(hhk)!#x=*>zc-$YM=N=FlsJ&WWD z0elnTaS$ry6yZ#BKXZ1b4JF#O)buN~Pu!#g;`THV7>_8wnM#FAq9 z4q{|`!IqE*)51Zi=9btMw5}F~ZpXP>!&*DWLekzb*T2u9Atl>QsR-6WQ2yy|tkzxn za~pTm@LkSpnIVFl^?GeaA~-EE0?cBzN-yBX!yJAG88Rc2(H3XuE(oXZi?DZhhHWMw z8%OTAkwLaArsS6{>n1M06BQss1cyR0mBbzLTm;WXZTFWo<$5#sGT$5SJ05{0D|1)7 z0!$DmluL~s%uEz9@WG1jqDJOkY^5Dxr}m+KNAjJ>AsA~>=Tg18PKZWUD*y(=E7_5d z(vF;=MBNRwoFXgKje5Ky6Y9@u((HzFXF|cZlNcDWKXmNuc_6in+H{xUpBdZ5Vp~~L 
zg(Qj2ge`bu!xrwkC;f<=k`5zg+PS3wS|d#kkLkU%n!4|}6rK)kXW}Y&cS}ecM@h%t zim0cijy60jx>uCZBI+UkgxP~QU?s9KE=0ECA|N6`RIs7Rb|>mFP$p@g!-#f^5b+og z=zy-I%t4+&Kd@Uf6gWE^X%O0Eiy(^wsR7ZfQBv8gbzeT)dxK_{kv7RMrZ}`!E|`^B z3y@^ViE%3IFjGEF9ATQ2yY10q=yqwrFSV2F$`M&0Xii?l(Pm03ZWgz{Za=pw^?~3Mbuh6 zQA-``?nw8fd)N?uX#pnRZt8t|^a}A3kxL!RqtJxf8Ug9vUOX#0V3Bb~Owzs?y=A*w5TSS(PyBj5Ljwpk%HmNB8Jb zz>Eo==Bk;B+_my?qIizL4W&F9L_45oYe1OrZbH5Wnw>5LSf zGa$0fNST6$NSqNsw!tp#1qfjKa_ErgT|3)EWUIRk9B0>&*uFf!$R%=(*j>GW?$uE^ zc_$>gQ{F|k6iCrJf`p5QaT_f~x4(%85Om3OxYGkNs7rMi7pO$NHdn80q+6HMIO0Z^ zG|{7Ihz=3Dl#xN`pS^V&#Y>6a7@fc8bKNNr4U>q@QShVBZ~T#`cpbgsTr|`onkPE) zG=`KB?f(`LJW>qf2vTLk(rc(ctI51>@7k0%7t>FRr-vQ6UdEwNO}fw8w@=c?x;usH zhU}j5F*{(-HHi1P)}aiZ{IOqo(O%E6*C_${OZ7Us2emuKWTAU$NB0!63EhLp^%ya5Oa0)my6%TquIuPC6| z&ss(EBmmhy!PpU_=FHu046TmpwWuTO8`9SrlRocV1}zyU&B95v3aq3iC(_5-iS*&; z7U>fzK8k;e3JTFlk?BZXOz1@Y8XJKPD#91V9m!14;tAhO2b9lUE)^4`M#X!&;oOYZ z|A!L_4OUCXmZ8%qUH(3IbIy#pFq);Vj#%o7IG3~_!twnGPn*=0eDhKlcQr*ts34N! zM&^_#ZH@@gh)>Soi*A@GG}n@7VujCF6%hp|btRj2)W>zzEOoWdmAZ@!0oaKA02(kX zF#_5GE#Qh??#dclCqX1egaWE#hm=B>V^oKnc86G!FhqM0y?# z*ScJymoSUCKyGdDtAgmodMm(5QW+V-P5rmwm_#q~f%L}Q>8e>vOL^}uBz2UR!+qpxIPvM^!mPO}Bu z)qeB`U2-El9UQZOQ@4Q~^&!+M&(}Ov42pz?c|~Rt#c9KRL~}VbP2GEKW>ULfV<25X z)(dV3x$7_|MtgGZyv_d3j&!P$LzfLWmuwq3XigfLWe^O20Tm@MBCSCrZ5gDNYCs~k zu8;_7QD(IOLLN(PlA-;ZSMr?r20G~QmXqY15JyKwHDhAhbzIt*mrvZNB&&G=k6#xj zAVa(~Rd%AYoI05D_ijwc9>RLNZMifzWEfYtDGz)V&4?(5q@hlxEksJt4#K%5y%1@E z+V=P9)MC_lU@idS)o#U>N+59CU3tC|rzd%ReO^FRb0?v-dA`0u)b5e|61R%ZSV3j@ z=UsVD*@kUoN}ur+J3>`m$}vJdIH$N?TDk8yjls4QT&&3H3#iskDF#4A(ulgw!?wZ> zeitdAH-91D3|qp~H38#acQF$1g#zN;D#%A#!!AfGALOE-1_9@|i+h352*~D$1nn@K zaM3R0zwiQzG8M?{5ea49*R;f29koAOGiCg(+|l$Atu*Rk$J=^p)0AEGodB0*F6 zkd8}BP+ggOn>9?WptM{4oI}yf8G&umi$!axX$^QSc|Ls;Ztg^_Bs!j@Z!S_}vJgRG z^DRwEW&(>ta{Nddh<*cJ%VQpa}Iq3D>ge`;p`(L$UcZCZwWiM zfp&N)Wa*Pws2F*)!j9#{!Gm$Z4!Tb%s)aH@7pFB&Whs5^4q8Dm!W?bqJ0s~p`;p)t zMj4+3Xv{{TZ9XG=m$uD)G;RBdSI6F#9^+g=yaA%!awNalZ4p=7C?(qjG`D?mgn{c| 
z0flsuKsy8i%>@ERpj$v8eW;L8S*WLvsAKvHm9$C*YW5)pq^6>`}kFt0~qhivR9>HHEth=@%YWgH- zf=hDF9;TSCJ}JW(|KSo)bgaVC7iZ-H@wwKua?^?2jB=mT7Xvp2!D&1x{+-|eNiL1o zu;oc#0-Mf_)-d-((k1n?oA3~&bg00^wrBKlA$AfT4c|moK4J6z_~>^WKtPf6K^#;C z;e-NhBYb-dXAb+-j!3juIRv2p6@TlmANaUV5~?e{Mp$gG=Lz(O$D|+4ciaW`%Xp`a z2W9Vlr4yu$2fgG>g+DJS=lfGQrgRG)M}r+agZr|K#rOd#ZhTstq^ z@}&B;$U07%Jeliyc6siro#|Hh1*chmuXTc-L|h{h-@&bjw_u_7(!1hF8=Qu>UW60= zWo?p)O(eQVC7DY3*@dZ;pNLDm9a!0aSS`&{J z*2+(&*#36OGo&uZp0N0Ys5)?g*l>d{XBUmsVYX~{_A~D902yUM<6J6rB}iV8ne{fu z1}J47UhG=oq<`=6Fa7wdr~dl!clIbHX~waOrWT|ukYKGiA(~cP3o_V7YJzX!PUAOC4Pc#(>VMND^|-#|^T-O~2GDbM zb*!pUb@La+wGq|5l5e$K9tkqTFhhu$A2V%olob)OrFfcSr6@=Y zkpzA6504J8w6nNN;AM{$1_l6WIHso0q)<`#eO7jXULxBh;7SMu&hGQjW^F;7#ZkrbrUe8Jrm9Umgd=Ej zByh-|p%Uq*_-ELjSo-|fowjM)+KW>L+w*OFsnFE5HurHtWOp6Svv%~=&{p`25xzFW zfknN@f>I>BJz184$j^pVNMFGuJIR4>%eN(k#+kc;OY3s}kZdTOTDUc7MhP;&9-#3y zdHtj?!Rr&Oe6bb=UgS)Ynwj(N&Py(&)EY%t_1Y$ zS^5f1rjJPCv2FzC*mB|wXQIV%k{qU<9Lt!J5c2;8g&rwp2-iwm*F}^D@5M~T4MqSUcrr1W770uRuaWr*~2e9nI3tg%L zDq~DBI||z~NK#%%2%OasMs#m5I!SfoEUYeM%tv)8q8;d35Y;8YS~*{;+tdJ5H>!W0 zRJW-*R5z-AUQ{<(Ij(sgRM*!7B#<%`iye($w)& z7PQCi$rLT73{Qle{U(G-S!thI1WF^EQMJLKPjVNood);CI8D6{iR1`XiqIk5eh*5G zgek@Jriz<+exrI;lMSw^X6WEeuB8lp(92$Z@EkmQ6CQkbey!y`!l6OSFncI$xy?eH zkDoFPk=C&Esru1VxxdRqK_;u?gJ~=+O?Lf)_P!rFNEg9UoawC!agERc5aa1VhZHc| zuy^YW;8J#!lWf}v=O)Lz*90RFzcV5m06gEpI3rZXtRWz`dmyV1WDB$=AM(K0-#l6a zYOybwlO@3W<6>dE%nmw~;%Z=1k)miP&?pa(S}bC~pCOBpI`R-j)p21NGnK(RI*G!f zpoh0=ytHEaXYsv!D@}`|5t?5V4xBAQ!Rf5gxGX!)0|mtp5n!3n^ut!SOtOe)Wa?(z zp@P@jY`vWGFl^h#UTlMw96u{$J>x^xv6QQZ1}{1Ubx?11VFmc)r7^SYx-S)IgE;VY zz4Nl%J(=hd0)2Zx;rJjt!-oTstq@%aRLlkywWaq5Gb!z$7Q{}SW{nqWL$oiz0#$yC zL`|&%1In1&!?o!);_1gu2VL*Ej2tO41nu9+<9A&=Tx6j-{aBAH0zYl8>(kL;sM$kJ z@6K)1j9O)}BbRlvMXr<$)J-R?S!DJt$poo1&1W@2UE2Gw8-XRfTODVWK${@<0|3Jh zts|OcM>NZ`sIH{D>$uQ^}-uC@7y$}5O4JZ39|94Bj`_9w5PR*X2{hiz`skCn|rA9?VQzuV` zslG>UdfmI<`G;SieEo0!;4gn1yoT~3v3;NV>G-?4E_nNflYRStl1Wxoco$gNyC%&Jx!&tx>Ol|Eu_Qfxat?vI0jkzgYw{bRl`rK^3MbF!Q+Wq5G 
z%%i2Ry!+*)-|o8Y$&<7Bw%4MxwJH0~pLT!mR2jQr%R|2AnPQm*YWW0E71qrhKmrkR zN#OTIo;h`n#?^FMvXYr!;!ZXU;3sZwWy6cN;x7O&yPw0H3{W&f?g#ME^H3kr<=EPe z!YK7z47z47(+(6~!9pE2!wep#9OT@-8Y6TJTSySeN*NldnIfj32wB2(?y0WyMR%vQ zB%jP$;<7Q` zmW_@q=Kvs2B*G<@$I4zj}-q*U09$iRI@vs>9Osu7=H#y z5C@I$?@i0QFW{GWer#G$#%?a5#=EyT8j4xn;i7D4zj#5=eW`X%2g84i$5c*D%Mo{T zQ<+eQUfO`;1>G$f5W+s|rFCryFAZC-)GG7mjjg*hRDt4#?tiDwL{KO~Ua5De$W-Nbs z0?QxE#|58ai=3mRKHJ$*~6T|6VO7*3Wo#?)lb>~D&_%iKEp$7{M5ZML7KYqdsr}(+B4#H_j>jhfUds)Y78(Hg~cJi0R*24OJ5jZP#fc55e}c%_Uq`9S|bHvGf- zvaXFtWPwXr)dXkDJ!VDKbe9024b5}a0wsT@OW)q)5Ijq(Tw<9b0*Vx$)v3_u^-J}vfr z;MQ>QW$AV1sJ#7e)q=!jz<25lSXHC zfeLvQzq+^pDeyDpyf<487|m?K{&i19=y;^{mX;@WXd1P$D9l#gkE zTpb>L8oflKgNDwqVSffizjK&??}8D7n^rv)UivUV?&RYWo=Z30)qwu7aR@8DGV=_D zMK9WmTxMxoyCsR}i3yN9w{^av(GG)0E{7hq>5Sfjh_7AT;Q#;vXyx5@iD1>l7OdQ7 zQ<>Be9V~aUK1aMIF+PEF)Ktc{W7sZ!`niw2kU_s{pj@{1SDt$KA2OI+5cuO&?>Z^L z9|Oh&V-F;7f?E~W$Co_xBed<5D3``UpNfrQ@Mi1)JkCAYLzN<}Se)@^oe5;n4?6CDj zBlWeJ_F@WB=|CFQM5>Kz-Y{>?8)j9L) z#83ZQqVi0Z`@_`AjKULAUB8MLF7+(~q`y4F_cDm>N(vzwNvaSKwwbe{O`k6Uh z`s$B7Bcc*+p)-U^2w*LD0Uk#p@(cVlUZfiLKA@V1EU)1pwNucVQq+D#Ss0Wo8AB?L`QW#3e^g8hK=aXKwKu2my5=s>7p)I$kZxo6bEX zDnkauMfS1`dUU4m$T$Azt=Ip_U)^xB@9#c*3sA0SXJHF;a#Aq?;?|0CY&qGt$>@$(xuNq?8&e(*5HgCjoIHX0 zegfl%FN+P}MDABJ=`2K?DW_gU=avBL2nf;&y++jHhLic06Zuwer$FV#fytS#=0(mV z7FppYVLL=L6%nU#I*1YwI`AIvrTQeL8eYHPB1NO%U#cvua3=T zz$lg$?)WduyG~rz_0KvN=`Zq-ZIlo zIVn8uf7Gy@tVpmw`sVMw-*c7^edqK4oWb4}ri=Ig`h(B$XhbJ1edxKiTZnbfAoGw* z!T0k?VPR#kk=fq;Ac#obt&1~yWhWGW``f2JfF-W&<=Ro!!WY#vgf|tsPi@~ArbfiF z)xyk%`-RFIU-yYmZ+H;%8M?q334^am2lwZL|ENZ{{s;B;aY%zV>Eu(N%C~I{s#l+S zkTPN0#`ipV>Y4}hZ4cz6EdXiz--BcY8!z5|&1W9m_)DL7%^U82@PT~rpcs>2S%XrrA}x0V%tOHeR{!Cf$HkebU@5 z5}~3Fb6TBT?$JypeI$h-m_Fh_6uYiA=%zGDg$X26Y-&S5{KOYNbUZ70FWdXsKl|qM z8Qqzf?ful}Ka1Gt@JvoFIQFi^kvRVhETr!Tul>pMy({nS8331f{o3P){`-|bd++Ho zTD2zy;9Jml+v>tMx;DM@blIbvHTFaG0?I)~#XU1O;t(;E^EEM-YGQrQzvIfv%@_X& z>WQ>jFMikSA+z~ZGjU5Lh+97MSYCAZ45RyNtdqi@zUkK=yyge5{adI=`h>erM?mL) 
z7;C{v`(NMFAA7PoeC;4)|A!-A`+V_<&%h7*uKM#=|K{FZ7a&gdJ@CYn2$_Ro25)f0 zp41ch;AAx49t7KuhUpWtjcMRTv%6pED}$=%mTLi8^6~SaCErhFSji)Vvi7+DiIkY} z+r^J^(fP)*nYR2Z`kHvdp6yl zZ+RR(PG?K^X)&Jlivb}8<*C~9rTZWF476X3JbCJ?-f+Be?~qIaprq2S2(5F>VJLpx4knn^Ud1@uk4TR`*k1({{7l)_P@0YbSL|!KL3sv zo&elDx-AL31?Xb?ltZ}GLcHtO&ja5^xyj4XT}EvQwC=;COQqkUY zaVfaO(y@e)uBAAd6nGo({U~PfRB>|CHMK9sEpAkoDSPJGyY}VY=6EbB zS4KS6yFa^rBu|@nr-`?&==x0@XXY@|pJiRoQ@}j608EUSQ`!m{1HO(6O9;$8C+EIh zk^ZmoPO`>cx<38FDaE^`S@7k^rII-Rf8>?ydiQ)jNr_`7^5C|FmhhjT@KT(T*UTe6 zX%%%wA*E&WB9pCId~M!mQ+B_gAz(^(FC!u9HZ8EL6&gqE%2KpKagNAIjuw>9=)#b6 zqzoyTfX$X6LN{B?HAPRbj9Ct#5@}Ifvt{}5sKzrJj(4Yv@8Ju@BVKwmZV7dHCv7#A zLIPkg#pgLR4<8GPe0yaPv&rhaiQow6mwRY2t~WbR^bDr;Hf%IvwW3w_UsD)_JK597 zaZjfRUT|M+Xe*w^#>eDXjcnKb9Q?D7>T-7PG$)1i;ak8MVwBU;v=u*X??_z~_+-Xi z&D`45b)y6BM`2)sn{(F#ULEq*%fpw6vbAXoCc9RvT_l?WyV0q0Qd^&x5w$-W?rsUD zZGKxfrm(_-`>aOjYP8vhcIm9^x0uHjnQvx55`LdXQP-#M&SVG=X)8VhFa!u!=-CYN z1cjc-pqc6Wcv+0?ZTj@B4bBz%5E?77;2cCF99X4hS(J|Aw^Kyt^vWw0hvf)w{8|&L zt$DN+XJug}1WNWx{4^t7583c$GY-Zd&tOJZmlVSv8)3@nXw%Gmm~aAHwHa5!8q09R zu1%jyX6^Dh1rIPY0?|6L-_zNQRu;Dvxo%a@kVoq_)im-&n|)H5x)yi<=*WlPo}>5E zu7p~6I&Hmw(?zxU-U%X$dOr=k^(C3O_t{2WBG@!G7t_YAiEiWyuuLN8d#RuVI-7N! z@ev$1RRaaFeabtN<}hPh@w+Y$*Jp&_L=9TLChEhdDNYm|FiVWVa1Ci$mb6&H>e0dbamhM@Yyk}^rF?dr}jqeVDESs=HX?{$68XiP3_ zTlHpD@8gMjn1KxvAX(RMYn4A_nGS2;n-bR2oBjQ?+j&II*pBHzqX2}eXd7FxE-W$M zh7f`t}&z+aI$;NUZv zgsK)MT4DpGQ8qwnAe|qSy54FVBn8V@YaDcb^Gy!82Fg}Jwl{q&&lOL^k8;PA0=!y? 
z>9PwfBxsU~pUKmt`*a5}gPuug0bmL)dW&PSMmgn?$h{*SeH*pQ$Lg^jiqVQM-S=FU zg>Y<^74S>Nr3Gy)2D+l5rh@Vk5qZeGOQX9i#AStE$Z(ca{C;qYPMxNT_p^){-Lcoi ztc@$X;~kDYabUcq!JS_ach=HHBRQ3z)H9YVuFUZ~;s66$*X(`lT4sH^i#H3G@*lhS zMv99|o9?P|ml!cq5da~8T~0(ys;+~HE(FOV=fa*Kd3K8spjK>b4?HD04U_M1xkQes zEpp@2=PpAMEzpA%*}F<~QQNaqLdSS9)~GQAVVkop**WKcQsf$t+`3X3lauP7u^_M; zVtEURgTeXf2t~R+WlYP-U0{frs=VAKCJ$m@uBb`$8twuAqU` zrX$^w+eAb3y<5@zgX%T`|( z?%u*TvUTc>L1Hn$a9$pw8l|FwIG(Fmj|A5gdg2_eSm|q*bm@ z!Bm~@asN|>Vo>Reiz{X4@J|v~^Ez_q0RUL^4$K`uc+X-V^3&9CZqeY=1_wIONRT%Z zb0#I&48VW_qn*Y+?aE@wA}*l%BrSRyy@ykPfBo4^x`oTTfeoUy1uk>MEfuDAyOoqy z?G3Vd&bvs^wxu+SiIib*S^>KDd9z6E4NP%mId0QMrP-(ffY#0xJML?0OsFlAtY+Ge z6Wi3g=w7!f=v11iHz}uS-nP0H0SQ~6R+dFZ=K!>f0(OgHRxM76bv$oi! z)kwzsrz19#?BA_1&FfOLMDr|eBtnV60B?4x{;}9?EH>O0R`%AtnE{^NBSJ z0C_1=8iZ@_{7KPoNBqS6D6+mFJOh@G?+w}oeGkx7viw?DNM1%2Kps+4@w1?Go&?CzApXGF#E3qh+hx>mBYsb=FoJ1`hFV)yE@Txl*u z6vx&-Ar<;ZXU0~-uLtSEsZK3}u2jmIkqNbch2d^XU5}%Hwbgr@Cv4Z$Ie@NazI?_r zZh?Wl#Tj+EW`UyuIygzV7J$no-vLo))QM0R!45Dq_kySygSCfl;Ilr|Nwmb^tl%Ut zO73qqmXNrSMM@C|4Rf{?F_fls1EqaB*LZNDl_(Flfw5P@4tE+5c?U-IcdgNBKqMG( zx^(vo4$WFSS!I=*vnY8eFwKBm$peXs#UUfcaBCeb zm;Mpmi{r*147YtCbf#Sv&B(&c>CZUjGdk=GQPNdWd{aq$4Cyu}5~uAHAvEZAR$X@v z4NQ#cHc+|~p2LOBx-TX|X#zj}jFaE$>4taAmkeVDZX5$T+PyfbIEK8QD4v=30W zw6t+gPPww4wA&Wg1~DAuX-VugX&~$jyT;aU8bxafDNO9lCLl-#v@Go8_605>Yb?&^ zHaIkJ%AZ36?MRJ2hO@|}tJu8=h0N_H;tC~I+7WY!iIFvKeyQ;7=`>C_Zw{6N6U550 zhml%N-_4(y7mz#P?(#%rb+u;}zYc;{RjiNvVDp2I!==A_mm6C^2`m3oT$p^VjobKD z$R*SCa~UFO4E)7k{GY#WXQM8e-utP1YU9kltMNu!U8KW*eAm~1oY|{8FlJ84;h5U^ z$3ME{T8Wj`Y0v%dXS@2B4G%uZMr0?MKpF=8Z2bQB-uH$FH-6$1ui0?FtMqffa)5-F zef29iCGY3|Vfuml@7H;YRBk4dNg*>Smf#8WRBoYg@*aw2utSn?8lapGw(63B$anC9 ze|Q$1HM#${2}psc0yp9Ktfj|Li{6C10wyY==+q0qCMe2N=IU~J23_tkV}V zPC}DpR{YB+Urme#j`c9J7n}SDVI5R3ml)18AFD(d*b5mvEnKRUI({k|E-?TAt09#1 z%ziZAl1Lzy;UwA*B$2QK{c)u~@`VFWcRc(#+!Vig;61PS{JyWqA(8#b2OoOqp+Dh~ z@~&Lp_BZ^+g+Kf(<^DYL0JEO4DmzI@GX8&4{3@e$4#e0hI&yJ(jP zfiytd1JH*9h-*Tsv z66oKtAo?4&r%pU!jBxVUseGC~!;24p`CByALI@s+tvHlf)*yW%Or6kLM8ne7kswME 
z;VZGTx$njHiq)W*m$X;dwvD0TXI+2g4=?z~=ihR}NsVl_?;DRk{0G-P_?a8nRJ!?Qez{%q@D9g#jgC@?VPB&`!|05;p={v9V1!Lwzl6n zzn{sp%-K2IJhCHiH(l$lIemEd96P7IdFOD&jSfh)Y3H~sb`I_(k7e=BY1PhYi*}Ce z7>%97# zbcyYd;N%ZhKG*U$wnNUjK|XsP8{|Eh6_J(1XX2~DGQ|(@adNKbF8)hk2NjK75Td0Z zurPQxe3`?OBF;f@i=v-`FW*lr6ao*A8-8A*In>07IHa8FS6GxW0>ZRx-N~gaVo*+8 z**Dmt2%*y%@jeBLOE=0$s#cA&Rm2 z-G6uAqe^e#qZw3nNsR-#Qb?#j$@~#xX(&;~AH_yr<%^AwMtdO4*&?@s0m_U#DfAdu z;u~*j#~<%?6;=z<8E5>6drHJ7IGb&viO5lIhF#Z2mtv4)`0{hWz~mvo z>vgK`dot(DO;XI);Lc*^&Yi4NQ*j}=F4x+Z~9JhanMTda!Y+V~K#f{t|v7;~x4FQgm* z3Rgzgq^6M73Vf!OMOw1SpJXz#)p(!k@*i_pkPAp?aW0? zw}XI)nvc1~A>XLAJ9d%Mu?wK@j$QDBq&e^CS-7GOTX-njAq<`+zWJPod1W5Fay~;Z zZYC!*6Zh2)X0m`B@cZXM=Ke=Eor}+!ccMae5^D%55gN;Vu1ijaOJ>6*UQ;rKkvKxx z^ZBul(JYBXB_g|`Qy``%DVmD7%13l~8+3#lDX2~1Dp&@CJQu|jOn-X1$GUE1FskYd6|x}ZXk$1sxVFP*UXGpIgg)e376z@E_%8%a%Cab zgq^Sk(Z5dTi~57~oa+pF98>jluW_9L2u<@5-D7io*=;Vjb;yk=U*a+Ye?LJ)`67aM zI(iVr6gkVzveT?VM+5PC7L5jOTAZGT2F8{ADCQ+2WU5Gd%# z5i8?6|M98lVZ+=J3!UZFW2YbHN&&y)o*|@BYDrpuP<8vNA9?7yf4T4}6!VXKY1z6h zw|_%gdoatw!{X8MF&p%Kb?v*~`;V`9h%(Q=_1;%}Y3HxowFc?FKV1I7Q@^`gF`c*m z)89Y$=zrKF^&;ZbF1}u@v$?{fAL#y z*5d=m$G`XOpL>TMpD+H_^4~kbnriLP6Z4tbZZ>?wk|oqi?fWz;bawUxe*>EdcUZ6+ zW)1$?udy7Z*b-jfpB+3#-p&!H;N8ECafQKxkE~Pp_*ZW0JGsX(Pu`2To_ zNR+PCQBG7zcUCid8(x%01ss*dMFK=zk+J*RtuK#z2T1_OqP;*H@v-ZRfU*#9t-fQV zJL7UMHcAHe?2@SW`9{|0e9|pu$fXhDF!ZE&3;V8I?@VZS;}sVx>W0PFs=-T`pQ2>b z478ZQGe|AFEgmCfq=Ek5lm~1{zm4k!gMURTN5HAN(vo)V#{7yo$N4p!XFlbn$2Rc751gdf;3u zPF!jJ%j%0rwG`V%mbA4)7qTs_pngj+ughUyv1M`VA`Y5c53lEkvGMV6+FnOT*YV5M z-Vguny*9l?oi9@G@9%%RD~QweoGpI$W5->=7wh^ze{seY#0z)M7Qg;u-=h1<%lh;} zlez!oG1p>Ct_TmV7f--Y(i}DLx4Qx9g{uDY4?P?}sg~lO1^Pf-su<$qCt&O^WXwM2 z7!4VnO68%<;;wB}Nx~e!uie}ApLAgX13~M{8+uP6-dY^yg z9efQ-!nd7&?ipbhwBS=zSS8v*tGoJv{h;fv@GdS%2PsH<3weY@+h}+sU_vrVhwiID zfLWi;=B6BlIOD&O26w(bz_D4p`1)XTXm4_bb1fwi97fBujauw#llAG4f_R7d3;j~; zN7q+SdRC?0mQP7LV~8u#cNoP6;IB(5gc^$wJNm|$E@?Zb3IR$jCq=rx+AiMg32+7= zB)CHwX?&H0EHB!oayJmhXQzb3Me;3=Xi20*$=nLvyr3_gLnO;6_~(P 
zjALuESP{DZ9%8yGwaC~w&;y{hL%;#H%R7m|T9vwJZsv+~m)YV(--{RcCN9gT3{`|5 zYJ$cLJ&S=7v~0b>wT|fCGPOmZ@65x5%(?}1?OzPk5YfBf`45C7?3ynToi-A;_4U=6I+6H^v` zNS?b#n_J9`jyUG;@WzLgTVt45#SFjwiy!^YyZ-#)KY!2NzCjrc!>E~MG^2J`sF^Ts z#s!bKW-#@tozFe=-~aRP|L&iB<(omGoz+o0E7Xoh16U`0LoZgw^uq5Ppf7=fqdda5 zV~|?6hJ2_YeN*nHrsB4fv?F6dDkd>=cCqcjhJ^U$xS0huTa0MYR*MqmLxqL_b)9S1 z3p29BOw_vNObJ3a6bf&eB>j9$!=>o&q^GXoU`DP~K>EV>hy@j^I zpb*?4hLIMU(cTOfZPW4BgT0Vs1(!nli*P$`{8eVo$QrQMElkCW_T~*2kvw(#79|5p z=5i7`ePg<-E^e&NLOJJeOeQklSPM%SW;t?Quc}p|#MJ6*o2HINX&8W3s-4XDmUTWR z)AeB)laSonp%{Lux#KBU6?wG{sI^60CEY{YR7PG;Nb8V{AIEZVR8~ixAZ$inq9v9o zmBJ07IV!x2!tABA*cEHu=49BHJc_d$o_`d~V7?*GKZ@QMik`eD8LZSYVIp!uT{_o_ z^rC#=XmeM8#CCHfXqa2wJeQt9JLC0j#k;W(vV>XIm-bQbGSg6G;hb!WU@-y9#+I?3 z3O8DF=fCZ2z>&r$^)_1Dghtiab}va8`kk1I{2Mmfz^rpO`rZc0wu7>g22BtGW!r_a z_*a`zwt*~B*)B?gpzy>*$@!GZ2%n>h%cuzYMx?Oaq3^35;>0A2b!*ToBUGZhdt&sO zhc_2H5+j*QFz2vl>pdD7Tb;qN%q*p2)h~TdHo_*BJ!qp5*_+OV|M6vaS!n0Y>u6 zii_&uvf?vnC|p`lH4K%u)s|{aXIfjc2rids_xud_Epdp8NcV;pM=#+;_SDWOUgpx| zU9}1qMrBmtg6JjeiYqMj6;NTV!jhpBy(^ zKIe?MK|D>51n|u7`7G4a&tV{@7{CUI2>SEy@C93z1!X>N(&*%+3^C&4joRnrB+~Mt z)rz&;C}9pKNf%$;R9KwEVBtB*J6{s3jbIsdcFrX=GY*~2Ej<5{e)uH~WWg0X=ZNN8 zvGW^U0`31F5TYA$(YYD!xi|HNiG_BX`iX@%HCWNWwwHzkb2-pCLB1-5w>818qQdhd z!N*dOrF*rV&V^V7#ru|G9q7^{N9LF0p`DnFLR6fK$b9!wxd;%%vqj8$AsO1_vR7w| zmnrPPsDZaki5_q`=UOb$Xizd#XXQ~*EiTHvjOKoOskAAHm|gGZVgr$k)v}GW6hBY& zlHtno+~+h(^HMQNC+Vf!C}kRCN1j=7Ey{T+8k{}^#NfDcn+JIUF5(yMQPXG`Y&5tQ z4KAddS=`YGTrDo5R+Sa#`W$wC5HXM&VTcEo=Z?Tg&X`#oF(D7V7=`(-hd5)|5crk_ zwxDAQckejP=W3-E>hKgMg)3qr=)|UGTO;D_sz_S;SSpp3_iFBv0BW}+*GfK(Cfl?U z*>kRhrgxQF5LQA960bzmGM1TWQDEEWS`=M(=ttS21&h`Jqbn<6I0VgZjMA3mDlG}7 zCzybN*R6-MXqk4_=8bT+6j*|_J}iZEsu0*BBTS1VBw@#*y;>Xkz9JKv$g^jlB7ErY7s<{pnOo6A1j?5eRJWG%b|J%1|J{egOU(6xcM)cnPZuiq9>P zS?i4bov|&K=N{|5UEk#5vHZ%2|s0@ zXJG{DSMg?Of$2mWG@3_@7UE)T3HGYlF>SB5YOgx=b$RZ)@+eyf2D+PpUt3Xek{&o* zFzyLUheS8_(6tbo7@c*jtCf}2M&yaBm-3Xm4p6mXZe&2{myIQo@Foe)>k6&N!d+vDEdPU3XpE!PJSZkf!sX74ic*QF5E$gwVm 
zF~EeP1Mqs+IxS&WPI#tOfDWFoNl5DB0w$sB%@3mvMsdHSmpi*Z=q#U1s`!a-eBv92 zNQRfB8}6C*3j$)4@9|O(y48GgdeY1h35k)wA`;1t5i_*u6)#Qj|GBakbft@-Tr}68 z?j_=8gYle}=g8A%iCzskq9L^|KPlb2;f%np)E?w4_{xZ-wklMk#hwgsTvAULZ1?2^X&d|+;3A>T^7!99&X z)Y)|WMuMO;OmEPB?_wfI!;sLKK#E`bFm7)|hqDVfd9QfSL+>&#_SpqO+J}!if_hf_ zE6823xA^p55wIYUDJ~$RD#FUQmN$~3F|DS~>b=F|>awX$RNy)aK*Jr#;})Y_O_+1= z^LB}_f^+liF5gHUj-(d9@XOBqFDcBvLxDunkqG8mXyFa4rnMUJZtSer#Ke6eX-%R0 za^uNY#61a;UWD!0CJH2`6CiP!gV<4^%~o7}k^x=hETxuIq|23_>rTcYDG{^Fl?C|5 z+09*<`=^-O#*(qI^-MeXV@}6NzyZo^xvocv6jp?Bp!yE-_~{816jo_vMcQx(!3+Zi zD8GgbGzefG?OKrXjW@aW`0~N3)T{Ek!v_jalU8~~`ZAg7SENHlIf^?7YuXS@WbkS< zw}O@=WP1|V-t`KOpfU~!qT~<#*atP-XvGsvl>WtRs#S8AHjT7H`~sNJ?9i}GX=-no z=K?&g$hQ-tCI=JumMm!x0({PCj!((Mz;}lz5&FXI%lGCx84G6yN!1k6A@A9|fgII_ z2|%zMf1$7kQwpMo8>`KN;umU?Ri4`<7fOUV!Fhp}FM?naNquve(m_#<8SZs1@3RNZ zXIapwG;&=MWW!W%b4H4gqd;wPka{E}cIyU{6??U0^MOLb6`+uKb<>XAUPxVzWy%}HRrKH%F?5~hH z^SOr3f;$@GK%LF@lEi_3Z;}M4gnZ>_MEcP%bC(aX3|bW)PWS)RuKPSd?pLD zJc6Kuq0$LDStsZ;WG8D`^5sPm1*NLe2+$FRg9P@Z<}l=%XHji-{ZQCXo6EEe(5k;yjkTROb8Wxw zK45dT^JCD+(Li^~tw->xpsN6(B7YG-!VVx3nCgW7DR2MWk^(Km^PA;Ga`@wFn2l*E{E1ab{Jfn6Tl&C z8l#-gP7dcuF@Y!_%NU%wXPdmf#zgt!2PbE5zP&L#4r*MR38@EvZqgm7BF#S$r8&3O z3Ti=76`F~?iAm|^zGzZ+#!PJiK_-$igq*_A2zZU5%_q<_#6&C?rVxWdi-e$}$&AG# z5j#EIghU)7F(g{zkf?<(Oo2qg0rk)4v{E4v06Z}pLn3j_oUkYts*bRTk8(i2a$J_! 
ziO*L=KC>^s`1*h^bCF);zbySCdMEvaVq6isGb&+7u=PIxoITOkThwPNVA=q>&oe&b{y6i%+~VC`y*ebu0F( z!$&U<2PP`DaD1X3j+N@=Jw7{XFW6lwj|`6w(MY{wtsb28_4br%;qKvjSQ|c6QO~=l z>hxI&YxU|>x$fGjg?mcVm9P}nC+elqM)8S3Y6Js}XSiIdH`j4P(-aR(R?79tfEuri zR>msh^;$SMQ4I$wu8*l|#3*%XqefNB9UHFI z)M64OmZ3$qB@U7c6ixs_K(kv@a#Jl;tEGcV8?B5F)%Q^Ca19iw9bLsg_y6c^ zs+vT7Y*GU))anDn<8i`)qlJN~vB^d%IsMAj9w^l(#)iw)vHg}pTf50oBf-R~126KQ z71Q3K^gyLNS_g^jWu^U9L$p_3H9TG(of@dv&`2z({8^bpmGMe-xNMm#e2GxM9t|$% z`SLJEj{Uz-&L!q5YS^F)4$i^& zL^w7vFa_X6I|riT@p7m|fa}W$R>NbSI1E-N#*!J;2|8d?C(2HZ+nPdS1mS^j48ZL# z>|ehw+_-%wG>dL&65>I-bIUH6v2#c9R?uyCwNyPge^sIUfdg~%9OBMRbpsnxm9Vx4 zI1PjoyZ3?~B9l$SDo+f+EJOpFl3fF?%hK-Qx!Ekm#3Su7lghtCjM^&^Vi9Alw)Zj+TZ( z!?;^F-O<0f@21Vgn|GW?$&FU9f8*9I+cs^!(N%Cm66dQ{OkyZcj7>s*#2iVgF`inu z!cX7|k+_L)bOOrdX4%M^3yS^Fsd(mdGY!HtDjIzG+{}qdXp3#txjDYtJayRTMs)?W zWZj5APgW~~!w15FVE|c&qS3O1FOau0^gnvxU#EUo9v*i5h?h;{_5Q=mOW3Eru+3^FXPFV61+w2Cdf>dWt}qlrcTqGs%=9Z{2u%I8_5jVHy=DL)Dy=Iyr%y z1WUW#<)Z(WDLFZUfy%t6vE_+D-0`3!(>b9gvOD@0*^6V!+uqk zq&Ph8&{ga5Z@nUfNDv3TogSED`Y&3E?`vvKFYO*f;nk#QdOt%(JO=@}9zzck*wOFEO zD4=Vj18~}aF*GF&VMXnVWXXBZlFdyMZ)4N)XX5(RZ&-UY=s$em`Zcs0vMWMu@DK#& z;mDET^{+n?6pjQ}9tmD^BnbKM=+P^#>O6Y%b)Az_yQM1twuu_L1aj=kohC!A+*&() z7|IcbE9=nW8^V=`C;N|HqPxV8_$ky zo3efx*Ur(rOfO#gt&Fp1%D{jF8tQPFF^X}SFk zkviBiTpu11Q!fpbhP7y-8s+IqVa@6_R~Ob^vu^!0AyArvF&zx2r$!O*kx?X#2RAy| zY|GYLH|^ZI{f<>z-dx?jRZ=mPW} z7_OPnS9KX-&2j=!yQZ>;o&%NsiNQfKnE!CSpH`D;h~oWK^5!HDj*~K3nJ;U)R0VwH zQB~PCJXz_TsvQh&LjJIazRENjpFI`^0ebbILqPX9RGEMdkmljy*Q{Kc^Z*NQj!F~? 
z9>EGRZw*X{88giRZDMq!-#-@$4w(w^yOW=zca@Ev3k4^DM(;aW%K;m-5p?#}01|hp zT$Xh2+q?OuJFZ@hbUhTJo=Bkq2|Q4xTfjbHO9RS;gdyTs0qsjr4duqQH8eW08|lV3 z=}alH{w5|ViclAVAs!71BT+0^YROGn{KD8&-PYWo4Ahn`txwYc> zKrd5vc?p@ngbsP|5WI5hRDJ89&)#Z+xE8Rs(YB~H>@#=nsa8q@HQ#=+%V6I9cFP;L zL!J)y@0+TeYn3;Rp~c>2WeWQ%rICKW*0vv`P2lKDP#A|AA)Z7<3O*sik08dB;6^w& zY-Fex#$7!a0cG>(^w?(K*f1SiW`IS3G68z4TXLj=hR8ZuotT!`IaVr9)GQkV#~3@R zR+0MKKX4GB)cU2|!{4Dmb@sYmYW2zfNUXDj(SbP$Q4`W7hRaP&6rv^yz6qB=6U_-p zY}izLGn8bj`o0~p3QQh=XkwwLR)&VL;8bP6lN2~G3}B~V-v@)l&Xa;JhHa{?sH=x2 zOENb6B1I33RfbAa=UwuA3ex-f^DKAGd6rvyz6IBuZ^3ov+w=L9Ga#-#pOV04&3Tr) z`aH{BbG`-Fo^Qd`QO^VL{)zEHP-uEWx+gmGNaY}wPHfwvvjdn*OGu=M3(lfa7_CfK zM#GXfIt2(e&gcb`oI;{8Eh>>+8*v!FZDN0=x@}{KK2k4iFNQTVb}T-!VIiU+JPge# zSFJ2mtX7{GuV_P?2o(tl-X4xAY4^myL1YrM58pUGaMMJ!S1CJsd!1>jhOEuh{JwLh zLT`0|)1d<+{FKkZf_JbHYnAO4nSE6dwOJ$*69S?S;#(;+W9?Nuh}bb@R2z0U=5;o) ze@W;%3p0B!s`OQAT^*>=7q}}+t@g@yYeFCc9h0&R}7p-No#S-4%goJME~xC zlPo>*%_!qQZg%h3S@k|RdEh{>5~EizIayxse%A3*s}BzEKM?GhJ^&XuAfb8cKrlW% zSwA2~uu{Kc`%A<8OxMWPzY>4ydqBQQmA8T2{GFVn(nNo40NJ8EW-kT36zt^?2Eqfu zp+WnL)tg=?#|{LQQT=62>%&uH{bP;if}U%m6Z;1zrw1F4W24kLFv)WE4^H+s-U@@0 z1-;cq=wvs4z~kWq^Yi#ycfkS;DeA-1m3}n2Dn%ER=(f20LP2JUf>eQmGnxb?s?~-7 zT7AzbGWj4+>(}Y8y%%^gQ)UIYRdBN$2zI$2P`ZEm+VW_XB)Mq#DUBUqP+E0+E=13Q z|L`6XhsP(UBmrYlXqJD4as5t2^V$9V_0gJtmg?&tO`r77=xY9{hLcK8I7!OThko{Y zx#IH{1PPpzh2J$?*)J>aCL7dS%Tp_*bM2^>H&2vTZZ(m8z zTT9a`&q`FikqU)zg`Ab?5)Qz~O9O+*N5caL0{7-$%4H>C#{;-LU>^ZyJe2sir_7FE zLzUPeu|X;JG3CI2PH2 z$NqB_m*(kab-%e2uk?-p~*6yV{a^n$inOFDR)B^*Io1;osZX5~Vc< zu48}hudb&Nkg|;SShPq{5>>XS-;MH)~=!4+Sjh8Ky7l}T7C;WPE7Bo-O@lm zl45;wc%V8}bqR&~B>ztd%@mQX@vls~3eI<=WMxp%x~)<1-UCz#{#3Dypble%N) zb{V{GYp^Oa`o)aPDXn7DB4fek8;gjKZvCX)Tj4SU0gVZ4a3&li%ySiA8%|8(3&RZM z>qVU<>S3Z6(>7~eA6BK^lxbw|+T6Eg)vg^ox4_jAS5?vmA}Ik&anxQsj2&;>+}AYh zXnZ$qGsR9Wqbnl|WR!<+^6JU*RpG1exFc?8=YD)hrt2{W$Bz@xKzyblgA;;*7u&(th5qHK0IrcFQ8`dX0VH6+xwb(WuP3XJS(j0~hcV!el`xG|hldnv z5RcmPDGkS%%&;fcCmMZogXIc*{Yj5 zFD)xfP&kv!ZAHQ zSsg6zp%}1Y$IWLUlg4g#m1)?~Ik>o1MI1{#(B;%_o~936bM+b?swH|VVVK-Mt>1!v 
z%Olray}B}3_fPRdB}&mRMfj;r4p*m5RH{u*dqyD*5+W6p^=FP?*6-4qgtLOu_SUFI z9q;}QQ1zz?lb8+$$EKln@(~7m_M?JIxYiGA-d``5;n)3otxXIr+}?u3g3oY^94N)jZX=6us;QpBU8bi(lCDy4CAVSd5!mvm&W%b-VxvQJ_tYt zrl9?U!=ec-`tmI#*Zp!4EYu1<6?xwD(=TRy&r?f6EbM^Yf%ldVRm$hpW zFRHXQu4E0zZ`5jJUH#fM?oF1Mb%}HuxH?&dm*M?L$n{|VAb;>xfe^^|`g`3Nwp=~b zu5h_c9Caa3p8Yow|`|*-a?9Y+pwHgy-CzLBvD8GnD$pj*~2((ad{H& zW|P`9I@;$_w%DvUyJVJ7(nY1}-*;k#6%26K=%=(&f8VGmwOz%{M5GK1tb#5k7pG?6 zr&w6VwksG~xh z;JbCX>bBrpP`3rT_4TR~auTqUF&SXw493SXvbskM$S$wY$cr8^Art6V4|bI{>qp9P z1-ek%6TcLaFCsZ(wGk7n`lqEt`zoSAWE3Q`6kJAW5&QXvtzwGCLTnG6RIg&$-RpkKv55)++n@%M|hJX<}oXxB%W^@}R=# zRR3sgl=agKAykRC(WwGig*dD6wy}znpNWhk;_jIm9K=)M$QrJwVdoMl)kd6vYt^>_ zke5(=`;FVeEn9c(ShZv4)*U7$TPp~>Y@Ry)lO%nG@L8&N-I&z@dmZk@-BkqXWS&_e zEU(bWJC|+_KwNN!wSw>>eOz>f6^la6ENeS<3HI7e8eWfW#f|pJkllkP&q0qVoi1&% zUUd&F{DglrRl%i322%H?PB8?QEB1=KvwAIk5$b|$CH7z2SLKyX`(2Pg|6n7fh5&|> zu#r-5WeSNDSH`FGSl!Av5=R+Q!LBFk2#V z)`}k($C%OYO$$LyaRUg2(fdRVvTl*SffX!7Z@yssj!k3|HZwjjiUtxD@RkVzbw8sjI0Ex)D;Q0lBq%*vyn3uJGNK5|5&Tz%A%#KXFaFKby{XzBAzMTR?PZFV6io%CQr`5i3QQk>!z(nL7tdy43wkS; z`IGd=n`yn@j0?`&=pN;6+#!oYK;(?-Suh=>nCHS^^j5Gpq|>GZq#az>$}3k7PHF@L zD}BJtO0+e+f5e2STZ}=_D)&v5>=CV%%wbH zw#Q_NO;LnTP#zr|6?hrQ0hw6!P_bAF_{=EcQom9R#{KZH{iP~`1Og5Mk1}M^0En`# zHEJU0sAeIqS@1QX&Z=n{s!0SnHSN`qOqe8V94-o+&HkY_JzkoeAYqsgbw%a!6o=6x zdhKZ>H@y|2bm^W14Gv3GD2?{l_8}~jqL#=4@uIAP%PLqF!uP_`ySFzIW-z)?v4j=V z7h8}<5}Z9XdF#aZ?T7{w)v;dJYQdwELW2r5V89}^@lwp-?&L1{ON zR0uJSg_8czCn!@JCM>u*rpI~}*`D9g(ujK)lIh!$h9*WaHjo!>Z2F%c%J@D!kQGUQ z2+eX$Vhz?N##n>Mh3188r7kyEG7SfdOY6a)SQ;_oR^Tafh$O0+8n2eV?{aU)kuHk_ z$gP+w4@Sc3h#ZnBxlz(D;o#KhDEw!1P!5)|oh4bIeh#6yhQ-&DxH1U`N=zLJ=>AS_f>}qi@u6Z{u;BglC;S zlH~6rZk%kmbF~b;JFgC{N?cBRG28@t;q*l*e6s};%k~+Sj0w1Ua=2C*&p(5VLLj?ATveAtCXbe8KR@+c>ok~g^+H(a7l*+L@4FRd^0DYNB? 
zmGW865*6ihuAmM{$LGC3fd#3ISvrbV*?PLkN9!d3{<_Pxl$TdVYn2G}a79#F0j#ku z6m#p+$aK-8o*NQgE@ymHaW@K4zn3ztB`?S#^y1i3h#OlRiPFiTRzg(@$pkOgx+)Mx ztI|}0U^o$+e!Mgqt$!`}*TE8VqN~YT|LCDIIz(N6k*i6jxkBXBR;Y}dQHq56h--Lnsby?tkuB6I7|}rj^n)(Rj2DoK|%qf za*t7wn=quYwc!=@YW!@*L8Um!4v!aAcyvd{1^y#A7u8>J;#a^e<43clW6B0WL|G1=9GZZO8+E<++#tH z`1fDGd$bcml7_F6jD{)7|CuSc99?9+8ZM%OTJh zgovion`Ur%Q<6fSFBd{Yq$YGFiJ7d7PE2ZF5GIG5J7H7_H_b#{5igVT5S~iO&VxZh zYW;YH>%mQSZe{0$0{4RYPz_sSJs7AGR!|+`iRcneks%BM>>5X;$m1W(O$HOJ!u@bf zYznLZ7L}nr^?o!aOY=pd*MZ5&{`Korn5Tk!TDwN?u3&+-#DOFChn>-L1P;l9R*XGM zG(`3_7a5aBxfl%hbLPl~BX62DVZ#BM45#30M!iI@rTv)yf|08Jdb$Yx6!`^_+_t*P zpM?=;l2Z{?M%{ya4L2R#D^!j!P;54G|2dryDc&)zb(O(A?rClOw5}2-`G<8;0>6Vp z4g>kYiZjTPfG(0}q~{NhjYYbCZBHZ93^oNaBPHKuMx3)A+&W=)gw6PZjGtZ;Jutp_ zc=llaqM&l!>U9HAS^v9^U!_MC+%Fy%mcL3gbaW2cgTTlT0oaIA8>53};lT=p@w%6R zm^-z+yP`#mLL-1Zu0cVRojq9CLm(GPo_AEOx@FDjtoAj2bfLJwFc zIcpAFw>}&t>@O-3x37jk))CTAmz?6X!$EJWk&&?=V3$fZkD;mhrj1NWDM8TN&s*xYK0T zz)Eb?naBBZca=tm(76T@d0WSQO)q2L`B~=iuyM~_hyUzcvb-$1_N4W;-!FH5Qd_^_%h)9=%yD$$sJLrQzkmLt- zT2~JI{kCWG~SsmetH1XO>ro^Sa{-&=$A%1qr&q#b^gp)nAFze!3mPRB=e6FSWz9 zsu4ugEx6Gn%AK;@7sm&j!Oh+_DpVKx2Zl`=VFE$)j6Q3~5)_HC*}ip~=&g{)M<7g3 zt^uPt9Hk`BP>Z|nC=i?wHsx_VMi!u`j5jJLPM=sf==O6K@v)^=2s=0y=MpU{QYLzm z6oH+~3 z#pVpy48pFOo|&ZVPk%f4?^ZB&ky zYWELAmz8WslD=I(Ekk?gE+wT7;h>DuX>`xlvg5^CLeU^GNW^1^)`==e{jy#VQ^g>} zx+^7NgQYx6XNojX%m*7*JCY15C9S8(=m;w()l=kUm{8zHAsq*CD>F%8A5U*Z8ywmL zVSF9x{v3MNuKpZXXrWRXh=uqusKA;YM}bUW?E3j(fWa3wpo^Cu2E5`beHenCRzMo* z?G8sSoGV-D#{Kw}Qw6#7p})qX+MU5^t2X;EHG(f}iM*@hF=`C@^kC9#^-5hjrX6wz z#GfgIyUwwlP40)!p|mZrkrL~cDBV)i8C_#)U%smHvU;?`w7mCdQeyMLaqQ*=EPzV> zjshVZWq?ZlbDt3v)zl3}mzNYkMt?)KC_Sl~Q@4gwZx;|XMFu80-|Z4yUM7fAUEVJO z_59JP<8oQibGcNI*0mK1rd+ExE$8!xA)jA)Sh=itSh+NRm=y}9T$^`}h<~N?a#_)N zxis(G3I$WH)tgxUzgRpR)fkA}Z@uI(Si-*0dP2LLU%S=4^m&x>Yj-@~7kQ{>_<=Iv zFU?SW6Xyv~zH~e?~nG$q>a-6Hk3HqgSu{-1X6IF;c z%OVrP4NnwY#t_V4Nld*R_IZ)*kGVU<+c}&j1HZ+uQFlfCfb$9M zOreGMU@l(Jwt}`7Op!9e?6yj!VRCyK;NFBk{jcbkEwaR4huc98rYGuF-O003t{u4q 
z2{Q!9T9cerYLC+krKU%No2u19td8#0cp_atgfDZ9E};0lctNqb!zs$p9+^n>4`Iw4 z&A9{$T03bQXq0_`FnSZ>1jD;Vmmr>YaS2N@?5agSPt_09r5>p^om@~+w6cEPBaZ{> zE=LE=bcrBw)ONycIT?M49GWYo9NU$Z5mnj~Xb{3*@`iB+nkEf>&TpsHiax4Lde}7`UsF5tqD z=DZw_1q>Q`vTZ`w$V4;V!`0=NSI>F@LFQ$bKim5ADhd%-rKDBGpk|z`F)Rk17rrc# zuC>HUH>2BbIn z@28wC6RB2l@cA>UMeo7IipGyDJ}SvW=*ywah86Y=|$O^Y_ldK$C@nVT+|g+ ztVuZGtjWmx9KliYK3O-_3av>!Y^B2Ieo4bi`doFu)_u_hYm&X2)?_?zFph+&fBBk> zlKvkb^5afezQb5^MvELLh5+>7Fg&x_<{BpLGhvv$e>?h12SlWncZ z0Ao#}0gc{WDAaLJXiXOVt(W>MrCF1uij=lB8Re(wM($!B)*F01_=1j|c8m+ODpD)7 z8gic$x%djsh_vSxkn3Wb@Ajp9b|T61sid9@*mvFeec$Ap%CyOtBy%BFI+XZr_!~4O zPh&OQ$hH6~-PD`JFkhfJY22)VS)!JO2=}u6gVN?MH`i{9@C~!*>b|=DJQFX94kmYB zIO#m?Qm926h&B-I5=5jwLeffVqHzErRuz5p=Ufj9M60k2JD=g$=Q^|EJ`|#LL5ayH z*B$V^A*Hy$$Jj?uozv&J9D)lx%zF-SR;CU&L9{T z*kfG6+6ZnMelg+S3B3ZRO6EB7(-Mqh81UjQMN4~8e^4kI^Aa+@H6ssDHZ+oj6F}uk z;_VZ|mjQ2cz2rxLCvHs%RmeXgybhDj6m!8hG%e`L6CY4WWW5`|Og&VcgCgzl@jlR6 ztVX@x#qjpCIPKoc|I>mUXHx^}jk8Y^7x-UsnOI2cUTxTbzEp6t9Y3%g-gfTtBH_c5 zPZ*~j4;f+$>!ZnlRraa*=SXdmc7wimc_Ek(mxb$0$^{(e9xEe8ZPRw=L@vA=d2T01`$_>o1ba^FzV87;X zx5}m})9?p@^*H^`!GI{=gyM776by!O7jHk8%Z2S!X!0zW=p)$bEMG18%*xs-Gu1{b zld9=^7H=G@M^pH>NF9N`)WH;M9sx_A1s5LuC8H>9Z!F&P zRUUn~v(WIpRh;2D8{9aksqc5^eV6{XOYKw&8wfQ`Jl53ZpHGd)g**G8jl zzaHK0`U+}n`oj$xp0&R9qYtZVfp5)R$c~*W*CI#l9CV0{RPa6~F#qiu?#pQmRoFo5 zXW~CS|Ec9s%i2)~fOPj_-=wU!`qukAU!Vc9EK^U;ti@?vbT6QGu%D0yfW&`TsG2yk zHS43)u|VJ@r;tWCkR;sli9x zXwzQnc!q+GaXe5m&B%k919s>YhFrW%~V)j#O$ zc|*m?W<$l<_C`a^F3iHFsu#^wFcBMD)l|h-d-k$2ZK?D{xir?z-Y!-IWvohA*73pn zcbkSVDfPUj*@u(eJpR-%2}1vcD}p9@pQGK}@4%yI>~i`=*#6e~>Ci5cyCEY7(&1xQ6f)Sr8w{}o z;q5cy;Rgf88j?SG7wPuDV)M0tPKDG&rtj{6{A{9jg2hy+^csDoQr5d@dlyi*?J;!j zQWwzf7@rlT>Y|j51@ZCl^m%zfgyojud1w|7YFljnSPQn>{dS9c(gdw_U4IbWI|rM0 z_7j9|nhv1}qGRQ~!6{zQaTgGlw5SXCw_v20=IkGwb@%>b@A#n8eS7ru_&pyuZj1ud zF>{Rzx(k&8j7IgO0ziFny}>-wr?RBb!P53|PyXRDzSQy*F{Lg+rxX~m%!?}RcqAfl zB;Zf^DYev0^q44|+X>g?$GjE_Rg!B=9?usm!N+Hed8)t=)VtU9$P=v!cOy9-TjYKQ26IN8<_{dSfwfks3jskU~5EMW791tf7j 
z(`(LdjfLb6!uqoze!Wc@D%4yP-NKd>V`z#3a|;y;2RG!DFUM(6lF|f;i5D`9*^r(G zk>z)F-epcSBw==pEM3K`MB_(z1X}8+39|9M;{d^T+Gi-k04}Q+ zd>^L-3}}%UF~cSW-1z0W%*1G&G79>oHHE z%Sq~sBC~)z>Jun`*VA7d>9RA;o~+?rjb!jEHDowuk;EOU_83vgXjR_Sl$8q}@=cxAEjO}~R zpP`X4n1YHZB0n0B@0hV{#Qzy;#yHGEV8pK&Y0~{HICmfiT*=*=#qe(60P=L41GIIC z0?Z)-`3-zK)KhfOBb)!OumQg^gNM2KM(WPjPP zB0rxEGpzzgf2MZna?R-3k=81((NVj{ABq#drpLFgoz;2^33GICaPVm0e>bH~Q8RVD za%068o`(@pb(&X5}!r-WUPR3iEzW?>$>{Y^b8zHv83jyQB z1>Dug1R^h)352UeCY5Jst0Qwv5gVX&doszwcuihpO8W3U6Y#gr4$gHAYInPNaiR~a z-F8rwMai8=D&i?2S)2RsW}p98Yu8)@*Fr3@c4U%< zcoo!G(mgvVZIU2bSPLG~r0zkEFGJ5tp9uz9gwFDClZ>eP>^k&>FP?Bwd2d^3ljbl6 zymX|N`0JJ7>A*zm$KK)Ma=XljufoNdxqCMlVTF{V^|e5MWtBI<#HZQsoJu*dsx=`A z3ECvnn5+Suyn9wlb#9^qkti&K2fPxrE+^w^(>Dr|s81(bv<2Y}d(?-WjEbtz@gS7x z6+CRYRwF$%1iICswue&y-TsnOn|!?5SNb@AJvp(djx%1o&U(PTgY#Ag@^JRKZB|S5 zP`1~%mb!>>`}waKYT32vE5LYh)sM!De1~ghpXK$(w`c>)7izDsomc7PEV;$g)yJMh zY|XXAvQEwnoVSG=)oQymFShaT2aB7HA&xJrF!4U-3V$0NA^0T<4h1`-GK2stBQUE# z>3Md;)V428Th5sYk<`)yQ(uRjr*As4>}sgLw1+*t{DPu_;hlBE4k~k4mQ+^YHaA{N zWGz9aA2_RBr2+X3aMC)#%b;CMm5!=V?e)jOo!ARD_hyQS)d@`xdWY-=Rj8R+mWft= z02bDpJa4xi(&*(*Ss&%FZz<)6~J}-VrMiB|W>ElWrFRmJ-iDshrh4 zS@b*lboeHjEcFmWV}(IR^ZZKdBs>u7C1NCS*R0`Bz`ne31(w_BIQsbhbQ2>sb2L%y z5d{)(#js3VdYW3ka#Qpmh-{5jMij$6c=7DnQ~jApHne)1nj3f5Ptsgsnb0HL@-9>Y>W*weM7KKivA|!_`|4Rliz$@lf^l z!_}|Wo@@Z@&PLgfbzZo;>k*yAb19jjmsA{X&gF*u@=cE6eOmDvZcNIcA zS)5|1Q@LvS&%x&TsuJKTHwYjyOg*R%c8vk%p(7)TI{pa-?prsl(;w^E;a=Ed9Z z4jA&gj|XSx)jkr;+3x!`;-UIV+i@=*s(ijw3DqY04aw`%cW6koPL6k2ERW32Rnd{w(azKsh{{b$G@Id2N>m|GAgS$&2&fH8Q~(t zMOhcZx_`O^89n@lS$L&~D7Oi%RHUxYh~iMGh!*Fsm&#KE`>c{;k?& zYWn)*s9K>TV6wYZ7z5C8UN7M4+d0qEko5%|2zh8aZfMpyUFt3kYL}gPPQd2s z!&2?u{!&GZ?)7Z;skKz4w>fzC?|Ucv^?k8hj~c4>PEPmse%sw&s*Z=Hchd^kJKL-F zp3pC8EIe)b&Ov>sGIjaeoSgo)_x8>9>uPoR6y?Nxpmyu|Qe}+rDdf>q`DJMwpM3@T%+`QEP&#XnvKOhe*jT10(9u(=FjbAgy5ISUVK zJIulCWHP&gkmQzJkXMIO8oo>$`JWHp{FUoZULvD96|8nqSoC>=B5&=BtV|@AK~z@T z>pj|AZuQBNHHyfh=M}?>0(8F`I!K11(LrR34x*r`13u@|m7Ysqrv3V`ZaQoMV8o~e 
zafjKEF1-NCKp{+OPla>2lYLEtNE88g3&EXPAEXfrOw*1EJoGi7d zbh#RNA(5#P}AAKwtuQ)!wiBgH6RFY zDjiHLz1kSQ#ePfCTXEXFK$}v2cIzL)`C)zAiF@Duf)YUYq8=39AB>VeuU4l?J^rxV zFuf^?nDX&cx15E=H`N)ryjI`tAX#nMC3ftAt&lvfHp{z-#5d0buek5`Eq}# z-j@6DA&BM5VkxwquPrr9bz*r-?Q})YXJffVWw2~47`zX>jg9u@!!Ya59zfJ!!Yh5q8;c zSuQ3^z>To+N`hs^J4B@XNb7?236>B*wr#!8LsH-Th$WSXtj`lvdZj4ZpSs zaQTU+eHuU5uhfJTSVL@Pk;k~)3TNpLHjClB8{W;upT_u`Iqc7p>ZtDUFkUlg(mHe~EE@O)<_ICa~j) z2;*@LiU#ubaMdsMP|aQ9Evi?lRsYqZM7o{q?Jg#A71`Ryv+5=Cg~b`pZe5tWaF+m` zG)|fC$--5--T9{>%EraFr~QlRw-8B{W!V>Z<4;MM$!(;pw$xErkeB?>YJr5>Rc({9 zG|jC7$`3BO7W>G#|Mm3>CaQf?dUWY|vD`y#gs%_|)Jh<7w{Gh3us?JCZZn)0LzFN( zaP=zuJ_Uf73~^N+I?yuoH^nsMhNXUsSLhAqH$Nk1X{aqeg60gL-!S`X`$22!=Zk(r zW$`VWVVKLOrzandkL`Fmcz;%Y4mS3!CyI_i#EgEceQiFJ*(_2rA(W*wQ79jAC?l~p zErERViYDIhyT(dAWyOEEl7%K_UnTmAAdY4cK|0E>AZRDO6~vh z*tR?-nI7PH_4VCoeETbhj~;Qb@+r%c1zC2#-9BMjgu zH|??8stkY2*W5qTb%I&IHK^tn(Kv`W91&^Q3Mibgued-47|ybLxbC=2;zn62bIobg zRxM-|3lTUk!;_E7WClywlkO8K@kx-$V~8aO zuGVI97%i0xsGZxV{6Y4EDFaM97Hil7qp-9x#TZp1M7^5Xp@yL|K@W6^W0RAEt}3d( z8O}fu=3hI6JP{xsYc81-^H$>tBCmJN`!BYEyd?+85O2LeP|>yuEdHoq_J+9arJ~kC zMy3%)2gs?HLe)0Fxg`xMw05wg(>&n0xOja`tIL}!;^kGK+vs-LM_qiKTtNY+ts&NGdT?IJAvZG6?4*q=nxatkuNXREpDrwvc=92{dM=9kOAiiH7g0Lm| z5I}6-nunOLk3bP*7Jy%mJU7Al3WR1jhLax^A~6s0Y*dGju2h|81G5OIaJYjJ#V7vX z^T#ZD?#)&O1Qu&9 zltHxopd%^aKknMc8|ypgpI+uCEk#Jh9}r59AX73>%0rWvbyVF0v$?2Z2(no?N(OVM zTq3V{GpD%%0Kuya%^3?6bVr$uJLAg~){ zlzahxn4Jk>oQ8O-A=g8%$E-#wIoTd@o!}4BjECE@DH^ceRW`IJW#yuIACZIV;um~D z3>kfM$6Rnb6`G6X`(N5YAh1se^YdiYTrY28&ZOLD5i`LjmSb#=*l1|hT*AC_Q(;4- z-90+9s)5Oo`{F&$eTL@M_g7tWhgO3^)$r0+++RSY74IM|ABK<1ZxMi6E-9Rzo>cZb z6}E};ii@ixmbX5f{3p2vX-S=tDa90z2S^vN;U3|>*Ot1&nl9O>VheQ-_N*qZjGh_N zEK1eL&xPWOD~5^LD2p};e?d4KhKSUVIObVZ+#vcYW$5T2ok@|k1;{v|KtzSMo_+bB z-w4FyAtjRI8fHfZvKMFqGJTqU*&IuFt7{c{$4rwUp&f$D=~K|LX?(gHMPpB;KI>4R zGJIQZ23r#;Fn}C-C`_os)ZWv6|JqIJPewF`@i$)EnrHgAA}V~Bb=p;U`0q9=J*bZZ zS3c(>o4^jjJh}{gIhTi2#+TcmOch7E^DA6u{L3kfQ=mXP6;S1sD`|9vb5A-R-w~d6 zh4Y|oC(;ZnCL#-?C#!CDJ^XcZ-e 
z!Z}A+pgK@s$u(9QEv{bO*tq0&{`C4Q;R$-VxFa2b2r?h$Wxuw&$0#tWA=K$$umJTdW;URFP zO?s8$Ibk8U+Wn=bh{dPRo)L#cIGvrEm>28PEalst*I}q@p22doxUUesfG|Ax3(m>m zWHOS)PYIpaj+q6#_vDCWgmB0`D{=-|JO=GBhuib}S1u6Z9QFH{T~#Lfmc8MBhJG2I z&rVI_^~4W9jf7J$JnSvEkn>JCK9^B!a}uJm zgjR>!MYC=W0*@0ofP4bV%>vbaL@TEd`Q%t5b4$yQCOd)S*Mia}cZ@IzEDSB*GRGW= z1FtXt!2TZ1x;LZmz@(rb0U1+La}ZfayGOsZh*AoziE;M=KZXhtBQ705<6&QM)ZSfP z>mW^x1@>rbf{Q!{B# zSHZ^=USw$QTRFj@SIoLw6!hQ@e5Hy8E0O`L^mue($V~FG1*d!TO&94n*^qIDSAs<@ zT0ddDF}V4qb#skr5J6UM`)gQ@%MYVpDXC@YOb;hEDHwUiQf7}@M6s^WdIH}tXS{;S z+g9FIIRS92Tx@N5VK4HqqN_!_=DY6ADX6AT8DX7T zPJtR-4p(I3Bq&HqE`ImZ$VCY`q%{!&D5DIok;BTz3O%K3#|)xQVSl!?I@lWKEwBt& z@^QhAhEzeAVMC#JPH^r`+|%;uV{|K}@!p{%o3V0?{g?7u=aS1pZ{YfaUSzXgDWJzb z%&Sx%h-=D5f9;e{{pAIzzr4Wv%d=@FRw#iZ#X;GPQ3X`WUNO*b7ks}v{aNq_38>_` z%+F1C@D+g)L@*zwHz46b0ehsz;-9h*h~;s6-HnoP=#UqQdbB!=@+Y7j{XqtAQncP$ zpvQ6py;iM>tB8diRpVZ8kJWqIADDde=SA}#Yg92W`Gw;XKAwzKl_+ec6-JzH@71B@^&y8rBVT3rVJo?`o-4mhG*up&lUyPyWHA_f1wAx+S*E~ z=a#za_ftn|OuDq)a_)}EVASG4fna?h9@DjjN=*L~wbCrMi&^xf&7vo57E`IsV#;V1Q`%FVS5vI4Lb`&FbTbF*bZCk4tn z#lxdj+LE2-S&%mlVP);a&&w5KSjYeaJ-5GhcSZ9{;=jK^G4L*=J(q@?! 
z`BGR;9KAy37*x5`24544WzN!<^@GU1>dG7l3SQ79uC;GG1;$QF zv4sh0v2Ol&ExxdF=8HEQ;-WYEGMkHM z3qQaS;KIJDjM_p; z!$+ZdOn4Ptla$!5r!TgiJtNj+Hj3VV5oym174x}Ej76mQ`>P1thIph}(jM1Q9hE6j->*H`KeBj5IXTX{5ZyZV{U z#`^t@|6q0!ETi@|wTy!M;A)->(aGkS*p(6J_F|l}7U))4LbZS+r9_*=w@++8$RfU1 z22%tI)<`|e>$+CCe610I{K7KM%EqK26|@uX)9-O~AvBtUZI{z%2_Z(kuo}nTKyRcT zkk}YK#BrCmPjSkC2OHmi>^&5l;A@x=6Vw}Q+L#tZdzE|C`$NEmzBvYdKGOXMY-K=TXYySGyb!A&z}7% zAH(^v0db!?mWOf_Z6Yfcin8pZZcvO}4{>irEL{SbxjI!U=MYhdHEs<)lzbb-Z7BuE zR1yNP@ba9bXW{%gd2Y^&u{@9yM^T?h?U0kmtDa)f;P1hrmP_l7^UP&%*~hcpcW(|( z_n)42o_6-1b~=x5X$o=w{&@Jq|J?g;bm;a@-#+cUeSEW1j9vGi{<{W&?OW z$?m)Jj=$DhlRbR&!hP%NC$^rv2J^*qDE{fuU7MH8r|0G?(=;<4GtUxli;_MD;c2-S z=x{Dte2b)%dX7Qxsr&9NDq~&N)+N?Uu2AASod`Sc=Eyq>Pt=BXRGeD#)=TcJGX%o7 zIM6|01{WhO6mf;Q@1Z!2FV5B~Z8dn?NgmDpk(y*@K$}2;5bvnsrR&6aC+C=no_#p` z^#`C&e+c@++52+ewl!P(FKf!SR?Bv4%JyH>z}1xPBWp~%3+A7Wgw}3N+5Tn?Tus^j zMh#p|*?!>_o0*U2)|Bl(tLd*?c5o`?-Og)WW{^53vw|8nTJ6bJ|OQG&=tr<;JN8AHLX4*S&0A^ zItJbfKG2R8l@OIY-@b&|gC?Mu3Z+8M)SB|Z@K7{M&QL)kI24F;2I83(Ei`s1oAt`M zBp@rh#4ZC1DRNKEen-()dW1W7rU0c@B%HqKvltQw6afYV%caYdtX50A$9tm-=kpH)7GS4ndJAp%P$uoH-r4_q66$%bcnV#l+X6Fr><7zD; z4gn%3z}uYU@8B;^rD;8+F$s|mAFy`NPg~oj&B5FCKjm#Dasqb;b!R~u|5m=*xWYvv z=1}$uYk#(P;NngK*1SGP*z@j^o<$msxVMHUyNZ+(NP5WvS5BbtV8z`eTv<+^Ae1pJ zKu76ydM4of$v-6$`%igYBl_`1j?zZ`(^w+yHVk!^IO@P`lJ*E(({Q7D`Jn=~2O7b{ zZJk3pT5w)%J!8FPoLbORXhozFlfcAUEU)cg%@u%kI9lEdh@CO-3h<_WR|Wu4keuPC zF)>P$M%hQ>^QTB{0|jtHQEB?sYKo`l^3}Yk>GAs9tI&z}-?p&lTy$4==FnU$a4tLB zeWCl8KY>@S>#FI&H4k-u^J?I*u!y6A!_otY!6i0bkK`P8CD<5YNKnRuhhAy*y%1&f zt&Us;X%DOcBAu02IL6Edk}q5j$FOK2du(}v1+!|+-8FI^M9yN)Tb@9%MLr=%Cy-45 zoaTT70^XZ)6pa?JVWFxd680X!Y|z*)6^4=?K^&GU(DJM_TXs^-zKhV$OW;DP5C-j5W@%8MV&<`rWlYc@t!|~-mp_e`ixBrB0#&Eg+ z@h^2FedE**8n9nc))@6+U)T~*pd#r9rvPag?37to3VR@L-3yDAnb!$ekv!WGhQol{ z0SuEK{?9P4dz%On^*BLND@5k2@Juc&^fCMqpnz(XY<-)__IKQFgV%5Ic*eb6s0c0lH&o0!xuzv>M z)4srrV_1w*n2GMmIf*S`%|wAJ+m%W)Lq*MqmJzrMR+2H8v%vLG@rXQIK=P*(NpjuZv{dq9o{488{j zGhg130*pC_xz0T9%|Ncqgj(eO?dz|nLrPtT;JqTwf|i4%!K!w)KT3K8vWHHJ3iGgP 
z@&E)g_^db9O@jE43pCF1R!?13@M+?03g4r%+K(fOb*l}LtKtC! zxub+vVM4%B4Y$VQD?xo*_5ByTi!{BamXvP9nIZ$t210y?*THzWCMyZE&YsM85uVhY z5poHtFDjA=FOWJ-*VX)xCT7}+`i*(_v1+4;FkisU3EkleF@h{Qv(qN&&OQQuN!9Q% zk62)c6gHKa(qVCxIRVqX!+ii#Y-%pT)QgOz^H>{}`>%QSexCg+M8!7$UrHpvZ^s$zS>D*;l`Jmz;w?vcgM$R8fYHfvv6e=YM2te`Gt5)f8M~XV}eC zZBpaUH`(O=rhWrx^*FnuYnsi?$I1RsM%Wf(3o)MAB5~y1q%?~5H<(Yf#0-(ph0Ejy z%P?%`_|(`?o-DE>Nz#xd{Gu|LL!y1cewPd?UdMV!DOBJ5Gzr1;#r#AD{ za{#H@^q&oWctV>Wr07|{&!08@$!U|esSBy7BP}>=XwHpmoV=Jz_(bKG*-fSh2Yi^! zgBfth8Nw18;^+^97n;19i^jN7oNIYe+%u*pvAkXEnCD()7N^|XIwQOv3{8*OF;!9#QQVh{y5IWCMQtx~O$w~5 z2+~79G}o+XD=+UKH`0f=;HNBbyPFb`tWqMQKZi<5^W`98wr9#DmF=;YLi-ed4kxFtA!rKp29YVynfIqKozd67MFAiRQ#oX4{ilZ zmGrm%x9*kwwE3Cb$W(ATV`*mc@nR}<{3y?7Pb`Un6$pTxrP!J6WdJWgm}3q37T-9g z*y%d0Sa=FArPTF3%I?FVrKwyqXgLypd-{+4` zzWz<7rNy<;yUBKx>?pp%>d$nMci|RzZpX@0Yk{GywoR?) z-u3pz3OI1nd{`Oob27TYx-#2!#%R(V34QviNLw6L(_-6fL3E-H4VMi>kUQSmKkeLI z1XmS&G4-%W0`dR*Ut7=14eYKtn|ywMgBV0;4T;|Egm{bVTdz`b>v^8s+{lyL+o^ti zD^EVpQ=1!ka((MX0^NE}l2rw@)}3tT>9(c)!J8X7H0t8dQ_J#=y7x(6q6;p zf?3FB8rxs4TqUX-^mb7#X@676Q$+c4B7lRgHL*h|f(qo{nx|ABxu4TBUw zDoQ<2B%vYq7XLEp7f&>i{V8Y2C{C2R{C7v+1l_lREwTRdu!$upNQPhw7eCop5><6a*`s-iyx{V0+ zzS&S7Vz+sHJ}ZOnNpGdaWjAbD@w|V7(mjE!Go@trI19aF?jBYRfoJM58#fO%yUDJZ zBz1u_o?Sh>Fd@!g)FU{xVfjEuU(T`}A+Yk!#9hyEJw@TIKLbbUQ7|5(t3w=3EwVMQ zUGILIW~_<%SAK*QOMq?K%yN8_2-3uDcZPnJ^x_A1Gxu$=I^Sil?1q6260DROb z#=Xm*nw}vhW3mJ`^I(^nHH;`&2gr}$sn`nm4xmJ#!AEF-WdCwNB_V19P)Vys=R%q% zw<+WrR8+HurqHC4NVu>P)CfLC17umSVNaRdeCc0bVPNL-*w8G97f!6Pc-W9YPL(t7 zAdAct>%Z}tIo~4ZW;B9wC@BK4ut2xs0K1g*)cuc0-qE^ZX>7`bz^1B zvk8hDft+#jFpL?eJPhKLZ(>SD0IN$=zHEBUWeTx0{Y_0w#}UHXgb__ZCagw1-8U%uWX~qQejj!Knrq&;nmQ}b*UBFA&)P8F zTO)_^(@Yn)c!QPY+0ay0mUh!Wwt=P(_SFxyae;mcf8O-|GQZ00l^cc|#+Il=5Q(b+ z297`E&sfZQZ~S*VLT4n z4k|dp4?kZlO!rZzRq6K`g#Cl#D4zn4Zj?65;~x<_<#7#iyhPaTQ?r(D`kGBwWs=-C z%>Ht-IKRZVRt-0Y}tnZOUS{49To_}gJ9)PW^6P2WRehu)=Q3xHN0NF;!P30ajM<@ zfHWU4YHR4K{JlXoO)dhXzY%_AD_o)k-3*<}2pb+RM!s-;7bG?SrSrp3ucjCANsZ|& z-yaXQs&?(`us|pZyPWiC;@0t-x%?fuhrsrRGRkgjbyN 
z_(Hx|*`92$FHuA9pjHiZ=|Sp>JV}4K0q}c65cg zHAjL$yHY_1;erp$YlT}W>FhI0tTWmq(50_i`L6kq`XoX5LE6x^w4a{rI2t0SzEA$% zXJLQT`%Y_I)PT9_tTwrC{lI={;jk)5YJWcnuaJJlqPF56=q$ZzLw5;fDXH_7F1+s! zHTAwSOq-FrsYG{SHp{P@l$it)P%Yz5z)RVgpTA%a5W{3 zXDwR#D7rUXYTA>+Nlh!eurW6+;B!W1nt>;4aMkM6Icj$T6!s*##9^I?fo|-V9)ptLE_%$=Xduh^E9(8hf zd?r?u?2eYIN@Ks2SG`KRWF~}H=k;dk4Fuzk_3xnU2Z#wD%Pl*Suh zqf(t#!OgAHspU@PBYjg$iQX+9=s;fn(mFB?O;#noBFg=QDjR%Tm^NVx;udhaRw|I@ zWLr~%_EBZw*mCEvLDde9K9sX>{ql$^FC*`}#aHu{gF=tF=&h}R9LMVze6n(FsmlxQ zp=!kF@;QNs`=Zja@k=#EuFV>obWTqyF>ae7>HLw9^x69B&yNF+<%msKB|>YKma8r| z*l@>~=anZftsXO%tgQ)C)pDcKjGz8`rt7v`+cZTM54a2%-E@xklFUOIAjG4ng!C+b z!IQ)HduNB0r==I*#a~yVVCJr_&i1jMt%S2IRdo&ys#OP4%wYw$_I6)agIA`3>?vwQ z>-F18{7EyB^Z0m(#S}nqyE05IZmZsi>S{bbYaTv;oLK2L#6pY*QR^|iYWhjf5gND@ z6|fj4RS#8f$1tDQyoEowYE@_*ms7Y1_zMZ2%$~SW8doPM>d?_FAu3bM%2ZN7 zN*KB;31r7?@91P1;>lD;`bt;JV5=QDW}1)5{Y~Yw&6Ab@lHQ}G3P)1I#0w(aFrmDpYxyuGZF$ zsP2VVn-;7D`^?4qO;XD2k7I%&c>4%3MD9PxpJ*%c2jGo8DhrsT4v#U;q$v<; zfny6{Jd`#XnpbY@5+x|r+wtV}o%bKk-yWR3>mGHEL76(OH^fpa)qi|{ytn)AXs@Zh zbFkY~_2%%mSIgZ|MzqkSXQ*(!uJGA%VZ!`QQIH}k=uK;v+aJ41e-~J9w$55l{v|o2 z(N?-yvGAtsz-5Zb~4>3sG@zVPjFevlfnZ$=}X z(~n(O^9mJqN#VOa{3A_fYSGnLqwz5jRX&d5%+0Xpz|ZGZQP6l+pR>vp1kFb9$*unvZmPj24lmS2xexDZ;6f)`Wt+ar1K)wO6DJne=)io7jbKh z_<9ZHNSk-QWdx>do&z})8gLT8D#ZfQ?Vs5v_c-EgZlvwRH8L^BxMgUMAC;_?TfHkx zJc58uUPVw06UyYBht2W#0nI5f-@)6-MG@Z@PXL~eFK`;i-L4$)N)OL!&3Ddg4nEZB z!V%?BSS3bt*)yZMQ|)Z`!_n#9&%68kM=nD`D_jdHo#Fecs?@jyBEZb!TbJ01#ILk= zCNRNM>o=C*=TykfN?xA3{RxiH9Cm!i>v4speD7|AB4fEky}8Fp@Wv4HU7`i1 zN*~B_L7S1`+`Bw7)I=FsDlCl*4fy+1%*fgrN6OpCDkV0uR6QQ3F69_mn}MXPjjUV{ zQ|t5ibAN4e4?bi=jL#NZWzp)q-QASScl`A>fk*$#gA{2Ag!6&uRbL$st-q{1=lp#5~rXE#IGhqfI2uqa_qO^-4(`0FrCvge>0!| zrmlaLIl)niDbfVBYjNO)Y{Bx|<}(mOIh7Lkc8fU~E!xN8rma>~%8SeCFZq3hq9d0P z>s45jgdRxj%~5lSfC>NX0t>N!2`n7J9ZUy@v-v^M9K|9|U%YmTV{cBjvH=E0xansb zFBN*|g-WTnmQ%k{>$bICF7(#r(za}Kb8)p?&)Gg`*OsN)FvI>8b={BAAc|teO|RUk z`%>-h#b~J{Wj{>w+bI<>1NkM2F0)60v%$q8?i?Q<{oFY_wzya5>hMXq+l)1;*+*yv 
zgZHO64*%4WH*0t2Sach>0S@6SShjHpOg}MU4~y1`MO$!$QXQ{!(%L^gZT++_Udbsq z4r(uBjMPV>LWsrmr}_rlKh~nN|LZP6x9=|6`@gpy%N=J%oPiXZFZ zJ!&JqS%0iuJbIvG6FlN0&i4MEZVJrq@9p89I2v40n;TEXhx>sUh(&hdw97#u3E6Xp z1B)UIXu7I*u?T#%_Po8@vN)kEMv3BD&KNon_qV)!PkLBPeTj92#oHV@16p_{f^VU{1itDRokBd_nMrzwr-@*x0VImA_()GdKwRpd$b)2%|rYf>Tx|e?JZNJ%8Gl!EZI2l$37E=<;(r&ng&s%IrYVWZ&?4^aV;F`*wM4KllEtyE@r_cE4R- zT&l8M*sd$=e!H&)O{F{Pg-|ev2{Y&&{=%DE!`hSKo4;01GRPw5&Yb#_0pj1O@y}V3 z7hjF%H-HKA)o&+IL-w&G98W&W4nn~Un!zLZdtFaymCDr<>cam}KhvQ?fqY6m zwQ*%+UE*c2kJYex_jgqS8W77sJK7dq%mpeR$an_M!&Q}W@C;~e8B!}7wgB-xGgtyl zv$SWG^CK(QddIa(50#SM0Dq3??XYD=14!(wHyPkX14FnT%Launh;Vdhw4>l0a0XQZ z*b>f^30mMf0TdjrpZ}U>hf<&Fy?FWRnRdC{FY5?04}@c0aW9mxk`ZM;da?FtCATN~T2ClG4hONA2>YBeg;@mSezvcm8l6{);(oh$~ix~_t z?zD__P%XoP?QYzR;7&<0C^Jy4;WkBWFOofIHriIZt>^l?xxwEx{x8Nhk*2$di9JVl z7uSD98CLa_wXF;q@ro}qa_2Ibzawsl^xN63jM(vkcaPDmglX@mZ*hLKM2+#G%{k+8 zanPm9#b<1@_k?y={5U7)VtEoEoVk#j((JGOyS3xqx38UfAD-FT`^!tjLu(zDZU6d6 z06TCQk=Au;6ri4%zR$^x?gs+njc)nqpcpO4P9qLq6z`~D;WDEj> zYz;6gF^Q34hE$Fv-5XZuzHau6Yk_LUUkI>;9-5hwx^0uWOBy1tS`h&3?ll z7z*z%I~LkLod#W&y(1HhDbib$?Em`DP5=AC($5+3W`2T@qTc>)SknHkThjhMvm}4l z*e1|ATR4SD4 zo2qXU7}D8If2)xJRz=tT#?EtEkL?NK z!x1uw{HN(STl+E@!~Iy_y5#o;AN+w0$sMq4_w6@AcUW3-Z_RZM5Rq}XNp&zFp}jR< z9en~os1H6sYlj`NClyI?vNio#Ou#p@6Qj+~ zXS0rFop+mO-3?ftJ!7MCY&oc;sM7e&4pTx=;jP3sz9l~*z9QUhhr_WL-F)anyR>rl0_5*o;mnlRNGRpgb_|F9$RQ$>BYM@h^E+^d7!m0rbe_(I5n>%pqiJuKO`?iw5rR2U8X#-n*b1%NLbp$PXj^>X3md@LlN4v0N=4nINJs zx1-9TOLA5hA~MyXu4M;BEns?P1E?u1fh=}d-@SP!y*YkeCd|ym%_V=`kGJd-II1W3 z;ZUMcX75BmT0Rb(_{=M+F&~X$@Z;yi`VGu#@8-_)TKHSfdEkH#b>H`G!O!LAf$6(g ztlq!0a@3<$-hNWX??D}Woy>^eD(QE&!?7sQH<+-|M<5m+MvZU5#fxaxkQ5@TvGoGI zv5bFUAt|0M2R0KhswEX(38;TbG)#_Tx?J!N(5K~YU?1th&@YggT%Kg)%BPZn*zy{* z`jCX9pCP5@yo|Qe2`mb9GqR~u!I9IE$Em?&aKlq9C|z^Ga5Vm5)lf?Yz|NS*R^}D$q^tSr1GJ7Lltu zRmyV|K@#dXQ^QG=AFw@?KFbBfowSHXPO?y26o_*NO%_LQCK(g~A?9(gW`_#+DU2Z0 zPY|#G(JJwVjkWK`aMu1j?0ssp)21l>@Uq}$pV)$n+fVg-uHY2n37T4GsVVKOCEw7sIh z=%lZf;UC)EIA_P>!JggMM}^kJ_^Y(5i2U^4+-)w-XO?#)#G&`y6||N2uNx*pY7zMi 
z_?czeLvwT&8k$HcA|U$)rNbboPjl*E_?V#%f#~w-(A415fhpv6J`T!J=gvf)Jb;{7 zyXkCk$J&L_?kz<@@JpPZcIw|D#M`6n^eH$G)+yu$tO2KeH3_`BhQxO&aRQyjFFVsTpa*6+r%+xR_e z|DN6Ut`SPYAH(WsWXDaSf1L48;&yz|MTJMdZT(L9&rSVGA!C)WsHaM-Kl37YDtbY!*6--k zD7zf}K4J=RYRrr+G-IT5A&-i}S#V?|M+xq=BGF$c$KF!?SVRnV^ zo>3ab=m~=1k~jCC#?dC~uV-QYA#@{NF9Z4cXZ^r0?2WSfYbFoXA2=Ew?P3Q^NHJ#I zUrMD|7GF`kv*Jo8M{jze9_sva?tc6<%~HEE231^Da0=w5&Gaqf0UlAkbe+|}IM1VC zbU;IZ{@#8b4)NZkNHn-i#|e>HM~MiGuG4L5zFx#1gdHhvXnQULbH@{w%07TiyN5@~ z`lK(G9Zh77i?*E(6xJH9hfh`hZg#tin84ECJ}s^#C*rTX9_wCG!KdkU{Y`>*wD9W3VV<0C z?$Q_(-bwK$b;-)X8ta?z7ow&>5OkBm_CQ&PiXhDG{!L{iU=|^3FjZ($Bi;S8-^hW5 zmo8?k_#ounQCMmiZjw=e;e+wiM&y?7V3vXy;yI+RZSn29$RZ8L7mvMKkg)gyB%$zS zuWPBfBSx{dB;oX4%>`D%=HZQaY0^D6REwj1nAf}+&n|l7bFjC($QRW+WLkKqa=ucAjvxj5 zP~}tnbV{)#ts=#kkY;|RE@y_;LM?jY6N&3;ttN|}!lL6#60HrW$+4GWYLsw`z^tuY ztF0?)CT$8UjX`V@?m}ba321OYRC-r*&6E*}M(qSJ9^hvhd(U9(st906*)#?4iB$+P zeZ`xKnC^;qT$Fj3f#-RtT0*Zdor3Y5 zbq_2be22;NQYJ6S0Alj2T$9&k5@5!;_eCr9$!l+7cfi=0ytYrCcSYAWdDczux?tb^f z*7K)FW_-rSTqX!)$l!9#t1NV3{l#QHwRzdnH46+8N$BlJAGYOOl6^EfYAWAwKMd#3 zTPxM)Tu!AMVo3L5--?Ab5$71*a)6pOT?0<;c^$ua4K^tmjGDn|lg)pNvD9v`Q3kcN^1 z03aO(T^Fq$c7hQh@bL|rrqDMT5`R{Fgjz^90-`RzB6T9l4(8WJC0NdL?JOL5vWaKs@sS{(s1NvEeUnM5S3 zq?R@N=KcFO#|L7l$%0BafNrF0Sxw(=H=K>&wGSe-sWT`A)ek{>h-i&ufX!^s!Rq2d zk@RbVm~1W#n(Nd&xZkKp5tfD>>5dvsqsZ_4qGqd_gz9}1=JGI;)1yALyR-TSWqhpq#x1F3t|9z zL2hM{d19e+DVMKHQLb$lPl4{khvvo>S(x2V#$UuajH}-A;vA;D&R!(6wCvD+U zd0})?Dxi}hcXV)Y(0cjed3)p8v+dTq-aDoy2ALK#eqp*F-@U^Rd8(0F<$L%ey7>-L z;pKd&SPm?HUeF0)Q0s(n;P1Rt0os_5&{WZQ!}t+-#&%#eSRch^IzwkekAob(Mspik zLGWwBJ9SbRc9%977(j!`Pc*9_1@_IwzHE+&n4=zRwoz;zh7D z`ezR1Z4AB$j#mabpr`tlC$NT`=D-^t5q4&N4dZ_{>t0ZDlEm4s$sA2@!H|?X6k?mw zJSdVU*q-Q`cOdZ=SJX32cg{4FVR9~MXRZKCMWIQRllkW0#jiQU_W-t9e1N60Z)jtK z)XoNISSY#RnSNvvRQHav$BX>P+?V=NDk37(faADfk(G2eQiC=Pr2H*1xPnKEU+CJK zsJmq%h>grg_RSen$sxYxP$!y?mPpsrZKd0!zg)6|CV1MHkLB$4QcT0eW$+s#vC${CM`MY8r?!*UUH=5`h=^MAIkAiI|4V;Z=FrUMdyx91@$+43zNz!Twq~G*RWbE z{lU>?CRMp>n+NesNlD=*l8YYs;Kc&Mi@|Eu-%u0fMQ#wn4DGkA-@VqIbX}l`OrT}& 
zto*a}jm_sTwq9<(>Rt3z01Kw9C#S{aYFlbW$r|<;09IVqv~FARfAQ-6 z{oj+~D0*+)8)a-YD52#LwVgU1EA%qEXCH42x1L#j{H&7`(ao-5Oc@tQq{_6k1kiG=Z6H{$ z0QebHCkfZB;Ao1b1viXNO|imSC{_J}^}-x2Boh!R^oI}=N9{h3Fr-0CBUnXhwbA-> zOLA>V4(q!l?oVx(LgQexf4@glgEnTUKLJVZ)zDtw9%v#bud!s?O2zL?yw6)W31Cse z@rWMg0Vxegm{(crs-kF-yCoMltO}+JtWeCdu09^MIwEw%^(wqVh|Pf{Oo|rV%&mG;HgGA9*}AVv!Nn-C4Ju_che<9chS`q3rQcN@ecDDm1EMLI-G<03>SdVVnSKP^}21CDnpL%RCFI0QjC?AMQxm zHTKNDwNxP2iSnB<%%qmaPxGfn)^byDNjlt`m*e3XCNI`5TpdXntwpE2)g5N0bjZ<+ zgUwrgO>vy{wqklAr><6tqe>+_u2h-sHxse$O0+_YljNQy6?j;wt>@QNepw*Fl$7!Q zCBH&j=bEvxI4HH8gzW%Rd@e3>8?>bPE9!z2B)bcj+ zic2e<@;U)E7txzY%L^8ll>3}xDTNn#Ig!K{zlxSMHx8zI+4FR|A>NWS>2S=^x z*os$s9n7Xr9^ZF3my=cM>d;G-|LAl6p85Iz>rK$MYWNm%2rD-k-2Y}v5TD0!4%Cijtb)`u>vfnS{HjmjCR1C z2)=gfl@?M^cS1X_Y+I!e`nab$dXd*^5d|kbw7#rBFMdZw4975H1*7I}MGiEsKSz|Iuym?-GHJ>HixwlE0J8X{Fl zMkEZMg^*zCT>5BUDtcR-dhj`TMiZw*Y=|7_gm2usys5fybm{V?jyvl~r`>w%Y3nE9 zx;LXgX>f6uVp7D_smEcc3ni?S?+#-lf&rl&iJJ=;1~e&r{kS*otk6tqy_>aS(Ws9d z?nDcEkR2@~C%S}95?cxL9lNgWpp~^R2j0L zJBGBVptDGe=2RIXOT<=wSDV!rJuInshuU_MUm3L^op0%KgY1~1@p2x{f!Bj|vqlzI ziwh;p6~@4I_M)JiL+c(*GKrv(NUe7bLfFJM7PkRDk~kApFgnYjB|TpEvQs%Sy0t5f zlj{8RX#cExaJ*@1lp=M`aaw1M>j5_&3%LF$z_a%}{MjmKEQZ&5iy+{^OU3#r%)f5R zS$HdmO}(f_WC_cc4OVU;maH|IL)r*?K?YhEO&+8!T3SVv3LDm&+)BwJ!E8MOte6?QFe0#Yt zZuqzE5~B5S>s9p&tkiFRr~YNN-j-jrXr)WA=kms0u7B}x_11UlUo6#YM9-^Ts%W!X zv^HAOUhQ#HZ#p6zv6ih zRTCV08G}oHmz^KN)dB*=trPym+}%d6^*6{qHwgIKFVF?J+(C23Sd6IINCzHwCope> z^+>T6fsXl<1rw7ueIp9Qsg$sqmQWioF@|_6UaYlbCT;3PLr7CN_Q?qq%Fy&<)_e<8 zR9o(tKOaZ_E_a9*gssJ^71l(D)smEDIUj8d|l&yL06;XKr~bMMbCv zML5J2(&7Hm$Iesz-G6%6`S-JPGWyx))4C`Gfk(I(B6b2-CiK~o-k0|edkq!=S1MdE z5PMi*%0x^BrrNaJT0gfg6a^jxyHw*vKRe$!Ny$3Wp35Y$76K^g27fUK)lWM9&Z%wf zsbmF74LlS?7VU8WkPK&^nf3`1R%B4qD}1Kl*@9(OWCxKKuLtW`z2|3Et-L_M!QsV0 zAune5!LU}z^>+o@Y7(Qxsa(nQtyr{zYaG;h+lE!Gu{bZVcPUy36^!J%wNiUA1i`EL zXpQR7@wJX%LcpBn?cx6=HZ^@(om+o9 z`4zWN-tTY$Uz3semXb)<`$Mo6UpbXH#8RP|gH@s;*~&<5oUNR{lRXjw9sZ`VR%B^c zt;|1WaOKRxVF|=ohsLDl4AN%(WvLT;^^lWlou<+FY}8bg=BD~AW9PEbxMOs7+xTrr 
z)`?}=WU^ntVX?jJPkQ*Do;YY;=eN$2{~hHb2_w|Xk?n>aC5DLrfMZ~2aCrm{8W>(1Nce7EtAT3T4&P6PF>{5-r#y> zVda?!v81JbVccyWC|oqmEKDvPO@px(whjixYk51%)v^Xcu}c?j8iI^GJ)LYX5y1P0 z@Zwl`5hcyb@<$o>+n;J*#jC3xHN4DO|C)AVvip0LEKwaQv|cM2srRG;rI+(wCwkaA zH5sWVO$zGCT;34S48&7q5h_3?uksq#x^|<~z~kPDpd$!<&BHu)B6eNDhAvJ(u7kAy+9pA<4s<*`(pMZPjXzX)Ors1L z4wv0N*1JLS1P#QITLh$Hu)~c4{nK?|aC#r}iC|9ldo4F^D?^+!6>A4N6Fx}$WGz1) zyqzTU(^Ak5>oq^z{%7!iEH0X%Z1zN*by)duJJ0`x8NCCoSp~{8{3n05PYs&%#HJn| zZ9)Q)93X$y1p7oUJi$$vIJ&jarB{;xAer+GWD`Ea4Z2x}SNeAih05p5yEZ)k$&qwU z#9@D8govmGh8__Pi)qTm7dC7&;Rm4wlaic4h`qe&8C$4$Sp9TzbmNr+3Waa;z=W2; zvUB^69C~Z%v?@j>N~-7)W;&j2+~6tU12a%i4KwIL^Pi;mthCeI%eEL#ZIT@3-uxwh z!vQ5R#D2kgIDhOF0$jO0C|0INdR-u%M*?WJ{`UUqAvyA&EdJqt<@?Q^R*Btc>zef` zO$%n0(S=TgX>C-Vkbh9P+dx&fYY9uywap5WQ&+8?^E-ohg*%6&H2^6(y7RAo3@48EW1R)b@_ESMkhXeq@xQ~>;3`GM07 zSp$cFedx6BY!yk7xD-4p_J(JZ1J56_^Ghu zwOblL1{$S-w4Bn?HW7)Pj&&VBq5&!b=$%#KSs|-8ga@b`BH;VlT5-JMveb?Z2&%e2(s!B5bL6R#;HisSc|ykn4A`tW zMJI{pfjq}SMz!KB>Ni;ic9l6d?JXAXyV)5^J}SZtd6@BCosY!X=+tUf2^Yq?LV2Sc z^~>C~&|6qiBBd1%JCm@-5rv;X-&Yw1kOq{7Z=40SzdA^U?HQaZe{{2S_xbRXqsH|MBH z#N!-sVz}J2(y$0K*f%+RK0dp8Q?cV!r4!D( zDU9u559ZG|tjIcd1%b?JTusTOha4-WUzU0g0AJ4xKDw<3GorWlWpb)B@7KMQz<4sH zU&HH}n%qy=q&K%f#X)Z|3^PK0Ecni7x|f=!N9s~ect{EPNKPb6pF7jn_zuVi(6qhI zzx{`O9^}r*&;6dyP*)qW|2%SNhHhFZr?CLs6+(~|dK-wcI3Xvvww6rTe3v;sl~xgL z+7Gw!t=Z5j5m=e)1!ABmE|xKB!f%Ma8jKuU`ST}4^bik2D9)@XA5w0rpI)2o912M4 z^61mE3KAr`i@(@JoVFwJD^&`%N?%kjw3pmY0m zY>J1sitEfN2@_1#>8;#9sohdfxa+~@jk%-QHsDzB9CWBwYU@1&CFR;3>wwD>X4~oQ zT>Q4$CO&8r7_@d+f%4Zu_h5mXD8_9pqiW$wgDNq~l3zZQOy;Hz!M^XsfEzC>Eh+Q? 
zOj`wKmABYx%e^SaGE5ICKER?DHxJ{FTXu*bRL$9rE@4xxH|F|^#v-WLP?g4wwK_m& z{$=hh$Q0>Tp6X4$unQ?7!PX*S5l_rP434=jxIq}UG)lNNWG&|DcUbb73nVGiX7WR8 zi9#LfK<-JcY{!&REa|){mIjJWLfbJ|!#_HlEJGVjaffa1Mluq;RBF(2Bmf?$A|rw$ zj7bnm8Kss*J;z9;b|-pzO+-Vow@D<9b@so9e`fOGF67{oqa>ToR4s*Rsbo%@d(yVF6KMdGxJJBFe2 z-B`H_fdKoyKrfgQp|ElNmSBy#im42-(=!p+HX7sW6Dv{s&*=8Ywg08H{Yz_o>lf73 ze`%rprD=nG_T6qO$N>+e+3xEj)6E}%%#=`p2S*d#7>Bf>msH1f*zih;FdL6xtt6tU z>9Gg+v)1ux+LmH!7cxr}Hx&^!5VZoAlVCUL70Xa~Pthyv>$Rl;&L?i0QW_dtZg3Y_ zUDY}jvSXaFANzHSo_IoMvOU=jRth9dD;MDCt&+tMx184@XlteDbJq}ZRyDYJG-{SQp6qZu6pOMZ! zBj@fP42d?*SYV(9U|hW>LQS|EwJ5Z6pd_pg)g;iMymO&QA&ul)!fPu@=h#@us1)&d zNsT>w=OG3}zwilc(8YamZuu{g3+6k2>$%c5efx!h{|WIc56c#xzK-?FNi+LwDIzp| z8s2&m^|2Y}zkor+BiTo=s9S+j$P&iyX=24js)uT;myPeHJx|%xOsx?Nq<&AA1|2ws z7*^Pe(-HR{PWW%3$Ih>LwL!TiH_|pV5DB$;=CDQ#0<0j%_xf6v*;D_K(I{?&RT4LY zXY53E+dWjuY)fJbdaQGn+Y_}(nSd|QNJY_^Yqs!o;tJJKfeJ8Mt3#dG=*SwZHI}*f zqU{BO5xp#!RFu9Hl>*N9r1!F^rSI^hcrOZt! zZ-=mF9lG(U#{nRRf$MO!*sT-c!14$wW{W4laXn$@lP6`|U{Dxpc;d`edpm0C4hTu$ zd}fHvX*k2d>J*efZgFtX%4apRFAy8|{iCgt;jlnI_wI)ZmonH}k@rPm$;}O|4@+`L zgaa!#uJY2_&zMY^aXZPk#Ov6#1EXm)x$vZ(i4FwM#Dpt7)06g0Pv*}gY_UQdd#QEf zNxZk&cC5NH=v^$*KndO>Mm%VmwxTtVyME z#rK@1%wynrJLwi}itUS<*aYe=9GL&5<-W{?^$@IG=aFz(J<1&(%1aO?8H?M$xQ4b= zx^%l4sGNOGhtQaCqAG2(bA390az`p&ik|$;tS^lxWt3TafFqs1 z_K2`8LsLf3>cC{fWK>mqd(N05Q0E8?l#(|VYo4HvcXQr)*g7kM&#`}EL&*9xJN59u zj`)=;#3EbFE^o~N+K{#3uP?eNk!(TAvj#x%{bOgC!ee+2x5jT_?$p0KNEBxxvUS$6 z#W}7Z1~So(;$7%_0?*$qj>H-cR)aIGTH@mrT*=VvAIRV@XG*|9fWW+y{!~%682-`i zbLzolhMCXpZjM>uzljHetA;-a2>@arwRj0papRq_hff8TBkolmgu;|2hZ3*hmUzcv zu1LG5d;1x8r%XYc;x;}uy}0eg9$=dAcBotRTCdNK_(BER;W}~PIM!1%c{g)$gs@S0 zML~~J4plQO%`wn{=NwU^&Da~_+H0MnfhILm@nUAZR`(!hSw3VOp@_ejPpi36QHjYk z5>tp-1k5>JxsUm>VgqW3o#E}le12vxaQ+DRC1B?qmYa9vXxV!zMi_VW@ zhvrn$%n6Ix5p+k{JuYi8L+rbyj~IPL18Gh;dW#e~K64e~oU@F5%X+Z!>eRt(OOe&) z+=;{(=cwca8W4c2Rc*QJL$jfz5Kg;AO!C)I*YmHwQ8ji)Eu{CFiHz)`g7Un(0dk=d zRe~TWc{}INh5o>@;cu^p0yru;B++ZV1?TC#-0G!FyPWCvwqBHKEQ2Z3D?Jg{6tpon 
z8}{?UbI*s_7VdZ5qDbjY5p8&U-q=WqXOS=`b3BcxX9o`{$eq}!NG8bC%=37?BAf!Cctk%I-&|NTH$OBK_!o~xAJ~6g!j8d*n z{RQ0PDnrGYTJZvKg@*lhsgxdtkk|{f^#Tc;Q&KNr-q4~4y@2_0wHIjHAQQOxcLnwH z7g(y}1$6pO7i~SRGM@ATsT6@w@CEQue#nB9uYnk{go z+Qo5%r`ndfRF;@)*jGtIRS1OXr?_sCw5NnuNUZu7n1^y+kuVP-#(}cSeH)9H*J|z1 zmngX%^_xyiMK@{vuYYYl`vsoYAX1p1!b^GXK|bYciK==kT%NKyUu4D6^;U>L@SX5r z19##@bUg@9_bTOdpc;KyT;Bgdoag?4OxivxQCm(%(_`wt^}ltASIeIH)H76GC2d01 zmP&TKLcCxR3fx*Sxo|M^+IPt=IM(lmECqjVD z4A>09#FI=$M+gBLBVbDc<7DT%-@kwL!&(v&$Vv8&QLXOk=kBWN>gwuh1vrME-;U{t z@tOW|YZp(CXdm(O?8XLiWI;u7V0iArgyy{xV-jO%7+0Ke1WYr|5*2uR8}w&G5+UBtU4j_P$r_c0>3SywJXOM>m#bhwcPDw$Ld`w#d^Dn#WR$KHoGfNq zoF{dX>cS|m5nPnYkE74eFl@udj@2LTr`}l7&UHc-$h1&XXiM(7SbLlh-IAp;yUu&5 z5~5J;k67&Jjdc`|dA}8s_xpD|1D@w(A!9N8hq7^sgl-=X|9?VmbY95jmB_2P7yJ|Q z{j>5hW#6ArpZ`Hg_9tZHHb=NW`s3q>9P%gRa>bSjLc=|pOKMy3i~rM>YH8OA0M|1%tdYP=2oo*9$j&+y)!quJQ0;ZdPx> z^e~%bgY|$p(%2auoZ+}lvq2~m%;lT+n`*XI=;&mll@v$TUl0g} z6kcsLmeyH!ZM^3&m(I+HoL5Kfc1=?7IXYWMDc;$u<%XbGdpfImcHISZGfhBkvKsax zA;j9@Rqv*BcCAH8Ej6^OQ~`E|qvog*VTQ{LreK@e$pH2(=t-#ct^AwQE;B80S@`Y{ z7d4lRvgmf^UB!~vadABq1?Jyes9AeDn|?d$z~I=&@}5Bed3azF|x3?(&NZz6Q>Cz=TL62eY|hw!@fTb69Ne@0hxO z_PVkSe`rn+IHM0b)A3uco0%{=8#i?J<;WxgiwPDh+dADm#`W_qy*?Qme z3*duhgbUxnX2?Ogk;2XcG}i_0PcaX7Q*xhGBKMe?en&d6=;K87?rzBEDf)f^qjUQ? 
zd$`aS?~v%nS@2+f7eEZ~EURJWjtMw=r_*l<9G~g0B>TAss$8B z%(=NRyWLd?<6r|vStugsBUFTcH#09rxq^r$DGw9kHKzx&Au|DJDGJdnw!#-S{e>^r zFy4KcYgsYG9kUcrP<|A?P)HWORAZ(SkC*3J>Nq<8GM@}-D{ae)BjvPvkiL(lKXMk} zISAwBo-$z^nc4D($eEB`cJarBvZUgY*g{0JrPUUs*%P8sGAg9e*&!^)Y-$lsI0 zm$A7nD4_c*bH!4lrM*F$v!Bh4!0&63g54%(mj_RDVy82)o}J2`Om?1LvM3H?xN+dB=cXlhS)_ zSe&-t9mdo$V+^}x?ClOn)mM29y)EO?_`OMb&}fz#rF5^;Dx~w_wI$)zCE=AN;hfOaMOv2Dskv@J7iN!t{J`7j zT(0_Yc+s)LTR4>L$)&F&6)Hm6=5J&FqE8gW?yW(#l~Ld}D|e^J03~N;Nym)ls73pR z7Y?Mk;YQPyCRb*v8sp<$D~!QEiec1!pX@I$Y4PDsurb04q$HdR7qq1L(Pq*2M3S#= zrZyUBmI}ghfgCIe4g{hQ?ZgcU4Y_@q&v2nR&dl4pa=R$`n!Pj?SgJ*yv%+q>H)lxU zY&N~-^Nwbqz$N&57k)JQ%)*j6DC!@S$oIfP9se!D(xU5gT9SsUryHkaszwnQV;G|vUAb)WY#c!fEEU)~lwoQxKM?P6KaOEXza!?OWwf z$&Nmq)!b?|Rf(@{N5(ZtJ9jZ!v0~ZU-A}Xa)-g|48iSz{D<(D518zPQ-Y8hD#)TCk z)dcr|Ios8W*6?}*s?`Q*AX5+S?#La3DNI+a`Zfepw}^TB=JzCZlk@A-n)n8Na?eQ= zY!O!Lx5Z0iwX|_wE^s`(Vy7`iTytA?7j4B%+#(5PnW76afGkP3Cqr>c*|GhwDG6qRR7a*sy&m_ zvn0FvOa`kD*)usUUTO=uQ03`5`J4(#3#{asw9oUfXEJz}H$0Qx*ZNEhBA@QhBzm&N z<>kGFXu&ldZCuX05qw_cT(ZMY7i?^Jb&Va)UX;WTn4!tK`skPlf~wSK@}$YC?A)EU z^GLbRB+%Po5I6aP<=)5#$Nc?wTauRWi?+{lWv<(i*B5`2Mn=|z+5lhao{*e}>Z0RW zvNii`50}dDWIn|{Oq;Ipx31V!5B>xxcVKTFu(dbmfYTpP@4+)LD=~YfFCXM;fusPg^NokP4LsskCb)NZMW*)J0_h*;6-n&8l}k@YoE6gT}dIN!)ESQh1A! zC9&6MrpN8~A5dW~-cM>v>^GEtL+jMGO5iPd@e0B)_s+&VL`0~Rt@^c4UclzrjzR#N zJubO~_)MwzEa6(0J3&=4m8v|K@+g$H@rAH#=vc&%w@G|>* zh;xG1_06rwdGcv6#yVcCMJQD=(79XI%d-2P!Z5u#B z^T*1<#~us<*dE-21nK?2M3$3Peg=~sKQrZI1W)e!3HRLf)1OTGQrPhK`^lJZ{#~Nh zk{vxD3ex~G2O-TCdNJN8SBPEbpT9FNQtK^6@9e@FZ&YC&Z9y5@5M39WpyeZo?LOh? 
zQ@WfnIVdw|+*d5LjI#z#Kv7RQmqIA1@VxXzGpV1R zF%P4;#}M!8TUxPxGN_kIBXZ5J&O!kEx4R8%-@lxG>L(aVs$9Ea*VUHtx{vD$@vZLV z0O=oHK#b3PyyI&u?GV51Z|M2>)Y9LSZ5)nsH?iasPd@aq}m>dr+fU`{JIP>xijyg`cg3xC>J|<1>XpE(gPDDH!a_ha7 zC7x6`%fitO`p+9~oF(7e>1)D-b1$$YSp>G6qh^qd$2gW^yu;HLHQq539(m=3-lBKn zTE`Ii=SEF+}(iw3;I;87LJSk{7S#C!`w)!?% zCem2?&*tpsCTi1SZ2YvB>R2#$Xg`%$Kqdf@Zg>mf?n6NQN=4~}(#sie6ku66pN{)J z0^?@)A@4*47S~0%Wkc{f|1t_1pK`q|t4&_HGc^?R7!%AodQQ0ej+Zn883%taPp?f^6zl1Pz@oKkf{4m z4qzsbH^etfuEwfx0%hZbW_2;+|!2T#w9Thu2yo`!tr4~6KCI8SM$<35$lub-KoiRY=k(L z^A@{t$#NVUygYDmDLQ}mUa4=DW;~jl$}k|5(riYs+{oct4pu8gxCZ*w&1quymIFPa zb&D>z+C5v*>Qknr{xEMeWi)HBpUOXQLbD$Jxd=a_%ooL}8zGxeK7dhC@;_i=?Cj;p zj@hOa0-L|S_{=>^!4d;)ZtK_CR|wkYpE%DYJ$N-f<>&02e#CDdShwf=wO--(48*=# ztpxmWJQd)-6mV6oY&4n%RZ2An;85l?xx9bi4PqV8hfjZAZ=WQrl{OnLUETdQ>h-FF z%8hEXMl){ixkJ+c0$o3Op8m^cX(&Uya~LL1XQB}~xh_jnYz0tET3$GYz zv%Kj5?zXECCO-HhWSc?xG^w6l-+bqg45OHfAFo6aL;AhOJJ&Q z3ub;ScQ_E-VtD(Pw*SI0qJ#il0D?}~RaE#B>+#HmS+v~`N_fXXE_?@P+d{U}0Lr?m z+>a)dKRUbQ$FrJl0nP+Nn63tLm;^lO-ChpIz2th>yTH9?(!b5CzZv0bC5KrHYNZI%`5_|`>qD=t^+q>6+);Am#~m`Ni(JQ#h{70mFKn!@ zy1(3?pPqjn&4y+%6zfjyeuiwW{4U6WdYum40ffD_tlyJpt0xPcP#Ztt$MU4Z!s{iUI2s)B>(^c{DLwRh$$!~kckeg-Z?g*&2y+A* zwFolM%+%(({SBzim>S5XZbk??8Ukw(Vg*|^`nwTR1Ce$9-3STGK*aEusYz|S^_T`I z)Hi~SS_GLPB;8!s-=_ZRGHJ{vK+P@?mLEKT(lkf|(})oBGDO#6AQLnMR$?GC)r!;K z<~o0UeXFs)K(3&2geq0RR8oJN_O}vK2B_Hu3T)D_`375|j@rZ|aR%!^wFrhSqh~w_z`CGft~&)?Hr_e3|(Hp66`sf0YQ;Kp1e1R*l54Zq5y^8 z^`?h5Vn_}(d>Eq{V*|9gUW;J?qC;a?fHv1FF)ToIKnxR?5>>v(r z+9JCajD??%26(9yrs)XD-N(yUmekzh zzFzwImHn3aMV$YsyuMy0=Wl*a{_DTOVu|bIr{v{Np^}gFlt;R}N;SB|I5;}~FdcLAJhZN#dl0=?_+Y%`k1VL zOi+`b=nb>36zi0|H#x@BP>TgE8`60hCoJ~#*U7>ABEt5E z!$+}8^&f5X5NEbV2R&385ARNvcTC5uuR5GX4Q@_`gU?Ulnqp=5M%TAFy?P4M=X$W^ z-W-`cg?l&{tKe2J+k6sPXIi}Zyp3FUZ!I)MlGM&mCUf;sG1}wo$uxPt$0I*a)oXE4 zj3xT1r_#-qC1+$$VZRyRb%~oXF4ZBY?r3&z?k+B$N^~%oTq*&R>M69N-j`!MqdbG< zGdf};@l3jlQfh>p%!7`H>=evU zdGZqWdaQv%1{n zOki4W-~-^G_pmB{qxN<9)yfL^kcw+e%680=>60}%yu{oa6564i@Q5uu6c5InG-*RM 
zm^_(ub`9n0^~pr;;r8R3w_I-Jq~rP?`YrA4lZjkO?{2O!N7gyblgMGHU82L`!{AB> zYIcnO6$uL~?sp|O=ZLGN+>>WXNGn%2w4iLd<&CwDg*-f2ci%C7_%vUCUk1nMny-F; zE`ApMIl27dY8Wg0J-zA3=-Ka1zi2?c`u}T2bG{qDCW|E_@ikd6c^ftBgD0n6SZ}+s zZ8vr^ID0Zdfmk+bkeXGIs}+y@q4W0WIGD6mD|QX#&HLlz1FkQV-TmX_7$)QWVEIcQ za)LPERww>Iobw${B=u~RGxD_aqsd>>kL|aw(*0IPf7<$UoMhxC*OSgz#d&L#mrr*> zFaG8(z}Rg7y(5Jw6@sRTp6@#el4i;egxLA2;FR1BZ9Y-QLIU{`T&E z>&+AR9N6Q!c@DePI&626bxJ!qvQ9WWK0;QGGHT~YI!kla`E6OkD+g3Ybh%tAV{oca z?1%L37WE|*2e*YJ#VTen=0Nml^OYVtQ#kIWa1I*WLd&KBTcL>f@hQaKENTT$i+7ed#j_qSsnFbb#HalH>;z5tb7G- zd3D_PxZYcFcI~f@y0<#&$Ld$$R=xta{5?A=lX&vC^u-NtT(EQ1 zHgtj?KAt+l+~oZcMqQjb9PI8pC^P8Cy*J(c_wCkxx7}*L-RW-c9VH5Tv;ThUQQ-dG zmLV)v8KNorkGlb$=iVMIS6;|8`$CQv4(z%jf~Wb+L6L?X-jgZOpj*N%9Z5co`eY%^ zEv|!_>GzYxJ()+jv05gN-fDcZ$k2hPk)6lN-qmyXUNyIB3a4{u>&CzeIm<4mcNKM7 zZ8lIy*DEBHw(v=`FqS8rI6sLPHg5%O$CLPvI(*yQ4cp_GPbPFbATyD+8hvQLZ5<^Y z`)m6bE(|BmXPD57TU(e9b0Hq(|1ltLGUXGclGy8CC~;g z$=+jdp*wlgRC@c)(JT8s>bT#I?8=mD(!n3Mo~(9$lbw@y-DVE)2Yc_56TA1oCWxKT z_WsVhHQ9Tlwl}lKbzbV^O0Il$vT_1KLS>To?+=pfAbIy;e?Qdmv8K`vL5^?sxJp9q zU+gu9<=Czwt?Os;Flf{UaNmq|OegY8&N5q-V(!rUnSTboHRj}1Zx6EXbR!|%7th^H zo=M0(5W6^yqt>&14n-Iwp7ywUF4^wko7PvNJBpYeVLVeYmEdpQe^nai!gS>%Ux}_- z`G)w5=+89cL+8Njf_R!rI^AdRw?w?4FM3txIo!SXD`9yj$w%Qm!RN9K)T?#hKe#dF zFviZfZ(=q+Q+f4{QycDkz49{&T9eT;7-1cla^FwR=aNth9&UIB%Pn1hfb|USKE_Ct zjrG|z?lZ6fhBqB>jljYQ3ajTK8Js}i(JGB=j95>p(=~U z9iq^CrmQEDw>hpGt(x9VAX^7}?^i6-)TF;7$pN~!-fHPQJq`15QgKz=-`?6!vi+d% z;|>w_jmYhjPmzal4+6fI9v<&>j$gz34m$tN`P&%J-uUE*3eu17TJ3*N{=c984Q_*1 zF$P%-mStyr_GB;FmA4MA2r6c~Q)JCN%rjC$a7phTVKu)OoxQ-fPKFbF2XU{-k$M^H zcd+(|CwQ_Z`6}cj+e<#|zkm0JzxK29em6nuxxKf$n`FC5XYa?IQ1}uSODmk6K}2lH z@ChTOmak}j_1~`HnTzw?oz`)8?|8@TbJCOHcuK>Dzc_7btK-A{JreIOTHO+(4vuqp zyThqvNK@U;;iE>{K5FORJ14FD;{caLF%?;4N7cFSihXZ?I5_yv&hgQQ_VK&c!A@e| zAKvY~e`lXdRnkL$|I;g(+oqB3w%;Ed?(Nf8%QV@;a~b}BX8RG;({m}a^TWLzXBjQP zy?vsmY_6R^CIL&5**4a4;bj-dmn5hcfBDM`{1@`^0*yNk_7DX9y}q>dZrcn`+{+j| zh5%*cq7pNe{9dGgN&n*T5l=o3$pQQ@Gca+qvxCmwK@595TY`uzBLTe1hr|mI-SMwG 
zHF^rsFx&;tEqv1BF#gGW4V~+#XFqTU1z%2veR$B?-u_`*a;D5vSl)8xogN;rAfBkB zsM^Ce`QIbojoAHPI ze8BsZ!Wg}syq|=dF7~NEGalI62|>;c`wXHN>EE5~wV6VP_LoZ#0#3T^gTs%xpGQ02 zNl)7C*Sf*1$*6mR5*{=oc0TU37ZAVB5lf`qj)UtzZ3Dlz)lSwGHA@xk5w7l5viGiS z_6yrPIv0u*&iNfHjtv9m3?e>kg>rP`bC2##zE57Pf1_u_t(WmDbbjMJ@9E8>7vH>C zPg~Gx_VB6QqJ{$Z+^aDEn*8-&xs=AGJ$4K?Yrp>Xa{c>%v3s2Vvd#sSu42}U7`h$6 z;m-Oo5sPvE{`cg&zbE1G^6c*k_db*V`+xuWU6S$dTmF3`n8%Yai2=w)J>HQsb%VaV z>(4xdSFrCCF)(Wcpc|ftp^=*q#q+H0 zMLm7E`#Wbh{rtl&^9;wz1a4>7i}E0ce>%L5Y4M;5JnC~0IJgp|TUNTLXkQNh^1DQB zyEUI1ob7$bU7r!x$%-LacER-$Y|yyy1_Bp#uxcL}l)Dr`cMg&lK=iL`TO6$OTPP#vH31}O+16c*nF4#1^EB)!M|_#_rHaa zTyO`@3|+k^ek@SAR(o^%H?w3dI1yNT`M2b+-Y9v~?2Qu!L`Q(#hfVG@aJyl`1<5mg z`H0L%aLJVjecf*+x1YzG$=L&DND;4_gBhTA9nuZ%OuFn(xM6|EPjA^1QRY3KFqa|< z+x6>BSu5*L|B@>D`&fMQf)N(?At(c~AOXSC*8I<5%$=%Na5tx{#HBlkrI1}VA$C!o}4pvFRH5|;{#t)V0=4MhVmAKtf zs+MY{Iz9+DO3l({xl}HfE9GjrR<4)1R94odmQtl$sZ=m%uhc7z%0{JG*{qhZRIgO4 z)moK%Hr0)4v$|O;)ylO>ty-(q>a|8~qt>i#)=Twry;85%YxR1)QQxRH>zj>Iqui)8 zs*PHs-e@$q@xwbyrH%4NWuv-L+o*3eHa0ez8=K8iv)rsSxxLe@Hyh23X0y4uNfkFK zev_;>Nw!H)a>qkWn=(~z((hb`tMHp!rt`s7pZf+sxOlxlHv@P+e84@Pdl{-;?KZ2F zYM~GYg=W2aHS?hd5Out@4N@-BD5_PjraPFv=t)M867C(Se0O1BbM6lD`up>9G`n4{ zugZN}ce7s2r|Ux{!kOSZ{>4!P4#xfwi#vP~<`Q3odH1&Pk?AAmQ-Gt=>4-qH^bch) z1$=3>94t3tXte^^e`tmk*##^v)fkcah_FpTK4# z)D(50PNFU})J>QOWs17JRg0R>6PbvzoX9kF1Be!lN6q9@GZ_l(UpB)n?;ob;5&j_9 z&5KyE9wCkrg`g}134q1mIYFkfJiFq2ZQ|PfmqE9gPm*RLeZ-DDfyD;>Y+Kl^SF}) zr0YJl5CrgadfiR*i7IizcOPn7 zjD{m#wGI$Vny_$BLra%?@Orzng;yPISNyANJPbZ&Yt2wkUgMeJY={swdCmJ$uetm> zx_yoB1{jifN^tV`O7FWN`}N4ku0yb6z-rcbkrk>tkjRZHUw2f70XP?cKOp_+aRB{( zKzzsB#IG`h`lkuXzjaR);R&Tsb-nGDw;NgZmXvSg9ero|etHOm;e`TdA+x3+F7WB2 zAdN06P$52Zdf0TNFH{P4r*XEy(4)5eD7R|{Q`Spx-?LKVe)!uNo#WqBKHX_{#P^K`hG2d=HPjw>7a;mc- zsuS|=jslbcg)Y8|Us91LSf_acHBV092byJr#10DJdiyXF`QV7Ui+6@pmyL2}y2 zM`|M}t(R--C1myc^>V#h`nGc3FQ0FoZi4No%SQ6h+%OS9L7Sz!iKg2|Q4F9np*5i- z3J)NKZak(n41uHuegdrtg_Q~Pte{s9AOb@y8QDiwsTPapBXs6NCBhJjLOX$7a}bXr z3e!8n%;!*Ew+HB{eGJTEph5Xm4;eR=$a+k|od;_Xs#YS{EK#cd*z=c3UtmZHxE|}& 
zeQ1Q1Wq{)B4L5uFFkz@#xoc=5^L@`BxsB0{GXr$3%*|!KCl8~mm{~UmGr&)I#6<+N z9PFOup@7V2-UZ=VqX=ezVFpH29{-G$2aFxoNPa1AHlCqDx^o`9Gdwf_5kMQs5OFpc zBCsaIn*LPz+2?jR2p7$#r@nZ=FAvzVdKEaoT7 zV)eoUUK7e)?yx(UOOFQQ${o-A(q4RzsM^xI-KK0=V-NT8Z%`~p&EO6~7mas%E$`(( z?8ivJO9rNn!|x{mP=I)h&X>G zP6C*Cgjt(~837!oKj1HAM&N-zaZTqii?os4@$We?)U3DP-%T;f2m5q7(GSmq;^O*~ zxhUo{7j*gXs=&A><;u%R7nNT3f`{DL0J5_)S>pMkixMyyFRdveFwvKvaTg?DDV`nI zfS|R_4=*VWE(9!1;3P7kVvKNp3m`Gx0p@lrgmJ0Y37yMHl zPVu6mZkP8414j>;0TOx?z#FUG^QMQg2q`WGI`A`E3SJ9A@`B-$5@`qzDC4^T<0#o+ zy{$rY*dU!5?Dj~4@6T^*{NOBsC!3V(uT#x}KZ(&WXRgK7n z#M~u%>7IbNNDYCtE8xeEXK`CVs9PA#2*F!IV9g-sJR=7yF&>UXk4p55Rg*$5Bb2Hw zmaB+cL9hbLcDA7=JhKp2a0F`xsUHQc2aI~Yt|OQ|)FTnCi@!ia$CJ8E`MMtiKAqkN z0EWlcJ%7Ts(c!}S902PJahV_qbg{JVO$0N*XEj(6Wx1GF#|6e0UJ&h$0+ay-0(KR_ zEC;*8JQNU*)04{>(!Gda2Iz>yIc;_xQksJZmK!--tpJbCV$yC7W`Kc1ho0&7a!f$i zmB?D5-U9E--FiM*RVuRBrhY}-n;}(q6v0g3s8>O;By1E$_F3XboH(ZFMG)><7lY@o z_}hV~*zPrE+jr#G!tk+7>S zq--I!g;tc)e*xW(t9d>V%zA*5j)l8fnlw=k`cwwm=7_U6?*-qf%kYbO#zRm8^F^vB z=A9(MSsNSFGj6JoK%BjMuG=ikutK{*r?ZP=s7E; z`eqHCWPNi3oh8v_!s?r-NNcsyMzdPltnpQER_e_r5mJiQP+T@@)g~HHrr8EcO!T5? 
z{p#!}E1O98F|WGxne{q))@pULy0K9%ZYBW~8&qf|>M zmo`xu(_!Tr8^X%QMumu&v_?ivmN$w^-q)d^D*EP4YSFACTHO{gZRCbdoNlg` z%M5=RRX0Of-ypt9^hUC(PXjIt5mj9snt>7qq=XfA_dYP8x#WiM!md2 z1=Nv@7i)$RrS}gx(_!j4`gC)HsoX^CUn(=uw3tC>T>+4ArPrXg67Kz`!v^sY*^n zGsIPo!6G7%hEd-p6=tkC#@M9(MA-N;zex@6XZ1B{B;8BburgUOG_Ju^iMdso{j`q- zP~n86T%z+doiury6%Hcm%xs35V-i+6gBxnWq70xWDCL1fH`CE|#+1chAtfZHu}Ozf zA;^gqG}W%~U1y1E_HQtSnHFwfbmYP?v0RxaxJeZy3UQjtc;}&Op+j^_b(86$VJvAq zHJHQ9P)cTw``ok`p%oiC{qfWd!b6d)FsfEkrC9VFkuX=QkbyFbnaR_rv&>iuP3RLP zhSZF@W;7&{m9jzK&|#b-)fqieh=4}jHktD*K1$FKL;p56)dzG6OOPQ0U}ZoeI6-21 zL2K)pz_xg3JEO-mLjfK0)L9b;MYNdNOUY%VJ}O}pg2e;@sg<+@HF)Cs5K!8n(cEOZ zg@o#G4w)bqRYttZnHA&3q}bSG$w6JAbt0|Qn*|PG2IQCwqOb`ug=9fH;RVfV$WIHG z#o5q+(NvmE%jjsxS3prOR!i^;7Lj_EBP?q}nXYBJLb!C!Cd$b;G@;kfE67)kL8D#p z3}_&98^kJWW<#?OI_EQiQczVI&IukXkqot3Dh?GA_S%u9O()he{b_83g`m?(NCeXqBB5_aS~I&=r8^Fj#>sYtY_B2<;7#=?aw5<-4V6GRIwA-ai0Ar@7Ze1elin|;tndPxg}e(+f` za?Dlc6st<7#&in301aeGGh!Y~y`&>vdI%P6m6c?}1Qi(ropFN&F|nbA%7t+aX(d){ zFmgUC4UxDRR1ikadSkUBE>KliJcLDbL6QyBommQ(fs@fBW&{)xQp21?tfC*7PVh%@ zaO?dh{YP^dcJX&64P(Y)Q71tUsYErRQembzOA^M)fVuQy_N-$FHmjUzz(~Qqn5k3- zVhJB$B}*((|A>&lq??;y#T*de#KknybRw)#q=FN0NnoOQ^etgbOQrw=MLD#XwL*_* zT8bng5J@Pa`=N!92p#w9@Se5ItY&t^%yho5@0KtX+(G+MlWQcZZ!I%`N zIzV`QZG!ffdTFZGX$^z`!XhDnav_vWA{ci`CNLZL60@HPifoJg#q?xKAagSyB0$g$ zhJ+%J4IpV7Ii zX~)8e2^m6>I;2MsSnd3e#fmt}08u%n7NXTAH6d3>Abq9zAp*&$`NGY_K_?Mv6XKZ< zroxOhnX#zKOm8S4Bg^DNd8`qU)dRy{WlD%%v3MYJCD1{I^H6PsDz*nQnLwyRAP+f1 z=CyEH`Uod=7M^7E4JfLHQ6dZC0;DbUjjTJMWekOgGkt5~vB^+2wvr&3m@qD87?TtB zCxH^0FWCTLf`P%|Ur0@L;r zIO|g6Eo3Buf+))jLcm4pV%)@JSPx7?^*M77krN&VXON`L7hQ%7M+CD(yj4;n(+3hk zziOtkGlV9}wqD3yQOl$un@ zDyBT-4(1wr6iA;(RcA5jjiaisN*W3J5R$DG!W5x$2ndounBG*970T%82fC+eATF*A zB6W({GDS;_i;Q}xrRx_;(&7{0XAo#B)1S^J56TcTpcq!3SPguc4iwo(_>LJMc_ZdS zCbWALXMxpIQ>Z5ufhS1*V>)O9hmZkL)ZUiau0f&EbV~e$$TJEM1a?)(pY#hnki9WO z0oR8$Lgi@=f(z55R8cD+A7DaYpc3>2wbO2end@6eER~^1@g4Sv{2$SV+0IA=B*b6M z8YV8yf@iatpg6=8I3VPgs;g0mYoak)8WMll!AMf3gP`Ho7zyW4dBkr9l1?NaAA?LM 
zW&pBw@{&8~T$VWlD{3Q|M0*BiO~|Q9H4w%SLRnwdQ`$H|fwdzhE0b5X;n1R%=pz;= z&Lbwyw1&ouLqX7ddiXhP26`{fBGN~dnA~t1BzM*vo#_f93DbaSNh#C^DTxWN34arn z;D59if}%|pdm~P1;pXD#%qBHP{Q^lv#v~m)H{_~aD8s^pfQ+iCP(yZIzD)K(z!D`8 zgmANN6bY8X#I`x4T^l0=J(mos&AC<``)WE8G--oERxCQ>&CDMrHLH(Kgufu3*scPh zn9-N;!}=2ccAYC$2;Y}^9rc!EE-@Td2IFS}YXBfQjFI-1n+z=7hRi4GLUp7;!qQHw z1X9b~Vd>MgkWTuNJrLb44g)%3MaYjXmxZ0n91;%&M?!=)Yn8CtnJwBlhzOdTOlu@M zFy&apwV0B*M3KxQ85B|++YER$6NHAaonSFQF=?+a$k<7p=>r!Oi_uoWn#W)>Hq3PT zoZUI>0VzQ=0$G>!20f%wh!|8)mK$0wq8!jp7G}rTF-& z)5h@WKxt7RyRPocGX_p-Jo{G0~-(@6xzHn&LN|1zTZR!{Ma*p-CD<5^KIN>0L5dqGTt@nc5<; zD?uquB5uhPv025+qRSD_rJ@R1iej_1)XHHBKx|ljA1#sehx}V)l-G`jI+s8El;nc5Yr+y@~ zzL|I+1GxP>=Ifq6zvvElEjc~2pKt~kt4Wm3b=|RF-j16HW`NHg`$d%H$9}a$4*aPZ z7J$RM5v{uCHIa!2G6BAFK;eWigk>>IP?$?s5zKP1JIq4??W{0{bT1;90h*;_bx^}M z|A_C!1 zbEZLe8v)D&K01n|srNd61k1e|Gr_s=J)gXH^$F{9gLQ!f2M`1I6<&TQpk)U82>0Q$ z<2k6lvb*EoLTeO!>_tlynU~5Q1>B`}AQ+fD6>WAUAUK;boG6b@xw4=3uv5havX1|_ z3~^uvWtgCX=qyhFZl`1ZUffz3f4CXU-~s$F)L)S#j9Gy`b3uont06B&2)MYu>zxh( zWaoilhzBzda+QVm8nDV1hU?>=|Afley{lHsMQ0bf`FakbnU44@`Bo6QXC9m}6Uno+ zSJ3$MZ=VUe?emwK@;40I6^|2y51!}ZBb>lm1I`s(u5gJ&phv2t`>L}iklnAkd41ow%)){m`3`2$MOkx5On?>Wl z7L4c<+ZRCuDIbWm6rzDVnOl!kAm`FT(^AWfPA<0KsjXtb6UY3a8mMp=&`pFa4-^;} zyg|m`tC3k(je1n|s7_Vn`fcOBmtcLRbxrtx~_^kL4QI?Aj zRTCfRGgg*fKnik16{0aKo%9CHoDLs-tos&{iR%faq2 zB4mIjU&N5^MFcaz3I0c_dRLTNL)TsV_|$a2fe*MZkCJ&5_Zv{$*FED~AvbqB10YC2 zXX+FaBqXcE4>pwf2P)?L1IZ;~p~T2P5MyGt8DRbhCc!*cnWw5W^A8HjKPV{wXcS-y z$|Delig}&J4<5U!1}TZA-eF~gJt-Y&vZ5e+2sMUf1_fiSSoto9d7tDTR5kzL1)Usg zv%*J|u_$nAx8u%-gR8SHJRJD}_xxS0h6(REaRqa5b2rIvZcd2$)KL5v91opUfDpW@ zIn}NvZEjN1AS8uB8t#%~Xm#@bkjXrKXfgx+t;i|xyhue5NE=l%=+ck3qXrBtDS{Y@ zJy>>x7jnxbWl)^X`7Pr~ zrs>QdFEH!zZF?O#-5=cZE5h^z#$X2_O3_p)X z4^s-vx7_Z-Z@I}qcS*pod!hou{<~t#u@X9ewlk1oclP5Z#5o7E$8zn0KMfjAC2v;=gc9dk_a=fFpi~q2Eb12E4A{#YyI;-7w)8?{S)AZgHS{eevDd&D0vTiE<5^F6w?zmBDaW!gQ9rDTBiq9#k= z?pCkZOz@O+I7@Eeajl~R?MPokc#bdcMyH`J_S_`>?jCZa5|=$S-CfU%rF035tQn-m zteFUIpD@_wj)fbYC>nT!66bt8P}v>u#39clX_pC(|FS14^CShF386EjvB>G^9^L@$ 
zp7-SU1w_4!&UJdav5S<)ke?BVFhGrX*^i%ol|Ml+poUy|Dlo64{T@p3@py0hXlK9j z%(hmiP9`nbxG}f7<7NQ42Ck0XEMXE2ga`aCYHtT+4w6n2W&304!ZO_mIdLg5fXEWG z1A}vA-QzsxabkjU(SQ##Ip-(b^s@AgST(k-ekuyeQ}W>@hH*WBTu2BY*98KIVYubV z-#j@Vl1VDIwd7}BfKm*svubGk<*lzPeT{I8#j~=^9yC<^Mhz8kK|{q?P<0g0P)g+y z?!#151aj|;zr|}*e7egC03vq$AP2wYdPdOThJsbf(S~fvjKS6f~bz7O+Mfv`+rS)V_yQ_wdC z5DlZipcXxiA7~h<&}EMRzt0Fs*yE1R2@ll~O3WbtKy4Ut;8@GHBcdR-KPV7M+!Uz~ zoBY_$fC=xlg+&cI5X~UN7Bz#alxe^T4HCXVCgB&_Qhp>lkb>Mc%1(vI zPf#H~Mg&r%42}9OTF^AaLXdIO_5@P72_>-}rE&QubYwmq^Hr zfL5r0kBR~s-Fb6`tNaNOCaUq{b)6n!iWjONwCP5pkIl7fsr$ul+6XoTOQ1G!05?Wo8KS=X-Ag%H`)<}T*REzlVh7AH1lWP-P3`DnHXF6Fj*@9 zE{IlH-1B5toX?--NV$~c4Go?_U{_%KG@nIK(`MR3TsIfx>+79IS-qUF#=uJ?i_`(= zkw{if4)zY-Wbg2ejDDrZ+Y4iMP*=S>whO2!d8q-;&9iq?dj*fl%~_5)sdkFrK%HjA zGX_as8Kfjh?I~>hLjC=_w49V4N?MO4$K^_Ks@+5|0}N~{K-s9|I1euK23HY=`^043 z9L#dCJB$cf4tCG-&>Zac@=(Af%Ar!y!GRdsKpe^P6!_->PMG(%J7y+vaLl{rW!2i=p-QTsb`9CbHoy^&-6ivD5b8bDo>DlAJrM6pY5gUm0<`D=Il`Eb@V z4;TV_7@7d=dAdL_F)e>Yp$N*?J|80FmN|d1a|}7(Vr(38Hh7-)5}oJ6hp2qXX>9lj z8k>?7$O9sbp>GH(m-JWcfZ_dwjUld>_!Id-$#WyjZ<7Pe9r=Mh!IT8e*MgwD7JN{P zx@FE^OiW{IuwRU=5iH~UI0S&|))<0?vxQ)%XtYVc^92)>7tDtvp*5=ke<4-aQ6?8- z?sUX*O3(9_Qi92P=l~#ffD2Fu0L|Bfpq$}E7=~lLz~4ypfk%^DOf_}d@hWvk9fz*)Bk31JNkXk%|rH!*6b5(P#xC%$y9}4o3J1-T>&U52UVh0jfSwK3;tMQVvxIu!iOR zNXQJ^E40T)MYhLyd$tGkc}%qJL6BR|&U+(1jTe?orOwyJKtY}_dI-Jk~oBi2j*Cmpd_PH+AD)>i(+rikKT^1Mxh5>BqF%r`jR7_aXr#N4PGSIEbeyy;ww z`!&4!Va4mO{fWl4`DkKbpO12cIXVXJO=V~6ezA8(D!AudGfK48slFiFX3jR6`yk3s z4IoVo7oe#DWD~&sVgjyDF8AmM3+UvQ+qk7$RZx< zGZP#M@i_V{R!!--kP_+yq&m3()d^^%t4PT@Io=Ml(IjJHW&QCqyPFQBiC~YFm9s~& zUiY~qc5r<^q<_Shmtc6U#FE&{LGN}+6a^1&mc&h_XG`PGhj>)`GQm;XlEl2Dak(Ul z9)-m&iL}Xo`eiy;5+6KVF7C&%d&+)ES{~I|maTbk%Oj&0WWv>~3mCHriSiK+>g1@7 zNfsW8z(Y)Ghu^99!pSRoxDSQ|%ptiA;sUJn^*8CSzop;2)*~nPlE!pQe8)3gQl3j9 zW;S7PI^i*iRPR8b=}&2#1Q!T9N;+QZv$vknr(0m20m(2h9SuF%vUO(qFl_8Xh?7h)#zUiChMsb+rz;m-9OpGA+^0>mF}WRM;_%JEM9K-vM$fy z(&+E+?!0U7%$^F;XL3ZO#@Sn9g%)0^>i@&;8Mm)wc9TjMIB^e?oetEVJ9&iVC}P+j 
zVuf=>N#0K3>L0!FgH?Jm{YU>H5C7@(^5;BCUxnE4#7uf1%8Qp?bMvc$Chk--=F_^k zfNxJU40m9Xq6AG{i zD%fW5=Hfr@2Aoq`&QtgJv>x(CyQXrLV=s07_Z`y^LH`Vy595ZWg$#+KkXscQQevY# zSr4bQ^d%}>9!mIi@>;W%K|UW({()CiTj7AWKTZGb;BYRh@Ur*ONjrT@kEcvBI}z5i39eHoH|J9< zp6Nwqjx7Kin>4t(axF3pKfnync%W&fL8p`5=^4w|ymJ)>@J`FW0$wLvtFMsLQ}XOTdx$7j%0>N%uU4T-%KXSMGHn{+z0 zn?(v8Zh1_|M62>~Ci}x{dJQsi)q5}rgY1nY7O}0S>?%#IcZT8o1v^k4Qo3G8f9{U| zqHyGdmqJmNm|6_SPLzTM&Y{x?LktsGc1c)ay024EJ_+b~3K!9;I7GYQTsIXEJ9RlWAI6FKs-B z(0CG|x?X-JzO!8*~|4r1I+BY zUt3RpO)Z;b#G6Ju{c^=CD1+n_SeM z#VS{xM=3vxg6`p2oH8icx9m#J`rm$=tn1Isse~nDl1yKJOZoHbZ{H-}_a;1#^eX)x z^W|Nh1KhiwuKoJ(+sl{h-z4kbe3Pt!qDPV5pRcX+j8c-yro$9~-ScZbtV9lygNe5f zIYjhunjxPm-j!gfzqO#3DuEedKY8{O0)PEY1;j}J$0P?|0sF8DxJQ%L2p|)oSm>yC zeR0JxVu(#$>;javUwzz58rYM==;ZqW@>uAB(!;MG|62Nclkl}~z+LB`2VW*HI``MT z+nXs*HoR*czCTWn5qBq&yW!;$a)%dp2(~YiL-yp8OLoF(IA@-uYf!=<((+ivjK0gW^+dqf^NCFv!Oq z6Tx(v?!+z8J}*sN_pK?GLa0cHPoq8{CSWGT^;*g}&fZGn;)~h|?=HE`;QVdnwV&nwdQnDy(}$^BFydy-SWA<_HJ2Pvp4`*7nUE4;w#)_2YRDyjE`h zz5A$cLsa3@+aBEw=AnK~m{zri{r>Qr&;z9X^uTQgJ8mDuyOsHbNOxrz;M7kov#DZf zr2Z8<=_*)OTf+;;4!Q1;;THEj$-rJop2>)}F;#Hy^LPT<52)a!<2tfOxZK(^j5Xtx zJ2g+5du7r+wD-%`oByT~Sj<$GPs3eD8oyS9j&Mw6oHoUwY zkt#lVdN>$U0xONJWgA@tb#Zuoggp>#XhMYT2anN}IGnzwNZUVCCmDNoNnlGy(N1W* zvLxJVXSeAlMt^ks(7(i|`DxyEc56<^8Qma1J0m637@U>OZj;W3W8(2E?;`1mk0(s4 z-gS>LJbQnu$>IYvs!>Nl47hGl^V}U+eogm#s4mz-U1N@5hg?PS7L%MRU!TIgwrdDQ#U0=>i_b=cQ^ z<5{XXm&P^S()pULmhQ*CiYg3sOr}oDiS**)M00#`LFvFEacV1;^TI^To3qi)eV$SQ z;nE5bdVDkISPapReUYX@6`!G*MLJ_MzRz>2#q`fm(?{ut{nHyQt+wV;W-l412lqIRXPJ#|QLJPw%<(#( zdymo8J-^I#&yU=Ge3b8}Yw^%`d!6IZ7B&=@?31qT0nA7Ebd(Zw4yx(Xh#qggpPoy0 z>RG0#+1_Bb#JhTu`sN2r4ZNLeAPn1#!;v-ap!X%)8f5RGOV$f+ z<+!*)zKhU&n|a*xOeD?|1W8~IOffG`%w+6RoTGT2NwRxLtDZ|mB0bfW7c~8RlSvd_ zR1lI4)CFAin@+=KnMR?lAKNFG?}OQXf3%aGq^fAILXHK?BJJP&JxjRh{#1y3v*ovvRZlyK&Tp~(}X@-4jr3M(w zbCksj*_)_F5wEXl&yq&sIyZTxo%8osfc>jG(~0);*<@0f#2cqnmDY6f+S+PuM0~uO zh(a!u9}hUrb1#1&Ln&cLdNw8O+~1y#B!C~noihm}ak^-e2nto#%OyI+k$|KlsC4bc 
z(F;>vd4xG>q=0uYgQwHnAUfrFu`;9xbAv@gxM#SmA-gwY6v$^Oa+e(mNMhYTdfjQK z9k0VzjbUbbP!f^vJmv6LgdO3JR_M{(Q#NF}S+PJe>NuNsEPC`{o(W{yPU^iAY}M2D z;RkiuFI;2L+;4Y|R9Mi1qJTg7wOV=AsQtFX;Er$Z(LhglkuPO6s|t4H%XX86LOQ3& zi8W%)J5iCxufr=X{sczn1a6?ywNsSXyc%m_v%>Ff3-~f=NrhcV$qKy*n(}Kj)ux7I zLt9)i+B-`w)>Cw2scaZEk(S0Ywb7JkN*MqFJ_EQX!sBD2nZpqO^f3KdxMS6Z~Nm>(|_Px3qeam0BF8&I1oXRILhf9boy z=P6r~BX;hF?6)whA;0+X)3v5{k^i=KI_b`JUkU*jpBnwisO3D)HaAvtoW8ADXJ+<4 zccTR=1L?f&AwtIXj3;M(=IjipK+qJ=oGG}y_TqG%(cson?zkI$tiWLL;Cq-AsVCWe zF(w5=O%R)rzcNI%D96%72t556!r`{6a`3gGN==9RtSIDp5CQnuKV z7qS^Okx|?|p$|FP^wqdOl@JhkxHL1SnDOF(r$p*=!*i;eeW`99V~h|R$SW|h#|WQ@ z?XXQc6Mgba)P~IKd2$M`0`CVn0?O+acbIMvb~wm9JEN)es^4F_C2UWhFoR(xJ3BuZ zh1#9ZxLWxG8AG3zW{k1sAFc7Lbk{1qJyYrZlOM@=U%r)CRO!iZJiTKa{U$y34gI#i zbzH>UatDINiBQD;@ssHEj!sr?zoZ==6Gmt&BJ6iQ7PIT@vFn|QZ_OgKxYfsI9=-cK zCVXzm+;Ht3LLqHZdJUuqYH|PBIoX*}L_(Z*cUs3?!n>@DL$<0kzcc9#oy6?xq3Jjt z-`%od=gJqGlfE5zASrp8>^ui3>1eg22P`BVFq_8#^uXViMJ~|6V(p-bdw%JZJ5OrX zaLUEgyFE*QK5j-Gv~ofb|Gcb6^uJifuzx+0f$6Xwg2+k{!X6lO;AA4gKC+s*h=J?$y(W?a?a`gz?Bk!UlH zr^lvr5mT4wm;S|ZzsTk$m&0>xl4w|ok_dHFDFI`x>?4!4 zd+gW65t$7zfN97p9IHTz$@Kvtu?VR)ZgSn>Y>w+FwbF;!r`&?zGG6LNkrnP3dS^K&20HR)20Cs?Qc^2XG_4id;sH?D-K8FwXCx(~%yHn%;(!_mx7-XhA4|Ic)Gw|#K< zG5w)+gt@{m;t-PzkdD%iRpvISz%Ztrb> zjLBPiZhd`&N&dEd^s&n=cNAo@9YZ4Ur_+wcylv-+c3TH~?;Kxg9kf0cCAd(Ep%9lB zbaZgPxn8HBLh>*=|JG@r{62wO7H>;6GMF`@9a`ALbb`M)e*&8Eg@i58`f5WKj1`5HV;(E(Fi^gBK|d zciKlgL-KOGc%o zoSaZIF^?luv7GL^QFMDO&7LDj4whn%Cc*yuqmY~)KR7Z(h!lr5Mlci>5!8*2&>XhV znB>ky!sItL-Q6d};lYRf{q17&wU={DOj;HAov}5;`{A{I-a2_&;D_!1$P0zKH8mST zg<=dXt)w4X@6*E~i6bCj6guRPC*skZNa2ePQoEOTm|>0|^~YiBT}Yk!1-X0GyO`M- zu|v+a^PyTG9mJFeJq$!1!Y<~Ew$#mBu=Ppyng`2M>>g^1F((D>Hfm2M5iy-lQmqt{ z>>i>H**(}Vj!PK3-M4vdTIzGA(UIn+rjOs@2gyfUz}KC-Q}J-=1RizA(RQAh??%5* z^(^oZ<(r*&D0i)*1CEVQis^Hn$R8ZG^4zpv_x1q39|>VwBVpYpQyc`h=u0g6_T_-n z$X)m3r}4wP$)lcXzy0@^)Jc<&ZLSY+dZE*H?l5`T)Xl@K24y9^g zVhV@r$;A+X2l_GJ&B|N2p^* z#pAC+63hN5M=p>czpYN+O4U7>Tc;;)^Zea(+LSz}#&|;)o5YI&Q$-liiQYKHADL{jxd2a1sf7Ns6svirEeQf~gLI 
zgt^%FoWqRZu^qEQ3OZ*ypp!~U>=(Uor@e4GTSOSdKtqr%(xdia;Q+vo_`X78x%m=* zDP<_z^pi_!kuHZv35rHd*e3C z+K8Zzn3(Y(Yu+W_KFre#AFGbHTgBDo@!I=GM8u+8v$RLq-)_{lUUzUoy=5HIL?ZfT zzDfI?M$Hp3f!&khN2}f5ZXLH~vUdW5kzl$l?)Po~Jz!;riXG<3aQYvOx3u9awjd>4A>&y7?O!7xfS#S~? z)?H`El6s$0BlPI$8do}-jh9KeVg2C3c(p=d!KCnQt8B!s^R|VfhB+C5z;bA=#lK8; z?-&~si2R5m8^zqVY%U-y%};+RBqG*xtYfz+%c^3R`L%w)6jWTE)4@&!dFvYSA!<89 z6^iM7AHj{fB%jFv1tvMLVu^f|S8NYdB*F_a-YPfvFIGzns+N7`fJ%Cz1^7(k?f1vE zn84=9Y^tDLT97)d#n~*d33+O&Vf>L7r;|9zG)s8*Ub(S{9KEP$deFtLoEg$L|1HbR z#m3BJ1b40()|ySC6F#y)LyES}_Vu)t6Ln2;lO1emNfdKxE3ROgA%(DSK zsfCo*5w6L`Y4g%ptJSAx00^;e&#gf;Ecc5w`^*4~j9EFk)tbo*9XvxJTgO*v>z-RM z{>ycnbo>252#O>ku%@F{=e`ni9Xw3Og$pT+z2?SZ!8T?UCc_~4)Rp1 zk%K}O1&QgX(8#@f)0uac^ePLIV)^BcoVUK*_BdVa^v(zNe2HIjovk4Kb^!AXOb|mO zkWR{j@{x_bpdZo`Dc|*ShsywzJW^8grOov_!Sv@)KPivNZ=p%W0&|bZn3|=*JN1A@LKT3 zaxqy0?)5m?937mkhUIaa=i<`Of&x z_@2Ga!%eiI)~_D6W_@5Z!Ne@OCpi`-TIHC3Q~kx-H=M&JWgpkU8F z*9U$T$i^*{Y^^!FF&k-Ty1a60Ds!z-o-g^g1_O}y;TOOD_JXf?i^_d-_G(AucD)NL zVnjFOObcaLUODK}1E>57MbT3-)346YA1B6K$fr>UxNH`sp6EW+Fx4E{9zB&OMn}Hxi!pV{!Dniy|H9Jw^|!S|7y7uQr+;f&O!lilj7r+Q$+UaN z+;?b`%Par2Rzw&Y!mn$IUr_PjTDKOJ#oFw87ar7`&;_>}zegdZdwz%0u`86bC^O8a zavYDHpOff93-GB^T=k zYn{WG!8W_ewV7CGhDr&@>d5w5@{;C*W9nx$r7qHHm%%<;X*hE8)!;URC1r-#ZTRz zSry5h#9+MKbK7S;rW$6bK(u{vTl57kG_lr@myzhz2<;Ci)wZeva5Inv)stD|l?{HC2Ei+bYm}q_)hfBxQ7RApq}M&HXe^AZ znejK*UuYDkb>?8=N#r7m%gh>liRs5sX6z(BqXXmyBDC3Ae{dLBL_num%nHL@c8%^x z_O|y$b55gZ)%Gp{F!-1a$B(?(HggJMgJ;P7-kn@_++9aMH8YVmEJL|;=?|xJRG<+$ zvzvyozSbbdtatt8=C+5Ul9`yqkL`RW6S5q*K3$%+H^LRfow~ZOk=5Rfj*ypi(`5UW zx^_EC#JK&Gvi`OPD9lJaEy zj%=*vGw@}8@2R2U{aECyAWR10ri>fbK%sOQh}$O?bdUnFXZKAFude=@nx6oADDp1n z7e|p{0-xO=M`^K*%qS|P5Q=ssF{RaL$qK+v4I>dS+=Q83BOwm5ERQ#fBi(v{jCibA z_5yU@XErdt@2zjCN5Pt{*o``v{2sqI?-+8;nt8O?HY0d$(J8%_xS7U%TeYhTAtXE z|6n%hs{UvxEI^6`_?e_(`p%#|lXwB`o=wivIhc+n+@#G4}|>)lAr?(P18 zMuLHoL4WkRmB(0$P~%G-K8o=#>6FEIv1v<^FKF8=W?~CbHDM_m85j=`Q`7;4>Gc$r zsZLhJ^?gyq&eyd0D`IuXXVl!GW*>*jMTB8O@{feq1<#7WLx 
z+QC`xtyM;wb=?8V%%e`sB94rx@DdbeOE?^bi4H^yWXY*(S0M>r(O{LZ1*f<6w-KHE z#c@)^n=jq5IZp`Og37DLMH)}bodXL;zim1eWCiOnb$qbFD$*)xCh&x2h6EA}PZ3B( z$3>R%f?UDg?_3@prdlp{EkX27%Y#q4r*iNE{-~@t4v%7DEeea|5mHj<5D8Ct+nA|# zbu7gOQEF(IqdH;+!PfJ#_$6IC+pl?cCUnTT7lOGlQaI<$XJ*T6seTnBdjyw8#tQ=Z z$ny&!{obva#=E(ahTt60Q9Z7ZPkRsHK9noJdYCx>IBAhK>|LLS9?v~) z!Sfz1zNDDyUGLrRNL8d~&lM(1DZ~oY_EGI7qd}QvV{@xxk^;GMzObTDC@D!Ul5FYV zz7$VV-m`+HHe)k`pgZ-NCRRHeJ!EYpqzlTn#J0dB-n03cOY3)Bv9!=B`4XDPNFrZ# z<14c2>4BrUF)y}PfgS|e?$q5Mk-X;Sor%Dlki^z{GKd_Mhne)@q`WXW&0&ng6?vFJ zAMoCrG+>1LYj${+m7BCB_-<5hPBDhsR2j;1~-p1{Z*edwVBr1J`f47A*$skX0Yuk3eqgpA2FT+M` zr3Ai+)f2c4GXpczHpf3uJ9?o<8yBwM4h9H2xy*x(QEk=8;*^c9Ep|_l1QB7Q_mx?& zQ*kyQ$nw5>jjU;1Ic_vlVi=Gwqf1fk1%~swo`r-*$(s~4z1A-6{!;1S!-*L=WDRaoeqs-#|d5Bh{tM~JYsH_={U3Q zW-YnPT@fc)PR`w!=a`NubJmIga21v)LWjR)NY_W6wDy;msy z2YJmC$$9iBN5|T{g3rlmZ4B@#pMsi4nuuBM={M?6CbGM;xkhA#JiHxwkOUtW61RA> z8%npKHeL~S5{TV?=aw(30$S__uY-a6+rkbWd@ZktWJ2=OC@vpv(j(Tra(TUyX^NYf zi?j7Z#{h^Ka%1S`=E@QUUS|RbN9PMfu*EsL9OmcQREB z=}=8@)eg$)ao|X*^iDh$NBL4D>dQPLexQ33`dogV>N@#?qH%>^-=d|<4J%wbd&x8Xc>AmbE5rF6Z($qQFcIRr1B<^NSO=}a$nnA_g)VD8tfC*85>isfXU?CS#0t=It1Oe!g z2A-FU*-eIJ$60}n2%k<-T-4!I$x=M(;boXf;pEVo?g)$R9o+CIFaGNR@31eD7xHW^kcquyUzL0@=h`{Y2WVK??U$}lx+FLt+g~u>tLv2PmEyB?_NtsFF7B)CZ zoJW3hf(fRsUAf2wVWHq$eX;ip>$ z$A#3|J3ZWN&~sa8mM(pfiO$UJh-(H>8**K5Dqy;w*fX)~MPdxgWUXk<6D+e|e2?J2E3-Af=DeT; z@B9Q+d2xGpN_t4bkPQ}>kaeDI)x=`eWS!JG5qZ&lkp%bneV#PBPS3ueMrF&w(2vE4 z9ewmCMjV@Kp=b)@*U20gq_8w#JB{4=5d#rI{H)`z08d3l6&pG!# zGqVNo`^$e`Ea!Qi`?=@#bI(1uu{y9r#Wa(yD%6TeBZdsV0N>q!rukSsvLw zRk35L^Ny)bJEr<}Zogxy-J-?qQth}IVgdA!cB%gDQrygf&RfpscB$=XCn#1mb`yn$ zU8(F*N9n(8irXWsp3cdp5QCTyktwMeGl?{VvQ%=q5vgG6*y^3FnT^dWYj4%l*jHFL z&X#wD*2?nQR+u|&(PyfaR*44jrS-2`Xq89+<1oLr`fygS&rrX?OtX8ZI7!of07plSE+g2naM4Nd{cyq!}KpT zGZ{{sn*~h{QM=)~XmRiCBGQvZZL)k!wTcfNvwHyRr_Aije~0QnZX9MEtNz@X zo%COO{kI+eRW6#kn3b-ntS;}+!&rOUeRn?Tk3MeV;y(7`4~l_B%qKd>@pT;Sf*!VJ zie7M~Tzl)u-@*VW)%7MH zTBVF}IlgP?o~d2a{H%zMy|dHn=GD+PCQ#rkKx8enMw3z;sMegni(rC*t{q<;6Tc6x 
zXOFdH6dMm*#l2OEDq%}ejfTf0MCC`K%;{1+-J_=Vj>3RAp2SYL-r0&Hom0~Z<(ARa zrg3%Yn_N?~Hhz0c-SQs3FvPB&WD9MTob6T7CV51Q@*svRQNqd{)m{pwPzfZ&<%xet ziX>myQL}n{F}Ro?aYO8=GI$kn(-#6-6-EJ+dJMg}y8h}3bu*{whDJjo!x`%NsV$*m z2A@c_S#y(TdK(s@mNG4*PAeQZ8%T?|Swj)ndDSgSY;pXpQB0C4YrYP~vi`S0wRgYS zZGw6_}f)=BT^TW$?7M+`Qp<3i+dD0Y~@g7roZsPK@*mVT;n!PBK z$Ou#>j&fv8&>H-Bw7l@MEQenc-sM&lISQ$uUNx1JB0pTj_e;IZnK1De`w3n1S_S1f zaM;WV8W{NI4+hXprNz<7GP!Yxbxek9?a{{+b}HZM%hJQCsEB71uD0NdGMK8$a>&gOK^wf3>vhF2o)-1dYm7ogg@$SqD=_TV&EW*< z)%%gUh!R@I)>znw>4VjTVa=}4c5HS7$;cQ9iv`+07d*_Vm^ibhPUE=MMRu(iO`A4v zdIeWM{4aQ+!TnG8*tWp`Ys#2EtEkWd79XWoOky{@-u@Q}w9I1b-v5G^ZSc|7#a~i} z9jx3-n^Ul}q1#l{&7I&KW3Xjaa@~g11Ug{0rL`hUTQQo*sy8tnQB_Cf2;-E4hwL?O zbk)ec28|xaiLm21^>SR*&>?&6J8aOtqosPuKmT>?(~AGy9EMqgp+bM}0n6L*+79Qg zUAtxvrXvW_qcVG-R!GKscy%bfpW+!bw&Zd>EyuC5P|XYghb~&x$}T+P7_J^hdk$yQ zb5UkxZKaQU$7?7*5To#T*(>bL2KM7SX0>WQG7dZ6D_b$NM{H`|h8}BN6mxxP(-SL4 zu@g$CM<3vksWdTR`wRn)!LjCXIx;q^cUW?t#u*0>WK}z|zg>Nl)*5Kpq6Z&LL6rkc zb*r7dr@K}znl)~5pH@Wyc=tTc&U-2KI@~($VPtghRYFE->Sil7NU?X4-bJI| zY^72=jc@IXd?{xtVP)(CE?!h!8V5rLr%~*IZO>-S`i#QSj>N%6&Q$!48hiVG4k&M} z;ySu=mM`diHG?M_CF3bw0#n=Q_+WK@*TP@#tlwQ~ol9bXtYc3-*RG!Ka;Pi}IFu^-|WgE8%L9Mp1qoi|kr#b+?1(t*y_S zTAdv_^nhW}cU`dG%A983gQAp#HeOmJ*5mPEtb_=QmLc5+&N1fkezVv&mU>lvE%Ixc zdstIyU2p?m$&f7Q+=xu?a}&NEPg-Ijr!?+W5(;NvcwQ!cT*F8B#P-b?pmg8G>U;Q(k2qFi27B=Vx@`k+FXNkLRpWPMOvF}p^i_tiq`Axf%nq0;## zaq1G1cEa3gNI}0>$BxYK<6H}&G?u*uR1r2w&TPd2nvB9i06Sb-N497#QbbB#f2zS=80W*wo;N6OwI8lguTdQ$hmy{uT z$N2nu-Pr6HwW7MQCXjIuX9v=d*A9rxS`*MZ9YFf6HfRKeqZ$Kd$54~T(46bWmc*BV zlO2Z2h0pc5k8RazlnX=Tpveo_0qc6lq&I+reZ+kTVi|O?#jpnFF%d$h=%+ZwZ%83g@Zr4kCA#5{~H07V#C@TlQSg&-xSA|Bz97u1xZG>Q*ZqrO) zQzs^_%2kAJu|`v6u5PSt(-jnIzWs^=OrGts1Pv`YWpB2uHfGu@rvX)SZ>ewh(G~4% z#GPgexK53+dRu|bI$ZF>)}`WKh*+_+_u@7Wpy*7Rp?hc>@jWnHbyw_I->(^!SkyD7 zuT1Km7YaT`x?|SIlOLvx5q5Er*8NmcGS=^#=#kdL9plu3So2p?ON-+f!bsxUAf7&4 zT8j`Z`Gt5-sA)9b+ImwG)u6B26pR|LrRkSM-W$9npy|l3Rj`E=Oev1L=S?_|Rv9sX z<|-3!h->AP(xRV|WTrnfh?Olq*6d1?tSlR4Nvju+0X{2J&51oVbzL=o;fAFtbEZ$z 
zH#5POw6}y)aiki=C>HYCu{EA<9W04ofo)Pv*@f=&PNr(vVl-32HTc7(hFoK(qPVo$%OYa;2xzHCFvD7hWyb037>&mZa#o=XD9qC`tIM(? z8?yTiXxtHP}O9m&@Llir~&t zU#-rptAuOQztKc!($v{lIfFO!21QaQVuequXJD3Zjmotwg3jM=+oj83`PNLPY;1uj zNm*?qs%#Mer*sZW%um?A8zx2ed`uCfOmV8HFy(|wK{(W`xzl5QVc!giSTJ=u`)tCl zV_G8W4KZRu-Srw@c_&_z)Mqu8O@p~Kt2UVJtQ({vD1+2HBXgsL%XpxQG|Fa#A5gKD zT&S&$aw(39VqmeU8Zm@wy4n~u#;!C*Ys-yI0@VM8zv7s=O(kT~fUX)T6lq=cv#U{# zrIGsu3`YzZ)G%^tH@-*iUrvyFJ}~WC{U2k>S z$unUgL$-|R#ol#iU;P*;r;1}?_xi0i290u249shjfluOC!|FLy(TZ6ZnYkGz{6fhj z&P1DYTsg(rP)V{I^BqfKb$)%>I|s>6tf-k(Bn`_)jTkbr;sC#9k(2W6E$eIlbkc-djZ)qYey*M;NU9c^4I#T4J+*)fWH)B$2_Q81^> zABJ)uoJsUwo;A6)QhR<(kMg;(%!NkW=bEuT;o9KeJG24$38=_kb<2BnOn=Qu>nzl)-6E?Zid3DE z-OUd*+q0H}9?FW-WG1ALty4HKI9o0*oEsjOcy`RP(_#(_2!`xE=(iTk?CSZA!UoQr zKC8rX6-Ic|7UHdfeGqfAWB9LrMHyvhLo>=@$_f2jbBnS)#hslYkCVAxF`hai|Z#>M%43UM5v3h-!OWLC>Y0*vjJ$L$1cJQGl!78RpY#*tq z^m;#PLrrZ|5v32ei^z8757jvsIrk3w8lwfb1p%;Sx60jXrcIUUT(dkZ<Ke4;tYn8ZghJF8TQ zOC!7{T3=_@GUpWL=QSBz@(VdMfKu@_a*XI}WdP9}L9z_3$^7zJ|5C0ozt+sNe(oia z^d%HYL_;J;PqC#2qe7rb^J&yuj76iKCe1za$b@^*LkP&6H9PM$R{EBfr^RY{`u>^?RFF$I!gH ziABRNrMd76IpSuN=uCAe)`TSKV;mag6SJiO7gxz7{)-i(gkSvyH8ENeUz&8KRLn@D z^CjsZwfIXS-)m2dQKL+ZL)_=%oFt^1W<(vB+F%Zj_*hHy)>dZ+93~C7Q6In2l}HWX zQ?Us9TA(pJ6?M2WEAl1mGeD`W8a#T?NQ8wgwnIy{<%6<#Dz#I?wvlW-F1miYlnr-Q zjvh3uDLzB$`;YnFfKL+<88`+#YWB>rr9%+74D{`S15n`&AU!<}Q_8z%xv3()sY_(Y zXXapE{DeJNtE9$a4~+4V{SPB6_r;l$wG4HbW_gXvhL~wg^h`7HDyzbhtA;$+mSyGa znrQehVjA+JuJ0Hvs7VSk+9_?LF4oBii=)M4pS|DL+z9V1I@ZFN>=IfSy=auCeQe6c zWEEbQ>?FyG3DP~t?8!6cl6>@E;TriBc!Vlu)^t`)gnjU=QLsbER-CfgKyefAFqNY*u(Ctb+uldPAy!2< z|Hl~$vx;eO#6oDBo)|sGFM=Jbd3Arx#!ntsPDLQGoV<_GNqg)Zq)HR_iX{FNR#M0# zlDUbv*1B?f^(>oJFwaQ`8`%`+7FG~UUD2ZlX~qOIRarer`sX4!Q-S8xG~YtikN|Ep z1lxXLVc4-f)R2T7o6vM%tI4yVFjvf|onJAL1;L38#?qilnMu^xi(hL*c7aYn)?bsI9_@Hqr>gB6hx*h&ci)v(nkM*!*I2O!uZHYe%ArFk3#i zZZ7%54LB27=HwAXmfF!+rL01kMUFw>8n7Uz6LwvlRV>yqJS5d6mVR6Y#cY6W^kO9u zFx40?7Se1|6TjFo8^*>H-iYEd3@<&KWGgW1+sM3}_!kEMNKX&eS=Q%5eQP>arHd2P 
zLW@{g8=wqpYHg#jwvA6LwFe~wOAVI%u;3_mqH3x-pqi#)R->q!%dtnbW)=z?VXz%H zn0c(9z?em&hVNT7Y>;&FxrzSb+i4oh^F)bJ&m<+OI_nH9d#6!VoBRzfnOhpDImVcZ z!XU6|4yE%XLx(oTAQ-`CSFla9!7pf)QA8GFLnB9ghP{xM8j86XRLU?$$Jr?-im{C{ zoE@89DW~velc#i2(JKY3?1_FtT`(ost1?D!o5yCK9IdzXH;7%*sBzU3Cn{!+P_;2j zIX#Eg`%y&-Cs)J-3(z>%nBZT?(_Asr)Y0-tRH^`*KBBCM};iJYBy6OdWjuE(#xWAab>h<}_VUzQXJ4WOXSIQ%n&WOTao1L&NmSan7yPp%$QsnWiGY$SzvDeoUo9nk!|`lrA**d+CgfJ6@yilSJX}k2AwGr z3nm5Q$$vrqYTi@Fr<;nw$^q^}Q@vwWTvzAnu^t~Pw|7z=gsetpZY=FmEtC*MSYw9X zXh1h5CeuNx+o-xenWVkkq02tf)`Zu~q8RzCIxvrh6zHNe1U%PfpjS8wCh!mNvE{KqVU%w-zp`_(Ch6bBPY zbf~MHtRcDcM4&g4eIW=Jz6=pC=uQjk0i+?*JvD57enpr?)Et{)JCd6ybbJ7X!kG&S z-~7y+J(b&h8&4ZHpeFLodO17WN9V%_9wE=Hb5kYO@duV04Y|jjNkzc|Nv|Np%uY-w z%E6HTSc9AT6&ggyHKcM819iFDkm1Ein`aoHBlL~&wCW?9R94X3XLMkNjrD_wv9)GO z;9QE^#Bc3lTc}Jv#165iX@>k*O&DvA_8Pd;Gcq5+#O37L3T*-JJH|wlH_NC+ItYQm zWU}oOSDV(x37f)+B6WK8f+qNoJ&_fl)$^%{yct);j+b`uK&hc0OT9LA3yZ4` zJ8I(B2pjiMkISCkw>QCPPZ?6fa<*j3o>gAV%@7$%PYa!=qsZ6QRakeDOi}k8qyB85 z89{@jG$ln)hJy4dzGlj5z-qgQef^%O;4$Ejq-$;>CQ<5!xJ$&9Ru&)&i@!GpM}WL~Q*Y^EpV5 zB7I$(jFZ(HK{K-mpolabhvEiP6piAb8b-<3)@gZ4D60l`qft1Lozo`TA!V^d(f%01 zv7$vsh)M1DJ@Ixtk~)VD&N%KGXPh4&lWwsv$K7?*{(HJvthZ*PePq{95>FXZ)MeXT z1H5R>37e!Ol7UnlV>nu1i;SJbVEaz!o;kazgo7Hf9nv5dYO2|@G@&1EC^FAt6P9L< zQ<@v(RknO!^=z!m+=(r-pEkE>`Jtb8l#B;8jN=YZJAt;Y-fC17qhc1OWRnjiK4kRr z`002`)3xdNY&K6vnwgz_m;>3%2;eff@CgE096}aY){HsFan9G>4MVYiG>la@BAIG9y6gAoPUF-MZ|2nVvb4Ko*UmgZ5R< zO+*Q@UGz^!bhmicQvW(_kNsLLOEgGACa zVCbX&wW=%>dBIpvKVo6CfI5`E4f>kWTEP4R;)iUb`Owkwz@idbSkS#BO-fdH4VC$= z*s4Bl484q*)ZJ*YFl!$|-7t9dWY&){ObsT}h%2{QJITVDIc|eZ>hTOLd#vJ!yz5o8 z)iTvRffBHDXGCsl*pXqdv|1EZhB~A&1XC{%y{hZaHWB-{%#|^##4f@Jeu1M-0W^zl zaaQ1%XtEry0~H6wW$2iR#rdeM8e#Si2b;)a|4#BP)btoqz4<_Y9cMkvIA8p) z*o7auq8r5;@r{XRO08C_fC>7)B`o@Fl)^{-H`=ueVZBeNg+&-KvPK7U2vPRwIr=?Y z-QO^lI+-;NF4i`d?&TLmGLD0OSuDMqYmk|vd?L0eyQns-*+Oqo2gP%+?W<#2b0n?A zy4251+u?zVEFK|?Ru_>%Dg003QIuaL5eYeB&A6yq4A*45$F;Z?S+*1xyT@|en!Fb! 
z#(cY7x6!#7B2iU;zFVDgJgd93Fiiha!z-NW*4P|LqI#4_gT#**xO5qpd@G&YSObG> zc|(yEImQUrlOv6DtS??0VQXjdHWAGtowf0lnw*o^)4|X|=B+`b4Q^^VfmN3z85*sG z3S%P@rM?B7`L^DHf(1Gb-PP=syse|Fl{bRsrHCpFA7588jTJFRC z&d#(3QMoa!e8TF`>pQVT@wSUht(;gt$-&V|@w73Jgh>N5%hDVYS!j zMiGb?9$3b!Q_(j?MaPO1SgA5;9sbmMCb^L;>9e7m`6H|V%ns=~T${^^VxtK}MzqBd zv1i#$Z2w(xbYwum7Nw?Zx0#0B?SfS3n~F+NF3zl&QOPvEF?4jRR&Ejs1)nX~KF1L9 zY|KUTSob?iT6;%ZD==y1EY67FCWo3izWJ;#O%VIEf;0IBS1CI7$@^E^tA)2@89DNW z!yzmWC#hU8_S;da^)evr=Eoad;1yC231!ju4RT= zmmh47YV|6Wsv#q--o;qhoIQqArZ*zHHS*Cq=-D$y;H89>?Vz4o<5mi4417=s(O>qOQPS2P$mrY28RwpqkmBHI|VBk|nMU2%! zR@BX?p2Z&VWI`*%bRui(716@5k)=eK;iSi~SaC*nAJz^ix^y8$EE}7=4`%G7xgd0G zHrCYw%?_k&U8Nsq)#&+Gmfyb!KQQR5#$F9hXObr)mvYWXnlg3b#M&8sY(o;qwkB@A z->xtC4mP4>Bagbd#GUAhA7&%QgUX`B#W&-z{d%>W8|NDQCRt(nWZ1Wu9X_hT8G!6> zEaV$%0V1B(7Y=$z6)l)7X|9oV>N;^6%4F=G`GUD`L% zrpaj$m7~E7o}oh{D>=h5%ju*u2}0+QGP%n>cm!dRH;IMIEi6*vqH5&}lGzlJ9DuL! zGv&xm4{QVpi(hhZUf3tn;A#@@w3v+x%jpy5UUqC(Ypmc3s*mk2Njy=zYhr7wvmGi^ z6>;CAlANVf7{u5frNno*LcouFOd1MR)6Rmdi(_CKMX5hmC{>Gt36N#TEQ_IMqa)S4 zicbM03rMMDmJ}Wk}oc&OxA@pgh)}z8HW9u6jxPa_bIm9BozX) zMk!$tYboD-zes}A$vnQWafa)mL1rXgJ#zJCSjK(zu4V3t}4fhaZP1k z0hA~ZEOlylaxW{*{ZxBK;X#SF#pgvQ3)y{Uey)2k8zm7?cGHXb`S^j6BrL{NGLH`#t9muXz&5O0x0XQg&r!~l9 zZU1po>bSU@*14u8&qTT$$%*#5OjOr$QHhdoc9CiQk4;?Xg}*}GVc0a4+tX@m8ja_H zxDLXxIIh36=4u%#RhCe__*M5Svfal*m6{~xz4_qR%F$f7X~0~yEk52Jrlk_PFHCMU zBV8kwymebpU7fsUJ4FZXJCm=OlghaYtz))peCxER8}IPx7z3I>?|!)Dc)5L7)IxQ| zKC4A3h3{sZ9asEWXq3EJhS5vWx}W4*EVtgu32r{9#hijHTF85=0d5n@4rNe3+)h~I zs~8oPq5Si;TdP#pPV{%Qx!T@~7H2zkwGU*P?)r+a(n#Vp5cVw$mFF@C6$X8%ubhg& zLX~hUIHRB0hwHit3aHX|V^D45f&C0!Iee4>eCa3uTzwcilj^CT(65jB3E#K_!vs72 zzu3p(BH!#FTJZmWNJwJBB(>z_-1^~F`d0nMlG|u5c!HbAiPh$YIX0FfawyIf=hA4- z1gFN!!5y0@3v*nX(2HYIIXUzL#nofAarnR0g`|Xn;h|og2o-WsO~a~_e2Xj;JAUCJ z2@2iT$E(t2trGAwaq(QL1EQ)RA6*yiZPMF2W{U!r3s~u-h`ze(%&UFn%sO6Y)`d5} zC4>K?D~FHB-Hrney*0swhUw!?j$P5>i*P;`dQQ3Q!koBdOKy_UB^8kM_v|m=+#=sg z&&;@v0W2tTojVs;7afT1t)Jn*0@fWcQ`x@)c-l( 
zqB8`wHe4q(BG6~DHAf3GD~2JD`!3SGeSUwo_C_?+l6`oZWN<;u)^m9~JLr*_O)sb5o~we_^LbiDSh;4CZW~wAfNvb_U)zlTN zbkdGI)Z!T09>oS`hlE4Y9j?X(T9WWad}8;~g2|)EWu3w?)=G(mOlVGaBvqWaT?3!h ztm>AmI!ddrk77}@7RnTrL=vG;FKi89Fb3I3)NNEgb}gEGuqClvg{=CS@R&QpZzwEj zxZ&EVB2ZB=CxzM&m5h5!PuromYo|3td%#5zYzQeZWEXlEUNHg7mh*<#q9~TkODgpG zXq^$J>1n%KO@ha=w?0aTL)xjTLKN87hs7);C5clq7Hmfyt(?*>$x06S(}l$}4m(Ud z?)zjO#;;56-l|oKUkm=$;cwkk)s-)v+IszUEA=&%PE|dv*K{UT^`$*DOI4lslD;%g zRo!h5EmBo^J#6;y_32D=JoULfoe{6oJ2k`G!!P4jzMyln%u(TQ33M5MC-QeHf6Mth zhrbK>gV!tIoBzx&Rkh?*p0jCq*}Fxkk>{$1h1+0R7kKfP;}5_8DDZi>=y}O&h3|jV zbDBgxoadogTIpT#Pd&EO!HQEB$sejB`=%c@@{n56qn@XSGc?v zT{goddD$G7e?^xqa7kW%jm!JdWlLO=moi*Fh%Q^uTeINxM;HLI@2Y*t_q4v>|E2KpdvDx-wC}5u?>h!Q zs~rEIV!WdEljBRii;sW5|0~5*{7KU)y{xBZwF7_T(*0k{RqbmhTdNu(|juAN1pqFj6V8{~>yzv8=L}O>n8gWtCj? zvL`Od%K%({j4lIlNnQrw@>6sfjEi2j6oc#eK}u=W<3HoR;n5qVGK!0Sql zc_D|t(~HE@1u*u6h;E#(iSl*qyP)@HTFXErP0K4FL`B4VS zk6u=$*XQlORhdj1=u&7~=%vu@pgs1?WU|mVc;5<|8IZ|r4IRaIy`KWz7J36z{vHYb zp9t?;K*e_kRQ}$F%3n>uebhOVQ4YNw=z3eFGivGo;9c$EL?}u9?gV0*-hlX@$iq6V zaiN5(9@kr%3jduwj80W8J4s(~TCRs%H_8ocmYKG9v&@nBnDYBnMj~Ju)k7_@`9|K+ zjSugu+WPnT;rsE&y8DBhI~{nKyPtfs)67l&z00OfFR66;)N}6sV*8AxWyMP=qc3#u z_piJ7`>i3ZyR`8%*VZ^4o^$sOdnoPX{UYy(zaE^j_xFGA?<4Q@_f9wa`p0+$)AAZW)vrl*WTHlnt|KV3oH@U-UtG@0&c7VSRu5h~Z$4<{9z2Y+; zg+Vmm$La1}-Th$Fsj|HmzM}6=OdB0G&FQDaN8jg6bviiU`v%%#TmSw$_zAcDYpIN+ z;rO%CMqkFi-bc=JdIB^ zSSOpIOsATqr}*0}C(}hz=~{p5>f~T2L!3-@GS$gpPI@`nNhFo-mPWW`(g*p+F(R3C zJGrFO1N?1ICv%WuX(ycB$KXml7+xe8I7A{HEeo28R|rW9@gs4@%LMi=Jfsi z(&=eV)+4dJ?e1^8ie%DT_}dXqcJq%aA>33zcu@I4W;!`gB$M9X$!!1F-ruBb;U@J6 zvOWJ(D!=i!eMHjfU7S$O^wz=O=1_i=Lq~s8G;vc`1W`{yQY4&GE$BY@rPDo}sAdv^ zIwgMd`6CO4={cR2)P<)iJs|{UtmL4vGqkmlBAItQ%PP)5DCOt?bogVCO zdpkM6$>C1)KgiJ{>2xQNRC=WSj{<2Sq^e#xUtOmKrA$&OXZcfoQo*Y+$&2dc1pZWo zRlc+NQ+gCP{VCnWf2&oN!0NX)m^MAUdtJ4mcQ<$=u9TB{{T1r|QEhU1Fy))Cf7DBD zI`zV>YFMe%%fm9J2poL3ci4TN@eXF{V=nOb1jzmMu0wRk`It0vn=W^r!l$mQ?&F-9t4~wM4dat}~4ipE_i1WNd+uXu?`WxMiX;=WUH z>=bLE$D3G7x1r}SU&qbqiizP))WQ_lC6+5X$XPq=NBlH=Ptzesxqel024s@&y* 
zsor$j#r4QYe*z|US=EiaU|ChIk5_r9k=_$R&046rD}+>`~S*rk@i>?%;( zsp;MGE7v6>{b;jPrW4;Y+0=~=WvZUNH|WU%U3Z)+VgAWdsW^5>R-EV<%)Zx?cl)ZK6kA+ zs3$z0Ae5Dnwmv+Ssf2?8Bw0=XpfZW{zz~P)#6eAXvvSwrBE5!2G6W6=kc_A$lB#o& z_WmlB!EBPdUL4fJcO#NYMWp}yPb$*`4hE2{UwW~tdy)R-=yYZ`INTr(>eeTtzllSn zXA(qLI2d3{95zGYR2(AxJ&ms?9BvecG90qBP5~l)=81G>A2=95UFz4{yQ&=NW{;#Y zSQc_O!6B<|^0};MRHUPOr8DYp({FzEK8%DbQ`bH|WJjc}Lw4LOfV6~8Av+?S2%rlb z3?OClkuAK`BRyi1R7Umv_u?RR>J@@k4UBYG@(+VeUVv;ChcdbuRn18Ihlbef&ELHP zqg#aA4)^D>KHiIT3=O9v-?Cvcw z(uYq?*5BX@7mTo!bpEUA!Va@{9>}q-3q2m@^D~JXB6qvr-)HiUVv>S2`>&q z-ALa!(L-vUBCOnPf~mZFZR30*eSq*xBNZ$!SX))BcRu&}6Y1&8JX%WJ?E)z`D;XUN z9O;`RPUV$_lR#yXfo;C=p>L#{A{7AS?hrr?ZIe1Ln@C&nj+r5E05#Rmx_V`g^g0Gz zSXOg)ii5<(s9ilxk=Bk&Ww1!(4In``q^koW9qxeKUE&}ackK2Kh;+BW!2qgR_uk?K z9_b!qQyGlFxx2+dvhA#`okOJC2Mz|1Y}*tWuS|>d@xqHMxvh6mcu>g^t>VUif zB-^_6cMg%B8B%etI7qhb7HVarpB!S5%o{+mtzW2(kuC}x?h^;ewl0A~q%Q>y29Rt! z1G%CskF**3T08RLesPd&tNOcVRHV~qr?56vB)0c^{Vm)kZjwIRH$Y}x`=A08ug$!NVeU+gJ)EvKLfzVo);i1*>>_y8Pkp;ZEXM? zQr1`=5>6tn6LD1AinNBfhS3V@KwdZ#aVI_RH8j%CmuPrgm3vqKiMU&WypHq{L>#vL zyaCi?t_m3+=?e=yO^=9!+UKu{iW)w^J_6~^j z>u;93uUArbRMu04X+K4D_)_AB-`Q9yed^W$_nr=*)*H1feE9KhTZo zZlr}6pbHLDsSnDig@JaxKZOalD)+cJRLJLex6|AkXk|?*!^~0M02<|0Z|{0cq+e4X zXhyjw#GxyI6v~zwL8N=_m9}(F0?2N;9-8?@cJ>*DBZtf{@kWr#KWc3k&Zc3J`%^P4> z02lUiedek_C#KR8E4in|VK)F9q1P#?k*+%#X-8$ZAdPaYNS`uh^aVv1N6h;kN@yU zMp}h%)QIajIBZ_YU|A`M^oy@u_R7(eh(*i1ZA7}$LQ7)qc>yHoy0vlX9qBWlc}W^T z>cf3yL4^;rQOg%$?1l)E-7;Yn9{K89o~5M3UubNu31Ric|jnh z|5O~+t6muBX@FEW0p$fU#i2U1(`kYBCMcR=?nQBtjMzAI6p?ylor^%*sIbO%@+^-u zg47OjF9{_X@b4eIxkZ|wnCi(3)lP}7`^r&~CMd;5{9YD6=@U17;mAl6q*7=oCBNzu zM@5>T)cjr%FB3x_I%HBf!a4VbIH&=R!#m<2O=CDMLx4!{bwJ(#(lm~v zF$fUpg@MDn;vh|918gJ$M7otHGj9NC8c)y^1c>z3kc#)jL7K*OyLqn`=_F(^`cmEi z9Rci7Er7)3*02|#f1rmr zAa4MLe>JL!0DlN{r@-MOagf6+SqE%ELF#>jz`+0#*Jq#+%i-2Q-+jR2@Ub{ZO8270 z3$Rb1_d6hO014=m{^NjiA`Kiq5eLcUn?myL4s=$Cg8?L#PeZ3x5ElfxcSyyj;vk9K zD*m@%x{9K71MIw7}sDagY#Rv7-y&NEe)Is>z!IY9qurbPI=%dO(_7vYVDtIk 
zxvYrv+3QoX=;gkO;kU)Na`F%OHrm1EwgD9W85g_3?&3iAs!3&750(2mhW|Jd+Ui5T z4s-?T>E5&|1E}e3!$@1D*)7o4x0wE#`vwl})a-5>Ci`0|wbuxzXADYbYI(O`OIR6F z?e8LDt5HbT;ZFZNFrAs~I89Jg<-S#{q{$uwQnA_sr07(TV_;+Es(bD(c5$z=9}g8@{Vj(Jt84{apS?n@1j`%xU!KL113DeMzM93DAY7Wk^X0n|RP zBKMVZyY1)nSu#YO{!ikd%$^HCg|Hp~(Onq2bc2Hd6n4J=c%D4B(pqK)<=oHWAgj!d z0dP+MeC9#q4WN?WDgbT{fV}}oZCG+T98`BU4uC5H;1&nu4Iu64TC!SU?;ZdhLn@Yv zgDS$s0nj}F)*}@aq{0Bwj!q1KtpngyS~+I%+%e)H?PzuYtP=nyP__1ig8@{fzGe6* zc@gQ#mT6^F?pSech)E1d0v*HxBa)drK^&wV^<+vy zm3MHU2SH`$Ge9{GThTaG>}Lkr@msehohS|+aro_az7}OlpbMZXO#^fSu!?H02DF@1 zh@Lggtw|?|LuWp;LPM9s)`5OU)05fX0Fr<&AZ?X{Z$Z_NHtLwp>`E$57KaKxl#@|P z#r*^!`VK+Ja$x}3VSef)m5m(j;`Cgm9!A396mgIprXP`189X1#q?#(HahCzQ;jkg; zR~(iFx)J#xOU|j{&>e@BWU+dHNC!{O*g(esJ#e@J)j+YoE6|UyN6G$unmF{tVQ!h* z;Um4|EVt|#U?%{t6G_#B*`Yp^p{Z)N^>lIQg~Qq8gNnNYHA-|Bn!l_k2H2So&oW$- zwCg&>=}UX3t%E&79AtmF=v(hYBV9=!s(Cd7>HdW^oZOjk=-!79e@07IR~hN?KYOw_POZ1(t%Ejc zvGjrsT6SBf`8vH)-B?mlm6yjd*=4$g!h19{sP~u-kmd6%0c4k1pGYgn5rOV8FO^{o zo;QH(GVk?rK&JrM;ayYpb7zag9spJlghX^j5YZR0ph1I20lN3c;ptsG@{t}<8u|B5 z+R`Eqa;KDJ`#IuV#p^1%EX8d@GzZbgn3Wk0X9MiXhb1BC%|g(lJ?PxI3VHy5ib@YU z(nFG9)9(HEiO4f`N%?aKgz z0W1a}nZF4fMAwIdY-;Djp~qf)*y??69g*%$+|(FUrVk&~I`c5uvLx02CCW6rjA2n; zKFjt7F#^_Vc<&On=%4AiRj3yTFoX}M5_>sh$rsW2gst(L0rug;Gb28x18(=7aw?eb&ADYbrDDOz9+=b#W48X6EE9#&}S2+D1O6lee zurCe+iG#BC72+UzP%s2vBo4#*&3DmfKZM4Au{eyvVUHVJln)OywU>)91B?c+ z=Th|!MD`e`Uosq$AihK#_Q&BhBB>y@B=(}$FGy#`5(fhuz=w9chN)8527ESJ;=s%g*c4k!F$d^_Ck8$GpC%&MTxpKrI zTrRA?mDBr4rj}_K;4lF95`?PUw*(>jr`<9(c(_g+4i^V{1xc0x^a`SLXuJ#y^9Gp4 zhu^|MP3~Mch`!Dc0PP`ny*Nzg!;Jt`4?YASdLQ|qj>iBq`0ynXMn3mL!ie@ECjsPc z5Qmw3c<(G9ntvYXF=#YW{S7b+z&Auv)xHmr6upc{+ENj5ID!wWLcd!V`rWh8%QYc; zqX4r3)ZFdqpB?B{^hi>1ZW5pl0Hb(|+}==)HqO5`#ZnR0hzKx;53|m9slF`Gk#kM6 z&)qD*Tma*!)fxiL4s*)Xp?^p|wgMn}K(#ALzZZx3eE6}it9Y&ZIURs7 zm*Q@K1pxju*|Ye$K#%*6B{O%6I4lHkGx?xm-idq=Jsf~0hzxKfAC`uRh%Ee3Sb;FVJq&ZB0$y07n7X7ad4tP!njIBV9(_Dh`Wr*pB8et@;j{zvw;F zOb5yv;AlQPgL0^f^3OomCkRq5}&Yx5Ll|L1sL1kGHuejYMzzH}E7_VYyfmooIQIRy*Ho%EEbiCJx>-__riz!EX 
z#ogj?5`b=hc6o7lpbtSciD!V50bGjYk<1#80303=hqL+6@wZxEkxE?>XpSizL{{Da z=isn84pN3r4)jm}2%Ox5;&3hwV{nkw;@L<6sFV$G9u9xq+`I66pw}{FLSN54Bo5~T zcnonV4sQgyBjXwM4hFaYhffjRYRjDw*`l9b?Th#x7Kaslc$sRTNad=daR2f*1=6=@cQ2#^D?h@4cf@L8Zcck_9aKMHU$ z4)^Zl8THRVYi7DcGQcGO?hAGGs8CmrL$#Yg9R37{?w11S9O~#nfo@1k+?MZY6!cl? z*6TdHd;_(_N4e-?);aOknCbBJ`EhrFg4;7S0W(rYMxBJJGM+wx=La1{<$U?347 z(g4UC;A$K;-^u}zMh<@whid@rzk^$IB7HZs7Xw@iU@jjdkuC}J%h!GA__#P+hr=!# zyS$Bb(v);&P^#)p0X9Eu7Z(*52m12GX|1WvJt5y|e80VpH^=O{PJ3TwHjumlG5}iP zpz61Gpf>~P1BbtgLo*z{hJz|_PXMBI0D4mn22lRbLI;t0lR*a&J@*pp5ORMLhZcPJ znYjz~n4`Nnz38g6R^{dmAW`r`7te$BLLTfwDx`xvDGuubxPkg94o?SqIRJ4mfE0s` ze)MjAyFl;0)J;84iGzBp?})u3*P^A*IcgyB@T{tJu?w6-+`SR1^AeG|m zC0-5}2Ra#m7Tp>^te-nfgE!)GxYKJ_rZWRc#WUidBCcgoyTsDMKsP`nP*`~bD0!#6 zDwZj`L!EInX`G2T8u?#6d%jVa&>@ z6fdVNM8{ATd%(c}(t~HPLbM(9%s}_K($+WUo)-tjzUvka7#Zk8SNH;G1Bm6W%*Ker zm_X+`Aoq81P*vNHX(9pU1ll=pFn|=q*FJK&k~i1O0-+(yWjHRFiH9 zD)(OkeJ1Cv`xSAJls)|iR~WAdbTJ&%HVu#k@Hur-K~+#EML(g!3?LP+io^kWg&+e%0l#YvKm0%0PXp3Rn}$HeSvl)72=Q=hYkQ1A^wz_n*-g5 z;@%4m1}MkjHAIji+l?SZ&k42qpW@Jw50f``#V68vXx43zvI1;Ad!S(-WB^nl{?%|t z1zHQ$Li{)3&{p#KY7EF~G!F!N!txZx6`Kjm3uNoE1`sJp+DLyo#I0d(382{b*~i7- zZvx%=G}}3mH-LQhxZd+1(&v!Xo3h>eZ8)T}L`V-NtXI)#s$rZ|;q;POt{NFY!usGB zTnjxc(7&Hxc9PsX;viw&nz<3lqTK?$iMpqOvjN2VE;?>?J&}Hg!MdEtyc;8<2b0=+ z9PZWk^FSx#}${R}3d_QEcP?26)gvz}yUaA?JB8Y{G z^rRwG-cZWRHb`TkBAro$%6%YS67E|t#V=H(ql!>@LrKE#gWwh_(p`&CxewuGVt#u{ zPtAE~pl|)vg_8kPW8c2r*+ja*H#Y5%`?okq%&*G`QdKA$0 zkDMbto&jX4>V%H~Spn+7Wd3bbOtFjfZ;b6!b_P&Mrte7G>ZwTgqY+4>{8$_$^FJiU z;t=V=Zw-()fMoti278LwX@PE$NoyV?_lY=20H3mtcM7>c>)w);ttxK-$^1=^^)Xze z52KP(_m_RDf1m348)IQx-RZ?Qx6J|Cfa#Dv{PTP>fdf`YJv74arN}eE*g1cM^&oojQ_|> zf0*l`oqCV9!R*vA&kQ&t5E6Qq1^PE)Ud;Si^Fm_tdMN7xbhp=hM?$2POp!{F{91qt07qWz1smyBH5v%A zAVq;z0(k!$Pkf}yCqxSij!OGCLaN-WnSN9@Mf#U|$h;vL1Ew~mG=+?`7p1A(v)_sE z{8rKH&g-UhHbU+Y=mBThYJ$80df;%$g`Rsi209ObTIzS=&=WxQT|Q!%6=-gSt?bAf zU?%|E?BzpWrkPABZ@VgGmZ0y&p%;MfpYfL33M-X_@!m(686t0ho%wLV5LX#;Pzi-s 
zXgy55+bbI+m%qMIqx22GYw0AR^W=J9&8@R|y|W(9ZcPwyQD^U@=D7#uvDWgZMAEcus(|1sjf7q9ICyi0)B z1^xn9y*1?CAm6>)qrGK7j&|hIV}(@q>Y@D*kXHxfb5JF)SB!$EkI@0yA#~Y3xb7Ni zgwJj6a8B zI}T*CB`LKCJ=(nN&H+0*U{7%DBLbwigR3F;h3l%2SM39Gh$ELCFXW!^)Pv2`o*j^% zhrIgC^Xd^pE`z5bN5FGvKrRi)KREK36U0-wst239y@cFUOSv4Xmb)cL+W5nURO$^m z5|`@jeUuXEhqu{`TJA)#8U>(gGcU>m0&Tg_X7%z07!BYrI5aP!&$Y6F$g-0ZE7c!8 z*lg}p20dz~k={E@mZPe_8d6QkkOzR+06C)gM;eePpDdm#XFb^b?i<8cDU5XFxo)X6 zz;AJ=zb9dxFo$%CI2;6Eht}S(&kFRVNhwX|=M8W$fOT0(E2$FcCn!6N1ahZ}!x$WP zALHqY^d$ySbUJwh90DNjrHb@uj7{y}aGE%b1yDZHCEPxN&SoGf?N5Lm4^jXjR?-6LGk6L$5@U z?n75Y9-nb09Msk1^k7rKH=!V}kB@r-{nyJ@-SP&Qgu|aglp?(efU4nH;xHM&ON=`- zrulcEKS6&@W*cA%4nvOek@NUK=R>jK=gt;~sQ@mfBCC2{73fRnOJQNvjsS<@aQVuV z4bE>5^lV0#%8+veI2=HC8jZwVq$e?w)kL-drr~fVsgQPYYoH@9@+xz#I84XkAi8fg zrbtg?L?Oea0cPOP0hWqnq}g*-WvX)LiNj1BzU=73fS&^$U#oFhRo(!z0DO3>D^uSF z`gaCu8^YmyaX13N#pIld^_oBzpr#;Z@&=fVLo0@$stu985Sq>f;!uafhc~;96zMVN zxH@KlIRJWvwiW3ev@Mn93UQbV;Ph=gqb?1!^#i{8(*W}TZ1<`IdIZ|f0l5psVLpIq z!RO*Y-@e~D7+?W_@mLep^&cMS)_;?tU6s2?92VlR?hdXaY!PS$l-ip&z>xqhMoK9k zBAxSujWBaLaae@I`HaydL?iva!n*E91xSzvH!x^lZQ4_l;&^`~_R*t*@mf-MJ zSX9%DfwOEITLDm^Tq+Jr`7n%uqjD_LKZXIi0gl1pw*e4o7YF1n6Nh5~)VDwWV{k@2 zeO`d6LPqi^B;3enZ))rA2ztNoL5)8{kA7-enq1sqH-9>7@%qS$rc7C*iRBMDIf*-H%${ zjl>w>WB?yM>Xxq`0$u;l-o;!g4yORP@fu&haCe|rJ0LH>dZz+-2Xi#4(&zfn;`tG( z0#zqo#YhTE68|-voQ(62YZU(zFaJxEk)5Cga z06Wywu&P`1p~b!QGU66tc|DXZ2X^A-E(MN&O6Z*$7+w<@>cQrKO@oah?wv zatX|%m%Mp73ubW;wJ3Pm(!KmFK&vzW`zaHyRVZik8b?#-BCaGMgLd(d0IjU^k{`U} z&C5Bw?1;2g)Z0PTwVpwTh^+j%ftTEMFxlZ;AaNx2HX|~5{V%z_DO5k=rSX%nW#{oe zFpR~jf>58gujS>**8{Ku)Pv10$5C40Eq*=dns3Wx!UrQ2aOvv*w7#U_or^;&DVu|I_+29cb|tZ!KPS1 zJT&4fcOzl#a1l;%s8|_Pz4xdTo0%ueo-82<+bPGlZSHaHr`vrVFVCJ-DoY4tWq*0g zQay4vDJ2*4cH+YNy#e@7Sj3b8_QQb4~~lvV<{^A`7V z8?-1&e-j|BOwvQI%R%n9rzd$|sF+_x%}|DB3z?K9lWg@1|C`Eqi#%V!%lt4*xtbhS zkA57LHF%c!fNT;Ue(I!*u)qn=l~?kzim{gDUpZqfiR6z6d28sNUIFmaa+6eR>J6El z`-6hK3fF-l$g@I_W9kJ-RmxhOD+7+^q`v8a%J0I;lk- z7Cirl)>fQYeAf4zH_zAN>EPh`fZ+LwdY=0?^qjj*Azz2*orC8+gXc@@d0x`cbKX2( 
zkLUKm^RB`3$@M&^(hZ`WyImpQfaebw2`bI)gXik{o-++R=gsqtc)C7#-Yj_DwZ7-( z4Ls-WP{=po`DHw-QEh`~)yrXcminE=mZGYawnsh`m9soo-psc+(9iATy?9GJx8ZxJ zT1!$by`A+})w)x@evj*C8@VKELxoh|{|ptf$QQM$sCCL$=UYIv39Yj#w9fv8)>#%K znkCv*%kRRANlrcV`U8Hahx(WcIW((KAJ>y#yk+ZGX7hY2ZoPx&6NBf$^*#4*;5m1< zLcR^pso;4|@Z7TC*|Imaq=DzWdA=RL=Z7jaK6rk;P_Q2DR61KP+PQla@*Q{{5j+nK zo^Nm9IaA+r-aOxl=V+PO0MDuwJK#CgiV$-1`kr(55^}q{0DsCXnQBXsMM}Ypk%5CtG9)dXp zi_0Gc;Lu7Q&vJ2~e@A)O?!kuz_!EG$difm8<$>M`)iA*Tj{?}4nF5J` z0fCNyD*ql4hd<-6lmYg}(8~iI&oqUW?-<}Q9KJomm4Qg3E$WYotyt-y>@Oe}qG?Ie zMcQVb59$r@I1ZEgds#(##Gy8lSo$Zi8lS2<(;lb~m+4{iCH)*8X&;9_65-E9cwP^- zY+zTW)D(qC?`1Hm18x7Z4>k?3Ar76K!JSz^Wsz0@JKpEQ* z1^jUz3nFg-sW(5iafdD0!Xv=H9FTiX9HfvRy_q|#2=tl2!2oRm9Q2u2&pCnK9XLEM z4%+~DkvVYHp-%(79DrChb#_*X*_EGk#4;tlTp1gfZG1Q zwsw0=q~%k5`^*dC&1A_#TbTj2$KkPdE+-;=pXSK=g4~PZumga&uq4tq z;h<&S2G|k63aYGfuqSm}w3@oDDcqODp*d*5VD>0pyepp692r zm`xe;KJArt8@ZRos+)=P9bIPjl*m8kyZ}esn>HRbd8dlx_6?l8+ zQwKzP4cTxCi&Fj}R_gQhP*w@@f}7nucXyz#KzD$Z0lEU%hI%3S)+x}Pp&P^DHF4;M z!)_G20D}VE4+4?1aO~lz@b9qz~`sD-sRR3x}>udL z8-4AI0kk+^*S$jz9%!#4tZpuU0}g2#N=^@DyMkPYIUz~64uRf3J7rr946qvx&xc8d zNdGb;Bf#lziWL$xr-!oMAfF7#NY8QP(-E0BWFL@6we`8BNOQDK`lsAmVzoPf#h6o- z1ChSyrI0s3UmQ;Q&=+l98t5I+c5rxG9Qpxhxw#KjBP}N~%ueMEum^wxLNP_UZs-d9iNinuw_){Eor$#5LIdOtplqcpI1B&j(t0mM_>dz8hK0D$+H3sRelbSL_=D&k;(192dK z4G`&MTJ277_)Hvr3!wfa{sbC?wm}-;AOIbLIEwVAH*5>S&jOgF|6HsN2HBU^sp=eQ zCjB&j;N%(*4kVwO8OX4n1;hBYF(y?etnchGH-zCIP8Cui-fBJ-TWB4Wgz!oahL&M zH&i-Rl}P`M5M_%@-T*UkxTl>9(MJO9h$crPI_?KJD93VoD4PZHy!PIMMS5JV=@EGY z90A~W$R!ONPYiT7!qVLQDsh;NL!7ja^yVWoI$$PmfI0wqI(@11-v|2o{x)fy`%xU` z;E=k~V;gB7+Ri9A7+@~$kM{Qrc_Gk4zIGY;lQ_%+@WTmS$09wKa@U4l1Iz~yC+#EM z6-hsqRQxOs3jo}N_S2$>Zb_l+pQ`$c01_H`LoNhy>`fk4q%DwOS|7b+IjoKZ&?D=t zBJI8?lbH@H0n89($VDKp{D)VSn*)8Bnm>?r-%Ev*daH-Bqd@+FNg|bar0r-#Ly46E z7US>(yF#Rzw`Lu?=Er;edxa;{kyg3ya9fP!wZb|1o$k_x1KjZ z?pSeHf>R9^v?+Nj*t&0h{K5hzN+v@rb0d(AM(KfCjmG=Ia#)~Ch@^7z6meLN!?ArmAI=E$ z^pKMVI1|8rsFUI_DbW2WO>sC?9L~bwy6@d)dV8SlNG5>10nP^SB(WEVmjj(kD#YP5 
zaX1Hu%|3U)wt+rQD#XD6=K|Q4!M!-_8t5Pg~Gj{b&2`8UvgUATET7 zbRO*;4K85KDCIxSlc2b0EnD7z5k6NnD6 ziu72_S;(=!84`hzH{?YijvM8hw=W5_D(kzu&lIa1fc0r1(kt5pIuU@T1PpL74pZ>C z3G}W&|Gkax>pDvuF2UifEqs6&X&O0(x%p866z#kow!IW&kC2CvZbTk-=errd&OBTA zmgJZo$}Z#Ura?c6bm3{%!98h630p(11UbH^FKCJM7LeF8Pd!IGu>|DwPq3wF`C_lQ>!IvgkSmaTipUj#K1HDsk-Pz}!{P86ukVrG zfSRfP`~q>f9>Al0+;H$@pfl(Re**^t+yLMthLMVRq&F=#J@oh$icA((dMLXQ#LIj7 z04&mbVWr(O2Dk}8IU^I*_eej06$#H>C=NH{uo+=(0NpOo!vO3_CJA5`Cqw=o#7cyW zkT(Z9hAaYk>O}#W(?i)UAfCF*CEu%ozA)F?>yr_gH{>5c&SKywtL?Ht^H@M=L%Ez- z-HOBDFdR53(3kqAP4_XtZ2+2B(v`bd9Bv1&gz>&Q_DH`$b79=@hyYRn@`k(v#OFcG zMEWa~1rm7rB_Nw;wSIwhaXfExC*U?=Tv`!mPehIk7AdOW(?X?Hzw}Uc7hk{qmCrsm z$GD~2LSw$al*Rd6lfj*I&1E7ad;4i|BtUXf%9_e|Nm#EK50^^l=hky zgeVl*ZjCkjzGusxklk&aYFa3%gm6(3isF-!Nuj32v`A%;x)6mBK8Sk|{?FI@oVRgz z|GxiuJWj9coY(8Tud|-qNk!rg3qL)C7wNI6J;faO`qpnA@I}g~bT^@^qK^2Uf89 zZ-)gFyaln97Z^og>%IZ!vpaDXX?RW+Dj*Kw_+Ml2wunb^_*ZE#!4inC_YSYOGW!G^ zm=`{!oh%DW5q#Vp zh4IgFoFDG_?{8s(fe2KtAgVzu=4_@j&Fs|fX@J==U)GiYj5j?qPjf_X^o zZ#Dn=iJon~u_>3dW=UQHX0X*3Ujv>l_J5aHew62bzqjEd_#y=M*HcA9iuDiGl}sag z^GZB^n8&fkU3R$m4GFfpNOF}(DsT7q@c5Y;#|_u(MN0S^QE0U>jP2OH3DE(ho)fs) zi@MqSg!rMFZNAJ|EkOr69dCv{;9oyNv(1M&V_rg2#D@ z-Hg;lrYB0RMdWFp;(3t01>M^58b;ZD@Y^5CRLbTb;qAY3SO+-%#M=!02F%M6e2n0t zWAn_E)#T(U>l0C3!XxVhxbp*l=;YYqb50K{87M6{r77gX5rLduzU4f+m;nW=ZB9PYq=9s+<2c z8xEWmOb}>kez)4&s0nWg=LbG74C^eDD%GN>TI5Uch zuryz{QjXfhSILf#g0b$b)2 z`XAXRl!fEjuZQK}B3V%Num2@%|2y{nw;p5of8!9?4{{t6i`;Q9ELd~zNmdr4D$*o* zODuE88mM+BJg;zo+KK7Zs|GNLsrVh6b_xBJ5OtroWKaF~1r9i>kR90~ux`sk1TRQZ z>DuZU>hLx}=eUM;r|unkV})e$*8VLFOOB8KEmMt2)~*S%dOro3yO&3hZqeYJ<85?OAzp%#vhPCY46tOHHRCZfvYv$8Z0x zjoKyHTYTn$Lx3I`Ypm$`kJ%XCCf*r@i3eKZ? 
zwMvxGexYMz93C8>3&TUQQi6_H8gM{ZIyhby>(2x`BUlp0Rmbc03%!4pEbM~dm`B4W zVvfVme-NAj-hsFU`k-qZjbXD|-w6sigKfY``nrr~CO!d?h&91wOMyA2HZ(c^g>x#L8V1YHnZ%V??r>%NGWbPL~U zzb`>oh;uuJ;nwjWepI3j7EGY^^*f9q8g3o;p~{+)h7V*xE67)QR+PYT<`S*h_}MWD z_J#1}kK;4f+3I4#8U#)HKM}@a#|k>4esxBqEx|Uo2$Q+vIZHJJaN0-$&5xIlQ{U$0 zSj6X;QM5;90?m)bXKO9OtILQ#ofCGB)?&f_oA$3+hN;Oo4!B*Dmi{!(N!DH!-H4rx zf8yf-EWEiF#pZ+NvRWO0>G+{<~Z{;{20YAHuq_Uts+3(b_gm5fp zrP_(am_T!vAH{O~{s#N}{4WXe)lNT>DGjE-^$c5VEqUdq>2dBY_QI}Q0^7DTrI@d1 z_^L_ALn#NAqshlIrI!2!8<{Er$A7b!X1{X21o_-FHKk_ID`;;j3C9NXLOxVZK9MOk z$ipkczU~PT56p!nrvz=aSN_`$p$r{gVHR&qxK&EBUUoEvw*D>*VvctmXPZOi5?JnZ z(N>e_G@fgec*kMa=zl5EAGMr(Dw&4eCkBUFc3i-tsz%Gu7$j?_3FSQcjT)EZx%3;2 z()ugG-eN%Y;*!&1`mWU4x}8Z|DHX%hYY>ClTDn~>eV+hDRm*0eJ0b988`j)Q(w zch#S)nM@OD;Zxzc#qnoKH>6H^-6(77GT(O&MdEk{vhbKKYm&87$Jwo2I5%^2#4kDC zsGhICp4j_bLgneU7s9#6`4P{r6@IZP3R4p*S63oXB~Ood1VSZ9f3o<6EGi|lIqak6 z@@2&O2)j$gAHvjxY8GQU2H{f?@6YHZT=mLO8o!c7 z1>AmGn4*?MY|N-#Bo%*3qY1Ud8rnFdacsm*+LQNq0 zlSK*bC$g@kfYosw`yy;l+P;xs7l>{DKjW*Jv=F2w&;)$Yj-kOjo=HDrkVrPmf+paD znHM$8IDSN{V@63$pb|c;cWBfXN345*ozqIb#X@^cy=Pq=4(iqJ7w{Z9y^6#BN-!m* z{ii-zl}Y3O3+?&@HzzQ#}^|IHak8%adMfLCsW@` zsj^%gmB&YA6{dVhutJv7X5|GuKC1*A-?h%pPn}7iS$RwI&@mk!{aziDpE^U(R)=7k=3yhzsgPRPonnwT*n|4=4dl(HTKTsAEn1A8UnS4R{m>8S}0Ku`}88u+_KoXTS#f|8$= zbeYnHon!4*5=K?WT6c#wEy2zQVd9XPVHtI0#9G(?BUL3~tL+!Wtq(@Lh~MGz_z%|||AlB)v{V`KiD(f@#V5nk+J6ny zE*d;OVtF(yLH;~)T_t5tB2J@`sjEAFLq}y1lA53x;@K#0d?yH!UuEGi1Ph-FBj4u{ zXEx2VU$8O3;RxP?&^Z4~#1w%JRQ@IlM?m}^=aEtq9Eo7jd7+%%jX3Jx))W4cz_RnZ zOlcn9XL1-c9sfe9?3mza1h>S7vN+-@8jM!5TVn2d*Dqjv396!c_#<4-N zprQ6iENB7sNyO8zqzy_5+G<(oTda=0)f)ChN@Yi#r6$;p zfGS?&<^KaK&$nYE$zWN~(!LLSi>eXF49!u~p9$3bx-o!iC3t0}IWRu}Cr9v1=g=u{MAmXb2;dio3pqQI>4^6;v zB^I>q9w7^Agrj!}0>}R3PHQL=sBaCYm8x~^Oe+;u(cWpT$^EjRg71yM2G)otuuRhe z!30{f&ubcP;kY*JfhQwnL2LEJ%|lmO6Y;8JLmEt=HTqE2=2}v$jQAbMnV94OSi z;wfQhc`4$YqY7;6zyylv+v|?U_X^#5lq_g1zU@KDN4jhOkCRmrXz8@=LCKwZ>Mc7z z>m@-Ef^82TS#v{SQsx0S_f}?-1Fv5!GZ}6L2x3Ex0-&=i`e)W{U;K|$pl%@y0_wvFlm1gaW@3o 
z1v9~+2)Z)-X-Hlgu~F?XMm;49Jt6koJ8TYK5%GQAu*!l7w8kwc3lAlZcU&5#>xr_U zHSY7wVVd|IXI~UP;WfeG5L=mew19Bzi3JYUB~Q!35fJs6@U=2<>~eAV@YDoHLQElX z5;!(J$+DSDk_9bo?_xQrG&qjw9mZ-C91Zb*ox*xX7PPeeu1S1>yHmjSPv_YwEE8yH z`+pn_N}iPkEp3lx!%OQL$EhqOc&uM0LCqq2U`b7^Wp4hrVTx=JvCa8mnfsiq9gpB9 ztSLv1ZO*g#JvG4z5XUk_s#G24v*OU^`D9tp!uCAYDq7+=Zbzx|EnaE@Eo=|q*qCnN z`1R%C|J$4*3tHGVd^tS(I_`%+pLd(!6a)(>G+A)$ji4)OcwQDxg?M&ks9DDkt_tfM z6P$+Nxc5UE93N*{dKic9UciF=2!t-|Ypyvwh-*T3{PL`Vf}RwwDYf9yMcXq_oU$rB z5jpNrsxzVb(}HKJ44#RnLuu$-j?WKnC z*Zp)BEnyvxKhr*CNlkDLM8j8t!11FqLT`Fe7S2V`^Tn|H=GX+m9^}ph=OHLt8{P*v zes+5Js_`XR=#7Al9$OMQ4j>KsSXqL$=Ob9wDhM2hv-s4g8`iv8*|`8uM`~ZIZO6-~ z8oqQ&O>iN^huBfEIG%$Y?PE=sg^M8a%EFBt=OfT3jV8Dl{&hAuG(tKSli7o@@UkrQ zftb5bcwBU>$H@&|Ma`GMR=K9U1m(HxVW@dH&Z1Z-w`7J)T?#RfeG`o#jyKbxlpPaX z25}z(HD<>b5aLT!+_vM6nb1ruBWF@V~y@egc3iP7VdS7hNzh}zV? z1df{+D)lbO1Xn@a!2`Agj^pUF+#;DJ3wC)#`L% zpLmTtI4B*C}aN^mQBaiV8YvC9SN@hQ-+9u}Fu`zjU!L#n0GN^G@7xpz+v#50Jf|B<1mMIy*Q&Zl> z4GOunMo-7iOldUDq4Q)@<)#b!K5Q~AOo?>-gube|%>?&A+{u2w1dcsu#QN@SzAThL z+#Xjgjy>qB+R8G)00fuS55s}uS!^<}FiOg0VIYEwS=K1Ej;GubMmG}-g1G0PkOs#V zj2FjaVSy}^A~=*60UDAV&!HpHs!|gShWP22AaHEYRtaY(l7+G`1i}25Ln0k((JOe1 zxI_YbXgB3hlv4(W#5itYEY^oXi)3mTg1$3Cqjg-wy>#Zs1otBN7Yh#M*Remfqs8cA zS-20PRkJW*IPSs%h+Cv47>-~W+0lx`@h=)7Z6jOxn19a~Z_G=0bBEK=ABSR_F$JqqM)M`)Xau$BYqH>2Ld#*&N|wsP7>LnqHEJw#ypFAh zBGO=j2O)NNJO~_rrKssLgO~lCF z%VptV1hozfH+1}rX+@KQ2_8YPJ$0*^aeR&PCdtX$vM>&!E&`)@f<;;^Fny5p%V zGJDY$-a!z4Go4W;k*{@vY+gJf^vzZ#<9>6Q#^d7lxkSmHB@-?4;r#VBTh6=}@ z7?l|+mPud>AyYnuqF!7fIR2Ze)CcOTWojaV3wng|cRU&a6KHCJrxCnOhfxJNo;}z; z*dFmN77C6Sh%H^&$7dI_+EBA_Y&^u;%`%hLWXMiU`V5le9uKwZ_!x_5t@SEp>sbV! 
zbP2W`=W$zxkL42Buh*IKIh1wFf+@!vF{PI#@5$6;1nVdRwFt*sS=#Hn9|`hHNR=t4 zpxBX~ro1|~mW-E^TQ~#}9mn1${4(=)%4>>xxp%%yAF4(O7@JV@kDOQ%*;5JxVh}JsZ z-SJ9nGDxp5rAp6~uV8ZnO4WqpQr^21VJelWSrBK&sFRVK-f2N)0cjoQ=&ZP^uUmFa6mvGV){DR62EGU!1K) zsY-Nwd^fAa6{b|1Hsu@?O}+_x;vFJ(`L~?_obU;zn!I^q*wuFIdRunKj^3FmOWK`SQ{)F5Cwha5G{JC{}~aVj^$v2 z<_HGF)Q^bRBj#eWEVMvSBc{H7#BMPcCTNLZcuf7n5i>Ct-^xNO1Z`sKcZ>M`JE1O2 z&>F!TG4)F#J{NQGoh;~m(U_R}CnDY!b76wE2s*}E>=JQ8tj+Ibp$I`X*5aIqKP?Ms zFo7N{4~_XhHe$V44mnw9kKl@ZLv`N}u`rf{2|6J7ZI>X(#Bx|0Dl7Rx7CIt0n*-Wv za+gGWA(n#)c7|vk^WQPzov|E#l!aXow2%4UGvW!c93*JJD}w8$h0go$h&7A+2s5On zub*V96P|6)Ap3vVYl7Vn+{J#Idh??Zm){;9(YMIL?hvPZnQzZ3j(-jcXO&E_2Z95W zu=;k~JT{!j{#h0}BPiWDoI7#kzLah?AtZa$t%+l9Cmu)0G~Xfa>`ni9Tg8f*jwum5BN zqe+!)Xihleca4R`SqtdgBq%87)B-dqtmcW1$FiIPGV}4UU`u{ge9Z_a{bFawvkJ)k z3y&Twjn#DUDpL}N8FI!xi${<8zbUTff)#`G6N$xH1!P{w)0Mc!Y9i>(7Go3Q8hacb ztt5X}T+IS&SLh@`aaIADC3sk6q{ga&{)I6Da>jm#N5B2GRdF@`Us-OiQnCujoP|d} zwr8xG%flQ{Q<{w(j7MqyLvc0oPvY?a%UK0v9>jy?)L0GstEndT4RglM$D_x~KNVMF z{_1n|3ZpoyfXsY6+Os!S1OBi3+6MzUV>jW^F6m#2tI>X8ez>m!GBfb#zQ(Fbe_@Hv zMpn+)&+({@m5d>-#`&`z)BnR>oK-;PC_JPpHCBUsr#(Y{jctlY`5mCR8sQIlJJ0G! z0U174wmLFa!+T#2j#7g;W6!}u4JHE>S7ZAk3QuLFfXqxhO0%&V*gs-n$|gh3*yVVX z-$9D2QGH!|TViGvkXedH3uj|Bq#w6~SctHJzL zHam9YzQ#U_rz3F(E3QWJVxFf-RaOC+u6RgQYOIFwKMt_}XC`OtW;|ps8KSrv!+TA% zFA%Z{$n1|t~@dZ=#n$`7Zxw-M?)1?qjx9H9I8DlAk!2NmQ!Ojbgx2AGIPex z!=nesVT!A9`*`Hdh^v6iL3q>$jn$w%FV8ZOGxjAsdbqn+aW!I}GQr+&WfhR=iAPVI z#%j2(xFxhJV_(Iic6FcPYOMZnZMd%jG8K4qUt={sU&Po*CUVA}ghzEWTyZrxPrO{? 
zdT~|(nbCO2UTUmH=GRCFEBKtTQ}9sx$q2>Ou>36nwDzn5GASOda*frPJoGXfDssl& zf`^^svu3VXY3!eI@n~S;&y@ki`Pw!xvv5;-{DbQ zW4pp$i!qH?#-5Ky^`v1|EYtP#W`ai#lxyQ8Kt=U z!rpPTS zN^0zZup@aXr1~}XET?46f}ex!iR)p*p8j6E3kJl^)GFBp3)9`%KX6!#F= z`R{}}Qa~n)M|EVZUd#W=z^S>+*iCrU2On14LtzWog*s9|ri%8gIx@B=Z2rxmtc=Z3 zzbdOo6t@`mPc|Q^{j35q-{Mgn8G9J)-2KCtW9$ofH0F#`+{0n#G_o4ZDj@Sb9%?W( z_6XQ1bax)1a>kCsqc>NND(;c6r**X7#Lg-pa~K}&wi|mCYzy{w)UJ#z#G`ignBpD{ zJBY4B@njW{xdD&%CXMX{JC0;BBIS%7ibrGJ?Bn~|tdUhf<~}^CUt^DjEvLdY z#u_^vkMf%+?s2e3b+cN?Dj?Gx54DgQdpvCO@u6)PTN{ts#uJKr0_<*&ggR0{rWqd9 zk+CPj{ux#UIb**KtAgZ7#XSkORVOQrtO7FIyi%t$=Cud%1X?nT>Bn>4mH9<|8{ihCODYUH&jX9Z+tOFZw0y4xkwu%J?YoO#=#k~;ruW5QZ;JZn)571z0m# zA7pDeRk1zROg0}6jiz+6tRYKuQFsZmyPwr7pW>_nGMC{YzqY{?KcKuCuIkGK_w5?Q}Gub*kq#Q?8ZaGAgO@NGCT|#jWo2)Og z*)W>XOf~@zeQn@W$$DZ<7ll_NyMM9`GFb&=ZoCCAX%F!t0Ukbf>wz?a>cE(&i&)}=h8O#zvm@Q}6zCd~>^>nwczTvc7nb z4V)Qei7pE7Kz2-N81591>57M5R_?OYWOpK~vq2jd#W|Dx@R`je$tyB)7wlIAY)6t5 zkXea`l)NQRzLI3JyOAA9ZLndTGg)Umtf&Xhin2r(h4&z<5eH8NWWHu$MrTZoErI=* z02Q^di||m?!(NpU$}rJI;Q(aUVO|5E0y1afp|zBoOi41?Kx8|gX^ZWg$?D=^89%U0 z){rH-C>(_B3zjX6lUW61-p0c?S?;pbWTnV%=7~y;%4EIqsB6!5Yq}^LjO@1G?b$!8 zfXsz>G*GxKHQ5kkB^`q`lU;#F)?RaKx+olq>Rd-SOA zAZ)S{JS>L>&T(tHD7+Wh#V~rBRzRi~9-c4CU6z{cK4e$yX%k$|WGCXGjt0(^HEx>d zqHs8}{=3<)h-59`EIfKRqMjDIG&N>~EH1GM%UQsQcya`(ICAXMm;& zWG!G49z6)ks!3^KsWBr7{c=Yg-z&~pzzRH_2$ZomWb6SvtC-GM%w;X043D0&P%Ke^ zjoikJB6KGvNKX6bEZ~n9!`Gxp?|m~$6I~RJMml_-P{kH-10Fqp$*N0JW5y6#c#VCF zl(T?bKkZ+b^xk=HRTqU15<00kY;{<`a6EdBkyV$b#*8KO0X9N-YR*}}EqL^prnJs? 
ztGXzBh|rHY4W$yWfJJz`1YDXL^Dxrv?9llv;1N73c?vu!m$66ieBLrV`C7noJTj&# zv4C-SetkdWR{@#zczFI9xIn2=m(xYzqsVH$XRSZ0fXp|Q*7{RpAA>!GCtuC$7SIEa zp4iFWz=du|7ln@#x|FzB&MF`?9S?I_xyw?MCCIvPKRQ^>WIN$Gob#ZIWKH?gMd1_3 zZiLwbYYNDmi{}WenT&g+CVLXuO7f+4XR=rE&?%F}GBO_aqjSU7f(0zZ6DudRfC(t> zV4bNgFAL~{N6*6KFL_Hgp2G7K(U^>~7BCEto-I)A$DeNYTsssyPS=%VmBLZ4%VXZp)pz(_oLu*X=rOH*Se z6M8Ps(#oa<6yu56ynmUjYIe{?;S{8g^T{dFtO7Cv@n~go{KMh0_SVhuK}5E*5Y( z9_`}Dm<7Cu=bfIm_Rd+rEIexIwXv~6HeSMW$J+3KZ2^7o>>d=U1!VC&K{O2p7Ep>u z?M*h`k&WqizWc=5Yt{nZ!=rH$MQQ;r;~Da1XwepM4W6h-R?5Z<6kpIAc$Cj7AoDgJ zI(E4{`RW=bn~Cfhx}YX-la0XBk#4&x$`V}^zJjbjorbkYRsor_@Nn*Ag~=vh&1AEX zO@U$Mo-^4fJer7ByER=DzKZNf7A5q@tO7Ef@z5X3U6z`x4B53X+T}9Y8F-kP2fpjp zbWu1PS*vJG0ht=nn#)p?y@qV%!qD$b_9`B&cPibQE(+%$djbP$0}9BL;@Ok5xhyr= zTx3m_gq)geYemTEdu~k^g|8#)MHw(MW)+a>f`>I`xyw?My@9NvJnR*j>?J%r(+_#y zt?8ohO=N9hv>B;@Of5Wgg>sYiq!vsz4_Ws`p~sl46&_^wejsaVWx6Pwk8JS#u>Gun zOg}tqLM(AvYO-=Gk%kd2>bTR2$-WQO2jTwZB1HC&S| zMAi-iOm8`p)yBhv)QAsdtpsbjC|rc>V2W4WS^=33c)F5PlNDjjWQ&oVP8q1Znd~4u zYHw?04Oya#!ncquBRO>AtO7DGMRztRQqem9A2@fFXF|yg%^cC{1)x_zu!r&|mU9+x z3!cM*)nT8?s-_=Z6uw93hG^9Smj1)4N!ct(jd>sG>S)yhX8*(LXR?Yk(M910NMGt| zV`#M>NWdzS_Qa|&YY3gc*v5sN1x&`%Ge}2okX59KE($*+^t-)7xmv(`cw)J( za%pPJTBJLWJPIpk0l&QUcVUg*=vH;XJ~YcH40WVjE#O={o>rHp#-vC`Vu&#;X8||k z=}92XW%TE6RTqUH5js7l)dI%-Bdsn?jrkbqI~Y=0EnwC^()xv4)kWbaNUx(P)dno! 
zOgym-yk%0F%PNnPnq)n3?xsxD7%bp2Jh3qh{Zi(MGukdx(z+=8lsMmFnmAbtsKgWF zR4AZMJ_`F&MVjO@yalA3@hoQnUvZx!38Zj_ZQ3?YqL9K3&|N7Q-Nyo&;_>@foC3QE z|L8s@*@&lCypIKR!Q=P&O85C^oJ1jopA+W}h7&!4DD0$uGjR$-Y4LATMV0 z&};#>Tn5&G^mpbWQ=833zy(PQI0)Z((<0er92oRY2wsa<(@`Bu~DI$YkFkyMYNrgR9BT z!9$e~{4UB8T@-$g>0ol*o zhlwVufJ};qiKg6TsmXpswj2Yj({d)8froY4z?@stMd44#`od@-p@7UOc(jmkS!%K^ z$bL`5)NiuS@G$i!Kgh_>u$hm;`auDi%^!tV9LD|vdjPB^3uBA$>_gli756`|{Rq&; zh5|CD;h`c@W2<0?uL<`x_69t=Agn68C4t{R8$_0$9{!6_8nv zht`)G`zP!KTA&hO>`*);Ao)dc|AOrw*sKCFX9e~ndGzbpJ!4cOca2g1qo~YSc{fo3 zY61$#oQH=dutuJGwK1&EPG^&k`rO(S6YwxnCsm4H1GW$SOsj4MWRAhZsyj8dChSP^ zK?&uIy%`V9HThL>Gq6J_W?E@h0hw#?&`MKdYr!reK*P7OQ}G;1+}{*;JJ?T2h`N;m zG8K5}R;jVdb4xO!c4BM+9<`I-6<6nuKBbyqvkJ&m;DJqz)ew9Rw^C0uwkIBX;-IZa zZP}oU!tIeAMm#;dC?K;N9yY+cwp2w;JiGqw+&{Ybzcidzr%Wde8x$toc8C?2&F zW9uvK+OTvqwhtcIt18$;Gy)Bk_U;qBWwk=r^T}ZGB4r5a%ya2*mYP|`!u#3 zkJ{$|#ccw+KXOeI3dpp|?CrRawRkz@uSv5Yl{gOkEV}?C~{0mQ_IJ^dKvDS!%Kt$R=Q(^2wQOC?3iuDJ4$x zmaxNb3CB_Yia7t)0^NJP1Wg`}fpu;TqG^Fl1}ip^^ES2#UqLzM?$6V`7D}KEgH#vx z%j3Vl5)PI-HoQ@L4Ynnc43W7ScrH08m~*`MI@{$=O;8iTpL|0rq2o7q+lEv!R2DK2 z{$HOvwkWYpk7W|%myjS+){>HY?Fi0~csF<$B{K|Vo9!UR|0kRvpAm7+5bfyjr+k@8 zgUS9awQ0l$DiN2Gq9XjZ61Z2k3h}gzHo8P?H#p?pY?-PKs?*s=kY%zI;-v$n;)ss~brHPDTLTr6<3N%`BOfswfpvFXw5x|=@Ro40e00Pc_~kr3 zDlx&I$HM<@i!l$|y68}!@DJnvi}rcM#AEbSEDAUofz*Dox-Z8bRA-K}?p0m!MRQBXlf{q&=@C+$w zBV|o>K8?*{$sBu(3>)2+gVbb=q02Z-B$?w&SlbtW$mj#I)&$RUoDPu8aVyre+ZU`! 
zw(CxKu0JGvEaF%^T#Z|Q>Ph-X&sw997d6G-F&2#Dw|2Bc%1$ak>FhT*XS^B;(D5Gf z7V1!09xYjOBE559$g<;QG>+`y_u{Po?O##Q^zMB4jbN~03Vb!RpP{WQ^$Qt+ukx}X){?HJbUqeQkCpjj6W2h zaynMl)N-Dn)^_*@Zcp0uP#>(BtO(Bwl$*M_d}n}-hN%g5ftXA+)%*w6zR!-!B;#aZSBOS6LV6r;ni7t6nV=Jb z_v1I;j_s}uA7wl$3uzMo@i1N=!xjzqeB~T9FLzhPV$&6?15mry~C#^ zj^$&61rw;^-hVlK7~+`iXiJIYaaqtKRlg5);-VVwXD^v~J2k;x5DhwnPre*C++wd^ zlSCG@i*wSHaK7E~E5=k7-Khz>B6yYm`%UwU<3N5V;2d)Cge<77F8?_s(((M$^%Z9E z8xq)eQKsAn<>IeHtvQ}N$nKRqDN`C=-kuRs>v-JUu+MJ-b;gR3VJ>x?MQRz~lku|9 z9l>KW!Z&b^H|-G$!~`0emNI;+xjL5IY_DIF30P>O=f?|TnLBP^x|U@dyrr^V1^S{# z=#jrhykJgV!MP;lDG3fh{oVe$F%LipgtHOw0F|0R51-faHd4Lr?ufb9>;-Q!Q5Ft@ z=tpnXJaJdVc?ejBr6$mj)39-{&@SSx+(PTpr)A*~1UChz>+O=K=94@t3x^{ZyDY3TrboPj@6FH0f(ed5a1`e?3&E2k z9{pUN{%6|cIaxRoq80hqOuK8u2d@lA>`ibKMAv)5*ndLAZl~uL^uoeqSvVTOn|$l0 zlH3sS-3!By0GObcEIh7;$RXp1r_u5P4Z+5j!?@vi6BhJb zVS>{U%w)dMbl_NTl(oX)FDWsaLv_*o3>4ED_SG%tN9=!8_@5?Ca3;iUJwulm8gVZK z#aPJ7!dVDv9~#DF$HloY1exG$1iv%Fs7n+wRtcAH2@kZ>W#JqG%2_;!SQ)W9pQ38) zFu}PHy&ep6u4CP$mfz%MSvU{DE-k~{gf%bgYEP?WynzA>_k-Y8I43dj@GQXs7 z9@&{8Q|CkUX8EKU`O1j(Zwi|NCb$4%uU$f7_KSG;F$D!*V_~K&T!>(3H$4zCHb=bT zh=PKvNP`J3g7|_FPOZvuO?Y%kUXg{1Av)7>GylN#TWmH@P0$CT7PGk$>DYtW{2+2SdPw|;2&*jOhvTzy19X!TqlyN*X{w$CQE{C`$ zE`%IE<+m!ePf;cdS0MO+rxO*m<2yTqQ@bX(65=@eiORvTXP%7_$!uA;3gS*IsBw;n zIJC^11_h+~F?Rn>(N7hD|P54|P}S0iY`dO*(syG8uGci1&BK|hF(7y>kqok`ah z4#&d&3W@O-#)#3qawylP2Ja2tXNOnsWCUyr!xdP`(7Ul#ri zkzwN2{M0n!ZV0rBGr{c;4e6X}y^dYF>v@DzK(cTL#BTe9=NiYe5HR$oCb$#9NjzYx zBpqw=IKcLBvOpH@g7|^utt!iL;*Vhq!vuFjtmAV|rNQwX9*b3y3uWORh)LbTEgYZZ z7QE_AO;CcM0pp%JspATkf2z$zvM>PR7}|pbjzw5NkeXm1#M6U=1;;sbb6Hp{3xg0G zGC4F($M=IEH9;wYF_(stbX-@QS1^v(H*aB~phV9+x@tZc=>hGv;Ac#U*zDB&f@Qov zmq4!@!sU{K=(t$`nQMu(F&Owis1A|Lle0z;;Mh!cT=eeh9TIWIa2M|@sjXF zlq`{jdm$zs6#A`W|69Y@VS@V*?9)B0P!5lHU^829CQD^uID#REhMYTIMFlfNrY0DH z;EIW%^c^Q3UQqA`yRFNVU(K?*XnsG+TpW%a@8t2A#X)L%#G9;vHBz);i7RYYWREwg z3C0m{M^%_1cKS8oUuTE4)GAqc6hS=}8yc;~M(lV_7>p!n^BBZ(KDJg%b3A8QXqsBx zC96@jeH_;Rdv(W2L!&BB)gqbwf9~6UG<`2kh@mfeSAr+7^T9EpAvpd)yQU$eCU_D- 
zLHu9W9oye$MKZh+3;8`Ur3=2{;lHcmYk4gK9YWzFXElXLbT35Fz*IUqx@0lLsC>$ zQMzdU5}}J12D#&SW?emYm>`Sbk{)47bX<0d)p_!fEKEnxn2|uG>v#sOo&KDf;AM#R zEacU$9PhKu%i=*FD=}=xC%R}p1I2kigqn05)<0B~31%Yb&J(z*+Hs#y)dN3~DVl1c zi{`JO=yh0Vs*bgJoYE|6f>{WD%!X7sZn)j@HFCX7sn6-c{tNa-d*@jO9Zz5vjpnn$ zm8mJquzAqZ18Qo$byEY-m#z@p9%$)nxHX)=VL)RevV%jHp_zM z+BcX*lm^GVP*AA}G&GKi1?4y~6jbu9Ea)+<$G?IF#~1MPnl&|n)}YVs9|VqN_z@)E z$wG4kkH?PVxCOr~n4krM?dWDoq~q)OW#M~SXbDmKk5EvKeeuhJ2{c7~9gEsAhhG+Q zvY@T4hho2Q{2jk6m_P&Zg4m)Qm*AI$A7nwhXI*07bnK2_7EDltphvug0DGlz%_ zwpZ%9lU*aw-~aD5zkgC4w@lX!{zuP2ivM?bmBKr=Y#dfu{n?378~bKzcE8Z;qDO$e#zntzfcgL{8ZA?71 z%%(R6{GnCogk9MmP=oyRfY9e)YjCKw@?H$Lkx>~@#M5YJYM8794Leb-Gw(;0zcjD7 zY7`fCT3ro`7vtkb6R=&){7inW{E58ulQ;kAyyB1TVp3l5pLX#~Uh$J-L_LefF6N-I zi|5eT#bh)ON;3tGUC=l_wu=|g*u`8lW2Jc=ja^JbV;3)?v5S|`JS0sPja^JfV;3)@ zv5OgK9+qY%8oPJ}ja|$_V;8TYc|@8rGv%by05tMZy#n z|1F;Sr~)>7s=SR8Lu{i;R{Jcs9w?tn>Z19XO`^=eFBoUUAuD@YcrF>xfXP z038ssKUH05?=v4KT;`YSqDHV_^ul@3-x+)I_aNQE9V6c-^1qT!g}=xBgoD}f5ss`E@D=i>nyPti z7+AYuPeb*U@%`eT*N4ttPJEShBi^wKcjYK`?d^ALRJUE7!p=L?sM&0n4n=$Jv0JCc zP4b%V+@@{&79E>sYSpi|Q_EJ{wQk7MUpk)Zd_w0ot?TL*T5qb&?TYJ!vzZ!PO4X>! 
zmNf~xH@CG8=>Hk?d+Y~^|0e3@lLb}bxnxavQGA2ek*bpaG@e|AD`J(;{W4Tpn{lCP zMlK8Zuf72n#Hzf9aZy!ybWx~^gR!Hk=zCyz{2LRiw32aC;fqO!@TFLl7so0*oOmko zE!z?V@LzIlz%8!^-EmR>M|?}O7QL$IW?sSwr;;vV4R-NWjwjKih4(fLOMo7v zLsc|;zc4P>Yu&0=n;m!VP}H(^lRDeyH!j?+eS=K9j=Su-Tf?T!nzyLAL%~jUYc#6g zmVJs5Pg6@PEf*TmwPw+JT5HLjab8oqalK(xo^`l6v6ddmg;H*}Ibah~uhQFi zG<1v?I)ny&J5LrWtpjff4e%s7l5#bVhAHg2F644HcI1C0=K91K-j>={_}4cFyq~uM zDuqM&xh&xVj@1a?Bz?jqv>IVQT9a_U_>-0lA4(n#B%kD1lH^y?m^hU58nw8n!@cSd zuMQeZ3{;|y0!2Z*7w4V^+Q|UsVd;(2StC&pud>=OV^3HS~;l^hnNRW9u2$_6+PG2u)@^DwXi?a) z#!i`nhV6^ma?W_d6bk(r`vOKcP_x%_uGWr4xDL3TquT|X**sIQ3D>Gcrr=~;DXt8+ z8CTRYQ*b(N7;Y{uhwIoXQ*btJ1a2N~3$9b^OhIqlDBMC^6|QreOuX1h#54_rUo1Y9MqcKb}h!MOgoNw_t*x*akFJ#jbT zCgW0EgN~VkBXGCjrsCG)8t$E%h$Bn`*#8u%s??L`?V{sL@t+=k8$vl%SX>2eE3WHlzvg6K*mt#Wgsa{NrxJO~tLpH9m*@ zSP_mzJm=07aZryid^OyxEx7;&kB1tXe!_mg3yY1fm-`im5c_A%44AQ;VY> zPKLdzIA^nOru#T5&Rk>mViBP@j*9bxn1-w-6osEf3`mqoWiqxh9=>r@lr_e@)+QKp z6ffd&s(d#Z^Vd&dt?Z~KHttQuxr|*# zt)d;pEEm)8X4Xiu=2((fTry8l+Wj1=(NVXVY0M`WQxr!ro5VC+%YM4j?1-^sK1Aak z;`-UKD6c2GD=Wn{>`n@_nsVg%sA zdoadvw7z7_f2cRbakRd)SaJHW`76dz%sgXmemSJtQOp)&`bEZ3DVY40tj*^sM>%yA zlZt70NsQx29gM0FaYiH@se^?^OktQ&RXajdiD-0V&rkr4y2;cfVy=#iqnPz#8a^63 zvSZJ@;;~B=CI9=-2^`gKD~x%7`cxxy6tmTswbYszM={xDvet=h1Tl_cHX4)rAng7* ziW#?DadzkMl-`Iridk+0=;H)6f-k0STiP~m`%nU�-oaa7?X zE3nq&1)C-mB(6~jy-xM#sQYcUSX+6= zCB`wtny^x_=EOWZDpsX2TPZ;$#8J$!Rf^MrodES#M=^7a>Br2gJ2{HU8FNv*kE57L zt7YxgcppbGYmC`G-p5hQh<6od#JAx-j$-B+Q&kp<&r!@4V;+h3aTGJTQr5Px2cY~o zib;*h&IpX7m{IR3&Kml$a_T5%p)uK*0!J}b#`KFRa1=B3eOX%-Q{X6Oy)g$3ya#2gFs5qnJ&`92GmPqnKo!thI`r z&QZ)tV?JWTK%LJ~DJV^e(_~ice2$3J`O1u(6FZ+{BsYs}v??wL9A$IDM`AvYjoVR7 zr7?}xgihorX4uDyb5LyDj$-B-^KPspM=?2LzO5ZPvZI(upUB#ZSgwv@))@2kYoQ4{ ziW#w9agHW!YW9v|<{5K3X;TAo6tl&cTVhTf#Z3Ox${;rG&m)RSjcFQl>L_N^XNpt6 z3R;cZQOrVPrba7{VycXJE0&IcnPC}yRYhWTt(s<%4Q!ArkVl!s#ncT|)z zW4`|}jK7XzHXAec*N`1YF%!O)wegJR>a31h`BaK&G9h+WM?~tZ!@d#uOzfN|C?mcVb7_k(&NwPl^Ni`p zn-Sf{QOp)&TEvG3M=_JXleLwxcR7kljoB`aBaUK5eXlrOV()Skv(T6`;^^oorplPl 
z;xz3jW@^rI8pn~j5yh-G=Hu8?9L0?NL2(X>J;hN>g)vvh{5Xo)YD{_T4~}B8Kg!x? zabk58v(cCXVlQ?SGwvtFDT`@y6tmoz4l!+xVoJ6s&Yaj+9mUKv=GK@tM=_g>nI0RJ zqnPApSz8%KaH}EVoHBeoS%7m*C^{Krp%bdu}3fA z4BVt&;Tjdj9_^?Ov&NY3D0X!VN9#(zDbD?|Upk7JXUta&k-C$km@URU6Fa7(n908@ zb?dl~#uUepx>Q`F?V>$L#TvC0rr~B*G)kQ#1KvUrjpnk~By*1X(S<5wZs`)DIEtD2 zhvK}^Dlm>>){AMph(}6UbL5o?E0?i}b|8 zk~585Ulq(cMskyprO~WoB$I(M`zB+jQtKGWl}5fEH#i(4Svp8D+eNdEkt`G0@Qi5I zk$T^3#GCAls6jYFOeiHrqx{$)996lMVw&`e4Z<@@jO1J+JI0ufk<1x+ zYmDg_$w@;n+xQz!5UPS5xu#*PG45O*#Z_dEq0x*Os(3Bp!08ycdB)vInaHMN;IV4R3vVCm7j+jv6UT@^1u|+sWa_n$oHae6MPs6FB>bJs}+Dz=qgQJ+OVw&`c z_2(E3W=Dv8KGvUOBsUs)AL»yG;`xSG?Xx1^3%Z(vpW%!tLY5jjF^ zHsV8;Cn{k_hzXKMyq(t{#zjuqt?$u&k!iY4k8$q{1}^TcS@F_QC)d_Pu{VGVYXov z8B~2Zl8w}e-IgCFVa2G3h35#d(1_<^eKx%%;iSbi!mKz%#z0ya}CElWY#f~GmZR$MW7aEj*;AC zWNEyoVfeqbB!!yCsOxxj4^XY?iUN*F~*!U zL1wRxCFB^%HAWUh(lMGH@swibBIy{(c}AA=0!#Jj7|AVxq-3;HTpm#*B{X@W%(jfF zb&OCPNImPTo$5vJxBe~qj-ZArz(QL_N#rz{?#WBX5Y2y}k&cn9GI9mADAG|RNtrrb zW(QMIA{|ANgY`zXW9k&?7|F3OE9N~v1=2B+6-NHSfknl1jO134jn3tbu?ogfH_Xm} zsjAtaH>W0Wx8bJZ*5ewVLAowzJ+Bd*y6oEpKE;AY}B;aXkDsS#W$t_-&sS9B4l zMsUM$b8$Ib$BQ{Nf*XOGhuebd)Q3|exKX%;xGG%dOE@)x8;h&JZN+uHlv5+Pak%BU zyvsN>g1Zuz;8x;lUe2izTtD0dTqUmd6`UHu^~X)Zt-;m3GE>kKcN1=GcTnTO_ZWFFmKk|<&#g*YUlwxY$Da8ECth#-)Y*#PfTN#k+K$!ydgH(47BlYPe$tn=S#5r32MD>ykX`;H|IO4n0+)gIhHmk+2z!Ex__DRU&MC`j^qC3Hygi}DM#@gtN97@WdD?d!vm;e_5CZ2 zpBR6B#j*PS!{#ggBivt;u4A?RxyBFY{^A|0`JC}PvNx-7*|FOGq;lE6gdJM(j@9wk z7+=6KEAfui{D=jLpCf;2P>$94n`iujSiX+c_upcCSu9`2YWtHH%Kp??zK+#=YWxk< zub#IYb^p*lMlT|M)0q9dh-0a~JgeS?qMQBB?Ug-8nGbxG@!xZM@s8E})WwQ_Y0SQ3 z_3hUi|6++pjSGk(hnQYJRKn2azuozhkxiY=!I(r+URZR>$9H ze1~|7&as*ww?y$57Kan(j@9{FZu}bZr}A;EzJJM5#eX}NuVc0Sna0nL15zhm|AJYuEd*NOReRQ^Nvn`e9`=HF5F1HZ-iU%9{9 zr(-ogd6n$nO#f2%cC5aCYW(b&f5+`tVf>xPh2hPyw5tzqTaBM{ zP?+8v#fSEteNXnMHwepeNB9nb-)Q{E?%}|PV>LhSeZ{Yc)0bnl{pH4wZ5ZzFsQ4lM zB_AmM69s`--P;i6d&v-Yh=IuzG3=ttmao5|1e+G zs{9>w{}8|QL&cvKmnV+Z?W@fA&uCvtpJR3XZ#Mn|+Lw68>iV6qR`!?0_U%~BR~kQt zZB{LR9LJsP=^M6A@yp}##}U3}n4jhve|}v4IEoL;mz?pv;_}C_nxB-){?W019jo~@ 
z#(x>-H^*vz#7BzXH;%uK;zRo88Gl}k?^w-mF@A2GUmUCX$sf!9nQ{DggpchfHU8Xq zf5&Qm)F+DHD2|_w)%-%^=l_4)y$M*=#TP$3_p%5oh6?V>)lAUbQ_I}$H?_>NwA2lo9&g6$XUZ?I?kl`r}Ey8Z@x z_|=j>S=ZlS&-N_yJL>PQ%WtrUUncp3uzh3x24j2HQ*Hl}zi0Yibp8f=_yv-`Sm$rB zXM0y8`G08p8|>j{{XqSPu>GTdWUy!ZS1$Qm@i|V;9|n8)Db-A0s@u{d(ae)eC3J^bwB)W1&K&tOmaDBU{CprB!9Qg z-(U~FPVxiwo0A56>X-8)^?wKbEWg2?@>fd!2yB1p9~$i8r`0h17;K-}{swdVthfJ? zzed;JV9)ltUh-ek~e3v(|=<6>#@Bi z-(b)7xJ2?l!1!YM4fgOGB>zEdpPAlZtS@^0%KMr6^L2WIJ^W*me-Lk5Q@_EOzx4c_ zQOooXp#Rgp29vMWmr}{ckHN|IG1$X*onrcO^k1eo*rPvR@^6RznZLo{>-oJ}@;gC4 z(;MvJXP$=s)+H|~|H5FL-Oqs7=5;B-fd>yAmGGk#qo|+i!Co(F)Y>>=}xJS)A4Q9%*I8XbN zYFFWTKQav_^IqJ=l}ta}03g#~GSA>H?q5v#iO$nt<_TsgneF!DP9swq+>$9_cUJ?r z{j1V&*trUaoq%LO0iXupxYy5qHO|&KLJUTtGA3&MDwaMn491yz^50}UF-9>A=2@3y zd@))v48~208p)WY83yAXMbMzKrCSZ^oIC^K z0NH>FK+Cm$b|)YakOQa$1mBO7XTUN*E}#k!@&Hbr0rvp%0LK6wQgHGNZ~^iG)qpM! zqA>u;fC4}bz_AV|&wvy_A)pq}>mi)H15yD+fI2{UDn=$C4NwfI2lQJHa{;P0s6j2JzXq24#*F;N=NU@~3&V|e_msmp$+Q>L<@~*DT&?(&<9yi zg9Y|i!7hBALConPmWwhGATpdNs4JP$@bCJRZpGP?PerjHgN8c!H z@Ogv_r0^qnfRe%npGUYx3Mb-;CJGyT9^tG9Sd2M;t4?O{d4$WQa0(uSqh^E8Bb<^# z%@<-mqOifiEe9piT>dW6+8K;*de}ufn66L?uhlJX@HvD-AY3bjUAn;yHp2GCnzJ8d zF`m=yZSXmSskuT5S791snGL3}FUm~KBG@MYWu5}z)OFPS3MP398*GGGK`-GVDSSo` z0E6Sk42tVzcW@A(BVE`Bf2p}n3a`}GF!(&eIS-*2JuCI}ZZJw@<^YnoBZXC)l0nf> z%Sl>c>u*N#6S8TkKW>BhgNSe^LFu%1C_&t z>gBGd;540+!DPDlNB@Oj6$OJHM%%GS3R{0NvYCs5RF?iQWiq-b8HKHv80jupi4^Gy z7m#Wcw!UGcM@<8Ry|2}UH<)?2_(vC>Avu#i!c0f&8YpbN%Sbhox+sU4REjOtK*|i)H88lbOtq9*txapNlEKgD?GrulT;-$4t7i;6LE6?}%#Fb~n)4N3LxvX` zs!U2gf`|DjX|T(;w!4;mCIE$_Wb$Jye>$G*r=-DD<0*duWqNLZL&+HIOs(y17i)0; zF!&ka!cGF(YLvEJTARj=wPmGKTYK%-7(C@oXtV!_&j|&uHrmRi<`}m;S5eq{k&%tW zilvxO2eo}v*!q%@a_m$~iesmqRL>i9%?+lZUHs$VA6zJY?i0+pZGozs!q!1XHZpgW z6ua&-RZE4fzs?d%f0Ej6TcWfnY`w}znyQ4vRNsf;-U6ZlS%7jtz$0*P0rarrfU^M= zfR@-JI01=(9DoCcWIr@G?$b`s;?r3LnNS=WqNhEBmwh_+S$ycbMlz%DN(WOKyzI=G zt@!9+)>BB?E?m!T1~;5J^U|M~`N}0XOivvK|AQ}e{)Nq4%F{?2G#C%fvR^1{6*4mP zp#FZG!HyZ79B7tWnW6w&6(6Kf*vj6*6yL!sVx+;k@D-AI5?&CQ2J6D7X0h;*c(H^` 
zgH7R!B)0@^4!H)K!q<@-x*bnHVYU(m>vHBiL*`bPj5RY@8@p06xqEBuTnnxh>CYnN zTA$QMu{8kH0m2`{)&P(OC>-QkO3$Kh>h490Tu!>0c8N+ zP3Sv-Xh0UA91xI!z5|E@WCJPyEjOdz01^Q?fJ#8{6WAI7mH~1BRe+Euu{8qR1IPm$ z19Zs5)(GGNKs*N<02Tr=0c8N+Y}f!04afqN0|K6h4FKQb z3R^p$M{988?IY0=$C6|UiLGxW6WifB5<#EqqzYRbx9X%hB!X)6k+Q9r3zbbNG^(AD!F~MhorFeli|8@nRY*B zBBoW?dX|waRlej-(x)a0TSpAHT5@Z3jTE-l?a*m6Ndxe8lv8g7~7uGcmCSk>sX z7EtlMF=*FDV6t;dd+>|_~J)+x4VQV`hS*k+G{Q}=SW((@0gs&vmfk~Z2n>b9C zY{9h}!wIw=dKsKn4#fK6AF&f(i_T_jn;gvAl;5i{<=>L>vk~j-(DLYu6;ggYj&3P` zi^i1y<(&FccR_#9_xSh%-=8T7(y;#x$?D88i{s?^xouwijhkq zw4*+;?WM7n`cO(aF^iB0dQTr|R!D5EeT_Q6O(zkOqYrjp(HJ_QzrYBUkZ3liat%^00ezLTnoDE1T-I)#X&#C8$=Y0x7_%9Dh1FgvMEB`(9P!~{XpFIRr>EQzfT8QIJx6QI{uOPOym7xSf&QxaQi-)JP0 z$zagac+Q0xS4wQX-dLtg%DkjI?i&(YCm9*YDS{UoFL@6&mEnmOYC0*gwS|%R#4F2$ zgEOo#1&j%c)XL(#MgtGlaQRZWr3^s4MvA}psan-O)0lGS6wfMTAvU&Ch1f2!wTF@T zjJDE?K3L9(i+1S#Tj14?X>nY>ffim-Kn@Nx`d$jB>+!h7GcL`i@ugxdTu9-dO#L+c zR*9`I8Hqgza;_yAx)QRfZCc&o5s|`Sz#qds{5C z|6qkdy5yAL+6Ol0moyUXS7Auf9uI2lmdiJC#U%PhVyU7lVzBT>lk3X`%cQb;P5_#x zo+*O5>LV0|tta*~UsyYrgkO#tKt(bZ7XMHZC{~qZJLPwjtvbVA`!-X9olYY3b3J%| z*BJS-*CTXEIt6UNGfJz*$Tdj0Rl0psOgkI7tYV#O9*LkRylEsid=gt9 zNiNo{V~ziiBB$PyhC*}ad(2T3R`;_$tspg?q=NHU}g$i zHH@VDXhR%q>^eXdNuQ|UuCVnSBl$`Dd=eomVPj6a4{8k3g>fK-kWV#<&h21n3iZ^O z3!1td_;IO&RER{GBs%v)XQFH;JncoSWHDqo{P#&&WL`!hXavr;X%2<0lZ^DtxXC4y z`2&k6WfZoyFcP1nJ1XN7j;f-JZS`TQfidV9+*f%K#aGQh><8whXWkkO?RQ_`ZN`86X;v1t6K+fHXicpdQfg6>Lob>3|YI z17J`->JP{Olmf)7s6XJJPRiy63tt;M?XLG=!Iu1;CJv-t{5ZT-!nliP;Uzv!Oa|jO z0@Or0(HDIvg(=XZ(LwOtc&kAsnT^9y_Hu*0lQ>lpq|-_A-e)eS@j@w+7%Y>Z6NqTv zV5i6chv}FEGu$yIdAQjn`iO3{#v;H6@8UtyX>(vAa)82x>cAxhM_!%vdEG@C%QR_V zjpv|X33wrm18be6@r#w%A8@Kq)A+n;oSrlTaXiH|BQ!Q?e4XxmkQwrEdD0BPp%v3y zrm^VaBkU-f?+MyJ9DvWt&u*ON73@WrrbJ_RK6pAo*GJ`pY1!0gx_)bZp~hZmP&jFW zv(j9DT1^r2H2yDXTEn}gwq6>WH2g?#BU{7REG@4Hc&1E~sPTVE^C_HEra7VUe@Qc@ zuR4FYMq`u4w}f*jweb&Z?Pa%JnC8j(UgQ7LK9ex(GtG?}d*!3DI7dFmu;?<4t-Hcr zY0g`=58$6MO@+oL%~}20>FIBu!Tn;IZ#4cdX)e~khG8(0D{l?Ma`h1mOB+_IR1)1U 
z#|bDsp9vaMl6TzI1#jXtBVQ!t8*gy6n78~1BVR}20o#pOT)0Zk(%6Q6gbO~9ushvj zS-@t-MaT2AV^o^u8gp&M#Wa}JXbpKW4MARKZ-CoO3D4H_yeX=f;sIOJt$4tHu zu5Emy+MoU$<*}hE4J2-ys@v{%jbSTZjoVP|^xSRuUZX<*l|bb|8W#Ti3!TR?5{`2m z3&30{_&G}{k0xASsS{+7=-c#&z(F~-`8isoRHkTpSnzKtTwgMevyKgVpwoI6ig`K# zFjc-x(ewa;%(F^0UZtp(DViQ7xKs+4st14j1eZ>cc@(9(xEP~?*i9;b3Red zY1!mneAbSTwWRii+(en;pu|j4!6Yx(&e|XL@L}(>+iRG{tCl_4i0V3PulOI*EP74t z8x58%V2Z?FAQxx|c3_mv@n6>U0N{pXH7qiiHFfxNSIJ7w=?r6M|ggRAu5WGxI zjWhTH8-9Kn_(zRJ4`hzVZ8#GuHx6vS z#aSCrmaFN6FdlJ8QO^{2oV6S6;Q(9K+wShHO$NnTyHcca*CZeV>2c|rRDF+J9<#lH zF~?<~Ok-AFj|sbW=h+~Q#@=2$#z!~MU}Kdpva4F3(Q3>MkY_D2`5_5@AUCg?Dei-R!A-2e!WTb9X$0M^kJGX%ur*Kpv8#mYF&Bpb0n(P9SwYm!o#ci3_Wguam=3k4V}W z9VVYC{1%`y`U~qL25j(kj!FV!yqd&CyaVdOwlW`34e0V3wzhyda5K2ISJ=w`5yC+i z;#h;@PhqQCMt+Mf!bpYjo5YZ89fkRqkp}bBLo!k@$&z6(7|AD1K{AkWDM0MTUKj8- zZlqw#D9SX3a4Ch`Ov2}9$+$^lyHi-MpUBBZ49jh>eFh96Ip(MG3~r7z)#S{|M^@E< zE(NGNAQ?~qr~x=$$F?4j0w@I30(!lHZ9O0rPz0z0gzv$&9?*DNLP0P}tm>axv`w%c zi)OGu0WxdBX?rzZ{v*d=j$f-xvX;P*WEsq%VWDsz4#@Z!s8IJA?u)iw zh3F=LR6r4+4iLT<-2{*ZCw1keB&^cGHk02zQ%fGEO<04xM#0?Gis z`*8XNhz4W<$^ilUary;_17rg#04?A4vpWHafT^R^gDVCLQG_zDhH2|j=Jtn2DYn6G zcIqEsw~fW23p@Z}uSjb*S@SVC5(_eQF`4DKO{FzI2*abgc7C0@sOmk=ckwrnSTL@<7c z{RXUB|3F93aa|~dtpkkoT`dRDG18qD>{K1rU?OWZ-z+lPB zTn&YtDlp41mBHS&DWgQ_*>k9z8j|lL)44-ETxBq;8OHS9jv*4WIzF+2H-e=H5ZTqC{x{)dJfeOV2srm+0nFu6N0=TSt}*CV0NXIUetJ%2eeFQtllEXW?P!?ET?L zCHbYsV~iwhrX}rJ;aK7*9dfs>!&;5oNvr%p9!;0}2!+-USEkF*SPAVQk3vOK=r2q* zw6DLeuPOs)ce%^z5ncs0YK}<*ki7@W?ulz>Pi;e3i@g0G<`9qU42Z z@zOK<`QsYPDkJOwh^z4WWAbd=(0&Y}FWZJ5hW7)=1aJ~51NMCv9uOcJkOe3Q1eC%9 z0>lBb0TqCj@4*8CBm!~(m4M*)asLmn43G<`0)%{k`+tCY0C|98fDUDtxBxCdKA;-V z=?2e@%YngpcpAEZoS+s+!akv}Rl~^E!MI;Raf7i$W^E!PAKo(=24`WA zOGXkN0wBZSJUI9nOz{MI5G!CXic}~WPvW5yG7JWzRx-Nao-!E*gORu+`rs$y=27^&78U5kCv6T!4qh2ynaB#przjcJdVB|hURSEhB1>5S!PQa*=j9YPT zAa4Nb8;xM3KaCXa-qFV?24k?|)*|leO76ob0Y|dI99-l!NbXY(#WmP)^R`g)()NmL zu;ebo*$25nPhwT0W`(VcEJpU2sBRxBY?aE$0Wc3IGlebJGn8znd!xcuzKo2#Otrhh 
zRy8Br_0WkOw#h=x_wn2*3r%2UG*Pe2QrVkPIjQ)Bqe6@O=O&fI>hmpx0;ceE_L| zB0wD={B!s|fHXicpdQfg3-~^ObU+E90Whc%8%aP0pcEj!#6}XZ5ReHd1Na`rGzy3Y zWC6+n0bgMn1;hce0TqCjRep9SAQ6xQs00Lmjrs$Y0dfIVfRJxcf51I}JiswPhi_4T zfD4chs0MU7hWZ1N0R?~>fa5#VACLkl1k?h0eUJJBQUOJPIzadjs6XH?`12e-2Iqk; zDe#YIGkRE1!?IQpozgL~_dkirGA##<7SJLzTy_7GLkJ4F8VAoGR1D-Rd>pilYX`K4 zkHq2*Uq=``1++->cP5Jv-$CO89UYQyw)qBcKsU&zeAu1%aVpl)U~j`W>SfhZd?+4B z-~wc@RKMC^*wC!)oPn}aqKji{+)3@pI31(7!4!9{_P0HVG8%Cg#mguj-UTZ@#SQj| zhkA(*h9{W(66H^0^GbPxr@%=plIlana4XOe(9$Jbg7O8F53_YrE{nmzNPA9O)=2fg zcT~19IFIUGS5B~Z4Q)9p4z}d+wP+3XS$S07TN}qVp6))_l8G#E?8i)8S%Z73}y9TtPto24qm>P;*!IZW{x z0u{PKiq}#+d@W`TdSnLIia~vg(hk4{3~4!P*<^GVD6zA4j*dDhegI1@JB-0foH4p89l0H!?NpUcS?up4{k!~D=8n=c=4rDUU*IwpNf#focgIg?Nx}kU$|Vk1_mn; z>}6gdTQ_Sl>F^}|(LaNyP%(zJ3x_CbSa-1b2C$v#rTi!I!FPOpU1KdTwEXN&$wof+ zHR@jk{hS629t0YTlVu+~?1BboE7TN5Dq=fTN%;>GRKGEp@>p$bE-@^yZan2X*+ePj z(|1#UYi&t`^MV>#a-qkPB~m^iSKZS5LgTy(&L`g><$uOdrJHMTTH~RLqXKDpjHSE+ zlqcx4ezj3y>tjZ0gCCOuC$)gW)&sA51TshmRcir-t(}bIU8_A_lBJ~Ef2$=8Mll6f zPU~kcQP&#~4KKS?{hq~F8s~|jckh$KaTJE*wT@27NV;Fn-cr8sbtV6<#!^0y!|FJx=;CKpn4dJi={^};PVin&F%k*!4wNQYgFdyg!G!RKAP zYoz$y@F3U|p3>NRK@(0e0$j?I$l43-;eU2ehqMOAi7CECuKPtA*2|&J2M2ZV?FWCJ zlW@6|UxVcsN0-82%I`a;e9D{9A2de~T!pQt8Od()A_?D~@>rvgbodJGz!{u}T)d`A z_=m8%YvN~aV5or9Bq|p6t+$?l8tnRgzk)K`>FU+{cxqhG=uVjJyww;qF9Q*4Tpxq z#o%*@Um?ZorTCooYIquaUh&+wS(p_#2cdR@&mn%F)LtdU2WahEG(N9*dNH&I4TICn zrQVX*+Rn&xjr|fb!p423-cdE!qa2h|hU~H|4N}}MTy5PA_K17zD0^4lJJ23}x$f%* zv;EKA*N;j0fm+_+^UtFh2dMvh%!{n6!NCp$T>ehGU$C4Pg?nBsmGa%Oobim;;PX2M zu7lJcrJwyZco5Q_d%4V~eAxAvHRzTZtokp#_+aV92l NB^ys>hDDpaZ)lEUYXw5XbDv(t8sw^nt|M2y8YzD(ti$3PJO#AK^A|2Ll+KWsbCY$L#rbra zdL+PLT^uCh%!iyYt4Xh~oZ_v&)3@{uhM)aA8G0f}c^7n06jnyIP@=?EHX~a|#b!+` zWK`JwI3wpYx?N+HH%BdcmJ>*{bjfj1E5#3BJ)|irHP%H{9npzo=!mkY*`?HeGw$zE ze2&J-?eWJBrM2%UB@XUXoLx%srPwb~e6_}D>?f}Kooz-3Lwq8&EAiC#sC^H<6inyZ zV2~IUvh0nQfFg>A_t)DWgJrM!(BF3Wl29=eyRiXC`hiV-D09=54KsBJtaUA{tk^u#P8i3;j4u1eCfI>hmpx2K$`~jo_iU4(h 
z%f_hviNWwC;7VkaA-|BVxUWO=8Vnw8o0Ur5lllzZV2Gg`x;|vOX84{O(;1Axfwd}M z@&fRYGx7|k?}AxE@=oC82=WX@TRY(BeMFs)_Q4m`v2MTrv;h*qkX1 zc7jGFQ$CRe(kTaO>;esxWCBiO3F8TxNL2XkE@*d3wQ0I5bt3P~&%g;ffg>oc8wy)xj7%!uWrJgf8+Rj> zr#lUYW)ylA-z6q~4e{s#B2VL++N$vB&(%`^hdL@;h_h;%tPbgjUxhqzuXmJRA$)K^+@x8%u@Wl4Gfg|IO zzW!9`uE&vE71R0c`a#%xLCIGL)Y@3O@T2R?R(0A?KINgAq3d?k3D>t6FuGgNiPi)w*E%&bY zFMfUKr?&n6aFySj)3@~GzJ<4bx~F8`=kKkYT2ubvheuD&`aL~$$|tQ=6Yk!nn(oHS zRrq~8P0gnG=T(I}>)mso7nNUtX6A>*Tv)hV5ObkLT$mADXcdfzjIUAgAa(@;_4vWa zjNIpQ6rrEr+AG$ev5`WI-7O^kAy|bszM@L71mDYI>iuP^WGx`a@Pp_~Qfq4!{`+dx zxo(65#XiwWceZ0ARmpSUN3sNW>ZNMr`zpLLNOiW4dns&cT7o+{lwYw(;b&1UhTGSu z4Z~Ia72c%7_u*svT)yAKJJZC6u2B5B_|15R|3dkQCvH_WN8Fs4`3s{KT{ky&`n;Jp z-8gw!;*DwfJ0Ur5AqBZz-sv5f9nB9OE zhc#(xCZA?Ea;&0t#klbkuDojE)swEdcJg)CPnmkdjnk&jxasCwZjG2TcV6WD1q-9@ zTok=HCN}P__=LoyB}yg*DyN?SW5l-zkZBm+Q=mmjT(2G#_92QC_@3Drl2A}V z1-4?0c1gUAS^G!CyzF23tDl{EA|Ypnet+!2+1|@qsUdBKb};V6vJb4o@30V?p>KwI zyT{a!FB+(DMn&VHAB07V<)4YiKZuKQf7FQ^28tYD!3&(0Ix)Q^RpgiX6gb zsCRfkg_jRdxK{Jc(7zVnU#I+$tJGA`K2wFw6f$6oVg9w^4E@cdGD>;B!2gDWwAX8GhtV72j%* z!e(j_C_nkXK|k?>8b6JE84kcyNc;t+KH^y@CufHB;1dU-9k~&yaH1^dv zQR6X4NB$Wdepcfs2 z23!ZY0e}YuCE!6MF%vKcP=-+tnk2d$Ga1rk7zzpgpc^YlUB7*9OnDqI4fg% zt|bv@+8Gg9zazpjUL6~mOydoju<;U2craupMX5=Zad4pF1BzXed`b1F|Sl=vJ zQc3zTHf~3&b`wd4nP$ zP$J1t3Zy>D*iZ_TX%YhIk1;lc)u_-UN}ZXmVj)l^$+i^8ew?vw@$oRLT9Xh+-H2FO z0d%Mw24n@gvZ%ET`gCPc^EZ*~!q|FEBDOGtv7M;hwONtSUaUz7RFjn2d!S-ZARx7O zqn=U*eY#P5ElDZX86|qsh;^q}IfFjkDVC8*(!tnDO(HhyDaQ6->@i99B$@LxW5cm_ zST%}tfXv&%*o!Gpr%8;&f-Dsafd-O+7o%x8r0}CBna?1C{l17q;Kf*9IlgeT@hv0# znqv{dn9qL}fe>#;mcvqpty=P^!u6ac(ny4I`Qt_Rb}X(xz7%%o&6iSNpq(XwS*uDC z95j-^Xme##lM~%t5>sFWNw`o&NwB~q`nR#QwT0M1ZS8FBZ5?bK@!UygTNhhbTQ^&G z+l4kqVDq3B_-LZKkf@%C_!hF9r|AZ;wiGZ6vO3GLs<%PVS5>gyi%!cyd`n{p#V=r- zmaX-oQ(r)Qu#rU`+F<#NzsNh@K_)JG0yNh^8KSRZrEG_=tsK3BgjJ1wLodyp0BqJG zrx$H`(N}oUmrB~IrlKI$nTeGREZ2*SR11tdGDAjkq4AQ;laX9Aydp~&`Ha15qgXtV z+SiBnm6tIx0}Y}6ayQcr^cK9($p)E^V>;s2IL9YG@w?j9nBB@uY~ZP*{HMmdl2vn5 zxDZDy+yd7cbjpGQ1 
z*c=!bZ~g5j)j`2*luLT+NXJ378kp;wKso67wZX20_>=MUrmcn@u+(f8l)|(vO-UD$ z?qNqjCqQQajY+75v1;1+OP8C5VVVWs2j01M=Yl=)zr>8X{MYNBzxRe#Q&SiEJ^A8i zYr1!QuWBap*@=AC@_lHL+TPz@*}>o57xV=Auqz&=)vxak(D(k0wM+0a0W*ojD=Wld zCsZpO*Wa;z6Hgt=LNsZ$O}e#A>zP(Ht@f^NHPd>gRfGRWYnoOwt!Y}MboP5|5Ix!AKy$NY&141dkcFIdmVcMdk1?udl?&)zxpx`z=rG& z=n3Gypa?~n`UI2EUaDW5AG0t24>_`^@*qM4` z9(@J9K^{3VQz{{Yg<%1hDTR9jsI-p+tCWdCS0t=N&@)N0WLTwL`C!ts%#s9_$RbH# zy{VD}j*MNeQigtpB#^2=5)dvktZGSc^>~fKNS-PQ#AHYUda@+}FCkA7P*^Al#L8J&yP4HRSc@b9`;?Hlhz-aA!v-RzeTdn3 z#IzO4l~MrVyDS7`zQDZh^E%O4^0lzjsJwQ5BkUI|QapipwiqwAE-&^3UOf~NmjKrt z6ZY$Lm_4aSrLbdJyzg~n+-$6E!w)i;&Y<6IUkQ6pI0Ot#9fAN`j~A9?8w>Fvl(CEP zq7m52pQBLy8G)-Kl(YNrx&pLGegZ+6JCA#g%?^MbtkVWT z-WZmR!YjS2^@;1EL7hLH8i@%B2P}E zygor~hbmHR@#Jz67*!E7qU|480buWV+r{sGYI;&Cq!X z+VU;bqDM3`GbVTd)ngy!WhylkR;6G#8AotJ(Rdj{Ln+n$oPswp2p1#BzKP(=s_ zNg8pyFw*j6Bo%aV_{)1{h%9B~AMR}9F-WgB+?Ov z`3t$si<}!B5g(Bl6&s_X+@jz+=Euh_cElznM#ei5=ElZFLOv-jI%;l2Vx)uo#gU8W zfG4vs3f%&UkqL?CVKG<9)`?gxIdFje7{J9@aX)Ird9s*9M;F%XHk#!0Oz@Dqy~MZncBXM~@Uuxe$x9&ihqilr4ptTnUY^2CcY%92ZlNna!b zEIKBDG6pU(d9%sP#ZJq5vBh{{gW-Ay!jqdJz-4e8V{u6%K#HS*3yQfv#^uucv?PC* z=OJqfJqgMnR7c1icQ+@^}%g>9|RVV0Hfn=_4!c@oU z_!KN_y}gOrXM3u%a#Q1k?ev$dl!Fw0sfqBwCc+`|(MYR+!hST(c=T=tEZW4}OQcP_ zPn67^HpY9OJRvB|eMd}Yxh2%fa&eEy0L~A1VK;_mIfRJT#s;SJD0vZrV3cv?d zo^`@|p5TPzE*J=9AeGqkRWpdppmNJAF%GVrcUd9090iiV*eQ_&_L~)wzz(!l5~y@q zDTR?LPZF?4nIw?>Ye`_axbHp4*j+GU8Y$XpD~T4hV|A{~_8_C0yI^a1LDNr+@i{XV zx>mo>goVO-mW18)7BZAk&~<=Bi`uumbp606=+be}_3#IX@j3GrbY+qB`&2GcR*?+k zU=6q!)nlMF$0l{LnHFIox)i%$GM3X;cf$6ZKzTim)rD=s>q*3$Nq!*c%T=`a4UUyr zf$l&Xo7JM)Ypov$+Qi?3HQ21t0np(B5AIe&847UH*3uq;m$^J^qj&3j)7v29L~33e zuoRin{EB*xP|;!&N@mu&cR-t3%>ivz=>eckt=MA-rpL&hjBsl*Dc5t&)Jxn=gqlj6+HE#LPhAWgFKc zwVFlFb1aOE$6}R;wJ6dtKPocX#Nc%yA74K_`4QN>Ma!U87ufxMnl}@^LADkx{Q_HE z5a8q6yk(HRSu20PK+)oY09*5BEdyKm1YO`OTG;&pZ2m2S0$W_ryj4ImJ5~Ujzi(ix z7JdOOFRBTryp4+9n)(Ur3nGI=D7l<@XmVbp z!7L!tkG%weGMS_wh9-&8X1sF^VR8Z`4Q*O7ZM=`Y_!=KOEl#zxK77A|BUH@@Y#B;r 
zfGH5DVt@m_M#j>MYCr>hir5xZE*0Cc9Of~~h6SyVL`S7d@wvO_MR{ndra%w_EHxEi zl)5SqiO2p`txe6*7c$9ca-dYnIP1UkI2MDWVF&;t+~#BRwMjn-g+Z=4pFvSa=E_Uu zI=*=jHiz#^SJJAZz&VbnGCoJVtrw~-2~JCf_=ZFaQ-iT=ta?Trl6TsemqX{ZI4U6_ zDrSLWep1X_>3Gq9f>#n&HyErZ}igt-y2LChW zzso1ZM9jgkhv8zEM8?O%q>hN_s0A^LBV!UBvGX10${#J$cxpiov<`2aqdDR>uC?~%4!Rl3Thi6;X-)YFz@_`r07IPR1Exd zl>2Vgz)K>clOlz6_$&0Ns%BiXA&vNsL^8E~i>X*KJO8?A%4B4pA}4I%=LHGGT5+T%=>}LhKgiqCqalc401e z5RuE`A`!tEb{V?rf~drPj`^`kG4tF4XvpP`q?o9=v1kgdCn55#q{x`LvU!`Pcfht` zD_4d!(bGhOyH0as=Fq2eRwV;z>Q%+*) zBXMC=f&>56MAXkAZO8p;RAPec=CT-V2cPVNXB#hG->hh8r#piq ztpA`eS!QE7VZHeS2D+U)lJ`jYIPJLW^Q9!y{6LU?121XLkkMpu=g`T;fQ$=Br85 zbvUt`v-kH;VBi(4{VO^H6&(Tv<%OCc>SeC5A1j|@MJ8* zu78#l^sGqi*S`1A1cn`BHqF1d)uX>>4qUy2;hdn+&et|98J4`_>*WkT7~Z?Ob>};u z8FI4UN`##+?ix7f$TP)P-y6Qng>dkUEqngF=J2pCTZSB2gYeV3sQ7h8Fnc2d>`E{u`#jbOTtQJp<_+rSz52QxK_J4Q1Q;fe#_#XPIp#1mD$cAnk zoFeoVF=0>p9}W%ky?4+2bf@#ySZBNKU(Oyi?wvdCEZXE;m@%Px^15|}ev?M7xT3?8 z&inqEG3V=kJ1%~pOY=@Q-tjcrKksL>KYjfoNx;`Hlf*o@oFwe{fSXk-sbDzyu3E(T zWZjPhX_9nafn9r+BwZUbg!Qr{zy1}T=srmveqtnk2#BQbu@=+dc784C>LnA~;~Qd< z4t~D88hg!Cknzikzpevz6&qy0d3W$Hw+ibS2K-$IvRAD~VRlK@zSz_EUx=TBk}TPK zQKXM#Xx_G0UaP`hL?J~|>e!hX@1JKn$Ep~LQ;OZNen^Tf%-E6XzbZPA{(tM@e< zih4TUymXb~i`75puAONu?*9|LbzIG7?z`-<_2Umr)_Ye^--SjzJrWe$wtc6rJ$i** zJYev!%SOVN!L$bl0qb)DPZ*VtxfuRU6nAEk(Fu__l8lX6fGHd6rE;D;@;FGH6{Z9@ zJ<@@Rh>nh3Djl;(lR&PqF%GzTvGG_E>8HlWMmy$1kg4WjMvqti30JJ7nE1#DthaO# z>5zCIX-XB2gh=DJ>QyB+5#>hi2}ucYk?=Bz z3G#$nHBggCX5ldV5wi|My*3#s=_)$V)N|>6nUe!|r*(J9^YvM6TCb)vI`l4}vA<+x z{fWV?ACo8|U2%yXZbEuqaxeEUKA?2>93IidsJ{blU@T7hrMtlr%iRq3XE6>hpV4eO zoIPjtbR;at25%W!MY)^W>vP{Rz|cW|LhnF-Hx8edDm_-D`^6rEeN((6YB7d6_g!ie z6B`q~+-yM-9APoB*jYp*ggMab*mLGb;qiLMB}1@7i-*?*?-j$@w3RzfZ=cQ`PiI?o z{^+PV@e%RM)eVS$vEI4lJU+DQJ<^YM@75Q>j4`o}IkEHLWV>TA!V+VdHM%Ww#Z*sPI1r7(VAsv{uZ5{?ft4#Ihg; zB~ZfN&#jEcOO9Me2y^gDC|t!zM}X!6MEZDuqRE?$c=l`3xB93T=}6bMP&Cst^H=Yz zlI|O2IN*M_;7>X`?>-NV*R1{4@vl11tHMh&}9+A%WRWw)V z{jKbFK9N9r9m?NU)19E7G|OEL{UMs(+uAN~4>JBGyuiSNQr6)V%FhE+(jB3nM|Y&V 
zK%U2Wq%VX#Y2Nx+hJ105CF!xSXD3Zhg8fOe&J$67-eMx12K)1Xmh@gfMf2P;{Q|pu zZb_bD;^j`>oF)Amg>LZ`$-3K4Ucf3G6Ud)00qs((Gpgpx>$KzoFmK^xd%k6`GEQJxO;3>_GX)X?ig9 zkJt1t$dhKj{1WA#py|QL_exFQ3j330AGM?ZU!`e?+b3#z2kcMU0Z4%Tuh#S~*ng6y z5262Gqv>|AKWX;qS+M`LnqCh3Pu4UZZId+n`tOjr4w&>-*#CM>&w%|&v)?a7fu?Bs z4cKR@rk_Fi>D^OiCG1a{W1}VX+^FdQlz*D0yP^D~IcCP7{L?l4H0(b^({H2vq&a>f zVgH*neFpZtS<~;p{N9Ik`WSjgyqtL0%%g!Jd(OD@AgU24~Xikc8F_KG-IT8~hJ~A$n2PEng5o;ya{KVLJuLC%Fz6j?AsUy!8GZ`o; zc|Iu@Jsun}Q%@1RC9v~=lNuWXH>J^Lz#Rv!I^k@TyC==L`Av^URpN;Ga3)|td32f> z9}yKz^C_NJg}Le~DRcTaKbAWmEvc%8g7eH()hsE-U5vQM_^8-<4waRsnA*dLkAzPV zqaN*OxL@jpA1s4N10Rv#M zJpYf2z*fx5OuVtv=v?`1F^^Ns;UtH*^l&2_i(+Dz#x&khO5YJ3eR1qOb%t$*CW@+i zj8|Xt+I=dI)xEz&=fMDpbS#QozBCp`tSF{?JF1^ZGPO`0$Z3_9 z(mBQ?GZ#)W#v-1daOl!vcND!eVmT3qTEx70Jh#wu8#j*beYd%d5~gmmUP3>4wjZQK>p8hY{=F5Ie0#=qYl^Agj?0ic-P}fvNh6+ry7A(2hso+|ZD^R71zxMlij_EulKa#Ixn6i}-OU}I6)Pbx@71`^ zH_*M!G=;Uub5JwSefLvs)y^m6-a5)%qn6S`O7(z$k>ED&?49KWSfkxt4L2d8F$FGx zB*zS94xV|*Lzu>+N^Szp8Aa1+p5a8y^ZIU*xvP<&wzt>^d)Wis&BHeifQJvmG@Wpb zxZbpTI;(q>dyV{vXzrVrpLYgeCE*sR+inS}rs`IW`>98FSLR0jd~S^U$OCor+#c29 zk;~>rM$W_h8?h{EF$T#TOzcae=3zR;5;c6-uuI^Z{~sKJB;n93R;C+nx-y>9MAvff z=F#KLaSLxCu?^J-F(FkAc{nq7 zdl<@j@N($j04mP`lU-7xToT>gv++!fJ!A1q|th-l9_j)b+ zC>}S(^o@f>ePdiM_Gin=d-L82&KA)HGK#cf$Ql3OyaiKhvng!T`Zif}&1|60ed1uH zeype_Fk84+VY5toO@*qvVmzr7ZvR<74q=x5CJVb(PsdiSlWxUw2}^LO?gii0sFhPv z6D3hGETYFbo|6=X1A4fK`uP;MU*+y4E9R|)ue2bEM|&7y?n!bUW=Yi@&u6nRCywP} z<}RFwp%tbo84(A!A16~dsCFL$Ch$(Fd|(6Rh;Wxg-b=@gew;AJ#-IZ$UAU2<-5S$p z%-j=U?!oJx?!6p_Iiq66>4!{ksC~9$F^(Q5oe#BZk%yPsH5O&%GME`i5wQBvpfTf8{`?zm{5$m4ZDyZe~`-@K21 zPZ*C!OQJB*pnhhBq&MR!`neq|oJI4b6O9R%1rD}6VU>3waG!xL9F@qU-$r*CI7N9@ zc#J=FUMe3$;S(py2XMzjq1%vh)^ZopQ$7|z85UjOoL-yRTSp{d%NiROfoEyt3hK7E zoP(6_$Z@Uj$Z)Hvwm33teLKRO`|+^gllJy+r(?!=h4)e}re%@OgxCjgJ*A{`E|A>i)Ev=$!3*tr>ZogJtnZcP_5 z5X6ex0LYAy6^!H^@*haNV0R3i5?M6s(AFFFw0(DHv2%Y1|B|on5})MU^K0w{pWoaN z);8v<8N*igazxq>{;=>^YIfJ--`f{{`re3$r|1(7yH|^i{cXq3^_N`W4eOJP_jz@MmDwg<#jBK-ZbmGv}ZO7&9 
z=yUh1u)1MgrcIbOZNhYX+#uLDIKOa$O&gWeQUAyX$>mJ%Qvt8W{TOOM6e4D?@?^`nNXk^C@Q%*hk*`&5V z<%eHeyY>Ek7j^fUwLk9Nr>}1J{S_&h<9~F7 z<T zxB^Ej+<&mNmf+(MydvN0D)Aro$0h!<)p40}>ZJ8Nu@{dg?B~g+;G;WCx&G?EJ2eAV z4?Ex+UjquCeHr1;j_3AY<|D-D4YQBk+!`PLIr2?XAmW(=tS=|3zh|llaKFz z#P8O=!aTQdQMPFQ(BYnV;P%~ow9HuNh@$zm9l>1LS?#<3HVXBa~Xl?_aCYz3LrV;N9R_Q+0uO^Sv=@#M}BuwuM zd2Xo^SEx6)wtuYhEzGgWTQVO5hl0QT9`zxkuU}9f0BSixmFvX<^~&`}xIaLB*$Dds z7a?EXw)rAMrQfZ~k$<5|7lv=Ck^ja;D%~l_u^xl*?KZ|+_;wrfJsqY#By{1W>Zdp6 z!5(c8|H@WXo_A0$-m>YBpGo3vrAoYd&H5aaJxTD z@t=o1+Je6w^+ofG<)JE{UY3yd4t~KVZz1&uW{ql5 zGce0@A?k-Mw+O?x_m~dTpDfpkE-L-hoyr~qu2THRnyGZpp#HSWNa&@WGjG|X9bQJi zqrL6uAH9Gt#;f_%n~M*0v7D(5s@;!YrpnW)lZsz5T-7tYgUbJ~zp`5;-p^vc8Maw{ zsA_;N_tQO9xw2pf@{gh4%5jEp0K&gFSNt`w2m9}ZnX3KzZsQj)gcyzwkJ0{f+A7=* zlF=5{gd>#S~N+OYx3X9e%ZrSJ55ACVY;pfD*r%yNRI8- zNB7$Wuq*rB9l-2&VX!Op4!A(I)AQYwU4OYjm1j5V&wiAIenNZ=#vjYG75#Ah-VNy~zqXZHmpWdhdOQ2t}e7|A5a~y5Pc%+`owEaLzze{-&jcMMB{QoQTMcugl;FyEE(}*@< z_Giw`eE>Z)3(`e|uG$`YJQO%*-E}^MbmU@!fGiz>LOaV&26o zJmtuE4L`288$W`FvL+WOsC^yZF$b_N$M7w50)M&P``6pek0U#UgI=Qi8>}&79Qel{ zn)+Y=lq98<9_sLpGrA1dgq4)}>%wC_<{xu3+ML2A^`4LK(j}haPL1E$YLuR3VJ=3y z;(N<5FUP;~&rYw>Q1cWC>aVlEzy1GB7_apa8=Cq}*gNij=24ZGKN=CH-G!Jef;gjQ-F(sNfSy4o&D`)0OATjm8Kc42H2s4z|oKG zsN9j!gFjz9XMfJroIxnVS((7up1|3dz}1VuRgu6|mcTUh#0d^S4*=shGyCwgh(xwOepY#fo#v3zz z*+2y)z=TpAW<0~D{n&cM;WNwuxVd-6zKH86Yx` zCyV^val%?POB~rSLfp7!gvhKLAs(8QEW-cTE9$$C6i3f&5*K9niPi&u7R_&cNG$HY zSbVT*xCneVOk8>WZt?W|v7-NpS>lbbB=Oyk2gT?Wr^N3gnu$kmSSS8kbV780d6o#c zeZ3guGefLsxKh-xMYlyii!Jqch+*9?7x$h%EKWVpRos)- zL)_Wx9`Q-cpJGt|r6P3XIPq%80r7Unr^Jrp{i5Gw3w`x=asSFI#kY6$7f&8|LR>Nj zzqI_(JtBYGCUNQXPsFEVeiP3HcMy}8G>Cam^%ZfSz9{D2Q!nn$yGgVU{XrC6JwuE< z@|oC{e3|HV;k)9ngG8-U`t( za<>>#{h;{%t=Zz|;`c=K@IZ0<*mBYC=%?ald`{-*laGpzdWMVe`>Vw_H%}G^4iCWx zliG=@mG6r7MN`BtUAKu2RWrqawnN35Hgm+9i8k@;!!yN>yn98rMN#jKDf{(@a zFDt~(SyzZVBiG_OB5Uqf*4Wgk|ET)LTXDr+Y>Jt#d`) zTkneXtA7@~_bm|94^I$>KN~K>b3PLLBi4&Hhn5Q8-XWsRl~0P|TZ%==JLARVT^lf1 ze-Y*5c8ES#e<%L_1T)^74We6msfc_3C((AyucGFZOT6;K+u~q-xOmJjUF^GSj2IC3 
zifA$67m?B?Q@rR{AqKp`i*LS97Mlle5-BGaiCMFTi#>n05)+@u7YnkZ@zb(bi1oJ*5nDnFMB#UVV*TKY z#P5kC#2~*eBB-TJTobTR^!w>%an<%Tk@$mM>^;&;{JwIlh?y8KwzT_MJe&`!jW-_`1P7qV%OY-;>$6!g!6VEvG&1x#q=Bch)d@ciNzJcV#vfHV&=6; z;<~%a#e``wqDA07(f8LV5inu{6kWsK5F4ZV zhy$g$Vn@g{k@n4&ElFK1_mT=_*OiJnDrL*k+;nL#l!~HM+&2A3bLRj1^?LO>y+n&cRh=)_sgpzJe%SQ+OW=!ALtx$i-^F)%FS|rar_?;X2!EbQR(M za1o66{DJ%xXEB_62qW9S!^q({{LK?!b4MDZIVvbG^gwq^5guP(0GHy6xIE(}K4w_q zYD_F@95+LKV=Y3Cf5qnCmZHCE25wxqiaHf@;neH`M)y_1dyx;?B(q_#$qXx3Rh}7qj(^99=PKycl0rqpM6iFQVdeN!uzEQXE{-iwdXNT7?JTrk z(ZJ$O=_oC@2wAQ?JVIYWyC(!;zl}hY+IBp?vZt zzlHOi+a1O@o=||o>$?!)T!|l^$#~4`!nJl&)U+moyM_-ztv{llOon0AU8L&R3O_-e zi0jMF;O@#1FfhJ@1-$*Jl)j9RD{=^1eF3V9hN#XqMsMQ)qN;u(>cIdSLbgFFqaXIX zARGx$!NAc$=-W#RzsbN7hT|fzh#Zu(1IV^nseIEQ))FWZ31eUnXz@&e&VcAgx zrGRztG;6>SlMGDEEW`y1E68N0;nQv5HR^{ZPHky`-rz3bhd+73>$))(zf1<_Vgmk3 zc@0y~QMl9N2!+b$*!Mtyq^bg3RU;@)IS84pYhdE0CY-gn;ec{G-bqJ5S=JlB&pd&n z4{A|4U9L6j<0gmk@q^(>B zW$tb8tmE*4BZ1(W2t2F3i#)GxEZU?1328qZzjY0n`2x7QIb%|v9OAYYB2~%``3}<| zRUL-p*DiU;OMJ6nidtICA$I6Ojor4sJhGH@E_1h<{* zP)fQH5c3$ETczQZy>K|e7?Fj^IeUDoMk8wt( z0DC=m!0&n{UIzRL`|V+Ptgsb7N-|*ZpE_hcyN7JJ;(g{A?6@}tjr+W?qx=}gs7%Gd zaig){Cl(S0syM$V1Oo#Puwu?vR0!|gtrbf6?Ora5&i2DRT@M`rg@}D*h4Sx%4wLoA_@twKHv=}7k+E6z~1l=D6D&d0onO@*VTuUcK&!DF%>OkhtYp0 z17E-V4U_55U~01-)|#_1zTznk_K!l}{H-vZP>yAH4#OqkIF@U@fT3G8@*lr}torZJ z2+F~hIB)2>dx5+33l1pAqbo}R{m;8Gjc+Xc{eouC7pPh51>ciNh_U>NXKFcEzbXmu zR`TI`=rPLe$D$_K8O9T8aC6)RXijy)>dv>Q*b|R)%k!}~q8BTq`q8-i2BNgAV0~Q+ zzGtf8)_euwy4EOeN`mW{G$dHwM#ft?Y>{-vrp$I6h`fx*g=f+8w-q#F{z6#ze8iV% z!r^>1s^_}l-40v$es;vqKkKlzY#qXU({S(F9=Lr^fl;LJAV(j{( zFkBc0-c$|D%2Pz7ZZtxfD5MuPps^|pMYa#2+w}=Qe1*qAOdd9+8i74?8)UC6(p;;d zSF;gY-qj#6RvqaEaoGGyA0ylDVD@bZ*Wx;OZ}W<6A`h6!KuJPxH?!1gwO*qqpd@SF_zS*(GRo`>+OuiH?H zUFh#}gT(4Zs1N1|N3w9})9m5?`onnk4trt2i+B2$5td%AFGB5p`Ox@w8)50d(b;4D z%Ua7o|y_1EqR=bJxq|5$pYCP}@Odo-=DdfciuZe?^@%FVPdbUW=_`KX?4tFqd; zVXpg_ob4T4>kT^t_E>-8s$`pw`YF}P)taWb@KS9jSI^MupUS*0uEBEW8tJRuTu)Z? 
z`a}O#qzyY;ZD;S&TOa9m%bA}QvO5*?sYCTiUPxI@bqRI;B-TJ<6O&o@|MmGFY*)v} zd=;C|k2vGnvu}{P+?1pmclqp)ZxU@kaw;iY*V?>uGIhQZD_Qa>BHWbP%$h7cc+okL zD~^Nozj!;HEEcynWjlNwSA1SaT|b<4_P=_2N)WX`k63%wF5beUMhzbmuUY3ti~WHK zrTspXKic?Sf{>A6w`tsg!&(-YNwgJ+>nAf-5rZ4HmQqLS5<8a?tIqe-^}|SW;pxLi zJE>PqAui+Xm+Osaru}KjYDoWmy;NCjd3OKHg$YqQ)cKl3;FvpUp%t~jfXyn_cG8$j zJ$5e3_c*WCwUXLPi@kYzc2dP=>LLpgmug;7yqg-@BxlpHiYLdYd4>e5Lem6E;&>Zh zJ4JN74dt(I$@Z$hxhMI6#_u??hh}A|T9i-=OvpkXnYCN%sh4nwbf`+gjSgykeO7L& zzToGu7E!*pCXn9;Ps@5JQ`bw99@8t!3U#O*x|#WpU*4KAgPNnp-iTjkl;cc|k>vT( zr_X1*Q*YK`H3LHuYy7AMR>aVy_I&t0aeWDrs=cSd@|4)d>CWj}wT&X;0K`**wfouke%AUT24H5+eH2N|&kJ74Gd6;h*u z;ps89ga3%#z@(Ont}jFRM^7cbomw9k_R;tfRW>^Ngn_i&XHkA-rff!3;L5jS zfyUyT6ie#4i`b^Q*(!&fsI9Gu?drx8+gDQ~p4s}S!ysTAwO|yxWNqKKkOS1bS|)Rz z-nZj1)cGUGaH$Jsw=Pl#jUhgPiP`lz)X`(u!GZWF%=B1yXrtKkRvJsy`giL zQ~TJk&jl+@I((_eYLL4Hkx}VkBJ<{~B9={3vHquM+*yOv6}hf-NTtSeCN}zOH7A?e zL4u7^^*51!PR(g&6pK9FWb3HCIvKmG9v&*4)X`FeA0<8CSh`J=Px45%_oaq4xEy+>`hiDXKAJ>( z4F-G0QRjCtH!P}#f6Snc9%OV5*}mEPfST9B%()}csrj0k)6LJ5oZ$UW6Lru;(wZ1% zdA6TgS(U8ar|##gK<{hLvTRdox3ZHi^(zN<%@;cJn3wPVQ_Z9~6`78;-R<=6N{`19X|BOmbHdo{!hNjm(+ zA8y%~!K%`@nj#4wW~aPaliJ}PvyHdQK5;5_{8y&sza-4-X@8lNq^Do2b1y7>yq_27W=*Q&d>v^zBAvVbsYjd_mIA zadVDRJA7oGO8a~#G1Q?ThdLh@+n=LGJm1V_a?+Cwar;50KTE5 z(|CRbqw2ifDCPwVrpIvY)v8I9{Usc5`7lz*8% zd;Wf0MA?2C7f6ybCyrb99HWk$KzuxBtkOM8-LAl1ejfPQBbC~DEc@Z8(!U8escq(x z<(?xB*WagZn8MDQpdV-Yl$tY&Wae%*KUzl3`@onx+*#XHPo4jXv99RXTGB=xEy4bH zX??h$hdOjTIdx(~q@C0^QGdo7vy;+8r$11nMmaOHX5Z;~lc}c}vD!+zQ)-N;|5az3 z){Dl;qW!N<^pwNUExiA!?t8y!jvuu^ft9bH zI882;`inVhzp?vW-C^o{Wny;!^v-*ysmqnf$hyhhf1abhCBr7k{q1w?D)nA1Vp~}$ zIFLg<&5-TU*k!Z-5jAftsTkI9=}0NH@Oap*;ywBII%-ZAzr@5o<7O*${1l=UN#>XK zP@Aa|ox7s>!BGBI7DV>qmo^LK&Y}L~)di89w+s7&G)0E97D;JIx63i1j@KbRYTlVL z47FeqtJnL%XTv;dRP*J}$E3`!mb^Rz(XdgV@A(49fY{E%yoK|(2nj^!;1+?ZJ$fD-iv)|H{3XKcJ{>o^* zYn$CDpiWjJ_vyT7D1S3u_JzxYx}a7XzvN7g@4q%;PA_#eL+&U07HY|KiH^4%9X8J7 zhvq0%YTC=N{(va;eeFs7I~$l5r8Y|IU{vz3j+Y 
zBepD2#l*ImTd8@9MAz~+AOBG5dNUSGK~_oxwSzGkaWm%KyI5+@XmkWD>dWH`YE*31bI>4n!?r^WLeD(q;{Xl$`!q@(%Vm6Z$JW{oGnv4 zK`ow+INR$d+Ommt=&HKdfJ<0!>yOi2XpPaRH3%A}t{WF+7X(98ZAefpngY3^( zSmt(6-2NjI9DD9|S2T5~0THfV7nLYn5_ha{CFEtoPVlB>m%=a4%>NY}Jt*p(p@s7Q E4*+ohx&QzG literal 2786306 zcmc${3t(MUmH&Sp_m$jxo0BGeQ#j|+N-C5VC@r+8Jt&~#+Zh>V)V7Q;BdIT-^G1je zHE7WYRRR_bS|nl_qgJQCv4a7NMyL?6V3Z0G3kE0!GUK$=zYLonYb`&0`CJNBfb7WLI=*88528`JUia!vNNau^SKva zu=?Wnp8Nib-}SzAtKWao`Tumm`Tu;vlyfiohl^KVe8In+fAQ*nIDehvA0%JSfx$6#DQ%g<0^Dek(^?T2K&-qT~Ak8+s|DyN3bM^WEa=|($b&xF0=fC%# zF1YA@?>&q`2s`;!CBlI^1N&ppq{9;BWo)pNnS z|1`){)5YhXzt&lDkec%EKX=``Xw1N}@gSL|nN07bOY6>G4dl;%=eqY@^sfg8J&nHq zpD(&#-T6*kJBVfQ3K+$ieDsFg<+V`_F%`bMiqdX}sv%_n!B@ z_Z*VK8fi%FFvf#6rOw#F9Sl3orZaW;DIBCz7P1$e|DN~#)8RliFg5?7x>@ic~08#vT0>(ZgBmq>rSI4-*@@% zx@te|rqiD1yXj0r+Hq29!EqaW{}J!AQ>RkB zayt!-KtFDVml=n`N_YJR&!?nIk3F}MW>XCfZYq4cy5Q@&sDGwF0A1sk(|Ls~$m z1VHzB-Qaua1_s~@C?*lGq;n0l?5D^Fq0!+bNSk1UI3rD04j7auRnZ`f`5uom*8&VFV zi!=^^pZ2rhFY9N$S?R1gk%AyRFHZsW#RaqGXJ+7nW+0vs23)sEU{(fGP4ZWy2L_Y~ zUWEPyS;zI=Q`6JPmVs1wo2Oq;NGeCC$O|nwj-SnB3vQaR(4{OHvZ|^|KaEtvGu_X* z%bjc%@H9dl^u(hykXIdQmnp$NYMnY&6C;z!EAh|WQ&fh$y#65yx{bh}w^E_h=&6!*MB(Qj-lcrR8gD6jgz;C+XgK04+ zVN`+nakB>Dl&1(8vyG5C5N!5md43iw!6@jITjCvWs2=JOV-l{Yh%sfnTqaY?Auc>utVOelG35OMvoPHNg*gS_OP|s?)#x&rbIc>d zPcpvK01Psaa2lRN=4>XP&8G8?dz4c&9LGr&kEPEApOHY_uAgdZDjM!!CIq~@p`idB z7;-ZErCa&+2fKadC zA~e>oltMw0g(T+&ti+myVO;C!J0;nYL5VGdzUB zf(3r^O<8zQ3O)t!48EbNOv+JiI4=yW=;YyR;2n9B$wHagViO%fQmc3_jcj51G1=8P zGK?6LoIa41ZlqI%bUKgVe1;Djn5M&{--S$G0Ggj5!9$dqbaX?mnyX=1A_6Hop?%EFI+NOu&Q4HCdD&rA@0 zJk$k9IQcOVA&hkT1hK;_p(>xU{DWOc3qZ5bqyW(d!xWN;uK5v5-;U+Z?2xk#QR{AVFzIHKouwkkXFHh9iLSRgFs zo5KVk_d<`F+k{}|x8Ks>(;KsDrwxm+=e;P)F+Ok1}B0z~j5*Rm2r2rp`ENLaLFOtuMLl|!tj z3(U%fhLT2|DvI@Z5qZoG=@0@%I-hGv=UQ?A1V|u(%fN<_g8v4m zi4mYO`Q$cyi0T=2L3377*L^igz)Vvc35{-6;!ZzgFld5YXLFOMY6HjxHu(`a07L#o@h<2R< z{8fsDQ+Q2-`xog(2VklEG+T@54V)O(1EbMXF4q7>F>RSV=_1dF3>Dx9LTTWSnlz0AT)$Z2(|MAf0b2=AjWt0uCx6RPcPH 
z2z(U0K|9ha#j6k@X)-b4;LlK>*cDR2_7H6bilH_MVF;(iNMwl#l+YZSxyZ=%UuwE3 zqE3q=XdH}GF?tUXFnB3LykKzTH}Ck9EieXz{X9C-m>0Km{YEB#7QveJq56WvE?gX1 zLWEaGWcO4>W8V&n0?7h zT3`42H@I&+=9npOdh_w6w;Vs^$Z1DTn>NMw=kT*=k>A?T+S>Y;t$u6kU%h0`lnYP`=zqhMDN~L)X-ex!{y8VKw%RoP)w?Ov|7OOCCWk*K3((WKzl9Ks zjj+jFrjYg+=G45G&%^7ON=_zUWEQ^-DU527=TET+KBPM2W**HtvVi)Z&K0taGm%HR zqo$H8=S@8t7BD-FSa`oXb0$*9xiSS1E_36Kj*cLYG|jw+Mearaie|7H`^QD=oS6;B zxlZinXB_`Y_p@%V`}%(OL;i>TF8_(ta}C!wKG$&avQti7aoV=@NW;$?9%(3eeMdEJ z9^T+~&AqwP_qyH7nl3%siO1ZvrG|M<*bw*7&{A(+*l@bD%%4d@Hwg=T9)hre$Dn2ID^LM@59ysxH&-oV?o%ojfZ@K^9qNcE6fgeWQZt~m`Hne$NZplrp zq{41@ncwYFa-%8V&0jZ|6*eq#H@T&3kvAqwACWe%*Nt)tH@cBWk$zL8Pk-CI^=>em z4qc|ATPa$peL%Gjxa|cuA5yB#>vsd!bg`0U(A9v*HfXbvsmqNrN!uf;VAQlV!rx3V zM{Vq(jWbd!V~2uX)ToL_-BL^B`YS`KQVnk``jpJIhmCEX6C4pz&jLTU%yYuVIMq%W zmzR+nW@EQK$`+iwlTVr)Qj5;ljKeT^vdDp`myN2V-v7B+#Jk}J@Gkg$dM>j>CuAr^GFb|z?6beILtjqM`E#)HAMBV9Ma$2;sOZMOed6*U4ukocXpo=ns$1zQv}@^cmUDcCB=&$F(u85q3?HAUf8TZ+Sw7rDO=b#dGQ=4+NnoLUBJa^nkJzi~|Ki)u|v`xvFdcOD!H76-q+xlo%r9U=h(?g&jY0IWO zt$LD_oGEqfcIwI5nr+I+Ch{bGwkiMl2q{T_AP88^qt{9b9_@=@~SZ9R=9&)%O@ zN-;e$v3F*1notmcfL0+Oesmw>Y!2FH7pB3@sJ7!bfHVO`t4aCv&7>rqu_+G^RR?ZM zeeM@Dlpw<9x#v0XT363wd#lJQn416RmJe2HE}E1te7uURDJJE z0Bu{(_EIWoD=>MU_!7fP`qOMu_UTYy0yfeqI$8QV2&GfMUVP|d0LE~}au1vdRrlQVr=c%*}_ax!dN%%|>jwRu*lJM6_ z_?sl$n}okj!e^84cS$&&guhS1KP2J*5K86h$k6`AYp)6xI!@`G%bk6C-&5^kUp2b5 z8Xc}i2Z^eD!k$f~-BhZ|dy}wVjWF1Iljou2`DaP^P!c|zgd<7#bA=nI+_3urJ^flO zFLwmJj%>RcNdWnaq|75pxH}0SO~S{L@RvzAnuL!l#9sVF5a<42R8KbC~QOu|ux-g4*hd67zu6O zu59!Ug~QqCEoA6IZ8bUr#8-{>A)L2+QXBfz&LfLExbb| z$~4vkdrJn~9aKiyRgG>U!}eOh_LDGFo3M?9U9|~=K^aB@U`IgTw3&wx|jiOcPvT=n#;Y3cT}U! 
z3g?P)`K`@KMcawQT~pB!%QK*Rs+~>oNOOBUS~JMdS>Sa|UFHo@q`Y$JS8?e4S}Kl5 z%A*=?uG73O^Hv&kuyAnK#@P#FY5B1JbU7Ddf>d|y6k`eXU9G*qpBrKC5E)b$%Dyyf z!?cpNH@8RU5pE@XHDNE|X@ny|d$fwMgYe~qyGcKha6_RzI+bvk=f5N@GxgQWon3IH zFdh3BE_c2UO~ySy!-5_!x#*=~CR&IDZVZo#W`?cNv@nQfhf|`tVKEBBd~`HK7=6;EA0z2@3xnV` zgk76Ak}mmY6g}@U{#|PR8r3@V^>TN*l!^xiKO%wuL9lxvdK@rjD6CG-Av#JF{h|>1 ztYu2MFa?%NO=FJFr;uBp=Qn&VGP3UUA4^HtT=qMPa!(cCPHz7UOn69dZud4QGV)B z;~i?1Rc3IKUNkNBhLxQa?@oo8+gU`#yS7{xw5z|~F4ehns@1#}*J)m@^qp2wz=#yk zyilX0c?E{?&-A2up+7zNEc(;@Lug)1_UuI6E2Jz7y(@l4S--AqE=*Q4R&m>}WE&;h z+T$x#(r9gkMfvi)B)qgHzOp7RVyHed!=KMTh$j`_+`|eXAsbP^TVm5j`k z|DxKrDtEccyT6RiZI?`EU@n62Fzy(J5PVyj-^-Re<1}!(EET~rZ%hQ>Ebx0-agM6t z9+Q1^s;K`Ak+QLk5%$P{1p)Iud9%TVrg+osmM{}EK?xbz3os{uubR~fgKfbwP?N%B z%*XGF(r|?|T6oD-SJQD9yrdl~LfY_`b_^`APt_JSw|USjf6PMR40hh=HiX#KLdZJ5 zlQ8aVX%7aljmBv!In!qrUp^bvJgAbeT{R?Y2&g7mJjh1y?QgFS;^WZy3xAhX-jP(k zPjJ~c)mE+n@5@UJ&_6KCV@Cn5;Jz01Eyus+;Dey+jZ)^430a+jWFF6#=8!8Lya!fe zxW>V_L^^nH!XnZ|gNaS6fCg3cu8Q)Z3sm#r9F1X}wc3=M8x5elk_Z;ES_kR{#Pr`qCpD% zK}D!1w%!(Kugx2DX@+ozTk>KjxSdfP?`~>IxaP@TZ;K_z&S-;MI5C0#WsWkQV#>X@5Sl0@dsYRriBMH^wz~a z*ffnn&Sm0xub(6S!F&^4?_<5to-o6d@`T=5pgnD3Y!7TjkZzj9*e_%>S9unmxZ=$l67|I?~*`Qk1jTfQW#rF%Vzj7p(SZa)c6&c+2t0V;9 zXnem{B~@!13Jd18^B{+K0Qup^o}^TY#S5rwOr*zlq(?CoHZo-wqL*YN!w3diipvk1 zQ{4;b)R8F4n+>@ezArx#pYVb&{P)~232zE`0p)8nq%dJ_GBD!M#7FV=ei*9q=4qFD=? 
zBFf$ZpY_bA;5N}em%BH#0eUY~yVrf-YP*waH|$p}wjopPr&aC0l&c;8VPzVb-uBj4 z+oMMmAFp(R!cAW3lwhra#tGpmO6f6>*qB(Kv51AIEcHgj6;D{|?Gjhq^+LGfu8$zp z=2pr4@GZ&vR1!VIa7BZdd+z};iBbF{&vLdaKM#9pF;Fctw&%80C@xtV^w+}VldiUm zjz$p-qhdHWni9^Af^b^Y8qSQ4s!WTQB4QS!kZXDzhlai+njS8TqHt++WVk$95S|nr z5nkg)SG(b&XkK`HbWB)^=7cXn@jf>f?w$^r3 zB>QSx$yQUzwgXpk_jdq`-3Kt03{0%#G+W7lsbsqs@553P{L}t~|0Pvq8Lz2m@E{cp znu>OK@gEOT(f?CLH8XMOAQcUnigtQ&2g{cO^{89KU85uq*H#4a?((946%h}6(Z7`^ z#O>0WW>;8-AJ5jtz5|1pEjE}sGj&-!gs8;IQeH>t(d;;}FOujCjoKY-hd-KC76#^g zoJxlt>w!I*8wik5&5h9)GB@s%-2aX4imkJ)9&h(XkD4}oR*+_+tW(DJ{FD~+{8 z*SduziB8qoY9Z92FjK$&=prrdXth#zQWlck+F_ccg=GC0WhZ4JxmqRd`U9CKAm2Dt z;XK8~Cgqu*A%JFAfsD!a$*dL^%Ki;yZ>_$>O2x~?_T{f9ytE~gmpAoQ8Zy(;mRw{@ zj!t-KOC~SBdOsbi;SA&TSRb=kA5XBpuY&dNRznVB2J3r;b-VD~&{|n|cDI%no;_8> zZ%h#1a{$Em3h_NJ1o4}G3kr@o#B4dl!t)-LofP7Cs$?DFZ~6hV>j*o$7P2C0sR#z) z`FGm6O*Xpi+4qYD*w_|Zkxs=8Iqa`$popHNa@Mv_$3HKD5Yesqbd>hhxxl)HSD z_Q9>lEqD26nexkLuj1wODw#u%_N9dBR2Z>fv{j4ML@i&=;?bsVQmU=DxI7vE*S|;) z?XGvZ)A!v$3yI~B0Ivumq(I*0Q7CKMhox)=1%gN)DxOee*OKbRlr3Ad(ir#Kt zK-|FJxUaQ6_}4nwTUn)jUW2vhz}6r@-L0`!98i>Uwelobw4?8-8-2~FzoHrEu5dQj zkN!XY)abP;c;V_#GV&fX@{Jn#<^zn}jPh4EF(xbJ%Sir?2A-I9>@a)lh8cHwE1dp% zEPuUzoL~D><1DX$UbM0Hnz3%uSo`-Is~PE!G}|rhnyJ%QH>fa$9YTZfmwH>wU>5j& z{xWYHo+MIX;!*ZuS-!+u@Wt|i?^O*=6@_x_w)zJ8Ya6hY3@4RP8QbH~lt{DRbbX7@ zLgwGyCredV@4t%he6yhH7iY%2)%_RJq0>ztpJy?(|Eg8n|MS79ynC zH-@EJbD^+UVFuq4bSxt+Pl_AGcoSxb_ZvG3xD?hh{_d2>1V)o8c}4ehhs_+`42hC(lfLen(AP5$*4NVtXXe?V)INk4rq4{RJU!XFOc##NTh)MN7F--_oH0rryLO6y zymLI;4{0gZY3pMvmUia{tEQuuszdR^@@%RRioFxYxhWOBR^#kVMXxggj&@Y;{%_UO zX z_eCu>GL^-KRv8&~iubXTOz8~UeB({!yq~JitDFo&xy&fc2oh_F*-b6?0WD1?oT_=` z$EFW|UEW-Hv>S@IV#Cm8|AbL5@cUD2S2dOMkBG7?8JYZB>+;79Ec;b}J9hBNO@5@I*d!s%*%@)O~Upv`cx@hk?32Xh|_Z zpNE>ueSY=|##qBE25a+MzSeIn@N{m_;CM%>c4}1`{)x`%=~^ zX*?CZrCg?eS3OtksCW;2J6+DR{`2*7va2SKfYxPH*7fP~j*0e>^WLmmisdX)V zRc5W~{0*ndQ>WbJJM^-uY=<*yUw#vx?P?!oroyBxs=|ZCIUqItXausX85Q7pCR1+f z+J`G0HgkiQzpvo2OE2q7{=vMQiWN;wYsh=P3p-4V{Tg!7r2bl|i4U;_?icQ;q}N&-f%AjUbSw8Qj`k8ah= 
z>Wm3zlwZn5I2HS#M)%lv0e1~TZR%d7mJxb-rA; zoQjQ8Z$@@Q2-X@++cT2Xr<7`8MCw#*q_T-B|7{1TP_Dkbdm{u^6$vW&wOrvPiM-Ius?1W!>dPxOfq~kRE8LwGnz&Uj zYfIL?{OE+2P}2jIk{k50YOog_R$q=*J?_gCJX|UHTfMBO!|KcHj>|=n7^-FR7K*;}dx)q^ktXG; z9n83?RV^kS#Q@~Tc(P>4KXolfAz0tA>sIW|iKNNtyQUvfnA5+*| zZ}=N0EgpLz_Mb1y@!H6)4vc(Q`mUkuSu4J{d$#lk#W?G-{?qOuApaze(TQ7UV6 zu2Em3cbB>Q9#vFTB*MQ^)^bHRNRrgho$+=r9Bo^~-LjShg{!O8O)v}*ZyH$-bR=d^y4OD6=Ps;xkgQ=N! zG@+vjQl4&;&dBo<6(#wj50SYlpX7LMG9!viX5d%Ny!}6bRJAA4T_3NouKV?}UU#j& z+qS&ve82N0nN)=IHw&5UY}}dO46Q;A5yt z{!1_Gb)M?WXKt*6kPz`yMdQ9gyLk1c^y19>gsN~a?lE2W)s9`T~n^V89DIO8xW#W{k z#EZ`=&JF#A&NLiJ!;3=sgx&6}jh}NbjAy@QjzV6HJ31rn#?K_-BhFdWnTn^c6$Ql8 zRkart3rFTBB<4t*J25dJ@dypXc%sW0+zz34l`0UDKmIvL(GqiUmPxr$-X1lqLHMc6 z{cgRi&KiTC%FBK+pZbzlnaMd7mjt;XsKIUWb*U)!GkBxaol3PEkPxg={#SZ)jlsER z!YZ~WT{4T*X&iFR%P>`fQIh8X3*Y(aOt@ z=w($s*J!IRyQDAFyrksgm68wsl=0N?dtN?TdHL9X(;<7XvfSnqm6shi@Uo^uv{`+* z{%c^Xx`xzHo~)Gos9siCj(S^t`G|x+ZpwJz;LU^u;d1B?;4}w!r=#nP2d*A}q-+)5 zPPQv-wkgVXebrHS!!(#M2DEO%xO#ZhTPZ!{=18Zh!Mmxzr?! zz?p`M?!J+f3QV+L!rt26Onl_J_{e{ZomUr6rT;aj4;_2xFT6m`TCr1@X=llgR86v` zwPgH_YE#0Y1kxiF@o~e~pqrXm0-7GK%#ynwhE}WMLpU6%yxg|`%b!(CAWc&I4BiLOT!QP2<@L^krp@SPLb!=3Rs#?5( zZ`^IHgH;Q3#jUvb%EGBKHrY)&b#*}EJ%Q^f#;H%6(t2~J1cBuxMuOSYElRb3P1r>5 z7b`WGxujE>z4+vCXs*$53ezqAdZn^d)&Yjy=dFWbUY<1{83ZLYbME?SXeH*@ohuXRJaEWsbSG^71BKMPSvM^2^^=UViCEl$=vFF3{$)dI@WrV+^eZRyF zjsf@8H?ebK6UpWmneRJb6Uiohr3tes|3LjPep27Y@WeKt{8XG>OB)Xyu#IHLyxc|^ z2IakQkZ>TpB-`Nom#W>U#csgncLkCN!SdF36^gqp3#j-M_txl7Y=6Fn-?xU9PO&4& zc}dOmP1?z+VTi?3VOr^U+S>TtI@!=joRbZGJvApA8tusj)Jqx8wwnW8dA3(^JTxa8 z(mL6Y*U1KUZ7W`idAUIAZld~=4IEb*5#;eC*hLucdLhci|NIcn7)?gG_?#TYt()Sx zcD8>@{2&plMa$wo`)inX&4Prf{g_dBvGL$;nG9o8!x()b!{{8MGvQ>0akEO=VI)U$ zekMb?7ko^LV9^@-N503%>XW`CZtU0P`ua_%m#Mm>8^3O(Vr^Xy=>C-YT-QEEUFo`l zpZgD3c72PxbrPb!#jkv0zoakA-cp}y-v}(UzOE}iwcmgqwilJucJ;?&RA26@nNso4 zR~ciudUzExy!Y!hxopxOen}@X$$)Iqb2~{Y*Hr-D$8)z6^3tpjIda)YTr^?Ma6sDT z49BpUl3JSCbC>ZD3CTIjA*SIOMeBu;j+jlFh0LfQs)sC#XkxHZ+#cH8K|amk!8B_` 
zbMH@@b6f?^3)!0Dfl2|KWY196?P-)TLe{V@HEtzC@@89Fx-$FaX>(tT&3=`gU03!n4>JEiCGAXoJ80B_pB?yVYmqgmaDq4T5NYw? z3pow>zrSYrbaY);X_brE$~Wa4Vwhi;^rpm@s+$sTONLo{u4&>hHE*hCohF6OXLbH^ zlH=SsY3JL|SDkPF_Wx#_zgLTs9OvhBc7IZvOgE{dU4;LQ#+i``jk8b@Mi!l@I&!ky z0cWiE54wTsd)AeLnGkQ3)}aQDy0FCMc@Ij!68>}=4ZT$x;^n&*Afqy)+OV4S6n7uO z4G!;2_@{K3wWK3rZ$ ++V4n#&R^mF`((T|4_L9M$sEemE_LL&=T7-y#vpV##>c z#UIgqXk{T`lm7S(!_KW(vPpOA#AAJ~+rE#SEsG`N`4->tyP8}X)8cP*Etwzuz%YN~ z9TO?T@P1L}#Y2jb8 z{$VSb<-$E>f2raUQqUW}+oa6_Q?N6ENk;;cedU7t>I$A@3qF(8-az>tfF8lG7ZBrI zslT|=8!_IQ3+^>?yKg#+dX7q?=uel|4T}HtsKvvCg{NF#O{!aiQd&0~$n+8qUT+ni}?udw6 z-yM75)$Yx)l9UZm)7;45FD!D=imh1#(RFU-I)mi)6g&BXA=CX$vPl3ky1gn?!#b9)Mg3*BlquZuWmMcE*vwW81 zwoR`gxigB#$a+vFnL&}vkRh2tOEUH@uf5rXWQIjDiv@_C3CRpqNTxp_nazhmGJO+C zhU-n9dj+?sLMWcD)){a_WMz6uuY+DVQopVSN6&{~o^_?C8iFzOUM3j2S|J!NwQ8n? zGQpU(CPOfNWrFE9#Id=KU^bgHno84@2b_ub&5?(qf+x!jUYA4k3!+I6*ptkuR zR{7!Uk%t%-BhOojSS+r^{Fue-8vBDz1Itce%myP}8|+C&t}#t)1Rn1(9Ka#sthre# z&tXOpE*F&;rI8tG7E+bN@RJ(lM|GI-j!7M6ytjJ)0mF3T&0L!$1;wnU;OY;{n_4UF z5)Z$WARv6G$?VCAzYLfp_>4l%kk{VGw4JngTVghtl;(T0bh8DAK;I!W{2xtPBm7BK zBYcg_tXH~)*`nS9?6Tkyq+OOg2jaDLaE+@SOtY6V(q!$`fNOnLR($QDMpN?P6tiFY zuyZDyg7qlZDz4>CY!V=;Dc(v5Dvwp2hXA$D$SAV`>)K58#NNjh7K0AMBDz&ehb3L7+!|fYGqJ1P z&V1>!?#@$7o6)91lTua|K;MgfdX3}jAo>mAqC(4`tMYTHe5wWzV@e@7z0j!Ol~_IqURB6x zoIMkK&Nrr_zY$;VNkxBK;6`(hnZC6%ZE$6uR##hXe^b1b`y zyG$d-UX;;#BOWyO%i)P`B*FSr@b6GBLT`t(BuDKJu8RInh@m7!^cIaURcg_dK=#5r zuIVDJld>wgOC>=!M*>zw?;*=LM{rDI+;-uY%Nc8sDYwtJh_B4$b%K3rg>uaMo>UQz==uOJX@7r{HeKK=% zLR9E zbLvQ7wu`z^pR>ajoO}FI?u=rZ4q1y_F3ej|W^OeTL*Pgex4S2ETtixHJv$=iK4Xatm@dC@E8Togb^y2j7#n-AYwwo8PN?vTO zzPQu8Sed-ore(N+;h;&6ll0rF1-6(MuxFd^o1i7$AdD5lCT=%8Px#s>d>NGXBsZRJ zSf%X)BZh(t(0p+GU@1v4_WESv0dorwLH1_men>;Ohd{W9R+O;?T}ffTYh;}AH^ska z<1LiPgJ|Qvq|91VmEXsOmFCc+64xhwKH%BVn z)TA>K4Fb|>SiaN1XuBlQbiXB)b*x_Iu~duaX?)y5jULD?)Qt{3j=ux@JbSr`-a(Xe znM%E#=wI8Y4hM5Hlu~v2_>=m~1MxeFzrrTpMKoODDphBGkG9e85shtz`-o1nsXD$p z+eYspI@d;jKy;ORoqYXRkqAeh`!Q9!@p%Y 
z>C>;>B>jy=>FbzxO4>uxYfV!8kPmEaHM>Y{w9yBNo^EU0Nt8})O`0|5dPU;)?N9C7 zubIb0$fJ1}T)bPj=qcmk3=XLJ2)MtXeULd$KZKud@H1xOFgU28LthCv6NOcwSP*H_ zk#yQQ_z{=k;N4TcMjUqgd~O0)0};BHu?n4ey3Aqp=dbyFJ-=u3`wD(zekF{H{I2A8 z3cmrrt^B@<-=p|_CBLWhYcDf{ffIi%zpvx>Z}|P&Fe}Zwe09^3Jhq?e{Y`n+2%#=- zT9oG?pxMXISFdm~N0ft?V$afb;(5!WF?_HTJ(JOMX|;dWMdmP!1n+Jc3UJxkK)06|h{7b0&x@^WhR}Qnb2a`~|jW!}%f`zG^W4 zj0;P-Ii;i2=S(pztE0hepT?9EztL@e`if8nAsgqzW5e7Wk}}FB=>bPOig!lp?rK&6 z@jBGpX8Z9WQ=0p$O^$^)bjKc=U3d-*?hP!~R8AasjP-*n16+ZvhMs zVtSaZgJxL1;diG7JB>-F3&Ccst?~GDeJ|+L;C_5w;bRGRMad?eZ&LsOvYKxuNE#9w zoo;eNKJd^PsH4W<9(NPX>N8w$AYjgulp*s@$niR6gid%A8lpmZI!|Bbbt!IT>_(z- z$BYAQt-^H==j$Bn?_#`%x(El}y5CLD3CVm@W!Y&e(~~E0lMT8vm4r5!w8!OwjOiek zMu&8h%Lliqs`}dx=TWll5GC6v$#v5|Q^_*j9yXdk^C7mBmJ7@C^PbUd0I$()ga@PB zDE{!lHGVfwsUP?;(^YKf58Asa7n2pcz{V~4fnmPnB6=86b!c9CtW1Hi7CwLlqZ&o zZ2Qk>N8p258nAi<)vU9781L6!Ks=hfp}98i-L|jqG8)QuMpq}@cbM_1y|3l)S0vn$ zle#Jy717#44rFjXmGHJ4$6*Nva-2-1-oY90VZ!Y*qN&t5WNNOFRl`ZmMnFW$iNH8g z;m#Qm6Nm%)GuSIj03wCBh{jx1h&kQD-V8vHVB0#wC=20qK9>i0rY@BlqBRq9LJF`k z8lyzz(3i9ak+bTP9yKG4qBAg~%rFk9z=V;Jja7MhWFkL}tfi8&oSnf|mSpS^oRU(Fdd+?ACKxWk4Z#NY7{kyA+}dJgBgR4O@k17(Y?iyxbX9h#vBQLX?pE73P;a zI~Ah%+@}z&XGkF`&pis!cYdI-b-A-cAsWVL|waGA-dXtLMdvx2IfUk8rVWQ)6zo8jqF`1R?IQ&A+B#WNw6iK{*bBA z4bPC%oGG(4I197FTkOs5<7S?|C47gSryVnyr-cwU!VTp!bkW6_;oVqkX6O@pd&IIk zXX@)?`m90MOtdpuC*$omYzTrcyYlm0KeN=xwW_9$u9;wYMj?Ip+vN|4x1B8Bru}#I zgPo2uw9~P`<06b*>cFl4iq^r#Ci1qi(n&d|B`zqz#|TfW)oZwRc94 zoh^qmambn0#>BN=J9)4G;gQ()D?H}#UTgI z%tvmFgxET>Js75geD%qWr^r~;KRYN{evm0Mqee=MAjisD;CU#qyjg=Zll~`rEL&m7 zWw8%`Whl^OPNCw4kyEBrtCv?a2S;d8!S9U38u6<^Wq=-9ArQIfvY zfXl5Je;ZsRhBx5%4^Kgu3@e#iv$@IydCb{mmDrOEb*JhJ-lehaJ+wcGWnmWdU} ztS{5E`fc3TKppT~pL#d4c-)W7Ezv+U^xEUpTD9rz5J-0<(|J;i^7CJZ-8I!H$UwEu zq3@u(wOdq@kLJNBS&cPDOEn#!S$Sv#k21udJ>K*IEnC+pUsyjv41r@@B1; z4sX&{M`LigXY{X~v)WJZWQV_(@5bJ8nO%Mif<`Sm0DHWhxW{Y}ZRvS{XKT=@ByA(f zi8swED>F{WH0~D>F^xAXWE%Go#-G`(>z{^{gbKBB79Z15@~&B$kps!s!)}|~9-hc? 
z$_1{o%vFHO!jAnly!1^$_ zXDepzfhiO67@Ujp+Q^4c!4vvxV|<*6dBn_0Br+3Yo&jJFAM+7^qGJlo*M($gOIVuH z23J{)Lfzm^shOU*!M~L6#1j%FN=6_}iI!ao8Om_-yo1p0E+odae-;UkCzif3eSU;z zzHx+BY`%GfR&0-zHU}na-zsbG#h7ap<;|-~)5QQ`$x`V{8Oy|bS5+Nwf+XtAfp>X{ z8;0^tspe^|&EqF|RB=*eb2V$eHfy;$*15n=>U6DTl71}!Vg`~}L8ay;=1%QH1@92* zdd%jE;W+w~QX#m>?9(ROZhFkKhg{3W8-Y>6H()ob@Us38?F#A}SL@X<vV$sDKt%gXfC$T|9U#=C;Rt z>PgXYqW6F73Z{gf_Ra%Xm6c{G zY7%g}DAX7)0o8GrHO_*I`Qjt?5tCZvtyIRNwE-8uf@9ZWZ&b0RM4^t3t~QplWHBE~ zSKP!p+py@^wD7E=AG=Y6{GCH}$6yvRkLl!*4;<;O7rPP^{Ux<=?3r`eqb3K)tYr40 zWAf_DLg%WZ*-C5TBXT%2kaCjN0rY5VK%2aNhEHL8hizwga1cH|Y|Ke7IeZQNIoaZ@ z5o-S7m)i1uO8wZh#g#4<&%k()sd8HIleC7{=Dp6!%4Cv8ZB}W6llHd3$@F1rJ|o^k zT#Rf>P1AYs4nCWOX>*ZYQ+6*YJMuXJTRMO8UX#x(E_XB zc`HMf+@i$=(4Zg(z?h_p_gLPqJ>__r#V|I;OBhtm#JpT^mlj7{U*AfaYiI@Szc z@JX{aE4f5$>O6Zo>cO$`53iwVOnTK$x0WHIU7=cdB$Bn2wX~6fs!KHoH@l@)V}@iX zIgk~9*x({O_6)nQUI2|Jhk&Lf8}PFe7lqad8{RrKbg&Z-hiPRU(BAVAhg1X)YGr*` zWeas>|I|ii2w`GWeR}`B^6Ce%&9`9ov+O2D)xFid@`X4^XxWm1QHZH;Zfg%hLa3Lp zUfSYLRQ6#V5v}iO)9fD6U)E#>HT`YD>&&{O&8{K;RDHALi@}yAYmPR)HA%RqEt;*F z$kJifYnc+bq~Z^QL$r&ps(oR0Q-J~Atj{^toz+N?4rK|a-~`OK8|2KY&$gks@KNQ8 zAFGBSWne#tXE*q%SuGm>Rdf*J@Z1{Cf)TV*}^ z3B0 zO*ox@^*EZI;ON%V3zx#Pj7J}Pan?z_)nKVDoE{hCqW($aqJEf2da#NlbjSoraxZQT z)z3oWi*Bl>V{=N=43gRsBu$?KKlAF5gc;3^@OZT4g``@vxY7!CwJmpqFQN|;Z2(!# zc}bb}D0dhWCnwzlN!Q5AhY z{d*RSNqX8@7eOwpCe0qAP;Soy;N;Dw?pf{a+;1)#7VOb1=w?^a2^?`D?w@uq7x1ky4V;)qq!7bU?f{yz&sC&W3dN8y{0 zpbBx6>sI(S3}p)6i=j;6d!Pe_e+TLbFgp?EBQjF z`HrW^ks`)P1jW#P#@AerLB<9805HZ5gYgaM8dB`~Xvw z=wpx3jbL68bB+%+uPpx);b3XyFd?&8mvXsYCLi})rxK`@{)KkKWbV3n?mxztzWO3K z84mLc+RaV}Bs~B?p$cz{0J{Ye)iFS#r?iLXgm2KMWSe)ImG-BI2C&4K7ofV$Tddo` zjNDx!8&}-FP~>usYFgNi0CnMGHG3#q7veNoo=d~m>l`Sg6U4#sT*0C8i0F?@9%-wxK;CV&_o)i+UMUw#>e4GbnT zXZs;$4zM*-Fku6U!OOA%S+KS3x$74mz+9t& z;At+OXTq+9d289E@Butnx;0CLpl)Uf5`msBtdP%MI-r?7TFx}Ousx3nhe6hCjN|lT zj%7(;KyOB15FSKe)X_ zbGtv6tq6;X^e8J=hG4AVO1hQoOxSMG?vCSy@*&*L4Jnn(0*0^5=M0xbD+z8vJaM7Z z@g+}vUg!)P$?M6iV5CylB0}ayr$P{3%YN3A7I!Xc4}Q+c4qhW%hBC{%y#ncktosDs 
zy~n=*PBl6Xr&=*NPW8p#Q+85^*f*=BQ51FGFXnp#P{10`GhUu%{`CkpC>f=2uhCh`-$yI5R~mBqZ?Qxg6jB_1hsonnV`BR z5Y#>a3xXO~IDtw!7E}3n9YJ+KP@@OPIwn|;E_wk3)hmK}+2janL}e#MP>-o(4MA-* z00peI1hpPGh@iRzkLOKLU5nd;U%W^JMfXHdhVU(Ar7UIXyuyn?S+=t@umv3pp8h8( zYj|;)ve=0?%dax48WYr@tUU@RP}W{Sau0y1vZ`(<;k(7Ps|<<79Ve7&4L1+bnoRjdEbbNVTN?-nD6x1Lz?%;ExX!`c=nw6KF?>+y~gEs0LbWYD9J zY#&PRKd8yzJC)CK8^>e`{a$9$&;bB3EI56fI9<&fCbrxg5Ydu)<%_^W_7R zEKy~+N>+#mQuysyq~v2nDennn;yzhKSW^ksWgL1gY}F`-65)83Wg?7T9Mj9lIV0{k zsl7yrcmO7eFZG0V!lskNpAsIp`6R$U1&fx#2^^}Qw7Bo27cljIpgA}&Ng>)TCN-&f z@#!ji)6|a#PCDQsbSvZ2I&-JS$>MUzzyK4DLgd1r(zh~xY2fiL_nAQeQ5L6FR?V#wZxv& z@mEsJ(O36+mk{nS&LtZmw3}XAEt4b}lg_ z+K(s4bigv5JuQz7R=d6G#hkJBL$BCCbpuq_Xx&=Af(t0uXe5nqO&Z^##0 zsCLk-2plIY24g0O8C*O>s}>nM4Kg@uo*-jbMVNHTyt9f6hG=lXkimu7vz#0*YK#M- zq)A}2MrWU74HbF-Y;4%+m^ms;o7biT@WyI$T(ZGbZ=zxwus(rJyZVI514a`-R64On z`C7>Z(98&Ymkbv0T^c(HOY*F+7}cx&bwv+>QeXiOgr? zQBpp)!M#N@Fh=D>?-T!VOyx*T;~DI}>2jZG?Cr9phfnDO_R;FT4Cu;pS!U}q%yGa> zc)U6D@0=Ab;%r+n8%~j|$TAm`=`k&ytKBEV0H(!9n4PGjFvUH*B)BOZeG>9zt0fA> z_#a+^M~JcdMKi6eojnid4;s>SueHbO%C#RT#1-TyqXJCB)XtBeN@udUhQ@rMIHf6Q z<`Rq)HR>K%Hp!Z?QuCy%>39nAv2Iu;#$6w}-IdrBn?o17@>H}>y3>`Xf_>6;t~_zC zhHh~s#?>0SypHY(uFQn9+q?oih9E~C?jqooDl`?av&s^Q7 zEB!(qGe~sPrf!hsDQ%y0B_B^5KUBwc8y`=2h^cqFh>s^6#2R?g9eg~Q`x>U0BRdgB z!BG6tAwb%u=y%XQ4{j66J8(n{jfps~#7_O5iBYiT`M`59VfzqD@mOio{i zrx%q+WAq}_8)HwGKfv7hCw<(n=m#6U#Jh*dRB1XMgT>DcXX0(T6BR0WoEe1+JAB^e zN=tE@#vRsXZXw2dPX^YU7w#cE{qC*z4}TJu=BfCQ>iLHs{@D|sx@+gn4`L1OQo^@x z{nSYBz=uBMVI9`YDB6NjF`Uz_u~sTE!ap+ z@<`eBRzgU8m@w8xnx1i38PfCfCix*M4@R-#Py9Dw7MzXumS%w#H-}Hp8TkQMD9e~L zULa8?{Y4>!K>2GyX{a01{>AtEOd4^8|5wjmDw>w?vz(6&tJzp7)DS4|16mP^Qet4 zE;XXn>EI@gzHsgbm_CC)?!kM^PQJK@l+rYgny}~U!1+OO+^e+Nq_OQXA``t%;ggx@^$Pc7xB-mtsZ8`Hg->VnUGjxoJQ%%2;Z@n_FBR_1M1KY7 zz9~$JcXFX2c(?oN14F7L# z^Q*M3X#+^v=%X-uppcC|W+G2zqxB~8 zbT+z*2xXh2kDJJr=I9ed;vI~ZAh|84UZ85!JI^*c6wn}}F?P~kjOb`T$rt4yumL)@X0b-LceOnKGY zWm9IBhptnG*3)c*-k^aUUvbljH`=7FHA$On(!lZM3ao?V80`xdeaA2J*2|-5z~(iq|+vi+e(B- 
zRWf$MGH;(M>9?=<)>P7GllIt3EGM@~yQ@j6WY>wy{2pett{NB;L2;v@+;oV>J5=Im zOE=pUN5NIwe9&T0MdsrRMi>wC4&qcR3Wk)nSNWGGG^hAEHojYNmQSkZTnr4$oqNjN^|+sd%H3lK2M2n{E1f#gDh~PQ|$#Laj_Smf-e^ zt2vWN#67hpp+^aAShE$rn_X^&f6tnQFy1BVUtsGxZiTx;-8~UaUo(SyBkt1kk`?ZD zg@0p!_Z>z5)%@nR^koCG|PG!uf`Z_NXYA<0$*$70%a{x0N+3dFiwBRXx9Sh4U4K z3s*R|Dtv{hcC(_aDnQJa6^%{w3q<2y0ix47XN8|(rQahEY$BEQog>uTSQGD7?hhro zH~Lf^jC_$(L~2o*JJ8$WcnLFQWM{*c*)XR zx8fg3W*JoeECn^UhgH`*OTqkx zMkO4x!riU<=B#i>6uyLGbqc#^cfBH~8U}F-d1cD@lFD%JTA#xAntE?mbgqehQPF=e z(VG-K&qOy7jk`1$zI-ea?Nt2zc98udx_`FmU#%gpEj950<@pzz=k}WP?KSZ$RVS*% zma|uwHa@23w^sD6aZSdvE1K3nF=sWnPl32T=0x~uB3l(f@7j9yGJouC^mC^Q9f2`J z5?1p#-eFU$%rz<7ZA#TcGI4a=rV6(35{#LQn``2ms&T#ENZ#;b80=rqf&Z!Cde!;~ zi~8{G1_N6K(2tYUrQFxh-!XBmQdp8pSTX z=dwW4LPX43c7%vH7*gNm=W@$V^azH`ETJc{>V-`l2kx6qdastPuf z7H*)WaS5w!l5H4+G&XEMTP4V>UI}3EOQSx}A$++zU8sGY{9F$`Y`evjUYaqk^? ziN1mZ2cD08r@UUap5CE5;Ur!s3U}PemaMr_bW#o&&*%io{6p;eAJhTk+Gi>|_s7bV zFfM$viO7NGLU4uWt<-QjFdN8bcK|)xZu{$Kva*#Z(QszevNn zm)~A3gsIewM)v_?hlt0zuGcbKzB}`Imv!oBl0BoxaYj?E(3r=7KC&c%HMNMgAv3KJw>Gt3z z`=J7$X~$Xe$R#TBid^c(1z(1)QigGH(S5F%*M5r|TrF5lTx1(FEf9JpQ)FB2u5K=s z;++0`Cm$S(1NWcHPRC*QdAjaJJpu~r1&#F@I0P`{izM10CUeC}aIFW@58Qjk{AcN* zxtilewL)X}dy%cs>{fCNN@qA7cv$d+{O#+v(c$X#2^iq(6ZShkf}E?%5#}uQ^+VU} zFKVMB;21^lPA4B_`RK-B@BOX+Fc-f?&~$KuF$&bz z)%Z6)K|E<|x6+gNN7aXUMedqXmg(83q;pE-YV+P+$_BobhrXWM^yj#eZf@fX2O$@5rSKS#%%So|a9zS) zbeuhn+8~#U2DbDAPD?dl2jV`oNo;8Z$NpTs6T92;I|pJC;8LB z%^2}7q;=Gs6wOMpLGjtOs1#OB4Y$; z-}LUV+W!IE>(ABxEF?wfN}%dnR~;VUe(pIp_~Q}z7V{?P1EGxHkVoF^1}V1}Y?zxr z&6Ud-@4Vo|QbJ%Vgvd&jQrse@l4WVe-TfGIP=p$}`Z@ITi!9*n*AIDtI#F2WLxzGm zHJEC00iUD-6QJub^Pg1C|F52ZnR}`2O+mLQqm!u_kuVryuXY`m5v?_EC}X|}4T9D} z{=rGYp#yDlRTbPD7kY43bLf^m*dQh2Kl|Nm!)aTvb=eYM_Hsf z5HVLTq(UOPtHIYr3=U`8t*;k44>?m3{(KYB;K6^b)I^J!lyYa(hlZOG+tOZQ-_i*V|<7|W

    D?C${XpU`gmN@WYY;8bCIzWTocb z3YXEU&t@)}&p^MNn;^^tYb7Y?Je>w3>d;_BEOfJGKAlz}D>I)HVq(0(%QS(dT?vvoq0}q!BU_HOn6NV&R9e0z{J3pKf%W{ zl0Txgc~JZBT|p~vwUSjNBJWKO>@7Ixlw^+ z`{2)kY~K(6iEqCaQD{XoPd}2Nwt|_b>m=s!(?+XY25%AxL)1Q96-8@chNxWwkl175 zh~CN6>^SyL%KRdfb{0{=r@itpYwy*|&^KHuJZeP;N!M!D2KD2x&` zRIF*G7%gp6MU&Qmsl_%`ZXGMt*kVm9N>s!sv|<};dZmh$%J=pDeAYf^pP5MrT)bB! zbM}w5)?VxLYkhvLwNmuOmF@PrKFQP|nVKO}{G08DZt~jNv?*e(86~v;Ye@%4J&LDW z5(b`Y>9A}l;lmmTo#gxhUd~z{C+qN?HP>tGogwnsu-!uX_6OF7e`YL*3vSd8dDQ zvOb!UL1%j|NT`&b&uIs-VV}qNur2#(FLZ92w~*ur8KL#;%`N(e-dVof$jgo4Wf5P) z`?7(gU*-bJudPi>|L z_vGz#AA zF&jgE%(WQNfQBZt_b5*!(_^;Ki;EjLC#=Y>$jJ^TQ%`>0Jwk>XXo4c0EpBC zo2t^Blo0;5uRtDnZ`8fAM@*i_tudfNAJjAC58JRU>_xOCVw{=g+Nikxl2z?nV}3zd zfI|&9Fz_6WmJ$L>Hx9#RD(q&h|u?a<9I?6SM|v=5B( z={AO$cm<7VTl?2y=3RSW-xY4?lRlq-8xfKZ-ag*r2uz{Fvg_*|0J!D}YEH7nL~<`N#fLd7+pKF18o;Kr-4VLNUO3GZ zL_opalndrnc|~9wR)@fA+w=M?x?~Sq9ojKZPH$A2TuYEd)@ssD^ZJbTU!+5z6ypR` zk^Wp0{!HWtRf!tBV*X;d6vu1|eWF9{E3Git6hyQWG%`RRb)C=Ki6%#!a6p}rtgHaI zPM;zBo)(b3{)&ds5+KX=>z$kGB|PjPw}urJ9i}LzOtpOV67Sy?J~_}+31^0go(eb^ zXdxESE$pW)_<#d&yj<|5fkBaa`#(hG*HY<+Y5!|5SNS!orq*b=-F^iu<7!VZjp&ec z=*xPw%~IiWemXQtM=w{m%7eO04FT*vdQ=$v1FmAV&(W+L8*F;%w_23!e zM>>%3*&CsrUCQe?(1H9@K?l-wV^|Q4GXe(6(b8QTrIen$Hjt>P?4(~!lD<8_w9l|o z^^fn^2!u~ym?6Ugo`oRj-EA>!0I9%coteczC6oYFk$pn{-Apl!fH6$A%#+tw2d?vp zA!y|#P-IpNEZkN3kIcaQ<2X(SWK>LbYsDY#BbOfH_kRdK?O^3w>5!PdoD#QM2}F6( z=I&#DM`C)DyZb+-+9D|oN*!!i%P#7-Wh#C=UXPfte8Hfp(2$w!t#4~D;EbUCTA~fC z0s|O}_UZKY(~{g#HVO$-^jfwF=jST&)M~{JoIEu<#)L<&!qY>In>AsT zRmj_}6}#dYQ-bim$0P_*k*!T)RxE|%XjJB2LPuw%0r+&$I!QN|=Tj0lrhzWYr;zb7 zt+LfT;UvDhsai7lU7D#~wcT%*#eR(Di}kUnhd2@=jb+=Y_^|aFEsAHVMb=gfm!c6K z#Ht?E1FE9Hjm~D5Xh_FmmYneP5v`Wt_u-iQCVsc{?D3^zHO_uTZg2?9?DLao>*46S z-kN;P)`z1v{VIR1<-y!uobzqdOp(YpOQEZ+OF7Ift-T>_(tBZF;Nwbl4yW z(0nQSZ!s}LzgV$&HIog_vHDDL?e^?k^=-f0-t^`6q}S18!uHS-aUIR?tBL82Qqr`I z4mZv6rbdCzOpnZ~oe7&MGwCUfzLq!cQ0M9M+}<9CnCzl+)5%4>QMP?Xn4n>kx+!U- zkpkE5iZ+)Wa@KWow#6uP#rAI97S!0vSFi*PG>+u0nRO9!>dO!!XXDNbBLOMhJa4K_ 
z7p6If2~>~9b89f8rjcmIdhkoTYw}eT?BVyF?e+3Cd$$A5HCun0J!sG6TlIWGpXg0q zoHsA#5K!u#n@<)`MB6XnCwiIoqNAOBHEozF$+q9Rm=&%AzZ%FT$s-Vrk|Iq~#s! zpEQYFb9O!N>M3Uo%#xT&P>5Y^^HFWaJW6^p;$5!MZp(XDvfW`wqBu;)PtQ-oZ&W@$ zqQ^&gyewayouy?%d+vN!)|J68Gz$PD%+=jaE=TyKy)}E87)PYX3isDbgAIs>B;(n3 zZ*+iyaRb4Kp`dj!Xmb>d(?3H&E1-au!w>-hK3$ELKMPPWCkDZSe_TC&-2D8QdHN_m z2ZO9W?Sp0)a{N>9lW@0#b8=}?;2m|+91)D66nZYvk&hBIC}OwD0*Q-dZl-;+n|iie zFx@t%O1PUCO-8BPP zjuzKF$_E;&gf%>R2o%1mju)?vI-is9Z|=jbSo!nHbMSaEY74gmSLdwsw|hsWf#zZ za|81olHH=wTwJ6>jM1jV7Og5<$YVEP3(Okzc`;jbKo{zOvBk)MEjocP5zt8*Bl>3`f|WCuED%*n+N`0feVOa)%Kc_%CwsgA2fasVJZ6 zkX1R^&u|@d*m+UM{30Q8NRJQog$O!NJwMYWmF1pHNghoFc_c@P`5G9HBL!cpv02DR zG6veRDaBEPdy3haFhzG{NG?tF<(-8b*2xt!w04^N?m{f)UKA(>nJsASp zy5!FS1RP-SLco3j0VRR>bgzMdy7C@75@GFT(I1 zJ>C<~umo50gi}ZObTR+s%8HGH+<3DpG@r=1`fG7q8>rT~RQT9Eea118d42D)%w-@T z`whdi2j#7gvg}t8W>9YjR>ioLpLxVy)<}C3CkG(UKw~(j*qEiN-iFz?Y76^u~ z%vYGuUN^SRef&*3IN*n==o<9j?RWG>cJ3wSx&1PHpdWkUhmZeYGS?Fty&T2EK96BU zQbNRT6Gf^nb5gXlg^ykTlCeZk89ewIdRem~sTJ9BDeFMxQ=!+ed_=_iHEutVX;qVL zC#$J)+@-0@m5lk4rB!lDRMjyrE>vaF0eMSHQV}_EMzl&=K(dWF0=VLA4+p2)RZ+`! 
zpVhvs-vP736+^-0QZ{2|oZ2>G798EGlz~SFVhl^dS^k9&i(m{z<{QU-W`ts_+5r*72@)^;JmEvioAwU51vdh}9>jS_r&^P=8gz;Xa*~(;#boABc!~=-I!b8YS zb?NtXvZ9xY%7s~vZ17dlFJ+CnL);?Z;wSu*?xzPq?jhkjtTB=zQuXS;BT3@xEQ(C= zdf&bmp~*+d=j#@X<3}80R#e`tiBhZo=$jIc3jHwbQFC|Wb2UuaBl& zH^Ozob=|M$bzKYBDxdoGE?p;ny+_x$2<4X870dYnTtv)uNE6xga6je+`IR$*WH67+GjL__^qUqLGZuzNf4R>tRg}690D%r#@2toVXm?D zi5uo*7)Pk#+vT6!wrcD9=jLL9XIrD}6?xsFywb#I!5xx96x@Lr_s@Jg?YsF#MZ2f| zFQse_Z>dMnw$w>Xe4FT2yRdp;yOp=tZjainj&ZQtIlE;kso(ChTW$YuZx2|@ zavj_49(${-E`MvcO3Cu3tT0ol-_F}xov~+ckK3)bVfDhPI;oUAyY(T6&AhN;;8HNc zTPrN_T`7$G@!NTQ*Ryxv%)bB;?%SHf4r)MK#Fil}QMpdJ;17;9WOK3q&fQt|AHht+ z$cR}PH;J?dGKOAFF=j%JU%}z^N|hkl2s#_l5^HVUqkm)`q$_&Ou#p3A8#3uuFmZ=7 zcG$SWnM;^-HtgT8sK9=sZf!JFbyWlO$+3mfWV zxa>4%iRj4}%KnpvO-c}g7V?*X;f2K{W-nu~GDh%MKAU+FM4=KPrLkk|&fo@!@tc+X zLNv5B0}F!1X+y=b|LnrwxQZ6dPbaP(;DG&su26SY_zvNuV(Fufp4>HOVJf$lp$xh^ zr9sLF5Z?VIz4H!0I-ZWvpCv82-zer{N%vduu1fPf3>Vtv+Ew^$!RG2-L$-y8J)qI{E-PyZ`CDd< zC=5bYQHwdCb{ni_6<7kU_uM`N#2W6vcTQC{C6BV7SMwJFO@7W(1e0z0aij3Ft~=`O zr${a@cPXw7-dxnBP0k_1k!-ztd5=|(bWNhYz>+lsTSqbPp9G%7FW_&%SQl{pbQv`lLJIx1#&I82OxkaU7O@Kr=$`bnnKz? z2O)rar4L3H4kRx$Ca_CW@U&W#Zc6yMxc>FFp%c**J-CGII_QmvvPi*fh4?Z{E-30R zU9~w%+H*nJph%hlv;@YJi~}sLhG?^MAsWNGCL@z!Qd0$2*#Z2lz<*?lU`%nWMl?Z; zJntn^A!kW}Mku3zY9J25qTC8(TklE(gbO_BKe0Ue|8u~?)W1)T#7z^i@B(I!e_DSfAkd>tdazCb zFB9FV({OJj^k$^r8^o^Jve_>{m``S^LZPygN+Qv|PEFDn^sqecl6!oXgiRSU*UeND zD0|;w%vc;DACU)B`k-hB^txF$t%xiyBKa7kuqkk&U17pX3)#_&tS z&%*={YV1nhnhV`q+?cm_W~VK$Kg_QgbEl|IT0;M~_85HrOXp8|{V|(zHqYM>bsM#vko)nd67? 
zTIX~QhKiBE3{_mOfpU$~Q;rb^vRG#orE+?B9V;BpXD(3+_F(SCxCG}OQ`R8K`$Bef z240=L?fXtoCS3Q{6eryqJ|W>&1TqS9_oK1!Q?qC)+rzk>^ETsr1i~DWlg*ck$YO|a zH2W_iS(GixS&cSIDYF)gPfCkrZe~9Z(UYmhECu)Sb3t!Z?7`JW&LZchJpXHSRd1qe zTJpNUM?4{TMg2PLQqS8cp!CojIxTTLUouZCogU_RIAC*hVLsMxCm&IkPXivt3xTXd zf)Pg&|C18PQPg^?s8a~$*=TBvP2TewK@Vd;?PtLd6ziZnI>5T}g&A6qyAb@*W`p@% z{@MjlQtdUceD*3S&1-C*BLfq|ULQT1qjq_`&L0C9z+_M*It%fji2gsCRLoFfQUT1b zRyE~K1I8LMvf;YZ&vO9)^3X|MmXef9d{wK|S#?$Q%5#VH6pB!{bWOEdkfzqT7kx(q|je%QasOWj5 zJ0{o-S7<8MH72yPAWc7V(`p`gT0=>m%234SnipWg>%&MyTm&*ia3SPtBLP!l)-k1- z4MtHZ{LGF5QFZDFku1mAqfsCQ!Z@UMF^(4?)T`s5T;do(UJRq8QsZQUi52(jV3R&o zYQ#Y%BE;%)lI%`|-$47uRny5UokU;GN#r8mHZsiUUwM5JY2#JUEEjq5Ujlg&?90bz z7~Jyhy-AzRmu-Ma@TH=x*_;mU7RZ!kd0nL2A$5W2U-};e3{wJ9WeWs3bCJm&lPRzx zbWOP33qd=j2$XmdJuM9Wstv@zMPlU9qInH@2xK#(Q!MVy(3gk$ z?qT4q?LFeQ^kGB;H_aumC{be{r@i-anh3294V_FZy**Emov2^e^z7WgP1eHG}Dc?(rE?dlsQO%rGZH^%@Oq%5wCP zXcat*5hYl(&`uO)pB?JZGMnA118%2V7_yY@F?7{&LIg8;n^2jJcr=VynhoScPko5? z7Iq-D&A-?mo2~l;y)EWg2-15=UV_n554(@7Ql*e8DZv^WRWB96ADb-!ZXYHhNUX+OCT`;8=?2KQeol3)d8;C^Ipg)B-1XXw*Ior3-8Xq7BZ$o?Aph{o=!d^sb) z#6HyaV5mkE)zRTJv?-*N0CcyM0ICd;2tp^308R>Gk3K2jz0=r-p4}78SdIx)?Af|F zu$;)uSCpfiC>+fM=AIavjWIe!+>tyAxbMwtiHfaYsf3o6fc{a<;?Ove_TZZghBHwa znWgAU#Rkc0dH?>a!i7zJZT1!&OZUHbE+<-5@&0_jEtIkhYfFcq2N@{2u8-`unM~-Wpy+(um z#VTmP9s_!n!{pVy#$Z4W3u>50-oIKicYOfy8>;OXh<75sgk=Bx4!*0#S){=3AK~V; zJqlb)qc>Al9ebRPCF)p!NRC4#@f|fm(+1lj631OacO1o7^I|7=`wj-{^^E z(hBAt#Au-w`3!+fvPA~Qc`VhYZ-T}}`~UzROXbKBU^hpT8 zZho=HY%_=3Ov)v5UPIhsAI@%|2+pS->}*$SY^H$;ZPLx+J#wo}6w@*C=ymKo@6Otbpk9lH&tF6JXIMSErdBjgkp58U>) z3_b4!5$S{^xfBm_kCc<1$MOXxVu%34N;S&E=5iRsz3G}FNv)R4wl#~XxAwI}h zyb>|BL#m-PwBeET2RJ;E{s4^#rJGwIDyN}&H8k%Zt$^fzr+o=+!2)%B%wk35F#Ek5 z?sUI*!<`-m|De;(SLz@=hqH{p3{MUbkOTiRO&}1S-j@+%PEH0chrOtJL;Mlt4ERO6 zODSd0J3SnL9wgKVo?+zlrMH= z5%Z8k#gvb*K1zxW+uAJXFVk2o(FT_T0!Pyk(k6&lJBi&U6QwU%Bq-!73|7L%YxtxV z2kX0}1F&SI#(YW)ZYdE6%$VFVu{%%rq@{kPOxAWBykDp!GLn)?Vp9o= z8-jnNfoRNMu{zIg@z|8}eF3~gC}fq$<>9#)UG4%HeK-KFrj)vre&vaM*jmJwZQZF^ 
zcdU1I7H|qXhG&3?NkQ2+M1ql?v+5jktVX5SCIW_4Svb1%+9SKE7QXSq0z6Ciopgxs7 zLB1q6^N(mx5XJ(-K+iR;9{X70YvMw3nWz(=BWkQT_+m;XqDdsGfFYua9Uf!$w<|SO0 zfwKB4EWCbPIl&h6I4~j)^VDBZrV=ccE8&!4#WpC2^GQO*zqcwG8)P6UyUh|TELv7e zhNB>4B*{fd0nDi-qaY27f{0;i+3SM2KNfq9QKBH4zkUJQsu$Rt#Oql<3}Ao{Kd%To$HZdt5Bb1AE*J=$Fx z#2N*j1kF>$gVbq{?w7}4*e3eh5?Q)8o~hgnwlB0qe({_0evi$ZJe*x zG6cwxBk=p;jSBe3kmCW+EbNgdN;y`*8*h~a!cmdH;x1mvD=o-0Sb85{Te8A*a;>NY zfyY|RK23C$D$GO&H0}~e2L_`xj z!Urg1RuWNF2p|mA;$jqwdx|nc(~{K<)<|Nx3|Byeh6R+^Nn%N9;bO|CH55hoPI)Lo zmLf6?#a!fcOsWkdkycY32?&?V&o+`6jG|KbnH_~9e5X7Laa~hf2SPl;$2jDYWgK$J zR>wiP#4!>ge1pMMYMg8^bBXETNeQzf!YoCr5vp@1!PfyO*lWoa zK*vz794~|}flO=#G`n+Hwoyg4t($1|y&SV@TlYDYD2eM7B>)?Pe zxq3veDviteeJn7$&-0}$mbGD%{5s9ZOf=W@m`P2hL;s;SlF}lu4x9n<+;u(FCt_Z( zRCVNv@z{h#gwWo^C5y)5W7?p%ON4x|$Q5nNJXrR6CQ<+#M5C_I4dD-FLsV2jTHHU~R=U*b}T z&<9U|%PS?cCPT-tmg1aO;9u;IZCv%h57g#8L;@S3^vE|S1nl9u&=6x6l=N99f*91o zPLA3jc@w0rW)RXnWrCe_DtW3LYdEg*tOVDDqEYZH;wWeh13oF$U$|DnNgN_}!MUZd z_Cl`RSm9d3^wZ$ljl%SZB5}sGArkjwY}@Y?Y)eP0*ghfKYV4yLJ8a8mO34lL`A|E9 zp&G6=x{`5GjZDERIhXF1oU6)kt{`+0&MmRL(V}m4A`cdEgDyvL6d?mF3M4|2&roDA zC#PXP8nyr{4zj;sKR5eNo~hjI@uz7qQ|~ES%vV<|=96V+Te%;f%fJS=J`b5*k)6aU~HSt z+c&m5O3y_BcnL-*Kc&shzJnv(A)A{g9DP% z0;LU(6|37|8fuvn9s%Ouvt8{Lx5yMnWEy8$27qzqG*9BIEA6urh)qNi%yKRatn&(F zG(9P&ex$U{Cr)6U+jCjxNI|(e9QsRI=gKl{nyp|qf+^`s*a!p5Tp5O^V3`}LhAeY3 zybWC?o|FwP)l9e(POBNOnt9ubP zxhQkkSl3gEmzMg3xGx(rh$ci#OK@Rq^Qv4+5xYwAiIPrB;+@pPV5ut?7w?H%>Jug9 zj?mZZpYkZgjpiS7 zZgh-DWn&x*Ep>U$93yb*FiI*lPBs`K|BG^QPl+0dUWo_^*|<})8_vn=F86y;1_iEW z7dn1k%Pw^MT%}!>jQ)Lxm|y6!6Kw@FOb;1DP`2GA#KTpv5~awUmofc*`RK<4+w%0+hk@aW;i}@JTg#%4ZNC%%ieI# zp7vg@3RE8%vDKkrwTqpJ(lOmy9&RM7J=DdLRjnAO^a9halz8#5XYJ}m$PRwU66t+p z8gk{oiO6Cga8++57FM@z*=a~v%i>=a6XD36rMfxrBbc?I5ZLazXJTJQ?|iqr(nFn!O!_ySXt zd(%E%g1SMu(kvZc;1?js%6#CPbYk>rt|wAgu{tZRfij_Cd=&{PCLcXKkBuPLK=28; zhk!Y!?-Qs@T<9V_RC&z}&biZQ=AJP@tL&pFmq3`dC{T3?^uh8YbqTNts9-te35=1a z>=O8uD(peSE43i}$IK;QeSF$30ckHkRxW{OYB-vMek@!9OAg3*yEKI>E*rLM9d-$f 
z;pc9hx=UcR^www=Eu9DzZ_Uy!0lHYa1V~he&i{|O1PnMO9Qd&Lv#KtEF%(YnE1rf+ zpwoA6q2=`LhD9y`iY?_57_M+K`?Cxlo%&FovP-~nGeUTfj!F6kb+{VzCJegJB`{q0 znH|Mbb_sL_4m!t(>vM}-0+d_QCBR@RHBL5|r|S~NfuTNmKTlvAK&lH|0yglfOCUJlv{kWOA5Y08(0ATFMVG*0WIr{R zK#Aaba6!6m@BT+sa(x&+E4x#kk^jxHbx6c5Y` zo1rRf-yi$1O8^q~p*|g#K&VW{z$v=~N-`L@1j;8*<`N*S{(NG=3tR&K*h!XqCilkK z-ni{jrS%DfFj+IFpQzX7v<^0J=%3cHI$ynY@o63GWgqO?6-a>`-x_Oc!x0^$enf}% zK+zE$V{COWp;I~Vn~#~2*P1C(Jz(=MeSLgGJl*SPDFR#S=Xlas&S(6rJtWX4+i<$_ zObFTe6>F4w>>|MmGFB3g4bJ80PWceX6GOAOcr00&kJ0KFqhVy-)3jv?dmSrgP=NbYP``PA5NY8{77=L4$9ad?_ugAY)6~DK%@jTCIUeikxAqn$Bbl6Ea*U z^Nh1NR_CV+jxtkwEA!QRw-Y>HZD(<;qWRH$x!Ub5yEt!!gCJJr%Y4K2Wnsg0+7Sxe zRJIQyd6M_$JIfqLB5U;C=jVdRE$=LR6mSJ`fC6To;n0Yd6ZByr@T%Gy7KGd+AtvL zyFLyAHuSqjIj}HA_aLh%U`+n@o)J0{+&)BJ7xlH|O}A~5ya1LjBq0{0MS#l%&oM!w zWUJiS$_UmtNz_1`!e$-*8b_6U^mEF(1a#fiKu+HSH-i#?dPDP;&`2{wQHQhUc`_-LWXZ>NoSo@&m!qR zN_eClz1iNrELG!C3WV63uzb|ykP#vQ}ZfZU;EqQbg;~SADNr)}(sUP@Y3ZGF@r5XhPsW>fy&v9)zwZ^Uv{(;~XWPKjoc z8R6(FPPfp`Z-kKQz78_4UoP||bfPQFAw89Q-?mP(kWBJ0y<=K$H}a6%&aFUKyV=H9 z(|O1*5$rid5m?ow7SMOLN)c`{!JCCrO*{RX+$L@3fiPhSRZDAsw4a;24b`%&NYhwt z0d}Xx@<^br3ggPpy4qO9ltHapP6m6L0Ui2$`w!nS=<{vd^!rSUwlaa$8$gfJ5Gg4c zVKw6$Y~eI`MB`$;E6Z0#IckIZk4Lk1d#3Od7n^+PF;`z3esO;k-bBSZ-=Wo-p^r3X zYwgPCpo0BZy#rDEOg=0eUlERN1a{f9=&e21Oh#P`412CX9b|2Da$LF3_Y_#LumzR2 z40zEt5(Ei03HfPIw&4N0t{|xdN;YiKO3zemLf-ZY+rfd2kNE;yg79|=Hzzw^$60P* zJAUEu4=NySh+i5y}0uM+x(&b6)j?ier$UcCma!R0DihtaAT7c{%R8zEgDI$ z()JYyn%m+f`m4IO|Ack;mN;}(WPj4GdLMnPz6XWvrZbf%wn;<3yAb%o?%MieCAWjZ z6$MkA`5GNm%c7K^LD0Zv~38(XBTd1QY2g}GqX{W$+RXiE3iU`S@OP7ZnB_P0+CW^ zsbq?*XG`E{wsY0cSORe~L!zPNjjl=~9j#|8q}(ZlA6;U{i)n;C(iPZi=Iu zBK6ISgefm1k(2#oC`B(86rwx6jSn01&j=} zFSxgpu)|lnj5zFFkhXuf9=A15vYC-`y)B&CDJ8B&2Tj{J!M33`%|)5{@;f22NmxR8 zk-h+S1@kl45Hpw<^{|9+YB{!wex@OD))sIf?Yj|0rI=W?K0q1{C#vk-t*pF)&EsgTa&r#uoxB~E!HviZQ2Mq*2N7>Fhu zTS;O2ewenSftZNr@BmH9gF#dZKcj_#4rZ-|PCdIe?a+1q0`Xw6<7T@$sghBCTzdV!P z=}-uc{Lxf!i=F8tZaiWK@L+*Fnil8HbXQ6kJdonaD0YT2#lvW6s@(&~Ak6&#+E 
zBoV!%?$H_#i{;uC2SGbpaH6TYU3_KGD*z-p#te9l#OGl28q<-KrxMYjh9esA3IaYXp&u3ChVFQoRch`X!sB)w>RG0m1oRd5v} z+q$dpPUL5LLtk1FvG)h;ygOj$S%BaMyY7ycC=jUeBLZv|TUWmEBH?Xic$Cg$J5l6h z0<%8h_28p<$?Le6^`GXd-6Q6;k8CqE1+_KGF6C`|Q9S1ohQOo7C2aU2^ZyKb@zp*e zlHx(T?!)MCfRTXBZj6R7roL)DJQPXsCVNFEM8wi7`Yo{=ii*MU9RFAy$C-hpQ8DQkX4QjyPMH6DD`o1dF$YJ-m6D`)nz&`&O4g;Re*S7hPeo61O&4>iam>lXIb-~+T+w)8?JQ&&D` zcV% z9#(r#rmM){Uu>Ax+;O`cqE2gXee`vsPJ`I9q0XOxp;mGVeA-*VA>BCnB#+Qh9%Xl+ z(kMU1b+F1GD-Wt!AKg5X-Go{_ootcMdIi-U;aK1@HjF^mDmC2X0g3qTrPu3J73o7w zGuHDOvf%rFcXQTVA@h^HH{wPv@n6?oE3D4;J&IV@H&d)rIZTG2`X9G-E>e^KV7D2K zZ)IX97hRq?bGe4D)w2KW->2hYCtqttzNR9wb~i^yb@?2-{HiXQzj*qf zF4x%QS9JMYo$*Q%(&u%PpAQ?*?8j*qoKhU7T+3+`>v;JS_TcL(w{BB%Nadj4!~AMz z!8P2jo6!||gZ@dvJ(l)r;$$fnKJECV$I_Lg*kPiDcv4c_>X%(Ef736edcM&w<*f4w zQO4)`=!O~*IPi$<63X}6T8C7MT(QQS8ENsi_y*JBMvfD@-w!NHiw_V&z@uwTIw__b zqOa6m5d8sLnoG-JZ_>25lPn3DOkbz!o7^>`o!113ck(gFH|27-E=MPf#~fEfJl_ z-e#1hz zg<*l>OP}7s6|AtHF?G+T!RZ;!I(|DJTANOT>Qv$0iEI}SipM6P1QA$T93NaB9bDc+ zoNAIV;}! z;G0?XLX-1)OAO+mesL4srQfs8PY&o;dpB%K9@ZuO&o?FaaamxM?*LX@JwAaCWzMg6 zb+~>!6rIa@>o%p2O;YADmXCb$@n6e|gGsi*_d_7&QoQ-!c4Wnw#qm4dC9u!2eyW~r z?7yaU>d!6Ht7S9A8=s*5rb>059}t|f-;5X3*{M1^sdKmL{N!+*gG<-hr8>K)^S@MQ z{cxT6rR(fgo!!)V@7!Cn;`C~rF8zAP!hY>h`8|~XsLG#REzfypi_7m*`F)iCvdVX> z<b)j4ap&atKI98sMk)R}wJ;=Vn)be*HBbCf#2r#hoU z-w})+(fdbu{|9;>553n8=y3Mo4oU-BBH{=qeb*+w)$(e)fGGbmQ@5lo5b#AIfd9}I zBpP(Q=4sh2k^pehUB*3Y(`-|6dxeqq`}O_|raBOc95ANp1NV!+JvRhg2}%Yq0Hog6 z1_KE1`#^hG@6TEEz7L=sxAA^#=zVcJeTT$2F@C^!q1`^u z$6NgR7Qu0Mfa7k1V}G0<`^%e$2tnH6%{I>6L*tYJStXErRc|2E6q6op7xLo5xVvx3 z)cYxWv!E8>9)jFfvstNh~e^d-P& z^nPT~c>0|2dA+ZQz<`fFr{AylRSrm8z`=v}ISt(_dOBR8r+WiE9d=nJ1>q11!k5%i zXV4Rxk-v@ zgjyd|t;&?gULZs84&{%l{Bg?PyQusyy@m39wdk8FKT++Kjk?ce-`D#Jy@&dJF8hs7 zEF9G^y@m39uDe6!hsHBZZ=rmj-~MV*`C)ns<@@~h4V7O!o<4tlPwy-A80z==>xO%H zf7&8=^!e*udOw8am{`#v$<>GRjO^}fPi zVZ43*`kvlb_$$2c^Vj$FzRF**Hn=l+{jl@Y1LCiv75;i4@Yhk}uRbImS4$Ozk2y;G z)gSp=H8W%b$V2CR=cw_=t$leMYW2teyQ(!=ttI|gRK5?752<_w9-+NHJU*iL6?laA 
zeR$lh_XBunlO5(qY}AqBn|k%nQGaf;Lt2~oD47iJnCLaafx}V{9(8aX6X=fX8tdm+ z0NZf`Tff5}_)}Va{$PM607{$XINCwqVp)jVhg7qo`7T85H}$?U6pPQ=V2V5M=G6s* zk=Q8lq>N|pjEg!-#P{x#ky%do8sV*_J(^;Vl2g#8)Pw&d3sw$l-M#9+xk`ex3tP^Jhfl5JiA(P_gko_uQt-bKOCp%Vl=?c3ryb=ht=VZkxC1(%m*U>(U)UZ`GwcgnmtzWH8`u zxJj3*?Q*9sozm#b?S6OrB=ob-uGgmfCS~u=X<|3^NZ75|o{Ew_W-8^rNg28W<(W4_ ziqm@zQghva;#6}E&@54>FV_D~byigCtRAu`!#8%Tcd}TIseENJtdaj&wLnApJt{9X zYUiI(e!ANJvz98qPvxaXeMsdihD9wuXQ}divHv-huUN#j{5eaNKdAO)y?#gKD{5FR zpI)kbU-<3($in`gu~hlKF#M3pKd(Ao_6ez2=m9=`G5B?r9~$quOO@|Sl^Z@v`3=?f zQSFz;aVZ*m%Zfiyt!}l}vzPixDJ)5e`J!r6WK7Lt47rw#l3t6?2kI3!-J{>X{pOpp z;)dIB3&HTa@B(kgJ9L-m8=Ak{=)AaFG(T_GYtwn7d3w04_!oWY{6XIk+{Imt2sL-I zYAOx{dcF97#9XC!h&ef_tZy<_KKVZ0AFfFBzO;u9rjuKg8K3=Z>HQH->?z|`UhXaI zeV~Kylq%E7ewvx>p03M%NXDw*vul=E_}NTyxjv4|mHsJf zh1!-9p%m3R;eh8S(bmJ!b-i`@x+NBTNO3kNWh%Ft^#m&#M_zMlT;%zPulcCclGc1Q z$xmn7j;1BslbWO?6I<{Rx;{?K)A~!8oO!{Ph-#N2j_qquj?uFwl7IDZgQYn}Fo<~) zqcd-6-AA*s?&EA92?@8PGGP<0r2MkH!9dwtKyi-lGSo!nf)mM!@DDRyvrbzy%G!@9 z%8#U4xx0)M8D+B%)_$xeV};(Uo(>Tt1CKNenyL9dODrp4g0a;7({Phsii32DvONf4 zwwLAWSpHE5pg+BHuYxdO^ekKc(Y!eC_SY(~iG#?UvMuwv?2k^KQ2Q3JQTYWLn=&*QX9F4@teYMD4A!UJhQe$=A5*Tc^xIpak zGD7T(V=Am3F^1&xCWaKSZ1h)<$P0tU!wM423@CdR?gI_%3AEk98w59Lo`)v8>Xr^nnz6cFnqJo|o+aj{8hy7(Fb&Yl` zKV7IAGgXm3N)iTC?Gab(Gp^Xv=ZgK#6{=y~X-b~W7U?w^N0GsQB8YKl&E#oId6ROM zg%;=4;vs8so)+K5w>HH0dW$|A#21gT`=n98yWP1vog7jIJ-!9@{a#ia#1B|pV3D(B zR_wRqTx!hCZQf?L2|EOD=Jm~G%(Bvn?;*UK7Pr~oyW2C_@2C)P`hi}4{XsIIzc}dP zVO^$cqubh>qA!Q*+qr%yT<_=lpThNBTz`k_;&E&Hu>JiaQQ;n0#f)r6$KttRWGst; z2uL z_}|2p5(xooiU?Y8$P{*%jf`(bcMv0x$<6Y;n8(2!6-vzUScj{i6g2VS4n1eA2PQ0V z2XRPJDFS(bZ=kkYD3K@D<p&f!Bh?LkPS!2!You1a1Lv zthO`+z-xm5IJ&mAa8%XYk|F@kVXXF)0C+9$2f%9<0JrdWErh>oc|X$KUoLToN){DQ ztPMsFH4yV=sO`yMKpub#-0750E2nZw2+AJRU>N|bC z2{D&uaa%l7Jj@gDZ7o^K*Br6UJz*BF($JRiMchMXm5@5a^XK6nl?>z zr56?F%#M$xqocLa(fa6Uqmht_m`7|EJ>Jm&Mn;MKA^WR99 z={jkzBAe&2C!Tow?nj>>la90mRNL3zv2!ndY9AyHaedf%GhNt|lixF_7jGYbUL$Jp zzjixDZ96?Z{q(W1+BpBKPfbnmzbqS>oLshS+3BOFowj`W>CMxXmPZ0ACxntDe_Fd( 
z!`+_WvAd~uvFC%jDqajDE^imlRms1xcgxzv8M=GO?k2UKkb2*?yNvS-)##povD*o+ zxaX6J_^@?u{_U#SVm**P`Jr99BZ4%cZ`@^f z&318~-hI^WM%u;Sfy(0O=k1mSkat@rf!|O&IAv~wCW<|e+AUt0k60Jp z^bWnnEwf%PziM~5WyVzVUv(#n^93~$O11VGk!ZO%N;oOK#*`{f#RH?~3@wm?sAmz- zB!HL8q{h9TQs}bJu5N!e;iySp(Z=cmoI*+QxkI>*o`e`2`Rm3bh5gbL-7nJr>h(yJ z$!e?-{?}~AqoZSEV^P%Uv|7-vuQpJU1(QC~RzFRK4O;4a_zQRI!yV8fz(2B=$tmrC z73jE_|Ikj|1*9B#)ZT%VQQrOg=j;xoh$_DOcW+clkfM6u`yweIsTQT`3!nd#eF2o{ z?(h8_DAD)6@eglNNid>!pZ;Sj2}0E7L-La;x6R%?`j>jQ03i&{R%L!Y7DuUxPu_Y1 z#ZJUTc?<*2j<+Lo+%-m;qcD-d|Mwm#huWr<-@m;aDl0OI?>uC~VW@idIqw!jZE<(M z1B{`HYX0$$trdnk&fT9m_!z269_r(*>)Fo{ z3K6`b+@$ktkwsd3?F;lh={!p-jnm@S1%afqMr4r|-}-Bynsm+0*IDWP z`0Mw9&!lr^yZF!t?K>;H<$v`S&Je!R;&yKV;iQAg5a#Qba{S$*Ba#K)y@b~Qw2K~&3tQ5?t zQpazzJ7dnY_=mTN3dEf1$A{lylf7kAyShe!5?^;PHR-@eG zcTns{_FLbfR`uaaU$xd@%-x^0uR^J6`QaUQ7q^Q?KBw}cQbeQV8-kKkbQ_{ledmwf ztCFWidrz-apJG`tBuBjS^T{Z7dhK(6V%>VD+iy4?*ZG|ZzNGULj-CC=VM{vC^}F|c z!|v8Pq!0SLX}>cOnRMUDJ9t5@-0a*5FX-+Al8lLT2z}3{r^F|#)F+Qbla)%o z)7yj}RMIAhK0n;!{0cv)`^-Vb=;hYkyPL3p%D z%C<`Q7ylx>pmsj8&uYR8e{oD$S;z|;&xTYXG!}JdIzdCD<3zd9CRo<6VoBv(2b-Q+he{|eE%p(d~z)N zF+@rR$Cik*C)$H2zB;;W1`C*MVibqO=GHuFqoTZN$BkKW=R0oXOfJ65n!ut~aLDH> z5C6s4#`f23dP2V_K6wn%IpGxi^jO_%^%;KcsWZG#RjLIYW7?#6w?uI&rq%Sm|I;vE zstHWG%O8ImRHmB1q?Ud6QF{l|N?SShVY`EA)#ekXvUQAUSrRm7^0}@0iTgm(TefR)E_4mfyj^ zx_iLyn9OuHf6&HY49vS*tW6kLZGPOL3j^!x5B(X;X4V3mBAM51Y%D)|oAFJWT6`#Xs-GXV7N ze!pW<)7`v8pQ-;+>pm^e-xH?rCmJ5tZceD^kma7^Vai@su3l3&{E?BzJ?>v3g=WjlPPUR>Yq z;nvm)3x${3kTw|y7SX7)h$uuf4vzVpa;C3%#-q#n`7AB>8Xb^8XnHT6-Yz)q-DFBdH37w*<-s=P507^MTV>dc&R!3 zBb;De;t*KeUB1GKN_a{e1u)o}FA7p%shPbuhBBe+X~CX%+5)3CTfAX=FV#ER+qnx7wFFj0Fa1v&VG5HeGQe?lgY0YYBZOTeF+Oz4ln(adfSA z0J`Z~RzjsW5-_N39lI$Xxv`h3c)E3x;#>j`o^#0qhL>zT?WVl0h8on}`p@x=l)ko? 
z-bh=jZr{lpDp0e=d0Ep-YU?Gn?PY2&^{L^SGz1LoC7T%!YNo20=%v1UZI72$^DFU< zy}G{4AQ+(+sn)OS%LG!WWy3n{CfcAO8fY@~8+)Uw&00p)ZVvigd%k=WOE_|GNu0~ZQ=W~2i^){tKZjJE(+xm^0(iPlVC-|QAmakZ8)5(yl z)bAw{mQWlu=>4XMolC`B;EfxF9gW1_7Q3>D#Aa#%BKTnCS4S`_fP(jyrAQ)hXl}}` zrnp*>Ak2A+kiOj|u;M}xI@<=(H(aVjAtXY$vR$s=sgrF%BuA>P)z5LGs@fgBnij8V zV}TxnhxGE=wZNgB2^5@8Q~VR=N%pzb?O%`!EK081N-YG>O7kQiXJ%1V{-D9?y4j1^ zMPsEs&e_fdVq*#e4dT1>vQ<4_3l&>g_RrGrlRJp7MC}huB$-cZTaYztqGZe9pNgZ2 zX_0Tirb6o@o#?&r$okswF8IiR*!R02E#3D1?u~{$?rUKK4DJzna+}#%sLlT4-k6=k z#Ri7#@R*Psz=A>snvay{5P5XhgMIx_JHXR;@YtKh?7~q?1N|U@(N(q~36!EYE0jLW zQAINdN}X$<5UqUf{R8bKn6j{q>xJMwc$8b=cg%mKOfZ7;2qE&bjxa1g8`}(s`Dunk z1i;RTc@u0O(Pjj;o5ynGKRa3FWL{+X^70&~W!kh^CpfVxFzn)CXkv>Co1JO|^Xz0{ zjx9rLA$p^eij?O-BvVIKNfHx|$aC#>tXQ<-@c?h4EX4Oq_=O@TI*&c zOVWWZ3wks3<)J=#KdU2dFk*vl!oX2rH1Nju9`h9Q8teBUVGTSCoBdBTO>46+jnlR; zSu`}T=cg|x%uIHUlkHd+>LM!r94ADPqg1&qpjXJ|r`*q^92CI}cmZA!1UrzB2?nBb z0JoR0L7DqtDx@Ud5$m&Muz=kM|7yiybR_Iasb_ov^>D7|64YbLAk?Fsp!?J_S=jN% zAl9-8NJz~mxA$0s0VpR>L3Kb>*wQ^;^pz{WOez zw6vGcu+yU#8loR7Vmq`-M;YiO#$3~43qN{^Iif;5Y3ND|Nhq?l&kl8noFzJ09dIk% z;zPCp+lfyk3G@;h@q#0<$T09B+FOL*C_0}F-UvuzGi3^OErkdOZFw(Iw2VM?>a(`p zynGA`Jv7F4m4cK`k0xOtk)}u0s2!1;K;w{CLLQG4AHFT|W2czFjl{g1n8>uyXlX^- z*^?5-|DI?8W5dFQFZY&B$0l3|7)rYMfl&(>8n)GrGh$_-#`ZoQVsZl^>x28^#mM7n zh!@`{!jFmeGnpzRQcAm_PY-p>&!W-6$KjZ0 z7cm?dRf-r@M#T8;&?gizC0QUmL>BOyW_8G7jgtlY<&|V1#X&-5xLm%loQP-3EP>eh z*f(u-Q<=Va#$OZU+t!?Bm*7E?Mu3W&z1-bd=u9GI`_ZOd{ff#pSd9{nwnS-5VptuO-VOp-IR3hQO>21(xhqVD3K#*qNspU043F=N~1Ljgi_O`)tho$ zs?BLoDY0L;f=xB~`(Dc61!Dqzi0QNa8#0I7u^7CBH9(+occXMm0uZDPMCQ$)^8xZ) z(MHt6aj?DkK6a=!h1nb-Fc@*|n#vTchbv5BCTqt9py^}zvt9(c2(Gt;&2`=cf zPn?4QWCtiwKru(A2u-;)Ww$+U%T!mUzqwrR0)&dVoA6)k%44zr&fQv^SY5)tb)K{? 
z;1@#`{cG+KjkLaHmfoR?XZzyp4^ZVd#PeHv4)Hu@*Qnw`y)O>UM>Cn9fM*YjXe%iJ zbMaP&OiJ;VBHgwsPgw1|Lss0{^vISQ^7<>ZixCYV%!|=&tMokvuEd73@H|0^z=g32 zEis+e9Kg+N`^|~NAH_IqSLdUv{#8lA8z=E;mYC6t*(xfoMVdn6Vja5+H)1Hu+B|a` zXCcSMvq-z-=lpFth_|?xU2V~7q5^TWMX5TG3=MUPgdE%cKq_lg9DLxN8C!G-=1QB2 zin*^09-y~H#i7arrnsos_28g{+-^e;fctP8V%Flwpo-+6NuBQ`?w~siv1kuGZJb&} z?AFkZs-Vz-*GF&J< zpwNc+B|YUDtKZrF7|1fot&y%+G8qT$5S4WgRF^tg}y4|urA%umeAHQ z48dE|wiQ93@CN-0apc3mY7C>K35}8kw5}ad5+!F1KZs-03`6u9x?=#f%`a5+u#x8f zTY4MuK{?O^e4N_WN9?bsJvNJE>(0lYc;VKab93|*@?C}GZ)C5~uscABaq|HHh&<+# zCIRe&kpF2BeaI!TREn7(y94>t;wcc$Mpm{TOyibhBKGqrelRnrd|8m0qp%KpW@?|w z#BH*kWl&18fi73P0g6NmTi@u17x)e&Q-%?>TasfVK(@rAWjH!a;JN@Xk$ zy332;4{Xt6)bkD7%)cg`GVBy;nk0~61bV%_*8trD0!5%>GDGVwfuVAPo$_XhD+rug zp1{WaF={BNH9w&5JlbuV>yO@vr=_rHc7%SJeB7oP1?>>PbtRF3U_=~kh{(levMA~= zLNSrhlzT*(m>ni8M0Dww7H!oEn1sASd3AQaNV%CjGA9bZ@QIaQ$!DeB_vx?ZpFf>W zfln($)+&YWkW1nVvCEs%A>m>{94wCqXoG=F;@)Y@jdzb%RMrmDQnXDh^!T8d*Mvod zg9T`frY^+?urnwoV726Vs3{A5q}8DPNUIU$)Jepnn?a9To2;gj&g&c^0zh1G=o^TF)za>S7{B94e z73`zS-#Kz72ZPbPKCk_C^MoDO6OEk_ihI75vfnPqILZ`OI}hhv@x0B^be@=7kL@{fQKSWp z^y0$7i}OVBnwx~G%mG6%iSdM+jmToDfc#+sx>zD?stZZutL%z!Amaoa`x|8mo8F>s z)>diHH6~1nCSvz;9~DAPZI$&9%?y-h*LoBr@*ldsA-+fgAkk#`61u{tww#|_$^)ig zMfupbjky?4Qg*FMzp`KYRbKk#=O}hb*=Cz zRF`XX^d~^pZr1LoZO=mBpvoNUlH+VnQtC#dRR@Jp}b)TuPUb zK8>)j;|0)PK9;!6UTO--7W=1T6@WGUfH;&hk7*0oC4i*y^H%oS?`tOGtXifssldSs z*bu$&1+m2?E@0Y&@6JOi@v{+`i6FR8(ntDwXyn5*vOby$#t7+3yp(yM5QrEOi46eM zKpxnJ%V{#5)fw*>7UtH~gsxhfos@=V)niL_yZOIQGRtrbG1wWv?cwkWri$8o+ z)gJL`ovW*bC_!^zI#^5)QF(YzYTovgA2yrw{p z?^R=LPwWC7zXa+sWyW>T40G>fq=_#auz`b_!gCzZkZutbV~30??kwx09# z5-a!-EQgYpGDA^%vBvmgZtL1icCK{Gb>=D*@vP}#2x=IS^fz!uAA)@db@0xuX{-%7 z7eT3%Cd8qiHxMfh$SuBe+b$3Ggo<5f{PR{a=Hm`pBg$to&?yX)O<@uQ$b@bhCeh}x zoVhVfib4T^*c+rOqHkF_J(d#(w}GhGII*YojlwO67uGF&<5QU0fH?GHhd3bn109&j zO=BeP^jQ8mN?UE6uNGy>AmFgLY)^yrzyM`u++(NVy#T<)7aXCpWt+_9_^sKWSlYK@ z(3a06ei_Rr?R>0exeXyyC$|f7Ng9BDWAM2z4D9M*Ni$NM>K0 z8u@}c_^cckuy(bl%oF3-%Bza{FG-lz0`YSsPoS%uE0+))e!FE8o(VZua=1Xp?wF#E 
z1A^1@t_BCS*TD*$K5>zyAmS)vGuTbQX3$-W0gakyK$d7I@-EQn1%0SKQ4 zWx860WpDHn9UfyxHl)5R7Ri9bAQ2AVrVI_`dZN8S45FEoX(Xlstw^Ca(m~GFw@bqz z#Y|e7Q4jR3D%ay+un%)~i|EPhAuXfqhaFI=tn0C|J#dmaPRNQ>7nd5fA!%sTOJmG% z8#%^s!n5YuI@YpoJauZC+^gLv#6lBx7?lmWRSufHlwcXP9fk~nXaH44h_v!wCFIISdvbkA_#DfBb#+DRwU;lEP>Os$`Tm1eU@;rLH3fo z9_t=dEp_!r>Sb2tt|WLH19daotq4Tl;|wvTEgdB$>K~ENuod52ZoxC^q808jF{@*z zMx=dc!d%A<@271lRG=T=TTZ`Bb7bhB$2|`mL5LXGeThk!3kh}=gUvp+WD#4ot%xxa zQ&Nud!WHs|Iwq$1a;lji(Q#V&Oix1lf|+a{v7r|@c4XWtnqtr7n<7^^WwH=;b;SHy zf_t@!Gf|$Oyw-{zC)TAlB%%hg!jtaMJ!zHe?Z60JrHk75!&k`^B;L}k=~_U zeE{;v{vrmSu({@Z%pOySYUrtQWULUt^oqgL7wLb5kqJsieZ>Cx$xiihsyIU! z=&aft-K_iLR%a(W>whAAt(Ro?#C$qg^jRTzS0br#s%kg`CFs<$8iG14A6W^4c(QwL8)fjr^pPeF%#5rGZYjom!~jQM0ls+r(sVGNh> zNOs{t^QfW3;N5n9V zG|xCQ4h=`BM_lS_qC?Fkr7DwsCT{;s*=_5qif9%ZCJ?%UPAw;j+}dU5tsj~v5}&Es z=QD&w-_isAKNpLixkPo4a-}ZE*?aL-xA*>P(!M^u4wvnoZW*eQ5VPSUSQ13&hL~(( zBVyZL!Bl6ADL^m?9H2n0O|IIMBH7kQzbN&GL0{Pwb0gjs@wCeFNh*TiIbYPOL? zQ4B&()}CUk+|P6Gmpn$8v5%h`#`A$SQ%W6Vv zW?l0!iE`=0K8u-0Q7EhQIgB{jB0v<6rOYqkdgn;EmOs}1h5g1TcVMb~=Wm|jzjEeZ z2rkFFaiWE!qbuxx?Df#^Y{Vqf8Z=dmq>D#EX*0Gh5mq z^kYmQpQ2GVz)6+3p!WlE)0FjMbUx-3x~pkqkJYVyd$&DV;@i6~KJ{zcL$hXHaO{Nd#gC|1E0ktG@vy)a(fvuoZ!y%eB`&;=@lns#hZ=#fF8N4(G` z6`CV)-%hbDDW=$IPbyR>jY;(Wm^DT@$*aF&Plz60ooVINXlY z+e5L*>elAyHpo)A+RbmVyEfA~&)8f67c})Cu8{`#z9bXU?9;J}aPANrFd47lIpSA; zWvqF7%~Kr*oQdPT)&V%Y#`n!@jA=qyY4+C=5C>pwv?a3|`YyQ&Uz=~vKznc@Ad3U` zWC?cOr%_?c-lJd8KCj_v&|_HFbzrrlKCT+xdzpIATwjgi6G! 
z-DB47T4ozgbI`TTZh!`cfH&D*N^Kvus*lk2eB61ysAOz&^r(u9joI2*g`D1wTL**$ zds6Iq{2iGD^-q-#_Js#OsRt`JM~@Ghd2G3#NO3DoRS0_W2q@bshv z&QKSKNEr%2M1mP7Y}hor z0jT2s?UJ2OCT?&F`lM@zPzX7g>%}~kn6Q;D+;G6|f>DOW$!>eHggDtb;xlsj7rS~w zCp@A!{Qum&4U}ESRquH|?!EnSyQQNK%d%Rs&%KcoDRxJ)En7x*h>nP4``IRbAitG2 zFK-cMSVNYSwI*^rA#av>?qJ0hppXtIq?0J50R=UQ3{4P82LyD8LOLLL4H2P}h@=4l zf(X(K3N%59=l9=L=iGCSNcgUAy+)wQEswOhs6iwD(8z%oiu3b-(_CPG%*{fs?rog6XHpN1Ht;nvn7(gf4PeiCir`d(B#p8IFyTr^n z|1#nb;z@PwGBXI~#RcS_v=!?xhrofe%d0I~t*kDm^c*btqM-C#?#i_uhmtEOJ;v#; zh1322hpKMmRyYVklWkZGZn#@i_5bOtFeS6Z06I)a$I@RESGfsE%6D``9UY;ggV##v zR~~?~<#eFk6q7QK4lNv>;+!8?(K#03dU%q;-j31RTO{{N zfku|<$*Oz$H}PSd+eYC)Zi%0>7hoUnA)#-x&@+j{GIDmB9BU!?dh93t^h=b5dc{mZ&A#ctkQsUk>mRB2DY zw(B^DQX_Z|t;{a!>&&9Q`o2vIs%u#suB_)taaeK|m>7IDl9(6S3h7q-`+@+zBirV1 z!ucSyVI@sl4=*q>rZ`>MkRjfsb$E`FSm~lm%N)9?RF2Z}2G#BGNWA!2wadN3meN7) z9a^p~&k>uD5??Zd&A;UiT5831+p$W_V6s8S)=tw|JwkNT`mJmU{&g9ln$rv!C!0?9 zC4Y>pdZ7PhqTa7segC>_pEQZwy$wR$5nNDN>N@Z1l0_&DIgEWz@V9C9X-Ocm58*bN zd`|W4ro!y>dydGDc=@g&{v}WC3cjcRdSW|~cQc%lj>w)m2hoo5oH>8zc2(34IPo1$0g%C}!eUvX^7zG30Ft$JrmAZ6S|~T%}|S zdCJ8Nts)D{b)l8(g3Gm4C>HH1muPt@6d$sE1g?%XC11qFp#%PwIA1nuDGR82PWEj1 zzIa|SEap09IfpoUbyl=>%Z`BiD9OTX$JK|6CtFGoJlCYlCAO+Ab0wU0c`8|A+RhpTT-KI%SzVY)w#PH7^MZYyq0T>1 z7q|$ks-Ui%Mf>F4YYrX}X>N>chm5a-W^18T^M|DZ+ zR2R5R$hvT7#S&c@w@Ns()q@)3eqN+|OEJ*2F(eYjQRkSn78mWw>v z-$;{P!GA`ZGvspbl9fEtD#_i!9#e@i;kWLkjAiE1C4=&4D>GLxDYGP2vqIZO(`v+p z<;RoHn{+d>+Nh;uCmgnv(j3oc@gW7=AGNPeni*Q1OEbq-ca&yKfw&WvZ^yx~ovy~F zu38RREn}1iIwVXl)Oe{ZO#akhn_L9j#8R-0uck}z_k3Y`r2huG%nEs4O|$yXs@V%A z;AacqFLnmr#Qg10j0gS#z|X0cxz+m!6Uf8e%xry>EXI{0`sR4kyz7`r!Z3J=XJ#co z&K3!>CE2c1qI*H;y-p)y%e)ENwG4a3b!40L*=EANHEQrPcxWA6Q?xk3qi=C!jiC~0 zSKe1NCL5i8Ja2crLPDS%&*|P4ug5sGHtcYJ3x&xhp%5aOouF`!!Qk9?*JXKUjy{09^wC^_G?MUc+AGE4hQYX z_4|@98nh>_?+C3S>^oU|REb9Hde$D*GGw)kQyvC^YQp3$^h8lH24{awU?qx!iizA) zi~2LURDUMa9*93(4g zoCkD+ab#`B!7zq5DHdb3bD`NNWgaWZzq55`hjbQ@5sEGaviyBt!|F*S4ZYr>6cV~=!VvK z%oQCuctgkWFhpxS4r?ugq=s%_@15st#sPUlK0D}|Rb1}Kh^2Js$gpaIeU0hY!F)oG 
zP|~>l8u^^$F`FoXJ4S1cd`@)+*V%ml_q1vO+zG&)R5im^&7=dDC5jF#0(W{TaHmuo zsGPN5gYCQ?0r!IaTGB-xv$+zuvjw=9I)iKWj!y#aWz_<>bAUUqYNo83c?Yg^TP>P> zO&l{1qwwjayGcl>L;eLnle?mW{)Iz1h=$S&hu3L|?{>DN?&M3Mg1|L;ywHx8d}bXd zIAoCtWBy?`ur+IBoyDXPKu372bD)yPqw8>ai0tAMm~kk-25>m3C0zVFyR zCxD!D^72ReIb)%oXFd1z0CwZtF-bo?4$JmOdC9UeCfr79Uh_#jfy&Icb5?htI_N z{|IvqpJ*gHpxmS5C3bWo`7@{d_b9-NJ97lSc)-I zM=Yhp2@3H&HPS^$hJx{k)=xlCm1~o~u58P07%i9meXeCQ+xDoJ+{_mE=H$UCAZ?GA zeR!sr*T>ABg&50kcF3?YifxP)>?WjS5vQTR-;`x%&V>-!y~pL!p=qwO4%)PH=SHQK zE$|9S7j@LYa*mJY11K-ZjvL38QSRgrl7nA<gbg+I#($}9SnRSF=XQl z2&RPIoKVJuurp%h!XUVvRKl1QoHPuQ49I8nncb-=eHO}KoqWbclP&^14BLNSv$aq~{fz>2+hTqfFaGk$Y5Xhn|Q=L{^yYDe&8HGM9XX`HL|?5mQ@ znbu|WU+a98Pv&Vtl$aVdr3a_2ohIJRRhWL)!Fx;mLXn(Ml{nl|_!xoiMWx%ev9b_uWH zTo$Jd%%nY$%fQ}h$YTe6r;T1rdQDs9&0b814^{(aDVmIqCq zX(dcp!RZZGMSoZTIMIWb)3YofkYZ03>oi<)oaImYk3TFnV-0BKYwBQ0vzMI@CB+#? z*Xh#p?oH(D6$LsRhstsOq~_bzeg`F`(Df|Vb%)H z`(7v_>_M$Fu!JEkLCgpK4%zDGGP&#&yrwm>IQYS^yywaro+H}HZ=tA(p>uSPCI@MA8q zzTKYPhCRb=Og}=fcNq9rT1Oku3|q3hu@J_H?tmkI;6pe2GAgT#rb^5E{6f6sSeO&s zm?jRdau_(oZ#Hb@9R?1q$}w zJGu&4C|#b+N>>(AZp&_$f_ie5PSB`{F{)=Xd{r4YDv#J$@*nK$h?}WJxS6z+63yhe znNW|w%~}0I9nw3C)iQf^xKT&4IV*9&niA6wSBcrgsr;G*z<^RGLsc0X4qxL#kskFihqyoRYISjGk$?UIeJck>8XJ zc^|#5} zM>$;mT9OyY+Z`5Lt7DS(+U4E}OX)E98db&iis^uPiTTq^tuI}kPDEw zg!wiF?~tW*AoL+AzSn})D-*iZ+IFm##3#F608?yd@yRmwm{rNkSsb8v9y!Zvz=gM5 zd($O>?5bUnb$BSN9FPIz@hm#MH*c>_n(#&C~uS<%%oT-71vVnRy-x4xXxuhR%7 z@*o6Pc2vZmRLh3CA9H&E(}3Pi(o>ewfutwZ)U}ZGl^KP`rvqI%M)AT-FeO6jP@`k= zel>XEB%N`s&N)zEV!co?X^5IJtmia##n=y{4V|g~<8F6GpQzo$#)4ME?018QS=7|y zZ1z*{H5W7NmM~^kEyb{j@Z8`JXYGRI3)tq$JKm1qzkPqxNC*B`?8k4k&;RQW%6C!s z59-L(%U?cl%PqQ;dL6Mubf5RFx7O`?>i(C$Y}4jGEFIj}Cl4MjxYW)t_hZTdoO|W9 zYoFc157&Sa-&FU1uIK0GdbWWdZv4EFpO@6LoA_zgvzPMIsAn(X2a`H1NA+Q+w`c&` z!cmgA8d@}8FfcFiWy6wR)-U;mn*f%m4Rv~nFPJfx_=0V6i7%LGmiU4-V~H=JndAu$TA(Ctu~VK^sCx1EXH9Z;6Ok3CAkJM`OU_kiVlX(XBv z5X*xT9NKc48B390U{%aZ<;41_A1$d?fDTzeLYlSkBL9nz8e+}S*S=;+q)>hB(g8VI zIeE#gX5{qEMINxYem6`ulRAeq^zcA~4?fITBJd9+Y#r@_cEFwuZxmiWwZwo6VwpW?i1eWg#mcpd 
z<3bm5n}K#y6nUNqh6HU(c~VG4ae4)t=MS8o4MqedLbkC&5TkXqi(kmz24kXC0qdc* zg$OCPDSE5B>%yMbT1m5wLm`+tHpi)=OOanjdWfo~!0Bu-2}H!O9l;YyL+*Nl>@2uz zOQlq~0%OY|T|@ZRRJ))RLX%Z2&@M0C`|K%Sff(B!3sxsc?FtT}z64s@E_2z3CSV#9 z`B!30oE)pMk!pc=`xO?hJ&jX{hKV?NoiMDX(-l@(5StNs{lB<8h~Sl+QOUp zo8xF+vJC&;L1)WljsB{H;jor$ zrbwM@uitpH#Lkxw-g4{x_wUSheb=jBv1RLRx8Jet&bzjM$K5;bdDXqI{LbA>2?j}* zCcuI;$o3ZX?OmMCoQ(RuSC)EqOC@BQ?$Fe#3CHy-Df|Y$=IZNl*NYKmE$I86h2>nS zJg>rL^#KAmlyMyj)WFko{9FE%3jap|gI*^frKa~iqJ$Ntgx4%pJtwzRs#|uXy!uMt zcQ4G|sTcHDMSHe8cnI0q_potei@T^ux)wLq=&8U7gtcDFJq*(cR%k0Zf-G)fgB4cm zi>iieSf>>Yl%7f5j!?I|`yNvNV1CNPBnkQ+R06SWiiH2Lum?FlQOK03Ti`yRQa*ud zD!gq}ZTBx#cv(@Hyc%DbLi+AgHOotl@8Q@f6R_yCBLu}TWSY`JE!bLbETYtRQIpss zd_STd?u~)m=r6gQB&3EaTIl;;ji=G>{++yYd1R}8qN%zTYQ6=8glx67fYLrR)a{}= zBUb4;W7{;$u>mC}h7G7_7u|3zvcS6{lJp#2oF&k!OzQMba%&BDL8jVB-gPoifFM7L zt6d*8m~_?Q4vUJJlt@a2N5O%J^|-xs_kGvGG3Z#UAkb`Bx>j`y*~bH@CO1Ntn?#%& z6?Er|#_Vo|9*2ff*K26>eq*_;(L6t-UV|`ZS{5nx6r9#_7`#Q1hp0l*j;KhQH=Usz zPzzhcUh>!?+b6X!dXK#&C03&%4;5&8%l){eBC zeqG*$?29pK&(6^F?Ypz!!+PQ_FNZeGI%Q{^Qko?midtt5iXc6Oc5`z$N5&DrHIgs3 zZn#_Lx;xRv^TVx}h9$`Tl=d@XaR(rSMIA^Aa)jjGPRQ|1^g6y=LJnlJsP56;TVsR}E<@8rZ5~~ z>Fxx=!Qidgpv2>Xy|7}I$c~V40;h6k^td71@R)YnoY_{-KJd%WBm@V%8F{M3SQY}u zg-&Yz`GIq`Yc$dVxP##pp+h+G(8((ldMJZ;M;mwx$HlRlTwD?3BsP#=*Gv!-C*!De zAU8x^z7;yIO-d)Xwj4!wNqcl8-{4ci{^}M6Tw%&whf&DE3CduSYdOo zv>HGj=9Jo_YqJNZ9Mz9Hcwwu&Vqqm@XqT2(L@4GfHei6ma> z8eQXxxR>jAK^h%nx!34bGOj=x3<8el>|U7z0cJJ8^sR3%XHm@$M`&@BB91ww{!FF9_EPMGw6&&nd z9a@vyq4c^#*ql9bZB06Cfl3eYS8Gz)1|jW#7&U4Cvgo#jeu#@P)VWiRkmT%@&N9AY zjioVRQ%*(zu2?TyG&JwA5Sk&Ui_fZ*(`PP7e3KNX6|-$FT+855sL*!FxPozDVQrZi zdRmIJ&+X@sw!)m50W)cfnMjW(L@;8iu>@lHGR0yHYBaNfZQU&x zLWXUDgaVHPn|H|NNQ;8A2eRpIZ52WKi}n@ISCbq=`WsKN1?a-PjLP9ULd>Zv1b%GYH1eWG~DVX)pa&?bWj>H2`v zP7WM{kI7oD5Z}xGU$is7VqiDhJM732McJ+#HeM9ysG|^{YV!x)eHZy7T>@Y|p=MDO z<)B=9s{ch-wA}6A?PQaR{W~+db5m2k`e-X_v4OnDmLMMzU*_lTY?V}`9#Iq5z|u?X z9$H65^*yrMaX^j-W&?S>oM7b9ojHS@=0rd11sgh$mTnklZOV&ii?-3ngygz8%}Y0% z<(EXZUVgMU#gE<9z_nXe6+1(vifk#-bhWKw?G-p9mCny(J^M7q^7#VJKKk) 
z*Ftl>P65GzQk2XY2=NN$g#-fgO5;XD-OR=i>aHFev|D*&WFO&&4U~tdEX}Q?GDION zF|6*AH|%^twryWN)b|!oT+f^*%BCeRG*N8?Kf2!a34y3zeTpM?)G}EGIcu#O(ZMa> z>d)e%D|=95B|}qSR9EZODOArf(UlEV#0X86(b0Xt({VTkkz&?BJv%L|>71@J(^@7h zltM&GUJfJDDpNk(Z}J}`oL0HdlU7cuq|N~jMXnXgR7EaynVKN@R#(#|s^}H*$@1C2 z&7oW4xQwxKyn8uMj+hdjoDpC}c1jKC+QR4nCx`QnY;_&kMqOLAmFznL+P%g}>?#c? zL5(z^3}I z#6-5xDMWhbD4A+Nrx3^!dlSMaa0+<@Ioh<{FbxH7ayhojx@{&|D%MP}E0lzCF6nkS ziXArm&6Abn{QCNXY&&v+OqealyDVG7UM&0C&%JYq%}*&A{W=B&j~xP2L6cfb*Y`Nx z0@`H1Ig1{s*SCA`j0%iIo)V3Kjmi+(!8pF(r=`(jphhkf4b42J-tPKn7)6c)7w#ya zUmTiB)R)-qFk*d(GxB45D{WalSNb&6ZCz5nZni($hR}U$8DaqriQTC!K)KBJC7& z>#xXFc-4?cOy;WP)2z_N)FjzP@@G7(;L^b=Qtkw65beaj+X6f?Ms8g+W+-mD5L)hp zqSF~X*Mn(AbV6oYS9z+Ohc1VkDowut{o3eu95ncDm9qr>aH%BN)2G0&AvsiDN``cH z;DvMMEd9tr)te=cW!vPBBh}eL!Qy;)Z}y!BvSoYR{qUOZX*tKzMZGa_z(Ug|owbbC3 zepYhN*Tm>N=R*SUpssl z8N@J(E30-!a1k*N#m%V%{PUq~OqsCUm~Enr%}i{rQs|;PvBDrv6ZY5%jiifNjfPqx z9)w%DOSvUF>OW)Od3YUaHx6kLqs1!ZoRfvjV;GyD_e!CrYW)?Wd3V8gIKU9%3kUQA zwB-`x0lIIA@j&vX`rlo}HNsu+AO#fSR2lR(>(1}X!AO^2Y7W{p&NUCYT_Y?xk!yr& zEKd6eg8+4k$Td70YtWR28r!cCwmP_LSpDmKjWDwB{wC{YFD$FuUn4C0-FkSs4(Xec zM(4F+fE1$&t{iq_O`E$=9onxP)pa+vOE<-$YJEx2_X#5bSL@|DAmCf)Ll_LlccR}M=jgm#=a z>$mI5VU*abyK=bKbW+a&uI0+%YP{22=&G$NhmrYlnPYrp@q*NxKqJYO1+WoPz+X9R zq{CR{R}M>rzve54S?ZXBj?ka)%Hg(NcOEetTQ_}V(&_SmNs))bUobVcEwKe&2!UvIBEp8a7;TgB1zkC8v@ZhyhqW9|7SQPnL;>oQcn8cj z&?bQ0!re9{SOr;Zz1)#=9j&cm^jc!_qUQYqoBvBziNab`L)NT0^{C0BnDC+{wQ1XF z7;faW&3Ew(>6d!kY1Szw8PCW`Ca~xk-x^i1QB_F#sj+xBCG?boSJ)&~!1Zk^58m&J zw(?X%($V2=Rt=4F;9S!zz8YWY4TdzgdgjIpsSB93WSY@@zgwY}7Ei)@b@u;o1_o$_nt5c|9k9f zH*a~XU3V8hBHdG~BC>57?|RiU&C1);$`3ay|Lu5Iean7n)=1ohG*;80k0n=;^J2x6 zhs3V)^`XrwI7^!NYm$KzGQAjVJwSru*ph0}^}_(RO2CHMDee@@E`4z%Nj|WlJ`p?3 zVK%U167Xmc+})(xujvn%4iY!Qx9k&QNP9q20JXxzE1i@ug#u(?^?4)r<#HG0`euz~ z6lrQT3qN(CduUAaLOd}9Hj37g%Kc~Iuz=qL50qG`fN&rYj2RMAN}}Zl$zPaMj*eg= zyE8^?m8Axm9;L+UkCBml#`|SD+!VdK0pxhQHTyCH?~!ISU?&(!-06jmXaFmrlRuEc zCF04r$poVV^;Vwla-P`dQMJCt-d5JGa@M{#y6SRVycSqFm*|lp6iWy5z0S+yCX?nn*^J*0Y9 
zA(JtGmYqefPdGxxrqSmFD3FHKCIL`KPTc8$XyNGqTDI}h5e;8IkL#Pk0OQ4zpy0VD zhAdKK3;4I`*mhK)mx*g4FpB5_Wh7kIsMRCgK|H}7r7Df zXAN4NDV4Z#XN*LzX&S74Vbj2yTGl2P*4MvMiK|DE zQ*;1unLM_nu?DefRkDAxyHG@zgTXfzw#Lx4s zyehEA6iFI1kR>c&bKJ_8gW7S(I2$iI$R#LSVncbC2(`*hJ*PwWJI!x4)AotDv0-+; zE$M=i=6sTPj1lzXn5&&+BI@$(iO6gs!q^$_3?W6Rs|{q`j6P4*fyZ_|_ta3=u5a>3 z36+6eLGnG3(GaEqaUN59{6y>Vi+bFo)cznF ze&Y@BIXCpU&TY;~l(xCVzWM--?|7bh!NO1HsdS?KZ~jU?IOFF9H0 z_q}oHMax3bz+#X$Qea7Q3qwPYg&cuU8gNK30wToE9-W6zy-KM`R&jA}_4ZZrYdKbO5Vr&CbQq zaCSOB;vdyTeW@gnwQocR&kn{;|7xP?XLv|8wQ<|5po~#pKav1k0ik`jR{p9EI3hFZ z>{?t{hOMF?<7#mQl!lb?uxgs+C#vj>Sw?gtC|h0(g7%(yER_HPOUJIRv#R2WT8r_ z&*ko35eVHO)-2<5pTcev+Mfxb|1y-2j-*FM6(MRn7U6~8AO}1e$Zltww>4P?Na19S$Z#;Y%N=fhy-VK| zy-qt&YMK3A&NTz2|IdA7C_$Y+HA+XA5!l3$%e>Cg)zVpF;6?FFzF*1-?5&^~ z(8#TUqNX@?Flw!qO2s;Qcg{}Xtj*p5nj&$VPvkwKd5c>L94YU;&{vlZM^~7B$bf+a zWtz2#0wtPzLb6E{6oaHc?5d-0)NI-bOR!T(bXX}T>k^3fd!$uml<`2mz6InP*~!8; z0w7=K6M5HBY<0W`d0#M@0w&07Kub7fMVO78%GKwv$5CxU-18E`&QPl!;H}?Q5u2(# zd9Rj{CiOVYgUnyH;_yKLE0JxvK^@db1*<(CGaAAhY1vYh8(W8hSVfRhBY%xh! 
z9cZrL^y$9lO75(Lp)+Ge@D^{ucrrExCZcRXD^rdWX_d4B&X^*@2!tc6w|Fw*#x^4+ zNDVVd6|Spr+b$Eh%Q2Tve#rphZ;rNF=%b`olDD8gn{DO+Eh*yLEoj0R6&(HW7{m~lE*xB@1&?(PSmd_}=XG@;bum6X)g*2Bb5z4%_Os+BY|#mQD~zG6Wch2ZMt*b_ zss$ZlEYy{LXa<0tH->!oL^F7J6`d@4WGi)=u(sqJDW@5qrMV={FdbH#D9iF$A{g!2 zh6R!?va{q&}q5H~ug&fK8%lC29Km>~jDH@23JpDw_4|`5v zsqc;As745|&npnZw~u{52<;36yf#;EVy%RMAkB$`I78oJa*!2I(^W%Z*1xuiyl9?D zaJx1Z0y*>&D%!CSfGUwh&e7VMZUYLs1<*1{EXhOE_ipvwMd!(T4kvJ3?P+y&QSWRq z6~;Up0o z{E^s4#mwxEDg?4>;bm0SiCA`u4jvApS3Fsfm2>FxXaek!Y5OYshS@Hg4NQLW5WP>W zVEYrViL-yRk2K!Ax!$-@Qn(z-ejn+aWppt3oIM1#k2PdRPbh^-X=3#ol;dA(jhAAQ z(4ky{&+`{!1*afloq}KiNxp&2YfJdnc7tdZ!tuh!DzXiA$+D`hQ}7@f!=J|uPN~G% z+ZKNKe4K@EZY+akG+@{GImg*k3-f#>Zg5h?YSJE4cCg5Rsxuom*n5t%r!BGZGVQTG z-gvow-qg57o%xGe1MlSQo9ae3iigQd%s7B$?t@`tFxOG?U-DdbxLslB;#5?n*+)+* zG8Ifb%>JKb15AwLJUgUCr+Cc9{{V|-HTk!Y0Wl4Wr4*Dl2xLtx+lRR?g5Q%5^Hq$w zPuPYXTeYKmTh)&6oZe0(^}TKdO(d&#H>+I!V4i(=jJHeh7)`W#$Ju;UgM&XRsJc|; zvJD+y=*m7BYcND5+=UI%Whb38Ef;I8ljzx%O%&d>r1kjtSB@MGsb|CVjGsgNBp+qy zvm-o-)Vf#l`Yjr6_Ghoyr)#~cW_0p&H@}zRXQE^yF8;$jKe+ttvMXHn zdd-%VeGL+7+OOgc_`zU;a#^*pKKuOXp#-SV=`p$}U47oKtUV7jdj`nyslRwS$v*na z2a~i1bH@!Jn2$Ik_hf4y%n*Pd&)$iJ{J`$V`j=;Mqn`cEUq6!|w{}Ui1&7LA+rR9k zJv~wWFU0+`uaCD%)ZbsNb@}vV%T{;e{ccY7M<3;v)n$ZVg0k*au&?v6_3ZHHEicSK-##*KZ~Y3@SMrM`YlBlq7mi!)TtBC_ z{`rsJrZU~G%+LRdmFZHj&wBPjS3}L^{L2qnIc8Bl)Q|*@z7^(2hS?5DjiImX5f!;s zvJ-#%w5W*sDWxqHw5VZ*1KUG`1h%sIOsHc#)us9>+$!F8 z5IUd*4X?>z4G0>!Sh>#5_#jnFG)i=y1@dx-5p?}XBQYBveijVY5rgPn6c!-5yTPbu zXK0Ekl#_n1BmLfj^rii1iZH3n%%#&8Sc2YZHEk7&J*wCiTKrstOH4DhQSu@ekf>SD>8KA~B0~`?ZssepWKymF3J)2mnXY0~y=*z-Rs3i{|C^7(VD5ikf=;5O$Lt~*5F)W@@PUp8eTG(Ve_^`{G zn|@sj7XUYT2$%M9-P#<6f&Ujpfo_oGt-`-*HK^76m#!|PqzZ<;Voq&zZKbru1n#0O zoAh0pB|W@-^K%ev7gLdmUvm>*g1_Z7Ceww^mR0%oC!c|lbTLKj?XSdu7vY(#4pZBv>-_H=2) ztV{#RbI&v6Gge47Tu12{6HOYK z)0CcFmtI3(7Is1{c>ryN1we+g#S6O9>+m}_fnw6*OT9oY1J<>yVDuc8u4#;ivL>~b zHLgQuKSbN~jI*9r<8bDla6}>SL}}4e>!%)tX|tAkFQvvhUc|}QLu4k7m835z+Lde; zq)WuppFXw_sO0q~S@ZA%!gaow|4LaLt z=hgZo>n2Q3tC?r$b&)DTd|m68)H;HeU%hBvxx=UCGYo2$%f< 
zqvV!Kx^qk|fA;L#Xn9mE)8dGJg#+E1dsN>~cyWp*a7cA{ME+4f$Y@%1sv=T+#><_z_!M=1C8&FIsGyuO)o zeRDaOtM_T63J@=NM8zP=;fW)GqFrzGTxSYI)l2I&+z_c;TOA;!Os$VdI@dRLIhgTK z$zDe`_c!aFY#wUpHZqAGLWuH8bP!?dS)zj|TF?LZu^$0Xk{+@@{RKmr9^dt0_10tv zCaX!N#Ga8Xk%^%>GFL4XPiU7PwM}+b3K{DFY2LG~OO9=gh=AC8P2LtsV{=DQAdgmo zk-ZWdB(U>UqSk5+7|Ve0Ej22X3gl*Y9o=G>cpOM@8RqJN24ODz@f~fwq1$_(p*TBY zH#9X~Ath}%Y;Zg<``6k&Br}w2P6A)Y>M8yH>!yPO5DM%iscy8T=RYFS?VHmZ57e7m zcBj(*v*^CZK)z|m+@JX)cyECeoMCqTI6Ir8sHz`+(&?F}!+na?BqUGrIR7=F)X>ix z3xyuh1haqr=2W{))^Nof{B~?%WWcJEGs! zmSa>5@-$_{7C0S~JZ)@ls#-QqV5VavPqY53Md-xg3r{Dkd+I_~Pd;YlCLi`Ekfo~I z$}hw-+-?Bj;0(eaI7^C58_>onvxzkOO}W;=T{QyIAd$QG;Wgo`4XW!*H5ud^?B=Z8 zuy9;Dr@ee|q{vn?<~VyPmW@N^7O{S&X*|>2ynTB-zL&N=FMsB1KbB(HCyQY{x0k#R?Ib-c&a)nlXV|2?AFxK) zM2@qOE=QW#NwMqfAXfLUpFJqJ-fpxok(B2U)4+K9N#^?_Aa+L1>vvlXI@TotkZGcy zyZOq#)FE6E#8(cA3!LJ}^J{t65Ry)|XfLnYd4q;d8N>06h4~})fgH<*gv)L+5;CmV zuFR3PR&-^TG?BB51%K*Ncu?`7^HrA@;gr^L7DmO!qpW6(9s)$phIw`K6!wig{SEMN zL-I5S>yu}6x<1a{ZU)ALh*Wy}vEoghUWLJRN5FIw12z?vVYDj5Grn5$MF)ewYd!w0 ze_WB*`%NQ*{Ow!CTg8~Yuin?ImWK(8fqj)IfVgK`+y0{^+NPqiz01I58EMO;5rquc zo!gN7sO#4xQL~!)%Q0SF;$NHr7{g+$E~(BRUr?v(bkXJPrwbI(%3$z^)>WwXV%$s% z({Karqf0^sa%E1-7cgTipk+I`(4RaTrUgM~-+B&5w1LKEK$&9Y9B_JP`cm0BwQyiLsmBXE{^4`dY4kP&oOsd7hV2s0 zlh7t?xV-bMq3M8lSv;*rF^lcdIUciv7?xYI#$VsIK-QSGgp#Z=V+rlD#+lowlb9k? 
z`i0S))~}N_rg_ZfC?i>8dN7wYF5ULLc$~Yfu|k4`BLmxcnI-O@x#Q(zKiQSx{?Nw4 zYa--@ue5z;s0OKV8}FOZ8R$!PIr4r3+!w-Quh3je z*Py9WDw`w8wB1^Ek;jeh>sIbIRh?jj`=x5>Z3r*Q%?`&s!Mhc9Wc2rZV7eh<4#`0< z8zaS!BK~}wZvxQf9ovb8M+V~5Wh10X4|5P?=PLWOAOMVh&I;7&xSTYlg5yO}Hr9F^ zEgn^NL>Y&^#Uz}JY-y@x+!$7>d=l98dRmQ~X=`^vWhaa3vKaskSa~G4V%w_dZL7Vg zf@2DcSWGk&v&=4nuv&lcv z@PsxzYci}uqWbnSqBg{bnoe4~Lx2?fQ;H3_QH5DZ>yw~uI;G?sB$G+bqfJ#BlX#tM24ps&n_hxV@=7 zM>eF>A-`#vP!fClnUX7zgc>WT5|@)T&XX?Bo_su9G?leu?R`m4n-;Ko)@3}2G__}=aDh)g}Er8zUHT*?4^{9$L8 z(DNj>CM{=N)hAqIr~&hj^Vj@-)z!Ul@($Pw1#8ttYP& zKG_*F$|2dPi$({7Uw;%|S3WtSQqwbLRzzlH$mOrzi_hElIXjXOP?-D;jJ^h7wdBVj z3BU5&NJ|fOdvbNT+*M?GJjD$kT#|UZ;uJW1_)y}e$p|0wx1r%V;r%4;D30sn*`P2a z@Dv#@tK#`5bW_va*q?G1VmrYHSrs<&;S4+>*SrI{0sXEexYf`Seha^t<>Ybep_1IX z7krGRB|C?fSXzy%e%(E*My2hpXf55a;AnEQ(3nawH`XS1agivXKlC@#6S8ZJ`9S=DgPYPeL&uTwtk=a*70TFSvFCpxR) zP{e|%8Yb=Qu=)RMJ;HID;8BPk6JyH zicfthlLl}e&y##~r&<}cCbep_)!q4$v-qP+w=!N>lySC{@ld|n(KL_kHO_Nb1MMh2 z6the-+KqLIhV_8gWA-YO463xxdD?yYu+KT`3Vy`De%gIK;$J`GUVqWQPPy0P{`Ir&RkvYD zq#k*@km98NH^^Wep;Uy zI|MJwcHU+CpfB~8?)7Kv6_i$h#Bzdao)7y{f9SG(z?b?T?)9jDop!J9r|H4qA8YcY zHBI;BVs+K@mb;=ikg{!GicM4rYjsp#o7TeoFR2nZ>-=qurih$1HSHCTAup4|#caKu z$4l<<4jwPM$8Eyi`AXwXsvs23Ns6aib_BnsD(|Exs_rP&ZF3ZTj7J0t5%&om!HS>( zU1LxZ?#Fr@=kWtj z6QZvjg4Z3(MLmbC9?j;HDts!JGp)W06wKzBC$2ucx`Golm7UMUvC}F;jOy%SOB^$A zwO>%SxjfrNWxGVS`L=9#1@BQdL{!gB^fS#I#-+udyWQ*i{OeD-*Z2C@ zBkpyC*KGcFv22z#cSnaofgm-fZU2!w^o>J2rEgqwKYRxe-EdFvL91?aJP~h?r<6tq zE^r_6w10-2<5~YaNBRuU*)UTPC%Cw+(s{lf+6JO-#mb*n%;@*UlF{!A0@$qlC8Hn5 zZaK|+(16$y{Aul;;O!oup&Bx{VR1|NGi)ymSvsefUB7?AzkbfWzQe!%2@@>)ytPWz zjLA>=5AT-{9%o1G(||FKKV$aGYY<;o%oBztHh6M8Wfu&ocefJ8EMZfT;A!KCwXiwf z$=NiIsIA}(+zXc}8wTz%I?Lw4q^FDDb6~&oPJ!?#2g0;rb-b|lvEO7ZvR|14<(nL~ zCO1Wgkd0=nM&hvZeAb>}N3=BMpC6%71Aw^huF-QIh7tFA*kkCu?)9hr>-*g6yLruK z3=RyF1swG3^$>~gTCUqEBB05vnBX5hVGDm{u0kW;;a92m+hFnZjMe-!w5BG zLxuztX4sJ1FPXQIIa^AZvlNuuJde2EX==D2;`}l-tjSMnXTt_Mmd_UGTY|p=kRNj` z$MX>Qgdev`PyTa{w~IpMJ0wA zBktZV9wW^LMj201w_eBjv?~Q?dLJ5MI*vF@M(2QL+>Kmxmgg~pW-#~%*XB{*=GWZo 
z2mI@td;Qrv+g($PT1;We#_m)A95!rBEA38SCJQWJXU`NyOuZdxdwXa3P0_%$s2!;! zKW;@KKT9}VNm%6#1%G5F(C&0oUi%s6cp2R15;iCAKmx?NL~b#GG|f(&K zbfm#$^BADQsggy?)uPqQs0!hNUmDG`#07e!uo*7PPMmy%ZjQczf_K z{&|Au=lt^|&(Dxsd8T-Nr%I08CEnQDdU&OvIe4%Be*I3O2;_SYueAN!_TFNsP;cd> zBTek&D-~q!^eZVkMKz^bFSxJ-CqLCvF0&>ZRtW%*(FzIRzK?M zMIH)M?&Vh-INdUpw!?%~MKEzd5STVcmtMU+I`ztkgG)?4ZOrs7vSqWsrY!;{^(!SH zDXcwpo_=MalHmB{*I-HB>*?6Iw`9xLumcPjt^@;nb+H7mO-MQlFwFL?9#>NP=%?w` z1}%Y}ZKNCQX=FDaNBF2+4U#(ZNJ1yyVeiyX>wjwH5aq3-%iWf$Q|5+&=4%L`fcp1cHrkav^m1#P41B>_EJ&A-&F7c>r4G z6yImV2(zo)XY+QS$wQ&U*;!M@wQrtHyGOCFNkP$U(R`EcoBDc26WfWSGGvORC&QD9q2 z-U4jEtCg3uV2rbqW@-S=mk3-GmF#>Dt~+UD3PlB-C7#*MSGLZYVplT`-c#bIgTeXT z1ex%Fsr3>;Xx0`SB`{MW;a$5!~xtG!P&f)~qtp+O`k7hgMdMvN$OW21o8|tQTD%bKTdxLB3hNdvH4r8mp;H zKl(JWC);4>1h|nKIExyyPnnR~jFM-S@lCy7?! z{9DZXYbDwm=F**ob&mqhglwY(V_d^bfu20 z_oFXzmW!E@{1k3Woc&tGvz?k<4iVJSm5pw4bF?5;%o|*=mN_uosZnkf&?H{wPJW_# z?rW@cmy?q%Y!aJH$3+zz0egZ7p}hcS{OU3+luq-9oKnz#keYF`7uqGAP=6s^l2@7C z2TzDH=wASeeb6IW)5<*}w1*rK`*dQa)tgf0{}*}_zQEq-Sl%)`;Gh$K7LoqwP#yiz zp}KN^$QQX0;wWDG5S2$^pDx}ai*13@fw1)H$>%z$oJ!0D3QpzkDzlE;igBCf@KZAZTJ|l;u9)uiSv|FSqQNS3zDb>B+EbtB#C!F(nAlgcD&hO)?CB= zPTBNvl@614!3$_bXIAqDdRq-x&F-jq8DMQKb_r$Of|&;>X@z4|eLCcBN9jG-eJQu? zN!@~Lm7(nQP{NJW!vkqIf(+#KmS(rkw-?0-1pwP_tkQRPFxi-w*qnG649-|r4!lwg zJ2^hIX>a(@_-l+Rwp5wUO0>re^AD%e;sGHC1h1E;8IJjqRkd1$2L>2zTlxpF(?SCI zx~jArT28|BO$$7WI)|CAhwF2&E_^(D+t5($zyK?cn*^x!Lql_2Peu`1;8Ub*i!X5r|j>{59 z+*w5lcpadi9U!0BL+$Gx&({5)S@7LC@Iym~hMoZW0H7{oh{gB&UqbTfnCEV#x<%An z?bu|*82mOe#Y<<>*+82g&(}{Ja)o^)B)c=kjwvEI}eyPM1?IDCJB! 
zWlkw)%PGUZN6Mvg$|0p(ET;@9<$O8iX#O(s0Ju4ozs>0FT>dt%w{exdtX`kv52HBY z45g61a8z0s$Oz)gtAa}@>DRVj8;|@4>Yd;tbgGoY<&+Vn94V*J{tWF4j?4PJ>_;pbdI+f4pLIj@ zkcBRRB)$`Ev8?d_0q1jK&7sg`fz__}2%y-cy%!E=-lv5^^ zaqXwIb?)>{yox12~K zf{|B!a91`mq!C6zgxph_&{9`<{|%!af^YU9^{W(UDW%Rv&IZ$^$gdMf#l6gtRq%GHL=;67(h2_ z6eP90Pm8@j{2_c<;(IOeMz#G(m2e6?xkr$2NF4PCM_kuFkx!d+weRd@Mt$kdVuPYv z(l-JNyV|lir=67~6g?;4ttiV&M448M7Y0Zo-im$ARq0w6B}<-*t?|~RlJPXTgZ4Iq$lk~hLaW|xwI_f@zD&ETp(C03wSG9wOh5_@;g%uk|OZh z+IS}*cR`!P_efV^H`=!lVz0X)eL4WU(q+ol4yHw4L2kp3*g=9qXOd$R|LqNs=z}8V zq7E+e##~2xh3q86^ioll3BGy}Bl_cI7zrLy*FyW$w0dpl5DG>RKzE6FM;x zpo)5~`x;mDhD|$B)UtE>14e1-tLh`@i^9PCUoI3efvyPvqZQ6{R9QJ!%hHOjC?=V# zV>n^i>swKVyUhZoI{;v=Fc4IT)J$TM11<5&=d8SCaXj>yA)Zwjy|N7eQY$f>EJhbPEnl8ORS5BDfeZ z*`qc>a;HS-oEV6-WRqU32iOSJ+5lZKVizcg>Itp0@(g58jSVHq=ZFU$7D(a}CIAhk zp7IBuZJjp?T4#-D(IXH=YhU)UXJFCGS|@iXqX^v=Ze_``{*t=SsBGz!5@i$0+9j(? zmi71W{r=*zYbk5DsupFrW-xLi;d{I$`yAzxOY7^ubIm>e?D1>w@fTJ08hiX@m96O9 zQ{Jhkd>ELH#aAUvtD04k1Ai2nEi&9juUdZbo@GhE>SUC{M0=%-w%j9@Sm^G{F0s?^%v-iUNXo@h3gW^k<|nCaI#|9@Ub9ga9ab)N ztf3kBRn#BdF1slR?h4e}Us^)pN*ZThvINv24sj}nwt#v1-d1!;rhCu+Wottp0wQq# z;viB&9N1pwuyq0#lN{m^OW3);NYIh3Q1`MHDPWNlw4x$~P7Q%eNlqsmrnkC}c$llJ zh?JK1P3oY6T7){tOkdrG>i=vYjOStCvOe%j9|}0P8kj6R?0tv(upFXLa3-wv2!8TZ z5aS>9qrOXsQmuM_lY2c8WkFr%ISCGq-9E-1i5T~zWEN`dXOqP?pTySHm)2p%*+$c6 z`;7a)SBNI^j9sUqAvb__?k8auH0KGQVc6pAZ(NGeN4z<}6NfWKgLDuFM}CJQBeoQ8 zMMc&Df{hM>)F8kL+NJn5bc3CJeshmjCPQoo2Q}DkckU8NB;q(sKmqrRQ!s}{$%3hi z#fYD^4IjEhJ#p+8CoYb_^(I@rjIOCAucaY;xhA=xE6X`}>$_}sw%oKznX+N66U$4x z=BA<$qR{Imp$#!43y6%JxAWphZz&9Ho+9D*5eEUTI2@p~e&SE+JEt(`_?qbLa76dS zSTeksvm+xvYAoA>sWlf+G{?vQR#b3AnRU7!TH+q4_eF(Xg3Lf}M~8Q*RadKrB`;57 zaxSBW1O}aQXtkmMQX>WjoIdy^C!pr=;E+h z0v?A?y&YArN7WJhv$H>V5510)Em!LGm}Ry$$F9_CHS4O)p>fpY8WRSyXP;q7ii+mU z%1G}^$gqhDx;;kHeg?u^-ua86LhKZY1sobYIIk2FX3A3TDH0&UfE{3HQ1TGl>V#*4 zgcDJ+4Tw3|t|qoPow~h+fNC-9oWl%B*DScm+NIJ!mJQ0rKu_&6d;r6hWvB>LW#KmQ z6rtbdyyddHW0^3%CYOntUg2xX=|jBNJJ{oAy99k{)5G_ryS*=c^aac<=!=Nb16x4Q zxi6R%Tvsd7fO9DGu84zzf@>O{gJZ7 
zXA{Qb?DPkYB*{m@$VB|UW|a{k0=VkveV8& z&L8xMQX^E!rc{2}a$q{Y(&&TfGhR2?UUb!oHLwu{L6)>m?ic|HVekDR&*C9GIO)Cq zvZ9;`QoCTMDqwb`Zk;cdaTQd)yvXOSRb`AZK`tk2lqp86vU((L4pf1zO?KXNdbA#c zkU}6Qf4%<8N!0aZtcaS!>}DANr~@%!9=o^4j{qq@uhj|wFAQ`x(J-nmpfsGtjq(kA zbb<#ipUDpip5bN=0-7QUcFY4-T4*N(|!#C~l+?pQT<9;!Z8e>0YKY zx|3g0KsI@lRJ=@iKZHn~54r0IB3F~ajj6?kCTPaV2N-Py7ST~%i88)t+z$A9Uif(f zc`X~wyKFM_0@kvPyq|;2P2V_;X+sPsJjTY9L>xzl2z=Alx)`EH|HJ=o7n&N zQP5*=+D^4O#S-jI;j-BQdj~`H>`(}z_K-A2Xu^s6_xZSE6 zMk@P~pSK#ex*E1!bqztrNg>w+B*V$4h2zX=pdjOr5Rh;HP*>q(2g~CeYmB+R13g%u_2yRf#snpqErgS1#d+W@1a?wr=9I61_04du zLIpI}+K4h$Q>P2WrLL)sSB>Gk&?(|l)6?Z1mgWOYHqdzu-^;bG>Gjv#^o)jR z)@U&A_4$qzDOO6>37SS`Vzx*~;AeHww4<#8Qy+3X)!me`h%`7Q&1kuH-HnlQdhCSl z1yg8DxEgo3^1S0@M_fU9CF=YIjV`wh!L&XR@;G9T$LYet`UMz=T{ISX{bj&B&0Z+c zEVVEbNr)4BJcMksXgTG_~U zvbHZl;spEMZiVQxy8&Lvny{eP97hYXQ?7MCQ#pv!VK|`soH5}!w*Y@!=_Ky1x;XWt zKK`OmGfRocq5Yygwu)H+j+Da~x>^Meaf;j(;OFD;73nh@gXJce*xK-OtSap+Es(=i zm`9mT;dC3VLRLhQS(K6w+e*aRl`~|PE>rwCdL~#u0(&GFPS_Nu$O|EE_8h0OG>c&P zJuWVen7*79$=7=v>1o_W&FL+F^=m@p)?q<+heV4A-~1SMLQ%Lg%>tZ}uM<~TKoD_? 
zwY$DfWmx{rvtRpXyitebkCyP`g&GM>G~lGAmPGTR_0e_ie#7yaUE78Af9HOu!z+eC ze0PL1m=64!u$4R;VNsHxhkUD)N?Qs+9dL2Io-Wk5rmVR(y$)ZpEFeg2RQL5HzZ7x8 z7hAf>2Y<8an3|$=S@O#fIoLz02yrC9JUHNm{RiX=UZo%eV`&*+2O2lTJ8e1ZO}i}Mv|RbR%)yarG={yeVN({qKmjB$?raiNOgn;g zAenJqz^o~>ZfQuT&DBL3p})z8INI#V)970CdJeg1(?|yqrM@EX#>uZjrfmAD`Z20% zRXoaDt4hf+3o9E|42fYrP?cEa$;e_HFsD|DG`POwEnLc0bU|ry%2u;!FGV$*e@*mI zr<246RgWDM*2x_7OppLI3UJF3R>3jrBH0l?Pp& z!s3gDpCL~NcJ)7<4!j+>x(>PD74AJ<{}gx0F&>$wcF$`D_N$u)pOXn8yZwr=Uz0Hm zob+T7ds#LXScc;_>WN#faH4go%gJ%ugJ491<48k&U}&4FM5;hv^0A2RYd$a6&%P9I zStX%{?`K|08}~`l!Ix{Q8fmyuCL?>5na#oB8mR2sGXU^0clhlH-pSEQwY;bDWQrKV zG>~S5k@(?nBSKNzJ(4tf0=J=18-^nTp%;8S`DR>2vjA&JeYedC^X7}Qps@xF-Tpvh z9e}OLs)`*V66XI02Lvq)kxFfo*~D^sE9s$~42CSFOvVy6I+5|5us0`3#74x;QQ0X3 zwhb{2u2b`S9`iWLTWi+yDIxhp)oAdFv=K`aXRw$Iu~?hq44k`X#Q5wvL&eYqcgp0A zPQS5~XcP&qmA2ZLH18c!2sA(`5aTgtP-hrcn-ah0NYtvfQZCm>+PAm-H zW`S0<5<)F)0qy3qWijmJ?^%Ehl2R$k)Q9&}mAMWSxOv~kcJD5$PZ|kIttj&M%D!J@>?b3< zyjPixCe6dYZp+1Ws_nXb)`>|czb^ibkzpfJZ2bn)GzmH-6~3<-035m|2Dxzk`GFronVZ#NJQDa`;re(yY>m9EVa@B zjcRGJ<1*UW*SlpdoB#s$UW0CZiKO{s%^!=R&^bcvM7Z7Rg_~>#$(%4&#H-j;UlUrk0rDn$x|=7$RY<#&9Xg=)4tqQs?P#2KcW@@{HrZk`o5KvQuE@EUAAn+uIwiu4YgPcsR7>fKomvHU(UrUFuyA?f`$DRW7@eNO#Ks$J51^JUyU7Od#ibMg~=&| z9$vZGlm3KudYISsjgamJjL)FEc}oM0W_=6fTH~ux1(>tjE10zk zilP_I5a^3ywJsGM(iUe9k=jhAsej6f7IXvAX> zJ|*SEir~(O%&-*$Vl!${0HOYHokTTzwqrCLs-lUywlK2C--y_OG3s#%vl^7NCLu$+ zt+xh@)OU+?b#*Dk$M`Tf+V1qot@1&;J^_%(qQd=G8o`vFNlc@JGgM(w+wXw=Z4FMV zyg4Yeji6OY`;Hc}>)xpCwkHnQPQYv)JqWlIc$7XVUKC^pkQfbF9StMDZC?&oi!Vd) z^3oR~PvvE#`INq#Xnh%N|8mZvUt2ru#+1vfE-EuN#E7`|vCikMOt#l`vQ^g@^6s

    ;Y(fR)}Xc&bmyi|?TP@A45KN#a*i+RP`^E~%<;h;GF-HkWfHf|xpMuG9cvq= zVz*D?4b|+rKp>8hIApS~qin-&k3`!pkQ_f1$16mJBB@9u|4H!!h=_O_tad<&-$YKolZIEtnL?YX*PvexGBI3DDM*)|kfF&cB5Kgs{WBYSS@ z0sHR6A8NYcs-+B9mm0Zjb z0f^A6+l|56$JGb`woJmt{8SWIi}>*@z~SGNH0)C(=9|VGQq5o=QoWpox~*R}v-wj= z2zH}c>&zH|!h*S)D{VDf&z7t@NIt~?b03C<4lt^h#Xy_w`Ra`4g5MR#GQA6!on?Ad zM5k@}yVId0}p(pU5(`gH~obuySauEqKcjjPs8gji}AP`Q5yY-WYp5WXCR@ws% zO^y2hTDU|w$%Sd>nUTaONiLC@LuEC`uKq)!QTaIYjinJ>^0U4f5BPQ0qeJf^UUO`| z3m_b)4z&IOiLh(y@?g+yN%9%wTxMX>NEQ;Uvr>P37(a>cfOChAnc+Yq$sS6osxa#j zqoQ`TlJWnGO775AP{2$=bj-(7ODhX~m22Qa-++08s1jR?D4BeeZ76wW z=+;h%!JH)K!l>%}8Md+vP=&3#HAq#23-IQkCqhqan#B%c=QmPGevJ8LmM?J{0B36W zvu;cqf5r`Q7<_k|p8+c*E%4`{iE4<%q;&JrO-o!<95aa|F1XJ~0Hh$hwNl_`NYlnf z9+Qj*$^>V&QIIBYGH4WEVk_8S<4C>$jH@SnNyI_@{ZUkL;t@Ue(ouyuC&X)M)K5q^ zdFSFxC)}883Us4^k+>^wAPMdf_-%&CyTfMSHcvu*7KUTwV%Tu@i|@v-3xDTU24*(} z8U}kLS3|seRP$=9c{Me!wVLUCxn?L&HLqS`Q##AR?Ir0Ygym{r#Wk-5+N(a-@6^YAw&%U+?GhywCf5zw8+5 zm6lpB>Yi#u9bvWjS>Y(T3q?9vx*q}9{I+yF=09e`<6O*?9}dLa!w)B6 zcKIQA8tnxsgR+ZZ-`! zIzG&NkBSn*hWPu)h%uo&=dNoCTI^b5cn;@a!JGnE3RrZm z2Np^v5J?P1VXFTNZ9A=qWU(BKjRNpd58pglmbHxaA)f^L#w&d+xCekoJ>)^v3Pv~xTY!`ZG4E2uWjmD0`as_j0yK3)`q=Tu+L;L--&lMSkC`IxG z?KHMYK^R5ofGrX_SD=HY$9scpCOSu_XbB?mcCleBIn~j?>;ju)H>?+Y)9jKYpk4)K zD+5Bj^D2Hs6&Cw0#qe0BIxd#%0vQQO%q~GF<90Cjn7gG)^;^ezk54mYitQN8iZfPU z)c`NL13Pj%HSek&7YkU^JRFbs1t<*W1XlENmoyp|I!3>R;KFwOX~Qyw>i*3j7;RVO z44#Y6005BYn*1U1x}JfhXP#y%<39W-*j8|2FLm* zUkGe%XM?U}+tat))_r*n=^taPyJ|mCVChdpCFIfnFwmWb@c0LS#l0#P7^eJq+%0Mz zR?UZ_sIHCDH*e=p@qI3rJy`9mPF;Z-5Hx2$Sn1lpS-Wr{^5$ikbc(qTFd+!eBh%Ie z%U0f6?3sbZsMFhq^gM%G1KlXS>#oK$Fp^c7S1%330#lzPSMW3oTza8 zV84M40YYvs9%J6QZ@?13UT9-Atq2>Otk`C!s%{2}$Qd(nRhy5~CO$O$g^q?sUtKo( zIu31S?Bukm6G$c=)G}F-8_=3YwCpLdo}z3uVECnot*FP1Buu5Ak%i4R3?r-bkC=Du zOsKK+6U>KS-u~hJ({C$@lksz!BJRYF%dzNX+z$3oZ@!d01flb+K;;YqIL#hB922OV zS#2hl4nC_H;uTr3HUE&pwoRtv5T05Y_*oVNEEQs2$NPIn8`4rwNP+-CHIw5! 
ze+7aZ1cP|O`)c!amhy_@0)%qoGJ(|o)O9O!ueU93eTO@GS;!(D99G)PyQ#yQrwKm1FFtm*^m^ybWX#GO;2|&7Q0ue#AhnhG9U3u(KX(wFpFVk@@g*N?-goX zwM*Fs%xMr3i!I^o^a zaUzDprk=4kI`afxmqS^#Xvn=l?3I0*N+H=`BSSP0YuE5KQjl|L7V?(tec&~Ed`W8T zhBh(wFY>Z-iklg=8qQzqW2(C|4yPTMrRGW!Ct(RA*%skD`|gbQ!A-_{X$kI{wJc%D z`ELkj`fFDQ<*?f^kbR=|8u?Cspg!aLH116H^;;6T*+Qf`sA~uzZiiJ7rLUqP;{NY7 zmt)G^A1N&*pX@!r{IK2a!Hb!nyKw*eC|A3qN2L`BhyP$R2+S{(^v-xM!8l&pXTsEu zr~>SK-?bmo$nI#fwacOjrx{th3>nIpE?UETO_G0B-9)}bggN$;@6t*q#bq|bB~WLx zmJY)nLcGO}?nF}*@h^E9db>T;x5u(wP@`$Udc9cc@Vh7n8Ps6G!k!X7~{zz&5b?zAQTbwE`cf*pg5jPjkuc3=#! zwu8^BrA!$pQ+2&s%5R3pHg+1UhSOw097Kf*OA~K1PcUZfWvI!t!n8FOj5klnQd)hI zewNeP;Xoeg{<3&XzU&a=m7ZK4@^h3ThwjLN;`Qdz7FQIvTAn!5wPk{v~jlVdbg9KE8G)SJvG$432slW(K5HtV;Vzek0j4 zua-qGjAj;dmlYm}D=jON_N=T3`#Q-Y2P@e}8M6jb3#Ok&Fh-CJLyC>=XA+k)=HcvJ zA%}Ti<1TuvvqX4O>9Hw~G}@xa3_dg~kk?9wBh&9P+eTqEn7n&Ne)FE|xT<1Md0isJ zfJ4=tWL3JRFdw;DEsBwo|En_cyn?-yf$)Jg*Xj>i3Px6d)090n)O;`&@TS?wAB1r} zN!0>`>h{hNp6%0Sk>&uAQ&Q{^^Hz3R-sLuN#mGa26Jq}1E^WBxnGHHxNNcT_y;)ut z<&+D)oHEPvP_!KCoKc-Kp-%55Y#uO|s|cJW!iGpmtzS#Otyg=R!(6&&no&1MovfsI zFDog|+OLpkDv8YkD<}aisN^jE7fPZ9w*t>j&f+9jpF=b~F){v#Bcix&0NGWGJt=Y9 zAB#?)2vGG3s?dynyD8sboe;`K)Cv7F88L`VE+a;zj3{=pE@hi|d(rY-&&DNLaPZpG zL)nA4R_bAS&>|~fqhKt?kRY%jN=S}yflYmY!!UK{2!rdo(1x31jqFsOf)qhd(akJ= zCFO+kg6Htm!+b{iX67G(PI+w`Bzp#m0l@J?2o5LvCvY+Dlig-NRJHzdRg|I5P-K+w^smqp@+R7N61) zdXt?T)kjB2l?2rXa?`&c|1QZ9>Qa3)ZQV#@Wk5n~``O%YZ#w{f%2XV-%l0-5C3VAX z^HVp5%krPGqu6#SR%V!`kC1Rcy4_p^)t#IqdA)VGrH#zAA

    M{0u)QdMxRRsSMqRccS+v)JQB<6WUHr z!B==bIGk+p9*lOnkI{;aS&tMnZnh3>LD3rWquPqnejig(OETX_^?gZgu{DiGxOhgR z;t+dZV(CzbN7Ctx3eH)lGyF|#9P{c_o|42$m7K3@@sw;p05;ikeW*$1HW^a~s8oxB z4hw;Y(jfNG1E*FSgBj=vdDd16=Y5};8Ph-?3!NRZV{@=GQ3mXk9mCye`+vzFMi~z8 zlvcFffjh*W4%}50<*^Zqh-^2DmWU~>X3~A6^kR1Fv5AE_B|*I~$Iu{^)kdMd}n5=nwiNRaGYwAK-ABYz?qqv$UM3pGU?r50fqFtm3}?a9KZGv zAH1VZP)K^JiWQ@2W*Rq2lL)|soE@t&Gt+vsHFX7tR|_vM{k!q<1OMK+;@ zcC2W-l4!OoB5w4ZuI4&Xc2>v?dnB5y!io?@)Ig%(q~IpCRD)StE2D$if~T`V>#w2C zwB)t|ScY~*i|mEs^YnuSk9a=)>52ZPqNU-&&JC9oEoK;CcB0ZF*D~;@kWj)*kYjzC z(dh81fY<~neaw%}#BAE>t{Cc!G0QO71KzMcG5z!{iFWF2iGu3n5FtRnWv4WAg)4M& ziV%QW^DQhED#=#Yx;C#d)9qn~I}KDyBSdD&wrzooA`rede{IUREjiRh`%k_#+_X44O1v&)BWx$XvZWMoj%X-Da;(QU_f(<$#>$NV#_ME zlsd&wu=?Dw6;n2&Kkt&=J8uesQ;q7Kwvv03=rPU2g(|0NGpfP)su<-Me~YOJ^dhI6 zrSSMrEA7*jM4eL`#FN;mjavM%NAJgYbXJcpFepKQFNj|R6?tb6H%+DaPrW@}`Z;C^ zGRW`_w~z-Dq|Jx%RwYsi@w4el!dl!Gm$U~Wmk;d5F^*dNt-pCq6rW#w4Yxb-Xpu)7 zIo%)|vr}$2XUZk!L$F;z%m<~(Qxd&au|F)RtK&W{=X2B1;ifi|k6pHaj5brpH~BM% z1*AX?7cDq|Kz@rPn5+9OHA{LRE^~jCyZw8IW-x5w4JGJyW^I5J+YP( zH~@7eoFGwgW%HWk@X_m}+vR_{xvhO#b8Z*b&h3ATdF_DooB&2FqHCRbZRdeA^ZMT+ z=trSwTtP%CGrkN4zm3mr9g#eyAsl=fh!~3G^G3epZ7`Zfc%e{#BJ61IsKjZ zM2hgXR!`PH;)4DgiHx5^m?j5WovDFDCmhZ~U*YrR3adsZzb?S)pPZ7#0!Hj81R7^` z$1MipIV!vQb^|lvpXdDI$OpaKOv)PHXz=D}GWq$SSr zpv8xEh6kx1ON|~!?6i0(m+V2tOUXerE<`ZhaTuwfWouas>0FRpuV6Mw4n5-eK>tzo_!?Jm>eWvg2%gA1mNdk$$tFb76TDz}92JHg?X8Ie9r*@Gkrl1xl`V;mhGD zgXpj<+iZ5K(iG#9`NRE{dat?iZM&@VU|Cnb`Ib8AfVYvm#*?E(W-f@{7MgM9Sn+V; z&f(@K?;(n(`P$+Py#Sn+D>o$pw-)eZ*qr{3D7t+r2?KCX(C-(C=1~W;`o!;hgim31 zT%R1@=MpVOh6|rWi9%-TKb^GW=`S%LxfiOHy11o|ssT_k!>7Pr@(pq$mF*8t_~+;J z8|`xzYpTl6=@Y56NQRB$DwA@_NtG0{!`jycpYrA2s`JZ?!+z@IE{d$GaT`^FE#1=; z#@8o?9VlcEu5CWh4WXeN9Vz;BM^vapQOOgXEnJ`zA?vh3vNybjyo&y-E<_aw9NE8v zK^rvefD}Z=ckQIOm6Ols=N{Fk5X7e%JEq^<#s$kbJ^v$_BkL93F+;iGs``UTs|ltCN4o-@dF*+HG_EW~U!5i200>-ftGoXmgf`EMm(# zRLG4>qv~dVPf={h-Px2}?BTYtvFCNh>Z>czw1+%Y*Sr31XBh4_Pj>H}orNIC<+vfx zaYt+0*be9=$;X8Q3>nj)$AY@0DJo$jn32VXn~x{;sZn!i$0G80H@OQlCZ~BFMe4%T 
zezken`?9-m`{Ri2Q>pDTg%9K0L0$}mL$ZIWXk)8f@FiCdBcikIST(+Kc~EU?XceMox`Wok}Es82h@BGI8t$_HsgU5NhAj= z^QW;r`hy;}Jbm%S4-mXnQ(}$8Ei&Crr(lxxqT4z>hnK&rZZ*2)HR{48ZKp?L+!UH; z&c4;`i1qyPc_|)U3I-Ls#ydD&YhnH-yvBNVX&MX$!v!fY{dvx)aJ+CpA&O=+sOJZRL%iZ4 z;p-NUzR~V6u>^Rh2BrYb?<>$dqc_-Q;ioYJ0503*#HkUk-70WzEx=qtf?MzxEW$}{TgKmfIV8g|-(;-^K z^l2}q&y-z4Zj%sLP*LuPWGH*-aJGyAh)e)3sNx)D<=CafNlW}K6Ok)Xu9|&lq>Z4m zj3%q1Q;h3^)`{`t#YO!Z10z~$5lsBO0C1v^mo?*UVpJ|?RJDy)=Y+PGE@r6)XO%y> zwrz9l(!050WM0bYCI#Yv8Rr<`!+}#qC7E|iEMo}O+^oBe`w@ox6#RsmP!nyNUbTUS z=8sSzo&tsOfJ@~dUG8244aeFhIme|1=9E6KMdaAIF1uxe8+@@oSfI|N{hi|O6VjOi z!r3bV!Nzz-WAsL=&zBlqH@X$LA=|`_y0x+v_g@zrRp>hg=L-S@BjCl?l~^aB2Le9vxw!)Rtgu;034c=E`lLVBC#pPgxTEA5iUuz5Jr(+ zS-?7z!x7PM@fHhZckFDw+}|P1r_M=PQ)Q|y@~3Q!l&B%dMC=5@$$tIWgMPzDnh+G- z%{DpaF4wGDWM2xJG}kD1EAyO>+il(BHH>90*qe99--Coa(Vs%})vdgdLMDxFGYH%% zur`E8ihh1v?HD7DR|qwUzD9*BfD8^A2y0o$nga4tDm<J@ejy_ZIXqRgYPUzs&x=wqN>r^149@_~Jwg`FnpW(PZEKMJ}gbl;R z&WPR>B$J2jg@eOI z$CX`VoKumGu7-lc)`lfZ3QELJa|8q7427dNvgGc|&4HH|=n1Ox^<-y5IRpB7mzdR_ z+df2DB93Vc$7r?Jw5FG7e>J1$CsIev@e^){;dqQ{3f4`I2OptA%b2nhaa$SO168y8PFP)|EUQh` zV`Xx63-Fr93a7CWo>@)mc*+BUvduZMY|B&CXxuD>`5EmW)z0ynXQ z%Ij`B8rw`_tiom89iL-M={H=%Ti7wl;O_T54fe*`+;yzk+)1!SY=E8DnZp;xMxVEh zECFB5e!6vmG#q%k9XCr|hb$IrieKH^(g1I&0t3yMmwV zna#0!R_KLisZG;>NeYO+S#oZ>H>Kyb&p3P!KSFo zLMT~wiS2^z@BN1dj4GLByOMuRuo`1X%er2tGM@ z;gNA#qd;c&z<)-h>n_3Th;5p^9&v&-2xONu^Gyy0**}>z&eZ7TMq+%`Nol22PKNaVmof z6KClGyA^4(go!75X|C>Vvn?`?B!a*I|g~8=?vWWB5E*0dU3o^Gm2c_6&g3R4f2OaeU zMctSq8Lg}hQ~E^xnbzo!GD0!&R7VnpG`nh)AOPeYYM@vqapgxlpc#5hy z^3Bt=$6Khg)RE0LRQ0>oMWYT9TT~W}j&~%dt2-O{xJNnaI7n;qX>a~a#|wksgmlb} zl@4Mw^feym99G^{Uha_7iXYBB50`40Lq&^sM2|*mZtubN6uj_Zx$tVnud^Hi&jh8< zb!P7up+r*HmbYUzI;npoomN0RuriXvN*k@UE(7ys=oL6b;{x|k(*V0r%W^KY zSueH81|Rd}NYf~fuWIcAzk5v7cP1g}J!XbiRQi^tYi~<>c6f{?$rbDhK|jCiQYF5GF;71noh8rgptNn^Q?aKK)=SjGs%+MSfmJDXZQYf}@WG!HWz zGh(mhz?|P0YmS-;4nc(=5biwWtVcnT_D6-FgYM)4JrY&Reaco;v+B`X<+rRpp34

    p0?$+EE1|ER(*e47ZrQ|yld%EhNJ#Bn)d^MNk3 zYNvQ1dbF!vQV5#BFuJK>3^{m|tz|cSCa<;05&Gy$ks*Q$fkPqU?YUf0W3MN-b2D`A zZFQ-M+QK15t$TrhPVpHH=o5Xa!mYw)ouJnu-be7m;(`e?`m$13G4h;z9o(5q>61L- zvQ?k&9q7cSj2W*HX_$>{G_p1srf&ZQzSmT9CBe6xUh(MfIwtugaeL5RxnctcFEj#P z;=!~jQg%JCVOWpeVA)Yg9Kd>8#3v$WJ_#7^jW*+jT;=*p6+c#WGf#(UQWx(eco1*+Tr+1j}sSI ziQYO&s7yIj)>N#A5bfbG2)H&LKZ$%)g7Z_EQL=w2wNJe2sie@Z3 zWjx6&9t^l{Lb|e~r&CvTNi20(kV~yo_aIXphSWf=)d;WWJ0FMKOec}DRa+i-7Q}q@zvgl#g}F~Fc zUxjW-)*Ws|I^66%7Od6o9*Yu|W&lJzwtFmCTJ=l(&pp5XstR(x`5QgBmp~cd?t*N| z%@iyvDp%)&t1AvVf)4P(Jld0zSHB>(-MhxhaZu+GJmFaW?ILt!7&H^ZCmF6C^#luU za4?zN<$K4fn=6V&JuP5%>mU_GKqdcX0Tb`ULG#B-qghsX-k!ODQ%>!k=>A!uAmphF>#G9EVO@rArQ-C5KKcI6wj3 z?QzwH+^_4Y4ZBOPF?5#PJRl9Pp|i9Woly8z5!$0uc&}58DbRv-irvl#N*=4WwJC`` zUaa)Qzsn0-!l4^Fv%PqUPB7lZi_^eUL2G8&znWY%H#elSDaEQiRJLs%T4FNYBl5!I z%J)lBoA9RHPQ#O45)RdMtsoy(zC$`n(#WjzMBBuaoSp#9OHPNfhKe^uY>`o(_T~v! z)7eSaNKLT^OjOJFO`0#-^j#UC-8$7@I0df>(i5%hi@4 z8yhhAU@`~d3{b=~TD%cw!jz(EjfmXWK+)z~m?Z_e5#Ew`x)zT57VL7}qy-W-vv|zq zAW2Cn+o0x@Z9uHExy%D?FH4pYP7w!oeL^&$B@Ij+v(c#=%Ryd24aSLz!i`E$L37|o z!W8h@ab6J#7T)AJK+kZEB@mz3$+9_hSHBhhvgc6yM-!|>({DN+uRY(xb zo7SLMS^j*mdDba5Ui7FOi4o)$gU{0P5WdZq=St?B;m)>JTD3htQ-fqYtHFj3nRPS- zcS%5KRb{@~1B=X1+7MPxJmlIx+r?RVj=!WME5$mivd(?G)-s{`5`L(aZSKBZM!aGp zhJ?mUN^LI~+yu=|Ba%HeyV352`IwnT_qvG*P%^?;TeX{ZKUA+g^uqcBBjj>|PC9)w zlHq?5uIZ9DNAM+?GFv7@sBk4#J+Gy~Iv3~(My0HzjtDRGLJoEhg#UYvu!FK-8RdMx&aruT+5 zjG{iE-y-tF2o}(9x#SDUEoT;c({g*Z9n`_S>JI8~x;FY)=FSfBN$Y*uBKCsaw0y5q zN%r5_$fF=|P)R189Z8^MUMYqZ4P?%5y=Mm=GI^8Vb_v*lJSVbvX#A^Pzj^Fg+-m(f zW;b6fwgmk{>SUe>KNiPJR1;}AsA6U^=r8AokS6Hn04^LCSfU@ZORU%2nKc-+ zX{IH+cGmEKyPsip`8$>TtkjzKj6Ju{dj^f9#U;Yg3>}m05_>gw?g|BdI0$tBu@GoZDEt+I+brxK&`i73T@ZLg z8AE7Rg|xXc2kt^;3`iAG6(xwv>@3kFK~Y*3>5JrtI+kZA){+j1`LblxR*p8z(twiN z89(rk`je9rOd?pL6bf`JjwYo$Wu zzXgZ}gM}OyytFIxrj>XVl5$^eDc7jr72@@xvK#>Tah>^WkC&~OgmY155Q?5mi=NP; z8Be|Mk0a7@;8L&wFEbTR35XSHw#zof-&thY7_UjZ;cF`^xS@e8^$6`;^l5-1YAt!q zl7KzKT0BRK+^^NH!yDe1hFtbP3OS8)@N~7Y+H-VfGMHq 
z?5}Vn0l7%WZyUc~&nk`qMsbuXftW`saV+7D)kf%T%{f4}`-D(CJX0ZEY*moR;OK+J z0JWRWXVVj#x|so zdpwGogvJTtFo<=VQGIxL=zfkWbXyXhO1=9*7nB4!te#qPFf79cW`(P0_Uz4L%afZl zxcQ;`8e49qE5l4QwXMG*J7?A0ez*mMhx|j0sJ`WYGkE5>?~W5%^%`fGHJEOa2Fq~A z;yoq!A6|T&za~N@X2TZ-fal0*gNfehdq{>R5XGVr9}jG^;vi^B=Ys%vIs_hMe9{6a zpzJZwk&n)DIgt@ZSdC|SgLYZ(*o|SP$-ME5o@dz`4xSUlcCZRex{=cYsFoI*v(QA$+?n+=)d!rYco`eVTe}o7XmVjLY?UC-N{H)SyuKH zjT!|IH5?fPNN6nuzMf*-A7ULqaMO|nL|GhKd;B(VbzZfg$Z6aOoQQ2#>ISYJL{h5- zKF?B}j99>Z;SyY_sZuu88n40q=-2gVWW@9gW{l}_e^PJ?#R}#gTWFihDMHm$3BTA0 zoSGc@^2hjP)2$75*;RYJ+SK89(66JJWK8q}bB!BDD(IOH71d0gQSsqYJ&7oY+e@$7 z>K5J7FG(OeRYpt>Ic{wcUjtcr#Ky*Oy61V}HMF!}JbjJ+V9R$)=)u*+wcf1~U;E)T zC%&}UF+0fxuAQ-$PJF9K_c7jC#DBfPatYmUarGh)n;nJH`s4CEq;XaPb2-JLpzTv5Dh>is-uf7%1J^1xT zi*%O5eu0U&u!j%wFg`;C1=6O{j@!z3)MeWlzno+Av~S!n%9T0PW|Z0(NC9Gi?DdNz zOQ(Z#x^a>5oej|m+iZ5LXbC2FMThfNw02_$I35fx(jXxvAyHEJADaS}OM^22DOb)2 zciVVrYfy}`r{wY>`+FSWfkXt7ao0Ro_KfBmTk9CzwKVm%EKNmx?bf{=cYD29Y&(`?n)*SE#<1`4F)J6mf00v7jTq5}Zff(J~fn`g)&Z@S7g?u~y#M z->BI%w_8yJ&tPzk)li)QciA7VD7*1bcM*Cz4y~@1QQ;x(L2W1)`z@=BJvThW0E*F9 za=!^!&{(>P*U>Q2LBpn4^W6QC>`Ye~anFkzTMp!3O8L}~)SM06`es8ieUu6^hioyO z?A+D&-^$uv5(&7}LPu##KHbYy$kCW=uXgzsf7787YS7wV+jGOw9LE#dWE({;cnM<@ zf6`iDyI73=)YxTeO>MJf`nw(R2T+|#(RVAt7dFpTeTfXysQ5<(A6g23Y2CVxJ`eZ= z5U*QjmO9+52isenqx==bjLNa`!n(EQsf7*H-IJIwwDzj*?ncMZ%j;C#n_tX{Bqye>9&@(vF@g%q?6em+19j~ zj_tr-TWmZr%7S>`8lp7a(11~N_zV@;(HN6VqGnA?R?nCSnYOjk--63UzbjqB{}tl43D z597DXIbCt)S-Krg%(IB8=0vMkSQ$_PqrbCB+oDCFtF(E~jaDZ{aVu-E*`sLB^oM-4 zxesvbpskp0ACTU)$GA)|vNWb6J!4N;8I>s+TeMB>lO%XCIWHC033rKD33tYtv=RDNq|t;$cS5tV9H`KzY#d%#Vl(S6V%(>|dM-Aoi3TbujFE%mLo zZ{(Wo2m5U%v_DiYKEy)lH1sWALuZ+_J86GA%)K%vB@7#50aI9^E^Dc*qGskTppr?s zh~8Y}%FBFPqnMd;H#%X-Ofw~q-7=NTZu!#GZkgW`G zx#YVn$%W<=S1Fh_*pjzY3cGAOscyC^PGQg`yzBNOWphnXRabJByjvlniVs%e^shoN z3fh`*G?5^w>mjU!5f&T_$^}I_AY9~DxV2u8S7CtK1&kdE574&(zU|>t?owei`ciTm zCo~vn_^kKRc>vU=Htu6ZBou7I)#OW{-$Rx(4a+)38np+(oJ;yu6AiI$cLx?r#x0pc z=!qanTbF|T2)EI2?SO|ya5)HpN5OLfZ9tm3hfPDe1=-EVZEW$qHJ&CzI<{kBEIhK= 
z8$m=~6lQLe)pI~MI0cCbtNa`QS`Fm`)_ zF{K_Q;wEz&?Lpcwyg{)H+Egxsl{+ZaiWNE)03d_RqIBoR4j%~D7al3_OqXaVZp^53?23}%X2zNQY!`LIb&?KMoE~H;{N1}z9M3kkO`3LV56<+D@&&ovr864b40ay=H8P;9-5!5?LkK5xVvo) zclTkzHSWHi59qjGlsn*gT0lAQH0uNutRSE?jsZ7c_h-18=tpris;Kam;cCOE+$`G6 z*@xrmVw0;au2ENbf^LNy6HFwxn2WlUh=2gPfKy>r;!EUlsToSM@KKyKPmd?~khA8Q zPR2qM^VNtFIp|F!w48^ z;`g3r9QkH#tyQpygC145Ame2Gwhz-tU$azFU_k{IDDeF^&`bu#Ai7vsJ=#STUZn72 zuWv1!v<54}fbp9(pbSXKYhiNe&iKT?0!wKvEQi!O!G^0xlvKZ+_lU{(C#v7Ow*HHG z&ps-C!v@{IByQ`^$1BU+Mi;t;{C|zVL`Fq+-Wwm>Z`&<<#ZtMgy;7}pbaqAETekM} zZri?N=dRs-S3TwGdVk}od!F|6XAJE9g=bzf_^f^VpZ$x^x%RowJ8VJOGE&t`$e*MWt;}5%Uzv0UN&BbAry479fx8Q#Lx_kE^MB?4ARpV{6@U3@o>0em;FV%QE zzZQRBt>5B){n0zEjA^suU6xy@arc8CiCl1HQvdu zcb~UYe_emAs)E_ww$KPFwH)$@PBzYxeGi?%k}b z7du$K@nLIo$h~{~E7s<(xDkEmvj)pA`~Dm(FK{Kl`%UZNdROuf{>4f@-~Bp0W&K@O zjk#Gi{-@u!Hh-xaZ{gP?Gd9|T?%jt!Y^@w{zuw}0JZ#BM}cOQ579B{oKajiVV^)Tng z{&d&M=iY1mJk=8h7ySy^gLu?$_(wudNOXk2zd!aZr8aHCC_Ny_e7)U5*2)u9cs>-9}S!uzcXJ>{q*k z`tf(zFA^fY#rP|Zu+5iUD^L8Hy(2DJv-`~t8=gtvJn`rHW+k~8RP*`KWhHU9XkmX#P58E$k5^_;%6SrpZeJL-oB&}I5a)UwC2yK5CJ+j*^VmTcb2NbdhQh%>q*@FhzmZ zbscTpA@-9gi4ve@RAYu3t%^jm$ypV^Dj1*s5l2;uYloHUJ*)dnx=2--Bafl0>{#1E zsu(@;R)&>oXRE2-mZ4g6Ag#RCzKJrGO_Zr_qKxUnYY^KCPsb+O>D)w_E|p0TnljRN zzc;!rr=^V4stq@ZnljQxoR%`uo6kb^?zJ>a`DQiM*B~T;I%TAdI4xzQckAp>``SKJ zK3Uyojy=t4$&qxMa^IzyE7}AREC_nd_*x0RWfSde-9(w5O_b@~M44@yD6@SNWp-?$ z%+5`e*|mu>yEjp$Zxdy%+C-VBY@*E7nkncb7q%X+dW$Pbztz8_S z%mw)*oBFuAw$CSXL4Ls|2=&YB65-n)f6|HYMydFzjk2j%J()!KkI;&K#fZR;Y%$$9 zw5mG`!R1OkoR;|125o%&vL!f#fBDVf@y!(a>&+A*QSjxv`=k|GBLUv@<<~yhD*2Zr z0sftt?~PK)EC0R{U@=REjW&Fr+DxHGH&f`-n<=!knL>ZFnL>|krqE|LQ)qcJg+A*F zF_jbhzvL!x$*%Bux@k*Sv96W02rC7)CBwNrI(k9XZ*qywAYPH_%>D{oXmr+Le9MIW@0b@>xjQ}+i6bG#G-tmoF|tr^bqwdQGoo8`ijHmdEpIw!SN_3VsuIlQ354&ZtGB^ zz3C85JFHWX;NqeJ_S)qTrNe6*?usvisnPx*kvYyG+TQ9AB^Kx!b6?`_S2hB$qOsw! 
zbl{M}K&c!AncTDj3<&wFG%>)YF&*SwBp*ZgMdi$Q4p9ZA_A4L=zo;TO*t!ehiX(Ln zQG%u97v;hSI~(5gi`r@N9mHG!k|MT=d6Hi=^`7C2B>V!us6|3^*e$S_=6K}u?!p#+ z6BvSjGyScjZtxU5(r0ef_=kTChdd*GOdr+wVLqZS^(8Mq_YR)!j6bB0Qv4TuM5p?y z*HRPimfr~K2ZO=y5jKQ`gvce?d(>mCLv2;XHISa_{F^YFsFZbxB+}?_O zGFHa9Ptu*{#Ut`D1Kofe+Qu~Z9N_RFC2%bU@wFGL)KiPEQ;SdVU)O<uo+=5%YYHHI7UE8Y=837DB9D*ZJ;Zyp81BeCmYWOZngh*}y@G z1yuSuhyh*MYH6H+%A8omC9uL$9;By0Ia~`NjZY1rcobe;F9c76u){DRMrFP#?m>q_ zK)*ket^)X*;qA}l9a`maw`>|>N<%9$4)s;X2n}d%0J6MFnu>~JLSQh=vJY>vEEIZ7 zoP>Mo4#4ST-cN^m_)mbhqJB>+>LqmsgSvw63`Wj<4NwiSmJoJ8Qis%v1dp10%TNy7 zQh9$``5MKRA-^O;OVLNR*Z^G$55jvm#ha3I-jj1!pV0FLi{LekQvG^$iM8Jo^!TL+ zx^M*q#pRDf#O4$WMU$Y5IxQ!Hx&`e?kG1*UR+g5Zw^*C+ZT)$PwRvZlghMJpO&Qrm z(3!1cWJpmEpxwFcbcaIp9zA5S6Tc!fh&qR-%Q|6FGAWRxL@N&lJv^`!l97w)D!Y08 zku8L!X0GNUFjd)VNc{6W&WM)fxYHHcjkYuhJ(0H(eS|`N)UpFI8_E)OL<3q{ibU#^ zIc~mLp;K1i2R!;(V$(Fzq7yphz4;d#il$%b6($Ws^XZWSRFz!FfSsnqH(rCwc!6a^K}{XrB90-o2*KPfDtb0U+RG4-Fzo zP=VAcgfh7HE6IVBAAv#qVmW$ed%uzwmLqZODmElD-T- zIG5DjO7TNr z+$QN}b#GOW;~fs57b7f`dyq29MskY9=VMCu34^(G8G3rn;po#0XJ78F?71!i+^vX` zS_}p+5i40*8crgZl4kY32X^-5?m>G+JZe2R%-u@~So1zUwqF+@L+vLElwo^VpKhLv z=#}FBOLoA*@=QpW(k5~KnQR3DM&vk-6lJM(GT+8S)g^0S4~aB7e0aZNCjGifaGaG+ zm(Vo38PRE4)+qp@P2#>@;8+-~*cl*BxMY0Il4=TkpsLx?;5BI|iG?H;iov4^qWn|Q z*{#IXa$Z9<9tcC}XQUl~{wBTgI#k5O~}7%JvX_ zRa7j7xCRtWzFvs3LP=2`6;G;l+2ZeS@bUMyS8EWW9Ig|h%-T(;`SnSNvJW-KCZZ5! z2akc~pfp=A7G)%9QM{`bG_TM z*bwRVEHn{?Dl1rw82M_%2 zb$udH4eioS2{s~ zTFbnv@l8%LksZp}wrjbI+;1;(Xri%tW4(H%h^-RiYW&_^}M8;eCYF#UYtucC6zESDVah`|`UA_1+ zRD(R%LZamcjX>HAaj|Buao^Eb4sl#a?I8MPLQvD{z?vi1Qk~ZPf$nf9KLlVKHSEB$ zj@05%qxd53ufXDUte~9xV7mAs^-A0Kpvl_Eg!75vVJ>+X0+XHYm{0rOaOglDX~9b! 
zIM9&j9c=&; z+ReQ6kUJk)r<^%knP!}EjmV-q`*R?uEg_Yd zNPNM~T~?SQg+iiZH?~ACgBOK98BUDrN)1jl21>MU!#1M09JGGyLU%HD8#M&+SP>4I zKtr_>poo<<1?}K7UkZ&5<8|wteTVDZ-Gr*NAnKaL=W3Ef?=;EmQ>nn+zh$JujH|tq zGDx`J!udul!Yj`rL6x?HhpJi{%r=jv^jp~@eUO$>LMSaH26T?P$5^j*RF4qGBirhE zod@#r0qn3TQ=?`Yqm&&Je1Fb0O0GA{WvYFrHjPO_P>hVw02sWn&NhjxJMrZE$*O|R z&UFjQ>VOKpV@=W2{zN&9qe?kMx%%pIYQ|Cy*WFT8)kYdgN5{FI+zwRrFYoojly7}fGiWw*dai&*0P}znk~;NDAhLEexVAu zG^?NvsjPB!O%8V&&o;RTZD$8QNXPpvSWd!#Y|Lt(#Td>KMfjyF>+GzXj?rNyV>O~z zpdp!eI3$5X#u3A|?V+^dA!iImng3yYrC)V&$%@$_c@<0dw02BVs~qg1Ai_^+vH(;mS$Ll^)j$swi7QP-1ZS;?Vb+uz=hek(dd!FQb)hgC%c(o($zNw?2 zR0|~6CJH~s;u5_RF`&J+LAi(eD4Au(>avV*ZkPlG(W@~G>7eHN=hM0Jxb=Lx<@@^w zBIbP$^%~LDFVk{jhMq?%G^)-73BvPW?J$_f{t>mgXIo^ z!jcEtV45h1JZ1DGH@qQA&!nZ%{C(4}W6ti&9i?e!t1+Riww-L$mLEO7Fe z&qXvd3Sab~LUXL6=Q{-CFk(V2Y4scf$x2QztR)vyAJS8`FBZ7VobLcBi|vR?@i{7z zjB^WQIUeILDLkS-bBt9aQz~fWp;oY0@sM;7z8^9jbLbb=SIZMcH8|rNJgNqdQj1GP z&{<5h#Ix4HybXV(XbIwAj@jaR2mRZ1(7&P;zHId{b;<^8V;h!blrHDdqAt=A>G+Ed zNpnTC-*P-s(nu~^cf)+|iXLS)agjvHA&88|C3gcn)K8JrDa5!{B`&-|6O?0hVUG13 z<%w0cPWE0uyyJoZEYGR8lh)gu{XNd#w&?G`nO-mJ^|D?-KY4$@-Gz~|98ap0sc}*S zC)5}}kLZ&P=eTN3QAF-X$Wl5St!xwc(*hsAr<$88akr3VpH1(FA{&9u;z#caWhykT z*G{&b>C+s5x~D}o63-Zdpu`IU{fjP{W&t2M1IZojGAs_Exk)djFX7iLom}?V=$No0@zHW1_BgIc*66)0XCDJ z*t0gV!2n213;FpRecnr?DJz6rF*KznS3etr_1I!C2;%*g@3om;BN_gzhVM9Fmf^QW zUx#3W!Libfb&iPqs%|_^Id)!ThB+2gNBmNb=k4zTe-ln!6sbMCFHsPKGDD~6GKIqU z4@JHuzM9JoB%g5QQ&9)!RibZfOT-KMOgSF4>1aAZY-f`k>-vkO;)fOGKvSnmP(H2T z1l7D!$?Fas4a68;*5z)#YaV|QzQJx>+ISvIHDHl%U!AbS(NbPS2f4Ykfq{%VnUz2U z7YY`6`n>Rj(8A|Q;XwIZ0f@hC(fe$UpUPz&5BYQ-91dmwi8_C)?NVngU$FlIw4=N+ z=!#kcvr8zD?w?9ehH)q2fK#@{ej)vmo7`-*0H?e?mR=Gz(5u*T;-FmK50YSn(8MH@ znc=2wP0!9a>5ih)2lmn^B>!8>{Xd!7N;jUu{BCP*o~*cFR%j;II87}`-0(>z zG8RiKPg1dTu^Qx>7^`6k$84Z1!72vl}$e{SF zHIKShh@W>8f9f~gjRB{ulyMsaevud?1Ke~O>3V~~1pvW-nQ8jGH7!=i@M(Tnt&f^q z6;g%~^WQh+Ypg~2a?0Cip`XiNZC~TC_pWy1M$!S9&MGB=Ugr3e#d=DT5kzzC4yweg z)>>N#mHvd~(-eV5B|5z=L|G$YUI|)Sl#ddiuyk4i6qgffLQPAda=ObFnXV;Q3W!z8 z?v)y~g_p>YX|;^Xc1-)S0 
zdF=z0rDNc(!|?#|F)nE$n_eHACTk<+3k9uhp;+JHBpyvREH-q+KIt3#Ovb2hn4cJ> zzG3uW{Q3qX(h-}*gzep`VJAo(>4dcPNk_#EN%t*p%U+*=TsnYN%VqxAn1IDi5fCU^ z2>7jpfJnj?0+w0{hAYl?uSYB<^4F{KG3Ii=h|EyUc{#z-@ zJoTPpu3|SjwStuETNE?3eXArJLA#EbQx`;EQD++I=MT5F)2{5LGDym z5DP|=ff)ekEjmrgc=j4JMhw`VMp--cHZo!xAwHK=;&||*#g!*;k;7fK6?Bw*eCFsx z6rbf!^w)6FzT7q0obRfsSg--*24eeRy@4>h+cv&Ht$xnwg8)FT&_7!Xj42LAw`Fvb z(V{ICN&(=Q_DLp3xiONCB{vri-HAvlMc+rub_wCt>+QPol+O{x9z5|x`DIcnsdz`6 zdqq{_R+>kla+rtAmAH17I$)wbQc)D#6dDcYIm-?yYoywm%th~qo7wv1;;UZ4TGDSu z4_qRDt)hH>Ql{*->w-u0o223O&iGf9h=2EW!ErrGbWJ6reLooB5n&zi;-~HFBU*c@ zRN(&#X3$zvvNvd6h-CX*7WaBFrlMS448bZd}VC+U24zc1CXtlVEN#WxQDj zA>z;R5k1nEJkP~n`vT#)Pm4dqM>Kg=@_bkPo!3*oJN^P6D*tdHNd<>lnW#v|ZVJXF z{Tpi#Dqwyvcxwsin3t*tYf2-;V!9lHNWsT)7{sR;VxUzB$I~Yv*uGqkGRngSAeKxB zAXU5votPM`ZbWC2t@uD5S4Mo6m04>KUdgj<4Huu%VP@8NtZeIF02=|!D6SB0x9C0E2_fY!vg5=qjZFo8N%fUoiZ_gf zJ1|)$4FAfG_I`XM)+qM}--R#5yh`yab>*T`j0sgxO0>Y{M#+F!I+qq!W^n?g2Z_D9$LY{1%p=Y#La3Av*ZT1OZ7p z(ZV_@5&sbbi3IaF$*vWvYqAn4v7hl?tFUS&H_T*_<6KH496HWT{!$LZZYg>Vd0g?A z#5G-vc#f?mHki9@&nYuZISlfJASmYK!htB!w|AQe!H*~TOh_ULE`?)6fF7vGncWBB z14BUAeEOZ;zl1@P70DI)?rtjO-D_z;&&dTZ$iW~AOGYKFYnoG%ZGxc?1Ek3&@6SVilCw?U&B;yrP2AqPbI(MU^mJl6kV6!ieb6 z-n#@hjG~{q)t!}qV%R=e4F4||9-XGPlvhM4im3VMMdSUr#An#u)-2RIP1dQ_D)^t=|A9>oLfRz#&QUgVnr_*?q(wBL?44gsI z;o8Y)-+0=_O%zjQhp)9&00YL@kk~?o^q4UIe!yjf4$g$?gs`=^BUwa)_-XO`zrvz8 zewDlGXn*|K2VZMHpY-syA8DUb5mYVm!%f@tm(+jrMK|GFLuB4ss7ws2s!$oUXWKDH&CllhW;z70bm1{O6Mk~pj=9mfpn70kmCD(YaMVGxv@;qt~ z1k=NF7T=0wNfjF;D5%J9pUEN*OqIc47ycX62o+_AqoOL<*XS_dJEZEnf=^*9qNyaU zgOOUdY{H`NbT+zdL9`0~b`ZL&DQd8!N$i363fJ3taoAz+B#FCR&P$XW-@m)#A;cHV zq}5$*b|}!vz*2nwPw?A*`~qhatI^}z1RVNJMM{tF%xu<*Td667^ z%B$Pxtw#@WN8x@R^wvA|bwG%BF8*V}$^yu%#K*7?+HHm4<#^2w8q&o=&H6y0IYCj` zi6%Nkmw2XzX0^;SzE5=UeMU_!TT=svqaSMyiz6%SGCLZ*&GUw7-&|5!iN~$Nc*o&- zudGD$Jwe~5G8>4E-e!v7-qtgRmA&D3t-qH>Sf?k|80+v!LDYL7fy|)v5CfZs5_l<1 zw&5~x3X#h-6hA&sviKq=odKuUavb*)bRyQvTvlNLD)hl@s&BIocla=$7<)+3t8M%a zE!5&sb0w`hd??E8C(M4;i9}14YlZ?d4;tCIkV>UFC#TC6Aab6M*S5PmFDi-<_0OTj 
za|!-3xE*ZN=jOX{dZxoW1Uh za8<_$HEmYq_R*wjiA@rmXw|6_y}QEdYmkfW?rlKZz4I`nO#VQYYsXSv`6Nk1`I;(OV)-RkG;y^>=y^ zV3WJE7zRO*bGoEtOZ-P~VQzmw6MToC;AG;vsvrMCMK>;adOK=YVv^KD^Gfty&}x@2 zWh~!x4|cz6dG#uGaZ5%-h#$CvDPah%WomN7Ic!LdCDcUb9v4*II!9u&HZ zV^N4Iv2M@Ooe!qOiB#rlaWh82bx^>r&8l#%0K2996xL=hO{eH^o$CRtS$r<2CO`Cg zD1%&Rp@Yg`b_n}LyW>gOqI6cuaz|DMA=aLJG+`{=jC#jI-8eu&IcgjWicYT4CD*9j zGfjr*_1u7uW_8Olz=iQP*Yb&^<(1VfXW8bIv5l6OShuof?$Cn~?LiiwJkS*$hCShP z)*duVNV0HK4q;R5Y$X?jaQ}$`(SGBkA?U!IHfWmG)F_VrjwJH0Vs}8~>fO;<8|Z~Koepy410fi|doFJZ#6^R5i?6`#3abV- z7z`_NgbEk)NsL1{)bfuD;nw@-)c}M_dyuPz`2xv=@$7Zo*s-;awUQ_i=??5#kqr!7 zVI6uCkD>5!RvhW@eH#Qj*bK{V1KESY8(2^^mA4?iyg;k~gGM7FEx z_#AaKCHVtCxhu30M&~SefG#Q)hGT7K#!rk*X8bBP82khnyMVDfGybgxBVs`?&f_GC z=kd@tVMHzB_d5MA6tzL7dL>@664cA|zf{cjuVRD2WB4%?wV3UHi}lZI^?zRdpI_Dg zh4uTtVEt3QVv#mhFVp`Bp()1Y<`-45!QcTRIJ8P-`|qy0D`kxsaq-U1Vo)p=Wj9Br z6WicN&TNbR*%<3+OT!wN*n9??8}S!#PwB8}&=!c1sr&B_eOoyGH9IadYIydar7sxI%k?lqb>nT!}j0G6Hf%u7|ozpmC)+fJE=&6 zLA^I~Qqg$zd^#Yr2Z(e6b0#`iMtVEz2WwYpSqzaUC;WgqC%&Yb}rD z^X5-PN47Rok~ki@*wS!<)e8(@7}*gIw|kO|w6`#r}q4@U1 zvZR$mZg~Z!vvy?aCW2*QO;_=8Uu>q`>$m*Q^2hNtRR)7c2q4L=;*W|PXy5=Lsj-iN zqj;>69lONmc%^mh5w(7Vwwij$WpyXfEuNr`mu6Yr zX+q9Yhqh@eLED){X?h(f-|eBy>JDV0&$HB!rf4|FilLhw2wW3Zyu>&P+{PIdt!qGX z^ipFm9Mc>d2Di0m2zcY`QAbV@{BY9xqcPX7l`tKylIWEc>6HbiSDMLcL1D zQMPRqC#}oX-OF!SuSEZ9wu+OL4c1f|XkAlD|IZ}?sM&x6uJ~a`5R7YE`YV#aGYW(=BsXz-Q*}ip^c? z>P)6?GI!6mXE#+=?3^#Q=;yAgW#{g>HB|3_b${t z_xc=bSOA7Jp+xAy7+e_U~u#v=~(6)ipWjw7?mw>H~n7c|c3!gj(i4-<3jfgoW z4VuqU>B$7=WMIDR`fW%YZ*C~EgdZ{h9dXsE5M#f#=9XP)3D8kwi5CS$E4 z(i#N?@KzPkI&8YeqL6vQ08^Xn$r2#>zQ}jadQP02Y}q54U_^SH;3*~tS|Y68!HDyi!jB5GOVW%e1SQ}c57OBN>yrfx!FGq?J zjmbEv<&OzES*rVm94-(^+sf`dXq`{=I(i zBxwN*`_fuK(X;@|OkoK7>9qhc@@2IE6-u;#R~t*M)dGr63$RQRgkxQa7Jv=tXVU`2 zA_%vXep<;LoEvHZ1H`Up7IaD5)eAaj`nZ74ac4W+DYvKAs1qn@`dpSh_rxlD?#cD- zxhKt@i&IhDNn_$ppcHqWRx@X)W;4pFpSHhR6Fz8b*O}r564av5vsO49jBPQARx@i? 
z*=^dSX#(_!2Yy(+akXv0XrjR?|GiIzb~kBWvQnReb7bEafJOUVIv^ zs1hXmHPO=!zOL=wi_iOw*Ev1Az73QHS~Ulp!o{o#ScGe2p%s}qxon@DWM&}(e_5Hf zVE9~v)CKc`*mAsNFR7PV`j=WfQ!0iE=zLY#9g#t0N*0(l1{Nq{V1aUC6(}dy2jye} z%Enuw|NqLwk)K1ED7vCd*U;3vu>toIaF8&hv{ zdYMTv)iNnmY%qA&+DTzh*`%ORO@?WKFhhevz_c|?C@|mE_XkW2&lENg1hz5EieYoG zonxP=Gm--e9j2G~j6KMB{n-YeN0@o%{Yr(todURmv5F{)ax)gq&Z$^jf9TIf>lhp^ zAg9n2Zsq`VIMn>7L*p)&IA2!f5{HDuEZvOZ4H9DG(@B_oT)L?vj6eP`Csq`#R=tUh zwn@kfKy7fv<>=FRz4zrVvOTt=j7}{=i<`G<$h!Dr?i&|LF4#AMfd>OqqF6exm>7G3 z(LcehO(b?TYcWNFQCMdgK*~TkTGd(PaVRL;J>;**w|Rumc+$6Yqrus@Vi0 zl7)3bfsX2vrY6~_&*(c1POE`g8jy`;ng;&D9AuT z&I6!L2eS#NPtMWGyMk2BiPRnMv}?Vdqthv@`myrTBwFEVKrkmtf?PPgHE$Yu+58G=uRqS=X<@Gte z7PM0xib+3Sa3T3huUsUoG>4o~#`4P@N+TAwfLTf*u`pKAB}?MCft@C^CM!O%8Qyw#-i#F`8E#c&4^q`v$Bqr#VC&YdKHyXdMwam^A~Kpx|`G zwLpOe#*wo$qq$`&C*wYq27?>)Ui-9LT(Kd$zd^b1FySH9P~?3-(Wim`HVTu5*vxzB zn=~5TT}E%?wsQ)hJB8$&&56`vU`0zosTg&Fb_nhQ_&Zijq@-7YemhCQ;J!8Kbp@V< zABOS$%2$w2bKNWLw6F>Wb;3kk#0>{_r^ACecb0~O2F66h)xsDY0?H9K%(t~Ux0LM9 z&J`geVWpHaS{#B#6?ln@19YH*BLw0)&u&}}u$sXV+7XEzL`(k6Tiruy$-=f91H?V? 
z=mPnpI^rMwkC7;PQ+jsIKPO!+ul4+YZ-$HKnH{)B6zFZq)*?QbEi6I=MjGykB0wQv zX_8@;Y-?#YMn>3@jWI6-Gx`2C42v!sBZyP`b|%gi3MLYfu!BIjfPKOD0n$?Ff1^Ia@YV!0TxQDzrDODZ8Tr zfv@3%@zkk_5m24cr~1dvu!m{%z;+RaGXx!h`=O1$-Uwh@ zr93M}&k^Tv$b!NR>%zY8XkenIHnseza4{Fqvan0bAsr zMd8Ywa?9)dIJgpZ4uYLL=fxbJ@^(YZwLC$xyjFB6I=UQel$%l9X*aNt!s;zXx~vdx zdLo0OMR2NlS9Rxk-+e#p{iwQClB}{N+xJ}; zrUX(;3C2>41G>UvJB`(jrkpK`Sty?>^_=cVGRmWE%&1tmV4*-Fxmi z`|PvN{&n`*hfLQZhDtA9+x8ABUnB#d%(U9mqdc_2^wvo*U2Pq^F3T`?sG4ualBCfw z7>b$Y0{g!_+o*_CMHONKY;e8K?Rh5QnHE9B1UrX;Xr4VjfXK+bQ`> z$L#QHD;JwrcDWUBHi-b`7FGg3Q~@po#T~7I|=IzEN-psaPGIRSHuEzJo^2Cr zN?|ur+$)F_8V`qyev8e-=>cnOFZ-=`9rzG(h~xpK=#OpV`k_S)3QY&tA4JN!D*MIM zv7X8b52}^HgMabSc3gWaJ}Y;`jKhP;#hMlzY3I38(lY2<0$a{=Reyx1gea>4^3kRG z{xBfcqc3R@!te|!lW@qkh@Rl84NGhZsA}=>sIE-8W4aFH=Q$`bk*2Tk$;JrVUmpF5 zDOgSBgnW-Rh&JFKFB!3g?P?Lj8a%IxY4Dt`4DlIVY4EJBg$6GyXpjU((FtqNwjCP~ z3{sB3Cf3z8IgG4eK5vCs>yiI9GT)f z^L8N`@o?681@`OtPV6q&}cUXS6YIny{6BEMvHV`{NmZ2cXU z*#_=NPv4fCS}=&inEo*D7Ql*glx#j`*}^ZJ@`POB8*W69s;Vn=mUlw_i2;$ z5&LSIEB^4R@^fG-D+3SILUFf&T1*&bz)2k43Jjqfn{i{E!*9d zra~LkrCN6*{r9Ln#$;As$>R%e7_=sEPTtL&+{(Q_DCynr$BJ$$9b zp@{4u(T|)a)CZxJBI(3lF|fT%&_lZ6!2zxUeGjmXie` z3b95++xy%deIGzAcX|UBrqZx;ayHHd2dRXKFIwcm%#rk{piVbvb>3rwtx*N%pXS{- zY4fxjb~%Ke2iyj=;CI8|_0^DiPMZ}Qa3(|QaYK&`;?QGs z2Pvm?ANr*~=T1DMVrOX>^Orj|Z+SeOqgL<~GeI1#jC$P~nXpEOay-d?NC%d5!WiLu zE7sIi!+TRTNR=1gM7o&ls(}|>?fR4#9*Lj=J-(Yg;#Im2BAkcdRkpM73K) zkCuIh8T%h0i;bmQi<}-72?}(KaYxyghI2x)%#9u8wbwHNCgO{^P&1^suDsQQIg)`X zF6*=SII@siGyS26^oB6RqKc^9Jp9kSS1dPkx&DuZ~X z#-}O^x3eMmH$3W?YLI`Ko&d_?hxZphMC|j~gncHBMot@0LH+M6%0K)h9p&to5ek~p zop3JfP~A>*>v;>k-Jy0VG{sdVwsYa_BRt(APZISDGY_!esgt5`MgPa@-U^q@tsR*7 z&&qldJ`VYUO-W2Q9)k z&8gn%@lA8+8{aHDzFBsBv)tpGXg+<3M_^aRC8cvYvkHMwUMvuDE@}(}_xM>20qp4v z*KK6i#;;~Wg+H!iXGSvV8G85zg!4QDK(6G{QoQ0kBw$=Fy=di4zem zsH47c;-pDryi&%UE|thUlsEjNr}2k+vM#~JaZ76w9lB^3{P`L zI~I?yrFXbHA|(fcw|Wd3Cn{z`G2s+f5(M4P<`0BLJ`Zco893F$z!CJ?07aL2f<_oe zGB0f9kZa}060JPf*NTA5#6;H0kwvZ0gmuBM4l554bJAB%szWUSgC0_3j2L>%zon|# 
z%0y;LkvP!Lv9feLG|`Ck;OoKYxN=dVM^NCEQ+A>cFFEz(EXbREfvNY(t2qED{Ji4o z4Hj6xGJ~;%qT*E)hBS8i#Iv}HglGM%>0 z0)ebLI&~`S4m%>1Q$mz0wyV{SWJo#*&V{SxPgI4tDqM}eF`9bu}G3w?UcM=j2oyreF z?c^kJft}fbQK+M?BnC8HCN{pDtV)%vyQwrbd~$N}C)m7HPGJn`t1Mt6+*vWDDM+R(3C?XT9>K?p z*zEy}=b8v@`3Lsqo<)Ff2+%%_){q?FNN{8)+XkJ5v ze~8NcH|8I*@iM8X?FBSwE*cJ3Vsw-rzSjg%;S7Y(!&xm=Ng0MmL}Mp7X;5Zd4Q`x! z_}c4QTDDJ=wIfFvK+N_UAa}=t(nu`9Xow_rNAJVlM~C}9 z5(e;^`i?Q_b7Tz1g^31_AZM{{xS;<-X8kIbTzJC+fB~53OnV^@FLQxpoJ^XwjPE9s z@R+Q?g@aWQb0u0F^jSrgwv6vN2x-yxIuc1D3s7g6A$#?e0?7ySgOGB$awU4DK3`I= zENYcKoih5!PrEq#$WKU|X>@Jb!}CvRTpNM|I^PN%i2-uM)QL{mygI_Vfz2zQP>t7f zR)|o>lRqF?I7CM5{naF53+_0$U0IVMh@*?X5*^Gz2gfv!%GM(R$C5*;>oKmXEFy^r zUz50JL7SGi2QZK=iy%COzsw*si_fOfCF&c58x3)|9n(NCo~UjvUoawK5s9l*-m|(w z(P#Ahv&Y^arQiSZ&d9!+*DK7Qb9TLQ7t>2Mf6m~lY(yVjl4xIH^XH=r3Gt`I{4qk^ z0dJE+<7n+_PhOl~wt(M=ymnwya{d+uztM>H8x2Ok(Lx}b{x9dW<9(5M-;n2h9F|ug zHfcv2f?XA)538xrf;#})*ckb|$a6X)7-! z&lOTP)PR89i#Lr$O{ScI1xMv$&4mNL5%0-^8474#&_$Bjm;LNuRvs55cma;7Q+#(+S3a84mCmx!S70Kb3_2ngsme%EcvRgfJFsf68g$Yz7Fk?QKe6Y&`C}Rt zIl63iz`kMBwe(caS$UkZkew}KOLgpCJN#ufAPl)nBaY%x%Sm7iC#&gcV| zxysZRKYK6LinF)pNCpWI`MiQtY`R^foQ@xGU?aGtuG#GK?%6li>_6>e(WbP6+gCkDk>?oTia+B ze00hZ4SN?F|0R5MW+Q1Yb}7C}NqL!E$Ld9rG+PilI-1kb9G=JH%*73X7Ka!~t#FI9 zPB4tAG6xJ2GGh)fZC-aG3oFvUK-C(3Jo+ORe>RrbvtE#0vJPL*j&=J~`_jE9U~D4# z_*E*-7V`zg)3d!|$L*HyqVF_S`rM+@n!IO${=5Js;~Ia@^A}fL^o%udami0E@X5SB zxkO=gM@rH>?VQ9tu(3oUwFOX(la{R;!OQrkIXJ~J^mYhoISh0d5+dnfzQ#zlHSY_j zRPIWSxuG8`ag%I8QHRs z>|I;+mFn_hEXR*)?4ZoWtGz@dA&Hjvf(|44u&gPS;M&h3?F(zYa%UKIs;e(&{GyL% z{1CBdYrTry&-kHV>$Twf!dkCIPT878#jRN^6@1TJRq#C`dL?)}O6C{@4>CBNQw)HU zPRfnblTM_|G3j*JW){*)S6&~`6}Glt!92vuP-;I!zzUuVRA8V8mT;?gX1akCHpbo=zZ~Q~(~40385~?dUL+-BQ@qXT|2@ymd?T?ldwq;P%&QPdMikI?Qv#Hj#~}8S z3&OW2s6cl54)_;6o~d!ljxQX z8Ob7G#nl@G3k+%xDvO^rn?uH@Ic`M}MN7ra639jFO^xv?BtiyFE&aVc&yY$Tu+EDQ zuJqq(VjuuOQ`^wQ$O95iNVX1Uj?1Y5a0O&-&Nc?C9%c5SGB!HN(Uh>PWqzvxJDiHx zx6HI+U%)^Y@r5-wK!!xPF<+vEnwtCx9rA|Y#3MD3MCnw+`R`}Rmnf))wIiYpzsXi` 
zx^3_E4-rE?$(WMP8CjEv5rVebNymr?BuymPCAFfWOo85D%Dl^GiOxgdbH3P<&IDwF z9N_~^l;^k-pgDltwlX1SU^SK7UWx&MrRV97JpAnWuoXIXBf}@tOW)IKTmy5iz2>3x zsj2M`F&-+Sy})?C?Jv8T{VD+afnCBpgUj5M+Ujcro)MWghVq0dml0w{c2gQY$c~M@ zpl`kjj@@{4v};u4*Y3p;wlx~GkdwNWCTwl&kT|(%J?RWhsfvMkI4wn-Kmqdhw2C3~ z)Zp7-!1z#<=uTgD2)4L-k0ubNos8}utrZswZnq%=(X;9Vn~C{5+v+#fIdXvZ_p`m$ z)t!YC6c-s>OcveW+sc+}4){(eRM5Fn*`ZvB`WKox5LjC-Xn=hj4CfJ^*{}UVttDGmael#bEf*nho)-)T#5SD1!RD<7#4R{nSfal3)Q2c!CDw*8J%YuE znTO4aZF>8f4S}|E!Z-73tBb0gU!vNJEy9pm!0^=|`pB>k1#uCfHe$T9_QdX%D z+QOWy4zCi6&2TRk6?9rMy-5vw7S#Y96P*P&$!A8Uy2|;AiH_in_O4#MjTV>zMAA2Kv3%l+YMKO-#v4d5 zsbI1499K&$*|BV_5ZHi=2|$|}NUkn~-do>tzNr=eGpgh2H-@KG2Nu*h{a(mt-17ff z6*+Ir?}+^+Q>EuKWPmL28G{paooCw#IUZq76z|zeQ+-Uy6=t~;NH>?Kn{GCPuB$Wu z!rhV&2ITQ3l_`;^Ur&(JAw6L{&m1Guc1!+1vu5ahlgPbZj%+u}En$=wM+&VHz`4!2Ht zfI4XOur*!a>_;q{Fl>SskmHh^eQt^Ro})g`WtZdXNAoHdbM<3ORC$#BfB?HJSBLw# z3FEnBa~UoP|3K|3aLHVY)D%C{R>bp8pId-q6KxB=3wL%VBY(%G`EXq><3N~}*}bd7 zE5SeM1ClWM`Uo6~t=4@hZD`rtPnM5>lT8msr<6c|J@wl5te19%Q*lNVo$T`x`_+*+~wYpXhQGq2B1xbwr$2> z>$btUhO7rm+6?DIQ)~RpH#py4v_U`?%hB(!-9Oq@qrt8!#OVJ-TGouNCRo7+#9rz0c1I?oMeAK`J8TjAH#JpxFum?w2*Fl*{kvMX1fNtuc=MLvpaq6w?dwXA++JOwj#*vt z4?Kwbn%ndcz4luP=mB92_!Mksvw~)?W+y>+1EsVJ+4WMs|0Am7%|ycc>uFWQYXTwj zICbJziC{+|vUj%xf9bD(<*(n?^_(?6mhXH&X91Dt_3Av`9&iBsE5@5$`U>FFlfRRD zAl{&il*4z(-CvDYmWIMADn%wj5WKfC<-J5wgdzoz49_FF# z+B>$L3YeXIz_uGrIetJiERCzaY2?bEYu_H5uGW>0ZBMrK_8Vjk@286N!uPG={d~$a zcH8#mAEjZL+5#}ywvTSO{RTEO-L5^+(s2SsPeW)aLCewqtD7lLjL~OG5UHh3ry3~i z!lTAa&^?c4Bw*X!N3MN!%b$6)7g-3if2RRJVT1| z$7sv0z8dqHRFse;8#C0|%Zw5q7e@(<*%T8|wW4nzW?$Yo4P4PU0fdbcmO8E1Xm{Z_ zfoswB3l5YWSMgE?%9MYm-W3fLQ;G?saWUVnpI7?rClhm`k`G3uZ!jwPa8>dlbRJ_3 zp+s;)aj@czmtz-ZM%miy6B?u9KfZ>u8ebIy4JQXzl>=6Bf`GsLSFH``UI+hFwR7xh zzjJJsse2`CRzqrBR!Tu(@_T;t0$i5zvur^=<5_?hz)Rw`K%Q~i+G1`iq2I?Fu8@ls zc&G!E{at64;-p(AnVDVVq+8&mN|C?^=ze~FDn){0r8UP&352xtOWCws3pX)V%2s^0 z!AsnQrJ6yvL*LOtO|-z7n3pF8lO4D{rFx_##a#9cEDy8>=^Z@&$BK3435BD zaYi-Hi~|`3ruS3f1(62k8-c)Pdz{l1BkN%z^|-ZqF6w3;4^PQ#1Y(;Kbbg*mwZqb2 
z;Qs<_lK!${8nh_ZMC7-4x(p-pBGNQ-{EP6DDYzakK9q3j`MRHHZoq1oUnMlP9qdb! z3DG2`MjmPD&SZR$k{`B$R!e9A5uLaz`ZsO6;k0dPYP8K#=+!eQQMP2)ze7iu}qD(Zrik-WuCl zhC}@nIUO`wQvVRm${eAc-8*%~Qk>S+jn)osZL}=0v%(E8eW)E_my?2G#L*jwIa-JrT|GIA=ZJ~z)(vr* z1@sZU!!yTx*yrKVx2NJF<~lEkKZ~X_nVR>9rLc$ld!u=POefloJgPP&i84@PQUNq8 zAeoDYbOm?aG)y$7{`Ml=?Uh+T*nj z_ewz$#5X$a;YPi%2S#jCv&cPiOK?iY7Hvp%PQT29Bf6Qz_A+F#SS%#5%nFS|?9sQ4 z%=SMjfJ!rQ z@TY5)+|tqvkT;FOwO8qsjgH}O;dxha;gU!3Kbi9g0 zR5S!Fa7;&nyH>1&dZ1?aDoH-?E~f^QQd&wc5@I>U!9=V%Ol%$PiqvCcnw$n%M5FO_ z>yFnlh=+CPla2t*4GC{#vv#sBK*hKMm>hdXj8hgH3{EfinyfIgifjCjFl^!^Tx@nm z=Z$2ZbDGoTJ8cbUC@po<&rr#nRV9P3Q0w1Xtw&!%t#g&`&#Lz0BzTN}Ci*Y6_F6(W!~s&A}!my*|*ZByy@>?s}Ep}&E3Q&dGENHHsNLbg5{*H;M z1D2RW{we1ix~Dq!Lua)^pFGp-fMO!Y0d;14qWm|qwy4ejGMdUtM13Xvuf7tb+BNAT z@k_Xj`(-q1E)I?NM<~JZ+D5 z*E#*uBcc|s`vvj1ZBtK8&&f zB(+J&TKfR;+i@N5RE2}7OQB9fZ-Ly#FL1)7aM`_8m&xvRUg=$(PF2+2YIsBZnzwNM zQSg?B2z&{;BmCd+#n#1WSZ^`jTRWKC2I~Nv0f8fw2%P3Y*k%084kGTvWw)U^3bgu~ z$wJYl6&*U5yqTiOHVPkkbjgB4R&bENOTC_;w|VvSw;f-{q+9zy(h-h49+KgVI!f%s zZBC+vqko!(E4M8g8OUPGE6Y=p++J!1%+qE(M9O~VgG!K?-^!?@UMm!c1=oie(KTF> zHStxyz)n^kJ)}qRRg>K{QsALa`!XQ_8@GLY-Gqk({OR2w_3>`e8^|14pFs_w_rbdq zq;mHNQr`z5OI^+2#hamAZ-<=ZIN~lU-;Vw(M<8pIZnFVn0t$yC)?I;tQDYMhh)3c< z;cf^tA@tep+(RLYr=ZXwvZ+^D%N&9hk61=k1J9Ln9e3pooq(64L!M}$FQRJs7BAnH zhebFR`~`%NV-ZS?N4)3im=P<~7{3`z%spxdn{${t)%=KthHcfSI4WPzZf?EQ!@I?$&E68R>mteMXu%LwZD+m*2H-IVKr|_#@C?n z)#C97(UF60lm7?AXZU|D|F1Hgd0pH{>WBj#tkk4i6C(r~;x-sk#u;u9XW$7Orsg<| z@kZ@l@jQ{o)L4Rxl_18!wDi_BXf|9Y(QNA7j@cI+ogmvzsVzVXGFEDrY;?u~lYTT) zO_)P^Bb6mnzS8~fNgmlMHA;HG9P2_pz*FNJbS?qW zSpo}K#oB7-igED?Bk_&KcHL^ms&u6QlinsV0D;MA4dqE$s*A20j;_NtRCp~3a{-_p zuVg$(^c!D0kzDWTeuf1V^xT!*0k$%Ord8m2MI5tWp;6$$3MwE6z~o!44w^vAwA^VI z?Rqe8>5_f}(EOvCY|5uf#X_h3B2G2FR{o`|3AI|I(8^@EJE&r#>3bxBG}Z7E=t>RxtUFq9Bh2S$*D}FGP_Q>VLC8pM$dY0VOdy}v z65bg%=EnQ_$NbcyF+Z_r%w1|dt)OY3&$)q?ADf!b>7s%Dt_`$CqoZhy;rK?E_1X0G zY)r}ZqA|rZ(wG9B#?*$BVXYtBv6(R)&c>91j#o}Hs;>Wy_*&d5bohE3QAEC{DCaE! 
zW8%mf2fiDos2`4)PZ$aY0%e#ag#o3^oOK5PJC>>aV8(oBJj z@o?v3lLYNOM$>s)D|*xw)0G}|(OIJ|`o1J}ff}gGL2h8VgtJ1zZ~+P3>&2-CZM+9r z5e90%3OH!^{ghhi=jR7x&{CX4b)oF6A0fu6!77=-Q!zBHV6oYzAQ(^@n3!ZB$T znZlIbDEvHU3iCGPXqnj)P0n4ilbD~A#vgX;my1eg#R_#yKstiirTSRZpL8_i#4)rm zsjrz^No_Lvdy=rpiGXS)Zc%TGLG!-zHl}WWPk3w$#X7^`L+CZ4t0MrA9#T`0llV!a z*s1+`qb1&~3+b$T(%-4))QyIf!T$3)mvzRku^%aP22KZWo0RT4$#RAD0X20}E4}?4 zS#4zT%gO7cmnJ`t*tq_Vhq`YRMX_A|CS5N*n%p?i9j7f*&tRCZqnpVJn08le#4^?I ze5_0O3;OkA-Ph~7HENZx0LOR{(5Wu+k(QM1r7!2W^t)4Nx)lz-OE) zJEzsp2-{vlZKycZNAzZ>IMj#SlfqCB*%0F>qQ1#wQ08qsA^^}4lKLIQ`v4b%2GKTt zIqPGBy4`^z*A#{iEK)8}X2Ga{pl{|?G95@uOZ}}TcX2*S_p%Ku z*bW20ZMf?=?e&bRzm5Wn6oOOMa1nPW+>-+Cyl@~*o?sKw24PB|tBy|Ehpu?g3C;{* zAZwCu1|@wnml|*w`Z5E7YQV(6+Z^8T=*Yol7;-p6JIJ5364VMw7U9g6!$-i`#gQ&C z=#kgpk=T$C=jy%palhMl@C)i(ne@o5aOI8 zzH-oi##8snP5CD_5gIlJyfu{EkZbEVP&;HnE#mTOSk@5=y{UZI_Fl0UmKiuJn5>Fd zyb~>Z5X09fm!R#pN{$=KZ-dY>Z8r9(uV)w{@T?Zc`n49ybco83{bqZ`ui^Zw;z-5C zk&5HnG+=AvQPnZrw|Wig6`RHm9M)_M^=3EBy&K$Ji{AG(Acezgu7MA3uZ{lEdVZ~W zh|KP}fs)=N;w!k0w3vZ|-EG3q3UD@HtTkTomNjC1t9Y_vvU@Gu6clv{jn@S4u8o`G zuI6iEE=o5LQ!=JV+oQs=Cs^88bvrH)SsvV?*JzITGRbS1KywmpVmaMciwWA99(DT2 zjP1P5un+z;c#hq9;pnPg<2|Yo47J8d&;KaLjpHxtHBD}d*T=8*SYMxyRZQ8}>emf^ ztk!2!fnWl?Vyrae8j1DU=$Omvwb?JddaWC)8{*i;yKk%p55{ZFIOuKz&ue8h=L2M_ zRQzgIb|v-KviU$)f>TdP^s~k+gjCZr?lL27rXBs$h%i{yXBYiv7yM`E+-K)p`3nx6 z-*ZNo%AfS*Px$i3T=`?J{0Vn)j*NL68N>0bC!MYj&?LmeSpFyF%0jfZ+n-HCgS_T}o zSXl>NuWe{PUNu3b*S=*9pHWouMDq@|U`%>A6Mc;HHaY5`j#bX|qE~3%JG3SQ!<^EU zp`CX)pLfWfl5lf)YDe=nQc}(6kbS({f4s|oJiXoMV%mMR%UyE1VBtkA?)4wd`j2MZ zM>DSUtS;^7dm~CEy>5*l_^Z@8Y* z)p-uR+rWI7VBShn$N9`3DgQyoctkddSra#?*O(Mmr}uGFVBVV7 z3I(Y399t$tT822II!J(TD3{S5)5y7O&bdoQ=1%sS<}M zHg&-H)|IQy*2t&{)hj^rXh+{iHG(k_8sGa7aQ;R9rc``W@^54bE>{(&WA(l!<9hK& zp^;MYO;Bv)3Yy10Ei|7&Wj(2oKsg7#uc~tYn?mmwfZSKXav=BhtPe>UF2!h;gR-y1 zYATi0kHqpISNe+;;P2~m*k7!G|585!3Ox&pZx(c>P(j`^cu=8d%TrPxV70WqOgH;l z?CU0b8+zaLjq?Je6`(Ble^VIEKGgIPV7Z92zM<`-nrztI>>~?+6j>}dFKhiOlTz-# z@G6rs>K9A+UjW)Bqgt`UlFkR0wUU%va%Pdl~$wW$=a^%MexQxEkf9)~6O@MeTK^ 
z>n^eEMs>W6e?}@eT8J$z+7ehgI;j^w>lRM6O#g=H2Xg)qA8@ik{!EFj^oY6R*}Ct$ zgj2+xut8s_)S|lJNI8);+257zz*nmcW zf@C!l*MMQvj{X}RO7mgvLRM(T_&DTB9P`;RR!BEF_twdsl_rZmx=W zBAQxpDs$z15JOn>#-hQj|!c_ ztDV^6k`asIdU%!p>}ZbH(`DPxdwDvvAVVEcQ^0E;+IY(2^>U$&=0+e^Nqd<#u(L@a zAsJzF7^odXS9(i6OVR_xF#p7ge?m});S-!*Z67j;GQT+kKf%(fRhx67TIB^qFl6avzM4slUh zqzr%A>+-6&%Z)O2`@x8wvPn0yVA9pK#Y$ho;VfFnkV%T-0_-%+kzCdTZ0Pa@@wj=L z)EDfpVrs6A{nrTmW5XO?{0Gbxfg9|0i8z88FjKafR-l|b*4qlcXKjgH5 zVq|NJU=c}8rYed-_Cvf3xyOkF#t%7jz~(I{6J-3*j?C1&)zOye9)?2v5P!V{1f!-J zTyW+z>Ckr=?q(hCd~7QL60AK%U)T67vm4HESnHp|d0_Y+odgX$S{Nw?h8N>pTYYh^ z;*G{12ji|hPebWS-Ye$|+>6+GDoQv!HjL{?LP>w#B3ZX(^6>Uf;~Id~nT4VfX8U`s zUmPsJxlzJHK6kF5b6Msk8x5qLaKA*5x)%QPDAh$l>Pn)xRFJy(1@*zMu(d5nU6iP` zFu1g+PD96Or4|as2xu{hvp~(VKBOyL`hc!Z`q??8`b6i3rq3-H?Ugpi+-UF5M|&o7!s0pCz|i>U2ohk+xbkah3J&vb=(kv8_<1+zPpaFz{~=5vOYVluwAP zk^h|aVX!K)!`wGMYFc|Gqfdgs4Z$8Z6N=#f0LmvCHrqv+cZFKC$PF_tvv8_urEmei z0`8)aQFc_nEdatFm>ut1;Ra*o-H8uM{-12&T4%2cvcle1+k|QO^6Sc3h%y^e?Qqg{ zY$CS1=C~LEfK&P{(L$mJe0Yt$jq)?(vVxbIccV_M7 zH^-I`T+Ky0F0X*fGE>JOKH#_Z+7C7~LmLaW_L`Ng^b;bj=|`0n!ob9tsXO!M`i!Ed%_$)t>=du6wQM$-k(^wYTk6c9b* zvRL<}x6uC>_6Pca_x>>UDAHzX6kXv$hjm3(AJUZ!69@RqA@tHPYuu4qMA|qW?8Bep)PdjY^bVD~ z5(=(6HQo|~*<#NHVlj7odM5MXp}O;6Cl5Ryz#iejSyF1t55zpf9O1!v9^@s?^5DWB zTZzPS9xx|(G|wZ-Xv03AsqZJ$CuZ?U-?u4g^YrzS^_jR1)KE#dT`Uv0uhK)@h8%>L zx2_{YLJ*QY5T~BDdd9tN%RY?k;a0NfyslH0e7Kr&HUz)oiFiyI`;fVWc5= zY>Y;B@zj|u((`A{{zLapLbEvkm3D9p-7fC^?Napq6)zr8OfN^kfHqTy9A$QvrMgzk zE_qcN#NA1Ab#4d_5(8#QPt@GYMa#B1QNOfKsl&1;CH2gPF|VA*Ir-Iu^YBV>ZUuwb z1Yt!{6b?tuSvzYurWLkOFoa}pEE$%!5P2LOh5|Ugrfw&Qyu1-NEdnmcxI9~z_wr>` zx9zhTrml|uNeO5a)s*L*6DgunlBgrw(i3TkSU(qJt0@_VE77kHCVAG~mj#$RK;7(y zyKLt9YuQ!Fq{XaH5Qh!DnHe~g5uz0ey;+<@Ez8p$n0g$npx0JK60dzT`wKo+V zli-mvDNSZ*8p^2Aq?G%|Wd&j~T3(}UYHHfz7n5UwJNb{K$|Cs^&?E<*{^HE5K!Lwv zw)@V;aunEHXt=hVH_1<7qdFT6@_83-we7Jx$i`i@KfiSaeJll!>2RB-J~|-a;Td8o1s)I4@`DOz0jUG}4b)I;OR5 znj(9>|iQ-(44 zt1p4$I^Ri;>z*^NTT+}Yl)@&}v7q(CFswWoK5;{kZkEnd!ICug0q}Slf{BKL698d22xKx=)`e>ceXGa@yp;=L 
zcM~u`ISv#7<-i;FKtUY>O8po|0(04f01hElQ~|0&-ka-Qrb@`4TvLL^M39WhfL+{9 z&O8&?T3+A;P-qR5x-vGkNeN9{7HcqvsBnio64_bdENGBjYyHvnVcBKkJ2RN&282vb zN^e3voGu%QmAMgy+)s(iALl>$L^+i$y;(jQ8b1-He^8siSJUN0JyYnAV>%nMydgNQ zMZ>|Nko|b&dgI#bu4k)Gx2us7CD2Jwrd+YQYfRX)_N$Te8%w)tNlCKzg$oAySjQGB zxAEr*NmJ_CTLPW7KpOI(eIqToB8HRbM7>K{_9fO3?Wx7e=O2{Pc^w^Fe%Ha?m#!** zsBGxoylSaBm#JlpzQCGDUc;V9o$hyVds4gmg}bG19E_$@meVt6pXKzlbkOM8j*xSq z)=MZDwe`V<;2fwnZN+yuhX?@95QP1NdPPzHx-4xRofuQuRM>L5>XzU+CFp+@q5Hb> zd?!~MZsMc#XbvV*PUj6CRMqIq0Lno<^+ZaS={5#Upd{QH1A09Nl?Uw``rAF`>*s_K zYj-D!($wx5|NJyp-!2>xqGC3c(__{;r!Dw)cQ=adS`)o?cVS@!ZDg9ONT$hYaF?Fx z7&7@AMx5bbm_E}MLjtbhw{4nD5yIZ{MJ>bC^@wRbCQ z-R;f?sR!XZw(A1=?fhX&W7MS9cIol9hfh4pya<$d*_9m^VWrDa2#vz zi?zvL*D+C^r6?$;zdJawv1GFr^lb=c1_g(qGWd`(nA?52XK-RX@OXm~O3(6YhF8po z!K7|R#yg*8#@6?BIc)_6cd%P>_;lT-*#sfzNq%aOPRD9)o~1b>bZVm0kT6U%jJ{S* z-}GPtO2BX_ebeo%8J+n2Oe8NM4`&9qp=BO?2Ig(uYIVa5K?Zz%TOd=hu_mrccBg$e`p53nAR~c$(5P`Y0^v|2J*8yiVe~$7?$O#|n*oZ{ z2&0U|4mV)=j>PP&HE{Fk298E%c#j48`ED~%S;8YxA|=E&jRzbNS<|WKbfr_oOBg|( z@XwEP^>cAfk95ibj#q<+7!yAirw592(VFPZ#T^PrcSGxYWrKA~fWpOSScR@@f3RAV zz`+N3R`bYVpT6zlDMNfq@KJp=TlmT`#X|$VLre8lNS-&*%YKowL<#gVs~r|R`*pxg zQA46>dx=?6-}a>F1C`^+_im4C{3`zpEj76-09H{at?z#1AUGhnvQ=?H;cGiz{hqpJs)GDcn|?jAU?gl2W0lB1+gqS{30%v z7i#J7ijk2Z7||d1--==BeZH?jdI4he=PyW5r{W^+ zcl+8xu|uEB9`h;1j^w}FYYWBZ^2e^&vHW*?O`+I{{IM%`D*xTSrcmrm{@4{em;Y{G zE$)mil(wR$26!u~x1b{|AGDYB0!6bq)`zy@m+v1y669}72hmB(PfM7N=0Lbcf@s;I zs}~f-C$e0vS1l+SE?;!jf}-W+i>_Qyw6c8B6$^@1moG{rlqV5~f!gv#*#O#Cw7z`N zXhG4&@**R+Sx8LA^<8Xa?_k85wwZJfsb&GwMm>8KoSKrbo+)m zl%kyOrr`>mxTGvaIrdFgCycG^52%oZ5&em&H8X7%=Mz3rY!X}lCeo^G4WYm!U5G9%cPq_Dl3t;dB?+^eRh#mvLkD>cC0g zq%dQ}p+PoQ2Q>7I)s(J0zr=GtRvZ>oP7fKOa!e5S7dR})jTNWZ6voP$=;<7Xv{dMP z#!M=kHrHe@EYbf_VQCRb%21V$SNqKaZlz5$yiF*=;|Z>q#?66*izsOvE8*tAieh9o zZZ45eKfOv|69nsYW_9t*pqwfHV_15!BwtD~%|8Z*%xF`uSi}sY?}7jl@+Xzz!5~JW z6%O7IfO0AGiRJ45ZZ&hz>>^sXiqn8aB?LcNB5w}aQYnCGBFjHimEWS~GA^;uRI#v& z)Y8S&)2nPFZCnO3F7E(XxKs5xkI&tBSx(_VG&f?NR{UPK+CCoM12!!U8F3CW?5A;O 
z@PHifH;0ed?X(X@VkOF|As=k?Mfrcq;a06kOqY^@o&zXv_daS-s(qHH_&eaUia)GH zcE-z{L@=S0#D=u+9}T**q9P%xM>_%2BC)*z${)0*Q>2?03un- zzG%6P9#NEpCK@WGG8;Qj5H2Q#vDr$ed{l;R@TU0>zSLJ?BH?}M3lvebkuM_`?uK_#9V)h2Ml^(bB1?4h33Gaq| z@?;uZU?5FU#KM6x0*R?)%!L%js%xK6Htw9abT`^-YZB{w6VMKgHfPo*km(??t?1)u zPgUGgRPxa_mLB8iPHe!S+QOTyll_SY>LZ@TKT|@+t6E z#{=pe=Lj&3p09-i&L?((!zsK|(n!9ponquX5CucKIQ}wkCGI|@ALj44&xnr~@vfvQ zzndLXFd$doosh73F7I3DxxBZ}b9vfo!WQ~NQEwIMHSgtMk9aHo{J;CDPfqtrdXB$? z-hFvbQZ?EgfOe8jk90fzqzh@?&7Rz%_wpUUF}OK&;Pkwg0F+=i$g9f632|}*PJOyp z-dLSuf$!22?V$5wlI%y^py0(cqNRHAz#+RVk&g~Pi?-BOMYOOXqj!?vn9~@G}DVyRr;V>1JNt1T; zNBCVF;_pv74)IEmFUCj;xkRT;ihE_~*Wh=0=vPT$7eK@q9^e9B5OzBbyR9o@h3)yt1_vS5+AzSYQxKWHlQi~hu!OhX-PJjL-|(4zr^l- zO9CF{8!q?K+{?u*57v-aoGCnYN?`+Gx=lEhlzBV z17Q;Cs=!G_Kw_+E{g!~;Enl;@YYwG0*o;#;T<{YN5z8QQd9(&$g0DKdM@FuZlox(O*ko@-=aH?)(|!h!U6A zus~FEloov&l#zq5T!JvlDS0#)VW$N+=_9J9p6GWvIy(^!k+m#ML^ljh8Nl`b zAzo}O-G@LXn7SX&u4MGPR)JyrY_9@e`$z5b``zcJfmsi^YZzf<$Lu&-p}k+TWxIDK zp`jxbY!V%uuYd)-a`PV?Vy;#}+Vbv7q`0C;48`A8MGgBpIi|T^-MRb82 z2-a;UdH%Mz8s-mb%2vai3g^@!n>3OwnlH9W!XT}W%WibB8vJUQ?%Zk^(|;oRCw5<( zG(rE{f*(pLyoYo=GrF;BhWRK;wfS5ZH>0I7FHD>wRNn^%oFY{3mnLw;wVH3X7KV-> zo&(R@MC~m6jdO8eZLvGI9wzVrOZ|{o=dy{Jov|a<#iaZ+Xt7_8*tri{O@xEmqS#Vr z`Z7~=rZ38Yd=+t3N-ZBa&ZXtA*8Y;?EU{B;VFhUH;a2Il9C4K%g_5*1hyI3w^3P>9 zENg$65rt(jW*|e&Bw|$bEtd+~9c@rV8`*e^DMWWGT(yt}rD)*AhMe}Ceg*?W`W2!} zT8%qJ(P$;pNn)ua*F!5Xyi)WDXaTl6`HM$5U0%ujbKW zsNH5Db80}Wl!ifVIu}I$7l$B=Jo~v;uQtX*MWau`MtdWdakr97LNty72zZ;r;IJ zotv2&JR1)mk&VgShG0seVQdP8Fd;|vo9@GSVK(?XZVvBjeY_FYoE?6qyvSr$Y>dbq zj>5UiCSu#tJiP%OZsk49&I{0V5b+sGdn5Ko=@WO^0dp{))5vhou zx-k{Tx=mvzTft9_jY}UVT}*nDb1sY#1SZC#QF5fr-567Oj8-Yn!O}v7jyQJ5LZCY- z%B5cxX+13Nx=J2aIDX+fdGva< z$m`boCAXR%OU2Du2$guVYT6K-FQv9s1^Y**#NiwGQIxpjp9PvbC6y|KgdggS@8p2* zWTz?y3w+2dnFS8Mi(p^9biH{*$2}vZ=x*FlxS+)8qQ2KvnlfVuoc_KaVhd8Q?}uSm zeVvz&v)A1vP~m8VPT;5UyO~fCqW2;eIl&|>x&Ty&-4Q%fM^VkwDFRV?jqfO1+pb2T zbDU%K3N$9CqntPefEa7rSEv23>2cK-&62@NVjU%y-wGQ`5B)h2(ldN0we3XN%eL%; 
z|8vBlI~z!2nuSS;G+$gxCeY86#XNtzR0_RGB&QlghLy^w`{@AgK?Y7b8+Ddh8#b;S zrY< z8dXNmvqUXZ&Bf4|;%NsXKJt)#OuN#roYB_$rM!-sqKvFh=bH4Xw)ntM^9dNxwjmDn zn?+nc%FYax4*`75gt@Lponi)%?H2;lw0H|2!$+GmM9p8oZNtXAhr=i7pUFrR5H!+? zm*4gDYivTTPHqT-^jiU3;u^Cm#$n-g<7nRs6j&NKsoG+0R3s|_o&n)S4V)7Y36N40 z>tf^s%*St8nt97ZY>p3&g=|RzuK9G7Pw}^H2v$nk5prnn#M?*7@gEw#|Oo1zh;EEJGrwg$LA<;Y$!CF2!bAE6eCza8m4swD@Qa|$3Y{{RS_w7rg|^D9R2i`lD}%kEJg0e?r>&l~Dr44#>Olk;dN%|gG`m4C z)zXpZ4`QL$7`g|{moM(Gl_`L0E~j7Z!Msh$7s+Euh2%n z$+0fdW^%! z8))^kEzs+mPEu9TK@3KWhs1dtK}r zln4dhjpfDVo=LMwa9=VJ%M22Zex(uatI{+LW(d_8?GL0k+>X`ox#0GPxIpy$h2$T4 zs1t-+xt}rI#Ba!-zn?0x7Y^Q3oRBa+zaIdo`re>uwj~UTrQx2eL1~8zUK=)g zxFv}Oyg?%62_%ZTB==xYG~Fm{h8V8n+xXIKFsb>Jt@La*Ib`+JN>%$xDd0Qv2^32? ze}Gv`mDcV?*l=dTR@Ug@R5sD?#V$N(;vP@lk+6^%hg-pz^f#x_i+S{v z`7z0xT$hhwlyOm$Z|9mkh$^M~(GlDGqW=V|kUnU-L8d7<&ZqO0o311{EJ;@d7+Ld1 zDHZUCdwY`AkZmV|Cz5vD-gY*4>T&*T=fTuIP}GtFFAYOnzG+VLQG<&=qZ4UkTPcm> z%D%I?EN_W5 z>8?>mp|Op)>jRK?Cuot@XHEwh%R%(_Sa|YiSnZ_ak<^9`RUMcWz!0aakVGDVMM#N} zoArY>z!p9xrF>wdgOw)gBEZF>rM;^n#v7KdUk(d~3^I4w$?1Zo8SOza7W=ZDfFO5@ z|4eP;8GNj{csrm840C#F#FF+tMjlQXFuoT49t_t}KSc*YYF-1RI?A7|Y?{YS17IG` zO1hVm&kDN)E9n6q6m|(#(w#gg>=LY`yLnL9C0I#kc#!Q9Y@>^VKlY;Jy9C{YvK3WN z3%ED1sTprd+VbldH>_K@l3<5Hu@DeM!np2w9tV5OlEA~RfEoVUW*}!G)5(-&?$&dc z-Xv0jrfvxyXF<=KjrX}HQwUkK?cfP6xCQRF6~FKSS?EYsD1$GJedIo1^GSZElyL>W zh!1-qF4)cXDl>rJHeGR&Djjbr`kPA9+=r^GMU?bOO3!V+C>9nn0ll)p0X9NYYs!pfFVVkcdQxd9rf^uEvBm-$~MTljAM`@E3?ldcsBWP01zR?b|hFp^nYhM zs%p_9F6$zr4#~1IAvh|>j4M0tN-u8-*x zqUQ>2ndx?~$%K2+0XZc@SIjQ}cMX66-9;GAFxkiZdwcR8ZMxsn+%W+Fg%3UQR{S(G!Z z;K-vWBIFAJ-^mb;r^MK5kTis@Zd)R;|DQojjdDvtqwVx{@&MU>2Gn7g!#m|Dl0*Yl zrc{~NSgaJwD<4R{VYY}JoBF5q04eDd0_=$7Cvq7tbS+cV$bC7NaXEdpM$OiFF#uzC zH%=#A(gLadYdPs{g$Xj}C3cQwQ^T0J)_UT-%fKjKE}b(qg)U`F4MFC&HE?pKj|$23 z5-y-Q<`OOVD&ZEK%itzGr{I9w^!1wh9h6)R-6Hq5+BFAK4|~cyg7@C7Q5gPAKFAAH z0Eb2;uB6S@oeK~c^-zmxbt&;R`_@t=P)I$%zfS&^ad~rk&DmhN9Zt@NZC@boAeu46 zFo=@+z^lhMIRj`9k?9Xure$zm27L6c;%S zdGJ#hRNVruD&7+h52~AkRGadQv+_XU5OAaWt?xuzFzMArhIIQH!G2#7MxX7Vo%kg- 
zIHOX}K-ijpYRiyOmn%PdexNt)@`ARoLqQ1^d-0v?xb&-Pj#(gwWP;4!mZNZ zMD*J02gt0!qLJkXcGOLw&D$A#p`Mzgj^t*{$|a}WY>xx{n}kx!({kg zhdJe}bOc`z6AEEKIl6OmS+I!mG<{90(=2ba$kbw4_Qe1T99+5F%}AxErHH zNza|samUp2Rg=82mR`^DkpDISxqW-y1j+6H(K+-mC zpTNngI2HaWaFw6|p<${NZpG~85t~!xp(h|nuwgu8x?ovKtE>nDTC}i$+fL=+R2DY^ z(zeEBtgbS#A(UH5t+(3UD0eG)!pd(s`V@zeD1C zN3?8_!$bWRG+4K&i{ok7GjFrq-9)r(l809jE##Au>Br#8dUUV|DhJ=K_(L6Bct#0; z1u}K!R@f7l+e{O20ljS()1n+K*e-?GNK>-Sxa9;qr*&gC;?dcrgqei+DvA#@;ysb! z2o4OvHuQe+2!~&eH#!c)Fal{H{iwpviH+o|@dqZ-apH6hu|ixH*C_QbL%JeFGepWn8uJ+w z%*X>qJ;Of$(7F@nRZV7Jx@iS#swz(zbGa;)O7?1Nn_K6KvnXNEPc{L0K4jFuu!vyw zb`CD6=s$E~zH;Hlys*fgKMcc~n@g@1lU~f61|gqZDoOb*pc=E0X2H*UNaM*#MKY@7 zilPK*z3m$!eyLn;t1hB(0No|^vaBUy7?!(wC!_7dp0BqZ; zlp}|;u$2@6^VcCDIx-@IdM%OorYMi1C>v}IM#*mFavt?ZFPP!U%$2kTk+iBjqmYa7 z5MxuUS4ahe-}2a42KP$(3vo z69$zH1rCXKMPF|tNmxu`$t^nG+z@=-k_-x+lD6gO&)OWhgRx?a%0G^N&BFGTnnP|F zIGTIGl*Tl(cg$41Lxz#c@pjQNgzt-$eVK7rMjp<1b9oa*;5d!wJvvnpG!UNlMue!~ zRr5Y|#b--y1)X_gBFM%HI&v$%c%S_GZMI|w1=(Uv&)u!!Lm(# zLYoIg+pkHV|Ia(3XxFu^e-6W{qd(&KihVN1js%iGgdbDE7zCOLpK;>QBq`K3t2gJA z+Aj!!e_eT;?Z@VpbbP&1$Ztsx)IS!`yi{D71oR!IE}h(D-g+*`yjX?g|HKxRyVRKa z7*4cvE7Y-A5}W=(;t;5mn6+RqmUf<~e+8}{fK^x&9KpCc*c4Z{ChUGRVJh-9UwN($ zDm@b@L0y_1TAp@X9n$UN>Kb{uTGc3{pm=od;OdTpi;0;3+_4wlG8*QLR5V*0xY4*S z{mPG?veEe0c@Tq!!X~0AT9T^54}2r~Q|Vpwc#2tSqTZ|=w?3fey`%G6V`1ybns#O# zs=_p=uI6}4f-+0Mqb8086~Hq=&Uc!O9w7rOO(}Ar0utTY7QqJ+duSyfgG1r9q*(*o z$iJFqO*{I8OgiS9p|yqy&I=+6DiK-UsTAdhVYW(dN=OBME3db*JV_60XjL?>Qmjp8 zrM4;p{7$}52tJ3%Yh00-9F!yB{)jGc2SKu(8f5;~(n~P8x*)-=z$@ehX53EcYES5n zjaUjVefL|T2aAW?rJxGg-*q+_!R2HyAACZRUn{H!w~dv7t7&lqJBh;kv_*;G*jr*eAQT~Q7EJFKBv`1PxRgp<4{Xa_kOc7&FMKRiE&7%k?vUAhuE;_ zYf3h66Gd2|XFC`-ojHAhIQ?mj;chFn!~QDXU#uhgECWGKiM$iLNkJal7Dt@0c?0Kq zKo}|t#E6yN*Bzvq?+(tEzV4tB^mWIm!@7eQ@!gRj!$GVJQF;PiJ2NUB3UMpV;a<*D z6n^2E+42FUU|ogv11QxVWtXS6sa0(>6wZdZJ=OBm2n&dDb5SX zcurL{)n$h4!B~;Fx}ZuR1UYnO6op<(FX;ZfDx9fKL?6?p1b&cvrIHNyOgMO$IDdIR zAEi63<^B9+N?arK@1|{Z74!j0CByq*wa$hcF6>G$I+?NF;S0@1I^AVYd`vLjO6%}# 
z>?jqim+df0{LAX_v#(%>H67N4JFA@42t|>ePFO||%nI|vo3jzH(eNX~7-&Vt#)FZ; zEc_V!gU!ZOjSB3hiaHQ_kvr_HS4=sHtb+AwLXid}C`at#cJ`GsqYLdt)|He9vtA`e zkmY@}qM_yq1SY2Asf9t8xPbr%qK_NujG&>H{nd3 z#1UC?7vqTb4H_^*%XM1Lap7>I^X8oXB1w$F3I}wW4#YWMF%JU;aCK^j9-5PO=rKGd z2`{p=>GUcpgY3b`+8EU-(|W+my7C->^wLGqHf)M2J&HXrWr0cT={b~&W6q{fY7da> zt4tLN`&A@lQ`yYewm4-G4KImQp8csMmAyYPQrY`sAr*$_%1LFqNGfo*eo}z}zC2Pv zh^`Pu6)Cc>?F)pM^CtfWIzzyazHJV+Kgr54Im4!mH7x$00gDY-vn*CgtYIE%%p_v%MK z!b$AZ+rw%@Jd!pzlK`$mv^|X+)jbj*+FZuRthfkCeH1hrl3?Tbk?12t@|uEb6%;^{ zSoaDqDw7~MWj#E}-+HuLT|UpPbclqa+@~V*KsNfnzJ*!MI@ydZ?!LgWcisR1ta~z!-1q>^bnuBZz_#oUcPkedtXJP_t2ELBY1 zpD$Y&WMTX-2?dv|H~GgsW5}tbXl{%p%7|juGxahldj)d`azP4wY!(*`9+}2c7O6RN z8wQUMn>+--6xk|K<2n99$W0-YUV!o}|R(>WPA%U4i5TJ`mpW`)HU-3Bax+^e=h98etY==Q~f@?TY zcjeORqhiaDV-JzVj@Rpxz`BO*wyQks-nrb8T1-_%*CWmflBZ4UQV z1kWn$s=+nbYHsqFB8L^TOKSrz{UQxaqa8@rgYoD$v8~-CX8kxGiFRkdagoVb%t{yD z$cwe;Ya2qVcv}Drs~!i8WVlzIDJClgiG4g;Y?+=3IooA#8N@wQs!uB(@9)6k#(zAZ z#|y#r7(Qm$UKbufxSZr(Txm|vG%}mR<63$3Hgq}%35-O)Ljp5X>iMfDn5ZcDzlF_5 z%5^Dgk|7n8v2L>`BV_7hxPME6A|M=1RTONcu$BJi*QcU%RrD%kNR_i2ckAhTT%=($T99L#=ji>gA}PC#BnEgqR*e>GGUPATT{x+G`!Jr zCo-R9pjr5x+=)bev~l8E5{&g+%qc}73#XK0P7t8pkHG`gmX;u6gc29$P^>QJP^5`C z32>hRAqfS&N(`qfgt$IAwQ z>QKa9$TKZDR%TX7^I-u=i+eB*Euod9B3VP=aw7UK*_H)c*-&)8oTi1a@$~bc8k*M% z4#1RJpRVD9EXxO}L>gBBPl6Azf>T$nLKG7JQ=* zfRK%)yX`&lcwISmZDPzdGvve^9m7b|I5BpXJPP;ky7DHK)J7dGhZv~ZlVedVuD8(j zT{7A4qrJ00TS;LKbPxZjV!UYg9ki=1QAlCT_>3Rn;`G}4xpO48U0DMD4cG=iTb8Ow z2@z>&4*>`1h7b|zPxT>*`VzA`9V5@M23^{SFlr&?T(_uwyg(Ge0j#5C@+zkI?~#X` zqw>dzhq`jIAd_vHyxsNkp1g1O(kVY%PWJVwM4wt+U|(X+Sa>)x1PM?PHkmi|)+25i zAd7U+mVHP*U}vr9KpEU_O|MgUDQ*BVINHY0^e~&(x+SrH)(vb;9^w=ZvSoxj83P=| zB%D*|pA#A;R<&ke%gb&#Vl4g6ho@{gVl^B^ZoBlo(}=@H`h)lJ{PSzsYnkmM)tvTW z(s(AKlS&*7F}P4^Ub#i(vT}gsfy$ylV!wT&KIsis)4&W0S!u-&`&`;1JOn|!D$szj)Uk@ zZVP0^ozE|QUgf)m^6WvO{Aa2p%$56(XrLd@s^x`D&3MIEEic$;{{Lpx(({GU^wNK1 zZy|Oh7ZHVjUP}M?cYY;GUzPqYm%it}|GPZjoX#(N{$GEe=QpNb;F1$pk{;OUm_g2l zzVTIPFiQWJi;CUhh)x;vjOrvkJ#l@XMe+^7{n`&Aw#$@z0NOc^%!2tvPY>9ZkkI7{ 
z%h>I#*HMsF8M|dJd^UjNBD8Kz8{)VotP{A7D?)l(-G|X%MaF8gWmy;>!TO5x&|d&( zVceHt)MUmI$7Hn)GX4?kR&Mc~fPx+?RaAjfb784p@+>v7rE=<33K#KJ)Fg`^dLeBU zy>tm}bu?T4rW$svyolv*=}*O=rr`40s-;X?q^;UM@}PBE2rvaPLx9vR$Jd==>68__cN~W}L;>EnvnN2P;tnL%+E2G7mr59Iixm zW3Qpx`M_H@z7}WYHjAgD>CO+q(WdwleQRyYKV6;v&#Eq=+5m9vxS?+xQjpiwlRXGC2H+wLnkCHo|MhyB#>P zivD>C%a22&b(Y77o#jEb3M<(&tWaXKhNd#Lsj2sHzp->vqG(zlORm=&f=wvNfUv$h zL}9csRe&>6@ily~b>RoLOs{jR_3f?9OzXfF#Wyykm+)75aFbaa?F3w@vV}HL7Mhlw z%{9{`fH3-_wu;@muoydrlh(m_qDBr-fj85)J6DfF@F z5~!DVib@E9z$j?i{;+nQ3ZyChkEbZ!P3;44xj^XXE=%DC6s-+`?Za!@a=!b%rOdVl z!7~QIY^j*cgpY!36AXat0T5*v02G3rZ8wgYK!i&=I@w2zvGre1EP~^FL;$wmOu+bO zNywTyZA)6boKMo?jL8a;L#C4FI~YhaK@c^)m0gbczrSQHuzIavg6My1d8e3R&Gw-h zc~D1BqlP zqig^L6pdMF^#K)!)EN6R_$wMLHuhW|b774g&Kk@6>HRFqR3oDV3S*>y5&gmhI4uxo z(q#s-o)K#EMaasN!omOv_aDb_@D^gL+`*bd4|`fdQg(l{{#&1_BkU}MT^dWxET?Zi zyCX_Xmw?mPqmOY!Ig99Y0Y3W0p>S)sRldarJ;vc=_#zNQMxWgGau!6NZoLh7n3xhg zZmv839uE0f{8)}IJ{Fs>T*5n)7}3HHBxe=L1@BfImlL)4C`l>7)S#Iya&5|WhiOY6 z%u7gckMc?1LlyWzb^8u(@B;BSo`}B9CJ@VgjsNxj%yp;m_`F4K&3R!VVCK1_$+wd! 
zQ{nL@{SyL*Gh1pw=4+OZ@4~ldfd&t7L9x4!s&R=qfDAS{GYupM8$j~YU{j~=L4V?M zA6kIIN?}S=D2HOVNvkkl#gux|G}G4nZKTbWk7TK&k6>7SOwhCvo+FHr_i|yBw7No`}Y%w$;folKj6(%;5&t1sq)GkYy0rBYQsW)JV*WJMd&{;WRMa-%<|ZY}AK zYa8Jh1aCQaP@kf|U)5!|GB^LQ3g@S8a3iq>E+_OR*V1}^G10bg`Ty8^8(_Px>dy0g z-0%B7j%3-E{e*qq9cA*=ZAG5O*dEyvv^~byg&Ev4o-S97GjUZsH7?23Wf_gflcMkg z3s8axBCQZXM0gEitPq)g&8Q2JD9r-|2oRvgB7T4p9zg+t1w25B6F51K61F$p@5aa9Kx!^B z-zZe3p>Uf9Qe^gd8ka!Wv@Vg+#{^76*Y!fsjR7+qZA-MW@7UQ`a3J9WRDE->7tTm$ zEt=7AC84p4QGA0r{ET+`q(+rvZ^m#&_sLu1(zOwZY}|^qk;Nv`{&;K#+VnyH_C9(I zmTnenX;{XRhK03S4i_EUKqGtN#iT60+#RGWzScqnPmmDJSNJ)$3r?t zlYEAt)X?g~U)C(pVHh}hp}t1IQQ07}lqEYpp6W?^(A|igwn{qhs2=#v6kx}q@X2>qc|n=t#92X=thCq^nkIN_ZBdvU zw@-qYru*Y~{c4@3#}n#}{wTgK`iEG*6_MM)0TfS3q|kAbq^NwK33MzpUaT8cuW$>c z9nlJ`q{&3n=(pt0Z2j0w)TbN&$8;m_-i=QSYbHuHbMAD^l-n-P4}^5mHBlVxZ!4Z8 znzK7}WhQS+n4-x8gloKVqt2BJ*f|Z=>{Jo?#n3tX3x2lHDu>w-_|roy1V?{9K%O_$ zhvFaf!1*fdqb6~r0%2%(sZR5hal5QedRO3n%O$F3pXFb-uI)xl5}X>Y?TLH%jT`9< zU?!WaXWHH@zvJUcSykxyMqhwswP+Y;nx;HvXso64c}Rn&6ZQB*pZOxVV&sFr`-nuS zym8A2Rq<4mCBz+61z$Fp587Y0ix~ZC_O-g0p@`?L6kQX4Iq66&V;YF+37&Fz{F+$N<6s6fCdd;64By0jd0j_}jedqQM~Sp{QYf2NWk? 
zAH|UKXDhx!a9hBe?i;(}+ao;X3`zB|=vScu6d0V*%NeVZzQTLwG6WlD;2@F2F_nd% z9w)x5MBZpy5O$BenF?aJds0wL^V_&en%u%wUytiKggC3(CR6p}n6*DkZ(z6dW^a0i zsX8g)T?2SlM%Hxus9uareBdb68zY0e>{Gf;A#=(yW&U35O^XXEa?t)fWsXXv6xwlD zxIinSsjSA`A%vKAgZ<9*hXP#)^$ooQdR?f4V(y`yns+WX!dWu;$^!N)X(K!r1bpk# zUtt71M@G_bnTy1W&Pe(k^==mbfES?&_&lGk@H55TExd6V?C59Oxb({QIc{8H=f+?p znd0o5mp`*0y|GbDBIjZAlBHs?(-oeZeZiB@TQuXzU#)1y-${Ch!D&Y>R))fd%uZgF z$xJRr7|rBcqzxEL^`71!Z;jc@AHI!}F~#zfz+2v91!GCw7H1z%UHtm)*6&*{iR)6w zx3vo!tBi0u`+A@dO3!u$E<5+#2bAb*On6FLj$_5G5@v@vS~h7meN0)i*+jOmq}4}v z`7FH{g2?;NjV#)%*qk=1U|k#q*$SjSna4Qn9QBkF6zwmfl+M7@i85UsuA%QDS?5az zS0)VS14IKv`SCMzzy9h{t#?zOtXUjqUDWzMCit58-g2$Qfu{Yn-lJNVltgz{OlzUD zVQ6Vw<-a7N!%_IJD(6M(^zm}1XL_Bs(^$#meX{)iMDM+Ij`!mqDk1Gu@4bP^`>o~o zo4E-{)nTh1ZxnL!6igO9X^;9fD_#aCx9%^FRTZ z2BXpnnN2^awrF;;u$MvH9t7ne_Q`-;>cH00TU^y4Pj?hpAu0ZvbsHFu#b#E)I~)VZ z%W~8|C0l1~K+KF~7X{d(F`5kuKXVEu&JOr`9EFrYxBD5hQLMedkw-N1^@Liz9NC>| zsrh@Fd&%&pbj9$AV#~fZLM`IEetPOZ&QBk|;Eo%_v+VTv^Dg%%hSP4t+J9+SbEw71 zDCUo%C&d;7GaImMnhan}FJmaj<%>b)D?lLk8ugqi2XwMiWmIH<5GSJRI#j^y z^Km`3sBO z8}Sfy!NH;7?hru~rb87NoY4!9!D-d73uT4THdxTe0tR>EN`>Od76}xk5hq(v zyHagR6mJR)Y8#@!AmNFr+e5{o&94BjKHHB5#i1PhBHV(%Q>lntQ4ahi^r1^_$J0*T zl-|19R9&3yi{-v9Yh~&#g;30U0&1Y5(3cbZl&3v^Pgsq!Y0;N`c&}M~Mg{c2k24pr zru`>?*$c`dYilf-NrmFxpqP`VG2H#M>!z%c{roMa$3fh!tpPjEzyo}MJycEg9B)!Y z8tX7^Ob5vEJGW<$^kMz<1A^;0=by6K1aVj^1jB^^nIQLIuM@PfKqq!G(v~WH9uHE` zmD@$d8W2r#41raNTC1aJjvht-m$jzqeodEoUt4q^bsv>%He=u&{Yw}uMp+HcP{T=~g1I)UtMBs(RXSO-fwQV-ajg%>*4D~>rs4(qJPDtO6!RyF zqAKXDI!fHW+-3KPzV=ZyOfm5ca$esC!eXV8Rk28nOj6S46;!`Od#BYMx-g}y@4|ky zGhSC0_FG4$YDE_|*U#RCaaf$XAcVm*(rtjR>kh6upZQSe&V;UNQQkCL>;@a03Qb$v zTkY?*deQdLy3OG*cZ~!Xj-9k3G)>d$F;zleW>m>8c1$_%+A5jpRbtI&N>-4G-O5vup*tapYe^E7t0y2n4o&LJ)aO?$M)}kA&rUukMa>=O>@; zW(~+a_IDDqy(g5DeLxD3`2x?Z0V#mgdyTCseuK_!y7~@nQL!UU!>s+?)+~UYZk91iLkj?XgpC8s_D>6$ zu|FFwRSM#Ewz};c)uS%rj_Iz8xD&dYup^<58VoaC7rdtuEsB>nnghtt*H-B;cuV57i&gZ_O!PJ>vGM z*p~Ks{_eA;rn;wDZ)N|W2Q&>WfVeG^MeG3bxy!JkVCFuPZ`Isqihh~na}%PV*i{vK^>Zhwgu0#X8%p?(!TgK`)?xs$Sv8{sps?D 
zL~{rqaZh~MyM%rz_DA!~uqi7~3U*FxpNXliT`FKinr$RVf=IUfVX0#=`YwP6(}x%@X2PwA6+Y?$NZ_jP6@qYWy;m41Brc>!$O$>hV)jPOUImq3LWW#=5LMBT+t zrfURQ=b}+Vg!#V`UxfmNm&}x^Wvnt&WMv!ylbqt0eU?pKJu_cmCW5w_%@Rr$F#pMN zy=HpcX<=1pK$OL|Q*jes)#jf4QCUO$U?Ykmf#h6#*yT!aNP{7^1xK3CWPm7+30XI? z&yl6f`}T+7gSyB;JW7FXiuMmC{)8ltdvDkwxG7>Ymo^s*PqVWmQALDKqg`U3vGYoR z6sdD}y~Jjs!IZ9pexd!Ka9NTi9EjGtYqck9f{Ls~e6ApwVG6QL@0@U~pb1U96T2RQh7g_1UM$P$JQj>*<4waGp7db1>@={j$> z)L`Xxc7|OHMY2&Gbz1Hzc8=MzUmei{ZZ_ic2N@Yd7SqI2KuDmzJn_fK1^ro)wen;L zw=xmP^dn@L1t`MC_t(#RuQbot(zRYam0Gvf#G1EqGJekb^~LA3U*xasjaCYWj{8Ro z;?aQN3v6+y447qeWS5-5e1?504bqQ?h@-74jy4=|QCPV1YB+8py5ZMlh=$wLp&q7T zBsY?%kVU-1jjiH3oC*RU9|%ilc7rmCaqQ8xN?d~b4;?p&Qh2cF1YzJ3)pc8^L6oZ2Bg)+hBwTK` zJ~yn-#C~cBa7+%PfUg12k0rOOr_AaNeB({A1F^UlvJb~ZQei<;fe#2MmuQ0c4F3{k z?K~*Mm@+BBZXT4|O5WmM`!yrD2r;|x^raKs$G42&J{KNyqJo_;;15s0Cn)<9@Z){l zfVN!m1Waf(E^^?wZfqE8qO=Pi-hk)ziS>13o7b z{200JOpHoN$F2nxiUPYn6)1jua5^P;X56qmH{^Q3aUr;7zFbC;dQP7AJGr3f-jP=g z)BSBr`ez3xd;k2l!Hc!cFi5d0mOXU8T`y28D!L3(7?9C!4 z7uEsc60!-x8I%*mHjRPJn#@(KbvQ)e-GK-7B^ zhNl=PvZm87(#=?|;*ykYNsSH6YJQ=ht!n%bwu{^|%D&E+Ei<-mwJHm}cUiyC*Lu;y zwfgFmt7hR^X_YM^`JysynbmdWAuSkybW)zM6Q=P%w6Tf0g^7xJni))jMRrw02b2@4 zw7awOBGx~P;KyP!U=?p+QrMv^-HONU2|JidG7u_G?;-(}>Q(L>ZC=2eLcQe6?5{US z<7kIk7%&$Zoptc=)#8H}tbF*%?a+Y#wdLe&E0~?bAX$ec&Zb3V%c8J~?26i{7xLm` z>{wtWh5m0|A4dpH5yT6_m#~krEm;LpGX^F9OtY2YoSEQ}r_BAhc*nvXg5Kvh zukywe(f?6VCCF*gBB?_mEy}k4ySl+xI#MWQYL}hOZg^2VlG_osYokW{>uHg}eH;Tw z5J#04iuFWx5U&?|D+n1ZLe9}qWCKN4!Y(;*9sX?xIg2bY)+Xz8(BYlE(?rcYf+n*z zx{HEb?iRYtx$$=-h~KGYOyLwwtyeUH%c{D=7?PZdtgWv>)5HyqenTH5k*`=1d8G~Y z30M_L19|2ZOCN8?{Nbpn^zjeoHQuxz5$1jRc#Z?)f#)jS^zj`pj z98XU;27|6cGqw8A2~e zvOQ*-cXpb?0uYP=8^K4Zlte`^RI*>-?M=e3ln7B2(HkVOnrS41*f2-P=z>=C-$hL3 z27yeGrMobxZ+`#g*Z#}1*N&j{^0-?&tl`{Yf9mYDGnWF5dcp>iVD=0uUD|i(yczfF@%0 z9e(X+hh_piq>*zF6f2-Pfp9Ty z&vIRWY~A!>ezbHt?KEMtERsqk2pE`C@AZ)|JCriB;kGMl@;&wwK-onr2Hc`jeoyH$ z7-$880QGP$k8-zV>!W(_`#7x3DYU$;qNBS(gaOzYhS9q*m%wL+OM;^iwTcvJ#FumY zErAMZ9UVpSIm6ckxD3PqE~FS>^k~Hb^LqqeVaP^&<#M0d^}PHvY{DR#t2u#{0+@|k 
zQoWYn%rDy}m8CtY%sgO}q!Bx$LjE+fNTZ_dF0VtgPN1@07jrte9uQ4+K8FI|vfqEP z4~4<_LJS2Q9GywQuRgJ)j+r^Hqk%B>_80qTVEm~ASheX2?CKnNH)2^wn+B zMXVDTp9BugqZI&vM5m`7e#1uny4H=KP8F}D&d5YGxA8m5jjK9+g~{knO3qN;H!h#j z&^S%s-P>(95Te4~$ESB8kWq`9s7RqX7IqCM`}a$=MPPB{QkmU5camO*bxJ&)__8cy zl4Bzi8-i4DX*LLHoK3^)75=ri+$sJHort3FMWKcQoJhooqu+br;hG?XaeAk{y&}8? zX*@^q(Pd0txVG;~L)>3w_xg38lSzDSgo2=4X<&YB#KvLHKCarBG}+lUiLY~?-RIL$ zo){|5jGHMoOKq2Aj;k=j4*RW$M1 z75RKMwz0&foTv%&3427>v**bIZRR~(K^0A}Z6y}j+xin>>ikq~XN|FXt>miX9h|S-XL(KPu80d_?HCg>|sBu=4sWaGQXC ztqGCm1aJ0)fePKs2R0yRacCiSDZ0imvF zP8dhC-D;T=&VtoAPy=KjY5>XSMh)4C#j)$Hz-L6RiY$g(0sV|{d!iq=@?-LX)w9Db znsN!ZsQ=j~q~=Keg=>R*c?ke?3Ga~4)9Qk(teR%58+_^KnWqz@`rDat^ZH_T#1fsD zh0*Aw=II32W7XPC8@)oNX!V8dKBE(HmpLQOpwT|tv8{pwV3Se0Cl==HhovFEqnQ0Z z1+G7H0y85vv}>uscmQg2i3PxS%Q(4{QvFkgCe=KsSk5NE4{b1SmyWIZLdqVHAcHB4 zm!813vFaqjly2MK@<7IH4xlg3F%E0}GpfGAA2MOT$FwQGf?`Z+e_kSjvY=*O1zcj0 z>fyTo*odTS;w>LB!5RbTnwQ;)swr$17~|lJ;g}MhRS3t{ZDX{ZR`w%0zbai2$NpT8 zXNqH*6pn#c1FB}kg|I^VZ6n(nhoZYE=D@`=ie>0Q-avq|Ji`m17Xp)s_zdVRlkyqS zYY4PcJ;2R8^wJPJ;#!hDtFzH=n&q@2;RG9L)n|mf$US_RT!3VRw6%D-dvE#nGWW^< zuk`j`xoy)V{G;$6qw6cE2ZO7(dz{rR#QZ_Dn%l7^@^=78)?>*iz%Load%<8v62>un z@DifYjE4`iOn*zJITJ`#z-WIU`t$Z(%yAE#Pi#Qz7Q(0k{?TnBn zAska^NcBLr=GTbNM`b&`*=;D4WY(4;Y8B)>xt;nmtOhW_s`373S+4&j9Q={N|Xd*I*oWDN3q! 
z7^^$+xo#M0-5uj5NTSGJ7%G%EkX1{&6qm|@uI@hW9}L4XsogE*&?eCU=XuyRC-wnb zo3uD;MLJ3PK2uX{aWUH29cX8luJuo{h`XbE-;??MU6P@kSArsTF#E=fpP)RL&o@D7 z-O!FJ$Ce@MRNBAQl|KD;KhhZD8s;FBD8+=BA7BwUQA^jwN8DtWg_E3l~QF+p_M9`^9MmY zd#_2DV+$2uDHyImPyxetl37Rsi8wC5dWV5ad@l|I2QS*)P24B7w|>R??6HGiz6OeuO(Y2r7EE%-AKn65k{A6<%~9D>re#)}zFscegaHY6P5!hz*91fF~} zB)adWI6v?oNLik{4{+(GEMMV198OM{`5%%?e6G(4GNDPLj_C)2o>{Q5ha>6ZFyl`p zQC2nYcV3s#H$o4u%5NK1CJp^6VGHX4u9bSaRh$FGXOogW>)i5@wb6}S%%Nj-bfY{g z?K^pcP~>KsF4QS1T3(z@uD#3Oh-)||>6{lf*?4)^C?O&Kq!GqOywtK7NJ+Bu(ecgs z=#DJ=+>rR~-IlXc!y&Sf03%cHFbeBIKva~Ft?*Z9n7F;u-`>?8U6x;HG}lR8>w!^D z>2%cxyi15RZeI2hXQ2}e|8g_95chFGQ*5Jc5j=+zd9dJR~m1`m5yiizmxVl;Tv#Q$)xBL@j{V+Eej`>VFmz8yju#w6^c~#3zg-$`^ zm&xWzvz&%k&R%+1FMH`_C9A8ju!@vfqnA+$#7RiK=@cefTy{+XlmQrk1*`y|yz>6! z0-OmaIT-F)XT(X5R~SXt|q%hL_2LkTPg8Zzt>D}XyC z!hs}D*VteuenxG?6Y*&+$=$rIbHzBhU7K^3#d=&~OK9g(LKs-E?kme?Dma$hJ8GAE zKw{Or&MwI>&Hk+lzhO?{OM!%7t|eG1*RzE-;64{^7`=#HQcyZTWQOaEzDzYfI69K! z2&SK#I!iHxwMWxbYkJCvFrXwC=1@?HI~R~exHz9}Fi1j5+!<2BBF}+ixkrXxA7q2H zPTZQH!#pH!<4B%Q6=gx8x$$`PW#-*$u?{G6vIbKYv9DB?L>&g4$$KWF$O$^XgaZW( zNfWrRTFoZj5OP4xghSXjJ9JQ7)iT?wyy1zI!1K!Qj@3C&D!Vvx11vO0Ymh>%gafRs z5yF@PbM#9t-E3#mSLv-h3DECF!!&p-bXh%BtVQQy3If@r(#QW}OF|PUzBnh1vWCeu zi`1-DODGK4_)`;<{QS!G+NlRIQ+-vFc!>)4li<$y=L+y4blTj=}lVBdEnNaDK# zd?!1!?I@Vb`*40ykO(t%jyOIxLCpDmn7yZiHjf%0z_#IeJKTL>%Lk>y<{|LlF)epw^oudH9*bhl zNdzHq>@|CNz1J2bm|uG4*XFDozSjBj-O4SvG9Q4GjPu`va%R!I9J4G@)%;~7ErAq; zh2yfA%+eY0sAG74v*`fRv5{!s5dBVbRUHx3H`5cs!q~UCS=gMFqN8(PtV6!djEv*S zCJV~f2il>5kO`~OH;Ocq)&ymvR1x$gPNcGJ25_<-W)q*Ws{noz!ke|2a z$xS#u8+{44f|fh0+-jszv~BiOz_-D!Mr~XLiS%*E5?9a-qXQOo(DUrS(RX!DQ%SxA zs-=D!T@rX;lV8ROEcr4e)O@XIz80F-uxq~-2-zf5^iicK!rdGoAh%QJp-#D)jz#Aq?hA=C$r3+1C1iv(A~jHq>DKmciJQD9PsC5G)XM$L0=jhlYK)kDl2yqkIDR~anIh+koq@zg`qB}wbZ zgk@ZklvWldb5If+8@4^MZaqRACPc9(A$$m?3j)ym6vMG>SpCns=B5O6gG5c-ib+XR?>j|;3?a~I3k{&sJ zA`P?1_{fn;f8+=&lH^@xsEp+)VC2f#Nhqf=cHWs426^B-g5&mmEaj1JM?I3KzRy>X!1C1C|S zvz?D@2^@xjgC3h`kM%nr_7sM{F+zkWOfpK&@zoKR%G!uIDtwSEq{@0iF$8IiEQuW? 
za(KzR{{x-7AgNMxnCg-;!=(y7;~o+I{j_?d!Iq#!kIX0wd}5e%NShJm4&hiPvAC`f zn_g#1g`fM*#80y`nq7EM+weSQhT0fKxS* zs}4O_m|WvPuJO|#*8y=m$Q5hwke{vd=@JU|G|9jzrAT`XR7tRIyir@0Mw`V-Ci=4E zjF6g$bx`vbq1s5+R<}dgDx?Fn_sGI36 zr4jL3ByIFn&RKIbSRRp9x(*tSjifM8NTbbL%Isq+L4ZH-2Sg_iuDykevI{TrDg$i_ zD+5s*t%zVeY)Dx)>>Y$7DO0ae(%w*8Q?eP{hPF|!Q^qObQ@Eg99gQm{a71;V1!XuW zB15)}m|W%ULXse4#?#c9vjCjijDl--=ah)QJq9cB0r1`rKr^8j!1AmT!w`=7$TJ$7 zRmfAlS!DwlZuhhbm3q=tPkcTvNgC=0bHjt} z?FYsm=Yx$2)gKiTW)?seGfQ7Hv&2`=Fthx?S$4IdFmMB5;MnS8wS(_4sB_P?+xjXF+!^c zYNb}uB(tA8`N{N)ZkUqewxlOKrPvpLA0fKkv#W!L0(!@r&^qKTK$1Tw2N6n%q8U2O z?j(Ka;7oy&Za3SVc_-G?UM#-X0suK|s(J&)Ssu?GkbXAcm=LziT% zHklcF2!+*V_H{6i71XnHHI%tuvdA5X6u2hI#H>TwtToSg^V+trdbNQd|G6Mg)`>iB z|1VvP$Q?l`1n7o>xfn1=0rwC?I zW=1O%Ojl+$N3tjDihq_YKz8m*lvK!fU^JjD0zR|zm}J)1L?Dl{nIwJQbD4y-7?1-v zJjE27YC0LY>L@L%VFJo^JK!-Id`K;I6P6+*bbY1!{*Q>7i{0OI$#w?6_kZa=uG^xo zx%=p!_Z?;b>+YwZhOd4k3>9DfhWkW7*q5rsL3%oYYOec;hL$DQZx;~NB&3KH46lvw zxNyvaUUE!5+#-$yNm-V};9GKqXM#BwEbD|3$hBlk3@w1;ZTufkZ-Nb_z@&J^v8XL4 z{ND|})}xqj%`~!Rx*6;z(IHcUyb{y<+L@dtRRK^ z_hxMT#_)XPIiAIYVYtMxKFxF0c!TzOEbttdgjMSy4%z`OgE7F1mr;NbXerIs%44=& zx&@ySlQMK}gvB0Iabse{#FNcCql$M7@T+MQ< z;yu#Cy<8agyZ9Rael<$l0tDsoZbF1+EJR2L69e!aQbF8R45LsbCn=xPOtS;i?~`=L zVoYW5WAjU4LRw`l-c&(yVtA_OTDc77{`XaqXe>0Xh{%F31lrR|B}UQ@&6=452d21@xcw97E4;ov#t&X-7C4_aut?On-LZqZsPd0 z9d;0}XGBHx3_vzL+!^j|v027JAkkYT)g(IS&xRH%&{Ei4OMZoeiuP_P860DK8_yKo z-ExQd8(v|WUDp}b7`H@m{DnR2PeYm!n`X9qO}hr;?Hw%9_fSlXbMe9=OgcjCH+PUX zHc}s!RoJub$}9a(l#m#OI9IJ`tsDsMr8+XH0N8hIcesAj=nw*1QO}(n_8v?1kjV@6 zt6NXo*8_0(QIFVasUCDL`*^8KUY!rws=$Ew;OJ?N6?QYs@Rpk<1j$x`77p73whYjL znE?V0TXNUiLs8AoeO!5RdH}w<+yCCD-=|g8ZYp{FVmc~!JvoI8be8v)fNrzLRd&{z zJ{4%^&&ke-~1OwKW?*u?$GXnmXEd|Olf)lSG8c&PPL z&+_H}8WV{bu{M)BOA~%v*Yh{LG>*SVHvJ+WFpe_0~qbJCv%RWZSO~mWlqhkKd=v^Aw z-PqJF%yGLaM{@Ff($qn@(@9AZFCR}fzAC?vHIg`+uOdReood#^hbtX9XOK=uyoME- zRfldG%`Ssh{Z_5}H;`{blO;pC*TD7rclFJbs5u?BGJ62Hfy&sVOFeqz5l(2X|M zMza-LmfHC2NW~BtBho=Qcd*cLke6jz=i65#S1QNs^8A8FH{%h=I+7F9uHUCr-`RPT 
zYJawLd5W0NZy2+BCUnVibauKl=KdVer3dx=(`cH7-I3sE|<6 zBhxrFqssz-X0fVJR4&T$3kQZ9DDkn_-1WOb|HU6$F$j0in!Ql~R0kL6OW)x4>%`Uw{>z(>K!=l&S&4fPz{9 z0IvcX73?KhS_X(Y>!{_aFUivKp7q3C_p&re{H1r+d!aAPRIQ`zBVkQ7Te-!XBO5F* zUR?pZ!MLpwFo^L1h)cXce<|Pf3-ktrN%HmlQcbjlfM5oajoEWShH?=u$Z&+g5f=nE za3Lc=2IX(g$OUl}))_%!8tJp;ftt|%^84`z!686jb6 ztOYPa5)+G~^e+q0X^$D&4a|@MgSEo1LQ0wW%&<%vXJm%P?u;1*wph@MF+&>i1`Pa# zddx5^T9SKk7Y!P`e&0oUPGu?bnikyXyPA}!^~cUMIkjr_rsuO2LXN?H{`PqgLbNE9 zm&*kwanRiZchO?)r?XKRFtZxjx6U}H zo=eGs_6-_8r-x`+Na>R-G)117RKZ|DfxU@jH!rYMQ8k+r#wGTM2apZ840R{Y55mMb zex`}@Tuz(gOq-c!Gi_MN&!STnl5Q{?=y+d_SQ^WyPkB zy<6&xo-WTXl4`>#tz8jGQ*1#=B`o=fDwHmXDl%;9DA!MwL>0`PXI6{5;^1>I^F792 zv@`$PFWAig_OJfTUtD%t&=~5ipk8)b$QUZw>8uPM##&+=WVco!q|MF zK>pqQ!G3lMBvJ_z{l&%V+jM<8_D-{0J^j5mAl=3>>VLne%tYFLR85iR zqT6rSJ5}loc6bvwDAq@M=ca1(cHz39&Ji{g*5mC5L3NkoOuy=GdvRg;@mq4(L);)jX^F{a*`xT3>z~4I~vo8BtJX_-hQ1q?9-z#{O zSr^bk3lpyS)Sty2L2xW@cdHNUpQryQ9El(J??+00fHQOOw&9|K;IFo-L@L|bSjNJqN zh8-%|iAp8HJ7RBz_*aRm+xAwzgfHm=_IQ5yjjgAzX7zDBkI_ZB-m~*Sn8)?xBZ2j7 zAbTG(2#%dLIN?#m124nm{Z_ciRF9XDQ6u}oqPz?|VAb|DiK!JkeAIDRRZ=^P%XE)9 z9X?xi&xScY@q#g@C!TBO1Y%!w%n8#;=69NH!@is?8#Qw(*bY-SvdNA7U!LOcyvfADL`pqpn#-$!i%K9Tn`4Cc{`H=; zgI@Q?x%ePzZ}3FMR)gbx%N=4rwB-)+H_djzmN0Q(raLr{?O~Vkuu7nfp=?!k{<-%a zXbj^1B;j$7QdS}LXqOCE02MxB6-|U_H}kB;(S!|pYvwTC3OkS`J&4Ecng8PRgR1bv zz+mh?rBf5>Q7k{_ZVoXYz(|fX6I|8iaoS`L-i%EA#YuoJe42X3*d(J0a{wE!3wSw9 zOIO5Y`afDPHrLDH22FV)Q`1;7UN1h}R#zk)o)4S|EL#9!R(P|?aeetC?hVAyTZ$ox ze97&c3LsnH4%%wTEgowL36eOt3w-VDYf1a_YG+4Xi{=Dhg}s(ts$hb^QMI{?iVA~c z7(6`2cdN)AYhyQm8%k!Y0qJX?85-DQ4eZq?dsVAK5C=R;v;&^Fhh^Es0lk|2JA)I? 
zo`!TRSw-l0g4L57229}Y8&IiKaYlGETa2#lC?gUre*}zSY9cyaZY~u=C5xK;^*K8Y zjMe1;>~*#}baSEPD7T*;#CT75qB;PykW#2a#aBmD7~CXr(foe82=NMATo38$UD^+; zLZCE!+5VJOe#q{P2vtYqDIohp&O5YZu*K&`{O3m&{QM9xP-r{?VUDOjT`#Jn?P*5( zOPQ}&z?dbBkcvJ_n46&q=9>M*!Tx8%8oAker)s^fdd%tLEI5MHS*a6pAq7%s6$M-O{M9*0XI*9pA0q9BuZ3Zhddu%hpCa0Zo2o$E_0hHqzlS z{RZDNs%)aA%8pxQGu_H|wdSBE>Zbb|OD0+ft#X)R<+87hAs=Mw)w3UXRI~yUn;T2U zM=-SCMCSDs%8H`JqcUd|>anU&Kz?9P4u&B6@>@7UlKq1nz^jSqS7l>VfwNd-&Lm*D zNq`NT>^9pmyzkZXeb$M+_7~Ri=b&XxystH%Vg5zSHq~XZ->(n%TZLW(SZdo=EjPup z#63w!VfHv>*2D+qRpbs@nZGfqw`D7Sjm5|lG7|C0ia`j0BK&q=UNy& zNywKy$#P~(lwSaBZAxT2D-Vb9YSJL~@oM3ZoScv_%|d(jw~P1_t+G?6U#Wi7vcYt- zR+KY{G?{OBC~rLa)*Bvri%sWM>1;uC9;_ab7^R-FE|$jSbYJJJ%y?U4w@TeS#WZ{* zpkZ@+E-APzR??$(yxkqat@BEbC0p7Hj^H+G=pVuDZOi!=Q9b2uy|NYU2Uxc}-{yN$oL zEofRgPO80Ad`AdYd;Su&Dm~Q>jI`@NkF`MLq3Xm)w;)u@b$c@@WF-(vxZR3KsdZ~J zy1>V9Ho`-Dpv=yV*56Hccj{yMvK2XhhkmmtZ0G7GB*1drP7M~+%R)9Wpv5(?WBIPX zvrX>1a5f_XprGO{~7KaD_8C7|inuKcj;I$OHm*VQ5>3?DLZx(uU=kqb~@zhB;dFMZL6j$Ivcl>4Q0} z&@Asc9GfW^`6YvJr#BPppOu#qbxvD`wx_L6GR}0N(DJ#3ggjE^u=S~cKc62&1v$9I zxTviVR@<0gyc8mCp)QEZb|N;x|3n8yYVb;dezA4REq9-O0{8~pf8wG8QGdJuW<&u4SMSGKP=rBi5lfZ)&wc=LFNEEr1{p>M}CJP+8 z1_?f|cSBarr_YMIY?Q>I#N$e0!0A1EO9m()`{sab;?2SKFO%XV$8{{+1jUmt#|MV+ zQ~j&>=xcHZ5Lko^Ooo64?xB_%;R>G`;aWaY=7Hi!(E2~C(L*a*20R(5I@ac!`48jnC6KI=xKKjIoru>x7ivL#&4g&3?Wfd+Js zb%FBW>9A}8g>^+hsUKsCgHp(kPMr1plzczZOO?k1CKys22>Uh>Y!vN0M?EmTw9mPN zt#JfV5~JxshWV( zr|>2yLe%FIyip&KR#cU23ZYoa=tN@FP)dPho<$g^u)??oQ8;91%4OncPo%I4N8&5@ z4wA`?l5pke{R1=+{Dz^6qc}+`)mpvLY_$glhlaByOD|Y<;YAm>-D4kmD>V;X)OO#Q z+^k<0lB$&!AAS5`-7RZ70@B^jembtZ3)-&A-5PkzweRXs%S+AK`U?xvpn&0)L4 z+^6~;^LL3a`GxPlStVl+?dLzCS|kx^h`6xAR@x_$B=u)Qou@ha?Y=o$Kg%4gFE~f* zy*XN6V2)~|5i%^z(fV2D=(ochg?DL=_R@?nTQx_ulr1nvsa8|ZPt^tHD3h}*^DH=gN!#@v@5^9x2;onrT2{*YB@x%(IwEkfPr-`DeH?y>OvfAvhAfn)U` zy#g6#B65FSMf3uz2CSXEflJ}g{DzyeXpp|^dN+(tb1Ba>`LkbUloExoMkdBL+OPMk z`#8CtR4r?SuQt2iU4?4VtuU-Io=q3X~`{=5hca}Gc(l=6C4U$#? 
zPL;NA_dQXYK6_g)%T_wdXFjbKVHpo0`>fm05ftzu-vg~w5$jf47AyuR_0_h&5ag6u zUn@(6b}?e1g+f%yk(+qHb=SDm(xgzS7mX}O+%>5(=_{#FY6~6>XAdvYM|ZmD)n3n| zG7kv)hXtlu6V$9MQz)!OLyCXzC;{nIWU1>@cN{-s^{%M%1cT%&b)@i>3l8TNXx(hT z!ViicYI>5=4~;We2-W+sy~*R%7J#(9Leo5U?|m0!^D_5ew{fYvpK{6mentB-|MFt@ zg&pBmUgcusn3fH9CS$%sO-}Wg9L#Zi!?a&d~NS3(Q_u(G6 zhqt}Bi$qoafEx+TVN^(03djgbvb;l$!S%fdKG_(@hZNr^fmnV`O7b)Px{6IR zgcP}+lGekno5OXFuB=MCx$=u-K9(S&IC@6;ydW*h-R5*dR)}oej{WODhX$U)_h?~c zVZ&iiwQ=3A+V?T{TGIcmz#V#T&!=n(s!~mI(?)jL`5*`Sj+0^Cv=65k_k*gB(qz(N z$d%JVG^9gaMB7*`xXX*vY&gB8Wb*NYdq^2WI6O9dp{QaTCx8CU^a5WTvBYieD4kH* z{e0QTCiGz=Bu}c?u;tB1F+zGDw;6Rm*gB^4)j=G(t2#ldoB8e#uEFp^Fm{lfx4eY( zPf+v*h8SNBjm60tV__2p63}m)p%@6OYKCF>_od`_r)r(y-um8c97j25*S^3zE+5ts zj}~`~#!U`47(2s{K!IKkAMRh{p`D{Oau-|rJ6uU@qT-9iSvY1h=z7kiBBfw@7EuxUSTnzNaPM<(C(L1vI{m>Y}G4k_U|I8?$1F2R=-{p)ml|N1m zVxz?=okuypa=T4(4$G`a5vsG2bFw4@EbJiOk1`$Fui|6~EC|ycsK>*U4iII6bRn2@XHuRq>2fTy#xOj;Y2uUE_G` z61r70=oX^`+l%(>#G*ZOv$O%R^7%BUpZdW$Ij;?O%8J2FQVyk}us&;Lr;Gwl%`49F zBw$-zI1Nk-Q^0toM**!a1wad+M>7HR91v$bKx|Tv3Pno?$tN=xEhGynp@mvN%NHtE zlOx~IA|p;3qwa{~&JjkOu^%9dQcZhUkRohaBzQzJER(Pg9Nt-ao4MdjF96Or07vDI zE(*@fqTqC)I1WmLnq#^G)rm@&7{{oU^JT)s&{LuR1gI*WNwu2gUc+a}nh;P?wftOu zxACe|7`tpPn-O1;R_vx#SX=Nzp}yfqMr#A8%>NCOR>9jP>}(0}Z1z~%G7n2z7Z|jy zeOOX+tD`^AFxr{ii_p#X65ZIC8RyW)ZU=45rRAOT+HezA3_gvGPsq?`1uaj^E517V zKp41Piw@lG(!k+&6zB-5hL#y}4b)^9xIF<;dy9eF(>HM9a~in4z*G#JI4Kc&HgHPN z)hZ9%2W;S``UZ}M892_!>>D^N932J@?Zzl*FU^ROCc8~CrHdxDvhNF1ev&I`!};6F zKImIZ%Va`_)mqx$8ZKeX9Zh69Kg&`?9kYE4isr&embAHZM~7${5!%42_SMlpvY@Cu zDbyF4%BOYuGM`~qlJ8X*0_7m;vXjlB$K$#J)s}GGS}ZP`OBjg4Kx{2NW9ZyA?!|=0 zxsv>yzpZSSVP!`jR^%tRD%~~*E7*=0UpDPr9?qU8-d`dpe{%L`M%YU4dd*NhZ*@F) zSM*1OCt(s|f;|~W(le?s8GW2s{&&pZinH-%;hE5Ph)g>*w!5fUwgZ0d3?7}~1QooT~_Nb11s)H+OnYsyo_f-+rv?qymM+VN+(k;19vL9)} zm9hr`b0JI}s+NaZN(5=&$ovF?IYJUE8&AU8OV%SBv}s-G4%v`QlR2vE+xMNsud!qg&qQC6GJsTcyN}O=E2R^+&P~>`KO2NfHFIjFb3;;fDcuiw)t)_` zkWSx*TKYB2+ER5T8OV^?i6r^U3~Y=N*Tp$+mxr|F(RJ|^s6*gkj0;v0lK-#d{mS+2 
zPLb-@^naPigUp@!otmH5ioJsw_ALD7fy^u+OEc3IYO|Ee$o95bkB)oMYt8ai-{d9- zHX>&6L`lDp-utpNWBe;NB62EXE1@EVjjUSiL|j3pWQ-WH;A0(>Ms%y+iMWHWD$1v; z(-rj-74Yihc790huh8l2Ga*N__9O0Ca6f_#1i1uN=wx2(N8H#EtufY?@2T+!2%5hi z0WGi}aec8LLE>ncD{cc2tdSlb1I@FLwE~UvSU+Pn&zrTsGM31`Af!wE>F!I| z*p~JsY?#g1i3@|&ALfw_V9#HUQ)6WFgn0eF1V^n({qb82;86-tNV;s}P)fQS)Gdu3 z>e32yG}-#=x47@120ehRuZT28j;Cv*i}@Ayjusv=iaz}zyLf%}+4DDK>hLojG6PM0 zI?n@E3=gnMo5GA&ZJ%C|Eb|%C#~T|ub)F$RYmc|n(k?GP5jG29Bz9g?mq!7uOn73k zb>_HnD8dn|+2=@WwkE!+tPNQiyaMVZpbzT`@%O8el{%B7u2W54$AyKBLo(}H<~X~! z?zVGX#&t5`z~}o%FXSh@jDvjZ4nvL}S$ctVT{7n%+;`yBT9?fxJA~n6z%9EMC9b!u z)s^A8vm7n1yY$t=34?d=oia?r7x>P!;fQpPZ=#=T;s-lTEfsj!Nl!!O2NuL5C11-9 z;%mvf`s(OF#rwFclj(gO!mG#QeB_34{+qr0;oAt+q_^HCAoH~jAEEB87_w|Vu)2{h zajIDX22>mB_-=PVYHRKdx^Acs!`hA@JcoAjNPxvZq8|242@(PtBo)Iy*WD? zZYTJpZiYdnn#(?K!@j4l4~8F%ag-zbYvB1d@)Pkj>7;V&PjYh7umr@qtZStTU6j2a z@>>%h>Z?*{afhv>FW+tY=$vwH>T^3X%st>`n!_n!RvL}NG(4q-k5Pvt8J-JM^zHov zk7i%Ql?m^uxmC$31742BqnD=ZK-8s52wOM1P7eimetC#z<4$G+X^!JUf<8XMFr8G( zGVavfN&DM{9^=fMZ_G%;Y)FK-j-8}qs5?auI5{ZOQDVgIQWa-}@OO*-WoFQo{d8Mx z?qD^x-s%LNi|lajzYMQ*y3PT7^x4~mSfe(!7={2^j*-Ycv7@S1tsmyWj)ZS@9>g|u zM9og4MMxTK7=4NAnH)wMsGH0|5t@KFS`jrNR5KrKuJB>^C94 zK^0Bs>V$l~by?L<>f6)SuGCgWiR`)`Fq#;zE`*HJAY-$p^X4iIdS-}3$ni-UD=3Z3 z3ZfKFZh_2=4@$rm8KBkUNwgKTF!lQ?7pj=n)zOB;+F=qjNE4~&!|+ZqlgT8%#3P!f z;GFr04#?nw&itFn?Y^1)Mh+fva8SBijQoB^p5kuGN`O>yE%)9|@%DBp+JiQf2PovH zNh8+?;`IPl8Hg|1KwwkTF-|Z<^T9;}frzMac`@0w6*v^U%?%}YT$5hwbcj8!sUA{P zG`^xCH)dO9^nF`O0SMa^p9=zi+?keLVex(NB+gSb4NY&ZyEL75+h@c zyX~{d@EM9JY~o~MUEa^3LttB-R?+Y#I}+_Fz~D#fb@oa+je#W@eGgT1-o#RI>c4Tt?4WBA3@dN?1%Q@ z@dZCT#)r34SiH;yc~WABycA&MRvpsSA^45kOt2f2vvG8Q*~Z$4ifcu(FD>^%pr-L= z?XIva+skzuy2#sM58zG(@|0Z0Q_gs;(=5}Dr;SNo2g(V;7M^D;z7U%ciDk`!geX?9 zNL)M%kx^LNVogr43+LJ8Os$VynqZ~am0}UqU>vN=DUdKYrnkacd|X*rOAptzT&8Z| zH4b6k7Ry)Z9EE=BGjZyewGD8dt-{d<{Gzd)*hfymc9*F&b^!T1TT{3JW2?wTVoXJs zZmX3uZqinm6R)@NT3Z@{-P2C^WgjTgZjl9hx-X2eAp0YAZOtB4SC2BXTCfSCQ(&fF zur-ciLo?(_<{9g?_za&M=M&jEj_K1A)~sUipgw}y{CZ9JU1={?7_shJizoHjNwug5 
zpr+^91e$@AH|aRvhT7^cjpjnW)O;k`ID9ecbWdbtu>7Jj2M1WR}l^6WSy0_bE zpH%nuF7&ORhx_W~?Lqot-|pop`{dx)rq7gpALIvqmm~x%dYb76tc2u;x&?gpN*tiX z?a;rL1g5qmEPbFE-Y!JV@rEo_?sM-eMd++{U;KeTG5vq{VHgBF_;^i)#;_%dIIaXr=;Lv=+p=M! zFJlt1w*$PfSRzcYv5x0W+$HN*N0F;=7U}~&UPEyXguy5zL;I}W`*wwH?sV}3Zs1-S zsM?c?=i`jwtZ;~si>@-JZ)Q)Sk*s5{{8~AP%Wc`k+2>nfAHHW4z=ROX6dX!)g8}#4$D0C#ZWvXXT$#vi04tryup}oDHnQmxBUq|U zIlm|>bXm0Px((XxjOO$U$hC=H%kcG3%ySi6l68=tt&aZN?6(SU73<;{3ZbFS`z=9f zzpbql0^@L#VHh_PjBdL|0L2gpJ?23JviL=Jb9ID6!3ntz-R9?tPBbEDV_?;*EWFZF z1`#V5m?jsFFu@M8|K}*3HyJ$Qp_be-!Vtu+`MMF1Hk2f>5eKKU6APxMaYgAhcV{Oa z8Exs|9r|iybdaH`<(#>Wt&bofzw@5`yJVC|D>JukA|`rt2p}L<8^{5$I7GrI0E1f!?P^F%f3O~m^iJxXq7{X3t zO>6`6R2oL+y4d=1U99|nVhv$r*2IY)8O}26ADYq^XX+468Mvy5NVTo49j*C9S`9>6 zJ!c{v92sS1jU~5pws|s-8iz%V(5><$4eR`J(PC>2v`DmeyiuW8-fc$nm64*u8pKvk zWSY=mA9yk~_HiS;0t>Ksh1f(=+;;l7)s1>b>W2r|)-h7P1pW${op!V~YJi@Mtox>UM zl!l=o)0&?!R#@rE0w-U|Mu0`R11&5fDQ^3RVx`_KvHewI>r3OnHkA!Ejs}9lSIn+A zjg^6JrRyLetx8Puyv|aNseL2NRFF3VaiI48c zphE8uilljxd)6jVF=VU3FB}QK&`g?M{M(j<7~$MI60%h`&aagcDjN(b4xc!*UA|F* zC*X+99El|Lk_u`C!;&KiDUnAr`vA^+9A*qm9oQ#G9i{&>LCaCOU7QAv|Z6|CY)sd*H6X9AETC{XZ1-$_EZUR@PE00tEe;QmpQ zJFEJXjNx{lv=Pa^uU812es{Gkgub z818u)A|=UXs-v~3uHDF*g}uhGGQC>2fql*|ScP{jRN)<}@Sbvo5WJ1A;fR8*dh&Ftea+>Hjd0g!!jc>%qqE&hvaL=&B;54Iu|)RiN+W}bQd z2-?xt9G38!IWO|vShAgW*$!xCO}r~E_esT8M-#XK(g&5?Y2fWvYj`Fu3Z7LDc(^JW zczg80o_M`nB8?)Yj7{kw>LAMxY^X)>>QAe7a$S4S_0q7B4eKpAk6~ODK(9Yqd*dtLY%x2AV|T-IeWCK z2`xS3Hjma@xkIdG(BXeIcM^pmkgR>Nsr1Dg{1*>W?38`M0#tK*4cHIE(o?2|ETD0(AA-`yp$zFLfPMH56k9ET@ln^jB` zEC|vNmx^OP%p~+hL`;Xfp2Vy@ z_JsLX;7!38a=K(<5VIjKagCh(+lnC@$v$z$W>j2JL<}(Q9X9ObiShOjbX}HqiHj1) zy{nJ77CJZGSJ-LyoeA-X3k2=M_eP2`6)*&f-O{xYzQ7<2pSgelo-*ySPvh59BCbF{ z%qmieSZ_7ArWlk+YYbr?3=_$H55%L$1!AQdLy2z6fDsYg5C7qTU!1h66Qc1cBSh9u zNd56z+>)J1Hk)*1lt2*l4IwP7*>V4J%>aevy3-bGquh*nzF0g7Yp7TqV@n@$VUH|C z$-3S@J-^j8Hw;yxq>@UjM#cc>VaoJ4BX{c;V5T~Lo`g)P$t6B(3kr4i=j1;k zZHnrsKxV)6QFo|skvR;XO#gtDNZ1T;Oq(RIBCik*@(*U+#U0O48I09^>1 
z7)xnez{8R2m{=DBKS%M=2Rh@5bg9B@sh`Az+tZxbT$TyJNK#j1Bu#{?lRKe7lCk8P zuFdRVE#$40by5K(eg-hF7$tC?26?#B{{B!>e<*z;Q|mBmNP{&5LCgYcQ0o*ZYi8W? z)aX)EMY6ZeZ%LF8<%86(5p9^jh)5XNc|)>t%){lEb-Eq77CknWxa|*hsG>x7EtgjLI7`g7&{y}^+ex()XKRW=q718*zbv5>@MWLY z0da^vt;4Fw<@(W@#)76~jvka`6m?p*S8(A5W3rXk;c3}0@i|u4Z-dEkxno0^=1%L)Gpl{>>UoH z-|p{-j={lRB_>!g$gDIVQX|Y>i%} zD!VJc`dpyRassGETj`tj216AcU=YWWyCuPTRL36A4E#(imrb+>7G6S$)jX>ol88|U z5CO|_hvd6`{*H-n=Xq!xVJ{t+i2|jF z-2L0-6o>6;=8%Rsu_le>u+uakB2x{CpO8EK?{ynU4=sh&^OZRJkLe9c;sd!)hVXj= z)2WL`$_sc%&BC>(^f{CrfFioYb3COsJ-`ajRtw-*MqV-TI1?cd7@c&M_^d_`lOkw| z4cPejx`(OWY-wU$%;rZalRm8DN{2V+*=7wW+}Rt}+P%b?!*vXMZiz`14C+^cVEh*XCSQ>6<-f;th@>_2>RC)M@=zsQx zJ|lj2b@qprj$y!WhtV_O`iBd=`Bf33mLbzr>ZwZhLn))S4W@F^g~qaPlfG(Ad|O9H zcGL2)1ZkQVx3W`7CIr?aZJ@egWzwhI9ahegE8NMO%R-ho0?zNIKYVw)CAyQTh1Zh4 zs^SVEal#}<2*IR+dl&(W%cytZ}AWJ%Sbp>c4Ivx`ST}tWNdw1 z7rkG%LKG3LU&v$uu-$SS*H^eN|DA9Bo5M(piKNAeIpZp2JXy!cGRzP9_GKU`a={IW zfy2VHw+s~-f>|kqZEH1>OrvlQ+C4--Y{n@r3_!zDFtaSYfz<*uqOYh8T454lX=HK3 zK%+IzvhfLi)P6Uu8s23!kmXxZnlcnC;UaagZZ2~quhR*|jqJa)40F73%LMITEM*g8 zP6^dh0>BR3fi>bH4HSL6DO}fdc(3eb_Q(E zB_fCB+GtChM|^>-8+8UB5icc}$FXDc2m~;wf;6+F6#XZe-n$kU0Dpe*{CfXjOQKWf2A6uX8A7rnWWm1Ht7^2nD z&O`)OfPNw`g^A_FZ@rw9R2 z&m4+M6qATzbRGr-HfR^0s3Ykm>Oj|3q7D>eOVpv|ftvf!rf%~@#yE7Q04fMjh~#Zd z*CPRM2?Mk$#poVT_ivbI(%uR3?D*3y$oLuZg=+B1_*2mmdUguabMZst1`24ojR?ad zh7iED)q@F0z5+2BWkTRjj>D6j2C^qDgU&rRM!Xlp;amln9Oo)%SZr0gyThO`bA;o& zWR7HW32Gg?cfA|Ks$0yoVmdNMddOEM`2wLb*sWYIEbA9jQY@KY@HI0AY6AEXF?%Zw zX&%G6vcF6{$-^--qJ%>R_tf6P7Idmq@yz`^a{t6v;lKdKOM3C&4H*{|=GJZKe)b2( z32tO=_^Lq0SfW_DB>T<~ElNn`B121Ie&^NWEzPJ~d0{*-WLQ~j)-iTZf3|&X@!1xd zJKJ=#VvEf;!CfM#bD3}5*_M5+m}|yIAhGgrKbyJ6cx$dfYi4>4!!F#TFC(DU+P8E? 
zi{8l(NRU{JAQJP;G6^p7i>B5ZlifaHvYRa>CNq%r`ZKk-Dr@bwwAf;$D0)BhUD_L$ z0WFpK2Qj1Z4JA_GQ{qa z+MVIx&d03KI$ucE5xV2pmBzsFr6)MuqMg0p;J7(d!xUuKh~(lF^s6(dl&ZvMh^Dxu zN#H90QX{ZB=41~8w|iOFHbrq4iL*TzVU%^3dDJU4<2Sr&jvT1GChyb7ZA*j)Fw5{6 zyV#32&T#q-HGSQQ_?~;!rk|RMY^I_OT7{*C$*ka|Fzv-H{wZ%+7_CC{x&dWKdsbc; z#6ZSUjq#IRH8W|icZ6f(3hGA+*ig>}5(0D=oF(ky$s!DTPa!&X6$Z zn-EPVJtkzm{nogvl9hdXHFwO=?`LJ~vsIC(YOt_;f&vqLGD<8np-b`{p1z<%V_Ce1 zAt=ig*f0@JXYWd4%#IE9`2dGx|I6G5zeYax3)~-Y(IEi8=xTW(C!5W32Ab^Yq_>eXC{o(Py{DtcziZG2C6$O|8#_5S?M0*qG9>)$|5=a;aZ`5qc6dKsTd(p z^Ne|8XDkNVX~$Hd3DjfiUd9NL5{^}$I7k2f(uKOF05bp2d<<-Mw?|zcjF*S zDBUCpa+!a1GZ*e|qy(Rh`G;3>M|JD`yDI?VN&(?#Rwwm#KTzhApxX{v)J?Q*VW%^En*?S}DU8-;woos$eGC$ew)W}yetIeMkw>v@ z3X|-SmJ@##E#f*m;8-NR_{R%x0uiD)e#>XjKG+AFN|L)cHe|Oi&}Kbp~N(I277qHp{b15^4fsy`N&bI(bQ))39` zl*AEg;J6ut(9HP0mVJ3?fnlvJde4sYDXuc_Vfn7grR#*Q%x+dCGT1(v*L>dnWL#@v zkEkrwd-fGO2DMuFx5D5eZFCowsJ@0}(6JV`(<8NraolKNi>|b&eC_2HF^2cIh$%Tc zY%SV=pELF_VtJ7GTf*M1q#S*Yx^CE;;3=CXz~c7UcdU9g;X+DyZ1Fl+68AUl0qHBp zK7do-RDKSYWq=o!r_?w4%zlEWZ(V6;@7zA_rH^0rhs(~xswT`qRd}ce7~CcRc9>0q z9uM(TA^@iH0kG~U%mj#z28gD0rGX=JAtEt;_LxBwYmN+QYf${l3zg{qBqtG2$E=a= zEH+WkjH!?zK$2Dk9Ig8Q*?SvcyRPca^WJkm-@WhSNP3cGONR4ql;pW1A$7&E9dIW) zE@SMmX`HmHOO;}3+*4GIBe^oR+eI@yPIzEJl#T#FL=dz?WLg<3L;@x41OX-pa1znc zG)gc*2@VmBE3G8XM46bNWZd!m{{OYlz4tvm*~Wz%k;W?Dd*3k<$d8`Tou4Y@u{}nU0sw3@yuv=3|PhJj=bDVku@}>)#M?qzpf474L6XY24 zpC_V4!ZpFXR$GxsjfN0X5;D(j&wtOe<9HCn{xXR`nTS<^u|hjVXyglb8sWVci1#^y zzx1z~?zv3n?_GrVmU50ZkmAtSkwZ{kRZ~rJMrga2cb$UjmS+`(s62z{`gQb_BJ!tN zRW^&WN^5Q1#!j|K2}Z}YRkFP){;s!8r^9fFBXE{rJntFHV}U_`xjYufXrol*5g?O&~6-Wy*!NBQez0ymvRmg>3J+f&#-ZF8Pjf)j49=;_|7NUm) zvc?g?Iv}OET5M3R^ox@btr&=9;?}8IqFm8Fah7}f21Hsr0#xy-*jL)NeJm!896X<0 z?o}czk&~k@12ReyQi@LwsU*lrO#g9pLtAR~6awFAeu;3O;9H0@5Vk8e)u_sosY=4G z)4Wt64Tdviz7L`y6nzWyQ1rCSMqj!BX%#a~f)vXm6|e6H339OObLtNz=+pW}40}XN zFFmW4o~_j=fJ7}^O&}3A7|9TCJTakYgou8$Js=8sBW>m2VK9_sNIXVsWF8x>q>S*` zsTNh#qwv|^lx5Xs7!ALu%L);EqsuBWFqp>DWqt3m%gX8LTd=GaX`%NEFKdi;>9W?+ 
zvU0pk+im(4?D<$5iA4u490Z$4A9xjXk>8TghP~*rf@>hj{>PD+?y(?%5fa>RBWYNn zz1J^dH{wO3u(-Sz*{gn;=tRL8k$2c-EI-74Mt~`ZhOqQOG)rZ8@4t%eXZs=SV}MIP z8=wRK`y|9kMcxB?_9*3?h=;=bjHg;9f;{@%v)_nuMRvV5lY{yL_V=lE-ywSc#LOr=vmmKm(E!p|ODl?+ftX&GpC6s}0v_^nIK=OEZcY zz)WHu5eadzjf8-SuqX4+(>gCnRB*4S8;+M^&K`@Iuins3pgT8LV~rGjY~hG(wj` z-?LA9#Q~V<0sY{?{>1S2z)&C1vx6~0emy|vsDl{bA=*M{5)VziBA%vS%zXm+X9$0( zvM^GUoIV!7=GIsNswY8623IgP%~eudWL+Y{9tg={F4$ftLf2AqU+ICrb$2C1Y^jmEiEkLy8SN)|f z2K}ZC`e*U1?nv!um5+D=zN*qPhw6SJ|4+e2!{czgIw(4KOi^;O{s=M(_Tk|SD_<4^G5nKSI zpZrU8BFRp;PyNn+1QCa!h48(F&awE)bS#4`U0+MrIHhXCKy-s%t^rMO2bP_sBjNiF zY7>W1M$c{j34zEo8c~BEUQ^1pM`Xo2#>H;tY+eW$>{!x~waYR$UEb!HlgSc0{Sa(8 zEV0vJ*taDXlEwGMtY#5kNq6>`!1c6P) zY@Ww2KqM_HjHKKDnR>mkC^(2nS)L|71V$W)&`Sq%!7Zt)m=f>|d^0xHdSzR?7>60x z!|p*%ltAtLOXM4fI}NpZEsZHFfBrr9=jE@T<6~|9_iN(br@WZRAMvF|hL%EnPB%#4 zbBXO(r_kIn-N|b8dcBs>R$Pg1lrs_Qo$$5q{JX~8>sXuPHB^a|aGc?0eiS^wG~h2- z1-km22mC_+PZ;2eJ3-T!JOTrPJ8cXO8p%P-m2L^H6om4asN+U8RpoPuXyr!SkIGtN zPON+`6XPU2#tkuA+L2}=k?{2n=*t2-pUcFEJ57vnC(TUBK?vh0^oCQ4h6#zPOe--S z4=qM{T%%M(WQdthqECI4Ny~6%hI=bZSP?QLE%as^T8@w-jSCmFgyqYicd{EXqA*+s zF=`b~Cp>$+p4G$Ku*wZ222n>6eUr3am~!%g?R`3k_|`2_vV8BG(7llAI#OOZ=Y?pc zQGO4Vg_lM1q5Pw3bcN=7^_&PN<%}=vzzAUP>OwXw-7uw(jiWqCj))H$(;NI6Nrb&f zNIcKBY6*`mh}p`kN+7mPuD0Y^W$5W|9-8HK!_Nf3)M8uQa|sQz5l>!K*DJg1oyn>+ zMO!g6P=!4s_DOyUFpW@8G2}@{aA9Cw77eqHO=#L4uc1-{_so`I^#I^V|!eZJRNi1daL?a?rUiR=xkvu zPM%&3DF!#6hJ;oAW47zfhh1;k)amwk8QVe3fSfuV!?KoQxh$&aNu-4W{{r!>g+j`2 zui|6WV+}xvZkI>o zI@5nw6Cv6mrGy79;YKhJStC$ZB=E3*0W7N_CdM(sN@PS3%ulJLjHysiIuv6YCN)D+ zRBi}oGKMicUPzG+CjwQo{86C;r+^_kR87un?!p7U*3anxU@Im}Y(gf=zV7+J>60Qe zpd%ZtP#|0~;T)Z$#vHra-yACLu8j2KaV*e0jRaa`-3cDny>WEoeN^{)`N^OVVG(rZ zWBan|h0l%KH790tYx1RCdcp!N+Qp>MLYGDCK?+*PZAfTgL(MkYfeFi@F|fmb-pNbQ z;tsx0M7?eZ{M#ueoCwP^$Jf9K&gd7$34oe#!U@8`iDQkBebTXtrN>CZ5JRC=PlwP5 zVvnfPMEQ)wU}*@}$Bp=ep@)nyhm-r4hK(bF0aJxa@J> zR`QOK^m;LH_fP#J;_l4l+mgG32Pw+Ji@M8qVwbnFbIC4`43humb6`R*_IYhekns3E z4{z_l+tI>Yc#rQ6_xSKqbZ$4!^v}J=S2b@Lx5umIvU_}*iSZz=Mvx&VZYknNtQ`$@ 
z-U)Ut^%J5Xo+Vh3pJSIdA8!XAf3bG?lz#LiKCO7;nVrK_YXh9Oh-`-|XT7N1TP`d- z*5ls5WylLFoai%?J0Co%rg#G=A5U<#epBsT4S3N#uasb+QaRNz%lwqxkCK;E!+rL9dC3V(8(dMe(3?07C)C0g$kAXKEu6b#r=q)<(~1s zcU6DL8(^XO!x>9l`RMSG`zeo%|9!CfLzVYefA}2b%BO~pqVmb{zmHdcsPeJu5AP1H zd~WzCDxV$y`%LwRDxa?YFj$VUrUJ=u%E$d+?eKw)M?izrUHp@BfIQIDVB;c76}A-C z3hT9g#wyxyBWp>cJmk+Km{<@+Ii+FpsD}~B~nP-jH4m4&iX-91_b#@O2JOA*M{4|jnuPj zCs{y0tq8cD9~>E4`Db)I#5yC94KL`Ceqz{LK~c+|Afu0m^me5WIhB5b;~_`QGAnDS zMSLyF+EZu+r-F^}1iLFpU<8Krq~l?X1(8##6Aep8R*gyr5)Dgpxn>Rqgv5cASm`Ek ziI#3cD~xPq%q@-V5-?0$sA z%WbH)VmswU;3daYyQB&_60i}T;yAc=bg1*SVs90* z>Gq0yEvT?TwOHsyRpaI!i%YOLU$yd4Rmz8II9sT2rCc$+oB4#|BdHE{QZ4WW3U%`L zU81uZY6woRqxNFUP%N#ND~2E}ll!q665X-2qCp@14N`@Vzf>wZ?W;{hMV$L7u9rIO z_dktSk(e(~wvj~OsJn^34qF~@trWCkE+1*A%I}s6hc2WwmblLc z0F;QIGEi-?s}NHrI-w6_p$(I^**=llqI|O0D8)dBWzpX;VNLYf#GJ`Ip@i?{BSI}rd;u#XIZX6WuCG?Swq z%ci-*6!c5e^e;^w*;876DP58>3?+b$c4WbdyA>P&aP89hp}`3Sz~2tDXp8af1nacf z-AHAJeUk-xK_p`Bg&a0_3o+%`tJ@4YSr7A%iw`nT$-FlX^5BE=YDXmj!;uBbA;d^_ z>-!Ib$s(7oj1k;tpRx88ME(=?vAnnN#jJLzPZq{~GX@QaBN?6d3-zc`FDULe-&{(A zwZT2YuwW*)!YaaXCTBgE4GYb%q%5Zgqo<)a76d9f^$MYHTLd3wqyx1_2Yzlt4e`ZH3~4h^gNaZ(Fh@zJB(7;`fcZh_ap~ zN5qILmWW_NKbS_^{>Mxgq01g!v6QGSIY8Sl{Mn1$&<4q(=o!0pveU6a(2<0IYB<_V z_V}({qyP0*vw=M-`^oG+c9kQoB(%xFP4~ilpIyX9`-m&E+I*Nc+xad@iLv(|)l+$p=xP3@_4&uw&q+Op zVrCDo#NGFY8@1h|Rr(Q^{+X^6jj5GmdVZ{W4lzm>=lZ>(|L%8OlhFnQ=W*EK1V&L|$_nlf~43_=O*A&<0fLHcqJQ7NBp>m3IEKqY0hVt82D@ ze_a9}0_qlaoR+o>RFCddWwo_WfaAQEs5-CW`J06?JPA=*Paffk7=TA1DUXN$tbULl zR1R*&kib6=$!;)MMX8~D%I*BEqcO=<=8DD}VnBC!Kx|8tJT)xoqjVb5?J9joe!qH$ z@9@X1DqGXRBmYf3Z4SqT=-x;BqvwAOF ze5W?X>U}Bto?H<-k1CSF5#MRHAio3?D8AF@o4NKOOCtyf{wiUFJ1JOvP*6pRz*^7Uzz*)l3) z*{CP+4JJUTEA3N8nQKR7u92X|X$6fe(rQl!5dfb10t?MPl5VzQ;n>WJEl>r3ccRbFaVO7G@0 z62;k;p>)l{4Z{qrn}yh#sVvu*lr@8q3lTAJR#VzK57?>1Jp3l|D{G8=Z+0Uo<1FoE+t_3s>=zs&Qh`gf_~9TKCtUCU0z^;gz| zySZMKiR}fja*r z;z^>ggQ~{UJ?wKw((hgTKIGq&T4^e^mBWK%<*% zu(5B?{rkP&{Qdjw--P}9fp5kB{l1HE|9;@&-@kun{Lc5#)Q$79wNB{6*zaZBv>=0@ 
zGiL?N(%hzneJC|HR9VgquOTOKOA-ZYvJXtckC09BLC+ol5p=|i!PagV1Nod`9;8^; z2)^#TKE5<{AYjNBxtWn$S_>~2`pPi z#kdCD`yn4JU|9vP*~cX;ulH6!d2Y9xg-AGsgS^OYI-k~gkUhP)c-(Y8{Xejq&e7*E z-+eo|=`fdXOGEC*>Y-#WhN<$bb*e!mS^56GgvY%i-(2bL>lp0bJ6-*SzpTqD?k7Hb zpH<<`SYaJamMgCS9NZN6eU-mtygUb%8bMl@wy%I37}Nej<%!kH4W|LOYvC$czI$+=Zlfaew50o^JbQtCnuN9E#1M(#58Z&)sq$LgF0pelmu)|3tB_X+Oso0rLDg-%__@+?AHal1q9L{X{ zyQhF7*t;!Ba~u$W?ut;;pw+~OYI}Y(>tDUp9>XT~Giy*&v(VD=B!v+Nk@PEBV}7B` zu845jM^^3_Ol+tneGJZ(7>UynD|4I{JP83P*IX>V5BfK8P9Sku(VXi`A2R-~^%@0a zmp+NYvNqXtI#KQgjT-q^RwyXVp?q$gz7OVem+1RIKDS2S`*Q?vz8}cvR_S|hKKGJ_ zN0NrtEM9UG;9gY_WxiFcd5y9)7ME<_KDexCqF^Z2V1{e2+d*x*)(*)@;IcWqw|7jp zxN6;OaUBGfp1o>V(i@+0FHwWgT)ba6QwIk+FWb-0%hfEY}IwXfkQ%h7uWS06YM z%f*7&i(F5zEP-wUAo4R}F;eaZR@ntO6uO8C>a7mAmyp+t)mz8cU|q51CPG*8=pH>P z*6f&D7ct{hZ-9|OeoIAOT)thSpkMh_`OSIHJ?>%#ERsTWG|1(KSC8SPaib&f?5aUR zjT-+E5O1P9>Q64=|GPEH)U~M)ypQDY)61beFhv6){GXC#Wi=rIOkszDmH-s+3B&CW zUaM4|3!nPz$-p#%8kO**h+##sm}wL@>^v;bp3Y{}?bHY^7wqjh`$@xKH9hh%AmIKc zEKLD68pnl^o50TEWq{xqY@gvCGkA_CAbHXQk3Yd{75C$)CzHlW)#vY1{-!e&!X0c= zDBf}drQo#e%bFtLS%DIToPdYT!!R>Wsr3re+Jf2SIMh_If|g2RdQzlh37FKk;PI6jQuNE@(SnK*CvX~h*PhJ{4V#e*j}v4e|8Q0 z=CT{mJrdYKO@;$=(Q&Lp^i~iK?&xNMf?KiLu~<7|_&P>me8C)|>-*BK!4Bb)DZtAl z75fUYfv!*Vw(>CFtn2!C57Zg|ielwlj=h~3t`?YzS?gHEiC6P%C2bH)e21+6Op7Qo zLPk1$%HM~Fojt9;8Phq;u|;n?MoQWa{NOJJQcNlyPSuM#Zc=D!(3+c6v9;x!v?F*3 z=_MPU$`_Y1BA#CgfYTe53P3kCmtqt2#cY}&S1(pN)`#ltVp_3^gbqMb7mULZrD&VW zXvl}OBEzgCX_*dI%XHAowA;%hV3NL7+z)>U99YLz#70mFL}VX87V?IyzcfG=rFWQ6 z>)K(`ifA*N+QFK{(5H+34nQ)^4748Xo(Ua*S;Rm{0Wo4|9#$PUY;6vDGy=lhdT|J{6S7n!FpPabNIw}!uRAI14-$}I)3Bw1 zai%`U5E&0fB#3hgJZLwz0xsv{HMSmMMxurtrpt#{6v!jv`w^QL%go~J$XRKVEXCV9 z^dXXI?#SQ9jl@_~IwMU&ZLT`SCa`?|$V-wqfw!i`+=zk=h&zLlo2D6K7*6$tMoNrS z7k`8%c+7xDEgVW|2IeU-jISMo85mY)PEw7-xO#ehR z)WPfy$gTs2!LOe-F9(0Lmum1-Dkdctt}Ob1Eb^GLoCgUzL01#y!IjRkW-iJ@KcJ+o zUyKB9p1*w!8)0t;vI~2euwuH*B#A-{povldVPeIzH)Vt?9E@C-!!+9v6>uqmeY6b3 zNl63h+;c;&5y4$9%JXpN2!np!47tk-@>8ptW3!mycv*C`LySpyJ^PDk_yj~3t#k0J 
zW)-X(N~j+6b-ajkOb?CVJJu)*2?rkygo*cc;t=wWO&M^IQd`+DIUdD)IUTd_=ywYq z17*(j%}E@4z0WB`)^h@4E72Fh))lCoT}cE*))9TBt(T%Zpl`7#%!-DujxYdO9VqEG zUG$6ZDmF;ux(tMd(fGD&kG0_Ot7$neh|>&b2E;NxHH&}pDxS_21J#*=?-e)%rD{Bl zDkvdR6+j7ISHd#OeD8K2EL0n(pkjK58WC3#FcGI+LYU4hEU{^NtAT=w$)D&h28sKT5)D-y5=t9T$#tSZ(9M3{K+zyuM?YsXeFr4Bo`P8e_;;Zeq%!2NuW zM}*@#vR%oUCuC8Jg;Z(*5dq-v+$IgY%~aS0q4c7CGqT-w2E);8$K3Zp+zfJV72hBx z=cpC2FNo`?bM{f%fh-b~AdJfyQh*HHqjIrU`}L?uLMn!JY{hnHN!IlN>9T-_nLK;0 zfg+hU!9(yI$p$yAvwSCbLxLdco;CVu&yCoLgS%Uc^R4~Ll&Y&pE@IIkd26|1%*kxm7Qv2-zI zm&MI%QCUbxT&5jWbiBsEzP9+jo7eSOQ!$wibczh_sXmqleliXGcntiw2Y%9@oFIRW z6C}R=me{=N-wUX|$7|XGa)Q5FRP1Edoi)*rD9^91NkiQU2m8epaV}A;mQ;+5wMheT z08uq(BCV-*TpH<1j2;6-Um*-QGpvG!L0bMIhZo%8u#a~;wWJ}w(h9zuJK^5qC=2># zAtnCwwTj#$QOY^9%_Elc0W=K(Z-WF z!g#+A&Cun+z=MV+$ZnMnHa*0)%vzZfiim?F1?YvPm50u&cVAbZCd6q_d{i zR0^pveSj$Cv=66e{22f?qqgQG4s$--H4#MCS*=nu?-^Wzri)c2`Ill%h3<_nT&*A$ z0EGx9kwn8ny4NO!=K#DBNd;ViIU#M9HSi&-OQN7U*T_{}xt)6COtjaT>nTGOvLMmE zd4+=oubFF~{8`#0)TbIOX6B}cJ(8!&J!&68;7}20$wI#e#m=f$r3eI=MVkY!baDE5 z6YO{T)%Q$sWN`sCwFlxwchD-*4Vv^%_7Kg&F4pXTWKc(aZsQ?*NZCCH%U_TsHVq8C9pFSgH%&wBxw{kA-m1I z<{sg4Rh|J<%c&(0j?B>hc`J*g`kH&DSn~5%wvm=bX=(s*bD-IRutZD(SuLszD`~z4 ztH~$c6^H?pXQS)9Y$e11Ez71z!7E#X88@}ED6gs6 z1o%=h__&`|DG-~7t0~YIOz4jYRzSncqlFWu=a7rHA=gX&67GYuFAE*8G_ubCd&p=E zmZDcLlmb^za^MA~2@8SiERSYXpH;qz4Q>VX`B~M6ri2q-Zy}2g+SG?;O$f*&JX)dp z>;@WgA6#3F5K19@i~~fhJj_K%D_xYaX^3F zVM5>FnX^GRFhdcAR)QQ$jJ3yn6Sx7IUcM zd{*Tdyygq0yZzWlR+yqR;n<+r#egqt97)w>)rE*w@9C!6pv}gUNFtRe%TsO8^dkgd zT*67=G-rKq8a6%~)!CVa+R&xgcw^3kuLv>`KGn6sGCbg{{hbXuuXX`TGIsTkTy_D}JD|8E8VGWIR9}xsP4Cd9dT)b|)35`bqNxWZuIG!jSX*{@HOcZ% z&rO!?AV`7J$bp&=}_(>HwRjCv1_(vJ!nTtSv^_yIC8Q$+uV4rx%US%fPZQlAh|P zWnh*XoJAh?CCzfR8A^g-+9g(}-Dk3x)vVZYip%FN4KVxio)^lrG!#)WIY41kV9d&D zXHMkGSSSS=t&ANV@kJeL0cV(&M!7ylX^hwK0T~|@SRb1)f}l3z+!Q_(q-a`+*zmK04alX&h2ffuxcQ~gKIRZnxF}W=8i|jILz!YF4*@^Hp2ZNM zN>Jlx#c^m@xG%<__C+}}(lKaxnXo8a>kaGaL*bN2JU#6OqVSp&N$45`GV}~$;t(H? 
ze1fp(Q_hReE*}ZbVM|&%6*UQh+nxahC5R+%7Y`X7z-&frsHQA<*D$aq9`16A*RO~T zMLZ|@BK(Tn&f020(V@Dd>%YyaBaB9JwRc1&r*fgM2x`M<9iFVq@vxjPv$971-bL4c z@nl|)DhHmU>&Q)6PPZ@yA~?lRG^iL}M2FJgJSfd$o<-(s3u{;1R07nvZ`f6lljPXt zUq`HQU?|Stx*61p%fGGaCJ>IA!UohG+V2$~it7=&sTBT*K(1DxQFUrHXw(-Vi?;4* zr@W}T{}AqzAOQ2=hy>Ie97viv@(_P@=!>tm?S7;~cNCYSXn`ICO)nq7Q_htU%6xv& zg-&)Gf*|A2EcIA3e|V*as#JN*%BY+g+$eA94bqcwDCH&4(iN^1$#x^?1tTy zUjkHkjYCJRaM1tOFW`T?NEVQBd}je)2nGJyMY4d5<2wuZLKg5>FOmgh9N$^M7qWmy zE|LXg9N$^M7qWmKzDO33aeQY1U&sP}=ptD_#_^p6d?5>X_##X=ptD_ z#_^p6d?5??D;LQEGLG*o;0sy64_+h-$T+^UfG=bLfB7O=K*sTHxqzQ}zPiwFP!lS} z-}6(39{k2v(2eg`@<*-&*!P_1kQdg6R>^*!QyV=z-jE}kVxoMj^`UF6 z=q~H!-+q)UtH*2QA-?Kj)|{`qz&$*RzXimj4Ts|w6cSi0?!@a9+7*?I95cY|d zA`s|uXlSqV!&p|7G#-_7L#pm9f6t6A%x@Vu?&zTI3rZv1Fv@gtQT&iHQ^-U^A__;` zQDBw^%56ndWE9trtCLkl>f~Ff!X?+OQb{Y5%=Ug<9Mx-7(b#6N4Wu~JFQ#o^Nto#| z+W_{9U)3#j@bE&`fnHb#@YxZ_hSq@s`z1bx7(y>h2@V@dq*nqW8J<;y>7|x|7=>w} z?;sG%1c=6WV2llF-PqgMEY;M)+pfV8^_wl=9?t zlX)|xQp{yxac(uBGrRaobB3K|Q;qvbWmAytkU<0|POL$Phc^%BWw(%NhxW+M@;8^r zh5^z+7BVr0DI*L$T+9aJ)j4J>*^C_ZSDhCQ_}lTqq(qMD_$ATpqW+4p{)+ASQ|=Fg z=c4{(FL98IYnNW5woyibmwb=9DZ0(_pYNJ(MR~TlrMuK$&g$(Hb2az}_p>qSjI#l^ zlPL&N#U!A4CAfecD0DHS+Egn<@-|P3P%}73hJDgErQqWknCpboTjPgVM}N?8ez^vcvs-^#~eJB~5a-LXMQa z$GldYdK|uRQo)y*V<)5}T+^@>ShXqX(ijG5(jnQ~UK0&=$dm@HJH`Q9I3G<@sZ4_9Fth#~eVw!>ceXZl)IKE{{AH2ig%7neUq z9D;q!?9(=9E*%D+upzeV9m=NR4D&Far&dvD=U1W0U^&}zXW>hK-S*uB6Ddg|ypSKyB=rMlR5|hy|6suwe zw+O(;O-8vqe7ct@&?P}G7r+F?rf3Vgd+G`#p+6MI4F86hUwkqJ+!_@BS9v14Wk>UD zV1@0P0qU}-M9*17pV^a2($FnB3($BP=tCqio93&g*nEaLZ79mtAvr^MypcDkU7k}&@q2l8rUs{mZ{;6Kp;i?~;thkb6l}u|Z>8YlF0P0b zPQpf53nB}HNtKraN!ak2C^J~YcoH_8e2M(~S}gDY;i`;yX7X~l%X7-^7!5AQ$(Ukr zjItV>W<{_%ecgrYMi$X3Xwbe6xOB^BV9eLSejj#nz^6~X4$H*=Mo`k-A>cHY3#)OV zmEfkM=dzuL&`5rIZUR)Ny0B1{Yzl6Y~q9R#Gk$;FNw^R4!EPs%fC1p{k~~xEL2FzIN|Z( zuEwUbfrEx6iye#poV0IvlmsrU*a^dpzy$* z`1jfYapkB@trS<9HA}&P>VQpKV#X>)MD_`at>0dGlc}pJt~BXnScTYDIq0Sh7IE1u zx@P9hTXsM;#AXN}wOINeET8_ZonCrl#{XR_KmTz2{UQgOJEVoXxPr|c`W6M7GiiK( 
z`E#FxL?OvUgB!|^{uOCyd*va%@&|^`>*eoDLcFy6h$KX4^BRinoI5JxP(JN9p8Wni|9r zTi1AzMUoF(RZdwcI4U|D1tgL`;UsG~Z2GV`49mzVOAji?IRZAUXS^bmj`kvjKz|dz z6*PHqO(#;)NH^6s(gGFdArqyUpps^n|86g(nQ)Kx(Zg+)^4BcskpzS_MM!eeIb1uH zaEW5~mAP;G&u7=mGd23UCFKv!Y7NH7{-|@9NVKiPGfwOa9N#CoAj&=2e_B)ST7&+8 z#-#AD9RdMwFBcx$M=*{2z0Uf3us(>em7z7#o$KC1(H@*YU`4j8OP(%wk$)tA+xa3j z1!6(-M;hnJ+kPoeP6_#_G6b71DycOYheKh5#KN*&g>cPVbWVHqjn88&gF?$w*rK)S z?n4qCDxo?lmF3?|5tqJoU;C#TKZ(eGl87A^*&UbOM-py&Es0Cd`O+Bohy-cHfA16an_zk>NjEur9@q2Z z)$>ThlmGF$j{|XK&^jHP7cjp9>CuFjlBcT24uzRA& zlijR(&NYnGJyG7z;C?t$3vL{oN4guCwz zdm!362Z-u9M}gGCX+1w(JqK?i3_dHx#;c@cTrdBGeXgI;f3^Fa*mz-K;oLJdVty}v z3rf(%dUHLYC{2vePfZxSq=;E1YxK~(B7o7Q* z>%=-}ptA#I$M*8gZ}{gL5F_nB2-q|S=EI1`=1zeLMtoiU{#yZ0V+Rc~3dV5vt8UN~ z7-Wq9y6pa+ysm!VCl&uZV5J)sSx7Xfc&nh$rj#dy`RJSpL2mRXQA5QI$YP+}P^4er zz6(e4E!;Y33uid8kIKq$!?kslbw&rSlTx;&39Va^|CM*Fuq7WA$7$l`2>HBC7&?p$l?$o*ZK4uNj_1* z?kV0W`Gf&?mjQR$nuMoDB?EHjT=I?p+wBIMbQ|bLh9whlJULoL2|}FB{6QjurkoTS zDwdBbCxyqlHF8q;w{lYKl8jA7<)m2Pw{lWE;%A%SrSMbz4xQ1{3@PnB`&IHdIVlJ+ z&v@G9LaR#Td}5fB0>S7wdW@Qs)!L6KcG5X1EVFGaKl0ZhET=!8NPk+E{%oy$&-=$- zZz-R8n9~#z-q*59!L^kb3g*EqQvRF7VX(k8y!ZmIV4-}&Chpg?AP0ckWea+K_iOSG zR&us)*Ni3C8+I~&yQY(lN9W~U&ywD0Npd~kuHj1HlG`=TyWlF5RjDk(&8ma8i>=Oj&)z46BPJx`5+rH&#-XJ=rT;;yQIk&qji%{8zoGTBNEX zr1CN(s&ew?5{=2d>rewKF_da+3;ZY^`Q*>5aGv z@`bLeRfFYFnJ@%g(O%A{_zz^S`)HuYZQ~>5G&-bfMtZBxxDGQW?p4l7u#3r#u;(Ml zKg!XARkL2P2Q`Clh2~LyrUu5AkHUD0k6#uJ5}cb%P=-9>DbdEva02ihK39+4dqy37I%%q z!lePioniAJ%g%6|_eGjU<%K{}HT(V8Oc2qoDszi-uk|kq0zdMPgTR@_wby4(h+JK`R9ld z2va4fhR^Frfr>*Bh814&l%S$@1XILBGwl4$YpsdAVV$E2BH@(A+h$Qud`fI#Jkk7K|W z9Pb?v0HPbuHu8l+a6=R<_~M-cjMIqA)ASaeO30RHo?{PQ$@^Z>^#6JEVWoKcl$gLH3l{HVIgww~8f zqh7D4j!NUig-lced#D(|)G8pqGT4Bx#Ea`*}E)&|x z^POvxUYZPnT_BJyIWEQLTeOe~70fH&qKXdQ{(Q9vTqI=n?&l_K&I2N8Q{}e?Bs7^mu)+ z!f3K0$?0zNQJ!9vov4r23c+d$e=v#?!V@j~R08wEv`0Jwrk%eXO{B*8czqzRr^`g< zYP2g1IO#eGwp5a8p0bZo771?3oMEb%qznEe_94ZgkXVWn!{Se@cUDd9HgVIgUy28`1CGN4WD4e6g)0saC4`njMNNUogp 
zk@_!|vvYKA~eyE==`kHjX5ngd`916pT>QgV^r%W_OHVz6NYfL)fX5O-Pu==_mMiEOqy?dqnf=GGr({CA6 z)BKl;FE6%#QiH%~yt96FnBK)BC4r7VLD|{YAJ-!gnXDz z^S5m6NBQQ-!|m-Pkci($_4{Fdmyf&6lT78UQYXQP$!^#lTr!H)g2BNHdPc=hy7(DB zFOfcpA)L|>#DY|6u=*isFUkyM@9I&s7U54*2rejJk)3QSDUB3MkEl&J^b;D^C0AF& z^H9^U8KA749xTHKbs2?+tJyxTU_^XBp}0fO5|)L&h&iY2L9Vs{2lJKo80m%BI>26< z)jD}u2CyIEFF9=Q)!8Yn!Tw*t0t`=B7kRNI2m>PRZ>*cPtj98Cf&Xbi zxFIwZl+U!QC6^Ti24ts(P#hc|!TLC`UNeGqLuCy@u~Me>S{& z_Yh10HbOh;U?d+0KE^Ylv#7_~5jKWsI_MHeg^m)9ROl?DvoNuY;MhFN7pb)U2Ux>! z@b5sqH_W9iE;<7sGZ6r!0uknBF(tJz}_L8y@UviUva_JEp0!Sc9RXzb|w_2`Uy zc)*|g8&<6_=qDHTfw_qu&NyN(yuf9j1~?Ptlm3kE%)e{@g5+9e+r{d)Jme8y(Ky?H zvlF0dkd5w%IEOBB-Rg%JoDdw<7Cu#_Jc-d((4$0=a~dL2(bM`Sj1jhT|Gk@UZ3MgY zjS_pOzL827^j;Qk#6=HtA#!drrQ^-T>o8ImF}Fv-&>AU5K!%wDHwaL{7VVG)II>t4 zovBKtT$ui}hL+>Tnef}=6hXB&e1lrjVXlJ#bOpyDdSS_s~@n^S# zYQ*g)gAcNc9I{DsBS#Krj=-22J;(|;>_K`ZvEr)z)~J0G4($^@Pwh{ueG)FH{a$K+ zqWo^iL5l)uV-oXXVnl;HFg~dhKCp;cD!rny&`Psd+KZvdx}RXO!UM9dJO9%@eIr}# z)iC0j@Tg$jygyZj&$Q>@f|B@{D~HAAprLaK6vu(6Y$`YG((7B4 z_tmQbhi~d(8IRnss4TB%R|0V&IYfG`91x~qy^btfHZP^uxOa?Rw_TeV)tHJK7s(H6 z`a~UrA&DG0~ehmM(wdF7JCI7hp6U{tvO&jI! 
zKJeE3Jny19xp}#uG2O1E#Dv5-WyK%7pQ6C9{M)~Lvwo2R>yI(!@{5}U?|WoHzwm;1 z;xRP|lP(W@SZ_6Fc0Y9DmK#TxJ3eugF>Ng0p|4ft-+hFk{nz!Q*Kg5P$d{F0h}WOw zORp~qo#UIHBMmJw{kMZ*iD`UW>Kv^nySn_ZCqhs5)6?Ae^xc>}S!f~a&?ZzWB_Xme zzgp!7zwp5?{97@u2DudAREoGP8w;nOCI-fm46M=?Ub8)twc;FV_vg@`5erd%*E}LL zw<06TKY-M&Qdm+^s!4XC4fM7}y+PA3K^SSIXT_)~^_l42mqyfW{zNa^D*uJ8qa>{n zLsin(wF6inyi~mXMk<9aOx5RQ;yQ+2DStCTi#OMAJ+FFb2od*gQ%kqtO3?JJoo4wM z`~io^?r5v-zM>wTxqCSZis`N8_qH!4pIz=U;BNlz0 z!ikxe4B=^t>L9&K5_q%RnZe4@Q0O-r4&*XT>71K7#o)e9mQEe>6!ml$nt{<#df54C zb^fGlpufR0G+fu8qI2{?IEFg(X>W9x?u`!9vT|{vda^#(qY>CKN|L4y(F+O3y1AXPNA9}K!QqgiT87bbAG z^%Us3xqc^cY3tg5+T~Y1u#;>7)P0b>_-=G#O&NTbtNN$Lxz`Qas7_S3R^YkZ9s32d zA1>94fG7F6iyT83;1bNhF1t+3TLd7*y!AS_#7Q_Yh`m5{VlNk1>Y_WOX7(3uR}zk5-^ZrVcHZREd<{_5`1X%^AY^WbUu8`os)Ct^a;yRPw86u4$I z|*>x`CAXjf}g(%x{ccF&#y-^>YsU2wn29Bjn`-s%zg_A*}uaTH2 z(v;@8k$;iEzV|sMILEbSxhIpf_bcssS2w+nED4GXR!KtCNzZ#8Cr-jX*w>3b#&5CP zhig(7l^Kndm-AHP5`jIdaF*&0I7sanOqQ+JTkc02i%`5SzckyroG%rpOx$6lhP<8@ z`F*8a%P3k*usL%#zGoep@{Iyc>C^^w9j`ku7(nHi_c-AA*ihxkrPhW4vOl4VMKU*1 zwN7z{$isW%;az;u(Vg*7L5{cXU;$fljVRHts8qs^lMY?7Pz1- zEvkT7DgQ;l!QllbJifMa7eO0I(IE*^!G(q$EDQIZwB^TLZ8Kj03l#$^FB(T+g+$iz zqsNeSp^zLK-~v6!YhJuw3RLza{cMN0QgqkZ9|OfatpjNNEprsOP}X11cnHH`Z0g<+ z5a7%trKnB`A50ewNzG(`8J;2Wj%zHJ!#bCzjRKa<_e^!9TGNqdPktGCZ!Gi@e zpvzj{z*V35xz*q#N}p{4Pz0cejuo_8GSyYttyE|0;!_SmN;D^#=ihgVnw;WpXV=yA zj_5vM|Lyg-zsDtV`y}gbEuu;nbdqaj`Q(8&<>l|x^34{PSn7IF%ilX){e?=`<#9G< zqc4B`mnhvDD-5Yn{vDyQpmtS$E#=p4FCR(eFO#dbu6y@)x9c4d?|D&67%`HBDO0mV z{1_wNLR6OjzEqBO4op1YQ9VuE-4d zUn|1Bk>7+AV8I)&N9F-JOd0bII%E2UGDW{BJb-W>+7SMXGHO$6qv+QWM6_ldWOA8? 
z$H-Weh=3+)rdz04v`q!ul!YxYR}ic>q+=Yq77~J0B)zG2oAGjIodL9DRZWC^OAzvn zLCC!Th>%MV!wIXe+G?pRl;KyU64RYRU($=kWA2^^`8YX2cPI)BdW2!;9 zZK$>_K7lb(!edeAhfW|A))#`<4)67@TM=f=1BW$d}EDWglh$fM1hAn zNl+_1qoPu=njsXiT`z^A2|H?;wppzJBjl~BP&Qx;!c;MY5Wa35Qs?clWTMHS&e!oP zs1w5V?f;gfXj3Pb`-jwdN1{&n0Mzil5p|}ZM~a_;G7Sn~QEJ&w>1Pv_aYp8d>i`@8 zEVmHX1&Xq{L_6o7)bgL=U1L*iQjn4NG?U26Uj8W<&WGNMc0vIyW}9#HUkp@uww`U9 z#<6cJhR0fm7_!9P2udaVL?%j&Te{g+{gbll7Bcm{QMyvy#MQxZp+4YqEz@pt9Mm3c zmH~OFIO#$+@Jl*3{W+@*B<|X_5@+#oFOH0~S!~b0!nqUbvFD#gs^I#_7W3>i62ilK z20(q5M0w0v<#^p4NPO@?L|Ha!oXDblt!hK+8Jn%+6+|0_)T9H@ygN-9W@#@Ui4$WJ zWcbChb0imvPR!LDouJjHXZW$WEyEYA5PbN7=jK;(K*`Y8Yn;{Sn1WKP3gJh?GS5uv zt(TNP`(<=R?ec!Uq~02;z5epodA_CmIA41H!I^A2GzF&-V#cq9Lb~Goz5?3Jzc7uM zqx)?dhVF7YT?rXf-fe~7$t zfB>oG78Q=7E+};b!9&Ejebuy|?_Q`a5fo5#Oyyr(Q_ZZo6^=>1EhNa-VCL^g^-xr)cYSYk%hiqJ0>}frpikJ;|66xP zU{%^*PyCqQhwAIRR#S9@VPXor!;NN&9g=-v3Obp?vOc#W+#(7RfQzKbc~&TW&WiHT zr}pIvTNDr_cpay*CP_(I>*lMJ=dELP4Q!ELL(ttSTGWkN5({f`0|ZC;G#w#7Oj^4- zriNbrQ56FMz5HW{3_4#=P9is^OwA&4`jP0yz63zMEorylOim99e4xjJnf&O)pjY9r zukyOv`mlp~bb#xYJCIABHz*_~WW|~M@0Hn`G2X6ixKrXKjLqWswD}$FM`mt| z!M$%eOx0}m3PpkI$|0Tu&KuyQ%I#?`{#sB@UWfdZriNu6X}g@f6SNT#Cmt_}SgI8S zaV1(TA3L-!7hd>)C|wlYT|aO>>@EleI?QKVm6DmRtsvAjYRLhBy#*_oR%Tf*1GpnV zkeCTkpIw0kEvB4Emn1^e;Phm9UwSmIT57>HD=LwvoS`Vf4?ju#q%RD0=_$7oIfyY& z=H|goq<}y}T0A1`J8B9-N=CP*JL8XVA4tawsvND;D)O3L%S_0psKJxRq$fi}&~J2u zS7nc>C1^1}g}Wxm%I)e?bTeef0frf%i(q10*&CMEWo_32RyB5Z611`hd0$8MAj7=Vx}f0wpp^93=M zZvJO8gHT4AkcA1bfV%u!43qPi3o1(QoOPtVuCAU5Ak|`IL*Qk>1+apWKeJP!*hK=| zo5BDha>UIviGG3Rs47-zPFmKv3mv*#3`kY?UC!HN0cBL=_RaZzm z5%tdoGAIhRG`uttzbKt1tz)Yto|lMuAy}6m{!JvCHRXHsHBtU7UyF=qzxAs;ACwR3 zYi0R1z7#ku$wjU5k97Q8U4ELc{LjXoKc(j@%G2rj?<|U?8S)a649hSXOvG1icrd79 zsreRH+*!Pui@4+nbMf^oD@RicHx7isEhxD?K<7lLu{Li*uD8uwyh*PaF462dt4K9UASd092np?9?F%J;vSbnV{idVJ}8Xh z+Ho^JyKF*J7i2dc8wumsI+(J*O267)YlY4K4HPxSl$;*Aq8b8uij7xJ51jp^+32Kv zTmJL4Oa&K+ZG1W3RuwMf#^DroJlu8uBQ(yhV#Z&wk2FU62-<9)iM5@`J_UuQ%zTLjYGOQqJCrLZjj zT3i$t0!q6uHZiRZhWQ%#_ 
zJyYy9#uqNQQ2Z%7ATGw2a}8f>sA^Wuo5x}1C$+dpj=&>f&q)9h*n?o*vK$hwcZo0S z-h*XOyNI2x7rrI}Ceuh(@(`vQ3wCBvech(PS*`9nCNi}PC(1e&0iuF4y|d?)R(b9} zpe%k$qkGC{mujxS$F7>Zu~2ER04}-Ea9?r6^`4rz0)#7@U=!CL&bjXeT-HBePs-hk zTx>N=b&R6`)pfx^RxnOFEn!6J-CM_#jWH~3r8(nt+QI;CqCCM;h}JkjB|-)07q?dbAGN_86Daht5&Gr38C3P<)>_qtht*2s; zLjM0mp=>-Rxg9s@#Ap@sWU(-dS(0&(}K#l65uZYSZMt_CI)!$V*W{6}FXEk)HM;MTh8x~Tpw+Efpi zGNmu6DD$A&06b~9#-&F`1WwUJ;K#ixLR2ICAq@=zc$jYu_W;8^qTh=V|EP*RGNj!j zL)twSV;@8O5fw9oXTv=o*^;*>TcD`5d=pZRgkC{`ZT4tWk}P8lJePL|*2FP9AGTq* zL9{&ZX;IU|X_^D`i=LGF3zgV=F(sw`f;~SjhNRT?O59TZ!XI+hCZN&Z)Q7>><6?9i zjpcNqT~jS%yv_Hkx$vKc3M^09DTq-)&V-1lqL1CSnh9Y+svb z%=72y@~%;k&gcjei{!Bhij=UF}D; zjzvq_rfyn2jXfDY5b3NeAgwEC8C*O9s{LIBe@ew5rvWX(hAN^o@?f6A}k<4Bu9SS$`Jyx^zrc%pKe)!$t;V#bcdP9=>gJS#0@4^E3XG zKl~~LL+DP(RQ|9@0KE)L~|Bq>r7B4Q!niqF*t^K+9SC?V3 zyuW|ZEH4OM5$(1@0eql4&exUFa!S5$G=6US)cxSHS` z+gCWjP#K0ZVbT>}Res_R2)X>y@&UfY%6}x>rnYWRZd3JwU93isU~F8qY^(lBrn(lJ z!gRBiWp#SxZ{$ZDCtp7&eI48h-DCz6KoME%pKOrh%DaIAM2>Hk;$bpCs0+egj5f;N ztMLzHQP#&g$cwK91?*xIRwm*Jt4j_Bj4 zJ{I(`OCNjmv5$}Z&)1Lo+t1C2@}NGB=;Nq97WA=8AA9t%Pamf>kTd!?tB-T~IHr%| z`Z%eNQ~KDikAwO+qK~6|sK0+U4E-=A%vpxMB z_J3(Mr*s+H?oHGX;3V-i^HZ{WEXn3HbZz;lTw7jR?n|yM!{_Jr@%+2W&+|3r3NuxH z^>bt|=#+oNm!ALYn1W)n4Ly3eU@KC_G%2qT)>_6Jh5y#I;if`>uNJ4&Q*j*7?D{^E z=5G^+g9{yq5km*0`+JOBw>av;F8q^i()*zy>#$Fk<_fS%cc|6V58FW;%J)#cx7o`1N$|KBn!9FF*oTb7zP%Yf%{<43+S4T(==UE=Qz zr~m`0QNBBo!Umoyc|!g<(RVG|DyYv_^sS5SVicU*;zmAREzZ4%NmLXr4>G4tKxCPO ze!2G(`S76Mf|^iRq9FB;%kl;U?9W}Ii55_+<|}(FlnxAt+6n6bCIhB``6&N7LzZ}7 z1M(ahkz7*Z1nLbFC#a2YC~<-e+~ZLJYCuKAuTJ5Xz(>F2NE*LbDl>{Di4#)hrj$70 zCFMKbzhD9XAJ%6xu^LKOB>SzFmJ<%D!3nmAH!AF1{A2s+$}NoolY_2~_T5dj8cSb3 z{oc2kRTZ6ATOM_njj>E4z-HDVt%|; zDq>)y6qnfKe+&9bTP21u1{u|qcr%eB&TOu48}vOyb`Q#7qW-b`f7bIG<2ghOYz#{q zV$q1?{|lq5)(0TO%kaoY-Jn@ufu>Iu3RkxkH?0HC-E!hJ>$K?;$QR}iYa;oby)&ij zkUJRaBy&gkBx^)+O3wey_##vbW{GWV!GoUyC9raii?$+WCIxXPg`-Q)Bv*3?95X58ZCjQb2C)VeHxSAO8E`4=zTKLCD)5td^ z)syfE0aah!xJUdgnigXoOmD7xTF*4lUKIR==Z9@6H}U5;Ffog}-@J)+6Tw&O#or_u 
z4Zg8xz0x|t?^q|@6~(Vl6MCbPU&zN#_dYT&M_Qt8g?l33s)mck5?SijOscg(LWf6z zS}={mOD*)D9ibPOVAhG!)tp}C>6Msv?*%wY_i6FBPLDr4x!e@6( z=+9AS_YJajScU244P~w1_Y5HD6*DX!fWZYW5Sf?~Xl7%|kbjNg_2wohIt56*`J%k4;x_fZM=w;7Ov{7c>=O zV?Y0?XbpY8j?9l~CSu3%8#8GQ2A>YigeB6pHpyvZY!+jMzqzyEQo{3}g@}>JuU-&{ zF;?UibZ3ogrFclO!7jsN&Dn27Un(5w=AN(KNuXILb%I!+d=?jEc#<6XHKEXBPW zJFnRCqC>S5x&yAheR<8(D~f_Y%4 z3=+kh8}s!0n5Tt~@u}IJn>vMVIkQu~evs*&Osb2rR?w+)y5zk$oGz=KPqk?~h5ekj z;Ax?&g&!`sSNg74cNQFbm>6EgwPzMhI~JQYn1mb6Y#*=`T5eJDO2m!9>_cYZ=K~iI zYC$JiY6>e(g^ZyQhJ;XwX{oot{F2aR$q3-4LSsvvid<-^w`pRt)(^qa#@1Pc7~Pt` z7VdV@fL7|#Aen4O#NTghWy&2^TWWIU?cQGW?}qhj#`q3-#;^;Ky(1uLihfIRS@Yea z*IIMysUFMQLoNj&W0Y*%#-#z*m(Z^7xLMBIsj&%-lC6-g=ePj}kH#S?RzET83`R{j z&j2OwEl5eL8gkWgi15v&e2`%=?TaCY=aLeRqb1*CI%tXPL zY)deQ$?_PE`(-VmzGS`?of`QCpdU`*GTwTr3h==@&Rbv;o9i3wH42NZs#CGMqQ=oV z970t`XMsWy4Hac~9%9_QD;Bdi#mQ^UDDyr;0hC7r0DoVx8b(GjM&sMpZsVw7CX>$uIoW$%;3giwE zC6;JrKVxun5#jl@g|nRQD#)68!BJngz<(!ouTM5natUqMal0lEoy=f(MY`l8jl_&p zEqvhVu}Xhe&Sj`m#+vjIJTV6up|_S4sVwl5Pr$T)W{I&t$bD!LLb*6ZXQk(74cg#)xHSmkjsH#vnG^ zxCHh*J7=YF88+sG5j(Ndq}7v5{xPrLF5<~O>#WQ>K!~1XuMK?B;7;_X zg?^G#OFE;T#yk&;)Tz9L!zVV_lRd)`xQ7qWUE1NbdQfzwZOS%M`AXDkwBWyK)7JWt zEyNS9sUs;qF_Kv@-_w+5%}}@gGk<{2`Mb-nd;y&^*+*B2(bvnjeuk${ewt^0F}zAq zFF$mErqI+tnTVV%;#Xvi&ocz`D`*;^79^Xg#5KV^ggOl=(^? 
z5WEt%ilI`lvANcI1(G=17>#YJ=^DBAtY}xp8#=^>t(*6&^y>x;oF}n`pnw+NnZFJ( z=V5%U;eHpcO?(8O=9uO(56A!$1b$fV0&lp@$R2G54Y3V@vR$QNvWrCa48 zJA&k})HNmB+TegCm3>VCd=b87Z?&%oxxg3S8P;W%N;{+PC|lW_yE~?P^&0;*nzfqk zuD&Sr0~t2Kw-Vd$43>f-If|U+w*!`99fnXVN0F!TX#jSdD$g~rWZF>#$4xtm9F*8Z zc2!*qz}Hj1@zG?X=Xbdee8Y-OQzyoJ3Gmk(i?}uHd_gsKyT$^aj<|(od2e%(r$}#P z)a)%bYT_xfyUB6go+5|T7+c7G4T)e_X=om18Zv`R!<190hlwx23HuW4(~Sn^yI(gN zlpzRWrfCJxo)ac&KOB*?@xc+d8WC{|g_d|sOPr+k#XSbsM2|HK{RO||3ZAZBG$OSyOrzMZzLX~2(GHa7T> zW=8YXP{YjuV{-n7x#o=?otw?bguuv?$-k(@#626pZ&7d_{$ej>NTa_`^S2VqcJnU^ z1+@vPIu|@V#0RuD3|ZLt5FgEaB4LR88u|snX^8f@92=r~*AVwPs4a`cg(v!`*2cI3 zot_qToNf?8SSQeqowM}_+U@4QtPps3`^?ps8HzhMhZ&7RE#SU9zA?P*7|ecs`B$F| zX8&7_;D*OAes$qx9FOh5z#T8$S%Cp*4xljB^rxn>twmS-QL+c8n#A#ehQfXMx+hz@ zpv{@3GLrIkgCSH|3goBImBWIY?%KAZZJ4W4G=9q{OrWKuuJ6Rug%vjr8VX@ep>5-i z!fYMb;cjC{oZSum*Q)fbK&h)U12HAKr94(9@~=(t^9g?H)*JI`=l`jz_U;&Do{?iY z19u*B&Q?^WZduq52Dn^B)Fr=oojhWyzV4!koMO5*!jR)QoMLEnu~Q7Kj-!u!V!Z1K zyqKgfkRMue^Hr2sgGT;GI#qUY!Ot(X6q2F}6~z{N3s6tn;(WKS2d zR#1w=rdV|T9&k^~uEDhjd_NM!B&PoQENQHnHQ4-cbL1S^`)wsE_YcX!0QNV9v=Nsq>-jHKCE_BL{pjIu1W*~H0{3{lp{-Y9pK{e*2tt%6 zxr?+`xHaT|Je7>j+(+k;PhFk-a^6oyY=7VWgopGMi&zK*^VdX*wyfQV48v*Y-T;EJ zR*Vae00?i-uE#fQ6-*Qhm}VxMYF{RhfetljKDE{;RuyJlS~R(B+B3fskUm2Q%%u%k zxSmu@dBgj3UD_Aj?HTkOQ$8xl4ODTCCX7hp1^iUGCoK(+I^R)su9u43EjS@N;+@*k zv5VuD|K zU7y|H}%O8?Q0yHByq1hXQ6G;*br>B9_0>4WRzkHr^WkmmkISBq2#vC?BiRh8B0$*?~ znm4WkjOMl95`|&MS)rL=$1OBP-M>H`of|6H?N`u4V5e;o zeyqrenms7YY3F>{Ig#0vI>y<{o2(=%iD|#ghCwKl3r*g2j!|cvM;#RuBSX}pp~69q z`NEI66GuAN>gLCo=lDue?rdYoX%IOtT<(b5#AGag{`T)YV3z2!|&ss~VaU!9xg@DMk^osVK#$0Aq>=Li65 z$`wu)ilD>a`^TOqik|9IQjJIke7l^-{LgC``rxrqcR5pW3UJ>Cy?af+a!QU;l5S)x zM!uP{)0PaRx=WCMX$oa;M9&f%9}?1%$pqMRe84m$(l+;(j2g4>y@W>o1k!x<~G|u(v~7hI8(X^MgV?E1D=$HH2BxEcL=VlC`k+%?HjR^YR%X@; z-Lo&f|JDHYBOW3uTIT*vMJEJR>qcz=N{>SDM*f#Mg8lgH1CYal<=zkB`ueHHl2Q^* zMvyu07rN|pV5QUa3!G2Z5g?dg2^OYbz7T8H$<zWM4Kp?!A!k zq{GR-`etkxf0T&vJRY!C$J8e?uaJH9fO~TuNJ;kweIqetYw<>8|N(4l4zShp^ z$hm>=3@R9K@1#M9q^AI9m8|L7|a-YjM*g=Y@4Oo9& 
zMhxli42l}L1}?r18zIe5G9XqEJXqmDioRojDeB>B_f+t8EA8pvFqQ6)t+HN(AYkgE zksq|?W1II6G>D=iX%&iD(EY0HwUqh)xqJ5@yRQ1)b3e{GeY#J#v?aA|*%EO2AVT}b zij=}wiV@J;WsI0OiSI2c!_+V{{6W>FB-b6wXgr==gBy^I41&lYf(!yTMn;0jMu17o zBcV+k5a192CRSo!B0%ISAcH~-2oQKa-|t#`_vu#4=3!&sYn6NNv-jHT@ms(3Tfg_B zffu7N7?5N+qllrOdPc?^pQL3qOsxr!dPX%__(w3MmSL?OhVhEI!&sTToVpNSkZq7V z=(iTMeh1&z#hz_xh=Jo0e$=k===}V6b(xM8ntC)J;G}K!)`VXJtmU|ZzDa={96oae z7tDT_QLOhsZD`hQ;05_O)WKR5K7gr4f^PtDELsxkA?eRU!FC^QG znod@t_;q@JZ`v>4yWT@|kMeL!%IE^)OrMqaOUs4fVbXT+o^TV{U|@zvo0M-Pts+Sb zXt5$$pmc-{ucF#W7a;zFX1511p>H^9a2wWx9a6H%E+uxA%y2N)u&aE6RZ1*N-zPRX zVegd(8_sGQtUtXHv%9FVBB73Wnt zK6UX9x`>1{#J8l7V@Ak#rp_{5ilft^h&L`#cke#sb1a4Ou^>=~4+Pq;HHsUhl0%|ILBw51KEV|O9bW1V*m{!Q;>fV zE|uUevula*1*r-Qp+a3B;)=*ER?WjX;o)e3htIK`kQA>Pz+^5B-h1bZJyN`8@@$qs zP{pIKR7l6E!9-w=!-;aV@Swy}l+wRkIMLrp{hq6uUoM;|oOR(o_1b&benO1qV;D}< zD*i5p6It4tKvR=NeR~~FWOku1oX9MYqXbw_EP486eS5PajLTVQxM`3OH=KMx38t&& zQ~T{eP>U*@DDPbN%xv+sO@#~VAmpfrb1c{b-x$GHpIKm?|Bp%geCH&pSG79JO&e?AHS=0~cbW5`dxKSq zZyoP~|E*KO5?Le99B;Al&fH??G8>b1jgciGT@}w)%yVE4BCq5Q@hXdO>NxwNgd<2{ zY!54emX#!&Ne53+9-$bDLrzU2&LgO#j@4yIMTM>dg3z$+dE8}VDKcoaM(m=$1V9n@ zfr=1b<+d@R3N#7iL}_sRF;@~;W+8v#r>k5H(og@ervR&DBD%B8LH z1)-kvF39elLB45KG!H-_|UR_zsyLCkkKE<`XlcF^|OL`|hhhBYB zya<$liH>|qMR)6HIk*d8Pr;xJ z+Zzg2sLQ5^!%3~Wg1EyiS%)odaGz>{iQxR+zHV_wFHi(80WebrLU$JkP3!4a ze>H=ldk55NlpgP_r}67W-ZrwNaJ8z6IR?qoiHBE(*vE z>w5vbwRPDR!pJIbOm0P)cNh4AWGREBkruiVX$42+wiGTC$prcp_$rj_y6$iFI%YEl zL^#XPLBldE`2dPzssR&>zBmfpU<$<@h-HR5z~fNelZ}kF#gQJ?gn<=+3r!wZ*a<6x z2yWDA&y)Ud){rILOC5d^+!0c9I%QL<$+*lK@-n9C>G=L=)dX_^MM4PxLI<7rwlLlR z1utbVH>jk~%^C2#?L+3pKFQH~WwKLD7h~Pg)V!VT{ubAaol{;5HX?KfY8zqTc#C05 zrPqv|9T+7aq0xh&MUl?YF8cWw9buBT-YJcPKJ%9~54K2?2pik%w zjNV;25PHWLVu|&Jwa5y#is7(P;0P*yKyf}CvgQ*Pg>hrqhVjJ++9h?agbGH7MJSHa3P_P)Qgfv~6r&nPeXy7H zap=TEvjbN!#FhNqI@xPPdodCZx;ZpBa8cHs%v%~igjgCi%7`>Mg7-XqvO31w!s%ao zsRr>+^(ZYc9b|O;2#kyL42|NTY>V?&gIXmBiYy-_a-c@iaH1jEf8)-Ga*#TUNYi!` zLv(sLI+2$aBAZ`|QNcP$!U6&`g}*iy>~oa_FiAac7zTUN{$HqaMvrvY32`O4%@^ogLF`f~^U 
znHG}DDFlTG(2TV8o<|>;*n3F#> zY&9s8v1izi3IOjEd*U^^qiL=meqJNK`v?!?>%dd)q1@C7ik+=))VWeFl&i>U{H zO_1wcuxuYz8Hv{@MHs_Aa+X5IX?3qRIcTy<_0HPEJ`|)l@0`Jp+*|Fr*;G?Jx1!3$ zD2RRVR&v&wY?{vT#NCTsrBktcViw*46*Il{R(YU_JT4CcnpBnNq88K{Cn&w8^fa;4 zhKPd-m2=mXj~@?C^_ybrR8Tv8nl}?Hl*d$73G?8(5q`W0kLfrd^C=>;3Z7#l;391b z2*YtSjz~f*B=s&R7s>9CNN*48O6NE$Bh=f^&7Tko@;=a9_9kr9k>;KxcL+;pM%in- zb3{0kC#4}mR0NbLnjZ4H+cHbLB2tR;jT9QpBW_JYT(uB#`M)3mpnTBrS1b(szc9mS zss`Pv^Gt;bwq@S=lz>ourENL(AP-YF0P`&E4Zcs6XHO69+yT~;v~>4qUEQOP<96)8 zKI-9-U_(Z(ZBSS%eEO1BdGiIUA5b-L?T2K64oj2a*x_BhK_gVWeo;)6M9+hK0NAW? zr4p35;j=lqS&0#S#S%ldQp{@LRu{2NN7{X4j=qQjM?C@!PdYC8bXfPAjX;r)o%#uu za}5JNj(3r~otULNQ`0MxhwK`lf)i*MRc#kv1xBJr$5AkC)jf=v?Du;1Vho@f=-s*I zRB08#m%QT1NGv#8o5+k%Fi+OmmNxKH~SK=9qrdQh_bzW@X=GZ3Dj`x!@8$7d81t zKHsuO+v*%dESyfGh$UxJ1F%BGU`%$mWpeK6L%~`E#(7~3svN$VLSiaDyr5UxRiTa1_5g!H8_1;gq=^C(M&`Mw zQdw~=H+epT>9(GQ@eQ@#TF6*7GjPv&WtbM(LXMH|8r_bD>DV6^oUuknq_jCYAf+1) zHt5V|xU)Qq3t!hI%OLur*P;3BF5R*P>Mv|P?!&|%OoQV!30CGkG4FkECsEW$-x+u_ zd2!fYArRgUs{`{?xaceVCcmSGIOA~ls{HKXa<0Yc zhx<1wKWn&0vnmVM1PW1n9rH@*&8;?FNJ9pna3WRIcs1K;(1__24*{% zW1L%#@QxTORV;>`3s9jT)31pgQI|sJ#$%V(s!JNfI9huytJ&Bq4L7=OF-YkB#ToT! 
zxSijKO4$)ZIvfrIl1F)CM#GKsi#Q_)Qb?GErNgN<<(}?*XU)#z4Cx!5e2~8334-*$ zWmPZ-IPEl7l?|R56izn+PCGnBG5}T_k`wdJIPJ5`L+Z7F(|91kCJU#*L#>E62-f2M zof5YXtI?;=Sd74GW0J9&wpQ2}$F^%523XDhQf4JOj@ejEJ~&~u^TDk{87|ePF&oEw zVYa#xFk5#4v(-U&Ok)`z1O0^CL;*2eA$5)0^IC6=+XyZ~@%gMbfJOEe1j5nE`vJZW zR2Mvj-kr(l`hJ*Hesy>bx%*g{Qa7STVFc}R64&gpG@F?Hk2T^+3E5}KC3I$w8;SLF1Qa1Jr6RQv{-Zb1Ysm;nQwCv`)`O4&C zPU~7WW}l&|!u0?eKG%LRjp7!po|{mORBtckjdtEVI2|H?i0gtTUN#f3BOOnZ3sSb}TyFXOK?zeIBq-sx%YVN;1SLGqWao`fKC<)1Cy4C4ZYXPoSiiXm zErP=!{{qhbPNRhV|49oXwH16AgeWbmm1F# zIqRa;nMrS{a!Ot~#2Z$6U-C-)Vbl~YuIGY~MVN}4K^hZn<9Kjyw4>r?k$@HU5c1zi zv4QyaBV-@v>|~5BxiyggjscpmEmb1FZnz&+Aj5KsKuj;+P>y3L_BJk37Ps)eEnaMY zfBw5B?p47D$O;6yD*{NT(9`PK=CFot2whG(@o-82|V@bW`%N(I4iWOSz)vngGp~< z@--{A_^ z9h$=sK|iZUn#FbE#lSu`?9unTj#OQU=MrA4c_p>6gH8=eafY+IXA zkj3nRhgDEInpW|Kwuu-4_%7If$M5A7WR6_$h+1kW>X4#ulvt6+f)HDX>U4PD-teq6 z!9bT?_uTY=!hFKlN7W3ZzTu#NR&l*5;6PxvLj~Hh69kXO+u1UImFOv*8H7N zeV`4oKp#Reln<~XZy3%7LK>FCF%4+tGQ$Fv&V~it;$NdUSwQO^)FWVsw@aW&s9ctg z;qnct+pk7P9nV!x0Veb~#lywCZz{EF<$OaSAK!|DTp%He^tYPb>{v_V^F{{>E3Wt` z(7KGq#q(hy-dv_v5{pLFvTqQ4SNX_ol2B@$Zr{ z;L3GT%nFz*4&023c0Z<^%DL-~_%e?K&3?3$_uxpdEm(qv=a2e(X54B5zGU_;_!TDbHJU(d3gziDZT} zk<2HqQ1*g0s&sX*EOZ_Je5r~17KIR+Cp$4T@=;tCpthJ)*4R+-+PEk*%|1~$E(_&o zW@GU-9Xz$~oLL)MnEh)*W&1Jzf%!0>_T3vmMaykkuCY8|eY-0u6xzr6(1iG*O;*Y- z@vG=#F(BLFR?+TotfIq%!AN2UToY!hA-xf!hkaAsUucTnEb+m*T;ub?FHYdEfKeirot zqtFM;cMT21_|aFpf-vD7Kyne~OC`OpeUVX)rUx#>Vk0MSyq; zT=eAAgpM?y;0rdq%wA|!ZSr@b3ThGqQuRtBEB>a{>pNGOS&LPJ78V6uzL;jhf?A@q zGX-C&)kg7pg{1(R^`Hfqrp*d~zq6IK~7lr*ZCBMM5|geA~{Z61?YTPn1*qx(;QhGhc zYR;OJ)ja5iO#laIZw6+JnBSzLM^Fy^WavsB$LZb0ha$WJ#9mTCOr#GQYtk1^&0I?z zh8r{12R4=KgUU-OH5)24st%ZHcW5q*o#iP=;FE*73EbS<8*4u`T~m8YLj zJGtTGS;|gr3VB|T(|tc)5xHMTPM2FwKiz89^vrTPU4@p5T+nyjRoumFIeq4Y3aiMF zPJY%1r*j==X|zwt(4pE$N}+)RA$#~BSa#c5GlRwV&p^*ny)1YNA`BMjGnF}l@?9=W zeSP3M8%5b6oqb9mF3ynYre#+mbzF0$Xnk8m%o#!2i~ZdYuDAF;ce{|K5?_o(OZDcT4;zGtYuW@QMQ#O?li{fxcVxt&QRaxtxX7!yM6 zkbtCZVt`gh-Ft|$?N_BQ?*EeR#dh0u@(H#n@=>60_%JOi`9wNvIQ<=bN$jBeb{eZ{ 
z=$@EI_CtB=x6yCOAC|gb-rpAnTKDBBrNe;f%ggG%T;AW8P`(D%E9i?rb=u;#H;tqXD$>+ zgF~rtP?E(^kdK*gHGD*tSo3tlGX~M}{gdkC)55a(AGb{e7>srugaD-q`TMR9|@R8UbL(`LgKV# z>fa?Kl#fGaU2adQku|@St789f5FP9;E+>DOZwiCSHdkwYn~K7;@s~(I{<49}GlH>j z*s@$hNlsVIlVG(21qM-igK~qM(#J?-{GH-&lc~RzPjNU8&Z0^VC+;j9M_mOO9M9Mf{3ZZWBReh2-W?Hy$CsL%C>O5`Er6M|~r z1@Sx@;*5uoRy75sJPo0n!l6=e%3)14L`3M}-GJTTumh@+>` zo#u+XP~qPkv?3ZH=FO{+6*1fpMcUuMYN&WA&lCRRr>PUNE^x?z&9$*%C~7*$iI$iKAFd_?wR{lL}~(A;hBqXoO!DJY52^c zc9+mklG3`JpC`pz*wt`|%+XeYIQ`2&$y|_SydCUfnX&7G=BO3yVJ9+m(OnX?#pICj zR=t&GuG&6eSg3dx6&o<8$iob6C#*f&K5N<0reDUW7eum2#KRUFKQ^zRL5wNlkTjIR zFfM1Q4AbI^3o`^&*}&@Mue1hwxH4Hv+bxv9y`ep*pocwk>EXsrynGc-1tXt!HhJ>t5S=OVae!eA$vOx3+HGp3v=PZo~z(;9rX$$_HBN z(){XPUkb^(#mgwBRBM`Nk0QL>Y=o}Fu?}Zr?MtwuO)s4F+ZM)_C6i^)+*l}PJjFH+p+n7tIkGyPI=?^ z@b^l~H*+bDE$=K;6JJWoPd~WD=XE{qjxu+h35v8pS!DfL?q3*4!UBlL8haXkgN`zH zohfnZ32>CDlL~+TZB7*Y6)t@T_NC>A^!)7d&*nb=lAbRwKN6omm8pcqIlbrO8tqailWOO{+$g_i3}^0H_M9?A>Z>&%#6>bBwf2TC>${*o4>+mZQ& zws+Gu^tYmHojs71K-oGoludgI+L~SIm>>a3`=lxZNc+`0($eqk>t*aQeT%)6Mq5c_ zbciy*m+ly{&6O1TvV{aIjBNup#%Ok=60txTb;(M750MHgt6+u6D+Mt+p5Vs_5errr zX@_TL#uZe@`?9nEZtvG}ceFo*nP%h6I&El;n3d(?{-_9VV*@Lb32~T4sCCkPt>>Q0 zr8I$hj;5y~`yTSk05E8Eu~XXv({q{jXmbKa!z#t{LWw;U?!A<%vU6LIEy_r_=g(H+G>l2wl_gP8;>{5%R|6IGZra<#TbgMu zje`#SjEsl$k214_jma}anblCH2=p)HAgGR&$E-IG|Kao6aQt@ zDD6)*uz?rix&!N?qn;}sM<&LBV*bIsqHl2MCT5S0S&74AMZ}SXKu#nw+rn7!ldx(c zUh0Ar@sif6cPJ8C5}$dut8-2b)d1tF&{h@NMxj=qgi5Fe4z59o1pG-sz+j}a6Kirm zMsn*$HmFVM9C^eaIG`wnq8SQ*#y(5AvnRy z3>Hyo^+PxcVwB?!(rt-c!n!=}T*vMwE7!66iQ+oU_}!h9*S|I}evh+L=&hOnnRuQ& zUjMhADf`85-s}I5Lhy_PzYsl>%1h11D`!3~odX#VjN4Z>`8%<(=iM|GfOcMq^pBt) znEtYJRp)m@ee#2Xsk7z7+i$QAkYkjG=jPFq+viTdu>L~Ez=Zq&(TxQj92LSp!+~p^ zVvN-)ZfI%CfY#=0cr9Y=2#{=+?Yq*-l1t-^CA+pv;Xv<7aJV?9oVxuc(%|#^)t7oe zYy&!!O`F!k4-afo)v~2ET~Iam8Wvh(#zN%31WRM_4SYxTSSkFBjXtuDGpACYq0QD0 zY^46SOQlT5jpED1r^^^e_ugHfv_bc(k!EGGo&zt`Id)O)O$@CXXc%Fy+20Sg(&iqH zwgw_$NE$v(1F0~wnM(RbCTPWxX}ZX(xJ2{Ou$n1?@BazYP&vqB=Vo3t(FZw4J1?Iq zs+@}-^N}6AkTIb0KUa{$+d2nega(f?HhFbotKG^H 
zY{D$lI|c>*!1Cu#L}*{zTA|_ei40(L4E{}?og)86Gqm<{+Y_$=Yc1)NO-6GE)*ziW z*w~p1lbgRu%DcZ7&V}~x?<03kUuU^ zl*5;!l4!s#WQcy{iND*)>3fu>2U64!DlBaf3_~~~!>tiu?HbkiaXv6_vO8{MA+f~>7HN-iXpP`h4Ifqu77)h~3otPT%l1u0k3Jh&6NY<)T&9#cp z4C$mW($R1V##hMUM`yE%3@9iu4O&H=Y90?DZ!AM+zA9~zwy>?-u&RLs6fKa*$b-+d zeF9J^N#1Of17zpIk3>#)8XVysu&gpVSvCCh``f5`Pjqj#$Yb4!6OhusRDYh1=MeuFgm5nid)<~){ zRwJ2kHP)(Y?q=*Vor~S1!*w?`%Dtp~)pfs)KScQ=6I42*Uc}>8L;)*_)r$hDj?bYr zR!Cq966%AaOQlbAECtLe>a6T4P=af!>ju=C8#F<^h>pKF7E}8H5@8(rXe1#tXH(Gg zq>0VSgVpnCIR`sKTc(Aw^-z3t zNiTSmj-&zaD|}D;@jaaf-@URV6wq7P`-aFLOmXp1W;atvzf6U-PCwoQ)d8bQhbc~Wtx`TE2Vxn@2-NF$-h>z!`Fp&-_ zkFA=c)%9FZmR<~fJ8q$F0>q}P3Y3F-YTDV`s8|#XfXz5ZPV^Em1=Sih>}QFJG1ItN z^=P6S?$=5YIVVca<{2wV0TU;Es!C?X`?I6rG(S9B5#tu;M$W+xNElKsW=|vfJW2hX z1^Q@J`MEtDrnvnn;pgv^ueRHXOF#CphR zDBfoHsYUm2+~h(&7OS7>S+SLjkP=*x4*xk#o6kE)zWl_0nDvmiSJOi zD^AZqn70WzIcty*6y+L_(_a{*9;hQ`3*7|ng+w+CJ}4OkH0(6gpIZM(VzAwVYRX=y$x$~ zf1VnU_@;0~wby6}nGGtx@@^F#>q}|_t)TqLhV^%^NdphAX z9<&TQBeDQaLq0Vz5?OYDwqiUhMx3sB9scUkN6i|$>HzVzO8EG3_dV~7GG zfC117wh6&Humz_{)bcY+`wnV%0Eb8}R9*s<;)tQMXukp8VR;ToHLP5IK@HBPtIgv> z;=HXLZ8G;vSMFSR^G@R-!K&3vNUqMx7#WEDL&IVXcC$sW3qyv*9A zDgb~Z8olH44HsJj#jXkx@)MJpxUKQyj`r6Q;gnSL6vAY9FV%E7t{k`@wJbgf zte4^)-`Gq(aL|>YSIx07+&R?Mi=%km?t#A+&Ju!P3=Z*kyrYNO0f35Ux;*R$a3wz2 zn|4`H&;>*;yYAt%%wcdoc?OR@9DVg0E_+f}DDnxFqpF9vY7D$O8E<)tpf7N1TojY% z4Hrq}-=s>!?nb$=6qhAmg-Zyn=P(-L*4!f`@#1f&2?Z~G^-P+PWQt&f)Rf7g#uphr zv9I{$?9|~}4IODyOvj##5(B{M0dhvZsmzjOn0_|EUvA>9P??$w#+~zn5xtnf2&ppo z?$i|+5h-Bs;az3$?bYu9zWwnQ@a@*EyV+3uLh%itrZJTZE==y$b-P=g=G89?Dm&pH zFewZx60yF6JWbzXlLU#X2~?G+EeL!hHM_3-KFBprz_cF8B|I!n~kf#b&8ig$2ghoU$K?KTy1 zGz2gWXqQt-tbB3~=PM7YS1KC9e6bv@}G z%lB}R2dx>%h2@uS3x>?c*;4rPC#R+G=l|1M3Lk#5nynr-Bl`phg^#-&K6&<&R1Tj! 
z+k5vId&qA#m*UG2yaPvyF5M~rP@v%ik)z+z?lrFygz{7crCm~f>Y7c+=mVR|*5)fA zZX{xYJ(-3-(N2^Hj|UU7-zk6GuMb>VemLp_{r0!YPizZvvVV@3pLlc73>?qWR#f02 zo_Mpg{?F1fxsU&#F#%)eNb##Su*FEr{RcX*7)IN=Be6GDgoG| z*2_>Ob-jRHr~0hR7wjt17+K+{FaI~)VM-LUjBboqy>>J*@uS@vSjLEh(+ckc>&0dy z7AzKXiP6hrHmcCUaav-ho86L}Uf6H!=?Em%q6&;TMGZ59(8F%+=B%@a|6!YtZRX|Z zlNOX713BHKaM=YC33_y2L>xS=&sD+@fVcOd!5xC}Yf)Jfug3yU&%GBMV}MoS@Y-2mm-MZ6^jA<8GH; z!003nxMdJBYXbeXGe|eFjHVmKamC>qMr*+&>g%FoExt<%35D4=Ky^ANE_5xAzc<3X zM9Ga8k|~u*0UQ9*3|`Ll;yyFRWj|d^QY+UI8H?&K}mG%s-D8${kLM8;`3PEs! zO~jR~#uY^vu!dkDn60saz)%}kBn5TVl7~ZYaGIA59rtM&27Hc$Q4p&`s5S_j!g-LW zCtdUyii zxzFT*rdArR%Bn2&OR6d~Fj+#1pAnL2Q~|eeu|sLVN_7iDcn<%VT>xz(R)aDxJ~!0= zo%E4VXg;xdB@m-^S~Dyc@cClC(TcHqU!#1My!snuFQ|-=Dn>1%B02mMI>69a62>;APp1ld+)Zf)|-SijNY+&`m%C^?e} zxRD207_loXXgP*R4+W6Gq8WSyFGRyCweU2QYb_0?(o+NH`)&9kYD@AGOxpnmfiP!7?wtAnJuk$(F{vdFKnS*zc0!>mcr`6lV_2 zslVWHn@Rg`3&K7iEjjzZ2U=B%S@0x?dvu3*4oFu2QPZ@gJ@#oN6=n`O)~_c3F#p%i z_<1ll8f+y)p_aB=Zs(#&{;LiWPrChKFHqeunjb8QxCCff0+Qi)XQ@)5V&wy0er-`M zDBejPhboC{UokD?rA&W0Fh(Y{d$$xOW$f(j;3k@(AWn*q{fEN>XgI$`b7A3W~W@juVdet)Vnh}=OzdQTGt(<<#=lH@Uq=7{e{$J5+*##GAt}a$*Qd|NP%v3l7n0 zecwm*o#uK4^Th?yDhv0=iG)%D8mASpmK?8{nyPn|Mv<@YymR|(za)OfS{nK2GX{pk3*GlKKTE%_pl=e z|DRKj3|?-`ok8%+7?BW-10D+6X7BQF-rAh(B`}BGx+@Z^XyA zne+GvPSRg$unM4}DXe8~%9yRPpeTQEepFSm0j@6IIKsl=ZbJ1{l(x&K**3c-$Gctc;bu0b}jW}R*1RaME?W3*O<*K~Lbrwvu_wqgXh zR<^9Ih`CB6Y0Yow#f$kIzfQLVE?bi<+YWH@T5)->5WDS~ygQ>;UI*+tj5EYmBN4rI zG#WdR(%Rhx#g6JXN^7n4Sg`NKNJ_!}4;i2Np@lOadmix7S+^#;JjA5GM1u(9d09Q4 zm-C^`iy`EvcpLh9<=ALQ$u4KLJ!2gsf+r{#W%1AYrh;XmgRFc~o>+3kN*nS`Ah-Yo zElY!gA(U^uj~(}nxZ@(wuw=v#iX+EL=52J+;hKLnP+8yi<1{`eYhJ>rF`~&gSLy+Y zRZeTCymaT(4~3h~qSsbtvUySkgk*ZIf(==vS0wwSeAsdgfFtI6E06nuyx3a-LxTI^44`ICZir=;0y_;`T8q0H{)q+Ow)&(Kf33aY$jv zhh2kqLw$`~=_y|y(buLLls(?Ov=ob^mX*4JL($bQXN`mD=+?m%!{1Cob@{MM(Xng_}dq$4SFCx(POb7%y0J73b@fxFTXzvtk+gTSbG2gn7^ir+j z5F@g$cGJ^mVM=w%l33S83sZRJ6}KzrD*)K*+MqE(T4WpWG7%^Kk2)Jx-(#o5lJz*7 zs2&hw+RP@j!_Ug_0wYl|rl&vP%S9fKV73Pcp?gAt*=|b%SB35kQVCp|<=1c7LWY)Z 
z;7A9H_psds8Y0ilnv_67#Wk_C#mjk74RYFcaTH~PW?Wpe*g}&^p>)6=j&lZkM(w2m z?-wPfHhfM$?qL0_QAcOHsv4`E-kVoJz1tz~L2?nNfo_QYN;+Z)g!*O^`X|@zb>-7G zECCkifuw}H|OIB&7SS!Hie z=a7bmNJ4NTAgdNMVTx+XDIDopA=K6gw&~C_4)J!zh}b#l?DnbE(6znxiJD|o!(CQ; zzTtNhxw|&XGcHV^ZFVV})|Jof>;Sv5U%yXK8neFt?q~Ift`c0*N5!N8a(s#Phsdpw zwCk)UtQW!CL}QWnNjX`$Z}7nGDSV3pXRF>p?K}>%A-i6!Yu^}nn zVPIwLv1U%Cli*GjHi6-WGR&dlX5UBchN3v_B>Y1u33}l^8Qc7#QebFm^rWF}o96d9 zQDnWF(Bxk4Cd>u%?NmGDd$kJ-dQ)Y-Zr?`AAx^!-xdn&oK(@dI{Nbp&CGMckn061j zQ{XzN^njy@!*CJAixGEvgVUbKJ;>njma>u?DLv`cq z@#IeP7uuSfVu(B(M`+vuJ%MhXKE)6rd=Dtakh7-Y&IAu_U#6B)3?1$3X6!PZi`^8q zx605>jq<2soVXsR%V3AiAyka_skH5u`y^O<^-H{iggsh@AylpC|7S7`anKY{LB=1b z)Y>9%PTov-KX~E&j9e*R47W%HYCKdD=N6Ky6Re=w32ANk9dz={Ez3b1DNJ85%|%to zIG;##Zh`al#F7e+d9~#Q{RJalKSuD3ruSuGcGr4xr3!he@4|_IF~wc+jcOlcQrA{F zx)qxSssXDt`75X_Bv&~~)6UQB%qlqEftWrG{Pbzfna|4rx2JO*7iRP2)Qq!O=u1%0 z2;s!Tvag)aEt>x!_=|6l!UfhCO9q#hGk2AV1lpllj5*EweCj-lkCxLMAwjc<`D2ux z1R6_AB129MG*J9}NQ!e#dr-{A3>*OjzsIrIRFDK9@)I0~wftrTV^|%pr~s*C=QLu+ z=wOChPaOfAn#n$Js$?IuNM|?)S~ROJ0Hkqt#cvuqFAqrmQg9qeTZrR;)&6wn&NZaT z^45!6G!{sB#wy)8uwB=KAa{An{aQJiLRF;^xO{_-B?Ab!3`0?Q#3E@yXoVO4jYQr; z(GnMtYYEiY;3wn5${z}Rg=SZ2&N;=#!9LB-$J3DJ9418CFip-_8<1*$ zK#!_zH?!MnCSwt1-pZ}lvIFRtX?^Hu3%+Do1|tsDTnssekULaE)2P>Z|7&l2A&{?G zew?j_6F@|3QNL+Oa+Ne^;!UdBX97ne^&0z(b4C0^QXY}VRTZi9+vT_46Moi_0$nY7jnhh#=Ns#)hh`G)rgGCNP7dK=T}c>B;0kX{SroijNb(RmtPQauFQ31I zDHtw4sY_9QfQ!gWq&@$pr{|Pk)rIVn@#U8qmF`p2r(bbzrKhu1$_R?uUH&9Goq=su zPsp8zs&k%G>#PcwWZ!E`Wdu3VqAk8Ht&QY+_{BfsIRcFxwSzl%C?!2Q9v9!Kiq`ga zd3eA)-H%q7#&!G6>JX6F7qpY1FLDw?;E9b5$in3w*{R#7fMAtf(VxGPKH-YfRbMLw z9je~tB6)hnBzfeo;a(veQZVMUz* zLcbR$Dv?%>Sq#Q%LV6plaZX_hGCt0!_jZX*nO`4Q8;Vb_j|+z4fjz9>u~W+i4wjxn^Yd6IJ* zJkDGb{5SF{oVX=q#GfhI(wJ0%VMBGz=u4Q%- zu4?{hCj{u${R&-_DnSl+A|SC}Vgq%w8kY&QVAYaAI@OX9Wt_caOhcWw-f5)8`+5@V z*qP;QDQsHGp^YrANt6P)!9mtYa8Uqc(}@DDrg*?9SfoU<9|7)XZ)jBRXZx{ZOSB9E zJ{lL1a3sy*yEykq@i^Sem{F9BI!o6Whd!qGG0uW3lgrr=c+*7`?ej;2x4JOd1!hUF zxl>&9P9-%_9QB3C6<`uz2+g5&kUl|WHO+hVjgM57nM_~u3#Fi(;?Nz3b0vq0?@6`5 
zF@5Tv&*qGZw^PrMoYY6#P=qzh$x8dikanbDOayyLx8}%e6wD62o>HrD(r%qzxKBfF zzfH>;?iAvqt~*n(wW*^OP!(t=WvW8@5GCG`s*&-nO0Vhp96@vp@J}ck23S7PD_9$& zYPvco9DKi7CrZk@wol=}bW3x<+KV~_gdB-qJkv@Fck}BNM2rpiAgcvjpxvfagUoyO zc-xh3KK(xSMB0^Zj(Ztem(x(!UuQ2vO?|o~i4MUyv;3F$2T9?lX_kh7fat!HYeg{s zS`g!)L5{*GXy|e{wBXOfq-Vv{C<_2%9jmo5syM69GDDOl?;_V(NpJ82h% z>HF(oYEEG1hop3A4WjqZuRi$qJQ0I`&tp1xBf>Mvo4*neAqr7vl{dT{A?mx!JFls8 zT7Vfc=mnYysSg<9qzULA_`L!ZUtpr7gb=x0!Bc{q*&>P`id-h(3Lb|F@KowPQY*na zK?IbbLM8xfQG_wraiY)RDA_3}Bs{@YMT{bT-2qQ1Bs_sZ4w(W_2(Ey`AHKA@?4yBqWvr@O0Xa`|4&B;yy*1&XAxMyVa{Zdgzz6PJ7`V|xlScKxh<_eH7 zst~lV%vP$_%2sDwVqpPAX^2cR?cByd^h|p&Z{cDr^E+B6>C{61!A~Pi4wZN7vatLJ zm*QWRcjA;SE_cT%i#pBcmOuQ*>x%N})03`0$begUbOy=_Y>ClgA zQmg|Y1J+#wFsP29n!6u+8us7s1^uh9cY5(ZM z)r=LnA2cX_QUJwTkB)M)tAX`>OWn9i6`xkEVs^oMDl1MFV^SHYvfiMcJ5pijg}Qsh z+R@n6wTmIpHZK|0QdUx2=YVH2cGA;CegW;}tTmDk@gVdfZN47Ez}HLKYEU|X(c zdO*q)dhMA-A+Reg7iwy2D=&4w%u|JHf(%WKT*xAS@Gw-HA@HHPOAtU+X|0JIR(@8= z<1((ailcRW43Mn>rW`wrr7>cS$HQo5ohZ;WA-2fc`y43(ZDeku}a0)=T8lY+`!n^_~7&|XO5v}54VEpw*GcZz3xJ;ot zSydYuLP{#A8pKAThq>HDe#(#zK*_i$|7RI~w|!xgCSUA_Z7CB)AhX|RDL{$%0LJ?@ zS+A)aa|+pl2vLGyD1YO4If)(G1@qt6@vg`Q{MS%E<%M zlIX;Lkjs*_mE}>$O!Yv9Yx9dqe0*Pv&vV@9MaZDZ_*mj-OX?|#!B#-mMf6zi#i154 zeIl1E`KCd>waNZU)wk#M8wR9(faQZz@5&@m511$=Y>)kyjH|(E0+AAVp2ILO#zx50 zu%g^H*_ZHeZTXY$a}ru*ReWmsmr?P4b6R_CiDk0>$$OBp;*qaZD1)5G#FA*&;fB!_ z!VHk!!{?xQ5%%Hl16T!m**oEuYCZ}6o5`kj_KUAePHqs*%9y~8;CKr`Auh_=gBe4D zt)f_TQ-xeJNkV zEJE9_|P{CKWct+31ISJ*it zo0J{kLSIHUG;1LR9A@{Bl}#z&La(IoQMWXfwO}!q39&!S3s{5ZBq~VlXCtEKsbFns zf~~ScKOj~#589r*+PkF*FmDgKY3{Tfo3l>x@)N3=^Dw#-K)$Q|YmFIx_RSwLBm1)t z`*Mtn_}M2ki{4gEDY($5zf=BKQLo27i{4gG)I^x5|IxDOMJZ!p`QQhUG6u>cT(p>3-IdC#O1^j$1m+!d`yG6RAPku}FKMGP!Vh%&j482dC2@Z4Nt#WI5B| zva2I!SSY3`a?lY!D}6u^Aw)JTh4V~)J4&e}w;ldKit&PlG}hj$|kAYshR6gww^oncjobqeC@%dg>-o-`UNl_#4)bd;JC7sl8d7m!J z%a3sp1&yzlmD}$lZhE+UtuD*UBmeMPzm6vAH|^H&%ZJ~~WuW{a7um0^4?3g#ddx`W zpTAwcE3W3*>Y?u~?<@lg#8xE;%g=Zrw)K<|ekoNBj?S$EWEpA5#rGhB_8=QIk+t@# 
z91@p7%$Xdy2AO`oYU(H^AM9Q9@C6*@InWv`QIiZ6Z>KDAY4@jN9AOSefbU{>avS_= zmkoFkQDDgnZ@Z?qSn*{E^&w^zyB0;e^dK4-xA7Xz0bP}n6VjzR)$5hXI5G&L@(SX~ z2JH?)1#rjl4EeRQkC0c%)ng;HOJ~mSEwG%eTx;w27z^{D9B~G<84|omd+U3J+q0n^ z#+knAfarJo#Svqvm!_q7m&YvzzOt~>;m8Tn=}zeHLLJV`sq(DDp*jXm(zCyCbOEX_45>yWcZpg^Su@DBH`SBA zx!7HeYf#SM68*cKWymQtLpJC*v*=kXpY5?w?#tfipFZHR|b zB=PViKq6B#DxnQN&LapfKyW7BP&NCMmE6dsc4Z7^4v>EP<356!0lkdp0CNWH#H@;H zX-Oh*m?fWACMM8!0vBhOiqXkHE-$AHxvzK90ZCB`Wb8oRKp&I01*!>-*F$G&h4UR4 zA0j{B-{FN^^0YCMIzca>b9y1@b~!Ahz!wi_=9lQl3gkrp5EkpUC%0`bBT6}7oa%Bp zJDie=P9^W}AB2jddM)!IdIGqNMWf zR=INUDt$Bjg7A|}PGW+}q%%f4nrRyEGSdj>-R6af6y)cGt`yYLLv^G6jOZSD+CaH5 zc{aV_oAc=+h1c?nau;x+$Qm`*;$%XSjWE<^yc>~fuxhY8D>T7~TzeQT4*{Qs`8+NR z`uVDge-gGynz>#tB}w_w@*d>+G8McadodjmH!=qX`k1PwHDw0KZqu7b;9 ze>urttF&)7_>QDAyMtAl)vRiA#Pc&Dvfwdoheais^)ofy9bJw(9?BGl1$0~;Jx=ut zioaigp391Vouk9WQK05ykEVKM}!}Hzz`K zu-E8}q+L?4Mti}(;S%i>uS9Ftcu~}4#pRGcCuJQ7BI-PO%hdo9_f^kHQ3BEn-H9sj zYBB?byzt{P*R;qD839}iocrk3Fj3xx(oI`1=tU&w0)UGBo=yxGw+;(2IUC3{F6da9 zi(H>?%`m9Y1UmrUsV7N4eKNhIJKP;{yg!=7tDrC(WB7b&l)W6tIoxsJX{X#lL!Dx) zCvNAg>JjLf>UVG~4>v4Ws!3xt*Ybe5+@)$1n%ePELRB?W^Hg(7hS<>I^B!d3RvFSz zU2w!l+{!BqDW4KxNud-#s@svO=*?as0I%MsE0ko9fyKAAwWX@3!ZZwV!n8>Dmj*23 zvfV%q;jUW-tL*6qA@Yt^7c~!bo}kd;pK)>#M~dP^`XfSblyGp7E(L5VmtjI;xU_NJ zq2yCA4{~`bc?{a}bws5{9n6m(Klhb$@RP*GLLY%5jJ2&?$Lk?W4DmA2ADKUT*o{^u z&*N?uorI1UxkVt`CBI(6cjOa$nZDx#(#Cc3{x7;VO+J;9E%5@ls9r^n;y&a2_T<` z%G+HM)=z?&q-yMTf@<%u222I|)kEAm!v5XOH}@8H7j5b-W?P1ikc=pK;Vb)yVt0vp zlaz}?CsjF2BP{4qV$@v zC2Rqhr1N0*t6D1;1sHcCBIFSfczj4}v^;1-)%ii!QdA7O1a5UEc-NR9A?`c?$zPc) z&|V|H{&DsUe(&i$s-p56@+bz^%4RFSVa?%Z%^lRr@5j}GOG>mMcCq{sfJ%O9-J`%Fz0V0WvGbAu1{h1Py zvVcXVnke#qL^?X{Ylz=kiXAio`FG1!0h7Si;I%dx$)*?ChQl-m0nF~@=2 zor0E=oG2Ob1?AQ}MwSHm_fVcq^#6lG9DXD zH1~(O7aP5cx3W#{r_JMfFTwwU>;c^d@J#Z)+>3zRC?5bUjq-3_B}UkXRXt1_>g2sl z6p%1;txxthMCgtWR0hH0&1yncLw9JU%hb^YY+lc2YO#f9AN#$WXIbazc=K)SB@igBupXXBmo;8}Q364N(6lak{_A@K*L4S8 
zh1D3(`TuVZJ`Df3j9&TN8HqNpyzLs4!^t(2#V;iMckSygTD^74*nm7ClD@}V>@3dx)rN~Hwcs>+E}e4E2;38vZ?GyTu&+>e{yg`>U%I{-Bk_G4t_;-5xcX)&noA_}#h90kD z6K{VfWuIQ&pi5Ey=pB6WZ_BH+jzi`5K8~$vdHE$S#o@k|zJCklpILrRm(lVgT>4si z!%kXSRQ@owRCn*AvM!C6Z`9?C@}c_0M2bbde%R5T~j(%;+>x%Mo{3-sue)bpF@$5bP(X-gK-zsl^Jx|Xn zcW{wAbA0|E%8y>p^CjiYT=e{}W)X)O0sxw1<%2sw#G$?=t&MiR9h0jpr>Nm;H{nQNFVIdf7jKP32m$OK0VWzesN6IgDffzSe)wV%O1f zFdK`-Ys|@6-6>P*`}${JPkqB=mTvOv&8~fz%+mVxru)^_Fqx%W{QBtuvOwp`EIr7t zpM9N-)cG)(rS-F(3@Vr-kL6?Q)$GE8SNYK9{y8DdsZe13a`?-kx{^~&} z+s_8Gf71~-+t1b>|5!!fTqcwka$SF)5U)aPkQ#vqoX*n({y7N|Z?hkfhk&4>K@Tle|(-*XYkuPZ;=z zT0Nuua$KuFiw9&JkCCSi7nRefsRgVgiKe8MM`9~JIB!y;)G`X0sg=VnU35eNAzuaaho%07CJ z9ZUPgc7T>tE*Es{s5J8e83scnh)+f)LGEk8>7kB(5;e7lgY*OtIo##1vTdq-er+r9 zFtB%Pl89489*}`KmqC`la>LCekE8H>@l2oWK;u5?bK1C+YE=ncttxp5pObTqznWT{ zBP4oL|4T1O>3v>eQ>OQEC;8M(pmeAFur5V8#YG-mkuO?WK6o8YLWAYkx%BZ%_)T8# z=ht)5^Xo>_j=UJAQJxGTuMSC&Vs7yd8U__WUQT6{f+HXz5L>h-;PR?mlqkb#Dni_{odHC67otU$(;W8A4B9v;eO zFy*1lzIs9x@WCj2Tn1t-^qHyCFNmt^JweYkMCu#DZBB{13(vMNPN>lLnc!()EX3F1 zUkVC(0W9cpK7^X6R@cEyj8CS;(LbOQ4&cHi68c_tH_6L_VG@w0g!*9QWZn;f1^uJv zs9H^Ng_b87GHAIVq9vxPezaU5Y#CHvx8xO8>5G6ZTk~01RiiIcwN1X;5%F{5Z`Hya z#CHpPIV5+F?Q~n`-$wl1&X)mychVA_j_9l6Oll5Z3jE-uZh=A4wPPhps$rocz70ah z5nk2r_#m#fu!;rf@Q*p-L_4LeY#JF`ct(&&7A$s$hMD8ytJ&JPCU*+nSPVX?>(e;KIR3te ztG3<)xh(5Wr>w2E=LQ`hI_+L?9M4+E_iaSm-+=hU>PI43qoeh7jUf)>MTMcvwJ`gcZVu zu^P=ZSXNH)qmf=GE~KPxOek7T(=!lUMCL$agNc~#Ma=G?|B;w|Qn!Ja-KlP?Uf|3J z+d1;RxXVb=kGFOQ5psp9wl}$9KGDpE2fMjZyXar5Hqeh(ZR|i*?w&Iv%?$;vqjl(9 z4P4c<4?S%~(P9bpYd?>P+Z2~pyx-f$9nFXVck-&n0QA7d08iwdO-*Fq^xdQByQi8s zGGK*sC%oG6`-oEwO*^Q9knh7>?JfQHH&3j|4hdw~u96j z7uT(WeMdm;WBTYg#j|n+cgOhTD?>uohfeuK(Tib9$Xd)>Ms@Q-6+Ow>YwoY-2i=8; z2=$-f_)vB=6B37Di5F*Ej=*7Iu;b}8k+e8!_-$$ShDVfS2I9q9Hu0*a2yTbe)y0Pw zL;%4-3&ji~W45vQG?+b9DhkpwQcMr?8A}QkJ~LsespVlTwNCwWi>ic6lXX0MU<8p6 zl;#JuC*WS5DexGxe!KP=uh{&zxL00B3h;#2uRiMXNmrlTiXq#A!GH{qtLp+QxT6^| z=ysdR*;HlF)+yXD?&s+e4BgX;={*3mGa#3p5W+{IIPz#Kh7W2*9&P1kg8&|-s6>e4 
z@&_D1v~hk4j0*aN7@!OlCsiFDD;#<_K#)LhsCb`j?G24m2T%0+vij70IH->>Sn*eO zEvM6_4~JtP&dmalP{vs_n2BG8G>7f#=1|qN6|j?l1A0_BQ&h*F{SoVOrC=F z;QDouFD$!*oGCmH@_&^nrk8uOJMx=s5}xVw0=kh@F>%4l#5esB#XVIzwRa4fIYvd zc_;rw9S9xPYnb%@NoB%JVHK4AaPqonV4UXux-AN;nKO^;XJGmG%p+oSvG>V8k#v)p z-1}sj$pLwz2X<1c@cr~nP7SlQH3L#6fQYDdpeRb+lkm$LCCSkcou2K=tnhQQSFFh| zW4@u7lcr?PlVV8e@sdCEVH~;@+6-*6dTGGk3&*^Qa#weVZi`W9D)(|rqH?zGU}TK1`TVaJ-%Egj#j>#W^n zzn;+AUSA*o{RLGO8_WmhMQNo%`bB7yiajdjIdy~iz;6mY2jr~VM{It%9i|T_%-D#| z+Q%d&T$Qs+v-mcs;6OJc58I88-cnIKMNyAgQT$uwq5FfPIR1P%{=7bVZ%yE{g}MuF zSU4yAhg;L0sx1*KZ}I1&Z~-Ab0O)Gb!I42|6Wiht=KhnCcB#(x#A4B|jBwgErG}L1 z)tLb5!`ktP2nVJRXT<)!f�h(1cMEE9m80+{M1HlmWFrQd@#~B6eS*0$o`x)d6zl zDM3;en>?06oLO7!w^geoH&D}S`92I)2?UCLqbg&bGJJHKADFca4{rAZ$xbD{Won*U zP-S{}lF1V>b#tFHg%yMNK~*3ftPfOu+*9oD*Fv*4T4!iHNe(&PAH61Vxsu12qEZ5% zhrwBV-f!-Z)3??cR#^#7Q&KsTGO=0*zivL;4?P*-|Dh)a{s_Z>RCKY9{?@8ECGBlg z-an=M%TJJ1VYqxh7e#!VhV{zZz8G@1#bTr7&Y$C~Gg98br7pIl{OQ-i=@vH(WkpC> zq*N&bE0Ro6f6zPU3g=4VHDb0QHyaiEbTD}}k;tPL9Z6pMD*jxW!{tf$nD1VdJEj^ zKNMJ%`HoO8%X89TyJs+BS1P7wreVeApchkHiZhTjY#3sk5rsy9+6)Z~m>h;Nb^A?q zYB{H`;&{1@V`zJUNN;oxXeFEyILm7#9&zHF;Eii@y0O#CW?jm9rgptN= z7>T76jMVN7jPe>r@RC9pj;9AN#)bqjwILT=ja)57L&VBN+2Rb?QTj8y5i6v#4@pat z2>O88!=nP!{nGd&qzjwOFfNy>7TLWa-9y5(!zOZuwkN)-hI0Y>?kK@Pxo)|CqFPV~mhZtAd z!kR2A@oJ6uf1%TXD-=vp;bu}YP5+sqN+*{tQ&v*}3Om_Vx|h}+R`0(^S)`}w#%_u{ zx3u%Wlee>+ez@o z&NaerQ9rRRc6#EFszJr!?A6P?jCOA2UrEVUS03;iCRj8r00YBRK!d>w4I(_SdVwZ6 z3Fxk5!&og7wZiB?v*O!>V{*p?uW;A#P>D_P??YUL7KE+P=kj0*d7s$L5 zGHhcPQ;%7Q5eyf9fp?Pr36ux4(HX!Pzo35GM7F8+&W6O=t=dFzhk$^xpLl#Xsz8Io z;XMW0CHRtk!)KB6+5SfP>P_WSwMAs*b{gD<3a5tf7Jj%_TN~xsW3AFL;8}SXQ^TCa zcF2z`-grUA(Hv}2?ZKwUfrmfcVmNO(JDQ<15{I9Yprv?vS0!@F=jirrNv^TBSeeS-yuv1TK;u#D6~aH#eTVzW^L{TEyr2pHw2H?Xlc zQZ_f^s6vq&#olo}maC}P!`w@<{YNZG%wAxwqs9ufa(3G;8whq7B>m=wsi) zi3msMI14W0X&~O1X)sejUXxvF8dyI>^Sk5yG47{)zml#pFierq;+07Oo2@lAc7{Ou z?dI5-G`7#f+eMRB46-y@Rt&OQ_LIf)|7Y**!}B`pJI`~TH@!1-t6ND zrM8+Px+Ugf{6AgU-uypZn!^|L))Y 
zd%v^k&$UbDhu>HIJp{bz+RtIDy3+2PwTrDtDWDHQ;JR%~fUj~Q||+hz}>+p9KP%(l=8Do0=rzTtR9C*Syu zw-RHkFfEq6-pT+94q(-N?{-`28YOkCGAykVzn+YusRy9*xW1d3$LW9K_(WFQx;pi( zAo+TRqD1*z)=OtJ#U>iP?@CP=K-H~0o>?=j`90C2sUf0{V~t&eg@Nd&r{A_Vw0gJUQl{sDKZ8@2-TgN6Rt7D;fsknv$_Kj6cDz=!!o_X9rcg~o>w zA!!67C-f-pGQTf*!nrCU1>5FG#tV%P8yCTYf{Q8A)tI6!`7m*65Zjawo4_Q%j(;^i z>`6feu86;RKFpec4>KkRK5SYH2TaH@uGo+qA9mR`j$>Ei!>D>IK1}?RIH7Ch!%nJc z8l8aeFG-gD8y?(P%7>lG_^^d5^I@M>{qSMSy26LS zXc-8!{ir|l^yiH39Upc!$A>M_U>KeM0U!1QKCI4;ir4czld)egT*;q{8?B2XSK+p_ z?&r0-JqNd-b;~16DqmD<(p6rwAF;ujQq_dKa zgjQJvuE47z^+&&6_9^c=f}(yiMP!)li+&54dQY%hyu0*_tmp;a$V6Q*0}2)a?3(rO z5sgsMODTDWOxo?yBrd740Ton(GK$GGb`VXYujm!_OUNtr4f-)sI4GMAUS=r8Lk;Dg zn8Yh+T*h?VyvxkzT()EwxaZ}j9JPW6`BhqFgTtMG7s*^gPvGj|XRA)91`zvV1yMko z4uQsfc^Z7BGels3N>9#kIZW{?5!(*UBjc%df>$z@og5s&MUFH8Lokiz>A&?2cB=T= z)Ti*_Do%3puT$h0|<^!B!J>8aNS+Imptgb7hZOrDO0=xA#1AwlH_)xLt8 z|BJgLuzAki5!C!U?v8-wd3Q%J^Y6Jk0+|=w9YM^$@9qd-UUYW^FMrJ45xBhM?g(1` zBX@_>?-_STu<{?fI|7yG4HgPg{yleBpbd9NaPqR1vq|0?1P8Ec{C!v`XQ%JkQ8Z|K znZ%DaJ7`Li$Qd)12^4aK-?0;&J1L~~Z7wXfzJ(d6TDtT*valh4tT&xfF*B(SHK?zT z`dH|iZmLxL31qY~8tmkPRYixLfzMKlVu4V>K|=@#?FczbKw7U(p^~P@6^vNqjz&p% z{t-)k47wp!3h|I*!J|Ct6r6?Pjp=FFWJ+P!n&KR}s)5m*zy-P5E=v>&Ia4p|1vxGC zqL964*9)x$DQFpRwg~8NvvN0K6q4OeAiy;8NazkCOWPHgpMBdTz*7TZ@-jbk9=GZX z%`3fk2ZPPW_%hu^t(*W?Pa;B%94OD${J{T+yX6WlGJr zg!LXq?`TyS&bOr^;HEjDjFEe3$P{iW)s9wv@~5tC;fy=ByQr~O<-t(YHPx#!$rCm#G!AqS-lo{b?B}OxDMOMO22Gn^%I)BH!HJ6PFK1 zzlqLQ;gyYpLZE@}HhEt}^xnbU$qf$?$EvyiJ5;RqiCdrwMHPbZw-;W^59Te&6`Jj6 zHK>0N-C(|S{)EMgyj&@0L_`bP$;@S;ko+w*vahI~Te;M`X-X@sn6|TVxph;;L^1e{ zN{y*noG!u5_msnmg=1m_Hk@EmzFnUbQ$D$!(P24!4O=RpXwSpn`q{YL>ocbxg&>-A zAxP5gt7t$Gt3o&t{Way4x-SQ6`%#o3`dj;P_r@PBa@hu+A>eH|n+O5PTV}fveYn+B z*TAy@@$RG;YD}#8Ve}2Fo=C5bV=fNs7>6DN-$OBM=?Ylrz08|VbRtbs+dD-R%ipQ^U3LKE4pCG8Cm9V&!P(b@^qbb{yF%zL9eHU(??K z-`|OFW6@;NV*xnpasKG$Lh`x=r6wIp36q45n!YJb&d(o7-p^^FC>)gRPj6C0oyy_X zOqx6n6Oi>PB|4`Fo2gHg3{93#t`6q8%>|U~H-)o8e>QyoWpz0DgfR>rtb2n$3=aff zRKEcy8S3yAIcHBJg+*4;?L>3vH}v!X04uM5nrDe(7v?EhwQi@tJRjzR%jmWl7v;ZF 
z_)$Yfg?Sj)V+;AB>H}gIRMYg4n*2xcTR&__>gL!*=WohvAY%={q6F;VAM8Uc+4KOYijt3#>7v}YX~0ywf#9Ffhd4H*+f244FI~qXOmH)Xdppo}vi)Lc}wCBF_o4+pF6v)aQ0o zZ1p)Nz@|%16oRZ@D^QnHdt4VU>2ck6@j|YP>*}IVW8CtAe(olY>2wDK zI!ji8-RXP(r!9}OCf&{_6Y(A=p1>G?mQxUKY76!cmZv{#Z05oy+L@LzD`WbTWH`C=zuGzfO=J&1Pt=z9OrtqmVd@DZGDBR^{&86zuG zqNv>~8$SA3KKgM!f}`TSacfxzU#qrov9!5P6GS=x-Q9Xj!nPA^EBdJBz8b8SjLwYq>l z9B2ZTm2#~m)6Y~n=EsyarS>722{Bb$^WB~~Ia%qDqZ-=ujGCTCpWd-u{E-m-RHZIW z@z$`4$RCq@Q-QYD>~(b7Rm5Vc@6Ac~p$;XW+*2O{w6(2$5GXpXGkRw$3>95AesjCt z%lH!}f1dJA7t4^K1f^elFTX0`ra4K-Y9tUkQR0#!x_eZqUVDz}vVTd0bJ^JTaX)VN z&4dQ2ZzgTzPG&fozLYT~<$90#mu4~%#HXzylRPzj$18Vr0C$-~_{IqUC(?=x*k0m1 z>0fFbvut{12;nLtNHV9dxyr2JXwSTgCsX*Ff-5v40YMbVNlnylVgXkZ3nuBpX$|_y@NOV@Vj;D5X0=o!T*9g1TS4a0RDLl~uB%-R<{hEw)K`o$^xp<6mz z%_y-2^%+HpBlwmzmw?x4RaA#2bKW6P!7K3|0f839h5^DkA(Gr`4L5U$wB#EzNVqRE zk}rl~*}8TCz3AQ$ciTGmp%2i!pfYBT(IcczxH_Rg@h9j{MW}sp;FjXSI-=^%5pK^8 zG-U|XoZO~_-{o=$#x^GdN?uqh!ho5(y^(TuboU_V%$U5IqU<`Qhp80{`J}QnQN(%& z+{0wA2)^rxm}#d%NlVxStZ0;c>!jv$1tXxVHuSIC(8b0A_f4PrO=;CkK3Nm$iqte| z8<8eiWYdw%O_cJIC!x<^^K2CqM4@V0Anlllm@C_EF8VdJY^f>$kHc}1H9FByWXXyU z-jgpfV?~@A;Dj^wd95S`p{z4y6J%ypEE8l-GSeA+$Zkqga}?0d!nVMgOWp=Hnz51o zo-R9tHJhbOjKWH6ml=6mu#UB5}2dDMpg*e)dQ#LWGSapi&{j>B2!{ zH5O$T$fkKoiFaVj%~){S)E;N~)`nqv4^SJzV&4?})_!h7ffT7d17cX6A#>S~xg3i8 z7PM%E&c(0{YL~@~2$PfDd2R#`MW^%!E8X;Ja?R5}V`U)SIYcMRER}4J9>a6lyTYjw zQxa+J=!CKj&)#CS1@F~M1fN3ggNceVKRpf z(}oVM89LZxnzWTo=3>z%Q!ec($8{zzED2#muDaL=HBd606zlXUL*05YH?vnIQ{_S- z5K}B&ypbkz-E>q7vSk#EX=cJU(x8n5!d9tjaWoBjmz(0WPXJZZ3859!E3*t&nM~2^ zW39k5W(>XrekFI9#;A>YffBip&_?W}K+{A?f%T6|Q0HY(? 
zFHMTIWsSV4fDa)NHAu#c2}R_M#Lnc?LGmeP+ndRyq0!q7QfIzq6F!Z6sk(7cQ33k& zaY9(RsXDpl%ctHtio7aZ%~v;NC<}2Uc*9B&0O@TVC%{viXJKdxQ<=9cHIWunWU*&M03n<18IC-@{~gLDyjAZru%K-NC~g#bHK z1CaH0-S6%f-0q%-%DQdx^v%XMZgb{Bre^ebHt$3(ZL z*8kRt2?@%gv|2jMyP5X<(lL`5>j|}Ex^#S4NP!*3S;}gqhNaUW;k+Gcww2He?;{Hs z3+GC*uhC;Tf^?RlO$&%!u?uTaBgs3CByT?=Tby~Eu#}!%ODU)Cs<&BxE~__mb&?)% z^h<43vaTouD{BSX-2k*lk0eJok1Lfoz1pA-!MRFpOYMOXUV-`QQ!gZ4mWNN=7e0&d+bKE=<8A)49rju5_c&VHng5Io z6?_&G*5mz+2F0~ds=>BQDR~3&rzi)Yjdp4K`5*@Y#R`!#KOs?j07JGaGa(xj>wk78 zEdMjXOjwxBJI4R))Gsn(VWf1PL#oGOd|U{%lKx6H#=^h7a2pq<^j69rJS-CJ4Zl>d zt7wWcHa(XhI84&P-IE6azguv=b0D&wTFtl=spi-4^&ipRPjYqkO7lc=91m^WD`KII z)d8rm6@cvLYX(pq01cCD;NYTV>v2Sf<9CgvqeUr zqNzi4MZJ>MaLQ_s(3WsEL@Ih2dZ)%~YPFS?E+4CS(N{dLD}5z-HIuDzaz&w%HQ>|( zX--ENIxWMTlnf$DMlg*BG&LEOuo?XkouG*}RyvneEEz!nDO)`%zxV)#Yqh z;su?UFA3M~Z2B!#&j;F^VR^Og?GPPpW%EK>Zy-c`3(L!LMtzy9&-+018(;5ep;U+s zE;H~wa+#4z(dd=a1>{|39BA)Rh8(8~fU%dd-%MMG_{8pKPAna0*;1nG8w3+5>ky)n z2~RC~O+ZV7CcxXxZC5wRP$s5WS$tZy)C4f2_vly8g>UqfNxKhQ;T<|sMZgY|DU94Y zH9+2pBlnrc$cW?mFr`=qvt-pv%Rx8rR1k6n<^o9fQ$UzJBl=JmP9L(SjRbfF^3UWf zb!nZjbiVC3*4ws)mT$SmHzW{W5z?73lS0>o83%YL%mCcHFq46C<3NEU&IwwPHZ4M6 z8(6BCvFQ*M)m}ySwbQX;xOzuXI5O5>%8?c8XkwnL<%~;)YNh-WaGhWde1^3`@_FbK zBo8*$7FI#zQj64xz&&&b-Yh#44wW6H>Ji0MW@==pvm8ipf=T{FdsxPaLHQ)giZG!E z++1^Z(wn!%i$ZzU2!nWCQIbw0w5O0P`1(#$pVPQ6Ua>4BM+mpzy;MFIU_;Ay5c*`{ ziDz_W7)$D?h?}lZ)QzBU`cgLu*ElDdFE<~=K7(=voJm!GeW>@?VWq&@v+6eQv>|t{ zMriMLy&O>|1LiJ26nTNk!->ntrhea{{n}X2)KQ5}ck<}`yp@J7=tJ8v`xkr#|N+CP%p ztbyS9ef#j>sVo6G?L< z6q7luaFCbquI7$v3874sw4d!@7c<0C84BVt;ygG?m(Rt>QmZZnIC}yQXg7APy@~d? 
z|Iy*Lw$-|uI`Y0S!yGH|T8O&RR>7|PId_+|sO(sR2#y`TTY%)qDJv=@6 zO)E)=15p>r5xZLc~FT0GY zCX}isdFL0VBQ)EkgT^YyW&x7an!J@Cb_JHPh@+u+@zkH@Ucf&mPvl>GGktg+*dU(4 z41Z$13u7aJ&y*lkb4C|q3`403YN)b&E5`UqD$jBy0fk+K*N_f`8|uYhr&hA!u#cfc z?z@ zy$EhfZt|t{NsxTBLv7kOpX!tkOqfifSzi4eKm^)z1g$!OUqQ0sUsc?zW&hZ;ueh!u zEcs_nw_ejTE(y6IVf`gO#0GTma9nYBQux?i+`Nxe^=fD>KMs6%Vcz*jGHq@UFWxwa z-`612bxhIV{F6PbSv4a%VJ5!yZy4Imdg@du>*4h)?kA{zc z-k(P4RUyO8c(Zc!paF=^62WCI2F2*JB=6c2EO#QAa8B^OFDLOciC93puE&+C#Fh{0 z22LFxr4*k5QAUTQfb+%5q^_|eph@K}NTR0U*OJgEeZvzHgFOLS=#Zw@9)T<^f2tGu z*etalnjd}vRiJ(xy{s2-mDvjjGFqNA8VGvyk_s8r z^-n_c^=yVf>s^I4>G%wCtr_Ha9u4jnok1IOnC&{`^e8qxq^?#s)EYhzW%UNnQkh+F zM*=D>j%cZ4_s1=am|FO$MVtc1cci&AgSO=&A-bVMI%(Vk7EZ(NUdKq3%-0^?h@FdMf<_-~m6|9G zXma%|sP=q4+F0%DIT$jju~{ppBqf`Frch3+4E(Td!lRfmI;=8zacYs>9~T^vEGIFk zI&EDQMD@V#YGBv|7RSM`L|%cHe_Vb{*|hY!3S@}UtwMywN6{0t29Bd7(58Mr^8=&z zK+-Iv5UBX9I82sJ0!`=)Jksrj*Cnh823Mlmiu+wo+R_VZ|FlUI|6Msub23IV2@nhX z>RdumCPD-p1AHq)mDHmFWc?#0cY0n7K0wMV;tbohuNn_SYnNe8lya89NQ@Qkm>Ut{ zLbB7MuQk!J-Kirx?2f_E<0@zB9u-8!*CtF7O^kqePRKDG`9cg*Tk7ac`9?Uwbq6txbWKBftR7sw|ggFS@(Rq=) zQAx)PBT8C9PVI{(??Bi`5sL;49};rh{a{>rFyTeqH2h#fo;Q8k!)@$td%0Z*+el-fz4z4&@a=f|@wVXy4}=5EpqNx?6$z~V9lY!+Ogi|| zg0B}TVL_S;o$$Ao9G;BK1#;Um*JYQjvCYsQkrQLv2MiSa7X3+I+rMrQdIA?Ud|cET zc8+x_jAF*AeFy8w@Qq}J(NFL&qWWv_qb#g2`nS>r7;F2ItjZGIvpnEhZS$X;h7*sL zMR1`kiefc#13z;Qv9txM;gm;=5=RvB73wLf!JH#X=>?C*rbmC#A$}Wl6I1O{1WREa zeX6f2)mGcDhlPS8ki_Pe=|S08e7Bqen&In+2{d`YdF|K`(i0@|3H&W*arC>KJV}RZ zE@*jc5rVEdF`!^5of89^9G$^q(?KqFh)(q{A=jm6CnR{;NL6LEFu$*M-e6x%&3geRRqsRLVpDRKD$7 z*A;q?6t!Eev0M4sz?Y`8Hlz&T<(SK@KA~vjul@eoC4hp{6^Ci`ANBMgnE)%E#>tP# zz-h+I5_0^lwvapAv9QD$h_WhKqq;r7<7r^l!*!A?j`@WR z|JaXvk-GFc?uRb*3IiE>tz8I6Kk;6vgvAp70kulbU$ndiS!<@)Q0SZcEYM1Pthjyg`_k^1Yzq;nI>lTYW=f#U22ta0=qtad84|? 
z39LvsW$i3D{j$h~R{IJIP%Z*syQ=CuI~OTv1zsY`@YE_WbDaGPFMs*$G$-DYtkV1K zSsz%Js}u)G#M8re5yYn})xg^`M{w8jcR&h@COKAPSCKHZK}%(jy8prW*(f>6ip+dh z8&(D^_41^e**uc0=dsa#WXBkm{HX=wSeMr_m%+Wc>9Q62B zKURJz$9#>b<6!YYeW`3YZV>$5N9uZ@$PP`ulCkIyTI)XcP`M=P8ln*Y#g+$SrW?FDh3mQ7m=QRksQm_)~tonLK0LievzmWg=xE0 zG8LxFB(~MjG#2fkT4ZGY(r1bSG&TUYWM*jQTGkAW)Yr^$4kK3`)8Y59YG#EgeHo@` zPk|HqM_)BI1<%c@`LB&5dIqcJ?0*ld#vq-o8lL>wk`3EXwMpqM{4X1~s4T7_K~KFk zm2>o3I}dYeWWdvFBf1bQth~{YZWA`4bRr|reHP){ zNY^6@qT>!K$!CH?53)mFWz}2Qp2b2k$cjlv8m_;C6RCgdO3utZ4V>4{h%ykX^o| zSLtq+sR{6m6i!2taV3&_YB@P!^n}3o=#Tmg1RA~OS^WKTs@t2rowJLy;K!sQ42}rUvM;YSu|6i5DSUkr;G~1QO{OK zJvow-mnaAI<)HrTaOCr1uDL64V750iwL0G0u^bmX`8@xd8jbNIqegHZdL9lHQh#pX1x-CEH>2qBGtG zt*B}f_4I<}a(4nk4!eoV$RgC>+IYVFYHAcOY@>GZ;7xY=TO|m~CfR{vNGRNT zx1I3|?@ZwMba-s* zn|D&)(c3kz((9F&+-q;|Lj{7=Ep0TiF&YddMJy$F*cab`#K&#<0nUrY;n^{>%!;`+ zN;d+CuLx*>B2zak4g{N9h01dPTp4$=VB;&{<8IlX0mSqmumDvQK#dDyn8S zJ=7EJXuIi}IuMLW5I{@8Hd#8%gT6)=jjl(>kfCG%6~gEX#kMCt6c%oB8iJWSO+(-` zA@KPo{5Lt_pYzz!ir85uoFrveX-1l)5RjN->QC|>N*=nle{>YHfp%xO3lrQ6W&UGE zn^VEP$n6Kh!|agl@Eg97RH4Qk7TwWah67kJD`*}Zu#@FmbX?gJoD@bC>097iir&)0 zJ1~@)6+geOB9-JgC!N^?d;NB;+ETP)OZL?ET2lw98ddt__F6IeQ)wYf(RWaA$g>$6 z6cb=3C^TB5!3Rz4EVZuZq?im;Q^uilTdHd1a-@qyT)H6K^|$i*6>-+%F=~1F5y*&t zh91IdjRq$q$d8YYN4JL`QbW|s$uBT#`$ZdI)c8vxk>u1DQ1!Lp`1}PM#KL|juUICO zlJt8$gv@H1G%UhOz0Qw^RV|m87z1_ADkLHB{^-xdH7UFmTiK{@D|sn6wGc#40gAoh ziBYBZGN@7^jojVQO;SezJGne6Ez7qBMhvijMBixXYyq1M|6DuCXk;a0ZHLkkm+hO-1^gee>YdUb^^dTU=29&%1)5WgtwO?N9F|qiE3TS5N(!F53VN5Y9cj!Ghq@)0yI1=PQtz{#IpB!}YK- zYTpSxr8s_SRJWySDf+Z3X1fmH{hVjE3?Cj2DIUx9Hwtje_j!%XQBq? 
zeJ^IjoLU3ecGlXG53S=!5=JAADaK+QeX$S9*s9fr!d$~ES)6z(j_Bx`wp1LU8c-6o zClG~IjXM2xkVO?mY_rcLo$RM(6tBEp>&F#DJQwS#1hd5-Ex}iZRSUGvb-u!Ov&71Ab|5twltqOh<1_{0j=zo84&B7Uajq6VE9VUkl z$W%tcsW3L-^C?X0C>MdIZCIshzruTf1$`6PW<=|@BpAS5#O>*+m3;K`gg$Qhrzqq%xHG~lX8m2B2nb6j zvz4Em+n>?{5dq<=R{FSK>F53PSycjYnBu)B4)5kxxVF3>Govac=Y^Pt4tLfSiwM^; zMI4(N{;z^V)efDnY(qVkrI)QWC12tvSZO@14^>H?!R$>Huu~LSg>34K`9wil1Gg0^{*fwm?z5FZu1ep@t@y0{!^q z4L`~y0|unq?dL~ip|UpRCe$Mk4jMzxaB|BM`4}_#{6eqU7!4j9<#bZq|6mB++^=Q> zZ*~@85)v8^&jUoI0i=d>{z?{xsJcW6RyRzbesvtAS`@I7b8H|EszkX&-!;Z?wSQB+ zuBYD{Xul%RUJqy;o9G9;0klw>YXvPbcayd}SKYAVLmL3$*a7J=kH8La6{yZ}fW`^k zvmwv;-;V1F5qnIO3j%WYXz=SQlF^aT;KM2+J}kw-<2_^1pBu9dKW4^-4bwndmkvk@ z_El%os$Q%fs1?m@FOtI^f2@~c9PoZz1Orc|6aIOoH!d0tJ=QzMTJtl1aZ`(qP9XpB zOTF82pXLk}^i~pnpg&7oUA#MHKkj8QUDj`I(4W?;aiV)}$M0mL zjQ+wvadLQLsdQbRVWSeKDdSI_y%f+F>7#~zLQ4yh@vc!gveDqIy3XUf-QyYmc+5SX z;8E*wZdA}bIw}}(Fw3Z8e8xxROe8tTF#}8cwRNIF-Cu2`u@w`DmikgYrkt$Wn`PV5 zjggU#Ma-?R#5iU)Qt{JLXc&JDP~oUDrdKpUe6r?J%&>_61+JnU7xfJA*X-Hx(PFa} z`t}u;^&6u+J5=lPoH|cuhH5zM8k5b_;?QXDgfDkusMf;Sgz3}v(cs&oTGF!CiVXH> zS+xK_01bD`X^I>Q!=+24nsFLgbpqj~(Xr&X#Lk{f>^#3|`uzSgj+e0sk|M`~|(toPTa7c9Ft#AjgHIj)+l%L1Vw1nVM!K~NeE zrgbaujRxmcu!l5o2e>j@YyG65&^;J2t}7TZp(_|MsVf+9l&i#`6I_Mb%X-hupRs3_ zpPs>tMP0#^1+K#4V|r%%)5F4oPx+^*VPV0i{nJIT;4}XIykWsv|8$ODK6}9UAZ?W7 ze9bVS0R?Pe4i9FH>?7q*!-JFT0B%M%(9@LxeZhAr9?bSUl`^clRm>$lWqnsH|a}yQ6O}5HK(e~cCKCxoDH^_F6O-G>~a%=LU&#r+LBtpo>$b4&_cw*>8akUCZ*U_W3+Qp&!{kZzzkyoQ3#7M z6oYumKEqCMNrmX4xI?vT+~IuID>Z$ZpVaiCzPZrw(<(sN6Z~x#lnZyBQSRb)Jgx(g zv}CT1=s7jO7`vID%rOhH`DvJczD>0PnUaa=~X<{ zq6y%Csz=(MyB*zTppYP~b&x=(zoFDB8Z%1fw4UmX2+=(zL?^#sTRWN=`l8I-<>XiF z4Vszk&9B)TG&AX&7#cLKRR&QN>l$}LWOaqk9oKam^h#IGxDhKGbDHoeU1R1zSM<-5 zWdDA@zn|d#|MmCd-2W=~$?0C5qmF9kJE0yA zcd+Z~ItX&vg*=?t!p zJClp-m_Va?6J^%=6gdQ6$+gCHitEkHxQzE}J!7>ORf%@+F-7oTZdeICRrIT9#r6af zeYIYZLGPU1A{&0 z6KM<+sbs{V2=30S6?y&^^J>k|OAGE9;g>^Q*$E6Vn?aI?1KTJLy9OJB$BQ?>V^*Vj zaeh=+g9l${@c5j*l#fXrUc$X+4v!ZuR8DSoHoa4sZUnYvS78>_hNwBq{HQuXLsPo4X-#l7o?6e0Eb_KAEF?wH*KBDQ 
z`(Xvun%UBxAyBN9qV^^#FnilGtU$^W3=5rb8u_>iV1t|=#3m4G$oUI#|*C9Qg?LfvhJDOaekA0Jk3?} z`dM8eh@auwK%+#jPnlBjLQaD_Yj0Kua>_i!Jib-k3%Wv@PODDpUeq(SuAr-%vHLy2 z=aD!6z~6t4`;Yni&vO59e}9(yC4c`J?*DKAKz4vdI~*2Wv`&C>P%P7v@9@@_H58qW z{ht`fiQJ6&So0;-n#VumnnR*5`$vyNU-FM0iO%U!dy2hwNOT#%3yF^D z9Y{1`zd0nD)IAe@l&fjexo$$5c~f_;KS9<3ABZ~#3{-yFtwdW@f;haTg|QsbwiV?9G;*$9t=P z^h@Cp|LB*(gdSN6b4-NLPyCzQ$FvmY^^T=*(tdMGL5g@S1u5pW6wYwnv=q+Ugidcy znLMoe_GW5(1N~g{bzRbxrEr0(mcqK8sdaKYDgNz#Pw-Y57XAGr+&}5>-@^TO{QU&? zm;L?2+<$kFrLY}#0WumnGt+Ui1UM3ECZ_Ek(`GOOI1)NHm|GSMGg}tl-}z06?(yw1 zIgzAAyc+h6jOgCyQ1ApJa3~mg6r42+2JA8#?|+)UZ-tv@w&!v4Q5j0W&3BlAB$Geh z=^s6A{+xgGxOr5M;O2s%tiw&YIte!y^$y%L_ZLPknZ$gYz)f?F0XNT5W)p5+Gz^#- zs^jK)dow-MfSc>8pSmvV3U01(6>g4O7k6R#*;hEH-{4fV-$}y-90rH5^KdU?=Yp;GNxpe~o>VP8&xgk$u0v7;+If%8 zFEM2tf6`fo&oCNC!`eIzTV_n&3j2JUGZALiYM0wb+T!b-(`Y$4o6S&(!{Hq2tvt`? zCOMaO+W2;%=1?u2gw^NwB!E%p)83=~?C>DJ@Vo`NXMr!KolZfHvrG!|H3PZr-Oo3? z-)d8@dthrX{I=*6oB9)6bF$ZW{G;F0pY)G@Q(xC3oBHy8edIRv)RkMV+uGF4LELZZ zY0QCq!^LpQ&3n_PetExkfup8cb#DKc?9Ie*!=^qtO8t`KbY)YQ>$nCYS8>&PnN9to z^=nV?9X9EA+50`gliZsRIh*~rxqpv;zs~(JfByvc^43hI%%e?y9A2+@U8Yw&miK0> zo5mShwz)oEv0zHc*YIpHwv@r)o-y77S!9-e+%cxpjJwHDyy%Jgafa(gS_yJHX)2(y+sswib>ccw*kA&i;TF^w)%z#yO_p3roOuT-7)D6m30 zF#O6D)`lu?p$nV6jCeTm!t|OIXR!-+nQaz4&@3%<5$)*5gIDP+Hnix5X_QX>jyRj> zuS^#@Z$Ous8ApUgDv<{SmC@!4W)jS~TN^I$Z{&=9NRw=XTEKa;?A3YXi!#2&jXbjC z2(xGdsRoO-B>M}>f7?C#NMc_(ty=Q@Y+WX8ydbRy z{>Dap*7ye5{Tlg(7cxE@Nw&}iNbtP8O(4N@xk0`zr7I-ZLMb4@vwDUED`Y~X zcT!ihXA`;(#@i2L#n3HO*g`haEp;^bD6bX2^!xhR(U=9@dX8?!-NQWNedF$l*~AUs z-5a4k=I)J9yEqRS-jG;$pM3IwNc1UW8V(8w0@b`Lm>u31{DFR5vDtNG9H(8S_(uw_5YvqtugQi$3P+3{yqaA02?Z{TEqVd*$tI#p zVjzmjWydgo=4p0&iM7W^8J^f;AAgI3`>-Mns_~7y)c6}RW)Ntg*k+$GoFwKJk%A;&oJc7E_!=X9vD0mm<5r-Wx!6SRYnqGzM^m1=0l{`hE6lE zdZ!i0qUROJIHDhe@bigRg>)+~R?EDzWACry&n9hsJQ; zTdM~(hE+XiV6rM|Ok5L16J=w-TJ6U`Akl^~Z0v`PVG_UpbL2m^F}M&Zyn{B4f#}2f z7?|dQ<*;(Gs}?y_NJb75y}qCf{3N4OkpDvTclIUG`x}P4#g~_dsxRitR{T4K1)I~8 z?A}+Dw_v<`Q+g6UvmJ4*-xxP`!RWuG{^*(OEL4n;ECL8p=h?z@>^v)<+4vUkH47q0 
z3n{rQ)k;cJspn0ZP^8*nr+?Z^D5eog!Huv&{&gmk0I`t*9%LA;N%&*)NO*uV91R05 zr0Q9&!Vt|oW~9-oEfS)Kj9IgrqnW5X@pjO%NmdzZb-g_UV8JX}lPjX}su^)Mo%wakDO( zV(LIn1F7++!AL39u#kXsCM-xEE{sZw(IrFpiESGZe$yAGL&oXLT=K7s^f>z)(xbrI zkduc2dZgzLl;^KJ`oEPTzNy3n+o#1pTN!+QEjVZxl#X0MGg@)@Jd)KZ+qjM%0AJ!} zm(+`SQ-R68d{Qr>h-ivp_2ChXVku_A8|gu4AD-a2MIdEDNLZ`aof(p5~K1?{p#Em?VM+<|yu zalPy|mdcGP(-dw;we7JWC%LN9_vx7h`ChKv0ea}Hq7X7&4)?GTUKCjDel8|qGdPf` zX@}TW*lF9g7P>u4J=633Xi=0y$!<#0o3~hgLrcygDwySiQ+HDCR$BF|lLkm#^Jd0( zD=M*Z_;ucMb7?8;jYj8qJ|bIFp3n1n^*6o`;`54)d>%z{@7GZ<&ey@EThcP!8(Mln z^U{6+r5A(&`EIxC|K`ksmRVs#mOufLB!cxsfeyB5p1>?bTU$z&CvZK>6NqPafS;=4 zxl_Eby?2W|fg8fBS*a|>O|xZj>qM(>{cDbAC!=PbXP4>bT0RRJKZ7S=1Vm!Ggrzfg zDiP-gi(y44hTQ71hr-+L!a|}GU&7RN2wjKvdJ5wiM!%PCPy}8Vl|+z2lU*=o zBy6+tG|lVpck9G;v6wx;=X;RuLXiPnqo6JAyhT)JG!fMP1+KBs=-5t*hW zC>-S7puFIos!a;5DqDAcmS}wUxi8lZ;F;58&(F@LYafC z==M46nqjn3B#eRaa0V_cB;u6288s0}je=*5PUez#@AdB7vgjEanUY&QMb?6j{VT%PtGFrB&Xl$yd^v7DQ2DO1{w) z@r8}TMMFZ88i=+#By6Qwl262~AKLK3eC&RBhl1PaeU1-*k|rpzn~uA3FE_ z>l>I|Yu+1L-=5->E)pj14LMKG2ndb_8+xZ-z7M_A^4$bE2s*$ks)N7=;Gsq1Fnb9i zrB`v+2sdTqre_(zwDs6zi2&_jra@O`%b!rCTU0#5LR%rNp4&Yl=`D4Un@-8=u0n;n zhLKJA@IG9&4FOv63%a`q_G>NcEXd}#TWeW^=>(%|J&)UXHd6`W543}{k~59MGF>X~ zwhh6OSh+n%3Xu`S-axi zVXIqXaJ))70(Jn`WjlyW$Bk^!Wf)Z#gU&Q)`=E#E_Mwm2KJ?jDwvXIQb@`>$S)VB& zVw1S}r#47s8yP1h?}!g&%PL6=3zm>|@BhIHdZ1LeFhyamQ`O1@Uz3C-8eQYB4T588nyO)lioawv@n_v3jc|Kd5ln zHDAm;TtER9ZJn4)B501hX2v+g8WuW8g#&Fe zt+y9qe$c6aw-;2K1ij8S{y&az+unMyLX?DO(!Z#$BpL*)4rq_?v?7b#% z#{+K=(A$ns1DM3Qj5?!%UrKKhKf}3(L6Rn2R#mCzA2dzKQw<6Siw7xRGRFR?;PuJJ zEiX<$fm#671H~Dl$uFQt97KPN#KP$xsCU|=N|Go8D*v8PIXO*gaqWX_IL#b<1R=8X zCOC;!OjfesevS)}(_}Gy5WmLvRmE5=BmDM%=D{1W@< z9Q)zx*>VMBeIxG4v`;!rLR0~goc4zKV8_*ZRKo(`+thynTmRz&!AgGy!E+s^VW}*r zwCVK}lT6^cn3Udt0=Z4P<8i2gj5Wb9hpO^l!99ll5$w>f1^@U)_gEZ$FhjLLTTp{- zzxMu8;ic}Zim7dOR)t%%RloVOlS^8%$sItFRflEg7yz-j*Xfi4e$IKxII8W+2HdRa z2$MhM)w~$^wO7H7e!rN+naL#QGVM%YimnOH*>r5 zK-CFYtgPtob#iQ`QIN7C-i9+-5fetxe!q504!e#e<}E-{q9@2k;eY;#n4NGJ=W#8clPx-SsA=?D 
zv?n|39@3K4lyV#V{Wl%=QG!&b$f_YM@VBv)BvV9+ON2bnZw=AUsOUrKqMeHoEWX_zz<6EeQk!sQo|i8OpL=mn?80s_9&3-YiD7j%rKs zpe-03-`yWGI@oGkIZ6fv;3Q*t_c zaF$ba0+x17hG+bP0J(y;l&ekdLznUK76Y;_U+-~L;xHE6DZd^kS-8QvEZGx1=~#a{ zH`Y@7Pm?j>u8yCkIBDA;7PkQOC;M!KKpcK}KSr4cHDK5m=U<5+%rk~Q8 zIn*&G$tZoqF+iQKKNcOju46wF6ndh|W!4@ayd&al8lU0bGX{#F zdkSw7Rwk#qgiV4un)NQWy)ynY7N=>+Xoiii+PAN0>HGF!@a*SzpHEGjBb=2>Q=ugR zlcR2EtrU{~_VYw+R^hH3TW<3zqtH=he*lS;$wCZ|eGb3Gr^Eh$4J}9WMz;-xY~!v9 z)N)$Dwp>|#*aB@W z0HP>D{3%IH&qeuE(OF9m#1zW5sWq?EKmy$=3T1|G8G{(xqJCcJA2Z^D3rH2!yfk(O za&X()SU`L0QxQhBRu^ERRZ+@P5pv9eXwgMWMQB+mEYXHqHhlx>JA{~| z_<%mTJ2UFfiOj)6xFq&_f=_aC8gO0WwP5te?s36CF1tr1G!=SVVkrbVmy?NHAtaeP zE(a0l%?wxBQRnnbOxAi2rzLPtrp-Q>Cf>~VR`=<7sh8gMORQtyS=x<$`YBLmSvUBW zTCmCs!k{V6H`a+&U0Jnfxr*ML*E3yT>d|)P?iaZKn!lgp{xALgDef=$`&sV4&b_zS zv(%9s<`JJDPN|E+rEDrs^lO=U%OGRbG6SpX>u=K>C*%#McIw5x3f;P-)WbHZ{T$!z zvPNBg_Vq*gbkGr(EM1}~T|%xK{<`f;Z7XFI2-Qm2Zs5n@9Fxw;QcxD`sh1kWZyM`bbvyttoSUemBwAWcfK8$fqACc9OKwZIhuAht`-j$4D_>5?a?@oSvNWof%^hOhBVnWg<= zQZB%d&Z!0hCM@-rQg*8*cCT5zZ?!O^MSbQ{cd7rz0(y^}dfu)Sg*JCy5VMg({cWST zIN-e?PHsZL&0njy>K*YuSt{V;_t~+!rpx?kk zvL_&wR4DQK-Yj#mS+!gncaakvZ(k|Ss$srE@=<6{y6jxxW)u<-lea*Pyir4+bfow`F^ z?og))(?B?l-fEw}BLBI5#)gZ9O&h<7U5m0DA)sq}Um`(aDr{lSkg&C?pWBkf3gqe> zE-u`ktB^n`d8fp!T@b8Rrx6MG^fLGo8LmgE9=nj>D~B4pUMdD(P*P)r5C!bgBWi5R zCY?whPv5Ux!YK(**>m6yJrjXBZ>BHbQ#@bKxd9m`v8(}!lW0qpD@={l*f_6uLq?GD zT%U#oe#m{BEYS}B`MG|jz=|f88uT;M`v+cAWKe&Nex}L{p9v>uv-EzP8Vr7N*cNm6 zP<|39-ImzNfGiQs$Lq4W{+UhX##u^!vfr4S>~T{|fz@rS0?x6_X=A7m%tOw|v@vPt z*srtzt39MoH=U(_RDUj5JM|uO7`z=h;?WaVp*9^7NnZ-8{C+>MDT+~Tt`uCij-jB; zeXa)73wTyDP>D7tbY-`m^UtSsolomcq=OA3gj`^@u&HWQbcPR9`8=qyq>uL_{p!jo z${Zk#cN8P!aS`juQ_|*9(^(r!VJPjUxs|M`Q9l6}{Df2XQbVg&a^1RT>y4wt>kDq( z<^Y|Ijd0p`=9sP=mPsnnrerwn4##Om|6s$Kp#g|hfiVb=+!{aE&?>@9Te4bjbtT{G zst!eJ51tSDSEpVT;5a=m1?O9I&jpNOg$Dg(k_%trm3ML`t@?k zi(>`youmOd=p@saA^olrCR03UBLQ$*ejTP?7q4P{h)vb0<>A+5GG2D;*^UHcs1x+( zu6kE6%d{nD)3HYHV3g=!N;O;D=NK)WX?@i&oV{%&+EiejONxS$tHmKUn^t)g;2Q^; 
zK@&Jv9IJ1sM!ffrVnX9SK>OfujhTD7yxCeI==GT{tVt-Aa?-4Re0aFluf1N1X^drL z2WI&@f}ibR=Huv5{;YoU+t;$&zSszo-`3{U2g>&tju^^oFS(%N=mxc?2>J3TpUkm? zXeVdY=sW;$Zmt5~1+$e_BP&pM+@lSpH`A)NCFgtwgOw@v}Y zN&lQ7(q)dCH=0^6pc(+~dZ}JXLkU3_`~@f~CIG^dm7;g1i@>*NoCaKtu|lm4>`&O- zTr8CPw z6IPBDG=e&f+9Ts&X37~0Gh44K`owmC1+-`FXLf3>VreaKtMUt(T^Yo%o#Xo*|VF=uBa?KuRzOC1zKx!#oB}o$T8w9!{jKa z+LXMTbz-%O>D63JRcfs)JO@FNh=AoRmWmcqGVQVZ7}!ldc+H$IP25mLWGtDnB7%PO z7AtWgD=|}n17R`i$CSYKLkAprowW};;(jKh*6s+bC$A?}g^_cz+vAmR+ONP7P9_qc= zM_sk#jX5;VMvI$x7Z3&joOSK=ivaaw@xm<9 z`20T>soa@y2cXXjna_6&ouCVkmRp~f8{lW9#)W)-&3STWYi_(EDh1ouZSwUgcC0{t zcx}m4eQLGoWvg_`W=$-c7c3#No*p@*a<(?ah6FLkOe$CZC)LCG&RU1J7-1_zz;F(4O5{4)LidfRn}x;BlK%g%n#`;RKpnKU=vfCenSl8Zhoy zEv2{{BR2SB~IMRnCUPj-0?5~xC{3BR!(!`hn8(Z**eeXf%oS$@F8`Ox^T z;`l)Aj!Xep-5=lKv~tok)IrnuX<(l^z~oxM><#M&8x#`iYN*yZZ2253M&86M@xrCT$E@x8;gUQ?|jJfz|E*;Xf(?vg*{n=bX<*s2SIgUPvdrZ^GmS|5# zf>t3x;!X{9`Zco~$P;u|^ww-xb@CKx6#{bHo|-!3u>*$+^Q}v>XwX=--8fV;A~WM3 z(zbw7#O4XFr>46;{PM~p)jPz-Jk7FYzC97RRyrisNQ&>07mkZ^9NYJ}NT}@-Eso*{ zfhGs&u+#aq}P5vkMie0uD*rRjO6hN@WCK({*w`~Udbk}R#%4~BG!lxS;8>OwE z^V*)F6yUXoYI4GD$l$l5x6M1%Qx9%bs12p{Ux-+}e`hYD< z$c9eMb!r*sJ(p_uLF-Jz569-Z34l~wG`L6B%M~2t_=OEM`SSbW{#)Swoia_1&+0lE z-wM%VC@oXnzpR}_8@5(*bMjaEuI`M*cj!Ade_=Z@TMP^9QOOx$zNM?m8ClC$vld)~ zJ;6w<<)hisGr=%LpxkBl1V-rW35elRb%H{nED5eR&R?vK!jcgKHhx^-pXz!zZ+hE5 zvBB{6?_tFD1aibQeyo`Z5Jy(qDf3iO_xm2XCWd2 zV(|Qou3CiT5V706LC1mM5B+WTf#8!m<}N8zcTy4jk^2sBaeXRuW?UQ}$-D zGd0Z#-5H=v^}rz`E(Y%kF}p@qa96y_XXcHtys4$7D}A!S?&`fBY#*k_7y?R2ezT@%e?foxDO0d0ffbuXds$(3X3;Asb zS=yw&=pFrdv>H#n1XVk!KpX)PKcn#VWb><=Y;FHAFG~gHSyGvyctCk{%EImA95b$y z$aOz$+)D}e;Jko(k$Dd#oEqmezz&`EY)$iPqUpu%>J3cL3N*4t!5KaoO3pbClNSku zU+Km-9T$h%OU7!|nC#XsjYr06FN;3YS8H*!*8FHVT)I__Ip=V8Ck*zjb&ph>COFty z3^zh##A%Q04#|0LAMN+mc$+w&B7JKs`(Ajc){i9~&By*mNXs`CI7UC00U9B{rGwoY zVX7CspcAWy;{A_D?=*Bqp0?jrVhUErc45qQ5c?sMP9gXaqIQeyrp90g-pN#21GohI zJGFt12+?#(xU4T%w2H!RI7u$Q~`tJOmK{1j-Xus8l4ihNH#&L)q)bX89g2A%BXZzrgam39G z4gMGEh^@g{-s>fVw0X-rkByf1p{taL87j0#mUmMGU>z|wiyd+c&*~wwx)U6?)h=lh 
zUtiERRTdIqEBa{>t?60dnRW$0Q+G5U(uTef;3-z*VQyGq$m?dakX%l2!a>%$pCQE6 zz+p(WwL4>r6QJ1uAk6sah}cCFs11mPa!mRqk(cDrBgxStj%oqhCrx>@F7jez?^3s& zMlYn3)QCSPr_ZPHBkZ18^oaR>#ycQ>ta;nVpuY_*-(fAYNe`tWPdB%ctb39rf;ggg zrKCi8^M{i2_y@fh#vTc#dK(c`x{CPD_KxYu+T>CnI`rfsU&5_af0#Fz82q}iNMyN% z5S8MQ_$72w#turYbj2@H8{{?#ss%Ha7vg>~0mghO2<0V;s!mODI1z(MFXi=%#$Ijj z3udAdmtj9O$2DL@c`8-FCGIqedE`dmO^U8GAKAHb*TMP5V#gfYAytDgdL9gv3l&~Z zL-C8C+)NpybpTmYYewE~nG0Ntq|PQe@A#q&#AweDSYjirwPbt@yX#W~IS`&sRvmay zeuq{T9mo-?)eQvrR5c@MTN9x8bQe_XHTs$+k_-lI0GyrbTvWYXlYAypoEHxE5o{38 zixK%cg6eLD1UL3L@|Uhpb)(Xk@|vG^@&BHrY>)8-=6$RAwhf$St6=jrfE+tXFn`GL=xB!RbkK8b0 zxIIpSfdy2CSQe#UyI!+5de$Kl4d+M%^=Gd@DJ!joTDlzog?$PJ8&%3RlP_1eit$$hGP!PlOYec@d0lTqjiM`Pq+F4L zwGPXsq@mHdzPO(ibz2)p3b0hqbyaV4OInVoL{y+}j9+RiwA23HC|aEI+&4mOaxAsd zy0U0H!N$S3jrF<1I|q4!iDj5G+}6W)63h!OgJJl3cH+oHq(B z3Nj!UTGJk_{bVWs0R>CG;2B>~MVIimI;&NKAscV^wBwlmh(jtwZ!V`B5yxw*@8(f} z?y61O;Nj{HM}LZnZK?OyQPR15^DVyZdAItrcJOQJk$;lk?nu(ER`9$XmWB*LPOZpL z)fV3fe4(SF?Hn>|VCI&1YwdU}s6ZvgsyB*=wa0_%L`z(?N#S@wNa%*CforWxJL1ph zP(&l^H~?z`(mq3$n3O*Ctk3N&>dTnmH;0)=4fB&V+~W4KjcwtTS8t2{nYsgOs>M@* zN4uO|Qn5OVLA(op&iABjkNsl7HT4ibOoXuL3i0FH-3{Ne9Io|ShOD49Lv)r?Z+ts? 
zF?T-31g#6{uGp@A8pnzWz&gle%&`-02Cw=Noy8XJ^Nt2I>_Xl&>X*a=2<^A~kSN03 zu@^9L4^P5TqtGJapl)!hZSyN z45f1vc&Zz>@s7YgRqR&%)63=$R{wTg-`eh*BJ)rT+D1Q%JrB0H&cj_5BzbgF7E7@P z>c`_flFxU*rm=h9om~HrOP!$P2t6({0!%&&?8UH|$eDbxDi@d`tzf8BPxR?N;fBTn z5MiwIc1qO5ihiw^iA`7u&4Ut@X1iw87H^0ruEx|*kwc2DZ zbUd1Y7FStT5)@Qnz*0Nnou_^GtuWC64l^)O(DLXpZMtQ388A!vE8Z_9f4sOwZQi1# zO%{Z)>WExXxr&3!xGwHhpLC&3AC6n<0SI?&)PnvyR$y{83qP2E*DFOQ&sd?S(GKH0duaYzIYCK7>8=3l{epCSMB_%o}pnhk}1P_5`K7I!ei#V9e|j2N~rg z`Fe0@xO$!7Z-tDZE43Hfu2|NRD5|XL)y$L_5**6e5w4w1a9!3tdiI$^yvd@9D&ksB z@8hRRvOEHZb&C7s)IM=7N8bw@uo5)z0Tpef^C}OOn^ReaZ{H&W@NK7w?v-TbW6<-I zZ@Ga%*Y(`;Yhq}frfKYk2Obm`&*s_TU|o%wo>E%j*ZAZ)QY znOEfajX!B6SqfE?Xg3|8cqLf~Q4SAh%-#VLI|#jiHybSHWnW@7MJ z?V_m*4V-x}cBb+_z)S&sjX?dXfGV9(ows_9V9G8IOpb-F*Hh1Fum3^4mY4VUrPpXi zLrX;9kySc3LswglhQ=9mmGT^vJt4q*l$|-ms&gop{K-11t=k(rYAX~}73vCQ#a9^0 zqKz_ipHu8cCsly=Qnyub&V8k~Kl%Ynx&V!ZyU-@GWp3`FDcr!z%`3{fr!`efKKMQW zd41MDh+wCw-nzM7N%ht##LTPScx+U?U9CjCPpUUkuS2HtK)`HmKwupU2(jU`Cl3mG z@oPA>RBc|v!Z$XOhMC(8m1`q4e>>X%7KxZfS+?x4qcm6S@s+8LdYr5>*Vgmt&Wpow%f zXB$|y+)Bx!mfMY4|F&9gMdS;`n7M_n63@wU14$c@m*r*WTKFeUWU z%$7Bjfdde^{d`Jz^~nwU`6l9+(uHTe%5dfykZu^~1O;2zn$TGg4%dLgm3psj_$s9R zeZsY2=5o+%Sz9lQ6xWcXo|d%xdK_sV^zPP1C^z=daE*Q8YTf%Sb#G7By{%9!HLAFV zpEFe3NZHc`!A#~+ZSIv!furoU0^N4^w)PaB9jM1{hz$;Cl&%w*yG|-I5DDI>IrtvpTX$i9s%n@!GduB#qk0J#)K~()E^076{XQQZXnjz6-9nVm%9P3 z1@9PeNrDYXw1^0vA)|J3k@h&Yhrj(U@ZLfmX>9F#YD`K*JRu6C_K*^*)6C2wuk?v0 zZ)R?ZuM?}?o7^y#l;0qAcW+XB!1N$lrnK*2w8EW9okRobPWbD%u7K;ZfKGB;&fYLI z;#>{5_M$rH4}Yew*l5D&379D)4*%AJP7KA0ZT+E6e}>x|DZEyHev%{T7`tugqrvB6 z{giw@9z$)y*>siU0=F*gkhzWp`-}0I!}jqTg-bY9AY543;iVe4EQQtBaH+V?GmMpm z9j@^1|7Y)gpzJ#9dhdPy%$$GoXD4TpCY@=!&uQ9Dk=#VVwuEZ4BeaDEt-k1mE8Z91 zmv_Cag!L{PW0&hLv~y{I5u*+eaL_0NMjJK7DA5}+!T?c64H_`$fYCcbyp9-kTt*os z()aWIJ>N|M~s@{hr^$uk)(Br1wafOU-)=lehDBxw$8* z>)2tsiW&v8OJ-qtfpJ${$KuA9z-W8|il0H*l3^w1J!jIeaC4zqMI6V{!s@s(2c^@c zdQ?5X(yhp`+fxXny7ivm1Lb3$$OS(rR8ak zvHV)21BIpq$LRA}I(CpYZ!;5=7nWmU1&&`>@bWJnhcDGxcOL|(_3`la>cG)_u>>J#&PzT 
zd!$zinm#0^ix|hmWU-3BBq6Sz>I>avOoWB2)@KNFSPLBFvoeCCmaEDL$$8wItvNF| zCUC$kxkcxE3rwjSdA2(9;oQvc_!hVr&tu95p5133 zlY^X_+T>Q>aZ?g)&(FcS+MG z-}ZIh_@x~_yM@poNk_7rnKX*SMteV=f4FJ1Wp^{gYtlgIiI>eR+n_j%WecQ;=lm8( z5zljwAU)4+!8(tmFC+Yzix)mSo);AvGy51_j_--NqpFaw9$;bP$|wz`(B*;wR?dA1(solGxP0-Mg&fm^TY-7yp%_m;dxMCbDP4u<Qc<-2;6Z{6bDg$@8h-zD(GN!Ip2`GLZM!w1yYD;5<#sPaTTwD?{Y2z(#bVf zXs+|IUaLv+qsngv;YuV5Ipp}VMx9Vnuu?Rw4}TWD2n<0^Gt04`fD-^-LP%^=D<73; zJxgkoSH_VVN3t5Xh)}FUt+VJb!4av}1Jr-5kPwNKcZND;DVH+XZ3Uyj#qzwr@^cSQU zFfHDKq&fKof;@qkZ(k|P4GcsjF%2Zj+nXG(eJH-oMHZr<(-k3GTt+0%E+{IOCd#1A zw-U~6mXNpnEX~(>9xmN(EaM6$Q82BCg}4we%0k>qohYN+v+a;EUbNC712-{ZxJHLe z2Ue`mcdkm5>~PNtcEwpHknVN9=dyHV9cT0359)bH;-)i1@tojWMF$Jn_i6MG(H&lA zOc*Ohcr`Mo@xlbo-L*b|<)JMJkQac)0|?n(7$)FoZ)1~-2(S9j%D?xS0>A<-;tVLr|;O4F}+^g(j*Pm zy>2*e>TBg_c=_{t!gLkl#-t>Ap?Id1GKdOf#DvT0! z2Nw@;!@RWwljj*va4XSe`vBWt=7UCXN>8k;XZ-g?o|ce@gG>Hn*o0A@&=*24in1i1 z)aQr%U0DfZuAdRKwV}v$2}~B)!Z`okjN(-@K{Q zNaVzw#OpOTD9+3Ihgko$whQpz!BJW_-@|t@4^+}=|2*WMQ+l#42lWJsChs%@9n`gP zi?`893icW`M)|!>iCq2h5tf0!cPOA4EaaO>!9{P0JAG%A>f7~CGty3)CPl^A@L>)ILr0=i#q3(~$= zA3N`#RR(NfSvK)~Cc$zk$ggKHyJ9Zikl8zx$z!n8*pHXZbtB@dX*3>q4DqlC^Y`L* z$;9oDs*09y;`StHg1{htEJ#B?cj9t_RJ=WWr_&38s}{^KP|~oj1uzq2#U77XIN+t< z*LRhE-_XQ(Wf>L*>7ZTuO=M9o{jvm-e(f@d;Ye|WvNM>xE5x#UO7OEJ%J~Y^p9AkC zt%faW)laB{+_Xubb9YMmg!Byh^Q{931!F#G6cj6bR?R1lw>z7+ID6F6ohi|uyemue z&)(UDX)lYEK$0)%2{FFFQzUoMKPT>@<<#Fc#Wzv;%PMW^_lU#vdA%4(9dU2JE8@z6 z)B*R`sz#XXjGj=syY`4cAGXHK0XOYGngj00U7}9T0k@D2IX;no)OZh3L*ty+6V`Cb ze?P`k4!A`?x@2u2$f{D=3aA zF~L)2pv!7e%u_8o5ALF?zs#fCA-P+Gh{>djIs-N!Yb%U`bCD2tmTV(fM1EP8>EL+h zGFc5;m>h4QhC3%PXKz~zu^kygYtKSlw8^(Ege6m4ZTecCI2rz3ozvHJCz-aELDQF6 zCNq7OT+4iKfkjM~!Y}2TmLjhx$|slGa?xv+Jy46iR{!N2@qJ9Gb`4` zU1}a5wR&@Q7nup5@r=Eh9nW~4?9Iv68~L4%kK=a|0Ab9c&(v?J_zd$~lGLU3mNK^D zX3?M)Cs)k@2Z=Cng(mL^-us{d|9fSSX_%Gt$2ay$RmaXYaT|`;sCb>Gmn^*QqmCU zg0t%QW%vLaaci}#xq@=MvoqlVUzXgvgPC^%j{}d~TUgZka7M{(UnSkXP1?RO=YU;Q zbt~Y5RZ9>r%@V@)ax;o|B}^hborFmw4kR`dO9&KX2BR&M1)JHgJ%#MFxn287NfhEn 
z8;OEsWk{6aWfF|ndtI~Q5ppl{NOn6}=8G?P^IN>Z^E<2gwcj*iw8u#BS8RUqQ6Ryg z>C(%|aq0Zd$F2z^g*cL7Sj3relM1z*k4P=!rGG8s&5Q`qadqqwvgBRMv)9<~iAN*= zZVgTeCCq8eX=ECo6Sxk@YwWPy#ZfA4LN-xxDeL3yR!sqxy}>90H#(__^T5cVDxDIe zl*b|gE$_%Za#zNYd&V(UXat_z(UvFo>~2Ia*yoyicTgKox%NcuyodJiZfBsK-ktH# z8Y9EhJR-#kh0&1Ld1w#qZh2@6@P;dSXob$A!eDUXp`8)dJ9?i#w%ZQPcJt7l&{!>} zSmL5Rd9|}1QmQ@Ea=k#5$^j?^a>=v=r-am`vQYfRcY%mXs_bCIv9V5JAfZ%|<7_Qw-typo&A z_K@|GYCw2L@si3pIws$YoTD4{LC(=z_U#iZKDQfy#hWVI&6fpoa+Tr{bp%t!8%e?; zN9G5V|uY93+LGRMvBje4D(U^C9UOk0# z>L&6)>eeh{2pP#DfyFfq$gpE>O&NBw6hvkd@76R*q@^7}#r1AY0mC{8Mb53MD^+DK zQ=&;1b-{UX=hjRqSv24%8CM8s=dHGbN1cjP?SZs5E8t;|l3EVT9j3J?cIW6_YX{v` zsyo|;ZG%kLXz@e~%j`7TGCQ>ym?rI-53}Q*c9P|QcR|iwrEF(5S*FxQrab)!P|RR3 z&HUjbPO`lGa&oFjEByoiVi{@HUVOs8xb*U7$zGiCFOX4FVN^<4e|&}!hwpSooRZ`K zU>s@9hoU=q)v0kMZ@@Trm6PgIrmF6fI_oB-U(=C^_=w))8$^=oZRxm@K6UQtmHN~Q zn+Nt;qX15ylJwTq=5f8cmF=xAc?Pmt%SvuDE(}3lcfP#ySGl%{>8K09%y(H^vhr+( z3d@PCZMMNIIFKFlUgQhcID!RJN3OJCGEpaDrQ^jS!zLY>3S5Ynr6S&!s=$oY2qP53 zC=_G`J-OoyQA$R-2t+QgWUmq~TFFd)ZMLvvoG@^4iq+sEIkMNAVXm%o3ocjyfxPx3 zf&pIwTjdB|(i2DU0lL@s2|XuGOg5vUEHRg~ePbSc4qmMi;d#MgI}BXBMi^MUK``*k zI-mH!|7I|-3W=QDZFIklM5f+rG=RO0oOir;;e_q1Ob3o`kz>ouvAWfalmbze>=QUY zoo^(38*;|BoV(l1yS}aTgy(@!>hDkDgfbC6?v67`dd3LB*LiHyT2gzNbgbbGlVQ7d{g@^WuHT5Cf52A;Wk;d@_%g0wv zTmhMT$jgZPf88GBLy_;L-sbymIvN<$(C0(mq3s7aCQ$l%E#ehG)JC zFZwaC0c+@4smOEePr+XkYl+B}4UD8gin>QIzlq8k7J2a)mdl)N$;xe5 zBu$QzBs#2zGW1+XM)$;8Mn^Veb>Nt8(!OQpCaa3V0GKek(Rw?VwE?nB_LH_PP$n#1 zUb2_Aq3$k&d_1iyuahQ~w;K}{ZOxC6xT7y#z~P~et<<2OnC>W`R26>aI$juVjQY$r zKJ7YpVX)}Rh)0?|IA1n}eqb{;B_p3np&#t>KS*KCTkG-xyBkmkb2qD&oX6VC=AeoF zJ~f^OB5K1^sqyg2sR>|+>(9-^;8Np}_kOS5jk%ByYsUp6_;whkwd0)S3Q2%PQKN=) z*G}cFtqK7gREe&pN~9{WX6KWytgRPdy&ZzUIw~IM;ldl z1Q+ylIl|}dqvZ(y3;&TE;eV-H1XzB6&egFb(-$y^TAIIel#CY#*ac&`zpO+((|$?- zpX^i&CJ29g#ePiNguZJH6MAdsgnqcogbrxPOh|cl{e&)Sj0Ky}n_Cn5q1J?49h=Z( zGNENn_Fy`p4{Abl^yk>eaxIzA^oj}14G6$qqxy#_CJE$MsLeg4HA3upo~$X&HHPS# z(xD<&)yO=AG%{1;R0ZD;*BX-8d2_VZ9F;Ovi8fgVqQBx+vuAQ#cJPS3Tu*o*kT1A* 
z8Mc$FmXt1MW2E$t)_x9pMY}U%lhgJSw}lB&sHrmc>$I_x!#S4~lJ*NSiuMAqKVo)m z0QM*Lq8-4_YVp~RbG8YVgnrIG+J1b}e+0ljT}uJX)v^6JhvnA*>@?4Ko(|jwoaPfr zV8i#USpNB%Uw#{ZXA=Et20{L@bNpZI96#e;vi?atXd|`pFWN^N|3CPTjQ^iB{y1+U z4M^hZ*&Hs}9N+;LoNT45vbm!xij!nixUO5ts7TUo$*8=^WJNyvvj{*42sLe<*a~22 zZ!Xpnk2Hx9@1`#$EN4jho$fz7&iw(IeD=Mnaf+SPZ#Q-JG+}SL^kmsFCG@!qDY)pu z=d+*7*AWDd!N<@?*|?tnprYU#4G@fz@8NQLRy-2Tw^H~mA@suMFjXnIC>oW9 z5KoeHv6VBvG1@F(>GYuhmch~zoVvTd@jO7-5ec1q))$|^JdfFtIa;4!b zC|ol1vo!qA1@T{8>+s#OWfqdCBLw1P9g(p+*0FQ`tOi$_Y%#C{2G`dtc(B?R#Pio2;d9Ls>cT4p|OPJdAf!_g{f-PM2S~uF2}OaCSX8YrnK6_>*)N zB2^J4quh~6>gg*C(&eoe!ED_PkU&St4BOJHMz@Is#}ww1;5TH2l1&=7(s+gn7I zMX@{OIY;Yo8IHA4TuW|wXb%Xv`o?m*$TG>Xo)VFt4%$A%Mg$@Y5<&~GtgDF)xY0H$sp>Vr&Wge6_Bewb_P;Ca09#iN zJ6unig&nQ;8ej*@NQ-9>XsS(`iCv^{zM0f@WFy`Z9Ts0Hw-cargYmMf}h73`~l9m$Hs~u)4 z?U4i~m06T^+$?eN+n$Jj$Rk=JI$Yqc94|Ut`zTC$C8^bAQJ@$ z?yUezekE4lz!9dbq7&V*b{ng}#M%?x4eX3n6TN^kH}78eX0Gh;VhXoHV}`T1K2(BuyIuGCfp&$UV(FUQ3=5ni z;q0EzwSE2uOm9VR*lyP=6EiDodz#QL-uU~KIbeTL;y8KmF>T9itt`aKGg#09wbCCder!*76V#*C=DW^75QB*SFzTF$8GBm)Ea|w#QFbtq1@3Oz}}4Mo6R^< zcDDv%PW9IbR{fK^F5^G`g-`!N-(JkwaaGAW5L7VSk_63^{3_8U);Gi>2Qfdc_=m(J z!X3rcK^w-z#zr}|FqE>tj1cdNKfuL_bamrQYvx`-c!mPu%JfzQ%0b~ zveO84Nt*hdAqQg1UsF#?&DX6#VLUDp2KHWAuu9>~1}-Ij=9vPSHnC4M*Y5~s*Tt0B z8hra;aC0myIaJdG_3e$Zxf329l@hx-a!Xc9?MWrBMa4SQ=a!7cK=rlM53=|DAp~1B zbbX!OP{QZd^>r9>)D;(<=z=&uA_CSD$AG)Y*n9(qS*cm_7h-rO~Pc(7WU9rA=<;DdRtjg}VsM3XDBob%w zFz)yLT&+RRZ)}#x#Uq!STdm%M+e{l3;EP)W*k~?tBKVIFBpp=z1&E?M_3chnzZzSI_l`Z0ofQjRT>M`43a|WoLIEFpqT3EO zAQjsM+kR(rR8usp_v8_|=RA2-l_;(|N=ZxAAPTJoLmKZGMB$nSk&cQ`M+@9#R9uU-hEYL~xe-SGU4)WxMd)-D+=P5A;(n1}Iom6V%r`E^FdMd< zZ})lUTa_nAFZs9g$r=|W2hqJzHNA`BKy+V+-|fE$440tcVAmS`Ztq)RmtU&NF8}IU z%j*V|aQn2tH=b)X?ueGT+g3I*mo#FINZpR*mg!uTM&?_M@a{BWh3)RBX;x9 z1*&SL7u3XJTN4X3k?-2XqBZdK?o~1&F+ErzkZl+)QztTg2*H z0?)z5{BzVllZ$iNoxrRM(p z&OFDTbzyd~nKQ=t)?9LzZ9U$yr@09c%X3G|Ia}YB8*k>|K0}R-qH1-R2!%GY;T9Q>YPZozPk zj4J90TYbgt8Bnc(Yn5T`arBD8f{TiP59hkf#ZK0&eFneuerB+teFk<;h>>e1nt^7L znSoz1ZZ==*Y%FNd&j*58iMNZY+b6qr4pIk9*qPCjwx4 
zXkzf!IpBZPfM5J94><1vOawl<;ff%jdi;DY+(IHRBJ|*{mHc0ij};`778E)YknIo- zs1NHUmx6o8$>c$?40e)Zu>iZS$0w{J*%0=4gg(`!?1=&)Vxl2qn6!!jve6KE&|aYK zHz_Ji;LjNq3*f28b2KZGVuCM{Sok3$0%vGk=-@{JmPl;7&W-pjp@7o(WRQ3T3Lx=a z7YZ=cdOYi91ykobX6G^IoIMMCx^2-Yn=T1dq7ESg12YSR1Yg|Wh{WMH>1+fg=2Svy zBM_+uc{{)+%c;GmA*lthCAtM_?@7E35YCEn@(la_R+3pJg6mkO#rBHa9_LbMCW~;PKV2q+D9brpOjcu1Qs*FM#DMh}(~68W zQ3P~5`6h;Wv7fbyA*tZS24W;^8E5T6rcMC!=jpDVUp)0rKy70?sHOY@f6q1Z3%q8` zprkz;^oV$=WU01pbs6I+P}DkKwCJjk`Qg}V;o&;P*B9712ywbB#SCUfWt;8Yz0LEX5LC( zmr=aLt*vj6h+eRaFlh;85Cn*=s|_al7c6Fpyx30aR(@>E>KmNsUyXE$df>iB1v>+W z10?eT+))%B!-ObuB-s%~mSSZW1>Z%XtqUFgX=@P@whkQZd`(B|z*PSlFx$&K)(K13 z8G3?nAmTa@(a7?@!w77~#q6RM*a6Kg&uAb6L=|f8P#d8Nv^hB;Rkut*CMaxtan7hD zR9sRL!XBZ71PY3G&vc&UL*dsRbq!T1r~zBLknf0b*y35(6 zE9*;%q^rnZ?z4A@D z124TwC|Wf@sX}7Y_3$E=3Ut>=RU0W9{Di)j+(9U;GNnw=dve~T?L>>a?U!<`Tid%V zhy0W)mcuVbKK@AwZ?~k2VY;JahOT1-E_#eHIjhw%Mqt;6l(MQEH$W*f^Bv@|*orj5 z8)$mI;=EEL9G|mBII3b+8Hk54tC+Pnp24^@jit?#BLew&F~y5C_GmKsGPS|lw<-rZ zt6cv5OqEN?mnjTA&IFb-KLe&LaJ3lzjDe+sYY?8X4o$3w66*XoCyY>(+46x2Yna-VNH_)()K_Q&oJDCn`1fq3} zP-9~MEKG~lw~Mn+<2ca!h2bWUMu59@g(>hHRyH|NFqVYayt22UuHK*y3OIWx7TUE{ zk+InNj4$eVN-}&b#a9}>hJA2j!jjEgsr%P+}+?&wo#o>5aoc2QUdD z9e~H!1-)Yk92jT^CAFMeWH~5D zw4TW7w%qwTM=Aup!jN3dMMjK>jY>B85Vq6=x~x+ncjphr`Hv-!%sF83ZGcv z8i{ERC-zOAlLV2RER-%hxhr}LM!Cr1;Rn9mN0C;ujuN6Ay&oePyIhO>Cvs7AXgvRt zW*LAW7FRS>H<8%krm9JQVk*^ysfaB>ML81y8o3*`4H8+q_@n>qZ&Yv~CRfS|5(?58 znFlFFLKT=1!OMj2x{i@ewgUmt>&U23`YdF$-RNZ3nMjC9VWsiAXe%g4ck77l%B%B`ws}YhPY+BlhJf?TZqZ{k!kW zq)>m$SNpzjc&gLAor|}^rkn^TQ3ayL?n!!*a|td7)$S(3N%?m^R|)v1Me)UD1=Z)s zEoAO@a-JlSH1#V8K|*H7Q!|&)i`g22#a{<%xI39(#-dObTh|f_tZth`C;_h28E3tk zyys+v!nM2%|p?(z<2+5h3$vm;+Vj$tKxZf!sZ{c>~*lTRnHNNAX6PA&edBr5ZD zHjC-6mCpgg`vVp)xjd3DUVucu?HP%&DOr>t{eM#aS86Uu7i!G!nE8inrS zNk`#mn9=PP9g`DScec7x@bsGO0ST!hN@EX{c389xVpf(>!STvoB@$w$;R2E?bWnzB z`kg0cI944ob3%v{R!BgVkQ???(EYH3LP0zLZb2m&PwoBPg)7Q-B)ngR6Q;7=w>jWJ zcjUM+x+4;s6o{w1MkiQyedGVk_y{T7&Ky(dvXj?W9$T%EYa3gxi`@CGZfw1!PAr5i 
z%3>gyiu_KG<)n_L;ORocXS)L)TaBqK9bcnUUZ;?s6CoWXl5>!gl6aec`mN^e(WSdYd~sbMyuM% z6 z$LW`OrR zPw}AP;XVhJjt6OTm1coo+_4e@?*_u(u{}7v!76huAgKgjQlL9Fl4-?(oBk!?R)=Af z`J9ECgs|-iIS*>|myUIaZan{b{dq&UTYEcE?=~p>H59qLUcpak@EM2R(8zU+In-%8 zyi_B;t|CVV2ugYZJsCq!Cg<9j9kFBsqTtkHkO;zzIMeXDHV2|-@UILvai*s zL!xzf!BqGWfycryd(fw|PVuiN%*b7$gkZry;w5q?sVc|xwzEMN`X+#l9n*^Wc4vUn6Qh1+qg$s05eIu6wKFlDY#b&mvPTY>yl-l?v||sM$(q8!!eNc zCINF4UJ|fTuhu%}6(8V6HEJqU+p<#_!P)!}S_OfV3zTv-1S+;q2`nXJmS@M+ZNC{j3`v~<G1`9RneeY9G~hD_&^8%3QB zguq|qBGQ^~aDk07GB0LlzL1%@!MjRa%9gm8DS-wkk7c8(B}&75f!V!7g&=3=22E{d zNx};-2M2A!e7*Tgy)mn9^5%c>rq;}F%N5b6c?iFphcn#2uMvuwkTF9mDnGT!sUepP zTqbHEe0)dn4lhRa8gniGgN|N1|A6)o!Qlng*w@m+uJA1f!hajt1*!7ZKs(E({qg3$ z9$;?LLdH&_+39GRcuSe33c`<$G)lVy1?6Q$#f%y7-oy;}&4f)!LmOz~69C>fI711; z_WZcwp9AixzBVL`m`{VCQzQBsZ+H zyay&(dD#(aO9%$bZ~1K~pcvi>F(-;TtG{s&on{sZ=U2=ABl=4>ym8p1`~kmU)XJnw z8j>l|pofNLA#R3~P7t-hV+R6QcoqHXw-0RG3B#IhTK)Y>ut%;A_EB^#8y1NIGP1HrE7T<2xbY=HWEJB}#(A6wgHRP`>36`JwQwh*o3|sZ>OB4#Ss=%= zmaeaek=taV8*Jo8^Jdwe4~rxz=R2aWT6II_a3OF+n#(BR{@TC zF5V#+j~!9exs#`i1a5*7{h|>NqO2=}Cg^{RAS*)jz%?}=D9#G?XRo+nPEyYNP__!{Zdv4eB*#edF#b}oK_ z4;M+iYi5@e&%`w1hQ%5irNB7dq7*C#RKkwhLW=*^7S6mGi7lsOTONvC9}PP6;Fn(wc5APTFi$0>g81bdB@o^GaSq9oznV@K+4BXEy=2Hvz}xO- zA%pCN7P1!x?bI#Kup5Bve>ugTw=xxer94_U9@z_IAU6ss>`@FYYl~$GLj!9$PP#|J z2Fx{);4!b;@Yt4hYal$9U)40&b!!aC{BJ{xBug+_l`F6;Qks^a*G`TCnaRG{?JnLTZLZV>+EvczahjuV}zOJ(Z z-8$~Kj$i8T=qrgl(OgMn2XeIv*(r$jtwwe%`S5j;ozIHw-1_X1oq|(opcp3BvC$e8-TZ!)%@EAeL$Nf&Hb8>za5vhp%_j5*CARNB!^w&13;?81wO_-`3 zKemVxq`c!t(W64D^sb~w$R`zI6fI8NYsqO+I}sm3%N+6Pu);1^oG7d$dgb^pg7^Rn z0R)IMh96jQc4xVwAib9fSElUq%<-&~C4VN@6LV@L#hafx3 zy~-WO5QbAa)yHnl>UpZ76qcMRO9ZE4vuqVtq(mb%sEdSmT5uxS>T3Iq2&_)a*HsP( znh(?R&b_>sbnJ|YDqz3uRDqYI8Vd_}g=}anOnWvod(n{%jk#DhG(BCqy*c~nvZ48p z%7*4?6q&@%S{;NzlBmI;%ms6rck#U6ioW7n#P*aR*x)Ileze9tJPbudJhShW_-QYu6k|YNX5_&{hnC$j51`k zj;dDN)c(w%u=?n*W4)>zw~i2d$bG9S1RHfo@5gOa4{B6Xu6=q)+CH>O+g5zK)%H~P zwx_M_m#XbaU$`}6HtthF_)9BrZqh2fM3p9dr555)>4$S&Dot3WZ(0$D*n$2F#5U*P 
zN6tB$IAO|7yJU97qT3kee3dq@%o(b{X1mtld_&n{8_p6_#iB-C=S|@EtZ-SzZ=a09Bb*fjL+19X5b|2QP4J)=e6NO#i zk{HJ0Ova1$qLsnUr+!<21X{>8xJ?Od-Vd3gGf}oMGU6KB5mT+BxR*p%m06nH+9e>P zFFs&XIwWGCGm<}D4FAq4GOIN7O|`fWjUYaWs08<|HAzx>o<<{{#HX7lcZ#QM=Ny>> zIVL8y-xIt?A7}icbH1EUDT*n2Pv)W{t1P3f%+b~|&U9bKqqdAYwTvfiP4W(ZR`heq zo#I#l+E#T|RpTGCEk0Sohm_Em)2r9&z}`7);&<4CwDOr%D%;e~+Gq`M=X|7P2SS~- zAbiG$*G>CM(*lpw1Uc{k$6A%XQ(YHKxM znT4S-2z~8iJ-`VLT4Fr_;FwADCgz&5*{IzaqbUOky^!Y*i-mj^;ja@K zp1A*H&t92u+?pU3!*T;C=8p_bX~uEIi?Guom&S9TqJwwnfUcKz!EVQbENX00nLf6U_g6#_+_z(gD{w zk2nUJ32@D?f-NF&WZ>#I;1UNp?`yXK*Ft}Hp@DHw`+UC#E{4(@xKv)-v4pZKZg5dg zBGni5WM?nvSrzOA94WiRw|Lo2k53$041d`;+T|X@i6{IfFtBCri|fMkCu|eG(bLCL zN*_6uoAID1w~U=I>!6BucGuz*uuM3rIN+(Gnp%x2hACB41D-1SwQi@1w&`7{qA)W) z)z_OU&i1WJf?8DZR3DI|K|88=rmrhiJWEqusiK3K$$kUBDYri^aAw+1$UrZlOow}d z-gLkDiDt4j6QDh^%09aR%p_pP5PE~1%HJ@WmWhXR8A==_>`v3@*38Wy@KTU7p3gs3(iMs3+TT zLC+Q;;#<7z2Fa%jwC)MfDnqj7C;U>15OI>O8zHi#{05C>x*e}mmu@2M99{*lRhe^W zeGZ9Q>xh()T|?o1A}FjfX4lfuK2OVMuC#abBSc!xcf0{7vMAPRY0%LjjLUb>T z^MiY@HMUTLDvsy$igJ_{6VF6Tn09ThTB$wcb@f=offuLY3@kIA)6yy~ZVQ=k4sAKv zV1G3`X-TcwVZy0yvE`Wk7DBJGKeaGIrQ??7IMqqdB1@v6w}kCK;(kepOHBzDb3ubO+5_!dP_nCVS18O$vFAZWie_;}cw>l`P4TZdqYMblWw zQYlX7=PxWVherJOI3e&r|5f7cs5noG#YV+T*&hkCwgc=6!!j=mV<8B}9euJ@rct@7 z)AP4z$}780Vsx~!i470~jxB87Z}MsK7qTV=&|hHjY9>tffgQoC>4Dyr+fOzl%k8JI zSg0VsXw1Z(_y{ASV|C8uCTwE52|Ccy5eZvJ?H00ppV+{U3dG3gl&+;KthClj`JLNg zg`AVx(3P`v?=&KrIe%5o`vTYCD4=&M$MbkOrJ5jzi==k@ zEefzT_`8j*9q)kd?gLuUxp_3>6`dQTnMDamv(5_=8r-lIgoucvh`25_-D)swE?cwk zZ@V)s{!0ZiCUdg`=LX0eIJa#bww0Ii)owu#SRvFCiVLj~k%A{|(SefR9|_@Qc*oG8R4aA;Io*;s40ig_?2>y{Mx1si?M9#zzKv(jP;a zPLA^bYa-+m96hzcA5U8%Ho~10Q=%B#a?ine-Q(xQnTD3=&%6jtk%XVlOTzAb9OpCv z&iU(`gLv%4uSKlxYqAsL`TI#Ow>QA-^b2!;z2@Aod^gIjb=AeN~ znQvebCgM18y`jJ&dYE#I7s1?h@@DZ3~^14S`0L! 
z->{V!FjAK(1i}t}=nDIHFodnaX=Prs`UcHRkYi9+Qr3_pI5_h%np4X#mwyT#f=c~0 zw^IxT^v93tiyw3Gb_={%>k_dEj0W&>OJs<|4Ud|!c0u5e-tn2ALhsm=YEkNnKK-E| zt?KqxQlj=lKN!wcx1OyF>w(&JJIIM54*l|{lrpQ%F|Tf?(o2ZYajwBN_Kk7@sA0qe zq=;g3&#eWNY_}`+IQ&t0MA4tN@z^d2tEY+y;JmE9f%7Hd<7lbDL&Torr%I16E+o_f zGrOa(T6A67DS(Q2zD470(Kv;u3qnL!MSmSNu_KFqq4~)e2OyPf`_zW+ClM|B(+!&Q zZwyj!i^HSrhsg)FVnBy7bpN49-x@JvZQDNi)7QT3Kcl*BF^D&C+f6<;R*9&h_?AKC zr|fC%5Mx{2Vv+HVe!Z4)$(W6`-RT#OF9+dotl&ON#(iFx*gcH&`9n*BHp{cq@P{}g z(v_XL1EB-*&yfw~mm)&3rOLYV7*FP${O#t7;fMl(m|xGirZAXygHODW9r&s5zW zo?5Ye^Hy}bik{6B-PWb(Su46tMdva_zd%Ce70u3B+5e=nvzfA=>{fQx%5GKJY09<* zc1+oe6@eX-3Sd9XZ-gRQ^GRUG5d!Ll%ZlQe5ecoT2bCluKor!^k%5G6vr4dfb zj&~_KYejEY(UX~?cXTOw(uy`!^mwM|`YuI}ThWG!9?cXT?o#xq6^&GMCR22-8{L_) zvbU-1bf#>jOS98f^i~y}$`mbkDLQ3EZ&A_7Owm%8qLWs1OhqR$Mfty6EM5F5B1V6B1&=vvl`ZAtT+hm3m&$Winag+u&c(PAexz48r!MV{ zXRQ+*wQ=%mM;yznwaASZuElci7Iia5e6n|oC#}WxNsFc4Em96t>fPdTYjIuDV!n5a zlmp{>w|LZA#DeB#o$KAAS=wfLw>V=h4ks-Jy<4Ol=-a!+X=`yPX>quBiwElygCq!w}O*w?#7%0V-Fw>V)fq8qsui@jSk9cr2D22s*hgYe~? zC=1qZ*CIr7ZBBzvGi}i4}7xkeW^s#q~XRXCPcWBHTD${sG#^YT_G7e@{ zwAC_tHSzHU;bn7`B?s7*1qFML_o8qg6l5?>X=`bFEP0TuTk_a3HK&BpR1#>B9Hf>A ztdr?x%fj|yMk=!&#K_?^Ak&IF9-XLePGjph%x>CYybM964z?U$2a~ZZ8Pa?^0xD%# zi067Q<(w@go&vWyVeic`kHT#47H6$RY*|{er#28IXLdL@!h!aXPt)*E_HOT_+S5#? 
zZX#)S?MNZt_eAaoxuRz$HmfZ`9tDUKxmZYe=;W77tHr%vpAUAO4So@H;7vck zpEvSi;wg;rH4gI@WSts7XwF$u5EAfn?vOv5_Qd5~Waf(U|L@+X-}02=w&LC+k|gPZuqJa>0!4n?KMYnzLiQ;C_Ny|}!WZ;igf&WLjC+4C_$1`nfq z71TYD#-Rlqk}`{{tp|1Gi_e3fmjy{aAMUQ`v-B*O#|V1XN)SKJIuH}ipzbaxYMutk zo1}Xw8nS!^G~d*yG#M-kZce3p3rY`9&$;j-z)|367KzpKwRh#&2-~HJ+}>u9cg1+S zJ6_GFt*rFcU|oD0>nRs*k$VC3CX63=tVs$8y}_|C!UN0P3wdy{S;Q#pl6%g6+9yWI z&Daqw4D+B`C>3)ehmmqJ2W+^Z=QS?{58S69$08dXp3q4^_Sm-LwQB61{A=vh&b>-5 z`YJw`?=3t^X_7019~@}*r?zDF;GX>VyE^+-=brrcxZ_h9dUH zcEJMwWoFWO!bX|47Fg^)8neJGR)UK<2-}Xd2KN^9xxvceP&(M_!hpn#y3U%T5sVgD1@w)^AY0Knh29x)4>BP z$d5`!Way%!`s|l|wgNU?H%I-JHv}TukUr%C=Ny0d=c0kEX_L*1@4s9Pm|Q%vBRD0C z>}ew?NV?%i`MNE44v?^6$m10C>pEYT&RvcP?z7JAN!JH=(dpd1h3zt++%MmOD=oy@ z-cZ}y_kp)wi_cRF9=1oCZzn!KaFVE)zgpF$+{4l;JoF0%V z-+sK${rEy2HZ<<*-VxmIi@wl`63hcGX$uRn^4xTp@1sT{A*)(oMp6*EGvzl8SoDa6 za@eZdO1d=48g%%@4eCmt4zoIi5YmjbeHpsT2876C`uTb8%FG{`;2n7Emmr|*+V3eK8HQ?+dM!?qxv$cqQ8pgOG zio2&-+0!)cAm>BnWg_rpvw7EE)~xIZz}tv(!6HAfp?><|QLqC@Gz%fvEAJX}<5C%8 zM@oM`u8HN`w<;rvzCQCz zi5DcvDt)VkAK5FtekQQ@{4}xrOz^FO$l|Ui-fS&UQ}6BH6K_87sOT{^!!C7aX@>&J zej;{WJMk<{?5TKSxe)%t=u2&0Pks2Cf?(pA@3a|x0ZSKl1(oJ{W+s@AQQU6!wjSEUSLs@RroI(8lr`q_j{E0ISYXMHeNus zXdiK(!rz#GC2vIW3W}T**oB`dNmhN8UF`@EYxFis_55E(fzW$$mh}cuelefakUDvd z+Iuw4?cWjnSKgerUJ&f<$Jg>enB4WiyntpfkYxNYpV#pcp~%?u1tYHS zUvJyS3N-tkf+I@YZ1g8<0SYjtP87(8|C-MH4e8APKia-%9SSYLFr@7dH%mZK5PYY* zfZ2WdE`14VJA!@GU|!$DV=0}CRqXpnv+O}G2)^4@ZoT-wlLj7l4eaM}DVZvyEc{YB zyzftk_unYQCBdwRz|{2;_3sLH1p-so1$FJkTgDLLlTLonb@E0YmsMXd;f_p1a1RH#A|edz*UsSPc?fM4P}j zq#ql#XlU1qNQ?{d)NT|cheBG*=odt2NBu9|M~m!^(twiqL2(JeH|w1OWF>q_DZhF6eiJ&06@Z^uo)s~}4UXFX=%EQRZN_io5&r8~ZEr5;<#>pGekWrK z!oPJI1F~A)G%HEp`S^cMsA9Oyv2UxGLRCAZm(P?Q(@8hV zee+-d0hxV5)H-K+wzXQ+ab_TKFK7e1-w*o_G(6X&^;q0TfoBl0tBA?_xjctalp8eZ zTA_cc8csz3G>Q0VrdfJ(Xv+sX|B;A5pVQG-M8eNlmeteq7+pn_Pyitzy}{lnzVIzJ$n9$V?!tXG zLIBc6Xv5QB#FsRQ2zl1V;xuUbNBeVfB$PAle|(R1n)Dy3fAvlWI6w>2e9~t;oP{Zk z299~g-fd-BcLX!~EW{#3f<8KAI?hLnYAD7B8F(=!L?HYWQzVZmH}{GBiZ|rvr96vI 
zn1)+a429&jz9RZ#A^|-Iu$Tz|sH}erf5TrHyBdg9=nU+`0I@>d9x#iNlEQ2?{NUJ{ zpw?N41hwT3sMU>sE1~v8cc`TqU?c;z7|L!O)Y7`3RwGQIR>RX^1htw(2h^IO?mwXR zns`q)sGYnHsGVFp)c(k`1GVpXEdFA|2KuF?p!%&(hjoTPxJ|BOQdlHb&fHhTL|iwf)Ek;mI-Cgj(db2d(W76A9sJ zY3+x;a!}iE5`u&kDD8G5w;|nU_&p*E_h@)w<3E^v7m?iqU>mc4=UM^K#M3tD3*WhR zvU%v)A)D{|$^l?YL|u*9YcOB^Wb>;+M1A*lK<&HN4z)l2>_F}Ga|voSnE!y<8*U!y zMr)tC4yb)_HQpI$rEe&E@G+FyDuL9GV!A5i-h zf!bfV4ygTwwL|SMK08o*?70NB8q8Ne)c!=VP{4uG#eMScZ43zCGqx80fGhxJNKCA) zr5{~g|F`(M#6M6zd`0bF3N^=}oqr(w2`SdsWXSKPrXOxo(}^2;&Y9(B`d2p<{X03= z%qh3hINz)!&h)4v_u5U5V}g1nM~r6t?%cQFz{JZLyll`p!OT(H?0XysA>ml|qfFkx zH{;1MAJrS9zVCbN!^-%i>@&1H@qn72aD}eQan)uS_>t~_X^yKltJF{S0L;E?HcP$r zxkNoRn6G~7`A3E!Z%apNJ~Ef>)0=lV7%f(~K{({o^xH>3wO8 z%fbV%lkX6x(M*#+zRZ%!LtdSFH#RF(22mOe65qe9YA4aaqDO-Ln#b>84= zhhMEix~`RewO{UzR5a7>SF7AezZ$K}uci^Eel-nGgAr0`5*>au`u}S220w>IlHa&S zk>tm^Q+~N5+C`F|>H(zAA<-_9oVuYR$%miwBFT4ULzr42OGN7A{^~ELP;Qu zNQEj&5`)b0vEV@@mP@$@_d)xVU0&gbis>Pmg{ZH1BB#(bYqC9w5ix1*f+8x|#lIXC z(>V~G{^A;79f=^sY`QS6qPe5@Ak71>oqEhN5z8 z{5n^{$Gh_fI6^TFu~aO4QK9=^%(Eo~t#C1(HkPZabORfKD-T;3B8|M3NMvGm?xTSs zQDyExjt=eIlh^qlVo&+Y(S%dnI?X}?+)V(J{8`q zj$hRTAmC*x(;OsXH6D-iL|P5*;(lMia9v=B8j5%4A9WFt_vG)V38HacN6Ulv+W8OO(&WaHDjEqkQE%o@_$KM==Q@7N<4Z`W96T?0M5Pgt{_+2_Y z$m9p?JR(ai(hAswA1**rbO6&6RO?!?doF@4UweD5Go;k^QK!NLJ@JXpzB!CTsD>SSIkP8eim!oc9l?v&%_4w6F}RIH>C~XMK|W{|>xu z%W=N}U4blBlK%Bc=&+e^4F~0?b%y}YdDS~^bLtCSJ#H^j)@9k)sClA4t$Ca~FYG=7 zZiyy;UcZ`-=jpQ+&gf{L^tNiOTQ`yy;%mII8O+>HiLM^YZx;|~w=U9^Eqge(Q7bo95@M0IvqTzF2uC&6qnQzp+zi~a!s;%U(!z(4Lxh&?{2)AZr z-8YA$uzQEJjbVNV3B)$CVP8nTgF5(Ji7O9A!}>H3_dRIaI-pmTxbGp3FjOAeCsn)} zA24vsbKPYNz9$8A2GgAkh$4YtkIinbxYNFE_R2%iCLKM4qw`}yO30)^HDK9-$8mefG7+K1Cxdi|1}f)&lo zQO7G18ffG;J52Er*6BFpUmU~j1tu^f&5qtOc2SDXy|8I$X2~@2U8ccNqt@WK?N0c| z>H_1O#(`=W9n3Tf`Q)UM@8=V2O=`^{ zm7Ae;H*&Zu$0P-vhrkXelpM)52W+Y)+-U}y-VARchH9SYF*S2c)CN7{6UKi`DJsQ3 z@rk!~oG~?s*4d`MAeS!``)V;a%g4)SRF0Ey_U)4U#IB>3Rj$DA~dc`8>jQ> zQnP02xQ(${e+qTP>ErcweY{Hc?h;}P_fs-U%>)f>g1w6B_a;NLVJ1VC&?!P|(pEA| 
z&2pK;TzXFAQJ$6L0I`*Zl~AOiJR|6LQu0#u8A*^Xz^!P+9!@ncCYl`g@s|MnoK-NvNynqw$_@8nENh(;@TY}|CWE+QssH9A^ z;wb+gdaOvBbuSQDJ(U*IMfwr5G||$5if*pbUOSZ8)opTk}s62>? zU_kamCuNyj5EVufX4Yeb^%*bG6E1###+ z;vymiXW8jd;4lxkT@Y3+QlYA=$YSYLTBN@<$!by60LHDs7hvPx%kspzXehpUZ`@C2 zaY>YpO#`j?7ORdW zfT9WtZ9*pyPC9*PAR6A+7}Pa9ZU61N=5#>jE?bBRgoT5Gl7qD40oDPqsX%K4S90_v zEf{PTcdmp?GTJ3&X0Sez7=<#8IwsbN2I**fe(zAR6a?!5?b89jr&ZpkDuUB>3D75v zrN1x;vU_vF)S63v{^Z8=4x6B$x~|KVl@vpa;`yYz{us>~WGVns^#P77hq5#$bkJ@2 zyHFXBep$4w!LRg3gWQnA2~w$Iz@S@is@WpCMg7?k8)ogzYn{}@w$q*VTSDB0HJs&8Vdi2+iPr+T9Bde%j*Y7qiA%_e*QNuSMfgw z?q_l$R^$I^J^o?;9;ACSstEa}fw^-`i_r$3=znqWXZ9JvfRPjE&>Z4qqF$*%ohe51 z*H>Pj=P5%-_~%YIM1*G~NDhqmiSPAWAyPK>i zlOJ2CkPncSKs>PEmi_T9->0+*;s~&E<0Xh#g+iegsCn?9n&4c2482OxgB*kFGql3d zC5IgtD_Gihl-jTZ1$JF_Ul7?C-Fk;AZZQ&6F-zKBw$_)?Re6>+=EtV++8_Lwxf!6DF- zkBS-$fnk+s=w+i0$zNeJT}U_tygv86Nbheo!g#^2ry-QAc4~EAt(uG>1IQT-&kNRP z{m&FS%Rs*fDsn2@L=}6CwV{pXGDzfNgoij9aPGU8jMO!^t z?kZL8KV$~yhv_V&ZkU?o zvh1onCzR^phKtp^B%9|FA=<85b4FUd)8YEWDtxdEDzE|Ln0xVTxy5mXoTyAoX8Kw3 z`(x`Zn+V>|i5w^per&xA)Z;8nxC|5GrlAsRKVkoNKFK%|2^R$o0_1q2Y!aPgL9@~Y zbu@)$rLi1O3$?^{v(AV=3%$FJ<9NRx2U2g>aX>(rlpIQ4^LjFKrBgI=06oxrngMEI z_LsW-BBYusD@(v*K^`e4`H#B^A|*}L6xE4N}Z-E&k)ct|w*1^ld(f{{;7o z|MuA+{!HuJ`5^w&*0(YM-u<9MTeHoh2Pj1qTiek8xZ$$}dQQ89CQNhyrdl zZ=eN<$MYj{nQQ_vD)O;cNT(T|t9n-qc%6D;_-RZo6)RQ^e@?WzvK3WMHAx~!qujhK zrAY3A%C_9YO*mD~`+NXu+Bze4>qKRMiRT^Z}Z#oK!TSbaCf2I8m zN_nR9lj>Y)JylA+Y@C;IzTKB?DT9*9ASvZAOIM^rVJV zlK0p!8E|uefog`QY_Q{0#b+VkHQm!J850A<6!VbE!|gRN(E{h(E;#3Q!D-NimIlrJ z26Ap=JCX)nQ5b~ZvM*N|9Z*|!_6(u|oQIFpaoH8t0`Z2Ok+a25g{2Z0v2)~8QPR&^ z_{X-;f^aSNT)Nob0h)4gqj|42je846TT6blW64!EyOL+Rtt7*+L9u=e08U2VT02`a zw}!gv&|<*HbSgY*%I_}F0(!RPW^8X2Awqr_Ij>1`hw~cHG#ekAPBE_ZpqQN4_66WU zHB+nrMil487EtB4IJ*VgP{Bu{>B*cSiM)b?Gc|O8WR|K)5d-cD*kbC-F1GoyL0=H3 zfIUNyg{;a*UIN*#aFM)2Dj^>db^*;Ps z{Lx=I5XQstaUS8@Z>hblnE0!A;@XwFIz9rB4zmRqqjT|LzN`}#t;ZuyAP1a7iBxl2 zgP+QyDFgZkg};!bLpfvG7KK1G$0oQ^9#R7mDFbX1cvA#|Cfh{9Rn$jC4Gu+QpQc+C 
z1+BTj5Y;&stpKptI0E}Z|EsrP8`y5gTl-_|!?qk|Z0Z6>KG>pPq(h6KK?jy%6$GHc z`)n4bE89@HL)az)SwNN4Tr8xf=G(D&n<3YZHglC8xJMEGb+rwGNuRR&fjw%{zAn>= zZ&x5yu>vhYuo}*D58r?7(rf4 z^5%)X#W*CuP>-dKP&Nop)tQD24vycBcotQUnDFEvC8uhLv+?Ah=e2eG&NTy%I`V;X z)^U3<^9qr9WW^h1==Jz)6}Gy-J3h}2^Z6`-Y`nnDrvJ-R{-}O8g`3Z7nW*d02BY{3 zLlm8XC{lq-~XmQJ$ZJ7 z!|f74sSRjw{YhO&D(r1G!oRWsys)uF{cmrK@gw-&2p2a-H*+a!<5ts<8cqE?-ncW` z9QW;Q+!fWg2A2!*H$E0W@1e#x((s52y43aq03*>B`PN3FJ7qj++|FW*0aQ)yS3aY_;GuGPoC%j1JoIw@dG=8Y5zQ?_X1ob8NE6#?Z;&3 zdVK$$@MkL8yW^$iZ9>B6HZ24uafbUVrN(Ux?RH?ZdAC@LL$1x?&&msD-=jUz=J0>4 zqXU2?x>t&80wk-0=7_2qXclPlDnOIcQg8oS0Lt=R3xJNa0caux-besV2dOzOw7^to zc*Kkg0{y$$i^kVD39ud?OU)1_0)hs_w?$uLk1ir0qDv|QigQT3XmA5f5E*eOOB@)D zzg`|tV~Ab14(8t4x=l#6&-T`xV(Y%}T;Do3$bZMJ1AoBv8K|L%aWmoxrET63Z45t9 z7R=-12(_VGgU^NW;GTHcVWM>6jYwo@RJ8DPw9&gYvA)y^2LztvimBs@q4_zz{L9Y~ zA^0z!IvC=Lfet@g+pig%X6yfEEn9yFg*#ZvR|I3-0KZb(F0v4Klti?u+?bM#xI+b{#~i*DZ+HTRjknUk_tH|Z7mRy5&FXc6`JvJEAb z4c=64ju=cCcj5LTiMulH;aV&s?$o7wm|S#gbW5~}EUWwWHAcy#e3RC1ps_)Za$^kY zB>{=1aIaeMo`hmbjp#6mCMHb^0w6!nK(pY4UW)r-)~%I-^vw6VOCNES@g&n16+Q5$B5BMCPWyz2u6?}wH!x@c_AwqHM zMw!z&62t8B4-IKNf0lSxX5)Ph!_ork@kOG{zb4gL7StqP=hcl9_0Ds|jv3eE^Yk># z9othw$%?k-O#BnpeA|k8JXbM2l#?taTeD;@5MXoBt*G)&Ct1%PkA{D%@F--yxr&rF z-S`!LW(dU_S%zj5!SQyK1mwy}vtSdl4Wk^orS4Xhiist~d7)fYq445R;>MC_uIt|mXRE|TfV1bTqT#Np!B5@0N%zTUh>HMz8$1Kn|6R`&eqG@aQvFy4a=@_Kr%+nKB+hU;ID~5k#QQ8y>s~ zxWk$Dg+risbQ3tFXMu~0a9ZhO8QEk<@S7Fp2{6PbDtp3N5xH3hl97&^2#wbSlCuVq zhR)armcWDXvqKs|e5r+W!ie9di*32(au!0V);SxaVwXrViPGk0M*IRZ(`l4kcyXjX2u-zF_eBUz`=^E+`R8SPd}vRz8LfjGHMd|`9EvuL?uobn z4g}A%tK01vi6boHxQs+!ZiYJOd7Gy|5ypLehlU)~lRNT)W;rexRhi;VJo8KtPdpPC zvNvx7-BEzC0hnFp^Hpe!<@^_Sf>V(yUwR#M9+oHvhlmy@74|gg__81aPO~hCm{3ew zfyXOGl|>@j64%IRo=2b;lNsF?PegpaZFHY8-nhD-H$~ov8}agX;$dDI5AELY_OzTh?dou(1QaW-i3pDnk8o^Mos3%-! z8ge!_WHfYXKfgt`_N!uLZK2z(zVY?H-eu4bk? 
zG2BL&j}1eZL1@Ub6)TCA;FU7~fkzFSjD#`q(2KMmV`HE}R?JXUjBOSmbWDbkHkP2&} z^~dpAMdAw}1!lSy2p9}`22muktUv;I?;-4XLOCybHhqc+C_#(ZUvW(r!|%Y*k8W^9f} zvU4;K19N0e%i+%eF6xq05jVRpj0E>Y!F`$G3vz?L6vTR~`cv{Fb>nj+9Y13SJ04rX zj$wS-w!>mQu;Up$$Bb(o(MQM2ezwdiNhvrCBQkb;6sC}aEpV;b+8NGKU5q%OJ5R+j zG1bC~Z&H+AURHmT)@5$Qh%0e`sFIul-+=@?rdVTN*Yf5#2I1L+7w(90twZ`&gG}($b~sLSlL>zuN8iXj3?Hw` zg{u*Y&uEu!jyA*tF*E@dgN_S}Nh=uZI<7`_*I#p!SOz$HRL!@sj1ZO~7?Cmj*g%9u zJ!a3tB?6+%v12{bZpu3PSjL2P!23?B54b89HLC6j>Wah~2Q;1$!4*PS!e%J{X%!Zs z79XC~7x>Mo0bW7f0YDl!MFVgJB-KHuC&qtW!OrmFKxi4fIQUy}C*+&I5x3gBBUscE zI(xz1<6F>ou=u>5v_7vVjNqJ}QkxXXbdfLdk`WH+Lc|Z_sDT|hZRk>^c@q&Twzp!2 z+GCM#zsEE_sc!|_V~JMo8||>+FgN|g9z#)Q+!yUJwTv}TM67{`lZpg5QGY08iI}&{ z{^i+l{nw@=r~fuL5|&vU$;v0Fti3v}rfJL=hm-efY$ZHJH&d{AqKIN8?4$%mi7BeW z!k?_#k0<%jxFN(2?B7TLR)2V2v}g))Fu<5HD!~~**C@tDlnChqiu)C#S-=_K7|on% z0I(Y)f1U)Lt>OBi->tzJlm82*I%xj0RUK-q$G^p9UKC<5nMKgmp&%sRWYWd|pS|}1 zvg4}nJ-hqp{tqwW;$q59x4k=sM#}_7DL5*+XqGuI7qF_4D_cQ?yyiVVfXjv6F-GuTafMRHvRUXE3x&7x!KgGc_VLv;8-<&)YdRg_O^z+ zhVhiiC@^EH&qT4QfNc!}c$tO)yiCK8vg9=kv)MTFH`g#8&$}ie_^6aN6u9x|pedQa z*2_sJ@gWGYmO~7}NzOvUpaZ61I0Z|M;1Vx13^Xi{_(yE~PQ&VuzS6OzevU^^Si@e! 
zp#N%5+CT%r+M!|G*P)L(4P#3irX3oFlk=psmm1{7ylEIt=~74A8V1^y@8*-%&0M`I zlmuyDQg8{U>+B}NA^^Us3{itJG?ak)vnfsMOHziN?jjA_-91{|odnN7PBb# z7-tkrhMd~L*V??{eo!g-5o+!SZKc&@o_pxpWV0@ zB*|^4R2RPLHg+%rKUQ;K09wz{s!dh?Oa0ts3$G!QB{dAqOv=Ur&6{k4b98yTaSQ*B zJO`_m!so)}9Y1zThc^Q|6Nk4g;tMEws8tCMu>K_%_Y?n~Z-Gz7g&BkWQB)G1`gjH` zb(6LH-3+x@;vJ7jLcX)XqxfIXWVy>28Li62*AYeW>@RuIz2|D8XDv_Hz5%Tfe6}g=mr&X-U(oXkI9(^7H%fb!#m=u@(BO}M^}Lal zh-!MiLXIsx*d^H)k55^8u%B=^wabh(&Blr<(WT^KM&yTDP_P{jN(>fY`)kRN1rtSs z*W&wo$oYz`ySuWON4D!io_Ymxze^jDsw={wG(_1zSCfLqyeqK;PM|@yHd7f%7jVoe z_`7Cls%j+aaFr2Mhr?Z`Xj6KueX5z|%xvql*lAY=%AKa*e_2I@Ur+FH)FgQ$Un%Sy2wUEa zO3c=V>QchZNda7JNMe_a<#J*J;7&fYIpL(!DGkNGR-0N*Gq5%soLV+>ZQw8_c72=* zIK04lAb{q;3ERMt^65?;R_Yly1|G>#l#olTXJ-;V#bErmr25e|3&7>ZvibC5(X_~p zVC$E;lw3whBKVnNm&S6H z?NowTw3^XqMxp&B^-l>l)h9YB!#>*3Hr>L#FC2T&1%|ZjsRwMPAZ8d&NxzS7>dA-)FlWMzT+&zev6z3rz-Iy zO|rFW1yd2&s@d`w7DsABEfgY7v?{ch6@Nb=HNloY_bEGcRBR>J^GmWY&X4BWXyq-f0(@jdveTja{p(e-4IY|mV& zjW5aq$yZ{pnmSqr-xRn{bHH2o%iSO(f%(F;XLaM4b`n!uPNNT2wy=3c_7MgV*Kg#j;)Y9bO}m}MVp zcT6oMVu3WnB4<7Rs~#3$bh9l6lL;gh{AwEaGCl!G$GJ4Mz(92SQ*K{~R;8ylvdplO zk`Qi9+tg}8gU?fiHm~$puX^&HsMXb?w_35+5K*es8kAnwU0O!HN-hu9Dkj}vbF~t# zt#|#o4(_SAFUqh$ro5{0Cw=TxO<9ZVX+lb`hAO|YbGsFbR8|DErurMit!vskC8w9D zP7sS(@8G+e*kFNYyM^Du0CzJlFSsIjfF?B$w{Vfxa9Om07ked`vg5Q}lMvKl)KsB8 zi3SGSrlz(9hP}um*nK`>bf1sdUdUf2??4|XN{Gl6{ll~hfY@%eY(FKU>>iC6CSH!; z#zV405X@vvSa=WxRV{3>78*RT0Z*lV)=P@8x@k{}Gy!8zksq zbFVOhydoih#u*@CSNcH;3GoXs_v7;Uy)PFova+=TFyeRBG!il_IEEPO`%gzX@OwYKnK2xzFfrGb$@a<5$uazPqs?|!+QOD=A zzw4`rVA`KlDkU%&IeQI3sP67s?#P&N7ih6O2$&guvw{8I&fk%_7u)p&Q4~fqzTtP* zH_VAPhXO7k@>?0lif|OPXE+z2{Y7Oxk{x?zU8)Hwq6_fbDe6;wJ5Rc zRVJVLn{$7qGO01zl;#9yUPZK_TGE7#J3VP7;yRbNjdRkIXr|DUOzmOL6&al?L&EV6 z&Bv({bE|=x9~08m=L-7wJ!jCr@413LeEyuF|12w`?|k7Yqwjp4l+k~FZn(_bpEEpv zyT^0%$8N3u+_;Qs^|pbOBL&Wep*%-`e#dhL=y$w80sUnJv2+i4IBGI&Q6*a1Iman% zQk>B*Ok+ntnK@_LO#HJ3(~9>E`XouS-pniA#)CuvgO$$1MLf>%?jDT_P8ea5*j{6f2vg#6`)a#MwCZ(OiO zkG3MKc#EZ36}0hG^fMBQtoGA^YsLe-Y5=dwNJ4W)Js5n$RA 
z7zb0c91-U0pcH#TfL>C5su2{=!JeRn#!McM35ENCAvETEmv(NC%#23-osc#vwrAQmzaz*L>}`OM2-z$YK61^BO*;bav>-D4ShF zFy{&yrn<9?%Y|12H@jabp~@DHdMoaq$N^!;h42^rDMN*_y{U3T8d;UO9mFw?5Q140 zx5WR*aWArH+}xDC2lU7FsOGa!FdR2MuJ23%T@o|#{u&E;s|&n1?A*T$9n?JQ=I!5* z_GG`>@{Y1`Ba-p_>v!e01O7UcNC}!|PMi!A96|)#s21vY9}kuX+Ll)lZ_y-xge z8J0}MN};~lGd?ZT^_0{gD?(&vI&jF+#?GWnuLF3YQT6S!898^q@g zsmQ^uv@(@!3tZ|!NgT$ngKm1;em&(E5s$-FUJ(TOOJby?SDroE#h2|8GO1MByAzyp z37LA^p1s0E)-aZW{&=DQka!t;acoh7^C>P@T9%@RuCw$@PBC?nzLL%8!9^MxDPia+ z+fzcEdva>hHg$>_6VYpoOYTRbYbZuDO7+~iq)b)k?W?xEeGrAL4>W&{0RE7V*m1IR8u(Hc!4eRJ2$I#RuNo(qTW2q4PCt&P9|3pbldi4m;HEJS+I&OA{92sNBy(gcl z+}p(=M?h2D%@Bv~B^F+5zwy*oU@#tUN^&|djVdV(!QG9`b<~kMY^3Y*LkVKjCUpOA zr^DcYDajcCERa;kKea6`+BnJ&BQRd84$gqf-Fe2@@UvT{qCi*-Gi2rw>NNL?DwaEq(` z*&dWCa1E&yi6qC>P6lS~jl6&C%?2TNM?WuVFyASYuS@swgJwMnNEpR+{ zkJsjEQY-{A=!v+)f98}eX-U1Eu2gAB@*CM^uyShw+aB@o*QNQnoI7MZUB|`|5y^-y}&^L>PbgQyk-KW=6JGCY5CXG(a)s1ojn;LUiy>77S49E9*FxI~*x(!RaC zh0o_rEW=@D)Oj8sGk8^xt1M~AzuSy|TyAl+9QE-We=CzZlq}wPD?na+)&IgFJHzw> zo~~-r`L9UouOJ+fuZM|+{#|hEwc5<^$G6Ej<$CS_OkGLyTAtRFj%%CkR#4qAd1jDM zR_gB!w65CS0X1KD&HHB4`PTnG7OL2%b$zN6$mY8aGW^@519i3UBI!kAd}IKV7JZ9& zg{}N8;*I9gRD^||F4=cIiO^vaAEL@;>>8b^0jr!KQKt1)NZP7r(+|EYZZ&{{o7Mn) zs81rc`hjM;cqiB`)Mg4I00J zaqZ2}L&FosNfdw>lx?Qsj%>otl93?S@)?0% zJVsOOXSuv|BPG$73G{|b8SGk%9b*VmZHN#Fus5j*J_v{n4@BmJ-R43Pae{^*uQQ6u zngEBgB*tr%qdN_@?B9rmx^;stwK8jcjNiap7kGJ=D2W}&V5~v6Kv*mV+`WaWdo_6u zk!aBtj0Bh-+Yu-%N3){JkOKf%?(c+uGe=fUAd`ZyhV3x`Eh`fGz~IGNUX_+d*!J%+ zS@WX#(GGbvtPSz*OPLV@IqXk8gPSGgk+f9I9{q>Htb}T?V-m$TS|ik;MJ`is)wmQlP3xmPWZuLA?Lt;qdy$No;jGIznuoMDzS1it zxOSY1A4mtWC94W4j5<&IsyD&J@gEq>$G|j@X2jm^BS#!|~sZHvgztVaWx&6R%PHM_TyiNJ3$X`lC6FKRVEY z*qqo-qUP~RRCh-B5u!1$MZhzC01`*SUbK@d0&&0eioh~wTBOFMirvv!CyW%!WdpAJ zO77&xg)aM9DlE2vw5}`Nsfd=O&eXxxZ1^@EcD=I;m`r|;fg|hePD|@!JW*mx!7Bcc zbl8Zlp~H@MCTj()H8&-hBpBkR2D?Dv0Y6a1{x;C zlGr-Pi2OSSegmbt2Tr5vfSDz|H0Oo~Drm5z1I1PWp?n%uLXn_xug$MCxbq(F;wl9$cuO}`GujbMKB90at=o`Uzc+i(JaQ!Q!F=^xKjD)xeTN)q 
ze>XnYcVpr#l7iM70F94D(Pg!GKV(eWHD)`TVpUijqCJq7(6;t(=224v{_#O8bn?KE z+?=o!_W(32X+`$be|1LFXDa!T(Vn~8kl7g;5jQH43*vk>SxMv%2*?Wv)C1_+<2%$IuplXV6Q+i zlT-kr9x7UwM^TdN4fmz-W>Nnt3N2&L9v6|8gP?^p2+f8P@iiAh5J(YXkwv(XXQh=T zYMCiOJ1IkMS9gQSDTKe+W!lS=!d78+jht5l5k<9XOl5%z&)lnEq%y7K$>ad_wDuYv z$ex9n8!&qekjmJ~Fz{K7zCP`@uoASmYp)1!mhi@siO<^!$7F^4<}6D33Ajz>IS*EO zeuJS{G@jQ*N=OLvB3De~sm1>Tol2>k>v&P1lk1sV>z-0O+ijW1s?nR}j%6m*jG5hn zCqO`Fl+#pS$U!S&mcjOsu|K^$6lQD}}(&V@Cf$B7ijSNm?D*ManMIbsWn%luRcERb|0EG%dd{-`YShWBVnq9~=gk>iz77f1+gi;Iw_q3<*ulMHYCP7+mN6y-qS|ztwRDxAh2ZA zuv){UDgB8UNR2dML=`nV*`#?td>(q@q=-qK=KI5_;+QyYVX3a-J*kDxL)F^)0VN{V zu@EPd(x18of>)2g2!m`C$zYBVNDtu!@Z=ewaR)vq81+yBOInCVT7r>i0;#z^`AP>0 z=fT1BtTuV;1(j5ca(rT-#mbO09QsvhK6uVuwdq#d45RXDfCZZuLj_!6G36yGVoU(N zsZXFbXuhnIRa4y6MK>b#7hxf_+4c8dyZg&?z#0F4Bf7Xol4#QX4!Jbjd3H{=*u|7k9xeou01vA%%0iaab<2`sfj1V)*P$7j`?*jB1BUI4 zP}y44zCoIQZb=~rf7vgHZ@j5v*5k|C1pw~rLEMVq-*WO|B`wp z?L9RYSOaX9vSze)C5!v|MftL`CKx>Jwvwp?DSgGC-`i0c8#;S0#5Qv9ztOC%zI z=Vbiz0bWFsMy8aZH1nGiQmc^OZg#IZC#9+s9XcdKTZ7?C8KzdnK}e{H?U?ZYdxbHE zuXkf>`p!0Yrnu|PI%aVoS`&Fvu=fdX8`?cpaV}HlOdVos%Pozt2t zt1zN6hLAH3W0+Q#a9m1Hn)728NHha1oT?58NWUNHq2!Q^bMolN1_}T>yg@os;dPH>v@cKHTdHzf>He}$_(*KzUBD6 z1L`x?`d3BoaWV8-@9DY$jlC2LH2(&)JbdB&XM4lrUU~XqIxnCNk9(h|81p~uHt|GB zOkI2r>G@`_bUEjLvC{SVu%I3D{fk(jVF+I5s^d|}BS7$zz<%Ha#hxm>{&Dtqf%6ow z#|FHe5S^Dq{aV$B=jmQ@6toMPOcp`7AtyO14%NPhUTI5FP$aEZ<)%WE%7;@`c5Apj zU7?*J947uGHWk2e<+vAq2^@8vp*%J}YB<@thkc(-bWBsiHid{y{Cu@5f-#<$>y?~e zL!ucLmtyy7yKF?zoelDw?rDp1glB;Mx!X6X!G59oehsXdJ;s{txT;ubE?>7(BVHHqOK@uSIX}l@?VhXhCaC9IYplS=QRM;xHHwSPe`_|;Y>?IW? 
zbM{>k<4( z4p^ih;hUW z*@0SWW;oci*b;x3qrF#zH@8>B>N?K}*b~&2jY-LAS}F#D0kqAZ)r%>>3gE`0TS$Q~ zXfSY4Ed{v%Mz5QSL_xT=mWCTW{-{Bnadn-^4#B!FAWyX?mpm#h(j7epzhH=8 zkYNk^wF~>dwEEmtbilZVfSGhFiZsCz)F7y)%!-bkCQ-P?2-OL$9~Sa zu2uW56u>fw-ezAxc0QVo+~6I)cJr%qVm;!qwjq0LYX4NE4Wlj!v8GIlE5aaKC0#BO zuPJ4p4mf!`3h9~s{C@G1cT29}&mUbw{F6W+nAqX1zauE2-rFa0kHN@04my;5bdz@7 z;_)ZUp{dKGnhb5`l@*Gw`kr|m5rW0EyBRMF5Q!GgF!VM2&`q&Ttkn^3UTCaEDbU%U zD<;G9c)w8Z2#qvt!!A=z>p(dEb56`&5grZOUetOor^E+-Sy8Jz(j_T-1YcjEF)6Bw znNi3}tM10c(2Y=NP7NPW!z{4wHx&f-D0;`^GEFZuAfi~O%D3M<{AE}VCH-n@0icZ&}AuF$EDu8i{$PDntpJ;DXqw7tE3VmhPnO1Fjp zYk);)t)BL$XrHO-!#CLDqd-e5GL=tLs&VHQ4yQziGK0YjtIHkvC37b{M+ET8v znn$SVC`mlW-1U}u(%eawF;otCw|w7q#Hr*P(`LH@CgI-3mq>%JX`^FCvh=l?3>rjj zCIm~HB^QUzh_(o#h7Fq|oPAxBdeULriH=ES^YdlV2|uav$E0hO!CxkoVf#tFzN1x5 zfS*T!zH=Te*hGCx&ua&poF@|!={i5K?L1u*{gj%WJ<--Z-&LDv>mD7Ydwyz-$1>H5 zeS;!9;4P;%zl-?*0)D1SF6g|}4m!s-DfPf2QQS%27BYdN!Mu5>%OuH8w&R6^>`5Ol z45fkPy*BFJWRPX2;ydmTf^vt8-ONXD&5M690JC!m2Lcu3Tp9_JY>({|ztbsaEc};H zXK=RJ6yxU6@N7&`ybncPfp#mxL1P7!`ol2c?U1e2l*A8~@hxk`<6L0v(D@ke$0nMo zyc=e{2|ilng1rboOD^)@^QGKtcSNzA^Z||!S5SZ)Q3Vj8Aep&j4dh+vw_gRLR8mh{yY zOn#iL7N#g+ZR^u;Dd{SV1v!og^@9?CoCmOkV&l9b>MFPB*Eu(A6o04yj{ZEI@>h}TW$!$EuyF}zBJ921q)@c{XpG9co0PL zliB%az$Pij0tK^wZuw_Ed}?#dH$!X}^k-n7m4C*Hq_v%5rCF3g2HpID>2BwrL1hww zgQriQgXY8DB*R_Oy;~JevZfx7h)7A!1&{g0xz8{M4RX~^g_KPM(i`%WoTJZ2+|SDn zKXgx=u`7;CS0w)5e~w7}3$hP$iI0!Exc--AJO6>>nF-|oY%}ar$j=chnSPPJ5|zcU z4<-sHR{Y=s2%}&0rtouOBK+jX5*0(Zb9L~Owy}xi6j9rXb4@IXogrgsHBH~=PzhxZ=cur_=-^I1gim zM~fy}3#C=}TM+bc8zc%bo&}0q=oH}B_{l;;P<Z_sjYJjPU0hp5@$UNq! 
zH4h!P4T!i4A;j+H07RGrqOLBvG%=tEML}*DsBD!k41yav3__CX_%#jcs{N94GSvE| zr{XM+vN3bi^=AhouXpulhq*nBMM}`PUH21nq*bCvb%h9V4;($eFRdO{u*nqWFRd$OgY`AyU_=xk>3bL+;G z+E)M2gYy2MF7=!0rT!p!!K3-v=RI2L=LbR&S=&vI=Z#_l8|?{R+o6=ydf40S8Z5XmB}YZ}wPn9TX@=PHt7kvQRi< zIxkuOC-4zKt&@=Opm1vt3O_GLs(v*2uKp|Ro!CUpB_YE3P&A$Ee8;Z_KAg{TLjlaS zyl}n+2g`XmEEO%^pFKe}T5`FQ$MWUQ0nZzmTmkclDR4iSj7I&Y*1pMf7*6iFLi!?k zBluyN*DjTdYB=HIwobLili_S%?*u1nZTnWSDQ!60$`8{L_Vhvu!`U9Zb;FVg4VKD; zD-nk&G}6R0N~7h(3x;8UlZ=6@g2!+&ytK3j1!sukt`5M^^XJ*a{O}EQRG*HZoqb{v zy^*F$y~A?Yjl>@d!RWD~g_Y_Iz0PaPThU-vOTl=ck2|zGEJ^i3Ubg&}03qpMddl|g zT%B{pz{_0Iv)=2QjTB@oyFv!z^u5<=SlaCc=GflV;f$S*tqaBM>n%pm8uN~pF-Blg z7#TAJX>+ z+Yv4jc}tW3R!+1=#P`5k9ED3@5#vt57%7C) zu#ZCBhhWMcE*V1ccS-W1_?IA$Cir385eM}8054wTT|(c}WI z7xEbEA`}e_XXEg@csk-vj%-I4i$xbrI;DTf5~G?UjC*k*d8~6=#Rcfx7OuvkHsv?l z^oBt2?YcVIvsv$^vAh;@U9GT@mnJKdRs7h@!{KauS^Q~)dA;!V_>+s0;T7RDd)R~l z=Q0Z1O2gR>Yk@|88^mwz7jCHfUFCF%r-9|c{p>!J59=)3TH+wa=i?LUaxMUP35Rt( zvdGVd!EnyWSR$A&PnHyuU=M=CPXcwZ>RD6?7OjOw;(srlS;aQNR=1iku2)nnlpLiR zF`ZHUV8f*`6*=9QxKq5=^R=fhp*;Xa1y(*Cu|`G&hoT|fUTxgN)|BBJh{-YN8Toi| zl1N(ehlvTA2&Oj6NJnIw3nqI_naODpYVIIMD&HHWbY+jQ`{B-#%Zp-O&s*Xd$8upj zOanScpAqWmyBFm#>8c3U-N`DtD0ev&OBqq_AzdMkgZf64i}K}e9@iU3x#(t&avw$d zJ!6!M9y^>p0r#IluUkAb*(@F+>zZSbdAbh?R$AVoKgZUYZbjoW9$ zXso-C%^%P(WV0+8ULL%=bS9*;ObGo4$+CiU?13j8s}|B}S9|89WAzy6Slw!(O**Pr znEULL4xpgV>0%=t1G7kHnTbOq9g(Xe9fz<&u?~b4?=&{7Ii1$@L0}j^KW&Kr?I@*2 zj%Oz$M#S+9#=EFkl(O9#P&3u|-Mwnz0b0-^kAZ6!KSYZn=;R{l$QPMGb(S30+HYW> z7qvALsCc5a#JGiyKAZh7d5Ki2@++JoNN$Ndb*Cr>f_Q{o3)nrc=MAZ;iFOd|6Cc<12s>AMms`HXxX<{F4J)1Y#A5}tF7?omTp52#JAn)wkr&xfx5$o!h1OnFpu9Z`oY5#@=evD?^-C!f6hB~{|EYdjQknVL z-~@a%FfC~WW;c+GT+>&DktNH608cy2flnN+pel1#Idl1V)%HQb+o;DNb(Rxn`BUe6 z8ahm^tQiT$Gx(}atom&D;{yMVsN7NO8wbI2|AY-KSFeuZJv^==$L;a=?SLfItHn~U z)NQQ~ir>cKweHYvJu(%TsXxmBS+3goxwhoE;geNu-OufG$8GPc(E@uMr@2ZpY}aqG z;az$F7#`5oap7%xw};3)C{@~0=sa0MUSTNIm2KngaP~k=FNsgYQ1@!no*KK>O>IK6 zKgVI=BSgqrOEcA{eKb?3Pamq~>eIQmDcq16#nF+H2e@2BS8lpY)SR}7rf7sE=7K=% 
zL6G=aqE2K&QMLH1SEqij28;-;4K-!5A#cF3HqbX^Hcd=4IyEe_d9qFoYxZkn`yDFO z93?;(5ZBl5#6;c1MBOl2a>82DD>fq)aw1{)tpuG5PC4TAsu@h_QKA+_BS7lW>NCKU zSiC(;!m=H^y&~T86nkTo>bQh#R$+GA+J%GmD4j~1?5(_ppvK|sKI*lIp$0iIQ3|*B zr6bZb#-nY0`Vt>zU&U3{M8T03O}dk9y5h~gkEKgm2TZ!}D0Sm@&%3*@EUyvlP9BJWt$7!JOd+#y@28n!s%~qnB z7)$UQ1lO`m=SDJ|O|j=;#JMfwPn*w)5$Oa18aIZvwMTo(VaD6~>?!UD%xW9L2rGp& zWMjr@v6wcrJ~S*BuMH!wK_l`yvlekWemRGtO|yD3=KHX(*A%$UqV)Kzc#|n5k^;`5 zonGqtHV0o+7rXT1PQI8kwzKL(E-;XNTJg~yFQlKLvCQOCP53Pe_5+C^4(s(!4oKn$LbMGFaOot>-AgR2CZRc~+h+V{$w;3yswl+4bsb!ibz_ z!>g!ghyC71z^I%p|FEp8hp}c}6n~O!W8#29d!y|7?!qkJUn?}={Ty3(&SrVTC>d9v znm=U5)IsaS{(h0=p;~(BPKMh`5c&D2q|tQ4Yftdo1L#S;+RLIGHZ)ZYXHPPI(&EDE z_tno?$u$*VhDN5@-s>_x!(|_-cCnyN6aB1MkFh^%Hm8<{O}lPe3F0(I#7e^Xog|_6cJ>?wX;L!_E=y&gVe%EK&<7# zZ|gJc<2gZPR_fp>?BZNGYggO06FThTQ&-%!ht)*e9-gI~!DiZ=<$1S-S#B^}c(QE^ z=k6^)J5x%+A;%yy*7Nd64s#|#TI0DRQ5??knkO4n@FZG%zRnjDlU}?%(J0(=C#;Nf z&rw1SK31F;#c%F!r+Pk1l(EF#cp%XzX~gjL!wSLr27Ox*KGm?@8RuBH?=P!+4w~11oLwWhF3r`PO_8YlMM+=$4Xr&E z%#hyQXM&71X88VP(O!XNugVtduzeA`H?-(9fEwvh18<3QlMtt>*~XzwgaoRRrE4T+ zfEqjqcpW5cB~NO^sO&`fiLCH0kQ3aBrGbKWJ;Lyf*Xin6n;{D0(MoOG=QyJJWw;#W75YJTCys8n zKzouX+_PtBmSA17ae46TCc3lS_11ElAE{+^KSi^VTe%fTr^tWk+>V`;Qk-9Pme_24 zL3_`!89e?%oVr@iVF|hz;@Po$`@DyM_a-_TV0zF&8~!h6HT=JfdmbOr(~X`GGq$J^ zX_l|^+k+tS^_w!Ll?)Uww+7zPf?L%%rxq}JUYz&$d}~Z$9$LaaqiRjI%-wf0r|34S z=VnkeIUfsBS77x}#L-_EcbxxK+Q5$0ExpMl+$nlywX;xlyJp0Z@21{~_`~dp&~9@c zlV~zIe?se3Ym@WALLOZW5-JRZSPHM2T0w|WNaH@y8uwZuHV2rZ!aH%CLX9fDIb7J&wb=Yt+Y$ zxIBm30L??j#CP@D<^-F8vs{>y3}z=!Z%dPf7IhZY$a(^dnpCiUF$BaMA^|3mfaoj} z%pOjC6dA=$pw>9U>0)*--N~~tUMH=^2+hTkl2W|*ya=3mV#5egsC3Ue$bLyo}*QAB)nBu!9Z|X#l(DZn;tKwVO`%DdLj|Mf8-6?NWibjE^ zf$osE`wQs}sc-Y)$}_So&%VkE4Jo8aAprX;H27VR{88N;^euC{zcLX&h8?;hJaq02 z%l|bV9aIyCX~IXEEsp=jP~?fzHn6y0Ws3_|X3qeQutRmv)X3XWfJ?*~R=QBZ)=Kr_ z@d_Bu^)O&Sd6Uv+_<4kmc`?6|=)6vK?MNjR5qKOPVc}6xbH8u&+{Nl`pI6(yS3zx4 z&yQ9n(u-k1E^^y+udR~eQpz^xH`}?%gBW4j`sH-9Eh;h1vi8MX(ai#4=aw*ff~5&d z<@p$7-&V|~tEnPI&v76Y9xDnVJg(YoT?`aoVjIa5YNp#njV^9okc_lOeXs+_=A~MK 
zoH#!hVjZ!!MyaO)Y*JuAQec2R^O3shik!l`1Q$g84qbCmk9WO#ItjmnwahLFKPCyE zbV>Lz6G1a2JYHl)p(ZA(#6Be+6C*7X^ND@MpuBhiD7ZCEDhP2vE!|B4CnOyTxded7K@w+Bsu-JjlM$&3ijI{(a}pex<*$)24!)G3Iz@ zvP8UNTi(7g#+==6KP~%sI(Bb7#L3}p73tFwaWG}P^pk$%nbz!m$cYJ2rlmC^nMoNR z=Jo)}ND4m89dhWxRQAe%VxusB_wqND#{dSXq3*-~wjZrsZ7c40Kko|zl>PTGIpUVD zr5|EvM6R?s90=yw+Y*Fz!Ojj)gJ)*|v5TD@Abz&Z1%6*PW?`ar*qGSp^5C;&dS@n@ z6Z4M(h|c&z{AMRKZt=6@`OOrw+2&^_dF=VwlPa-^X{K(+&(wL(&z@vR-TZ7*<=i8I z%@49U$E>bM;4O&xB9!G`{HK3Dq3HHA~5YW zd3zAJNMNgy_0ytCm@rnyg!WCGK`^jYWrl)PuU5=UXR}P*ehGCX=OxrVSA1gv!-OHI z={aAkOO$|SR07ON7_LRxqgaWK+Ri_*ONc}D`M6(bj(}faFFvoDQkK>1gRx?XzX) zkkBYX=}j%@3WAya+OWKXqUI>UR22kgtIcauX(FRWS@d>@2@)Y0NvKWlW=JlPgjp;> zvp88IVUM*g0vpTeWV%nAtMBd!E84C*jfm@GlNWNQ%&ro)f^rv-8pu+pu==l^u{_C_ z_w{;ITXJ~Dp0++EDHKIZGs1oISxQH;YL80#xk8p7>!UeFiR5ZlnI$HMbc?)h|A{F)hWvBxQk33|;1gN1kpD z)OBirv@6gGiG6CRQL{U*Wnv=(g7QZwd$Phc z&Spe;8_YjR^IaQr02)gsrcJ}6H#1U?NN*Bqy~z@V7bx+2pvhIA(&j+A-dX*7+SR{X z)xZ0!t55otmbTg^)2OS3KX12iuUfeG>{}q6lHqv=c>ebSI)6glUx&=Xv!fw;zt(Lq zO~MsH@Q3gLDx9_q4}TFwq04_ZP1XRHFodl!pb0Tj4k-;W$;ozQR-f#z1m%$Y9?H42 zW)kYKl3xxb;?|SM2sg`}b~c)lEx;;}btR>EM+}y-ZP5e?RDxDnI95{fioAlaP{F0- z54uXiF5W~5#6!uH0^bC>08N|-&%6zYx|C@@w69^BG$xlKp^G+}u$Qtni(L-LNw1`{XGiNF_K`{uDVV5p~(4yE)F~YbCe<2~+b?&L_&V z!cYOn`)E6@<+CWq@8*EFrKKdTT6mb-y3kb(854CBB3s)n1&dBa2rb8L{2tCdS`n@^ z*CKK*#%bw z8>x<2yP8Y&Wg4cE`12PZx8_}e33Oo#vtfO#@*KAN zOD>l)!OjL-{J}b9CbT`xJ=)A{$1Si>$QiEAtNyua%Z|ms!_Ffz5WMeqcgI;X{x}%w z&eTzkEFXDGKdOjML5uG59daT53u@JAI$9y{t1l|IJQ(KzgI1|AR=@W3_-!Os&ZZRD z>O>ftj^bZS9FV3#-Z1?9N@;o{Pxy61Qk+109k*m@xyilXz=g^=xa%?rNSab^WbHDq zqA8M;hN~Tx7az#a3Z?CG{27vv#23=pe1gfCa~V6B?Y!G9T2!LK&AL+i^=Y{K^3oK) zOcqo#2`@XpI)01>R)id9S)y8NN>y9iR=qJEjZxLJC!b{m$W~X*X?Vi_et@eGf}PXW z-gUTu{OwjAPpjB*Fr;e730K8igd;Tk0S|$S{dUMsXfBnlNYc3~s7R@otlU&E6 z;~XJEDQU>njKog-RU)U)({~~*qqN6JsaV*WW30C7##qU+%c-F81C(e%i7r{ZiRz6h$XX8TK``v1$NqSA>;o}EHO|Aw-o z9G^FUkX)`e{Yr`mRgp)5<5R`YU?OBrkV?&SQ1S+8X+;S!7E&(I#D8{8a1cTn9hy}{ 
zM3=a=Ke3#MAlQJ!*=OxE8&;IG3`0q_?PM>oMTntulz{9qE~V^e!Q>Tz&QgN!66B#IoUC7w$D)1KJ;l^1`&Q|c!4^8IJB}9r zC~0vdN4l9)1${_~c1aCosY{1kljK&H7*RX$^}L)r+&g{i}N|Ezrvg z2-zG+V}>`zZvckM1?1tuOc3SEDN1d4DA^U&ZFJw0PfXyjk;HB!yUO`_MLKL)X8=HWHpRWBml)vz zASD)c+s^Xi%q%)j!o(GK2mgSXWNw1SwOIS!USg@?QR8mcR^WCjG5%_{3-*!=kV^dJ zA!ZYj++{kT1c$G5naPb74={XM4L(FrR3)qJ?6gL!hChZpvchf{sP%gl1wfRlWHZUk z{26Iiragw1Jrd`rN5*DXUj{f~IQHGue4>ORRi%sQ_;TIMFTHR0zp0*O;8r6DC@5>-*pE{5LO^zFY7M0T7!~#N2Qo*DH^LaCUBiLMmuAy_?NkMBp zy`DP<^c(pAS-@XRLjFRsoq5%mCYcTdUKWO}I7Hfl^oMi&doi zqq=Gw=FH5g;s{SKjShu)daBt2CXwakgglDo_C#&+m~rsqr6e`wEQVy>CgYQcS~FgZ z=?`Z={q&SE{e9*9FxsgZd=s>A%8Dl%80Xu0=DAbBCUerL#lIQgt-}@gonvlq&_V7w zuCL@N_~}sGL<-)nta3Z9uM8i{NIWbyrkvE>63%dOP0EC)&B_MXlw-yTdm`em45=t5m2lc2?~}%>tTQv~ zqJo6X-(=c0=Uvjcea>Q7=TJ*MKBT<*+yKgn_~dylAye6gE_zt+iFv&ap3lh|4(;(x zOc?lG$9S&@;LVnk+EKc+Xp&OJ@2Mq$=;tPnNKh9NcaK+pI9R~3S|_$BDa`>)FsrH( zzg^h`nbmq5Q!SOGbp_oB()3aw=2O%J<`>|xy*Lo4a>I*23w{|llbyeYEiR<42eI12`RRI3fRyGEdxnE zhEW3?uMFP%jyD)zu%FAb4|o1tk$v%C$J@)YZ$9Xlh|fnEX219Xc_-8C5SMuS`OR2T z4@%0u^)?Eg7;P2>kMF18>g+>Yl(_PYCHGA2txI;>d9#q;=>#{MuO+xmSj{20f1?9J zo+-iIAs5d`a7!0NYHJhS?m@gI^K4tDO6`)$?EBPg7W1q93 zK7_T}8E41>aWv6SDzy{KEJ4B@wPcl^&Q7W~MOfyjnOO4AaldLp zV4>9;yFS#(X)-L9macaS?p$Pok}{DWopFG+X@e6=}AIb(L#`d|TYgd5=0 zmLp_LoqZt@#oK$G@c0j+5FQmo0a*TnD4tWI_`6b6YcJNBq>vnN!lGg1*tKfsM8)!8 z9s9y!GGUd;gq2NwZCl*ZI?DMZ(+bO4%d2SJ<^~8FE*fW-Uvow9fAS0y0SS-Kpkn$% zE+XD-=c99M4U8J^D2`KY)aP4S`lN@wCF!*#wkI#Vd%aw(6wT z?%TlnBwoUAYfr09)HkF&^Hy_gs~g6~EAj$l<=bMhBs`XQVV<8p*a)q(0_B;PF4Ad%9b(lF$?y(f$xLMwFRxj0aP<}f{58L3;Hf`|!*q6%k z;J+!fq1ntf8Ia}0u8oHpm!RlY8P^o|L(rXjqa#u{8Nf{`=RwI! 
zX$KC}bkGfLjiqeXM!m8kIsgGLdKP8in!I3i!5@%?C)&*%$Em z`e%U0McG4K1P^bFk7hgX!We)1`3{a3W!vv@HvQXbunY5^KZ$?DnHNq+#&s(oa0H`l zjrbk&vFr54FjDCmjD07Z{#x^C6J?Ef#6+ieYB@fMU%@L^gjYfW#*$ZqIu(%sBE_wQ z!^5S2>X6tayb zM-F#TgrS%VE^+6cmK;Tt+7v-NnL9BE!VpDpGJy_VbNdd>nSrOrXhgn6qaV)xR3Fq< zE&c}d`JX{8XYr(fTc)3xd*3KYA~L+vI2U3n5|wICqv-Io(@3ps*8DxE5x>}Pp2cYt z5YBoU33IH|yXSlwjT`xClnQ{YrzOms*X_RNjcq3O;%w`e91}A}^WyB0&Yv&M{^?%# z_Ns#Kxb<8)H_c#<4=kR!q}$Kp`W#ET*1#p%M?T|fd}Ecx0@En8Qjeg6hn3>b$R#VG zX6!QlsLpXjPQf4%N;&>$A1G2o2a<^mAf2vED|{5#X_&a|93GiOT<;{JlY?WB**eu| zuFf&8qHf8#_-L+y(Aqewl}a~1&q+l)Xt;De<-`TdT<`T#acx>Lsi-1pZ<`g^YG;>F zT!Z7ngd!qZtl&Dj!9h>1H|m$`A^lN`RCSFgXS7GxHw|f>g#37PT?Z?0hkoPSn3EOQ zIZ`Fgg+dIRh7~wfMEuy~HZ<*F#)xSjEbhsn2A|T($q-!Q2OeI`8W6iahuEKJ?r_xU z;5<9+K)Q&78#C4cHkDf%6;Qc>Qr%FYQFQ=d452FjV91S~7z;syL%3KjL2lqI*ZPK- zDDw1IuqQ>SX;OHv^^BLITH(UEUuU7qv94X$j5u-k<0*`grRHXIb_BryTLoL%P(#QciBCI}fW+2f^35)WNRvu)so72a!Tj2Z`r19d)(cIdxER zgz-N;PpY@g!}4si2mCy7l&=KYcR%lxnx9s}nl1UT;#S1{Oj)3&n3LfRwxdKo{y+Io zb-RWoEnKRilPrus8M2sH1VUO21bE4+qfpZ6Mj|&c16QAcq8G{p&vif@ml%~vMOkYR zDvwFP=%g__pb@h$1HL3WP-M+S{0LfByFnKIZN1DIgcGSc&~~Nnvh(bm)9Nd-z75$_ zglb=6g>2+DT?YA8wG@2(l>HlAn>9AvmQ6vmdcR1;WXJQosppA%J~YDpROo&T+Yevy zVx7z_^&i41v%VYubfFtvuf-@pr{v5Bm|FhSQ2=Bmj5eTxh>9?)p?2<|Xd3;!K473m z-(pMFH2Ah3Gt68}J#dO|i|NT1P}cb_w5%4(76nk2KB=!Y%UK`6Z+yrdP78Z_hkh-M zu9H8aob9C!+3{st1pX973;(A^r5Iad2>F!QvRhdPXXQ<(vb~&pw?=_u>!cXBKjdyr zOZ_G?_S0d4wr|zBlmR(P;aDwa!xoF6e&QjG|IU3w*y(|g@1t2Ci!tHW$XD@N*@rrf zp7HFBpLSg8S+`Ta!~U@soSpg|jN#|QPW@FBR>gY~XH8)~p-gblr6|7~t%18l&Tt~6 z+3-^<1!eXa2icZQ)2#hMs4bI$eCmuXI81io1nc}T2z*sN*C3KB#lf8>$4rEY+4RJ_ z7RmNudEA=eP|VcKF0F|R`LO9|LoycW|7ubzsYglm)znG&mRbSXN;2<~stc^p5cm$F z)<8f{`jxS#J*iIKp56I6xC7*k6n_m>)F@_UQ+70=2HK|+E#oWHIoT?SMkMrUL_HF4 zD5!3=Lbn6TK2k;29wqbkqRHEDwZ;(nT!9m#P`sl&Wlm`x&Az+M9e4QuhNU~Lm-;;A zQ=osR!CvrfZ;a=4+3)#z9XsmUum%R#j@(KJE;sbsy6AQtVJ5~*7yPCdh%$`20IQ8v zu4B!2hBR*dZbhYH%4w;dmSnqO_C~=x1+210!{C|UknIv0oN9t?C(%t}o0PHsIgr23 zbw;@=cpCFT3+t7y^_@-Hr1~PhVUx_A>H=XAp57Jf-a2Ks?V1D_nX_UQOtDfMSqL6_t7MoL%ncTqb1GD!} 
z;Z^2|+~h{xdM(TlHh|Zq_}4H93&2uw?HsUzMa77&avt;SgPpCvBiU!Z>S*ds^?Zfl z2%1$5q#zQ?{6nZFFt?<#o~m_jE`O}is`i#p%z4h#FP*O|j;Fq}j95cnX0cFOC=9X1 zZ_RZrRvcyF^>fm<+HoFFg7dIh5ZEQwAX+s`Ctf4*S^b8A0Nbn1eWpqbRfN?04m!A;xoO1542CA?faBu@9A$4}=$XCQW6<2! zhNdR86Pha92_zPCW&?@#tv7&%miuT~Kmiz7oCX{nv5ulB+tXs}q**!JlS$iSlg9ZS zi8AP8d{(hR?}rf;(**X6Gd=EncKZQVfao0AM@7Z?>`qHX4meQDOIl1t*Qx7#lv|;u z(b8`bCq6kwqPu*4%$;vGs-uZ2em<{z)@-q@21{RBGVOuMSXyHJYrUq$*2Jy{RBTe& zViQHMuwr9b&@(K!g*)Ob{!IIiYPK23=(HvaMT_ykg}p$bqekDP@dt@EprP$N2zz>A zdgrrj@c!_HXYl^;c`|r^;za}7EIMUzJ(4VD9q{;DkTXv!j=gyOg$nm|h*uO7#Ov(k z2QOal(GtrA9p%9|%74tQN-c08 zmZe0qutt=rF_Un4u#u+1_+Vps@U0TJRjHS(O&FzE_$fVMv$Sx%j2(^%u2l5d4+FNH zLbJ7acLk;kv2_GFr?MpfJAWh?Yba-mDpuv~rKXI*JqUYLfVwEK&Z5p!5 z#$@)hs;Nao+Z;kCWrwr7hw@jl3eWZ`8dGJqG+Ar@O*w1`etv5eSOTc(INzjOr{!0d z*Pw=*EAI9Q(1#ag3;?;^a;wB`XGOd2xj9}Uzwuy7q8VB7gulA_$}F!i;QSjX!{!-A zW3B1S|AV1nJxf1Eq?o#qZ&{TiWsHw`p77-|VaU&ox~`NA(=G>drL1(zPRmTh$q8Hn zq`u^JuzE_aZ<%`m*Cs{@zi&v(BNl7M{W_mhvP`;Ee_I&=PfwI2-kP-=$zALG_BXVN1c?_U^j&J0vY1X*R$a3r9X;&dqZ3BeGiB zma7y0a=3ER1^O38suB@6Ee&G&5%esaz#n0@S^(^*Z0 z9nHSH&%O1Qz+m=`FVQyHBDhF1H(mc5L3Yxs=Jxw=kiF>>?zTCp0*4zR@@D)dp}bag{e;>ISaWAj=a#M>?2I0n6Ci`z1dGcgep3%a>d^*+igmZ* zLD=Ih5;`R9k4x;bGxJN+Av}g{^W~^zefRtN02=9(hg$Phc#wsf9&v7=A90-VgZ#Fg zfYy{{e0Lv=p=k3#LUdyfJS=Tk{h&dQ9xs!QyplJokR=Let$+r4ab+5FlsXwrLYa0} zc9m58t0{&@n!g9^FPkQJ(C-!D`m||HH#JHC*hA6=(qZ@8*W`4$flr`g{tE0pkZjB; zE_`@X=Z8d3_2@`;KQ1b{G5(^}f5(hs14+-N(T3Nz2KYiS+{lNZ6iTIamaT`IbW4|~ zbd9sSe_OgW8Tc<87Q>I|cI?O$y*g>WQdw~1S{BM}rmSh5Jr?!VBVVnsj((zcDhGdi zm!-9XbO(ZlGB66ymDC8Xg*`aTyWmd$~VuS*e?c3~; zU~k0Rgm?hJZQ2w>vptmVk#)%50)G4O%3HsT9IPD4s@eeD6M6_@1kNZp9v$GBqjT|F zXTsa`;V5DD&lh`ilW(R}kL@n&ktyoB;N~71s)(ku-Jd<#QQw6U<&-Fcfyl_+{dz|B ztp$STu`V^9iFZs!gC1w&nS`+hqq#jUP(5jF3ShO}uJL;ps-6>F_1O4NT2XE2cb_$6 z<5TphnMFIBJZVjeXtJ&BX_FVgEE4*YY0ti#24)rN0jd#u?y)@W$rLF%8!d<8&tb1g1u@9pzJ`wVh2C= zAV#?=*hdAr66@}EQUm3E3Ut>Xy0+CFe82POTwl2Tu$M&0Bovl}ro$5IV1V+Yv3F<# z4jKJMhLl?xoRx|?OGQ>nP=inYY<8%b_lHPfKrEbVsM!gbT2-iPcBq+eh=sFLhMHNC 
zGYvIE?ZCkxZlcAkrwcbbVav(m011!LO5CuKLll%lAKYj1)h_`HT4xk)hTK({3*hA> zO%!_ADi?@B@U(66h0AcCTzF}ApLgN?b;&6L+Hh_W7H;mSi;0duYwiME090Pa-jMS^ zd&s!3DNMScH2rV)5{dx@kS@N$E)9NIC33n*heq2{DMW>oMdT~@suHblT>Um}xg=p$ zgL^V*sOKv?c?4u;rAb?EDPOE_isrNAFKv#PH+v;7P*}AcjX&&Y_;C-=V3~Q1*V@o* zjat|8+UejRx0G$|73EX_`Xm|EO+9>EH*Y@w_3FQR{-3|=>G-cn;laUXv%lHj-=9CP)q3Yes-(ZqDro;{>bBY6+nmRB zV4yi*4;C!wZC3f7YD!%C%RO=2!;eaH-ol0O{DN((@0SAtHs-6(?TfIujPj$s}zZD7rt z>L?YvQk*_(zOvb+U<`v)6RUuk49p~(O7ps8JxSKMDk(ffVHQ@5oy}R>Cj1C{>nPYZWe&J0I|_qE_^TvJdQU56e%0 zls@;xM*qbooZ<3e5F9OL?76U8+V*UCLH5>PV6WA)&0J)!y(KCZrwCew2eM3Z>a`Nb zot<>RkPvTXF$80WLrYA%BlnLpPZPrQ%c&~6BG4A*lCxt`IT_mFDEnAwq(09QBpUxg zs$1iJStf64kdMv&E5|=Wqp|Y&J4BCUsl_6dwZC2qw!r0;G?UkvAp({M3p6S{EWdih zH;@;|8|}7(hqIsmH@BMPAvc!o`nNxivqjmPxWtEw-M9;~hXm1D_C60$Gn7|mKX~um z?V!Uy$bRjk41Zm6&Vl`f*h7~=J!i5IY??E+9?GuUUv}B_O+HD;Wjp_oJqnfhAk100 zU(Ai9;iLXYt%G{w@7j*(#J>Y+i+7{hqdQ2a_7)a6WPsVn%pYHkr4qMtikUpj1t~r* zd%Tjzm|fZx%^&pnk60C}tO^zShpJEJgwDs? z5x>VOxspnFsa~v3`BG?N@{?u`H}YzI36}Jw^HB`RX1_cU7Z`iyhwzR@5nL zIDw~54nOmT)a~So6y3Zmk&4p*^6POsQ`2TvmHAt5VJBt-*Sbuzb*Zzy7#dK0d4P*Z zfu>d!C$&ifs*BP4YV@Vv@`9Ba5UbUYRL5+`knxe}2``d~R)BYFKg-!6T_xfGJwHjv zDu@eiJ2yPqp{4R&?tYu@cewi=ZnxPj79&X-E-&p#EdObk?ZM&wxoIe=+IEl!RJ98) zWqZMz^(pd%QA4(m@?TN;7q!bHu%?q1UvXY*Vk$|tm11A~(i`KYL2!AX`VQnj-k4-> zw!&G@1nkl>YSqQUF~{S7l-rwF6Fd(FT6tO50Sj!JwZH)EYK0yMwvOO|5amF?5-EV7 z`y2?m_dwuw1`r|x!r#A(O$wKF4N8m=I6$bKYYPq%kwM~3@8+X=*GDMg>})6^13YNZ z4IOBrnV^YggGPOAgGL0?VIS0u-n<#105QVvt_Y?=paigP;sP^!Ef*Kox1RT#kyy+D z#MF?jhHTSUIL|DZC0h8`dy~2{9`bansh`G7FU`LFZO2UiJ<2JC1squoj!@*~#S$Sj zRyy&xh!tm?cq$4t_XHL@FS!{7@#hG+ZUs;gn)!HIcYal^t0I4GU6m*_TNL!p`?}Vr z;Ut4;ph8P=-D5^*%EdEbJ_%nEu&u^uQ|%MAH^!pJ=N5{^pyuc@guNz;pP( zhwlbJ#f9$%B7FL)=COZ(j_$-UR8v4rd1#Q?0$NN;d^E_5vTr`wA=;K^f9ZdEL3u&; zrJs>EvYvgKi}G059InXTH0{z|tOoMN!-D0(O6mGh6fjg~GSMGGqOP5=n4TeNn|5c1 zIXACBQ&EYj5WNDM22(*!p-B4kbyR@+^gaD4{Zu9U1I1JE5}drQd8iKFSm?dvE=yrz z5lCIFbCUp2&OT~mR(o&|^LeYs_mKjJqWItIY4fV^&j|UBgeJY5W~#i#b(D&10Xb{3 
zg|F%(QAg6)!2O%&iOV){)lIS0kQ%3wTW`}wGFuA6Rf{5NkdC)Xr6e;W-KL>i_u`EiiDsNG_1~TR`dgzlP&terBJjgGa@nbG?J25oCc_>WDP1Q zTYL}4QrqeijpLYWwj(r(w$(RtiMd>tO_2a?@1Sjq|97s6W_wp$vmK)$%w>PT_OPWv zLnaTaGLU$Wde^)h8&kt|z1-*S>6`DRViK)XM=$`Equg) zQ%k{NT96#}k?6Dc$k~n{bXN<`fR=2JoLe;V&Tp50Nl0=-7TzBJ#CotSb>TK?St0>i zmQYNQkh?co>Z1f@UAH$q+cS6!o#WmY{uI(pwgcU32ZlcDv#6yE_H`9I|D=n$WK`Wx z=br78CffcDtkKz_;Q7Xx9+Imp_qFiZw7KRzq05F;1offu!EVK!ye|@6nY6Q!NIi(Z z3EP(jhN4Ec;R;9>N)F^Xs)UJM-{QQ|x3FgWwj_T5_IIM)*JgxA$Pe5z`v=Kt;uu@g zR7c0RJogH_IIJz_BU8lrARYR-j8zvdLoCy931EbGUYjGOea+*X9o|nv@fS1to^(5_zqV zHw`H)$9!MB7vZeLpIV~!E=m*-Thz6#)B!UdL{_NiYuaOx53E(WtS=>7dd>Y~w#{Y6 z$4r$zF1~wARnrtX)hsfhDBkRvn+obbTTh z+U?qErf(G+27PUL@MnD>tziy3ltcshkjq22)Lw3v;!dLXEl*0MUQ-V8T)kLdnWJ)F!IA?8=Lls@a@;}w%wYNEblJ2Ddkmt8jemO7jho>0%s-c=MU-h0HLX- zERgCz!g_X$BZgJzzOVA0H83vmo2_fd8+1*@RS)%zN5>kH8ED9KMFSB;d3>@Ke}Jl1 zgijDEtuHvmtqk(3eyPb4`g_!tPTj3%dz!Y}Pt(_=yg_WZ?2X;c9 zE+TWFL)!Q7@lp+BfX_k69jXjze@vYjxIFKaboh-16i^DAE`tB$%%mg4ZTntu}VaW$VH=&GIh-dDo zRiIKEgXo?SqDQGJ4hxSjAe=+%N_{@u^=?2Cb%)9l~l4Oow%LR?aBHdI?tq6_bn7lgH zfPILKt_Y9T;1NbaP?Bg;r?Z2vUNqG&jh<{6iRwUh*@!u3Jt6i*OCrDJ!7m%w7Kj!p zx5)tN?8!zOJ*QMHgv?)#A=g%98r~x2BvWfsI3d!-E-9Flc!8MIKCXQ9fEbhK0q{7) zmMVk?@RuXp<4qmN%Gu&wtcp+`Dl!5Qk23aHaH?(e!8nylor+NjPD`V$&74s^tj1wf zyVN;2iSL+o(s$T6sddt^WYx+bWf#cEYND!)fwG3;DUNj`=agbPL=2nFx=aP)#q@ZJw65-beX zJPUxl5@9ciCm}M1L zVhOaXr2D1`2Jst)>9suQ=9_>{;=?`7p`wh%wzxt-)BMCUKh|>_#!&X7yIep2BJ5nc zLFw~u0~~LXm7;u`0Q?-NNjUT4t+tpVM^>>~<3a?rrxaJc>AEhW!gC&=?Ghbw0kq6T z{MWrqz|7depZS=_Y1}Jr9f|3?!&Gki<$Nt#;jo_J1k;KY8t^*jxelC4!i>odKqZc| zRkE?WpSuB7<8Q*R+JP&^7WI|BBVL%4#jI&So0W{96kLh4uMPecsnh($G_ZI*pEGYe zz)?%Z0U&PR8X}%jQ;~&nMF$0sX>(033a&b-oC!br&JQP<+~jq;H}7P=n};K38Ez$u zx{EOnZQ2=f#@QNaYy;=!xv)Sx45p7gQ%n6Apmg8SE$ zIOoNS-@ch5=kDF3{J5$+-}=?>-M@ZR zOWk^POR8VFN!l4HD2anS#=$CGKg95eSB6YHv-rbn7Hh?kyw=4+3!E9*>LA&466K-^ zA_yW#XCjc2v56))L@>IbfC3^>K)DDI;K^-30TGl$1SRf(F!%G_=T!Zwx?6r=9PDdB zSN*C^owLtA`|PvNejPMRO_dIkmJ(oS{c952VU|p3ygegh1XXWp&=PHhCUHmld&gY{ 
zpGG>w^wE1-_BFdLWD^x<#e6ED`!7}?5`{sMzTqCcB(U;VH?gbWpfo&jJU4xn(8HLT znv=h_p&_Zmm=be_&ZNbSM*0qC(#opT0<5JNF*$k*uI4sRaP;dql92%XFf#FLDB^aT z)#y!lZ#gWW)HVoN&_d^;>Jap{_U7t?aqB+pOKbwy2X`mcTebr}dnbe8E}6yDs*GWI ztH{Oe+bM!BG}@nmGSF~zXNdp(wj)zeE^Kgt0NKSz(^Y*k5g<8Rx<}f>Rj0B<=27ea5IQY7r0qscw{Vr5CqumA3P`td+q*aa1g7Edh z8FIw#HX+RViw|88GlOV$KkzZqD0@W;sE3eQ*?&u~a1R2*!%jgRxmz z5uF}v)D1)ru}}9%Jx*wa%?2JHp=c0IF?PnvZLmb1@JHLv341$}W)btu zZC;F!%f+1)34U z8NsYh0`qAgJ_{LP*kPipZB!Dywrx5h(p%XT@^C*##dNP?-Fp;gK0bNWDmU;#H$R-Q zqcn3YKOKeH%J%99V(%L5lGIm_bg#KQ3z#Y9 zVsBs&d2eSMQ|20yQWz5iTCf`2h7*WU-DC*s*;9efm;_Z~G}bfQlTf+qUCZ^Mxw*2_ zkGbm944bWKm8o?;SIM%txd1V`SaB-|i!G7=EFESVgY?p=vnYUY@PiAC5RMKSH#c`< z;v2yLvjT^}n7jlPp)EJ9zioO5D<% zKBfHuSNXH}7S3>+&at_Z+r=5KHRfmZ-D!!7Ph^$6ut*#k4uq!+ZtlkFKhRagYCZNHRO+Z2MsD1I<`r;NV>Toct zceC@}rRRoV1nFrWx!`%eSjbrUcGA2Q8#OA?gKPsov7o+=BDjb7qM567Uo z&3g;2X?L_*b?@t5qhUNI3oh43Dlj*u9gAg!a6pr;*tobxtnTRDE8^sKMye8D!@W3r z1|8?*0SpU%D=LUo)tK3PMC*#{1OEBAE=j&fkJ#Tw?2Gqz9F3hodMQF_(BXWl>+#U- z|JMSd95eU8^q)eg#3| z-P{QQQwS1=sw^=vT$6)5v<{{x5hl(yLmseC5EspQwGTxB@As(`iYHcKuyCa%`HPu# zpRkq{-LED2fYP*a-OmRq`bD9UFAuW0$b47_Kh7b_>3tdgxK|%t_&1H zn+?=A>n{%8Djv%?T*C2uzuUdcSsjRTlK6Oag?w}>0YARdru(aK)TI@NT_{!a6^6=joVf3rAg*ss&GeX?J0-P=rH$j~rS zix$AF$^)wMAlhQFkOR(QWMr7m@B?8cBfaKTm?>Rp&ga(17zhzMLe7Tkh}wXAKBOz$ z^rHWLoU2&dF*Epodlb`=ix;?pX4zPS>00PzkeLxRkV;S{V$Uj? 
zB6AJ`4l8RH5r|E7w=3~*q{~v4?m>wJD(x=^E540I_=T0}QtY8sMa6p{yS=s<0EoN zza1)O^k~*#JZOJsXxX9SeCZq}S}dYs7Kj6BV2}cGi6E4m#@c%|SpfO75+hoIt9YgJ zNa@O(&PiY!s~383T@qZEEzBIpQ|sD4DHGi=ZkZr*u!-F`~~g551Dd5%__&ag@uQtqFk$li@*U z9ZxSI0r|P6It}X=MITGqIdbTEy!01IceR5ybU)T*NELK;o-`v8F_!DHwY!Q-Yg>0L z#fyml564S!F0g%vZ1>x`%i`tSz+JmT^rI6G-_sq}o+R89_r=X^$w0b-@cg)+KB*8v zaG4k`t6$&GA|SPu!k+T#E}jl#@dDcP0z+r{e*}9vYXf<)lLghm`K|?2a@Zit+A!lH z5q*WsZRVLSFRn1y9RI4*K1+&X#iUma!=}P-0R>j<*#C%d&nfM%kDGp%02BqN#a){M z79OGrr47)KNw)z`5pKv-KzoZm;5m;`=KQL7NG%Sj39$;vS}@z*`;6HQtM%$;bR)RS z#RCt+Zz}iw^5JbJ9Y&uY1H(cd=mNwY>Am-C1xO%3khe##PajAB*}MnfNvI6Tph?ou zn5^zqraQX{eLxlTGBt*=HDEKTO5)7}561BWwAq|?*J1ZNPIezo25AzI;=xypxkK(b z?jE@zAnS^`c$kersHIhh+7Y!(5L#981%oq?hOfsU2VTuZ?ry`dKG;F!Jnc~8Cv<=u zn}Kb>!$qu#iOW}pNnAnxW);3Hyp~G~ujz&!38C~sWaA%csaLnMiLY^>AUrrl!okWv z#7?gN)-n22A4?E$W;?O$83gw4%1N{qF9X&gd5L!0APp~$SCo*3V%u=gDB(cxTOUPb zI3{Qu{M1E*LqmPIqU?GdJx&9rW#<+Ml?7AfYD_3$`X^u7T}^|7De4XxmHPDmmyJm^ zsp)|hM0hfeK%b74CdkpNO^_p+AV+v>nA+y%2s2D*bIgUhSF0sw9KuaV#MTD?qLMYB zR*z&*&GKX)LDnX&QbpbY(}1#JQFYfYXu55{H0Ssg zZS4-ar`33gAt+w*vKs=3z8)_T`*53RDDG|~b9VW#ST40LzzemEz2~X*?h;f${A`#w zZ~VY7g2yDD924))HWjyWj(`!wQkv*jMaVhn@!s-~t)Fvzny(ci|1;)4#uQ^5`A)n4 zODe;iLj=mafcT1C}do^?nadhScvmr;7FfKqjcL>`3IErWYR4z3R|Dv*sfP zSy5Z~_Z!&L{Kd=_{W;>n3o8c}MaeBYn0cSs`Ii!0p`KZ`MtM#D$oxv#Y zP;zO0y+xE*;GwjYBQn2ueWo9l%5+@ zgu57W&QAKEUAMG52Eytt(&YFv*hj}WhaPC{up_k?4_l=Y@#_vut`Tm6a}Y|M!2K5I z9t(iUIft#TOtNei2h^%=A_R&v$jRC&@BFM)y->DTP<1X>pj&mdLMU2}+nC-kJ=n>$P||?|N^^V9=58;$`~3dJQ&KX>9~|24kJo8_YNq%#=puKsG9} zx|?X~$7G+*B7;wf5A36mSUZviC+0~WlBO*~#t=}kx*7XmJ$^;fvC)6JO9|j+byGE5 znYBr%QP?Bym9c7-PEXlHjMu5tHJzx2<0l8@nh0oLxW&aJL%+y1US=RY-L)g{uIQPS zLQ;_M;N@qM$chGStpL4tVlCjJn|=DL1%&SD43YrxZrK;L85!gq@6o$;+(8pJvZX{; zcrB>57C`s0s74qXc5xsBY4!Echg(FJXb};bB~!D`#dSpMm(o7{cF4u%F7FSvQZ^V; zxUwj1#mR={>lNK$gU!7veE`d-^-2y<5)q@%*aKn-A9x!BSkv}dN?ipLGDAJHIv^Mi zN}Z4%u)laq*P`?6h=@~Y6d5@CGCqBdCAx9_;qHVj!d-G~c4H&T zIG&Jsg;fekla9;tz-tFiRFW5%qGP@;mzHaB9CyixPM8YHrx!@w(UncbYYyo@cIVsT 
zQCVCQGo83M+KHN0lcfl~`@2_oL=lv3h{9*X>1L%#1aXh@TL{8@Z;Q%VT^*v%s3;jj z_$y~#L^2?E3&~7a92#&^MASj$;FVRX>#)L(B-51%Di3|$q6bM{kVm&u@H zaV9&N5N{@gI94TYJBxtN)`?xmPFZK7RjL-w0nN+~+38Zy$+@8}@K1)14p}lCA~lKa zkR0c;A9k_(Wc*u+;^(3K*kfgcMd(F6?zhF1|W3%^p)QfPz=&#&iO zd6y^$VjfiXYCBJc$#jRzkn%5oZEO)`TllCU-y@yrq+6Vg6p7ui0BFUuMQ>;CEtea` zvTaF9(PRdU(v&phlEPn^hqGX^yZl))9^%8$tMe{j7)y31M9Z{R5H7q+0om$`hsc0) za1xX)lcvA}P8KW=pxDIS$SGWQXd8rg3BnAOD%7*vPs@1u$i>_*g$gutab#5&M`oLc z3A7~~B>tH`1?&;mAMjAyuPUK&nb!8|En6Nx3r0XjUMAsOtgp9Y8O% z7%#nB3|6d1L8|;Io-{*T)a~x188WrN(%eZia@5HVo1vbUD?&9E1Dp)END(`Z>=0(r zs^H<;a6-y4xw8P+y3C|uhV=%49DW2W_{k_+=5nX&lIh#UuZB>6HWUHu40i306|`7M z=10A=#$KR;`Ow>#Oa19lU83||T%x}lcixms)6c%wc~hRPgcTQruNhXQ1~XO5c~Sxn zKV<%|V3GM)MXQ5Qs(@p$(NBGN(?({Ol|dD%v3H@vnjJ2)(6v>vGT4GW-*=Ja*i!qD zazLKaTlj^?k`^}*xxxLK|e1_kvv2h*t$H++0Q{ z)6f3C%VqIIhv~iV03qas;Cp%N5PepRIA@{)Ayz5IEEU#TrPOz(#DH$Ou} zaFkAQ$&*B$4$_N0VY~OZ#U*!-J6z8{7)^iew|VfL@y;8oE)9{gMOnUdx}47arAyQf zoK59`h`E59apV9rUu3)EsWC_hojsD{j2o+wikVmC1SKy~iMoa1-5yVhd)OV$%tXPW z^F9+I(>F=##Q3N!U9NLw@M767Wg5&T4RE6B@LG+(oriATZY1ZQJvyjzB&d9ct{5El zs0cMp>-XeV(_A}8jO&Q3OF!C>K`wezW%J<-vMAt^rS=|f{Dd~Nvm+$6z-7XY8gGYG;<1q1Z@ggx#$y-oMMz&F8Y9A zJL=O{cv^bc)6!Bmz4ko-8N7)<(UtMe6-}c2h%=WNfyn>}5R-gvY>-trAviBI3j8)Lan#_wh7{KFpunBp6l?f9U@Rf~lLL-y7!|1g|!M z>BBecJ_s(|6dfA(_d)QV<-`Op<3u#)g~8ul3`-Ebn;3dbQ%1=@RpB6s%|Ud+ngAEZ ziu6(7KRfZgo1*vF7s~v+LOH|jiwE_E`41`eYiHbH09 zRML0yfR+90?{O=8o*(UNJmC5J{PX?YaCBGOJa!u9n$Plt@fshMZ!ZzbbOZrDbCwse zCL28Ok63J|?XuDtdxACF=B#$XEOiHJ8>wBb>WQRhcGu=ub5@uu&?CFN-KTK8P%C>s?_fwS)i{;Ah zs_LeA)nODKaq~~p;N=$S5N^Kd!Fbi?%Jd!#S$>hs%uc|Hubq-%GOCn4sG2@WD)D1Dn?L!EggYzNy1 zJy4>^=-2#f<2dRAfd$lpYM}hyMjIDf>nHRoDmz$FPqZxY7tfuX2DFUT@7kJy-D!-+r#g44&?{Zu>R@G zdlR;JGN5pn7M<^7ioDt5k&5tl2_YQ_iK6G_4OF2h21*c{<<>UnI4K*(&T^w`lT3jA zx=&5+N43RHx|Z(Y276#^CxKAo1$V@uYq(u*7=L>j1_m|E5KB#k@JI8iK`MJqBx)5N zj#zUji*=RK#9wFBJibR=bS%tXcKiwy$p%;XmysAwsgOLVAx!&wNuE>D9}(t5FdGio 
z=G`e>DnoR^LWlbOn4%RTCyRZA>vTpU>AtE~!d^ouc|<9Q&oNDBaYmgKf_c?na#;x)7y@n}Q&j@m@Q>w?%Y3Io>!Fg6<>O{5Texj@1p$qtcn$OLHC7uew-+!j1HU>nvA|Fltm!Lz*nHEQHV0T*SLy$|l2!Nr93N0TUgJuP=OxA|F}^DqSBdeR zKSw29z8bs#J|JGBb&0#xRJ_veSYNQxuI#Rj$GTn9uB|a;B4%6|Ehv%$;;hDj-+M4U z%gtpwG(n#CUU}vG zKB+d~MkjO?`4YXMs_;QnP<(o#zBRhC1nMMbTs`K@Yo6`nS+Ci_*f`+4XCv`ou_>|c z#GJqo4XFaYVLy93%dB`8Pjwfr+DxCui zDV?>a;@{$yo^w0Cm#oBpM=plOmJ)rXZ!}qPH?{H^Q>#wFkYME)SX~igr;MxEl(rW( zI{ak}_{#|KmvM-GJ(HFAJr4e31|t1In2sw=z> zUWS(Gl}U?SwpIjx1YLEMos&+_>ZhZ$^W3Iq@EPSKQRwH5{Su+Ax{Iz*_RXd2n*@>L zf_`wltgkQfonxw6W*zdGCfqL^0*&(`Lm;x8Hw2IyWs5^l$%epD)sqfEf%r;8AQ(hj zT!Aif|I3%NKXBot2?qsO@57hmP`9 zW%!o3&#golx3|O3vERkRJ;_ef-EY`Hp1%W`{!Vo4wsRnrJ76{eEhg4pQ!G%Dv#EP= z;){p3D>>y9%bb%0kvOBZ&d85ryKOO~FFgbvaO{In7|0p-BO7d->}`A>s+f$AcYS*C z{lwTf=_iyC&~hX5mz_t`%NLI-uMPal_7%jGX85#ZI8a7;UvWwqRuFdi0<{K`o$eI2 z=%=d|VLB?Ef~L00)a{NkH2Lc*k!!#MnqAKu5qC({n{Pp6{8e71$nAJlvmsC-v`Cyi z!i`*pJO-|JJ-(TyZuGtV)-YBc)&=!eb;SsVp{uHtAqRt!Pjoc80~QbP6FuM`h9E4W(v>|F7t_Oc=XgKSoTM@Lwf> z;g)3?nM{W)4OTSeE=|y-No{>isYSK2`AuwP zfB%jZRwF;J?~wsyQAbgljj(oOlMskLvfTX7`iaKxx%zPk-ZJz=$00boP_vg~uqg#P zn%m*PtoYFXxHitR3o)z2o1V&12=8GF@WVLK_*p*X@33$!E1U&-dF`{p*)a^b)KW4{ zFpJFA)$}<{CrUOra9#5@xOuq7<43rzI$T#Bt_ih!!ei|ZV;&Qx@eh(2Rpu*acO$?R zq9mTw)OI1PC}-QyES19CTq;tsV2kW?r42efiG&QH7D;hIRt<0ZsTx&d`FWNxedM`^ z=_CLD#Psh!_b~nYKO{_hJHguYGoQrZvNnB$i)?9YSW3_pR?=_2m&JH}`Vn2C^eC6; ztK*%oUy~(>+}HMIO)ZKtRwu-QYgu+#E!+b6RQ@W!i?pcZE0LY1U5|!1FQe1fY_PCk zoYZPly;BY~ar#dIr$(xL!a6wy_dp|)gwfaAiIj9~El1H&zq#HG`bYJxz>!592NV>% zc%t&RX6Lt{M#XFd7a`Y(a%Hef5d#^3fsixA&c~}NHTaY)vmE(|QO z(C^hPMW^L?Jl)=!erS%o`qk)#J(zfoIB7Q2=xD1vf) zJ}65$J|shvv0)H+Z8gR>l3(i#kG47t+IHFiLCc#(hf3mqbKZ9~$SQkW?~k*`MFNTb zXoPXIh>J(VtwK5@EiJD)t6NK!VI#^fVf=)o7G9pwl~z#;+w&=n#1td(hvhB|EJbsH z^P9HIoRXT20})K6@q6EbuW&a5#(Du^{+oQ<-oNl0_3ym;cfQy^j$Uyztz`8Vc&+3@ zMv3t}FlPRSiAM_d49q1|r5qSnYK12PJO|=`=!?Q=^M!!8EFdoDKoq}rlWR@|P$HzC z{BJPFpcc@an4?+@9M!`UKku2mnr>VWX>xEjP7}sPw%fj4!<1ljf!UU78xe2?$0HF!xvo_Ct+HHj2=$NoJ%c92qdO=PtYP9q^zieDcr7Y~>p 
zrX>{#O>2viMVnbRFBlzSnLqOC4^X|-=jpm0PURG67;^~CLWR?h;~U{eUuY9nh(xyB z^G9@b#b=~+F(2(vUb!Nj8S6d9;^-UM1ZO|r(OEtECg-Z#2CIE74m8T1f?=FkT$(V$ z)j9lP(sQuE?|zE0h4vZ20f7P@XZ4OC%JlRnI5wrPhasG;1nNb-zlgM#lb}UZvsV1{ z4Z-J-G_PeF4VFE}S|3~oPJv(&lu>9S3rc~?&Pg;q*W8+Z>&r}ucA>T2(9NpMEM-3a ze^I8^D?=!ljTwPbc5N|aUG}`$01oyBu%-db{z0Uzd?4E)8+NHhGQ?j{a~Ejt1M{Z2 za+$^oTVh$bA$Y855u-d?%TYOxVJnVnI}$<{?qS&F@30J>QW@CN8 zqI*{F&K3y{dS^dwahn-zQz|q?1;@TbFALm!K;Z@La%W+<(>wx?Ic9Fs^@TwoJ5r7D~^`&3p1sWBjhUKO7bZ=#A>edosSjIZJlRbUHa zv86Z}Q!sNG6aJGDEqd%Bnt#txIZ=4w++7mblF1VS)U=1%5QEqO%m8H}M82suO6lD$!{hdgtt#F`S0m z4x{Ks5U|t=7f^TimMX>0!5A;p3&OsS{u!Ai zVLG{uaLQ`668Qf@bnASd_1|(}P+2ZfTo(Bj)|h5`@k1O$)Sq5@`0diMV3dwFWQf+( zD`ITwARjxK&7f8*h{js?(b|U5MTHHY&Rv2;y@MK@l{>Sa5jz3 z=rdIP>0gL#7ginn%%2dg3Z{0xbi0tT(=6}Rd$uzdvAw>Y(c&zZo1Eav``5at7E4ko z(vLn{?UsmQmKM>t>QuQKg8#RoTVlD>Gaow;3Hewz=D_-9UOfC@!d@psaEJ5cVOk}c zKq3A0x0c07PGa7S;m@|r?EHG0AB;OW`>-vE-~e$salTnK6%eNcT+!oQl2Al1S zFeOOyA%Hd0w=DdK<+)JS&lzxq{=8n)pJzjGNkednA^6TI#dF1GJ$K-QN8JH_T_z8vBx98=9=L7!ts;SUX2SC_sAGZJjyANOwi!KY>1Xs3JEQ^B|iw|Y@m1Q-N zzAf3%B;Hpp)01Q7Mw02iHkKJqXL>B;wD-*XA_sPiFY8dbRyj6=_0%2Ms#}i#8)HjN zXSGUZUPB>2G)m(M=Nl=?;lIQb(>h6%x{Dsy7$TEXMT?CDzxv$)A|zbZ!xb#OTqe2z ztTO{SQ6jTF*pavuj9k=b+5yQFtnUr%6O-r-NUqyj1BgLjSTpCckRH4=|$zbz}t~0evY_ zGE1-txJ4fVl7nx%=f$6 zI(7aytc1I;iJb{D{hOpgG50WfM=eAa0G$Z~D_a+aOiiCk^eb{0auop|i=v&!Wk6S2 zHlws&gSyO{QRo9VoFFB%O40A42N(y%2he}?NC=zTq7!s3bG??DZWF1~ae4b%r3;0V ziWq*Z0py%0#msR>e2x9a*Y{`XXRME2?((gt$G%7o`%8bKBQsFLjoAm2;mxW_J=!8v z0+^lNEW0Jc%Q{DRy_P-0yReaa@~OEq=1l!T@9|F}#^yxe2Cv)auQ+-tWwlOjVf;Zi z@*F8!dn>zaf|K@c7kA^jtGjQtjzIMy8<M){m@)N$+92Aks9M`*<9E+_(b`ZmXvF>pS^(MRTMUBiFBQCT{&+IUF!}xJp(Qkt7d~GC^cBSC{-E|#6UX`O{qeOeX+4&X7CWeFt_rWsS*7yb4CB+ z#`<8aX1DIaGyh9dt2E?L0F`6oC(M{pUmuJZ5uob+a!wQJU{4cDw;5?nLf zX{?Q3@Pf7Dyj!_yY)!X|E8N;Ocs;b%Ub8mFW3C-vb4_Ipuj6?2lBKKH_H`%L#4F;s zyJmDP|0NU2$lA626e#{T5wBSjuU#9D*OzlyJ24OMlkNbk>b=&L!qRHY{ zlq!8iKkd}D1rV((LS&`jW>%^Q(S`-3v;e7h3RlYDzOD$te9T@W2(B;SB!m0<;#ZU^ 
z!JX)<8yETN#v(*kYSXrbaKn5J-!{i6RjRwIy2X7LAm!#_JV({pNtrZTg5Be!qYHf{b*GcpXYJXL+b}OcM#=I}tDBMx( z3&l#cuU73iWh|`y)y3Ma7&fSdwclB+onoch<%ydrR%HR~uPN4U#V%yUM4cJzcP%I; z)0Wj%B+t8xKu}u=h%o^nyVwFCUR$i)iYfi_0tD_@P)wm03xK$HL9r`YF{MAwrqa#@ z#T43MwJAs{D>hXr_6guheIit_p<`nf_GwS0SihB%O>RM_rWcmmo0Th~mP0cOjLquX zmz67aOYxq)2}!vUyd2}vNcU&o6}j#HQvFsAlU)zq3?By;mOGf0E5cjMVjJ|^@gar9 zm&^bG(@()HlTDDu$4a!0aqf+H(;q=XOZE8%0g;80%N9FFX}hf4KZ zIkxo+;W%2VpK_)8m#Kaw)Ll^jBc=MS9IAz0{TaSF-n1~Dlq=Pb)wz)#&%P_-k)*u^ zda-hHFbG*US-B$ToLyM%d{(Xq z@A-x0P%AxG&;WwkVOQrfLSE?T~UL(DneOJWC z4t4i(74`sYgZQ?67$cECn@CHt;e|CLhxRt_b~!uqGe;(Vc8seWuJ@aycmV*Ptc z-%+mg9o7&|GG6Sv>C$(UD}85Xl6iYCRke3PRX71EO%YP&UZQIJc89dEff6;dKPm6u zR0nDq*ZsU3y*l>ItAmYv2`-ERtOLagvhm+v>JQ~g-svsE6$^tQvH-G)sH5+VHw01@YzG9`mNlNV*OdakCf`CT&aGv*Q^uS zcS3mf-9x4NtsI^w3+eJ`sea0p>PJ=0I+1;s4=8wjq*T9^JC>Cz;^$bYe#({Vuc?0L z_cCu!Lu&NGI$`DTS6K)@@e%XOoy^LKk7f8cxv<=+tXxq}Ikm8y60h6%73D7uv?;8A zz*p)Qnu*eHw(zCj?1GEGg#|oo*x?Ur+h38DQTEH|f&R|VR4USPaVV1I&XD;*Z&7mn zjPynQ=pdY2W%{n@?bR-gRnY~Z9|$}Dj;}*I-Mxi=1#OgR^15E{W6B2hsR;@|)R|jR ztD(?BlkW6&98|S~-!8JEvnQ<71qmlJqc}o$g7%@Gn4WMJH9U^y^x=}F5hQQPu@Rqm z$v|uP)4%mWdc~JP!~5Op)k^VPb-~jIrQ49ERLAc7SH@ko80sCg8s?m}x01_?&Y@ zs!NY6oq_4egXlvXCTG5UhC&ofUOR~~i|?n&QmhaPZufG_$z4bQ_e;$<;>tU?yb6{c zxUc2nD}|E$QZWgw zsE_e{MiC(mGIw6{NeUnhW5Zk_r0PQNE%RsHG_?2#`7gN3zL3h7Dl(OCAYwKxq8;B! z!Sz8bwK2XW)P8Ms#m4F+MBueC)F1unh^Ae`;APs)^@Wj3dL!+0YeF?LR4q+au0hf? zb)ChdZax#-pDd4;Z$2Nq@jm|S2=Qz?}HaWnjU!{ji_~$SG1uD-wjxppp$ye8M}_v0<7RCa#wI zE>g}gxyE&?Y1LBhLNik@rF^?8wQ!`w$Zd`xUl#87bOxz3G40T3y$`y3d)`DDoY^hzP! 
zs_9cTN|ra|08OVV+r?;maLBKKRNQKseB3mh7*;v%j|IhNpbiwEHuK)MN8LU;{Sei+8zrgf0rNOx%_l15Xb0e7uz>m*?hWrGHD+fdfICFYWT%^~^SUn;NbMsiZ6;gUEzZ>C>qf*B(pahyrd$&w5z;YLR`_;JBa%U7rcAq2 zUuI>*WE(6qmS~QlXXnsQ8VNDRPeft0x9%!bl3`$r>PCJ3UmeFxYug6f3b?2@nyog( zY4RC7QidzYq0gcufe{f+=fGjvz82{tX({1ZUBOfvc#XO6* zrhhz(Nq3AmP+#?Wo|D6{ntoaz$PuFVUVLl|RnvVMxtTlYbQ2%& z{6>B;HLeGoM@VyOM*>%)U+pKDnHl!1b?dY9x+UkcQnz?e>el6X-O>l&U$@w9Ykrbh zlTle@fI$4b0U~B!BPvw6aq`G+rjH4#H9cfAlo?cI6hpFnWQ3&=$dDZ)Mmc0Ng%%=P znI0Ac(IIDX8WMd$L)hcB1PQC@Z+!_USM;D%4uT$q{h{8uHV!3WsNHEOvhxFy-;&vCS5vFcxpt z+rn5VT%a@-zIZ+s2las*%zD*joqkHQy*Cynq-ZpXOza0lfbW{uX-&Nz0sg_fPV=_h zXSD9*X>#rl2+E{4H1ho`u{a(G3wgKxdS18m!PDs$ zn|wd7ke;5kZp{kHAMbTb(od;dJScVRzkP)d3hY83e1F|ybzgjB7P4%S#9}NPMX|ss zFvFk#&SHxrI-lO+2)}L^>yjGVT%(ZxKdl{xM5wXOO>_N9V>tX{L)aFu(XUm3X`x#Ue0-_tSPinC%CT22^rsH1p^ zsrK3kVtrCqw4-m-$d9JO&d{{51ssW&p`RcJmFXu&sg@&BGW`U*KHF@M&eu-}1tPV~ zug-6qBrKbX!ta@oV7JC2RPVOZg~ru1r;?~?-(*oIVc#|NozqDeaM@GQ>4+;ZQ1vIPD%yOM!o%X(`lh4{KH1Yzk}2;tp}4 zHk3M|t^J7FK%Z&2Kpm0c(5WNXVW3#BgHJTx6#$1`w9zKEf_d(ShyF4WY=pEI;`kF0 z9-I9IdWU6fvrTsha1GnxQ4vIe5?6W$uQ27O*Oo4m&}UlAFghqX#;7XOI(U?YXdU_( zVr`c)yoDNfJ-nww^@eO|V06}&r$AEcvj5!W(U}ZN|KJeHKrm2cjN0B+2pF)_shG^d zFKE@FwP5Hv&XW^nJYK{OgbMU}JQ4DxqyRxtqylowg(!`Fnhin3&8;@5@^8(p4$wW_?1z&o!3Vo@a1k^pSJJQf)|3H(^j~z%4&`Ad=^to?N zQAqkS{rd8k9?-9_lYZ;B?2g2$kK0|^)Oqk`zEIncP_47XLy@jZcRxzwkBxVd9$-t- zKlm(w^`-CBWgz_xF45HL&YNqMmh5X-IT+XwwP0M8QpjmRG{LixMe$&x4K{3q9Z$G0 z$l#dYwz#RY^R?YoAH!epn>X0^B&6y^)+{jMjZns6>RQ5SjDTyF} z2$)`~L$1VQv_1-lyx@2%`X0A)|L%{wiLd3HDUG$OD$iqyE=)KxrjmLUUaGfyhY}eb zFd~JtVw0H@T>4T(pYTebGUbw-O1e3l;*_RwUtXW1mr483qdN>SO-15dm=usC7_zfk z)JZEWV~{n9L5LElb!)i!Mwq)$wyoXx?rY8>GqHyw_|UWka=(j1qasyI_!(<{vkl( zdO>G*+5uQlyiS`<83U?gknGTD6q-q6jBkZ6^#Yxd=eHCj*CtjwEj9;qMN7JWsPlMV zr6C$TfrB~PGsYGb+0WB&6U=Cap{Pas$(1I>1iCL_J)1{dO{C-qLuj#yKnQ$2T5R=b z(L1U;){XBQrc5jVvPE2y;J+RZRxY# zjG870Q?*tH9*{*(3z`#K+jI>6g3Jc&1m7bv3!U=VRkkqTt3|&$8rNS~d3B;3(TI;S zmxrKL9Z{mGx<8%7)DtM?(XJ{lnbk=21tE%mT*^fyM4rXEQMPN7qD9j&(dO)ux 
z%?N4f?6UqbwKWpyr=H3F4F`V>4Li?Vp?i|a1Pv&6ekvP5 zUFuq(g&89&jAc(u-OoPgw}}XNVdd`PoU>7J&bT&gcNK<>bmKNMbvvO$#OPX7SJp|i zle>b6XLKL>(A!EfS>0F12L22l5Sr%n3_ggyDBB^FS+`W9R(JXn zTA8BnafF?@9EI9mQT}R3_SJg88qs$OP83h!Gu8!`R(Yw+%KIdGOO>@o%ij}Ywb!b1 zfl>(F$C=;8ah{-ASkbW%Iukl^Zi40iUPb;on;0L$q?m#~v2F0dN)QA*cK;P_vqx9C zT1h_beVbKrl@Atw>_4X(Y;|2mzS4QMi@QTFA}5z1JwN0HS)s1Re&5*w`60Q?CZdR1 z21DjS80{aG|42VLMZ_fFvA?3oUQ=XrI%}yhW^PA+*5?(WFcW=d)2Pz<9Yi#z$dHAv zcM=M$Jo@ZNY-bu(;QKxutduc`08##%A9hp)E=w?wELAP59ILPvyI*3B<>{T;F02pk z>Q>Y?Y)pcPAbU}G7Z(x)+{uN-@Rsdqh!y{CE2JU(C8y0nJU8Iz3vL=Fa@=JVtJ=Gx z&#@F~b+S`WSQEgH5?AsYP_y3%PoZC3^gj1+37xR}3oaOJirpRq!KxCbCjvWlzDj{a zTY?h~ZkBDTVws%`jK65>>dmVf7hkHH!VKxbr(2a#2?2CL?lwsL9A`k9Y!k$Y`0#`>^%=r~bCY3$T%mFR5Dg3H+m+BIrRQcNJ!)F@Z(e#1;{jM+m*Y2|P zAHVv)zCsq!^n-u<;Xn8<-Qm>9Gw>6$huSuJ11x}5N1J!DyuwIb+S^7vB>QJWailsX z45b9jUTMB3xR|Q1p@iJ*q2ndaLx`<8+A4Vl+{gv@7iqCjBFT?v7~5XndTdyP^)iTH zRT~??=hQPvdt{8&$kTbmtn9ZGC4}`!lcL?d$7sU_3RM9}{NITACLTM-tyO_BX3B!8Hw>rnBocqxtbS-~%q3vQ(( zzDL&e0nr9u(AbE&4tg0-((wSa;HR+xtK)O!Iv^rWAF!4p+QB6jfXed`?Mr`qcWs7;*Z>>xtol{k}%Ms^vmoJy)&bgJ z+@s$b6(4k7Ad-3&N(&WgVMmVcjLTFAeBE2&_JwW16)D!LRq#-<4tCk!L8AkNvuHTp+GQ4e>KwSe54aDcOt|-2%yap z$DwW_!9MviC-KJW1~xG*C!%P`1*eeJiT+`gA*(y2%VamQVFz0phWzye)&yS1EPidR z@%FlvzaD2b`X5_`Q%P8g|0{mBix&Q*KCVr)IA;XSI(8` za2!^jCJc55PATG0+h1AI9piRMJgi$dI-ZqFb&ES#JQ}akEr)Xz|H<{cmY|rCKa{QF zg`(7p8kgY{~oQmaF6Pf*$xO*{~Ha?pCc1HHEu)(az}u4tOB zTc!kDsObESl!55=3WY^mhjrXezr1%1pUUyi`%bHT4Q9qg=aToC4|PAavkC8MUj{f% z6v~8c$&XcD)!w|$3T{WBA=a`8sa_OGx}ad9>^2JDAG3LXwPw4t`gd_zhFB>Axzk_^ z@1~AE^g$JVwzUtogs5$0IZe-f8|~gi&$vqeqz9c+S%lf0e1I%=FTapSb^yM7cG}u7 z^OrbNY?DbKgCrA821&el*hdb;gW8mTKLCAK0L4!QAf%?J4A26Eqj-_lbQdGZmf`*` z5>a0|=iK-W&6fskwD8JDxLQWyF~R`+n4o$uy7KpvJ4DR8AJ&BRg|U$<|5`n=d(CV1 z6l+Qkm^iCT56Fr?UV5N8S9&nQ_Pg|eNjnDCIo*twq6-qvw;@ay6>5OulR1u_v= zfFSSgq1~Nx)MKy4s=}5HIX`#QlSwLs$pRMF5i&uI(y2_-9crddQUuvyg7<(K^9x<+ zI8|t(>T9&dIM<0?nZ6dxGFx3GW0?j1N7MIww`7uEoqn?XU`=}R;M=3?UZ%w;A_Rhh`G+cMTn4JFtBDLF;P 
zN-Js-hgmxv$47JEbN%uD35!E244|+BWoWU=T5!6Dyagu-vU!96T*rV;hp^Vu$I#@h z;EFpcDk~YIt2`lFP47 z@QsKe(ul#ixYQsorxyuK_YYD-DTxFduSGb>Uz#qgkadJfHsT*fK6HW#B%!T1F6wk+ z9zjTabOhl}le7Wziav(JOxS1)Z3pcX;!aSr6TJTyTW9l9)#iRNF|61F=mdB*oxz%n zAFacQ=@YC2W)&fRZb}l3{+tLOt!R*5wInqG1u7ls_$3iF zf=aCxHsA`Cblf5rU`n?k-84ao*dvi!BtO$x%1ZyxlnlZ~l2&8=nTTGl1V(!*Kx42> z>F7#c>_kr2qQmGaK#fJ?F8~_>8&S-nd4OTHJ+nLt-V%jwWS=_4k?zyhIYamXZrOnd z2coCwv?1USZBo1Tt@Y@qhW1J5d3~_Pp?$X0-AC13(0Y z3{*kzrEU}0UPQalCKasJUebz*;ly%!oU%7Y90?}Le++W1xFT2%NRks z(Kn5fz4-w;X5;u%ih;0$#QmI+wL!Ey8NS>J99uA`G$qHM7OuSfKt_J<&C|bQdHiiO zPe{J-yAhz{GlG{nRupdnF=vmMd7W%*@}C#T4{P8j6#fa%dNO*M@?$de%(m3 zg{liO>Q4r=neKxITBf_}Lf-8RZtgx%wcs7cDW zQK3-;4_K<15zHyI-5(FIF_1_!f{%$wT-<}sqUdi8*LzZs4BPKqsc9Y(F*5Uf#5f(18gYZqR+mbzsqL=JBD3DgxwJRPs5_|}V>XOfq3^ZKW{6PY z$Gx2IlL$d#6HgKi{7X$ruQm9#H=*l!OJV^J8wcFZ6 zRMRF@hn>aCq>gH$^TMKe9Xc#78_+A!K%6uND!vo_%+y9I_X_`@P(p8<=896%Qkwl9 zn~n5pR5yt~DVKF(0TO{>*i1E^#O}p}Jobobja$SF()s01q18bWG^43WgutU?19!>C zf%_YESs&c&E|L@Z<=;)_r8eYu>+-uzuIv^|+u7?NTBKYxoGIW&d6ub-Wy5MnRbv}1 z__3%0U^Ro=(WOpTnjTr>%ym#B^P73V z7!}~w=U zmUn%Qsc65)h7)qJHAqv{?$L9Lsh~$rKK=FBvQtr z;QWE5M)Ce^aZ?F&jnLHy3N-Wn0$vU96K)2jDc8gi?Wi5K#9p(?pxD-wX({CrwP;lY zcS;k;dh5TEzNb;rpAYfroFLgGhE5|DVH37$|YAk<&x-Wo+6ylQz$7G zp%oNwQByxSO^2;+B(=0Ndu<=RgEF5FSnf5`ZW+5FxYzL%TT8@*UyG$l7TyJ%Kb*h6 z94zgS-3BuoY&X5&8*X~xf~=vL%x4%wD?Vy> z+s!83_$8X$0nQI3h=)8+r|Rp_rgPbVs$C6noHv(A^yOSOp)cohV|T3(v85Xu=jnEJ zxwV_~dng`)1mR+Cjtyy!VIFmJ>;=6kwZ!I_rj(yBJYbGp z!}1o~mJVvZpxznSu^qvL2MuoebkpJn0q(Z6j~lr4ZE61wl$jy2`vi&&zK#>FQhRj0 z&Lc*zxmG2I_+*W4SR3Nis=v<7xNe%O>sQdg~!J&XpI!-Wcfwf(wR=KE{?KuQ`gnC!s=RLII+SxH<5=t z{A8fVPZnb$z&eRQR^lWBOPK_fOq;t{5!SFLE5hRS zWJOrDd=VqR1r0sPf@MOGl3)Go#^iabMaPtd(79;@*^eQfR@rz&msk1y3wAuD*aoMO z@Pf^1=^L^;Y~V4HEjk3vgn#0A6~*$^OnPNpuD84dtg%&X9;)c!qJPe-u9%g#2P$1H z0!z0uU?ux{6`KLeUQL@B0gGg|2($rj*gBLx!l(P?JT{-}9M(H}u;NtxYAO}1%F|4U z2vrxL1;fA~4KJ!(Og~^x7r`iAflPT97fyjz*v;LNp!W`mxq0KT5Qw5VVTX+tmcyuI zaZVq=?2|4>|Kqlc5-7ca*z$Ajq#!Dmd#M-$#twc#iP}zZhj};|RLBgYcV{wWy8B)& 
zJ$RI-BCe>G!_sjl+30g!ZW;$i^^m(<9}t%vYEfxwVCI=#iW}ZhE9A zDiII8MDmK(&ppBhot)B+2Ua}4&zO$mK^ekb($p$bu;T zp-R*Q`*-EtV*hS4rn)}REWjp>!;$WxQGFumlGqFQ0QAbP&Anv0)YE^)Y@$g4P!mRO z@DjQMyPIul4HxHf zuy<-_B6j`|_Mlaf9?C|xh3j=Mu9yNrGg^QGQEuxp-3Miu-dC}A2`NA?*+kZ@7?AB^ zMkKe@|0fz^Yavw~JP>Vb3cjd_7{)u$9VS4lsA9#96;Q(P1{mxr?rreD;fxx%BR>EHTCrjZ>< zAJ<$h)&270Y`w(c;lz08Los+GXVMN#!d&Sx?0BKXOv#bqFbBGOZzbLV9k zWu4%M#ns@uA7fi&u_D`64GO8q-^cL|ErRHknM2WxOd2TWe07}h00>b6#W-ogCsK_0 zg!-aC!|-R77C&d!+2^*ps?^7xs8m&Xohs>!vhuM0uoh*ZX11~xz{=`H-#B3_2n%oF zwVmJ|Xy#xi(!$CF@CR6j={rz{r)$!Gfsmrzt2=)hIxojfcrS|)YT`w689oN^oU^st zRE{;Laukzv3OXMTlxejzO+}~TZ}dQFC;1ZEaU)VXOQ$F=MbyVNWwv6c%`LtG=Ir=D zMG7|~09X(kPIv)t6}%-HFbYWD<4z8q#Kvh#3yB=e=k&l|5MNuDUnEEyarObrQmM(_ zh^|uhkm zg-8ce1xs==3Zv_l=v^y;&qu*QYi7az_vO$CV&^1%NkmepWAq&z6U^#x5NVUeTek|n zbE?-C)d@$@%&68hQ3gPSo%d8c&s2f@cpqcLs>Pn&EUAdQ$?*0Uq2jf>D53urnhY3V z^@iZ*^Ro-Mb<&fr-w0p9OwbGv=;rx_%h}sb&r^4*@v?$m!Rwa>Ae5hvmD) z&Il2@KU=A^wbJ`Il&VK_))K$b9*3K#+AW_FdqoSReA9*SHFn1AU9qa>CK!k)G!2Z! zs&bOMZDsY)d$3su0IJG9AuN-5Y;y~#qM-Cn#Bm#68dC@2cm$QQePxv8duUry?%`F= zmBM*O`O$85zU@=UIuk~?DgWm}D>cI`(05*NrPn^Y<qUtn?FA5%AyD>Q`M=;yA-nfx-uTD(BEZQh4;(DlgR-MJVRe1PISGEMgfZghvF# z#Q{cg16eMv!F4GF&0Vn`V_qglgz3!ZVJB7RX6;QsjP3*vv6e+Iw}QQcwa-b^ zVkg3({UnM+618|s=^2cNXJ%flr{lt_;jor*KZjh+R|(!`w2PZazZeueJuJj+Xp9M@ zX1J|pay)el(-OB4r%g~c`rMSSWk%e7Et62qx=Q$kEmQ`muSs*U7-aElH78G8VrS)? 
z<`J;ik&2C(zq<8GRwHa%dq$yoJiGf7#rfgF>jwyfbPGL)b9|5cq6hGKXIW3AL=BX9 z!|eZ*lF5bLZpYzS8xks%qLG4qv_OheY;INdYH`zY2&acx^>4>G2+7KAmQS`Ajmt&c zoDcB4R>ZcB^ z19`wcuRB1MIhF73wBZ+!H4iJ06N0bozk!&XwKeJ5ns6X1Nh=>%kF-B+r*;mWT7`3I zuKI>I6ixrttYOZprr~vJ*dGM!4_#o4NO03@7(E0l)qmfW{;m%2!-<)>-5)(@D7ibS zSf4cNT1j%^qUyVhmfAAg!0a9f-7Xv1-uWdWt1GRJq*bl=67ooXYE(dqC#;rMaqgAK z_+ss|`IMdS~J`lOAx&?0l#9g8X2viBf zdQg#hov&RS3sS&tlJmZ>Qqz%}+7m+eEqNo=(h+3E^t*;zu#k`L+3IN{qOBe@tbHE81TIV|^VerTDn~Ti+-cVgM)5&&6Kt&WPJ~kLL81*( ze>!x}U@Z)SiV37{&$WTx;7h7la^HAhDj0g|1_OPh#t~&fK!B6>QJSLc!Ll!LAh$#y zN?)wH(lh6BEr#k!Pw_h+*b~{04V{yS&f4j)Ct!Yvx`jQne&AqlJBwFn`%CL|iIv~k z#@wr(wvLs+?ZbUkT+KEHAMu%b*t}4|LeuNk#7zUoap6P3Dh=(m$C0Xfpkv9_?A(@y7ro5j%fM$6Z3iO)#DX zT1=$h*iUhCAO1VX72F0&8Mtf@&c;)`i|KFkDP|@~zA|t#XE1odc-Q8?0FLXOf7Hp( zro>{Csa>rNaKmM^YcdQ%hxQ%#;48aYsEl^~%4pXBNSDoy)08cn9bB7ic4R!O$NiQD zP}%H|_$-@U>z25hXS2f_P{$NteYb3OEWW^^VbzTVTrwwa2-F!U+%X4a3)2~}*hBX$ z5V2bFX@!8NiQ1cW1V2IB4&dw#%p|iD&8S|WQho+&!G5EUmpn^5^xFCf{1Wx-hcnmm z7cxK&!w)TOY?aPH1Sn~rhw?`(Wl*!#iUM%l7YA*o09l^&-FoG9*N3J;@u@3UrYB|? 
zPld(Q-JvR%Fr2k|(Yo3g3$FeI8Idh<7MqKInA1yTDQ&3O<1P!#r3hkZ_Wr3vxVD(( z^c?+xkPhk)vy{Gu5C|WtTj@A}Jh`>#47}o6w0$Iq;q;A>VsanY5RgaYv{RlD#FZ#% zE7?e2&&k9@Vah4}J#sK@yTI92suL~&0RW1Dvs8lEg3kq`%>AUP7qnEEeZW@~JwB}6 zaYd4!{Bl|{746`cJBhL-!&3SXd4vG_?+olI=bio(YTpCR30P9Lw9^xTa<(bMK10RECs9?&2049;-TE63* z49598DnbjhP8D9D7uw{XfF51GC zb`Z3ff@MZvwU0isGT|Hz+=uiuh&xWarZ6X%T*Ph`O^-4x0Ns>?t3{T1D1K|+zgkO; zz4F^9kcyagYRy`#YQbu38$=oc$g6)s$1PHzRR0FGx1jz>N)n^16kcG%@>3C%aVz8t0$IdWZe+WvB|*&>(*`wrX{k632Kci@Fh(&2;N7lR(|9-0y8H9DfW8$% zAE#m1Q5ZrZV&q8l(UlHqUOheTucJaClYco98#n`n%eBEA&74n%q)l>F)>}0vT8qRN#MEu1j z#pVZ)TkfmZ+E>goHd(6~d)sZgukS{0&ewOLOC+|#wq?JR{$ku2OUbb!VUkAgwa;C( z1i<3@L@4&c%F=&pHdGsl9SPyDJE>9=vO?$*9Lr*jqQ}j3oEfPsxKCE6x`1;Vml~nv zTqe3pt0Fi_A1kO}AYNs79HJ_iBmR{98#R$aVYBOQO^8HXCLKSWw4V0oI{suO^nOb` z=mvQSu9&|$+V#O8tW2(-Y<)e_Y(J_5^Z4swz=$#~d9^-7qLl_Sgp+p04We%a;)~5x zu|pN+zwIE0w4(;eN5O~~%hjs0F~bFW(4x3tQ&z>5)3C{5a`wvZp-g}VT+3B|NQCpD>E~+yR*I}y_o-O`vs<=T}=kv#g z8+!>jj{wOYo3!U!$uRVE^==LOzBEY*dqZIOVyI?;%y8!Rg9uw-PA%gr;_W2LVc^0h z7>ZooI-oO{YJlaUGwnd8VaYt^7!Agx#9(Ad#2*bkp^oHPVcslSF&FEf`p3a5Zihn$aSLeL*iVtj?BnB^&jDYA& z#aUh~ru^&^jFJgXA{l)H2(plQ8JI_N7DJXkT6Mr${F?)KibrJO=>hD$n2#$G22tdg z`0)u8#RKU^KYTaw7@f3hG zQ(#VClKxEGNd`6G?=U((O-hqz6&e$S9EMNTx)Z>0#-L%yp}uQlgT)cw*?%=ygbN$< zTCz&{1D)jduY-qp)r$!HgiJ_gb^))K^(@|+ah%NTc4BZGZf(1Spu#k@RP1KyL#cEQ zYG$SG$MGC~M6P`;na%Vp1o;9-o9Do9C!A-0yo=|#+V$cfz_Fyei;kAJV-mu#t)_^^l!a}=|R%b@<+O)DG9*=E%m3}L@bvw*>Fbpc8G!WW+ zq<_wpkrIK)J@gy|-wyLTn!`+2jx^#m(Z_5P^~iG*;7U9hk3<)rt5d>IdMra}`jeh^PGO=6uxrRiz@O4Ice5vWmEkIvxTm#;@>E#&{X!GsMJoKna{m}`<5jzA9d zKY&=t z0@{*RlRh1!PtU7SvQDwGvhmOa^m^cc-_vw*6q#VD5#g6@S=npdLo zC{3#g%iK%lZJjw+L9m#w{ZNbqn!j_>Qv6t155ibm1P6)@Ri>RY3Jg;br2qF{WBFWG z)`Cd;aYglXUSwJj*&%I5PjX@Q@|ou2oY9qb@I*FvC5ovFo@eNeCt~d&C07|;MeiE* z-EsSc=LdhbsgWf+Vx!M;g!1YvyDxf$7K2nPXY&DMzb5H; z##Gd3cTD*sy~#E#T4DOJ;ALYE$EzOR96xBM>R<(UQFuuE!Xu6sviXz+43Qe@bvlcO zND26S8gEnUWaE8A+Ca@%bmm9Z&trK%z2f|6-qGXF*N&zq@=jd{(yw8JkdEhXjn$ug 
z{&q^*kJ14T&zt2cBf+Kew8VCq;BOLl`QwZfrx-4|qbXE`BWsX1PCp+ZMK1QgQ?Us*Fd|pMU@0CU+)hpdoN%ab# zN9`5p=}~(H&<*zr^mM~lu}F!DJa3k4bKf!7jDA9ghDZ;aMDd88az+~IZr$B#T!-X~ z5#hb5qZD+L6_u}D~X-X5D}Ieb6Q{YmQlZGO0-$V>Vv6k zST3%^@k&gvDCkJsu&uiS#f{|B=f-Bu7EP3yD@oY9>(*qY8RZe&h{`7GnHCB&WJzF@ zBsgDS)UK}Ys)n=oy%odR>Z^#1AX|o@Tx4{HvrJ_CevD<-4W+p`I+`db!VKz7ouM%U zoU#oa6ugJb*i(ZfFuh>Ia;y50r(kS!n)^c-8%g(!YE6xdG0G!C-fi1EU5-7~xRdUj zq#N$=jy~Nfg$vdXXn_+B3PkXOLi9c%TE{gPER?xy{sTv}sCQpmjv3cKQA?BSX7QMY9}5Q@xm1Tn{qf+NV}qK+Ez6FGuh?GsGF&U#ucdYa(d zxQHjn^%PY|yvs2O!{(?m7Nl1#<_fY7s)8ZD9Op~t3Nm>$R}l7_1y_&_-W6n-TtU{c zRup__>>P@#`_Np?YXm3dqSpxEEBoL=n`Y_AHTaYnFYWXvI30btBUvKrB`14k$$#u{ zHcDCoQ!?zaVmQ;vES?{vJM_jCRMN_Y=xmjLBG(Q1_BdPRWelA9(AkX5$f*;+vQO#$ zq?{HIs!dFvDEL%#89R0+Y*FM0k*5s=QgUAQj59~@$TXiPUXVFGH`=_`KgRwm(*RCH zI`VmgZw(}227Kr*+1TnVxi}o7g`RJa;v;4WD#qn4~D@z4^Greek&+tU> zrFIq=q;j#E$k_aSf@tm#m$FvU^FjI*HY#JP?m`te07HfImAU+k8^Zn7vH@Usn-s|D zRP9OdF|7@i!^fQPvCkI6B0lEk<70pA8RO$X)=K*2ApKH?kA1Z=J`O(PhKNqJI1sjb zS&MF*(+hHbDGl?XXVjWl5;hNd(7~3+b~9_wS*3e6>4% zAq{1Y4v#$JHr$Mu43E~(wP8bk<`Y$Lzm}DCRP5*nQC+&<&SDoAV&Y=zHa>KWlQ>DQ zm|2KT?mk`95+zQtKG?vt@Uw%_pO6QenLs)6Vpsa6U7DGGPM$P-&;EwFy#hSW1U)`< z${CMsG@d5oGX5*|zU-M@u~nqkVSGXGO={t7<}I{(zk+i-xQvsYbrR9UX7t{=PA50# zMXZ1^=VE))?ZIaaQhWqcn&m7efH^=juyr}|m<_h)xb%ZVkvVw?Me^>QmLU``flE?n zA>03kexvoc!f(k$Q~KSfjc9^cHm6eFwthpfSMSh?)DV6z>&;#ckoDMeHdS6BxijXO z!y*P%&+UIu18ysoL`@-4a>ac5bM{d|^5Cv?&q5l+NUC#HLH-X4B%?tm{T^{ND0`I8TS22$;ZXhyzxIsqJ_gV>3|ZXwY?f z$!v>|4Lg^N^IvW{7dJ?yXj?qxY>U+WgQFpHmvigUr6&6zXY|C%z>4Iwbmn^Jv~<`! 
z&7<^R0WP!WnS2W*@e-|GJ%8JO&GwZ#mV=epaa-0bW_@fN9Jl7o7iy~K3#9-C%lVl3 zLftFcW4un)lBK56KmGr)_x5piT~~egx#zyk+&eEv8qG-Z$nm+?jw25HZEyP3)Q3$87GB4jy1LiXF?P@X#`Oc5#}KR4?%_jR5}DF3`9w%L?Kg9 zh?)o(f}rB_2pB+Zha$)Q{?^{-<<5*ODUogdc&v}-o_o(e`>eh8+Usqvy_U}6a(sBN zo!${Q;|Ro21W1gFpaw1#+JApeimv+$f|(dSJ6U_0I2eg2^GWXDlqwvmpB(0K<+!zZ zLXmX`wM5p6Lg>O_(whhS5Ps10e69AhaDgMB-sRTJ^MxZ2G3m&d_1%mIb7UL>8D716 zuE{sY#TaFJ3C^cpnyDD?WO1lfksW$%jVrqkEJ06_r)z z#5mRM&db3e*l)}&rWxLrdUT$1oM(~m$+<4oZ*|uBLO1m%arpp!ZZ@3(UWfe`7+jfj z_=LLUcGH#xrw^T8Y(vn!E~bc&9u7(rqj*b-h4yBga@WdmOU=0z2As%~XEEWF&YrRMOKZsUYr+38?9zFDfhW>5`*9MfxS)fM zSh9VMe<0LmKu%;pD#RLE8E^Rbu>HX4XK{WLRguYrK@f&bdIZES&4kC2UxR6K1{u0B;OC&?n;l_mpuYsk9(|C z*QV{|DSg|tJ)CJ!H#RFAZ2Ph&b&Fk>2L7xQTqh?Q>`*C?Z^O#X!eeSoG{Br163Az! zA{*hDiaq`ami9u`h~z8OkDG z4uH_k$d`lvP&mG0O9vXuqgf^ea%VKlp+*+X0vY7dCFpR9E^%(8Vwem5rTj*v@C5dC zc0)z&vW+}8__7=1^2mOFr~jEFt8Rnug>(WSyl>My1$N-4p%9yM9{uQJ!l)0{@V=!M=^H#(-k%V$1nR^FN@js z4_Yradx4)iA^>Vuq_nU@UsDSC;ubx5GHoOF?b^r1&aiGelR-SoGthG_ArU=2W>E%I zjk6!JJ>T>dKSW!dd#|YyDl62&07#U!zf?@z2W6Am_m=D`!HAp8$SYhF)2hX zPYUsNZs$oM-r2+_9X@Eg(%P^gD7Bx&HKbNv~I#fJY4puN>PTWX~x$n@~)N~V2lItRu>nmX>3G4 zZFTpiOK!$FPQO-^)1#=BuK3p6IjDR(Drs7eZ+#;AQA;B?%dunH`nAXr5JO8=%am;x zo;ga$&HD$nSe1;9&B>B{k2aSS$=RioT!Z?7HL`=-A}d)tJil3g zuIsdL#BEEW*+#3Y?0|1m-W!`o^hYs6h2jvO9euRKd>ieu*^?aVJ?^e2V3Kq}S+qYTO1K*co%zRLTftmxU@cvu3m*>>q zt(0Rmr!dtS$ZsTA?L8jWlMYtig8uepWj1>SYJq!$c^F;VRmRemPT*6Uk!sVbBjoM0 zys3^*vh(<(*vyvNtWagsU^q{U_%>BXFoAGJV+I$7pyjwBnG(1`nIWfm1ErHh3dHq+ z1EoxGu-NfWsA^4bt%t-Aj1qr&9^SEO7(+K->T-vlAftBNFs?Eu*ZYf=v6`uN8LLm} zF4uXrzy*HsDRV^sDy{qC_?qYwM*e3ym;=$E5a#Jp>so+5VF?M8M3FBiU5YqNk=^a> z9PeyjA78n8p4ZM^OJcq{}CM^@Bx=LFXYndHrSR=dU zG5}c3&;lNzS~5*H*+FXq=P2 zO6FOnJ5Y~)XYcac@0*6>Yu#2XfS++;3}F3emP~*zv*py2q^&s{ebzk_RB83$C2;aF z=dSPyket4a>q&xrl(~rbWL|al(q82_-6F4&YpLH0>$MsRepnk2B0!v9JR+Io$GXc_ zK?G`3`>VY3P}Tf?7dt$$h0Wn)ReT(E0oK_xj+u-xd4uV`DNSO8I3L&GgjLjlL{FU) zQaun8J|*WM%BJX2-X8aG99fCG21(YV9AfOo|D?WSRP56y`|sm?iYHF!ezp&PPumOi*4dOqzvcDn+=q@pIZad}+wFu0Ujyvz?AM)+%WYvvlOr 
zI&xfMx=N6+mJ?kP@ltflU+my)EwczA7w1YB0nZrHUJk!pgJi?%1;D2OP@?c+J0|G6 z>q!&)(OB!&q|}O$s>7-=jdGMOkVNm$@^J4s$MSIbU`|op=?YpBvtsk9lB(=l>+PGI zhOq&8+-NB`AaW0QsTWG|Z0RpYIdT}rq^8;J9+<6iLOf_9sV^3G_TxRTL9&xu>5;WD zxpda#>F>4NE|OhTwdGD23mOEROB_vHgWc6p{tNAIDc&^fHMn6V1H`@h4F3gttR^7h z!iLv~_CqvVgZZklJOE^`t{VL=$sOx}D>xs8lng9idzde9F^mgo<8b<;xZ^DM@IbuL zt3i=l#p0#~XRp0sV>G={$)+;)XQmRksU^pOs7@b_!uitNCQfv|7?}<94dmo@7+gYTKG7>5+qIwNNg61f!$ssA;jvo_6VOQZ6VLB$s#e zme4zu*vgc`zX*0&8fh0IYXYmGjo;j>kPX^Qd}X4tR85kGlJk;xRF*8q8;0X;Bk{Jl zdxV4zyDfdwvvh)GD1c1ea!GiUP1faNSI#1Sqibnq*UH$mYmytI(Gpr7Q0fv2+(hll zE0fBDuEHgiEUm)jONNaOvnTM~k=t=Zy(qs~s-%UTY8>hwwC?g6zvM#A;P{X|RlXk- zU|DWeVm(}0OLq1k;)GYkoSA?rT&d#`G)>inmR8w%n@&>M$w(pDr9z9ZXYYUg?snyF zeiDSBC~R4B!%BR^UG^iS!jHP=k{-IXYVfy}JX^^(%^=k@fKhABLl*LRlN>Xz@{Soe zCmOgMGiyHd4fV>xxMWeK}=X&BMvFAqCu;o z7$yT6GY4)o)^$MX9>G*{h*us|m!_&IqoqkYKL)ryLJi%_Y_!Sl~SBr1FB_M-{6UF-7m^GrehF-D;%Y|9F zItgTf_Qx;EY|_xINNClLi+C$(6$50My|f_PB(p)dk_A#uJ~e^8q}QoMB`a~_hnv7YR0;X(IXkyAf zt?z7l9o8qbHl?uB_Jx%NT&2EZ$0yThBthCgL6zJ?^e~Y6AT4XwX5lilL#W&cx z-fVyE;FFh>L#zyG<@r$(w8Gu3%}@Xt0a-JV+04bRhaslqQ!OPo>$! zeZYm*W1l3)h-Kp`lASj&K=yit>>z=>OtA%%%oRZ#^zgCEJqQkSl1Lp9eqsP}t`_}5 z73E!rGr(9=kf4fI1xXdm0*Q)r6(O-sz)#+-3H7>m90ucI!P6CAZ&a{l6d-)DRvaA=2*F7Doi44RlHjIF-tPRJb*y zxzvdkZ!j{%m&uznfL;GFWt`O)%sSp{i^MYjMhDG!Kg&z)_J!3!8+lC#yxMihdXr^x zH7Rx2W=;pqt1h}P;8{wcieDYSGJdJsr$k>r=df`peUW&j#>y`_<}+IB*r2=kju;?~ zCv17_uRUN}k7!XZ@yKJuFOOgG&5oGPOW^Bk%F#(_Ha}Brf8G*|Nj%`H8WrCMxBMMN z9D@`d_{nbu!A5T5^x>vm#>%?*rQTTCSHIMmXQc&qzzxBS;Bz(&zD>xEa7sP`u-|CG z8)-1XYsn|;ZJPacEjSUYwV$Y)j!BHN6_IN~PxjD(j)xA`#UJyh4(KT}gfi=9T} z9DnqxlH#A)jQMP-BU`?u9_?}aFp`aS9!}};S$|}~$nY4rT-tQb4wWhzkzQYoRkV}J zX3`&4l!&rYUTJZ+v>awk*#f^)rFXSUdbq;FE|+OwIwXy7=mz$JxNMwG-&H5WDhvE{ zi45^*W`Wxgk|=gn3@h$aNT7OxZN6B!gbhvPL#b6xr8OhPo)q#>wtj@Pa@7)*%F2Oe zE=)eIJaZ+!aoya-pWh_;9I`x2rer0dqN|_-rAdwqCgnYl24BI4+W{guzG3{^Gdxp! 
zgOGJ0Otok$#DIY}EL$YHs*Rmx$#+&8v%06ts*Qc7l$C_2T7NaKcTgC@&YPnAIdBAg z7}%CN?^6W|>ULD_h8ROefqvcMQGueW7YTX9%K?SjBt_8+RU+5mbLCB_?$Xn)Gjzq> zwrb#&k>L732e>6r;VOiWs+?iTfDbM>piovOd6k7}!FH-PVR60r8)2!}7w-dpe&=Om z*N^@R-oVGGrTwnB8g}fM2|^K7p=_6oH5b&y2Sor>%&&(Z*C7FvB*dh&w|D{r^8pP$Rpab>$uwtOPO`~4?s?+dH}94^CCxaaX#-`$NI&N*{QQa27U)shd`}nV0tPU(sr%_{}Fu&@d=h& zI9gb+ZaaXLq|1CaAZR{Sa%G`>p^_a|eZ#~VWa;ab)UtXdB2`$5uSP;w;*h*IEtp}< z%#|n6V$yVHCUiA3q1%ox*bEr+0GXQWiz-ecOoWamE6tz%QYq})TUO25#r0Ne!UnDF zTT;yTX2|O8L{>|c0igI^44QO5S;kZs{c4A8gHC+u7@8^-`c>MsHF$|hzH2o|U2P3s z%pP+M8v%y_CQ?7Ej32KtHH}McR-GDxx*N{AoU?1U2X}0UU$Z^9E*j4bVe%}f3FQkG zhT22h%acPaqON3eh%LP2OP|2DEMZ%^(t)r6a;>|1#a+q5(C%nkey1zu=|%cV7J0Uu z{GOiOSbP@wk9x8`?vnAfq_?tLbylD3;OR;Blq~)CqyW{9m6kfTDI5X$C!5s4VKyl= zc9}=}K0!QOg!tk|`-Gkii_c!((K6jO+B2Z<66W@_nyWO$dd=;Oo7*TOE2b>b0p1QMoyH*o9j|Nq;G0dbgE)rK&Y`+oRc z?;5U*{PTbDFRO2guLy?Lf6ra7|HuF8pLBiat~Ytu zkD5Kb<$rz4P3vwPeAVl2FMZpsuim-+=EnDaU-w(y`t9HS+B^U4yEeUX?e6{;^<91M z+rQ)AZ2YHhi>`U`wJ+Jc<)tsXZtL|oY2!WE-)=<_0K9co_$(07*`$%l`B(W+UufE+XS&y zz(71&U0X#nf!cQP!`8r(wZd>;xopY?@NHf(?Qa9?eE_%ms|#9Fd!k(ra&IdK$@b_cJ#2<9JU| ztI#Erb07JDg}24EH}JFeKQUgSzYPiQGp`{{vnTs{$4ej|-H9`ee-q;2<(vx%zJ}OB z;`78%I~;D+ba`yZtNHwZqY+>0kX0$I-QN2y21SK8HlA5RDprf1g20;fa5&S-l_H@> zu~@9$^}BoCU(yJ0R4N1tw`C8itGO}?D}BG9QfQh~a4xA1qe>&J^@GS&qiHLw`hL+r z3VDgM;ZRHZKuzUUX?$q4#*+$5YT>&v_|;h#R;}?R=b<*f#9lrf5d2ms=KBF4XVFl! 
zkuq+)b_!ijhMf6cVdo9o5`G<{iMY~msufTXPF3Jv_Kco74OC|Z71rjWpjtAhzT%)FE=o`_sB%?R=?!t;9#pve{{a}K)rRM=cr?ja@Qq2vuH|&%LuKzJ&dpB6{#pvjc z$JoW$pdi2&dyg0}PtvCR7^zF+p|NqBCz2OG6Tf_3}B^%XK8Wru-at4%`^~Nlj(ru_n z2)N-b;MPx)A!1asC?@pHV4`P~A)BerB6IS9+$_poXQHb+GZ?!I2IDFM%c)ag@~Ois z#nlD1sqW0$>o#l8&3vk&7quzMR=x-7i5*_%!%Sg!&`&T=|Xf%plxz=y+8LX0?wug0(QRS&EeR3ownTNI)`dm zR+XN;qHK|ZYE|P-a*PxL&{I7RE9Q^M#l8-`=l&tuYibRB2@x9r<_aM)wLHZA{8jJK zm-JOaL*m9uOs{AQZ&u1CZP@lK!*00VI|j{IyA^GLxw$Il2`ZHNYR*HfgE*dgd2&{2q#$yNR3Z41P|kDK$3w<*9N`*q zKz&Yqf7(i38h@a+rtt^SUdkVq%Nb)3){t4{-L`I>?;Y;PB35uWI7U}7@4HJM(ECHy z&Z3BVRF16UXMDRt8XL8J+NcE!*=@Fs2)nhy_5r_!@B~mQTsO2;p~nn|b{%sv3M|_$ zb64^~RG4{jcLF?6b_==*Tt(c5{(Olt=FLDeOftDondgwB(3-spvA52#&zAs-u}w?C zVM1a-PK}y^oz5dbM8U$gqhgjaka!uwQtk#dV12=s$STOZ6f}m0#?a7M?m%N`Xtb=_ z0d(&okrb8!$>Oig&}i4OBUVFW_5;M)aAW?t6t7V8>UgcDcvWdr1&-wy^@+Wu28)J` zv6=tWZI&nl4t^0~vq;Jpkfc37?A0qIrkh!WqO*2zIpJr1i&%*_dbC}U%9irQ$&){p zJQ?h?D=sKc*!xiNKo&*$Lh_`$=ig}+JHhRR0!TKYz^BOs$7TOa8V@1f=pR~$8kVtU za-Qw#{df8TYi2=ItVfA(*@7leT>nL(;<6oPD$%oc<B1@0T>4EGqrks(qT&%2A`dAHXPw4Yph123dpWArN<$qw~{7y@hy35JYo znc`|{pVO#~YyF`5^CH+U%1oqGyy|T$l7?=WMBs>Vp#Lz9p3PF%%oHGaoL7JZdhdg7 zP)5JJ1{RaD;zc~!D_L=O&x7KgnV^tw)W8?>MS6Q<$pC-A11=$IfXk#xEhK~3S}Z7@ zLIJrJiL3f&PTIkIvT+7}oX9u9Oh6H~5NVSEd9qV-VhgdQotk%TymD`;^y+57dw|Ue3rTV(^cnk&n1s&&qdGW4_t6At;HqJ<(_Y5 zE{WRioJ+8EiF3(}btU6wldYt(ePPjLd=xDoIvWp737)WL*kAXq$o*l_Enscsb36e5 z(o+-YJO{~xZPd9ss$oHKaX-^mqSHy-g5rXrZPuN9p?dK$@$0l06TjSZ;ujtRq_!NJrp1_A zqk~ZtTNM*R+M0VU;~1EHrp1J*w5{?UNn4x3OowqbKvA)r#N`!!$I@w|Fdai+8CL9A z7A&6lShJ=d(TwSQkSQ?6Po26r*DHP=VPAlm{`{(lXu)t>@gl5K-gJU1UY@!-7CR=^IuwPRp`&x^6jUzhGsDzbb*BoMr9`eMQW-}_ z%qkuu6dX3`NyLFRu6EA9Fe)jRdUD3gGR|eGb|n+mrh))Ql6$pCckr?fECKKe zSf&gXa~N<)@j(CfU|Qi7OnzlI%x{TYm_F2j?F<*{knU)-*j1Mm2o-)j+oUI|0T1TE ze)t%l2l{CQ8wgr-Y5c(1VwaDt6%2_YTVp`sg^dbE%C1k!*v?~Dcux1fQFU!20l%%28E>GhabbR~iOLRp)-X$u zOvW1Kte5{sz06xL@)EjUj(7B8O-|&!%olq(VZHo&^|D~SK!v`SQysln6P=>;uq+gN zIc>dQ5P>1E3kMzxBeEts{N;ODEcUWwy^xNQb;v=SQXd_OeBNxvnxV%+H_LlFoHv`b 
zW)G;D&W&~S_DgxQIcp{zGMUety>YJ8o_NjoMBW+U8@M$=gm? z+wW7`1FN?EwY=>CYx})wJGpAxPvvbVt?j>7+o@IC{(9ba%G&-bwN;L0Pc{X5cq(r@ zZEe3tZIzgEW!q2ZZD*|Qzf{}VRoi|hZ#!#k|ApH6lMZe!QZbtRM&5SL+WvF3^#?_k zwVln|&Rg5JtF1rev8?TH=50?{+wWG}g;nAGt-S4mwf$#m>(3-C>-V#H+tb$eezny( zf|WQsnzvoFwr^8gMfb04`?9x2UZG&{wwouX)=8*7m#9R?*volU2#>36E>^zx?1I-5>g)BDua9o8!_&o6cIVZ&K5_ zujDeutXQFK_zaI-Aa0(|ggjh+9Po{nmqi6kMXCa?a)dT*UkUEz3*;OJ11? z&6D+_(6lBCHg>db0tVz)O9l9Xjroo0<+L@aBn@vHoT(R09&3XBzbNCHUQVOaos}mG z+hB}eWE+g?Q)zugXINo$Sg?3V=vxrw>aU>9PFSboUFf{tP$w0yGt;wExZa54Nm<3- z5T==o_Ej1cbmyK9Gv4&wNwUW>L=bEjeL;yqK$bQ0T0 zXN$qWxm)0zJ`Xr&I(k!6TMU3hMco28)YV0t7&zB*wU~i(Zbb*lC^;m(LQAc+9GuZ^ z&EV@Z+%KO&?m#O%T+VZ4AwTSgEP+lMK0sYK{Ab)_h1&Zn#{FIFz+`uEsjFY?&u(+# z9kRyZig}rGi8=ke=n*}1h@H42yh$FKX?r!lJ; z88_+t%o;-O(y-?oSkeHD$LLQb`Q1`9?6fs|oth~DYDcrL&%OcC{iMXg+de;!`O-T9cd9WTB%;kTqGbCa+SH)2>O9*@imD3$H%YD2in@HIBws z@L4&WvIV>-kJgepNv_~(k@TXD!Jb>uK{86DM#|BA#DdAB(WXf6MuD@>n&7odO+OFL zM6<|>)RdHcO=Cs-{Ub-yDqmhi9B39tphxu3ekAcBU2AVopV>$b@-3NCXUQ;E2b(D? zI=rHzWI7+o6z6C%Ry1uRc_qA=y=NEDB&CF4opk3J@t#FBmC{{ksYSx9-2D)Vj>F&p z12@`}Ax!+h&s>7Wh&P;v^`3Mex>VSQK3|kLFn1+Sc~oXH-(!Ea1N&-9)?J7FqyCZ9 zrcQnJa~(+2BYNm4*gvjous^TQ4Ex9UmYgu`NYE6kF zoM6{M!ye}h-hS;spcNm|C-)}xIi>DQE8fplK`TCtZkjv1)t~yAqMsw`Q*u<#&losg zbDNpdJ?X~uQ!AcMX6;Q*cyjPCquALVt-Y5E4Rh9{l2pB>f2?Ep)&y;SUhfB%Ibb<1 z7#PzjeR6L>pQo)Gk@Rt{3S&C6qCf0~9$UkU?wL%TNO$GWb__%y=+%q7`kcT34NH3E zC}mnp$w`NXr3?*Yy+tJ20LIl4^?+VRR(= z1078W+4oHjSd;73WU`~lVAf>Pnru~*sg5Q?S(7Pia-Et?Ta!w*-*lw6NRev7-cYZh zaK`s$2zQ%xPxTgw(zTxH)jnu4>sqfs_|e{CtyF`7d7UGmUC2Jhr{?>lKKcEG1-j4n zvK4`_@c^)RxdM!?G_;~)8XOo4z0dc)1@8?nyl>HIxZxx>!>wq^!KcNT|Ilw*oPT?E z6$=h-e(@IEX~XAA3+}X$$IFEBMQf6i$Jq|@P!kr5BabEDwB#4}xenTJO_v;PjIxz1 zrc|J$vA!Z%xYnb6HtplC^$JSb*Joi$1np^S)WC&eCi=StA)h85x z-1m7(pZvaHea`iXdXv*xpT}LFk?-?NUy<=Rbe-t4An&t4lyZ4XZgZ z7W>4lU#cZQZZewWOW<5zkwdBp#|8R~LlRKBg0B0E9MZKO)eb90kZZk?uKSBi!C;W2 z=iI8%wa%|f%Ii~7o==_Ca&+whfUdbJ()G$dp|xrC32u-0K9A~?-)F7QDQy5Fw6i{S zQkgz8y6!L1pc~p;|MSgtd2a^|ev2Wf3n|GxXF)OZhReGVO7c2y4;97@wlC{rcoh 
z6MRDCj>Zm<#KLKmV_X$z>|j)+TkBKw>H9q7`#hpgexJ5JNieT5&t!cba($*W_Ec1) zG4)BDW%PX081KEDH1@|Aq_J<6t<4%fS4#HTNMyFzmbEphB(+o*kBT&AO_=$4%^o

    x1NvU) zuoIGvax5VdbyRcl7B-^AlEBB)6H-`#hi$QyIFx8V>09~Gl}D^Nia*E7_=!u)2}C5| zUm$`tc6Sb*s5SEZ*i}yk14>qA^Fm|GId5;(_2=@cX2BrNYxVNd+O#k{!$+LZR;GU< zFkh#sym4`So?pOyl9Pb>1s-jFIoZ`T@^%lAZE2W=)WR}iFot#y8%XBgNtPr{9_8iR zc#irt5g$CGDN(qN+4? z+A<*(a-X4p=}=%i<0fzlc&^0Z0syC$bX@uLz5z6#@02eiLnaq`NZ5#e=0G_ie&PH06M2y3StnksZA$ zg4a}lIck-KAXa8mzrx-OJosgMz|pO#uu3J_RJL;VAx(gF0^D0SvlH?*wR|3~%w;86 zGG#2$0)~~7@@TCV;gWco4?uIB_zBT&9zcmQMeK1t*xq{A{Z+b03Tlx-{?Kw;JYIW(#6t-$@DXkewM?b4sX$TE{SjE zgZ1CcLrGw6Y_d{}$Mq-(>zBz0P7d83NkL<2qNoWYr_t$j_#I@{ETWT5cf0p8tJ%`d z&hQDMx7Q$g``oKfTE0@jc6-8)4OtBE+~IF7CbBgS5JayCA{sW58uc!$UDsQtqu;)? zHpdesL*VZoQ0VJCDVC|rwEW0pR&k#``3)+^gm1Xt1_;&%172D?$fwDSb)y+>6eOFp z=_n9YxhO!hj>r@NW>(k;`0G5zw*^lQ{nV*$Z|$B>GhprR(kwBf0nsxm*?tw!#kt$s zodiHJ4XJ$3=zW|mq2*rF`~&%xO>Y_VuqqRNeI zsiUd-M%cMSk$$p18ZJ0Tnm)BsN2g9XQ6K-UNQPvFPPwj&uklQd z*M$+ohS`Qe`kp?_eyzAFOS2VMkPn=Qhot7i2Prd=4rx+_y?mjtOl9WgK};LSS9=1l z`q!k_MS&hkIM$=|X)N%J5l*Gt(z1a?5TjGeSV<^4$4zI=4ha%DCMb&}FyNc9<98H| z6hl1%=uM<=X5TY`q??-sX~$IyNIRxde#Vq|!pfFMSOFm-Vmfi-QuK>l=nr{f3=E(J z+Dp9>`s>zMx+WXVt~pPm1r}hiJ&ww4Wz-C-t}3YW=|oLt<3KO^07QD@E8hOXu6+xd zobv>r&=6xI&vNM-3{|ai?;9C~B1UU@zyZwRd*2>aQd2^z%8ca3fPkmh%4o5=OAYv1 z>=XTa->ieV36heJ34Bz? 
zYRO0F7EMwzW?(@0AG}-vP2GU-4;=^>Ak1tP2H;MbLin%TEi)jzU$1i4Lg4tgMjW(T zbuD&;N8H)q!vD?v9MA*xZ&Ag~E_2^x$^)~EnynX~<-YL0V;Iet%n0*lvgb~HBK^Yz z!owT?S2)7J!Z9ja7kF2CBWhT3efm)`5LzN*=}c@Fx++BYJ$x?&_Ik%|YS_Y@}pu zK1hRzFwHPbOo5exuhNX+g-pbgc~~1qI>mHSsA@8@iZVkct-Kiqf1H zvDHyn8B8BxQ0TLZLFq;?E26Q;xX6WOe45?wORaixXY;kmZzS8vrgyjzY-IANLD-h9 zvj^~wysP6dLP>rYb!tm@bvWbFjl?|k{{dncVF)ajB>w`8Fi)z@Crw2=a976>O$WnT zc+-p1cdvp+kJIl>@dt%9_EwUZf3&2X2WSX}nVrpRS5Cbmn(u3{pH?4FTT9q(`WI^+ z)$^mpbAr|dG}aE``Pn2o>|Xj?_6a+`_ebTIpsg@vYd!7A_9^{9--l?otC(|gPIgej;e*R*MSA@9F2 zUEF{&98(m}O-*i3t%@0)Ko7i?C`Q`~yi4bO7f__z9NWl>+J#;;^$XDKO6e)$mS+ZD|V%_Y=}FzBD0aEM~p$l*+ z_o!j(;Vm{p46Np&mwsP2KUnhcHOY;9MG4Fcn`5DpXBNfO?zO2=%mX?4Geelv%xM4G za&np)4-OGAwv%uAKF{}AzR7gvNmxmSgw}tz-s`lHwnyza4?L%mhs<<#nu%K8+aj|l zrZagOAvlr87p($FoHH&ynCUcxATi)wF}1>kgAS$3=1qF*l;yhL%=22n`s6T4UBEmga@ zplv$7*}Z}`3vx|gVl{3s&cQ70E}S-l*?^TWd2!Z?-O;rVOA~E68+VH$;3BXNv_OLN z*!IRQMKBP@bqH1gZF9KS%W1v&bnfT5Wil`P7%_wdXGY?sthe0}wgYNII8gGij)eTu z!hHBHU&hDm`-SNFjTWN+U}^1)h<^}Nl-ha0q;n2{ig6C5!^Ura7{+gTe)`W~{BYP< zQWH?+_SBwzb5-X*mV^i2A;{y&Lm~N^OtEOoqb$Y|@{>!#brKGwI3Bmx14y){wRC7@ zbqFGGI&@>4;C3sRn-DHX((fjDP1j@sTeX-snKBJ-G>KAbtX>_V+-w#pYJ>XbEZXu2 zySsz7&4nhDpG_=T)w2`tirs4p1B8uReE^jL-Nbxw`PsjJu_oNla3xuy-n7Cpx0ESL z*i?C1Qh4oI7N*4!Tp$%W|C| ztC#u5YN~J=6XA7LU%e&$mQ}CZu-mUb&FIC>m<{7IQIzPk;{hfEhVz5Y+i6{T4VFR{ z4L_!6j95tAwf&(cF05eT9*y)b&(j7J>&a8_g8VoV%qb}6XzR->(OefLq#I+tm{|2( z-r2qr7)`Vobyh(T^h!7##dUC;JxWlnU>UdSrS}PpaIn@T!xWd>ROdv!xE)7s{cUkX z!d^0Xuoad(r=I?^ei6|pg;j%JY&|V95|J*rvh?pJdt9eg1e2idnP&g;x!-?4ZWz`O zbfBtA($9CI!{H_HvQ$( zK>FfUGz+V^oY!=6jelJ1ALR4PQGs7ik*0N$@y*;$!Lp>lWOa0-ZQC%XPqxh}=h!an zX4~1xtbI{*Qy)ia#;S;cviP=lhVgEg$g&1;r=OawsT|N-GHU z^9JS9q}YXAZcA-TA!=6u+Hsx+dYDqi$D;MDDp3OK*)znY0Y48`U@s?iH7|(zc9hl5 z=L}tL;qVY?4~VsUa@o$`|M~ZS{y%kLaN4bLm_55Nf=;GiQifZvT}c^`51aG%N$cGbBTIIyzgk7|$;nQaM5( zHcR0zq-a)AMj#B!Fr{{uz@Wn|)JA1hi>#VK&V<>54#&maMulWq2uu=fILl_$chPpx z^p5~2pZ`ei*{Jw}{XXNV9Sram!o||8^$_dRxSfoD7~Yv9mg#kUs0!jc@IAdAE1U$h 
z9^j{rW0{16qo{9}lA4T;gl7lI9ScZj#oirf<}gqy_^A^(M_T{Mypa(-)uCm5Z-wyhQI_7#DZH>4Jz4Wg2O1!?{&P z3(ygb=UK@8%Na8reW&xaByRLCsB$k6ry(^KM9jJ^9rxGUrfAxx6%gi^8h}5r5Ur~F z$ZDR!ZngwN+79;tndYgqD+Wm1_nCDbf(8%55Imsa1^A4%4knwmCcb#R01U+211i*Q zBp9;2MM4RCIvvl*vU{DL6U0(FQwV(Ur2qmfP=R-77I3o=xYXevKL_*m>k0&s%78=S z0zNI>$S-VlsC0x;Jzfa|c7QO4CNu+vWSaqDaRV^TZtEkrViMEwmBIZ4fSkd_pT@n3 zJNTvnTTeSsX8659g=R_^S%8VgOMpr1IEoCeQB53=2rN|OD*$dmn+>h7tA3f-9#P<8swfv8(zdu>aAOBr{ zA7qw^{6BvczVsFOE`3~(f8#5#rzk!H+nVI>dMl2<%kz8rNI$u%b387%0es}0+r#U7 zM?fmsR=ZF33}g;)|315$;2$`Slx5aYCc+rYPrd4hWB%c!He z4b@lJRLeXjEp9E5bGf<^TdPQTD2V&ZTI<6=G$bF~bQsbG4M}sDh7sdq9Vj4nu7if;CyAuvp|^e3 z!k9N274rvcE=~$_?dq()QN(O~Jx2ti*!_NO(N){g10JciMd|U-@sV?%=pJ)YI}K<>tNqBr)2HO?xnux3LkB{rnI4Q^DS0NaVA%eD>oNoHLz&XUARDxsT=l@Ie$F zi}Ry=l)(r~^S9||fvL{R-(UTi2Dy^8CAx zRIu((=O28d{|s{au}|+w^OO8Z-(K$d()@#eu_MjT&ELs~<{e<6n}7Ec+Qs|}eCYWD zK|ls&y8cJ%C}T`4O8|_6$$+M%N`H@`@w%JXG-JtnMuW&0!uMAKy|c|*Zv&vAOW;*K zI6Zc2emZ((c-SUw8?TMr7>zMbzz`rl_d-Ms;Ui3QV%GZ+i%WSx<0lhXwA=F2cWsAX z$tVjy*%1FA9cB6?!liBsElFF@Gcp*YVD60!s(@Zw5gG%%o=51GH-2vqZ&%C8$JFVys* zMj!VC$XT9$@DD6r=ly)7CrT``B!9!FIp5>?zZ|;Hp4+nhRW1L>BYtj6WVP=R=6zxQ zPs8i+d4K*sKC~Xsh?%VGmLFYtE1=}xg|+M9ZZ4M}pR6x1ed}<85sicB0Z*&^mnd2U zRP-RHgm~+;0Uq|tUu6gpfim(6t$^{Bs2Nhu~2= z)WF+AydDSJBu5(`-oGYq@e<(M<}VRK!%K<2R&%Y~#;oSrVrB>uJxPpcNi?+${>vEj zHP}F5Ff~YgcCV!(qgzKYP7A9P8s8Nat-~gwO0?!@iso@Ft~O8Tt^ITX0MXeONFqYb zLY*orbbrK}TJM|I%L5uNW_`S==pM#FOI3{G=3)>CU^n|1F5|SJ#x2;%_4z~3IYBd+ z`C2Xi(~ml43ovtD{%s26i}J6k9Iz2M!+v3x{rhBgf_Zu%BnWE$ z0pk6oemx3aL;o7juo3`TF$FC^iKo%joZ$`Ua!t&XVD5#r_$Hp+x#V^FR-K&j@#Vl# z!fJBpTzzuI63F4VzXV7FDmBgK6WnMwZC6HF9ape@kdI}P2~?mWTZH|ZPx1PNcHe5RLRAK^qaSVw*r>zKn7 z@*m!~7uJ^A9%RTcAnD;f;{CM0XCGj`UL*&@qry*Fqx1UUVkqU8*gZiHxR=$%ncn8v zZP^YE0AYU=rq(K9kt0@sB1_|YR2Kl^av_K;LduLMGD?Q66yv?o-xfo< zc3?=?c!lES4FfN)7pTpEt-`PcDv5y#&jl)p_B1_xJ_Cp7vde%B$3)eX_GBQEk7Jvb z#}?hUapAw;jwhTx2pC#+aycSWgEiq98YrFXfCe(M)<_o|CEZ0GL(^mVx>vFH@Gp4y z3(b;*2Bn>xbtmOatuOLLFf|^zUCj%JAPPN~uz+7MX)B3V0oHkhGA`^dv_crja)l%W 
zSg_nUNv@&~+##2D&(+6L;~=O@^a8uW->Zr5!0Msz9lmoRiRgTjun!qO(K?{-ah)D5 z17p)=e;E&;R3Jy(AWW&I7D!YEec~T2r*B%pInzY8pkx!2i!Pp82HnJUc`SeJtBCfFvzD@qQX}Vt-&N~5NST?@s$9ICPPE^YB8^xdOUV>4`$mhNhub8hR$cUYO~J` z!#uZGHU-Ia7lRl;a}oDIUZDbX=-qsG*-AS`wuU@Rc|arC#8#N4gn5g)vpQu}e5`Fy zO_fyDh!GO}g9XZ}>acg0P{+pT-Pz7@TS&HKeR3ru)qyBBM1W;$5%Yx=QmWK!@izc2Kg*#r=m z=nV%EjkW7v(KTK?Wavkc;AfY9K9Y5C&8@ zDC1>Munj|kc~|zB8Y8P+&Uo9ez73?rL`)#;!2rXPTqRdAb9?8p8UcdEgHidy+LZ_& z-I|BU;B~gO>mF4g9kjk=^YSX8q&SeHTj|N#N?o^9MhS#^c5L z_x}uy2l*sQVc^d`0Jnc}zC#~Teit7S2KLpQB)&?g#8yOH&JX!ZLT1%iScfD*{ugC z!NBX2Tj&$;$=|;B^`-~wp)Z$qP`m+rP?A1tj(HqRkuB49)N* zTPfD)X|;l`uzcghj}IP)pyGblE0L1PL_P zI`_q{^UDg3UFJAYx=@)|#s}nS0eA>S*@A|=HWFNl=GiiK5bsNAFw7Pp(#n~#7&wD( zp+FlFC}*q8dUzo!uoWp#!bfPy)ba9^kdrB}+J<;F6rN>AB+?PdjmO!lQzCfdmqZ$S zAE)oIT6Ut0Oar;J0D7tA;*z4R+xUO5A**@9h_n`&1oq`r3q`l($3F8w;jGYkDq)3+ zBk|*XdV3fQR-qatuh7b7t35W}nOL-Pa&dajxl5KVJMaAED^_-LU3Px(_wP$_>RHjr z8~plH|FzuTefziU-T9q7;oZG&wO{A?yB~bSel7EQ-}hfj{nsCQD@**>x4e~ez1}_E z$~pe-J6ag@zb5?Gn?GvrI-R^lD?9wxxc9LC zh`k&0k{|J3ZSQa2Nqg7wk{|Y8O@H@+r*>*x8vgEK|5f*QJ0G=oiNAZj|BAi$H@wZ- z#M4DHdZYac0Hdu&p;Yhu@1?>!SV%;Dffq1Uu(p8yhHMzYUh>&{Io3L&WdV^fREyuj z`=_wMkd*2TAWO-_(2J0u$ix?rEcoD<41B^C)kYW{jr1R(XRs=v<8LmkgIS6?e>c{z z6UOp3GKE#f@?Ns`)6Ay0Ha#6oM(tl@@vZ9f3IQlYdGvC z%&skbd_WIt;??NO91&4%9W@Gh+tyevnHEZ$cP+yR=wph}Xb+4W^5r#KIkzy2ugNd? 
z^0hbqCRSR-5GofOJ<;^%|dm%gZ2UGf`+j%|6jSC?1uyTOG>HHtOa zqsJDz;dAkA8qtt2q5A>-Q|yMrLNfsGX}yYJ6r~n1ik)jfoXm!}hCYfk4k#zh6iokk zv)ODV_3_4dYrGx)YmL{6uj4}I@N>3}640l&;4Wa)O8M7{HwgU*Mfrh0h7rO52bCjJ zTk#To0lMV;bAQB3r`<%&7B8V-FcbN+p-mZkLvIM{0+6yhnWw@lgmkHM^M4ApI7`JV zgmMw_{Jnor4h2r3czYvnMR3>n3?RkD8~Vk+K=$ePR^YvXG2c9*^# zPw;y-f@W}--}u*mdH9|OpZe-uui;7h*HZwyH?U6Mzz*^nCxvbM9+^L7y`=>-uS&Vg zHx{mFjOFF)#%JF2o@1~7%7b6|%{xPbXkwtSp_%10qjsm&Ojb6-N|w#A;A-a+4}AIe zKJ}q5J#j=s)xgxwx}u$FwIgf}*N1X1fAO~b#s3fLCPIh}fu!#kr?^?UPPdB`sE0{` zs%z^&1rt4zG#sDAr)63v3b&v;zk7;X(@5MSQ;B(GQ#gK)d***Bcy#c0^LM@ zl$7+0@v`1{`qrd3oT=Bw0b=SQl7e==M@!1AgF0gZEx%+X9*FgZlhjvYixg9&uN*<8 z*X65X#JT%SFHX}6E=s$W+2GhkG**|^99C=8b_|Pc6ZR1#xTog?suPYtrM4cJosKUrT?4`V7~68jvh`_7)6K*`k|Nl7-*77>u%CBkXV~>|_sP z|D1l2u9=IwI{B`}f37+QGi@=KMC{QWV0I|ZR0v5#C=o|9z=B&SEHV3~rSp%u zROs{re|ar^Es%rC0jAE*&J@7J6Eg_u#37NK$4_DS31KQD!Rb5dTRZ>dT+GL@%*SHl zno`yRdP)At8j4sT!nF2=;&5)B+>-tcxf7T>sLwLg{@m8dKaU{r)&CIY%b)hW|Fe9) z@Jppq^`$)y4HOOYtZcIQQhZkHj{HvDN?Cn1<%1GVaMkQKsmxAJI@(4%c4|^EJGBMR z#r#EhL*}cl2HF$+Mx(5~E&m#Z6O&SHrL>>C>ac*3pIgJ~#G@2DsS+?~^)4Se3yD_y z$NMLOIswQr7DU1Jcr-pXhBOG$G_TMo|11n90C@K@xh_uh9V6=qgP0-b<)uBb9L?k- zSz`G`dYz50V;%?UztNl6c=7d%&?s&^$b)tI-Mn5&i0t=aE6}>01hW3%WTX;}>ruLN zHqKLHuKvq>`vl*1*MDPUeBEt}#wN~_ijpNzR4Y;v8hfKNu)Z*rorfq-@!69X+sKqdC}8N`=yp?;g*HGD9pE2#u8$4-w199YM&^VgXRDcBErt}Z~gX3QVm6z4|A zOMrn_4E1hOJr1hZ1LVZ7vVn=%-jMGpkmsdbq56=HU!&1p+-P!Hl>cl_wCm%|b3oYP z_joeVP7>}5=lA&5$u)}&*F4{!#qnaOGcb7T6R-C`x@X2co;3UCg%zYgLT=7D4RWbJC(+<@6zhS}@dX~25fRx_;BuVN_Q zK+jgc%5G3^(Ye^UqwaaG>3 z#OKdi_F4@!h{Hqg?-0Jip}_!{O*sa+kB!T#r@OI^TojQvyyC>BqPpl?glRoTZRlB_)S2VLr1W%9Vdy?eyHBE z3{I|O;tgXl9Yki15}2n*yb_CN;L(I4QT)*Ry9oef@WCP>tjjcUf-7npU!iYA6x25U zyuRsj=8f{bvs-*N{(`>R*B z?6{GW&1wQfKPiTc2QZ`~M^Cv0-V`s$5gR~`fO`cIqAM_L+00kxKgHod1s-2Pn6M#3 z*4sAh%*aruc_A{4G_iV52VKaJACP#7X&&lh`}Rmzc5tm!7C(YBdvdo@&vg_@DbRFP z$GJiu5fgZEzVn}OFg>-Z^TxgyZY-O`WT1|9L=WODvsBw9Z=8rX#-p79OfZ@ath(tT z9-_Vc9600B3%X`>2lp7Du_hwJu$4<{OZw;Il~Q8$W*757lbcf^d*>#O?%m|)} 
zA1O;v{BZe$T!wj@*)k&O7Gm0`n(V2d0%2gu1;8K1S~-5lTe-my%50=EldPdN+G}1MbLL1pM%Yxy%wgf=ReEb>y5gc#p?N=Yig7sJzY(tyUyL+JmK@} zHhC!!?fNhhc0o;YsJTFqnVl*p!|#*jj|q@j8$&Q9jR3#}xIKpChN!3&T3J&WaG)XD zq4y$K_TJ{lnm}e3Q7tq=TtAI?AyD^(b+k2$F*);XlT6OYqcTywD_X1RoWHE z9eV<3s(<10rfo@0Kly%=B9*2VSH{FxRR*V+v3_6G0+0zj&#+G!$sXq(5Iy)<&6jS4 ztX}ne{Z{uktmj`VUR-U2{EIc-$XCIt_v&UOD^?ogxYmYCdaS3 zO>9bay(n0&CB_uo*M{f^MK#7q}-iGRj zXB4-jA;9he(BkfAbpo~!qbxeO$i`qgDjTdl)MfVe9DaGiC*efX^2G1}pO0|$T>!_) z%IHN&i&a=O*de%OJcE~p6hwD#?=8ueqKH>s+Q^2V*E-!rI?iG@H&a1wLPhWzO#F81 zTd*$KJsb!EEVF1#`Xp&&F05&&>K)b#UN7+}{q0I^a|LzBp4Ak?R?ZWy>qytq-~Eo+42Hhyx~1Albv&|BW~ z$=Y_BJ#cjXuDkEuwFe=!zGLI;U9a1*bNAf*?45T5NYpgdvc?GL5ZZ3Tnf>?|FLIEJ zq=R10$m<)&U(28N&h2!N?%epJyG#__$j9vL-;C*9@#vTy9Xi~4?REq)iu-J1R!+U( zFmT-?nkO)&*;uF8lG&e*E#S+vfDfF8IiBehI3<`YaPpAti$#6jENU_l$=A`})Qxo!21Wwrp1%2>%Ug}Nk-*tOMc%jDodqTR0ebr z9t_@1aAp{Rd4Xyg^Abu%zLx*>%!{O2u!FqNp;1J%NOicDUvTvD+~(J6e!);?3_@x| zEdwe9*cM|RjBRP=pA+I1e?QfELyWr2R@y6=1t)$j&rBN~qgp zo(u-=sLyq9HG4a=#+c+U+uFk`2e-cez>AId=m4QE%W9A8dat`7Jd9t(_kwyiUUeJ43L@XU>bA~%wVDkf0vK#_5MT}S?2M~v zIK@|!+yiG^%@_Bv{{>o2Cd|kk{WI8?*Ir!WDUSYi!|1=dghpsqDu#h$@M;D4Lo;BD z)4yzRAUmCVBSrn&nL-#hu}rV8o(lAV?_#9vj1}xJkb^%Ta5m@Q|@gI6&?Y^ z)`m$GCyYoDo+)-+k(e}vk0M3nnyrcX@)2tTT65bW$M6J(Wkh#*OB6jrF4x<$ncfDm9(%0L0Vb_+QLu>S10))}JO)h1}1!VaNq^|5J$%7NG=iHzbFq4RPOf~#up-ZFKU z+p)soShwi}|4ci}`2B9Bdtocl<-2BIdnMEJj2f&J&?W1bVG=3UHr~1MPFCVeQ+&iy zOVRnpJMXb|xw_Yw>UQ*EO>(0i@)M?v>Gn`qw2)fRtL;dk;p;Yp z0$XKUKRb912@@ak;T6AuHXZ&m8VPXE({dnboH_xmta;kWOY8P79M1dz@&G71dJ= zM|>{dAy#k+e>SC`#`2m%Qpngd^&9T>1Op$ha|NrNbcOu^pRq5TGCl#}`NYj&@g1`` zo9Sn9vDE$HGfOK~5smw1i$Xz`E*BqYjJ5;As?{ zHm<(HOr$R4^l>ajX?dz&S5aGs32K7=R!@3~1FA@WEIlwNTe}1SiU62mW3^Jf@$Ee^ z*c~{@ICuqQ6Wh0Q`|5c5w2HFp6kNv{iQNMML&WVH@04X9`lKD5+-@o>!4Xr*Kk$^D zOHeVY^vfJ#mEL_4Y*oO*qV3x`Tx@}@xbOvQKvb-`+DHknj3BHt8_uokHVr#Y6Jb>9 zIA!4BI5FBVB%ar}J$ZaPAW+Z8wi;+rU#C4A*JxPhasK&e93wDaJL*0foiIjb+%dQc zQA~}5v!Zlv1e`xrdo_kX9twmIDt!DI-Urztt{14`6{CNQ4^29J_`Bi5U)v1sWQFNZ znAN#ts5eb$piZu#X{{2k;=G_mZ 
z=?_i`BuGFdH$5t)yfQK5R)Yvxz# zHl-mmiu_~(J@@~WEw$*_!m!kFBmLny@r**T@Y%3465L-X0%|oYjJz7moJ}cguS2D* zTZ>qc{|jChwC-8&6scpd{b1~!i%`m8%bcd<==4q;okU6GA;Gy2>NF>)5k68WZ?H3) z3zJN4-1d?F(F%r+x6E2Ym-HKU2)~bM&WAZwiTynA6uvo)^!{^I_~M}%PVDOJ0$EvE!-b7d#6%G zkVVw9^ReCvCc-ThkkR0&wt8srZLPIf5jx9?v6I|C zsL4c^-E^Ck)W_xn#*H2#7BX{j8&ZLPQ9Mv`ErbchQvS7n#?$rGH;H5MspRKh(MzWC zR*u);l>5uK1l<*_64gm1RR-zxlKkDc)#ts6Zix;!#zf@_Yb&M?`fv>&tPG)jl3GQj z8MtYC6qP8_W!jz^1L-Sz!ci^|l|y+$P(BJ*DPRsy3|<1)BC+5^k!eFtB1@3y_u(NZ zx0Yc9X6GTnf2e?SCYc$Wwn#Kt0f=;2<<4__KgV>-dO0 zV}oDX8?*Cl_q0d_!*CJ9D6agSVa#y=U6=`hqYZrAss$HS3ZiPym@xBR%`dD37yH9? zYHPWK9rlkS!M=-*;mgtvZC%#uSX-TPl~KM!1=Q9iZ|gcfc*quVTR~T-97m1NnB4Vf zZ6dUGL)C+X30cJ(X$@bQo4mDc{_qyHc0J~wmU|l5(2jX)X=rWxsMcnMv1o3GV4ay_ zWwu^$wxPv4Y~AGM>6679UgKLDS{(FF&;WJ6IVKL^ky$w7xIc<#bg;xb+RZsw!;O7( zav*-2vSz!E#WD^&rmQrnvAG~MWnlN$9zEs$Ec6?jClV@7b{w9w$w>^JNU)dgiBu=F zR!{Axk|0Vx8F&DF=nOm{k1dXwl`7B$^HM6?AntlnC0fSQ9@JXG3IB=KaLp^CNA;VH z=Yj`Kk)fBq8189dx73szt_`2dXak3*@2BiXbNn^=9srhL7NO)yTdhjx5M$xECy1}K?2$V+l@wIQ=8 zlAZ)#($19f(mzH;>=PCjbL?=7i!A9z0~J)> zQM7R=TIuG&F|xxIOfB-QfGC!)VSS~}PXHs(z+b7l!pQpXMGy&e5kVvpMy8L|e87xR zDU2}Lz^G;ZK*||%*$_$ewx&9d|5y+<0w@+h*cr;O4uUWefZ|eV4nuyyrGaa~Wh3L@ zR|!wR`T${ZAHtB`12zi~WnHSoC{b3fB+M5`hjJN!pGPF4JIswxCk9#Ac->RbL0{z zLiB~F5c`C}AP)f%C|R!3#{^W6anORzRa)3_ai?mId~DFn5Ics4?R)R%9IqHS$2!3B z3*+P}G>ubzF~9K2QWP60+%WL+g*0zXDI`EG)2KwrnOa2!^tYBu)iEhgm3+}U>kT0= zifJhocP&P>W*BN(C9aIodgdSgE?ScF8I|ieSP=1-7TekYCzVgtTC$Gw`*~F$vabKi zoz^69b976?FY%dX_$^zgX$%;Cv8!>eORqKDrij^Q_`E+$iEM)FG8w(8lx(TAaL53a zg#!t!3s?_!L|6|FDH6K#K}@;hnn}r7-SV=o+A1|IIXf+tiBw~{nin4rX%B;*@MrUn zvUSoE-d<`voE*b0OPvQ5ksi-543Pk)G~{>55y%c3$eEZ6IV*}D^9LCMN7H^_3?ns) z1RO%aDPYQu|GSJ2MrFgU!3w3P3VzWLXd%ecWx?+xVpWM4tq-v-K|#dHW;xSORy0J? zoSKU0&EdZDv;8Qgv{vp|q&)JBuqDb@x-pkasFW)-(si@UzH5zf<{%f`P zAjZ>w72o?m%jb0~NmW&_|B6cGgUaL3K+!NoizZwCr4>FyTW9;AZc&+H`ik>mvzbW$ z%w%I+F(%_iGm*mGSsX-$ZTea|Op|#cgqMIb1lqkwi&V>FO7pxS{}#$9ka+jS`HAptHScyNTe>gdbC%DSX54p? 
z7>I%sR?18fXDhY%bcrChLhEC9@~p~Mr0-mz9I2anEjf=$aGDX8UWF>ouH;GQR=|2l zU&me64045(BAiQb zxRBRgnYC~0_VRZ`SKye7z3bB2A=TpZFrUmI?lKxU%2w|jY^QviZTHUYPK?+J`HfE6 zh3!4FN3lGK;wFHeR(=mxFh`0o50jxR-wq+k5~#>gtN{bfMTuf4PA6 zvnwg(%==D}qd7{attS3<`J({4j5C}@o%;N-{%_ENCgjf0r)))0AX;k&5_+W7hlyF! z=l-PoQ~KPObT42CD6jB0O@B z8_v}@J8~YkcCmcfl1t)KO$zfYKh-4D+)A$^^;Lhy=#Q+*4*8~0WwsTn%(lp`7pf>w zVJiD+qaiqF^p@n`#lb^<-Ztr+B^AkDrtuz)(|=i6z$+tc4=UFG&&M*$=<}I&gz@iih2l#x0Dvqxli#Wp6%@49~N-=wWdK!qiD0+;c z`7u6*T+q{^F^lQ=AnOkTB{$6$aEAyZ-FYH3H#Il1a{+0vRCZ3d%Z`Q74SqQZE{|&^ z&@$>eJpJCG&-u``-{waj&k+Khz6S^pY7}0&D0_|fK%j2Ejtz%kUx>p0nbOm>!~4ki#us5#NOZGl*mEg zxY@?rGIP^;_xwO>0d$7?< zjVd3piF_M6N*Ra#r!^D3)H3)UXo~&|0R?=CJawmh#`vB}n<>*1@JJBf`M5d(S?T5c znbN4F%BtPU3ArMKx*fG1|6V;7=k1uUa-~Bp-qBVUuXtnC7qju;%TB#z*!IBbbKW4u26_5 z)2VPX$Z0v*2zWZ~`rxKm)3xwyP4R3!EUIF+plvd)62%NAOy6b5_z5_qMXFD{KJ|5~ z*gc6(I#S`~bbMI-Dyn&)-}B>{vJZ)ty)i$ddt>FM?vKX#ryPh5z0Rp&OHN6(daq;E zglqSS7LsCNpgNECk!v9;t3O;;Z)j6Vs$huvfGQtcSor{zxrr?F{g_Qkpg(U@*5Z^i zeq1jNpyxGX;;^~;m9x>#0H^d1h4AhlTxfW^^mlA2_o(i969jBSsLDDYRGp{M@%NX5 z;~b@Xtj^JhI?E%E)ylQ3a;WVB}KY;QkNON zSyC70PEp$vQ8bNBsJB?h{Tz-v`^S@v1H;GaUOL1ewYk047$hUA+&>C-Jtv@oCmR~M zQ5WhSZuGeFmXiaY$66B~VmMQ{=?k?hruqngDiq&inVxhH?a?4!@Q~{?bexs_v%-aO zlAi_(NO5#T&U(VSM;d_()3_8mdxna~LdBy+#iLX-Ud$_UPf`=-U9cQeD|sO^XaoHx zG(&XqEHkdYcM>2YG(AiB9~@Pwqhqd=z=Q@iHl)XVnMD&e3zz&t0sz4V34p@eyBsut z!1V5?#V_J2K>m^G^~p*7!b~!YBSN8glXwTt+6Aw|H-v*rz;S#79p+ztA9}Z!p|A?{ zjt~lQ6#D@>A-{bBK0d`iOZ&1{c*)`ha?mL6U@1{0;q{IliQyv{$|Zl5kQQ=A=Qc4& zx-yD&0#w9TX`MI<&IcOjM4#~DElB#Jf)5m1M9vQ50ww&ADZ$Cnq!WH)kzVAI1DIC~ zgN$;sVZZFM+`wSyuz-l~(cEEYi~%;DBCvmAX%?*#C!^kt>eAu~p#)kyE|dW6JQh&I zQGI%g-_k2MjnE>(TWC?d#-b_l;Wqj06*hdO`Z3-{wVb82=@~l-%!s2|glq@A3LgTr z^XNPY#Wfk)_U3W)eEt^HHJ`wgH}hr(E*eqsSAj8$j`~%4fulOCvc#u7!KZOPT72XD zwdQ+p!U3(l-!O@>Kj*PKi!43Xud;)dtB2EKECEbN*aL@{Ko@j~Y6qG+DL}M~YZ{b9H66ZreZu5aCA<7k7-V07 zdm;>Buw7ID40{G%S39ldC48xQ30`XcsD2O0Y5DPHaiIGG*$ErCui0VVOG1@o*qmZ+ zh=FZKN>AE2m`l!y+ZRm1osMtJud+A=90>F;|*J7>(3uxE{MpFE=r4f_ABWrO1zYNI`#3Cd!=14}F#e`%m0~bPH 
zF)u1%uPTsC7|d?1<6bt28MBHmQOpShj6>PmJOg%9|M1lXlH=?N$*ndiLVzLvu+SS^ z9i76?n8J1G#1eQju#O#H)=eTBQHmcemTl2F${OO2Tb0MD=igB2sPcHRXW`J}TIKQN zgyO`l?ZuVHi!3pRJ#JJU^Ry^kuRQKp$_^`isM`3rCGfE4cU8wSX6Zca`E;e3wzP*n zmX}l>x5OFw<0a{QIsK;i0e_rU9ydgD?Q#0gg&4W0?m8@zTl*_^k+{!#It%i@D7v0q z1nZNf4#Fbrq>GNK1(Q(lS_;AjOvl$y5S|eEy4Ws$t%ObPDl5NQnBJ1E)#vUu&MR4~ zQ>M;Xk$>%bo;o9#)Lxpu`5{bdUtQICdn1~VBb>~;a@vu3N&vt;R5CCi`y8L_%AdF+ z{{c8AO9sd;(K|t6AZ5R(P`b8t8$5`ybogTE{NU@SYV7I0Bm=7O!nA+(r zJm}iA1S$a$o(Lu-y8`Q48ug^f`jl+OKrKQ*?io( zvBA`h%ujw{xQGzbq0CesG5~_o%*{$z??)1Z$CT|m>X;ja*D@s}r#W=$t~zCcDtDx? z1Xb7s-I2oVJoY_1*Ssh|obu;-2ZvXFl@KNT_Ru`YdbL%K{I((O!jdUAS(Z#WT$Y|( z)?>Z52&gatpQ_MH3DxGHVGedonq4+u9BGpw2CMmieaQHjHvcQW;9yp`@2#ymYb7vy z^2KgvlWLrhD{DFYkPY8d20;fgE**s75CU2NbAl0Rd!1}(58jzSKv78y^EBQObK4V9($IYc{Xc$v z!Ggc##RinsfBvzc*un=^+W+)lMxhn!HuUaeP?_*Fsq?S=&zp$t!yl@c#=-yepMF6Z zglQ~aHd3tJ%AVJ*B;#L=y<%nLks*1U4CML=J4Eyp)gt6uzCl_N*{<~3qJ{7qkQrKW zfW>coVPXp^H@6oG0K{>+R}4m+erFl20Bea3-aspy_7SaQl)P+9Pu-x9k^fq?q1eL9 zM2Rf(@?e9+la6X}hO(%RtjU?J!%eECqw>n;>W-gt4-Y&a+yT)2L0Jl1idmcVTT7Ty z;i;%)!AnyS-2fwXJfeoRiA+-^8~xJ=t-~QW5BB|3tG(b=^I3(T;C+<#N+O~NX4X?K zgK{%b--HY#LXhRCzL1z_58g1#DO%?yy8L6cn_m&(rKBa~z{$h#UM#rUxCAU4EQ`!# zO`O>Y)b22uy^&vDlJ#TF$(Qt{{r99E9_`16u9c-rDT->`TR^_nx2gcUoPYRFcjz12 zbpPjP^^NrU_y69#e1qnCI4prhq7LL2FsPAx3`lzw4;0I+Nm!~6rtE=f=|wy+{P3er zk*Z;yRLffKVh!9E#i-SU2b62idZ4geHa3~8N4&sK`gH4!0>d4{jt<`*rb{>0w(f^ok8zXIz)UB;{>y8SRS&%b_Uy6q!Y6hgg_7 zePb zbZ9|Cn%v{43!jK=V%{@h+@uTr*4R+MSD4?ye&gxipP|eXl`>yhF}fYj%F|kjvGvJe zTao;**sUX)z!63;O-lJuH4Q1@Q&^YkoxhqMZVw{?jy_I=K8|`HN7cu1^>Lg&p4rEx z4wEQVN7Pms?}6JNY&k|0s^>$?F|rK9AQ`t!^`sBuq=s=yU7TVVfAK80;K&*AOtp6@ z5R!$O)hBQ$Fxsi}wlfA1YN7mI4Fg5IhQn6T@7_>zx7Q@A_9gKi)!c)r8FC-)AUax`%W&eZ(>_ zQSxtF5159uL|B3>7dIoMhx=Yn4_uWnx^@*>zq(>{nd&w_wIX4Ys+1ev)P1^ zNgZw2y?}nm$>FF#Cgz@qk|-9tG$5y)K&wu$WWW9e@zb33Eg_b(dgA&S|&a&0%5}64YVN_C!og((_j#*?OaQQ1B?;~ z6M=pTuV$4UUvHK?wOPWi#a3ga5%Om;WR|h@C~!Y9-e4q);Hr-47S63Aia@J$VR!9i ztrXNw6GUJQw;?LfQfmHc8@oa6bHdS~x=7I(adgY@Rw#Q091X0XuBA-2XnjI}6APao 
z5K@AGw1m|14;%|;X2gnxwkgRaC1)ZPFL)Rb9~;KJ4`W`#IHWEPF^udCWWXi>0kN78 z5KW*UAcW+xh_H~OPrOxyE?Cv0UbUbLtZ&ij2uwp+WsXr-z|)zq;)02fXvxL^QLr&U z6g2#TC;&l>;j16nbw=bo*u(SSBTm#UmD5R50f#IdPACcJ=nbT4500_J=tyq#k_4bK z###ci6?r-wpG+%KvX*v+jkP4?Ku-I90;i==fHUMy%z`sSO%+;Lt1gT!q_!m-5C*td z-T|iQz%dYn=24qTiS;(f+tv_8OE+)D!Z&tYhmWZp^Yjf| zh};JZw2s!A^LnCeCvBt9(Y>iXiYe?LX}UYAZd>`_kv;=ES+1gI-zqxNDk3Hq7mXR3 zH~1N~io?g-$}n9!-i&AD@&3@)2Y4^H=oBET_3|0RdyX83k=FfvIMQwokApJRr9;H~ z&PeXY77rHF_6inLi&wCi&@}j`uLu{NfqxnVs^`Z=N}BGhQRRFHHsMXdMROCyEk-B7 zMa>t=8jXuJlBN#NS*1?ncda)rOkw6dfDz*j><_O{AiIWw^eyB>vb`nV+W^iA6Titt z5Arf@x_$!8z(vmB22#eX>&j)q!pDs_p^?f(0p5tA606o-ZL4*!=iW^q73YF*-ih2NNepv?)7i$HnzTwb4lEyp+&>7Hp}hU zk`b1399md0&wy`yqun#X1JHi>&HT%C)2Q*^DeVmXW!r@uN%(ow&|5RVpGv-HLa2{P zE)NV9ECvgCKteCsX%0rPR!w0R`mkXNtJ2F85pJt9;FgJEbBwGv#THZ-XC8ETSm@A~ zM+e!K=a{-=eU6H6(gJ}He1QtLP$4dq+F{daj&V7!mB{>|FfLvFH zahzeC(lEx#UDABKH8>N;%7f~3!^-6O>G!O2JhBY<kr9H{S=ZJwHdad<%KoNWpuP$^rU4pbU0iCl)GRVN0wo}PlWNz`*`Lx??Zf-V`W~V zWv}gMxgfJP}5F)JJ<%qdl(C9%r;?BM^h@@jL_qM9FzBC`9F~Yh6zog?K*YwyD{Nl-qCX z#GU?KQmCur8T&^*PjQ(IKHx3hwX)~N*vnb&IQ?e5JBd3D_98@W9^nWxmQ6k+v6jop zh1@x+WIoZuTjf1Mb{$KTBajDw5T;>umWoZN77;}+6F zDS-3jEBUDXlDT%vbAZWWNbA-0HRetmSUn7ydqun^)G_ZTZkFqQlJAYS$hZOV=6OJV zG!|Wff1fN;Sg<6-gJQJQbtS=>TGnIoR~2a zUQEXvanm|m6{=(OwM+M?4ltmFmvc3Pgiw;w5ax0ZW19sw4jQ(ZxI7F@x(v{KIfR&kG% zWh{?$@UEHRr)^JA)AO3%m@ZZX(oHH=IwQr`r^tlxt^xZIYQRpQImWDghb?Ags!_lzk6o)rr+1Zu!LEImt zLC@=$WExnx0B}SLPsb2BVaOD>osk+wW*4ff9hb1M^Q`R$=Nd($yXnHZ!}nT>Wk*@# zm@pyXuzH1Mhd6O(r4keg>EGGtHw`yt+*H!%rWE@fZ|E&nx+PX(O?<=p_Br; z3oTY?c`f}$THPgjKeqZTdKLcOhLUB;uhuE*`gin*t z)D^ZXzftu=pCOq17t=Q;qbgZ{+r7VAtUow=F>i?k$v)j)*dORpEj5#&Q*+aY&X7t;8ycD7*uKFjkpkycHu~4GaIrOJ&G`V4UNO*ii#jmIqG`zcm|+a3VcCljIlN znx7{Nv|HrzC%=!gkei8!a<@~ighvL1l`O?_B@6Z_py~`JIzaYfVGOf0PV-GxJDnz3 z=JF3pkKbT-N2=1XERu*5K@`N2Vj zq!9|8c5En^26l?xnEM^Drj|?d!ben3Rt|q}5Fl`Mz8^55g|n=WnK?OB1r+R{MzLfZ zEh=-=Y~A>ZFa<2Ub0YmM3>O4hMnk%%;^JyE5QbGm?K7yLa;plou@o?{XIM{rqQY0f 
zmQh;a{Tdn5ofi#aCDFZTDd&R{)z%Wb-|%97tXNFv?`<*nTOWtu=2^`C^MS?uRI!-; zJ}hQmZ&6*$5u!u6a&YpD(xC0z25nD9RlE-cGAm%FSy47D)rp0LX2JSq%nQ`eENVPC zI=ECvSSs`|wp7SZwp7SdTB<^hvZdNhXwQ1?Kn-w*5R`x5b#ENHyBDie49OKWz(SmfKwR2#i& zTZ$)J-vtg1rlcm*wytRwIWd<2kl~u-dUFDhQ$sK@G2@#TU|4V+vqSDH;9Wz%(Ge+# zh`R1A;a&>vK=4Am6x2-cNa!~KLe6)?+@#+=gQ0(SzsEBen(U-KtdocRdQP8)*f9U9 z8&FREjqj2(KpfY|^#!G&J95*m*FGTBh7QTmWKua|MvMwv)W?Q&JnvOYtZ1gCPpIQF znZM1`N?u4#V(>pcgX-D(c{&UcO}8cp#;SNSJb84001c2#O~ZT8TJQZ=SnCsEC`WxL z;K7Q+L#f^vvDW>ez-pS7WkjJq0b|r>+w#+47>0BWVOAU-Uf3|;Y<`kq3~U~VcC>E8 zwh!gELD;NQ1ZYa%zv8UAjlt!39^Hl=)IKIJ&#>?|qy**nZ*~g5fH@FoqpJLF34xCG z3F{GoRtLlKA%`#odpD(-tE+`nm@h5SEAZwrq}hwM@_J2(&cRUYilrX5tkDOMJV0Wf`gYozcYFAzwkc`BSDu z(Tq8uXiI^ECPcWMiq|ZtsPkk=Z~}_7s^%GBK}t8tsX-0`!tO-8aqi4zZCGDuV`PnP z)`}Lc0J!>Y?rQKHR~j%^00zOWTXw7q4ILp`U`E1rN<3|Ya903A3k+=%bElC?&rB1z z0t~kyhK#gAB^zjMl&%1wdJfii-SWr@%=Np{aUEI4aco1m(GD;t`YdoPk(jjptmF$B z(?`^%yicQa1sG9?V4xQ^47gVt#=H+>K5((@Cc`jSfRU{;0Wh%D1OsaV1qLS0kDkxu zBE`8d4>7BG)N39M2$N|QeU4ycDI2DJ82CApC%}Svk7&up0JgL-fGsuplN!S*^>K8EfvLjAfptUU06Ci3 zg0sLl&gK{~xZux&k3lGX$B4>V)$xXbUtQ~5KIju|dH~%wgo~=%665quD+`^9FNo>% zOzR;=z&xwp6va__GaC#!ytSBiHKUBH+%fa|sV@Apq)Ku^q9t*MR^!QWe%Ae4N`xp! 
zZS#$mxLUa9xUvo~R{BH=W>zqx;4~F*x+rL^v@YfweI)p8&xG`_#bBgYDTc!cm`*tW z4Q<32epilrV;Ct(IATT^|X?m0ll9uR;n?PXQpoE5sk_Zb|4VPo2wF&Rfi(cNXdI>9fJHo z21py}cAE_O2MWSC!XDW$voSo0clL!9d1UwR5Ma?vKVUf!m>PBfe#m9MA%SO?kXxXn zJNdpQAqZUg>I!mrnK{%rEo@RB&0Wx!k&xglw9x1kV=1WagxPLs=P^%bP;Cc7LOG~f zl{q+oc3@s+;-Uq}m1EOkk6c?r$Wl>OG<9T|f+V?nv#{6zNh;6ph}w1#rNRVaN&-bY zN40)1ECU4UP4{qiBt7{kT(es&TYo|&S{^yr7ce25>E_77B`HosO5-x3HAJk;TBbjv z*oC+=JT)UYG&z#slv67dBsxQd7fyd_aAKn=->AJBCcsvPxfsP^AU__(SibI)N zg&l@{D@1?_Yv&BHd%+nVA!@0DLoOb`A!})&Pow^E5~Yjf7M@KO1e2g)IFr=CS?@97 z%dp z*dagg*QNF!1#fKFtS~_p>Rhl)bu{kRN9$lGR>Ne={Fqb%9=-uK8)`=yyo0#i%nNt9 zEb2NH;{->J+>g4IO$p5yS*C~|$%2WGEkT7mqVEtoVhRuXH%I#5M+vlGmpLBg%~vw_ z{^%fIep-in#)d6^30@sMW77CZ?St@)OXVlU1)kEgQ_}fLoyec1puQ1J^*e;%RXcP$ zjO~O%xuy{ux@dd#?P!EUq?v;wksG!K^H7W#ULFAX>t<(X@6cStNc11bTfX0!W_LU& zaYl^Fr$h08M#Ul)yuOEmJovT1>yN12??!WazHZK{(>NxPLb~a*cJq9*sq7Va(7MijJO9#$_PuQ~4xX1x+lQpzbKYWKU*+#dVO z(W*@YMF8)y(9Hqw=7736$aiVp#qT}e`e~brK764Epf5ixtE_e$tf@dmxojB7bz@mK z6JRZClj#WXw7Udt?uWym=6z7}8q^aS)DsK}e**@!fEBO_aZmiysslwp_ZmCuc~TbU z0&}n?e18(F2KwG%M!^@QdOnY;SC;97kL0LEf=3YAX+NJ9|2dZ7mhwVnUBF7P$l`+IlmW;SKxL7=}= z&2c|;{^sWhX*LsQJmdd33k+UKx@!bN*BhPuqt6fujl#K;|HZr2h=QW$xD<_V1Vz8) zduoA0l)vTEdWOJm?o1#uoP4lzD!$w9K5UbFhfxrPhCZwGFc;c@Kv{qYVuYqOEA)YB z);!MndkU*B{-?u>B64fC8I^>A;x7V#Y%{~Gz3o5!K$5IqNR;|1NE2ZgD3i?pbYabM zJKg`Z=tWHyzR#w|N+-k9%5HB0goR4Qt^Tt^LfMz^}wkFi}?JP`M+H;>W zdYzHuusJ`^IEr3x{zC)bf*zL$faT5M==-ni2bLhXAr? 
zqlN5gwDW=N5xm1_*CJ_} zyzSf56w~sHIO6<5p_2;>?MT$tvA7U-xZu^EoCw5LgFt)uoK0a3{=e(R+d6P+ECNK?vrDqqGtsv-bSA@NCJ!eY{9UY< zht&%hWlo=LfqL1mUiK$_)!K*!r9>6DMo7B=SFMnCp=My(g7{Xju<+UeBc$(c6&gET zAy|iy7GxAo=!~MPwvZ@)HpJ+bJh03L?`HR)=W~n)Dl)z^eFPy7hJ@uoheC#24*@~4 zNrz4BD$iwaqT*-;U&)`=D=onQPGZuE7W2!;=_@+++F zC&3A>2&5B{2=1+jdk#!Ma8j@g86KMkJR?(Gn>Vkh8{THIf}Fw_Hc3>Wk^F~BIJ0Fk ziE0sy(-~Y1v=pN4Fn_^E?dB@&G-l6vvA(L%h8A26j9$}*QFvByc7XI+b@@Vxw{)(O z3PFq|$7qF0tEiT62VyKjc)voa))_w6*0_hHSmUF_*m|jHV#E4vf;I>FaZ)l`b2@nx z%LrSPEYGNSOUt%LRri4OUpflGD7j2IzoO}{7br}(F@W4NA(us z&Zs#Q6>LL1lf}3eiE-eXnW!EoV%0^SS(!L}BUcr9;vLpkhKv}ep;tsW5zB3Nghc`L zHMu4I`dE=^hiWowc;Twqz^zIWj~kcPuAw5RdlTEC z=7~ei=YuYwFl=}aH8|}fv$XDKtvG!7g^#O1>u6m4Hm#{fZPn;Xjk$n&f;bL(Ckx$<@==^Cpp{yE4ywEihR)Ca~2WJUgccAQ2{RJ(bS@l(A zb@WgS#BFotO8P~0+E;Yw{sBV)8vU^%9SSRE?W`@^$*dhW-ASohT>umZN8=S%O_|>v3I`+}; zh{P50xg~cLScaGxBgh3W;vzBUL;R7Lq(0F$3aM2K!X_I+Oc2{%C&C-PLP~H*R}6q& z6m96muv3hf@D)X$1OF5#+=rc1senRjntI!(PCQm=xCjLnmqoo4Si0cWbaqO1tzb5M zi3(n-zj)fOnXRZK9&T4TDY*_cN?uCJ{E4%%Q|2}#r8X>2u?!eS*Ca}m=LzrN&Mg4| zR<+ebL8}cE{FEwL7&9rF9>W}MHZX0W1whdf01VS}<{NCF#=oI#y-Z$AOpBouYIM52 zrV0yINu0%KN1wo|$pHCCL%C~hBsWl6Opj;aZ>7JdMC&T^9>i1Jf`TbQw%}6lPR9eB zkvf)rYx5=XE!t11tg-EZu14zIZJglRP_z#Xb|MOo5fcE-?z8Bwm{ zPU6^SK_jRMi$)aVVT~A;vTq<7d`RE|&Gc>tWeHeJZ^wYkJLLo)wvZy?iNaipc16+( z!PBgc*j>yDDGM^Sc8X7fXEQ6Zy9f;n-nIwO2{cMp6Ru7*;p;3Jm-~!}l`UMcZEprT zeKb@>ajL3t-F7Xs8J^G5DI6N5@jIwa9y89MzkfM-iu>e89+3(ZJr7TZMSrH`=_&_A z`{6v@6D77d08jTq9Sy;mb}~T}@{Dj!L%51=VJ(k7oe6> z{((uxH$3vzQdw)U_H2j(R5O4TE?a;VU>#`0q6wu{wNBQpmonWV$=E^AfAxG{H{65{ z*|p-xyet)P0YCHc78>%BXFT3;#27$=W4Iq&(HB$72?f#N4JM-ANN1p^2Hx~e1X;$- zqsDiWB9L%RH05hUIqq1OLQK^1Us&7e#y5UgPc!}LWLw0Clmv$(W|%0Pe<-A|d3DAW zwIUipDNvgE1rS{Lzvec0>;Ip+w}GTG<+PG0i8=^S;`#meKKH)+UcHh;hDm3v<$CYkdp`C#XPvS@eNo?q(jTLU~MY_|)TX;aP^EjWfA-p+>R|&C zj9-PQMK1%O_Z!EY#Muy=Ixv?tcPLOqpAiGPJXx8&huGyaCSqsJS=MX&W{IxDEM`ZJ zJ&{T|s>vWQSQDn7dhCah899v@8R>#JA%2RoH(kMG^vk|uCnJUbuFI_m3AbWR-CFVR7~>!SS#AVpILp}BitXo%-Qom4ocsuc 
zbEK2}aIExgw((gcLj3_O=#YBRkv?b}1R>j<*7oOL_~`b|!3z=rw)vC3nPXheZ}E@Yy79y`gXGCpvtlY1Z<5Fu^O2Zrm z$_z4u6PB&%xNcp;{ z8%+pQmTZ?^rQwyH@=SIY#b>tYLOHtqpWVIGV)?LsuD*MMC5W?;S5HV0lO$66)jbiu z^~+(_>+UtI(5RQP*5lK8tEiCk8iETR_X`AS$B5g5jzVC=q?xj9aJW!SF$OH--OM3-xAj>?$Lu%QoAUw%YiFQZ0cljk*CKtOz7yP5!fgMQ#*1 zS*ruTi`}TS9DY3kJGR{Lst%6dWQgxU->z%}r9|A=_FTsRRGs!th^ruP1<0wP&D6?4 zwYL;+sCXa1^lFFBD9073;($RCZOOYeCId=+kNjFfV`Pg_1L|6A5A+^#>x&dfp`Chx zjE{3ZYTYE!!CmUl-4cZsoF*W<%G+7Ew9^(+eiaxX&`JPTF!^TW^lNX zuJTrGPO_91ED}3y*iI_1)he!@^0h)P)riN#BKBTjC{R|M z+-n}$k8Y4t;PG@JUL?%aA|n^V-__bb*G@*A=Q}tkyrVoa4OfgxjjBy-DJS6|^G9N6(b(O^ODSw5Hq6(R`)Ervs~a30=E>2_c+eOToz^9p%;-)FS`&9- z%wE*yu5x&7_=|9J&3&MCOoNJVFo_0C&lZrl`~o<-gn3#O^0>oh#p278^b|1#JNYST zo%(sC-MX^<6x_EkLMe~(;Yz1q*+->ln6wK!&>6G~l`lYy%eR>TBaL49No8%&whM_X zWJh;}+n8=qMNYwWh7nG~#&16zJ=9&@S-t&S^yWA5=RqFK9EOrB|C8Ooh)muYvw3|Z z4>X$=T9h@nGvS^3PfZ_v{4ldmZl708FmHCxk7s)#x+JUa+L1lDGkp;xlp_aIsb7Yy ziDpkS6UsXRArV9P;7+4D`qA_s%oaNex{Jb>w8a@b>*cl{){hEs;GKDy)Rtln8JJ`M z8(_nHgDkBQV`#(V(#o{4Lkx?2Rw_N&ggmwo=@}3hq!IM+$MP0|KaHD&uXiznu1yt3 z#aAXceLFWcO`);?HC_p~3GEXH|{jsLfPKwv5wXZ*_<0 z4}tlv$%<#ILaZvypqbaE0o>A6tya3KiZ%=YdAh1fDOFgzs~lWlk1~Uab|Fz&x~kSI zI*nI6V4j|1%!{R~LdLRmRW^l&A(~6`PX^y)Ze(3$Ewkx0!ANuI7uJAwmhOvl9!jz$ zQfMgyqy-Ku@e;O0UrBMztNpmwPO*EDTjkW=tsLDh>3Uy0%Zy_qCEHlC4UiFI5Hy84 zN>s)v6fP-}SgQ(ed_dWD-1vyvNRnC}6gK(_c-xwb7fnX#hr=uQ(G;&bb>^sEmR)v6bkuJIv`TJVF4~4SyN%! 
zvd4Zqq&CenBSE5>od8*s+mD=9a0+S<_K1n-4;mNjFYkE)c}qd<( z5dzDYV(2TG&X_2tl5EES*j57?a@$DX zuX{gP{{n(r==_%cH>@P6m&gHyUSB@obmde=t7junOOSSRag}-ty^SHht0^7d466*= z>|{W#_Bv{fDo#X*gUU;B-*!Z<9Mnuz8NFyf(S-wp%Co%XUEcERt>q=P{L|DzNk7~h zjcNgEWvsveIK*w)>>INb6Udw%w0B7j�!*y9j+9Qn^?!nJm5|G9$$q0eGzjeQh?tSLor!ihCO(`Vn-)1+|#DqPJ3mZKKQ$D^=A>4S8)ddL}f!;EgYcSwGEpMdbNnc}>wY zjty;n%bW;YX7^z+hT44!*ITbH!@61pabVnrC6Bi0q}6i)ptBCpSpl@Ds*B|{_G8G7 zTV~gzD3_SK-j2E0w^;CqU`e9Iy3oH^ODxu<&?g?YHYp@O(oE;ck62**DF0BIN^}#) z;8X_ny6w43D=oDF!0>dnyD<2GcY&KNb~bg3t84Zpbs^t~mAeQVWJm#n{OQmDD zUbEQZ)I1)nSrL*vtahMMgm8djOjIL%Vg!jcr;#|s=ICS$!ii9%Gxp;X!mmLZlaH6^ z3mfme17xnL3+iJ5fWEEsWmqtzY6pPuZ3P*vH;=AE7gsaSRMxsM1ND2QT6GS}Gk6?5 zqmKP*twlA32P%ITtwEgu_##%8cyV)l&Ql5o>RGJh(2-rRR?P8LhNW(u4A`(WRWmJB zB-P*Pe2yGbuvg%VDgpNr?yr@oOP*q<*p&+jPIVQy8jN6wIch-IF{d$Vi}9&LZ(n75 z{HEog>KI zyw2vh8vY$f&=SUmzuiyrrZ&{BOI6LAI3|KzXmQ%4NzkN`E{zAg?C@B^OJW=Sw{odprK2TS&j6$j53DOrB`x##!ApRYikT z?Fk=va`0=V5OJD9m2CcT<}4|>5I0E|gQf0x=#`)!QZtv9!$R^vw%Nu`i}kU1R$04= zjMmk|(hy9_`kv~X0NpG{9`-H-V)!M|YHdh4Y{|7@9VOSH7myx|pECAZnhixb+WnF^ z<8{iqAf^R2bCaah^1n{o#MlcjeDq#Ww0#&!y*J*dxLmPm*QUNiXv{dIAL8hx&l`mw z+**^bPn>TV07D2?5fftaz;o_`aPnanqGlEgaLM07 z&M|*TlXDCdkK^-O3QMr`HomnX6_z|n6bwN|X8@M?CHRmbN0fn?mDEh$rvVljm`OtD z3PU~-k94~xF!rS#wC0gRD1n&c=tUdmIIjGy3M844^v@Q@gP%7UvWkV$dl>z}y|8@4 z68zD!)l<_FoN{kz*S^{S&85^JpmlT6r#E~i)C(d>$wj}2oLXKRiXjX$y`JDyf6P}o zbSZam>%EnSOzwn+rQVelKoY*8*0A#2U|RJc_IZ!u5YWu!!$bcI4tP+FMQxMUeFhrCF4N09 zI1xHH<{cbU2Z&~tnV`qfhS@O^!17_OCZtMSRhS|U3qGn^$YdVyppEHuGNs zatt!;qQJ?rg~Q3bCt3Mnu9jHRT6w|Z90o=-0L@~61_@Aej|^zZ!?E zr;jFTb7DSpPOO3>8;{LV?#C$$@t{pPe&NJQ9(t(AYX_QjZIOHPRJTO0(V1*b+$TMa zU*B3wFH7&|ub@`K2C~k!#S!7@`6hkHpLrM6gPS6hN)&3= zmYu7-x^N9j{=qpFbSy@X3`HMrK1wIZQ&iM{Hpl;}4k9Fr>s{2)) zYjLu7DR0(zvBPZ?#Rk`$c8u0(8ND~|P0)5-sWYPobav1?>!$~W0GgANILFomixV-B zT>6fo`Lg-+;{{8nM2zcru5e0p^*-6HZ>@XKAh+%yHT3P#uPMN#l76S^_aQsTC`+o@ z(`YCR2i~`lLqE@Dl1IOtjJu@^myAHL0VxJiD4#6H(O8H?=)ihLpR6U?5=R2`C5{Bk zNd_$~N>t_32XT;Hl|h=RiV9uf=%B|^pWeivpnW6C0a-5COirPx@f&0}lk{kER0PTY 
z3EAGzX+0256~=#4epoPH&p-d~UiE;{dIUWB$DQ2fYZo#!LJSa`sBl9CNQ#$QP&XMh zWHs0rr{k=8Cn^Brpa9(VT!_+}P0G&0;*O-a;V(3%Jb`e9l|7!@;et&t2T+-G_9&aR z!Nx#qV;GCpMw=qn+Eh(zQ>eNI4;5+)K<`8Je5m3p?*rA1^)cst%mvkrrLFEMH_PV= zU~{H$ga#@uj!HY^I1^~1v!MoJwb9N6P1-4&bEIM>UB-%|+pz#Yn!dXk=KeHQZTL3z z#-brOQxWmUYgc5HuB4w{qoq2#tho9=Lmclj>dn!N{77jAvIq1sh}D{I6RVLetJxvMpKA6be;vKMn8LZ1bvHT&(C3%07fi>i zWzT9G0^%pto;FLJcZq1qlOxsKNJrzFlS8Bk+@o%ge7Y^%$Z$J3oPOx~o)q3n|9%a# z-Q!VLND^{m6@qhqDwwED3`k!x)7WkT-vROT0e^J`-1d%v> zKYiaEpJr%`+9r2kdeqw~bQEXQ##w6jcFj7=R!8-7D_1w67%b^6uNLzxi@Br*FZMNf zp3}lJMqOIDMFDgXfG%@D(t>Yb3}pq^yTU-SrCwisSq_GcI=fsdOtz_ldJHYhQty?J z!bye*uW=#7BS%OCMdHrhnI{UrQZqsV9}KJ0mlk5BKA#MIBG0Q&Bzjg|eIlx7@4OVW zRNvb^kAyx^1gK9`0j8vcgQw5_oD@UA{{L{n&>0v$oB^r*l8=+M6_GkSOKCPhtvGtJ z)Arv7iVsDwuOzBKhLWb3=fmdjxr7aaZzg8vfVvrE^o za*2q;Np!F%K|>-@AlDDewSAurFF#SX&qqJOCXCaWjgwmO0(fH;^CNXHypHV{eX?Wf zz_W+%*`E52?2)TtFU?&09OQjI8T#DkeeP4A2h`^Q`uvSQg}LnoEXWrk=GH7@jEf=k zjtx-ZNNDb$H+N9Y&8fLLn!D;|=m#Gz`AR$0Q13+O@0j;@O#K~Kf5++1Qi*sI?#5A< zcC%V`3Y=(^IXo-5 zV29fAMzPIA{Ez9<7Y+VYUpqbGe|lL%b_n5ba7OyKoJ{fzw^D8`$&Dc%$ZV| zGKKGz>2)Rkuq3{N+`Uqeky1-T(&rdN<=%)|DVBPrc8UxoQiTN}-Eu?Zj9cNRV3=_# zV8*;-#=J0NL71^{RZIlDfQf#q&6N<*h63V06S_O?-JMoHy}VgSjX$mjve0HGv7D)g9BZK7~VN)^T$vln1~A2w4x? 
zvwimDQoZj`xlo6bWy(eT4r=N}N%WiJv;l;3_I-0@t|3Xk?@+n$7$m9}?&-5@eatmT z7QulBTUe3vk+ohNDi{0f4IU~+8Em4pxe7 z3@OC6>47Q&GPDfKb}@jn)t9*&#g$eVu7N zjSe2tF{F$w)S=J=DWQbu*xV5&)x$x0DAK48sU2)dg9OvigcZODZ&Nqt?arxPOzs>~ zv|D%`4Xs_Cn-aOz6dR+$={fASyTNCoPcu#FNk0*L>XEG>mQV^z`2+_fwy?bzVco=M z2)_(U(()DFs1+mtNgCOdO50IgG;-<0_-^NZ}HGK2v!t;p&R z^p-}-Jzwp&+EIB7V5Z}yWcDOUe$|zg-(UQYF49u%uTncdkS%@jku=?RL)(77DEr)B zJZ3*v$A<^1s_M3mJ>VVO672OX6BBA{_V~7gb$>YLEE0q@5kOWX}kC{S|vA1rB{_K5VL` zZMiWT$2;}wl{}ZGyxDCcG%hUDkmzS*Vzyz`j2a`fvO3b~nX-n+`iPm<)$FifQ^Atx ze49e=l#r23AmEmw_FxU|%EaI*!-a}+NUY#r$yCdIhMb7Fyyp`QoVQVgL>?kVSotb) zs8k@YP(d!J+Jf7rdH$u!kH#e>YWRlIC!N)xZ;|`Oj*=i`u1Ag9R4WMd& zadNNnD5)FvKO)O7%+8dv)+o*zPtAJzb8(;yJlRWXA;kp<*Aopd<2u(+;D-4g)pF>``Io=hX?=k%#isl^qvKf1Vu03oq#<^K{j7eP%*D3Rx=&hOK z2F!50L7krN-RP-gP(@qF!nb3lHh$oQ49dfws z*0;-}Shic)?jjY^F0H@J5Q$V$+KO(DX2>!}b6IfFRM73@En;!!vSIo4n6C~Ss6;ai zp-NOGUn5umNRQ?f`-7sD(?4I!BQr&VIb#52JMqSR_Nd^uJcyw6h`>$RH$FVuBLY8| z9hr3^U^L+ev%e_+{E_VUkI(c9-<F|9P)pPm%{+3lg{RDFYOX9BG$` zUKTWp5~7#+ZxUz`E5T$|AhM9&nP#3uW|6&^M{);8W%OvE&&s|6Rh+MIb{BJE!5)^1 zkco3M6YPNl~wujIv|;H!N8wci(EZykCgi5HlW}Vj*7?uhBBQSY-_3P(HGzs3+#ipO!-B_ zcKvS|`fayX?u9T|bC|Lc=T10yI+n6X!XS-VilTc>GRz%07^YQq6V7?scvexoOy(S_j=^-q%8&iY9NQ5Yk{wSRY*sTQPD}QDPjXZniIZph{(; zUlmIrY(=2oW>FF|R}1RbfM_6~KeEX6_qAxg7SrloaqY`h>j~!pb17 z9k?>WQoBG{n=g)?aLN`_X-t?LfNUn6O2g7{hhAyZgZbtX!(Vo98PBwP4+@YYMGJ*- z_}KC`WYbn@n@y?&>&o$EC8c0{8a5aInW8+TWcUgjcqfXchvyD5-ap5~qD>}#Ssv4T z(xE~&>GS$D!+OcvW@sK$h~u)Bi`+VHrOv%X134W8{VEA$SjEz{H>K6k0zt;)98=4s zfPK)N8cgFAISn}6W!^MJU(j>A7EG5%IMoQw);b#%Ed?cbJs%br+KZN22_>z+nb6;Z z;?+c%`g@b8aaBz18|PIpb%od|dDB8{z|&2z+@096PTqmA0h+omn8Ie;PgDx_nPfb% zkXTq6WzIqKg*08zq4b^ba-E2DM7LHtl$int;Dys~CdoE6h%X6@A{#(w8d(M`M3Xb9 zqKss*a7JOD`pO9-iP9pN)1M{(i`_&JbD4CvhTFsQ&yU~7`IYyzm4fiO3J&ko4yPN{ z*UP&V(_;#nZtZX<$W(H$|&q-6<)&!{tPH;oO74} z4wy6!VrTVn?-{lI&sm;U?RaDyMZT34~z?UJ_g@GBr3{ z^!S&W@h_$}H2QZ%{)MP=<6lUlXhO=MO3qra^rE6$a^cgWsE&kTTz1j5#XHjj8}p;@ zwtRw2z*Igi5l>6~`6nZ}hqtahlm?~X*(`)X$Hh*AVZ6tTCe#6Gsrg 
z0I}M;0CDKzr36G1Z>?(_mAAtDkmOt_tTYES@}Fn=jj!eifo6&p!UIXG20b+0$x5!w z|4sgZHw@9dGeySPnTIFai?-6C`eK_)x;dgD(ak}9q9qmwhS=hs!e;7RJ~n8OX#hvk zzqw9&Cn??pGE9xNbBlAp(*?h=8Qg!daMrM0L7Y(T-pM!uT2viDtcP<1+nwK8W+7 zhehvUQ9Uf#cf1$yZA3W+4YwKGhwb9;bY-p#O%Ac7f8bhzgT#-oct$=V3zmd2qRyND zw6Bg6XSY@WMNst73JX8{(i2;(%K^{}4(J5|dWr9Lp>Q!3g9`mp2lZk8;rTRhb&E~( zj6PxPWog0N5Zq?~MNG?()=k2|rQzNAt+}?+CJxi~pq!<>a?pII=U$Yjxtp&q^PMJ{ zPgMt}dfyU7YI0CuGr%DwqG5rQK*w~{a0DX;D%0`Wm5%~BUPtEP2W57Gsq5$Dj%_9c zOQNwReU2$mR7$_3>u%4)Z=p@8$uQWhN%~awxBBErWm`uciIR4^;g>Dacw}`{o4b==MLeDaJm6o&}ZP2&u(Dlw&DQ*4AC0xVRnM3V4^{ewkO0`}f z$yHi1+`CV>H{Jal>;CrO;MY#n=AC=Lm#a2l17 zC3ip9!9fl)*s}C38;!1ybjH$uzcHl8(!N@w*xzQdjXkV;Fg*NgmBs~W z#=U{yBF>QMN#j3eG0TtQ!TU96kviC$r~`8V=W{N}PRO=2Vzn&|iw26aBv|tgLF5&L zCn|J|s>5g3`ls;76Ugq(zaQw$TC-xR^G7WB(H@f1m+7%aB#r2TUF-i>6~uF zyq+^Omnl7WR1>$X@Xc}xQE21^Q zfnQT1tiMWW8;`kDFdn&AP`lr)qI6f4$LEy^cpk{hR?!-XFscomFEASVniRy5h;*o3 zSd*nBkKMbRm|9@SU=Wd@IOemAZ$ug26{0^QnKW#0IZPO*b*0f-a}d&l@HARIc{SH00kMfpp^y?&b!6zMyyuF5?y8I<-a^76n7u=Qp? 
zmzQkzr1grYY-g<$FZ6ndUX8goOZQsxR|n0DWsAgHA!uIUU7oDNoPzbpU}_6eV6Llc zZ_Pn6@B*j+6~F^fz|*+q>L_3|t#!x=DHk;ZmkY0L%PU6^fjez;%rXa?anRO@OA<3F zVH!OpOhX2g4U{n@3{1DqwxEP@>9&MBCfFOH_(xxucmLB~n0NmZSQuz+LGl-1$VwzX zNd2Xy3{(}&VfQMGMY~xcF+5q&Eoa+szS|cN)=9uJQQ~or@qccEJ6iFpog4J z^2uUXTGBtxQXPHc7Sb+-rGr662?IkxFg(GN(sD>}ECNO$<>r*uDh-avcrHdX3zgA` zNF<@A@@Q&flD&SA!y_KHWqpm;#in-OL+zj*k4=e_%at^ z!a`_>fRyrCy@y~5g{KJEIU|j1cm0MQDH!3Ho)N~;lVz?1v>b>iZxliCE+$c2rGnG% z0K#$anurhRxVk19WN_+-pU}Oon}rBY3P+o-A{fmcmJF%Op+5 zRf^k?mqN#9z2md$_?$XEN5@5&?NwTeJ4(%6iXOMgVd~+CjLRu=M5Y*V4DI1m9*`qN)dC%K+KhoaRHP_K|{ij9CmO5YNC&p51<^Gw3kdL zMp2~^8I`*Iz=SK!%brhiElPEuRWP&w=3559ydrE}U~okb1~vyxIaYJv+)XKm230ac z02+wfHGyItV4Jf)`I5VQ-R@3Zy3WBJu&{{GYx)^^dTf;|8n+ClZyiD1gCR#3Qt;di zAMNMjKF?W9D8Z{$gjCaIezHZ@-{47GBnH;hsLh<*fD)#*#~(@{V0g*m-PvUem{b{l zC%bk-l;xFYE?dxOTy6PICTLt#f>Q==w-|mROIkn@$^vbXs-%hH4MwsAn^SedfAX}+ zQ=IJZoZ>P86}f@J`ldNx{r)75Nw>MpUTh(;21VS(6ByZ+V#t6_8T!=@B>3aRc_3c;m$*#b zH^X*E8+rTE6N*GtL@g@YT+bKP)wz=3Vy*z;hGFjBJ@dr6@P<@MPIS*oHQY zVC1H){L#RuJ`?Kn4dQw#u>s=D-)3q!J+-O*u}-qay;o0TU<+=MraH#VCnbeaUT1FP zl^Qua>G#{Ca6-h%4EC^Z;r4Z?)$RCTt}>4<+6D1jx(R{YgkV&I5My6nCOew)A$>J`m+D7(Uvn#S}?uoxXPCr6GNAKKeXC z-AXg12Bbu(bF&Msesf%NVOv!*MN?a%rxGrA;mv{YM$Lx&$R<^U|=caO=$(~#muv^gN3VP?ROE`>>(0lI4B(!kP!p`XUr(!g5I zs{G>*42Zj{VEe!jD3q?+;a8%Op#r*ltnv>S8pPMnlkerw49CiA7Ld0!Y|UD>X6($a z5`i0D zhoUP$7>cd{DG!BmTo)I%bpOJXYFueNoOJk*%X{AcT{~Xt&H-T~yjgKkhutqWxypY_ zvZTbx#F2GAff)1qM1rc?cY|(Of|VF6PtQ6wi!?MfaC%&bO2(MZx}DwChOcXMNfFr< zxMhiCErzxEi7w2u6MapmDKsAMSQWp4XavM$Vmo8S8IP|5)+j&Y*`g2*?%nWGU{P^| zg)&{#?L9i*$A(U7o~4g^1J<=#eKzWm{inU$A+JYW8LAclcSDG$BnOV4s}Q6KzZcdXOW;%j@afpza* z$7M@?B~m`cf-@|fQsspYWv;OBa~73% zo!$RLZrAdvYt0O#I?Gpd-v`(l5hh&+RS*Wh4od7W{04YW8(MY4(JnrsHuk!Yoz?N7 z+FOd-?NJ)At-Ot~6&TnSgczR00mW)&&rN<6n|ttFd7Ha?AHv$@il zypBFeZrDNoO~0*?$%z$-CGMbhHXtpY;-=E{a!s4KX}W)+6Dfd7+#C;Y@gC2r8T5hlcuT26g^xjRewvS; z|LkLAUc28rMir>|H(Y+o{o81*SCP!F0KrxZ$Se4IS@cirdd2lY0 zzYobn;WdU+jB+5^>ST*n8Tb7RBX~x>UbgJlckO~MDJZq~P@2lU1Dx2!2PfW71BN-E 
z-~eYCS@W<(`5~I!f=h~rx7?m=cSg6zicBzMsO65vZ0M>~!iylW+0mc>070>AZEzBs zqdr+*dG}gMPb@y{NG1q!!Tkqp09_v^98Q7!?Vz#l}7V3eC~Z7z-=Uwe+KX8EY7njF>WEW}nJ$xAJN@ zcNm?Lo2YE{!umYLsU;vGxa8aHW)En*%xw0+Hru{5SmvTuM*Ry!({YB`VD791X24 zjaETK6)6nTJl&U4l}_BnULH5TkW@mwytfj#8Ux#CK@O1luQBaI{n!2=_W^gFsIT^r zK@TZq6KZ~J^s(TBUeKTmKeqkqLoPEzYxg58(HA|0WsEGA`K!_PFn={6mwT%F#2o&!sVbSDVrt8#K)hM89K0;ZZP%ThN8xQtEP(D{lOA zYFpFoePDBiEWK6B zv0JWQ7NOwa9uT+(`JNqQT~?fuzFt&QUuV@kS}1j5e`$ zFj8aLu>@W2Ta%4Zan}W~| zDP=oxPuQ84ybqj4IX6WYXF>5#Kie0l($_~d+biNgNk~Ew60on*#k$ltko}>UBO{eU zX!U_*MRio|$K_0(rNYL8f`fTDqTk@6yiVuzjNDb(Q5`hBxSPICRmQths>E`dzOdZ+ z44+WT^rSpk)WL0(cqi39P7kFtk=A<13fZ@a6m23V2Vn7wX`((6%@-2h82rXMFe1iYu>!pbk{&cx_orjfx`Y4KW^3Nl!j zUD<)ropAbS!|pH_is1L4TF7=iM+x`zz;hXo5=r(*<{tZGKv&Xdjb9lp=YcWl)14PA#Y-N(V5rGvk>LC;Du(FG!sx#y>zC%T|z|WurJg4c& z8%Kiovty74vBv%44lAoeXh@k}OSfHXLmWaw8ZugZiRP3+bV;AE38KXp)djvS*%!oA zNXQyi+fs0gO@c*H?5$W6i;@L+u}yn;m$~$e7B+nkx2Tx$zwh|adik776FG!6$?3I8 z*T5-hn>zhc_E6TuYKrYCqPhc+CQQ@RL2s&Gm@cBa9avv`V|Apa)`DKtcy(mwkZVEn z+%$s|Wx8#mSMuV+)|4{6##UO>Wep8kQ!68(Gc=c-lb+HMnL8v)$f=SV%M1Y)2%)|1 zHI^mUZZ2DNYaSUjFTTQ944ov!3Ng5G(hx%dI9fKz9|!aenTo%sSwd3~CU1+SoQ3sgV<7@J7}|AX6r+Pww98*nINnRoGwnx>1>-oLIxd@&R1&vW#o#p*f6}=B=No z`P3_Jf(a*b0xaiyMo7a-TZXsgtzU>WB?1Y{>zhb)m6bi_^c7tq>JRt}E{XYF~X9 zOsnl4AsU}dRTD_9IQ0j)nhSixG&-LqliI$sTLKuO8pI(5FpS-#V^Oa-yO^ z>FpCC@zCSKpgq-Ve3(KbACc)x7|~FD$&q7EtT-aSIxIkw79^p6)RF*?CpddJ5fIX# z@GwGg!KVbs^VcP6NWx0 zlbZ!L4>Uq*I^DZu*hC7K+%>t8bm%tjbL~D)|JYyKbs!DnF>f%^CT2_anj*p;Y!COWD{Z4 zqNF0K-m@VC#WL#{R0MJ1{1Aj^WO3PJ>q=vH7aw&=zDih9 zHznta**I+}h`f-afp-Fc@b78&)&+v;i<8$<5h>aaqoLKr*jFgU zWTGHI4Uk?AA?V0=xTfy{<3db(7%G0T(%3CC+Hxw^O$Mn;>I@>gCaUs!*-CBnd(;M# zfyPSHfGBM$v%1}Fxl%g6yk9Diaz$DD^1fpj$6QOKe&prV-L9LV(bueIL`_c|D z7$9`k9u?%|L7{VTS%GDRE?H}fh2YPE+7lE4!RG_uH)BmVao1*UMl-+w5nMH}TySNH zF`L#U4yS+D!x@nxTQ@M=sv(tHGf1VXo=c^5v;AIEeq!5?!CX%Z`A${)2&T}Pn+vy+ z7#aiSC|4knO3LJ3gi$rqZ9Fh7-J?OHNHk&yZ8DU`4<&=@SX>ehy0NEIfs8ipKzF$Q zjt9n~jo^bvF{K$)fc7A@e`QLN^2>F+f^H>7$hZjLaQfFH@{!u~3Pnd*tl?XW8x9oI 
zXK<+J+y{m)i2I_6q=lrnKyNSaR$*s38bV@7e6zBInHWe6 ziOZN0LnfXWk^**_fhUG6pmG3>UJ5F}V=njN;Wd@qHOWGFYDjb7BIia-8WQJv9RihJRss%BWhH&ozmC9!m@nVK4LMO{ZH3i>+Ok?1G+lKf-HnTWc@=XLWiQ*w1KdI? zIL&l;I?l7|ow^$%LK7`SLXWNMUCpZooGP$DPIlxYk)r#R*(SW6!B8B^rYy$@t!?of z^3k1~%^B-rN#EHL|J||cZT}>(>uvvCu&an5{Dj39X1}ip!bfjtZ!hSUwm)<${n=W6 z0D^C}4|^e(OK*kiBcO4TWftL4mG;kI8_6n@ct_&wF-dq$?;i+CJ|&L$S`;UhYK{07 z%FS3g#k}a(cfPJ#LPY)gTmJ<{F8McbO3&p;GRg4)p30w&J%gdG(_o=%Muka&#GgSN2-vX zoXpX&BF<&(k#8u!{2bDHz%^4+S2^e-PVW{W*eVQ3+81KSvST5GwuRi&2PQP^WSlr^ zG(Ct9ws~BNy{CPNBos2Em?hFyDSIetR5Rm4jbljA(P=H)bbIuqIT6W!w_+KcNuQ_` z^}uUwy+-agIO3Wm#{E<+#~x21Xr1Iiv63}$X8eVW@)z=)evSbDo8u#KkCmwwa) z$vMb7%Uu&M2kqOWDT%XhzK8u%&;C*$tFveLNdI(G{_@NI@^kt~v-$ASg6?n3{_aD5 zlgit|)b;oTx7W_7G4AS z>(KX#uzqkEtU8lchV^61VEsnOs?5}d$Y{d>*p2)j7>a&73OrVA)kvkP|B|?3OSxJ( zwm4do>`#7?yQ(;0(_g3)`2raN!>us|&w{_k5Jc&Rc)<-`?tp36i&m-pm*?J`SC+S` zNXd})s7Sg{qR*9GAb^xLbT7$Zahlc444{pghDd0<5Ks$ph15YkYD5bVAFCe< z#FCva-)9k|1q!2N_4nY;@Ilx|$top6j~m!qVSomww&vQJ#!Od23$n zJqeYKox(P4+p($^is`1!07d4ZeYm$l64^Pn5s$P5hL`_Yu75q~QlCZ|+Lt~O*iH5LD^ZY$Q z6Gzm<7)?N6yCa7uqu!Ee`ZtARzgnrX2I>hyeo{?*(n#w`{#MdGytq03WZaF$Qi@RT zR4ksA$P;pk$@8@dW`gEBW*4V+d3 zr>%i!$_=O(Zn;by?_#I}98P+4BhBN~ZjIppXaIqO7kcfXF=x`Py$iJWs{#n`R0Bu> zgebfvocmV)IH~D15rz+|g4vEu=yk%$}=4As7xzFaAE=hAz+9AT*?u$rCh*;1amt09B4FHQ7HQ`XbB;ce-+jLL z2lW~oZFcdKkeD?F0?wgqj17}5eUOB;f5&(79WOp#r)MBNbF|bMIh=lV;-AZ0%d!9* zToM-LU(@IfX&fdjxGMUw-}>j3d6mSN%isCDWIhF$RP)9cRBXYDVPW+KOQXQxN`swN z`YfFr=CG-s*7~5L z@Ns4jOaEXaC8=92d)m9~ZGV-_7hMI&`Iaqw1ilH+6dHn$(*9LyrJ{%bq#igPO86g8 zK@G^vIsbmnMtmB*T)qIB_+}pDT96F(v8s`FjlU=PTS?!;wm?x?gRu7~5h>aD_ag~u z+A5L^JCDzDUZ^U0+W336#=V`z+NmDXd`|C4rzY5!PjjzvQn7(LK%U=u8z2ueb3v~! 
z@cNgOJXmf9Vf~?Y^3}eq_awj!-5_^5c`X<7DVh;;MY7OdTuG@V0N$(jd+mPfl^Y=g zC`T2Id|dMkcMi0O_N=mzt0(nZ2|`i$q#xnn5V{;wHZ>9-1@7|tY~~a2x2v)X3vW)- z^PAe=$4f+?IQ!b~F~mmpMSWbC{SF`Lvzzj26WQUffgAPg1AK@nT}8F*hq7N;q7`nW z4Xs0k5QJ#u-I9k+Z}MX8>}yYz?wxFBPkh*a9`1=JO^GLLDj3n7@SjTSe5Wv&yJHEA z3rt?LhE z$GCQvfUKVr;x{d%l=*8K)1D76&GwywHAj!lhi2j5menypZ%nd|k`=`(@z7uXFgu#V z;Ht7!kj0)0ZZa|`e6(=^xz~@!B-q)#eYFUQ1qHa$>=IZ2% z1Y~ZLH6g}UQ1-=|1uIJx43(6sH6}aVIz7&RbohbwYdWiue@$evI)Uh;@ga9%p1hkR zbmEgy*z`Ty#Qcb~B8S$%z4XASSUaToP#FYPd9w9&l6xk)@jdJU?Qcc_Uy@02L49}^ zzdl+VG)Xw%>YlnjkOBzP6dgNJxKZCIVgtCzdcF}_OZoYY03el5`;`S6%Uv2PG9-jQ zVawsk7WS5;%S!rrsD(Lr0*Ct6GxZ%Dngc0Vmz{eKLetFth!3GlDzM{hWA-hUD7{!< z>H6%<9GmI~vN=A4rC+X9Wdn}i6hEjCU6fGh1G0;sCzX6vG@}2Xtr%PV_aa#)8|+yb zKjNSPe`&U!E2crzYRtuO?d+1Z!a<7e%)d)rwn4ga5|8oHo=QoMWX?s^cT6A$7xhfF zHp$uV|5R)=XQV1oJvL1PGB68p7hsTJbsGwTZPV$uuA5X`If}A-dCpB-Ak&ZW0B>g6 z=UAuk%P5-VtLa>2ARRAet&X8s}o0~7KjOTGw#5;AWd1Pb}xxT zZjb&CT0}2qWAOf8%Lm8b_J6VGFoh;=kN!^{pfsaM4m}2t*YOa6wu@Rp#&=*jxlzPY zszomMLg~o2rsDeZe?DcdJtj%HF_lUB?PA=Dm-li18`_l1a@FGQ(M6+S`B=B{KSlia zzhe>02${AY|5869E{l7B8cdyn?~&ZYV60;8ce>4&tD^q<*Iv2z;Qz`S*zFC3n`(Wi zY|QjK1$esF*1`K3$|9{(NaOWhI`!>1Hl%+u!8R3q)U^kXZ$gKgck+;TqD%poRbS^w zbt={((U9bMcG`>BX#sig_%~g9l0>M8l5wet=rV3k`EJ~XrQgVEY&%&94efa;v{jdN zWfOO_$0M-fi&6H`zr#2Hp$=NiKl@$CbSwL+e|(OQ^skED7H0>a;&qz6osZ%mUz?6Tx z`t!MI2ei4Z+w|P#KLO2?8Eh6Wbt$iJMK5mZH0i=SO_>it{~OI%Nixb!_-3F0d~xzp zK49RtQkg6X>W^*(0&LVRtm@B`d0{HqiXMv(zsnr~fCGwKa+tl2=vr4(hoa$|0Iqxo zRnw;W*6oO*i3HQvrVhHVZ0i0U*%TVF%C=6#Rt$0bQriC(;UB~c93nFu5)H6rIirgA58!A{J^2Jg&v~JhjSwChz zHnl#M2#l8*S^b1~!WQh6#;|S9e)DfjXZ(w^UwPbrzN9z-4Ce$O#|M-@{ZZL)32QML zq_H*A|ID_L6c7wb0cF-^zYW8XO=j;C-vG*dR;g8?;N)&FTTjC5Q5~Y9rn|{K%sCEN zG{izY!aY{6=7Ex^1`VRrmoQ}C~%>L$G9C3eH+H`OE7*q3o*>k_`Q*$*l!LNUb;>dgj>xAZV zMb-O9RT#ZPQ5e!p`H#BSydk!4ufQUGIyBznf(mx>H$FvE*JU5jN1Q#$M~R(`vnT!o z&wnuc(vv=lf>!*8-{kp=vP&QJ=S6eB>Lloc`nW0k=6~ijBE?VO;$?wNMN$uzT`S3f z6W!CTDZ~mK(V`sf#77_M)(_hY7=1B1BEFqHhhb5qi4I%`0?xPM{PsfHt(&!@3et*< 
zi_XOStH4iL?x;!lpQDp9pAi&f>TtR7yJ*^_cd~+X5`@i8`=RdW;c4#Csov8ig`WJ* zP!rwLCAAiF?D<7;*>r$fPAKOuC;$Fj_YBkG4PHIewE1v z{WKUA-AE@u79s6k9qI%HY39OE$gC!>I%1{n_TCs^X&^;jPX^M^K$uK1qBd&`fLVRj z10oKH!s>+;;o$Q2SwR~C?6gJzR2qSW7qJOQG z!ZZpk>{xf)T}sO!E!aR^S1@ z(sgWW!c1!Sma>`~mEhFN=FtKe^0AGwVzXAuiB#+McA)LZ5O1oa4UMiGGgKT zEpkmgWU?{j6xoma-Ki~vm+YV4@$~1j8qrh0I21#kwZE~+VEEtX;T;`tkVed(5XBaD4iUrDL&aIffDHYW3&=?P%TYU zL)%2&pDo#C#WJt+9`feN>#B*h1_9ws4?4Q3cCRY6wnuFyo~5yAk!n&)+!7fE9hh!i zbf?K_@8W)RGu%2`W*c9pU7$+B7^cZ|EsPvm8(xGDaZNgd^YTumon0hdm9{+af*;|B z`uvIZ9JX?>-WXN3B^-eOhU*+|GbEAnDODM-0eEjQD zgheDx(b=|0|{Y{!Z#R2`jC|Ob={gm0&{67pa^}nJNmt>_6ADgi^-wwl>tKR z2^cRUI&8a^leK2yY!kjSW_&a8<5{kPre#v&8i_d$sh<1G0EkNj z><)mpfuIo=Mln|o3Uk05)14s?n^$m2J0`evy&G6d@W+k@9 zgI*|bdhLG&7a@Z0L zYL^i1(`AW(tA?6LDm@lB!v}-s)g9BF7R_{PWS6E8`3Go`G3eZQKF-*<(^T5sA)Eih zZ$MvUrkA?%JT5jEw^f<$P@68S63%mZQQj*-ap|SC*svD!<{-E>fYUYrjjR73Lz4Nn z5w3{DEpYuvqZQSYR<*@{ajO+o^rePT5EnVy4Ym-M&FNdI*t5%6BO2+u%mIC-(H-?- zzFxD`$o5#VS^mZl`%h7Db7fDXyWUc;TH4Q@W18H3lxTF;9~Rq3k$n_7JQ?)>R&>IX z%&nIF~rGBqUUTJ zw?|9*B)9N6eS%#~I?)q?Q;W8mI~6~V>n(KZ_UOZ0pgPI_ozZC;A4MTd!dzxwYu?_~ zUBiS=j`O-b%~hM+>(I{r2cf_uSI_%3AU+Is?J7(jwAJA2*xgtSInzj5iQhPm6XgEB z$1fc5ZRggERoW5S57=8*>wsc3z=IKGgl=_FW z2~Z2+0lBu*;KdEbCyEpC;Wr=^+tzuS*Nv9BMEuMea{0IgqD$U zSBEk@jCA->?W`fsYG>_kdSJ~++}=s`fGK)0Q)H}-l2NSNF|B)G*X`&cUr(!%>w1ku zrADCDSYoHG3BVffT(|Rq?nt&_7qc?+_b3jVyB-j)vD<#}ZNu&d)RojSup?-Aq2dT! zu|DF!oS5X8qXThV%%S0Eqe~_k;0jogl&}JkR%;~MBpj!P7S-ufKnfP3>x73n2JaF_ zi%~}l5i;secS)8ZBqRCrm|6(igZwLDajX`2kD7E#3&V}|h8stp zm|S1krqiZ}9P3{Sav)U;Io5A)QbNcPfFD4Rk$wbOf7mgElN&w_R2=QB#~tDLggjr2 z2s}g3S$I>=o)JkRpQaZ9RjkMv(TjR^#$FbJ+E=teN|KtT96{{wcL^%3%0Kfp0XGTF@|jGqSJ8I!m{3#+m))PHqv+$8cITPlrDHA;bt$ z&yGNJ#FWM9m*gwG$WNtV_N%S(Mlhi>+KV;ylI>?8fG(3dEBP(L2st2ZO3aKdUcZ&T zuQ!Wx0tUum1`Z_m7NWH$>xEEH^q8PLLW4w%@fQ{){UQx>!?C7E4N&6Z2Bxr43t-;-@wG*^X^fjbb9gXG0fMucHj3@)L#qJ zvGg?TMN$08mIlwjm7yLzU5-Z=)dEgo{8g44SUak>Cejbs>RiS%P4lD)yaZ1=qdA*l z&e&g8I$!I25U3%&&swMLbQ@_hc7mN_0;y@CE7m0@#mAZqS-ZxMZD%1krU+O=gP4t? 
zD#$Jn7@AdgKVSo-{Z)-5!ZH`tVpAWAts#XQ7M2!BBPyhIeCtEN#SB=Al$yr zE7)#G1}3{L5xPo_Rd3t4RW3#2{DbdOMEt{V=#YDy=er(w2}d9pZDSTs6ld2v3+xSZ zBvJvrBoL~L8okqTsVDgQVLfr$fv3~f2?R+BFAkFjI2!@WPs!cZxsY6gB_Tflfy@8TpqHuSO^{b*EY($2OAwMfTobqX>4L)m_4`v z@}=fnfOL+|1xPDtT!8pS8}OnAEdNR_jc@)oTdIF~qg{5c8hl+H>`Z#Z^2VL@3^bZP zijkPd=uCAmatxqL;5=rMk8XcHew5P@pWfM988{sl%Nu2XfVVRuj<^sXIR^?>!?3>K zM#p*{(iO+OoxwDhHK)CU?s|^gUazy=>$l(bc<1`<%)q1Dryifx*O|+WN4IaVr!zB` z#vk3@e0+9R=l>q*ws^qz4f;+wJ)hz`)%kYYp4nL&0y;OFvvbTDvv$sA?X7HQq}3me zBhe$YL6g^C`y68CtUQX1pP7X550lyQ5h61&%gkSQy`YZwfLs6K4M+FP{wUKuFwNI@ z*8402VriDY^j*#JP+-c_-GXd%t`|fbAMc_8>6`KwXv$wuO!@Y$dmK(4iS}# z*ll5p7{=1qKgNDE=cb@Si6M6oKaNk4FBBd|s*=pb7o8M32k`)s2E}++6ZDX}?1$(T z5~IlgAP~ABDT$jnT+GWA@`uuY7PZfJk`ccWV6i>jZSdZ1Hy6_@C0d`R2)6t-*AzL! zKmvmb2X8|+!kZz=jZymhko^vSJKTD~v!VZ}eE@>fEa_Te&g$Ao)QI@66-CvizSJ4U zUkmF0QBb2wpO>jr6Xm^^fO zjM$RnHZpwl0n75YD#JNbLL$+Mx}$hLbW*-Fl+u>JM{EG-dKxgh6~783Z}4dqJ8s3E z<}U&8p~2&MgCp5|^eR9I;{c$Jgm2gdMDZO#G;QXhX-ikK8L*sSEXYSVT%?hsKg9=V zk~=Mp2vrE~2k@rY`78XCxkb--lKc=8lj@{Eln0v-n?0x!Y9h-?CB+)7}%KM=OgTL0HjZ(M>u(eU}tlD$PtX=xM7EJQ&(5$ z5?YcwU_jywXc+6=utOj{oK;BovK!|R&=3AwVjM!M+1wygmS6Q|0O=TD5Ui0ydO}Uk8we-tuL3vX^v}YEoHr2G8=Be3 zh}v*yWZxtgpU4arcXn9*kM>SkOX&{863HKAAK3+^y|VlvVm?O1GdrC<$`P4N>mD?$ z16&OPQ|n@j(*RVeSVCSgc0I*K_Dz78-Z2WC*39a>8G>L4&!(9{HW@2yV3T10idla;1)vUX}m zw>KW$)5Td>y z3(gGC45FjmHG8PoH*h*)F5<_56228Wwwl{>*JuaqL?YD zH-Lg)`CiOw0DW~aA${;`4gX&*Tz5@*{EWV{dJmQX#^1=t@0=gwNB)&oU#2VRBFH_3 z#s&ow@oV~YnuemLk)1(B!-2rxP#k@PQ?+O6-4UZ|+(B9$y^a>AQ2TY!m`d-#k27^? 
zF)}H6WPUyDijA=h%`GPu1_4D>G+YCi&G%G67)eDyxHY2An&PL@^^kPyY?V5@9=!zc zK>gUX6S6sU(w9L(gU+R<`{hvc=-895MuZC0s46POM)pE9>JUUx*~=O%fzy}tiBjmI zKAnOt>8l%3(K5m?0)|8ZFzN7t$Ed1^C%={wfa zs?Y90I1@4l3z8!k8KA@ZJC!Upe0rbe-lpF4uEQsH7x!4o^g^K{%}H$W5YuLNl_|nn zn#vBm;fyH*$Dqfi&l-o=N(;&72|lwEbfU%3(Ur|xzsLFOq`Pe^gg&cSf%#^4wXMLo zy)t;lwF2uE8;5CqwNE=5FI$XhtR~nB(0IwaqnP(o>Y91qtHVO4oIqNIx<{YP{~3My zyf5gh&--bO-B^+ga=htdR9JKavl1Ppdpsz$p)PR)%yP4|zj-TQsqkhML?##!$0; z_;Qpr;9+TRj`z39r?WKyLhYl$1GEu9e6nQ#PKq-)V68sVG}Jjpvx|kZ3U5iD@r>%h zv&`$0%o)c8%h|lpoPM3oe;w4X^ETio?eD=>KHv*|0|v12fX~~2iM7uMd{GTuG|*&! z$RA(Q<_R`XnL1(DzokKTv+SO#SBm3z|@y7*j_|9D5lNE9vW(ZVur{K=cAAxYk2DH@G$Fc*oFTEm;@0@FdbZ7RX-h_QBUh?p z)916Z+5#I{wAv2Ei!TQxbwG6F_W`n+vnLH_uL06BK%{#D4s=hFOWMXg)5kIOS-Cx$ zSD)#R#$>5w$I&Ndu2P?aT!}T@^oG@LuhEPIRjBuEK>O6#J{sf1^Ty!D`kAemnX2y4 zP1(#-ZF*Oy`M@N~MI%%*YDSuRaj8V}nk$?w2)yC)W|Cer*o%5C$xA|fD-?wQWM-89 zVEehI&>g?POC6FdSn+^Zm?L%?fl~S;1`siW2vEMtVMV_#X>c;ba6lW!OKfObtJ zjs)d~q*-&t*Zv5Kk4f{LL)j{klQ~jcnQBl1<+C|1aaFvGp!5g#W$p5Lcf0*;JsEXZ z&I|Y^q4uB;OclCzx-N#BGlrX{I~B9A^A+DOJPO43GbdYLg6SGx5XH^ z8=9j@UzDAp#Q?f&C~ROIIMy*em$m{cI;@f&Y1}$oLoyo?4f0QFuTX4khab+H=mQF| zVRDT=)=9WlU3{+rn7?>-k8%*|dz!Qdt3lcx|sc`(g0 zwTYPDY2H0S$u^g$jsw?Fcy24;*$O=9HdR;GuHsAJ{p%nM{l@CoA(0V3W=+)rmtv}h z=3tc~2Q{8_X(M`g4Mt80dNt0gx*j+07n|ogp?vbi9&VeK!@`;X=9+xkbldFYI4f`A z-|nu2vWDK(%{Q;}05O|GInMEbMml5WgUYxiGBP{#=T+kXR6VbX0goZevBs(@WMOhG zbqq$YV=H$rGqqN!Q`v(!m*op0#Im_a_>w+n&{+A`dinb(kBjvR<=?P%%9iW`-!^qyl!9kizf#1 zx@p6HcmEg@8F{|nbMcIhk&Ea16x6qR&DwR>t-pRl^n#5OQTT7u4K?`A?DW|^$_uxt zon8KjeVb@!@A+H%w$XFKU4BMzp{vYlla=fbzCEj78~oR^{_Fa7R_EQZUwBmS*0(b* zh|P|FZAQPYYiCV|b(&(+F1|IFU4^?Hwf!+x#tUr&A2dRXnP{MSFWUuipQQSW{J z4jnLlv)SAI7oJR`{QBLmJf{A}+Sv%do|v^?xU(p)-RWc23ba(eKK{YSRkG#3K6uhf zjyRm}`Lj3cUDMl~aX1_8Y!&tX;4P2nT|Jcil8rWG+fxn(-jQujy(`@jzX%Z!WamF- z?~-=5fnT5gdmC-+ZC*aBRycOdv6~M#scQQN`wglsF(`yXwXMQUCz)7TZ64WB8LSP zh05#QDDMPcJ$vCj=2Xv2v~(uy-ig*?>jS8$P;T&76s>hP+=C9mx+UlnVZayVPF(Ig ziM$^)3^j&vNmeVShKsk+cZmhsdPng1XUaIibHahpwt@MFyO&Ei9MLBIlB+b4Y>Ip^ 
z_lv50#&wpNi!@7qBj1q?b`X2c0OYyBw6Z=pp)x8JvxqpK2AD2RGmW^5n}aQ|x_GJg zt0M{KpYurF-@@o{!NQDd8N$38$95cMke0t( zc+)Y%Hw15Cgp<{7`u;P=E@PIF>fIU+m#YsYcyevZx6E9jnLb3@ljS`5~b@2wcgMIbX z7oPgU&rBwnZ#=z9VTs@kQ*ck8#L&oZ1=mQezAZId5jN1y_`Zky02!v+^k2{ru&>%) z%|SzzBzXHQs;|s}H#zVA-6ilVqt^J<2Yx5-DJJV64ez zY>*%oiL-vcRVt=kH#5!y1%Aws(swGkyb5le4XBN-d0x4Maq=p}0xRCRsvUa^zo@y@ zf8E3{RA5uU@boz14P5G5Mm_Msk*Jn^UP6=-#!s}nVO@n5*%_PB>@)gUo&6Rc=?tkD zVsZ@PKHmv}3&OBb#V9PUVp&-_VH^z-RRd4DZs2}0lS^QMR*4x`q4c;C0Tt=$P2B^l zSvXLfI zo@=_)hGV{+jOwl*jE@`(19g|X3w;jn^D9f!Hdo+h}@9!Lrt7JLeJ36XAO zk?2zHHYTN_^#6bKEHl{vOE@8zZbwnulL5_@92O)mUaS2|hst1ol6_s(i2yFWS6tRj zniLCj)9T;IFXEUxVZD)wwBGW?VUuWAw%3F;CLh4J-~Zk;{pP0j=Mz#lTKp~JF7~bm zx`%P+dzU2mLN`LtOXHSYrF)V!Zrhj}x|Gwnbs>HBrvjV|r#-Z1bCt)f(3<|uP4y0y z8FwT5yL{Y5^m5o-jPW01UzK)}&9m*Y8yP6aT0YNgvSMG^JlFEb?|+uE>&e_#PTjqKymtCS>NcULHLzD{a{;JTgU`oU^Tf(DsK zyG`GJIz0G#a9k8tE=$cF9?3WJ^|wBnkR)qfC&>aH0d}JRNFlx>#juJ0D7OJL{FWgu zbqtx2q&;VvwSgo7;s>)3a|qLC)1rydjU^VA^;dO_1; zk1|N70xt`9UexDPcV}+lC&@1IQ6>sqTgx1gA!BjbI@d-vt$AA&!lp;1O^&LUTX|1f4K~@yFTwZM1jBI*f#Z=@MWmuDF;h%ly8Tx!cOb zp^ODf(*60`KY%lS+3fgozTVH!P)SB7<;()S(t?rNIu!iFx7HL3sO$HT{Y>%}~ z66VC_152;DutcV!?8|nm2A})Yv6)Jgi#gjn>EY^Uh_1~HlXaCjlF(*oj#s(!R(^^K zeJnUB?Tff6UacbFwu1tzRt{HZp9jOewmMqFC`NnDixXW~!dQtb^+V+fQ8!c$u+txz zOW9G@l*(2e=IhVx$!r4?$=KTWTA3iNRyTrq5CJ; z0}r#V#g`)B>-<$xY!gjzHD#ILSrmk~t0wR|!%r@Om!lRjGmsGFL<&IK{z~e!bEGlb zx7}`lb8|=J>GMK3p%*=7AoId%Q=~iA%SmwNo+eJ7N~S-Oz}oN% zPGglUb%#6RS@^zm{ZXYF5*hsiYL^?tRWq%}4x|nyM9?5xua`S(MOy+Vp)J~EW#4s^ zy)TR zCp6Fr`f&_1eezTo5Ze7!?&I}d4&ZplG61kzk}=kcChhR8?$km_fhn3j5UF)WwKKsR z6wg6cGjPHI%E=tC%MO|y<`NS@Zh_Imk+azq+VFP9=A8wWXGxj7fVzCog}m@u7@|-M z0yNR2l+)Qev}H-77s~M$W>P%-MY!BMPh-qp2{>u1K`O!8YT5sd|6U73*qq<0{S;`x zl|@Nxt2}28y4@L(NP@roh>AdZoDNOXo{ibN9s>QC*R8l&YycugyLZY?>TASHPm!By zJ@wpX(iE$b;^FvHyE18)+pF zczeb!Aj=eK%~@a~EF@_lkF>F%cgK5#UG#Hc>ig3?7lm`r#Q7pAoy{`VlbcOO(Mz+T~2}{g~dhKGBZBR zVq!Ql03iy?M`TLS@Q(XCdgiaL z0ATygW;k70BDHKQJWsQ`R=}LFIo66Z6@4mG6FS>D(euD)NslYBi1KkNMj_BxP; 
z#5bOe)4KVpJb4v2A<-;|B?tKhd_=#@xq5PF$SUimDOXEY32tUl-XS#yydyPDEfXx+jfe%nDoF*kXl!iE+IAZZPv{$K zjy46jxLd$Tt1`wf?m(=)EW(Uj5Mm$*JK$ogVJ183_iy+=GemHf?2kp0dagpbZJ+xAvLS;S5bvuW+ z-Dmjfo_aPN?lvEf1T=M%NQzft8|%~k01W2VW&3`nt9w#5NR5!1-{jyRh@B@icEUc{M{#HrPG zgvpj^;F8h72$OToC1J{AA;N6EiUR`%(5gDIE9OQYz zXu)0p8L1CcXvuma%(}q_R)NhxazA0gvfMk8lo9~9aP;ojkk)qW8mv-+AaoC;D8#mc zyp2pN?v+X;GD8JOEAdoPmfq#xkZyb5WMF$ z1?J^UiO}s4Za5%^rTieuEydb>=``{4WSugYtrv3RZ$v#Mn-r(#q)wLAxfFqhE)+Sh zvk5}@schTXpN#5ZOLy)Gtw2$DKpWT)cOU{_BzPUe1kzkPs2hiz=^9A(0OkfDsT@3H zOkJH^;^awvy>fEnIWbGa7=7EaM~%{#0osfeBTRySrZkMg7FqD+7frI4){2o!$pacL z5Vi3VIpDfmx~J@Gr*(8(a8$U`RIUJvWJM^X{}#DZ7@}D3`QmLXKZinCbZtg_ZKMo? zl2(ZIMed;~YsJ^>%9f>VGnV+n4>O7hZY0Ii2l~gGiP~Mj|X7y@ug_p&UjHXyG{)D(lGVPMxt56uYCE&J#ryAd& z80Li-qWq?18$Ejv++|XN&r|z+t(gFv7Ar(OKO2NBDWY-Ly9UmQUhuM*LqX@nxrp_J z(Q4;pLv@DT1d#&rTKQsD2qy0Y56Wjw#z25mhrTb9In*J@3knJWk3>=+e@Oo83f}?w zZI?z8AdMDFgv5ti68kb7Hc!lsxReCJ8eEe-%E1hi8UQ>alYppQko)qWv`~w0Vqewl9 z^d&Bo>?vks?ET}BM+*^h z^AVV|-TFyM^)W>|>=8-M-d+M9Iq5=Y{$r1etgw15v>>6)5+~Md7xBxOnn=Tr^NC z{+SP59ywPFnKl*ehdALlb@;2sh;-!k$0Utpd$-x4&77(wvoUCIEk#ZM(qi+~X*b4& zY_&1_3$iN7%F7$K(yp*e1*xozxizH4W~#-zeurCCyzQQ3i;%Ip8Yb0wV2qhjtS_scYgNys>N+#*)*9bzM^DT}9K7 zQt{s39UfOq+rr1}PLdOvk~I!GDB0fCJb#QDcd3RhX-N#Guw02F{D`bM1tle?19WDSA$mCPX?)kgqRQtudBh+Ag)m zA&pUdy4`JT!mQ9X!BlxhnA<|Zln9U|1k=<@ka;Y@lpO+%u_B7X)6w*4+llEnf_i=| zc^$sswN^$Tw`q-XzVB|sl!0F~cBeM<&USvAB+@0Krrd5er zPsjXI$z$v*qJ3T|tQ`7vTtP(*aP%iRR^c(Q^_6;ToQP21) z@;2&}w;B#TaeMv%J|V}bXKZ{EXCKYp2x>vZm@h}!cl~C?S2XxZRqDw8k=onLzvTt^ zM4~&eWK03ZE@Xo{b&!!1jp|l1aY8wE5}C-IP@WD8n5Y1a#BtZOjlv83aOEJ5|Es6qB=KjSf&}qJQ;;BzZO5kI1aW>P zUvdtmApR^Sp+he+Yx#8;^Xq_=OibM-7T8JQnny(hw{G2G0@h4rY#z$FGRrK0ACq8v>R&Y#PzNX8cz?<=( z5Sl??%Ss-ez0PU*O~7bTyQdFS#&%PPr*BPi!W)h)>m zmdgxo4Tt!E9mLH3s)@#5t5 ze8G`k{Pk}E3a)%x5W1x;l*S0@u#^#OtCq={N*EAyjwU<0%2#pW1st>=ljIiq+Pf-e$guy0~a%sw)T z((9c%Jku;GI+p!b>Ys~t{0rgA>V$w;so{KEtxJudU1xGJ`?XLGv%EMrTQoh$DEy|) z**8urw?0I3h)*5SUUiiuuec1}rfjgcp@K1(D5#l+mLd>g^Z;QEhqWwQ{IU?1;+CcK 
z*gzz{Lm}@Ys~k=&>dc6tkHn;z;FmR$tKzRG*b{~twL1sXumHB8%dvy#3XM?^(%>l(AGrScm?fC=H^3gEyTXMPKYxBhOeQjnC{P( zhlsne<*$eOvfEDe2l1Sl!X)m3U1?8tGwqxczrI`7cC8KjN-_PKm@AY{BnqzCUUN$? zoz>+E6urh5RaY-guB9kCAb(*B1bD7aehnpskE=1mMM}0`li7Et`{p|Fc#nTx2L-(WY_`=UvFPk-O%w5ko!y$%ZL&Hu~!p~&x7-#i9&}y zc!Jb&eep+!Zq7Dx3iM^svZAi&<)j$dsOdGYTXc}+!;@tGHpfX=114JQ!YRp90Ob-$ zFZ$I#*k?;87bt-2ETWQtjQCqTef!&OKhH-jHZx`*_3Q*m7@VX5CsCDUy1ga4H34C3 zQm4=Y?n;ZOlN8LJ;6V3rInuk%>hmPns~VHt5%8HO7nX8-x8dATxlYM2HaSUh9V9V_ zRK+WL7R@HQP{tX&_{!-%*sU&_C6Ez+4EqKjc$@|ZY$L{+>h(*-W^g!DY}lwko{`l# z^QW3?*BV`IccCnty_Qzz>FU|*wenYXM}&F?0Hj1NsLTFuJ-^UBXVx4Gt$>&v}Atj%Cht_c1B(xXR;dX#77f*A&PfGYkk`i9oMm(jjS`P7O?1!YmH zpa$w%68O*1iZF4DL3QiK`$o#vOnzZsr5#3}%al1$Q9uutWgRKfS!leV8X4zSal6#s?r)rbKfLw%C2;P!cd%Ww!m}p`bB)3 zsDgjYf!|C30zbRA*U&uNW#Ki%kTol}sv1jB_xXx04ry~+FXUf`_HkO&+WZgf9m$u8 zc(OoRMnua*M2RON_@;NnC9$ouu+eomm@u7e=_Zq?AAEaL0k;7wnJvLS%j;)-9}LbO)4QjP|;z3?ICu_0Z}c|=Mgca%o|T08B1g}q*&Y?byzE& z-0F;K0~oGQS7gDcA*z@gbVnIM?tgDC#IHR2xoCxPEom+~fTfX%butwxv(>3^Oieq^ zsJ67hG%AJvji;fk>CbveL~Cu)nr-WTBC<~~5g8|{6QNw<)&x2nUX)6WXPQl{%rlk` zo*NE?o6G+$q9t8%X6#X&{o^5+dNm4o9Ee7%pgn%5oBHgG_6b`%FrI3@V2W> zq_UB7p0lK4DbPqGHy|lG#=UqbuE~wLbviU4H^}OgHC$){+uZfVvs zo9T?MFaWA`leG78LMoIBk>G3y*+Orb7?dX}-jv5CSuoTXKVqm$#t79);6c3CE~5Mo zP^d`SHWo*yegiZ?$sJ)tcv-xxSg41Vm1JyM)^>G{Bk4LotxOW6d^m+`DM*JUA{|8l z=$^XAIPqdAvXyAjNH(Ctj(FW9B@JOoktoB|uVg`$*~%mpmeZH1Ea>>61y!Pfz?Q|l zPPKGS==v88#v%F=!^l`}XwmgJG#ZZ6FpBC-Tj@Gb7}FiYuIHzqoilJH)7Esy{B&Vv zT1#-j&ClLaT;Vg;qo0=N*eoI$p+<@~rs{uj`$Ep(N7#m2_u#CH<8$QQfo0L($qWd>qZA{Rm1 zSlm+dyN2a9`jue%Ea`X6g5@6J^tz*8i7kuAvWS4o5xG%Tm;9sztckZYac2n#(}slI zJY94-+P)07;PILY37e!Onaa~J!wod7%FwW?Jp&DgWnQyU=v~8&uic%9IukzFk!5xy z&H)URS%M)chKdwHV|;*@QtZi&Rt8+v3R4T)SK#($V<;7~1f`dT<lCvl*EvxWxZurn%*Ux4vso(Z7R83UNO>A%&+bIUT-fNz zSlI8co0gOW8zWOYS;%|&j=V9GH+9D!Z75!#{qC5n3^B(n8hsdYWg_M16#>A?DgrEw z-mLnM~b)ck^Q&lg{Qx-r|sf)|8y)83Pt!c{cPY~8JcysJ%ah*G=yI8Z73EtH?+#J(sNdD!ZCm#s1yoEb zbW>nNuzTxR+oU(5+Z$^H6X)zR$sSBHNC%=W%yzg??UlV4ZiZFtMXyz_m133~L9D`B zD-P1xlrz~xA# 
z7llm{ATK0~mcELh-{>`)+3?3ahv>Iv*xFNaW%ZIBsCn&PJWYNli$5*?h_rE!yisps zi-<0?WL=oau9@%s_ETRudmHIr_prt{UI6RE;+WdY-?`&MAK!E9Pi^b{J5q>_PRaC# znBZI^#p$H(AR3iT`mz1QFyNHlhTw4eHC^;(ee$bMZ81WXQAb45Gf&y?(0>DO6ZE^>LdF(2gN-*7PqQ9e!ZLfW z3)|pfeEkhYx@jM^uh>VyUc7}h+_$COk*zZ z?MU@j?B(Q$)*W8Ok~Ou&O)aoppP|9+R{`MU7I@v1z3IhUSvSm~Ct2b(3NZ^ct1GOS zIO4Z}5nfU)LvwhrWKtLLx!WBQO!z9khT`X{v{pB*u5RvDHyf<(YHKyY+P?JK?h?LA zsXO`FR+rP3AS|hvO}V5b#Ph!gi`pgUiYudd_~+<8)bU>ZExODW%S`s*q*iW1KRM~c zPfL@;$}hKH`=Du1fMsu+WfNky2&ZMe(SCOstbmVkyVo|A2#=@%O{3nuir9S_2DNSW7wUrRK(R4Zn;VGeuL(si5ZY3wr2 zC>C|&NE;?WYa3HhKVRQbXh}dUwY7Hf=6zeB2KW!C2(N%IMz1|DdLlI@Ml|CwhG6bF zqp0J^A&PM%0X%X2q(QI)xP6Ic#x;eb6cDNL_jtN7I{>}21tDI^;ww?p8k(&EMuwNo z!^O4g@{K3j|LbW~_|>$e}Zw4Mb9lH$a{zc~j5`}9K5A1ChlJ4?)2D1hw1 z%q#2#{$a~DUaEHpt( zGKJBPD3!m}ZDn_o*lrV^G3y|a>rtl%rL&Xp2+S1kMB3u|@~;v7yMafcnYZA2n02mK|>pd)y84kkUBs( z$vJe(4&b_Fzl^V7%Wk7>DG<*M0I!H|eO8qbONZM}LIbEOb`G{#%z-x*S0d|Z3xtqp zDH_QYP9vlYeUu0C5CU63O0WyL?oD!RWph~RWSOBZd9-h8y*xUg*Tlqt!eKz5FZGK9 zG8WG3HHggLc_M2TLLSl1&jKtb7b^p(Kpyjq0|6VfX3gY7@gpjJWO&kYL5BB_>HTB8 zzw2*Ng@LGFaTbt`gg|ZdSHqN}(d#Dnbl}9DBwgnS0J?1%puu+#xlPe4D+@j=7wv4a zvH0|RP$27rrgf;R9KbOR0D3F_`0srnm_xa7i$(~;+^M#9($*(`hlM9x%q=|8#|HbS z)kq(PD=+)gB7R)n+yu(mEFu$QT(W>gK4XQ>%Br1Ogj6ZP7Y3q@Zi%SX@|IZ^-G0+`9;}V&#M8*iQA}QuB@r;z}b~b07x5H37Kh z&P|6AUQKP3pDHXWE6T74!(IeV%}`7Ty)OIaICIB#9gh2hnB0~lN&!K43DmpD^^FVw z-*|C?jom2Vm7KB&Lr``}wg~=y9zOA>fluIk4NLNR-l5)gNBJc75t!HfiV*EWIj~FW z*C-33(`*YDq=F$UX@N+ud96HV2uYvNXrkbjI2@KpiXR*Wea4G#^O4>1y!NH+dH~i3 zJrQjPyvWlXvm0B`6KI1tNehNQe0rmiJZAi|vO*dkj?{j4LW(iU6AgX?JHBl93C2=Z z56N)E-}<1Qm3-%-PP+Nzi4J5}&qvveAFIXueAy;Wp;l9<0N5BQqItAupN` zez0HT@U&~3u{o(e6LNKGdwd1Ro?U?#luNn78L@+!30F9?47}uKfRrnoCC4K7aK)rB z3#@WiI5QjY;pGvZ(uG+QuB!=$IuWcGq}haR+zYO6X3(-xIRlNrM-!Gy`dkdl&c4fa zTyYAk5HD3^Go`f4xNpS>M>=>S2%8r3_Xi2>m?L8OJ z=NpU1K~{#Y)Lsdg?S9-36KLdi?AvXtDlyMIbvmqziwjPzN4vhd3(?{>R1hEFykFXg z*?^P$xi3kv2Ju_zAz(fHdqKK0>GFJ6!h{lC!e)9;BVU$1&03jKqIwV02#A#whv*Y6 zSv@NQ0qWKich!UREN)|xJnP9h@nPcCwNvWOyRK56OOaol9K!9!G9fi8u7QI(8=XH^ 
zBiG3SJEkLVM>+@pMoGd%7;AM>Bb^`3H1=#z5C^!!nq?=j66`=`(q$EBv8-HruE(>jVh zw$EVn{ZYO@t?H6wFdZnDlo7j_|H`a%4pm_s!!M`V|bl|vR>`h?A~5Y`K6 zn9}Yz%b{P2(ggdok?A@zmL46I8H{l-G3>@Kj2@QKW&qyL$(1 z%TmUZ;s|=-0%ALCkIIO#Fq=aXX1RVt^FXs?SWM?#s|MlS%o2RUeM)9{_AlB7+Hma` zMvAv55b{>Yh&zQ8&B^CcE}PTmV`RS3wy850IPV}hV&JFvYRV=Uw^KT@T?$hF4emG~ zJ~*#qQRBjdqJBO8R*Jl{);U$5kEbe{iKvYvTbDwTFi^1GL~*Xb@t<5*;p7WE=TrG| zxtioTWECFQ8#V*oVt_C13B^RRspsPt*JUU)HDBcY9A z=AF|7(r?wdG&Zqo+hI&}xf4X@&Uw+t)&7c?#q;_+%`SE~ z5O8z5jYpTgKcMduY_sP@57~|=V_Ra^zw6|0@Bzx*?l8>9LNj7OO3Ef;-M?wLg4<6RY3;(9L*4IZ#ZN&Rg`98(8=|!+k05US*^% zI-(7+648mLCv8~V{bK0cSG}DNAY;wkIa9WFT!Z>S!FiL__8ch4=&PIjs3U4flQ;#!iOEYSyKaSxTX z%kSk%%>8PJXZP##0V5|a?BTl%BiO;1)2tBHqId3Evtr!Vz6U&k(a zPFNPUWl6t3FAIX{{Os`ymdbGIFx)_FwS>YUd$5S9J`D2fLOaJ)*?mv4Isx~I?#rHM zNZXJ^U27k}?^&Lt^hdzxR0Leho)YtuI^4zl(lS2Aj7Zm|A>%&Gek3b}m2;*}7h6D%L^ zo29n7ByvE6D@k_|#{MowSYnDiYQQ_!V3&%&M;p+uA}m*QR_}Lfz52Gxw2@-Bxs<4f z#3`g4Pd8z1I+nr$cN`PY%%W_+bW)nV5un1vXcZKVHi!qYwQVdubJt7;fMuypi%0lj z5&H&Jd%xqmmG?9JaPMXEG2uDF^lys3?((X&zZT;NE|xRket+gizyH<0UH&?mOo=GL z2YUPOf9peEmx1N<0&dQO$EN@AtIx6D%k=xer~cQ>U5Wi3*YDrE?fq|mp8Xut&(D7O z6HmCe^xix1XCIpTDSJ7pUozF&uMz#iRBJzoUFpeGYd=tEdYEeMJ50HUsn)&&;2x%0 z`%aWg?;o1>6)BXsuQuNBVl_}?K=1CKMK6DVMezu zAB&}n!ahj6ha58AZi|3R15^%nTeI#8d+HcMLM;ke#YIs7UG4+}HK^aZ!M@3q3vzi= z?%zKz8NIPCC-ot;p@HF+vn5hlG2Kcf$~&W$U>4`KvfZ-H@=!NNiCK2y1l3b&%2NNF zHCM6hQ_EUuQj6-`KLe3Q3Sm+V5dyw~hq&pEVd_BJN*Ohog*tuAI_Nr2urkF;@>Yp4ST4|mTxx-Y&H3d)JB$urglwLqarmU-=deF8aN1!L z!IEdmYj$b_M_{7pNj|S zYF0Y1(D9uN{gNV;b<K-6-VDC3~y&kyX z@*z7r+6R>xH1-*!yQAt9YazgAt?8CE2H8->)dprp{@N+Q>`OLNq^hboVRpYAr>LrI`$+LxiB>94YHkm){+ z;)F0O-bf|4_ozkK^0F_hefoPeQ5T=r*pE;|FSXX$C!Z^{J2C`jw`MPl{!Sa=dC`|G z;|y!Rpy{vprjHJqCIj2TrXL$>+Ex%T?OSEjgiK68s+i2ljtA0>WcLv2ZtfaSC=bIj z>{fFy7N4;rl*5$^$X9wOzA`~|XRe>uMdZvA#Swr=S+5|*aU{?IVgP=M5|5V(F)ST0 zo}e_~f#r4u`^yKgBxGO|jO-~{FsX_5&iw$W*%KCq>?FFO4(hsroRP>E>iDDh(Y<1s zoaDbprfElZck3(TzYVUn*MGmWMy^ScxU0185r@|7?_Pv($BQ1o%rM-a*xLrz)crJd zIbY*_ipj}$%;4!O%)4w%(D1?TQf+2mNd0OHjUEBN@%&_8ZKGy~@nI8U>Aox+lFdi& 
z#qGKIJ;6zU%%tFYK1R^Obw{95QGA|)zHMyJ*dm^eH!GfbdfO%o&EDMB&Lx1{BB7Yg zWmd}#V6Y9@82kE-nTx-V8wU140yp*BhL8X$EAWzC)ZymW)Y_i2x|q#8>fF7k*6L)= zp^U6eU>V(1QD%M!W$4CG2CZR$GT4?p>+mSEzkFbm(sGCcx+z^)G~2|!vsGssBsar6 zY*A%hcbEn12CodGoNHvjLFj)Jjv=S3+oIV=!NUtR8as0i`CFWhdtj{J#ZU3T(G}ND z>5goYAv{v6a22~_KKnAU^D=D)SfZ|yW!=Vyis@naXsvQ5z)Iul?uz>C<`JwbAV^IJ zM8$F_kP*++*kE`@L5%!JEnl$#@?SxaQs)#zGWG!LQCf9?$nldw#Fz>fLhsR}fHKN* zwgHYZc0Nv0fXudnw;nf<Wmek2Q>L~t2>crR}d!z=cG~-$mA1SuGlO|6IfU~ zgXx_UKaPqh-zjX!OGOyUF{iU+p3y$WZj1#A;97gorbHYkO<*R1T8#S6i@vWyScCX` z9D9J{n6nKft_jUcykZOGnXx~#F|j|y0?Vly5ZhhR8qu`lP#B@0MQhlN!O{ONu0=6gnI0@)<9 z0OXoRDOhuZFa!}`ez*FOMMUbeb{+2XUet?j7pBy#y1)hcK6GIz=TWXl$_2fl3NhQtHn=bD$sNx6MbVaiK!{3AxR zp5?<^Jy2yZqK^{F4%;1@Bk=> zp?PDzQW$!h02|9wafARHskveVCef%Tm{>&3t4JL#htpO;lDikEz|2A!y|ZAXvI@@mbe* zpJgvlyF=%;x0+KH{*0mwAckRJ`t%SGOw%Tqf`$m1sSH8uAx(P8>GRSLVNOGs8w|lz zMmIWiW{rjSsLaJ`nUg*AU?pB=`$P$KWBi6D`_&|oHpS6L2unG|tdNNQ*4E;B;(e~a z`&I9L>V4wRxf)m@C>EqjJ;kgVSfoigVR6v0BxI@SxS)(&e9-&@iN!3noq?8lML0rHfk^`>`PD{1O5*=3Ghw-S&UOR$K&t2SLLWfY0_$9X3 zS0;z`=_YwdlRQM@RAjwAEOK#bI-BR9*bxOR(h?2w8{dzB%et0i$G|1O3=8|FVu z7^ToBkvcZEtj;DmG}A_*kGiQ zl99{rSC%}I6&a@7esmF<(D?&{%O=VxwHP&c^}DJd*O2-9eYU_AksTPsSrNjU+n)h* z{CUcna28RFVEFVpeD9sK$Q7iBY5iz)k=O_BB0H2v92Jj#gPAWCOr6LAZ^@?^y3dbf zr|6)QbZ{gu0Tx|a#|3q=%w@5$B>+LlkC zEXm@eJ#L^0j2Xq^E(0vDqJ%`bC$)q>wbZDurUY_XaZsU-y3Tgd#IK|Tdc+{~)5ruF zPm*){Yl%aH0oQi$EV=fM_iWFKpIoY8ztAVg1i#QH#{|F7C&vW8s*3^i44B{-`s54a z7y9Iw;ForBuS;V>Sm-j&*Z|DJ8h~$f@Wrw0HpKx;f2pSpR}10yu|a@zi*Q`ndjJGzQjP>j!#*hyz1wm@*SGaU=}+@*f|YozWH#u5LK(OxZ(ko8Se z$HWn3e=y0nabHe6)To zuR#6ct_Oi?X0@`x(ED0QQsc}$aPBkRH*GzohuNq3XqRTlTx)(+k?S*Jz7%;DjC|nN= z8bFM%NsQ+&{=uwlK*b)?yN7reL-c9(7it`-)SAKz6EG8bJxl<3oi(-AB(MLWY9ON8 zaI~l>d0?gI0G}kmuRvtM!IJ4sa%a?vTITQZ$`gDQJi&`nmYKIFvx6rfFPbDrxO8%Y z@5lJQ*S_P)#dCSL|BiEysK_~fSvW4BrX@e5=~2%!1iD93gBy!Vlr2+uI2A}mmbmQr zbSIT`n3?!V-V7o^0E9@9Jg zcg3IF!aGt@2lTZApaf>>{S$P+xG}R&={Z#5Wt$gNfPPSz&&;-I#^#M}RN8Wtm zQ_u(^5wFn!-~cPp9hYJRd0+(M!^SyXNB9vL_MYsklP^tos?}K>Ab=Ie%l{}*#uX(l 
zSVbTF=u6W%mE2_|=R(NbG`>jD7u-~Bbd6kvJ5vCT(YD&JT zheO$KQudI_K5S(Vg|gUNh_Xh^U(m{+yNWdF)b5#2{Ykbf{0U$I zgQ}UkAkE$bDVR?oBFs6Ajd)0a@C(Ua+z_RC6ITEX#j45`@R4%OR1fezr1x?aEI~|z zrXNFbKWIpg%Z6wT+W*=sg5Q^X(lWyAH6)%t*T{J%u z6f23+`_FfHoG#`47@3LvyZjy0b1$YJSA}Juby+}hjnvuG8axVhyyX5??Y$6=)2zKf z>F=8H$ETnN)HEHRD(|5A>~Udz@o7G?zdj>8Zx12*8%zEEun#={YPG% z)6s2{_02irs5d06_D8n^qF)Hs$@Wu`*W)(OWZOlsR-Aw(0CEk%9W0QLm*MC(bRLdw z+gEpVo3DoC_eiX`$0RQV5%+NKWL)#`Z8nxXipOU4cp2MfqI4erlE&Y!= ziwUOj3Jz_L z>dn5ST?X6hIhlEOBVtb=ze+1ne@I;X~V@@~kSOlMT0T zhql!~IJDg`#Z08+iH>!&Z@W|<6|_+~wB5)XFrI+Xc8N6}&SrUNo4Wk+hqgJmXNR`K zfcDcMt%XC|4Lh{mFl}$~q3uu!3~valtB_~&cOdADvI|(4O(=du#nGp#la@g|ynjsZ zQIrPTqCYb&(G*Lq9oiPAxI^2`B9cAS5djoy5if~5w5>8av>lp8RRZ2vDVIF79ZI3J zfpQF>JS|i`v>i&Jfit?&G&<_ZEGEx4>f=!c{!JG4zE>fuA%xX#$g>)P2} zH?Q(Xw#$w*9C4)5NMQL{JPn2;+oR&UP>_ynHx?h+wm0YU=43~<%jQ6q;Un8R6<>1z z*1o0K7Jp=H!z~6;Ow!?zFo_APbh>mJhk8HCZLr1oCh8~Cdg5`k!hn`kaf5Cxm z=+htAe#?-@U)!2_{1M}q`nR;lKkRm}fAhcT_z2(R@h3l*dEced$@Wxw{6n3UJkCqW zJ=k#8tGM_0>vA;;RXqMq%;$FRPsXOII@ZEon-4j=H&9Cuwk@{e>uyqzZI0CaLmvN% zq^c6G(W@vVOW45Q@21#Af4#aah2;xsK{s6J?>B=OO<-!P3_vCVun)GjNU!CU@42sC zSi6Jo+H@cZw_P+e*f2mCp8rlN34+$`bY@KAU3toQH+t^z|u_G$RHpE{xo6^gVret3JFmm(q zYnbNcw?6Um+t`i|8e_{a5skmt?uNbm1nP)4n~|n3P_ip5@bcTcV8>g^%a8UW)AGWF z(EuPFeEeZnvI}Zg;x8<7*vId<2a&=;1s^zo-_v~jqw?`vV`eABTso(g;A{NdOG{NB ze|^b;I0Or(!K%(H=~?5P*Ene!4?iXsYkHohs~-NvO^Xe(tDW3l==}SvrT6b&9>?2I z3JCJHgl77(Y2S`c(u*v_G@J~!$KBcvc00KAlnTwxkR}3{?obvElpKpI&anu#+QW^a z%vt#k*?`Qc>KiRvqE%O8gfZw6%@OuJD$w>Tl7d^r(-)(#|sl#YgR#G-+-#_cF>{ z`>cplSh#$~*sG-PrLRNtNj6L}q%m>`>qH~ZKn|UaO#5BL6(R|lO8RC_{jHtuHjNmO zyO?v2#oB66BvZ!@&Ltyi8ZgHcs%I@|W{p1noCeK+wSIVtJmD3f3bU{j`+oFS!kf5#gOyo2UC|ZAU3X=P>LaS}+bVoko zo2;ox#-t`=<2BjkgC<924CGt$=By9{RzTe?^q5ACjiD+gp5*FT%WFJFu-7=t`*=V#E+G%Ys{)ydXYG3a~ zJ7yTrK3*i#y)`>#4zyAEb$AhE1S>h%%HSjp1Dj+KY?6Jw=a+*!?gmFGfC8S9d$fEx ze}YkJ=m{LS=T461)Q<8b43dn@2qensNlI4E#VUD(Qgram4|_gh*e9sMxEX3&EK`Si zBT<*S3zN_yr*3r*VeMWxn+NPbI5~A~O8q|RNdfKgrcUoNrd8#P=F1uBK6%Zl`uZ8? 
zgmllbl%)6~5>l8CcFjfexv4rI_I+LdtocZm(t^KfQz>cs8T}J8+7|B4th@F3m?u23 zofFY?Hz^l-Yxs8TM0B|((0jSqLDH+uhBy6}8&Z{ZKqzA~i%JeA%pTVDr9L#4YlF$6*Vo!TFOtc7F@WKmVcm-`g@GTGA^>xL_ISH{R&UPi)mrH&Rs^QI-7UPaxjtx01Nd$>2Py&>UP-OmRv~ ztKWa1Hf1*|6yrB&n<`G|Abio!FJ@|KaTA&X`?_{H3hTRS<(4W&L*cl(-Og@JJ>67t zQn-(mVvZjr{bpP+kK&&gV)0yVMNr=0=PFaOPR$vYhyFLS3zBau)fgmEFa|Lc0;#Sq zq6;=No||%`ew~i7(UgN=2sxQp{UF9~D6CCRm}qAf{IVMuc(Q4;t!4YKlcWYF3nCwb zO5Hl}7n*hZ91xjZyVX|7u;=)Ev{iH)_kLz}53Vh@O;;<-M_2H^;Xq}1Q89N2RLsC( z;5>;ErC6cN120{C65hb|PDCVyH=wD!AJF${)F4;4eS$>g-FOd1&f-M^T{-UNbU^Xe z41!T(AF`5-6L8Yl#Fic8Xx!&FdX0I8S>B|1%^Qm$-cREJQo|z^Q0wJLnfL&z7A2ev zXvl%}qlsq_VFKz5qFJYZHmwu@2hxK5L#%F>7arV9E+yfD@&8`i$$e~$9T2={GGDLX zlJ&|D-p0QL7ny=~q>V#~7w9NyMCAaHr`hnF7p+{_{<^Il*d2{&J)j*jIjfnt9V)RJ zlpx;p=J?|A2)fsggWr#IA5T(5$HC07P0ZiyF*E5sAIxMcX03T^ z)yQG*%hj$3HENelGO)VCQy=bWc$yk04BKnlXOz_w*^`2ni^9Ym*Rqx9i@B9;+ONwd zktqy=wXeBhA7FG2gHtCm2xH*Ypztv=>x%r5gJBB3{MvxY^U(~FY8}!Kq6iu$U~Xh& zlK0%O8`+c*`(cviYv+xn?a=7=(ZB|v5dh((m)hk#bCa|Ii+d@>nz4!SrkuVgfLwVl zX_|T)Pyk3L(~>M&48Pn=93g!CS+C`MbuAB+Yq@H{TB7mTO13=ATjeUkH~shtm*+qI z3M<(vC-N^}M~ParRIVgr3oB{D(!X>iMchuPTyiBf;UB+}fWg+N}2_B<-ORd^LrcnM`mJdW3`6fAjs9ZxuUnKTdqY?G$Mb zRXXau9PN+w@c3=2tH&~mx3p!XjurEB9=_l<*}#azs*ugfpT@D+8jWMwW@qF+=v+iu z{ble8)y39S+0!N?Wkum`i3v?65&cVO^O;sulNVXCT!H{QFfnmM74hXr|dd7Z1E!rWbqAo^qag~JAGMk<8_r!TyWA7K1W>qU$OA>DHo*e&s=|U(1i+%*zM+geP*Cy!3^Kh5RfOm^d(;RG z1d#gKEBJ)DgvmzRxuRsj>^m_>oXhEAo@pWP*mQsW*>vkv7b)7lFMzf7I`U^h4_kcO z74QaaEEfLxex8=0z})$8U2pctlNnp-4(rMk(ZP~8(^YSs#GvRklY3-Hh^|9p`zO23 zNGk+FB*ijadBW_;3WV68BwwPmMlYVt8yh$}>7<1)qIkRBt=)kc=Qs1@H`P3#qrs`J~#%72IK;8LYIY5<~+?pFw1iXdO8z>K{+*1qCFCc$z=aL+3rgp zEn*4J!6=35bzJf&2&FB|#AlhkAoQAY3pjGk<4p&P=ok5UBcq89=JCM`CFYu7VpY>k z7m#|meEVuir9*E`XKH^1lEY*}iZHZsjY8sI5Y!y^3vAK}gVkS`P0}jzSJ-S^`9{D! 
z{H_rk~NiPq$17CkQ738JQ#ZP%QhJk8!p$Q*Xj>FFsnR)5#_1LIj|FEWyP-0 z!(Z~ntkpp=7!?Sl>}fODFxTdKS2hIqw-4N)i&Hfk_ny4+^fI=N~p*m2or@PX+84p714Hml{Z zLF)!C=E-Zhd_h0}4J7m1b|oy$6D@vXSU7ksX)kTJ)h;iy?@zWrSBo210S5nuXbsZV zu65IFa%rHsL^N1=Zuo%sWR}*bz=pvXBu_chtAAjWi;<}4<0`Z6uX9bazUQ;r)Sy|i zCSp|*9#AIXj+IS%BQWr(df@8C+#8MZgw50tooP(6-;CaXRU6%};l6d%3ZSdWrrS-g z@8Cg%FP*uG!*c3NwNb8A2ycKDA`xyvp}G~)(L-GZ)}N2CpMZub3DxuXx`Ul2n;fG6 zjb&}8QTQKQ9s3(bcNgxKv&kn=vfsW8f*5bwXvs2&By{Z>R>*hsi)P+%W*{z}aD3(76DQ|3hkiAA?-JMt~E{RNFKYtwqx_eH1SOf?J z%j>=CtN4jagp~rnRxT~RzmH;%t!BH0=K)QP&}V+DF&t{sv%qQX5KRH+QkHI zF@cTkZ?}S6d4wTJuyh_4SfG%;Qx~=&NfLN5Obu0j80^LK@DzeNHaInhBm)&1+1hP} zUskI6OZ*h55Ze)oEX8t#0RDYG146}45l)BuSwNXKltAXHW`?7oj<(x=?bzlKItFK0 ztL=Q$w)_6XEGgCoC ztoB5$2toP3(qJ{N(4WglM>E;Qw!eY5dN(qugTqK4AP{fD_J$h|bNH3KkQtd5P0zR1 zPV2%Pr|*lgcSiH33COl1kGzUOwiHY9)MSS+{83bZl*kPo71X2AoQy9UAk>!6j@0g@ zJDBe)W@iFeYBG1V6j&lrjknZThna~Y20pgpZPZA2U@INTM@5JFoa&AuhhVHlor`KUaB7W%D+4DeZ z8MWyy<^N`E*B0Ag!4!5cK9kq+#^!ad@sk5lPDzIm{noS=RT>PfpzNdwRWnj}$Unv9 zg*=Si#RO_&E6=gFBI>EBtpg8>Qiw8IL_Or6y!K`suoBrA7b06R21F!?3N|#^a-tps zWs=-5N;F;=4Tv}m1V-R1u{p^73vEhZw`M4CjyTpJw5b&!ivy_v(Zp)0Y}UFjU+cX> zGfSmS@`))9ZIxWIGHU^n8gELR%16wUPZL|1Cgm>MH;rzW2!8S9+{A{+0z-51BAzyB zYeym(_J6(NR*PXZv5Q{742T%ol`P*i*wm}AnUM9}ORZF)kXnYSoam|_E-JZzcoZ}{ z)qckaQMNUCBgNF}o~gytoe{^)D!}B9q25h?rNz9&halC|^1etV&*UhKWH- za5C2u$PY=-rGQy7Jk3=z5wv-?bnUv8_%M9Ea+7*qZE)xrby7 z+t!Pew*}tC>|i4DE<#cuMPmvH7Z2kGS_+BYB%%;0$aH)X57;2%0+p!OboH839dg}x z=0=w^u}+~SBVu&1u|fEs{Teln7mME*pWpt8PRzE6Nkr!a_|VOwpYt*~mR@uHS;Nb7H~xGaIbKIUb4HdbvT1(f6yyGMC&QEP6^mw+;e;nZZ}OE@V&(GJ;lw!_h528tR&N7 ze#XENKVgve)hV?Z{*5|#;pe5s^S#}bw0xPM>iS@B|Vtx3z#rlMb z`|wXuK_NOZFCD8Z8J)P_fDj;1F}@IYBr`$XGrs-^oX@3_DjCwC;`R(Hto2N1H!P>)mkWZ85;tyf&Bm)Fsw2H+5#=u6}{Y5 zvu&LRVliSAa2;*()N&iX_$Y5$4zVO*i1#3Ru~ni6<|A+(t}SBoLb$Yd4HLbDSp)=< zrNOTXq8IC}04GUhybx~cKPP%QzrABypsoBa`auFkL@!I+g`9;t+(h(>J^PaA1>X_B zLZ7UPUTo4sqL)~}K=h)V@GX750B8|i4YaMsAX833^O1&Ii|dP#Pc3jg(o7m8pOtPcu&AL zFM1(hO+yBHks)k41mT-NlcuRkqZ0*g#FOz&0`X<6{N9d$8s=U}K 
zlhoMU=)`-(JjVD`&`s}#eFBq(30qg0E!a}|(I0%tjc`74(cg}M9rq#B zs>@xIDh5Z|p&4F_;_XfB7Q@eU=_fciBCo(stOfg3{ZI?RcICKt}z?C*#LPE~T~ zvUb{&k%Q*7#%37=17JXf1V*Gan4~R()KU#d%+?hWLoH-h!&EO3lNU&BlA--CujD!L z&0Yja&IxgJWL&dkrrC_NF)yFEQAt+w0v?|(PC$luX{w0B&T{Ht%HJC?A$thx*JYE^ z+>l{h;h}fnt9VA>7?OrYnYIwA!aE4rmh?hI5Ng}wNj#1k56lHXyxI+HsRRNyw{^Kb zP2c48_3t90nnww(?JB=sgCEP>Dmr5YmDxYHbvY}XH+cy)g^DA&s!niuZ7H}|k<-hm)}j;xpd!zRy6&=V1bm@@_HGr_ z^|BePl@ziR)F9Y7wvhrDjeyr2ji4QdlaF12{Fg7MC{uyGZqcylM#Dl3%_n#>ny;`% z$mxddOa-YFJIc)whL> z=vlR84JkbTgPemNiA0NUZDkfl#zp(Hrkgj!ZW9yDpmU8|%$U=o zoYiMg;69^cB4C(*JY-8wUD= zT8iUlTf8;dnmeaK_La#Z>VR~tNVh?@QpJi$G3&K{Z&ST} zx_N$W-G7Lpc|0lpH^BjtTn0b$`h&g%Hl0^l!>>Xlvih})@DRm1RN!LUQ{5v3CE?NV zP1NKQHt&y*e#Zd>|KyOBt}xP;B$L3#4!1u6EixK1s@7%>0q9@xKfZD6UJfYgApyCB z+Fjoz(4Rdf+1DM}2KLK%r#%bK-aFF?(zXZvi(e}Ic|x%PYE3Qg0a_XhGh|UUSjM;V zdX}tq&6J3V*kOJVMgl(Z$lT}%4X?%{01^;ip7MFqjh9eA<(Xf9n`CYd8k^NcDxTLj zKj?r?_jsNJ2$c~g5OTs>ksb@7el4<&lO_+gv!j#k&x|GwcUhA%4%CPDldx+9@g3X> zdkYqNFTE>{bg1>e@p5Nt3$nZ@d=?io2=HmnBxSbu{z9_EoVO?V+Y76 zoq_YH)RiFlf^^Q?90#D(c0KM|;aF^M=Fgw}%zS=S;)G~gaV^MT8`K2f!d1543^jmtHq<|C zl6qNR@_Dd=xB;wNTpjs5R5yQ7SsPK^OWD5fd{O1HrU~I?O&6WydL+mU!wey2e$2GV z306eNdNI#=QWPYH$OGN(herok+8Nws>}C75Y}LIyJmTH5t&*1Kk@ER5!T;655!Zqq zQNbP^bC2R)LfoSM#JyKC09eDcnm!VvqVRcKc7Yy|Z4z)LgaT(vy(ogZvYZPkmh-y017Fr{R)xm?jliWb$sZmYO2;Q} zO`1`K4L}1l-jvr*3KM(1xSHR}$F(r*Mb4BvrxU8}FO9)Z&6GRu`RR}$g(@7tog?jSgqmJ??<6E7|w$zkf*v5YAdA^%@%OmPzSzrvVe zrc_yEmNKFsf_Y%e=;4?)nx=nd_KLQcaA|j-6k3GX(xR%M%F-qe6ggwRUR?;Rc!@SE zCK;e;>{I5}1=bgd9F~TkcaD>C1x#UKcVQbD>XMU#C+pJsYD;a?-ojnhL9bj$wQytX zcM_5tvd8Ie#BS(4VbfNmA-k~I1+jvM`D6ZKn8ehTj=TW6k|7T^f_YhJ;UR6$dKUL` z<;i+^-H+?V+ZwmH#upjd&=zHAo438?;|XEfoYP$g?`Pxr&j{X}_;Ypu+eJ8h>9-6W zK9%AWew%YbJ5juYu;5=X?F7iZc@HGaUWvw_C|f>$G{I45?dA1Qm8ND6SkNBSlR8-A ziLkTZgfQt<;-3~kX^JzdHW>6tG636Ya9@nm)GtCJ*+P{9KBR2-pwv_zE2cLtF5&xy z>RC+=c1<-y2d{Q5rSw5B8;OtOGkrB4e0P4WK1(<>Xqnf3Gp}D}A9i$7e)Xwxeg}8>T3J~MzL5CDDoA7C!0i1xMoMhW1 zoSQf1y(Soe_?;=)0NC>#j8ma9W(@(k<$YAy?L|-)Y`sePL=@gkBj9k 
znH}svt<}J$A{ww0Xp{#?b&DAAXUIyV4j!UV9T%2urt%wwlPD|-dU!+Qr4`dZi=AAH zrp3_+%`eIhT$5d!&IXOkvg15Za11d4mI+NiZ@6WWMLe}LZ!bu51+O=4y`1wfZ*G7V zoA8pEMIq~{4_PM~Lp3yb@gcZ_db5NT;FFie%(CllCJ=!*@MZsNvb{YGNeGVNWmh;p z2+#1@0eP(uT?tgo2191O_mBNp6jVp-)M?goq1Fbv01H(4mnCXy-55}6ZV%U{BgFap z=DC{*!-qwl5VU_4U&&{rjUZU4=I>kQim;y!*Y(fQVW_!|n%b#!GdcP23Zv33VIBW z^Tb*=e$#neQ2;o#Yx(kQ$q-r(MQoc;^OP?`Es9g-LkSoVnH=u;K;k-#OB|^~qQ6 zUjNfSG4a^V^H&_0+dcP@_Qi4H+l$qxcxb$PcOI|bbNa#(O?}`7iy_pXx<4M_ca{S57N9v2N&eeC_ z{u)uF!>z3S@vZm-0A}}dn1jkg^B8j9jgOu$*H0;Hw#ZQ!rJjo@yX6c~pzw+<)L}E5 z!5x%?o!c!jLdU$$1F@{w&`8Y`n1TYbSmH(ArE3cWt4UF`yhwKuQxcDsaE-S9S zhV8sFmXqZ5dRk1bpDONA7Z&J>BGO6Hbw=)r@n?_(anK0=fAwT%IiJMyW7C2&c1RPA zcW*olrLFm~i$c(T@uH}+N|ZCg@bBYm+*Z?a#9c5{CfA{tCU9Kdsiz=>qO|dX=pLa} zdE;lb%KX_q*WnkCr!YsvwCWM}E{f94o{lzJteomDE8_DfJEx+Q1IN=C9!cRl&p}`q zo6Bi2=d>NDLoRW{rrceJX={&u@%lSzSk?fXk=O#6Sk(y2-Na_q#$5=@Xz_Ja=Rnlf zlH<-#rD$fZmDS9}9;M;CHSZgi8WrSkhk4lJ0|zRY)BYCK6yk#W;cbG@N`D%`#Em5lLX4P{9}=7b_uML-Rmq#eeaj!kpU_yq-M~>SFsS zfY~UhabIXwRQU`9Lq+jAzeK8ga^g}ae)&|x2FcVtH!<)h156xacHm;u@j-a0PzJKG z)x2-F0OOY1Q-2BIF!ERKHa@Ep9J*Aa={lf6i?tW~6$3WS$hbEU=3rAE<>TxT+SvTQ z((nyNFgvhVY#9{ONQg4}2HS<$5bRN(wxkM|fnPiHb{FE-v<s2fx*T{tw|0R{FE)F$#-cw3M9ENV{G|^vVQyyRda~Xt&tlk;~ynO`Xv@DdLNY zM;rhEyH?(jOMunTwqWHx9;fjZ9V~aUK1aMIF@6l^sHu!iWgK|w6L+6T(XR$5SL?m^ z;2r;*Vsb&?FIPSLEeZY@FixB}aH2^CP$bFTaRy#eNV(PF7hvqfr7VqLP|DIVp42$o zX%au*)fQYdJiSA#+C9EQeSm%MlX!1Z^Sk0W-7**)?+|tU?;~q>chmQEYlGLd_vPvP zKtubte&=bQUNsm0073YTmmPf0H3ua)ufOD%^xyl}noOL~K%VYa`SqVaclN<={$TIN zcCUZv5AXfhd%x3pC8hVh=Fa}5+urry+-`}=2-xqNn`<9xreulVVsH&sjAWf`K~3*? zmKDRi`9Jvl$NDBWON?B*sd(xWXW7Zj;^_~+&Aq@x!I5OmpJ}W^U3wA;H(LL5CNdxT z*j%@vrRH_sc;DdZOVd^n!zxjsDm9U6%bKrPvgRx1RFmV{T7X4ekIWAmII?8dJXHf& zJN*j!L65Af)LgRbBd+Uh)FcXJ9q>wv2X6VNzNwEXfI0fs+C@GtVg5uDeN!_76}k~U z>N{|N^Jvai`D0K0O{H?bM*c9h(pPvwoV^nC+11Kg#omB(<6`sScu9~PYxHhOc1XOV zUC9>KN*k>tJ0WyDJ2Sf)X?ABcGrL;J5fQ`*feVz{4v;jTB00Sc4RD)F`zcQIVgsK* zC|sISa&IUt4L8t2U%flAp_(heOVScIy?QI zFa6VZJ@i|@|F-9+|NTer|ABw=`|kn^D0BMitN+{MU;XA^2Csan`14>G3E0zr_iN*? 
ze?7wW_3MxYI(c3`0ixFOavXVn`k3JzFLHzDB{mNEbpVl3Y~=YHi0?O0e)zKJ0B(%^ zFWJm6NSrATy@t#!2G$`ExD|R0*y3%^PmJ7{*yQyTh};-3x%1V%$=TE<%iSd2%+NiF zRKxn8Wbag~%o7bts#E83l5`}oTa*&IMQrQP1~_WtHl$Z%F>Xu@v6~De0w&-{hCWNg zI-Dr)xOgEyZuhLUvb6dns)a8y$06oOLaUM8`2&CLV{g@pZo0vG-q7sT7E7LPWUmxW zo7|YuWV#W$s%lG(|NOD|%^!K+Qx85*_Hb8QZjeJa7$=YxkB&V=$X7>Zvp^JE3w8X% zyT^a%p7GyB-vt^n<(zb*yr6xQveYYvzA}tQDUM@THQ6|NH;aZsjw|-7kW-X%5_sHy z)UcB*Pq2Ub<$wGkPg#Ea_dfrhvgq65O#YdldhQh-4e4Yg54|$_7{2aVcph>o_@Uur z3hxm|Q&AMX_c;KOyc4g>>Xnm_|A$|F@ki0bjee6(l(p~$H4Wk&itX!ELr3DF3!>O+ z;o8H`2$ZMZ`iU1Feh&2+vcM<*EzICsGSM>=(f_PQPkxtr`$VYdqA#^y$#paD1^$2YjOySujk3CYHHO5l&D$q8~gD&#F(Ju0T4lIT&OD%IX8$SOz z))ag-a=}?#GHSZY>!S+j1JL{Wl6KVuMN#i$X9 zSNDJG2VN>6ReMwbyhYRR*q!^*__6PMspLV<8v7IV0?2_zMLn}eFo+n)b&!ZAHvQ`N z-d}m-4SxalgxhTAUl8btnV%TyMs7nXX)6t^yAIe%_(*B$N;@|mPvwCm=wErg;e&h4`&wd(uFumh<-u(B^pM5>-6S(A$hO+GB zdvIl)bpHvJnDX2Cj}qYj$A%wcHxuLlsuJw~_^<``(E&58kddqDk4x=uq*XW~qfCVl z@h9J)&A(0_duC$fGaxxSyW^rZ;|qQ>K%{^?RVRPtnP)!@?pGt9d-2!3;&^K3zd`p6 zvVDGH`1$J&3BU9!Kl?tHeQp1RpC;RQ^x@~pAIc9aS_aW-_kZBU=l{{zFJ^#Vi8vV+ z7n5uv$b9X+W)f2JXDy<;{O>(ej^h1kL!CPHFa2KjBp@x$PX9v_&y?`?f+-J4GzEYm z_&hBeg)pW8Ja&&z;>9@nm{!8Q|JskdRDtyQ%t)w{2Iu4o6cS%J43K{I<9U4dIN`(X z{L9~>7))RI0|&wIPZ{PD_=6XI;W}(V{);|WFs0e*zv=e7zb||3%V)G+!+-I?pJL^J zzrTBZ`2XCjrF(vQ<@4YB_0O{0Jh~&vdTXKcn^zpb4Gp4QKXw!FRwI&^!@H=y|J00l z5%Et5bhCxPykI6M)n60t&E)R@me@MB5Y%-Ch9)`QM#N%a7C)cAgTN??c#^-i77wjl zOkj(M>QZH2``UM39DBE;v4~t*(O7T(9RF&{oF>}3ef)1>I5UHp{@gYG?mdKbe8rl^Irn)!x3U$G8`dWQX&J~H`|#_qy4h8tm(i+^qMi|z z6`HdDb$LLzOFf+&_w*uu3+{srqxqLm?=f38VtD*jj6L?rUasxE#3f;UR+klw(Z^|5 zM)NP&I}8^EKAH8mOS-!5bUmHbkv{d&J?zrU!T>8U5!!uj4z#y|HM7&=F3^&gWoSAqwAx0U&-Pb zGMayx#n3{yLSM_mN>J$KEJB&Sj5my)eN>;rwZXYUKaQ}98#s5+@B~(=>ukzx`L7P) zm~&VLsl+2k(BStOFKx|ZG=E*nRlJ|1y2M0tVf_6zyz5!F#y^=wgRU+qhAB2c$xFeL z;Mzy=B(Pn(=1N#&S#H;j>SN68-Ncyu2ACN>Xj@R-)7drcEEX$r-L8IxJX*J@W?(Hw z?SswKHJ<~Pj(kY%2ED)JN~nb|W~}!wJBK#kJN{##_m^1jJRg=$dcWSOi}RYs=G@r` znCM21-Zu>;_9Y!9(Ajm@8Q;QjQ?;eQtxtJh$#9QxH2()K536SYa2y6LUlaA7(-g-6 
zjx|e->m^t04!=FORkFZL=1~~oqUF%`nZuYNvrMZ zUSkQiVgqG^+;0Ae>-(C<XUdYjdsCtPv!OK~8&J2_wsh7vE) zoj%9RIMkwzi}S`QQSKLqU9GRE6rW4hwheg%aU|aKaiXi4oJovJ?xr!uk@w>SKfxzp z7^;R$3;AvLLwqY z<^LvLK|R4sBs#cajs=V}c!ecrP_AE{>$!S+$K7}9OskjZ0t)%_`R~2|>ec3g)K8^IMYGc9{+fo{4V<~M(`!A&;Tg`$}P>O9z{LLRZ z>K50Qa+6l}aOQmzM0dhAeQih^08wy`TilQ} z%qe5U*k2BX5220+H$6FLCDHahfhDA6io(8_LX6kG9w;mq;qbVN@HW({vQd6V32QKF zfkBX|g#B1MH;BkkDRm=k_b;!1%V0o|0m#X zp-f;qiyg!&tHdPaB;0|SxXtA{&pqWPcg|gg+`<4asF1y@#5lD*CB|cU;#qF;bs_8{eEC^--57*U^l(!&^|toC**t~;&>X+c zLMo*r^=Z|>PHGp_#FR!589Lt{L+PQ>ZWa>Vq!v^=Lv8E?Xol?LGOFxcO2kWc`UH$= z4!%w|a8;P*#7+qMxCbiyz)0iGj}ncLRtcJdf;uzl{!@lLPw9(rmC|nbC*(?qCf)l5 zU|ICen!5$zeHg8fpQeWMm2PR(McYhZ9rqP7CNvtDs%x8{#-r)*)(^N{L6Xu;y&^e7^Cnkm6X4vn z)XFk{=rI;rssM|fIII@3`N-TEvJ${Wh1pxQ(P|{?tp&ezw@e_;faZ#So|p>`aR zG@}26AdIg6CzI}loP+`Ylf`6~QpMeK_NRIz&B9;1$B-4`Hp>VuitpV*LP}Op0eB_L zbXbnE?jJ%m7XS`w5r1iOp3aE-}c*_;d(bJp4dp=c76 z$&A~uxaOFQix49sO+G{et0IQW$AAKHGGjtcT_!4|0;1h!bd zar8o~wIu|T+|pIE6U7b)hKyJ=UA8Mj7(@|k{S#0jdThzsPWb>`;vp+n_*7W2Ra{ zB4A^PXsMt$l5BUM@1C#)r*jLsoB597X%$*vVDEIQT!JfbK|t3fanfSp#?tRB(U#DO zKo_2lW$5k&E;CuyNxH#^;2oTVOKhFxk_1G_=*__r5O=aj$zzsb&bA{4(v#S&46RcK~5^AXNW;Q@8^N^CC)@xpynJnf_qyJkPaJ{NoOSiT&bq>`I^A z;ZI=OKzFMZL_h&YL7ZW3J$XjFfYBX)p95b=sGPL)qqztzahUK)4Cff`@_kgM*RGT? 
zgle5+l~wMT`No5RX$Ig*Uqh4!4jBoATkAl%M2~Q#%#A@1Zs$PYOuKBFp@nNNecH*L z;fgOvNdcnxl9K2cjP00EoQ_ifXuxejklq5#AxyKP!~~GW-P6yQ;2nM# zx>d-qpaT4_D;dJfx^emE>b0M{QxGX-j8J5JU6Kq#v zR)jNOc1IhCG#FWwvBThdIM=Eib*7Ki9qv-=ST_A7)qq zJ%_R8z-To^`pA!tKld4^^dEe;8(TyPEC1gxT=I=JV(_bwOQz{pvPc}Z{P`dH&p%~1 zqTaZA{%0qKrmkJwgALN^!X5tE_y6X9$(~o_i)$~+%s4dl&;H_#2gO!er<2coh~w&C z9)9jQ4k9PX7}7Z6XX=ms@Nd5Txv5Wl;w=w9<0^gI-(Dl(8-M-RFhxGY|KrTF&pe}B z7DHp#ve_YcrYvT(&my7{d&2R1$e7WbIN=P7@=|nCp#?lufBEbF;cJMi>GMCUMT&$K zh=1d=?sy-f&Ue7C0EzM=dhzS5O_Y~)X6P|%4tkbV&9fu~)EdPIf?#*~+ zU`~&-=h4Jpz?p*zW(dQC<~@{f{`z_r`wF3yhMs;g3>V8ki`8LDCa-^NVkDJ-2gCD7 zKcGaM4fMyA{)t~&`{HdM`9+Kqe}CW~-u|z@ z;ZJ^na=(-P)gLV%{biMV=d%;fz3JF*=@pN+H6YAsbpuq_DU^;Q5H zpm)uUO#Sw@(+^m)&rJQ+zZ-r5cn8$MVcD^-WV4&Rz0D9Y01rbnH(?~=HuHXyAOx^; zN(1nuU|pee{r?Cxr#Jy{tMO(b1D6G?6WYtyoSk}J-ijaRX2Gvsx_7g1mk@y@Ksy8A zhc(!>P*X$UlSaO>;S3wh*B#D~cEgUqC`aJ@&&d?|Zd7oTo4>C96p&QzZFf1J%F`^{lw33*Z zr(f%=SPh!FrL)4Z?FVfO0@|yhe6?`b$6ak$-aNxlg~1V|D$yLuLbx z(ofyQQTkgtO2H39X6OG`j#B3!-O|bVq&!ys>P}8*?wP6I{K%nCabm;^+R^qW=a;kD zk)D%7ypg$yQ88DFFsIM$Zg6rocb^;r+vr-1j}-p?AS0tx$}P++4V#t7dU3g^(&5k z5W38jNg?MbdQ3h~(XT#(2NVnsj}QNKK@Zr(u{fl>SdcT65dxgEY~RVHENsx2sIqTx zM1DapWkvhs9xmA^E0I5z002~U-@*M$yMxO<4UzAeT<_8*HJgMSe*sqtaSBn4!|(q0 z-uJ80JNZ-wRb5i&TCM~V>Nl7_Y%C3>%J}QIGHFocNs4$KJCY~bQH*sYOaB@!+Twrh z`{YXO*9hU}E>EMVrG`$6l&1V7eO!~7`UCeMnUxWR`bj0e z?51}7rCwKIw^lmqlpje?^6yh?F*zoBq?!okaZ~KNHpb<7B*mAX1KM=Ia`(dcvB^%< z^_O{_sQbg&%+PzncL#@U1I>3(G}Z#fh0DBv5zX2?UT$!5mDRj;Gn4v{yyPNpCS2r& zfA}=x$>hJH+q`m-fosPJ2&WW(<#tl{TWLn-ZCCX7bd6VTG4A4T`bw-SbNl(aZqd3r zq{ss^+!EGRU#`o#yd?2O-$yoFvrS(n4sE(7S)*iVFNL<)74fyn)nC~KboCc?sctO{ zxdpH#NL^P*4aBt#?=$2kt6uyZ#Eozg<*MS$1{|p`$p6Y;4M6!YtZ0#;v4C%53tM4vb@B^pWcl2x=7c4&F7dejbq_zgdzgZSJ%z|gj zQ9(D88=8sxRtGZ~zz6(;n?Q5FJDeFyZkTUDgxrFMA)tgqEKyuhXiui}it~I63tU@PIrw@Bu9!G+#jvP8IMN*VOL);>lc#)pK7V+pY1vk}* z5xt3Q%2<<@N#a92d}`-Bq#oV9rkz1f=YMO~wp7p~3}+>*+U!>MsNC+p5!9;dD!3)S6y6A>LL5Ggm 
z8Q=MTc`-aZ+&f}{v#ffY^rr|U;7{DkI5J8s$>=qw8!uBpxXrwZZhS@B6_Y`Ll2SFlAnS*9X4&S5E(w1vAJ@|L=GI*o&WEHj;htE!oCY0?DALb$ovoH>R z)7>|wN8f$tzHj@x-}CJd2QVEe*Y8!X+^aVw*i8H*UF$zq7ol^yWO|J+p?!h8@=r== zGKP4x|E61WPyY6=uKd~OaOr&J;PnrF_np#)3|0R0Prp1prN`~rv%fO2w4}#_|Hr1M z|GfO8dc5+g?xOs>dCqp)`pqjZwi|ExZ}>0&;m2Rzd&djkprYr-f9RY3;@fuX@eS|$ z%Bx@a?Z@@_?9iB(K!3~D; zTh8OBisk1RpLg@lt3&wS^Y}SakjNB%({8Gt{qJy7{ooLYFC&&6_M*eFe;mI=GsTVM z|M;5wj_cv~9(LdTd_BrOz9tC0(zyM{Ya`g^q4$9V%v}KDlX;&xV{GEvv}901TsAeMpf2Vi?L^?vP53D7pWHnr(>73!_%QzGcPpBSzdth!QUZ$Z%TQa_ zG`JfEwi(|g*G_gxM14qC+4(b^fB9!VjPJ%czDo%cHZqoTZS4+z!duS^Ec0)AWlN-{ z>GR_F4~U;N$KNwa&n#yg(e%VHeQ+H85VS^&gD;VAmUvd@nOgpi^ZEB&9DgSb{r1`B{h-3p8yN{;5;(Gsz|B@1?`Jo6yNd65EMaN@% zlhv2tby$3D1X{fj3xIGS6H^dt5`XDcw&L;MfYaJF^zMlvZ9M$X_RJHu&W!%8L-Mvn zg2d)`B$=~zaVQtT-QrI^{=1_(%YNCi30T>lg15$Z2`PJ0I5c)KfrT^_YULy$jwC=B z)-D=W$H&H!fliSZvY-?5Ae-Qx<~ zX@%~j&|MV5@o9)c682(2!*iR0``RS;D?*>cl9$rGM&K$GUrHKBcQEvXY%U-h4F%R# zLxCRqp=33b_y6t>5GSVt2gs4Ehf}cWiLVbB?w|=Fcbd4uJ_?x_9@#Vs zLK?|WC`9l#&%ADQE8zyLhi~VHvGECc+TKq`hxjGH`@eYk12(;_TVA8!Z~dV!2qiW+=_OvSq;B469-}}NfSI~Xy=RjuAWPbN+zD3U4hAn>n#`|20!((}v zw|4#poJfYRL2L2;Wu$Ke4&`GUKJaAx9sBOgM$H){Itkk^F+01jGlKMjv+DcKkAIH> z;!xlwi+}Hyu>e0DCIK(hM{a_nR~5hyvGMS$-|_?>M3W?@x#Ok55f~YJ8%~XOQ%aO= zV&vl*lSE1>pTXs?AB_yl=H+U@fS9$%w8LQ9vBwh(I{-(l!w?~%AAac2i$ktme^5CG zMuW$Hjgy#**juR0y&~3{1fiS^x5b&G7stLcL0Ti49%~?$A>xQbW#B4y0rdfCfw4X` z(s>VsB}GGMGDZl1Cs7>Q+B9}FnQTIUg})PWXAO%Y_27I~bbMEbj^7wcFrToHCYKri z2b&opb2BbO-J^3E|KOH5yK88xu^vzo7TiFH@c>URfroYt-PxPDJu_~aOJTqK>wOdV zObi*J6m?V+G-ilCA1OiGdN?}h7!^a6w}Se``54Jds6oH2?Lz%8WZVk4?Gf4z8|RPefQzt z{%2o&|407Czy6*@QWRyGHMa(K>xsn_@e+snez6}y$8AO2T=@0UOITfg?@DAmsH z(9U+XqO)l8ZU_On4tJr(nNn; znqtuVVN)^a9@2+wFP-fBT12R875ky74h6b-%4YKoVjOJ2#_j${=U{E-V2MhbP|LyE ztmVT9+I_G(FcQj+Q<7r@K}(hFCx3-g7b@OGMGje@N1NRtJC|%lACj5BE!Bq*rMUdi zZV$z@c0(R#i*8vKmef(uu{XPp5~2=hbtt;yN5EOPe2D&ILeV$IJa&jWs~HOSKSZff z{FQE#osbm%+jwhk4Oj$-EPD?_0z`&R`U1|ajc9KIES`56aP(^&eSg$%{v_=h5`6uQ z1im&tN&h+PxQJ_9$$TS6>5YlH;Enk=ou9Z%YXSE8%?!c}HzK9z$xNpIi{Hori0>MI 
zKga39#9jHV?f9 zfsM~TTqA|q8|AR6H1T>?K_6pF<{*b;S3%;ctKc6r= z^ECv2V}VSo^Tzx&yN1?*r+)gFyHfZH{s4ITwYvjO>Y*eV&usYnU1AAtnv8Ui&c;A` z%Xp;I@J*cB^@aPR_-@5x&oXq-wy9CsN<;D;f#p_e3kP|u@0WE$@*;^W>`tOY3JE>r zC6j-1S7DKtPmOfQOKPeAA7EM6^SAv8$M@Y5eGiKrro7>vn@pb~zoBsdJ>U44d_!s} z-7&c__?WQn9cU;g$Aset*P_$ipX>~TGwDwH@hhE9K6*>5IB*1Eh58b{ek17JhyF`@ zYkS;F)ixZ|t|82PD^^ez0pe?rkXl!8;J5r9J0BedYFpNQXoDAG||~ zd2IM2`7P3pN)V7T^-g{fkx&JvbR$9oBT@*&lryCzoWH9@L}yjmjPA=kFdH5V@ev)p0N$8}diEFXnH?eac(kl*$+-+h&+sryLiWff;nh_^EZH*WO! zjNE1eyo4!8pkWF-GzD}80H6)Wjo+ytOxPIpnhhH|#s$rXp*uY;z7>+1M@0W|uR?g% zTo}=P+1T_TerOLnSVsspZYPiKOE-No4@Gyu-SFlVxm#ID-4>P}0PDm@DdSk{N^(}% z9ELi?-ga1BPBcD+1qwFEumyuHPu0Uw~n7*8SWgl_xqy;3)GLErlpa9+`}p9 zIp9R~Nnw>gqFeDWt?RY` zL`;U7l8${ie~(;ejIepFsHx=y1KTOb#=VI`z_e-s3ir_5KfFUK5lLX<-@nzFV+5W+ z$3NE}DGl!)JCy*06E(T#_1HrZ)uj$NIsDxQ_2Y8(3x+}&z?{7dRZDw(zrJ}pJ|^(r znUq<=WDQN{9x?51{#%WI*ZjGHBW8ZjZ+}2yuLOsoWu;Z8Lf> zCr?f&Zh={9#DIwl^Fo5;!SUUlY&D*ZsfpqWxlIygCo{^wa{GA=CV$P)$wVg4$ErTR zB~?P={D@<=V3IBl%)r9Aos-R^A%Zy4jY`^~vM-m1;P&ufb!$+P@p+8sBZyT|^{(9p0)`(4MKW9;_ipcDuJ z%DYbaFykM5${o~joAjVgxyF~A^3XDR-f-YKof{qa`w|T9*gtbOy&ssUe*oh9T9%) zruh&V2S!o;D;)9!iU_uEM?BH7gaf>(8;Uj=6ag9JdpIL04QEy&&PZQCAsh^ih`U{a z&})~_y=r5_Jd$0q`{A1fOlFQTw7f~MqbtphLs# zM?k~c8sUoDgoa^py3nvLvTpKdm?xoO$_Wi`5*l`>G%=C%py5pp4JX-ZJR2K@!0j$; z#^p|?)@+AJjLzC7f%}_y!)70QsOF;yS}e1+o0)G^i{%=)d+Z^z_(}6$!_b$x+gXs< z7g_&?@}Kz9C%$xrWQbv=V+h>*`iQ#$-4?JO^4}vziqHV)9c@&H<>-in1CB~Ssp~|- zey1ZT9JnSr@@^-$Xbt?2n>MfpV#9ObC;c@_@Q0K0o@SdkRl*FttcPR#x|}>ZV;ist z9h{)rAHKcw-ftDUaKvwC+~7p_jHL+d2;kV!GI!`UBv$8=11HUL{#QPVH67HpzJL=$ z^3VSG_nUEieF4WicyLd+j?liS`1bSppZYxn+{QS}idp9ZI8XCj!1(GkA0~ z-u=L)b5@SqaK$GxFi>7I0gko!fb!gH`Z_nH#)86(>EC*!K0v(`Hh_KY$o#wXF(spaf zkn!g6Z{~J5ro>1_( zePS~=pJa_Cq|oh~qX-9w3^yBP?BhdVl!$%d&3B)l*uqG;&P%GMkcoL8o-o+q_%H!* zwTwEeuctlzi={VWAP7C3KUyLgbx3xYU8Y82Dx zuvSKim}@Q48A}wp`}Xr=P%-S`e9TQu79W6e4s~|V70;r*5afdVWGjaC2 zU36vy%;*s>UUy_`@Q3eK^1dOAc;w?q)*uWgS$m9eJ&~+o%EzRD5$JM>CEk1t2X2Nh za2wTHPDs$p&fE=C%NN6W1PuB9U3QXtDSokunGkQaWd}vPpi|Dhx2-G|VKh{Qpv_wz 
z;mZ{Y1OhlCn#Q0uqH@r{y+dw>(VL?29H@bO)J=Kq2ip-*LKS#IX)R?u~D~Qw>#%~%M-@~i?d7$MbkOGpM9nvwDjl}H5v6qsY7GO6H zI)fW^YAsNS#wgzir`_U74w3MscQH6~*68v9F$&HfKRCI5OlQrGy@igdN>YHQhDSLAE=xI+PFy6gz?j z1L}KJE;*FL`8&g*^uUeyY+@jwy;X-&*qJr*z|EOQIg5RKcd! zg)iL=#W>D5KE{As#BfZ|Jr=8upquaIfXT)PX1WD$5uy9+#fjHF6!FnK((_yojE0y9 zDG>+5IF)j;JsfTZICvMxflQ|101y7Kd7P5u$cub-JJgMR^yxSJPN?N58Q(gYXi41OL8}e}DYx7o+cvo_gv^b+J%sHtWq3#idH&sjEBUr>bdT6za&30T}E?3s58CP3zt=L?wG~@QU;&NPBE+=_W zd!+N`{*e;#(*`d8?zF4|YU0$S-cEwt~-0=0z z6#rS1Rgxex*&HmAKVzu9VuYomPCVxLS#eal76w);h)O3)G0pjHg;Ew!7=N zp=pY1jY_FqDXZ~Ht+G^EZnxrvdNVFpTpugV$_^4%m}9zln<1vsJH4zdSL@4uf>em# zrfRK~Mx)+TH>F~&rm2?d%d7N7+15iUQEay>OARJhk5`s2HH(ed7T@w1qpRGyyfk00 zQI|GaRJGitYOAFd(@SC*+GN|}D7`^(oh4*x<~1cZ)nc<*ysWfZWqGlEj%rV}0D;!k zUHo(Z4{tjcn(XPNdh>$Mi~J^*ud3lzwaiBJHdnUxUSkj~cBzscgsZi_`2huYwJx_< z!d>&#~C{8i_fTUzfR9S*|pzCCl9I zOT_y1V6ctnZE*q|m#S=?PEd21Q=la%E><`iI(N&ta%G{oQftSn0M2~1R&8IlrUZ@x zMNX+2<6zg9YnS8VYOz`?&U27~WpZ}d*;U{CV}t2xsnV{NTxk#8Eo*UprCMuOm;1Ey z$n2RIJhE76x8CX>I1t5V>0GtV8RVd;e{~Ytn7mYMEzMMx>dniKH0zg?-5J@XbCqS^ zjKKf#%6z`nsw~adE~i?bqR*S`VNNLKudW9zlx<0|Ty9oct+>@F0^n|)i_BHnu!9%e zI!DX(c&T1qVc~^4%b|F(9cq!q^<}hH)g_M{7Mk^?bVjpI2OR2D*_CD6Q*ew{xEwFB zaF=qI4(yMoPMrp4(Jf7aJZQJK?7)oEbNLg1+kCUwyxhO4Kz?nlH_riXZ>k&EqN>EL zbF5Q2uFszbJcK5@hE=MUAr``cUCFKi*JW|O+MCT*)N4?SMzMXaH_tE1N~@NvNB<&j zcwx=@mw7xtcTCGt8rarKqb#yiDwfVwdfTYgi{-dgUul-sWhE1YPJkVIYtmThLTD7a zEh5RjLWNnUZSevJyRxp5YZL6ZtrahUCaUh7+W1`uXO^3lQhjlmLsE{X;)PmqF*b-h zaqNl0@#&+-^N-BkM9C>DSeQCFGkfg#5m&(tNtCZyF^-{BUuuAUL>x(KF`ibu-A~{? 
zp}2ZntAn}REIV1fOK}N2mCU?1(^i;9g@d>CX4V_v7CWlFIlfw-I-GOEx>~e!-9Vok z&B{V`EiPAC$Tk>_mc@Jlyjx=bqZj^d(eKWus*WDc&c>usq)q6+uHiOpg&Jt zExMnA{}I8m=au4WK@aw_oPH@n3kro=@ycb(6MeFWr>+(XmZzpx#7}4^3}soqt5>X5 z7ndt#OA)EG{-~)+5AjF;Gy(p=0Z8(X+6M7RKtw_2>Nz$Jp- zn#E;E*Y2HAO&YXbx9ce^WtzwIWY0KLio9#;OuW(pMj;v%$3wLom)fYqCqdF4a{08k z18x+4mRi(^CtH>F5yo(s-b?4+u>v!>{XT=rVwG%ho!~gK*x)6GYtXOC;uNdP4qQcU z0~bY>JHbYTQ>lFzq_7U9VoQ8B5Ds76r(CpZz|+kq^{E{9D;KS2rrb)VSU(2cVTobt z+!z99{MNt;_ZIWrZCDOev{Yw$tXZ7eL6`y0Uejo!DE<;21}1l9nF`gGlNbWuEKDW# zVqtlQSRYn{y|%sVG_sBJ0{6VqXbZl2`Rzb-Tk%Rm+q)N9;5%(496dt;Tss{A)3z9k zE0Pelx9&@q>;sk@Zkl*Ehn7F}hjzbx-_@w_)Y?OPX*cFn#5&+H0M5gE-V=TMx4$RK zy(hZ=J<(g<6UF@R>ec&pY`J>%ty>x^^O6-yx3heVng^bFHs`u|^y4pNZZy&Mo%*`GHI!KB9}vK zO2?KNp;4ergxu=39Il|lvNoFaRVh6Lwe`m32w|n9-%??7?0+P!hO*9nE z%3>7_PE!gzae?J33%dgOz8s}ao&)UD7v;(SuC#Hbjh13Nbd)3 zvfKkVS#ICW7TkNY1^3@<&o@)f7IELrlw@u8-ekEwH(Bn1n=QESW()2PJ(r>V_2mUX zXtgfc6Pfu!kqe_~u zmoLMUn11-ka`|Yzc~~iPhYvf|R1034srhr~RE1va0Hs3)hWRONK!WEuh^@-0iqyU; zh}bL?i3$PM2llNPo3eHX4?=cK8PSFlj(VL#>|au{&R}BiS(WKZtJJJE5TEl>G7m0q zWMgV&zVaI7=88+0E6O)f(psFEbL}nH3-gy7Y(4x45v$&rhqpP`$Jkom*Xl3ap7~URjHlR~zj$5rUoiUAk1P^0V3^TmOpuX`ci5Dox(X z^Zaczs8lbs%J3GYC40%~C1)>JP!O&~R~GCqT5o!7EUiVAn*OqVX5<+({rm< zzqHU;UFbY6)u^-9U^@#7jY8)wx6sJxt#yG;=J^90SJ(Q}_!DO#fd&-q>T0EcB-fzuzOXMU9L>=5La+4n$Ip2+O?K{mgwsr zO`i16$ZGzng^@}|IC0A0hXQB4RPlKOmjuem#-FWLE=kLK%m($Y(#lTBTyxFR@p@_J zNn_b}&Y=h?ojZmJh#I!lMkWKlLe0B{!k1%?5tpMvD%zt$VZyZQ#Rn! z8n3H$Y#BXCwH#iAeP-+R+RhWj)t&1SRqsND!k9wVWx9kl2zjx*0Dn|1uSM?7zm!T! zLXXGd@_>GXHRGYkzjGx{1P7|f2}u-6iKWr&*1ZNp5>1ssUvYYx)@2yz4Xb%`TK{@t zHkMHy^#}X>%ygk;8^6{1@HN&Fazg>+x5ttuyV7$? z&nc-1_F0Q&`1fv(L~-xhAl|#HJ=K;&%@0Hccf&KQgZ!hKcJ-C|! 
zt;YU+{N{M9uU?|vVz~gP*ltwI&6TE0$h8~%Kb4S_X!Mp)LZ!wZ6D*zB%$v+RUNS2z zV&I8-3_j7m@-nG&r%y@Y^^OjyGNoTaxSY@`GA%R~9Y2zXeRTUL>D~&LKnO@oK!a1^ zz+rAwd|zB|U<*SHLi)fp+ZW^u9JY_oBXOB^eJldIqWvyQ-2l5OE1; zibH$JFyMAFddOW2T1)?*Hi9;a3V z_L+ukonQ<++aB-Pd&t?%p{%H2lOnSq0mRjiE#)`4P5(&5Ykk zXq~HdUJ%U-T8U1Ivb$5zrT3(;yge!OFG~gM8aRo{S_uIVhXH5W24uXPtU{(;M)0np zz+Eb$8fjJ+%$Sy@|D5Usd zPzJ)w{U36hFR`qRUh6Q&eUJRJIB#ED01{}-#I^r4(#@Q>SYfvJL$9BXs$%7 zjTLyK@*+R03;fBUt60IdA=7+LkBiGI0v+^EQRBi&bgo$C?^+d84a94?uv}a|mwJbN z)B6GoQeFY~FI0<5rIiSa<(}OI9(MAt*fwEu?*j*l`{t=@;(15~=M?uRW%e9My&Tw| zdfB%(^`c7ql1kQa@>z zU9KKl==BQW0S%+d3H08Lf4*zOr~u4G+Ww@&STouOJ&3v0?slv_fx;0K`Nk56iMQi8=><(>$AG@q23w zr`_=f^R8QqD?lt{gJE%sJoy^XkQv_oJDc(vQl#6C=CtWmqRt?RIO@lAsZ!$%W3R7Ed?NVlJ*2i5kTPW_L()91A(ZXsCFxTj(xKn>0m6zJt{Bb-|%H>_)#q{R1 zZ22h`moT~EjYWkT{~kvgRBI=Brk7=BVo^$2NytZFvUWSdxYMFW{nGs9HUn((5)^kb zinLIbrqjGW&?UAmNpt|p#9NEgW-AtfHmiZMBRqx%y(ZA*S6hfva9gH@fQ*%UUJSy;eQ;m{hUsH!uG6k8V@gKO1iS&&;O ze(K0UUFoog0-U6+gr*C8hbK%VwOy1FnG$1p}dJ_K$#a(z6m$5Vzh=HmM&nwG6=~n z7=7_?9`CKR&I#B87POZYT9P*vm`Zu8RTWrWgTl)-oEJ}KAq#pdmH3mk$D3-sV9Et& zZFG+^H_l1p5aBtadge?AD&{%25Z-e326x((0JnqbT6v}B<%UL3-sv4~b|S4|{lh1$ zg+bQT9H^w^W0NkW6$;519Cy*cuFwDuW9jU4+oz?yOI1zhF_+Q>(>qYSAuSVU>pIyGT*R5O><%=wyN zXVtU})x?9Gn)YfyCP6OZLQR zuqsxeSSz$H!Yq@bmdFC|LRQXY;)iRHbdc$13^soX8!U{quaaKts0HsQh3s%ta zpzW+Ktt@dK-IWo@7?7t@lQcACPmUImB>VC5%F?{lJqwa<@LZGk{cHa!x;fwrY3 zMRG}zTvEiPS=&Jdc8JXj6v=j|b(T$M^ie$@?mW(ldDh({asJ-p#_@)ycT3TGdQWUs zl5#qW@iD*)qc1|?3W(q1kP|mI0dXU8m)iL`PYRa zbE2z8t5CaALWXGTFMKu0G}l1tQ9>>x!){xGao#Qun{racwItlz-H1=IL(mk=>|&^< z1($Gfze;<(j-=H^F5t-Rj1z_OMhgrl6E{iJJC63&n@-k~fP?@@2s+FtTmTYTwKHm1&psR==zia-sMeNgU8e!UX8fU!Q9wt;aR6Q&0t$slH8xS2{;1U ziCszjCo8pjLuUk+Ie6Z>VJJ*C^|riU8hwyZrD%7XQjq&w z=Q^3DyvdOn1b&wpsAxMnQ8%5!aqL1yTOSAyj4v6UJ(%q%svO$AzZ}Z?-~Ie5JydYN zSYO!wCZ5sZPO@JDLrFMe!&Xg&OK0)r3Wc%2msl}(gL%H9O$=@ltUYEzttcmZA$Ja^ zE|NSQX1K(*#(%SJ+FP}E%G_n#?y4=_=_iVAYh;UU-<(*viHIHbm=NO&ms*gH?qLkx# zG*X9)EqHHeyTu+5s+@(EFca>e9ZLxduEM^(w^TtG?E!ky?|%Qg&;5e99DXXdqf>~N 
zSch6j4ncZ@hA<(!E$H1o;N2H=$yeCM6C1-dWg-z&kXFDeVklPYRdY-sVF@DJrU~Jg zrb9hKZ&+e7S-l;xh9~JQd8)am+PuUG{>WjWRn&;wFD$)#$GJx7_@x#u8iTCI zoPVTk=aClSFSHUKx2JyhX8M#mRgBX`m&rC=kgA1(rada36F1udZxIlT07#BMH&yQxnzrt0XO>Oi^LL3{Q^#w#F$fUgo;mnkc>!s= zl9Mp|EaB2>Wj`XeJ?sq+d+Z@awU+C&$sPMN=}OZ2^BCsdJw=t2eh*b+AxQuZy53hv z5O9kJc z>C~igOH`S3u4ZEI?*@3l*xU?RJ#dh2xB~lt;0d>KevP=h)crn{UnBMGFNRR}!Vh8y zf2sc9-o2K^UzOvBEdGXG4(@Ro{HXN)-M*IlJ-FBX?%Aye8o(>g`NpaALB087>8YSZ zK(4X0B5~I!Lt$x*qxAX*a?On7Bt#PtT9{EH1(v8^YDLpENQjg=N0-CRzSiSDZ;|{m zhzYk$v5BiNFfJiy2|1UrjCm0)w`J-X?Y4WSaB+!0rOP;$wW6R{N43El+D`ne3QN0F zZ#n-KN}~CoWZ%0u>qR@z7T!!ZUEF-ETGDG27Hdm#^26%V$F%OcR&6P!u+IDQ87}+8LyvN4y=u*stmW{HM_`F-yi|#k8|a?I233OVyc6EEJtu z)~*Z1;|hwT<9udLM94F$y9Ci(s;>DrTvk$V99JO2XX#AfI*2mKRrEN}Txe*s* zDB;7T&emnj<9dOv#R(k2dq=)H{R&(2ohB9eF;T4ymD(@4ugB3)1z~N(m`zR^QjCO zpUdz@hI59ZOsmjpmcpBXiF~R+e6CMb49`^ClS)oKgQHRz;aTOwGccKxsacL~<`88t znFA*M8!(xAb2cuY1x%s>2PXZyTO-s*u1(;kJ3}z3&emnPweNB5k|S5Y!q$AJ31E`5 z8(`9RSg!d8^ZRlz=~GHo{{T$-r{KlH-pVd4nDjXTCeen$ZTUn2-|k}P4$aT2=ynhkOAII!Qd(d33&P0VZ<;*Ch>iu*!_v`@^wjU3ul!x*P`gK&Qw3oyUFt zksCM~d1&X5G7NbFW1t0gmR z>LtgEnJ`<@n-Sp(C#XJ>wDJKz_&SvaLZy$7tnjX<)sH@@cjK_gBg$0BMiQ zHf}&W2}+{_oM7p}`jbK=PN~Nkkyf`1J0jq4P|%8Ionfd}y&$a} zdI%f?WKwWm4Xm-8cx7zhye6|-udPZ8??*h>;{(T15W%Rk27%9CV#N=TfYf1?K_dZu4Q6=PT zZN*&)?^_iTgPthYa~n#>o2%{#l>0VZlu#dZtAoQ6wYVu#L)&xi3VtO-&9d2Xj|^q1 z4OSo;%7%_QMuLnP*=Lw7uT!J9cn-3? 
zF3ocmKHWNn*7#0!jeK2=z!>=qb(*T-Q|G{~P^Yli;uaxJ0oC>|1pCmZ(6lZ}rDh9v zk6r_Mh+-`jYpXDiw8O;-nfqn$<7%qe9^-lm4~`ZFFhzqj!6|t&!o`~>0RmcBfYayj zTy$i}vA=r?a^EY+i_Rwyw2iAuE+L6_D|4_FNu}gmO5zSfOD?b&ejyw_i>%R#l5E;_ z*?1Urvi&N{VjcV#o%SNvt}_LZ*&r&p8_41-!`_(Fkcm9zb?q+QDt~J3v3JI3w78)t z6OOwR(@Ey8VmcX1OKSzZcx3JJGJ)|@70!yHEf?dnmCHDn?`vzp3I<$8{P9A09psWW5XQ4zK zSgTK#-?iNbyOu4aK9ElJa5I(X`aqhi5|?%S$jKA?jA7E#{yx>t%;cx6z?>dlqfP*( z-CZpusr%CHC#eL2>;olx`lTM&-7l5pNwpB%pT)Rf4RsDv@LfhvS_Hv%9-zVO=h$xO zJLT~(c6R%gzx3NTK!;9|;tZKM5Ri|NC`_@QZ!pFZhPT9u`v4dy)-V7ntVp4Bnaf@+ zVm3j{6q|~cxBIlYwy3N&)b9N=mu>UNJLVpNZj#ej4CPG87Y5S1{ zQoLShJd2J%pgtQ)4xG0~AOuajgk%`P=XSjE* z$DJ47yR7!xmg~p^#TM@0&Kb|{ChJ6X`X4;)X=X9onS^J*j?FzfcN$kbW8)Cg`h~As%H!%C84M72;V0 zK~$h1_ZTchz=18&vV3->v~Z{hWdxcRM@qaX?3~4bDEp#@`Re$PaNB?xT;Jtrf*pFB zcPoP{woeZ91hnf29Nrl##-nx~H?&rCrHFFJez}vk;nJXuw;%xt|F<6K?D9}A(`(Y5 zsis8f@pEZ#vMd-)@Fta%rbJwa&`n!-F`r_^f?w&nVisJ0iYC!=ZOlF4xXII+KFI>} zs4dX@N>P7_pv%d$i(~`lY9fOpsosWTSCOEhs?Y5y-Bwi?HN7hKj%jHbnk5)FufTtJ z8(V^T$D|Y7tpOUqG?N|?7tfOBB~XXvO~p&|mSD1hTEm6e66{J(I>9wh14tZdB4F*A z+?0eSSZS$-;k84&WCO&Y`BRPK@0vPsx|_NPbV1`%PdNa@U3;I>@3lyfd_ z=lM#E^kr!Hm$>K}m6q+tldko!oHsk47nn~{!Ivc5V*t~5#?c(*Ua50@SNT&hF@`&! 
z5{h`Q)@m!PSXAX-9iL(%WnnPlSBf;9= zLa|BxS!%6vsWitEqA0hSdeFB7JoyqbbOo$SDKoT<3iW}4(&eS9>Vg4#?|j(|Yrl3$ z1w~dD+mhy-*TG2>nOQ~SbisxbJ^oxSes;lp8)ZoD*a_G!npoYt+Zlkp^&m-gmfY^t zea?>Blj!WGNg@NzxdA%`fsYog-Vo3=;PvSmMc-6syGz{=A<*X;sS!~E zb?`P{Zee}kKoX1FAKI?z_!mCc|$tvt|FX;qG*IZkVFH9u0|zG3+if-Fu57 z!OzB-b{F>TnfIl$YxWd;dhv~rlgsE ztwOQ<4uWyxvyaT4ighowdw^k`y$I8_2Y#j-WO-a=qzTKkR0m~_c6S%PnWM93R`>Z4 zxfu>Dw)f_W?F+J|ZPx~^d69H*u_q24>^G#aI0jhUE=uD`Kdaf3j~_YpVBmEJmN@j9 zB~ZLL19x{fo{^W$1i@7%lde8=)Ui2+Pcy_jpiZ~D zR~+QS?q=ukk;T`Yt`I8YRncE(^zG`rGd9?HdG2@RhQ*VXaP?46reD3)MyUn|bFqAJ zgSro#KC&ZTUf>4muqyfia01bh%j)FRh$gEs-%JCP&}o;6uMUE?4=T(0+Y3G`b7QGy>Z@R0{1P^ zquSB^Gt8^7#6wu z+qybo4gG+7P-;rQ-ZeZSOuT+<4)Y6#(t7yravo>qSISkc9VKji-$L&K$_JJ4TQfa3 zE9b;+uL)U5@FrRIi&aLGw+gg$uZ?d&A_`!zK{$eVVX4+#^$~p?QCp{rXDz;KSR~(H zmr+u8Y2OiE^!jR8Z>~l>py3YOur;ePC z=MWF;mvVNCsedUu>j!$8@Z)y)Un>+F*>n(Ky14F5V6a%N*B7SOjy|6BJ@K?*N#@-4MFT2b5`B)76L-+`xFVR<_ zP-k=v!52$2FzXA2{*1!(t)E)9L^I<}Qav4pwvNv2dSp%)yawbiSZBKy)^JoXzO&!3 zy_8u=TPnNC?b~};CTkfo9l}}PR2Yz>05kDSvAS%8sjH#xQn@`hS8m28IJh-)MC@MB z(9sEOH@HI8?Sh$P6IT*uHlcg@rn*dWXBn4D?)l@Z;MDiJ3= zBhA6L3j<3^=O0|o`t>aN9e+B4k}UPK;D-i;Ax6`GSM6Xtkm|*M906Lhli&fi^QIN( zts``F_T=n7?y^~giS8D0Ac0r(7ZiJXQ_Tn6y>l-R8H{y}$Pf3(1G{(c(w`;ShSpP4 zivWfHPO(AAWsexv>Diu=zJ({a9p2S~Pqzi_$_P!L z;`0q%8NSBp!#(Mtf^F=90hy=go;W^z`qX3HO@^w6XO4Ae2n8(VdlLNsgwFd7OwQ4) zfbn)t=V{D(3k`%qGaVmLZEm)wxiF|aC-XF4YVPhVH2W>Z#h*?7{A4L!L=}`zix+-MQ$lsR6APboZxF)t$3F2T)bH_Z8k( z&=_=$vCn=VJ}UijZ^1AmCKL0yCk>C2TD)Q-mnt{WXA<5vBEQbrtU|ck7d$H46WVpO+t& zPr&zUKm*~TsBgf(b{g;rgBoBvcdctVwKKWHNBb>@0RWd61rRG-8?tvUSK9mj^~>kW z8^Cmm+w0)c)|%6krQ7eR5C&_IZIbD}N>33FlIuL!5{f%F(^I3X(S4PD#5wK)@~(WR zMA@js-pWR~Ht0RQMLL5XPLDC*Y8pa0ADFm(Ye?2)3s(U*8R>CpX_U{DzVQm}6T`jEP1W=goUliRRijudi&kl=v{7i4fFjE{c73!xS zIE#xPlxO1d&i?g|gj-uid}J0if4Ml1IMKfbY=b=c-C@|%J(w`i1l%{uH*l|PRH8St z0}Ao}ojnypo>bm)J6-4Jv$wZK*I+qVD0nxRjg9u;#%1>G-Z=kY1>;`7-IF+QcJI!f zOoLxAd&$0Xy$h&O%-eW-tz6v2KVtF31OdLUyG1+D#da|``^DSKOY^(Rt@)mEz3b(^ 
zSDEsQ+8b}5n>n)gmaDPM+m9Z}pDw6&V^9D3-n`XsEdnIZCLy$Ai-4UDUzV83Lg6VN zP%ECcZ#>Bcb*zsbFce={X`-%Z1|Ts7_6#xML(uLGD|^U#jY5CdtGfO-s@|KU1zF{>#oD-Fmru5TXKILOZ8`eN0a@3+ncP&w$0h`-&SD}ET zJhquO7jLuTXvFUz5S6`bRL9m$r61Sli2Id%q*$vHSna5VS<4fu`kV_2(p6S)eg^LT zRCg1)%br{WlA9$LhxAE;?zEhxB{zav(G-{GWjS4r^ZD7SIa9CWgNu`d-E+j0TjchE zq%zJRRwVD9oWP-3Nt~OV#hq*I=sK0oUfrpcTL{lY=QrCdx9JzST;C^|U=2&CH_wK)+r*}I%96jg#?%j zpMC+p_0ZY8Qetle;5jxv7Ty4f0gbSpkT5yXspd zZB28x0@59KnwI*=Y5)7@>noxs{u{dsWrZX@i1kE50;BCq%gwu0f|!S-_0`k;Ikyh8HudZ2 zObtu*oKzn1*=kGNWeTyy1m^5Bx^(li_Ax7>hjaU4rp`?$rnr7U$5FSRF!!O8AX$QBSu$s-SX5bZ;km9PY3Mn>nP^guMIzb{OnA+0W{HA z=PhDasQ=K-)}@|;BYcXa z+~I%{_zYZwr?ki2U6slc3C#UfhEA{>a06WPJD=g;Q^WpKG#mvSPB>Q(kTMr%IX#d& zh>52tJ!#H!8n;#ZRuzCqIhRHxAEoIUENR;c+vLP2MSEoH8W)}U z+JT5TcXE=V>mAh}snlT*nwRJJ)`08#1~YVRC?idg&Xr1IoFZG`P?jV#lYsOus~Wy_s4IjYU3`*Ht=) zZ+;gPI@Nb8Sbhs7JLl85G&GO<74Ab3{^yOlwB|T;JFgT%s40Wb%`f`LqzsPL zt_&fqLK**ftHzYjlJjDU^0iCF%Ps~_PmXh75Gf!amZ2`WI=KD`=c*aFOG0h|<&p+n zu$UzHKgub;Q^cK@jY=3uZdAY@-#(pKtPjs3=vn}AJ(9O2g0CQG`oM6CqrxYc%gL+H z_A$CbcJk^u3t!3y+~E?%1^%t_$8Pkj+FcbGSZ;IM4OXA-Pj7E}xF%%jc3st#4<|dF z&i6)^Dv}g*9ivsvxex8&65=oY5~mEI}5FgFVkd61Z@#EGghIEjc-?XxPFVqFnA+#W0dM2$?qSmK`(gG7N z1lq3Km9}$8E6MFW|GgHjTjGS);_5>|AG5G$t=KtHAwx47zH{i#A3kP_rW;IFFlFFZ z+o$>RFSJy52iy%i$7=Zt4~dskE3z*?aBq}RvIG3EI!k=NsY0|G-irA3*sYPSH{Csg zoDdJvE)VB)r>G3PE3IOmCrc+aOPCy#=YQcBq>#~`JI;l$Q{HniP5WoY&rg%I+x_}Gr5$=0!sT-{6qKzuHP`6;uYU0Z1i6PCR zRE_+cD4@6kmY9vQXp`VCh!}nlks1=mJgbTuL|>%{9Xg0-Qpnl@WSmeSqCzXL@BjUU z2uvP7B08>Nc4Q!XjwT?}r|Fl?v4pp}R-t#yG#L`wA-J5b105U3r@K)!_CV^h4h1U1 zx8-KAHSv<8QpSEuQHwc{un6>$IV`@J}`?Jg9{O zS3c*GO<)IM9$f^!7|TN{_4Qc09Aw$gZr*_Cl5Y8% z$Iet_5(R=!FZ^V1r?gqvxtY_q1T^n>&H&=gEFD;$$~%Hln{urm0QK5yrzdMDcgRng z!PN`IYHqJtXa-(8!F20fPShgx2Y#J-R`Lh2*~;;8=J@r=+xX5D&p<~JgLzi#;34oq zoA@gEbApB3!|pFMMJzsj_Jp`2!s+bP#JpISW+9(?UWcKsc?QeT;=V%k0>bd%FE}TM zlgUUHKP7ZxJ7yN(y(dR3LnMdHvm$4Zg=NqVbGVw{zjA>P&QZUQ*;PfNZ`KX|9rR1* zba-qUuZMp4X(XJ2!NcBc3pwwUPyWZ6u!HQTGb402pvRP3b7G$6!;k}JnayP=wmAu+ 
zvWQj(w~J=o90)v4-~i+kP;3@Z?Il`0vOT-Q#;B{0`v^=7-4P(e8_hXKtfT7wmkQBE zAr-OBp5TU1F<-=FBiK9KCr-+n%PXCOkuSg&tMu_u=ii{(9k1BC;t$5c&9qm{9`;nO z2s~Pn@qjr##7pUn#Z#8s{J_g<$Zaa_rnWj^I*|$4TV}Y%kiQmzB{KmOX?@nYLCi`j#6-zY$`~!jT+& z(8M6(2`iB;X3@VI!pezb!<_L7Do?FERXOo&tXy1aIZ@B^aFWYKRdYS};TTNO9T-G( zL0gHv8jg$TVT7X*M2+~Bf#y=|=mK6ME*y@oq6%O>m)c3Hm_Q!hb{cPqMpVeBZ{c=Z zKCGBvF0KLaZTTX0)ajabMuZ~%fNmd6Bi?XU8k2<9=6Am;C2Y1KjYXhRG1`*WrSJ z&O{_C6vC%dA#^A45_Q@TND+-d~oDGw}eC-&Yuu zY8X|3irGsB`t6+WccVWO{vg1VIF|VtHQHa12SLN~VVZ3c9>A|jsweg*3xQA^)2l`( z3H}(eA`yR9XA$`Xw5>mg&`k)|TnX5%yg09vYr<8;!VXovW?=rR_tYO4UF&B><0flV z@hSO*VG~?58K)@{$V@Ak0Bhekl;$=hfOL=nv1N>U7j>?Ayq0$zgdxEFAZ_7 z0odk$+G?%x;}xkm*W`$lvby?q^Ve?;OTSy&?ELD<)z$UQkbFCB_q(xFz!xzCNS1!H zIbHM2cz9>gd%ereE!Yow(7VmenA)(^WvdlCQe)DE?UpfTM8%@j4hjV43h9=vEi_tM z*HAN@WMFBPo00Idw?@-fJ{=uD4ZHg0Il#EsmW`tI%!}N<#rq8bYu1fhvQn7EG@nIJ z+AMm~W-*r9EXIsxF{W)6EfZ$ZE8a1SF|`>}%wn;b(zj!0{hou9crx`Q@(o3P1kWV+ zoUT4@FDn3Bv0nvhGA~&cAX1>LQyer}r7hV(mIZm^5LVVs{H$E@fW?bc>%2dBquvQm z)SZ!4i}Pg}e{Po5(_rIxBW;#Jo-c&Z#7`?!ia`}ijqZI|DhFWxFq5h@!bC9A*|#ag z!%TGA&j!g%R`h-flQd573$CAS>+RTP^;mf-jT|G5nJlwm^ppn%iaC!8G){Vi0vg?o zM}S$pLPJc@;%T&0WL^N|Oby@Xx-Grb0J#;+suWH{1e6oT0O+M4gX2E643xipvqFm|7+P?n!GVBt`=F^ul0rjAod3lgq!;kuTGx_~cwA4%G-eC8 z&rD_moq(M=Qz7@m;#P{!n(J8jC{&LHu3}`864&(j&F1UZMC=S6gA<}Z^C79sm$>!m zF2{^P3v*P_sWbMjeGzYz$sss#C3feeaGxLUk2SgA908qywbKrR$BpDUHrS*SD#W;eX7(Ir&XTP1a@*mDl07R(QGn`ELGbiz zeq<1B2pA6SMc)yV5CqtmpU4`;zen&1cesPNunT;=ot=?xIP+6Y*mBNaikI#t^smyce?; zw^msqt$-uNL`ve@HQNuei0_ra6oG;@QqS_bu2n8qX+#S@w~TY5u}FvoX~cb6O^z;P zK4Y-$Vj3kMq@d?kp_c?-!(@=4-eA+lv>;}y zq?BE1J|`A?8cPUKnlKv|H{V!|oRYb~u;8TF5Ih%Uyu@eag^JmVML+6l24~(OZ#X+l%WW zZKznzA)*Lr+!}l+xh4v?rSubHNswO!C(Tj#6?`{G$IDqUh6D1!DC!fT8)6PQ#ZxF5 z+%;H)aVgDlp1BAuIXkI-`mlSv^Xj<%s=o88USGbY>9hTPdHB6wYX3q7u6F$KRsG}g z&5p5E-FfwQ4G@!prQhqr?&)Fm(`nsb>!XPd&b)BPy83~wC#S!BF&&COF5R^`xqNkM zt}acnVwriCbXt`30SHgUoj3>Ip!v6mI;E%B^B%kF-Mlia%ei%t^^)m>xJo9n%9}Cb z%G{B(p&cck)V%eKH|hj=?iBww$h+WT#DyZRaFZO~Yoye~)+%f@yq83AGWSPplAHi- 
z@&LlIqe&O96XTtnV&!@A`Q+zkKp#H~`t!+Av2WX&m7TY9$~GUCRp*rLyqN+f}uo3eM* z@X#8Nat)`{A<-zYB1J4(VwOV$u%PfxWFDDRr4LU(ut|{6I=*kKKP-M*kdqF`JPg9% z!tr(jU4cdhJU1Ptrj&;uDiXj#$G}^`2hy;jBBGMz(+ikADDsJ^P$=X~ol_n#JoLn( zFH_J64h6!Lfq2G63yodKCe30l%D0Lht&6}yirmq%-%<1xAK|{4DL^L`0i|#HBn*iI ziueG6<O;BEG^ckmad(v%kBn1sj&7p`{D4=P)xjKSNr zuCulhIf1)_y0aj)9H3!r9HJ33D0_vKKifNSaYy-RR-Ym4S$9!QB92DfTf>uG!^sIG zy=0CnM^JdM;%*XcETc~l%9s+Lqx3pGBXIWQA0mnUV^+5i{rDh1XWiCuSR!c(L!Bj# zHZPkb?Sg9>ZZuyusIcn+jo{(7&LJHwwyw6GvEC9+E$Atf8e)k_V8U7~uI<2@%K_)% zXjv~HcE-HR!58)WU;q#W(U*N36QeX~lzlWle+;zNR{S;ej0Rt=CV#q!7gMauiWWUy zpZg(nuAGbR^2Yp;iv`YQXS;9q3B2;Wt`HLwPVbXH#O*D`5KzHrs)!J>uivE>OC%&IkS z)yR1eIg2@OdE&h0`G_1HLN)<#oB-b3z#BVTnS680X!Y*4r^6b2nTVld29prl!9 z%AFvHLSaf1QcgT>b)_(Spqv{*pR**$Xnyw zLS1Hc0#+c-G(^!b;4}ckq=*00$?85vf?|@@`zbJ(`EqU)z?gHG>&#&?5Jz@4p^*D0LlzBL$2FEeA=1 zRqbqll=O&04D*S+neL{W#Yz@rOAKm8O7KMlc-*w`S7d&6<gG< zK~HS)|E;6}4Yc4CE2-CBRW>E7*hZF>BEnXxZ-+txzxR`i+lxDJ!q#uU{bsq}EHnH~ zvM*xgzyhDhN}(4D(5D$qQZJCmb$^Vhx>j&nL1rjxw*B-!amU|`X)kl>jW5vScKrcsd} zVfauHA4KvN?vW8TU0A*GtEHx2<1-ir@A^ zh39TBMjKgbJxgr>NY%RktntGWQhpGiC#@ELR`e$$OH$JmQc+zBa9F>b8pAmHAD7&T z%KPCcQEUP}OyXe(INuCc3A){=4ucY!sG2**7*QN)Sy8xROhsaLyVwcOmC7s*xUzLT zct4nk9YSY0jGW?8X=a;2s&ffS6N5{rnxK3mlI z)|xF!y~vd)u!|zR4x!9ksUlTgemh1|g*fM@C~LbJ5KF97B6B^8rhOJpzcb>eaAb9V z&GIHowLA~kr{$07u*O8HgqSbjHl)HP$xa^3Gz~bo?mKXOG;7wiO6~ou#Ok=`4U3 zAjh$We2Z@!Q|xq|RxCUPhEnS47CrY)$I?`;7&M>h8>d(RQwp$DU`i`gY__pE__8#o zUU`!%-)iwkCtqunXlXSjcg<0PLj-@KJG=p-z;oMH zrdkW>Y_%!1p8M3S^%N>#wD_=gA}BC{M`v#c|jdK4&( z6i4kr(NvO4_=Iexp8e&@RieB>r;~C?`b{OTkmAeyAJ!MyV#P zo(NWMOT`jsoPTY2wsXrtD)60E6bzyCG0+Rp! 
z>p%3m3vuZkvwqye;_~WrSOl#}Wu?Mp*Qr?XxHUrGo)FZDQnFYa29;y(9##z@W9DNv zZrEuSj}5a%Y5-~My7}0R`+=o#WKMjKu;>~(?Jf)NF-0iq+=DYgQh0F)Foun-nNvVS?CqWp9L zP(h(a=R%w(w+7@IR6Mc;O(91mjBq(6s1YoT1&~FVhCQV}x^G=wVq50(xS&}eUO2IO zVqZf7IYG|6gDf&rtpA12%vcr~BV!>bhmvCV3JY`t4zNoJPTg~eM2!mu80@h)f#87H zfx8e?i+W*Ym}ip_F9dR)$>T6)m-0A>6TOii8A4B8nBL{0*PNvg3)8=-iK#b2Q=1%O z5s=BL(abkK++#os7))`ohD|MMJle-E_+Z;;+kzbAh{d9(Zu^ow>HqvRY#TJ!{Aexe ztT>C>Ih37dy12y_EG^FkO=VzdH~q2=G<|Tcp4G->`7v<$qW2FrlxR8- zA8WyI(bV^uHqC+wH+Fs&*GBj(!>t!q6 z^xGFswR;tiLIY>{g070+8${B?9q{iL!avvw7w9%GhR#HP3mz~2yzqFJ4=w&Vu6VJnB{*n}d(0>qz|>$5F* zRvhB98U-u&Tx?j{Vhf&R8z#M&ZUsI{VDP>V!5CG;X|rQGk*nPozxP>~6*Zq|jpG=wQk~T%&YeH7mlfPd1#;Qn527kawu-O)jLzb#F6b_z zECnsS!iD$bP-E{8hG{c$Gmuy=Si|z`mSU#a!1Kh6lFeA+1aioH8lHG*Eu6C8&#~C*n&2M zW(2{m#X(X)Qa~`ibda#c(YTXmp(NK)RZf}!%|R1I)31=mF^d(DWl?>FQ6;9$_yS$S z#DxRWT!VvdQtjYu^hVftx!in>dU;@UIf>-neI_Wl$`LSbYprSK6)ftsDc%SKr%0#> zQ@tHxX;?cZC>n^8`H^=~vB{5)j*bejB3Qw6WO^2q9r#+BvHYa7_3!Qkx%wQQD=>r4 zzMHh1!?&CRh9@X#G;8(JN8Y_5RfDD!O6Ih(n;Ub|96sY##u<3A0_$v^+D4M0iOz{P z?drhATte&P>E?C$0qD(rbj|0fmQT|ovO2eE;-nbF6GZdtq+8^Eib|eLFS>X4%&{X{ zT1wfHLB`iUAD1gX@y_*OMq|nZ8cxOM?}CJBia5Bhm2%O&B7O}T`MI>^MS3YLA$85aBR7A*q}-W+a40yw|;R%rI(R&-2AKg%0Xer6#eJc zK$qiH3}#rdw$SA{_fR>YbMc%&j(uKgx$sLZ3@n>0Y*IfyEQO$53`yrpLegjDA3rSz z9F`+CU!?%5NnEbF+~CI@V18GeytsPI9CB_=FhwmkD$Mxt&nLQWi?xfU$inp&0Yf+S zgIW}9NX>(A5v7Qm#V>fccT_vsD?Kf~05AT&6r?h9Wp%QHb!;ixWTC3QyIZc>mG%uQ zxKgXWFUOXQ1F0#fL*@O)Qg}!+f%EWn$i5WcZmTp*EpDq`3)SUtdDcAm_;ISGu^?1o zco4N7a%)aM@i{^RXM*a8%$jX7`O4=!5`TE}=7&Vn#XU4bW*n#Z*$t*puq z{FUbio$qEQSPGQn{mB)sBDO>Yabh`Ll;b!h8HdA=9FswrUthCN?aFW^!=Ms(t-KUq z!7NRJKXeH5ZZa{4_cuoBi;FMa^pUW0Iw)@ep619OcghTwnecLo<^ZE1;*-e>H#+0$ z1V!yQnkCX~fZ3Qza!5%$s}eKT&9;tCW+7e-bfiD%Y8LEaM~<0hV{(5|*=+Nq8GxwV zsCO+QoQol)V}pzqi8W?{k2eXFkJDf7w{7O}5BLTf{&6NpSo=ZiU{09x^^~P5=Pb0;rK4|z5wV%^Uy*~hSHP>9M7|hquOepD)K5Q2r zU>@I$U}ioZLKTX`m>jdU?HaO~ACl;ED!>_K7=CPs5*0(WC`#XW%$F6sbj zFv4}`e0rxtY_dUoJMDUSFV0w(av_@qhdzhw_7~nQC?i&6ffKxtbidh%;!Wmu;q)oz 
zoROmz+ygow)@4(fgM@ZwSs{Cva*=r*&5OcEJz&;3HsuiY-h2Bzz1Y&wfJch4K#ENJp@$D|kww3qkrE^!dt zK}6;s$UCqV`3!jD)5-{l^I^$HutcRBrrZPmTTh0Vq?w z@_|rih5ECjgIe{|er-{GeYd)(>cifbceCA5MljErXQ*&>Ug7K6!if1DqXk9K&QYbx z?T_80zYDB4TW6IQe~k`jwAEP|HWhg_hrkOdV8NATieE&NG&{t^w<)&TDfAE@Zw^px z+lOhV1tcL{Iqlg9rNgrr=&T5HbTegP%XEl)FsK)4qk9h#X)5@ppP*)jyl{-cX9WkN zsEIqQZqH^E*5Kc1I) z!NRlplvS=6Wj2BrHzFP-YA{JmX5Bw=Q&jdOw-rvAFk{g+=o!vLtTAgmC3s0t(%_3k z#TKDfGVhrD)#!3uz^yRis}(dLZQl8oA#UX2IgrDkHYWkBQY;+ZzL|Y;i=)lOjkKM( zM*8OXwg~OvouV~zrFn@RM*XtwHER!b@5^lbqm*^&3m@E*7%0q8H~ztB<2Ihwa_)dR$>K-@NIftynBk zFITY|dsrhXyfMUlgJ6EK(g!kIkTNoycNa&7E+`|5g@uu!=6)NC8Cj}v#Jr8HRAM8G z)xEarVvdoe3?yc4WW|CowLXtOwO0DK;6paV@Y%vvnN;c@tLt*7bW^?ARr;1&(Gz$sV+;#U*0KpmVQ zIrh8Y-4$g0!|MP2{QF`4eikROYnsurfL!xk*QZ8ig8D96&&-l{Ms-AQ^N zZra0&Qh9zk{Uy7P&}QT^V!aA$lF$Ruy*Xko5HP|2dO_W)02Um*?GD;|!|`t391SB* zU%YaPV{cBhk~UUExZ!6T&lGxOgi@)umeamM>(pA$7JBPqX)4>>Ol~dLa<&iBxn;37 zm|6dfPVQM~5Q@Tz8(g~6_L9tz?2Xp_Asd&S~LYm zDAnv=8eJ3m*6iG6dP?)-dovZDoU zYjAKmum9k)FuM}$WLn;f(Iy*=C#Cxa_$eeIR_ zaL<^5u*eRbcG=A(AvJd>Fv+8I#;dxD+26aBjkH>^FrF+%iQ+2G7}^#0x43){o0vu2 zhjoQxhW){GtI($hRjgt9V7^B2CWrEX7M{uCE2uGnuR2Dj2uwHTvMGj9iX%GPJ1(&g zgXIt8(<3d`K@KySz)~$rJjSP6SU}Xu-m!m<#|JZS_(pN^HQNdbXA#UmPocjb?`hs^ z{f?f>gMUs3xkef6BM+a+OzTU6?d6p}a8^914&0+$ovHF7Dn`5rRunMEysOB{8g}r= zj|>mF6Sr>JTR`G!aK?xN;dh=;(ValDD|>{`HnuD5-!B>C5Uv3~=mo(3!I}?i-<1zF_%(%b)Zj@L0{*B&JhYQ{JUnR`J1OW*aId`Gy~(d$9lH+r^dr z-1@KX=V;j3_;zuAp~_-mIa#mvfT%nJL$44ekd=BPEnQ#wRoqfino>b+#Cfb+QhkJf;`T}@@j z4O&EB-Ra7e8;g}M*r2?utZ*gqmn(|Dhs}ErJHK+sn3zjhthHV-%a?ab%03L8l~a22pWMwMvxNB*mM$&BKqY+&lwoJDTS)DZdZ9HRO;nrl?pyUPN zjoyrQ6x;&Npi)d(B8)OY6Fkqu>0m?M^sf2&=OEdW%2e~s+jpYKH19A#RVdv zl{(9|b+s>mZ8(c^AOPQ=)e}Y8*VK$tfjY)t2%3fNnc0ZCVUxK)>Ns%m<|!O( z-aGF7^_1JU{28}dIIVs%J?|%N{!h4h6AvilW~B*CvatCF(_VshXpb2k_2wHy7b4^8;MXNT9`+WWTRUKs z9@tnUo1a(j!e1OU}JEA7FNNy>)kdbXO*lE=C4#-P*RFu#AL(SK)U3`J>3F%>! 
zgx_3x!j=Af(jOcU1q(X1Gn1+k4sNrlW9M24Qf8VoCwIs&lJhbAIZw$kml{m@5Xp-E zO!^?5$)OSDr^8|0vd*;4vhLq2%bu`XIlCNG;!~;mCVMfVXzfiOWY@|;X;%~;@?D3+I& z)t)M=Cn9@uX?L=iJdwiVZqtHbKGbUpYNuy)@%RyMinB}0&sOV%rf8oYUT zgg}bL${)K$TQQYd3SKZ$X$4vfDpz{(#t4}d8#Ifc7EGm)DHA8nD~XimrS9#>3z4bn zh9C+7Ax-dShPd}PytMIXUQmJAi)JW7z)i1QL@%EKIIRx}AY-#_hc`y??`v8&bHc~k z7CaEg$9f!;#Rt^iIBC)*(10bgo#Y z505YqnU`BqWzYoyt4j}=>Y%4(r^Q^r_{;^Mrl|zRSZ94V$MyK;cx~x3Gv}iV{J- z#a-JJT`UOiUn&{?Q7LXeY2h~^5x(|^1ZI`kJK5$~l*}7+*oY*s3y+V+x8U|gCTo}q zA=B7;0mre7e_&xMo-IZ!6EHd@C58$3e?cZpj$^u5@CS(0VmI)R^k9e>XiaWVGIG>Y z%0O{>4eES|z){SQHgi@+TPXwP1-c&D)UiMm(m{aUcfvAeAsI(J(IU@9pYa@A&EMqp zJhQsWkLV2MD8z7^p(-b4g^{>129`OxHbCU$qEufSu|BV7rI`Atl$*~>&8UUif=>cn z?Vk1Mlb{uuzEi0@!xpo@il`6c zaL6Wmis~s8A?TnWWdWj6U=SOB-;d-Zz3ViuQ+C||%^zMCWbK;mIGJA0-+u*n`mm}l zg@nTgn_MT;$$hUq9bYGbF98W;ye#o3{3*`c^1MH-Sds&yuLyP_8chNlEVc00B~`H` zt1!WmDpIe=Q9LQFB{+dLU(LxuuU)gddp}p5=zW!n72%zZ%%|q$bZB`;NE~{{{XJWG z>uSM7NH-#z0YAS?g=mZd!-6K#j0lh|gVMne$We3gUh+=&-^Zn^`v$7KVkG%_##|HHggn=pxG#71>t)i@Ao804|jU_g0z+LEx zce?oMJ(W3~m>GMlYy&l{Mya(di|g%}EqH-ap!H7Wy}E~UCQm&LZ4>ZZ{jCLD@4e-= z1H{j&>ThmcJAvgto7Ya?@!Oof+qf2&eeF6~;T3LwHNZF1&aZ|W;?Fe^ld+vD#p1N; z&0md)xAtq2{+djiS4bvdm|=Rn8YYH@Upvk4pkbOnc`I^mz1}>UY3@ZtZu93#){MHdSKvwHLYj z&~s|Fe!JJ*?XHYOdk3_usA%V z#Wq-x0?D}dlv1(GzC!W#1E)C|s?!WQqRvnU55ObSFt#gVQ~6Z|>p=e4OzAQ_z{92& zuCqBX&IHLhAJ7w^!qdA>2R}^;H-lev+zpwDl&Hecb-Yc@^NSdU@F&F{O~*1fcbs9S z>@%3P`*M`PPfBCSeqScJsNCs5VXfg`_*mtihSMr?151Cqo?JH>%rCh zn*yZ*)RpJtj6{K?dQF>m7cF=9?R*km7G8 z+i~H|ABVYX9^J$-D29`QO6ro8D>c?PF)l<|fgtE6g%5(p5EX$^xBEAi6@XcUtif2J zNe$`7pZ!J-7Xg zMBj`4)M9f-gu+^qh|_yD=T;GuhdJVxMJtNx^^Sm$_&L{$Syvgf7P7e!nm zVAWQx)zuIs6E}gChC!?o!$M=^38-yAP9!3I4|?q0UKGVT0*NZor3Y5;*%GA`dN9Lyts?z)@7477i!WJ4^xw8 zJ!oUe@=~8X?~1N%@~oWz zm^>o>#K|)tO`Zi$wkePiVw^nCAtulG?0H$3ysY%FUagv`sA~aS5#-3Isjwc9RbD2C z)t$!Y&5c+4W^u;HoFxEcz2Jh)%Oohk`iseCYWj5BNiz|1!x>1lLOKb$#Y>8RgUz*tLKO*J>IVr ze1=04LYHt)10W>^T@RHPFQe9c@S5iJAp@rX1YuT|=!`As?cNPI{)-MS+g-E1@uK3* 
zoh8+{5jPVwk~n@XGR2%{R0@by5UDpNLkla;JkKk~5~9OWQNk;Cntd3t2ddNY9Me(> zxtxE4x|%o_7E)*K8?(&en-=8i8P_SOKk>NgL<+hKCW{dO?&f>I}+2^&OBN!c-#_ zU^5$ZU~O@sNc`1DPBs?C%ynwM*+=S8grs3dx}!RSP~>-hUevj7b&NmgapMG)6?ybH z6@|k?x)znJn(GOtuBtvDIzyzn!{-psNL6(n)oBBOUtQv01*jRI$vMH)J6L3hk)T!- zNI@da3c>*Ng51g?>%>0iQZ8PXqEu5CO@Z#igXYE-8JOJmd-viLhO6H4;uOZb&R#^c z(=HY+%p`=z4O#9B%(inVKKfU@iokB7%gc*<+)EXM&9@s1FUB23X<+g5f=>8?T1SKfPv(UR(8h>_#)^eEj31FFYzJ0@^^tF;6O=ae zILO~C)U}Zl1pZ9$I~}!z-H#1c1<+vf0L?0h(RwqvFPkI8!cmWNwh!(-p=$|>1i!t~hcw1>_fSzhuo{$;xlmjokOF)_N6^#DjuyIbwVH9P*C$m48 zf*~ojC&V_Uc~D3mVsN5s-hsqRe4w9c%5$cc3{!JPJ@WyubQBipax&i>ym%~!_-?~S z3m;&n>>DcBAhp9bDi%skc%~njM9RJ4?C~N$GIylE6pIM+)aE!|u*eF^8?ix318IDV z3@+i(;t#s|Ci-rP2x24ik$ZE-lyZo!In;^Mqb1VtbgFbp`u;*(4}9p$!*Vjc5R-6n z5%`NCvDP3Fv_6ym#jrKL$c@5xI14X4hSFf%>UBr=7r$LdC>d5C38#vNl!*N0SH`CN zh@;03eh;L?CO7dWA}x65>P{grFE~;%g~ErGpNm0G>jF1>Z=Fq@N&SXR1@+hY2vf=` zTtHz*SEpPn1;Xw{B3-#Fn+NeoNlD`-BqvSsf%ggsF9vH>e}kSVFET>t(y`xEzqQJZ zlwF{ROn_zYto-ZMwe^iRn{T(?HP2frfCW=j6joMTnK@PBJ+glbI>gBd-gCL{S&UQ3 zO;6}gBYGQt--KV7fId?HU46~3zfIc{YYh?|xP|7uAP-F)}F zJAAFP`Py2BpH*@q$JtMeHsNXssUnpvv9w%n8wkcM0N#NFMUlG|><`et;O^0RDkfP4 zeXExkHjGhFGEtGjeF$Z7r0=eaT@AV$fr+G6Yn4A&B<+?AaekM?`kC9M5Ik7!PxojV z(#9tBCm_?k?AYtu15NnkHI_`RRGiSr6`jIk0KXFUN5n9XKWRY1$%+bDCH0HUUAg#V zWiVZBxz3h#b+%uriv$)wEO!zi1_&}Ssb6qs!w@llst&1p!sR(;`u!v23Rw2Q8Yj=iu9#e^VK@)_W$J{?$T1n|>N!-onJP z7+4w}SXRbJumT+47SHyuJFwQ?}vr0TgC1OKr8T}Mu5X+usk3PZ3IjPzZEvUUb*@HAZ;gyxZrH2_>204RG zlJ}M4KW@I_rSilYn#|)A8rKo{NyQxwdZWi6F+_mc2_ZgFT;bDYrVgYH=Opesfe0{@ zb~9XyD5wJmj}vjMQMa+%>AC3;&UT-VZ-NlF*c`w=&yLmqEXw&K!`QSspbm^sgBkp z>xc8$9E^$RGu;>mK)Zu`lv|sAtVHM1C7cv6Nv)BA;U3a@ad<{5?a`cAmXMn@Y!{@1 zhLx;sDzjibW2O{@Tmqt?I_5qlnRCjzcbz0$%6>P|?r%G4@_>W6!( zt`~W)7NOvq-C^7!D zdhXgK_FK?@*@40ZV&VJG7y+FLxUVkF%#2IHy{azmz?hNZf!N^>*?}Baf;a2ol3yXC zJ@lC!heuZ#x)Y!Ep#T$9^khOo{o}!oRJ4UbaMlpPLeeAQ{w#zDGta}1=B1>;#i<7e zgl8<`p@>0|0Uh#AL|8^F5~bfOeSXBo8Q%Bx;>EWbumyV5wRPLKC@P8z!h>!wgCQs)#O zcGh@4;M%f)t4jf%9P#ki4?)9Xc(1nzq&;}4m_oT_*r=F=>w-Yli{=P3VFt6oiY>6} 
zyv&N`+dtLq)USnRGQr=x{)SdYP^QsfRQQHtY_@D;PT3ax=~=k9bILG$oCAnX$ivoi z$~Nbet;(J93hr&L%MX35;f6J2zdpO=+M_Lf-pk|g zY47~{uoSfVVIgP3!@{lpA?!P(AoJ>0AFJ$ZSYV0VvxVV?|LEQ#DrcMT%3t6? z{niupZ_D+z{K`cSx&(VJZtU6mH;-3uK2iT>rd}i3D0iu%^>WedAl}Z@s|f^4aoqN* z)0x8Aem0+|f4f@l^g(-+*oqJRocmzHoTXZ>G8NaEvY>y_X_v}R>(x)iu8u#Ks>3LD zqs+8&z%Ac{eHUCXRh*YpaqHtsscn$OA!y9_^c9p)?e_T@M|>!u;OEO2Tyn$g{1C1d z5GZb);BL%4aWpIc4cX_W0ss9Hx&RM6=+qdCQTjMg#%*2AV_`i~u|?ElE@r{Rkl{&*G9W`C zBS8+bh~MDudWrF-3YEDYT7!_>c|}n{tz6;Vxmubtx4e~tB$Q($?BN+{Z)g9k{z`v$ zUhUQYesW4iKRbL{7m^?f3EqYXrNEU5eKw;J=KaH7gGIoV3KtB-9#)t#A;bbxZTfGO zpDO2y{0@R$sPUqoobMdvXLae-C6Z+e0hHW>znF>YCmw&N)K+RLd4Qw_JQPG0J#qk$ z87Fs4dmq^=(y75E>jS43EVDe@4teob-h@M}h5*F|@**nCIRKQ-ZC5sVK5Yi#jZEAs1h!D)gqK#?Sj=agDJH0pY(n433Z!CY zvsk&qTcEKT{8>J;j4$I+*yEf*n+V@^e3ZpzOxb}k!@J4R=>jo*f3omiGlCQAnV7u(DJ zpojnX!a=h-zja>xDU^$pcSZJ!c?>iap9J2s`i8q7bwKf)teZ>^7x+gC#iCC@~NB`w7a<8J#v z;i6%1VG`@mG%(hJ)q_FtTHelbb6EpHrD#Ev9Ul(K>A81Hx9g7upo~lOu1^D{j`_c>7Wi2E-!35s=~<6%iRa-cN+P~kBPWyo;2?DnzV z4U#8lAdcK3AQgihZWQRBt^}$(9e27arhX`_h4{=x_m zQ40(`1U$^ADHC7Vu+6X^gceLnat0yxaJ^?9~AC3Q047fz*2N=vx4N~RjVhug66XoLTR6OOh3_Zzz3)ksuV`s5IRG= z{6H!`_Q(R>!Xry+e>T|S@E%)KML1gwzMto<2FG|=Fwb#zDU~`@0Q_8e#_5KvfkVJP zbli8gilky(h$R*FhumivUJYLu;j%RgqB`4iNcZf73d#z-G-yI!SyG zWH}BpsugxoztLu}q0FgiZ?Sk4e7OA&6=9IvN%*eLN8)UBYBecE4`W@Syip7SW=>q_ zE!bEhr4R;&N*f7?_dcSe5h z_k4o7T8I7TfkYE@(+W9_1>lAdf+W}5K$OJ^*~ja(WWwgr%<-wXifEHQ-p04)f>x2h zN?b1x(?xNyj8PLji0G@q$g!0_enEf_@i2tq4D<31<)-@SwYXOMx+kz?+keoghv`M$ zOc_EN9cJu$zM9Tg;s~_Eh|6gwhg#?+wqh~Nf}3K?7s*5?{;Zh$F6hJ;3X8Bj);mPb ztp|upX;1%T1J0A&YjVlp#S9}PSOAs^@?lFm(F7|A>rM6$y+-M=e39(8>(j42(?Fam z-ZRG~+%OriS27=_c3r*T(g&?KE{~?&fWyk?#6!nYd+;$RY1wXD2i&4?-HxYYG2Z4j z@j;v9pt8+UlnW2K2OZ>SF}7n7XA9mns1nC4y6r>TWNdm7901KQ;DxuA))tBZrnZ7Z z%PHpEazV=Z4EIAC5b&zS*26wz%2pACsyXCQDqK|Sjk(7{V^LP@tWx8JwW6hl{$*}0 z_!Q|%mTLCz*^U&EV2_coNGRqr1}4clR|&(GY6;he49YzH31dI=iX^StM80Y*k?Te6 z%7v+wZJTzAC7nsd(m>-$_&YXj_)v$XW%#3M@UZ{gjz%(=QVr^k1jYkrWJIuru^K`< 
zqtG(1=R7IZZbvY10e>(m@H`mhpyGqcV!(r=sOF$l<8dN+aGE`ML49~|R$Etxev$?) zqaRpX;O@$qhEE*9!u2$xKvfSTE>I&-`RyAyb&151-_IWJkdKM)J{InD$i>#Zv~?e} zJMAnU=Pq*1>yLJaYZWCd9BJiA9~gQs5EyK3SZ#SrqTI z)0TWgY={KQby{xbr8x%MwBTQ4iCn=jE@e_25d zOw$G*?a6LRSOO2ES@r$CDe1StY6>{PqoWDk7^`$aFR_k$altD^3a!_L#}cWhMUUOZ z=e3ScQ@a#*I~QJ}!Kp~G0b|RNI!Sn=g0XCgYl?7TUoVv^IG;E*?KD)k+$Jw%b!Y2T zNDgq=e&E+F3gZ#4Noq1LEEPzsRxH5xTPcgJZZWSz(AG-cXVoxsRy9blxc8U?b)r!M zIE(x)_{==j#C7fEcpRzV{D!l#JYqHdc#8Hc8I|oWc@6b^KAIdrxt`x+Qd`p;a1u-2PE83kRCokx; zC!ahFfe0BsfekvpFHS9gF9Bh;^EWq?9<}TjPX0A=SRR&4uD|y5%Ski5vlNk=t~*mt zqDMC5{1-47c}TX9FzQyI6#9hmdzu)sA=O03)ysM}gQll!Y6jK_PExC>OM?!aR1AjN zljAP;ACCH`5N4-Wyjr_hli6__8i=UcoOf6w1_4%(;d?DD%jA{+$gUJG!zu|kgJb^tSnSC~`R6htar`cTblSt|%|hNZ9MsLiJ6r6CfCnxHs@gFT!4?jjBaw*Acp$ifriVIOIDWZ8FjUi= zY&N2RI&ZabxNd!Zh-=Pc9=4-r)#9TH3l!$XwVoa?rLX;SR27ULC8j-JN0%};r5qr_ zr*-IsPdx|#@*%hidyC!L7Y-~Zk>a{|0vy*9c0PGhRt^S*#fB$NT*tSrrtXuF1pa9{ zn571PIT)sb63BB7K3n;$X8#3ZC@i_YruAV-4vFC7 z%B`%tbna)2*3NjMrIqIK#QqM#gf@fmO6`$!zd!{F|XA;g>p^v@Py743q z-fTOTJsvdACo30yxbNyYd=CO-qYO|PAq}$P=m_DU`f<;8b6ttTp3`;%bupQawGd^M zo&_H7X1H2ZV&Dqq}>mF8Q<;R+MD${_^ zXv!=$p0|^d(R!w30ew`Te@Ak-3(Oz zzy=*?OgK@cw#lhJoj9kghLb* z;6?!jrQu};hMMS~Kj2QW@VSVrAZ3%;?|~*^8LWehLO6T7a<=sV`s|$p>w{p4Wc}k; zkwpP1Iq1rcJn2SbHd6(#qPVE)XD1t+eGvJ=(@YV^maP26O83Qp?P}-og?ryCPQcI6 z8J1z19Ea$GLrRp?_{4TWX-Cs<+-`dh>-CAouKNcCw5YxK+~#!QQ15sE|DZRy;k=iJi2Vi_(~C5F`HKNv=SJNvxUy_%3RPF zH5|}Ktaqln%lESPRKz!>c)~qBwjkxD1qTrUKGPz8 z#oX#Fc3W0!%2fmSmfn|<@KT_JjRKD`uQ0!w>XZ`}w|jaw^A;J3QZI@Zogc>z&8Z}4 zVKLi+t}9#PvKGN)-z|Q`&{t?66$(dh5l4s5T!uL1EMwoY9&Efib+F%3^tCZ}BC*Fg zDmj7L1SzYOTULE&Hk26VshYt4C0|Y_I z>6ky~`UA^`zs)9^;OOR%M8S0m&eMFm*^HUAm}xXO-xO*rgH6;sJrVB}v@uT{_VdPb z8=YhmkGyVLr1ZLoHk?4Ot;NLakT4(|b-0h2QJ>kkYg;CbnY(j|;BMQ=H$3qKxZ$Wp z0GR+6GPspLpS1CG&>84gw7Gt-ZpQ8GXqg*bn6E9{OXGl8G*?|w^75;ToWOLKSB)%{ zT58AT15($OnI>#)`Lr;N@H=;c#xWwVohUz;9V=(PFj-@$#R}^wo_Wg;%$k8r!V7Cm z&%2D!GpT3fiPbMgcL>YvgVFjoNSfqUUMrC@c3O=>Da&6D8Ql+oJTF9W1^HotiVBHE 
zNFDp;Y8EOFl|j90C)Sz!SsjCr@;tL#2VX&V!C)dMJy{DI5A=E8*tTJm@`ma!;7(Z? zD*o2;7l13&X{CiydK5xpFOcd5A~=V=Uceltg&y<*=I+&AAhkh8aB~3*>gO*oQ^yPF z^c_r6J+3sK_yVyMfl%NJ;Ks&DH$S-3Q7biKGKh+vf+B`SJ82K6;<6?ae62R{AmORh zQWwe+mkmQJsj3QrFhvztQj!{#@CwOS{{r(+UMwQ!A;dVK?DFu&;^nm}+w>($Zd?7v z6H~!YD*yApDz9I{^J<3_wy5w@Harlh+%Zu#p9+`rY|a;%b2PjaA`qM?Jleqhd=Y^U z!qXj3IUT4*_mhj;--!R*YLkiE=Rr`H!|vdK`fsgo4Px4|XRe!u%Bv(LtZkuW+bhHi z7NNkc1(OS(<9@co$5vs^K@JxX{@0lDbF19x_4dtQSl~m#tJ|}RV-GE!<#)Q{EG-5f z(WdzM2=l0uR|I}p2KX~ycX&6cx39CJo#q(au2`bpPFxh5L4n%hhCRzl6q99k2cUez zf%FH@0`D-ZQ(qpIXnl=zR(>%WGZW*s{$jq1Gf23PxPta?!*OIqMRcHh;bj5~j*HQa zF)*|&+Q@==jdr33jz*DxM((qhrx!3JFF7^azGJdn5E$(0GyP4~Vovar1+pMjmNmgx zvihkKhOM}vmI=3ot1n0F1-!b%O5Js>;u?8uBC1_2WTqkw` zPRnV^*^)aq7L60Dt5R8p>bw;zAw#Nzh{Z&2?4t-wj8~4l-%s&$2;s>_Ms@fPb>obL zMhmC^-=Q}qFLv`%$E$fD{0{wouYQc#_Z`ObD@?NQ(2WyE_(A&P-w{0IJM?lzRN3#* zPhlPWku?rVp+*sp$-UYw?q&I-^r1U)@Z`a)9}&{k5mdx^Xdq`==E2f~5j8P_D5l zSJ0alo`o&p_B>?F6YB`bkcLRMw1S+2jkO6N8gL~g>|-RRH|;Iu6~~EYOTkPa94N-Z zrUu{&Xz=Kyqm>lDY_JrFiWGruH5S)dMmNEQn71=EB6sg7YS%D@(4#9oit!GumTQFK zp2F4Vp}Gs;HYV66%R$dWLhL;P>)n{n{=X0U27R_N3u1L(KF zCt=hhxj^T=Otr+j2-Bfk)O<5awcFLZicf;#La!AD=3l+k)IMFg-$Wf~9tTf+rIBU5 zI`i0H6FY=^-i48=Rq;hKaBwOk>+*sE>>?dKwrz+}Yf~3qY=PcgZov>n;K~DrMA~ss z?hUmazWD%Qv4!X{c7NG!**<@KorrK|&ghKCZ?kUt!tiYT(^<~dokW>6(IyQ_CPCXU zsoT2Svsc;%##6T6_rnJGtQFzqyXXvgDpyk2z30sJ!hS6*!?@aoL!f{)Fg+V7AQe*v| zS*8yrjYqUZD#{i9zmT$5km4lG_M}T4*-?vM(YlZtxNmI4oPr=JlM+})5#>m_Yc6O8 zbUq}|3!b(L1@IT(9Xl1b+|^UzSKR%%k^ zs37w0RF1NzKX4rot`WYMhdkjsvb^OT5y6m|z66<})TuBTn+s?PwORz)Vvhlus9_c` zvaxw?+&-m_g4G;8kIj)m0X!Jc6`z{v-HVUP+RS+;MJN*~giUM=t7x`EBd?S2X?|Ne zbrN?$z0VpQh0)FL<*^Yw+A!TS@Hs7m4~O|}3%+8EH#0`NTN>XkCDmNzBlP2NasB?Ja=5wmdh>N^?>>Yppac--vt<|};;=vuDw1&CSG%#5CMn__~h}BX( zH%Bfo#4Je2&p4IA)aoDI%eqN$xu&F4E~Xu=;1fzge+MNPW#JNg3_@tj4C^*kcjsmR zlHX`a-8APIMQunV1hH?hZ|Z4rrN*zZ%RI1#(fWrw4A1Y{{t|JEjEk|+#EM5r6b=_y zrMY~w=!qi9cQ;ELwlrG>j9Ggn90v=E;{j2qcAP_)L$0J2I-IMMQ;NweCyYX}A*dO^ zLRRvODt66d)6l{dHv{|ghSz|LOYydspY-q8SkeiF=R=LKMnlQ8`Zc9l1GwF47Z2Vb 
z{-tE@D@ftX!eA&AgWu{LxKmTipv})jz^I;sEO1Ql$%x6t&lzDlOUySOXy^L~l{Mt^ zfTW810>2Svx4S%r&K57M>N>ZRv1NK)=@nSal8xT)R!`C}Nb_j0-1G(S zL*f1a{YGBYgEF(1eMUd{n}I7J86-+9h!+thh`}avjb*-Kn=gm9;WQ0Q>}A`TP=t*t z;oxY=XBl!JGFE<;OcLr!bjyuYPkdzr8s?KGhOtv)$!Ck$pQbz2Q-WRERl^{bY-*Ya z7(x|d7c4j8!VVFq0{dV@z1q?Gyg{F8xsTM5<_D%ca?@b^(JX6tf}L+T|bjc0b8n%w$Hx?UDl{!zB~P?{`VM=dgD3BXC)$Tmw{=0} z%l(N)kG}Eh;@JYVz-mV%Ogrer=Z~Cec6@yai9?(=Xr28G69aYTOqM02Um~EaVx-B# zCU{!?=De0y%B?1aK`n#2$yXHl`k^@MKjdq|v;<#Ng_etR8A}c>|0>>$lnX@)_hdFC zr=qfZY$xx_;e4<$!^8O$bup1&E+AVB1s)4=J^F#5PN&@wUgrmvsn zf`T~;97m${DpNH~nHV*ALrsq5uiV_*gaWYZ=qfc=i>{5ElPf{T$&3YfAB{=Is1V>V zym)tXq+%LFF1)#MXE#v|Q>7J*p?LE=x10b_y$S$|r=8_bf&%jajLGSVxyRJ>3jHwd zFXtc#Dn9bn`4}t#4P27rHb!!huoN!|IB>O=S@+D+n|Z}TI?!ypKbYbZ3}L7UI#(FM zjuN8S|9(RN0{OtTl4D2i_uA!RcY!-wiVTPkCwB*CN=Aq7O<<<*)bxfU1G;XtZhS_I zpkT6~cF;DCZmV?gSPp}bCl%zk7VLoAd!R^g zl1S3W&&u>X>m?BL1y46KjD^47H2Lhrc7jYg{gH^xEc8Ds_%Z!| zW1S;jgW*WWd2-zuqb__&5b{njugzt7c&Mgb7r!c-U?%Tbq?H+O+hdrpTd{rc+OSrv zM|~!JobJSeLt4j{_biNqtfSP>OYg{QKQJ7K=6X2g6QnxGbiwVmJ55^J|Lxe z5VM%s=Yk+@A^JFi6mc+-BIPWpCX<#}H~ih6jLGx=3I>Pf;njY-xZN3)e)j^D1XQMHYkqXnEe{Dt5BAVQBLMwPo7f?b72rmtE_t)v{_fp_^jXeIYBM36)&*|fh^`^#5$ zv8VVY23cfHS(;`N9c19asp`*IkKsa`{w_F!)0TA+T9BcV+=hv+Sy-7XS4DK2tDF-b)S?JzsTy?;qB$+ih&6Be9DW=Ya4@!lOc17vrMUnzEXgtJ92@0ST-VTtKm^je(r zpUs3FUx5#dxLZkPs%)Jms(k(iU1gNcFVcNw<1;=Xjx$+yP_?oMC#K=Y?q~ z-{mADdLcF#-I$Z%g5^Bq;p*lUHJ)By%hOhlZ}U=}T`6* zYSRwknjc)=VRoss&_G+;`gKhUr4IP7o9Bud?2XU)XEtaT0W`pG3vNYUy{ch9Sz4718H*eNG zwYs^!wZUr)Z!xLaBn7yB@;*Z@bZH=6oOKu^HeMu}(|4~&cl~vx?)%7Y*4}J3EobFx z2D-KS&J&or>me+B;QQbYkceKwi%LI;>`qStjVvqT1hZ_nKlzH zRVTsBzvV8X!S{UL)|Js;I7}20AbTLr3A0S)equkiz09i3Tqq&FgIxP|+B*U^X#mga zt=;w~Jip(*;wM}+jY2wAnuDp=k^dyplji8EJ8mX}Zu1hipGj+!5C2`3USG@8?4cYa zPqT;e;x}1(eJxL~Zf<1dn;UtWXwO+WPkLR*yirC`*qZ%e6E~Fz|1c?IxZrjC=ujBr z{RP6h*Z6jOaei^vFPElSDaxLre!37>feP}ZqSnD%K*3No6Qb;LH*LiR5`%;y6>B&fRpM#w8fohR0Z_(O>~h3fWe zQpJ{Q-Lz@eY6`VgC__hTvn=m$7;yD`bJj z!nLfBm1>XE->nt?hViav<0a(}TFp{x>q;|7{oS&^Ygx;f+G>!B*Q7y{2U?|m+GI^u 
zvxkuiZ?%~vH}d3~k~~*`xAfPr0_LdSc=tL!jgOzDwfUL>`>fOtD|ro5UVu_M+-#PJ zj0E~54k2ix{j!Q;D*mo)h1A=uBu{OIGQMV38&mIAHnOsqVnVaBn0mLemX*a66OffD zRr#-q5Jg^~VHhK-19zT6cvI7{YsKjLg=!K%s)cbnVsp>@_|p374I9bJ%D?ab6Y_M~ zNU-w;9p+$4eo*VHZX>p}eodD|x-tG3Igj!bKh^+-Yu17@r;+@B$uIx@&%gN7{?A`5 z;eY?^KfI6E{VS0Zjm?emrfK~I4W7y`#lzBJ*4LhZ**~sV&dR+=>?D=0!pTc#0^Hx< zUj|f`n$Lh-qDyhR;haK@YaHqR8Th?CwTO95#2#$eRnE(9~;5K zS6J=^77t)S`(OUt`d376gLjbJ6~K+ehDIjAsewEB%Xw$oWU2j&+e*#@!~`)WZz_e| zOjpI!h)iod>R5-U6lRV(sr>SKW##q1UfS;}zo7HKtgfuA((|AHmHhLcakFImk6NcxmX4&&*A|Zv4vG zhuzbIPhBQ2MZ#hs!OjkkiU2#Gj~~=7S{T>I~1i zox8_ijj^&flr=ZftH&TiuZQnF7Rlr>*yGMv53WW9&PRbI(;^o14tU+Iy`eFZ#CCqP znU6=05skAiCFO%A!G9hb*ZfEEE#Xf+)~@_4`6hb|`a=gNOc=^|t7D|@aCV0`msgK9 zI_ylY)PP0x7}!bk>nV;>o`4D+oj^!D)>-aq?Fgb@tn=t;?Xgxfe7@sc%kQyPc%?S> z-20p@2RNnx(|gSI`|ZmXY#5CHeV@n0EoSW5@n_pt{Tqy$aIW{+;6CAE>lKQw*|y&~ zKCb*A+Ni>az>^8|cnqarD$A1}xxRw_lx~Ne<4X15b9~6dqa_X&;UhZFUy(p9hCOlf z_8s#?EoYBa>Bc|Wq-y$?d&mT)tDCq4IBec83%|MXJoxpsCEz(K2B?(nnqSjLM{<0H z;x|WVo%iI9c#p&4&KOCP2&&HH(Wd1+l<(I^8+kzY)8R*KU6FJQZaKfjy?wNiKk3tO zfMT-bG>-zuPwkD4yZ4=*1ZpP6e@4T?j{9BB%~#@bE%)VUlA{$P4$;B3-13*TiRE~B zbnIbb!spX^{e2yr!fWpR{$9c=`aZq<;bGWS_dU3z)Nz2`?n@;=D28F|NbAzL1S>W}x0Q|ZB@#HirIj^soLCS}t z)8sRrFq6H5)8v#(Cb-24<{$Ej*ulsX_aN?z2Z^M@Nx71z$&V&~seGw@d|5fD*7c{R zKc`7bZ}LRx$|~->QBFTyLa+PHO<~L~K=Gwer2^43)bpsGfN56v;UIR`6-dcVr8IE3 z(*xnmsUn$HKJ9;L9PI2JR6jfdkH8*J&NI-}>T#`}tnjq66PtwN(-Uyz3`eb>h-Yb@ zJAYag@``}U4qa?br3}sxit|vp8Nt5f#KDUpP0@QynCqtXe!6s6uv^EmuTrU zpevLCet8VASq5FharwedXJA*K0sD68ZV#3R{jfafm$hfW;<$auIXGAz^uzL?U)G)h z`)29E9xV@gusrDg@}M7<2mP}44A|9Yz~Z`U$!~kKJm|slp!>^%epnv#%i1$wSDyj9 zzWj784|=dX=>GDcAC?FGvi1zv)n~vG?qS*0wLIv-@}T?6gML^Z^vl{aU{{|5yZm&m zE)9CHJm~)NpdXe8{j&B9*wy7>kMP8|H)L8Df! 
zecWyA?4Kkm`*3jd{z2iv{(A#h$TEae^!J+%j_5v~EM{IvHTzO77!J)`5yaD6=)g!r z5AV^IaL}#cdx<0;2YvJ*TkkOpHP!D&ANObL5Vr`gMRS~UnLueKiFA4`BJ7!M5q9Tifl#9LL*#UY$2ki zc!DX8Ex`6PxH ze5%`cdR1>*heKlyqwI_~CsyMVgV*dh+wdUpm7i!(o%El8i2J~p`(bfD)r3(H$KeSm z-?~A7^#tqz%1AsL?Xv-nG|&M=h|zhn&2S_Ok|)68WQWLqPe9u&Xm#jRkev1@_~SkB zA|#wwO902A%=_*f3e6{;^+@v?;=2B_?R^2+KG;86@|nga{T)pX;l(wVYZuPbxE?2K zKD2|K_XkOO5cz%B5TV})-afg`?l9~ixc4f@r@QsjSKNJv^}iv18{_C3?;II}^vkDe z?QhBd_~n0b+u&r3kE{!pZD;rF*?zJoza5wes%EENyqbyA)7OjOq26P%n%|51e!REN zx)ZzzVb|nGy_EIqXnSN5JZY0W3ph#llg|f7pFZ%{es+)c61bi_`+Ivyx|h`V|FIh% zzTk@GEnLqa1FTB%2_>a!7-(+#Z)SMr@qBN$dfM1O-8KE3%2{_j<;A++oSRy$hw@-Y z61+!iTC%Fc(>%RXM`{_{tncLMlg&!)q?YG*&#HOm5a&c$E4oNe)@Sl-mf!h&c=*5T zrzf9lr=O~ayNTsLf7(C#WZ8u*>29$93{2*laiklyqr>C<1LkTGC%bzg#s53VkI0^$ zX_;Oe@9(;1v=SZ)MUia=j*tn#5@xo8wpcyY_^a6K=WM05=$KM`^V6f|} zR6p&Q;)w^!AT|OgBZf+>RQh{S`BUXj4i9>A*QE#G|bHV0<a-dGGi8tCG~+^8jRb zy&Fc|EtRBFsh(7lUToEt^{L7g?M|-Fi`vfXEsI}xxvh1f#Nqq|vhvt4U>*?hV7?(Vjg2>GPWUc4?<=u&Vc*T|Of=l9k`&`3W0Kh(2Sh z8yVQ5Lp=M)QpI5+s4YFcO6OlHBijXzFGh5+f2_yy&Ld^5bmFL&viM$-=e7TSUiG}2mbqt|NcWc*mN@JvVZd%3v(@<7O?c>ueEPN^znuT z7T3VP*7`cP8*E-+8^eU1kjHuoAM)^l-K>1j)6GWh^5b};c6>{`QOegTk~zt6Mo1@~ zThhT0VJiiPe1;I7lx6y86G$gz!7e}FP*}45^iFAF*m%V=Hy06FA7AC9?3EJnNUu3c z3cP{a=DSzA!n!BYptTxsf>y36}qq#yzdZXEFwwi3HX|6Zh&5hPdtKMp~nyprAwYAn-Z?#(+ zt1GMZ)y8UbwY9psy0*H$+FspQTUo2GHP)JIt+my)wKcZ(aKF;ZdVRgI-du03udc7H zudlb)H`*)hdb`nPn@)SRz1CiDx7!;VG;xFKHvqjsu?=3;uDPITLow-X@XoHc2ETrU zfI`rDU{BA@8b6%sdV&*%+xiZ4={i#jV>ROWa_f`h90WL5sJIl+KmD=D#!~q+`f_%EeB+lx8blN^_ZQ=^@uc zf>TN-*U>7`eBRB&H4Ed3y<{ZmODf|0fQ&8&_5mA z4Exs;?ssKsV5{Dd8*`>D)cj8 z*jjF0i%IQIH8<-1+n+hlz>g{8l}%HA;xuiua}xS<39ruBKET@JUvADGXG_E(4o6JE z;0$%w;uG$mhoMaE)L(ux7_ffQVUxHHN$hN_k|oYrrL?c76#>*GM#cj`!p0D=v~x zI>0lvwR5{(G<)5*g{_N2ttZwLQ@P%;U-@$Ve#VrtEM`pWVoVWsXB1O8sfgmUl*~!O zD*MCqrgM^#Imu=}J$8;$GAG%+8QXrwSOFX%vb6XL&JZ=H^UoGL zyKSU2?4Z;e(%v1Epp*q7PHCt%*4OEh3~WEW%+pIt6$Bjtw^hh6CKWCuTPf9Sq-1-A zTJ^^+x2*M)j3r4fr#5wN*J96dk`nQ2PNO`S%F}FIuZdiFzTuBffzbvsOK6vvQ!LMu 
z+tFD9)+v%X$q&Vcla$PhWaqeeOA@-})(2R5}N{_ z{1v|BJPR41?Be9^rV57^NXXa%0G;6Ugfngz=Zq=8_50k4+|9YY4Td2MfN2SD`}6M^ z5zm9{tFJVLZ3*v28e!ycN;C0F!%aHUHp$O*(0j|DG+~Q0GjOeT&42ePn-*#P=K6{N zdU79KP4vUfo*Yd5!HyH=u~27u2yVM%Rav~-;Ds$k z(dF~$M~?PUVqrd8S|NibEl$F4N!6gYP>z=S9!s0tNz~2QY=Jr`4LA0cYL5c zA+sroLc?h#TCQTa+ETtglqPsq-w$S z!z&oPzp{j>Ohi6w%Az%)7kBu)Q1jdWHBBzKoVPS>zG}p`2k1uRGW#{Qgv$}q3{J_GrG!VNmSc|9VqT|Ye!H4D zLFqDn4=fXJ-^sIjP9N&QdZjlAU4kmL#V`PfpXD&PhtC0asc5H2<|L!|jhN|li<~4Kl_*lN-AdlnJF5k;s#X-O4gHF_w_~Z!C?#_xN8JX# zA?3#BV~;ERNX(`Z-IPRhu1b3IS3<2~d#;uQHQ#}-20A2D3yL8LPt==eehs)ljkpnx z+e}UlIpCBf3htE5xnA78P9qLO>Y)J0t=7RY=I^;YO$SnOh?3st{yy@D*%nhM%KFt* z+bAU=i}bdel3?0##!vUE)kJ-6`kT7FF)LV~+=WHzX51{`q-5SDDb-hbH&!NX{ClC4 zCC$EJ-7K6~??yLgm>zR|Q{T=F)$Q=EX0R@Xjd{$j5(-GNuWHUC*~>#>;7!aYszJ=B zG-BS`%HPEBNH^q9Y_6@ZwpzF<8!M|DE!>K$8|%0o`CRAS>IPoUR%>Ow-CWsd@wD1* zthU>HkYBTf?{TfwY~#d)%+~Qk;@aGxi5P{A4b=ApYgI1E)m2=j&E`gPeZ5}asIRZD z)TwBr(OO?$Yv4F-H+a3ix)K|Jvc0|mu(gfWT2qE*dmUa|U%?wI&*?f%+NgujwPv#o za1Jcu?^?UviXCWe;H_N)C@#|Yt_@V2q-(fvfsBW@vEJI)pzZ1$Eo=gtCeW|;#tPoc zW@}{yk8PbF(6zE6*X{Zm&fmuR>RK~`ZMJA6|E<>P{u<6#I16LF^jc!Oh!V#QkGo_t<@&AuQ&LRinajMW_shEnlc~E!4Z_MQqc6Nt zlJ3VmAWhV=hN<7ioxR>bwgC;dHfV0v>kNM#Z#6@?y3Y3|jccsbK_p)DRdRc;A)=La zkiP+1*2tGW)R}J7yw1?9Fp!W{gR#)qDnzr^X0og{ zX>6m>TEz#C_kE2SsU-MVrxLQSuh!RTfDp-eF=wbzuKNfXgb8!tbYmS-ZsWLLsWZ^@ zm_cV;nk_1f&1f~~Hy~IeU?pOu2_!x<#LbX{NyJM!Mtj>dn6YLhVuSYc!NwQ*rZlpj z+1I9%pcm9IGnp}TZVggN(3(&`{bK?&Smda$fIN|th_~HfEnyYPW~f=|V5T#;u@y|p zm=u9hADKWih+bt(nfwh(!eZ7oKo|{zooGVS>;})POi@w)IwTBP_`qn@gkfU3LMI$) zDozx?iIzD@2h_rcKudE2a?voZXg;k$!%!$SL*oIOCL_FJT?;sYyTN#ZRs7S|5!t z4#8xCfwWdM1vPk*`Y=%XpwZlb++sl?oOL4DMUxS4vh2ioK@{s7OgXqKyiTl@b~C|Y z%p_Uqf+=jkOkr8@PGmv58R2Q-GC9{YV04vk(=!ka`-)kF#pVj~f=MLIvI=E=sDoO_ z6~?8dGI37Ep$)%=U%|dw3>y7HX21jC+hlAqXVyi9@VNj4PC--YIE!%1L;z~GG}d6- zObUj%4V_^+c$HdXV+9VNt0El`3A1C)K-)}f7V((*t&KHFK9g9mQhH7+#EB&XnN9qq zrcfOAZEJG03wjZE03V0~Xb3`KjAKUH25CWzS}9cP;BWk+JQTppWh|O)h(zKDxyGb{ zf}0Xv^iI4KY5*^5Q1?m#+R}0(WB~Qrs-uw;a~XV^7-1r4fe{t~nZhLW%t(u}TTPm? 
zwn~kL1|%bF55dgD6r*Jd!qmb?Xo&U62H2-ZutW(}EsBCaW(#veYz!Q?5U{Zfy&*T; z7*PzDq#(RehhRyZLSYaL?+^`R#$*wa;D3O(hYT1g#21uGV_=rZ0cNt)65&UT1R>qtAXkEb0w*b^kp_u~MzIPOv8938L}j#K za!5F;T3x3eddyq_BO*(&BorbkMW7#E2#e5)z1H8E%TP7ck$`E5Ud7>Aro1BKi~*7t z{zq5f*{~t{sR?6RpxOZA39X5)T^NW+b(P-02w*Hy0;m^8+2#Y|F3kjCgIt38Ay9N% z^e@O0l0fHXK*WIH8w?3mpc}x_*0t=;#An*W5$T2)RLB9pW;%NAI)a07LjWSPnM~q% zEh#~gda!I{lr%{+a>$N}lL|6~Vs&VbWMH=QkI9NU$^g+gNDI|!gO&gl76`6HA7YS< zTBzKj6tv8sKB1n4*EE>11{jm74tc`?8Ci%A&#*>BfeQ?O6Os_WV)DS|R>)3PBoEC- zslq%^)B{Q#3VDPKo7co;>Z6>jGV!FNuftI_j8a)p7hr7>Hww^zmoXG#&fwP6W7DB* zY^6a$m@3R7pcNp}Gy)>}yoqCjjmgIEfqrNobLJ!q01x;Ua&w2j#V^};N`0R|!J z7rdU~VNU89HGz&ZFU8&>ATbnFSttkv7p;qNlaOIPK!(COw1>)xj6*U=)8+}3p~LY3 zDv@lJmI(R4BEYLC70VEwsHhofFbNjaCsiI9PX93zXpm-^`YXc+fkTI+(=oN6?a%@y z1GVy>j8fAonZ?wH-T|#)qrmz?Rv`pR; zh?!7-MKusE-9lz#Oi&%_3K9_ZOViaU)HU%KO%16(STNF*K@dFLIwR#AE|2=nK!QZz zgfHlH5(Z?JB`>`L)Gf>7*LYZHNDH%?1U|Ga~GJRDBhbFaD zABjLo9tm;C8XhkR1w#wvk>`jR_`M{HSRYM-xREw!?#wxm=@n4~GJq_ph4!E&K>!=b zH*pF6(O(#fj4W&;mSB<5Cm*gU4ygGfu2 zKoR0dbmK^f6bRdBNLCvo1V5J!D&t(U4!asel1<*A2#QH(vKjh;sF{5r5&438Vpau8 zF{3Z#hxsM>?Z}lVMD8m>j&@5kml}>JL-0eu8UR=hV9A25NMZ#AvMaFXnHcek?H`+ zF^OBLB(y}8P?2H+sSYy(Sq*{E5zGlD100k7hKfv_tU^9WL5Ucd3f4Uao3Vk?!8z7B z;sGr|JOW*p`366vQK%R+Pr(UP9|mqf}Vib2dgk*$vc^ROjXb)^kNgkjt~;a zDey0}Kw1bPF-m&tLm>{)q7r5_Q!Kp2NT5|fMYclnQo5w~A;dusDihu1nG2C zt6pOzNQDsb!CUlC0#kfZ>JCYGFnM4(`jPj7k1#`YDF_*|6(0zI5&AS8^94C=0v`m* zivrvAwnJwOoZNWe0ZNAw5VPGT?4(1i8f?Y5aaNGz@R|BZI zSP-T(fjH5cW)wV$TH%RiDQYel@ApQ)6Jmp9M?)#7BIHFQfxyM&nKO_LL}F^PRD9Sr z?3=m4WCEq4cX?c3P3akAAy$~yNI1EE=#mDJ!lD<5-isj;r8`N_l!=5@f?|Sp!y8;2 z=o*seP^)+jZU#n0(h?Fes$yn=a@2FVs3J&lY)nhd97q6W!_35Z;7QgTiWQ0)GY&e3 z>aFecS%U3^oBg!pyWR-YElDO`{1-V&i;5V#x#}L2zvOd`=L#*hwRdlZvYb;*oBo%AY zBJgLev)1Z(D(4_oQgM0YIwqS9l9O=$`tik;Kj;8I&7bo5yUsV9?`2C zZvL2rM6N_=9I3cM7~kdTol=pO&QdZjlAU4kmZYr}rZ=6Fl*~z5uwt{{;vD@E8(h-2 z4#}{5h(1$#SS!+Nts>oQkPfZNi`QIPlHfEOq{Hn3R6Oe1@K`8WD%#OYU$x{Y_>^uo zNJr@Q`jo@}JK%g#xNJ*TnDqKL9QdwP$yTwVB%q?*cI%$_6!U*Y;)M6vf+U0HaX;7V 
zF_|klvlK~JZ?yhMF2CVr!a=|{JaRYH2Vz()tt}rM*3kE-cuPYmTW;y_!5;!Ru5RiH z+%^BrjYi=-ycCI|2vNnOkauAm2mvN{eYUs~GI&532Of`(*j}G?2{q-oVlDr%pJC6U z#fTy$qeq^QxV#$k_vF&v@rPZ!3?7M}hWaa(L_8{~k8G*o=WNLB4oaMyUw4m&BoyaC zU`R;jA$fM8yA_DNRu0E;H$1{+Y>(9Drhm>)v`ckBMhhJYAbHj+pywf32@@&6+KpvA zhI`7Cy6o|nmhv|a+Zh)L#0Rd@;vufUS_cMtV|7`=8|>_`wliYmi8hK(vV(VIQ;Y{c zXI*qmu}m1{42ALP?g%Y1L|R>q$^1GcJCl^ml?)ucXmu>p={I9CS2D_$lw=*5;YLvm zFqk+>GOP%BPfg_g2rI%dTO&IX{A@^^gFbH0HO>$b~y3oe=WkrIJy= zTgn7{GYRXaagU}hz-dZ8H!U*3I^OILPOo^O0M=%|NEWI7Abl@V9H$-1C~-WwjE{he z$s*NH?^1%OumC^_(-fMnDa5w$It0Ns35ssU?BR8@K?3!G+=?11@NWp z`p`7>0h}?jbX!r>Bbsm(s&|c#RMZ^EX-ZN0n5U0Q*+(U=xH@voW|NR2_&qhfQzY{u z*%_t`IZ4wm(wojnO6DXdoE6g49iZ$DI`7!ShqnJlIp9wrOCGZLZ_MIPH;iusXuiRM zMAU+gg%ndHq^qO{49emG7jyA|=8|&Z#3&w6V-nb$WN~37uen;XDAi~e4^&hyI3(z} zK?=pIS#a%UZHvv2bO=qMpT&LO1X{g&vkx&}-L|2?u(9!q^t4LFD>AJ&Euc$3AxN5J z6iF#bkk~8$UP&Ws-oL9zSn-g~Styi`)vd-P5m}mr9sU*@7I~n$mSAnHOmG#>!s1n$ zw=v0-3`LNcsW2&zr%Gdl&`fEBW-Q(rrDU#TAv9yBSWJpdBK5hhCo@qKIAkQ?$}%#2 zLPq+hBqJ#<8OcvDPeTeBS*l&%-DJdH7?OB4lr-d!vi|umGN3lm9}@?n?4(>^R%)-*C-9s;?R*{`y_#CfrWJW=qakZy*M58<7%7 zMZT6^GQRPplG2CIuCHz^m3xYNb$J$x4k%-DIcuSzmVd zt?k;UL&FL6aO^cW5uV22$y5ZX3o%_Ji5=f_O3v~lPDlF~`V#9={-Y~~uHmW(PjXtsMv@4sNW)Z-@IJgo z74h$@os?`9$)+d2;}nHP?ULQK4K7i(QPSQUFg@UpDKUxGIu|LKla%+OprTGGos$k~ z39#M00cC|CXTG^25)$ww-#*01a#10K&$!=Sxfaq$`bbEjkEB*~ieygG_XgY?jfO>z zBAJtP8oWAfxR%nWeaC?>Mdl)zlaz=J!@Sly)dR{1-PRx2w4O)&3F?Q@Po3w{rTulO z7pH0GIMtGqvt{c zDbIwr71mf*20#y2dUgg~9pgeqU3AXAUC`J;5oE}Zd5OM2i@2zdpI(zcQFow)Y6qSe6=uT3WbduwAC56W0L*gLhQy0QPZS2(7uCb+*7%y;c&3-`QaUFk62R@GjGspQmGg-ocRuOuDLO8{ANu2SEFuaV-*hjzxGPk0(!V-8 zkB|9HdQ4s^a#GuXpWuBWWvBacRFw`y}HP-xUYC{ ziAN&pyD5psN1rc7!yanCT5o@~6+CSovVBc_(+Ein`A{jLUWEHL9<~|rzbRC4kqHQz z13#`5r^e*GnzM&V`KR`Z@T#DB9`5#~k-^p1M&h#Q-zkk!R%TYp3=3IlKvA(^-P@Xq zpkssBvW}fTp=0}}q+xYOOWrrOs*6>;BnbD(6~j>V+E;9Frms zZ1D|tm*C>_YWNq)G3pH>jMR*^5#=uDBS`Ktw@vQmC7GHRa#zlL`m;vRm=$Q$4bR z+j>+spyTiovW+j5G*g71{wH~UZt5kYq|O=2m&gaU8>8&3+r^&jQ{H;HO}6qUzqfSm 
zr`5%!T8lq57k_Fj{#0N5X{Gwfdz+VftMqkUTh0c3PKf$=Rd=Tji*1wZqfUeExzdIv z;eGD!bG_GS={*ZKe(xBnTGx*2t-QwC9vX;WjB89aS z-lc@xsxeKmu&W9K9#bCPCa^r_e{e6HO}NP`V68Wnx*l+rorh^6C{%t_+N z=;1>&&3n>Y=}dazUEm75%k?hr@yC&Z*D@yYQz{vqt4HYpKP3%1?eZd+BC!Z&@OZ%c z*c@d^0E!2kk0}olYmvnw4q_%miAi##k#N}N$BYLEKKLBTUFU5f{HR)6P?>=E7a6wX zUsR*akIvg@1X#+9fKp%TGpYk6fVN(DM;QWrIw;gk zpms>HoHc_PUd2(rZKgCL((2%EsK1|*tSFX-OZgZtHpE#Qh|?+HHBuUFD?ys1WL_j$ zIE)!sI3zjh@HY_!=>jP{_^8xFzl$1qQ&G@L4G5o-f(EWG&Nv)@!Ut2;_zCXNkb=ZZ zO;EP0^|c<6+ul?ECHmSJHVjKi(}W<}Nsl-+JrOrWxi(IxmcJl$Dm0tls07I(%fqeg zs6zf>O6w_do3AtvE9ae~zLc5twe(=RRPkI=t@32^-&T!0faD zB6@Oj*%0ULd2mC!i%-@rSJoJKsbq;ekeX4+>b1R{z3=naoQ8}Grps*$V=Sn%E(Rs( z33oFfxw+`>%I>{`xLFahW2xOY9Jy0fJ!6n`wuAgAnO%O(0a4#R&+4_6+Z9c=6_;`= ziPSDqGA9|uR!rri6_-35uh&0IX{1jo)+v&Ck?agphP+62j*GWNvePZzl3d_FTB#j! zk{Av{j*jK>?~i?sUEY0pXu%fm_O|zOoGqD7`2E<$t4^kOd!2WO2V2j9IO=TBdt=A? z70V)2HYT;%sx&Qm5XW9|1DRin^Vd!a;K5^XZV4w2X?OzZzzCHjCjsg+9Ew-P+~+}z z+<^0!Xvqlql91&H+0xVIBcBU6Jc!G$xKoFp=+s$}kGanW6X@$o)mQXa;(+1(K%^q4 zHSxz=4zphg(ug45+8#lZ{sN(-)O0I&RkVT!TCr-t`AeW`>J3qlsW(b1#6JxINuf2p zAx_%f5bbEZNx#z-^Qx$r2WO!zYXN^@RYYUfPR8sEN%gGMh-#0$Bc{%QaPv$)C~?T*N8jCMFR#);RvA#*t=igzW`!V&g1~X*mv8YV5X3-`GJNHBTIg zYJ(r}Kr%F+lu+j{)O=FKVDRv{9-B_WI#%G3Hx-#y>QDHTWEz7RWE!cDV?LXKfl_)r z=#F?aLAPQqb)MGydKH9TNFns#7>m!w&EDy3?Qk@%2CAE#!xqf9HGLUC#>wPY8MdI< zOBcS%5!9&VI&Y$t>43uQBZlTl=EqEi({-s{jJlrL`G>13f5}Ez!r5iGuG;FK7>Vc` z5}d1M@UZE;N;~SCGZ)XVsl-l2H@&6iL{V<*N*Z!pV+hR0{!7oNRrCB;A z-MvyMuKyAejmW=YT{Eex%_}{@lP%(HH1)uzKpLb(8va70LCOfge~A*;Bl~~!Ly&as z5{9!<>k}l+AN^&*Qc;iHL{^YhRocc1Es91+^`Hdj4G`$`YcRL(AZC6rduW*y2Y;vs?7cqQV*7@IZ*YCdCe!aC_T@?gpI;%*Xv#Y^M zJv>v>{|D2U!(9P(k;&0F@q5OO`s&ZM&V6N_Vb~iIsB=b5Ax`1!AKme-HF|ROkKS$Z z{{7MEuf-=lmA=LsW3oMQUXJUv^L|y)#8)*FoXwLHu30OT8$T&l`gk}&4Ua$Kbam-uJ7`rx<8Gf^vpJQz>@ z!P(*AO{q4|Ff>s`Ezb*IbpMXF>7RT)N zuCo8y+n)jzFX}#cx0U?>#xsb_R)lpIf+KZuF}NZq8Z1INHUVsGvUnPn_sDXD0cLo{ zeUX_49VGkG7}MA^;#ROfZJ4W!kvB50R#T*d*cE`llpI}~cFVguk&T(JcOUjWeFQuxiFs&GlU6q0dfCbGdk1*$% 
z>BBL{tj=SmokFr|a{^f0fHEMaKg<$ln_cBpAcON1pE23wRZK5M@lZP#KBd{oRLy%f z8!8#8(3Txt$%2%8G!z*KU(+Fs%ZaNeDwp0mc@}xAb2`69DoyNg2)1)%h_%;;_fw31 zgsCzQNSE;l*EGf}8)liYs0Z2}!cRH%8E&uZ1vPg*a;eJM-zlHD^st79I8;Qq{ORhox`UUm4c=ql}^W46n z>(9lJlqGbMTu*<^`19KzU)8?rPPpypY4#lv&ab%LZ|D4K>9^ZIo;+FpsoU=P*+zX@rH7XVQfdpqi$pPaEq7{6xzbxxM0-@>;ibg-)fqif&w z(Z?bNR&Ia$`J0t*+q_@;ioDDG7m`nEj}LFoyO$SN+}-ecbN|iT>@DiLj>PsNqXn<4>1Y7x)738+g$pioO_LfpJ~HPzd_@`ENd)kpG>nV`blORrHxfB~(Kw zK}B4VMNMYhfMn}5JZ-vNHy<=8q0~!!3i%E(`^00Sm|kVu$t2q4K8f?5b;U|3D;nb6 zS#MDjh=F1UE_J-ke~`x|RJ9#%SMtuhytopU2B&2Ff!mAoeJRp@@nO(4bG!Ldxo#|5 z0CZpOS3o?3Mp%u#1LjL|y}Y>Qp5nW5{W9p1Z?g#hel^p!A%5NRa=W_MJ>QjvbLv}B*Oqr(8+ z91XCE0U4?7Rh@eyUC*4T#zD+*2wOtieleZF&~$!v28`Fln7rj&`U_p@r^|EY&M~9` z>PLFEyX^*H_xOLVxsF%U9SIK~=zZWgdYXxLymWQST;>1ReQXQDWDhrEYAy;jXPHn5 z9|<8OYUaD52ZK@iFIsvdO4z0y2Et8Kc)s|8m8qIz6jdl`FbdO zWVE4)AIu(ni?77<^h6?C;qE!<*wraPpd3}Z(s*V?*!qsO>4ec6UEcOiIrjXh=sVV$ zD>G|BetEieXKvofr*pB|klWZSRv4qsdv4S{kHb4v z7!G|)u`OB4cw^)n7+qgEsZVFUIV$l>qD_ZAZAq?j&8afpbt@NI_ONol^i)-8s8cZ^ ztzgpgs}s)&)kUWRlf>OtOy{|Upqt~-#Z6ICFXE;ZKIksZLSivQzx2eKN=>|nXO{Vl zQGAcvQOo7;;ifa?hrOc`^^VjS%5p-Yh?~> zYj}xwU?yYChqv!!ZuNqYgJ+tJE^(~no5b23e(cU>7kWNVq30JiNY23ha4s48dgt(M z>vH9R-nTZnp1gdoGpF@zEr#3RAer&_tC z1{NH9>gy(7!+1`8`DHk=&h2$Svi?4cvvi!kbpZszRgG*PplE5t~oW ztv3hT`MYd$_s6W<=AaI-;&y}cg?OExO9)%u9~cs3D-oU1?Xv7G0ZgP=pH;Gz6%DtLHv zc{Gv&zK?XyrH~|Yk&#Finy$-Dw2C7INlQ@K(&K~2=DZ3SrnHe#zP(C5t>#A6ser}I zkR!|oi;l3*a8XBIU5s%cA7|)Ywj>~pb@$-e;Z}ASeE4cH!b~?yqVf$g7JtRqQU2(K zZo)mnAk(&qIhxU-2ecEeqigX@A(J_&D^UnU&$jk|6v{rcM?ti|b$Fo4q8}6o{JY

    7ub4N~U5p&*EMeeeW7rBHL7_AfdKxa!w zII+1K*3@RbzcCZ=No`Xu>{3Z)=tcCDpW~@EHzWpadB(_gmR_t2=f-m3N`vYvwqCb5 zGie9d-GLvY`%u(Km7clE3o3p94L?98V|GFCWfD+GY8> zxqX;zpZDYtK=7&4AB|wkZ@X@gu|4K)%K#k9fCfZQ zab-=xZSDEx4oCeDDk_bdl+zfp#XEqx)cxQUG8?kM|^mCewMn+AysP7vm{i%j;rQyBaSTR!!VP%aeet%O^Mx-6mI(TRFq-;m>7ohdWrkmG7I?44Z7(!p%R80_rl!Z_*Kf(M#X;K_b+fQpVD zR&aR5K?*LATAe6U12h;Rp|?%cW4v&Oky+`-$m0{G)9?w|)36!S0Yn&JQD zI)=T$a3W>xkel)lgy#NHYlpUSIOK~ZmWRm&eA8ZT=9`(h4H3?p*T`sk8T>l^=xqtZ zu&?g`G|mOIz_WHnBVu`FO}wxO4MZmbMb{{P%H;m`dl1!-521BtMsA*+T(H7?I&ve- zzRO`|Jb(5hmAW+(Y7M-nY|VJnkora2_R(l_mZ!(&bdgY(>X&`#(l3d*$?0%F0J5#y zo1|+z>wW5-^`B&{G+nT}TpFR4Dy3j7)x*cy(hb4sl8BH9IKv~!8e<>Y@pnR0|LkMY znr(|Yu@?Nq4a8{}@cv3$l!S_I_JDGnzHmQsE{!HQ^@uT&PFIw4ClQ5{E11xbC5z{}p6+w)Xaa&VJfFAY9?IBm{iULJO9{C+L?(y?MR+ ze-my3i5f$qK(G|H4tHK2lww^_*&?&p+S+;fb1J^6d)JrOA@U!#4u0;i%N+-qV#m-3 z!s@j1m>;%^La#RWc3!(&+1%Uwxvas(S}Y6Sii!^QZZ?)zsp#$Qiyb1gH+OfeLr-6QUz9HJb4G1U^BhXS+bT1GE42~ZrRqcZpAQqBUL7C^ z-!~h-ueE-C|I6<`|60yY0E+^B#}*9OP0>jnITk09;wj`Ubr%K88^W|%WLHo0Ypep9 zO+F7}Q2}i_S1?R)y;C}N?SReXZ;SSM_u60=7TrS^5dn~7?%Lhoc<^W4&;i!ooBaY1 zW3<`@lQ9-ia8P%#8S>=_;#V$^Q)&2^N+_>se)IZqjvmLR!OGqDp#XktCv^-**iOVo1!%OO&1 zh6r}w9K_;aeDA7cWa9pVuR8bdfLc-+?iBVYNgpe7HmCo`DZ`r&`4qOMKCw%IV;O@*`O43Q>VihZ+BAmg1uE>bg_4lbkRA>;Uw>aT#|PYR z^`5Q!;6;%$Oq&A&C(*DMlz34tQ8no2kUANp%q85j4l_c=9$FMq^f}uCom^59zxag@ zx8{8tfi<}MDDtsMdi)d$jzZY&S%MigGF|pu5=DZhwxnGo5exf>lcu}5R zA>Y>S0UuIT-YwZEyD!&TFPnyJ*3GwptLmK+Hts3BXb+`@*sqYGKAa~FPC7qHM4mMrK*ReTF78MnH;VZpXth+mDo7%4!5mnI8bVYA3a-QOJ{rSNv*zS zc<^^zY@v5Cr0|E$x-q-MA2!)_JEbG=vL3r@-=EZ8T{AYO5QPy%H%ic67%iYIO_RSI z5-HbxqT^LtkyT}sg}J^53F=z`X>YrMzIBfJkh~qS3FYzuM@XX%#VZhSU;=>|%f}g5 zsXuU$l%50lMSYF`r)FtFwepWFP{~g;CqHET^3B^;Dqu8HEfu{>b4tg#SOq~$$X%pH z@JCUdR^p`7ERfwh^|c-J=y_FxLI3(-U})d`e-SYMnt&+?ZXgQVtQOG|+AudaHGlOFX!#La~ldv&|cJ!GtGAn|%AdeteZ!L=;U2n};`z1hs#A zH7;#PVeCa4^A+0!DiXs!2WVXuu(UxtEHa?nmiM5c{n(xa(}0YSwhrGlIz`c-7``SS zVt9Iqr!eJ1&99?efBmk2p^4qNpB)qh5<4hLJ&YZcK$JA5gHk7V3Zt_?mi#JniW2$d 
zOU^GoU3OVrJnRnocEd@Sa)&RX_IpUO0APw39)WC9@7E7(?3MZ{dnf0+F86Q%WGM=j zk8^ze%htQ##WII%oDLy~#2q18^^TC_YuGDbg+V*S-9&J}^l{XU87txtCvzgagP(k- z9@7wIRR{8BJ1=Y zA@odAqAuu!HK8g@MAY5v>+FdC>JU8Bmgk)3oagFx9(JOozP*04U-9Q<9#BKz)F)QB zaa=(&Fx{`U@OCjg{)z>|=d6oyQvJ!&SFFW#v<)reHAyy>RNp<#Dp_!u<2_$mVwrKQ zB2Tj=RzoOWykJ+*@eUhm>C!p0}8CZk$%MCav#j{W0Aj|^AxW$^SH^&ziBkiHf5AIE4syBl9 zg#VTh06JRu@o#@T<|*x>@@LjwZHe5rcM(O5>V}?aZyA=So;nSQBYu@dpRWL>U#*|d z7A9OMlo0|P%)HVQ?WdY8&Z2#+=S1~aCQ=1$DP|NxS zQ|GromTKD2$0j}fx1`Brzj{ef$yRrA)wzcDJ=^5;EIcihGYl=mZ%ef>q0+*&b}b6T zQgyy7AKaU_b9Og=heJyH{PwTLUMXu)7MM-rSRNY;D0wkhs(q!cj7mJsR&Kl9r)<&p z=j^MLR3j}?BS5laZ9VN0xPwA}>@UJ57ej;f&T~|9TioPQeLpVlF*AA!dckBQ-Y(C{REHScYbA7)UKrlbILutea2&&VSx&KwkLLrKIVK( zqBV5NNIKOB?++{0HmhnD8qA`Sf{Vf#NSa*)Xq+o9f;|PY{gAZ_GTsZ|qOS<5J3$nU zjpr)$z;qQxa@LJN##o|jO{d# z2_XB9)5T@GBaVW&7OHz1S=nwNLPw~ZCp+}i`|X^Far>)ziA=X5vJyR)cIZDhWZ}HqL>Ad82EO@xDgE$ zYgd4{JrY3&D*!y!H!VCn`zEt<0>Ds`U4AZ(K1KpQzCe%CWE)vfR4kz^vPx1(Yte!k zz&SOHM9kp?Q=KF68*Eu8-pv2#^8q#za;3WGWDhvAfeE-b+|rIxG+P;sTA2Kfb8lWV zGC&;{inm8slux>FI)rOl2%f;fW?8hh_ivl%2GGNR#Nn z0M8|{0aB{^IqXkQG&Ge%?O2>;n$U{E&i-TAq&IzLD$L2081OwsBl%Xc-BWlD@2(c- z>KwvHA>;~Ubc7#iA>Ix`GVCU9c4PYwG!hJy0{YXbTg4YE5nFt!#YYML1xT5nFLiA} z@i~2~=B(L7R7;r3#s(uqXuYho)bQ+udl5so-R z?!n{T2_3c;x-!MuP|7zA+GSRl)jm6ldf62d7_*;7;mp`*t0efX`@&)js46oaxfwXt z%%TFMUg61mTONv$YR{7j}= z2_e#iioQX1wht^Lpin+#B%KgZ$zYRn(f0+Yt>U!+XBvb~>O#vlsfZZ7_z1OBYYkHp zMIxS0WQ;vgqmJjPaFJg3(&F5mR+=ui7az|#p~H0Z*`no% zzH>f^7%z^`LV(T8q*}Su>+b7+qEwkV-B+2yp3o8S!6zS@A^;U*O#rRdJW9OB54K7g zo>ZQMP|hOdSg3GJf=HXOsbtUwd69{=&PET#8FlA`x~;HHFzM=8GE-%P`IKslNGYb! 
zbdFjQl0LrTk%1pL15L27l_EcgirSHH29fgRbZ7c3%N?n=!Gc4nDc+eX;VN4sPCNa@ z^NKxGviG^XP3|n-hdpx~6RR`N7UcI)z0mP(r(MQxt%+I7Lx9!j?f$pJdX~mrvAaV~l3;O~Z;@KA;1p z4DFzN1tvM2Jt@VhHBXUqI)OI52>zRr&qi*As`Lz2_nn1oBW&T`cmRwL6du@=Bcal= zWUp1NL${7K55geWln7>6F{w(~5*mU-1?g`SJ08btuu>+|c<;{QaM%`)TKCjpJyo2E zQ|5fIVdxkD2}8F3TwI)4p(yK20bw0{jtVv(qC=R5>usvA$VxRD`3J%K=KF0{}$;VO-FkJY^E&E zx!{ZW8|7COxy{cLCgb7^1%{0r3dG(4)Cp%X%s^NX^>;uDwJ!!$9fUg|#Rb4{F-{EU^=T-?+mis>iChr~`f`ibJH2 zNc7la5hO0V>HM*7>m0#c*d+$AtUNNt#S+m}##)7PbVS@j2#a#oN-HI>#;^!vBEZ#% z&=$9sP$|uwL!fry)~F?G)_kr;NY)J+?xb%|c2yFp{#7_+*OyIFKIRp46d= zrT0$)<4)&jeW3$$tAJY&3PG{EEFAB(try$hAY&M^p^Qi6)G1FX(vz&J=k>oyKZGW<%40Z_ab2DpO;b0jpaJ$|STKEiK5z zX=C&ya?z)FdSV-?D7c(3TkZnErH7X>s$fAa#v2xWvuG5>IAPoB$N3H^TJZR{-%1Ke zWez{~a+b?Wy3@k>P-J3BK|#U>M{6-Cl1efzT`LjvJBpzK#6XX!LlSYzowg$yM-1_n{o@FZ-Z>Y5UzdjM znCJj2Y6}?_arL-nZeNW!*8;)RgW?sH%c_g!?!X-_p1F(5!c|I=A>D}NA0pecMI1Xz z*W+7eYZ)B|;Ll_?LlQlQ(c>%@c!kWt?C~Uwv#7MOad77!g@+li9>h9#P?>$fB#{xjA5&urrBbV!rIeCe0RZUH7gW zT@N{85qJ!S3*drPRd|!x7BiT*8h_H1XNP!CL6VyBoF+b#ZTUZyi?)NZhh|H_Lig!G zH?!~`S-@(WTt?$;D+?L4^7}YNE6p~-Do1CvWq7IXGM(|n=~SNT+JnIje2n!}+FS#z zfv+X`9Q7iz7o?U#CO6sR9aFm=*GyoUu9aeQVwm;g?@;_-DFzL|GOL23gzWqPS9yGS zeMEU!!Vm+C9mQJDwq_E3YPwG5CxSfgJg&vF?t9!%be@;4U+OIy*!XJdHTIFhufHVL@ylBN zm)h5VseSdA+Vii!{7dbN-~RZb_665G-17VXzNmfoMU87#(0P}5{YC9Rm?tP!J-b#y zW>+oy{dfJzYFq_jbb6K5V1u|2;VHHA3mT14R!c_}(F>N2jqWS}Hcqe1p1Nmj5}b4_ zRx8X_Vr3uU?z9Nb+9QR02KJ?3Rz*!E4qy;I_PZYlML(C^;3f_Sd(I^}#~oWHvP@T} z?3z1vP2UKM3wAP0VhhWIq@jG89u)rC|Kh7Of4UcptJ)10U~(lP@6@1jo&KhGfjMp8 ztZFa>bp!RcKfcL+qdysHli{)UC?=j{|4FuQh0N#t`EUK)q!_^Um5LM^Bq#!692AIoB$U)ST~x?zO|cH?1Pwt#!#bCsf*16Sxszh>92n!C7iA3 z_scKA!lO*_!AQn$jEESWr^MjhgkR#}#7v%M?=OFqI>FILzy0y3h7!X`JyJD9cn>RM zGT<9j<0x05r(@GW!Z9XQ{^DX$3J$SYf$1!>(NWv>;Ahf?Cf|UOM>PYyMCl%)7)m+?tIC0*Vs_sDl2e260#n zsr`mzs!{kzikoi3(*4ry^mxgPY+KHk;8lV0y3WxtVXHEm9J z2Nof#?))#2+`t=J!a=NAY=bBr(J=#45P=xB3h{rqLc$AMVeZaf&Rm9vsSk@0F*u31 zn+s+vf>8z4fIZ~FF+-5`w$Dxcz@%uL7)JOn zIo6IiYgs0A57%me`)a!A45u$jw<(RxbWV##QU;F0g*Ox+=g-cdFPgep&RxcbkLM+V 
z%*3Svx(p|a@?+bvZ0GJQKSW(;N8evWNCFY4N1VY({LejQyjec@8FB3Q<#)anS&nKd zRH;Q>DV5;{y+2#z>gf1SmkDC{K~*^h{^#OIMu6|yz*t_?7tftS;ySQ(+=CA;==Osh zr+4>QdOdj_4;8#~;7bUGR0%Ds%(}a~0;+KTh2YctMmcvj{5xQw%l%v6*nYl$wr8xB<+*}Y+#=5hY-88czoUbO zEcV>}J7C$)9PK&$Gh_JJ#=Sb7f=>O?m8oH>2Ge8?6i9sB&w8?$U|!Zrwod>F!7vw4CYHg(GJU*`i31whpop#O{MT#74D zh@w{?+&;@&rn>dVFZ}EBZyd-_j3yj zF`;o}%lf1G0+2^vblm(!Pn!PDQ6!;5eq`bD=FxDbHKqaz=ONG5R6s`E zd;C-XXmc{b+d*2p{9X4VSljOvBz%REb`UMfTPo_Ktou^Jogi~_`8g``VX;TuPK>v? zelzT5ySwl9V(Cfn)yj2d?}MV0YWusiNIb=V54I94EP8=-jW_3y$BW6LY^)X)y_U+F z<{silO$5)BN+wy*xnY?ua}!)|Co{3YlGAQb%7Y(jdzJAPWo^;_CJp&srfS&3sX-^EkR~k16bLx z?O~Pij@`v`8cuGO5#{_MPyC{E?@v)@8i+j4+$he+Jz4K}W|_)SA>o4Pe0Mjr1gAZ^ zK7$MTg*bNBg`dM()G`mf1*k}Ywn7tKw0;7!yfS4lrU6d4!{jR7U6DM#4~9~Zg3KK* zMT+iid@v6NqB->ub39O``-TxATsFtf$zbCE`w>@Yq7Z*$v&MB@dIsx#?`!FkU$gg` zMU!8RA@f0yJtGtOVHx3B_hmGpLni&ZvuYTH=bD!>dk-bOXF5-Qt+W@z$xbolrsiqr zzdm|&=*$p4X#7GR!O1tVyQ#92uvHwMegP4I&xr9YE`S`nQ0eX6S7r8ne}O&tYW^l$ zV!pjOM76Q|rmd0pmo0Pt2NhiQQOdU7yn4lDu(%m7K29E3>9Ui;blIw9d}o`x5kY^p z%oL9PZ1FOh9$a=A&q5f_zsq78^{oIX7HR{ySQ_j4zmfhVdQlb#!gu zpBjqg-nBq&T_58d_KiKJDc%9>ksmjC7Oh&b{8Im|2SG-8+dh4&8b8H&->?csXH!IeQflT%bXnqT& zaRYU>n3cg{G+kk+1~#&kn1<*=v)Z~C3sx#Cnpv%O0T_c3JYVGo?Awh)it$-yWK8ZG z>yg&O<={|3r2IRmwA{{6M{4U#JKbD*K#kV@qTNTmx$V|An@UrIzHUn}n!MJhKht=h z@YaFFjek(JMGsEPZFk&+gJgw>EvBnnyrHejr`4f9O~~}bOsg!=SgR`wK^Yq5Nt}VHYh9Qzo`j_uV`kYR*Y!AI%?5m{I`~X%^z3W#2k`C+sA7 z34?>{3$|LyFIezpl-qtT%)hTNqBpqR#>9A;&Fg^{J!LHxnbgJnc8oLN42>)RsrzTSM#u0EKk~H>?RPb53XPWsetp zR<&#u>$I%u5<+AqyI)=AlR>rHY)~UHS~Q!hFdz-xFU#mV_p+_z(K{hqoMM7v0_>JU zFV0V{we(T__f``DchHev2&yPV#k30~wlngy%MF~qKPUTJgz>G70C5f3`}a;j&wnjC zxyf<)P!`gV^*sL{!5>TUq$`Ic3#un2^|U6YH*AXMvu zB^wI{r7L$fV$*^EI9_uvxu5WV&vr%rd|VOqOl`_@Q%)cX>VdMZM-gB6H$x|Gha>jX zgdN7rMAREv#Dco(H6CRrSOdwdd1!jYZCCx^wlgsZAqYW|&hXsm;W8MgB6Gp)`(YFh zgaWnBg;H*bT41f&+~0vr?`+XLaMcz)6dD@>p#Rx_<(9cmB_L_*nJk5p)-{c;xe%+3 z`x5~7ceZC6#~W*3ZefP~`9X%;c88ceSaBfd!g4@SN8XT8H)ZohKY-AMa4KJZjGfWz1X?5wGz# 
z&HL;}D&e^^!h7UFYVIIi6>#}z52_P)v9tL;$-h^v>#39Z)@GgPE!>OM>w8%i;K@R834ChMVu7qHT=*$gmw6Uw0->Wr1ck`wy3))wV^ zjIk>y?Or@Eq2Lz$9hv#*cRK0b!*rCXaUPX784zyJjRzN*BeFn@(e>}nX@9q_#c#s* z_2_pyQ!r6iY89})P2Ig@a zngMttqeb~yX(0~KaZlS^T=g$dxDG@5N&sv>#ij}^l@YITL zbS{Wk>Ae8hs}GgrE@Daz*t8v8I%?6*vGQ!NS{*4TeM^xM1BOQ8Kk4mpCeH=x7cSog z(2Bc@Mdp|KT>J|daW_i*3^|lFA#wWHhPiMeT4tcQN+$MSR*Vw7>42KpEU_;wx-u%} zq|sTD2uKBgiRC->#1_p3Vh~1LRv?|)A&E&Z1_TWZXV&0okIh7%Z4*1pFiqUKCH}YW zGMdRg#3SsnK<`PF1g_kQyb1dZP--_{z1=>5Sy-?gTC#-?%Hyft4#T#QYzm6*jjn>> zvdOpGdyC-gOqY*%pMkTWAur#fM~yFjt(qV}dFfjNw@~3NgY@)hrYzMnu9%2V5{a06 z<_`AYC;Y*RlKK^YU~G^4e>hpW=*>Ra%#gq|&ua`DVx=+ZndagZszS)sEX;?3TpstL zs}2r|6E$ZPH^B}|IkJI;4lQrHi~>R=MR)(l6$BA7AfpzWu>hco=*M zwfPVuKy3?7+Q=M$C2$^0#2SH(S#@HOGWGc?034 zb_`Y(R2Z`eF)&=qSn%lt?wNy%!}^{?B9}P&F${{;0NdzABoWy}G295G)uaXgqF}Z> zW>5GeieVT&dbY?`0DC@XT~6=|;~(zn)jG&&DA3#DR#k%&G($_EYz{Dny}oTZ))w*O zsy`?hmNJ(7VBpAhqG~D{5T$9DH7O?79C6e)w@}y!gYS4|<#D=$5k-e@UT^MgODDfh z`ipO;nTPYF#7HtpN$O?}z|eaJsV@BYs$7Y`#v$y(hn_beVaoyu%wajyfY|K-Rv z`b5(}oBl0QCiqkpkg~C2wL*E`KMf9@)8oRWU@-X;_^;MIbyT{k7(@~~HLR+J;H5TLA3b499 zhj_U{*yJATIdG4XzOGHiDS9JlX4U`{k;QE&Pnc3O%2mxa$-qt&o)X4t#&2}UN6>k8 zYzLCX5k>pMgd?Iw$AwAl_dW4;P?ChhV+R~}jbrD>WD<+T8uzoqA78jz>>I5{yJh#S zIGzww64|!a04!#60+U7}8B)>4@Z8F5jj@v$Y~KmunNyr95HyMHkOqB$Qq7;G1^sXv zkrfu05SrOhpJ#BDExqiH@h)?jwcLK%+@kP9Kkq2ngEHf|eA7;wolHfeq8ODyn34@1 zDkbFf^7iQ{O4GFwe72gW<8ZsWs7g(Z=dpB*u|@A*#8;}vzCy3Ub*Hn>RqDTNAG~3n znv|DPjMhikbPpVG1=B_0i!Tn$m*$3!pc?-&tPRiV_iP=CnsFESU_?dLCP8o{JG89HJ$ zZIFaQKiN(J8(L(QSdv?ubChIeqoOL_$_O1MSlNa|R3ftsI*>YhqTS$mDp3>$2&&xuo>%rsQC zt^1oo2ckJ$G+wFs9S8?3#UiOER&)Qn+MNL3SEZ`Z7r>9y8CJ6H1|P8X5byz8Q{V&7 zp5Jyq7e9Rs28q+m0CemBLBzt4 z7tVs|hK0=n5-87SWKER{Sbu>1fHtNNf|dk9CCsp(dx@J=BD{9xek*&`&)y?t%%$#5 zi%*%o2()3@yC=ktVWtL`Y0S4|t9Igry({;@COSUHvquz1ggr%QcLLR&fMT%M=MkDF zb_5tat=2@1p$?=B)l5~QM~VJyLF;3fD>kdxF3bpc0isR;G>>i>6?{yZEXOOkhh6?a zwLxv!eSciWqqb^<)jvMiLLU2f(z`(Ev8Ji@K!5C*gz0$jzgidmp)0ymY))xhJg0p{ ztB@z?|JJcscJ425B)`$FoeAsts1Jx>F%qMLJA@|t@*Hx{qWfoCsZ&^^aq-YrdXYcT 
z$Y=-sxmLQEn?dHGFT|G61=$d@h2A0oMLPKQC7AASq>b3qX=K_C4^(8q2q?O%A;l>C zTiQ{=SEV5VBQ}joWbsCe@!r-l6&YH}!|tu@+*iER87u96-MMQsG-7jly<0*#p37Yw z4AZ|f@rpCu=B<$=)uX}V~Y9Krm);XfmZ%%Dsjo)%GgPT09P@qM5-r z5MVl(({7lA>)^QZ@;ERaz14Y7WeJum+=uHiY zDV-w3vToFXc)5WgUP49h)D#^nQh*XMDGq-)<&rxfq|Zz@ zD@R=+W480`jW(B+TB8L-EZTBIEG@r@?Y}EGM-CKxQJT7Tb7|P!t}MCUX{glXGO{9~ z(lWjQ^xTggxl1Sve72;0j-lo;?xGvwelMl9Ke1SW!NnzKL~v_E@5(oyJ!cBypH_9o z-+(Gb$6mR7wY^#tOJU^j7Y>Io91b)tIQ#7=)hPso-TV|oL>>>jTu4ClSP{G z+3Py>FDz?7x*BnX87eM6_#8FsH7c7s2S#_X6}D!NBbDWi2<~1uih~}X?^C1#%63o> zd+tRcNFT%YUQnToyHO~Ug>Beis{%Xhh%fJKz8CiOc_0`Z>R|)^EXuaTO|+cF=66_E11*gA=tEDRt}{ScXt==*~N(hayZ?yG^<0~xZnq` zm1c-YMlMwr)5sr>tuRf)%35a~V4ySKL>iqKTjTEJ)E%@sJ!5dqCZuB4iH%BS@Hrg- z_|j1cumog2IqzPwM?5XMhn5}_Td#%|W+S0QxZ$M75UhBf{THzVYAzzA1ajWueQ;x^ z=>pTS)mT>xv^r3;xJp0HYVP@0hVMU>|7Flw4ZJ3t&LmHk%MxcKoeuv$ZEpgv)%3pq zKhM)3b0|5eo=%}OtI&W>lR`qKOi3z{kOpKpsT3v2khD=M3XwTugDFZ#<{f38$09_( z_jTWEolYk{-_P&&|NhTQuD$oV*0tAouY0Y%_I}Phv(LcR_CgY$Z7m7&_w5dXzmLr* zd67rMTuC^|l^)AR%m-COB_aMY9F3Yzs}|#Q&Fn*HBEl)mHN`=UA`2>k#-qg1p0@&Ibg`|S?a@Y;R?TB zGPvi3eFhEf&;FM)2eCG#Ek5G%gft#`#m3-_K18V>RMG&}GcIxP{q2)aRYLRweP`NZ za(YCgW8rifsEtv~NLP80Wxrip}G>L#b9AmH^p? z%(fUtHU?5XSMf(cB^ypQkNv+_iZ)v(8JjXx^4kgaK$wl8{%+nsecABeD(GCk=7R&K zC!D3FWf(7vUq#fuQhnFee@b$2{$iHd8Cra~l?w13-{JR2!uIk1HjyRoH*C>#HcLWC zzP*Q}WnVQXJ$#5(@&8LH+e7(Yw1YGAdYPz1lTY08#S}aTZ6A-{6%CDpA5Ws(D1xvp zLM;N9x^#3N?)W!D>?w-XNhm?wMf;?ZS69jXHrv4LpQrrK5jJHy4D>89g}T?b8~EWX zrZUzzc5V8?Z}Pt2?1R#JwGCufa9#H$=b0s_c}cc~cVp>n2#z z-{RhFif+>n1nJ?@f2&@mqsJ`#H@`vj-`?WfUiO+}um9;o1OHF=Rtpaeo6)PJT-;8X ztqXeyv7)jwO#AgvTuqI)Pg%Py$%V-5Q7Vy$t(5EjzDNmEU*_?RjRV~e4K|}Bs^48n zGlwGd{gU_|tnG<3cil!G;;v^#l9nTm?Y617N~(LCfBL+7QM)cZyLLamJ%MrCX&aiu!F*hL_;CGl z(D2JH<736gFi+84&_HQ2@kHhr$! 
zccJxkr+RY{e;=4NA>8dPzR*o0>=Wq07&g)Zfxd{=m7p#7YT8=d?@gr5KNwyT!iN;g zhG{72X~SVp9}ex8E#x|sWyW!@XRMF1g!sN&_%itx0`~S<*+{Wr@{qd3r1B$Zmi5hZ z@sG?>+Yrhs+lx;8To_X_#F>(nCFm~pL3VxBytLQao&Xi`fGx^cumAW`YR~vKt*d7G zoriU~gfH6L50v&~oUV^#8UAYCkD$kGpz3su4HL zH$4^HELp9%;J*b&yXPkZEL88e#~%dKo(KH|n8N4}`em?^vnChy>Q!=@-!}&EBV~$p zUf;U>ep$VI<9N33@Ftw2Q!n1=42J1*tmx|6btNsdI~H2CyJXXKTV?H8`m)$~Yv&tB zC#5#<*tL>L8&>4vR@-E@Y458!`6HN)ce8J!aka@dna2AvCebhYv>JX{e&5D+f%4Gr zam7=44e_TM>{^)44&gabn3&;W^5p_HlZ0RCW`;889Q`B&W#7o(n4oFJkLokMc@7Ks* zr~jwy{$nY4hX17`{$pua;6Ig!0!o*PtNdfhgrI+}2M_$0y0F0i=I*6a(x_js)uo%q ze@w&wTU#ipp|ChmrcHzfIlT9lO{e5qlu+vU3qSTirQ05MHQKy20*=nSYzWN(E40b< zxxd)TEUXWnmIPK8kkZ#24$|+Dd9!bH-i4f=cVRg5U$Wr;F_aI4DBM2T8%9)a7docT z3HdaNHb{hTSmBG*)!HpzMY3%%B{M)8AbaF_2Y_!2`TOoXz1FAtHaT1M>u-`S_0e+g z{kZHGu{VIHiA_ttaH+eoL)#1^-9mQMo_p|}*_cc=Z}so12N^cYB7ydz4Y?pZ0I-D~g!z^LTwm znj4?WUn-^Jt}0%Z$Yzq46M2(_l+8PmeasQc12oLA_`ztfbj53fNss zr_kOb++t~3DRTCu%3hl!7>4gOwn=Se}lFu(oJs_}ctNb*v+GmNzkb(Xyf}3^R z@Y7JGNTsT|q*AwJmCQ{y{~`|6-Dkj-Y`<}-1a3*$B*>fMVG6`UQeF$+l3OZ?n>{F$vU5w&KO7e=6mPpx^~>cZ@-N{Zao^pf zv#`{0@q3%DFV=CEOfGz%gK}BrxA+4cR?^`@9cC+WE!DNkc1?%b%3NEeYt`+V9p)|e zhYHRUR|n~#tu5DdQ0^$Zwpxe97F&neViectV8=|+(>kcI71`xJ8%j=gBIM&3 zvNItiC%X{xNern%NXbc6LOzWlyAo1zQjL($V#scUl$;b0@_7u|osg1~JqYna#D+suVY9dAtfiZ3Hc_5)FGtgWG_O#jUjszQgX5n zA>R>FN#*9vXM@fwsA<TI*3b97=_NV z!A^D}q==9|6rz)Mgp{1LC*;o<(t(hYla7RJh#{Q_(aCNSxQ9Prb1Yi&7vbAqzC$Lb zwCIm~7yr%0N-Ga8mwT6goNwfOyUhfOe$=(f@tPvqanZu!Yb$Tte2eQM@b@3D7u_;~ zxD_hFBN6jUPDsJKB`1{ddY#BT-EyT4wuY;N3bN-!e7EGJ0^E|5ittB98j|x%PN=7M zOHQO;asus^QHrAiR2-d5&u+`v*rHr+4{%4YI@l1b1ug{h;JM)L;KX*h+@9c?a9+T9 zP0m|_ir0UhxE9^USL$3^$~_4zwF)^Frk{Dz13-aBV` zIpXb%;Tik*`ML&v-hOw_>ym)m_4f15iHARWu=1YCTsPbY;&q)ec7E3be!k6Pe*V>5 zKd&>&@szV1D+X+Kou7|tleIE^>JrEH6CA%dC}Yyez*31-l?JE@^2h# zlV8=*{x}T6{;eI`H1_b5$fxRdHoC%bXJ!rmI>&K1`nq1HzhkGsKPceMHT?P&=!y6C z`SMB2GyLRyNe87g>N8vwGUc#LyKjC&Eyjl}K-@C4d z@7B!Ezdt0KQ)lc=x`e|?zwn$^e!e}rIv*Iq_X{|kde!whh0YsKdUf6{gs(wgeXkAW 
zD7=vNB%Jt=U;pi1Ki{o|pLb~KID!0$_kCZ-f#`0>`KUUMckJv~=UB(%c5^(9_~P~D zKEk6o-yZz?;;iAh9sT@b(x>YWp5XZNqmKIr{(Is4liJzb4)E@TCmcgO;dIJNcmnwn z7L%VH!2`H1Tv@}<$JWZ^)RU(Kd@A5Q0S~U6$;nb3dRf-+*GIFtT{tiJJCmyhmdBW5 z26p2xlg*yyP{W}}kjZp#I8>0!9_i3ikj?gWIE9e{kMVRr>+R6X;S7g?f=u>IKReZ- z3eI$TpF@9#vmIJEG#6yDO|lrbT(*Z_JXw&-7Ahp0 zZSQC89ELbtD4;_1zw+6Jf?T$dLmog+1x=W2H8QOnj&P8nM_R)<@lF(!XYA)Mn;qb= zHJRnCjh`JT$Ypo+vkM$r`bE_cVX7b^sQ!TS98M7Avd1|L@{4=>nVc=cNJ3U*oH7*{KKRRK_j6Fox6wbfNA;xA|Kxk9WvZ1a1Nx_yQ}&;l*fu!bQxM$7?!yICi$2j6 zX6HK9p9J+!0^5bF@#ea&K~CLLJxyJH3jdlx=nHolgeef3LFfx%8H6binnCCbVHt!e z5Sl^g3t<_ADG-`L=nG*P1l<1}OV?Z%YDUTEfJ2~+AM&o(m|V7*Q%QUC(y3Fb#oh|{ zK&gG3oC#mO!jaMq&iK0tajV}+SffUbGqrc|@Q61Zn#(cCoOTP7t+|LOS=k6+kk~xi)4bk#SaegnSMI0Sy6*A?E zk-I}YtN6d%u`3twmX~RPDBEA)pC+E8;6|II^_O(B|T*UrCE;9pg+*s6A2YfAQ80qpIovg+f7gIl zUFHh2q#)z?V`y*@m#$U07o|qf_}X}PcibcHfdaGp_B&>$CiZbHX=VWq?P%wfl z%We>q-H1npG~6u(b>S*?-G_^KLUA_N1qCC>M*Kt~)#f6u_$8BL*(8}Q1cNTeED z#32y&L%|5L^_Se{?q0-UmuGV=QJ5nI4eR~z-=q++9_7#&1taW6z_eENbW@;uVHdv-U}+?n&#P@#@p=b zAHG0HyaEmF--hCdI6D-_eIm$9*f$hM#0?M{pkM?!lU;Z9b{g@5oiaJK_qkG#JJqL! 
zyVDVyQhqGhq$1?=5Z-;&y`0iN0Q~3<{N*)wV^*yb+>qLBv`1d=y{5GTF*?56^YacYQujt zazez;PDmb=f^6JQwVe=gzo1|Q*|tIRyuu?McxoocB5?AU6lB}}+QSth?imz}Alr6X z6(>ZTdYUCNd0YyzZADmPm1@M%PDqU)+xF}st`PB>kc;_JkZo%jT4lt~Pq9R%Mv!g$ zA+*MbgM-2oQjl$H5ELT578Hyi+qMT2SrvK2r(Ve93Mq#tr6Ak37ROB09-`got}w?d{THNB$L2(oP}$b}Rl4yM_uMLjJA*|v6d zyr3ekhrk+pDneeit#$L9c}EeqHUa?|>nzWRClfcHw5o4KJcYD|*$VDJDxR6RHe^~Y zG~!pqnjRM=&x#-uH!s-hh&`~^tnH^pP?wn%3O?ei7kQo*NJ0H`7D-WOh`65aO~I!| z$P=(PhD3yjyE-9RCV+W zw}`msuuQHAX)r=v0_N}KgovLFu>n4LUJCUf>=x1x@w4+Rk*N{n*nHB#=N=7NjFI)Y z`%bTm7o2;23o$|i2z$~VR8S29e(_X>CD@|mMJY5S;QZ5lo#x?y z`}EG_cyg2)L9@JGwcU@2_&x1`Zj`(vg~kwWV|Z34i1>Y{to1V^G$CLOOEqef5!YX0 zw@8+tP+im2r&eaHaYwv`o~p);iBFBRA19B}>|~T8wxI{8IjX<^(<6EBa5+g)PWO_oYQ zixO3WLY;uGP~__KMmP{c(ZTM|91(ECMl&nPGAXo#umgUbvKlcnFr#56HG)=aYS6<} zs}bL(1JV4GSEbOJfc!yjxguUa)H+aVgfJN} za8e^2Ou&%$J&_SxVH`E%dJToD-#zFfNW`nxx$RY;IT7_odEbcG@M6ni^128zbWQef z>m9M*4_=c-ko)jdfxC?n3orLp^@bGG+-`ZxS4bBGtVr*YGbe&M4Zl6&ou?x0Q{&=U zBi*x*x3fYd<^OO3)uMU?yb6-qCZtp(a~!&detKHKX55Nyn7kzdT&Wj{zU7aoEfo% zsT>+h*?;j{XGJVwsr$VxEtO!+ubdUJgw+-;W68aEpoQl%VhO7lt#{C>DIa6+TF#1C z!pdl>Cz^R0eW$o-ye8oHpbXr*BFNJykC9cmMVxVT)*7-AtRKy5#(t!Wr;vN zIpWH0S$P`ChfwE$kW(yt*0U4BTh(-P#Z!Qx`%ENaYRs9BL#UHE9n^`MC=t5j37^= zA$1@^#G;_^u@vNKJVSpFA>yKt3nQ4PL01r=dB7f_96phPJdOQZ`luGMC$<=WDK&yT zjaPZXs>=E>V4GXAato(@DutHe;$Z>nSfDK~rXp0&60-&~EGaY&xLqL0mV73Xs=DVv zPC7r}+R3^NeUG#lNxAr>jg!^}Oao~wN}o$f#=AlY+%@3yfh40OQh~@r8IeaSxS>)&Nz?8`4 z0pAU2FoI@%jd4pvxGLbhkcRK1AX_{#WOG5lu|dHIGO?Oigu* zZv!3}6pSESeBF;uxHVv%pzwnfWQ3>e`5`$NTR}Y?b6a#1=ldkK1h{=>B7G^98$`1GdGVXFpW(OT2#{9%yR}nH2Eor!%?E z^eQ8$>s4c>t=g;?aLEJazb0!@C{(w*udCMI=tW04?%y$+>%+PIRYJ;`>VG$qTFpZ0 zc5@uiA)D*xJmzksYMoLgPj)y=rD{i*!pbmDM#=~(&+oaZMz$-tsqnecncQ(G{Hj!` z6P^(iex+ojuo8t{p#3RT##Gx`1XE4rIPN8Uq`mGH{*{uY)W}r$$i!9LfdtvKaxdI60F$ zn8f@p1@+IZNQ_EiD#;c$J3_l{ic%veW}Tt#eMKBS%)El+4=Jcd=hXK!yb$o*;#}@% z6pWzO)Z#t4K0Fr+xWEa?pHfi&{Fb8Ay^BK{p1ejY@I|Q+)IV>h^i^{E9qo7)1)@QJ zgA`QQ10kpqxUpdqrY=oTFoN!VKp-ro%&oRY*@ANNmlQOZ)Ch!!0%45bA~k|)ewRSF 
zI}lc{bNA_QDX8sK3WOvOI-{VHG=jXN=@hl@-6Ey|1?6I+6x0xI4upLJAxAD6qF@Ah zMS9Rp;v47(9Q{X#Bt54)`x7BD+Pt+yyOg1n=*zVm6^ za!}2E;`g#0Maf7h)aF7vN>Z)+ZL%r6_-dc$8A0ArbF!&+v4m_2TaUCvCZnVvBeEKU zsYb`*9Tlgb!#>0Dk;<>U>(m7)Wa&&a;yrfHjS{a2sC>o@@jG+95=vMlg3D)J{QUz z=K)m%&Zg^W+207VfEQqGRf3m;>PYv32a<-dQfSD9S`?IW@hGn13 zFSVq;<4rtzI0l8QrO+6{0VGm&ur$=kNi;dlyNu9;fbGe@(l9FEU6g}Xa;}j=Qvw=M z#2Nu2uI!t$iH;HWBj8wE1Ev0~fS)pf(fao|DKsNs^=`fnA93;|U)eK4a|rK{NVS7= zLVMU5PgPG_$4j9F0plqLRd=2iC2T_X*XoH8_UFRO{oGF0JO!hsOxwD2kY_W@u1Fl!UO2~ubY;jeqW>qR{G#H_928$s(Y%j){Q+f$OV zyf6P~9VfXCg{G~!&>b&XLuJGoEb%DYJ7%^nUejZz3L|dWu2PeVxQxs+Hv9HaO>^Etv_Z6U3d`%DIIUOL3;{+tg`(6w{(qF$+4w=z_F5-YV`b?2fc5^=1*e zaA7p5S3m*fBCJ4_KuC>n7#B_tiCc?;5;rU)Zn6{(=R$E25H1KgC~DutsV9D{X`A^x255I#i9<4JF7gk!kSkR=#d z$F>3Q44r?f6uJ}eH=~1M?r^W;gidb4jBqRgKl6-SL;S-d9A_{ck|9o{a2x?=kx1QQ z7Ks#|bWt|PUgFdU$8+J@b-r8}@v9Cwd#HY!6i$G!J42d0x`=_08sS6&IyLn*_u_y* zoa6)2?NaCg;oWaldzE8sy2;qIsr|Y?W!(%?CCfd)LiLKDad!vQ4S(p`h(+S5JGB% zGYHt0Y|1C>Og4q-$60&KGeZi!x$wbv?mc`KaM&C#Nh6#I;oLpkX^!|A2y%5cKoGvg;BYw#MmUQL zZD@;XYJUaX79U7!n0HB`KLLLixLw$m{Ry%U!?EewreTD$xo{}bYI37UoNzFknhr+c zZYi9@g>j4qiuroLbLqTH3sWNuAmB3+sg6^Kg7A8#0C*3{Y$=?}g_hhx?O-SbVP{%Y z9t9%|2vm1RjwrBqFnI)prlTROy~h;_aT-;=vXiDI8bSORD>}Ev^m|MM_mzc3JpnxlDL}JfI3V02d0e?0 zN5BelCs!^*?u4VsT}_>{eA6UTxqN0~W-rfO&T^-S1>*_ch9`hqmBJ;MtQ}CEFT%A1 zjBMy;;=X|QLr@zw!UPCgF>RGX#J|7wN0d)U;W`Dp=((yA@M)A7fKnq&gs}aEUi(c0 z_P|$`&-0`du7_|5U9@5;JoI9Brj0O(fTLMbk&!nO?=NvVG0+<&LbxpioE8+?_qWGIsSy$g^(jG(3U>s2uCYIfSty0u z2zau&7u2eNr<~^&$q2VYIEP!PjW!5vb(cePxid(^A{3h5LBJ=Wt?m@s>a5UKvpDGI zXDe4+IB`4m#H+T=?3`CmRK$X5M7+4B4~|y_{D@_GjlYXUm=59fcl=p*#P?|@8h4Cv zCxmB66Fs~HCtc&y+UKM&gMj@GbcKk|KI<*b2s0t{ped?+BGzx_effDQ%p%~}200@{ z420AOcR?sFa6-hW@PZWXhVXN3U(Jd5Y3MITmy+-?gvhg{&SrAE-~uK8eZ3mpRfI6Z6ImzPOFV@&&EuZOMy_r5c0 zn{JJu{Es+WlQ*6?2keJJ2TJl)DX5BfWfP~&(!K$gQVvvBY6NBP;J3XTP6;@kl9VsD zTncKaS9Q|t4)-@;9d4nK(+HIbIDNEN_n3fxqae%oniMqU_<*`lZFZ+Fgu9c5Lr^e+ z{NeZ5AzBD-2)Ol3+uxkLE(N9j)~Zf;Cg9~Wd;_!*Bsqg;&{B9YU~?xVZ%9E+?RRDr 
zBJ9cIBH`DRq_$8SK@Q^S-*~{tfYY3itdN4H95udhLQ%lugMtxc*mt|p4SdspRVfGg z=5I>DJSWOQ9>czrgYZ4FiJOxeK^JCWkA#z13=%FO7xIHvN-%}%ToZ8L&^OJi@MdSfcMd0+N1D}6lxK0d2m{*2B&pfdYG1@jiAIW#{*E2 ze;#mQXi@JzKuC?SHy7IF-A0`l@XKkg@V*rGfiN8Nr`+5h@KuWXP!x<%mw^7~OATP;G>ZgqTZGGgI#=2uNi6*vU z5j)-N+YwjkzFA^@&7)&k*@#n4m1ob64iRz$yo&i($NebaPaqTePw~~Tk3>-FKknk@@8^J3ueCQPQX?ql*K@oKBEEsG zX6wnQ)hJ~1BuEEltmiUls$;ZbT~T=C`R*DSLB@LfmF|UB4>)p+t&=2cq#$Fxu~-pnmSQEJOH0OmCXe{V zideoBD>arZ{LdJ0u_AsIjHpT?`2sC7^B+)q>dxhP(j}bsiW?^*$jncF#APDRkj;7VKChdI*^%a-r6N?9neXtfH|vN`-Y7pFw=7dM<|{~6fjTgoe8-f%f?(~}xQX|ObZ^L9y3F{c}{%lsyL6UEz zAVWOrFdr0d3b@}It+ExRMv%>4e5KFfB0hmj&Z5z%Z)>5Blc|W2{gjq5UyiMNe*Jf=Lg#@{uUvdBQZZpN<&0^Iu`McZETo&R)j3c zPIXYIv0}v6zSwk4z-!-e(?}3wRHo>m{fP2%jBxQf>Zg|b2 z&?-|M6sU=wM(wHW2L#M-m(8I$@n?yv7wGJ4$u_9kbg#b385WH!}7K?A7Y zzVB}&MBIl*q;e#G5uqW33)!in9vpF(-kJ!qAw@TBM8GkWyli#EtIvolEL>FH*NUln zU&iA{F(WQ4!%U6I958cP(2UrE+EnS;-&i!)DS1sfeTBhBnrXmplYFy)5%wcs@~vKa zvjUz6K|S?XDKvvn@VL(y>IS@NitX%3jnEuIwL_I-=Ewmb#f8#J&~H*`LBNBrc~4y& zu;>C?hDeRDKZNVMxXW-8D5LNOy+`J2y%Y}M!tg-2A`l*N!m}b|)fkMavAP4JuJYR% z)VQ|WAlsm^(U@usze}?vAwz?Kz66x1n#IV+w0VIDYVfHsTM;s|j(3gOppv(QMOBiw z(U|4Y{6m_p33+{2XRZXrymo{!7l@$GlcmONL&)=NP**yafJ*0+V{J~pQG{*vlC1wJ z#eYiuAkJ5W!QyRDDLyBpScL2})Qx#CA$9if)x5f(I`xr1*if8ofVuA>T)2TAsaE?i zD5)wYrS@ol4k}p1DA4JCPx_>-Hz+q})ur~Nq4+P|T*=gdJx3jdU#0Oo0oB{eUt}$6 zkumWDQe(CwWaAUASr9bu2%3LOQ#MBj1?^$J5SZV9x_d8YE;J@)A=SZN9SAukFdq%f zt&B;4#xnloh-{Q>M^35+a;HFkdAMb2p$L`oqPzE}^L;vT@)Ilb@~bL@2;(e*vG||L zFOPolb?6_;>6$?Q22`>K1bq>*DqdrDCS*onz7v>TojGEJm?~Z!?A--spTL|Ln2ll* zJbyfPLRaj9!(cxjTI7<@A{$T{IK3N9S7mXfn1{n0Ms;Z%x&c)4`*4^s7l@E;i>5J; zAf#?+3a5r@dp1VDL)u#gUvrx3{oH=5Y znC;Nifjy}`CNLie%rCvH78!FCn#Mc|&20m-ZD8Kx%qvGrQ>Cf{dv<#&rKz6M3sleD z2_}7fp)r+vV;&<$ZJ(t?oc*vpQA@6pRCfrk?&59ay^7nWi2b** ziO8riN|oA=4(w@eeD|9t6zW(K~jH%8sw~)tr&7IwG6|VNeZ! 
z-7n(jOvSYE>KYMFhS1_<&sW4VnM5(@q((ReLVTAh;;uZ{C`4hL6i$Uu>ln9i?E^Mt zA}Q}rguPCK@JUHFm$QSM;vm5uk9J(bOI^_Lc}j2 zv?M*(N}(5o4V_#e;{8rYjc^8pF>iU6X9j$c^stWdw+Iz;5a_-|$pq>3=A=0bS28rm z1U%cYC^f>F1hm`U8&Sme3^kPTb=RSwp(fFRJp#T92U#xFLjh;ug<%3xBlIO;Xh>4T z0T9#-CrY6ogbB<$G{WB*@G+2;)zk=Q5%BwPpE*~))Nvn>HT>jyDfEYM8x2{_b5_9D zCi~)u5zZ!H>U4jQKQ~}4JZu%nBq^K&;Td|9OlZVf%w&05lp0|G0UgMNrh0t>ZZ*}L z%nedFmw;axzSWr`?v?X}OCt;Mg_0YBICX~4#SJ^E-qR+Jjy zds)}8@t;-+9`QX>o^UosSz%L za0*sR{u5L+a)t zZbNbl+>c!$mwFN zt&+X~)X=&=t_!wu;}%hpyAi2#6O=d;JD(YuqE7GT{gT5B>C*g3+=81qUlZnH9|ogd z-D~EW*$}J(bzskzYxH#>l*Sj>I*# z>Y&jSC~>0KshSr|SF2J2G;h#kL;v`HTiQlyv2Nv30b`kLS0S-P+mTpRK>oIfmE6O< z>P#glP8FXHuHJYWMTn*1$#VyB_f6og3U2HVxZTQ%7mrApN^sk9DU+vpB=;&Ow{dnL zEmLV~5z>F+hI=$)o}CX)iCZp2w75;=%CRZFqc`&=gC|ssg|3U zR9h0ZNnf$*Pb1a}Hnx-lH-O5=pVn46D9Hx}R;(M%Rg$K2ec(KgatI z9kjTUko85L<#nLMkDz6!K=UQynLLH0mS=ye`TG@l2B-Un2~D4;+)_25v4+UoGIynz zq{H8)$f+VXn#r}X%&}zc9|2{OCvoRJ!FJpkAWo&7GL>mJ6n64}ZaIsq<-#qG4!6ve zxg}Ba*5*px(jwnQQo1X$j{SZN~f2bm5rRt)1B`o(&-^`y%719Y{|HwsQ(SJN_) zGubVYojjyF-wXNf5c#tZxl-B4xh*267WqCR_YaZZ43R(D)U>R1P{a8$Rh2xfJI^KZ z%|uqCUqxiK%e7{)ETa}VYlj4ss%J%RbU)YPM8ExEK6*bu-Jr(&p_|7dsik_SV%47qYkBCMYeVn+VpH!dP#;i2^JKJW;-f?{%c+AF z4-&TlnN!7{94fZjrpCOr;u0-iri4<9{19QkhbDDsi2U8Ay79`#O_9BBinoZITI5HFe0+#pGejP}DYD-=lg*b&cJjFH{3wxk43W1Dkx$+ta;|LT z)FMAd@^Xxxc#XypM){{H*e7q*RnT| zx1p|kQp}yu)Iq^hFh_BBb=in-b9Z(&yk<9E@FzMa$R$h4e;T9Mb!=s0^fRgyX2z0;rdS1Vz~Y~^~r!|MR*QEO?D^m2sRFQ2&id-5uPXD`#CcF-Q%=pudZN!I} z%&=|XZxOaG=7kFBDbb6$P(Q~>8qyT8o85$fTB>QHoX)YLyrC%8sVuHC`tw1az=#F^ z7Wew+Lfk|L^_7UYu_2;wa3FyDW=h{TwJ0*K4}9yHi+F`)&ZBIW)a##*x6?s=`As`M z^;U; zrJ%9tdS2uc;l6-RI3YDc0RcawxSx?m z3K1I(_SedcP@90>g>EMz4xzHxUy!^hg*p)8#*&BwQP4JTBkTpCDNR-dv^3yJv~4}Y zT`7gV2{>S9_rW9HF~rjFya?9gjkyoZU-4BWwFjR)QSTdaxo@_3OHy^Ya0~(J`KJcl z=qCFO5zO2gvmT7`k9oc#t|!^H?N3tsLWuieBJN*og!F9)HR?m?z#6DJRKyc6$=Lq5 z3}eo?cXUI|3Uy%H1s=Q4yG6vqF3hQ8rABCo&baTL5V6J3Ty7kjQr?x6#(W(VG=h1; z{k}XmEZ}6Y4pK&F4B-vhh3wm^fM3~o%x*v_G$G*IH=M9OV0oIsjwl#GE>iv0KKw@9 
z>+GEEaeZG3`$33XXCmI(*Nks!gk}&vz_w~!i&(FJ&UPezAcf`xbfE@hd?WVb`&HVJ zXoMC7jIQBZi6TDEE>SJyrBc`*!X8b-D}Dic+~<2=jBo&ij}G-pj`-a9);1@uLLo~> zNpw(fAk4Wu6OwgX9`MmY8GF^h2rUUXjVE&zO5t$>b0SRmP*PaXLDxao@9Sd`do&R2v9avz)3jjM&?2AvMB51T_81H*Fpo@Kmsn#H^OW z!4Pf?iHZ2wxj8+ZN{w&`gyTasMZ75tqHCnk7Q(rA`{usU0p}fJyPZ=b6hVmFf+CJ3 zzdW%U@i7Y41$0o*4(2J}cy&bFnHElJQzNv8@JLv{iulr{ISseTCsODDp#~1R+Dydx z3}PCOji6!mHhckf%7~4zC3@WSsT4X9P2aVjGucD?v>6lBj59oQrIdl{_dUc4G`?e*U3jBq#s zpR+YkBjVoy>s)DrZSsW_jv(OZIX*;;3Ai)HRWEQDp(})c+2xabDTN~;EPcQ|i!@*p z6tp?n2;CsO^q4#K?*yC*VQ+Hrl@yMG&|?p`-@OCwGs!pX8R2LM|F)6nYZPi71L4iN z-p(VoX1*hns6V{rR787#UUtV(=i*FG{rDRZ4n^TxDI5=B zChvEt&qZ8}oh~8`MmT{0%GU@H*DN4 z3c^Ma%+h}^sgqzHMDJ8{jyNA9N`4oLU_+xZPljEG-nl5@XDMFwfh|lO6FR+$p^)uuf8ELHL*F<*5<+L-@DH;lE1Z zYzS|&vvj>Z_?r|45YU}gmn(%MF~cM>HNv?B zY)^mHwB^)*J6xGD=V`qZ1`_bXPM*z(7tsZHMJ6@Ec@Ul`bR)Vb;N#4^=tQG`M?oc) z=%C@t>sTC3?|_B8J^mRFVlp&qhN#~5azb^0(mCj+O=*YH%MV9gnP$$ z8;e+*zC)R(Mi>SmKD3YcBJ;#k$;Dq%xClaP#>NVz@UG!GeJAWC5o9z{V_poS-CVy{ z#2aXh+8_P5q%MJQTTPdWxPSsV7by{JA~5HC zgXlyuFA?X_iH;yuM!1ZC``EuO*L+dH+uq398@a_3QMjCd9?bUSvrLy?sS&Oq zU^KHm5vE04VT5Fa6p9J>w4oC=2E2eLk_P5W z$tWpYMZhgA1S#gsfER~yFv1uDb_w~f8E|kYhtX0P3t?8s|APTLhH@~%)e!ay`EMF< z*H8{uN#Pm@Kkn{n*bwlG<(>;8jDzsa2G8Da0q2Ku7$b%85X!F$708#s+L0O410|L72gVb(Q4&fQL|7D#@#*Fp+?rLJ3w6*fo@- z5w3^ubQM>4BVc8+DTQmKFbP7H-+isLX23Nh5<+SOJ^nuFXD9Rt*xU)pI4RtyThI=a z$d>|+As13G!c7WbaxcQ_faRT#jF-aA1gtvKdsf8d7x>#XMwkpCZiIA;oDOrRX1<{%;vAT)XO5pFO;!RD9TeONvpoxNs^}vF)?)4=$I=Kh2soSf zoJANCaMs0ULdV}ADQ=qRpkOA9#z<%cZyE3%>WfDp3yn!eme~P!Pc6zUsDH4%NKwkO z1}glG7-vUGx(vYaqosx4UW=Y81O{6 znE&xNNnUB!LBVVo`(XE!fTjUYW*j>VDI?4wVCUZ6-XpfOy5gf!Qn&}g(XD;qphv(B zC>)N05$=UhjbE%%!Xvi6%>2-+CMz*{5~G8H`(V7>&L?0IJLBo-?HMD?g|HeAN4{ai zp-7SO|GHfa_6cM=6twlyZF=Ri2*0|HZO3(ZGoBSpkO}CJ+M$} zUyTDcq;;@Id8r8Hxuax_`~;*Qs0*cHeZU4ZTRetww~JhCPeR%zboGes#@i#x zS+i!4Dv#a*oZZ9t$i8c83;X@Cu{}w3wSmJ z>MfZjg=Y!qQs^tY5l>_7=s*;VumHka!Q@3ehi;^`l<87f2w@u8Q9<1k@VUcmCuVAd zMFdo!K1J9u;H^$b?v%n}0*2IZ!W9882nt4c4#MuO+>O{f;I|MozRr-s^AL`x<_gUi 
z!gZ^YX+yT<>P@h@)f+5BA?XV9l! zRFv&7vf-~-D<-+Oa;D9%Y6}R$i`MwRUDG$N^+Nx+svHm9$crVclR&o+I^F-WmsckNPR)noi z7hNTi>a@jMoWwmf5z`CwAjSX2Z*8?Pme$ggE82k6b{aRkPd9s;D{)gznKyG*h)~-e zEt=3f{6B7?Df42^q`6SDqY%|W!MiZyewi{a=ByB*wssTx-+MsZ9aHAboJsdXZBW^Z z2-^Gni+f?(hCMOZ{12E;?Owd7vwlL+3=%g^^ zqzP*ax^=`qv61_6ZhtUSF$-34dOmL_=#CLbupXtyMe{`XkboXM=ap2%>8uKCdCdqP z5l|E+$Pque-BxOo2c@tYLfq*PG3hW@37hotl|;k&c2 zQxU8c@UvbSTl6==Cj?Z%8I;kCSbkhai{R59Mxkm=Tm=P29CwVK-|^)z$*WEeNPOsy zIFwauEr32Eg32I%rJ)3i?5X)`5vrcYlO@SV?BIdPqfvf7D|^cBqJT$k@b^+hupeWX zeP1@GbVTfUv=+tH!}vm&RLXI(KaP$_5o;QWFNBGpntUBMQzLA|t2+9k-s2)Dfz|KN zu<@JCI01WsdXLBmD&E1YcWc17E8x|j9&FB+f~2=4#!ld+0S^WBtU-ht%Hrn*-l`%V zj8{{S>t-jOkc9%jD%)~$$_`G7I13d< z($om@ORF%wQobWDqulkl?I|g2Prwf~+-ZrpYN{_R8es;M6C9Vzq#|A6jc3( z|LV*C5x2Wo(-?j?4uXA<;~nNmD%FVJJ*`CY*Osh24=b-hat*$qSP>V)+E=WtN2r|` zPrYhCjF^f)VztKJUwQtZVfF>-ssFZQ^+SamktG7_w#_)(N?4b%G;z2YHmAeS}0s@*dyHSZo97K|pWBQaj z_{$QKgS#>$C1N>}qA|<}dqC(Kk`l2kNl|8A(VgUq98BTpP7$Y&>$;rt*8w5%>6W}2x%ACQVG?7J%jeFJVl&Gl2w^h8htM{o=jX}sN9I(mDEP1V)T}wS1S*1a;V&h z&z0~hoAohbEr@4_>Wz3+2{CUL=v65eLYx#TIO5Tvg4LSkC5mWY=;->Gn}>*f-8>}A zMW}<)wJqIsh*&4opAq(ga6z!E5jV8(-v62u_J+_dbnb|)>3RA9j}i8PFr43|ktK?F z*Tot8R>kX5PzAMa>5Eel`{Ocb=cWiX)kiOA>uo4vxkqCgqCS#0q@zyy7WJkE5V1FQ zh&GlQp*{g`GnmQTK*0#|uV2H4 z$R0=hi6^+#NW)uFkO%oVb43v%Uih+nHaBE&a*PiB_nJUVMS~_=q=N>62t*YRG<1Nm5ttHo?tQ#MZy~ zRs|#IPCXy?=T;GWK;W|*$@@}}iR^QXyATl<=uh)%XY@pz&VVDubPZ_AmvaKsZ>&G$%DhF7I6^HG|wLw ziIA1CGNwk*ezZ50M8vJ>g}kVotdf)(WNxLe>uwit#SiXsicnK)tDqSexXax(o!jVoez`i-o#{$!3R2xjzNKKyM);ynA1B$o@Y8phmsTxfKX>PjDTOwYInx{^A{a9-1GCwr%B8m7N zmEhBCrA@S2>Nqdvec#Nd0k0^p7u)@>Eqb4bs8RKRdwu_7almyI{EJPZLQIXQp0y7I zRr2`(Q;15C{-pS+6qS;T*zBX`a#uu%EyUtaibhnkXk5pM2M62>qKvTqr1+T>RZ=Dng;g z;FS>6k57UiybD6?=ov|UC#K4BL11Rmfu6&-@(A^jRek@f>WM%>VZyXjEQX z;2kq!Q|p*~>I_0njmj0-nI-!g@j3{btm6;TQ4`GX?$$Bl$|*iAH-g6I{aSf-N8Dqb zFV*}gh2{|Y@?b`GF5--lR#^)~$SxvN##EQw`GJh>RIVSe&0V$>ll%m;`u-4x&UMQg 
z@dnOSZYmm)3J>7qCqAR1V&09#ZsAkI{KWV;LSmiAQg&)#kHRXdM^l<55i9cVG*(mQTtAzB)>_a9fXm0yXBi1@R4d6`+|)T+C#XJ0NMFR1EvtPp>n+xI!GbxLn^Z$ zDK$bz2+#5%50%fdfLC9vA*yJj2$r4SC8crvC+;D`6mdOFWyc7I63{3#l*90p-sOE-?0R*qYB_D2Cfq@_`P$p#c^9YH{R6dbW5PNqh5{S~7mV$H6cJQ6A=;x3z6fWMTY zBcc7lUP#?=E4GXY|7NMH2|7mT#)WP1G-VkZ2JB|Y7Ah2~A4R~2+(;H6;tiL3iqz^v zvM)!Ktbc|j&pp_WL&;3r2w{tL(Dghy*9w*b&Bs6YsjRJFM!cqn?Nm;RuZNK1-sO3r zLS6axfP;8PNPXQ1O56<(xkAJNC_qR?NI|A{6*1&M4MRcr4X6c55o*dUj7zK$<5H`y ziCifib(UB8;)>d1NA`9Je|X1!|0p$rY{+bCHd_kMIMud|BqOCDPxTkp7F81wpSjO0 zYH9>Ezi;pN1%QnKA3MP}wT+U3TKE@lI^mCi3&||K>ID(9Y9uP)T_fUr5IG&44HLv!0QPVBe}egoo}T92@YnGxdG~f86crt8`o0D;=0~ zzbGVZWx$yvP+idovIjkxy{JAf2>APH{;YG16lB!jA;D^u-vu0G2`^eIf}V1uIxx4s z6$6x*M+Iz1+T}!Kc+$cLza%KPJ}!<&42$G1i(vBvW6HBXmuaE|KMr^+704dhzRNPX}z@8WSkUK@N2oCf}8+B_!+JwW-&E_eD))%`7I)zW$W!l$#^NqS3jY^yHx*x zzjg667(qUICwz0c6xRiu3$iCBxmF7D&8HsiCS`WOE}e3=bYKLHD7#Ve@J6mMQ3@KB z%Wjl>!!`=Cp-P0>5Xx?poNF5<*+C^j9SCJNN*=@(J1t5YVJ`@6!p6YP0e4$(tCq7H7(5n4bfyIHfb7j<&06yzyC zx4+7sHMfAj8M2j01bNA2cXOW1<`Qm^Ocg=SaoOFRFZ?0rh#gEK$T=>%o3r6BMo1D7 zTIm)vF%33xH|JYFx`Gkp6L`Q(qkH z7jPi%j}(j`A9oSMhKB!$TQjm9KpLh=K|b!OjENffBOZFXzrt&T_5?f?6e2Fi(z3BG znJ$G65bj{WmuC?185DQ|kQ$*Q0dq*42oYx;Y1vHfl!9E_t8puphKTu7+*TXmPy+sK z7uF0Z$hEzssu$4ofP1jq)S5IHL9Xq;Yz<0gN;AgW<_k(-QGP|Dnj|ZO;}PR z%gvqgtq+m216DiL-P~DHI|9OB)Rd!$_nl<(^3(`j3HV|!ua}73co#y8=XXg#j_nR_ zxep%kCQ4Ob&on|e2rUc!77;J%>pup1w-n^qzW$)keIq{4E%XrxBODE(8-=ArM*JEA zzH>5L3day|JnOuwxQKn&Dj`3|2;B+zn?y<>Vm%z8c5J$vgMxhoLI?IXS7#>T8ptCK z!Rc$uUOr>W;n6|O<6xA3!)HVhpBSks6KY@HcEn#!v`rDI5qc8v9y%(Ph+j~5TE}`o3a1k=pDC4`w20lf z5lKyr(2Iae=emWA_!@a>gTg#1oB`qK{d}4jaq3aFt9y|M=H(i*H_Yy=VW@dT?BL~= zJSeF%2^h}0iJD@>awmIsjL?UGkr31rBesOV7b22}q|ldui3EaQX`xLq3iulhjl=VM)IOIOX8rdpj2>=my)+Bh>}r=V#}?w%U+LNrG)a+NQIJ5n8V#?ep2 zbn3uf`WnUfV$6VV_q2y7FNjc~7zr#(YV0AL)S-ygP$I5kdV|#)^Q3e&&(VRs3brf5 zvbxZ`fR&2>tw`;uz9 zkJBP9r6Xa=$2_ehVLcNa)Vzdfw=!crHy) zW1A7KP+D2Ld1Ev zHSC>87E9qO2p2!(iHvw7y@IERFNYHez>X z_1fxZgsUNZfWx8uM*N%Fku&}*>Kucbh=bLj;s 
zj3;2-F7DGr96)xc)|W-F>=^S}n8(%f-I@{mV<_3#fAxznbH!s3)sekBb{wrrJtX4Y zI4P_IydXkFO$9Prkw~Lo$H}b}l)8Gvhv@2hF#D49CKAy6OeaK~;Dpo&*F%^=Uz0+_ z9cWtYpGcNSVG;pd7#EeIh<}}t%jHRf5pE!$(o`oz97B@nGNWHEN%HCF>CE0M>pI1Y zE#kp6Rg6SxgqzU2Hl!$GKT;$s@`@C0hOl!hzhT7lF%KFPj4&C(Wa?HXEn;WNn}NT8FM;}Wx+#;xD~>BLH%_}-3j5F1HAkr zzJ-Ut0Gb+M2815mOBEFH^HKJ~_V_nY$aT09Egjj*XL-C0HH(OMj<$BQ)Tre#*{M-y zLCH+@T8&s2CtAMV3TfR%_<+W)74b;4FdxfAu&>t{^KO_+vAnAAh*_lc(Bw@?%_iVh z8lwmiE06=d_aj2Km{b{a4vg*SY07KFIWS4p_?0ko^=h4G<30ubwXxE$W@Y-QRhOPL`%_qBOXlWReyO~ zTK5yME1g$)hkw`lvGMM9)8)pLc`@b_XzmSD z8HxBBStcVBQfVrkI6j39q^-zYzN@9RY+C+ zKa8CVeB4v||0ijNrrXe~UR0Y_RkcOk?_}1UW$m)qWnGrtt=lS!y6lQJ2tr$es6kW_ zgpLYQf~X)Uf+EWxD1u;+QUw3^=Q-!;)b01*dObehb3UK*e9!%R&YAB_PCO-?YCCqE zUQqZiQj4^Yny#A^P9_}B+P|Ria5D0S1ckW$gtrs4)7qN1^@I()-=LL~33?GRm6sg# zn0LJEu<(lzZ_0x@>MJ|}Byj9Rz_TK20(JEq+X9uq@xad$g>%S5hdgLp?H;En$Eqvz zUN~Z^xS-2(MSYC4qY5gRGo%dgpjYw|(}VHGx`WpPH}{KQQ8HoXxXW zOTR4-N?F6%hJ^DX-j9HXbk+na-UfGsdDQXUFT>C9y(14=)PKI24kh`{a>PAf2;T=c zfyzH+{UA6c;_&CgcjlJK!x{*hMh3wX5kGn^crbw)=g_r+;GYqjo(&$}l?OePCfm6u zuarc*%j!W5NsNAzeB{YLMGE6%ENF34Keo*NBkl*R@MY-BN!HQ zKQ>}ZEX7CiFap7aG56IGuZX2E!8!;wh`HZ9;y+?3y5vEh7k&42Xsq6`6kEkom|zrw zg|QVMjCgo#&5z}w6v4r<6_1LzFt(-%w6V->6I%1zh)ZHUd?F8H5DbmwUpHcFtOpaU zi=Z;T@O^p2t7AQUDi323Ji1P}#j_EQjrCxH^$=7H4uV+`w~O_VlZSB#E{XL}6R~fs z2MNZkk6@!a!r6(spBqhh9A%{!G;Ll z=X(;GSsjNUU`|eV@<^I)KqA#)f#2{PfytTl*^R}7*L9XFZ2^Jj zN<&(UIAYzP&|<(MRI&qk_{)by>_A@RgNXE(@}Xx!GaXX*w}`6-1qJn-wFqVXM0Rv+ z%W{fH9!7u#Tly=d*AwCX<@WA)MiI$d320+!tR4rq@<`%khMciA1hmy(p|pAy{D*^H zCgzMHk|z+DOj=|0AUKpGMt+ekXYAetw37T*=Mhkzzg1ez{HgKgIirZ= zWCD238mno4=45*o<%~Us;Ky75KruPkQwHC}7JCp!yy8JTIYHojw z%A*xBibysPP@av|#JB!@r$;xzcZ6F_6!0PFI}Oun!{7`>>CIfMI*5dm!+&sSQ_+eN&u$oDrgib#GzT8%+tHEAE5 zutMaF-JO6ocNZwFX6(Ic?endSB9hw>(2mnsP1n=;a-jN^vHv8XepRWonyaf`4fj<< z@+<&wKc@Y7Mmo-*1^W9{G6@1Rvc?4+v z^hHXmY56@O$W=xW$(IOdm20f#OUT;=abtB>P}y(w41?>+1)&66p`#t0MA)tH-|l8jNV8} z<%~U)0Gp5WET!E7c8f5JW)zVe8)nh0v0K7^x=v_cW8Wa4_MNS?TfvTdCX9VWBnK1F 
z*f(}-*we?^ESfX+9|V|1({q$|8`y34u=m_Dib!rifLSGL?6$BM@S%{}*VxktsC_R} z+U;OZJvxjdMI?_VpmAjE_OJ)=X^+N&u{#scSh!qicYr>KoT6?LM$$t_p3Wh-7~PY9nL!g}pbd3UbEY6jlZ4 zw9@__b`b|Q)J8@T$vOhmM%LKdM&d38=2FRoXwmUOznCR}sle3FyAY{t@;K zx`XP?{0NA(5CTz~wCkepLxE`q;UFf3tPskGe zwe3x16p?(tpViS~lW`i6HQ9lvnwa4G;>~0?5}<)+)XE#Ov@VJdLN<=4Q!(BYkt`vg zE@$k)u$P^x^PW`B*fR(y?F~x%XV`fobcUA7C?a_w0TsvCzrarWQm0C(oUw-xV4TmK zhtz(GP#49AAbXqXqy#65NIpq`@$#t4vL-te+0HmnPE0m|fO2x9octB`8deRk8AT*# z5KvVZtIx^LCL&2%V~--BQ-b+QdpPWUcW8IOpK?>$>j+R$k19}5juVp|fou~xLLZ!% zY$yRn!OWW^E5e&DijPFLh*^0JyeT4COMqe6VzNTKne1=KE_qq6ex!0Hn?`_6Q*pDr zAxrC`_$Xvs-mH&&QW-@g*C9a8A2k^_&6@0JWUc=QJ;!7V2+$DeTjb>Lu+w+bSxqXV zh~(h}RKp6`slUnofvo;99V?}BCcB&fIiFbq+Kibxg_pv9jsSvmD)vT4Y!+cLC*$<8C7R#@QPbWwaNvZ*_VnM4uEg9tE_JZ>_1 zGudg#E{9RiH`$p4==n1;?oAiPrz3m+k&rhYTj~Hbl~F{poxpgiOMwFFI%~2sk4!Ga8~St&8GwkUbD5Peml>g~>B(Y&qSD3WvL-tp**ar` zHv}dU^sAG+iKL5eE>^Mo?Z&N)O8# zb1AW>4Akp;shmapg@E>Ma@Hhgvj|M*amHdUV-bfF&@KzblZvpJ+nCwJzI3O(?Vq!V zCIZ^rAieOxC{634cn+}_ZyK7|B2Ff-QS|E4tTC4nyXbiPE>g}SY6)mFCa({S6vieLF~1B?4cU4hzbH; z11`-Pb0yL#i^AZuh~E=X%~Rp&W;wfxz`JXQov%eKAs}aJ5{sxI(D-sFuOgDy6WExp z^N4b#A*YMttC2mr)OvqL5y{&K(EGE-&V}8Jov)tj7O@F|^@*f-GZwoeT@+tK?1iMo zb4C%#sRVcmEp}PfWNBodgj?iH_7Z`edDZe!c~kjxQG6}3O<%I}hKwST!wBqxHBCUEnO4GV1zLD6j)D82>h;mq(n{xTM^8v$+Xakki{S!3#m-JiX*ifIws5{SjTAmr>L_QV}*yz=;!M2FO1SSMU)*>bZVo{^8c2;A|e_1BC= zR1?rVi6Uze3kmG{aOlw%F^ND_q@R_KyHVWqk`3&PB9c`E7}$#yDA3R_Sp%}88C!aQ zn{00aV;Q#3MOj)G#rGiF_~meXu88DX1bFY{S(DY`&1Cl?n*zhiJ!i7t6VQXG&AsWO z_&#JsEJ_%U8AT*NddtS+VwYu2)`)CFWSn#4Og4xBOZXYjyEk1FFGBVj4&+S{$;Hu| z%d#fBAK9#yFz!rt8Ud|$UT|-^D1HFhq1;D(KoQBE32aEcxh!k4CS*%nLrG2c00EWs zMfawQ;s=qfP8~2aW)zY9gjQgUx!7e{lRbp&g2myi$YlQ_z@Gm6rS44^#SbHU;}IJQ z8AT)?BfwBtY_c+1!DP+Ix+$Dak4*L&0c01xByZ|vx+s1G*)BNH@v|b56A5q#@ubVL zCR>aw14D)6Om-CkDrEM{^2WeV>!SEkWQR4|5l%)C$z2IdOr+XOrjBc}7GxbTJl=99 zdx8KPsj64xtsHN`EX$_h)3IG_8x`=a7EPW<-a27O|9o z7CiFm(yTFU#4e$HoLS~9;%)+3aUq@Yx_i|{@$|}PW)Lh0Qh7gF-R+I8f$r`g1>ArZSm2(!c z1%clNua$4gE7G(sieDmjYV>Lm2d(1Oq=k4j=4GUFqF0NUzKYilc}1GmMe!?0x1Vft 
zX~rVfC9p%t!1E?8!>cjv#Fjs9^Fq!d{z;%LNawsIuSnCnDE=?8XT^H8h`;6Q^?8?O zjd>O6vs5LOm9vO@^7T6BZTG5+;@6NSa5R>BwTSmu$*W7V#$-|MgCpj!oJDL-po~cR zz?^s7t1gOPCw9k}SBqG0mAtw%Ys?!+FO7M%h*MX|>oWJMi{dwt_G}#bfJMARAohVL zOiFio-qU1F(m|RXs8e+Yix^8Fc7_Yym3z|6u?y9-E{fkG&6!jKX)+dZB!QTwRS|s( zP}n~;q)FZ;_z13<&vF(~M_^YXshrC9ewrq&nBsS!AB^|0i21AB$I=wqP57VgW0GYA zR;Pw^AB*^8mHWJ}`>dKKt(f9>NwY1}iMHU1NRB3;Ew};&rMNcPd&tJ}H9Vb%S;TJ$ zX!$}B(w%bjK7rGDis=Dq5r+}bk`qPNB034&eN>pnEMhK!EuxPPh-4E1KA&8yK!K{rWS=10gol70VI~_&fTozy z6=i8%6n~1W7wtm2j3SaBG6!p?!a>Xo{T4ZYQuK-adA3x+wk(Sp)as6OxP~ zlGhO6L9^IpS(AN^>|z{Hu{o2SOn`OTj8EK~E{eZEwgC()gp4AR{Rn6w;j*mBx{=+V z4Uc}4-9UgxfBI87Sq|IsdRRXwB6&Lj)(=@@zl2>ugdP^gzDYnIt>l#UE7*x|guSyO zk^>0P5Lsha!0y!^?rZEO1a#lel=f@bPBNk=mLihR5nyGTHTE0Wqi_bBGj=ZmuroeK zT0mo@by55+vKR2mLqDU4at=oZ}kXi@;>kexbDA!8Q`Xq9&t=u(&b9Z6DzS9 zMO7D2L~=L*xAKb@KA20k!b7>)CePS z<~K-f*`SN!-yka>on{+FBs<9fgCc8eU)XJ7?HwGkqX}$D2EJ9=64;Z7P(M*b@?Zk$ zC&sR!w6BJxqp_n2XzAFav}?kyP6S2EC?fd@X_a|n`Th9BF|aIgbH?sRKz-~xrR@*< zB62Ox6_LE3fCiVb17I(wtXlUPdnN&DKmEPZ4urjcTGZlM5y^iMz;o8vL9lbVl{%TR z=MqpS`$1_3!+uGG9uta4zD|Hv&l{CS01#`yUMSw1tF29_#LtsnDh|($|+0A{G z*4Ux2e||DFma%07G;N-Xv_Jz>7sYz_cv6sM6pCFa*iUOZ%5px zB7etprHZIK4LdHV#zg$<3_EGcnxHR&hxyb%mE?F9S)!9yU8p+HEv6bk4~JGYE(Z01F2n=-LYbGePF78+JRpzS$_g=Y^4E{$FF(Ez#792drUR}x}GyR z)u3Z`mKR7_tCF{Y(8qIFEScjbmxhCGD?!#|gP@P&3{W!1I=pR0I27~}c^gdNaNcK; z%yBv1blMlZNw)r41is%sd|ktF+l$q?^-m*7|I=RUQskvW2+yLcsA3#HwpTl(9IpzL z&VFpQuS6&$oyhGmR$eS8*r+72RN#t!4Ia7zqnrtM2 zQa+zlBRg(JI8>k-_cD1?&)JVwTXPjGB5&H%2X7`TC2%bDreW?ld6}O1`ln{NT;4Q4 zE!|ij@8t0mewTG!sU*vqYz%>|D1y8>4%t!$x@?W=${-VoRDyL(s9{B70pXeWg6C4{=*z z$dBWsTf=KzCK!+4LcaGa501nC6~2^ll{~1Mji6U+8|Ap$X11frnqUIN4!egw;J6!M z);Q@Jd6xNP|W=~V+=iMiH&?(Luw}$uI9p7e7W%rXc!DIwe_??g$U z!8rZW@GNy)KyI1f)7MI%9psJZhTQ6+VQ4o#2qu5QBqkGltG*q ziRy^`gCKpQJfsjD1EB}*=@H*uV8bYDg5N^)y(@ zxSa2{|CKzLU>5{C@xG>(8hb|k;pT+?GHv=MdDsI|k^UF4Ph&zKN2YI)hdq^& z!LO?Bj+orT4nMOd*bCxe7B!m0x90O?;UIdhR{M4Gus0EJBG5DAU09Mt9|Zkl=28(Kzt1w6HNn0JZsHc2yGL>h;b|b_FMS&xM*g0N1w1-5 
zf*rHW9-2*UaF}X80w3|MBh3ho_n^|GELGL*__EI!bYZ`Ex%ysVo^;&jbS;GV8&(qh zkz^z3;PU6V-$P*%tCwJZi1flRZ#bTS2kk3NZ~%fo@Vt-*$JJ(AFTD5;Wk%0XT@3pZ zildqKHK?XV{C?N)mnKbcAjFYnVMt7k*n}X3hdbrrAOwSU3UjjKVJs9iC!6441otz; zXh^h1{0nPPZD<$B!=EAcW$_@vp%LF>3fA0Vg1!Z}zd>Bb(_G62$6TF0UFWY$$-_|)dkzn`aJ)V246-IT8bRSX;lbg! z&GN8dxJMrTj$m81ahhcupZQDpSs)Yq17buhzvGFA*|)3H_sYXR5xm0gWVKcBxi!OE zyC#?d(T8!OdT?z2!CqTR-zN{pK%9*SbFv0N<`zAc*mzUzhzA@9l^mo`t{Z5evpk#y@#-dFU*mWX0xi8va5jS9v%yqNI(|nnIUY_wA`kzDsNu<`m6hYR zpNAt16Z{9_7{11-JUF(pEmln~mWOj7=5HQu;dm804nFG4nxGs(GxMJ20mmm;{;4$| zm4|a7_M|^Z;D}oZvL=`Tu|ESuf|bZHmxmU4mqQ2UNsGSNT8Y3q|Jub#5H z7v z*^luYOdF`-9p{}I9+^)|FbATWJL;kPo5p~9jIuW=vnIFvUmstnlew%3 zu7v0_D>T8Nh|h5gZ7H9XhpP}YvIbTo4q=Ha99nAIo2&_HhwjL|@!AWb zDxqeP%>JJHPro!hgD#}~>GKj?i=Y4O89IXFm-K5oLe>P=A($O^`i^U})2B)rXpgT~USLESVh*K7ZS;28r)&+V-o8UGC zNA0Z$KH!hI2-Hb^yFAqV^B*_6VyWtzdQV7qvOPKdvK)xi-%EnAb5<^ z9W7cMNBvDZKz(Aa-YD6~I|&}aLbT5+__aMoiJ;BTe-)KAlrDxXAof2kLGDALK$yI-WZvBa`P3vOVJt#g+g|6yYz;;ScQ4`#Y z;4Y>><;pR8rj_f`H{?oVP8aqk*avJ9PKX_M;}nhV^QZ500flkTroSb}18(gpR3@ z$@Dw&pheb^F%OQ_F%KqCk9v+@T960F5yL|%mdS%wVZZ-2G_vDcgjEU?Xb_wnGwC>& zuc};JDcB9j_)V4<;Co;A0*a>QRnA5LO<#7GYjU$eLghf-CO`k8Q^m!fPZ_wO`_4@CNAbZ~QR}yF)L<>mV5nO9OI&_upYbnUoSU06-8kU$gp z=--BOVaG)*U3lF#YXX)1?z!PW=7or#fvj!PU(16g@2wbsM(f`Cr@2!9G}2W!%A2_T_yUWBg<2c6>#B6O zt)B0`?aXb)&RBZuMQhE!zUYsoPkeaW)Qz`0`He-_4qkW#F9y#34qX9nTcquRly$mD zYC0v*tiaGomrURGvc=0s{S-<5J$%t^{7a$-p~LEX%1JSc0N8MG>(@6@#GqVEr@ z{Ntt-#1$!uyz9DHsbb28^F#*UZMq(B*9i3Q|GefEihSBeUHAK$o);*7IevxW$F4mn zEYhZOkfAPmTYcEIugHnbB&o7zo(=;$FkZ&TLI`$JM2b?h?jNvAZ7u26W-X;uL*j~I)v9uD$Bjr?1l&}}w zRQ>w-h9DeIb2e0=DrillR?OnU-k4Xv?43x}%qC*>qJm_>Y6Um)nW2IO^Ao8z?Bb?G z>U+DmIgz?yQo8B7q_ChOd9&cLw0dH8oQ`RV;2vixm+4fB_6ViJJHz1 z0yK7!K~pWwU1;oLAsV~58;xBwpt(Ytd(ha$y=d&>J~VdGh~`RZ7NN0=`&pyvB3Vc? 
zJir(2uM+(*--6UdNeflNP9Xk2RbNBoBt16uPgVT?A67Y~Nv7VnJwn~pa?h0As*Bj8 zm+88q@Mqo9Zo&%|Z(P#$pW$EzdzU;N3y zNne*QJ-BYdl8+ym@|Ra0Xg&JXXa4<%uBFdE|Hj9Me^p(z-;w>p=H^89lIm$4+l275 zoKdOQ-Fr{KTZV?-*Lhn&wB_*Sblq}f8X zy>KGwg;(KE_!apUwy;JLJ`?qg=DEnni z9e17-rn?P^*L3$N_ZP0uyXd`tGjKrP(S3?1t=X$+=(u%DH=MY^_(6jcL)IHPYRvGl z!;-75QL@(BBUWFhzqZde&~(@8#aElWbPFxmHR{&Kv{2GgXCn3}O`|5;a&S2QdS^sH z|7+T1Uxy}L7IlB52x_`v6ixVDY|=+KiC4H+dB7)PlOO$lXtE(Sp=ow|I^b5v27EL& zrGu?fE*ovQp}?k~(nEK7&* zp?d|qn^R5k-$wmh@~`lV=rux43g3A(l=47oLE(F1sS92U@o&U4;JGpVj+=&#`wYJ~ zrrP)erY2l+WfFDSH#mG%(i!_~4dYFzw&iCSgkQ(Ly$`Dhg%=J9_|sL)fNewVAHuLzv393# z3ZJ0QsffJ)V)$H~hHhc{Q2f+2LSH`jtAOLT3%J(WLH|AXm#%@;ib~QDr=6!S3*q;6 z3%CWV9q}K>zPvoP<{SM&D;~^!)w-{q7<&QJjKW{Ds8P)f=f1)t(Fw;ARv#}V-@^UA zO;{g4{rm9p%h7y1t@Dn8_19bXH*2psFtK{c(7~fu>ph}R-(f=r4OnBHkwyK67mw|= zR8NDk0azv0gypcYrOCpB zu_~+?>&C{9P8J@D&Bj`=71*RP$-*PBYOEFO!6vVpEIb;k!Iof&vB|>Cu_;&@Yr~4x zOBQa89go#wOR?VLl7-u2Q?dD2JJxsoWMLUL4XeYlSikXmADe=uu{Nw|Q_7DWkJVyJvEG|eerzf>A8W_@Zch2J zX;>YW#rkbQ`LXF(J=TE@+LH2PXJZ*`88&n)%8!*}4Ok~OVr$BeRbY)+7go9r<;NoxR%8%7xOR&U_lpmXdrLi`w zXeY{#9go#wOR?T%lpmXl&BxlYzA4I&O~dN2EY|P0lpmXp)ngslpq(i{b~cv5mSID8 zq5N1m)_`?lBX*_ySOwOIbz!BuQGTovYr=Ba*xe~VR)sZV-Prg&C_gqEYr$4vllG+i zST)v)^M*98b+Mc| zim7f?n%#KdD{qc$8&a*tOyb~C_iG9Rt^8T>ILAa|~SVL-JW1 zeL4nisc|pGPT?51%7;nU|44qUTK99L08K`mLBAB?2$2&p;CgCAt>vito!>0x6848$ zEIX=?wHxyV3tBObVyYg|{l~G`kvGTkM5;u)VGK^ zKl*YM(_zfpoIA^zqnPT)l;#_@8nj5lQB12b`_n6x#!*aT+3C8493%;WObi&;Zy z9K|d%=KGk&(Z6VrZkQ=mW=7c@wpgBG3BjFv-!f1Yez8+#!QTiqjJz`%rv$+ zDygHG^pkk&KPIMeqz&4P7!(Od+MwborT91VjGEdJqEW`&pt}d9LqQ(6fxjdW=#=}>bI3k#FX*5f%>hZm?mQ`rq#qaipd#sIp+vs9L3Ck zM&53GB^-b{ifK2daXtN=GCs$OD5mOJrCGujUEUnUG#m5q=HYMCJBsNxCXE*@=^e$? 
zJtuGXu{lxOII428Vg~R3X}FIgA|;#MCUOhD9HC4&MzTfZpsivXIhH2M)@2Nd8?XuE zUN!5e`_(_MR2Rh=&@rUy5I5k=Md5aiN>%-Wm<80JGU6zv)tI+ATF_{96w_nOE8m9J zbrh3%QQk(y`#6eOX3VkiK8|8)mMYCt@ji}XmKd}7$KgJXV#;4qn)_lKI*Ms9X0Lc3 zM=_no+#T=ZC?@@~yq(ClUnO-E(`HOL<5;!rD5l~SOA~W&Vni{G#*B(Na1_&J%I*GDT&J*M=^C- zd0QF>s-u{!F}KCR=O||O>q>JeA5v@ZIVuM&Vg?@;2cIJ%4Zamdmc_y67|HrKj zAaIn=4rAuW&h03s`c0*IEH=5Lm{w!HiB0Y(rpK7eVyANylj)GRgL{V|?kHxNG3W5{ zwQAf^OwC(LbL;(~$sNTkG3Fccrq19fru=QC8F*hfn{yP?V9XY=q>f@bjj4$xbrh3+ zN8VnJC3O_jX3TAI7(0roSf(_GM{kZ|8jU$W){di?E@PVFkaQGN`>wn#XQ`mtaTK%E zn3=J59K}?=r!;TJ+Hn-qWXx)@b{xgzj5#k3c}Fqx-TI-wc{wJs#9r- z$(x40qnKu6s(2i!JdR?zjd?o`a7Qt9AIRJHaezCD$r>{w4sb^?vp*zF|5J2Jpq zL<}h4*ispARFW0O{QL9pJaH6L|B=%Cup$)4QA~$1yVDIcSRJ+UsqP}p;637Cbws4W z+G^y4I9MGc*&}km;y7fIildmN#(WZoildmyFO+6NERUm@CSy*G z<#7~~Gv=Z=j2*?y@0Pa@<1ls<({9Y^F>j7ys+KFw%Q0_`Vw#OPGY(cqG2O;YhnNthn3;SCpy#xsm=(sH9*3l(nEDm+ z_5^LJG>&3A#0*->M;ltEJ93D@+OYa-xWPLzj+AdlE^QncS;{yP=@`i#k%Nwlk8;Q0 zHS>+Qnem{-F>uSo4Y;%`jA=(3OW%sQBaTc*8%xHNQSZu#qnPp@rKyV}(@{)=F-OOd z=_saC%%Hj4N3)7!$X)t7`FotTlg6{7QniWce`frE#*rzn;(I08i{mD_bJP!4G#ax$ z?|duUj$*ou*&u$J<0z)~2W9TGcogNxrzEUhmKygGn_=C_F>saTSCH;>)*#{>`D}-D zO~ySJJEvpda>gwk7}7ZgpYzYf=iol%QkCJzr8-Buksq!IHRTw|su@c8yExf9MzYz+ zM>$)OTgOOt8@YFM>ln$pnQ}WIx^;|X*2oFbtz#r-S19H9j=>yV!$ju`&Lsps&T8&S1#6x9l|l*sMW|(u}?Tgvd74m z_!5h5<`~J$1#u3rDLaYBxiJtno4n& z-TTyuO6VB42IJ<&&gK}nPUGHYUQjy6z@;ye&u=+|66eUMB%f`@jfuNk$B?e#Vx{{d zGrY>+7`R5`PKoOW$I!XE#0@?zb~Hy^sB_m=iCjN+G{;CT6*=HGW=-+TBSepg0e!y?&r3(O zVx}5q@E5U09b>&LGqNMrsAD8+u29NNV~sjSa*2_PqFcvEmS3rqsp!@*k_|@oj&2-SrSt^#+1!Qw)3Wk+&V_G+sLb8h8!bV zcdguh%Jl#VfFkCD@2g*(QSnH%JG%UJP_ zG37ENUyhaF7|EJ>N_j#g9i!VNMjjIH=@`lK8Y%%hwNIFJxg^@=_(orN8Utce`x5SEfjAVzAzo+809&!{( zV^`ln%KrOq8ouG`s3Q|2O4&WKI!ZW_qQ{6{--S1?9GPM>cgodgELxPpv6RCSBVOAw zytCz4#%gncQXIsyJ4p&h7R*aT4EO_wMsnq-O;>qF%pp9A#W<=C8jLwT-o;UCw@za& z;hCs3jw*fnE_quN-|BMI^P)}6;CpCAEnOVBv~jmk})K9EyITXjTc9-a;yRC#6}#&iz8SC)`)dsrAPDP2v&(TVL5E<-+6HatHPSGZfyKN zcyR=qjkRDauu1>q#SyF;YsGr7$y0c71gpW8V2NXRaRi%!rLi`w=vc~+9go#wOR?U^ 
zQGRSHHXm!p`W{dDv1wQxmc{y=K>4xhSUuK(4f+@5$Iiww*fMPBiIg8J#~QFsY{XQ` zk5yoeSQl1$66MD#u_i2sjXjz2V^vr)){Tunh4N#wu@-CvHfb8=$EvYbtOuKXD&@y& zuq9aHG|G=n!O~b8R&+Y$$BxHpv87n=>69Ovip|H`vA$=TA}he27U~Dv+<8a|Bm_e-NyIlvv>J-EXRAId@Skf9+v`!^WBTdZHly#G$)hs64I%=78T<$ulbq5U0o|Ik0$jK3?E-?5tZOIZ6>JfZX# z#_~JLf8ZO9|4S^tW1jCaerhbgWB&fNt@3|(EWcy^{!5MDB$nSX@4xa%r7w)-cg*um z#&_Xg_3xPXpEG_~eC^ILpML&R@_#A*mEJMWw;O+ND!g^>m@i+|(^h}6elLuezkjpw zhs64I%=_;)ep0Mo$2?!RME-Nk&zfHxH9v&$oi+Xk#<#}5W1gS=jMCo{^W|9GFHscI zw-|p;%%5Y;N`8g$`^Ee@=K1<(<^S1O|BiY89mao)f7P#JK7I9bN`HE+f5$xEYW#*f zg!*^Pm#@e8b7J`%D^}(|(l?JYVy?(*J2fsDH=&{Jg~Y2V(gh zRsJyk%3o0W%VYT+gzZw7H9rOHB<4?|n=dWYMWW@IT zs(e}L*WEkxZ^w$sKEE{?zX#7h^)E;9p?~L$-=SYvmpj6*8~FLJ$p58VgcqkA^L)GU zOXKs)G4H>sUFl2X>x+&`AM)R9{Df^o{vB1maQ|-OHz$9}zaxA>;OqV?|K%Tr_IDH? z{AZ0H!*|q`-Z9V5epTtOtB=qzsLUV zm~Y<>vz*2IKFH&lkr$ z-)a0iasGFNkNqdzA^%^+`QI_mw;4Y&&YzBXzTz#V-#5;0j^acAXf*x|@~{5mnCH8U zzdH6G$2?#Aw*3ElP-tJrn%OvO9{W4i|HdB|+t(3Z^#@=1j?%v!+t*P%?F-*z{D-lA z9rJw7_?u(>I_CY)Unc*HV*NV8CxZWWx+t{vGrD?Dv)aA@*;&za#szF=7ALV*HKq{*HNm zh4J6S`#a|MclDj}pQL^?|2XFP4&$F>|EKk{V}Acv{ejZo%x{`1yo%qe**!%NwB%=1mg4`KVP^o~4U;`Eg>{*IX5G0)HcO#aX33)k}R$n+Jb z?{?z{#o^^BK0IEkK3DoRV)t;&^UcOTPWM&wIOhF#8~-8xb$>_rIK9_>A^+p>uk?<2 zJ}bWe1q;IR!jbda+1*MwlX+JaMxc4f!HvPdA`;7zOi8)^9|Qyd~HncnCCNJ;eVe_&b~&R&G{Zy zfi+@XSn0nx-@_`gCM<`I{SW7RSQX|K>9|Is?J17F(<^W~XaYyyn#>#__u{zPxbAPm z8NZ_va>({I=>|W+Ak-m>V|3bR)izMa!$_yUciNPc0c@>`{xLhacHFAa5F%V|ECHBOg-K{~%W% zZyyLpKBMR`V$Dc6mhz!a`IY>H)3O)nYUOk_%zt+yU}~uZ{SW2GJT$L6lI_ZKAucJb zJeQt{<*>0c=$Tj*){J#y<7aa6jLpVcuoc*(3QnG}YOEFO!6u)_$um}iEx{7!bMlN$ z!O~b8R&)U;&)D%;Ew&WvUCGHaHWizXwPSrROcs`5)37=$i}kyR#=xdy^;ics=weQu zv9qxZwhS9u#mPHXjx}JN*oaFQnOFtZh;?D5mr`BW$p?ffEnw=#nbeB@79D5A^iYMy zt=1kY&rxK#{#VN8$o`e{8%lIpsY~X?A2Wiw5R%$4=H%d?<|HfidS@~c zq;`~Vf;p^!V(Jq!wZ`lVB>qjzw&SmeTTHwuufEB%Gl!Wv0pM;90V*z>X==J{#jh`%6~=bDS?m;& zd6wryzh1q`^L~hznD_!d<&|eg7ps7)h|8JauegsRjWZY26L@zdSdX zc%xVwj=v)Alz7asae8+oM?MGWGWRvxiVvwv*o6eC7|2{OwChd-c6H&<|lf$-L2s3H9bj+lRDM`ls2TVPjmzg`}QInk(^A|AnMP3%YRb3`; 
z8FsL`X~5J?iYzQUmr-F?%3ZUrMjRU3%2CCbtdII0NgT;XHFcNEPZvj0 zx}jsx@TUkLNsQ*bN@>RYmp1^Vab#}n$H`n#;w?1#M?=l7mU1iJ=9kj3yeKy+DsSjb z=A>+qa>OuR?U&L~Zu0qGVcw3pKWH4ws&b>ITK-cEq*Uwk~Zp?J}1y@P#@RE@0|{^vVpDhdH2DF(n264W$g2df;cWr6%ip zTJRPywOo;^s>*A*_(KcJ!4=q~SuAg{YOEFO!L-7jJewshR)a0U66_N;$EIL+##O%K z)K`YjD{gs{O@nh-r*NGe+=LpB?Km2gBeg;zHTKBRfQKxn6`Qe8J z-!Su4UoZBuICVIF&(D>9%Vw@sY{|WR=|@!;Fx8{T!6&{SA{|Q)99~MZWNMXSHww{< z6aiDq6gl`5vY<%EnE9F;L>^2IL^{UIFA=#`8(%CD>FAj+pC@)RzKtH2tu zF0Awlwgy-w)`aD-u~)J+!m6-ltQ#AD6z!t6giXcfW9?YqYm$Lg^T zY|!;=jj*$^47Lm#TFcf5E5{nJPHe;tlpm|W8nG^{bROl$DzPR^eLn||y^%V=s<39P z8yi2LI>2UQE!YZd(oNI>R*kh{J=oqT0Z0egE=ta0j@8T+F!hkJcy2RxzXjprxGVWskkR_N zu{_f%ZdKY-V0CP<2=+IuHWb;LFQpKr9@S`~rYPzOqSq9uf^`{t4y<0veGHbLS*^M) zxUfwPE@~An*(SO;C%AZ2k(w))8vBpvqCU9jb{B;q7nQfm#hRS!YLy%?b(%T%Pa0aI@qOP;%pZI7oT0aI7r8FN*qi;@pwi2|mcayD!1 z-H{EL`rg^u3zT-(SfYzVi54nSE!JY}TQP0G)O*J2%*NPy(yBXsPTJx`YHlW`t=C1# zyvTM2_E~2;jID|68-bm1S4>;2i;{!r#x|!JrtWpN)!0*F8wE^#Xe_zvF?Pn$p#|ev z!u1Q~xs+L57XwB!TdD=OjL0f5^|Y8ZN{KDvKVca^xxLKv16e3bKR%-LKbZaqVv9#_ZQ@Uo?+@UMHVpV${WP~m94VaDTb+=6e+gB z*el~3v$q?jo>QdVW^CV>EzJvxUF(*3#ph~7gz#%eVu|O zS>|GWYve=2)MG!^WKHvXzQnB?Ut*a0`p25tP4cq?3oo_C9)_tq6j`h(LjP#I7E^6E zGI%=JF!iP)`I%SciIX#)F)I|a{ui-#e80+Q(8(JAiX`r90p#@$N?snn8ebJrx?hxc zNM2G9-*O*pnEFoQ#rIGXPv zs9pXQk?f^rG{dak+cHX@tWj{lC7r)iDJed5kJQ{J7O#| zDyz~}^yQDBUu%z8Kl`s*PgKkeVpp>?rx)XY$h^s+Lhg%*&vKg*nni0$Xfxdo3`y&(5m)NEU03la2`2RAQKhAbV`4R$ zJpL=EA5^J$Wf;xVs1zm7#{E&i)T@fr(xyun$v=kyRNkUkYJ84ekgZx>B%kGdF`m;6 zPCw@KQe6z*H4dJ?MdZF}wy#}P`53ApzlptjR76DOI;2e1i?(UebRWd_{leSXbsetD z>7wKqzG|dRUBJ|3kIM_st@*mx;7fiWSfkugEfoF@Edv!Q#me~E<#r_~S%Z%!6dN%0 zow=h1s-6&A#^)QlS-@1iB3Z;zzgEZ1CK~@zSm_2#y`xBesy@}Ni$NPv$C_@pjmTv= zVSsz|9G^8Rp=KK86((**IWmdn?qDQcv zGk3R~6xu9c>bfPm866~}i$#S8+``@rYr=Ba*gE!QSQXZcbz|diWnYHP##*oy*reOo zmtoadE7pTezMZ`(R)a0U67}p&u_;&@Yr~4}U~h^YkJVyJvEFyGH^ru6^RafU?}B7u z88!{8!?IYv40}^-I#!Q$V1w>rZ;G9bWw2$~(1q+xv2v^d>%>Of&E6ENz#6eGth9l> zDOQO!VL5ExlnLnsZpB zD$QjP|Ccm7P6+1@dq?y%MXj1cc@?ZV?&Y4(Zuzocp6FHz{J>BwbRtuZVg 
zd4*wFz>{i;E=JwN2f$kSWFktbkKDsQRuf;kzPw%fA8&A@Y*qbjk`>^;K^GSkycM^I zA4e>pAMuYhP@=GG=3u4d*!#LznY@$-IIAp z_4=oXiP5Fxqr9N#h(uwTx4!l%h50u5rEUC=O205mehMf`lP>muGq&MJ5veQv`*9nF zo!-he-1E++i+h@ezDwfQf! zNYxuk@zcYC|0_karTE#g!Du?|s+rO~%P^{{+fw}W072x>aXnl~f}kFB_3DC5J}4&CHuo2OqYh;G6NX|WVP+eU>H zQLD_gxqt zy-)Zfn5@M#+%HaG>W=!;W$~Zy@*G!Ay=9X4G*rZO{?WK>8yyR$UVod?^;vaZ{9G{w zw0Vy%eCIdk9Rh3d!DC?=Zi3Irx=O5uG$Yn`fsg&FRCC%gu@ir`BUCBneTp6jo*ifNeEoDn z*o8P!G%CemKiiFtE`_#o7q0BAKN%E1+m(`LWl4J1ke+|BB(y^B$j;fsR(mac*DP!-}Zn8Htsx_z{2DOZ~`^%azjnMQE5*(+8@v z@txs})=?Xv{IkgKM-u){an4foias2ZI!C(BE*Rr=x>!#iftKCN))uS7vRJ?Sl7$0UW+ZVQFx4$_$x@CrG&};P z>ONNFNpulK2IMau6WM<$^RFo*a#S(<6A@o9$%=4h<(fD#$HNMQKd>`kyJKWh}y1wDQzgVwoO&rSeq&p z`Jm`g`C1`+h$E&*%TN$2n)tnR#Z;oS8W@&vT!!r0hwU zkR=-P`1Mb^&!USkAd58Sq2r$^L{0D)SJ)Rc7PopVM6K0sMPqgi|FsXe!1Yz+MnhvB z3H~`k6sPtP8WX`y7Chu!%kImqhQ`E|-^U{NX4tWC4K)UdCg%%TJCw*WjX|Qw)JL38 z|5-(iA+ut!Vwpz`Rey~kW0NfYi-n6+3p8dSjB-c_KT>Vbn1!g0^>5BOU$sJG7Q!$a zS?E^7Med9=Mv=KfQ2%XtfTA&qTmBe1JV#uBPQL&P z0Q&&>0N>p>{Q@io90E`#PQL&b{N$S}8WUxp&XZu;r>Jweu}Z9Qf%p)M%bM_tCL#dR zSon_h8H-BerA!uSEJPcHs3zh#7HQ0d`sWJKG|V!XtVm-aTAt4Z?!XVG*ti-CQI-(7 zVN7s=8Vk|X=bUSdDo|q~N)w{LvD@HW8Vk`aA)1Vii=< z+sarPivnl9K(2}t+RA%rT%9s7>0!ySU=JiTy5hr2NFt5>4Y%#c6@SgXFIFwqa2c&C zC9(f>>moeTnIs0#N~SB;SuY({V+)GoVZt}b78>J}gKF$AG_d$VW6mTlqka!rhQ{-b z-u5b8sb$qxyD_1%M%kk{#Ndi*EL!fJGV$+oVk9T-z{a*w`8i(ODK%?1)EI zoJ-@Pwn>@-4Ud;YJE-EaV%8JF7i;C)RT?YdV-{9ih4p})M&qJoubs*oTX6!-L5jxn z-nm_gnwINsLW6UJ#uA(?a2Xm4la!SvSul;uxCQ@XpU@b00VjKRpU`+4b0TYKT(lnf zLZd(CDb~=qI_1D#vW(Q}e3uSYx@%}Qkz_TYVr|D;d3BoGUjaKG3>D&108mb6JIq0;+= z(r4;c;z1(2Dos=tz>#d3z^ipxN=BwhCGf`t@ zIoT6GD*UhFaN{i2^!8TPXr}NN`o`=@cJKrYSgaLr k7-@Q?HgRv|`gB6BLAvF4q zWo(x;p>(|p8Tl{c8ZfUgh7#VEI;E9vF;U}p^+bIqb_;wK^C3=afd~)!$hE80Faz%j zB`<(x&1v}2GjFjMDl9Ar-UOho_%Zy{p?F!4Rev%(#Gv**i0B8vo0cTtOu%D6gF}db z01E*70Qmsl!-#+YivfoKbOaF)APle^kOi0iCt*%tL7Af%2fQq4oQl_>AG5^e0J889me_yoVlL+^M9{3RG1kuQXLhHR28yXbiz+A()Lu zus~x77BAx*X1ol;0*xU!Bm{{nhsF?CmvfH#Du>1pY!rg(s=69OkShd#Bk9B^?DOxnmKcItLSVq*Dho0Z 
zCo_g1`D-p}k4YxAO&3xp6v|SK zwKPviFL_F-#zLBivyX33vddJgYMfVM|7?-Cud94~D6#*X6|C71=HbaqV*d=0_=oaF ziTxjnL{IhGE{Xk9zU3z5C{2w;1Klt>gmhjtIjd_dq>ZtLu0-B4|KJday;$Q+OhcJ0 z@jQWmhF24f#mb3U@i9wXe#UQuSSqpq+V42a6NE3Es4++}`Q@;td@HUe@1mCqpd0Zav?0d4_IXR(n4%mkzZ@&L8YVIv8c1IPe8 z1bCmvR0>D}WC9)o8eG6M3RnQx2gnEb=3p8HECw6`&_zt6fH1&vKo-FM650=l03-vB z1DyUu`vEb4m4IwO$;)UzARdqcI16ySg7yOv0c!y{0QakCKVTdn6>u3){up*a>ghF75(v;IFX z=@%Q2_lLjAZ< zzm2(`-|cb5SYlkXu}a-&;L>NV<SeID zv%1EtZkgn0n1woObt|hUrE=wG;Jl30H7->5D5BmNkzl6KAB;eN^)>E|ATdJ}??Em6 zolQ<9tz;$Ef6V$`^DD@ZMPpaweXT9m{>0Up33axO#;IJqwM%DX6_1jBF|Z{cUy~b( z&tZM9lgc<6>*9?aT*+OHSa0sC1sk|{du1GrMQg2&h8s|Crfi9C&!YH!tnYRUW5b9W zYK*1gCJXp?+bA@jX|H^&wp^=7gtm;urACIR4^ao9p87M_p*bccj>t65Bj0)%Nqeb` z*}bG+Vo$gW3Nf;~To(Ek=<{r$aUQiAnvuHKQpTqA*(5@L@kTCx0A70G+iIK&y2vaO zUU30tnX5c29cFzmRUJ||kD9!pc(433RLJqoV}vA<|6 z)j7qL%J;@VMCI+-mbt9%eRYj|i(KPWXU{?V)2?nUWA-bVpV(_?#RahZuZvLm<-c%k zUS4~-P`;c9Gnc&>$6j}-+ zWpFnor*VBI#Ywl(nDw#P7_8LX`C%v4uV|n{tbbhS?^l-8IMwAPONJF%a^@B;zX<{^ z-fk^bIJNYf^wWjDRSj&7lU^RFI4Ka8$56^+eHSbCxSS%f|L0qgs0_Z6X_v7|L1O=3 zizs9Zg;7dDV*fjw$PcZa715lt4T>-SRn3(*ZJ>0JJtI60wd>?*2)Zgz0D zFW=jl%>=9Pgm#woUH09V;ikm?_c_r`x~a&Dp)UM6$p%5WJT>)%si}tzETYq&S*#kG zDO58M^|N>bwi1jBRIe3Leay61D$ew97t;8RYxmmitbPT71n+m8Fj+D#x}K4R1y&hI z8ngb+*Xr+MeV1Ws z;7aU&o)g(kZZctCL!4^li<0YO%HtuZaVqE{GbO@99gn4Ea7`W|-s72DW7dCtIC+`% zy?j08+NZJVd*pg$?pwT*)te$N;E7RVsm}d`@7Y*@(Dx!BJ0#RMBRXVrYy8GuvhL#2 zv(ZbuAA6?o8+&OZt9zG#ZFtqu_!Zm8RYb(9xuW>uXJo9ZanZ`_Wx>c3@$%g$-eqkW zX&Q_FaevNtup`Iqx@1Tp= z1vGx6`YoY;710luuJLQsCyUaP_i$yls?zJ1lXrQe`f*lwX^xngZSF6y{|!!j-PoVG zmka+3KY(Cw)VQ#4P!_=%MP1T`x`Ve|&onMnf5p4wAT%NmAoCR{yamzMPu*G)8Yd z-Kkcwe6i}mb;PQ-_8?bhAgsgH*SI%mN_8YFHRE8YNN|(G`d$<8^BW!~8msCcg=ant zILvAy>RWIK>TYprjBAWAdov5vM6i!(m%s1J+1H509Y4vw%!wsL!Fo;1=OnLtc+AJs zs0U6^Wt<5{hU4~PCZdv7F)mUMvwA)`X#*4!s@paPGUc?PPddXyF$GimoC*03H5IEWPGYIWV-Gz zF9Fu0DBb(K+WlxOyy`bc!-1wA)C`-k@^h@@MC?@?g?!{6}8C_`c<6#{7agIeg^9=&*J*@SDQnPQ>n$rJMT@W15N#u{$z3z`dO?Wwi0JF z*?^MQarOg<2c!Vb0$gw4><5qtSPRGjxZlLt4`3W16>u3){ua)D09L?8z+OD{;{A!n 
zh$Rq8WdDZyxxa_UIxN!|GCVb#bBtwmRX&ZO1~<$QvNXJ6#Q8MF;J{k-P{`tN)XXxC zId;J;@jI89cu&eSMqir{=m}XDoLzH1jZvlrqyISPi^tKRD-U96z&AV-})`IYRgxN6;+Pn1yKLf)kuGQ5CDP1wu4( zpAgzqPK~V)qLukVSQk$LxLA!*Rc>bLNiMdMDpq6MklT5gg`OiY%sHpVxS?q1;%t`Q z#B;Ah09) zt&NOy`{B%tl{VqC#Ed((m0KsX!ha%of=keMr{zlk`|+M6H#ZnRkzxE0`51SAKI5~Q zGOi#y2WNwQ3CChi#Y@5~lFs7hwfV*jTp&;J93%c4{2#Nj0nZ~d&M|>sm(D!T1ilcG zB?uUB@dZx7VS9=o8o)yYaT4Q?iNV<@do?Z~aq&H01&jl6ah;fE1F#M^#2>&{*m*KG zktE^cKQEZX7|6)s`|m-GnAqZ&sZ@gV9E^P15b&7g59wiK7r48-*kx`8%jK0$+vZo@pp+IW%|*sl0UIb z^55T-yjM4gnQn1*pbsSa398bD!u1h zlJ5#1=5(8z)cbC|ycFWP}ic@|o|tU8a}vmDm+_;`AM;KjXV7pYddc7va9l zFG9N*A4dBb4?#V7!JQ5{V;A%z$8~R^A2{BefJlb%8uSz6+UQ5d^T4y)|BU`%{FUOr zG|7|ZtFSkx8=%kl0eBw#`*0t|-yt94iwY+roddXNU_Ms)1odVdfc7&^Qt1XAgObT2>Pe3hzFTfAb4$vOZ5%3nEC!iOgF97e<2*5LY8Uz>y z7=TeY=YfNf%i(hQu1>Bdi5bO;#}ou4ft7(&N`n;oPgfpQK?6# z9=#53FOSxDu?yhQr$=XA+m}b#-*x3EdvI8$>rtsk-!GeF`iyl_PmkV)s27h?J*v9; z$Wf(7lOC;llIY+c$*rHQwGSrvDR??W>Ge7KbB(Q_C1c@+FKUSf|^a#Rd{CcS?Yyqmp05!(~S+n~o@8#sK(M{kcF z`cB;YBM0{B-)}&C-w%3^=^ioo!n#>Q3f8od|`!r$O89EV2TRFlI*`A4>A{C zPTS*Jdyb5VXHq%Q@H@I!5LnxD1c87kPY@>Zx87qN3jzv3^d|pgL0FMp5V-L^O=L6S z%!@l;kj%V~gjM#rtEH#27qT>ZgB;1455zjc&ZT+0lm?FH2wfJ>^6Yx7#d8b0AZz2b zS>l~ z|H}Wnc)u|t6ueuq)UN^gH55c6@o!`CuaEfGSNz+A|Lsyo{9Bj0)DuK~{#X9r#Y++1 zEfi6;l%cAtp-EF_n>6EptCluYbu%<+t`rLL77*@jUN7=?etmjD#y1hg+md(d|2KJM z55>Upd^H9<4U;hx_a;2>&|mCtrSKH~ZzjsZ-voi*d1f==%zE6D?C>IiyPmrm32Ps7 z9+a|=31{wjC9ytX3EpN~&V&QI6PV*76)%hCiBzfYpYoJ-QCuby4(u-21m}xX4uqev z7>5*DOgL~|Aih<=IN)`fe3q~uLO(+=Tn}s%9y7yn^@8WD!i_;{7BhUuTbfAzhsEs8 zus}L9&Rhc58#j9_r0l~uvs&;1Qt|naViROn_Yk{dOHQ|(zdTanORK2xSbH%F1sT{(+W9auM4 zkaN$xyI$E?B#I-1xv|-g3$hHSE>7aqGMsu=ke<%y<>Yys>gkN# z++{^_Z_Vda4_3IPNGPO!E>ofKP>}B2jI;%u>W(jL`R6MV3KqQRTnH~vNES;D-x;8mvCxDEd=Md6a)6Q=xE{N$2|LS{{D*+adX!z+)tNg^(ch z=Fx!_HG>mOJZyO};!7;Qk`!~1T#jdct4dg`i4;MgQ?mte40c)xoj%L38?L4DcLvrA zqNND!Uxe;Zgl<`c<~s3(TCe;L1$XAvj|<~qjcZ@VP}bmX@Gy89${ETVDi|snDj6yp zsu-#osu^k;OwPq!O5l5hg{!PQ2JufzL1Ff?Rr`w2IZT(-tyisHRj8X$gg(o(tJcLm 
zq}A{u7Sfv+O*gR+7ccJ|%rp|5+`2rzM#kofuZte$%!bNg{ukdpSA|dBiXWiA0TlxFLE~FaFL8PRl4~S8S5*$F4A?6by8^^6|SyuQ{X9= z$;ewOzIYKieuj*gU^lGAjf=H6n9v-snanzrzCgwr@C|1ATtOx3ifm|sfec)Up(w0| zN@F9`0feJc9G3HS2!(q~k{*H9`7fce-te_`DA?Jg6l!8tUXO$d!f9ZnPl}j!C+HTK za9H8}#aGB!Tje@&os4Z2-Qflq&59lk9h0JUU$WtwpiR0;DLI9tV^V0y_A1m?kPK|ncAi)Q}B$~;&Ff$VE_N}3>0PNpC*__73n z0?!Hpch6(OCjf2ZV1=)YA|sN0gRfs;M&19uxWfj%CqjgDCY(7!$vI0#{B9vzNU+Z4 zGSQhsS8Wq6`^&3^zH-Q*mO*Q@_yI}=}3K8xl#jzdh?a|fWu zm~Oz;Tm-+@PRY5(hZw{u2$#Z!W9Jh_$7@E&@Ci+?|&XEo%>xd^f!7!}2mXOyiJs zJ&rFJcg5eeV;rh*Jsgg444R7L8OBp^*v2>whhp63n>aLMyc)a{a5)^WFxJNw`Y(EEy5N;O^EqR;wYbQQz^PjmD))~qb70SSDKHewt9?s^P^@vvZna32i6>ip5P^~1NtK8W&b^XQ%LCiX4WXHJ;IvNflsRI6|} zXAthQ4fmPGUlyRnRUD1ERUO4RIG|o1KdF8Q|Fe3HMq9fn-p}Qmc;ctojMrY5z0gm6 z627kLogGT9d(EhNOZT4cRo!cGs@HVy>0V8{{olQ*drkMI?p@uhulAzu-B){4_nPia z-MhM1bua2({a@=%*&*!K+@aiQ+^Os>>_O~xyfb3&U{7Z+;|}E|yCZ-*lIJv@w|EAv z4yXy}0B{9#0`PGtcRV|Z2Y?R(_z-}d#S2giz|O;SkO{!C62CRWj#L}K&Qu4{><)N? zIQZh5av?0>%5VkvrmS2Cz=iSzi_jyRGkJ`S8xK~tkXyzz!wU77h2r>zotrOqH3;Ts zKwyUh8Z1>zy0IJa#d5xQePRJGW;udDt~@4O_~v}c8One0P9!eDM^KO%x!F#9iK!HP z6sA=@Cz}k_4KJP3wOf~`gRuTV3f(TA9D z6RQv;+-Xm-khjFnhXrF;y$X7R*95-0VRhzS;;R?Z^&)u*_h=)NX&P2ly@I=;$Jx{Q z8jV$0_v&`UW;(qDmQ5Ywu)mgu9!9{!eW<`iLrJ5P@fx(Xr?r680xiBeVae8aO+gJ> z%Ves98t9myA81|sd$^O1W^AB!?d_3|LHmpFU+_hJuPD8wNK{lb{Y^B_A3StwaU&BI=3uEvswUSO&c(9@UmPkN zf`T~80keP^Y&GRQ;PxpZfrmPe9ym+-O9}9iERkPo_~$ZXsE%%VRc+>BNZk0|dKfZ| z3a(^Db z8ZNz|@Ev?EM?Ns}Pdy;p>@f93;d{~RG~hGN+We6h{sQ)7Q? 
z78jd~oJEE55wd?W3u`;PQlS+!Qrr1#rVVh(JVC&Fr~l3=AhMXK{HAdx(*6Hpof2$NzS{HhHRm{z zTsVm>WCHL=;zTZVqad(r$`AyOWe*AB1FWcmz)tP5AiPi~LDax}%EUfnbbMGytSLSu zF5UzJ9$VEflFiWxEb4UYECrAjzBl`L&=v7_QfNBd$WN;{Cj*~zg~abrn4`(iF?Qa^et zlv3q@LQ3Jyd5M>b)j@O?V-N%`76e5{#f1mMITGT+qlTL-HQ+NfO!3jC;PAM>$YJ5b z6EIH`p;kq_EIL#>cuZ`_sPO27I8#xvbSmj)MbhI;L4i?G(eWnSGd?gps%W0LSKKl% z&J?H?gqPNc$nZE(&-gLy$uWUJBVewg8U;5Q7GjDFjAe09bYx6?IIk@si5Lb^|Lgh* zQGvrSRA7zhQ6aIhu#+h;B7AsMWJpxJ2}XF`{bAFhD}F*$LLBNaIxHLoMMOu(m?8p) zg+vf3L1=XBOUZ?kNk~*et(VllC1A~7mD%ID>F1xJU(nYiEL5@KSaW8+Q3V0Lzv@TmBZ z;XD_L&QJ{+77-mZLN_uz3e^Y?Hig6m1;&J!g2FIQ1)&exz<+{xf(%KF2|)t8d?H+K zczApRQ)qNTRB(X;`fiLVAu2p58rE0k#D$DZ2#E?3J^E^X6AbD#e!SN!<-AftL7Rf2 zV`D?&Vxnmtz+8B|X$hIt~tk>AFFO^?Z^vbP^h}d!A1S1oo5&3v~)h${> zoS`aH4I2X~1_zv)ZH(3wSj`j>4%-Dr6k1Oh=U;LZ77Py!M~@@-%VRDoAtJ)W_rQqz zw-yFX2$xBu2U=)CRFGI}WOSxRkQmz-qOl=G9U~;TU`W1MLDfgBhk+)sb$w;t3yg@v z^z-suD5oNxXktS`vFeJ>=jH8xWx@N`kUUsC>la<>Tum>MSO4SPp*~ z=^945k+=P~nXt_Ta?MK`kwq!^CC1 z6@S7ry^Te&_(O`$T|V__SHNcu_~hihf&K;jqlQsE*Z^QQOD1?QD4-A(ghKe|t7 zkkz+`<~y8O@@31ZKlD<(#ksC^*NP2JeHEY7qp2yb$A#_zijS{q$$h8lqAv$X-s)7= z``*3S3C0g3pM0QPY~=dVRXYxmd^g*dN1T>^e&zFFl3&`Ov+)z_fq!N;nqpHRrwP*%nGJ zv9$G>CZEoj6BzAtaIS?qbtU`#7t)UZy*wnp>O2d1yic9CmAkgTx&6d#p$jaQfzcMv z)#rx#b=>peM;VJOVT(KG_MAOC-JyHiu^q}UvrPV{|FDY<*43X;xp>8QKl}>)pLG}g z&vAW@AQ0E*2_hJKoIHq(_^6qGlAv4>&?hrxmpmOAQUz)0fX#1)Agy04#$PrPD*D>%HfaZ2-@|#xbp$H6La@o__pICB91qGiFk9ZTVe9^!3RQKKNY=l)U;NeRgO@RX7wX^K zK6IqguHVW3PiAMe?cUAc?zylt2gu^Oe5~ZYv&S~~)qJj>HS24?hHdRsf@`3$cDoOc zykk;)f#-5GiOX{1c6^ttNjlMyj5F-JDB#c6l2+rE8GM{A_NNOeN9M2 zTnP3~(NV)OWn&pdm=qLNL?7p;L%gH{Pmdhe1V%(ej}}2%h|VCE-Y64-zUWx2lpL|f zMn{-Jp~$&{F{8)Iu!VPk2~n{jfmnt)SmZdRXxJr+FvW%FQx3JN$em$SbX-DQObCJx z88*r24C@slpg_c!ugEz%dtC-u*y|*(OAE%X_aE_Mp(GSy6D+b!Dh!x$hXBCA975@<)ibzl%bclnANVL<02r(MR z5+dZ_T8P~Z@7AJBMZ!tFH?hrY4ctgfuue21!veRG~=s2)Q z>1qgxsEi#6TdP=8cqB$YZ_MQOCps!(jNY)snY^N+u@4E1^D@CB*r7th@z$@YX%lSW zViAlXti~wU-BggKcA&3M=g?e^iHPuFv4OE;g!X^M+pp6rjJT!ahz)8%&>aTLMn#*3 
zMF%4!FG$6}i;w18!((w*%wS=G9AQRbX2d9xvldS|*i?sMW(s~KcvKyMTZ_?$iH18T zh9`tz<&DyprDuvRmI*C4-qx~9s2$?RA+gbz zCRym?+fd7SIz?|q4fhdoFXo%!QK*4brlL=yQ}DY-USAjvm&HrXiarFI7bZ^c=Ok&C z9Yi`WFHG-&x`Uzzm5?;&iFJ@a>|}ajY5C0orq?}}XTqHSDfD?|WV#p1=OZPi!^_A| zBr-h&zb)WrihPgXp~L5+OdrI3c@1RR%~jHT*faYd`I8T(JzeDQbD7@hDCuILe?s}C zftg0};-?)hicjOkBb7zEAj{1f%Zk*+vDUk!U!RP-j; zpJ{GCesf{u^LD166*uB_a7lYZpJ{%s`U3Um=Os*gp#J~qy9|u1CQOP zze&-XQUB_S{t5PHnr#<|`qxnOm+0S`imrwFGtFn+qf!4_iXH;{*H-jS*uRdVd&B-r z^Yh&vsK1w@hr<5eingNuOmiO$MgP`Sv>ohUPto3}KhxYd?NI;vik^-7H&FCp*xyId z7g2wv-2nq&|AvbG0rhXB=;x?E(|qDI2KMw-bX(Y;!zoTb3i~t7{kjkJZ=&eV&~K{f zFxaP=qOGt$)7{LX zun#{f;Plh5Pg_M#gZ-Iizllfreu@Su-tgjd7u4Sj%>Gmz_HU!(e}ZMPEUEIw<-i?8!9yUwPQSqoS9?{+$$^41K2AFS|j%v!a*7{#_Kk2lix| zeKZpG@2cpJV4rS^E)RP$Z33J}{ohh_71+PKqMM=qJrq3&`b@J=A3*)zR`g}~PftbT zw>W}kUmphh_X1|x4fTIV(K}H-)9m-Z!~VS$T^sf9qv(>bKS%wnlMACW&0}LU?EkK! z<6!@Oik^k~GtFaW73?3NXlIn)U(pR=f2Mi-WTE``6#WtEKS0sGsQ>$lj)VP~<}rrY zY#7#cdBDR9VFXq(j?3lxFf0(O2{r+t;fXi|k&y|9@v%V=@NtMd$rEb48i}<=pP=!9 zc1#E#oXA5*ES9|J$4AE&IjYe_R3S!e2!aa4;QS2g)r}iYyY$0@!abV`BMJg_ z`BcJ-72p#^4#niQN<}SgMNG;^Y-;C#UgVvd*mf0(c65w0Y)sK(M6G~GX#_th>c*9G=D^dW#GH7_OqXX{bJLyOsA*R09Sf8KM+Ztna7nMIc*Vz#@e+;DdtPeG>wRg9v$`aPP4#`iflM#G+MBwgkQk-Lrfz=#*B`}sVZtvume?3 zMMVgT;Sw1gEDz5007rwB-xZixxm}?X@$NA;FlsouvWSl+M2YAalS~kzWQ;|;pw3r=~|j8U*7R=k)}0{J;o;c1UP+$nPEQDhP-R8)H%xm{JBQy_h* zhuMg4o`Z@=dEd#+SEZq2qJ|e)YQ~Poxs}+x#)(Cw$i7`aB^o;(`r>&{!C?rzRnJ{m zpbAfQf{QLKV(Ttjq@I(?UEP~<7VI*`22*LN!6)6~KQws9_wP+;f$!lFMg%6xQPASy zhxvt1%)}AS%L7#G74?b5tBV2i@mLMivnc&cBu?&vv0W}=AoyRQT!#sTa#-rsX<4hQ zy?vqg7EBmLCXv7h-d>OS*JM&K+o6#N9^|vUIN4J1IH%zB1C=d&i2BB0=Reg$5`#iQ zf-y@6CWc31)C|M)KRP@ZlP(sr7R{SCMFju9aaNLmBe!UguZ8w)yo>^0E7XUT_DhHt3ul3As!3h-gE|Dp7*suhNLj4*xCu|Yd`_r~;Aixh==E~`Mh&@? 
z>Rhkh_?0D6k5hF{6NBnD;5#Y;4L`KO+>?#fo|1l|sU!xQd0b?68DzoU7968R1%-9Pknoa?42(gPj*}#uHy0e*#qk4A@dgI!5m-OgQXTmuQoJR>FG8TrmZ1KD@5Tql@+mi0n3n-D$pnW771W;RAssC6Xi#1#mmq$4 zpaTSX>c;2kFFz1|IU400ao9%)$9|J;!Kpb~5&QBH{Oj7xk(dfhUVR}%EY^hqh17ao zL{UWY-ywyHRtn<*lwNQm|6g&ze`k!vizDF}@o1l3rZ~p_h(+T^w@;9;t_KaGX-z3;%1zf!ioH(dovTOjHYJ2Lo$Z!|Gw|G zvIo~^T6UFp+;?Fl{gE>6QFQ6k1MuPl{RuGILqM#lYV z`fP(Kdz6EF+cMM4@y#Zc?U=f*?#DyC9yYJsuXDeCodfVryQ{scyS)WgFjxWtJpB9F z^IOXX@MOWEUZ>ujI`{5J0RjC2Li&(}ERYxoA5?Jb*QZavfB-x!|2q{Hn`h_BS@_!0 z%&y7ihwI#3&s6-`X@0*_F23Uzx_8}LG1Td^o^{skjeN9t$FxhEJG&kDVw!ort;)^( zZ^{ond$+Hr-~9D8N~G=FIOR*9K@P)CSN45-!e_lc*)YiSVc)Mm>Hl4sXU8Xx>Uev0 z-e=dIH1-IoP`>xQWv9BAy|dB#?Y!@&@2p+TZpf~fgI~Snd9}mL<(+Pt+*3oGTC9uu zZrZfG1MZie+f^(vYsr{Delj~}pI+3a`OTxQYg@GcFm79`qwR+GecI{At>T4PuD=8S zgNu(*`NHxlu90f2H0SenT+U{Reef}TzWB8UzCsRH5%`I5p}^-#T^Bj~bf3#7?6fd$ z7oSuTzkH) z-?d+WrE{NvPL|HSJM|6-=*-<}vG4RnNI?I9&X&&o`uKZT0_Fwavh+bu`(Nf_wv-Po zBfm*h3cpI?#}&J)%O3|;z&DKe*fJ~d^&lQ9x+tu6C?4?S*2c92Yef^RzUeeZjz zFMqX=e^#Ob>`?~kvER%3^hLY)5zDPj^2d>9@ngu+;Dee<{i(1I%dbIS9BF5gxKC6u z`E93~IQM6H0PIl~I2`Ro_mhK%ypM}N340$!Iv>T~0%nhMOO<+NV0N^c!0f}-@l7hO z-(3p{y~DuO!Q1_55RYi8qhGk4iHoHDr=Z{XQAB;%gX?{wgVcAy{n>u!_mJoZuaD*Z z+Nu0MgphbNbshc3^5nmo;d8n#?H~h$!@AtHg{88&4edT>Wtt;)D0xa5tcCua* z^bfaZY-OqMx?bAxk1kSPznIh?3cImAFQGinzkP>6*e@LZ%Kf<+^{fSa%wOhfjW6+W zy=vu4e;L(E)?-ZtnLet8w2wD_m&5lT<|yqQR$khn;S%{px^1eyZEMJS^+vnd-6X%aX$_N z&*d6{`96CwFR=U?eiY69W~?pye~r6LkH!4R_NfQ#4g47}w<~hGK|KB}-%#o`0_O6} zW#za!2*2j}(}ezD{XX-heGVRz<$bTlk2l6E+pQ%q*S9+`m(v6JczjK4Ci5=^X1_TD zJJbM9jFstc19N|U2+Z?>wWS=Ne@4r8SH`&F@}Gj|_6K*D^|}IoV>>m*e8>Ip2l}7+ z!zhREzYjkK=k%@04;z-2_HI#C+HYxJdEd=wCy%en=x@d+G5*+|?a=S+?{^fxeS$&E zPahzi>+3gPmhXUZ%=(|;i=xJyP8FVp#8uPnD8Gyo&P`nCpB4HmcwNfP19v* zsph4Yg<_uI52XR-D6j&LQxFW?4-lpx5tt#V2tKxkRmp$S zt^mq78k6Tah)*8#XOH=hxbYueyNvYzn^0@AEHi&G{;*sT*th{7^#<(g|8kx9-c!dF z)?>hC{NXwP*R|6Y38`E0k#GRFg+DaTz<(9J=wC+He|VVtgYE59@SpBi-D`|Z1ycNt zDD_{V{NMRy9xl1J0Qs*)+{^k(S-KnxS^raRMM<*6m+M-X-yS9NA8_S=S%+!)pX7A9 
zwXUP_|0YaT^D?)BSD@*61!3ANUDHR*R&g6Izh-K`-j^&Sv^5v_1LU7(!dU66ng!U&%c7 z9{t_A7|riHo1TrhLDha5LQaF{lCNEV8k^sR^45$XQ~FVA@o*{qxvC0%7&wodDqW>% zOF}5weF)99gpoD-1$o8wBuB@_bfCWn?N5oLv6qI@h&dTlA+swT9=@BLbI;QIQ|i$t zqcZ9H%3n~YAJ0*R_kP2lw{#$fVlSxag!Z(nNiHpZ=RCgr+l1+CkE5>@m=W8 zkv_C+?^0?yjA-MrakTM=Mbt9j4?5ZIFZ$NCJoOxvPr+Z-qZs@lm!NTv>EqP*sGP?& z%6O|kwLNi)ewf;dYSlbQCuWbPf&Ji#)wa;R;+3evkqh*7;};ZK^&?vHULdV~)|Dnq z%%&P4o2g0eEV{a5DBaCGOc5=dX>j}F(z-|m`1fqpY-c3dL;IALiN4W+dB*TJ-n$?g* zgS?7;NrUZs)5f-E=*Fl$G~-D*dTYdZs^Ybkz6h^Ndk?Lpb?*Hr>C#6u^VBSA5ZjyV zS8k`##TL*5-xz%E$%}4`Org+SZ&7fLnHHU!Lf^JuL>(7ACzt7SsImWfn&0|MIu|pC z%5AC>r*i2L&G7M!(M5 zLar7Y9lLK&Zy{{I`O9B)rp7iZ?!KC4)oDb*Q$~?<+on`%K}G7*W;Ip$b{GA4dp&(n zrwm2z>PbEyFQ<(Di^#716mrh{i|SkEP-NpJ)OuqvDrxed?A?Rt(fmx>yRHZII`AF+ z(DE={UDblt%wIs~I)qV$pz}0p&Rq)oq%Y;4X-Vt9Swj~*8siA!8!GwiGzIqzrP~gh zscwibz1M#Sb$!u>K6qk8jf`8auqWFyprCl^cl4{ z>`c8nT&J?zS5niOn<#H}MS3SBg^um~j!LvIO@k5-Q_Ux%XzVBUG^<=J?fw2;ijKNO zcW>gPruzWOJiVD#hi;`&M~~8P*!TQ&&xhtL*-5+Cji%|{PSf9mlWDK@A+618MCWcT zrS>1BQ`Y%NO8T`L)jRVGeOtQ?wo*%J-MgRDFYnn?U7wBA!#aUZ7yE%mtu94l-~Wa@ z%haQ@n|0k(i;SX$!&_719fN4<$!_F(!IOUV=}5H$o74W7BjkH9jdnylr%QOCc5X*#bVRRy+km5;XjvVY(Gz@pVy-iwSS?p zvzn2^gMVntj;|=}qj#u#y%c&Ub0GOVPN$Ot7t_OE7m`cQxnyedJ*|9i2)VcaiL9*_ z(jk+9MxL8a`_KPEJBB`@o*`~zC^mxTq^_k6_#+p60=iMvXQgOi>~8wju`Oj-nv>(% zv6R#K5cR72lujJjN4XFDX%6FlF zgdudy_K2e9@1?a32UDp|x2bZw4iuO;2Q1ra!raeCJMBWhGRo8H|x zm3l3iL;Y$TqIv;mDP!*mDpBDJsu+_-;R}PQ&cLD6zV98H=vJB@ZgQjigO8|Lx)0zE z}3*3+ySEvVJH8Z`X#vlNiKnkLt2MF*}eqdqPxX-U(c zXw}J56zrc>*#Q)t$oSLw=E$LXzS7JAYD zI#n)xfcCsUn-)B+PrvWnL#6i|r85K9Q`bGyXh@eRe^j-=LeKc>lPt7t@%52%aRAfjfs z=@^Zs{D%Q#c)us*jI<(>CCjtwPuer#(<#0kkacmF$9}*=`y5Ja%lwo|d98shrrIC3 zV!p=YKl#9dsyqm94#atPFptjyiE@B<9Bw-?^sD5n?PnNn#<_oUt^JwgdL6E;&268Q z_wD3)nb+H|`+eY&uXAs-U*31xxn&h@wV$!_+1c^~Znr<3v~tCdEAOas^4gc4^(11z zro8rJ`;7NXi@n#r)v$!9k6Ybu@3FaY@qZj2w6D>u%-i3ef6(5mUW-3Y?|9h0Zoh8l zimiFney~~IUsP*mYjdY^2~Yc`URC90`~7pOL_(Pi#V2~(c8ofH?(9*;XH+%Utr+6B z;X;9a`k!syUp)V==F`)sf8OWG_!o-LZe;t`=V54k>1(q5nGI~ahuq3dsHXUgvSx>) 
zH>OW+q$bGLmXj_2(8=bucXa+n=6l0C^uIYo@i}(pZjoz#suiR7EH7KD z!ROxzwnHoQnEqne zN5>SORL9n~*U=UaE-608Xv^I_Z|aN(iXTwR)~iyxeqQ#u%AQTl6^s40?wGsgOPePg zetXHR+KSJpZp-ShrRv))6rWYsyy-yAE)}~f-nFYaebCqC9=@mep*74u&g-=7RG8v3 zo7fh9+xXOhF^Z>}wzQDhr+%HScxycy4M<#>wM@%zr`KE|{Y|vryQ%qUg%evHwk!F@ z4(2HxH+eQWs`#viwzor_KKv+0@x4v9Vu>D0*56aSx3{@e1MjSVjMrs*Q!CrPcrmZU zAa}**IM|*vTK(?s+KRV4^6S3$aB9nziZ_)ruU|Z*ep)xhle6t$-+c!=4ODzsZFBYL zQA^K_P<&PkTfJWAzn(l^>lTW43oZ5OXhC@he-|X1#c~=e7-s|Dd^f z&^RpxE3GpDNzsY3_Mt@Yv9jH&p*V zO8+bE)7u|cReX-K*{fmqTg!YEpIy&9xOf-q{tk-It7uzRyWIx60L2e2VJUV<&Oy%@s;A!>e|wxn^(V*s`!}t=1C8J z-!Wph;^~y1)yuEVi$64f+3(w9a()r*PikNr{-DOyx1T9_Z%=d5=DBr?mAWb0cf65# z)vTovM@)*3DP!(Y@tw34O%>m@r}<+)6_{3_qv??bD?Ygh!uD0tN`~$rmZ^`yp+-x~&UKQ=nDPg`7{Pa|1 zA1!ybHOhK1WQ|4fS*2_q&z~lC?Wg$A*5-p*?=`$PT=A|IZNKcCIV)wXr-ObAlJ1^0ZC1R+V4md}*|79M#hdc{%47}>C~-mYLm&9H zT{CEq#{T0;=@XsH@5%eTEpIo_jR^~MGmjkIz;hwCB4(z zzdq~pvEm;Wv(?#rc=VXh6d&^-J-6sOnsvqR@_}*FA8t|dik@bduupxu9aVhqM}F?p z%CA^`S@D*=eq{#+-l_Cd@zy)(j{d(?US9gH?2nmG(}T8*Xj#9u;xWDwV$XHk-$wDY z+OJmUs6nH8D?aaG`twdR$2195y!C)ztL@9zJs+$1^rwE4{c0Jr=P7>Z@$^rVzM8c2 z2gR2xVRQRqev7R(#ph(CFBth*{awFH-g@5EcBf{&5vQ&zdB-~(zu+qT+UoZ7fln*y zudiBuPPc3NY?awl$;*|oO*Xc5538*#h%8z_G9UB3g{+!`KlspY?>&zcyO z>(Np1zS(}m`~4L=q?h8`SF{bB=J9*#`--1xv@NXWSE_i3;)mYztN8c1x+7u~U(3Vn zlM-A1_*lg!Ur*0kmQcR)48>cn`t2(g`j>6K;uFa><-y(q?Y>exEl&4sU2WN~t91Ss ze)%(H+$7Af?`it%h|8VtWhi;ZDL>C{vGwO4QoPB*oISX8&9>Q!&w85v;J4qK-1<}T zYb%+*Uv*)~cXt(^Yj4Xt_Ro)@&lKWWSZ6if?6ScIzwSMbZA_-sZ2jEu4OQf|6%B*gl^-r`40$il158 z7CN})fI5p5pXX-&;b8R5LEkFAo2&WH8D$?Y`BCw$yW08$Y~!c9_ydy_)8(3M<_na&0IRCO0!alioes? 
z+%D+R#S2pvpW$xv-8L^_>paD0m$f-xsP^c~uN0r^WL{S4x6s*Z6hE$ptjbbNyX8vC+tzY)zx;&c=`Kj#}p5L7~>IK?TZVFH$6;0+OXZKA8#psaV=YonYQl7 zo+#ee(^mTzxjzu?Z_&h7;=4Pyo47v^_OxDzv8A1SJ3pqnMy}C`yA4V#E zsLA|KeASwDCTiZ()+*Z}a{U~|=M=Z?Za1=$WvSw0JK0RhSDKyqLGdOh^TMcGJ0@;Y zytSSAeoEQhKD#x4*RRIK+nugtDL%QJZJXK`iT3-}F(2wx`9jPsCI7ygZO(+X_MM+9 z{;Z#ETk`PTHJlzwe@m-vUfA&O>cu@3kNr@WF3w)hYASwcS(|B9?^BN&D}HfJ^UW<& z+dpluczWtrVOHQ!r(Qb$U+IQpKXfpCp!pIuPpWd;|0Bg)%h>8P{UkIpQSmvxX7by) zX~lHKTYPNxKhD2+ae?AZCC#zFpUkWGt>SxEwHeYPZ;by*@z!GIi*@CGP_*A#*4AWa zxitSHT2AIO=bkUwa8~g#4a{Gx$)4nMQ}H>KZAPCjM{RhbcvA)Q&c>Z5`a3<6_Q`Ov z)eAkk;1q0nKJ9bI*H*k!ck}69U-qxxO!3)uY!%o3_3%;$#dm6AJF{T$ft7s} z@7K*d!tMIGn4yYKtz|x6cT5SI|#pl$uMgO|!c$vA1cdBW7yTl7G zr{#*j)zj>y_Q9h4l$rkUTiM-*nsw=@cj> zzJmGQx|KdxLK|WFpV<^nnYZ!F1Sr2kb1(2f)Q(xeqhSSr2ZaX%10e-Ue-Q-n9Vc`G NC3*fsLI-@^1^^UY---YL From 421fbd939c450b34f310f380466cdc0d3d15133e Mon Sep 17 00:00:00 2001 From: David Rubin <87927264+Rexicon226@users.noreply.github.com> Date: Tue, 10 Sep 2024 18:04:59 -0700 Subject: [PATCH 124/202] thread: don't leak the thread in `spawnManager` (#21379) --- lib/std/Thread/WaitGroup.zig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/std/Thread/WaitGroup.zig b/lib/std/Thread/WaitGroup.zig index cbc3ff0c8ff2..cff474c86387 100644 --- a/lib/std/Thread/WaitGroup.zig +++ b/lib/std/Thread/WaitGroup.zig @@ -63,5 +63,6 @@ pub fn spawnManager( } }; wg.start(); - _ = std.Thread.spawn(.{}, Manager.run, .{ wg, args }) catch Manager.run(wg, args); + const t = std.Thread.spawn(.{}, Manager.run, .{ wg, args }) catch return Manager.run(wg, args); + t.detach(); } From 218cf059dd215282aa96d6b4715e68d533a4238e Mon Sep 17 00:00:00 2001 From: Jesse Wattenbarger Date: Tue, 10 Sep 2024 14:09:54 -0400 Subject: [PATCH 125/202] remove explicit ComplexTypeTag from switch --- doc/langref/test_tagged_union.zig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/langref/test_tagged_union.zig b/doc/langref/test_tagged_union.zig index 
20e06c64b11f..dbe765f5b832 100644 --- a/doc/langref/test_tagged_union.zig +++ b/doc/langref/test_tagged_union.zig @@ -15,8 +15,8 @@ test "switch on tagged union" { try expect(@as(ComplexTypeTag, c) == ComplexTypeTag.ok); switch (c) { - ComplexTypeTag.ok => |value| try expect(value == 42), - ComplexTypeTag.not_ok => unreachable, + .ok => |value| try expect(value == 42), + .not_ok => unreachable, } } From 892ce7ef527c863d84f3085f79f1a5aec7161c2c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 9 Sep 2024 19:36:52 -0700 Subject: [PATCH 126/202] rework fuzzing API The previous API used `std.testing.fuzzInput(.{})` however that has the problem that users call it multiple times incorrectly, and there might be work happening to obtain the corpus which should not be included in coverage analysis, and which must not slow down iteration speed. This commit restructures it so that the main loop lives in libfuzzer and directly calls the "test one" function. In this commit I was a little too aggressive because I made the test runner export `fuzzer_one` for this purpose. This was motivated by performance, but it causes "exported symbol collision: fuzzer_one" to occur when more than one fuzz test is provided. There are three ways to solve this: 1. libfuzzer needs to be passed a function pointer instead. Possible performance downside. 2. build runner needs to build a different process per fuzz test. Potentially wasteful and unclear how to isolate them. 3. test runner needs to perform a relocation at runtime to point the function call to the relevant unit test. Portability issues and dubious performance gains. 
--- lib/compiler/test_runner.zig | 106 +++++++++++++++++++++++++---------- lib/fuzzer.zig | 84 ++++++++++++++------------- lib/std/testing.zig | 8 ++- lib/std/zig/tokenizer.zig | 7 ++- 4 files changed, 131 insertions(+), 74 deletions(-) diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig index ac9629a57dd6..2468c12645cc 100644 --- a/lib/compiler/test_runner.zig +++ b/lib/compiler/test_runner.zig @@ -145,31 +145,27 @@ fn mainServer() !void { .start_fuzzing => { if (!builtin.fuzz) unreachable; const index = try server.receiveBody_u32(); - var first = true; const test_fn = builtin.test_functions[index]; - while (true) { - testing.allocator_instance = .{}; - defer if (testing.allocator_instance.deinit() == .leak) std.process.exit(1); - log_err_count = 0; - is_fuzz_test = false; - test_fn.func() catch |err| switch (err) { - error.SkipZigTest => continue, - else => { - if (@errorReturnTrace()) |trace| { - std.debug.dumpStackTrace(trace.*); - } - std.debug.print("failed with error.{s}\n", .{@errorName(err)}); - std.process.exit(1); - }, - }; - if (!is_fuzz_test) @panic("missed call to std.testing.fuzzInput"); - if (log_err_count != 0) @panic("error logs detected"); - if (first) { - first = false; - const entry_addr = @intFromPtr(test_fn.func); - try server.serveU64Message(.fuzz_start_addr, entry_addr); - } + const entry_addr = @intFromPtr(test_fn.func); + try server.serveU64Message(.fuzz_start_addr, entry_addr); + const prev_allocator_state = testing.allocator_instance; + defer { + testing.allocator_instance = prev_allocator_state; + if (testing.allocator_instance.deinit() == .leak) std.process.exit(1); } + is_fuzz_test = false; + test_fn.func() catch |err| switch (err) { + error.SkipZigTest => return, + else => { + if (@errorReturnTrace()) |trace| { + std.debug.dumpStackTrace(trace.*); + } + std.debug.print("failed with error.{s}\n", .{@errorName(err)}); + std.process.exit(1); + }, + }; + if (!is_fuzz_test) @panic("missed call to std.testing.fuzz"); 
+ if (log_err_count != 0) @panic("error logs detected"); }, else => { @@ -349,19 +345,67 @@ const FuzzerSlice = extern struct { var is_fuzz_test: bool = undefined; -extern fn fuzzer_next() FuzzerSlice; +extern fn fuzzer_start() void; extern fn fuzzer_init(cache_dir: FuzzerSlice) void; extern fn fuzzer_coverage_id() u64; -pub fn fuzzInput(options: testing.FuzzInputOptions) []const u8 { +pub fn fuzz( + comptime testOne: fn ([]const u8) anyerror!void, + options: testing.FuzzInputOptions, +) anyerror!void { + // Prevent this function from confusing the fuzzer by omitting its own code + // coverage from being considered. @disableInstrumentation(); - if (crippled) return ""; + + // Some compiler backends are not capable of handling fuzz testing yet but + // we still want CI test coverage enabled. + if (crippled) return; + + // Smoke test to ensure the test did not use conditional compilation to + // contradict itself by making it not actually be a fuzz test when the test + // is built in fuzz mode. is_fuzz_test = true; + + // Ensure no test failure occurred before starting fuzzing. + if (log_err_count != 0) @panic("error logs detected"); + + // libfuzzer is in a separate compilation unit so that its own code can be + // excluded from code coverage instrumentation. It needs a function pointer + // it can call for checking exactly one input. Inside this function we do + // our standard unit test checks such as memory leaks, and interaction with + // error logs. 
+ const global = struct { + fn fuzzer_one(input_ptr: [*]const u8, input_len: usize) callconv(.C) void { + @disableInstrumentation(); + testing.allocator_instance = .{}; + defer if (testing.allocator_instance.deinit() == .leak) std.process.exit(1); + log_err_count = 0; + testOne(input_ptr[0..input_len]) catch |err| switch (err) { + error.SkipZigTest => return, + else => { + if (@errorReturnTrace()) |trace| { + std.debug.dumpStackTrace(trace.*); + } + std.debug.print("failed with error.{s}\n", .{@errorName(err)}); + std.process.exit(1); + }, + }; + if (log_err_count != 0) @panic("error logs detected"); + } + }; if (builtin.fuzz) { - return fuzzer_next().toSlice(); + @export(&global.fuzzer_one, .{ .name = "fuzzer_one" }); + fuzzer_start(); + return; + } + + // When the unit test executable is not built in fuzz mode, only run the + // provided corpus. + for (options.corpus) |input| { + try testOne(input); } - if (options.corpus.len == 0) return ""; - var prng = std.Random.DefaultPrng.init(testing.random_seed); - const random = prng.random(); - return options.corpus[random.uintLessThan(usize, options.corpus.len)]; + + // In case there is no provided corpus, also use an empty + // string as a smoke test. + try testOne(""); } diff --git a/lib/fuzzer.zig b/lib/fuzzer.zig index 9c67756a6d97..2aa974427589 100644 --- a/lib/fuzzer.zig +++ b/lib/fuzzer.zig @@ -235,22 +235,41 @@ const Fuzzer = struct { }; } - fn next(f: *Fuzzer) ![]const u8 { + fn start(f: *Fuzzer) !void { const gpa = f.gpa; const rng = fuzzer.rng.random(); - if (f.recent_cases.entries.len == 0) { - // Prepare initial input. - try f.recent_cases.ensureUnusedCapacity(gpa, 100); - const len = rng.uintLessThanBiased(usize, 80); - try f.input.resize(gpa, len); - rng.bytes(f.input.items); - f.recent_cases.putAssumeCapacity(.{ - .id = 0, - .input = try gpa.dupe(u8, f.input.items), - .score = 0, - }, {}); - } else { + // Prepare initial input. 
+ assert(f.recent_cases.entries.len == 0); + assert(f.n_runs == 0); + try f.recent_cases.ensureUnusedCapacity(gpa, 100); + const len = rng.uintLessThanBiased(usize, 80); + try f.input.resize(gpa, len); + rng.bytes(f.input.items); + f.recent_cases.putAssumeCapacity(.{ + .id = 0, + .input = try gpa.dupe(u8, f.input.items), + .score = 0, + }, {}); + + const header: *volatile SeenPcsHeader = @ptrCast(f.seen_pcs.items[0..@sizeOf(SeenPcsHeader)]); + + while (true) { + const chosen_index = rng.uintLessThanBiased(usize, f.recent_cases.entries.len); + const run = &f.recent_cases.keys()[chosen_index]; + f.input.clearRetainingCapacity(); + f.input.appendSliceAssumeCapacity(run.input); + try f.mutate(); + + _ = @atomicRmw(usize, &header.lowest_stack, .Min, __sancov_lowest_stack, .monotonic); + @memset(f.pc_counters, 0); + f.coverage.reset(); + + fuzzer_one(f.input.items.ptr, f.input.items.len); + + f.n_runs += 1; + _ = @atomicRmw(usize, &header.n_runs, .Add, 1, .monotonic); + if (f.n_runs % 10000 == 0) f.dumpStats(); const analysis = f.analyzeLastRun(); @@ -301,7 +320,6 @@ const Fuzzer = struct { } } - const header: *volatile SeenPcsHeader = @ptrCast(f.seen_pcs.items[0..@sizeOf(SeenPcsHeader)]); _ = @atomicRmw(usize, &header.unique_runs, .Add, 1, .monotonic); } @@ -317,26 +335,12 @@ const Fuzzer = struct { // This has to be done before deinitializing the deleted items. 
const doomed_runs = f.recent_cases.keys()[cap..]; f.recent_cases.shrinkRetainingCapacity(cap); - for (doomed_runs) |*run| { - std.log.info("culling score={d} id={d}", .{ run.score, run.id }); - run.deinit(gpa); + for (doomed_runs) |*doomed_run| { + std.log.info("culling score={d} id={d}", .{ doomed_run.score, doomed_run.id }); + doomed_run.deinit(gpa); } } } - - const chosen_index = rng.uintLessThanBiased(usize, f.recent_cases.entries.len); - const run = &f.recent_cases.keys()[chosen_index]; - f.input.clearRetainingCapacity(); - f.input.appendSliceAssumeCapacity(run.input); - try f.mutate(); - - f.n_runs += 1; - const header: *volatile SeenPcsHeader = @ptrCast(f.seen_pcs.items[0..@sizeOf(SeenPcsHeader)]); - _ = @atomicRmw(usize, &header.n_runs, .Add, 1, .monotonic); - _ = @atomicRmw(usize, &header.lowest_stack, .Min, __sancov_lowest_stack, .monotonic); - @memset(f.pc_counters, 0); - f.coverage.reset(); - return f.input.items; } fn visitPc(f: *Fuzzer, pc: usize) void { @@ -419,10 +423,12 @@ export fn fuzzer_coverage_id() u64 { return fuzzer.coverage_id; } -export fn fuzzer_next() Fuzzer.Slice { - return Fuzzer.Slice.fromZig(fuzzer.next() catch |err| switch (err) { - error.OutOfMemory => @panic("out of memory"), - }); +extern fn fuzzer_one(input_ptr: [*]const u8, input_len: usize) callconv(.C) void; + +export fn fuzzer_start() void { + fuzzer.start() catch |err| switch (err) { + error.OutOfMemory => fatal("out of memory", .{}), + }; } export fn fuzzer_init(cache_dir_struct: Fuzzer.Slice) void { @@ -432,24 +438,24 @@ export fn fuzzer_init(cache_dir_struct: Fuzzer.Slice) void { const pc_counters_start = @extern([*]u8, .{ .name = "__start___sancov_cntrs", .linkage = .weak, - }) orelse fatal("missing __start___sancov_cntrs symbol"); + }) orelse fatal("missing __start___sancov_cntrs symbol", .{}); const pc_counters_end = @extern([*]u8, .{ .name = "__stop___sancov_cntrs", .linkage = .weak, - }) orelse fatal("missing __stop___sancov_cntrs symbol"); + }) orelse 
fatal("missing __stop___sancov_cntrs symbol", .{}); const pc_counters = pc_counters_start[0 .. pc_counters_end - pc_counters_start]; const pcs_start = @extern([*]usize, .{ .name = "__start___sancov_pcs1", .linkage = .weak, - }) orelse fatal("missing __start___sancov_pcs1 symbol"); + }) orelse fatal("missing __start___sancov_pcs1 symbol", .{}); const pcs_end = @extern([*]usize, .{ .name = "__stop___sancov_pcs1", .linkage = .weak, - }) orelse fatal("missing __stop___sancov_pcs1 symbol"); + }) orelse fatal("missing __stop___sancov_pcs1 symbol", .{}); const pcs = pcs_start[0 .. pcs_end - pcs_start]; diff --git a/lib/std/testing.zig b/lib/std/testing.zig index 35bb13bf0d9a..2cc38749eb5e 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -1141,6 +1141,10 @@ pub const FuzzInputOptions = struct { corpus: []const []const u8 = &.{}, }; -pub inline fn fuzzInput(options: FuzzInputOptions) []const u8 { - return @import("root").fuzzInput(options); +/// Inline to avoid coverage instrumentation. 
+pub inline fn fuzz( + comptime testOne: fn (input: []const u8) anyerror!void, + options: FuzzInputOptions, +) anyerror!void { + return @import("root").fuzz(testOne, options); } diff --git a/lib/std/zig/tokenizer.zig b/lib/std/zig/tokenizer.zig index 06c6b859ac68..db69693a93b9 100644 --- a/lib/std/zig/tokenizer.zig +++ b/lib/std/zig/tokenizer.zig @@ -1708,6 +1708,10 @@ test "invalid tabs and carriage returns" { try testTokenize("\rpub\rswitch\r", &.{ .keyword_pub, .keyword_switch }); } +test "fuzzable properties upheld" { + return std.testing.fuzz(testPropertiesUpheld, .{}); +} + fn testTokenize(source: [:0]const u8, expected_token_tags: []const Token.Tag) !void { var tokenizer = Tokenizer.init(source); for (expected_token_tags) |expected_token_tag| { @@ -1723,8 +1727,7 @@ fn testTokenize(source: [:0]const u8, expected_token_tags: []const Token.Tag) !v try std.testing.expectEqual(source.len, last_token.loc.end); } -test "fuzzable properties upheld" { - const source = std.testing.fuzzInput(.{}); +fn testPropertiesUpheld(source: []const u8) anyerror!void { const source0 = try std.testing.allocator.dupeZ(u8, source); defer std.testing.allocator.free(source0); var tokenizer = Tokenizer.init(source0); From 2b76221a468d1d4556b8f512a069b703f621cc2c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 9 Sep 2024 19:57:31 -0700 Subject: [PATCH 127/202] libfuzzer: use a function pointer instead of extern solves the problem presented in the previous commit message --- lib/compiler/test_runner.zig | 5 ++--- lib/fuzzer.zig | 5 +++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig index 2468c12645cc..3d2187748e51 100644 --- a/lib/compiler/test_runner.zig +++ b/lib/compiler/test_runner.zig @@ -345,7 +345,7 @@ const FuzzerSlice = extern struct { var is_fuzz_test: bool = undefined; -extern fn fuzzer_start() void; +extern fn fuzzer_start(testOne: *const fn ([*]const u8, usize) callconv(.C) void) void; 
extern fn fuzzer_init(cache_dir: FuzzerSlice) void; extern fn fuzzer_coverage_id() u64; @@ -394,8 +394,7 @@ pub fn fuzz( } }; if (builtin.fuzz) { - @export(&global.fuzzer_one, .{ .name = "fuzzer_one" }); - fuzzer_start(); + fuzzer_start(&global.fuzzer_one); return; } diff --git a/lib/fuzzer.zig b/lib/fuzzer.zig index 2aa974427589..6cc8f9cc2879 100644 --- a/lib/fuzzer.zig +++ b/lib/fuzzer.zig @@ -423,9 +423,10 @@ export fn fuzzer_coverage_id() u64 { return fuzzer.coverage_id; } -extern fn fuzzer_one(input_ptr: [*]const u8, input_len: usize) callconv(.C) void; +var fuzzer_one: *const fn (input_ptr: [*]const u8, input_len: usize) callconv(.C) void = undefined; -export fn fuzzer_start() void { +export fn fuzzer_start(testOne: @TypeOf(fuzzer_one)) void { + fuzzer_one = testOne; fuzzer.start() catch |err| switch (err) { error.OutOfMemory => fatal("out of memory", .{}), }; From 9bc731b30a0be771a8128bab25d873f9212643a9 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 9 Sep 2024 20:32:41 -0700 Subject: [PATCH 128/202] fuzzing: better std.testing.allocator lifetime management --- lib/compiler/test_runner.zig | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig index 3d2187748e51..83d53626c3f0 100644 --- a/lib/compiler/test_runner.zig +++ b/lib/compiler/test_runner.zig @@ -148,11 +148,7 @@ fn mainServer() !void { const test_fn = builtin.test_functions[index]; const entry_addr = @intFromPtr(test_fn.func); try server.serveU64Message(.fuzz_start_addr, entry_addr); - const prev_allocator_state = testing.allocator_instance; - defer { - testing.allocator_instance = prev_allocator_state; - if (testing.allocator_instance.deinit() == .leak) std.process.exit(1); - } + defer if (testing.allocator_instance.deinit() == .leak) std.process.exit(1); is_fuzz_test = false; test_fn.func() catch |err| switch (err) { error.SkipZigTest => return, @@ -383,18 +379,24 @@ pub fn fuzz( 
testOne(input_ptr[0..input_len]) catch |err| switch (err) { error.SkipZigTest => return, else => { - if (@errorReturnTrace()) |trace| { - std.debug.dumpStackTrace(trace.*); - } + std.debug.lockStdErr(); + if (@errorReturnTrace()) |trace| std.debug.dumpStackTrace(trace.*); std.debug.print("failed with error.{s}\n", .{@errorName(err)}); std.process.exit(1); }, }; - if (log_err_count != 0) @panic("error logs detected"); + if (log_err_count != 0) { + std.debug.lockStdErr(); + std.debug.print("error logs detected\n", .{}); + std.process.exit(1); + } } }; if (builtin.fuzz) { + const prev_allocator_state = testing.allocator_instance; + testing.allocator_instance = .{}; fuzzer_start(&global.fuzzer_one); + testing.allocator_instance = prev_allocator_state; return; } From 0cdccff51912359b7ec5afa57fbbd5bb69d8f3a2 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 9 Sep 2024 20:37:03 -0700 Subject: [PATCH 129/202] fuzzer: move web files into separate directory --- lib/fuzzer/{ => web}/index.html | 0 lib/fuzzer/{ => web}/main.js | 0 lib/fuzzer/{wasm => web}/main.zig | 0 lib/std/Build/Fuzz/WebServer.zig | 6 +++--- 4 files changed, 3 insertions(+), 3 deletions(-) rename lib/fuzzer/{ => web}/index.html (100%) rename lib/fuzzer/{ => web}/main.js (100%) rename lib/fuzzer/{wasm => web}/main.zig (100%) diff --git a/lib/fuzzer/index.html b/lib/fuzzer/web/index.html similarity index 100% rename from lib/fuzzer/index.html rename to lib/fuzzer/web/index.html diff --git a/lib/fuzzer/main.js b/lib/fuzzer/web/main.js similarity index 100% rename from lib/fuzzer/main.js rename to lib/fuzzer/web/main.js diff --git a/lib/fuzzer/wasm/main.zig b/lib/fuzzer/web/main.zig similarity index 100% rename from lib/fuzzer/wasm/main.zig rename to lib/fuzzer/web/main.zig diff --git a/lib/std/Build/Fuzz/WebServer.zig b/lib/std/Build/Fuzz/WebServer.zig index a0ab018cf57c..b5ad86af15fd 100644 --- a/lib/std/Build/Fuzz/WebServer.zig +++ b/lib/std/Build/Fuzz/WebServer.zig @@ -128,11 +128,11 @@ fn 
serveRequest(ws: *WebServer, request: *std.http.Server.Request) !void { std.mem.eql(u8, request.head.target, "/debug") or std.mem.eql(u8, request.head.target, "/debug/")) { - try serveFile(ws, request, "fuzzer/index.html", "text/html"); + try serveFile(ws, request, "fuzzer/web/index.html", "text/html"); } else if (std.mem.eql(u8, request.head.target, "/main.js") or std.mem.eql(u8, request.head.target, "/debug/main.js")) { - try serveFile(ws, request, "fuzzer/main.js", "application/javascript"); + try serveFile(ws, request, "fuzzer/web/main.js", "application/javascript"); } else if (std.mem.eql(u8, request.head.target, "/main.wasm")) { try serveWasm(ws, request, .ReleaseFast); } else if (std.mem.eql(u8, request.head.target, "/debug/main.wasm")) { @@ -217,7 +217,7 @@ fn buildWasmBinary( const main_src_path: Build.Cache.Path = .{ .root_dir = ws.zig_lib_directory, - .sub_path = "fuzzer/wasm/main.zig", + .sub_path = "fuzzer/web/main.zig", }; const walk_src_path: Build.Cache.Path = .{ .root_dir = ws.zig_lib_directory, From 2d005827b874f27535cda72c80b6558d9d4cd30c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 9 Sep 2024 21:16:06 -0700 Subject: [PATCH 130/202] make lowest stack an internal libfuzzer detail This value is useful to help determine run uniqueness in the face of recursion, however it is not valuable to expose to the fuzzing UI. 
--- lib/fuzzer.zig | 6 +++--- lib/fuzzer/web/index.html | 1 - lib/fuzzer/web/main.js | 2 -- lib/fuzzer/web/main.zig | 7 ------- lib/std/Build/Fuzz/WebServer.zig | 2 -- lib/std/Build/Fuzz/abi.zig | 2 -- 6 files changed, 3 insertions(+), 17 deletions(-) diff --git a/lib/fuzzer.zig b/lib/fuzzer.zig index 6cc8f9cc2879..3f8a99114876 100644 --- a/lib/fuzzer.zig +++ b/lib/fuzzer.zig @@ -28,7 +28,8 @@ fn logOverride( f.writer().print(prefix1 ++ prefix2 ++ format ++ "\n", args) catch @panic("failed to write to fuzzer log"); } -export threadlocal var __sancov_lowest_stack: usize = std.math.maxInt(usize); +/// Helps determine run uniqueness in the face of recursion. +export threadlocal var __sancov_lowest_stack: usize = 0; export fn __sanitizer_cov_trace_const_cmp1(arg1: u8, arg2: u8) void { handleCmp(@returnAddress(), arg1, arg2); @@ -220,7 +221,6 @@ const Fuzzer = struct { .n_runs = 0, .unique_runs = 0, .pcs_len = pcs.len, - .lowest_stack = std.math.maxInt(usize), }; f.seen_pcs.appendSliceAssumeCapacity(std.mem.asBytes(&header)); f.seen_pcs.appendNTimesAssumeCapacity(0, n_bitset_elems * @sizeOf(usize)); @@ -261,8 +261,8 @@ const Fuzzer = struct { f.input.appendSliceAssumeCapacity(run.input); try f.mutate(); - _ = @atomicRmw(usize, &header.lowest_stack, .Min, __sancov_lowest_stack, .monotonic); @memset(f.pc_counters, 0); + __sancov_lowest_stack = std.math.maxInt(usize); f.coverage.reset(); fuzzer_one(f.input.items.ptr, f.input.items.len); diff --git a/lib/fuzzer/web/index.html b/lib/fuzzer/web/index.html index 16fa87991377..0addd9f88288 100644 --- a/lib/fuzzer/web/index.html +++ b/lib/fuzzer/web/index.html @@ -147,7 +147,6 @@

  • Total Runs:
  • Unique Runs:
  • Coverage:
  • -
  • Lowest Stack:
  • Entry Points:
    • diff --git a/lib/fuzzer/web/main.js b/lib/fuzzer/web/main.js index ce02276f9819..9ee6b445e287 100644 --- a/lib/fuzzer/web/main.js +++ b/lib/fuzzer/web/main.js @@ -6,7 +6,6 @@ const domStatTotalRuns = document.getElementById("statTotalRuns"); const domStatUniqueRuns = document.getElementById("statUniqueRuns"); const domStatCoverage = document.getElementById("statCoverage"); - const domStatLowestStack = document.getElementById("statLowestStack"); const domEntryPointsList = document.getElementById("entryPointsList"); let wasm_promise = fetch("main.wasm"); @@ -158,7 +157,6 @@ domStatTotalRuns.innerText = totalRuns; domStatUniqueRuns.innerText = uniqueRuns + " (" + percent(uniqueRuns, totalRuns) + "%)"; domStatCoverage.innerText = coveredSourceLocations + " / " + totalSourceLocations + " (" + percent(coveredSourceLocations, totalSourceLocations) + "%)"; - domStatLowestStack.innerText = unwrapString(wasm_exports.lowestStack()); const entryPoints = unwrapInt32Array(wasm_exports.entryPoints()); resizeDomList(domEntryPointsList, entryPoints.length, "
    • "); diff --git a/lib/fuzzer/web/main.zig b/lib/fuzzer/web/main.zig index 342adc3b5608..94ea8cc92f2a 100644 --- a/lib/fuzzer/web/main.zig +++ b/lib/fuzzer/web/main.zig @@ -106,13 +106,6 @@ export fn decl_source_html(decl_index: Decl.Index) String { return String.init(string_result.items); } -export fn lowestStack() String { - const header: *abi.CoverageUpdateHeader = @ptrCast(recent_coverage_update.items[0..@sizeOf(abi.CoverageUpdateHeader)]); - string_result.clearRetainingCapacity(); - string_result.writer(gpa).print("0x{d}", .{header.lowest_stack}) catch @panic("OOM"); - return String.init(string_result.items); -} - export fn totalSourceLocations() usize { return coverage_source_locations.items.len; } diff --git a/lib/std/Build/Fuzz/WebServer.zig b/lib/std/Build/Fuzz/WebServer.zig index b5ad86af15fd..391f67e8231c 100644 --- a/lib/std/Build/Fuzz/WebServer.zig +++ b/lib/std/Build/Fuzz/WebServer.zig @@ -406,7 +406,6 @@ fn sendCoverageContext( const seen_pcs = cov_header.seenBits(); const n_runs = @atomicLoad(usize, &cov_header.n_runs, .monotonic); const unique_runs = @atomicLoad(usize, &cov_header.unique_runs, .monotonic); - const lowest_stack = @atomicLoad(usize, &cov_header.lowest_stack, .monotonic); if (prev_unique_runs.* != unique_runs) { // There has been an update. if (prev_unique_runs.* == 0) { @@ -431,7 +430,6 @@ fn sendCoverageContext( const header: abi.CoverageUpdateHeader = .{ .n_runs = n_runs, .unique_runs = unique_runs, - .lowest_stack = lowest_stack, }; const iovecs: [2]std.posix.iovec_const = .{ makeIov(std.mem.asBytes(&header)), diff --git a/lib/std/Build/Fuzz/abi.zig b/lib/std/Build/Fuzz/abi.zig index 0e16f0d5fa3f..c3f32d309b69 100644 --- a/lib/std/Build/Fuzz/abi.zig +++ b/lib/std/Build/Fuzz/abi.zig @@ -13,7 +13,6 @@ pub const SeenPcsHeader = extern struct { n_runs: usize, unique_runs: usize, pcs_len: usize, - lowest_stack: usize, /// Used for comptime assertions. Provides a mechanism for strategically /// causing compile errors. 
@@ -79,7 +78,6 @@ pub const CoverageUpdateHeader = extern struct { flags: Flags = .{}, n_runs: u64, unique_runs: u64, - lowest_stack: u64, pub const Flags = packed struct(u64) { tag: ToClientTag = .coverage_update, From 9dc75f03e26146cb81fc992baf172202fcd19b17 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 9 Sep 2024 21:27:45 -0700 Subject: [PATCH 131/202] fix init template for new fuzz testing API --- lib/init/src/main.zig | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/lib/init/src/main.zig b/lib/init/src/main.zig index 0c4bb73429c9..ba5a2ccef233 100644 --- a/lib/init/src/main.zig +++ b/lib/init/src/main.zig @@ -27,7 +27,11 @@ test "simple test" { } test "fuzz example" { - // Try passing `--fuzz` to `zig build test` and see if it manages to fail this test case! - const input_bytes = std.testing.fuzzInput(.{}); - try std.testing.expect(!std.mem.eql(u8, "canyoufindme", input_bytes)); + const global = struct { + fn testOne(input: []const u8) anyerror!void { + // Try passing `--fuzz` to `zig build test` and see if it manages to fail this test case! + try std.testing.expect(!std.mem.eql(u8, "canyoufindme", input)); + } + }; + try std.testing.fuzz(global.testOne, .{}); } From e3f58bd5515ffd0039c7f5afde8b9d74dc5a24b5 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 11 Sep 2024 19:53:14 -0700 Subject: [PATCH 132/202] add runs per second to fuzzing ui closes #21025 --- lib/fuzzer/web/index.html | 1 + lib/fuzzer/web/main.js | 5 +++++ lib/fuzzer/web/main.zig | 38 ++++++++++++++++++++++++++++++-- lib/std/Build/Fuzz.zig | 2 ++ lib/std/Build/Fuzz/WebServer.zig | 17 ++++++++++++++ lib/std/Build/Fuzz/abi.zig | 9 ++++++++ 6 files changed, 70 insertions(+), 2 deletions(-) diff --git a/lib/fuzzer/web/index.html b/lib/fuzzer/web/index.html index 0addd9f88288..325342e8ebf5 100644 --- a/lib/fuzzer/web/index.html +++ b/lib/fuzzer/web/index.html @@ -146,6 +146,7 @@
      • Total Runs:
      • Unique Runs:
      • +
      • Speed (Runs/Second):
      • Coverage:
      • Entry Points:
        diff --git a/lib/fuzzer/web/main.js b/lib/fuzzer/web/main.js index 9ee6b445e287..94f09391bb67 100644 --- a/lib/fuzzer/web/main.js +++ b/lib/fuzzer/web/main.js @@ -5,6 +5,7 @@ const domSourceText = document.getElementById("sourceText"); const domStatTotalRuns = document.getElementById("statTotalRuns"); const domStatUniqueRuns = document.getElementById("statUniqueRuns"); + const domStatSpeed = document.getElementById("statSpeed"); const domStatCoverage = document.getElementById("statCoverage"); const domEntryPointsList = document.getElementById("entryPointsList"); @@ -31,6 +32,9 @@ const msg = decodeString(ptr, len); throw new Error("panic: " + msg); }, + timestamp: function () { + return BigInt(new Date()); + }, emitSourceIndexChange: onSourceIndexChange, emitCoverageUpdate: onCoverageUpdate, emitEntryPointsUpdate: renderStats, @@ -157,6 +161,7 @@ domStatTotalRuns.innerText = totalRuns; domStatUniqueRuns.innerText = uniqueRuns + " (" + percent(uniqueRuns, totalRuns) + "%)"; domStatCoverage.innerText = coveredSourceLocations + " / " + totalSourceLocations + " (" + percent(coveredSourceLocations, totalSourceLocations) + "%)"; + domStatSpeed.innerText = wasm_exports.totalRunsPerSecond().toFixed(0); const entryPoints = unwrapInt32Array(wasm_exports.entryPoints()); resizeDomList(domEntryPointsList, entryPoints.length, "
      • "); diff --git a/lib/fuzzer/web/main.zig b/lib/fuzzer/web/main.zig index 94ea8cc92f2a..9c50704e8a71 100644 --- a/lib/fuzzer/web/main.zig +++ b/lib/fuzzer/web/main.zig @@ -10,9 +10,17 @@ const Walk = @import("Walk"); const Decl = Walk.Decl; const html_render = @import("html_render"); +/// Nanoseconds. +var server_base_timestamp: i64 = 0; +/// Milliseconds. +var client_base_timestamp: i64 = 0; +/// Relative to `server_base_timestamp`. +var start_fuzzing_timestamp: i64 = undefined; + const js = struct { extern "js" fn log(ptr: [*]const u8, len: usize) void; extern "js" fn panic(ptr: [*]const u8, len: usize) noreturn; + extern "js" fn timestamp() i64; extern "js" fn emitSourceIndexChange() void; extern "js" fn emitCoverageUpdate() void; extern "js" fn emitEntryPointsUpdate() void; @@ -64,6 +72,7 @@ export fn message_end() void { const tag: abi.ToClientTag = @enumFromInt(msg_bytes[0]); switch (tag) { + .current_time => return currentTimeMessage(msg_bytes), .source_index => return sourceIndexMessage(msg_bytes) catch @panic("OOM"), .coverage_update => return coverageUpdateMessage(msg_bytes) catch @panic("OOM"), .entry_points => return entryPointsMessage(msg_bytes) catch @panic("OOM"), @@ -117,16 +126,28 @@ export fn coveredSourceLocations() usize { return count; } +fn getCoverageUpdateHeader() *abi.CoverageUpdateHeader { + return @alignCast(@ptrCast(recent_coverage_update.items[0..@sizeOf(abi.CoverageUpdateHeader)])); +} + export fn totalRuns() u64 { - const header: *abi.CoverageUpdateHeader = @alignCast(@ptrCast(recent_coverage_update.items[0..@sizeOf(abi.CoverageUpdateHeader)])); + const header = getCoverageUpdateHeader(); return header.n_runs; } export fn uniqueRuns() u64 { - const header: *abi.CoverageUpdateHeader = @alignCast(@ptrCast(recent_coverage_update.items[0..@sizeOf(abi.CoverageUpdateHeader)])); + const header = getCoverageUpdateHeader(); return header.unique_runs; } +export fn totalRunsPerSecond() f64 { + @setFloatMode(.optimized); + const header = 
getCoverageUpdateHeader(); + const ns_elapsed: f64 = @floatFromInt(nsSince(start_fuzzing_timestamp)); + const n_runs: f64 = @floatFromInt(header.n_runs); + return n_runs / (ns_elapsed / std.time.ns_per_s); +} + const String = Slice(u8); fn Slice(T: type) type { @@ -189,6 +210,18 @@ fn fatal(comptime format: []const u8, args: anytype) noreturn { js.panic(line.ptr, line.len); } +fn currentTimeMessage(msg_bytes: []u8) void { + client_base_timestamp = js.timestamp(); + server_base_timestamp = @bitCast(msg_bytes[1..][0..8].*); +} + +/// Nanoseconds passed since a server timestamp. +fn nsSince(server_timestamp: i64) i64 { + const ms_passed = js.timestamp() - client_base_timestamp; + const ns_passed = server_base_timestamp - server_timestamp; + return ns_passed + ms_passed * std.time.ns_per_ms; +} + fn sourceIndexMessage(msg_bytes: []u8) error{OutOfMemory}!void { const Header = abi.SourceIndexHeader; const header: Header = @bitCast(msg_bytes[0..@sizeOf(Header)].*); @@ -205,6 +238,7 @@ fn sourceIndexMessage(msg_bytes: []u8) error{OutOfMemory}!void { const files: []const Coverage.File = @alignCast(std.mem.bytesAsSlice(Coverage.File, msg_bytes[files_start..files_end])); const source_locations: []const Coverage.SourceLocation = @alignCast(std.mem.bytesAsSlice(Coverage.SourceLocation, msg_bytes[source_locations_start..source_locations_end])); + start_fuzzing_timestamp = header.start_timestamp; try updateCoverage(directories, files, source_locations, string_bytes); js.emitSourceIndexChange(); } diff --git a/lib/std/Build/Fuzz.zig b/lib/std/Build/Fuzz.zig index 23f8a0269283..6258f4cddaee 100644 --- a/lib/std/Build/Fuzz.zig +++ b/lib/std/Build/Fuzz.zig @@ -66,6 +66,8 @@ pub fn start( .coverage_files = .{}, .coverage_mutex = .{}, .coverage_condition = .{}, + + .base_timestamp = std.time.nanoTimestamp(), }; // For accepting HTTP connections. 
diff --git a/lib/std/Build/Fuzz/WebServer.zig b/lib/std/Build/Fuzz/WebServer.zig index 391f67e8231c..fb78e96abb02 100644 --- a/lib/std/Build/Fuzz/WebServer.zig +++ b/lib/std/Build/Fuzz/WebServer.zig @@ -33,6 +33,9 @@ coverage_mutex: std.Thread.Mutex, /// Signaled when `coverage_files` changes. coverage_condition: std.Thread.Condition, +/// Time at initialization of WebServer. +base_timestamp: i128, + const fuzzer_bin_name = "fuzzer"; const fuzzer_arch_os_abi = "wasm32-freestanding"; const fuzzer_cpu_features = "baseline+atomics+bulk_memory+multivalue+mutable_globals+nontrapping_fptoint+reference_types+sign_ext"; @@ -43,6 +46,7 @@ const CoverageMap = struct { source_locations: []Coverage.SourceLocation, /// Elements are indexes into `source_locations` pointing to the unit tests that are being fuzz tested. entry_points: std.ArrayListUnmanaged(u32), + start_timestamp: i64, fn deinit(cm: *CoverageMap, gpa: Allocator) void { std.posix.munmap(cm.mapped_memory); @@ -87,6 +91,10 @@ pub fn run(ws: *WebServer) void { } } +fn now(s: *const WebServer) i64 { + return @intCast(std.time.nanoTimestamp() - s.base_timestamp); +} + fn accept(ws: *WebServer, connection: std.net.Server.Connection) void { defer connection.stream.close(); @@ -381,6 +389,13 @@ fn serveWebSocket(ws: *WebServer, web_socket: *std.http.WebSocket) !void { ws.coverage_mutex.lock(); defer ws.coverage_mutex.unlock(); + // On first connection, the client needs to know what time the server + // thinks it is to rebase timestamps. + { + const timestamp_message: abi.CurrentTime = .{ .base = ws.now() }; + try web_socket.writeMessage(std.mem.asBytes(×tamp_message), .binary); + } + // On first connection, the client needs all the coverage information // so that subsequent updates can contain only the updated bits. 
var prev_unique_runs: usize = 0; @@ -416,6 +431,7 @@ fn sendCoverageContext( .files_len = @intCast(coverage_map.coverage.files.entries.len), .source_locations_len = @intCast(coverage_map.source_locations.len), .string_bytes_len = @intCast(coverage_map.coverage.string_bytes.items.len), + .start_timestamp = coverage_map.start_timestamp, }; const iovecs: [5]std.posix.iovec_const = .{ makeIov(std.mem.asBytes(&header)), @@ -582,6 +598,7 @@ fn prepareTables( .mapped_memory = undefined, // populated below .source_locations = undefined, // populated below .entry_points = .{}, + .start_timestamp = ws.now(), }; errdefer gop.value_ptr.coverage.deinit(gpa); diff --git a/lib/std/Build/Fuzz/abi.zig b/lib/std/Build/Fuzz/abi.zig index c3f32d309b69..a6abc13feebb 100644 --- a/lib/std/Build/Fuzz/abi.zig +++ b/lib/std/Build/Fuzz/abi.zig @@ -43,12 +43,19 @@ pub const SeenPcsHeader = extern struct { }; pub const ToClientTag = enum(u8) { + current_time, source_index, coverage_update, entry_points, _, }; +pub const CurrentTime = extern struct { + tag: ToClientTag = .current_time, + /// Number of nanoseconds that all other timestamps are in reference to. + base: i64 align(1), +}; + /// Sent to the fuzzer web client on first connection to the websocket URL. /// /// Trailing: @@ -62,6 +69,8 @@ pub const SourceIndexHeader = extern struct { files_len: u32, source_locations_len: u32, string_bytes_len: u32, + /// When, according to the server, fuzzing started. + start_timestamp: i64 align(4), pub const Flags = packed struct(u32) { tag: ToClientTag = .source_index, From 5cb9668632525d6658dbf1d07202f8e17e339878 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Sun, 8 Sep 2024 04:14:28 +0200 Subject: [PATCH 133/202] test: Re-enable a bunch of behavior tests with LLVM. Closes #10627. Closes #12013. Closes #18034. 
--- test/behavior/align.zig | 4 ---- test/behavior/atomics.zig | 12 ------------ test/behavior/basic.zig | 4 ---- test/behavior/bitcast.zig | 5 ----- test/behavior/call.zig | 6 ++---- test/behavior/floatop.zig | 10 ---------- test/behavior/math.zig | 16 ---------------- test/behavior/shuffle.zig | 5 ----- test/behavior/threadlocal.zig | 12 ------------ test/behavior/vector.zig | 6 ------ 10 files changed, 2 insertions(+), 78 deletions(-) diff --git a/test/behavior/align.zig b/test/behavior/align.zig index 158a079d9fb1..e0d7634c3899 100644 --- a/test/behavior/align.zig +++ b/test/behavior/align.zig @@ -582,10 +582,6 @@ test "comptime alloc alignment" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // flaky - if (builtin.zig_backend == .stage2_llvm and builtin.target.cpu.arch == .x86) { - // https://github.com/ziglang/zig/issues/18034 - return error.SkipZigTest; - } comptime var bytes1 = [_]u8{0}; _ = &bytes1; diff --git a/test/behavior/atomics.zig b/test/behavior/atomics.zig index 503b987a63ac..f2c23877294d 100644 --- a/test/behavior/atomics.zig +++ b/test/behavior/atomics.zig @@ -152,11 +152,6 @@ test "cmpxchg on a global variable" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) { - // https://github.com/ziglang/zig/issues/10627 - return error.SkipZigTest; - } - _ = @cmpxchgWeak(u32, &a_global_variable, 1234, 42, .acquire, .monotonic); try expect(a_global_variable == 42); } @@ -202,10 +197,6 @@ test "atomicrmw with floats" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_llvm and 
builtin.cpu.arch == .aarch64) { - // https://github.com/ziglang/zig/issues/10627 - return error.SkipZigTest; - } try testAtomicRmwFloat(); try comptime testAtomicRmwFloat(); } @@ -306,9 +297,6 @@ test "atomicrmw with 128-bit ints" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - // TODO "ld.lld: undefined symbol: __sync_lock_test_and_set_16" on -mcpu x86_64 - if (builtin.cpu.arch == .x86_64 and builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; - try testAtomicRmwInt128(.signed); try testAtomicRmwInt128(.unsigned); try comptime testAtomicRmwInt128(.signed); diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig index e1ef9ce4e68f..9acfd56d2727 100644 --- a/test/behavior/basic.zig +++ b/test/behavior/basic.zig @@ -1426,10 +1426,6 @@ test "allocation and looping over 3-byte integer" { return error.SkipZigTest; // TODO } - if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .wasm32) { - return error.SkipZigTest; // TODO - } - try expect(@sizeOf(u24) == 4); try expect(@sizeOf([1]u24) == 4); try expect(@alignOf(u24) == 4); diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig index 73f8e7be18cc..7170ddfc5098 100644 --- a/test/behavior/bitcast.zig +++ b/test/behavior/bitcast.zig @@ -343,11 +343,6 @@ test "comptime @bitCast packed struct to int and back" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_llvm and native_endian == .big) { - // https://github.com/ziglang/zig/issues/13782 - return error.SkipZigTest; - } - const S = packed struct { void: void = {}, uint: u8 = 13, diff --git a/test/behavior/call.zig b/test/behavior/call.zig index 5b94a4b07d23..28fa89b748ad 100644 --- a/test/behavior/call.zig +++ b/test/behavior/call.zig @@ -275,8 +275,7 @@ test "forced tail call" { if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend 
== .stage2_llvm) { - // Only attempt this test on targets we know have tail call support in LLVM. - if (builtin.cpu.arch != .x86_64 and builtin.cpu.arch != .aarch64) { + if (builtin.cpu.arch.isMIPS() or builtin.cpu.arch.isPowerPC() or builtin.cpu.arch.isWasm()) { return error.SkipZigTest; } } @@ -311,8 +310,7 @@ test "inline call preserves tail call" { if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm) { - // Only attempt this test on targets we know have tail call support in LLVM. - if (builtin.cpu.arch != .x86_64 and builtin.cpu.arch != .aarch64) { + if (builtin.cpu.arch.isMIPS() or builtin.cpu.arch.isPowerPC() or builtin.cpu.arch.isWasm()) { return error.SkipZigTest; } } diff --git a/test/behavior/floatop.zig b/test/behavior/floatop.zig index d799c9133119..69dacb66fd5d 100644 --- a/test/behavior/floatop.zig +++ b/test/behavior/floatop.zig @@ -1310,11 +1310,6 @@ test "@trunc f16" { if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isMIPS()) { - // https://github.com/ziglang/zig/issues/16846 - return error.SkipZigTest; - } - try testTrunc(f16); try comptime testTrunc(f16); } @@ -1326,11 +1321,6 @@ test "@trunc f32/f64" { if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isMIPS()) { - // https://github.com/ziglang/zig/issues/16846 - return error.SkipZigTest; - } - try testTrunc(f32); try comptime testTrunc(f32); try testTrunc(f64); diff --git a/test/behavior/math.zig b/test/behavior/math.zig index c658ea3081df..5ee07b9e9829 100644 --- a/test/behavior/math.zig +++ 
b/test/behavior/math.zig @@ -194,12 +194,6 @@ test "@ctz vectors" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) { - // This regressed with LLVM 14: - // https://github.com/ziglang/zig/issues/12013 - return error.SkipZigTest; - } - try testCtzVectors(); try comptime testCtzVectors(); } @@ -478,11 +472,6 @@ test "division" { if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isMIPS()) { - // https://github.com/ziglang/zig/issues/16846 - return error.SkipZigTest; - } - try testIntDivision(); try comptime testIntDivision(); @@ -721,11 +710,6 @@ test "unsigned 64-bit division" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isMIPS()) { - // https://github.com/ziglang/zig/issues/16846 - return error.SkipZigTest; - } - try test_u64_div(); try comptime test_u64_div(); } diff --git a/test/behavior/shuffle.zig b/test/behavior/shuffle.zig index 2bcdbd1581bc..08b01b526276 100644 --- a/test/behavior/shuffle.zig +++ b/test/behavior/shuffle.zig @@ -170,11 +170,6 @@ test "@shuffle bool 2" { if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_llvm) { - // https://github.com/ziglang/zig/issues/3246 - return error.SkipZigTest; - } - const S = struct { fn doTheTest() !void { var x: @Vector(3, bool) = [3]bool{ false, true, false }; diff --git a/test/behavior/threadlocal.zig 
b/test/behavior/threadlocal.zig index f91e10d12d2c..4052733e92ac 100644 --- a/test/behavior/threadlocal.zig +++ b/test/behavior/threadlocal.zig @@ -6,10 +6,6 @@ test "thread local variable" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_llvm) switch (builtin.cpu.arch) { - .x86_64, .x86 => {}, - else => return error.SkipZigTest, - }; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64 and builtin.os.tag == .macos) { @@ -28,10 +24,6 @@ test "pointer to thread local array" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_llvm) switch (builtin.cpu.arch) { - .x86_64, .x86 => {}, - else => return error.SkipZigTest, - }; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO const s = "Hello world"; @@ -45,10 +37,6 @@ test "reference a global threadlocal variable" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_llvm) switch (builtin.cpu.arch) { - .x86_64, .x86 => {}, - else => return error.SkipZigTest, - }; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO _ = nrfx_uart_rx(&g_uart0); diff --git a/test/behavior/vector.zig b/test/behavior/vector.zig index 13a51394243c..8bec930087a3 100644 --- a/test/behavior/vector.zig +++ b/test/behavior/vector.zig @@ -557,7 +557,6 @@ test "vector division 
operators" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_llvm and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest; if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; @@ -1571,11 +1570,6 @@ test "@reduce on bool vector" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (comptime builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.endian() == .big) { - // https://github.com/ziglang/zig/issues/13782 - return error.SkipZigTest; - } - const a = @Vector(2, bool){ true, true }; const b = @Vector(1, bool){true}; try std.testing.expect(@reduce(.And, a)); From 8588964972acc473c09e21958a7e52247c978603 Mon Sep 17 00:00:00 2001 From: Linus Groh Date: Mon, 2 Sep 2024 22:32:21 +0100 Subject: [PATCH 134/202] Replace deprecated default initializations with decl literals --- doc/langref/wasi_args.zig | 2 +- doc/langref/wasi_preopens.zig | 2 +- lib/compiler/aro/aro/CodeGen.zig | 10 +-- lib/compiler/aro/aro/Compilation.zig | 10 +-- lib/compiler/aro/aro/Diagnostics.zig | 2 +- lib/compiler/aro/aro/Driver.zig | 4 +- lib/compiler/aro/aro/Hideset.zig | 4 +- lib/compiler/aro/aro/InitList.zig | 2 +- lib/compiler/aro/aro/Parser.zig | 6 +- lib/compiler/aro/aro/Preprocessor.zig | 2 +- lib/compiler/aro/aro/SymbolStack.zig | 6 +- lib/compiler/aro/aro/pragmas/gcc.zig | 2 +- lib/compiler/aro/aro/pragmas/pack.zig | 2 +- lib/compiler/aro/aro/toolchains/Linux.zig | 2 +- lib/compiler/aro/backend/Interner.zig | 8 +- lib/compiler/aro/backend/Ir.zig | 4 +- lib/compiler/aro/backend/Object/Elf.zig | 8 +- lib/compiler/aro_translate_c.zig | 16 ++-- 
lib/compiler/aro_translate_c/ast.zig | 2 +- lib/compiler/build_runner.zig | 4 +- lib/compiler/objcopy.zig | 2 +- lib/compiler/reduce.zig | 4 +- lib/compiler/resinator/ast.zig | 2 +- lib/compiler/resinator/cli.zig | 8 +- lib/compiler/resinator/compile.zig | 10 +-- lib/compiler/resinator/errors.zig | 6 +- lib/compiler/resinator/main.zig | 10 +-- lib/compiler/resinator/parse.zig | 30 +++---- lib/compiler/resinator/source_mapping.zig | 6 +- lib/compiler/std-docs.zig | 4 +- lib/compiler/test_runner.zig | 2 +- lib/docs/wasm/Walk.zig | 20 ++--- lib/docs/wasm/html_render.zig | 2 +- lib/docs/wasm/main.zig | 28 +++--- lib/docs/wasm/markdown/Parser.zig | 14 +-- lib/fuzzer.zig | 2 +- lib/fuzzer/web/main.zig | 20 ++--- lib/std/Build.zig | 4 +- lib/std/Build/Fuzz.zig | 2 +- lib/std/Build/Fuzz/WebServer.zig | 2 +- lib/std/Build/Step.zig | 2 +- lib/std/Build/Step/CheckObject.zig | 16 ++-- lib/std/Build/Step/Compile.zig | 4 +- lib/std/Build/Step/Fmt.zig | 2 +- lib/std/Build/Step/Run.zig | 2 +- lib/std/array_hash_map.zig | 6 +- lib/std/array_list.zig | 64 +++++++------- lib/std/crypto/Certificate/Bundle.zig | 4 +- lib/std/debug/Dwarf.zig | 16 ++-- lib/std/debug/Dwarf/expression.zig | 2 +- lib/std/debug/SelfInfo.zig | 4 +- lib/std/fs/Dir.zig | 2 +- lib/std/fs/wasi.zig | 2 +- lib/std/hash/benchmark.zig | 2 +- lib/std/hash_map.zig | 14 +-- lib/std/json/hashmap.zig | 6 +- lib/std/process/Child.zig | 4 +- lib/std/tar.zig | 2 +- lib/std/testing.zig | 4 +- lib/std/zig/AstGen.zig | 40 ++++----- lib/std/zig/ErrorBundle.zig | 2 +- lib/std/zig/WindowsSdk.zig | 2 +- lib/std/zig/Zir.zig | 4 +- lib/std/zig/render.zig | 14 +-- lib/std/zig/system/NativePaths.zig | 10 +-- src/Compilation.zig | 46 +++++----- src/InternPool.zig | 18 ++-- src/Liveness.zig | 18 ++-- src/Liveness/Verify.zig | 4 +- src/Package/Fetch.zig | 8 +- src/Package/Fetch/git.zig | 22 ++--- src/Sema.zig | 42 ++++----- src/Zcu.zig | 88 +++++++++---------- src/Zcu/PerThread.zig | 12 +-- src/arch/aarch64/CodeGen.zig | 12 +-- 
src/arch/aarch64/Emit.zig | 8 +- src/arch/arm/CodeGen.zig | 12 +-- src/arch/arm/Emit.zig | 8 +- src/arch/riscv64/CodeGen.zig | 14 +-- src/arch/riscv64/Emit.zig | 4 +- src/arch/sparc64/CodeGen.zig | 10 +-- src/arch/sparc64/Emit.zig | 8 +- src/arch/wasm/CodeGen.zig | 18 ++-- src/arch/x86_64/CodeGen.zig | 14 +-- src/arch/x86_64/Emit.zig | 4 +- src/codegen/c.zig | 8 +- src/codegen/llvm.zig | 18 ++-- src/codegen/llvm/Builder.zig | 12 +-- src/codegen/spirv.zig | 20 ++--- src/codegen/spirv/Assembler.zig | 10 +-- src/codegen/spirv/Module.zig | 20 ++--- src/codegen/spirv/Section.zig | 2 +- src/link/C.zig | 30 +++---- src/link/Coff.zig | 26 +++--- src/link/Coff/ImportTable.zig | 6 +- src/link/Elf.zig | 38 ++++---- src/link/Elf/Archive.zig | 8 +- src/link/Elf/AtomList.zig | 2 +- src/link/Elf/LdScript.zig | 2 +- src/link/Elf/LinkerDefined.zig | 12 +-- src/link/Elf/Object.zig | 34 +++---- src/link/Elf/SharedObject.zig | 18 ++-- src/link/Elf/Thunk.zig | 2 +- src/link/Elf/ZigObject.zig | 26 +++--- src/link/Elf/merge_section.zig | 14 +-- src/link/Elf/synthetic_sections.zig | 18 ++-- src/link/MachO.zig | 42 ++++----- src/link/MachO/Archive.zig | 4 +- src/link/MachO/CodeSignature.zig | 2 +- src/link/MachO/DebugSymbols.zig | 10 +-- src/link/MachO/Dylib.zig | 14 +-- src/link/MachO/InternalObject.zig | 26 +++--- src/link/MachO/Object.zig | 34 +++---- src/link/MachO/Thunk.zig | 2 +- src/link/MachO/UnwindInfo.zig | 8 +- src/link/MachO/ZigObject.zig | 18 ++-- src/link/MachO/dyld_info/Rebase.zig | 4 +- src/link/MachO/dyld_info/Trie.zig | 6 +- src/link/MachO/dyld_info/bind.zig | 14 +-- src/link/MachO/synthetic.zig | 10 +-- src/link/Plan9.zig | 24 ++--- src/link/SpirV/BinaryModule.zig | 2 +- src/link/SpirV/deduplicate.zig | 4 +- src/link/SpirV/lower_invocation_globals.zig | 4 +- src/link/StringTable.zig | 4 +- src/link/Wasm.zig | 46 +++++----- src/link/Wasm/Archive.zig | 2 +- src/link/Wasm/Atom.zig | 6 +- src/link/Wasm/Object.zig | 6 +- src/link/Wasm/ZigObject.zig | 30 +++---- 
src/link/table_section.zig | 6 +- src/link/tapi/parse.zig | 8 +- src/main.zig | 30 +++---- src/register_manager.zig | 2 +- src/translate_c.zig | 12 +-- test/behavior/fn.zig | 2 +- test/compare_output.zig | 6 +- test/standalone/coff_dwarf/main.zig | 2 +- test/standalone/empty_env/main.zig | 2 +- test/standalone/load_dynamic_library/main.zig | 2 +- .../self_exe_symlink/create-symlink.zig | 2 +- test/standalone/self_exe_symlink/main.zig | 2 +- test/standalone/simple/brace_expansion.zig | 2 +- test/standalone/windows_argv/fuzz.zig | 2 +- test/standalone/windows_bat_args/fuzz.zig | 2 +- test/standalone/windows_bat_args/test.zig | 2 +- test/standalone/windows_spawn/main.zig | 2 +- tools/doctest.zig | 4 +- tools/dump-cov.zig | 4 +- tools/generate_JSONTestSuite.zig | 2 +- tools/generate_c_size_and_align_checks.zig | 2 +- tools/incr-check.zig | 8 +- 152 files changed, 815 insertions(+), 815 deletions(-) diff --git a/doc/langref/wasi_args.zig b/doc/langref/wasi_args.zig index b1e7e7cbb15d..6801e67f0c7a 100644 --- a/doc/langref/wasi_args.zig +++ b/doc/langref/wasi_args.zig @@ -1,7 +1,7 @@ const std = @import("std"); pub fn main() !void { - var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){}; + var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init; const gpa = general_purpose_allocator.allocator(); const args = try std.process.argsAlloc(gpa); defer std.process.argsFree(gpa, args); diff --git a/doc/langref/wasi_preopens.zig b/doc/langref/wasi_preopens.zig index 2a2f8dcd1bca..5a167bc8dbd9 100644 --- a/doc/langref/wasi_preopens.zig +++ b/doc/langref/wasi_preopens.zig @@ -2,7 +2,7 @@ const std = @import("std"); const fs = std.fs; pub fn main() !void { - var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){}; + var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init; const gpa = general_purpose_allocator.allocator(); var arena_instance = std.heap.ArenaAllocator.init(gpa); diff --git 
a/lib/compiler/aro/aro/CodeGen.zig b/lib/compiler/aro/aro/CodeGen.zig index 0c87e7f744df..bfffb4117eea 100644 --- a/lib/compiler/aro/aro/CodeGen.zig +++ b/lib/compiler/aro/aro/CodeGen.zig @@ -42,11 +42,11 @@ node_tag: []const Tree.Tag, node_data: []const Tree.Node.Data, node_ty: []const Type, wip_switch: *WipSwitch = undefined, -symbols: std.ArrayListUnmanaged(Symbol) = .{}, -ret_nodes: std.ArrayListUnmanaged(Ir.Inst.Phi.Input) = .{}, -phi_nodes: std.ArrayListUnmanaged(Ir.Inst.Phi.Input) = .{}, -record_elem_buf: std.ArrayListUnmanaged(Interner.Ref) = .{}, -record_cache: std.AutoHashMapUnmanaged(*Type.Record, Interner.Ref) = .{}, +symbols: std.ArrayListUnmanaged(Symbol) = .empty, +ret_nodes: std.ArrayListUnmanaged(Ir.Inst.Phi.Input) = .empty, +phi_nodes: std.ArrayListUnmanaged(Ir.Inst.Phi.Input) = .empty, +record_elem_buf: std.ArrayListUnmanaged(Interner.Ref) = .empty, +record_cache: std.AutoHashMapUnmanaged(*Type.Record, Interner.Ref) = .empty, cond_dummy_ty: ?Interner.Ref = null, bool_invert: bool = false, bool_end_label: Ir.Ref = .none, diff --git a/lib/compiler/aro/aro/Compilation.zig b/lib/compiler/aro/aro/Compilation.zig index 6093bdc509f3..22ca9c00ed89 100644 --- a/lib/compiler/aro/aro/Compilation.zig +++ b/lib/compiler/aro/aro/Compilation.zig @@ -93,13 +93,13 @@ gpa: Allocator, diagnostics: Diagnostics, environment: Environment = .{}, -sources: std.StringArrayHashMapUnmanaged(Source) = .{}, -include_dirs: std.ArrayListUnmanaged([]const u8) = .{}, -system_include_dirs: std.ArrayListUnmanaged([]const u8) = .{}, +sources: std.StringArrayHashMapUnmanaged(Source) = .empty, +include_dirs: std.ArrayListUnmanaged([]const u8) = .empty, +system_include_dirs: std.ArrayListUnmanaged([]const u8) = .empty, target: std.Target = @import("builtin").target, -pragma_handlers: std.StringArrayHashMapUnmanaged(*Pragma) = .{}, +pragma_handlers: std.StringArrayHashMapUnmanaged(*Pragma) = .empty, langopts: LangOpts = .{}, -generated_buf: std.ArrayListUnmanaged(u8) = .{}, 
+generated_buf: std.ArrayListUnmanaged(u8) = .empty, builtins: Builtins = .{}, types: struct { wchar: Type = undefined, diff --git a/lib/compiler/aro/aro/Diagnostics.zig b/lib/compiler/aro/aro/Diagnostics.zig index 3039a45ef821..eb3bb31ee800 100644 --- a/lib/compiler/aro/aro/Diagnostics.zig +++ b/lib/compiler/aro/aro/Diagnostics.zig @@ -221,7 +221,7 @@ pub const Options = struct { const Diagnostics = @This(); -list: std.ArrayListUnmanaged(Message) = .{}, +list: std.ArrayListUnmanaged(Message) = .empty, arena: std.heap.ArenaAllocator, fatal_errors: bool = false, options: Options = .{}, diff --git a/lib/compiler/aro/aro/Driver.zig b/lib/compiler/aro/aro/Driver.zig index 7bdfd2c81e6e..c89dafe00222 100644 --- a/lib/compiler/aro/aro/Driver.zig +++ b/lib/compiler/aro/aro/Driver.zig @@ -25,8 +25,8 @@ pub const Linker = enum { const Driver = @This(); comp: *Compilation, -inputs: std.ArrayListUnmanaged(Source) = .{}, -link_objects: std.ArrayListUnmanaged([]const u8) = .{}, +inputs: std.ArrayListUnmanaged(Source) = .empty, +link_objects: std.ArrayListUnmanaged([]const u8) = .empty, output_name: ?[]const u8 = null, sysroot: ?[]const u8 = null, system_defines: Compilation.SystemDefinesMode = .include_system_defines, diff --git a/lib/compiler/aro/aro/Hideset.zig b/lib/compiler/aro/aro/Hideset.zig index ad8a089ae629..98712e41e2f4 100644 --- a/lib/compiler/aro/aro/Hideset.zig +++ b/lib/compiler/aro/aro/Hideset.zig @@ -51,10 +51,10 @@ pub const Index = enum(u32) { _, }; -map: std.AutoHashMapUnmanaged(Identifier, Index) = .{}, +map: std.AutoHashMapUnmanaged(Identifier, Index) = .empty, /// Used for computing union/intersection of two lists; stored here so that allocations can be retained /// until hideset is deinit'ed -tmp_map: std.AutoHashMapUnmanaged(Identifier, void) = .{}, +tmp_map: std.AutoHashMapUnmanaged(Identifier, void) = .empty, linked_list: Item.List = .{}, comp: *const Compilation, diff --git a/lib/compiler/aro/aro/InitList.zig b/lib/compiler/aro/aro/InitList.zig index 
7e9f73e8a339..5a5765216ccf 100644 --- a/lib/compiler/aro/aro/InitList.zig +++ b/lib/compiler/aro/aro/InitList.zig @@ -23,7 +23,7 @@ const Item = struct { const InitList = @This(); -list: std.ArrayListUnmanaged(Item) = .{}, +list: std.ArrayListUnmanaged(Item) = .empty, node: NodeIndex = .none, tok: TokenIndex = 0, diff --git a/lib/compiler/aro/aro/Parser.zig b/lib/compiler/aro/aro/Parser.zig index 0a8907b23a74..00857c65e16a 100644 --- a/lib/compiler/aro/aro/Parser.zig +++ b/lib/compiler/aro/aro/Parser.zig @@ -109,7 +109,7 @@ param_buf: std.ArrayList(Type.Func.Param), enum_buf: std.ArrayList(Type.Enum.Field), record_buf: std.ArrayList(Type.Record.Field), attr_buf: std.MultiArrayList(TentativeAttribute) = .{}, -attr_application_buf: std.ArrayListUnmanaged(Attribute) = .{}, +attr_application_buf: std.ArrayListUnmanaged(Attribute) = .empty, field_attr_buf: std.ArrayList([]const Attribute), /// type name -> variable name location for tentative definitions (top-level defs with thus-far-incomplete types) /// e.g. `struct Foo bar;` where `struct Foo` is not defined yet. @@ -117,7 +117,7 @@ field_attr_buf: std.ArrayList([]const Attribute), /// Items are removed if the type is subsequently completed with a definition. /// We only store the first tentative definition that uses a given type because this map is only used /// for issuing an error message, and correcting the first error for a type will fix all of them for that type. 
-tentative_defs: std.AutoHashMapUnmanaged(StringId, TokenIndex) = .{}, +tentative_defs: std.AutoHashMapUnmanaged(StringId, TokenIndex) = .empty, // configuration and miscellaneous info no_eval: bool = false, @@ -174,7 +174,7 @@ record: struct { } } } = .{}, -record_members: std.ArrayListUnmanaged(struct { tok: TokenIndex, name: StringId }) = .{}, +record_members: std.ArrayListUnmanaged(struct { tok: TokenIndex, name: StringId }) = .empty, @"switch": ?*Switch = null, in_loop: bool = false, pragma_pack: ?u8 = null, diff --git a/lib/compiler/aro/aro/Preprocessor.zig b/lib/compiler/aro/aro/Preprocessor.zig index 63bf0858369d..a8eb74a4a8c7 100644 --- a/lib/compiler/aro/aro/Preprocessor.zig +++ b/lib/compiler/aro/aro/Preprocessor.zig @@ -95,7 +95,7 @@ counter: u32 = 0, expansion_source_loc: Source.Location = undefined, poisoned_identifiers: std.StringHashMap(void), /// Map from Source.Id to macro name in the `#ifndef` condition which guards the source, if any -include_guards: std.AutoHashMapUnmanaged(Source.Id, []const u8) = .{}, +include_guards: std.AutoHashMapUnmanaged(Source.Id, []const u8) = .empty, /// Store `keyword_define` and `keyword_undef` tokens. /// Used to implement preprocessor debug dump options diff --git a/lib/compiler/aro/aro/SymbolStack.zig b/lib/compiler/aro/aro/SymbolStack.zig index be2ee20cb03d..4c01e3d3567f 100644 --- a/lib/compiler/aro/aro/SymbolStack.zig +++ b/lib/compiler/aro/aro/SymbolStack.zig @@ -33,14 +33,14 @@ pub const Kind = enum { constexpr, }; -scopes: std.ArrayListUnmanaged(Scope) = .{}, +scopes: std.ArrayListUnmanaged(Scope) = .empty, /// allocations from nested scopes are retained after popping; `active_len` is the number /// of currently-active items in `scopes`. 
active_len: usize = 0, const Scope = struct { - vars: std.AutoHashMapUnmanaged(StringId, Symbol) = .{}, - tags: std.AutoHashMapUnmanaged(StringId, Symbol) = .{}, + vars: std.AutoHashMapUnmanaged(StringId, Symbol) = .empty, + tags: std.AutoHashMapUnmanaged(StringId, Symbol) = .empty, fn deinit(self: *Scope, allocator: Allocator) void { self.vars.deinit(allocator); diff --git a/lib/compiler/aro/aro/pragmas/gcc.zig b/lib/compiler/aro/aro/pragmas/gcc.zig index 91ab750b4c81..a382f4daac75 100644 --- a/lib/compiler/aro/aro/pragmas/gcc.zig +++ b/lib/compiler/aro/aro/pragmas/gcc.zig @@ -19,7 +19,7 @@ pragma: Pragma = .{ .preserveTokens = preserveTokens, }, original_options: Diagnostics.Options = .{}, -options_stack: std.ArrayListUnmanaged(Diagnostics.Options) = .{}, +options_stack: std.ArrayListUnmanaged(Diagnostics.Options) = .empty, const Directive = enum { warning, diff --git a/lib/compiler/aro/aro/pragmas/pack.zig b/lib/compiler/aro/aro/pragmas/pack.zig index 81d1dbc59a1c..24cfcc81a2a9 100644 --- a/lib/compiler/aro/aro/pragmas/pack.zig +++ b/lib/compiler/aro/aro/pragmas/pack.zig @@ -15,7 +15,7 @@ pragma: Pragma = .{ .parserHandler = parserHandler, .preserveTokens = preserveTokens, }, -stack: std.ArrayListUnmanaged(struct { label: []const u8, val: u8 }) = .{}, +stack: std.ArrayListUnmanaged(struct { label: []const u8, val: u8 }) = .empty, pub fn init(allocator: mem.Allocator) !*Pragma { var pack = try allocator.create(Pack); diff --git a/lib/compiler/aro/aro/toolchains/Linux.zig b/lib/compiler/aro/aro/toolchains/Linux.zig index a7d8c71bef89..9666c8e3b062 100644 --- a/lib/compiler/aro/aro/toolchains/Linux.zig +++ b/lib/compiler/aro/aro/toolchains/Linux.zig @@ -11,7 +11,7 @@ const system_defaults = @import("system_defaults"); const Linux = @This(); distro: Distro.Tag = .unknown, -extra_opts: std.ArrayListUnmanaged([]const u8) = .{}, +extra_opts: std.ArrayListUnmanaged([]const u8) = .empty, gcc_detector: GCCDetector = .{}, pub fn discover(self: *Linux, tc: *Toolchain) !void 
{ diff --git a/lib/compiler/aro/backend/Interner.zig b/lib/compiler/aro/backend/Interner.zig index 631ec8ee16be..818afe869116 100644 --- a/lib/compiler/aro/backend/Interner.zig +++ b/lib/compiler/aro/backend/Interner.zig @@ -8,14 +8,14 @@ const Limb = std.math.big.Limb; const Interner = @This(); -map: std.AutoArrayHashMapUnmanaged(void, void) = .{}, +map: std.AutoArrayHashMapUnmanaged(void, void) = .empty, items: std.MultiArrayList(struct { tag: Tag, data: u32, }) = .{}, -extra: std.ArrayListUnmanaged(u32) = .{}, -limbs: std.ArrayListUnmanaged(Limb) = .{}, -strings: std.ArrayListUnmanaged(u8) = .{}, +extra: std.ArrayListUnmanaged(u32) = .empty, +limbs: std.ArrayListUnmanaged(Limb) = .empty, +strings: std.ArrayListUnmanaged(u8) = .empty, const KeyAdapter = struct { interner: *const Interner, diff --git a/lib/compiler/aro/backend/Ir.zig b/lib/compiler/aro/backend/Ir.zig index e694a23c9ae1..e90bf56cbd20 100644 --- a/lib/compiler/aro/backend/Ir.zig +++ b/lib/compiler/aro/backend/Ir.zig @@ -26,9 +26,9 @@ pub const Builder = struct { arena: std.heap.ArenaAllocator, interner: *Interner, - decls: std.StringArrayHashMapUnmanaged(Decl) = .{}, + decls: std.StringArrayHashMapUnmanaged(Decl) = .empty, instructions: std.MultiArrayList(Ir.Inst) = .{}, - body: std.ArrayListUnmanaged(Ref) = .{}, + body: std.ArrayListUnmanaged(Ref) = .empty, alloc_count: u32 = 0, arg_count: u32 = 0, current_label: Ref = undefined, diff --git a/lib/compiler/aro/backend/Object/Elf.zig b/lib/compiler/aro/backend/Object/Elf.zig index 2a303d348cb7..9b4f347de575 100644 --- a/lib/compiler/aro/backend/Object/Elf.zig +++ b/lib/compiler/aro/backend/Object/Elf.zig @@ -5,7 +5,7 @@ const Object = @import("../Object.zig"); const Section = struct { data: std.ArrayList(u8), - relocations: std.ArrayListUnmanaged(Relocation) = .{}, + relocations: std.ArrayListUnmanaged(Relocation) = .empty, flags: u64, type: u32, index: u16 = undefined, @@ -37,9 +37,9 @@ const Elf = @This(); obj: Object, /// The keys are owned by the 
Codegen.tree -sections: std.StringHashMapUnmanaged(*Section) = .{}, -local_symbols: std.StringHashMapUnmanaged(*Symbol) = .{}, -global_symbols: std.StringHashMapUnmanaged(*Symbol) = .{}, +sections: std.StringHashMapUnmanaged(*Section) = .empty, +local_symbols: std.StringHashMapUnmanaged(*Symbol) = .empty, +global_symbols: std.StringHashMapUnmanaged(*Symbol) = .empty, unnamed_symbol_mangle: u32 = 0, strtab_len: u64 = strtab_default.len, arena: std.heap.ArenaAllocator, diff --git a/lib/compiler/aro_translate_c.zig b/lib/compiler/aro_translate_c.zig index 4255989416df..910d12d32be6 100644 --- a/lib/compiler/aro_translate_c.zig +++ b/lib/compiler/aro_translate_c.zig @@ -16,22 +16,22 @@ const Context = @This(); gpa: mem.Allocator, arena: mem.Allocator, -decl_table: std.AutoArrayHashMapUnmanaged(usize, []const u8) = .{}, +decl_table: std.AutoArrayHashMapUnmanaged(usize, []const u8) = .empty, alias_list: AliasList, global_scope: *Scope.Root, mangle_count: u32 = 0, /// Table of record decls that have been demoted to opaques. -opaque_demotes: std.AutoHashMapUnmanaged(usize, void) = .{}, +opaque_demotes: std.AutoHashMapUnmanaged(usize, void) = .empty, /// Table of unnamed enums and records that are child types of typedefs. -unnamed_typedefs: std.AutoHashMapUnmanaged(usize, []const u8) = .{}, +unnamed_typedefs: std.AutoHashMapUnmanaged(usize, []const u8) = .empty, /// Needed to decide if we are parsing a typename -typedefs: std.StringArrayHashMapUnmanaged(void) = .{}, +typedefs: std.StringArrayHashMapUnmanaged(void) = .empty, /// This one is different than the root scope's name table. This contains /// a list of names that we found by visiting all the top level decls without /// translating them. The other maps are updated as we translate; this one is updated /// up front in a pre-processing step. 
-global_names: std.StringArrayHashMapUnmanaged(void) = .{}, +global_names: std.StringArrayHashMapUnmanaged(void) = .empty, /// This is similar to `global_names`, but contains names which we would /// *like* to use, but do not strictly *have* to if they are unavailable. @@ -40,7 +40,7 @@ global_names: std.StringArrayHashMapUnmanaged(void) = .{}, /// may be mangled. /// This is distinct from `global_names` so we can detect at a type /// declaration whether or not the name is available. -weak_global_names: std.StringArrayHashMapUnmanaged(void) = .{}, +weak_global_names: std.StringArrayHashMapUnmanaged(void) = .empty, pattern_list: PatternList, tree: Tree, @@ -697,7 +697,7 @@ fn transEnumDecl(c: *Context, scope: *Scope, enum_decl: *const Type.Enum, field_ } fn getTypeStr(c: *Context, ty: Type) ![]const u8 { - var buf: std.ArrayListUnmanaged(u8) = .{}; + var buf: std.ArrayListUnmanaged(u8) = .empty; defer buf.deinit(c.gpa); const w = buf.writer(c.gpa); try ty.print(c.mapper, c.comp.langopts, w); @@ -1793,7 +1793,7 @@ pub fn main() !void { defer arena_instance.deinit(); const arena = arena_instance.allocator(); - var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .{}; + var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init; const gpa = general_purpose_allocator.allocator(); const args = try std.process.argsAlloc(arena); diff --git a/lib/compiler/aro_translate_c/ast.zig b/lib/compiler/aro_translate_c/ast.zig index 853fcb748cb5..8cd331d2940d 100644 --- a/lib/compiler/aro_translate_c/ast.zig +++ b/lib/compiler/aro_translate_c/ast.zig @@ -808,7 +808,7 @@ const Context = struct { gpa: Allocator, buf: std.ArrayList(u8), nodes: std.zig.Ast.NodeList = .{}, - extra_data: std.ArrayListUnmanaged(std.zig.Ast.Node.Index) = .{}, + extra_data: std.ArrayListUnmanaged(std.zig.Ast.Node.Index) = .empty, tokens: std.zig.Ast.TokenList = .{}, fn addTokenFmt(c: *Context, tag: TokenTag, comptime format: []const u8, args: anytype) 
Allocator.Error!TokenIndex { diff --git a/lib/compiler/build_runner.zig b/lib/compiler/build_runner.zig index 690c93754553..4d643222d723 100644 --- a/lib/compiler/build_runner.zig +++ b/lib/compiler/build_runner.zig @@ -336,7 +336,7 @@ pub fn main() !void { } if (graph.needed_lazy_dependencies.entries.len != 0) { - var buffer: std.ArrayListUnmanaged(u8) = .{}; + var buffer: std.ArrayListUnmanaged(u8) = .empty; for (graph.needed_lazy_dependencies.keys()) |k| { try buffer.appendSlice(arena, k); try buffer.append(arena, '\n'); @@ -1173,7 +1173,7 @@ pub fn printErrorMessages( // Provide context for where these error messages are coming from by // printing the corresponding Step subtree. - var step_stack: std.ArrayListUnmanaged(*Step) = .{}; + var step_stack: std.ArrayListUnmanaged(*Step) = .empty; defer step_stack.deinit(gpa); try step_stack.append(gpa, failing_step); while (step_stack.items[step_stack.items.len - 1].dependants.items.len != 0) { diff --git a/lib/compiler/objcopy.zig b/lib/compiler/objcopy.zig index b48fb52e821b..ac609c94e5fc 100644 --- a/lib/compiler/objcopy.zig +++ b/lib/compiler/objcopy.zig @@ -15,7 +15,7 @@ pub fn main() !void { defer arena_instance.deinit(); const arena = arena_instance.allocator(); - var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .{}; + var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init; const gpa = general_purpose_allocator.allocator(); const args = try std.process.argsAlloc(arena); diff --git a/lib/compiler/reduce.zig b/lib/compiler/reduce.zig index ad536376da61..826c2bccf7e1 100644 --- a/lib/compiler/reduce.zig +++ b/lib/compiler/reduce.zig @@ -51,7 +51,7 @@ pub fn main() !void { defer arena_instance.deinit(); const arena = arena_instance.allocator(); - var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .{}; + var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init; const gpa = general_purpose_allocator.allocator(); const args = try 
std.process.argsAlloc(arena); @@ -109,7 +109,7 @@ pub fn main() !void { const root_source_file_path = opt_root_source_file_path orelse fatal("missing root source file path argument; see -h for usage", .{}); - var interestingness_argv: std.ArrayListUnmanaged([]const u8) = .{}; + var interestingness_argv: std.ArrayListUnmanaged([]const u8) = .empty; try interestingness_argv.ensureUnusedCapacity(arena, argv.len + 1); interestingness_argv.appendAssumeCapacity(checker_path); interestingness_argv.appendSliceAssumeCapacity(argv); diff --git a/lib/compiler/resinator/ast.zig b/lib/compiler/resinator/ast.zig index d55d91e52c64..31250ea71e2c 100644 --- a/lib/compiler/resinator/ast.zig +++ b/lib/compiler/resinator/ast.zig @@ -28,7 +28,7 @@ pub const Tree = struct { }; pub const CodePageLookup = struct { - lookup: std.ArrayListUnmanaged(CodePage) = .{}, + lookup: std.ArrayListUnmanaged(CodePage) = .empty, allocator: Allocator, default_code_page: CodePage, diff --git a/lib/compiler/resinator/cli.zig b/lib/compiler/resinator/cli.zig index 6aebf9a0229d..1223b069d76d 100644 --- a/lib/compiler/resinator/cli.zig +++ b/lib/compiler/resinator/cli.zig @@ -70,13 +70,13 @@ pub fn writeUsage(writer: anytype, command_name: []const u8) !void { } pub const Diagnostics = struct { - errors: std.ArrayListUnmanaged(ErrorDetails) = .{}, + errors: std.ArrayListUnmanaged(ErrorDetails) = .empty, allocator: Allocator, pub const ErrorDetails = struct { arg_index: usize, arg_span: ArgSpan = .{}, - msg: std.ArrayListUnmanaged(u8) = .{}, + msg: std.ArrayListUnmanaged(u8) = .empty, type: Type = .err, print_args: bool = true, @@ -132,13 +132,13 @@ pub const Options = struct { allocator: Allocator, input_filename: []const u8 = &[_]u8{}, output_filename: []const u8 = &[_]u8{}, - extra_include_paths: std.ArrayListUnmanaged([]const u8) = .{}, + extra_include_paths: std.ArrayListUnmanaged([]const u8) = .empty, ignore_include_env_var: bool = false, preprocess: Preprocess = .yes, default_language_id: ?u16 = null, 
default_code_page: ?CodePage = null, verbose: bool = false, - symbols: std.StringArrayHashMapUnmanaged(SymbolValue) = .{}, + symbols: std.StringArrayHashMapUnmanaged(SymbolValue) = .empty, null_terminate_string_table_strings: bool = false, max_string_literal_codepoints: u15 = lex.default_max_string_literal_codepoints, silent_duplicate_control_ids: bool = false, diff --git a/lib/compiler/resinator/compile.zig b/lib/compiler/resinator/compile.zig index f9e211a4dc27..58259cf4c47a 100644 --- a/lib/compiler/resinator/compile.zig +++ b/lib/compiler/resinator/compile.zig @@ -3004,9 +3004,9 @@ test "limitedWriter basic usage" { } pub const FontDir = struct { - fonts: std.ArrayListUnmanaged(Font) = .{}, + fonts: std.ArrayListUnmanaged(Font) = .empty, /// To keep track of which ids are set and where they were set from - ids: std.AutoHashMapUnmanaged(u16, Token) = .{}, + ids: std.AutoHashMapUnmanaged(u16, Token) = .empty, pub const Font = struct { id: u16, @@ -3112,7 +3112,7 @@ pub const StringTablesByLanguage = struct { /// when the first STRINGTABLE for the language was defined, and all blocks for a given /// language are written contiguously. /// Using an ArrayHashMap here gives us this property for free. - tables: std.AutoArrayHashMapUnmanaged(res.Language, StringTable) = .{}, + tables: std.AutoArrayHashMapUnmanaged(res.Language, StringTable) = .empty, pub fn deinit(self: *StringTablesByLanguage, allocator: Allocator) void { self.tables.deinit(allocator); @@ -3143,10 +3143,10 @@ pub const StringTable = struct { /// was added to the block (i.e. `STRINGTABLE { 16 "b" 0 "a" }` would then get written /// with block ID 2 (the one with "b") first and block ID 1 (the one with "a") second). /// Using an ArrayHashMap here gives us this property for free. 
- blocks: std.AutoArrayHashMapUnmanaged(u16, Block) = .{}, + blocks: std.AutoArrayHashMapUnmanaged(u16, Block) = .empty, pub const Block = struct { - strings: std.ArrayListUnmanaged(Token) = .{}, + strings: std.ArrayListUnmanaged(Token) = .empty, set_indexes: std.bit_set.IntegerBitSet(16) = .{ .mask = 0 }, memory_flags: MemoryFlags = MemoryFlags.defaults(res.RT.STRING), characteristics: u32, diff --git a/lib/compiler/resinator/errors.zig b/lib/compiler/resinator/errors.zig index 90744e993419..67a5a09d3ba0 100644 --- a/lib/compiler/resinator/errors.zig +++ b/lib/compiler/resinator/errors.zig @@ -13,10 +13,10 @@ const builtin = @import("builtin"); const native_endian = builtin.cpu.arch.endian(); pub const Diagnostics = struct { - errors: std.ArrayListUnmanaged(ErrorDetails) = .{}, + errors: std.ArrayListUnmanaged(ErrorDetails) = .empty, /// Append-only, cannot handle removing strings. /// Expects to own all strings within the list. - strings: std.ArrayListUnmanaged([]const u8) = .{}, + strings: std.ArrayListUnmanaged([]const u8) = .empty, allocator: std.mem.Allocator, pub fn init(allocator: std.mem.Allocator) Diagnostics { @@ -968,7 +968,7 @@ pub fn renderErrorMessage(allocator: std.mem.Allocator, writer: anytype, tty_con const CorrespondingLines = struct { worth_printing_note: bool = true, worth_printing_lines: bool = true, - lines: std.ArrayListUnmanaged(u8) = .{}, + lines: std.ArrayListUnmanaged(u8) = .empty, lines_is_error_message: bool = false, pub fn init(allocator: std.mem.Allocator, cwd: std.fs.Dir, err_details: ErrorDetails, lines_for_comparison: []const u8, corresponding_span: SourceMappings.CorrespondingSpan, corresponding_file: []const u8) !CorrespondingLines { diff --git a/lib/compiler/resinator/main.zig b/lib/compiler/resinator/main.zig index 4159ad03e30f..a918081226f5 100644 --- a/lib/compiler/resinator/main.zig +++ b/lib/compiler/resinator/main.zig @@ -10,7 +10,7 @@ const renderErrorMessage = @import("utils.zig").renderErrorMessage; const aro = 
@import("aro"); pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init; defer std.debug.assert(gpa.deinit() == .ok); const allocator = gpa.allocator(); @@ -432,7 +432,7 @@ fn cliDiagnosticsToErrorBundle( }); var cur_err: ?ErrorBundle.ErrorMessage = null; - var cur_notes: std.ArrayListUnmanaged(ErrorBundle.ErrorMessage) = .{}; + var cur_notes: std.ArrayListUnmanaged(ErrorBundle.ErrorMessage) = .empty; defer cur_notes.deinit(gpa); for (diagnostics.errors.items) |err_details| { switch (err_details.type) { @@ -474,10 +474,10 @@ fn diagnosticsToErrorBundle( try bundle.init(gpa); errdefer bundle.deinit(); - var msg_buf: std.ArrayListUnmanaged(u8) = .{}; + var msg_buf: std.ArrayListUnmanaged(u8) = .empty; defer msg_buf.deinit(gpa); var cur_err: ?ErrorBundle.ErrorMessage = null; - var cur_notes: std.ArrayListUnmanaged(ErrorBundle.ErrorMessage) = .{}; + var cur_notes: std.ArrayListUnmanaged(ErrorBundle.ErrorMessage) = .empty; defer cur_notes.deinit(gpa); for (diagnostics.errors.items) |err_details| { switch (err_details.type) { @@ -587,7 +587,7 @@ fn aroDiagnosticsToErrorBundle( var msg_writer = MsgWriter.init(gpa); defer msg_writer.deinit(); var cur_err: ?ErrorBundle.ErrorMessage = null; - var cur_notes: std.ArrayListUnmanaged(ErrorBundle.ErrorMessage) = .{}; + var cur_notes: std.ArrayListUnmanaged(ErrorBundle.ErrorMessage) = .empty; defer cur_notes.deinit(gpa); for (comp.diagnostics.list.items) |msg| { switch (msg.kind) { diff --git a/lib/compiler/resinator/parse.zig b/lib/compiler/resinator/parse.zig index e6fe228dcc49..3bfd7fd7e257 100644 --- a/lib/compiler/resinator/parse.zig +++ b/lib/compiler/resinator/parse.zig @@ -111,7 +111,7 @@ pub const Parser = struct { /// current token is unchanged. 
/// The returned slice is allocated by the parser's arena fn parseCommonResourceAttributes(self: *Self) ![]Token { - var common_resource_attributes = std.ArrayListUnmanaged(Token){}; + var common_resource_attributes: std.ArrayListUnmanaged(Token) = .empty; while (true) { const maybe_common_resource_attribute = try self.lookaheadToken(.normal); if (maybe_common_resource_attribute.id == .literal and rc.CommonResourceAttributes.map.has(maybe_common_resource_attribute.slice(self.lexer.buffer))) { @@ -131,7 +131,7 @@ pub const Parser = struct { /// current token is unchanged. /// The returned slice is allocated by the parser's arena fn parseOptionalStatements(self: *Self, resource: Resource) ![]*Node { - var optional_statements = std.ArrayListUnmanaged(*Node){}; + var optional_statements: std.ArrayListUnmanaged(*Node) = .empty; while (true) { const lookahead_token = try self.lookaheadToken(.normal); if (lookahead_token.id != .literal) break; @@ -445,7 +445,7 @@ pub const Parser = struct { const begin_token = self.state.token; try self.check(.begin); - var accelerators = std.ArrayListUnmanaged(*Node){}; + var accelerators: std.ArrayListUnmanaged(*Node) = .empty; while (true) { const lookahead = try self.lookaheadToken(.normal); @@ -463,7 +463,7 @@ pub const Parser = struct { const idvalue = try self.parseExpression(.{ .allowed_types = .{ .number = true } }); - var type_and_options = std.ArrayListUnmanaged(Token){}; + var type_and_options: std.ArrayListUnmanaged(Token) = .empty; while (true) { if (!(try self.parseOptionalToken(.comma))) break; @@ -528,7 +528,7 @@ pub const Parser = struct { const begin_token = self.state.token; try self.check(.begin); - var controls = std.ArrayListUnmanaged(*Node){}; + var controls: std.ArrayListUnmanaged(*Node) = .empty; defer controls.deinit(self.state.allocator); while (try self.parseControlStatement(resource)) |control_node| { // The number of controls must fit in a u16 in order for it to @@ -587,7 +587,7 @@ pub const Parser = struct 
{ const begin_token = self.state.token; try self.check(.begin); - var buttons = std.ArrayListUnmanaged(*Node){}; + var buttons: std.ArrayListUnmanaged(*Node) = .empty; defer buttons.deinit(self.state.allocator); while (try self.parseToolbarButtonStatement()) |button_node| { // The number of buttons must fit in a u16 in order for it to @@ -645,7 +645,7 @@ pub const Parser = struct { const begin_token = self.state.token; try self.check(.begin); - var items = std.ArrayListUnmanaged(*Node){}; + var items: std.ArrayListUnmanaged(*Node) = .empty; defer items.deinit(self.state.allocator); while (try self.parseMenuItemStatement(resource, id_token, 1)) |item_node| { try items.append(self.state.allocator, item_node); @@ -679,7 +679,7 @@ pub const Parser = struct { // common resource attributes must all be contiguous and come before optional-statements const common_resource_attributes = try self.parseCommonResourceAttributes(); - var fixed_info = std.ArrayListUnmanaged(*Node){}; + var fixed_info: std.ArrayListUnmanaged(*Node) = .empty; while (try self.parseVersionStatement()) |version_statement| { try fixed_info.append(self.state.arena, version_statement); } @@ -688,7 +688,7 @@ pub const Parser = struct { const begin_token = self.state.token; try self.check(.begin); - var block_statements = std.ArrayListUnmanaged(*Node){}; + var block_statements: std.ArrayListUnmanaged(*Node) = .empty; while (try self.parseVersionBlockOrValue(id_token, 1)) |block_node| { try block_statements.append(self.state.arena, block_node); } @@ -1064,7 +1064,7 @@ pub const Parser = struct { _ = try self.parseOptionalToken(.comma); - var options = std.ArrayListUnmanaged(Token){}; + var options: std.ArrayListUnmanaged(Token) = .empty; while (true) { const option_token = try self.lookaheadToken(.normal); if (!rc.MenuItem.Option.map.has(option_token.slice(self.lexer.buffer))) { @@ -1099,7 +1099,7 @@ pub const Parser = struct { } try self.skipAnyCommas(); - var options = std.ArrayListUnmanaged(Token){}; + 
var options: std.ArrayListUnmanaged(Token) = .empty; while (true) { const option_token = try self.lookaheadToken(.normal); if (!rc.MenuItem.Option.map.has(option_token.slice(self.lexer.buffer))) { @@ -1114,7 +1114,7 @@ pub const Parser = struct { const begin_token = self.state.token; try self.check(.begin); - var items = std.ArrayListUnmanaged(*Node){}; + var items: std.ArrayListUnmanaged(*Node) = .empty; while (try self.parseMenuItemStatement(resource, top_level_menu_id_token, nesting_level + 1)) |item_node| { try items.append(self.state.arena, item_node); } @@ -1184,7 +1184,7 @@ pub const Parser = struct { const begin_token = self.state.token; try self.check(.begin); - var items = std.ArrayListUnmanaged(*Node){}; + var items: std.ArrayListUnmanaged(*Node) = .empty; while (try self.parseMenuItemStatement(resource, top_level_menu_id_token, nesting_level + 1)) |item_node| { try items.append(self.state.arena, item_node); } @@ -1341,7 +1341,7 @@ pub const Parser = struct { const begin_token = self.state.token; try self.check(.begin); - var children = std.ArrayListUnmanaged(*Node){}; + var children: std.ArrayListUnmanaged(*Node) = .empty; while (try self.parseVersionBlockOrValue(top_level_version_id_token, nesting_level + 1)) |value_node| { try children.append(self.state.arena, value_node); } @@ -1374,7 +1374,7 @@ pub const Parser = struct { } fn parseBlockValuesList(self: *Self, had_comma_before_first_value: bool) Error![]*Node { - var values = std.ArrayListUnmanaged(*Node){}; + var values: std.ArrayListUnmanaged(*Node) = .empty; var seen_number: bool = false; var first_string_value: ?*Node = null; while (true) { diff --git a/lib/compiler/resinator/source_mapping.zig b/lib/compiler/resinator/source_mapping.zig index 1144340252a5..ba396b019c89 100644 --- a/lib/compiler/resinator/source_mapping.zig +++ b/lib/compiler/resinator/source_mapping.zig @@ -10,7 +10,7 @@ pub const ParseLineCommandsResult = struct { const CurrentMapping = struct { line_num: usize = 1, - 
filename: std.ArrayListUnmanaged(u8) = .{}, + filename: std.ArrayListUnmanaged(u8) = .empty, pending: bool = true, ignore_contents: bool = false, }; @@ -626,8 +626,8 @@ test "SourceMappings collapse" { /// Same thing as StringTable in Zig's src/Wasm.zig pub const StringTable = struct { - data: std.ArrayListUnmanaged(u8) = .{}, - map: std.HashMapUnmanaged(u32, void, std.hash_map.StringIndexContext, std.hash_map.default_max_load_percentage) = .{}, + data: std.ArrayListUnmanaged(u8) = .empty, + map: std.HashMapUnmanaged(u32, void, std.hash_map.StringIndexContext, std.hash_map.default_max_load_percentage) = .empty, pub fn deinit(self: *StringTable, allocator: Allocator) void { self.data.deinit(allocator); diff --git a/lib/compiler/std-docs.zig b/lib/compiler/std-docs.zig index 0382bbf971a3..9ed1acdc2c38 100644 --- a/lib/compiler/std-docs.zig +++ b/lib/compiler/std-docs.zig @@ -25,7 +25,7 @@ pub fn main() !void { defer arena_instance.deinit(); const arena = arena_instance.allocator(); - var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .{}; + var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init; const gpa = general_purpose_allocator.allocator(); var argv = try std.process.argsWithAllocator(arena); @@ -265,7 +265,7 @@ fn buildWasmBinary( ) !Cache.Path { const gpa = context.gpa; - var argv: std.ArrayListUnmanaged([]const u8) = .{}; + var argv: std.ArrayListUnmanaged([]const u8) = .empty; try argv.appendSlice(arena, &.{ context.zig_exe_path, // diff --git a/lib/compiler/test_runner.zig b/lib/compiler/test_runner.zig index 83d53626c3f0..f88354623d22 100644 --- a/lib/compiler/test_runner.zig +++ b/lib/compiler/test_runner.zig @@ -85,7 +85,7 @@ fn mainServer() !void { @panic("internal test runner memory leak"); }; - var string_bytes: std.ArrayListUnmanaged(u8) = .{}; + var string_bytes: std.ArrayListUnmanaged(u8) = .empty; defer string_bytes.deinit(testing.allocator); try string_bytes.append(testing.allocator, 0); // Reserve 0 for 
null. diff --git a/lib/docs/wasm/Walk.zig b/lib/docs/wasm/Walk.zig index 611830fcb89d..49a5e738fc20 100644 --- a/lib/docs/wasm/Walk.zig +++ b/lib/docs/wasm/Walk.zig @@ -10,9 +10,9 @@ const Oom = error{OutOfMemory}; pub const Decl = @import("Decl.zig"); -pub var files: std.StringArrayHashMapUnmanaged(File) = .{}; -pub var decls: std.ArrayListUnmanaged(Decl) = .{}; -pub var modules: std.StringArrayHashMapUnmanaged(File.Index) = .{}; +pub var files: std.StringArrayHashMapUnmanaged(File) = .empty; +pub var decls: std.ArrayListUnmanaged(Decl) = .empty; +pub var modules: std.StringArrayHashMapUnmanaged(File.Index) = .empty; file: File.Index, @@ -42,17 +42,17 @@ pub const Category = union(enum(u8)) { pub const File = struct { ast: Ast, /// Maps identifiers to the declarations they point to. - ident_decls: std.AutoArrayHashMapUnmanaged(Ast.TokenIndex, Ast.Node.Index) = .{}, + ident_decls: std.AutoArrayHashMapUnmanaged(Ast.TokenIndex, Ast.Node.Index) = .empty, /// Maps field access identifiers to the containing field access node. - token_parents: std.AutoArrayHashMapUnmanaged(Ast.TokenIndex, Ast.Node.Index) = .{}, + token_parents: std.AutoArrayHashMapUnmanaged(Ast.TokenIndex, Ast.Node.Index) = .empty, /// Maps declarations to their global index. - node_decls: std.AutoArrayHashMapUnmanaged(Ast.Node.Index, Decl.Index) = .{}, + node_decls: std.AutoArrayHashMapUnmanaged(Ast.Node.Index, Decl.Index) = .empty, /// Maps function declarations to doctests. 
- doctests: std.AutoArrayHashMapUnmanaged(Ast.Node.Index, Ast.Node.Index) = .{}, + doctests: std.AutoArrayHashMapUnmanaged(Ast.Node.Index, Ast.Node.Index) = .empty, /// root node => its namespace scope /// struct/union/enum/opaque decl node => its namespace scope /// local var decl node => its local variable scope - scopes: std.AutoArrayHashMapUnmanaged(Ast.Node.Index, *Scope) = .{}, + scopes: std.AutoArrayHashMapUnmanaged(Ast.Node.Index, *Scope) = .empty, pub fn lookup_token(file: *File, token: Ast.TokenIndex) Decl.Index { const decl_node = file.ident_decls.get(token) orelse return .none; @@ -464,8 +464,8 @@ pub const Scope = struct { const Namespace = struct { base: Scope = .{ .tag = .namespace }, parent: *Scope, - names: std.StringArrayHashMapUnmanaged(Ast.Node.Index) = .{}, - doctests: std.StringArrayHashMapUnmanaged(Ast.Node.Index) = .{}, + names: std.StringArrayHashMapUnmanaged(Ast.Node.Index) = .empty, + doctests: std.StringArrayHashMapUnmanaged(Ast.Node.Index) = .empty, decl_index: Decl.Index, }; diff --git a/lib/docs/wasm/html_render.zig b/lib/docs/wasm/html_render.zig index a5211fc77f4d..d9cb74f152e8 100644 --- a/lib/docs/wasm/html_render.zig +++ b/lib/docs/wasm/html_render.zig @@ -38,7 +38,7 @@ pub fn fileSourceHtml( const file = file_index.get(); const g = struct { - var field_access_buffer: std.ArrayListUnmanaged(u8) = .{}; + var field_access_buffer: std.ArrayListUnmanaged(u8) = .empty; }; const token_tags = ast.tokens.items(.tag); diff --git a/lib/docs/wasm/main.zig b/lib/docs/wasm/main.zig index 55882aaf7df5..0ec222751283 100644 --- a/lib/docs/wasm/main.zig +++ b/lib/docs/wasm/main.zig @@ -60,8 +60,8 @@ export fn unpack(tar_ptr: [*]u8, tar_len: usize) void { }; } -var query_string: std.ArrayListUnmanaged(u8) = .{}; -var query_results: std.ArrayListUnmanaged(Decl.Index) = .{}; +var query_string: std.ArrayListUnmanaged(u8) = .empty; +var query_results: std.ArrayListUnmanaged(Decl.Index) = .empty; /// Resizes the query string to be the correct length; 
returns the pointer to /// the query string. @@ -93,11 +93,11 @@ fn query_exec_fallible(query: []const u8, ignore_case: bool) !void { segments: u16, }; const g = struct { - var full_path_search_text: std.ArrayListUnmanaged(u8) = .{}; - var full_path_search_text_lower: std.ArrayListUnmanaged(u8) = .{}; - var doc_search_text: std.ArrayListUnmanaged(u8) = .{}; + var full_path_search_text: std.ArrayListUnmanaged(u8) = .empty; + var full_path_search_text_lower: std.ArrayListUnmanaged(u8) = .empty; + var doc_search_text: std.ArrayListUnmanaged(u8) = .empty; /// Each element matches a corresponding query_results element. - var scores: std.ArrayListUnmanaged(Score) = .{}; + var scores: std.ArrayListUnmanaged(Score) = .empty; }; // First element stores the size of the list. @@ -255,8 +255,8 @@ const ErrorIdentifier = packed struct(u64) { } }; -var string_result: std.ArrayListUnmanaged(u8) = .{}; -var error_set_result: std.StringArrayHashMapUnmanaged(ErrorIdentifier) = .{}; +var string_result: std.ArrayListUnmanaged(u8) = .empty; +var error_set_result: std.StringArrayHashMapUnmanaged(ErrorIdentifier) = .empty; export fn decl_error_set(decl_index: Decl.Index) Slice(ErrorIdentifier) { return Slice(ErrorIdentifier).init(decl_error_set_fallible(decl_index) catch @panic("OOM")); @@ -381,7 +381,7 @@ export fn decl_params(decl_index: Decl.Index) Slice(Ast.Node.Index) { fn decl_fields_fallible(decl_index: Decl.Index) ![]Ast.Node.Index { const g = struct { - var result: std.ArrayListUnmanaged(Ast.Node.Index) = .{}; + var result: std.ArrayListUnmanaged(Ast.Node.Index) = .empty; }; g.result.clearRetainingCapacity(); const decl = decl_index.get(); @@ -403,7 +403,7 @@ fn decl_fields_fallible(decl_index: Decl.Index) ![]Ast.Node.Index { fn decl_params_fallible(decl_index: Decl.Index) ![]Ast.Node.Index { const g = struct { - var result: std.ArrayListUnmanaged(Ast.Node.Index) = .{}; + var result: std.ArrayListUnmanaged(Ast.Node.Index) = .empty; }; g.result.clearRetainingCapacity(); const 
decl = decl_index.get(); @@ -672,7 +672,7 @@ fn render_docs( defer parsed_doc.deinit(gpa); const g = struct { - var link_buffer: std.ArrayListUnmanaged(u8) = .{}; + var link_buffer: std.ArrayListUnmanaged(u8) = .empty; }; const Writer = std.ArrayListUnmanaged(u8).Writer; @@ -817,7 +817,7 @@ export fn find_module_root(pkg: Walk.ModuleIndex) Decl.Index { } /// Set by `set_input_string`. -var input_string: std.ArrayListUnmanaged(u8) = .{}; +var input_string: std.ArrayListUnmanaged(u8) = .empty; export fn set_input_string(len: usize) [*]u8 { input_string.resize(gpa, len) catch @panic("OOM"); @@ -839,7 +839,7 @@ export fn find_decl() Decl.Index { if (result != .none) return result; const g = struct { - var match_fqn: std.ArrayListUnmanaged(u8) = .{}; + var match_fqn: std.ArrayListUnmanaged(u8) = .empty; }; for (Walk.decls.items, 0..) |*decl, decl_index| { g.match_fqn.clearRetainingCapacity(); @@ -888,7 +888,7 @@ export fn type_fn_members(parent: Decl.Index, include_private: bool) Slice(Decl. 
export fn namespace_members(parent: Decl.Index, include_private: bool) Slice(Decl.Index) { const g = struct { - var members: std.ArrayListUnmanaged(Decl.Index) = .{}; + var members: std.ArrayListUnmanaged(Decl.Index) = .empty; }; g.members.clearRetainingCapacity(); diff --git a/lib/docs/wasm/markdown/Parser.zig b/lib/docs/wasm/markdown/Parser.zig index 024a16a2d7a3..32c172968456 100644 --- a/lib/docs/wasm/markdown/Parser.zig +++ b/lib/docs/wasm/markdown/Parser.zig @@ -31,11 +31,11 @@ const ExtraData = Document.ExtraData; const StringIndex = Document.StringIndex; nodes: Node.List = .{}, -extra: std.ArrayListUnmanaged(u32) = .{}, -scratch_extra: std.ArrayListUnmanaged(u32) = .{}, -string_bytes: std.ArrayListUnmanaged(u8) = .{}, -scratch_string: std.ArrayListUnmanaged(u8) = .{}, -pending_blocks: std.ArrayListUnmanaged(Block) = .{}, +extra: std.ArrayListUnmanaged(u32) = .empty, +scratch_extra: std.ArrayListUnmanaged(u32) = .empty, +string_bytes: std.ArrayListUnmanaged(u8) = .empty, +scratch_string: std.ArrayListUnmanaged(u8) = .empty, +pending_blocks: std.ArrayListUnmanaged(Block) = .empty, allocator: Allocator, const Parser = @This(); @@ -928,8 +928,8 @@ const InlineParser = struct { parent: *Parser, content: []const u8, pos: usize = 0, - pending_inlines: std.ArrayListUnmanaged(PendingInline) = .{}, - completed_inlines: std.ArrayListUnmanaged(CompletedInline) = .{}, + pending_inlines: std.ArrayListUnmanaged(PendingInline) = .empty, + completed_inlines: std.ArrayListUnmanaged(CompletedInline) = .empty, const PendingInline = struct { tag: Tag, diff --git a/lib/fuzzer.zig b/lib/fuzzer.zig index 3f8a99114876..d8f405dccb29 100644 --- a/lib/fuzzer.zig +++ b/lib/fuzzer.zig @@ -402,7 +402,7 @@ fn oom(err: anytype) noreturn { } } -var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .{}; +var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init; var fuzzer: Fuzzer = .{ .gpa = general_purpose_allocator.allocator(), diff --git 
a/lib/fuzzer/web/main.zig b/lib/fuzzer/web/main.zig index 9c50704e8a71..c6d6810e1b95 100644 --- a/lib/fuzzer/web/main.zig +++ b/lib/fuzzer/web/main.zig @@ -58,7 +58,7 @@ export fn alloc(n: usize) [*]u8 { return slice.ptr; } -var message_buffer: std.ArrayListAlignedUnmanaged(u8, @alignOf(u64)) = .{}; +var message_buffer: std.ArrayListAlignedUnmanaged(u8, @alignOf(u64)) = .empty; /// Resizes the message buffer to be the correct length; returns the pointer to /// the query string. @@ -90,8 +90,8 @@ export fn unpack(tar_ptr: [*]u8, tar_len: usize) void { } /// Set by `set_input_string`. -var input_string: std.ArrayListUnmanaged(u8) = .{}; -var string_result: std.ArrayListUnmanaged(u8) = .{}; +var input_string: std.ArrayListUnmanaged(u8) = .empty; +var string_result: std.ArrayListUnmanaged(u8) = .empty; export fn set_input_string(len: usize) [*]u8 { input_string.resize(gpa, len) catch @panic("OOM"); @@ -249,7 +249,7 @@ fn coverageUpdateMessage(msg_bytes: []u8) error{OutOfMemory}!void { js.emitCoverageUpdate(); } -var entry_points: std.ArrayListUnmanaged(u32) = .{}; +var entry_points: std.ArrayListUnmanaged(u32) = .empty; fn entryPointsMessage(msg_bytes: []u8) error{OutOfMemory}!void { const header: abi.EntryPointHeader = @bitCast(msg_bytes[0..@sizeOf(abi.EntryPointHeader)].*); @@ -295,7 +295,7 @@ const SourceLocationIndex = enum(u32) { } fn toWalkFile(sli: SourceLocationIndex) ?Walk.File.Index { - var buf: std.ArrayListUnmanaged(u8) = .{}; + var buf: std.ArrayListUnmanaged(u8) = .empty; defer buf.deinit(gpa); sli.appendPath(&buf) catch @panic("OOM"); return @enumFromInt(Walk.files.getIndex(buf.items) orelse return null); @@ -307,7 +307,7 @@ const SourceLocationIndex = enum(u32) { ) error{ OutOfMemory, SourceUnavailable }!void { const walk_file_index = sli.toWalkFile() orelse return error.SourceUnavailable; const root_node = walk_file_index.findRootDecl().get().ast_node; - var annotations: std.ArrayListUnmanaged(html_render.Annotation) = .{}; + var annotations: 
std.ArrayListUnmanaged(html_render.Annotation) = .empty; defer annotations.deinit(gpa); try computeSourceAnnotations(sli.ptr().file, walk_file_index, &annotations, coverage_source_locations.items); html_render.fileSourceHtml(walk_file_index, out, root_node, .{ @@ -327,7 +327,7 @@ fn computeSourceAnnotations( // Collect all the source locations from only this file into this array // first, then sort by line, col, so that we can collect annotations with // O(N) time complexity. - var locs: std.ArrayListUnmanaged(SourceLocationIndex) = .{}; + var locs: std.ArrayListUnmanaged(SourceLocationIndex) = .empty; defer locs.deinit(gpa); for (source_locations, 0..) |sl, sli_usize| { @@ -374,9 +374,9 @@ fn computeSourceAnnotations( var coverage = Coverage.init; /// Index of type `SourceLocationIndex`. -var coverage_source_locations: std.ArrayListUnmanaged(Coverage.SourceLocation) = .{}; +var coverage_source_locations: std.ArrayListUnmanaged(Coverage.SourceLocation) = .empty; /// Contains the most recent coverage update message, unmodified. 
-var recent_coverage_update: std.ArrayListAlignedUnmanaged(u8, @alignOf(u64)) = .{}; +var recent_coverage_update: std.ArrayListAlignedUnmanaged(u8, @alignOf(u64)) = .empty; fn updateCoverage( directories: []const Coverage.String, @@ -425,7 +425,7 @@ export fn sourceLocationFileHtml(sli: SourceLocationIndex) String { export fn sourceLocationFileCoveredList(sli_file: SourceLocationIndex) Slice(SourceLocationIndex) { const global = struct { - var result: std.ArrayListUnmanaged(SourceLocationIndex) = .{}; + var result: std.ArrayListUnmanaged(SourceLocationIndex) = .empty; fn add(i: u32, want_file: Coverage.File.Index) void { const src_loc_index: SourceLocationIndex = @enumFromInt(i); if (src_loc_index.ptr().file == want_file) result.appendAssumeCapacity(src_loc_index); diff --git a/lib/std/Build.zig b/lib/std/Build.zig index 109ebc8be26b..236ac16c476b 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -111,7 +111,7 @@ pub const ReleaseMode = enum { /// Settings that are here rather than in Build are not configurable per-package. pub const Graph = struct { arena: Allocator, - system_library_options: std.StringArrayHashMapUnmanaged(SystemLibraryMode) = .{}, + system_library_options: std.StringArrayHashMapUnmanaged(SystemLibraryMode) = .empty, system_package_mode: bool = false, debug_compiler_runtime_libs: bool = false, cache: Cache, @@ -119,7 +119,7 @@ pub const Graph = struct { env_map: EnvMap, global_cache_root: Cache.Directory, zig_lib_directory: Cache.Directory, - needed_lazy_dependencies: std.StringArrayHashMapUnmanaged(void) = .{}, + needed_lazy_dependencies: std.StringArrayHashMapUnmanaged(void) = .empty, /// Information about the native target. Computed before build() is invoked. 
host: ResolvedTarget, incremental: ?bool = null, diff --git a/lib/std/Build/Fuzz.zig b/lib/std/Build/Fuzz.zig index 6258f4cddaee..6e837ebec23f 100644 --- a/lib/std/Build/Fuzz.zig +++ b/lib/std/Build/Fuzz.zig @@ -30,7 +30,7 @@ pub fn start( defer rebuild_node.end(); var wait_group: std.Thread.WaitGroup = .{}; defer wait_group.wait(); - var fuzz_run_steps: std.ArrayListUnmanaged(*Step.Run) = .{}; + var fuzz_run_steps: std.ArrayListUnmanaged(*Step.Run) = .empty; defer fuzz_run_steps.deinit(gpa); for (all_steps) |step| { const run = step.cast(Step.Run) orelse continue; diff --git a/lib/std/Build/Fuzz/WebServer.zig b/lib/std/Build/Fuzz/WebServer.zig index fb78e96abb02..ac4336476e97 100644 --- a/lib/std/Build/Fuzz/WebServer.zig +++ b/lib/std/Build/Fuzz/WebServer.zig @@ -236,7 +236,7 @@ fn buildWasmBinary( .sub_path = "docs/wasm/html_render.zig", }; - var argv: std.ArrayListUnmanaged([]const u8) = .{}; + var argv: std.ArrayListUnmanaged([]const u8) = .empty; try argv.appendSlice(arena, &.{ ws.zig_exe_path, "build-exe", // diff --git a/lib/std/Build/Step.zig b/lib/std/Build/Step.zig index b8fb42a9592a..421c8fa579ac 100644 --- a/lib/std/Build/Step.zig +++ b/lib/std/Build/Step.zig @@ -714,7 +714,7 @@ pub fn allocPrintCmd2( opt_env: ?*const std.process.EnvMap, argv: []const []const u8, ) Allocator.Error![]u8 { - var buf: std.ArrayListUnmanaged(u8) = .{}; + var buf: std.ArrayListUnmanaged(u8) = .empty; if (opt_cwd) |cwd| try buf.writer(arena).print("cd {s} && ", .{cwd}); if (opt_env) |env| { const process_env_map = std.process.getEnvMap(arena) catch std.process.EnvMap.init(arena); diff --git a/lib/std/Build/Step/CheckObject.zig b/lib/std/Build/Step/CheckObject.zig index ce3300311777..54f7bc741225 100644 --- a/lib/std/Build/Step/CheckObject.zig +++ b/lib/std/Build/Step/CheckObject.zig @@ -713,12 +713,12 @@ const MachODumper = struct { gpa: Allocator, data: []const u8, header: macho.mach_header_64, - segments: std.ArrayListUnmanaged(macho.segment_command_64) = .{}, - sections: 
std.ArrayListUnmanaged(macho.section_64) = .{}, - symtab: std.ArrayListUnmanaged(macho.nlist_64) = .{}, - strtab: std.ArrayListUnmanaged(u8) = .{}, - indsymtab: std.ArrayListUnmanaged(u32) = .{}, - imports: std.ArrayListUnmanaged([]const u8) = .{}, + segments: std.ArrayListUnmanaged(macho.segment_command_64) = .empty, + sections: std.ArrayListUnmanaged(macho.section_64) = .empty, + symtab: std.ArrayListUnmanaged(macho.nlist_64) = .empty, + strtab: std.ArrayListUnmanaged(u8) = .empty, + indsymtab: std.ArrayListUnmanaged(u32) = .empty, + imports: std.ArrayListUnmanaged([]const u8) = .empty, fn parse(ctx: *ObjectContext) !void { var it = ctx.getLoadCommandIterator(); @@ -1797,9 +1797,9 @@ const ElfDumper = struct { const ArchiveContext = struct { gpa: Allocator, data: []const u8, - symtab: std.ArrayListUnmanaged(ArSymtabEntry) = .{}, + symtab: std.ArrayListUnmanaged(ArSymtabEntry) = .empty, strtab: []const u8, - objects: std.ArrayListUnmanaged(struct { name: []const u8, off: usize, len: usize }) = .{}, + objects: std.ArrayListUnmanaged(struct { name: []const u8, off: usize, len: usize }) = .empty, fn parseSymtab(ctx: *ArchiveContext, raw: []const u8, ptr_width: enum { p32, p64 }) !void { var stream = std.io.fixedBufferStream(raw); diff --git a/lib/std/Build/Step/Compile.zig b/lib/std/Build/Step/Compile.zig index 922d64c728c5..0f0b5d32017c 100644 --- a/lib/std/Build/Step/Compile.zig +++ b/lib/std/Build/Step/Compile.zig @@ -1070,8 +1070,8 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 { // Stores system libraries that have already been seen for at least one // module, along with any arguments that need to be passed to the // compiler for each module individually. 
- var seen_system_libs: std.StringHashMapUnmanaged([]const []const u8) = .{}; - var frameworks: std.StringArrayHashMapUnmanaged(Module.LinkFrameworkOptions) = .{}; + var seen_system_libs: std.StringHashMapUnmanaged([]const []const u8) = .empty; + var frameworks: std.StringArrayHashMapUnmanaged(Module.LinkFrameworkOptions) = .empty; var prev_has_cflags = false; var prev_has_rcflags = false; diff --git a/lib/std/Build/Step/Fmt.zig b/lib/std/Build/Step/Fmt.zig index 576aeb1d21db..a364dfa6f478 100644 --- a/lib/std/Build/Step/Fmt.zig +++ b/lib/std/Build/Step/Fmt.zig @@ -48,7 +48,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void { const arena = b.allocator; const fmt: *Fmt = @fieldParentPtr("step", step); - var argv: std.ArrayListUnmanaged([]const u8) = .{}; + var argv: std.ArrayListUnmanaged([]const u8) = .empty; try argv.ensureUnusedCapacity(arena, 2 + 1 + fmt.paths.len + 2 * fmt.exclude_paths.len); argv.appendAssumeCapacity(b.graph.zig_exe); diff --git a/lib/std/Build/Step/Run.zig b/lib/std/Build/Step/Run.zig index e37b97ddb37b..c35ba00b79aa 100644 --- a/lib/std/Build/Step/Run.zig +++ b/lib/std/Build/Step/Run.zig @@ -856,7 +856,7 @@ pub fn rerunInFuzzMode( const step = &run.step; const b = step.owner; const arena = b.allocator; - var argv_list: std.ArrayListUnmanaged([]const u8) = .{}; + var argv_list: std.ArrayListUnmanaged([]const u8) = .empty; for (run.argv.items) |arg| { switch (arg) { .bytes => |bytes| { diff --git a/lib/std/array_hash_map.zig b/lib/std/array_hash_map.zig index 1b96be472a9b..bf411b379896 100644 --- a/lib/std/array_hash_map.zig +++ b/lib/std/array_hash_map.zig @@ -130,7 +130,7 @@ pub fn ArrayHashMap( } pub fn initContext(allocator: Allocator, ctx: Context) Self { return .{ - .unmanaged = .{}, + .unmanaged = .empty, .allocator = allocator, .ctx = ctx, }; @@ -429,7 +429,7 @@ pub fn ArrayHashMap( pub fn move(self: *Self) Self { self.unmanaged.pointer_stability.assertUnlocked(); const result = self.*; - self.unmanaged = .{}; + self.unmanaged 
= .empty; return result; } @@ -1290,7 +1290,7 @@ pub fn ArrayHashMapUnmanaged( pub fn move(self: *Self) Self { self.pointer_stability.assertUnlocked(); const result = self.*; - self.* = .{}; + self.* = .empty; return result; } diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig index 24098a01f6e8..ac1b1446904b 100644 --- a/lib/std/array_list.zig +++ b/lib/std/array_list.zig @@ -710,7 +710,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ const old_memory = self.allocatedSlice(); if (allocator.resize(old_memory, self.items.len)) { const result = self.items; - self.* = .{}; + self.* = .empty; return result; } @@ -1267,7 +1267,7 @@ test "init" { } { - const list = ArrayListUnmanaged(i32){}; + const list: ArrayListUnmanaged(i32) = .empty; try testing.expect(list.items.len == 0); try testing.expect(list.capacity == 0); @@ -1312,7 +1312,7 @@ test "clone" { try testing.expectEqual(@as(i32, 5), cloned.items[2]); } { - var array = ArrayListUnmanaged(i32){}; + var array: ArrayListUnmanaged(i32) = .empty; try array.append(a, -1); try array.append(a, 3); try array.append(a, 5); @@ -1384,7 +1384,7 @@ test "basic" { try testing.expect(list.pop() == 33); } { - var list = ArrayListUnmanaged(i32){}; + var list: ArrayListUnmanaged(i32) = .empty; defer list.deinit(a); { @@ -1448,7 +1448,7 @@ test "appendNTimes" { } } { - var list = ArrayListUnmanaged(i32){}; + var list: ArrayListUnmanaged(i32) = .empty; defer list.deinit(a); try list.appendNTimes(a, 2, 10); @@ -1467,7 +1467,7 @@ test "appendNTimes with failing allocator" { try testing.expectError(error.OutOfMemory, list.appendNTimes(2, 10)); } { - var list = ArrayListUnmanaged(i32){}; + var list: ArrayListUnmanaged(i32) = .empty; defer list.deinit(a); try testing.expectError(error.OutOfMemory, list.appendNTimes(a, 2, 10)); } @@ -1502,7 +1502,7 @@ test "orderedRemove" { try testing.expectEqual(@as(usize, 4), list.items.len); } { - var list = ArrayListUnmanaged(i32){}; + var list: 
ArrayListUnmanaged(i32) = .empty; defer list.deinit(a); try list.append(a, 1); @@ -1537,7 +1537,7 @@ test "orderedRemove" { } { // remove last item - var list = ArrayListUnmanaged(i32){}; + var list: ArrayListUnmanaged(i32) = .empty; defer list.deinit(a); try list.append(a, 1); try testing.expectEqual(@as(i32, 1), list.orderedRemove(0)); @@ -1574,7 +1574,7 @@ test "swapRemove" { try testing.expect(list.items.len == 4); } { - var list = ArrayListUnmanaged(i32){}; + var list: ArrayListUnmanaged(i32) = .empty; defer list.deinit(a); try list.append(a, 1); @@ -1617,7 +1617,7 @@ test "insert" { try testing.expect(list.items[3] == 3); } { - var list = ArrayListUnmanaged(i32){}; + var list: ArrayListUnmanaged(i32) = .empty; defer list.deinit(a); try list.insert(a, 0, 1); @@ -1655,7 +1655,7 @@ test "insertSlice" { try testing.expect(list.items[0] == 1); } { - var list = ArrayListUnmanaged(i32){}; + var list: ArrayListUnmanaged(i32) = .empty; defer list.deinit(a); try list.append(a, 1); @@ -1789,7 +1789,7 @@ test "ArrayListUnmanaged.replaceRange" { const a = testing.allocator; { - var list = ArrayListUnmanaged(i32){}; + var list: ArrayListUnmanaged(i32) = .empty; defer list.deinit(a); try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 }); @@ -1798,7 +1798,7 @@ test "ArrayListUnmanaged.replaceRange" { try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 2, 3, 4, 5 }, list.items); } { - var list = ArrayListUnmanaged(i32){}; + var list: ArrayListUnmanaged(i32) = .empty; defer list.deinit(a); try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 }); @@ -1811,7 +1811,7 @@ test "ArrayListUnmanaged.replaceRange" { ); } { - var list = ArrayListUnmanaged(i32){}; + var list: ArrayListUnmanaged(i32) = .empty; defer list.deinit(a); try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 }); @@ -1820,7 +1820,7 @@ test "ArrayListUnmanaged.replaceRange" { try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 4, 5 }, list.items); } { - var list = ArrayListUnmanaged(i32){}; + var list: 
ArrayListUnmanaged(i32) = .empty; defer list.deinit(a); try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 }); @@ -1829,7 +1829,7 @@ test "ArrayListUnmanaged.replaceRange" { try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 5 }, list.items); } { - var list = ArrayListUnmanaged(i32){}; + var list: ArrayListUnmanaged(i32) = .empty; defer list.deinit(a); try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 }); @@ -1843,7 +1843,7 @@ test "ArrayListUnmanaged.replaceRangeAssumeCapacity" { const a = testing.allocator; { - var list = ArrayListUnmanaged(i32){}; + var list: ArrayListUnmanaged(i32) = .empty; defer list.deinit(a); try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 }); @@ -1852,7 +1852,7 @@ test "ArrayListUnmanaged.replaceRangeAssumeCapacity" { try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 2, 3, 4, 5 }, list.items); } { - var list = ArrayListUnmanaged(i32){}; + var list: ArrayListUnmanaged(i32) = .empty; defer list.deinit(a); try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 }); @@ -1865,7 +1865,7 @@ test "ArrayListUnmanaged.replaceRangeAssumeCapacity" { ); } { - var list = ArrayListUnmanaged(i32){}; + var list: ArrayListUnmanaged(i32) = .empty; defer list.deinit(a); try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 }); @@ -1874,7 +1874,7 @@ test "ArrayListUnmanaged.replaceRangeAssumeCapacity" { try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 4, 5 }, list.items); } { - var list = ArrayListUnmanaged(i32){}; + var list: ArrayListUnmanaged(i32) = .empty; defer list.deinit(a); try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 }); @@ -1883,7 +1883,7 @@ test "ArrayListUnmanaged.replaceRangeAssumeCapacity" { try testing.expectEqualSlices(i32, &[_]i32{ 1, 0, 0, 0, 5 }, list.items); } { - var list = ArrayListUnmanaged(i32){}; + var list: ArrayListUnmanaged(i32) = .empty; defer list.deinit(a); try list.appendSlice(a, &[_]i32{ 1, 2, 3, 4, 5 }); @@ -1906,15 +1906,15 @@ const ItemUnmanaged = struct { test "ArrayList(T) of struct T" { const a = std.testing.allocator; { - 
var root = Item{ .integer = 1, .sub_items = ArrayList(Item).init(a) }; + var root = Item{ .integer = 1, .sub_items = .init(a) }; defer root.sub_items.deinit(); - try root.sub_items.append(Item{ .integer = 42, .sub_items = ArrayList(Item).init(a) }); + try root.sub_items.append(Item{ .integer = 42, .sub_items = .init(a) }); try testing.expect(root.sub_items.items[0].integer == 42); } { - var root = ItemUnmanaged{ .integer = 1, .sub_items = ArrayListUnmanaged(ItemUnmanaged){} }; + var root = ItemUnmanaged{ .integer = 1, .sub_items = .empty }; defer root.sub_items.deinit(a); - try root.sub_items.append(a, ItemUnmanaged{ .integer = 42, .sub_items = ArrayListUnmanaged(ItemUnmanaged){} }); + try root.sub_items.append(a, ItemUnmanaged{ .integer = 42, .sub_items = .empty }); try testing.expect(root.sub_items.items[0].integer == 42); } } @@ -1950,7 +1950,7 @@ test "ArrayListUnmanaged(u8) implements writer" { const a = testing.allocator; { - var buffer: ArrayListUnmanaged(u8) = .{}; + var buffer: ArrayListUnmanaged(u8) = .empty; defer buffer.deinit(a); const x: i32 = 42; @@ -1960,7 +1960,7 @@ test "ArrayListUnmanaged(u8) implements writer" { try testing.expectEqualSlices(u8, "x: 42\ny: 1234\n", buffer.items); } { - var list: ArrayListAlignedUnmanaged(u8, 2) = .{}; + var list: ArrayListAlignedUnmanaged(u8, 2) = .empty; defer list.deinit(a); const writer = list.writer(a); @@ -1989,7 +1989,7 @@ test "shrink still sets length when resizing is disabled" { try testing.expect(list.items.len == 1); } { - var list = ArrayListUnmanaged(i32){}; + var list: ArrayListUnmanaged(i32) = .empty; defer list.deinit(a); try list.append(a, 1); @@ -2026,7 +2026,7 @@ test "addManyAsArray" { try testing.expectEqualSlices(u8, list.items, "aoeuasdf"); } { - var list = ArrayListUnmanaged(u8){}; + var list: ArrayListUnmanaged(u8) = .empty; defer list.deinit(a); (try list.addManyAsArray(a, 4)).* = "aoeu".*; @@ -2056,7 +2056,7 @@ test "growing memory preserves contents" { try 
testing.expectEqualSlices(u8, list.items, "abcdijklefgh"); } { - var list = ArrayListUnmanaged(u8){}; + var list: ArrayListUnmanaged(u8) = .empty; defer list.deinit(a); (try list.addManyAsArray(a, 4)).* = "abcd".*; @@ -2132,7 +2132,7 @@ test "toOwnedSliceSentinel" { try testing.expectEqualStrings(result, mem.sliceTo(result.ptr, 0)); } { - var list = ArrayListUnmanaged(u8){}; + var list: ArrayListUnmanaged(u8) = .empty; defer list.deinit(a); try list.appendSlice(a, "foobar"); @@ -2156,7 +2156,7 @@ test "accepts unaligned slices" { try testing.expectEqualSlices(u8, list.items, &.{ 0, 8, 9, 6, 7, 2, 3 }); } { - var list = std.ArrayListAlignedUnmanaged(u8, 8){}; + var list: std.ArrayListAlignedUnmanaged(u8, 8) = .empty; defer list.deinit(a); try list.appendSlice(a, &.{ 0, 1, 2, 3 }); diff --git a/lib/std/crypto/Certificate/Bundle.zig b/lib/std/crypto/Certificate/Bundle.zig index 92a6d43ac654..627cd4172bf1 100644 --- a/lib/std/crypto/Certificate/Bundle.zig +++ b/lib/std/crypto/Certificate/Bundle.zig @@ -6,8 +6,8 @@ //! certificate within `bytes`. /// The key is the contents slice of the subject. 
-map: std.HashMapUnmanaged(der.Element.Slice, u32, MapContext, std.hash_map.default_max_load_percentage) = .{}, -bytes: std.ArrayListUnmanaged(u8) = .{}, +map: std.HashMapUnmanaged(der.Element.Slice, u32, MapContext, std.hash_map.default_max_load_percentage) = .empty, +bytes: std.ArrayListUnmanaged(u8) = .empty, pub const VerifyError = Certificate.Parsed.VerifyError || error{ CertificateIssuerNotFound, diff --git a/lib/std/debug/Dwarf.zig b/lib/std/debug/Dwarf.zig index 1bc47a3e385d..d36e4f961d05 100644 --- a/lib/std/debug/Dwarf.zig +++ b/lib/std/debug/Dwarf.zig @@ -42,20 +42,20 @@ sections: SectionArray = null_section_array, is_macho: bool, /// Filled later by the initializer -abbrev_table_list: std.ArrayListUnmanaged(Abbrev.Table) = .{}, +abbrev_table_list: std.ArrayListUnmanaged(Abbrev.Table) = .empty, /// Filled later by the initializer -compile_unit_list: std.ArrayListUnmanaged(CompileUnit) = .{}, +compile_unit_list: std.ArrayListUnmanaged(CompileUnit) = .empty, /// Filled later by the initializer -func_list: std.ArrayListUnmanaged(Func) = .{}, +func_list: std.ArrayListUnmanaged(Func) = .empty, eh_frame_hdr: ?ExceptionFrameHeader = null, /// These lookup tables are only used if `eh_frame_hdr` is null -cie_map: std.AutoArrayHashMapUnmanaged(u64, CommonInformationEntry) = .{}, +cie_map: std.AutoArrayHashMapUnmanaged(u64, CommonInformationEntry) = .empty, /// Sorted by start_pc -fde_list: std.ArrayListUnmanaged(FrameDescriptionEntry) = .{}, +fde_list: std.ArrayListUnmanaged(FrameDescriptionEntry) = .empty, /// Populated by `populateRanges`. -ranges: std.ArrayListUnmanaged(Range) = .{}, +ranges: std.ArrayListUnmanaged(Range) = .empty, pub const Range = struct { start: u64, @@ -1464,9 +1464,9 @@ fn runLineNumberProgram(d: *Dwarf, gpa: Allocator, compile_unit: *CompileUnit) ! 
const standard_opcode_lengths = try fbr.readBytes(opcode_base - 1); - var directories: std.ArrayListUnmanaged(FileEntry) = .{}; + var directories: std.ArrayListUnmanaged(FileEntry) = .empty; defer directories.deinit(gpa); - var file_entries: std.ArrayListUnmanaged(FileEntry) = .{}; + var file_entries: std.ArrayListUnmanaged(FileEntry) = .empty; defer file_entries.deinit(gpa); if (version < 5) { diff --git a/lib/std/debug/Dwarf/expression.zig b/lib/std/debug/Dwarf/expression.zig index f71a4e02c171..38bce2614c20 100644 --- a/lib/std/debug/Dwarf/expression.zig +++ b/lib/std/debug/Dwarf/expression.zig @@ -153,7 +153,7 @@ pub fn StackMachine(comptime options: Options) type { } }; - stack: std.ArrayListUnmanaged(Value) = .{}, + stack: std.ArrayListUnmanaged(Value) = .empty, pub fn reset(self: *Self) void { self.stack.clearRetainingCapacity(); diff --git a/lib/std/debug/SelfInfo.zig b/lib/std/debug/SelfInfo.zig index 5e7aefef3810..76c0505e9675 100644 --- a/lib/std/debug/SelfInfo.zig +++ b/lib/std/debug/SelfInfo.zig @@ -1933,8 +1933,8 @@ pub const VirtualMachine = struct { len: u8 = 0, }; - columns: std.ArrayListUnmanaged(Column) = .{}, - stack: std.ArrayListUnmanaged(ColumnRange) = .{}, + columns: std.ArrayListUnmanaged(Column) = .empty, + stack: std.ArrayListUnmanaged(ColumnRange) = .empty, current_row: Row = .{}, /// The result of executing the CIE's initial_instructions diff --git a/lib/std/fs/Dir.zig b/lib/std/fs/Dir.zig index d4ea8a010952..2e84d1097fb5 100644 --- a/lib/std/fs/Dir.zig +++ b/lib/std/fs/Dir.zig @@ -750,7 +750,7 @@ pub const Walker = struct { /// /// `self` will not be closed after walking it. 
pub fn walk(self: Dir, allocator: Allocator) Allocator.Error!Walker { - var stack: std.ArrayListUnmanaged(Walker.StackItem) = .{}; + var stack: std.ArrayListUnmanaged(Walker.StackItem) = .empty; try stack.append(allocator, .{ .iter = self.iterate(), diff --git a/lib/std/fs/wasi.zig b/lib/std/fs/wasi.zig index 040dd4ca3ef0..5d729c9da1ee 100644 --- a/lib/std/fs/wasi.zig +++ b/lib/std/fs/wasi.zig @@ -24,7 +24,7 @@ pub const Preopens = struct { }; pub fn preopensAlloc(gpa: Allocator) Allocator.Error!Preopens { - var names: std.ArrayListUnmanaged([]const u8) = .{}; + var names: std.ArrayListUnmanaged([]const u8) = .empty; defer names.deinit(gpa); try names.ensureUnusedCapacity(gpa, 3); diff --git a/lib/std/hash/benchmark.zig b/lib/std/hash/benchmark.zig index 0798e26d6f5b..0a73be39ced0 100644 --- a/lib/std/hash/benchmark.zig +++ b/lib/std/hash/benchmark.zig @@ -410,7 +410,7 @@ pub fn main() !void { } } - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init; defer std.testing.expect(gpa.deinit() == .ok) catch @panic("leak"); const allocator = gpa.allocator(); diff --git a/lib/std/hash_map.zig b/lib/std/hash_map.zig index 54b85e4b3d85..9c436320b7b0 100644 --- a/lib/std/hash_map.zig +++ b/lib/std/hash_map.zig @@ -401,7 +401,7 @@ pub fn HashMap( @compileError("Context must be specified! Call initContext(allocator, ctx) instead."); } return .{ - .unmanaged = .{}, + .unmanaged = .empty, .allocator = allocator, .ctx = undefined, // ctx is zero-sized so this is safe. 
}; @@ -410,7 +410,7 @@ pub fn HashMap( /// Create a managed hash map with a context pub fn initContext(allocator: Allocator, ctx: Context) Self { return .{ - .unmanaged = .{}, + .unmanaged = .empty, .allocator = allocator, .ctx = ctx, }; @@ -691,7 +691,7 @@ pub fn HashMap( pub fn move(self: *Self) Self { self.unmanaged.pointer_stability.assertUnlocked(); const result = self.*; - self.unmanaged = .{}; + self.unmanaged = .empty; return result; } @@ -1543,7 +1543,7 @@ pub fn HashMapUnmanaged( return self.cloneContext(allocator, @as(Context, undefined)); } pub fn cloneContext(self: Self, allocator: Allocator, new_ctx: anytype) Allocator.Error!HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage) { - var other = HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage){}; + var other: HashMapUnmanaged(K, V, @TypeOf(new_ctx), max_load_percentage) = .empty; if (self.size == 0) return other; @@ -1572,7 +1572,7 @@ pub fn HashMapUnmanaged( pub fn move(self: *Self) Self { self.pointer_stability.assertUnlocked(); const result = self.*; - self.* = .{}; + self.* = .empty; return result; } @@ -2360,7 +2360,7 @@ test "removeByPtr 0 sized key" { } test "repeat fetchRemove" { - var map = AutoHashMapUnmanaged(u64, void){}; + var map: AutoHashMapUnmanaged(u64, void) = .empty; defer map.deinit(testing.allocator); try map.ensureTotalCapacity(testing.allocator, 4); @@ -2384,7 +2384,7 @@ test "repeat fetchRemove" { } test "getOrPut allocation failure" { - var map: std.StringHashMapUnmanaged(void) = .{}; + var map: std.StringHashMapUnmanaged(void) = .empty; try testing.expectError(error.OutOfMemory, map.getOrPut(std.testing.failing_allocator, "hello")); } diff --git a/lib/std/json/hashmap.zig b/lib/std/json/hashmap.zig index b40fb5e4c750..be4e9bd2dd5a 100644 --- a/lib/std/json/hashmap.zig +++ b/lib/std/json/hashmap.zig @@ -12,14 +12,14 @@ const Value = @import("dynamic.zig").Value; /// instead of comptime-known struct field names. 
pub fn ArrayHashMap(comptime T: type) type { return struct { - map: std.StringArrayHashMapUnmanaged(T) = .{}, + map: std.StringArrayHashMapUnmanaged(T) = .empty, pub fn deinit(self: *@This(), allocator: Allocator) void { self.map.deinit(allocator); } pub fn jsonParse(allocator: Allocator, source: anytype, options: ParseOptions) !@This() { - var map = std.StringArrayHashMapUnmanaged(T){}; + var map: std.StringArrayHashMapUnmanaged(T) = .empty; errdefer map.deinit(allocator); if (.object_begin != try source.next()) return error.UnexpectedToken; @@ -52,7 +52,7 @@ pub fn ArrayHashMap(comptime T: type) type { pub fn jsonParseFromValue(allocator: Allocator, source: Value, options: ParseOptions) !@This() { if (source != .object) return error.UnexpectedToken; - var map = std.StringArrayHashMapUnmanaged(T){}; + var map: std.StringArrayHashMapUnmanaged(T) = .empty; errdefer map.deinit(allocator); var it = source.object.iterator(); diff --git a/lib/std/process/Child.zig b/lib/std/process/Child.zig index 2002bad20e99..3a54ede2ad07 100644 --- a/lib/std/process/Child.zig +++ b/lib/std/process/Child.zig @@ -907,12 +907,12 @@ fn spawnWindows(self: *ChildProcess) SpawnError!void { var cmd_line_cache = WindowsCommandLineCache.init(self.allocator, self.argv); defer cmd_line_cache.deinit(); - var app_buf = std.ArrayListUnmanaged(u16){}; + var app_buf: std.ArrayListUnmanaged(u16) = .empty; defer app_buf.deinit(self.allocator); try app_buf.appendSlice(self.allocator, app_name_w); - var dir_buf = std.ArrayListUnmanaged(u16){}; + var dir_buf: std.ArrayListUnmanaged(u16) = .empty; defer dir_buf.deinit(self.allocator); if (cwd_path_w.len > 0) { diff --git a/lib/std/tar.zig b/lib/std/tar.zig index 491d11af2a9c..f15a5e8c8a0b 100644 --- a/lib/std/tar.zig +++ b/lib/std/tar.zig @@ -27,7 +27,7 @@ pub const writer = @import("tar/writer.zig").writer; /// the errors in diagnostics to know whether the operation succeeded or failed. 
pub const Diagnostics = struct { allocator: std.mem.Allocator, - errors: std.ArrayListUnmanaged(Error) = .{}, + errors: std.ArrayListUnmanaged(Error) = .empty, entries: usize = 0, root_dir: []const u8 = "", diff --git a/lib/std/testing.zig b/lib/std/testing.zig index 2cc38749eb5e..91caa7d922a4 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -11,10 +11,10 @@ pub const FailingAllocator = @import("testing/failing_allocator.zig").FailingAll /// This should only be used in temporary test programs. pub const allocator = allocator_instance.allocator(); -pub var allocator_instance = b: { +pub var allocator_instance: std.heap.GeneralPurposeAllocator(.{}) = b: { if (!builtin.is_test) @compileError("Cannot use testing allocator outside of test block"); - break :b std.heap.GeneralPurposeAllocator(.{}){}; + break :b .init; }; pub const failing_allocator = failing_allocator_instance.allocator(); diff --git a/lib/std/zig/AstGen.zig b/lib/std/zig/AstGen.zig index 675fe095a26e..79ab85f6db7a 100644 --- a/lib/std/zig/AstGen.zig +++ b/lib/std/zig/AstGen.zig @@ -22,8 +22,8 @@ tree: *const Ast, /// sub-expressions. See `AstRlAnnotate` for details. nodes_need_rl: *const AstRlAnnotate.RlNeededSet, instructions: std.MultiArrayList(Zir.Inst) = .{}, -extra: ArrayListUnmanaged(u32) = .{}, -string_bytes: ArrayListUnmanaged(u8) = .{}, +extra: ArrayListUnmanaged(u32) = .empty, +string_bytes: ArrayListUnmanaged(u8) = .empty, /// Tracks the current byte offset within the source file. /// Used to populate line deltas in the ZIR. AstGen maintains /// this "cursor" throughout the entire AST lowering process in order @@ -39,8 +39,8 @@ source_column: u32 = 0, /// Used for temporary allocations; freed after AstGen is complete. /// The resulting ZIR code has no references to anything in this arena. 
arena: Allocator, -string_table: std.HashMapUnmanaged(u32, void, StringIndexContext, std.hash_map.default_max_load_percentage) = .{}, -compile_errors: ArrayListUnmanaged(Zir.Inst.CompileErrors.Item) = .{}, +string_table: std.HashMapUnmanaged(u32, void, StringIndexContext, std.hash_map.default_max_load_percentage) = .empty, +compile_errors: ArrayListUnmanaged(Zir.Inst.CompileErrors.Item) = .empty, /// The topmost block of the current function. fn_block: ?*GenZir = null, fn_var_args: bool = false, @@ -52,9 +52,9 @@ within_fn: bool = false, fn_ret_ty: Zir.Inst.Ref = .none, /// Maps string table indexes to the first `@import` ZIR instruction /// that uses this string as the operand. -imports: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, Ast.TokenIndex) = .{}, +imports: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, Ast.TokenIndex) = .empty, /// Used for temporary storage when building payloads. -scratch: std.ArrayListUnmanaged(u32) = .{}, +scratch: std.ArrayListUnmanaged(u32) = .empty, /// Whenever a `ref` instruction is needed, it is created and saved in this /// table instead of being immediately appended to the current block body. /// Then, when the instruction is being added to the parent block (typically from @@ -65,7 +65,7 @@ scratch: std.ArrayListUnmanaged(u32) = .{}, /// 2. `ref` instructions will dominate their uses. This is a required property /// of ZIR. /// The key is the ref operand; the value is the ref instruction. -ref_table: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{}, +ref_table: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .empty, /// Any information which should trigger invalidation of incremental compilation /// data should be used to update this hasher. The result is the final source /// hash of the enclosing declaration/etc. 
@@ -159,7 +159,7 @@ pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir { var top_scope: Scope.Top = .{}; - var gz_instructions: std.ArrayListUnmanaged(Zir.Inst.Index) = .{}; + var gz_instructions: std.ArrayListUnmanaged(Zir.Inst.Index) = .empty; var gen_scope: GenZir = .{ .is_comptime = true, .parent = &top_scope.base, @@ -5854,7 +5854,7 @@ fn errorSetDecl(gz: *GenZir, ri: ResultInfo, node: Ast.Node.Index) InnerError!Zi const payload_index = try reserveExtra(astgen, @typeInfo(Zir.Inst.ErrorSetDecl).@"struct".fields.len); var fields_len: usize = 0; { - var idents: std.AutoHashMapUnmanaged(Zir.NullTerminatedString, Ast.TokenIndex) = .{}; + var idents: std.AutoHashMapUnmanaged(Zir.NullTerminatedString, Ast.TokenIndex) = .empty; defer idents.deinit(gpa); const error_token = main_tokens[node]; @@ -11259,7 +11259,7 @@ fn identifierTokenString(astgen: *AstGen, token: Ast.TokenIndex) InnerError![]co if (!mem.startsWith(u8, ident_name, "@")) { return ident_name; } - var buf: ArrayListUnmanaged(u8) = .{}; + var buf: ArrayListUnmanaged(u8) = .empty; defer buf.deinit(astgen.gpa); try astgen.parseStrLit(token, &buf, ident_name, 1); if (mem.indexOfScalar(u8, buf.items, 0) != null) { @@ -11881,7 +11881,7 @@ const Scope = struct { parent: *Scope, /// Maps string table index to the source location of declaration, /// for the purposes of reporting name shadowing compile errors. - decls: std.AutoHashMapUnmanaged(Zir.NullTerminatedString, Ast.Node.Index) = .{}, + decls: std.AutoHashMapUnmanaged(Zir.NullTerminatedString, Ast.Node.Index) = .empty, node: Ast.Node.Index, inst: Zir.Inst.Index, maybe_generic: bool, @@ -11891,7 +11891,7 @@ const Scope = struct { declaring_gz: ?*GenZir, /// Set of captures used by this namespace. 
- captures: std.AutoArrayHashMapUnmanaged(Zir.Inst.Capture, void) = .{}, + captures: std.AutoArrayHashMapUnmanaged(Zir.Inst.Capture, void) = .empty, fn deinit(self: *Namespace, gpa: Allocator) void { self.decls.deinit(gpa); @@ -13607,9 +13607,9 @@ fn scanContainer( var sfba_state = std.heap.stackFallback(512, astgen.gpa); const sfba = sfba_state.get(); - var names: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, NameEntry) = .{}; - var test_names: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, NameEntry) = .{}; - var decltest_names: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, NameEntry) = .{}; + var names: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, NameEntry) = .empty; + var test_names: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, NameEntry) = .empty; + var decltest_names: std.AutoArrayHashMapUnmanaged(Zir.NullTerminatedString, NameEntry) = .empty; defer { names.deinit(sfba); test_names.deinit(sfba); @@ -13796,7 +13796,7 @@ fn scanContainer( for (names.keys(), names.values()) |name, first| { if (first.next == null) continue; - var notes: std.ArrayListUnmanaged(u32) = .{}; + var notes: std.ArrayListUnmanaged(u32) = .empty; var prev: NameEntry = first; while (prev.next) |cur| : (prev = cur.*) { try notes.append(astgen.arena, try astgen.errNoteTok(cur.tok, "duplicate name here", .{})); @@ -13808,7 +13808,7 @@ fn scanContainer( for (test_names.keys(), test_names.values()) |name, first| { if (first.next == null) continue; - var notes: std.ArrayListUnmanaged(u32) = .{}; + var notes: std.ArrayListUnmanaged(u32) = .empty; var prev: NameEntry = first; while (prev.next) |cur| : (prev = cur.*) { try notes.append(astgen.arena, try astgen.errNoteTok(cur.tok, "duplicate test here", .{})); @@ -13820,7 +13820,7 @@ fn scanContainer( for (decltest_names.keys(), decltest_names.values()) |name, first| { if (first.next == null) continue; - var notes: std.ArrayListUnmanaged(u32) = .{}; + var notes: std.ArrayListUnmanaged(u32) = 
.empty; var prev: NameEntry = first; while (prev.next) |cur| : (prev = cur.*) { try notes.append(astgen.arena, try astgen.errNoteTok(cur.tok, "duplicate decltest here", .{})); @@ -13949,10 +13949,10 @@ fn lowerAstErrors(astgen: *AstGen) !void { const gpa = astgen.gpa; const parse_err = tree.errors[0]; - var msg: std.ArrayListUnmanaged(u8) = .{}; + var msg: std.ArrayListUnmanaged(u8) = .empty; defer msg.deinit(gpa); - var notes: std.ArrayListUnmanaged(u32) = .{}; + var notes: std.ArrayListUnmanaged(u32) = .empty; defer notes.deinit(gpa); for (tree.errors[1..]) |note| { diff --git a/lib/std/zig/ErrorBundle.zig b/lib/std/zig/ErrorBundle.zig index 63e0748d8d1e..3f3c7ff9e3c3 100644 --- a/lib/std/zig/ErrorBundle.zig +++ b/lib/std/zig/ErrorBundle.zig @@ -571,7 +571,7 @@ pub const Wip = struct { if (index == .none) return .none; const other_sl = other.getSourceLocation(index); - var ref_traces: std.ArrayListUnmanaged(ReferenceTrace) = .{}; + var ref_traces: std.ArrayListUnmanaged(ReferenceTrace) = .empty; defer ref_traces.deinit(wip.gpa); if (other_sl.reference_trace_len > 0) { diff --git a/lib/std/zig/WindowsSdk.zig b/lib/std/zig/WindowsSdk.zig index 1e1f0f162970..8fe37affc02d 100644 --- a/lib/std/zig/WindowsSdk.zig +++ b/lib/std/zig/WindowsSdk.zig @@ -751,7 +751,7 @@ const MsvcLibDir = struct { defer instances_dir.close(); var state_subpath_buf: [std.fs.max_name_bytes + 32]u8 = undefined; - var latest_version_lib_dir = std.ArrayListUnmanaged(u8){}; + var latest_version_lib_dir: std.ArrayListUnmanaged(u8) = .empty; errdefer latest_version_lib_dir.deinit(allocator); var latest_version: u64 = 0; diff --git a/lib/std/zig/Zir.zig b/lib/std/zig/Zir.zig index a918bb769cc7..8f86dabb5d04 100644 --- a/lib/std/zig/Zir.zig +++ b/lib/std/zig/Zir.zig @@ -3711,7 +3711,7 @@ pub fn findDecls(zir: Zir, gpa: Allocator, list: *std.ArrayListUnmanaged(Inst.In // `defer` instructions duplicate the same body arbitrarily many times, but we only want to traverse // their contents once per defer. 
So, we store the extra index of the body here to deduplicate. - var found_defers: std.AutoHashMapUnmanaged(u32, void) = .{}; + var found_defers: std.AutoHashMapUnmanaged(u32, void) = .empty; defer found_defers.deinit(gpa); try zir.findDeclsBody(gpa, list, &found_defers, bodies.value_body); @@ -3725,7 +3725,7 @@ pub fn findDecls(zir: Zir, gpa: Allocator, list: *std.ArrayListUnmanaged(Inst.In pub fn findDeclsRoot(zir: Zir, gpa: Allocator, list: *std.ArrayListUnmanaged(Inst.Index)) !void { list.clearRetainingCapacity(); - var found_defers: std.AutoHashMapUnmanaged(u32, void) = .{}; + var found_defers: std.AutoHashMapUnmanaged(u32, void) = .empty; defer found_defers.deinit(gpa); try zir.findDeclsInner(gpa, list, &found_defers, .main_struct_inst); diff --git a/lib/std/zig/render.zig b/lib/std/zig/render.zig index c0391b4faff4..c78a8cb07dfe 100644 --- a/lib/std/zig/render.zig +++ b/lib/std/zig/render.zig @@ -17,21 +17,21 @@ const Ais = AutoIndentingStream(std.ArrayList(u8).Writer); pub const Fixups = struct { /// The key is the mut token (`var`/`const`) of the variable declaration /// that should have a `_ = foo;` inserted afterwards. - unused_var_decls: std.AutoHashMapUnmanaged(Ast.TokenIndex, void) = .{}, + unused_var_decls: std.AutoHashMapUnmanaged(Ast.TokenIndex, void) = .empty, /// The functions in this unordered set of AST fn decl nodes will render /// with a function body of `@trap()` instead, with all parameters /// discarded. - gut_functions: std.AutoHashMapUnmanaged(Ast.Node.Index, void) = .{}, + gut_functions: std.AutoHashMapUnmanaged(Ast.Node.Index, void) = .empty, /// These global declarations will be omitted. - omit_nodes: std.AutoHashMapUnmanaged(Ast.Node.Index, void) = .{}, + omit_nodes: std.AutoHashMapUnmanaged(Ast.Node.Index, void) = .empty, /// These expressions will be replaced with the string value. 
- replace_nodes_with_string: std.AutoHashMapUnmanaged(Ast.Node.Index, []const u8) = .{}, + replace_nodes_with_string: std.AutoHashMapUnmanaged(Ast.Node.Index, []const u8) = .empty, /// The string value will be inserted directly after the node. - append_string_after_node: std.AutoHashMapUnmanaged(Ast.Node.Index, []const u8) = .{}, + append_string_after_node: std.AutoHashMapUnmanaged(Ast.Node.Index, []const u8) = .empty, /// These nodes will be replaced with a different node. - replace_nodes_with_node: std.AutoHashMapUnmanaged(Ast.Node.Index, Ast.Node.Index) = .{}, + replace_nodes_with_node: std.AutoHashMapUnmanaged(Ast.Node.Index, Ast.Node.Index) = .empty, /// Change all identifier names matching the key to be value instead. - rename_identifiers: std.StringArrayHashMapUnmanaged([]const u8) = .{}, + rename_identifiers: std.StringArrayHashMapUnmanaged([]const u8) = .empty, /// All `@import` builtin calls which refer to a file path will be prefixed /// with this path. diff --git a/lib/std/zig/system/NativePaths.zig b/lib/std/zig/system/NativePaths.zig index 2a50e27b0c63..3c961345567c 100644 --- a/lib/std/zig/system/NativePaths.zig +++ b/lib/std/zig/system/NativePaths.zig @@ -7,11 +7,11 @@ const mem = std.mem; const NativePaths = @This(); arena: Allocator, -include_dirs: std.ArrayListUnmanaged([]const u8) = .{}, -lib_dirs: std.ArrayListUnmanaged([]const u8) = .{}, -framework_dirs: std.ArrayListUnmanaged([]const u8) = .{}, -rpaths: std.ArrayListUnmanaged([]const u8) = .{}, -warnings: std.ArrayListUnmanaged([]const u8) = .{}, +include_dirs: std.ArrayListUnmanaged([]const u8) = .empty, +lib_dirs: std.ArrayListUnmanaged([]const u8) = .empty, +framework_dirs: std.ArrayListUnmanaged([]const u8) = .empty, +rpaths: std.ArrayListUnmanaged([]const u8) = .empty, +warnings: std.ArrayListUnmanaged([]const u8) = .empty, pub fn detect(arena: Allocator, native_target: std.Target) !NativePaths { var self: NativePaths = .{ .arena = arena }; diff --git a/src/Compilation.zig 
b/src/Compilation.zig index bd93b2061b48..0c1bbda65d24 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -95,7 +95,7 @@ native_system_include_paths: []const []const u8, /// Corresponds to `-u ` for ELF/MachO and `/include:` for COFF/PE. force_undefined_symbols: std.StringArrayHashMapUnmanaged(void), -c_object_table: std.AutoArrayHashMapUnmanaged(*CObject, void) = .{}, +c_object_table: std.AutoArrayHashMapUnmanaged(*CObject, void) = .empty, win32_resource_table: if (dev.env.supports(.win32_resource)) std.AutoArrayHashMapUnmanaged(*Win32Resource, void) else struct { pub fn keys(_: @This()) [0]void { return .{}; @@ -106,10 +106,10 @@ win32_resource_table: if (dev.env.supports(.win32_resource)) std.AutoArrayHashMa pub fn deinit(_: @This(), _: Allocator) void {} } = .{}, -link_errors: std.ArrayListUnmanaged(link.File.ErrorMsg) = .{}, +link_errors: std.ArrayListUnmanaged(link.File.ErrorMsg) = .empty, link_errors_mutex: std.Thread.Mutex = .{}, link_error_flags: link.File.ErrorFlags = .{}, -lld_errors: std.ArrayListUnmanaged(LldError) = .{}, +lld_errors: std.ArrayListUnmanaged(LldError) = .empty, work_queues: [ len: { @@ -154,7 +154,7 @@ embed_file_work_queue: std.fifo.LinearFifo(*Zcu.EmbedFile, .Dynamic), /// The ErrorMsg memory is owned by the `CObject`, using Compilation's general purpose allocator. /// This data is accessed by multiple threads and is protected by `mutex`. -failed_c_objects: std.AutoArrayHashMapUnmanaged(*CObject, *CObject.Diag.Bundle) = .{}, +failed_c_objects: std.AutoArrayHashMapUnmanaged(*CObject, *CObject.Diag.Bundle) = .empty, /// The ErrorBundle memory is owned by the `Win32Resource`, using Compilation's general purpose allocator. /// This data is accessed by multiple threads and is protected by `mutex`. @@ -166,7 +166,7 @@ failed_win32_resources: if (dev.env.supports(.win32_resource)) std.AutoArrayHash } = .{}, /// Miscellaneous things that can fail. 
-misc_failures: std.AutoArrayHashMapUnmanaged(MiscTask, MiscError) = .{}, +misc_failures: std.AutoArrayHashMapUnmanaged(MiscTask, MiscError) = .empty, /// When this is `true` it means invoking clang as a sub-process is expected to inherit /// stdin, stdout, stderr, and if it returns non success, to forward the exit code. @@ -248,7 +248,7 @@ wasi_emulated_libs: []const wasi_libc.CRTFile, /// For example `Scrt1.o` and `libc_nonshared.a`. These are populated after building libc from source, /// The set of needed CRT (C runtime) files differs depending on the target and compilation settings. /// The key is the basename, and the value is the absolute path to the completed build artifact. -crt_files: std.StringHashMapUnmanaged(CRTFile) = .{}, +crt_files: std.StringHashMapUnmanaged(CRTFile) = .empty, /// How many lines of reference trace should be included per compile error. /// Null means only show snippet on first error. @@ -527,8 +527,8 @@ pub const CObject = struct { } pub const Bundle = struct { - file_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .{}, - category_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .{}, + file_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .empty, + category_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .empty, diags: []Diag = &.{}, pub fn destroy(bundle: *Bundle, gpa: Allocator) void { @@ -561,8 +561,8 @@ pub const CObject = struct { category: u32 = 0, msg: []const u8 = &.{}, src_loc: SrcLoc = .{}, - src_ranges: std.ArrayListUnmanaged(SrcRange) = .{}, - sub_diags: std.ArrayListUnmanaged(Diag) = .{}, + src_ranges: std.ArrayListUnmanaged(SrcRange) = .empty, + sub_diags: std.ArrayListUnmanaged(Diag) = .empty, fn deinit(wip_diag: *@This(), allocator: Allocator) void { allocator.free(wip_diag.msg); @@ -580,19 +580,19 @@ pub const CObject = struct { var bc = BitcodeReader.init(gpa, .{ .reader = reader.any() }); defer bc.deinit(); - var file_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .{}; + var 
file_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .empty; errdefer { for (file_names.values()) |file_name| gpa.free(file_name); file_names.deinit(gpa); } - var category_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .{}; + var category_names: std.AutoArrayHashMapUnmanaged(u32, []const u8) = .empty; errdefer { for (category_names.values()) |category_name| gpa.free(category_name); category_names.deinit(gpa); } - var stack: std.ArrayListUnmanaged(WipDiag) = .{}; + var stack: std.ArrayListUnmanaged(WipDiag) = .empty; defer { for (stack.items) |*wip_diag| wip_diag.deinit(gpa); stack.deinit(gpa); @@ -1067,7 +1067,7 @@ pub const CreateOptions = struct { cache_mode: CacheMode = .incremental, lib_dirs: []const []const u8 = &[0][]const u8{}, rpath_list: []const []const u8 = &[0][]const u8{}, - symbol_wrap_set: std.StringArrayHashMapUnmanaged(void) = .{}, + symbol_wrap_set: std.StringArrayHashMapUnmanaged(void) = .empty, c_source_files: []const CSourceFile = &.{}, rc_source_files: []const RcSourceFile = &.{}, manifest_file: ?[]const u8 = null, @@ -1155,7 +1155,7 @@ pub const CreateOptions = struct { skip_linker_dependencies: bool = false, hash_style: link.File.Elf.HashStyle = .both, entry: Entry = .default, - force_undefined_symbols: std.StringArrayHashMapUnmanaged(void) = .{}, + force_undefined_symbols: std.StringArrayHashMapUnmanaged(void) = .empty, stack_size: ?u64 = null, image_base: ?u64 = null, version: ?std.SemanticVersion = null, @@ -1210,7 +1210,7 @@ fn addModuleTableToCacheHash( main_mod: *Package.Module, hash_type: union(enum) { path_bytes, files: *Cache.Manifest }, ) (error{OutOfMemory} || std.process.GetCwdError)!void { - var seen_table: std.AutoArrayHashMapUnmanaged(*Package.Module, void) = .{}; + var seen_table: std.AutoArrayHashMapUnmanaged(*Package.Module, void) = .empty; defer seen_table.deinit(gpa); // root_mod and main_mod may be the same pointer. In fact they usually are. 
@@ -3362,7 +3362,7 @@ pub fn addModuleErrorMsg( const file_path = try err_src_loc.file_scope.fullPath(gpa); defer gpa.free(file_path); - var ref_traces: std.ArrayListUnmanaged(ErrorBundle.ReferenceTrace) = .{}; + var ref_traces: std.ArrayListUnmanaged(ErrorBundle.ReferenceTrace) = .empty; defer ref_traces.deinit(gpa); if (module_err_msg.reference_trace_root.unwrap()) |rt_root| { @@ -3370,7 +3370,7 @@ pub fn addModuleErrorMsg( all_references.* = try mod.resolveReferences(); } - var seen: std.AutoHashMapUnmanaged(InternPool.AnalUnit, void) = .{}; + var seen: std.AutoHashMapUnmanaged(InternPool.AnalUnit, void) = .empty; defer seen.deinit(gpa); const max_references = mod.comp.reference_trace orelse Sema.default_reference_trace_len; @@ -3439,7 +3439,7 @@ pub fn addModuleErrorMsg( // De-duplicate error notes. The main use case in mind for this is // too many "note: called from here" notes when eval branch quota is reached. - var notes: std.ArrayHashMapUnmanaged(ErrorBundle.ErrorMessage, void, ErrorNoteHashContext, true) = .{}; + var notes: std.ArrayHashMapUnmanaged(ErrorBundle.ErrorMessage, void, ErrorNoteHashContext, true) = .empty; defer notes.deinit(gpa); for (module_err_msg.notes) |module_note| { @@ -3544,7 +3544,7 @@ fn performAllTheWorkInner( comp.job_queued_update_builtin_zig = false; if (comp.zcu == null) break :b; // TODO put all the modules in a flat array to make them easy to iterate. 
- var seen: std.AutoArrayHashMapUnmanaged(*Package.Module, void) = .{}; + var seen: std.AutoArrayHashMapUnmanaged(*Package.Module, void) = .empty; defer seen.deinit(comp.gpa); try seen.put(comp.gpa, comp.root_mod, {}); var i: usize = 0; @@ -4026,7 +4026,7 @@ fn docsCopyFallible(comp: *Compilation) anyerror!void { }; defer tar_file.close(); - var seen_table: std.AutoArrayHashMapUnmanaged(*Package.Module, []const u8) = .{}; + var seen_table: std.AutoArrayHashMapUnmanaged(*Package.Module, []const u8) = .empty; defer seen_table.deinit(comp.gpa); try seen_table.put(comp.gpa, zcu.main_mod, comp.root_name); @@ -5221,7 +5221,7 @@ fn spawnZigRc( argv: []const []const u8, child_progress_node: std.Progress.Node, ) !void { - var node_name: std.ArrayListUnmanaged(u8) = .{}; + var node_name: std.ArrayListUnmanaged(u8) = .empty; defer node_name.deinit(arena); var child = std.process.Child.init(argv, arena); @@ -5540,7 +5540,7 @@ pub fn addCCArgs( } { - var san_arg: std.ArrayListUnmanaged(u8) = .{}; + var san_arg: std.ArrayListUnmanaged(u8) = .empty; const prefix = "-fsanitize="; if (mod.sanitize_c) { if (san_arg.items.len == 0) try san_arg.appendSlice(arena, prefix); diff --git a/src/InternPool.zig b/src/InternPool.zig index fbfd29369f5e..8160265e6540 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -23,36 +23,36 @@ tid_shift_32: if (single_threaded) u0 else std.math.Log2Int(u32) = if (single_th /// * For a `func`, this is the source of the full function signature. /// These are also invalidated if tracking fails for this instruction. /// Value is index into `dep_entries` of the first dependency on this hash. -src_hash_deps: std.AutoArrayHashMapUnmanaged(TrackedInst.Index, DepEntry.Index) = .{}, +src_hash_deps: std.AutoArrayHashMapUnmanaged(TrackedInst.Index, DepEntry.Index) = .empty, /// Dependencies on the value of a Nav. /// Value is index into `dep_entries` of the first dependency on this Nav value. 
-nav_val_deps: std.AutoArrayHashMapUnmanaged(Nav.Index, DepEntry.Index) = .{}, +nav_val_deps: std.AutoArrayHashMapUnmanaged(Nav.Index, DepEntry.Index) = .empty, /// Dependencies on an interned value, either: /// * a runtime function (invalidated when its IES changes) /// * a container type requiring resolution (invalidated when the type must be recreated at a new index) /// Value is index into `dep_entries` of the first dependency on this interned value. -interned_deps: std.AutoArrayHashMapUnmanaged(Index, DepEntry.Index) = .{}, +interned_deps: std.AutoArrayHashMapUnmanaged(Index, DepEntry.Index) = .empty, /// Dependencies on the full set of names in a ZIR namespace. /// Key refers to a `struct_decl`, `union_decl`, etc. /// Value is index into `dep_entries` of the first dependency on this namespace. -namespace_deps: std.AutoArrayHashMapUnmanaged(TrackedInst.Index, DepEntry.Index) = .{}, +namespace_deps: std.AutoArrayHashMapUnmanaged(TrackedInst.Index, DepEntry.Index) = .empty, /// Dependencies on the (non-)existence of some name in a namespace. /// Value is index into `dep_entries` of the first dependency on this name. -namespace_name_deps: std.AutoArrayHashMapUnmanaged(NamespaceNameKey, DepEntry.Index) = .{}, +namespace_name_deps: std.AutoArrayHashMapUnmanaged(NamespaceNameKey, DepEntry.Index) = .empty, /// Given a `Depender`, points to an entry in `dep_entries` whose `depender` /// matches. The `next_dependee` field can be used to iterate all such entries /// and remove them from the corresponding lists. -first_dependency: std.AutoArrayHashMapUnmanaged(AnalUnit, DepEntry.Index) = .{}, +first_dependency: std.AutoArrayHashMapUnmanaged(AnalUnit, DepEntry.Index) = .empty, /// Stores dependency information. The hashmaps declared above are used to look /// up entries in this list as required. This is not stored in `extra` so that /// we can use `free_dep_entries` to track free indices, since dependencies are /// removed frequently. 
-dep_entries: std.ArrayListUnmanaged(DepEntry) = .{}, +dep_entries: std.ArrayListUnmanaged(DepEntry) = .empty, /// Stores unused indices in `dep_entries` which can be reused without a full /// garbage collection pass. -free_dep_entries: std.ArrayListUnmanaged(DepEntry.Index) = .{}, +free_dep_entries: std.ArrayListUnmanaged(DepEntry.Index) = .empty, /// Whether a multi-threaded intern pool is useful. /// Currently `false` until the intern pool is actually accessed @@ -10791,7 +10791,7 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator) var bw = std.io.bufferedWriter(std.io.getStdErr().writer()); const w = bw.writer(); - var instances: std.AutoArrayHashMapUnmanaged(Index, std.ArrayListUnmanaged(Index)) = .{}; + var instances: std.AutoArrayHashMapUnmanaged(Index, std.ArrayListUnmanaged(Index)) = .empty; for (ip.locals, 0..) |*local, tid| { const items = local.shared.items.view().slice(); const extra_list = local.shared.extra; diff --git a/src/Liveness.zig b/src/Liveness.zig index d90af6462aa3..abada074921d 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -94,10 +94,10 @@ fn LivenessPassData(comptime pass: LivenessPass) type { /// body and which we are currently within. Also includes `loop`s which are the target /// of a `repeat` instruction, and `loop_switch_br`s which are the target of a /// `switch_dispatch` instruction. - breaks: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}, + breaks: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .empty, /// The set of operands for which we have seen at least one usage but not their birth. - live_set: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}, + live_set: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .empty, fn deinit(self: *@This(), gpa: Allocator) void { self.breaks.deinit(gpa); @@ -107,15 +107,15 @@ fn LivenessPassData(comptime pass: LivenessPass) type { .main_analysis => struct { /// Every `block` and `loop` currently under analysis. 
- block_scopes: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockScope) = .{}, + block_scopes: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockScope) = .empty, /// The set of instructions currently alive in the current control /// flow branch. - live_set: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}, + live_set: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .empty, /// The extra data initialized by the `loop_analysis` pass for this pass to consume. /// Owned by this struct during this pass. - old_extra: std.ArrayListUnmanaged(u32) = .{}, + old_extra: std.ArrayListUnmanaged(u32) = .empty, const BlockScope = struct { /// If this is a `block`, these instructions are alive upon a `br` to this block. @@ -1710,10 +1710,10 @@ fn analyzeInstCondBr( // Operands which are alive in one branch but not the other need to die at the start of // the peer branch. - var then_mirrored_deaths: std.ArrayListUnmanaged(Air.Inst.Index) = .{}; + var then_mirrored_deaths: std.ArrayListUnmanaged(Air.Inst.Index) = .empty; defer then_mirrored_deaths.deinit(gpa); - var else_mirrored_deaths: std.ArrayListUnmanaged(Air.Inst.Index) = .{}; + var else_mirrored_deaths: std.ArrayListUnmanaged(Air.Inst.Index) = .empty; defer else_mirrored_deaths.deinit(gpa); // Note: this invalidates `else_live`, but expands `then_live` to be their union @@ -1785,10 +1785,10 @@ fn analyzeInstSwitchBr( switch (pass) { .loop_analysis => { - var old_breaks: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}; + var old_breaks: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .empty; defer old_breaks.deinit(gpa); - var old_live: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}; + var old_live: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .empty; defer old_live.deinit(gpa); if (is_dispatch_loop) { diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig index 87ba6a3fc893..b6b7d408935a 100644 --- a/src/Liveness/Verify.zig +++ b/src/Liveness/Verify.zig @@ -4,8 +4,8 @@ gpa: std.mem.Allocator, air: Air, 
liveness: Liveness, live: LiveMap = .{}, -blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, LiveMap) = .{}, -loops: std.AutoHashMapUnmanaged(Air.Inst.Index, LiveMap) = .{}, +blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, LiveMap) = .empty, +loops: std.AutoHashMapUnmanaged(Air.Inst.Index, LiveMap) = .empty, intern_pool: *const InternPool, pub const Error = error{ LivenessInvalid, OutOfMemory }; diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig index 81f5cce819b7..05f724cda367 100644 --- a/src/Package/Fetch.zig +++ b/src/Package/Fetch.zig @@ -91,7 +91,7 @@ pub const JobQueue = struct { /// `table` may be missing some tasks such as ones that failed, so this /// field contains references to all of them. /// Protected by `mutex`. - all_fetches: std.ArrayListUnmanaged(*Fetch) = .{}, + all_fetches: std.ArrayListUnmanaged(*Fetch) = .empty, http_client: *std.http.Client, thread_pool: *ThreadPool, @@ -1439,7 +1439,7 @@ fn computeHash( // Track directories which had any files deleted from them so that empty directories // can be deleted. - var sus_dirs: std.StringArrayHashMapUnmanaged(void) = .{}; + var sus_dirs: std.StringArrayHashMapUnmanaged(void) = .empty; defer sus_dirs.deinit(gpa); var walker = try root_dir.walk(gpa); @@ -1710,7 +1710,7 @@ fn normalizePath(bytes: []u8) void { } const Filter = struct { - include_paths: std.StringArrayHashMapUnmanaged(void) = .{}, + include_paths: std.StringArrayHashMapUnmanaged(void) = .empty, /// sub_path is relative to the package root. 
pub fn includePath(self: Filter, sub_path: []const u8) bool { @@ -2309,7 +2309,7 @@ const TestFetchBuilder = struct { var package_dir = try self.packageDir(); defer package_dir.close(); - var actual_files: std.ArrayListUnmanaged([]u8) = .{}; + var actual_files: std.ArrayListUnmanaged([]u8) = .empty; defer actual_files.deinit(std.testing.allocator); defer for (actual_files.items) |file| std.testing.allocator.free(file); var walker = try package_dir.walk(std.testing.allocator); diff --git a/src/Package/Fetch/git.zig b/src/Package/Fetch/git.zig index a8c106412ee7..7c2fa21b5443 100644 --- a/src/Package/Fetch/git.zig +++ b/src/Package/Fetch/git.zig @@ -38,7 +38,7 @@ test parseOid { pub const Diagnostics = struct { allocator: Allocator, - errors: std.ArrayListUnmanaged(Error) = .{}, + errors: std.ArrayListUnmanaged(Error) = .empty, pub const Error = union(enum) { unable_to_create_sym_link: struct { @@ -263,7 +263,7 @@ const Odb = struct { fn readObject(odb: *Odb) !Object { var base_offset = try odb.pack_file.getPos(); var base_header: EntryHeader = undefined; - var delta_offsets = std.ArrayListUnmanaged(u64){}; + var delta_offsets: std.ArrayListUnmanaged(u64) = .empty; defer delta_offsets.deinit(odb.allocator); const base_object = while (true) { if (odb.cache.get(base_offset)) |base_object| break base_object; @@ -361,7 +361,7 @@ const Object = struct { /// freed by the caller at any point after inserting it into the cache. Any /// objects remaining in the cache will be freed when the cache itself is freed. 
const ObjectCache = struct { - objects: std.AutoHashMapUnmanaged(u64, CacheEntry) = .{}, + objects: std.AutoHashMapUnmanaged(u64, CacheEntry) = .empty, lru_nodes: LruList = .{}, byte_size: usize = 0, @@ -660,7 +660,7 @@ pub const Session = struct { upload_pack_uri.query = null; upload_pack_uri.fragment = null; - var body = std.ArrayListUnmanaged(u8){}; + var body: std.ArrayListUnmanaged(u8) = .empty; defer body.deinit(allocator); const body_writer = body.writer(allocator); try Packet.write(.{ .data = "command=ls-refs\n" }, body_writer); @@ -767,7 +767,7 @@ pub const Session = struct { upload_pack_uri.query = null; upload_pack_uri.fragment = null; - var body = std.ArrayListUnmanaged(u8){}; + var body: std.ArrayListUnmanaged(u8) = .empty; defer body.deinit(allocator); const body_writer = body.writer(allocator); try Packet.write(.{ .data = "command=fetch\n" }, body_writer); @@ -1044,9 +1044,9 @@ const IndexEntry = struct { pub fn indexPack(allocator: Allocator, pack: std.fs.File, index_writer: anytype) !void { try pack.seekTo(0); - var index_entries = std.AutoHashMapUnmanaged(Oid, IndexEntry){}; + var index_entries: std.AutoHashMapUnmanaged(Oid, IndexEntry) = .empty; defer index_entries.deinit(allocator); - var pending_deltas = std.ArrayListUnmanaged(IndexEntry){}; + var pending_deltas: std.ArrayListUnmanaged(IndexEntry) = .empty; defer pending_deltas.deinit(allocator); const pack_checksum = try indexPackFirstPass(allocator, pack, &index_entries, &pending_deltas); @@ -1068,7 +1068,7 @@ pub fn indexPack(allocator: Allocator, pack: std.fs.File, index_writer: anytype) remaining_deltas = pending_deltas.items.len; } - var oids = std.ArrayListUnmanaged(Oid){}; + var oids: std.ArrayListUnmanaged(Oid) = .empty; defer oids.deinit(allocator); try oids.ensureTotalCapacityPrecise(allocator, index_entries.count()); var index_entries_iter = index_entries.iterator(); @@ -1109,7 +1109,7 @@ pub fn indexPack(allocator: Allocator, pack: std.fs.File, index_writer: anytype) try 
writer.writeInt(u32, index_entries.get(oid).?.crc32, .big); } - var big_offsets = std.ArrayListUnmanaged(u64){}; + var big_offsets: std.ArrayListUnmanaged(u64) = .empty; defer big_offsets.deinit(allocator); for (oids.items) |oid| { const offset = index_entries.get(oid).?.offset; @@ -1213,7 +1213,7 @@ fn indexPackHashDelta( // Figure out the chain of deltas to resolve var base_offset = delta.offset; var base_header: EntryHeader = undefined; - var delta_offsets = std.ArrayListUnmanaged(u64){}; + var delta_offsets: std.ArrayListUnmanaged(u64) = .empty; defer delta_offsets.deinit(allocator); const base_object = while (true) { if (cache.get(base_offset)) |base_object| break base_object; @@ -1447,7 +1447,7 @@ test "packfile indexing and checkout" { "file8", "file9", }; - var actual_files: std.ArrayListUnmanaged([]u8) = .{}; + var actual_files: std.ArrayListUnmanaged([]u8) = .empty; defer actual_files.deinit(testing.allocator); defer for (actual_files.items) |file| testing.allocator.free(file); var walker = try worktree.dir.walk(testing.allocator); diff --git a/src/Sema.zig b/src/Sema.zig index 7ab60adbf244..8f097bb35f9c 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -13,7 +13,7 @@ gpa: Allocator, arena: Allocator, code: Zir, air_instructions: std.MultiArrayList(Air.Inst) = .{}, -air_extra: std.ArrayListUnmanaged(u32) = .{}, +air_extra: std.ArrayListUnmanaged(u32) = .empty, /// Maps ZIR to AIR. inst_map: InstMap = .{}, /// The "owner" of a `Sema` represents the root "thing" that is being analyzed. @@ -65,7 +65,7 @@ generic_call_src: LazySrcLoc = LazySrcLoc.unneeded, /// They are created when an break_inline passes through a runtime condition, because /// Sema must convert comptime control flow to runtime control flow, which means /// breaking from a block. -post_hoc_blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, *LabeledBlock) = .{}, +post_hoc_blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, *LabeledBlock) = .empty, /// Populated with the last compile error created. 
err: ?*Zcu.ErrorMsg = null, /// Set to true when analyzing a func type instruction so that nested generic @@ -74,12 +74,12 @@ no_partial_func_ty: bool = false, /// The temporary arena is used for the memory of the `InferredAlloc` values /// here so the values can be dropped without any cleanup. -unresolved_inferred_allocs: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, InferredAlloc) = .{}, +unresolved_inferred_allocs: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, InferredAlloc) = .empty, /// Links every pointer derived from a base `alloc` back to that `alloc`. Used /// to detect comptime-known `const`s. /// TODO: ZIR liveness analysis would allow us to remove elements from this map. -base_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, Air.Inst.Index) = .{}, +base_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, Air.Inst.Index) = .empty, /// Runtime `alloc`s are placed in this map to track all comptime-known writes /// before the corresponding `make_ptr_const` instruction. @@ -90,28 +90,28 @@ base_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, Air.Inst.Index) = .{}, /// is comptime-known, and all stores to the pointer must be applied at comptime /// to determine the comptime value. /// Backed by gpa. -maybe_comptime_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, MaybeComptimeAlloc) = .{}, +maybe_comptime_allocs: std.AutoHashMapUnmanaged(Air.Inst.Index, MaybeComptimeAlloc) = .empty, /// Comptime-mutable allocs, and any comptime allocs which reference it, are /// stored as elements of this array. /// Pointers to such memory are represented via an index into this array. /// Backed by gpa. -comptime_allocs: std.ArrayListUnmanaged(ComptimeAlloc) = .{}, +comptime_allocs: std.ArrayListUnmanaged(ComptimeAlloc) = .empty, /// A list of exports performed by this analysis. After this `Sema` terminates, /// these are flushed to `Zcu.single_exports` or `Zcu.multi_exports`. 
-exports: std.ArrayListUnmanaged(Zcu.Export) = .{}, +exports: std.ArrayListUnmanaged(Zcu.Export) = .empty, /// All references registered so far by this `Sema`. This is a temporary duplicate /// of data stored in `Zcu.all_references`. It exists to avoid adding references to /// a given `AnalUnit` multiple times. -references: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{}, -type_references: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{}, +references: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .empty, +type_references: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .empty, /// All dependencies registered so far by this `Sema`. This is a temporary duplicate /// of the main dependency data. It exists to avoid adding dependencies to a given /// `AnalUnit` multiple times. -dependencies: std.AutoArrayHashMapUnmanaged(InternPool.Dependee, void) = .{}, +dependencies: std.AutoArrayHashMapUnmanaged(InternPool.Dependee, void) = .empty, /// Whether memoization of this call is permitted. Operations with side effects global /// to the `Sema`, such as `@setEvalBranchQuota`, set this to `false`. It is observed @@ -208,7 +208,7 @@ pub const InferredErrorSet = struct { /// are returned from any dependent functions. errors: NameMap = .{}, /// Other inferred error sets which this inferred error set should include. - inferred_error_sets: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{}, + inferred_error_sets: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .empty, /// The regular error set created by resolving this inferred error set. resolved: InternPool.Index = .none, @@ -508,9 +508,9 @@ pub const Block = struct { /// * for a `switch_block[_ref]`, this refers to dummy `br` instructions /// which correspond to `switch_continue` ZIR. The switch logic will /// rewrite these to appropriate AIR switch dispatches. 
- extra_insts: std.ArrayListUnmanaged(Air.Inst.Index) = .{}, + extra_insts: std.ArrayListUnmanaged(Air.Inst.Index) = .empty, /// Same indexes, capacity, length as `extra_insts`. - extra_src_locs: std.ArrayListUnmanaged(LazySrcLoc) = .{}, + extra_src_locs: std.ArrayListUnmanaged(LazySrcLoc) = .empty, pub fn deinit(merges: *@This(), allocator: Allocator) void { merges.results.deinit(allocator); @@ -871,7 +871,7 @@ const InferredAlloc = struct { /// is known. These should be rewritten to perform any required coercions /// when the type is resolved. /// Allocated from `sema.arena`. - prongs: std.ArrayListUnmanaged(Air.Inst.Index) = .{}, + prongs: std.ArrayListUnmanaged(Air.Inst.Index) = .empty, }; const NeededComptimeReason = struct { @@ -2908,7 +2908,7 @@ fn createTypeName( const fn_info = sema.code.getFnInfo(ip.funcZirBodyInst(sema.func_index).resolve(ip) orelse return error.AnalysisFail); const zir_tags = sema.code.instructions.items(.tag); - var buf: std.ArrayListUnmanaged(u8) = .{}; + var buf: std.ArrayListUnmanaged(u8) = .empty; defer buf.deinit(gpa); const writer = buf.writer(gpa); @@ -6851,11 +6851,11 @@ fn lookupInNamespace( if (observe_usingnamespace and (namespace.pub_usingnamespace.items.len != 0 or namespace.priv_usingnamespace.items.len != 0)) { const gpa = sema.gpa; - var checked_namespaces: std.AutoArrayHashMapUnmanaged(*Namespace, void) = .{}; + var checked_namespaces: std.AutoArrayHashMapUnmanaged(*Namespace, void) = .empty; defer checked_namespaces.deinit(gpa); // Keep track of name conflicts for error notes. - var candidates: std.ArrayListUnmanaged(InternPool.Nav.Index) = .{}; + var candidates: std.ArrayListUnmanaged(InternPool.Nav.Index) = .empty; defer candidates.deinit(gpa); try checked_namespaces.put(gpa, namespace, {}); @@ -22754,7 +22754,7 @@ fn reifyUnion( break :tag_ty .{ enum_tag_ty.toIntern(), true }; } else tag_ty: { // We must track field names and set up the tag type ourselves. 
- var field_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; + var field_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .empty; try field_names.ensureTotalCapacity(sema.arena, fields_len); for (field_types, 0..) |*field_ty, field_idx| { @@ -37075,7 +37075,7 @@ fn unionFields( var int_tag_ty: Type = undefined; var enum_field_names: []InternPool.NullTerminatedString = &.{}; - var enum_field_vals: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{}; + var enum_field_vals: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .empty; var explicit_tags_seen: []bool = &.{}; if (tag_type_ref != .none) { const tag_ty_src: LazySrcLoc = .{ @@ -37126,8 +37126,8 @@ fn unionFields( enum_field_names = try sema.arena.alloc(InternPool.NullTerminatedString, fields_len); } - var field_types: std.ArrayListUnmanaged(InternPool.Index) = .{}; - var field_aligns: std.ArrayListUnmanaged(InternPool.Alignment) = .{}; + var field_types: std.ArrayListUnmanaged(InternPool.Index) = .empty; + var field_aligns: std.ArrayListUnmanaged(InternPool.Alignment) = .empty; try field_types.ensureTotalCapacityPrecise(sema.arena, fields_len); if (small.any_aligned_fields) diff --git a/src/Zcu.zig b/src/Zcu.zig index 148570d85a8d..7bddabaa5053 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -76,14 +76,14 @@ local_zir_cache: Compilation.Directory, /// This is where all `Export` values are stored. Not all values here are necessarily valid exports; /// to enumerate all exports, `single_exports` and `multi_exports` must be consulted. -all_exports: std.ArrayListUnmanaged(Export) = .{}, +all_exports: std.ArrayListUnmanaged(Export) = .empty, /// This is a list of free indices in `all_exports`. These indices may be reused by exports from /// future semantic analysis. 
-free_exports: std.ArrayListUnmanaged(u32) = .{}, +free_exports: std.ArrayListUnmanaged(u32) = .empty, /// Maps from an `AnalUnit` which performs a single export, to the index into `all_exports` of /// the export it performs. Note that the key is not the `Decl` being exported, but the `AnalUnit` /// whose analysis triggered the export. -single_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{}, +single_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .empty, /// Like `single_exports`, but for `AnalUnit`s which perform multiple exports. /// The exports are `all_exports.items[index..][0..len]`. multi_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct { @@ -104,29 +104,29 @@ multi_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct { /// `Compilation.update` of the process for a given `Compilation`. /// /// Indexes correspond 1:1 to `files`. -import_table: std.StringArrayHashMapUnmanaged(File.Index) = .{}, +import_table: std.StringArrayHashMapUnmanaged(File.Index) = .empty, /// The set of all the files which have been loaded with `@embedFile` in the Module. /// We keep track of this in order to iterate over it and check which files have been /// modified on the file system when an update is requested, as well as to cache /// `@embedFile` results. /// Keys are fully resolved file paths. This table owns the keys and values. -embed_table: std.StringArrayHashMapUnmanaged(*EmbedFile) = .{}, +embed_table: std.StringArrayHashMapUnmanaged(*EmbedFile) = .empty, /// Stores all Type and Value objects. /// The idea is that this will be periodically garbage-collected, but such logic /// is not yet implemented. intern_pool: InternPool = .{}, -analysis_in_progress: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{}, +analysis_in_progress: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .empty, /// The ErrorMsg memory is owned by the `AnalUnit`, using Module's general purpose allocator. 
-failed_analysis: std.AutoArrayHashMapUnmanaged(AnalUnit, *ErrorMsg) = .{}, +failed_analysis: std.AutoArrayHashMapUnmanaged(AnalUnit, *ErrorMsg) = .empty, /// This `AnalUnit` failed semantic analysis because it required analysis of another `AnalUnit` which itself failed. -transitive_failed_analysis: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{}, +transitive_failed_analysis: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .empty, /// This `Nav` succeeded analysis, but failed codegen. /// This may be a simple "value" `Nav`, or it may be a function. /// The ErrorMsg memory is owned by the `AnalUnit`, using Module's general purpose allocator. -failed_codegen: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, *ErrorMsg) = .{}, +failed_codegen: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, *ErrorMsg) = .empty, /// Keep track of one `@compileLog` callsite per `AnalUnit`. /// The value is the source location of the `@compileLog` call, convertible to a `LazySrcLoc`. compile_log_sources: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct { @@ -141,14 +141,14 @@ compile_log_sources: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct { }) = .{}, /// Using a map here for consistency with the other fields here. /// The ErrorMsg memory is owned by the `File`, using Module's general purpose allocator. -failed_files: std.AutoArrayHashMapUnmanaged(*File, ?*ErrorMsg) = .{}, +failed_files: std.AutoArrayHashMapUnmanaged(*File, ?*ErrorMsg) = .empty, /// The ErrorMsg memory is owned by the `EmbedFile`, using Module's general purpose allocator. -failed_embed_files: std.AutoArrayHashMapUnmanaged(*EmbedFile, *ErrorMsg) = .{}, +failed_embed_files: std.AutoArrayHashMapUnmanaged(*EmbedFile, *ErrorMsg) = .empty, /// Key is index into `all_exports`. 
-failed_exports: std.AutoArrayHashMapUnmanaged(u32, *ErrorMsg) = .{}, +failed_exports: std.AutoArrayHashMapUnmanaged(u32, *ErrorMsg) = .empty, /// If analysis failed due to a cimport error, the corresponding Clang errors /// are stored here. -cimport_errors: std.AutoArrayHashMapUnmanaged(AnalUnit, std.zig.ErrorBundle) = .{}, +cimport_errors: std.AutoArrayHashMapUnmanaged(AnalUnit, std.zig.ErrorBundle) = .empty, /// Maximum amount of distinct error values, set by --error-limit error_limit: ErrorInt, @@ -156,19 +156,19 @@ error_limit: ErrorInt, /// Value is the number of PO dependencies of this AnalUnit. /// This value will decrease as we perform semantic analysis to learn what is outdated. /// If any of these PO deps is outdated, this value will be moved to `outdated`. -potentially_outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{}, +potentially_outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .empty, /// Value is the number of PO dependencies of this AnalUnit. /// Once this value drops to 0, the AnalUnit is a candidate for re-analysis. -outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{}, +outdated: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .empty, /// This contains all `AnalUnit`s in `outdated` whose PO dependency count is 0. /// Such `AnalUnit`s are ready for immediate re-analysis. /// See `findOutdatedToAnalyze` for details. -outdated_ready: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .{}, +outdated_ready: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .empty, /// This contains a list of AnalUnit whose analysis or codegen failed, but the /// failure was something like running out of disk space, and trying again may /// succeed. On the next update, we will flush this list, marking all members of /// it as outdated. -retryable_failures: std.ArrayListUnmanaged(AnalUnit) = .{}, +retryable_failures: std.ArrayListUnmanaged(AnalUnit) = .empty, /// These are the modules which we initially queue for analysis in `Compilation.update`. 
/// `resolveReferences` will use these as the root of its reachability traversal. @@ -184,31 +184,31 @@ stage1_flags: packed struct { reserved: u2 = 0, } = .{}, -compile_log_text: std.ArrayListUnmanaged(u8) = .{}, +compile_log_text: std.ArrayListUnmanaged(u8) = .empty, -test_functions: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, void) = .{}, +test_functions: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, void) = .empty, -global_assembly: std.AutoArrayHashMapUnmanaged(InternPool.Cau.Index, []u8) = .{}, +global_assembly: std.AutoArrayHashMapUnmanaged(InternPool.Cau.Index, []u8) = .empty, /// Key is the `AnalUnit` *performing* the reference. This representation allows /// incremental updates to quickly delete references caused by a specific `AnalUnit`. /// Value is index into `all_references` of the first reference triggered by the unit. /// The `next` field on the `Reference` forms a linked list of all references /// triggered by the key `AnalUnit`. -reference_table: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{}, -all_references: std.ArrayListUnmanaged(Reference) = .{}, +reference_table: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .empty, +all_references: std.ArrayListUnmanaged(Reference) = .empty, /// Freelist of indices in `all_references`. -free_references: std.ArrayListUnmanaged(u32) = .{}, +free_references: std.ArrayListUnmanaged(u32) = .empty, /// Key is the `AnalUnit` *performing* the reference. This representation allows /// incremental updates to quickly delete references caused by a specific `AnalUnit`. /// Value is index into `all_type_reference` of the first reference triggered by the unit. /// The `next` field on the `TypeReference` forms a linked list of all type references /// triggered by the key `AnalUnit`. 
-type_reference_table: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .{}, -all_type_references: std.ArrayListUnmanaged(TypeReference) = .{}, +type_reference_table: std.AutoArrayHashMapUnmanaged(AnalUnit, u32) = .empty, +all_type_references: std.ArrayListUnmanaged(TypeReference) = .empty, /// Freelist of indices in `all_type_references`. -free_type_references: std.ArrayListUnmanaged(u32) = .{}, +free_type_references: std.ArrayListUnmanaged(u32) = .empty, panic_messages: [PanicId.len]InternPool.Nav.Index.Optional = .{.none} ** PanicId.len, /// The panic function body. @@ -338,16 +338,16 @@ pub const Namespace = struct { /// Will be a struct, enum, union, or opaque. owner_type: InternPool.Index, /// Members of the namespace which are marked `pub`. - pub_decls: std.ArrayHashMapUnmanaged(InternPool.Nav.Index, void, NavNameContext, true) = .{}, + pub_decls: std.ArrayHashMapUnmanaged(InternPool.Nav.Index, void, NavNameContext, true) = .empty, /// Members of the namespace which are *not* marked `pub`. - priv_decls: std.ArrayHashMapUnmanaged(InternPool.Nav.Index, void, NavNameContext, true) = .{}, + priv_decls: std.ArrayHashMapUnmanaged(InternPool.Nav.Index, void, NavNameContext, true) = .empty, /// All `usingnamespace` declarations in this namespace which are marked `pub`. - pub_usingnamespace: std.ArrayListUnmanaged(InternPool.Nav.Index) = .{}, + pub_usingnamespace: std.ArrayListUnmanaged(InternPool.Nav.Index) = .empty, /// All `usingnamespace` declarations in this namespace which are *not* marked `pub`. - priv_usingnamespace: std.ArrayListUnmanaged(InternPool.Nav.Index) = .{}, + priv_usingnamespace: std.ArrayListUnmanaged(InternPool.Nav.Index) = .empty, /// All `comptime` and `test` declarations in this namespace. We store these purely so that /// incremental compilation can re-use the existing `Cau`s when a namespace changes. 
- other_decls: std.ArrayListUnmanaged(InternPool.Cau.Index) = .{}, + other_decls: std.ArrayListUnmanaged(InternPool.Cau.Index) = .empty, pub const Index = InternPool.NamespaceIndex; pub const OptionalIndex = InternPool.OptionalNamespaceIndex; @@ -451,7 +451,7 @@ pub const File = struct { /// Whether this file is a part of multiple packages. This is an error condition which will be reported after AstGen. multi_pkg: bool = false, /// List of references to this file, used for multi-package errors. - references: std.ArrayListUnmanaged(File.Reference) = .{}, + references: std.ArrayListUnmanaged(File.Reference) = .empty, /// The most recent successful ZIR for this file, with no errors. /// This is only populated when a previously successful ZIR @@ -2551,13 +2551,13 @@ pub fn mapOldZirToNew( old_inst: Zir.Inst.Index, new_inst: Zir.Inst.Index, }; - var match_stack: std.ArrayListUnmanaged(MatchedZirDecl) = .{}; + var match_stack: std.ArrayListUnmanaged(MatchedZirDecl) = .empty; defer match_stack.deinit(gpa); // Used as temporary buffers for namespace declaration instructions - var old_decls: std.ArrayListUnmanaged(Zir.Inst.Index) = .{}; + var old_decls: std.ArrayListUnmanaged(Zir.Inst.Index) = .empty; defer old_decls.deinit(gpa); - var new_decls: std.ArrayListUnmanaged(Zir.Inst.Index) = .{}; + var new_decls: std.ArrayListUnmanaged(Zir.Inst.Index) = .empty; defer new_decls.deinit(gpa); // Map the main struct inst (and anything in its fields) @@ -2582,19 +2582,19 @@ pub fn mapOldZirToNew( try inst_map.put(gpa, match_item.old_inst, match_item.new_inst); // Maps decl name to `declaration` instruction. - var named_decls: std.StringHashMapUnmanaged(Zir.Inst.Index) = .{}; + var named_decls: std.StringHashMapUnmanaged(Zir.Inst.Index) = .empty; defer named_decls.deinit(gpa); // Maps test name to `declaration` instruction. 
- var named_tests: std.StringHashMapUnmanaged(Zir.Inst.Index) = .{}; + var named_tests: std.StringHashMapUnmanaged(Zir.Inst.Index) = .empty; defer named_tests.deinit(gpa); // All unnamed tests, in order, for a best-effort match. - var unnamed_tests: std.ArrayListUnmanaged(Zir.Inst.Index) = .{}; + var unnamed_tests: std.ArrayListUnmanaged(Zir.Inst.Index) = .empty; defer unnamed_tests.deinit(gpa); // All comptime declarations, in order, for a best-effort match. - var comptime_decls: std.ArrayListUnmanaged(Zir.Inst.Index) = .{}; + var comptime_decls: std.ArrayListUnmanaged(Zir.Inst.Index) = .empty; defer comptime_decls.deinit(gpa); // All usingnamespace declarations, in order, for a best-effort match. - var usingnamespace_decls: std.ArrayListUnmanaged(Zir.Inst.Index) = .{}; + var usingnamespace_decls: std.ArrayListUnmanaged(Zir.Inst.Index) = .empty; defer usingnamespace_decls.deinit(gpa); { @@ -3154,12 +3154,12 @@ pub fn resolveReferences(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolve const comp = zcu.comp; const ip = &zcu.intern_pool; - var result: std.AutoHashMapUnmanaged(AnalUnit, ?ResolvedReference) = .{}; + var result: std.AutoHashMapUnmanaged(AnalUnit, ?ResolvedReference) = .empty; errdefer result.deinit(gpa); - var checked_types: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .{}; - var type_queue: std.AutoArrayHashMapUnmanaged(InternPool.Index, ?ResolvedReference) = .{}; - var unit_queue: std.AutoArrayHashMapUnmanaged(AnalUnit, ?ResolvedReference) = .{}; + var checked_types: std.AutoArrayHashMapUnmanaged(InternPool.Index, void) = .empty; + var type_queue: std.AutoArrayHashMapUnmanaged(InternPool.Index, ?ResolvedReference) = .empty; + var unit_queue: std.AutoArrayHashMapUnmanaged(AnalUnit, ?ResolvedReference) = .empty; defer { checked_types.deinit(gpa); type_queue.deinit(gpa); diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 17c5413b30f0..837895f78304 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -320,7 
+320,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { const gpa = zcu.gpa; // We need to visit every updated File for every TrackedInst in InternPool. - var updated_files: std.AutoArrayHashMapUnmanaged(Zcu.File.Index, UpdatedFile) = .{}; + var updated_files: std.AutoArrayHashMapUnmanaged(Zcu.File.Index, UpdatedFile) = .empty; defer cleanupUpdatedFiles(gpa, &updated_files); for (zcu.import_table.values()) |file_index| { const file = zcu.fileByIndex(file_index); @@ -399,7 +399,7 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void { }; if (!has_namespace) continue; - var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; + var old_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .empty; defer old_names.deinit(zcu.gpa); { var it = old_zir.declIterator(old_inst); @@ -1721,7 +1721,7 @@ pub fn scanNamespace( // For incremental updates, `scanDecl` wants to look up existing decls by their ZIR index rather // than their name. We'll build an efficient mapping now, then discard the current `decls`. // We map to the `Cau`, since not every declaration has a `Nav`. - var existing_by_inst: std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, InternPool.Cau.Index) = .{}; + var existing_by_inst: std.AutoHashMapUnmanaged(InternPool.TrackedInst.Index, InternPool.Cau.Index) = .empty; defer existing_by_inst.deinit(gpa); try existing_by_inst.ensureTotalCapacity(gpa, @intCast( @@ -1761,7 +1761,7 @@ pub fn scanNamespace( } } - var seen_decls: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; + var seen_decls: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .empty; defer seen_decls.deinit(gpa); namespace.pub_decls.clearRetainingCapacity(); @@ -2293,8 +2293,8 @@ pub fn processExports(pt: Zcu.PerThread) !void { const gpa = zcu.gpa; // First, construct a mapping of every exported value and Nav to the indices of all its different exports. 
- var nav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, std.ArrayListUnmanaged(u32)) = .{}; - var uav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Index, std.ArrayListUnmanaged(u32)) = .{}; + var nav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, std.ArrayListUnmanaged(u32)) = .empty; + var uav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Index, std.ArrayListUnmanaged(u32)) = .empty; defer { for (nav_exports.values()) |*exports| { exports.deinit(gpa); diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 463fdde84447..649be16c06ba 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -62,7 +62,7 @@ stack_align: u32, /// MIR Instructions mir_instructions: std.MultiArrayList(Mir.Inst) = .{}, /// MIR extra data -mir_extra: std.ArrayListUnmanaged(u32) = .{}, +mir_extra: std.ArrayListUnmanaged(u32) = .empty, /// Byte offset within the source file of the ending curly. end_di_line: u32, @@ -71,13 +71,13 @@ end_di_column: u32, /// The value is an offset into the `Function` `code` from the beginning. /// To perform the reloc, write 32-bit signed little-endian integer /// which is a relative jump, based on the address following the reloc. -exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .{}, +exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .empty, /// We postpone the creation of debug info for function args and locals /// until after all Mir instructions have been generated. Only then we /// will know saved_regs_stack_space which is necessary in order to /// calculate the right stack offsest with respect to the `.fp` register. -dbg_info_relocs: std.ArrayListUnmanaged(DbgInfoReloc) = .{}, +dbg_info_relocs: std.ArrayListUnmanaged(DbgInfoReloc) = .empty, /// Whenever there is a runtime branch, we push a Branch onto this stack, /// and pop it off when the runtime branch joins. 
This provides an "overlay" @@ -89,11 +89,11 @@ dbg_info_relocs: std.ArrayListUnmanaged(DbgInfoReloc) = .{}, branch_stack: *std.ArrayList(Branch), // Key is the block instruction -blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{}, +blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .empty, register_manager: RegisterManager = .{}, /// Maps offset to what is stored there. -stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .{}, +stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .empty, /// Tracks the current instruction allocated to the compare flags compare_flags_inst: ?Air.Inst.Index = null, @@ -247,7 +247,7 @@ const DbgInfoReloc = struct { }; const Branch = struct { - inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{}, + inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .empty, fn deinit(self: *Branch, gpa: Allocator) void { self.inst_table.deinit(gpa); diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig index 860a264e72ec..f6b6564f58e4 100644 --- a/src/arch/aarch64/Emit.zig +++ b/src/arch/aarch64/Emit.zig @@ -33,18 +33,18 @@ prev_di_pc: usize, saved_regs_stack_space: u32, /// The branch type of every branch -branch_types: std.AutoHashMapUnmanaged(Mir.Inst.Index, BranchType) = .{}, +branch_types: std.AutoHashMapUnmanaged(Mir.Inst.Index, BranchType) = .empty, /// For every forward branch, maps the target instruction to a list of /// branches which branch to this target instruction -branch_forward_origins: std.AutoHashMapUnmanaged(Mir.Inst.Index, std.ArrayListUnmanaged(Mir.Inst.Index)) = .{}, +branch_forward_origins: std.AutoHashMapUnmanaged(Mir.Inst.Index, std.ArrayListUnmanaged(Mir.Inst.Index)) = .empty, /// For backward branches: stores the code offset of the target /// instruction /// /// For forward branches: stores the code offset of the branch /// instruction -code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .{}, +code_offset_mapping: 
std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .empty, /// The final stack frame size of the function (already aligned to the /// respective stack alignment). Does not include prologue stack space. @@ -346,7 +346,7 @@ fn lowerBranches(emit: *Emit) !void { if (emit.branch_forward_origins.getPtr(target_inst)) |origin_list| { try origin_list.append(gpa, inst); } else { - var origin_list: std.ArrayListUnmanaged(Mir.Inst.Index) = .{}; + var origin_list: std.ArrayListUnmanaged(Mir.Inst.Index) = .empty; try origin_list.append(gpa, inst); try emit.branch_forward_origins.put(gpa, target_inst, origin_list); } diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index a0b529b75eb9..48137d4413e0 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -62,7 +62,7 @@ stack_align: u32, /// MIR Instructions mir_instructions: std.MultiArrayList(Mir.Inst) = .{}, /// MIR extra data -mir_extra: std.ArrayListUnmanaged(u32) = .{}, +mir_extra: std.ArrayListUnmanaged(u32) = .empty, /// Byte offset within the source file of the ending curly. end_di_line: u32, @@ -71,13 +71,13 @@ end_di_column: u32, /// The value is an offset into the `Function` `code` from the beginning. /// To perform the reloc, write 32-bit signed little-endian integer /// which is a relative jump, based on the address following the reloc. -exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .{}, +exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .empty, /// We postpone the creation of debug info for function args and locals /// until after all Mir instructions have been generated. Only then we /// will know saved_regs_stack_space which is necessary in order to /// calculate the right stack offsest with respect to the `.fp` register. -dbg_info_relocs: std.ArrayListUnmanaged(DbgInfoReloc) = .{}, +dbg_info_relocs: std.ArrayListUnmanaged(DbgInfoReloc) = .empty, /// Whenever there is a runtime branch, we push a Branch onto this stack, /// and pop it off when the runtime branch joins. 
This provides an "overlay" @@ -89,11 +89,11 @@ dbg_info_relocs: std.ArrayListUnmanaged(DbgInfoReloc) = .{}, branch_stack: *std.ArrayList(Branch), // Key is the block instruction -blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{}, +blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .empty, register_manager: RegisterManager = .{}, /// Maps offset to what is stored there. -stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .{}, +stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .empty, /// Tracks the current instruction allocated to the compare flags cpsr_flags_inst: ?Air.Inst.Index = null, @@ -168,7 +168,7 @@ const MCValue = union(enum) { }; const Branch = struct { - inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{}, + inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .empty, fn deinit(self: *Branch, gpa: Allocator) void { self.inst_table.deinit(gpa); diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig index 9ccef5a29973..03940dfc3cb9 100644 --- a/src/arch/arm/Emit.zig +++ b/src/arch/arm/Emit.zig @@ -40,16 +40,16 @@ saved_regs_stack_space: u32, stack_size: u32, /// The branch type of every branch -branch_types: std.AutoHashMapUnmanaged(Mir.Inst.Index, BranchType) = .{}, +branch_types: std.AutoHashMapUnmanaged(Mir.Inst.Index, BranchType) = .empty, /// For every forward branch, maps the target instruction to a list of /// branches which branch to this target instruction -branch_forward_origins: std.AutoHashMapUnmanaged(Mir.Inst.Index, std.ArrayListUnmanaged(Mir.Inst.Index)) = .{}, +branch_forward_origins: std.AutoHashMapUnmanaged(Mir.Inst.Index, std.ArrayListUnmanaged(Mir.Inst.Index)) = .empty, /// For backward branches: stores the code offset of the target /// instruction /// /// For forward branches: stores the code offset of the branch /// instruction -code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .{}, +code_offset_mapping: 
std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .empty, const InnerError = error{ OutOfMemory, @@ -264,7 +264,7 @@ fn lowerBranches(emit: *Emit) !void { if (emit.branch_forward_origins.getPtr(target_inst)) |origin_list| { try origin_list.append(gpa, inst); } else { - var origin_list: std.ArrayListUnmanaged(Mir.Inst.Index) = .{}; + var origin_list: std.ArrayListUnmanaged(Mir.Inst.Index) = .empty; try origin_list.append(gpa, inst); try emit.branch_forward_origins.put(gpa, target_inst, origin_list); } diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 262dad6d2448..f887c6cb13e3 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -81,7 +81,7 @@ scope_generation: u32, /// The value is an offset into the `Function` `code` from the beginning. /// To perform the reloc, write 32-bit signed little-endian integer /// which is a relative jump, based on the address following the reloc. -exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .{}, +exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .empty, /// Whenever there is a runtime branch, we push a Branch onto this stack, /// and pop it off when the runtime branch joins. 
This provides an "overlay" @@ -97,14 +97,14 @@ avl: ?u64, vtype: ?bits.VType, // Key is the block instruction -blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{}, +blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .empty, register_manager: RegisterManager = .{}, const_tracking: ConstTrackingMap = .{}, inst_tracking: InstTrackingMap = .{}, frame_allocs: std.MultiArrayList(FrameAlloc) = .{}, -free_frame_indices: std.AutoArrayHashMapUnmanaged(FrameIndex, void) = .{}, +free_frame_indices: std.AutoArrayHashMapUnmanaged(FrameIndex, void) = .empty, frame_locs: std.MultiArrayList(Mir.FrameLoc) = .{}, loops: std.AutoHashMapUnmanaged(Air.Inst.Index, struct { @@ -342,7 +342,7 @@ const MCValue = union(enum) { }; const Branch = struct { - inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{}, + inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .empty, fn deinit(func: *Branch, gpa: Allocator) void { func.inst_table.deinit(gpa); @@ -621,7 +621,7 @@ const FrameAlloc = struct { }; const BlockData = struct { - relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .{}, + relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .empty, state: State, fn deinit(bd: *BlockData, gpa: Allocator) void { @@ -6193,7 +6193,7 @@ fn airAsm(func: *Func, inst: Air.Inst.Index) !void { const Label = struct { target: Mir.Inst.Index = undefined, - pending_relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .{}, + pending_relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .empty, const Kind = enum { definition, reference }; @@ -6217,7 +6217,7 @@ fn airAsm(func: *Func, inst: Air.Inst.Index) !void { return name.len > 0; } }; - var labels: std.StringHashMapUnmanaged(Label) = .{}; + var labels: std.StringHashMapUnmanaged(Label) = .empty; defer { var label_it = labels.valueIterator(); while (label_it.next()) |label| label.pending_relocs.deinit(func.gpa); diff --git a/src/arch/riscv64/Emit.zig b/src/arch/riscv64/Emit.zig index 8ee566c7edf2..2c4c04d5d339 100644 
--- a/src/arch/riscv64/Emit.zig +++ b/src/arch/riscv64/Emit.zig @@ -10,8 +10,8 @@ prev_di_column: u32, /// Relative to the beginning of `code`. prev_di_pc: usize, -code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .{}, -relocs: std.ArrayListUnmanaged(Reloc) = .{}, +code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .empty, +relocs: std.ArrayListUnmanaged(Reloc) = .empty, pub const Error = Lower.Error || error{ EmitFail, diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig index 589e9978a256..f2ffd517f987 100644 --- a/src/arch/sparc64/CodeGen.zig +++ b/src/arch/sparc64/CodeGen.zig @@ -68,7 +68,7 @@ stack_align: Alignment, /// MIR Instructions mir_instructions: std.MultiArrayList(Mir.Inst) = .{}, /// MIR extra data -mir_extra: std.ArrayListUnmanaged(u32) = .{}, +mir_extra: std.ArrayListUnmanaged(u32) = .empty, /// Byte offset within the source file of the ending curly. end_di_line: u32, @@ -77,7 +77,7 @@ end_di_column: u32, /// The value is an offset into the `Function` `code` from the beginning. /// To perform the reloc, write 32-bit signed little-endian integer /// which is a relative jump, based on the address following the reloc. -exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .{}, +exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .empty, /// Whenever there is a runtime branch, we push a Branch onto this stack, /// and pop it off when the runtime branch joins. This provides an "overlay" @@ -89,12 +89,12 @@ exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .{}, branch_stack: *std.ArrayList(Branch), // Key is the block instruction -blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{}, +blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .empty, register_manager: RegisterManager = .{}, /// Maps offset to what is stored there. 
-stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .{}, +stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .empty, /// Tracks the current instruction allocated to the condition flags condition_flags_inst: ?Air.Inst.Index = null, @@ -201,7 +201,7 @@ const MCValue = union(enum) { }; const Branch = struct { - inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{}, + inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .empty, fn deinit(self: *Branch, gpa: Allocator) void { self.inst_table.deinit(gpa); diff --git a/src/arch/sparc64/Emit.zig b/src/arch/sparc64/Emit.zig index a87c9cd0ae3a..ca50aa50c637 100644 --- a/src/arch/sparc64/Emit.zig +++ b/src/arch/sparc64/Emit.zig @@ -30,16 +30,16 @@ prev_di_column: u32, prev_di_pc: usize, /// The branch type of every branch -branch_types: std.AutoHashMapUnmanaged(Mir.Inst.Index, BranchType) = .{}, +branch_types: std.AutoHashMapUnmanaged(Mir.Inst.Index, BranchType) = .empty, /// For every forward branch, maps the target instruction to a list of /// branches which branch to this target instruction -branch_forward_origins: std.AutoHashMapUnmanaged(Mir.Inst.Index, std.ArrayListUnmanaged(Mir.Inst.Index)) = .{}, +branch_forward_origins: std.AutoHashMapUnmanaged(Mir.Inst.Index, std.ArrayListUnmanaged(Mir.Inst.Index)) = .empty, /// For backward branches: stores the code offset of the target /// instruction /// /// For forward branches: stores the code offset of the branch /// instruction -code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .{}, +code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .empty, const InnerError = error{ OutOfMemory, @@ -571,7 +571,7 @@ fn lowerBranches(emit: *Emit) !void { if (emit.branch_forward_origins.getPtr(target_inst)) |origin_list| { try origin_list.append(gpa, inst); } else { - var origin_list: std.ArrayListUnmanaged(Mir.Inst.Index) = .{}; + var origin_list: std.ArrayListUnmanaged(Mir.Inst.Index) = .empty; try 
origin_list.append(gpa, inst); try emit.branch_forward_origins.put(gpa, target_inst, origin_list); } diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index 4c42cd4ad297..193bd27a388e 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -654,7 +654,7 @@ func_index: InternPool.Index, /// When we return from a branch, the branch will be popped from this list, /// which means branches can only contain references from within its own branch, /// or a branch higher (lower index) in the tree. -branches: std.ArrayListUnmanaged(Branch) = .{}, +branches: std.ArrayListUnmanaged(Branch) = .empty, /// Table to save `WValue`'s generated by an `Air.Inst` // values: ValueTable, /// Mapping from Air.Inst.Index to block ids @@ -663,7 +663,7 @@ blocks: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, struct { value: WValue, }) = .{}, /// Maps `loop` instructions to their label. `br` to here repeats the loop. -loops: std.AutoHashMapUnmanaged(Air.Inst.Index, u32) = .{}, +loops: std.AutoHashMapUnmanaged(Air.Inst.Index, u32) = .empty, /// `bytes` contains the wasm bytecode belonging to the 'code' section. code: *ArrayList(u8), /// The index the next local generated will have @@ -681,7 +681,7 @@ locals: std.ArrayListUnmanaged(u8), /// List of simd128 immediates. Each value is stored as an array of bytes. /// This list will only be populated for 128bit-simd values when the target features /// are enabled also. -simd_immediates: std.ArrayListUnmanaged([16]u8) = .{}, +simd_immediates: std.ArrayListUnmanaged([16]u8) = .empty, /// The Target we're emitting (used to call intInfo) target: *const std.Target, /// Represents the wasm binary file that is being linked. 
@@ -690,7 +690,7 @@ pt: Zcu.PerThread, /// List of MIR Instructions mir_instructions: std.MultiArrayList(Mir.Inst) = .{}, /// Contains extra data for MIR -mir_extra: std.ArrayListUnmanaged(u32) = .{}, +mir_extra: std.ArrayListUnmanaged(u32) = .empty, /// When a function is executing, we store the the current stack pointer's value within this local. /// This value is then used to restore the stack pointer to the original value at the return of the function. initial_stack_value: WValue = .none, @@ -717,19 +717,19 @@ stack_alignment: Alignment = .@"16", // allows us to re-use locals that are no longer used. e.g. a temporary local. /// A list of indexes which represents a local of valtype `i32`. /// It is illegal to store a non-i32 valtype in this list. -free_locals_i32: std.ArrayListUnmanaged(u32) = .{}, +free_locals_i32: std.ArrayListUnmanaged(u32) = .empty, /// A list of indexes which represents a local of valtype `i64`. /// It is illegal to store a non-i64 valtype in this list. -free_locals_i64: std.ArrayListUnmanaged(u32) = .{}, +free_locals_i64: std.ArrayListUnmanaged(u32) = .empty, /// A list of indexes which represents a local of valtype `f32`. /// It is illegal to store a non-f32 valtype in this list. -free_locals_f32: std.ArrayListUnmanaged(u32) = .{}, +free_locals_f32: std.ArrayListUnmanaged(u32) = .empty, /// A list of indexes which represents a local of valtype `f64`. /// It is illegal to store a non-f64 valtype in this list. -free_locals_f64: std.ArrayListUnmanaged(u32) = .{}, +free_locals_f64: std.ArrayListUnmanaged(u32) = .empty, /// A list of indexes which represents a local of valtype `v127`. /// It is illegal to store a non-v128 valtype in this list. -free_locals_v128: std.ArrayListUnmanaged(u32) = .{}, +free_locals_v128: std.ArrayListUnmanaged(u32) = .empty, /// When in debug mode, this tracks if no `finishAir` was missed. 
/// Forgetting to call `finishAir` will cause the result to not be diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 2d0b35a63680..01e60367eafd 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -78,7 +78,7 @@ eflags_inst: ?Air.Inst.Index = null, /// MIR Instructions mir_instructions: std.MultiArrayList(Mir.Inst) = .{}, /// MIR extra data -mir_extra: std.ArrayListUnmanaged(u32) = .{}, +mir_extra: std.ArrayListUnmanaged(u32) = .empty, /// Byte offset within the source file of the ending curly. end_di_line: u32, @@ -87,13 +87,13 @@ end_di_column: u32, /// The value is an offset into the `Function` `code` from the beginning. /// To perform the reloc, write 32-bit signed little-endian integer /// which is a relative jump, based on the address following the reloc. -exitlude_jump_relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .{}, +exitlude_jump_relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .empty, const_tracking: ConstTrackingMap = .{}, inst_tracking: InstTrackingMap = .{}, // Key is the block instruction -blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{}, +blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .empty, register_manager: RegisterManager = .{}, @@ -101,7 +101,7 @@ register_manager: RegisterManager = .{}, scope_generation: u32 = 0, frame_allocs: std.MultiArrayList(FrameAlloc) = .{}, -free_frame_indices: std.AutoArrayHashMapUnmanaged(FrameIndex, void) = .{}, +free_frame_indices: std.AutoArrayHashMapUnmanaged(FrameIndex, void) = .empty, frame_locs: std.MultiArrayList(Mir.FrameLoc) = .{}, loops: std.AutoHashMapUnmanaged(Air.Inst.Index, struct { @@ -799,7 +799,7 @@ const StackAllocation = struct { }; const BlockData = struct { - relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .{}, + relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .empty, state: State, fn deinit(self: *BlockData, gpa: Allocator) void { @@ -14248,7 +14248,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { 
const Label = struct { target: Mir.Inst.Index = undefined, - pending_relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .{}, + pending_relocs: std.ArrayListUnmanaged(Mir.Inst.Index) = .empty, const Kind = enum { definition, reference }; @@ -14272,7 +14272,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { return name.len > 0; } }; - var labels: std.StringHashMapUnmanaged(Label) = .{}; + var labels: std.StringHashMapUnmanaged(Label) = .empty; defer { var label_it = labels.valueIterator(); while (label_it.next()) |label| label.pending_relocs.deinit(self.gpa); diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig index 372a520e52a0..c33fbb53a51b 100644 --- a/src/arch/x86_64/Emit.zig +++ b/src/arch/x86_64/Emit.zig @@ -11,8 +11,8 @@ prev_di_column: u32, /// Relative to the beginning of `code`. prev_di_pc: usize, -code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .{}, -relocs: std.ArrayListUnmanaged(Reloc) = .{}, +code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .empty, +relocs: std.ArrayListUnmanaged(Reloc) = .empty, pub const Error = Lower.Error || error{ EmitFail, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 0ec5513b6f35..466bdcde6a2b 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -304,14 +304,14 @@ pub const Function = struct { air: Air, liveness: Liveness, value_map: CValueMap, - blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{}, + blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .empty, next_arg_index: usize = 0, next_block_index: usize = 0, object: Object, lazy_fns: LazyFnMap, func_index: InternPool.Index, /// All the locals, to be emitted at the top of the function. - locals: std.ArrayListUnmanaged(Local) = .{}, + locals: std.ArrayListUnmanaged(Local) = .empty, /// Which locals are available for reuse, based on Type. free_locals_map: LocalsMap = .{}, /// Locals which will not be freed by Liveness. 
This is used after a @@ -320,10 +320,10 @@ pub const Function = struct { /// of variable declarations at the top of a function, sorted descending /// by type alignment. /// The value is whether the alloc needs to be emitted in the header. - allocs: std.AutoArrayHashMapUnmanaged(LocalIndex, bool) = .{}, + allocs: std.AutoArrayHashMapUnmanaged(LocalIndex, bool) = .empty, /// Maps from `loop_switch_br` instructions to the allocated local used /// for the switch cond. Dispatches should set this local to the new cond. - loop_switch_conds: std.AutoHashMapUnmanaged(Air.Inst.Index, LocalIndex) = .{}, + loop_switch_conds: std.AutoHashMapUnmanaged(Air.Inst.Index, LocalIndex) = .empty, fn resolveInst(f: *Function, ref: Air.Inst.Ref) !CValue { const gop = try f.value_map.getOrPut(ref); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 956ed7de0823..ec2ba4e4c197 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -1500,7 +1500,7 @@ pub const Object = struct { // instructions. Depending on the calling convention, this list is not necessarily // a bijection with the actual LLVM parameters of the function. 
const gpa = o.gpa; - var args: std.ArrayListUnmanaged(Builder.Value) = .{}; + var args: std.ArrayListUnmanaged(Builder.Value) = .empty; defer args.deinit(gpa); { @@ -2497,7 +2497,7 @@ pub const Object = struct { switch (ip.indexToKey(ty.toIntern())) { .anon_struct_type => |tuple| { - var fields: std.ArrayListUnmanaged(Builder.Metadata) = .{}; + var fields: std.ArrayListUnmanaged(Builder.Metadata) = .empty; defer fields.deinit(gpa); try fields.ensureUnusedCapacity(gpa, tuple.types.len); @@ -2574,7 +2574,7 @@ pub const Object = struct { const struct_type = zcu.typeToStruct(ty).?; - var fields: std.ArrayListUnmanaged(Builder.Metadata) = .{}; + var fields: std.ArrayListUnmanaged(Builder.Metadata) = .empty; defer fields.deinit(gpa); try fields.ensureUnusedCapacity(gpa, struct_type.field_types.len); @@ -2667,7 +2667,7 @@ pub const Object = struct { return debug_union_type; } - var fields: std.ArrayListUnmanaged(Builder.Metadata) = .{}; + var fields: std.ArrayListUnmanaged(Builder.Metadata) = .empty; defer fields.deinit(gpa); try fields.ensureUnusedCapacity(gpa, union_type.loadTagType(ip).names.len); @@ -3412,7 +3412,7 @@ pub const Object = struct { return int_ty; } - var llvm_field_types = std.ArrayListUnmanaged(Builder.Type){}; + var llvm_field_types: std.ArrayListUnmanaged(Builder.Type) = .empty; defer llvm_field_types.deinit(o.gpa); // Although we can estimate how much capacity to add, these cannot be // relied upon because of the recursive calls to lowerType below. @@ -3481,7 +3481,7 @@ pub const Object = struct { return ty; }, .anon_struct_type => |anon_struct_type| { - var llvm_field_types: std.ArrayListUnmanaged(Builder.Type) = .{}; + var llvm_field_types: std.ArrayListUnmanaged(Builder.Type) = .empty; defer llvm_field_types.deinit(o.gpa); // Although we can estimate how much capacity to add, these cannot be // relied upon because of the recursive calls to lowerType below. 
@@ -3672,7 +3672,7 @@ pub const Object = struct { const target = zcu.getTarget(); const ret_ty = try lowerFnRetTy(o, fn_info); - var llvm_params = std.ArrayListUnmanaged(Builder.Type){}; + var llvm_params: std.ArrayListUnmanaged(Builder.Type) = .empty; defer llvm_params.deinit(o.gpa); if (firstParamSRet(fn_info, zcu, target)) { @@ -7438,7 +7438,7 @@ pub const FuncGen = struct { const inputs: []const Air.Inst.Ref = @ptrCast(self.air.extra[extra_i..][0..extra.data.inputs_len]); extra_i += inputs.len; - var llvm_constraints: std.ArrayListUnmanaged(u8) = .{}; + var llvm_constraints: std.ArrayListUnmanaged(u8) = .empty; defer llvm_constraints.deinit(self.gpa); var arena_allocator = std.heap.ArenaAllocator.init(self.gpa); @@ -7466,7 +7466,7 @@ pub const FuncGen = struct { var llvm_param_i: usize = 0; var total_i: u16 = 0; - var name_map: std.StringArrayHashMapUnmanaged(u16) = .{}; + var name_map: std.StringArrayHashMapUnmanaged(u16) = .empty; try name_map.ensureUnusedCapacity(arena, max_param_count); var rw_extra_i = extra_i; diff --git a/src/codegen/llvm/Builder.zig b/src/codegen/llvm/Builder.zig index f6bfcab1ad92..d3aa6e34c400 100644 --- a/src/codegen/llvm/Builder.zig +++ b/src/codegen/llvm/Builder.zig @@ -3994,7 +3994,7 @@ pub const Function = struct { names: [*]const String = &[0]String{}, value_indices: [*]const u32 = &[0]u32{}, strip: bool, - debug_locations: std.AutoHashMapUnmanaged(Instruction.Index, DebugLocation) = .{}, + debug_locations: std.AutoHashMapUnmanaged(Instruction.Index, DebugLocation) = .empty, debug_values: []const Instruction.Index = &.{}, extra: []const u32 = &.{}, @@ -6166,7 +6166,7 @@ pub const WipFunction = struct { const value_indices = try gpa.alloc(u32, final_instructions_len); errdefer gpa.free(value_indices); - var debug_locations: std.AutoHashMapUnmanaged(Instruction.Index, DebugLocation) = .{}; + var debug_locations: std.AutoHashMapUnmanaged(Instruction.Index, DebugLocation) = .empty; errdefer debug_locations.deinit(gpa); try 
debug_locations.ensureUnusedCapacity(gpa, @intCast(self.debug_locations.count())); @@ -9557,7 +9557,7 @@ pub fn printUnbuffered( } } - var attribute_groups: std.AutoArrayHashMapUnmanaged(Attributes, void) = .{}; + var attribute_groups: std.AutoArrayHashMapUnmanaged(Attributes, void) = .empty; defer attribute_groups.deinit(self.gpa); for (0.., self.functions.items) |function_i, function| { @@ -13133,7 +13133,7 @@ pub fn toBitcode(self: *Builder, allocator: Allocator) bitcode_writer.Error![]co // Write LLVM IR magic try bitcode.writeBits(ir.MAGIC, 32); - var record: std.ArrayListUnmanaged(u64) = .{}; + var record: std.ArrayListUnmanaged(u64) = .empty; defer record.deinit(self.gpa); // IDENTIFICATION_BLOCK @@ -13524,7 +13524,7 @@ pub fn toBitcode(self: *Builder, allocator: Allocator) bitcode_writer.Error![]co try paramattr_block.end(); } - var globals: std.AutoArrayHashMapUnmanaged(Global.Index, void) = .{}; + var globals: std.AutoArrayHashMapUnmanaged(Global.Index, void) = .empty; defer globals.deinit(self.gpa); try globals.ensureUnusedCapacity( self.gpa, @@ -13587,7 +13587,7 @@ pub fn toBitcode(self: *Builder, allocator: Allocator) bitcode_writer.Error![]co // Globals { - var section_map: std.AutoArrayHashMapUnmanaged(String, void) = .{}; + var section_map: std.AutoArrayHashMapUnmanaged(String, void) = .empty; defer section_map.deinit(self.gpa); try section_map.ensureUnusedCapacity(self.gpa, globals.count()); diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index afc7641072f5..dc45b269312c 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -79,7 +79,7 @@ const ControlFlow = union(enum) { selection: struct { /// In order to know which merges we still need to do, we need to keep /// a stack of those. 
- merge_stack: std.ArrayListUnmanaged(SelectionMerge) = .{}, + merge_stack: std.ArrayListUnmanaged(SelectionMerge) = .empty, }, /// For a `loop` type block, we can early-exit the block by /// jumping to the loop exit node, and we don't need to generate @@ -87,7 +87,7 @@ const ControlFlow = union(enum) { loop: struct { /// The next block to jump to can be determined from any number /// of conditions that jump to the loop exit. - merges: std.ArrayListUnmanaged(Incoming) = .{}, + merges: std.ArrayListUnmanaged(Incoming) = .empty, /// The label id of the loop's merge block. merge_block: IdRef, }, @@ -102,10 +102,10 @@ const ControlFlow = union(enum) { }; /// The stack of (structured) blocks that we are currently in. This determines /// how exits from the current block must be handled. - block_stack: std.ArrayListUnmanaged(*Structured.Block) = .{}, + block_stack: std.ArrayListUnmanaged(*Structured.Block) = .empty, /// Maps `block` inst indices to the variable that the block's result /// value must be written to. - block_results: std.AutoHashMapUnmanaged(Air.Inst.Index, IdRef) = .{}, + block_results: std.AutoHashMapUnmanaged(Air.Inst.Index, IdRef) = .empty, }; const Unstructured = struct { @@ -116,12 +116,12 @@ const ControlFlow = union(enum) { const Block = struct { label: ?IdRef = null, - incoming_blocks: std.ArrayListUnmanaged(Incoming) = .{}, + incoming_blocks: std.ArrayListUnmanaged(Incoming) = .empty, }; /// We need to keep track of result ids for block labels, as well as the 'incoming' /// blocks for a block. - blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, *Block) = .{}, + blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, *Block) = .empty, }; structured: Structured, @@ -153,10 +153,10 @@ pub const Object = struct { /// The Zig module that this object file is generated for. /// A map of Zig decl indices to SPIR-V decl indices. 
- nav_link: std.AutoHashMapUnmanaged(InternPool.Nav.Index, SpvModule.Decl.Index) = .{}, + nav_link: std.AutoHashMapUnmanaged(InternPool.Nav.Index, SpvModule.Decl.Index) = .empty, /// A map of Zig InternPool indices for anonymous decls to SPIR-V decl indices. - uav_link: std.AutoHashMapUnmanaged(struct { InternPool.Index, StorageClass }, SpvModule.Decl.Index) = .{}, + uav_link: std.AutoHashMapUnmanaged(struct { InternPool.Index, StorageClass }, SpvModule.Decl.Index) = .empty, /// A map that maps AIR intern pool indices to SPIR-V result-ids. intern_map: InternMap = .{}, @@ -300,7 +300,7 @@ const NavGen = struct { /// An array of function argument result-ids. Each index corresponds with the /// function argument of the same index. - args: std.ArrayListUnmanaged(IdRef) = .{}, + args: std.ArrayListUnmanaged(IdRef) = .empty, /// A counter to keep track of how many `arg` instructions we've seen yet. next_arg_index: u32 = 0, @@ -6270,7 +6270,7 @@ const NavGen = struct { } } - var incoming_structured_blocks = std.ArrayListUnmanaged(ControlFlow.Structured.Block.Incoming){}; + var incoming_structured_blocks: std.ArrayListUnmanaged(ControlFlow.Structured.Block.Incoming) = .empty; defer incoming_structured_blocks.deinit(self.gpa); if (self.control_flow == .structured) { diff --git a/src/codegen/spirv/Assembler.zig b/src/codegen/spirv/Assembler.zig index 2cbb873d30cc..9e39f2ed0988 100644 --- a/src/codegen/spirv/Assembler.zig +++ b/src/codegen/spirv/Assembler.zig @@ -148,7 +148,7 @@ const AsmValueMap = std.StringArrayHashMapUnmanaged(AsmValue); gpa: Allocator, /// A list of errors that occured during processing the assembly. -errors: std.ArrayListUnmanaged(ErrorMsg) = .{}, +errors: std.ArrayListUnmanaged(ErrorMsg) = .empty, /// The source code that is being assembled. src: []const u8, @@ -161,7 +161,7 @@ spv: *SpvModule, func: *SpvModule.Fn, /// `self.src` tokenized. 
-tokens: std.ArrayListUnmanaged(Token) = .{}, +tokens: std.ArrayListUnmanaged(Token) = .empty, /// The token that is next during parsing. current_token: u32 = 0, @@ -172,9 +172,9 @@ inst: struct { /// The opcode of the current instruction. opcode: Opcode = undefined, /// Operands of the current instruction. - operands: std.ArrayListUnmanaged(Operand) = .{}, + operands: std.ArrayListUnmanaged(Operand) = .empty, /// This is where string data resides. Strings are zero-terminated. - string_bytes: std.ArrayListUnmanaged(u8) = .{}, + string_bytes: std.ArrayListUnmanaged(u8) = .empty, /// Return a reference to the result of this instruction, if any. fn result(self: @This()) ?AsmValue.Ref { @@ -196,7 +196,7 @@ value_map: AsmValueMap = .{}, /// This set is used to quickly transform from an opcode name to the /// index in its instruction set. The index of the key is the /// index in `spec.InstructionSet.core.instructions()`. -instruction_map: std.StringArrayHashMapUnmanaged(void) = .{}, +instruction_map: std.StringArrayHashMapUnmanaged(void) = .empty, /// Free the resources owned by this assembler. pub fn deinit(self: *Assembler) void { diff --git a/src/codegen/spirv/Module.zig b/src/codegen/spirv/Module.zig index ae30de156b93..94787e06b9cf 100644 --- a/src/codegen/spirv/Module.zig +++ b/src/codegen/spirv/Module.zig @@ -35,7 +35,7 @@ pub const Fn = struct { /// the end of this function definition. body: Section = .{}, /// The decl dependencies that this function depends on. - decl_deps: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .{}, + decl_deps: std.AutoArrayHashMapUnmanaged(Decl.Index, void) = .empty, /// Reset this function without deallocating resources, so that /// it may be used to emit code for another function. @@ -141,7 +141,7 @@ sections: struct { next_result_id: Word, /// Cache for results of OpString instructions. 
-strings: std.StringArrayHashMapUnmanaged(IdRef) = .{}, +strings: std.StringArrayHashMapUnmanaged(IdRef) = .empty, /// Some types shouldn't be emitted more than one time, but cannot be caught by /// the `intern_map` during codegen. Sometimes, IDs are compared to check if @@ -154,27 +154,27 @@ strings: std.StringArrayHashMapUnmanaged(IdRef) = .{}, cache: struct { bool_type: ?IdRef = null, void_type: ?IdRef = null, - int_types: std.AutoHashMapUnmanaged(std.builtin.Type.Int, IdRef) = .{}, - float_types: std.AutoHashMapUnmanaged(std.builtin.Type.Float, IdRef) = .{}, + int_types: std.AutoHashMapUnmanaged(std.builtin.Type.Int, IdRef) = .empty, + float_types: std.AutoHashMapUnmanaged(std.builtin.Type.Float, IdRef) = .empty, // This cache is required so that @Vector(X, u1) in direct representation has the // same ID as @Vector(X, bool) in indirect representation. - vector_types: std.AutoHashMapUnmanaged(struct { IdRef, u32 }, IdRef) = .{}, + vector_types: std.AutoHashMapUnmanaged(struct { IdRef, u32 }, IdRef) = .empty, - builtins: std.AutoHashMapUnmanaged(struct { IdRef, spec.BuiltIn }, Decl.Index) = .{}, + builtins: std.AutoHashMapUnmanaged(struct { IdRef, spec.BuiltIn }, Decl.Index) = .empty, } = .{}, /// Set of Decls, referred to by Decl.Index. -decls: std.ArrayListUnmanaged(Decl) = .{}, +decls: std.ArrayListUnmanaged(Decl) = .empty, /// List of dependencies, per decl. This list holds all the dependencies, sliced by the /// begin_dep and end_dep in `self.decls`. -decl_deps: std.ArrayListUnmanaged(Decl.Index) = .{}, +decl_deps: std.ArrayListUnmanaged(Decl.Index) = .empty, /// The list of entry points that should be exported from this module. -entry_points: std.ArrayListUnmanaged(EntryPoint) = .{}, +entry_points: std.ArrayListUnmanaged(EntryPoint) = .empty, /// The list of extended instruction sets that should be imported. 
-extended_instruction_set: std.AutoHashMapUnmanaged(spec.InstructionSet, IdRef) = .{}, +extended_instruction_set: std.AutoHashMapUnmanaged(spec.InstructionSet, IdRef) = .empty, pub fn init(gpa: Allocator) Module { return .{ diff --git a/src/codegen/spirv/Section.zig b/src/codegen/spirv/Section.zig index 20abf8ab70f6..1fdf884bdbed 100644 --- a/src/codegen/spirv/Section.zig +++ b/src/codegen/spirv/Section.zig @@ -15,7 +15,7 @@ const Opcode = spec.Opcode; /// The instructions in this section. Memory is owned by the Module /// externally associated to this Section. -instructions: std.ArrayListUnmanaged(Word) = .{}, +instructions: std.ArrayListUnmanaged(Word) = .empty, pub fn deinit(section: *Section, allocator: Allocator) void { section.instructions.deinit(allocator); diff --git a/src/link/C.zig b/src/link/C.zig index 7d00ad9028e0..a674b256b09c 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -26,34 +26,34 @@ base: link.File, /// This linker backend does not try to incrementally link output C source code. /// Instead, it tracks all declarations in this table, and iterates over it /// in the flush function, stitching pre-rendered pieces of C code together. -navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, AvBlock) = .{}, +navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, AvBlock) = .empty, /// All the string bytes of rendered C code, all squished into one array. /// While in progress, a separate buffer is used, and then when finished, the /// buffer is copied into this one. -string_bytes: std.ArrayListUnmanaged(u8) = .{}, +string_bytes: std.ArrayListUnmanaged(u8) = .empty, /// Tracks all the anonymous decls that are used by all the decls so they can /// be rendered during flush(). -uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, AvBlock) = .{}, +uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, AvBlock) = .empty, /// Sparse set of uavs that are overaligned. Underaligned anon decls are /// lowered the same as ABI-aligned anon decls. 
The keys here are a subset of /// the keys of `uavs`. -aligned_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment) = .{}, +aligned_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment) = .empty, -exported_navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, ExportedBlock) = .{}, -exported_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, ExportedBlock) = .{}, +exported_navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, ExportedBlock) = .empty, +exported_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, ExportedBlock) = .empty, /// Optimization, `updateDecl` reuses this buffer rather than creating a new /// one with every call. -fwd_decl_buf: std.ArrayListUnmanaged(u8) = .{}, +fwd_decl_buf: std.ArrayListUnmanaged(u8) = .empty, /// Optimization, `updateDecl` reuses this buffer rather than creating a new /// one with every call. -code_buf: std.ArrayListUnmanaged(u8) = .{}, +code_buf: std.ArrayListUnmanaged(u8) = .empty, /// Optimization, `flush` reuses this buffer rather than creating a new /// one with every call. -lazy_fwd_decl_buf: std.ArrayListUnmanaged(u8) = .{}, +lazy_fwd_decl_buf: std.ArrayListUnmanaged(u8) = .empty, /// Optimization, `flush` reuses this buffer rather than creating a new /// one with every call. -lazy_code_buf: std.ArrayListUnmanaged(u8) = .{}, +lazy_code_buf: std.ArrayListUnmanaged(u8) = .empty, /// A reference into `string_bytes`. const String = extern struct { @@ -469,7 +469,7 @@ pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: // `CType`s, forward decls, and non-functions first. 
{ - var export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{}; + var export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .empty; defer export_names.deinit(gpa); try export_names.ensureTotalCapacity(gpa, @intCast(zcu.single_exports.count())); for (zcu.single_exports.values()) |export_index| { @@ -559,16 +559,16 @@ pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: const Flush = struct { ctype_pool: codegen.CType.Pool, - ctype_global_from_decl_map: std.ArrayListUnmanaged(codegen.CType) = .{}, - ctypes_buf: std.ArrayListUnmanaged(u8) = .{}, + ctype_global_from_decl_map: std.ArrayListUnmanaged(codegen.CType) = .empty, + ctypes_buf: std.ArrayListUnmanaged(u8) = .empty, lazy_ctype_pool: codegen.CType.Pool, lazy_fns: LazyFns = .{}, - asm_buf: std.ArrayListUnmanaged(u8) = .{}, + asm_buf: std.ArrayListUnmanaged(u8) = .empty, /// We collect a list of buffers to write, and write them all at once with pwritev 😎 - all_buffers: std.ArrayListUnmanaged(std.posix.iovec_const) = .{}, + all_buffers: std.ArrayListUnmanaged(std.posix.iovec_const) = .empty, /// Keeps track of the total bytes of `all_buffers`. 
file_size: u64 = 0, diff --git a/src/link/Coff.zig b/src/link/Coff.zig index f67c7d54d798..24040cb2f852 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -26,7 +26,7 @@ repro: bool, ptr_width: PtrWidth, page_size: u32, -objects: std.ArrayListUnmanaged(Object) = .{}, +objects: std.ArrayListUnmanaged(Object) = .empty, sections: std.MultiArrayList(Section) = .{}, data_directories: [coff.IMAGE_NUMBEROF_DIRECTORY_ENTRIES]coff.ImageDataDirectory, @@ -38,14 +38,14 @@ data_section_index: ?u16 = null, reloc_section_index: ?u16 = null, idata_section_index: ?u16 = null, -locals: std.ArrayListUnmanaged(coff.Symbol) = .{}, -globals: std.ArrayListUnmanaged(SymbolWithLoc) = .{}, -resolver: std.StringHashMapUnmanaged(u32) = .{}, -unresolved: std.AutoArrayHashMapUnmanaged(u32, bool) = .{}, -need_got_table: std.AutoHashMapUnmanaged(u32, void) = .{}, +locals: std.ArrayListUnmanaged(coff.Symbol) = .empty, +globals: std.ArrayListUnmanaged(SymbolWithLoc) = .empty, +resolver: std.StringHashMapUnmanaged(u32) = .empty, +unresolved: std.AutoArrayHashMapUnmanaged(u32, bool) = .empty, +need_got_table: std.AutoHashMapUnmanaged(u32, void) = .empty, -locals_free_list: std.ArrayListUnmanaged(u32) = .{}, -globals_free_list: std.ArrayListUnmanaged(u32) = .{}, +locals_free_list: std.ArrayListUnmanaged(u32) = .empty, +globals_free_list: std.ArrayListUnmanaged(u32) = .empty, strtab: StringTable = .{}, strtab_offset: ?u32 = null, @@ -56,7 +56,7 @@ got_table: TableSection(SymbolWithLoc) = .{}, /// A table of ImportTables partitioned by the library name. /// Key is an offset into the interning string table `temp_strtab`. -import_tables: std.AutoArrayHashMapUnmanaged(u32, ImportTable) = .{}, +import_tables: std.AutoArrayHashMapUnmanaged(u32, ImportTable) = .empty, got_table_count_dirty: bool = true, got_table_contents_dirty: bool = true, @@ -69,10 +69,10 @@ lazy_syms: LazySymbolTable = .{}, navs: NavTable = .{}, /// List of atoms that are either synthetic or map directly to the Zig source program. 
-atoms: std.ArrayListUnmanaged(Atom) = .{}, +atoms: std.ArrayListUnmanaged(Atom) = .empty, /// Table of atoms indexed by the symbol index. -atom_by_index_table: std.AutoHashMapUnmanaged(u32, Atom.Index) = .{}, +atom_by_index_table: std.AutoHashMapUnmanaged(u32, Atom.Index) = .empty, uavs: UavTable = .{}, @@ -131,7 +131,7 @@ const Section = struct { /// overcapacity can be negative. A simple way to have negative overcapacity is to /// allocate a fresh atom, which will have ideal capacity, and then grow it /// by 1 byte. It will then have -1 overcapacity. - free_list: std.ArrayListUnmanaged(Atom.Index) = .{}, + free_list: std.ArrayListUnmanaged(Atom.Index) = .empty, }; const LazySymbolTable = std.AutoArrayHashMapUnmanaged(InternPool.Index, LazySymbolMetadata); @@ -148,7 +148,7 @@ const AvMetadata = struct { atom: Atom.Index, section: u16, /// A list of all exports aliases of this Decl. - exports: std.ArrayListUnmanaged(u32) = .{}, + exports: std.ArrayListUnmanaged(u32) = .empty, fn deinit(m: *AvMetadata, allocator: Allocator) void { m.exports.deinit(allocator); diff --git a/src/link/Coff/ImportTable.zig b/src/link/Coff/ImportTable.zig index c25851fe72c3..9ce00cf5ee65 100644 --- a/src/link/Coff/ImportTable.zig +++ b/src/link/Coff/ImportTable.zig @@ -26,9 +26,9 @@ //! DLL#2 name //! --- END -entries: std.ArrayListUnmanaged(SymbolWithLoc) = .{}, -free_list: std.ArrayListUnmanaged(u32) = .{}, -lookup: std.AutoHashMapUnmanaged(SymbolWithLoc, u32) = .{}, +entries: std.ArrayListUnmanaged(SymbolWithLoc) = .empty, +free_list: std.ArrayListUnmanaged(u32) = .empty, +lookup: std.AutoHashMapUnmanaged(SymbolWithLoc, u32) = .empty, pub fn deinit(itab: *ImportTable, allocator: Allocator) void { itab.entries.deinit(allocator); diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 333501b29f47..ba3abcf3308d 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -39,11 +39,11 @@ files: std.MultiArrayList(File.Entry) = .{}, /// Long-lived list of all file descriptors. 
/// We store them globally rather than per actual File so that we can re-use /// one file handle per every object file within an archive. -file_handles: std.ArrayListUnmanaged(File.Handle) = .{}, +file_handles: std.ArrayListUnmanaged(File.Handle) = .empty, zig_object_index: ?File.Index = null, linker_defined_index: ?File.Index = null, -objects: std.ArrayListUnmanaged(File.Index) = .{}, -shared_objects: std.ArrayListUnmanaged(File.Index) = .{}, +objects: std.ArrayListUnmanaged(File.Index) = .empty, +shared_objects: std.ArrayListUnmanaged(File.Index) = .empty, /// List of all output sections and their associated metadata. sections: std.MultiArrayList(Section) = .{}, @@ -52,7 +52,7 @@ shdr_table_offset: ?u64 = null, /// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write. /// Same order as in the file. -phdrs: std.ArrayListUnmanaged(elf.Elf64_Phdr) = .{}, +phdrs: std.ArrayListUnmanaged(elf.Elf64_Phdr) = .empty, /// Special program headers /// PT_PHDR @@ -77,23 +77,23 @@ page_size: u32, default_sym_version: elf.Elf64_Versym, /// .shstrtab buffer -shstrtab: std.ArrayListUnmanaged(u8) = .{}, +shstrtab: std.ArrayListUnmanaged(u8) = .empty, /// .symtab buffer -symtab: std.ArrayListUnmanaged(elf.Elf64_Sym) = .{}, +symtab: std.ArrayListUnmanaged(elf.Elf64_Sym) = .empty, /// .strtab buffer -strtab: std.ArrayListUnmanaged(u8) = .{}, +strtab: std.ArrayListUnmanaged(u8) = .empty, /// Dynamic symbol table. Only populated and emitted when linking dynamically. dynsym: DynsymSection = .{}, /// .dynstrtab buffer -dynstrtab: std.ArrayListUnmanaged(u8) = .{}, +dynstrtab: std.ArrayListUnmanaged(u8) = .empty, /// Version symbol table. Only populated and emitted when linking dynamically. 
-versym: std.ArrayListUnmanaged(elf.Elf64_Versym) = .{}, +versym: std.ArrayListUnmanaged(elf.Elf64_Versym) = .empty, /// .verneed section verneed: VerneedSection = .{}, /// .got section got: GotSection = .{}, /// .rela.dyn section -rela_dyn: std.ArrayListUnmanaged(elf.Elf64_Rela) = .{}, +rela_dyn: std.ArrayListUnmanaged(elf.Elf64_Rela) = .empty, /// .dynamic section dynamic: DynamicSection = .{}, /// .hash section @@ -109,10 +109,10 @@ plt_got: PltGotSection = .{}, /// .copyrel section copy_rel: CopyRelSection = .{}, /// .rela.plt section -rela_plt: std.ArrayListUnmanaged(elf.Elf64_Rela) = .{}, +rela_plt: std.ArrayListUnmanaged(elf.Elf64_Rela) = .empty, /// SHT_GROUP sections /// Applies only to a relocatable. -comdat_group_sections: std.ArrayListUnmanaged(ComdatGroupSection) = .{}, +comdat_group_sections: std.ArrayListUnmanaged(ComdatGroupSection) = .empty, copy_rel_section_index: ?u32 = null, dynamic_section_index: ?u32 = null, @@ -143,10 +143,10 @@ has_text_reloc: bool = false, num_ifunc_dynrelocs: usize = 0, /// List of range extension thunks. -thunks: std.ArrayListUnmanaged(Thunk) = .{}, +thunks: std.ArrayListUnmanaged(Thunk) = .empty, /// List of output merge sections with deduped contents. -merge_sections: std.ArrayListUnmanaged(MergeSection) = .{}, +merge_sections: std.ArrayListUnmanaged(MergeSection) = .empty, first_eflags: ?elf.Elf64_Word = null, @@ -5487,9 +5487,9 @@ pub const Ref = struct { }; pub const SymbolResolver = struct { - keys: std.ArrayListUnmanaged(Key) = .{}, - values: std.ArrayListUnmanaged(Ref) = .{}, - table: std.AutoArrayHashMapUnmanaged(void, void) = .{}, + keys: std.ArrayListUnmanaged(Key) = .empty, + values: std.ArrayListUnmanaged(Ref) = .empty, + table: std.AutoArrayHashMapUnmanaged(void, void) = .empty, const Result = struct { found_existing: bool, @@ -5586,7 +5586,7 @@ const Section = struct { /// List of atoms contributing to this section. 
/// TODO currently this is only used for relocations tracking in relocatable mode /// but will be merged with atom_list_2. - atom_list: std.ArrayListUnmanaged(Ref) = .{}, + atom_list: std.ArrayListUnmanaged(Ref) = .empty, /// List of atoms contributing to this section. /// This can be used by sections that require special handling such as init/fini array, etc. @@ -5610,7 +5610,7 @@ const Section = struct { /// overcapacity can be negative. A simple way to have negative overcapacity is to /// allocate a fresh text block, which will have ideal capacity, and then grow it /// by 1 byte. It will then have -1 overcapacity. - free_list: std.ArrayListUnmanaged(Ref) = .{}, + free_list: std.ArrayListUnmanaged(Ref) = .empty, }; fn defaultEntrySymbolName(cpu_arch: std.Target.Cpu.Arch) []const u8 { diff --git a/src/link/Elf/Archive.zig b/src/link/Elf/Archive.zig index 030ddc13c645..8a9716103498 100644 --- a/src/link/Elf/Archive.zig +++ b/src/link/Elf/Archive.zig @@ -1,5 +1,5 @@ -objects: std.ArrayListUnmanaged(Object) = .{}, -strtab: std.ArrayListUnmanaged(u8) = .{}, +objects: std.ArrayListUnmanaged(Object) = .empty, +strtab: std.ArrayListUnmanaged(u8) = .empty, pub fn isArchive(path: []const u8) !bool { const file = try std.fs.cwd().openFile(path, .{}); @@ -127,7 +127,7 @@ const strtab_delimiter = '\n'; pub const max_member_name_len = 15; pub const ArSymtab = struct { - symtab: std.ArrayListUnmanaged(Entry) = .{}, + symtab: std.ArrayListUnmanaged(Entry) = .empty, strtab: StringTable = .{}, pub fn deinit(ar: *ArSymtab, allocator: Allocator) void { @@ -241,7 +241,7 @@ pub const ArSymtab = struct { }; pub const ArStrtab = struct { - buffer: std.ArrayListUnmanaged(u8) = .{}, + buffer: std.ArrayListUnmanaged(u8) = .empty, pub fn deinit(ar: *ArStrtab, allocator: Allocator) void { ar.buffer.deinit(allocator); diff --git a/src/link/Elf/AtomList.zig b/src/link/Elf/AtomList.zig index 51407ca6d977..5fc189337671 100644 --- a/src/link/Elf/AtomList.zig +++ b/src/link/Elf/AtomList.zig @@ 
-2,7 +2,7 @@ value: i64 = 0, size: u64 = 0, alignment: Atom.Alignment = .@"1", output_section_index: u32 = 0, -atoms: std.ArrayListUnmanaged(Elf.Ref) = .{}, +atoms: std.ArrayListUnmanaged(Elf.Ref) = .empty, pub fn deinit(list: *AtomList, allocator: Allocator) void { list.atoms.deinit(allocator); diff --git a/src/link/Elf/LdScript.zig b/src/link/Elf/LdScript.zig index 414ce035a48f..bf5efd213727 100644 --- a/src/link/Elf/LdScript.zig +++ b/src/link/Elf/LdScript.zig @@ -1,6 +1,6 @@ path: []const u8, cpu_arch: ?std.Target.Cpu.Arch = null, -args: std.ArrayListUnmanaged(Elf.SystemLib) = .{}, +args: std.ArrayListUnmanaged(Elf.SystemLib) = .empty, pub fn deinit(scr: *LdScript, allocator: Allocator) void { scr.args.deinit(allocator); diff --git a/src/link/Elf/LinkerDefined.zig b/src/link/Elf/LinkerDefined.zig index 131ed6ad715a..59aea19efa33 100644 --- a/src/link/Elf/LinkerDefined.zig +++ b/src/link/Elf/LinkerDefined.zig @@ -1,11 +1,11 @@ index: File.Index, -symtab: std.ArrayListUnmanaged(elf.Elf64_Sym) = .{}, -strtab: std.ArrayListUnmanaged(u8) = .{}, +symtab: std.ArrayListUnmanaged(elf.Elf64_Sym) = .empty, +strtab: std.ArrayListUnmanaged(u8) = .empty, -symbols: std.ArrayListUnmanaged(Symbol) = .{}, -symbols_extra: std.ArrayListUnmanaged(u32) = .{}, -symbols_resolver: std.ArrayListUnmanaged(Elf.SymbolResolver.Index) = .{}, +symbols: std.ArrayListUnmanaged(Symbol) = .empty, +symbols_extra: std.ArrayListUnmanaged(u32) = .empty, +symbols_resolver: std.ArrayListUnmanaged(Elf.SymbolResolver.Index) = .empty, entry_index: ?Symbol.Index = null, dynamic_index: ?Symbol.Index = null, @@ -24,7 +24,7 @@ dso_handle_index: ?Symbol.Index = null, rela_iplt_start_index: ?Symbol.Index = null, rela_iplt_end_index: ?Symbol.Index = null, global_pointer_index: ?Symbol.Index = null, -start_stop_indexes: std.ArrayListUnmanaged(u32) = .{}, +start_stop_indexes: std.ArrayListUnmanaged(u32) = .empty, output_symtab_ctx: Elf.SymtabCtx = .{}, diff --git a/src/link/Elf/Object.zig b/src/link/Elf/Object.zig 
index 18c7a91c8f0e..a7091b739423 100644 --- a/src/link/Elf/Object.zig +++ b/src/link/Elf/Object.zig @@ -4,29 +4,29 @@ file_handle: File.HandleIndex, index: File.Index, header: ?elf.Elf64_Ehdr = null, -shdrs: std.ArrayListUnmanaged(elf.Elf64_Shdr) = .{}, +shdrs: std.ArrayListUnmanaged(elf.Elf64_Shdr) = .empty, -symtab: std.ArrayListUnmanaged(elf.Elf64_Sym) = .{}, -strtab: std.ArrayListUnmanaged(u8) = .{}, +symtab: std.ArrayListUnmanaged(elf.Elf64_Sym) = .empty, +strtab: std.ArrayListUnmanaged(u8) = .empty, first_global: ?Symbol.Index = null, -symbols: std.ArrayListUnmanaged(Symbol) = .{}, -symbols_extra: std.ArrayListUnmanaged(u32) = .{}, -symbols_resolver: std.ArrayListUnmanaged(Elf.SymbolResolver.Index) = .{}, -relocs: std.ArrayListUnmanaged(elf.Elf64_Rela) = .{}, +symbols: std.ArrayListUnmanaged(Symbol) = .empty, +symbols_extra: std.ArrayListUnmanaged(u32) = .empty, +symbols_resolver: std.ArrayListUnmanaged(Elf.SymbolResolver.Index) = .empty, +relocs: std.ArrayListUnmanaged(elf.Elf64_Rela) = .empty, -atoms: std.ArrayListUnmanaged(Atom) = .{}, -atoms_indexes: std.ArrayListUnmanaged(Atom.Index) = .{}, -atoms_extra: std.ArrayListUnmanaged(u32) = .{}, +atoms: std.ArrayListUnmanaged(Atom) = .empty, +atoms_indexes: std.ArrayListUnmanaged(Atom.Index) = .empty, +atoms_extra: std.ArrayListUnmanaged(u32) = .empty, -comdat_groups: std.ArrayListUnmanaged(Elf.ComdatGroup) = .{}, -comdat_group_data: std.ArrayListUnmanaged(u32) = .{}, +comdat_groups: std.ArrayListUnmanaged(Elf.ComdatGroup) = .empty, +comdat_group_data: std.ArrayListUnmanaged(u32) = .empty, -input_merge_sections: std.ArrayListUnmanaged(InputMergeSection) = .{}, -input_merge_sections_indexes: std.ArrayListUnmanaged(InputMergeSection.Index) = .{}, +input_merge_sections: std.ArrayListUnmanaged(InputMergeSection) = .empty, +input_merge_sections_indexes: std.ArrayListUnmanaged(InputMergeSection.Index) = .empty, -fdes: std.ArrayListUnmanaged(Fde) = .{}, -cies: std.ArrayListUnmanaged(Cie) = .{}, -eh_frame_data: 
std.ArrayListUnmanaged(u8) = .{}, +fdes: std.ArrayListUnmanaged(Fde) = .empty, +cies: std.ArrayListUnmanaged(Cie) = .empty, +eh_frame_data: std.ArrayListUnmanaged(u8) = .empty, alive: bool = true, num_dynrelocs: u32 = 0, diff --git a/src/link/Elf/SharedObject.zig b/src/link/Elf/SharedObject.zig index 677e63ebafeb..3000af39ca06 100644 --- a/src/link/Elf/SharedObject.zig +++ b/src/link/Elf/SharedObject.zig @@ -2,20 +2,20 @@ path: []const u8, index: File.Index, header: ?elf.Elf64_Ehdr = null, -shdrs: std.ArrayListUnmanaged(elf.Elf64_Shdr) = .{}, +shdrs: std.ArrayListUnmanaged(elf.Elf64_Shdr) = .empty, -symtab: std.ArrayListUnmanaged(elf.Elf64_Sym) = .{}, -strtab: std.ArrayListUnmanaged(u8) = .{}, +symtab: std.ArrayListUnmanaged(elf.Elf64_Sym) = .empty, +strtab: std.ArrayListUnmanaged(u8) = .empty, /// Version symtab contains version strings of the symbols if present. -versyms: std.ArrayListUnmanaged(elf.Elf64_Versym) = .{}, -verstrings: std.ArrayListUnmanaged(u32) = .{}, +versyms: std.ArrayListUnmanaged(elf.Elf64_Versym) = .empty, +verstrings: std.ArrayListUnmanaged(u32) = .empty, -symbols: std.ArrayListUnmanaged(Symbol) = .{}, -symbols_extra: std.ArrayListUnmanaged(u32) = .{}, -symbols_resolver: std.ArrayListUnmanaged(Elf.SymbolResolver.Index) = .{}, +symbols: std.ArrayListUnmanaged(Symbol) = .empty, +symbols_extra: std.ArrayListUnmanaged(u32) = .empty, +symbols_resolver: std.ArrayListUnmanaged(Elf.SymbolResolver.Index) = .empty, aliases: ?std.ArrayListUnmanaged(u32) = null, -dynamic_table: std.ArrayListUnmanaged(elf.Elf64_Dyn) = .{}, +dynamic_table: std.ArrayListUnmanaged(elf.Elf64_Dyn) = .empty, needed: bool, alive: bool, diff --git a/src/link/Elf/Thunk.zig b/src/link/Elf/Thunk.zig index 389ba7ffed4c..23dc2f3b0b27 100644 --- a/src/link/Elf/Thunk.zig +++ b/src/link/Elf/Thunk.zig @@ -1,6 +1,6 @@ value: i64 = 0, output_section_index: u32 = 0, -symbols: std.AutoArrayHashMapUnmanaged(Elf.Ref, void) = .{}, +symbols: std.AutoArrayHashMapUnmanaged(Elf.Ref, void) = .empty, 
output_symtab_ctx: Elf.SymtabCtx = .{}, pub fn deinit(thunk: *Thunk, allocator: Allocator) void { diff --git a/src/link/Elf/ZigObject.zig b/src/link/Elf/ZigObject.zig index 98449d6a5be2..671049919d99 100644 --- a/src/link/Elf/ZigObject.zig +++ b/src/link/Elf/ZigObject.zig @@ -3,24 +3,24 @@ //! and any relocations that may have been emitted. //! Think about this as fake in-memory Object file for the Zig module. -data: std.ArrayListUnmanaged(u8) = .{}, +data: std.ArrayListUnmanaged(u8) = .empty, /// Externally owned memory. path: []const u8, index: File.Index, symtab: std.MultiArrayList(ElfSym) = .{}, strtab: StringTable = .{}, -symbols: std.ArrayListUnmanaged(Symbol) = .{}, -symbols_extra: std.ArrayListUnmanaged(u32) = .{}, -symbols_resolver: std.ArrayListUnmanaged(Elf.SymbolResolver.Index) = .{}, -local_symbols: std.ArrayListUnmanaged(Symbol.Index) = .{}, -global_symbols: std.ArrayListUnmanaged(Symbol.Index) = .{}, -globals_lookup: std.AutoHashMapUnmanaged(u32, Symbol.Index) = .{}, - -atoms: std.ArrayListUnmanaged(Atom) = .{}, -atoms_indexes: std.ArrayListUnmanaged(Atom.Index) = .{}, -atoms_extra: std.ArrayListUnmanaged(u32) = .{}, -relocs: std.ArrayListUnmanaged(std.ArrayListUnmanaged(elf.Elf64_Rela)) = .{}, +symbols: std.ArrayListUnmanaged(Symbol) = .empty, +symbols_extra: std.ArrayListUnmanaged(u32) = .empty, +symbols_resolver: std.ArrayListUnmanaged(Elf.SymbolResolver.Index) = .empty, +local_symbols: std.ArrayListUnmanaged(Symbol.Index) = .empty, +global_symbols: std.ArrayListUnmanaged(Symbol.Index) = .empty, +globals_lookup: std.AutoHashMapUnmanaged(u32, Symbol.Index) = .empty, + +atoms: std.ArrayListUnmanaged(Atom) = .empty, +atoms_indexes: std.ArrayListUnmanaged(Atom.Index) = .empty, +atoms_extra: std.ArrayListUnmanaged(u32) = .empty, +relocs: std.ArrayListUnmanaged(std.ArrayListUnmanaged(elf.Elf64_Rela)) = .empty, num_dynrelocs: u32 = 0, @@ -2313,7 +2313,7 @@ const LazySymbolMetadata = struct { const AvMetadata = struct { symbol_index: Symbol.Index, /// A 
list of all exports aliases of this Av. - exports: std.ArrayListUnmanaged(Symbol.Index) = .{}, + exports: std.ArrayListUnmanaged(Symbol.Index) = .empty, /// Set to true if the AV has been initialized and allocated. allocated: bool = false, diff --git a/src/link/Elf/merge_section.zig b/src/link/Elf/merge_section.zig index 6241e1aec967..cf6506a9ea80 100644 --- a/src/link/Elf/merge_section.zig +++ b/src/link/Elf/merge_section.zig @@ -7,15 +7,15 @@ pub const MergeSection = struct { type: u32 = 0, flags: u64 = 0, output_section_index: u32 = 0, - bytes: std.ArrayListUnmanaged(u8) = .{}, + bytes: std.ArrayListUnmanaged(u8) = .empty, table: std.HashMapUnmanaged( String, MergeSubsection.Index, IndexContext, std.hash_map.default_max_load_percentage, ) = .{}, - subsections: std.ArrayListUnmanaged(MergeSubsection) = .{}, - finalized_subsections: std.ArrayListUnmanaged(MergeSubsection.Index) = .{}, + subsections: std.ArrayListUnmanaged(MergeSubsection) = .empty, + finalized_subsections: std.ArrayListUnmanaged(MergeSubsection.Index) = .empty, pub fn deinit(msec: *MergeSection, allocator: Allocator) void { msec.bytes.deinit(allocator); @@ -276,10 +276,10 @@ pub const MergeSubsection = struct { pub const InputMergeSection = struct { merge_section_index: MergeSection.Index = 0, atom_index: Atom.Index = 0, - offsets: std.ArrayListUnmanaged(u32) = .{}, - subsections: std.ArrayListUnmanaged(MergeSubsection.Index) = .{}, - bytes: std.ArrayListUnmanaged(u8) = .{}, - strings: std.ArrayListUnmanaged(String) = .{}, + offsets: std.ArrayListUnmanaged(u32) = .empty, + subsections: std.ArrayListUnmanaged(MergeSubsection.Index) = .empty, + bytes: std.ArrayListUnmanaged(u8) = .empty, + strings: std.ArrayListUnmanaged(String) = .empty, pub fn deinit(imsec: *InputMergeSection, allocator: Allocator) void { imsec.offsets.deinit(allocator); diff --git a/src/link/Elf/synthetic_sections.zig b/src/link/Elf/synthetic_sections.zig index a159ba23c16b..f914bb8d8415 100644 --- 
a/src/link/Elf/synthetic_sections.zig +++ b/src/link/Elf/synthetic_sections.zig @@ -1,6 +1,6 @@ pub const DynamicSection = struct { soname: ?u32 = null, - needed: std.ArrayListUnmanaged(u32) = .{}, + needed: std.ArrayListUnmanaged(u32) = .empty, rpath: u32 = 0, pub fn deinit(dt: *DynamicSection, allocator: Allocator) void { @@ -226,7 +226,7 @@ pub const DynamicSection = struct { }; pub const GotSection = struct { - entries: std.ArrayListUnmanaged(Entry) = .{}, + entries: std.ArrayListUnmanaged(Entry) = .empty, output_symtab_ctx: Elf.SymtabCtx = .{}, tlsld_index: ?u32 = null, flags: Flags = .{}, @@ -629,7 +629,7 @@ pub const GotSection = struct { }; pub const PltSection = struct { - symbols: std.ArrayListUnmanaged(Elf.Ref) = .{}, + symbols: std.ArrayListUnmanaged(Elf.Ref) = .empty, output_symtab_ctx: Elf.SymtabCtx = .{}, pub fn deinit(plt: *PltSection, allocator: Allocator) void { @@ -883,7 +883,7 @@ pub const GotPltSection = struct { }; pub const PltGotSection = struct { - symbols: std.ArrayListUnmanaged(Elf.Ref) = .{}, + symbols: std.ArrayListUnmanaged(Elf.Ref) = .empty, output_symtab_ctx: Elf.SymtabCtx = .{}, pub fn deinit(plt_got: *PltGotSection, allocator: Allocator) void { @@ -994,7 +994,7 @@ pub const PltGotSection = struct { }; pub const CopyRelSection = struct { - symbols: std.ArrayListUnmanaged(Elf.Ref) = .{}, + symbols: std.ArrayListUnmanaged(Elf.Ref) = .empty, pub fn deinit(copy_rel: *CopyRelSection, allocator: Allocator) void { copy_rel.symbols.deinit(allocator); @@ -1072,7 +1072,7 @@ pub const CopyRelSection = struct { }; pub const DynsymSection = struct { - entries: std.ArrayListUnmanaged(Entry) = .{}, + entries: std.ArrayListUnmanaged(Entry) = .empty, pub const Entry = struct { /// Ref of the symbol which gets privilege of getting a dynamic treatment @@ -1156,7 +1156,7 @@ pub const DynsymSection = struct { }; pub const HashSection = struct { - buffer: std.ArrayListUnmanaged(u8) = .{}, + buffer: std.ArrayListUnmanaged(u8) = .empty, pub fn deinit(hs: 
*HashSection, allocator: Allocator) void { hs.buffer.deinit(allocator); @@ -1320,8 +1320,8 @@ pub const GnuHashSection = struct { }; pub const VerneedSection = struct { - verneed: std.ArrayListUnmanaged(elf.Elf64_Verneed) = .{}, - vernaux: std.ArrayListUnmanaged(elf.Elf64_Vernaux) = .{}, + verneed: std.ArrayListUnmanaged(elf.Elf64_Verneed) = .empty, + vernaux: std.ArrayListUnmanaged(elf.Elf64_Vernaux) = .empty, index: elf.Elf64_Versym = elf.VER_NDX_GLOBAL + 1, pub fn deinit(vern: *VerneedSection, allocator: Allocator) void { diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 27bfc9392ea6..42923ecfba20 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -13,21 +13,21 @@ files: std.MultiArrayList(File.Entry) = .{}, /// Long-lived list of all file descriptors. /// We store them globally rather than per actual File so that we can re-use /// one file handle per every object file within an archive. -file_handles: std.ArrayListUnmanaged(File.Handle) = .{}, +file_handles: std.ArrayListUnmanaged(File.Handle) = .empty, zig_object: ?File.Index = null, internal_object: ?File.Index = null, -objects: std.ArrayListUnmanaged(File.Index) = .{}, -dylibs: std.ArrayListUnmanaged(File.Index) = .{}, +objects: std.ArrayListUnmanaged(File.Index) = .empty, +dylibs: std.ArrayListUnmanaged(File.Index) = .empty, -segments: std.ArrayListUnmanaged(macho.segment_command_64) = .{}, +segments: std.ArrayListUnmanaged(macho.segment_command_64) = .empty, sections: std.MultiArrayList(Section) = .{}, resolver: SymbolResolver = .{}, /// This table will be populated after `scanRelocs` has run. /// Key is symbol index. 
-undefs: std.AutoArrayHashMapUnmanaged(SymbolResolver.Index, std.ArrayListUnmanaged(Ref)) = .{}, +undefs: std.AutoArrayHashMapUnmanaged(SymbolResolver.Index, std.ArrayListUnmanaged(Ref)) = .empty, undefs_mutex: std.Thread.Mutex = .{}, -dupes: std.AutoArrayHashMapUnmanaged(SymbolResolver.Index, std.ArrayListUnmanaged(File.Index)) = .{}, +dupes: std.AutoArrayHashMapUnmanaged(SymbolResolver.Index, std.ArrayListUnmanaged(File.Index)) = .empty, dupes_mutex: std.Thread.Mutex = .{}, dyld_info_cmd: macho.dyld_info_command = .{}, @@ -52,11 +52,11 @@ eh_frame_sect_index: ?u8 = null, unwind_info_sect_index: ?u8 = null, objc_stubs_sect_index: ?u8 = null, -thunks: std.ArrayListUnmanaged(Thunk) = .{}, +thunks: std.ArrayListUnmanaged(Thunk) = .empty, /// Output synthetic sections -symtab: std.ArrayListUnmanaged(macho.nlist_64) = .{}, -strtab: std.ArrayListUnmanaged(u8) = .{}, +symtab: std.ArrayListUnmanaged(macho.nlist_64) = .empty, +strtab: std.ArrayListUnmanaged(u8) = .empty, indsymtab: Indsymtab = .{}, got: GotSection = .{}, stubs: StubsSection = .{}, @@ -4041,19 +4041,19 @@ const default_entry_symbol_name = "_main"; const Section = struct { header: macho.section_64, segment_id: u8, - atoms: std.ArrayListUnmanaged(Ref) = .{}, - free_list: std.ArrayListUnmanaged(Atom.Index) = .{}, + atoms: std.ArrayListUnmanaged(Ref) = .empty, + free_list: std.ArrayListUnmanaged(Atom.Index) = .empty, last_atom_index: Atom.Index = 0, - thunks: std.ArrayListUnmanaged(Thunk.Index) = .{}, - out: std.ArrayListUnmanaged(u8) = .{}, - relocs: std.ArrayListUnmanaged(macho.relocation_info) = .{}, + thunks: std.ArrayListUnmanaged(Thunk.Index) = .empty, + out: std.ArrayListUnmanaged(u8) = .empty, + relocs: std.ArrayListUnmanaged(macho.relocation_info) = .empty, }; pub const LiteralPool = struct { - table: std.AutoArrayHashMapUnmanaged(void, void) = .{}, - keys: std.ArrayListUnmanaged(Key) = .{}, - values: std.ArrayListUnmanaged(MachO.Ref) = .{}, - data: std.ArrayListUnmanaged(u8) = .{}, + table: 
std.AutoArrayHashMapUnmanaged(void, void) = .empty, + keys: std.ArrayListUnmanaged(Key) = .empty, + values: std.ArrayListUnmanaged(MachO.Ref) = .empty, + data: std.ArrayListUnmanaged(u8) = .empty, pub fn deinit(lp: *LiteralPool, allocator: Allocator) void { lp.table.deinit(allocator); @@ -4480,9 +4480,9 @@ pub const Ref = struct { }; pub const SymbolResolver = struct { - keys: std.ArrayListUnmanaged(Key) = .{}, - values: std.ArrayListUnmanaged(Ref) = .{}, - table: std.AutoArrayHashMapUnmanaged(void, void) = .{}, + keys: std.ArrayListUnmanaged(Key) = .empty, + values: std.ArrayListUnmanaged(Ref) = .empty, + table: std.AutoArrayHashMapUnmanaged(void, void) = .empty, const Result = struct { found_existing: bool, diff --git a/src/link/MachO/Archive.zig b/src/link/MachO/Archive.zig index a1c8b84deb96..bb589dfb6619 100644 --- a/src/link/MachO/Archive.zig +++ b/src/link/MachO/Archive.zig @@ -1,4 +1,4 @@ -objects: std.ArrayListUnmanaged(Object) = .{}, +objects: std.ArrayListUnmanaged(Object) = .empty, pub fn deinit(self: *Archive, allocator: Allocator) void { self.objects.deinit(allocator); @@ -181,7 +181,7 @@ pub const ar_hdr = extern struct { }; pub const ArSymtab = struct { - entries: std.ArrayListUnmanaged(Entry) = .{}, + entries: std.ArrayListUnmanaged(Entry) = .empty, strtab: StringTable = .{}, pub fn deinit(ar: *ArSymtab, allocator: Allocator) void { diff --git a/src/link/MachO/CodeSignature.zig b/src/link/MachO/CodeSignature.zig index 0b9c12204f51..c8a092eab6f9 100644 --- a/src/link/MachO/CodeSignature.zig +++ b/src/link/MachO/CodeSignature.zig @@ -53,7 +53,7 @@ const CodeDirectory = struct { inner: macho.CodeDirectory, ident: []const u8, special_slots: [n_special_slots][hash_size]u8, - code_slots: std.ArrayListUnmanaged([hash_size]u8) = .{}, + code_slots: std.ArrayListUnmanaged([hash_size]u8) = .empty, const n_special_slots: usize = 7; diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig index 7d00c413c2e0..093a202fd4f9 100644 --- 
a/src/link/MachO/DebugSymbols.zig +++ b/src/link/MachO/DebugSymbols.zig @@ -4,8 +4,8 @@ file: fs.File, symtab_cmd: macho.symtab_command = .{}, uuid_cmd: macho.uuid_command = .{ .uuid = [_]u8{0} ** 16 }, -segments: std.ArrayListUnmanaged(macho.segment_command_64) = .{}, -sections: std.ArrayListUnmanaged(macho.section_64) = .{}, +segments: std.ArrayListUnmanaged(macho.segment_command_64) = .empty, +sections: std.ArrayListUnmanaged(macho.section_64) = .empty, dwarf_segment_cmd_index: ?u8 = null, linkedit_segment_cmd_index: ?u8 = null, @@ -19,11 +19,11 @@ debug_line_str_section_index: ?u8 = null, debug_loclists_section_index: ?u8 = null, debug_rnglists_section_index: ?u8 = null, -relocs: std.ArrayListUnmanaged(Reloc) = .{}, +relocs: std.ArrayListUnmanaged(Reloc) = .empty, /// Output synthetic sections -symtab: std.ArrayListUnmanaged(macho.nlist_64) = .{}, -strtab: std.ArrayListUnmanaged(u8) = .{}, +symtab: std.ArrayListUnmanaged(macho.nlist_64) = .empty, +strtab: std.ArrayListUnmanaged(u8) = .empty, pub const Reloc = struct { type: enum { diff --git a/src/link/MachO/Dylib.zig b/src/link/MachO/Dylib.zig index f5ed166ee0e2..9852cfb234be 100644 --- a/src/link/MachO/Dylib.zig +++ b/src/link/MachO/Dylib.zig @@ -6,15 +6,15 @@ file_handle: File.HandleIndex, tag: enum { dylib, tbd }, exports: std.MultiArrayList(Export) = .{}, -strtab: std.ArrayListUnmanaged(u8) = .{}, +strtab: std.ArrayListUnmanaged(u8) = .empty, id: ?Id = null, ordinal: u16 = 0, -symbols: std.ArrayListUnmanaged(Symbol) = .{}, -symbols_extra: std.ArrayListUnmanaged(u32) = .{}, -globals: std.ArrayListUnmanaged(MachO.SymbolResolver.Index) = .{}, -dependents: std.ArrayListUnmanaged(Id) = .{}, -rpaths: std.StringArrayHashMapUnmanaged(void) = .{}, +symbols: std.ArrayListUnmanaged(Symbol) = .empty, +symbols_extra: std.ArrayListUnmanaged(u32) = .empty, +globals: std.ArrayListUnmanaged(MachO.SymbolResolver.Index) = .empty, +dependents: std.ArrayListUnmanaged(Id) = .empty, +rpaths: std.StringArrayHashMapUnmanaged(void) 
= .empty, umbrella: File.Index, platform: ?MachO.Platform = null, @@ -742,7 +742,7 @@ pub const TargetMatcher = struct { allocator: Allocator, cpu_arch: std.Target.Cpu.Arch, platform: macho.PLATFORM, - target_strings: std.ArrayListUnmanaged([]const u8) = .{}, + target_strings: std.ArrayListUnmanaged([]const u8) = .empty, pub fn init(allocator: Allocator, cpu_arch: std.Target.Cpu.Arch, platform: macho.PLATFORM) !TargetMatcher { var self = TargetMatcher{ diff --git a/src/link/MachO/InternalObject.zig b/src/link/MachO/InternalObject.zig index 4054429ef867..ed7a05b02300 100644 --- a/src/link/MachO/InternalObject.zig +++ b/src/link/MachO/InternalObject.zig @@ -1,19 +1,19 @@ index: File.Index, sections: std.MultiArrayList(Section) = .{}, -atoms: std.ArrayListUnmanaged(Atom) = .{}, -atoms_indexes: std.ArrayListUnmanaged(Atom.Index) = .{}, -atoms_extra: std.ArrayListUnmanaged(u32) = .{}, -symtab: std.ArrayListUnmanaged(macho.nlist_64) = .{}, -strtab: std.ArrayListUnmanaged(u8) = .{}, -symbols: std.ArrayListUnmanaged(Symbol) = .{}, -symbols_extra: std.ArrayListUnmanaged(u32) = .{}, -globals: std.ArrayListUnmanaged(MachO.SymbolResolver.Index) = .{}, - -objc_methnames: std.ArrayListUnmanaged(u8) = .{}, +atoms: std.ArrayListUnmanaged(Atom) = .empty, +atoms_indexes: std.ArrayListUnmanaged(Atom.Index) = .empty, +atoms_extra: std.ArrayListUnmanaged(u32) = .empty, +symtab: std.ArrayListUnmanaged(macho.nlist_64) = .empty, +strtab: std.ArrayListUnmanaged(u8) = .empty, +symbols: std.ArrayListUnmanaged(Symbol) = .empty, +symbols_extra: std.ArrayListUnmanaged(u32) = .empty, +globals: std.ArrayListUnmanaged(MachO.SymbolResolver.Index) = .empty, + +objc_methnames: std.ArrayListUnmanaged(u8) = .empty, objc_selrefs: [@sizeOf(u64)]u8 = [_]u8{0} ** @sizeOf(u64), -force_undefined: std.ArrayListUnmanaged(Symbol.Index) = .{}, +force_undefined: std.ArrayListUnmanaged(Symbol.Index) = .empty, entry_index: ?Symbol.Index = null, dyld_stub_binder_index: ?Symbol.Index = null, dyld_private_index: 
?Symbol.Index = null, @@ -21,7 +21,7 @@ objc_msg_send_index: ?Symbol.Index = null, mh_execute_header_index: ?Symbol.Index = null, mh_dylib_header_index: ?Symbol.Index = null, dso_handle_index: ?Symbol.Index = null, -boundary_symbols: std.ArrayListUnmanaged(Symbol.Index) = .{}, +boundary_symbols: std.ArrayListUnmanaged(Symbol.Index) = .empty, output_symtab_ctx: MachO.SymtabCtx = .{}, @@ -849,7 +849,7 @@ fn formatSymtab( const Section = struct { header: macho.section_64, - relocs: std.ArrayListUnmanaged(Relocation) = .{}, + relocs: std.ArrayListUnmanaged(Relocation) = .empty, extra: Extra = .{}, const Extra = packed struct { diff --git a/src/link/MachO/Object.zig b/src/link/MachO/Object.zig index 4d2662a83800..81f28de65a9d 100644 --- a/src/link/MachO/Object.zig +++ b/src/link/MachO/Object.zig @@ -9,27 +9,27 @@ in_archive: ?InArchive = null, header: ?macho.mach_header_64 = null, sections: std.MultiArrayList(Section) = .{}, symtab: std.MultiArrayList(Nlist) = .{}, -strtab: std.ArrayListUnmanaged(u8) = .{}, +strtab: std.ArrayListUnmanaged(u8) = .empty, -symbols: std.ArrayListUnmanaged(Symbol) = .{}, -symbols_extra: std.ArrayListUnmanaged(u32) = .{}, -globals: std.ArrayListUnmanaged(MachO.SymbolResolver.Index) = .{}, -atoms: std.ArrayListUnmanaged(Atom) = .{}, -atoms_indexes: std.ArrayListUnmanaged(Atom.Index) = .{}, -atoms_extra: std.ArrayListUnmanaged(u32) = .{}, +symbols: std.ArrayListUnmanaged(Symbol) = .empty, +symbols_extra: std.ArrayListUnmanaged(u32) = .empty, +globals: std.ArrayListUnmanaged(MachO.SymbolResolver.Index) = .empty, +atoms: std.ArrayListUnmanaged(Atom) = .empty, +atoms_indexes: std.ArrayListUnmanaged(Atom.Index) = .empty, +atoms_extra: std.ArrayListUnmanaged(u32) = .empty, platform: ?MachO.Platform = null, compile_unit: ?CompileUnit = null, -stab_files: std.ArrayListUnmanaged(StabFile) = .{}, +stab_files: std.ArrayListUnmanaged(StabFile) = .empty, eh_frame_sect_index: ?u8 = null, compact_unwind_sect_index: ?u8 = null, -cies: 
std.ArrayListUnmanaged(Cie) = .{}, -fdes: std.ArrayListUnmanaged(Fde) = .{}, -eh_frame_data: std.ArrayListUnmanaged(u8) = .{}, -unwind_records: std.ArrayListUnmanaged(UnwindInfo.Record) = .{}, -unwind_records_indexes: std.ArrayListUnmanaged(UnwindInfo.Record.Index) = .{}, -data_in_code: std.ArrayListUnmanaged(macho.data_in_code_entry) = .{}, +cies: std.ArrayListUnmanaged(Cie) = .empty, +fdes: std.ArrayListUnmanaged(Fde) = .empty, +eh_frame_data: std.ArrayListUnmanaged(u8) = .empty, +unwind_records: std.ArrayListUnmanaged(UnwindInfo.Record) = .empty, +unwind_records_indexes: std.ArrayListUnmanaged(UnwindInfo.Record.Index) = .empty, +data_in_code: std.ArrayListUnmanaged(macho.data_in_code_entry) = .empty, alive: bool = true, hidden: bool = false, @@ -2675,8 +2675,8 @@ fn formatPath( const Section = struct { header: macho.section_64, - subsections: std.ArrayListUnmanaged(Subsection) = .{}, - relocs: std.ArrayListUnmanaged(Relocation) = .{}, + subsections: std.ArrayListUnmanaged(Subsection) = .empty, + relocs: std.ArrayListUnmanaged(Relocation) = .empty, }; const Subsection = struct { @@ -2692,7 +2692,7 @@ pub const Nlist = struct { const StabFile = struct { comp_dir: u32, - stabs: std.ArrayListUnmanaged(Stab) = .{}, + stabs: std.ArrayListUnmanaged(Stab) = .empty, fn getCompDir(sf: StabFile, object: Object) [:0]const u8 { const nlist = object.symtab.items(.nlist)[sf.comp_dir]; diff --git a/src/link/MachO/Thunk.zig b/src/link/MachO/Thunk.zig index 4a76a408edec..d720d4fd2558 100644 --- a/src/link/MachO/Thunk.zig +++ b/src/link/MachO/Thunk.zig @@ -1,6 +1,6 @@ value: u64 = 0, out_n_sect: u8 = 0, -symbols: std.AutoArrayHashMapUnmanaged(MachO.Ref, void) = .{}, +symbols: std.AutoArrayHashMapUnmanaged(MachO.Ref, void) = .empty, output_symtab_ctx: MachO.SymtabCtx = .{}, pub fn deinit(thunk: *Thunk, allocator: Allocator) void { diff --git a/src/link/MachO/UnwindInfo.zig b/src/link/MachO/UnwindInfo.zig index 42172b851873..cf8a49bed129 100644 --- a/src/link/MachO/UnwindInfo.zig 
+++ b/src/link/MachO/UnwindInfo.zig @@ -1,6 +1,6 @@ /// List of all unwind records gathered from all objects and sorted /// by allocated relative function address within the section. -records: std.ArrayListUnmanaged(Record.Ref) = .{}, +records: std.ArrayListUnmanaged(Record.Ref) = .empty, /// List of all personalities referenced by either unwind info entries /// or __eh_frame entries. @@ -12,11 +12,11 @@ common_encodings: [max_common_encodings]Encoding = undefined, common_encodings_count: u7 = 0, /// List of record indexes containing an LSDA pointer. -lsdas: std.ArrayListUnmanaged(u32) = .{}, -lsdas_lookup: std.ArrayListUnmanaged(u32) = .{}, +lsdas: std.ArrayListUnmanaged(u32) = .empty, +lsdas_lookup: std.ArrayListUnmanaged(u32) = .empty, /// List of second level pages. -pages: std.ArrayListUnmanaged(Page) = .{}, +pages: std.ArrayListUnmanaged(Page) = .empty, pub fn deinit(info: *UnwindInfo, allocator: Allocator) void { info.records.deinit(allocator); diff --git a/src/link/MachO/ZigObject.zig b/src/link/MachO/ZigObject.zig index 3ffa9c474581..a2d578845c1b 100644 --- a/src/link/MachO/ZigObject.zig +++ b/src/link/MachO/ZigObject.zig @@ -1,4 +1,4 @@ -data: std.ArrayListUnmanaged(u8) = .{}, +data: std.ArrayListUnmanaged(u8) = .empty, /// Externally owned memory. path: []const u8, index: File.Index, @@ -6,15 +6,15 @@ index: File.Index, symtab: std.MultiArrayList(Nlist) = .{}, strtab: StringTable = .{}, -symbols: std.ArrayListUnmanaged(Symbol) = .{}, -symbols_extra: std.ArrayListUnmanaged(u32) = .{}, -globals: std.ArrayListUnmanaged(MachO.SymbolResolver.Index) = .{}, +symbols: std.ArrayListUnmanaged(Symbol) = .empty, +symbols_extra: std.ArrayListUnmanaged(u32) = .empty, +globals: std.ArrayListUnmanaged(MachO.SymbolResolver.Index) = .empty, /// Maps string index (so name) into nlist index for the global symbol defined within this /// module. 
-globals_lookup: std.AutoHashMapUnmanaged(u32, u32) = .{}, -atoms: std.ArrayListUnmanaged(Atom) = .{}, -atoms_indexes: std.ArrayListUnmanaged(Atom.Index) = .{}, -atoms_extra: std.ArrayListUnmanaged(u32) = .{}, +globals_lookup: std.AutoHashMapUnmanaged(u32, u32) = .empty, +atoms: std.ArrayListUnmanaged(Atom) = .empty, +atoms_indexes: std.ArrayListUnmanaged(Atom.Index) = .empty, +atoms_extra: std.ArrayListUnmanaged(u32) = .empty, /// Table of tracked LazySymbols. lazy_syms: LazySymbolTable = .{}, @@ -1786,7 +1786,7 @@ fn formatAtoms( const AvMetadata = struct { symbol_index: Symbol.Index, /// A list of all exports aliases of this Av. - exports: std.ArrayListUnmanaged(Symbol.Index) = .{}, + exports: std.ArrayListUnmanaged(Symbol.Index) = .empty, fn @"export"(m: AvMetadata, zig_object: *ZigObject, name: []const u8) ?*u32 { for (m.exports.items) |*exp| { diff --git a/src/link/MachO/dyld_info/Rebase.zig b/src/link/MachO/dyld_info/Rebase.zig index 2bee8ad22caf..cbd0461431b6 100644 --- a/src/link/MachO/dyld_info/Rebase.zig +++ b/src/link/MachO/dyld_info/Rebase.zig @@ -1,5 +1,5 @@ -entries: std.ArrayListUnmanaged(Entry) = .{}, -buffer: std.ArrayListUnmanaged(u8) = .{}, +entries: std.ArrayListUnmanaged(Entry) = .empty, +buffer: std.ArrayListUnmanaged(u8) = .empty, pub const Entry = struct { offset: u64, diff --git a/src/link/MachO/dyld_info/Trie.zig b/src/link/MachO/dyld_info/Trie.zig index aed7b61df872..b45651eb67d8 100644 --- a/src/link/MachO/dyld_info/Trie.zig +++ b/src/link/MachO/dyld_info/Trie.zig @@ -31,9 +31,9 @@ /// The root node of the trie. root: ?Node.Index = null, -buffer: std.ArrayListUnmanaged(u8) = .{}, +buffer: std.ArrayListUnmanaged(u8) = .empty, nodes: std.MultiArrayList(Node) = .{}, -edges: std.ArrayListUnmanaged(Edge) = .{}, +edges: std.ArrayListUnmanaged(Edge) = .empty, /// Insert a symbol into the trie, updating the prefixes in the process. 
/// This operation may change the layout of the trie by splicing edges in @@ -317,7 +317,7 @@ const Node = struct { trie_offset: u32 = 0, /// List of all edges originating from this node. - edges: std.ArrayListUnmanaged(Edge.Index) = .{}, + edges: std.ArrayListUnmanaged(Edge.Index) = .empty, const Index = u32; }; diff --git a/src/link/MachO/dyld_info/bind.zig b/src/link/MachO/dyld_info/bind.zig index 310118af4138..328d6a402cda 100644 --- a/src/link/MachO/dyld_info/bind.zig +++ b/src/link/MachO/dyld_info/bind.zig @@ -17,8 +17,8 @@ pub const Entry = struct { }; pub const Bind = struct { - entries: std.ArrayListUnmanaged(Entry) = .{}, - buffer: std.ArrayListUnmanaged(u8) = .{}, + entries: std.ArrayListUnmanaged(Entry) = .empty, + buffer: std.ArrayListUnmanaged(u8) = .empty, const Self = @This(); @@ -269,8 +269,8 @@ pub const Bind = struct { }; pub const WeakBind = struct { - entries: std.ArrayListUnmanaged(Entry) = .{}, - buffer: std.ArrayListUnmanaged(u8) = .{}, + entries: std.ArrayListUnmanaged(Entry) = .empty, + buffer: std.ArrayListUnmanaged(u8) = .empty, const Self = @This(); @@ -511,9 +511,9 @@ pub const WeakBind = struct { }; pub const LazyBind = struct { - entries: std.ArrayListUnmanaged(Entry) = .{}, - buffer: std.ArrayListUnmanaged(u8) = .{}, - offsets: std.ArrayListUnmanaged(u32) = .{}, + entries: std.ArrayListUnmanaged(Entry) = .empty, + buffer: std.ArrayListUnmanaged(u8) = .empty, + offsets: std.ArrayListUnmanaged(u32) = .empty, const Self = @This(); diff --git a/src/link/MachO/synthetic.zig b/src/link/MachO/synthetic.zig index 5c7ede387d08..900316a76908 100644 --- a/src/link/MachO/synthetic.zig +++ b/src/link/MachO/synthetic.zig @@ -1,5 +1,5 @@ pub const GotSection = struct { - symbols: std.ArrayListUnmanaged(MachO.Ref) = .{}, + symbols: std.ArrayListUnmanaged(MachO.Ref) = .empty, pub const Index = u32; @@ -68,7 +68,7 @@ pub const GotSection = struct { }; pub const StubsSection = struct { - symbols: std.ArrayListUnmanaged(MachO.Ref) = .{}, + symbols: 
std.ArrayListUnmanaged(MachO.Ref) = .empty, pub const Index = u32; @@ -316,7 +316,7 @@ pub const LaSymbolPtrSection = struct { }; pub const TlvPtrSection = struct { - symbols: std.ArrayListUnmanaged(MachO.Ref) = .{}, + symbols: std.ArrayListUnmanaged(MachO.Ref) = .empty, pub const Index = u32; @@ -388,7 +388,7 @@ pub const TlvPtrSection = struct { }; pub const ObjcStubsSection = struct { - symbols: std.ArrayListUnmanaged(MachO.Ref) = .{}, + symbols: std.ArrayListUnmanaged(MachO.Ref) = .empty, pub fn deinit(objc: *ObjcStubsSection, allocator: Allocator) void { objc.symbols.deinit(allocator); @@ -548,7 +548,7 @@ pub const Indsymtab = struct { }; pub const DataInCode = struct { - entries: std.ArrayListUnmanaged(Entry) = .{}, + entries: std.ArrayListUnmanaged(Entry) = .empty, pub fn deinit(dice: *DataInCode, allocator: Allocator) void { dice.entries.deinit(allocator); diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 7737c22d05fa..413ab7372ac9 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -34,13 +34,13 @@ bases: Bases, /// Does not represent the order or amount of symbols in the file /// it is just useful for storing symbols. Some other symbols are in /// file_segments. -syms: std.ArrayListUnmanaged(aout.Sym) = .{}, +syms: std.ArrayListUnmanaged(aout.Sym) = .empty, /// The plan9 a.out format requires segments of /// filenames to be deduplicated, so we use this map to /// de duplicate it. The value is the value of the path /// component -file_segments: std.StringArrayHashMapUnmanaged(u16) = .{}, +file_segments: std.StringArrayHashMapUnmanaged(u16) = .empty, /// The value of a 'f' symbol increments by 1 every time, so that no 2 'f' /// symbols have the same value. 
file_segments_i: u16 = 1, @@ -54,19 +54,19 @@ path_arena: std.heap.ArenaAllocator, /// If we group the decls by file, it makes it really easy to do this (put the symbol in the correct place) fn_nav_table: std.AutoArrayHashMapUnmanaged( Zcu.File.Index, - struct { sym_index: u32, functions: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, FnNavOutput) = .{} }, + struct { sym_index: u32, functions: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, FnNavOutput) = .empty }, ) = .{}, /// the code is modified when relocated, so that is why it is mutable -data_nav_table: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, []u8) = .{}, +data_nav_table: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, []u8) = .empty, /// When `updateExports` is called, we store the export indices here, to be used /// during flush. -nav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, []u32) = .{}, +nav_exports: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, []u32) = .empty, lazy_syms: LazySymbolTable = .{}, -uavs: std.AutoHashMapUnmanaged(InternPool.Index, Atom.Index) = .{}, +uavs: std.AutoHashMapUnmanaged(InternPool.Index, Atom.Index) = .empty, -relocs: std.AutoHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Reloc)) = .{}, +relocs: std.AutoHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Reloc)) = .empty, hdr: aout.ExecHdr = undefined, // relocs: std. @@ -77,12 +77,12 @@ entry_val: ?u64 = null, got_len: usize = 0, // A list of all the free got indexes, so when making a new decl // don't make a new one, just use one from here. 
-got_index_free_list: std.ArrayListUnmanaged(usize) = .{}, +got_index_free_list: std.ArrayListUnmanaged(usize) = .empty, -syms_index_free_list: std.ArrayListUnmanaged(usize) = .{}, +syms_index_free_list: std.ArrayListUnmanaged(usize) = .empty, -atoms: std.ArrayListUnmanaged(Atom) = .{}, -navs: std.AutoHashMapUnmanaged(InternPool.Nav.Index, NavMetadata) = .{}, +atoms: std.ArrayListUnmanaged(Atom) = .empty, +navs: std.AutoHashMapUnmanaged(InternPool.Nav.Index, NavMetadata) = .empty, /// Indices of the three "special" symbols into atoms etext_edata_end_atom_indices: [3]?Atom.Index = .{ null, null, null }, @@ -220,7 +220,7 @@ pub const DebugInfoOutput = struct { const NavMetadata = struct { index: Atom.Index, - exports: std.ArrayListUnmanaged(usize) = .{}, + exports: std.ArrayListUnmanaged(usize) = .empty, fn getExport(m: NavMetadata, p9: *const Plan9, name: []const u8) ?usize { for (m.exports.items) |exp| { diff --git a/src/link/SpirV/BinaryModule.zig b/src/link/SpirV/BinaryModule.zig index 648e55f2caf4..c80bf9b06ae0 100644 --- a/src/link/SpirV/BinaryModule.zig +++ b/src/link/SpirV/BinaryModule.zig @@ -148,7 +148,7 @@ pub const Parser = struct { a: Allocator, /// Maps (instruction set, opcode) => instruction index (for instruction set) - opcode_table: std.AutoHashMapUnmanaged(u32, u16) = .{}, + opcode_table: std.AutoHashMapUnmanaged(u32, u16) = .empty, pub fn init(a: Allocator) !Parser { var self = Parser{ diff --git a/src/link/SpirV/deduplicate.zig b/src/link/SpirV/deduplicate.zig index 292ff0e86821..f639644f7b19 100644 --- a/src/link/SpirV/deduplicate.zig +++ b/src/link/SpirV/deduplicate.zig @@ -178,8 +178,8 @@ const ModuleInfo = struct { const EntityContext = struct { a: Allocator, - ptr_map_a: std.AutoArrayHashMapUnmanaged(ResultId, void) = .{}, - ptr_map_b: std.AutoArrayHashMapUnmanaged(ResultId, void) = .{}, + ptr_map_a: std.AutoArrayHashMapUnmanaged(ResultId, void) = .empty, + ptr_map_b: std.AutoArrayHashMapUnmanaged(ResultId, void) = .empty, info: *const 
ModuleInfo, binary: *const BinaryModule, diff --git a/src/link/SpirV/lower_invocation_globals.zig b/src/link/SpirV/lower_invocation_globals.zig index 111ec2621bca..a06a868e1877 100644 --- a/src/link/SpirV/lower_invocation_globals.zig +++ b/src/link/SpirV/lower_invocation_globals.zig @@ -342,9 +342,9 @@ const ModuleBuilder = struct { entry_point_new_id_base: u32, /// A set of all function types in the new program. SPIR-V mandates that these are unique, /// and until a general type deduplication pass is programmed, we just handle it here via this. - function_types: std.ArrayHashMapUnmanaged(FunctionType, ResultId, FunctionType.Context, true) = .{}, + function_types: std.ArrayHashMapUnmanaged(FunctionType, ResultId, FunctionType.Context, true) = .empty, /// Maps functions to new information required for creating the module - function_new_info: std.AutoArrayHashMapUnmanaged(ResultId, FunctionNewInfo) = .{}, + function_new_info: std.AutoArrayHashMapUnmanaged(ResultId, FunctionNewInfo) = .empty, /// Offset of the functions section in the new binary. new_functions_section: ?usize, diff --git a/src/link/StringTable.zig b/src/link/StringTable.zig index 2375bf44491b..b03e025ff006 100644 --- a/src/link/StringTable.zig +++ b/src/link/StringTable.zig @@ -1,5 +1,5 @@ -buffer: std.ArrayListUnmanaged(u8) = .{}, -table: std.HashMapUnmanaged(u32, void, StringIndexContext, std.hash_map.default_max_load_percentage) = .{}, +buffer: std.ArrayListUnmanaged(u8) = .empty, +table: std.HashMapUnmanaged(u32, void, StringIndexContext, std.hash_map.default_max_load_percentage) = .empty, pub fn deinit(self: *Self, gpa: Allocator) void { self.buffer.deinit(gpa); diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 23425a2e7c6e..7d97fb4c7d8e 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -72,11 +72,11 @@ files: std.MultiArrayList(File.Entry) = .{}, /// TODO: Allow setting this through a flag? host_name: []const u8 = "env", /// List of symbols generated by the linker. 
-synthetic_symbols: std.ArrayListUnmanaged(Symbol) = .{}, +synthetic_symbols: std.ArrayListUnmanaged(Symbol) = .empty, /// Maps atoms to their segment index -atoms: std.AutoHashMapUnmanaged(u32, Atom.Index) = .{}, +atoms: std.AutoHashMapUnmanaged(u32, Atom.Index) = .empty, /// List of all atoms. -managed_atoms: std.ArrayListUnmanaged(Atom) = .{}, +managed_atoms: std.ArrayListUnmanaged(Atom) = .empty, /// Represents the index into `segments` where the 'code' section /// lives. code_section_index: ?u32 = null, @@ -106,22 +106,22 @@ imported_globals_count: u32 = 0, /// to the table indexes when sections are merged. imported_tables_count: u32 = 0, /// Map of symbol locations, represented by its `types.Import` -imports: std.AutoHashMapUnmanaged(SymbolLoc, types.Import) = .{}, +imports: std.AutoHashMapUnmanaged(SymbolLoc, types.Import) = .empty, /// Represents non-synthetic section entries. /// Used for code, data and custom sections. -segments: std.ArrayListUnmanaged(Segment) = .{}, +segments: std.ArrayListUnmanaged(Segment) = .empty, /// Maps a data segment key (such as .rodata) to the index into `segments`. -data_segments: std.StringArrayHashMapUnmanaged(u32) = .{}, +data_segments: std.StringArrayHashMapUnmanaged(u32) = .empty, /// A table of `types.Segment` which provide meta data /// about a data symbol such as its name where the key is /// the segment index, which can be found from `data_segments` -segment_info: std.AutoArrayHashMapUnmanaged(u32, types.Segment) = .{}, +segment_info: std.AutoArrayHashMapUnmanaged(u32, types.Segment) = .empty, /// Deduplicated string table for strings used by symbols, imports and exports. string_table: StringTable = .{}, // Output sections /// Output type section -func_types: std.ArrayListUnmanaged(std.wasm.Type) = .{}, +func_types: std.ArrayListUnmanaged(std.wasm.Type) = .empty, /// Output function section where the key is the original /// function index and the value is function. 
/// This allows us to map multiple symbols to the same function. @@ -130,7 +130,7 @@ functions: std.AutoArrayHashMapUnmanaged( struct { func: std.wasm.Func, sym_index: Symbol.Index }, ) = .{}, /// Output global section -wasm_globals: std.ArrayListUnmanaged(std.wasm.Global) = .{}, +wasm_globals: std.ArrayListUnmanaged(std.wasm.Global) = .empty, /// Memory section memories: std.wasm.Memory = .{ .limits = .{ .min = 0, @@ -138,12 +138,12 @@ memories: std.wasm.Memory = .{ .limits = .{ .flags = 0, } }, /// Output table section -tables: std.ArrayListUnmanaged(std.wasm.Table) = .{}, +tables: std.ArrayListUnmanaged(std.wasm.Table) = .empty, /// Output export section -exports: std.ArrayListUnmanaged(types.Export) = .{}, +exports: std.ArrayListUnmanaged(types.Export) = .empty, /// List of initialization functions. These must be called in order of priority /// by the (synthetic) __wasm_call_ctors function. -init_funcs: std.ArrayListUnmanaged(InitFuncLoc) = .{}, +init_funcs: std.ArrayListUnmanaged(InitFuncLoc) = .empty, /// Index to a function defining the entry of the wasm file entry: ?u32 = null, @@ -152,31 +152,31 @@ entry: ?u32 = null, /// as well as an 'elements' section. /// /// Note: Key is symbol location, value represents the index into the table -function_table: std.AutoHashMapUnmanaged(SymbolLoc, u32) = .{}, +function_table: std.AutoHashMapUnmanaged(SymbolLoc, u32) = .empty, /// All object files and their data which are linked into the final binary -objects: std.ArrayListUnmanaged(File.Index) = .{}, +objects: std.ArrayListUnmanaged(File.Index) = .empty, /// All archive files that are lazy loaded. /// e.g. when an undefined symbol references a symbol from the archive. 
-archives: std.ArrayListUnmanaged(Archive) = .{}, +archives: std.ArrayListUnmanaged(Archive) = .empty, /// A map of global names (read: offset into string table) to their symbol location -globals: std.AutoHashMapUnmanaged(u32, SymbolLoc) = .{}, +globals: std.AutoHashMapUnmanaged(u32, SymbolLoc) = .empty, /// The list of GOT symbols and their location -got_symbols: std.ArrayListUnmanaged(SymbolLoc) = .{}, +got_symbols: std.ArrayListUnmanaged(SymbolLoc) = .empty, /// Maps discarded symbols and their positions to the location of the symbol /// it was resolved to -discarded: std.AutoHashMapUnmanaged(SymbolLoc, SymbolLoc) = .{}, +discarded: std.AutoHashMapUnmanaged(SymbolLoc, SymbolLoc) = .empty, /// List of all symbol locations which have been resolved by the linker and will be emit /// into the final binary. -resolved_symbols: std.AutoArrayHashMapUnmanaged(SymbolLoc, void) = .{}, +resolved_symbols: std.AutoArrayHashMapUnmanaged(SymbolLoc, void) = .empty, /// Symbols that remain undefined after symbol resolution. /// Note: The key represents an offset into the string table, rather than the actual string. -undefs: std.AutoArrayHashMapUnmanaged(u32, SymbolLoc) = .{}, +undefs: std.AutoArrayHashMapUnmanaged(u32, SymbolLoc) = .empty, /// Maps a symbol's location to an atom. This can be used to find meta /// data of a symbol, such as its size, or its offset to perform a relocation. /// Undefined (and synthetic) symbols do not have an Atom and therefore cannot be mapped. -symbol_atom: std.AutoHashMapUnmanaged(SymbolLoc, Atom.Index) = .{}, +symbol_atom: std.AutoHashMapUnmanaged(SymbolLoc, Atom.Index) = .empty, pub const Alignment = types.Alignment; @@ -287,7 +287,7 @@ pub const StringTable = struct { std.hash_map.default_max_load_percentage, ) = .{}, /// Holds the actual data of the string table. - string_data: std.ArrayListUnmanaged(u8) = .{}, + string_data: std.ArrayListUnmanaged(u8) = .empty, /// Accepts a string and searches for a corresponding string. 
/// When found, de-duplicates the string and returns the existing offset instead. @@ -1698,7 +1698,7 @@ fn allocateVirtualAddresses(wasm: *Wasm) void { fn sortDataSegments(wasm: *Wasm) !void { const gpa = wasm.base.comp.gpa; - var new_mapping: std.StringArrayHashMapUnmanaged(u32) = .{}; + var new_mapping: std.StringArrayHashMapUnmanaged(u32) = .empty; try new_mapping.ensureUnusedCapacity(gpa, wasm.data_segments.count()); errdefer new_mapping.deinit(gpa); diff --git a/src/link/Wasm/Archive.zig b/src/link/Wasm/Archive.zig index e069aeef8c97..c7e5c7caba2e 100644 --- a/src/link/Wasm/Archive.zig +++ b/src/link/Wasm/Archive.zig @@ -12,7 +12,7 @@ long_file_names: []const u8 = undefined, /// Parsed table of contents. /// Each symbol name points to a list of all definition /// sites within the current static archive. -toc: std.StringArrayHashMapUnmanaged(std.ArrayListUnmanaged(u32)) = .{}, +toc: std.StringArrayHashMapUnmanaged(std.ArrayListUnmanaged(u32)) = .empty, // Archive files start with the ARMAG identifying string. 
Then follows a // `struct ar_hdr', and as many bytes of member file data as its `ar_size' diff --git a/src/link/Wasm/Atom.zig b/src/link/Wasm/Atom.zig index e5ad4ee16126..dd373552d598 100644 --- a/src/link/Wasm/Atom.zig +++ b/src/link/Wasm/Atom.zig @@ -6,9 +6,9 @@ sym_index: Symbol.Index, /// Size of the atom, used to calculate section sizes in the final binary size: u32 = 0, /// List of relocations belonging to this atom -relocs: std.ArrayListUnmanaged(types.Relocation) = .{}, +relocs: std.ArrayListUnmanaged(types.Relocation) = .empty, /// Contains the binary data of an atom, which can be non-relocated -code: std.ArrayListUnmanaged(u8) = .{}, +code: std.ArrayListUnmanaged(u8) = .empty, /// For code this is 1, for data this is set to the highest value of all segments alignment: Wasm.Alignment = .@"1", /// Offset into the section where the atom lives, this already accounts @@ -22,7 +22,7 @@ original_offset: u32 = 0, prev: Atom.Index = .null, /// Contains atoms local to a decl, all managed by this `Atom`. /// When the parent atom is being freed, it will also do so for all local atoms. -locals: std.ArrayListUnmanaged(Atom.Index) = .{}, +locals: std.ArrayListUnmanaged(Atom.Index) = .empty, /// Represents the index of an Atom where `null` is considered /// an invalid atom. diff --git a/src/link/Wasm/Object.zig b/src/link/Wasm/Object.zig index fa46a1fea4b7..81a3cac7373e 100644 --- a/src/link/Wasm/Object.zig +++ b/src/link/Wasm/Object.zig @@ -51,7 +51,7 @@ start: ?u32 = null, features: []const types.Feature = &.{}, /// A table that maps the relocations we must perform where the key represents /// the section that the list of relocations applies to. 
-relocations: std.AutoArrayHashMapUnmanaged(u32, []types.Relocation) = .{}, +relocations: std.AutoArrayHashMapUnmanaged(u32, []types.Relocation) = .empty, /// Table of symbols belonging to this Object file symtable: []Symbol = &.{}, /// Extra metadata about the linking section, such as alignment of segments and their name @@ -62,7 +62,7 @@ init_funcs: []const types.InitFunc = &.{}, comdat_info: []const types.Comdat = &.{}, /// Represents non-synthetic sections that can essentially be mem-cpy'd into place /// after performing relocations. -relocatable_data: std.AutoHashMapUnmanaged(RelocatableData.Tag, []RelocatableData) = .{}, +relocatable_data: std.AutoHashMapUnmanaged(RelocatableData.Tag, []RelocatableData) = .empty, /// String table for all strings required by the object file, such as symbol names, /// import name, module name and export names. Each string will be deduplicated /// and returns an offset into the table. @@ -379,7 +379,7 @@ fn Parser(comptime ReaderType: type) type { try parser.parseFeatures(gpa); } else if (std.mem.startsWith(u8, name, ".debug")) { const gop = try parser.object.relocatable_data.getOrPut(gpa, .custom); - var relocatable_data: std.ArrayListUnmanaged(RelocatableData) = .{}; + var relocatable_data: std.ArrayListUnmanaged(RelocatableData) = .empty; defer relocatable_data.deinit(gpa); if (!gop.found_existing) { gop.value_ptr.* = &.{}; diff --git a/src/link/Wasm/ZigObject.zig b/src/link/Wasm/ZigObject.zig index afb0216fd77c..962024eeef3b 100644 --- a/src/link/Wasm/ZigObject.zig +++ b/src/link/Wasm/ZigObject.zig @@ -8,37 +8,37 @@ path: []const u8, index: File.Index, /// Map of all `Nav` that are currently alive. /// Each index maps to the corresponding `NavInfo`. -navs: std.AutoHashMapUnmanaged(InternPool.Nav.Index, NavInfo) = .{}, +navs: std.AutoHashMapUnmanaged(InternPool.Nav.Index, NavInfo) = .empty, /// List of function type signatures for this Zig module. 
-func_types: std.ArrayListUnmanaged(std.wasm.Type) = .{}, +func_types: std.ArrayListUnmanaged(std.wasm.Type) = .empty, /// List of `std.wasm.Func`. Each entry contains the function signature, /// rather than the actual body. -functions: std.ArrayListUnmanaged(std.wasm.Func) = .{}, +functions: std.ArrayListUnmanaged(std.wasm.Func) = .empty, /// List of indexes pointing to an entry within the `functions` list which has been removed. -functions_free_list: std.ArrayListUnmanaged(u32) = .{}, +functions_free_list: std.ArrayListUnmanaged(u32) = .empty, /// Map of symbol locations, represented by its `types.Import`. -imports: std.AutoHashMapUnmanaged(Symbol.Index, types.Import) = .{}, +imports: std.AutoHashMapUnmanaged(Symbol.Index, types.Import) = .empty, /// List of WebAssembly globals. -globals: std.ArrayListUnmanaged(std.wasm.Global) = .{}, +globals: std.ArrayListUnmanaged(std.wasm.Global) = .empty, /// Mapping between an `Atom` and its type index representing the Wasm /// type of the function signature. -atom_types: std.AutoHashMapUnmanaged(Atom.Index, u32) = .{}, +atom_types: std.AutoHashMapUnmanaged(Atom.Index, u32) = .empty, /// List of all symbols generated by Zig code. -symbols: std.ArrayListUnmanaged(Symbol) = .{}, +symbols: std.ArrayListUnmanaged(Symbol) = .empty, /// Map from symbol name offset to their index into the `symbols` list. -global_syms: std.AutoHashMapUnmanaged(u32, Symbol.Index) = .{}, +global_syms: std.AutoHashMapUnmanaged(u32, Symbol.Index) = .empty, /// List of symbol indexes which are free to be used. -symbols_free_list: std.ArrayListUnmanaged(Symbol.Index) = .{}, +symbols_free_list: std.ArrayListUnmanaged(Symbol.Index) = .empty, /// Extra metadata about the linking section, such as alignment of segments and their name. -segment_info: std.ArrayListUnmanaged(types.Segment) = .{}, +segment_info: std.ArrayListUnmanaged(types.Segment) = .empty, /// List of indexes which contain a free slot in the `segment_info` list. 
-segment_free_list: std.ArrayListUnmanaged(u32) = .{}, +segment_free_list: std.ArrayListUnmanaged(u32) = .empty, /// File encapsulated string table, used to deduplicate strings within the generated file. string_table: StringTable = .{}, /// Map for storing anonymous declarations. Each anonymous decl maps to its Atom's index. -uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Atom.Index) = .{}, +uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Atom.Index) = .empty, /// List of atom indexes of functions that are generated by the backend. -synthetic_functions: std.ArrayListUnmanaged(Atom.Index) = .{}, +synthetic_functions: std.ArrayListUnmanaged(Atom.Index) = .empty, /// Represents the symbol index of the error name table /// When this is `null`, no code references an error using runtime `@errorName`. /// During initializion, a symbol with corresponding atom will be created that is @@ -88,7 +88,7 @@ debug_abbrev_index: ?u32 = null, const NavInfo = struct { atom: Atom.Index = .null, - exports: std.ArrayListUnmanaged(Symbol.Index) = .{}, + exports: std.ArrayListUnmanaged(Symbol.Index) = .empty, fn @"export"(ni: NavInfo, zig_object: *const ZigObject, name: []const u8) ?Symbol.Index { for (ni.exports.items) |sym_index| { diff --git a/src/link/table_section.zig b/src/link/table_section.zig index 2c70b03f4290..f3762b38dd97 100644 --- a/src/link/table_section.zig +++ b/src/link/table_section.zig @@ -1,8 +1,8 @@ pub fn TableSection(comptime Entry: type) type { return struct { - entries: std.ArrayListUnmanaged(Entry) = .{}, - free_list: std.ArrayListUnmanaged(Index) = .{}, - lookup: std.AutoHashMapUnmanaged(Entry, Index) = .{}, + entries: std.ArrayListUnmanaged(Entry) = .empty, + free_list: std.ArrayListUnmanaged(Index) = .empty, + lookup: std.AutoHashMapUnmanaged(Entry, Index) = .empty, pub fn deinit(self: *Self, allocator: Allocator) void { self.entries.deinit(allocator); diff --git a/src/link/tapi/parse.zig b/src/link/tapi/parse.zig index deba9aaef01f..f6556dd5ddb6 
100644 --- a/src/link/tapi/parse.zig +++ b/src/link/tapi/parse.zig @@ -115,7 +115,7 @@ pub const Node = struct { .start = undefined, .end = undefined, }, - values: std.ArrayListUnmanaged(Entry) = .{}, + values: std.ArrayListUnmanaged(Entry) = .empty, pub const base_tag: Node.Tag = .map; @@ -161,7 +161,7 @@ pub const Node = struct { .start = undefined, .end = undefined, }, - values: std.ArrayListUnmanaged(*Node) = .{}, + values: std.ArrayListUnmanaged(*Node) = .empty, pub const base_tag: Node.Tag = .list; @@ -195,7 +195,7 @@ pub const Node = struct { .start = undefined, .end = undefined, }, - string_value: std.ArrayListUnmanaged(u8) = .{}, + string_value: std.ArrayListUnmanaged(u8) = .empty, pub const base_tag: Node.Tag = .value; @@ -227,7 +227,7 @@ pub const Tree = struct { source: []const u8, tokens: []Token, line_cols: std.AutoHashMap(TokenIndex, LineCol), - docs: std.ArrayListUnmanaged(*Node) = .{}, + docs: std.ArrayListUnmanaged(*Node) = .empty, pub fn init(allocator: Allocator) Tree { return .{ diff --git a/src/main.zig b/src/main.zig index 0e7a801b4617..4a7f47710684 100644 --- a/src/main.zig +++ b/src/main.zig @@ -126,7 +126,7 @@ const debug_usage = normal_usage ++ const usage = if (build_options.enable_debug_extensions) debug_usage else normal_usage; const default_local_zig_cache_basename = ".zig-cache"; -var log_scopes: std.ArrayListUnmanaged([]const u8) = .{}; +var log_scopes: std.ArrayListUnmanaged([]const u8) = .empty; pub fn log( comptime level: std.log.Level, @@ -895,14 +895,14 @@ fn buildOutputType( var linker_module_definition_file: ?[]const u8 = null; var test_no_exec = false; var entry: Compilation.CreateOptions.Entry = .default; - var force_undefined_symbols: std.StringArrayHashMapUnmanaged(void) = .{}; + var force_undefined_symbols: std.StringArrayHashMapUnmanaged(void) = .empty; var stack_size: ?u64 = null; var image_base: ?u64 = null; var link_eh_frame_hdr = false; var link_emit_relocs = false; var build_id: ?std.zig.BuildId = null; var 
runtime_args_start: ?usize = null; - var test_filters: std.ArrayListUnmanaged([]const u8) = .{}; + var test_filters: std.ArrayListUnmanaged([]const u8) = .empty; var test_name_prefix: ?[]const u8 = null; var test_runner_path: ?[]const u8 = null; var override_local_cache_dir: ?[]const u8 = try EnvVar.ZIG_LOCAL_CACHE_DIR.get(arena); @@ -931,12 +931,12 @@ fn buildOutputType( var pdb_out_path: ?[]const u8 = null; var error_limit: ?Zcu.ErrorInt = null; // These are before resolving sysroot. - var extra_cflags: std.ArrayListUnmanaged([]const u8) = .{}; - var extra_rcflags: std.ArrayListUnmanaged([]const u8) = .{}; - var symbol_wrap_set: std.StringArrayHashMapUnmanaged(void) = .{}; + var extra_cflags: std.ArrayListUnmanaged([]const u8) = .empty; + var extra_rcflags: std.ArrayListUnmanaged([]const u8) = .empty; + var symbol_wrap_set: std.StringArrayHashMapUnmanaged(void) = .empty; var rc_includes: Compilation.RcIncludes = .any; var manifest_file: ?[]const u8 = null; - var linker_export_symbol_names: std.ArrayListUnmanaged([]const u8) = .{}; + var linker_export_symbol_names: std.ArrayListUnmanaged([]const u8) = .empty; // Tracks the position in c_source_files which have already their owner populated. var c_source_files_owner_index: usize = 0; @@ -944,7 +944,7 @@ fn buildOutputType( var rc_source_files_owner_index: usize = 0; // null means replace with the test executable binary - var test_exec_args: std.ArrayListUnmanaged(?[]const u8) = .{}; + var test_exec_args: std.ArrayListUnmanaged(?[]const u8) = .empty; // These get set by CLI flags and then snapshotted when a `-M` flag is // encountered. @@ -953,8 +953,8 @@ fn buildOutputType( // These get appended to by CLI flags and then slurped when a `-M` flag // is encountered. 
var cssan: ClangSearchSanitizer = .{}; - var cc_argv: std.ArrayListUnmanaged([]const u8) = .{}; - var deps: std.ArrayListUnmanaged(CliModule.Dep) = .{}; + var cc_argv: std.ArrayListUnmanaged([]const u8) = .empty; + var deps: std.ArrayListUnmanaged(CliModule.Dep) = .empty; // Contains every module specified via -M. The dependencies are added // after argument parsing is completed. We use a StringArrayHashMap to make @@ -2806,7 +2806,7 @@ fn buildOutputType( create_module.opts.emit_bin = emit_bin != .no; create_module.opts.any_c_source_files = create_module.c_source_files.items.len != 0; - var builtin_modules: std.StringHashMapUnmanaged(*Package.Module) = .{}; + var builtin_modules: std.StringHashMapUnmanaged(*Package.Module) = .empty; // `builtin_modules` allocated into `arena`, so no deinit const main_mod = try createModule(gpa, arena, &create_module, 0, null, zig_lib_directory, &builtin_modules); for (create_module.modules.keys(), create_module.modules.values()) |key, cli_mod| { @@ -3290,7 +3290,7 @@ fn buildOutputType( process.raiseFileDescriptorLimit(); - var file_system_inputs: std.ArrayListUnmanaged(u8) = .{}; + var file_system_inputs: std.ArrayListUnmanaged(u8) = .empty; defer file_system_inputs.deinit(gpa); const comp = Compilation.create(gpa, arena, .{ @@ -5451,7 +5451,7 @@ fn jitCmd( }); defer thread_pool.deinit(); - var child_argv: std.ArrayListUnmanaged([]const u8) = .{}; + var child_argv: std.ArrayListUnmanaged([]const u8) = .empty; try child_argv.ensureUnusedCapacity(arena, args.len + 4); // We want to release all the locks before executing the child process, so we make a nice @@ -6553,7 +6553,7 @@ fn cmdChangelist( process.exit(1); } - var inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{}; + var inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .empty; defer inst_map.deinit(gpa); try Zcu.mapOldZirToNew(gpa, old_zir, file.zir, &inst_map); @@ -6738,7 +6738,7 @@ fn parseSubSystem(next_arg: []const u8) 
!std.Target.SubSystem { /// Silently ignore superfluous search dirs. /// Warn when a dir is added to multiple searchlists. const ClangSearchSanitizer = struct { - map: std.StringHashMapUnmanaged(Membership) = .{}, + map: std.StringHashMapUnmanaged(Membership) = .empty, fn reset(self: *@This()) void { self.map.clearRetainingCapacity(); diff --git a/src/register_manager.zig b/src/register_manager.zig index 7ca117be0c20..0b569467e74d 100644 --- a/src/register_manager.zig +++ b/src/register_manager.zig @@ -516,7 +516,7 @@ fn MockFunction(comptime Register: type) type { return struct { allocator: Allocator, register_manager: Register.RM = .{}, - spilled: std.ArrayListUnmanaged(Register) = .{}, + spilled: std.ArrayListUnmanaged(Register) = .empty, const Self = @This(); diff --git a/src/translate_c.zig b/src/translate_c.zig index 9e974fc237c6..6b84aeb743cd 100644 --- a/src/translate_c.zig +++ b/src/translate_c.zig @@ -27,23 +27,23 @@ pub const Context = struct { gpa: mem.Allocator, arena: mem.Allocator, source_manager: *clang.SourceManager, - decl_table: std.AutoArrayHashMapUnmanaged(usize, []const u8) = .{}, + decl_table: std.AutoArrayHashMapUnmanaged(usize, []const u8) = .empty, alias_list: AliasList, global_scope: *Scope.Root, clang_context: *clang.ASTContext, mangle_count: u32 = 0, /// Table of record decls that have been demoted to opaques. - opaque_demotes: std.AutoHashMapUnmanaged(usize, void) = .{}, + opaque_demotes: std.AutoHashMapUnmanaged(usize, void) = .empty, /// Table of unnamed enums and records that are child types of typedefs. - unnamed_typedefs: std.AutoHashMapUnmanaged(usize, []const u8) = .{}, + unnamed_typedefs: std.AutoHashMapUnmanaged(usize, []const u8) = .empty, /// Needed to decide if we are parsing a typename - typedefs: std.StringArrayHashMapUnmanaged(void) = .{}, + typedefs: std.StringArrayHashMapUnmanaged(void) = .empty, /// This one is different than the root scope's name table. 
This contains /// a list of names that we found by visiting all the top level decls without /// translating them. The other maps are updated as we translate; this one is updated /// up front in a pre-processing step. - global_names: std.StringArrayHashMapUnmanaged(void) = .{}, + global_names: std.StringArrayHashMapUnmanaged(void) = .empty, /// This is similar to `global_names`, but contains names which we would /// *like* to use, but do not strictly *have* to if they are unavailable. @@ -52,7 +52,7 @@ pub const Context = struct { /// may be mangled. /// This is distinct from `global_names` so we can detect at a type /// declaration whether or not the name is available. - weak_global_names: std.StringArrayHashMapUnmanaged(void) = .{}, + weak_global_names: std.StringArrayHashMapUnmanaged(void) = .empty, pattern_list: PatternList, diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig index 0066de128424..41e9f11f0b42 100644 --- a/test/behavior/fn.zig +++ b/test/behavior/fn.zig @@ -415,7 +415,7 @@ test "import passed byref to function in return type" { const S = struct { fn get() @import("std").ArrayListUnmanaged(i32) { - const x: @import("std").ArrayListUnmanaged(i32) = .{}; + const x: @import("std").ArrayListUnmanaged(i32) = .empty; return x; } }; diff --git a/test/compare_output.zig b/test/compare_output.zig index d07864360f2f..b5c65df889c2 100644 --- a/test/compare_output.zig +++ b/test/compare_output.zig @@ -291,7 +291,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void { \\ stdout.print("before\n", .{}) catch unreachable; \\ defer stdout.print("defer1\n", .{}) catch unreachable; \\ defer stdout.print("defer2\n", .{}) catch unreachable; - \\ var gpa = @import("std").heap.GeneralPurposeAllocator(.{}){}; + \\ var gpa: @import("std").heap.GeneralPurposeAllocator(.{}) = .init; \\ defer _ = gpa.deinit(); \\ var arena = @import("std").heap.ArenaAllocator.init(gpa.allocator()); \\ defer arena.deinit(); @@ -361,7 +361,7 @@ pub fn addCases(cases: 
*tests.CompareOutputContext) void { \\const os = std.os; \\ \\pub fn main() !void { - \\ var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + \\ var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init; \\ defer _ = gpa.deinit(); \\ var arena = std.heap.ArenaAllocator.init(gpa.allocator()); \\ defer arena.deinit(); @@ -402,7 +402,7 @@ pub fn addCases(cases: *tests.CompareOutputContext) void { \\const os = std.os; \\ \\pub fn main() !void { - \\ var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + \\ var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init; \\ defer _ = gpa.deinit(); \\ var arena = std.heap.ArenaAllocator.init(gpa.allocator()); \\ defer arena.deinit(); diff --git a/test/standalone/coff_dwarf/main.zig b/test/standalone/coff_dwarf/main.zig index 18a7262a3076..ce74876f08ff 100644 --- a/test/standalone/coff_dwarf/main.zig +++ b/test/standalone/coff_dwarf/main.zig @@ -5,7 +5,7 @@ const testing = std.testing; extern fn add(a: u32, b: u32, addr: *usize) u32; pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init; defer assert(gpa.deinit() == .ok); const allocator = gpa.allocator(); diff --git a/test/standalone/empty_env/main.zig b/test/standalone/empty_env/main.zig index 37f5d6e76a1a..1dc435d9fa90 100644 --- a/test/standalone/empty_env/main.zig +++ b/test/standalone/empty_env/main.zig @@ -1,7 +1,7 @@ const std = @import("std"); pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init; defer _ = gpa.deinit(); const env_map = std.process.getEnvMap(gpa.allocator()) catch @panic("unable to get env map"); try std.testing.expect(env_map.count() == 0); diff --git a/test/standalone/load_dynamic_library/main.zig b/test/standalone/load_dynamic_library/main.zig index b47ea8a81f1c..e4466f0667de 100644 --- a/test/standalone/load_dynamic_library/main.zig +++ b/test/standalone/load_dynamic_library/main.zig @@ -1,7 +1,7 @@ 
const std = @import("std"); pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init; defer _ = gpa.deinit(); const args = try std.process.argsAlloc(gpa.allocator()); defer std.process.argsFree(gpa.allocator(), args); diff --git a/test/standalone/self_exe_symlink/create-symlink.zig b/test/standalone/self_exe_symlink/create-symlink.zig index e558df04d6a6..7bc36df8fe30 100644 --- a/test/standalone/self_exe_symlink/create-symlink.zig +++ b/test/standalone/self_exe_symlink/create-symlink.zig @@ -1,7 +1,7 @@ const std = @import("std"); pub fn main() anyerror!void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init; defer if (gpa.deinit() == .leak) @panic("found memory leaks"); const allocator = gpa.allocator(); diff --git a/test/standalone/self_exe_symlink/main.zig b/test/standalone/self_exe_symlink/main.zig index 309db8abf0c0..b74c4c7f95da 100644 --- a/test/standalone/self_exe_symlink/main.zig +++ b/test/standalone/self_exe_symlink/main.zig @@ -1,7 +1,7 @@ const std = @import("std"); pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init; defer std.debug.assert(gpa.deinit() == .ok); const allocator = gpa.allocator(); diff --git a/test/standalone/simple/brace_expansion.zig b/test/standalone/simple/brace_expansion.zig index 7a769f6af743..facaf4a75483 100644 --- a/test/standalone/simple/brace_expansion.zig +++ b/test/standalone/simple/brace_expansion.zig @@ -15,7 +15,7 @@ const Token = union(enum) { Eof, }; -var gpa = std.heap.GeneralPurposeAllocator(.{}){}; +var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init; var global_allocator = gpa.allocator(); fn tokenize(input: []const u8) !ArrayList(Token) { diff --git a/test/standalone/windows_argv/fuzz.zig b/test/standalone/windows_argv/fuzz.zig index b45ed9fdab84..4accccb3bad7 100644 --- 
a/test/standalone/windows_argv/fuzz.zig +++ b/test/standalone/windows_argv/fuzz.zig @@ -4,7 +4,7 @@ const windows = std.os.windows; const Allocator = std.mem.Allocator; pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init; defer std.debug.assert(gpa.deinit() == .ok); const allocator = gpa.allocator(); diff --git a/test/standalone/windows_bat_args/fuzz.zig b/test/standalone/windows_bat_args/fuzz.zig index 07370a412efe..8b145ae0d2d0 100644 --- a/test/standalone/windows_bat_args/fuzz.zig +++ b/test/standalone/windows_bat_args/fuzz.zig @@ -3,7 +3,7 @@ const builtin = @import("builtin"); const Allocator = std.mem.Allocator; pub fn main() anyerror!void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init; defer if (gpa.deinit() == .leak) @panic("found memory leaks"); const allocator = gpa.allocator(); diff --git a/test/standalone/windows_bat_args/test.zig b/test/standalone/windows_bat_args/test.zig index b2a9aed6caa8..42df2ab94cdb 100644 --- a/test/standalone/windows_bat_args/test.zig +++ b/test/standalone/windows_bat_args/test.zig @@ -1,7 +1,7 @@ const std = @import("std"); pub fn main() anyerror!void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init; defer if (gpa.deinit() == .leak) @panic("found memory leaks"); const allocator = gpa.allocator(); diff --git a/test/standalone/windows_spawn/main.zig b/test/standalone/windows_spawn/main.zig index 4c637d36ff0b..9496895d618f 100644 --- a/test/standalone/windows_spawn/main.zig +++ b/test/standalone/windows_spawn/main.zig @@ -3,7 +3,7 @@ const windows = std.os.windows; const utf16Literal = std.unicode.utf8ToUtf16LeStringLiteral; pub fn main() anyerror!void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init; defer if (gpa.deinit() == .leak) @panic("found memory leaks"); 
const allocator = gpa.allocator(); diff --git a/tools/doctest.zig b/tools/doctest.zig index fad9be86db2c..070a59ee37a1 100644 --- a/tools/doctest.zig +++ b/tools/doctest.zig @@ -868,8 +868,8 @@ fn parseManifest(arena: Allocator, source_bytes: []const u8) !Code { var mode: std.builtin.OptimizeMode = .Debug; var link_mode: ?std.builtin.LinkMode = null; - var link_objects: std.ArrayListUnmanaged([]const u8) = .{}; - var additional_options: std.ArrayListUnmanaged([]const u8) = .{}; + var link_objects: std.ArrayListUnmanaged([]const u8) = .empty; + var additional_options: std.ArrayListUnmanaged([]const u8) = .empty; var target_str: ?[]const u8 = null; var link_libc = false; var disable_cache = false; diff --git a/tools/dump-cov.zig b/tools/dump-cov.zig index 24fc96950a69..65bd19000dbf 100644 --- a/tools/dump-cov.zig +++ b/tools/dump-cov.zig @@ -8,7 +8,7 @@ const assert = std.debug.assert; const SeenPcsHeader = std.Build.Fuzz.abi.SeenPcsHeader; pub fn main() !void { - var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .{}; + var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init; defer _ = general_purpose_allocator.deinit(); const gpa = general_purpose_allocator.allocator(); @@ -55,7 +55,7 @@ pub fn main() !void { try stdout.print("{any}\n", .{header.*}); const pcs = header.pcAddrs(); - var indexed_pcs: std.AutoArrayHashMapUnmanaged(usize, void) = .{}; + var indexed_pcs: std.AutoArrayHashMapUnmanaged(usize, void) = .empty; try indexed_pcs.entries.resize(arena, pcs.len); @memcpy(indexed_pcs.entries.items(.key), pcs); try indexed_pcs.reIndex(arena); diff --git a/tools/generate_JSONTestSuite.zig b/tools/generate_JSONTestSuite.zig index ed3e5bd0829d..42dc777e8212 100644 --- a/tools/generate_JSONTestSuite.zig +++ b/tools/generate_JSONTestSuite.zig @@ -3,7 +3,7 @@ const std = @import("std"); pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; + var gpa: std.heap.GeneralPurposeAllocator(.{}) = .init; var allocator 
= gpa.allocator(); var output = std.io.getStdOut().writer(); diff --git a/tools/generate_c_size_and_align_checks.zig b/tools/generate_c_size_and_align_checks.zig index 96874635c223..588deb4935dd 100644 --- a/tools/generate_c_size_and_align_checks.zig +++ b/tools/generate_c_size_and_align_checks.zig @@ -25,7 +25,7 @@ fn cName(ty: std.Target.CType) []const u8 { }; } -var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){}; +var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init; pub fn main() !void { const gpa = general_purpose_allocator.allocator(); diff --git a/tools/incr-check.zig b/tools/incr-check.zig index a5c0afc62a06..eafe6e8c0476 100644 --- a/tools/incr-check.zig +++ b/tools/incr-check.zig @@ -73,7 +73,7 @@ pub fn main() !void { else null; - var child_args: std.ArrayListUnmanaged([]const u8) = .{}; + var child_args: std.ArrayListUnmanaged([]const u8) = .empty; try child_args.appendSlice(arena, &.{ resolved_zig_exe, "build-exe", @@ -107,7 +107,7 @@ pub fn main() !void { child.cwd_dir = tmp_dir; child.cwd = tmp_dir_path; - var cc_child_args: std.ArrayListUnmanaged([]const u8) = .{}; + var cc_child_args: std.ArrayListUnmanaged([]const u8) = .empty; if (emit == .c) { const resolved_cc_zig_exe = if (opt_cc_zig) |cc_zig_exe| try std.fs.path.relative(arena, tmp_dir_path, cc_zig_exe) @@ -492,8 +492,8 @@ const Case = struct { }; fn parse(arena: Allocator, bytes: []const u8) !Case { - var updates: std.ArrayListUnmanaged(Update) = .{}; - var changes: std.ArrayListUnmanaged(FullContents) = .{}; + var updates: std.ArrayListUnmanaged(Update) = .empty; + var changes: std.ArrayListUnmanaged(FullContents) = .empty; var target_query: ?[]const u8 = null; var it = std.mem.splitScalar(u8, bytes, '\n'); var line_n: usize = 1; From 9271a89c65967ff0fed7011b4195abdd0f9195eb Mon Sep 17 00:00:00 2001 From: Linus Groh Date: Wed, 11 Sep 2024 22:44:02 +0100 Subject: [PATCH 135/202] InternPool: Replace default values with a .empty declaration --- 
src/InternPool.zig | 50 +++++++++++++++++++++++++++++++--------------- src/Zcu.zig | 2 +- 2 files changed, 35 insertions(+), 17 deletions(-) diff --git a/src/InternPool.zig b/src/InternPool.zig index 8160265e6540..6b20f79561af 100644 --- a/src/InternPool.zig +++ b/src/InternPool.zig @@ -2,20 +2,20 @@ //! This data structure is self-contained. /// One item per thread, indexed by `tid`, which is dense and unique per thread. -locals: []Local = &.{}, +locals: []Local, /// Length must be a power of two and represents the number of simultaneous /// writers that can mutate any single sharded data structure. -shards: []Shard = &.{}, +shards: []Shard, /// Key is the error name, index is the error tag value. Index 0 has a length-0 string. -global_error_set: GlobalErrorSet = GlobalErrorSet.empty, +global_error_set: GlobalErrorSet, /// Cached number of active bits in a `tid`. -tid_width: if (single_threaded) u0 else std.math.Log2Int(u32) = 0, +tid_width: if (single_threaded) u0 else std.math.Log2Int(u32), /// Cached shift amount to put a `tid` in the top bits of a 30-bit value. -tid_shift_30: if (single_threaded) u0 else std.math.Log2Int(u32) = if (single_threaded) 0 else 31, +tid_shift_30: if (single_threaded) u0 else std.math.Log2Int(u32), /// Cached shift amount to put a `tid` in the top bits of a 31-bit value. -tid_shift_31: if (single_threaded) u0 else std.math.Log2Int(u32) = if (single_threaded) 0 else 31, +tid_shift_31: if (single_threaded) u0 else std.math.Log2Int(u32), /// Cached shift amount to put a `tid` in the top bits of a 32-bit value. -tid_shift_32: if (single_threaded) u0 else std.math.Log2Int(u32) = if (single_threaded) 0 else 31, +tid_shift_32: if (single_threaded) u0 else std.math.Log2Int(u32), /// Dependencies on the source code hash associated with a ZIR instruction. /// * For a `declaration`, this is the entire declaration body. 
@@ -23,36 +23,36 @@ tid_shift_32: if (single_threaded) u0 else std.math.Log2Int(u32) = if (single_th /// * For a `func`, this is the source of the full function signature. /// These are also invalidated if tracking fails for this instruction. /// Value is index into `dep_entries` of the first dependency on this hash. -src_hash_deps: std.AutoArrayHashMapUnmanaged(TrackedInst.Index, DepEntry.Index) = .empty, +src_hash_deps: std.AutoArrayHashMapUnmanaged(TrackedInst.Index, DepEntry.Index), /// Dependencies on the value of a Nav. /// Value is index into `dep_entries` of the first dependency on this Nav value. -nav_val_deps: std.AutoArrayHashMapUnmanaged(Nav.Index, DepEntry.Index) = .empty, +nav_val_deps: std.AutoArrayHashMapUnmanaged(Nav.Index, DepEntry.Index), /// Dependencies on an interned value, either: /// * a runtime function (invalidated when its IES changes) /// * a container type requiring resolution (invalidated when the type must be recreated at a new index) /// Value is index into `dep_entries` of the first dependency on this interned value. -interned_deps: std.AutoArrayHashMapUnmanaged(Index, DepEntry.Index) = .empty, +interned_deps: std.AutoArrayHashMapUnmanaged(Index, DepEntry.Index), /// Dependencies on the full set of names in a ZIR namespace. /// Key refers to a `struct_decl`, `union_decl`, etc. /// Value is index into `dep_entries` of the first dependency on this namespace. -namespace_deps: std.AutoArrayHashMapUnmanaged(TrackedInst.Index, DepEntry.Index) = .empty, +namespace_deps: std.AutoArrayHashMapUnmanaged(TrackedInst.Index, DepEntry.Index), /// Dependencies on the (non-)existence of some name in a namespace. /// Value is index into `dep_entries` of the first dependency on this name. 
-namespace_name_deps: std.AutoArrayHashMapUnmanaged(NamespaceNameKey, DepEntry.Index) = .empty, +namespace_name_deps: std.AutoArrayHashMapUnmanaged(NamespaceNameKey, DepEntry.Index), /// Given a `Depender`, points to an entry in `dep_entries` whose `depender` /// matches. The `next_dependee` field can be used to iterate all such entries /// and remove them from the corresponding lists. -first_dependency: std.AutoArrayHashMapUnmanaged(AnalUnit, DepEntry.Index) = .empty, +first_dependency: std.AutoArrayHashMapUnmanaged(AnalUnit, DepEntry.Index), /// Stores dependency information. The hashmaps declared above are used to look /// up entries in this list as required. This is not stored in `extra` so that /// we can use `free_dep_entries` to track free indices, since dependencies are /// removed frequently. -dep_entries: std.ArrayListUnmanaged(DepEntry) = .empty, +dep_entries: std.ArrayListUnmanaged(DepEntry), /// Stores unused indices in `dep_entries` which can be reused without a full /// garbage collection pass. -free_dep_entries: std.ArrayListUnmanaged(DepEntry.Index) = .empty, +free_dep_entries: std.ArrayListUnmanaged(DepEntry.Index), /// Whether a multi-threaded intern pool is useful. /// Currently `false` until the intern pool is actually accessed @@ -62,6 +62,24 @@ const want_multi_threaded = true; /// Whether a single-threaded intern pool impl is in use. 
pub const single_threaded = builtin.single_threaded or !want_multi_threaded; +pub const empty: InternPool = .{ + .locals = &.{}, + .shards = &.{}, + .global_error_set = .empty, + .tid_width = 0, + .tid_shift_30 = if (single_threaded) 0 else 31, + .tid_shift_31 = if (single_threaded) 0 else 31, + .tid_shift_32 = if (single_threaded) 0 else 31, + .src_hash_deps = .empty, + .nav_val_deps = .empty, + .interned_deps = .empty, + .namespace_deps = .empty, + .namespace_name_deps = .empty, + .first_dependency = .empty, + .dep_entries = .empty, + .free_dep_entries = .empty, +}; + /// A `TrackedInst.Index` provides a single, unchanging reference to a ZIR instruction across a whole /// compilation. From this index, you can acquire a `TrackedInst`, which containss a reference to both /// the file which the instruction lives in, and the instruction index itself, which is updated on @@ -9858,7 +9876,7 @@ fn extraData(extra: Local.Extra, comptime T: type, index: u32) T { test "basic usage" { const gpa = std.testing.allocator; - var ip: InternPool = .{}; + var ip: InternPool = .empty; defer ip.deinit(gpa); const i32_type = try ip.get(gpa, .main, .{ .int_type = .{ diff --git a/src/Zcu.zig b/src/Zcu.zig index 7bddabaa5053..3b0d5d9d8712 100644 --- a/src/Zcu.zig +++ b/src/Zcu.zig @@ -116,7 +116,7 @@ embed_table: std.StringArrayHashMapUnmanaged(*EmbedFile) = .empty, /// Stores all Type and Value objects. /// The idea is that this will be periodically garbage-collected, but such logic /// is not yet implemented. -intern_pool: InternPool = .{}, +intern_pool: InternPool = .empty, analysis_in_progress: std.AutoArrayHashMapUnmanaged(AnalUnit, void) = .empty, /// The ErrorMsg memory is owned by the `AnalUnit`, using Module's general purpose allocator. 
From 03c363300fd829f3a656c6a9854a9c9720c9b3f1 Mon Sep 17 00:00:00 2001 From: mlugg Date: Wed, 11 Sep 2024 09:01:05 +0100 Subject: [PATCH 136/202] AstGen: do not allow unlabeled `break` to exit a labeled switch `break`ing from something which isn't a loop should always be opt-in. This was a bug in #21257. --- lib/std/zig/AstGen.zig | 4 ++-- test/behavior/switch.zig | 17 +++++++++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/lib/std/zig/AstGen.zig b/lib/std/zig/AstGen.zig index 79ab85f6db7a..c00ea3ff0fbb 100644 --- a/lib/std/zig/AstGen.zig +++ b/lib/std/zig/AstGen.zig @@ -7811,9 +7811,7 @@ fn switchExpr( const switch_block = try parent_gz.makeBlockInst(switch_tag, node); if (switch_full.label_token) |label_token| { - block_scope.break_block = switch_block.toOptional(); block_scope.continue_block = switch_block.toOptional(); - // `break_result_info` already set above block_scope.continue_result_info = .{ .rl = if (any_payload_is_ref) .{ .ref_coerced_ty = raw_operand_ty_ref } @@ -7825,6 +7823,8 @@ fn switchExpr( .token = label_token, .block_inst = switch_block, }; + // `break` can target this via `label.block_inst` + // `break_result_info` already set by `setBreakResultInfo` } // We re-use this same scope for all cases, including the special prong, if any. 
diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig index f1ded573a029..fd1cd41e4b56 100644 --- a/test/behavior/switch.zig +++ b/test/behavior/switch.zig @@ -985,3 +985,20 @@ test "labeled switch with break" { comptime assert(comptime_val); } + +test "unlabeled break ignores switch" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + + const result = while (true) { + _ = s: switch (@as(u32, 1)) { + 1 => continue :s 123, + else => |x| break x, + }; + comptime unreachable; // control flow never breaks from the switch + }; + try expect(result == 123); +} From 55250a9370ae247d52d9d78033880451cb1e9add Mon Sep 17 00:00:00 2001 From: mlugg Date: Thu, 12 Sep 2024 19:20:52 +0100 Subject: [PATCH 137/202] Sema: perform requested coercion when decl literal demoted to enum literal Resolves: #21392 --- src/Sema.zig | 39 +++++++++++-------- .../compile_errors/error_set_decl_literal.zig | 9 +++++ 2 files changed, 31 insertions(+), 17 deletions(-) create mode 100644 test/cases/compile_errors/error_set_decl_literal.zig diff --git a/src/Sema.zig b/src/Sema.zig index 8f097bb35f9c..0a609ff0f4a2 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -8980,36 +8980,41 @@ fn zirDeclLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index, do_coerce: b sema.code.nullTerminatedString(extra.field_name_start), .no_embedded_nulls, ); + const orig_ty = sema.resolveType(block, src, extra.lhs) catch |err| switch (err) { - error.GenericPoison => { - // Treat this as a normal enum literal. 
- return Air.internedToRef(try pt.intern(.{ .enum_literal = name })); - }, + error.GenericPoison => Type.generic_poison, else => |e| return e, }; - var ty = orig_ty; - while (true) switch (ty.zigTypeTag(zcu)) { - .error_union => ty = ty.errorUnionPayload(zcu), - .optional => ty = ty.optionalChild(zcu), - .enum_literal, .error_set => { + const uncoerced_result = res: { + if (orig_ty.toIntern() == .generic_poison_type) { // Treat this as a normal enum literal. - return Air.internedToRef(try pt.intern(.{ .enum_literal = name })); - }, - else => break, - }; + break :res Air.internedToRef(try pt.intern(.{ .enum_literal = name })); + } + + var ty = orig_ty; + while (true) switch (ty.zigTypeTag(zcu)) { + .error_union => ty = ty.errorUnionPayload(zcu), + .optional => ty = ty.optionalChild(zcu), + .enum_literal, .error_set => { + // Treat this as a normal enum literal. + break :res Air.internedToRef(try pt.intern(.{ .enum_literal = name })); + }, + else => break, + }; - const result = try sema.fieldVal(block, src, Air.internedToRef(ty.toIntern()), name, src); + break :res try sema.fieldVal(block, src, Air.internedToRef(ty.toIntern()), name, src); + }; // Decl literals cannot lookup runtime `var`s. 
- if (!try sema.isComptimeKnown(result)) { + if (!try sema.isComptimeKnown(uncoerced_result)) { return sema.fail(block, src, "decl literal must be comptime-known", .{}); } if (do_coerce) { - return sema.coerce(block, orig_ty, result, src); + return sema.coerce(block, orig_ty, uncoerced_result, src); } else { - return result; + return uncoerced_result; } } diff --git a/test/cases/compile_errors/error_set_decl_literal.zig b/test/cases/compile_errors/error_set_decl_literal.zig new file mode 100644 index 000000000000..b825c09ae5a4 --- /dev/null +++ b/test/cases/compile_errors/error_set_decl_literal.zig @@ -0,0 +1,9 @@ +export fn entry() void { + const E = error{Foo}; + const e: E = .Foo; + _ = e; +} + +// error +// +// :3:19: error: expected type 'error{Foo}', found '@TypeOf(.enum_literal)' From 0329b8387ce53574c42565322c191e7cc30eb3f7 Mon Sep 17 00:00:00 2001 From: xdBronch <51252236+xdBronch@users.noreply.github.com> Date: Thu, 12 Sep 2024 11:25:40 -0400 Subject: [PATCH 138/202] make decl literals work with single item pointers --- src/Sema.zig | 1 + test/behavior/decl_literals.zig | 49 +++++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+) diff --git a/src/Sema.zig b/src/Sema.zig index 0a609ff0f4a2..61501fa4552d 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -8996,6 +8996,7 @@ fn zirDeclLiteral(sema: *Sema, block: *Block, inst: Zir.Inst.Index, do_coerce: b while (true) switch (ty.zigTypeTag(zcu)) { .error_union => ty = ty.errorUnionPayload(zcu), .optional => ty = ty.optionalChild(zcu), + .pointer => ty = if (ty.isSinglePointer(zcu)) ty.childType(zcu) else break, .enum_literal, .error_set => { // Treat this as a normal enum literal. 
break :res Air.internedToRef(try pt.intern(.{ .enum_literal = name })); diff --git a/test/behavior/decl_literals.zig b/test/behavior/decl_literals.zig index 795668912280..b5961c2c13bc 100644 --- a/test/behavior/decl_literals.zig +++ b/test/behavior/decl_literals.zig @@ -12,6 +12,55 @@ test "decl literal" { try expect(val.x == 123); } +test "decl literal with optional" { + const S = struct { + x: u32, + const foo: ?@This() = .{ .x = 123 }; + }; + + const val: ?S = .foo; + try expect(val.?.x == 123); +} + +test "decl literal with pointer" { + const S = struct { + x: u32, + const foo: *const @This() = &.{ .x = 123 }; + }; + + const val: *const S = .foo; + try expect(val.x == 123); +} + +test "call decl literal with optional" { + if (builtin.zig_backend == .stage2_riscv64 or + builtin.zig_backend == .stage2_sparc64 or + builtin.zig_backend == .stage2_arm or + builtin.zig_backend == .stage2_aarch64 or + builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO + const S = struct { + x: u32, + fn init() ?@This() { + return .{ .x = 123 }; + } + }; + + const val: ?S = .init(); + try expect(val.?.x == 123); +} + +test "call decl literal with pointer" { + const S = struct { + x: u32, + fn init() *const @This() { + return &.{ .x = 123 }; + } + }; + + const val: *const S = .init(); + try expect(val.x == 123); +} + test "call decl literal" { const S = struct { x: u32, From bc161430b0006386ae19c51cfc574b5d8f8fef6e Mon Sep 17 00:00:00 2001 From: David Rubin Date: Thu, 12 Sep 2024 17:05:48 -0700 Subject: [PATCH 139/202] riscv: implement `optional_payload_ptr_set` --- src/arch/riscv64/CodeGen.zig | 32 +++++++++++++++++++++++++++++++- test/behavior/cast.zig | 1 - test/behavior/decl_literals.zig | 3 +-- test/behavior/optional.zig | 1 - test/behavior/struct.zig | 3 --- test/behavior/union.zig | 1 - 6 files changed, 32 insertions(+), 9 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index f887c6cb13e3..0c6da840ebf0 100644 --- 
a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -3364,8 +3364,38 @@ fn airOptionalPayloadPtr(func: *Func, inst: Air.Inst.Index) !void { } fn airOptionalPayloadPtrSet(func: *Func, inst: Air.Inst.Index) !void { + const zcu = func.pt.zcu; + const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op; - const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else return func.fail("TODO implement .optional_payload_ptr_set for {}", .{func.target.cpu.arch}); + const result: MCValue = if (func.liveness.isUnused(inst)) .unreach else result: { + const dst_ty = func.typeOfIndex(inst); + const src_ty = func.typeOf(ty_op.operand); + const opt_ty = src_ty.childType(zcu); + const src_mcv = try func.resolveInst(ty_op.operand); + + if (opt_ty.optionalReprIsPayload(zcu)) { + break :result if (func.reuseOperand(inst, ty_op.operand, 0, src_mcv)) + src_mcv + else + try func.copyToNewRegister(inst, src_mcv); + } + + const dst_mcv: MCValue = if (src_mcv.isRegister() and + func.reuseOperand(inst, ty_op.operand, 0, src_mcv)) + src_mcv + else + try func.copyToNewRegister(inst, src_mcv); + + const pl_ty = dst_ty.childType(zcu); + const pl_abi_size: i32 = @intCast(pl_ty.abiSize(zcu)); + try func.genSetMem( + .{ .reg = dst_mcv.getReg().? 
}, + pl_abi_size, + Type.bool, + .{ .immediate = 1 }, + ); + break :result dst_mcv; + }; return func.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index b414977ad6de..7dca19d2127e 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -1875,7 +1875,6 @@ test "peer type resolution: vector and optional vector" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; var a: ?@Vector(3, u32) = .{ 0, 1, 2 }; var b: @Vector(3, u32) = .{ 3, 4, 5 }; diff --git a/test/behavior/decl_literals.zig b/test/behavior/decl_literals.zig index b5961c2c13bc..f2f7f8a81f09 100644 --- a/test/behavior/decl_literals.zig +++ b/test/behavior/decl_literals.zig @@ -33,8 +33,7 @@ test "decl literal with pointer" { } test "call decl literal with optional" { - if (builtin.zig_backend == .stage2_riscv64 or - builtin.zig_backend == .stage2_sparc64 or + if (builtin.zig_backend == .stage2_sparc64 or builtin.zig_backend == .stage2_arm or builtin.zig_backend == .stage2_aarch64 or builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO diff --git a/test/behavior/optional.zig b/test/behavior/optional.zig index 53738a107bbd..967bc8cf8597 100644 --- a/test/behavior/optional.zig +++ b/test/behavior/optional.zig @@ -320,7 +320,6 @@ test "coerce an anon struct literal to optional struct" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const Struct = struct { diff --git a/test/behavior/struct.zig 
b/test/behavior/struct.zig index 4694758e1915..989da34ae7ea 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -1165,7 +1165,6 @@ test "anon init through error unions and optionals" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { a: u32, @@ -1193,7 +1192,6 @@ test "anon init through optional" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { a: u32, @@ -1503,7 +1501,6 @@ test "no dependency loop on pointer to optional struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const S = struct { const A = struct { b: B }; diff --git a/test/behavior/union.zig b/test/behavior/union.zig index 9938c3c04578..b0209c124ecf 100644 --- a/test/behavior/union.zig +++ b/test/behavior/union.zig @@ -1268,7 +1268,6 @@ test "extern union most-aligned field is smaller" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; const U = extern union { in6: extern struct { From 54611e32d76e97c1f3145f4a14221668e70d52fb Mon Sep 17 00:00:00 2001 From: mlugg Date: Thu, 12 Sep 
2024 22:32:13 +0100 Subject: [PATCH 140/202] Package.Fetch: add another non-standard Content-Type For instance, the official download site for libvterm uses this MIME type for tar.gz tarballs. --- src/Package/Fetch.zig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Package/Fetch.zig b/src/Package/Fetch.zig index 05f724cda367..6e7469425bce 100644 --- a/src/Package/Fetch.zig +++ b/src/Package/Fetch.zig @@ -1068,7 +1068,8 @@ fn unpackResource( if (ascii.eqlIgnoreCase(mime_type, "application/gzip") or ascii.eqlIgnoreCase(mime_type, "application/x-gzip") or ascii.eqlIgnoreCase(mime_type, "application/tar+gzip") or - ascii.eqlIgnoreCase(mime_type, "application/x-tar-gz")) + ascii.eqlIgnoreCase(mime_type, "application/x-tar-gz") or + ascii.eqlIgnoreCase(mime_type, "application/x-gtar-compressed")) { break :ft .@"tar.gz"; } From e17dfb9da0bee4c1f118e0a72b88f29f43365f61 Mon Sep 17 00:00:00 2001 From: Linus Groh Date: Thu, 12 Sep 2024 18:42:21 +0100 Subject: [PATCH 141/202] std.http.WebSocket: Make 'upgrade: websocket' check case-insensitive I've seen implementations in the wild that send 'Upgrade: WebSocket', which currently fails the handshake. From https://datatracker.ietf.org/doc/html/rfc6455: "If the response lacks an |Upgrade| header field or the |Upgrade| header field contains a value that is not an ASCII case-insensitive match for the value "websocket", the client MUST _Fail the WebSocket Connection_." 
--- lib/std/http/WebSocket.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/std/http/WebSocket.zig b/lib/std/http/WebSocket.zig index ad513fddf8af..08bc420b6789 100644 --- a/lib/std/http/WebSocket.zig +++ b/lib/std/http/WebSocket.zig @@ -30,7 +30,7 @@ pub fn init( if (std.ascii.eqlIgnoreCase(header.name, "sec-websocket-key")) { sec_websocket_key = header.value; } else if (std.ascii.eqlIgnoreCase(header.name, "upgrade")) { - if (!std.mem.eql(u8, header.value, "websocket")) + if (!std.ascii.eqlIgnoreCase(header.value, "websocket")) return false; upgrade_websocket = true; } From cf691543323be9bb663aac2d19f62412435a4d39 Mon Sep 17 00:00:00 2001 From: LiterallyVoid Date: Thu, 12 Sep 2024 20:06:49 -0700 Subject: [PATCH 142/202] Labeled switch documentation (#21383) Add langref docs for labeled switch This feature was proposed in #8220, and implemented in #21257. Co-authored-by: Andrew Kelley --- doc/langref.html.in | 47 +++++++++++++++++++ doc/langref/test_switch_continue.zig | 26 ++++++++++ .../test_switch_continue_equivalent.zig | 28 +++++++++++ doc/langref/test_switch_dispatch_loop.zig | 38 +++++++++++++++ 4 files changed, 139 insertions(+) create mode 100644 doc/langref/test_switch_continue.zig create mode 100644 doc/langref/test_switch_continue_equivalent.zig create mode 100644 doc/langref/test_switch_dispatch_loop.zig diff --git a/doc/langref.html.in b/doc/langref.html.in index cea86e895541..9f12829349fa 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -2495,6 +2495,53 @@ or

        {#code|test_exhaustive_switch.zig#} + {#header_close#} + + {#header_open|Labeled switch#} +

        + When a switch statement is labeled, it can be referenced from a + {#syntax#}break{#endsyntax#} or {#syntax#}continue{#endsyntax#}. + {#syntax#}break{#endsyntax#} will return a value from the {#syntax#} + switch{#endsyntax#}. +

        +

        + A {#syntax#}continue{#endsyntax#} targeting a switch must have an + operand. When executed, it will jump to the matching prong, as if the + {#syntax#}switch{#endsyntax#} were executed again with the {#syntax#} + continue{#endsyntax#}'s operand replacing the initial switch value. +

        + + {#code|test_switch_continue.zig#} + +

        + Semantically, this is equivalent to the following loop: +

        + {#code|test_switch_continue_equivalent.zig#} + +

        + This can improve clarity of (for example) state machines, where the syntax {#syntax#}continue :sw .next_state{#endsyntax#} is unambiguous, explicit, and immediately understandable. +

        +

        + However, the motivating example is a switch on each element of an array, where using a single switch can improve clarity and performance: +

        + {#code|test_switch_dispatch_loop.zig#} + +

        + If the operand to {#syntax#}continue{#endsyntax#} is + {#link|comptime#}-known, then it can be lowered to an unconditional branch + to the relevant case. Such a branch is perfectly predicted, and hence + typically very fast to execute. +

        + +

        + If the operand is runtime-known, each {#syntax#}continue{#endsyntax#} can + embed a conditional branch inline (ideally through a jump table), which + allows a CPU to predict its target independently of any other prong. A + loop-based lowering would force every branch through the same dispatch + point, hindering branch prediction. +

        + + {#header_close#} {#header_open|Inline Switch Prongs#} diff --git a/doc/langref/test_switch_continue.zig b/doc/langref/test_switch_continue.zig new file mode 100644 index 000000000000..dc6ba67b0e1c --- /dev/null +++ b/doc/langref/test_switch_continue.zig @@ -0,0 +1,26 @@ +const std = @import("std"); + +test "switch continue" { + sw: switch (@as(i32, 5)) { + 5 => continue :sw 4, + + // `continue` can occur multiple times within a single switch prong. + 2...4 => |v| { + if (v > 3) { + continue :sw 2; + } else if (v == 3) { + + // `break` can target labeled loops. + break :sw; + } + + continue :sw 1; + }, + + 1 => return, + + else => unreachable, + } +} + +// test diff --git a/doc/langref/test_switch_continue_equivalent.zig b/doc/langref/test_switch_continue_equivalent.zig new file mode 100644 index 000000000000..8e2fce8f70b9 --- /dev/null +++ b/doc/langref/test_switch_continue_equivalent.zig @@ -0,0 +1,28 @@ +const std = @import("std"); + +test "switch continue, equivalent loop" { + var sw: i32 = 5; + while (true) { + switch (sw) { + 5 => { + sw = 4; + continue; + }, + 2...4 => |v| { + if (v > 3) { + sw = 2; + continue; + } else if (v == 3) { + break; + } + + sw = 1; + continue; + }, + 1 => return, + else => unreachable, + } + } +} + +// test diff --git a/doc/langref/test_switch_dispatch_loop.zig b/doc/langref/test_switch_dispatch_loop.zig new file mode 100644 index 000000000000..cc7af1704e65 --- /dev/null +++ b/doc/langref/test_switch_dispatch_loop.zig @@ -0,0 +1,38 @@ +const std = @import("std"); +const expectEqual = std.testing.expectEqual; + +const Instruction = enum { + add, + mul, + end, +}; + +fn evaluate(initial_stack: []const i32, code: []const Instruction) !i32 { + var stack = try std.BoundedArray(i32, 8).fromSlice(initial_stack); + var ip: usize = 0; + + return vm: switch (code[ip]) { + // Because all code after `continue` is unreachable, this branch does + // not provide a result. 
+ .add => { + try stack.append(stack.pop() + stack.pop()); + + ip += 1; + continue :vm code[ip]; + }, + .mul => { + try stack.append(stack.pop() * stack.pop()); + + ip += 1; + continue :vm code[ip]; + }, + .end => stack.pop(), + }; +} + +test "evaluate" { + const result = try evaluate(&.{ 7, 2, -3 }, &.{ .mul, .add, .end }); + try expectEqual(1, result); +} + +// test From b56a667ecdb9f34dbd60d247d4237bc008755979 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Wed, 11 Sep 2024 07:32:44 +0200 Subject: [PATCH 143/202] start: Rewrite arm code to work for thumb1 too. 0ecc6332b4eb1ced547ffa38f57471134aaa4d13 improved things for thumb2, but thumb1 has a much smaller permissible instruction set. This commit makes that work. --- lib/std/start.zig | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/lib/std/start.zig b/lib/std/start.zig index 5c94a4b591b8..a8109f7ed9be 100644 --- a/lib/std/start.zig +++ b/lib/std/start.zig @@ -280,12 +280,14 @@ fn _start() callconv(.Naked) noreturn { \\ b %[posixCallMainAndExit] , .arm, .armeb, .thumb, .thumbeb => - \\ mov fp, #0 - \\ mov lr, #0 + // Note that this code must work for Thumb-1. 
+ \\ movs v1, #0 + \\ mov fp, v1 + \\ mov lr, v1 \\ mov a1, sp - \\ mov ip, sp - \\ and ip, ip, #-16 - \\ mov sp, ip + \\ subs v1, #16 + \\ ands v1, a1 + \\ mov sp, v1 \\ b %[posixCallMainAndExit] , .csky => From 8ddce90e62e52244b7f6d1104bb39a55350f0a83 Mon Sep 17 00:00:00 2001 From: CrazyboyQCD <53971641+CrazyboyQCD@users.noreply.github.com> Date: Sat, 14 Sep 2024 08:22:19 +0800 Subject: [PATCH 144/202] `std.ascii`: make `toLower` `toUpper` branchless (#21369) Co-authored-by: WX\shixi --- lib/std/ascii.zig | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/lib/std/ascii.zig b/lib/std/ascii.zig index d5b028500f18..7b99f6f4cf7c 100644 --- a/lib/std/ascii.zig +++ b/lib/std/ascii.zig @@ -182,20 +182,14 @@ pub const isASCII = isAscii; /// Uppercases the character and returns it as-is if already uppercase or not a letter. pub fn toUpper(c: u8) u8 { - if (isLower(c)) { - return c & 0b11011111; - } else { - return c; - } + const mask = @as(u8, @intFromBool(isLower(c))) << 5; + return c ^ mask; } /// Lowercases the character and returns it as-is if already lowercase or not a letter. pub fn toLower(c: u8) u8 { - if (isUpper(c)) { - return c | 0b00100000; - } else { - return c; - } + const mask = @as(u8, @intFromBool(isUpper(c))) << 5; + return c | mask; } test "ASCII character classes" { From bab6bf4194511c609210624d53b6601268719ac9 Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 15 Sep 2024 13:54:36 +0100 Subject: [PATCH 145/202] compiler: always resolve field inits, remove unnecessary eager resolution Resolves: #21362 --- src/Sema.zig | 1 + src/Zcu/PerThread.zig | 21 ++++++--------------- 2 files changed, 7 insertions(+), 15 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 61501fa4552d..278ca154d778 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -36337,6 +36337,7 @@ pub fn resolveUnionLayout(sema: *Sema, ty: Type) SemaError!void { /// be resolved. 
pub fn resolveStructFully(sema: *Sema, ty: Type) SemaError!void { try sema.resolveStructLayout(ty); + try sema.resolveStructFieldInits(ty); const pt = sema.pt; const zcu = pt.zcu; diff --git a/src/Zcu/PerThread.zig b/src/Zcu/PerThread.zig index 837895f78304..a11910302d6e 100644 --- a/src/Zcu/PerThread.zig +++ b/src/Zcu/PerThread.zig @@ -1240,11 +1240,11 @@ fn semaCau(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) !SemaCauResult { }; } - const nav_already_populated, const queue_linker_work, const resolve_type = switch (ip.indexToKey(decl_val.toIntern())) { - .func => |f| .{ f.owner_nav == nav_index, true, false }, - .variable => |v| .{ false, v.owner_nav == nav_index, true }, - .@"extern" => .{ false, false, false }, - else => .{ false, true, true }, + const nav_already_populated, const queue_linker_work = switch (ip.indexToKey(decl_val.toIntern())) { + .func => |f| .{ f.owner_nav == nav_index, true }, + .variable => |v| .{ false, v.owner_nav == nav_index }, + .@"extern" => .{ false, false }, + else => .{ false, true }, }; if (nav_already_populated) { @@ -1317,16 +1317,7 @@ fn semaCau(pt: Zcu.PerThread, cau_index: InternPool.Cau.Index) !SemaCauResult { queue_codegen: { if (!queue_linker_work) break :queue_codegen; - if (resolve_type) { - // Needed for codegen_nav which will call updateDecl and then the - // codegen backend wants full access to the Decl Type. - // We also need this for the `isFnOrHasRuntimeBits` check below. - // TODO: we could make the language more lenient by deferring this work - // to the `codegen_nav` job. 
- try decl_ty.resolveFully(pt); - } - - if (!resolve_type or !decl_ty.hasRuntimeBits(zcu)) { + if (!try decl_ty.hasRuntimeBitsSema(pt)) { if (zcu.comp.config.use_llvm) break :queue_codegen; if (file.mod.strip) break :queue_codegen; } From 8ff2f1057a9e9cc258f335e4a26101866be987a9 Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 15 Sep 2024 13:54:23 +0100 Subject: [PATCH 146/202] Revert "Dwarf: prevent crash on missing field inits" This reverts commit faafc4132731e854a471ad4c4bb231efb525ea9a. --- src/link/Dwarf.zig | 78 ++++++++++++++++------------------------------ 1 file changed, 27 insertions(+), 51 deletions(-) diff --git a/src/link/Dwarf.zig b/src/link/Dwarf.zig index 9ec0fa301288..6efa708d0510 100644 --- a/src/link/Dwarf.zig +++ b/src/link/Dwarf.zig @@ -2643,10 +2643,8 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool try uleb128(diw, nav_val.toType().abiAlignment(zcu).toByteUnits().?); for (0..loaded_struct.field_types.len) |field_index| { const is_comptime = loaded_struct.fieldIsComptime(ip, field_index); - const field_init = if (loaded_struct.haveFieldInits(ip)) - loaded_struct.fieldInit(ip, field_index) - else - .none; + const field_init = loaded_struct.fieldInit(ip, field_index); + assert(!(is_comptime and field_init == .none)); try wip_nav.abbrevCode(if (is_comptime) .struct_field_comptime else if (field_init != .none) @@ -2658,20 +2656,14 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool defer dwarf.gpa.free(field_name); try wip_nav.strp(field_name); } - if (is_comptime and field_init == .none) { - // workaround frontend bug - try wip_nav.refType(Type.void); - try wip_nav.blockValue(nav_src_loc, Value.void); - } else { - const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); - try wip_nav.refType(field_type); - if (!is_comptime) { - try uleb128(diw, loaded_struct.offsets.get(ip)[field_index]); - try uleb128(diw, loaded_struct.fieldAlign(ip, 
field_index).toByteUnits() orelse - field_type.abiAlignment(zcu).toByteUnits().?); - } - if (field_init != .none) try wip_nav.blockValue(nav_src_loc, Value.fromInterned(field_init)); + const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); + try wip_nav.refType(field_type); + if (!is_comptime) { + try uleb128(diw, loaded_struct.offsets.get(ip)[field_index]); + try uleb128(diw, loaded_struct.fieldAlign(ip, field_index).toByteUnits() orelse + field_type.abiAlignment(zcu).toByteUnits().?); } + if (field_init != .none) try wip_nav.blockValue(nav_src_loc, Value.fromInterned(field_init)); } try uleb128(diw, @intFromEnum(AbbrevCode.null)); } @@ -3511,10 +3503,8 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP try uleb128(diw, ty.abiAlignment(zcu).toByteUnits().?); for (0..loaded_struct.field_types.len) |field_index| { const is_comptime = loaded_struct.fieldIsComptime(ip, field_index); - const field_init = if (loaded_struct.haveFieldInits(ip)) - loaded_struct.fieldInit(ip, field_index) - else - .none; + const field_init = loaded_struct.fieldInit(ip, field_index); + assert(!(is_comptime and field_init == .none)); try wip_nav.abbrevCode(if (is_comptime) .struct_field_comptime else if (field_init != .none) @@ -3526,20 +3516,14 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP defer dwarf.gpa.free(field_name); try wip_nav.strp(field_name); } - if (is_comptime and field_init == .none) { - // workaround frontend bug - try wip_nav.refType(Type.void); - try wip_nav.blockValue(ty_src_loc, Value.void); - } else { - const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); - try wip_nav.refType(field_type); - if (!is_comptime) { - try uleb128(diw, loaded_struct.offsets.get(ip)[field_index]); - try uleb128(diw, loaded_struct.fieldAlign(ip, field_index).toByteUnits() orelse - field_type.abiAlignment(zcu).toByteUnits().?); - } - if (field_init != .none) try 
wip_nav.blockValue(ty_src_loc, Value.fromInterned(field_init)); + const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); + try wip_nav.refType(field_type); + if (!is_comptime) { + try uleb128(diw, loaded_struct.offsets.get(ip)[field_index]); + try uleb128(diw, loaded_struct.fieldAlign(ip, field_index).toByteUnits() orelse + field_type.abiAlignment(zcu).toByteUnits().?); } + if (field_init != .none) try wip_nav.blockValue(ty_src_loc, Value.fromInterned(field_init)); } try uleb128(diw, @intFromEnum(AbbrevCode.null)); } @@ -3595,10 +3579,8 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP try uleb128(diw, ty.abiAlignment(zcu).toByteUnits().?); for (0..loaded_struct.field_types.len) |field_index| { const is_comptime = loaded_struct.fieldIsComptime(ip, field_index); - const field_init = if (loaded_struct.haveFieldInits(ip)) - loaded_struct.fieldInit(ip, field_index) - else - .none; + const field_init = loaded_struct.fieldInit(ip, field_index); + assert(!(is_comptime and field_init == .none)); try wip_nav.abbrevCode(if (is_comptime) .struct_field_comptime else if (field_init != .none) @@ -3610,20 +3592,14 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP defer dwarf.gpa.free(field_name); try wip_nav.strp(field_name); } - if (is_comptime and field_init == .none) { - // workaround frontend bug - try wip_nav.refType(Type.void); - try wip_nav.blockValue(ty_src_loc, Value.void); - } else { - const field_type = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); - try wip_nav.refType(field_type); - if (!is_comptime) { - try uleb128(diw, loaded_struct.offsets.get(ip)[field_index]); - try uleb128(diw, loaded_struct.fieldAlign(ip, field_index).toByteUnits() orelse - field_type.abiAlignment(zcu).toByteUnits().?); - } - if (field_init != .none) try wip_nav.blockValue(ty_src_loc, Value.fromInterned(field_init)); + const field_type = 
Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]); + try wip_nav.refType(field_type); + if (!is_comptime) { + try uleb128(diw, loaded_struct.offsets.get(ip)[field_index]); + try uleb128(diw, loaded_struct.fieldAlign(ip, field_index).toByteUnits() orelse + field_type.abiAlignment(zcu).toByteUnits().?); } + if (field_init != .none) try wip_nav.blockValue(ty_src_loc, Value.fromInterned(field_init)); } try uleb128(diw, @intFromEnum(AbbrevCode.null)); } From 19924ca2890964b411362c423dd9f4b10596a18f Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 15 Sep 2024 12:58:39 +0100 Subject: [PATCH 147/202] Sema: give `try` operand `error{}` result type in non-errorable functions Resolves: #21414 --- src/Sema.zig | 18 +++++++----------- test/behavior/try.zig | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 11 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 61501fa4552d..433429f92a95 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -4487,7 +4487,7 @@ fn zirTryOperandTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: boo break :ty operand_ty.childType(zcu); } else operand_ty; - const err_set_ty = err_set: { + const err_set_ty: Type = err_set: { // There are awkward cases, like `?E`. Our strategy is to repeatedly unwrap optionals // until we hit an error union or set. 
var cur_ty = sema.fn_ret_ty; @@ -4496,16 +4496,12 @@ fn zirTryOperandTy(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: boo .error_set => break :err_set cur_ty, .error_union => break :err_set cur_ty.errorUnionSet(zcu), .optional => cur_ty = cur_ty.optionalChild(zcu), - else => return sema.failWithOwnedErrorMsg(block, msg: { - const msg = try sema.errMsg(src, "expected '{}', found error set", .{sema.fn_ret_ty.fmt(pt)}); - errdefer msg.destroy(sema.gpa); - const ret_ty_src: LazySrcLoc = .{ - .base_node_inst = sema.getOwnerFuncDeclInst(), - .offset = .{ .node_offset_fn_type_ret_ty = 0 }, - }; - try sema.errNote(ret_ty_src, msg, "function cannot return an error", .{}); - break :msg msg; - }), + else => { + // This function cannot return an error. + // `try` is still valid if the error case is impossible, i.e. no error is returned. + // So, the result type has an error set of `error{}`. + break :err_set .fromInterned(try zcu.intern_pool.getErrorSetType(zcu.gpa, pt.tid, &.{})); + }, } } }; diff --git a/test/behavior/try.zig b/test/behavior/try.zig index f17133fabee3..3e66582aaa19 100644 --- a/test/behavior/try.zig +++ b/test/behavior/try.zig @@ -86,3 +86,40 @@ test "try forwards result location" { try expect((S.foo(false) catch return error.TestUnexpectedResult) == 123); try std.testing.expectError(error.Foo, S.foo(true)); } + +test "'return try' of empty error set in function returning non-error" { + if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + + const S = struct { + fn succeed0() error{}!u32 { + return 123; + } + fn succeed1() !u32 { + return 456; + } + fn tryNoError0() u32 { + return try succeed0(); + } + fn tryNoError1() u32 { + 
return try succeed1(); + } + fn tryNoError2() u32 { + const e: error{}!u32 = 789; + return try e; + } + fn doTheTest() !void { + const res0 = tryNoError0(); + const res1 = tryNoError1(); + const res2 = tryNoError2(); + try expect(res0 == 123); + try expect(res1 == 456); + try expect(res2 == 789); + } + }; + try S.doTheTest(); + try comptime S.doTheTest(); +} From 258236ec1bbfa72555189d87db42e57e1f74be3c Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 15 Sep 2024 13:25:18 +0100 Subject: [PATCH 148/202] Sema: don't emit instruction when casting @min/@max result to OPV type Resolves: #21408 --- src/Sema.zig | 4 ++++ test/behavior/maximum_minimum.zig | 14 ++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/src/Sema.zig b/src/Sema.zig index 433429f92a95..7a4b9b3210b8 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -26201,6 +26201,10 @@ fn analyzeMinMax( .child = refined_scalar_ty.toIntern(), }) else refined_scalar_ty; + if (try sema.typeHasOnePossibleValue(refined_ty)) |opv| { + return Air.internedToRef(opv.toIntern()); + } + if (!refined_ty.eql(unrefined_ty, zcu)) { // We've reduced the type - cast the result down return block.addTyOp(.intcast, refined_ty, cur_minmax.?); diff --git a/test/behavior/maximum_minimum.zig b/test/behavior/maximum_minimum.zig index ab1803c5b163..8994d9d18241 100644 --- a/test/behavior/maximum_minimum.zig +++ b/test/behavior/maximum_minimum.zig @@ -336,3 +336,17 @@ test "@min/@max of signed and unsigned runtime integers" { try expectEqual(x, @min(x, y)); try expectEqual(y, @max(x, y)); } + +test "@min resulting in u0" { + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + + const S = struct { + fn min(a: u0, b: u8) u8 { + return @min(a, b); + } + }; + const x = S.min(0, 1); + try expect(x == 0); +} From 5d7fa5513f92a43a418e3c5c4d27f0b61db313ff Mon Sep 
17 00:00:00 2001 From: mlugg Date: Wed, 11 Sep 2024 09:06:54 +0100 Subject: [PATCH 149/202] std.Build: allow packages to expose arbitrary LazyPaths by name --- lib/std/Build.zig | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/lib/std/Build.zig b/lib/std/Build.zig index 236ac16c476b..dbd96441b1a1 100644 --- a/lib/std/Build.zig +++ b/lib/std/Build.zig @@ -89,6 +89,7 @@ dep_prefix: []const u8 = "", modules: std.StringArrayHashMap(*Module), named_writefiles: std.StringArrayHashMap(*Step.WriteFile), +named_lazy_paths: std.StringArrayHashMap(LazyPath), /// A map from build root dirs to the corresponding `*Dependency`. This is shared with all child /// `Build`s. initialized_deps: *InitializedDepMap, @@ -300,8 +301,9 @@ pub fn create( .install_path = undefined, .args = null, .host = graph.host, - .modules = std.StringArrayHashMap(*Module).init(arena), - .named_writefiles = std.StringArrayHashMap(*Step.WriteFile).init(arena), + .modules = .init(arena), + .named_writefiles = .init(arena), + .named_lazy_paths = .init(arena), .initialized_deps = initialized_deps, .pkg_hash = "", .available_deps = available_deps, @@ -393,8 +395,9 @@ fn createChildOnly( .glibc_runtimes_dir = parent.glibc_runtimes_dir, .host = parent.host, .dep_prefix = parent.fmt("{s}{s}.", .{ parent.dep_prefix, dep_name }), - .modules = std.StringArrayHashMap(*Module).init(allocator), - .named_writefiles = std.StringArrayHashMap(*Step.WriteFile).init(allocator), + .modules = .init(allocator), + .named_writefiles = .init(allocator), + .named_lazy_paths = .init(allocator), .initialized_deps = parent.initialized_deps, .pkg_hash = pkg_hash, .available_deps = pkg_deps, @@ -1060,6 +1063,10 @@ pub fn addNamedWriteFiles(b: *Build, name: []const u8) *Step.WriteFile { return wf; } +pub fn addNamedLazyPath(b: *Build, name: []const u8, lp: LazyPath) void { + b.named_lazy_paths.put(b.dupe(name), lp.dupe(b)) catch @panic("OOM"); +} + pub fn addWriteFiles(b: *Build) *Step.WriteFile { 
return Step.WriteFile.create(b); } @@ -1902,6 +1909,12 @@ pub const Dependency = struct { }; } + pub fn namedLazyPath(d: *Dependency, name: []const u8) LazyPath { + return d.builder.named_lazy_paths.get(name) orelse { + panic("unable to find named lazypath '{s}'", .{name}); + }; + } + pub fn path(d: *Dependency, sub_path: []const u8) LazyPath { return .{ .dependency = .{ From 1365be5d02429dbbbfab43133d399b2af42047c5 Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 15 Sep 2024 15:45:20 +0100 Subject: [PATCH 150/202] compiler: provide correct result types to `+=` and `-=` Resolves: #21341 --- lib/std/zig/AstGen.zig | 22 ++++++++++++++++++++-- lib/std/zig/Zir.zig | 10 ++++++++++ src/Sema.zig | 28 ++++++++++++++++++++++++++++ src/print_zir.zig | 7 +++++++ test/behavior/pointers.zig | 15 +++++++++++++++ 5 files changed, 80 insertions(+), 2 deletions(-) diff --git a/lib/std/zig/AstGen.zig b/lib/std/zig/AstGen.zig index c00ea3ff0fbb..14902a6726c0 100644 --- a/lib/std/zig/AstGen.zig +++ b/lib/std/zig/AstGen.zig @@ -3785,8 +3785,26 @@ fn assignOp( else => undefined, }; const lhs = try gz.addUnNode(.load, lhs_ptr, infix_node); - const lhs_type = try gz.addUnNode(.typeof, lhs, infix_node); - const rhs = try expr(gz, scope, .{ .rl = .{ .coerced_ty = lhs_type } }, node_datas[infix_node].rhs); + + const rhs_res_ty = switch (op_inst_tag) { + .add, + .sub, + => try gz.add(.{ + .tag = .extended, + .data = .{ .extended = .{ + .opcode = .inplace_arith_result_ty, + .small = @intFromEnum(@as(Zir.Inst.InplaceOp, switch (op_inst_tag) { + .add => .add_eq, + .sub => .sub_eq, + else => unreachable, + })), + .operand = @intFromEnum(lhs), + } }, + }), + else => try gz.addUnNode(.typeof, lhs, infix_node), // same as LHS type + }; + // Not `coerced_ty` since `add`/etc won't coerce to this type. 
+ const rhs = try expr(gz, scope, .{ .rl = .{ .ty = rhs_res_ty } }, node_datas[infix_node].rhs); switch (op_inst_tag) { .add, .sub, .mul, .div, .mod_rem => { diff --git a/lib/std/zig/Zir.zig b/lib/std/zig/Zir.zig index 8f86dabb5d04..dda7e7bbd01c 100644 --- a/lib/std/zig/Zir.zig +++ b/lib/std/zig/Zir.zig @@ -2086,6 +2086,10 @@ pub const Inst = struct { /// `operand` is payload index to `UnNode`. /// `small` is unused. branch_hint, + /// Compute the result type for in-place arithmetic, e.g. `+=`. + /// `operand` is `Zir.Inst.Ref` of the loaded LHS (*not* its type). + /// `small` is an `Inst.InplaceOp`. + inplace_arith_result_ty, pub const InstData = struct { opcode: Extended, @@ -3188,6 +3192,11 @@ pub const Inst = struct { calling_convention_inline, }; + pub const InplaceOp = enum(u16) { + add_eq, + sub_eq, + }; + /// Trailing: /// 0. tag_type: Ref, // if has_tag_type /// 1. captures_len: u32, // if has_captures_len @@ -4032,6 +4041,7 @@ fn findDeclsInner( .field_parent_ptr, .builtin_value, .branch_hint, + .inplace_arith_result_ty, => return, // `@TypeOf` has a body. 
diff --git a/src/Sema.zig b/src/Sema.zig index 7a4b9b3210b8..b859b8a183b5 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1361,6 +1361,7 @@ fn analyzeBodyInner( .value_placeholder => unreachable, // never appears in a body .field_parent_ptr => try sema.zirFieldParentPtr(block, extended), .builtin_value => try sema.zirBuiltinValue(extended), + .inplace_arith_result_ty => try sema.zirInplaceArithResultTy(extended), }; }, @@ -27342,6 +27343,33 @@ fn zirBuiltinValue(sema: *Sema, extended: Zir.Inst.Extended.InstData) CompileErr return Air.internedToRef(ty.toIntern()); } +fn zirInplaceArithResultTy(sema: *Sema, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref { + const pt = sema.pt; + const zcu = pt.zcu; + + const lhs = try sema.resolveInst(@enumFromInt(extended.operand)); + const lhs_ty = sema.typeOf(lhs); + + const op: Zir.Inst.InplaceOp = @enumFromInt(extended.small); + const ty: Type = switch (op) { + .add_eq => ty: { + const ptr_size = lhs_ty.ptrSizeOrNull(zcu) orelse break :ty lhs_ty; + switch (ptr_size) { + .One, .Slice => break :ty lhs_ty, // invalid, let it error + .Many, .C => break :ty .usize, // `[*]T + usize` + } + }, + .sub_eq => ty: { + const ptr_size = lhs_ty.ptrSizeOrNull(zcu) orelse break :ty lhs_ty; + switch (ptr_size) { + .One, .Slice => break :ty lhs_ty, // invalid, let it error + .Many, .C => break :ty .generic_poison, // could be `[*]T - [*]T` or `[*]T - usize` + } + }, + }; + return Air.internedToRef(ty.toIntern()); +} + fn zirBranchHint(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!void { const pt = sema.pt; const zcu = pt.zcu; diff --git a/src/print_zir.zig b/src/print_zir.zig index f5c83c98e261..c5f1517edd60 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -620,6 +620,7 @@ const Writer = struct { .closure_get => try self.writeClosureGet(stream, extended), .field_parent_ptr => try self.writeFieldParentPtr(stream, extended), .builtin_value => try self.writeBuiltinValue(stream, extended), + 
.inplace_arith_result_ty => try self.writeInplaceArithResultTy(stream, extended), } } @@ -2781,6 +2782,12 @@ const Writer = struct { try self.writeSrcNode(stream, @bitCast(extended.operand)); } + fn writeInplaceArithResultTy(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void { + const op: Zir.Inst.InplaceOp = @enumFromInt(extended.small); + try self.writeInstRef(stream, @enumFromInt(extended.operand)); + try stream.print(", {s}))", .{@tagName(op)}); + } + fn writeInstRef(self: *Writer, stream: anytype, ref: Zir.Inst.Ref) !void { if (ref == .none) { return stream.writeAll(".none"); diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig index 07041e70a129..bf0d37cc2bb3 100644 --- a/test/behavior/pointers.zig +++ b/test/behavior/pointers.zig @@ -98,6 +98,21 @@ test "pointer subtraction" { } } +test "pointer arithmetic with non-trivial RHS" { + var t: bool = undefined; + t = true; + + var ptr: [*]const u8 = "Hello, World!"; + ptr += if (t) 5 else 2; + try expect(ptr[0] == ','); + ptr += if (!t) 4 else 2; + try expect(ptr[0] == 'W'); + ptr -= if (t) @as(usize, 6) else 3; + try expect(ptr[0] == 'e'); + ptr -= if (!t) @as(usize, 0) else 1; + try expect(ptr[0] == 'H'); +} + test "double pointer parsing" { comptime assert(PtrOf(PtrOf(i32)) == **i32); } From 7f60d2e4658ad78839ce0fce63a95dbcb893a256 Mon Sep 17 00:00:00 2001 From: David Rubin Date: Sun, 15 Sep 2024 14:41:00 -0700 Subject: [PATCH 151/202] riscv: fix up `ptr_elem_val` to not doubly lock --- src/arch/riscv64/CodeGen.zig | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 0c6da840ebf0..73fa0460de80 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -3897,7 +3897,7 @@ fn airArrayElemVal(func: *Func, inst: Air.Inst.Index) !void { if (array_ty.isVector(zcu)) { // we need to load the vector, vslidedown to get the element we want - // and store that 
element at in a load frame. + // and store that element in a load frame. const src_reg, const src_lock = try func.allocReg(.vector); defer func.register_manager.unlockReg(src_lock); @@ -3970,12 +3970,15 @@ fn airPtrElemVal(func: *Func, inst: Air.Inst.Index) !void { }; defer if (index_lock) |lock| func.register_manager.unlockReg(lock); - const elem_ptr_reg = if (base_ptr_mcv.isRegister() and func.liveness.operandDies(inst, 0)) - base_ptr_mcv.register - else - try func.copyToTmpRegister(base_ptr_ty, base_ptr_mcv); - const elem_ptr_lock = func.register_manager.lockRegAssumeUnused(elem_ptr_reg); - defer func.register_manager.unlockReg(elem_ptr_lock); + const elem_ptr_reg, const elem_ptr_lock = if (base_ptr_mcv.isRegister() and + func.liveness.operandDies(inst, 0)) + .{ base_ptr_mcv.register, null } + else blk: { + const reg, const lock = try func.allocReg(.int); + try func.genSetReg(base_ptr_ty, reg, base_ptr_mcv); + break :blk .{ reg, lock }; + }; + defer if (elem_ptr_lock) |lock| func.register_manager.unlockReg(lock); try func.genBinOp( .ptr_add, From a5c922179f99591d20e5b6b203c7e292692e0c28 Mon Sep 17 00:00:00 2001 From: mlugg Date: Sun, 15 Sep 2024 19:17:06 +0100 Subject: [PATCH 152/202] Sema: return undefined on comparison of runtime value against undefined Resolves: #10703 Resolves: #17798 --- src/Sema.zig | 128 +++++++++--------- ...nst_undefined_produces_undefined_value.zig | 9 +- 2 files changed, 69 insertions(+), 68 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 7a4b9b3210b8..02b85243ec26 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -17910,12 +17910,15 @@ fn cmpSelf( const pt = sema.pt; const zcu = pt.zcu; const resolved_type = sema.typeOf(casted_lhs); - const runtime_src: LazySrcLoc = src: { - if (try sema.resolveValue(casted_lhs)) |lhs_val| { - if (lhs_val.isUndef(zcu)) return pt.undefRef(Type.bool); - if (try sema.resolveValue(casted_rhs)) |rhs_val| { - if (rhs_val.isUndef(zcu)) return pt.undefRef(Type.bool); + const maybe_lhs_val = try 
sema.resolveValue(casted_lhs); + const maybe_rhs_val = try sema.resolveValue(casted_rhs); + if (maybe_lhs_val) |v| if (v.isUndef(zcu)) return pt.undefRef(Type.bool); + if (maybe_rhs_val) |v| if (v.isUndef(zcu)) return pt.undefRef(Type.bool); + + const runtime_src: LazySrcLoc = src: { + if (maybe_lhs_val) |lhs_val| { + if (maybe_rhs_val) |rhs_val| { if (resolved_type.zigTypeTag(zcu) == .vector) { const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_type); return Air.internedToRef(cmp_val.toIntern()); @@ -17936,8 +17939,7 @@ fn cmpSelf( // For bools, we still check the other operand, because we can lower // bool eq/neq more efficiently. if (resolved_type.zigTypeTag(zcu) == .bool) { - if (try sema.resolveValue(casted_rhs)) |rhs_val| { - if (rhs_val.isUndef(zcu)) return pt.undefRef(Type.bool); + if (maybe_rhs_val) |rhs_val| { return sema.runtimeBoolCmp(block, src, op, casted_lhs, rhs_val.toBool(), lhs_src); } } @@ -33809,51 +33811,47 @@ fn cmpNumeric( else uncasted_rhs; - const runtime_src: LazySrcLoc = src: { - if (try sema.resolveValue(lhs)) |lhs_val| { - if (try sema.resolveValue(rhs)) |rhs_val| { - // Compare ints: const vs. 
undefined (or vice versa) - if (!lhs_val.isUndef(zcu) and (lhs_ty.isInt(zcu) or lhs_ty_tag == .comptime_int) and rhs_ty.isInt(zcu) and rhs_val.isUndef(zcu)) { - if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(lhs_val), op, rhs_ty)) |res| { - return if (res) .bool_true else .bool_false; - } - } else if (!rhs_val.isUndef(zcu) and (rhs_ty.isInt(zcu) or rhs_ty_tag == .comptime_int) and lhs_ty.isInt(zcu) and lhs_val.isUndef(zcu)) { - if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(rhs_val), op.reverse(), lhs_ty)) |res| { - return if (res) .bool_true else .bool_false; - } - } + const maybe_lhs_val = try sema.resolveValue(lhs); + const maybe_rhs_val = try sema.resolveValue(rhs); - if (lhs_val.isUndef(zcu) or rhs_val.isUndef(zcu)) { - return pt.undefRef(Type.bool); - } - if (lhs_val.isNan(zcu) or rhs_val.isNan(zcu)) { - return if (op == std.math.CompareOperator.neq) .bool_true else .bool_false; - } - return if (try Value.compareHeteroSema(lhs_val, op, rhs_val, pt)) - .bool_true - else - .bool_false; - } else { - if (!lhs_val.isUndef(zcu) and (lhs_ty.isInt(zcu) or lhs_ty_tag == .comptime_int) and rhs_ty.isInt(zcu)) { - // Compare ints: const vs. var - if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(lhs_val), op, rhs_ty)) |res| { - return if (res) .bool_true else .bool_false; - } - } - break :src rhs_src; + // If the LHS is const, check if there is a guaranteed result which does not depend on ths RHS. 
+ if (maybe_lhs_val) |lhs_val| { + // Result based on comparison exceeding type bounds + if (!lhs_val.isUndef(zcu) and (lhs_ty.isInt(zcu) or lhs_ty_tag == .comptime_int) and rhs_ty.isInt(zcu)) { + if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(lhs_val), op, rhs_ty)) |res| { + return if (res) .bool_true else .bool_false; } - } else { - if (try sema.resolveValueResolveLazy(rhs)) |rhs_val| { - if (!rhs_val.isUndef(zcu) and (rhs_ty.isInt(zcu) or rhs_ty_tag == .comptime_int) and lhs_ty.isInt(zcu)) { - // Compare ints: var vs. const - if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(rhs_val), op.reverse(), lhs_ty)) |res| { - return if (res) .bool_true else .bool_false; - } - } + } + // Result based on NaN comparison + if (lhs_val.isNan(zcu)) { + return if (op == .neq) .bool_true else .bool_false; + } + } + + // If the RHS is const, check if there is a guaranteed result which does not depend on ths LHS. + if (maybe_rhs_val) |rhs_val| { + // Result based on comparison exceeding type bounds + if (!rhs_val.isUndef(zcu) and (rhs_ty.isInt(zcu) or rhs_ty_tag == .comptime_int) and lhs_ty.isInt(zcu)) { + if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(rhs_val), op.reverse(), lhs_ty)) |res| { + return if (res) .bool_true else .bool_false; } - break :src lhs_src; } - }; + // Result based on NaN comparison + if (rhs_val.isNan(zcu)) { + return if (op == .neq) .bool_true else .bool_false; + } + } + + // Any other comparison depends on both values, so the result is undef if either is undef. 
+ if (maybe_lhs_val) |v| if (v.isUndef(zcu)) return pt.undefRef(Type.bool); + if (maybe_rhs_val) |v| if (v.isUndef(zcu)) return pt.undefRef(Type.bool); + + const runtime_src: LazySrcLoc = if (maybe_lhs_val) |lhs_val| rs: { + if (maybe_rhs_val) |rhs_val| { + const res = try Value.compareHeteroSema(lhs_val, op, rhs_val, pt); + return if (res) .bool_true else .bool_false; + } else break :rs rhs_src; + } else lhs_src; // TODO handle comparisons against lazy zero values // Some values can be compared against zero without being runtime-known or without forcing @@ -34161,21 +34159,17 @@ fn cmpVector( .child = .bool_type, }); - const runtime_src: LazySrcLoc = src: { - if (try sema.resolveValue(casted_lhs)) |lhs_val| { - if (try sema.resolveValue(casted_rhs)) |rhs_val| { - if (lhs_val.isUndef(zcu) or rhs_val.isUndef(zcu)) { - return pt.undefRef(result_ty); - } - const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_ty); - return Air.internedToRef(cmp_val.toIntern()); - } else { - break :src rhs_src; - } - } else { - break :src lhs_src; - } - }; + const maybe_lhs_val = try sema.resolveValue(casted_lhs); + const maybe_rhs_val = try sema.resolveValue(casted_rhs); + if (maybe_lhs_val) |v| if (v.isUndef(zcu)) return pt.undefRef(result_ty); + if (maybe_rhs_val) |v| if (v.isUndef(zcu)) return pt.undefRef(result_ty); + + const runtime_src: LazySrcLoc = if (maybe_lhs_val) |lhs_val| src: { + if (maybe_rhs_val) |rhs_val| { + const cmp_val = try sema.compareVector(lhs_val, op, rhs_val, resolved_ty); + return Air.internedToRef(cmp_val.toIntern()); + } else break :src rhs_src; + } else lhs_src; try sema.requireRuntimeBlock(block, src, runtime_src); return block.addCmpVector(casted_lhs, casted_rhs, op); @@ -38662,8 +38656,12 @@ fn compareVector( for (result_data, 0..) 
|*scalar, i| { const lhs_elem = try lhs.elemValue(pt, i); const rhs_elem = try rhs.elemValue(pt, i); - const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(zcu)); - scalar.* = Value.makeBool(res_bool).toIntern(); + if (lhs_elem.isUndef(zcu) or rhs_elem.isUndef(zcu)) { + scalar.* = try pt.intern(.{ .undef = .bool_type }); + } else { + const res_bool = try sema.compareScalar(lhs_elem, op, rhs_elem, ty.scalarType(zcu)); + scalar.* = Value.makeBool(res_bool).toIntern(); + } } return Value.fromInterned(try pt.intern(.{ .aggregate = .{ .ty = (try pt.vectorType(.{ .len = ty.vectorLen(zcu), .child = .bool_type })).toIntern(), diff --git a/test/cases/compile_errors/comparing_against_undefined_produces_undefined_value.zig b/test/cases/compile_errors/comparing_against_undefined_produces_undefined_value.zig index d98741b80d9c..8c2d56a0b42b 100644 --- a/test/cases/compile_errors/comparing_against_undefined_produces_undefined_value.zig +++ b/test/cases/compile_errors/comparing_against_undefined_produces_undefined_value.zig @@ -1,9 +1,12 @@ -export fn entry() void { +export fn foo() void { if (2 == undefined) {} } +export fn bar(x: u32) void { + if (x == undefined) {} +} + // error -// backend=stage2 -// target=native // // :2:11: error: use of undefined value here causes undefined behavior +// :6:11: error: use of undefined value here causes undefined behavior From 812557bfde3c577b5f00cb556201c71ad5ed6fa4 Mon Sep 17 00:00:00 2001 From: Jay Petacat Date: Mon, 9 Sep 2024 22:23:18 -0600 Subject: [PATCH 153/202] std: Restore conventional `compareFn` behavior for `binarySearch` PR #20927 made some improvements to the `binarySearch` API, but one change I found surprising was the relationship between the left-hand and right-hand parameters of `compareFn` was inverted. This is different from how comparison functions typically behave, both in other parts of Zig (e.g. `std.math.order`) and in other languages (e.g. C's `bsearch`). 
Unless a strong reason can be identified and documented for doing otherwise, I think it'll be better to stick with convention. While writing this patch and changing things back to the way they were, the predicates of `lowerBound` and `upperBound` seemed to be the only areas that benefited from the inversion. I don't think that benefit is worth the cost, personally. Calling `Order.invert()` in the predicates accomplishes the same goal. --- lib/compiler/aro/aro/Preprocessor.zig | 2 +- lib/std/debug/Coverage.zig | 2 +- lib/std/debug/Dwarf.zig | 2 +- lib/std/debug/SelfInfo.zig | 4 +-- lib/std/sort.zig | 52 +++++++++++++-------------- 5 files changed, 30 insertions(+), 32 deletions(-) diff --git a/lib/compiler/aro/aro/Preprocessor.zig b/lib/compiler/aro/aro/Preprocessor.zig index a8eb74a4a8c7..5a03cc0dff01 100644 --- a/lib/compiler/aro/aro/Preprocessor.zig +++ b/lib/compiler/aro/aro/Preprocessor.zig @@ -271,7 +271,7 @@ fn clearBuffers(pp: *Preprocessor) void { pub fn expansionSlice(pp: *Preprocessor, tok: Tree.TokenIndex) []Source.Location { const S = struct { fn orderTokenIndex(context: Tree.TokenIndex, item: Tree.TokenIndex) std.math.Order { - return std.math.order(item, context); + return std.math.order(context, item); } }; diff --git a/lib/std/debug/Coverage.zig b/lib/std/debug/Coverage.zig index 2d0e0546734d..58e600dc6370 100644 --- a/lib/std/debug/Coverage.zig +++ b/lib/std/debug/Coverage.zig @@ -196,7 +196,7 @@ pub fn resolveAddressesDwarf( const table_addrs = slc.line_table.keys(); line_table_i = std.sort.upperBound(u64, table_addrs, pc, struct { fn order(context: u64, item: u64) std.math.Order { - return std.math.order(item, context); + return std.math.order(context, item); } }.order); } diff --git a/lib/std/debug/Dwarf.zig b/lib/std/debug/Dwarf.zig index d36e4f961d05..73b1871c4683 100644 --- a/lib/std/debug/Dwarf.zig +++ b/lib/std/debug/Dwarf.zig @@ -182,7 +182,7 @@ pub const CompileUnit = struct { pub fn findSource(slc: *const SrcLocCache, address: u64) 
!LineEntry { const index = std.sort.upperBound(u64, slc.line_table.keys(), address, struct { fn order(context: u64, item: u64) std.math.Order { - return std.math.order(item, context); + return std.math.order(context, item); } }.order); if (index == 0) return missing(); diff --git a/lib/std/debug/SelfInfo.zig b/lib/std/debug/SelfInfo.zig index 76c0505e9675..5e9afc8cfd4b 100644 --- a/lib/std/debug/SelfInfo.zig +++ b/lib/std/debug/SelfInfo.zig @@ -1624,12 +1624,12 @@ pub fn unwindFrameDwarf( } else { const index = std.sort.binarySearch(Dwarf.FrameDescriptionEntry, di.fde_list.items, context.pc, struct { pub fn compareFn(pc: usize, item: Dwarf.FrameDescriptionEntry) std.math.Order { - if (pc < item.pc_begin) return .gt; + if (pc < item.pc_begin) return .lt; const range_end = item.pc_begin + item.pc_range; if (pc < range_end) return .eq; - return .lt; + return .gt; } }.compareFn); diff --git a/lib/std/sort.zig b/lib/std/sort.zig index 9dad2949bfdf..23707f138591 100644 --- a/lib/std/sort.zig +++ b/lib/std/sort.zig @@ -461,8 +461,8 @@ pub fn binarySearch( const mid = low + (high - low) / 2; switch (compareFn(context, items[mid])) { .eq => return mid, - .lt => low = mid + 1, // item too small - .gt => high = mid, // item too big + .gt => low = mid + 1, + .lt => high = mid, } } return null; @@ -471,13 +471,13 @@ pub fn binarySearch( test binarySearch { const S = struct { fn orderU32(context: u32, item: u32) std.math.Order { - return std.math.order(item, context); + return std.math.order(context, item); } fn orderI32(context: i32, item: i32) std.math.Order { - return std.math.order(item, context); + return std.math.order(context, item); } fn orderLength(context: usize, item: []const u8) std.math.Order { - return std.math.order(item.len, context); + return std.math.order(context, item.len); } }; const R = struct { @@ -489,9 +489,9 @@ test binarySearch { } fn order(context: i32, item: @This()) std.math.Order { - if (item.e < context) { + if (context < item.b) { return .lt; - } 
else if (item.b > context) { + } else if (context > item.e) { return .gt; } else { return .eq; @@ -513,9 +513,8 @@ test binarySearch { try std.testing.expectEqual(2, binarySearch([]const u8, &[_][]const u8{ "", "abc", "1234", "vwxyz" }, @as(usize, 4), S.orderLength)); } -/// Returns the index of the first element in `items` returning `.eq` or `.gt` -/// when given to `compareFn`. -/// - Returns `items.len` if all elements return `.lt`. +/// Returns the index of the first element in `items` that is greater than or equal to `context`, +/// as determined by `compareFn`. If no such element exists, returns `items.len`. /// /// `items` must be sorted in ascending order with respect to `compareFn`: /// ``` @@ -540,7 +539,7 @@ pub fn lowerBound( ) usize { const S = struct { fn predicate(ctx: @TypeOf(context), item: T) bool { - return compareFn(ctx, item) == .lt; + return compareFn(ctx, item).invert() == .lt; } }; return partitionPoint(T, items, context, S.predicate); @@ -549,13 +548,13 @@ pub fn lowerBound( test lowerBound { const S = struct { fn compareU32(context: u32, item: u32) std.math.Order { - return std.math.order(item, context); + return std.math.order(context, item); } fn compareI32(context: i32, item: i32) std.math.Order { - return std.math.order(item, context); + return std.math.order(context, item); } fn compareF32(context: f32, item: f32) std.math.Order { - return std.math.order(item, context); + return std.math.order(context, item); } }; const R = struct { @@ -566,7 +565,7 @@ test lowerBound { } fn compareFn(context: i32, item: @This()) std.math.Order { - return std.math.order(item.val, context); + return std.math.order(context, item.val); } }; @@ -584,9 +583,8 @@ test lowerBound { try std.testing.expectEqual(2, lowerBound(R, &[_]R{ R.r(-100), R.r(-40), R.r(-10), R.r(30) }, @as(i32, -20), R.compareFn)); } -/// Returns the index of the first element in `items` returning `.gt` -/// when given to `compareFn`. 
-/// - Returns `items.len` if none of the elements return `.gt`. +/// Returns the index of the first element in `items` that is greater than `context`, as determined +/// by `compareFn`. If no such element exists, returns `items.len`. /// /// `items` must be sorted in ascending order with respect to `compareFn`: /// ``` @@ -611,7 +609,7 @@ pub fn upperBound( ) usize { const S = struct { fn predicate(ctx: @TypeOf(context), item: T) bool { - return compareFn(ctx, item) != .gt; + return compareFn(ctx, item).invert() != .gt; } }; return partitionPoint(T, items, context, S.predicate); @@ -620,13 +618,13 @@ pub fn upperBound( test upperBound { const S = struct { fn compareU32(context: u32, item: u32) std.math.Order { - return std.math.order(item, context); + return std.math.order(context, item); } fn compareI32(context: i32, item: i32) std.math.Order { - return std.math.order(item, context); + return std.math.order(context, item); } fn compareF32(context: f32, item: f32) std.math.Order { - return std.math.order(item, context); + return std.math.order(context, item); } }; const R = struct { @@ -637,7 +635,7 @@ test upperBound { } fn compareFn(context: i32, item: @This()) std.math.Order { - return std.math.order(item.val, context); + return std.math.order(context, item.val); } }; @@ -780,16 +778,16 @@ pub fn equalRange( test equalRange { const S = struct { fn orderU32(context: u32, item: u32) std.math.Order { - return std.math.order(item, context); + return std.math.order(context, item); } fn orderI32(context: i32, item: i32) std.math.Order { - return std.math.order(item, context); + return std.math.order(context, item); } fn orderF32(context: f32, item: f32) std.math.Order { - return std.math.order(item, context); + return std.math.order(context, item); } fn orderLength(context: usize, item: []const u8) std.math.Order { - return std.math.order(item.len, context); + return std.math.order(context, item.len); } }; From 4650e5b9fcaa74b724a51458f5cf8952f3c734de Mon Sep 17 
00:00:00 2001 From: mlugg Date: Tue, 17 Sep 2024 11:00:38 +0100 Subject: [PATCH 154/202] Sema: clean up cmpNumeric There is one minor language change here, which is that comparisons of the form `comptime_inf < runtime_f32` have their results comptime-known. This is consistent with comparisons against comptime NaN for instance, which are always comptime known. A corresponding behavior test is added. This fixes a bug with int comparison elision which my previous commit somehow triggered. `Sema.compareIntsOnlyPossibleResult` is much cleaner now! --- src/Sema.zig | 168 +++++++++++++++-------------------------- src/Type.zig | 8 +- src/Value.zig | 4 +- test/behavior/math.zig | 62 +++++++++++++++ 4 files changed, 126 insertions(+), 116 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 02b85243ec26..9687d28b33f2 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -33814,11 +33814,11 @@ fn cmpNumeric( const maybe_lhs_val = try sema.resolveValue(lhs); const maybe_rhs_val = try sema.resolveValue(rhs); - // If the LHS is const, check if there is a guaranteed result which does not depend on ths RHS. + // If the LHS is const, check if there is a guaranteed result which does not depend on ths RHS value. 
if (maybe_lhs_val) |lhs_val| { // Result based on comparison exceeding type bounds - if (!lhs_val.isUndef(zcu) and (lhs_ty.isInt(zcu) or lhs_ty_tag == .comptime_int) and rhs_ty.isInt(zcu)) { - if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(lhs_val), op, rhs_ty)) |res| { + if (!lhs_val.isUndef(zcu) and (lhs_ty_tag == .int or lhs_ty_tag == .comptime_int) and rhs_ty.isInt(zcu)) { + if (try sema.compareIntsOnlyPossibleResult(lhs_val, op, rhs_ty)) |res| { return if (res) .bool_true else .bool_false; } } @@ -33826,13 +33826,20 @@ fn cmpNumeric( if (lhs_val.isNan(zcu)) { return if (op == .neq) .bool_true else .bool_false; } + // Result based on inf comparison to int + if (lhs_val.isInf(zcu) and rhs_ty_tag == .int) return switch (op) { + .neq => .bool_true, + .eq => .bool_false, + .gt, .gte => if (lhs_val.isNegativeInf(zcu)) .bool_false else .bool_true, + .lt, .lte => if (lhs_val.isNegativeInf(zcu)) .bool_true else .bool_false, + }; } - // If the RHS is const, check if there is a guaranteed result which does not depend on ths LHS. + // If the RHS is const, check if there is a guaranteed result which does not depend on ths LHS value. 
if (maybe_rhs_val) |rhs_val| { // Result based on comparison exceeding type bounds - if (!rhs_val.isUndef(zcu) and (rhs_ty.isInt(zcu) or rhs_ty_tag == .comptime_int) and lhs_ty.isInt(zcu)) { - if (try sema.compareIntsOnlyPossibleResult(try sema.resolveLazyValue(rhs_val), op.reverse(), lhs_ty)) |res| { + if (!rhs_val.isUndef(zcu) and (rhs_ty_tag == .int or rhs_ty_tag == .comptime_int) and lhs_ty.isInt(zcu)) { + if (try sema.compareIntsOnlyPossibleResult(rhs_val, op.reverse(), lhs_ty)) |res| { return if (res) .bool_true else .bool_false; } } @@ -33840,6 +33847,13 @@ fn cmpNumeric( if (rhs_val.isNan(zcu)) { return if (op == .neq) .bool_true else .bool_false; } + // Result based on inf comparison to int + if (rhs_val.isInf(zcu) and lhs_ty_tag == .int) return switch (op) { + .neq => .bool_true, + .eq => .bool_false, + .gt, .gte => if (rhs_val.isNegativeInf(zcu)) .bool_true else .bool_false, + .lt, .lte => if (rhs_val.isNegativeInf(zcu)) .bool_false else .bool_true, + }; } // Any other comparison depends on both values, so the result is undef if either is undef. @@ -33889,17 +33903,18 @@ fn cmpNumeric( const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src); return block.addBinOp(Air.Inst.Tag.fromCmpOp(op, block.float_mode == .optimized), casted_lhs, casted_rhs); } + // For mixed unsigned integer sizes, implicit cast both operands to the larger integer. // For mixed signed and unsigned integers, implicit cast both operands to a signed // integer with + 1 bit. // For mixed floats and integers, extract the integer part from the float, cast that to // a signed integer with mantissa bits + 1, and if there was any non-integral part of the float, // add/subtract 1. 
- const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| + const lhs_is_signed = if (maybe_lhs_val) |lhs_val| !(try lhs_val.compareAllWithZeroSema(.gte, pt)) else (lhs_ty.isRuntimeFloat() or lhs_ty.isSignedInt(zcu)); - const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val| + const rhs_is_signed = if (maybe_rhs_val) |rhs_val| !(try rhs_val.compareAllWithZeroSema(.gte, pt)) else (rhs_ty.isRuntimeFloat() or rhs_ty.isSignedInt(zcu)); @@ -33908,19 +33923,8 @@ fn cmpNumeric( var dest_float_type: ?Type = null; var lhs_bits: usize = undefined; - if (try sema.resolveValueResolveLazy(lhs)) |lhs_val| { - if (lhs_val.isUndef(zcu)) - return pt.undefRef(Type.bool); - if (lhs_val.isNan(zcu)) switch (op) { - .neq => return .bool_true, - else => return .bool_false, - }; - if (lhs_val.isInf(zcu)) switch (op) { - .neq => return .bool_true, - .eq => return .bool_false, - .gt, .gte => return if (lhs_val.isNegativeInf(zcu)) .bool_false else .bool_true, - .lt, .lte => return if (lhs_val.isNegativeInf(zcu)) .bool_true else .bool_false, - }; + if (maybe_lhs_val) |unresolved_lhs_val| { + const lhs_val = try sema.resolveLazyValue(unresolved_lhs_val); if (!rhs_is_signed) { switch (lhs_val.orderAgainstZero(zcu)) { .gt => {}, @@ -33966,19 +33970,8 @@ fn cmpNumeric( } var rhs_bits: usize = undefined; - if (try sema.resolveValueResolveLazy(rhs)) |rhs_val| { - if (rhs_val.isUndef(zcu)) - return pt.undefRef(Type.bool); - if (rhs_val.isNan(zcu)) switch (op) { - .neq => return .bool_true, - else => return .bool_false, - }; - if (rhs_val.isInf(zcu)) switch (op) { - .neq => return .bool_true, - .eq => return .bool_false, - .gt, .gte => return if (rhs_val.isNegativeInf(zcu)) .bool_true else .bool_false, - .lt, .lte => return if (rhs_val.isNegativeInf(zcu)) .bool_false else .bool_true, - }; + if (maybe_rhs_val) |unresolved_rhs_val| { + const rhs_val = try sema.resolveLazyValue(unresolved_rhs_val); if (!lhs_is_signed) { switch 
(rhs_val.orderAgainstZero(zcu)) { .gt => {}, @@ -34045,90 +34038,49 @@ fn compareIntsOnlyPossibleResult( lhs_val: Value, op: std.math.CompareOperator, rhs_ty: Type, -) Allocator.Error!?bool { +) SemaError!?bool { const pt = sema.pt; const zcu = pt.zcu; - const rhs_info = rhs_ty.intInfo(zcu); - const vs_zero = lhs_val.orderAgainstZeroSema(pt) catch unreachable; - const is_zero = vs_zero == .eq; - const is_negative = vs_zero == .lt; - const is_positive = vs_zero == .gt; - // Anything vs. zero-sized type has guaranteed outcome. - if (rhs_info.bits == 0) return switch (op) { - .eq, .lte, .gte => is_zero, - .neq, .lt, .gt => !is_zero, - }; + const min_rhs = try rhs_ty.minInt(pt, rhs_ty); + const max_rhs = try rhs_ty.maxInt(pt, rhs_ty); - // Special case for i1, which can only be 0 or -1. - // Zero and positive ints have guaranteed outcome. - if (rhs_info.bits == 1 and rhs_info.signedness == .signed) { - if (is_positive) return switch (op) { - .gt, .gte, .neq => true, - .lt, .lte, .eq => false, - }; - if (is_zero) return switch (op) { - .gte => true, - .lt => false, - .gt, .lte, .eq, .neq => null, - }; + if (min_rhs.toIntern() == max_rhs.toIntern()) { + // RHS is effectively comptime-known. + return try Value.compareHeteroSema(lhs_val, op, min_rhs, pt); } - // Negative vs. unsigned has guaranteed outcome. - if (rhs_info.signedness == .unsigned and is_negative) return switch (op) { - .eq, .gt, .gte => false, - .neq, .lt, .lte => true, - }; - - const sign_adj = @intFromBool(!is_negative and rhs_info.signedness == .signed); - const req_bits = lhs_val.intBitCountTwosComp(zcu) + sign_adj; - - // No sized type can have more than 65535 bits. - // The RHS type operand is either a runtime value or sized (but undefined) constant. 
- if (req_bits > 65535) return switch (op) { - .lt, .lte => is_negative, - .gt, .gte => is_positive, - .eq => false, - .neq => true, - }; - const fits = req_bits <= rhs_info.bits; + const against_min = try lhs_val.orderAdvanced(min_rhs, .sema, zcu, pt.tid); + const against_max = try lhs_val.orderAdvanced(max_rhs, .sema, zcu, pt.tid); - // Oversized int has guaranteed outcome. switch (op) { - .eq => return if (!fits) false else null, - .neq => return if (!fits) true else null, - .lt, .lte => if (!fits) return is_negative, - .gt, .gte => if (!fits) return !is_negative, + .eq => { + if (against_min.compare(.lt)) return false; + if (against_max.compare(.gt)) return false; + }, + .neq => { + if (against_min.compare(.lt)) return true; + if (against_max.compare(.gt)) return true; + }, + .lt => { + if (against_min.compare(.lt)) return true; + if (against_max.compare(.gte)) return false; + }, + .gt => { + if (against_max.compare(.gt)) return true; + if (against_min.compare(.lte)) return false; + }, + .lte => { + if (against_min.compare(.lte)) return true; + if (against_max.compare(.gt)) return false; + }, + .gte => { + if (against_max.compare(.gte)) return true; + if (against_min.compare(.lt)) return false; + }, } - // For any other comparison, we need to know if the LHS value is - // equal to the maximum or minimum possible value of the RHS type. 
- const is_min, const is_max = edge: { - if (is_zero and rhs_info.signedness == .unsigned) break :edge .{ true, false }; - - if (req_bits != rhs_info.bits) break :edge .{ false, false }; - - const ty = try pt.intType( - if (is_negative) .signed else .unsigned, - @intCast(req_bits), - ); - const pop_count = lhs_val.popCount(ty, zcu); - - if (is_negative) { - break :edge .{ pop_count == 1, false }; - } else { - break :edge .{ false, pop_count == req_bits - sign_adj }; - } - }; - - assert(fits); - return switch (op) { - .lt => if (is_max) false else null, - .lte => if (is_min) true else null, - .gt => if (is_min) false else null, - .gte => if (is_max) true else null, - .eq, .neq => unreachable, - }; + return null; } /// Asserts that lhs and rhs types are both vectors. diff --git a/src/Type.zig b/src/Type.zig index 2048fc852b72..0dfd12cb350f 100644 --- a/src/Type.zig +++ b/src/Type.zig @@ -3040,8 +3040,7 @@ pub fn minInt(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value { pub fn minIntScalar(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value { const zcu = pt.zcu; const info = ty.intInfo(zcu); - if (info.signedness == .unsigned) return pt.intValue(dest_ty, 0); - if (info.bits == 0) return pt.intValue(dest_ty, -1); + if (info.signedness == .unsigned or info.bits == 0) return pt.intValue(dest_ty, 0); if (std.math.cast(u6, info.bits - 1)) |shift| { const n = @as(i64, std.math.minInt(i64)) >> (63 - shift); @@ -3072,10 +3071,7 @@ pub fn maxIntScalar(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value { const info = ty.intInfo(pt.zcu); switch (info.bits) { - 0 => return switch (info.signedness) { - .signed => try pt.intValue(dest_ty, -1), - .unsigned => try pt.intValue(dest_ty, 0), - }, + 0 => return pt.intValue(dest_ty, 0), 1 => return switch (info.signedness) { .signed => try pt.intValue(dest_ty, 0), .unsigned => try pt.intValue(dest_ty, 1), diff --git a/src/Value.zig b/src/Value.zig index 575a84f10397..d6e533429a47 100644 --- a/src/Value.zig +++ b/src/Value.zig @@ -191,7 
+191,7 @@ pub fn toBigIntAdvanced( comptime strat: ResolveStrat, zcu: *Zcu, tid: strat.Tid(), -) Zcu.CompileError!BigIntConst { +) Zcu.SemaError!BigIntConst { const ip = &zcu.intern_pool; return switch (val.toIntern()) { .bool_false => BigIntMutable.init(&space.limbs, 0).toConst(), @@ -1038,7 +1038,7 @@ pub fn orderAgainstZeroInner( comptime strat: ResolveStrat, zcu: *Zcu, tid: strat.Tid(), -) Zcu.CompileError!std.math.Order { +) Zcu.SemaError!std.math.Order { return switch (lhs.toIntern()) { .bool_false => .eq, .bool_true => .gt, diff --git a/test/behavior/math.zig b/test/behavior/math.zig index 5ee07b9e9829..bbf87fc83409 100644 --- a/test/behavior/math.zig +++ b/test/behavior/math.zig @@ -1729,3 +1729,65 @@ test "@clz works on both vector and scalar inputs" { try std.testing.expectEqual(@as(u6, 31), a); try std.testing.expectEqual([_]u6{ 31, 31, 31, 31 }, b); } + +test "runtime comparison to NaN is comptime-known" { + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.cpu.arch.isArmOrThumb() and builtin.target.floatAbi() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234 + + const S = struct { + fn doTheTest(comptime F: type, x: F) void { + const nan = math.nan(F); + if (!(nan != x)) comptime unreachable; + if (nan == x) comptime unreachable; + if (nan > x) comptime unreachable; + if (nan < x) comptime unreachable; + if (nan >= x) comptime unreachable; + if (nan <= x) comptime unreachable; + } + }; + + S.doTheTest(f16, 123.0); + S.doTheTest(f32, 123.0); + 
S.doTheTest(f64, 123.0); + S.doTheTest(f128, 123.0); + comptime S.doTheTest(f16, 123.0); + comptime S.doTheTest(f32, 123.0); + comptime S.doTheTest(f64, 123.0); + comptime S.doTheTest(f128, 123.0); +} + +test "runtime int comparison to inf is comptime-known" { + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; + if (builtin.cpu.arch.isArmOrThumb() and builtin.target.floatAbi() == .soft) return error.SkipZigTest; // https://github.com/ziglang/zig/issues/21234 + + const S = struct { + fn doTheTest(comptime F: type, x: u32) void { + const inf = math.inf(F); + if (!(inf != x)) comptime unreachable; + if (inf == x) comptime unreachable; + if (x > inf) comptime unreachable; + if (x >= inf) comptime unreachable; + if (!(x < inf)) comptime unreachable; + if (!(x <= inf)) comptime unreachable; + } + }; + + S.doTheTest(f16, 123); + S.doTheTest(f32, 123); + S.doTheTest(f64, 123); + S.doTheTest(f128, 123); + comptime S.doTheTest(f16, 123); + comptime S.doTheTest(f32, 123); + comptime S.doTheTest(f64, 123); + comptime S.doTheTest(f128, 123); +} From feaee2ba170766cc905a6aa9c799f3105cdc8145 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20=27vesim=27=20Kuli=C5=84ski?= Date: Wed, 18 Sep 2024 05:10:36 +0200 Subject: [PATCH 155/202] cc: Add support for -Wp, --- src/clang_options_data.zig | 2 +- src/main.zig | 34 ++++++++++++++++++++++++++++++++++ tools/update_clang_options.zig | 4 ++++ 3 files changed, 39 insertions(+), 1 deletion(-) diff --git a/src/clang_options_data.zig b/src/clang_options_data.zig index 
ad07b45500e8..f41a4513338d 100644 --- a/src/clang_options_data.zig +++ b/src/clang_options_data.zig @@ -7472,7 +7472,7 @@ joinpd1("mtp="), .{ .name = "Wp,", .syntax = .comma_joined, - .zig_equivalent = .other, + .zig_equivalent = .wp, .pd1 = true, .pd2 = false, .psl = false, diff --git a/src/main.zig b/src/main.zig index 4a7f47710684..95f1ad569b0f 100644 --- a/src/main.zig +++ b/src/main.zig @@ -1791,6 +1791,7 @@ fn buildOutputType( var c_out_mode: ?COutMode = null; var out_path: ?[]const u8 = null; var is_shared_lib = false; + var preprocessor_args = std.ArrayList([]const u8).init(arena); var linker_args = std.ArrayList([]const u8).init(arena); var it = ClangArgIterator.init(arena, all_args); var emit_llvm = false; @@ -1946,6 +1947,24 @@ fn buildOutputType( is_shared_lib = true; }, .rdynamic => create_module.opts.rdynamic = true, + .wp => { + var split_it = mem.splitScalar(u8, it.only_arg, ','); + while (split_it.next()) |preprocessor_arg| { + if (preprocessor_arg.len >= 3 and + preprocessor_arg[0] == '-' and + preprocessor_arg[2] != '-') + { + if (mem.indexOfScalar(u8, preprocessor_arg, '=')) |equals_pos| { + const key = preprocessor_arg[0..equals_pos]; + const value = preprocessor_arg[equals_pos + 1 ..]; + try preprocessor_args.append(key); + try preprocessor_args.append(value); + continue; + } + } + try preprocessor_args.append(preprocessor_arg); + } + }, .wl => { var split_it = mem.splitScalar(u8, it.only_arg, ','); while (split_it.next()) |linker_arg| { @@ -2554,6 +2573,20 @@ fn buildOutputType( } } + // Parse preprocessor args. 
+ var preprocessor_args_it = ArgsIterator{ + .args = preprocessor_args.items, + }; + while (preprocessor_args_it.next()) |arg| { + if (mem.eql(u8, arg, "-MD") or mem.eql(u8, arg, "-MMD") or mem.eql(u8, arg, "-MT")) { + disable_c_depfile = true; + const cc_arg = try std.fmt.allocPrint(arena, "-Wp,{s},{s}", .{ arg, preprocessor_args_it.nextOrFatal() }); + try cc_argv.append(arena, cc_arg); + } else { + fatal("unsupported preprocessor arg: {s}", .{arg}); + } + } + if (mod_opts.sanitize_c) |wsc| { if (wsc and mod_opts.optimize_mode == .ReleaseFast) { mod_opts.optimize_mode = .ReleaseSafe; @@ -5771,6 +5804,7 @@ pub const ClangArgIterator = struct { shared, rdynamic, wl, + wp, preprocess_only, asm_only, optimize, diff --git a/tools/update_clang_options.zig b/tools/update_clang_options.zig index f21993fbd6fc..3d32e4c7e1b7 100644 --- a/tools/update_clang_options.zig +++ b/tools/update_clang_options.zig @@ -154,6 +154,10 @@ const known_options = [_]KnownOpt{ .name = "Wl,", .ident = "wl", }, + .{ + .name = "Wp,", + .ident = "wp", + }, .{ .name = "Xlinker", .ident = "for_linker", From 2111f4c38b4c91a2406da3a5cf578162c1cafc4d Mon Sep 17 00:00:00 2001 From: mlugg Date: Wed, 18 Sep 2024 17:57:54 +0100 Subject: [PATCH 156/202] Sema: mark export on owner nav when exporting function alias Resolves: #20847 --- src/Sema.zig | 22 +++++++++++----------- test/behavior/export_keyword.zig | 13 +++++++++++++ 2 files changed, 24 insertions(+), 11 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index fdf5eca76412..14a4061b00e6 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -6457,15 +6457,7 @@ fn zirExport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void if (ptr_info.byte_offset != 0) { return sema.fail(block, ptr_src, "TODO: export pointer in middle of value", .{}); } - try sema.ensureNavResolved(src, nav); - // Make sure to export the owner Nav if applicable. 
- const exported_nav = switch (ip.indexToKey(ip.getNav(nav).status.resolved.val)) { - .variable => |v| v.owner_nav, - .@"extern" => |e| e.owner_nav, - .func => |f| f.owner_nav, - else => nav, - }; - try sema.analyzeExport(block, src, options, exported_nav); + try sema.analyzeExport(block, src, options, nav); }, } } @@ -6475,7 +6467,7 @@ pub fn analyzeExport( block: *Block, src: LazySrcLoc, options: Zcu.Export.Options, - exported_nav_index: InternPool.Nav.Index, + orig_nav_index: InternPool.Nav.Index, ) !void { const gpa = sema.gpa; const pt = sema.pt; @@ -6485,7 +6477,15 @@ pub fn analyzeExport( if (options.linkage == .internal) return; - try sema.ensureNavResolved(src, exported_nav_index); + try sema.ensureNavResolved(src, orig_nav_index); + + const exported_nav_index = switch (ip.indexToKey(ip.getNav(orig_nav_index).status.resolved.val)) { + .variable => |v| v.owner_nav, + .@"extern" => |e| e.owner_nav, + .func => |f| f.owner_nav, + else => orig_nav_index, + }; + const exported_nav = ip.getNav(exported_nav_index); const export_ty = Type.fromInterned(exported_nav.typeOf(ip)); diff --git a/test/behavior/export_keyword.zig b/test/behavior/export_keyword.zig index 70839959d218..bb86c287ca9c 100644 --- a/test/behavior/export_keyword.zig +++ b/test/behavior/export_keyword.zig @@ -39,3 +39,16 @@ export fn testPackedStuff(a: *const PackedStruct, b: *const PackedUnion) void { b; } } + +test "export function alias" { + _ = struct { + fn foo_internal() callconv(.C) u32 { + return 123; + } + export const foo_exported = foo_internal; + }; + const Import = struct { + extern fn foo_exported() u32; + }; + try expect(Import.foo_exported() == 123); +} From 72fc1641786082cb82355dac5d357bc2398bd51e Mon Sep 17 00:00:00 2001 From: Linus Groh Date: Wed, 18 Sep 2024 23:33:23 +0100 Subject: [PATCH 157/202] std.os.linux: Fix tc_oflag_t for PowerPC std/os/linux.zig:6504:20: error: expected type 'os.linux.NLDLY__enum_11313', found 'comptime_int' --- lib/std/os/linux.zig | 12 ++++++------ 1 
file changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig index 9039acedee9e..16db9db5e2cd 100644 --- a/lib/std/os/linux.zig +++ b/lib/std/os/linux.zig @@ -6501,12 +6501,12 @@ pub const tc_oflag_t = if (is_ppc) packed struct(tcflag_t) { ONLRET: bool = false, OFILL: bool = false, OFDEL: bool = false, - NLDLY: NLDLY = 0, - TABDLY: TABDLY = 0, - CRDLY: CRDLY = 0, - FFDLY: FFDLY = 0, - BSDLY: BSDLY = 0, - VTDLY: VTDLY = 0, + NLDLY: NLDLY = .NL0, + TABDLY: TABDLY = .TAB0, + CRDLY: CRDLY = .CR0, + FFDLY: FFDLY = .FF0, + BSDLY: BSDLY = .BS0, + VTDLY: VTDLY = .VT0, _17: u15 = 0, } else if (is_sparc) packed struct(tcflag_t) { OPOST: bool = false, From 8b82a0e0fc5cac9a5376f06955ca4419b9a9923f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Juan=20Juli=C3=A1n=20Merelo=20Guerv=C3=B3s?= Date: Fri, 20 Sep 2024 02:06:23 +0200 Subject: [PATCH 158/202] std.DynamicBitSet: remove wrong and useless comments (#21418) --- lib/std/bit_set.zig | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/std/bit_set.zig b/lib/std/bit_set.zig index 1e2a0179ef9e..7ff97cfa9dac 100644 --- a/lib/std/bit_set.zig +++ b/lib/std/bit_set.zig @@ -1043,10 +1043,7 @@ pub const DynamicBitSet = struct { /// The integer type used to shift a mask in this bit set pub const ShiftInt = std.math.Log2Int(MaskInt); - /// The allocator used by this bit set allocator: Allocator, - - /// The number of valid items in this bit set unmanaged: DynamicBitSetUnmanaged = .{}, /// Creates a bit set with no elements present. From dd095e506ab647e79b85541e23b3f696ce999d2f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Fri, 23 Aug 2024 01:09:12 +0200 Subject: [PATCH 159/202] cmake: Update to LLVM 19. 
Co-authored-by: David Rubin --- CMakeLists.txt | 6 +++--- cmake/Findclang.cmake | 9 +++++---- cmake/Findlld.cmake | 32 ++++++++++++++++---------------- cmake/Findllvm.cmake | 14 +++++++------- 4 files changed, 31 insertions(+), 30 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 781076ef6159..51e2f7d77df1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -137,9 +137,9 @@ else() set(ZIG_SYSTEM_LIBCXX "stdc++" CACHE STRING "system libcxx name for build.zig") endif() -find_package(llvm 18) -find_package(clang 18) -find_package(lld 18) +find_package(llvm 19) +find_package(clang 19) +find_package(lld 19) if(ZIG_STATIC_ZLIB) if (MSVC) diff --git a/cmake/Findclang.cmake b/cmake/Findclang.cmake index c21fe188374c..6dbe13eccde5 100644 --- a/cmake/Findclang.cmake +++ b/cmake/Findclang.cmake @@ -17,10 +17,10 @@ find_path(CLANG_INCLUDE_DIRS NAMES clang/Frontend/ASTUnit.h if(${LLVM_LINK_MODE} STREQUAL "shared") find_library(CLANG_LIBRARIES NAMES - libclang-cpp.so.18 - libclang-cpp.so.18.1 - clang-cpp-18.0 - clang-cpp180 + libclang-cpp.so.19 + libclang-cpp.so.19.1 + clang-cpp-19.0 + clang-cpp190 clang-cpp NAMES_PER_DIR HINTS "${LLVM_LIBDIRS}" @@ -68,6 +68,7 @@ else() FIND_AND_ADD_CLANG_LIB(clangToolingCore) FIND_AND_ADD_CLANG_LIB(clangExtractAPI) FIND_AND_ADD_CLANG_LIB(clangSupport) + FIND_AND_ADD_CLANG_LIB(clangInstallAPI) endif() if (MSVC) diff --git a/cmake/Findlld.cmake b/cmake/Findlld.cmake index abe29b5fcefe..7c86aaed0711 100644 --- a/cmake/Findlld.cmake +++ b/cmake/Findlld.cmake @@ -9,21 +9,21 @@ find_path(LLD_INCLUDE_DIRS NAMES lld/Common/Driver.h HINTS ${LLVM_INCLUDE_DIRS} PATHS - /usr/lib/llvm-18/include - /usr/local/llvm180/include - /usr/local/llvm18/include - /usr/local/opt/llvm@18/include - /opt/homebrew/opt/llvm@18/include + /usr/lib/llvm-19/include + /usr/local/llvm190/include + /usr/local/llvm19/include + /usr/local/opt/llvm@19/include + /opt/homebrew/opt/llvm@19/include /mingw64/include) -find_library(LLD_LIBRARY NAMES lld-18.0 lld180 lld 
NAMES_PER_DIR +find_library(LLD_LIBRARY NAMES lld-19.0 lld190 lld NAMES_PER_DIR HINTS ${LLVM_LIBDIRS} PATHS - /usr/lib/llvm-18/lib - /usr/local/llvm180/lib - /usr/local/llvm18/lib - /usr/local/opt/llvm@18/lib - /opt/homebrew/opt/llvm@18/lib + /usr/lib/llvm-19/lib + /usr/local/llvm190/lib + /usr/local/llvm19/lib + /usr/local/opt/llvm@19/lib + /opt/homebrew/opt/llvm@19/lib ) if(EXISTS ${LLD_LIBRARY}) set(LLD_LIBRARIES ${LLD_LIBRARY}) @@ -34,11 +34,11 @@ else() HINTS ${LLVM_LIBDIRS} PATHS ${LLD_LIBDIRS} - /usr/lib/llvm-18/lib - /usr/local/llvm180/lib - /usr/local/llvm18/lib - /usr/local/opt/llvm@18/lib - /opt/homebrew/opt/llvm@18/lib + /usr/lib/llvm-19/lib + /usr/local/llvm190/lib + /usr/local/llvm19/lib + /usr/local/opt/llvm@19/lib + /opt/homebrew/opt/llvm@19/lib /mingw64/lib /c/msys64/mingw64/lib c:/msys64/mingw64/lib) diff --git a/cmake/Findllvm.cmake b/cmake/Findllvm.cmake index c4eb49fe7600..16a429fd28f1 100644 --- a/cmake/Findllvm.cmake +++ b/cmake/Findllvm.cmake @@ -17,12 +17,12 @@ if(ZIG_USE_LLVM_CONFIG) # terminate when the right LLVM version is not found. 
unset(LLVM_CONFIG_EXE CACHE) find_program(LLVM_CONFIG_EXE - NAMES llvm-config-18 llvm-config-18.0 llvm-config180 llvm-config18 llvm-config NAMES_PER_DIR + NAMES llvm-config-19 llvm-config-19.0 llvm-config190 llvm-config19 llvm-config NAMES_PER_DIR PATHS "/mingw64/bin" "/c/msys64/mingw64/bin" "c:/msys64/mingw64/bin" - "C:/Libraries/llvm-18.0.0/bin") + "C:/Libraries/llvm-19.0.0/bin") if ("${LLVM_CONFIG_EXE}" STREQUAL "LLVM_CONFIG_EXE-NOTFOUND") if (NOT LLVM_CONFIG_ERROR_MESSAGES STREQUAL "") @@ -40,9 +40,9 @@ if(ZIG_USE_LLVM_CONFIG) OUTPUT_STRIP_TRAILING_WHITESPACE) get_filename_component(LLVM_CONFIG_DIR "${LLVM_CONFIG_EXE}" DIRECTORY) - if("${LLVM_CONFIG_VERSION}" VERSION_LESS 18 OR "${LLVM_CONFIG_VERSION}" VERSION_EQUAL 19 OR "${LLVM_CONFIG_VERSION}" VERSION_GREATER 19) + if("${LLVM_CONFIG_VERSION}" VERSION_LESS 19 OR "${LLVM_CONFIG_VERSION}" VERSION_EQUAL 20 OR "${LLVM_CONFIG_VERSION}" VERSION_GREATER 20) # Save the error message, in case this is the last llvm-config we find - list(APPEND LLVM_CONFIG_ERROR_MESSAGES "expected LLVM 18.x but found ${LLVM_CONFIG_VERSION} using ${LLVM_CONFIG_EXE}") + list(APPEND LLVM_CONFIG_ERROR_MESSAGES "expected LLVM 19.x but found ${LLVM_CONFIG_VERSION} using ${LLVM_CONFIG_EXE}") # Ignore this directory and try the search again list(APPEND CMAKE_IGNORE_PATH "${LLVM_CONFIG_DIR}") @@ -63,12 +63,12 @@ if(ZIG_USE_LLVM_CONFIG) ERROR_VARIABLE LLVM_CONFIG_ERROR ERROR_STRIP_TRAILING_WHITESPACE) - if (LLVM_CONFIG_ERROR) + if (LLVM_CONFIG_ERROR) # Save the error message, in case this is the last llvm-config we find if (ZIG_SHARED_LLVM) - list(APPEND LLVM_CONFIG_ERROR_MESSAGES "LLVM 18.x found at ${LLVM_CONFIG_EXE} does not support linking as a shared library") + list(APPEND LLVM_CONFIG_ERROR_MESSAGES "LLVM 19.x found at ${LLVM_CONFIG_EXE} does not support linking as a shared library") else() - list(APPEND LLVM_CONFIG_ERROR_MESSAGES "LLVM 18.x found at ${LLVM_CONFIG_EXE} does not support linking as a static library") + list(APPEND 
LLVM_CONFIG_ERROR_MESSAGES "LLVM 19.x found at ${LLVM_CONFIG_EXE} does not support linking as a static library") endif() # Ignore this directory and try the search again From da8f81c78b5612464486172329a9162986eb5d6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Fri, 23 Aug 2024 01:22:23 +0200 Subject: [PATCH 160/202] compiler: Update LLVM/Clang driver files to LLVM/Clang 19. --- src/zig_clang_cc1_main.cpp | 122 ++++++++++++------------- src/zig_clang_cc1as_main.cpp | 52 ++++++++--- src/zig_clang_driver.cpp | 169 +++-------------------------------- src/zig_llvm-ar.cpp | 42 +++++---- 4 files changed, 130 insertions(+), 255 deletions(-) diff --git a/src/zig_clang_cc1_main.cpp b/src/zig_clang_cc1_main.cpp index e9d2c6aad371..f5e5fad36573 100644 --- a/src/zig_clang_cc1_main.cpp +++ b/src/zig_clang_cc1_main.cpp @@ -26,6 +26,7 @@ #include "clang/Frontend/Utils.h" #include "clang/FrontendTool/Utils.h" #include "llvm/ADT/Statistic.h" +#include "llvm/ADT/StringExtras.h" #include "llvm/Config/llvm-config.h" #include "llvm/LinkAllPasses.h" #include "llvm/MC/MCSubtargetInfo.h" @@ -39,7 +40,6 @@ #include "llvm/Support/ManagedStatic.h" #include "llvm/Support/Path.h" #include "llvm/Support/Process.h" -#include "llvm/Support/RISCVISAInfo.h" #include "llvm/Support/Signals.h" #include "llvm/Support/TargetSelect.h" #include "llvm/Support/TimeProfiler.h" @@ -48,6 +48,7 @@ #include "llvm/Target/TargetMachine.h" #include "llvm/TargetParser/AArch64TargetParser.h" #include "llvm/TargetParser/ARMTargetParser.h" +#include "llvm/TargetParser/RISCVISAInfo.h" #include #ifdef CLANG_HAVE_RLIMITS @@ -78,64 +79,6 @@ static void LLVMErrorHandler(void *UserData, const char *Message, } #ifdef CLANG_HAVE_RLIMITS -#if defined(__linux__) && defined(__PIE__) -static size_t getCurrentStackAllocation() { - // If we can't compute the current stack usage, allow for 512K of command - // line arguments and environment. 
- size_t Usage = 512 * 1024; - if (FILE *StatFile = fopen("/proc/self/stat", "r")) { - // We assume that the stack extends from its current address to the end of - // the environment space. In reality, there is another string literal (the - // program name) after the environment, but this is close enough (we only - // need to be within 100K or so). - unsigned long StackPtr, EnvEnd; - // Disable silly GCC -Wformat warning that complains about length - // modifiers on ignored format specifiers. We want to retain these - // for documentation purposes even though they have no effect. -#if defined(__GNUC__) && !defined(__clang__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wformat" -#endif - if (fscanf(StatFile, - "%*d %*s %*c %*d %*d %*d %*d %*d %*u %*lu %*lu %*lu %*lu %*lu " - "%*lu %*ld %*ld %*ld %*ld %*ld %*ld %*llu %*lu %*ld %*lu %*lu " - "%*lu %*lu %lu %*lu %*lu %*lu %*lu %*lu %*llu %*lu %*lu %*d %*d " - "%*u %*u %*llu %*lu %*ld %*lu %*lu %*lu %*lu %*lu %*lu %lu %*d", - &StackPtr, &EnvEnd) == 2) { -#if defined(__GNUC__) && !defined(__clang__) -#pragma GCC diagnostic pop -#endif - Usage = StackPtr < EnvEnd ? EnvEnd - StackPtr : StackPtr - EnvEnd; - } - fclose(StatFile); - } - return Usage; -} - -#include - -LLVM_ATTRIBUTE_NOINLINE -static void ensureStackAddressSpace() { - // Linux kernels prior to 4.1 will sometimes locate the heap of a PIE binary - // relatively close to the stack (they are only guaranteed to be 128MiB - // apart). This results in crashes if we happen to heap-allocate more than - // 128MiB before we reach our stack high-water mark. - // - // To avoid these crashes, ensure that we have sufficient virtual memory - // pages allocated before we start running. 
- size_t Curr = getCurrentStackAllocation(); - const int kTargetStack = DesiredStackSize - 256 * 1024; - if (Curr < kTargetStack) { - volatile char *volatile Alloc = - static_cast(alloca(kTargetStack - Curr)); - Alloc[0] = 0; - Alloc[kTargetStack - Curr - 1] = 0; - } -} -#else -static void ensureStackAddressSpace() {} -#endif - /// Attempt to ensure that we have at least 8MiB of usable stack space. static void ensureSufficientStack() { struct rlimit rlim; @@ -159,10 +102,6 @@ static void ensureSufficientStack() { rlim.rlim_cur != DesiredStackSize) return; } - - // We should now have a stack of size at least DesiredStackSize. Ensure - // that we can actually use that much, if necessary. - ensureStackAddressSpace(); } #else static void ensureSufficientStack() {} @@ -208,9 +147,9 @@ static int PrintSupportedExtensions(std::string TargetStr) { DescMap.insert({feature.Key, feature.Desc}); if (MachineTriple.isRISCV()) - llvm::riscvExtensionsHelp(DescMap); + llvm::RISCVISAInfo::printSupportedExtensions(DescMap); else if (MachineTriple.isAArch64()) - llvm::AArch64::PrintSupportedExtensions(DescMap); + llvm::AArch64::PrintSupportedExtensions(); else if (MachineTriple.isARM()) llvm::ARM::PrintSupportedExtensions(DescMap); else { @@ -223,6 +162,52 @@ static int PrintSupportedExtensions(std::string TargetStr) { return 0; } +static int PrintEnabledExtensions(const TargetOptions& TargetOpts) { + std::string Error; + const llvm::Target *TheTarget = + llvm::TargetRegistry::lookupTarget(TargetOpts.Triple, Error); + if (!TheTarget) { + llvm::errs() << Error; + return 1; + } + + // Create a target machine using the input features, the triple information + // and a dummy instance of llvm::TargetOptions. Note that this is _not_ the + // same as the `clang::TargetOptions` instance we have access to here. 
+ llvm::TargetOptions BackendOptions; + std::string FeaturesStr = llvm::join(TargetOpts.FeaturesAsWritten, ","); + std::unique_ptr TheTargetMachine( + TheTarget->createTargetMachine(TargetOpts.Triple, TargetOpts.CPU, FeaturesStr, BackendOptions, std::nullopt)); + const llvm::Triple &MachineTriple = TheTargetMachine->getTargetTriple(); + const llvm::MCSubtargetInfo *MCInfo = TheTargetMachine->getMCSubtargetInfo(); + + // Extract the feature names that are enabled for the given target. + // We do that by capturing the key from the set of SubtargetFeatureKV entries + // provided by MCSubtargetInfo, which match the '-target-feature' values. + const std::vector Features = + MCInfo->getEnabledProcessorFeatures(); + std::set EnabledFeatureNames; + for (const llvm::SubtargetFeatureKV &feature : Features) + EnabledFeatureNames.insert(feature.Key); + + if (MachineTriple.isAArch64()) + llvm::AArch64::printEnabledExtensions(EnabledFeatureNames); + else if (MachineTriple.isRISCV()) { + llvm::StringMap DescMap; + for (const llvm::SubtargetFeatureKV &feature : Features) + DescMap.insert({feature.Key, feature.Desc}); + llvm::RISCVISAInfo::printEnabledExtensions(MachineTriple.isArch64Bit(), + EnabledFeatureNames, DescMap); + } else { + // The option was already checked in Driver::HandleImmediateArgs, + // so we do not expect to get here if we are not a supported architecture. + assert(0 && "Unhandled triple for --print-enabled-extensions option."); + return 1; + } + + return 0; +} + int cc1_main(ArrayRef Argv, const char *Argv0, void *MainAddr) { ensureSufficientStack(); @@ -256,7 +241,8 @@ int cc1_main(ArrayRef Argv, const char *Argv0, void *MainAddr) { if (!Clang->getFrontendOpts().TimeTracePath.empty()) { llvm::timeTraceProfilerInitialize( - Clang->getFrontendOpts().TimeTraceGranularity, Argv0); + Clang->getFrontendOpts().TimeTraceGranularity, Argv0, + Clang->getFrontendOpts().TimeTraceVerbose); } // --print-supported-cpus takes priority over the actual compilation. 
if (Clang->getFrontendOpts().PrintSupportedCPUs) @@ -266,6 +252,10 @@ int cc1_main(ArrayRef Argv, const char *Argv0, void *MainAddr) { if (Clang->getFrontendOpts().PrintSupportedExtensions) return PrintSupportedExtensions(Clang->getTargetOpts().Triple); + // --print-enabled-extensions takes priority over the actual compilation. + if (Clang->getFrontendOpts().PrintEnabledExtensions) + return PrintEnabledExtensions(Clang->getTargetOpts()); + // Infer the builtin include path if unspecified. if (Clang->getHeaderSearchOpts().UseBuiltinIncludes && Clang->getHeaderSearchOpts().ResourceDir.empty()) diff --git a/src/zig_clang_cc1as_main.cpp b/src/zig_clang_cc1as_main.cpp index bc398fa0731f..b661a43c88b0 100644 --- a/src/zig_clang_cc1as_main.cpp +++ b/src/zig_clang_cc1as_main.cpp @@ -89,10 +89,17 @@ struct AssemblerInvocation { /// @{ std::vector IncludePaths; + LLVM_PREFERRED_TYPE(bool) unsigned NoInitialTextSection : 1; + LLVM_PREFERRED_TYPE(bool) unsigned SaveTemporaryLabels : 1; + LLVM_PREFERRED_TYPE(bool) unsigned GenDwarfForAssembly : 1; + LLVM_PREFERRED_TYPE(bool) unsigned RelaxELFRelocations : 1; + LLVM_PREFERRED_TYPE(bool) + unsigned SSE2AVX : 1; + LLVM_PREFERRED_TYPE(bool) unsigned Dwarf64 : 1; unsigned DwarfVersion; std::string DwarfDebugFlags; @@ -117,7 +124,9 @@ struct AssemblerInvocation { FT_Obj ///< Object file output. 
}; FileType OutputType; + LLVM_PREFERRED_TYPE(bool) unsigned ShowHelp : 1; + LLVM_PREFERRED_TYPE(bool) unsigned ShowVersion : 1; /// @} @@ -125,19 +134,28 @@ struct AssemblerInvocation { /// @{ unsigned OutputAsmVariant; + LLVM_PREFERRED_TYPE(bool) unsigned ShowEncoding : 1; + LLVM_PREFERRED_TYPE(bool) unsigned ShowInst : 1; /// @} /// @name Assembler Options /// @{ + LLVM_PREFERRED_TYPE(bool) unsigned RelaxAll : 1; + LLVM_PREFERRED_TYPE(bool) unsigned NoExecStack : 1; + LLVM_PREFERRED_TYPE(bool) unsigned FatalWarnings : 1; + LLVM_PREFERRED_TYPE(bool) unsigned NoWarn : 1; + LLVM_PREFERRED_TYPE(bool) unsigned NoTypeCheck : 1; + LLVM_PREFERRED_TYPE(bool) unsigned IncrementalLinkerCompatible : 1; + LLVM_PREFERRED_TYPE(bool) unsigned EmbedBitcode : 1; /// Whether to emit DWARF unwind info. @@ -145,8 +163,12 @@ struct AssemblerInvocation { // Whether to emit compact-unwind for non-canonical entries. // Note: maybe overriden by other constraints. + LLVM_PREFERRED_TYPE(bool) unsigned EmitCompactUnwindNonCanonical : 1; + LLVM_PREFERRED_TYPE(bool) + unsigned Crel : 1; + /// The name of the relocation model to use. 
std::string RelocationModel; @@ -177,6 +199,7 @@ struct AssemblerInvocation { ShowInst = 0; ShowEncoding = 0; RelaxAll = 0; + SSE2AVX = 0; NoExecStack = 0; FatalWarnings = 0; NoWarn = 0; @@ -187,6 +210,7 @@ struct AssemblerInvocation { EmbedBitcode = 0; EmitDwarfUnwind = EmitDwarfUnwindType::Default; EmitCompactUnwindNonCanonical = false; + Crel = false; } static bool CreateFromArgs(AssemblerInvocation &Res, @@ -267,6 +291,7 @@ bool AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts, } Opts.RelaxELFRelocations = !Args.hasArg(OPT_mrelax_relocations_no); + Opts.SSE2AVX = Args.hasArg(OPT_msse2avx); if (auto *DwarfFormatArg = Args.getLastArg(OPT_gdwarf64, OPT_gdwarf32)) Opts.Dwarf64 = DwarfFormatArg->getOption().matches(OPT_gdwarf64); Opts.DwarfVersion = getLastArgIntValue(Args, OPT_dwarf_version_EQ, 2, Diags); @@ -356,6 +381,7 @@ bool AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts, Opts.EmitCompactUnwindNonCanonical = Args.hasArg(OPT_femit_compact_unwind_non_canonical); + Opts.Crel = Args.hasArg(OPT_crel); Opts.AsSecureLogFile = Args.getLastArgValue(OPT_as_secure_log_file); @@ -409,8 +435,14 @@ static bool ExecuteAssemblerImpl(AssemblerInvocation &Opts, assert(MRI && "Unable to create target register info!"); MCTargetOptions MCOptions; + MCOptions.MCRelaxAll = Opts.RelaxAll; MCOptions.EmitDwarfUnwind = Opts.EmitDwarfUnwind; MCOptions.EmitCompactUnwindNonCanonical = Opts.EmitCompactUnwindNonCanonical; + MCOptions.MCSaveTempLabels = Opts.SaveTemporaryLabels; + MCOptions.Crel = Opts.Crel; + MCOptions.X86RelaxRelocations = Opts.RelaxELFRelocations; + MCOptions.X86Sse2Avx = Opts.SSE2AVX; + MCOptions.CompressDebugSections = Opts.CompressDebugSections; MCOptions.AsSecureLogFile = Opts.AsSecureLogFile; std::unique_ptr MAI( @@ -419,9 +451,7 @@ static bool ExecuteAssemblerImpl(AssemblerInvocation &Opts, // Ensure MCAsmInfo initialization occurs before any use, otherwise sections // may be created with a combination of default and explicit settings. 
- MAI->setCompressDebugSections(Opts.CompressDebugSections); - MAI->setRelaxELFRelocations(Opts.RelaxELFRelocations); bool IsBinary = Opts.OutputType == AssemblerInvocation::FT_Obj; if (Opts.OutputPath.empty()) @@ -465,8 +495,6 @@ static bool ExecuteAssemblerImpl(AssemblerInvocation &Opts, MOFI->setDarwinTargetVariantSDKVersion(Opts.DarwinTargetVariantSDKVersion); Ctx.setObjectFileInfo(MOFI.get()); - if (Opts.SaveTemporaryLabels) - Ctx.setAllowTemporaryLabels(false); if (Opts.GenDwarfForAssembly) Ctx.setGenDwarfForAssembly(true); if (!Opts.DwarfDebugFlags.empty()) @@ -503,6 +531,9 @@ static bool ExecuteAssemblerImpl(AssemblerInvocation &Opts, MCOptions.MCNoWarn = Opts.NoWarn; MCOptions.MCFatalWarnings = Opts.FatalWarnings; MCOptions.MCNoTypeCheck = Opts.NoTypeCheck; + MCOptions.ShowMCInst = Opts.ShowInst; + MCOptions.AsmVerbose = true; + MCOptions.MCUseDwarfDirectory = MCTargetOptions::EnableDwarfDirectory; MCOptions.ABIName = Opts.TargetABI; // FIXME: There is a bit of code duplication with addPassesToEmitFile. 
@@ -517,10 +548,8 @@ static bool ExecuteAssemblerImpl(AssemblerInvocation &Opts, TheTarget->createMCAsmBackend(*STI, *MRI, MCOptions)); auto FOut = std::make_unique(*Out); - Str.reset(TheTarget->createAsmStreamer( - Ctx, std::move(FOut), /*asmverbose*/ true, - /*useDwarfDirectory*/ true, IP, std::move(CE), std::move(MAB), - Opts.ShowInst)); + Str.reset(TheTarget->createAsmStreamer(Ctx, std::move(FOut), IP, + std::move(CE), std::move(MAB))); } else if (Opts.OutputType == AssemblerInvocation::FT_Null) { Str.reset(createNullStreamer(Ctx)); } else { @@ -543,9 +572,7 @@ static bool ExecuteAssemblerImpl(AssemblerInvocation &Opts, Triple T(Opts.Triple); Str.reset(TheTarget->createMCObjectStreamer( - T, Ctx, std::move(MAB), std::move(OW), std::move(CE), *STI, - Opts.RelaxAll, Opts.IncrementalLinkerCompatible, - /*DWARFMustBeAtTheEnd*/ true)); + T, Ctx, std::move(MAB), std::move(OW), std::move(CE), *STI)); Str.get()->initSections(Opts.NoExecStack, *STI); } @@ -558,9 +585,6 @@ static bool ExecuteAssemblerImpl(AssemblerInvocation &Opts, Str.get()->emitZeros(1); } - // Assembly to object compilation should leverage assembly info. 
- Str->setUseAssemblerInfoForParsing(true); - bool Failed = false; std::unique_ptr Parser( diff --git a/src/zig_clang_driver.cpp b/src/zig_clang_driver.cpp index 9e4e471e9443..8bf1d5385765 100644 --- a/src/zig_clang_driver.cpp +++ b/src/zig_clang_driver.cpp @@ -28,6 +28,7 @@ #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringSet.h" #include "llvm/Option/ArgList.h" #include "llvm/Option/OptTable.h" #include "llvm/Option/Option.h" @@ -41,7 +42,6 @@ #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/Process.h" #include "llvm/Support/Program.h" -#include "llvm/Support/Regex.h" #include "llvm/Support/Signals.h" #include "llvm/Support/StringSaver.h" #include "llvm/Support/TargetSelect.h" @@ -73,136 +73,8 @@ std::string GetExecutablePath(const char *Argv0, bool CanonicalPrefixes) { return llvm::sys::fs::getMainExecutable(Argv0, P); } -static const char *GetStableCStr(std::set &SavedStrings, - StringRef S) { - return SavedStrings.insert(std::string(S)).first->c_str(); -} - -/// ApplyOneQAOverride - Apply a list of edits to the input argument lists. -/// -/// The input string is a space separated list of edits to perform, -/// they are applied in order to the input argument lists. Edits -/// should be one of the following forms: -/// -/// '#': Silence information about the changes to the command line arguments. -/// -/// '^': Add FOO as a new argument at the beginning of the command line. -/// -/// '+': Add FOO as a new argument at the end of the command line. -/// -/// 's/XXX/YYY/': Substitute the regular expression XXX with YYY in the command -/// line. -/// -/// 'xOPTION': Removes all instances of the literal argument OPTION. -/// -/// 'XOPTION': Removes all instances of the literal argument OPTION, -/// and the following argument. -/// -/// 'Ox': Removes all flags matching 'O' or 'O[sz0-9]' and adds 'Ox' -/// at the end of the command line. 
-/// -/// \param OS - The stream to write edit information to. -/// \param Args - The vector of command line arguments. -/// \param Edit - The override command to perform. -/// \param SavedStrings - Set to use for storing string representations. -static void ApplyOneQAOverride(raw_ostream &OS, - SmallVectorImpl &Args, - StringRef Edit, - std::set &SavedStrings) { - // This does not need to be efficient. - - if (Edit[0] == '^') { - const char *Str = - GetStableCStr(SavedStrings, Edit.substr(1)); - OS << "### Adding argument " << Str << " at beginning\n"; - Args.insert(Args.begin() + 1, Str); - } else if (Edit[0] == '+') { - const char *Str = - GetStableCStr(SavedStrings, Edit.substr(1)); - OS << "### Adding argument " << Str << " at end\n"; - Args.push_back(Str); - } else if (Edit[0] == 's' && Edit[1] == '/' && Edit.ends_with("/") && - Edit.slice(2, Edit.size() - 1).contains('/')) { - StringRef MatchPattern = Edit.substr(2).split('/').first; - StringRef ReplPattern = Edit.substr(2).split('/').second; - ReplPattern = ReplPattern.slice(0, ReplPattern.size()-1); - - for (unsigned i = 1, e = Args.size(); i != e; ++i) { - // Ignore end-of-line response file markers - if (Args[i] == nullptr) - continue; - std::string Repl = llvm::Regex(MatchPattern).sub(ReplPattern, Args[i]); - - if (Repl != Args[i]) { - OS << "### Replacing '" << Args[i] << "' with '" << Repl << "'\n"; - Args[i] = GetStableCStr(SavedStrings, Repl); - } - } - } else if (Edit[0] == 'x' || Edit[0] == 'X') { - auto Option = Edit.substr(1); - for (unsigned i = 1; i < Args.size();) { - if (Option == Args[i]) { - OS << "### Deleting argument " << Args[i] << '\n'; - Args.erase(Args.begin() + i); - if (Edit[0] == 'X') { - if (i < Args.size()) { - OS << "### Deleting argument " << Args[i] << '\n'; - Args.erase(Args.begin() + i); - } else - OS << "### Invalid X edit, end of command line!\n"; - } - } else - ++i; - } - } else if (Edit[0] == 'O') { - for (unsigned i = 1; i < Args.size();) { - const char *A = Args[i]; 
- // Ignore end-of-line response file markers - if (A == nullptr) - continue; - if (A[0] == '-' && A[1] == 'O' && - (A[2] == '\0' || - (A[3] == '\0' && (A[2] == 's' || A[2] == 'z' || - ('0' <= A[2] && A[2] <= '9'))))) { - OS << "### Deleting argument " << Args[i] << '\n'; - Args.erase(Args.begin() + i); - } else - ++i; - } - OS << "### Adding argument " << Edit << " at end\n"; - Args.push_back(GetStableCStr(SavedStrings, '-' + Edit.str())); - } else { - OS << "### Unrecognized edit: " << Edit << "\n"; - } -} - -/// ApplyQAOverride - Apply a space separated list of edits to the -/// input argument lists. See ApplyOneQAOverride. -static void ApplyQAOverride(SmallVectorImpl &Args, - const char *OverrideStr, - std::set &SavedStrings) { - raw_ostream *OS = &llvm::errs(); - - if (OverrideStr[0] == '#') { - ++OverrideStr; - OS = &llvm::nulls(); - } - - *OS << "### CCC_OVERRIDE_OPTIONS: " << OverrideStr << "\n"; - - // This does not need to be efficient. - - const char *S = OverrideStr; - while (*S) { - const char *End = ::strchr(S, ' '); - if (!End) - End = S + strlen(S); - if (End != S) - ApplyOneQAOverride(*OS, Args, std::string(S, End), SavedStrings); - S = End; - if (*S != '\0') - ++S; - } +static const char *GetStableCStr(llvm::StringSet<> &SavedStrings, StringRef S) { + return SavedStrings.insert(S).first->getKeyData(); } extern int cc1_main(ArrayRef Argv, const char *Argv0, @@ -212,7 +84,7 @@ extern int cc1as_main(ArrayRef Argv, const char *Argv0, static void insertTargetAndModeArgs(const ParsedClangName &NameParts, SmallVectorImpl &ArgVector, - std::set &SavedStrings) { + llvm::StringSet<> &SavedStrings) { // Put target and mode arguments at the start of argument list so that // arguments specified in command line could override them. Avoid putting // them at index 0, as an option like '-cc1' must remain the first. 
@@ -320,28 +192,6 @@ static void FixupDiagPrefixExeName(TextDiagnosticPrinter *DiagClient, DiagClient->setPrefix(std::string(ExeBasename)); } -static void SetInstallDir(SmallVectorImpl &argv, - Driver &TheDriver, bool CanonicalPrefixes) { - // Attempt to find the original path used to invoke the driver, to determine - // the installed path. We do this manually, because we want to support that - // path being a symlink. - SmallString<128> InstalledPath(argv[0]); - - // Do a PATH lookup, if there are no directory components. - if (llvm::sys::path::filename(InstalledPath) == InstalledPath) - if (llvm::ErrorOr Tmp = llvm::sys::findProgramByName( - llvm::sys::path::filename(InstalledPath.str()))) - InstalledPath = *Tmp; - - // FIXME: We don't actually canonicalize this, we just make it absolute. - if (CanonicalPrefixes) - llvm::sys::fs::make_absolute(InstalledPath); - - StringRef InstalledPathParent(llvm::sys::path::parent_path(InstalledPath)); - if (llvm::sys::fs::exists(InstalledPathParent)) - TheDriver.setInstalledDir(InstalledPathParent); -} - static int ExecuteCC1Tool(SmallVectorImpl &ArgV, const llvm::ToolContext &ToolContext) { // If we call the cc1 tool from the clangDriver library (through @@ -363,8 +213,9 @@ static int ExecuteCC1Tool(SmallVectorImpl &ArgV, if (Tool == "-cc1as") return cc1as_main(ArrayRef(ArgV).slice(2), ArgV[0], GetExecutablePathVP); // Reject unknown tools. - llvm::errs() << "error: unknown integrated tool '" << Tool << "'. " - << "Valid tools include '-cc1' and '-cc1as'.\n"; + llvm::errs() + << "error: unknown integrated tool '" << Tool << "'. " + << "Valid tools include '-cc1' and '-cc1as'.\n"; return 1; } @@ -435,12 +286,13 @@ static int clang_main(int Argc, char **Argv, const llvm::ToolContext &ToolContex } } - std::set SavedStrings; + llvm::StringSet<> SavedStrings; // Handle CCC_OVERRIDE_OPTIONS, used for editing a command line behind the // scenes. 
if (const char *OverrideStr = ::getenv("CCC_OVERRIDE_OPTIONS")) { // FIXME: Driver shouldn't take extra initial argument. - ApplyQAOverride(Args, OverrideStr, SavedStrings); + driver::applyOverrideOptions(Args, OverrideStr, SavedStrings, + &llvm::errs()); } std::string Path = GetExecutablePath(ToolContext.Path, CanonicalPrefixes); @@ -478,7 +330,6 @@ static int clang_main(int Argc, char **Argv, const llvm::ToolContext &ToolContex ProcessWarningOptions(Diags, *DiagOpts, /*ReportDiags=*/false); Driver TheDriver(Path, llvm::sys::getDefaultTargetTriple(), Diags); - SetInstallDir(Args, TheDriver, CanonicalPrefixes); auto TargetAndMode = ToolChain::getTargetAndModeFromProgramName(ProgName); TheDriver.setTargetAndMode(TargetAndMode); // If -canonical-prefixes is set, GetExecutablePath will have resolved Path diff --git a/src/zig_llvm-ar.cpp b/src/zig_llvm-ar.cpp index 08b67eb02963..7353ca5616e9 100644 --- a/src/zig_llvm-ar.cpp +++ b/src/zig_llvm-ar.cpp @@ -65,7 +65,7 @@ static void printRanLibHelp(StringRef ToolName) { << "USAGE: " + ToolName + " archive...\n\n" << "OPTIONS:\n" << " -h --help - Display available options\n" - << " -v --version - Display the version of this program\n" + << " -V --version - Display the version of this program\n" << " -D - Use zero for timestamps and uids/gids " "(default)\n" << " -U - Use actual timestamps and uids/gids\n" @@ -82,6 +82,7 @@ static void printArHelp(StringRef ToolName) { =darwin - darwin =bsd - bsd =bigarchive - big archive (AIX OS) + =coff - coff --plugin= - ignored for compatibility -h --help - display this help and exit --output - the directory to extract archive members to @@ -193,7 +194,7 @@ static SmallVector PositionalArgs; static bool MRI; namespace { -enum Format { Default, GNU, BSD, DARWIN, BIGARCHIVE, Unknown }; +enum Format { Default, GNU, COFF, BSD, DARWIN, BIGARCHIVE, Unknown }; } static Format FormatType = Default; @@ -670,7 +671,7 @@ Expected> getAsBinary(const Archive::Child &C, } template static bool 
isValidInBitMode(const A &Member) { - if (object::Archive::getDefaultKindForHost() != object::Archive::K_AIXBIG) + if (object::Archive::getDefaultKind() != object::Archive::K_AIXBIG) return true; LLVMContext Context; Expected> BinOrErr = getAsBinary(Member, &Context); @@ -1025,25 +1026,35 @@ static void performWriteOperation(ArchiveOperation Operation, Kind = object::Archive::K_GNU; else if (OldArchive) { Kind = OldArchive->kind(); - if (Kind == object::Archive::K_BSD) { - auto InferredKind = object::Archive::K_BSD; + std::optional AltKind; + if (Kind == object::Archive::K_BSD) + AltKind = object::Archive::K_DARWIN; + else if (Kind == object::Archive::K_GNU && !OldArchive->hasSymbolTable()) + // If there is no symbol table, we can't tell GNU from COFF format + // from the old archive type. + AltKind = object::Archive::K_COFF; + if (AltKind) { + auto InferredKind = Kind; if (NewMembersP && !NewMembersP->empty()) InferredKind = NewMembersP->front().detectKindFromObject(); else if (!NewMembers.empty()) InferredKind = NewMembers.front().detectKindFromObject(); - if (InferredKind == object::Archive::K_DARWIN) - Kind = object::Archive::K_DARWIN; + if (InferredKind == AltKind) + Kind = *AltKind; } } else if (NewMembersP) Kind = !NewMembersP->empty() ? NewMembersP->front().detectKindFromObject() - : object::Archive::getDefaultKindForHost(); + : object::Archive::getDefaultKind(); else Kind = !NewMembers.empty() ? NewMembers.front().detectKindFromObject() - : object::Archive::getDefaultKindForHost(); + : object::Archive::getDefaultKind(); break; case GNU: Kind = object::Archive::K_GNU; break; + case COFF: + Kind = object::Archive::K_COFF; + break; case BSD: if (Thin) fail("only the gnu format has a thin mode"); @@ -1331,7 +1342,7 @@ static int ar_main(int argc, char **argv) { // Get BitMode from enviorment variable "OBJECT_MODE" for AIX OS, if // specified. 
- if (object::Archive::getDefaultKindForHost() == object::Archive::K_AIXBIG) { + if (object::Archive::getDefaultKind() == object::Archive::K_AIXBIG) { BitMode = getBitMode(getenv("OBJECT_MODE")); if (BitMode == BitModeTy::Unknown) BitMode = BitModeTy::Bit32; @@ -1376,6 +1387,7 @@ static int ar_main(int argc, char **argv) { .Case("darwin", DARWIN) .Case("bsd", BSD) .Case("bigarchive", BIGARCHIVE) + .Case("coff", COFF) .Default(Unknown); if (FormatType == Unknown) fail(std::string("Invalid format ") + Match); @@ -1392,8 +1404,7 @@ static int ar_main(int argc, char **argv) { continue; if (strncmp(*ArgIt, "-X", 2) == 0) { - if (object::Archive::getDefaultKindForHost() == - object::Archive::K_AIXBIG) { + if (object::Archive::getDefaultKind() == object::Archive::K_AIXBIG) { Match = *(*ArgIt + 2) != '\0' ? *ArgIt + 2 : *(++ArgIt); BitMode = getBitMode(Match); if (BitMode == BitModeTy::Unknown) @@ -1428,12 +1439,11 @@ static int ranlib_main(int argc, char **argv) { } else if (arg.front() == 'h') { printHelpMessage(); return 0; - } else if (arg.front() == 'v') { + } else if (arg.front() == 'V') { cl::PrintVersionMessage(); return 0; } else if (arg.front() == 'X') { - if (object::Archive::getDefaultKindForHost() == - object::Archive::K_AIXBIG) { + if (object::Archive::getDefaultKind() == object::Archive::K_AIXBIG) { HasAIXXOption = true; arg.consume_front("X"); const char *Xarg = arg.data(); @@ -1464,7 +1474,7 @@ static int ranlib_main(int argc, char **argv) { } } - if (object::Archive::getDefaultKindForHost() == object::Archive::K_AIXBIG) { + if (object::Archive::getDefaultKind() == object::Archive::K_AIXBIG) { // If not specify -X option, get BitMode from enviorment variable // "OBJECT_MODE" for AIX OS if specify. if (!HasAIXXOption) { From 973ebeb610277334b014a040ccb0b951cdc38f1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Fri, 23 Aug 2024 03:13:37 +0200 Subject: [PATCH 161/202] zig_llvm: Update to LLVM 19. 
--- src/codegen/llvm/bindings.zig | 1 + src/zig_llvm.cpp | 7 +++++-- src/zig_llvm.h | 6 +++++- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/src/codegen/llvm/bindings.zig b/src/codegen/llvm/bindings.zig index a002dbcd8a53..07183b994fd0 100644 --- a/src/codegen/llvm/bindings.zig +++ b/src/codegen/llvm/bindings.zig @@ -352,6 +352,7 @@ pub const OSType = enum(c_int) { ELFIAMCU, TvOS, WatchOS, + BridgeOS, DriverKit, XROS, Mesa3D, diff --git a/src/zig_llvm.cpp b/src/zig_llvm.cpp index 351b28df6b58..d2f68a6d4992 100644 --- a/src/zig_llvm.cpp +++ b/src/zig_llvm.cpp @@ -478,7 +478,7 @@ bool ZigLLVMWriteImportLibrary(const char *def_path, const ZigLLVM_ArchType arch return true; } - // The exports-juggling code below is ripped from LLVM's DllToolDriver.cpp + // The exports-juggling code below is ripped from LLVM's DlltoolDriver.cpp // If ExtName is set (if the "ExtName = Name" syntax was used), overwrite // Name with ExtName and clear ExtName. When only creating an import @@ -494,7 +494,7 @@ bool ZigLLVMWriteImportLibrary(const char *def_path, const ZigLLVM_ArchType arch if (machine == COFF::IMAGE_FILE_MACHINE_I386 && kill_at) { for (object::COFFShortExport& E : def->Exports) { - if (!E.AliasTarget.empty() || (!E.Name.empty() && E.Name[0] == '?')) + if (!E.ImportName.empty() || (!E.Name.empty() && E.Name[0] == '?')) continue; E.SymbolName = E.Name; // Trim off the trailing decoration. 
Symbols will always have a @@ -692,6 +692,7 @@ static_assert((Triple::OSType)ZigLLVM_PS4 == Triple::PS4, ""); static_assert((Triple::OSType)ZigLLVM_ELFIAMCU == Triple::ELFIAMCU, ""); static_assert((Triple::OSType)ZigLLVM_TvOS == Triple::TvOS, ""); static_assert((Triple::OSType)ZigLLVM_WatchOS == Triple::WatchOS, ""); +static_assert((Triple::OSType)ZigLLVM_BridgeOS == Triple::BridgeOS, ""); static_assert((Triple::OSType)ZigLLVM_DriverKit == Triple::DriverKit, ""); static_assert((Triple::OSType)ZigLLVM_XROS == Triple::XROS, ""); static_assert((Triple::OSType)ZigLLVM_Mesa3D == Triple::Mesa3D, ""); @@ -746,7 +747,9 @@ static_assert((Triple::EnvironmentType)ZigLLVM_Miss == Triple::Miss, ""); static_assert((Triple::EnvironmentType)ZigLLVM_Callable == Triple::Callable, ""); static_assert((Triple::EnvironmentType)ZigLLVM_Mesh == Triple::Mesh, ""); static_assert((Triple::EnvironmentType)ZigLLVM_Amplification == Triple::Amplification, ""); +static_assert((Triple::EnvironmentType)ZigLLVM_OpenCL == Triple::OpenCL, ""); static_assert((Triple::EnvironmentType)ZigLLVM_OpenHOS == Triple::OpenHOS, ""); +static_assert((Triple::EnvironmentType)ZigLLVM_PAuthTest == Triple::PAuthTest, ""); static_assert((Triple::EnvironmentType)ZigLLVM_LastEnvironmentType == Triple::LastEnvironmentType, ""); static_assert((Triple::ObjectFormatType)ZigLLVM_UnknownObjectFormat == Triple::UnknownObjectFormat, ""); diff --git a/src/zig_llvm.h b/src/zig_llvm.h index e831e9cf8bae..2eb6ae290ff5 100644 --- a/src/zig_llvm.h +++ b/src/zig_llvm.h @@ -280,6 +280,7 @@ enum ZigLLVM_OSType { ZigLLVM_ELFIAMCU, ZigLLVM_TvOS, // Apple tvOS ZigLLVM_WatchOS, // Apple watchOS + ZigLLVM_BridgeOS, // Apple bridgeOS ZigLLVM_DriverKit, // Apple DriverKit ZigLLVM_XROS, // Apple XROS ZigLLVM_Mesa3D, @@ -340,9 +341,12 @@ enum ZigLLVM_EnvironmentType { ZigLLVM_Callable, ZigLLVM_Mesh, ZigLLVM_Amplification, + ZigLLVM_OpenCL, ZigLLVM_OpenHOS, - ZigLLVM_LastEnvironmentType = ZigLLVM_OpenHOS + ZigLLVM_PAuthTest, + + 
ZigLLVM_LastEnvironmentType = ZigLLVM_PAuthTest }; enum ZigLLVM_ObjectFormatType { From 41e5acd89d17b9b032377ba1d5f4d65d965e4d2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Fri, 23 Aug 2024 04:32:32 +0200 Subject: [PATCH 162/202] zig_clang: Update to Clang 19. Co-authored-by: David Rubin --- src/clang.zig | 27 +++++++++++++++++++--- src/translate_c.zig | 9 ++++++-- src/zig_clang.cpp | 56 +++++++++++++++++++++++++++++++++++++-------- src/zig_clang.h | 25 +++++++++++++++++--- 4 files changed, 100 insertions(+), 17 deletions(-) diff --git a/src/clang.zig b/src/clang.zig index 04c142b3e338..cb58a92bd040 100644 --- a/src/clang.zig +++ b/src/clang.zig @@ -114,14 +114,21 @@ pub const APFloatBaseSemantics = enum(c_int) { PPCDoubleDouble, Float8E5M2, Float8E5M2FNUZ, + Float8E4M3, Float8E4M3FN, Float8E4M3FNUZ, Float8E4M3B11FNUZ, FloatTF32, + Float6E3M2FN, + Float6E2M3FN, + Float4E2M1FN, x87DoubleExtended, }; pub const APInt = opaque { + pub const free = ZigClangAPInt_free; + extern fn ZigClangAPInt_free(*const APInt) void; + pub fn getLimitedValue(self: *const APInt, comptime T: type) T { return @as(T, @truncate(ZigClangAPInt_getLimitedValue(self, std.math.maxInt(T)))); } @@ -337,7 +344,7 @@ pub const ConstantArrayType = opaque { extern fn ZigClangConstantArrayType_getElementType(*const ConstantArrayType) QualType; pub const getSize = ZigClangConstantArrayType_getSize; - extern fn ZigClangConstantArrayType_getSize(*const ConstantArrayType) *const APInt; + extern fn ZigClangConstantArrayType_getSize(*const ConstantArrayType, **const APInt) void; }; pub const ConstantExpr = opaque {}; @@ -1135,6 +1142,7 @@ pub const TypeClass = enum(c_int) { Adjusted, Decayed, ConstantArray, + ArrayParameter, DependentSizedArray, IncompleteArray, VariableArray, @@ -1143,6 +1151,7 @@ pub const TypeClass = enum(c_int) { BTFTagAttributed, BitInt, BlockPointer, + CountAttributed, Builtin, Complex, Decltype, @@ -1167,6 +1176,7 @@ pub const TypeClass = enum(c_int) { 
ObjCInterface, ObjCTypeParam, PackExpansion, + PackIndexing, Paren, Pipe, Pointer, @@ -1211,6 +1221,7 @@ const StmtClass = enum(c_int) { PredefinedExprClass, ParenListExprClass, ParenExprClass, + PackIndexingExprClass, PackExpansionExprClass, UnresolvedMemberExprClass, UnresolvedLookupExprClass, @@ -1233,7 +1244,6 @@ const StmtClass = enum(c_int) { ObjCArrayLiteralClass, OMPIteratorExprClass, OMPArrayShapingExprClass, - OMPArraySectionExprClass, NoInitExprClass, MemberExprClass, MatrixSubscriptExprClass, @@ -1254,6 +1264,7 @@ const StmtClass = enum(c_int) { FixedPointLiteralClass, ExtVectorElementExprClass, ExpressionTraitExprClass, + EmbedExprClass, DesignatedInitUpdateExprClass, DesignatedInitExprClass, DependentScopeDeclRefExprClass, @@ -1311,6 +1322,7 @@ const StmtClass = enum(c_int) { AsTypeExprClass, ArrayTypeTraitExprClass, ArraySubscriptExprClass, + ArraySectionExprClass, ArrayInitLoopExprClass, ArrayInitIndexExprClass, AddrLabelExprClass, @@ -1325,6 +1337,8 @@ const StmtClass = enum(c_int) { SEHFinallyStmtClass, SEHExceptStmtClass, ReturnStmtClass, + OpenACCLoopConstructClass, + OpenACCComputeConstructClass, ObjCForCollectionStmtClass, ObjCAutoreleasePoolStmtClass, ObjCAtTryStmtClass, @@ -1360,6 +1374,8 @@ const StmtClass = enum(c_int) { OMPMaskedDirectiveClass, OMPUnrollDirectiveClass, OMPTileDirectiveClass, + OMPReverseDirectiveClass, + OMPInterchangeDirectiveClass, OMPTeamsGenericLoopDirectiveClass, OMPTeamsDistributeSimdDirectiveClass, OMPTeamsDistributeParallelForSimdDirectiveClass, @@ -1496,13 +1512,13 @@ pub const CK = enum(c_int) { pub const DeclKind = enum(c_int) { TranslationUnit, + TopLevelStmt, RequiresExprBody, LinkageSpec, ExternCContext, Export, Captured, Block, - TopLevelStmt, StaticAssert, PragmaDetectMismatch, PragmaComment, @@ -2010,6 +2026,7 @@ pub const BuiltinTypeKind = enum(c_int) { RvvBFloat16m2x4, RvvBFloat16m4x2, WasmExternRef, + AMDGPUBufferRsrc, Void, Bool, Char_U, @@ -2075,6 +2092,7 @@ pub const BuiltinTypeKind = enum(c_int) { 
Dependent, Overload, BoundMember, + UnresolvedTemplate, PseudoObject, UnknownAny, BuiltinFn, @@ -2108,6 +2126,8 @@ pub const CallingConv = enum(c_int) { AArch64SVEPCS, AMDGPUKernelCall, M68kRTD, + PreserveNone, + RISCVVectorCall, }; pub const StorageClass = enum(c_int) { @@ -2172,6 +2192,7 @@ pub const UnaryExprOrTypeTrait_Kind = enum(c_int) { DataSizeOf, AlignOf, PreferredAlignOf, + PtrAuthTypeDiscriminator, VecStep, OpenMPRequiredSimdAlign, }; diff --git a/src/translate_c.zig b/src/translate_c.zig index 6b84aeb743cd..22b03fcb7569 100644 --- a/src/translate_c.zig +++ b/src/translate_c.zig @@ -2644,7 +2644,9 @@ fn transInitListExprArray( const init_count = expr.getNumInits(); assert(@as(*const clang.Type, @ptrCast(arr_type)).isConstantArrayType()); const const_arr_ty = @as(*const clang.ConstantArrayType, @ptrCast(arr_type)); - const size_ap_int = const_arr_ty.getSize(); + var size_ap_int: *const clang.APInt = undefined; + const_arr_ty.getSize(&size_ap_int); + defer size_ap_int.free(); const all_count = size_ap_int.getLimitedValue(usize); const leftover_count = all_count - init_count; @@ -3665,6 +3667,7 @@ fn transUnaryExprOrTypeTraitExpr( .AlignOf => try Tag.alignof.create(c.arena, type_node), .DataSizeOf, .PreferredAlignOf, + .PtrAuthTypeDiscriminator, .VecStep, .OpenMPRequiredSimdAlign, => return fail( @@ -4793,7 +4796,9 @@ fn transType(c: *Context, scope: *Scope, ty: *const clang.Type, source_loc: clan .ConstantArray => { const const_arr_ty = @as(*const clang.ConstantArrayType, @ptrCast(ty)); - const size_ap_int = const_arr_ty.getSize(); + var size_ap_int: *const clang.APInt = undefined; + const_arr_ty.getSize(&size_ap_int); + defer size_ap_int.free(); const size = size_ap_int.getLimitedValue(usize); const elem_type = try transType(c, scope, const_arr_ty.getElementType().getTypePtr(), source_loc); diff --git a/src/zig_clang.cpp b/src/zig_clang.cpp index 635bad13747f..92485a6a353c 100644 --- a/src/zig_clang.cpp +++ b/src/zig_clang.cpp @@ -208,6 +208,8 @@ void 
ZigClang_detect_enum_CK(clang::CastKind x) { case clang::CK_UserDefinedConversion: case clang::CK_VectorSplat: case clang::CK_ZeroToOCLOpaqueType: + case clang::CK_HLSLVectorTruncation: + case clang::CK_HLSLArrayRValue: break; } }; @@ -285,10 +287,12 @@ void ZigClang_detect_enum_TypeClass(clang::Type::TypeClass ty) { case clang::Type::Complex: case clang::Type::Pointer: case clang::Type::BlockPointer: + case clang::Type::CountAttributed: case clang::Type::LValueReference: case clang::Type::RValueReference: case clang::Type::MemberPointer: case clang::Type::ConstantArray: + case clang::Type::ArrayParameter: case clang::Type::IncompleteArray: case clang::Type::VariableArray: case clang::Type::DependentSizedArray: @@ -329,6 +333,7 @@ void ZigClang_detect_enum_TypeClass(clang::Type::TypeClass ty) { case clang::Type::DependentName: case clang::Type::DependentTemplateSpecialization: case clang::Type::PackExpansion: + case clang::Type::PackIndexing: case clang::Type::ObjCTypeParam: case clang::Type::ObjCObject: case clang::Type::ObjCInterface: @@ -342,6 +347,7 @@ void ZigClang_detect_enum_TypeClass(clang::Type::TypeClass ty) { static_assert((clang::Type::TypeClass)ZigClangType_Adjusted == clang::Type::Adjusted, ""); static_assert((clang::Type::TypeClass)ZigClangType_Decayed == clang::Type::Decayed, ""); static_assert((clang::Type::TypeClass)ZigClangType_ConstantArray == clang::Type::ConstantArray, ""); +static_assert((clang::Type::TypeClass)ZigClangType_ArrayParameter == clang::Type::ArrayParameter, ""); static_assert((clang::Type::TypeClass)ZigClangType_DependentSizedArray == clang::Type::DependentSizedArray, ""); static_assert((clang::Type::TypeClass)ZigClangType_IncompleteArray == clang::Type::IncompleteArray, ""); static_assert((clang::Type::TypeClass)ZigClangType_VariableArray == clang::Type::VariableArray, ""); @@ -350,6 +356,7 @@ static_assert((clang::Type::TypeClass)ZigClangType_Attributed == clang::Type::At 
static_assert((clang::Type::TypeClass)ZigClangType_BTFTagAttributed == clang::Type::BTFTagAttributed, ""); static_assert((clang::Type::TypeClass)ZigClangType_BitInt == clang::Type::BitInt, ""); static_assert((clang::Type::TypeClass)ZigClangType_BlockPointer == clang::Type::BlockPointer, ""); +static_assert((clang::Type::TypeClass)ZigClangType_CountAttributed == clang::Type::CountAttributed, ""); static_assert((clang::Type::TypeClass)ZigClangType_Builtin == clang::Type::Builtin, ""); static_assert((clang::Type::TypeClass)ZigClangType_Complex == clang::Type::Complex, ""); static_assert((clang::Type::TypeClass)ZigClangType_Decltype == clang::Type::Decltype, ""); @@ -374,6 +381,7 @@ static_assert((clang::Type::TypeClass)ZigClangType_ObjCObject == clang::Type::Ob static_assert((clang::Type::TypeClass)ZigClangType_ObjCInterface == clang::Type::ObjCInterface, ""); static_assert((clang::Type::TypeClass)ZigClangType_ObjCTypeParam == clang::Type::ObjCTypeParam, ""); static_assert((clang::Type::TypeClass)ZigClangType_PackExpansion == clang::Type::PackExpansion, ""); +static_assert((clang::Type::TypeClass)ZigClangType_PackIndexing == clang::Type::PackIndexing, ""); static_assert((clang::Type::TypeClass)ZigClangType_Paren == clang::Type::Paren, ""); static_assert((clang::Type::TypeClass)ZigClangType_Pipe == clang::Type::Pipe, ""); static_assert((clang::Type::TypeClass)ZigClangType_Pointer == clang::Type::Pointer, ""); @@ -419,6 +427,7 @@ void ZigClang_detect_enum_StmtClass(clang::Stmt::StmtClass x) { case clang::Stmt::PredefinedExprClass: case clang::Stmt::ParenListExprClass: case clang::Stmt::ParenExprClass: + case clang::Stmt::PackIndexingExprClass: case clang::Stmt::PackExpansionExprClass: case clang::Stmt::UnresolvedMemberExprClass: case clang::Stmt::UnresolvedLookupExprClass: @@ -441,7 +450,6 @@ void ZigClang_detect_enum_StmtClass(clang::Stmt::StmtClass x) { case clang::Stmt::ObjCArrayLiteralClass: case clang::Stmt::OMPIteratorExprClass: case 
clang::Stmt::OMPArrayShapingExprClass: - case clang::Stmt::OMPArraySectionExprClass: case clang::Stmt::NoInitExprClass: case clang::Stmt::MemberExprClass: case clang::Stmt::MatrixSubscriptExprClass: @@ -462,6 +470,7 @@ void ZigClang_detect_enum_StmtClass(clang::Stmt::StmtClass x) { case clang::Stmt::FixedPointLiteralClass: case clang::Stmt::ExtVectorElementExprClass: case clang::Stmt::ExpressionTraitExprClass: + case clang::Stmt::EmbedExprClass: case clang::Stmt::DesignatedInitUpdateExprClass: case clang::Stmt::DesignatedInitExprClass: case clang::Stmt::DependentScopeDeclRefExprClass: @@ -519,6 +528,7 @@ void ZigClang_detect_enum_StmtClass(clang::Stmt::StmtClass x) { case clang::Stmt::AsTypeExprClass: case clang::Stmt::ArrayTypeTraitExprClass: case clang::Stmt::ArraySubscriptExprClass: + case clang::Stmt::ArraySectionExprClass: case clang::Stmt::ArrayInitLoopExprClass: case clang::Stmt::ArrayInitIndexExprClass: case clang::Stmt::AddrLabelExprClass: @@ -533,6 +543,8 @@ void ZigClang_detect_enum_StmtClass(clang::Stmt::StmtClass x) { case clang::Stmt::SEHFinallyStmtClass: case clang::Stmt::SEHExceptStmtClass: case clang::Stmt::ReturnStmtClass: + case clang::Stmt::OpenACCLoopConstructClass: + case clang::Stmt::OpenACCComputeConstructClass: case clang::Stmt::ObjCForCollectionStmtClass: case clang::Stmt::ObjCAutoreleasePoolStmtClass: case clang::Stmt::ObjCAtTryStmtClass: @@ -568,6 +580,8 @@ void ZigClang_detect_enum_StmtClass(clang::Stmt::StmtClass x) { case clang::Stmt::OMPMaskedDirectiveClass: case clang::Stmt::OMPUnrollDirectiveClass: case clang::Stmt::OMPTileDirectiveClass: + case clang::Stmt::OMPReverseDirectiveClass: + case clang::Stmt::OMPInterchangeDirectiveClass: case clang::Stmt::OMPTeamsGenericLoopDirectiveClass: case clang::Stmt::OMPTeamsDistributeSimdDirectiveClass: case clang::Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass: @@ -658,6 +672,7 @@ static_assert((clang::Stmt::StmtClass)ZigClangStmt_PseudoObjectExprClass == clan 
static_assert((clang::Stmt::StmtClass)ZigClangStmt_PredefinedExprClass == clang::Stmt::PredefinedExprClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_ParenListExprClass == clang::Stmt::ParenListExprClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_ParenExprClass == clang::Stmt::ParenExprClass, ""); +static_assert((clang::Stmt::StmtClass)ZigClangStmt_PackIndexingExprClass == clang::Stmt::PackIndexingExprClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_PackExpansionExprClass == clang::Stmt::PackExpansionExprClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_UnresolvedMemberExprClass == clang::Stmt::UnresolvedMemberExprClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_UnresolvedLookupExprClass == clang::Stmt::UnresolvedLookupExprClass, ""); @@ -680,7 +695,6 @@ static_assert((clang::Stmt::StmtClass)ZigClangStmt_ObjCAvailabilityCheckExprClas static_assert((clang::Stmt::StmtClass)ZigClangStmt_ObjCArrayLiteralClass == clang::Stmt::ObjCArrayLiteralClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_OMPIteratorExprClass == clang::Stmt::OMPIteratorExprClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_OMPArrayShapingExprClass == clang::Stmt::OMPArrayShapingExprClass, ""); -static_assert((clang::Stmt::StmtClass)ZigClangStmt_OMPArraySectionExprClass == clang::Stmt::OMPArraySectionExprClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_NoInitExprClass == clang::Stmt::NoInitExprClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_MemberExprClass == clang::Stmt::MemberExprClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_MatrixSubscriptExprClass == clang::Stmt::MatrixSubscriptExprClass, ""); @@ -701,6 +715,7 @@ static_assert((clang::Stmt::StmtClass)ZigClangStmt_FloatingLiteralClass == clang static_assert((clang::Stmt::StmtClass)ZigClangStmt_FixedPointLiteralClass == clang::Stmt::FixedPointLiteralClass, ""); 
static_assert((clang::Stmt::StmtClass)ZigClangStmt_ExtVectorElementExprClass == clang::Stmt::ExtVectorElementExprClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_ExpressionTraitExprClass == clang::Stmt::ExpressionTraitExprClass, ""); +static_assert((clang::Stmt::StmtClass)ZigClangStmt_EmbedExprClass == clang::Stmt::EmbedExprClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_DesignatedInitUpdateExprClass == clang::Stmt::DesignatedInitUpdateExprClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_DesignatedInitExprClass == clang::Stmt::DesignatedInitExprClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_DependentScopeDeclRefExprClass == clang::Stmt::DependentScopeDeclRefExprClass, ""); @@ -758,6 +773,7 @@ static_assert((clang::Stmt::StmtClass)ZigClangStmt_AtomicExprClass == clang::Stm static_assert((clang::Stmt::StmtClass)ZigClangStmt_AsTypeExprClass == clang::Stmt::AsTypeExprClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_ArrayTypeTraitExprClass == clang::Stmt::ArrayTypeTraitExprClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_ArraySubscriptExprClass == clang::Stmt::ArraySubscriptExprClass, ""); +static_assert((clang::Stmt::StmtClass)ZigClangStmt_ArraySectionExprClass == clang::Stmt::ArraySectionExprClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_ArrayInitLoopExprClass == clang::Stmt::ArrayInitLoopExprClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_ArrayInitIndexExprClass == clang::Stmt::ArrayInitIndexExprClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_AddrLabelExprClass == clang::Stmt::AddrLabelExprClass, ""); @@ -772,6 +788,8 @@ static_assert((clang::Stmt::StmtClass)ZigClangStmt_SEHLeaveStmtClass == clang::S static_assert((clang::Stmt::StmtClass)ZigClangStmt_SEHFinallyStmtClass == clang::Stmt::SEHFinallyStmtClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_SEHExceptStmtClass == clang::Stmt::SEHExceptStmtClass, ""); 
static_assert((clang::Stmt::StmtClass)ZigClangStmt_ReturnStmtClass == clang::Stmt::ReturnStmtClass, ""); +static_assert((clang::Stmt::StmtClass)ZigClangStmt_OpenACCLoopConstructClass == clang::Stmt::OpenACCLoopConstructClass, ""); +static_assert((clang::Stmt::StmtClass)ZigClangStmt_OpenACCComputeConstructClass == clang::Stmt::OpenACCComputeConstructClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_ObjCForCollectionStmtClass == clang::Stmt::ObjCForCollectionStmtClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_ObjCAutoreleasePoolStmtClass == clang::Stmt::ObjCAutoreleasePoolStmtClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_ObjCAtTryStmtClass == clang::Stmt::ObjCAtTryStmtClass, ""); @@ -807,6 +825,8 @@ static_assert((clang::Stmt::StmtClass)ZigClangStmt_OMPMasterDirectiveClass == cl static_assert((clang::Stmt::StmtClass)ZigClangStmt_OMPMaskedDirectiveClass == clang::Stmt::OMPMaskedDirectiveClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_OMPUnrollDirectiveClass == clang::Stmt::OMPUnrollDirectiveClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_OMPTileDirectiveClass == clang::Stmt::OMPTileDirectiveClass, ""); +static_assert((clang::Stmt::StmtClass)ZigClangStmt_OMPReverseDirectiveClass == clang::Stmt::OMPReverseDirectiveClass, ""); +static_assert((clang::Stmt::StmtClass)ZigClangStmt_OMPInterchangeDirectiveClass == clang::Stmt::OMPInterchangeDirectiveClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_OMPTeamsGenericLoopDirectiveClass == clang::Stmt::OMPTeamsGenericLoopDirectiveClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_OMPTeamsDistributeSimdDirectiveClass == clang::Stmt::OMPTeamsDistributeSimdDirectiveClass, ""); static_assert((clang::Stmt::StmtClass)ZigClangStmt_OMPTeamsDistributeParallelForSimdDirectiveClass == clang::Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass, ""); @@ -1001,13 +1021,13 @@ void ZigClang_detect_enum_DeclKind(clang::Decl::Kind x) { } 
static_assert((clang::Decl::Kind)ZigClangDeclTranslationUnit == clang::Decl::TranslationUnit, ""); +static_assert((clang::Decl::Kind)ZigClangDeclTopLevelStmt == clang::Decl::TopLevelStmt, ""); static_assert((clang::Decl::Kind)ZigClangDeclRequiresExprBody == clang::Decl::RequiresExprBody, ""); static_assert((clang::Decl::Kind)ZigClangDeclLinkageSpec == clang::Decl::LinkageSpec, ""); static_assert((clang::Decl::Kind)ZigClangDeclExternCContext == clang::Decl::ExternCContext, ""); static_assert((clang::Decl::Kind)ZigClangDeclExport == clang::Decl::Export, ""); static_assert((clang::Decl::Kind)ZigClangDeclCaptured == clang::Decl::Captured, ""); static_assert((clang::Decl::Kind)ZigClangDeclBlock == clang::Decl::Block, ""); -static_assert((clang::Decl::Kind)ZigClangDeclTopLevelStmt == clang::Decl::TopLevelStmt, ""); static_assert((clang::Decl::Kind)ZigClangDeclStaticAssert == clang::Decl::StaticAssert, ""); static_assert((clang::Decl::Kind)ZigClangDeclPragmaDetectMismatch == clang::Decl::PragmaDetectMismatch, ""); static_assert((clang::Decl::Kind)ZigClangDeclPragmaComment == clang::Decl::PragmaComment, ""); @@ -1515,6 +1535,7 @@ void ZigClang_detect_enum_BuiltinTypeKind(clang::BuiltinType::Kind x) { case clang::BuiltinType::RvvBFloat16m2x4: case clang::BuiltinType::RvvBFloat16m4x2: case clang::BuiltinType::WasmExternRef: + case clang::BuiltinType::AMDGPUBufferRsrc: case clang::BuiltinType::Void: case clang::BuiltinType::Bool: case clang::BuiltinType::Char_U: @@ -1580,14 +1601,15 @@ void ZigClang_detect_enum_BuiltinTypeKind(clang::BuiltinType::Kind x) { case clang::BuiltinType::Dependent: case clang::BuiltinType::Overload: case clang::BuiltinType::BoundMember: + case clang::BuiltinType::UnresolvedTemplate: case clang::BuiltinType::PseudoObject: case clang::BuiltinType::UnknownAny: case clang::BuiltinType::BuiltinFn: case clang::BuiltinType::ARCUnbridgedCast: case clang::BuiltinType::IncompleteMatrixIdx: - case clang::BuiltinType::OMPArraySection: case 
clang::BuiltinType::OMPArrayShaping: case clang::BuiltinType::OMPIterator: + case clang::BuiltinType::ArraySection: break; } } @@ -2018,6 +2040,7 @@ static_assert((clang::BuiltinType::Kind)ZigClangBuiltinTypeRvvBFloat16m2x3 == cl static_assert((clang::BuiltinType::Kind)ZigClangBuiltinTypeRvvBFloat16m2x4 == clang::BuiltinType::RvvBFloat16m2x4, ""); static_assert((clang::BuiltinType::Kind)ZigClangBuiltinTypeRvvBFloat16m4x2 == clang::BuiltinType::RvvBFloat16m4x2, ""); static_assert((clang::BuiltinType::Kind)ZigClangBuiltinTypeWasmExternRef == clang::BuiltinType::WasmExternRef, ""); +static_assert((clang::BuiltinType::Kind)ZigClangBuiltinTypeAMDGPUBufferRsrc == clang::BuiltinType::AMDGPUBufferRsrc, ""); static_assert((clang::BuiltinType::Kind)ZigClangBuiltinTypeVoid == clang::BuiltinType::Void, ""); static_assert((clang::BuiltinType::Kind)ZigClangBuiltinTypeBool == clang::BuiltinType::Bool, ""); static_assert((clang::BuiltinType::Kind)ZigClangBuiltinTypeChar_U == clang::BuiltinType::Char_U, ""); @@ -2083,12 +2106,12 @@ static_assert((clang::BuiltinType::Kind)ZigClangBuiltinTypeOCLReserveID == clang static_assert((clang::BuiltinType::Kind)ZigClangBuiltinTypeDependent == clang::BuiltinType::Dependent, ""); static_assert((clang::BuiltinType::Kind)ZigClangBuiltinTypeOverload == clang::BuiltinType::Overload, ""); static_assert((clang::BuiltinType::Kind)ZigClangBuiltinTypeBoundMember == clang::BuiltinType::BoundMember, ""); +static_assert((clang::BuiltinType::Kind)ZigClangBuiltinTypeUnresolvedTemplate == clang::BuiltinType::UnresolvedTemplate, ""); static_assert((clang::BuiltinType::Kind)ZigClangBuiltinTypePseudoObject == clang::BuiltinType::PseudoObject, ""); static_assert((clang::BuiltinType::Kind)ZigClangBuiltinTypeUnknownAny == clang::BuiltinType::UnknownAny, ""); static_assert((clang::BuiltinType::Kind)ZigClangBuiltinTypeBuiltinFn == clang::BuiltinType::BuiltinFn, ""); static_assert((clang::BuiltinType::Kind)ZigClangBuiltinTypeARCUnbridgedCast == 
clang::BuiltinType::ARCUnbridgedCast, ""); static_assert((clang::BuiltinType::Kind)ZigClangBuiltinTypeIncompleteMatrixIdx == clang::BuiltinType::IncompleteMatrixIdx, ""); -static_assert((clang::BuiltinType::Kind)ZigClangBuiltinTypeOMPArraySection == clang::BuiltinType::OMPArraySection, ""); static_assert((clang::BuiltinType::Kind)ZigClangBuiltinTypeOMPArrayShaping == clang::BuiltinType::OMPArrayShaping, ""); static_assert((clang::BuiltinType::Kind)ZigClangBuiltinTypeOMPIterator == clang::BuiltinType::OMPIterator, ""); @@ -2116,6 +2139,8 @@ void ZigClang_detect_enum_CallingConv(clang::CallingConv x) { case clang::CC_AArch64SVEPCS: case clang::CC_AMDGPUKernelCall: case clang::CC_M68kRTD: + case clang::CC_PreserveNone: + case clang::CC_RISCVVectorCall: break; } } @@ -2142,6 +2167,8 @@ static_assert((clang::CallingConv)ZigClangCallingConv_AArch64VectorCall == clang static_assert((clang::CallingConv)ZigClangCallingConv_AArch64SVEPCS == clang::CC_AArch64SVEPCS, ""); static_assert((clang::CallingConv)ZigClangCallingConv_AMDGPUKernelCall == clang::CC_AMDGPUKernelCall, ""); static_assert((clang::CallingConv)ZigClangCallingConv_M68kRTD == clang::CC_M68kRTD, ""); +static_assert((clang::CallingConv)ZigClangCallingConv_PreserveNone == clang::CC_PreserveNone, ""); +static_assert((clang::CallingConv)ZigClangCallingConv_RISCVVectorCall == clang::CC_RISCVVectorCall, ""); void ZigClang_detect_enum_StorageClass(clang::StorageClass x) { switch (x) { @@ -2251,6 +2278,7 @@ static_assert((clang::UnaryExprOrTypeTrait)ZigClangUnaryExprOrTypeTrait_Kind::Zi static_assert((clang::UnaryExprOrTypeTrait)ZigClangUnaryExprOrTypeTrait_Kind::ZigClangUnaryExprOrTypeTrait_KindDataSizeOf == clang::UnaryExprOrTypeTrait::UETT_DataSizeOf, ""); static_assert((clang::UnaryExprOrTypeTrait)ZigClangUnaryExprOrTypeTrait_Kind::ZigClangUnaryExprOrTypeTrait_KindAlignOf == clang::UnaryExprOrTypeTrait::UETT_AlignOf, ""); 
static_assert((clang::UnaryExprOrTypeTrait)ZigClangUnaryExprOrTypeTrait_Kind::ZigClangUnaryExprOrTypeTrait_KindPreferredAlignOf == clang::UnaryExprOrTypeTrait::UETT_PreferredAlignOf, ""); +static_assert((clang::UnaryExprOrTypeTrait)ZigClangUnaryExprOrTypeTrait_Kind::ZigClangUnaryExprOrTypeTrait_KindPtrAuthTypeDiscriminator == clang::UnaryExprOrTypeTrait::UETT_PtrAuthTypeDiscriminator, ""); static_assert((clang::UnaryExprOrTypeTrait)ZigClangUnaryExprOrTypeTrait_Kind::ZigClangUnaryExprOrTypeTrait_KindVecStep == clang::UnaryExprOrTypeTrait::UETT_VecStep, ""); static_assert((clang::UnaryExprOrTypeTrait)ZigClangUnaryExprOrTypeTrait_Kind::ZigClangUnaryExprOrTypeTrait_KindOpenMPRequiredSimdAlign == clang::UnaryExprOrTypeTrait::UETT_OpenMPRequiredSimdAlign, ""); @@ -2811,7 +2839,7 @@ bool ZigClangType_isIncompleteOrZeroLengthArrayType(const ZigClangQualType *self if (casted_type->isIncompleteArrayType()) return true; - clang::QualType elem_type = *casted; + clang::QualType elem_type = *casted; while (const clang::ConstantArrayType *ArrayT = casted_ctx->getAsConstantArrayType(elem_type)) { if (ArrayT->getSize() == 0) return true; @@ -3021,6 +3049,11 @@ bool ZigClangAPSInt_lessThanEqual(const ZigClangAPSInt *self, uint64_t rhs) { return casted->ule(rhs); } +void ZigClangAPInt_free(const ZigClangAPInt *self) { + auto casted = reinterpret_cast(self); + delete casted; +} + uint64_t ZigClangAPInt_getLimitedValue(const ZigClangAPInt *self, uint64_t limit) { auto casted = reinterpret_cast(self); return casted->getLimitedValue(limit); @@ -3400,9 +3433,10 @@ struct ZigClangQualType ZigClangConstantArrayType_getElementType(const struct Zi return bitcast(casted->getElementType()); } -const struct ZigClangAPInt *ZigClangConstantArrayType_getSize(const struct ZigClangConstantArrayType *self) { +void ZigClangConstantArrayType_getSize(const struct ZigClangConstantArrayType *self, const struct ZigClangAPInt **result) { auto casted = reinterpret_cast(self); - return 
reinterpret_cast(&casted->getSize()); + llvm::APInt *ap_int = new llvm::APInt(casted->getSize()); + *result = reinterpret_cast(ap_int); } const struct ZigClangValueDecl *ZigClangDeclRefExpr_getDecl(const struct ZigClangDeclRefExpr *self) { @@ -4059,7 +4093,7 @@ const struct ZigClangAPSInt *ZigClangEnumConstantDecl_getInitVal(const struct Zi // See also: https://github.com/ziglang/zig/issues/11168 bool ZigClangIsLLVMUsingSeparateLibcxx() { - // Temporarily create an InMemoryFileSystem, so that we can perform a file + // Temporarily create an InMemoryFileSystem, so that we can perform a file // lookup that is guaranteed to fail. auto FS = new llvm::vfs::InMemoryFileSystem(true); auto StatusOrErr = FS->status("foo.txt"); @@ -4080,9 +4114,13 @@ static_assert((llvm::APFloatBase::Semantics)ZigClangAPFloatBase_Semantics_IEEEqu static_assert((llvm::APFloatBase::Semantics)ZigClangAPFloatBase_Semantics_PPCDoubleDouble == llvm::APFloatBase::S_PPCDoubleDouble); static_assert((llvm::APFloatBase::Semantics)ZigClangAPFloatBase_Semantics_Float8E5M2 == llvm::APFloatBase::S_Float8E5M2); static_assert((llvm::APFloatBase::Semantics)ZigClangAPFloatBase_Semantics_Float8E5M2FNUZ == llvm::APFloatBase::S_Float8E5M2FNUZ); +static_assert((llvm::APFloatBase::Semantics)ZigClangAPFloatBase_Semantics_Float8E4M3 == llvm::APFloatBase::S_Float8E4M3); static_assert((llvm::APFloatBase::Semantics)ZigClangAPFloatBase_Semantics_Float8E4M3FN == llvm::APFloatBase::S_Float8E4M3FN); static_assert((llvm::APFloatBase::Semantics)ZigClangAPFloatBase_Semantics_Float8E4M3FNUZ == llvm::APFloatBase::S_Float8E4M3FNUZ); static_assert((llvm::APFloatBase::Semantics)ZigClangAPFloatBase_Semantics_Float8E4M3B11FNUZ == llvm::APFloatBase::S_Float8E4M3B11FNUZ); static_assert((llvm::APFloatBase::Semantics)ZigClangAPFloatBase_Semantics_FloatTF32 == llvm::APFloatBase::S_FloatTF32); +static_assert((llvm::APFloatBase::Semantics)ZigClangAPFloatBase_Semantics_Float6E3M2FN == llvm::APFloatBase::S_Float6E3M2FN); 
+static_assert((llvm::APFloatBase::Semantics)ZigClangAPFloatBase_Semantics_Float6E2M3FN == llvm::APFloatBase::S_Float6E2M3FN); +static_assert((llvm::APFloatBase::Semantics)ZigClangAPFloatBase_Semantics_Float4E2M1FN == llvm::APFloatBase::S_Float4E2M1FN); static_assert((llvm::APFloatBase::Semantics)ZigClangAPFloatBase_Semantics_x87DoubleExtended == llvm::APFloatBase::S_x87DoubleExtended); static_assert((llvm::APFloatBase::Semantics)ZigClangAPFloatBase_Semantics_MaxSemantics == llvm::APFloatBase::S_MaxSemantics); diff --git a/src/zig_clang.h b/src/zig_clang.h index 6d7ebc64f541..e2b6c3c2f657 100644 --- a/src/zig_clang.h +++ b/src/zig_clang.h @@ -247,6 +247,7 @@ enum ZigClangTypeClass { ZigClangType_Adjusted, ZigClangType_Decayed, ZigClangType_ConstantArray, + ZigClangType_ArrayParameter, ZigClangType_DependentSizedArray, ZigClangType_IncompleteArray, ZigClangType_VariableArray, @@ -255,6 +256,7 @@ enum ZigClangTypeClass { ZigClangType_BTFTagAttributed, ZigClangType_BitInt, ZigClangType_BlockPointer, + ZigClangType_CountAttributed, ZigClangType_Builtin, ZigClangType_Complex, ZigClangType_Decltype, @@ -279,6 +281,7 @@ enum ZigClangTypeClass { ZigClangType_ObjCInterface, ZigClangType_ObjCTypeParam, ZigClangType_PackExpansion, + ZigClangType_PackIndexing, ZigClangType_Paren, ZigClangType_Pipe, ZigClangType_Pointer, @@ -323,6 +326,7 @@ enum ZigClangStmtClass { ZigClangStmt_PredefinedExprClass, ZigClangStmt_ParenListExprClass, ZigClangStmt_ParenExprClass, + ZigClangStmt_PackIndexingExprClass, ZigClangStmt_PackExpansionExprClass, ZigClangStmt_UnresolvedMemberExprClass, ZigClangStmt_UnresolvedLookupExprClass, @@ -345,7 +349,6 @@ enum ZigClangStmtClass { ZigClangStmt_ObjCArrayLiteralClass, ZigClangStmt_OMPIteratorExprClass, ZigClangStmt_OMPArrayShapingExprClass, - ZigClangStmt_OMPArraySectionExprClass, ZigClangStmt_NoInitExprClass, ZigClangStmt_MemberExprClass, ZigClangStmt_MatrixSubscriptExprClass, @@ -366,6 +369,7 @@ enum ZigClangStmtClass { 
ZigClangStmt_FixedPointLiteralClass, ZigClangStmt_ExtVectorElementExprClass, ZigClangStmt_ExpressionTraitExprClass, + ZigClangStmt_EmbedExprClass, ZigClangStmt_DesignatedInitUpdateExprClass, ZigClangStmt_DesignatedInitExprClass, ZigClangStmt_DependentScopeDeclRefExprClass, @@ -423,6 +427,7 @@ enum ZigClangStmtClass { ZigClangStmt_AsTypeExprClass, ZigClangStmt_ArrayTypeTraitExprClass, ZigClangStmt_ArraySubscriptExprClass, + ZigClangStmt_ArraySectionExprClass, ZigClangStmt_ArrayInitLoopExprClass, ZigClangStmt_ArrayInitIndexExprClass, ZigClangStmt_AddrLabelExprClass, @@ -437,6 +442,8 @@ enum ZigClangStmtClass { ZigClangStmt_SEHFinallyStmtClass, ZigClangStmt_SEHExceptStmtClass, ZigClangStmt_ReturnStmtClass, + ZigClangStmt_OpenACCLoopConstructClass, + ZigClangStmt_OpenACCComputeConstructClass, ZigClangStmt_ObjCForCollectionStmtClass, ZigClangStmt_ObjCAutoreleasePoolStmtClass, ZigClangStmt_ObjCAtTryStmtClass, @@ -472,6 +479,8 @@ enum ZigClangStmtClass { ZigClangStmt_OMPMaskedDirectiveClass, ZigClangStmt_OMPUnrollDirectiveClass, ZigClangStmt_OMPTileDirectiveClass, + ZigClangStmt_OMPReverseDirectiveClass, + ZigClangStmt_OMPInterchangeDirectiveClass, ZigClangStmt_OMPTeamsGenericLoopDirectiveClass, ZigClangStmt_OMPTeamsDistributeSimdDirectiveClass, ZigClangStmt_OMPTeamsDistributeParallelForSimdDirectiveClass, @@ -608,13 +617,13 @@ enum ZigClangCK { enum ZigClangDeclKind { ZigClangDeclTranslationUnit, + ZigClangDeclTopLevelStmt, ZigClangDeclRequiresExprBody, ZigClangDeclLinkageSpec, ZigClangDeclExternCContext, ZigClangDeclExport, ZigClangDeclCaptured, ZigClangDeclBlock, - ZigClangDeclTopLevelStmt, ZigClangDeclStaticAssert, ZigClangDeclPragmaDetectMismatch, ZigClangDeclPragmaComment, @@ -1122,6 +1131,7 @@ enum ZigClangBuiltinTypeKind { ZigClangBuiltinTypeRvvBFloat16m2x4, ZigClangBuiltinTypeRvvBFloat16m4x2, ZigClangBuiltinTypeWasmExternRef, + ZigClangBuiltinTypeAMDGPUBufferRsrc, ZigClangBuiltinTypeVoid, ZigClangBuiltinTypeBool, ZigClangBuiltinTypeChar_U, @@ -1187,6 +1197,7 @@ 
enum ZigClangBuiltinTypeKind { ZigClangBuiltinTypeDependent, ZigClangBuiltinTypeOverload, ZigClangBuiltinTypeBoundMember, + ZigClangBuiltinTypeUnresolvedTemplate, ZigClangBuiltinTypePseudoObject, ZigClangBuiltinTypeUnknownAny, ZigClangBuiltinTypeBuiltinFn, @@ -1220,6 +1231,8 @@ enum ZigClangCallingConv { ZigClangCallingConv_AArch64SVEPCS, ZigClangCallingConv_AMDGPUKernelCall, ZigClangCallingConv_M68kRTD, + ZigClangCallingConv_PreserveNone, + ZigClangCallingConv_RISCVVectorCall, }; enum ZigClangStorageClass { @@ -1255,10 +1268,14 @@ enum ZigClangAPFloatBase_Semantics { ZigClangAPFloatBase_Semantics_PPCDoubleDouble, ZigClangAPFloatBase_Semantics_Float8E5M2, ZigClangAPFloatBase_Semantics_Float8E5M2FNUZ, + ZigClangAPFloatBase_Semantics_Float8E4M3, ZigClangAPFloatBase_Semantics_Float8E4M3FN, ZigClangAPFloatBase_Semantics_Float8E4M3FNUZ, ZigClangAPFloatBase_Semantics_Float8E4M3B11FNUZ, ZigClangAPFloatBase_Semantics_FloatTF32, + ZigClangAPFloatBase_Semantics_Float6E3M2FN, + ZigClangAPFloatBase_Semantics_Float6E2M3FN, + ZigClangAPFloatBase_Semantics_Float4E2M1FN, ZigClangAPFloatBase_Semantics_x87DoubleExtended, ZigClangAPFloatBase_Semantics_MaxSemantics = ZigClangAPFloatBase_Semantics_x87DoubleExtended, }; @@ -1314,6 +1331,7 @@ enum ZigClangUnaryExprOrTypeTrait_Kind { ZigClangUnaryExprOrTypeTrait_KindDataSizeOf, ZigClangUnaryExprOrTypeTrait_KindAlignOf, ZigClangUnaryExprOrTypeTrait_KindPreferredAlignOf, + ZigClangUnaryExprOrTypeTrait_KindPtrAuthTypeDiscriminator, ZigClangUnaryExprOrTypeTrait_KindVecStep, ZigClangUnaryExprOrTypeTrait_KindOpenMPRequiredSimdAlign, }; @@ -1515,6 +1533,7 @@ ZIG_EXTERN_C const uint64_t *ZigClangAPSInt_getRawData(const struct ZigClangAPSI ZIG_EXTERN_C unsigned ZigClangAPSInt_getNumWords(const struct ZigClangAPSInt *self); ZIG_EXTERN_C bool ZigClangAPSInt_lessThanEqual(const struct ZigClangAPSInt *self, uint64_t rhs); +ZIG_EXTERN_C void ZigClangAPInt_free(const struct ZigClangAPInt *self); ZIG_EXTERN_C uint64_t ZigClangAPInt_getLimitedValue(const 
struct ZigClangAPInt *self, uint64_t limit); ZIG_EXTERN_C const struct ZigClangExpr *ZigClangAPValueLValueBase_dyn_cast_Expr(struct ZigClangAPValueLValueBase self); @@ -1569,7 +1588,7 @@ ZIG_EXTERN_C struct ZigClangQualType ZigClangArrayType_getElementType(const stru ZIG_EXTERN_C struct ZigClangQualType ZigClangIncompleteArrayType_getElementType(const struct ZigClangIncompleteArrayType *); ZIG_EXTERN_C struct ZigClangQualType ZigClangConstantArrayType_getElementType(const struct ZigClangConstantArrayType *); -ZIG_EXTERN_C const struct ZigClangAPInt *ZigClangConstantArrayType_getSize(const struct ZigClangConstantArrayType *); +ZIG_EXTERN_C void ZigClangConstantArrayType_getSize(const struct ZigClangConstantArrayType *, const struct ZigClangAPInt **result); ZIG_EXTERN_C const struct ZigClangValueDecl *ZigClangDeclRefExpr_getDecl(const struct ZigClangDeclRefExpr *); ZIG_EXTERN_C const struct ZigClangNamedDecl *ZigClangDeclRefExpr_getFoundDecl(const struct ZigClangDeclRefExpr *); From f69ff5e9b5267bd1e66ef0320b7fe6780858677d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Fri, 23 Aug 2024 19:43:51 +0200 Subject: [PATCH 163/202] clang: Update options data to Clang 19. 
--- src/clang_options_data.zig | 303 ++++++++++++++++++++++++++++++++----- 1 file changed, 265 insertions(+), 38 deletions(-) diff --git a/src/clang_options_data.zig b/src/clang_options_data.zig index f41a4513338d..9ced7e9ebe21 100644 --- a/src/clang_options_data.zig +++ b/src/clang_options_data.zig @@ -149,8 +149,10 @@ flagpd1("WCL4"), flagpd1("Wall"), flagpd1("Wdeprecated"), flagpd1("Wframe-larger-than"), +flagpd1("Winvalid-constexpr"), flagpd1("Wlarge-by-value-copy"), flagpd1("Wno-deprecated"), +flagpd1("Wno-invalid-constexpr"), flagpd1("Wno-rewrite-macros"), flagpd1("Wno-system-headers"), flagpd1("Wno-write-strings"), @@ -885,6 +887,14 @@ flagpsl("MT"), .pd2 = false, .psl = true, }, +.{ + .name = "Zc:__STDC__", + .syntax = .flag, + .zig_equivalent = .other, + .pd1 = true, + .pd2 = false, + .psl = true, +}, .{ .name = "Zc:__cplusplus", .syntax = .flag, @@ -2102,6 +2112,7 @@ flagpsl("MT"), .psl = false, }, sepd1("add-plugin"), +sepd1("alias_list"), flagpd1("faligned-alloc-unavailable"), flagpd1("all_load"), sepd1("allowable_client"), @@ -2129,25 +2140,24 @@ sepd1("analyzer-inline-max-stack-depth"), sepd1("analyzer-inlining-mode"), flagpd1("analyzer-list-enabled-checkers"), sepd1("analyzer-max-loop"), +flagpd1("analyzer-note-analysis-entry-points"), flagpd1("analyzer-opt-analyze-headers"), sepd1("analyzer-output"), sepd1("analyzer-purge"), flagpd1("analyzer-stats"), flagpd1("analyzer-viz-egraph-graphviz"), flagpd1("analyzer-werror"), -flagpd1("fslp-vectorize-aggressive"), -flagpd1("fno-slp-vectorize-aggressive"), sepd1("fnew-alignment"), flagpd1("faligned-new"), flagpd1("fno-aligned-new"), -flagpd1("fcuda-rdc"), -flagpd1("fno-cuda-rdc"), flagpd1("fsched-interblock"), flagpd1("ftree-vectorize"), flagpd1("fno-tree-vectorize"), flagpd1("ftree-slp-vectorize"), flagpd1("fno-tree-slp-vectorize"), flagpd1("fterminated-vtables"), +flagpd1("fcuda-rdc"), +flagpd1("fno-cuda-rdc"), flagpd1("grecord-gcc-switches"), flagpd1("gno-record-gcc-switches"), flagpd1("nocudainc"), @@ 
-2181,7 +2191,6 @@ flagpd1("mtune=help"), flagpd1("integrated-as"), flagpd1("no-integrated-as"), flagpd1("fopenmp-is-device"), -flagpd1("shared-libasan"), flagpd1("fcuda-approx-transcendentals"), flagpd1("fno-cuda-approx-transcendentals"), .{ @@ -2232,6 +2241,14 @@ flagpd1("fno-cuda-approx-transcendentals"), .pd2 = false, .psl = true, }, +.{ + .name = "Ob3", + .syntax = .flag, + .zig_equivalent = .other, + .pd1 = true, + .pd2 = false, + .psl = true, +}, .{ .name = "Od", .syntax = .flag, @@ -2324,6 +2341,7 @@ sepd1("Xmicrosoft-visualc-tools-root"), sepd1("Xmicrosoft-visualc-tools-version"), sepd1("Xmicrosoft-windows-sdk-root"), sepd1("Xmicrosoft-windows-sdk-version"), +sepd1("Xmicrosoft-windows-sys-root"), .{ .name = "Qembed_debug", .syntax = .flag, @@ -2332,21 +2350,28 @@ sepd1("Xmicrosoft-windows-sdk-version"), .pd2 = false, .psl = true, }, +flagpd1("shared-libasan"), +flagpd1("static-libasan"), +flagpd1("fslp-vectorize-aggressive"), flagpd1("fident"), +flagpd1("fno-slp-vectorize-aggressive"), flagpd1("fno-ident"), flagpd1("fdiagnostics-color"), flagpd1("fno-diagnostics-color"), flagpd1("frecord-gcc-switches"), flagpd1("fno-record-gcc-switches"), -flagpd1("Xparser"), -flagpd1("Xcompiler"), flagpd1("fexpensive-optimizations"), flagpd1("fno-expensive-optimizations"), flagpd1("fdefer-pop"), flagpd1("fno-defer-pop"), flagpd1("fextended-identifiers"), flagpd1("fno-extended-identifiers"), +flagpd1("Xparser"), +flagpd1("Xcompiler"), flagpd1("fno-sanitize-blacklist"), +flagpd1("fhonor-infinites"), +flagpd1("fno-honor-infinites"), +flagpd1("findirect-virtual-calls"), .{ .name = "config", .syntax = .separate, @@ -2355,9 +2380,6 @@ flagpd1("fno-sanitize-blacklist"), .pd2 = true, .psl = false, }, -flagpd1("fhonor-infinites"), -flagpd1("fno-honor-infinites"), -flagpd1("findirect-virtual-calls"), .{ .name = "ansi", .syntax = .flag, @@ -2457,6 +2479,14 @@ flagpd1("compiler-options-dump"), }, flagpd1("cpp"), flagpd1("cpp-precomp"), +.{ + .name = "crel", + .syntax = .flag, + 
.zig_equivalent = .other, + .pd1 = false, + .pd2 = true, + .psl = false, +}, .{ .name = "cuda-compile-host-device", .syntax = .flag, @@ -2499,6 +2529,7 @@ flagpd1("cpp-precomp"), }, flagpd1("dA"), flagpd1("dD"), +flagpd1("dE"), flagpd1("dI"), flagpd1("dM"), flagpd1("d"), @@ -2508,7 +2539,14 @@ flagpd1("dead_strip"), flagpd1("debug-forward-template-params"), flagpd1("debug-info-macro"), sepd1("default-function-attr"), -sepd1("defsym"), +.{ + .name = "defsym", + .syntax = .separate, + .zig_equivalent = .other, + .pd1 = false, + .pd2 = true, + .psl = false, +}, sepd1("dependency-dot"), sepd1("dependency-file"), flagpd1("detailed-preprocessing-record"), @@ -2573,7 +2611,16 @@ flagpd1("dynamic"), }, flagpd1("ehcontguard"), flagpd1("emit-ast"), +flagpd1("emit-cir"), flagpd1("emit-codegen-only"), +.{ + .name = "emit-extension-symbol-graphs", + .syntax = .flag, + .zig_equivalent = .other, + .pd1 = false, + .pd2 = true, + .psl = false, +}, flagpd1("emit-fir"), flagpd1("emit-header-unit"), flagpd1("emit-hlfir"), @@ -2596,6 +2643,14 @@ flagpd1("emit-module"), flagpd1("emit-module-interface"), flagpd1("emit-obj"), flagpd1("emit-pch"), +.{ + .name = "pretty-sgf", + .syntax = .flag, + .zig_equivalent = .other, + .pd1 = false, + .pd2 = true, + .psl = false, +}, .{ .name = "emit-pristine-llvm", .syntax = .flag, @@ -2604,6 +2659,15 @@ flagpd1("emit-pch"), .pd2 = false, .psl = true, }, +flagpd1("emit-reduced-module-interface"), +.{ + .name = "emit-sgf-symbol-labels-for-testing", + .syntax = .flag, + .zig_equivalent = .other, + .pd1 = false, + .pd2 = true, + .psl = false, +}, .{ .name = "emit-static-lib", .syntax = .flag, @@ -2612,6 +2676,7 @@ flagpd1("emit-pch"), .pd2 = true, .psl = false, }, +flagpd1("emit-symbol-graph"), .{ .name = "enable-16bit-types", .syntax = .flag, @@ -2687,6 +2752,7 @@ flagpd1("fassociative-math"), flagpd1("fassume-nothrow-exception-dtor"), flagpd1("fassume-sane-operator-new"), flagpd1("fassume-unique-vtables"), +flagpd1("fassumptions"), flagpd1("fast"), 
flagpd1("fastcp"), flagpd1("fastf"), @@ -2699,10 +2765,12 @@ flagpd1("fautolink"), flagpd1("fautomatic"), flagpd1("fbackslash"), flagpd1("fbacktrace"), +flagpd1("fbasic-block-address-map"), flagpd1("fblocks"), flagpd1("fblocks-runtime-optional"), flagpd1("fborland-extensions"), flagpd1("fbounds-check"), +flagpd1("fexperimental-bounds-safety"), sepd1("fbracket-depth"), flagpd1("fbranch-count-reg"), .{ @@ -2745,6 +2813,7 @@ flagpd1("fcf-protection"), flagpd1("fchar8_t"), flagpd1("fcheck-array-temporaries"), flagpd1("fcheck-new"), +flagpd1("fclangir"), .{ .name = "fcolor-diagnostics", .syntax = .flag, @@ -2773,6 +2842,8 @@ flagpd1("fcuda-flush-denormals-to-zero"), sepd1("fcuda-include-gpubinary"), flagpd1("fcuda-is-device"), flagpd1("fcuda-short-ptr"), +flagpd1("fcx-fortran-rules"), +flagpd1("fcx-limited-range"), flagpd1("fcxx-exceptions"), flagpd1("fcxx-modules"), flagpd1("fc++-static-destructors"), @@ -2806,6 +2877,7 @@ flagpd1("fdebug-ranges-base-address"), flagpd1("fdebug-types-section"), flagpd1("fdebug-unparse"), flagpd1("fdebug-unparse-no-sema"), +flagpd1("fdebug-unparse-with-modules"), flagpd1("fdebug-unparse-with-symbols"), flagpd1("fdebugger-cast-result-to-id"), flagpd1("fdebugger-objc-literal"), @@ -2836,6 +2908,7 @@ flagpd1("fdiagnostics-show-template-tree"), flagpd1("fdigraphs"), flagpd1("fdirect-access-external-data"), flagpd1("fdirectives-only"), +flagpd1("fdisable-block-signature-string"), flagpd1("fdisable-module-hash"), flagpd1("fdiscard-value-names"), flagpd1("fdollar-ok"), @@ -2867,6 +2940,7 @@ sepd1("ferror-limit"), flagpd1("fescaping-block-tail-calls"), flagpd1("fexceptions"), flagpd1("fexperimental-isel"), +flagpd1("fexperimental-late-parse-attributes"), flagpd1("fexperimental-library"), flagpd1("fexperimental-new-constant-interpreter"), flagpd1("fexperimental-omit-vtable-rtti"), @@ -3012,11 +3086,13 @@ flagpd1("fgpu-rdc"), flagpd1("fgpu-sanitize"), flagpd1("fhalf-no-semantic-interposition"), flagpd1("fheinous-gnu-extensions"), 
+flagpd1("fhermetic-module-files"), flagpd1("fhip-dump-offload-linker-script"), flagpd1("fhip-emit-relocatable"), flagpd1("fhip-fp32-correctly-rounded-divide-sqrt"), flagpd1("fhip-kernel-arg-name"), flagpd1("fhip-new-launch-api"), +flagpd1("fhlsl-strict-availability"), flagpd1("fhonor-infinities"), flagpd1("fhonor-nans"), flagpd1("fhosted"), @@ -3058,7 +3134,7 @@ flagpd1("fkeep-static-consts"), flagpd1("fkeep-system-includes"), flagpd1("flang-deprecated-no-hlfir"), flagpd1("flang-experimental-hlfir"), -flagpd1("flang-experimental-polymorphism"), +flagpd1("flang-experimental-integer-overflow"), flagpd1("flarge-sizes"), flagpd1("flat_namespace"), flagpd1("flax-vector-conversions"), @@ -3113,6 +3189,7 @@ flagpd1("fmodules-validate-system-headers"), flagpd1("fmodulo-sched"), flagpd1("fmodulo-sched-allow-regmoves"), flagpd1("fms-compatibility"), +flagpd1("fms-define-stdc"), flagpd1("fms-extensions"), flagpd1("fms-hotpatch"), flagpd1("fms-kernel"), @@ -3168,6 +3245,7 @@ flagpd1("fno-associative-math"), flagpd1("fno-assume-nothrow-exception-dtor"), flagpd1("fno-assume-sane-operator-new"), flagpd1("fno-assume-unique-vtables"), +flagpd1("fno-assumptions"), flagpd1("fno-async-exceptions"), flagpd1("fno-asynchronous-unwind-tables"), flagpd1("fno-auto-import"), @@ -3177,10 +3255,12 @@ flagpd1("fno-autolink"), flagpd1("fno-automatic"), flagpd1("fno-backslash"), flagpd1("fno-backtrace"), +flagpd1("fno-basic-block-address-map"), flagpd1("fno-bitfield-type-align"), flagpd1("fno-blocks"), flagpd1("fno-borland-extensions"), flagpd1("fno-bounds-check"), +flagpd1("fno-experimental-bounds-safety"), flagpd1("fno-branch-count-reg"), .{ .name = "fno-builtin", @@ -3202,6 +3282,7 @@ flagpd1("fno-caller-saves"), flagpd1("fno-char8_t"), flagpd1("fno-check-array-temporaries"), flagpd1("fno-check-new"), +flagpd1("fno-clangir"), .{ .name = "fno-color-diagnostics", .syntax = .flag, @@ -3223,6 +3304,8 @@ flagpd1("fno-cray-pointer"), flagpd1("fno-cuda-flush-denormals-to-zero"), 
flagpd1("fno-cuda-host-device-constexpr"), flagpd1("fno-cuda-short-ptr"), +flagpd1("fno-cx-fortran-rules"), +flagpd1("fno-cx-limited-range"), flagpd1("fno-cxx-exceptions"), flagpd1("fno-cxx-modules"), flagpd1("fno-c++-static-destructors"), @@ -3258,6 +3341,7 @@ flagpd1("fno-diagnostics-use-presumed-location"), flagpd1("fno-digraphs"), flagpd1("fno-direct-access-external-data"), flagpd1("fno-directives-only"), +flagpd1("fno-disable-block-signature-string"), flagpd1("fno-discard-value-names"), flagpd1("fno-dllexport-inlines"), flagpd1("fno-dollar-ok"), @@ -3277,6 +3361,7 @@ flagpd1("fno-emulated-tls"), flagpd1("fno-escaping-block-tail-calls"), flagpd1("fno-exceptions"), flagpd1("fno-experimental-isel"), +flagpd1("fno-experimental-late-parse-attributes"), flagpd1("fno-experimental-library"), flagpd1("fno-experimental-omit-vtable-rtti"), flagpd1("fno-experimental-relative-c++-abi-vtables"), @@ -3380,6 +3465,7 @@ flagpd1("fno-modules"), flagpd1("fno-modules-decluse"), flagpd1("fno-modules-error-recovery"), flagpd1("fno-modules-global-index"), +flagpd1("fno-modules-prune-non-affecting-module-map-files"), flagpd1("fno-modules-search-all"), flagpd1("fno-modules-share-filemanager"), flagpd1("fno-modules-skip-diagnostic-options"), @@ -3457,6 +3543,7 @@ flagpd1("fno-permissive"), .psl = false, }, flagpd1("fno-plt"), +flagpd1("fno-pointer-tbaa"), flagpd1("fno-ppc-native-vector-element-order"), flagpd1("fno-prebuilt-implicit-modules"), flagpd1("fno-prefetch-loop-arrays"), @@ -3476,7 +3563,18 @@ flagpd1("fno-profile-use"), flagpd1("fno-profile-values"), flagpd1("fno-protect-parens"), flagpd1("fno-pseudo-probe-for-profiling"), +flagpd1("fno-ptrauth-auth-traps"), +flagpd1("fno-ptrauth-calls"), +flagpd1("fno-ptrauth-function-pointer-type-discrimination"), +flagpd1("fno-ptrauth-indirect-gotos"), +flagpd1("fno-ptrauth-init-fini"), +flagpd1("fno-ptrauth-intrinsics"), +flagpd1("fno-ptrauth-returns"), +flagpd1("fno-ptrauth-type-info-vtable-pointer-discrimination"), 
+flagpd1("fno-ptrauth-vtable-pointer-address-discrimination"), +flagpd1("fno-ptrauth-vtable-pointer-type-discrimination"), flagpd1("fno-range-check"), +flagpd1("fno-raw-string-literals"), flagpd1("fno-real-4-real-10"), flagpd1("fno-real-4-real-16"), flagpd1("fno-real-4-real-8"), @@ -3496,13 +3594,13 @@ flagpd1("fno-relaxed-template-template-args"), flagpd1("fno-rename-registers"), flagpd1("fno-reorder-blocks"), flagpd1("fno-repack-arrays"), -flagpd1("fno-reroll-loops"), flagpd1("fno-rewrite-imports"), flagpd1("fno-rewrite-includes"), flagpd1("fno-ripa"), flagpd1("fno-ropi"), flagpd1("fno-rounding-math"), flagpd1("fno-rtlib-add-rpath"), +flagpd1("fno-rtlib-defaultlib"), flagpd1("fno-rtti"), flagpd1("fno-rtti-data"), flagpd1("fno-rwpi"), @@ -3536,6 +3634,7 @@ flagpd1("fno-schedule-insns2"), flagpd1("fno-second-underscore"), flagpd1("fno-see"), flagpd1("fno-semantic-interposition"), +flagpd1("fno-separate-named-sections"), flagpd1("fno-short-enums"), flagpd1("fno-short-wchar"), flagpd1("fno-show-column"), @@ -3741,6 +3840,7 @@ flagpd1("fpermissive"), .psl = false, }, flagpd1("fplt"), +flagpd1("fpointer-tbaa"), flagpd1("fppc-native-vector-element-order"), flagpd1("fprebuilt-implicit-modules"), flagpd1("fprefetch-loop-arrays"), @@ -3761,6 +3861,16 @@ flagpd1("fprofile-use"), flagpd1("fprofile-values"), flagpd1("fprotect-parens"), flagpd1("fpseudo-probe-for-profiling"), +flagpd1("fptrauth-auth-traps"), +flagpd1("fptrauth-calls"), +flagpd1("fptrauth-function-pointer-type-discrimination"), +flagpd1("fptrauth-indirect-gotos"), +flagpd1("fptrauth-init-fini"), +flagpd1("fptrauth-intrinsics"), +flagpd1("fptrauth-returns"), +flagpd1("fptrauth-type-info-vtable-pointer-discrimination"), +flagpd1("fptrauth-vtable-pointer-address-discrimination"), +flagpd1("fptrauth-vtable-pointer-type-discrimination"), .{ .name = "framework", .syntax = .separate, @@ -3770,6 +3880,7 @@ flagpd1("fpseudo-probe-for-profiling"), .psl = false, }, flagpd1("frange-check"), 
+flagpd1("fraw-string-literals"), flagpd1("freal-4-real-10"), flagpd1("freal-4-real-16"), flagpd1("freal-4-real-8"), @@ -3789,7 +3900,6 @@ flagpd1("frelaxed-template-template-args"), flagpd1("frename-registers"), flagpd1("freorder-blocks"), flagpd1("frepack-arrays"), -flagpd1("freroll-loops"), flagpd1("fretain-comments-from-system-headers"), flagpd1("frewrite-imports"), flagpd1("frewrite-includes"), @@ -3797,6 +3907,7 @@ flagpd1("fripa"), flagpd1("fropi"), flagpd1("frounding-math"), flagpd1("frtlib-add-rpath"), +flagpd1("frtlib-defaultlib"), flagpd1("frtti"), flagpd1("frtti-data"), flagpd1("frwpi"), @@ -3856,6 +3967,7 @@ flagpd1("fsecond-underscore"), flagpd1("fsee"), flagpd1("fseh-exceptions"), flagpd1("fsemantic-interposition"), +flagpd1("fseparate-named-sections"), flagpd1("fshort-enums"), flagpd1("fshort-wchar"), flagpd1("fshow-column"), @@ -4114,11 +4226,14 @@ flagpd1("gno-embed-source"), flagpd1("gno-gnu-pubnames"), flagpd1("gno-inline-line-tables"), flagpd1("gno-modules"), +flagpd1("gno-omit-unreferenced-methods"), flagpd1("gno-pubnames"), flagpd1("gno-record-command-line"), flagpd1("gno-simple-template-names"), flagpd1("gno-split-dwarf"), flagpd1("gno-strict-dwarf"), +flagpd1("gno-template-alias"), +flagpd1("gomit-unreferenced-methods"), .{ .name = "gpu-bundle-output", .syntax = .flag, @@ -4142,6 +4257,7 @@ flagpd1("gsce"), flagpd1("gsimple-template-names"), flagpd1("gsplit-dwarf"), flagpd1("gstrict-dwarf"), +flagpd1("gtemplate-alias"), flagpd1("gtoggle"), flagpd1("gused"), flagpd1("gz"), @@ -4202,8 +4318,8 @@ flagpd1("llvm-verify-each"), sepd1("load"), flagpd1("m16"), flagpd1("m32"), -m("m3dnow"), -m("m3dnowa"), +flagpd1("m3dnow"), +flagpd1("m3dnowa"), flagpd1("m64"), flagpd1("m68000"), flagpd1("m68010"), @@ -4223,16 +4339,20 @@ m("maes"), sepd1("main-file-name"), flagpd1("maix32"), flagpd1("maix64"), +m("maix-shared-lib-tls-model-opt"), +m("maix-small-local-dynamic-tls"), m("maix-small-local-exec-tls"), flagpd1("maix-struct-return"), 
flagpd1("malign-double"), m("maltivec"), flagpd1("mamdgpu-ieee"), +flagpd1("mamdgpu-precise-memory-op"), m("mamx-bf16"), m("mamx-complex"), m("mamx-fp16"), m("mamx-int8"), m("mamx-tile"), +flagpd1("mapx-inline-asm-use-gpr32"), flagpd1("mapxf"), flagpd1("marm"), flagpd1("massembler-fatal-warnings"), @@ -4248,11 +4368,9 @@ m("mavx512bitalg"), m("mavx512bw"), m("mavx512cd"), m("mavx512dq"), -m("mavx512er"), m("mavx512f"), m("mavx512fp16"), m("mavx512ifma"), -m("mavx512pf"), m("mavx512vbmi"), m("mavx512vbmi2"), m("mavx512vl"), @@ -4289,6 +4407,7 @@ m("mcrc"), m("mcrc32"), m("mcumode"), m("mcx16"), +flagpd1("mdaz-ftz"), sepd1("mdebug-pass"), m("mdirect-move"), flagpd1("mdouble-float"), @@ -4319,6 +4438,7 @@ m("mfloat128"), sepd1("mfloat-abi"), m("mfma"), m("mfma4"), +m("mforced-sw-shadow-stack"), flagpd1("mfp32"), m("mfp64"), sepd1("mfpmath"), @@ -4335,6 +4455,7 @@ flagpd1("mglibc"), flagpd1("mglobal-merge"), flagpd1("mgpopt"), flagpd1("mguarded-control-stack"), +m("mhalf-precision"), m("mhard-float"), m("mhard-quad-float"), m("mhvx"), @@ -4379,6 +4500,7 @@ flagpd1("mldc1-sdc1"), sepd1("mlimit-float-precision"), sepd1("mlink-bitcode-file"), sepd1("mlink-builtin-bitcode"), +flagpd1("mlink-builtin-bitcode-postopt"), flagpd1("mlittle-endian"), sepd1("mllvm"), flagpd1("mlocal-sdata"), @@ -4387,6 +4509,7 @@ flagpd1("mlong-double-128"), flagpd1("mlong-double-64"), flagpd1("mlong-double-80"), m("mlongcall"), +flagpd1("mlr-for-calls-only"), m("mlsx"), m("mlvi-cfi"), flagpd1("mlvi-hardening"), @@ -4412,14 +4535,15 @@ m("mmultimemory"), m("mmultivalue"), m("mmutable-globals"), m("mmwaitx"), -m("mno-3dnow"), -m("mno-3dnowa"), +flagpd1("mno-3dnow"), +flagpd1("mno-3dnowa"), flagpd1("mno-80387"), flagpd1("mno-abicalls"), m("mno-adx"), m("mno-aes"), m("mno-altivec"), flagpd1("mno-amdgpu-ieee"), +flagpd1("mno-amdgpu-precise-memory-op"), m("mno-amx-bf16"), m("mno-amx-complex"), m("mno-amx-fp16"), @@ -4437,11 +4561,9 @@ m("mno-avx512bitalg"), m("mno-avx512bw"), m("mno-avx512cd"), 
m("mno-avx512dq"), -m("mno-avx512er"), m("mno-avx512f"), m("mno-avx512fp16"), m("mno-avx512ifma"), -m("mno-avx512pf"), m("mno-avx512vbmi"), m("mno-avx512vbmi2"), m("mno-avx512vl"), @@ -4473,6 +4595,7 @@ m("mno-crc"), m("mno-crc32"), m("mno-cumode"), m("mno-cx16"), +flagpd1("mno-daz-ftz"), m("mno-dsp"), m("mno-dspr2"), flagpd1("mno-embedded-data"), @@ -4492,6 +4615,7 @@ m("mno-float128"), m("mno-fma"), m("mno-fma4"), m("mno-fmv"), +m("mno-forced-sw-shadow-stack"), flagpd1("mno-fp-ret-in-387"), m("mno-fprnd"), m("mno-fpu"), @@ -4503,6 +4627,7 @@ m("mno-gfni"), m("mno-ginv"), flagpd1("mno-global-merge"), flagpd1("mno-gpopt"), +m("mno-half-precision"), m("mno-hvx"), m("mno-hvx-ieee-fp"), m("mno-hvx-qfloat"), @@ -4518,6 +4643,7 @@ m("mno-isel"), m("mno-kl"), m("mno-lasx"), flagpd1("mno-ldc1-sdc1"), +flagpd1("mno-link-builtin-bitcode-postopt"), flagpd1("mno-local-sdata"), m("mno-long-calls"), m("mno-longcall"), @@ -4569,7 +4695,6 @@ m("mno-power10-vector"), m("mno-power8-vector"), m("mno-power9-vector"), m("mno-prefetchi"), -m("mno-prefetchwt1"), flagpd1("mno-prefixed"), m("mno-prfchw"), m("mno-ptwrite"), @@ -4600,6 +4725,7 @@ flagpd1("mno-rtd"), m("mno-rtm"), m("mno-sahf"), m("mno-save-restore"), +flagpd1("mno-scalar-strict-align"), flagpd1("mno-scatter"), m("mno-serialize"), m("mno-seses"), @@ -4631,13 +4757,16 @@ m("mno-tbm"), m("mno-tgsplit"), flagpd1("mno-thumb"), flagpd1("mno-tls-direct-seg-refs"), +flagpd1("mno-tocdata"), m("mno-tsxldtrk"), flagpd1("mno-type-check"), m("mno-uintr"), flagpd1("mno-unaligned-access"), +m("mno-unaligned-symbols"), flagpd1("mno-unsafe-fp-atomics"), m("mno-usermsr"), m("mno-vaes"), +flagpd1("mno-vector-strict-align"), flagpd1("mno-vevpu"), m("mno-virt"), m("mno-vis"), @@ -4674,6 +4803,7 @@ sepd1("module-dependency-dir"), flagpd1("module-file-deps"), flagpd1("module-file-info"), sepd1("module-suffix"), +flagpd1("fexperimental-modules-reduced-bmi"), flagpd1("momit-leaf-frame-pointer"), flagpd1("moutline"), m("moutline-atomics"), @@ 
-4694,7 +4824,6 @@ m("mcrypto"), m("mpower8-vector"), m("mpower9-vector"), m("mprefetchi"), -m("mprefetchwt1"), flagpd1("mprefixed"), m("mprfchw"), m("mprivileged"), @@ -4735,6 +4864,7 @@ m("mrtm"), m("msahf"), m("msave-restore"), flagpd1("msave-temp-labels"), +flagpd1("mscalar-strict-align"), m("msecure-plt"), m("mserialize"), m("msgx"), @@ -4755,6 +4885,7 @@ m("mspe"), flagpd1("mspeculative-load-hardening"), m("msse"), m("msse2"), +flagpd1("msse2avx"), m("msse3"), flagpd1("msse4"), m("msse4.1"), @@ -4772,6 +4903,7 @@ m("mtgsplit"), sepd1("mthread-model"), flagpd1("mthumb"), flagpd1("mtls-direct-seg-refs"), +flagpd1("mtocdata"), sepd1("mtp"), m("mtsxldtrk"), flagpd1("muclibc"), @@ -4780,6 +4912,7 @@ flagpd1("multi_module"), sepd1("multiply_defined"), sepd1("multiply_defined_unused"), flagpd1("munaligned-access"), +m("munaligned-symbols"), flagpd1("munsafe-fp-atomics"), m("musermsr"), m("mv5"), @@ -4796,6 +4929,7 @@ m("mv71"), flagpd1("mv71t"), m("mv73"), m("mvaes"), +flagpd1("mvector-strict-align"), flagpd1("mvevpu"), m("mvirt"), m("mvis"), @@ -4854,7 +4988,6 @@ flagpd1("no-cpp-precomp"), }, flagpd1("no-emit-llvm-uselists"), flagpd1("no-enable-noundef-analysis"), -flagpd1("fno-fortran-main"), .{ .name = "no-gpu-bundle-output", .syntax = .flag, @@ -4916,6 +5049,14 @@ flagpd1("no-implicit-float"), flagpd1("no-pthread"), flagpd1("no-round-trip-args"), flagpd1("no-struct-path-tbaa"), +.{ + .name = "no-wasm-opt", + .syntax = .flag, + .zig_equivalent = .other, + .pd1 = false, + .pd2 = true, + .psl = false, +}, flagpd1("nobuiltininc"), flagpd1("nocpp"), flagpd1("nodefaultlibs"), @@ -5114,6 +5255,7 @@ sepd1("pic-level"), .psl = false, }, sepd1("plugin"), +flagpd1("pointer-tbaa"), flagpd1("prebind"), flagpd1("prebind_all_twolevel_modules"), flagpd1("preload"), @@ -5134,6 +5276,14 @@ flagpd1("print-dependency-directives-minimized-source"), .pd2 = true, .psl = false, }, +.{ + .name = "print-enabled-extensions", + .syntax = .flag, + .zig_equivalent = .other, + .pd1 = true, + 
.pd2 = true, + .psl = false, +}, flagpd1("print-ivar-layout"), .{ .name = "print-libgcc-file-name", @@ -5209,6 +5359,14 @@ flagpd1("print-preamble"), .psl = false, }, flagpd1("print-stats"), +.{ + .name = "print-library-module-manifest-path", + .syntax = .flag, + .zig_equivalent = .other, + .pd1 = true, + .pd2 = true, + .psl = false, +}, .{ .name = "print-supported-cpus", .syntax = .flag, @@ -5262,6 +5420,7 @@ flagpd1("pthreads"), }, sepd1("read_only_relocs"), sepd1("record-command-line"), +sepd1("reexport_framework"), flagpd1("regcall4"), flagpd1("relaxed-aliasing"), .{ @@ -5729,6 +5888,14 @@ flagpd1("version"), .psl = false, }, flagpd1("w"), +.{ + .name = "wasm-opt", + .syntax = .flag, + .zig_equivalent = .other, + .pd1 = false, + .pd2 = true, + .psl = false, +}, .{ .name = "weak_framework", .syntax = .separate, @@ -5921,6 +6088,14 @@ joinpd1("ftemplate-backtrace-limit="), .psl = false, }, joinpd1("objcmt-allowlist-dir-path="), +.{ + .name = "offload-compression-level=", + .syntax = .joined, + .zig_equivalent = .other, + .pd1 = false, + .pd2 = true, + .psl = false, +}, joinpd1("Wno-nonportable-cfstrings"), joinpd1("analyzer-disable-checker="), joinpd1("fbuild-session-timestamp="), @@ -5996,6 +6171,7 @@ joinpd1("fdebug-default-version="), joinpd1("ffp-exception-behavior="), joinpd1("fmacro-backtrace-limit="), joinpd1("fmax-array-constructor="), +joinpd1("fmcdc-max-test-vectors="), joinpd1("fprofile-exclude-files="), joinpd1("frandomize-layout-seed="), joinpd1("ftrivial-auto-var-init="), @@ -6050,6 +6226,7 @@ jspd1("compatibility_version"), jspd1("dylinker_install_name"), joinpd1("fcoverage-prefix-map="), joinpd1("fcs-profile-generate="), +joinpd1("fmcdc-max-conditions="), joinpd1("fmodules-prune-after="), .{ .name = "fno-sanitize-recover=", @@ -6113,6 +6290,7 @@ joinpd1("fsanitize-blacklist="), .psl = false, }, joinpd1("fbuild-session-file="), +joinpd1("fcomplex-arithmetic="), joinpd1("fdiagnostics-format="), joinpd1("fgpu-default-stream="), 
joinpd1("fmax-stack-var-size="), @@ -6120,8 +6298,6 @@ joinpd1("fmemory-profile-use="), joinpd1("fmodules-cache-path="), joinpd1("fmodules-embed-file="), joinpd1("fms-omit-default-lib"), -joinpd1("fno-cx-fortran-rules"), -joinpd1("fno-cx-limited-range"), joinpd1("fprofile-instrument="), joinpd1("fprofile-sample-use="), joinpd1("fstrict-flex-arrays="), @@ -6162,6 +6338,7 @@ joinpd1("fprofile-instr-use="), .psl = false, }, joinpd1("fthin-link-bitcode="), +joinpd1("ftime-trace-verbose"), .{ .name = "gpu-instrument-lib=", .syntax = .joined, @@ -6222,14 +6399,6 @@ joinpd1("target-sdk-version="), .pd2 = true, .psl = false, }, -.{ - .name = "emit-symbol-graph=", - .syntax = .joined_or_separate, - .zig_equivalent = .other, - .pd1 = false, - .pd2 = true, - .psl = false, -}, joinpd1("fbinutils-version="), joinpd1("fclang-abi-compat="), joinpd1("fcompile-resource="), @@ -6287,8 +6456,6 @@ joinpd1("coverage-version="), joinpd1("falign-functions="), joinpd1("fconstexpr-depth="), joinpd1("fconstexpr-steps="), -joinpd1("fcx-fortran-rules"), -joinpd1("fcx-limited-range"), joinpd1("ffile-prefix-map="), joinpd1("fmodule-map-file="), joinpd1("fobjc-arc-cxxlib="), @@ -6321,6 +6488,14 @@ joinpd1("mzos-sys-include="), .psl = false, }, joinpd1("object-file-name="), +.{ + .name = "symbol-graph-dir=", + .syntax = .joined, + .zig_equivalent = .other, + .pd1 = false, + .pd2 = true, + .psl = false, +}, .{ .name = "headerUnit:angle", .syntax = .joined_or_separate, @@ -6416,6 +6591,7 @@ joinpd1("mzos-hlq-csslib="), .pd2 = true, .psl = false, }, +jspd1("reexport_library"), jspd1("stdlib++-isystem"), joinpd1("Rpass-analysis="), .{ @@ -6750,6 +6926,14 @@ jspd1("iwithsysroot"), }, joinpd1("mharden-sls="), joinpd1("mhvx-length="), +.{ + .name = "mno-tocdata=", + .syntax = .comma_joined, + .zig_equivalent = .other, + .pd1 = true, + .pd2 = false, + .psl = false, +}, joinpd1("mvscale-max="), joinpd1("mvscale-min="), joinpd1("mzos-hlq-le="), @@ -6927,6 +7111,14 @@ jspd1("sub_library"), .pd2 = true, .psl 
= false, }, +.{ + .name = "embed-dir=", + .syntax = .joined, + .zig_equivalent = .other, + .pd1 = false, + .pd2 = true, + .psl = false, +}, joinpd1("fencoding="), joinpd1("ffp-model="), joinpd1("ffpe-trap="), @@ -6945,6 +7137,7 @@ jspd1("iframework"), jspd1("module-dir"), joinpd1("mtargetos="), joinpd1("mtls-size="), +joinpd1("reexport-l"), .{ .name = "rocm-path=", .syntax = .joined, @@ -7066,6 +7259,14 @@ joinpd1("ftabstop="), }, jspd1("idirafter"), joinpd1("mregparm="), +.{ + .name = "mtocdata=", + .syntax = .comma_joined, + .zig_equivalent = .other, + .pd1 = true, + .pd2 = false, + .psl = false, +}, joinpd1("sycl-std="), .{ .name = "undefined", @@ -7214,6 +7415,7 @@ joinpd1("fcheck="), }, jspd1("iprefix"), jspd1("isystem"), +joinpd1("marm64x"), joinpd1("mguard="), joinpd1("mhwdiv="), joinpd1("moslib="), @@ -7320,6 +7522,7 @@ jspd1("iquote"), .pd2 = false, .psl = false, }, +joinpd1("msimd="), .{ .name = "mtune=", .syntax = .joined, @@ -7479,7 +7682,31 @@ joinpd1("mtp="), }, .{ .name = "Fe:", - .syntax = .joined, + .syntax = .joined_or_separate, + .zig_equivalent = .other, + .pd1 = true, + .pd2 = false, + .psl = true, +}, +.{ + .name = "Fi:", + .syntax = .joined_or_separate, + .zig_equivalent = .other, + .pd1 = true, + .pd2 = false, + .psl = true, +}, +.{ + .name = "Fo:", + .syntax = .joined_or_separate, + .zig_equivalent = .other, + .pd1 = true, + .pd2 = false, + .psl = true, +}, +.{ + .name = "Fp:", + .syntax = .joined_or_separate, .zig_equivalent = .other, .pd1 = true, .pd2 = false, From dc14434c0aed409a9152a05e4741623e0e99f32a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Fri, 23 Aug 2024 01:24:37 +0200 Subject: [PATCH 164/202] clang: Update compiler-provided C headers to Clang 19. 
--- lib/include/__clang_cuda_intrinsics.h | 4 +- lib/include/__stdarg_header_macro.h | 12 + lib/include/__stddef_header_macro.h | 12 + lib/include/arm_acle.h | 36 +- lib/include/arm_fp16.h | 2 +- lib/include/arm_neon.h | 87597 +++++++++++----------- lib/include/arm_sme.h | 409 +- lib/include/arm_sve.h | 54980 +++++++------- lib/include/arm_vector_types.h | 10 +- lib/include/avx512erintrin.h | 271 - lib/include/avx512fp16intrin.h | 76 +- lib/include/avx512pfintrin.h | 92 - lib/include/avxintrin.h | 102 +- lib/include/bmiintrin.h | 6 +- lib/include/builtins.h | 3 + lib/include/cpuid.h | 26 +- lib/include/cuda_wrappers/algorithm | 2 +- lib/include/emmintrin.h | 469 +- lib/include/float.h | 28 + lib/include/fmaintrin.h | 48 +- lib/include/ia32intrin.h | 72 +- lib/include/immintrin.h | 244 +- lib/include/intrin.h | 272 +- lib/include/intrin0.h | 247 + lib/include/inttypes.h | 4 + lib/include/iso646.h | 4 + lib/include/keylockerintrin.h | 13 +- lib/include/limits.h | 5 + lib/include/llvm_libc_wrappers/assert.h | 2 +- lib/include/mm3dnow.h | 147 +- lib/include/mmintrin.h | 160 +- lib/include/module.modulemap | 15 +- lib/include/opencl-c-base.h | 4 + lib/include/opencl-c.h | 15 + lib/include/prfchwintrin.h | 18 +- lib/include/ptrauth.h | 330 + lib/include/riscv_vector.h | 4 - lib/include/sifive_vector.h | 102 + lib/include/smmintrin.h | 24 +- lib/include/stdalign.h | 5 + lib/include/stdarg.h | 34 +- lib/include/stdatomic.h | 12 +- lib/include/stdbool.h | 5 + lib/include/stddef.h | 60 +- lib/include/stdint.h | 5 + lib/include/stdnoreturn.h | 6 + lib/include/tmmintrin.h | 36 +- lib/include/varargs.h | 6 +- lib/include/x86gprintrin.h | 21 +- lib/include/x86intrin.h | 32 +- lib/include/xmmintrin.h | 382 +- lib/include/yvals_core.h | 25 + lib/include/zos_wrappers/builtins.h | 18 + 53 files changed, 73474 insertions(+), 73040 deletions(-) create mode 100644 lib/include/__stdarg_header_macro.h create mode 100644 lib/include/__stddef_header_macro.h delete mode 100644 
lib/include/avx512erintrin.h delete mode 100644 lib/include/avx512pfintrin.h create mode 100644 lib/include/intrin0.h create mode 100644 lib/include/ptrauth.h create mode 100644 lib/include/yvals_core.h create mode 100644 lib/include/zos_wrappers/builtins.h diff --git a/lib/include/__clang_cuda_intrinsics.h b/lib/include/__clang_cuda_intrinsics.h index 3c3948863c1d..a04e8b6de44d 100644 --- a/lib/include/__clang_cuda_intrinsics.h +++ b/lib/include/__clang_cuda_intrinsics.h @@ -215,9 +215,7 @@ inline __device__ unsigned int __activemask() { #if CUDA_VERSION < 9020 return __nvvm_vote_ballot(1); #else - unsigned int mask; - asm volatile("activemask.b32 %0;" : "=r"(mask)); - return mask; + return __nvvm_activemask(); #endif } diff --git a/lib/include/__stdarg_header_macro.h b/lib/include/__stdarg_header_macro.h new file mode 100644 index 000000000000..beb92ee02526 --- /dev/null +++ b/lib/include/__stdarg_header_macro.h @@ -0,0 +1,12 @@ +/*===---- __stdarg_header_macro.h ------------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __STDARG_H +#define __STDARG_H +#endif diff --git a/lib/include/__stddef_header_macro.h b/lib/include/__stddef_header_macro.h new file mode 100644 index 000000000000..db5fb3c0abc1 --- /dev/null +++ b/lib/include/__stddef_header_macro.h @@ -0,0 +1,12 @@ +/*===---- __stddef_header_macro.h ------------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __STDDEF_H +#define __STDDEF_H +#endif diff --git a/lib/include/arm_acle.h b/lib/include/arm_acle.h index 9cd34948e3c5..1518b0c4c842 100644 --- a/lib/include/arm_acle.h +++ b/lib/include/arm_acle.h @@ -75,6 +75,14 @@ static __inline__ void __attribute__((__always_inline__, __nodebug__)) __yield(v #define __dbg(t) __builtin_arm_dbg(t) #endif +#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE +#define _CHKFEAT_GCS 1 +static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__)) +__chkfeat(uint64_t __features) { + return __builtin_arm_chkfeat(__features) ^ __features; +} +#endif + /* 7.5 Swap */ static __inline__ uint32_t __attribute__((__always_inline__, __nodebug__)) __swp(uint32_t __x, volatile uint32_t *__p) { @@ -109,7 +117,7 @@ __swp(uint32_t __x, volatile uint32_t *__p) { #endif /* 7.7 NOP */ -#if !defined(_MSC_VER) || !defined(__aarch64__) +#if !defined(_MSC_VER) || (!defined(__aarch64__) && !defined(__arm64ec__)) static __inline__ void __attribute__((__always_inline__, __nodebug__)) __nop(void) { __builtin_arm_nop(); } @@ -313,7 +321,7 @@ __qdbl(int32_t __t) { } #endif -/* 8.4.3 Accumultating multiplications */ +/* 8.4.3 Accumulating multiplications */ #if defined(__ARM_FEATURE_DSP) && __ARM_FEATURE_DSP static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) __smlabb(int32_t __a, int32_t __b, int32_t __c) { @@ -545,7 +553,7 @@ __usub16(uint16x2_t __a, uint16x2_t __b) { } #endif -/* 8.5.10 Parallel 16-bit multiplications */ +/* 8.5.10 Parallel 16-bit multiplication */ #if defined(__ARM_FEATURE_SIMD32) && __ARM_FEATURE_SIMD32 static __inline__ int32_t __attribute__((__always_inline__, __nodebug__)) __smlad(int16x2_t __a, int16x2_t __b, int32_t __c) { @@ -748,7 +756,7 @@ __arm_st64bv0(void *__addr, data512_t __value) { #define __arm_wsrf(sysreg, v) __arm_wsr(sysreg, 
__builtin_bit_cast(uint32_t, v)) #define __arm_wsrf64(sysreg, v) __arm_wsr64(sysreg, __builtin_bit_cast(uint64_t, v)) -/* 10.3 Memory Tagging Extensions (MTE) Intrinsics */ +/* 10.3 MTE intrinsics */ #if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE #define __arm_mte_create_random_tag(__ptr, __mask) __builtin_arm_irg(__ptr, __mask) #define __arm_mte_increment_tag(__ptr, __tag_offset) __builtin_arm_addg(__ptr, __tag_offset) @@ -757,7 +765,7 @@ __arm_st64bv0(void *__addr, data512_t __value) { #define __arm_mte_set_tag(__ptr) __builtin_arm_stg(__ptr) #define __arm_mte_ptrdiff(__ptra, __ptrb) __builtin_arm_subp(__ptra, __ptrb) -/* 18 Memory Operations Intrinsics */ +/* 18 memcpy family of operations intrinsics - MOPS */ #define __arm_mops_memset_tag(__tagged_address, __value, __size) \ __builtin_arm_mops_memset_tag(__tagged_address, __value, __size) #endif @@ -855,6 +863,24 @@ __rndrrs(uint64_t *__p) { } #endif +/* 11.2 Guarded Control Stack intrinsics */ +#if defined(__ARM_64BIT_STATE) && __ARM_64BIT_STATE +static __inline__ void * __attribute__((__always_inline__, __nodebug__)) +__gcspr() { + return (void *)__builtin_arm_rsr64("gcspr_el0"); +} + +static __inline__ uint64_t __attribute__((__always_inline__, __nodebug__, target("gcs"))) +__gcspopm() { + return __builtin_arm_gcspopm(0); +} + +static __inline__ const void * __attribute__((__always_inline__, __nodebug__, target("gcs"))) +__gcsss(const void *__stack) { + return __builtin_arm_gcsss(__stack); +} +#endif + #if defined(__cplusplus) } #endif diff --git a/lib/include/arm_fp16.h b/lib/include/arm_fp16.h index f114c6997b76..2dd0653ab038 100644 --- a/lib/include/arm_fp16.h +++ b/lib/include/arm_fp16.h @@ -29,7 +29,7 @@ typedef __fp16 float16_t; #define __ai static __inline__ __attribute__((__always_inline__, __nodebug__)) -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(__arm64ec__) #define vabdh_f16(__p0, __p1) __extension__ ({ \ float16_t __ret; \ float16_t __s0 = __p0; \ diff --git 
a/lib/include/arm_neon.h b/lib/include/arm_neon.h index 97431fccab5d..b67616134b88 100644 --- a/lib/include/arm_neon.h +++ b/lib/include/arm_neon.h @@ -28,15 +28,11 @@ #error "NEON intrinsics not available with the soft-float ABI. Please use -mfloat-abi=softfp or -mfloat-abi=hard" #else -#if !defined(__ARM_NEON) -#error "NEON support not enabled" -#else - #include #include #include -#ifdef __aarch64__ +#if defined(__aarch64__) || defined(__arm64ec__) typedef uint8_t poly8_t; typedef uint16_t poly16_t; typedef uint64_t poly64_t; @@ -128,13147 +124,13213 @@ typedef struct poly64x2x4_t { #define __ai static __inline__ __attribute__((__always_inline__, __nodebug__)) #ifdef __LITTLE_ENDIAN__ -#define splat_lane_p8(__p0, __p1) __extension__ ({ \ - poly8x8_t __ret; \ - poly8x8_t __s0 = __p0; \ - __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 4); \ +#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_bf16((int8x8_t)__s0, __p1, 11); \ __ret; \ }) #else -#define splat_lane_p8(__p0, __p1) __extension__ ({ \ - poly8x8_t __ret; \ - poly8x8_t __s0 = __p0; \ - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 4); \ +#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_bf16((int8x8_t)__rev0, __p1, 11); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) -#define __noswap_splat_lane_p8(__p0, __p1) __extension__ ({ \ - poly8x8_t __ret; \ - poly8x8_t __s0 = __p0; \ - __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 4); \ +#define __noswap_splatq_lane_bf16(__p0, __p1) __extension__ ({ \ + 
bfloat16x8_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_bf16((int8x8_t)__s0, __p1, 11); \ __ret; \ }) #endif -#define splat_lane_p64(__p0, __p1) __extension__ ({ \ - poly64x1_t __ret; \ - poly64x1_t __s0 = __p0; \ - __ret = (poly64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 6); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define splat_lane_p16(__p0, __p1) __extension__ ({ \ - poly16x4_t __ret; \ - poly16x4_t __s0 = __p0; \ - __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \ +#define splat_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16x4_t) __builtin_neon_splat_lane_bf16((int8x8_t)__s0, __p1, 11); \ __ret; \ }) #else -#define splat_lane_p16(__p0, __p1) __extension__ ({ \ - poly16x4_t __ret; \ - poly16x4_t __s0 = __p0; \ - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 5); \ +#define splat_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (bfloat16x4_t) __builtin_neon_splat_lane_bf16((int8x8_t)__rev0, __p1, 11); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) -#define __noswap_splat_lane_p16(__p0, __p1) __extension__ ({ \ - poly16x4_t __ret; \ - poly16x4_t __s0 = __p0; \ - __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \ +#define __noswap_splat_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16x4_t) __builtin_neon_splat_lane_bf16((int8x8_t)__s0, __p1, 11); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_lane_p8(__p0, __p1) __extension__ ({ \ - poly8x16_t __ret; \ - poly8x8_t __s0 = __p0; \ - __ret = (poly8x16_t) 
__builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \ +#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_bf16((int8x16_t)__s0, __p1, 43); \ __ret; \ }) #else -#define splatq_lane_p8(__p0, __p1) __extension__ ({ \ - poly8x16_t __ret; \ - poly8x8_t __s0 = __p0; \ - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 4); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_bf16((int8x16_t)__rev0, __p1, 43); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) -#define __noswap_splatq_lane_p8(__p0, __p1) __extension__ ({ \ - poly8x16_t __ret; \ - poly8x8_t __s0 = __p0; \ - __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \ +#define __noswap_splatq_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_bf16((int8x16_t)__s0, __p1, 43); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_lane_p64(__p0, __p1) __extension__ ({ \ - poly64x2_t __ret; \ - poly64x1_t __s0 = __p0; \ - __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \ +#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_bf16((int8x16_t)__s0, __p1, 43); \ __ret; \ }) #else -#define splatq_lane_p64(__p0, __p1) __extension__ ({ \ - poly64x2_t __ret; \ - poly64x1_t __s0 = __p0; \ - 
__ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ +#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_bf16((int8x16_t)__rev0, __p1, 43); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) -#define __noswap_splatq_lane_p64(__p0, __p1) __extension__ ({ \ - poly64x2_t __ret; \ - poly64x1_t __s0 = __p0; \ - __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \ +#define __noswap_splat_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_bf16((int8x16_t)__s0, __p1, 43); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_lane_p16(__p0, __p1) __extension__ ({ \ - poly16x8_t __ret; \ - poly16x4_t __s0 = __p0; \ - __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \ +__ai __attribute__((target("bf16,neon"))) float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfdotq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vbfdotq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 
0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float32x4_t __noswap_vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfdotq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vbfdot_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + bfloat16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vbfdot_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float32x2_t __noswap_vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vbfdot_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfmlalbq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + 
bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vbfmlalbq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float32x4_t __noswap_vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfmlalbq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfmlaltq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vbfmlaltq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) float32x4_t __noswap_vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfmlaltq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) float32x4_t vbfmmlaq_f32(float32x4_t __p0, 
bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vbfmmlaq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vbfmmlaq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { + bfloat16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { + bfloat16x8_t __ret; + bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t __noswap_vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { + bfloat16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#endif + +#define vcreate_bf16(__p0) __extension__ ({ \ + bfloat16x4_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (bfloat16x4_t)(__promote); \ __ret; \ }) -#else -#define splatq_lane_p16(__p0, __p1) __extension__ ({ \ - poly16x8_t 
__ret; \ - poly16x4_t __s0 = __p0; \ - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 5); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ +__ai __attribute__((target("bf16,neon"))) float32_t vcvtah_f32_bf16(bfloat16_t __p0) { + float32_t __ret; +bfloat16_t __reint = __p0; +int32_t __reint1 = (int32_t)(*(int16_t *) &__reint) << 16; + __ret = *(float32_t *) &__reint1; + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16_t vcvth_bf16_f32(float32_t __p0) { + bfloat16_t __ret; + __ret = (bfloat16_t) __builtin_neon_vcvth_bf16_f32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__s0, __p1); \ __ret; \ }) -#define __noswap_splatq_lane_p16(__p0, __p1) __extension__ ({ \ - poly16x8_t __ret; \ - poly16x4_t __s0 = __p0; \ - __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \ +#else +#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_lane_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __ret; \ - uint8x8_t __s0 = __p0; \ - __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \ - __ret; \ +#define vdupq_lane_bf16(__p0_0, __p1_0) __extension__ ({ \ + bfloat16x8_t __ret_0; \ + bfloat16x4_t __s0_0 = __p0_0; \ + __ret_0 = splatq_lane_bf16(__s0_0, __p1_0); \ + __ret_0; \ }) #else -#define splatq_lane_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __ret; \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __rev0; __rev0 = 
__builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splatq_lane_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __ret; \ - uint8x8_t __s0 = __p0; \ - __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \ - __ret; \ +#define vdupq_lane_bf16(__p0_1, __p1_1) __extension__ ({ \ + bfloat16x8_t __ret_1; \ + bfloat16x4_t __s0_1 = __p0_1; \ + bfloat16x4_t __rev0_1; __rev0_1 = __builtin_shufflevector(__s0_1, __s0_1, 3, 2, 1, 0); \ + __ret_1 = __noswap_splatq_lane_bf16(__rev0_1, __p1_1); \ + __ret_1 = __builtin_shufflevector(__ret_1, __ret_1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_1; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_lane_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __ret; \ - uint32x2_t __s0 = __p0; \ - __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \ - __ret; \ +#define vdup_lane_bf16(__p0_2, __p1_2) __extension__ ({ \ + bfloat16x4_t __ret_2; \ + bfloat16x4_t __s0_2 = __p0_2; \ + __ret_2 = splat_lane_bf16(__s0_2, __p1_2); \ + __ret_2; \ }) #else -#define splatq_lane_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __ret; \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ +#define vdup_lane_bf16(__p0_3, __p1_3) __extension__ ({ \ + bfloat16x4_t __ret_3; \ + bfloat16x4_t __s0_3 = __p0_3; \ + bfloat16x4_t __rev0_3; __rev0_3 = __builtin_shufflevector(__s0_3, __s0_3, 3, 2, 1, 0); \ + __ret_3 = __noswap_splat_lane_bf16(__rev0_3, __p1_3); \ + __ret_3 = __builtin_shufflevector(__ret_3, __ret_3, 3, 2, 1, 0); \ + __ret_3; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vduph_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__s0, __p1); \ __ret; \ }) -#define __noswap_splatq_lane_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __ret; \ - uint32x2_t __s0 = __p0; \ - __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \ +#else +#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_lane_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __ret; \ - uint64x1_t __s0 = __p0; \ - __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \ - __ret; \ +#define vdupq_laneq_bf16(__p0_4, __p1_4) __extension__ ({ \ + bfloat16x8_t __ret_4; \ + bfloat16x8_t __s0_4 = __p0_4; \ + __ret_4 = splatq_laneq_bf16(__s0_4, __p1_4); \ + __ret_4; \ }) #else -#define splatq_lane_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __ret; \ - uint64x1_t __s0 = __p0; \ - __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ +#define vdupq_laneq_bf16(__p0_5, __p1_5) __extension__ ({ \ + bfloat16x8_t __ret_5; \ + bfloat16x8_t __s0_5 = __p0_5; \ + bfloat16x8_t __rev0_5; __rev0_5 = __builtin_shufflevector(__s0_5, __s0_5, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_5 = __noswap_splatq_laneq_bf16(__rev0_5, __p1_5); \ + __ret_5 = __builtin_shufflevector(__ret_5, __ret_5, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_5; \ }) -#define __noswap_splatq_lane_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __ret; \ - uint64x1_t __s0 = __p0; \ - __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \ - __ret; \ +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vdup_laneq_bf16(__p0_6, __p1_6) __extension__ ({ \ + bfloat16x4_t __ret_6; \ + bfloat16x8_t __s0_6 = __p0_6; \ + __ret_6 = splat_laneq_bf16(__s0_6, __p1_6); \ + __ret_6; \ +}) +#else +#define vdup_laneq_bf16(__p0_7, __p1_7) __extension__ ({ \ + bfloat16x4_t __ret_7; \ + bfloat16x8_t __s0_7 = __p0_7; \ + bfloat16x8_t __rev0_7; __rev0_7 = __builtin_shufflevector(__s0_7, __s0_7, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_7 = __noswap_splat_laneq_bf16(__rev0_7, __p1_7); \ + __ret_7 = __builtin_shufflevector(__ret_7, __ret_7, 3, 2, 1, 0); \ + __ret_7; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_lane_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __ret; \ - uint16x4_t __s0 = __p0; \ - __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \ +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#else +__ai 
__attribute__((target("bf16,neon"))) bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t __noswap_vget_high_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \ __ret; \ }) #else -#define splatq_lane_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __ret; \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__rev0, __p1); \ __ret; \ }) -#define __noswap_splatq_lane_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __ret; \ - uint16x4_t __s0 = __p0; \ - __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \ +#define __noswap_vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x8_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_lane_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __ret; \ - int8x8_t __s0 = __p0; \ - __ret = (int8x16_t) 
__builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \ +#define vget_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \ __ret; \ }) #else -#define splatq_lane_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __ret; \ - int8x8_t __s0 = __p0; \ - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vget_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__rev0, __p1); \ __ret; \ }) -#define __noswap_splatq_lane_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __ret; \ - int8x8_t __s0 = __p0; \ - __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \ +#define __noswap_vget_lane_bf16(__p0, __p1) __extension__ ({ \ + bfloat16_t __ret; \ + bfloat16x4_t __s0 = __p0; \ + __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_lane_f64(__p0, __p1) __extension__ ({ \ - float64x2_t __ret; \ - float64x1_t __s0 = __p0; \ - __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \ - __ret; \ -}) +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + return __ret; +} #else -#define splatq_lane_f64(__p0, __p1) __extension__ ({ \ - float64x2_t __ret; \ - float64x1_t __s0 = __p0; \ - __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \ - __ret = __builtin_shufflevector(__ret, 
__ret, 1, 0); \ +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t __noswap_vget_low_bf16(bfloat16x8_t __p0) { + bfloat16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_bf16(__p0) __extension__ ({ \ + bfloat16x8_t __ret; \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_bf16(__p0, 43); \ __ret; \ }) -#define __noswap_splatq_lane_f64(__p0, __p1) __extension__ ({ \ - float64x2_t __ret; \ - float64x1_t __s0 = __p0; \ - __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \ +#else +#define vld1q_bf16(__p0) __extension__ ({ \ + bfloat16x8_t __ret; \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_bf16(__p0, 43); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_lane_f32(__p0, __p1) __extension__ ({ \ - float32x4_t __ret; \ - float32x2_t __s0 = __p0; \ - __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \ +#define vld1_bf16(__p0) __extension__ ({ \ + bfloat16x4_t __ret; \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_bf16(__p0, 11); \ __ret; \ }) #else -#define splatq_lane_f32(__p0, __p1) __extension__ ({ \ - float32x4_t __ret; \ - float32x2_t __s0 = __p0; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 9); \ +#define vld1_bf16(__p0) __extension__ ({ \ + bfloat16x4_t __ret; \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_bf16(__p0, 11); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); 
\ __ret; \ }) -#define __noswap_splatq_lane_f32(__p0, __p1) __extension__ ({ \ - float32x4_t __ret; \ - float32x2_t __s0 = __p0; \ - __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \ - __ret; \ -}) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_lane_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - float16x4_t __s0 = __p0; \ - __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \ +#define vld1q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8_t __ret; \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_bf16(__p0, 43); \ __ret; \ }) #else -#define splatq_lane_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 8); \ +#define vld1q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8_t __ret; \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_bf16(__p0, 43); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) -#define __noswap_splatq_lane_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - float16x4_t __s0 = __p0; \ - __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \ - __ret; \ -}) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_lane_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __ret; \ - int32x2_t __s0 = __p0; \ - __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \ +#define vld1_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4_t __ret; \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_bf16(__p0, 11); \ __ret; \ }) #else -#define splatq_lane_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __ret; \ - int32x2_t __s0 = __p0; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 2); \ +#define vld1_dup_bf16(__p0) __extension__ ({ \ + 
bfloat16x4_t __ret; \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_bf16(__p0, 11); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) -#define __noswap_splatq_lane_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __ret; \ - int32x2_t __s0 = __p0; \ - __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \ - __ret; \ -}) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_lane_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __ret; \ - int64x1_t __s0 = __p0; \ - __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \ +#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x8_t __s1 = __p1; \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_bf16(__p0, (int8x16_t)__s1, __p2, 43); \ __ret; \ }) #else -#define splatq_lane_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __ret; \ - int64x1_t __s0 = __p0; \ - __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_splatq_lane_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __ret; \ - int64x1_t __s0 = __p0; \ - __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \ +#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16x8_t __s1 = __p1; \ + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_bf16(__p0, (int8x16_t)__rev1, __p2, 43); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_lane_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __ret; \ - int16x4_t __s0 = __p0; \ - __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \ +#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x4_t __s1 = __p1; \ + __ret = 
(bfloat16x4_t) __builtin_neon_vld1_lane_bf16(__p0, (int8x8_t)__s1, __p2, 11); \ __ret; \ }) #else -#define splatq_lane_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __ret; \ - int16x4_t __s0 = __p0; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splatq_lane_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __ret; \ - int16x4_t __s0 = __p0; \ - __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \ +#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16x4_t __s1 = __p1; \ + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_bf16(__p0, (int8x8_t)__rev1, __p2, 11); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splat_lane_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __ret; \ - uint8x8_t __s0 = __p0; \ - __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \ +#define vld1q_bf16_x2(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld1q_bf16_x2(&__ret, __p0, 43); \ __ret; \ }) #else -#define splat_lane_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __ret; \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splat_lane_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __ret; \ - uint8x8_t __s0 = __p0; \ - __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \ +#define vld1q_bf16_x2(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + 
__builtin_neon_vld1q_bf16_x2(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splat_lane_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - uint32x2_t __s0 = __p0; \ - __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \ +#define vld1_bf16_x2(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld1_bf16_x2(&__ret, __p0, 11); \ __ret; \ }) #else -#define splat_lane_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_splat_lane_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - uint32x2_t __s0 = __p0; \ - __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \ +#define vld1_bf16_x2(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld1_bf16_x2(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif -#define splat_lane_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __ret; \ - uint64x1_t __s0 = __p0; \ - __ret = (uint64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 19); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define splat_lane_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - uint16x4_t __s0 = __p0; \ - __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \ +#define vld1q_bf16_x3(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld1q_bf16_x3(&__ret, __p0, 43); \ 
__ret; \ }) #else -#define splat_lane_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splat_lane_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - uint16x4_t __s0 = __p0; \ - __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \ +#define vld1q_bf16_x3(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld1q_bf16_x3(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splat_lane_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __ret; \ - int8x8_t __s0 = __p0; \ - __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \ +#define vld1_bf16_x3(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld1_bf16_x3(&__ret, __p0, 11); \ __ret; \ }) #else -#define splat_lane_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __ret; \ - int8x8_t __s0 = __p0; \ - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splat_lane_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __ret; \ - int8x8_t __s0 = __p0; \ - __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \ +#define vld1_bf16_x3(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld1_bf16_x3(&__ret, 
__p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif -#define splat_lane_f64(__p0, __p1) __extension__ ({ \ - float64x1_t __ret; \ - float64x1_t __s0 = __p0; \ - __ret = (float64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 10); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define splat_lane_f32(__p0, __p1) __extension__ ({ \ - float32x2_t __ret; \ - float32x2_t __s0 = __p0; \ - __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \ +#define vld1q_bf16_x4(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld1q_bf16_x4(&__ret, __p0, 43); \ __ret; \ }) #else -#define splat_lane_f32(__p0, __p1) __extension__ ({ \ - float32x2_t __ret; \ - float32x2_t __s0 = __p0; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 9); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_splat_lane_f32(__p0, __p1) __extension__ ({ \ - float32x2_t __ret; \ - float32x2_t __s0 = __p0; \ - __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \ +#define vld1q_bf16_x4(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld1q_bf16_x4(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splat_lane_f16(__p0, __p1) __extension__ ({ \ - 
float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \ +#define vld1_bf16_x4(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld1_bf16_x4(&__ret, __p0, 11); \ __ret; \ }) #else -#define splat_lane_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 8); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splat_lane_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \ +#define vld1_bf16_x4(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld1_bf16_x4(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splat_lane_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __ret; \ - int32x2_t __s0 = __p0; \ - __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \ +#define vld2q_bf16(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld2q_bf16(&__ret, __p0, 43); \ __ret; \ }) #else -#define splat_lane_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __ret; \ - int32x2_t __s0 = __p0; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define 
__noswap_splat_lane_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __ret; \ - int32x2_t __s0 = __p0; \ - __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \ +#define vld2q_bf16(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld2q_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif -#define splat_lane_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __ret; \ - int64x1_t __s0 = __p0; \ - __ret = (int64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 3); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define splat_lane_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \ +#define vld2_bf16(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld2_bf16(&__ret, __p0, 11); \ __ret; \ }) #else -#define splat_lane_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splat_lane_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \ +#define vld2_bf16(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld2_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splat_laneq_p8(__p0, __p1) __extension__ ({ \ - poly8x8_t __ret; \ - 
poly8x16_t __s0 = __p0; \ - __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \ +#define vld2q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_bf16(&__ret, __p0, 43); \ __ret; \ }) #else -#define splat_laneq_p8(__p0, __p1) __extension__ ({ \ - poly8x8_t __ret; \ - poly8x16_t __s0 = __p0; \ - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 36); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splat_laneq_p8(__p0, __p1) __extension__ ({ \ - poly8x8_t __ret; \ - poly8x16_t __s0 = __p0; \ - __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \ +#define vld2q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splat_laneq_p64(__p0, __p1) __extension__ ({ \ - poly64x1_t __ret; \ - poly64x2_t __s0 = __p0; \ - __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \ +#define vld2_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld2_dup_bf16(&__ret, __p0, 11); \ __ret; \ }) #else -#define splat_laneq_p64(__p0, __p1) __extension__ ({ \ - poly64x1_t __ret; \ - poly64x2_t __s0 = __p0; \ - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 38); \ - __ret; \ -}) -#define __noswap_splat_laneq_p64(__p0, __p1) __extension__ ({ \ - poly64x1_t __ret; \ - poly64x2_t __s0 = __p0; \ - __ret = (poly64x1_t) 
__builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \ +#define vld2_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + __builtin_neon_vld2_dup_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splat_laneq_p16(__p0, __p1) __extension__ ({ \ - poly16x4_t __ret; \ - poly16x8_t __s0 = __p0; \ - __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \ +#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + bfloat16x8x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_bf16(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \ __ret; \ }) #else -#define splat_laneq_p16(__p0, __p1) __extension__ ({ \ - poly16x4_t __ret; \ - poly16x8_t __s0 = __p0; \ - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 37); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splat_laneq_p16(__p0, __p1) __extension__ ({ \ - poly16x4_t __ret; \ - poly16x8_t __s0 = __p0; \ - __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \ +#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x2_t __ret; \ + bfloat16x8x2_t __s1 = __p1; \ + bfloat16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_bf16(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], 
__ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_laneq_p8(__p0, __p1) __extension__ ({ \ - poly8x16_t __ret; \ - poly8x16_t __s0 = __p0; \ - __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \ +#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + bfloat16x4x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_bf16(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \ __ret; \ }) #else -#define splatq_laneq_p8(__p0, __p1) __extension__ ({ \ - poly8x16_t __ret; \ - poly8x16_t __s0 = __p0; \ - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 36); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splatq_laneq_p8(__p0, __p1) __extension__ ({ \ - poly8x16_t __ret; \ - poly8x16_t __s0 = __p0; \ - __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \ +#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x2_t __ret; \ + bfloat16x4x2_t __s1 = __p1; \ + bfloat16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vld2_lane_bf16(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_laneq_p64(__p0, __p1) __extension__ ({ \ - poly64x2_t __ret; \ - poly64x2_t __s0 = __p0; \ - __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \ +#define 
vld3q_bf16(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_bf16(&__ret, __p0, 43); \ __ret; \ }) #else -#define splatq_laneq_p64(__p0, __p1) __extension__ ({ \ - poly64x2_t __ret; \ - poly64x2_t __s0 = __p0; \ - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 38); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_splatq_laneq_p64(__p0, __p1) __extension__ ({ \ - poly64x2_t __ret; \ - poly64x2_t __s0 = __p0; \ - __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \ +#define vld3q_bf16(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_laneq_p16(__p0, __p1) __extension__ ({ \ - poly16x8_t __ret; \ - poly16x8_t __s0 = __p0; \ - __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \ +#define vld3_bf16(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_bf16(&__ret, __p0, 11); \ __ret; \ }) #else -#define splatq_laneq_p16(__p0, __p1) __extension__ ({ \ - poly16x8_t __ret; \ - poly16x8_t __s0 = __p0; \ - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 37); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splatq_laneq_p16(__p0, __p1) __extension__ ({ \ - poly16x8_t __ret; \ - poly16x8_t __s0 = __p0; \ - __ret = (poly16x8_t) 
__builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \ +#define vld3_bf16(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_laneq_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __ret; \ - uint8x16_t __s0 = __p0; \ - __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \ +#define vld3q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_bf16(&__ret, __p0, 43); \ __ret; \ }) #else -#define splatq_laneq_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __ret; \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 48); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splatq_laneq_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __ret; \ - uint8x16_t __s0 = __p0; \ - __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \ +#define vld3q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_laneq_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __ret; \ - 
uint32x4_t __s0 = __p0; \ - __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \ +#define vld3_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_dup_bf16(&__ret, __p0, 11); \ __ret; \ }) #else -#define splatq_laneq_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splatq_laneq_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \ +#define vld3_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + __builtin_neon_vld3_dup_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_laneq_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __ret; \ - uint64x2_t __s0 = __p0; \ - __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \ +#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + bfloat16x8x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_bf16(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \ __ret; \ }) #else -#define splatq_laneq_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __ret; \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 51); \ - __ret = __builtin_shufflevector(__ret, 
__ret, 1, 0); \ - __ret; \ -}) -#define __noswap_splatq_laneq_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __ret; \ - uint64x2_t __s0 = __p0; \ - __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \ +#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x3_t __ret; \ + bfloat16x8x3_t __s1 = __p1; \ + bfloat16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_bf16(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_laneq_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __ret; \ - uint16x8_t __s0 = __p0; \ - __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \ +#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + bfloat16x4x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_bf16(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \ __ret; \ }) #else -#define splatq_laneq_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __ret; \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define 
__noswap_splatq_laneq_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __ret; \ - uint16x8_t __s0 = __p0; \ - __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \ +#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x3_t __ret; \ + bfloat16x4x3_t __s1 = __p1; \ + bfloat16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vld3_lane_bf16(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_laneq_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __ret; \ - int8x16_t __s0 = __p0; \ - __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \ +#define vld4q_bf16(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_bf16(&__ret, __p0, 43); \ __ret; \ }) #else -#define splatq_laneq_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __ret; \ - int8x16_t __s0 = __p0; \ - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 32); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splatq_laneq_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __ret; \ - int8x16_t __s0 = __p0; \ - __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \ +#define vld4q_bf16(__p0) 
__extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_laneq_f64(__p0, __p1) __extension__ ({ \ - float64x2_t __ret; \ - float64x2_t __s0 = __p0; \ - __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \ +#define vld4_bf16(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_bf16(&__ret, __p0, 11); \ __ret; \ }) #else -#define splatq_laneq_f64(__p0, __p1) __extension__ ({ \ - float64x2_t __ret; \ - float64x2_t __s0 = __p0; \ - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 42); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_splatq_laneq_f64(__p0, __p1) __extension__ ({ \ - float64x2_t __ret; \ - float64x2_t __s0 = __p0; \ - __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \ +#define vld4_bf16(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_laneq_f32(__p0, __p1) __extension__ ({ \ - float32x4_t __ret; \ - float32x4_t 
__s0 = __p0; \ - __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \ +#define vld4q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_bf16(&__ret, __p0, 43); \ __ret; \ }) #else -#define splatq_laneq_f32(__p0, __p1) __extension__ ({ \ - float32x4_t __ret; \ - float32x4_t __s0 = __p0; \ - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 41); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splatq_laneq_f32(__p0, __p1) __extension__ ({ \ - float32x4_t __ret; \ - float32x4_t __s0 = __p0; \ - __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \ +#define vld4q_dup_bf16(__p0) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_bf16(&__ret, __p0, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_laneq_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \ +#define vld4_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_dup_bf16(&__ret, __p0, 11); \ __ret; \ }) #else -#define splatq_laneq_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 40); \ - __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splatq_laneq_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \ +#define vld4_dup_bf16(__p0) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + __builtin_neon_vld4_dup_bf16(&__ret, __p0, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_laneq_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __ret; \ - int32x4_t __s0 = __p0; \ - __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \ +#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + bfloat16x8x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_bf16(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \ __ret; \ }) #else -#define splatq_laneq_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __ret; \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splatq_laneq_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __ret; \ - int32x4_t __s0 = __p0; \ - __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \ +#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x4_t __ret; \ + bfloat16x8x4_t __s1 = __p1; \ + bfloat16x8x4_t __rev1; \ + __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_bf16(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_laneq_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __ret; \ - int64x2_t __s0 = __p0; \ - __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \ +#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + bfloat16x4x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_bf16(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \ __ret; \ }) #else -#define splatq_laneq_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __ret; \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 35); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_splatq_laneq_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __ret; \ - int64x2_t __s0 = __p0; \ - __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \ +#define vld4_lane_bf16(__p0, __p1, 
__p2) __extension__ ({ \ + bfloat16x4x4_t __ret; \ + bfloat16x4x4_t __s1 = __p1; \ + bfloat16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vld4_lane_bf16(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_laneq_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 33); \ +#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x8_t __s1 = __p1; \ + __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \ __ret; \ }) #else -#define splatq_laneq_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 33); \ +#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x8_t __s1 = __p1; \ + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (bfloat16x8_t) 
__builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) -#define __noswap_splatq_laneq_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 33); \ +#define __noswap_vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x8_t __s1 = __p1; \ + __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splat_laneq_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __ret; \ - uint8x16_t __s0 = __p0; \ - __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \ +#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x4_t __s1 = __p1; \ + __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \ __ret; \ }) #else -#define splat_laneq_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __ret; \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 48); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x4_t __s1 = __p1; \ + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) -#define __noswap_splat_laneq_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __ret; \ - uint8x16_t __s0 = __p0; \ - __ret = (uint8x8_t) 
__builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \ +#define __noswap_vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __ret; \ + bfloat16_t __s0 = __p0; \ + bfloat16x4_t __s1 = __p1; \ + __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splat_laneq_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - uint32x4_t __s0 = __p0; \ - __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \ - __ret; \ +#define vst1q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_bf16(__p0, (int8x16_t)__s1, 43); \ }) #else -#define splat_laneq_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_splat_laneq_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - uint32x4_t __s0 = __p0; \ - __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \ - __ret; \ +#define vst1q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8_t __s1 = __p1; \ + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_bf16(__p0, (int8x16_t)__rev1, 43); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splat_laneq_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __ret; \ - uint64x2_t __s0 = __p0; \ - __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \ - __ret; \ +#define vst1_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __s1 = __p1; \ + __builtin_neon_vst1_bf16(__p0, (int8x8_t)__s1, 11); \ }) #else -#define splat_laneq_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __ret; \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 51); \ - __ret; \ -}) -#define __noswap_splat_laneq_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __ret; \ - uint64x2_t __s0 = __p0; \ - __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \ - __ret; \ +#define vst1_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4_t __s1 = __p1; \ + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_bf16(__p0, (int8x8_t)__rev1, 11); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splat_laneq_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - uint16x8_t __s0 = __p0; \ - __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \ - __ret; \ +#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_bf16(__p0, (int8x16_t)__s1, __p2, 43); \ }) #else -#define splat_laneq_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splat_laneq_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - uint16x8_t __s0 = __p0; \ - __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \ - __ret; \ +#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8_t __s1 = __p1; \ + bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_bf16(__p0, (int8x16_t)__rev1, __p2, 43); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splat_laneq_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __ret; \ - int8x16_t __s0 = __p0; \ - __ret = (int8x8_t) 
__builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \ - __ret; \ +#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __s1 = __p1; \ + __builtin_neon_vst1_lane_bf16(__p0, (int8x8_t)__s1, __p2, 11); \ }) #else -#define splat_laneq_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __ret; \ - int8x16_t __s0 = __p0; \ - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 32); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splat_laneq_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __ret; \ - int8x16_t __s0 = __p0; \ - __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \ - __ret; \ +#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4_t __s1 = __p1; \ + bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_bf16(__p0, (int8x8_t)__rev1, __p2, 11); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splat_laneq_f64(__p0, __p1) __extension__ ({ \ - float64x1_t __ret; \ - float64x2_t __s0 = __p0; \ - __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \ - __ret; \ +#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + __builtin_neon_vst1q_bf16_x2(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \ }) #else -#define splat_laneq_f64(__p0, __p1) __extension__ ({ \ - float64x1_t __ret; \ - float64x2_t __s0 = __p0; \ - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 42); \ - __ret; \ -}) -#define __noswap_splat_laneq_f64(__p0, __p1) __extension__ ({ \ - float64x1_t __ret; \ - float64x2_t __s0 = __p0; \ - __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \ - __ret; \ 
+#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + bfloat16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_bf16_x2(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splat_laneq_f32(__p0, __p1) __extension__ ({ \ - float32x2_t __ret; \ - float32x4_t __s0 = __p0; \ - __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \ - __ret; \ +#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + __builtin_neon_vst1_bf16_x2(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \ }) #else -#define splat_laneq_f32(__p0, __p1) __extension__ ({ \ - float32x2_t __ret; \ - float32x4_t __s0 = __p0; \ - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 41); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_splat_laneq_f32(__p0, __p1) __extension__ ({ \ - float32x2_t __ret; \ - float32x4_t __s0 = __p0; \ - __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \ - __ret; \ +#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + bfloat16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1_bf16_x2(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splat_laneq_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __ret; \ - float16x8_t __s0 = __p0; \ - __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \ - __ret; \ +#define 
vst1q_bf16_x3(__p0, __p1) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + __builtin_neon_vst1q_bf16_x3(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \ }) #else -#define splat_laneq_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 40); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splat_laneq_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __ret; \ - float16x8_t __s0 = __p0; \ - __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \ - __ret; \ +#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + bfloat16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_bf16_x3(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splat_laneq_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __ret; \ - int32x4_t __s0 = __p0; \ - __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \ - __ret; \ +#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + __builtin_neon_vst1_bf16_x3(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \ }) #else -#define splat_laneq_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __ret; \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 34); 
\ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_splat_laneq_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __ret; \ - int32x4_t __s0 = __p0; \ - __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \ - __ret; \ +#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + bfloat16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst1_bf16_x3(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splat_laneq_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __ret; \ - int64x2_t __s0 = __p0; \ - __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 35); \ - __ret; \ +#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + __builtin_neon_vst1q_bf16_x4(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \ }) #else -#define splat_laneq_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __ret; \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 35); \ - __ret; \ +#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + bfloat16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + 
__builtin_neon_vst1q_bf16_x4(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \ }) -#define __noswap_splat_laneq_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __ret; \ - int64x2_t __s0 = __p0; \ - __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 35); \ - __ret; \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + __builtin_neon_vst1_bf16_x4(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \ +}) +#else +#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + bfloat16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst1_bf16_x4(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define splat_laneq_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __ret; \ - int16x8_t __s0 = __p0; \ - __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \ - __ret; \ +#define vst2q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \ }) #else -#define splat_laneq_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __ret; \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ +#define vst2q_bf16(__p0, 
__p1) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + bfloat16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \ }) -#define __noswap_splat_laneq_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __ret; \ - int16x8_t __s0 = __p0; \ - __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \ - __ret; \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \ +}) +#else +#define vst2_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + bfloat16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} +#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \ +}) #else -__ai uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - 
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai uint8x16_t __noswap_vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} +#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x2_t __s1 = __p1; \ + bfloat16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} +#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \ +}) #else -__ai uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai uint32x4_t __noswap_vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} +#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x2_t __s1 = __p1; \ + bfloat16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} +#define vst3q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \ +}) #else -__ai uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai uint16x8_t __noswap_vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} +#define vst3q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + bfloat16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} +#define vst3_bf16(__p0, __p1) 
__extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \ +}) #else -__ai int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int8x16_t __noswap_vabdq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} +#define vst3_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + bfloat16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} +#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \ +}) #else -__ai float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x3_t __s1 = __p1; \ + bfloat16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} +#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \ +}) #else -__ai int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vabdq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} +#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x3_t __s1 = __p1; \ + bfloat16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 
2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); +#define vst4q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \ +}) +#else +#define vst4q_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + bfloat16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \ +}) +#else +#define vst4_bf16(__p0, __p1) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + bfloat16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + 
__rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \ +}) +#else +#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x8x4_t __s1 = __p1; \ + bfloat16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \ +}) +#else +#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ + bfloat16x4x4_t __s1 = __p1; \ + bfloat16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 
(int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("dotprod,neon"))) uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vdotq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else -__ai int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("dotprod,neon"))) uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vdotq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int16x8_t __noswap_vabdq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); +__ai __attribute__((target("dotprod,neon"))) uint32x4_t __noswap_vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vdotq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = 
(uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); +__ai __attribute__((target("dotprod,neon"))) int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #else -__ai uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("dotprod,neon"))) int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vdotq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint8x8_t __noswap_vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); +__ai __attribute__((target("dotprod,neon"))) int32x4_t __noswap_vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("dotprod,neon"))) uint32x2_t vdot_u32(uint32x2_t __p0, 
uint8x8_t __p1, uint8x8_t __p2) { uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + __ret = (uint32x2_t) __builtin_neon_vdot_u32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18); return __ret; } #else -__ai uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("dotprod,neon"))) uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vdot_u32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint32x2_t __noswap_vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("dotprod,neon"))) uint32x2_t __noswap_vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + __ret = (uint32x2_t) __builtin_neon_vdot_u32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); +__ai __attribute__((target("dotprod,neon"))) int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); return __ret; } #else -__ai uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - uint16x4_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("dotprod,neon"))) int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x2_t) __builtin_neon_vdot_s32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint16x4_t __noswap_vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); +__ai __attribute__((target("dotprod,neon"))) int32x2_t __noswap_vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vabdq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else -__ai int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) 
__builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vabdq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int8x8_t __noswap_vabd_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 9); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vabd_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else -__ai float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vabd_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif 
#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vabsq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vabsq_f16((int8x16_t)__p0, 40); return __ret; } #else -__ai int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int32x2_t __noswap_vabd_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vabsq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vabsq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vabs_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vabs_f16((int8x8_t)__p0, 8); return __ret; } #else -__ai int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 
1); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vabs_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vabs_f16((int8x8_t)__rev0, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int16x4_t __noswap_vabd_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vabsq_s8(int8x16_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 32); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __p0 + __p1; return __ret; } #else -__ai int8x16_t vabsq_s8(int8x16_t __p0) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vabsq_f32(float32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 41); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __p0 + __p1; return __ret; } #else -__ai float32x4_t 
vabsq_f32(float32x4_t __p0) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 41); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vabsq_s32(int32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 34); +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcageq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai int32x4_t vabsq_s32(int32x4_t __p0) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcageq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vabsq_s16(int16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 33); +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcage_f16(float16x4_t 
__p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcage_f16((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai int16x8_t vabsq_s16(int16x8_t __p0) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcage_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vabs_s8(int8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__p0, 0); +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcagtq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai int8x8_t vabs_s8(int8x8_t __p0) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 0); +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcagtq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif 
#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vabs_f32(float32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 9); +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcagt_f16((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai float32x2_t vabs_f32(float32x2_t __p0) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcagt_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vabs_s32(int32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 2); +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcaleq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai int32x2_t vabs_s32(int32x2_t __p0) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + 
float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcaleq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vabs_s16(int16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 1); +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcale_f16((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai int16x4_t vabs_s16(int16x4_t __p0) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 1); +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcale_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = __p0 + __p1; +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcaltq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 
4, 3, 2, 1, 0); - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcaltq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = __p0 + __p1; +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcalt_f16((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 + __rev1; +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcalt_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = __p0 + __p1; +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret 
= (uint16x8_t)(__p0 == __p1); return __ret; } #else -__ai uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = __p0 + __p1; +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 == __p1); return __ret; } #else -__ai uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t 
__ret; - __ret = __p0 + __p1; +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vceqzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vceqzq_f16((int8x16_t)__p0, 49); return __ret; } #else -__ai int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vceqzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vceqzq_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = __p0 + __p1; +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vceqz_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vceqz_f16((int8x8_t)__p0, 17); return __ret; } #else -__ai float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 + __rev1; +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vceqz_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vceqz_f16((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif 
#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = __p0 + __p1; +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 >= __p1); return __ret; } #else -__ai int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = __p0 + __p1; +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 >= __p1); return __ret; } #else -__ai int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = 
(uint16x4_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = __p0 + __p1; +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgezq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcgezq_f16((int8x16_t)__p0, 49); return __ret; } #else -__ai int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 + __rev1; +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgezq_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcgezq_f16((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = __p0 + __p1; +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcgez_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcgez_f16((int8x8_t)__p0, 17); return __ret; } #else -__ai uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcgez_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) 
__builtin_neon_vcgez_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = __p0 + __p1; +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 > __p1); return __ret; } #else -__ai uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif -__ai uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = __p0 + __p1; - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) { uint16x4_t __ret; - __ret = __p0 + __p1; + __ret = (uint16x4_t)(__p0 > __p1); return __ret; } #else -__ai uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) { uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 + __rev1; + 
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 > __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = __p0 + __p1; +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgtzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcgtzq_f16((int8x16_t)__p0, 49); return __ret; } #else -__ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 + __rev1; +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcgtzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcgtzq_f16((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = __p0 + __p1; +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcgtz_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcgtz_f16((int8x8_t)__p0, 17); return __ret; } #else -__ai float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcgtz_f16(float16x4_t __p0) { + uint16x4_t 
__ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcgtz_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = __p0 + __p1; +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 <= __p1); return __ret; } #else -__ai int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif -__ai int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = __p0 + __p1; - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = __p0 + __p1; +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 <= __p1); return __ret; } #else -__ai int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 + __rev1; 
+__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 <= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vadd_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 4); +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vclezq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vclezq_f16((int8x16_t)__p0, 49); return __ret; } #else -__ai poly8x8_t vadd_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x8_t) __builtin_neon_vadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4); +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vclezq_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vclezq_f16((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif -__ai poly64x1_t vadd_p64(poly64x1_t __p0, poly64x1_t __p1) { - poly64x1_t __ret; - __ret = (poly64x1_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 6); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vadd_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __ret; - __ret = (poly16x4_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 5); +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vclez_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = 
(uint16x4_t) __builtin_neon_vclez_f16((int8x8_t)__p0, 17); return __ret; } #else -__ai poly16x4_t vadd_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __ret; - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (poly16x4_t) __builtin_neon_vadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 5); +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vclez_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vclez_f16((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vaddq_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 36); +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 < __p1); return __ret; } #else -__ai poly8x16_t vaddq_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __ret; - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x16_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 < __rev1); + __ret 
= __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vaddq_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly64x2_t __ret; - __ret = (poly64x2_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 38); +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 < __p1); return __ret; } #else -__ai poly64x2_t vaddq_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly64x2_t __ret; - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (poly64x2_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 38); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vaddq_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t __ret; - __ret = (poly16x8_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 37); +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcltzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcltzq_f16((int8x16_t)__p0, 49); return __ret; } #else -__ai poly16x8_t vaddq_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t __ret; - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly16x8_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 37); 
+__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcltzq_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcltzq_f16((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcltz_f16(float16x4_t __p0) { uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + __ret = (uint16x4_t) __builtin_neon_vcltz_f16((int8x8_t)__p0, 17); return __ret; } #else -__ai uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcltz_f16(float16x4_t __p0) { uint16x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcltz_f16((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint16x4_t __noswap_vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); - return __ret; -} #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vcvtq_f16_u16(uint16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcvtq_f16_u16((int8x16_t)__p0, 49); return __ret; } #else -__ai 
uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint32x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai uint32x2_t __noswap_vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vcvtq_f16_u16(uint16x8_t __p0) { + float16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcvtq_f16_u16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vcvtq_f16_s16(int16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcvtq_f16_s16((int8x16_t)__p0, 33); return __ret; } #else -__ai uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint8x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vcvtq_f16_s16(int16x8_t __p0) { + float16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcvtq_f16_s16((int8x16_t)__rev0, 33); __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint8x8_t __noswap_vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); - return __ret; -} #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vcvt_f16_u16(uint16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcvt_f16_u16((int8x8_t)__p0, 17); return __ret; } #else -__ai int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { - int16x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vcvt_f16_u16(uint16x4_t __p0) { + float16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcvt_f16_u16((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int16x4_t __noswap_vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); - return __ret; -} #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vcvt_f16_s16(int16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcvt_f16_s16((int8x8_t)__p0, 1); return __ret; } #else -__ai int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { - 
int32x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int32x2_t __noswap_vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vcvt_f16_s16(int16x4_t __p0) { + float16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcvt_f16_s16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); - return __ret; -} +#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_u16((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) #else -__ai int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { - int8x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int8x8_t __noswap_vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); - return __ret; -} +#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + 
uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_u16((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = __p0 & __p1; - return __ret; -} +#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_s16((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) #else -__ai uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 & __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_s16((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = __p0 & __p1; - return __ret; -} +#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_u16((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) #else -__ai uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 
3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 & __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_u16((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = __p0 & __p1; - return __ret; -} +#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_s16((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) #else -__ai uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 & __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_s16((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = __p0 & __p1; - return __ret; -} +#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_f16((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) #else -__ai uint16x8_t vandq_u16(uint16x8_t 
__p0, uint16x8_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 & __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_f16((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = __p0 & __p1; - return __ret; -} +#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_f16((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) #else -__ai int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 & __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_f16((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t 
__ret; - __ret = __p0 & __p1; - return __ret; -} +#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_f16((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) #else -__ai int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 & __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_f16((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = __p0 & __p1; - return __ret; -} +#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_f16((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) #else -__ai int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 & __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_f16((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); 
\ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtq_s16_f16(float16x8_t __p0) { int16x8_t __ret; - __ret = __p0 & __p1; + __ret = (int16x8_t) __builtin_neon_vcvtq_s16_f16((int8x16_t)__p0, 33); return __ret; } #else -__ai int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtq_s16_f16(float16x8_t __p0) { int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 & __rev1; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vcvtq_s16_f16((int8x16_t)__rev0, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = __p0 & __p1; +__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvt_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvt_s16_f16((int8x8_t)__p0, 1); return __ret; } #else -__ai uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 & __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvt_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcvt_s16_f16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif 
#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = __p0 & __p1; +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_f16((int8x16_t)__p0, 49); return __ret; } #else -__ai uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 & __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif -__ai uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = __p0 & __p1; - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvt_u16_f16(float16x4_t __p0) { uint16x4_t __ret; - __ret = __p0 & __p1; + __ret = (uint16x4_t) __builtin_neon_vcvt_u16_f16((int8x8_t)__p0, 17); return __ret; } #else -__ai uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvt_u16_f16(float16x4_t __p0) { uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 & __rev1; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcvt_u16_f16((int8x8_t)__rev0, 17); __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = __p0 & __p1; +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtaq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_f16((int8x16_t)__p0, 33); return __ret; } #else -__ai int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 & __rev1; +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtaq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_f16((int8x16_t)__rev0, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = __p0 & __p1; - return __ret; -} -#else -__ai int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 & __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = __p0 & __p1; - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvta_s16_f16(float16x4_t __p0) { int16x4_t __ret; - __ret = __p0 & __p1; + __ret = (int16x4_t) __builtin_neon_vcvta_s16_f16((int8x8_t)__p0, 1); return __ret; } #else -__ai int16x4_t 
vand_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvta_s16_f16(float16x4_t __p0) { int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 & __rev1; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcvta_s16_f16((int8x8_t)__rev0, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = __p0 & ~__p1; +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_f16((int8x16_t)__p0, 49); return __ret; } #else -__ai uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 & ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = __p0 & ~__p1; +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvta_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = 
(uint16x4_t) __builtin_neon_vcvta_u16_f16((int8x8_t)__p0, 17); return __ret; } #else -__ai uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 & ~__rev1; +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvta_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcvta_u16_f16((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = __p0 & ~__p1; - return __ret; -} -#else -__ai uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 & ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = __p0 & ~__p1; +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtmq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_f16((int8x16_t)__p0, 33); return __ret; } #else -__ai uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 & ~__rev1; +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtmq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + float16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_f16((int8x16_t)__rev0, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = __p0 & ~__p1; +__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvtm_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvtm_s16_f16((int8x8_t)__p0, 1); return __ret; } #else -__ai int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 & ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvtm_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcvtm_s16_f16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = __p0 & ~__p1; +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_f16((int8x16_t)__p0, 49); return __ret; } #else -__ai int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 & ~__rev1; - __ret = __builtin_shufflevector(__ret, 
__ret, 3, 2, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = __p0 & ~__p1; +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvtm_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_f16((int8x8_t)__p0, 17); return __ret; } #else -__ai int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 & ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvtm_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_f16((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtnq_s16_f16(float16x8_t __p0) { int16x8_t __ret; - __ret = __p0 & ~__p1; + __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_f16((int8x16_t)__p0, 33); return __ret; } #else -__ai int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtnq_s16_f16(float16x8_t __p0) { int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 & ~__rev1; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_f16((int8x16_t)__rev0, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = __p0 & ~__p1; +__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvtn_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcvtn_s16_f16((int8x8_t)__p0, 1); return __ret; } #else -__ai uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 & ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvtn_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcvtn_s16_f16((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = __p0 & ~__p1; +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_f16((int8x16_t)__p0, 49); return __ret; } #else -__ai uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 & ~__rev1; - __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_f16((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif -__ai uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = __p0 & ~__p1; - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvtn_u16_f16(float16x4_t __p0) { uint16x4_t __ret; - __ret = __p0 & ~__p1; + __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_f16((int8x8_t)__p0, 17); return __ret; } #else -__ai uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvtn_u16_f16(float16x4_t __p0) { uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 & ~__rev1; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_f16((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = __p0 & ~__p1; +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtpq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_f16((int8x16_t)__p0, 33); return __ret; } #else -__ai int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 & ~__rev1; +__ai __attribute__((target("fullfp16,neon"))) int16x8_t vcvtpq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_f16((int8x16_t)__rev0, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = __p0 & ~__p1; - return __ret; -} -#else -__ai int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 & ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = __p0 & ~__p1; - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvtp_s16_f16(float16x4_t __p0) { int16x4_t __ret; - __ret = __p0 & ~__p1; + __ret = (int16x4_t) __builtin_neon_vcvtp_s16_f16((int8x8_t)__p0, 1); return __ret; } #else -__ai int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("fullfp16,neon"))) int16x4_t vcvtp_s16_f16(float16x4_t __p0) { int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 & ~__rev1; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcvtp_s16_f16((int8x8_t)__rev0, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai 
poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4); +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_f16((int8x16_t)__p0, 49); return __ret; } #else -__ai poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) { - poly8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4); +__ai __attribute__((target("fullfp16,neon"))) uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_f16((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) { - poly16x4_t __ret; - __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 5); +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvtp_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_f16((int8x8_t)__p0, 17); return __ret; } #else -__ai poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) { - poly16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - poly16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - 
__ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 5); +__ai __attribute__((target("fullfp16,neon"))) uint16x4_t vcvtp_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_f16((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) { - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vfmaq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); return __ret; } #else -__ai poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) { - poly8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = 
__builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vfmaq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fullfp16,neon"))) float16x8_t __noswap_vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vfmaq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) { - poly16x8_t __ret; - __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 37); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vfma_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; } #else -__ai poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) { - poly16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 37); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = 
(float16x4_t) __builtin_neon_vfma_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fullfp16,neon"))) float16x4_t __noswap_vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vfma_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = vfmaq_f16(__p0, -__p1, __p2); return __ret; } #else -__ai uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = 
__noswap_vfmaq_f16(__rev0, -__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = vfma_f16(__p0, -__p1, __p2); return __ret; } #else -__ai uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vfma_f16(__rev0, -__rev1, __rev2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vmaxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } 
#else -__ai uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vmaxq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmax_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else -__ai uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 
0); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vmax_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vminq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else -__ai int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { - int8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vminq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret 
= __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmin_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else -__ai float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vmin_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __p0 * __p1; return __ret; } #else -__ai int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - uint32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __p0 * __p1; return __ret; } #else -__ai int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { - int64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret 
= __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); - return __ret; -} -#else -__ai int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16); - return __ret; -} -#else -__ai uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18); - return __ret; -} +#define vmulq_n_f16(__p0, __p1) __extension__ 
({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = __s0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \ + __ret; \ +}) #else -__ai uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vmulq_n_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = __rev0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif -__ai uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 17); - return __ret; -} +#define vmul_n_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = __s0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \ + __ret; \ +}) #else -__ai uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, 
__p1, 3, 2, 1, 0); - uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vmul_n_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = __rev0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vnegq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = -__p0; return __ret; } #else -__ai int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vnegq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = -__rev0; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); 
+__ai __attribute__((target("fullfp16,neon"))) float16x4_t vneg_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = -__p0; return __ret; } #else -__ai float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vneg_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpadd_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else -__ai int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 
= __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vpadd_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif -__ai int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpmax_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else -__ai int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vpmax_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) 
__builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpmin_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else -__ai uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vpmin_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 18); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrecpeq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrecpeq_f16((int8x16_t)__p0, 40); return __ret; } #else -__ai uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrecpeq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrecpeq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrecpe_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrecpe_f16((int8x8_t)__p0, 8); return __ret; } #else -__ai uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrecpe_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrecpe_f16((int8x8_t)__rev0, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 18); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrecpsq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else -__ai uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - 
__ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrecpsq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrecps_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else -__ai uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrecps_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - __ret = 
(uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 18); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrsqrteq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrsqrteq_f16((int8x16_t)__p0, 40); return __ret; } #else -__ai uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrsqrteq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrsqrteq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrsqrte_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrsqrte_f16((int8x8_t)__p0, 8); return __ret; } #else -__ai uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrsqrte_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) 
__builtin_neon_vrsqrte_f16((int8x8_t)__rev0, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 18); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrsqrtsq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else -__ai uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrsqrtsq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0 == __p1); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrsqrts_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else -__ai uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) { - uint8x8_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - 
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrsqrts_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0 == __p1); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __p0 - __p1; return __ret; } #else -__ai uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) { - uint8x16_t __ret; - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - 
__ret = (uint8x16_t)(__p0 == __p1); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __p0 - __p1; return __ret; } #else -__ai uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("i8mm,neon"))) uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 == __p1); + __ret = (uint32x4_t) __builtin_neon_vmmlaq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else -__ai uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("i8mm,neon"))) uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t)(__rev0 == __rev1); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t 
__rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vmmlaq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0 == __p1); +__ai __attribute__((target("i8mm,neon"))) int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmmlaq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #else -__ai uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("i8mm,neon"))) int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef 
__LITTLE_ENDIAN__ -__ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t)(__rev0 == __rev1); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vmmlaq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 == __p1); +__ai __attribute__((target("i8mm,neon"))) int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vusdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #else -__ai uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; +__ai __attribute__((target("i8mm,neon"))) int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t)(__rev0 == __rev1); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vusdotq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); 
return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("i8mm,neon"))) int32x4_t __noswap_vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vusdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0 == __p1); +__ai __attribute__((target("i8mm,neon"))) int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vusdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); return __ret; } #else -__ai uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("i8mm,neon"))) int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 == __p1); + int8x8_t 
__rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x2_t) __builtin_neon_vusdot_s32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -#else -__ai uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("i8mm,neon"))) int32x2_t __noswap_vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vusdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 == __p1); +__ai __attribute__((target("i8mm,neon"))) int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vusmmlaq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #else -__ai uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t)(__rev0 == __rev1); +__ai __attribute__((target("i8mm,neon"))) int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + 
__ret = (int32x4_t) __builtin_neon_vusmmlaq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0 == __p1); - return __ret; -} +#define splat_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 4); \ + __ret; \ +}) #else -__ai uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) { - uint8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define splat_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + __ret = (poly8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 4); \ + __ret; \ +}) #endif +#define splat_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __ret; \ + poly64x1_t __s0 = __p0; \ + __ret = (poly64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 6); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 == __p1); - return __ret; -} +#define splat_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + __ret = 
(poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \ + __ret; \ +}) #else -__ai uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define splat_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + __ret = (poly16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 5); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 == __p1); - return __ret; -} +#define splatq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x8_t __s0 = __p0; \ + __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \ + __ret; \ +}) #else -__ai uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define splatq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 4); \ + __ret = 
__builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x8_t __s0 = __p0; \ + __ret = (poly8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 4); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 == __p1); - return __ret; -} +#define splatq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x1_t __s0 = __p0; \ + __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \ + __ret; \ +}) #else -__ai uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) { - uint16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0 >= __p1); - return __ret; -} -#else -__ai uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define splatq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x1_t __s0 = __p0; \ + __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define 
__noswap_splatq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x1_t __s0 = __p0; \ + __ret = (poly64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 6); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 >= __p1); - return __ret; -} +#define splatq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x4_t __s0 = __p0; \ + __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \ + __ret; \ +}) #else -__ai uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define splatq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x4_t __s0 = __p0; \ + __ret = (poly16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 5); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 >= __p1); - return __ret; -} +#define splatq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) #else -__ai uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define splatq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0 >= __p1); - return __ret; -} +#define splatq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) #else -__ai uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define splatq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x4_t) 
__builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 >= __p1); - return __ret; -} +#define splatq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) #else -__ai uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define splatq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 >= __p1); - return __ret; -} +#define splatq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) #else -__ai uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t 
__p1) { - uint32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define splatq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 >= __p1); - return __ret; -} +#define splatq_lane_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) #else -__ai uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define splatq_lane_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 
6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8x16_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0 >= __p1); - return __ret; -} +#define splatq_lane_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + float64x1_t __s0 = __p0; \ + __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \ + __ret; \ +}) #else -__ai uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define splatq_lane_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + float64x1_t __s0 = __p0; \ + __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + float64x1_t __s0 = __p0; \ + __ret = (float64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 10); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 >= __p1); - return __ret; -} +#define splatq_lane_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \ + __ret; \ +}) #else -__ai uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define splatq_lane_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (float32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 9); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 >= __p1); - return __ret; -} +#define splatq_lane_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \ + __ret; \ +}) #else -__ai uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define splatq_lane_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x4_t 
__s0 = __p0; \ + __ret = (float16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 8); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0 >= __p1); - return __ret; -} +#define splatq_lane_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) #else -__ai uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) { - uint8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define splatq_lane_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 >= __p1); - return __ret; -} +#define splatq_lane_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) #else -__ai uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define splatq_lane_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 >= __p1); - return __ret; -} +#define splatq_lane_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) #else -__ai uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define splatq_lane_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_lane_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_splatq_lane_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) { - 
uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 >= __p1); - return __ret; -} +#define splat_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) #else -__ai uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) { - uint16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define splat_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0 > __p1); - return __ret; -} +#define splat_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) #else -__ai uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 15, 
14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define splat_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) #endif +#define splat_lane_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 > __p1); - return __ret; -} +#define splat_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) #else -__ai uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define splat_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t 
__s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 > __p1); - return __ret; -} +#define splat_lane_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) #else -__ai uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define splat_lane_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ +}) #endif +#define splat_lane_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + __ret = (float64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 10); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0 > __p1); - return __ret; -} +#define splat_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \ + 
__ret; \ +}) #else -__ai uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define splat_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (float32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 9); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 > __p1); - return __ret; -} +#define splat_lane_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \ + __ret; \ +}) #else -__ai uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define splat_lane_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = 
(float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 8); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 > __p1); - return __ret; -} +#define splat_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) #else -__ai uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define splat_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ +}) #endif +#define splat_lane_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64x1_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 > __p1); - return __ret; -} +#define 
splat_lane_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) #else -__ai uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define splat_lane_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_lane_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_splat_lane_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0 > __p1); - return __ret; -} +#define splat_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x16_t __s0 = __p0; \ + __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \ + __ret; \ +}) #else -__ai uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define splat_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x16_t __s0 = __p0; \ + 
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x16_t __s0 = __p0; \ + __ret = (poly8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 36); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 > __p1); - return __ret; -} +#define splat_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __ret; \ + poly64x2_t __s0 = __p0; \ + __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \ + __ret; \ +}) #else -__ai uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define splat_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 38); \ + __ret; \ +}) +#define __noswap_splat_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __ret; \ + poly64x2_t __s0 = __p0; \ + __ret = (poly64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 38); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 > __p1); - return __ret; -} +#define splat_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x8_t __s0 = __p0; \ + 
__ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \ + __ret; \ +}) #else -__ai uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define splat_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x8_t __s0 = __p0; \ + __ret = (poly16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 37); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0 > __p1); - return __ret; -} +#define splatq_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \ + __ret; \ +}) #else -__ai uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) { - uint8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define splatq_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 
5, 4, 3, 2, 1, 0); \ + __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + __ret = (poly8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 36); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 > __p1); - return __ret; -} +#define splatq_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s0 = __p0; \ + __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \ + __ret; \ +}) #else -__ai uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define splatq_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s0 = __p0; \ + __ret = (poly64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 38); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 > __p1); - return __ret; -} +#define splatq_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + 
__ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \ + __ret; \ +}) #else -__ai uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define splatq_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + __ret = (poly16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 37); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 > __p1); - return __ret; -} +#define splatq_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) #else -__ai uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) { - uint16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define splatq_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + 
__ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0 <= __p1); - return __ret; -} +#define splatq_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) #else -__ai uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define splatq_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 <= 
__p1); - return __ret; -} +#define splatq_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) #else -__ai uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define splatq_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 <= __p1); - return __ret; -} +#define splatq_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) #else -__ai uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define splatq_laneq_u16(__p0, __p1) __extension__ ({ \ + 
uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0 <= __p1); - return __ret; -} +#define splatq_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) #else -__ai uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define splatq_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8x16_t) 
__builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 <= __p1); - return __ret; -} +#define splatq_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \ + __ret; \ +}) #else -__ai uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define splatq_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + __ret = (float64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 42); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 <= __p1); - return __ret; -} +#define splatq_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \ + __ret; \ +}) #else -__ai uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 
3, 2, 1, 0); - __ret = (uint32x4_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define splatq_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (float32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 41); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 <= __p1); - return __ret; -} +#define splatq_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \ + __ret; \ +}) #else -__ai uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define splatq_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = 
(float16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 40); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0 <= __p1); - return __ret; -} +#define splatq_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) #else -__ai uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define splatq_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 <= __p1); - return __ret; -} +#define splatq_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) #else -__ai uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define splatq_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 <= __p1); - return __ret; -} +#define splatq_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) #else -__ai uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define splatq_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splatq_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_splatq_laneq_v((int8x16_t)__s0, 
__p1, 33); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0 <= __p1); - return __ret; -} +#define splat_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) #else -__ai uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) { - uint8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define splat_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 <= __p1); - return __ret; -} +#define splat_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) #else -__ai uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 
1, 0); - __ret = (uint32x2_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define splat_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 <= __p1); - return __ret; -} +#define splat_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) #else -__ai uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define splat_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 51); \ + __ret; \ +}) +#define __noswap_splat_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) { - uint16x4_t __ret; 
- __ret = (uint16x4_t)(__p0 <= __p1); - return __ret; -} +#define splat_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) #else -__ai uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) { - uint16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define splat_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vclsq_u8(uint8x16_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32); +#define splat_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#else +#define splat_laneq_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_s8(__p0, 
__p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __ret; \ + float64x2_t __s0 = __p0; \ + __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \ + __ret; \ +}) +#else +#define splat_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 42); \ + __ret; \ +}) +#define __noswap_splat_laneq_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __ret; \ + float64x2_t __s0 = __p0; \ + __ret = (float64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 42); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \ + __ret; \ +}) +#else +#define splat_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (float32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 41); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \ + __ret; \ +}) +#else +#define 
splat_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 40); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#else +#define splat_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#else +#define splat_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 35); \ + __ret; \ +}) +#define __noswap_splat_laneq_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __ret; \ + int64x2_t __s0 = __p0; \ + 
__ret = (int64x1_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define splat_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#else +#define splat_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_splat_laneq_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_splat_laneq_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai int8x16_t vclsq_u8(uint8x16_t __p0) { - int8x16_t __ret; +__ai __attribute__((target("neon"))) uint8x16_t vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) uint8x16_t __noswap_vabdq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) 
__builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vclsq_u32(uint32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34); +__ai __attribute__((target("neon"))) uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai int32x4_t vclsq_u32(uint32x4_t __p0) { - int32x4_t __ret; +__ai __attribute__((target("neon"))) uint32x4_t vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vabdq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vclsq_u16(uint16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33); +__ai __attribute__((target("neon"))) uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai int16x8_t vclsq_u16(uint16x8_t __p0) { - int16x8_t __ret; +__ai __attribute__((target("neon"))) uint16x8_t vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33); + uint16x8_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vabdq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vclsq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32); + __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else -__ai int8x16_t vclsq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vabdq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) int8x16_t __noswap_vabdq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vclsq_s32(int32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34); +__ai __attribute__((target("neon"))) float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t 
__ret; + __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else -__ai int32x4_t vclsq_s32(int32x4_t __p0) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34); +__ai __attribute__((target("neon"))) float32x4_t vabdq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vabdq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) int32x4_t __noswap_vabdq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vclsq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33); + __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, 
(int8x16_t)__p1, 33); return __ret; } #else -__ai int16x8_t vclsq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vabdq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) int16x8_t __noswap_vabdq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vcls_u8(uint8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0); +__ai __attribute__((target("neon"))) uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai int8x8_t vcls_u8(uint8x8_t __p0) { - int8x8_t __ret; +__ai __attribute__((target("neon"))) uint8x8_t vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) uint8x8_t __noswap_vabd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return 
__ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vcls_u32(uint32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2); +__ai __attribute__((target("neon"))) uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai int32x2_t vcls_u32(uint32x2_t __p0) { - int32x2_t __ret; +__ai __attribute__((target("neon"))) uint32x2_t vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) uint32x2_t __noswap_vabd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vcls_u16(uint16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1); +__ai __attribute__((target("neon"))) uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai int16x4_t vcls_u16(uint16x4_t __p0) { - int16x4_t __ret; +__ai __attribute__((target("neon"))) uint16x4_t vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, 
(int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) uint16x4_t __noswap_vabd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vcls_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0); + __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8_t vcls_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vabd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) int8x8_t __noswap_vabd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vcls_s32(int32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2); +__ai __attribute__((target("neon"))) float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else -__ai int32x2_t vcls_s32(int32x2_t __p0) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int32x2_t) 
__builtin_neon_vcls_v((int8x8_t)__rev0, 2); +__ai __attribute__((target("neon"))) float32x2_t vabd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vcls_s16(int16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1); +__ai __attribute__((target("neon"))) int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else -__ai int16x4_t vcls_s16(int16x4_t __p0) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0 < __p1); +__ai __attribute__((target("neon"))) int32x2_t vabd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -#else -__ai uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 
4, 3, 2, 1, 0); - __ret = (uint8x16_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32x2_t __noswap_vabd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 < __p1); +__ai __attribute__((target("neon"))) int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t)(__rev0 < __rev1); +__ai __attribute__((target("neon"))) int16x4_t vabd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 
1, 0); +__ai __attribute__((target("neon"))) int16x4_t __noswap_vabd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0 < __p1); +__ai __attribute__((target("neon"))) int8x16_t vabsq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 32); return __ret; } #else -__ai uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; +__ai __attribute__((target("neon"))) int8x16_t vabsq_s8(int8x16_t __p0) { + int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t)(__rev0 < __rev1); + __ret = (int8x16_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 < __p1); +__ai __attribute__((target("neon"))) float32x4_t vabsq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 41); return __ret; } #else -__ai uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) { - uint32x4_t __ret; +__ai __attribute__((target("neon"))) float32x4_t vabsq_f32(float32x4_t __p0) { + float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t)(__rev0 < __rev1); + __ret = (float32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 41); 
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0 < __p1); +__ai __attribute__((target("neon"))) int32x4_t vabsq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 34); return __ret; } #else -__ai uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; +__ai __attribute__((target("neon"))) int32x4_t vabsq_s32(int32x4_t __p0) { + int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t)(__rev0 < __rev1); + __ret = (int32x4_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 < __p1); +__ai __attribute__((target("neon"))) int16x8_t vabsq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 33); return __ret; } #else -__ai uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; +__ai __attribute__((target("neon"))) int16x8_t vabsq_s16(int16x8_t __p0) { + int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - 
uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t)(__rev0 < __rev1); + __ret = (int16x8_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 < __p1); - return __ret; -} -#else -__ai uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0 < __p1); +__ai __attribute__((target("neon"))) int8x8_t vabs_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__p0, 0); return __ret; } #else -__ai uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) { - uint8x8_t __ret; +__ai __attribute__((target("neon"))) int8x8_t vabs_s8(int8x8_t __p0) { + int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 
2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t)(__rev0 < __rev1); + __ret = (int8x8_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 < __p1); +__ai __attribute__((target("neon"))) float32x2_t vabs_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 9); return __ret; } #else -__ai uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) { - uint32x2_t __ret; +__ai __attribute__((target("neon"))) float32x2_t vabs_f32(float32x2_t __p0) { + float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t)(__rev0 < __rev1); + __ret = (float32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0 < __p1); +__ai __attribute__((target("neon"))) int32x2_t vabs_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__p0, 2); return __ret; } #else -__ai uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; +__ai __attribute__((target("neon"))) int32x2_t vabs_s32(int32x2_t __p0) { + int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t)(__rev0 < __rev1); + __ret = (int32x2_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai 
uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 < __p1); +__ai __attribute__((target("neon"))) int16x4_t vabs_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__p0, 1); return __ret; } #else -__ai uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) { - uint16x4_t __ret; +__ai __attribute__((target("neon"))) int16x4_t vabs_s16(int16x4_t __p0) { + int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t)(__rev0 < __rev1); + __ret = (int16x4_t) __builtin_neon_vabs_v((int8x8_t)__rev0, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vclzq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 48); + __ret = __p0 + __p1; return __ret; } #else -__ai uint8x16_t vclzq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 48); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vclzq_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 50); + __ret = __p0 + __p1; 
return __ret; } #else -__ai uint32x4_t vclzq_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 50); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vclzq_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 49); + __ret = __p0 + __p1; return __ret; } #else -__ai uint16x8_t vclzq_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 49); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vclzq_s8(int8x16_t __p0) { +__ai 
__attribute__((target("neon"))) int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 32); + __ret = __p0 + __p1; return __ret; } #else -__ai int8x16_t vclzq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vaddq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 32); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vclzq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vaddq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 34); + __ret = __p0 + __p1; return __ret; } #else -__ai int32x4_t vclzq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vaddq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 
34); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vclzq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 33); + __ret = __p0 + __p1; return __ret; } #else -__ai int16x8_t vclzq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vaddq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 33); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vclz_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 16); + __ret = __p0 + __p1; return __ret; } #else -__ai uint8x8_t vclz_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vadd_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 16); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vclz_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 18); + __ret = __p0 + __p1; return __ret; } #else -__ai uint32x2_t vclz_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vadd_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 18); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif +__ai __attribute__((target("neon"))) uint64x1_t vadd_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 + __p1; + return __ret; +} #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vclz_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 17); + __ret = __p0 + __p1; return __ret; } #else -__ai uint16x4_t vclz_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vadd_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 17); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1; __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vclz_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 0); + __ret = __p0 + __p1; return __ret; } #else -__ai int8x8_t vclz_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vclz_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __p0 + __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vadd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 2); + __ret = __p0 + __p1; return __ret; } #else -__ai int32x2_t vclz_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vadd_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 2); + 
int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif +__ai __attribute__((target("neon"))) int64x1_t vadd_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 + __p1; + return __ret; +} #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vclz_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 1); + __ret = __p0 + __p1; return __ret; } #else -__ai int16x4_t vclz_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vadd_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 1); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vcnt_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vadd_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 4); + __ret = (poly8x8_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 4); return __ret; } #else -__ai poly8x8_t vcnt_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vadd_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 4); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } 
#endif +__ai __attribute__((target("neon"))) poly64x1_t vadd_p64(poly64x1_t __p0, poly64x1_t __p1) { + poly64x1_t __ret; + __ret = (poly64x1_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 6); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vcntq_p8(poly8x16_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 36); +__ai __attribute__((target("neon"))) poly16x4_t vadd_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = (poly16x4_t) __builtin_neon_vadd_v((int8x8_t)__p0, (int8x8_t)__p1, 5); return __ret; } #else -__ai poly8x16_t vcntq_p8(poly8x16_t __p0) { - poly8x16_t __ret; - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 36); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly16x4_t vadd_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (poly16x4_t) __builtin_neon_vadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcntq_u8(uint8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 48); +__ai __attribute__((target("neon"))) poly8x16_t vaddq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 36); return __ret; } #else -__ai uint8x16_t vcntq_u8(uint8x16_t __p0) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) 
__builtin_neon_vcntq_v((int8x16_t)__rev0, 48); +__ai __attribute__((target("neon"))) poly8x16_t vaddq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vcntq_s8(int8x16_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 32); +__ai __attribute__((target("neon"))) poly64x2_t vaddq_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = (poly64x2_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 38); return __ret; } #else -__ai int8x16_t vcntq_s8(int8x16_t __p0) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly64x2_t vaddq_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (poly64x2_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 38); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcnt_u8(uint8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 16); +__ai __attribute__((target("neon"))) poly16x8_t vaddq_p16(poly16x8_t 
__p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = (poly16x8_t) __builtin_neon_vaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 37); return __ret; } #else -__ai uint8x8_t vcnt_u8(uint8x8_t __p0) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 16); +__ai __attribute__((target("neon"))) poly16x8_t vaddq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly16x8_t) __builtin_neon_vaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 37); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vcnt_s8(int8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 0); +__ai __attribute__((target("neon"))) uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); return __ret; } #else -__ai int8x8_t vcnt_s8(int8x8_t __p0) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint16x4_t vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t 
__noswap_vaddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); +__ai __attribute__((target("neon"))) uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); return __ret; } #else -__ai poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x16_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint32x2_t vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t __noswap_vaddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); +__ai __attribute__((target("neon"))) uint8x8_t 
vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); return __ret; } #else -__ai poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x8_t __ret; - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); +__ai __attribute__((target("neon"))) uint8x8_t vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) uint8x8_t __noswap_vaddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); +__ai __attribute__((target("neon"))) int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); return __ret; } #else -__ai uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x16_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - 
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai uint8x16_t __noswap_vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); - return __ret; -} -#else -__ai uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x4_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3); +__ai __attribute__((target("neon"))) int16x4_t vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint32x4_t __noswap_vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); +__ai __attribute__((target("neon"))) int16x4_t __noswap_vaddhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1); +__ai __attribute__((target("neon"))) int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, 
(int8x16_t)__p1, 2); return __ret; } #else -__ai uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1); +__ai __attribute__((target("neon"))) int32x2_t vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) int32x2_t __noswap_vaddhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); +__ai __attribute__((target("neon"))) int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); return __ret; } #else -__ai uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x8_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); +__ai __attribute__((target("neon"))) int8x8_t vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return 
__ret; } -__ai uint16x8_t __noswap_vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); +__ai __attribute__((target("neon"))) int8x8_t __noswap_vaddhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vaddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) { - int8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); +__ai __attribute__((target("neon"))) uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 & __p1; return __ret; } #else -__ai int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) { - int8x16_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); +__ai __attribute__((target("neon"))) uint8x16_t vandq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & __rev1; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int8x16_t __noswap_vcombine_s8(int8x8_t __p0, int8x8_t __p1) { - int8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); - return __ret; -} #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) { - float32x4_t __ret; - __ret = __builtin_shufflevector(__p0, 
__p1, 0, 1, 2, 3); +__ai __attribute__((target("neon"))) uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 & __p1; return __ret; } #else -__ai float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) { - float32x4_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3); +__ai __attribute__((target("neon"))) uint32x4_t vandq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 & __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai float32x4_t __noswap_vcombine_f32(float32x2_t __p0, float32x2_t __p1) { - float32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vandq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); +__ai __attribute__((target("neon"))) uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 & __p1; return __ret; } #else -__ai float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) { - 
float16x8_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); +__ai __attribute__((target("neon"))) uint16x8_t vandq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai float16x8_t __noswap_vcombine_f16(float16x4_t __p0, float16x4_t __p1) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vandq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + __ret = __p0 & __p1; return __ret; } #else -__ai int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vandq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t 
__ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3); + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 & __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vcombine_s32(int32x2_t __p0, int32x2_t __p1) { - int32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); - return __ret; -} #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + __ret = __p0 & __p1; return __ret; } #else -__ai int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vandq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 & __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __p0 & __p1; return __ret; } #else -__ai int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vandq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; 
__rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int16x8_t __noswap_vcombine_s16(int16x4_t __p0, int16x4_t __p1) { - int16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 & __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vand_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif -#define vcreate_p8(__p0) __extension__ ({ \ - poly8x8_t __ret; \ - uint64_t __promote = __p0; \ - __ret = (poly8x8_t)(__promote); \ - __ret; \ -}) -#define vcreate_p16(__p0) __extension__ ({ \ - poly16x4_t __ret; \ - uint64_t __promote = __p0; \ - __ret = (poly16x4_t)(__promote); \ - __ret; \ -}) -#define vcreate_u8(__p0) __extension__ ({ \ - uint8x8_t __ret; \ - uint64_t __promote = __p0; \ - __ret = (uint8x8_t)(__promote); \ - __ret; \ -}) -#define vcreate_u32(__p0) __extension__ ({ \ - uint32x2_t __ret; \ - uint64_t __promote = __p0; \ - __ret = (uint32x2_t)(__promote); \ - __ret; \ -}) -#define vcreate_u64(__p0) __extension__ ({ \ - uint64x1_t __ret; \ - uint64_t __promote = __p0; \ - __ret = (uint64x1_t)(__promote); \ - __ret; \ -}) -#define vcreate_u16(__p0) __extension__ ({ \ - 
uint16x4_t __ret; \ - uint64_t __promote = __p0; \ - __ret = (uint16x4_t)(__promote); \ - __ret; \ -}) -#define vcreate_s8(__p0) __extension__ ({ \ - int8x8_t __ret; \ - uint64_t __promote = __p0; \ - __ret = (int8x8_t)(__promote); \ - __ret; \ -}) -#define vcreate_f32(__p0) __extension__ ({ \ - float32x2_t __ret; \ - uint64_t __promote = __p0; \ - __ret = (float32x2_t)(__promote); \ - __ret; \ -}) -#define vcreate_f16(__p0) __extension__ ({ \ - float16x4_t __ret; \ - uint64_t __promote = __p0; \ - __ret = (float16x4_t)(__promote); \ - __ret; \ -}) -#define vcreate_s32(__p0) __extension__ ({ \ - int32x2_t __ret; \ - uint64_t __promote = __p0; \ - __ret = (int32x2_t)(__promote); \ - __ret; \ -}) -#define vcreate_s64(__p0) __extension__ ({ \ - int64x1_t __ret; \ - uint64_t __promote = __p0; \ - __ret = (int64x1_t)(__promote); \ - __ret; \ -}) -#define vcreate_s16(__p0) __extension__ ({ \ - int16x4_t __ret; \ - uint64_t __promote = __p0; \ - __ret = (int16x4_t)(__promote); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 50); +__ai __attribute__((target("neon"))) uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 & __p1; return __ret; } #else -__ai float32x4_t vcvtq_f32_u32(uint32x4_t __p0) { - float32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint32x2_t vand_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif +__ai 
__attribute__((target("neon"))) uint64x1_t vand_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 & __p1; + return __ret; +} #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 34); +__ai __attribute__((target("neon"))) uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 & __p1; return __ret; } #else -__ai float32x4_t vcvtq_f32_s32(int32x4_t __p0) { - float32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 34); +__ai __attribute__((target("neon"))) uint16x4_t vand_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 & __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 18); +__ai __attribute__((target("neon"))) int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 & __p1; return __ret; } #else -__ai float32x2_t vcvt_f32_u32(uint32x2_t __p0) { - float32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) int8x8_t vand_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & __rev1; + __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 2); +__ai __attribute__((target("neon"))) int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 & __p1; return __ret; } #else -__ai float32x2_t vcvt_f32_s32(int32x2_t __p0) { - float32x2_t __ret; +__ai __attribute__((target("neon"))) int32x2_t vand_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 2); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 & __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif +__ai __attribute__((target("neon"))) int64x1_t vand_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 & __p1; + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \ - float32x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 50); \ - __ret; \ -}) -#else -#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \ - float32x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \ - float32x4_t __ret; \ - int32x4_t __s0 = __p0; \ - __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 34); \ - __ret; \ -}) -#else -#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \ - float32x4_t __ret; \ - int32x4_t __s0 = __p0; \ - 
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \ - float32x2_t __ret; \ - uint32x2_t __s0 = __p0; \ - __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 18); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 & __p1; + return __ret; +} #else -#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \ - float32x2_t __ret; \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vand_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 & __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \ - float32x2_t __ret; \ - int32x2_t __s0 = __p0; \ - __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} #else -#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \ - float32x2_t __ret; \ - int32x2_t __s0 = __p0; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 2); 
\ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vbicq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \ - int32x4_t __ret; \ - float32x4_t __s0 = __p0; \ - __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__s0, __p1, 34); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} #else -#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \ - int32x4_t __ret; \ - float32x4_t __s0 = __p0; \ - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__rev0, __p1, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vbicq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \ - int32x2_t __ret; \ - float32x2_t __s0 = __p0; \ - __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__s0, __p1, 2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t 
vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} #else -#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \ - int32x2_t __ret; \ - float32x2_t __s0 = __p0; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__rev0, __p1, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vbicq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \ - uint32x4_t __ret; \ - float32x4_t __s0 = __p0; \ - __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__s0, __p1, 50); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} #else -#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \ - uint32x4_t __ret; \ - float32x4_t __s0 = __p0; \ - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__rev0, __p1, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vbicq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef 
__LITTLE_ENDIAN__ -#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - float32x2_t __s0 = __p0; \ - __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__s0, __p1, 18); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} #else -#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - float32x2_t __s0 = __p0; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__rev0, __p1, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16_t vbicq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__p0, 34); + __ret = __p0 & ~__p1; return __ret; } #else -__ai int32x4_t vcvtq_s32_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vbicq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__rev0, 34); + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = 
__rev0 & ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__p0, 2); +__ai __attribute__((target("neon"))) int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 & ~__p1; return __ret; } #else -__ai int32x2_t vcvt_s32_f32(float32x2_t __p0) { - int32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__rev0, 2); +__ai __attribute__((target("neon"))) int64x2_t vbicq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 & ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__p0, 50); +__ai __attribute__((target("neon"))) int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 & ~__p1; return __ret; } #else -__ai uint32x4_t vcvtq_u32_f32(float32x4_t __p0) { - uint32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int16x8_t vbicq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return 
__ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__p0, 18); +__ai __attribute__((target("neon"))) uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 & ~__p1; return __ret; } #else -__ai uint32x2_t vcvt_u32_f32(float32x2_t __p0) { - uint32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint8x8_t vbic_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_lane_p8(__p0_0, __p1_0) __extension__ ({ \ - poly8x8_t __ret_0; \ - poly8x8_t __s0_0 = __p0_0; \ - __ret_0 = splat_lane_p8(__s0_0, __p1_0); \ - __ret_0; \ -}) -#else -#define vdup_lane_p8(__p0_1, __p1_1) __extension__ ({ \ - poly8x8_t __ret_1; \ - poly8x8_t __s0_1 = __p0_1; \ - poly8x8_t __rev0_1; __rev0_1 = __builtin_shufflevector(__s0_1, __s0_1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_1 = __noswap_splat_lane_p8(__rev0_1, __p1_1); \ - __ret_1 = __builtin_shufflevector(__ret_1, __ret_1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_1; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_p16(__p0_2, __p1_2) __extension__ ({ \ - poly16x4_t __ret_2; \ - poly16x4_t __s0_2 = __p0_2; \ - __ret_2 = splat_lane_p16(__s0_2, __p1_2); \ - __ret_2; \ -}) -#else -#define vdup_lane_p16(__p0_3, __p1_3) __extension__ ({ \ - poly16x4_t __ret_3; \ - poly16x4_t __s0_3 = __p0_3; \ - poly16x4_t __rev0_3; __rev0_3 = __builtin_shufflevector(__s0_3, __s0_3, 
3, 2, 1, 0); \ - __ret_3 = __noswap_splat_lane_p16(__rev0_3, __p1_3); \ - __ret_3 = __builtin_shufflevector(__ret_3, __ret_3, 3, 2, 1, 0); \ - __ret_3; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_p8(__p0_4, __p1_4) __extension__ ({ \ - poly8x16_t __ret_4; \ - poly8x8_t __s0_4 = __p0_4; \ - __ret_4 = splatq_lane_p8(__s0_4, __p1_4); \ - __ret_4; \ -}) -#else -#define vdupq_lane_p8(__p0_5, __p1_5) __extension__ ({ \ - poly8x16_t __ret_5; \ - poly8x8_t __s0_5 = __p0_5; \ - poly8x8_t __rev0_5; __rev0_5 = __builtin_shufflevector(__s0_5, __s0_5, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_5 = __noswap_splatq_lane_p8(__rev0_5, __p1_5); \ - __ret_5 = __builtin_shufflevector(__ret_5, __ret_5, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_5; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_p16(__p0_6, __p1_6) __extension__ ({ \ - poly16x8_t __ret_6; \ - poly16x4_t __s0_6 = __p0_6; \ - __ret_6 = splatq_lane_p16(__s0_6, __p1_6); \ - __ret_6; \ -}) -#else -#define vdupq_lane_p16(__p0_7, __p1_7) __extension__ ({ \ - poly16x8_t __ret_7; \ - poly16x4_t __s0_7 = __p0_7; \ - poly16x4_t __rev0_7; __rev0_7 = __builtin_shufflevector(__s0_7, __s0_7, 3, 2, 1, 0); \ - __ret_7 = __noswap_splatq_lane_p16(__rev0_7, __p1_7); \ - __ret_7 = __builtin_shufflevector(__ret_7, __ret_7, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_7; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_u8(__p0_8, __p1_8) __extension__ ({ \ - uint8x16_t __ret_8; \ - uint8x8_t __s0_8 = __p0_8; \ - __ret_8 = splatq_lane_u8(__s0_8, __p1_8); \ - __ret_8; \ -}) -#else -#define vdupq_lane_u8(__p0_9, __p1_9) __extension__ ({ \ - uint8x16_t __ret_9; \ - uint8x8_t __s0_9 = __p0_9; \ - uint8x8_t __rev0_9; __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_9 = __noswap_splatq_lane_u8(__rev0_9, __p1_9); \ - __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_9; \ -}) -#endif - -#ifdef 
__LITTLE_ENDIAN__ -#define vdupq_lane_u32(__p0_10, __p1_10) __extension__ ({ \ - uint32x4_t __ret_10; \ - uint32x2_t __s0_10 = __p0_10; \ - __ret_10 = splatq_lane_u32(__s0_10, __p1_10); \ - __ret_10; \ -}) -#else -#define vdupq_lane_u32(__p0_11, __p1_11) __extension__ ({ \ - uint32x4_t __ret_11; \ - uint32x2_t __s0_11 = __p0_11; \ - uint32x2_t __rev0_11; __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 1, 0); \ - __ret_11 = __noswap_splatq_lane_u32(__rev0_11, __p1_11); \ - __ret_11 = __builtin_shufflevector(__ret_11, __ret_11, 3, 2, 1, 0); \ - __ret_11; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_u64(__p0_12, __p1_12) __extension__ ({ \ - uint64x2_t __ret_12; \ - uint64x1_t __s0_12 = __p0_12; \ - __ret_12 = splatq_lane_u64(__s0_12, __p1_12); \ - __ret_12; \ -}) -#else -#define vdupq_lane_u64(__p0_13, __p1_13) __extension__ ({ \ - uint64x2_t __ret_13; \ - uint64x1_t __s0_13 = __p0_13; \ - __ret_13 = __noswap_splatq_lane_u64(__s0_13, __p1_13); \ - __ret_13 = __builtin_shufflevector(__ret_13, __ret_13, 1, 0); \ - __ret_13; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_u16(__p0_14, __p1_14) __extension__ ({ \ - uint16x8_t __ret_14; \ - uint16x4_t __s0_14 = __p0_14; \ - __ret_14 = splatq_lane_u16(__s0_14, __p1_14); \ - __ret_14; \ -}) -#else -#define vdupq_lane_u16(__p0_15, __p1_15) __extension__ ({ \ - uint16x8_t __ret_15; \ - uint16x4_t __s0_15 = __p0_15; \ - uint16x4_t __rev0_15; __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 3, 2, 1, 0); \ - __ret_15 = __noswap_splatq_lane_u16(__rev0_15, __p1_15); \ - __ret_15 = __builtin_shufflevector(__ret_15, __ret_15, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_15; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_s8(__p0_16, __p1_16) __extension__ ({ \ - int8x16_t __ret_16; \ - int8x8_t __s0_16 = __p0_16; \ - __ret_16 = splatq_lane_s8(__s0_16, __p1_16); \ - __ret_16; \ -}) -#else -#define vdupq_lane_s8(__p0_17, __p1_17) __extension__ ({ \ - int8x16_t __ret_17; \ - int8x8_t 
__s0_17 = __p0_17; \ - int8x8_t __rev0_17; __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_17 = __noswap_splatq_lane_s8(__rev0_17, __p1_17); \ - __ret_17 = __builtin_shufflevector(__ret_17, __ret_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_17; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_f32(__p0_18, __p1_18) __extension__ ({ \ - float32x4_t __ret_18; \ - float32x2_t __s0_18 = __p0_18; \ - __ret_18 = splatq_lane_f32(__s0_18, __p1_18); \ - __ret_18; \ -}) -#else -#define vdupq_lane_f32(__p0_19, __p1_19) __extension__ ({ \ - float32x4_t __ret_19; \ - float32x2_t __s0_19 = __p0_19; \ - float32x2_t __rev0_19; __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 1, 0); \ - __ret_19 = __noswap_splatq_lane_f32(__rev0_19, __p1_19); \ - __ret_19 = __builtin_shufflevector(__ret_19, __ret_19, 3, 2, 1, 0); \ - __ret_19; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_f16(__p0_20, __p1_20) __extension__ ({ \ - float16x8_t __ret_20; \ - float16x4_t __s0_20 = __p0_20; \ - __ret_20 = splatq_lane_f16(__s0_20, __p1_20); \ - __ret_20; \ -}) -#else -#define vdupq_lane_f16(__p0_21, __p1_21) __extension__ ({ \ - float16x8_t __ret_21; \ - float16x4_t __s0_21 = __p0_21; \ - float16x4_t __rev0_21; __rev0_21 = __builtin_shufflevector(__s0_21, __s0_21, 3, 2, 1, 0); \ - __ret_21 = __noswap_splatq_lane_f16(__rev0_21, __p1_21); \ - __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_21; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_s32(__p0_22, __p1_22) __extension__ ({ \ - int32x4_t __ret_22; \ - int32x2_t __s0_22 = __p0_22; \ - __ret_22 = splatq_lane_s32(__s0_22, __p1_22); \ - __ret_22; \ -}) -#else -#define vdupq_lane_s32(__p0_23, __p1_23) __extension__ ({ \ - int32x4_t __ret_23; \ - int32x2_t __s0_23 = __p0_23; \ - int32x2_t __rev0_23; __rev0_23 = __builtin_shufflevector(__s0_23, __s0_23, 1, 0); \ - __ret_23 = 
__noswap_splatq_lane_s32(__rev0_23, __p1_23); \ - __ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 3, 2, 1, 0); \ - __ret_23; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_s64(__p0_24, __p1_24) __extension__ ({ \ - int64x2_t __ret_24; \ - int64x1_t __s0_24 = __p0_24; \ - __ret_24 = splatq_lane_s64(__s0_24, __p1_24); \ - __ret_24; \ -}) -#else -#define vdupq_lane_s64(__p0_25, __p1_25) __extension__ ({ \ - int64x2_t __ret_25; \ - int64x1_t __s0_25 = __p0_25; \ - __ret_25 = __noswap_splatq_lane_s64(__s0_25, __p1_25); \ - __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 1, 0); \ - __ret_25; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_s16(__p0_26, __p1_26) __extension__ ({ \ - int16x8_t __ret_26; \ - int16x4_t __s0_26 = __p0_26; \ - __ret_26 = splatq_lane_s16(__s0_26, __p1_26); \ - __ret_26; \ -}) -#else -#define vdupq_lane_s16(__p0_27, __p1_27) __extension__ ({ \ - int16x8_t __ret_27; \ - int16x4_t __s0_27 = __p0_27; \ - int16x4_t __rev0_27; __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 3, 2, 1, 0); \ - __ret_27 = __noswap_splatq_lane_s16(__rev0_27, __p1_27); \ - __ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_27; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_u8(__p0_28, __p1_28) __extension__ ({ \ - uint8x8_t __ret_28; \ - uint8x8_t __s0_28 = __p0_28; \ - __ret_28 = splat_lane_u8(__s0_28, __p1_28); \ - __ret_28; \ -}) -#else -#define vdup_lane_u8(__p0_29, __p1_29) __extension__ ({ \ - uint8x8_t __ret_29; \ - uint8x8_t __s0_29 = __p0_29; \ - uint8x8_t __rev0_29; __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_29 = __noswap_splat_lane_u8(__rev0_29, __p1_29); \ - __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_29; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_u32(__p0_30, __p1_30) __extension__ ({ \ - uint32x2_t __ret_30; \ - uint32x2_t __s0_30 = __p0_30; \ - 
__ret_30 = splat_lane_u32(__s0_30, __p1_30); \ - __ret_30; \ -}) -#else -#define vdup_lane_u32(__p0_31, __p1_31) __extension__ ({ \ - uint32x2_t __ret_31; \ - uint32x2_t __s0_31 = __p0_31; \ - uint32x2_t __rev0_31; __rev0_31 = __builtin_shufflevector(__s0_31, __s0_31, 1, 0); \ - __ret_31 = __noswap_splat_lane_u32(__rev0_31, __p1_31); \ - __ret_31 = __builtin_shufflevector(__ret_31, __ret_31, 1, 0); \ - __ret_31; \ -}) -#endif - -#define vdup_lane_u64(__p0_32, __p1_32) __extension__ ({ \ - uint64x1_t __ret_32; \ - uint64x1_t __s0_32 = __p0_32; \ - __ret_32 = splat_lane_u64(__s0_32, __p1_32); \ - __ret_32; \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_u16(__p0_33, __p1_33) __extension__ ({ \ - uint16x4_t __ret_33; \ - uint16x4_t __s0_33 = __p0_33; \ - __ret_33 = splat_lane_u16(__s0_33, __p1_33); \ - __ret_33; \ -}) -#else -#define vdup_lane_u16(__p0_34, __p1_34) __extension__ ({ \ - uint16x4_t __ret_34; \ - uint16x4_t __s0_34 = __p0_34; \ - uint16x4_t __rev0_34; __rev0_34 = __builtin_shufflevector(__s0_34, __s0_34, 3, 2, 1, 0); \ - __ret_34 = __noswap_splat_lane_u16(__rev0_34, __p1_34); \ - __ret_34 = __builtin_shufflevector(__ret_34, __ret_34, 3, 2, 1, 0); \ - __ret_34; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vdup_lane_s8(__p0_35, __p1_35) __extension__ ({ \ - int8x8_t __ret_35; \ - int8x8_t __s0_35 = __p0_35; \ - __ret_35 = splat_lane_s8(__s0_35, __p1_35); \ - __ret_35; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} #else -#define vdup_lane_s8(__p0_36, __p1_36) __extension__ ({ \ - int8x8_t __ret_36; \ - int8x8_t __s0_36 = __p0_36; \ - int8x8_t __rev0_36; __rev0_36 = __builtin_shufflevector(__s0_36, __s0_36, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_36 = __noswap_splat_lane_s8(__rev0_36, __p1_36); \ - __ret_36 = __builtin_shufflevector(__ret_36, __ret_36, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_36; \ -}) +__ai __attribute__((target("neon"))) 
uint32x2_t vbic_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) uint64x1_t vbic_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vdup_lane_f32(__p0_37, __p1_37) __extension__ ({ \ - float32x2_t __ret_37; \ - float32x2_t __s0_37 = __p0_37; \ - __ret_37 = splat_lane_f32(__s0_37, __p1_37); \ - __ret_37; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} #else -#define vdup_lane_f32(__p0_38, __p1_38) __extension__ ({ \ - float32x2_t __ret_38; \ - float32x2_t __s0_38 = __p0_38; \ - float32x2_t __rev0_38; __rev0_38 = __builtin_shufflevector(__s0_38, __s0_38, 1, 0); \ - __ret_38 = __noswap_splat_lane_f32(__rev0_38, __p1_38); \ - __ret_38 = __builtin_shufflevector(__ret_38, __ret_38, 1, 0); \ - __ret_38; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vbic_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_lane_f16(__p0_39, __p1_39) __extension__ ({ \ - float16x4_t __ret_39; \ - float16x4_t __s0_39 = __p0_39; \ - __ret_39 = splat_lane_f16(__s0_39, __p1_39); \ - __ret_39; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} #else -#define 
vdup_lane_f16(__p0_40, __p1_40) __extension__ ({ \ - float16x4_t __ret_40; \ - float16x4_t __s0_40 = __p0_40; \ - float16x4_t __rev0_40; __rev0_40 = __builtin_shufflevector(__s0_40, __s0_40, 3, 2, 1, 0); \ - __ret_40 = __noswap_splat_lane_f16(__rev0_40, __p1_40); \ - __ret_40 = __builtin_shufflevector(__ret_40, __ret_40, 3, 2, 1, 0); \ - __ret_40; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vbic_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_lane_s32(__p0_41, __p1_41) __extension__ ({ \ - int32x2_t __ret_41; \ - int32x2_t __s0_41 = __p0_41; \ - __ret_41 = splat_lane_s32(__s0_41, __p1_41); \ - __ret_41; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} #else -#define vdup_lane_s32(__p0_42, __p1_42) __extension__ ({ \ - int32x2_t __ret_42; \ - int32x2_t __s0_42 = __p0_42; \ - int32x2_t __rev0_42; __rev0_42 = __builtin_shufflevector(__s0_42, __s0_42, 1, 0); \ - __ret_42 = __noswap_splat_lane_s32(__rev0_42, __p1_42); \ - __ret_42 = __builtin_shufflevector(__ret_42, __ret_42, 1, 0); \ - __ret_42; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vbic_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif -#define vdup_lane_s64(__p0_43, __p1_43) __extension__ ({ \ - int64x1_t __ret_43; \ - int64x1_t __s0_43 = __p0_43; \ - __ret_43 = splat_lane_s64(__s0_43, 
__p1_43); \ - __ret_43; \ -}) +__ai __attribute__((target("neon"))) int64x1_t vbic_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vdup_lane_s16(__p0_44, __p1_44) __extension__ ({ \ - int16x4_t __ret_44; \ - int16x4_t __s0_44 = __p0_44; \ - __ret_44 = splat_lane_s16(__s0_44, __p1_44); \ - __ret_44; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 & ~__p1; + return __ret; +} #else -#define vdup_lane_s16(__p0_45, __p1_45) __extension__ ({ \ - int16x4_t __ret_45; \ - int16x4_t __s0_45 = __p0_45; \ - int16x4_t __rev0_45; __rev0_45 = __builtin_shufflevector(__s0_45, __s0_45, 3, 2, 1, 0); \ - __ret_45 = __noswap_splat_lane_s16(__rev0_45, __p1_45); \ - __ret_45 = __builtin_shufflevector(__ret_45, __ret_45, 3, 2, 1, 0); \ - __ret_45; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vbic_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 & ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vdup_n_p8(poly8_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) { poly8x8_t __ret; - __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4); return __ret; } #else -__ai poly8x8_t vdup_n_p8(poly8_t __p0) { +__ai __attribute__((target("neon"))) poly8x8_t vbsl_p8(uint8x8_t __p0, poly8x8_t __p1, poly8x8_t __p2) { poly8x8_t __ret; - __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + 
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vdup_n_p16(poly16_t __p0) { +__ai __attribute__((target("neon"))) poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) { poly16x4_t __ret; - __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; + __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 5); return __ret; } #else -__ai poly16x4_t vdup_n_p16(poly16_t __p0) { +__ai __attribute__((target("neon"))) poly16x4_t vbsl_p16(uint16x4_t __p0, poly16x4_t __p1, poly16x4_t __p2) { poly16x4_t __ret; - __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + poly16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (poly16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 5); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vdupq_n_p8(poly8_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) { poly8x16_t __ret; - __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36); return __ret; } #else -__ai poly8x16_t vdupq_n_p8(poly8_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vbslq_p8(uint8x16_t __p0, poly8x16_t __p1, poly8x16_t __p2) { poly8x16_t 
__ret; - __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vdupq_n_p16(poly16_t __p0) { +__ai __attribute__((target("neon"))) poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) { poly16x8_t __ret; - __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 37); return __ret; } #else -__ai poly16x8_t vdupq_n_p16(poly16_t __p0) { +__ai __attribute__((target("neon"))) poly16x8_t vbslq_p16(uint16x8_t __p0, poly16x8_t __p1, poly16x8_t __p2) { poly16x8_t __ret; - __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 37); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vdupq_n_u8(uint8_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t 
__p1, uint8x16_t __p2) { uint8x16_t __ret; - __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); return __ret; } #else -__ai uint8x16_t vdupq_n_u8(uint8_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vbslq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; - __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vdupq_n_u32(uint32_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; - __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; + __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else -__ai uint32x4_t vdupq_n_u32(uint32_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vbslq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; - __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 
3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vdupq_n_u64(uint64_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; - __ret = (uint64x2_t) {__p0, __p0}; + __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); return __ret; } #else -__ai uint64x2_t vdupq_n_u64(uint64_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vbslq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; - __ret = (uint64x2_t) {__p0, __p0}; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vdupq_n_u16(uint16_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; - __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49); return __ret; } #else -__ai uint16x8_t vdupq_n_u16(uint16_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vbslq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; - __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 
2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vdupq_n_s8(int8_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; - __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); return __ret; } #else -__ai int8x16_t vdupq_n_s8(int8_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vbslq_s8(uint8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; - __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vdupq_n_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; - __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; + __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #else 
-__ai float32x4_t vdupq_n_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vbslq_f32(uint32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; - __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_n_f16(__p0) __extension__ ({ \ - float16x8_t __ret; \ - float16_t __s0 = __p0; \ - __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ - __ret; \ -}) -#else -#define vdupq_n_f16(__p0) __extension__ ({ \ - float16x8_t __ret; \ - float16_t __s0 = __p0; \ - __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vdupq_n_s32(int32_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; - __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; + __ret = (int32x4_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #else -__ai int32x4_t vdupq_n_s32(int32_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vbslq_s32(uint32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; - __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int32x4_t) 
__builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vdupq_n_s64(int64_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int64x2_t __ret; - __ret = (int64x2_t) {__p0, __p0}; + __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35); return __ret; } #else -__ai int64x2_t vdupq_n_s64(int64_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vbslq_s64(uint64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int64x2_t __ret; - __ret = (int64x2_t) {__p0, __p0}; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vdupq_n_s16(int16_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; - __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); return __ret; } #else -__ai int16x8_t vdupq_n_s16(int16_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vbslq_s16(uint16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; - __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, 
__p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vdup_n_u8(uint8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; - __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16); return __ret; } #else -__ai uint8x8_t vdup_n_u8(uint8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vbsl_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; - __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vdup_n_u32(uint32_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint32x2_t __ret; - __ret = (uint32x2_t) {__p0, __p0}; + __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18); return __ret; } #else -__ai uint32x2_t vdup_n_u32(uint32_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vbsl_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint32x2_t __ret; - __ret = (uint32x2_t) {__p0, __p0}; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai uint64x1_t vdup_n_u64(uint64_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vbsl_u64(uint64x1_t __p0, uint64x1_t __p1, uint64x1_t __p2) { uint64x1_t __ret; - __ret = (uint64x1_t) {__p0}; + __ret = (uint64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vdup_n_u16(uint16_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint16x4_t __ret; - __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; + __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 17); return __ret; } #else -__ai uint16x4_t vdup_n_u16(uint16_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vbsl_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint16x4_t __ret; - __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vdup_n_s8(int8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; - __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0); return __ret; } 
#else -__ai int8x8_t vdup_n_s8(int8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vbsl_s8(uint8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; - __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vdup_n_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; - __ret = (float32x2_t) {__p0, __p0}; + __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #else -__ai float32x2_t vdup_n_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vbsl_f32(uint32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; - __ret = (float32x2_t) {__p0, __p0}; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_n_f16(__p0) __extension__ ({ \ - float16x4_t __ret; \ - float16_t __s0 = __p0; \ - __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ - __ret; \ -}) -#else -#define vdup_n_f16(__p0) __extension__ ({ \ - float16x4_t __ret; \ - float16_t __s0 = __p0; \ - __ret = (float16x4_t) 
{__s0, __s0, __s0, __s0}; \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vdup_n_s32(int32_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; - __ret = (int32x2_t) {__p0, __p0}; + __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); return __ret; } #else -__ai int32x2_t vdup_n_s32(int32_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vbsl_s32(uint32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; - __ret = (int32x2_t) {__p0, __p0}; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int32x2_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai int64x1_t vdup_n_s64(int64_t __p0) { +__ai __attribute__((target("neon"))) int64x1_t vbsl_s64(uint64x1_t __p0, int64x1_t __p1, int64x1_t __p2) { int64x1_t __ret; - __ret = (int64x1_t) {__p0}; + __ret = (int64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 3); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vdup_n_s16(int16_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; - __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; + __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); return __ret; } #else -__ai int16x4_t vdup_n_s16(int16_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vbsl_s16(uint16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; - __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; + uint16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = __p0 ^ __p1; +__ai __attribute__((target("neon"))) float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); return __ret; } #else -__ai uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 ^ __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = __p0 ^ __p1; +__ai 
__attribute__((target("neon"))) float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; } #else -__ai uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 ^ __rev1; +__ai __attribute__((target("neon"))) float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vbsl_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = __p0 ^ __p1; +__ai __attribute__((target("neon"))) uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 ^ __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint32x4_t vcageq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = __p0 ^ __p1; +__ai __attribute__((target("neon"))) uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 ^ __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint32x2_t vcage_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcage_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = __p0 ^ __p1; +__ai __attribute__((target("neon"))) uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 
12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 ^ __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint32x4_t vcagtq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = __p0 ^ __p1; +__ai __attribute__((target("neon"))) uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 ^ __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint32x2_t vcagt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcagt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = __p0 ^ __p1; +__ai __attribute__((target("neon"))) uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) 
__builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 ^ __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint32x4_t vcaleq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = __p0 ^ __p1; +__ai __attribute__((target("neon"))) uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; +__ai __attribute__((target("neon"))) uint32x2_t vcale_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcale_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) 
uint32x4_t vcaltq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcalt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcalt_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vceq_p8(poly8x8_t __p0, poly8x8_t __p1) { + uint8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t 
vceqq_p8(poly8x16_t __p0, poly8x16_t __p1) { + uint8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vceqq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vceqq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 == 
__p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vceqq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vceqq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vceqq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 == __p1); + 
return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vceqq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vceqq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 ^ __rev1; + __ret = (uint16x8_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; - __ret = __p0 ^ __p1; + __ret = (uint8x8_t)(__p0 == __p1); return __ret; } #else -__ai uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vceq_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 ^ __rev1; + __ret = (uint8x8_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t 
vceq_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; - __ret = __p0 ^ __p1; + __ret = (uint32x2_t)(__p0 == __p1); return __ret; } #else -__ai uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vceq_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 ^ __rev1; + __ret = (uint32x2_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = __p0 ^ __p1; - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; - __ret = __p0 ^ __p1; + __ret = (uint16x4_t)(__p0 == __p1); return __ret; } #else -__ai uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vceq_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 ^ __rev1; + __ret = (uint16x4_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = __p0 ^ __p1; +__ai __attribute__((target("neon"))) uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 == __p1); return __ret; } #else -__ai int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; +__ai __attribute__((target("neon"))) uint8x8_t vceq_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; int8x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 ^ __rev1; + __ret = (uint8x8_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = __p0 ^ __p1; +__ai __attribute__((target("neon"))) uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 == __p1); return __ret; } #else -__ai int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 ^ __rev1; +__ai __attribute__((target("neon"))) uint32x2_t vceq_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = __p0 ^ __p1; +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 == __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vceq_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } +#endif + #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t veor_s16(int16x4_t __p0, 
int16x4_t __p1) { - int16x4_t __ret; - __ret = __p0 ^ __p1; +__ai __attribute__((target("neon"))) uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 == __p1); return __ret; } #else -__ai int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; +__ai __attribute__((target("neon"))) uint16x4_t vceq_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 ^ __rev1; + __ret = (uint16x4_t)(__rev0 == __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vext_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8_t __ret; \ - poly8x8_t __s0 = __p0; \ - poly8x8_t __s1 = __p1; \ - __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 >= __p1); + return __ret; +} #else -#define vext_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8_t __ret; \ - poly8x8_t __s0 = __p0; \ - poly8x8_t __s1 = __p1; \ - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vcgeq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 
2, 1, 0); + __ret = (uint8x16_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vext_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4_t __ret; \ - poly16x4_t __s0 = __p0; \ - poly16x4_t __s1 = __p1; \ - __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 >= __p1); + return __ret; +} #else -#define vext_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4_t __ret; \ - poly16x4_t __s0 = __p0; \ - poly16x4_t __s1 = __p1; \ - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vcgeq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16_t __ret; \ - poly8x16_t __s0 = __p0; \ - poly8x16_t __s1 = __p1; \ - __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 >= __p1); + return __ret; +} #else -#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \ - 
poly8x16_t __ret; \ - poly8x16_t __s0 = __p0; \ - poly8x16_t __s1 = __p1; \ - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vcgeq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8_t __ret; \ - poly16x8_t __s0 = __p0; \ - poly16x8_t __s1 = __p1; \ - __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vcgeq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 >= __p1); + return __ret; +} #else -#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8_t __ret; \ - poly16x8_t __s0 = __p0; \ - poly16x8_t __s1 = __p1; \ - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vcgeq_s8(int8x16_t __p0, 
int8x16_t __p1) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __ret; \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __s1 = __p1; \ - __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 >= __p1); + return __ret; +} #else -#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __ret; \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __s1 = __p1; \ - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vcgeq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __ret; \ 
- uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 >= __p1); + return __ret; +} #else -#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vcgeq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __ret; \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __s1 = __p1; \ - __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 >= __p1); + return __ret; +} #else -#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __ret; \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __s1 = __p1; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (uint64x2_t) 
__builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vcgeq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __ret; \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 >= __p1); + return __ret; +} #else -#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __ret; \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vcge_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define 
vextq_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __ret; \ - int8x16_t __s0 = __p0; \ - int8x16_t __s1 = __p1; \ - __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 >= __p1); + return __ret; +} #else -#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __ret; \ - int8x16_t __s0 = __p0; \ - int8x16_t __s1 = __p1; \ - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vcge_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4_t __ret; \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 41); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 >= __p1); + return __ret; +} #else -#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4_t __ret; \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 41); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vcge_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __ret; \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 >= __p1); + return __ret; +} #else -#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __ret; \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vcge_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 
1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __ret; \ - int64x2_t __s0 = __p0; \ - int64x2_t __s1 = __p1; \ - __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 >= __p1); + return __ret; +} #else -#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __ret; \ - int64x2_t __s0 = __p0; \ - int64x2_t __s1 = __p1; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vcge_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 >= __p1); + return __ret; +} #else -#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = 
__builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vcge_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vext_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __ret; \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __s1 = __p1; \ - __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 >= __p1); + return __ret; +} #else -#define vext_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __ret; \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __s1 = __p1; \ - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vcge_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define 
vext_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __ret; \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 > __p1); + return __ret; +} #else -#define vext_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __ret; \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vcgtq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif -#define vext_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1_t __ret; \ - uint64x1_t __s0 = __p0; \ - uint64x1_t __s1 = __p1; \ - __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vext_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __ret; \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + 
__ret = (uint32x4_t)(__p0 > __p1); + return __ret; +} #else -#define vext_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __ret; \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vcgtq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vext_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __ret; \ - int8x8_t __s0 = __p0; \ - int8x8_t __s1 = __p1; \ - __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 > __p1); + return __ret; +} #else -#define vext_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __ret; \ - int8x8_t __s0 = __p0; \ - int8x8_t __s1 = __p1; \ - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vcgtq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vext_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2_t __ret; \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 9); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 > __p1); + return __ret; +} #else -#define vext_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2_t __ret; \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 9); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vcgtq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vext_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __ret; \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ - __ret; \ -}) +__ai 
__attribute__((target("neon"))) uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 > __p1); + return __ret; +} #else -#define vext_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __ret; \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vcgtq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif -#define vext_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1_t __ret; \ - int64x1_t __s0 = __p0; \ - int64x1_t __s1 = __p1; \ - __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vext_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 > __p1); + return __ret; +} #else -#define vext_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (int16x4_t) 
__builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vcgtq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vget_high_p8(poly8x16_t __p0) { - poly8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); +__ai __attribute__((target("neon"))) uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 > __p1); return __ret; } #else -__ai poly8x8_t vget_high_p8(poly8x16_t __p0) { - poly8x8_t __ret; - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15); +__ai __attribute__((target("neon"))) uint16x8_t vcgtq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 > __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly8x8_t __noswap_vget_high_p8(poly8x16_t __p0) { - poly8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 > __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t 
vcgt_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vget_high_p16(poly16x8_t __p0) { - poly16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); +__ai __attribute__((target("neon"))) uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 > __p1); return __ret; } #else -__ai poly16x4_t vget_high_p16(poly16x8_t __p0) { - poly16x4_t __ret; - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint32x2_t vcgt_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vget_high_u8(uint8x16_t __p0) { - uint8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); +__ai __attribute__((target("neon"))) uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 > __p1); return __ret; } #else -__ai uint8x8_t vget_high_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vcgt_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__p0 > __p1); return __ret; } -__ai uint8x8_t __noswap_vget_high_u8(uint8x16_t __p0) { +#else +__ai __attribute__((target("neon"))) uint8x8_t vcgt_s8(int8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vget_high_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + __ret = (uint32x2_t)(__p0 > __p1); return __ret; } #else -__ai uint32x2_t vget_high_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vcgt_f32(float32x2_t __p0, float32x2_t __p1) { uint32x2_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 > __rev1); __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint32x2_t __noswap_vget_high_u32(uint32x4_t __p0) { - uint32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 2, 3); - return __ret; -} #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vget_high_u64(uint64x2_t __p0) { - uint64x1_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1); +__ai __attribute__((target("neon"))) uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 > __p1); return __ret; } #else -__ai uint64x1_t vget_high_u64(uint64x2_t __p0) { - uint64x1_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 1); +__ai __attribute__((target("neon"))) uint32x2_t vcgt_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 > __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vget_high_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + __ret = (uint16x4_t)(__p0 > __p1); return __ret; } #else -__ai uint16x4_t vget_high_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vcgt_s16(int16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 > __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } 
-__ai uint16x4_t __noswap_vget_high_u16(uint16x8_t __p0) { - uint16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); - return __ret; -} #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vget_high_s8(int8x16_t __p0) { - int8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); +__ai __attribute__((target("neon"))) uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 <= __p1); return __ret; } #else -__ai int8x8_t vget_high_s8(int8x16_t __p0) { - int8x8_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8x16_t vcleq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int8x8_t __noswap_vget_high_s8(int8x16_t __p0) { - int8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vcleq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + 
__ret = (uint32x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vget_high_f32(float32x4_t __p0) { - float32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 2, 3); +__ai __attribute__((target("neon"))) uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 <= __p1); return __ret; } #else -__ai float32x2_t vget_high_f32(float32x4_t __p0) { - float32x2_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint16x8_t vcleq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai float32x2_t __noswap_vget_high_f32(float32x4_t __p0) { - float32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 2, 3); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vcleq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return 
__ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vget_high_f16(float16x8_t __p0) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); +__ai __attribute__((target("neon"))) uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 <= __p1); return __ret; } #else -__ai float16x4_t vget_high_f16(float16x8_t __p0) { - float16x4_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); +__ai __attribute__((target("neon"))) uint32x4_t vcleq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 <= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai float16x4_t __noswap_vget_high_f16(float16x8_t __p0) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); - return __ret; -} #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vget_high_s32(int32x4_t __p0) { - int32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 2, 3); +__ai __attribute__((target("neon"))) uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 <= __p1); return __ret; } #else -__ai int32x2_t vget_high_s32(int32x4_t __p0) { - int32x2_t __ret; +__ai __attribute__((target("neon"))) uint32x4_t vcleq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int32x2_t __noswap_vget_high_s32(int32x4_t __p0) { - int32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + int32x4_t __rev1; __rev1 
= __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vget_high_s64(int64x2_t __p0) { - int64x1_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1); +__ai __attribute__((target("neon"))) uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 <= __p1); return __ret; } #else -__ai int64x1_t vget_high_s64(int64x2_t __p0) { - int64x1_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 1); +__ai __attribute__((target("neon"))) uint16x8_t vcleq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vget_high_s16(int16x8_t __p0) { - int16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); +__ai __attribute__((target("neon"))) uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 <= __p1); return __ret; } #else -__ai int16x4_t vget_high_s16(int16x8_t __p0) { - int16x4_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int16x4_t __noswap_vget_high_s16(int16x8_t __p0) { - int16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); +__ai __attribute__((target("neon"))) uint8x8_t vcle_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vget_lane_p8(__p0, __p1) __extension__ ({ \ - poly8_t __ret; \ - poly8x8_t __s0 = __p0; \ - __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vget_lane_p8(__p0, __p1) __extension__ ({ \ - poly8_t __ret; \ - poly8x8_t __s0 = __p0; \ - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_p8(__p0, __p1) __extension__ ({ \ - poly8_t __ret; \ - poly8x8_t __s0 = __p0; \ - __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 <= __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vcle_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vget_lane_p16(__p0, __p1) __extension__ ({ \ - poly16_t __ret; \ - poly16x4_t __s0 = __p0; \ - __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 <= __p1); + return __ret; +} #else -#define vget_lane_p16(__p0, __p1) 
__extension__ ({ \ - poly16_t __ret; \ - poly16x4_t __s0 = __p0; \ - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_p16(__p0, __p1) __extension__ ({ \ - poly16_t __ret; \ - poly16x4_t __s0 = __p0; \ - __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vcle_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \ - poly8_t __ret; \ - poly8x16_t __s0 = __p0; \ - __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 <= __p1); + return __ret; +} #else -#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \ - poly8_t __ret; \ - poly8x16_t __s0 = __p0; \ - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_p8(__p0, __p1) __extension__ ({ \ - poly8_t __ret; \ - poly8x16_t __s0 = __p0; \ - __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vcle_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \ - poly16_t __ret; \ - poly16x8_t __s0 = __p0; \ - __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 <= __p1); + return __ret; +} #else -#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \ - poly16_t __ret; \ - poly16x8_t __s0 = __p0; \ - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_p16(__p0, __p1) __extension__ ({ \ - poly16_t __ret; \ - poly16x8_t __s0 = __p0; \ - __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vcle_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \ - uint8_t __ret; \ - uint8x16_t __s0 = __p0; \ - __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 <= __p1); + return __ret; +} #else -#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \ - uint8_t __ret; \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __rev0; 
__rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_u8(__p0, __p1) __extension__ ({ \ - uint8_t __ret; \ - uint8x16_t __s0 = __p0; \ - __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vcle_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 <= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \ - uint32_t __ret; \ - uint32x4_t __s0 = __p0; \ - __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 <= __p1); + return __ret; +} #else -#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \ - uint32_t __ret; \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_u32(__p0, __p1) __extension__ ({ \ - uint32_t __ret; \ - uint32x4_t __s0 = __p0; \ - __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vcle_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 <= __rev1); + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \ - uint64_t __ret; \ - uint64x2_t __s0 = __p0; \ - __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16_t vclsq_u8(uint8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32); + return __ret; +} #else -#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \ - uint64_t __ret; \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_u64(__p0, __p1) __extension__ ({ \ - uint64_t __ret; \ - uint64x2_t __s0 = __p0; \ - __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16_t vclsq_u8(uint8x16_t __p0) { + int8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \ - uint16_t __ret; \ - uint16x8_t __s0 = __p0; \ - __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vclsq_u32(uint32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34); + return __ret; +} #else -#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \ - uint16_t __ret; \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint16_t) 
__builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_u16(__p0, __p1) __extension__ ({ \ - uint16_t __ret; \ - uint16x8_t __s0 = __p0; \ - __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vclsq_u32(uint32x4_t __p0) { + int32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \ - int8_t __ret; \ - int8x16_t __s0 = __p0; \ - __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x8_t vclsq_u16(uint16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33); + return __ret; +} #else -#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \ - int8_t __ret; \ - int8x16_t __s0 = __p0; \ - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_s8(__p0, __p1) __extension__ ({ \ - int8_t __ret; \ - int8x16_t __s0 = __p0; \ - __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x8_t vclsq_u16(uint16x8_t __p0) { + int16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \ - float32_t __ret; \ - float32x4_t __s0 = 
__p0; \ - __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16_t vclsq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 32); + return __ret; +} #else -#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \ - float32_t __ret; \ - float32x4_t __s0 = __p0; \ - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_f32(__p0, __p1) __extension__ ({ \ - float32_t __ret; \ - float32x4_t __s0 = __p0; \ - __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16_t vclsq_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \ - int32_t __ret; \ - int32x4_t __s0 = __p0; \ - __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vclsq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 34); + return __ret; +} #else -#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \ - int32_t __ret; \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_s32(__p0, __p1) __extension__ ({ \ - int32_t __ret; \ - int32x4_t __s0 = __p0; \ - __ret = 
(int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vclsq_s32(int32x4_t __p0) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \ - int64_t __ret; \ - int64x2_t __s0 = __p0; \ - __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x8_t vclsq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__p0, 33); + return __ret; +} #else -#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \ - int64_t __ret; \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_s64(__p0, __p1) __extension__ ({ \ - int64_t __ret; \ - int64x2_t __s0 = __p0; \ - __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x8_t vclsq_s16(int16x8_t __p0) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vclsq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \ - int16_t __ret; \ - int16x8_t __s0 = __p0; \ - __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vcls_u8(uint8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) 
__builtin_neon_vcls_v((int8x8_t)__p0, 0); + return __ret; +} #else -#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \ - int16_t __ret; \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_s16(__p0, __p1) __extension__ ({ \ - int16_t __ret; \ - int16x8_t __s0 = __p0; \ - __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vcls_u8(uint8x8_t __p0) { + int8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vget_lane_u8(__p0, __p1) __extension__ ({ \ - uint8_t __ret; \ - uint8x8_t __s0 = __p0; \ - __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vcls_u32(uint32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2); + return __ret; +} #else -#define vget_lane_u8(__p0, __p1) __extension__ ({ \ - uint8_t __ret; \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_u8(__p0, __p1) __extension__ ({ \ - uint8_t __ret; \ - uint8x8_t __s0 = __p0; \ - __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vcls_u32(uint32x2_t __p0) { + int32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) 
__builtin_neon_vcls_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vget_lane_u32(__p0, __p1) __extension__ ({ \ - uint32_t __ret; \ - uint32x2_t __s0 = __p0; \ - __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vcls_u16(uint16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1); + return __ret; +} #else -#define vget_lane_u32(__p0, __p1) __extension__ ({ \ - uint32_t __ret; \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_u32(__p0, __p1) __extension__ ({ \ - uint32_t __ret; \ - uint32x2_t __s0 = __p0; \ - __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vcls_u16(uint16x4_t __p0) { + int16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif -#define vget_lane_u64(__p0, __p1) __extension__ ({ \ - uint64_t __ret; \ - uint64x1_t __s0 = __p0; \ - __ret = (uint64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vget_lane_u16(__p0, __p1) __extension__ ({ \ - uint16_t __ret; \ - uint16x4_t __s0 = __p0; \ - __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vcls_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__p0, 0); + return __ret; +} #else -#define vget_lane_u16(__p0, __p1) __extension__ ({ \ - uint16_t __ret; \ - 
uint16x4_t __s0 = __p0; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_u16(__p0, __p1) __extension__ ({ \ - uint16_t __ret; \ - uint16x4_t __s0 = __p0; \ - __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vcls_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vget_lane_s8(__p0, __p1) __extension__ ({ \ - int8_t __ret; \ - int8x8_t __s0 = __p0; \ - __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vcls_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__p0, 2); + return __ret; +} #else -#define vget_lane_s8(__p0, __p1) __extension__ ({ \ - int8_t __ret; \ - int8x8_t __s0 = __p0; \ - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_s8(__p0, __p1) __extension__ ({ \ - int8_t __ret; \ - int8x8_t __s0 = __p0; \ - __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vcls_s32(int32x2_t __p0) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vget_lane_f32(__p0, __p1) __extension__ ({ \ 
- float32_t __ret; \ - float32x2_t __s0 = __p0; \ - __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vcls_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__p0, 1); + return __ret; +} #else -#define vget_lane_f32(__p0, __p1) __extension__ ({ \ - float32_t __ret; \ - float32x2_t __s0 = __p0; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_f32(__p0, __p1) __extension__ ({ \ - float32_t __ret; \ - float32x2_t __s0 = __p0; \ - __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vcls_s16(int16x4_t __p0) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vcls_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vget_lane_s32(__p0, __p1) __extension__ ({ \ - int32_t __ret; \ - int32x2_t __s0 = __p0; \ - __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 < __p1); + return __ret; +} #else -#define vget_lane_s32(__p0, __p1) __extension__ ({ \ - int32_t __ret; \ - int32x2_t __s0 = __p0; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_s32(__p0, __p1) __extension__ ({ \ - int32_t __ret; \ - int32x2_t __s0 = __p0; \ - __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \ - __ret; \ -}) +__ai 
__attribute__((target("neon"))) uint8x16_t vcltq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif -#define vget_lane_s64(__p0, __p1) __extension__ ({ \ - int64_t __ret; \ - int64x1_t __s0 = __p0; \ - __ret = (int64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vget_lane_s16(__p0, __p1) __extension__ ({ \ - int16_t __ret; \ - int16x4_t __s0 = __p0; \ - __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 < __p1); + return __ret; +} #else -#define vget_lane_s16(__p0, __p1) __extension__ ({ \ - int16_t __ret; \ - int16x4_t __s0 = __p0; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_s16(__p0, __p1) __extension__ ({ \ - int16_t __ret; \ - int16x4_t __s0 = __p0; \ - __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vcltq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ 
-__ai poly8x8_t vget_low_p8(poly8x16_t __p0) { - poly8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7); +__ai __attribute__((target("neon"))) uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 < __p1); return __ret; } #else -__ai poly8x8_t vget_low_p8(poly8x16_t __p0) { - poly8x8_t __ret; - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7); +__ai __attribute__((target("neon"))) uint16x8_t vcltq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 < __rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vget_low_p16(poly16x8_t __p0) { - poly16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); +__ai __attribute__((target("neon"))) uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0 < __p1); return __ret; } #else -__ai poly16x4_t vget_low_p16(poly16x8_t __p0) { - poly16x4_t __ret; - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8x16_t vcltq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t)(__rev0 < 
__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vget_low_u8(uint8x16_t __p0) { - uint8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7); +__ai __attribute__((target("neon"))) uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 < __p1); return __ret; } #else -__ai uint8x8_t vget_low_u8(uint8x16_t __p0) { - uint8x8_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint32x4_t vcltq_f32(float32x4_t __p0, float32x4_t __p1) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vget_low_u32(uint32x4_t __p0) { - uint32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1); +__ai __attribute__((target("neon"))) uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0 < __p1); return __ret; } #else -__ai uint32x2_t vget_low_u32(uint32x4_t __p0) { - uint32x2_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint32x4_t vcltq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; 
__rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vget_low_u64(uint64x2_t __p0) { - uint64x1_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0); +__ai __attribute__((target("neon"))) uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0 < __p1); return __ret; } #else -__ai uint64x1_t vget_low_u64(uint64x2_t __p0) { - uint64x1_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 0); +__ai __attribute__((target("neon"))) uint16x8_t vcltq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vget_low_u16(uint16x8_t __p0) { - uint16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); +__ai __attribute__((target("neon"))) uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 < __p1); return __ret; } #else -__ai uint16x4_t vget_low_u16(uint16x8_t __p0) { - uint16x4_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8x8_t vclt_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 
5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vget_low_s8(int8x16_t __p0) { - int8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7); +__ai __attribute__((target("neon"))) uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 < __p1); return __ret; } #else -__ai int8x8_t vget_low_s8(int8x16_t __p0) { - int8x8_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint32x2_t vclt_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vget_low_f32(float32x4_t __p0) { - float32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1); +__ai __attribute__((target("neon"))) uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 < __p1); return __ret; } #else -__ai float32x2_t vget_low_f32(float32x4_t __p0) { - float32x2_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint16x4_t vclt_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; 
__rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vget_low_f16(float16x8_t __p0) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); +__ai __attribute__((target("neon"))) uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0 < __p1); return __ret; } #else -__ai float16x4_t vget_low_f16(float16x8_t __p0) { - float16x4_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8x8_t vclt_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vget_low_s32(int32x4_t __p0) { - int32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1); +__ai __attribute__((target("neon"))) uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 < __p1); return __ret; } #else -__ai int32x2_t vget_low_s32(int32x4_t __p0) { - int32x2_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1); +__ai __attribute__((target("neon"))) uint32x2_t vclt_f32(float32x2_t __p0, float32x2_t __p1) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + 
__ret = (uint32x2_t)(__rev0 < __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vget_low_s64(int64x2_t __p0) { - int64x1_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0); +__ai __attribute__((target("neon"))) uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0 < __p1); return __ret; } #else -__ai int64x1_t vget_low_s64(int64x2_t __p0) { - int64x1_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 0); +__ai __attribute__((target("neon"))) uint32x2_t vclt_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vget_low_s16(int16x8_t __p0) { - int16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); +__ai __attribute__((target("neon"))) uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0 < __p1); return __ret; } #else -__ai int16x4_t vget_low_s16(int16x8_t __p0) { - int16x4_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); +__ai __attribute__((target("neon"))) uint16x4_t vclt_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t)(__rev0 < __rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, 
uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vclzq_u8(uint8x16_t __p0) { uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 48); return __ret; } #else -__ai uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vclzq_u8(uint8x16_t __p0) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = (uint8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vclzq_u32(uint32x4_t __p0) { uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 50); return __ret; } #else -__ai uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vclzq_u32(uint32x4_t __p0) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = (uint32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai 
__attribute__((target("neon"))) uint16x8_t vclzq_u16(uint16x8_t __p0) { uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 49); return __ret; } #else -__ai uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vclzq_u16(uint16x8_t __p0) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = (uint16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vclzq_s8(int8x16_t __p0) { int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 32); return __ret; } #else -__ai int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vclzq_s8(int8x16_t __p0) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = (int8x16_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai 
__attribute__((target("neon"))) int32x4_t vclzq_s32(int32x4_t __p0) { int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 34); return __ret; } #else -__ai int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vclzq_s32(int32x4_t __p0) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = (int32x4_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vclzq_s16(int16x8_t __p0) { int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__p0, 33); return __ret; } #else -__ai int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vclzq_s16(int16x8_t __p0) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = (int16x8_t) __builtin_neon_vclzq_v((int8x16_t)__rev0, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vclz_u8(uint8x8_t __p0) { uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 
16); + __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 16); return __ret; } #else -__ai uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vclz_u8(uint8x8_t __p0) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = (uint8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vclz_u32(uint32x2_t __p0) { uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 18); return __ret; } #else -__ai uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vclz_u32(uint32x2_t __p0) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = (uint32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vclz_u16(uint16x4_t __p0) { uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 17); return __ret; } #else -__ai uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai 
__attribute__((target("neon"))) uint16x4_t vclz_u16(uint16x4_t __p0) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = (uint16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vclz_s8(int8x8_t __p0) { int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__p0, 0); return __ret; } #else -__ai int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vclz_s8(int8x8_t __p0) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = (int8x8_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vclz_s32(int32x2_t __p0) { int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__p0, 2); return __ret; } #else -__ai int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vclz_s32(int32x2_t __p0) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = (int32x2_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vclz_s16(int16x4_t __p0) { int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__p0, 1); return __ret; } #else -__ai int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vclz_s16(int16x4_t __p0) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = (int16x4_t) __builtin_neon_vclz_v((int8x8_t)__rev0, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vcnt_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vcnt_p8(poly8x8_t __p0) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vcntq_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 36); + return __ret; +} +#else +__ai 
__attribute__((target("neon"))) poly8x16_t vcntq_p8(poly8x16_t __p0) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vcntq_u8(uint8x16_t __p0) { uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 48); return __ret; } #else -__ai uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vcntq_u8(uint8x16_t __p0) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = (uint8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); +__ai __attribute__((target("neon"))) int8x16_t vcntq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__p0, 32); return __ret; } #else -__ai uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 
3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int8x16_t vcntq_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vcntq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); +__ai __attribute__((target("neon"))) uint8x8_t vcnt_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 16); return __ret; } #else -__ai uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); +__ai __attribute__((target("neon"))) uint8x8_t vcnt_u8(uint8x8_t __p0) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); +__ai __attribute__((target("neon"))) int8x8_t vcnt_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__p0, 0); return 
__ret; } #else -__ai int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int8x8_t vcnt_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vcnt_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); +__ai __attribute__((target("neon"))) poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; } #else -__ai int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly8x16_t vcombine_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x16_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = 
__builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); +__ai __attribute__((target("neon"))) poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); return __ret; } #else -__ai int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); +__ai __attribute__((target("neon"))) poly16x8_t vcombine_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x8_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16); +__ai __attribute__((target("neon"))) uint8x16_t vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; } #else -__ai uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; +__ai __attribute__((target("neon"))) uint8x16_t vcombine_u8(uint8x8_t __p0, 
uint8x8_t __p1) { + uint8x16_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t __noswap_vcombine_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18); +__ai __attribute__((target("neon"))) uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); return __ret; } #else -__ai uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; +__ai __attribute__((target("neon"))) uint32x4_t vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x4_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vcombine_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x4_t __ret; + 
__ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17); +__ai __attribute__((target("neon"))) uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); return __ret; } #else -__ai uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint64x2_t vcombine_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0); +__ai __attribute__((target("neon"))) uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); return __ret; } #else -__ai int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); +__ai __attribute__((target("neon"))) uint16x8_t vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x8_t __ret; + uint16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vcombine_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2); +__ai __attribute__((target("neon"))) int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; } #else -__ai int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) int8x16_t vcombine_s8(int8x8_t __p0, int8x8_t __p1) { + int8x16_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t __noswap_vcombine_s8(int8x8_t __p0, int8x8_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 
7, 8, 9, 10, 11, 12, 13, 14, 15); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1); +__ai __attribute__((target("neon"))) float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); return __ret; } #else -__ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); +__ai __attribute__((target("neon"))) float32x4_t vcombine_f32(float32x2_t __p0, float32x2_t __p1) { + float32x4_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) float32x4_t __noswap_vcombine_f32(float32x2_t __p0, float32x2_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_p8(__p0) __extension__ ({ \ - poly8x8_t __ret; \ - __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} #else -#define vld1_p8(__p0) __extension__ ({ \ - poly8x8_t __ret; \ - __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai 
__attribute__((target("neon"))) float16x8_t vcombine_f16(float16x4_t __p0, float16x4_t __p1) { + float16x8_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x8_t __noswap_vcombine_f16(float16x4_t __p0, float16x4_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_p16(__p0) __extension__ ({ \ - poly16x4_t __ret; \ - __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} #else -#define vld1_p16(__p0) __extension__ ({ \ - poly16x4_t __ret; \ - __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vcombine_s32(int32x2_t __p0, int32x2_t __p1) { + int32x4_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t __noswap_vcombine_s32(int32x2_t __p0, int32x2_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_p8(__p0) __extension__ ({ \ - poly8x16_t __ret; \ - __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \ - __ret; \ 
-}) +__ai __attribute__((target("neon"))) int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + return __ret; +} #else -#define vld1q_p8(__p0) __extension__ ({ \ - poly8x16_t __ret; \ - __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vcombine_s64(int64x1_t __p0, int64x1_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_p16(__p0) __extension__ ({ \ - poly16x8_t __ret; \ - __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \ +__ai __attribute__((target("neon"))) int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vcombine_s16(int16x4_t __p0, int16x4_t __p1) { + int16x8_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x8_t __noswap_vcombine_s16(int16x4_t __p0, int16x4_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} +#endif + +#define vcreate_p8(__p0) __extension__ ({ \ + poly8x8_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (poly8x8_t)(__promote); \ __ret; \ }) -#else -#define vld1q_p16(__p0) __extension__ ({ \ - poly16x8_t __ret; \ - __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \ - __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vcreate_p16(__p0) __extension__ ({ \ + poly16x4_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (poly16x4_t)(__promote); \ __ret; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_u8(__p0) __extension__ ({ \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \ +#define vcreate_u8(__p0) __extension__ ({ \ + uint8x8_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (uint8x8_t)(__promote); \ __ret; \ }) -#else -#define vld1q_u8(__p0) __extension__ ({ \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vcreate_u32(__p0) __extension__ ({ \ + uint32x2_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (uint32x2_t)(__promote); \ __ret; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_u32(__p0) __extension__ ({ \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \ +#define vcreate_u64(__p0) __extension__ ({ \ + uint64x1_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (uint64x1_t)(__promote); \ __ret; \ }) -#else -#define vld1q_u32(__p0) __extension__ ({ \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ +#define vcreate_u16(__p0) __extension__ ({ \ + uint16x4_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (uint16x4_t)(__promote); \ __ret; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_u64(__p0) __extension__ ({ \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \ +#define vcreate_s8(__p0) __extension__ ({ \ + int8x8_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (int8x8_t)(__promote); \ __ret; \ }) -#else -#define vld1q_u64(__p0) __extension__ ({ \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \ - __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); \ +#define vcreate_f32(__p0) __extension__ ({ \ + float32x2_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (float32x2_t)(__promote); \ __ret; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_u16(__p0) __extension__ ({ \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \ +#define vcreate_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (float16x4_t)(__promote); \ __ret; \ }) -#else -#define vld1q_u16(__p0) __extension__ ({ \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vcreate_s32(__p0) __extension__ ({ \ + int32x2_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (int32x2_t)(__promote); \ __ret; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1q_s8(__p0) __extension__ ({ \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \ +#define vcreate_s64(__p0) __extension__ ({ \ + int64x1_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (int64x1_t)(__promote); \ __ret; \ }) -#else -#define vld1q_s8(__p0) __extension__ ({ \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vcreate_s16(__p0) __extension__ ({ \ + int16x4_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (int16x4_t)(__promote); \ __ret; \ }) +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vcvtq_f32_u32(uint32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vcvtq_f32_u32(uint32x4_t __p0) { + float32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) 
__builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_f32(__p0) __extension__ ({ \ +__ai __attribute__((target("neon"))) float32x4_t vcvtq_f32_s32(int32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__p0, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vcvtq_f32_s32(int32x4_t __p0) { + float32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcvtq_f32_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vcvt_f32_u32(uint32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vcvt_f32_u32(uint32x2_t __p0) { + float32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vcvt_f32_s32(int32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vcvt_f32_s32(int32x2_t __p0) { + float32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcvt_f32_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \ float32x4_t __ret; \ - __ret = (float32x4_t) 
__builtin_neon_vld1q_v(__p0, 41); \ + uint32x4_t __s0 = __p0; \ + __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 50); \ __ret; \ }) #else -#define vld1q_f32(__p0) __extension__ ({ \ +#define vcvtq_n_f32_u32(__p0, __p1) __extension__ ({ \ float32x4_t __ret; \ - __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_s32(__p0) __extension__ ({ \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \ +#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__s0, __p1, 34); \ __ret; \ }) #else -#define vld1q_s32(__p0) __extension__ ({ \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \ +#define vcvtq_n_f32_s32(__p0, __p1) __extension__ ({ \ + float32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_vcvtq_n_f32_v((int8x16_t)__rev0, __p1, 34); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_s64(__p0) __extension__ ({ \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \ +#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 18); \ __ret; \ }) #else -#define vld1q_s64(__p0) __extension__ ({ \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \ +#define vcvt_n_f32_u32(__p0, __p1) __extension__ ({ \ + float32x2_t 
__ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_s16(__p0) __extension__ ({ \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \ +#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__s0, __p1, 2); \ __ret; \ }) #else -#define vld1q_s16(__p0) __extension__ ({ \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vcvt_n_f32_s32(__p0, __p1) __extension__ ({ \ + float32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_vcvt_n_f32_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_u8(__p0) __extension__ ({ \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \ +#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__s0, __p1, 34); \ __ret; \ }) #else -#define vld1_u8(__p0) __extension__ ({ \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vcvtq_n_s32_f32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vcvtq_n_s32_v((int8x16_t)__rev0, __p1, 34); \ + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_u32(__p0) __extension__ ({ \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \ +#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__s0, __p1, 2); \ __ret; \ }) #else -#define vld1_u32(__p0) __extension__ ({ \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \ +#define vcvt_n_s32_f32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vcvt_n_s32_v((int8x8_t)__rev0, __p1, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif -#define vld1_u64(__p0) __extension__ ({ \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vld1_u16(__p0) __extension__ ({ \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \ +#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__s0, __p1, 50); \ __ret; \ }) #else -#define vld1_u16(__p0) __extension__ ({ \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \ +#define vcvtq_n_u32_f32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vcvtq_n_u32_v((int8x16_t)__rev0, __p1, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_s8(__p0) __extension__ ({ \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \ +#define 
vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__s0, __p1, 18); \ __ret; \ }) #else -#define vld1_s8(__p0) __extension__ ({ \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vcvt_n_u32_f32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vcvt_n_u32_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_f32(__p0) __extension__ ({ \ - float32x2_t __ret; \ - __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vcvtq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__p0, 34); + return __ret; +} #else -#define vld1_f32(__p0) __extension__ ({ \ - float32x2_t __ret; \ - __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vcvtq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vcvtq_s32_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_s32(__p0) __extension__ ({ \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vcvt_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__p0, 2); + return __ret; +} #else -#define 
vld1_s32(__p0) __extension__ ({ \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vcvt_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vcvt_s32_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif -#define vld1_s64(__p0) __extension__ ({ \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vld1_s16(__p0) __extension__ ({ \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vcvtq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__p0, 50); + return __ret; +} #else -#define vld1_s16(__p0) __extension__ ({ \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vcvtq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcvtq_u32_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_dup_p8(__p0) __extension__ ({ \ - poly8x8_t __ret; \ - __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vcvt_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__p0, 18); + return __ret; +} #else -#define vld1_dup_p8(__p0) __extension__ ({ \ - poly8x8_t __ret; \ - __ret = (poly8x8_t) 
__builtin_neon_vld1_dup_v(__p0, 4); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vcvt_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcvt_u32_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_dup_p16(__p0) __extension__ ({ \ - poly16x4_t __ret; \ - __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \ - __ret; \ +#define vdup_lane_p8(__p0_8, __p1_8) __extension__ ({ \ + poly8x8_t __ret_8; \ + poly8x8_t __s0_8 = __p0_8; \ + __ret_8 = splat_lane_p8(__s0_8, __p1_8); \ + __ret_8; \ }) #else -#define vld1_dup_p16(__p0) __extension__ ({ \ - poly16x4_t __ret; \ - __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ +#define vdup_lane_p8(__p0_9, __p1_9) __extension__ ({ \ + poly8x8_t __ret_9; \ + poly8x8_t __s0_9 = __p0_9; \ + poly8x8_t __rev0_9; __rev0_9 = __builtin_shufflevector(__s0_9, __s0_9, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_9 = __noswap_splat_lane_p8(__rev0_9, __p1_9); \ + __ret_9 = __builtin_shufflevector(__ret_9, __ret_9, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_9; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_p8(__p0) __extension__ ({ \ - poly8x16_t __ret; \ - __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \ - __ret; \ +#define vdup_lane_p16(__p0_10, __p1_10) __extension__ ({ \ + poly16x4_t __ret_10; \ + poly16x4_t __s0_10 = __p0_10; \ + __ret_10 = splat_lane_p16(__s0_10, __p1_10); \ + __ret_10; \ }) #else -#define vld1q_dup_p8(__p0) __extension__ ({ \ - poly8x16_t __ret; \ - __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ +#define 
vdup_lane_p16(__p0_11, __p1_11) __extension__ ({ \ + poly16x4_t __ret_11; \ + poly16x4_t __s0_11 = __p0_11; \ + poly16x4_t __rev0_11; __rev0_11 = __builtin_shufflevector(__s0_11, __s0_11, 3, 2, 1, 0); \ + __ret_11 = __noswap_splat_lane_p16(__rev0_11, __p1_11); \ + __ret_11 = __builtin_shufflevector(__ret_11, __ret_11, 3, 2, 1, 0); \ + __ret_11; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_p16(__p0) __extension__ ({ \ - poly16x8_t __ret; \ - __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \ - __ret; \ +#define vdupq_lane_p8(__p0_12, __p1_12) __extension__ ({ \ + poly8x16_t __ret_12; \ + poly8x8_t __s0_12 = __p0_12; \ + __ret_12 = splatq_lane_p8(__s0_12, __p1_12); \ + __ret_12; \ }) #else -#define vld1q_dup_p16(__p0) __extension__ ({ \ - poly16x8_t __ret; \ - __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ +#define vdupq_lane_p8(__p0_13, __p1_13) __extension__ ({ \ + poly8x16_t __ret_13; \ + poly8x8_t __s0_13 = __p0_13; \ + poly8x8_t __rev0_13; __rev0_13 = __builtin_shufflevector(__s0_13, __s0_13, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_13 = __noswap_splatq_lane_p8(__rev0_13, __p1_13); \ + __ret_13 = __builtin_shufflevector(__ret_13, __ret_13, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_13; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_u8(__p0) __extension__ ({ \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \ - __ret; \ +#define vdupq_lane_p16(__p0_14, __p1_14) __extension__ ({ \ + poly16x8_t __ret_14; \ + poly16x4_t __s0_14 = __p0_14; \ + __ret_14 = splatq_lane_p16(__s0_14, __p1_14); \ + __ret_14; \ }) #else -#define vld1q_dup_u8(__p0) __extension__ ({ \ - uint8x16_t __ret; \ - __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ +#define vdupq_lane_p16(__p0_15, 
__p1_15) __extension__ ({ \ + poly16x8_t __ret_15; \ + poly16x4_t __s0_15 = __p0_15; \ + poly16x4_t __rev0_15; __rev0_15 = __builtin_shufflevector(__s0_15, __s0_15, 3, 2, 1, 0); \ + __ret_15 = __noswap_splatq_lane_p16(__rev0_15, __p1_15); \ + __ret_15 = __builtin_shufflevector(__ret_15, __ret_15, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_15; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_u32(__p0) __extension__ ({ \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \ - __ret; \ +#define vdupq_lane_u8(__p0_16, __p1_16) __extension__ ({ \ + uint8x16_t __ret_16; \ + uint8x8_t __s0_16 = __p0_16; \ + __ret_16 = splatq_lane_u8(__s0_16, __p1_16); \ + __ret_16; \ }) #else -#define vld1q_dup_u32(__p0) __extension__ ({ \ - uint32x4_t __ret; \ - __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ +#define vdupq_lane_u8(__p0_17, __p1_17) __extension__ ({ \ + uint8x16_t __ret_17; \ + uint8x8_t __s0_17 = __p0_17; \ + uint8x8_t __rev0_17; __rev0_17 = __builtin_shufflevector(__s0_17, __s0_17, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_17 = __noswap_splatq_lane_u8(__rev0_17, __p1_17); \ + __ret_17 = __builtin_shufflevector(__ret_17, __ret_17, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_17; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_u64(__p0) __extension__ ({ \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \ - __ret; \ +#define vdupq_lane_u32(__p0_18, __p1_18) __extension__ ({ \ + uint32x4_t __ret_18; \ + uint32x2_t __s0_18 = __p0_18; \ + __ret_18 = splatq_lane_u32(__s0_18, __p1_18); \ + __ret_18; \ }) #else -#define vld1q_dup_u64(__p0) __extension__ ({ \ - uint64x2_t __ret; \ - __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ +#define vdupq_lane_u32(__p0_19, __p1_19) __extension__ ({ \ + uint32x4_t __ret_19; \ + uint32x2_t 
__s0_19 = __p0_19; \ + uint32x2_t __rev0_19; __rev0_19 = __builtin_shufflevector(__s0_19, __s0_19, 1, 0); \ + __ret_19 = __noswap_splatq_lane_u32(__rev0_19, __p1_19); \ + __ret_19 = __builtin_shufflevector(__ret_19, __ret_19, 3, 2, 1, 0); \ + __ret_19; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_u16(__p0) __extension__ ({ \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \ - __ret; \ +#define vdupq_lane_u64(__p0_20, __p1_20) __extension__ ({ \ + uint64x2_t __ret_20; \ + uint64x1_t __s0_20 = __p0_20; \ + __ret_20 = splatq_lane_u64(__s0_20, __p1_20); \ + __ret_20; \ }) #else -#define vld1q_dup_u16(__p0) __extension__ ({ \ - uint16x8_t __ret; \ - __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ +#define vdupq_lane_u64(__p0_21, __p1_21) __extension__ ({ \ + uint64x2_t __ret_21; \ + uint64x1_t __s0_21 = __p0_21; \ + __ret_21 = __noswap_splatq_lane_u64(__s0_21, __p1_21); \ + __ret_21 = __builtin_shufflevector(__ret_21, __ret_21, 1, 0); \ + __ret_21; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_s8(__p0) __extension__ ({ \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \ - __ret; \ +#define vdupq_lane_u16(__p0_22, __p1_22) __extension__ ({ \ + uint16x8_t __ret_22; \ + uint16x4_t __s0_22 = __p0_22; \ + __ret_22 = splatq_lane_u16(__s0_22, __p1_22); \ + __ret_22; \ }) #else -#define vld1q_dup_s8(__p0) __extension__ ({ \ - int8x16_t __ret; \ - __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ +#define vdupq_lane_u16(__p0_23, __p1_23) __extension__ ({ \ + uint16x8_t __ret_23; \ + uint16x4_t __s0_23 = __p0_23; \ + uint16x4_t __rev0_23; __rev0_23 = __builtin_shufflevector(__s0_23, __s0_23, 3, 2, 1, 0); \ + __ret_23 = __noswap_splatq_lane_u16(__rev0_23, __p1_23); \ + 
__ret_23 = __builtin_shufflevector(__ret_23, __ret_23, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_23; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_f32(__p0) __extension__ ({ \ - float32x4_t __ret; \ - __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \ - __ret; \ +#define vdupq_lane_s8(__p0_24, __p1_24) __extension__ ({ \ + int8x16_t __ret_24; \ + int8x8_t __s0_24 = __p0_24; \ + __ret_24 = splatq_lane_s8(__s0_24, __p1_24); \ + __ret_24; \ }) #else -#define vld1q_dup_f32(__p0) __extension__ ({ \ - float32x4_t __ret; \ - __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ +#define vdupq_lane_s8(__p0_25, __p1_25) __extension__ ({ \ + int8x16_t __ret_25; \ + int8x8_t __s0_25 = __p0_25; \ + int8x8_t __rev0_25; __rev0_25 = __builtin_shufflevector(__s0_25, __s0_25, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_25 = __noswap_splatq_lane_s8(__rev0_25, __p1_25); \ + __ret_25 = __builtin_shufflevector(__ret_25, __ret_25, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_25; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_s32(__p0) __extension__ ({ \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \ - __ret; \ +#define vdupq_lane_f32(__p0_26, __p1_26) __extension__ ({ \ + float32x4_t __ret_26; \ + float32x2_t __s0_26 = __p0_26; \ + __ret_26 = splatq_lane_f32(__s0_26, __p1_26); \ + __ret_26; \ }) #else -#define vld1q_dup_s32(__p0) __extension__ ({ \ - int32x4_t __ret; \ - __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ +#define vdupq_lane_f32(__p0_27, __p1_27) __extension__ ({ \ + float32x4_t __ret_27; \ + float32x2_t __s0_27 = __p0_27; \ + float32x2_t __rev0_27; __rev0_27 = __builtin_shufflevector(__s0_27, __s0_27, 1, 0); \ + __ret_27 = __noswap_splatq_lane_f32(__rev0_27, __p1_27); \ + __ret_27 = __builtin_shufflevector(__ret_27, __ret_27, 3, 2, 1, 0); 
\ + __ret_27; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_s64(__p0) __extension__ ({ \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \ - __ret; \ +#define vdupq_lane_f16(__p0_28, __p1_28) __extension__ ({ \ + float16x8_t __ret_28; \ + float16x4_t __s0_28 = __p0_28; \ + __ret_28 = splatq_lane_f16(__s0_28, __p1_28); \ + __ret_28; \ }) #else -#define vld1q_dup_s64(__p0) __extension__ ({ \ - int64x2_t __ret; \ - __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ +#define vdupq_lane_f16(__p0_29, __p1_29) __extension__ ({ \ + float16x8_t __ret_29; \ + float16x4_t __s0_29 = __p0_29; \ + float16x4_t __rev0_29; __rev0_29 = __builtin_shufflevector(__s0_29, __s0_29, 3, 2, 1, 0); \ + __ret_29 = __noswap_splatq_lane_f16(__rev0_29, __p1_29); \ + __ret_29 = __builtin_shufflevector(__ret_29, __ret_29, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_29; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_s16(__p0) __extension__ ({ \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \ - __ret; \ +#define vdupq_lane_s32(__p0_30, __p1_30) __extension__ ({ \ + int32x4_t __ret_30; \ + int32x2_t __s0_30 = __p0_30; \ + __ret_30 = splatq_lane_s32(__s0_30, __p1_30); \ + __ret_30; \ }) #else -#define vld1q_dup_s16(__p0) __extension__ ({ \ - int16x8_t __ret; \ - __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ +#define vdupq_lane_s32(__p0_31, __p1_31) __extension__ ({ \ + int32x4_t __ret_31; \ + int32x2_t __s0_31 = __p0_31; \ + int32x2_t __rev0_31; __rev0_31 = __builtin_shufflevector(__s0_31, __s0_31, 1, 0); \ + __ret_31 = __noswap_splatq_lane_s32(__rev0_31, __p1_31); \ + __ret_31 = __builtin_shufflevector(__ret_31, __ret_31, 3, 2, 1, 0); \ + __ret_31; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_dup_u8(__p0) __extension__ ({ \ - uint8x8_t __ret; \ 
- __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \ - __ret; \ +#define vdupq_lane_s64(__p0_32, __p1_32) __extension__ ({ \ + int64x2_t __ret_32; \ + int64x1_t __s0_32 = __p0_32; \ + __ret_32 = splatq_lane_s64(__s0_32, __p1_32); \ + __ret_32; \ }) #else -#define vld1_dup_u8(__p0) __extension__ ({ \ - uint8x8_t __ret; \ - __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ +#define vdupq_lane_s64(__p0_33, __p1_33) __extension__ ({ \ + int64x2_t __ret_33; \ + int64x1_t __s0_33 = __p0_33; \ + __ret_33 = __noswap_splatq_lane_s64(__s0_33, __p1_33); \ + __ret_33 = __builtin_shufflevector(__ret_33, __ret_33, 1, 0); \ + __ret_33; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_dup_u32(__p0) __extension__ ({ \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \ - __ret; \ +#define vdupq_lane_s16(__p0_34, __p1_34) __extension__ ({ \ + int16x8_t __ret_34; \ + int16x4_t __s0_34 = __p0_34; \ + __ret_34 = splatq_lane_s16(__s0_34, __p1_34); \ + __ret_34; \ }) #else -#define vld1_dup_u32(__p0) __extension__ ({ \ - uint32x2_t __ret; \ - __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ +#define vdupq_lane_s16(__p0_35, __p1_35) __extension__ ({ \ + int16x8_t __ret_35; \ + int16x4_t __s0_35 = __p0_35; \ + int16x4_t __rev0_35; __rev0_35 = __builtin_shufflevector(__s0_35, __s0_35, 3, 2, 1, 0); \ + __ret_35 = __noswap_splatq_lane_s16(__rev0_35, __p1_35); \ + __ret_35 = __builtin_shufflevector(__ret_35, __ret_35, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_35; \ }) #endif -#define vld1_dup_u64(__p0) __extension__ ({ \ - uint64x1_t __ret; \ - __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vld1_dup_u16(__p0) __extension__ ({ \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \ - __ret; \ 
+#define vdup_lane_u8(__p0_36, __p1_36) __extension__ ({ \ + uint8x8_t __ret_36; \ + uint8x8_t __s0_36 = __p0_36; \ + __ret_36 = splat_lane_u8(__s0_36, __p1_36); \ + __ret_36; \ }) #else -#define vld1_dup_u16(__p0) __extension__ ({ \ - uint16x4_t __ret; \ - __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ +#define vdup_lane_u8(__p0_37, __p1_37) __extension__ ({ \ + uint8x8_t __ret_37; \ + uint8x8_t __s0_37 = __p0_37; \ + uint8x8_t __rev0_37; __rev0_37 = __builtin_shufflevector(__s0_37, __s0_37, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_37 = __noswap_splat_lane_u8(__rev0_37, __p1_37); \ + __ret_37 = __builtin_shufflevector(__ret_37, __ret_37, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_37; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_dup_s8(__p0) __extension__ ({ \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \ - __ret; \ +#define vdup_lane_u32(__p0_38, __p1_38) __extension__ ({ \ + uint32x2_t __ret_38; \ + uint32x2_t __s0_38 = __p0_38; \ + __ret_38 = splat_lane_u32(__s0_38, __p1_38); \ + __ret_38; \ }) #else -#define vld1_dup_s8(__p0) __extension__ ({ \ - int8x8_t __ret; \ - __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ +#define vdup_lane_u32(__p0_39, __p1_39) __extension__ ({ \ + uint32x2_t __ret_39; \ + uint32x2_t __s0_39 = __p0_39; \ + uint32x2_t __rev0_39; __rev0_39 = __builtin_shufflevector(__s0_39, __s0_39, 1, 0); \ + __ret_39 = __noswap_splat_lane_u32(__rev0_39, __p1_39); \ + __ret_39 = __builtin_shufflevector(__ret_39, __ret_39, 1, 0); \ + __ret_39; \ }) #endif +#define vdup_lane_u64(__p0_40, __p1_40) __extension__ ({ \ + uint64x1_t __ret_40; \ + uint64x1_t __s0_40 = __p0_40; \ + __ret_40 = splat_lane_u64(__s0_40, __p1_40); \ + __ret_40; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vld1_dup_f32(__p0) __extension__ ({ \ - float32x2_t __ret; \ - __ret = 
(float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \ - __ret; \ +#define vdup_lane_u16(__p0_41, __p1_41) __extension__ ({ \ + uint16x4_t __ret_41; \ + uint16x4_t __s0_41 = __p0_41; \ + __ret_41 = splat_lane_u16(__s0_41, __p1_41); \ + __ret_41; \ }) #else -#define vld1_dup_f32(__p0) __extension__ ({ \ - float32x2_t __ret; \ - __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ +#define vdup_lane_u16(__p0_42, __p1_42) __extension__ ({ \ + uint16x4_t __ret_42; \ + uint16x4_t __s0_42 = __p0_42; \ + uint16x4_t __rev0_42; __rev0_42 = __builtin_shufflevector(__s0_42, __s0_42, 3, 2, 1, 0); \ + __ret_42 = __noswap_splat_lane_u16(__rev0_42, __p1_42); \ + __ret_42 = __builtin_shufflevector(__ret_42, __ret_42, 3, 2, 1, 0); \ + __ret_42; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_dup_s32(__p0) __extension__ ({ \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \ - __ret; \ +#define vdup_lane_s8(__p0_43, __p1_43) __extension__ ({ \ + int8x8_t __ret_43; \ + int8x8_t __s0_43 = __p0_43; \ + __ret_43 = splat_lane_s8(__s0_43, __p1_43); \ + __ret_43; \ }) #else -#define vld1_dup_s32(__p0) __extension__ ({ \ - int32x2_t __ret; \ - __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ +#define vdup_lane_s8(__p0_44, __p1_44) __extension__ ({ \ + int8x8_t __ret_44; \ + int8x8_t __s0_44 = __p0_44; \ + int8x8_t __rev0_44; __rev0_44 = __builtin_shufflevector(__s0_44, __s0_44, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_44 = __noswap_splat_lane_s8(__rev0_44, __p1_44); \ + __ret_44 = __builtin_shufflevector(__ret_44, __ret_44, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_44; \ }) #endif -#define vld1_dup_s64(__p0) __extension__ ({ \ - int64x1_t __ret; \ - __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vld1_dup_s16(__p0) __extension__ ({ \ - int16x4_t __ret; \ - __ret = 
(int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \ - __ret; \ +#define vdup_lane_f32(__p0_45, __p1_45) __extension__ ({ \ + float32x2_t __ret_45; \ + float32x2_t __s0_45 = __p0_45; \ + __ret_45 = splat_lane_f32(__s0_45, __p1_45); \ + __ret_45; \ }) #else -#define vld1_dup_s16(__p0) __extension__ ({ \ - int16x4_t __ret; \ - __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ +#define vdup_lane_f32(__p0_46, __p1_46) __extension__ ({ \ + float32x2_t __ret_46; \ + float32x2_t __s0_46 = __p0_46; \ + float32x2_t __rev0_46; __rev0_46 = __builtin_shufflevector(__s0_46, __s0_46, 1, 0); \ + __ret_46 = __noswap_splat_lane_f32(__rev0_46, __p1_46); \ + __ret_46 = __builtin_shufflevector(__ret_46, __ret_46, 1, 0); \ + __ret_46; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8_t __ret; \ - poly8x8_t __s1 = __p1; \ - __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \ - __ret; \ +#define vdup_lane_f16(__p0_47, __p1_47) __extension__ ({ \ + float16x4_t __ret_47; \ + float16x4_t __s0_47 = __p0_47; \ + __ret_47 = splat_lane_f16(__s0_47, __p1_47); \ + __ret_47; \ }) #else -#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8_t __ret; \ - poly8x8_t __s1 = __p1; \ - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ +#define vdup_lane_f16(__p0_48, __p1_48) __extension__ ({ \ + float16x4_t __ret_48; \ + float16x4_t __s0_48 = __p0_48; \ + float16x4_t __rev0_48; __rev0_48 = __builtin_shufflevector(__s0_48, __s0_48, 3, 2, 1, 0); \ + __ret_48 = __noswap_splat_lane_f16(__rev0_48, __p1_48); \ + __ret_48 = __builtin_shufflevector(__ret_48, __ret_48, 3, 2, 1, 0); \ + __ret_48; \ }) #endif #ifdef __LITTLE_ENDIAN__ 
-#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4_t __ret; \ - poly16x4_t __s1 = __p1; \ - __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \ - __ret; \ +#define vdup_lane_s32(__p0_49, __p1_49) __extension__ ({ \ + int32x2_t __ret_49; \ + int32x2_t __s0_49 = __p0_49; \ + __ret_49 = splat_lane_s32(__s0_49, __p1_49); \ + __ret_49; \ }) #else -#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4_t __ret; \ - poly16x4_t __s1 = __p1; \ - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ +#define vdup_lane_s32(__p0_50, __p1_50) __extension__ ({ \ + int32x2_t __ret_50; \ + int32x2_t __s0_50 = __p0_50; \ + int32x2_t __rev0_50; __rev0_50 = __builtin_shufflevector(__s0_50, __s0_50, 1, 0); \ + __ret_50 = __noswap_splat_lane_s32(__rev0_50, __p1_50); \ + __ret_50 = __builtin_shufflevector(__ret_50, __ret_50, 1, 0); \ + __ret_50; \ }) #endif +#define vdup_lane_s64(__p0_51, __p1_51) __extension__ ({ \ + int64x1_t __ret_51; \ + int64x1_t __s0_51 = __p0_51; \ + __ret_51 = splat_lane_s64(__s0_51, __p1_51); \ + __ret_51; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16_t __ret; \ - poly8x16_t __s1 = __p1; \ - __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \ - __ret; \ +#define vdup_lane_s16(__p0_52, __p1_52) __extension__ ({ \ + int16x4_t __ret_52; \ + int16x4_t __s0_52 = __p0_52; \ + __ret_52 = splat_lane_s16(__s0_52, __p1_52); \ + __ret_52; \ }) #else -#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16_t __ret; \ - poly8x16_t __s1 = __p1; \ - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, 
(int8x16_t)__rev1, __p2, 36); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ +#define vdup_lane_s16(__p0_53, __p1_53) __extension__ ({ \ + int16x4_t __ret_53; \ + int16x4_t __s0_53 = __p0_53; \ + int16x4_t __rev0_53; __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 3, 2, 1, 0); \ + __ret_53 = __noswap_splat_lane_s16(__rev0_53, __p1_53); \ + __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 3, 2, 1, 0); \ + __ret_53; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8_t __ret; \ - poly16x8_t __s1 = __p1; \ - __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly8x8_t vdup_n_p8(poly8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} #else -#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8_t __ret; \ - poly16x8_t __s1 = __p1; \ - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly8x8_t vdup_n_p8(poly8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __ret; \ - uint8x16_t __s1 = __p1; \ - __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly16x4_t vdup_n_p16(poly16_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} #else 
-#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __ret; \ - uint8x16_t __s1 = __p1; \ - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly16x4_t vdup_n_p16(poly16_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __ret; \ - uint32x4_t __s1 = __p1; \ - __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly8x16_t vdupq_n_p8(poly8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} #else -#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __ret; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly8x16_t vdupq_n_p8(poly8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __ret; \ - uint64x2_t __s1 = __p1; \ - 
__ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly16x8_t vdupq_n_p16(poly16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} #else -#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __ret; \ - uint64x2_t __s1 = __p1; \ - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly16x8_t vdupq_n_p16(poly16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __ret; \ - uint16x8_t __s1 = __p1; \ - __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vdupq_n_u8(uint8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} #else -#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __ret; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vdupq_n_u8(uint8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + 
__ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __ret; \ - int8x16_t __s1 = __p1; \ - __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vdupq_n_u32(uint32_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} #else -#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __ret; \ - int8x16_t __s1 = __p1; \ - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vdupq_n_u32(uint32_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4_t __ret; \ - float32x4_t __s1 = __p1; \ - __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vdupq_n_u64(uint64_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) {__p0, __p0}; + return __ret; +} #else -#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4_t __ret; \ - float32x4_t __s1 = __p1; \ - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai 
__attribute__((target("neon"))) uint64x2_t vdupq_n_u64(uint64_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __ret; \ - int32x4_t __s1 = __p1; \ - __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vdupq_n_u16(uint16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} #else -#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __ret; \ - int32x4_t __s1 = __p1; \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vdupq_n_u16(uint16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __ret; \ - int64x2_t __s1 = __p1; \ - __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16_t vdupq_n_s8(int8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} #else -#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __ret; \ - int64x2_t __s1 = __p1; \ - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, 
__p2, 35); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16_t vdupq_n_s8(int8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __ret; \ - int16x8_t __s1 = __p1; \ - __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x4_t vdupq_n_f32(float32_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} #else -#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __ret; \ - int16x8_t __s1 = __p1; \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x4_t vdupq_n_f32(float32_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __ret; \ - uint8x8_t __s1 = __p1; \ - __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \ +#define vdupq_n_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ __ret; \ }) #else -#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __ret; \ - uint8x8_t __s1 = __p1; \ - uint8x8_t __rev1; __rev1 = 
__builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \ +#define vdupq_n_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __ret; \ - uint32x2_t __s1 = __p1; \ - __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vdupq_n_s32(int32_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} #else -#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __ret; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vdupq_n_s32(int32_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif -#define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1_t __ret; \ - uint64x1_t __s1 = __p1; \ - __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __ret; \ - uint16x4_t __s1 = __p1; \ - __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vdupq_n_s64(int64_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) {__p0, __p0}; + return __ret; +} #else -#define 
vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __ret; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vdupq_n_s64(int64_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __ret; \ - int8x8_t __s1 = __p1; \ - __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x8_t vdupq_n_s16(int16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} #else -#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __ret; \ - int8x8_t __s1 = __p1; \ - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2_t __ret; \ - float32x2_t __s1 = __p1; \ - __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \ - __ret; \ -}) -#else -#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2_t __ret; \ - float32x2_t __s1 = __p1; \ - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x8_t 
vdupq_n_s16(int16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __ret; \ - int32x2_t __s1 = __p1; \ - __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vdup_n_u8(uint8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} #else -#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __ret; \ - int32x2_t __s1 = __p1; \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vdup_n_u8(uint8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif -#define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1_t __ret; \ - int64x1_t __s1 = __p1; \ - __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __ret; \ - int16x4_t __s1 = __p1; \ - __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vdup_n_u32(uint32_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) {__p0, __p0}; + return __ret; +} #else -#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __ret; \ - int16x4_t __s1 = __p1; \ - int16x4_t __rev1; __rev1 = 
__builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vdup_n_u32(uint32_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) uint64x1_t vdup_n_u64(uint64_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) {__p0}; + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vld1_p8_x2(__p0) __extension__ ({ \ - poly8x8x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vdup_n_u16(uint16_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} #else -#define vld1_p8_x2(__p0) __extension__ ({ \ - poly8x8x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vdup_n_u16(uint16_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_p16_x2(__p0) __extension__ ({ \ - poly16x4x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vdup_n_s8(int8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} #else -#define vld1_p16_x2(__p0) __extension__ ({ \ - poly16x4x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 
1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vdup_n_s8(int8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_p8_x2(__p0) __extension__ ({ \ - poly8x16x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x2_t vdup_n_f32(float32_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) {__p0, __p0}; + return __ret; +} #else -#define vld1q_p8_x2(__p0) __extension__ ({ \ - poly8x16x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x2_t vdup_n_f32(float32_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_p16_x2(__p0) __extension__ ({ \ - poly16x8x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \ +#define vdup_n_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ __ret; \ }) #else -#define vld1q_p16_x2(__p0) __extension__ ({ \ - poly16x8x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vdup_n_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + 
float16_t __s0 = __p0; \ + __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_u8_x2(__p0) __extension__ ({ \ - uint8x16x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vdup_n_s32(int32_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) {__p0, __p0}; + return __ret; +} #else -#define vld1q_u8_x2(__p0) __extension__ ({ \ - uint8x16x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vdup_n_s32(int32_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) int64x1_t vdup_n_s64(int64_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) {__p0}; + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vld1q_u32_x2(__p0) __extension__ ({ \ - uint32x4x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vdup_n_s16(int16_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; + return __ret; +} #else -#define vld1q_u32_x2(__p0) __extension__ ({ \ - uint32x4x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vdup_n_s16(int16_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; + 
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_u64_x2(__p0) __extension__ ({ \ - uint64x2x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} #else -#define vld1q_u64_x2(__p0) __extension__ ({ \ - uint64x2x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t veorq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_u16_x2(__p0) __extension__ ({ \ - uint16x8x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} #else -#define vld1q_u16_x2(__p0) __extension__ ({ \ - uint16x8x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t veorq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_s8_x2(__p0) __extension__ ({ \ - int8x16x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} #else -#define vld1q_s8_x2(__p0) __extension__ ({ \ - int8x16x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t veorq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_f32_x2(__p0) __extension__ ({ \ - float32x4x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} #else -#define vld1q_f32_x2(__p0) __extension__ ({ \ - float32x4x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) +__ai 
__attribute__((target("neon"))) uint16x8_t veorq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_s32_x2(__p0) __extension__ ({ \ - int32x4x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} #else -#define vld1q_s32_x2(__p0) __extension__ ({ \ - int32x4x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16_t veorq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_s64_x2(__p0) __extension__ ({ \ - int64x2x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} #else -#define vld1q_s64_x2(__p0) __extension__ ({ \ - int64x2x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \ - \ - 
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t veorq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_s16_x2(__p0) __extension__ ({ \ - int16x8x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} #else -#define vld1q_s16_x2(__p0) __extension__ ({ \ - int16x8x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int64x2_t veorq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_u8_x2(__p0) __extension__ ({ \ - uint8x8x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} #else -#define vld1_u8_x2(__p0) __extension__ ({ \ - uint8x8x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \ - \ - 
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x8_t veorq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_u32_x2(__p0) __extension__ ({ \ - uint32x2x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} #else -#define vld1_u32_x2(__p0) __extension__ ({ \ - uint32x2x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t veor_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif -#define vld1_u64_x2(__p0) __extension__ ({ \ - uint64x1x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 19); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vld1_u16_x2(__p0) __extension__ ({ \ - uint16x4x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t 
veor_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} #else -#define vld1_u16_x2(__p0) __extension__ ({ \ - uint16x4x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t veor_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) uint64x1_t veor_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vld1_s8_x2(__p0) __extension__ ({ \ - int8x8x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} #else -#define vld1_s8_x2(__p0) __extension__ ({ \ - int8x8x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t veor_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ 
-#define vld1_f32_x2(__p0) __extension__ ({ \ - float32x2x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} #else -#define vld1_f32_x2(__p0) __extension__ ({ \ - float32x2x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t veor_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_s32_x2(__p0) __extension__ ({ \ - int32x2x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} #else -#define vld1_s32_x2(__p0) __extension__ ({ \ - int32x2x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t veor_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif -#define vld1_s64_x2(__p0) __extension__ ({ 
\ - int64x1x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 3); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int64x1_t veor_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vld1_s16_x2(__p0) __extension__ ({ \ - int16x4x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 ^ __p1; + return __ret; +} #else -#define vld1_s16_x2(__p0) __extension__ ({ \ - int16x4x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t veor_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 ^ __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_p8_x3(__p0) __extension__ ({ \ - poly8x8x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \ +#define vext_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \ __ret; \ }) #else -#define vld1_p8_x3(__p0) __extension__ ({ \ - poly8x8x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 
4, 3, 2, 1, 0); \ +#define vext_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_p16_x3(__p0) __extension__ ({ \ - poly16x4x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \ +#define vext_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \ __ret; \ }) #else -#define vld1_p16_x3(__p0) __extension__ ({ \ - poly16x4x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ +#define vext_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (poly16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_p8_x3(__p0) __extension__ ({ \ - poly8x16x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \ +#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + 
poly8x16_t __s1 = __p1; \ + __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \ __ret; \ }) #else -#define vld1q_p8_x3(__p0) __extension__ ({ \ - poly8x16x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vextq_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_p16_x3(__p0) __extension__ ({ \ - poly16x8x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \ +#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \ __ret; \ }) #else -#define vld1q_p16_x3(__p0) __extension__ ({ \ - poly16x8x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], 
__ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vextq_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_u8_x3(__p0) __extension__ ({ \ - uint8x16x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \ +#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ __ret; \ }) #else -#define vld1q_u8_x3(__p0) __extension__ ({ \ - uint8x16x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vextq_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 
6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_u32_x3(__p0) __extension__ ({ \ - uint32x4x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \ +#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ __ret; \ }) #else -#define vld1q_u32_x3(__p0) __extension__ ({ \ - uint32x4x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ +#define vextq_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_u64_x3(__p0) __extension__ ({ \ - uint64x2x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \ +#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ __ret; \ }) #else -#define vld1q_u64_x3(__p0) __extension__ ({ \ - uint64x2x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = 
__builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ +#define vextq_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_u16_x3(__p0) __extension__ ({ \ - uint16x8x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \ +#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ __ret; \ }) #else -#define vld1q_u16_x3(__p0) __extension__ ({ \ - uint16x8x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vextq_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_s8_x3(__p0) __extension__ ({ \ - int8x16x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \ +#define vextq_s8(__p0, 
__p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ __ret; \ }) #else -#define vld1q_s8_x3(__p0) __extension__ ({ \ - int8x16x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vextq_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_f32_x3(__p0) __extension__ ({ \ - float32x4x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \ +#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 41); \ __ret; \ }) #else -#define vld1q_f32_x3(__p0) __extension__ ({ \ - float32x4x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - 
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ +#define vextq_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_s32_x3(__p0) __extension__ ({ \ - int32x4x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \ +#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ __ret; \ }) #else -#define vld1q_s32_x3(__p0) __extension__ ({ \ - int32x4x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ +#define vextq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_s64_x3(__p0) __extension__ ({ \ - int64x2x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \ +#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \ + 
int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ __ret; \ }) #else -#define vld1q_s64_x3(__p0) __extension__ ({ \ - int64x2x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ +#define vextq_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_s16_x3(__p0) __extension__ ({ \ - int16x8x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \ +#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ __ret; \ }) #else -#define vld1q_s16_x3(__p0) __extension__ ({ \ - int16x8x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vextq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 
2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_u8_x3(__p0) __extension__ ({ \ - uint8x8x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \ +#define vext_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ __ret; \ }) #else -#define vld1_u8_x3(__p0) __extension__ ({ \ - uint8x8x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vext_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_u32_x3(__p0) __extension__ ({ \ - uint32x2x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \ +#define vext_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ __ret; \ }) #else -#define vld1_u32_x3(__p0) 
__extension__ ({ \ - uint32x2x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ +#define vext_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif -#define vld1_u64_x3(__p0) __extension__ ({ \ - uint64x1x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 19); \ +#define vext_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + __ret = (uint64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vld1_u16_x3(__p0) __extension__ ({ \ - uint16x4x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \ +#define vext_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ __ret; \ }) #else -#define vld1_u16_x3(__p0) __extension__ ({ \ - uint16x4x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ +#define vext_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + 
uint16x4_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_s8_x3(__p0) __extension__ ({ \ - int8x8x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \ +#define vext_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ __ret; \ }) #else -#define vld1_s8_x3(__p0) __extension__ ({ \ - int8x8x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vext_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_f32_x3(__p0) __extension__ ({ \ - float32x2x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \ +#define vext_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 9); 
\ __ret; \ }) #else -#define vld1_f32_x3(__p0) __extension__ ({ \ - float32x2x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ +#define vext_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_s32_x3(__p0) __extension__ ({ \ - int32x2x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \ +#define vext_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ __ret; \ }) #else -#define vld1_s32_x3(__p0) __extension__ ({ \ - int32x2x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ +#define vext_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; 
\ }) #endif -#define vld1_s64_x3(__p0) __extension__ ({ \ - int64x1x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 3); \ +#define vext_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + __ret = (int64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vld1_s16_x3(__p0) __extension__ ({ \ - int16x4x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \ - __ret; \ -}) -#else -#define vld1_s16_x3(__p0) __extension__ ({ \ - int16x4x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ +#define vext_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vext_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_p8_x4(__p0) __extension__ ({ \ - poly8x8x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \ +#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 40); \ __ret; \ }) #else -#define 
vld1_p8_x4(__p0) __extension__ ({ \ - poly8x8x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_p16_x4(__p0) __extension__ ({ \ - poly16x4x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \ +#define vext_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 8); \ __ret; \ }) #else -#define vld1_p16_x4(__p0) __extension__ ({ \ - poly16x4x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ +#define vext_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + 
float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vext_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_p8_x4(__p0) __extension__ ({ \ - poly8x16x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly8x8_t vget_high_p8(poly8x16_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} #else -#define vld1q_p8_x4(__p0) __extension__ ({ \ - poly8x16x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly8x8_t vget_high_p8(poly8x16_t __p0) { + poly8x8_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x8_t __noswap_vget_high_p8(poly8x16_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_p16_x4(__p0) 
__extension__ ({ \ - poly16x8x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly16x4_t vget_high_p16(poly16x8_t __p0) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} #else -#define vld1q_p16_x4(__p0) __extension__ ({ \ - poly16x8x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly16x4_t vget_high_p16(poly16x8_t __p0) { + poly16x4_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_u8_x4(__p0) __extension__ ({ \ - uint8x16x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vget_high_u8(uint8x16_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} #else -#define vld1q_u8_x4(__p0) __extension__ ({ \ - uint8x16x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 
5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vget_high_u8(uint8x16_t __p0) { + uint8x8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t __noswap_vget_high_u8(uint8x16_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_u32_x4(__p0) __extension__ ({ \ - uint32x4x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vget_high_u32(uint32x4_t __p0) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} #else -#define vld1q_u32_x4(__p0) __extension__ ({ \ - uint32x4x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vget_high_u32(uint32x4_t __p0) { + uint32x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t __noswap_vget_high_u32(uint32x4_t __p0) { + uint32x2_t 
__ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_u64_x4(__p0) __extension__ ({ \ - uint64x2x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint64x1_t vget_high_u64(uint64x2_t __p0) { + uint64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1); + return __ret; +} #else -#define vld1q_u64_x4(__p0) __extension__ ({ \ - uint64x2x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint64x1_t vget_high_u64(uint64x2_t __p0) { + uint64x1_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_u16_x4(__p0) __extension__ ({ \ - uint16x8x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vget_high_u16(uint16x8_t __p0) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} #else -#define vld1q_u16_x4(__p0) __extension__ ({ \ - uint16x8x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) 
+__ai __attribute__((target("neon"))) uint16x4_t vget_high_u16(uint16x8_t __p0) { + uint16x4_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t __noswap_vget_high_u16(uint16x8_t __p0) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_s8_x4(__p0) __extension__ ({ \ - int8x16x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vget_high_s8(int8x16_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} #else -#define vld1q_s8_x4(__p0) __extension__ ({ \ - int8x16x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vget_high_s8(int8x16_t __p0) { + int8x8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 8, 9, 10, 11, 12, 13, 14, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t __noswap_vget_high_s8(int8x16_t 
__p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 8, 9, 10, 11, 12, 13, 14, 15); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_f32_x4(__p0) __extension__ ({ \ - float32x4x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x2_t vget_high_f32(float32x4_t __p0) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} #else -#define vld1q_f32_x4(__p0) __extension__ ({ \ - float32x4x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x2_t vget_high_f32(float32x4_t __p0) { + float32x2_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t __noswap_vget_high_f32(float32x4_t __p0) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_s32_x4(__p0) __extension__ ({ \ - int32x4x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float16x4_t vget_high_f16(float16x8_t __p0) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} #else -#define vld1q_s32_x4(__p0) __extension__ ({ \ - int32x4x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 
3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float16x4_t vget_high_f16(float16x8_t __p0) { + float16x4_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t __noswap_vget_high_f16(float16x8_t __p0) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_s64_x4(__p0) __extension__ ({ \ - int64x2x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vget_high_s32(int32x4_t __p0) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} #else -#define vld1q_s64_x4(__p0) __extension__ ({ \ - int64x2x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vget_high_s32(int32x4_t __p0) { + int32x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t __noswap_vget_high_s32(int32x4_t 
__p0) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 2, 3); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_s16_x4(__p0) __extension__ ({ \ - int16x8x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int64x1_t vget_high_s64(int64x2_t __p0) { + int64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1); + return __ret; +} #else -#define vld1q_s16_x4(__p0) __extension__ ({ \ - int16x8x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int64x1_t vget_high_s64(int64x2_t __p0) { + int64x1_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_u8_x4(__p0) __extension__ ({ \ - uint8x8x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vget_high_s16(int16x8_t __p0) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} #else -#define vld1_u8_x4(__p0) __extension__ ({ \ - uint8x8x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = 
__builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vget_high_s16(int16x8_t __p0) { + int16x4_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t __noswap_vget_high_s16(int16x8_t __p0) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_u32_x4(__p0) __extension__ ({ \ - uint32x2x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \ +#define vget_lane_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x8_t __s0 = __p0; \ + __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \ __ret; \ }) #else -#define vld1_u32_x4(__p0) __extension__ ({ \ - uint32x2x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ +#define vget_lane_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__rev0, __p1); \ __ret; \ }) -#endif - -#define vld1_u64_x4(__p0) __extension__ ({ \ - uint64x1x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 19); \ +#define __noswap_vget_lane_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x8_t __s0 = __p0; \ + __ret = (poly8_t) __builtin_neon_vget_lane_i8((poly8x8_t)__s0, __p1); \ __ret; \ }) +#endif + 
#ifdef __LITTLE_ENDIAN__ -#define vld1_u16_x4(__p0) __extension__ ({ \ - uint16x4x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \ +#define vget_lane_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x4_t __s0 = __p0; \ + __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \ __ret; \ }) #else -#define vld1_u16_x4(__p0) __extension__ ({ \ - uint16x4x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ +#define vget_lane_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x4_t __s0 = __p0; \ + __ret = (poly16_t) __builtin_neon_vget_lane_i16((poly16x4_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_s8_x4(__p0) __extension__ ({ \ - int8x8x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \ +#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x16_t __s0 = __p0; \ + __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \ __ret; \ }) #else -#define vld1_s8_x4(__p0) __extension__ ({ \ - int8x8x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 
6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vgetq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x16_t __s0 = __p0; \ + __ret = (poly8_t) __builtin_neon_vgetq_lane_i8((poly8x16_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_f32_x4(__p0) __extension__ ({ \ - float32x2x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \ +#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x8_t __s0 = __p0; \ + __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \ __ret; \ }) #else -#define vld1_f32_x4(__p0) __extension__ ({ \ - float32x2x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ +#define vgetq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x8_t __s0 = __p0; \ + __ret = (poly16_t) __builtin_neon_vgetq_lane_i16((poly16x8_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_s32_x4(__p0) __extension__ ({ \ - 
int32x2x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \ +#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ __ret; \ }) #else -#define vld1_s32_x4(__p0) __extension__ ({ \ - int32x2x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ -}) -#endif - -#define vld1_s64_x4(__p0) __extension__ ({ \ - int64x1x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 3); \ - __ret; \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vld1_s16_x4(__p0) __extension__ ({ \ - int16x4x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \ +#define vgetq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \ __ret; \ }) -#else -#define vld1_s16_x4(__p0) __extension__ ({ \ - int16x4x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ +#define __noswap_vgetq_lane_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ 
-#define vld2_p8(__p0) __extension__ ({ \ - poly8x8x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 4); \ +#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \ __ret; \ }) #else -#define vld2_p8(__p0) __extension__ ({ \ - poly8x8x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 4); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2_p16(__p0) __extension__ ({ \ - poly16x4x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 5); \ +#define vgetq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \ __ret; \ }) -#else -#define vld2_p16(__p0) __extension__ ({ \ - poly16x4x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 5); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ +#define __noswap_vgetq_lane_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_p8(__p0) __extension__ ({ \ - poly8x16x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 36); \ +#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \ __ret; \ }) #else -#define vld2q_p8(__p0) __extension__ ({ \ - poly8x16x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 36); \ - \ 
- __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_p16(__p0) __extension__ ({ \ - poly16x8x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 37); \ +#define vgetq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \ __ret; \ }) -#else -#define vld2q_p16(__p0) __extension__ ({ \ - poly16x8x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 37); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define __noswap_vgetq_lane_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_u8(__p0) __extension__ ({ \ - uint8x16x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 48); \ +#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \ __ret; \ }) #else -#define vld2q_u8(__p0) __extension__ ({ \ - uint8x16x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 48); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vld2q_u32(__p0) 
__extension__ ({ \ - uint32x4x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 50); \ +#define vgetq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \ __ret; \ }) -#else -#define vld2q_u32(__p0) __extension__ ({ \ - uint32x4x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 50); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ +#define __noswap_vgetq_lane_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_u16(__p0) __extension__ ({ \ - uint16x8x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 49); \ +#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ __ret; \ }) #else -#define vld2q_u16(__p0) __extension__ ({ \ - uint16x8x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 49); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vgetq_lane_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8_t) __builtin_neon_vgetq_lane_i8((int8x16_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8_t) 
__builtin_neon_vgetq_lane_i8((int8x16_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_s8(__p0) __extension__ ({ \ - int8x16x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 32); \ +#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \ __ret; \ }) #else -#define vld2q_s8(__p0) __extension__ ({ \ - int8x16x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 32); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vgetq_lane_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (float32_t) __builtin_neon_vgetq_lane_f32((float32x4_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_f32(__p0) __extension__ ({ \ - float32x4x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 41); \ +#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \ __ret; \ }) #else -#define vld2q_f32(__p0) __extension__ ({ \ - float32x4x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 41); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ +#define vgetq_lane_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x4_t __s0 = __p0; 
\ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vgetq_lane_i32((int32x4_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_s32(__p0) __extension__ ({ \ - int32x4x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 34); \ +#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \ __ret; \ }) #else -#define vld2q_s32(__p0) __extension__ ({ \ - int32x4x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 34); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ +#define vgetq_lane_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vgetq_lane_i64((int64x2_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_s16(__p0) __extension__ ({ \ - int16x8x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 33); \ +#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \ __ret; \ }) #else -#define vld2q_s16(__p0) __extension__ ({ \ - int16x8x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 33); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 
4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vgetq_lane_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vgetq_lane_i16((int16x8_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2_u8(__p0) __extension__ ({ \ - uint8x8x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 16); \ +#define vget_lane_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ __ret; \ }) #else -#define vld2_u8(__p0) __extension__ ({ \ - uint8x8x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 16); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vget_lane_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2_u32(__p0) __extension__ ({ \ - uint32x2x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 18); \ +#define vget_lane_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32_t) 
__builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \ __ret; \ }) #else -#define vld2_u32(__p0) __extension__ ({ \ - uint32x2x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 18); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ +#define vget_lane_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \ __ret; \ }) #endif -#define vld2_u64(__p0) __extension__ ({ \ - uint64x1x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 19); \ +#define vget_lane_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vld2_u16(__p0) __extension__ ({ \ - uint16x4x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 17); \ +#define vget_lane_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \ __ret; \ }) #else -#define vld2_u16(__p0) __extension__ ({ \ - uint16x4x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 17); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ +#define vget_lane_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); 
\ + __ret; \ +}) +#define __noswap_vget_lane_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2_s8(__p0) __extension__ ({ \ - int8x8x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 0); \ +#define vget_lane_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ __ret; \ }) #else -#define vld2_s8(__p0) __extension__ ({ \ - int8x8x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 0); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vget_lane_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vget_lane_i8((int8x8_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2_f32(__p0) __extension__ ({ \ - float32x2x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 9); \ +#define vget_lane_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \ __ret; \ }) #else -#define vld2_f32(__p0) __extension__ ({ \ - float32x2x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 9); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ +#define vget_lane_f32(__p0, __p1) __extension__ ({ \ + float32_t 
__ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (float32_t) __builtin_neon_vget_lane_f32((float32x2_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2_s32(__p0) __extension__ ({ \ - int32x2x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 2); \ +#define vget_lane_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \ __ret; \ }) #else -#define vld2_s32(__p0) __extension__ ({ \ - int32x2x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 2); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ +#define vget_lane_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vget_lane_i32((int32x2_t)__s0, __p1); \ __ret; \ }) #endif -#define vld2_s64(__p0) __extension__ ({ \ - int64x1x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 3); \ +#define vget_lane_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vget_lane_i64((int64x1_t)__s0, __p1); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vld2_s16(__p0) __extension__ ({ \ - int16x4x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 1); \ +#define vget_lane_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + 
int16x4_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \ __ret; \ }) #else -#define vld2_s16(__p0) __extension__ ({ \ - int16x4x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 1); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ +#define vget_lane_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vget_lane_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vget_lane_i16((int16x4_t)__s0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2_dup_p8(__p0) __extension__ ({ \ - poly8x8x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly8x8_t vget_low_p8(poly8x16_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} #else -#define vld2_dup_p8(__p0) __extension__ ({ \ - poly8x8x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly8x8_t vget_low_p8(poly8x16_t __p0) { + poly8x8_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2_dup_p16(__p0) 
__extension__ ({ \ - poly16x4x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly16x4_t vget_low_p16(poly16x8_t __p0) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + return __ret; +} #else -#define vld2_dup_p16(__p0) __extension__ ({ \ - poly16x4x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly16x4_t vget_low_p16(poly16x8_t __p0) { + poly16x4_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_p8(__p0) __extension__ ({ \ - poly8x16x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vget_low_u8(uint8x16_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} #else -#define vld2q_dup_p8(__p0) __extension__ ({ \ - poly8x16x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vget_low_u8(uint8x16_t __p0) { + uint8x8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 5, 6, 7); + __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_p16(__p0) __extension__ ({ \ - poly16x8x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vget_low_u32(uint32x4_t __p0) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1); + return __ret; +} #else -#define vld2q_dup_p16(__p0) __extension__ ({ \ - poly16x8x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vget_low_u32(uint32x4_t __p0) { + uint32x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_u8(__p0) __extension__ ({ \ - uint8x16x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint64x1_t vget_low_u64(uint64x2_t __p0) { + uint64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0); + return __ret; +} #else -#define vld2q_dup_u8(__p0) __extension__ ({ \ - uint8x16x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint64x1_t vget_low_u64(uint64x2_t __p0) { + uint64x1_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = 
__builtin_shufflevector(__rev0, __rev0, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_u32(__p0) __extension__ ({ \ - uint32x4x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vget_low_u16(uint16x8_t __p0) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + return __ret; +} #else -#define vld2q_dup_u32(__p0) __extension__ ({ \ - uint32x4x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vget_low_u16(uint16x8_t __p0) { + uint16x4_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_u64(__p0) __extension__ ({ \ - uint64x2x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vget_low_s8(int8x16_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3, 4, 5, 6, 7); + return __ret; +} #else -#define vld2q_dup_u64(__p0) __extension__ ({ \ - uint64x2x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vget_low_s8(int8x16_t __p0) { + int8x8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3, 4, 
5, 6, 7); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_u16(__p0) __extension__ ({ \ - uint16x8x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x2_t vget_low_f32(float32x4_t __p0) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1); + return __ret; +} #else -#define vld2q_dup_u16(__p0) __extension__ ({ \ - uint16x8x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x2_t vget_low_f32(float32x4_t __p0) { + float32x2_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_s8(__p0) __extension__ ({ \ - int8x16x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float16x4_t vget_low_f16(float16x8_t __p0) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + return __ret; +} #else -#define vld2q_dup_s8(__p0) __extension__ ({ \ - int8x16x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float16x4_t vget_low_f16(float16x8_t __p0) { + float16x4_t __ret; + float16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_f32(__p0) __extension__ ({ \ - float32x4x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vget_low_s32(int32x4_t __p0) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1); + return __ret; +} #else -#define vld2q_dup_f32(__p0) __extension__ ({ \ - float32x4x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vget_low_s32(int32x4_t __p0) { + int32x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_s32(__p0) __extension__ ({ \ - int32x4x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int64x1_t vget_low_s64(int64x2_t __p0) { + int64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0); + return __ret; +} #else -#define vld2q_dup_s32(__p0) __extension__ ({ \ - int32x4x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int64x1_t vget_low_s64(int64x2_t __p0) { + int64x1_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = 
__builtin_shufflevector(__rev0, __rev0, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_s64(__p0) __extension__ ({ \ - int64x2x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vget_low_s16(int16x8_t __p0) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); + return __ret; +} #else -#define vld2q_dup_s64(__p0) __extension__ ({ \ - int64x2x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vget_low_s16(int16x8_t __p0) { + int16x4_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_s16(__p0) __extension__ ({ \ - int16x8x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} #else -#define vld2q_dup_s16(__p0) __extension__ ({ \ - int16x8x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 
0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2_dup_u8(__p0) __extension__ ({ \ - uint8x8x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} #else -#define vld2_dup_u8(__p0) __extension__ ({ \ - uint8x8x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2_dup_u32(__p0) __extension__ ({ \ - uint32x2x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} #else -#define vld2_dup_u32(__p0) __extension__ ({ \ - uint32x2x2_t __ret; \ - 
__builtin_neon_vld2_dup_v(&__ret, __p0, 18); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif -#define vld2_dup_u64(__p0) __extension__ ({ \ - uint64x1x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 19); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vld2_dup_u16(__p0) __extension__ ({ \ - uint16x4x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} #else -#define vld2_dup_u16(__p0) __extension__ ({ \ - uint16x4x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16_t vhaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = 
__builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2_dup_s8(__p0) __extension__ ({ \ - int8x8x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} #else -#define vld2_dup_s8(__p0) __extension__ ({ \ - int8x8x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vhaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2_dup_f32(__p0) __extension__ ({ \ - float32x2x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x8_t vhaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} #else -#define vld2_dup_f32(__p0) __extension__ ({ \ - float32x2x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x8_t 
vhaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2_dup_s32(__p0) __extension__ ({ \ - int32x2x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} #else -#define vld2_dup_s32(__p0) __extension__ ({ \ - int32x2x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif -#define vld2_dup_s64(__p0) __extension__ ({ \ - int64x1x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vld2_dup_s16(__p0) __extension__ ({ \ - int16x4x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = 
(uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} #else -#define vld2_dup_s16(__p0) __extension__ ({ \ - int16x4x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8x2_t __ret; \ - poly8x8x2_t __s1 = __p1; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} #else -#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8x2_t __ret; \ - poly8x8x2_t __s1 = __p1; \ - poly8x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t 
vhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4x2_t __ret; \ - poly16x4x2_t __s1 = __p1; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} #else -#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4x2_t __ret; \ - poly16x4x2_t __s1 = __p1; \ - poly16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vhadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_p16(__p0, __p1, __p2) 
__extension__ ({ \ - poly16x8x2_t __ret; \ - poly16x8x2_t __s1 = __p1; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} #else -#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8x2_t __ret; \ - poly16x8x2_t __s1 = __p1; \ - poly16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vhadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4x2_t __ret; \ - uint32x4x2_t __s1 = __p1; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} #else -#define vld2q_lane_u32(__p0, __p1, __p2) 
__extension__ ({ \ - uint32x4x2_t __ret; \ - uint32x4x2_t __s1 = __p1; \ - uint32x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vhadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8x2_t __ret; \ - uint16x8x2_t __s1 = __p1; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} #else -#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8x2_t __ret; \ - uint16x8x2_t __s1 = __p1; \ - uint16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 
6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vhsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4x2_t __ret; \ - float32x4x2_t __s1 = __p1; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} #else -#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4x2_t __ret; \ - float32x4x2_t __s1 = __p1; \ - float32x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vhsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); 
+ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4x2_t __ret; \ - int32x4x2_t __s1 = __p1; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \ +__ai __attribute__((target("neon"))) uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vhsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vhsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 
2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vhsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vhsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vhsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vhsub_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 
0); + __ret = (uint8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vhsub_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vhsub_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vhsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 
1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vhsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vhsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1_p8(__p0) __extension__ ({ \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \ __ret; \ }) #else -#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4x2_t __ret; \ - int32x4x2_t __s1 = __p1; \ - int32x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], 
__s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ +#define vld1_p8(__p0) __extension__ ({ \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vld1_v(__p0, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8x2_t __ret; \ - int16x8x2_t __s1 = __p1; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \ +#define vld1_p16(__p0) __extension__ ({ \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \ __ret; \ }) #else -#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8x2_t __ret; \ - int16x8x2_t __s1 = __p1; \ - int16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 33); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1_p16(__p0) __extension__ ({ \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vld1_v(__p0, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8x2_t __ret; \ - uint8x8x2_t __s1 = __p1; \ - __builtin_neon_vld2_lane_v(&__ret, 
__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \ +#define vld1q_p8(__p0) __extension__ ({ \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \ __ret; \ }) #else -#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8x2_t __ret; \ - uint8x8x2_t __s1 = __p1; \ - uint8x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1q_p8(__p0) __extension__ ({ \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vld1q_v(__p0, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2x2_t __ret; \ - uint32x2x2_t __s1 = __p1; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \ +#define vld1q_p16(__p0) __extension__ ({ \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \ __ret; \ }) #else -#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2x2_t __ret; \ - uint32x2x2_t __s1 = __p1; \ - uint32x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ +#define vld1q_p16(__p0) __extension__ ({ \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vld1q_v(__p0, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4x2_t __ret; \ - uint16x4x2_t __s1 = __p1; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \ +#define vld1q_u8(__p0) __extension__ ({ \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \ __ret; \ }) #else -#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4x2_t __ret; \ - uint16x4x2_t __s1 = __p1; \ - uint16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ +#define vld1q_u8(__p0) __extension__ ({ \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vld1q_v(__p0, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8x2_t __ret; \ - int8x8x2_t __s1 = __p1; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \ +#define vld1q_u32(__p0) __extension__ ({ \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \ __ret; \ }) #else -#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8x2_t __ret; \ - int8x8x2_t __s1 = __p1; \ - int8x8x2_t 
__rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1q_u32(__p0) __extension__ ({ \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vld1q_v(__p0, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2x2_t __ret; \ - float32x2x2_t __s1 = __p1; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \ +#define vld1q_u64(__p0) __extension__ ({ \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \ __ret; \ }) #else -#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2x2_t __ret; \ - float32x2x2_t __s1 = __p1; \ - float32x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ +#define vld1q_u64(__p0) __extension__ ({ \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vld1q_v(__p0, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2x2_t __ret; \ - int32x2x2_t __s1 = __p1; \ - 
__builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \ +#define vld1q_u16(__p0) __extension__ ({ \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \ __ret; \ }) #else -#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2x2_t __ret; \ - int32x2x2_t __s1 = __p1; \ - int32x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 2); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ +#define vld1q_u16(__p0) __extension__ ({ \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vld1q_v(__p0, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4x2_t __ret; \ - int16x4x2_t __s1 = __p1; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \ +#define vld1q_s8(__p0) __extension__ ({ \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \ __ret; \ }) #else -#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4x2_t __ret; \ - int16x4x2_t __s1 = __p1; \ - int16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ +#define vld1q_s8(__p0) 
__extension__ ({ \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vld1q_v(__p0, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3_p8(__p0) __extension__ ({ \ - poly8x8x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 4); \ +#define vld1q_f32(__p0) __extension__ ({ \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \ __ret; \ }) #else -#define vld3_p8(__p0) __extension__ ({ \ - poly8x8x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 4); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1q_f32(__p0) __extension__ ({ \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vld1q_v(__p0, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3_p16(__p0) __extension__ ({ \ - poly16x4x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 5); \ +#define vld1q_s32(__p0) __extension__ ({ \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \ __ret; \ }) #else -#define vld3_p16(__p0) __extension__ ({ \ - poly16x4x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 5); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ +#define vld1q_s32(__p0) __extension__ ({ \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vld1q_v(__p0, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define 
vld3q_p8(__p0) __extension__ ({ \ - poly8x16x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 36); \ +#define vld1q_s64(__p0) __extension__ ({ \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \ __ret; \ }) #else -#define vld3q_p8(__p0) __extension__ ({ \ - poly8x16x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 36); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1q_s64(__p0) __extension__ ({ \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vld1q_v(__p0, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_p16(__p0) __extension__ ({ \ - poly16x8x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 37); \ +#define vld1q_s16(__p0) __extension__ ({ \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \ __ret; \ }) #else -#define vld3q_p16(__p0) __extension__ ({ \ - poly16x8x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 37); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1q_s16(__p0) __extension__ ({ \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vld1q_v(__p0, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_u8(__p0) __extension__ ({ \ - uint8x16x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 48); \ +#define 
vld1_u8(__p0) __extension__ ({ \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \ __ret; \ }) #else -#define vld3q_u8(__p0) __extension__ ({ \ - uint8x16x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 48); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1_u8(__p0) __extension__ ({ \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vld1_v(__p0, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_u32(__p0) __extension__ ({ \ - uint32x4x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 50); \ +#define vld1_u32(__p0) __extension__ ({ \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \ __ret; \ }) #else -#define vld3q_u32(__p0) __extension__ ({ \ - uint32x4x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 50); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ +#define vld1_u32(__p0) __extension__ ({ \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vld1_v(__p0, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif +#define vld1_u64(__p0) __extension__ ({ \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vld1_v(__p0, 19); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vld3q_u16(__p0) __extension__ ({ \ - uint16x8x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 49); \ +#define vld1_u16(__p0) 
__extension__ ({ \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \ __ret; \ }) #else -#define vld3q_u16(__p0) __extension__ ({ \ - uint16x8x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 49); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1_u16(__p0) __extension__ ({ \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vld1_v(__p0, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_s8(__p0) __extension__ ({ \ - int8x16x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 32); \ +#define vld1_s8(__p0) __extension__ ({ \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \ __ret; \ }) #else -#define vld3q_s8(__p0) __extension__ ({ \ - int8x16x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 32); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1_s8(__p0) __extension__ ({ \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vld1_v(__p0, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_f32(__p0) __extension__ ({ \ - float32x4x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 41); \ +#define vld1_f32(__p0) __extension__ ({ \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \ __ret; \ }) #else -#define 
vld3q_f32(__p0) __extension__ ({ \ - float32x4x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 41); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ +#define vld1_f32(__p0) __extension__ ({ \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vld1_v(__p0, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_s32(__p0) __extension__ ({ \ - int32x4x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 34); \ +#define vld1_s32(__p0) __extension__ ({ \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \ __ret; \ }) #else -#define vld3q_s32(__p0) __extension__ ({ \ - int32x4x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 34); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ +#define vld1_s32(__p0) __extension__ ({ \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vld1_v(__p0, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif +#define vld1_s64(__p0) __extension__ ({ \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vld1_v(__p0, 3); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vld3q_s16(__p0) __extension__ ({ \ - int16x8x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 33); \ +#define vld1_s16(__p0) __extension__ ({ \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \ __ret; \ }) #else -#define vld3q_s16(__p0) __extension__ ({ \ - int16x8x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 33); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 
7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1_s16(__p0) __extension__ ({ \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vld1_v(__p0, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3_u8(__p0) __extension__ ({ \ - uint8x8x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 16); \ +#define vld1_dup_p8(__p0) __extension__ ({ \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \ __ret; \ }) #else -#define vld3_u8(__p0) __extension__ ({ \ - uint8x8x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 16); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1_dup_p8(__p0) __extension__ ({ \ + poly8x8_t __ret; \ + __ret = (poly8x8_t) __builtin_neon_vld1_dup_v(__p0, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3_u32(__p0) __extension__ ({ \ - uint32x2x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 18); \ +#define vld1_dup_p16(__p0) __extension__ ({ \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \ __ret; \ }) #else -#define vld3_u32(__p0) __extension__ ({ \ - uint32x2x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 18); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ +#define vld1_dup_p16(__p0) 
__extension__ ({ \ + poly16x4_t __ret; \ + __ret = (poly16x4_t) __builtin_neon_vld1_dup_v(__p0, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif -#define vld3_u64(__p0) __extension__ ({ \ - uint64x1x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 19); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vld3_u16(__p0) __extension__ ({ \ - uint16x4x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 17); \ +#define vld1q_dup_p8(__p0) __extension__ ({ \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \ __ret; \ }) #else -#define vld3_u16(__p0) __extension__ ({ \ - uint16x4x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 17); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ +#define vld1q_dup_p8(__p0) __extension__ ({ \ + poly8x16_t __ret; \ + __ret = (poly8x16_t) __builtin_neon_vld1q_dup_v(__p0, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3_s8(__p0) __extension__ ({ \ - int8x8x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 0); \ +#define vld1q_dup_p16(__p0) __extension__ ({ \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) __builtin_neon_vld1q_dup_v(__p0, 37); \ __ret; \ }) #else -#define vld3_s8(__p0) __extension__ ({ \ - int8x8x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 0); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1q_dup_p16(__p0) __extension__ ({ \ + poly16x8_t __ret; \ + __ret = (poly16x8_t) 
__builtin_neon_vld1q_dup_v(__p0, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3_f32(__p0) __extension__ ({ \ - float32x2x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 9); \ +#define vld1q_dup_u8(__p0) __extension__ ({ \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \ __ret; \ }) #else -#define vld3_f32(__p0) __extension__ ({ \ - float32x2x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 9); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ +#define vld1q_dup_u8(__p0) __extension__ ({ \ + uint8x16_t __ret; \ + __ret = (uint8x16_t) __builtin_neon_vld1q_dup_v(__p0, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3_s32(__p0) __extension__ ({ \ - int32x2x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 2); \ +#define vld1q_dup_u32(__p0) __extension__ ({ \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \ __ret; \ }) #else -#define vld3_s32(__p0) __extension__ ({ \ - int32x2x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 2); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ +#define vld1q_dup_u32(__p0) __extension__ ({ \ + uint32x4_t __ret; \ + __ret = (uint32x4_t) __builtin_neon_vld1q_dup_v(__p0, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif -#define vld3_s64(__p0) __extension__ ({ \ - int64x1x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 3); \ - __ret; \ 
-}) #ifdef __LITTLE_ENDIAN__ -#define vld3_s16(__p0) __extension__ ({ \ - int16x4x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 1); \ +#define vld1q_dup_u64(__p0) __extension__ ({ \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \ __ret; \ }) #else -#define vld3_s16(__p0) __extension__ ({ \ - int16x4x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 1); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ +#define vld1q_dup_u64(__p0) __extension__ ({ \ + uint64x2_t __ret; \ + __ret = (uint64x2_t) __builtin_neon_vld1q_dup_v(__p0, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3_dup_p8(__p0) __extension__ ({ \ - poly8x8x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \ +#define vld1q_dup_u16(__p0) __extension__ ({ \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \ __ret; \ }) #else -#define vld3_dup_p8(__p0) __extension__ ({ \ - poly8x8x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1q_dup_u16(__p0) __extension__ ({ \ + uint16x8_t __ret; \ + __ret = (uint16x8_t) __builtin_neon_vld1q_dup_v(__p0, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3_dup_p16(__p0) __extension__ ({ \ - poly16x4x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \ +#define vld1q_dup_s8(__p0) __extension__ ({ \ + int8x16_t 
__ret; \ + __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \ __ret; \ }) #else -#define vld3_dup_p16(__p0) __extension__ ({ \ - poly16x4x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ +#define vld1q_dup_s8(__p0) __extension__ ({ \ + int8x16_t __ret; \ + __ret = (int8x16_t) __builtin_neon_vld1q_dup_v(__p0, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_p8(__p0) __extension__ ({ \ - poly8x16x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \ +#define vld1q_dup_f32(__p0) __extension__ ({ \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \ __ret; \ }) #else -#define vld3q_dup_p8(__p0) __extension__ ({ \ - poly8x16x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1q_dup_f32(__p0) __extension__ ({ \ + float32x4_t __ret; \ + __ret = (float32x4_t) __builtin_neon_vld1q_dup_v(__p0, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_p16(__p0) __extension__ ({ \ - poly16x8x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \ +#define vld1q_dup_s32(__p0) __extension__ ({ \ + int32x4_t __ret; \ + __ret = (int32x4_t) 
__builtin_neon_vld1q_dup_v(__p0, 34); \ __ret; \ }) #else -#define vld3q_dup_p16(__p0) __extension__ ({ \ - poly16x8x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1q_dup_s32(__p0) __extension__ ({ \ + int32x4_t __ret; \ + __ret = (int32x4_t) __builtin_neon_vld1q_dup_v(__p0, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_u8(__p0) __extension__ ({ \ - uint8x16x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \ +#define vld1q_dup_s64(__p0) __extension__ ({ \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \ __ret; \ }) #else -#define vld3q_dup_u8(__p0) __extension__ ({ \ - uint8x16x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1q_dup_s64(__p0) __extension__ ({ \ + int64x2_t __ret; \ + __ret = (int64x2_t) __builtin_neon_vld1q_dup_v(__p0, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_u32(__p0) __extension__ ({ \ - uint32x4x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \ +#define vld1q_dup_s16(__p0) __extension__ ({ \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \ __ret; \ }) #else -#define 
vld3q_dup_u32(__p0) __extension__ ({ \ - uint32x4x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ +#define vld1q_dup_s16(__p0) __extension__ ({ \ + int16x8_t __ret; \ + __ret = (int16x8_t) __builtin_neon_vld1q_dup_v(__p0, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_u64(__p0) __extension__ ({ \ - uint64x2x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \ +#define vld1_dup_u8(__p0) __extension__ ({ \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \ __ret; \ }) #else -#define vld3q_dup_u64(__p0) __extension__ ({ \ - uint64x2x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ +#define vld1_dup_u8(__p0) __extension__ ({ \ + uint8x8_t __ret; \ + __ret = (uint8x8_t) __builtin_neon_vld1_dup_v(__p0, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_u16(__p0) __extension__ ({ \ - uint16x8x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \ +#define vld1_dup_u32(__p0) __extension__ ({ \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \ __ret; \ }) #else -#define vld3q_dup_u16(__p0) __extension__ ({ \ - uint16x8x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - 
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1_dup_u32(__p0) __extension__ ({ \ + uint32x2_t __ret; \ + __ret = (uint32x2_t) __builtin_neon_vld1_dup_v(__p0, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif +#define vld1_dup_u64(__p0) __extension__ ({ \ + uint64x1_t __ret; \ + __ret = (uint64x1_t) __builtin_neon_vld1_dup_v(__p0, 19); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_s8(__p0) __extension__ ({ \ - int8x16x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \ +#define vld1_dup_u16(__p0) __extension__ ({ \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \ __ret; \ }) #else -#define vld3q_dup_s8(__p0) __extension__ ({ \ - int8x16x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1_dup_u16(__p0) __extension__ ({ \ + uint16x4_t __ret; \ + __ret = (uint16x4_t) __builtin_neon_vld1_dup_v(__p0, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_f32(__p0) __extension__ ({ \ - float32x4x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \ +#define vld1_dup_s8(__p0) __extension__ ({ \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \ __ret; \ }) #else -#define vld3q_dup_f32(__p0) __extension__ ({ \ - float32x4x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \ - \ - __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ +#define vld1_dup_s8(__p0) __extension__ ({ \ + int8x8_t __ret; \ + __ret = (int8x8_t) __builtin_neon_vld1_dup_v(__p0, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_s32(__p0) __extension__ ({ \ - int32x4x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \ +#define vld1_dup_f32(__p0) __extension__ ({ \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \ __ret; \ }) #else -#define vld3q_dup_s32(__p0) __extension__ ({ \ - int32x4x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ +#define vld1_dup_f32(__p0) __extension__ ({ \ + float32x2_t __ret; \ + __ret = (float32x2_t) __builtin_neon_vld1_dup_v(__p0, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_s64(__p0) __extension__ ({ \ - int64x2x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \ +#define vld1_dup_s32(__p0) __extension__ ({ \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \ __ret; \ }) #else -#define vld3q_dup_s64(__p0) __extension__ ({ \ - int64x2x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ +#define 
vld1_dup_s32(__p0) __extension__ ({ \ + int32x2_t __ret; \ + __ret = (int32x2_t) __builtin_neon_vld1_dup_v(__p0, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif +#define vld1_dup_s64(__p0) __extension__ ({ \ + int64x1_t __ret; \ + __ret = (int64x1_t) __builtin_neon_vld1_dup_v(__p0, 3); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_s16(__p0) __extension__ ({ \ - int16x8x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \ +#define vld1_dup_s16(__p0) __extension__ ({ \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \ __ret; \ }) #else -#define vld3q_dup_s16(__p0) __extension__ ({ \ - int16x8x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1_dup_s16(__p0) __extension__ ({ \ + int16x4_t __ret; \ + __ret = (int16x4_t) __builtin_neon_vld1_dup_v(__p0, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3_dup_u8(__p0) __extension__ ({ \ - uint8x8x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \ +#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s1 = __p1; \ + __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \ __ret; \ }) #else -#define vld3_dup_u8(__p0) __extension__ ({ \ - uint8x8x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 
3, 2, 1, 0); \ +#define vld1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3_dup_u32(__p0) __extension__ ({ \ - uint32x2x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \ +#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s1 = __p1; \ + __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \ __ret; \ }) #else -#define vld3_dup_u32(__p0) __extension__ ({ \ - uint32x2x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ +#define vld1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (poly16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif -#define vld3_dup_u64(__p0) __extension__ ({ \ - uint64x1x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vld3_dup_u16(__p0) __extension__ ({ \ - uint16x4x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \ +#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s1 = __p1; \ + __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \ __ret; \ }) #else -#define vld3_dup_u16(__p0) 
__extension__ ({ \ - uint16x4x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ +#define vld1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3_dup_s8(__p0) __extension__ ({ \ - int8x8x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \ +#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s1 = __p1; \ + __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \ __ret; \ }) #else -#define vld3_dup_s8(__p0) __extension__ ({ \ - int8x8x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3_dup_f32(__p0) 
__extension__ ({ \ - float32x2x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \ +#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s1 = __p1; \ + __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \ __ret; \ }) #else -#define vld3_dup_f32(__p0) __extension__ ({ \ - float32x2x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ +#define vld1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3_dup_s32(__p0) __extension__ ({ \ - int32x2x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \ +#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s1 = __p1; \ + __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \ __ret; \ }) #else -#define vld3_dup_s32(__p0) __extension__ ({ \ - int32x2x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ +#define vld1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, 
__s1, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif -#define vld3_dup_s64(__p0) __extension__ ({ \ - int64x1x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vld3_dup_s16(__p0) __extension__ ({ \ - int16x4x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \ +#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \ __ret; \ }) #else -#define vld3_dup_s16(__p0) __extension__ ({ \ - int16x4x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ +#define vld1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8x3_t __ret; \ - poly8x8x3_t __s1 = __p1; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \ +#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s1 = __p1; \ + __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \ __ret; \ }) #else -#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8x3_t __ret; \ - poly8x8x3_t __s1 = __p1; \ - 
poly8x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4x3_t __ret; \ - poly16x4x3_t __s1 = __p1; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \ +#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s1 = __p1; \ + __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \ __ret; \ }) #else -#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4x3_t __ret; \ - poly16x4x3_t __s1 = __p1; \ - poly16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - 
__builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ +#define vld1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8x3_t __ret; \ - poly16x8x3_t __s1 = __p1; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \ +#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s1 = __p1; \ + __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \ __ret; \ }) #else -#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8x3_t __ret; \ - poly16x8x3_t __s1 = __p1; \ - poly16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s1 = __p1; \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4x3_t __ret; \ - uint32x4x3_t __s1 = __p1; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \ +#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \ __ret; \ }) #else -#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4x3_t __ret; \ - uint32x4x3_t __s1 = __p1; \ - uint32x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ +#define vld1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + 
__ret = (int32x4_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8x3_t __ret; \ - uint16x8x3_t __s1 = __p1; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \ +#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s1 = __p1; \ + __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \ __ret; \ }) #else -#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8x3_t __ret; \ - uint16x8x3_t __s1 = __p1; \ - uint16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4x3_t __ret; \ - float32x4x3_t __s1 = __p1; \ - 
__builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \ +#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \ __ret; \ }) #else -#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4x3_t __ret; \ - float32x4x3_t __s1 = __p1; \ - float32x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ +#define vld1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4x3_t __ret; \ - int32x4x3_t __s1 = __p1; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 34); \ +#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s1 = __p1; \ + __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \ __ret; \ }) #else -#define 
vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4x3_t __ret; \ - int32x4x3_t __s1 = __p1; \ - int32x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ +#define vld1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8x3_t __ret; \ - int16x8x3_t __s1 = __p1; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \ +#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s1 = __p1; \ + __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \ __ret; \ }) #else -#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8x3_t __ret; \ - int16x8x3_t __s1 = __p1; \ - int16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = 
__builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif +#define vld1_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s1 = __p1; \ + __ret = (uint64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8x3_t __ret; \ - uint8x8x3_t __s1 = __p1; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \ +#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s1 = __p1; \ + __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \ __ret; \ }) #else -#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8x3_t __ret; \ - uint8x8x3_t __s1 = __p1; \ - uint8x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld3_lane_v(&__ret, __p0, 
(int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2x3_t __ret; \ - uint32x2x3_t __s1 = __p1; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \ +#define vld1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s1 = __p1; \ + __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \ __ret; \ }) #else -#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2x3_t __ret; \ - uint32x2x3_t __s1 = __p1; \ - uint32x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ +#define vld1_lane_s8(__p0, __p1, __p2) 
__extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4x3_t __ret; \ - uint16x4x3_t __s1 = __p1; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \ +#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s1 = __p1; \ + __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \ __ret; \ }) #else -#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4x3_t __ret; \ - uint16x4x3_t __s1 = __p1; \ - uint16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ +#define vld1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s1 = __p1; \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3_lane_s8(__p0, __p1, 
__p2) __extension__ ({ \ - int8x8x3_t __ret; \ - int8x8x3_t __s1 = __p1; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \ +#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \ __ret; \ }) #else -#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8x3_t __ret; \ - int8x8x3_t __s1 = __p1; \ - int8x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif +#define vld1_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s1 = __p1; \ + __ret = (int64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2x3_t __ret; \ - float32x2x3_t __s1 = __p1; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, 
(int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \ +#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \ __ret; \ }) #else -#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2x3_t __ret; \ - float32x2x3_t __s1 = __p1; \ - float32x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ +#define vld1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2x3_t __ret; \ - int32x2x3_t __s1 = __p1; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \ +#define vld1_p8_x2(__p0) __extension__ ({ \ + poly8x8x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \ __ret; \ }) #else -#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2x3_t __ret; \ - int32x2x3_t __s1 = __p1; \ - int32x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - 
__rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \ +#define vld1_p8_x2(__p0) __extension__ ({ \ + poly8x8x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 4); \ \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4x3_t __ret; \ - int16x4x3_t __s1 = __p1; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \ +#define vld1_p16_x2(__p0) __extension__ ({ \ + poly16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \ __ret; \ }) #else -#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4x3_t __ret; \ - int16x4x3_t __s1 = __p1; \ - int16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 1); \ +#define vld1_p16_x2(__p0) __extension__ ({ \ + poly16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 5); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4_p8(__p0) __extension__ ({ \ - poly8x8x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 4); \ +#define vld1q_p8_x2(__p0) __extension__ ({ \ + poly8x16x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \ __ret; \ }) #else -#define vld4_p8(__p0) __extension__ ({ \ - poly8x8x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 4); \ +#define vld1q_p8_x2(__p0) __extension__ ({ \ + poly8x16x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 36); \ \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4_p16(__p0) __extension__ ({ \ - poly16x4x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 5); \ +#define vld1q_p16_x2(__p0) __extension__ ({ \ + poly16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \ __ret; \ }) #else -#define vld4_p16(__p0) __extension__ ({ \ - poly16x4x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 5); \ +#define vld1q_p16_x2(__p0) __extension__ ({ \ + poly16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 37); \ \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); 
\ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_p8(__p0) __extension__ ({ \ - poly8x16x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 36); \ +#define vld1q_u8_x2(__p0) __extension__ ({ \ + uint8x16x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \ __ret; \ }) #else -#define vld4q_p8(__p0) __extension__ ({ \ - poly8x16x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 36); \ +#define vld1q_u8_x2(__p0) __extension__ ({ \ + uint8x16x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 48); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_p16(__p0) __extension__ ({ \ - poly16x8x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 37); \ +#define vld1q_u32_x2(__p0) __extension__ ({ \ + uint32x4x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \ __ret; \ }) #else -#define vld4q_p16(__p0) __extension__ ({ \ - poly16x8x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 37); \ +#define vld1q_u32_x2(__p0) __extension__ ({ \ + uint32x4x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 50); \ \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 
4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_u8(__p0) __extension__ ({ \ - uint8x16x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 48); \ +#define vld1q_u64_x2(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \ __ret; \ }) #else -#define vld4q_u8(__p0) __extension__ ({ \ - uint8x16x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 48); \ +#define vld1q_u64_x2(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 51); \ \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_u32(__p0) __extension__ ({ \ - uint32x4x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 50); \ +#define vld1q_u16_x2(__p0) __extension__ ({ \ + uint16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \ __ret; \ }) #else -#define vld4q_u32(__p0) __extension__ 
({ \ - uint32x4x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 50); \ +#define vld1q_u16_x2(__p0) __extension__ ({ \ + uint16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 49); \ \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_u16(__p0) __extension__ ({ \ - uint16x8x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 49); \ +#define vld1q_s8_x2(__p0) __extension__ ({ \ + int8x16x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \ __ret; \ }) #else -#define vld4q_u16(__p0) __extension__ ({ \ - uint16x8x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 49); \ +#define vld1q_s8_x2(__p0) __extension__ ({ \ + int8x16x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 32); \ \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_s8(__p0) __extension__ ({ \ - int8x16x4_t __ret; \ 
- __builtin_neon_vld4q_v(&__ret, __p0, 32); \ +#define vld1q_f32_x2(__p0) __extension__ ({ \ + float32x4x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \ __ret; \ }) #else -#define vld4q_s8(__p0) __extension__ ({ \ - int8x16x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 32); \ +#define vld1q_f32_x2(__p0) __extension__ ({ \ + float32x4x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 41); \ \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_f32(__p0) __extension__ ({ \ - float32x4x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 41); \ +#define vld1q_s32_x2(__p0) __extension__ ({ \ + int32x4x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \ __ret; \ }) #else -#define vld4q_f32(__p0) __extension__ ({ \ - float32x4x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 41); \ +#define vld1q_s32_x2(__p0) __extension__ ({ \ + int32x4x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 34); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ 
-#define vld4q_s32(__p0) __extension__ ({ \ - int32x4x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 34); \ +#define vld1q_s64_x2(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \ __ret; \ }) #else -#define vld4q_s32(__p0) __extension__ ({ \ - int32x4x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 34); \ +#define vld1q_s64_x2(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 35); \ \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_s16(__p0) __extension__ ({ \ - int16x8x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 33); \ +#define vld1q_s16_x2(__p0) __extension__ ({ \ + int16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \ __ret; \ }) #else -#define vld4q_s16(__p0) __extension__ ({ \ - int16x8x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 33); \ +#define vld1q_s16_x2(__p0) __extension__ ({ \ + int16x8x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 33); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4_u8(__p0) __extension__ ({ \ - uint8x8x4_t __ret; \ - 
__builtin_neon_vld4_v(&__ret, __p0, 16); \ +#define vld1_u8_x2(__p0) __extension__ ({ \ + uint8x8x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \ __ret; \ }) #else -#define vld4_u8(__p0) __extension__ ({ \ - uint8x8x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 16); \ +#define vld1_u8_x2(__p0) __extension__ ({ \ + uint8x8x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 16); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4_u32(__p0) __extension__ ({ \ - uint32x2x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 18); \ +#define vld1_u32_x2(__p0) __extension__ ({ \ + uint32x2x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \ __ret; \ }) #else -#define vld4_u32(__p0) __extension__ ({ \ - uint32x2x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 18); \ +#define vld1_u32_x2(__p0) __extension__ ({ \ + uint32x2x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 18); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif -#define vld4_u64(__p0) __extension__ ({ \ - uint64x1x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 19); \ +#define vld1_u64_x2(__p0) __extension__ ({ \ + uint64x1x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vld4_u16(__p0) __extension__ ({ \ - uint16x4x4_t __ret; \ - 
__builtin_neon_vld4_v(&__ret, __p0, 17); \ +#define vld1_u16_x2(__p0) __extension__ ({ \ + uint16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \ __ret; \ }) #else -#define vld4_u16(__p0) __extension__ ({ \ - uint16x4x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 17); \ +#define vld1_u16_x2(__p0) __extension__ ({ \ + uint16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 17); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4_s8(__p0) __extension__ ({ \ - int8x8x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 0); \ +#define vld1_s8_x2(__p0) __extension__ ({ \ + int8x8x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \ __ret; \ }) #else -#define vld4_s8(__p0) __extension__ ({ \ - int8x8x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 0); \ +#define vld1_s8_x2(__p0) __extension__ ({ \ + int8x8x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 0); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4_f32(__p0) __extension__ ({ \ - float32x2x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 9); \ +#define vld1_f32_x2(__p0) __extension__ ({ \ + float32x2x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \ __ret; \ }) #else -#define vld4_f32(__p0) __extension__ ({ \ - float32x2x4_t __ret; \ - 
__builtin_neon_vld4_v(&__ret, __p0, 9); \ +#define vld1_f32_x2(__p0) __extension__ ({ \ + float32x2x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 9); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4_s32(__p0) __extension__ ({ \ - int32x2x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 2); \ +#define vld1_s32_x2(__p0) __extension__ ({ \ + int32x2x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \ __ret; \ }) #else -#define vld4_s32(__p0) __extension__ ({ \ - int32x2x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 2); \ +#define vld1_s32_x2(__p0) __extension__ ({ \ + int32x2x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 2); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif -#define vld4_s64(__p0) __extension__ ({ \ - int64x1x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 3); \ +#define vld1_s64_x2(__p0) __extension__ ({ \ + int64x1x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vld4_s16(__p0) __extension__ ({ \ - int16x4x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 1); \ +#define vld1_s16_x2(__p0) __extension__ ({ \ + int16x4x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \ __ret; \ }) #else -#define vld4_s16(__p0) __extension__ ({ \ - int16x4x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 1); \ +#define vld1_s16_x2(__p0) __extension__ ({ \ + int16x4x2_t __ret; \ 
+ __builtin_neon_vld1_x2_v(&__ret, __p0, 1); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4_dup_p8(__p0) __extension__ ({ \ - poly8x8x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \ +#define vld1_p8_x3(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \ __ret; \ }) #else -#define vld4_dup_p8(__p0) __extension__ ({ \ - poly8x8x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \ +#define vld1_p8_x3(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 4); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4_dup_p16(__p0) __extension__ ({ \ - poly16x4x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \ +#define vld1_p16_x3(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \ __ret; \ }) #else -#define vld4_dup_p16(__p0) __extension__ ({ \ - poly16x4x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \ +#define vld1_p16_x3(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 5); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] 
= __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_p8(__p0) __extension__ ({ \ - poly8x16x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \ +#define vld1q_p8_x3(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \ __ret; \ }) #else -#define vld4q_dup_p8(__p0) __extension__ ({ \ - poly8x16x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \ +#define vld1q_p8_x3(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 36); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_p16(__p0) __extension__ ({ \ - poly16x8x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \ +#define vld1q_p16_x3(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \ __ret; \ }) #else -#define vld4q_dup_p16(__p0) __extension__ ({ \ - poly16x8x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \ +#define vld1q_p16_x3(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 37); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 
7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_u8(__p0) __extension__ ({ \ - uint8x16x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \ +#define vld1q_u8_x3(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \ __ret; \ }) #else -#define vld4q_dup_u8(__p0) __extension__ ({ \ - uint8x16x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \ +#define vld1q_u8_x3(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 48); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_u32(__p0) __extension__ ({ \ - uint32x4x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \ +#define vld1q_u32_x3(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \ __ret; \ }) #else -#define vld4q_dup_u32(__p0) __extension__ ({ \ - uint32x4x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \ +#define vld1q_u32_x3(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 50); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = 
__builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_u64(__p0) __extension__ ({ \ - uint64x2x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \ +#define vld1q_u64_x3(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \ __ret; \ }) #else -#define vld4q_dup_u64(__p0) __extension__ ({ \ - uint64x2x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \ +#define vld1q_u64_x3(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 51); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_u16(__p0) __extension__ ({ \ - uint16x8x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \ +#define vld1q_u16_x3(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \ __ret; \ }) #else -#define vld4q_dup_u16(__p0) __extension__ ({ \ - uint16x8x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \ +#define vld1q_u16_x3(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 49); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_s8(__p0) __extension__ ({ \ - int8x16x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, 
__p0, 32); \ +#define vld1q_s8_x3(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \ __ret; \ }) #else -#define vld4q_dup_s8(__p0) __extension__ ({ \ - int8x16x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \ +#define vld1q_s8_x3(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 32); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_f32(__p0) __extension__ ({ \ - float32x4x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \ +#define vld1q_f32_x3(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \ __ret; \ }) #else -#define vld4q_dup_f32(__p0) __extension__ ({ \ - float32x4x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \ +#define vld1q_f32_x3(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 41); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_s32(__p0) __extension__ ({ \ - int32x4x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \ +#define vld1q_s32_x3(__p0) __extension__ ({ \ + int32x4x3_t 
__ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \ __ret; \ }) #else -#define vld4q_dup_s32(__p0) __extension__ ({ \ - int32x4x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \ +#define vld1q_s32_x3(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 34); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_s64(__p0) __extension__ ({ \ - int64x2x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \ +#define vld1q_s64_x3(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \ __ret; \ }) #else -#define vld4q_dup_s64(__p0) __extension__ ({ \ - int64x2x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \ +#define vld1q_s64_x3(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 35); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_s16(__p0) __extension__ ({ \ - int16x8x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \ +#define vld1q_s16_x3(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \ __ret; \ }) #else -#define vld4q_dup_s16(__p0) __extension__ ({ \ - int16x8x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \ +#define vld1q_s16_x3(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + 
__builtin_neon_vld1q_x3_v(&__ret, __p0, 33); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4_dup_u8(__p0) __extension__ ({ \ - uint8x8x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \ +#define vld1_u8_x3(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \ __ret; \ }) #else -#define vld4_dup_u8(__p0) __extension__ ({ \ - uint8x8x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \ +#define vld1_u8_x3(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 16); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4_dup_u32(__p0) __extension__ ({ \ - uint32x2x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \ +#define vld1_u32_x3(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \ __ret; \ }) #else -#define vld4_dup_u32(__p0) __extension__ ({ \ - uint32x2x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \ +#define vld1_u32_x3(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 18); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif -#define vld4_dup_u64(__p0) __extension__ ({ \ - uint64x1x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 19); \ +#define vld1_u64_x3(__p0) __extension__ ({ \ + uint64x1x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vld4_dup_u16(__p0) __extension__ ({ \ - uint16x4x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \ +#define vld1_u16_x3(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \ __ret; \ }) #else -#define vld4_dup_u16(__p0) __extension__ ({ \ - uint16x4x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \ +#define vld1_u16_x3(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 17); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4_dup_s8(__p0) __extension__ ({ \ - int8x8x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \ +#define vld1_s8_x3(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \ __ret; \ }) #else -#define vld4_dup_s8(__p0) __extension__ ({ \ - int8x8x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \ +#define vld1_s8_x3(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 0); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4_dup_f32(__p0) __extension__ ({ \ - float32x2x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \ - __ret; \ -}) +#define vld1_f32_x3(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \ + __ret; \ +}) #else -#define vld4_dup_f32(__p0) __extension__ ({ \ - float32x2x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \ +#define vld1_f32_x3(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 9); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4_dup_s32(__p0) __extension__ ({ \ - int32x2x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \ +#define vld1_s32_x3(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \ __ret; \ }) #else -#define vld4_dup_s32(__p0) __extension__ ({ \ - int32x2x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \ +#define vld1_s32_x3(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 2); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ __ret; \ }) #endif -#define 
vld4_dup_s64(__p0) __extension__ ({ \ - int64x1x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \ +#define vld1_s64_x3(__p0) __extension__ ({ \ + int64x1x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vld4_dup_s16(__p0) __extension__ ({ \ - int16x4x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \ +#define vld1_s16_x3(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \ __ret; \ }) #else -#define vld4_dup_s16(__p0) __extension__ ({ \ - int16x4x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \ +#define vld1_s16_x3(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 1); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ +#define vld1_p8_x4(__p0) __extension__ ({ \ poly8x8x4_t __ret; \ - poly8x8x4_t __s1 = __p1; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \ __ret; \ }) #else -#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ +#define vld1_p8_x4(__p0) __extension__ ({ \ poly8x8x4_t __ret; \ - poly8x8x4_t __s1 = __p1; \ - poly8x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = 
__builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 4); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ @@ -13279,22 +13341,15 @@ __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ +#define vld1_p16_x4(__p0) __extension__ ({ \ poly16x4x4_t __ret; \ - poly16x4x4_t __s1 = __p1; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \ __ret; \ }) #else -#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ +#define vld1_p16_x4(__p0) __extension__ ({ \ poly16x4x4_t __ret; \ - poly16x4x4_t __s1 = __p1; \ - poly16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 5); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ @@ -13305,22 +13360,34 @@ __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_p16(__p0, __p1, __p2) 
__extension__ ({ \ +#define vld1q_p8_x4(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \ + __ret; \ +}) +#else +#define vld1q_p8_x4(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_p16_x4(__p0) __extension__ ({ \ poly16x8x4_t __ret; \ - poly16x8x4_t __s1 = __p1; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \ __ret; \ }) #else -#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ +#define vld1q_p16_x4(__p0) __extension__ ({ \ poly16x8x4_t __ret; \ - poly16x8x4_t __s1 = __p1; \ - poly16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 37); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], 
__ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ @@ -13331,22 +13398,34 @@ __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ +#define vld1q_u8_x4(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \ + __ret; \ +}) +#else +#define vld1q_u8_x4(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u32_x4(__p0) __extension__ ({ \ uint32x4x4_t __ret; \ - uint32x4x4_t __s1 = __p1; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \ __ret; \ }) #else -#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ +#define vld1q_u32_x4(__p0) __extension__ ({ \ uint32x4x4_t __ret; \ - uint32x4x4_t __s1 = __p1; \ - uint32x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - 
__builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 50); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ @@ -13357,22 +13436,34 @@ __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ +#define vld1q_u64_x4(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \ + __ret; \ +}) +#else +#define vld1q_u64_x4(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_u16_x4(__p0) __extension__ ({ \ uint16x8x4_t __ret; \ - uint16x8x4_t __s1 = __p1; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \ __ret; \ }) #else -#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ +#define vld1q_u16_x4(__p0) __extension__ ({ \ uint16x8x4_t __ret; \ - uint16x8x4_t __s1 = __p1; \ - uint16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = 
__builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 49); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ @@ -13383,22 +13474,34 @@ __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ +#define vld1q_s8_x4(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \ + __ret; \ +}) +#else +#define vld1q_s8_x4(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_f32_x4(__p0) __extension__ ({ \ float32x4x4_t __ret; \ - float32x4x4_t __s1 = __p1; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \ __ret; \ }) #else -#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ +#define vld1q_f32_x4(__p0) __extension__ ({ \ float32x4x4_t __ret; \ - float32x4x4_t __s1 = __p1; \ - float32x4x4_t __rev1; \ - __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 41); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ @@ -13409,22 +13512,15 @@ __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ +#define vld1q_s32_x4(__p0) __extension__ ({ \ int32x4x4_t __ret; \ - int32x4x4_t __s1 = __p1; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \ __ret; \ }) #else -#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ +#define vld1q_s32_x4(__p0) __extension__ ({ \ int32x4x4_t __ret; \ - int32x4x4_t __s1 = __p1; \ - int32x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 34); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ 
__ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ @@ -13435,22 +13531,34 @@ __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ +#define vld1q_s64_x4(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \ + __ret; \ +}) +#else +#define vld1q_s64_x4(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld1q_s16_x4(__p0) __extension__ ({ \ int16x8x4_t __ret; \ - int16x8x4_t __s1 = __p1; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \ __ret; \ }) #else -#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ +#define vld1q_s16_x4(__p0) __extension__ ({ \ int16x8x4_t __ret; \ - int16x8x4_t __s1 = __p1; \ - int16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 33); \ \ __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ @@ -13461,22 +13569,15 @@ __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ +#define vld1_u8_x4(__p0) __extension__ ({ \ uint8x8x4_t __ret; \ - uint8x8x4_t __s1 = __p1; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \ __ret; \ }) #else -#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ +#define vld1_u8_x4(__p0) __extension__ ({ \ uint8x8x4_t __ret; \ - uint8x8x4_t __s1 = __p1; \ - uint8x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 16); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ @@ -13487,22 +13588,15 @@ __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ +#define vld1_u32_x4(__p0) __extension__ ({ \ uint32x2x4_t __ret; \ - uint32x2x4_t __s1 = __p1; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 
(int8x8_t)__s1.val[3], __p2, 18); \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \ __ret; \ }) #else -#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ +#define vld1_u32_x4(__p0) __extension__ ({ \ uint32x2x4_t __ret; \ - uint32x2x4_t __s1 = __p1; \ - uint32x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 18); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ @@ -13512,23 +13606,21 @@ __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { }) #endif +#define vld1_u64_x4(__p0) __extension__ ({ \ + uint64x1x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 19); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ +#define vld1_u16_x4(__p0) __extension__ ({ \ uint16x4x4_t __ret; \ - uint16x4x4_t __s1 = __p1; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \ __ret; \ }) #else -#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ +#define vld1_u16_x4(__p0) __extension__ ({ \ uint16x4x4_t __ret; \ - uint16x4x4_t __s1 = __p1; \ - uint16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 
1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 17); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ @@ -13539,22 +13631,15 @@ __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ +#define vld1_s8_x4(__p0) __extension__ ({ \ int8x8x4_t __ret; \ - int8x8x4_t __s1 = __p1; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \ __ret; \ }) #else -#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ +#define vld1_s8_x4(__p0) __extension__ ({ \ int8x8x4_t __ret; \ - int8x8x4_t __s1 = __p1; \ - int8x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 0); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ @@ -13565,22 +13650,15 @@ __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef 
__LITTLE_ENDIAN__ -#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ +#define vld1_f32_x4(__p0) __extension__ ({ \ float32x2x4_t __ret; \ - float32x2x4_t __s1 = __p1; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \ __ret; \ }) #else -#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ +#define vld1_f32_x4(__p0) __extension__ ({ \ float32x2x4_t __ret; \ - float32x2x4_t __s1 = __p1; \ - float32x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 9); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ @@ -13591,22 +13669,15 @@ __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ +#define vld1_s32_x4(__p0) __extension__ ({ \ int32x2x4_t __ret; \ - int32x2x4_t __s1 = __p1; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \ __ret; \ }) #else -#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ +#define vld1_s32_x4(__p0) __extension__ ({ \ int32x2x4_t __ret; \ - int32x2x4_t __s1 = __p1; \ - int32x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 2); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ @@ -13616,23 +13687,21 @@ __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { }) #endif +#define vld1_s64_x4(__p0) __extension__ ({ \ + int64x1x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 3); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ +#define vld1_s16_x4(__p0) __extension__ ({ \ int16x4x4_t __ret; \ - int16x4x4_t __s1 = __p1; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \ __ret; \ }) #else -#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ +#define vld1_s16_x4(__p0) __extension__ ({ \ int16x4x4_t __ret; \ - int16x4x4_t __s1 = __p1; \ - int16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 1); \ \ __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ __ret.val[1] 
= __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ @@ -13643,25692 +13712,25274 @@ __ai int16x4_t vhsub_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} +#define vld2_p8(__p0) __extension__ ({ \ + poly8x8x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 4); \ + __ret; \ +}) #else -__ai uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld2_p8(__p0) __extension__ ({ \ + poly8x8x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} +#define vld2_p16(__p0) __extension__ ({ \ + poly16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 5); \ + __ret; \ +}) #else -__ai uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) 
__builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld2_p16(__p0) __extension__ ({ \ + poly16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} +#define vld2q_p8(__p0) __extension__ ({ \ + poly8x16x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 36); \ + __ret; \ +}) #else -__ai uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld2q_p8(__p0) __extension__ ({ \ + poly8x16x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} +#define vld2q_p16(__p0) __extension__ ({ \ + poly16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 37); \ + __ret; \ +}) #else -__ai int8x16_t 
vmaxq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld2q_p16(__p0) __extension__ ({ \ + poly16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} +#define vld2q_u8(__p0) __extension__ ({ \ + uint8x16x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 48); \ + __ret; \ +}) #else -__ai float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld2q_u8(__p0) __extension__ ({ \ + uint8x16x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) 
#endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} +#define vld2q_u32(__p0) __extension__ ({ \ + uint32x4x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 50); \ + __ret; \ +}) #else -__ai int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} -#else -__ai int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld2q_u32(__p0) __extension__ ({ \ + uint32x4x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} +#define vld2q_u16(__p0) __extension__ ({ \ + uint16x8x2_t __ret; \ + 
__builtin_neon_vld2q_v(&__ret, __p0, 49); \ + __ret; \ +}) #else -__ai uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld2q_u16(__p0) __extension__ ({ \ + uint16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} +#define vld2q_s8(__p0) __extension__ ({ \ + int8x16x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 32); \ + __ret; \ +}) #else -__ai uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld2q_s8(__p0) __extension__ ({ \ + int8x16x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t 
vmax_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} +#define vld2q_f32(__p0) __extension__ ({ \ + float32x4x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 41); \ + __ret; \ +}) #else -__ai uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld2q_f32(__p0) __extension__ ({ \ + float32x4x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} +#define vld2q_s32(__p0) __extension__ ({ \ + int32x4x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 34); \ + __ret; \ +}) #else -__ai int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld2q_s32(__p0) __extension__ ({ \ + int32x4x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} +#define vld2q_s16(__p0) __extension__ ({ \ + int16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 33); \ + __ret; \ +}) #else -__ai float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld2q_s16(__p0) __extension__ ({ \ + int16x8x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} +#define vld2_u8(__p0) __extension__ ({ \ + uint8x8x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 16); \ + __ret; \ +}) #else -__ai int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld2_u8(__p0) __extension__ ({ \ + uint8x8x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 16); \ + \ + 
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} +#define vld2_u32(__p0) __extension__ ({ \ + uint32x2x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 18); \ + __ret; \ +}) #else -__ai int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld2_u32(__p0) __extension__ ({ \ + uint32x2x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) #endif +#define vld2_u64(__p0) __extension__ ({ \ + uint64x1x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 19); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} +#define vld2_u16(__p0) __extension__ ({ \ + uint16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 17); \ + __ret; \ +}) #else -__ai uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld2_u16(__p0) __extension__ ({ \ + uint16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} +#define vld2_s8(__p0) __extension__ ({ \ + int8x8x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 0); \ + __ret; \ +}) #else -__ai uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld2_s8(__p0) __extension__ ({ \ + int8x8x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} +#define vld2_f32(__p0) __extension__ ({ \ + float32x2x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 9); \ + __ret; \ +}) #else -__ai uint16x8_t 
vminq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld2_f32(__p0) __extension__ ({ \ + float32x2x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} +#define vld2_s32(__p0) __extension__ ({ \ + int32x2x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 2); \ + __ret; \ +}) #else -__ai int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld2_s32(__p0) __extension__ ({ \ + int32x2x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) #endif +#define vld2_s64(__p0) __extension__ ({ \ + int64x1x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 3); \ + __ret; 
\ +}) #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} +#define vld2_s16(__p0) __extension__ ({ \ + int16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 1); \ + __ret; \ +}) #else -__ai float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld2_s16(__p0) __extension__ ({ \ + int16x4x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} +#define vld2_dup_p8(__p0) __extension__ ({ \ + poly8x8x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \ + __ret; \ +}) #else -__ai int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld2_dup_p8(__p0) __extension__ ({ \ + poly8x8x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], 
__ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} +#define vld2_dup_p16(__p0) __extension__ ({ \ + poly16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \ + __ret; \ +}) #else -__ai int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld2_dup_p16(__p0) __extension__ ({ \ + poly16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} +#define vld2q_dup_p8(__p0) __extension__ ({ \ + poly8x16x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \ + __ret; \ +}) #else -__ai uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 
5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld2q_dup_p8(__p0) __extension__ ({ \ + poly8x16x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} +#define vld2q_dup_p16(__p0) __extension__ ({ \ + poly16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \ + __ret; \ +}) #else -__ai uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld2q_dup_p16(__p0) __extension__ ({ \ + poly16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} +#define vld2q_dup_u8(__p0) __extension__ ({ \ + uint8x16x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \ + __ret; \ +}) #else -__ai uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld2q_dup_u8(__p0) __extension__ ({ \ + uint8x16x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} +#define vld2q_dup_u32(__p0) __extension__ ({ \ + uint32x4x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \ + __ret; \ +}) #else -__ai int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld2q_dup_u32(__p0) __extension__ ({ \ + uint32x4x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} +#define 
vld2q_dup_u64(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \ + __ret; \ +}) #else -__ai float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld2q_dup_u64(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} +#define vld2q_dup_u16(__p0) __extension__ ({ \ + uint16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \ + __ret; \ +}) #else -__ai int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld2q_dup_u16(__p0) __extension__ ({ \ + uint16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; 
- __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} -#else -__ai int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint8x16_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} -#else -__ai uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} -#else -__ai uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ 
-__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint16x8_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} -#else -__ai uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { - int8x16_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} -#else -__ai int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} +#define vld2q_dup_s8(__p0) __extension__ ({ \ + int8x16x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \ + __ret; \ +}) #else -__ai float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 
= __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld2q_dup_s8(__p0) __extension__ ({ \ + int8x16x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} +#define vld2q_dup_f32(__p0) __extension__ ({ \ + float32x4x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \ + __ret; \ +}) #else -__ai int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld2q_dup_f32(__p0) __extension__ ({ \ + float32x4x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} +#define vld2q_dup_s32(__p0) __extension__ ({ \ + int32x4x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, 
__p0, 34); \ + __ret; \ +}) #else -__ai int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld2q_dup_s32(__p0) __extension__ ({ \ + int32x4x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} +#define vld2q_dup_s64(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \ + __ret; \ +}) #else -__ai uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld2q_dup_s64(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t 
vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { - uint32x2_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} +#define vld2q_dup_s16(__p0) __extension__ ({ \ + int16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \ + __ret; \ +}) #else -__ai uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld2q_dup_s16(__p0) __extension__ ({ \ + int16x8x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { - uint16x4_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} +#define vld2_dup_u8(__p0) __extension__ ({ \ + uint8x8x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \ + __ret; \ +}) #else -__ai uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld2_dup_u8(__p0) __extension__ ({ \ + uint8x8x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 
6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int8x8_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} +#define vld2_dup_u32(__p0) __extension__ ({ \ + uint32x2x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \ + __ret; \ +}) #else -__ai int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld2_dup_u32(__p0) __extension__ ({ \ + uint32x2x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) #endif +#define vld2_dup_u64(__p0) __extension__ ({ \ + uint64x1x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 19); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} +#define vld2_dup_u16(__p0) __extension__ ({ \ + uint16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \ + __ret; \ +}) #else -__ai float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, 
__p2, 1, 0); - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld2_dup_u16(__p0) __extension__ ({ \ + uint16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} +#define vld2_dup_s8(__p0) __extension__ ({ \ + int8x8x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \ + __ret; \ +}) #else -__ai int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld2_dup_s8(__p0) __extension__ ({ \ + int8x8x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} +#define vld2_dup_f32(__p0) __extension__ ({ \ + float32x2x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \ + __ret; \ +}) #else -__ai int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; 
__rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld2_dup_f32(__p0) __extension__ ({ \ + float32x2x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlaq_lane_u32(__p0_46, __p1_46, __p2_46, __p3_46) __extension__ ({ \ - uint32x4_t __ret_46; \ - uint32x4_t __s0_46 = __p0_46; \ - uint32x4_t __s1_46 = __p1_46; \ - uint32x2_t __s2_46 = __p2_46; \ - __ret_46 = __s0_46 + __s1_46 * splatq_lane_u32(__s2_46, __p3_46); \ - __ret_46; \ +#define vld2_dup_s32(__p0) __extension__ ({ \ + int32x2x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \ + __ret; \ }) #else -#define vmlaq_lane_u32(__p0_47, __p1_47, __p2_47, __p3_47) __extension__ ({ \ - uint32x4_t __ret_47; \ - uint32x4_t __s0_47 = __p0_47; \ - uint32x4_t __s1_47 = __p1_47; \ - uint32x2_t __s2_47 = __p2_47; \ - uint32x4_t __rev0_47; __rev0_47 = __builtin_shufflevector(__s0_47, __s0_47, 3, 2, 1, 0); \ - uint32x4_t __rev1_47; __rev1_47 = __builtin_shufflevector(__s1_47, __s1_47, 3, 2, 1, 0); \ - uint32x2_t __rev2_47; __rev2_47 = __builtin_shufflevector(__s2_47, __s2_47, 1, 0); \ - __ret_47 = __rev0_47 + __rev1_47 * __noswap_splatq_lane_u32(__rev2_47, __p3_47); \ - __ret_47 = __builtin_shufflevector(__ret_47, __ret_47, 3, 2, 1, 0); \ - __ret_47; \ +#define vld2_dup_s32(__p0) __extension__ ({ \ + int32x2x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ }) #endif +#define vld2_dup_s64(__p0) __extension__ 
({ \ + int64x1x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 3); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vmlaq_lane_u16(__p0_48, __p1_48, __p2_48, __p3_48) __extension__ ({ \ - uint16x8_t __ret_48; \ - uint16x8_t __s0_48 = __p0_48; \ - uint16x8_t __s1_48 = __p1_48; \ - uint16x4_t __s2_48 = __p2_48; \ - __ret_48 = __s0_48 + __s1_48 * splatq_lane_u16(__s2_48, __p3_48); \ - __ret_48; \ +#define vld2_dup_s16(__p0) __extension__ ({ \ + int16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \ + __ret; \ }) #else -#define vmlaq_lane_u16(__p0_49, __p1_49, __p2_49, __p3_49) __extension__ ({ \ - uint16x8_t __ret_49; \ - uint16x8_t __s0_49 = __p0_49; \ - uint16x8_t __s1_49 = __p1_49; \ - uint16x4_t __s2_49 = __p2_49; \ - uint16x8_t __rev0_49; __rev0_49 = __builtin_shufflevector(__s0_49, __s0_49, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_49; __rev1_49 = __builtin_shufflevector(__s1_49, __s1_49, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev2_49; __rev2_49 = __builtin_shufflevector(__s2_49, __s2_49, 3, 2, 1, 0); \ - __ret_49 = __rev0_49 + __rev1_49 * __noswap_splatq_lane_u16(__rev2_49, __p3_49); \ - __ret_49 = __builtin_shufflevector(__ret_49, __ret_49, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_49; \ +#define vld2_dup_s16(__p0) __extension__ ({ \ + int16x4x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlaq_lane_f32(__p0_50, __p1_50, __p2_50, __p3_50) __extension__ ({ \ - float32x4_t __ret_50; \ - float32x4_t __s0_50 = __p0_50; \ - float32x4_t __s1_50 = __p1_50; \ - float32x2_t __s2_50 = __p2_50; \ - __ret_50 = __s0_50 + __s1_50 * splatq_lane_f32(__s2_50, __p3_50); \ - __ret_50; \ +#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x2_t __ret; \ + poly8x8x2_t __s1 = __p1; \ + 
__builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \ + __ret; \ }) #else -#define vmlaq_lane_f32(__p0_51, __p1_51, __p2_51, __p3_51) __extension__ ({ \ - float32x4_t __ret_51; \ - float32x4_t __s0_51 = __p0_51; \ - float32x4_t __s1_51 = __p1_51; \ - float32x2_t __s2_51 = __p2_51; \ - float32x4_t __rev0_51; __rev0_51 = __builtin_shufflevector(__s0_51, __s0_51, 3, 2, 1, 0); \ - float32x4_t __rev1_51; __rev1_51 = __builtin_shufflevector(__s1_51, __s1_51, 3, 2, 1, 0); \ - float32x2_t __rev2_51; __rev2_51 = __builtin_shufflevector(__s2_51, __s2_51, 1, 0); \ - __ret_51 = __rev0_51 + __rev1_51 * __noswap_splatq_lane_f32(__rev2_51, __p3_51); \ - __ret_51 = __builtin_shufflevector(__ret_51, __ret_51, 3, 2, 1, 0); \ - __ret_51; \ +#define vld2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x2_t __ret; \ + poly8x8x2_t __s1 = __p1; \ + poly8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlaq_lane_s32(__p0_52, __p1_52, __p2_52, __p3_52) __extension__ ({ \ - int32x4_t __ret_52; \ - int32x4_t __s0_52 = __p0_52; \ - int32x4_t __s1_52 = __p1_52; \ - int32x2_t __s2_52 = __p2_52; \ - __ret_52 = __s0_52 + __s1_52 * splatq_lane_s32(__s2_52, __p3_52); \ - __ret_52; \ +#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x2_t __ret; \ + poly16x4x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \ + __ret; \ }) #else -#define 
vmlaq_lane_s32(__p0_53, __p1_53, __p2_53, __p3_53) __extension__ ({ \ - int32x4_t __ret_53; \ - int32x4_t __s0_53 = __p0_53; \ - int32x4_t __s1_53 = __p1_53; \ - int32x2_t __s2_53 = __p2_53; \ - int32x4_t __rev0_53; __rev0_53 = __builtin_shufflevector(__s0_53, __s0_53, 3, 2, 1, 0); \ - int32x4_t __rev1_53; __rev1_53 = __builtin_shufflevector(__s1_53, __s1_53, 3, 2, 1, 0); \ - int32x2_t __rev2_53; __rev2_53 = __builtin_shufflevector(__s2_53, __s2_53, 1, 0); \ - __ret_53 = __rev0_53 + __rev1_53 * __noswap_splatq_lane_s32(__rev2_53, __p3_53); \ - __ret_53 = __builtin_shufflevector(__ret_53, __ret_53, 3, 2, 1, 0); \ - __ret_53; \ +#define vld2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x2_t __ret; \ + poly16x4x2_t __s1 = __p1; \ + poly16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlaq_lane_s16(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \ - int16x8_t __ret_54; \ - int16x8_t __s0_54 = __p0_54; \ - int16x8_t __s1_54 = __p1_54; \ - int16x4_t __s2_54 = __p2_54; \ - __ret_54 = __s0_54 + __s1_54 * splatq_lane_s16(__s2_54, __p3_54); \ - __ret_54; \ +#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x2_t __ret; \ + poly16x8x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \ + __ret; \ }) #else -#define vmlaq_lane_s16(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \ - int16x8_t __ret_55; \ - int16x8_t __s0_55 = __p0_55; \ - int16x8_t __s1_55 = __p1_55; \ - int16x4_t __s2_55 = __p2_55; \ - 
int16x8_t __rev0_55; __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_55; __rev1_55 = __builtin_shufflevector(__s1_55, __s1_55, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_55; __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 3, 2, 1, 0); \ - __ret_55 = __rev0_55 + __rev1_55 * __noswap_splatq_lane_s16(__rev2_55, __p3_55); \ - __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_55; \ +#define vld2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x2_t __ret; \ + poly16x8x2_t __s1 = __p1; \ + poly16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmla_lane_u32(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \ - uint32x2_t __ret_56; \ - uint32x2_t __s0_56 = __p0_56; \ - uint32x2_t __s1_56 = __p1_56; \ - uint32x2_t __s2_56 = __p2_56; \ - __ret_56 = __s0_56 + __s1_56 * splat_lane_u32(__s2_56, __p3_56); \ - __ret_56; \ +#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x2_t __ret; \ + uint32x4x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \ + __ret; \ }) #else -#define vmla_lane_u32(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \ - uint32x2_t __ret_57; \ - uint32x2_t __s0_57 = __p0_57; \ - uint32x2_t __s1_57 = __p1_57; \ - uint32x2_t __s2_57 = __p2_57; \ - uint32x2_t __rev0_57; __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 1, 0); \ - uint32x2_t 
__rev1_57; __rev1_57 = __builtin_shufflevector(__s1_57, __s1_57, 1, 0); \ - uint32x2_t __rev2_57; __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 1, 0); \ - __ret_57 = __rev0_57 + __rev1_57 * __noswap_splat_lane_u32(__rev2_57, __p3_57); \ - __ret_57 = __builtin_shufflevector(__ret_57, __ret_57, 1, 0); \ - __ret_57; \ +#define vld2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x2_t __ret; \ + uint32x4x2_t __s1 = __p1; \ + uint32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmla_lane_u16(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \ - uint16x4_t __ret_58; \ - uint16x4_t __s0_58 = __p0_58; \ - uint16x4_t __s1_58 = __p1_58; \ - uint16x4_t __s2_58 = __p2_58; \ - __ret_58 = __s0_58 + __s1_58 * splat_lane_u16(__s2_58, __p3_58); \ - __ret_58; \ +#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x2_t __ret; \ + uint16x8x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \ + __ret; \ }) #else -#define vmla_lane_u16(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \ - uint16x4_t __ret_59; \ - uint16x4_t __s0_59 = __p0_59; \ - uint16x4_t __s1_59 = __p1_59; \ - uint16x4_t __s2_59 = __p2_59; \ - uint16x4_t __rev0_59; __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 3, 2, 1, 0); \ - uint16x4_t __rev1_59; __rev1_59 = __builtin_shufflevector(__s1_59, __s1_59, 3, 2, 1, 0); \ - uint16x4_t __rev2_59; __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 3, 2, 1, 0); \ - __ret_59 = __rev0_59 + 
__rev1_59 * __noswap_splat_lane_u16(__rev2_59, __p3_59); \ - __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 3, 2, 1, 0); \ - __ret_59; \ +#define vld2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x2_t __ret; \ + uint16x8x2_t __s1 = __p1; \ + uint16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmla_lane_f32(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \ - float32x2_t __ret_60; \ - float32x2_t __s0_60 = __p0_60; \ - float32x2_t __s1_60 = __p1_60; \ - float32x2_t __s2_60 = __p2_60; \ - __ret_60 = __s0_60 + __s1_60 * splat_lane_f32(__s2_60, __p3_60); \ - __ret_60; \ +#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x2_t __ret; \ + float32x4x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \ + __ret; \ }) #else -#define vmla_lane_f32(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \ - float32x2_t __ret_61; \ - float32x2_t __s0_61 = __p0_61; \ - float32x2_t __s1_61 = __p1_61; \ - float32x2_t __s2_61 = __p2_61; \ - float32x2_t __rev0_61; __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 1, 0); \ - float32x2_t __rev1_61; __rev1_61 = __builtin_shufflevector(__s1_61, __s1_61, 1, 0); \ - float32x2_t __rev2_61; __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 1, 0); \ - __ret_61 = __rev0_61 + __rev1_61 * __noswap_splat_lane_f32(__rev2_61, __p3_61); \ - __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 1, 0); \ - __ret_61; \ 
+#define vld2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x2_t __ret; \ + float32x4x2_t __s1 = __p1; \ + float32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmla_lane_s32(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \ - int32x2_t __ret_62; \ - int32x2_t __s0_62 = __p0_62; \ - int32x2_t __s1_62 = __p1_62; \ - int32x2_t __s2_62 = __p2_62; \ - __ret_62 = __s0_62 + __s1_62 * splat_lane_s32(__s2_62, __p3_62); \ - __ret_62; \ +#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x2_t __ret; \ + int32x4x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \ + __ret; \ }) #else -#define vmla_lane_s32(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \ - int32x2_t __ret_63; \ - int32x2_t __s0_63 = __p0_63; \ - int32x2_t __s1_63 = __p1_63; \ - int32x2_t __s2_63 = __p2_63; \ - int32x2_t __rev0_63; __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 1, 0); \ - int32x2_t __rev1_63; __rev1_63 = __builtin_shufflevector(__s1_63, __s1_63, 1, 0); \ - int32x2_t __rev2_63; __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 1, 0); \ - __ret_63 = __rev0_63 + __rev1_63 * __noswap_splat_lane_s32(__rev2_63, __p3_63); \ - __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 1, 0); \ - __ret_63; \ +#define vld2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x2_t __ret; \ + int32x4x2_t __s1 = __p1; \ + int32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); 
\ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmla_lane_s16(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \ - int16x4_t __ret_64; \ - int16x4_t __s0_64 = __p0_64; \ - int16x4_t __s1_64 = __p1_64; \ - int16x4_t __s2_64 = __p2_64; \ - __ret_64 = __s0_64 + __s1_64 * splat_lane_s16(__s2_64, __p3_64); \ - __ret_64; \ +#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x2_t __ret; \ + int16x8x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \ + __ret; \ }) #else -#define vmla_lane_s16(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \ - int16x4_t __ret_65; \ - int16x4_t __s0_65 = __p0_65; \ - int16x4_t __s1_65 = __p1_65; \ - int16x4_t __s2_65 = __p2_65; \ - int16x4_t __rev0_65; __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 3, 2, 1, 0); \ - int16x4_t __rev1_65; __rev1_65 = __builtin_shufflevector(__s1_65, __s1_65, 3, 2, 1, 0); \ - int16x4_t __rev2_65; __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 3, 2, 1, 0); \ - __ret_65 = __rev0_65 + __rev1_65 * __noswap_splat_lane_s16(__rev2_65, __p3_65); \ - __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 3, 2, 1, 0); \ - __ret_65; \ +#define vld2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x2_t __ret; \ + int16x8x2_t __s1 = __p1; \ + int16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], 
(int8x16_t)__rev1.val[1], __p2, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { - uint32x4_t __ret; - __ret = __p0 + __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; - return __ret; -} +#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x2_t __ret; \ + uint8x8x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \ + __ret; \ +}) #else -__ai uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 + __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x2_t __ret; \ + uint8x8x2_t __s1 = __p1; \ + uint8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { - uint16x8_t __ret; - __ret = __p0 + __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; - return __ret; -} +#define 
vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x2_t __ret; \ + uint32x2x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \ + __ret; \ +}) #else -__ai uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 + __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x2_t __ret; \ + uint32x2x2_t __s1 = __p1; \ + uint32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { - float32x4_t __ret; - __ret = __p0 + __p1 * (float32x4_t) {__p2, __p2, __p2, __p2}; - return __ret; -} +#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x2_t __ret; \ + uint16x4x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \ + __ret; \ +}) #else -__ai float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = 
__rev0 + __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x2_t __ret; \ + uint16x4x2_t __s1 = __p1; \ + uint16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { - int32x4_t __ret; - __ret = __p0 + __p1 * (int32x4_t) {__p2, __p2, __p2, __p2}; - return __ret; -} +#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x2_t __ret; \ + int8x8x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \ + __ret; \ +}) #else -__ai int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 + __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x2_t __ret; \ + int8x8x2_t __s1 = __p1; \ + int8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \ 
+ \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { - int16x8_t __ret; - __ret = __p0 + __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; - return __ret; -} +#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x2_t __ret; \ + float32x2x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \ + __ret; \ +}) #else -__ai int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 + __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x2_t __ret; \ + float32x2x2_t __s1 = __p1; \ + float32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { - uint32x2_t __ret; - __ret = __p0 + __p1 * (uint32x2_t) {__p2, __p2}; - return __ret; -} +#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x2_t __ret; \ + 
int32x2x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \ + __ret; \ +}) #else -__ai uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 + __rev1 * (uint32x2_t) {__p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x2_t __ret; \ + int32x2x2_t __s1 = __p1; \ + int32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { - uint16x4_t __ret; - __ret = __p0 + __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; - return __ret; -} +#define vld2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x2_t __ret; \ + int16x4x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \ + __ret; \ +}) #else -__ai uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 + __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld2_lane_s16(__p0, __p1, __p2) __extension__ 
({ \ + int16x4x2_t __ret; \ + int16x4x2_t __s1 = __p1; \ + int16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { - float32x2_t __ret; - __ret = __p0 + __p1 * (float32x2_t) {__p2, __p2}; - return __ret; -} +#define vld3_p8(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 4); \ + __ret; \ +}) #else -__ai float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 + __rev1 * (float32x2_t) {__p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld3_p8(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { - int32x2_t __ret; - __ret = __p0 + __p1 * (int32x2_t) {__p2, __p2}; - return __ret; -} +#define vld3_p16(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 5); \ + __ret; \ +}) 
#else -__ai int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 + __rev1 * (int32x2_t) {__p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld3_p16(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { - int16x4_t __ret; - __ret = __p0 + __p1 * (int16x4_t) {__p2, __p2, __p2, __p2}; - return __ret; -} +#define vld3q_p8(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 36); \ + __ret; \ +}) #else -__ai int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 + __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld3q_p8(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ 
+}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint8x16_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} +#define vld3q_p16(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 37); \ + __ret; \ +}) #else -__ai uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld3q_p16(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} +#define vld3q_u8(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 48); \ + __ret; \ +}) #else -__ai uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 
3, 2, 1, 0); - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld3q_u8(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint16x8_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} +#define vld3q_u32(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 50); \ + __ret; \ +}) #else -__ai uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld3q_u32(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { - int8x16_t __ret; - __ret = __p0 - __p1 * __p2; - 
return __ret; -} +#define vld3q_u16(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 49); \ + __ret; \ +}) #else -__ai int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld3q_u16(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} +#define vld3q_s8(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 32); \ + __ret; \ +}) #else -__ai float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld3q_s8(__p0) 
__extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} +#define vld3q_f32(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 41); \ + __ret; \ +}) #else -__ai int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld3q_f32(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} +#define vld3q_s32(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 34); \ + __ret; \ +}) #else -__ai int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, 
int16x8_t __p2) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld3q_s32(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} +#define vld3q_s16(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 33); \ + __ret; \ +}) #else -__ai uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld3q_s16(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 
6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { - uint32x2_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} +#define vld3_u8(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 16); \ + __ret; \ +}) #else -__ai uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld3_u8(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { - uint16x4_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} +#define vld3_u32(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 18); \ + __ret; \ +}) #else -__ai uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld3_u32(__p0) __extension__ 
({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) #endif +#define vld3_u64(__p0) __extension__ ({ \ + uint64x1x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 19); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int8x8_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} +#define vld3_u16(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 17); \ + __ret; \ +}) #else -__ai int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld3_u16(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} +#define vld3_s8(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 0); \ + __ret; \ +}) #else -__ai float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, 
float32x2_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld3_s8(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} +#define vld3_f32(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 9); \ + __ret; \ +}) #else -__ai int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld3_f32(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t 
__ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} +#define vld3_s32(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 2); \ + __ret; \ +}) #else -__ai int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld3_s32(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) #endif +#define vld3_s64(__p0) __extension__ ({ \ + int64x1x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 3); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vmlsq_lane_u32(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \ - uint32x4_t __ret_66; \ - uint32x4_t __s0_66 = __p0_66; \ - uint32x4_t __s1_66 = __p1_66; \ - uint32x2_t __s2_66 = __p2_66; \ - __ret_66 = __s0_66 - __s1_66 * splatq_lane_u32(__s2_66, __p3_66); \ - __ret_66; \ +#define vld3_s16(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 1); \ + __ret; \ }) #else -#define vmlsq_lane_u32(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \ - uint32x4_t __ret_67; \ - uint32x4_t __s0_67 = __p0_67; \ - uint32x4_t __s1_67 = __p1_67; \ - uint32x2_t __s2_67 = __p2_67; \ - uint32x4_t __rev0_67; __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 3, 2, 1, 0); \ - uint32x4_t __rev1_67; __rev1_67 = __builtin_shufflevector(__s1_67, __s1_67, 3, 2, 1, 0); \ - uint32x2_t __rev2_67; __rev2_67 = 
__builtin_shufflevector(__s2_67, __s2_67, 1, 0); \ - __ret_67 = __rev0_67 - __rev1_67 * __noswap_splatq_lane_u32(__rev2_67, __p3_67); \ - __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 3, 2, 1, 0); \ - __ret_67; \ +#define vld3_s16(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsq_lane_u16(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \ - uint16x8_t __ret_68; \ - uint16x8_t __s0_68 = __p0_68; \ - uint16x8_t __s1_68 = __p1_68; \ - uint16x4_t __s2_68 = __p2_68; \ - __ret_68 = __s0_68 - __s1_68 * splatq_lane_u16(__s2_68, __p3_68); \ - __ret_68; \ +#define vld3_dup_p8(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \ + __ret; \ }) #else -#define vmlsq_lane_u16(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \ - uint16x8_t __ret_69; \ - uint16x8_t __s0_69 = __p0_69; \ - uint16x8_t __s1_69 = __p1_69; \ - uint16x4_t __s2_69 = __p2_69; \ - uint16x8_t __rev0_69; __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_69; __rev1_69 = __builtin_shufflevector(__s1_69, __s1_69, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev2_69; __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 3, 2, 1, 0); \ - __ret_69 = __rev0_69 - __rev1_69 * __noswap_splatq_lane_u16(__rev2_69, __p3_69); \ - __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_69; \ +#define vld3_dup_p8(__p0) __extension__ ({ \ + poly8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsq_lane_f32(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \ - float32x4_t __ret_70; \ - float32x4_t __s0_70 = __p0_70; \ - float32x4_t __s1_70 = __p1_70; \ - float32x2_t __s2_70 = __p2_70; \ - __ret_70 = __s0_70 - __s1_70 * splatq_lane_f32(__s2_70, __p3_70); \ - __ret_70; \ +#define vld3_dup_p16(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \ + __ret; \ }) #else -#define vmlsq_lane_f32(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \ - float32x4_t __ret_71; \ - float32x4_t __s0_71 = __p0_71; \ - float32x4_t __s1_71 = __p1_71; \ - float32x2_t __s2_71 = __p2_71; \ - float32x4_t __rev0_71; __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 3, 2, 1, 0); \ - float32x4_t __rev1_71; __rev1_71 = __builtin_shufflevector(__s1_71, __s1_71, 3, 2, 1, 0); \ - float32x2_t __rev2_71; __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 1, 0); \ - __ret_71 = __rev0_71 - __rev1_71 * __noswap_splatq_lane_f32(__rev2_71, __p3_71); \ - __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 3, 2, 1, 0); \ - __ret_71; \ +#define vld3_dup_p16(__p0) __extension__ ({ \ + poly16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsq_lane_s32(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \ - int32x4_t __ret_72; \ - int32x4_t __s0_72 = __p0_72; \ - int32x4_t __s1_72 = __p1_72; \ - int32x2_t __s2_72 = __p2_72; \ - __ret_72 = __s0_72 - __s1_72 * splatq_lane_s32(__s2_72, 
__p3_72); \ - __ret_72; \ +#define vld3q_dup_p8(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \ + __ret; \ }) #else -#define vmlsq_lane_s32(__p0_73, __p1_73, __p2_73, __p3_73) __extension__ ({ \ - int32x4_t __ret_73; \ - int32x4_t __s0_73 = __p0_73; \ - int32x4_t __s1_73 = __p1_73; \ - int32x2_t __s2_73 = __p2_73; \ - int32x4_t __rev0_73; __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 3, 2, 1, 0); \ - int32x4_t __rev1_73; __rev1_73 = __builtin_shufflevector(__s1_73, __s1_73, 3, 2, 1, 0); \ - int32x2_t __rev2_73; __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 1, 0); \ - __ret_73 = __rev0_73 - __rev1_73 * __noswap_splatq_lane_s32(__rev2_73, __p3_73); \ - __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 3, 2, 1, 0); \ - __ret_73; \ +#define vld3q_dup_p8(__p0) __extension__ ({ \ + poly8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsq_lane_s16(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \ - int16x8_t __ret_74; \ - int16x8_t __s0_74 = __p0_74; \ - int16x8_t __s1_74 = __p1_74; \ - int16x4_t __s2_74 = __p2_74; \ - __ret_74 = __s0_74 - __s1_74 * splatq_lane_s16(__s2_74, __p3_74); \ - __ret_74; \ +#define vld3q_dup_p16(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \ + __ret; \ }) #else -#define vmlsq_lane_s16(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \ - int16x8_t __ret_75; \ - int16x8_t __s0_75 = __p0_75; \ - int16x8_t __s1_75 = __p1_75; \ - int16x4_t __s2_75 = __p2_75; \ - 
int16x8_t __rev0_75; __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_75; __rev1_75 = __builtin_shufflevector(__s1_75, __s1_75, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_75; __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 3, 2, 1, 0); \ - __ret_75 = __rev0_75 - __rev1_75 * __noswap_splatq_lane_s16(__rev2_75, __p3_75); \ - __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_75; \ +#define vld3q_dup_p16(__p0) __extension__ ({ \ + poly16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmls_lane_u32(__p0_76, __p1_76, __p2_76, __p3_76) __extension__ ({ \ - uint32x2_t __ret_76; \ - uint32x2_t __s0_76 = __p0_76; \ - uint32x2_t __s1_76 = __p1_76; \ - uint32x2_t __s2_76 = __p2_76; \ - __ret_76 = __s0_76 - __s1_76 * splat_lane_u32(__s2_76, __p3_76); \ - __ret_76; \ +#define vld3q_dup_u8(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \ + __ret; \ }) #else -#define vmls_lane_u32(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \ - uint32x2_t __ret_77; \ - uint32x2_t __s0_77 = __p0_77; \ - uint32x2_t __s1_77 = __p1_77; \ - uint32x2_t __s2_77 = __p2_77; \ - uint32x2_t __rev0_77; __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 1, 0); \ - uint32x2_t __rev1_77; __rev1_77 = __builtin_shufflevector(__s1_77, __s1_77, 1, 0); \ - uint32x2_t __rev2_77; __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 1, 0); \ - __ret_77 = __rev0_77 - __rev1_77 * __noswap_splat_lane_u32(__rev2_77, __p3_77); \ - __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 1, 0); \ - __ret_77; \ 
+#define vld3q_dup_u8(__p0) __extension__ ({ \ + uint8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmls_lane_u16(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \ - uint16x4_t __ret_78; \ - uint16x4_t __s0_78 = __p0_78; \ - uint16x4_t __s1_78 = __p1_78; \ - uint16x4_t __s2_78 = __p2_78; \ - __ret_78 = __s0_78 - __s1_78 * splat_lane_u16(__s2_78, __p3_78); \ - __ret_78; \ +#define vld3q_dup_u32(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \ + __ret; \ }) #else -#define vmls_lane_u16(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \ - uint16x4_t __ret_79; \ - uint16x4_t __s0_79 = __p0_79; \ - uint16x4_t __s1_79 = __p1_79; \ - uint16x4_t __s2_79 = __p2_79; \ - uint16x4_t __rev0_79; __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 3, 2, 1, 0); \ - uint16x4_t __rev1_79; __rev1_79 = __builtin_shufflevector(__s1_79, __s1_79, 3, 2, 1, 0); \ - uint16x4_t __rev2_79; __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 3, 2, 1, 0); \ - __ret_79 = __rev0_79 - __rev1_79 * __noswap_splat_lane_u16(__rev2_79, __p3_79); \ - __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 3, 2, 1, 0); \ - __ret_79; \ +#define vld3q_dup_u32(__p0) __extension__ ({ \ + uint32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 
2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmls_lane_f32(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \ - float32x2_t __ret_80; \ - float32x2_t __s0_80 = __p0_80; \ - float32x2_t __s1_80 = __p1_80; \ - float32x2_t __s2_80 = __p2_80; \ - __ret_80 = __s0_80 - __s1_80 * splat_lane_f32(__s2_80, __p3_80); \ - __ret_80; \ +#define vld3q_dup_u64(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \ + __ret; \ }) #else -#define vmls_lane_f32(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \ - float32x2_t __ret_81; \ - float32x2_t __s0_81 = __p0_81; \ - float32x2_t __s1_81 = __p1_81; \ - float32x2_t __s2_81 = __p2_81; \ - float32x2_t __rev0_81; __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 1, 0); \ - float32x2_t __rev1_81; __rev1_81 = __builtin_shufflevector(__s1_81, __s1_81, 1, 0); \ - float32x2_t __rev2_81; __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 1, 0); \ - __ret_81 = __rev0_81 - __rev1_81 * __noswap_splat_lane_f32(__rev2_81, __p3_81); \ - __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 1, 0); \ - __ret_81; \ +#define vld3q_dup_u64(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmls_lane_s32(__p0_82, __p1_82, __p2_82, __p3_82) __extension__ ({ \ - int32x2_t __ret_82; \ - int32x2_t __s0_82 = __p0_82; \ - int32x2_t __s1_82 = __p1_82; \ - int32x2_t __s2_82 = __p2_82; \ - __ret_82 = __s0_82 - __s1_82 * splat_lane_s32(__s2_82, __p3_82); \ - __ret_82; \ +#define vld3q_dup_u16(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \ + __ret; \ }) #else -#define vmls_lane_s32(__p0_83, 
__p1_83, __p2_83, __p3_83) __extension__ ({ \ - int32x2_t __ret_83; \ - int32x2_t __s0_83 = __p0_83; \ - int32x2_t __s1_83 = __p1_83; \ - int32x2_t __s2_83 = __p2_83; \ - int32x2_t __rev0_83; __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 1, 0); \ - int32x2_t __rev1_83; __rev1_83 = __builtin_shufflevector(__s1_83, __s1_83, 1, 0); \ - int32x2_t __rev2_83; __rev2_83 = __builtin_shufflevector(__s2_83, __s2_83, 1, 0); \ - __ret_83 = __rev0_83 - __rev1_83 * __noswap_splat_lane_s32(__rev2_83, __p3_83); \ - __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 1, 0); \ - __ret_83; \ +#define vld3q_dup_u16(__p0) __extension__ ({ \ + uint16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmls_lane_s16(__p0_84, __p1_84, __p2_84, __p3_84) __extension__ ({ \ - int16x4_t __ret_84; \ - int16x4_t __s0_84 = __p0_84; \ - int16x4_t __s1_84 = __p1_84; \ - int16x4_t __s2_84 = __p2_84; \ - __ret_84 = __s0_84 - __s1_84 * splat_lane_s16(__s2_84, __p3_84); \ - __ret_84; \ +#define vld3q_dup_s8(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \ + __ret; \ }) #else -#define vmls_lane_s16(__p0_85, __p1_85, __p2_85, __p3_85) __extension__ ({ \ - int16x4_t __ret_85; \ - int16x4_t __s0_85 = __p0_85; \ - int16x4_t __s1_85 = __p1_85; \ - int16x4_t __s2_85 = __p2_85; \ - int16x4_t __rev0_85; __rev0_85 = __builtin_shufflevector(__s0_85, __s0_85, 3, 2, 1, 0); \ - int16x4_t __rev1_85; __rev1_85 = __builtin_shufflevector(__s1_85, __s1_85, 3, 2, 1, 0); \ - int16x4_t __rev2_85; __rev2_85 = __builtin_shufflevector(__s2_85, __s2_85, 3, 2, 1, 0); \ - __ret_85 = __rev0_85 - __rev1_85 * 
__noswap_splat_lane_s16(__rev2_85, __p3_85); \ - __ret_85 = __builtin_shufflevector(__ret_85, __ret_85, 3, 2, 1, 0); \ - __ret_85; \ +#define vld3q_dup_s8(__p0) __extension__ ({ \ + int8x16x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { - uint32x4_t __ret; - __ret = __p0 - __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; - return __ret; -} +#define vld3q_dup_f32(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \ + __ret; \ +}) #else -__ai uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 - __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld3q_dup_f32(__p0) __extension__ ({ \ + float32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { - uint16x8_t __ret; - __ret = __p0 - __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, 
__p2, __p2, __p2, __p2}; - return __ret; -} +#define vld3q_dup_s32(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \ + __ret; \ +}) #else -__ai uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 - __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld3q_dup_s32(__p0) __extension__ ({ \ + int32x4x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { - float32x4_t __ret; - __ret = __p0 - __p1 * (float32x4_t) {__p2, __p2, __p2, __p2}; - return __ret; -} +#define vld3q_dup_s64(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \ + __ret; \ +}) #else -__ai float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 - __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld3q_dup_s64(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], 
__ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { - int32x4_t __ret; - __ret = __p0 - __p1 * (int32x4_t) {__p2, __p2, __p2, __p2}; - return __ret; -} +#define vld3q_dup_s16(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \ + __ret; \ +}) #else -__ai int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 - __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld3q_dup_s16(__p0) __extension__ ({ \ + int16x8x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { - int16x8_t __ret; - __ret = __p0 - __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; - return __ret; -} +#define vld3_dup_u8(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \ + __ret; \ +}) #else -__ai int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 
4, 3, 2, 1, 0); - __ret = __rev0 - __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld3_dup_u8(__p0) __extension__ ({ \ + uint8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { - uint32x2_t __ret; - __ret = __p0 - __p1 * (uint32x2_t) {__p2, __p2}; - return __ret; -} +#define vld3_dup_u32(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \ + __ret; \ +}) #else -__ai uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 - __rev1 * (uint32x2_t) {__p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld3_dup_u32(__p0) __extension__ ({ \ + uint32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) #endif +#define vld3_dup_u64(__p0) __extension__ ({ \ + uint64x1x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 19); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { - uint16x4_t __ret; - __ret = __p0 - __p1 
* (uint16x4_t) {__p2, __p2, __p2, __p2}; - return __ret; -} +#define vld3_dup_u16(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \ + __ret; \ +}) #else -__ai uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 - __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld3_dup_u16(__p0) __extension__ ({ \ + uint16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { - float32x2_t __ret; - __ret = __p0 - __p1 * (float32x2_t) {__p2, __p2}; - return __ret; -} +#define vld3_dup_s8(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \ + __ret; \ +}) #else -__ai float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 - __rev1 * (float32x2_t) {__p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld3_dup_s8(__p0) __extension__ ({ \ + int8x8x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], 
__ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { - int32x2_t __ret; - __ret = __p0 - __p1 * (int32x2_t) {__p2, __p2}; - return __ret; -} +#define vld3_dup_f32(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \ + __ret; \ +}) #else -__ai int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 - __rev1 * (int32x2_t) {__p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld3_dup_f32(__p0) __extension__ ({ \ + float32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { - int16x4_t __ret; - __ret = __p0 - __p1 * (int16x4_t) {__p2, __p2, __p2, __p2}; - return __ret; -} +#define vld3_dup_s32(__p0) __extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \ + __ret; \ +}) #else -__ai int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 - __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld3_dup_s32(__p0) 
__extension__ ({ \ + int32x2x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) #endif +#define vld3_dup_s64(__p0) __extension__ ({ \ + int64x1x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 3); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vmov_n_p8(poly8_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} +#define vld3_dup_s16(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \ + __ret; \ +}) #else -__ai poly8x8_t vmov_n_p8(poly8_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld3_dup_s16(__p0) __extension__ ({ \ + int16x4x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vmov_n_p16(poly16_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; - return __ret; -} +#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x3_t __ret; \ + poly8x8x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \ + __ret; \ +}) #else -__ai poly16x4_t vmov_n_p16(poly16_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 
2, 1, 0); - return __ret; -} +#define vld3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x3_t __ret; \ + poly8x8x3_t __s1 = __p1; \ + poly8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vmovq_n_p8(poly8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} +#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x3_t __ret; \ + poly16x4x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \ + __ret; \ +}) #else -__ai poly8x16_t vmovq_n_p8(poly8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x3_t __ret; \ + poly16x4x3_t __s1 = __p1; \ + poly16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + 
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vmovq_n_p16(poly16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} +#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x3_t __ret; \ + poly16x8x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \ + __ret; \ +}) #else -__ai poly16x8_t vmovq_n_p16(poly16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x3_t __ret; \ + poly16x8x3_t __s1 = __p1; \ + poly16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 
5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vmovq_n_u8(uint8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} +#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x3_t __ret; \ + uint32x4x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \ + __ret; \ +}) #else -__ai uint8x16_t vmovq_n_u8(uint8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x3_t __ret; \ + uint32x4x3_t __s1 = __p1; \ + uint32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmovq_n_u32(uint32_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; - return __ret; -} +#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x3_t __ret; \ + uint16x8x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], 
(int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \ + __ret; \ +}) #else -__ai uint32x4_t vmovq_n_u32(uint32_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x3_t __ret; \ + uint16x8x3_t __s1 = __p1; \ + uint16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmovq_n_u64(uint64_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) {__p0, __p0}; - return __ret; -} +#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x3_t __ret; \ + float32x4x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \ + __ret; \ +}) #else -__ai uint64x2_t vmovq_n_u64(uint64_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) {__p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x3_t __ret; \ + float32x4x3_t __s1 = __p1; \ + float32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmovq_n_u16(uint16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} +#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x3_t __ret; \ + int32x4x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 34); \ + __ret; \ +}) #else -__ai uint16x8_t vmovq_n_u16(uint16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x3_t __ret; \ + int32x4x3_t __s1 = __p1; \ + int32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 
3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vmovq_n_s8(int8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} +#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x3_t __ret; \ + int16x8x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \ + __ret; \ +}) #else -__ai int8x16_t vmovq_n_s8(int8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x3_t __ret; \ + int16x8x3_t __s1 = __p1; \ + int16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmovq_n_f32(float32_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; - return __ret; -} +#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x3_t __ret; \ + uint8x8x3_t __s1 = __p1; \ + 
__builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \ + __ret; \ +}) #else -__ai float32x4_t vmovq_n_f32(float32_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x3_t __ret; \ + uint8x8x3_t __s1 = __p1; \ + uint8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vmovq_n_f16(__p0) __extension__ ({ \ - float16x8_t __ret; \ - float16_t __s0 = __p0; \ - __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ +#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x3_t __ret; \ + uint32x2x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \ __ret; \ }) #else -#define vmovq_n_f16(__p0) __extension__ ({ \ - float16x8_t __ret; \ - float16_t __s0 = __p0; \ - __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vld3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x3_t __ret; \ + 
uint32x2x3_t __s1 = __p1; \ + uint32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmovq_n_s32(int32_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; - return __ret; -} +#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x3_t __ret; \ + uint16x4x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \ + __ret; \ +}) #else -__ai int32x4_t vmovq_n_s32(int32_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x3_t __ret; \ + uint16x4x3_t __s1 = __p1; \ + uint16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = 
__builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmovq_n_s64(int64_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) {__p0, __p0}; - return __ret; -} +#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x3_t __ret; \ + int8x8x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \ + __ret; \ +}) #else -__ai int64x2_t vmovq_n_s64(int64_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) {__p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x3_t __ret; \ + int8x8x3_t __s1 = __p1; \ + int8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmovq_n_s16(int16_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} +#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x3_t __ret; \ + float32x2x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \ + __ret; \ +}) #else -__ai int16x8_t vmovq_n_s16(int16_t 
__p0) { - int16x8_t __ret; - __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x3_t __ret; \ + float32x2x3_t __s1 = __p1; \ + float32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vmov_n_u8(uint8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} +#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x3_t __ret; \ + int32x2x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \ + __ret; \ +}) #else -__ai uint8x8_t vmov_n_u8(uint8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x3_t __ret; \ + int32x2x3_t __s1 = __p1; \ + int32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + 
__builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmov_n_u32(uint32_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) {__p0, __p0}; - return __ret; -} +#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x3_t __ret; \ + int16x4x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \ + __ret; \ +}) #else -__ai uint32x2_t vmov_n_u32(uint32_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) {__p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x3_t __ret; \ + int16x4x3_t __s1 = __p1; \ + int16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret; \ +}) #endif -__ai uint64x1_t vmov_n_u64(uint64_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) {__p0}; - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmov_n_u16(uint16_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) {__p0, 
__p0, __p0, __p0}; - return __ret; -} +#define vld4_p8(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 4); \ + __ret; \ +}) #else -__ai uint16x4_t vmov_n_u16(uint16_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld4_p8(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vmov_n_s8(int8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - return __ret; -} +#define vld4_p16(__p0) __extension__ ({ \ + poly16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 5); \ + __ret; \ +}) #else -__ai int8x8_t vmov_n_s8(int8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld4_p16(__p0) __extension__ ({ \ + poly16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmov_n_f32(float32_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) 
{__p0, __p0}; - return __ret; -} +#define vld4q_p8(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 36); \ + __ret; \ +}) #else -__ai float32x2_t vmov_n_f32(float32_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) {__p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld4q_p8(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vmov_n_f16(__p0) __extension__ ({ \ - float16x4_t __ret; \ - float16_t __s0 = __p0; \ - __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ +#define vld4q_p16(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 37); \ __ret; \ }) #else -#define vmov_n_f16(__p0) __extension__ ({ \ - float16x4_t __ret; \ - float16_t __s0 = __p0; \ - __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ +#define vld4q_p16(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 
0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmov_n_s32(int32_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) {__p0, __p0}; - return __ret; -} +#define vld4q_u8(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 48); \ + __ret; \ +}) #else -__ai int32x2_t vmov_n_s32(int32_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) {__p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld4q_u8(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif -__ai int64x1_t vmov_n_s64(int64_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) {__p0}; - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmov_n_s16(int16_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; - return __ret; -} +#define vld4q_u32(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 50); \ + __ret; \ +}) #else -__ai int16x4_t vmov_n_s16(int16_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld4q_u32(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + 
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmovl_u8(uint8x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49); - return __ret; -} +#define vld4q_u16(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 49); \ + __ret; \ +}) #else -__ai uint16x8_t vmovl_u8(uint8x8_t __p0) { - uint16x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai uint16x8_t __noswap_vmovl_u8(uint8x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49); - return __ret; -} +#define vld4q_u16(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmovl_u32(uint32x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51); - return __ret; -} +#define vld4q_s8(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 32); \ + __ret; \ +}) #else -__ai uint64x2_t vmovl_u32(uint32x2_t __p0) { - uint64x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64x2_t) 
__builtin_neon_vmovl_v((int8x8_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai uint64x2_t __noswap_vmovl_u32(uint32x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51); - return __ret; -} +#define vld4q_s8(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmovl_u16(uint16x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50); - return __ret; -} +#define vld4q_f32(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 41); \ + __ret; \ +}) #else -__ai uint32x4_t vmovl_u16(uint16x4_t __p0) { - uint32x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai uint32x4_t __noswap_vmovl_u16(uint16x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50); - return __ret; -} +#define vld4q_f32(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = 
__builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmovl_s8(int8x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33); - return __ret; -} +#define vld4q_s32(__p0) __extension__ ({ \ + int32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 34); \ + __ret; \ +}) #else -__ai int16x8_t vmovl_s8(int8x8_t __p0) { - int16x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int16x8_t __noswap_vmovl_s8(int8x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33); - return __ret; -} +#define vld4q_s32(__p0) __extension__ ({ \ + int32x4x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmovl_s32(int32x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35); - return __ret; -} +#define vld4q_s16(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 33); \ + __ret; \ +}) #else -__ai int64x2_t vmovl_s32(int32x2_t __p0) { - int64x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; 
-} -__ai int64x2_t __noswap_vmovl_s32(int32x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35); - return __ret; -} +#define vld4q_s16(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmovl_s16(int16x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34); - return __ret; -} +#define vld4_u8(__p0) __extension__ ({ \ + uint8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 16); \ + __ret; \ +}) #else -__ai int32x4_t vmovl_s16(int16x4_t __p0) { - int32x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vmovl_s16(int16x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34); - return __ret; -} +#define vld4_u8(__p0) __extension__ ({ \ + uint8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t 
vmovn_u32(uint32x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17); - return __ret; -} +#define vld4_u32(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 18); \ + __ret; \ +}) #else -__ai uint16x4_t vmovn_u32(uint32x4_t __p0) { - uint16x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai uint16x4_t __noswap_vmovn_u32(uint32x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17); - return __ret; -} +#define vld4_u32(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) #endif +#define vld4_u64(__p0) __extension__ ({ \ + uint64x1x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 19); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmovn_u64(uint64x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18); - return __ret; -} +#define vld4_u16(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 17); \ + __ret; \ +}) #else -__ai uint32x2_t vmovn_u64(uint64x2_t __p0) { - uint32x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai uint32x2_t __noswap_vmovn_u64(uint64x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) 
__builtin_neon_vmovn_v((int8x16_t)__p0, 18); - return __ret; -} +#define vld4_u16(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vmovn_u16(uint16x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16); - return __ret; -} +#define vld4_s8(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 0); \ + __ret; \ +}) #else -__ai uint8x8_t vmovn_u16(uint16x8_t __p0) { - uint8x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai uint8x8_t __noswap_vmovn_u16(uint16x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16); - return __ret; -} +#define vld4_s8(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmovn_s32(int32x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1); - return 
__ret; -} +#define vld4_f32(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 9); \ + __ret; \ +}) #else -__ai int16x4_t vmovn_s32(int32x4_t __p0) { - int16x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int16x4_t __noswap_vmovn_s32(int32x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1); - return __ret; -} +#define vld4_f32(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmovn_s64(int64x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2); - return __ret; -} +#define vld4_s32(__p0) __extension__ ({ \ + int32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 2); \ + __ret; \ +}) #else -__ai int32x2_t vmovn_s64(int64x2_t __p0) { - int32x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int32x2_t __noswap_vmovn_s64(int64x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2); - return __ret; -} +#define vld4_s32(__p0) __extension__ ({ \ + int32x2x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) #endif +#define vld4_s64(__p0) __extension__ ({ \ + int64x1x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 3); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vmovn_s16(int16x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0); - return __ret; -} +#define vld4_s16(__p0) __extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 1); \ + __ret; \ +}) #else -__ai int8x8_t vmovn_s16(int16x8_t __p0) { - int8x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int8x8_t __noswap_vmovn_s16(int16x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0); - return __ret; -} +#define vld4_s16(__p0) __extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = __p0 * __p1; - return __ret; -} +#define vld4_dup_p8(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \ + __ret; \ +}) #else -__ai uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld4_dup_p8(__p0) __extension__ ({ \ + poly8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = __p0 * __p1; - return __ret; -} +#define vld4_dup_p16(__p0) __extension__ ({ \ + poly16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \ + __ret; \ +}) #else -__ai uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#else -__ai uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 * __rev1; - __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#else -__ai int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = __p0 * __p1; - return __ret; -} -#else -__ai float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld4_dup_p16(__p0) __extension__ ({ \ + poly16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = __p0 * __p1; - return __ret; -} +#define vld4q_dup_p8(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \ + __ret; \ 
+}) #else -__ai int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld4q_dup_p8(__p0) __extension__ ({ \ + poly8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = __p0 * __p1; - return __ret; -} +#define vld4q_dup_p16(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \ + __ret; \ +}) #else -__ai int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld4q_dup_p16(__p0) __extension__ ({ \ + poly16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + 
__ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = __p0 * __p1; - return __ret; -} +#define vld4q_dup_u8(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \ + __ret; \ +}) #else -__ai uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld4q_dup_u8(__p0) __extension__ ({ \ + uint8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = __p0 * __p1; - return __ret; -} +#define vld4q_dup_u32(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \ + __ret; \ +}) #else -__ai uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 
= __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld4q_dup_u32(__p0) __extension__ ({ \ + uint32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = __p0 * __p1; - return __ret; -} +#define vld4q_dup_u64(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \ + __ret; \ +}) #else -__ai uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld4q_dup_u64(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = __p0 * __p1; - return __ret; -} +#define vld4q_dup_u16(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \ + __ret; 
\ +}) #else -__ai int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld4q_dup_u16(__p0) __extension__ ({ \ + uint16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = __p0 * __p1; - return __ret; -} +#define vld4q_dup_s8(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \ + __ret; \ +}) #else -__ai float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld4q_dup_s8(__p0) __extension__ ({ \ + int8x16x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 
12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = __p0 * __p1; - return __ret; -} +#define vld4q_dup_f32(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \ + __ret; \ +}) #else -__ai int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld4q_dup_f32(__p0) __extension__ ({ \ + float32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = __p0 * __p1; - return __ret; -} +#define vld4q_dup_s32(__p0) __extension__ ({ \ + int32x4x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \ + __ret; \ +}) #else -__ai int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vld4q_dup_s32(__p0) __extension__ ({ \ + int32x4x4_t __ret; \ + 
__builtin_neon_vld4q_dup_v(&__ret, __p0, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__p0, (int8x8_t)__p1, 4); - return __ret; -} +#define vld4q_dup_s64(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \ + __ret; \ +}) #else -__ai poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld4q_dup_s64(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__p0, (int8x16_t)__p1, 36); - return __ret; -} +#define vld4q_dup_s16(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \ + __ret; \ +}) #else 
-__ai poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __ret; - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vld4q_dup_s16(__p0) __extension__ ({ \ + int16x8x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_u32(__p0_86, __p1_86, __p2_86) __extension__ ({ \ - uint32x4_t __ret_86; \ - uint32x4_t __s0_86 = __p0_86; \ - uint32x2_t __s1_86 = __p1_86; \ - __ret_86 = __s0_86 * splatq_lane_u32(__s1_86, __p2_86); \ - __ret_86; \ +#define vld4_dup_u8(__p0) __extension__ ({ \ + uint8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \ + __ret; \ }) #else -#define vmulq_lane_u32(__p0_87, __p1_87, __p2_87) __extension__ ({ \ - uint32x4_t __ret_87; \ - uint32x4_t __s0_87 = __p0_87; \ - uint32x2_t __s1_87 = __p1_87; \ - uint32x4_t __rev0_87; __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \ - uint32x2_t __rev1_87; __rev1_87 = __builtin_shufflevector(__s1_87, __s1_87, 1, 0); \ - __ret_87 = __rev0_87 * __noswap_splatq_lane_u32(__rev1_87, __p2_87); \ - __ret_87 = __builtin_shufflevector(__ret_87, __ret_87, 3, 2, 1, 0); \ - __ret_87; \ +#define vld4_dup_u8(__p0) 
__extension__ ({ \ + uint8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_u16(__p0_88, __p1_88, __p2_88) __extension__ ({ \ - uint16x8_t __ret_88; \ - uint16x8_t __s0_88 = __p0_88; \ - uint16x4_t __s1_88 = __p1_88; \ - __ret_88 = __s0_88 * splatq_lane_u16(__s1_88, __p2_88); \ - __ret_88; \ +#define vld4_dup_u32(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \ + __ret; \ }) #else -#define vmulq_lane_u16(__p0_89, __p1_89, __p2_89) __extension__ ({ \ - uint16x8_t __ret_89; \ - uint16x8_t __s0_89 = __p0_89; \ - uint16x4_t __s1_89 = __p1_89; \ - uint16x8_t __rev0_89; __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev1_89; __rev1_89 = __builtin_shufflevector(__s1_89, __s1_89, 3, 2, 1, 0); \ - __ret_89 = __rev0_89 * __noswap_splatq_lane_u16(__rev1_89, __p2_89); \ - __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_89; \ +#define vld4_dup_u32(__p0) __extension__ ({ \ + uint32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ }) #endif +#define vld4_dup_u64(__p0) __extension__ ({ \ + uint64x1x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, 
__p0, 19); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_f32(__p0_90, __p1_90, __p2_90) __extension__ ({ \ - float32x4_t __ret_90; \ - float32x4_t __s0_90 = __p0_90; \ - float32x2_t __s1_90 = __p1_90; \ - __ret_90 = __s0_90 * splatq_lane_f32(__s1_90, __p2_90); \ - __ret_90; \ +#define vld4_dup_u16(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \ + __ret; \ }) #else -#define vmulq_lane_f32(__p0_91, __p1_91, __p2_91) __extension__ ({ \ - float32x4_t __ret_91; \ - float32x4_t __s0_91 = __p0_91; \ - float32x2_t __s1_91 = __p1_91; \ - float32x4_t __rev0_91; __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, 3, 2, 1, 0); \ - float32x2_t __rev1_91; __rev1_91 = __builtin_shufflevector(__s1_91, __s1_91, 1, 0); \ - __ret_91 = __rev0_91 * __noswap_splatq_lane_f32(__rev1_91, __p2_91); \ - __ret_91 = __builtin_shufflevector(__ret_91, __ret_91, 3, 2, 1, 0); \ - __ret_91; \ +#define vld4_dup_u16(__p0) __extension__ ({ \ + uint16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_s32(__p0_92, __p1_92, __p2_92) __extension__ ({ \ - int32x4_t __ret_92; \ - int32x4_t __s0_92 = __p0_92; \ - int32x2_t __s1_92 = __p1_92; \ - __ret_92 = __s0_92 * splatq_lane_s32(__s1_92, __p2_92); \ - __ret_92; \ +#define vld4_dup_s8(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \ + __ret; \ }) #else -#define vmulq_lane_s32(__p0_93, __p1_93, __p2_93) __extension__ ({ \ - int32x4_t __ret_93; \ - int32x4_t __s0_93 = __p0_93; \ - int32x2_t __s1_93 = __p1_93; \ - int32x4_t __rev0_93; 
__rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 3, 2, 1, 0); \ - int32x2_t __rev1_93; __rev1_93 = __builtin_shufflevector(__s1_93, __s1_93, 1, 0); \ - __ret_93 = __rev0_93 * __noswap_splatq_lane_s32(__rev1_93, __p2_93); \ - __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 3, 2, 1, 0); \ - __ret_93; \ +#define vld4_dup_s8(__p0) __extension__ ({ \ + int8x8x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_s16(__p0_94, __p1_94, __p2_94) __extension__ ({ \ - int16x8_t __ret_94; \ - int16x8_t __s0_94 = __p0_94; \ - int16x4_t __s1_94 = __p1_94; \ - __ret_94 = __s0_94 * splatq_lane_s16(__s1_94, __p2_94); \ - __ret_94; \ +#define vld4_dup_f32(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \ + __ret; \ }) #else -#define vmulq_lane_s16(__p0_95, __p1_95, __p2_95) __extension__ ({ \ - int16x8_t __ret_95; \ - int16x8_t __s0_95 = __p0_95; \ - int16x4_t __s1_95 = __p1_95; \ - int16x8_t __rev0_95; __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev1_95; __rev1_95 = __builtin_shufflevector(__s1_95, __s1_95, 3, 2, 1, 0); \ - __ret_95 = __rev0_95 * __noswap_splatq_lane_s16(__rev1_95, __p2_95); \ - __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_95; \ +#define vld4_dup_f32(__p0) __extension__ ({ \ + float32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmul_lane_u32(__p0_96, __p1_96, __p2_96) __extension__ ({ \ - uint32x2_t __ret_96; \ - uint32x2_t __s0_96 = __p0_96; \ - uint32x2_t __s1_96 = __p1_96; \ - __ret_96 = __s0_96 * splat_lane_u32(__s1_96, __p2_96); \ - __ret_96; \ +#define vld4_dup_s32(__p0) __extension__ ({ \ + int32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \ + __ret; \ }) #else -#define vmul_lane_u32(__p0_97, __p1_97, __p2_97) __extension__ ({ \ - uint32x2_t __ret_97; \ - uint32x2_t __s0_97 = __p0_97; \ - uint32x2_t __s1_97 = __p1_97; \ - uint32x2_t __rev0_97; __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, 1, 0); \ - uint32x2_t __rev1_97; __rev1_97 = __builtin_shufflevector(__s1_97, __s1_97, 1, 0); \ - __ret_97 = __rev0_97 * __noswap_splat_lane_u32(__rev1_97, __p2_97); \ - __ret_97 = __builtin_shufflevector(__ret_97, __ret_97, 1, 0); \ - __ret_97; \ +#define vld4_dup_s32(__p0) __extension__ ({ \ + int32x2x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ }) #endif +#define vld4_dup_s64(__p0) __extension__ ({ \ + int64x1x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 3); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vmul_lane_u16(__p0_98, __p1_98, __p2_98) __extension__ ({ \ - uint16x4_t __ret_98; \ - uint16x4_t __s0_98 = __p0_98; \ - uint16x4_t __s1_98 = __p1_98; \ - __ret_98 = __s0_98 * splat_lane_u16(__s1_98, __p2_98); \ - __ret_98; \ +#define vld4_dup_s16(__p0) 
__extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \ + __ret; \ }) #else -#define vmul_lane_u16(__p0_99, __p1_99, __p2_99) __extension__ ({ \ - uint16x4_t __ret_99; \ - uint16x4_t __s0_99 = __p0_99; \ - uint16x4_t __s1_99 = __p1_99; \ - uint16x4_t __rev0_99; __rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, 3, 2, 1, 0); \ - uint16x4_t __rev1_99; __rev1_99 = __builtin_shufflevector(__s1_99, __s1_99, 3, 2, 1, 0); \ - __ret_99 = __rev0_99 * __noswap_splat_lane_u16(__rev1_99, __p2_99); \ - __ret_99 = __builtin_shufflevector(__ret_99, __ret_99, 3, 2, 1, 0); \ - __ret_99; \ +#define vld4_dup_s16(__p0) __extension__ ({ \ + int16x4x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmul_lane_f32(__p0_100, __p1_100, __p2_100) __extension__ ({ \ - float32x2_t __ret_100; \ - float32x2_t __s0_100 = __p0_100; \ - float32x2_t __s1_100 = __p1_100; \ - __ret_100 = __s0_100 * splat_lane_f32(__s1_100, __p2_100); \ - __ret_100; \ +#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x4_t __ret; \ + poly8x8x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \ + __ret; \ }) #else -#define vmul_lane_f32(__p0_101, __p1_101, __p2_101) __extension__ ({ \ - float32x2_t __ret_101; \ - float32x2_t __s0_101 = __p0_101; \ - float32x2_t __s1_101 = __p1_101; \ - float32x2_t __rev0_101; __rev0_101 = __builtin_shufflevector(__s0_101, __s0_101, 1, 0); \ - float32x2_t __rev1_101; __rev1_101 = __builtin_shufflevector(__s1_101, __s1_101, 1, 
0); \ - __ret_101 = __rev0_101 * __noswap_splat_lane_f32(__rev1_101, __p2_101); \ - __ret_101 = __builtin_shufflevector(__ret_101, __ret_101, 1, 0); \ - __ret_101; \ +#define vld4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x4_t __ret; \ + poly8x8x4_t __s1 = __p1; \ + poly8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmul_lane_s32(__p0_102, __p1_102, __p2_102) __extension__ ({ \ - int32x2_t __ret_102; \ - int32x2_t __s0_102 = __p0_102; \ - int32x2_t __s1_102 = __p1_102; \ - __ret_102 = __s0_102 * splat_lane_s32(__s1_102, __p2_102); \ - __ret_102; \ +#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x4_t __ret; \ + poly16x4x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \ + __ret; \ }) #else -#define vmul_lane_s32(__p0_103, __p1_103, __p2_103) __extension__ ({ \ - int32x2_t __ret_103; \ - int32x2_t __s0_103 = __p0_103; \ - int32x2_t __s1_103 = __p1_103; \ - int32x2_t __rev0_103; __rev0_103 = 
__builtin_shufflevector(__s0_103, __s0_103, 1, 0); \ - int32x2_t __rev1_103; __rev1_103 = __builtin_shufflevector(__s1_103, __s1_103, 1, 0); \ - __ret_103 = __rev0_103 * __noswap_splat_lane_s32(__rev1_103, __p2_103); \ - __ret_103 = __builtin_shufflevector(__ret_103, __ret_103, 1, 0); \ - __ret_103; \ +#define vld4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x4_t __ret; \ + poly16x4x4_t __s1 = __p1; \ + poly16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmul_lane_s16(__p0_104, __p1_104, __p2_104) __extension__ ({ \ - int16x4_t __ret_104; \ - int16x4_t __s0_104 = __p0_104; \ - int16x4_t __s1_104 = __p1_104; \ - __ret_104 = __s0_104 * splat_lane_s16(__s1_104, __p2_104); \ - __ret_104; \ -}) +#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x4_t __ret; \ + poly16x8x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \ + __ret; \ +}) #else -#define vmul_lane_s16(__p0_105, __p1_105, __p2_105) __extension__ ({ \ - int16x4_t __ret_105; \ - int16x4_t __s0_105 = __p0_105; \ - int16x4_t 
__s1_105 = __p1_105; \ - int16x4_t __rev0_105; __rev0_105 = __builtin_shufflevector(__s0_105, __s0_105, 3, 2, 1, 0); \ - int16x4_t __rev1_105; __rev1_105 = __builtin_shufflevector(__s1_105, __s1_105, 3, 2, 1, 0); \ - __ret_105 = __rev0_105 * __noswap_splat_lane_s16(__rev1_105, __p2_105); \ - __ret_105 = __builtin_shufflevector(__ret_105, __ret_105, 3, 2, 1, 0); \ - __ret_105; \ +#define vld4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x4_t __ret; \ + poly16x8x4_t __s1 = __p1; \ + poly16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x4_t __ret; \ + uint32x4x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \ + __ret; \ +}) +#else +#define vld4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x4_t __ret; \ + uint32x4x4_t __s1 = __p1; \ + uint32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 
3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x4_t __ret; \ + uint16x8x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \ + __ret; \ +}) +#else +#define vld4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x4_t __ret; \ + uint16x8x4_t __s1 = __p1; \ + uint16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = 
__builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x4_t __ret; \ + float32x4x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \ + __ret; \ +}) +#else +#define vld4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x4_t __ret; \ + float32x4x4_t __s1 = __p1; \ + float32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x4_t __ret; \ + int32x4x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \ + __ret; \ +}) +#else +#define vld4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x4_t __ret; \ + int32x4x4_t __s1 = __p1; \ + int32x4x4_t __rev1; \ + __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x4_t __ret; \ + int16x8x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \ + __ret; \ +}) +#else +#define vld4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x4_t __ret; \ + int16x8x4_t __s1 = __p1; \ + int16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], 
__ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x4_t __ret; \ + uint8x8x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \ + __ret; \ +}) +#else +#define vld4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x4_t __ret; \ + uint8x8x4_t __s1 = __p1; \ + uint8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x4_t __ret; \ + uint32x2x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \ + __ret; \ +}) +#else +#define vld4_lane_u32(__p0, __p1, __p2) 
__extension__ ({ \ + uint32x2x4_t __ret; \ + uint32x2x4_t __s1 = __p1; \ + uint32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x4_t __ret; \ + uint16x4x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \ + __ret; \ +}) +#else +#define vld4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x4_t __ret; \ + uint16x4x4_t __s1 = __p1; \ + uint16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 
2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x4_t __ret; \ + int8x8x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \ + __ret; \ +}) +#else +#define vld4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x4_t __ret; \ + int8x8x4_t __s1 = __p1; \ + int8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) { +#define vld4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x4_t __ret; \ + float32x2x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \ + __ret; \ +}) +#else +#define vld4_lane_f32(__p0, __p1, __p2) 
__extension__ ({ \ + float32x2x4_t __ret; \ + float32x2x4_t __s1 = __p1; \ + float32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x4_t __ret; \ + int32x2x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \ + __ret; \ +}) +#else +#define vld4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x4_t __ret; \ + int32x2x4_t __s1 = __p1; \ + int32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = 
__builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x4_t __ret; \ + int16x4x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \ + __ret; \ +}) +#else +#define vld4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x4_t __ret; \ + int16x4x4_t __s1 = __p1; \ + int16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; - __ret = __p0 * (uint32x4_t) {__p1, __p1, __p1, __p1}; + __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __rev0 * (uint32x4_t) {__p1, __p1, __p1, __p1}; + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; - __ret = __p0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; + __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 
0); + __ret = (uint16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vmaxq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; - __ret = __p0 * (float32x4_t) {__p1, __p1, __p1, __p1}; + __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else -__ai float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vmaxq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __rev0 * (float32x4_t) {__p1, __p1, __p1, __p1}; + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t 
vmulq_n_s32(int32x4_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; - __ret = __p0 * (int32x4_t) {__p1, __p1, __p1, __p1}; + __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vmaxq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __rev0 * (int32x4_t) {__p1, __p1, __p1, __p1}; + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; - __ret = __p0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; + __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else -__ai int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmaxq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + 
__ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vmax_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; - __ret = __p0 * (uint32x2_t) {__p1, __p1}; + __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vmax_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = __rev0 * (uint32x2_t) {__p1, __p1}; + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; - __ret = __p0 * (uint16x4_t) {__p1, __p1, __p1, __p1}; + __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vmax_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __rev0 * (uint16x4_t) {__p1, __p1, __p1, __p1}; + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vmax_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; - __ret = __p0 * (float32x2_t) {__p1, __p1}; + __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else -__ai float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vmax_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = __rev0 * (float32x2_t) {__p1, __p1}; + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t 
vmul_n_s32(int32x2_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; - __ret = __p0 * (int32x2_t) {__p1, __p1}; + __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else -__ai int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vmax_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = __rev0 * (int32x2_t) {__p1, __p1}; + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; - __ret = __p0 * (int16x4_t) {__p1, __p1, __p1, __p1}; + __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vmax_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __rev0 * (int16x4_t) {__p1, __p1, __p1, __p1}; + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly16x8_t __ret; - __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37); +__ai __attribute__((target("neon"))) uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t 
__ret; + __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly16x8_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 37); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8x16_t vminq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly16x8_t __noswap_vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly16x8_t __ret; - __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vminq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef 
__LITTLE_ENDIAN__ -__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49); + __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vminq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 49); + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint16x8_t __noswap_vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49); - return __ret; -} #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51); +__ai __attribute__((target("neon"))) int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else -__ai uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint64x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, 
__p1, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai uint64x2_t __noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51); +__ai __attribute__((target("neon"))) int8x16_t vminq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50); +__ai __attribute__((target("neon"))) float32x4_t vminq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else -__ai uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint32x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50); +__ai __attribute__((target("neon"))) float32x4_t vminq_f32(float32x4_t __p0, 
float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33); +__ai __attribute__((target("neon"))) int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) { - int16x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int16x8_t __noswap_vmull_s8(int8x8_t __p0, int8x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33); +__ai __attribute__((target("neon"))) int32x4_t vminq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); 
+__ai __attribute__((target("neon"))) int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else -__ai int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) { - int64x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); +__ai __attribute__((target("neon"))) int16x8_t vminq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); +__ai __attribute__((target("neon"))) uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) { - int32x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34); - __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); +__ai __attribute__((target("neon"))) uint8x8_t vmin_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_lane_u32(__p0_106, __p1_106, __p2_106) __extension__ ({ \ - uint64x2_t __ret_106; \ - uint32x2_t __s0_106 = __p0_106; \ - uint32x2_t __s1_106 = __p1_106; \ - __ret_106 = vmull_u32(__s0_106, splat_lane_u32(__s1_106, __p2_106)); \ - __ret_106; \ -}) -#else -#define vmull_lane_u32(__p0_107, __p1_107, __p2_107) __extension__ ({ \ - uint64x2_t __ret_107; \ - uint32x2_t __s0_107 = __p0_107; \ - uint32x2_t __s1_107 = __p1_107; \ - uint32x2_t __rev0_107; __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 1, 0); \ - uint32x2_t __rev1_107; __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 1, 0); \ - __ret_107 = __noswap_vmull_u32(__rev0_107, __noswap_splat_lane_u32(__rev1_107, __p2_107)); \ - __ret_107 = __builtin_shufflevector(__ret_107, __ret_107, 1, 0); \ - __ret_107; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmull_lane_u16(__p0_108, __p1_108, __p2_108) __extension__ ({ \ - uint32x4_t __ret_108; \ - uint16x4_t __s0_108 = __p0_108; \ - uint16x4_t __s1_108 = __p1_108; \ - __ret_108 = vmull_u16(__s0_108, splat_lane_u16(__s1_108, __p2_108)); \ - __ret_108; \ -}) -#else -#define vmull_lane_u16(__p0_109, __p1_109, __p2_109) __extension__ ({ \ - uint32x4_t __ret_109; \ - uint16x4_t __s0_109 = __p0_109; \ - uint16x4_t 
__s1_109 = __p1_109; \ - uint16x4_t __rev0_109; __rev0_109 = __builtin_shufflevector(__s0_109, __s0_109, 3, 2, 1, 0); \ - uint16x4_t __rev1_109; __rev1_109 = __builtin_shufflevector(__s1_109, __s1_109, 3, 2, 1, 0); \ - __ret_109 = __noswap_vmull_u16(__rev0_109, __noswap_splat_lane_u16(__rev1_109, __p2_109)); \ - __ret_109 = __builtin_shufflevector(__ret_109, __ret_109, 3, 2, 1, 0); \ - __ret_109; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmull_lane_s32(__p0_110, __p1_110, __p2_110) __extension__ ({ \ - int64x2_t __ret_110; \ - int32x2_t __s0_110 = __p0_110; \ - int32x2_t __s1_110 = __p1_110; \ - __ret_110 = vmull_s32(__s0_110, splat_lane_s32(__s1_110, __p2_110)); \ - __ret_110; \ -}) -#else -#define vmull_lane_s32(__p0_111, __p1_111, __p2_111) __extension__ ({ \ - int64x2_t __ret_111; \ - int32x2_t __s0_111 = __p0_111; \ - int32x2_t __s1_111 = __p1_111; \ - int32x2_t __rev0_111; __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 1, 0); \ - int32x2_t __rev1_111; __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 1, 0); \ - __ret_111 = __noswap_vmull_s32(__rev0_111, __noswap_splat_lane_s32(__rev1_111, __p2_111)); \ - __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 1, 0); \ - __ret_111; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmull_lane_s16(__p0_112, __p1_112, __p2_112) __extension__ ({ \ - int32x4_t __ret_112; \ - int16x4_t __s0_112 = __p0_112; \ - int16x4_t __s1_112 = __p1_112; \ - __ret_112 = vmull_s16(__s0_112, splat_lane_s16(__s1_112, __p2_112)); \ - __ret_112; \ -}) -#else -#define vmull_lane_s16(__p0_113, __p1_113, __p2_113) __extension__ ({ \ - int32x4_t __ret_113; \ - int16x4_t __s0_113 = __p0_113; \ - int16x4_t __s1_113 = __p1_113; \ - int16x4_t __rev0_113; __rev0_113 = __builtin_shufflevector(__s0_113, __s0_113, 3, 2, 1, 0); \ - int16x4_t __rev1_113; __rev1_113 = __builtin_shufflevector(__s1_113, __s1_113, 3, 2, 1, 0); \ - __ret_113 = __noswap_vmull_s16(__rev0_113, __noswap_splat_lane_s16(__rev1_113, __p2_113)); \ - 
__ret_113 = __builtin_shufflevector(__ret_113, __ret_113, 3, 2, 1, 0); \ - __ret_113; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { - uint64x2_t __ret; - __ret = vmull_u32(__p0, (uint32x2_t) {__p1, __p1}); +__ai __attribute__((target("neon"))) uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { - uint64x2_t __ret; +__ai __attribute__((target("neon"))) uint32x2_t vmin_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = __noswap_vmull_u32(__rev0, (uint32x2_t) {__p1, __p1}); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint64x2_t __noswap_vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { - uint64x2_t __ret; - __ret = __noswap_vmull_u32(__p0, (uint32x2_t) {__p1, __p1}); - return __ret; -} #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { - uint32x4_t __ret; - __ret = vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1}); +__ai __attribute__((target("neon"))) uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { - uint32x4_t __ret; +__ai __attribute__((target("neon"))) uint16x4_t vmin_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __noswap_vmull_u16(__rev0, (uint16x4_t) {__p1, __p1, __p1, __p1}); + uint16x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint32x4_t __noswap_vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { - uint32x4_t __ret; - __ret = __noswap_vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1}); - return __ret; -} #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) { - int64x2_t __ret; - __ret = vmull_s32(__p0, (int32x2_t) {__p1, __p1}); +__ai __attribute__((target("neon"))) int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) { - int64x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = __noswap_vmull_s32(__rev0, (int32x2_t) {__p1, __p1}); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int64x2_t __noswap_vmull_n_s32(int32x2_t __p0, int32_t __p1) { - int64x2_t __ret; - __ret = __noswap_vmull_s32(__p0, (int32x2_t) {__p1, __p1}); +__ai __attribute__((target("neon"))) int8x8_t vmin_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) { - int32x4_t __ret; - __ret = vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); +__ai __attribute__((target("neon"))) float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) 
__builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else -__ai int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) { - int32x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __noswap_vmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vmull_n_s16(int16x4_t __p0, int16_t __p1) { - int32x4_t __ret; - __ret = __noswap_vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); +__ai __attribute__((target("neon"))) float32x2_t vmin_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vmvn_p8(poly8x8_t __p0) { - poly8x8_t __ret; - __ret = ~__p0; +__ai __attribute__((target("neon"))) int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else -__ai poly8x8_t vmvn_p8(poly8x8_t __p0) { - poly8x8_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = ~__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32x2_t vmin_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai 
poly8x16_t vmvnq_p8(poly8x16_t __p0) { - poly8x16_t __ret; - __ret = ~__p0; +__ai __attribute__((target("neon"))) int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai poly8x16_t vmvnq_p8(poly8x16_t __p0) { - poly8x16_t __ret; - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = ~__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int16x4_t vmin_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vmvnq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; - __ret = ~__p0; + __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai uint8x16_t vmvnq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vmlaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = ~__rev0; + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return 
__ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; - __ret = ~__p0; + __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai uint32x4_t vmvnq_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vmlaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = ~__rev0; + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; - __ret = ~__p0; + __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai uint16x8_t vmvnq_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vmlaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = ~__rev0; + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vmvnq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; - __ret = ~__p0; + __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai int8x16_t vmvnq_s8(int8x16_t 
__p0) { +__ai __attribute__((target("neon"))) int8x16_t vmlaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = ~__rev0; + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmvnq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; - __ret = ~__p0; + __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai int32x4_t vmvnq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vmlaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = ~__rev0; + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + 
int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmvnq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; - __ret = ~__p0; + __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai int16x8_t vmvnq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vmlaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = ~__rev0; + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vmvn_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; - __ret = ~__p0; + __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai uint8x8_t vmvn_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vmla_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = ~__rev0; + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vmvn_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) 
uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint32x2_t __ret; - __ret = ~__p0; + __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai uint32x2_t vmvn_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vmla_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = ~__rev0; + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vmvn_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint16x4_t __ret; - __ret = ~__p0; + __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai uint16x4_t vmvn_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vmla_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = ~__rev0; + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vmvn_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; - __ret = ~__p0; + __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai int8x8_t vmvn_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vmla_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - 
__ret = ~__rev0; + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vmvn_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = __p0 + __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; - __ret = ~__p0; + __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai int32x2_t vmvn_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vmla_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = ~__rev0; + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vmvn_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; - __ret = 
~__p0; + __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai int16x4_t vmvn_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vmla_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = ~__rev0; + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vnegq_s8(int8x16_t __p0) { - int8x16_t __ret; - __ret = -__p0; +#define vmlaq_lane_u32(__p0_54, __p1_54, __p2_54, __p3_54) __extension__ ({ \ + uint32x4_t __ret_54; \ + uint32x4_t __s0_54 = __p0_54; \ + uint32x4_t __s1_54 = __p1_54; \ + uint32x2_t __s2_54 = __p2_54; \ + __ret_54 = __s0_54 + __s1_54 * splatq_lane_u32(__s2_54, __p3_54); \ + __ret_54; \ +}) +#else +#define vmlaq_lane_u32(__p0_55, __p1_55, __p2_55, __p3_55) __extension__ ({ \ + uint32x4_t __ret_55; \ + uint32x4_t __s0_55 = __p0_55; \ + uint32x4_t __s1_55 = __p1_55; \ + uint32x2_t __s2_55 = __p2_55; \ + uint32x4_t __rev0_55; __rev0_55 = __builtin_shufflevector(__s0_55, __s0_55, 3, 2, 1, 0); \ + uint32x4_t __rev1_55; __rev1_55 = __builtin_shufflevector(__s1_55, __s1_55, 3, 2, 1, 0); \ + uint32x2_t __rev2_55; __rev2_55 = __builtin_shufflevector(__s2_55, __s2_55, 1, 0); \ + __ret_55 = __rev0_55 + __rev1_55 * __noswap_splatq_lane_u32(__rev2_55, __p3_55); \ + __ret_55 = __builtin_shufflevector(__ret_55, __ret_55, 3, 2, 1, 0); \ + __ret_55; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_lane_u16(__p0_56, __p1_56, __p2_56, __p3_56) __extension__ ({ \ + uint16x8_t __ret_56; \ + uint16x8_t __s0_56 = __p0_56; \ + uint16x8_t __s1_56 = __p1_56; \ + uint16x4_t __s2_56 = __p2_56; \ + __ret_56 = __s0_56 + __s1_56 * splatq_lane_u16(__s2_56, __p3_56); \ + __ret_56; \ +}) +#else +#define 
vmlaq_lane_u16(__p0_57, __p1_57, __p2_57, __p3_57) __extension__ ({ \ + uint16x8_t __ret_57; \ + uint16x8_t __s0_57 = __p0_57; \ + uint16x8_t __s1_57 = __p1_57; \ + uint16x4_t __s2_57 = __p2_57; \ + uint16x8_t __rev0_57; __rev0_57 = __builtin_shufflevector(__s0_57, __s0_57, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_57; __rev1_57 = __builtin_shufflevector(__s1_57, __s1_57, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_57; __rev2_57 = __builtin_shufflevector(__s2_57, __s2_57, 3, 2, 1, 0); \ + __ret_57 = __rev0_57 + __rev1_57 * __noswap_splatq_lane_u16(__rev2_57, __p3_57); \ + __ret_57 = __builtin_shufflevector(__ret_57, __ret_57, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_57; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_lane_f32(__p0_58, __p1_58, __p2_58, __p3_58) __extension__ ({ \ + float32x4_t __ret_58; \ + float32x4_t __s0_58 = __p0_58; \ + float32x4_t __s1_58 = __p1_58; \ + float32x2_t __s2_58 = __p2_58; \ + __ret_58 = __s0_58 + __s1_58 * splatq_lane_f32(__s2_58, __p3_58); \ + __ret_58; \ +}) +#else +#define vmlaq_lane_f32(__p0_59, __p1_59, __p2_59, __p3_59) __extension__ ({ \ + float32x4_t __ret_59; \ + float32x4_t __s0_59 = __p0_59; \ + float32x4_t __s1_59 = __p1_59; \ + float32x2_t __s2_59 = __p2_59; \ + float32x4_t __rev0_59; __rev0_59 = __builtin_shufflevector(__s0_59, __s0_59, 3, 2, 1, 0); \ + float32x4_t __rev1_59; __rev1_59 = __builtin_shufflevector(__s1_59, __s1_59, 3, 2, 1, 0); \ + float32x2_t __rev2_59; __rev2_59 = __builtin_shufflevector(__s2_59, __s2_59, 1, 0); \ + __ret_59 = __rev0_59 + __rev1_59 * __noswap_splatq_lane_f32(__rev2_59, __p3_59); \ + __ret_59 = __builtin_shufflevector(__ret_59, __ret_59, 3, 2, 1, 0); \ + __ret_59; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_lane_s32(__p0_60, __p1_60, __p2_60, __p3_60) __extension__ ({ \ + int32x4_t __ret_60; \ + int32x4_t __s0_60 = __p0_60; \ + int32x4_t __s1_60 = __p1_60; \ + int32x2_t __s2_60 = __p2_60; \ + __ret_60 = __s0_60 + __s1_60 * splatq_lane_s32(__s2_60, __p3_60); \ + 
__ret_60; \ +}) +#else +#define vmlaq_lane_s32(__p0_61, __p1_61, __p2_61, __p3_61) __extension__ ({ \ + int32x4_t __ret_61; \ + int32x4_t __s0_61 = __p0_61; \ + int32x4_t __s1_61 = __p1_61; \ + int32x2_t __s2_61 = __p2_61; \ + int32x4_t __rev0_61; __rev0_61 = __builtin_shufflevector(__s0_61, __s0_61, 3, 2, 1, 0); \ + int32x4_t __rev1_61; __rev1_61 = __builtin_shufflevector(__s1_61, __s1_61, 3, 2, 1, 0); \ + int32x2_t __rev2_61; __rev2_61 = __builtin_shufflevector(__s2_61, __s2_61, 1, 0); \ + __ret_61 = __rev0_61 + __rev1_61 * __noswap_splatq_lane_s32(__rev2_61, __p3_61); \ + __ret_61 = __builtin_shufflevector(__ret_61, __ret_61, 3, 2, 1, 0); \ + __ret_61; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlaq_lane_s16(__p0_62, __p1_62, __p2_62, __p3_62) __extension__ ({ \ + int16x8_t __ret_62; \ + int16x8_t __s0_62 = __p0_62; \ + int16x8_t __s1_62 = __p1_62; \ + int16x4_t __s2_62 = __p2_62; \ + __ret_62 = __s0_62 + __s1_62 * splatq_lane_s16(__s2_62, __p3_62); \ + __ret_62; \ +}) +#else +#define vmlaq_lane_s16(__p0_63, __p1_63, __p2_63, __p3_63) __extension__ ({ \ + int16x8_t __ret_63; \ + int16x8_t __s0_63 = __p0_63; \ + int16x8_t __s1_63 = __p1_63; \ + int16x4_t __s2_63 = __p2_63; \ + int16x8_t __rev0_63; __rev0_63 = __builtin_shufflevector(__s0_63, __s0_63, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_63; __rev1_63 = __builtin_shufflevector(__s1_63, __s1_63, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_63; __rev2_63 = __builtin_shufflevector(__s2_63, __s2_63, 3, 2, 1, 0); \ + __ret_63 = __rev0_63 + __rev1_63 * __noswap_splatq_lane_s16(__rev2_63, __p3_63); \ + __ret_63 = __builtin_shufflevector(__ret_63, __ret_63, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_63; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_u32(__p0_64, __p1_64, __p2_64, __p3_64) __extension__ ({ \ + uint32x2_t __ret_64; \ + uint32x2_t __s0_64 = __p0_64; \ + uint32x2_t __s1_64 = __p1_64; \ + uint32x2_t __s2_64 = __p2_64; \ + __ret_64 = __s0_64 + __s1_64 * splat_lane_u32(__s2_64, __p3_64); 
\ + __ret_64; \ +}) +#else +#define vmla_lane_u32(__p0_65, __p1_65, __p2_65, __p3_65) __extension__ ({ \ + uint32x2_t __ret_65; \ + uint32x2_t __s0_65 = __p0_65; \ + uint32x2_t __s1_65 = __p1_65; \ + uint32x2_t __s2_65 = __p2_65; \ + uint32x2_t __rev0_65; __rev0_65 = __builtin_shufflevector(__s0_65, __s0_65, 1, 0); \ + uint32x2_t __rev1_65; __rev1_65 = __builtin_shufflevector(__s1_65, __s1_65, 1, 0); \ + uint32x2_t __rev2_65; __rev2_65 = __builtin_shufflevector(__s2_65, __s2_65, 1, 0); \ + __ret_65 = __rev0_65 + __rev1_65 * __noswap_splat_lane_u32(__rev2_65, __p3_65); \ + __ret_65 = __builtin_shufflevector(__ret_65, __ret_65, 1, 0); \ + __ret_65; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_u16(__p0_66, __p1_66, __p2_66, __p3_66) __extension__ ({ \ + uint16x4_t __ret_66; \ + uint16x4_t __s0_66 = __p0_66; \ + uint16x4_t __s1_66 = __p1_66; \ + uint16x4_t __s2_66 = __p2_66; \ + __ret_66 = __s0_66 + __s1_66 * splat_lane_u16(__s2_66, __p3_66); \ + __ret_66; \ +}) +#else +#define vmla_lane_u16(__p0_67, __p1_67, __p2_67, __p3_67) __extension__ ({ \ + uint16x4_t __ret_67; \ + uint16x4_t __s0_67 = __p0_67; \ + uint16x4_t __s1_67 = __p1_67; \ + uint16x4_t __s2_67 = __p2_67; \ + uint16x4_t __rev0_67; __rev0_67 = __builtin_shufflevector(__s0_67, __s0_67, 3, 2, 1, 0); \ + uint16x4_t __rev1_67; __rev1_67 = __builtin_shufflevector(__s1_67, __s1_67, 3, 2, 1, 0); \ + uint16x4_t __rev2_67; __rev2_67 = __builtin_shufflevector(__s2_67, __s2_67, 3, 2, 1, 0); \ + __ret_67 = __rev0_67 + __rev1_67 * __noswap_splat_lane_u16(__rev2_67, __p3_67); \ + __ret_67 = __builtin_shufflevector(__ret_67, __ret_67, 3, 2, 1, 0); \ + __ret_67; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_f32(__p0_68, __p1_68, __p2_68, __p3_68) __extension__ ({ \ + float32x2_t __ret_68; \ + float32x2_t __s0_68 = __p0_68; \ + float32x2_t __s1_68 = __p1_68; \ + float32x2_t __s2_68 = __p2_68; \ + __ret_68 = __s0_68 + __s1_68 * splat_lane_f32(__s2_68, __p3_68); \ + __ret_68; \ +}) +#else 
+#define vmla_lane_f32(__p0_69, __p1_69, __p2_69, __p3_69) __extension__ ({ \ + float32x2_t __ret_69; \ + float32x2_t __s0_69 = __p0_69; \ + float32x2_t __s1_69 = __p1_69; \ + float32x2_t __s2_69 = __p2_69; \ + float32x2_t __rev0_69; __rev0_69 = __builtin_shufflevector(__s0_69, __s0_69, 1, 0); \ + float32x2_t __rev1_69; __rev1_69 = __builtin_shufflevector(__s1_69, __s1_69, 1, 0); \ + float32x2_t __rev2_69; __rev2_69 = __builtin_shufflevector(__s2_69, __s2_69, 1, 0); \ + __ret_69 = __rev0_69 + __rev1_69 * __noswap_splat_lane_f32(__rev2_69, __p3_69); \ + __ret_69 = __builtin_shufflevector(__ret_69, __ret_69, 1, 0); \ + __ret_69; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_s32(__p0_70, __p1_70, __p2_70, __p3_70) __extension__ ({ \ + int32x2_t __ret_70; \ + int32x2_t __s0_70 = __p0_70; \ + int32x2_t __s1_70 = __p1_70; \ + int32x2_t __s2_70 = __p2_70; \ + __ret_70 = __s0_70 + __s1_70 * splat_lane_s32(__s2_70, __p3_70); \ + __ret_70; \ +}) +#else +#define vmla_lane_s32(__p0_71, __p1_71, __p2_71, __p3_71) __extension__ ({ \ + int32x2_t __ret_71; \ + int32x2_t __s0_71 = __p0_71; \ + int32x2_t __s1_71 = __p1_71; \ + int32x2_t __s2_71 = __p2_71; \ + int32x2_t __rev0_71; __rev0_71 = __builtin_shufflevector(__s0_71, __s0_71, 1, 0); \ + int32x2_t __rev1_71; __rev1_71 = __builtin_shufflevector(__s1_71, __s1_71, 1, 0); \ + int32x2_t __rev2_71; __rev2_71 = __builtin_shufflevector(__s2_71, __s2_71, 1, 0); \ + __ret_71 = __rev0_71 + __rev1_71 * __noswap_splat_lane_s32(__rev2_71, __p3_71); \ + __ret_71 = __builtin_shufflevector(__ret_71, __ret_71, 1, 0); \ + __ret_71; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmla_lane_s16(__p0_72, __p1_72, __p2_72, __p3_72) __extension__ ({ \ + int16x4_t __ret_72; \ + int16x4_t __s0_72 = __p0_72; \ + int16x4_t __s1_72 = __p1_72; \ + int16x4_t __s2_72 = __p2_72; \ + __ret_72 = __s0_72 + __s1_72 * splat_lane_s16(__s2_72, __p3_72); \ + __ret_72; \ +}) +#else +#define vmla_lane_s16(__p0_73, __p1_73, __p2_73, __p3_73) 
__extension__ ({ \ + int16x4_t __ret_73; \ + int16x4_t __s0_73 = __p0_73; \ + int16x4_t __s1_73 = __p1_73; \ + int16x4_t __s2_73 = __p2_73; \ + int16x4_t __rev0_73; __rev0_73 = __builtin_shufflevector(__s0_73, __s0_73, 3, 2, 1, 0); \ + int16x4_t __rev1_73; __rev1_73 = __builtin_shufflevector(__s1_73, __s1_73, 3, 2, 1, 0); \ + int16x4_t __rev2_73; __rev2_73 = __builtin_shufflevector(__s2_73, __s2_73, 3, 2, 1, 0); \ + __ret_73 = __rev0_73 + __rev1_73 * __noswap_splat_lane_s16(__rev2_73, __p3_73); \ + __ret_73 = __builtin_shufflevector(__ret_73, __ret_73, 3, 2, 1, 0); \ + __ret_73; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint32x4_t __ret; + __ret = __p0 + __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else -__ai int8x16_t vnegq_s8(int8x16_t __p0) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = -__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint32x4_t vmlaq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint16x8_t __ret; + __ret = __p0 + __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vmlaq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { + uint16x8_t __ret; 
+ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vnegq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { float32x4_t __ret; - __ret = -__p0; + __ret = __p0 + __p1 * (float32x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else -__ai float32x4_t vnegq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vmlaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = -__rev0; + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vnegq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { int32x4_t __ret; - __ret = -__p0; + __ret = __p0 + __p1 * (int32x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else -__ai int32x4_t vnegq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vmlaq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = -__rev0; + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ 
-__ai int16x8_t vnegq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { int16x8_t __ret; - __ret = -__p0; + __ret = __p0 + __p1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; return __ret; } #else -__ai int16x8_t vnegq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vmlaq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = -__rev0; + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vneg_s8(int8x8_t __p0) { - int8x8_t __ret; - __ret = -__p0; +__ai __attribute__((target("neon"))) uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint32x2_t __ret; + __ret = __p0 + __p1 * (uint32x2_t) {__p2, __p2}; return __ret; } #else -__ai int8x8_t vneg_s8(int8x8_t __p0) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = -__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint32x2_t vmla_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1 * (uint32x2_t) {__p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint16x4_t __ret; + __ret = __p0 + __p1 * (uint16x4_t) 
{__p2, __p2, __p2, __p2}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vmla_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vneg_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { float32x2_t __ret; - __ret = -__p0; + __ret = __p0 + __p1 * (float32x2_t) {__p2, __p2}; return __ret; } #else -__ai float32x2_t vneg_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vmla_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = -__rev0; + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1 * (float32x2_t) {__p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vneg_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { int32x2_t __ret; - __ret = -__p0; + __ret = __p0 + __p1 * (int32x2_t) {__p2, __p2}; return __ret; } #else -__ai int32x2_t vneg_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vmla_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = -__rev0; + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1 * (int32x2_t) {__p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } 
#endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vneg_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { int16x4_t __ret; - __ret = -__p0; + __ret = __p0 + __p1 * (int16x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else -__ai int16x4_t vneg_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vmla_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = -__rev0; + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 + __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; - __ret = __p0 | ~__p1; + __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vmlsq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 | ~__rev1; + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, 
uint32x4_t __p2) { uint32x4_t __ret; - __ret = __p0 | ~__p1; + __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vmlsq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 | ~__rev1; + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = __p0 | ~__p1; - return __ret; -} -#else -__ai uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 | ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; - __ret = __p0 | ~__p1; + __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vmlsq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 | ~__rev1; + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * 
__rev2; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; - __ret = __p0 | ~__p1; + __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vmlsq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 | ~__rev1; + int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = __p0 | ~__p1; +__ai __attribute__((target("neon"))) float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 | ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float32x4_t vmlsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 
= __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = __p0 | ~__p1; +__ai __attribute__((target("neon"))) int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 | ~__rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) int32x4_t vmlsq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; - __ret = __p0 | ~__p1; + __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmlsq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret 
= __rev0 | ~__rev1; + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; - __ret = __p0 | ~__p1; + __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vmls_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 | ~__rev1; + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint32x2_t __ret; - __ret = __p0 | ~__p1; + __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vmls_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 | ~__rev1; + uint32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai uint64x1_t 
vorn_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = __p0 | ~__p1; - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint16x4_t __ret; - __ret = __p0 | ~__p1; + __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vmls_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 | ~__rev1; + uint16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; - __ret = __p0 | ~__p1; + __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vmls_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 | ~__rev1; + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vmls_f32(float32x2_t __p0, 
float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = __p0 - __p1 * __p2; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vmls_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; - __ret = __p0 | ~__p1; + __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vmls_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 | ~__rev1; + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = __p0 | ~__p1; - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; - __ret = __p0 | ~__p1; + __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vmls_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, 
__p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 | ~__rev1; + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = __p0 | __p1; - return __ret; -} +#define vmlsq_lane_u32(__p0_74, __p1_74, __p2_74, __p3_74) __extension__ ({ \ + uint32x4_t __ret_74; \ + uint32x4_t __s0_74 = __p0_74; \ + uint32x4_t __s1_74 = __p1_74; \ + uint32x2_t __s2_74 = __p2_74; \ + __ret_74 = __s0_74 - __s1_74 * splatq_lane_u32(__s2_74, __p3_74); \ + __ret_74; \ +}) #else -__ai uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 | __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vmlsq_lane_u32(__p0_75, __p1_75, __p2_75, __p3_75) __extension__ ({ \ + uint32x4_t __ret_75; \ + uint32x4_t __s0_75 = __p0_75; \ + uint32x4_t __s1_75 = __p1_75; \ + uint32x2_t __s2_75 = __p2_75; \ + uint32x4_t __rev0_75; __rev0_75 = __builtin_shufflevector(__s0_75, __s0_75, 3, 2, 1, 0); \ + uint32x4_t __rev1_75; __rev1_75 = __builtin_shufflevector(__s1_75, __s1_75, 3, 2, 1, 0); \ + uint32x2_t __rev2_75; __rev2_75 = __builtin_shufflevector(__s2_75, __s2_75, 1, 0); \ + __ret_75 = __rev0_75 - __rev1_75 * __noswap_splatq_lane_u32(__rev2_75, __p3_75); \ + __ret_75 = __builtin_shufflevector(__ret_75, __ret_75, 3, 2, 1, 0); \ + __ret_75; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_lane_u16(__p0_76, __p1_76, __p2_76, __p3_76) 
__extension__ ({ \ + uint16x8_t __ret_76; \ + uint16x8_t __s0_76 = __p0_76; \ + uint16x8_t __s1_76 = __p1_76; \ + uint16x4_t __s2_76 = __p2_76; \ + __ret_76 = __s0_76 - __s1_76 * splatq_lane_u16(__s2_76, __p3_76); \ + __ret_76; \ +}) +#else +#define vmlsq_lane_u16(__p0_77, __p1_77, __p2_77, __p3_77) __extension__ ({ \ + uint16x8_t __ret_77; \ + uint16x8_t __s0_77 = __p0_77; \ + uint16x8_t __s1_77 = __p1_77; \ + uint16x4_t __s2_77 = __p2_77; \ + uint16x8_t __rev0_77; __rev0_77 = __builtin_shufflevector(__s0_77, __s0_77, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_77; __rev1_77 = __builtin_shufflevector(__s1_77, __s1_77, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_77; __rev2_77 = __builtin_shufflevector(__s2_77, __s2_77, 3, 2, 1, 0); \ + __ret_77 = __rev0_77 - __rev1_77 * __noswap_splatq_lane_u16(__rev2_77, __p3_77); \ + __ret_77 = __builtin_shufflevector(__ret_77, __ret_77, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_77; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_lane_f32(__p0_78, __p1_78, __p2_78, __p3_78) __extension__ ({ \ + float32x4_t __ret_78; \ + float32x4_t __s0_78 = __p0_78; \ + float32x4_t __s1_78 = __p1_78; \ + float32x2_t __s2_78 = __p2_78; \ + __ret_78 = __s0_78 - __s1_78 * splatq_lane_f32(__s2_78, __p3_78); \ + __ret_78; \ +}) +#else +#define vmlsq_lane_f32(__p0_79, __p1_79, __p2_79, __p3_79) __extension__ ({ \ + float32x4_t __ret_79; \ + float32x4_t __s0_79 = __p0_79; \ + float32x4_t __s1_79 = __p1_79; \ + float32x2_t __s2_79 = __p2_79; \ + float32x4_t __rev0_79; __rev0_79 = __builtin_shufflevector(__s0_79, __s0_79, 3, 2, 1, 0); \ + float32x4_t __rev1_79; __rev1_79 = __builtin_shufflevector(__s1_79, __s1_79, 3, 2, 1, 0); \ + float32x2_t __rev2_79; __rev2_79 = __builtin_shufflevector(__s2_79, __s2_79, 1, 0); \ + __ret_79 = __rev0_79 - __rev1_79 * __noswap_splatq_lane_f32(__rev2_79, __p3_79); \ + __ret_79 = __builtin_shufflevector(__ret_79, __ret_79, 3, 2, 1, 0); \ + __ret_79; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vmlsq_lane_s32(__p0_80, __p1_80, __p2_80, __p3_80) __extension__ ({ \ + int32x4_t __ret_80; \ + int32x4_t __s0_80 = __p0_80; \ + int32x4_t __s1_80 = __p1_80; \ + int32x2_t __s2_80 = __p2_80; \ + __ret_80 = __s0_80 - __s1_80 * splatq_lane_s32(__s2_80, __p3_80); \ + __ret_80; \ +}) +#else +#define vmlsq_lane_s32(__p0_81, __p1_81, __p2_81, __p3_81) __extension__ ({ \ + int32x4_t __ret_81; \ + int32x4_t __s0_81 = __p0_81; \ + int32x4_t __s1_81 = __p1_81; \ + int32x2_t __s2_81 = __p2_81; \ + int32x4_t __rev0_81; __rev0_81 = __builtin_shufflevector(__s0_81, __s0_81, 3, 2, 1, 0); \ + int32x4_t __rev1_81; __rev1_81 = __builtin_shufflevector(__s1_81, __s1_81, 3, 2, 1, 0); \ + int32x2_t __rev2_81; __rev2_81 = __builtin_shufflevector(__s2_81, __s2_81, 1, 0); \ + __ret_81 = __rev0_81 - __rev1_81 * __noswap_splatq_lane_s32(__rev2_81, __p3_81); \ + __ret_81 = __builtin_shufflevector(__ret_81, __ret_81, 3, 2, 1, 0); \ + __ret_81; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_lane_s16(__p0_82, __p1_82, __p2_82, __p3_82) __extension__ ({ \ + int16x8_t __ret_82; \ + int16x8_t __s0_82 = __p0_82; \ + int16x8_t __s1_82 = __p1_82; \ + int16x4_t __s2_82 = __p2_82; \ + __ret_82 = __s0_82 - __s1_82 * splatq_lane_s16(__s2_82, __p3_82); \ + __ret_82; \ +}) +#else +#define vmlsq_lane_s16(__p0_83, __p1_83, __p2_83, __p3_83) __extension__ ({ \ + int16x8_t __ret_83; \ + int16x8_t __s0_83 = __p0_83; \ + int16x8_t __s1_83 = __p1_83; \ + int16x4_t __s2_83 = __p2_83; \ + int16x8_t __rev0_83; __rev0_83 = __builtin_shufflevector(__s0_83, __s0_83, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_83; __rev1_83 = __builtin_shufflevector(__s1_83, __s1_83, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_83; __rev2_83 = __builtin_shufflevector(__s2_83, __s2_83, 3, 2, 1, 0); \ + __ret_83 = __rev0_83 - __rev1_83 * __noswap_splatq_lane_s16(__rev2_83, __p3_83); \ + __ret_83 = __builtin_shufflevector(__ret_83, __ret_83, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_83; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai 
uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) { +#define vmls_lane_u32(__p0_84, __p1_84, __p2_84, __p3_84) __extension__ ({ \ + uint32x2_t __ret_84; \ + uint32x2_t __s0_84 = __p0_84; \ + uint32x2_t __s1_84 = __p1_84; \ + uint32x2_t __s2_84 = __p2_84; \ + __ret_84 = __s0_84 - __s1_84 * splat_lane_u32(__s2_84, __p3_84); \ + __ret_84; \ +}) +#else +#define vmls_lane_u32(__p0_85, __p1_85, __p2_85, __p3_85) __extension__ ({ \ + uint32x2_t __ret_85; \ + uint32x2_t __s0_85 = __p0_85; \ + uint32x2_t __s1_85 = __p1_85; \ + uint32x2_t __s2_85 = __p2_85; \ + uint32x2_t __rev0_85; __rev0_85 = __builtin_shufflevector(__s0_85, __s0_85, 1, 0); \ + uint32x2_t __rev1_85; __rev1_85 = __builtin_shufflevector(__s1_85, __s1_85, 1, 0); \ + uint32x2_t __rev2_85; __rev2_85 = __builtin_shufflevector(__s2_85, __s2_85, 1, 0); \ + __ret_85 = __rev0_85 - __rev1_85 * __noswap_splat_lane_u32(__rev2_85, __p3_85); \ + __ret_85 = __builtin_shufflevector(__ret_85, __ret_85, 1, 0); \ + __ret_85; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_u16(__p0_86, __p1_86, __p2_86, __p3_86) __extension__ ({ \ + uint16x4_t __ret_86; \ + uint16x4_t __s0_86 = __p0_86; \ + uint16x4_t __s1_86 = __p1_86; \ + uint16x4_t __s2_86 = __p2_86; \ + __ret_86 = __s0_86 - __s1_86 * splat_lane_u16(__s2_86, __p3_86); \ + __ret_86; \ +}) +#else +#define vmls_lane_u16(__p0_87, __p1_87, __p2_87, __p3_87) __extension__ ({ \ + uint16x4_t __ret_87; \ + uint16x4_t __s0_87 = __p0_87; \ + uint16x4_t __s1_87 = __p1_87; \ + uint16x4_t __s2_87 = __p2_87; \ + uint16x4_t __rev0_87; __rev0_87 = __builtin_shufflevector(__s0_87, __s0_87, 3, 2, 1, 0); \ + uint16x4_t __rev1_87; __rev1_87 = __builtin_shufflevector(__s1_87, __s1_87, 3, 2, 1, 0); \ + uint16x4_t __rev2_87; __rev2_87 = __builtin_shufflevector(__s2_87, __s2_87, 3, 2, 1, 0); \ + __ret_87 = __rev0_87 - __rev1_87 * __noswap_splat_lane_u16(__rev2_87, __p3_87); \ + __ret_87 = __builtin_shufflevector(__ret_87, __ret_87, 3, 2, 1, 0); \ + __ret_87; \ +}) +#endif + 
+#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_f32(__p0_88, __p1_88, __p2_88, __p3_88) __extension__ ({ \ + float32x2_t __ret_88; \ + float32x2_t __s0_88 = __p0_88; \ + float32x2_t __s1_88 = __p1_88; \ + float32x2_t __s2_88 = __p2_88; \ + __ret_88 = __s0_88 - __s1_88 * splat_lane_f32(__s2_88, __p3_88); \ + __ret_88; \ +}) +#else +#define vmls_lane_f32(__p0_89, __p1_89, __p2_89, __p3_89) __extension__ ({ \ + float32x2_t __ret_89; \ + float32x2_t __s0_89 = __p0_89; \ + float32x2_t __s1_89 = __p1_89; \ + float32x2_t __s2_89 = __p2_89; \ + float32x2_t __rev0_89; __rev0_89 = __builtin_shufflevector(__s0_89, __s0_89, 1, 0); \ + float32x2_t __rev1_89; __rev1_89 = __builtin_shufflevector(__s1_89, __s1_89, 1, 0); \ + float32x2_t __rev2_89; __rev2_89 = __builtin_shufflevector(__s2_89, __s2_89, 1, 0); \ + __ret_89 = __rev0_89 - __rev1_89 * __noswap_splat_lane_f32(__rev2_89, __p3_89); \ + __ret_89 = __builtin_shufflevector(__ret_89, __ret_89, 1, 0); \ + __ret_89; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_s32(__p0_90, __p1_90, __p2_90, __p3_90) __extension__ ({ \ + int32x2_t __ret_90; \ + int32x2_t __s0_90 = __p0_90; \ + int32x2_t __s1_90 = __p1_90; \ + int32x2_t __s2_90 = __p2_90; \ + __ret_90 = __s0_90 - __s1_90 * splat_lane_s32(__s2_90, __p3_90); \ + __ret_90; \ +}) +#else +#define vmls_lane_s32(__p0_91, __p1_91, __p2_91, __p3_91) __extension__ ({ \ + int32x2_t __ret_91; \ + int32x2_t __s0_91 = __p0_91; \ + int32x2_t __s1_91 = __p1_91; \ + int32x2_t __s2_91 = __p2_91; \ + int32x2_t __rev0_91; __rev0_91 = __builtin_shufflevector(__s0_91, __s0_91, 1, 0); \ + int32x2_t __rev1_91; __rev1_91 = __builtin_shufflevector(__s1_91, __s1_91, 1, 0); \ + int32x2_t __rev2_91; __rev2_91 = __builtin_shufflevector(__s2_91, __s2_91, 1, 0); \ + __ret_91 = __rev0_91 - __rev1_91 * __noswap_splat_lane_s32(__rev2_91, __p3_91); \ + __ret_91 = __builtin_shufflevector(__ret_91, __ret_91, 1, 0); \ + __ret_91; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmls_lane_s16(__p0_92, 
__p1_92, __p2_92, __p3_92) __extension__ ({ \ + int16x4_t __ret_92; \ + int16x4_t __s0_92 = __p0_92; \ + int16x4_t __s1_92 = __p1_92; \ + int16x4_t __s2_92 = __p2_92; \ + __ret_92 = __s0_92 - __s1_92 * splat_lane_s16(__s2_92, __p3_92); \ + __ret_92; \ +}) +#else +#define vmls_lane_s16(__p0_93, __p1_93, __p2_93, __p3_93) __extension__ ({ \ + int16x4_t __ret_93; \ + int16x4_t __s0_93 = __p0_93; \ + int16x4_t __s1_93 = __p1_93; \ + int16x4_t __s2_93 = __p2_93; \ + int16x4_t __rev0_93; __rev0_93 = __builtin_shufflevector(__s0_93, __s0_93, 3, 2, 1, 0); \ + int16x4_t __rev1_93; __rev1_93 = __builtin_shufflevector(__s1_93, __s1_93, 3, 2, 1, 0); \ + int16x4_t __rev2_93; __rev2_93 = __builtin_shufflevector(__s2_93, __s2_93, 3, 2, 1, 0); \ + __ret_93 = __rev0_93 - __rev1_93 * __noswap_splat_lane_s16(__rev2_93, __p3_93); \ + __ret_93 = __builtin_shufflevector(__ret_93, __ret_93, 3, 2, 1, 0); \ + __ret_93; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { uint32x4_t __ret; - __ret = __p0 | __p1; + __ret = __p0 - __p1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else -__ai uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vmlsq_n_u32(uint32x4_t __p0, uint32x4_t __p1, uint32_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 | __rev1; + __ret = __rev0 - __rev1 * (uint32x4_t) {__p2, __p2, __p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = __p0 | __p1; - return __ret; -} -#else -__ai uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 | __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { uint16x8_t __ret; - __ret = __p0 | __p1; + __ret = __p0 - __p1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; return __ret; } #else -__ai uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vmlsq_n_u16(uint16x8_t __p0, uint16x8_t __p1, uint16_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 | __rev1; + __ret = __rev0 - __rev1 * (uint16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = __p0 | __p1; +__ai __attribute__((target("neon"))) float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + __ret = __p0 - __p1 * (float32x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else -__ai int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 | __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) 
float32x4_t vmlsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1 * (float32x4_t) {__p2, __p2, __p2, __p2}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { int32x4_t __ret; - __ret = __p0 | __p1; + __ret = __p0 - __p1 * (int32x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else -__ai int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vmlsq_n_s32(int32x4_t __p0, int32x4_t __p1, int32_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 | __rev1; + __ret = __rev0 - __rev1 * (int32x4_t) {__p2, __p2, __p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = __p0 | __p1; - return __ret; -} -#else -__ai int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 | __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { int16x8_t __ret; - __ret = __p0 | __p1; + __ret = __p0 - __p1 * (int16x8_t) 
{__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; return __ret; } #else -__ai int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmlsq_n_s16(int16x8_t __p0, int16x8_t __p1, int16_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 | __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = __p0 | __p1; - return __ret; -} -#else -__ai uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 | __rev1; + __ret = __rev0 - __rev1 * (int16x8_t) {__p2, __p2, __p2, __p2, __p2, __p2, __p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint32x2_t __ret; - __ret = __p0 | __p1; + __ret = __p0 - __p1 * (uint32x2_t) {__p2, __p2}; return __ret; } #else -__ai uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vmls_n_u32(uint32x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 | __rev1; + __ret = __rev0 - __rev1 * (uint32x2_t) {__p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai 
uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = __p0 | __p1; - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint16x4_t __ret; - __ret = __p0 | __p1; + __ret = __p0 - __p1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else -__ai uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vmls_n_u16(uint16x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 | __rev1; + __ret = __rev0 - __rev1 * (uint16x4_t) {__p2, __p2, __p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = __p0 | __p1; +__ai __attribute__((target("neon"))) float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + __ret = __p0 - __p1 * (float32x2_t) {__p2, __p2}; return __ret; } #else -__ai int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 | __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float32x2_t vmls_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __rev1 * (float32x2_t) {__p2, __p2}; + __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { int32x2_t __ret; - __ret = __p0 | __p1; + __ret = __p0 - __p1 * (int32x2_t) {__p2, __p2}; return __ret; } #else -__ai int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vmls_n_s32(int32x2_t __p0, int32x2_t __p1, int32_t __p2) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 | __rev1; + __ret = __rev0 - __rev1 * (int32x2_t) {__p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = __p0 | __p1; - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { int16x4_t __ret; - __ret = __p0 | __p1; + __ret = __p0 - __p1 * (int16x4_t) {__p2, __p2, __p2, __p2}; return __ret; } #else -__ai int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vmls_n_s16(int16x4_t __p0, int16x4_t __p1, int16_t __p2) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 | __rev1; + __ret = __rev0 - __rev1 * (int16x4_t) {__p2, __p2, __p2, __p2}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); +__ai 
__attribute__((target("neon"))) poly8x8_t vmov_n_p8(poly8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); +__ai __attribute__((target("neon"))) poly8x8_t vmov_n_p8(poly8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); +__ai __attribute__((target("neon"))) poly16x4_t vmov_n_p16(poly16_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else -__ai uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) poly16x4_t vmov_n_p16(poly16_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 
50); +__ai __attribute__((target("neon"))) poly8x16_t vmovq_n_p8(poly8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly8x16_t vmovq_n_p8(poly8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); +__ai __attribute__((target("neon"))) poly16x8_t vmovq_n_p16(poly16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); +__ai __attribute__((target("neon"))) poly16x8_t vmovq_n_p16(poly16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); 
return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); +__ai __attribute__((target("neon"))) uint8x16_t vmovq_n_u8(uint8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint8x16_t vmovq_n_u8(uint8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); +__ai __attribute__((target("neon"))) uint32x4_t vmovq_n_u32(uint32_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else -__ai int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); +__ai __attribute__((target("neon"))) uint32x4_t vmovq_n_u32(uint32_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) 
{__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 17); +__ai __attribute__((target("neon"))) uint64x2_t vmovq_n_u64(uint64_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) {__p0, __p0}; return __ret; } #else -__ai uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint64x2_t vmovq_n_u64(uint64_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 19); +__ai __attribute__((target("neon"))) uint16x8_t vmovq_n_u16(uint16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) { - uint64x1_t __ret; - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 19); +__ai __attribute__((target("neon"))) uint16x8_t vmovq_n_u16(uint16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai 
uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 18); +__ai __attribute__((target("neon"))) int8x16_t vmovq_n_s8(int8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) int8x16_t vmovq_n_s8(int8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 1); +__ai __attribute__((target("neon"))) float32x4_t vmovq_n_f32(float32_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else -__ai int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); +__ai __attribute__((target("neon"))) float32x4_t vmovq_n_f32(float32_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, 
__ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 3); +#define vmovq_n_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ + __ret; \ +}) +#else +#define vmovq_n_f16(__p0) __extension__ ({ \ + float16x8_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16x8_t) {__s0, __s0, __s0, __s0, __s0, __s0, __s0, __s0}; \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vmovq_n_s32(int32_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else -__ai int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) { - int64x1_t __ret; - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 3); +__ai __attribute__((target("neon"))) int32x4_t vmovq_n_s32(int32_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) {__p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 2); +__ai __attribute__((target("neon"))) int64x2_t vmovq_n_s64(int64_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) {__p0, __p0}; return __ret; } #else -__ai int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, 
(int8x8_t)__rev1, 2); +__ai __attribute__((target("neon"))) int64x2_t vmovq_n_s64(int64_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmovq_n_s16(int16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vmovq_n_s16(int16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vmov_n_u8(uint8_t __p0) { uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vmov_n_u8(uint8_t __p0) { uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = (uint8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vmov_n_u32(uint32_t __p0) { uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + __ret = (uint32x2_t) {__p0, __p0}; return __ret; } #else -__ai uint32x2_t 
vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vmov_n_u32(uint32_t __p0) { uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = (uint32x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif +__ai __attribute__((target("neon"))) uint64x1_t vmov_n_u64(uint64_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) {__p0}; + return __ret; +} #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vmov_n_u16(uint16_t __p0) { uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else -__ai uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vmov_n_u16(uint16_t __p0) { uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = (uint16x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vmov_n_s8(int8_t __p0) { int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; return __ret; } #else -__ai int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vmov_n_s8(int8_t __p0) { int8x8_t __ret; - 
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = (int8x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vmov_n_f32(float32_t __p0) { float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + __ret = (float32x2_t) {__p0, __p0}; return __ret; } #else -__ai float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vmov_n_f32(float32_t __p0) { float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = (float32x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) { +#define vmov_n_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ + __ret; \ +}) +#else +#define vmov_n_f16(__p0) __extension__ ({ \ + float16x4_t __ret; \ + float16_t __s0 = __p0; \ + __ret = (float16x4_t) {__s0, __s0, __s0, __s0}; \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vmov_n_s32(int32_t __p0) { int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + __ret = (int32x2_t) {__p0, __p0}; 
return __ret; } #else -__ai int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vmov_n_s32(int32_t __p0) { int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = (int32x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif +__ai __attribute__((target("neon"))) int64x1_t vmov_n_s64(int64_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) {__p0}; + return __ret; +} #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vmov_n_s16(int16_t __p0) { int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; return __ret; } #else -__ai int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vmov_n_s16(int16_t __p0) { int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = (int16x4_t) {__p0, __p0, __p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vmovl_u8(uint8x8_t __p0) { uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 49); + __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49); return __ret; } #else -__ai uint16x8_t vpaddlq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vmovl_u8(uint8x8_t __p0) { uint16x8_t __ret; - uint8x16_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 49); + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vmovl_u8(uint8x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 49); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vmovl_u32(uint32x2_t __p0) { uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 51); + __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51); return __ret; } #else -__ai uint64x2_t vpaddlq_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vmovl_u32(uint32x2_t __p0) { uint64x2_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 51); + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmovl_u32(uint32x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 51); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vpaddlq_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vmovl_u16(uint16x4_t __p0) { uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 50); + __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50); return __ret; } #else -__ai uint32x4_t 
vpaddlq_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vmovl_u16(uint16x4_t __p0) { uint32x4_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 50); + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmovl_u16(uint16x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 50); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vpaddlq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vmovl_s8(int8x8_t __p0) { int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 33); + __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33); return __ret; } #else -__ai int16x8_t vpaddlq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vmovl_s8(int8x8_t __p0) { int16x8_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 33); + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) int16x8_t __noswap_vmovl_s8(int8x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 33); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vpaddlq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vmovl_s32(int32x2_t __p0) { int64x2_t __ret; - __ret = (int64x2_t) 
__builtin_neon_vpaddlq_v((int8x16_t)__p0, 35); + __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35); return __ret; } #else -__ai int64x2_t vpaddlq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vmovl_s32(int32x2_t __p0) { int64x2_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 35); + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) int64x2_t __noswap_vmovl_s32(int32x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 35); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vpaddlq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vmovl_s16(int16x4_t __p0) { int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 34); + __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34); return __ret; } #else -__ai int32x4_t vpaddlq_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vmovl_s16(int16x4_t __p0) { int32x4_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 34); + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__rev0, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) int32x4_t __noswap_vmovl_s16(int16x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vmovl_v((int8x8_t)__p0, 34); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t 
vmovn_u32(uint32x4_t __p0) { uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 17); + __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17); return __ret; } #else -__ai uint16x4_t vpaddl_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vmovn_u32(uint32x4_t __p0) { uint16x4_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 17); + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 19); - return __ret; -} -#else -__ai uint64x1_t vpaddl_u32(uint32x2_t __p0) { - uint64x1_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 19); +__ai __attribute__((target("neon"))) uint16x4_t __noswap_vmovn_u32(uint32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 17); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vmovn_u64(uint64x2_t __p0) { uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 18); + __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18); return __ret; } #else -__ai uint32x2_t vpaddl_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vmovn_u64(uint64x2_t __p0) { uint32x2_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 18); + uint64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) uint32x2_t __noswap_vmovn_u64(uint64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 18); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vpaddl_s8(int8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 1); +__ai __attribute__((target("neon"))) uint8x8_t vmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16); return __ret; } #else -__ai int16x4_t vpaddl_s8(int8x8_t __p0) { - int16x4_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8x8_t vmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t __noswap_vmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 16); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x1_t vpaddl_s32(int32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 3); +__ai __attribute__((target("neon"))) int16x4_t vmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1); return __ret; } #else -__ai int64x1_t vpaddl_s32(int32x2_t __p0) { - int64x1_t __ret; - int32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 3); +__ai __attribute__((target("neon"))) int16x4_t vmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t __noswap_vmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 1); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vpaddl_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vmovn_s64(int64x2_t __p0) { int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 2); + __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2); return __ret; } #else -__ai int32x2_t vpaddl_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vmovn_s64(int64x2_t __p0) { int32x2_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 2); + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) int32x2_t __noswap_vmovn_s64(int64x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 2); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16); +__ai __attribute__((target("neon"))) int8x8_t vmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0); return __ret; } #else 
-__ai uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); +__ai __attribute__((target("neon"))) int8x8_t vmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__rev0, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) int8x8_t __noswap_vmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vmovn_v((int8x16_t)__p0, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18); +__ai __attribute__((target("neon"))) uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 * __p1; return __ret; } #else -__ai uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint8x16_t vmulq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = 
__builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17); +__ai __attribute__((target("neon"))) uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 * __p1; return __ret; } #else -__ai uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); +__ai __attribute__((target("neon"))) uint32x4_t vmulq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 0); +__ai __attribute__((target("neon"))) uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 * __p1; return __ret; } #else -__ai int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); +__ai __attribute__((target("neon"))) uint16x8_t vmulq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9); +__ai __attribute__((target("neon"))) int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 * __p1; return __ret; } #else -__ai float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) int8x16_t vmulq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2); +__ai __attribute__((target("neon"))) float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __p0 * __p1; return __ret; } #else -__ai int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 
0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) float32x4_t vmulq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1); +__ai __attribute__((target("neon"))) int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 * __p1; return __ret; } #else -__ai int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); +__ai __attribute__((target("neon"))) int32x4_t vmulq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 * __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vmulq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + 
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 * __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + __ret = __p0 * __p1; return __ret; } #else -__ai uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vmul_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + __ret = __p0 * __p1; return __ret; } #else -__ai uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vmul_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t 
vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + __ret = __p0 * __p1; return __ret; } #else -__ai uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vmul_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + __ret = __p0 * __p1; return __ret; } #else -__ai int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vmul_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + __ret = __p0 * __p1; return __ret; } #else -__ai 
float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vmul_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + __ret = __p0 * __p1; return __ret; } #else -__ai int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vmul_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + __ret = __p0 * __p1; return __ret; } #else -__ai int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vmul_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int16x4_t) 
__builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqabsq_s8(int8x16_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 32); +__ai __attribute__((target("neon"))) poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__p0, (int8x8_t)__p1, 4); return __ret; } #else -__ai int8x16_t vqabsq_s8(int8x16_t __p0) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly8x8_t vmul_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vmul_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqabsq_s32(int32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 34); +__ai __attribute__((target("neon"))) poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__p0, (int8x16_t)__p1, 36); return __ret; } #else -__ai int32x4_t vqabsq_s32(int32x4_t __p0) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 34); - __ret = __builtin_shufflevector(__ret, 
__ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly8x16_t vmulq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vmulq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqabsq_s16(int16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 33); - return __ret; -} +#define vmulq_lane_u32(__p0_94, __p1_94, __p2_94) __extension__ ({ \ + uint32x4_t __ret_94; \ + uint32x4_t __s0_94 = __p0_94; \ + uint32x2_t __s1_94 = __p1_94; \ + __ret_94 = __s0_94 * splatq_lane_u32(__s1_94, __p2_94); \ + __ret_94; \ +}) #else -__ai int16x8_t vqabsq_s16(int16x8_t __p0) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vmulq_lane_u32(__p0_95, __p1_95, __p2_95) __extension__ ({ \ + uint32x4_t __ret_95; \ + uint32x4_t __s0_95 = __p0_95; \ + uint32x2_t __s1_95 = __p1_95; \ + uint32x4_t __rev0_95; __rev0_95 = __builtin_shufflevector(__s0_95, __s0_95, 3, 2, 1, 0); \ + uint32x2_t __rev1_95; __rev1_95 = __builtin_shufflevector(__s1_95, __s1_95, 1, 0); \ + __ret_95 = __rev0_95 * __noswap_splatq_lane_u32(__rev1_95, __p2_95); \ + __ret_95 = __builtin_shufflevector(__ret_95, __ret_95, 3, 2, 1, 0); \ + __ret_95; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqabs_s8(int8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 0); - return 
__ret; -} +#define vmulq_lane_u16(__p0_96, __p1_96, __p2_96) __extension__ ({ \ + uint16x8_t __ret_96; \ + uint16x8_t __s0_96 = __p0_96; \ + uint16x4_t __s1_96 = __p1_96; \ + __ret_96 = __s0_96 * splatq_lane_u16(__s1_96, __p2_96); \ + __ret_96; \ +}) #else -__ai int8x8_t vqabs_s8(int8x8_t __p0) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vmulq_lane_u16(__p0_97, __p1_97, __p2_97) __extension__ ({ \ + uint16x8_t __ret_97; \ + uint16x8_t __s0_97 = __p0_97; \ + uint16x4_t __s1_97 = __p1_97; \ + uint16x8_t __rev0_97; __rev0_97 = __builtin_shufflevector(__s0_97, __s0_97, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev1_97; __rev1_97 = __builtin_shufflevector(__s1_97, __s1_97, 3, 2, 1, 0); \ + __ret_97 = __rev0_97 * __noswap_splatq_lane_u16(__rev1_97, __p2_97); \ + __ret_97 = __builtin_shufflevector(__ret_97, __ret_97, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_97; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqabs_s32(int32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 2); - return __ret; -} +#define vmulq_lane_f32(__p0_98, __p1_98, __p2_98) __extension__ ({ \ + float32x4_t __ret_98; \ + float32x4_t __s0_98 = __p0_98; \ + float32x2_t __s1_98 = __p1_98; \ + __ret_98 = __s0_98 * splatq_lane_f32(__s1_98, __p2_98); \ + __ret_98; \ +}) #else -__ai int32x2_t vqabs_s32(int32x2_t __p0) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vmulq_lane_f32(__p0_99, __p1_99, __p2_99) __extension__ ({ \ + float32x4_t __ret_99; \ + float32x4_t __s0_99 = __p0_99; \ + float32x2_t __s1_99 = __p1_99; \ + float32x4_t __rev0_99; 
__rev0_99 = __builtin_shufflevector(__s0_99, __s0_99, 3, 2, 1, 0); \ + float32x2_t __rev1_99; __rev1_99 = __builtin_shufflevector(__s1_99, __s1_99, 1, 0); \ + __ret_99 = __rev0_99 * __noswap_splatq_lane_f32(__rev1_99, __p2_99); \ + __ret_99 = __builtin_shufflevector(__ret_99, __ret_99, 3, 2, 1, 0); \ + __ret_99; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqabs_s16(int16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 1); - return __ret; -} +#define vmulq_lane_s32(__p0_100, __p1_100, __p2_100) __extension__ ({ \ + int32x4_t __ret_100; \ + int32x4_t __s0_100 = __p0_100; \ + int32x2_t __s1_100 = __p1_100; \ + __ret_100 = __s0_100 * splatq_lane_s32(__s1_100, __p2_100); \ + __ret_100; \ +}) #else -__ai int16x4_t vqabs_s16(int16x4_t __p0) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vmulq_lane_s32(__p0_101, __p1_101, __p2_101) __extension__ ({ \ + int32x4_t __ret_101; \ + int32x4_t __s0_101 = __p0_101; \ + int32x2_t __s1_101 = __p1_101; \ + int32x4_t __rev0_101; __rev0_101 = __builtin_shufflevector(__s0_101, __s0_101, 3, 2, 1, 0); \ + int32x2_t __rev1_101; __rev1_101 = __builtin_shufflevector(__s1_101, __s1_101, 1, 0); \ + __ret_101 = __rev0_101 * __noswap_splatq_lane_s32(__rev1_101, __p2_101); \ + __ret_101 = __builtin_shufflevector(__ret_101, __ret_101, 3, 2, 1, 0); \ + __ret_101; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} +#define vmulq_lane_s16(__p0_102, __p1_102, __p2_102) __extension__ ({ \ + int16x8_t __ret_102; \ + int16x8_t __s0_102 = __p0_102; \ + int16x4_t __s1_102 = __p1_102; \ + __ret_102 = __s0_102 * 
splatq_lane_s16(__s1_102, __p2_102); \ + __ret_102; \ +}) #else -__ai uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vmulq_lane_s16(__p0_103, __p1_103, __p2_103) __extension__ ({ \ + int16x8_t __ret_103; \ + int16x8_t __s0_103 = __p0_103; \ + int16x4_t __s1_103 = __p1_103; \ + int16x8_t __rev0_103; __rev0_103 = __builtin_shufflevector(__s0_103, __s0_103, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_103; __rev1_103 = __builtin_shufflevector(__s1_103, __s1_103, 3, 2, 1, 0); \ + __ret_103 = __rev0_103 * __noswap_splatq_lane_s16(__rev1_103, __p2_103); \ + __ret_103 = __builtin_shufflevector(__ret_103, __ret_103, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_103; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} +#define vmul_lane_u32(__p0_104, __p1_104, __p2_104) __extension__ ({ \ + uint32x2_t __ret_104; \ + uint32x2_t __s0_104 = __p0_104; \ + uint32x2_t __s1_104 = __p1_104; \ + __ret_104 = __s0_104 * splat_lane_u32(__s1_104, __p2_104); \ + __ret_104; \ +}) #else -__ai uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vmul_lane_u32(__p0_105, __p1_105, __p2_105) __extension__ ({ \ + uint32x2_t __ret_105; \ + uint32x2_t __s0_105 = __p0_105; \ + uint32x2_t __s1_105 = __p1_105; \ + uint32x2_t __rev0_105; __rev0_105 = __builtin_shufflevector(__s0_105, __s0_105, 1, 0); \ + uint32x2_t __rev1_105; __rev1_105 = __builtin_shufflevector(__s1_105, __s1_105, 1, 0); \ + __ret_105 = __rev0_105 * __noswap_splat_lane_u32(__rev1_105, __p2_105); \ + __ret_105 = __builtin_shufflevector(__ret_105, __ret_105, 1, 0); \ + __ret_105; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); +#define vmul_lane_u16(__p0_106, __p1_106, __p2_106) __extension__ ({ \ + uint16x4_t __ret_106; \ + uint16x4_t __s0_106 = __p0_106; \ + uint16x4_t __s1_106 = __p1_106; \ + __ret_106 = __s0_106 * splat_lane_u16(__s1_106, __p2_106); \ + __ret_106; \ +}) +#else +#define vmul_lane_u16(__p0_107, __p1_107, __p2_107) __extension__ ({ \ + uint16x4_t __ret_107; \ + uint16x4_t __s0_107 = __p0_107; \ + uint16x4_t __s1_107 = __p1_107; \ + uint16x4_t __rev0_107; __rev0_107 = __builtin_shufflevector(__s0_107, __s0_107, 3, 2, 1, 0); \ + uint16x4_t __rev1_107; __rev1_107 = __builtin_shufflevector(__s1_107, __s1_107, 3, 2, 1, 0); \ + __ret_107 = __rev0_107 * __noswap_splat_lane_u16(__rev1_107, __p2_107); \ + __ret_107 = __builtin_shufflevector(__ret_107, __ret_107, 3, 2, 1, 0); \ + __ret_107; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_f32(__p0_108, __p1_108, __p2_108) __extension__ ({ \ + float32x2_t __ret_108; \ + float32x2_t __s0_108 = __p0_108; \ + float32x2_t __s1_108 = __p1_108; \ + __ret_108 = __s0_108 * splat_lane_f32(__s1_108, __p2_108); \ + __ret_108; \ +}) +#else +#define vmul_lane_f32(__p0_109, __p1_109, __p2_109) __extension__ ({ \ + float32x2_t __ret_109; \ + float32x2_t 
__s0_109 = __p0_109; \ + float32x2_t __s1_109 = __p1_109; \ + float32x2_t __rev0_109; __rev0_109 = __builtin_shufflevector(__s0_109, __s0_109, 1, 0); \ + float32x2_t __rev1_109; __rev1_109 = __builtin_shufflevector(__s1_109, __s1_109, 1, 0); \ + __ret_109 = __rev0_109 * __noswap_splat_lane_f32(__rev1_109, __p2_109); \ + __ret_109 = __builtin_shufflevector(__ret_109, __ret_109, 1, 0); \ + __ret_109; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_s32(__p0_110, __p1_110, __p2_110) __extension__ ({ \ + int32x2_t __ret_110; \ + int32x2_t __s0_110 = __p0_110; \ + int32x2_t __s1_110 = __p1_110; \ + __ret_110 = __s0_110 * splat_lane_s32(__s1_110, __p2_110); \ + __ret_110; \ +}) +#else +#define vmul_lane_s32(__p0_111, __p1_111, __p2_111) __extension__ ({ \ + int32x2_t __ret_111; \ + int32x2_t __s0_111 = __p0_111; \ + int32x2_t __s1_111 = __p1_111; \ + int32x2_t __rev0_111; __rev0_111 = __builtin_shufflevector(__s0_111, __s0_111, 1, 0); \ + int32x2_t __rev1_111; __rev1_111 = __builtin_shufflevector(__s1_111, __s1_111, 1, 0); \ + __ret_111 = __rev0_111 * __noswap_splat_lane_s32(__rev1_111, __p2_111); \ + __ret_111 = __builtin_shufflevector(__ret_111, __ret_111, 1, 0); \ + __ret_111; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_s16(__p0_112, __p1_112, __p2_112) __extension__ ({ \ + int16x4_t __ret_112; \ + int16x4_t __s0_112 = __p0_112; \ + int16x4_t __s1_112 = __p1_112; \ + __ret_112 = __s0_112 * splat_lane_s16(__s1_112, __p2_112); \ + __ret_112; \ +}) +#else +#define vmul_lane_s16(__p0_113, __p1_113, __p2_113) __extension__ ({ \ + int16x4_t __ret_113; \ + int16x4_t __s0_113 = __p0_113; \ + int16x4_t __s1_113 = __p1_113; \ + int16x4_t __rev0_113; __rev0_113 = __builtin_shufflevector(__s0_113, __s0_113, 3, 2, 1, 0); \ + int16x4_t __rev1_113; __rev1_113 = __builtin_shufflevector(__s1_113, __s1_113, 3, 2, 1, 0); \ + __ret_113 = __rev0_113 * __noswap_splat_lane_s16(__rev1_113, __p2_113); \ + __ret_113 = __builtin_shufflevector(__ret_113, __ret_113, 
3, 2, 1, 0); \ + __ret_113; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) { + uint32x4_t __ret; + __ret = __p0 * (uint32x4_t) {__p1, __p1, __p1, __p1}; return __ret; } #else -__ai uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint32x4_t vmulq_n_u32(uint32x4_t __p0, uint32_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __rev0 * (uint32x4_t) {__p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) { uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + __ret = __p0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; return __ret; } #else -__ai uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vmulq_n_u16(uint16x8_t __p0, uint16_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __rev0 * (uint16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef 
__LITTLE_ENDIAN__ -__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); +__ai __attribute__((target("neon"))) float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) { + float32x4_t __ret; + __ret = __p0 * (float32x4_t) {__p1, __p1, __p1, __p1}; return __ret; } #else -__ai int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float32x4_t vmulq_n_f32(float32x4_t __p0, float32_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __rev0 * (float32x4_t) {__p1, __p1, __p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) { int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + __ret = __p0 * (int32x4_t) {__p1, __p1, __p1, __p1}; return __ret; } #else -__ai int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vmulq_n_s32(int32x4_t __p0, int32_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 
34); + __ret = __rev0 * (int32x4_t) {__p1, __p1, __p1, __p1}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); - return __ret; -} -#else -__ai int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) { int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + __ret = __p0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; return __ret; } #else -__ai int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmulq_n_s16(int16x8_t __p0, int16_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __rev0 * (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) { uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + __ret = __p0 * (uint32x2_t) {__p1, __p1}; return __ret; } #else -__ai uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vmul_n_u32(uint32x2_t __p0, uint32_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __rev0 * (uint32x2_t) {__p1, __p1}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) { uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + __ret = __p0 * (uint16x4_t) {__p1, __p1, __p1, __p1}; return __ret; } #else -__ai uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vmul_n_u16(uint16x4_t __p0, uint16_t __p1) { 
uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __rev0 * (uint16x4_t) {__p1, __p1, __p1, __p1}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); +__ai __attribute__((target("neon"))) float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) { + float32x2_t __ret; + __ret = __p0 * (float32x2_t) {__p1, __p1}; return __ret; } #else -__ai int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float32x2_t vmul_n_f32(float32x2_t __p0, float32_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __rev0 * (float32x2_t) {__p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) { int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + __ret = __p0 * (int32x2_t) {__p1, __p1}; return __ret; } #else -__ai int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vmul_n_s32(int32x2_t __p0, int32_t __p1) { int32x2_t __ret; 
int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __rev0 * (int32x2_t) {__p1, __p1}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) { int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + __ret = __p0 * (int16x4_t) {__p1, __p1, __p1, __p1}; return __ret; } #else -__ai int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vmul_n_s16(int16x4_t __p0, int16_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __rev0 * (int16x4_t) {__p1, __p1, __p1, __p1}; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); +__ai __attribute__((target("neon"))) poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly16x8_t __ret; + __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37); return __ret; } #else -__ai int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) poly16x8_t vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly16x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 37); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int64x2_t __noswap_vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); +__ai __attribute__((target("neon"))) poly16x8_t __noswap_vmull_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly16x8_t __ret; + __ret = (poly16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 37); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); +__ai __attribute__((target("neon"))) uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49); return __ret; } #else -__ai int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __rev2; __rev2 = 
__builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint16x8_t vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vmull_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 49); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlal_lane_s32(__p0_114, __p1_114, __p2_114, __p3_114) __extension__ ({ \ - int64x2_t __ret_114; \ - int64x2_t __s0_114 = __p0_114; \ - int32x2_t __s1_114 = __p1_114; \ - int32x2_t __s2_114 = __p2_114; \ - __ret_114 = vqdmlal_s32(__s0_114, __s1_114, splat_lane_s32(__s2_114, __p3_114)); \ - __ret_114; \ -}) -#else -#define vqdmlal_lane_s32(__p0_115, __p1_115, __p2_115, __p3_115) __extension__ ({ \ - int64x2_t __ret_115; \ - int64x2_t __s0_115 = __p0_115; \ - int32x2_t __s1_115 = __p1_115; \ - int32x2_t __s2_115 = __p2_115; \ - int64x2_t __rev0_115; __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 1, 0); \ - int32x2_t __rev1_115; __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, 1, 0); \ - int32x2_t __rev2_115; __rev2_115 = __builtin_shufflevector(__s2_115, __s2_115, 1, 0); \ - __ret_115 = 
__noswap_vqdmlal_s32(__rev0_115, __rev1_115, __noswap_splat_lane_s32(__rev2_115, __p3_115)); \ - __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, 1, 0); \ - __ret_115; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmlal_lane_s16(__p0_116, __p1_116, __p2_116, __p3_116) __extension__ ({ \ - int32x4_t __ret_116; \ - int32x4_t __s0_116 = __p0_116; \ - int16x4_t __s1_116 = __p1_116; \ - int16x4_t __s2_116 = __p2_116; \ - __ret_116 = vqdmlal_s16(__s0_116, __s1_116, splat_lane_s16(__s2_116, __p3_116)); \ - __ret_116; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51); + return __ret; +} #else -#define vqdmlal_lane_s16(__p0_117, __p1_117, __p2_117, __p3_117) __extension__ ({ \ - int32x4_t __ret_117; \ - int32x4_t __s0_117 = __p0_117; \ - int16x4_t __s1_117 = __p1_117; \ - int16x4_t __s2_117 = __p2_117; \ - int32x4_t __rev0_117; __rev0_117 = __builtin_shufflevector(__s0_117, __s0_117, 3, 2, 1, 0); \ - int16x4_t __rev1_117; __rev1_117 = __builtin_shufflevector(__s1_117, __s1_117, 3, 2, 1, 0); \ - int16x4_t __rev2_117; __rev2_117 = __builtin_shufflevector(__s2_117, __s2_117, 3, 2, 1, 0); \ - __ret_117 = __noswap_vqdmlal_s16(__rev0_117, __rev1_117, __noswap_splat_lane_s16(__rev2_117, __p3_117)); \ - __ret_117 = __builtin_shufflevector(__ret_117, __ret_117, 3, 2, 1, 0); \ - __ret_117; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmull_u32(uint32x2_t __p0, uint32x2_t __p1) { + 
uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 51); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { - int64x2_t __ret; - __ret = vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); +__ai __attribute__((target("neon"))) uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50); return __ret; } #else -__ai int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __noswap_vqdmlal_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2}); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint32x4_t vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int64x2_t __noswap_vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { - int64x2_t __ret; - __ret = __noswap_vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmull_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 50); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { - int32x4_t __ret; - __ret = vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); +__ai __attribute__((target("neon"))) int16x8_t 
vmull_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33); return __ret; } #else -__ai int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __noswap_vqdmlal_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2}); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int16x8_t vmull_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { - int32x4_t __ret; - __ret = __noswap_vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); +__ai __attribute__((target("neon"))) int16x8_t __noswap_vmull_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 33); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmull_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); + __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); return __ret; } #else -__ai int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmull_s32(int32x2_t 
__p0, int32x2_t __p1) { int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35); + __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int64x2_t __noswap_vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t __noswap_vmull_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); + __ret = (int64x2_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); + __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); return __ret; } #else -__ai int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmull_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, 
(int8x8_t)__rev2, 34); + __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vmull_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); + __ret = (int32x4_t) __builtin_neon_vmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_lane_s32(__p0_118, __p1_118, __p2_118, __p3_118) __extension__ ({ \ +#define vmull_lane_u32(__p0_114, __p1_114, __p2_114) __extension__ ({ \ + uint64x2_t __ret_114; \ + uint32x2_t __s0_114 = __p0_114; \ + uint32x2_t __s1_114 = __p1_114; \ + __ret_114 = vmull_u32(__s0_114, splat_lane_u32(__s1_114, __p2_114)); \ + __ret_114; \ +}) +#else +#define vmull_lane_u32(__p0_115, __p1_115, __p2_115) __extension__ ({ \ + uint64x2_t __ret_115; \ + uint32x2_t __s0_115 = __p0_115; \ + uint32x2_t __s1_115 = __p1_115; \ + uint32x2_t __rev0_115; __rev0_115 = __builtin_shufflevector(__s0_115, __s0_115, 1, 0); \ + uint32x2_t __rev1_115; __rev1_115 = __builtin_shufflevector(__s1_115, __s1_115, 1, 0); \ + __ret_115 = __noswap_vmull_u32(__rev0_115, __noswap_splat_lane_u32(__rev1_115, __p2_115)); \ + __ret_115 = __builtin_shufflevector(__ret_115, __ret_115, 1, 0); \ + __ret_115; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_lane_u16(__p0_116, __p1_116, __p2_116) __extension__ ({ \ + uint32x4_t __ret_116; \ + uint16x4_t __s0_116 = __p0_116; \ + uint16x4_t __s1_116 = __p1_116; \ + __ret_116 = vmull_u16(__s0_116, splat_lane_u16(__s1_116, __p2_116)); \ + __ret_116; \ +}) +#else +#define vmull_lane_u16(__p0_117, __p1_117, __p2_117) __extension__ ({ \ + uint32x4_t __ret_117; \ + uint16x4_t __s0_117 = __p0_117; \ + uint16x4_t __s1_117 = __p1_117; \ + 
uint16x4_t __rev0_117; __rev0_117 = __builtin_shufflevector(__s0_117, __s0_117, 3, 2, 1, 0); \ + uint16x4_t __rev1_117; __rev1_117 = __builtin_shufflevector(__s1_117, __s1_117, 3, 2, 1, 0); \ + __ret_117 = __noswap_vmull_u16(__rev0_117, __noswap_splat_lane_u16(__rev1_117, __p2_117)); \ + __ret_117 = __builtin_shufflevector(__ret_117, __ret_117, 3, 2, 1, 0); \ + __ret_117; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_lane_s32(__p0_118, __p1_118, __p2_118) __extension__ ({ \ int64x2_t __ret_118; \ - int64x2_t __s0_118 = __p0_118; \ + int32x2_t __s0_118 = __p0_118; \ int32x2_t __s1_118 = __p1_118; \ - int32x2_t __s2_118 = __p2_118; \ - __ret_118 = vqdmlsl_s32(__s0_118, __s1_118, splat_lane_s32(__s2_118, __p3_118)); \ + __ret_118 = vmull_s32(__s0_118, splat_lane_s32(__s1_118, __p2_118)); \ __ret_118; \ }) #else -#define vqdmlsl_lane_s32(__p0_119, __p1_119, __p2_119, __p3_119) __extension__ ({ \ +#define vmull_lane_s32(__p0_119, __p1_119, __p2_119) __extension__ ({ \ int64x2_t __ret_119; \ - int64x2_t __s0_119 = __p0_119; \ + int32x2_t __s0_119 = __p0_119; \ int32x2_t __s1_119 = __p1_119; \ - int32x2_t __s2_119 = __p2_119; \ - int64x2_t __rev0_119; __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, 1, 0); \ + int32x2_t __rev0_119; __rev0_119 = __builtin_shufflevector(__s0_119, __s0_119, 1, 0); \ int32x2_t __rev1_119; __rev1_119 = __builtin_shufflevector(__s1_119, __s1_119, 1, 0); \ - int32x2_t __rev2_119; __rev2_119 = __builtin_shufflevector(__s2_119, __s2_119, 1, 0); \ - __ret_119 = __noswap_vqdmlsl_s32(__rev0_119, __rev1_119, __noswap_splat_lane_s32(__rev2_119, __p3_119)); \ + __ret_119 = __noswap_vmull_s32(__rev0_119, __noswap_splat_lane_s32(__rev1_119, __p2_119)); \ __ret_119 = __builtin_shufflevector(__ret_119, __ret_119, 1, 0); \ __ret_119; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_lane_s16(__p0_120, __p1_120, __p2_120, __p3_120) __extension__ ({ \ +#define vmull_lane_s16(__p0_120, __p1_120, __p2_120) __extension__ ({ \ int32x4_t 
__ret_120; \ - int32x4_t __s0_120 = __p0_120; \ + int16x4_t __s0_120 = __p0_120; \ int16x4_t __s1_120 = __p1_120; \ - int16x4_t __s2_120 = __p2_120; \ - __ret_120 = vqdmlsl_s16(__s0_120, __s1_120, splat_lane_s16(__s2_120, __p3_120)); \ + __ret_120 = vmull_s16(__s0_120, splat_lane_s16(__s1_120, __p2_120)); \ __ret_120; \ }) #else -#define vqdmlsl_lane_s16(__p0_121, __p1_121, __p2_121, __p3_121) __extension__ ({ \ +#define vmull_lane_s16(__p0_121, __p1_121, __p2_121) __extension__ ({ \ int32x4_t __ret_121; \ - int32x4_t __s0_121 = __p0_121; \ + int16x4_t __s0_121 = __p0_121; \ int16x4_t __s1_121 = __p1_121; \ - int16x4_t __s2_121 = __p2_121; \ - int32x4_t __rev0_121; __rev0_121 = __builtin_shufflevector(__s0_121, __s0_121, 3, 2, 1, 0); \ + int16x4_t __rev0_121; __rev0_121 = __builtin_shufflevector(__s0_121, __s0_121, 3, 2, 1, 0); \ int16x4_t __rev1_121; __rev1_121 = __builtin_shufflevector(__s1_121, __s1_121, 3, 2, 1, 0); \ - int16x4_t __rev2_121; __rev2_121 = __builtin_shufflevector(__s2_121, __s2_121, 3, 2, 1, 0); \ - __ret_121 = __noswap_vqdmlsl_s16(__rev0_121, __rev1_121, __noswap_splat_lane_s16(__rev2_121, __p3_121)); \ + __ret_121 = __noswap_vmull_s16(__rev0_121, __noswap_splat_lane_s16(__rev1_121, __p2_121)); \ __ret_121 = __builtin_shufflevector(__ret_121, __ret_121, 3, 2, 1, 0); \ __ret_121; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { - int64x2_t __ret; - __ret = vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); +__ai __attribute__((target("neon"))) uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { + uint64x2_t __ret; + __ret = vmull_u32(__p0, (uint32x2_t) {__p1, __p1}); return __ret; } #else -__ai int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, 
(int32x2_t) {__p2, __p2}); +__ai __attribute__((target("neon"))) uint64x2_t vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { + uint64x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __noswap_vmull_u32(__rev0, (uint32x2_t) {__p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int64x2_t __noswap_vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { - int64x2_t __ret; - __ret = __noswap_vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmull_n_u32(uint32x2_t __p0, uint32_t __p1) { + uint64x2_t __ret; + __ret = __noswap_vmull_u32(__p0, (uint32x2_t) {__p1, __p1}); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { - int32x4_t __ret; - __ret = vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); +__ai __attribute__((target("neon"))) uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { + uint32x4_t __ret; + __ret = vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #else -__ai int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2}); +__ai __attribute__((target("neon"))) uint32x4_t vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { + uint32x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vmull_u16(__rev0, (uint16x4_t) {__p1, __p1, __p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { - int32x4_t __ret; - __ret = __noswap_vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, 
__p2}); +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmull_n_u16(uint16x4_t __p0, uint16_t __p1) { + uint32x4_t __ret; + __ret = __noswap_vmull_u16(__p0, (uint16x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = vmull_s32(__p0, (int32x2_t) {__p1, __p1}); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __noswap_vmull_s32(__rev0, (int32x2_t) {__p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t __noswap_vmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = __noswap_vmull_s32(__p0, (int32x2_t) {__p1, __p1}); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) { int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + __ret = vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #else -__ai int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vmull_n_s16(int16x4_t __p0, int16_t __p1) { int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } 
-__ai int32x4_t __noswap_vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vmull_n_s16(int16x4_t __p0, int16_t __p1) { int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + __ret = __noswap_vmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); +__ai __attribute__((target("neon"))) poly8x8_t vmvn_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = ~__p0; return __ret; } #else -__ai int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); +__ai __attribute__((target("neon"))) poly8x8_t vmvn_p8(poly8x8_t __p0) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = ~__rev0; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int16x8_t __noswap_vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vmvnq_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vmvnq_p8(poly8x16_t __p0) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 
11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); +__ai __attribute__((target("neon"))) uint8x16_t vmvnq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = ~__p0; return __ret; } #else -__ai int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint8x16_t vmvnq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int32x2_t __noswap_vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmvnq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmvnq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); +__ai __attribute__((target("neon"))) uint16x8_t vmvnq_u16(uint16x8_t 
__p0) { + uint16x8_t __ret; + __ret = ~__p0; return __ret; } #else -__ai int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint16x8_t vmvnq_u16(uint16x8_t __p0) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int16x4_t __noswap_vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vmvnq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = ~__p0; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vmvnq_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vmvnq_s32(int32x4_t __p0) { int32x4_t __ret; - __ret = vqdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1}); + __ret = ~__p0; return __ret; } #else -__ai int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vmvnq_s32(int32x4_t __p0) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = 
__noswap_vqdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1}); + __ret = ~__rev0; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmvnq_s16(int16x8_t __p0) { int16x8_t __ret; - __ret = vqdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); + __ret = ~__p0; return __ret; } #else -__ai int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmvnq_s16(int16x8_t __p0) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vqdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); + __ret = ~__rev0; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) { - int32x2_t __ret; - __ret = vqdmulh_s32(__p0, (int32x2_t) {__p1, __p1}); +__ai __attribute__((target("neon"))) uint8x8_t vmvn_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = ~__p0; return __ret; } #else -__ai int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = __noswap_vqdmulh_s32(__rev0, (int32x2_t) {__p1, __p1}); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint8x8_t vmvn_u8(uint8x8_t __p0) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) { - int16x4_t __ret; - __ret = vqdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); +__ai 
__attribute__((target("neon"))) uint32x2_t vmvn_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = ~__p0; return __ret; } #else -__ai int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __noswap_vqdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint32x2_t vmvn_u32(uint32x2_t __p0) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); +__ai __attribute__((target("neon"))) uint16x4_t vmvn_u16(uint16x4_t __p0) { + uint16x4_t __ret; + __ret = ~__p0; return __ret; } #else -__ai int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { - int64x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int64x2_t __noswap_vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); +__ai __attribute__((target("neon"))) uint16x4_t vmvn_u16(uint16x4_t __p0) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) 
__builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); +__ai __attribute__((target("neon"))) int8x8_t vmvn_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = ~__p0; return __ret; } #else -__ai int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { - int32x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); +__ai __attribute__((target("neon"))) int8x8_t vmvn_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmull_lane_s32(__p0_122, __p1_122, __p2_122) __extension__ ({ \ - int64x2_t __ret_122; \ - int32x2_t __s0_122 = __p0_122; \ - int32x2_t __s1_122 = __p1_122; \ - __ret_122 = vqdmull_s32(__s0_122, splat_lane_s32(__s1_122, __p2_122)); \ - __ret_122; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vmvn_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = ~__p0; + return __ret; +} #else -#define vqdmull_lane_s32(__p0_123, __p1_123, __p2_123) __extension__ ({ \ - int64x2_t __ret_123; \ - int32x2_t __s0_123 = __p0_123; \ - int32x2_t __s1_123 = __p1_123; \ - int32x2_t __rev0_123; __rev0_123 = __builtin_shufflevector(__s0_123, __s0_123, 1, 0); \ - int32x2_t __rev1_123; __rev1_123 = __builtin_shufflevector(__s1_123, __s1_123, 1, 0); \ - __ret_123 = __noswap_vqdmull_s32(__rev0_123, __noswap_splat_lane_s32(__rev1_123, __p2_123)); \ - __ret_123 = __builtin_shufflevector(__ret_123, __ret_123, 1, 
0); \ - __ret_123; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vmvn_s32(int32x2_t __p0) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmull_lane_s16(__p0_124, __p1_124, __p2_124) __extension__ ({ \ - int32x4_t __ret_124; \ - int16x4_t __s0_124 = __p0_124; \ - int16x4_t __s1_124 = __p1_124; \ - __ret_124 = vqdmull_s16(__s0_124, splat_lane_s16(__s1_124, __p2_124)); \ - __ret_124; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vmvn_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = ~__p0; + return __ret; +} #else -#define vqdmull_lane_s16(__p0_125, __p1_125, __p2_125) __extension__ ({ \ - int32x4_t __ret_125; \ - int16x4_t __s0_125 = __p0_125; \ - int16x4_t __s1_125 = __p1_125; \ - int16x4_t __rev0_125; __rev0_125 = __builtin_shufflevector(__s0_125, __s0_125, 3, 2, 1, 0); \ - int16x4_t __rev1_125; __rev1_125 = __builtin_shufflevector(__s1_125, __s1_125, 3, 2, 1, 0); \ - __ret_125 = __noswap_vqdmull_s16(__rev0_125, __noswap_splat_lane_s16(__rev1_125, __p2_125)); \ - __ret_125 = __builtin_shufflevector(__ret_125, __ret_125, 3, 2, 1, 0); \ - __ret_125; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vmvn_s16(int16x4_t __p0) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = ~__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { - int64x2_t __ret; - __ret = vqdmull_s32(__p0, (int32x2_t) {__p1, __p1}); +__ai __attribute__((target("neon"))) int8x16_t vnegq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = -__p0; return __ret; } #else -__ai int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { - int64x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - 
__ret = __noswap_vqdmull_s32(__rev0, (int32x2_t) {__p1, __p1}); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int64x2_t __noswap_vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { - int64x2_t __ret; - __ret = __noswap_vqdmull_s32(__p0, (int32x2_t) {__p1, __p1}); +__ai __attribute__((target("neon"))) int8x16_t vnegq_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { - int32x4_t __ret; - __ret = vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); +__ai __attribute__((target("neon"))) float32x4_t vnegq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = -__p0; return __ret; } #else -__ai int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { - int32x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __noswap_vqdmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); +__ai __attribute__((target("neon"))) float32x4_t vnegq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = -__rev0; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { - int32x4_t __ret; - __ret = __noswap_vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); - return __ret; -} #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vqmovn_u32(uint32x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17); +__ai __attribute__((target("neon"))) int32x4_t vnegq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = -__p0; return __ret; } #else -__ai uint16x4_t 
vqmovn_u32(uint32x4_t __p0) { - uint16x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 17); +__ai __attribute__((target("neon"))) int32x4_t vnegq_s32(int32x4_t __p0) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = -__rev0; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint16x4_t __noswap_vqmovn_u32(uint32x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17); - return __ret; -} #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18); +__ai __attribute__((target("neon"))) int16x8_t vnegq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = -__p0; return __ret; } #else -__ai uint32x2_t vqmovn_u64(uint64x2_t __p0) { - uint32x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai uint32x2_t __noswap_vqmovn_u64(uint64x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18); +__ai __attribute__((target("neon"))) int16x8_t vnegq_s16(int16x8_t __p0) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16); +__ai __attribute__((target("neon"))) int8x8_t vneg_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = -__p0; return __ret; } #else -__ai uint8x8_t vqmovn_u16(uint16x8_t __p0) { - 
uint8x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 16); +__ai __attribute__((target("neon"))) int8x8_t vneg_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = -__rev0; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint8x8_t __noswap_vqmovn_u16(uint16x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16); - return __ret; -} #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqmovn_s32(int32x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1); +__ai __attribute__((target("neon"))) float32x2_t vneg_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = -__p0; return __ret; } #else -__ai int16x4_t vqmovn_s32(int32x4_t __p0) { - int16x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int16x4_t __noswap_vqmovn_s32(int32x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1); +__ai __attribute__((target("neon"))) float32x2_t vneg_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqmovn_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vneg_s32(int32x2_t __p0) { int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2); + __ret = -__p0; return __ret; } #else -__ai int32x2_t vqmovn_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t 
vneg_s32(int32x2_t __p0) { int32x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 2); + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = -__rev0; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int32x2_t __noswap_vqmovn_s64(int64x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2); - return __ret; -} #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqmovn_s16(int16x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0); +__ai __attribute__((target("neon"))) int16x4_t vneg_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = -__p0; return __ret; } #else -__ai int8x8_t vqmovn_s16(int16x8_t __p0) { - int8x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int16x4_t vneg_s16(int16x4_t __p0) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int8x8_t __noswap_vqmovn_s16(int16x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vornq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 
14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vqmovun_s32(int32x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17); +__ai __attribute__((target("neon"))) uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 | ~__p1; return __ret; } #else -__ai uint16x4_t vqmovun_s32(int32x4_t __p0) { - uint16x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 17); +__ai __attribute__((target("neon"))) uint32x4_t vornq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint16x4_t __noswap_vqmovun_s32(int32x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17); - return __ret; -} #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vqmovun_s64(int64x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18); +__ai __attribute__((target("neon"))) uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 | ~__p1; return __ret; } #else -__ai uint32x2_t vqmovun_s64(int64x2_t __p0) { - uint32x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 18); +__ai __attribute__((target("neon"))) uint64x2_t vornq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 | ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint32x2_t __noswap_vqmovun_s64(int64x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18); - return __ret; -} #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqmovun_s16(int16x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16); +__ai __attribute__((target("neon"))) uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 | ~__p1; return __ret; } #else -__ai uint8x8_t vqmovun_s16(int16x8_t __p0) { - uint8x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 16); +__ai __attribute__((target("neon"))) uint16x8_t vornq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint8x8_t __noswap_vqmovun_s16(int16x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16); - return __ret; -} #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqnegq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 32); + __ret = __p0 | ~__p1; return __ret; } #else -__ai int8x16_t vqnegq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vornq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 32); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqnegq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 34); + __ret = __p0 | ~__p1; return __ret; } #else -__ai int32x4_t vqnegq_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vornq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 34); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqnegq_s16(int16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 33); - return __ret; -} -#else -__ai int16x8_t vqnegq_s16(int16x8_t __p0) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqneg_s8(int8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 0); - return __ret; -} -#else -__ai int8x8_t vqneg_s8(int8x8_t __p0) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqneg_s32(int32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 2); +__ai __attribute__((target("neon"))) int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 | ~__p1; return __ret; } #else -__ai int32x2_t vqneg_s32(int32x2_t __p0) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 2); +__ai __attribute__((target("neon"))) int64x2_t vornq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 | ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqneg_s16(int16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 1); - return __ret; -} -#else -__ai int16x4_t vqneg_s16(int16x4_t __p0) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#else -__ai int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int32x4_t __noswap_vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + __ret = __p0 | ~__p1; return __ret; } #else -__ai int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vornq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __rev0 | ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int16x8_t __noswap_vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); +__ai __attribute__((target("neon"))) uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __p0 | ~__p1; return __ret; } #else -__ai int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int32x2_t __noswap_vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); +__ai __attribute__((target("neon"))) uint8x8_t vorn_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); +__ai __attribute__((target("neon"))) uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __p0 | ~__p1; return __ret; } #else -__ai int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int16x4_t __noswap_vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); +__ai __attribute__((target("neon"))) uint32x2_t vorn_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 | ~__rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif +__ai __attribute__((target("neon"))) uint64x1_t vorn_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { - int32x4_t __ret; - __ret = vqrdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1}); +__ai __attribute__((target("neon"))) uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __p0 | ~__p1; return __ret; } #else -__ai int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __noswap_vqrdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1}); +__ai __attribute__((target("neon"))) uint16x4_t vorn_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { - int16x8_t __ret; - __ret = vqrdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); +__ai __attribute__((target("neon"))) int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 | ~__p1; return __ret; } #else -__ai int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vqrdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); +__ai __attribute__((target("neon"))) int8x8_t vorn_s8(int8x8_t __p0, int8x8_t __p1) { 
+ int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; - __ret = vqrdmulh_s32(__p0, (int32x2_t) {__p1, __p1}); + __ret = __p0 | ~__p1; return __ret; } #else -__ai int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vorn_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = __noswap_vqrdmulh_s32(__rev0, (int32x2_t) {__p1, __p1}); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 | ~__rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif +__ai __attribute__((target("neon"))) int64x1_t vorn_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 | ~__p1; + return __ret; +} #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; - __ret = vqrdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); + __ret = __p0 | ~__p1; return __ret; } #else -__ai int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vorn_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __noswap_vqrdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 | ~__rev1; 
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + __ret = __p0 | __p1; return __ret; } #else -__ai uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vorrq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + __ret = __p0 | __p1; return __ret; } #else -__ai uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vorrq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + uint32x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + __ret = __p0 | __p1; return __ret; } #else -__ai uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vorrq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + __ret = __p0 | __p1; return __ret; } #else -__ai uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vorrq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | __rev1; __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + __ret = __p0 | __p1; return __ret; } #else -__ai int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vorrq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + __ret = __p0 | __p1; return __ret; } #else -__ai int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vorrq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai 
__attribute__((target("neon"))) int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + __ret = __p0 | __p1; return __ret; } #else -__ai int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vorrq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + __ret = __p0 | __p1; return __ret; } #else -__ai int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vorrq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + __ret = __p0 | __p1; return __ret; } #else -__ai uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) 
{ +__ai __attribute__((target("neon"))) uint8x8_t vorr_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + __ret = __p0 | __p1; return __ret; } #else -__ai uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vorr_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t vorr_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + __ret = __p0 | __p1; return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; - 
__ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + __ret = __p0 | __p1; return __ret; } #else -__ai uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vorr_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + __ret = __p0 | __p1; return __ret; } #else -__ai int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vorr_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + __ret = __p0 | __p1; return __ret; } #else -__ai int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) { +__ai 
__attribute__((target("neon"))) int32x2_t vorr_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) { +__ai __attribute__((target("neon"))) int64x1_t vorr_s64(int64x1_t __p0, int64x1_t __p1) { int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + __ret = __p0 | __p1; return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + __ret = __p0 | __p1; return __ret; } #else -__ai int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vorr_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __rev0 | __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return 
__ret; +} #else -#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vqrshrn_n_u32(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vpadalq_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - uint64x2_t __s0 = __p0; \ - __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} #else -#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vqrshrn_n_u64(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - uint64x2_t __s0 = __p0; \ - __ret = 
(uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vpadalq_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \ - uint8x8_t __ret; \ - uint16x8_t __s0 = __p0; \ - __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} #else -#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \ - uint8x8_t __ret; \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vqrshrn_n_u16(__p0, __p1) __extension__ ({ \ - uint8x8_t __ret; \ - uint16x8_t __s0 = __p0; \ - __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vpadalq_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return 
__ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \ - int16x4_t __ret; \ - int32x4_t __s0 = __p0; \ - __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} #else -#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \ - int16x4_t __ret; \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vqrshrn_n_s32(__p0, __p1) __extension__ ({ \ - int16x4_t __ret; \ - int32x4_t __s0 = __p0; \ - __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x8_t vpadalq_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \ - int32x2_t __ret; \ - int64x2_t __s0 = __p0; \ - __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} #else -#define vqrshrn_n_s64(__p0, __p1) 
__extension__ ({ \ - int32x2_t __ret; \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vqrshrn_n_s64(__p0, __p1) __extension__ ({ \ - int32x2_t __ret; \ - int64x2_t __s0 = __p0; \ - __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vpadalq_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int64x2_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \ - int8x8_t __ret; \ - int16x8_t __s0 = __p0; \ - __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vpadalq_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} #else -#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \ - int8x8_t __ret; \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vqrshrn_n_s16(__p0, __p1) __extension__ ({ \ - int8x8_t __ret; \ - int16x8_t __s0 = __p0; \ - __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t 
vpadalq_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vpadalq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - int32x4_t __s0 = __p0; \ - __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} #else -#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vqrshrun_n_s32(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - int32x4_t __s0 = __p0; \ - __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vpadal_u8(uint16x4_t __p0, uint8x8_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - int64x2_t __s0 = __p0; \ 
- __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} #else -#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vqrshrun_n_s64(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - int64x2_t __s0 = __p0; \ - __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint64x1_t vpadal_u32(uint64x1_t __p0, uint32x2_t __p1) { + uint64x1_t __ret; + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 19); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \ - uint8x8_t __ret; \ - int16x8_t __s0 = __p0; \ - __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} #else -#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \ - uint8x8_t __ret; \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define 
__noswap_vqrshrun_n_s16(__p0, __p1) __extension__ ({ \ - uint8x8_t __ret; \ - int16x8_t __s0 = __p0; \ - __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vpadal_u16(uint32x2_t __p0, uint16x4_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); +__ai __attribute__((target("neon"))) int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int16x4_t vpadal_s8(int16x4_t __p0, int8x8_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } 
#endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); +__ai __attribute__((target("neon"))) int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 3); return __ret; } #else -__ai uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); +__ai __attribute__((target("neon"))) int64x1_t vpadal_s32(int64x1_t __p0, int32x2_t __p1) { + int64x1_t __ret; + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x1_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__rev1, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vpadal_s16(int32x2_t __p0, int16x4_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x2_t) __builtin_neon_vpadal_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai 
__attribute__((target("neon"))) uint8x8_t vpadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vpadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vpadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) 
__builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); +__ai __attribute__((target("neon"))) int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); +__ai __attribute__((target("neon"))) int8x8_t vpadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vpadd_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else 
+__ai __attribute__((target("neon"))) int32x2_t vpadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vpadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vpadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vpaddlq_u8(uint8x16_t __p0) { uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 49); return __ret; } #else -__ai uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vpaddlq_u8(uint8x16_t __p0) { uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 
5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); +__ai __attribute__((target("neon"))) uint64x2_t vpaddlq_u32(uint32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 51); return __ret; } #else -__ai int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint64x2_t vpaddlq_u32(uint32x4_t __p0) { + uint64x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); +__ai __attribute__((target("neon"))) uint32x4_t vpaddlq_u16(uint16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 50); return __ret; } #else -__ai int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); +__ai __attribute__((target("neon"))) uint32x4_t vpaddlq_u16(uint16x8_t __p0) { + uint32x4_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vpaddlq_s8(int8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vpaddlq_s8(int8x16_t __p0) { + int16x8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vpaddlq_s32(int32x4_t __p0) { int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 35); return __ret; } #else -__ai int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vpaddlq_s32(int32x4_t __p0) { int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int64x2_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 35); __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); +__ai __attribute__((target("neon"))) int32x4_t vpaddlq_s16(int16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__p0, 34); return __ret; } #else -__ai int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; +__ai __attribute__((target("neon"))) int32x4_t vpaddlq_s16(int16x8_t __p0) { + int32x4_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vpaddlq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vpaddl_u8(uint8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vpaddl_u8(uint8x8_t __p0) { + uint16x4_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x1_t vpaddl_u32(uint32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 19); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x1_t vpaddl_u32(uint32x2_t __p0) { + uint64x1_t 
__ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 19); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vpaddl_u16(uint16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vpaddl_u16(uint16x4_t __p0) { + uint32x2_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vpaddl_s8(int8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vpaddl_s8(int8x8_t __p0) { + int16x4_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x1_t vpaddl_s32(int32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x1_t vpaddl_s32(int32x2_t __p0) { + int64x1_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x1_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 3); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vpaddl_s16(int16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__p0, 2); + return __ret; +} +#else +__ai 
__attribute__((target("neon"))) int32x2_t vpaddl_s16(int16x4_t __p0) { + int32x2_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x2_t) __builtin_neon_vpaddl_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vpmax_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vpmax_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai uint64x1_t vqshl_u64(uint64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vpmax_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__p0, 
(int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vpmax_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = (int8x8_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vpmax_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else -__ai int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vpmax_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = (int32x2_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vpmax_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = (int16x4_t) __builtin_neon_vpmax_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __ret; \ - uint8x16_t __s0 = __p0; \ - __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 48); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} #else -#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __ret; \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __rev0; __rev0 = 
__builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 48); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vpmin_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 50); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} #else -#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vpmin_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef 
__LITTLE_ENDIAN__ -#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __ret; \ - uint64x2_t __s0 = __p0; \ - __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 51); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} #else -#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __ret; \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vpmin_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __ret; \ - uint16x8_t __s0 = __p0; \ - __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 49); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} #else -#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __ret; \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); 
\ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vpmin_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __ret; \ - int8x16_t __s0 = __p0; \ - __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 32); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} #else -#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __ret; \ - int8x16_t __s0 = __p0; \ - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 32); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x2_t vpmin_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __ret; \ - int32x4_t __s0 = __p0; \ - __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 34); \ - __ret; 
\ -}) +__ai __attribute__((target("neon"))) int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} #else -#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __ret; \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vpmin_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __ret; \ - int64x2_t __s0 = __p0; \ - __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 35); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} #else -#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __ret; \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 35); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vpmin_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 
3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vpmin_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 33); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16_t vqabsq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 32); + return __ret; +} #else -#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16_t vqabsq_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqshl_n_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __ret; \ - uint8x8_t __s0 = __p0; \ - __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 16); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vqabsq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 34); + return __ret; +} #else -#define vqshl_n_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __ret; \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x8_t) 
__builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vqabsq_s32(int32x4_t __p0) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqshl_n_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - uint32x2_t __s0 = __p0; \ - __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 18); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x8_t vqabsq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 33); + return __ret; +} #else -#define vqshl_n_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x8_t vqabsq_s16(int16x8_t __p0) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif -#define vqshl_n_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __ret; \ - uint64x1_t __s0 = __p0; \ - __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vqshl_n_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - uint16x4_t __s0 = __p0; \ - __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 17); \ - __ret; \ -}) +__ai 
__attribute__((target("neon"))) int8x8_t vqabs_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 0); + return __ret; +} #else -#define vqshl_n_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vqabs_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqshl_n_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __ret; \ - int8x8_t __s0 = __p0; \ - __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vqabs_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 2); + return __ret; +} #else -#define vqshl_n_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __ret; \ - int8x8_t __s0 = __p0; \ - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vqabs_s32(int32x2_t __p0) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqshl_n_s32(__p0, __p1) __extension__ ({ 
\ - int32x2_t __ret; \ - int32x2_t __s0 = __p0; \ - __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vqabs_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 1); + return __ret; +} #else -#define vqshl_n_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __ret; \ - int32x2_t __s0 = __p0; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vqabs_s16(int16x4_t __p0) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqabs_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif -#define vqshl_n_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __ret; \ - int64x1_t __s0 = __p0; \ - __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vqshl_n_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} #else -#define vqshl_n_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t 
vqaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \ - uint8x16_t __ret; \ - int8x16_t __s0 = __p0; \ - __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 48); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} #else -#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \ - uint8x16_t __ret; \ - int8x16_t __s0 = __p0; \ - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 48); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vqaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \ - uint32x4_t __ret; \ - int32x4_t __s0 = __p0; \ - __ret = 
(uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 50); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} #else -#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \ - uint32x4_t __ret; \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vqaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \ - uint64x2_t __ret; \ - int64x2_t __s0 = __p0; \ - __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 51); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} #else -#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \ - uint64x2_t __ret; \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vqaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \ - uint16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 49); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} #else -#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \ - uint16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16_t vqaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \ - uint8x8_t __ret; \ - int8x8_t __s0 = __p0; \ - __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 16); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) 
int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} #else -#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \ - uint8x8_t __ret; \ - int8x8_t __s0 = __p0; \ - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vqaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - int32x2_t __s0 = __p0; \ - __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 18); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} #else -#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - int32x2_t __s0 = __p0; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vqaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, 
__p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif -#define vqshlu_n_s64(__p0, __p1) __extension__ ({ \ - uint64x1_t __ret; \ - int64x1_t __s0 = __p0; \ - __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 17); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} #else -#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x8_t vqaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) 
__builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} #else -#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vqshrn_n_u32(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vqadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - uint64x2_t __s0 = __p0; \ - __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} #else -#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vqshrn_n_u64(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - uint64x2_t __s0 = __p0; \ - 
__ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vqadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) uint64x1_t vqadd_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \ - uint8x8_t __ret; \ - uint16x8_t __s0 = __p0; \ - __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} #else -#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \ - uint8x8_t __ret; \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vqshrn_n_u16(__p0, __p1) __extension__ ({ \ - uint8x8_t __ret; \ - uint16x8_t __s0 = __p0; \ - __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vqadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \ - int16x4_t __ret; \ - int32x4_t __s0 = __p0; \ - __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} #else -#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \ - int16x4_t __ret; \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vqshrn_n_s32(__p0, __p1) __extension__ ({ \ - int16x4_t __ret; \ - int32x4_t __s0 = __p0; \ - __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vqadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \ - int32x2_t __ret; \ - int64x2_t __s0 = __p0; \ - __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = 
(int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} #else -#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \ - int32x2_t __ret; \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vqshrn_n_s64(__p0, __p1) __extension__ ({ \ - int32x2_t __ret; \ - int64x2_t __s0 = __p0; \ - __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vqadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) int64x1_t vqadd_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \ - int8x8_t __ret; \ - int16x8_t __s0 = __p0; \ - __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} #else -#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \ - int8x8_t __ret; \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 0); \ - __ret = __builtin_shufflevector(__ret, 
__ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vqshrn_n_s16(__p0, __p1) __extension__ ({ \ - int8x8_t __ret; \ - int16x8_t __s0 = __p0; \ - __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vqadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - int32x4_t __s0 = __p0; \ - __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); + return __ret; +} #else -#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vqshrun_n_s32(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - int32x4_t __s0 = __p0; \ - __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + 
int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t __noswap_vqdmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - int64x2_t __s0 = __p0; \ - __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); + return __ret; +} #else -#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ +__ai __attribute__((target("neon"))) int32x4_t vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t __noswap_vqdmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + 
int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlal_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlal_lane_s32(__p0_122, __p1_122, __p2_122, __p3_122) __extension__ ({ \ + int64x2_t __ret_122; \ + int64x2_t __s0_122 = __p0_122; \ + int32x2_t __s1_122 = __p1_122; \ + int32x2_t __s2_122 = __p2_122; \ + __ret_122 = vqdmlal_s32(__s0_122, __s1_122, splat_lane_s32(__s2_122, __p3_122)); \ + __ret_122; \ }) -#define __noswap_vqshrun_n_s64(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - int64x2_t __s0 = __p0; \ - __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \ - __ret; \ +#else +#define vqdmlal_lane_s32(__p0_123, __p1_123, __p2_123, __p3_123) __extension__ ({ \ + int64x2_t __ret_123; \ + int64x2_t __s0_123 = __p0_123; \ + int32x2_t __s1_123 = __p1_123; \ + int32x2_t __s2_123 = __p2_123; \ + int64x2_t __rev0_123; __rev0_123 = __builtin_shufflevector(__s0_123, __s0_123, 1, 0); \ + int32x2_t __rev1_123; __rev1_123 = __builtin_shufflevector(__s1_123, __s1_123, 1, 0); \ + int32x2_t __rev2_123; __rev2_123 = __builtin_shufflevector(__s2_123, __s2_123, 1, 0); \ + __ret_123 = __noswap_vqdmlal_s32(__rev0_123, __rev1_123, __noswap_splat_lane_s32(__rev2_123, __p3_123)); \ + __ret_123 = __builtin_shufflevector(__ret_123, __ret_123, 1, 0); \ + __ret_123; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \ - uint8x8_t __ret; \ - int16x8_t __s0 = __p0; \ - __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \ - __ret; \ +#define vqdmlal_lane_s16(__p0_124, __p1_124, __p2_124, __p3_124) __extension__ ({ \ + int32x4_t __ret_124; \ + int32x4_t __s0_124 = __p0_124; \ + int16x4_t __s1_124 = __p1_124; \ + int16x4_t __s2_124 = __p2_124; \ + __ret_124 = vqdmlal_s16(__s0_124, __s1_124, splat_lane_s16(__s2_124, __p3_124)); \ + __ret_124; \ }) #else -#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \ - uint8x8_t 
__ret; \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vqshrun_n_s16(__p0, __p1) __extension__ ({ \ - uint8x8_t __ret; \ - int16x8_t __s0 = __p0; \ - __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \ - __ret; \ +#define vqdmlal_lane_s16(__p0_125, __p1_125, __p2_125, __p3_125) __extension__ ({ \ + int32x4_t __ret_125; \ + int32x4_t __s0_125 = __p0_125; \ + int16x4_t __s1_125 = __p1_125; \ + int16x4_t __s2_125 = __p2_125; \ + int32x4_t __rev0_125; __rev0_125 = __builtin_shufflevector(__s0_125, __s0_125, 3, 2, 1, 0); \ + int16x4_t __rev1_125; __rev1_125 = __builtin_shufflevector(__s1_125, __s1_125, 3, 2, 1, 0); \ + int16x4_t __rev2_125; __rev2_125 = __builtin_shufflevector(__s2_125, __s2_125, 3, 2, 1, 0); \ + __ret_125 = __noswap_vqdmlal_s16(__rev0_125, __rev1_125, __noswap_splat_lane_s16(__rev2_125, __p3_125)); \ + __ret_125 = __builtin_shufflevector(__ret_125, __ret_125, 3, 2, 1, 0); \ + __ret_125; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); +__ai __attribute__((target("neon"))) int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); return __ret; } #else -__ai uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) 
__builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int64x2_t vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vqdmlal_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t __noswap_vqdmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = __noswap_vqdmlal_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); +__ai __attribute__((target("neon"))) int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #else -__ai uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); +__ai __attribute__((target("neon"))) int32x4_t vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vqdmlal_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2}); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 
0); return __ret; } +__ai __attribute__((target("neon"))) int32x4_t __noswap_vqdmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = __noswap_vqdmlal_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); +__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); return __ret; } #else -__ai uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); +__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) int64x2_t __noswap_vqdmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 35); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, 
(int8x16_t)__p1, 49); +__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); return __ret; } #else -__ai uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t __noswap_vqdmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmlsl_v((int8x16_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 34); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); +#define vqdmlsl_lane_s32(__p0_126, __p1_126, __p2_126, __p3_126) __extension__ ({ \ + int64x2_t __ret_126; \ + int64x2_t __s0_126 = __p0_126; \ + int32x2_t __s1_126 = __p1_126; \ + int32x2_t __s2_126 = __p2_126; \ + __ret_126 = vqdmlsl_s32(__s0_126, __s1_126, splat_lane_s32(__s2_126, 
__p3_126)); \ + __ret_126; \ +}) +#else +#define vqdmlsl_lane_s32(__p0_127, __p1_127, __p2_127, __p3_127) __extension__ ({ \ + int64x2_t __ret_127; \ + int64x2_t __s0_127 = __p0_127; \ + int32x2_t __s1_127 = __p1_127; \ + int32x2_t __s2_127 = __p2_127; \ + int64x2_t __rev0_127; __rev0_127 = __builtin_shufflevector(__s0_127, __s0_127, 1, 0); \ + int32x2_t __rev1_127; __rev1_127 = __builtin_shufflevector(__s1_127, __s1_127, 1, 0); \ + int32x2_t __rev2_127; __rev2_127 = __builtin_shufflevector(__s2_127, __s2_127, 1, 0); \ + __ret_127 = __noswap_vqdmlsl_s32(__rev0_127, __rev1_127, __noswap_splat_lane_s32(__rev2_127, __p3_127)); \ + __ret_127 = __builtin_shufflevector(__ret_127, __ret_127, 1, 0); \ + __ret_127; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_lane_s16(__p0_128, __p1_128, __p2_128, __p3_128) __extension__ ({ \ + int32x4_t __ret_128; \ + int32x4_t __s0_128 = __p0_128; \ + int16x4_t __s1_128 = __p1_128; \ + int16x4_t __s2_128 = __p2_128; \ + __ret_128 = vqdmlsl_s16(__s0_128, __s1_128, splat_lane_s16(__s2_128, __p3_128)); \ + __ret_128; \ +}) +#else +#define vqdmlsl_lane_s16(__p0_129, __p1_129, __p2_129, __p3_129) __extension__ ({ \ + int32x4_t __ret_129; \ + int32x4_t __s0_129 = __p0_129; \ + int16x4_t __s1_129 = __p1_129; \ + int16x4_t __s2_129 = __p2_129; \ + int32x4_t __rev0_129; __rev0_129 = __builtin_shufflevector(__s0_129, __s0_129, 3, 2, 1, 0); \ + int16x4_t __rev1_129; __rev1_129 = __builtin_shufflevector(__s1_129, __s1_129, 3, 2, 1, 0); \ + int16x4_t __rev2_129; __rev2_129 = __builtin_shufflevector(__s2_129, __s2_129, 3, 2, 1, 0); \ + __ret_129 = __noswap_vqdmlsl_s16(__rev0_129, __rev1_129, __noswap_splat_lane_s16(__rev2_129, __p3_129)); \ + __ret_129 = __builtin_shufflevector(__ret_129, __ret_129, 3, 2, 1, 0); \ + __ret_129; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = vqdmlsl_s32(__p0, __p1, 
(int32x2_t) {__p2, __p2}); return __ret; } #else -__ai int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vqdmlsl_s32(__rev0, __rev1, (int32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x2_t __noswap_vqdmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = __noswap_vqdmlsl_s32(__p0, __p1, (int32x2_t) {__p2, __p2}); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + __ret = vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #else -__ai int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int32x4_t) 
__builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vqdmlsl_s16(__rev0, __rev1, (int16x4_t) {__p2, __p2, __p2, __p2}); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) int32x4_t __noswap_vqdmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = __noswap_vqdmlsl_s16(__p0, __p1, (int16x4_t) {__p2, __p2, __p2, __p2}); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); +__ai __attribute__((target("neon"))) int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) int32x4_t vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t __noswap_vqdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } 
#endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else -__ai int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) int16x8_t __noswap_vqdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16); +__ai __attribute__((target("neon"))) int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else -__ai uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - 
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32x2_t vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t __noswap_vqdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18); +__ai __attribute__((target("neon"))) int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) int16x4_t vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -#endif - -__ai uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t 
__p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19); +__ai __attribute__((target("neon"))) int16x4_t __noswap_vqdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } +#endif + #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17); +__ai __attribute__((target("neon"))) int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __ret; + __ret = vqdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #else -__ai uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); +__ai __attribute__((target("neon"))) int32x4_t vqdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vqdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0); +__ai __attribute__((target("neon"))) int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __ret; + __ret = vqdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); return __ret; } #else -__ai int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); +__ai __attribute__((target("neon"))) int16x8_t vqdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vqdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) { int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + __ret = vqdmulh_s32(__p0, (int32x2_t) {__p1, __p1}); return __ret; } #else -__ai int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vqdmulh_n_s32(int32x2_t __p0, int32_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __noswap_vqdmulh_s32(__rev0, (int32x2_t) {__p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) { int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + __ret = vqdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); return __ret; } 
#else -__ai int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vqdmulh_n_s16(int16x4_t __p0, int16_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __noswap_vqdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); +__ai __attribute__((target("neon"))) int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); return __ret; } #else -__ai uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint16x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int64x2_t vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint16x4_t __noswap_vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); +__ai 
__attribute__((target("neon"))) int64x2_t __noswap_vqdmull_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 35); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); +__ai __attribute__((target("neon"))) int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); return __ret; } #else -__ai uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint32x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) int32x4_t vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__rev0, (int8x8_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint32x2_t __noswap_vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); +__ai __attribute__((target("neon"))) int32x4_t __noswap_vqdmull_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqdmull_v((int8x8_t)__p0, (int8x8_t)__p1, 34); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) 
__builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); +#define vqdmull_lane_s32(__p0_130, __p1_130, __p2_130) __extension__ ({ \ + int64x2_t __ret_130; \ + int32x2_t __s0_130 = __p0_130; \ + int32x2_t __s1_130 = __p1_130; \ + __ret_130 = vqdmull_s32(__s0_130, splat_lane_s32(__s1_130, __p2_130)); \ + __ret_130; \ +}) +#else +#define vqdmull_lane_s32(__p0_131, __p1_131, __p2_131) __extension__ ({ \ + int64x2_t __ret_131; \ + int32x2_t __s0_131 = __p0_131; \ + int32x2_t __s1_131 = __p1_131; \ + int32x2_t __rev0_131; __rev0_131 = __builtin_shufflevector(__s0_131, __s0_131, 1, 0); \ + int32x2_t __rev1_131; __rev1_131 = __builtin_shufflevector(__s1_131, __s1_131, 1, 0); \ + __ret_131 = __noswap_vqdmull_s32(__rev0_131, __noswap_splat_lane_s32(__rev1_131, __p2_131)); \ + __ret_131 = __builtin_shufflevector(__ret_131, __ret_131, 1, 0); \ + __ret_131; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmull_lane_s16(__p0_132, __p1_132, __p2_132) __extension__ ({ \ + int32x4_t __ret_132; \ + int16x4_t __s0_132 = __p0_132; \ + int16x4_t __s1_132 = __p1_132; \ + __ret_132 = vqdmull_s16(__s0_132, splat_lane_s16(__s1_132, __p2_132)); \ + __ret_132; \ +}) +#else +#define vqdmull_lane_s16(__p0_133, __p1_133, __p2_133) __extension__ ({ \ + int32x4_t __ret_133; \ + int16x4_t __s0_133 = __p0_133; \ + int16x4_t __s1_133 = __p1_133; \ + int16x4_t __rev0_133; __rev0_133 = __builtin_shufflevector(__s0_133, __s0_133, 3, 2, 1, 0); \ + int16x4_t __rev1_133; __rev1_133 = __builtin_shufflevector(__s1_133, __s1_133, 3, 2, 1, 0); \ + __ret_133 = __noswap_vqdmull_s16(__rev0_133, __noswap_splat_lane_s16(__rev1_133, __p2_133)); \ + __ret_133 = __builtin_shufflevector(__ret_133, __ret_133, 3, 2, 1, 0); \ + __ret_133; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = vqdmull_s32(__p0, (int32x2_t) {__p1, __p1}); return __ret; } #else -__ai uint8x8_t vraddhn_u16(uint16x8_t 
__p0, uint16x8_t __p1) { - uint8x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int64x2_t vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __noswap_vqdmull_s32(__rev0, (int32x2_t) {__p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint8x8_t __noswap_vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); +__ai __attribute__((target("neon"))) int64x2_t __noswap_vqdmull_n_s32(int32x2_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = __noswap_vqdmull_s32(__p0, (int32x2_t) {__p1, __p1}); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); +__ai __attribute__((target("neon"))) int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #else -__ai int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) { - int16x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); +__ai __attribute__((target("neon"))) int32x4_t vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { + int32x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 
2, 1, 0); + __ret = __noswap_vqdmull_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int16x4_t __noswap_vraddhn_s32(int32x4_t __p0, int32x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); +__ai __attribute__((target("neon"))) int32x4_t __noswap_vqdmull_n_s16(int16x4_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = __noswap_vqdmull_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); +__ai __attribute__((target("neon"))) uint16x4_t vqmovn_u32(uint32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17); return __ret; } #else -__ai int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) { - int32x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint16x4_t vqmovn_u32(uint32x4_t __p0) { + uint16x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x2_t __noswap_vraddhn_s64(int64x2_t __p0, int64x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); +__ai __attribute__((target("neon"))) uint16x4_t __noswap_vqmovn_u32(uint32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 17); return 
__ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); +__ai __attribute__((target("neon"))) uint32x2_t vqmovn_u64(uint64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18); return __ret; } #else -__ai int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) { - int8x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint32x2_t vqmovn_u64(uint64x2_t __p0) { + uint32x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int8x8_t __noswap_vraddhn_s16(int16x8_t __p0, int16x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); +__ai __attribute__((target("neon"))) uint32x2_t __noswap_vqmovn_u64(uint64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 18); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 50); +__ai __attribute__((target("neon"))) uint8x8_t vqmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16); return __ret; } #else -__ai uint32x4_t vrecpeq_u32(uint32x4_t __p0) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - 
__ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8x8_t vqmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t __noswap_vqmovn_u16(uint16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 16); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vrecpeq_f32(float32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 41); +__ai __attribute__((target("neon"))) int16x4_t vqmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1); return __ret; } #else -__ai float32x4_t vrecpeq_f32(float32x4_t __p0) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 41); +__ai __attribute__((target("neon"))) int16x4_t vqmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vrecpe_u32(uint32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 18); - return __ret; -} -#else -__ai uint32x2_t vrecpe_u32(uint32x2_t __p0) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 18); - __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) int16x4_t __noswap_vqmovn_s32(int32x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 1); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vrecpe_f32(float32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 9); +__ai __attribute__((target("neon"))) int32x2_t vqmovn_s64(int64x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2); return __ret; } #else -__ai float32x2_t vrecpe_f32(float32x2_t __p0) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 9); +__ai __attribute__((target("neon"))) int32x2_t vqmovn_s64(int64x2_t __p0) { + int32x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#else -__ai float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32x2_t __noswap_vqmovn_s64(int64x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t 
vrecps_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 9); +__ai __attribute__((target("neon"))) int8x8_t vqmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0); return __ret; } #else -__ai float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vrev16_p8(poly8x8_t __p0) { - poly8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); +__ai __attribute__((target("neon"))) int8x8_t vqmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -#else -__ai poly8x8_t vrev16_p8(poly8x8_t __p0) { - poly8x8_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int8x8_t __noswap_vqmovn_s16(int16x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqmovn_v((int8x16_t)__p0, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vrev16q_p8(poly8x16_t __p0) { - poly8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); +__ai __attribute__((target("neon"))) uint16x4_t 
vqmovun_s32(int32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17); return __ret; } #else -__ai poly8x16_t vrev16q_p8(poly8x16_t __p0) { - poly8x16_t __ret; - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vrev16q_u8(uint8x16_t __p0) { - uint8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); +__ai __attribute__((target("neon"))) uint16x4_t vqmovun_s32(int32x4_t __p0) { + uint16x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -#else -__ai uint8x16_t vrev16q_u8(uint8x16_t __p0) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint16x4_t __noswap_vqmovun_s32(int32x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 17); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vrev16q_s8(int8x16_t __p0) { - int8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); +__ai __attribute__((target("neon"))) uint32x2_t vqmovun_s64(int64x2_t __p0) { + uint32x2_t __ret; + __ret = 
(uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18); return __ret; } #else -__ai int8x16_t vrev16q_s8(int8x16_t __p0) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint32x2_t vqmovun_s64(int64x2_t __p0) { + uint32x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t __noswap_vqmovun_s64(int64x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 18); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vrev16_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vqmovun_s16(int16x8_t __p0) { uint8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16); return __ret; } #else -__ai uint8x8_t vrev16_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vqmovun_s16(int16x8_t __p0) { uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__rev0, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vrev16_s8(int8x8_t __p0) { - int8x8_t __ret; - __ret = 
__builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); - return __ret; -} -#else -__ai int8x8_t vrev16_s8(int8x8_t __p0) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8x8_t __noswap_vqmovun_s16(int16x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqmovun_v((int8x16_t)__p0, 16); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vrev32_p8(poly8x8_t __p0) { - poly8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); +__ai __attribute__((target("neon"))) int8x16_t vqnegq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 32); return __ret; } #else -__ai poly8x8_t vrev32_p8(poly8x8_t __p0) { - poly8x8_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int8x16_t vqnegq_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vrev32_p16(poly16x4_t __p0) { - poly16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); +__ai __attribute__((target("neon"))) int32x4_t vqnegq_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 34); return __ret; } #else -__ai poly16x4_t 
vrev32_p16(poly16x4_t __p0) { - poly16x4_t __ret; - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); +__ai __attribute__((target("neon"))) int32x4_t vqnegq_s32(int32x4_t __p0) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vrev32q_p8(poly8x16_t __p0) { - poly8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); +__ai __attribute__((target("neon"))) int16x8_t vqnegq_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 33); return __ret; } #else -__ai poly8x16_t vrev32q_p8(poly8x16_t __p0) { - poly8x16_t __ret; - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int16x8_t vqnegq_s16(int16x8_t __p0) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vrev32q_p16(poly16x8_t __p0) { - poly16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); +__ai __attribute__((target("neon"))) int8x8_t vqneg_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 0); return __ret; } #else -__ai 
poly16x8_t vrev32q_p16(poly16x8_t __p0) { - poly16x8_t __ret; - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); +__ai __attribute__((target("neon"))) int8x8_t vqneg_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vrev32q_u8(uint8x16_t __p0) { - uint8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); +__ai __attribute__((target("neon"))) int32x2_t vqneg_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 2); return __ret; } #else -__ai uint8x16_t vrev32q_u8(uint8x16_t __p0) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32x2_t vqneg_s32(int32x2_t __p0) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vrev32q_u16(uint16x8_t __p0) { - uint16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); +__ai __attribute__((target("neon"))) int16x4_t vqneg_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 1); return __ret; } #else 
-__ai uint16x8_t vrev32q_u16(uint16x8_t __p0) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int16x4_t vqneg_s16(int16x4_t __p0) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqneg_v((int8x8_t)__rev0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vrev32q_s8(int8x16_t __p0) { - int8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); +__ai __attribute__((target("neon"))) int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai int8x16_t vrev32q_s8(int8x16_t __p0) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32x4_t vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x4_t __noswap_vqrdmulhq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t 
__ret; + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vrev32q_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else -__ai int16x8_t vrev32q_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vrev32_u8(uint8x8_t __p0) { - uint8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); - return __ret; -} -#else -__ai uint8x8_t vrev32_u8(uint8x8_t __p0) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int16x8_t __noswap_vqrdmulhq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vrev32_u16(uint16x4_t __p0) { - uint16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); +__ai __attribute__((target("neon"))) 
int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else -__ai uint16x4_t vrev32_u16(uint16x4_t __p0) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vrev32_s8(int8x8_t __p0) { - int8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); +__ai __attribute__((target("neon"))) int32x2_t vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -#else -__ai int8x8_t vrev32_s8(int8x8_t __p0) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32x2_t __noswap_vqrdmulh_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vrev32_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai int16x4_t vrev32_s16(int16x4_t __p0) { +__ai 
__attribute__((target("neon"))) int16x4_t vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("neon"))) int16x4_t __noswap_vqrdmulh_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmulh_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vrev64_p8(poly8x8_t __p0) { - poly8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __ret; + __ret = vqrdmulhq_s32(__p0, (int32x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #else -__ai poly8x8_t vrev64_p8(poly8x8_t __p0) { - poly8x8_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32x4_t vqrdmulhq_n_s32(int32x4_t __p0, int32_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vqrdmulhq_s32(__rev0, (int32x4_t) {__p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vrev64_p16(poly16x4_t __p0) { - poly16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { + 
int16x8_t __ret; + __ret = vqrdmulhq_s16(__p0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); return __ret; } #else -__ai poly16x4_t vrev64_p16(poly16x4_t __p0) { - poly16x4_t __ret; - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int16x8_t vqrdmulhq_n_s16(int16x8_t __p0, int16_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vqrdmulhq_s16(__rev0, (int16x8_t) {__p1, __p1, __p1, __p1, __p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vrev64q_p8(poly8x16_t __p0) { - poly8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); +__ai __attribute__((target("neon"))) int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __ret; + __ret = vqrdmulh_s32(__p0, (int32x2_t) {__p1, __p1}); return __ret; } #else -__ai poly8x16_t vrev64q_p8(poly8x16_t __p0) { - poly8x16_t __ret; - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32x2_t vqrdmulh_n_s32(int32x2_t __p0, int32_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __noswap_vqrdmulh_s32(__rev0, (int32x2_t) {__p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vrev64q_p16(poly16x8_t __p0) { - 
poly16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); +__ai __attribute__((target("neon"))) int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __ret; + __ret = vqrdmulh_s16(__p0, (int16x4_t) {__p1, __p1, __p1, __p1}); return __ret; } #else -__ai poly16x8_t vrev64q_p16(poly16x8_t __p0) { - poly16x8_t __ret; - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int16x4_t vqrdmulh_n_s16(int16x4_t __p0, int16_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vqrdmulh_s16(__rev0, (int16x4_t) {__p1, __p1, __p1, __p1}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vrev64q_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16_t vrev64q_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vqrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vrev64q_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint32x4_t vrev64q_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vqrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vrev64q_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vqrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, 
(int8x16_t)__p1, 49); return __ret; } #else -__ai uint16x8_t vrev64q_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x8_t vqrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vrev64q_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else -__ai int8x16_t vrev64q_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vqrshlq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vrev64q_f32(float32x4_t __p0) { - float32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); +__ai __attribute__((target("neon"))) int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) { + 
int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai float32x4_t vrev64q_f32(float32x4_t __p0) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); +__ai __attribute__((target("neon"))) int32x4_t vqrshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vrev64q_s32(int32x4_t __p0) { - int32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); +__ai __attribute__((target("neon"))) int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); return __ret; } #else -__ai int32x4_t vrev64q_s32(int32x4_t __p0) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int64x2_t vqrshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vrev64q_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t 
__p1) { int16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else -__ai int16x8_t vrev64q_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vqrshlq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vrev64_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vrev64_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vqrshl_u8(uint8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vrev64_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) 
__builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2_t vrev64_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vqrshl_u32(uint32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif +__ai __attribute__((target("neon"))) uint64x1_t vqrshl_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vrev64_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai uint16x4_t vrev64_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vqrshl_u16(uint16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vrev64_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) 
__builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8_t vrev64_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vqrshl_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vrev64_f32(float32x2_t __p0) { - float32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0); - return __ret; -} -#else -__ai float32x2_t vrev64_f32(float32x2_t __p0) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vrev64_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else -__ai int32x2_t vrev64_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vqrshl_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif +__ai 
__attribute__((target("neon"))) int64x1_t vqrshl_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vrev64_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai int16x4_t vrev64_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) int16x4_t vqrshl_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) 
__builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} +#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) #else -__ai int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, 
(int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} +#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) #else -__ai int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vqrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) 
__builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} +#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) #else -__ai int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqrshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} +#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) #else -__ai uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t 
__ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqrshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} +#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) #else -__ai uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vqrshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 2); \ + __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} +#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) #else -__ai uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vqrshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vqrshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} +#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) 
__builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) #else -__ai int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqrshrun_n_s32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrun_n_s32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} +#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) #else -__ai int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vqrshrun_n_s64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrun_n_s64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} +#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) #else -__ai int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vqrshrun_n_s16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqrshrun_n_s16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vqrshrun_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) { 
uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vqshlq_u8(uint8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = (uint8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vqshlq_u32(uint32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = (uint32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) 
{ +__ai __attribute__((target("neon"))) uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else -__ai uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vqshlq_u64(uint64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = (uint64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vqshlq_u16(uint16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = (uint16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai 
__attribute__((target("neon"))) int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else -__ai int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vqshlq_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = (int8x16_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vqshlq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = (int32x4_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ 
-__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); return __ret; } #else -__ai int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vqshlq_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = (int64x2_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else -__ai int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vqshlq_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = (int16x8_t) __builtin_neon_vqshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vrshl_u8(uint8x8_t 
__p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vqshl_u8(uint8x8_t __p0, int8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = (uint8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vqshl_u32(uint32x2_t __p0, int32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = (uint32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) { +__ai __attribute__((target("neon"))) uint64x1_t 
vqshl_u64(uint64x1_t __p0, int64x1_t __p1) { uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + __ret = (uint64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vqshl_u16(uint16x4_t __p0, int16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = (uint16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vqshl_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); 
+ __ret = (int8x8_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else -__ai int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vqshl_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = (int32x2_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) { +__ai __attribute__((target("neon"))) int64x1_t vqshl_s64(int64x1_t __p0, int64x1_t __p1) { int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + __ret = (int64x1_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vqshl_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t 
__ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = (int16x4_t) __builtin_neon_vqshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \ +#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ - __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 48); \ + __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 48); \ __ret; \ }) #else -#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \ +#define vqshlq_n_u8(__p0, __p1) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 48); \ + __ret = (uint8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 48); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \ +#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ - __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 50); \ + __ret = (uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 50); \ __ret; \ }) #else -#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \ +#define vqshlq_n_u32(__p0, __p1) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 50); \ + __ret = 
(uint32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \ +#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ - __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 51); \ + __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 51); \ __ret; \ }) #else -#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \ +#define vqshlq_n_u64(__p0, __p1) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 51); \ + __ret = (uint64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 51); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \ +#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ - __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 49); \ + __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 49); \ __ret; \ }) #else -#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \ +#define vqshlq_n_u16(__p0, __p1) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 49); \ + __ret = (uint16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 49); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \ +#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ - __ret = 
(int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 32); \ + __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 32); \ __ret; \ }) #else -#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \ +#define vqshlq_n_s8(__p0, __p1) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 32); \ + __ret = (int8x16_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 32); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \ +#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ - __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 34); \ + __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 34); \ __ret; \ }) #else -#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \ +#define vqshlq_n_s32(__p0, __p1) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 34); \ + __ret = (int32x4_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 34); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \ +#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ - __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 35); \ + __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 35); \ __ret; \ }) #else -#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \ +#define vqshlq_n_s64(__p0, __p1) __extension__ ({ \ int64x2_t 
__ret; \ int64x2_t __s0 = __p0; \ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 35); \ + __ret = (int64x2_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 35); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \ +#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ - __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 33); \ + __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__s0, __p1, 33); \ __ret; \ }) #else -#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \ +#define vqshlq_n_s16(__p0, __p1) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 33); \ + __ret = (int16x8_t) __builtin_neon_vqshlq_n_v((int8x16_t)__rev0, __p1, 33); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vrshr_n_u8(__p0, __p1) __extension__ ({ \ +#define vqshl_n_u8(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ - __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 16); \ + __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 16); \ __ret; \ }) #else -#define vrshr_n_u8(__p0, __p1) __extension__ ({ \ +#define vqshl_n_u8(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 16); \ + __ret = (uint8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 16); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) 
#endif #ifdef __LITTLE_ENDIAN__ -#define vrshr_n_u32(__p0, __p1) __extension__ ({ \ +#define vqshl_n_u32(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ - __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 18); \ + __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 18); \ __ret; \ }) #else -#define vrshr_n_u32(__p0, __p1) __extension__ ({ \ +#define vqshl_n_u32(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 18); \ + __ret = (uint32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif -#define vrshr_n_u64(__p0, __p1) __extension__ ({ \ +#define vqshl_n_u64(__p0, __p1) __extension__ ({ \ uint64x1_t __ret; \ uint64x1_t __s0 = __p0; \ - __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \ + __ret = (uint64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vrshr_n_u16(__p0, __p1) __extension__ ({ \ +#define vqshl_n_u16(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ - __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 17); \ + __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 17); \ __ret; \ }) #else -#define vrshr_n_u16(__p0, __p1) __extension__ ({ \ +#define vqshl_n_u16(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 17); \ + __ret = (uint16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vrshr_n_s8(__p0, __p1) 
__extension__ ({ \ +#define vqshl_n_s8(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ - __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 0); \ + __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 0); \ __ret; \ }) #else -#define vrshr_n_s8(__p0, __p1) __extension__ ({ \ +#define vqshl_n_s8(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 0); \ + __ret = (int8x8_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 0); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vrshr_n_s32(__p0, __p1) __extension__ ({ \ +#define vqshl_n_s32(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ - __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 2); \ + __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 2); \ __ret; \ }) #else -#define vrshr_n_s32(__p0, __p1) __extension__ ({ \ +#define vqshl_n_s32(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 2); \ + __ret = (int32x2_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif -#define vrshr_n_s64(__p0, __p1) __extension__ ({ \ +#define vqshl_n_s64(__p0, __p1) __extension__ ({ \ int64x1_t __ret; \ int64x1_t __s0 = __p0; \ - __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \ + __ret = (int64x1_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vrshr_n_s16(__p0, __p1) __extension__ ({ \ +#define vqshl_n_s16(__p0, __p1) __extension__ ({ \ int16x4_t __ret; 
\ int16x4_t __s0 = __p0; \ - __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 1); \ + __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__s0, __p1, 1); \ __ret; \ }) #else -#define vrshr_n_s16(__p0, __p1) __extension__ ({ \ +#define vqshl_n_s16(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 1); \ + __ret = (int16x4_t) __builtin_neon_vqshl_n_v((int8x8_t)__rev0, __p1, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \ +#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 48); \ __ret; \ }) #else -#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vrshrn_n_u32(__p0, __p1) __extension__ ({ \ +#define vqshluq_n_s8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \ + uint32x4_t 
__ret; \ + int32x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ +}) +#else +#define vqshluq_n_s32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ +}) +#else +#define vqshluq_n_s64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ +}) +#else +#define vqshluq_n_s16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vqshluq_n_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqshlu_n_s8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + 
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqshlu_n_s32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#define vqshlu_n_s64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (uint64x1_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqshlu_n_s16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vqshlu_n_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint32x4_t __s0 = __p0; \ - __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t 
__s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 17); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \ +#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint64x2_t __s0 = __p0; \ - __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \ __ret; \ }) #else -#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \ +#define vqshrn_n_u64(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) -#define __noswap_vrshrn_n_u64(__p0, __p1) __extension__ ({ \ +#define __noswap_vqshrn_n_u64(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint64x2_t __s0 = __p0; \ - __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret = (uint32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 18); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \ +#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint16x8_t __s0 = __p0; \ - __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \ __ret; \ }) #else -#define vrshrn_n_u16(__p0, 
__p1) __extension__ ({ \ +#define vqshrn_n_u16(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 16); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) -#define __noswap_vrshrn_n_u16(__p0, __p1) __extension__ ({ \ +#define __noswap_vqshrn_n_u16(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint16x8_t __s0 = __p0; \ - __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret = (uint8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 16); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \ +#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int32x4_t __s0 = __p0; \ - __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \ __ret; \ }) #else -#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \ +#define vqshrn_n_s32(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 1); \ + __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) -#define __noswap_vrshrn_n_s32(__p0, __p1) __extension__ ({ \ +#define __noswap_vqshrn_n_s32(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int32x4_t __s0 = __p0; \ - __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret = (int16x4_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define 
vrshrn_n_s64(__p0, __p1) __extension__ ({ \ +#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int64x2_t __s0 = __p0; \ - __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \ __ret; \ }) #else -#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \ +#define vqshrn_n_s64(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 2); \ + __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) -#define __noswap_vrshrn_n_s64(__p0, __p1) __extension__ ({ \ +#define __noswap_vqshrn_n_s64(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int64x2_t __s0 = __p0; \ - __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret = (int32x2_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \ +#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int16x8_t __s0 = __p0; \ - __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \ __ret; \ }) #else -#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \ +#define vqshrn_n_s16(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 0); \ + __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__rev0, __p1, 0); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) -#define __noswap_vrshrn_n_s16(__p0, __p1) __extension__ ({ \ +#define 
__noswap_vqshrn_n_s16(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int16x8_t __s0 = __p0; \ - __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret = (int8x8_t) __builtin_neon_vqshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#else +#define vqshrun_n_s32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrun_n_s32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#else +#define vqshrun_n_s64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrun_n_s64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) 
__builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vqshrun_n_s16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vqshrun_n_s16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vqshrun_n_v((int8x16_t)__s0, __p1, 16); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vqsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 50); + __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint32x4_t vrsqrteq_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vqsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { 
uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 50); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 41); +__ai __attribute__((target("neon"))) uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else -__ai float32x4_t vrsqrteq_f32(float32x4_t __p0) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint64x2_t vqsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 18); +__ai __attribute__((target("neon"))) uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint32x2_t vrsqrte_u32(uint32x2_t __p0) { - uint32x2_t __ret; - uint32x2_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint16x8_t vqsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vrsqrte_f32(float32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 9); +__ai __attribute__((target("neon"))) int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else -__ai float32x2_t vrsqrte_f32(float32x2_t __p0) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) int8x16_t vqsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t 
__ret; - __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); +__ai __attribute__((target("neon"))) int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); +__ai __attribute__((target("neon"))) int32x4_t vqsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 9); +__ai __attribute__((target("neon"))) int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); return __ret; } #else -__ai float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); +__ai __attribute__((target("neon"))) int64x2_t vqsubq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __ret; \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __s1 = __p1; \ - __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} #else -#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __ret; \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __s1 = __p1; \ - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x8_t vqsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqsubq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - 
uint32x4_t __s1 = __p1; \ - __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} #else -#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vqsub_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __ret; \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __s1 = __p1; \ - __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} #else -#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __ret; \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __s1 = __p1; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, 
__s0, 1, 0); \ - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vqsub_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) uint64x1_t vqsub_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __ret; \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vqsub_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} #else -#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __ret; \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vqsub_u16(uint16x4_t __p0, 
uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __ret; \ - int8x16_t __s0 = __p0; \ - int8x16_t __s1 = __p1; \ - __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} #else -#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __ret; \ - int8x16_t __s0 = __p0; \ - int8x16_t __s1 = __p1; \ - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vqsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ - 
int32x4_t __ret; \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ - __ret; \ -}) -#else -#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __ret; \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __ret; \ - int64x2_t __s0 = __p0; \ - int64x2_t __s1 = __p1; \ - __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ - __ret; \ -}) -#else -#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __ret; \ - int64x2_t __s0 = __p0; \ - int64x2_t __s1 = __p1; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ - __ret; \ -}) -#else -#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 
0); \ - __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __ret; \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __s1 = __p1; \ - __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ - __ret; \ -}) -#else -#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __ret; \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __s1 = __p1; \ - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __ret; \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ - __ret; \ -}) -#else -#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __ret; \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1_t __ret; \ - uint64x1_t __s0 = __p0; \ - uint64x1_t __s1 = __p1; \ - __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ - __ret; \ -}) -#ifdef 
__LITTLE_ENDIAN__ -#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __ret; \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ - __ret; \ -}) -#else -#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __ret; \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __ret; \ - int8x8_t __s0 = __p0; \ - int8x8_t __s1 = __p1; \ - __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ - __ret; \ -}) -#else -#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __ret; \ - int8x8_t __s0 = __p0; \ - int8x8_t __s1 = __p1; \ - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __ret; \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} 
#else -#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __ret; \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vqsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif -#define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1_t __ret; \ - int64x1_t __s0 = __p0; \ - int64x1_t __s1 = __p1; \ - __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int64x1_t vqsub_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} #else -#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, 
__s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vqsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqsub_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); return __ret; } #else -__ai uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { uint16x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); + __ret = (uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint16x4_t __noswap_vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t __noswap_vraddhn_u32(uint32x4_t __p0, uint32x4_t __p1) { uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + __ret = 
(uint16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); return __ret; } #else -__ai uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { uint32x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); + __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint32x2_t __noswap_vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t __noswap_vraddhn_u64(uint64x2_t __p0, uint64x2_t __p1) { uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + __ret = (uint32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vraddhn_u16(uint16x8_t 
__p0, uint16x8_t __p1) { uint8x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); + __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint8x8_t __noswap_vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t __noswap_vraddhn_u16(uint16x8_t __p0, uint16x8_t __p1) { uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + __ret = (uint8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) { int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); return __ret; } #else -__ai int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vraddhn_s32(int32x4_t __p0, int32x4_t __p1) { int16x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); + __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int16x4_t __noswap_vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t __noswap_vraddhn_s32(int32x4_t 
__p0, int32x4_t __p1) { int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + __ret = (int16x4_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) { int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); return __ret; } #else -__ai int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vraddhn_s64(int64x2_t __p0, int64x2_t __p1) { int32x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); + __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int32x2_t __noswap_vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t __noswap_vraddhn_s64(int64x2_t __p0, int64x2_t __p1) { int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + __ret = (int32x2_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) { int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); return __ret; } #else -__ai int8x8_t vrsubhn_s16(int16x8_t __p0, 
int16x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vraddhn_s16(int16x8_t __p0, int16x8_t __p1) { int8x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); + __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int8x8_t __noswap_vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t __noswap_vraddhn_s16(int16x8_t __p0, int16x8_t __p1) { int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + __ret = (int8x8_t) __builtin_neon_vraddhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8_t __ret; \ - poly8_t __s0 = __p0; \ - poly8x8_t __s1 = __p1; \ - __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vrecpeq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 50); + return __ret; +} #else -#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8_t __ret; \ - poly8_t __s0 = __p0; \ - poly8x8_t __s1 = __p1; \ - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8_t __ret; \ - poly8_t __s0 = __p0; \ - poly8x8_t __s1 = __p1; \ - __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, 
(poly8x8_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vrecpeq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4_t __ret; \ - poly16_t __s0 = __p0; \ - poly16x4_t __s1 = __p1; \ - __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x4_t vrecpeq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 41); + return __ret; +} #else -#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4_t __ret; \ - poly16_t __s0 = __p0; \ - poly16x4_t __s1 = __p1; \ - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4_t __ret; \ - poly16_t __s0 = __p0; \ - poly16x4_t __s1 = __p1; \ - __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x4_t vrecpeq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16_t __ret; \ - poly8_t __s0 = __p0; \ - poly8x16_t __s1 = __p1; \ - __ret = (poly8x16_t) 
__builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vrecpe_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 18); + return __ret; +} #else -#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16_t __ret; \ - poly8_t __s0 = __p0; \ - poly8x16_t __s1 = __p1; \ - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16_t __ret; \ - poly8_t __s0 = __p0; \ - poly8x16_t __s1 = __p1; \ - __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vrecpe_u32(uint32x2_t __p0) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8_t __ret; \ - poly16_t __s0 = __p0; \ - poly16x8_t __s1 = __p1; \ - __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x2_t vrecpe_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 9); + return __ret; +} #else -#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8_t __ret; \ - poly16_t __s0 = __p0; \ - poly16x8_t __s1 = __p1; \ - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = 
(poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8_t __ret; \ - poly16_t __s0 = __p0; \ - poly16x8_t __s1 = __p1; \ - __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x2_t vrecpe_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrecpe_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __ret; \ - uint8_t __s0 = __p0; \ - uint8x16_t __s1 = __p1; \ - __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x4_t vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} #else -#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __ret; \ - uint8_t __s0 = __p0; \ - uint8x16_t __s1 = __p1; \ - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __ret; \ - uint8_t __s0 = __p0; \ - uint8x16_t __s1 = __p1; \ - __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x4_t 
vrecpsq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __ret; \ - uint32_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} #else -#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __ret; \ - uint32_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __ret; \ - uint32_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x2_t vrecps_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrecps_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ 
-#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __ret; \ - uint64_t __s0 = __p0; \ - uint64x2_t __s1 = __p1; \ - __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly8x8_t vrev16_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} #else -#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __ret; \ - uint64_t __s0 = __p0; \ - uint64x2_t __s1 = __p1; \ - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __ret; \ - uint64_t __s0 = __p0; \ - uint64x2_t __s1 = __p1; \ - __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly8x8_t vrev16_p8(poly8x8_t __p0) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __ret; \ - uint16_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly8x16_t vrev16q_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + return __ret; +} #else -#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __ret; \ - uint16_t __s0 = __p0; \ 
- uint16x8_t __s1 = __p1; \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __ret; \ - uint16_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __ret; \ - int8_t __s0 = __p0; \ - int8x16_t __s1 = __p1; \ - __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __ret; \ - int8_t __s0 = __p0; \ - int8x16_t __s1 = __p1; \ - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __ret; \ - int8_t __s0 = __p0; \ - int8x16_t __s1 = __p1; \ - __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4_t __ret; \ - float32_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4_t __ret; \ - float32_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x4_t __rev1; __rev1 = 
__builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4_t __ret; \ - float32_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __ret; \ - int32_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __ret; \ - int32_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __ret; \ - int32_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __ret; \ - int64_t __s0 = __p0; \ - int64x2_t __s1 = __p1; \ - __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __ret; \ - int64_t __s0 = __p0; \ - int64x2_t __s1 = __p1; \ - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - 
__ret; \ -}) -#define __noswap_vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __ret; \ - int64_t __s0 = __p0; \ - int64x2_t __s1 = __p1; \ - __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __ret; \ - int16_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __ret; \ - int16_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __ret; \ - int16_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __ret; \ - uint8_t __s0 = __p0; \ - uint8x8_t __s1 = __p1; \ - __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __ret; \ - uint8_t __s0 = __p0; \ - uint8x8_t __s1 = __p1; \ - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __ret; \ - uint8_t __s0 = __p0; \ - uint8x8_t __s1 = __p1; \ - __ret = 
(uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __ret; \ - uint32_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __ret; \ - uint32_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __ret; \ - uint32_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1_t __ret; \ - uint64_t __s0 = __p0; \ - uint64x1_t __s1 = __p1; \ - __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \ - __ret; \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __ret; \ - uint16_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __ret; \ - uint16_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __ret; \ - uint16_t __s0 = __p0; \ - 
uint16x4_t __s1 = __p1; \ - __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly8x16_t vrev16q_p8(poly8x16_t __p0) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __ret; \ - int8_t __s0 = __p0; \ - int8x8_t __s1 = __p1; \ - __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vrev16q_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + return __ret; +} #else -#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __ret; \ - int8_t __s0 = __p0; \ - int8x8_t __s1 = __p1; \ - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __ret; \ - int8_t __s0 = __p0; \ - int8x8_t __s1 = __p1; \ - __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vrev16q_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 
14); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2_t __ret; \ - float32_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16_t vrev16q_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + return __ret; +} #else -#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2_t __ret; \ - float32_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2_t __ret; \ - float32_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16_t vrev16q_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __ret; \ - int32_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) 
uint8x8_t vrev16_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} #else -#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __ret; \ - int32_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __ret; \ - int32_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vrev16_u8(uint8x8_t __p0) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif -#define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1_t __ret; \ - int64_t __s0 = __p0; \ - int64x1_t __s1 = __p1; \ - __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __ret; \ - int16_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vrev16_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); + return __ret; +} #else -#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __ret; \ - int16_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 
0); \ - __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __ret; \ - int16_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vrev16_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); +__ai __attribute__((target("neon"))) poly8x8_t vrev32_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); return __ret; } #else -__ai uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly8x8_t vrev32_p8(poly8x8_t __p0) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return 
__ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); +__ai __attribute__((target("neon"))) poly16x4_t vrev32_p16(poly16x4_t __p0) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); return __ret; } #else -__ai uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); +__ai __attribute__((target("neon"))) poly16x4_t vrev32_p16(poly16x4_t __p0) { + poly16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); +__ai __attribute__((target("neon"))) poly8x16_t vrev32q_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); return __ret; } #else -__ai uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) poly8x16_t vrev32q_p8(poly8x16_t __p0) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); +__ai __attribute__((target("neon"))) poly16x8_t vrev32q_p16(poly16x8_t __p0) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); return __ret; } #else -__ai uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); +__ai __attribute__((target("neon"))) poly16x8_t vrev32q_p16(poly16x8_t __p0) { + poly16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); +__ai __attribute__((target("neon"))) uint8x16_t vrev32q_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); return __ret; } #else -__ai int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); +__ai __attribute__((target("neon"))) uint8x16_t vrev32q_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); +__ai __attribute__((target("neon"))) uint16x8_t vrev32q_u16(uint16x8_t __p0) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); return __ret; } #else -__ai int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint16x8_t vrev32q_u16(uint16x8_t __p0) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); +__ai __attribute__((target("neon"))) 
int8x16_t vrev32q_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); return __ret; } #else -__ai int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) int8x16_t vrev32q_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vrev32q_s16(int16x8_t __p0) { int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2, 5, 4, 7, 6); return __ret; } #else -__ai int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vrev32q_s16(int16x8_t __p0) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2, 5, 4, 7, 6); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vshl_u8(uint8x8_t 
__p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vrev32_u8(uint8x8_t __p0) { uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); return __ret; } #else -__ai uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vrev32_u8(uint8x8_t __p0) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vrev32_u16(uint16x4_t __p0) { uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); return 
__ret; } #else -__ai uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vrev32_u16(uint16x4_t __p0) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vrev32_s8(int8x8_t __p0) { int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); return __ret; } #else -__ai int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vrev32_s8(int8x8_t __p0) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} -#else -__ai int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vrev32_s16(int16x4_t __p0) { int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); return __ret; } #else -__ai int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vrev32_s16(int16x4_t __p0) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vshlq_n_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __ret; \ - uint8x16_t __s0 = __p0; \ - __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 48); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly8x8_t vrev64_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #else -#define vshlq_n_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __ret; \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 48); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) 
poly8x8_t vrev64_p8(poly8x8_t __p0) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vshlq_n_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 50); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly16x4_t vrev64_p16(poly16x4_t __p0) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + return __ret; +} #else -#define vshlq_n_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly16x4_t vrev64_p16(poly16x4_t __p0) { + poly16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vshlq_n_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __ret; \ - uint64x2_t __s0 = __p0; \ - __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 51); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly8x16_t vrev64q_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + return __ret; +} #else -#define vshlq_n_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __ret; \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, 
__s0, 1, 0); \ - __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly8x16_t vrev64q_p8(poly8x16_t __p0) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vshlq_n_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __ret; \ - uint16x8_t __s0 = __p0; \ - __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 49); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly16x8_t vrev64q_p16(poly16x8_t __p0) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} #else -#define vshlq_n_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __ret; \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly16x8_t vrev64q_p16(poly16x8_t __p0) { + poly16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vshlq_n_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __ret; \ - int8x16_t __s0 = __p0; \ - __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 32); \ - __ret; \ -}) +__ai 
__attribute__((target("neon"))) uint8x16_t vrev64q_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + return __ret; +} #else -#define vshlq_n_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __ret; \ - int8x16_t __s0 = __p0; \ - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 32); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vrev64q_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vshlq_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __ret; \ - int32x4_t __s0 = __p0; \ - __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 34); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vrev64q_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} #else -#define vshlq_n_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __ret; \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vrev64q_u32(uint32x4_t __p0) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 
2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vshlq_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __ret; \ - int64x2_t __s0 = __p0; \ - __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 35); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vrev64q_u16(uint16x8_t __p0) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} #else -#define vshlq_n_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __ret; \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 35); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vrev64q_u16(uint16x8_t __p0) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vshlq_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 33); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16_t vrev64q_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + return __ret; +} #else -#define vshlq_n_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 33); \ - __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16_t vrev64q_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vshl_n_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __ret; \ - uint8x8_t __s0 = __p0; \ - __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 16); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x4_t vrev64q_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + return __ret; +} #else -#define vshl_n_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __ret; \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x4_t vrev64q_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vshl_n_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - uint32x2_t __s0 = __p0; \ - __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 18); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vrev64q_s32(int32x4_t __p0) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0, 3, 2); + 
return __ret; +} #else -#define vshl_n_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __ret; \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vrev64q_s32(int32x4_t __p0) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0, 3, 2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif -#define vshl_n_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __ret; \ - uint64x1_t __s0 = __p0; \ - __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vshl_n_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - uint16x4_t __s0 = __p0; \ - __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 17); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x8_t vrev64q_s16(int16x8_t __p0) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} #else -#define vshl_n_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x8_t vrev64q_s16(int16x8_t __p0) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ 
-#define vshl_n_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __ret; \ - int8x8_t __s0 = __p0; \ - __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vrev64_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #else -#define vshl_n_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __ret; \ - int8x8_t __s0 = __p0; \ - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vrev64_u8(uint8x8_t __p0) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vshl_n_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __ret; \ - int32x2_t __s0 = __p0; \ - __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vrev64_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0); + return __ret; +} #else -#define vshl_n_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __ret; \ - int32x2_t __s0 = __p0; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vrev64_u32(uint32x2_t __p0) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = 
__builtin_shufflevector(__rev0, __rev0, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif -#define vshl_n_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __ret; \ - int64x1_t __s0 = __p0; \ - __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vshl_n_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vrev64_u16(uint16x4_t __p0) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + return __ret; +} #else -#define vshl_n_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vrev64_u16(uint16x4_t __p0) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vshll_n_u8(__p0, __p1) __extension__ ({ \ - uint16x8_t __ret; \ - uint8x8_t __s0 = __p0; \ - __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vrev64_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #else -#define vshll_n_u8(__p0, __p1) __extension__ ({ \ - uint16x8_t __ret; \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint16x8_t) 
__builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vshll_n_u8(__p0, __p1) __extension__ ({ \ - uint16x8_t __ret; \ - uint8x8_t __s0 = __p0; \ - __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vrev64_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vshll_n_u32(__p0, __p1) __extension__ ({ \ - uint64x2_t __ret; \ - uint32x2_t __s0 = __p0; \ - __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x2_t vrev64_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0); + return __ret; +} #else -#define vshll_n_u32(__p0, __p1) __extension__ ({ \ - uint64x2_t __ret; \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vshll_n_u32(__p0, __p1) __extension__ ({ \ - uint64x2_t __ret; \ - uint32x2_t __s0 = __p0; \ - __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x2_t vrev64_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vshll_n_u16(__p0, 
__p1) __extension__ ({ \ - uint32x4_t __ret; \ - uint16x4_t __s0 = __p0; \ - __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vrev64_s32(int32x2_t __p0) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1, 0); + return __ret; +} #else -#define vshll_n_u16(__p0, __p1) __extension__ ({ \ - uint32x4_t __ret; \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vshll_n_u16(__p0, __p1) __extension__ ({ \ - uint32x4_t __ret; \ - uint16x4_t __s0 = __p0; \ - __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vrev64_s32(int32x2_t __p0) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vshll_n_s8(__p0, __p1) __extension__ ({ \ - int16x8_t __ret; \ - int8x8_t __s0 = __p0; \ - __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vrev64_s16(int16x4_t __p0) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + return __ret; +} #else -#define vshll_n_s8(__p0, __p1) __extension__ ({ \ - int16x8_t __ret; \ - int8x8_t __s0 = __p0; \ - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vshll_n_s8(__p0, __p1) __extension__ ({ 
\ - int16x8_t __ret; \ - int8x8_t __s0 = __p0; \ - __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vrev64_s16(int16x4_t __p0) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vshll_n_s32(__p0, __p1) __extension__ ({ \ - int64x2_t __ret; \ - int32x2_t __s0 = __p0; \ - __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float16x8_t vrev64q_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); + return __ret; +} #else -#define vshll_n_s32(__p0, __p1) __extension__ ({ \ - int64x2_t __ret; \ - int32x2_t __s0 = __p0; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 35); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vshll_n_s32(__p0, __p1) __extension__ ({ \ - int64x2_t __ret; \ - int32x2_t __s0 = __p0; \ - __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float16x8_t vrev64q_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vshll_n_s16(__p0, __p1) __extension__ ({ \ - int32x4_t __ret; \ - int16x4_t __s0 = __p0; \ - __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \ - __ret; \ -}) +__ai 
__attribute__((target("neon"))) float16x4_t vrev64_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + return __ret; +} #else -#define vshll_n_s16(__p0, __p1) __extension__ ({ \ - int32x4_t __ret; \ - int16x4_t __s0 = __p0; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vshll_n_s16(__p0, __p1) __extension__ ({ \ - int32x4_t __ret; \ - int16x4_t __s0 = __p0; \ - __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float16x4_t vrev64_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vshrq_n_u8(__p0, __p1) __extension__ ({ \ +__ai __attribute__((target("neon"))) uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vrhaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) 
uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vrhaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vrhaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vrhaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + 
__ret = (int8x16_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vrhaddq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vrhaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vrhaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vrhadd_u8(uint8x8_t __p0, uint8x8_t __p1) { + 
uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vrhadd_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vrhadd_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return 
__ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vrhadd_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vrhadd_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vrhadd_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vrhadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret 
= (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vrshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vrshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vrshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vrshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vrshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vrshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + 
int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vrshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vrshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vrshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} +#else +__ai 
__attribute__((target("neon"))) uint8x8_t vrshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vrshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vrshl_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vrshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vrshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vrshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) int64x1_t vrshl_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vrshl_s16(int16x4_t __p0, int16x4_t 
__p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vrshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ - __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 48); \ + __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 48); \ __ret; \ }) #else -#define vshrq_n_u8(__p0, __p1) __extension__ ({ \ +#define vrshrq_n_u8(__p0, __p1) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 48); \ + __ret = (uint8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 48); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshrq_n_u32(__p0, __p1) __extension__ ({ \ +#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ - __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 50); \ + __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 50); \ __ret; \ }) #else -#define vshrq_n_u32(__p0, __p1) __extension__ ({ \ +#define vrshrq_n_u32(__p0, __p1) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 50); \ + __ret = (uint32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 
3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshrq_n_u64(__p0, __p1) __extension__ ({ \ +#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ - __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 51); \ + __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 51); \ __ret; \ }) #else -#define vshrq_n_u64(__p0, __p1) __extension__ ({ \ +#define vrshrq_n_u64(__p0, __p1) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 51); \ + __ret = (uint64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 51); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshrq_n_u16(__p0, __p1) __extension__ ({ \ +#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ - __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 49); \ + __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 49); \ __ret; \ }) #else -#define vshrq_n_u16(__p0, __p1) __extension__ ({ \ +#define vrshrq_n_u16(__p0, __p1) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 49); \ + __ret = (uint16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 49); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshrq_n_s8(__p0, __p1) __extension__ ({ \ +#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ - __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 32); \ + __ret = (int8x16_t) 
__builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 32); \ __ret; \ }) #else -#define vshrq_n_s8(__p0, __p1) __extension__ ({ \ +#define vrshrq_n_s8(__p0, __p1) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 32); \ + __ret = (int8x16_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 32); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshrq_n_s32(__p0, __p1) __extension__ ({ \ +#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ - __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 34); \ + __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 34); \ __ret; \ }) #else -#define vshrq_n_s32(__p0, __p1) __extension__ ({ \ +#define vrshrq_n_s32(__p0, __p1) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 34); \ + __ret = (int32x4_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 34); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshrq_n_s64(__p0, __p1) __extension__ ({ \ +#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ - __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 35); \ + __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 35); \ __ret; \ }) #else -#define vshrq_n_s64(__p0, __p1) __extension__ ({ \ +#define vrshrq_n_s64(__p0, __p1) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 
0); \ - __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 35); \ + __ret = (int64x2_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 35); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshrq_n_s16(__p0, __p1) __extension__ ({ \ +#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ - __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 33); \ + __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__s0, __p1, 33); \ __ret; \ }) #else -#define vshrq_n_s16(__p0, __p1) __extension__ ({ \ +#define vrshrq_n_s16(__p0, __p1) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 33); \ + __ret = (int16x8_t) __builtin_neon_vrshrq_n_v((int8x16_t)__rev0, __p1, 33); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshr_n_u8(__p0, __p1) __extension__ ({ \ +#define vrshr_n_u8(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ - __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 16); \ + __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 16); \ __ret; \ }) #else -#define vshr_n_u8(__p0, __p1) __extension__ ({ \ +#define vrshr_n_u8(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 16); \ + __ret = (uint8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 16); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshr_n_u32(__p0, __p1) __extension__ ({ \ +#define vrshr_n_u32(__p0, 
__p1) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ - __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 18); \ + __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 18); \ __ret; \ }) #else -#define vshr_n_u32(__p0, __p1) __extension__ ({ \ +#define vrshr_n_u32(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 18); \ + __ret = (uint32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif -#define vshr_n_u64(__p0, __p1) __extension__ ({ \ +#define vrshr_n_u64(__p0, __p1) __extension__ ({ \ uint64x1_t __ret; \ uint64x1_t __s0 = __p0; \ - __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \ + __ret = (uint64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vshr_n_u16(__p0, __p1) __extension__ ({ \ +#define vrshr_n_u16(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ - __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 17); \ + __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 17); \ __ret; \ }) #else -#define vshr_n_u16(__p0, __p1) __extension__ ({ \ +#define vrshr_n_u16(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 17); \ + __ret = (uint16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshr_n_s8(__p0, __p1) __extension__ ({ \ +#define vrshr_n_s8(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ - __ret = 
(int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 0); \ + __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 0); \ __ret; \ }) #else -#define vshr_n_s8(__p0, __p1) __extension__ ({ \ +#define vrshr_n_s8(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 0); \ + __ret = (int8x8_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 0); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshr_n_s32(__p0, __p1) __extension__ ({ \ +#define vrshr_n_s32(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ - __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 2); \ + __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 2); \ __ret; \ }) #else -#define vshr_n_s32(__p0, __p1) __extension__ ({ \ +#define vrshr_n_s32(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 2); \ + __ret = (int32x2_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif -#define vshr_n_s64(__p0, __p1) __extension__ ({ \ +#define vrshr_n_s64(__p0, __p1) __extension__ ({ \ int64x1_t __ret; \ int64x1_t __s0 = __p0; \ - __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \ + __ret = (int64x1_t) __builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vshr_n_s16(__p0, __p1) __extension__ ({ \ +#define vrshr_n_s16(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ - __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 1); \ + __ret = (int16x4_t) 
__builtin_neon_vrshr_n_v((int8x8_t)__s0, __p1, 1); \ __ret; \ }) #else -#define vshr_n_s16(__p0, __p1) __extension__ ({ \ +#define vrshr_n_s16(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 1); \ + __ret = (int16x4_t) __builtin_neon_vrshr_n_v((int8x8_t)__rev0, __p1, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshrn_n_u32(__p0, __p1) __extension__ ({ \ +#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint32x4_t __s0 = __p0; \ - __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \ __ret; \ }) #else -#define vshrn_n_u32(__p0, __p1) __extension__ ({ \ +#define vrshrn_n_u32(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) -#define __noswap_vshrn_n_u32(__p0, __p1) __extension__ ({ \ +#define __noswap_vrshrn_n_u32(__p0, __p1) __extension__ ({ \ uint16x4_t __ret; \ uint32x4_t __s0 = __p0; \ - __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret = (uint16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 17); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshrn_n_u64(__p0, __p1) __extension__ ({ \ +#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint64x2_t __s0 = __p0; \ - __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret = (uint32x2_t) 
__builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \ __ret; \ }) #else -#define vshrn_n_u64(__p0, __p1) __extension__ ({ \ +#define vrshrn_n_u64(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) -#define __noswap_vshrn_n_u64(__p0, __p1) __extension__ ({ \ +#define __noswap_vrshrn_n_u64(__p0, __p1) __extension__ ({ \ uint32x2_t __ret; \ uint64x2_t __s0 = __p0; \ - __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret = (uint32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 18); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshrn_n_u16(__p0, __p1) __extension__ ({ \ +#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint16x8_t __s0 = __p0; \ - __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \ __ret; \ }) #else -#define vshrn_n_u16(__p0, __p1) __extension__ ({ \ +#define vrshrn_n_u16(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = (uint8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 16); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) -#define __noswap_vshrn_n_u16(__p0, __p1) __extension__ ({ \ +#define __noswap_vrshrn_n_u16(__p0, __p1) __extension__ ({ \ uint8x8_t __ret; \ uint16x8_t __s0 = __p0; \ - __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret = (uint8x8_t) 
__builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 16); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshrn_n_s32(__p0, __p1) __extension__ ({ \ +#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int32x4_t __s0 = __p0; \ - __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \ __ret; \ }) #else -#define vshrn_n_s32(__p0, __p1) __extension__ ({ \ +#define vrshrn_n_s32(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 1); \ + __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) -#define __noswap_vshrn_n_s32(__p0, __p1) __extension__ ({ \ +#define __noswap_vrshrn_n_s32(__p0, __p1) __extension__ ({ \ int16x4_t __ret; \ int32x4_t __s0 = __p0; \ - __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret = (int16x4_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshrn_n_s64(__p0, __p1) __extension__ ({ \ +#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int64x2_t __s0 = __p0; \ - __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \ __ret; \ }) #else -#define vshrn_n_s64(__p0, __p1) __extension__ ({ \ +#define vrshrn_n_s64(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 2); \ + __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ 
__ret; \ }) -#define __noswap_vshrn_n_s64(__p0, __p1) __extension__ ({ \ +#define __noswap_vrshrn_n_s64(__p0, __p1) __extension__ ({ \ int32x2_t __ret; \ int64x2_t __s0 = __p0; \ - __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret = (int32x2_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 2); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vshrn_n_s16(__p0, __p1) __extension__ ({ \ +#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int16x8_t __s0 = __p0; \ - __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \ __ret; \ }) #else -#define vshrn_n_s16(__p0, __p1) __extension__ ({ \ +#define vrshrn_n_s16(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 0); \ + __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__rev0, __p1, 0); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) -#define __noswap_vshrn_n_s16(__p0, __p1) __extension__ ({ \ +#define __noswap_vrshrn_n_s16(__p0, __p1) __extension__ ({ \ int8x8_t __ret; \ int16x8_t __s0 = __p0; \ - __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret = (int8x8_t) __builtin_neon_vrshrn_n_v((int8x16_t)__s0, __p1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8_t __ret; \ - poly8x8_t __s0 = __p0; \ - poly8x8_t __s1 = __p1; \ - __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vrsqrteq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 50); + return __ret; +} #else -#define vsli_n_p8(__p0, __p1, 
__p2) __extension__ ({ \ - poly8x8_t __ret; \ - poly8x8_t __s0 = __p0; \ - poly8x8_t __s1 = __p1; \ - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vrsqrteq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4_t __ret; \ - poly16x4_t __s0 = __p0; \ - poly16x4_t __s1 = __p1; \ - __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x4_t vrsqrteq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 41); + return __ret; +} #else -#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4_t __ret; \ - poly16x4_t __s0 = __p0; \ - poly16x4_t __s1 = __p1; \ - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x4_t vrsqrteq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 
41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16_t __ret; \ - poly8x16_t __s0 = __p0; \ - poly8x16_t __s1 = __p1; \ - __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vrsqrte_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 18); + return __ret; +} #else -#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16_t __ret; \ - poly8x16_t __s0 = __p0; \ - poly8x16_t __s1 = __p1; \ - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vrsqrte_u32(uint32x2_t __p0) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8_t __ret; \ - poly16x8_t __s0 = __p0; \ - poly16x8_t __s1 = __p1; \ - __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x2_t vrsqrte_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 9); + return __ret; +} #else -#define vsliq_n_p16(__p0, __p1, __p2) 
__extension__ ({ \ - poly16x8_t __ret; \ - poly16x8_t __s0 = __p0; \ - poly16x8_t __s1 = __p1; \ - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x2_t vrsqrte_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrsqrte_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \ +__ai __attribute__((target("neon"))) float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vrsqrtsq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vrsqrts_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vrsqrts_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ uint8x16_t __s1 = __p1; \ - __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ + __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ __ret; \ }) #else -#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \ +#define vrsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16_t __ret; \ uint8x16_t __s0 = __p0; \ uint8x16_t __s1 = __p1; \ uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ + __ret = (uint8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \ +#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ uint32x4_t __s1 = __p1; \ - __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ + __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ __ret; \ }) #else -#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \ +#define vrsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4_t __ret; \ uint32x4_t __s0 = __p0; \ 
uint32x4_t __s1 = __p1; \ uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ + __ret = (uint32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \ +#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __s1 = __p1; \ - __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ __ret; \ }) #else -#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \ +#define vrsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2_t __ret; \ uint64x2_t __s0 = __p0; \ uint64x2_t __s1 = __p1; \ uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = (uint64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \ +#define vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __s1 = __p1; \ - __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ + __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ __ret; \ }) #else -#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \ +#define 
vrsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8_t __ret; \ uint16x8_t __s0 = __p0; \ uint16x8_t __s1 = __p1; \ uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ + __ret = (uint16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \ +#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ int8x16_t __s1 = __p1; \ - __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ + __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ __ret; \ }) #else -#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \ +#define vrsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16_t __ret; \ int8x16_t __s0 = __p0; \ int8x16_t __s1 = __p1; \ int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ + __ret = (int8x16_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \ +#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __s1 = __p1; \ - __ret = (int32x4_t) 
__builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ __ret; \ }) #else -#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \ +#define vrsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ int32x4_t __s0 = __p0; \ int32x4_t __s1 = __p1; \ int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = (int32x4_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \ +#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __s1 = __p1; \ - __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ + __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ __ret; \ }) #else -#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \ +#define vrsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2_t __ret; \ int64x2_t __s0 = __p0; \ int64x2_t __s1 = __p1; \ int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ + __ret = (int64x2_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \ +#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 
= __p0; \ int16x8_t __s1 = __p1; \ - __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ __ret; \ }) #else -#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \ +#define vrsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ int16x8_t __s0 = __p0; \ int16x8_t __s1 = __p1; \ int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = (int16x8_t) __builtin_neon_vrsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \ +#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ uint8x8_t __s1 = __p1; \ - __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ + __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ __ret; \ }) #else -#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \ +#define vrsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8_t __ret; \ uint8x8_t __s0 = __p0; \ uint8x8_t __s1 = __p1; \ uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ + __ret = (uint8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define 
vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \ +#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ uint32x2_t __s1 = __p1; \ - __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ + __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ __ret; \ }) #else -#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \ +#define vrsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2_t __ret; \ uint32x2_t __s0 = __p0; \ uint32x2_t __s1 = __p1; \ uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ + __ret = (uint32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif -#define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \ +#define vrsra_n_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x1_t __ret; \ uint64x1_t __s0 = __p0; \ uint64x1_t __s1 = __p1; \ - __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret = (uint64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \ +#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ uint16x4_t __s1 = __p1; \ - __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ + __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ __ret; \ }) #else -#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \ +#define vrsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4_t __ret; \ uint16x4_t __s0 = __p0; \ uint16x4_t __s1 = __p1; \ uint16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ + __ret = (uint16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \ +#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ int8x8_t __s1 = __p1; \ - __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ + __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ __ret; \ }) #else -#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \ +#define vrsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8_t __ret; \ int8x8_t __s0 = __p0; \ int8x8_t __s1 = __p1; \ int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ + __ret = (int8x8_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \ +#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = __p0; \ int32x2_t __s1 = __p1; \ - __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ __ret; \ }) #else -#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \ +#define vrsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ int32x2_t __s0 = 
__p0; \ int32x2_t __s1 = __p1; \ int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = (int32x2_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif -#define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \ +#define vrsra_n_s64(__p0, __p1, __p2) __extension__ ({ \ int64x1_t __ret; \ int64x1_t __s0 = __p0; \ int64x1_t __s1 = __p1; \ - __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret = (int64x1_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \ +#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __s1 = __p1; \ - __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ __ret; \ }) #else -#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \ +#define vrsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ int16x4_t __s0 = __p0; \ int16x4_t __s1 = __p1; \ int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = (int16x4_t) __builtin_neon_vrsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __ret; \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __s1 = __p1; \ - 
__ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} #else -#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __ret; \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __s1 = __p1; \ - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x4_t __noswap_vrsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + 
uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} #else -#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __ret; \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x2_t __noswap_vrsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __ret; \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __s1 = __p1; \ - __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} #else -#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __ret; \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __s1 = __p1; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint64x2_t 
__rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x8_t __noswap_vrsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __ret; \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} #else -#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __ret; \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) 
int16x4_t vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t __noswap_vrsubhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __ret; \ - int8x16_t __s0 = __p0; \ - int8x16_t __s1 = __p1; \ - __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} #else -#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __ret; \ - int8x16_t __s0 = __p0; \ - int8x16_t __s1 = __p1; \ - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) 
__builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t __noswap_vrsubhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __ret; \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ - __ret; \ -}) -#else -#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __ret; \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __ret; \ - int64x2_t __s0 = __p0; \ - int64x2_t __s1 = __p1; \ - __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ - __ret; \ -}) -#else -#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __ret; \ - int64x2_t __s0 = __p0; \ - int64x2_t __s1 = __p1; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - 
int16x8_t __s1 = __p1; \ - __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ - __ret; \ -}) -#else -#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __ret; \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __s1 = __p1; \ - __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ - __ret; \ -}) -#else -#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __ret; \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __s1 = __p1; \ - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __ret; \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ - __ret; \ -}) -#else -#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __ret; \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - 
__ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1_t __ret; \ - uint64x1_t __s0 = __p0; \ - uint64x1_t __s1 = __p1; \ - __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ - __ret; \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __ret; \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ - __ret; \ -}) -#else -#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __ret; \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __ret; \ - int8x8_t __s0 = __p0; \ - int8x8_t __s1 = __p1; \ - __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ - __ret; \ -}) -#else -#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __ret; \ - int8x8_t __s0 = __p0; \ - int8x8_t __s1 = __p1; \ - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsra_n_s32(__p0, __p1, 
__p2) __extension__ ({ \ - int32x2_t __ret; \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ - __ret; \ -}) -#else -#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __ret; \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1_t __ret; \ - int64x1_t __s0 = __p0; \ - int64x1_t __s1 = __p1; \ - __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ - __ret; \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} #else -#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + int16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t __noswap_vrsubhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \ +#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8_t __ret; \ - poly8x8_t __s0 = __p0; \ + poly8_t __s0 = __p0; \ poly8x8_t __s1 = __p1; \ - __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \ + __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \ __ret; \ }) #else -#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \ +#define vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x8_t __ret; \ - poly8x8_t __s0 = __p0; \ + poly8_t __s0 = __p0; \ poly8x8_t __s1 = __p1; \ - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \ + __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) +#define __noswap_vset_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + __ret = (poly8x8_t) __builtin_neon_vset_lane_i8(__s0, (poly8x8_t)__s1, __p2); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \ +#define vset_lane_p16(__p0, 
__p1, __p2) __extension__ ({ \ poly16x4_t __ret; \ - poly16x4_t __s0 = __p0; \ + poly16_t __s0 = __p0; \ poly16x4_t __s1 = __p1; \ - __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \ + __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \ __ret; \ }) #else -#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \ +#define vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x4_t __ret; \ - poly16x4_t __s0 = __p0; \ + poly16_t __s0 = __p0; \ poly16x4_t __s1 = __p1; \ - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \ + __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) +#define __noswap_vset_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + __ret = (poly16x4_t) __builtin_neon_vset_lane_i16(__s0, (poly16x4_t)__s1, __p2); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \ +#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16_t __ret; \ - poly8x16_t __s0 = __p0; \ + poly8_t __s0 = __p0; \ poly8x16_t __s1 = __p1; \ - __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \ + __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \ __ret; \ }) #else -#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \ +#define vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \ poly8x16_t __ret; \ - poly8x16_t __s0 = __p0; \ + poly8_t __s0 = __p0; \ poly8x16_t __s1 = __p1; \ - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ poly8x16_t 
__rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \ + __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) +#define __noswap_vsetq_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + __ret = (poly8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (poly8x16_t)__s1, __p2); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \ +#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8_t __ret; \ - poly16x8_t __s0 = __p0; \ + poly16_t __s0 = __p0; \ poly16x8_t __s1 = __p1; \ - __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \ + __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__s1, __p2); \ __ret; \ }) #else -#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \ +#define vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \ poly16x8_t __ret; \ - poly16x8_t __s0 = __p0; \ + poly16_t __s0 = __p0; \ poly16x8_t __s1 = __p1; \ - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \ + __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (poly16x8_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) +#define __noswap_vsetq_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + __ret = (poly16x8_t) __builtin_neon_vsetq_lane_i16(__s0, 
(poly16x8_t)__s1, __p2); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \ +#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16_t __ret; \ - uint8x16_t __s0 = __p0; \ + uint8_t __s0 = __p0; \ uint8x16_t __s1 = __p1; \ - __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ + __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ __ret; \ }) #else -#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \ +#define vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x16_t __ret; \ - uint8x16_t __s0 = __p0; \ + uint8_t __s0 = __p0; \ uint8x16_t __s1 = __p1; \ - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ + __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) +#define __noswap_vsetq_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + __ret = (uint8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \ +#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4_t __ret; \ - uint32x4_t __s0 = __p0; \ + uint32_t __s0 = __p0; \ uint32x4_t __s1 = __p1; \ - __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ + __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \ __ret; \ }) #else -#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \ 
+#define vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x4_t __ret; \ - uint32x4_t __s0 = __p0; \ + uint32_t __s0 = __p0; \ uint32x4_t __s1 = __p1; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ + __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) +#define __noswap_vsetq_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + __ret = (uint32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \ +#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2_t __ret; \ - uint64x2_t __s0 = __p0; \ + uint64_t __s0 = __p0; \ uint64x2_t __s1 = __p1; \ - __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \ __ret; \ }) #else -#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \ +#define vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x2_t __ret; \ - uint64x2_t __s0 = __p0; \ + uint64_t __s0 = __p0; \ uint64x2_t __s1 = __p1; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) +#define __noswap_vsetq_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64_t 
__s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \ +#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8_t __ret; \ - uint16x8_t __s0 = __p0; \ + uint16_t __s0 = __p0; \ uint16x8_t __s1 = __p1; \ - __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ + __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \ __ret; \ }) #else -#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \ +#define vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x8_t __ret; \ - uint16x8_t __s0 = __p0; \ + uint16_t __s0 = __p0; \ uint16x8_t __s1 = __p1; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ + __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) +#define __noswap_vsetq_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + __ret = (uint16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \ +#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16_t __ret; \ - int8x16_t __s0 = __p0; \ + int8_t __s0 = __p0; \ int8x16_t __s1 = __p1; \ - __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ + __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ __ret; \ }) #else -#define vsriq_n_s8(__p0, __p1, __p2) 
__extension__ ({ \ +#define vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x16_t __ret; \ - int8x16_t __s0 = __p0; \ + int8_t __s0 = __p0; \ int8x16_t __s1 = __p1; \ - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ + __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) +#define __noswap_vsetq_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + __ret = (int8x16_t) __builtin_neon_vsetq_lane_i8(__s0, (int8x16_t)__s1, __p2); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \ +#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __ret; \ + float32_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __ret; \ + float32_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __ret; \ + float32_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + __ret = (float32x4_t) __builtin_neon_vsetq_lane_f32(__s0, (float32x4_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ - int32x4_t __s0 = __p0; \ + int32_t __s0 = __p0; \ int32x4_t __s1 = __p1; \ - __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \ __ret; \ }) #else -#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \ +#define vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x4_t __ret; \ - int32x4_t __s0 = __p0; \ + int32_t __s0 = __p0; \ int32x4_t __s1 = __p1; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) +#define __noswap_vsetq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vsetq_lane_i32(__s0, (int32x4_t)__s1, __p2); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \ +#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2_t __ret; \ - int64x2_t __s0 = __p0; \ + int64_t __s0 = __p0; \ int64x2_t __s1 = __p1; \ - __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ + __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \ __ret; \ }) #else -#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \ +#define vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x2_t __ret; \ - int64x2_t __s0 = __p0; \ + int64_t __s0 = __p0; \ int64x2_t __s1 = __p1; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ int64x2_t __rev1; __rev1 = 
__builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ + __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) +#define __noswap_vsetq_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + __ret = (int64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (int64x2_t)__s1, __p2); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \ +#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ - int16x8_t __s0 = __p0; \ + int16_t __s0 = __p0; \ int16x8_t __s1 = __p1; \ - __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \ __ret; \ }) #else -#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \ +#define vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x8_t __ret; \ - int16x8_t __s0 = __p0; \ + int16_t __s0 = __p0; \ int16x8_t __s1 = __p1; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) +#define __noswap_vsetq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vsetq_lane_i16(__s0, (int16x8_t)__s1, __p2); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \ +#define 
vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8_t __ret; \ - uint8x8_t __s0 = __p0; \ + uint8_t __s0 = __p0; \ uint8x8_t __s1 = __p1; \ - __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ + __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ __ret; \ }) #else -#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \ +#define vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \ uint8x8_t __ret; \ - uint8x8_t __s0 = __p0; \ + uint8_t __s0 = __p0; \ uint8x8_t __s1 = __p1; \ - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ + __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) +#define __noswap_vset_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + __ret = (uint8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \ +#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2_t __ret; \ - uint32x2_t __s0 = __p0; \ + uint32_t __s0 = __p0; \ uint32x2_t __s1 = __p1; \ - __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ + __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \ __ret; \ }) #else -#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \ +#define vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \ uint32x2_t __ret; \ - uint32x2_t __s0 = __p0; \ + uint32_t __s0 = __p0; \ uint32x2_t __s1 = __p1; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ uint32x2_t __rev1; __rev1 = 
__builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ + __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) +#define __noswap_vset_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + __ret = (uint32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \ + __ret; \ +}) #endif -#define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \ +#define vset_lane_u64(__p0, __p1, __p2) __extension__ ({ \ uint64x1_t __ret; \ - uint64x1_t __s0 = __p0; \ + uint64_t __s0 = __p0; \ uint64x1_t __s1 = __p1; \ - __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret = (uint64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \ +#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4_t __ret; \ - uint16x4_t __s0 = __p0; \ + uint16_t __s0 = __p0; \ uint16x4_t __s1 = __p1; \ - __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ + __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \ __ret; \ }) #else -#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \ +#define vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \ uint16x4_t __ret; \ - uint16x4_t __s0 = __p0; \ + uint16_t __s0 = __p0; \ uint16x4_t __s1 = __p1; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ + __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 
0); \ __ret; \ }) +#define __noswap_vset_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + __ret = (uint16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \ +#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8_t __ret; \ - int8x8_t __s0 = __p0; \ + int8_t __s0 = __p0; \ int8x8_t __s1 = __p1; \ - __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ + __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ __ret; \ }) #else -#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \ +#define vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \ int8x8_t __ret; \ - int8x8_t __s0 = __p0; \ + int8_t __s0 = __p0; \ int8x8_t __s1 = __p1; \ - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ + __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) +#define __noswap_vset_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + __ret = (int8x8_t) __builtin_neon_vset_lane_i8(__s0, (int8x8_t)__s1, __p2); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \ +#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __ret; \ + float32_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __ret; \ + float32_t 
__s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vset_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __ret; \ + float32_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + __ret = (float32x2_t) __builtin_neon_vset_lane_f32(__s0, (float32x2_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ - int32x2_t __s0 = __p0; \ + int32_t __s0 = __p0; \ int32x2_t __s1 = __p1; \ - __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \ __ret; \ }) #else -#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \ +#define vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \ int32x2_t __ret; \ - int32x2_t __s0 = __p0; \ + int32_t __s0 = __p0; \ int32x2_t __s1 = __p1; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) +#define __noswap_vset_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vset_lane_i32(__s0, (int32x2_t)__s1, __p2); \ + __ret; \ +}) #endif -#define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \ +#define vset_lane_s64(__p0, __p1, __p2) __extension__ ({ \ int64x1_t __ret; \ - int64x1_t __s0 = __p0; \ + int64_t __s0 = __p0; \ int64x1_t __s1 = __p1; \ - __ret = 
(int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret = (int64x1_t) __builtin_neon_vset_lane_i64(__s0, (int64x1_t)__s1, __p2); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \ +#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ - int16x4_t __s0 = __p0; \ + int16_t __s0 = __p0; \ int16x4_t __s1 = __p1; \ - __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \ __ret; \ }) #else -#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \ +#define vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \ int16x4_t __ret; \ - int16x4_t __s0 = __p0; \ + int16_t __s0 = __p0; \ int16x4_t __s1 = __p1; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__rev1, __p2); \ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) +#define __noswap_vset_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vset_lane_i16(__s0, (int16x4_t)__s1, __p2); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_p8(__p0, __p1) __extension__ ({ \ - poly8x8_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 4); \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} #else -#define vst1_p8(__p0, __p1) __extension__ ({ \ - poly8x8_t __s1 = __p1; \ - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 
5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 4); \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vshlq_u8(uint8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_p16(__p0, __p1) __extension__ ({ \ - poly16x4_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 5); \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} #else -#define vst1_p16(__p0, __p1) __extension__ ({ \ - poly16x4_t __s1 = __p1; \ - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 5); \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vshlq_u32(uint32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_p8(__p0, __p1) __extension__ ({ \ - poly8x16_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 36); \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) 
__builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} #else -#define vst1q_p8(__p0, __p1) __extension__ ({ \ - poly8x16_t __s1 = __p1; \ - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 36); \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vshlq_u64(uint64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_p16(__p0, __p1) __extension__ ({ \ - poly16x8_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 37); \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} #else -#define vst1q_p16(__p0, __p1) __extension__ ({ \ - poly16x8_t __s1 = __p1; \ - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 37); \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vshlq_u16(uint16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __s1 = __p1; \ - 
__builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 48); \ -}) +__ai __attribute__((target("neon"))) int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} #else -#define vst1q_u8(__p0, __p1) __extension__ ({ \ - uint8x16_t __s1 = __p1; \ - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 48); \ -}) +__ai __attribute__((target("neon"))) int8x16_t vshlq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 50); \ -}) +__ai __attribute__((target("neon"))) int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} #else -#define vst1q_u32(__p0, __p1) __extension__ ({ \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 50); \ -}) +__ai __attribute__((target("neon"))) int32x4_t vshlq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) 
__builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 51); \ -}) +__ai __attribute__((target("neon"))) int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} #else -#define vst1q_u64(__p0, __p1) __extension__ ({ \ - uint64x2_t __s1 = __p1; \ - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 51); \ -}) +__ai __attribute__((target("neon"))) int64x2_t vshlq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 49); \ -}) +__ai __attribute__((target("neon"))) int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} #else -#define vst1q_u16(__p0, __p1) __extension__ ({ \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 49); \ -}) +__ai __attribute__((target("neon"))) int16x8_t vshlq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vshlq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 32); \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} #else -#define vst1q_s8(__p0, __p1) __extension__ ({ \ - int8x16_t __s1 = __p1; \ - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 32); \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vshl_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_f32(__p0, __p1) __extension__ ({ \ - float32x4_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 41); \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} #else -#define vst1q_f32(__p0, __p1) __extension__ ({ \ - float32x4_t __s1 = __p1; \ - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 41); \ -}) +__ai 
__attribute__((target("neon"))) uint32x2_t vshl_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) uint64x1_t vshl_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vst1q_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 34); \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} #else -#define vst1q_s32(__p0, __p1) __extension__ ({ \ - int32x4_t __s1 = __p1; \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 34); \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vshl_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 35); \ -}) +__ai __attribute__((target("neon"))) int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__p0, 
(int8x8_t)__p1, 0); + return __ret; +} #else -#define vst1q_s64(__p0, __p1) __extension__ ({ \ - int64x2_t __s1 = __p1; \ - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 35); \ -}) +__ai __attribute__((target("neon"))) int8x8_t vshl_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 33); \ -}) +__ai __attribute__((target("neon"))) int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} #else -#define vst1q_s16(__p0, __p1) __extension__ ({ \ - int16x8_t __s1 = __p1; \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 33); \ -}) +__ai __attribute__((target("neon"))) int32x2_t vshl_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) int64x1_t vshl_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define 
vst1_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 16); \ -}) +__ai __attribute__((target("neon"))) int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} #else -#define vst1_u8(__p0, __p1) __extension__ ({ \ - uint8x8_t __s1 = __p1; \ - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 16); \ -}) +__ai __attribute__((target("neon"))) int16x4_t vshl_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vshl_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 18); \ +#define vshlq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ }) #else -#define vst1_u32(__p0, __p1) __extension__ ({ \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 18); \ +#define vshlq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif 
-#define vst1_u64(__p0, __p1) __extension__ ({ \ - uint64x1_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \ -}) #ifdef __LITTLE_ENDIAN__ -#define vst1_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 17); \ +#define vshlq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ }) #else -#define vst1_u16(__p0, __p1) __extension__ ({ \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 17); \ +#define vshlq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 0); \ +#define vshlq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ }) #else -#define vst1_s8(__p0, __p1) __extension__ ({ \ - int8x8_t __s1 = __p1; \ - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 0); \ +#define vshlq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define 
vst1_f32(__p0, __p1) __extension__ ({ \ - float32x2_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 9); \ +#define vshlq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ }) #else -#define vst1_f32(__p0, __p1) __extension__ ({ \ - float32x2_t __s1 = __p1; \ - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 9); \ +#define vshlq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 2); \ +#define vshlq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ }) #else -#define vst1_s32(__p0, __p1) __extension__ ({ \ - int32x2_t __s1 = __p1; \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 2); \ -}) -#endif - -#define vst1_s64(__p0, __p1) __extension__ ({ \ - int64x1_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \ +#define vshlq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ 
+ __ret; \ }) +#endif + #ifdef __LITTLE_ENDIAN__ -#define vst1_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 1); \ +#define vshlq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ }) #else -#define vst1_s16(__p0, __p1) __extension__ ({ \ - int16x4_t __s1 = __p1; \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 1); \ +#define vshlq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \ +#define vshlq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ }) #else -#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8_t __s1 = __p1; \ - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \ +#define vshlq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4_t __s1 = __p1; \ - 
__builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \ +#define vshlq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ }) #else -#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4_t __s1 = __p1; \ - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \ +#define vshlq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vshlq_n_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \ +#define vshl_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ }) #else -#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16_t __s1 = __p1; \ - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \ +#define vshl_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8_t __s1 = __p1; \ - 
__builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \ +#define vshl_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ }) #else -#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8_t __s1 = __p1; \ - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \ +#define vshl_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ }) #endif +#define vshl_n_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \ +#define vshl_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ }) #else -#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16_t __s1 = __p1; \ - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \ +#define vshl_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 
3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \ +#define vshl_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ }) #else -#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4_t __s1 = __p1; \ - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \ +#define vshl_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \ +#define vshl_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ }) #else -#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2_t __s1 = __p1; \ - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \ +#define vshl_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ }) #endif +#define vshl_n_s64(__p0, __p1) __extension__ ({ \ + int64x1_t 
__ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64x1_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \ +#define vshl_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ }) #else -#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8_t __s1 = __p1; \ - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \ +#define vshl_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vshl_n_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \ +#define vshll_n_u8(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \ + __ret; \ }) #else -#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16_t __s1 = __p1; \ - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \ +#define vshll_n_u8(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, 
__p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_u8(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 49); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \ +#define vshll_n_u32(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \ + __ret; \ }) #else -#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4_t __s1 = __p1; \ - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \ +#define vshll_n_u32(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_u32(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 51); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \ +#define vshll_n_u16(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \ + __ret; \ }) #else -#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __s1 = __p1; \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 
3, 2, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \ +#define vshll_n_u16(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_u16(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 50); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \ +#define vshll_n_s8(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \ + __ret; \ }) #else -#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2_t __s1 = __p1; \ - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \ +#define vshll_n_s8(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_s8(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 33); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \ +#define 
vshll_n_s32(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \ + __ret; \ }) #else -#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __s1 = __p1; \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \ +#define vshll_n_s32(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_s32(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 35); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \ +#define vshll_n_s16(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \ + __ret; \ }) #else -#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8_t __s1 = __p1; \ - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \ +#define vshll_n_s16(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshll_n_s16(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + 
int16x4_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_vshll_n_v((int8x8_t)__s0, __p1, 34); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \ +#define vshrq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 48); \ + __ret; \ }) #else -#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2_t __s1 = __p1; \ - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \ +#define vshrq_n_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif -#define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \ -}) #ifdef __LITTLE_ENDIAN__ -#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \ +#define vshrq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 50); \ + __ret; \ }) #else -#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4_t __s1 = __p1; \ - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \ +#define vshrq_n_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __ret; \ 
+ uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \ +#define vshrq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 51); \ + __ret; \ }) #else -#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8_t __s1 = __p1; \ - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \ +#define vshrq_n_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \ +#define vshrq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 49); \ + __ret; \ }) #else -#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2_t __s1 = __p1; \ - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \ +#define vshrq_n_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 
6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \ +#define vshrq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 32); \ + __ret; \ }) #else -#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __s1 = __p1; \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \ +#define vshrq_n_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif -#define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \ -}) #ifdef __LITTLE_ENDIAN__ -#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \ +#define vshrq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 34); \ + __ret; \ }) #else -#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __s1 = __p1; \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \ +#define 
vshrq_n_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_p8_x2(__p0, __p1) __extension__ ({ \ - poly8x8x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \ +#define vshrq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 35); \ + __ret; \ }) #else -#define vst1_p8_x2(__p0, __p1) __extension__ ({ \ - poly8x8x2_t __s1 = __p1; \ - poly8x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \ +#define vshrq_n_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_p16_x2(__p0, __p1) __extension__ ({ \ - poly16x4x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \ +#define vshrq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__s0, __p1, 33); \ + __ret; \ }) #else -#define vst1_p16_x2(__p0, __p1) __extension__ ({ \ - poly16x4x2_t __s1 = __p1; \ - poly16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 
3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \ +#define vshrq_n_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vshrq_n_v((int8x16_t)__rev0, __p1, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \ - poly8x16x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \ -}) -#else -#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \ - poly8x16x2_t __s1 = __p1; \ - poly8x16x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \ +#define vshr_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 16); \ + __ret; \ +}) +#else +#define vshr_n_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \ - poly16x8x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \ +#define vshr_n_u32(__p0, __p1) 
__extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 18); \ + __ret; \ }) #else -#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \ - poly16x8x2_t __s1 = __p1; \ - poly16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \ +#define vshr_n_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ }) #endif +#define vshr_n_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 19); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \ - uint8x16x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \ +#define vshr_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 17); \ + __ret; \ }) #else -#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \ - uint8x16x2_t __s1 = __p1; \ - uint8x16x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \ +#define vshr_n_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + 
uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \ - uint32x4x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \ +#define vshr_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 0); \ + __ret; \ }) #else -#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \ - uint32x4x2_t __s1 = __p1; \ - uint32x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \ +#define vshr_n_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \ - uint64x2x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \ +#define vshr_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 2); \ + __ret; \ }) #else -#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \ - uint64x2x2_t __s1 = __p1; \ - uint64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \ +#define vshr_n_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ }) #endif +#define vshr_n_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64x1_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 3); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \ - uint16x8x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \ +#define vshr_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__s0, __p1, 1); \ + __ret; \ }) #else -#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \ - uint16x8x2_t __s1 = __p1; \ - uint16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \ +#define vshr_n_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vshr_n_v((int8x8_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \ - int8x16x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \ +#define 
vshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ }) #else -#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \ - int8x16x2_t __s1 = __p1; \ - int8x16x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \ +#define vshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_u32(__p0, __p1) __extension__ ({ \ + uint16x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 17); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \ - float32x4x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 41); \ +#define vshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ }) #else -#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \ - float32x4x2_t __s1 = __p1; \ - float32x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 41); \ +#define vshrn_n_u64(__p0, __p1) 
__extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_u64(__p0, __p1) __extension__ ({ \ + uint32x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 18); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \ - int32x4x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 34); \ +#define vshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ }) #else -#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \ - int32x4x2_t __s1 = __p1; \ - int32x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 34); \ +#define vshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_u16(__p0, __p1) __extension__ ({ \ + uint8x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 16); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \ - int64x2x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, 
(int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 35); \ +#define vshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ }) #else -#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \ - int64x2x2_t __s1 = __p1; \ - int64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 35); \ +#define vshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_s32(__p0, __p1) __extension__ ({ \ + int16x4_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int16x4_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 1); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \ - int16x8x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 33); \ +#define vshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ }) #else -#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \ - int16x8x2_t __s1 = __p1; \ - int16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 33); \ +#define vshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ 
+ int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_s64(__p0, __p1) __extension__ ({ \ + int32x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int32x2_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 2); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_u8_x2(__p0, __p1) __extension__ ({ \ - uint8x8x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \ +#define vshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ }) #else -#define vst1_u8_x2(__p0, __p1) __extension__ ({ \ - uint8x8x2_t __s1 = __p1; \ - uint8x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \ +#define vshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__rev0, __p1, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vshrn_n_s16(__p0, __p1) __extension__ ({ \ + int8x8_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int8x8_t) __builtin_neon_vshrn_n_v((int8x16_t)__s0, __p1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_u32_x2(__p0, __p1) __extension__ ({ \ - uint32x2x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \ +#define 
vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \ + __ret; \ }) #else -#define vst1_u32_x2(__p0, __p1) __extension__ ({ \ - uint32x2x2_t __s1 = __p1; \ - uint32x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \ +#define vsli_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif -#define vst1_u64_x2(__p0, __p1) __extension__ ({ \ - uint64x1x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \ -}) #ifdef __LITTLE_ENDIAN__ -#define vst1_u16_x2(__p0, __p1) __extension__ ({ \ - uint16x4x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \ +#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \ + __ret; \ }) #else -#define vst1_u16_x2(__p0, __p1) __extension__ ({ \ - uint16x4x2_t __s1 = __p1; \ - uint16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - 
__builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \ +#define vsli_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (poly16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_s8_x2(__p0, __p1) __extension__ ({ \ - int8x8x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \ +#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \ + __ret; \ }) #else -#define vst1_s8_x2(__p0, __p1) __extension__ ({ \ - int8x8x2_t __s1 = __p1; \ - int8x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \ +#define vsliq_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef 
__LITTLE_ENDIAN__ -#define vst1_f32_x2(__p0, __p1) __extension__ ({ \ - float32x2x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 9); \ +#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \ + __ret; \ }) #else -#define vst1_f32_x2(__p0, __p1) __extension__ ({ \ - float32x2x2_t __s1 = __p1; \ - float32x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 9); \ +#define vsliq_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_s32_x2(__p0, __p1) __extension__ ({ \ - int32x2x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 2); \ +#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ + __ret; \ }) #else -#define vst1_s32_x2(__p0, __p1) __extension__ ({ \ - int32x2x2_t __s1 = __p1; \ - int32x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 
0); \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 2); \ +#define vsliq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif -#define vst1_s64_x2(__p0, __p1) __extension__ ({ \ - int64x1x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 3); \ -}) #ifdef __LITTLE_ENDIAN__ -#define vst1_s16_x2(__p0, __p1) __extension__ ({ \ - int16x4x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 1); \ +#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + __ret = (uint32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ + __ret; \ }) #else -#define vst1_s16_x2(__p0, __p1) __extension__ ({ \ - int16x4x2_t __s1 = __p1; \ - int16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 1); \ +#define vsliq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint32x4_t) 
__builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_p8_x3(__p0, __p1) __extension__ ({ \ - poly8x8x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \ +#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ }) #else -#define vst1_p8_x3(__p0, __p1) __extension__ ({ \ - poly8x8x3_t __s1 = __p1; \ - poly8x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \ +#define vsliq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_p16_x3(__p0, __p1) __extension__ ({ \ - poly16x4x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \ +#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + __ret = (uint16x8_t) 
__builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ + __ret; \ }) #else -#define vst1_p16_x3(__p0, __p1) __extension__ ({ \ - poly16x4x3_t __s1 = __p1; \ - poly16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \ +#define vsliq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \ - poly8x16x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \ +#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ + __ret; \ }) #else -#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \ - poly8x16x3_t __s1 = __p1; \ - poly8x16x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 
7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \ +#define vsliq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \ - poly16x8x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \ +#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ }) #else -#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \ - poly16x8x3_t __s1 = __p1; \ - poly16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \ +#define vsliq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, 
__s1, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \ - uint8x16x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \ +#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ }) #else -#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \ - uint8x16x3_t __s1 = __p1; \ - uint8x16x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \ +#define vsliq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \ - uint32x4x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \ +#define vsliq_n_s16(__p0, __p1, __p2) 
__extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ }) #else -#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \ - uint32x4x3_t __s1 = __p1; \ - uint32x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \ +#define vsliq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \ - uint64x2x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \ +#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ + __ret; \ }) #else -#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \ - uint64x2x3_t __s1 = __p1; \ - uint64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - 
__builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \ +#define vsli_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \ - uint16x8x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \ +#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ + __ret; \ }) #else -#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \ - uint16x8x3_t __s1 = __p1; \ - uint16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \ +#define vsli_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ + __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ }) #endif -#ifdef __LITTLE_ENDIAN__ -#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \ - int8x16x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \ -}) -#else -#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \ - int8x16x3_t __s1 = __p1; \ - int8x16x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \ +#define vsli_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + __ret = (uint64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ }) -#endif - #ifdef __LITTLE_ENDIAN__ -#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \ - float32x4x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 41); \ +#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ + __ret; \ }) #else -#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \ - float32x4x3_t __s1 = __p1; \ - float32x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - 
__builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 41); \ +#define vsli_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \ - int32x4x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 34); \ +#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ + __ret; \ }) #else -#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \ - int32x4x3_t __s1 = __p1; \ - int32x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 34); \ +#define vsli_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 
0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \ - int64x2x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 35); \ +#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ }) #else -#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \ - int64x2x3_t __s1 = __p1; \ - int64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 35); \ +#define vsli_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ }) #endif +#define vsli_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + __ret = (int64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \ - int16x8x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 33); \ +#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = 
(int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ }) #else -#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \ - int16x8x3_t __s1 = __p1; \ - int16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 33); \ +#define vsli_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vsli_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_u8_x3(__p0, __p1) __extension__ ({ \ - uint8x8x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \ +#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ + __ret; \ }) #else -#define vst1_u8_x3(__p0, __p1) __extension__ ({ \ - uint8x8x3_t __s1 = __p1; \ - uint8x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], 
(int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \ +#define vsraq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_u32_x3(__p0, __p1) __extension__ ({ \ - uint32x2x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \ +#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ + __ret; \ }) #else -#define vst1_u32_x3(__p0, __p1) __extension__ ({ \ - uint32x2x3_t __s1 = __p1; \ - uint32x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \ +#define vsraq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ }) #endif -#define vst1_u64_x3(__p0, __p1) __extension__ ({ \ - uint64x1x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \ -}) #ifdef __LITTLE_ENDIAN__ -#define vst1_u16_x3(__p0, __p1) __extension__ ({ \ - uint16x4x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \ +#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ }) #else -#define vst1_u16_x3(__p0, __p1) __extension__ ({ \ - uint16x4x3_t __s1 = __p1; \ - uint16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \ +#define vsraq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_s8_x3(__p0, __p1) __extension__ ({ \ - int8x8x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \ +#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + 
uint16x8_t __s1 = __p1; \ + __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ + __ret; \ }) #else -#define vst1_s8_x3(__p0, __p1) __extension__ ({ \ - int8x8x3_t __s1 = __p1; \ - int8x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \ +#define vsraq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_f32_x3(__p0, __p1) __extension__ ({ \ - float32x2x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 9); \ +#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ + __ret; \ }) #else -#define vst1_f32_x3(__p0, __p1) __extension__ ({ \ - float32x2x3_t __s1 = __p1; \ - float32x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst1_x3_v(__p0, 
(int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 9); \ +#define vsraq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_s32_x3(__p0, __p1) __extension__ ({ \ - int32x2x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 2); \ +#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ }) #else -#define vst1_s32_x3(__p0, __p1) __extension__ ({ \ - int32x2x3_t __s1 = __p1; \ - int32x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 2); \ +#define vsraq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ }) #endif -#define vst1_s64_x3(__p0, __p1) __extension__ ({ \ - int64x1x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 3); \ -}) #ifdef __LITTLE_ENDIAN__ -#define vst1_s16_x3(__p0, __p1) __extension__ ({ \ - int16x4x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 1); \ +#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ }) #else -#define vst1_s16_x3(__p0, __p1) __extension__ ({ \ - int16x4x3_t __s1 = __p1; \ - int16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 1); \ +#define vsraq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_p8_x4(__p0, __p1) __extension__ ({ \ - poly8x8x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \ +#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + 
int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ }) #else -#define vst1_p8_x4(__p0, __p1) __extension__ ({ \ - poly8x8x4_t __s1 = __p1; \ - poly8x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \ +#define vsraq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vsraq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_p16_x4(__p0, __p1) __extension__ ({ \ - poly16x4x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \ +#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ + __ret; \ }) #else -#define vst1_p16_x4(__p0, __p1) __extension__ ({ \ - poly16x4x4_t __s1 = __p1; \ - poly16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], 
__s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \ +#define vsra_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \ - poly8x16x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \ +#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ + __ret; \ }) #else -#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \ - poly8x16x4_t __s1 = __p1; \ - poly8x16x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], 
(int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \ +#define vsra_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ }) #endif +#define vsra_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + __ret = (uint64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \ - poly16x8x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \ +#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ + __ret; \ }) #else -#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \ - poly16x8x4_t __s1 = __p1; \ - poly16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \ +#define vsra_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + 
uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \ - uint8x16x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \ +#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + __ret = (int8x8_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ + __ret; \ }) #else -#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \ - uint8x16x4_t __s1 = __p1; \ - uint8x16x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \ +#define vsra_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) 
__builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \ - uint32x4x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \ +#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ }) #else -#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \ - uint32x4x4_t __s1 = __p1; \ - uint32x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \ +#define vsra_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ }) #endif +#define vsra_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + __ret = (int64x1_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vst1q_u64_x4(__p0, __p1) 
__extension__ ({ \ - uint64x2x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \ +#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ }) #else -#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \ - uint64x2x4_t __s1 = __p1; \ - uint64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \ +#define vsra_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vsra_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \ - uint16x8x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \ +#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 4); \ + __ret; \ }) #else -#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \ - 
uint16x8x4_t __s1 = __p1; \ - uint16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \ +#define vsri_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 4); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \ - int8x16x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \ +#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 5); \ + __ret; \ }) #else -#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \ - int8x16x4_t __s1 = __p1; \ - int8x16x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 
14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \ +#define vsri_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (poly16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 5); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \ - float32x4x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 41); \ +#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + poly8x16_t __s1 = __p1; \ + __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 36); \ + __ret; \ }) #else -#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \ - float32x4x4_t __s1 = __p1; \ - float32x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 41); \ +#define vsriq_n_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __ret; \ + poly8x16_t __s0 = __p0; \ + 
poly8x16_t __s1 = __p1; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 36); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \ - int32x4x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 34); \ +#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 37); \ + __ret; \ }) #else -#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \ - int32x4x4_t __s1 = __p1; \ - int32x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 34); \ +#define vsriq_n_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 37); \ + 
__ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \ - int64x2x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 35); \ +#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 48); \ + __ret; \ }) #else -#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \ - int64x2x4_t __s1 = __p1; \ - int64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 35); \ +#define vsriq_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 48); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \ - int16x8x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 33); \ 
+#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 50); \ + __ret; \ }) #else -#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \ - int16x8x4_t __s1 = __p1; \ - int16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 33); \ +#define vsriq_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 50); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_u8_x4(__p0, __p1) __extension__ ({ \ - uint8x8x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \ +#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 51); \ + __ret; \ }) #else -#define vst1_u8_x4(__p0, __p1) __extension__ ({ \ - uint8x8x4_t __s1 = __p1; \ - uint8x8x4_t __rev1; \ - __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \ +#define vsriq_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 51); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_u32_x4(__p0, __p1) __extension__ ({ \ - uint32x2x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \ +#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 49); \ + __ret; \ }) #else -#define vst1_u32_x4(__p0, __p1) __extension__ ({ \ - uint32x2x4_t __s1 = __p1; \ - uint32x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 
(int8x8_t)__rev1.val[3], 18); \ +#define vsriq_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 49); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif -#define vst1_u64_x4(__p0, __p1) __extension__ ({ \ - uint64x1x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \ -}) #ifdef __LITTLE_ENDIAN__ -#define vst1_u16_x4(__p0, __p1) __extension__ ({ \ - uint16x4x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \ +#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 32); \ + __ret; \ }) #else -#define vst1_u16_x4(__p0, __p1) __extension__ ({ \ - uint16x4x4_t __s1 = __p1; \ - uint16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \ +#define vsriq_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev0; __rev0 = 
__builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x16_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 32); \ + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_s8_x4(__p0, __p1) __extension__ ({ \ - int8x8x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \ +#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ }) #else -#define vst1_s8_x4(__p0, __p1) __extension__ ({ \ - int8x8x4_t __s1 = __p1; \ - int8x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \ +#define vsriq_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + 
__ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_f32_x4(__p0, __p1) __extension__ ({ \ - float32x2x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 9); \ +#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 35); \ + __ret; \ }) #else -#define vst1_f32_x4(__p0, __p1) __extension__ ({ \ - float32x2x4_t __s1 = __p1; \ - float32x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 9); \ +#define vsriq_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_s32_x4(__p0, __p1) __extension__ ({ \ - int32x2x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 2); \ +#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ }) 
#else -#define vst1_s32_x4(__p0, __p1) __extension__ ({ \ - int32x2x4_t __s1 = __p1; \ - int32x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 2); \ +#define vsriq_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif -#define vst1_s64_x4(__p0, __p1) __extension__ ({ \ - int64x1x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 3); \ -}) #ifdef __LITTLE_ENDIAN__ -#define vst1_s16_x4(__p0, __p1) __extension__ ({ \ - int16x4x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 1); \ +#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 16); \ + __ret; \ }) #else -#define vst1_s16_x4(__p0, __p1) __extension__ ({ \ - int16x4x4_t __s1 = __p1; \ - int16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 1); \ +#define vsri_n_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 16); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2_p8(__p0, __p1) __extension__ ({ \ - poly8x8x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \ +#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + __ret = (uint32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 18); \ + __ret; \ }) #else -#define vst2_p8(__p0, __p1) __extension__ ({ \ - poly8x8x2_t __s1 = __p1; \ - poly8x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \ +#define vsri_n_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (uint32x2_t) 
__builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 18); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ }) #endif +#define vsri_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __ret; \ + uint64x1_t __s0 = __p0; \ + uint64x1_t __s1 = __p1; \ + __ret = (uint64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 19); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vst2_p16(__p0, __p1) __extension__ ({ \ - poly16x4x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \ +#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 17); \ + __ret; \ }) #else -#define vst2_p16(__p0, __p1) __extension__ ({ \ - poly16x4x2_t __s1 = __p1; \ - poly16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \ +#define vsri_n_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (uint16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 17); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2q_p8(__p0, __p1) __extension__ ({ \ - poly8x16x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \ +#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + __ret = (int8x8_t) 
__builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 0); \ + __ret; \ }) #else -#define vst2q_p8(__p0, __p1) __extension__ ({ \ - poly8x16x2_t __s1 = __p1; \ - poly8x16x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \ +#define vsri_n_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8x8_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 0); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2q_p16(__p0, __p1) __extension__ ({ \ - poly16x8x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \ +#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ }) #else -#define vst2q_p16(__p0, __p1) __extension__ ({ \ - poly16x8x2_t __s1 = __p1; \ - poly16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \ +#define vsri_n_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t 
__rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ }) #endif +#define vsri_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + int64x1_t __s1 = __p1; \ + __ret = (int64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 3); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vst2q_u8(__p0, __p1) __extension__ ({ \ - uint8x16x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \ +#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ }) #else -#define vst2q_u8(__p0, __p1) __extension__ ({ \ - uint8x16x2_t __s1 = __p1; \ - uint8x16x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \ +#define vsri_n_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vsri_n_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2q_u32(__p0, __p1) __extension__ ({ \ - uint32x4x2_t __s1 = __p1; \ - 
__builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \ +#define vst1_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 4); \ }) #else -#define vst2q_u32(__p0, __p1) __extension__ ({ \ - uint32x4x2_t __s1 = __p1; \ - uint32x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \ +#define vst1_p8(__p0, __p1) __extension__ ({ \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 4); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2q_u16(__p0, __p1) __extension__ ({ \ - uint16x8x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \ +#define vst1_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 5); \ }) #else -#define vst2q_u16(__p0, __p1) __extension__ ({ \ - uint16x8x2_t __s1 = __p1; \ - uint16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \ +#define vst1_p16(__p0, __p1) __extension__ ({ \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 5); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2q_s8(__p0, __p1) __extension__ ({ \ - int8x16x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \ +#define vst1q_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t 
__s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 36); \ }) #else -#define vst2q_s8(__p0, __p1) __extension__ ({ \ - int8x16x2_t __s1 = __p1; \ - int8x16x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \ +#define vst1q_p8(__p0, __p1) __extension__ ({ \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 36); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2q_f32(__p0, __p1) __extension__ ({ \ - float32x4x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 41); \ +#define vst1q_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 37); \ }) #else -#define vst2q_f32(__p0, __p1) __extension__ ({ \ - float32x4x2_t __s1 = __p1; \ - float32x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 41); \ +#define vst1q_p16(__p0, __p1) __extension__ ({ \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 37); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2q_s32(__p0, __p1) __extension__ ({ \ - int32x4x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 34); \ +#define vst1q_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s1 = __p1; \ + 
__builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 48); \ }) #else -#define vst2q_s32(__p0, __p1) __extension__ ({ \ - int32x4x2_t __s1 = __p1; \ - int32x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 34); \ +#define vst1q_u8(__p0, __p1) __extension__ ({ \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 48); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2q_s16(__p0, __p1) __extension__ ({ \ - int16x8x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 33); \ +#define vst1q_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 50); \ }) #else -#define vst2q_s16(__p0, __p1) __extension__ ({ \ - int16x8x2_t __s1 = __p1; \ - int16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 33); \ +#define vst1q_u32(__p0, __p1) __extension__ ({ \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 50); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2_u8(__p0, __p1) __extension__ ({ \ - uint8x8x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \ +#define vst1q_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 51); \ }) #else -#define vst2_u8(__p0, __p1) __extension__ 
({ \ - uint8x8x2_t __s1 = __p1; \ - uint8x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \ +#define vst1q_u64(__p0, __p1) __extension__ ({ \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 51); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2_u32(__p0, __p1) __extension__ ({ \ - uint32x2x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \ +#define vst1q_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 49); \ }) #else -#define vst2_u32(__p0, __p1) __extension__ ({ \ - uint32x2x2_t __s1 = __p1; \ - uint32x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \ +#define vst1q_u16(__p0, __p1) __extension__ ({ \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 49); \ }) #endif -#define vst2_u64(__p0, __p1) __extension__ ({ \ - uint64x1x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \ -}) #ifdef __LITTLE_ENDIAN__ -#define vst2_u16(__p0, __p1) __extension__ ({ \ - uint16x4x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \ +#define vst1q_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 32); \ }) #else -#define vst2_u16(__p0, __p1) 
__extension__ ({ \ - uint16x4x2_t __s1 = __p1; \ - uint16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \ +#define vst1q_s8(__p0, __p1) __extension__ ({ \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 32); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2_s8(__p0, __p1) __extension__ ({ \ - int8x8x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \ +#define vst1q_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 41); \ }) #else -#define vst2_s8(__p0, __p1) __extension__ ({ \ - int8x8x2_t __s1 = __p1; \ - int8x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \ +#define vst1q_f32(__p0, __p1) __extension__ ({ \ + float32x4_t __s1 = __p1; \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 41); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2_f32(__p0, __p1) __extension__ ({ \ - float32x2x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 9); \ +#define vst1q_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 34); \ }) #else -#define vst2_f32(__p0, __p1) __extension__ ({ \ - float32x2x2_t __s1 = __p1; \ - float32x2x2_t __rev1; \ - __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 9); \ +#define vst1q_s32(__p0, __p1) __extension__ ({ \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 34); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2_s32(__p0, __p1) __extension__ ({ \ - int32x2x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 2); \ +#define vst1q_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 35); \ }) #else -#define vst2_s32(__p0, __p1) __extension__ ({ \ - int32x2x2_t __s1 = __p1; \ - int32x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 2); \ +#define vst1q_s64(__p0, __p1) __extension__ ({ \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 35); \ }) #endif -#define vst2_s64(__p0, __p1) __extension__ ({ \ - int64x1x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 3); \ -}) #ifdef __LITTLE_ENDIAN__ -#define vst2_s16(__p0, __p1) __extension__ ({ \ - int16x4x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 1); \ +#define vst1q_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 33); \ }) #else -#define vst2_s16(__p0, __p1) __extension__ ({ \ - int16x4x2_t __s1 = __p1; \ - int16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 
1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 1); \ +#define vst1q_s16(__p0, __p1) __extension__ ({ \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 33); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \ +#define vst1_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 16); \ }) #else -#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8x2_t __s1 = __p1; \ - poly8x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \ +#define vst1_u8(__p0, __p1) __extension__ ({ \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 16); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \ +#define vst1_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 18); \ }) #else -#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4x2_t __s1 = __p1; \ - poly16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], 
__s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \ +#define vst1_u32(__p0, __p1) __extension__ ({ \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 18); \ }) #endif +#define vst1_u64(__p0, __p1) __extension__ ({ \ + uint64x1_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 19); \ +}) #ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \ +#define vst1_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 17); \ }) #else -#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8x2_t __s1 = __p1; \ - poly16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \ +#define vst1_u16(__p0, __p1) __extension__ ({ \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 17); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \ +#define vst1_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 0); \ }) #else -#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4x2_t __s1 = __p1; \ - uint32x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 
0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \ +#define vst1_s8(__p0, __p1) __extension__ ({ \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 0); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \ +#define vst1_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 9); \ }) #else -#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8x2_t __s1 = __p1; \ - uint16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \ +#define vst1_f32(__p0, __p1) __extension__ ({ \ + float32x2_t __s1 = __p1; \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 9); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \ +#define vst1_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 2); \ }) #else -#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4x2_t __s1 = __p1; \ - float32x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \ +#define vst1_s32(__p0, __p1) __extension__ ({ \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 2); \ }) #endif +#define vst1_s64(__p0, __p1) __extension__ ({ \ + int64x1_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 3); \ +}) #ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \ +#define vst1_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 1); \ }) #else -#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4x2_t __s1 = __p1; \ - int32x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \ +#define vst1_s16(__p0, __p1) __extension__ ({ \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__rev1, 1); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \ +#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 4); \ }) #else -#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8x2_t __s1 = __p1; \ - int16x8x2_t __rev1; \ - __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 33); \ +#define vst1_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8_t __s1 = __p1; \ + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 4); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \ +#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 5); \ }) #else -#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8x2_t __s1 = __p1; \ - uint8x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \ +#define vst1_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4_t __s1 = __p1; \ + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 5); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \ +#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 36); \ }) #else -#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2x2_t __s1 = __p1; \ 
- uint32x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \ +#define vst1q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16_t __s1 = __p1; \ + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 36); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \ +#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 37); \ }) #else -#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4x2_t __s1 = __p1; \ - uint16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \ +#define vst1q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8_t __s1 = __p1; \ + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 37); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \ +#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 48); \ }) #else -#define vst2_lane_s8(__p0, __p1, __p2) __extension__ 
({ \ - int8x8x2_t __s1 = __p1; \ - int8x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \ +#define vst1q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16_t __s1 = __p1; \ + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 48); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \ +#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 50); \ }) #else -#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2x2_t __s1 = __p1; \ - float32x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \ +#define vst1q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4_t __s1 = __p1; \ + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 50); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \ +#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 51); \ }) #else 
-#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2x2_t __s1 = __p1; \ - int32x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 2); \ +#define vst1q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2_t __s1 = __p1; \ + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 51); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \ +#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 49); \ }) #else -#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4x2_t __s1 = __p1; \ - int16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \ +#define vst1q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8_t __s1 = __p1; \ + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 49); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3_p8(__p0, __p1) __extension__ ({ \ - poly8x8x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \ +#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 32); \ }) #else -#define 
vst3_p8(__p0, __p1) __extension__ ({ \ - poly8x8x3_t __s1 = __p1; \ - poly8x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \ +#define vst1q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16_t __s1 = __p1; \ + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 32); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3_p16(__p0, __p1) __extension__ ({ \ - poly16x4x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \ +#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 41); \ }) #else -#define vst3_p16(__p0, __p1) __extension__ ({ \ - poly16x4x3_t __s1 = __p1; \ - poly16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \ +#define vst1q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4_t __s1 = __p1; \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 41); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3q_p8(__p0, __p1) __extension__ ({ \ - poly8x16x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, 
(int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \ +#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 34); \ }) #else -#define vst3q_p8(__p0, __p1) __extension__ ({ \ - poly8x16x3_t __s1 = __p1; \ - poly8x16x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \ +#define vst1q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 34); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3q_p16(__p0, __p1) __extension__ ({ \ - poly16x8x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \ +#define vst1q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 35); \ }) #else -#define vst3q_p16(__p0, __p1) __extension__ ({ \ - poly16x8x3_t __s1 = __p1; \ - poly16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \ +#define vst1q_lane_s64(__p0, 
__p1, __p2) __extension__ ({ \ + int64x2_t __s1 = __p1; \ + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 35); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3q_u8(__p0, __p1) __extension__ ({ \ - uint8x16x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \ +#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 33); \ }) #else -#define vst3q_u8(__p0, __p1) __extension__ ({ \ - uint8x16x3_t __s1 = __p1; \ - uint8x16x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \ +#define vst1q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 33); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3q_u32(__p0, __p1) __extension__ ({ \ - uint32x4x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \ +#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 16); \ }) #else -#define vst3q_u32(__p0, __p1) __extension__ ({ \ - uint32x4x3_t __s1 = __p1; \ - uint32x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); 
\ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \ +#define vst1_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8_t __s1 = __p1; \ + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 16); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3q_u16(__p0, __p1) __extension__ ({ \ - uint16x8x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \ +#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 18); \ }) #else -#define vst3q_u16(__p0, __p1) __extension__ ({ \ - uint16x8x3_t __s1 = __p1; \ - uint16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \ +#define vst1_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2_t __s1 = __p1; \ + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 18); \ }) #endif +#define vst1_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 19); \ +}) #ifdef __LITTLE_ENDIAN__ -#define vst3q_s8(__p0, __p1) __extension__ ({ \ - int8x16x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 
(int8x16_t)__s1.val[2], 32); \ +#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 17); \ }) #else -#define vst3q_s8(__p0, __p1) __extension__ ({ \ - int8x16x3_t __s1 = __p1; \ - int8x16x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \ +#define vst1_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4_t __s1 = __p1; \ + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 17); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3q_f32(__p0, __p1) __extension__ ({ \ - float32x4x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 41); \ +#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 0); \ }) #else -#define vst3q_f32(__p0, __p1) __extension__ ({ \ - float32x4x3_t __s1 = __p1; \ - float32x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 41); \ +#define vst1_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8_t __s1 = __p1; \ + int8x8_t __rev1; __rev1 = 
__builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 0); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3q_s32(__p0, __p1) __extension__ ({ \ - int32x4x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 34); \ +#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 9); \ }) #else -#define vst3q_s32(__p0, __p1) __extension__ ({ \ - int32x4x3_t __s1 = __p1; \ - int32x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 34); \ +#define vst1_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2_t __s1 = __p1; \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 9); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3q_s16(__p0, __p1) __extension__ ({ \ - int16x8x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 33); \ +#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 2); \ }) #else -#define vst3q_s16(__p0, __p1) __extension__ ({ \ - int16x8x3_t __s1 = __p1; \ - int16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - 
__builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 33); \ +#define vst1_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 2); \ }) #endif +#define vst1_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 3); \ +}) #ifdef __LITTLE_ENDIAN__ -#define vst3_u8(__p0, __p1) __extension__ ({ \ - uint8x8x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \ +#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 1); \ }) #else -#define vst3_u8(__p0, __p1) __extension__ ({ \ - uint8x8x3_t __s1 = __p1; \ - uint8x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \ +#define vst1_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__rev1, __p2, 1); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3_u32(__p0, __p1) __extension__ ({ \ - uint32x2x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \ +#define vst1_p8_x2(__p0, __p1) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \ }) #else 
-#define vst3_u32(__p0, __p1) __extension__ ({ \ - uint32x2x3_t __s1 = __p1; \ - uint32x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \ +#define vst1_p8_x2(__p0, __p1) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + poly8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \ }) #endif -#define vst3_u64(__p0, __p1) __extension__ ({ \ - uint64x1x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \ -}) #ifdef __LITTLE_ENDIAN__ -#define vst3_u16(__p0, __p1) __extension__ ({ \ - uint16x4x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \ +#define vst1_p16_x2(__p0, __p1) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \ }) #else -#define vst3_u16(__p0, __p1) __extension__ ({ \ - uint16x4x3_t __s1 = __p1; \ - uint16x4x3_t __rev1; \ +#define vst1_p16_x2(__p0, __p1) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + poly16x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \ + 
__builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3_s8(__p0, __p1) __extension__ ({ \ - int8x8x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \ +#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \ }) #else -#define vst3_s8(__p0, __p1) __extension__ ({ \ - int8x8x3_t __s1 = __p1; \ - int8x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \ +#define vst1q_p8_x2(__p0, __p1) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + poly8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3_f32(__p0, __p1) __extension__ ({ \ - float32x2x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 9); \ +#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \ }) #else -#define vst3_f32(__p0, __p1) __extension__ ({ \ - float32x2x3_t __s1 = __p1; \ - float32x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], 
__s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 9); \ +#define vst1q_p16_x2(__p0, __p1) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + poly16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3_s32(__p0, __p1) __extension__ ({ \ - int32x2x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 2); \ +#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \ }) #else -#define vst3_s32(__p0, __p1) __extension__ ({ \ - int32x2x3_t __s1 = __p1; \ - int32x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 2); \ +#define vst1q_u8_x2(__p0, __p1) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + uint8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \ }) #endif -#define 
vst3_s64(__p0, __p1) __extension__ ({ \ - int64x1x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 3); \ -}) #ifdef __LITTLE_ENDIAN__ -#define vst3_s16(__p0, __p1) __extension__ ({ \ - int16x4x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 1); \ +#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \ }) #else -#define vst3_s16(__p0, __p1) __extension__ ({ \ - int16x4x3_t __s1 = __p1; \ - int16x4x3_t __rev1; \ +#define vst1q_u32_x2(__p0, __p1) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + uint32x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 1); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \ +#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \ }) #else -#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x8x3_t __s1 = __p1; \ - poly8x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = 
__builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \ +#define vst1q_u64_x2(__p0, __p1) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + uint64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \ +#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \ }) #else -#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x4x3_t __s1 = __p1; \ - poly16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \ +#define vst1q_u16_x2(__p0, __p1) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + uint16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8x3_t __s1 = __p1; \ - 
__builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \ +#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \ }) #else -#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ - poly16x8x3_t __s1 = __p1; \ - poly16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \ +#define vst1q_s8_x2(__p0, __p1) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + int8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \ +#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 41); \ }) #else -#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x4x3_t __s1 = __p1; \ - uint32x4x3_t __rev1; \ +#define vst1q_f32_x2(__p0, __p1) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + float32x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 
2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 41); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \ +#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 34); \ }) #else -#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8x3_t __s1 = __p1; \ - uint16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \ +#define vst1q_s32_x2(__p0, __p1) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + int32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 34); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \ +#define vst1q_s64_x2(__p0, __p1) 
__extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 35); \ }) #else -#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x4x3_t __s1 = __p1; \ - float32x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \ +#define vst1q_s64_x2(__p0, __p1) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + int64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 35); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 34); \ +#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 33); \ }) #else -#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4x3_t __s1 = __p1; \ - int32x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \ +#define vst1q_s16_x2(__p0, __p1) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + 
int16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 33); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \ -}) -#else -#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8x3_t __s1 = __p1; \ - int16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \ +#define vst1_u8_x2(__p0, __p1) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \ }) #else -#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x8x3_t __s1 = __p1; \ - uint8x8x3_t __rev1; \ +#define vst1_u8_x2(__p0, __p1) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + uint8x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 
0); \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \ +#define vst1_u32_x2(__p0, __p1) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \ }) #else -#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ - uint32x2x3_t __s1 = __p1; \ - uint32x2x3_t __rev1; \ +#define vst1_u32_x2(__p0, __p1) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + uint32x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \ }) #endif +#define vst1_u64_x2(__p0, __p1) __extension__ ({ \ + uint64x1x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \ +}) #ifdef __LITTLE_ENDIAN__ -#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \ +#define vst1_u16_x2(__p0, __p1) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \ }) #else -#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x4x3_t __s1 = __p1; \ - uint16x4x3_t 
__rev1; \ +#define vst1_u16_x2(__p0, __p1) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + uint16x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \ +#define vst1_s8_x2(__p0, __p1) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \ }) #else -#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x8x3_t __s1 = __p1; \ - int8x8x3_t __rev1; \ +#define vst1_s8_x2(__p0, __p1) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + int8x8x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \ +#define vst1_f32_x2(__p0, __p1) __extension__ ({ \ + float32x2x2_t 
__s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 9); \ }) #else -#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ - float32x2x3_t __s1 = __p1; \ - float32x2x3_t __rev1; \ +#define vst1_f32_x2(__p0, __p1) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + float32x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 9); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \ +#define vst1_s32_x2(__p0, __p1) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 2); \ }) #else -#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2x3_t __s1 = __p1; \ - int32x2x3_t __rev1; \ +#define vst1_s32_x2(__p0, __p1) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + int32x2x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 2); \ }) #endif +#define vst1_s64_x2(__p0, __p1) __extension__ ({ \ + int64x1x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], 
(int8x8_t)__s1.val[1], 3); \ +}) #ifdef __LITTLE_ENDIAN__ -#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \ +#define vst1_s16_x2(__p0, __p1) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 1); \ }) #else -#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4x3_t __s1 = __p1; \ - int16x4x3_t __rev1; \ +#define vst1_s16_x2(__p0, __p1) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + int16x4x2_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 1); \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 1); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4_p8(__p0, __p1) __extension__ ({ \ - poly8x8x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \ +#define vst1_p8_x3(__p0, __p1) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \ }) #else -#define vst4_p8(__p0, __p1) __extension__ ({ \ - poly8x8x4_t __s1 = __p1; \ - poly8x8x4_t __rev1; \ +#define vst1_p8_x3(__p0, __p1) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + poly8x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], 
__s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4_p16(__p0, __p1) __extension__ ({ \ - poly16x4x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \ +#define vst1_p16_x3(__p0, __p1) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \ }) #else -#define vst4_p16(__p0, __p1) __extension__ ({ \ - poly16x4x4_t __s1 = __p1; \ - poly16x4x4_t __rev1; \ +#define vst1_p16_x3(__p0, __p1) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + poly16x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4q_p8(__p0, __p1) __extension__ ({ \ - poly8x16x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \ +#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, 
(int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \ }) #else -#define vst4q_p8(__p0, __p1) __extension__ ({ \ - poly8x16x4_t __s1 = __p1; \ - poly8x16x4_t __rev1; \ +#define vst1q_p8_x3(__p0, __p1) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + poly8x16x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4q_p16(__p0, __p1) __extension__ ({ \ - poly16x8x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \ +#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \ }) #else -#define vst4q_p16(__p0, __p1) __extension__ ({ \ - poly16x8x4_t __s1 = __p1; \ - poly16x8x4_t __rev1; \ +#define vst1q_p16_x3(__p0, __p1) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + poly16x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ 
- __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4q_u8(__p0, __p1) __extension__ ({ \ - uint8x16x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \ +#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \ }) #else -#define vst4q_u8(__p0, __p1) __extension__ ({ \ - uint8x16x4_t __s1 = __p1; \ - uint8x16x4_t __rev1; \ +#define vst1q_u8_x3(__p0, __p1) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + uint8x16x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4q_u32(__p0, __p1) __extension__ ({ \ - uint32x4x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 
(int8x16_t)__s1.val[3], 50); \ +#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \ }) #else -#define vst4q_u32(__p0, __p1) __extension__ ({ \ - uint32x4x4_t __s1 = __p1; \ - uint32x4x4_t __rev1; \ +#define vst1q_u32_x3(__p0, __p1) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + uint32x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4q_u16(__p0, __p1) __extension__ ({ \ - uint16x8x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \ +#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \ }) #else -#define vst4q_u16(__p0, __p1) __extension__ ({ \ - uint16x8x4_t __s1 = __p1; \ - uint16x8x4_t __rev1; \ +#define vst1q_u64_x3(__p0, __p1) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + uint64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], 
(int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \ +}) +#else +#define vst1q_u16_x3(__p0, __p1) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + uint16x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4q_s8(__p0, __p1) __extension__ ({ \ - int8x16x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \ +#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \ }) #else -#define vst4q_s8(__p0, __p1) __extension__ ({ \ - int8x16x4_t __s1 = __p1; \ - int8x16x4_t __rev1; \ +#define vst1q_s8_x3(__p0, __p1) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + int8x16x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = 
__builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4q_f32(__p0, __p1) __extension__ ({ \ - float32x4x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 41); \ +#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 41); \ }) #else -#define vst4q_f32(__p0, __p1) __extension__ ({ \ - float32x4x4_t __s1 = __p1; \ - float32x4x4_t __rev1; \ +#define vst1q_f32_x3(__p0, __p1) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + float32x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 41); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 41); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4q_s32(__p0, __p1) __extension__ ({ \ - int32x4x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 34); 
\ +#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 34); \ }) #else -#define vst4q_s32(__p0, __p1) __extension__ ({ \ - int32x4x4_t __s1 = __p1; \ - int32x4x4_t __rev1; \ +#define vst1q_s32_x3(__p0, __p1) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + int32x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 34); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 34); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4q_s16(__p0, __p1) __extension__ ({ \ - int16x8x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 33); \ +#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 35); \ }) #else -#define vst4q_s16(__p0, __p1) __extension__ ({ \ - int16x8x4_t __s1 = __p1; \ - int16x8x4_t __rev1; \ +#define vst1q_s64_x3(__p0, __p1) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + int64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 
(int8x16_t)__rev1.val[2], 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 33); \ +}) +#else +#define vst1q_s16_x3(__p0, __p1) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + int16x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 33); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 33); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4_u8(__p0, __p1) __extension__ ({ \ - uint8x8x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \ +#define vst1_u8_x3(__p0, __p1) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \ }) #else -#define vst4_u8(__p0, __p1) __extension__ ({ \ - uint8x8x4_t __s1 = __p1; \ - uint8x8x4_t __rev1; \ +#define vst1_u8_x3(__p0, __p1) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + uint8x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], 
__s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4_u32(__p0, __p1) __extension__ ({ \ - uint32x2x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \ +#define vst1_u32_x3(__p0, __p1) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \ }) #else -#define vst4_u32(__p0, __p1) __extension__ ({ \ - uint32x2x4_t __s1 = __p1; \ - uint32x2x4_t __rev1; \ +#define vst1_u32_x3(__p0, __p1) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + uint32x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \ }) #endif -#define vst4_u64(__p0, __p1) __extension__ ({ \ - uint64x1x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \ +#define vst1_u64_x3(__p0, __p1) __extension__ ({ \ + uint64x1x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \ }) #ifdef __LITTLE_ENDIAN__ -#define vst4_u16(__p0, __p1) __extension__ ({ \ - 
uint16x4x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \ +#define vst1_u16_x3(__p0, __p1) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \ }) #else -#define vst4_u16(__p0, __p1) __extension__ ({ \ - uint16x4x4_t __s1 = __p1; \ - uint16x4x4_t __rev1; \ +#define vst1_u16_x3(__p0, __p1) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + uint16x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4_s8(__p0, __p1) __extension__ ({ \ - int8x8x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \ +#define vst1_s8_x3(__p0, __p1) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \ }) #else -#define vst4_s8(__p0, __p1) __extension__ ({ \ - int8x8x4_t __s1 = __p1; \ - int8x8x4_t __rev1; \ +#define vst1_s8_x3(__p0, __p1) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + int8x8x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = 
__builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4_f32(__p0, __p1) __extension__ ({ \ - float32x2x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 9); \ +#define vst1_f32_x3(__p0, __p1) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 9); \ }) #else -#define vst4_f32(__p0, __p1) __extension__ ({ \ - float32x2x4_t __s1 = __p1; \ - float32x2x4_t __rev1; \ +#define vst1_f32_x3(__p0, __p1) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + float32x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 9); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 9); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4_s32(__p0, __p1) __extension__ ({ \ - int32x2x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 2); \ +#define vst1_s32_x3(__p0, __p1) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, 
(int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 2); \ }) #else -#define vst4_s32(__p0, __p1) __extension__ ({ \ - int32x2x4_t __s1 = __p1; \ - int32x2x4_t __rev1; \ +#define vst1_s32_x3(__p0, __p1) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + int32x2x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 2); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 2); \ }) #endif -#define vst4_s64(__p0, __p1) __extension__ ({ \ - int64x1x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 3); \ +#define vst1_s64_x3(__p0, __p1) __extension__ ({ \ + int64x1x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 3); \ }) #ifdef __LITTLE_ENDIAN__ -#define vst4_s16(__p0, __p1) __extension__ ({ \ - int16x4x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 1); \ +#define vst1_s16_x3(__p0, __p1) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 1); \ }) #else -#define vst4_s16(__p0, __p1) __extension__ ({ \ - int16x4x4_t __s1 = __p1; \ - int16x4x4_t __rev1; \ +#define vst1_s16_x3(__p0, __p1) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + int16x4x3_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 1); \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 1); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ +#define vst1_p8_x4(__p0, __p1) __extension__ ({ \ poly8x8x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \ }) #else -#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ +#define vst1_p8_x4(__p0, __p1) __extension__ ({ \ poly8x8x4_t __s1 = __p1; \ poly8x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ +#define vst1_p16_x4(__p0, __p1) __extension__ ({ \ poly16x4x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], 
(int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \ }) #else -#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ +#define vst1_p16_x4(__p0, __p1) __extension__ ({ \ poly16x4x4_t __s1 = __p1; \ poly16x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ +#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \ +}) +#else +#define vst1q_p8_x4(__p0, __p1) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + poly8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], 
(int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \ poly16x8x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \ }) #else -#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ +#define vst1q_p16_x4(__p0, __p1) __extension__ ({ \ poly16x8x4_t __s1 = __p1; \ poly16x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ +#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \ +}) +#else +#define vst1q_u8_x4(__p0, __p1) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + uint8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 
3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \ uint32x4x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 50); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \ }) #else -#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ +#define vst1q_u32_x4(__p0, __p1) __extension__ ({ \ uint32x4x4_t __s1 = __p1; \ uint32x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ - uint16x8x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \ +#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, 
(int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \ }) #else -#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ +#define vst1q_u64_x4(__p0, __p1) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + uint64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \ +}) +#else +#define vst1q_u16_x4(__p0, __p1) __extension__ ({ \ uint16x8x4_t __s1 = __p1; \ uint16x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ +#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, 
(int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \ +}) +#else +#define vst1q_s8_x4(__p0, __p1) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + int8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \ float32x4x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 41); \ }) #else -#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ +#define vst1q_f32_x4(__p0, __p1) __extension__ ({ \ float32x4x4_t __s1 = __p1; \ float32x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], 
(int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 41); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ +#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \ int32x4x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 34); \ }) #else -#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ +#define vst1q_s32_x4(__p0, __p1) __extension__ ({ \ int32x4x4_t __s1 = __p1; \ int32x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 34); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ +#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 35); \ +}) +#else +#define vst1q_s64_x4(__p0, __p1) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + int64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ 
+ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 35); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \ int16x8x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 33); \ }) #else -#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ +#define vst1q_s16_x4(__p0, __p1) __extension__ ({ \ int16x8x4_t __s1 = __p1; \ int16x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 33); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ +#define vst1_u8_x4(__p0, __p1) __extension__ ({ \ uint8x8x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \ }) #else -#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ +#define 
vst1_u8_x4(__p0, __p1) __extension__ ({ \ uint8x8x4_t __s1 = __p1; \ uint8x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ +#define vst1_u32_x4(__p0, __p1) __extension__ ({ \ uint32x2x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \ }) #else -#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ +#define vst1_u32_x4(__p0, __p1) __extension__ ({ \ uint32x2x4_t __s1 = __p1; \ uint32x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \ }) #endif +#define vst1_u64_x4(__p0, 
__p1) __extension__ ({ \ + uint64x1x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \ +}) #ifdef __LITTLE_ENDIAN__ -#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ +#define vst1_u16_x4(__p0, __p1) __extension__ ({ \ uint16x4x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \ }) #else -#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ +#define vst1_u16_x4(__p0, __p1) __extension__ ({ \ uint16x4x4_t __s1 = __p1; \ uint16x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ +#define vst1_s8_x4(__p0, __p1) __extension__ ({ \ int8x8x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \ }) #else -#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ +#define vst1_s8_x4(__p0, __p1) __extension__ ({ \ int8x8x4_t __s1 = __p1; \ 
int8x8x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ +#define vst1_f32_x4(__p0, __p1) __extension__ ({ \ float32x2x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 9); \ }) #else -#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ +#define vst1_f32_x4(__p0, __p1) __extension__ ({ \ float32x2x4_t __s1 = __p1; \ float32x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 9); \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ +#define 
vst1_s32_x4(__p0, __p1) __extension__ ({ \ int32x2x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 2); \ }) #else -#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ +#define vst1_s32_x4(__p0, __p1) __extension__ ({ \ int32x2x4_t __s1 = __p1; \ int32x2x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 2); \ }) #endif +#define vst1_s64_x4(__p0, __p1) __extension__ ({ \ + int64x1x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 3); \ +}) #ifdef __LITTLE_ENDIAN__ -#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ +#define vst1_s16_x4(__p0, __p1) __extension__ ({ \ int16x4x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 1); \ }) #else -#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ +#define vst1_s16_x4(__p0, __p1) __extension__ ({ \ int16x4x4_t __s1 = __p1; \ int16x4x4_t __rev1; \ __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 
1, 0); \ __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 1); \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = __p0 - __p1; - return __ret; -} +#define vst2_p8(__p0, __p1) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 4); \ +}) #else -__ai uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst2_p8(__p0, __p1) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + poly8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 4); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = __p0 - __p1; - return __ret; -} +#define vst2_p16(__p0, __p1) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, 
(int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 5); \ +}) #else -__ai uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vst2_p16(__p0, __p1) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + poly16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 5); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = __p0 - __p1; - return __ret; -} +#define vst2q_p8(__p0, __p1) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 36); \ +}) #else -__ai uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vst2q_p8(__p0, __p1) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + poly8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 36); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - 
__ret = __p0 - __p1; - return __ret; -} +#define vst2q_p16(__p0, __p1) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 37); \ +}) #else -__ai uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst2q_p16(__p0, __p1) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + poly16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 37); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = __p0 - __p1; - return __ret; -} +#define vst2q_u8(__p0, __p1) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 48); \ +}) #else -__ai int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst2q_u8(__p0, __p1) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + uint8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 
2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 48); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = __p0 - __p1; - return __ret; -} +#define vst2q_u32(__p0, __p1) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 50); \ +}) #else -__ai float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vst2q_u32(__p0, __p1) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + uint32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 50); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = __p0 - __p1; - return __ret; -} +#define vst2q_u16(__p0, __p1) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 49); \ +}) #else -__ai int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vst2q_u16(__p0, __p1) 
__extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + uint16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 49); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = __p0 - __p1; - return __ret; -} +#define vst2q_s8(__p0, __p1) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 32); \ +}) #else -__ai int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vst2q_s8(__p0, __p1) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + int8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 32); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = __p0 - __p1; - return __ret; -} +#define vst2q_f32(__p0, __p1) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 41); \ +}) #else -__ai int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst2q_f32(__p0, __p1) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + float32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 41); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = __p0 - __p1; - return __ret; -} +#define vst2q_s32(__p0, __p1) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 34); \ +}) #else -__ai uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst2q_s32(__p0, __p1) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + int32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 34); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = __p0 - __p1; - return __ret; -} +#define vst2q_s16(__p0, __p1) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 33); \ +}) #else -__ai uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t 
__p1) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vst2q_s16(__p0, __p1) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + int16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 33); \ +}) #endif -__ai uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = __p0 - __p1; - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = __p0 - __p1; - return __ret; -} +#define vst2_u8(__p0, __p1) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 16); \ +}) #else -__ai uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vst2_u8(__p0, __p1) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + uint8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 16); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = __p0 - __p1; - return __ret; -} +#define 
vst2_u32(__p0, __p1) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 18); \ +}) #else -__ai int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst2_u32(__p0, __p1) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + uint32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 18); \ +}) #endif +#define vst2_u64(__p0, __p1) __extension__ ({ \ + uint64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 19); \ +}) #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = __p0 - __p1; - return __ret; -} +#define vst2_u16(__p0, __p1) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 17); \ +}) #else -__ai float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vst2_u16(__p0, __p1) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + uint16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + 
__builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 17); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = __p0 - __p1; - return __ret; -} +#define vst2_s8(__p0, __p1) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 0); \ +}) #else -__ai int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vst2_s8(__p0, __p1) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + int8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 0); \ +}) #endif -__ai int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) { - int64x1_t __ret; - __ret = __p0 - __p1; - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = __p0 - __p1; - return __ret; -} +#define vst2_f32(__p0, __p1) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 9); \ +}) #else -__ai int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vst2_f32(__p0, __p1) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + float32x2x2_t __rev1; 
\ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 9); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint16x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai uint16x4_t __noswap_vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); - return __ret; -} +#define vst2_s32(__p0, __p1) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 2); \ +}) #else -__ai uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint32x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai uint32x2_t __noswap_vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) 
__builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); - return __ret; -} +#define vst2_s32(__p0, __p1) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + int32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 2); \ +}) #endif +#define vst2_s64(__p0, __p1) __extension__ ({ \ + int64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 3); \ +}) #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); - return __ret; -} +#define vst2_s16(__p0, __p1) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 1); \ +}) #else -__ai uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint8x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai uint8x8_t __noswap_vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); - return __ret; -} +#define vst2_s16(__p0, __p1) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + int16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 1); \ +}) #endif #ifdef 
__LITTLE_ENDIAN__ -__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); - return __ret; -} +#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 4); \ +}) #else -__ai int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) { - int16x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai int16x4_t __noswap_vsubhn_s32(int32x4_t __p0, int32x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); - return __ret; -} +#define vst2_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x2_t __s1 = __p1; \ + poly8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 4); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); - return __ret; -} +#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 5); \ +}) #else -__ai int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) { - int32x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai int32x2_t __noswap_vsubhn_s64(int64x2_t __p0, int64x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); - return __ret; -} +#define vst2_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x2_t __s1 = __p1; \ + poly16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 5); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); - return __ret; -} +#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 37); \ +}) #else -__ai int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) { - int8x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai int8x8_t __noswap_vsubhn_s16(int16x8_t __p0, int16x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); - return __ret; -} +#define vst2q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x2_t __s1 = __p1; \ + poly16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 
7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 37); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint16x8_t __ret; - __ret = vmovl_u8(__p0) - vmovl_u8(__p1); - return __ret; -} +#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 50); \ +}) #else -__ai uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint16x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vmovl_u8(__rev0) - __noswap_vmovl_u8(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst2q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x2_t __s1 = __p1; \ + uint32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 50); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint64x2_t __ret; - __ret = vmovl_u32(__p0) - vmovl_u32(__p1); - return __ret; -} +#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 49); \ +}) #else -__ai uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint64x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, 
__p1, 1, 0); - __ret = __noswap_vmovl_u32(__rev0) - __noswap_vmovl_u32(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vst2q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x2_t __s1 = __p1; \ + uint16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 49); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint32x4_t __ret; - __ret = vmovl_u16(__p0) - vmovl_u16(__p1); - return __ret; -} +#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 41); \ +}) #else -__ai uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint32x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __noswap_vmovl_u16(__rev0) - __noswap_vmovl_u16(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vst2q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x2_t __s1 = __p1; \ + float32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 41); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) { - int16x8_t __ret; - __ret = vmovl_s8(__p0) - vmovl_s8(__p1); - return __ret; -} +#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + 
__builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 34); \ +}) #else -__ai int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) { - int16x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vmovl_s8(__rev0) - __noswap_vmovl_s8(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst2q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x2_t __s1 = __p1; \ + int32x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 34); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) { - int64x2_t __ret; - __ret = vmovl_s32(__p0) - vmovl_s32(__p1); - return __ret; -} +#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 33); \ +}) #else -__ai int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) { - int64x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __noswap_vmovl_s32(__rev0) - __noswap_vmovl_s32(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vst2q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x2_t __s1 = __p1; \ + int16x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, 
(int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 33); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) { - int32x4_t __ret; - __ret = vmovl_s16(__p0) - vmovl_s16(__p1); - return __ret; -} +#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 16); \ +}) #else -__ai int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) { - int32x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __noswap_vmovl_s16(__rev0) - __noswap_vmovl_s16(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vst2_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x2_t __s1 = __p1; \ + uint8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 16); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) { - uint16x8_t __ret; - __ret = __p0 - vmovl_u8(__p1); - return __ret; -} +#define vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 18); \ +}) #else -__ai uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 - __noswap_vmovl_u8(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define 
vst2_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x2_t __s1 = __p1; \ + uint32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 18); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) { - uint64x2_t __ret; - __ret = __p0 - vmovl_u32(__p1); - return __ret; -} +#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 17); \ +}) #else -__ai uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 - __noswap_vmovl_u32(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vst2_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x2_t __s1 = __p1; \ + uint16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 17); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) { - uint32x4_t __ret; - __ret = __p0 - vmovl_u16(__p1); - return __ret; -} +#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 0); \ +}) #else -__ai uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; 
__rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 - __noswap_vmovl_u16(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vst2_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x2_t __s1 = __p1; \ + int8x8x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 0); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) { - int16x8_t __ret; - __ret = __p0 - vmovl_s8(__p1); - return __ret; -} +#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 9); \ +}) #else -__ai int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 - __noswap_vmovl_s8(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst2_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x2_t __s1 = __p1; \ + float32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 9); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) { - int64x2_t __ret; - __ret = __p0 - vmovl_s32(__p1); - return __ret; -} +#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, 
(int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 2); \ +}) #else -__ai int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 - __noswap_vmovl_s32(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vst2_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x2_t __s1 = __p1; \ + int32x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 2); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) { - int32x4_t __ret; - __ret = __p0 - vmovl_s16(__p1); - return __ret; -} +#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 1); \ +}) #else -__ai int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 - __noswap_vmovl_s16(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vst2_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x2_t __s1 = __p1; \ + int16x4x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 1); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) { - poly8x8_t __ret; - __ret = 
(poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 4); - return __ret; -} +#define vst3_p8(__p0, __p1) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 4); \ +}) #else -__ai poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) { - poly8x8_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3_p8(__p0, __p1) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + poly8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 4); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} +#define vst3_p16(__p0, __p1) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 5); \ +}) #else -__ai uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3_p16(__p0, __p1) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + poly16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 5); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} +#define vst3q_p8(__p0, __p1) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 36); \ +}) #else -__ai int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3q_p8(__p0, __p1) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + poly8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 36); \ +}) #endif #ifdef 
__LITTLE_ENDIAN__ -__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 4); - return __ret; -} +#define vst3q_p16(__p0, __p1) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 37); \ +}) #else -__ai poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) { - poly8x8_t __ret; - poly8x8x2_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3q_p16(__p0, __p1) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + poly16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 37); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x8x2_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = 
__builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3q_u8(__p0, __p1) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 48); \ +}) +#else +#define vst3q_u8(__p0, __p1) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + uint8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 48); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 0); - return __ret; -} +#define vst3q_u32(__p0, __p1) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 50); \ +}) #else -__ai int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) { - int8x8_t __ret; - int8x8x2_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = 
(int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3q_u32(__p0, __p1) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + uint32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 50); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 4); - return __ret; -} +#define vst3q_u16(__p0, __p1) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 49); \ +}) #else -__ai poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) { - poly8x8_t __ret; - poly8x8x3_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3q_u16(__p0, __p1) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + uint16x8x3_t __rev1; \ + __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 49); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 16); - return __ret; -} +#define vst3q_s8(__p0, __p1) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 32); \ +}) #else -__ai uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x8x3_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3q_s8(__p0, __p1) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + int8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 32); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 0); - return __ret; -} +#define vst3q_f32(__p0, __p1) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 41); \ +}) #else -__ai int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) { - int8x8_t __ret; - int8x8x3_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3q_f32(__p0, __p1) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + float32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 41); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], 
(int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 4); - return __ret; -} +#define vst3q_s32(__p0, __p1) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 34); \ +}) #else -__ai poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) { - poly8x8_t __ret; - poly8x8x4_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3q_s32(__p0, __p1) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + int32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 34); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 16); - return __ret; -} +#define vst3q_s16(__p0, __p1) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, 
(int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 33); \ +}) #else -__ai uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x8x4_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3q_s16(__p0, __p1) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + int16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 33); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 0); - return __ret; -} +#define vst3_u8(__p0, __p1) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 16); \ +}) #else -__ai int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) { - int8x8_t __ret; - int8x8x4_t __rev0; - 
__rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3_u8(__p0, __p1) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + uint8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 16); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4); - return __ret; -} +#define vst3_u32(__p0, __p1) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 18); \ +}) #else -__ai poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) { - poly8x8_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 
6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3_u32(__p0, __p1) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + uint32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 18); \ +}) #endif +#define vst3_u64(__p0, __p1) __extension__ ({ \ + uint64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 19); \ +}) #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16); - return __ret; -} +#define vst3_u16(__p0, __p1) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 17); \ +}) #else -__ai uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3_u16(__p0, __p1) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + uint16x4x3_t __rev1; \ + __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 17); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0); - return __ret; -} +#define vst3_s8(__p0, __p1) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 0); \ +}) #else -__ai int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3_s8(__p0, __p1) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + int8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 0); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) { - poly8x8_t __ret; - __ret = (poly8x8_t) 
__builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 4); - return __ret; -} +#define vst3_f32(__p0, __p1) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 9); \ +}) #else -__ai poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) { - poly8x8_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8x2_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3_f32(__p0, __p1) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + float32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 9); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 16); - return __ret; -} +#define vst3_s32(__p0, __p1) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 2); \ +}) #else -__ai uint8x8_t vtbx2_u8(uint8x8_t __p0, 
uint8x8x2_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8x2_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3_s32(__p0, __p1) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + int32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 2); \ +}) #endif +#define vst3_s64(__p0, __p1) __extension__ ({ \ + int64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 3); \ +}) #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 0); - return __ret; -} +#define vst3_s16(__p0, __p1) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 1); \ +}) #else -__ai int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8x2_t __rev1; - __rev1.val[0] = 
__builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3_s16(__p0, __p1) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + int16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 1); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 4); - return __ret; -} +#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 4); \ +}) #else -__ai poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) { - poly8x8_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8x3_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = 
__builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x3_t __s1 = __p1; \ + poly8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 4); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 16); - return __ret; -} +#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 5); \ +}) #else -__ai uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8x3_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, 
(int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x3_t __s1 = __p1; \ + poly16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 5); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 0); - return __ret; -} +#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 37); \ +}) #else -__ai int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8x3_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - 
return __ret; -} +#define vst3q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x3_t __s1 = __p1; \ + poly16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 37); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 4); - return __ret; -} +#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 50); \ +}) #else -__ai poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) { - poly8x8_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8x4_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 
5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x3_t __s1 = __p1; \ + uint32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 50); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 16); - return __ret; -} +#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 49); \ +}) #else -__ai uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8x4_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 
1, 0); - return __ret; -} +#define vst3q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x3_t __s1 = __p1; \ + uint16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 49); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 0); - return __ret; -} +#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 41); \ +}) #else -__ai int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8x4_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 
4, 3, 2, 1, 0); - return __ret; -} +#define vst3q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x3_t __s1 = __p1; \ + float32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 41); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4); - return __ret; -} +#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 34); \ +}) #else -__ai poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8x2_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x3_t __s1 = __p1; \ + int32x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 34); \ +}) #endif 
#ifdef __LITTLE_ENDIAN__ -__ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5); - return __ret; -} +#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 33); \ +}) #else -__ai poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4x2_t __ret; - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} +#define vst3q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x3_t __s1 = __p1; \ + int16x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 33); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); - return __ret; -} +#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 16); \ +}) #else -__ai poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16x2_t __ret; - poly8x16_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x3_t __s1 = __p1; \ + uint8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 16); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); - return __ret; -} +#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 18); \ +}) #else -__ai poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8x2_t __ret; - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x3_t __s1 = __p1; \ + uint32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 18); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} +#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 17); \ +}) #else -__ai uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16x2_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x3_t __s1 = __p1; \ + uint16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = 
__builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 17); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} +#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 0); \ +}) #else -__ai uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4x2_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} +#define vst3_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x3_t __s1 = __p1; \ + int8x8x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 0); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} +#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, 
(int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 9); \ +}) #else -__ai uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8x2_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x3_t __s1 = __p1; \ + float32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 9); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} +#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 2); \ +}) #else -__ai int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16x2_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], 
__ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x3_t __s1 = __p1; \ + int32x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 2); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} +#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 1); \ +}) #else -__ai float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4x2_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} +#define vst3_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x3_t __s1 = __p1; \ + int16x4x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + 
__builtin_neon_vst3_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 1); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} +#define vst4_p8(__p0, __p1) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 4); \ +}) #else -__ai int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4x2_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} +#define vst4_p8(__p0, __p1) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + poly8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 4); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8x2_t __ret; - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} +#define vst4_p16(__p0, __p1) __extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, 
(int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 5); \ +}) #else -__ai int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8x2_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst4_p16(__p0, __p1) __extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + poly16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 5); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} +#define vst4q_p8(__p0, __p1) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 36); \ +}) #else -__ai uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8x2_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16); - - 
__ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst4q_p8(__p0, __p1) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + poly8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 36); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} +#define vst4q_p16(__p0, __p1) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 37); \ +}) #else -__ai uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); - return __ret; -} +#define vst4q_p16(__p0, __p1) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + poly16x8x4_t __rev1; \ + __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 37); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} +#define vst4q_u8(__p0, __p1) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 48); \ +}) #else -__ai uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4x2_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} +#define vst4q_u8(__p0, __p1) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + uint8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 
12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 48); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} +#define vst4q_u32(__p0, __p1) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 50); \ +}) #else -__ai int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8x2_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst4q_u32(__p0, __p1) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + uint32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 50); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} +#define vst4q_u16(__p0, __p1) __extension__ ({ \ + 
uint16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 49); \ +}) #else -__ai float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); - return __ret; -} +#define vst4q_u16(__p0, __p1) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + uint16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 49); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} +#define vst4q_s8(__p0, __p1) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 32); \ +}) #else -__ai int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, 
(int8x8_t)__rev1, 2); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); - return __ret; -} +#define vst4q_s8(__p0, __p1) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + int8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 32); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4x2_t __ret; - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} +#define vst4q_f32(__p0, __p1) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 41); \ +}) #else -__ai int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4x2_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} +#define vst4q_f32(__p0, __p1) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + float32x4x4_t __rev1; \ + __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 41); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} +#define vst4q_s32(__p0, __p1) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 34); \ +}) #else -__ai uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) { - uint8x8_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst4q_s32(__p0, __p1) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + int32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 34); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) 
{ - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} +#define vst4q_s16(__p0, __p1) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 33); \ +}) #else -__ai uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) { - uint16x4_t __ret; - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vst4q_s16(__p0, __p1) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + int16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 33); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} +#define vst4_u8(__p0, __p1) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 16); \ +}) #else -__ai uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) { - uint8x16_t __ret; - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 
5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst4_u8(__p0, __p1) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + uint8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 16); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} +#define vst4_u32(__p0, __p1) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 18); \ +}) #else -__ai uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) { - uint16x8_t __ret; - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst4_u32(__p0, __p1) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + uint32x2x4_t __rev1; \ + __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 18); \ +}) +#endif + +#define vst4_u64(__p0, __p1) __extension__ ({ \ + uint64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 19); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst4_u16(__p0, __p1) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 17); \ +}) +#else +#define vst4_u16(__p0, __p1) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + uint16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_s8(__p0, __p1) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 0); \ +}) +#else +#define vst4_s8(__p0, __p1) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + int8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 
3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_f32(__p0, __p1) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 9); \ +}) +#else +#define vst4_f32(__p0, __p1) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + float32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_s32(__p0, __p1) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 2); \ +}) +#else +#define vst4_s32(__p0, __p1) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + int32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 2); \ +}) +#endif + +#define vst4_s64(__p0, __p1) 
__extension__ ({ \ + int64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 3); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst4_s16(__p0, __p1) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 1); \ +}) +#else +#define vst4_s16(__p0, __p1) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + int16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 1); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 4); \ +}) +#else +#define vst4_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x8x4_t __s1 = __p1; \ + poly8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 4); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_p16(__p0, __p1, __p2) 
__extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 5); \ +}) +#else +#define vst4_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x4x4_t __s1 = __p1; \ + poly16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 5); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 37); \ +}) +#else +#define vst4q_lane_p16(__p0, __p1, __p2) __extension__ ({ \ + poly16x8x4_t __s1 = __p1; \ + poly16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 37); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 
50); \ +}) +#else +#define vst4q_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x4x4_t __s1 = __p1; \ + uint32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 50); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 49); \ +}) +#else +#define vst4q_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x8x4_t __s1 = __p1; \ + uint16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 49); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 41); \ +}) +#else +#define vst4q_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x4x4_t __s1 = __p1; \ + float32x4x4_t __rev1; \ + __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 41); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 34); \ +}) +#else +#define vst4q_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4x4_t __s1 = __p1; \ + int32x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 34); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 33); \ +}) +#else +#define vst4q_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8x4_t __s1 = __p1; \ + int16x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], 
__s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 33); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 16); \ +}) +#else +#define vst4_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x8x4_t __s1 = __p1; \ + uint8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 16); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 18); \ +}) +#else +#define vst4_lane_u32(__p0, __p1, __p2) __extension__ ({ \ + uint32x2x4_t __s1 = __p1; \ + uint32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 
(int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 18); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 17); \ +}) +#else +#define vst4_lane_u16(__p0, __p1, __p2) __extension__ ({ \ + uint16x4x4_t __s1 = __p1; \ + uint16x4x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 17); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 0); \ +}) +#else +#define vst4_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x8x4_t __s1 = __p1; \ + int8x8x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 0); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + 
__builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 9); \ +}) +#else +#define vst4_lane_f32(__p0, __p1, __p2) __extension__ ({ \ + float32x2x4_t __s1 = __p1; \ + float32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 9); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 2); \ +}) +#else +#define vst4_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2x4_t __s1 = __p1; \ + int32x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 2); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 1); \ +}) +#else +#define vst4_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4x4_t __s1 = __p1; \ + int16x4x4_t __rev1; \ + __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 1); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + __ret = __p0 - __p1; return __ret; } #else -__ai uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vsubq_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + __ret = __p0 - __p1; return __ret; } #else -__ai uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vsubq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vsubq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + __ret = __p0 - __p1; return __ret; } #else -__ai uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vsubq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); +__ai 
__attribute__((target("neon"))) int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __p0 - __p1; return __ret; } #else -__ai uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; +__ai __attribute__((target("neon"))) int8x16_t vsubq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); +__ai __attribute__((target("neon"))) float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __p0 - __p1; return __ret; } #else -__ai uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; +__ai __attribute__((target("neon"))) float32x4_t vsubq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vsubq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; int32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); +__ai __attribute__((target("neon"))) int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __p0 - __p1; return __ret; } #else -__ai uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; +__ai __attribute__((target("neon"))) int64x2_t vsubq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __p0 - __p1; + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vsubq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; - __ret = 
(uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + __ret = __p0 - __p1; return __ret; } #else -__ai uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vsub_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + __ret = __p0 - __p1; return __ret; } #else -__ai uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vsub_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif +__ai __attribute__((target("neon"))) uint64x1_t vsub_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = __p0 - __p1; + return __ret; +} #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + __ret = __p0 - __p1; return __ret; } #else -__ai uint16x4_t 
vtst_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vsub_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16); +__ai __attribute__((target("neon"))) int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __p0 - __p1; return __ret; } #else -__ai uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) { - uint8x8_t __ret; +__ai __attribute__((target("neon"))) int8x8_t vsub_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18); +__ai __attribute__((target("neon"))) float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __p0 - __p1; return __ret; } #else -__ai uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t) 
__builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); +__ai __attribute__((target("neon"))) float32x2_t vsub_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17); +__ai __attribute__((target("neon"))) int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __p0 - __p1; return __ret; } #else -__ai uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) { - uint16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32x2_t vsub_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4); - return __ret; -} -#else -__ai poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8x2_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - 
__builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int64x1_t vsub_s64(int64x1_t __p0, int64x1_t __p1) { + int64x1_t __ret; + __ret = __p0 - __p1; return __ret; } -#endif - #ifdef __LITTLE_ENDIAN__ -__ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5); +__ai __attribute__((target("neon"))) int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __p0 - __p1; return __ret; } #else -__ai poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4x2_t __ret; - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int16x4_t vsub_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); +__ai __attribute__((target("neon"))) uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); 
return __ret; } #else -__ai poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16x2_t __ret; - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); +__ai __attribute__((target("neon"))) uint16x4_t vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -#else -__ai poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8x2_t __ret; - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint16x4_t __noswap_vsubhn_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint16x4_t __ret; + 
__ret = (uint16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 17); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); +__ai __attribute__((target("neon"))) uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); return __ret; } #else -__ai uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16x2_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); +__ai __attribute__((target("neon"))) uint32x2_t vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -#else -__ai uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4x2_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; 
__rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint32x2_t __noswap_vsubhn_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 18); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); +__ai __attribute__((target("neon"))) uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); return __ret; } #else -__ai uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8x2_t __ret; +__ai __attribute__((target("neon"))) uint8x8_t vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); + __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -#else -__ai int8x16x2_t 
vuzpq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16x2_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8x8_t __noswap_vsubhn_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 16); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); +__ai __attribute__((target("neon"))) int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); return __ret; } #else -__ai float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4x2_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int16x4_t vsubhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t __noswap_vsubhn_s32(int32x4_t __p0, int32x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 1); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); +__ai __attribute__((target("neon"))) int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); return __ret; } #else -__ai int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4x2_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32x2_t vsubhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t __noswap_vsubhn_s64(int64x2_t __p0, int64x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ 
-__ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8x2_t __ret; - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); +__ai __attribute__((target("neon"))) int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); return __ret; } #else -__ai int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8x2_t __ret; +__ai __attribute__((target("neon"))) int8x8_t vsubhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__rev0, (int8x16_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t __noswap_vsubhn_s16(int16x8_t __p0, int16x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vsubhn_v((int8x16_t)__p0, (int8x16_t)__p1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); +__ai __attribute__((target("neon"))) uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = vmovl_u8(__p0) - vmovl_u8(__p1); return __ret; } #else -__ai uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8x2_t __ret; +__ai __attribute__((target("neon"))) uint16x8_t vsubl_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; uint8x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_u8(__rev0) - __noswap_vmovl_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); +__ai __attribute__((target("neon"))) uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = vmovl_u32(__p0) - vmovl_u32(__p1); return __ret; } #else -__ai uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2x2_t __ret; +__ai __attribute__((target("neon"))) uint64x2_t vsubl_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + __ret = __noswap_vmovl_u32(__rev0) - __noswap_vmovl_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); +__ai __attribute__((target("neon"))) uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = vmovl_u16(__p0) - vmovl_u16(__p1); return 
__ret; } #else -__ai uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4x2_t __ret; +__ai __attribute__((target("neon"))) uint32x4_t vsubl_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + __ret = __noswap_vmovl_u16(__rev0) - __noswap_vmovl_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); +__ai __attribute__((target("neon"))) int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = vmovl_s8(__p0) - vmovl_s8(__p1); return __ret; } #else -__ai int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8x2_t __ret; +__ai __attribute__((target("neon"))) int16x8_t vsubl_s8(int8x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_s8(__rev0) - __noswap_vmovl_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2x2_t __ret; 
- __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); +__ai __attribute__((target("neon"))) int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = vmovl_s32(__p0) - vmovl_s32(__p1); return __ret; } #else -__ai float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); +__ai __attribute__((target("neon"))) int64x2_t vsubl_s32(int32x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vmovl_s32(__rev0) - __noswap_vmovl_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); +__ai __attribute__((target("neon"))) int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = vmovl_s16(__p0) - vmovl_s16(__p1); return __ret; } #else -__ai int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); +__ai __attribute__((target("neon"))) int32x4_t vsubl_s16(int16x4_t __p0, int16x4_t 
__p1) { + int32x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmovl_s16(__rev0) - __noswap_vmovl_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4x2_t __ret; - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); +__ai __attribute__((target("neon"))) uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + __ret = __p0 - vmovl_u8(__p1); return __ret; } #else -__ai int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4x2_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint16x8_t vsubw_u8(uint16x8_t __p0, uint8x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4); +__ai __attribute__((target("neon"))) uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + __ret = __p0 - vmovl_u32(__p1); return __ret; } #else -__ai poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) { - 
poly8x8x2_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint64x2_t vsubw_u32(uint64x2_t __p0, uint32x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __noswap_vmovl_u32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5); +__ai __attribute__((target("neon"))) uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + __ret = __p0 - vmovl_u16(__p1); return __ret; } #else -__ai poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4x2_t __ret; - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint32x4_t vsubw_u16(uint32x4_t __p0, uint16x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_u16(__rev1); 
+ __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); +__ai __attribute__((target("neon"))) int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + __ret = __p0 - vmovl_s8(__p1); return __ret; } #else -__ai poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16x2_t __ret; - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int16x8_t vsubw_s8(int16x8_t __p0, int8x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); +__ai __attribute__((target("neon"))) int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + __ret = __p0 - vmovl_s32(__p1); return __ret; } #else -__ai poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8x2_t __ret; - poly16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int64x2_t vsubw_s32(int64x2_t __p0, int32x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __noswap_vmovl_s32(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); +__ai __attribute__((target("neon"))) int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + __ret = __p0 - vmovl_s16(__p1); return __ret; } #else -__ai uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16x2_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32x4_t vsubw_s16(int32x4_t __p0, int16x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + 
int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); +__ai __attribute__((target("neon"))) poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 4); return __ret; } #else -__ai uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4x2_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly8x8_t vtbl1_p8(poly8x8_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); +__ai __attribute__((target("neon"))) uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai uint16x8x2_t vzipq_u16(uint16x8_t 
__p0, uint16x8_t __p1) { - uint16x8x2_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8x8_t vtbl1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); +__ai __attribute__((target("neon"))) int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16x2_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 
0); +__ai __attribute__((target("neon"))) int8x8_t vtbl1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vtbl1_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); +__ai __attribute__((target("neon"))) poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 4); return __ret; } #else -__ai float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4x2_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly8x8_t vtbl2_p8(poly8x8x2_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + poly8x8x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); 
return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); +__ai __attribute__((target("neon"))) uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 16); return __ret; } #else -__ai int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4x2_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8x8_t vtbl2_u8(uint8x8x2_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8x2_t __ret; - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); +__ai __attribute__((target("neon"))) int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p1, 0); return __ret; } #else -__ai int16x8x2_t 
vzipq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8x2_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int8x8_t vtbl2_s8(int8x8x2_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vtbl2_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); +__ai __attribute__((target("neon"))) poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 4); return __ret; } #else -__ai uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8x2_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly8x8_t vtbl3_p8(poly8x8x3_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + poly8x8x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = 
__builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); +__ai __attribute__((target("neon"))) uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 16); return __ret; } #else -__ai uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); +__ai __attribute__((target("neon"))) uint8x8_t vtbl3_u8(uint8x8x3_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + 
__rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); +__ai __attribute__((target("neon"))) int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p1, 0); return __ret; } #else -__ai uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4x2_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int8x8_t vtbl3_s8(int8x8x3_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vtbl3_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev1, 0); + __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); +__ai __attribute__((target("neon"))) poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 4); return __ret; } #else -__ai int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8x2_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly8x8_t vtbl4_p8(poly8x8x4_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + poly8x8x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2x2_t vzip_f32(float32x2_t __p0, 
float32x2_t __p1) { - float32x2x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); +__ai __attribute__((target("neon"))) uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 16); return __ret; } #else -__ai float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); +__ai __attribute__((target("neon"))) uint8x8_t vtbl4_u8(uint8x8x4_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); +__ai __attribute__((target("neon"))) int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) { + 
int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__p0.val[0], (int8x8_t)__p0.val[1], (int8x8_t)__p0.val[2], (int8x8_t)__p0.val[3], (int8x8_t)__p1, 0); return __ret; } #else -__ai int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); +__ai __attribute__((target("neon"))) int8x8_t vtbl4_s8(int8x8x4_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vtbl4_v((int8x8_t)__rev0.val[0], (int8x8_t)__rev0.val[1], (int8x8_t)__rev0.val[2], (int8x8_t)__rev0.val[3], (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4x2_t __ret; - __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); +__ai __attribute__((target("neon"))) poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 4); return __ret; } #else -__ai int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4x2_t __ret; 
- int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly8x8_t vtbx1_p8(poly8x8_t __p0, poly8x8_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_bf16((int8x8_t)__s0, __p1, 11); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 16); + return __ret; +} #else -#define splatq_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (bfloat16x8_t) __builtin_neon_splatq_lane_bf16((int8x8_t)__rev0, __p1, 11); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splatq_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - __ret = (bfloat16x8_t) 
__builtin_neon_splatq_lane_bf16((int8x8_t)__s0, __p1, 11); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vtbx1_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define splat_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - __ret = (bfloat16x4_t) __builtin_neon_splat_lane_bf16((int8x8_t)__s0, __p1, 11); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 0); + return __ret; +} #else -#define splat_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (bfloat16x4_t) __builtin_neon_splat_lane_bf16((int8x8_t)__rev0, __p1, 11); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splat_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - __ret = (bfloat16x4_t) __builtin_neon_splat_lane_bf16((int8x8_t)__s0, __p1, 11); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vtbx1_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, 
__p1, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vtbx1_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_bf16((int8x16_t)__s0, __p1, 43); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 4); + return __ret; +} #else -#define splatq_laneq_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_bf16((int8x16_t)__rev0, __p1, 43); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splatq_laneq_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - __ret = (bfloat16x8_t) __builtin_neon_splatq_laneq_bf16((int8x16_t)__s0, __p1, 43); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly8x8_t vtbx2_p8(poly8x8_t __p0, poly8x8x2_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) 
__builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_bf16((int8x16_t)__s0, __p1, 43); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 16); + return __ret; +} #else -#define splat_laneq_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_bf16((int8x16_t)__rev0, __p1, 43); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_splat_laneq_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - __ret = (bfloat16x4_t) __builtin_neon_splat_laneq_bf16((int8x16_t)__s0, __p1, 43); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vtbx2_u8(uint8x8_t __p0, uint8x8x2_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 16); + __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vbfdotq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); +__ai __attribute__((target("neon"))) int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p2, 0); return __ret; } #else -__ai __attribute__((target("bf16"))) float32x4_t vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vbfdotq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai __attribute__((target("bf16"))) float32x4_t __noswap_vbfdotq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vbfdotq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); +__ai __attribute__((target("neon"))) int8x8_t vtbx2_s8(int8x8_t __p0, int8x8x2_t __p1, int8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + 
__ret = (int8x8_t) __builtin_neon_vtbx2_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vbfdot_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); +__ai __attribute__((target("neon"))) poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 4); return __ret; } #else -__ai __attribute__((target("bf16"))) float32x2_t vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - bfloat16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vbfdot_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai __attribute__((target("bf16"))) float32x2_t __noswap_vbfdot_f32(float32x2_t __p0, bfloat16x4_t __p1, bfloat16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vbfdot_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); +__ai __attribute__((target("neon"))) poly8x8_t vtbx3_p8(poly8x8_t __p0, poly8x8x3_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], 
__p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vbfdotq_lane_f32(__p0_126, __p1_126, __p2_126, __p3_126) __extension__ ({ \ - float32x4_t __ret_126; \ - float32x4_t __s0_126 = __p0_126; \ - bfloat16x8_t __s1_126 = __p1_126; \ - bfloat16x4_t __s2_126 = __p2_126; \ -bfloat16x4_t __reint_126 = __s2_126; \ -float32x4_t __reint1_126 = splatq_lane_f32(*(float32x2_t *) &__reint_126, __p3_126); \ - __ret_126 = vbfdotq_f32(__s0_126, __s1_126, *(bfloat16x8_t *) &__reint1_126); \ - __ret_126; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 16); + return __ret; +} #else -#define vbfdotq_lane_f32(__p0_127, __p1_127, __p2_127, __p3_127) __extension__ ({ \ - float32x4_t __ret_127; \ - float32x4_t __s0_127 = __p0_127; \ - bfloat16x8_t __s1_127 = __p1_127; \ - bfloat16x4_t __s2_127 = __p2_127; \ - float32x4_t __rev0_127; __rev0_127 = __builtin_shufflevector(__s0_127, __s0_127, 3, 2, 1, 0); \ - bfloat16x8_t __rev1_127; __rev1_127 = __builtin_shufflevector(__s1_127, __s1_127, 7, 6, 5, 4, 3, 2, 1, 0); \ - bfloat16x4_t __rev2_127; __rev2_127 = __builtin_shufflevector(__s2_127, __s2_127, 3, 2, 1, 0); \ -bfloat16x4_t __reint_127 = __rev2_127; \ -float32x4_t __reint1_127 = __noswap_splatq_lane_f32(*(float32x2_t *) &__reint_127, __p3_127); \ - __ret_127 = __noswap_vbfdotq_f32(__rev0_127, __rev1_127, 
*(bfloat16x8_t *) &__reint1_127); \ - __ret_127 = __builtin_shufflevector(__ret_127, __ret_127, 3, 2, 1, 0); \ - __ret_127; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vtbx3_u8(uint8x8_t __p0, uint8x8x3_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vbfdot_lane_f32(__p0_128, __p1_128, __p2_128, __p3_128) __extension__ ({ \ - float32x2_t __ret_128; \ - float32x2_t __s0_128 = __p0_128; \ - bfloat16x4_t __s1_128 = __p1_128; \ - bfloat16x4_t __s2_128 = __p2_128; \ -bfloat16x4_t __reint_128 = __s2_128; \ -float32x2_t __reint1_128 = splat_lane_f32(*(float32x2_t *) &__reint_128, __p3_128); \ - __ret_128 = vbfdot_f32(__s0_128, __s1_128, *(bfloat16x4_t *) &__reint1_128); \ - __ret_128; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p2, 0); + return __ret; +} #else -#define vbfdot_lane_f32(__p0_129, __p1_129, __p2_129, __p3_129) __extension__ ({ \ - float32x2_t __ret_129; \ - float32x2_t __s0_129 = __p0_129; \ - bfloat16x4_t __s1_129 = __p1_129; \ - bfloat16x4_t __s2_129 = __p2_129; \ - 
float32x2_t __rev0_129; __rev0_129 = __builtin_shufflevector(__s0_129, __s0_129, 1, 0); \ - bfloat16x4_t __rev1_129; __rev1_129 = __builtin_shufflevector(__s1_129, __s1_129, 3, 2, 1, 0); \ - bfloat16x4_t __rev2_129; __rev2_129 = __builtin_shufflevector(__s2_129, __s2_129, 3, 2, 1, 0); \ -bfloat16x4_t __reint_129 = __rev2_129; \ -float32x2_t __reint1_129 = __noswap_splat_lane_f32(*(float32x2_t *) &__reint_129, __p3_129); \ - __ret_129 = __noswap_vbfdot_f32(__rev0_129, __rev1_129, *(bfloat16x4_t *) &__reint1_129); \ - __ret_129 = __builtin_shufflevector(__ret_129, __ret_129, 1, 0); \ - __ret_129; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vtbx3_s8(int8x8_t __p0, int8x8x3_t __p1, int8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vtbx3_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vbfdotq_laneq_f32(__p0_130, __p1_130, __p2_130, __p3_130) __extension__ ({ \ - float32x4_t __ret_130; \ - float32x4_t __s0_130 = __p0_130; \ - bfloat16x8_t __s1_130 = __p1_130; \ - bfloat16x8_t __s2_130 = __p2_130; \ -bfloat16x8_t __reint_130 = __s2_130; \ -float32x4_t __reint1_130 = splatq_laneq_f32(*(float32x4_t *) &__reint_130, __p3_130); \ - __ret_130 = vbfdotq_f32(__s0_130, __s1_130, *(bfloat16x8_t *) &__reint1_130); \ - __ret_130; \ -}) +__ai __attribute__((target("neon"))) poly8x8_t 
vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 4); + return __ret; +} #else -#define vbfdotq_laneq_f32(__p0_131, __p1_131, __p2_131, __p3_131) __extension__ ({ \ - float32x4_t __ret_131; \ - float32x4_t __s0_131 = __p0_131; \ - bfloat16x8_t __s1_131 = __p1_131; \ - bfloat16x8_t __s2_131 = __p2_131; \ - float32x4_t __rev0_131; __rev0_131 = __builtin_shufflevector(__s0_131, __s0_131, 3, 2, 1, 0); \ - bfloat16x8_t __rev1_131; __rev1_131 = __builtin_shufflevector(__s1_131, __s1_131, 7, 6, 5, 4, 3, 2, 1, 0); \ - bfloat16x8_t __rev2_131; __rev2_131 = __builtin_shufflevector(__s2_131, __s2_131, 7, 6, 5, 4, 3, 2, 1, 0); \ -bfloat16x8_t __reint_131 = __rev2_131; \ -float32x4_t __reint1_131 = __noswap_splatq_laneq_f32(*(float32x4_t *) &__reint_131, __p3_131); \ - __ret_131 = __noswap_vbfdotq_f32(__rev0_131, __rev1_131, *(bfloat16x8_t *) &__reint1_131); \ - __ret_131 = __builtin_shufflevector(__ret_131, __ret_131, 3, 2, 1, 0); \ - __ret_131; \ -}) +__ai __attribute__((target("neon"))) poly8x8_t vtbx4_p8(poly8x8_t __p0, poly8x8x4_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 
(int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vbfdot_laneq_f32(__p0_132, __p1_132, __p2_132, __p3_132) __extension__ ({ \ - float32x2_t __ret_132; \ - float32x2_t __s0_132 = __p0_132; \ - bfloat16x4_t __s1_132 = __p1_132; \ - bfloat16x8_t __s2_132 = __p2_132; \ -bfloat16x8_t __reint_132 = __s2_132; \ -float32x2_t __reint1_132 = splat_laneq_f32(*(float32x4_t *) &__reint_132, __p3_132); \ - __ret_132 = vbfdot_f32(__s0_132, __s1_132, *(bfloat16x4_t *) &__reint1_132); \ - __ret_132; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 16); + return __ret; +} #else -#define vbfdot_laneq_f32(__p0_133, __p1_133, __p2_133, __p3_133) __extension__ ({ \ - float32x2_t __ret_133; \ - float32x2_t __s0_133 = __p0_133; \ - bfloat16x4_t __s1_133 = __p1_133; \ - bfloat16x8_t __s2_133 = __p2_133; \ - float32x2_t __rev0_133; __rev0_133 = __builtin_shufflevector(__s0_133, __s0_133, 1, 0); \ - bfloat16x4_t __rev1_133; __rev1_133 = __builtin_shufflevector(__s1_133, __s1_133, 3, 2, 1, 0); \ - bfloat16x8_t __rev2_133; __rev2_133 = __builtin_shufflevector(__s2_133, __s2_133, 7, 6, 5, 4, 3, 2, 1, 0); \ -bfloat16x8_t __reint_133 = __rev2_133; \ -float32x2_t __reint1_133 = __noswap_splat_laneq_f32(*(float32x4_t *) &__reint_133, __p3_133); \ - __ret_133 = __noswap_vbfdot_f32(__rev0_133, __rev1_133, *(bfloat16x4_t *) &__reint1_133); \ - __ret_133 = __builtin_shufflevector(__ret_133, __ret_133, 1, 0); \ - __ret_133; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vtbx4_u8(uint8x8_t __p0, uint8x8x4_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 
5, 4, 3, 2, 1, 0); + uint8x8x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vbfmlalbq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); +__ai __attribute__((target("neon"))) int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__p0, (int8x8_t)__p1.val[0], (int8x8_t)__p1.val[1], (int8x8_t)__p1.val[2], (int8x8_t)__p1.val[3], (int8x8_t)__p2, 0); return __ret; } #else -__ai __attribute__((target("bf16"))) float32x4_t vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vbfmlalbq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai 
__attribute__((target("bf16"))) float32x4_t __noswap_vbfmlalbq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vbfmlalbq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); +__ai __attribute__((target("neon"))) int8x8_t vtbx4_s8(int8x8_t __p0, int8x8x4_t __p1, int8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vtbx4_v((int8x8_t)__rev0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vbfmlaltq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); +__ai __attribute__((target("neon"))) poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4); return __ret; } #else -__ai __attribute__((target("bf16"))) float32x4_t vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 
7, 6, 5, 4, 3, 2, 1, 0); - bfloat16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vbfmlaltq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai __attribute__((target("bf16"))) float32x4_t __noswap_vbfmlaltq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vbfmlaltq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); +__ai __attribute__((target("neon"))) poly8x8x2_t vtrn_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8x2_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vbfmmlaq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); +__ai __attribute__((target("neon"))) poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5); return __ret; } #else -__ai __attribute__((target("bf16"))) float32x4_t vbfmmlaq_f32(float32x4_t __p0, bfloat16x8_t __p1, bfloat16x8_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - bfloat16x8_t __rev2; __rev2 = 
__builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vbfmmlaq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly16x4x2_t vtrn_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4x2_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { - bfloat16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); +__ai __attribute__((target("neon"))) poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); return __ret; } #else -__ai __attribute__((target("bf16"))) bfloat16x8_t vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { - bfloat16x8_t __ret; - bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 1, 2, 3, 4, 5, 6, 7); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t __noswap_vcombine_bf16(bfloat16x4_t __p0, bfloat16x4_t __p1) { - bfloat16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1, 2, 3, 4, 5, 6, 7); +__ai __attribute__((target("neon"))) poly8x16x2_t vtrnq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16x2_t __ret; + 
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif -#define vcreate_bf16(__p0) __extension__ ({ \ - bfloat16x4_t __ret; \ - uint64_t __promote = __p0; \ - __ret = (bfloat16x4_t)(__promote); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_134) { - float32x4_t __ret_134; -bfloat16x4_t __reint_134 = __p0_134; -int32x4_t __reint1_134 = vshll_n_s16(*(int16x4_t *) &__reint_134, 16); - __ret_134 = *(float32x4_t *) &__reint1_134; - return __ret_134; +__ai __attribute__((target("neon"))) poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); + return __ret; } #else -__ai __attribute__((target("bf16"))) float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_135) { - float32x4_t __ret_135; - bfloat16x4_t __rev0_135; __rev0_135 = __builtin_shufflevector(__p0_135, __p0_135, 3, 2, 1, 0); -bfloat16x4_t __reint_135 = __rev0_135; -int32x4_t __reint1_135 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_135, 16); - __ret_135 = *(float32x4_t *) &__reint1_135; - __ret_135 = __builtin_shufflevector(__ret_135, __ret_135, 3, 2, 1, 0); - return __ret_135; -} -__ai __attribute__((target("bf16"))) float32x4_t __noswap_vcvt_f32_bf16(bfloat16x4_t __p0_136) { - float32x4_t __ret_136; -bfloat16x4_t __reint_136 = __p0_136; -int32x4_t __reint1_136 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_136, 16); - 
__ret_136 = *(float32x4_t *) &__reint1_136; - return __ret_136; +__ai __attribute__((target("neon"))) poly16x8x2_t vtrnq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8x2_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; } #endif -__ai __attribute__((target("bf16"))) float32_t vcvtah_f32_bf16(bfloat16_t __p0) { - float32_t __ret; -bfloat16_t __reint = __p0; -int32_t __reint1 = *(int32_t *) &__reint << 16; - __ret = *(float32_t *) &__reint1; +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } -__ai __attribute__((target("bf16"))) bfloat16_t vcvth_bf16_f32(float32_t __p0) { - bfloat16_t __ret; - __ret = (bfloat16_t) __builtin_neon_vcvth_bf16_f32(__p0); +#else +__ai __attribute__((target("neon"))) uint8x16x2_t vtrnq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16x2_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -#ifdef __LITTLE_ENDIAN__ 
-#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__s0, __p1); \ - __ret; \ -}) -#else -#define vduph_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (bfloat16_t) __builtin_neon_vduph_lane_bf16((bfloat16x4_t)__rev0, __p1); \ - __ret; \ -}) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_bf16(__p0_137, __p1_137) __extension__ ({ \ - bfloat16x8_t __ret_137; \ - bfloat16x4_t __s0_137 = __p0_137; \ - __ret_137 = splatq_lane_bf16(__s0_137, __p1_137); \ - __ret_137; \ -}) +__ai __attribute__((target("neon"))) uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} #else -#define vdupq_lane_bf16(__p0_138, __p1_138) __extension__ ({ \ - bfloat16x8_t __ret_138; \ - bfloat16x4_t __s0_138 = __p0_138; \ - bfloat16x4_t __rev0_138; __rev0_138 = __builtin_shufflevector(__s0_138, __s0_138, 3, 2, 1, 0); \ - __ret_138 = __noswap_splatq_lane_bf16(__rev0_138, __p1_138); \ - __ret_138 = __builtin_shufflevector(__ret_138, __ret_138, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_138; \ -}) +__ai __attribute__((target("neon"))) uint32x4x2_t vtrnq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_lane_bf16(__p0_139, __p1_139) __extension__ ({ \ - 
bfloat16x4_t __ret_139; \ - bfloat16x4_t __s0_139 = __p0_139; \ - __ret_139 = splat_lane_bf16(__s0_139, __p1_139); \ - __ret_139; \ -}) +__ai __attribute__((target("neon"))) uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} #else -#define vdup_lane_bf16(__p0_140, __p1_140) __extension__ ({ \ - bfloat16x4_t __ret_140; \ - bfloat16x4_t __s0_140 = __p0_140; \ - bfloat16x4_t __rev0_140; __rev0_140 = __builtin_shufflevector(__s0_140, __s0_140, 3, 2, 1, 0); \ - __ret_140 = __noswap_splat_lane_bf16(__rev0_140, __p1_140); \ - __ret_140 = __builtin_shufflevector(__ret_140, __ret_140, 3, 2, 1, 0); \ - __ret_140; \ -}) +__ai __attribute__((target("neon"))) uint16x8x2_t vtrnq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8x2_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - __ret = (bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} #else -#define vduph_laneq_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = 
(bfloat16_t) __builtin_neon_vduph_laneq_bf16((bfloat16x8_t)__rev0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16x2_t vtrnq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16x2_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_bf16(__p0_141, __p1_141) __extension__ ({ \ - bfloat16x8_t __ret_141; \ - bfloat16x8_t __s0_141 = __p0_141; \ - __ret_141 = splatq_laneq_bf16(__s0_141, __p1_141); \ - __ret_141; \ -}) +__ai __attribute__((target("neon"))) float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} #else -#define vdupq_laneq_bf16(__p0_142, __p1_142) __extension__ ({ \ - bfloat16x8_t __ret_142; \ - bfloat16x8_t __s0_142 = __p0_142; \ - bfloat16x8_t __rev0_142; __rev0_142 = __builtin_shufflevector(__s0_142, __s0_142, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_142 = __noswap_splatq_laneq_bf16(__rev0_142, __p1_142); \ - __ret_142 = __builtin_shufflevector(__ret_142, __ret_142, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_142; \ -}) +__ai __attribute__((target("neon"))) float32x4x2_t vtrnq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4x2_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, 
(int8x16_t)__rev1, 41); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_bf16(__p0_143, __p1_143) __extension__ ({ \ - bfloat16x4_t __ret_143; \ - bfloat16x8_t __s0_143 = __p0_143; \ - __ret_143 = splat_laneq_bf16(__s0_143, __p1_143); \ - __ret_143; \ -}) +__ai __attribute__((target("neon"))) int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} #else -#define vdup_laneq_bf16(__p0_144, __p1_144) __extension__ ({ \ - bfloat16x4_t __ret_144; \ - bfloat16x8_t __s0_144 = __p0_144; \ - bfloat16x8_t __rev0_144; __rev0_144 = __builtin_shufflevector(__s0_144, __s0_144, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_144 = __noswap_splat_laneq_bf16(__rev0_144, __p1_144); \ - __ret_144 = __builtin_shufflevector(__ret_144, __ret_144, 3, 2, 1, 0); \ - __ret_144; \ -}) +__ai __attribute__((target("neon"))) int32x4x2_t vtrnq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; +__ai __attribute__((target("neon"))) int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); return 
__ret; } #else -__ai __attribute__((target("bf16"))) bfloat16x8_t vdupq_n_bf16(bfloat16_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t) {__p0, __p0, __p0, __p0, __p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int16x8x2_t vtrnq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8x2_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0}; +__ai __attribute__((target("neon"))) uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai __attribute__((target("bf16"))) bfloat16x4_t vdup_n_bf16(bfloat16_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t) {__p0, __p0, __p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8x8x2_t vtrn_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8x2_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 
2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) { - bfloat16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); +__ai __attribute__((target("neon"))) uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai __attribute__((target("bf16"))) bfloat16x4_t vget_high_bf16(bfloat16x8_t __p0) { - bfloat16x4_t __ret; - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 4, 5, 6, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t __noswap_vget_high_bf16(bfloat16x8_t __p0) { - bfloat16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 4, 5, 6, 7); +__ai __attribute__((target("neon"))) uint32x2x2_t vtrn_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} #else -#define vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - 
bfloat16x8_t __s0 = __p0; \ - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x8_t __s0 = __p0; \ - __ret = (bfloat16_t) __builtin_neon_vgetq_lane_bf16((bfloat16x8_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4x2_t vtrn_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4x2_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vget_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} #else -#define vget_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - bfloat16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vget_lane_bf16(__p0, __p1) __extension__ ({ \ - bfloat16_t __ret; \ - bfloat16x4_t __s0 = __p0; \ - __ret = (bfloat16_t) __builtin_neon_vget_lane_bf16((bfloat16x4_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8x2_t vtrn_s8(int8x8_t __p0, int8x8_t __p1) 
{ + int8x8x2_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) { - bfloat16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); +__ai __attribute__((target("neon"))) float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else -__ai __attribute__((target("bf16"))) bfloat16x4_t vget_low_bf16(bfloat16x8_t __p0) { - bfloat16x4_t __ret; - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 0, 1, 2, 3); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float32x2x2_t vtrn_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); return __ret; } -__ai __attribute__((target("bf16"))) bfloat16x4_t __noswap_vget_low_bf16(bfloat16x8_t __p0) { - bfloat16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0, 1, 2, 3); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2x2_t 
vtrn_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2x2_t vtrn_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_bf16(__p0) __extension__ ({ \ - bfloat16x8_t __ret; \ - __ret = (bfloat16x8_t) __builtin_neon_vld1q_bf16(__p0, 43); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} #else -#define vld1q_bf16(__p0) __extension__ ({ \ - bfloat16x8_t __ret; \ - __ret = (bfloat16x8_t) __builtin_neon_vld1q_bf16(__p0, 43); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x4x2_t vtrn_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4x2_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_bf16(__p0) __extension__ ({ \ - bfloat16x4_t __ret; \ - __ret = (bfloat16x4_t) __builtin_neon_vld1_bf16(__p0, 11); \ - __ret; \ -}) +__ai 
__attribute__((target("neon"))) float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} #else -#define vld1_bf16(__p0) __extension__ ({ \ - bfloat16x4_t __ret; \ - __ret = (bfloat16x4_t) __builtin_neon_vld1_bf16(__p0, 11); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vtrnq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_bf16(__p0) __extension__ ({ \ - bfloat16x8_t __ret; \ - __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_bf16(__p0, 43); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} #else -#define vld1q_dup_bf16(__p0) __extension__ ({ \ - bfloat16x8_t __ret; \ - __ret = (bfloat16x8_t) __builtin_neon_vld1q_dup_bf16(__p0, 43); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vtrn_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 
8); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_dup_bf16(__p0) __extension__ ({ \ - bfloat16x4_t __ret; \ - __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_bf16(__p0, 11); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} #else -#define vld1_dup_bf16(__p0) __extension__ ({ \ - bfloat16x4_t __ret; \ - __ret = (bfloat16x4_t) __builtin_neon_vld1_dup_bf16(__p0, 11); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vtst_p8(poly8x8_t __p0, poly8x8_t __p1) { + uint8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16x8_t __s1 = __p1; \ - __ret = (bfloat16x8_t) __builtin_neon_vld1q_lane_bf16(__p0, (int8x16_t)__s1, __p2, 43); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} #else -#define vld1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16x8_t __s1 = __p1; \ - bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (bfloat16x8_t) 
__builtin_neon_vld1q_lane_bf16(__p0, (int8x16_t)__rev1, __p2, 43); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vtst_p16(poly16x4_t __p0, poly16x4_t __p1) { + uint16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16x4_t __s1 = __p1; \ - __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_bf16(__p0, (int8x8_t)__s1, __p2, 11); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} #else -#define vld1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16x4_t __s1 = __p1; \ - bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (bfloat16x4_t) __builtin_neon_vld1_lane_bf16(__p0, (int8x8_t)__rev1, __p2, 11); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vtstq_p8(poly8x16_t __p0, poly8x16_t __p1) { + uint8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return 
__ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_bf16_x2(__p0) __extension__ ({ \ - bfloat16x8x2_t __ret; \ - __builtin_neon_vld1q_bf16_x2(&__ret, __p0, 43); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} #else -#define vld1q_bf16_x2(__p0) __extension__ ({ \ - bfloat16x8x2_t __ret; \ - __builtin_neon_vld1q_bf16_x2(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vtstq_p16(poly16x8_t __p0, poly16x8_t __p1) { + uint16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_bf16_x2(__p0) __extension__ ({ \ - bfloat16x4x2_t __ret; \ - __builtin_neon_vld1_bf16_x2(&__ret, __p0, 11); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vtstq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} #else -#define vld1_bf16_x2(__p0) __extension__ ({ \ - bfloat16x4x2_t __ret; \ - __builtin_neon_vld1_bf16_x2(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vtstq_u8(uint8x16_t 
__p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_bf16_x3(__p0) __extension__ ({ \ - bfloat16x8x3_t __ret; \ - __builtin_neon_vld1q_bf16_x3(&__ret, __p0, 43); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} #else -#define vld1q_bf16_x3(__p0) __extension__ ({ \ - bfloat16x8x3_t __ret; \ - __builtin_neon_vld1q_bf16_x3(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vtstq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_bf16_x3(__p0) __extension__ ({ \ - bfloat16x4x3_t __ret; \ - __builtin_neon_vld1_bf16_x3(&__ret, __p0, 11); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t 
vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} #else -#define vld1_bf16_x3(__p0) __extension__ ({ \ - bfloat16x4x3_t __ret; \ - __builtin_neon_vld1_bf16_x3(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vtstq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_bf16_x4(__p0) __extension__ ({ \ - bfloat16x8x4_t __ret; \ - __builtin_neon_vld1q_bf16_x4(&__ret, __p0, 43); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} #else -#define vld1q_bf16_x4(__p0) __extension__ ({ \ - bfloat16x8x4_t __ret; \ - __builtin_neon_vld1q_bf16_x4(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai 
__attribute__((target("neon"))) uint8x16_t vtstq_s8(int8x16_t __p0, int8x16_t __p1) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld1_bf16_x4(__p0) __extension__ ({ \ - bfloat16x4x4_t __ret; \ - __builtin_neon_vld1_bf16_x4(&__ret, __p0, 11); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} #else -#define vld1_bf16_x4(__p0) __extension__ ({ \ - bfloat16x4x4_t __ret; \ - __builtin_neon_vld1_bf16_x4(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vtstq_s32(int32x4_t __p0, int32x4_t __p1) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_bf16(__p0) __extension__ ({ \ - bfloat16x8x2_t __ret; \ - 
__builtin_neon_vld2q_bf16(&__ret, __p0, 43); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} #else -#define vld2q_bf16(__p0) __extension__ ({ \ - bfloat16x8x2_t __ret; \ - __builtin_neon_vld2q_bf16(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vtstq_s16(int16x8_t __p0, int16x8_t __p1) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2_bf16(__p0) __extension__ ({ \ - bfloat16x4x2_t __ret; \ - __builtin_neon_vld2_bf16(&__ret, __p0, 11); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} #else -#define vld2_bf16(__p0) __extension__ ({ \ - bfloat16x4x2_t __ret; \ - __builtin_neon_vld2_bf16(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vtst_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; 
__rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_bf16(__p0) __extension__ ({ \ - bfloat16x8x2_t __ret; \ - __builtin_neon_vld2q_dup_bf16(&__ret, __p0, 43); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} #else -#define vld2q_dup_bf16(__p0) __extension__ ({ \ - bfloat16x8x2_t __ret; \ - __builtin_neon_vld2q_dup_bf16(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vtst_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2_dup_bf16(__p0) __extension__ ({ \ - bfloat16x4x2_t __ret; \ - __builtin_neon_vld2_dup_bf16(&__ret, __p0, 11); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} #else -#define vld2_dup_bf16(__p0) __extension__ ({ \ - bfloat16x4x2_t __ret; \ - __builtin_neon_vld2_dup_bf16(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 
2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vtst_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x2_t __ret; \ - bfloat16x8x2_t __s1 = __p1; \ - __builtin_neon_vld2q_lane_bf16(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} #else -#define vld2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x2_t __ret; \ - bfloat16x8x2_t __s1 = __p1; \ - bfloat16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld2q_lane_bf16(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vtst_s8(int8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = 
(uint8x8_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x2_t __ret; \ - bfloat16x4x2_t __s1 = __p1; \ - __builtin_neon_vld2_lane_bf16(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} #else -#define vld2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x2_t __ret; \ - bfloat16x4x2_t __s1 = __p1; \ - bfloat16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vld2_lane_bf16(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vtst_s32(int32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_bf16(__p0) __extension__ ({ \ - bfloat16x8x3_t __ret; \ - __builtin_neon_vld3q_bf16(&__ret, __p0, 43); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) 
__builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} #else -#define vld3q_bf16(__p0) __extension__ ({ \ - bfloat16x8x3_t __ret; \ - __builtin_neon_vld3q_bf16(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vtst_s16(int16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vtst_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld3_bf16(__p0) __extension__ ({ \ - bfloat16x4x3_t __ret; \ - __builtin_neon_vld3_bf16(&__ret, __p0, 11); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} #else -#define vld3_bf16(__p0) __extension__ ({ \ - bfloat16x4x3_t __ret; \ - __builtin_neon_vld3_bf16(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly8x8x2_t vuzp_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8x2_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + 
__builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_bf16(__p0) __extension__ ({ \ - bfloat16x8x3_t __ret; \ - __builtin_neon_vld3q_dup_bf16(&__ret, __p0, 43); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5); + return __ret; +} #else -#define vld3q_dup_bf16(__p0) __extension__ ({ \ - bfloat16x8x3_t __ret; \ - __builtin_neon_vld3q_dup_bf16(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly16x4x2_t vuzp_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4x2_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld3_dup_bf16(__p0) __extension__ ({ \ - bfloat16x4x3_t __ret; \ - __builtin_neon_vld3_dup_bf16(&__ret, __p0, 11); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); + 
return __ret; +} #else -#define vld3_dup_bf16(__p0) __extension__ ({ \ - bfloat16x4x3_t __ret; \ - __builtin_neon_vld3_dup_bf16(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly8x16x2_t vuzpq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16x2_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x3_t __ret; \ - bfloat16x8x3_t __s1 = __p1; \ - __builtin_neon_vld3q_lane_bf16(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); + return __ret; +} #else -#define vld3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x3_t __ret; \ - bfloat16x8x3_t __s1 = __p1; \ - bfloat16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = 
__builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld3q_lane_bf16(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly16x8x2_t vuzpq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8x2_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x3_t __ret; \ - bfloat16x4x3_t __s1 = __p1; \ - __builtin_neon_vld3_lane_bf16(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} #else -#define vld3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x3_t __ret; \ - bfloat16x4x3_t __s1 = __p1; \ - bfloat16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 
2, 1, 0); \ - __builtin_neon_vld3_lane_bf16(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16x2_t vuzpq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16x2_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_bf16(__p0) __extension__ ({ \ - bfloat16x8x4_t __ret; \ - __builtin_neon_vld4q_bf16(&__ret, __p0, 43); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} #else -#define vld4q_bf16(__p0) __extension__ ({ \ - bfloat16x8x4_t __ret; \ - __builtin_neon_vld4q_bf16(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 
3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4x2_t vuzpq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld4_bf16(__p0) __extension__ ({ \ - bfloat16x4x4_t __ret; \ - __builtin_neon_vld4_bf16(&__ret, __p0, 11); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} #else -#define vld4_bf16(__p0) __extension__ ({ \ - bfloat16x4x4_t __ret; \ - __builtin_neon_vld4_bf16(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8x2_t vuzpq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8x2_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return 
__ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_bf16(__p0) __extension__ ({ \ - bfloat16x8x4_t __ret; \ - __builtin_neon_vld4q_dup_bf16(&__ret, __p0, 43); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} #else -#define vld4q_dup_bf16(__p0) __extension__ ({ \ - bfloat16x8x4_t __ret; \ - __builtin_neon_vld4q_dup_bf16(&__ret, __p0, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16x2_t vuzpq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16x2_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld4_dup_bf16(__p0) __extension__ ({ \ - bfloat16x4x4_t __ret; \ - __builtin_neon_vld4_dup_bf16(&__ret, __p0, 11); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); + 
return __ret; +} #else -#define vld4_dup_bf16(__p0) __extension__ ({ \ - bfloat16x4x4_t __ret; \ - __builtin_neon_vld4_dup_bf16(&__ret, __p0, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x4x2_t vuzpq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4x2_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x4_t __ret; \ - bfloat16x8x4_t __s1 = __p1; \ - __builtin_neon_vld4q_lane_bf16(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} #else -#define vld4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x4_t __ret; \ - bfloat16x8x4_t __s1 = __p1; \ - bfloat16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 
3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld4q_lane_bf16(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4x2_t vuzpq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x4_t __ret; \ - bfloat16x4x4_t __s1 = __p1; \ - __builtin_neon_vld4_lane_bf16(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} #else -#define vld4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x4_t __ret; \ - bfloat16x4x4_t __s1 = __p1; \ - bfloat16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vld4_lane_bf16(&__ret, __p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x8x2_t vuzpq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8x2_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16_t __s0 = __p0; \ - bfloat16x8_t __s1 = __p1; \ - __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 16); + return __ret; +} #else -#define vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16_t __s0 = __p0; \ - bfloat16x8_t __s1 = __p1; \ - bfloat16x8_t __rev1; __rev1 = 
__builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8_t __ret; \ - bfloat16_t __s0 = __p0; \ - bfloat16x8_t __s1 = __p1; \ - __ret = (bfloat16x8_t) __builtin_neon_vsetq_lane_bf16(__s0, (bfloat16x8_t)__s1, __p2); \ - __ret; \ -}) -#endif +__ai __attribute__((target("neon"))) uint8x8x2_t vuzp_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8x2_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16); -#ifdef __LITTLE_ENDIAN__ -#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16_t __s0 = __p0; \ - bfloat16x4_t __s1 = __p1; \ - __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16_t __s0 = __p0; \ - bfloat16x4_t __s1 = __p1; \ - bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vset_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4_t __ret; \ - bfloat16_t __s0 = __p0; \ - bfloat16x4_t __s1 = __p1; \ - __ret = (bfloat16x4_t) __builtin_neon_vset_lane_bf16(__s0, (bfloat16x4_t)__s1, __p2); \ - __ret; \ -}) + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 
0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8_t __s1 = __p1; \ - __builtin_neon_vst1q_bf16(__p0, (int8x16_t)__s1, 43); \ -}) +__ai __attribute__((target("neon"))) uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} #else -#define vst1q_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8_t __s1 = __p1; \ - bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_bf16(__p0, (int8x16_t)__rev1, 43); \ -}) +__ai __attribute__((target("neon"))) uint32x2x2_t vuzp_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4_t __s1 = __p1; \ - __builtin_neon_vst1_bf16(__p0, (int8x8_t)__s1, 11); \ -}) +__ai __attribute__((target("neon"))) uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} #else -#define vst1_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4_t __s1 = __p1; \ - bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1_bf16(__p0, (int8x8_t)__rev1, 11); \ -}) +__ai __attribute__((target("neon"))) uint16x4x2_t vuzp_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4x2_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_bf16(__p0, (int8x16_t)__s1, __p2, 43); \ -}) +__ai __attribute__((target("neon"))) int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); + return __ret; +} #else -#define vst1q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8_t __s1 = __p1; \ - bfloat16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_lane_bf16(__p0, (int8x16_t)__rev1, __p2, 43); \ -}) +__ai __attribute__((target("neon"))) int8x8x2_t vuzp_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8x2_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4_t __s1 = __p1; \ - __builtin_neon_vst1_lane_bf16(__p0, (int8x8_t)__s1, __p2, 11); \ -}) +__ai __attribute__((target("neon"))) float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} #else -#define vst1_lane_bf16(__p0, __p1, __p2) 
__extension__ ({ \ - bfloat16x4_t __s1 = __p1; \ - bfloat16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __builtin_neon_vst1_lane_bf16(__p0, (int8x8_t)__rev1, __p2, 11); \ -}) +__ai __attribute__((target("neon"))) float32x2x2_t vuzp_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \ - bfloat16x8x2_t __s1 = __p1; \ - __builtin_neon_vst1q_bf16_x2(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \ -}) +__ai __attribute__((target("neon"))) int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} #else -#define vst1q_bf16_x2(__p0, __p1) __extension__ ({ \ - bfloat16x8x2_t __s1 = __p1; \ - bfloat16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_bf16_x2(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \ -}) +__ai __attribute__((target("neon"))) int32x2x2_t vuzp_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = 
__builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \ - bfloat16x4x2_t __s1 = __p1; \ - __builtin_neon_vst1_bf16_x2(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \ -}) +__ai __attribute__((target("neon"))) int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} #else -#define vst1_bf16_x2(__p0, __p1) __extension__ ({ \ - bfloat16x4x2_t __s1 = __p1; \ - bfloat16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst1_bf16_x2(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \ -}) +__ai __attribute__((target("neon"))) int16x4x2_t vuzp_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4x2_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \ - bfloat16x8x3_t __s1 = __p1; \ - __builtin_neon_vst1q_bf16_x3(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \ -}) +__ai __attribute__((target("neon"))) float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} #else -#define vst1q_bf16_x3(__p0, __p1) __extension__ ({ \ - bfloat16x8x3_t __s1 = __p1; \ - bfloat16x8x3_t __rev1; \ - __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_bf16_x3(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \ -}) +__ai __attribute__((target("neon"))) float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vuzpq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \ - bfloat16x4x3_t __s1 = __p1; \ - __builtin_neon_vst1_bf16_x3(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \ -}) +__ai __attribute__((target("neon"))) float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} #else -#define vst1_bf16_x3(__p0, __p1) __extension__ ({ \ - bfloat16x4x3_t __s1 = __p1; \ - bfloat16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst1_bf16_x3(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \ -}) +__ai __attribute__((target("neon"))) float16x4x2_t vuzp_f16(float16x4_t __p0, 
float16x4_t __p1) { + float16x4x2_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vuzp_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \ - bfloat16x8x4_t __s1 = __p1; \ - __builtin_neon_vst1q_bf16_x4(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \ -}) +__ai __attribute__((target("neon"))) poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} #else -#define vst1q_bf16_x4(__p0, __p1) __extension__ ({ \ - bfloat16x8x4_t __s1 = __p1; \ - bfloat16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst1q_bf16_x4(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \ -}) +__ai __attribute__((target("neon"))) poly8x8x2_t vzip_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8x2_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 4); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], 
__ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \ - bfloat16x4x4_t __s1 = __p1; \ - __builtin_neon_vst1_bf16_x4(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \ -}) +__ai __attribute__((target("neon"))) poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 5); + return __ret; +} #else -#define vst1_bf16_x4(__p0, __p1) __extension__ ({ \ - bfloat16x4x4_t __s1 = __p1; \ - bfloat16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst1_bf16_x4(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \ -}) +__ai __attribute__((target("neon"))) poly16x4x2_t vzip_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4x2_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 5); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst2q_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8x2_t __s1 = __p1; \ - __builtin_neon_vst2q_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 43); \ -}) +__ai __attribute__((target("neon"))) 
poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 36); + return __ret; +} #else -#define vst2q_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8x2_t __s1 = __p1; \ - bfloat16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 43); \ -}) +__ai __attribute__((target("neon"))) poly8x16x2_t vzipq_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16x2_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 36); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst2_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4x2_t __s1 = __p1; \ - __builtin_neon_vst2_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 11); \ -}) +__ai __attribute__((target("neon"))) poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 37); + return __ret; +} #else -#define vst2_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4x2_t __s1 = __p1; \ - bfloat16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - 
__builtin_neon_vst2_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], 11); \ -}) +__ai __attribute__((target("neon"))) poly16x8x2_t vzipq_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8x2_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 37); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 43); \ -}) +__ai __attribute__((target("neon"))) uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} #else -#define vst2q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x2_t __s1 = __p1; \ - bfloat16x8x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_lane_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 43); \ -}) +__ai __attribute__((target("neon"))) uint8x16x2_t vzipq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16x2_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 48); + + __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 11); \ -}) +__ai __attribute__((target("neon"))) uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 50); + return __ret; +} #else -#define vst2_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x2_t __s1 = __p1; \ - bfloat16x4x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __builtin_neon_vst2_lane_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], __p2, 11); \ -}) +__ai __attribute__((target("neon"))) uint32x4x2_t vzipq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 50); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst3q_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8x3_t __s1 = __p1; \ - __builtin_neon_vst3q_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 43); \ -}) +__ai __attribute__((target("neon"))) uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8x2_t __ret; + 
__builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} #else -#define vst3q_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8x3_t __s1 = __p1; \ - bfloat16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 43); \ -}) +__ai __attribute__((target("neon"))) uint16x8x2_t vzipq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8x2_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 49); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst3_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4x3_t __s1 = __p1; \ - __builtin_neon_vst3_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 11); \ -}) +__ai __attribute__((target("neon"))) int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} #else -#define vst3_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4x3_t __s1 = __p1; \ - bfloat16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - 
__builtin_neon_vst3_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], 11); \ -}) +__ai __attribute__((target("neon"))) int8x16x2_t vzipq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16x2_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 32); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 43); \ -}) +__ai __attribute__((target("neon"))) float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} #else -#define vst3q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x3_t __s1 = __p1; \ - bfloat16x8x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_lane_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 43); \ -}) +__ai __attribute__((target("neon"))) float32x4x2_t vzipq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4x2_t __ret; + float32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 41); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 11); \ -}) +__ai __attribute__((target("neon"))) int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} #else -#define vst3_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x3_t __s1 = __p1; \ - bfloat16x4x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __builtin_neon_vst3_lane_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], __p2, 11); \ -}) +__ai __attribute__((target("neon"))) int32x4x2_t vzipq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 34); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst4q_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8x4_t __s1 = __p1; \ - 
__builtin_neon_vst4q_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 43); \ -}) +__ai __attribute__((target("neon"))) int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} #else -#define vst4q_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x8x4_t __s1 = __p1; \ - bfloat16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 43); \ -}) +__ai __attribute__((target("neon"))) int16x8x2_t vzipq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8x2_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 33); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst4_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4x4_t __s1 = __p1; \ - __builtin_neon_vst4_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 11); \ -}) +__ai __attribute__((target("neon"))) uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 
16); + return __ret; +} #else -#define vst4_bf16(__p0, __p1) __extension__ ({ \ - bfloat16x4x4_t __s1 = __p1; \ - bfloat16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], 11); \ -}) +__ai __attribute__((target("neon"))) uint8x8x2_t vzip_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8x2_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 16); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_bf16(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 43); \ -}) +__ai __attribute__((target("neon"))) uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 18); + return __ret; +} #else -#define vst4q_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x8x4_t __s1 = __p1; \ - bfloat16x8x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 7, 6, 5, 4, 3, 2, 1, 0); \ - 
__rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_lane_bf16(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 43); \ -}) +__ai __attribute__((target("neon"))) uint32x2x2_t vzip_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 18); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_bf16(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 11); \ -}) +__ai __attribute__((target("neon"))) uint16x4x2_t vzip_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 17); + return __ret; +} #else -#define vst4_lane_bf16(__p0, __p1, __p2) __extension__ ({ \ - bfloat16x4x4_t __s1 = __p1; \ - bfloat16x4x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 3, 2, 1, 0); \ - __builtin_neon_vst4_lane_bf16(__p0, (int8x8_t)__rev1.val[0], (int8x8_t)__rev1.val[1], (int8x8_t)__rev1.val[2], (int8x8_t)__rev1.val[3], __p2, 11); \ -}) +__ai __attribute__((target("neon"))) uint16x4x2_t 
vzip_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4x2_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 17); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("dotprod"))) uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vdotq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); +__ai __attribute__((target("neon"))) int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai __attribute__((target("dotprod"))) uint32x4_t vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vdotq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int8x8x2_t vzip_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8x2_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 0); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], 
__ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai __attribute__((target("dotprod"))) uint32x4_t __noswap_vdotq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vdotq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2x2_t vzip_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 9); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2x2_t vzip_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 2); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4x2_t 
vzip_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4x2_t vzip_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4x2_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 1); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8x2_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __builtin_neon_vzipq_v(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + __builtin_neon_vzip_v(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4x2_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + 
float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __builtin_neon_vzip_v(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); + + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("dotprod"))) int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + __ret = (int32x4_t) __builtin_neon_vqrdmlahq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #else -__ai __attribute__((target("dotprod"))) int32x4_t vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vdotq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqrdmlahq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai __attribute__((target("dotprod"))) int32x4_t __noswap_vdotq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai 
__attribute__((target("v8.1a,neon"))) int32x4_t __noswap_vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); + __ret = (int32x4_t) __builtin_neon_vqrdmlahq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("dotprod"))) uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vdot_u32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18); +__ai __attribute__((target("v8.1a,neon"))) int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmlahq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); return __ret; } #else -__ai __attribute__((target("dotprod"))) uint32x2_t vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vdot_u32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("v8.1a,neon"))) int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqrdmlahq_s16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); + __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai __attribute__((target("dotprod"))) uint32x2_t __noswap_vdot_u32(uint32x2_t __p0, uint8x8_t __p1, uint8x8_t __p2) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vdot_u32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 18); +__ai __attribute__((target("v8.1a,neon"))) int16x8_t __noswap_vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmlahq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("dotprod"))) int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + __ret = (int32x2_t) __builtin_neon_vqrdmlah_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); return __ret; } #else -__ai __attribute__((target("dotprod"))) int32x2_t vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int32x2_t) __builtin_neon_vdot_s32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqrdmlah_s32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } 
-__ai __attribute__((target("dotprod"))) int32x2_t __noswap_vdot_s32(int32x2_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int32x2_t __noswap_vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); + __ret = (int32x2_t) __builtin_neon_vqrdmlah_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vdotq_lane_u32(__p0_145, __p1_145, __p2_145, __p3_145) __extension__ ({ \ - uint32x4_t __ret_145; \ - uint32x4_t __s0_145 = __p0_145; \ - uint8x16_t __s1_145 = __p1_145; \ - uint8x8_t __s2_145 = __p2_145; \ -uint8x8_t __reint_145 = __s2_145; \ -uint32x4_t __reint1_145 = splatq_lane_u32(*(uint32x2_t *) &__reint_145, __p3_145); \ - __ret_145 = vdotq_u32(__s0_145, __s1_145, *(uint8x16_t *) &__reint1_145); \ - __ret_145; \ +__ai __attribute__((target("v8.1a,neon"))) int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmlah_s16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); + return __ret; +} +#else +__ai __attribute__((target("v8.1a,neon"))) int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqrdmlah_s16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a,neon"))) int16x4_t __noswap_vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmlah_s16((int8x8_t)__p0, (int8x8_t)__p1, 
(int8x8_t)__p2, 1); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlahq_lane_s32(__p0_134, __p1_134, __p2_134, __p3_134) __extension__ ({ \ + int32x4_t __ret_134; \ + int32x4_t __s0_134 = __p0_134; \ + int32x4_t __s1_134 = __p1_134; \ + int32x2_t __s2_134 = __p2_134; \ + __ret_134 = vqrdmlahq_s32(__s0_134, __s1_134, splatq_lane_s32(__s2_134, __p3_134)); \ + __ret_134; \ }) #else -#define vdotq_lane_u32(__p0_146, __p1_146, __p2_146, __p3_146) __extension__ ({ \ - uint32x4_t __ret_146; \ - uint32x4_t __s0_146 = __p0_146; \ - uint8x16_t __s1_146 = __p1_146; \ - uint8x8_t __s2_146 = __p2_146; \ - uint32x4_t __rev0_146; __rev0_146 = __builtin_shufflevector(__s0_146, __s0_146, 3, 2, 1, 0); \ - uint8x16_t __rev1_146; __rev1_146 = __builtin_shufflevector(__s1_146, __s1_146, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev2_146; __rev2_146 = __builtin_shufflevector(__s2_146, __s2_146, 7, 6, 5, 4, 3, 2, 1, 0); \ -uint8x8_t __reint_146 = __rev2_146; \ -uint32x4_t __reint1_146 = __noswap_splatq_lane_u32(*(uint32x2_t *) &__reint_146, __p3_146); \ - __ret_146 = __noswap_vdotq_u32(__rev0_146, __rev1_146, *(uint8x16_t *) &__reint1_146); \ - __ret_146 = __builtin_shufflevector(__ret_146, __ret_146, 3, 2, 1, 0); \ - __ret_146; \ +#define vqrdmlahq_lane_s32(__p0_135, __p1_135, __p2_135, __p3_135) __extension__ ({ \ + int32x4_t __ret_135; \ + int32x4_t __s0_135 = __p0_135; \ + int32x4_t __s1_135 = __p1_135; \ + int32x2_t __s2_135 = __p2_135; \ + int32x4_t __rev0_135; __rev0_135 = __builtin_shufflevector(__s0_135, __s0_135, 3, 2, 1, 0); \ + int32x4_t __rev1_135; __rev1_135 = __builtin_shufflevector(__s1_135, __s1_135, 3, 2, 1, 0); \ + int32x2_t __rev2_135; __rev2_135 = __builtin_shufflevector(__s2_135, __s2_135, 1, 0); \ + __ret_135 = __noswap_vqrdmlahq_s32(__rev0_135, __rev1_135, __noswap_splatq_lane_s32(__rev2_135, __p3_135)); \ + __ret_135 = __builtin_shufflevector(__ret_135, __ret_135, 3, 2, 1, 0); \ + __ret_135; \ }) #endif #ifdef 
__LITTLE_ENDIAN__ -#define vdotq_lane_s32(__p0_147, __p1_147, __p2_147, __p3_147) __extension__ ({ \ - int32x4_t __ret_147; \ - int32x4_t __s0_147 = __p0_147; \ - int8x16_t __s1_147 = __p1_147; \ - int8x8_t __s2_147 = __p2_147; \ -int8x8_t __reint_147 = __s2_147; \ -int32x4_t __reint1_147 = splatq_lane_s32(*(int32x2_t *) &__reint_147, __p3_147); \ - __ret_147 = vdotq_s32(__s0_147, __s1_147, *(int8x16_t *) &__reint1_147); \ - __ret_147; \ +#define vqrdmlahq_lane_s16(__p0_136, __p1_136, __p2_136, __p3_136) __extension__ ({ \ + int16x8_t __ret_136; \ + int16x8_t __s0_136 = __p0_136; \ + int16x8_t __s1_136 = __p1_136; \ + int16x4_t __s2_136 = __p2_136; \ + __ret_136 = vqrdmlahq_s16(__s0_136, __s1_136, splatq_lane_s16(__s2_136, __p3_136)); \ + __ret_136; \ }) #else -#define vdotq_lane_s32(__p0_148, __p1_148, __p2_148, __p3_148) __extension__ ({ \ - int32x4_t __ret_148; \ - int32x4_t __s0_148 = __p0_148; \ - int8x16_t __s1_148 = __p1_148; \ - int8x8_t __s2_148 = __p2_148; \ - int32x4_t __rev0_148; __rev0_148 = __builtin_shufflevector(__s0_148, __s0_148, 3, 2, 1, 0); \ - int8x16_t __rev1_148; __rev1_148 = __builtin_shufflevector(__s1_148, __s1_148, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __rev2_148; __rev2_148 = __builtin_shufflevector(__s2_148, __s2_148, 7, 6, 5, 4, 3, 2, 1, 0); \ -int8x8_t __reint_148 = __rev2_148; \ -int32x4_t __reint1_148 = __noswap_splatq_lane_s32(*(int32x2_t *) &__reint_148, __p3_148); \ - __ret_148 = __noswap_vdotq_s32(__rev0_148, __rev1_148, *(int8x16_t *) &__reint1_148); \ - __ret_148 = __builtin_shufflevector(__ret_148, __ret_148, 3, 2, 1, 0); \ - __ret_148; \ +#define vqrdmlahq_lane_s16(__p0_137, __p1_137, __p2_137, __p3_137) __extension__ ({ \ + int16x8_t __ret_137; \ + int16x8_t __s0_137 = __p0_137; \ + int16x8_t __s1_137 = __p1_137; \ + int16x4_t __s2_137 = __p2_137; \ + int16x8_t __rev0_137; __rev0_137 = __builtin_shufflevector(__s0_137, __s0_137, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_137; __rev1_137 = 
__builtin_shufflevector(__s1_137, __s1_137, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_137; __rev2_137 = __builtin_shufflevector(__s2_137, __s2_137, 3, 2, 1, 0); \ + __ret_137 = __noswap_vqrdmlahq_s16(__rev0_137, __rev1_137, __noswap_splatq_lane_s16(__rev2_137, __p3_137)); \ + __ret_137 = __builtin_shufflevector(__ret_137, __ret_137, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_137; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdot_lane_u32(__p0_149, __p1_149, __p2_149, __p3_149) __extension__ ({ \ - uint32x2_t __ret_149; \ - uint32x2_t __s0_149 = __p0_149; \ - uint8x8_t __s1_149 = __p1_149; \ - uint8x8_t __s2_149 = __p2_149; \ -uint8x8_t __reint_149 = __s2_149; \ -uint32x2_t __reint1_149 = splat_lane_u32(*(uint32x2_t *) &__reint_149, __p3_149); \ - __ret_149 = vdot_u32(__s0_149, __s1_149, *(uint8x8_t *) &__reint1_149); \ - __ret_149; \ +#define vqrdmlah_lane_s32(__p0_138, __p1_138, __p2_138, __p3_138) __extension__ ({ \ + int32x2_t __ret_138; \ + int32x2_t __s0_138 = __p0_138; \ + int32x2_t __s1_138 = __p1_138; \ + int32x2_t __s2_138 = __p2_138; \ + __ret_138 = vqrdmlah_s32(__s0_138, __s1_138, splat_lane_s32(__s2_138, __p3_138)); \ + __ret_138; \ }) #else -#define vdot_lane_u32(__p0_150, __p1_150, __p2_150, __p3_150) __extension__ ({ \ - uint32x2_t __ret_150; \ - uint32x2_t __s0_150 = __p0_150; \ - uint8x8_t __s1_150 = __p1_150; \ - uint8x8_t __s2_150 = __p2_150; \ - uint32x2_t __rev0_150; __rev0_150 = __builtin_shufflevector(__s0_150, __s0_150, 1, 0); \ - uint8x8_t __rev1_150; __rev1_150 = __builtin_shufflevector(__s1_150, __s1_150, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev2_150; __rev2_150 = __builtin_shufflevector(__s2_150, __s2_150, 7, 6, 5, 4, 3, 2, 1, 0); \ -uint8x8_t __reint_150 = __rev2_150; \ -uint32x2_t __reint1_150 = __noswap_splat_lane_u32(*(uint32x2_t *) &__reint_150, __p3_150); \ - __ret_150 = __noswap_vdot_u32(__rev0_150, __rev1_150, *(uint8x8_t *) &__reint1_150); \ - __ret_150 = __builtin_shufflevector(__ret_150, __ret_150, 1, 0); \ - __ret_150; \ 
+#define vqrdmlah_lane_s32(__p0_139, __p1_139, __p2_139, __p3_139) __extension__ ({ \ + int32x2_t __ret_139; \ + int32x2_t __s0_139 = __p0_139; \ + int32x2_t __s1_139 = __p1_139; \ + int32x2_t __s2_139 = __p2_139; \ + int32x2_t __rev0_139; __rev0_139 = __builtin_shufflevector(__s0_139, __s0_139, 1, 0); \ + int32x2_t __rev1_139; __rev1_139 = __builtin_shufflevector(__s1_139, __s1_139, 1, 0); \ + int32x2_t __rev2_139; __rev2_139 = __builtin_shufflevector(__s2_139, __s2_139, 1, 0); \ + __ret_139 = __noswap_vqrdmlah_s32(__rev0_139, __rev1_139, __noswap_splat_lane_s32(__rev2_139, __p3_139)); \ + __ret_139 = __builtin_shufflevector(__ret_139, __ret_139, 1, 0); \ + __ret_139; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdot_lane_s32(__p0_151, __p1_151, __p2_151, __p3_151) __extension__ ({ \ - int32x2_t __ret_151; \ - int32x2_t __s0_151 = __p0_151; \ - int8x8_t __s1_151 = __p1_151; \ - int8x8_t __s2_151 = __p2_151; \ -int8x8_t __reint_151 = __s2_151; \ -int32x2_t __reint1_151 = splat_lane_s32(*(int32x2_t *) &__reint_151, __p3_151); \ - __ret_151 = vdot_s32(__s0_151, __s1_151, *(int8x8_t *) &__reint1_151); \ - __ret_151; \ +#define vqrdmlah_lane_s16(__p0_140, __p1_140, __p2_140, __p3_140) __extension__ ({ \ + int16x4_t __ret_140; \ + int16x4_t __s0_140 = __p0_140; \ + int16x4_t __s1_140 = __p1_140; \ + int16x4_t __s2_140 = __p2_140; \ + __ret_140 = vqrdmlah_s16(__s0_140, __s1_140, splat_lane_s16(__s2_140, __p3_140)); \ + __ret_140; \ }) #else -#define vdot_lane_s32(__p0_152, __p1_152, __p2_152, __p3_152) __extension__ ({ \ - int32x2_t __ret_152; \ - int32x2_t __s0_152 = __p0_152; \ - int8x8_t __s1_152 = __p1_152; \ - int8x8_t __s2_152 = __p2_152; \ - int32x2_t __rev0_152; __rev0_152 = __builtin_shufflevector(__s0_152, __s0_152, 1, 0); \ - int8x8_t __rev1_152; __rev1_152 = __builtin_shufflevector(__s1_152, __s1_152, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __rev2_152; __rev2_152 = __builtin_shufflevector(__s2_152, __s2_152, 7, 6, 5, 4, 3, 2, 1, 0); \ -int8x8_t __reint_152 = 
__rev2_152; \ -int32x2_t __reint1_152 = __noswap_splat_lane_s32(*(int32x2_t *) &__reint_152, __p3_152); \ - __ret_152 = __noswap_vdot_s32(__rev0_152, __rev1_152, *(int8x8_t *) &__reint1_152); \ - __ret_152 = __builtin_shufflevector(__ret_152, __ret_152, 1, 0); \ - __ret_152; \ +#define vqrdmlah_lane_s16(__p0_141, __p1_141, __p2_141, __p3_141) __extension__ ({ \ + int16x4_t __ret_141; \ + int16x4_t __s0_141 = __p0_141; \ + int16x4_t __s1_141 = __p1_141; \ + int16x4_t __s2_141 = __p2_141; \ + int16x4_t __rev0_141; __rev0_141 = __builtin_shufflevector(__s0_141, __s0_141, 3, 2, 1, 0); \ + int16x4_t __rev1_141; __rev1_141 = __builtin_shufflevector(__s1_141, __s1_141, 3, 2, 1, 0); \ + int16x4_t __rev2_141; __rev2_141 = __builtin_shufflevector(__s2_141, __s2_141, 3, 2, 1, 0); \ + __ret_141 = __noswap_vqrdmlah_s16(__rev0_141, __rev1_141, __noswap_splat_lane_s16(__rev2_141, __p3_141)); \ + __ret_141 = __builtin_shufflevector(__ret_141, __ret_141, 3, 2, 1, 0); \ + __ret_141; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vabdq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); +__ai __attribute__((target("v8.1a,neon"))) int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmlshq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vabdq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vabdq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai 
__attribute__((target("v8.1a,neon"))) int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vqrdmlshq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a,neon"))) int32x4_t __noswap_vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vqrdmlshq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vabd_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); +__ai __attribute__((target("v8.1a,neon"))) int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmlshq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vabd_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vabd_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("v8.1a,neon"))) int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + 
int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vqrdmlshq_s16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.1a,neon"))) int16x8_t __noswap_vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vqrdmlshq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vabsq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vabsq_f16((int8x16_t)__p0, 40); +__ai __attribute__((target("v8.1a,neon"))) int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmlsh_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vabsq_f16(float16x8_t __p0) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vabsq_f16((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("v8.1a,neon"))) int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (int32x2_t) __builtin_neon_vqrdmlsh_s32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} 
+__ai __attribute__((target("v8.1a,neon"))) int32x2_t __noswap_vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vqrdmlsh_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vabs_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vabs_f16((int8x8_t)__p0, 8); +__ai __attribute__((target("v8.1a,neon"))) int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmlsh_s16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vabs_f16(float16x4_t __p0) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vabs_f16((int8x8_t)__rev0, 8); +__ai __attribute__((target("v8.1a,neon"))) int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vqrdmlsh_s16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("v8.1a,neon"))) int16x4_t __noswap_vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vqrdmlsh_s16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __p0 + __p1; +#define 
vqrdmlshq_lane_s32(__p0_142, __p1_142, __p2_142, __p3_142) __extension__ ({ \ + int32x4_t __ret_142; \ + int32x4_t __s0_142 = __p0_142; \ + int32x4_t __s1_142 = __p1_142; \ + int32x2_t __s2_142 = __p2_142; \ + __ret_142 = vqrdmlshq_s32(__s0_142, __s1_142, splatq_lane_s32(__s2_142, __p3_142)); \ + __ret_142; \ +}) +#else +#define vqrdmlshq_lane_s32(__p0_143, __p1_143, __p2_143, __p3_143) __extension__ ({ \ + int32x4_t __ret_143; \ + int32x4_t __s0_143 = __p0_143; \ + int32x4_t __s1_143 = __p1_143; \ + int32x2_t __s2_143 = __p2_143; \ + int32x4_t __rev0_143; __rev0_143 = __builtin_shufflevector(__s0_143, __s0_143, 3, 2, 1, 0); \ + int32x4_t __rev1_143; __rev1_143 = __builtin_shufflevector(__s1_143, __s1_143, 3, 2, 1, 0); \ + int32x2_t __rev2_143; __rev2_143 = __builtin_shufflevector(__s2_143, __s2_143, 1, 0); \ + __ret_143 = __noswap_vqrdmlshq_s32(__rev0_143, __rev1_143, __noswap_splatq_lane_s32(__rev2_143, __p3_143)); \ + __ret_143 = __builtin_shufflevector(__ret_143, __ret_143, 3, 2, 1, 0); \ + __ret_143; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlshq_lane_s16(__p0_144, __p1_144, __p2_144, __p3_144) __extension__ ({ \ + int16x8_t __ret_144; \ + int16x8_t __s0_144 = __p0_144; \ + int16x8_t __s1_144 = __p1_144; \ + int16x4_t __s2_144 = __p2_144; \ + __ret_144 = vqrdmlshq_s16(__s0_144, __s1_144, splatq_lane_s16(__s2_144, __p3_144)); \ + __ret_144; \ +}) +#else +#define vqrdmlshq_lane_s16(__p0_145, __p1_145, __p2_145, __p3_145) __extension__ ({ \ + int16x8_t __ret_145; \ + int16x8_t __s0_145 = __p0_145; \ + int16x8_t __s1_145 = __p1_145; \ + int16x4_t __s2_145 = __p2_145; \ + int16x8_t __rev0_145; __rev0_145 = __builtin_shufflevector(__s0_145, __s0_145, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_145; __rev1_145 = __builtin_shufflevector(__s1_145, __s1_145, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_145; __rev2_145 = __builtin_shufflevector(__s2_145, __s2_145, 3, 2, 1, 0); \ + __ret_145 = __noswap_vqrdmlshq_s16(__rev0_145, __rev1_145, 
__noswap_splatq_lane_s16(__rev2_145, __p3_145)); \ + __ret_145 = __builtin_shufflevector(__ret_145, __ret_145, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_145; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlsh_lane_s32(__p0_146, __p1_146, __p2_146, __p3_146) __extension__ ({ \ + int32x2_t __ret_146; \ + int32x2_t __s0_146 = __p0_146; \ + int32x2_t __s1_146 = __p1_146; \ + int32x2_t __s2_146 = __p2_146; \ + __ret_146 = vqrdmlsh_s32(__s0_146, __s1_146, splat_lane_s32(__s2_146, __p3_146)); \ + __ret_146; \ +}) +#else +#define vqrdmlsh_lane_s32(__p0_147, __p1_147, __p2_147, __p3_147) __extension__ ({ \ + int32x2_t __ret_147; \ + int32x2_t __s0_147 = __p0_147; \ + int32x2_t __s1_147 = __p1_147; \ + int32x2_t __s2_147 = __p2_147; \ + int32x2_t __rev0_147; __rev0_147 = __builtin_shufflevector(__s0_147, __s0_147, 1, 0); \ + int32x2_t __rev1_147; __rev1_147 = __builtin_shufflevector(__s1_147, __s1_147, 1, 0); \ + int32x2_t __rev2_147; __rev2_147 = __builtin_shufflevector(__s2_147, __s2_147, 1, 0); \ + __ret_147 = __noswap_vqrdmlsh_s32(__rev0_147, __rev1_147, __noswap_splat_lane_s32(__rev2_147, __p3_147)); \ + __ret_147 = __builtin_shufflevector(__ret_147, __ret_147, 1, 0); \ + __ret_147; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrdmlsh_lane_s16(__p0_148, __p1_148, __p2_148, __p3_148) __extension__ ({ \ + int16x4_t __ret_148; \ + int16x4_t __s0_148 = __p0_148; \ + int16x4_t __s1_148 = __p1_148; \ + int16x4_t __s2_148 = __p2_148; \ + __ret_148 = vqrdmlsh_s16(__s0_148, __s1_148, splat_lane_s16(__s2_148, __p3_148)); \ + __ret_148; \ +}) +#else +#define vqrdmlsh_lane_s16(__p0_149, __p1_149, __p2_149, __p3_149) __extension__ ({ \ + int16x4_t __ret_149; \ + int16x4_t __s0_149 = __p0_149; \ + int16x4_t __s1_149 = __p1_149; \ + int16x4_t __s2_149 = __p2_149; \ + int16x4_t __rev0_149; __rev0_149 = __builtin_shufflevector(__s0_149, __s0_149, 3, 2, 1, 0); \ + int16x4_t __rev1_149; __rev1_149 = __builtin_shufflevector(__s1_149, __s1_149, 3, 2, 1, 0); \ + int16x4_t 
__rev2_149; __rev2_149 = __builtin_shufflevector(__s2_149, __s2_149, 3, 2, 1, 0); \ + __ret_149 = __noswap_vqrdmlsh_s16(__rev0_149, __rev1_149, __noswap_splat_lane_s16(__rev2_149, __p3_149)); \ + __ret_149 = __builtin_shufflevector(__ret_149, __ret_149, 3, 2, 1, 0); \ + __ret_149; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcadd_rot270_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vaddq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcadd_rot270_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; - __ret = __p0 + __p1; + __ret = (float16x4_t) __builtin_neon_vcadd_rot90_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vadd_f16(float16x4_t __p0, float16x4_t __p1) { +__ai 
__attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 + __rev1; + __ret = (float16x4_t) __builtin_neon_vcadd_rot90_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vbslq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vbslq_f16(uint16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vbslq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); + __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) 
float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vbsl_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vbsl_f16(uint16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vbsl_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcageq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49); +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_f16((int8x16_t)__p0, (int8x16_t)__p1, 
(int8x16_t)__p2, 40); return __ret; } #else -__ai __attribute__((target("fullfp16"))) uint16x8_t vcageq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcageq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcmlaq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t __noswap_vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcage_f16((int8x8_t)__p0, (int8x8_t)__p1, 17); +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; } #else -__ai __attribute__((target("fullfp16"))) uint16x4_t vcage_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; float16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcage_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcmla_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t __noswap_vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcagtq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} +#define vcmla_lane_f16(__p0_150, __p1_150, __p2_150, __p3_150) __extension__ ({ \ + float16x4_t __ret_150; \ + float16x4_t __s0_150 = __p0_150; \ + float16x4_t __s1_150 = __p1_150; \ + float16x4_t __s2_150 = __p2_150; \ +float16x4_t __reint_150 = __s2_150; \ +uint32x2_t __reint1_150 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_150, __p3_150), vget_lane_u32(*(uint32x2_t *) &__reint_150, __p3_150)}; \ + __ret_150 = vcmla_f16(__s0_150, __s1_150, *(float16x4_t *) &__reint1_150); \ + __ret_150; \ +}) #else -__ai __attribute__((target("fullfp16"))) uint16x8_t vcagtq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcagtq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 
2, 1, 0); - return __ret; -} +#define vcmla_lane_f16(__p0_151, __p1_151, __p2_151, __p3_151) __extension__ ({ \ + float16x4_t __ret_151; \ + float16x4_t __s0_151 = __p0_151; \ + float16x4_t __s1_151 = __p1_151; \ + float16x4_t __s2_151 = __p2_151; \ + float16x4_t __rev0_151; __rev0_151 = __builtin_shufflevector(__s0_151, __s0_151, 3, 2, 1, 0); \ + float16x4_t __rev1_151; __rev1_151 = __builtin_shufflevector(__s1_151, __s1_151, 3, 2, 1, 0); \ + float16x4_t __rev2_151; __rev2_151 = __builtin_shufflevector(__s2_151, __s2_151, 3, 2, 1, 0); \ +float16x4_t __reint_151 = __rev2_151; \ +uint32x2_t __reint1_151 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_151, __p3_151), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_151, __p3_151)}; \ + __ret_151 = __noswap_vcmla_f16(__rev0_151, __rev1_151, *(float16x4_t *) &__reint1_151); \ + __ret_151 = __builtin_shufflevector(__ret_151, __ret_151, 3, 2, 1, 0); \ + __ret_151; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcagt_f16((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} +#define vcmlaq_lane_f16(__p0_152, __p1_152, __p2_152, __p3_152) __extension__ ({ \ + float16x8_t __ret_152; \ + float16x8_t __s0_152 = __p0_152; \ + float16x8_t __s1_152 = __p1_152; \ + float16x4_t __s2_152 = __p2_152; \ +float16x4_t __reint_152 = __s2_152; \ +uint32x4_t __reint1_152 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_152, __p3_152), vget_lane_u32(*(uint32x2_t *) &__reint_152, __p3_152), vget_lane_u32(*(uint32x2_t *) &__reint_152, __p3_152), vget_lane_u32(*(uint32x2_t *) &__reint_152, __p3_152)}; \ + __ret_152 = vcmlaq_f16(__s0_152, __s1_152, *(float16x8_t *) &__reint1_152); \ + __ret_152; \ +}) #else -__ai __attribute__((target("fullfp16"))) uint16x4_t vcagt_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcagt_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vcmlaq_lane_f16(__p0_153, __p1_153, __p2_153, __p3_153) __extension__ ({ \ + float16x8_t __ret_153; \ + float16x8_t __s0_153 = __p0_153; \ + float16x8_t __s1_153 = __p1_153; \ + float16x4_t __s2_153 = __p2_153; \ + float16x8_t __rev0_153; __rev0_153 = __builtin_shufflevector(__s0_153, __s0_153, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_153; __rev1_153 = __builtin_shufflevector(__s1_153, __s1_153, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_153; __rev2_153 = __builtin_shufflevector(__s2_153, __s2_153, 3, 2, 1, 0); \ +float16x4_t __reint_153 = __rev2_153; \ +uint32x4_t __reint1_153 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_153, __p3_153), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_153, __p3_153), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_153, __p3_153), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_153, __p3_153)}; \ + __ret_153 = __noswap_vcmlaq_f16(__rev0_153, __rev1_153, *(float16x8_t *) &__reint1_153); \ + __ret_153 = __builtin_shufflevector(__ret_153, __ret_153, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_153; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcaleq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} +#define vcmla_laneq_f16(__p0_154, __p1_154, __p2_154, __p3_154) __extension__ ({ \ + float16x4_t __ret_154; \ + float16x4_t __s0_154 = __p0_154; \ + float16x4_t __s1_154 = __p1_154; \ + float16x8_t __s2_154 = __p2_154; \ +float16x8_t __reint_154 = __s2_154; \ +uint32x2_t __reint1_154 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_154, __p3_154), 
vgetq_lane_u32(*(uint32x4_t *) &__reint_154, __p3_154)}; \ + __ret_154 = vcmla_f16(__s0_154, __s1_154, *(float16x4_t *) &__reint1_154); \ + __ret_154; \ +}) #else -__ai __attribute__((target("fullfp16"))) uint16x8_t vcaleq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcaleq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vcmla_laneq_f16(__p0_155, __p1_155, __p2_155, __p3_155) __extension__ ({ \ + float16x4_t __ret_155; \ + float16x4_t __s0_155 = __p0_155; \ + float16x4_t __s1_155 = __p1_155; \ + float16x8_t __s2_155 = __p2_155; \ + float16x4_t __rev0_155; __rev0_155 = __builtin_shufflevector(__s0_155, __s0_155, 3, 2, 1, 0); \ + float16x4_t __rev1_155; __rev1_155 = __builtin_shufflevector(__s1_155, __s1_155, 3, 2, 1, 0); \ + float16x8_t __rev2_155; __rev2_155 = __builtin_shufflevector(__s2_155, __s2_155, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_155 = __rev2_155; \ +uint32x2_t __reint1_155 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_155, __p3_155), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_155, __p3_155)}; \ + __ret_155 = __noswap_vcmla_f16(__rev0_155, __rev1_155, *(float16x4_t *) &__reint1_155); \ + __ret_155 = __builtin_shufflevector(__ret_155, __ret_155, 3, 2, 1, 0); \ + __ret_155; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcale_f16((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} +#define vcmlaq_laneq_f16(__p0_156, __p1_156, __p2_156, __p3_156) __extension__ ({ \ + float16x8_t __ret_156; \ + float16x8_t __s0_156 = __p0_156; \ + 
float16x8_t __s1_156 = __p1_156; \ + float16x8_t __s2_156 = __p2_156; \ +float16x8_t __reint_156 = __s2_156; \ +uint32x4_t __reint1_156 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_156, __p3_156), vgetq_lane_u32(*(uint32x4_t *) &__reint_156, __p3_156), vgetq_lane_u32(*(uint32x4_t *) &__reint_156, __p3_156), vgetq_lane_u32(*(uint32x4_t *) &__reint_156, __p3_156)}; \ + __ret_156 = vcmlaq_f16(__s0_156, __s1_156, *(float16x8_t *) &__reint1_156); \ + __ret_156; \ +}) #else -__ai __attribute__((target("fullfp16"))) uint16x4_t vcale_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcale_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vcmlaq_laneq_f16(__p0_157, __p1_157, __p2_157, __p3_157) __extension__ ({ \ + float16x8_t __ret_157; \ + float16x8_t __s0_157 = __p0_157; \ + float16x8_t __s1_157 = __p1_157; \ + float16x8_t __s2_157 = __p2_157; \ + float16x8_t __rev0_157; __rev0_157 = __builtin_shufflevector(__s0_157, __s0_157, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_157; __rev1_157 = __builtin_shufflevector(__s1_157, __s1_157, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_157; __rev2_157 = __builtin_shufflevector(__s2_157, __s2_157, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_157 = __rev2_157; \ +uint32x4_t __reint1_157 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_157, __p3_157), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_157, __p3_157), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_157, __p3_157), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_157, __p3_157)}; \ + __ret_157 = __noswap_vcmlaq_f16(__rev0_157, __rev1_157, *(float16x8_t *) &__reint1_157); \ + __ret_157 = __builtin_shufflevector(__ret_157, __ret_157, 7, 6, 5, 4, 3, 
2, 1, 0); \ + __ret_157; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcaltq_f16((int8x16_t)__p0, (int8x16_t)__p1, 49); +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); return __ret; } #else -__ai __attribute__((target("fullfp16"))) uint16x8_t vcaltq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcaltq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t __noswap_vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcalt_f16((int8x8_t)__p0, (int8x8_t)__p1, 17); +__ai 
__attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot180_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; } #else -__ai __attribute__((target("fullfp16"))) uint16x4_t vcalt_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcalt_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 17); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcmla_rot180_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t __noswap_vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot180_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 == __p1); - return __ret; -} +#define vcmla_rot180_lane_f16(__p0_158, __p1_158, __p2_158, __p3_158) __extension__ ({ \ + float16x4_t __ret_158; \ + float16x4_t __s0_158 = __p0_158; \ + float16x4_t __s1_158 = __p1_158; \ + float16x4_t __s2_158 = __p2_158; \ +float16x4_t __reint_158 = __s2_158; \ +uint32x2_t __reint1_158 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_158, __p3_158), vget_lane_u32(*(uint32x2_t *) &__reint_158, 
__p3_158)}; \ + __ret_158 = vcmla_rot180_f16(__s0_158, __s1_158, *(float16x4_t *) &__reint1_158); \ + __ret_158; \ +}) #else -__ai __attribute__((target("fullfp16"))) uint16x8_t vceqq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vcmla_rot180_lane_f16(__p0_159, __p1_159, __p2_159, __p3_159) __extension__ ({ \ + float16x4_t __ret_159; \ + float16x4_t __s0_159 = __p0_159; \ + float16x4_t __s1_159 = __p1_159; \ + float16x4_t __s2_159 = __p2_159; \ + float16x4_t __rev0_159; __rev0_159 = __builtin_shufflevector(__s0_159, __s0_159, 3, 2, 1, 0); \ + float16x4_t __rev1_159; __rev1_159 = __builtin_shufflevector(__s1_159, __s1_159, 3, 2, 1, 0); \ + float16x4_t __rev2_159; __rev2_159 = __builtin_shufflevector(__s2_159, __s2_159, 3, 2, 1, 0); \ +float16x4_t __reint_159 = __rev2_159; \ +uint32x2_t __reint1_159 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_159, __p3_159), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_159, __p3_159)}; \ + __ret_159 = __noswap_vcmla_rot180_f16(__rev0_159, __rev1_159, *(float16x4_t *) &__reint1_159); \ + __ret_159 = __builtin_shufflevector(__ret_159, __ret_159, 3, 2, 1, 0); \ + __ret_159; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 == __p1); - return __ret; -} +#define vcmlaq_rot180_lane_f16(__p0_160, __p1_160, __p2_160, __p3_160) __extension__ ({ \ + float16x8_t __ret_160; \ + float16x8_t __s0_160 = __p0_160; \ + float16x8_t __s1_160 = __p1_160; \ + float16x4_t __s2_160 = __p2_160; \ +float16x4_t __reint_160 = __s2_160; \ +uint32x4_t __reint1_160 = 
(uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_160, __p3_160), vget_lane_u32(*(uint32x2_t *) &__reint_160, __p3_160), vget_lane_u32(*(uint32x2_t *) &__reint_160, __p3_160), vget_lane_u32(*(uint32x2_t *) &__reint_160, __p3_160)}; \ + __ret_160 = vcmlaq_rot180_f16(__s0_160, __s1_160, *(float16x8_t *) &__reint1_160); \ + __ret_160; \ +}) #else -__ai __attribute__((target("fullfp16"))) uint16x4_t vceq_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vcmlaq_rot180_lane_f16(__p0_161, __p1_161, __p2_161, __p3_161) __extension__ ({ \ + float16x8_t __ret_161; \ + float16x8_t __s0_161 = __p0_161; \ + float16x8_t __s1_161 = __p1_161; \ + float16x4_t __s2_161 = __p2_161; \ + float16x8_t __rev0_161; __rev0_161 = __builtin_shufflevector(__s0_161, __s0_161, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_161; __rev1_161 = __builtin_shufflevector(__s1_161, __s1_161, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_161; __rev2_161 = __builtin_shufflevector(__s2_161, __s2_161, 3, 2, 1, 0); \ +float16x4_t __reint_161 = __rev2_161; \ +uint32x4_t __reint1_161 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_161, __p3_161), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_161, __p3_161), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_161, __p3_161), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_161, __p3_161)}; \ + __ret_161 = __noswap_vcmlaq_rot180_f16(__rev0_161, __rev1_161, *(float16x8_t *) &__reint1_161); \ + __ret_161 = __builtin_shufflevector(__ret_161, __ret_161, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_161; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x8_t vceqzq_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = 
(uint16x8_t) __builtin_neon_vceqzq_f16((int8x16_t)__p0, 49); - return __ret; -} +#define vcmla_rot180_laneq_f16(__p0_162, __p1_162, __p2_162, __p3_162) __extension__ ({ \ + float16x4_t __ret_162; \ + float16x4_t __s0_162 = __p0_162; \ + float16x4_t __s1_162 = __p1_162; \ + float16x8_t __s2_162 = __p2_162; \ +float16x8_t __reint_162 = __s2_162; \ +uint32x2_t __reint1_162 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_162, __p3_162), vgetq_lane_u32(*(uint32x4_t *) &__reint_162, __p3_162)}; \ + __ret_162 = vcmla_rot180_f16(__s0_162, __s1_162, *(float16x4_t *) &__reint1_162); \ + __ret_162; \ +}) #else -__ai __attribute__((target("fullfp16"))) uint16x8_t vceqzq_f16(float16x8_t __p0) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vceqzq_f16((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vcmla_rot180_laneq_f16(__p0_163, __p1_163, __p2_163, __p3_163) __extension__ ({ \ + float16x4_t __ret_163; \ + float16x4_t __s0_163 = __p0_163; \ + float16x4_t __s1_163 = __p1_163; \ + float16x8_t __s2_163 = __p2_163; \ + float16x4_t __rev0_163; __rev0_163 = __builtin_shufflevector(__s0_163, __s0_163, 3, 2, 1, 0); \ + float16x4_t __rev1_163; __rev1_163 = __builtin_shufflevector(__s1_163, __s1_163, 3, 2, 1, 0); \ + float16x8_t __rev2_163; __rev2_163 = __builtin_shufflevector(__s2_163, __s2_163, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_163 = __rev2_163; \ +uint32x2_t __reint1_163 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_163, __p3_163), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_163, __p3_163)}; \ + __ret_163 = __noswap_vcmla_rot180_f16(__rev0_163, __rev1_163, *(float16x4_t *) &__reint1_163); \ + __ret_163 = __builtin_shufflevector(__ret_163, __ret_163, 3, 2, 1, 0); \ + __ret_163; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) 
uint16x4_t vceqz_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vceqz_f16((int8x8_t)__p0, 17); - return __ret; -} +#define vcmlaq_rot180_laneq_f16(__p0_164, __p1_164, __p2_164, __p3_164) __extension__ ({ \ + float16x8_t __ret_164; \ + float16x8_t __s0_164 = __p0_164; \ + float16x8_t __s1_164 = __p1_164; \ + float16x8_t __s2_164 = __p2_164; \ +float16x8_t __reint_164 = __s2_164; \ +uint32x4_t __reint1_164 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_164, __p3_164), vgetq_lane_u32(*(uint32x4_t *) &__reint_164, __p3_164), vgetq_lane_u32(*(uint32x4_t *) &__reint_164, __p3_164), vgetq_lane_u32(*(uint32x4_t *) &__reint_164, __p3_164)}; \ + __ret_164 = vcmlaq_rot180_f16(__s0_164, __s1_164, *(float16x8_t *) &__reint1_164); \ + __ret_164; \ +}) #else -__ai __attribute__((target("fullfp16"))) uint16x4_t vceqz_f16(float16x4_t __p0) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vceqz_f16((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vcmlaq_rot180_laneq_f16(__p0_165, __p1_165, __p2_165, __p3_165) __extension__ ({ \ + float16x8_t __ret_165; \ + float16x8_t __s0_165 = __p0_165; \ + float16x8_t __s1_165 = __p1_165; \ + float16x8_t __s2_165 = __p2_165; \ + float16x8_t __rev0_165; __rev0_165 = __builtin_shufflevector(__s0_165, __s0_165, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_165; __rev1_165 = __builtin_shufflevector(__s1_165, __s1_165, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_165; __rev2_165 = __builtin_shufflevector(__s2_165, __s2_165, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_165 = __rev2_165; \ +uint32x4_t __reint1_165 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_165, __p3_165), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_165, __p3_165), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_165, __p3_165), 
__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_165, __p3_165)}; \ + __ret_165 = __noswap_vcmlaq_rot180_f16(__rev0_165, __rev1_165, *(float16x8_t *) &__reint1_165); \ + __ret_165 = __builtin_shufflevector(__ret_165, __ret_165, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_165; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 >= __p1); +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); return __ret; } #else -__ai __attribute__((target("fullfp16"))) uint16x8_t vcgeq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t)(__rev0 >= __rev1); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t __noswap_vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) { - 
uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 >= __p1); +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot270_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; } #else -__ai __attribute__((target("fullfp16"))) uint16x4_t vcge_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t)(__rev0 >= __rev1); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcmla_rot270_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x8_t vcgezq_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcgezq_f16((int8x16_t)__p0, 49); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) uint16x8_t vcgezq_f16(float16x8_t __p0) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcgezq_f16((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x4_t vcgez_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcgez_f16((int8x8_t)__p0, 17); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) uint16x4_t vcgez_f16(float16x4_t 
__p0) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcgez_f16((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t __noswap_vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot270_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 > __p1); - return __ret; -} +#define vcmla_rot270_lane_f16(__p0_166, __p1_166, __p2_166, __p3_166) __extension__ ({ \ + float16x4_t __ret_166; \ + float16x4_t __s0_166 = __p0_166; \ + float16x4_t __s1_166 = __p1_166; \ + float16x4_t __s2_166 = __p2_166; \ +float16x4_t __reint_166 = __s2_166; \ +uint32x2_t __reint1_166 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_166, __p3_166), vget_lane_u32(*(uint32x2_t *) &__reint_166, __p3_166)}; \ + __ret_166 = vcmla_rot270_f16(__s0_166, __s1_166, *(float16x4_t *) &__reint1_166); \ + __ret_166; \ +}) #else -__ai __attribute__((target("fullfp16"))) uint16x8_t vcgtq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vcmla_rot270_lane_f16(__p0_167, __p1_167, __p2_167, __p3_167) __extension__ ({ \ + float16x4_t __ret_167; \ + float16x4_t __s0_167 = __p0_167; \ + float16x4_t __s1_167 = __p1_167; \ + float16x4_t __s2_167 = __p2_167; \ + float16x4_t __rev0_167; __rev0_167 = 
__builtin_shufflevector(__s0_167, __s0_167, 3, 2, 1, 0); \ + float16x4_t __rev1_167; __rev1_167 = __builtin_shufflevector(__s1_167, __s1_167, 3, 2, 1, 0); \ + float16x4_t __rev2_167; __rev2_167 = __builtin_shufflevector(__s2_167, __s2_167, 3, 2, 1, 0); \ +float16x4_t __reint_167 = __rev2_167; \ +uint32x2_t __reint1_167 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_167, __p3_167), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_167, __p3_167)}; \ + __ret_167 = __noswap_vcmla_rot270_f16(__rev0_167, __rev1_167, *(float16x4_t *) &__reint1_167); \ + __ret_167 = __builtin_shufflevector(__ret_167, __ret_167, 3, 2, 1, 0); \ + __ret_167; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 > __p1); - return __ret; -} +#define vcmlaq_rot270_lane_f16(__p0_168, __p1_168, __p2_168, __p3_168) __extension__ ({ \ + float16x8_t __ret_168; \ + float16x8_t __s0_168 = __p0_168; \ + float16x8_t __s1_168 = __p1_168; \ + float16x4_t __s2_168 = __p2_168; \ +float16x4_t __reint_168 = __s2_168; \ +uint32x4_t __reint1_168 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_168, __p3_168), vget_lane_u32(*(uint32x2_t *) &__reint_168, __p3_168), vget_lane_u32(*(uint32x2_t *) &__reint_168, __p3_168), vget_lane_u32(*(uint32x2_t *) &__reint_168, __p3_168)}; \ + __ret_168 = vcmlaq_rot270_f16(__s0_168, __s1_168, *(float16x8_t *) &__reint1_168); \ + __ret_168; \ +}) #else -__ai __attribute__((target("fullfp16"))) uint16x4_t vcgt_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vcmlaq_rot270_lane_f16(__p0_169, __p1_169, __p2_169, __p3_169) __extension__ 
({ \ + float16x8_t __ret_169; \ + float16x8_t __s0_169 = __p0_169; \ + float16x8_t __s1_169 = __p1_169; \ + float16x4_t __s2_169 = __p2_169; \ + float16x8_t __rev0_169; __rev0_169 = __builtin_shufflevector(__s0_169, __s0_169, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_169; __rev1_169 = __builtin_shufflevector(__s1_169, __s1_169, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_169; __rev2_169 = __builtin_shufflevector(__s2_169, __s2_169, 3, 2, 1, 0); \ +float16x4_t __reint_169 = __rev2_169; \ +uint32x4_t __reint1_169 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_169, __p3_169), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_169, __p3_169), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_169, __p3_169), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_169, __p3_169)}; \ + __ret_169 = __noswap_vcmlaq_rot270_f16(__rev0_169, __rev1_169, *(float16x8_t *) &__reint1_169); \ + __ret_169 = __builtin_shufflevector(__ret_169, __ret_169, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_169; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x8_t vcgtzq_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcgtzq_f16((int8x16_t)__p0, 49); - return __ret; -} +#define vcmla_rot270_laneq_f16(__p0_170, __p1_170, __p2_170, __p3_170) __extension__ ({ \ + float16x4_t __ret_170; \ + float16x4_t __s0_170 = __p0_170; \ + float16x4_t __s1_170 = __p1_170; \ + float16x8_t __s2_170 = __p2_170; \ +float16x8_t __reint_170 = __s2_170; \ +uint32x2_t __reint1_170 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_170, __p3_170), vgetq_lane_u32(*(uint32x4_t *) &__reint_170, __p3_170)}; \ + __ret_170 = vcmla_rot270_f16(__s0_170, __s1_170, *(float16x4_t *) &__reint1_170); \ + __ret_170; \ +}) #else -__ai __attribute__((target("fullfp16"))) uint16x8_t vcgtzq_f16(float16x8_t __p0) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) 
__builtin_neon_vcgtzq_f16((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vcmla_rot270_laneq_f16(__p0_171, __p1_171, __p2_171, __p3_171) __extension__ ({ \ + float16x4_t __ret_171; \ + float16x4_t __s0_171 = __p0_171; \ + float16x4_t __s1_171 = __p1_171; \ + float16x8_t __s2_171 = __p2_171; \ + float16x4_t __rev0_171; __rev0_171 = __builtin_shufflevector(__s0_171, __s0_171, 3, 2, 1, 0); \ + float16x4_t __rev1_171; __rev1_171 = __builtin_shufflevector(__s1_171, __s1_171, 3, 2, 1, 0); \ + float16x8_t __rev2_171; __rev2_171 = __builtin_shufflevector(__s2_171, __s2_171, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_171 = __rev2_171; \ +uint32x2_t __reint1_171 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_171, __p3_171), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_171, __p3_171)}; \ + __ret_171 = __noswap_vcmla_rot270_f16(__rev0_171, __rev1_171, *(float16x4_t *) &__reint1_171); \ + __ret_171 = __builtin_shufflevector(__ret_171, __ret_171, 3, 2, 1, 0); \ + __ret_171; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x4_t vcgtz_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcgtz_f16((int8x8_t)__p0, 17); - return __ret; -} +#define vcmlaq_rot270_laneq_f16(__p0_172, __p1_172, __p2_172, __p3_172) __extension__ ({ \ + float16x8_t __ret_172; \ + float16x8_t __s0_172 = __p0_172; \ + float16x8_t __s1_172 = __p1_172; \ + float16x8_t __s2_172 = __p2_172; \ +float16x8_t __reint_172 = __s2_172; \ +uint32x4_t __reint1_172 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_172, __p3_172), vgetq_lane_u32(*(uint32x4_t *) &__reint_172, __p3_172), vgetq_lane_u32(*(uint32x4_t *) &__reint_172, __p3_172), vgetq_lane_u32(*(uint32x4_t *) &__reint_172, __p3_172)}; \ + __ret_172 = vcmlaq_rot270_f16(__s0_172, __s1_172, *(float16x8_t *) &__reint1_172); \ + __ret_172; \ +}) #else -__ai __attribute__((target("fullfp16"))) 
uint16x4_t vcgtz_f16(float16x4_t __p0) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcgtz_f16((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vcmlaq_rot270_laneq_f16(__p0_173, __p1_173, __p2_173, __p3_173) __extension__ ({ \ + float16x8_t __ret_173; \ + float16x8_t __s0_173 = __p0_173; \ + float16x8_t __s1_173 = __p1_173; \ + float16x8_t __s2_173 = __p2_173; \ + float16x8_t __rev0_173; __rev0_173 = __builtin_shufflevector(__s0_173, __s0_173, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_173; __rev1_173 = __builtin_shufflevector(__s1_173, __s1_173, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_173; __rev2_173 = __builtin_shufflevector(__s2_173, __s2_173, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_173 = __rev2_173; \ +uint32x4_t __reint1_173 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_173, __p3_173), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_173, __p3_173), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_173, __p3_173), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_173, __p3_173)}; \ + __ret_173 = __noswap_vcmlaq_rot270_f16(__rev0_173, __rev1_173, *(float16x8_t *) &__reint1_173); \ + __ret_173 = __builtin_shufflevector(__ret_173, __ret_173, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_173; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 <= __p1); +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); return __ret; } #else -__ai __attribute__((target("fullfp16"))) uint16x8_t vcleq_f16(float16x8_t __p0, float16x8_t __p1) { - 
uint16x8_t __ret; +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t)(__rev0 <= __rev1); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x8_t __noswap_vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 <= __p1); +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot90_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); return __ret; } #else -__ai __attribute__((target("fullfp16"))) uint16x4_t vcle_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t)(__rev0 <= __rev1); + float16x4_t __rev2; __rev2 = 
__builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcmla_rot90_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("v8.3a,fullfp16,neon"))) float16x4_t __noswap_vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vcmla_rot90_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x8_t vclezq_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vclezq_f16((int8x16_t)__p0, 49); - return __ret; -} +#define vcmla_rot90_lane_f16(__p0_174, __p1_174, __p2_174, __p3_174) __extension__ ({ \ + float16x4_t __ret_174; \ + float16x4_t __s0_174 = __p0_174; \ + float16x4_t __s1_174 = __p1_174; \ + float16x4_t __s2_174 = __p2_174; \ +float16x4_t __reint_174 = __s2_174; \ +uint32x2_t __reint1_174 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_174, __p3_174), vget_lane_u32(*(uint32x2_t *) &__reint_174, __p3_174)}; \ + __ret_174 = vcmla_rot90_f16(__s0_174, __s1_174, *(float16x4_t *) &__reint1_174); \ + __ret_174; \ +}) #else -__ai __attribute__((target("fullfp16"))) uint16x8_t vclezq_f16(float16x8_t __p0) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vclezq_f16((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vcmla_rot90_lane_f16(__p0_175, __p1_175, __p2_175, __p3_175) __extension__ ({ \ + float16x4_t __ret_175; \ + float16x4_t __s0_175 = __p0_175; \ + float16x4_t __s1_175 = __p1_175; \ + float16x4_t __s2_175 = __p2_175; \ + float16x4_t __rev0_175; __rev0_175 = __builtin_shufflevector(__s0_175, __s0_175, 3, 2, 1, 0); \ + float16x4_t __rev1_175; 
__rev1_175 = __builtin_shufflevector(__s1_175, __s1_175, 3, 2, 1, 0); \ + float16x4_t __rev2_175; __rev2_175 = __builtin_shufflevector(__s2_175, __s2_175, 3, 2, 1, 0); \ +float16x4_t __reint_175 = __rev2_175; \ +uint32x2_t __reint1_175 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_175, __p3_175), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_175, __p3_175)}; \ + __ret_175 = __noswap_vcmla_rot90_f16(__rev0_175, __rev1_175, *(float16x4_t *) &__reint1_175); \ + __ret_175 = __builtin_shufflevector(__ret_175, __ret_175, 3, 2, 1, 0); \ + __ret_175; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x4_t vclez_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vclez_f16((int8x8_t)__p0, 17); - return __ret; -} +#define vcmlaq_rot90_lane_f16(__p0_176, __p1_176, __p2_176, __p3_176) __extension__ ({ \ + float16x8_t __ret_176; \ + float16x8_t __s0_176 = __p0_176; \ + float16x8_t __s1_176 = __p1_176; \ + float16x4_t __s2_176 = __p2_176; \ +float16x4_t __reint_176 = __s2_176; \ +uint32x4_t __reint1_176 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_176, __p3_176), vget_lane_u32(*(uint32x2_t *) &__reint_176, __p3_176), vget_lane_u32(*(uint32x2_t *) &__reint_176, __p3_176), vget_lane_u32(*(uint32x2_t *) &__reint_176, __p3_176)}; \ + __ret_176 = vcmlaq_rot90_f16(__s0_176, __s1_176, *(float16x8_t *) &__reint1_176); \ + __ret_176; \ +}) #else -__ai __attribute__((target("fullfp16"))) uint16x4_t vclez_f16(float16x4_t __p0) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vclez_f16((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vcmlaq_rot90_lane_f16(__p0_177, __p1_177, __p2_177, __p3_177) __extension__ ({ \ + float16x8_t __ret_177; \ + float16x8_t __s0_177 = __p0_177; \ + float16x8_t __s1_177 = __p1_177; \ + float16x4_t __s2_177 = __p2_177; 
\ + float16x8_t __rev0_177; __rev0_177 = __builtin_shufflevector(__s0_177, __s0_177, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_177; __rev1_177 = __builtin_shufflevector(__s1_177, __s1_177, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_177; __rev2_177 = __builtin_shufflevector(__s2_177, __s2_177, 3, 2, 1, 0); \ +float16x4_t __reint_177 = __rev2_177; \ +uint32x4_t __reint1_177 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_177, __p3_177), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_177, __p3_177), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_177, __p3_177), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_177, __p3_177)}; \ + __ret_177 = __noswap_vcmlaq_rot90_f16(__rev0_177, __rev1_177, *(float16x8_t *) &__reint1_177); \ + __ret_177 = __builtin_shufflevector(__ret_177, __ret_177, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_177; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0 < __p1); - return __ret; -} +#define vcmla_rot90_laneq_f16(__p0_178, __p1_178, __p2_178, __p3_178) __extension__ ({ \ + float16x4_t __ret_178; \ + float16x4_t __s0_178 = __p0_178; \ + float16x4_t __s1_178 = __p1_178; \ + float16x8_t __s2_178 = __p2_178; \ +float16x8_t __reint_178 = __s2_178; \ +uint32x2_t __reint1_178 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_178, __p3_178), vgetq_lane_u32(*(uint32x4_t *) &__reint_178, __p3_178)}; \ + __ret_178 = vcmla_rot90_f16(__s0_178, __s1_178, *(float16x4_t *) &__reint1_178); \ + __ret_178; \ +}) #else -__ai __attribute__((target("fullfp16"))) uint16x8_t vcltq_f16(float16x8_t __p0, float16x8_t __p1) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, 
__ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vcmla_rot90_laneq_f16(__p0_179, __p1_179, __p2_179, __p3_179) __extension__ ({ \ + float16x4_t __ret_179; \ + float16x4_t __s0_179 = __p0_179; \ + float16x4_t __s1_179 = __p1_179; \ + float16x8_t __s2_179 = __p2_179; \ + float16x4_t __rev0_179; __rev0_179 = __builtin_shufflevector(__s0_179, __s0_179, 3, 2, 1, 0); \ + float16x4_t __rev1_179; __rev1_179 = __builtin_shufflevector(__s1_179, __s1_179, 3, 2, 1, 0); \ + float16x8_t __rev2_179; __rev2_179 = __builtin_shufflevector(__s2_179, __s2_179, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_179 = __rev2_179; \ +uint32x2_t __reint1_179 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_179, __p3_179), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_179, __p3_179)}; \ + __ret_179 = __noswap_vcmla_rot90_f16(__rev0_179, __rev1_179, *(float16x4_t *) &__reint1_179); \ + __ret_179 = __builtin_shufflevector(__ret_179, __ret_179, 3, 2, 1, 0); \ + __ret_179; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0 < __p1); - return __ret; -} +#define vcmlaq_rot90_laneq_f16(__p0_180, __p1_180, __p2_180, __p3_180) __extension__ ({ \ + float16x8_t __ret_180; \ + float16x8_t __s0_180 = __p0_180; \ + float16x8_t __s1_180 = __p1_180; \ + float16x8_t __s2_180 = __p2_180; \ +float16x8_t __reint_180 = __s2_180; \ +uint32x4_t __reint1_180 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_180, __p3_180), vgetq_lane_u32(*(uint32x4_t *) &__reint_180, __p3_180), vgetq_lane_u32(*(uint32x4_t *) &__reint_180, __p3_180), vgetq_lane_u32(*(uint32x4_t *) &__reint_180, __p3_180)}; \ + __ret_180 = vcmlaq_rot90_f16(__s0_180, __s1_180, *(float16x8_t *) &__reint1_180); \ + __ret_180; \ +}) #else -__ai __attribute__((target("fullfp16"))) uint16x4_t vclt_f16(float16x4_t __p0, float16x4_t __p1) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vcmlaq_rot90_laneq_f16(__p0_181, __p1_181, __p2_181, __p3_181) __extension__ ({ \ + float16x8_t __ret_181; \ + float16x8_t __s0_181 = __p0_181; \ + float16x8_t __s1_181 = __p1_181; \ + float16x8_t __s2_181 = __p2_181; \ + float16x8_t __rev0_181; __rev0_181 = __builtin_shufflevector(__s0_181, __s0_181, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_181; __rev1_181 = __builtin_shufflevector(__s1_181, __s1_181, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_181; __rev2_181 = __builtin_shufflevector(__s2_181, __s2_181, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_181 = __rev2_181; \ +uint32x4_t __reint1_181 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_181, __p3_181), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_181, __p3_181), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_181, __p3_181), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_181, __p3_181)}; \ + __ret_181 = __noswap_vcmlaq_rot90_f16(__rev0_181, __rev1_181, *(float16x8_t *) &__reint1_181); \ + __ret_181 = __builtin_shufflevector(__ret_181, __ret_181, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_181; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x8_t vcltzq_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcltzq_f16((int8x16_t)__p0, 49); +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcadd_rot270_f32((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else -__ai __attribute__((target("fullfp16"))) uint16x8_t vcltzq_f16(float16x8_t __p0) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 
2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcltzq_f16((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcadd_rot270_f32((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x4_t vcltz_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcltz_f16((int8x8_t)__p0, 17); +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcadd_rot90_f32((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else -__ai __attribute__((target("fullfp16"))) uint16x4_t vcltz_f16(float16x4_t __p0) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcltz_f16((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcadd_rot90_f32((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vcvtq_f16_u16(uint16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) 
__builtin_neon_vcvtq_f16_u16((int8x16_t)__p0, 49); +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_f32((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vcvtq_f16_u16(uint16x8_t __p0) { - float16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vcvtq_f16_u16((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_f32((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vcvtq_f16_s16(int16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcvtq_f16_s16((int8x16_t)__p0, 33); +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_f32((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vcvtq_f16_s16(int16x8_t __p0) { - float16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vcvtq_f16_s16((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("v8.3a,neon"))) float32x4_t 
vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_f32((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vcvt_f16_u16(uint16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcvt_f16_u16((int8x8_t)__p0, 17); +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vcvt_f16_u16(uint16x4_t __p0) { - float16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vcvt_f16_u16((int8x8_t)__rev0, 17); +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcmlaq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("v8.3a,neon"))) float32x4_t __noswap_vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return 
__ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vcvt_f16_s16(int16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcvt_f16_s16((int8x8_t)__p0, 1); +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vcvt_f16_s16(int16x4_t __p0) { - float16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vcvt_f16_s16((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcmla_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,neon"))) float32x2_t __noswap_vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - uint16x8_t __s0 = __p0; \ - __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_u16((int8x16_t)__s0, __p1, 49); \ - __ret; \ +#define vcmla_lane_f32(__p0_182, __p1_182, __p2_182, __p3_182) __extension__ ({ \ + float32x2_t __ret_182; \ + float32x2_t __s0_182 = __p0_182; 
\ + float32x2_t __s1_182 = __p1_182; \ + float32x2_t __s2_182 = __p2_182; \ +float32x2_t __reint_182 = __s2_182; \ +uint64x1_t __reint1_182 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_182, __p3_182)}; \ + __ret_182 = vcmla_f32(__s0_182, __s1_182, *(float32x2_t *) &__reint1_182); \ + __ret_182; \ }) #else -#define vcvtq_n_f16_u16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_u16((int8x16_t)__rev0, __p1, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ +#define vcmla_lane_f32(__p0_183, __p1_183, __p2_183, __p3_183) __extension__ ({ \ + float32x2_t __ret_183; \ + float32x2_t __s0_183 = __p0_183; \ + float32x2_t __s1_183 = __p1_183; \ + float32x2_t __s2_183 = __p2_183; \ + float32x2_t __rev0_183; __rev0_183 = __builtin_shufflevector(__s0_183, __s0_183, 1, 0); \ + float32x2_t __rev1_183; __rev1_183 = __builtin_shufflevector(__s1_183, __s1_183, 1, 0); \ + float32x2_t __rev2_183; __rev2_183 = __builtin_shufflevector(__s2_183, __s2_183, 1, 0); \ +float32x2_t __reint_183 = __rev2_183; \ +uint64x1_t __reint1_183 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_183, __p3_183)}; \ + __ret_183 = __noswap_vcmla_f32(__rev0_183, __rev1_183, *(float32x2_t *) &__reint1_183); \ + __ret_183 = __builtin_shufflevector(__ret_183, __ret_183, 1, 0); \ + __ret_183; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_s16((int8x16_t)__s0, __p1, 33); \ - __ret; \ +#define vcmlaq_lane_f32(__p0_184, __p1_184, __p2_184, __p3_184) __extension__ ({ \ + float32x4_t __ret_184; \ + float32x4_t __s0_184 = __p0_184; \ + float32x4_t __s1_184 = __p1_184; \ + float32x2_t __s2_184 = __p2_184; \ +float32x2_t __reint_184 = __s2_184; 
\ +uint64x2_t __reint1_184 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_184, __p3_184), vget_lane_u64(*(uint64x1_t *) &__reint_184, __p3_184)}; \ + __ret_184 = vcmlaq_f32(__s0_184, __s1_184, *(float32x4_t *) &__reint1_184); \ + __ret_184; \ }) #else -#define vcvtq_n_f16_s16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16x8_t) __builtin_neon_vcvtq_n_f16_s16((int8x16_t)__rev0, __p1, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ +#define vcmlaq_lane_f32(__p0_185, __p1_185, __p2_185, __p3_185) __extension__ ({ \ + float32x4_t __ret_185; \ + float32x4_t __s0_185 = __p0_185; \ + float32x4_t __s1_185 = __p1_185; \ + float32x2_t __s2_185 = __p2_185; \ + float32x4_t __rev0_185; __rev0_185 = __builtin_shufflevector(__s0_185, __s0_185, 3, 2, 1, 0); \ + float32x4_t __rev1_185; __rev1_185 = __builtin_shufflevector(__s1_185, __s1_185, 3, 2, 1, 0); \ + float32x2_t __rev2_185; __rev2_185 = __builtin_shufflevector(__s2_185, __s2_185, 1, 0); \ +float32x2_t __reint_185 = __rev2_185; \ +uint64x2_t __reint1_185 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_185, __p3_185), vget_lane_u64(*(uint64x1_t *) &__reint_185, __p3_185)}; \ + __ret_185 = __noswap_vcmlaq_f32(__rev0_185, __rev1_185, *(float32x4_t *) &__reint1_185); \ + __ret_185 = __builtin_shufflevector(__ret_185, __ret_185, 3, 2, 1, 0); \ + __ret_185; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \ - float16x4_t __ret; \ - uint16x4_t __s0 = __p0; \ - __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_u16((int8x8_t)__s0, __p1, 17); \ - __ret; \ +#define vcmla_laneq_f32(__p0_186, __p1_186, __p2_186, __p3_186) __extension__ ({ \ + float32x2_t __ret_186; \ + float32x2_t __s0_186 = __p0_186; \ + float32x2_t __s1_186 = __p1_186; \ + float32x4_t __s2_186 = __p2_186; \ +float32x4_t 
__reint_186 = __s2_186; \ +uint64x1_t __reint1_186 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_186, __p3_186)}; \ + __ret_186 = vcmla_f32(__s0_186, __s1_186, *(float32x2_t *) &__reint1_186); \ + __ret_186; \ }) #else -#define vcvt_n_f16_u16(__p0, __p1) __extension__ ({ \ - float16x4_t __ret; \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_u16((int8x8_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ +#define vcmla_laneq_f32(__p0_187, __p1_187, __p2_187, __p3_187) __extension__ ({ \ + float32x2_t __ret_187; \ + float32x2_t __s0_187 = __p0_187; \ + float32x2_t __s1_187 = __p1_187; \ + float32x4_t __s2_187 = __p2_187; \ + float32x2_t __rev0_187; __rev0_187 = __builtin_shufflevector(__s0_187, __s0_187, 1, 0); \ + float32x2_t __rev1_187; __rev1_187 = __builtin_shufflevector(__s1_187, __s1_187, 1, 0); \ + float32x4_t __rev2_187; __rev2_187 = __builtin_shufflevector(__s2_187, __s2_187, 3, 2, 1, 0); \ +float32x4_t __reint_187 = __rev2_187; \ +uint64x1_t __reint1_187 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_187, __p3_187)}; \ + __ret_187 = __noswap_vcmla_f32(__rev0_187, __rev1_187, *(float32x2_t *) &__reint1_187); \ + __ret_187 = __builtin_shufflevector(__ret_187, __ret_187, 1, 0); \ + __ret_187; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \ - float16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_s16((int8x8_t)__s0, __p1, 1); \ - __ret; \ +#define vcmlaq_laneq_f32(__p0_188, __p1_188, __p2_188, __p3_188) __extension__ ({ \ + float32x4_t __ret_188; \ + float32x4_t __s0_188 = __p0_188; \ + float32x4_t __s1_188 = __p1_188; \ + float32x4_t __s2_188 = __p2_188; \ +float32x4_t __reint_188 = __s2_188; \ +uint64x2_t __reint1_188 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_188, __p3_188), 
vgetq_lane_u64(*(uint64x2_t *) &__reint_188, __p3_188)}; \ + __ret_188 = vcmlaq_f32(__s0_188, __s1_188, *(float32x4_t *) &__reint1_188); \ + __ret_188; \ }) #else -#define vcvt_n_f16_s16(__p0, __p1) __extension__ ({ \ - float16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float16x4_t) __builtin_neon_vcvt_n_f16_s16((int8x8_t)__rev0, __p1, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ +#define vcmlaq_laneq_f32(__p0_189, __p1_189, __p2_189, __p3_189) __extension__ ({ \ + float32x4_t __ret_189; \ + float32x4_t __s0_189 = __p0_189; \ + float32x4_t __s1_189 = __p1_189; \ + float32x4_t __s2_189 = __p2_189; \ + float32x4_t __rev0_189; __rev0_189 = __builtin_shufflevector(__s0_189, __s0_189, 3, 2, 1, 0); \ + float32x4_t __rev1_189; __rev1_189 = __builtin_shufflevector(__s1_189, __s1_189, 3, 2, 1, 0); \ + float32x4_t __rev2_189; __rev2_189 = __builtin_shufflevector(__s2_189, __s2_189, 3, 2, 1, 0); \ +float32x4_t __reint_189 = __rev2_189; \ +uint64x2_t __reint1_189 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_189, __p3_189), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_189, __p3_189)}; \ + __ret_189 = __noswap_vcmlaq_f32(__rev0_189, __rev1_189, *(float32x4_t *) &__reint1_189); \ + __ret_189 = __builtin_shufflevector(__ret_189, __ret_189, 3, 2, 1, 0); \ + __ret_189; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_s16_f16(__p0, __p1) __extension__ ({ \ - int16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_f16((int8x16_t)__s0, __p1, 33); \ - __ret; \ -}) +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} #else -#define vcvtq_n_s16_f16(__p0, __p1) __extension__ 
({ \ - int16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int16x8_t) __builtin_neon_vcvtq_n_s16_f16((int8x16_t)__rev0, __p1, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,neon"))) float32x4_t __noswap_vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \ - int16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_f16((int8x8_t)__s0, __p1, 1); \ - __ret; \ -}) +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot180_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} #else -#define vcvt_n_s16_f16(__p0, __p1) __extension__ ({ \ - int16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (int16x4_t) __builtin_neon_vcvt_n_s16_f16((int8x8_t)__rev0, __p1, 1); \ - __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcmla_rot180_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,neon"))) float32x2_t __noswap_vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot180_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \ - uint16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_f16((int8x16_t)__s0, __p1, 49); \ - __ret; \ +#define vcmla_rot180_lane_f32(__p0_190, __p1_190, __p2_190, __p3_190) __extension__ ({ \ + float32x2_t __ret_190; \ + float32x2_t __s0_190 = __p0_190; \ + float32x2_t __s1_190 = __p1_190; \ + float32x2_t __s2_190 = __p2_190; \ +float32x2_t __reint_190 = __s2_190; \ +uint64x1_t __reint1_190 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_190, __p3_190)}; \ + __ret_190 = vcmla_rot180_f32(__s0_190, __s1_190, *(float32x2_t *) &__reint1_190); \ + __ret_190; \ }) #else -#define vcvtq_n_u16_f16(__p0, __p1) __extension__ ({ \ - uint16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint16x8_t) __builtin_neon_vcvtq_n_u16_f16((int8x16_t)__rev0, __p1, 49); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ 
+#define vcmla_rot180_lane_f32(__p0_191, __p1_191, __p2_191, __p3_191) __extension__ ({ \ + float32x2_t __ret_191; \ + float32x2_t __s0_191 = __p0_191; \ + float32x2_t __s1_191 = __p1_191; \ + float32x2_t __s2_191 = __p2_191; \ + float32x2_t __rev0_191; __rev0_191 = __builtin_shufflevector(__s0_191, __s0_191, 1, 0); \ + float32x2_t __rev1_191; __rev1_191 = __builtin_shufflevector(__s1_191, __s1_191, 1, 0); \ + float32x2_t __rev2_191; __rev2_191 = __builtin_shufflevector(__s2_191, __s2_191, 1, 0); \ +float32x2_t __reint_191 = __rev2_191; \ +uint64x1_t __reint1_191 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_191, __p3_191)}; \ + __ret_191 = __noswap_vcmla_rot180_f32(__rev0_191, __rev1_191, *(float32x2_t *) &__reint1_191); \ + __ret_191 = __builtin_shufflevector(__ret_191, __ret_191, 1, 0); \ + __ret_191; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_f16((int8x8_t)__s0, __p1, 17); \ - __ret; \ +#define vcmlaq_rot180_lane_f32(__p0_192, __p1_192, __p2_192, __p3_192) __extension__ ({ \ + float32x4_t __ret_192; \ + float32x4_t __s0_192 = __p0_192; \ + float32x4_t __s1_192 = __p1_192; \ + float32x2_t __s2_192 = __p2_192; \ +float32x2_t __reint_192 = __s2_192; \ +uint64x2_t __reint1_192 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_192, __p3_192), vget_lane_u64(*(uint64x1_t *) &__reint_192, __p3_192)}; \ + __ret_192 = vcmlaq_rot180_f32(__s0_192, __s1_192, *(float32x4_t *) &__reint1_192); \ + __ret_192; \ }) #else -#define vcvt_n_u16_f16(__p0, __p1) __extension__ ({ \ - uint16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint16x4_t) __builtin_neon_vcvt_n_u16_f16((int8x8_t)__rev0, __p1, 17); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ +#define vcmlaq_rot180_lane_f32(__p0_193, __p1_193, 
__p2_193, __p3_193) __extension__ ({ \ + float32x4_t __ret_193; \ + float32x4_t __s0_193 = __p0_193; \ + float32x4_t __s1_193 = __p1_193; \ + float32x2_t __s2_193 = __p2_193; \ + float32x4_t __rev0_193; __rev0_193 = __builtin_shufflevector(__s0_193, __s0_193, 3, 2, 1, 0); \ + float32x4_t __rev1_193; __rev1_193 = __builtin_shufflevector(__s1_193, __s1_193, 3, 2, 1, 0); \ + float32x2_t __rev2_193; __rev2_193 = __builtin_shufflevector(__s2_193, __s2_193, 1, 0); \ +float32x2_t __reint_193 = __rev2_193; \ +uint64x2_t __reint1_193 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_193, __p3_193), vget_lane_u64(*(uint64x1_t *) &__reint_193, __p3_193)}; \ + __ret_193 = __noswap_vcmlaq_rot180_f32(__rev0_193, __rev1_193, *(float32x4_t *) &__reint1_193); \ + __ret_193 = __builtin_shufflevector(__ret_193, __ret_193, 3, 2, 1, 0); \ + __ret_193; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) int16x8_t vcvtq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vcvtq_s16_f16((int8x16_t)__p0, 33); - return __ret; -} +#define vcmla_rot180_laneq_f32(__p0_194, __p1_194, __p2_194, __p3_194) __extension__ ({ \ + float32x2_t __ret_194; \ + float32x2_t __s0_194 = __p0_194; \ + float32x2_t __s1_194 = __p1_194; \ + float32x4_t __s2_194 = __p2_194; \ +float32x4_t __reint_194 = __s2_194; \ +uint64x1_t __reint1_194 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_194, __p3_194)}; \ + __ret_194 = vcmla_rot180_f32(__s0_194, __s1_194, *(float32x2_t *) &__reint1_194); \ + __ret_194; \ +}) #else -__ai __attribute__((target("fullfp16"))) int16x8_t vcvtq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vcvtq_s16_f16((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vcmla_rot180_laneq_f32(__p0_195, __p1_195, __p2_195, __p3_195) 
__extension__ ({ \ + float32x2_t __ret_195; \ + float32x2_t __s0_195 = __p0_195; \ + float32x2_t __s1_195 = __p1_195; \ + float32x4_t __s2_195 = __p2_195; \ + float32x2_t __rev0_195; __rev0_195 = __builtin_shufflevector(__s0_195, __s0_195, 1, 0); \ + float32x2_t __rev1_195; __rev1_195 = __builtin_shufflevector(__s1_195, __s1_195, 1, 0); \ + float32x4_t __rev2_195; __rev2_195 = __builtin_shufflevector(__s2_195, __s2_195, 3, 2, 1, 0); \ +float32x4_t __reint_195 = __rev2_195; \ +uint64x1_t __reint1_195 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_195, __p3_195)}; \ + __ret_195 = __noswap_vcmla_rot180_f32(__rev0_195, __rev1_195, *(float32x2_t *) &__reint1_195); \ + __ret_195 = __builtin_shufflevector(__ret_195, __ret_195, 1, 0); \ + __ret_195; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) int16x4_t vcvt_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vcvt_s16_f16((int8x8_t)__p0, 1); +#define vcmlaq_rot180_laneq_f32(__p0_196, __p1_196, __p2_196, __p3_196) __extension__ ({ \ + float32x4_t __ret_196; \ + float32x4_t __s0_196 = __p0_196; \ + float32x4_t __s1_196 = __p1_196; \ + float32x4_t __s2_196 = __p2_196; \ +float32x4_t __reint_196 = __s2_196; \ +uint64x2_t __reint1_196 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_196, __p3_196), vgetq_lane_u64(*(uint64x2_t *) &__reint_196, __p3_196)}; \ + __ret_196 = vcmlaq_rot180_f32(__s0_196, __s1_196, *(float32x4_t *) &__reint1_196); \ + __ret_196; \ +}) +#else +#define vcmlaq_rot180_laneq_f32(__p0_197, __p1_197, __p2_197, __p3_197) __extension__ ({ \ + float32x4_t __ret_197; \ + float32x4_t __s0_197 = __p0_197; \ + float32x4_t __s1_197 = __p1_197; \ + float32x4_t __s2_197 = __p2_197; \ + float32x4_t __rev0_197; __rev0_197 = __builtin_shufflevector(__s0_197, __s0_197, 3, 2, 1, 0); \ + float32x4_t __rev1_197; __rev1_197 = __builtin_shufflevector(__s1_197, __s1_197, 3, 2, 1, 0); \ + float32x4_t __rev2_197; __rev2_197 = 
__builtin_shufflevector(__s2_197, __s2_197, 3, 2, 1, 0); \ +float32x4_t __reint_197 = __rev2_197; \ +uint64x2_t __reint1_197 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_197, __p3_197), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_197, __p3_197)}; \ + __ret_197 = __noswap_vcmlaq_rot180_f32(__rev0_197, __rev1_197, *(float32x4_t *) &__reint1_197); \ + __ret_197 = __builtin_shufflevector(__ret_197, __ret_197, 3, 2, 1, 0); \ + __ret_197; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #else -__ai __attribute__((target("fullfp16"))) int16x4_t vcvt_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vcvt_s16_f16((int8x8_t)__rev0, 1); +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("v8.3a,neon"))) float32x4_t __noswap_vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) 
uint16x8_t vcvtq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_f16((int8x16_t)__p0, 49); +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot270_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #else -__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcvtq_u16_f16((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcmla_rot270_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,neon"))) float32x2_t __noswap_vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot270_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x4_t vcvt_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcvt_u16_f16((int8x8_t)__p0, 17); - return __ret; -} +#define vcmla_rot270_lane_f32(__p0_198, __p1_198, __p2_198, __p3_198) __extension__ ({ \ + float32x2_t __ret_198; \ + float32x2_t __s0_198 = __p0_198; \ + 
float32x2_t __s1_198 = __p1_198; \ + float32x2_t __s2_198 = __p2_198; \ +float32x2_t __reint_198 = __s2_198; \ +uint64x1_t __reint1_198 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_198, __p3_198)}; \ + __ret_198 = vcmla_rot270_f32(__s0_198, __s1_198, *(float32x2_t *) &__reint1_198); \ + __ret_198; \ +}) #else -__ai __attribute__((target("fullfp16"))) uint16x4_t vcvt_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcvt_u16_f16((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vcmla_rot270_lane_f32(__p0_199, __p1_199, __p2_199, __p3_199) __extension__ ({ \ + float32x2_t __ret_199; \ + float32x2_t __s0_199 = __p0_199; \ + float32x2_t __s1_199 = __p1_199; \ + float32x2_t __s2_199 = __p2_199; \ + float32x2_t __rev0_199; __rev0_199 = __builtin_shufflevector(__s0_199, __s0_199, 1, 0); \ + float32x2_t __rev1_199; __rev1_199 = __builtin_shufflevector(__s1_199, __s1_199, 1, 0); \ + float32x2_t __rev2_199; __rev2_199 = __builtin_shufflevector(__s2_199, __s2_199, 1, 0); \ +float32x2_t __reint_199 = __rev2_199; \ +uint64x1_t __reint1_199 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_199, __p3_199)}; \ + __ret_199 = __noswap_vcmla_rot270_f32(__rev0_199, __rev1_199, *(float32x2_t *) &__reint1_199); \ + __ret_199 = __builtin_shufflevector(__ret_199, __ret_199, 1, 0); \ + __ret_199; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) int16x8_t vcvtaq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_f16((int8x16_t)__p0, 33); - return __ret; -} +#define vcmlaq_rot270_lane_f32(__p0_200, __p1_200, __p2_200, __p3_200) __extension__ ({ \ + float32x4_t __ret_200; \ + float32x4_t __s0_200 = __p0_200; \ + float32x4_t __s1_200 = __p1_200; \ + float32x2_t __s2_200 = __p2_200; \ +float32x2_t __reint_200 = __s2_200; \ 
+uint64x2_t __reint1_200 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_200, __p3_200), vget_lane_u64(*(uint64x1_t *) &__reint_200, __p3_200)}; \ + __ret_200 = vcmlaq_rot270_f32(__s0_200, __s1_200, *(float32x4_t *) &__reint1_200); \ + __ret_200; \ +}) #else -__ai __attribute__((target("fullfp16"))) int16x8_t vcvtaq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vcvtaq_s16_f16((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vcmlaq_rot270_lane_f32(__p0_201, __p1_201, __p2_201, __p3_201) __extension__ ({ \ + float32x4_t __ret_201; \ + float32x4_t __s0_201 = __p0_201; \ + float32x4_t __s1_201 = __p1_201; \ + float32x2_t __s2_201 = __p2_201; \ + float32x4_t __rev0_201; __rev0_201 = __builtin_shufflevector(__s0_201, __s0_201, 3, 2, 1, 0); \ + float32x4_t __rev1_201; __rev1_201 = __builtin_shufflevector(__s1_201, __s1_201, 3, 2, 1, 0); \ + float32x2_t __rev2_201; __rev2_201 = __builtin_shufflevector(__s2_201, __s2_201, 1, 0); \ +float32x2_t __reint_201 = __rev2_201; \ +uint64x2_t __reint1_201 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_201, __p3_201), vget_lane_u64(*(uint64x1_t *) &__reint_201, __p3_201)}; \ + __ret_201 = __noswap_vcmlaq_rot270_f32(__rev0_201, __rev1_201, *(float32x4_t *) &__reint1_201); \ + __ret_201 = __builtin_shufflevector(__ret_201, __ret_201, 3, 2, 1, 0); \ + __ret_201; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) int16x4_t vcvta_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vcvta_s16_f16((int8x8_t)__p0, 1); - return __ret; -} +#define vcmla_rot270_laneq_f32(__p0_202, __p1_202, __p2_202, __p3_202) __extension__ ({ \ + float32x2_t __ret_202; \ + float32x2_t __s0_202 = __p0_202; \ + float32x2_t __s1_202 = __p1_202; \ + float32x4_t __s2_202 = __p2_202; \ 
+float32x4_t __reint_202 = __s2_202; \ +uint64x1_t __reint1_202 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_202, __p3_202)}; \ + __ret_202 = vcmla_rot270_f32(__s0_202, __s1_202, *(float32x2_t *) &__reint1_202); \ + __ret_202; \ +}) #else -__ai __attribute__((target("fullfp16"))) int16x4_t vcvta_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vcvta_s16_f16((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vcmla_rot270_laneq_f32(__p0_203, __p1_203, __p2_203, __p3_203) __extension__ ({ \ + float32x2_t __ret_203; \ + float32x2_t __s0_203 = __p0_203; \ + float32x2_t __s1_203 = __p1_203; \ + float32x4_t __s2_203 = __p2_203; \ + float32x2_t __rev0_203; __rev0_203 = __builtin_shufflevector(__s0_203, __s0_203, 1, 0); \ + float32x2_t __rev1_203; __rev1_203 = __builtin_shufflevector(__s1_203, __s1_203, 1, 0); \ + float32x4_t __rev2_203; __rev2_203 = __builtin_shufflevector(__s2_203, __s2_203, 3, 2, 1, 0); \ +float32x4_t __reint_203 = __rev2_203; \ +uint64x1_t __reint1_203 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_203, __p3_203)}; \ + __ret_203 = __noswap_vcmla_rot270_f32(__rev0_203, __rev1_203, *(float32x2_t *) &__reint1_203); \ + __ret_203 = __builtin_shufflevector(__ret_203, __ret_203, 1, 0); \ + __ret_203; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_f16((int8x16_t)__p0, 49); - return __ret; -} +#define vcmlaq_rot270_laneq_f32(__p0_204, __p1_204, __p2_204, __p3_204) __extension__ ({ \ + float32x4_t __ret_204; \ + float32x4_t __s0_204 = __p0_204; \ + float32x4_t __s1_204 = __p1_204; \ + float32x4_t __s2_204 = __p2_204; \ +float32x4_t __reint_204 = __s2_204; \ +uint64x2_t __reint1_204 = (uint64x2_t) 
{vgetq_lane_u64(*(uint64x2_t *) &__reint_204, __p3_204), vgetq_lane_u64(*(uint64x2_t *) &__reint_204, __p3_204)}; \ + __ret_204 = vcmlaq_rot270_f32(__s0_204, __s1_204, *(float32x4_t *) &__reint1_204); \ + __ret_204; \ +}) #else -__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtaq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcvtaq_u16_f16((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vcmlaq_rot270_laneq_f32(__p0_205, __p1_205, __p2_205, __p3_205) __extension__ ({ \ + float32x4_t __ret_205; \ + float32x4_t __s0_205 = __p0_205; \ + float32x4_t __s1_205 = __p1_205; \ + float32x4_t __s2_205 = __p2_205; \ + float32x4_t __rev0_205; __rev0_205 = __builtin_shufflevector(__s0_205, __s0_205, 3, 2, 1, 0); \ + float32x4_t __rev1_205; __rev1_205 = __builtin_shufflevector(__s1_205, __s1_205, 3, 2, 1, 0); \ + float32x4_t __rev2_205; __rev2_205 = __builtin_shufflevector(__s2_205, __s2_205, 3, 2, 1, 0); \ +float32x4_t __reint_205 = __rev2_205; \ +uint64x2_t __reint1_205 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_205, __p3_205), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_205, __p3_205)}; \ + __ret_205 = __noswap_vcmlaq_rot270_f32(__rev0_205, __rev1_205, *(float32x4_t *) &__reint1_205); \ + __ret_205 = __builtin_shufflevector(__ret_205, __ret_205, 3, 2, 1, 0); \ + __ret_205; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x4_t vcvta_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcvta_u16_f16((int8x8_t)__p0, 17); +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 
41); return __ret; } #else -__ai __attribute__((target("fullfp16"))) uint16x4_t vcvta_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcvta_u16_f16((int8x8_t)__rev0, 17); +__ai __attribute__((target("v8.3a,neon"))) float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("v8.3a,neon"))) float32x4_t __noswap_vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) int16x8_t vcvtmq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_f16((int8x16_t)__p0, 33); +__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot90_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #else -__ai __attribute__((target("fullfp16"))) int16x8_t vcvtmq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vcvtmq_s16_f16((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); 
+__ai __attribute__((target("v8.3a,neon"))) float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcmla_rot90_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("v8.3a,neon"))) float32x2_t __noswap_vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcmla_rot90_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) int16x4_t vcvtm_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vcvtm_s16_f16((int8x8_t)__p0, 1); - return __ret; -} +#define vcmla_rot90_lane_f32(__p0_206, __p1_206, __p2_206, __p3_206) __extension__ ({ \ + float32x2_t __ret_206; \ + float32x2_t __s0_206 = __p0_206; \ + float32x2_t __s1_206 = __p1_206; \ + float32x2_t __s2_206 = __p2_206; \ +float32x2_t __reint_206 = __s2_206; \ +uint64x1_t __reint1_206 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_206, __p3_206)}; \ + __ret_206 = vcmla_rot90_f32(__s0_206, __s1_206, *(float32x2_t *) &__reint1_206); \ + __ret_206; \ +}) #else -__ai __attribute__((target("fullfp16"))) int16x4_t vcvtm_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vcvtm_s16_f16((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vcmla_rot90_lane_f32(__p0_207, __p1_207, __p2_207, __p3_207) __extension__ ({ \ + float32x2_t __ret_207; \ + 
float32x2_t __s0_207 = __p0_207; \ + float32x2_t __s1_207 = __p1_207; \ + float32x2_t __s2_207 = __p2_207; \ + float32x2_t __rev0_207; __rev0_207 = __builtin_shufflevector(__s0_207, __s0_207, 1, 0); \ + float32x2_t __rev1_207; __rev1_207 = __builtin_shufflevector(__s1_207, __s1_207, 1, 0); \ + float32x2_t __rev2_207; __rev2_207 = __builtin_shufflevector(__s2_207, __s2_207, 1, 0); \ +float32x2_t __reint_207 = __rev2_207; \ +uint64x1_t __reint1_207 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_207, __p3_207)}; \ + __ret_207 = __noswap_vcmla_rot90_f32(__rev0_207, __rev1_207, *(float32x2_t *) &__reint1_207); \ + __ret_207 = __builtin_shufflevector(__ret_207, __ret_207, 1, 0); \ + __ret_207; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_f16((int8x16_t)__p0, 49); - return __ret; -} +#define vcmlaq_rot90_lane_f32(__p0_208, __p1_208, __p2_208, __p3_208) __extension__ ({ \ + float32x4_t __ret_208; \ + float32x4_t __s0_208 = __p0_208; \ + float32x4_t __s1_208 = __p1_208; \ + float32x2_t __s2_208 = __p2_208; \ +float32x2_t __reint_208 = __s2_208; \ +uint64x2_t __reint1_208 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_208, __p3_208), vget_lane_u64(*(uint64x1_t *) &__reint_208, __p3_208)}; \ + __ret_208 = vcmlaq_rot90_f32(__s0_208, __s1_208, *(float32x4_t *) &__reint1_208); \ + __ret_208; \ +}) #else -__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtmq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcvtmq_u16_f16((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vcmlaq_rot90_lane_f32(__p0_209, __p1_209, __p2_209, __p3_209) __extension__ ({ \ + float32x4_t __ret_209; \ + float32x4_t __s0_209 = __p0_209; \ 
+ float32x4_t __s1_209 = __p1_209; \ + float32x2_t __s2_209 = __p2_209; \ + float32x4_t __rev0_209; __rev0_209 = __builtin_shufflevector(__s0_209, __s0_209, 3, 2, 1, 0); \ + float32x4_t __rev1_209; __rev1_209 = __builtin_shufflevector(__s1_209, __s1_209, 3, 2, 1, 0); \ + float32x2_t __rev2_209; __rev2_209 = __builtin_shufflevector(__s2_209, __s2_209, 1, 0); \ +float32x2_t __reint_209 = __rev2_209; \ +uint64x2_t __reint1_209 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_209, __p3_209), vget_lane_u64(*(uint64x1_t *) &__reint_209, __p3_209)}; \ + __ret_209 = __noswap_vcmlaq_rot90_f32(__rev0_209, __rev1_209, *(float32x4_t *) &__reint1_209); \ + __ret_209 = __builtin_shufflevector(__ret_209, __ret_209, 3, 2, 1, 0); \ + __ret_209; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x4_t vcvtm_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_f16((int8x8_t)__p0, 17); - return __ret; -} +#define vcmla_rot90_laneq_f32(__p0_210, __p1_210, __p2_210, __p3_210) __extension__ ({ \ + float32x2_t __ret_210; \ + float32x2_t __s0_210 = __p0_210; \ + float32x2_t __s1_210 = __p1_210; \ + float32x4_t __s2_210 = __p2_210; \ +float32x4_t __reint_210 = __s2_210; \ +uint64x1_t __reint1_210 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_210, __p3_210)}; \ + __ret_210 = vcmla_rot90_f32(__s0_210, __s1_210, *(float32x2_t *) &__reint1_210); \ + __ret_210; \ +}) #else -__ai __attribute__((target("fullfp16"))) uint16x4_t vcvtm_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcvtm_u16_f16((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vcmla_rot90_laneq_f32(__p0_211, __p1_211, __p2_211, __p3_211) __extension__ ({ \ + float32x2_t __ret_211; \ + float32x2_t __s0_211 = __p0_211; \ + float32x2_t __s1_211 = __p1_211; \ + 
float32x4_t __s2_211 = __p2_211; \ + float32x2_t __rev0_211; __rev0_211 = __builtin_shufflevector(__s0_211, __s0_211, 1, 0); \ + float32x2_t __rev1_211; __rev1_211 = __builtin_shufflevector(__s1_211, __s1_211, 1, 0); \ + float32x4_t __rev2_211; __rev2_211 = __builtin_shufflevector(__s2_211, __s2_211, 3, 2, 1, 0); \ +float32x4_t __reint_211 = __rev2_211; \ +uint64x1_t __reint1_211 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_211, __p3_211)}; \ + __ret_211 = __noswap_vcmla_rot90_f32(__rev0_211, __rev1_211, *(float32x2_t *) &__reint1_211); \ + __ret_211 = __builtin_shufflevector(__ret_211, __ret_211, 1, 0); \ + __ret_211; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) int16x8_t vcvtnq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_f16((int8x16_t)__p0, 33); - return __ret; -} +#define vcmlaq_rot90_laneq_f32(__p0_212, __p1_212, __p2_212, __p3_212) __extension__ ({ \ + float32x4_t __ret_212; \ + float32x4_t __s0_212 = __p0_212; \ + float32x4_t __s1_212 = __p1_212; \ + float32x4_t __s2_212 = __p2_212; \ +float32x4_t __reint_212 = __s2_212; \ +uint64x2_t __reint1_212 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_212, __p3_212), vgetq_lane_u64(*(uint64x2_t *) &__reint_212, __p3_212)}; \ + __ret_212 = vcmlaq_rot90_f32(__s0_212, __s1_212, *(float32x4_t *) &__reint1_212); \ + __ret_212; \ +}) #else -__ai __attribute__((target("fullfp16"))) int16x8_t vcvtnq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vcvtnq_s16_f16((int8x16_t)__rev0, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vcmlaq_rot90_laneq_f32(__p0_213, __p1_213, __p2_213, __p3_213) __extension__ ({ \ + float32x4_t __ret_213; \ + float32x4_t __s0_213 = __p0_213; \ + float32x4_t __s1_213 = __p1_213; \ + float32x4_t __s2_213 
= __p2_213; \ + float32x4_t __rev0_213; __rev0_213 = __builtin_shufflevector(__s0_213, __s0_213, 3, 2, 1, 0); \ + float32x4_t __rev1_213; __rev1_213 = __builtin_shufflevector(__s1_213, __s1_213, 3, 2, 1, 0); \ + float32x4_t __rev2_213; __rev2_213 = __builtin_shufflevector(__s2_213, __s2_213, 3, 2, 1, 0); \ +float32x4_t __reint_213 = __rev2_213; \ +uint64x2_t __reint1_213 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_213, __p3_213), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_213, __p3_213)}; \ + __ret_213 = __noswap_vcmlaq_rot90_f32(__rev0_213, __rev1_213, *(float32x4_t *) &__reint1_213); \ + __ret_213 = __builtin_shufflevector(__ret_213, __ret_213, 3, 2, 1, 0); \ + __ret_213; \ +}) #endif +#if !defined(__aarch64__) && !defined(__arm64ec__) #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) int16x4_t vcvtn_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vcvtn_s16_f16((int8x8_t)__p0, 1); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_f32((int8x16_t)__p0, 11); return __ret; } #else -__ai __attribute__((target("fullfp16"))) int16x4_t vcvtn_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vcvtn_s16_f16((int8x8_t)__rev0, 1); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_f32((int8x16_t)__rev0, 11); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t __noswap___a32_vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t) 
__builtin_neon___a32_vcvt_bf16_f32((int8x16_t)__p0, 11); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_f16((int8x16_t)__p0, 49); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + __ret = __a32_vcvt_bf16_f32(__p0); return __ret; } #else -__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtnq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcvtnq_u16_f16((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap___a32_vcvt_bf16_f32(__rev0); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x4_t vcvtn_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_f16((int8x8_t)__p0, 17); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { + bfloat16x8_t __ret; + __ret = vcombine_bf16(__a32_vcvt_bf16_f32(__p1), vget_low_bf16(__p0)); return __ret; } #else -__ai __attribute__((target("fullfp16"))) uint16x4_t vcvtn_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcvtn_u16_f16((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, 
float32x4_t __p1) { + bfloat16x8_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vcombine_bf16(__noswap___a32_vcvt_bf16_f32(__rev1), __noswap_vget_low_bf16(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) int16x8_t vcvtpq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_f16((int8x16_t)__p0, 33); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = vcombine_bf16((bfloat16x4_t)(0ULL), __a32_vcvt_bf16_f32(__p0)); return __ret; } #else -__ai __attribute__((target("fullfp16"))) int16x8_t vcvtpq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vcvtpq_s16_f16((int8x16_t)__rev0, 33); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vcombine_bf16((bfloat16x4_t)(0ULL), __noswap___a32_vcvt_bf16_f32(__rev0)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) int16x4_t vcvtp_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vcvtp_s16_f16((int8x8_t)__p0, 1); +__ai __attribute__((target("bf16,neon"))) poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) int16x4_t vcvtp_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - float16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vcvtp_s16_f16((int8x8_t)__rev0, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_f16((int8x16_t)__p0, 49); +__ai __attribute__((target("bf16,neon"))) poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) uint16x8_t vcvtpq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcvtpq_u16_f16((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) uint16x4_t vcvtp_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_f16((int8x8_t)__p0, 17); +__ai __attribute__((target("bf16,neon"))) poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) uint16x4_t vcvtp_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcvtp_u16_f16((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai 
__attribute__((target("bf16,neon"))) poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - __ret = (float16x8_t) __builtin_neon_vextq_f16((int8x16_t)__s0, (int8x16_t)__s1, __p2, 40); \ - __ret; \ -}) -#else -#define vextq_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16x8_t) __builtin_neon_vextq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 40); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vext_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - __ret = (float16x4_t) __builtin_neon_vext_f16((int8x8_t)__s0, (int8x8_t)__s1, __p2, 8); \ - __ret; \ -}) -#else -#define vext_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (float16x4_t) __builtin_neon_vext_f16((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 8); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vfmaq_f16((int8x16_t)__p0, (int8x16_t)__p1, 
(int8x16_t)__p2, 40); +__ai __attribute__((target("bf16,neon"))) uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vfmaq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); return __ret; } -__ai __attribute__((target("fullfp16"))) float16x8_t __noswap_vfmaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vfmaq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); +__ai __attribute__((target("bf16,neon"))) uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vfma_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); +__ai __attribute__((target("bf16,neon"))) uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vfma_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); return __ret; } -__ai __attribute__((target("fullfp16"))) float16x4_t __noswap_vfma_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vfma_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); +__ai __attribute__((target("bf16,neon"))) float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { +__ai __attribute__((target("bf16,neon"))) float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) { float16x8_t __ret; - __ret = vfmaq_f16(__p0, -__p1, __p2); + __ret = (float16x8_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vfmsq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vfmaq_f16(__rev0, -__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) { + int32x4_t __ret; + __ret = 
(int32x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = vfma_f16(__p0, -__p1, __p2); +__ai __attribute__((target("bf16,neon"))) int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vfms_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = __noswap_vfma_f16(__rev0, -__rev1, __rev2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vmaxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); +__ai __attribute__((target("bf16,neon"))) uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vmaxq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vmaxq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai 
__attribute__((target("bf16,neon"))) uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vmax_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); +__ai __attribute__((target("bf16,neon"))) uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vmax_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vmax_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vminq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); +__ai __attribute__((target("bf16,neon"))) int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vminq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vminq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) { +__ai __attribute__((target("bf16,neon"))) float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) { float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vmin_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); + __ret = (float16x4_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vmin_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vmin_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __p0 * __p1; +__ai __attribute__((target("bf16,neon"))) int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vmulq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai 
__attribute__((target("bf16,neon"))) int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __p0 * __p1; +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vmul_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_f16(__p0_153, __p1_153, __p2_153) __extension__ ({ \ - float16x8_t __ret_153; \ - float16x8_t __s0_153 = __p0_153; \ - float16x4_t __s1_153 = __p1_153; \ - __ret_153 = __s0_153 * splatq_lane_f16(__s1_153, __p2_153); \ - __ret_153; \ -}) -#else -#define vmulq_lane_f16(__p0_154, __p1_154, __p2_154) __extension__ ({ \ - float16x8_t __ret_154; \ - float16x8_t __s0_154 = __p0_154; \ - float16x4_t __s1_154 = __p1_154; \ - float16x8_t __rev0_154; __rev0_154 = __builtin_shufflevector(__s0_154, __s0_154, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev1_154; __rev1_154 = __builtin_shufflevector(__s1_154, __s1_154, 3, 2, 1, 0); \ - __ret_154 = __rev0_154 * __noswap_splatq_lane_f16(__rev1_154, __p2_154); \ - __ret_154 = __builtin_shufflevector(__ret_154, __ret_154, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_154; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_lane_f16(__p0_155, __p1_155, 
__p2_155) __extension__ ({ \ - float16x4_t __ret_155; \ - float16x4_t __s0_155 = __p0_155; \ - float16x4_t __s1_155 = __p1_155; \ - __ret_155 = __s0_155 * splat_lane_f16(__s1_155, __p2_155); \ - __ret_155; \ -}) -#else -#define vmul_lane_f16(__p0_156, __p1_156, __p2_156) __extension__ ({ \ - float16x4_t __ret_156; \ - float16x4_t __s0_156 = __p0_156; \ - float16x4_t __s1_156 = __p1_156; \ - float16x4_t __rev0_156; __rev0_156 = __builtin_shufflevector(__s0_156, __s0_156, 3, 2, 1, 0); \ - float16x4_t __rev1_156; __rev1_156 = __builtin_shufflevector(__s1_156, __s1_156, 3, 2, 1, 0); \ - __ret_156 = __rev0_156 * __noswap_splat_lane_f16(__rev1_156, __p2_156); \ - __ret_156 = __builtin_shufflevector(__ret_156, __ret_156, 3, 2, 1, 0); \ - __ret_156; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulq_n_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - __ret = __s0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \ - __ret; \ -}) -#else -#define vmulq_n_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = __rev0 * (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}; \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_n_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - __ret = __s0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \ - __ret; \ -}) -#else -#define vmul_n_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = __rev0 * (float16x4_t) {__s1, __s1, __s1, __s1}; \ - __ret = __builtin_shufflevector(__ret, __ret, 
3, 2, 1, 0); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vnegq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = -__p0; +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vnegq_f16(float16x8_t __p0) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = -__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vneg_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = -__p0; +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vneg_f16(float16x4_t __p0) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = -__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vpadd_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } 
-#else -__ai __attribute__((target("fullfp16"))) float16x4_t vpadd_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vpadd_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vpmax_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vpmax_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vpmax_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vpmin_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) { + bfloat16x8_t __ret; + __ret 
= (bfloat16x8_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vpmin_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vpmin_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vrecpeq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrecpeq_f16((int8x16_t)__p0, 40); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vrecpeq_f16(float16x8_t __p0) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrecpeq_f16((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vrecpe_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrecpe_f16((int8x8_t)__p0, 8); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) float16x4_t 
vrecpe_f16(float16x4_t __p0) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrecpe_f16((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrecpsq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vrecpsq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrecpsq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vrecps_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrecps_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vrecps_f16(float16x4_t 
__p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrecps_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vrev64q_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0, 7, 6, 5, 4); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vrev64q_f16(float16x8_t __p0) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0, 7, 6, 5, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vrev64_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vrev64_f16(float16x4_t __p0) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = 
__builtin_shufflevector(__rev0, __rev0, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vrsqrteq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrsqrteq_f16((int8x16_t)__p0, 40); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vrsqrteq_f16(float16x8_t __p0) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrsqrteq_f16((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_lane_s32(__p0_214, __p1_214, __p2_214) __extension__ ({ \ + int32x4_t __ret_214; \ + int32x4_t __s0_214 = __p0_214; \ + int32x2_t __s1_214 = __p1_214; \ + __ret_214 = vqdmulhq_s32(__s0_214, splatq_lane_s32(__s1_214, __p2_214)); \ + __ret_214; \ +}) +#else +#define vqdmulhq_lane_s32(__p0_215, __p1_215, __p2_215) __extension__ ({ \ + int32x4_t __ret_215; \ + int32x4_t __s0_215 = __p0_215; \ + int32x2_t __s1_215 = __p1_215; \ + int32x4_t __rev0_215; __rev0_215 = __builtin_shufflevector(__s0_215, __s0_215, 3, 2, 1, 0); \ + int32x2_t __rev1_215; __rev1_215 = __builtin_shufflevector(__s1_215, __s1_215, 1, 0); \ + __ret_215 = __noswap_vqdmulhq_s32(__rev0_215, __noswap_splatq_lane_s32(__rev1_215, __p2_215)); \ + __ret_215 = __builtin_shufflevector(__ret_215, 
__ret_215, 3, 2, 1, 0); \ + __ret_215; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vrsqrte_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrsqrte_f16((int8x8_t)__p0, 8); - return __ret; -} +#define vqdmulhq_lane_s16(__p0_216, __p1_216, __p2_216) __extension__ ({ \ + int16x8_t __ret_216; \ + int16x8_t __s0_216 = __p0_216; \ + int16x4_t __s1_216 = __p1_216; \ + __ret_216 = vqdmulhq_s16(__s0_216, splatq_lane_s16(__s1_216, __p2_216)); \ + __ret_216; \ +}) #else -__ai __attribute__((target("fullfp16"))) float16x4_t vrsqrte_f16(float16x4_t __p0) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrsqrte_f16((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vqdmulhq_lane_s16(__p0_217, __p1_217, __p2_217) __extension__ ({ \ + int16x8_t __ret_217; \ + int16x8_t __s0_217 = __p0_217; \ + int16x4_t __s1_217 = __p1_217; \ + int16x8_t __rev0_217; __rev0_217 = __builtin_shufflevector(__s0_217, __s0_217, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_217; __rev1_217 = __builtin_shufflevector(__s1_217, __s1_217, 3, 2, 1, 0); \ + __ret_217 = __noswap_vqdmulhq_s16(__rev0_217, __noswap_splatq_lane_s16(__rev1_217, __p2_217)); \ + __ret_217 = __builtin_shufflevector(__ret_217, __ret_217, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_217; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrsqrtsq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} +#define vqdmulh_lane_s32(__p0_218, __p1_218, __p2_218) __extension__ ({ \ + int32x2_t __ret_218; \ + int32x2_t __s0_218 = __p0_218; \ + int32x2_t __s1_218 = __p1_218; \ + __ret_218 = vqdmulh_s32(__s0_218, splat_lane_s32(__s1_218, __p2_218)); \ + __ret_218; \ +}) 
#else -__ai __attribute__((target("fullfp16"))) float16x8_t vrsqrtsq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrsqrtsq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqdmulh_lane_s32(__p0_219, __p1_219, __p2_219) __extension__ ({ \ + int32x2_t __ret_219; \ + int32x2_t __s0_219 = __p0_219; \ + int32x2_t __s1_219 = __p1_219; \ + int32x2_t __rev0_219; __rev0_219 = __builtin_shufflevector(__s0_219, __s0_219, 1, 0); \ + int32x2_t __rev1_219; __rev1_219 = __builtin_shufflevector(__s1_219, __s1_219, 1, 0); \ + __ret_219 = __noswap_vqdmulh_s32(__rev0_219, __noswap_splat_lane_s32(__rev1_219, __p2_219)); \ + __ret_219 = __builtin_shufflevector(__ret_219, __ret_219, 1, 0); \ + __ret_219; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrsqrts_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} +#define vqdmulh_lane_s16(__p0_220, __p1_220, __p2_220) __extension__ ({ \ + int16x4_t __ret_220; \ + int16x4_t __s0_220 = __p0_220; \ + int16x4_t __s1_220 = __p1_220; \ + __ret_220 = vqdmulh_s16(__s0_220, splat_lane_s16(__s1_220, __p2_220)); \ + __ret_220; \ +}) #else -__ai __attribute__((target("fullfp16"))) float16x4_t vrsqrts_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrsqrts_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 
2, 1, 0); - return __ret; -} +#define vqdmulh_lane_s16(__p0_221, __p1_221, __p2_221) __extension__ ({ \ + int16x4_t __ret_221; \ + int16x4_t __s0_221 = __p0_221; \ + int16x4_t __s1_221 = __p1_221; \ + int16x4_t __rev0_221; __rev0_221 = __builtin_shufflevector(__s0_221, __s0_221, 3, 2, 1, 0); \ + int16x4_t __rev1_221; __rev1_221 = __builtin_shufflevector(__s1_221, __s1_221, 3, 2, 1, 0); \ + __ret_221 = __noswap_vqdmulh_s16(__rev0_221, __noswap_splat_lane_s16(__rev1_221, __p2_221)); \ + __ret_221 = __builtin_shufflevector(__ret_221, __ret_221, 3, 2, 1, 0); \ + __ret_221; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __p0 - __p1; - return __ret; -} +#define vqrdmulhq_lane_s32(__p0_222, __p1_222, __p2_222) __extension__ ({ \ + int32x4_t __ret_222; \ + int32x4_t __s0_222 = __p0_222; \ + int32x2_t __s1_222 = __p1_222; \ + __ret_222 = vqrdmulhq_s32(__s0_222, splatq_lane_s32(__s1_222, __p2_222)); \ + __ret_222; \ +}) #else -__ai __attribute__((target("fullfp16"))) float16x8_t vsubq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqrdmulhq_lane_s32(__p0_223, __p1_223, __p2_223) __extension__ ({ \ + int32x4_t __ret_223; \ + int32x4_t __s0_223 = __p0_223; \ + int32x2_t __s1_223 = __p1_223; \ + int32x4_t __rev0_223; __rev0_223 = __builtin_shufflevector(__s0_223, __s0_223, 3, 2, 1, 0); \ + int32x2_t __rev1_223; __rev1_223 = __builtin_shufflevector(__s1_223, __s1_223, 1, 0); \ + __ret_223 = __noswap_vqrdmulhq_s32(__rev0_223, __noswap_splatq_lane_s32(__rev1_223, __p2_223)); \ + __ret_223 = __builtin_shufflevector(__ret_223, __ret_223, 
3, 2, 1, 0); \ + __ret_223; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __p0 - __p1; - return __ret; -} +#define vqrdmulhq_lane_s16(__p0_224, __p1_224, __p2_224) __extension__ ({ \ + int16x8_t __ret_224; \ + int16x8_t __s0_224 = __p0_224; \ + int16x4_t __s1_224 = __p1_224; \ + __ret_224 = vqrdmulhq_s16(__s0_224, splatq_lane_s16(__s1_224, __p2_224)); \ + __ret_224; \ +}) #else -__ai __attribute__((target("fullfp16"))) float16x4_t vsub_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vqrdmulhq_lane_s16(__p0_225, __p1_225, __p2_225) __extension__ ({ \ + int16x8_t __ret_225; \ + int16x8_t __s0_225 = __p0_225; \ + int16x4_t __s1_225 = __p1_225; \ + int16x8_t __rev0_225; __rev0_225 = __builtin_shufflevector(__s0_225, __s0_225, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_225; __rev1_225 = __builtin_shufflevector(__s1_225, __s1_225, 3, 2, 1, 0); \ + __ret_225 = __noswap_vqrdmulhq_s16(__rev0_225, __noswap_splatq_lane_s16(__rev1_225, __p2_225)); \ + __ret_225 = __builtin_shufflevector(__ret_225, __ret_225, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_225; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8x2_t __ret; - __builtin_neon_vtrnq_f16(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} +#define vqrdmulh_lane_s32(__p0_226, __p1_226, __p2_226) __extension__ ({ \ + int32x2_t __ret_226; \ + int32x2_t __s0_226 = __p0_226; \ + int32x2_t __s1_226 = __p1_226; \ + __ret_226 = vqrdmulh_s32(__s0_226, splat_lane_s32(__s1_226, __p2_226)); \ + __ret_226; \ +}) #else 
-__ai __attribute__((target("fullfp16"))) float16x8x2_t vtrnq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8x2_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vtrnq_f16(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqrdmulh_lane_s32(__p0_227, __p1_227, __p2_227) __extension__ ({ \ + int32x2_t __ret_227; \ + int32x2_t __s0_227 = __p0_227; \ + int32x2_t __s1_227 = __p1_227; \ + int32x2_t __rev0_227; __rev0_227 = __builtin_shufflevector(__s0_227, __s0_227, 1, 0); \ + int32x2_t __rev1_227; __rev1_227 = __builtin_shufflevector(__s1_227, __s1_227, 1, 0); \ + __ret_227 = __noswap_vqrdmulh_s32(__rev0_227, __noswap_splat_lane_s32(__rev1_227, __p2_227)); \ + __ret_227 = __builtin_shufflevector(__ret_227, __ret_227, 1, 0); \ + __ret_227; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4x2_t __ret; - __builtin_neon_vtrn_f16(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} +#define vqrdmulh_lane_s16(__p0_228, __p1_228, __p2_228) __extension__ ({ \ + int16x4_t __ret_228; \ + int16x4_t __s0_228 = __p0_228; \ + int16x4_t __s1_228 = __p1_228; \ + __ret_228 = vqrdmulh_s16(__s0_228, splat_lane_s16(__s1_228, __p2_228)); \ + __ret_228; \ +}) #else -__ai __attribute__((target("fullfp16"))) float16x4x2_t vtrn_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4x2_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vtrn_f16(&__ret, (int8x8_t)__rev0, 
(int8x8_t)__rev1, 8); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); - return __ret; -} +#define vqrdmulh_lane_s16(__p0_229, __p1_229, __p2_229) __extension__ ({ \ + int16x4_t __ret_229; \ + int16x4_t __s0_229 = __p0_229; \ + int16x4_t __s1_229 = __p1_229; \ + int16x4_t __rev0_229; __rev0_229 = __builtin_shufflevector(__s0_229, __s0_229, 3, 2, 1, 0); \ + int16x4_t __rev1_229; __rev1_229 = __builtin_shufflevector(__s1_229, __s1_229, 3, 2, 1, 0); \ + __ret_229 = __noswap_vqrdmulh_s16(__rev0_229, __noswap_splat_lane_s16(__rev1_229, __p2_229)); \ + __ret_229 = __builtin_shufflevector(__ret_229, __ret_229, 3, 2, 1, 0); \ + __ret_229; \ +}) #endif -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8x2_t __ret; - __builtin_neon_vuzpq_f16(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) float16x8x2_t vuzpq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8x2_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vuzpq_f16(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4x2_t vuzp_f16(float16x4_t 
__p0, float16x4_t __p1) { - float16x4x2_t __ret; - __builtin_neon_vuzp_f16(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) float16x4x2_t vuzp_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4x2_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vuzp_f16(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8x2_t __ret; - __builtin_neon_vzipq_f16(&__ret, (int8x16_t)__p0, (int8x16_t)__p1, 40); +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) float16x8x2_t vzipq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8x2_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __builtin_neon_vzipq_f16(&__ret, (int8x16_t)__rev0, (int8x16_t)__rev1, 40); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 7, 6, 5, 4, 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly8x8_t 
vreinterpret_p8_s8(int8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4x2_t __ret; - __builtin_neon_vzip_f16(&__ret, (int8x8_t)__p0, (int8x8_t)__p1, 8); +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); return __ret; } -#else -__ai __attribute__((target("fullfp16"))) float16x4x2_t vzip_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4x2_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __builtin_neon_vzip_f16(&__ret, (int8x8_t)__rev0, (int8x8_t)__rev1, 8); - - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 3, 2, 1, 0); - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("i8mm"))) uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vmmlaq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); return __ret; } -#else -__ai __attribute__((target("i8mm"))) uint32x4_t vmmlaq_u32(uint32x4_t __p0, uint8x16_t __p1, uint8x16_t __p2) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 
15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vmmlaq_u32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("i8mm"))) int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vmmlaq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); return __ret; } -#else -__ai __attribute__((target("i8mm"))) int32x4_t vmmlaq_s32(int32x4_t __p0, int8x16_t __p1, int8x16_t __p2) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vmmlaq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("i8mm"))) int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vusdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } 
-#else -__ai __attribute__((target("i8mm"))) int32x4_t vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vusdotq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } -__ai __attribute__((target("i8mm"))) int32x4_t __noswap_vusdotq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vusdotq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("i8mm"))) int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vusdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } -#else -__ai __attribute__((target("i8mm"))) int32x2_t vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = 
(int32x2_t) __builtin_neon_vusdot_s32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } -__ai __attribute__((target("i8mm"))) int32x2_t __noswap_vusdot_s32(int32x2_t __p0, uint8x8_t __p1, int8x8_t __p2) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vusdot_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vusdotq_lane_s32(__p0_157, __p1_157, __p2_157, __p3_157) __extension__ ({ \ - int32x4_t __ret_157; \ - int32x4_t __s0_157 = __p0_157; \ - uint8x16_t __s1_157 = __p1_157; \ - int8x8_t __s2_157 = __p2_157; \ -int8x8_t __reint_157 = __s2_157; \ - __ret_157 = vusdotq_s32(__s0_157, __s1_157, (int8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_157, __p3_157))); \ - __ret_157; \ -}) -#else -#define vusdotq_lane_s32(__p0_158, __p1_158, __p2_158, __p3_158) __extension__ ({ \ - int32x4_t __ret_158; \ - int32x4_t __s0_158 = __p0_158; \ - uint8x16_t __s1_158 = __p1_158; \ - int8x8_t __s2_158 = __p2_158; \ - int32x4_t __rev0_158; __rev0_158 = __builtin_shufflevector(__s0_158, __s0_158, 3, 2, 1, 0); \ - uint8x16_t __rev1_158; __rev1_158 = __builtin_shufflevector(__s1_158, __s1_158, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __rev2_158; __rev2_158 = __builtin_shufflevector(__s2_158, __s2_158, 7, 6, 5, 4, 3, 2, 1, 0); \ -int8x8_t __reint_158 = __rev2_158; \ - __ret_158 = __noswap_vusdotq_s32(__rev0_158, __rev1_158, (int8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_158, __p3_158))); \ - __ret_158 = __builtin_shufflevector(__ret_158, __ret_158, 3, 2, 1, 0); \ - __ret_158; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define 
vusdot_lane_s32(__p0_159, __p1_159, __p2_159, __p3_159) __extension__ ({ \ - int32x2_t __ret_159; \ - int32x2_t __s0_159 = __p0_159; \ - uint8x8_t __s1_159 = __p1_159; \ - int8x8_t __s2_159 = __p2_159; \ -int8x8_t __reint_159 = __s2_159; \ - __ret_159 = vusdot_s32(__s0_159, __s1_159, (int8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_159, __p3_159))); \ - __ret_159; \ -}) -#else -#define vusdot_lane_s32(__p0_160, __p1_160, __p2_160, __p3_160) __extension__ ({ \ - int32x2_t __ret_160; \ - int32x2_t __s0_160 = __p0_160; \ - uint8x8_t __s1_160 = __p1_160; \ - int8x8_t __s2_160 = __p2_160; \ - int32x2_t __rev0_160; __rev0_160 = __builtin_shufflevector(__s0_160, __s0_160, 1, 0); \ - uint8x8_t __rev1_160; __rev1_160 = __builtin_shufflevector(__s1_160, __s1_160, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __rev2_160; __rev2_160 = __builtin_shufflevector(__s2_160, __s2_160, 7, 6, 5, 4, 3, 2, 1, 0); \ -int8x8_t __reint_160 = __rev2_160; \ - __ret_160 = __noswap_vusdot_s32(__rev0_160, __rev1_160, (int8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_160, __p3_160))); \ - __ret_160 = __builtin_shufflevector(__ret_160, __ret_160, 1, 0); \ - __ret_160; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("i8mm"))) int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vusmmlaq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } -#else -__ai __attribute__((target("i8mm"))) int32x4_t vusmmlaq_s32(int32x4_t __p0, uint8x16_t __p1, int8x16_t __p2) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 
14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vusmmlaq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.1a"))) int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqrdmlahq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.1a"))) int32x4_t vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vqrdmlahq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } -__ai __attribute__((target("v8.1a"))) int32x4_t __noswap_vqrdmlahq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqrdmlahq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.1a"))) int16x8_t 
vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqrdmlahq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.1a"))) int16x8_t vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vqrdmlahq_s16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); return __ret; } -__ai __attribute__((target("v8.1a"))) int16x8_t __noswap_vqrdmlahq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqrdmlahq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.1a"))) int32x2_t vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqrdmlah_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.1a"))) int32x2_t vqrdmlah_s32(int32x2_t 
__p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (int32x2_t) __builtin_neon_vqrdmlah_s32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); return __ret; } -__ai __attribute__((target("v8.1a"))) int32x2_t __noswap_vqrdmlah_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqrdmlah_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.1a"))) int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqrdmlah_s16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.1a"))) int16x4_t vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vqrdmlah_s16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) 
poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); return __ret; } -__ai __attribute__((target("v8.1a"))) int16x4_t __noswap_vqrdmlah_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqrdmlah_s16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlahq_lane_s32(__p0_161, __p1_161, __p2_161, __p3_161) __extension__ ({ \ - int32x4_t __ret_161; \ - int32x4_t __s0_161 = __p0_161; \ - int32x4_t __s1_161 = __p1_161; \ - int32x2_t __s2_161 = __p2_161; \ - __ret_161 = vqrdmlahq_s32(__s0_161, __s1_161, splatq_lane_s32(__s2_161, __p3_161)); \ - __ret_161; \ -}) -#else -#define vqrdmlahq_lane_s32(__p0_162, __p1_162, __p2_162, __p3_162) __extension__ ({ \ - int32x4_t __ret_162; \ - int32x4_t __s0_162 = __p0_162; \ - int32x4_t __s1_162 = __p1_162; \ - int32x2_t __s2_162 = __p2_162; \ - int32x4_t __rev0_162; __rev0_162 = __builtin_shufflevector(__s0_162, __s0_162, 3, 2, 1, 0); \ - int32x4_t __rev1_162; __rev1_162 = __builtin_shufflevector(__s1_162, __s1_162, 3, 2, 1, 0); \ - int32x2_t __rev2_162; __rev2_162 = __builtin_shufflevector(__s2_162, __s2_162, 1, 0); \ - __ret_162 = __noswap_vqrdmlahq_s32(__rev0_162, __rev1_162, __noswap_splatq_lane_s32(__rev2_162, __p3_162)); \ - __ret_162 = __builtin_shufflevector(__ret_162, __ret_162, 3, 2, 1, 0); \ - __ret_162; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlahq_lane_s16(__p0_163, __p1_163, __p2_163, __p3_163) __extension__ ({ \ - int16x8_t __ret_163; \ - int16x8_t __s0_163 = __p0_163; \ - int16x8_t __s1_163 = __p1_163; \ - int16x4_t __s2_163 = __p2_163; \ - __ret_163 = vqrdmlahq_s16(__s0_163, __s1_163, splatq_lane_s16(__s2_163, __p3_163)); \ - __ret_163; \ -}) -#else -#define vqrdmlahq_lane_s16(__p0_164, __p1_164, 
__p2_164, __p3_164) __extension__ ({ \ - int16x8_t __ret_164; \ - int16x8_t __s0_164 = __p0_164; \ - int16x8_t __s1_164 = __p1_164; \ - int16x4_t __s2_164 = __p2_164; \ - int16x8_t __rev0_164; __rev0_164 = __builtin_shufflevector(__s0_164, __s0_164, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_164; __rev1_164 = __builtin_shufflevector(__s1_164, __s1_164, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_164; __rev2_164 = __builtin_shufflevector(__s2_164, __s2_164, 3, 2, 1, 0); \ - __ret_164 = __noswap_vqrdmlahq_s16(__rev0_164, __rev1_164, __noswap_splatq_lane_s16(__rev2_164, __p3_164)); \ - __ret_164 = __builtin_shufflevector(__ret_164, __ret_164, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_164; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlah_lane_s32(__p0_165, __p1_165, __p2_165, __p3_165) __extension__ ({ \ - int32x2_t __ret_165; \ - int32x2_t __s0_165 = __p0_165; \ - int32x2_t __s1_165 = __p1_165; \ - int32x2_t __s2_165 = __p2_165; \ - __ret_165 = vqrdmlah_s32(__s0_165, __s1_165, splat_lane_s32(__s2_165, __p3_165)); \ - __ret_165; \ -}) -#else -#define vqrdmlah_lane_s32(__p0_166, __p1_166, __p2_166, __p3_166) __extension__ ({ \ - int32x2_t __ret_166; \ - int32x2_t __s0_166 = __p0_166; \ - int32x2_t __s1_166 = __p1_166; \ - int32x2_t __s2_166 = __p2_166; \ - int32x2_t __rev0_166; __rev0_166 = __builtin_shufflevector(__s0_166, __s0_166, 1, 0); \ - int32x2_t __rev1_166; __rev1_166 = __builtin_shufflevector(__s1_166, __s1_166, 1, 0); \ - int32x2_t __rev2_166; __rev2_166 = __builtin_shufflevector(__s2_166, __s2_166, 1, 0); \ - __ret_166 = __noswap_vqrdmlah_s32(__rev0_166, __rev1_166, __noswap_splat_lane_s32(__rev2_166, __p3_166)); \ - __ret_166 = __builtin_shufflevector(__ret_166, __ret_166, 1, 0); \ - __ret_166; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlah_lane_s16(__p0_167, __p1_167, __p2_167, __p3_167) __extension__ ({ \ - int16x4_t __ret_167; \ - int16x4_t __s0_167 = __p0_167; \ - int16x4_t __s1_167 = __p1_167; \ - int16x4_t __s2_167 = __p2_167; \ 
- __ret_167 = vqrdmlah_s16(__s0_167, __s1_167, splat_lane_s16(__s2_167, __p3_167)); \ - __ret_167; \ -}) -#else -#define vqrdmlah_lane_s16(__p0_168, __p1_168, __p2_168, __p3_168) __extension__ ({ \ - int16x4_t __ret_168; \ - int16x4_t __s0_168 = __p0_168; \ - int16x4_t __s1_168 = __p1_168; \ - int16x4_t __s2_168 = __p2_168; \ - int16x4_t __rev0_168; __rev0_168 = __builtin_shufflevector(__s0_168, __s0_168, 3, 2, 1, 0); \ - int16x4_t __rev1_168; __rev1_168 = __builtin_shufflevector(__s1_168, __s1_168, 3, 2, 1, 0); \ - int16x4_t __rev2_168; __rev2_168 = __builtin_shufflevector(__s2_168, __s2_168, 3, 2, 1, 0); \ - __ret_168 = __noswap_vqrdmlah_s16(__rev0_168, __rev1_168, __noswap_splat_lane_s16(__rev2_168, __p3_168)); \ - __ret_168 = __builtin_shufflevector(__ret_168, __ret_168, 3, 2, 1, 0); \ - __ret_168; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.1a"))) int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqrdmlshq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.1a"))) int32x4_t vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vqrdmlshq_s32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); return __ret; } -__ai __attribute__((target("v8.1a"))) int32x4_t 
__noswap_vqrdmlshq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vqrdmlshq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.1a"))) int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqrdmlshq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.1a"))) int16x8_t vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vqrdmlshq_s16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); return __ret; } -__ai __attribute__((target("v8.1a"))) int16x8_t __noswap_vqrdmlshq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vqrdmlshq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai 
__attribute__((target("v8.1a"))) int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqrdmlsh_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.1a"))) int32x2_t vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (int32x2_t) __builtin_neon_vqrdmlsh_s32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); return __ret; } -__ai __attribute__((target("v8.1a"))) int32x2_t __noswap_vqrdmlsh_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vqrdmlsh_s32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 2); +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.1a"))) int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqrdmlsh_s16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.1a"))) int16x4_t vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - 
int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vqrdmlsh_s16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); return __ret; } -__ai __attribute__((target("v8.1a"))) int16x4_t __noswap_vqrdmlsh_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vqrdmlsh_s16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 1); +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlshq_lane_s32(__p0_169, __p1_169, __p2_169, __p3_169) __extension__ ({ \ - int32x4_t __ret_169; \ - int32x4_t __s0_169 = __p0_169; \ - int32x4_t __s1_169 = __p1_169; \ - int32x2_t __s2_169 = __p2_169; \ - __ret_169 = vqrdmlshq_s32(__s0_169, __s1_169, splatq_lane_s32(__s2_169, __p3_169)); \ - __ret_169; \ -}) -#else -#define vqrdmlshq_lane_s32(__p0_170, __p1_170, __p2_170, __p3_170) __extension__ ({ \ - int32x4_t __ret_170; \ - int32x4_t __s0_170 = __p0_170; \ - int32x4_t __s1_170 = __p1_170; \ - int32x2_t __s2_170 = __p2_170; \ - int32x4_t __rev0_170; __rev0_170 = __builtin_shufflevector(__s0_170, __s0_170, 3, 2, 1, 0); \ - int32x4_t __rev1_170; __rev1_170 = __builtin_shufflevector(__s1_170, __s1_170, 3, 2, 1, 0); \ - int32x2_t __rev2_170; __rev2_170 = __builtin_shufflevector(__s2_170, __s2_170, 1, 0); \ - __ret_170 = __noswap_vqrdmlshq_s32(__rev0_170, __rev1_170, __noswap_splatq_lane_s32(__rev2_170, __p3_170)); \ - __ret_170 = __builtin_shufflevector(__ret_170, 
__ret_170, 3, 2, 1, 0); \ - __ret_170; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlshq_lane_s16(__p0_171, __p1_171, __p2_171, __p3_171) __extension__ ({ \ - int16x8_t __ret_171; \ - int16x8_t __s0_171 = __p0_171; \ - int16x8_t __s1_171 = __p1_171; \ - int16x4_t __s2_171 = __p2_171; \ - __ret_171 = vqrdmlshq_s16(__s0_171, __s1_171, splatq_lane_s16(__s2_171, __p3_171)); \ - __ret_171; \ -}) -#else -#define vqrdmlshq_lane_s16(__p0_172, __p1_172, __p2_172, __p3_172) __extension__ ({ \ - int16x8_t __ret_172; \ - int16x8_t __s0_172 = __p0_172; \ - int16x8_t __s1_172 = __p1_172; \ - int16x4_t __s2_172 = __p2_172; \ - int16x8_t __rev0_172; __rev0_172 = __builtin_shufflevector(__s0_172, __s0_172, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_172; __rev1_172 = __builtin_shufflevector(__s1_172, __s1_172, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_172; __rev2_172 = __builtin_shufflevector(__s2_172, __s2_172, 3, 2, 1, 0); \ - __ret_172 = __noswap_vqrdmlshq_s16(__rev0_172, __rev1_172, __noswap_splatq_lane_s16(__rev2_172, __p3_172)); \ - __ret_172 = __builtin_shufflevector(__ret_172, __ret_172, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_172; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlsh_lane_s32(__p0_173, __p1_173, __p2_173, __p3_173) __extension__ ({ \ - int32x2_t __ret_173; \ - int32x2_t __s0_173 = __p0_173; \ - int32x2_t __s1_173 = __p1_173; \ - int32x2_t __s2_173 = __p2_173; \ - __ret_173 = vqrdmlsh_s32(__s0_173, __s1_173, splat_lane_s32(__s2_173, __p3_173)); \ - __ret_173; \ -}) -#else -#define vqrdmlsh_lane_s32(__p0_174, __p1_174, __p2_174, __p3_174) __extension__ ({ \ - int32x2_t __ret_174; \ - int32x2_t __s0_174 = __p0_174; \ - int32x2_t __s1_174 = __p1_174; \ - int32x2_t __s2_174 = __p2_174; \ - int32x2_t __rev0_174; __rev0_174 = __builtin_shufflevector(__s0_174, __s0_174, 1, 0); \ - int32x2_t __rev1_174; __rev1_174 = __builtin_shufflevector(__s1_174, __s1_174, 1, 0); \ - int32x2_t __rev2_174; __rev2_174 = __builtin_shufflevector(__s2_174, __s2_174, 
1, 0); \ - __ret_174 = __noswap_vqrdmlsh_s32(__rev0_174, __rev1_174, __noswap_splat_lane_s32(__rev2_174, __p3_174)); \ - __ret_174 = __builtin_shufflevector(__ret_174, __ret_174, 1, 0); \ - __ret_174; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmlsh_lane_s16(__p0_175, __p1_175, __p2_175, __p3_175) __extension__ ({ \ - int16x4_t __ret_175; \ - int16x4_t __s0_175 = __p0_175; \ - int16x4_t __s1_175 = __p1_175; \ - int16x4_t __s2_175 = __p2_175; \ - __ret_175 = vqrdmlsh_s16(__s0_175, __s1_175, splat_lane_s16(__s2_175, __p3_175)); \ - __ret_175; \ -}) -#else -#define vqrdmlsh_lane_s16(__p0_176, __p1_176, __p2_176, __p3_176) __extension__ ({ \ - int16x4_t __ret_176; \ - int16x4_t __s0_176 = __p0_176; \ - int16x4_t __s1_176 = __p1_176; \ - int16x4_t __s2_176 = __p2_176; \ - int16x4_t __rev0_176; __rev0_176 = __builtin_shufflevector(__s0_176, __s0_176, 3, 2, 1, 0); \ - int16x4_t __rev1_176; __rev1_176 = __builtin_shufflevector(__s1_176, __s1_176, 3, 2, 1, 0); \ - int16x4_t __rev2_176; __rev2_176 = __builtin_shufflevector(__s2_176, __s2_176, 3, 2, 1, 0); \ - __ret_176 = __noswap_vqrdmlsh_s16(__rev0_176, __rev1_176, __noswap_splat_lane_s16(__rev2_176, __p3_176)); \ - __ret_176 = __builtin_shufflevector(__ret_176, __ret_176, 3, 2, 1, 0); \ - __ret_176; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcadd_rot270_f32((int8x8_t)__p0, (int8x8_t)__p1, 9); +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.3a"))) float32x2_t vcadd_rot270_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float32x2_t) 
__builtin_neon_vcadd_rot270_f32((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcadd_rot90_f32((int8x8_t)__p0, (int8x8_t)__p1, 9); +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.3a"))) float32x2_t vcadd_rot90_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float32x2_t) __builtin_neon_vcadd_rot90_f32((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcaddq_rot270_f32((int8x16_t)__p0, (int8x16_t)__p1, 41); +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.3a"))) float32x4_t vcaddq_rot270_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = 
(float32x4_t) __builtin_neon_vcaddq_rot270_f32((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_f32((int8x16_t)__p0, (int8x16_t)__p1, 41); +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.3a"))) float32x4_t vcaddq_rot90_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vcaddq_rot90_f32((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcmlaq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - 
float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vcmlaq_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); return __ret; } -__ai __attribute__((target("v8.3a"))) float32x4_t __noswap_vcmlaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcmlaq_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcmla_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vcmla_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); return 
__ret; } -__ai __attribute__((target("v8.3a"))) float32x2_t __noswap_vcmla_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcmla_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_lane_f32(__p0_177, __p1_177, __p2_177, __p3_177) __extension__ ({ \ - float32x2_t __ret_177; \ - float32x2_t __s0_177 = __p0_177; \ - float32x2_t __s1_177 = __p1_177; \ - float32x2_t __s2_177 = __p2_177; \ -float32x2_t __reint_177 = __s2_177; \ -uint64x1_t __reint1_177 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_177, __p3_177)}; \ - __ret_177 = vcmla_f32(__s0_177, __s1_177, *(float32x2_t *) &__reint1_177); \ - __ret_177; \ -}) -#else -#define vcmla_lane_f32(__p0_178, __p1_178, __p2_178, __p3_178) __extension__ ({ \ - float32x2_t __ret_178; \ - float32x2_t __s0_178 = __p0_178; \ - float32x2_t __s1_178 = __p1_178; \ - float32x2_t __s2_178 = __p2_178; \ - float32x2_t __rev0_178; __rev0_178 = __builtin_shufflevector(__s0_178, __s0_178, 1, 0); \ - float32x2_t __rev1_178; __rev1_178 = __builtin_shufflevector(__s1_178, __s1_178, 1, 0); \ - float32x2_t __rev2_178; __rev2_178 = __builtin_shufflevector(__s2_178, __s2_178, 1, 0); \ -float32x2_t __reint_178 = __rev2_178; \ -uint64x1_t __reint1_178 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_178, __p3_178)}; \ - __ret_178 = __noswap_vcmla_f32(__rev0_178, __rev1_178, *(float32x2_t *) &__reint1_178); \ - __ret_178 = __builtin_shufflevector(__ret_178, __ret_178, 1, 0); \ - __ret_178; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_lane_f32(__p0_179, __p1_179, __p2_179, __p3_179) __extension__ ({ \ - float32x4_t __ret_179; \ - float32x4_t __s0_179 = __p0_179; \ - float32x4_t __s1_179 = __p1_179; \ - float32x2_t __s2_179 = __p2_179; \ -float32x2_t 
__reint_179 = __s2_179; \ -uint64x2_t __reint1_179 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179), vget_lane_u64(*(uint64x1_t *) &__reint_179, __p3_179)}; \ - __ret_179 = vcmlaq_f32(__s0_179, __s1_179, *(float32x4_t *) &__reint1_179); \ - __ret_179; \ -}) -#else -#define vcmlaq_lane_f32(__p0_180, __p1_180, __p2_180, __p3_180) __extension__ ({ \ - float32x4_t __ret_180; \ - float32x4_t __s0_180 = __p0_180; \ - float32x4_t __s1_180 = __p1_180; \ - float32x2_t __s2_180 = __p2_180; \ - float32x4_t __rev0_180; __rev0_180 = __builtin_shufflevector(__s0_180, __s0_180, 3, 2, 1, 0); \ - float32x4_t __rev1_180; __rev1_180 = __builtin_shufflevector(__s1_180, __s1_180, 3, 2, 1, 0); \ - float32x2_t __rev2_180; __rev2_180 = __builtin_shufflevector(__s2_180, __s2_180, 1, 0); \ -float32x2_t __reint_180 = __rev2_180; \ -uint64x2_t __reint1_180 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180), vget_lane_u64(*(uint64x1_t *) &__reint_180, __p3_180)}; \ - __ret_180 = __noswap_vcmlaq_f32(__rev0_180, __rev1_180, *(float32x4_t *) &__reint1_180); \ - __ret_180 = __builtin_shufflevector(__ret_180, __ret_180, 3, 2, 1, 0); \ - __ret_180; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_laneq_f32(__p0_181, __p1_181, __p2_181, __p3_181) __extension__ ({ \ - float32x2_t __ret_181; \ - float32x2_t __s0_181 = __p0_181; \ - float32x2_t __s1_181 = __p1_181; \ - float32x4_t __s2_181 = __p2_181; \ -float32x4_t __reint_181 = __s2_181; \ -uint64x1_t __reint1_181 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_181, __p3_181)}; \ - __ret_181 = vcmla_f32(__s0_181, __s1_181, *(float32x2_t *) &__reint1_181); \ - __ret_181; \ -}) -#else -#define vcmla_laneq_f32(__p0_182, __p1_182, __p2_182, __p3_182) __extension__ ({ \ - float32x2_t __ret_182; \ - float32x2_t __s0_182 = __p0_182; \ - float32x2_t __s1_182 = __p1_182; \ - float32x4_t __s2_182 = __p2_182; \ - float32x2_t __rev0_182; __rev0_182 = __builtin_shufflevector(__s0_182, __s0_182, 1, 0); \ - 
float32x2_t __rev1_182; __rev1_182 = __builtin_shufflevector(__s1_182, __s1_182, 1, 0); \ - float32x4_t __rev2_182; __rev2_182 = __builtin_shufflevector(__s2_182, __s2_182, 3, 2, 1, 0); \ -float32x4_t __reint_182 = __rev2_182; \ -uint64x1_t __reint1_182 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_182, __p3_182)}; \ - __ret_182 = __noswap_vcmla_f32(__rev0_182, __rev1_182, *(float32x2_t *) &__reint1_182); \ - __ret_182 = __builtin_shufflevector(__ret_182, __ret_182, 1, 0); \ - __ret_182; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_laneq_f32(__p0_183, __p1_183, __p2_183, __p3_183) __extension__ ({ \ - float32x4_t __ret_183; \ - float32x4_t __s0_183 = __p0_183; \ - float32x4_t __s1_183 = __p1_183; \ - float32x4_t __s2_183 = __p2_183; \ -float32x4_t __reint_183 = __s2_183; \ -uint64x2_t __reint1_183 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183), vgetq_lane_u64(*(uint64x2_t *) &__reint_183, __p3_183)}; \ - __ret_183 = vcmlaq_f32(__s0_183, __s1_183, *(float32x4_t *) &__reint1_183); \ - __ret_183; \ -}) -#else -#define vcmlaq_laneq_f32(__p0_184, __p1_184, __p2_184, __p3_184) __extension__ ({ \ - float32x4_t __ret_184; \ - float32x4_t __s0_184 = __p0_184; \ - float32x4_t __s1_184 = __p1_184; \ - float32x4_t __s2_184 = __p2_184; \ - float32x4_t __rev0_184; __rev0_184 = __builtin_shufflevector(__s0_184, __s0_184, 3, 2, 1, 0); \ - float32x4_t __rev1_184; __rev1_184 = __builtin_shufflevector(__s1_184, __s1_184, 3, 2, 1, 0); \ - float32x4_t __rev2_184; __rev2_184 = __builtin_shufflevector(__s2_184, __s2_184, 3, 2, 1, 0); \ -float32x4_t __reint_184 = __rev2_184; \ -uint64x2_t __reint1_184 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_184, __p3_184)}; \ - __ret_184 = __noswap_vcmlaq_f32(__rev0_184, __rev1_184, *(float32x4_t *) &__reint1_184); \ - __ret_184 = __builtin_shufflevector(__ret_184, __ret_184, 3, 2, 1, 0); \ - __ret_184; \ -}) 
-#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); return __ret; } -__ai __attribute__((target("v8.3a"))) float32x4_t __noswap_vcmlaq_rot180_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot180_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcmla_rot180_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) { + uint32x4_t 
__ret; + __ret = (uint32x4_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vcmla_rot180_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); return __ret; } -__ai __attribute__((target("v8.3a"))) float32x2_t __noswap_vcmla_rot180_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcmla_rot180_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot180_lane_f32(__p0_185, __p1_185, __p2_185, __p3_185) __extension__ ({ \ - float32x2_t __ret_185; \ - float32x2_t __s0_185 = __p0_185; \ - float32x2_t __s1_185 = __p1_185; \ - float32x2_t __s2_185 = __p2_185; \ -float32x2_t __reint_185 = __s2_185; \ -uint64x1_t __reint1_185 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_185, __p3_185)}; \ - __ret_185 = vcmla_rot180_f32(__s0_185, __s1_185, *(float32x2_t *) &__reint1_185); \ - __ret_185; \ -}) -#else -#define vcmla_rot180_lane_f32(__p0_186, __p1_186, __p2_186, __p3_186) __extension__ ({ \ - float32x2_t __ret_186; \ - float32x2_t __s0_186 = __p0_186; \ - float32x2_t __s1_186 = __p1_186; \ - float32x2_t __s2_186 = __p2_186; \ - float32x2_t __rev0_186; __rev0_186 = __builtin_shufflevector(__s0_186, __s0_186, 1, 
0); \ - float32x2_t __rev1_186; __rev1_186 = __builtin_shufflevector(__s1_186, __s1_186, 1, 0); \ - float32x2_t __rev2_186; __rev2_186 = __builtin_shufflevector(__s2_186, __s2_186, 1, 0); \ -float32x2_t __reint_186 = __rev2_186; \ -uint64x1_t __reint1_186 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_186, __p3_186)}; \ - __ret_186 = __noswap_vcmla_rot180_f32(__rev0_186, __rev1_186, *(float32x2_t *) &__reint1_186); \ - __ret_186 = __builtin_shufflevector(__ret_186, __ret_186, 1, 0); \ - __ret_186; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot180_lane_f32(__p0_187, __p1_187, __p2_187, __p3_187) __extension__ ({ \ - float32x4_t __ret_187; \ - float32x4_t __s0_187 = __p0_187; \ - float32x4_t __s1_187 = __p1_187; \ - float32x2_t __s2_187 = __p2_187; \ -float32x2_t __reint_187 = __s2_187; \ -uint64x2_t __reint1_187 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187), vget_lane_u64(*(uint64x1_t *) &__reint_187, __p3_187)}; \ - __ret_187 = vcmlaq_rot180_f32(__s0_187, __s1_187, *(float32x4_t *) &__reint1_187); \ - __ret_187; \ -}) -#else -#define vcmlaq_rot180_lane_f32(__p0_188, __p1_188, __p2_188, __p3_188) __extension__ ({ \ - float32x4_t __ret_188; \ - float32x4_t __s0_188 = __p0_188; \ - float32x4_t __s1_188 = __p1_188; \ - float32x2_t __s2_188 = __p2_188; \ - float32x4_t __rev0_188; __rev0_188 = __builtin_shufflevector(__s0_188, __s0_188, 3, 2, 1, 0); \ - float32x4_t __rev1_188; __rev1_188 = __builtin_shufflevector(__s1_188, __s1_188, 3, 2, 1, 0); \ - float32x2_t __rev2_188; __rev2_188 = __builtin_shufflevector(__s2_188, __s2_188, 1, 0); \ -float32x2_t __reint_188 = __rev2_188; \ -uint64x2_t __reint1_188 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188), vget_lane_u64(*(uint64x1_t *) &__reint_188, __p3_188)}; \ - __ret_188 = __noswap_vcmlaq_rot180_f32(__rev0_188, __rev1_188, *(float32x4_t *) &__reint1_188); \ - __ret_188 = __builtin_shufflevector(__ret_188, __ret_188, 3, 2, 1, 0); \ - __ret_188; \ -}) 
-#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot180_laneq_f32(__p0_189, __p1_189, __p2_189, __p3_189) __extension__ ({ \ - float32x2_t __ret_189; \ - float32x2_t __s0_189 = __p0_189; \ - float32x2_t __s1_189 = __p1_189; \ - float32x4_t __s2_189 = __p2_189; \ -float32x4_t __reint_189 = __s2_189; \ -uint64x1_t __reint1_189 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_189, __p3_189)}; \ - __ret_189 = vcmla_rot180_f32(__s0_189, __s1_189, *(float32x2_t *) &__reint1_189); \ - __ret_189; \ -}) -#else -#define vcmla_rot180_laneq_f32(__p0_190, __p1_190, __p2_190, __p3_190) __extension__ ({ \ - float32x2_t __ret_190; \ - float32x2_t __s0_190 = __p0_190; \ - float32x2_t __s1_190 = __p1_190; \ - float32x4_t __s2_190 = __p2_190; \ - float32x2_t __rev0_190; __rev0_190 = __builtin_shufflevector(__s0_190, __s0_190, 1, 0); \ - float32x2_t __rev1_190; __rev1_190 = __builtin_shufflevector(__s1_190, __s1_190, 1, 0); \ - float32x4_t __rev2_190; __rev2_190 = __builtin_shufflevector(__s2_190, __s2_190, 3, 2, 1, 0); \ -float32x4_t __reint_190 = __rev2_190; \ -uint64x1_t __reint1_190 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_190, __p3_190)}; \ - __ret_190 = __noswap_vcmla_rot180_f32(__rev0_190, __rev1_190, *(float32x2_t *) &__reint1_190); \ - __ret_190 = __builtin_shufflevector(__ret_190, __ret_190, 1, 0); \ - __ret_190; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot180_laneq_f32(__p0_191, __p1_191, __p2_191, __p3_191) __extension__ ({ \ - float32x4_t __ret_191; \ - float32x4_t __s0_191 = __p0_191; \ - float32x4_t __s1_191 = __p1_191; \ - float32x4_t __s2_191 = __p2_191; \ -float32x4_t __reint_191 = __s2_191; \ -uint64x2_t __reint1_191 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191), vgetq_lane_u64(*(uint64x2_t *) &__reint_191, __p3_191)}; \ - __ret_191 = vcmlaq_rot180_f32(__s0_191, __s1_191, *(float32x4_t *) &__reint1_191); \ - __ret_191; \ -}) -#else -#define vcmlaq_rot180_laneq_f32(__p0_192, __p1_192, 
__p2_192, __p3_192) __extension__ ({ \ - float32x4_t __ret_192; \ - float32x4_t __s0_192 = __p0_192; \ - float32x4_t __s1_192 = __p1_192; \ - float32x4_t __s2_192 = __p2_192; \ - float32x4_t __rev0_192; __rev0_192 = __builtin_shufflevector(__s0_192, __s0_192, 3, 2, 1, 0); \ - float32x4_t __rev1_192; __rev1_192 = __builtin_shufflevector(__s1_192, __s1_192, 3, 2, 1, 0); \ - float32x4_t __rev2_192; __rev2_192 = __builtin_shufflevector(__s2_192, __s2_192, 3, 2, 1, 0); \ -float32x4_t __reint_192 = __rev2_192; \ -uint64x2_t __reint1_192 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_192, __p3_192)}; \ - __ret_192 = __noswap_vcmlaq_rot180_f32(__rev0_192, __rev1_192, *(float32x4_t *) &__reint1_192); \ - __ret_192 = __builtin_shufflevector(__ret_192, __ret_192, 3, 2, 1, 0); \ - __ret_192; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) { + 
uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); return __ret; } -__ai __attribute__((target("v8.3a"))) float32x4_t __noswap_vcmlaq_rot270_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot270_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcmla_rot270_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vcmla_rot270_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); return __ret; } -__ai __attribute__((target("v8.3a"))) float32x2_t __noswap_vcmla_rot270_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcmla_rot270_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) { + uint64x2_t 
__ret; + __ret = (uint64x2_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot270_lane_f32(__p0_193, __p1_193, __p2_193, __p3_193) __extension__ ({ \ - float32x2_t __ret_193; \ - float32x2_t __s0_193 = __p0_193; \ - float32x2_t __s1_193 = __p1_193; \ - float32x2_t __s2_193 = __p2_193; \ -float32x2_t __reint_193 = __s2_193; \ -uint64x1_t __reint1_193 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_193, __p3_193)}; \ - __ret_193 = vcmla_rot270_f32(__s0_193, __s1_193, *(float32x2_t *) &__reint1_193); \ - __ret_193; \ -}) -#else -#define vcmla_rot270_lane_f32(__p0_194, __p1_194, __p2_194, __p3_194) __extension__ ({ \ - float32x2_t __ret_194; \ - float32x2_t __s0_194 = __p0_194; \ - float32x2_t __s1_194 = __p1_194; \ - float32x2_t __s2_194 = __p2_194; \ - float32x2_t __rev0_194; __rev0_194 = __builtin_shufflevector(__s0_194, __s0_194, 1, 0); \ - float32x2_t __rev1_194; __rev1_194 = __builtin_shufflevector(__s1_194, __s1_194, 1, 0); \ - float32x2_t __rev2_194; __rev2_194 = __builtin_shufflevector(__s2_194, __s2_194, 1, 0); \ -float32x2_t __reint_194 = __rev2_194; \ -uint64x1_t __reint1_194 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_194, __p3_194)}; \ - __ret_194 = __noswap_vcmla_rot270_f32(__rev0_194, __rev1_194, *(float32x2_t *) &__reint1_194); \ - __ret_194 = __builtin_shufflevector(__ret_194, __ret_194, 1, 0); \ - __ret_194; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot270_lane_f32(__p0_195, __p1_195, __p2_195, __p3_195) __extension__ ({ \ - float32x4_t __ret_195; \ - float32x4_t __s0_195 = __p0_195; \ - float32x4_t __s1_195 = __p1_195; \ - float32x2_t __s2_195 = __p2_195; \ -float32x2_t __reint_195 = __s2_195; \ -uint64x2_t __reint1_195 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195), vget_lane_u64(*(uint64x1_t *) &__reint_195, __p3_195)}; \ - __ret_195 = vcmlaq_rot270_f32(__s0_195, __s1_195, *(float32x4_t *) &__reint1_195); \ - __ret_195; \ -}) -#else -#define 
vcmlaq_rot270_lane_f32(__p0_196, __p1_196, __p2_196, __p3_196) __extension__ ({ \ - float32x4_t __ret_196; \ - float32x4_t __s0_196 = __p0_196; \ - float32x4_t __s1_196 = __p1_196; \ - float32x2_t __s2_196 = __p2_196; \ - float32x4_t __rev0_196; __rev0_196 = __builtin_shufflevector(__s0_196, __s0_196, 3, 2, 1, 0); \ - float32x4_t __rev1_196; __rev1_196 = __builtin_shufflevector(__s1_196, __s1_196, 3, 2, 1, 0); \ - float32x2_t __rev2_196; __rev2_196 = __builtin_shufflevector(__s2_196, __s2_196, 1, 0); \ -float32x2_t __reint_196 = __rev2_196; \ -uint64x2_t __reint1_196 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_196, __p3_196), vget_lane_u64(*(uint64x1_t *) &__reint_196, __p3_196)}; \ - __ret_196 = __noswap_vcmlaq_rot270_f32(__rev0_196, __rev1_196, *(float32x4_t *) &__reint1_196); \ - __ret_196 = __builtin_shufflevector(__ret_196, __ret_196, 3, 2, 1, 0); \ - __ret_196; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot270_laneq_f32(__p0_197, __p1_197, __p2_197, __p3_197) __extension__ ({ \ - float32x2_t __ret_197; \ - float32x2_t __s0_197 = __p0_197; \ - float32x2_t __s1_197 = __p1_197; \ - float32x4_t __s2_197 = __p2_197; \ -float32x4_t __reint_197 = __s2_197; \ -uint64x1_t __reint1_197 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_197, __p3_197)}; \ - __ret_197 = vcmla_rot270_f32(__s0_197, __s1_197, *(float32x2_t *) &__reint1_197); \ - __ret_197; \ -}) -#else -#define vcmla_rot270_laneq_f32(__p0_198, __p1_198, __p2_198, __p3_198) __extension__ ({ \ - float32x2_t __ret_198; \ - float32x2_t __s0_198 = __p0_198; \ - float32x2_t __s1_198 = __p1_198; \ - float32x4_t __s2_198 = __p2_198; \ - float32x2_t __rev0_198; __rev0_198 = __builtin_shufflevector(__s0_198, __s0_198, 1, 0); \ - float32x2_t __rev1_198; __rev1_198 = __builtin_shufflevector(__s1_198, __s1_198, 1, 0); \ - float32x4_t __rev2_198; __rev2_198 = __builtin_shufflevector(__s2_198, __s2_198, 3, 2, 1, 0); \ -float32x4_t __reint_198 = __rev2_198; \ -uint64x1_t __reint1_198 = 
(uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_198, __p3_198)}; \ - __ret_198 = __noswap_vcmla_rot270_f32(__rev0_198, __rev1_198, *(float32x2_t *) &__reint1_198); \ - __ret_198 = __builtin_shufflevector(__ret_198, __ret_198, 1, 0); \ - __ret_198; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot270_laneq_f32(__p0_199, __p1_199, __p2_199, __p3_199) __extension__ ({ \ - float32x4_t __ret_199; \ - float32x4_t __s0_199 = __p0_199; \ - float32x4_t __s1_199 = __p1_199; \ - float32x4_t __s2_199 = __p2_199; \ -float32x4_t __reint_199 = __s2_199; \ -uint64x2_t __reint1_199 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199), vgetq_lane_u64(*(uint64x2_t *) &__reint_199, __p3_199)}; \ - __ret_199 = vcmlaq_rot270_f32(__s0_199, __s1_199, *(float32x4_t *) &__reint1_199); \ - __ret_199; \ -}) -#else -#define vcmlaq_rot270_laneq_f32(__p0_200, __p1_200, __p2_200, __p3_200) __extension__ ({ \ - float32x4_t __ret_200; \ - float32x4_t __s0_200 = __p0_200; \ - float32x4_t __s1_200 = __p1_200; \ - float32x4_t __s2_200 = __p2_200; \ - float32x4_t __rev0_200; __rev0_200 = __builtin_shufflevector(__s0_200, __s0_200, 3, 2, 1, 0); \ - float32x4_t __rev1_200; __rev1_200 = __builtin_shufflevector(__s1_200, __s1_200, 3, 2, 1, 0); \ - float32x4_t __rev2_200; __rev2_200 = __builtin_shufflevector(__s2_200, __s2_200, 3, 2, 1, 0); \ -float32x4_t __reint_200 = __rev2_200; \ -uint64x2_t __reint1_200 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_200, __p3_200)}; \ - __ret_200 = __noswap_vcmlaq_rot270_f32(__rev0_200, __rev1_200, *(float32x4_t *) &__reint1_200); \ - __ret_200 = __builtin_shufflevector(__ret_200, __ret_200, 3, 2, 1, 0); \ - __ret_200; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t 
vreinterpretq_u64_u16(uint16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + 
return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) { + int8x16_t 
__ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) { float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + __ret = (float32x4_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.3a"))) float32x4_t vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) { float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_f32((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + __ret = (float32x4_t)(__p0); return __ret; } -__ai __attribute__((target("v8.3a"))) float32x4_t __noswap_vcmlaq_rot90_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) { float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcmlaq_rot90_f32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); + __ret = (float32x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float32x2_t 
vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcmla_rot90_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.3a"))) float32x2_t vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vcmla_rot90_f32((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); return __ret; } -__ai __attribute__((target("v8.3a"))) float32x2_t __noswap_vcmla_rot90_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcmla_rot90_f32((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot90_lane_f32(__p0_201, __p1_201, __p2_201, __p3_201) __extension__ ({ \ - float32x2_t __ret_201; \ - float32x2_t __s0_201 = __p0_201; \ - float32x2_t __s1_201 = __p1_201; \ - float32x2_t __s2_201 = __p2_201; \ -float32x2_t __reint_201 = __s2_201; \ -uint64x1_t __reint1_201 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_201, __p3_201)}; \ - __ret_201 = vcmla_rot90_f32(__s0_201, __s1_201, *(float32x2_t *) &__reint1_201); \ - __ret_201; \ -}) -#else -#define 
vcmla_rot90_lane_f32(__p0_202, __p1_202, __p2_202, __p3_202) __extension__ ({ \ - float32x2_t __ret_202; \ - float32x2_t __s0_202 = __p0_202; \ - float32x2_t __s1_202 = __p1_202; \ - float32x2_t __s2_202 = __p2_202; \ - float32x2_t __rev0_202; __rev0_202 = __builtin_shufflevector(__s0_202, __s0_202, 1, 0); \ - float32x2_t __rev1_202; __rev1_202 = __builtin_shufflevector(__s1_202, __s1_202, 1, 0); \ - float32x2_t __rev2_202; __rev2_202 = __builtin_shufflevector(__s2_202, __s2_202, 1, 0); \ -float32x2_t __reint_202 = __rev2_202; \ -uint64x1_t __reint1_202 = (uint64x1_t) {vget_lane_u64(*(uint64x1_t *) &__reint_202, __p3_202)}; \ - __ret_202 = __noswap_vcmla_rot90_f32(__rev0_202, __rev1_202, *(float32x2_t *) &__reint1_202); \ - __ret_202 = __builtin_shufflevector(__ret_202, __ret_202, 1, 0); \ - __ret_202; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot90_lane_f32(__p0_203, __p1_203, __p2_203, __p3_203) __extension__ ({ \ - float32x4_t __ret_203; \ - float32x4_t __s0_203 = __p0_203; \ - float32x4_t __s1_203 = __p1_203; \ - float32x2_t __s2_203 = __p2_203; \ -float32x2_t __reint_203 = __s2_203; \ -uint64x2_t __reint1_203 = (uint64x2_t) {vget_lane_u64(*(uint64x1_t *) &__reint_203, __p3_203), vget_lane_u64(*(uint64x1_t *) &__reint_203, __p3_203)}; \ - __ret_203 = vcmlaq_rot90_f32(__s0_203, __s1_203, *(float32x4_t *) &__reint1_203); \ - __ret_203; \ -}) -#else -#define vcmlaq_rot90_lane_f32(__p0_204, __p1_204, __p2_204, __p3_204) __extension__ ({ \ - float32x4_t __ret_204; \ - float32x4_t __s0_204 = __p0_204; \ - float32x4_t __s1_204 = __p1_204; \ - float32x2_t __s2_204 = __p2_204; \ - float32x4_t __rev0_204; __rev0_204 = __builtin_shufflevector(__s0_204, __s0_204, 3, 2, 1, 0); \ - float32x4_t __rev1_204; __rev1_204 = __builtin_shufflevector(__s1_204, __s1_204, 3, 2, 1, 0); \ - float32x2_t __rev2_204; __rev2_204 = __builtin_shufflevector(__s2_204, __s2_204, 1, 0); \ -float32x2_t __reint_204 = __rev2_204; \ -uint64x2_t __reint1_204 = (uint64x2_t) 
{vget_lane_u64(*(uint64x1_t *) &__reint_204, __p3_204), vget_lane_u64(*(uint64x1_t *) &__reint_204, __p3_204)}; \ - __ret_204 = __noswap_vcmlaq_rot90_f32(__rev0_204, __rev1_204, *(float32x4_t *) &__reint1_204); \ - __ret_204 = __builtin_shufflevector(__ret_204, __ret_204, 3, 2, 1, 0); \ - __ret_204; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot90_laneq_f32(__p0_205, __p1_205, __p2_205, __p3_205) __extension__ ({ \ - float32x2_t __ret_205; \ - float32x2_t __s0_205 = __p0_205; \ - float32x2_t __s1_205 = __p1_205; \ - float32x4_t __s2_205 = __p2_205; \ -float32x4_t __reint_205 = __s2_205; \ -uint64x1_t __reint1_205 = (uint64x1_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_205, __p3_205)}; \ - __ret_205 = vcmla_rot90_f32(__s0_205, __s1_205, *(float32x2_t *) &__reint1_205); \ - __ret_205; \ -}) -#else -#define vcmla_rot90_laneq_f32(__p0_206, __p1_206, __p2_206, __p3_206) __extension__ ({ \ - float32x2_t __ret_206; \ - float32x2_t __s0_206 = __p0_206; \ - float32x2_t __s1_206 = __p1_206; \ - float32x4_t __s2_206 = __p2_206; \ - float32x2_t __rev0_206; __rev0_206 = __builtin_shufflevector(__s0_206, __s0_206, 1, 0); \ - float32x2_t __rev1_206; __rev1_206 = __builtin_shufflevector(__s1_206, __s1_206, 1, 0); \ - float32x4_t __rev2_206; __rev2_206 = __builtin_shufflevector(__s2_206, __s2_206, 3, 2, 1, 0); \ -float32x4_t __reint_206 = __rev2_206; \ -uint64x1_t __reint1_206 = (uint64x1_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_206, __p3_206)}; \ - __ret_206 = __noswap_vcmla_rot90_f32(__rev0_206, __rev1_206, *(float32x2_t *) &__reint1_206); \ - __ret_206 = __builtin_shufflevector(__ret_206, __ret_206, 1, 0); \ - __ret_206; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot90_laneq_f32(__p0_207, __p1_207, __p2_207, __p3_207) __extension__ ({ \ - float32x4_t __ret_207; \ - float32x4_t __s0_207 = __p0_207; \ - float32x4_t __s1_207 = __p1_207; \ - float32x4_t __s2_207 = __p2_207; \ -float32x4_t __reint_207 = __s2_207; \ -uint64x2_t __reint1_207 = 
(uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_207, __p3_207), vgetq_lane_u64(*(uint64x2_t *) &__reint_207, __p3_207)}; \ - __ret_207 = vcmlaq_rot90_f32(__s0_207, __s1_207, *(float32x4_t *) &__reint1_207); \ - __ret_207; \ -}) -#else -#define vcmlaq_rot90_laneq_f32(__p0_208, __p1_208, __p2_208, __p3_208) __extension__ ({ \ - float32x4_t __ret_208; \ - float32x4_t __s0_208 = __p0_208; \ - float32x4_t __s1_208 = __p1_208; \ - float32x4_t __s2_208 = __p2_208; \ - float32x4_t __rev0_208; __rev0_208 = __builtin_shufflevector(__s0_208, __s0_208, 3, 2, 1, 0); \ - float32x4_t __rev1_208; __rev1_208 = __builtin_shufflevector(__s1_208, __s1_208, 3, 2, 1, 0); \ - float32x4_t __rev2_208; __rev2_208 = __builtin_shufflevector(__s2_208, __s2_208, 3, 2, 1, 0); \ -float32x4_t __reint_208 = __rev2_208; \ -uint64x2_t __reint1_208 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_208, __p3_208), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_208, __p3_208)}; \ - __ret_208 = __noswap_vcmlaq_rot90_f32(__rev0_208, __rev1_208, *(float32x4_t *) &__reint1_208); \ - __ret_208 = __builtin_shufflevector(__ret_208, __ret_208, 3, 2, 1, 0); \ - __ret_208; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcadd_rot270_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcadd_rot270_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vcadd_rot270_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret 
= __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcadd_rot90_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcadd_rot90_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vcadd_rot90_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcaddq_rot270_f16(float16x8_t __p0, float16x8_t __p1) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) { float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); 
- float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vcaddq_rot270_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) { float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); + __ret = (float16x8_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcaddq_rot90_f16(float16x8_t __p0, float16x8_t __p1) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) { float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vcaddq_rot90_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) { float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcmlaq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + __ret = (float16x8_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) { 
float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vcmlaq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t)(__p0); return __ret; } -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t __noswap_vcmlaq_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) { float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcmlaq_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + __ret = (float16x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcmla_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vcmla_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float16x8_t 
vreinterpretq_f16_f32(float32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); return __ret; } -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t __noswap_vcmla_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcmla_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_lane_f16(__p0_209, __p1_209, __p2_209, __p3_209) __extension__ ({ \ - float16x4_t __ret_209; \ - float16x4_t __s0_209 = __p0_209; \ - float16x4_t __s1_209 = __p1_209; \ - float16x4_t __s2_209 = __p2_209; \ -float16x4_t __reint_209 = __s2_209; \ -uint32x2_t __reint1_209 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209), vget_lane_u32(*(uint32x2_t *) &__reint_209, __p3_209)}; \ - __ret_209 = vcmla_f16(__s0_209, __s1_209, *(float16x4_t *) &__reint1_209); \ - __ret_209; \ -}) -#else -#define vcmla_lane_f16(__p0_210, __p1_210, __p2_210, __p3_210) __extension__ ({ \ - float16x4_t __ret_210; \ - float16x4_t __s0_210 = __p0_210; \ - float16x4_t __s1_210 = __p1_210; \ - float16x4_t __s2_210 = __p2_210; \ - float16x4_t __rev0_210; __rev0_210 = __builtin_shufflevector(__s0_210, __s0_210, 3, 2, 1, 0); \ - float16x4_t __rev1_210; __rev1_210 = __builtin_shufflevector(__s1_210, __s1_210, 3, 2, 1, 0); \ - float16x4_t __rev2_210; __rev2_210 = __builtin_shufflevector(__s2_210, __s2_210, 3, 2, 1, 0); \ -float16x4_t __reint_210 = __rev2_210; \ -uint32x2_t __reint1_210 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_210, __p3_210)}; \ - __ret_210 = __noswap_vcmla_f16(__rev0_210, __rev1_210, *(float16x4_t *) &__reint1_210); \ - __ret_210 = __builtin_shufflevector(__ret_210, __ret_210, 3, 2, 1, 0); \ - __ret_210; \ -}) 
-#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_lane_f16(__p0_211, __p1_211, __p2_211, __p3_211) __extension__ ({ \ - float16x8_t __ret_211; \ - float16x8_t __s0_211 = __p0_211; \ - float16x8_t __s1_211 = __p1_211; \ - float16x4_t __s2_211 = __p2_211; \ -float16x4_t __reint_211 = __s2_211; \ -uint32x4_t __reint1_211 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211), vget_lane_u32(*(uint32x2_t *) &__reint_211, __p3_211)}; \ - __ret_211 = vcmlaq_f16(__s0_211, __s1_211, *(float16x8_t *) &__reint1_211); \ - __ret_211; \ -}) -#else -#define vcmlaq_lane_f16(__p0_212, __p1_212, __p2_212, __p3_212) __extension__ ({ \ - float16x8_t __ret_212; \ - float16x8_t __s0_212 = __p0_212; \ - float16x8_t __s1_212 = __p1_212; \ - float16x4_t __s2_212 = __p2_212; \ - float16x8_t __rev0_212; __rev0_212 = __builtin_shufflevector(__s0_212, __s0_212, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_212; __rev1_212 = __builtin_shufflevector(__s1_212, __s1_212, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_212; __rev2_212 = __builtin_shufflevector(__s2_212, __s2_212, 3, 2, 1, 0); \ -float16x4_t __reint_212 = __rev2_212; \ -uint32x4_t __reint1_212 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_212, __p3_212)}; \ - __ret_212 = __noswap_vcmlaq_f16(__rev0_212, __rev1_212, *(float16x8_t *) &__reint1_212); \ - __ret_212 = __builtin_shufflevector(__ret_212, __ret_212, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_212; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_laneq_f16(__p0_213, __p1_213, __p2_213, __p3_213) __extension__ ({ \ - float16x4_t __ret_213; \ - float16x4_t __s0_213 = __p0_213; \ - float16x4_t __s1_213 = __p1_213; \ - float16x8_t __s2_213 = 
__p2_213; \ -float16x8_t __reint_213 = __s2_213; \ -uint32x2_t __reint1_213 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213), vgetq_lane_u32(*(uint32x4_t *) &__reint_213, __p3_213)}; \ - __ret_213 = vcmla_f16(__s0_213, __s1_213, *(float16x4_t *) &__reint1_213); \ - __ret_213; \ -}) -#else -#define vcmla_laneq_f16(__p0_214, __p1_214, __p2_214, __p3_214) __extension__ ({ \ - float16x4_t __ret_214; \ - float16x4_t __s0_214 = __p0_214; \ - float16x4_t __s1_214 = __p1_214; \ - float16x8_t __s2_214 = __p2_214; \ - float16x4_t __rev0_214; __rev0_214 = __builtin_shufflevector(__s0_214, __s0_214, 3, 2, 1, 0); \ - float16x4_t __rev1_214; __rev1_214 = __builtin_shufflevector(__s1_214, __s1_214, 3, 2, 1, 0); \ - float16x8_t __rev2_214; __rev2_214 = __builtin_shufflevector(__s2_214, __s2_214, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_214 = __rev2_214; \ -uint32x2_t __reint1_214 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_214, __p3_214), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_214, __p3_214)}; \ - __ret_214 = __noswap_vcmla_f16(__rev0_214, __rev1_214, *(float16x4_t *) &__reint1_214); \ - __ret_214 = __builtin_shufflevector(__ret_214, __ret_214, 3, 2, 1, 0); \ - __ret_214; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_laneq_f16(__p0_215, __p1_215, __p2_215, __p3_215) __extension__ ({ \ - float16x8_t __ret_215; \ - float16x8_t __s0_215 = __p0_215; \ - float16x8_t __s1_215 = __p1_215; \ - float16x8_t __s2_215 = __p2_215; \ -float16x8_t __reint_215 = __s2_215; \ -uint32x4_t __reint1_215 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215), vgetq_lane_u32(*(uint32x4_t *) &__reint_215, __p3_215)}; \ - __ret_215 = vcmlaq_f16(__s0_215, __s1_215, *(float16x8_t *) &__reint1_215); \ - __ret_215; \ -}) -#else -#define vcmlaq_laneq_f16(__p0_216, __p1_216, __p2_216, __p3_216) __extension__ ({ \ - 
float16x8_t __ret_216; \ - float16x8_t __s0_216 = __p0_216; \ - float16x8_t __s1_216 = __p1_216; \ - float16x8_t __s2_216 = __p2_216; \ - float16x8_t __rev0_216; __rev0_216 = __builtin_shufflevector(__s0_216, __s0_216, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_216; __rev1_216 = __builtin_shufflevector(__s1_216, __s1_216, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_216; __rev2_216 = __builtin_shufflevector(__s2_216, __s2_216, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_216 = __rev2_216; \ -uint32x4_t __reint1_216 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_216, __p3_216)}; \ - __ret_216 = __noswap_vcmlaq_f16(__rev0_216, __rev1_216, *(float16x8_t *) &__reint1_216); \ - __ret_216 = __builtin_shufflevector(__ret_216, __ret_216, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_216; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) { float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); + __ret = (float16x8_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) { float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) 
__builtin_neon_vcmlaq_rot180_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t)(__p0); return __ret; } -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t __noswap_vcmlaq_rot180_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcmlaq_rot180_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcmla_rot180_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vcmla_rot180_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); return __ret; } -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t __noswap_vcmla_rot180_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = 
(float16x4_t) __builtin_neon_vcmla_rot180_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot180_lane_f16(__p0_217, __p1_217, __p2_217, __p3_217) __extension__ ({ \ - float16x4_t __ret_217; \ - float16x4_t __s0_217 = __p0_217; \ - float16x4_t __s1_217 = __p1_217; \ - float16x4_t __s2_217 = __p2_217; \ -float16x4_t __reint_217 = __s2_217; \ -uint32x2_t __reint1_217 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217), vget_lane_u32(*(uint32x2_t *) &__reint_217, __p3_217)}; \ - __ret_217 = vcmla_rot180_f16(__s0_217, __s1_217, *(float16x4_t *) &__reint1_217); \ - __ret_217; \ -}) -#else -#define vcmla_rot180_lane_f16(__p0_218, __p1_218, __p2_218, __p3_218) __extension__ ({ \ - float16x4_t __ret_218; \ - float16x4_t __s0_218 = __p0_218; \ - float16x4_t __s1_218 = __p1_218; \ - float16x4_t __s2_218 = __p2_218; \ - float16x4_t __rev0_218; __rev0_218 = __builtin_shufflevector(__s0_218, __s0_218, 3, 2, 1, 0); \ - float16x4_t __rev1_218; __rev1_218 = __builtin_shufflevector(__s1_218, __s1_218, 3, 2, 1, 0); \ - float16x4_t __rev2_218; __rev2_218 = __builtin_shufflevector(__s2_218, __s2_218, 3, 2, 1, 0); \ -float16x4_t __reint_218 = __rev2_218; \ -uint32x2_t __reint1_218 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_218, __p3_218)}; \ - __ret_218 = __noswap_vcmla_rot180_f16(__rev0_218, __rev1_218, *(float16x4_t *) &__reint1_218); \ - __ret_218 = __builtin_shufflevector(__ret_218, __ret_218, 3, 2, 1, 0); \ - __ret_218; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot180_lane_f16(__p0_219, __p1_219, __p2_219, __p3_219) __extension__ ({ \ - float16x8_t __ret_219; \ - float16x8_t __s0_219 = __p0_219; \ - float16x8_t __s1_219 = __p1_219; \ - float16x4_t __s2_219 
= __p2_219; \ -float16x4_t __reint_219 = __s2_219; \ -uint32x4_t __reint1_219 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219), vget_lane_u32(*(uint32x2_t *) &__reint_219, __p3_219)}; \ - __ret_219 = vcmlaq_rot180_f16(__s0_219, __s1_219, *(float16x8_t *) &__reint1_219); \ - __ret_219; \ -}) -#else -#define vcmlaq_rot180_lane_f16(__p0_220, __p1_220, __p2_220, __p3_220) __extension__ ({ \ - float16x8_t __ret_220; \ - float16x8_t __s0_220 = __p0_220; \ - float16x8_t __s1_220 = __p1_220; \ - float16x4_t __s2_220 = __p2_220; \ - float16x8_t __rev0_220; __rev0_220 = __builtin_shufflevector(__s0_220, __s0_220, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_220; __rev1_220 = __builtin_shufflevector(__s1_220, __s1_220, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_220; __rev2_220 = __builtin_shufflevector(__s2_220, __s2_220, 3, 2, 1, 0); \ -float16x4_t __reint_220 = __rev2_220; \ -uint32x4_t __reint1_220 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_220, __p3_220)}; \ - __ret_220 = __noswap_vcmlaq_rot180_f16(__rev0_220, __rev1_220, *(float16x8_t *) &__reint1_220); \ - __ret_220 = __builtin_shufflevector(__ret_220, __ret_220, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_220; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot180_laneq_f16(__p0_221, __p1_221, __p2_221, __p3_221) __extension__ ({ \ - float16x4_t __ret_221; \ - float16x4_t __s0_221 = __p0_221; \ - float16x4_t __s1_221 = __p1_221; \ - float16x8_t __s2_221 = __p2_221; \ -float16x8_t __reint_221 = __s2_221; \ -uint32x2_t __reint1_221 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221), vgetq_lane_u32(*(uint32x4_t *) &__reint_221, __p3_221)}; \ - __ret_221 = 
vcmla_rot180_f16(__s0_221, __s1_221, *(float16x4_t *) &__reint1_221); \ - __ret_221; \ -}) -#else -#define vcmla_rot180_laneq_f16(__p0_222, __p1_222, __p2_222, __p3_222) __extension__ ({ \ - float16x4_t __ret_222; \ - float16x4_t __s0_222 = __p0_222; \ - float16x4_t __s1_222 = __p1_222; \ - float16x8_t __s2_222 = __p2_222; \ - float16x4_t __rev0_222; __rev0_222 = __builtin_shufflevector(__s0_222, __s0_222, 3, 2, 1, 0); \ - float16x4_t __rev1_222; __rev1_222 = __builtin_shufflevector(__s1_222, __s1_222, 3, 2, 1, 0); \ - float16x8_t __rev2_222; __rev2_222 = __builtin_shufflevector(__s2_222, __s2_222, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_222 = __rev2_222; \ -uint32x2_t __reint1_222 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_222, __p3_222)}; \ - __ret_222 = __noswap_vcmla_rot180_f16(__rev0_222, __rev1_222, *(float16x4_t *) &__reint1_222); \ - __ret_222 = __builtin_shufflevector(__ret_222, __ret_222, 3, 2, 1, 0); \ - __ret_222; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot180_laneq_f16(__p0_223, __p1_223, __p2_223, __p3_223) __extension__ ({ \ - float16x8_t __ret_223; \ - float16x8_t __s0_223 = __p0_223; \ - float16x8_t __s1_223 = __p1_223; \ - float16x8_t __s2_223 = __p2_223; \ -float16x8_t __reint_223 = __s2_223; \ -uint32x4_t __reint1_223 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223), vgetq_lane_u32(*(uint32x4_t *) &__reint_223, __p3_223)}; \ - __ret_223 = vcmlaq_rot180_f16(__s0_223, __s1_223, *(float16x8_t *) &__reint1_223); \ - __ret_223; \ -}) -#else -#define vcmlaq_rot180_laneq_f16(__p0_224, __p1_224, __p2_224, __p3_224) __extension__ ({ \ - float16x8_t __ret_224; \ - float16x8_t __s0_224 = __p0_224; \ - float16x8_t __s1_224 = __p1_224; \ - float16x8_t __s2_224 = __p2_224; \ - float16x8_t __rev0_224; __rev0_224 = 
__builtin_shufflevector(__s0_224, __s0_224, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_224; __rev1_224 = __builtin_shufflevector(__s1_224, __s1_224, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_224; __rev2_224 = __builtin_shufflevector(__s2_224, __s2_224, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_224 = __rev2_224; \ -uint32x4_t __reint1_224 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_224, __p3_224)}; \ - __ret_224 = __noswap_vcmlaq_rot180_f16(__rev0_224, __rev1_224, *(float16x8_t *) &__reint1_224); \ - __ret_224 = __builtin_shufflevector(__ret_224, __ret_224, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_224; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32x4_t 
vreinterpretq_s32_u16(uint16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); return __ret; } -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t __noswap_vcmlaq_rot270_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcmlaq_rot270_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcmla_rot270_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vcmla_rot270_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); return __ret; } -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t __noswap_vcmla_rot270_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcmla_rot270_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); +__ai 
__attribute__((target("neon"))) int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot270_lane_f16(__p0_225, __p1_225, __p2_225, __p3_225) __extension__ ({ \ - float16x4_t __ret_225; \ - float16x4_t __s0_225 = __p0_225; \ - float16x4_t __s1_225 = __p1_225; \ - float16x4_t __s2_225 = __p2_225; \ -float16x4_t __reint_225 = __s2_225; \ -uint32x2_t __reint1_225 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225), vget_lane_u32(*(uint32x2_t *) &__reint_225, __p3_225)}; \ - __ret_225 = vcmla_rot270_f16(__s0_225, __s1_225, *(float16x4_t *) &__reint1_225); \ - __ret_225; \ -}) -#else -#define vcmla_rot270_lane_f16(__p0_226, __p1_226, __p2_226, __p3_226) __extension__ ({ \ - float16x4_t __ret_226; \ - float16x4_t __s0_226 = __p0_226; \ - float16x4_t __s1_226 = __p1_226; \ - float16x4_t __s2_226 = __p2_226; \ - float16x4_t __rev0_226; __rev0_226 = __builtin_shufflevector(__s0_226, __s0_226, 3, 2, 1, 0); \ - float16x4_t __rev1_226; __rev1_226 = __builtin_shufflevector(__s1_226, __s1_226, 3, 2, 1, 0); \ - float16x4_t __rev2_226; __rev2_226 = __builtin_shufflevector(__s2_226, __s2_226, 3, 2, 1, 0); \ -float16x4_t __reint_226 = __rev2_226; \ -uint32x2_t __reint1_226 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_226, __p3_226)}; \ - __ret_226 = __noswap_vcmla_rot270_f16(__rev0_226, __rev1_226, *(float16x4_t *) &__reint1_226); \ - __ret_226 = __builtin_shufflevector(__ret_226, __ret_226, 3, 2, 1, 0); \ - __ret_226; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot270_lane_f16(__p0_227, __p1_227, __p2_227, __p3_227) __extension__ ({ \ - float16x8_t __ret_227; \ - float16x8_t __s0_227 = __p0_227; \ - float16x8_t __s1_227 = __p1_227; \ - float16x4_t __s2_227 = __p2_227; \ -float16x4_t __reint_227 = __s2_227; \ -uint32x4_t __reint1_227 = (uint32x4_t) 
{vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227), vget_lane_u32(*(uint32x2_t *) &__reint_227, __p3_227)}; \ - __ret_227 = vcmlaq_rot270_f16(__s0_227, __s1_227, *(float16x8_t *) &__reint1_227); \ - __ret_227; \ -}) -#else -#define vcmlaq_rot270_lane_f16(__p0_228, __p1_228, __p2_228, __p3_228) __extension__ ({ \ - float16x8_t __ret_228; \ - float16x8_t __s0_228 = __p0_228; \ - float16x8_t __s1_228 = __p1_228; \ - float16x4_t __s2_228 = __p2_228; \ - float16x8_t __rev0_228; __rev0_228 = __builtin_shufflevector(__s0_228, __s0_228, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_228; __rev1_228 = __builtin_shufflevector(__s1_228, __s1_228, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_228; __rev2_228 = __builtin_shufflevector(__s2_228, __s2_228, 3, 2, 1, 0); \ -float16x4_t __reint_228 = __rev2_228; \ -uint32x4_t __reint1_228 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_228, __p3_228)}; \ - __ret_228 = __noswap_vcmlaq_rot270_f16(__rev0_228, __rev1_228, *(float16x8_t *) &__reint1_228); \ - __ret_228 = __builtin_shufflevector(__ret_228, __ret_228, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_228; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot270_laneq_f16(__p0_229, __p1_229, __p2_229, __p3_229) __extension__ ({ \ - float16x4_t __ret_229; \ - float16x4_t __s0_229 = __p0_229; \ - float16x4_t __s1_229 = __p1_229; \ - float16x8_t __s2_229 = __p2_229; \ -float16x8_t __reint_229 = __s2_229; \ -uint32x2_t __reint1_229 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229), vgetq_lane_u32(*(uint32x4_t *) &__reint_229, __p3_229)}; \ - __ret_229 = vcmla_rot270_f16(__s0_229, __s1_229, *(float16x4_t *) &__reint1_229); \ - __ret_229; \ -}) 
-#else -#define vcmla_rot270_laneq_f16(__p0_230, __p1_230, __p2_230, __p3_230) __extension__ ({ \ - float16x4_t __ret_230; \ - float16x4_t __s0_230 = __p0_230; \ - float16x4_t __s1_230 = __p1_230; \ - float16x8_t __s2_230 = __p2_230; \ - float16x4_t __rev0_230; __rev0_230 = __builtin_shufflevector(__s0_230, __s0_230, 3, 2, 1, 0); \ - float16x4_t __rev1_230; __rev1_230 = __builtin_shufflevector(__s1_230, __s1_230, 3, 2, 1, 0); \ - float16x8_t __rev2_230; __rev2_230 = __builtin_shufflevector(__s2_230, __s2_230, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_230 = __rev2_230; \ -uint32x2_t __reint1_230 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_230, __p3_230), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_230, __p3_230)}; \ - __ret_230 = __noswap_vcmla_rot270_f16(__rev0_230, __rev1_230, *(float16x4_t *) &__reint1_230); \ - __ret_230 = __builtin_shufflevector(__ret_230, __ret_230, 3, 2, 1, 0); \ - __ret_230; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot270_laneq_f16(__p0_231, __p1_231, __p2_231, __p3_231) __extension__ ({ \ - float16x8_t __ret_231; \ - float16x8_t __s0_231 = __p0_231; \ - float16x8_t __s1_231 = __p1_231; \ - float16x8_t __s2_231 = __p2_231; \ -float16x8_t __reint_231 = __s2_231; \ -uint32x4_t __reint1_231 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231), vgetq_lane_u32(*(uint32x4_t *) &__reint_231, __p3_231)}; \ - __ret_231 = vcmlaq_rot270_f16(__s0_231, __s1_231, *(float16x8_t *) &__reint1_231); \ - __ret_231; \ -}) -#else -#define vcmlaq_rot270_laneq_f16(__p0_232, __p1_232, __p2_232, __p3_232) __extension__ ({ \ - float16x8_t __ret_232; \ - float16x8_t __s0_232 = __p0_232; \ - float16x8_t __s1_232 = __p1_232; \ - float16x8_t __s2_232 = __p2_232; \ - float16x8_t __rev0_232; __rev0_232 = __builtin_shufflevector(__s0_232, __s0_232, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t 
__rev1_232; __rev1_232 = __builtin_shufflevector(__s1_232, __s1_232, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_232; __rev2_232 = __builtin_shufflevector(__s2_232, __s2_232, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_232 = __rev2_232; \ -uint32x4_t __reint1_232 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_232, __p3_232)}; \ - __ret_232 = __noswap_vcmlaq_rot270_f16(__rev0_232, __rev1_232, *(float16x8_t *) &__reint1_232); \ - __ret_232 = __builtin_shufflevector(__ret_232, __ret_232, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_232; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.3a,fullfp16"))) float16x8_t vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); return __ret; } -__ai 
__attribute__((target("v8.3a,fullfp16"))) float16x8_t __noswap_vcmlaq_rot90_f16(float16x8_t __p0, float16x8_t __p1, float16x8_t __p2) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vcmlaq_rot90_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 40); +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcmla_rot90_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); return __ret; } -#else -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vcmla_rot90_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); return __ret; } -__ai __attribute__((target("v8.3a,fullfp16"))) float16x4_t __noswap_vcmla_rot90_f16(float16x4_t __p0, float16x4_t __p1, float16x4_t __p2) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcmla_rot90_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 8); +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) { + int64x2_t __ret; + __ret = 
(int64x2_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot90_lane_f16(__p0_233, __p1_233, __p2_233, __p3_233) __extension__ ({ \ - float16x4_t __ret_233; \ - float16x4_t __s0_233 = __p0_233; \ - float16x4_t __s1_233 = __p1_233; \ - float16x4_t __s2_233 = __p2_233; \ -float16x4_t __reint_233 = __s2_233; \ -uint32x2_t __reint1_233 = (uint32x2_t) {vget_lane_u32(*(uint32x2_t *) &__reint_233, __p3_233), vget_lane_u32(*(uint32x2_t *) &__reint_233, __p3_233)}; \ - __ret_233 = vcmla_rot90_f16(__s0_233, __s1_233, *(float16x4_t *) &__reint1_233); \ - __ret_233; \ -}) -#else -#define vcmla_rot90_lane_f16(__p0_234, __p1_234, __p2_234, __p3_234) __extension__ ({ \ - float16x4_t __ret_234; \ - float16x4_t __s0_234 = __p0_234; \ - float16x4_t __s1_234 = __p1_234; \ - float16x4_t __s2_234 = __p2_234; \ - float16x4_t __rev0_234; __rev0_234 = __builtin_shufflevector(__s0_234, __s0_234, 3, 2, 1, 0); \ - float16x4_t __rev1_234; __rev1_234 = __builtin_shufflevector(__s1_234, __s1_234, 3, 2, 1, 0); \ - float16x4_t __rev2_234; __rev2_234 = __builtin_shufflevector(__s2_234, __s2_234, 3, 2, 1, 0); \ -float16x4_t __reint_234 = __rev2_234; \ -uint32x2_t __reint1_234 = (uint32x2_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_234, __p3_234), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_234, __p3_234)}; \ - __ret_234 = __noswap_vcmla_rot90_f16(__rev0_234, __rev1_234, *(float16x4_t *) &__reint1_234); \ - __ret_234 = __builtin_shufflevector(__ret_234, __ret_234, 3, 2, 1, 0); \ - __ret_234; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot90_lane_f16(__p0_235, __p1_235, __p2_235, __p3_235) __extension__ ({ \ - float16x8_t __ret_235; \ - float16x8_t __s0_235 = __p0_235; \ - float16x8_t __s1_235 = __p1_235; \ - float16x4_t __s2_235 = __p2_235; \ -float16x4_t __reint_235 = __s2_235; \ -uint32x4_t __reint1_235 = (uint32x4_t) {vget_lane_u32(*(uint32x2_t *) &__reint_235, __p3_235), vget_lane_u32(*(uint32x2_t *) &__reint_235, __p3_235), 
vget_lane_u32(*(uint32x2_t *) &__reint_235, __p3_235), vget_lane_u32(*(uint32x2_t *) &__reint_235, __p3_235)}; \ - __ret_235 = vcmlaq_rot90_f16(__s0_235, __s1_235, *(float16x8_t *) &__reint1_235); \ - __ret_235; \ -}) -#else -#define vcmlaq_rot90_lane_f16(__p0_236, __p1_236, __p2_236, __p3_236) __extension__ ({ \ - float16x8_t __ret_236; \ - float16x8_t __s0_236 = __p0_236; \ - float16x8_t __s1_236 = __p1_236; \ - float16x4_t __s2_236 = __p2_236; \ - float16x8_t __rev0_236; __rev0_236 = __builtin_shufflevector(__s0_236, __s0_236, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_236; __rev1_236 = __builtin_shufflevector(__s1_236, __s1_236, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_236; __rev2_236 = __builtin_shufflevector(__s2_236, __s2_236, 3, 2, 1, 0); \ -float16x4_t __reint_236 = __rev2_236; \ -uint32x4_t __reint1_236 = (uint32x4_t) {__noswap_vget_lane_u32(*(uint32x2_t *) &__reint_236, __p3_236), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_236, __p3_236), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_236, __p3_236), __noswap_vget_lane_u32(*(uint32x2_t *) &__reint_236, __p3_236)}; \ - __ret_236 = __noswap_vcmlaq_rot90_f16(__rev0_236, __rev1_236, *(float16x8_t *) &__reint1_236); \ - __ret_236 = __builtin_shufflevector(__ret_236, __ret_236, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_236; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmla_rot90_laneq_f16(__p0_237, __p1_237, __p2_237, __p3_237) __extension__ ({ \ - float16x4_t __ret_237; \ - float16x4_t __s0_237 = __p0_237; \ - float16x4_t __s1_237 = __p1_237; \ - float16x8_t __s2_237 = __p2_237; \ -float16x8_t __reint_237 = __s2_237; \ -uint32x2_t __reint1_237 = (uint32x2_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_237, __p3_237), vgetq_lane_u32(*(uint32x4_t *) &__reint_237, __p3_237)}; \ - __ret_237 = vcmla_rot90_f16(__s0_237, __s1_237, *(float16x4_t *) &__reint1_237); \ - __ret_237; \ -}) -#else -#define vcmla_rot90_laneq_f16(__p0_238, __p1_238, __p2_238, __p3_238) __extension__ ({ \ - float16x4_t 
__ret_238; \ - float16x4_t __s0_238 = __p0_238; \ - float16x4_t __s1_238 = __p1_238; \ - float16x8_t __s2_238 = __p2_238; \ - float16x4_t __rev0_238; __rev0_238 = __builtin_shufflevector(__s0_238, __s0_238, 3, 2, 1, 0); \ - float16x4_t __rev1_238; __rev1_238 = __builtin_shufflevector(__s1_238, __s1_238, 3, 2, 1, 0); \ - float16x8_t __rev2_238; __rev2_238 = __builtin_shufflevector(__s2_238, __s2_238, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_238 = __rev2_238; \ -uint32x2_t __reint1_238 = (uint32x2_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_238, __p3_238), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_238, __p3_238)}; \ - __ret_238 = __noswap_vcmla_rot90_f16(__rev0_238, __rev1_238, *(float16x4_t *) &__reint1_238); \ - __ret_238 = __builtin_shufflevector(__ret_238, __ret_238, 3, 2, 1, 0); \ - __ret_238; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot90_laneq_f16(__p0_239, __p1_239, __p2_239, __p3_239) __extension__ ({ \ - float16x8_t __ret_239; \ - float16x8_t __s0_239 = __p0_239; \ - float16x8_t __s1_239 = __p1_239; \ - float16x8_t __s2_239 = __p2_239; \ -float16x8_t __reint_239 = __s2_239; \ -uint32x4_t __reint1_239 = (uint32x4_t) {vgetq_lane_u32(*(uint32x4_t *) &__reint_239, __p3_239), vgetq_lane_u32(*(uint32x4_t *) &__reint_239, __p3_239), vgetq_lane_u32(*(uint32x4_t *) &__reint_239, __p3_239), vgetq_lane_u32(*(uint32x4_t *) &__reint_239, __p3_239)}; \ - __ret_239 = vcmlaq_rot90_f16(__s0_239, __s1_239, *(float16x8_t *) &__reint1_239); \ - __ret_239; \ -}) -#else -#define vcmlaq_rot90_laneq_f16(__p0_240, __p1_240, __p2_240, __p3_240) __extension__ ({ \ - float16x8_t __ret_240; \ - float16x8_t __s0_240 = __p0_240; \ - float16x8_t __s1_240 = __p1_240; \ - float16x8_t __s2_240 = __p2_240; \ - float16x8_t __rev0_240; __rev0_240 = __builtin_shufflevector(__s0_240, __s0_240, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_240; __rev1_240 = __builtin_shufflevector(__s1_240, __s1_240, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_240; 
__rev2_240 = __builtin_shufflevector(__s2_240, __s2_240, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_240 = __rev2_240; \ -uint32x4_t __reint1_240 = (uint32x4_t) {__noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_240, __p3_240), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_240, __p3_240), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_240, __p3_240), __noswap_vgetq_lane_u32(*(uint32x4_t *) &__reint_240, __p3_240)}; \ - __ret_240 = __noswap_vcmlaq_rot90_f16(__rev0_240, __rev1_240, *(float16x8_t *) &__reint1_240); \ - __ret_240 = __builtin_shufflevector(__ret_240, __ret_240, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_240; \ -}) -#endif - -#if !defined(__aarch64__) -#ifdef __LITTLE_ENDIAN__ -#define vqdmulhq_lane_s32(__p0_241, __p1_241, __p2_241) __extension__ ({ \ - int32x4_t __ret_241; \ - int32x4_t __s0_241 = __p0_241; \ - int32x2_t __s1_241 = __p1_241; \ - __ret_241 = vqdmulhq_s32(__s0_241, splatq_lane_s32(__s1_241, __p2_241)); \ - __ret_241; \ -}) -#else -#define vqdmulhq_lane_s32(__p0_242, __p1_242, __p2_242) __extension__ ({ \ - int32x4_t __ret_242; \ - int32x4_t __s0_242 = __p0_242; \ - int32x2_t __s1_242 = __p1_242; \ - int32x4_t __rev0_242; __rev0_242 = __builtin_shufflevector(__s0_242, __s0_242, 3, 2, 1, 0); \ - int32x2_t __rev1_242; __rev1_242 = __builtin_shufflevector(__s1_242, __s1_242, 1, 0); \ - __ret_242 = __noswap_vqdmulhq_s32(__rev0_242, __noswap_splatq_lane_s32(__rev1_242, __p2_242)); \ - __ret_242 = __builtin_shufflevector(__ret_242, __ret_242, 3, 2, 1, 0); \ - __ret_242; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmulhq_lane_s16(__p0_243, __p1_243, __p2_243) __extension__ ({ \ - int16x8_t __ret_243; \ - int16x8_t __s0_243 = __p0_243; \ - int16x4_t __s1_243 = __p1_243; \ - __ret_243 = vqdmulhq_s16(__s0_243, splatq_lane_s16(__s1_243, __p2_243)); \ - __ret_243; \ -}) -#else -#define vqdmulhq_lane_s16(__p0_244, __p1_244, __p2_244) __extension__ ({ \ - int16x8_t __ret_244; \ - int16x8_t __s0_244 = __p0_244; \ - int16x4_t __s1_244 = __p1_244; 
\ - int16x8_t __rev0_244; __rev0_244 = __builtin_shufflevector(__s0_244, __s0_244, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev1_244; __rev1_244 = __builtin_shufflevector(__s1_244, __s1_244, 3, 2, 1, 0); \ - __ret_244 = __noswap_vqdmulhq_s16(__rev0_244, __noswap_splatq_lane_s16(__rev1_244, __p2_244)); \ - __ret_244 = __builtin_shufflevector(__ret_244, __ret_244, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_244; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmulh_lane_s32(__p0_245, __p1_245, __p2_245) __extension__ ({ \ - int32x2_t __ret_245; \ - int32x2_t __s0_245 = __p0_245; \ - int32x2_t __s1_245 = __p1_245; \ - __ret_245 = vqdmulh_s32(__s0_245, splat_lane_s32(__s1_245, __p2_245)); \ - __ret_245; \ -}) -#else -#define vqdmulh_lane_s32(__p0_246, __p1_246, __p2_246) __extension__ ({ \ - int32x2_t __ret_246; \ - int32x2_t __s0_246 = __p0_246; \ - int32x2_t __s1_246 = __p1_246; \ - int32x2_t __rev0_246; __rev0_246 = __builtin_shufflevector(__s0_246, __s0_246, 1, 0); \ - int32x2_t __rev1_246; __rev1_246 = __builtin_shufflevector(__s1_246, __s1_246, 1, 0); \ - __ret_246 = __noswap_vqdmulh_s32(__rev0_246, __noswap_splat_lane_s32(__rev1_246, __p2_246)); \ - __ret_246 = __builtin_shufflevector(__ret_246, __ret_246, 1, 0); \ - __ret_246; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqdmulh_lane_s16(__p0_247, __p1_247, __p2_247) __extension__ ({ \ - int16x4_t __ret_247; \ - int16x4_t __s0_247 = __p0_247; \ - int16x4_t __s1_247 = __p1_247; \ - __ret_247 = vqdmulh_s16(__s0_247, splat_lane_s16(__s1_247, __p2_247)); \ - __ret_247; \ -}) -#else -#define vqdmulh_lane_s16(__p0_248, __p1_248, __p2_248) __extension__ ({ \ - int16x4_t __ret_248; \ - int16x4_t __s0_248 = __p0_248; \ - int16x4_t __s1_248 = __p1_248; \ - int16x4_t __rev0_248; __rev0_248 = __builtin_shufflevector(__s0_248, __s0_248, 3, 2, 1, 0); \ - int16x4_t __rev1_248; __rev1_248 = __builtin_shufflevector(__s1_248, __s1_248, 3, 2, 1, 0); \ - __ret_248 = __noswap_vqdmulh_s16(__rev0_248, 
__noswap_splat_lane_s16(__rev1_248, __p2_248)); \ - __ret_248 = __builtin_shufflevector(__ret_248, __ret_248, 3, 2, 1, 0); \ - __ret_248; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmulhq_lane_s32(__p0_249, __p1_249, __p2_249) __extension__ ({ \ - int32x4_t __ret_249; \ - int32x4_t __s0_249 = __p0_249; \ - int32x2_t __s1_249 = __p1_249; \ - __ret_249 = vqrdmulhq_s32(__s0_249, splatq_lane_s32(__s1_249, __p2_249)); \ - __ret_249; \ -}) -#else -#define vqrdmulhq_lane_s32(__p0_250, __p1_250, __p2_250) __extension__ ({ \ - int32x4_t __ret_250; \ - int32x4_t __s0_250 = __p0_250; \ - int32x2_t __s1_250 = __p1_250; \ - int32x4_t __rev0_250; __rev0_250 = __builtin_shufflevector(__s0_250, __s0_250, 3, 2, 1, 0); \ - int32x2_t __rev1_250; __rev1_250 = __builtin_shufflevector(__s1_250, __s1_250, 1, 0); \ - __ret_250 = __noswap_vqrdmulhq_s32(__rev0_250, __noswap_splatq_lane_s32(__rev1_250, __p2_250)); \ - __ret_250 = __builtin_shufflevector(__ret_250, __ret_250, 3, 2, 1, 0); \ - __ret_250; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmulhq_lane_s16(__p0_251, __p1_251, __p2_251) __extension__ ({ \ - int16x8_t __ret_251; \ - int16x8_t __s0_251 = __p0_251; \ - int16x4_t __s1_251 = __p1_251; \ - __ret_251 = vqrdmulhq_s16(__s0_251, splatq_lane_s16(__s1_251, __p2_251)); \ - __ret_251; \ -}) -#else -#define vqrdmulhq_lane_s16(__p0_252, __p1_252, __p2_252) __extension__ ({ \ - int16x8_t __ret_252; \ - int16x8_t __s0_252 = __p0_252; \ - int16x4_t __s1_252 = __p1_252; \ - int16x8_t __rev0_252; __rev0_252 = __builtin_shufflevector(__s0_252, __s0_252, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev1_252; __rev1_252 = __builtin_shufflevector(__s1_252, __s1_252, 3, 2, 1, 0); \ - __ret_252 = __noswap_vqrdmulhq_s16(__rev0_252, __noswap_splatq_lane_s16(__rev1_252, __p2_252)); \ - __ret_252 = __builtin_shufflevector(__ret_252, __ret_252, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_252; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmulh_lane_s32(__p0_253, __p1_253, __p2_253) 
__extension__ ({ \ - int32x2_t __ret_253; \ - int32x2_t __s0_253 = __p0_253; \ - int32x2_t __s1_253 = __p1_253; \ - __ret_253 = vqrdmulh_s32(__s0_253, splat_lane_s32(__s1_253, __p2_253)); \ - __ret_253; \ -}) -#else -#define vqrdmulh_lane_s32(__p0_254, __p1_254, __p2_254) __extension__ ({ \ - int32x2_t __ret_254; \ - int32x2_t __s0_254 = __p0_254; \ - int32x2_t __s1_254 = __p1_254; \ - int32x2_t __rev0_254; __rev0_254 = __builtin_shufflevector(__s0_254, __s0_254, 1, 0); \ - int32x2_t __rev1_254; __rev1_254 = __builtin_shufflevector(__s1_254, __s1_254, 1, 0); \ - __ret_254 = __noswap_vqrdmulh_s32(__rev0_254, __noswap_splat_lane_s32(__rev1_254, __p2_254)); \ - __ret_254 = __builtin_shufflevector(__ret_254, __ret_254, 1, 0); \ - __ret_254; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vqrdmulh_lane_s16(__p0_255, __p1_255, __p2_255) __extension__ ({ \ - int16x4_t __ret_255; \ - int16x4_t __s0_255 = __p0_255; \ - int16x4_t __s1_255 = __p1_255; \ - __ret_255 = vqrdmulh_s16(__s0_255, splat_lane_s16(__s1_255, __p2_255)); \ - __ret_255; \ -}) -#else -#define vqrdmulh_lane_s16(__p0_256, __p1_256, __p2_256) __extension__ ({ \ - int16x4_t __ret_256; \ - int16x4_t __s0_256 = __p0_256; \ - int16x4_t __s1_256 = __p1_256; \ - int16x4_t __rev0_256; __rev0_256 = __builtin_shufflevector(__s0_256, __s0_256, 3, 2, 1, 0); \ - int16x4_t __rev1_256; __rev1_256 = __builtin_shufflevector(__s1_256, __s1_256, 3, 2, 1, 0); \ - __ret_256 = __noswap_vqrdmulh_s16(__rev0_256, __noswap_splat_lane_s16(__rev1_256, __p2_256)); \ - __ret_256 = __builtin_shufflevector(__ret_256, __ret_256, 3, 2, 1, 0); \ - __ret_256; \ -}) -#endif - -__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); return __ret; } -__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); +__ai 
__attribute__((target("neon"))) int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); return __ret; } -__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); return __ret; } -__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); return __ret; } -__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); return __ret; } -__ai poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); return __ret; } -__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); return __ret; } -__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); return __ret; } -__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); return __ret; } -__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) { - poly8x8_t 
__ret; - __ret = (poly8x8_t)(__p0); +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); return __ret; } -__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); return __ret; } -__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); return __ret; } -__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); return __ret; } -__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); return __ret; } -__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); return __ret; } -__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); return __ret; } -__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); return __ret; } 
-__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); return __ret; } -__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); return __ret; } -__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); return __ret; } -__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); return __ret; } -__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) { + 
uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); +__ai 
__attribute__((target("neon"))) uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); return __ret; } -__ai poly16x8_t 
vreinterpretq_p16_f16(float16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); +__ai __attribute__((target("neon"))) uint64x1_t 
vreinterpret_u64_s32(int32x2_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) { - uint32x4_t 
__ret; - __ret = (uint32x4_t)(__p0); +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); 
return __ret; } -__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { + 
int8x8_t __ret; + __ret = (int8x8_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) { - uint16x8_t __ret; - __ret = 
(uint16x8_t)(__p0); +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { + float16x4_t __ret; + __ret = 
(float16x4_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_f32(float32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s32(int32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s64(int64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); +__ai 
__attribute__((target("neon"))) float16x4_t vreinterpret_f16_s16(int16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_s8(int8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t 
__p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_f16(float16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_s64(int64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_s16(int16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) { - float16x8_t __ret; - __ret = 
(float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) 
{ - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) { - int16x8_t __ret; 
- __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - 
return __ret; -} -__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return 
__ret; -} -__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - 
return __ret; -} -__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); return __ret; } -__ai int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); +__ai __attribute__((target("neon"))) int64x1_t 
vreinterpret_s64_u8(uint8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); return __ret; } -__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); return __ret; } -__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); return __ret; } -__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); return __ret; } -__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_s8(int8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); return __ret; } -__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_f32(float32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); return __ret; } -__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_f16(float16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); return __ret; } -__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_s32(int32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); return __ret; } -__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { - float32x2_t __ret; - __ret = 
(float32x2_t)(__p0); +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_s16(int16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); return __ret; } -__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); return __ret; } -__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); return __ret; } -__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); return __ret; } -__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); return __ret; } -__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); return __ret; } -__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); return __ret; } -__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_s8(int8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); return __ret; } -__ai 
float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_f32(float32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); return __ret; } -__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); return __ret; } -__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_s32(int32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); return __ret; } -__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); return __ret; } -__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { +#endif +#if (__ARM_FP & 2) +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4_t vcvt_f16_f32(float32x4_t __p0) { float16x4_t __ret; - __ret = (float16x4_t)(__p0); + __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 41); return __ret; } -__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) { +#else +__ai __attribute__((target("neon"))) float16x4_t vcvt_f16_f32(float32x4_t __p0) { float16x4_t __ret; - __ret = (float16x4_t)(__p0); + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) float16x4_t __noswap_vcvt_f16_f32(float32x4_t __p0) { float16x4_t __ret; - __ret = 
(float16x4_t)(__p0); + __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 41); return __ret; } -__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vcvt_f32_f16(float16x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 8); return __ret; } -__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); +#else +__ai __attribute__((target("neon"))) float32x4_t vcvt_f32_f16(float16x4_t __p0) { + float32x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; 
-} -__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t 
vreinterpret_s16_u32(uint32x2_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_f32((int8x16_t)__p0, 11); - return __ret; -} -#else -__ai __attribute__((target("bf16"))) bfloat16x4_t __a32_vcvt_bf16_f32(float32x4_t __p0) { - bfloat16x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_f32((int8x16_t)__rev0, 11); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t __noswap___a32_vcvt_bf16_f32(float32x4_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t) __builtin_neon___a32_vcvt_bf16_f32((int8x16_t)__p0, 11); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { - bfloat16x4_t __ret; - __ret = __a32_vcvt_bf16_f32(__p0); - return __ret; -} -#else 
-__ai __attribute__((target("bf16"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { - bfloat16x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __noswap___a32_vcvt_bf16_f32(__rev0); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { - bfloat16x8_t __ret; - __ret = vcombine_bf16(__a32_vcvt_bf16_f32(__p1), vget_low_bf16(__p0)); - return __ret; -} -#else -__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { - bfloat16x8_t __ret; - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __noswap_vcombine_bf16(__noswap___a32_vcvt_bf16_f32(__rev1), __noswap_vget_low_bf16(__rev0)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { - bfloat16x8_t __ret; - __ret = vcombine_bf16((bfloat16x4_t)(0ULL), __a32_vcvt_bf16_f32(__p0)); - return __ret; -} -#else -__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { - bfloat16x8_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __noswap_vcombine_bf16((bfloat16x4_t)(0ULL), __noswap___a32_vcvt_bf16_f32(__rev0)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -__ai __attribute__((target("bf16"))) poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) { - poly64x1_t __ret; - __ret = 
(poly64x1_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai 
__attribute__((target("bf16"))) int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) { 
- bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) { - 
bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) { - bfloat16x4_t __ret; - 
__ret = (bfloat16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); - return __ret; -} -#endif -#if (__ARM_FP & 2) -#ifdef __LITTLE_ENDIAN__ -__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 41); - return __ret; -} -#else -__ai float16x4_t vcvt_f16_f32(float32x4_t __p0) { - float16x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__rev0, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float16x4_t __noswap_vcvt_f16_f32(float32x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vcvt_f16_f32((int8x16_t)__p0, 41); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 8); - return __ret; -} -#else -__ai float32x4_t vcvt_f32_f16(float16x4_t __p0) { - float32x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float32x4_t __noswap_vcvt_f32_f16(float16x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 8); +__ai __attribute__((target("neon"))) float32x4_t __noswap_vcvt_f32_f16(float16x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vcvt_f32_f16((int8x8_t)__p0, 8); return __ret; } #endif @@ -40237,16 +39888,277 @@ __ai float32x4_t __noswap_vcvt_f32_f16(float16x4_t __p0) { }) #endif +#endif +#if (defined(__aarch64__) || defined(__arm64ec__)) && defined(__ARM_FEATURE_NUMERIC_MAXMIN) +#ifdef 
__LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +#endif +#if (defined(__aarch64__) || defined(__arm64ec__)) && defined(__ARM_FEATURE_DIRECTED_ROUNDING) +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("neon"))) float64x2_t vrndq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vrndq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vrnd_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vrndaq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vrndaq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vrnda_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vrndiq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vrndiq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} 
+#endif + +__ai __attribute__((target("neon"))) float64x1_t vrndi_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vrndmq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vrndmq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vrndm_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vrndnq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vrndnq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vrndn_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vrndpq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vrndpq_f64(float64x2_t __p0) { + 
float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vrndp_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vrndxq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vrndxq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vrndx_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10); + return __ret; +} #endif #if __ARM_ARCH >= 8 #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) { +__ai __attribute__((target("aes,neon"))) uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaesdq_u8((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("aes,neon"))) uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vaesdq_u8((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = 
__builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("aes,neon"))) uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaeseq_u8((int8x16_t)__p0, (int8x16_t)__p1, 48); + return __ret; +} +#else +__ai __attribute__((target("aes,neon"))) uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vaeseq_u8((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("aes,neon"))) uint8x16_t vaesimcq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaesimcq_u8((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai __attribute__((target("aes,neon"))) uint8x16_t vaesimcq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vaesimcq_u8((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("aes,neon"))) uint8x16_t vaesmcq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vaesmcq_u8((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai __attribute__((target("aes,neon"))) uint8x16_t vaesmcq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vaesmcq_u8((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vcvtaq_s32_f32(float32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__p0, 34); return __ret; } #else -__ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vcvtaq_s32_f32(float32x4_t __p0) { int32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vcvtaq_s32_v((int8x16_t)__rev0, 34); @@ -40256,13 +40168,13 @@ __ai int32x4_t vcvtaq_s32_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vcvta_s32_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vcvta_s32_f32(float32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__p0, 2); return __ret; } #else -__ai int32x2_t vcvta_s32_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vcvta_s32_f32(float32x2_t __p0) { int32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32x2_t) __builtin_neon_vcvta_s32_v((int8x8_t)__rev0, 2); @@ -40272,13 +40184,13 @@ __ai int32x2_t vcvta_s32_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__p0, 50); return __ret; } #else -__ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcvtaq_u32_v((int8x16_t)__rev0, 50); @@ -40288,13 +40200,13 @@ __ai uint32x4_t vcvtaq_u32_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vcvta_u32_f32(float32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__p0, 18); return __ret; } #else -__ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vcvta_u32_f32(float32x2_t __p0) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcvta_u32_v((int8x8_t)__rev0, 18); @@ -40304,13 +40216,13 @@ __ai uint32x2_t vcvta_u32_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vcvtmq_s32_f32(float32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__p0, 34); return __ret; } #else -__ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vcvtmq_s32_f32(float32x4_t __p0) { int32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vcvtmq_s32_v((int8x16_t)__rev0, 34); @@ -40320,13 +40232,13 @@ __ai int32x4_t vcvtmq_s32_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vcvtm_s32_f32(float32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__p0, 2); return __ret; } #else -__ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vcvtm_s32_f32(float32x2_t __p0) { int32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = 
(int32x2_t) __builtin_neon_vcvtm_s32_v((int8x8_t)__rev0, 2); @@ -40336,13 +40248,13 @@ __ai int32x2_t vcvtm_s32_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__p0, 50); return __ret; } #else -__ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcvtmq_u32_v((int8x16_t)__rev0, 50); @@ -40352,13 +40264,13 @@ __ai uint32x4_t vcvtmq_u32_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vcvtm_u32_f32(float32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__p0, 18); return __ret; } #else -__ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vcvtm_u32_f32(float32x2_t __p0) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcvtm_u32_v((int8x8_t)__rev0, 18); @@ -40368,13 +40280,13 @@ __ai uint32x2_t vcvtm_u32_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vcvtnq_s32_f32(float32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vcvtnq_s32_v((int8x16_t)__p0, 34); return __ret; } #else -__ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vcvtnq_s32_f32(float32x4_t __p0) { int32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32x4_t) 
__builtin_neon_vcvtnq_s32_v((int8x16_t)__rev0, 34); @@ -40384,13 +40296,13 @@ __ai int32x4_t vcvtnq_s32_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vcvtn_s32_f32(float32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__p0, 2); return __ret; } #else -__ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vcvtn_s32_f32(float32x2_t __p0) { int32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32x2_t) __builtin_neon_vcvtn_s32_v((int8x8_t)__rev0, 2); @@ -40400,13 +40312,13 @@ __ai int32x2_t vcvtn_s32_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__p0, 50); return __ret; } #else -__ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcvtnq_u32_v((int8x16_t)__rev0, 50); @@ -40416,13 +40328,13 @@ __ai uint32x4_t vcvtnq_u32_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vcvtn_u32_f32(float32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__p0, 18); return __ret; } #else -__ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vcvtn_u32_f32(float32x2_t __p0) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcvtn_u32_v((int8x8_t)__rev0, 18); @@ 
-40432,13 +40344,13 @@ __ai uint32x2_t vcvtn_u32_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vcvtpq_s32_f32(float32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__p0, 34); return __ret; } #else -__ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vcvtpq_s32_f32(float32x4_t __p0) { int32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (int32x4_t) __builtin_neon_vcvtpq_s32_v((int8x16_t)__rev0, 34); @@ -40448,13 +40360,13 @@ __ai int32x4_t vcvtpq_s32_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vcvtp_s32_f32(float32x2_t __p0) { int32x2_t __ret; __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__p0, 2); return __ret; } #else -__ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) int32x2_t vcvtp_s32_f32(float32x2_t __p0) { int32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (int32x2_t) __builtin_neon_vcvtp_s32_v((int8x8_t)__rev0, 2); @@ -40464,13 +40376,13 @@ __ai int32x2_t vcvtp_s32_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__p0, 50); return __ret; } #else -__ai uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x4_t vcvtpq_u32_f32(float32x4_t __p0) { uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (uint32x4_t) __builtin_neon_vcvtpq_u32_v((int8x16_t)__rev0, 50); @@ -40480,13 +40392,13 @@ __ai uint32x4_t 
vcvtpq_u32_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vcvtp_u32_f32(float32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__p0, 18); return __ret; } #else -__ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vcvtp_u32_f32(float32x2_t __p0) { uint32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (uint32x2_t) __builtin_neon_vcvtp_u32_v((int8x8_t)__rev0, 18); @@ -40496,79 +40408,13 @@ __ai uint32x2_t vcvtp_u32_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("aes"))) uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vaesdq_u8((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai __attribute__((target("aes"))) uint8x16_t vaesdq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vaesdq_u8((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("aes"))) uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vaeseq_u8((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai __attribute__((target("aes"))) uint8x16_t vaeseq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 
1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vaeseq_u8((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("aes"))) uint8x16_t vaesimcq_u8(uint8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vaesimcq_u8((int8x16_t)__p0, 48); - return __ret; -} -#else -__ai __attribute__((target("aes"))) uint8x16_t vaesimcq_u8(uint8x16_t __p0) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vaesimcq_u8((int8x16_t)__rev0, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("aes"))) uint8x16_t vaesmcq_u8(uint8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vaesmcq_u8((int8x16_t)__p0, 48); - return __ret; -} -#else -__ai __attribute__((target("aes"))) uint8x16_t vaesmcq_u8(uint8x16_t __p0) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vaesmcq_u8((int8x16_t)__rev0, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha2"))) uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsha1cq_u32(__p0, __p1, __p2); return __ret; 
} #else -__ai __attribute__((target("sha2"))) uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1cq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); @@ -40578,19 +40424,19 @@ __ai __attribute__((target("sha2"))) uint32x4_t vsha1cq_u32(uint32x4_t __p0, uin } #endif -__ai __attribute__((target("sha2"))) uint32_t vsha1h_u32(uint32_t __p0) { +__ai __attribute__((target("sha2,neon"))) uint32_t vsha1h_u32(uint32_t __p0) { uint32_t __ret; __ret = (uint32_t) __builtin_neon_vsha1h_u32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha2"))) uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsha1mq_u32(__p0, __p1, __p2); return __ret; } #else -__ai __attribute__((target("sha2"))) uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1mq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); @@ -40601,13 +40447,13 @@ __ai __attribute__((target("sha2"))) uint32x4_t vsha1mq_u32(uint32x4_t __p0, uin #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha2"))) uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsha1pq_u32(__p0, __p1, __p2); return __ret; } 
#else -__ai __attribute__((target("sha2"))) uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1pq_u32(uint32x4_t __p0, uint32_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); @@ -40618,13 +40464,13 @@ __ai __attribute__((target("sha2"))) uint32x4_t vsha1pq_u32(uint32x4_t __p0, uin #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha2"))) uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsha1su0q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else -__ai __attribute__((target("sha2"))) uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1su0q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -40636,13 +40482,13 @@ __ai __attribute__((target("sha2"))) uint32x4_t vsha1su0q_u32(uint32x4_t __p0, u #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha2"))) uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsha1su1q_u32((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai __attribute__((target("sha2"))) uint32x4_t vsha1su1q_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha1su1q_u32(uint32x4_t __p0, 
uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -40653,13 +40499,13 @@ __ai __attribute__((target("sha2"))) uint32x4_t vsha1su1q_u32(uint32x4_t __p0, u #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha2"))) uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsha256hq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else -__ai __attribute__((target("sha2"))) uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256hq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -40671,13 +40517,13 @@ __ai __attribute__((target("sha2"))) uint32x4_t vsha256hq_u32(uint32x4_t __p0, u #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha2"))) uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsha256h2q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else -__ai __attribute__((target("sha2"))) uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256h2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; 
__rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -40689,13 +40535,13 @@ __ai __attribute__((target("sha2"))) uint32x4_t vsha256h2q_u32(uint32x4_t __p0, #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha2"))) uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsha256su0q_u32((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai __attribute__((target("sha2"))) uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256su0q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -40706,13 +40552,13 @@ __ai __attribute__((target("sha2"))) uint32x4_t vsha256su0q_u32(uint32x4_t __p0, #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha2"))) uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsha256su1q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else -__ai __attribute__((target("sha2"))) uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sha2,neon"))) uint32x4_t vsha256su1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -40726,13 +40572,205 @@ __ai __attribute__((target("sha2"))) uint32x4_t vsha256su1q_u32(uint32x4_t __p0, #endif #if __ARM_ARCH >= 8 && 
defined(__ARM_FEATURE_DIRECTED_ROUNDING) #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vrndq_f32(float32x4_t __p0) { +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrndq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrnd_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrnd_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrnd_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrnd_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndaq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndaq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndaq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrndaq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrnda_f16(float16x4_t 
__p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrnda_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrnda_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrnda_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndmq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndmq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndmq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrndmq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndm_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndm_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndm_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrndm_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndnq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndnq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndnq_f16(float16x8_t __p0) { + 
float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrndnq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndn_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndn_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndn_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrndn_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndpq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndpq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndpq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrndpq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndp_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndp_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndp_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrndp_f16((int8x8_t)__rev0, 8); + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndxq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndxq_f16((int8x16_t)__p0, 40); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndxq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrndxq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndx_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndx_f16((int8x8_t)__p0, 8); + return __ret; +} +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndx_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrndx_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vrndq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 41); return __ret; } #else -__ai float32x4_t vrndq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vrndq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 41); @@ -40742,13 +40780,13 @@ __ai float32x4_t vrndq_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vrnd_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t 
vrnd_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 9); return __ret; } #else -__ai float32x2_t vrnd_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vrnd_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrnd_v((int8x8_t)__rev0, 9); @@ -40758,13 +40796,13 @@ __ai float32x2_t vrnd_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vrndaq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vrndaq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 41); return __ret; } #else -__ai float32x4_t vrndaq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vrndaq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 41); @@ -40774,13 +40812,13 @@ __ai float32x4_t vrndaq_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vrnda_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vrnda_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 9); return __ret; } #else -__ai float32x2_t vrnda_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vrnda_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrnda_v((int8x8_t)__rev0, 9); @@ -40790,13 +40828,13 @@ __ai float32x2_t vrnda_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vrndiq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vrndiq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 
41); return __ret; } #else -__ai float32x4_t vrndiq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vrndiq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 41); @@ -40806,13 +40844,13 @@ __ai float32x4_t vrndiq_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vrndi_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vrndi_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 9); return __ret; } #else -__ai float32x2_t vrndi_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vrndi_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrndi_v((int8x8_t)__rev0, 9); @@ -40822,13 +40860,13 @@ __ai float32x2_t vrndi_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vrndmq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vrndmq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 41); return __ret; } #else -__ai float32x4_t vrndmq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vrndmq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 41); @@ -40838,13 +40876,13 @@ __ai float32x4_t vrndmq_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vrndm_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vrndm_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 9); return __ret; } #else -__ai float32x2_t vrndm_f32(float32x2_t __p0) { +__ai 
__attribute__((target("neon"))) float32x2_t vrndm_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrndm_v((int8x8_t)__rev0, 9); @@ -40854,13 +40892,13 @@ __ai float32x2_t vrndm_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vrndnq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vrndnq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 41); return __ret; } #else -__ai float32x4_t vrndnq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vrndnq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 41); @@ -40870,13 +40908,13 @@ __ai float32x4_t vrndnq_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vrndn_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vrndn_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 9); return __ret; } #else -__ai float32x2_t vrndn_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vrndn_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrndn_v((int8x8_t)__rev0, 9); @@ -40885,19 +40923,19 @@ __ai float32x2_t vrndn_f32(float32x2_t __p0) { } #endif -__ai float32_t vrndns_f32(float32_t __p0) { +__ai __attribute__((target("neon"))) float32_t vrndns_f32(float32_t __p0) { float32_t __ret; __ret = (float32_t) __builtin_neon_vrndns_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vrndpq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vrndpq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) 
__builtin_neon_vrndpq_v((int8x16_t)__p0, 41); return __ret; } #else -__ai float32x4_t vrndpq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vrndpq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 41); @@ -40907,13 +40945,13 @@ __ai float32x4_t vrndpq_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vrndp_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vrndp_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 9); return __ret; } #else -__ai float32x2_t vrndp_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vrndp_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrndp_v((int8x8_t)__rev0, 9); @@ -40923,13 +40961,13 @@ __ai float32x2_t vrndp_f32(float32x2_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vrndxq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vrndxq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 41); return __ret; } #else -__ai float32x4_t vrndxq_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vrndxq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 41); @@ -40939,13 +40977,13 @@ __ai float32x4_t vrndxq_f32(float32x4_t __p0) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vrndx_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vrndx_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 9); return __ret; } #else -__ai float32x2_t 
vrndx_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) float32x2_t vrndx_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrndx_v((int8x8_t)__rev0, 9); @@ -40954,259 +40992,135 @@ __ai float32x2_t vrndx_f32(float32x2_t __p0) { } #endif +#endif +#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN) #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vrndq_f16(float16x8_t __p0) { +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndq_f16((int8x16_t)__p0, 40); + __ret = (float16x8_t) __builtin_neon_vmaxnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vrndq_f16(float16x8_t __p0) { +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrndq_f16((int8x16_t)__rev0, 40); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vmaxnmq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vrnd_f16(float16x4_t __p0) { +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrnd_f16((int8x8_t)__p0, 8); + __ret = (float16x4_t) __builtin_neon_vmaxnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vrnd_f16(float16x4_t __p0) { +__ai 
__attribute__((target("fullfp16,neon"))) float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrnd_f16((int8x8_t)__rev0, 8); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vmaxnm_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vrndaq_f16(float16x8_t __p0) { +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndaq_f16((int8x16_t)__p0, 40); + __ret = (float16x8_t) __builtin_neon_vminnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vrndaq_f16(float16x8_t __p0) { +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrndaq_f16((int8x16_t)__rev0, 40); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vminnmq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vrnda_f16(float16x4_t __p0) { +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrnda_f16((int8x8_t)__p0, 8); + __ret = (float16x4_t) __builtin_neon_vminnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); return 
__ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vrnda_f16(float16x4_t __p0) { +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrnda_f16((int8x8_t)__rev0, 8); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vminnm_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vrndmq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndmq_f16((int8x16_t)__p0, 40); +__ai __attribute__((target("neon"))) float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vrndmq_f16(float16x8_t __p0) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrndmq_f16((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vrndm_f16(float16x4_t __p0) { - float16x4_t __ret; - 
__ret = (float16x4_t) __builtin_neon_vrndm_f16((int8x8_t)__p0, 8); +__ai __attribute__((target("neon"))) float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vrndm_f16(float16x4_t __p0) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrndm_f16((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vrndnq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndnq_f16((int8x16_t)__p0, 40); +__ai __attribute__((target("neon"))) float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vrndnq_f16(float16x8_t __p0) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrndnq_f16((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vrndn_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrndn_f16((int8x8_t)__p0, 8); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vrndn_f16(float16x4_t __p0) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrndn_f16((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vrndpq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndpq_f16((int8x16_t)__p0, 40); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vrndpq_f16(float16x8_t __p0) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrndpq_f16((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vrndp_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrndp_f16((int8x8_t)__p0, 8); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vrndp_f16(float16x4_t __p0) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrndp_f16((int8x8_t)__rev0, 8); - __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vrndxq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndxq_f16((int8x16_t)__p0, 40); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vrndxq_f16(float16x8_t __p0) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrndxq_f16((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vrndx_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrndx_f16((int8x8_t)__p0, 8); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vrndx_f16(float16x4_t __p0) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrndx_f16((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#endif -#if __ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN) -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#else -__ai float32x4_t vmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef 
__LITTLE_ENDIAN__ -__ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} -#else -__ai float32x2_t vmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float32x2_t) __builtin_neon_vmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} -#else -__ai float32x4_t vminnmq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else -__ai float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vminnm_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -41216,84 +41130,16 @@ __ai float32x2_t vminnm_f32(float32x2_t __p0, 
float32x2_t __p1) { } #endif -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vmaxnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vmaxnmq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vmaxnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vmaxnm_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vminnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x8_t vminnmq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 
3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vminnmq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vminnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); - return __ret; -} -#else -__ai __attribute__((target("fullfp16"))) float16x4_t vminnm_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vminnm_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - #endif #if defined(__ARM_FEATURE_FMA) #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #else -__ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -41302,7 +41148,7 @@ __ai float32x4_t vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai 
float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; @@ -41310,13 +41156,13 @@ __ai float32x4_t __noswap_vfmaq_f32(float32x4_t __p0, float32x4_t __p1, float32x #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #else -__ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -41325,7 +41171,7 @@ __ai float32x2_t vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; @@ -41333,13 +41179,13 @@ __ai float32x2_t __noswap_vfma_f32(float32x2_t __p0, float32x2_t __p1, float32x2 #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vfmaq_n_f32(float32x4_t 
__p0, float32x4_t __p1, float32_t __p2) { float32x4_t __ret; __ret = vfmaq_f32(__p0, __p1, (float32x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #else -__ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -41350,13 +41196,13 @@ __ai float32x4_t vfmaq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { float32x2_t __ret; __ret = vfma_f32(__p0, __p1, (float32x2_t) {__p2, __p2}); return __ret; } #else -__ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -41367,13 +41213,13 @@ __ai float32x2_t vfma_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; __ret = vfmaq_f32(__p0, -__p1, __p2); return __ret; } #else -__ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) { float32x4_t __ret; float32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -41385,13 +41231,13 @@ __ai float32x4_t vfmsq_f32(float32x4_t __p0, float32x4_t __p1, float32x4_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; __ret = vfma_f32(__p0, -__p1, __p2); return __ret; } #else -__ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -41403,20405 +41249,20589 @@ __ai float32x2_t vfms_f32(float32x2_t __p0, float32x2_t __p1, float32x2_t __p2) #endif #endif -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(__arm64ec__) +__ai __attribute__((target("aes,neon"))) poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) { + poly128_t __ret; + __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_f32((int8x16_t)__p0, 43); return __ret; } #else -__ai float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float64x2_t) 
__builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_f32((int8x16_t)__rev0, 43); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -#endif - -__ai float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t __noswap___a64_vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_f32((int8x16_t)__p0, 43); return __ret; } -__ai float64_t vabdd_f64(float64_t __p0, float64_t __p1) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1); +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_lane_bf16(__p0_230, __p1_230, __p2_230, __p3_230) __extension__ ({ \ + bfloat16x8_t __ret_230; \ + bfloat16x8_t __s0_230 = __p0_230; \ + bfloat16x4_t __s2_230 = __p2_230; \ + __ret_230 = vsetq_lane_bf16(vget_lane_bf16(__s2_230, __p3_230), __s0_230, __p1_230); \ + __ret_230; \ +}) +#else +#define vcopyq_lane_bf16(__p0_231, __p1_231, __p2_231, __p3_231) __extension__ ({ \ + bfloat16x8_t __ret_231; \ + bfloat16x8_t __s0_231 = __p0_231; \ + bfloat16x4_t __s2_231 = __p2_231; \ + bfloat16x8_t __rev0_231; __rev0_231 = __builtin_shufflevector(__s0_231, __s0_231, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_231; __rev2_231 = __builtin_shufflevector(__s2_231, __s2_231, 3, 2, 1, 0); \ + __ret_231 = __noswap_vsetq_lane_bf16(__noswap_vget_lane_bf16(__rev2_231, __p3_231), __rev0_231, __p1_231); \ + __ret_231 = __builtin_shufflevector(__ret_231, __ret_231, 7, 6, 5, 4, 3, 2, 1, 0); \ + 
__ret_231; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_lane_bf16(__p0_232, __p1_232, __p2_232, __p3_232) __extension__ ({ \ + bfloat16x4_t __ret_232; \ + bfloat16x4_t __s0_232 = __p0_232; \ + bfloat16x4_t __s2_232 = __p2_232; \ + __ret_232 = vset_lane_bf16(vget_lane_bf16(__s2_232, __p3_232), __s0_232, __p1_232); \ + __ret_232; \ +}) +#else +#define vcopy_lane_bf16(__p0_233, __p1_233, __p2_233, __p3_233) __extension__ ({ \ + bfloat16x4_t __ret_233; \ + bfloat16x4_t __s0_233 = __p0_233; \ + bfloat16x4_t __s2_233 = __p2_233; \ + bfloat16x4_t __rev0_233; __rev0_233 = __builtin_shufflevector(__s0_233, __s0_233, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_233; __rev2_233 = __builtin_shufflevector(__s2_233, __s2_233, 3, 2, 1, 0); \ + __ret_233 = __noswap_vset_lane_bf16(__noswap_vget_lane_bf16(__rev2_233, __p3_233), __rev0_233, __p1_233); \ + __ret_233 = __builtin_shufflevector(__ret_233, __ret_233, 3, 2, 1, 0); \ + __ret_233; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopyq_laneq_bf16(__p0_234, __p1_234, __p2_234, __p3_234) __extension__ ({ \ + bfloat16x8_t __ret_234; \ + bfloat16x8_t __s0_234 = __p0_234; \ + bfloat16x8_t __s2_234 = __p2_234; \ + __ret_234 = vsetq_lane_bf16(vgetq_lane_bf16(__s2_234, __p3_234), __s0_234, __p1_234); \ + __ret_234; \ +}) +#else +#define vcopyq_laneq_bf16(__p0_235, __p1_235, __p2_235, __p3_235) __extension__ ({ \ + bfloat16x8_t __ret_235; \ + bfloat16x8_t __s0_235 = __p0_235; \ + bfloat16x8_t __s2_235 = __p2_235; \ + bfloat16x8_t __rev0_235; __rev0_235 = __builtin_shufflevector(__s0_235, __s0_235, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_235; __rev2_235 = __builtin_shufflevector(__s2_235, __s2_235, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_235 = __noswap_vsetq_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_235, __p3_235), __rev0_235, __p1_235); \ + __ret_235 = __builtin_shufflevector(__ret_235, __ret_235, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_235; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vcopy_laneq_bf16(__p0_236, __p1_236, 
__p2_236, __p3_236) __extension__ ({ \ + bfloat16x4_t __ret_236; \ + bfloat16x4_t __s0_236 = __p0_236; \ + bfloat16x8_t __s2_236 = __p2_236; \ + __ret_236 = vset_lane_bf16(vgetq_lane_bf16(__s2_236, __p3_236), __s0_236, __p1_236); \ + __ret_236; \ +}) +#else +#define vcopy_laneq_bf16(__p0_237, __p1_237, __p2_237, __p3_237) __extension__ ({ \ + bfloat16x4_t __ret_237; \ + bfloat16x4_t __s0_237 = __p0_237; \ + bfloat16x8_t __s2_237 = __p2_237; \ + bfloat16x4_t __rev0_237; __rev0_237 = __builtin_shufflevector(__s0_237, __s0_237, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_237; __rev2_237 = __builtin_shufflevector(__s2_237, __s2_237, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_237 = __noswap_vset_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_237, __p3_237), __rev0_237, __p1_237); \ + __ret_237 = __builtin_shufflevector(__ret_237, __ret_237, 3, 2, 1, 0); \ + __ret_237; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + __ret = vget_low_bf16(__a64_vcvtq_low_bf16_f32(__p0)); return __ret; } -__ai float32_t vabds_f32(float32_t __p0, float32_t __p1) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1); +#else +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { + bfloat16x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vget_low_bf16(__noswap___a64_vcvtq_low_bf16_f32(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } +#endif + #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vabsq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 42); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_f32((int8x16_t)__p0, (int8x16_t)__p1, 43); return __ret; } 
#else -__ai float64x2_t vabsq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { + bfloat16x8_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_f32((int8x16_t)__rev0, (int8x16_t)__rev1, 43); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vabsq_s64(int64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 35); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = __a64_vcvtq_low_bf16_f32(__p0); return __ret; } #else -__ai int64x2_t vabsq_s64(int64x2_t __p0) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap___a64_vcvtq_low_bf16_f32(__rev0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif -__ai float64x1_t vabs_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 10); +__ai __attribute__((target("bf16,neon"))) poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) { + poly8x8_t __ret; + __ret = 
(poly8x8_t)(__p0); return __ret; } -__ai int64x1_t vabs_s64(int64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3); +__ai __attribute__((target("bf16,neon"))) poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); return __ret; } -__ai int64_t vabsd_s64(int64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vabsd_s64(__p0); +__ai __attribute__((target("bf16,neon"))) poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = __p0 + __p1; +__ai __attribute__((target("bf16,neon"))) poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); return __ret; } -#else -__ai float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 + __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("bf16,neon"))) poly128_t vreinterpretq_p128_bf16(bfloat16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); return __ret; } -#endif - -__ai float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = __p0 + __p1; +__ai __attribute__((target("bf16,neon"))) poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); return __ret; } -__ai uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1); +__ai __attribute__((target("bf16,neon"))) poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); return __ret; } -__ai int64_t vaddd_s64(int64_t __p0, 
int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1); +__ai __attribute__((target("bf16,neon"))) uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); return __ret; } -__ai poly128_t vaddq_p128(poly128_t __p0, poly128_t __p1) { - poly128_t __ret; - __ret = (poly128_t) __builtin_neon_vaddq_p128(__p0, __p1); +__ai __attribute__((target("bf16,neon"))) uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); return __ret; } -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint16x8_t __ret; - __ret = vcombine_u16(__p0, vaddhn_u32(__p1, __p2)); +__ai __attribute__((target("bf16,neon"))) uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); return __ret; } -#else -__ai uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("bf16,neon"))) uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) { uint16x8_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = __noswap_vcombine_u16(__rev0, __noswap_vaddhn_u32(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint32x4_t __ret; - __ret = vcombine_u32(__p0, vaddhn_u64(__p1, __p2)); +__ai __attribute__((target("bf16,neon"))) int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t)(__p0); return __ret; } -#else -__ai uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) 
{ - uint32x4_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = __noswap_vcombine_u32(__rev0, __noswap_vaddhn_u64(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) float64x2_t vreinterpretq_f64_bf16(bfloat16x8_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint8x16_t __ret; - __ret = vcombine_u8(__p0, vaddhn_u16(__p1, __p2)); +__ai __attribute__((target("bf16,neon"))) float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t)(__p0); return __ret; } -#else -__ai uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint8x16_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vcombine_u8(__rev0, __noswap_vaddhn_u16(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int16x8_t __ret; - __ret = vcombine_s16(__p0, vaddhn_s32(__p1, __p2)); +__ai __attribute__((target("bf16,neon"))) int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) { + int32x4_t __ret; + __ret = (int32x4_t)(__p0); return __ret; } -#else 
-__ai int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int16x8_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = __noswap_vcombine_s16(__rev0, __noswap_vaddhn_s32(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { - int32x4_t __ret; - __ret = vcombine_s32(__p0, vaddhn_s64(__p1, __p2)); +__ai __attribute__((target("bf16,neon"))) int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) { + int16x8_t __ret; + __ret = (int16x8_t)(__p0); return __ret; } -#else -__ai int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { - int32x4_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = __noswap_vcombine_s32(__rev0, __noswap_vaddhn_s64(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int8x16_t __ret; - __ret = vcombine_s8(__p0, vaddhn_s16(__p1, __p2)); +__ai __attribute__((target("bf16,neon"))) uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t)(__p0); return __ret; } -#else -__ai int8x16_t 
vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int8x16_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vcombine_s8(__rev0, __noswap_vaddhn_s16(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("bf16,neon"))) uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16_t vaddlvq_u8(uint8x16_t __p0) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__p0); +__ai __attribute__((target("bf16,neon"))) uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t)(__p0); return __ret; } -#else -__ai uint16_t vaddlvq_u8(uint8x16_t __p0) { - uint16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__rev0); +__ai __attribute__((target("bf16,neon"))) int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vaddlvq_u32(uint32x4_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__p0); +__ai __attribute__((target("bf16,neon"))) float64x1_t vreinterpret_f64_bf16(bfloat16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); return __ret; } -#else -__ai uint64_t vaddlvq_u32(uint32x4_t __p0) { - uint64_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__rev0); +__ai __attribute__((target("bf16,neon"))) float32x2_t 
vreinterpret_f32_bf16(bfloat16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vaddlvq_u16(uint16x8_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__p0); +__ai __attribute__((target("bf16,neon"))) float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); return __ret; } -#else -__ai uint32_t vaddlvq_u16(uint16x8_t __p0) { - uint32_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__rev0); +__ai __attribute__((target("bf16,neon"))) int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16_t vaddlvq_s8(int8x16_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vaddlvq_s8(__p0); +__ai __attribute__((target("bf16,neon"))) int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); return __ret; } -#else -__ai int16_t vaddlvq_s8(int8x16_t __p0) { - int16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16_t) __builtin_neon_vaddlvq_s8(__rev0); +__ai __attribute__((target("bf16,neon"))) int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vaddlvq_s32(int32x4_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vaddlvq_s32(__p0); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } -#else -__ai int64_t vaddlvq_s32(int32x4_t __p0) { - int64_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = 
(int64_t) __builtin_neon_vaddlvq_s32(__rev0); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p128(poly128_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vaddlvq_s16(int16x8_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vaddlvq_s16(__p0); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } -#else -__ai int32_t vaddlvq_s16(int16x8_t __p0) { - int32_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int32_t) __builtin_neon_vaddlvq_s16(__rev0); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16_t vaddlv_u8(uint8x8_t __p0) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vaddlv_u8(__p0); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } -#else -__ai uint16_t vaddlv_u8(uint8x8_t __p0) { - uint16_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16_t) __builtin_neon_vaddlv_u8(__rev0); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u32(uint32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vaddlv_u32(uint32x2_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vaddlv_u32(__p0); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } -#else -__ai uint64_t vaddlv_u32(uint32x2_t __p0) { - uint64_t 
__ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64_t) __builtin_neon_vaddlv_u32(__rev0); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vaddlv_u16(uint16x4_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vaddlv_u16(__p0); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } -#else -__ai uint32_t vaddlv_u16(uint16x4_t __p0) { - uint32_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint32_t) __builtin_neon_vaddlv_u16(__rev0); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_f64(float64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16_t vaddlv_s8(int8x8_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vaddlv_s8(__p0); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } -#else -__ai int16_t vaddlv_s8(int8x8_t __p0) { - int16_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16_t) __builtin_neon_vaddlv_s8(__rev0); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64_t vaddlv_s32(int32x2_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vaddlv_s32(__p0); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return 
__ret; } -#else -__ai int64_t vaddlv_s32(int32x2_t __p0) { - int64_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int64_t) __builtin_neon_vaddlv_s32(__rev0); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vaddlv_s16(int16x4_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vaddlv_s16(__p0); +__ai __attribute__((target("bf16,neon"))) bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) { + bfloat16x8_t __ret; + __ret = (bfloat16x8_t)(__p0); return __ret; } -#else -__ai int32_t vaddlv_s16(int16x4_t __p0) { - int32_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int32_t) __builtin_neon_vaddlv_s16(__rev0); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8_t vaddvq_u8(uint8x16_t __p0) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vaddvq_u8(__p0); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } -#else -__ai uint8_t vaddvq_u8(uint8x16_t __p0) { - uint8_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8_t) __builtin_neon_vaddvq_u8(__rev0); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vaddvq_u32(uint32x4_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vaddvq_u32(__p0); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t 
vreinterpret_bf16_u8(uint8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } -#else -__ai uint32_t vaddvq_u32(uint32x4_t __p0) { - uint32_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint32_t) __builtin_neon_vaddvq_u32(__rev0); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64_t vaddvq_u64(uint64x2_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vaddvq_u64(__p0); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } -#else -__ai uint64_t vaddvq_u64(uint64x2_t __p0) { - uint64_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64_t) __builtin_neon_vaddvq_u64(__rev0); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16_t vaddvq_u16(uint16x8_t __p0) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vaddvq_u16(__p0); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } -#else -__ai uint16_t vaddvq_u16(uint16x8_t __p0) { - uint16_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16_t) __builtin_neon_vaddvq_u16(__rev0); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_f64(float64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8_t vaddvq_s8(int8x16_t __p0) { - int8_t __ret; - __ret = (int8_t) 
__builtin_neon_vaddvq_s8(__p0); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } -#else -__ai int8_t vaddvq_s8(int8x16_t __p0) { - int8_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8_t) __builtin_neon_vaddvq_s8(__rev0); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64_t vaddvq_f64(float64x2_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vaddvq_f64(__p0); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } -#else -__ai float64_t vaddvq_f64(float64x2_t __p0) { - float64_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64_t) __builtin_neon_vaddvq_f64(__rev0); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vaddvq_f32(float32x4_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vaddvq_f32(__p0); +__ai __attribute__((target("bf16,neon"))) bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) { + bfloat16x4_t __ret; + __ret = (bfloat16x4_t)(__p0); return __ret; } +#ifdef __LITTLE_ENDIAN__ +#define vdotq_laneq_u32(__p0_238, __p1_238, __p2_238, __p3_238) __extension__ ({ \ + uint32x4_t __ret_238; \ + uint32x4_t __s0_238 = __p0_238; \ + uint8x16_t __s1_238 = __p1_238; \ + uint8x16_t __s2_238 = __p2_238; \ +uint8x16_t __reint_238 = __s2_238; \ +uint32x4_t __reint1_238 = splatq_laneq_u32(*(uint32x4_t *) &__reint_238, __p3_238); \ + 
__ret_238 = vdotq_u32(__s0_238, __s1_238, *(uint8x16_t *) &__reint1_238); \ + __ret_238; \ +}) #else -__ai float32_t vaddvq_f32(float32x4_t __p0) { - float32_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float32_t) __builtin_neon_vaddvq_f32(__rev0); - return __ret; -} +#define vdotq_laneq_u32(__p0_239, __p1_239, __p2_239, __p3_239) __extension__ ({ \ + uint32x4_t __ret_239; \ + uint32x4_t __s0_239 = __p0_239; \ + uint8x16_t __s1_239 = __p1_239; \ + uint8x16_t __s2_239 = __p2_239; \ + uint32x4_t __rev0_239; __rev0_239 = __builtin_shufflevector(__s0_239, __s0_239, 3, 2, 1, 0); \ + uint8x16_t __rev1_239; __rev1_239 = __builtin_shufflevector(__s1_239, __s1_239, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_239; __rev2_239 = __builtin_shufflevector(__s2_239, __s2_239, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x16_t __reint_239 = __rev2_239; \ +uint32x4_t __reint1_239 = __noswap_splatq_laneq_u32(*(uint32x4_t *) &__reint_239, __p3_239); \ + __ret_239 = __noswap_vdotq_u32(__rev0_239, __rev1_239, *(uint8x16_t *) &__reint1_239); \ + __ret_239 = __builtin_shufflevector(__ret_239, __ret_239, 3, 2, 1, 0); \ + __ret_239; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32_t vaddvq_s32(int32x4_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vaddvq_s32(__p0); - return __ret; -} +#define vdotq_laneq_s32(__p0_240, __p1_240, __p2_240, __p3_240) __extension__ ({ \ + int32x4_t __ret_240; \ + int32x4_t __s0_240 = __p0_240; \ + int8x16_t __s1_240 = __p1_240; \ + int8x16_t __s2_240 = __p2_240; \ +int8x16_t __reint_240 = __s2_240; \ +int32x4_t __reint1_240 = splatq_laneq_s32(*(int32x4_t *) &__reint_240, __p3_240); \ + __ret_240 = vdotq_s32(__s0_240, __s1_240, *(int8x16_t *) &__reint1_240); \ + __ret_240; \ +}) #else -__ai int32_t vaddvq_s32(int32x4_t __p0) { - int32_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int32_t) 
__builtin_neon_vaddvq_s32(__rev0); - return __ret; -} +#define vdotq_laneq_s32(__p0_241, __p1_241, __p2_241, __p3_241) __extension__ ({ \ + int32x4_t __ret_241; \ + int32x4_t __s0_241 = __p0_241; \ + int8x16_t __s1_241 = __p1_241; \ + int8x16_t __s2_241 = __p2_241; \ + int32x4_t __rev0_241; __rev0_241 = __builtin_shufflevector(__s0_241, __s0_241, 3, 2, 1, 0); \ + int8x16_t __rev1_241; __rev1_241 = __builtin_shufflevector(__s1_241, __s1_241, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_241; __rev2_241 = __builtin_shufflevector(__s2_241, __s2_241, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x16_t __reint_241 = __rev2_241; \ +int32x4_t __reint1_241 = __noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_241, __p3_241); \ + __ret_241 = __noswap_vdotq_s32(__rev0_241, __rev1_241, *(int8x16_t *) &__reint1_241); \ + __ret_241 = __builtin_shufflevector(__ret_241, __ret_241, 3, 2, 1, 0); \ + __ret_241; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int64_t vaddvq_s64(int64x2_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vaddvq_s64(__p0); - return __ret; -} +#define vdot_laneq_u32(__p0_242, __p1_242, __p2_242, __p3_242) __extension__ ({ \ + uint32x2_t __ret_242; \ + uint32x2_t __s0_242 = __p0_242; \ + uint8x8_t __s1_242 = __p1_242; \ + uint8x16_t __s2_242 = __p2_242; \ +uint8x16_t __reint_242 = __s2_242; \ +uint32x2_t __reint1_242 = splat_laneq_u32(*(uint32x4_t *) &__reint_242, __p3_242); \ + __ret_242 = vdot_u32(__s0_242, __s1_242, *(uint8x8_t *) &__reint1_242); \ + __ret_242; \ +}) #else -__ai int64_t vaddvq_s64(int64x2_t __p0) { - int64_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int64_t) __builtin_neon_vaddvq_s64(__rev0); - return __ret; -} +#define vdot_laneq_u32(__p0_243, __p1_243, __p2_243, __p3_243) __extension__ ({ \ + uint32x2_t __ret_243; \ + uint32x2_t __s0_243 = __p0_243; \ + uint8x8_t __s1_243 = __p1_243; \ + uint8x16_t __s2_243 = __p2_243; \ + uint32x2_t 
__rev0_243; __rev0_243 = __builtin_shufflevector(__s0_243, __s0_243, 1, 0); \ + uint8x8_t __rev1_243; __rev1_243 = __builtin_shufflevector(__s1_243, __s1_243, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_243; __rev2_243 = __builtin_shufflevector(__s2_243, __s2_243, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x16_t __reint_243 = __rev2_243; \ +uint32x2_t __reint1_243 = __noswap_splat_laneq_u32(*(uint32x4_t *) &__reint_243, __p3_243); \ + __ret_243 = __noswap_vdot_u32(__rev0_243, __rev1_243, *(uint8x8_t *) &__reint1_243); \ + __ret_243 = __builtin_shufflevector(__ret_243, __ret_243, 1, 0); \ + __ret_243; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16_t vaddvq_s16(int16x8_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vaddvq_s16(__p0); - return __ret; -} +#define vdot_laneq_s32(__p0_244, __p1_244, __p2_244, __p3_244) __extension__ ({ \ + int32x2_t __ret_244; \ + int32x2_t __s0_244 = __p0_244; \ + int8x8_t __s1_244 = __p1_244; \ + int8x16_t __s2_244 = __p2_244; \ +int8x16_t __reint_244 = __s2_244; \ +int32x2_t __reint1_244 = splat_laneq_s32(*(int32x4_t *) &__reint_244, __p3_244); \ + __ret_244 = vdot_s32(__s0_244, __s1_244, *(int8x8_t *) &__reint1_244); \ + __ret_244; \ +}) #else -__ai int16_t vaddvq_s16(int16x8_t __p0) { - int16_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16_t) __builtin_neon_vaddvq_s16(__rev0); - return __ret; -} +#define vdot_laneq_s32(__p0_245, __p1_245, __p2_245, __p3_245) __extension__ ({ \ + int32x2_t __ret_245; \ + int32x2_t __s0_245 = __p0_245; \ + int8x8_t __s1_245 = __p1_245; \ + int8x16_t __s2_245 = __p2_245; \ + int32x2_t __rev0_245; __rev0_245 = __builtin_shufflevector(__s0_245, __s0_245, 1, 0); \ + int8x8_t __rev1_245; __rev1_245 = __builtin_shufflevector(__s1_245, __s1_245, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_245; __rev2_245 = __builtin_shufflevector(__s2_245, __s2_245, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 
0); \ +int8x16_t __reint_245 = __rev2_245; \ +int32x2_t __reint1_245 = __noswap_splat_laneq_s32(*(int32x4_t *) &__reint_245, __p3_245); \ + __ret_245 = __noswap_vdot_s32(__rev0_245, __rev1_245, *(int8x8_t *) &__reint1_245); \ + __ret_245 = __builtin_shufflevector(__ret_245, __ret_245, 1, 0); \ + __ret_245; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8_t vaddv_u8(uint8x8_t __p0) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vaddv_u8(__p0); +__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlalq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #else -__ai uint8_t vaddv_u8(uint8x8_t __p0) { - uint8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8_t) __builtin_neon_vaddv_u8(__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32_t vaddv_u32(uint32x2_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vaddv_u32(__p0); +__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vfmlalq_high_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -#else -__ai uint32_t vaddv_u32(uint32x2_t __p0) { - uint32_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint32_t) __builtin_neon_vaddv_u32(__rev0); +__ai __attribute__((target("fp16fml,neon"))) float32x4_t 
__noswap_vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlalq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16_t vaddv_u16(uint16x4_t __p0) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vaddv_u16(__p0); +__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlal_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #else -__ai uint16_t vaddv_u16(uint16x4_t __p0) { - uint16_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16_t) __builtin_neon_vaddv_u16(__rev0); +__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vfmlal_high_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml,neon"))) float32x2_t __noswap_vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlal_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8_t vaddv_s8(int8x8_t __p0) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vaddv_s8(__p0); +__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + 
__ret = (float32x4_t) __builtin_neon_vfmlalq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #else -__ai int8_t vaddv_s8(int8x8_t __p0) { - int8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8_t) __builtin_neon_vaddv_s8(__rev0); +__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vfmlalq_low_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("fp16fml,neon"))) float32x4_t __noswap_vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlalq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32_t vaddv_f32(float32x2_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vaddv_f32(__p0); +__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlal_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #else -__ai float32_t vaddv_f32(float32x2_t __p0) { - float32_t __ret; +__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float32_t) __builtin_neon_vaddv_f32(__rev0); - 
return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32_t vaddv_s32(int32x2_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vaddv_s32(__p0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vfmlal_low_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -#else -__ai int32_t vaddv_s32(int32x2_t __p0) { - int32_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int32_t) __builtin_neon_vaddv_s32(__rev0); +__ai __attribute__((target("fp16fml,neon"))) float32x2_t __noswap_vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlal_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16_t vaddv_s16(int16x4_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vaddv_s16(__p0); +__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlslq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #else -__ai int16_t vaddv_s16(int16x4_t __p0) { - int16_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int16_t) __builtin_neon_vaddv_s16(__rev0); - return __ret; -} -#endif - -__ai poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) { - poly64x1_t __ret; - __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) { - poly64x2_t __ret; - __ret = (poly64x2_t) 
__builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 38); +__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vfmlslq_high_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -#else -__ai poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) { - poly64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - poly64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 38); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fp16fml,neon"))) float32x4_t __noswap_vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlslq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); +__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlsl_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #else -__ai 
float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); +__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vfmlsl_high_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -#endif - -__ai float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); - return __ret; -} -#else -__ai uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fp16fml,neon"))) float32x2_t __noswap_vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = 
(float32x2_t) __builtin_neon_vfmlsl_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #endif -__ai uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -__ai uint64_t vcaged_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1); - return __ret; -} -__ai uint32_t vcages_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); +__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlslq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #else -__ai uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -__ai uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1); - return __ret; -} -__ai uint32_t vcagts_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1); - 
return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); +__ai __attribute__((target("fp16fml,neon"))) float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vfmlslq_low_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -#else -__ai uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fp16fml,neon"))) float32x4_t __noswap_vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vfmlslq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); return __ret; } #endif -__ai uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -__ai uint64_t vcaled_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1); - return __ret; -} -__ai uint32_t vcales_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1); - return 
__ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); +__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlsl_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } #else -__ai uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); +__ai __attribute__((target("fp16fml,neon"))) float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = (float32x2_t) __builtin_neon_vfmlsl_low_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -#endif - -__ai uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -__ai uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1); - return __ret; -} -__ai uint32_t vcalts_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1); - return __ret; -} -__ai uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 == 
__p1); +__ai __attribute__((target("fp16fml,neon"))) float32x2_t __noswap_vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vfmlsl_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); return __ret; } +#endif + #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 == __p1); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __p0 / __p1; return __ret; } #else -__ai uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) { - uint64x2_t __ret; - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 / __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 == __p1); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __p0 / __p1; return __ret; } #else -__ai uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t)(__rev0 == __rev1); - __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 / __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 == __p1); - return __ret; -} +#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \ + __ret; \ +}) #else -__ai uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) { - uint64x2_t 
__ret; - __ret = (uint64x2_t)(__p0 == __p1); - return __ret; -} -#else -__ai uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t)(__rev0 == __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_lane_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \ + __ret; \ +}) +#else +#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vfmaq_lane_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_lane_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \ + __ret; \ +}) #endif -__ai uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 == __p1); - return __ret; -} -__ai uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 == __p1); - return __ret; -} -__ai 
uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 == __p1); - return __ret; -} -__ai uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1); - return __ret; -} -__ai uint64_t vceqd_s64(int64_t __p0, int64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vceqd_s64(__p0, __p1); - return __ret; -} -__ai uint64_t vceqd_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1); - return __ret; -} -__ai uint32_t vceqs_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vceqz_p8(poly8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); - return __ret; -} +#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16x4_t) __builtin_neon_vfma_lane_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \ + __ret; \ +}) #else -__ai uint8x8_t vceqz_p8(poly8x8_t __p0) { - uint8x8_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (float16x4_t) 
__builtin_neon_vfma_lane_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __s2 = __p2; \ + __ret = (float16x4_t) __builtin_neon_vfma_lane_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \ + __ret; \ +}) #endif -__ai uint64x1_t vceqz_p64(poly64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); - return __ret; -} +#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \ + __ret; \ +}) #else -__ai uint8x16_t vceqzq_p8(poly8x16_t __p0) { - uint8x16_t __ret; - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + 
float16_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); - return __ret; -} +#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \ + __ret; \ +}) #else -__ai uint64x2_t vceqzq_p64(poly64x2_t __p0) { - uint64x2_t __ret; - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 40); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t 
vceqzq_u8(uint8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); - return __ret; -} +#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16x4_t) __builtin_neon_vfma_laneq_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \ + __ret; \ +}) #else -__ai uint8x16_t vceqzq_u8(uint8x16_t __p0) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16x4_t) __builtin_neon_vfma_laneq_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 8); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x8_t __s2 = __p2; \ + __ret = (float16x4_t) __builtin_neon_vfma_laneq_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); - return __ret; -} +#define vfmaq_n_f16(__p0, __p1, 
__p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + __ret = vfmaq_f16(__s0, __s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ + __ret; \ +}) #else -__ai uint32x4_t vceqzq_u32(uint32x4_t __p0) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = __noswap_vfmaq_f16(__rev0, __rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); - return __ret; -} +#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + __ret = vfma_f16(__s0, __s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ + __ret; \ +}) #else -__ai uint64x2_t vceqzq_u64(uint64x2_t __p0) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16_t __s2 = 
__p2; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = __noswap_vfma_f16(__rev0, __rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49); - return __ret; -} +#define vfmsh_lane_f16(__p0_246, __p1_246, __p2_246, __p3_246) __extension__ ({ \ + float16_t __ret_246; \ + float16_t __s0_246 = __p0_246; \ + float16_t __s1_246 = __p1_246; \ + float16x4_t __s2_246 = __p2_246; \ + __ret_246 = vfmah_lane_f16(__s0_246, -__s1_246, __s2_246, __p3_246); \ + __ret_246; \ +}) #else -__ai uint16x8_t vceqzq_u16(uint16x8_t __p0) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vfmsh_lane_f16(__p0_247, __p1_247, __p2_247, __p3_247) __extension__ ({ \ + float16_t __ret_247; \ + float16_t __s0_247 = __p0_247; \ + float16_t __s1_247 = __p1_247; \ + float16x4_t __s2_247 = __p2_247; \ + float16x4_t __rev2_247; __rev2_247 = __builtin_shufflevector(__s2_247, __s2_247, 3, 2, 1, 0); \ + __ret_247 = __noswap_vfmah_lane_f16(__s0_247, -__s1_247, __rev2_247, __p3_247); \ + __ret_247; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vceqzq_s8(int8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); - return __ret; -} +#define vfmsq_lane_f16(__p0_248, __p1_248, __p2_248, __p3_248) __extension__ ({ \ + float16x8_t __ret_248; \ + float16x8_t __s0_248 = __p0_248; \ + float16x8_t __s1_248 = __p1_248; \ + float16x4_t __s2_248 = __p2_248; \ + __ret_248 = 
vfmaq_lane_f16(__s0_248, -__s1_248, __s2_248, __p3_248); \ + __ret_248; \ +}) #else -__ai uint8x16_t vceqzq_s8(int8x16_t __p0) { - uint8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vfmsq_lane_f16(__p0_249, __p1_249, __p2_249, __p3_249) __extension__ ({ \ + float16x8_t __ret_249; \ + float16x8_t __s0_249 = __p0_249; \ + float16x8_t __s1_249 = __p1_249; \ + float16x4_t __s2_249 = __p2_249; \ + float16x8_t __rev0_249; __rev0_249 = __builtin_shufflevector(__s0_249, __s0_249, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_249; __rev1_249 = __builtin_shufflevector(__s1_249, __s1_249, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_249; __rev2_249 = __builtin_shufflevector(__s2_249, __s2_249, 3, 2, 1, 0); \ + __ret_249 = __noswap_vfmaq_lane_f16(__rev0_249, -__rev1_249, __rev2_249, __p3_249); \ + __ret_249 = __builtin_shufflevector(__ret_249, __ret_249, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_249; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vceqzq_f64(float64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); - return __ret; -} +#define vfms_lane_f16(__p0_250, __p1_250, __p2_250, __p3_250) __extension__ ({ \ + float16x4_t __ret_250; \ + float16x4_t __s0_250 = __p0_250; \ + float16x4_t __s1_250 = __p1_250; \ + float16x4_t __s2_250 = __p2_250; \ + __ret_250 = vfma_lane_f16(__s0_250, -__s1_250, __s2_250, __p3_250); \ + __ret_250; \ +}) #else -__ai uint64x2_t vceqzq_f64(float64x2_t __p0) { - uint64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define 
vfms_lane_f16(__p0_251, __p1_251, __p2_251, __p3_251) __extension__ ({ \ + float16x4_t __ret_251; \ + float16x4_t __s0_251 = __p0_251; \ + float16x4_t __s1_251 = __p1_251; \ + float16x4_t __s2_251 = __p2_251; \ + float16x4_t __rev0_251; __rev0_251 = __builtin_shufflevector(__s0_251, __s0_251, 3, 2, 1, 0); \ + float16x4_t __rev1_251; __rev1_251 = __builtin_shufflevector(__s1_251, __s1_251, 3, 2, 1, 0); \ + float16x4_t __rev2_251; __rev2_251 = __builtin_shufflevector(__s2_251, __s2_251, 3, 2, 1, 0); \ + __ret_251 = __noswap_vfma_lane_f16(__rev0_251, -__rev1_251, __rev2_251, __p3_251); \ + __ret_251 = __builtin_shufflevector(__ret_251, __ret_251, 3, 2, 1, 0); \ + __ret_251; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vceqzq_f32(float32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); - return __ret; -} +#define vfmsh_laneq_f16(__p0_252, __p1_252, __p2_252, __p3_252) __extension__ ({ \ + float16_t __ret_252; \ + float16_t __s0_252 = __p0_252; \ + float16_t __s1_252 = __p1_252; \ + float16x8_t __s2_252 = __p2_252; \ + __ret_252 = vfmah_laneq_f16(__s0_252, -__s1_252, __s2_252, __p3_252); \ + __ret_252; \ +}) #else -__ai uint32x4_t vceqzq_f32(float32x4_t __p0) { - uint32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vfmsh_laneq_f16(__p0_253, __p1_253, __p2_253, __p3_253) __extension__ ({ \ + float16_t __ret_253; \ + float16_t __s0_253 = __p0_253; \ + float16_t __s1_253 = __p1_253; \ + float16x8_t __s2_253 = __p2_253; \ + float16x8_t __rev2_253; __rev2_253 = __builtin_shufflevector(__s2_253, __s2_253, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_253 = __noswap_vfmah_laneq_f16(__s0_253, -__s1_253, __rev2_253, __p3_253); \ + __ret_253; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vceqzq_s32(int32x4_t __p0) { - 
uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); - return __ret; -} +#define vfmsq_laneq_f16(__p0_254, __p1_254, __p2_254, __p3_254) __extension__ ({ \ + float16x8_t __ret_254; \ + float16x8_t __s0_254 = __p0_254; \ + float16x8_t __s1_254 = __p1_254; \ + float16x8_t __s2_254 = __p2_254; \ + __ret_254 = vfmaq_laneq_f16(__s0_254, -__s1_254, __s2_254, __p3_254); \ + __ret_254; \ +}) #else -__ai uint32x4_t vceqzq_s32(int32x4_t __p0) { - uint32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vfmsq_laneq_f16(__p0_255, __p1_255, __p2_255, __p3_255) __extension__ ({ \ + float16x8_t __ret_255; \ + float16x8_t __s0_255 = __p0_255; \ + float16x8_t __s1_255 = __p1_255; \ + float16x8_t __s2_255 = __p2_255; \ + float16x8_t __rev0_255; __rev0_255 = __builtin_shufflevector(__s0_255, __s0_255, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_255; __rev1_255 = __builtin_shufflevector(__s1_255, __s1_255, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_255; __rev2_255 = __builtin_shufflevector(__s2_255, __s2_255, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_255 = __noswap_vfmaq_laneq_f16(__rev0_255, -__rev1_255, __rev2_255, __p3_255); \ + __ret_255 = __builtin_shufflevector(__ret_255, __ret_255, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_255; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vceqzq_s64(int64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); - return __ret; -} +#define vfms_laneq_f16(__p0_256, __p1_256, __p2_256, __p3_256) __extension__ ({ \ + float16x4_t __ret_256; \ + float16x4_t __s0_256 = __p0_256; \ + float16x4_t __s1_256 = __p1_256; \ + float16x8_t __s2_256 = __p2_256; \ + __ret_256 = vfma_laneq_f16(__s0_256, -__s1_256, __s2_256, __p3_256); \ + __ret_256; \ +}) #else -__ai uint64x2_t 
vceqzq_s64(int64x2_t __p0) { - uint64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vfms_laneq_f16(__p0_257, __p1_257, __p2_257, __p3_257) __extension__ ({ \ + float16x4_t __ret_257; \ + float16x4_t __s0_257 = __p0_257; \ + float16x4_t __s1_257 = __p1_257; \ + float16x8_t __s2_257 = __p2_257; \ + float16x4_t __rev0_257; __rev0_257 = __builtin_shufflevector(__s0_257, __s0_257, 3, 2, 1, 0); \ + float16x4_t __rev1_257; __rev1_257 = __builtin_shufflevector(__s1_257, __s1_257, 3, 2, 1, 0); \ + float16x8_t __rev2_257; __rev2_257 = __builtin_shufflevector(__s2_257, __s2_257, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_257 = __noswap_vfma_laneq_f16(__rev0_257, -__rev1_257, __rev2_257, __p3_257); \ + __ret_257 = __builtin_shufflevector(__ret_257, __ret_257, 3, 2, 1, 0); \ + __ret_257; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vceqzq_s16(int16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49); - return __ret; -} +#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + __ret = vfmaq_f16(__s0, -__s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ + __ret; \ +}) #else -__ai uint16x8_t vceqzq_s16(int16x8_t __p0) { - uint16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 
7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = __noswap_vfmaq_f16(__rev0, -__rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vceqz_u8(uint8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); - return __ret; -} +#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + __ret = vfma_f16(__s0, -__s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ + __ret; \ +}) #else -__ai uint8x8_t vceqz_u8(uint8x8_t __p0) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16_t __s2 = __p2; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = __noswap_vfma_f16(__rev0, -__rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vceqz_u32(uint32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); - return __ret; -} +#define vmaxnmvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__s0); \ + __ret; \ +}) #else -__ai uint32x2_t vceqz_u32(uint32x2_t __p0) { - 
uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vmaxnmvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__rev0); \ + __ret; \ +}) #endif -__ai uint64x1_t vceqz_u64(uint64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vceqz_u16(uint16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17); - return __ret; -} +#define vmaxnmv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__s0); \ + __ret; \ +}) #else -__ai uint16x4_t vceqz_u16(uint16x4_t __p0) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vmaxnmv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__rev0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vceqz_s8(int8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); - return __ret; -} +#define vmaxvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__s0); \ + __ret; \ +}) #else -__ai uint8x8_t vceqz_s8(int8x8_t __p0) { - 
uint8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vmaxvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__rev0); \ + __ret; \ +}) #endif -__ai uint64x1_t vceqz_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vceqz_f32(float32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); - return __ret; -} +#define vmaxv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__s0); \ + __ret; \ +}) #else -__ai uint32x2_t vceqz_f32(float32x2_t __p0) { - uint32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vmaxv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__rev0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vceqz_s32(int32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); - return __ret; -} +#define vminnmvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__s0); \ + __ret; \ +}) #else -__ai uint32x2_t 
vceqz_s32(int32x2_t __p0) { - uint32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vminnmvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__rev0); \ + __ret; \ +}) #endif -__ai uint64x1_t vceqz_s64(int64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vceqz_s16(int16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17); - return __ret; -} +#define vminnmv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__s0); \ + __ret; \ +}) #else -__ai uint16x4_t vceqz_s16(int16x4_t __p0) { - uint16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vminnmv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__rev0); \ + __ret; \ +}) #endif -__ai uint64_t vceqzd_u64(uint64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0); - return __ret; -} -__ai uint64_t vceqzd_s64(int64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vceqzd_s64(__p0); - return __ret; -} -__ai uint64_t vceqzd_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) 
__builtin_neon_vceqzd_f64(__p0); - return __ret; -} -__ai uint32_t vceqzs_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 >= __p1); - return __ret; -} +#define vminvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__s0); \ + __ret; \ +}) #else -__ai uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vminvq_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__rev0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 >= __p1); - return __ret; -} +#define vminv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__s0); \ + __ret; \ +}) #else -__ai uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vminv_f16(__p0) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__rev0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 >= __p1); - return __ret; -} +#define vmulq_laneq_f16(__p0_258, __p1_258, __p2_258) __extension__ ({ \ + float16x8_t __ret_258; \ + float16x8_t __s0_258 = __p0_258; \ + float16x8_t __s1_258 = __p1_258; \ + __ret_258 = __s0_258 * splatq_laneq_f16(__s1_258, __p2_258); \ + __ret_258; \ +}) #else -__ai uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t)(__rev0 >= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vmulq_laneq_f16(__p0_259, __p1_259, __p2_259) __extension__ ({ \ + float16x8_t __ret_259; \ + float16x8_t __s0_259 = __p0_259; \ + float16x8_t __s1_259 = __p1_259; \ + float16x8_t __rev0_259; __rev0_259 = __builtin_shufflevector(__s0_259, __s0_259, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_259; __rev1_259 = __builtin_shufflevector(__s1_259, __s1_259, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_259 = __rev0_259 * __noswap_splatq_laneq_f16(__rev1_259, __p2_259); \ + __ret_259 = __builtin_shufflevector(__ret_259, __ret_259, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_259; \ +}) #endif -__ai uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 >= __p1); - return __ret; -} -__ai uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 >= __p1); +#ifdef __LITTLE_ENDIAN__ +#define vmul_laneq_f16(__p0_260, __p1_260, __p2_260) __extension__ ({ \ + float16x4_t __ret_260; \ + float16x4_t __s0_260 = __p0_260; \ + float16x8_t __s1_260 = __p1_260; \ + __ret_260 = __s0_260 * 
splat_laneq_f16(__s1_260, __p2_260); \ + __ret_260; \ +}) +#else +#define vmul_laneq_f16(__p0_261, __p1_261, __p2_261) __extension__ ({ \ + float16x4_t __ret_261; \ + float16x4_t __s0_261 = __p0_261; \ + float16x8_t __s1_261 = __p1_261; \ + float16x4_t __rev0_261; __rev0_261 = __builtin_shufflevector(__s0_261, __s0_261, 3, 2, 1, 0); \ + float16x8_t __rev1_261; __rev1_261 = __builtin_shufflevector(__s1_261, __s1_261, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_261 = __rev0_261 * __noswap_splat_laneq_f16(__rev1_261, __p2_261); \ + __ret_261 = __builtin_shufflevector(__ret_261, __ret_261, 3, 2, 1, 0); \ + __ret_261; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vmulxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } -__ai uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 >= __p1); +#else +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vmulxq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint64_t vcged_s64(int64_t __p0, int64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcged_s64(__p0, __p1); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t __noswap_vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vmulxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } -__ai uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) 
__builtin_neon_vcged_u64(__p0, __p1); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmulx_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } -__ai uint64_t vcged_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1); +#else +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vmulx_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint32_t vcges_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t __noswap_vmulx_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vmulx_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } +#endif + #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcgezq_s8(int8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 48); - return __ret; -} +#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__s1, __p2); \ + __ret; \ +}) #else -__ai uint8x16_t vcgezq_s8(int8x16_t __p0) { - uint8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 48); - __ret = 
__builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16x4_t __s1 = __p1; \ + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__rev1, __p2); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgezq_f64(float64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51); - return __ret; -} +#define vmulxq_lane_f16(__p0_262, __p1_262, __p2_262) __extension__ ({ \ + float16x8_t __ret_262; \ + float16x8_t __s0_262 = __p0_262; \ + float16x4_t __s1_262 = __p1_262; \ + __ret_262 = vmulxq_f16(__s0_262, splatq_lane_f16(__s1_262, __p2_262)); \ + __ret_262; \ +}) #else -__ai uint64x2_t vcgezq_f64(float64x2_t __p0) { - uint64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vmulxq_lane_f16(__p0_263, __p1_263, __p2_263) __extension__ ({ \ + float16x8_t __ret_263; \ + float16x8_t __s0_263 = __p0_263; \ + float16x4_t __s1_263 = __p1_263; \ + float16x8_t __rev0_263; __rev0_263 = __builtin_shufflevector(__s0_263, __s0_263, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev1_263; __rev1_263 = __builtin_shufflevector(__s1_263, __s1_263, 3, 2, 1, 0); \ + __ret_263 = __noswap_vmulxq_f16(__rev0_263, __noswap_splatq_lane_f16(__rev1_263, __p2_263)); \ + __ret_263 = __builtin_shufflevector(__ret_263, __ret_263, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_263; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgezq_f32(float32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50); - return __ret; -} +#define vmulx_lane_f16(__p0_264, 
__p1_264, __p2_264) __extension__ ({ \ + float16x4_t __ret_264; \ + float16x4_t __s0_264 = __p0_264; \ + float16x4_t __s1_264 = __p1_264; \ + __ret_264 = vmulx_f16(__s0_264, splat_lane_f16(__s1_264, __p2_264)); \ + __ret_264; \ +}) #else -__ai uint32x4_t vcgezq_f32(float32x4_t __p0) { - uint32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vmulx_lane_f16(__p0_265, __p1_265, __p2_265) __extension__ ({ \ + float16x4_t __ret_265; \ + float16x4_t __s0_265 = __p0_265; \ + float16x4_t __s1_265 = __p1_265; \ + float16x4_t __rev0_265; __rev0_265 = __builtin_shufflevector(__s0_265, __s0_265, 3, 2, 1, 0); \ + float16x4_t __rev1_265; __rev1_265 = __builtin_shufflevector(__s1_265, __s1_265, 3, 2, 1, 0); \ + __ret_265 = __noswap_vmulx_f16(__rev0_265, __noswap_splat_lane_f16(__rev1_265, __p2_265)); \ + __ret_265 = __builtin_shufflevector(__ret_265, __ret_265, 3, 2, 1, 0); \ + __ret_265; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgezq_s32(int32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50); - return __ret; -} +#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__s1, __p2); \ + __ret; \ +}) #else -__ai uint32x4_t vcgezq_s32(int32x4_t __p0) { - uint32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ + float16_t __ret; \ + float16_t __s0 = __p0; \ + float16x8_t __s1 = __p1; \ + float16x8_t __rev1; __rev1 = 
__builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__rev1, __p2); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgezq_s64(int64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51); - return __ret; -} +#define vmulxq_laneq_f16(__p0_266, __p1_266, __p2_266) __extension__ ({ \ + float16x8_t __ret_266; \ + float16x8_t __s0_266 = __p0_266; \ + float16x8_t __s1_266 = __p1_266; \ + __ret_266 = vmulxq_f16(__s0_266, splatq_laneq_f16(__s1_266, __p2_266)); \ + __ret_266; \ +}) #else -__ai uint64x2_t vcgezq_s64(int64x2_t __p0) { - uint64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vmulxq_laneq_f16(__p0_267, __p1_267, __p2_267) __extension__ ({ \ + float16x8_t __ret_267; \ + float16x8_t __s0_267 = __p0_267; \ + float16x8_t __s1_267 = __p1_267; \ + float16x8_t __rev0_267; __rev0_267 = __builtin_shufflevector(__s0_267, __s0_267, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev1_267; __rev1_267 = __builtin_shufflevector(__s1_267, __s1_267, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_267 = __noswap_vmulxq_f16(__rev0_267, __noswap_splatq_laneq_f16(__rev1_267, __p2_267)); \ + __ret_267 = __builtin_shufflevector(__ret_267, __ret_267, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_267; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcgezq_s16(int16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49); - return __ret; -} +#define vmulx_laneq_f16(__p0_268, __p1_268, __p2_268) __extension__ ({ \ + float16x4_t __ret_268; \ + float16x4_t __s0_268 = __p0_268; \ + float16x8_t __s1_268 = __p1_268; \ + __ret_268 = vmulx_f16(__s0_268, splat_laneq_f16(__s1_268, __p2_268)); \ + __ret_268; \ +}) #else -__ai uint16x8_t 
vcgezq_s16(int16x8_t __p0) { - uint16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vmulx_laneq_f16(__p0_269, __p1_269, __p2_269) __extension__ ({ \ + float16x4_t __ret_269; \ + float16x4_t __s0_269 = __p0_269; \ + float16x8_t __s1_269 = __p1_269; \ + float16x4_t __rev0_269; __rev0_269 = __builtin_shufflevector(__s0_269, __s0_269, 3, 2, 1, 0); \ + float16x8_t __rev1_269; __rev1_269 = __builtin_shufflevector(__s1_269, __s1_269, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_269 = __noswap_vmulx_f16(__rev0_269, __noswap_splat_laneq_f16(__rev1_269, __p2_269)); \ + __ret_269 = __builtin_shufflevector(__ret_269, __ret_269, 3, 2, 1, 0); \ + __ret_269; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcgez_s8(int8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 16); +#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = vmulxq_f16(__s0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \ + __ret; \ +}) +#else +#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \ + float16x8_t __ret; \ + float16x8_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = __noswap_vmulxq_f16(__rev0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulx_n_f16(__p0, __p1) __extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + __ret = vmulx_f16(__s0, (float16x4_t) {__s1, __s1, __s1, __s1}); \ + __ret; \ +}) +#else +#define vmulx_n_f16(__p0, __p1) 
__extension__ ({ \ + float16x4_t __ret; \ + float16x4_t __s0 = __p0; \ + float16_t __s1 = __p1; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = __noswap_vmulx_f16(__rev0, (float16x4_t) {__s1, __s1, __s1, __s1}); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpaddq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else -__ai uint8x8_t vcgez_s8(int8x8_t __p0) { - uint8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 16); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vpaddq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif -__ai uint64x1_t vcgez_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcgez_f32(float32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpmaxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else -__ai uint32x2_t vcgez_f32(float32x2_t __p0) { - uint32x2_t __ret; - float32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vpmaxq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcgez_s32(int32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpmaxnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else -__ai uint32x2_t vcgez_s32(int32x2_t __p0) { - uint32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vpmaxnmq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif -__ai uint64x1_t vcgez_s64(int64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19); - return __ret; -} #ifdef 
__LITTLE_ENDIAN__ -__ai uint16x4_t vcgez_s16(int16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpmaxnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else -__ai uint16x4_t vcgez_s16(int16x4_t __p0) { - uint16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vpmaxnm_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif -__ai uint64_t vcgezd_s64(int64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcgezd_s64(__p0); - return __ret; -} -__ai uint64_t vcgezd_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0); - return __ret; -} -__ai uint32_t vcgezs_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 > __p1); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpminq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else -__ai uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, 
__p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vpminq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 > __p1); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vpminnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); return __ret; } #else -__ai uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vpminnmq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) { - 
uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 > __p1); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vpminnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); return __ret; } #else -__ai uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t)(__rev0 > __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vpminnm_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif -__ai uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 > __p1); - return __ret; -} -__ai uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 > __p1); - return __ret; -} -__ai uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 > __p1); - return __ret; -} -__ai uint64_t vcgtd_s64(int64_t __p0, int64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcgtd_s64(__p0, __p1); - return __ret; -} -__ai uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1); - return __ret; -} -__ai uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1); - return __ret; -} -__ai uint32_t vcgts_f32(float32_t __p0, 
float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 48); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndiq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vrndiq_f16((int8x16_t)__p0, 40); return __ret; } #else -__ai uint8x16_t vcgtzq_s8(int8x16_t __p0) { - uint8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vrndiq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vrndiq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndi_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vrndi_f16((int8x8_t)__p0, 8); return __ret; } #else -__ai uint64x2_t vcgtzq_f64(float64x2_t __p0) { - uint64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vrndi_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float16x4_t) __builtin_neon_vrndi_f16((int8x8_t)__rev0, 8); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vsqrtq_f16(float16x8_t __p0) { + float16x8_t __ret; + __ret = (float16x8_t) __builtin_neon_vsqrtq_f16((int8x16_t)__p0, 40); return __ret; } #else -__ai uint32x4_t vcgtzq_f32(float32x4_t __p0) { - uint32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("fullfp16,neon"))) float16x8_t vsqrtq_f16(float16x8_t __p0) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (float16x8_t) __builtin_neon_vsqrtq_f16((int8x16_t)__rev0, 40); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vsqrt_f16(float16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t) __builtin_neon_vsqrt_f16((int8x8_t)__p0, 8); return __ret; } #else -__ai uint32x4_t vcgtzq_s32(int32x4_t __p0) { - uint32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50); +__ai __attribute__((target("fullfp16,neon"))) float16x4_t vsqrt_f16(float16x4_t __p0) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = 
(float16x4_t) __builtin_neon_vsqrt_f16((int8x8_t)__rev0, 8); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51); - return __ret; -} +#define vsudotq_laneq_s32(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \ + int32x4_t __ret_270; \ + int32x4_t __s0_270 = __p0_270; \ + int8x16_t __s1_270 = __p1_270; \ + uint8x16_t __s2_270 = __p2_270; \ +uint8x16_t __reint_270 = __s2_270; \ + __ret_270 = vusdotq_s32(__s0_270, (uint8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_270, __p3_270)), __s1_270); \ + __ret_270; \ +}) #else -__ai uint64x2_t vcgtzq_s64(int64x2_t __p0) { - uint64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vsudotq_laneq_s32(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \ + int32x4_t __ret_271; \ + int32x4_t __s0_271 = __p0_271; \ + int8x16_t __s1_271 = __p1_271; \ + uint8x16_t __s2_271 = __p2_271; \ + int32x4_t __rev0_271; __rev0_271 = __builtin_shufflevector(__s0_271, __s0_271, 3, 2, 1, 0); \ + int8x16_t __rev1_271; __rev1_271 = __builtin_shufflevector(__s1_271, __s1_271, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_271; __rev2_271 = __builtin_shufflevector(__s2_271, __s2_271, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x16_t __reint_271 = __rev2_271; \ + __ret_271 = __noswap_vusdotq_s32(__rev0_271, (uint8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_271, __p3_271)), __rev1_271); \ + __ret_271 = __builtin_shufflevector(__ret_271, __ret_271, 3, 2, 1, 0); \ + __ret_271; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) 
__builtin_neon_vcgtzq_v((int8x16_t)__p0, 49); - return __ret; -} +#define vsudot_laneq_s32(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \ + int32x2_t __ret_272; \ + int32x2_t __s0_272 = __p0_272; \ + int8x8_t __s1_272 = __p1_272; \ + uint8x16_t __s2_272 = __p2_272; \ +uint8x16_t __reint_272 = __s2_272; \ + __ret_272 = vusdot_s32(__s0_272, (uint8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_272, __p3_272)), __s1_272); \ + __ret_272; \ +}) #else -__ai uint16x8_t vcgtzq_s16(int16x8_t __p0) { - uint16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vsudot_laneq_s32(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \ + int32x2_t __ret_273; \ + int32x2_t __s0_273 = __p0_273; \ + int8x8_t __s1_273 = __p1_273; \ + uint8x16_t __s2_273 = __p2_273; \ + int32x2_t __rev0_273; __rev0_273 = __builtin_shufflevector(__s0_273, __s0_273, 1, 0); \ + int8x8_t __rev1_273; __rev1_273 = __builtin_shufflevector(__s1_273, __s1_273, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_273; __rev2_273 = __builtin_shufflevector(__s2_273, __s2_273, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x16_t __reint_273 = __rev2_273; \ + __ret_273 = __noswap_vusdot_s32(__rev0_273, (uint8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_273, __p3_273)), __rev1_273); \ + __ret_273 = __builtin_shufflevector(__ret_273, __ret_273, 1, 0); \ + __ret_273; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcgtz_s8(int8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 16); +#define vusdotq_laneq_s32(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \ + int32x4_t __ret_274; \ + int32x4_t __s0_274 = __p0_274; \ + uint8x16_t __s1_274 = __p1_274; \ + int8x16_t __s2_274 = __p2_274; \ +int8x16_t __reint_274 = 
__s2_274; \ + __ret_274 = vusdotq_s32(__s0_274, __s1_274, (int8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_274, __p3_274))); \ + __ret_274; \ +}) +#else +#define vusdotq_laneq_s32(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \ + int32x4_t __ret_275; \ + int32x4_t __s0_275 = __p0_275; \ + uint8x16_t __s1_275 = __p1_275; \ + int8x16_t __s2_275 = __p2_275; \ + int32x4_t __rev0_275; __rev0_275 = __builtin_shufflevector(__s0_275, __s0_275, 3, 2, 1, 0); \ + uint8x16_t __rev1_275; __rev1_275 = __builtin_shufflevector(__s1_275, __s1_275, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_275; __rev2_275 = __builtin_shufflevector(__s2_275, __s2_275, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x16_t __reint_275 = __rev2_275; \ + __ret_275 = __noswap_vusdotq_s32(__rev0_275, __rev1_275, (int8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_275, __p3_275))); \ + __ret_275 = __builtin_shufflevector(__ret_275, __ret_275, 3, 2, 1, 0); \ + __ret_275; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vusdot_laneq_s32(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \ + int32x2_t __ret_276; \ + int32x2_t __s0_276 = __p0_276; \ + uint8x8_t __s1_276 = __p1_276; \ + int8x16_t __s2_276 = __p2_276; \ +int8x16_t __reint_276 = __s2_276; \ + __ret_276 = vusdot_s32(__s0_276, __s1_276, (int8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_276, __p3_276))); \ + __ret_276; \ +}) +#else +#define vusdot_laneq_s32(__p0_277, __p1_277, __p2_277, __p3_277) __extension__ ({ \ + int32x2_t __ret_277; \ + int32x2_t __s0_277 = __p0_277; \ + uint8x8_t __s1_277 = __p1_277; \ + int8x16_t __s2_277 = __p2_277; \ + int32x2_t __rev0_277; __rev0_277 = __builtin_shufflevector(__s0_277, __s0_277, 1, 0); \ + uint8x8_t __rev1_277; __rev1_277 = __builtin_shufflevector(__s1_277, __s1_277, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_277; __rev2_277 = __builtin_shufflevector(__s2_277, __s2_277, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); 
\ +int8x16_t __reint_277 = __rev2_277; \ + __ret_277 = __noswap_vusdot_s32(__rev0_277, __rev1_277, (int8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_277, __p3_277))); \ + __ret_277 = __builtin_shufflevector(__ret_277, __ret_277, 1, 0); \ + __ret_277; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else -__ai uint8x8_t vcgtz_s8(int8x8_t __p0) { - uint8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float64x2_t vabdq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vabdq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai uint64x1_t vcgtz_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19); +__ai __attribute__((target("neon"))) float64x1_t vabd_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vabd_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} +__ai __attribute__((target("neon"))) float64_t vabdd_f64(float64_t __p0, float64_t __p1) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vabdd_f64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) float32_t vabds_f32(float32_t __p0, float32_t __p1) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vabds_f32(__p0, __p1); return __ret; } #ifdef 
__LITTLE_ENDIAN__ -__ai uint32x2_t vcgtz_f32(float32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18); +__ai __attribute__((target("neon"))) float64x2_t vabsq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 42); return __ret; } #else -__ai uint32x2_t vcgtz_f32(float32x2_t __p0) { - uint32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18); +__ai __attribute__((target("neon"))) float64x2_t vabsq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcgtz_s32(int32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18); +__ai __attribute__((target("neon"))) int64x2_t vabsq_s64(int64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__p0, 35); return __ret; } #else -__ai uint32x2_t vcgtz_s32(int32x2_t __p0) { - uint32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18); +__ai __attribute__((target("neon"))) int64x2_t vabsq_s64(int64x2_t __p0) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vabsq_v((int8x16_t)__rev0, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai uint64x1_t vcgtz_s64(int64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19); +__ai __attribute__((target("neon"))) float64x1_t vabs_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) 
__builtin_neon_vabs_v((int8x8_t)__p0, 10); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vabs_s64(int64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vabs_v((int8x8_t)__p0, 3); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vabsd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vabsd_s64(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcgtz_s16(int16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17); +__ai __attribute__((target("neon"))) float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __p0 + __p1; return __ret; } #else -__ai uint16x4_t vcgtz_s16(int16x4_t __p0) { - uint16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float64x2_t vaddq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 + __rev1; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai uint64_t vcgtzd_s64(int64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcgtzd_s64(__p0); +__ai __attribute__((target("neon"))) float64x1_t vadd_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = __p0 + __p1; return __ret; } -__ai uint64_t vcgtzd_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vaddd_u64(uint64_t __p0, uint64_t __p1) { uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0); + __ret = (uint64_t) __builtin_neon_vaddd_u64(__p0, __p1); return __ret; } -__ai uint32_t vcgtzs_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) 
__builtin_neon_vcgtzs_f32(__p0); +__ai __attribute__((target("neon"))) int64_t vaddd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddd_s64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vaddq_p128(poly128_t __p0, poly128_t __p1) { + poly128_t __ret; + __ret = (poly128_t) __builtin_neon_vaddq_p128(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 <= __p1); +__ai __attribute__((target("neon"))) uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vaddhn_u32(__p1, __p2)); return __ret; } #else -__ai uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint16x8_t vaddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vcombine_u16(__rev0, __noswap_vaddhn_u32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 <= __p1); +__ai __attribute__((target("neon"))) uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vaddhn_u64(__p1, __p2)); return __ret; } #else 
-__ai uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint32x4_t vaddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vcombine_u32(__rev0, __noswap_vaddhn_u64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 <= __p1); +__ai __attribute__((target("neon"))) uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vaddhn_u16(__p1, __p2)); return __ret; } #else -__ai uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t)(__rev0 <= __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint8x16_t vaddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_u8(__rev0, 
__noswap_vaddhn_u16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif -__ai uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 <= __p1); - return __ret; -} -__ai uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 <= __p1); - return __ret; -} -__ai uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 <= __p1); - return __ret; -} -__ai uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1); +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vaddhn_s32(__p1, __p2)); return __ret; } -__ai uint64_t vcled_s64(int64_t __p0, int64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcled_s64(__p0, __p1); +#else +__ai __attribute__((target("neon"))) int16x8_t vaddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vcombine_s16(__rev0, __noswap_vaddhn_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint64_t vcled_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vaddhn_s64(__p1, __p2)); return __ret; } -__ai 
uint32_t vcles_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1); +#else +__ai __attribute__((target("neon"))) int32x4_t vaddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vcombine_s32(__rev0, __noswap_vaddhn_s64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } +#endif + #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vclezq_s8(int8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 48); +__ai __attribute__((target("neon"))) int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vaddhn_s16(__p1, __p2)); return __ret; } #else -__ai uint8x16_t vclezq_s8(int8x16_t __p0) { - uint8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 48); +__ai __attribute__((target("neon"))) int8x16_t vaddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_s8(__rev0, __noswap_vaddhn_s16(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vclezq_f64(float64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) 
__builtin_neon_vclezq_v((int8x16_t)__p0, 51); +__ai __attribute__((target("neon"))) uint16_t vaddlvq_u8(uint8x16_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__p0); return __ret; } #else -__ai uint64x2_t vclezq_f64(float64x2_t __p0) { - uint64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint16_t vaddlvq_u8(uint8x16_t __p0) { + uint16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16_t) __builtin_neon_vaddlvq_u8(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vclezq_f32(float32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50); +__ai __attribute__((target("neon"))) uint64_t vaddlvq_u32(uint32x4_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__p0); return __ret; } #else -__ai uint32x4_t vclezq_f32(float32x4_t __p0) { - uint32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint64_t vaddlvq_u32(uint32x4_t __p0) { + uint64_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint64_t) __builtin_neon_vaddlvq_u32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vclezq_s32(int32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50); +__ai __attribute__((target("neon"))) uint32_t vaddlvq_u16(uint16x8_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__p0); return __ret; } #else -__ai uint32x4_t 
vclezq_s32(int32x4_t __p0) { - uint32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint32_t vaddlvq_u16(uint16x8_t __p0) { + uint32_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint32_t) __builtin_neon_vaddlvq_u16(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vclezq_s64(int64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51); +__ai __attribute__((target("neon"))) int16_t vaddlvq_s8(int8x16_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddlvq_s8(__p0); return __ret; } #else -__ai uint64x2_t vclezq_s64(int64x2_t __p0) { - uint64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) int16_t vaddlvq_s8(int8x16_t __p0) { + int16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16_t) __builtin_neon_vaddlvq_s8(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vclezq_s16(int16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49); +__ai __attribute__((target("neon"))) int64_t vaddlvq_s32(int32x4_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddlvq_s32(__p0); return __ret; } #else -__ai uint16x8_t vclezq_s16(int16x8_t __p0) { - uint16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49); - __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int64_t vaddlvq_s32(int32x4_t __p0) { + int64_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int64_t) __builtin_neon_vaddlvq_s32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vclez_s8(int8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__p0, 16); +__ai __attribute__((target("neon"))) int32_t vaddlvq_s16(int16x8_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddlvq_s16(__p0); return __ret; } #else -__ai uint8x8_t vclez_s8(int8x8_t __p0) { - uint8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32_t vaddlvq_s16(int16x8_t __p0) { + int32_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int32_t) __builtin_neon_vaddlvq_s16(__rev0); return __ret; } #endif -__ai uint64x1_t vclez_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vclez_f32(float32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18); +__ai __attribute__((target("neon"))) uint16_t vaddlv_u8(uint8x8_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddlv_u8(__p0); return __ret; } #else -__ai uint32x2_t vclez_f32(float32x2_t __p0) { - uint32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint16_t vaddlv_u8(uint8x8_t 
__p0) { + uint16_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16_t) __builtin_neon_vaddlv_u8(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vclez_s32(int32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18); +__ai __attribute__((target("neon"))) uint64_t vaddlv_u32(uint32x2_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vaddlv_u32(__p0); return __ret; } #else -__ai uint32x2_t vclez_s32(int32x2_t __p0) { - uint32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint64_t vaddlv_u32(uint32x2_t __p0) { + uint64_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64_t) __builtin_neon_vaddlv_u32(__rev0); return __ret; } #endif -__ai uint64x1_t vclez_s64(int64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vclez_s16(int16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17); +__ai __attribute__((target("neon"))) uint32_t vaddlv_u16(uint16x4_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddlv_u16(__p0); return __ret; } #else -__ai uint16x4_t vclez_s16(int16x4_t __p0) { - uint16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint32_t vaddlv_u16(uint16x4_t __p0) { + uint32_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32_t) 
__builtin_neon_vaddlv_u16(__rev0); return __ret; } #endif -__ai uint64_t vclezd_s64(int64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vclezd_s64(__p0); - return __ret; -} -__ai uint64_t vclezd_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0); +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16_t vaddlv_s8(int8x8_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddlv_s8(__p0); return __ret; } -__ai uint32_t vclezs_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0); +#else +__ai __attribute__((target("neon"))) int16_t vaddlv_s8(int8x8_t __p0) { + int16_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16_t) __builtin_neon_vaddlv_s8(__rev0); return __ret; } +#endif + #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 < __p1); +__ai __attribute__((target("neon"))) int64_t vaddlv_s32(int32x2_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddlv_s32(__p0); return __ret; } #else -__ai uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) int64_t vaddlv_s32(int32x2_t __p0) { + int64_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64_t) __builtin_neon_vaddlv_s32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 < __p1); +__ai __attribute__((target("neon"))) int32_t vaddlv_s16(int16x4_t __p0) { + int32_t __ret; + __ret 
= (int32_t) __builtin_neon_vaddlv_s16(__p0); return __ret; } #else -__ai uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) { - uint64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) int32_t vaddlv_s16(int16x4_t __p0) { + int32_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32_t) __builtin_neon_vaddlv_s16(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0 < __p1); +__ai __attribute__((target("neon"))) uint8_t vaddvq_u8(uint8x16_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vaddvq_u8(__p0); return __ret; } #else -__ai uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t)(__rev0 < __rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint8_t vaddvq_u8(uint8x16_t __p0) { + uint8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8_t) __builtin_neon_vaddvq_u8(__rev0); return __ret; } #endif -__ai uint64x1_t vclt_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 < __p1); - return __ret; -} -__ai uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 < __p1); +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32_t vaddvq_u32(uint32x4_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddvq_u32(__p0); 
return __ret; } -__ai uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t)(__p0 < __p1); +#else +__ai __attribute__((target("neon"))) uint32_t vaddvq_u32(uint32x4_t __p0) { + uint32_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32_t) __builtin_neon_vaddvq_u32(__rev0); return __ret; } -__ai uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) { +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64_t vaddvq_u64(uint64x2_t __p0) { uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1); + __ret = (uint64_t) __builtin_neon_vaddvq_u64(__p0); return __ret; } -__ai uint64_t vcltd_s64(int64_t __p0, int64_t __p1) { +#else +__ai __attribute__((target("neon"))) uint64_t vaddvq_u64(uint64x2_t __p0) { uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcltd_s64(__p0, __p1); + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64_t) __builtin_neon_vaddvq_u64(__rev0); return __ret; } -__ai uint64_t vcltd_f64(float64_t __p0, float64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16_t vaddvq_u16(uint16x8_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddvq_u16(__p0); return __ret; } -__ai uint32_t vclts_f32(float32_t __p0, float32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1); +#else +__ai __attribute__((target("neon"))) uint16_t vaddvq_u16(uint16x8_t __p0) { + uint16_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16_t) __builtin_neon_vaddvq_u16(__rev0); return __ret; } +#endif + #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vcltzq_s8(int8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 48); +__ai 
__attribute__((target("neon"))) int8_t vaddvq_s8(int8x16_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vaddvq_s8(__p0); return __ret; } #else -__ai uint8x16_t vcltzq_s8(int8x16_t __p0) { - uint8x16_t __ret; +__ai __attribute__((target("neon"))) int8_t vaddvq_s8(int8x16_t __p0) { + int8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8_t) __builtin_neon_vaddvq_s8(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcltzq_f64(float64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51); +__ai __attribute__((target("neon"))) float64_t vaddvq_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vaddvq_f64(__p0); return __ret; } #else -__ai uint64x2_t vcltzq_f64(float64x2_t __p0) { - uint64x2_t __ret; +__ai __attribute__((target("neon"))) float64_t vaddvq_f64(float64x2_t __p0) { + float64_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + __ret = (float64_t) __builtin_neon_vaddvq_f64(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcltzq_f32(float32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50); +__ai __attribute__((target("neon"))) float32_t vaddvq_f32(float32x4_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vaddvq_f32(__p0); return __ret; } #else -__ai uint32x4_t vcltzq_f32(float32x4_t __p0) { - uint32x4_t __ret; +__ai __attribute__((target("neon"))) float32_t vaddvq_f32(float32x4_t __p0) { + float32_t __ret; float32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + __ret = (float32_t) __builtin_neon_vaddvq_f32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vcltzq_s32(int32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50); +__ai __attribute__((target("neon"))) int32_t vaddvq_s32(int32x4_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddvq_s32(__p0); return __ret; } #else -__ai uint32x4_t vcltzq_s32(int32x4_t __p0) { - uint32x4_t __ret; +__ai __attribute__((target("neon"))) int32_t vaddvq_s32(int32x4_t __p0) { + int32_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + __ret = (int32_t) __builtin_neon_vaddvq_s32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcltzq_s64(int64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51); +__ai __attribute__((target("neon"))) int64_t vaddvq_s64(int64x2_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vaddvq_s64(__p0); return __ret; } #else -__ai uint64x2_t vcltzq_s64(int64x2_t __p0) { - uint64x2_t __ret; +__ai __attribute__((target("neon"))) int64_t vaddvq_s64(int64x2_t __p0) { + int64_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + __ret = (int64_t) __builtin_neon_vaddvq_s64(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vcltzq_s16(int16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49); +__ai __attribute__((target("neon"))) int16_t 
vaddvq_s16(int16x8_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddvq_s16(__p0); return __ret; } #else -__ai uint16x8_t vcltzq_s16(int16x8_t __p0) { - uint16x8_t __ret; +__ai __attribute__((target("neon"))) int16_t vaddvq_s16(int16x8_t __p0) { + int16_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16_t) __builtin_neon_vaddvq_s16(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vcltz_s8(int8x8_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 16); +__ai __attribute__((target("neon"))) uint8_t vaddv_u8(uint8x8_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vaddv_u8(__p0); return __ret; } #else -__ai uint8x8_t vcltz_s8(int8x8_t __p0) { - uint8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8_t vaddv_u8(uint8x8_t __p0) { + uint8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8_t) __builtin_neon_vaddv_u8(__rev0); return __ret; } #endif -__ai uint64x1_t vcltz_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcltz_f32(float32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18); +__ai __attribute__((target("neon"))) uint32_t vaddv_u32(uint32x2_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vaddv_u32(__p0); return __ret; } #else -__ai uint32x2_t vcltz_f32(float32x2_t __p0) { - uint32x2_t __ret; 
- float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint32_t vaddv_u32(uint32x2_t __p0) { + uint32_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32_t) __builtin_neon_vaddv_u32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vcltz_s32(int32x2_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18); +__ai __attribute__((target("neon"))) uint16_t vaddv_u16(uint16x4_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vaddv_u16(__p0); return __ret; } #else -__ai uint32x2_t vcltz_s32(int32x2_t __p0) { - uint32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint16_t vaddv_u16(uint16x4_t __p0) { + uint16_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16_t) __builtin_neon_vaddv_u16(__rev0); return __ret; } #endif -__ai uint64x1_t vcltz_s64(int64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19); +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8_t vaddv_s8(int8x8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vaddv_s8(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8_t vaddv_s8(int8x8_t __p0) { + int8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8_t) __builtin_neon_vaddv_s8(__rev0); return __ret; } +#endif + #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vcltz_s16(int16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t) 
__builtin_neon_vcltz_v((int8x8_t)__p0, 17); +__ai __attribute__((target("neon"))) float32_t vaddv_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vaddv_f32(__p0); return __ret; } #else -__ai uint16x4_t vcltz_s16(int16x4_t __p0) { - uint16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float32_t vaddv_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vaddv_f32(__rev0); return __ret; } #endif -__ai uint64_t vcltzd_s64(int64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcltzd_s64(__p0); +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32_t vaddv_s32(int32x2_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vaddv_s32(__p0); return __ret; } -__ai uint64_t vcltzd_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0); +#else +__ai __attribute__((target("neon"))) int32_t vaddv_s32(int32x2_t __p0) { + int32_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32_t) __builtin_neon_vaddv_s32(__rev0); return __ret; } -__ai uint32_t vcltzs_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16_t vaddv_s16(int16x4_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vaddv_s16(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16_t vaddv_s16(int16x4_t __p0) { + int16_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16_t) __builtin_neon_vaddv_s16(__rev0); + return __ret; +} +#endif + +__ai 
__attribute__((target("neon"))) poly64x1_t vbsl_p64(uint64x1_t __p0, poly64x1_t __p1, poly64x1_t __p2) { + poly64x1_t __ret; + __ret = (poly64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 6); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) { +__ai __attribute__((target("neon"))) poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) { poly64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 38); return __ret; } #else -__ai poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) { +__ai __attribute__((target("neon"))) poly64x2_t vbslq_p64(uint64x2_t __p0, poly64x2_t __p1, poly64x2_t __p2) { poly64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + poly64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (poly64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 38); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #else -__ai float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vbslq_f64(uint64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + uint64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float64x2_t) __builtin_neon_vbslq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif +__ai __attribute__((target("neon"))) float64x1_t vbsl_f64(uint64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vbsl_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_p8(__p0_257, __p1_257, __p2_257, __p3_257) __extension__ ({ \ - poly8x16_t __ret_257; \ - poly8x16_t __s0_257 = __p0_257; \ - poly8x8_t __s2_257 = __p2_257; \ - __ret_257 = vsetq_lane_p8(vget_lane_p8(__s2_257, __p3_257), __s0_257, __p1_257); \ - __ret_257; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} #else -#define vcopyq_lane_p8(__p0_258, __p1_258, __p2_258, __p3_258) __extension__ ({ \ - poly8x16_t __ret_258; \ - poly8x16_t __s0_258 = __p0_258; \ - poly8x8_t __s2_258 = __p2_258; \ - poly8x16_t __rev0_258; __rev0_258 = __builtin_shufflevector(__s0_258, __s0_258, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x8_t __rev2_258; __rev2_258 = __builtin_shufflevector(__s2_258, __s2_258, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_258 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_258, __p3_258), __rev0_258, __p1_258); \ - __ret_258 = __builtin_shufflevector(__ret_258, __ret_258, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_258; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcageq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcageq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) uint64x1_t vcage_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcage_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcaged_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcaged_f64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcages_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcages_f32(__p0, __p1); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_p16(__p0_259, __p1_259, __p2_259, __p3_259) __extension__ ({ \ - poly16x8_t __ret_259; \ - poly16x8_t __s0_259 = __p0_259; \ - poly16x4_t __s2_259 = __p2_259; \ - __ret_259 = vsetq_lane_p16(vget_lane_p16(__s2_259, __p3_259), __s0_259, __p1_259); \ - __ret_259; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} #else -#define vcopyq_lane_p16(__p0_260, __p1_260, __p2_260, __p3_260) __extension__ ({ \ - poly16x8_t __ret_260; \ - poly16x8_t __s0_260 = __p0_260; \ - poly16x4_t __s2_260 = __p2_260; \ - poly16x8_t __rev0_260; __rev0_260 = __builtin_shufflevector(__s0_260, __s0_260, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16x4_t __rev2_260; __rev2_260 = __builtin_shufflevector(__s2_260, __s2_260, 3, 2, 1, 0); \ - __ret_260 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_260, __p3_260), __rev0_260, __p1_260); \ - __ret_260 = __builtin_shufflevector(__ret_260, __ret_260, 7, 6, 5, 4, 3, 
2, 1, 0); \ - __ret_260; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcagtq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcagtq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) uint64x1_t vcagt_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcagt_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcagtd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcagtd_f64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcagts_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcagts_f32(__p0, __p1); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_u8(__p0_261, __p1_261, __p2_261, __p3_261) __extension__ ({ \ - uint8x16_t __ret_261; \ - uint8x16_t __s0_261 = __p0_261; \ - uint8x8_t __s2_261 = __p2_261; \ - __ret_261 = vsetq_lane_u8(vget_lane_u8(__s2_261, __p3_261), __s0_261, __p1_261); \ - __ret_261; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} #else -#define vcopyq_lane_u8(__p0_262, __p1_262, __p2_262, __p3_262) __extension__ ({ \ - uint8x16_t __ret_262; \ - uint8x16_t __s0_262 = __p0_262; \ - uint8x8_t __s2_262 = __p2_262; \ - uint8x16_t __rev0_262; __rev0_262 = __builtin_shufflevector(__s0_262, __s0_262, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev2_262; __rev2_262 = __builtin_shufflevector(__s2_262, 
__s2_262, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_262 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_262, __p3_262), __rev0_262, __p1_262); \ - __ret_262 = __builtin_shufflevector(__ret_262, __ret_262, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_262; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcaleq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcaleq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) uint64x1_t vcale_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcale_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcaled_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcaled_f64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcales_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcales_f32(__p0, __p1); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_u32(__p0_263, __p1_263, __p2_263, __p3_263) __extension__ ({ \ - uint32x4_t __ret_263; \ - uint32x4_t __s0_263 = __p0_263; \ - uint32x2_t __s2_263 = __p2_263; \ - __ret_263 = vsetq_lane_u32(vget_lane_u32(__s2_263, __p3_263), __s0_263, __p1_263); \ - __ret_263; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} #else -#define vcopyq_lane_u32(__p0_264, __p1_264, __p2_264, __p3_264) __extension__ ({ \ - uint32x4_t __ret_264; \ - uint32x4_t __s0_264 = __p0_264; 
\ - uint32x2_t __s2_264 = __p2_264; \ - uint32x4_t __rev0_264; __rev0_264 = __builtin_shufflevector(__s0_264, __s0_264, 3, 2, 1, 0); \ - uint32x2_t __rev2_264; __rev2_264 = __builtin_shufflevector(__s2_264, __s2_264, 1, 0); \ - __ret_264 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_264, __p3_264), __rev0_264, __p1_264); \ - __ret_264 = __builtin_shufflevector(__ret_264, __ret_264, 3, 2, 1, 0); \ - __ret_264; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcaltq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcaltq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) uint64x1_t vcalt_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcalt_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcaltd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcaltd_f64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcalts_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcalts_f32(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vceq_p64(poly64x1_t __p0, poly64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 == __p1); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_u64(__p0_265, __p1_265, __p2_265, __p3_265) __extension__ ({ \ - uint64x2_t __ret_265; \ - uint64x2_t __s0_265 = __p0_265; \ - uint64x1_t __s2_265 = __p2_265; \ - __ret_265 = vsetq_lane_u64(vget_lane_u64(__s2_265, __p3_265), __s0_265, __p1_265); \ - __ret_265; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t 
vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 == __p1); + return __ret; +} #else -#define vcopyq_lane_u64(__p0_266, __p1_266, __p2_266, __p3_266) __extension__ ({ \ - uint64x2_t __ret_266; \ - uint64x2_t __s0_266 = __p0_266; \ - uint64x1_t __s2_266 = __p2_266; \ - uint64x2_t __rev0_266; __rev0_266 = __builtin_shufflevector(__s0_266, __s0_266, 1, 0); \ - __ret_266 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_266, __p3_266), __rev0_266, __p1_266); \ - __ret_266 = __builtin_shufflevector(__ret_266, __ret_266, 1, 0); \ - __ret_266; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vceqq_p64(poly64x2_t __p0, poly64x2_t __p1) { + uint64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_u16(__p0_267, __p1_267, __p2_267, __p3_267) __extension__ ({ \ - uint16x8_t __ret_267; \ - uint16x8_t __s0_267 = __p0_267; \ - uint16x4_t __s2_267 = __p2_267; \ - __ret_267 = vsetq_lane_u16(vget_lane_u16(__s2_267, __p3_267), __s0_267, __p1_267); \ - __ret_267; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 == __p1); + return __ret; +} #else -#define vcopyq_lane_u16(__p0_268, __p1_268, __p2_268, __p3_268) __extension__ ({ \ - uint16x8_t __ret_268; \ - uint16x8_t __s0_268 = __p0_268; \ - uint16x4_t __s2_268 = __p2_268; \ - uint16x8_t __rev0_268; __rev0_268 = __builtin_shufflevector(__s0_268, __s0_268, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev2_268; __rev2_268 = __builtin_shufflevector(__s2_268, __s2_268, 3, 2, 1, 0); \ - __ret_268 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_268, __p3_268), __rev0_268, __p1_268); \ - __ret_268 = 
__builtin_shufflevector(__ret_268, __ret_268, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_268; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vceqq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_s8(__p0_269, __p1_269, __p2_269, __p3_269) __extension__ ({ \ - int8x16_t __ret_269; \ - int8x16_t __s0_269 = __p0_269; \ - int8x8_t __s2_269 = __p2_269; \ - __ret_269 = vsetq_lane_s8(vget_lane_s8(__s2_269, __p3_269), __s0_269, __p1_269); \ - __ret_269; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 == __p1); + return __ret; +} #else -#define vcopyq_lane_s8(__p0_270, __p1_270, __p2_270, __p3_270) __extension__ ({ \ - int8x16_t __ret_270; \ - int8x16_t __s0_270 = __p0_270; \ - int8x8_t __s2_270 = __p2_270; \ - int8x16_t __rev0_270; __rev0_270 = __builtin_shufflevector(__s0_270, __s0_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __rev2_270; __rev2_270 = __builtin_shufflevector(__s2_270, __s2_270, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_270 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_270, __p3_270), __rev0_270, __p1_270); \ - __ret_270 = __builtin_shufflevector(__ret_270, __ret_270, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_270; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vceqq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return 
__ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_f32(__p0_271, __p1_271, __p2_271, __p3_271) __extension__ ({ \ - float32x4_t __ret_271; \ - float32x4_t __s0_271 = __p0_271; \ - float32x2_t __s2_271 = __p2_271; \ - __ret_271 = vsetq_lane_f32(vget_lane_f32(__s2_271, __p3_271), __s0_271, __p1_271); \ - __ret_271; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 == __p1); + return __ret; +} #else -#define vcopyq_lane_f32(__p0_272, __p1_272, __p2_272, __p3_272) __extension__ ({ \ - float32x4_t __ret_272; \ - float32x4_t __s0_272 = __p0_272; \ - float32x2_t __s2_272 = __p2_272; \ - float32x4_t __rev0_272; __rev0_272 = __builtin_shufflevector(__s0_272, __s0_272, 3, 2, 1, 0); \ - float32x2_t __rev2_272; __rev2_272 = __builtin_shufflevector(__s2_272, __s2_272, 1, 0); \ - __ret_272 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_272, __p3_272), __rev0_272, __p1_272); \ - __ret_272 = __builtin_shufflevector(__ret_272, __ret_272, 3, 2, 1, 0); \ - __ret_272; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vceqq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 == __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) uint64x1_t vceq_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 == __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vceq_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 == __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vceq_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 == __p1); + return __ret; +} +__ai 
__attribute__((target("neon"))) uint64_t vceqd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqd_u64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vceqd_s64(int64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqd_s64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vceqd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqd_f64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vceqs_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vceqs_f32(__p0, __p1); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_s32(__p0_273, __p1_273, __p2_273, __p3_273) __extension__ ({ \ - int32x4_t __ret_273; \ - int32x4_t __s0_273 = __p0_273; \ - int32x2_t __s2_273 = __p2_273; \ - __ret_273 = vsetq_lane_s32(vget_lane_s32(__s2_273, __p3_273), __s0_273, __p1_273); \ - __ret_273; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vceqz_p8(poly8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); + return __ret; +} #else -#define vcopyq_lane_s32(__p0_274, __p1_274, __p2_274, __p3_274) __extension__ ({ \ - int32x4_t __ret_274; \ - int32x4_t __s0_274 = __p0_274; \ - int32x2_t __s2_274 = __p2_274; \ - int32x4_t __rev0_274; __rev0_274 = __builtin_shufflevector(__s0_274, __s0_274, 3, 2, 1, 0); \ - int32x2_t __rev2_274; __rev2_274 = __builtin_shufflevector(__s2_274, __s2_274, 1, 0); \ - __ret_274 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_274, __p3_274), __rev0_274, __p1_274); \ - __ret_274 = __builtin_shufflevector(__ret_274, __ret_274, 3, 2, 1, 0); \ - __ret_274; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vceqz_p8(poly8x8_t __p0) { + uint8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = 
(uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) uint64x1_t vceqz_p64(poly64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_s64(__p0_275, __p1_275, __p2_275, __p3_275) __extension__ ({ \ - int64x2_t __ret_275; \ - int64x2_t __s0_275 = __p0_275; \ - int64x1_t __s2_275 = __p2_275; \ - __ret_275 = vsetq_lane_s64(vget_lane_s64(__s2_275, __p3_275), __s0_275, __p1_275); \ - __ret_275; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vceqzq_p8(poly8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); + return __ret; +} #else -#define vcopyq_lane_s64(__p0_276, __p1_276, __p2_276, __p3_276) __extension__ ({ \ - int64x2_t __ret_276; \ - int64x2_t __s0_276 = __p0_276; \ - int64x1_t __s2_276 = __p2_276; \ - int64x2_t __rev0_276; __rev0_276 = __builtin_shufflevector(__s0_276, __s0_276, 1, 0); \ - __ret_276 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_276, __p3_276), __rev0_276, __p1_276); \ - __ret_276 = __builtin_shufflevector(__ret_276, __ret_276, 1, 0); \ - __ret_276; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vceqzq_p8(poly8x16_t __p0) { + uint8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_s16(__p0_277, __p1_277, __p2_277, __p3_277) __extension__ ({ \ - int16x8_t __ret_277; \ - int16x8_t __s0_277 = __p0_277; \ - int16x4_t __s2_277 = __p2_277; \ - __ret_277 = vsetq_lane_s16(vget_lane_s16(__s2_277, __p3_277), __s0_277, 
__p1_277); \ - __ret_277; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vceqzq_p64(poly64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); + return __ret; +} #else -#define vcopyq_lane_s16(__p0_278, __p1_278, __p2_278, __p3_278) __extension__ ({ \ - int16x8_t __ret_278; \ - int16x8_t __s0_278 = __p0_278; \ - int16x4_t __s2_278 = __p2_278; \ - int16x8_t __rev0_278; __rev0_278 = __builtin_shufflevector(__s0_278, __s0_278, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_278; __rev2_278 = __builtin_shufflevector(__s2_278, __s2_278, 3, 2, 1, 0); \ - __ret_278 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_278, __p3_278), __rev0_278, __p1_278); \ - __ret_278 = __builtin_shufflevector(__ret_278, __ret_278, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_278; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vceqzq_p64(poly64x2_t __p0) { + uint64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_p8(__p0_279, __p1_279, __p2_279, __p3_279) __extension__ ({ \ - poly8x8_t __ret_279; \ - poly8x8_t __s0_279 = __p0_279; \ - poly8x8_t __s2_279 = __p2_279; \ - __ret_279 = vset_lane_p8(vget_lane_p8(__s2_279, __p3_279), __s0_279, __p1_279); \ - __ret_279; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vceqzq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); + return __ret; +} #else -#define vcopy_lane_p8(__p0_280, __p1_280, __p2_280, __p3_280) __extension__ ({ \ - poly8x8_t __ret_280; \ - poly8x8_t __s0_280 = __p0_280; \ - poly8x8_t __s2_280 = __p2_280; \ - poly8x8_t __rev0_280; __rev0_280 = __builtin_shufflevector(__s0_280, __s0_280, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x8_t __rev2_280; __rev2_280 = __builtin_shufflevector(__s2_280, 
__s2_280, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_280 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_280, __p3_280), __rev0_280, __p1_280); \ - __ret_280 = __builtin_shufflevector(__ret_280, __ret_280, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_280; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vceqzq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_p16(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \ - poly16x4_t __ret_281; \ - poly16x4_t __s0_281 = __p0_281; \ - poly16x4_t __s2_281 = __p2_281; \ - __ret_281 = vset_lane_p16(vget_lane_p16(__s2_281, __p3_281), __s0_281, __p1_281); \ - __ret_281; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vceqzq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); + return __ret; +} #else -#define vcopy_lane_p16(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \ - poly16x4_t __ret_282; \ - poly16x4_t __s0_282 = __p0_282; \ - poly16x4_t __s2_282 = __p2_282; \ - poly16x4_t __rev0_282; __rev0_282 = __builtin_shufflevector(__s0_282, __s0_282, 3, 2, 1, 0); \ - poly16x4_t __rev2_282; __rev2_282 = __builtin_shufflevector(__s2_282, __s2_282, 3, 2, 1, 0); \ - __ret_282 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_282, __p3_282), __rev0_282, __p1_282); \ - __ret_282 = __builtin_shufflevector(__ret_282, __ret_282, 3, 2, 1, 0); \ - __ret_282; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vceqzq_u32(uint32x4_t __p0) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); + 
__ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_u8(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \ - uint8x8_t __ret_283; \ - uint8x8_t __s0_283 = __p0_283; \ - uint8x8_t __s2_283 = __p2_283; \ - __ret_283 = vset_lane_u8(vget_lane_u8(__s2_283, __p3_283), __s0_283, __p1_283); \ - __ret_283; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vceqzq_u64(uint64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); + return __ret; +} #else -#define vcopy_lane_u8(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \ - uint8x8_t __ret_284; \ - uint8x8_t __s0_284 = __p0_284; \ - uint8x8_t __s2_284 = __p2_284; \ - uint8x8_t __rev0_284; __rev0_284 = __builtin_shufflevector(__s0_284, __s0_284, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev2_284; __rev2_284 = __builtin_shufflevector(__s2_284, __s2_284, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_284 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_284, __p3_284), __rev0_284, __p1_284); \ - __ret_284 = __builtin_shufflevector(__ret_284, __ret_284, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_284; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vceqzq_u64(uint64x2_t __p0) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_u32(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \ - uint32x2_t __ret_285; \ - uint32x2_t __s0_285 = __p0_285; \ - uint32x2_t __s2_285 = __p2_285; \ - __ret_285 = vset_lane_u32(vget_lane_u32(__s2_285, __p3_285), __s0_285, __p1_285); \ - __ret_285; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vceqzq_u16(uint16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49); + return 
__ret; +} #else -#define vcopy_lane_u32(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \ - uint32x2_t __ret_286; \ - uint32x2_t __s0_286 = __p0_286; \ - uint32x2_t __s2_286 = __p2_286; \ - uint32x2_t __rev0_286; __rev0_286 = __builtin_shufflevector(__s0_286, __s0_286, 1, 0); \ - uint32x2_t __rev2_286; __rev2_286 = __builtin_shufflevector(__s2_286, __s2_286, 1, 0); \ - __ret_286 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_286, __p3_286), __rev0_286, __p1_286); \ - __ret_286 = __builtin_shufflevector(__ret_286, __ret_286, 1, 0); \ - __ret_286; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vceqzq_u16(uint16x8_t __p0) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif -#define vcopy_lane_u64(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \ - uint64x1_t __ret_287; \ - uint64x1_t __s0_287 = __p0_287; \ - uint64x1_t __s2_287 = __p2_287; \ - __ret_287 = vset_lane_u64(vget_lane_u64(__s2_287, __p3_287), __s0_287, __p1_287); \ - __ret_287; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_u16(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \ - uint16x4_t __ret_288; \ - uint16x4_t __s0_288 = __p0_288; \ - uint16x4_t __s2_288 = __p2_288; \ - __ret_288 = vset_lane_u16(vget_lane_u16(__s2_288, __p3_288), __s0_288, __p1_288); \ - __ret_288; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vceqzq_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 48); + return __ret; +} #else -#define vcopy_lane_u16(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \ - uint16x4_t __ret_289; \ - uint16x4_t __s0_289 = __p0_289; \ - uint16x4_t __s2_289 = __p2_289; \ - uint16x4_t __rev0_289; __rev0_289 = __builtin_shufflevector(__s0_289, __s0_289, 3, 2, 1, 0); 
\ - uint16x4_t __rev2_289; __rev2_289 = __builtin_shufflevector(__s2_289, __s2_289, 3, 2, 1, 0); \ - __ret_289 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_289, __p3_289), __rev0_289, __p1_289); \ - __ret_289 = __builtin_shufflevector(__ret_289, __ret_289, 3, 2, 1, 0); \ - __ret_289; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vceqzq_s8(int8x16_t __p0) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_s8(__p0_290, __p1_290, __p2_290, __p3_290) __extension__ ({ \ - int8x8_t __ret_290; \ - int8x8_t __s0_290 = __p0_290; \ - int8x8_t __s2_290 = __p2_290; \ - __ret_290 = vset_lane_s8(vget_lane_s8(__s2_290, __p3_290), __s0_290, __p1_290); \ - __ret_290; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vceqzq_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); + return __ret; +} #else -#define vcopy_lane_s8(__p0_291, __p1_291, __p2_291, __p3_291) __extension__ ({ \ - int8x8_t __ret_291; \ - int8x8_t __s0_291 = __p0_291; \ - int8x8_t __s2_291 = __p2_291; \ - int8x8_t __rev0_291; __rev0_291 = __builtin_shufflevector(__s0_291, __s0_291, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x8_t __rev2_291; __rev2_291 = __builtin_shufflevector(__s2_291, __s2_291, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_291 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_291, __p3_291), __rev0_291, __p1_291); \ - __ret_291 = __builtin_shufflevector(__ret_291, __ret_291, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_291; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vceqzq_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = 
(uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_f32(__p0_292, __p1_292, __p2_292, __p3_292) __extension__ ({ \ - float32x2_t __ret_292; \ - float32x2_t __s0_292 = __p0_292; \ - float32x2_t __s2_292 = __p2_292; \ - __ret_292 = vset_lane_f32(vget_lane_f32(__s2_292, __p3_292), __s0_292, __p1_292); \ - __ret_292; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vceqzq_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 50); + return __ret; +} #else -#define vcopy_lane_f32(__p0_293, __p1_293, __p2_293, __p3_293) __extension__ ({ \ - float32x2_t __ret_293; \ - float32x2_t __s0_293 = __p0_293; \ - float32x2_t __s2_293 = __p2_293; \ - float32x2_t __rev0_293; __rev0_293 = __builtin_shufflevector(__s0_293, __s0_293, 1, 0); \ - float32x2_t __rev2_293; __rev2_293 = __builtin_shufflevector(__s2_293, __s2_293, 1, 0); \ - __ret_293 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_293, __p3_293), __rev0_293, __p1_293); \ - __ret_293 = __builtin_shufflevector(__ret_293, __ret_293, 1, 0); \ - __ret_293; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vceqzq_f32(float32x4_t __p0) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_s32(__p0_294, __p1_294, __p2_294, __p3_294) __extension__ ({ \ - int32x2_t __ret_294; \ - int32x2_t __s0_294 = __p0_294; \ - int32x2_t __s2_294 = __p2_294; \ - __ret_294 = vset_lane_s32(vget_lane_s32(__s2_294, __p3_294), __s0_294, __p1_294); \ - __ret_294; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vceqzq_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) 
__builtin_neon_vceqzq_v((int8x16_t)__p0, 50); + return __ret; +} #else -#define vcopy_lane_s32(__p0_295, __p1_295, __p2_295, __p3_295) __extension__ ({ \ - int32x2_t __ret_295; \ - int32x2_t __s0_295 = __p0_295; \ - int32x2_t __s2_295 = __p2_295; \ - int32x2_t __rev0_295; __rev0_295 = __builtin_shufflevector(__s0_295, __s0_295, 1, 0); \ - int32x2_t __rev2_295; __rev2_295 = __builtin_shufflevector(__s2_295, __s2_295, 1, 0); \ - __ret_295 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_295, __p3_295), __rev0_295, __p1_295); \ - __ret_295 = __builtin_shufflevector(__ret_295, __ret_295, 1, 0); \ - __ret_295; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vceqzq_s32(int32x4_t __p0) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif -#define vcopy_lane_s64(__p0_296, __p1_296, __p2_296, __p3_296) __extension__ ({ \ - int64x1_t __ret_296; \ - int64x1_t __s0_296 = __p0_296; \ - int64x1_t __s2_296 = __p2_296; \ - __ret_296 = vset_lane_s64(vget_lane_s64(__s2_296, __p3_296), __s0_296, __p1_296); \ - __ret_296; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_s16(__p0_297, __p1_297, __p2_297, __p3_297) __extension__ ({ \ - int16x4_t __ret_297; \ - int16x4_t __s0_297 = __p0_297; \ - int16x4_t __s2_297 = __p2_297; \ - __ret_297 = vset_lane_s16(vget_lane_s16(__s2_297, __p3_297), __s0_297, __p1_297); \ - __ret_297; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vceqzq_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 51); + return __ret; +} #else -#define vcopy_lane_s16(__p0_298, __p1_298, __p2_298, __p3_298) __extension__ ({ \ - int16x4_t __ret_298; \ - int16x4_t __s0_298 = __p0_298; \ - int16x4_t __s2_298 = __p2_298; \ - int16x4_t __rev0_298; __rev0_298 = __builtin_shufflevector(__s0_298, 
__s0_298, 3, 2, 1, 0); \ - int16x4_t __rev2_298; __rev2_298 = __builtin_shufflevector(__s2_298, __s2_298, 3, 2, 1, 0); \ - __ret_298 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_298, __p3_298), __rev0_298, __p1_298); \ - __ret_298 = __builtin_shufflevector(__ret_298, __ret_298, 3, 2, 1, 0); \ - __ret_298; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vceqzq_s64(int64x2_t __p0) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_p8(__p0_299, __p1_299, __p2_299, __p3_299) __extension__ ({ \ - poly8x16_t __ret_299; \ - poly8x16_t __s0_299 = __p0_299; \ - poly8x16_t __s2_299 = __p2_299; \ - __ret_299 = vsetq_lane_p8(vgetq_lane_p8(__s2_299, __p3_299), __s0_299, __p1_299); \ - __ret_299; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vceqzq_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__p0, 49); + return __ret; +} #else -#define vcopyq_laneq_p8(__p0_300, __p1_300, __p2_300, __p3_300) __extension__ ({ \ - poly8x16_t __ret_300; \ - poly8x16_t __s0_300 = __p0_300; \ - poly8x16_t __s2_300 = __p2_300; \ - poly8x16_t __rev0_300; __rev0_300 = __builtin_shufflevector(__s0_300, __s0_300, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x16_t __rev2_300; __rev2_300 = __builtin_shufflevector(__s2_300, __s2_300, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_300 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_300, __p3_300), __rev0_300, __p1_300); \ - __ret_300 = __builtin_shufflevector(__ret_300, __ret_300, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_300; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vceqzq_s16(int16x8_t __p0) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vceqzq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_p16(__p0_301, __p1_301, __p2_301, __p3_301) __extension__ ({ \ - poly16x8_t __ret_301; \ - poly16x8_t __s0_301 = __p0_301; \ - poly16x8_t __s2_301 = __p2_301; \ - __ret_301 = vsetq_lane_p16(vgetq_lane_p16(__s2_301, __p3_301), __s0_301, __p1_301); \ - __ret_301; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vceqz_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); + return __ret; +} #else -#define vcopyq_laneq_p16(__p0_302, __p1_302, __p2_302, __p3_302) __extension__ ({ \ - poly16x8_t __ret_302; \ - poly16x8_t __s0_302 = __p0_302; \ - poly16x8_t __s2_302 = __p2_302; \ - poly16x8_t __rev0_302; __rev0_302 = __builtin_shufflevector(__s0_302, __s0_302, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly16x8_t __rev2_302; __rev2_302 = __builtin_shufflevector(__s2_302, __s2_302, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_302 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_302, __p3_302), __rev0_302, __p1_302); \ - __ret_302 = __builtin_shufflevector(__ret_302, __ret_302, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_302; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vceqz_u8(uint8x8_t __p0) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_u8(__p0_303, __p1_303, __p2_303, __p3_303) __extension__ ({ \ - uint8x16_t __ret_303; \ - uint8x16_t __s0_303 = __p0_303; \ - uint8x16_t __s2_303 = __p2_303; \ - __ret_303 = vsetq_lane_u8(vgetq_lane_u8(__s2_303, __p3_303), __s0_303, __p1_303); \ - 
__ret_303; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vceqz_u32(uint32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); + return __ret; +} #else -#define vcopyq_laneq_u8(__p0_304, __p1_304, __p2_304, __p3_304) __extension__ ({ \ - uint8x16_t __ret_304; \ - uint8x16_t __s0_304 = __p0_304; \ - uint8x16_t __s2_304 = __p2_304; \ - uint8x16_t __rev0_304; __rev0_304 = __builtin_shufflevector(__s0_304, __s0_304, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev2_304; __rev2_304 = __builtin_shufflevector(__s2_304, __s2_304, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_304 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_304, __p3_304), __rev0_304, __p1_304); \ - __ret_304 = __builtin_shufflevector(__ret_304, __ret_304, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_304; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_u32(__p0_305, __p1_305, __p2_305, __p3_305) __extension__ ({ \ - uint32x4_t __ret_305; \ - uint32x4_t __s0_305 = __p0_305; \ - uint32x4_t __s2_305 = __p2_305; \ - __ret_305 = vsetq_lane_u32(vgetq_lane_u32(__s2_305, __p3_305), __s0_305, __p1_305); \ - __ret_305; \ -}) -#else -#define vcopyq_laneq_u32(__p0_306, __p1_306, __p2_306, __p3_306) __extension__ ({ \ - uint32x4_t __ret_306; \ - uint32x4_t __s0_306 = __p0_306; \ - uint32x4_t __s2_306 = __p2_306; \ - uint32x4_t __rev0_306; __rev0_306 = __builtin_shufflevector(__s0_306, __s0_306, 3, 2, 1, 0); \ - uint32x4_t __rev2_306; __rev2_306 = __builtin_shufflevector(__s2_306, __s2_306, 3, 2, 1, 0); \ - __ret_306 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_306, __p3_306), __rev0_306, __p1_306); \ - __ret_306 = __builtin_shufflevector(__ret_306, __ret_306, 3, 2, 1, 0); \ - __ret_306; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vceqz_u32(uint32x2_t __p0) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + 
__ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) uint64x1_t vceqz_u64(uint64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_u64(__p0_307, __p1_307, __p2_307, __p3_307) __extension__ ({ \ - uint64x2_t __ret_307; \ - uint64x2_t __s0_307 = __p0_307; \ - uint64x2_t __s2_307 = __p2_307; \ - __ret_307 = vsetq_lane_u64(vgetq_lane_u64(__s2_307, __p3_307), __s0_307, __p1_307); \ - __ret_307; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vceqz_u16(uint16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17); + return __ret; +} #else -#define vcopyq_laneq_u64(__p0_308, __p1_308, __p2_308, __p3_308) __extension__ ({ \ - uint64x2_t __ret_308; \ - uint64x2_t __s0_308 = __p0_308; \ - uint64x2_t __s2_308 = __p2_308; \ - uint64x2_t __rev0_308; __rev0_308 = __builtin_shufflevector(__s0_308, __s0_308, 1, 0); \ - uint64x2_t __rev2_308; __rev2_308 = __builtin_shufflevector(__s2_308, __s2_308, 1, 0); \ - __ret_308 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_308, __p3_308), __rev0_308, __p1_308); \ - __ret_308 = __builtin_shufflevector(__ret_308, __ret_308, 1, 0); \ - __ret_308; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vceqz_u16(uint16x4_t __p0) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_u16(__p0_309, __p1_309, __p2_309, __p3_309) __extension__ ({ \ - uint16x8_t __ret_309; \ - uint16x8_t __s0_309 = __p0_309; \ - uint16x8_t __s2_309 = __p2_309; \ - __ret_309 = vsetq_lane_u16(vgetq_lane_u16(__s2_309, 
__p3_309), __s0_309, __p1_309); \ - __ret_309; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vceqz_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 16); + return __ret; +} #else -#define vcopyq_laneq_u16(__p0_310, __p1_310, __p2_310, __p3_310) __extension__ ({ \ - uint16x8_t __ret_310; \ - uint16x8_t __s0_310 = __p0_310; \ - uint16x8_t __s2_310 = __p2_310; \ - uint16x8_t __rev0_310; __rev0_310 = __builtin_shufflevector(__s0_310, __s0_310, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev2_310; __rev2_310 = __builtin_shufflevector(__s2_310, __s2_310, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_310 = __noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_310, __p3_310), __rev0_310, __p1_310); \ - __ret_310 = __builtin_shufflevector(__ret_310, __ret_310, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_310; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vceqz_s8(int8x8_t __p0) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) uint64x1_t vceqz_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_s8(__p0_311, __p1_311, __p2_311, __p3_311) __extension__ ({ \ - int8x16_t __ret_311; \ - int8x16_t __s0_311 = __p0_311; \ - int8x16_t __s2_311 = __p2_311; \ - __ret_311 = vsetq_lane_s8(vgetq_lane_s8(__s2_311, __p3_311), __s0_311, __p1_311); \ - __ret_311; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vceqz_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); + return __ret; +} #else -#define vcopyq_laneq_s8(__p0_312, __p1_312, __p2_312, __p3_312) __extension__ ({ \ - int8x16_t __ret_312; \ - 
int8x16_t __s0_312 = __p0_312; \ - int8x16_t __s2_312 = __p2_312; \ - int8x16_t __rev0_312; __rev0_312 = __builtin_shufflevector(__s0_312, __s0_312, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev2_312; __rev2_312 = __builtin_shufflevector(__s2_312, __s2_312, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_312 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_312, __p3_312), __rev0_312, __p1_312); \ - __ret_312 = __builtin_shufflevector(__ret_312, __ret_312, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_312; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vceqz_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_f32(__p0_313, __p1_313, __p2_313, __p3_313) __extension__ ({ \ - float32x4_t __ret_313; \ - float32x4_t __s0_313 = __p0_313; \ - float32x4_t __s2_313 = __p2_313; \ - __ret_313 = vsetq_lane_f32(vgetq_lane_f32(__s2_313, __p3_313), __s0_313, __p1_313); \ - __ret_313; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vceqz_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 18); + return __ret; +} #else -#define vcopyq_laneq_f32(__p0_314, __p1_314, __p2_314, __p3_314) __extension__ ({ \ - float32x4_t __ret_314; \ - float32x4_t __s0_314 = __p0_314; \ - float32x4_t __s2_314 = __p2_314; \ - float32x4_t __rev0_314; __rev0_314 = __builtin_shufflevector(__s0_314, __s0_314, 3, 2, 1, 0); \ - float32x4_t __rev2_314; __rev2_314 = __builtin_shufflevector(__s2_314, __s2_314, 3, 2, 1, 0); \ - __ret_314 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_314, __p3_314), __rev0_314, __p1_314); \ - __ret_314 = __builtin_shufflevector(__ret_314, __ret_314, 3, 2, 1, 0); \ - __ret_314; \ 
-}) +__ai __attribute__((target("neon"))) uint32x2_t vceqz_s32(int32x2_t __p0) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) uint64x1_t vceqz_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 19); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_s32(__p0_315, __p1_315, __p2_315, __p3_315) __extension__ ({ \ - int32x4_t __ret_315; \ - int32x4_t __s0_315 = __p0_315; \ - int32x4_t __s2_315 = __p2_315; \ - __ret_315 = vsetq_lane_s32(vgetq_lane_s32(__s2_315, __p3_315), __s0_315, __p1_315); \ - __ret_315; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vceqz_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__p0, 17); + return __ret; +} #else -#define vcopyq_laneq_s32(__p0_316, __p1_316, __p2_316, __p3_316) __extension__ ({ \ - int32x4_t __ret_316; \ - int32x4_t __s0_316 = __p0_316; \ - int32x4_t __s2_316 = __p2_316; \ - int32x4_t __rev0_316; __rev0_316 = __builtin_shufflevector(__s0_316, __s0_316, 3, 2, 1, 0); \ - int32x4_t __rev2_316; __rev2_316 = __builtin_shufflevector(__s2_316, __s2_316, 3, 2, 1, 0); \ - __ret_316 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_316, __p3_316), __rev0_316, __p1_316); \ - __ret_316 = __builtin_shufflevector(__ret_316, __ret_316, 3, 2, 1, 0); \ - __ret_316; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vceqz_s16(int16x4_t __p0) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vceqz_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) uint64_t vceqzd_u64(uint64_t __p0) { + uint64_t 
__ret; + __ret = (uint64_t) __builtin_neon_vceqzd_u64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vceqzd_s64(int64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqzd_s64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vceqzd_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vceqzd_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vceqzs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vceqzs_f32(__p0); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_s64(__p0_317, __p1_317, __p2_317, __p3_317) __extension__ ({ \ - int64x2_t __ret_317; \ - int64x2_t __s0_317 = __p0_317; \ - int64x2_t __s2_317 = __p2_317; \ - __ret_317 = vsetq_lane_s64(vgetq_lane_s64(__s2_317, __p3_317), __s0_317, __p1_317); \ - __ret_317; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 >= __p1); + return __ret; +} #else -#define vcopyq_laneq_s64(__p0_318, __p1_318, __p2_318, __p3_318) __extension__ ({ \ - int64x2_t __ret_318; \ - int64x2_t __s0_318 = __p0_318; \ - int64x2_t __s2_318 = __p2_318; \ - int64x2_t __rev0_318; __rev0_318 = __builtin_shufflevector(__s0_318, __s0_318, 1, 0); \ - int64x2_t __rev2_318; __rev2_318 = __builtin_shufflevector(__s2_318, __s2_318, 1, 0); \ - __ret_318 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_318, __p3_318), __rev0_318, __p1_318); \ - __ret_318 = __builtin_shufflevector(__ret_318, __ret_318, 1, 0); \ - __ret_318; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcgeq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return 
__ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_s16(__p0_319, __p1_319, __p2_319, __p3_319) __extension__ ({ \ - int16x8_t __ret_319; \ - int16x8_t __s0_319 = __p0_319; \ - int16x8_t __s2_319 = __p2_319; \ - __ret_319 = vsetq_lane_s16(vgetq_lane_s16(__s2_319, __p3_319), __s0_319, __p1_319); \ - __ret_319; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 >= __p1); + return __ret; +} #else -#define vcopyq_laneq_s16(__p0_320, __p1_320, __p2_320, __p3_320) __extension__ ({ \ - int16x8_t __ret_320; \ - int16x8_t __s0_320 = __p0_320; \ - int16x8_t __s2_320 = __p2_320; \ - int16x8_t __rev0_320; __rev0_320 = __builtin_shufflevector(__s0_320, __s0_320, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2_320; __rev2_320 = __builtin_shufflevector(__s2_320, __s2_320, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_320 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_320, __p3_320), __rev0_320, __p1_320); \ - __ret_320 = __builtin_shufflevector(__ret_320, __ret_320, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_320; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcgeq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_p8(__p0_321, __p1_321, __p2_321, __p3_321) __extension__ ({ \ - poly8x8_t __ret_321; \ - poly8x8_t __s0_321 = __p0_321; \ - poly8x16_t __s2_321 = __p2_321; \ - __ret_321 = vset_lane_p8(vgetq_lane_p8(__s2_321, __p3_321), __s0_321, __p1_321); \ - __ret_321; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 >= __p1); + return __ret; +} #else 
-#define vcopy_laneq_p8(__p0_322, __p1_322, __p2_322, __p3_322) __extension__ ({ \ - poly8x8_t __ret_322; \ - poly8x8_t __s0_322 = __p0_322; \ - poly8x16_t __s2_322 = __p2_322; \ - poly8x8_t __rev0_322; __rev0_322 = __builtin_shufflevector(__s0_322, __s0_322, 7, 6, 5, 4, 3, 2, 1, 0); \ - poly8x16_t __rev2_322; __rev2_322 = __builtin_shufflevector(__s2_322, __s2_322, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_322 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_322, __p3_322), __rev0_322, __p1_322); \ - __ret_322 = __builtin_shufflevector(__ret_322, __ret_322, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_322; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcgeq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 >= __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) uint64x1_t vcge_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 >= __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vcge_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 >= __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vcge_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 >= __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcged_s64(int64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcged_s64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcged_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcged_u64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcged_f64(float64_t __p0, float64_t __p1) { + 
uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcged_f64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcges_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcges_f32(__p0, __p1); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_p16(__p0_323, __p1_323, __p2_323, __p3_323) __extension__ ({ \ - poly16x4_t __ret_323; \ - poly16x4_t __s0_323 = __p0_323; \ - poly16x8_t __s2_323 = __p2_323; \ - __ret_323 = vset_lane_p16(vgetq_lane_p16(__s2_323, __p3_323), __s0_323, __p1_323); \ - __ret_323; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vcgezq_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 48); + return __ret; +} #else -#define vcopy_laneq_p16(__p0_324, __p1_324, __p2_324, __p3_324) __extension__ ({ \ - poly16x4_t __ret_324; \ - poly16x4_t __s0_324 = __p0_324; \ - poly16x8_t __s2_324 = __p2_324; \ - poly16x4_t __rev0_324; __rev0_324 = __builtin_shufflevector(__s0_324, __s0_324, 3, 2, 1, 0); \ - poly16x8_t __rev2_324; __rev2_324 = __builtin_shufflevector(__s2_324, __s2_324, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_324 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_324, __p3_324), __rev0_324, __p1_324); \ - __ret_324 = __builtin_shufflevector(__ret_324, __ret_324, 3, 2, 1, 0); \ - __ret_324; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vcgezq_s8(int8x16_t __p0) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_u8(__p0_325, __p1_325, __p2_325, __p3_325) __extension__ ({ \ - uint8x8_t __ret_325; \ - uint8x8_t __s0_325 = __p0_325; \ - uint8x16_t __s2_325 = __p2_325; \ - 
__ret_325 = vset_lane_u8(vgetq_lane_u8(__s2_325, __p3_325), __s0_325, __p1_325); \ - __ret_325; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcgezq_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51); + return __ret; +} #else -#define vcopy_laneq_u8(__p0_326, __p1_326, __p2_326, __p3_326) __extension__ ({ \ - uint8x8_t __ret_326; \ - uint8x8_t __s0_326 = __p0_326; \ - uint8x16_t __s2_326 = __p2_326; \ - uint8x8_t __rev0_326; __rev0_326 = __builtin_shufflevector(__s0_326, __s0_326, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev2_326; __rev2_326 = __builtin_shufflevector(__s2_326, __s2_326, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_326 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_326, __p3_326), __rev0_326, __p1_326); \ - __ret_326 = __builtin_shufflevector(__ret_326, __ret_326, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_326; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcgezq_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_u32(__p0_327, __p1_327, __p2_327, __p3_327) __extension__ ({ \ - uint32x2_t __ret_327; \ - uint32x2_t __s0_327 = __p0_327; \ - uint32x4_t __s2_327 = __p2_327; \ - __ret_327 = vset_lane_u32(vgetq_lane_u32(__s2_327, __p3_327), __s0_327, __p1_327); \ - __ret_327; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vcgezq_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50); + return __ret; +} #else -#define vcopy_laneq_u32(__p0_328, __p1_328, __p2_328, __p3_328) __extension__ ({ \ - uint32x2_t __ret_328; \ - uint32x2_t __s0_328 = __p0_328; \ - uint32x4_t __s2_328 = __p2_328; \ - uint32x2_t __rev0_328; __rev0_328 = 
__builtin_shufflevector(__s0_328, __s0_328, 1, 0); \ - uint32x4_t __rev2_328; __rev2_328 = __builtin_shufflevector(__s2_328, __s2_328, 3, 2, 1, 0); \ - __ret_328 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_328, __p3_328), __rev0_328, __p1_328); \ - __ret_328 = __builtin_shufflevector(__ret_328, __ret_328, 1, 0); \ - __ret_328; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vcgezq_f32(float32x4_t __p0) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_u64(__p0_329, __p1_329, __p2_329, __p3_329) __extension__ ({ \ - uint64x1_t __ret_329; \ - uint64x1_t __s0_329 = __p0_329; \ - uint64x2_t __s2_329 = __p2_329; \ - __ret_329 = vset_lane_u64(vgetq_lane_u64(__s2_329, __p3_329), __s0_329, __p1_329); \ - __ret_329; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vcgezq_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 50); + return __ret; +} #else -#define vcopy_laneq_u64(__p0_330, __p1_330, __p2_330, __p3_330) __extension__ ({ \ - uint64x1_t __ret_330; \ - uint64x1_t __s0_330 = __p0_330; \ - uint64x2_t __s2_330 = __p2_330; \ - uint64x2_t __rev2_330; __rev2_330 = __builtin_shufflevector(__s2_330, __s2_330, 1, 0); \ - __ret_330 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_330, __p3_330), __s0_330, __p1_330); \ - __ret_330; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vcgezq_s32(int32x4_t __p0) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_u16(__p0_331, __p1_331, __p2_331, 
__p3_331) __extension__ ({ \ - uint16x4_t __ret_331; \ - uint16x4_t __s0_331 = __p0_331; \ - uint16x8_t __s2_331 = __p2_331; \ - __ret_331 = vset_lane_u16(vgetq_lane_u16(__s2_331, __p3_331), __s0_331, __p1_331); \ - __ret_331; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcgezq_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 51); + return __ret; +} #else -#define vcopy_laneq_u16(__p0_332, __p1_332, __p2_332, __p3_332) __extension__ ({ \ - uint16x4_t __ret_332; \ - uint16x4_t __s0_332 = __p0_332; \ - uint16x8_t __s2_332 = __p2_332; \ - uint16x4_t __rev0_332; __rev0_332 = __builtin_shufflevector(__s0_332, __s0_332, 3, 2, 1, 0); \ - uint16x8_t __rev2_332; __rev2_332 = __builtin_shufflevector(__s2_332, __s2_332, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_332 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_332, __p3_332), __rev0_332, __p1_332); \ - __ret_332 = __builtin_shufflevector(__ret_332, __ret_332, 3, 2, 1, 0); \ - __ret_332; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcgezq_s64(int64x2_t __p0) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_s8(__p0_333, __p1_333, __p2_333, __p3_333) __extension__ ({ \ - int8x8_t __ret_333; \ - int8x8_t __s0_333 = __p0_333; \ - int8x16_t __s2_333 = __p2_333; \ - __ret_333 = vset_lane_s8(vgetq_lane_s8(__s2_333, __p3_333), __s0_333, __p1_333); \ - __ret_333; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vcgezq_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__p0, 49); + return __ret; +} #else -#define vcopy_laneq_s8(__p0_334, __p1_334, __p2_334, __p3_334) __extension__ ({ \ - int8x8_t __ret_334; \ - int8x8_t __s0_334 = __p0_334; \ - int8x16_t __s2_334 = 
__p2_334; \ - int8x8_t __rev0_334; __rev0_334 = __builtin_shufflevector(__s0_334, __s0_334, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev2_334; __rev2_334 = __builtin_shufflevector(__s2_334, __s2_334, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_334 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_334, __p3_334), __rev0_334, __p1_334); \ - __ret_334 = __builtin_shufflevector(__ret_334, __ret_334, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_334; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vcgezq_s16(int16x8_t __p0) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcgezq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_f32(__p0_335, __p1_335, __p2_335, __p3_335) __extension__ ({ \ - float32x2_t __ret_335; \ - float32x2_t __s0_335 = __p0_335; \ - float32x4_t __s2_335 = __p2_335; \ - __ret_335 = vset_lane_f32(vgetq_lane_f32(__s2_335, __p3_335), __s0_335, __p1_335); \ - __ret_335; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vcgez_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 16); + return __ret; +} #else -#define vcopy_laneq_f32(__p0_336, __p1_336, __p2_336, __p3_336) __extension__ ({ \ - float32x2_t __ret_336; \ - float32x2_t __s0_336 = __p0_336; \ - float32x4_t __s2_336 = __p2_336; \ - float32x2_t __rev0_336; __rev0_336 = __builtin_shufflevector(__s0_336, __s0_336, 1, 0); \ - float32x4_t __rev2_336; __rev2_336 = __builtin_shufflevector(__s2_336, __s2_336, 3, 2, 1, 0); \ - __ret_336 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_336, __p3_336), __rev0_336, __p1_336); \ - __ret_336 = __builtin_shufflevector(__ret_336, __ret_336, 1, 0); \ - __ret_336; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vcgez_s8(int8x8_t __p0) { + uint8x8_t __ret; + 
int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) uint64x1_t vcgez_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_s32(__p0_337, __p1_337, __p2_337, __p3_337) __extension__ ({ \ - int32x2_t __ret_337; \ - int32x2_t __s0_337 = __p0_337; \ - int32x4_t __s2_337 = __p2_337; \ - __ret_337 = vset_lane_s32(vgetq_lane_s32(__s2_337, __p3_337), __s0_337, __p1_337); \ - __ret_337; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vcgez_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18); + return __ret; +} #else -#define vcopy_laneq_s32(__p0_338, __p1_338, __p2_338, __p3_338) __extension__ ({ \ - int32x2_t __ret_338; \ - int32x2_t __s0_338 = __p0_338; \ - int32x4_t __s2_338 = __p2_338; \ - int32x2_t __rev0_338; __rev0_338 = __builtin_shufflevector(__s0_338, __s0_338, 1, 0); \ - int32x4_t __rev2_338; __rev2_338 = __builtin_shufflevector(__s2_338, __s2_338, 3, 2, 1, 0); \ - __ret_338 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_338, __p3_338), __rev0_338, __p1_338); \ - __ret_338 = __builtin_shufflevector(__ret_338, __ret_338, 1, 0); \ - __ret_338; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vcgez_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_s64(__p0_339, __p1_339, __p2_339, __p3_339) __extension__ ({ \ - int64x1_t __ret_339; \ - int64x1_t __s0_339 = __p0_339; \ - 
int64x2_t __s2_339 = __p2_339; \ - __ret_339 = vset_lane_s64(vgetq_lane_s64(__s2_339, __p3_339), __s0_339, __p1_339); \ - __ret_339; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vcgez_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 18); + return __ret; +} #else -#define vcopy_laneq_s64(__p0_340, __p1_340, __p2_340, __p3_340) __extension__ ({ \ - int64x1_t __ret_340; \ - int64x1_t __s0_340 = __p0_340; \ - int64x2_t __s2_340 = __p2_340; \ - int64x2_t __rev2_340; __rev2_340 = __builtin_shufflevector(__s2_340, __s2_340, 1, 0); \ - __ret_340 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_340, __p3_340), __s0_340, __p1_340); \ - __ret_340; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vcgez_s32(int32x2_t __p0) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) uint64x1_t vcgez_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 19); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_s16(__p0_341, __p1_341, __p2_341, __p3_341) __extension__ ({ \ - int16x4_t __ret_341; \ - int16x4_t __s0_341 = __p0_341; \ - int16x8_t __s2_341 = __p2_341; \ - __ret_341 = vset_lane_s16(vgetq_lane_s16(__s2_341, __p3_341), __s0_341, __p1_341); \ - __ret_341; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vcgez_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__p0, 17); + return __ret; +} #else -#define vcopy_laneq_s16(__p0_342, __p1_342, __p2_342, __p3_342) __extension__ ({ \ - int16x4_t __ret_342; \ - int16x4_t __s0_342 = __p0_342; \ - int16x8_t __s2_342 = __p2_342; \ - int16x4_t __rev0_342; __rev0_342 = __builtin_shufflevector(__s0_342, __s0_342, 3, 2, 1, 0); \ - 
int16x8_t __rev2_342; __rev2_342 = __builtin_shufflevector(__s2_342, __s2_342, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_342 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_342, __p3_342), __rev0_342, __p1_342); \ - __ret_342 = __builtin_shufflevector(__ret_342, __ret_342, 3, 2, 1, 0); \ - __ret_342; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vcgez_s16(int16x4_t __p0) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcgez_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif -#define vcreate_p64(__p0) __extension__ ({ \ - poly64x1_t __ret; \ - uint64_t __promote = __p0; \ - __ret = (poly64x1_t)(__promote); \ - __ret; \ -}) -#define vcreate_f64(__p0) __extension__ ({ \ - float64x1_t __ret; \ - uint64_t __promote = __p0; \ - __ret = (float64x1_t)(__promote); \ - __ret; \ -}) -__ai float32_t vcvts_f32_s32(int32_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0); +__ai __attribute__((target("neon"))) uint64_t vcgezd_s64(int64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgezd_s64(__p0); return __ret; } -__ai float32_t vcvts_f32_u32(uint32_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0); +__ai __attribute__((target("neon"))) uint64_t vcgezd_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgezd_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcgezs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcgezs_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9); +__ai __attribute__((target("neon"))) uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 > 
__p1); return __ret; } #else -__ai float32x2_t vcvt_f32_f64(float64x2_t __p0) { - float32x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__rev0, 9); +__ai __attribute__((target("neon"))) uint64x2_t vcgtq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 > __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai float32x2_t __noswap_vcvt_f32_f64(float64x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9); - return __ret; -} #endif -__ai float64_t vcvtd_f64_s64(int64_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0); - return __ret; -} -__ai float64_t vcvtd_f64_u64(uint64_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 51); +__ai __attribute__((target("neon"))) uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 > __p1); return __ret; } #else -__ai float64x2_t vcvtq_f64_u64(uint64x2_t __p0) { - float64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 51); +__ai __attribute__((target("neon"))) uint64x2_t vcgtq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 > __rev1); __ret = __builtin_shufflevector(__ret, 
__ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 35); +__ai __attribute__((target("neon"))) uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 > __p1); return __ret; } #else -__ai float64x2_t vcvtq_f64_s64(int64x2_t __p0) { - float64x2_t __ret; +__ai __attribute__((target("neon"))) uint64x2_t vcgtq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 35); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 > __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai float64x1_t vcvt_f64_u64(uint64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19); +__ai __attribute__((target("neon"))) uint64x1_t vcgt_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 > __p1); return __ret; } -__ai float64x1_t vcvt_f64_s64(int64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3); +__ai __attribute__((target("neon"))) uint64x1_t vcgt_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 > __p1); return __ret; } -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42); +__ai __attribute__((target("neon"))) uint64x1_t vcgt_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 > __p1); return __ret; } -#else -__ai float64x2_t vcvt_f64_f32(float32x2_t __p0) { - float64x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 
0); - __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint64_t vcgtd_s64(int64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgtd_s64(__p0, __p1); return __ret; } -__ai float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42); +__ai __attribute__((target("neon"))) uint64_t vcgtd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgtd_u64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcgtd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgtd_f64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcgts_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcgts_f32(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vcgtzq_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 48); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x16_t vcgtzq_s8(int8x16_t __p0) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) { - float16x8_t __ret; - __ret = vcombine_f16(__p0, vcvt_f16_f32(__p1)); +__ai __attribute__((target("neon"))) uint64x2_t vcgtzq_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51); 
return __ret; } #else -__ai float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) { - float16x8_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __noswap_vcombine_f16(__rev0, __noswap_vcvt_f16_f32(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint64x2_t vcgtzq_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) { - float32x4_t __ret; - __ret = vcvt_f32_f16(vget_high_f16(__p0)); +__ai __attribute__((target("neon"))) uint32x4_t vcgtzq_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50); return __ret; } #else -__ai float32x4_t vcvt_high_f32_f16(float16x8_t __p0) { - float32x4_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vcvt_f32_f16(__noswap_vget_high_f16(__rev0)); +__ai __attribute__((target("neon"))) uint32x4_t vcgtzq_f32(float32x4_t __p0) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { - float32x4_t __ret; - __ret = vcombine_f32(__p0, vcvt_f32_f64(__p1)); +__ai __attribute__((target("neon"))) uint32x4_t vcgtzq_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 50); return 
__ret; } #else -__ai float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { - float32x4_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvt_f32_f64(__rev1)); +__ai __attribute__((target("neon"))) uint32x4_t vcgtzq_s32(int32x4_t __p0) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) { - float64x2_t __ret; - __ret = vcvt_f64_f32(vget_high_f32(__p0)); +__ai __attribute__((target("neon"))) uint64x2_t vcgtzq_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 51); return __ret; } #else -__ai float64x2_t vcvt_high_f64_f32(float32x4_t __p0) { - float64x2_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __noswap_vcvt_f64_f32(__noswap_vget_high_f32(__rev0)); +__ai __attribute__((target("neon"))) uint64x2_t vcgtzq_s64(int64x2_t __p0) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -#define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \ - float32_t __ret; \ - uint32_t __s0 = __p0; \ - __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \ - __ret; \ -}) -#define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \ - float32_t __ret; \ - int32_t __s0 = __p0; \ - __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \ - float64x2_t 
__ret; \ - uint64x2_t __s0 = __p0; \ - __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 51); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vcgtzq_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__p0, 49); + return __ret; +} #else -#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \ - float64x2_t __ret; \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vcgtzq_s16(int16x8_t __p0) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vcgtzq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \ - float64x2_t __ret; \ - int64x2_t __s0 = __p0; \ - __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 35); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vcgtz_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 16); + return __ret; +} #else -#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \ - float64x2_t __ret; \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 35); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vcgtz_s8(int8x8_t __p0) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) 
__builtin_neon_vcgtz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif -#define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \ - float64x1_t __ret; \ - uint64x1_t __s0 = __p0; \ - __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \ - __ret; \ -}) -#define vcvt_n_f64_s64(__p0, __p1) __extension__ ({ \ - float64x1_t __ret; \ - int64x1_t __s0 = __p0; \ - __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \ - __ret; \ -}) -#define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \ - float64_t __ret; \ - uint64_t __s0 = __p0; \ - __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \ - __ret; \ -}) -#define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \ - float64_t __ret; \ - int64_t __s0 = __p0; \ - __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \ - __ret; \ -}) -#define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \ - int32_t __ret; \ - float32_t __s0 = __p0; \ - __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint64x1_t vcgtz_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \ - int64x2_t __ret; \ - float64x2_t __s0 = __p0; \ - __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__s0, __p1, 35); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vcgtz_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18); + return __ret; +} #else -#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \ - int64x2_t __ret; \ - float64x2_t __s0 = __p0; \ - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__rev0, __p1, 35); \ - __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vcgtz_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif -#define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \ - int64x1_t __ret; \ - float64x1_t __s0 = __p0; \ - __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \ - __ret; \ -}) -#define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \ - int64_t __ret; \ - float64_t __s0 = __p0; \ - __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \ - __ret; \ -}) -#define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \ - uint32_t __ret; \ - float32_t __s0 = __p0; \ - __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \ - uint64x2_t __ret; \ - float64x2_t __s0 = __p0; \ - __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__s0, __p1, 51); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vcgtz_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 18); + return __ret; +} #else -#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \ - uint64x2_t __ret; \ - float64x2_t __s0 = __p0; \ - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__rev0, __p1, 51); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \ - uint64x1_t __ret; \ - float64x1_t __s0 = __p0; \ - __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \ - __ret; \ -}) -#define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \ - uint64_t __ret; \ - 
float64_t __s0 = __p0; \ - __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \ - __ret; \ -}) -__ai int32_t vcvts_s32_f32(float32_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0); +__ai __attribute__((target("neon"))) uint32x2_t vcgtz_s32(int32x2_t __p0) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int64_t vcvtd_s64_f64(float64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0); +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vcgtz_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__p0, 35); +__ai __attribute__((target("neon"))) uint16x4_t vcgtz_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__p0, 17); return __ret; } #else -__ai int64x2_t vcvtq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__rev0, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint16x4_t vcgtz_s16(int16x4_t __p0) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcgtz_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif -__ai int64x1_t vcvt_s64_f64(float64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3); +__ai __attribute__((target("neon"))) uint64_t vcgtzd_s64(int64_t 
__p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgtzd_s64(__p0); return __ret; } -__ai uint32_t vcvts_u32_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0); +__ai __attribute__((target("neon"))) uint64_t vcgtzd_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcgtzd_f64(__p0); return __ret; } -__ai uint64_t vcvtd_u64_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0); +__ai __attribute__((target("neon"))) uint32_t vcgtzs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcgtzs_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__p0, 51); + __ret = (uint64x2_t)(__p0 <= __p1); return __ret; } #else -__ai uint64x2_t vcvtq_u64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcleq_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__rev0, 51); + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 <= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai uint64x1_t vcvt_u64_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19); - return __ret; -} -__ai int32_t vcvtas_s32_f32(float32_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) 
__builtin_neon_vcvtaq_s64_v((int8x16_t)__p0, 35); +__ai __attribute__((target("neon"))) uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 <= __p1); return __ret; } #else -__ai int64x2_t vcvtaq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; +__ai __attribute__((target("neon"))) uint64x2_t vcleq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__rev0, 35); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 <= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai int64x1_t vcvta_s64_f64(float64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3); - return __ret; -} -__ai int64_t vcvtad_s64_f64(float64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0); - return __ret; -} -__ai uint32_t vcvtas_u32_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__p0, 51); + __ret = (uint64x2_t)(__p0 <= __p1); return __ret; } #else -__ai uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcleq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__rev0, 51); + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 
<= __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai uint64x1_t vcvta_u64_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vcle_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19); + __ret = (uint64x1_t)(__p0 <= __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vcle_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 <= __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x1_t vcle_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 <= __p1); return __ret; } -__ai uint64_t vcvtad_u64_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vcled_u64(uint64_t __p0, uint64_t __p1) { uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0); + __ret = (uint64_t) __builtin_neon_vcled_u64(__p0, __p1); return __ret; } -__ai int32_t vcvtms_s32_f32(float32_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0); +__ai __attribute__((target("neon"))) uint64_t vcled_s64(int64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcled_s64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcled_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcled_f64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcles_f32(float32_t __p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcles_f32(__p0, __p1); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__p0, 35); +__ai __attribute__((target("neon"))) uint8x16_t vclezq_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) 
__builtin_neon_vclezq_v((int8x16_t)__p0, 48); return __ret; } #else -__ai int64x2_t vcvtmq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; +__ai __attribute__((target("neon"))) uint8x16_t vclezq_s8(int8x16_t __p0) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vclezq_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vclezq_f64(float64x2_t __p0) { + uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__rev0, 35); + __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai int64x1_t vcvtm_s64_f64(float64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3); +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vclezq_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50); return __ret; } -__ai int64_t vcvtmd_s64_f64(float64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0); +#else +__ai __attribute__((target("neon"))) uint32x4_t vclezq_f32(float32x4_t __p0) { + uint32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint32_t 
vcvtms_u32_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vclezq_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 50); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vclezq_s32(int32x4_t __p0) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } +#endif + #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vclezq_s64(int64x2_t __p0) { uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__p0, 51); + __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 51); return __ret; } #else -__ai uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vclezq_s64(int64x2_t __p0) { uint64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__rev0, 51); + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai uint64x1_t vcvtm_u64_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19); +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vclezq_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__p0, 49); return __ret; } -__ai uint64_t vcvtmd_u64_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0); 
+#else +__ai __attribute__((target("neon"))) uint16x8_t vclezq_s16(int16x8_t __p0) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vclezq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int32_t vcvtns_s32_f32(float32_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vclez_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__p0, 16); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vclez_s8(int8x8_t __p0) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vclez_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__p0, 35); +__ai __attribute__((target("neon"))) uint32x2_t vclez_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18); return __ret; } #else -__ai int64x2_t vcvtnq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__rev0, 35); +__ai __attribute__((target("neon"))) uint32x2_t vclez_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) 
__builtin_neon_vclez_v((int8x8_t)__rev0, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai int64x1_t vcvtn_s64_f64(float64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3); +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vclez_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__p0, 18); return __ret; } -__ai int64_t vcvtnd_s64_f64(float64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0); +#else +__ai __attribute__((target("neon"))) uint32x2_t vclez_s32(int32x2_t __p0) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint32_t vcvtns_u32_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0); +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vclez_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vclez_v((int8x8_t)__p0, 19); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__p0, 51); +__ai __attribute__((target("neon"))) uint16x4_t vclez_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__p0, 17); return __ret; } #else -__ai uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) { - uint64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__rev0, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint16x4_t vclez_s16(int16x4_t __p0) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vclez_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif -__ai uint64x1_t vcvtn_u64_f64(float64x1_t __p0) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19); +__ai __attribute__((target("neon"))) uint64_t vclezd_s64(int64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vclezd_s64(__p0); return __ret; } -__ai uint64_t vcvtnd_u64_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) uint64_t vclezd_f64(float64_t __p0) { uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0); + __ret = (uint64_t) __builtin_neon_vclezd_f64(__p0); return __ret; } -__ai int32_t vcvtps_s32_f32(float32_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0); +__ai __attribute__((target("neon"))) uint32_t vclezs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vclezs_f32(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__p0, 35); +__ai __attribute__((target("neon"))) uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 < __p1); return __ret; } #else -__ai int64x2_t vcvtpq_s64_f64(float64x2_t __p0) { - int64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__rev0, 35); +__ai __attribute__((target("neon"))) uint64x2_t vcltq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 < __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif 
-__ai int64x1_t vcvtp_s64_f64(float64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3); - return __ret; -} -__ai int64_t vcvtpd_s64_f64(float64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0); +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0 < __p1); return __ret; } -__ai uint32_t vcvtps_u32_f32(float32_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0); +#else +__ai __attribute__((target("neon"))) uint64x2_t vcltq_f64(float64x2_t __p0, float64x2_t __p1) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 < __rev1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } +#endif + #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__p0, 51); + __ret = (uint64x2_t)(__p0 < __p1); return __ret; } #else -__ai uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x2_t vcltq_s64(int64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__rev0, 51); + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t)(__rev0 < __rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai uint64x1_t vcvtp_u64_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t 
vclt_u64(uint64x1_t __p0, uint64x1_t __p1) { uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19); + __ret = (uint64x1_t)(__p0 < __p1); return __ret; } -__ai uint64_t vcvtpd_u64_f64(float64_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0); +__ai __attribute__((target("neon"))) uint64x1_t vclt_f64(float64x1_t __p0, float64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 < __p1); return __ret; } -__ai float32_t vcvtxd_f32_f64(float64_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0); +__ai __attribute__((target("neon"))) uint64x1_t vclt_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t)(__p0 < __p1); return __ret; } -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42); +__ai __attribute__((target("neon"))) uint64_t vcltd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcltd_u64(__p0, __p1); return __ret; } -#else -__ai float32x2_t vcvtx_f32_f64(float64x2_t __p0) { - float32x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint64_t vcltd_s64(int64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcltd_s64(__p0, __p1); return __ret; } -__ai float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42); +__ai __attribute__((target("neon"))) uint64_t vcltd_f64(float64_t __p0, float64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcltd_f64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vclts_f32(float32_t 
__p0, float32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vclts_f32(__p0, __p1); return __ret; } -#endif - #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { - float32x4_t __ret; - __ret = vcombine_f32(__p0, vcvtx_f32_f64(__p1)); +__ai __attribute__((target("neon"))) uint8x16_t vcltzq_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 48); return __ret; } #else -__ai float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { - float32x4_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvtx_f32_f64(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8x16_t vcltzq_s8(int8x16_t __p0) { + uint8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = __p0 / __p1; +__ai __attribute__((target("neon"))) uint64x2_t vcltzq_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51); return __ret; } #else -__ai float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; +__ai __attribute__((target("neon"))) uint64x2_t vcltzq_f64(float64x2_t __p0) { + uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 / __rev1; + __ret = (uint64x2_t) 
__builtin_neon_vcltzq_v((int8x16_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = __p0 / __p1; +__ai __attribute__((target("neon"))) uint32x4_t vcltzq_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50); return __ret; } #else -__ai float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; +__ai __attribute__((target("neon"))) uint32x4_t vcltzq_f32(float32x4_t __p0) { + uint32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 / __rev1; + __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif -__ai float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = __p0 / __p1; - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = __p0 / __p1; +__ai __attribute__((target("neon"))) uint32x4_t vcltzq_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 50); return __ret; } #else -__ai float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 / __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint32x4_t vcltzq_s32(int32x4_t __p0) { + uint32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 50); + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \ - poly8_t __ret; \ - poly8x8_t __s0 = __p0; \ - __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcltzq_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 51); + return __ret; +} #else -#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \ - poly8_t __ret; \ - poly8x8_t __s0 = __p0; \ - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__rev0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcltzq_s64(int64x2_t __p0) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcltzq_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vduph_lane_p16(__p0, __p1) __extension__ ({ \ - poly16_t __ret; \ - poly16x4_t __s0 = __p0; \ - __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vcltzq_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vcltzq_v((int8x16_t)__p0, 49); + return __ret; +} #else -#define vduph_lane_p16(__p0, __p1) __extension__ ({ \ - poly16_t __ret; \ - poly16x4_t __s0 = __p0; \ - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__rev0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vcltzq_s16(int16x8_t __p0) { + uint16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) 
__builtin_neon_vcltzq_v((int8x16_t)__rev0, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \ - uint8_t __ret; \ - uint8x8_t __s0 = __p0; \ - __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vcltz_s8(int8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 16); + return __ret; +} #else -#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \ - uint8_t __ret; \ - uint8x8_t __s0 = __p0; \ - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x8_t vcltz_s8(int8x8_t __p0) { + uint8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) uint64x1_t vcltz_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vdups_lane_u32(__p0, __p1) __extension__ ({ \ - uint32_t __ret; \ - uint32x2_t __s0 = __p0; \ - __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vcltz_f32(float32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18); + return __ret; +} #else -#define vdups_lane_u32(__p0, __p1) __extension__ ({ \ - uint32_t __ret; \ - uint32x2_t __s0 = __p0; \ - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint32_t) 
__builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vcltz_f32(float32x2_t __p0) { + uint32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif -#define vdupd_lane_u64(__p0, __p1) __extension__ ({ \ - uint64_t __ret; \ - uint64x1_t __s0 = __p0; \ - __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vduph_lane_u16(__p0, __p1) __extension__ ({ \ - uint16_t __ret; \ - uint16x4_t __s0 = __p0; \ - __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vcltz_s32(int32x2_t __p0) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 18); + return __ret; +} #else -#define vduph_lane_u16(__p0, __p1) __extension__ ({ \ - uint16_t __ret; \ - uint16x4_t __s0 = __p0; \ - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vcltz_s32(int32x2_t __p0) { + uint32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 18); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) uint64x1_t vcltz_s64(int64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 19); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \ - int8_t __ret; \ - int8x8_t __s0 = __p0; \ - __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \ - __ret; \ -}) +__ai 
__attribute__((target("neon"))) uint16x4_t vcltz_s16(int16x4_t __p0) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__p0, 17); + return __ret; +} #else -#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \ - int8_t __ret; \ - int8x8_t __s0 = __p0; \ - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vcltz_s16(int16x4_t __p0) { + uint16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vcltz_v((int8x8_t)__rev0, 17); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif -#define vdupd_lane_f64(__p0, __p1) __extension__ ({ \ - float64_t __ret; \ - float64x1_t __s0 = __p0; \ - __ret = (float64_t) __builtin_neon_vdupd_lane_f64((float64x1_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint64_t vcltzd_s64(int64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcltzd_s64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcltzd_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcltzd_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcltzs_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcltzs_f32(__p0); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vdups_lane_f32(__p0, __p1) __extension__ ({ \ - float32_t __ret; \ - float32x2_t __s0 = __p0; \ - __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + return __ret; +} #else -#define vdups_lane_f32(__p0, __p1) __extension__ ({ \ - float32_t __ret; \ - 
float32x2_t __s0 = __p0; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__rev0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly64x2_t vcombine_p64(poly64x1_t __p0, poly64x1_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vdups_lane_s32(__p0, __p1) __extension__ ({ \ - int32_t __ret; \ - int32x2_t __s0 = __p0; \ - __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + return __ret; +} #else -#define vdups_lane_s32(__p0, __p1) __extension__ ({ \ - int32_t __ret; \ - int32x2_t __s0 = __p0; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float64x2_t vcombine_f64(float64x1_t __p0, float64x1_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif -#define vdupd_lane_s64(__p0, __p1) __extension__ ({ \ - int64_t __ret; \ - int64x1_t __s0 = __p0; \ - __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vduph_lane_s16(__p0, __p1) __extension__ ({ \ - int16_t __ret; \ - int16x4_t __s0 = __p0; \ - __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \ - __ret; \ +#define vcopyq_lane_p8(__p0_278, __p1_278, __p2_278, __p3_278) __extension__ ({ \ + poly8x16_t __ret_278; \ + poly8x16_t __s0_278 = __p0_278; \ + poly8x8_t __s2_278 = __p2_278; \ + __ret_278 = 
vsetq_lane_p8(vget_lane_p8(__s2_278, __p3_278), __s0_278, __p1_278); \ + __ret_278; \ }) #else -#define vduph_lane_s16(__p0, __p1) __extension__ ({ \ - int16_t __ret; \ - int16x4_t __s0 = __p0; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \ - __ret; \ +#define vcopyq_lane_p8(__p0_279, __p1_279, __p2_279, __p3_279) __extension__ ({ \ + poly8x16_t __ret_279; \ + poly8x16_t __s0_279 = __p0_279; \ + poly8x8_t __s2_279 = __p2_279; \ + poly8x16_t __rev0_279; __rev0_279 = __builtin_shufflevector(__s0_279, __s0_279, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev2_279; __rev2_279 = __builtin_shufflevector(__s2_279, __s2_279, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_279 = __noswap_vsetq_lane_p8(__noswap_vget_lane_p8(__rev2_279, __p3_279), __rev0_279, __p1_279); \ + __ret_279 = __builtin_shufflevector(__ret_279, __ret_279, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_279; \ }) #endif -#define vdup_lane_p64(__p0_343, __p1_343) __extension__ ({ \ - poly64x1_t __ret_343; \ - poly64x1_t __s0_343 = __p0_343; \ - __ret_343 = splat_lane_p64(__s0_343, __p1_343); \ - __ret_343; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_p64(__p0_344, __p1_344) __extension__ ({ \ - poly64x2_t __ret_344; \ - poly64x1_t __s0_344 = __p0_344; \ - __ret_344 = splatq_lane_p64(__s0_344, __p1_344); \ - __ret_344; \ +#define vcopyq_lane_p16(__p0_280, __p1_280, __p2_280, __p3_280) __extension__ ({ \ + poly16x8_t __ret_280; \ + poly16x8_t __s0_280 = __p0_280; \ + poly16x4_t __s2_280 = __p2_280; \ + __ret_280 = vsetq_lane_p16(vget_lane_p16(__s2_280, __p3_280), __s0_280, __p1_280); \ + __ret_280; \ }) #else -#define vdupq_lane_p64(__p0_345, __p1_345) __extension__ ({ \ - poly64x2_t __ret_345; \ - poly64x1_t __s0_345 = __p0_345; \ - __ret_345 = __noswap_splatq_lane_p64(__s0_345, __p1_345); \ - __ret_345 = __builtin_shufflevector(__ret_345, __ret_345, 1, 
0); \ - __ret_345; \ +#define vcopyq_lane_p16(__p0_281, __p1_281, __p2_281, __p3_281) __extension__ ({ \ + poly16x8_t __ret_281; \ + poly16x8_t __s0_281 = __p0_281; \ + poly16x4_t __s2_281 = __p2_281; \ + poly16x8_t __rev0_281; __rev0_281 = __builtin_shufflevector(__s0_281, __s0_281, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x4_t __rev2_281; __rev2_281 = __builtin_shufflevector(__s2_281, __s2_281, 3, 2, 1, 0); \ + __ret_281 = __noswap_vsetq_lane_p16(__noswap_vget_lane_p16(__rev2_281, __p3_281), __rev0_281, __p1_281); \ + __ret_281 = __builtin_shufflevector(__ret_281, __ret_281, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_281; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_lane_f64(__p0_346, __p1_346) __extension__ ({ \ - float64x2_t __ret_346; \ - float64x1_t __s0_346 = __p0_346; \ - __ret_346 = splatq_lane_f64(__s0_346, __p1_346); \ - __ret_346; \ +#define vcopyq_lane_u8(__p0_282, __p1_282, __p2_282, __p3_282) __extension__ ({ \ + uint8x16_t __ret_282; \ + uint8x16_t __s0_282 = __p0_282; \ + uint8x8_t __s2_282 = __p2_282; \ + __ret_282 = vsetq_lane_u8(vget_lane_u8(__s2_282, __p3_282), __s0_282, __p1_282); \ + __ret_282; \ }) #else -#define vdupq_lane_f64(__p0_347, __p1_347) __extension__ ({ \ - float64x2_t __ret_347; \ - float64x1_t __s0_347 = __p0_347; \ - __ret_347 = __noswap_splatq_lane_f64(__s0_347, __p1_347); \ - __ret_347 = __builtin_shufflevector(__ret_347, __ret_347, 1, 0); \ - __ret_347; \ +#define vcopyq_lane_u8(__p0_283, __p1_283, __p2_283, __p3_283) __extension__ ({ \ + uint8x16_t __ret_283; \ + uint8x16_t __s0_283 = __p0_283; \ + uint8x8_t __s2_283 = __p2_283; \ + uint8x16_t __rev0_283; __rev0_283 = __builtin_shufflevector(__s0_283, __s0_283, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_283; __rev2_283 = __builtin_shufflevector(__s2_283, __s2_283, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_283 = __noswap_vsetq_lane_u8(__noswap_vget_lane_u8(__rev2_283, __p3_283), __rev0_283, __p1_283); \ + __ret_283 = __builtin_shufflevector(__ret_283, 
__ret_283, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_283; \ }) #endif -#define vdup_lane_f64(__p0_348, __p1_348) __extension__ ({ \ - float64x1_t __ret_348; \ - float64x1_t __s0_348 = __p0_348; \ - __ret_348 = splat_lane_f64(__s0_348, __p1_348); \ - __ret_348; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \ - poly8_t __ret; \ - poly8x16_t __s0 = __p0; \ - __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__s0, __p1); \ - __ret; \ +#define vcopyq_lane_u32(__p0_284, __p1_284, __p2_284, __p3_284) __extension__ ({ \ + uint32x4_t __ret_284; \ + uint32x4_t __s0_284 = __p0_284; \ + uint32x2_t __s2_284 = __p2_284; \ + __ret_284 = vsetq_lane_u32(vget_lane_u32(__s2_284, __p3_284), __s0_284, __p1_284); \ + __ret_284; \ }) #else -#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \ - poly8_t __ret; \ - poly8x16_t __s0 = __p0; \ - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__rev0, __p1); \ - __ret; \ +#define vcopyq_lane_u32(__p0_285, __p1_285, __p2_285, __p3_285) __extension__ ({ \ + uint32x4_t __ret_285; \ + uint32x4_t __s0_285 = __p0_285; \ + uint32x2_t __s2_285 = __p2_285; \ + uint32x4_t __rev0_285; __rev0_285 = __builtin_shufflevector(__s0_285, __s0_285, 3, 2, 1, 0); \ + uint32x2_t __rev2_285; __rev2_285 = __builtin_shufflevector(__s2_285, __s2_285, 1, 0); \ + __ret_285 = __noswap_vsetq_lane_u32(__noswap_vget_lane_u32(__rev2_285, __p3_285), __rev0_285, __p1_285); \ + __ret_285 = __builtin_shufflevector(__ret_285, __ret_285, 3, 2, 1, 0); \ + __ret_285; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \ - poly16_t __ret; \ - poly16x8_t __s0 = __p0; \ - __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__s0, __p1); \ - __ret; \ +#define vcopyq_lane_u64(__p0_286, __p1_286, __p2_286, __p3_286) __extension__ ({ \ + 
uint64x2_t __ret_286; \ + uint64x2_t __s0_286 = __p0_286; \ + uint64x1_t __s2_286 = __p2_286; \ + __ret_286 = vsetq_lane_u64(vget_lane_u64(__s2_286, __p3_286), __s0_286, __p1_286); \ + __ret_286; \ }) #else -#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \ - poly16_t __ret; \ - poly16x8_t __s0 = __p0; \ - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__rev0, __p1); \ - __ret; \ +#define vcopyq_lane_u64(__p0_287, __p1_287, __p2_287, __p3_287) __extension__ ({ \ + uint64x2_t __ret_287; \ + uint64x2_t __s0_287 = __p0_287; \ + uint64x1_t __s2_287 = __p2_287; \ + uint64x2_t __rev0_287; __rev0_287 = __builtin_shufflevector(__s0_287, __s0_287, 1, 0); \ + __ret_287 = __noswap_vsetq_lane_u64(vget_lane_u64(__s2_287, __p3_287), __rev0_287, __p1_287); \ + __ret_287 = __builtin_shufflevector(__ret_287, __ret_287, 1, 0); \ + __ret_287; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \ - uint8_t __ret; \ - uint8x16_t __s0 = __p0; \ - __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \ - __ret; \ +#define vcopyq_lane_u16(__p0_288, __p1_288, __p2_288, __p3_288) __extension__ ({ \ + uint16x8_t __ret_288; \ + uint16x8_t __s0_288 = __p0_288; \ + uint16x4_t __s2_288 = __p2_288; \ + __ret_288 = vsetq_lane_u16(vget_lane_u16(__s2_288, __p3_288), __s0_288, __p1_288); \ + __ret_288; \ }) #else -#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \ - uint8_t __ret; \ - uint8x16_t __s0 = __p0; \ - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \ - __ret; \ +#define vcopyq_lane_u16(__p0_289, __p1_289, __p2_289, __p3_289) __extension__ ({ \ + uint16x8_t __ret_289; \ + uint16x8_t __s0_289 = __p0_289; \ + uint16x4_t __s2_289 = __p2_289; \ + uint16x8_t __rev0_289; __rev0_289 
= __builtin_shufflevector(__s0_289, __s0_289, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_289; __rev2_289 = __builtin_shufflevector(__s2_289, __s2_289, 3, 2, 1, 0); \ + __ret_289 = __noswap_vsetq_lane_u16(__noswap_vget_lane_u16(__rev2_289, __p3_289), __rev0_289, __p1_289); \ + __ret_289 = __builtin_shufflevector(__ret_289, __ret_289, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_289; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \ - uint32_t __ret; \ - uint32x4_t __s0 = __p0; \ - __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \ - __ret; \ +#define vcopyq_lane_s8(__p0_290, __p1_290, __p2_290, __p3_290) __extension__ ({ \ + int8x16_t __ret_290; \ + int8x16_t __s0_290 = __p0_290; \ + int8x8_t __s2_290 = __p2_290; \ + __ret_290 = vsetq_lane_s8(vget_lane_s8(__s2_290, __p3_290), __s0_290, __p1_290); \ + __ret_290; \ }) #else -#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \ - uint32_t __ret; \ - uint32x4_t __s0 = __p0; \ - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \ - __ret; \ +#define vcopyq_lane_s8(__p0_291, __p1_291, __p2_291, __p3_291) __extension__ ({ \ + int8x16_t __ret_291; \ + int8x16_t __s0_291 = __p0_291; \ + int8x8_t __s2_291 = __p2_291; \ + int8x16_t __rev0_291; __rev0_291 = __builtin_shufflevector(__s0_291, __s0_291, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_291; __rev2_291 = __builtin_shufflevector(__s2_291, __s2_291, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_291 = __noswap_vsetq_lane_s8(__noswap_vget_lane_s8(__rev2_291, __p3_291), __rev0_291, __p1_291); \ + __ret_291 = __builtin_shufflevector(__ret_291, __ret_291, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_291; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \ - uint64_t __ret; \ - uint64x2_t __s0 = __p0; \ - __ret = (uint64_t) 
__builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \ - __ret; \ +#define vcopyq_lane_f32(__p0_292, __p1_292, __p2_292, __p3_292) __extension__ ({ \ + float32x4_t __ret_292; \ + float32x4_t __s0_292 = __p0_292; \ + float32x2_t __s2_292 = __p2_292; \ + __ret_292 = vsetq_lane_f32(vget_lane_f32(__s2_292, __p3_292), __s0_292, __p1_292); \ + __ret_292; \ }) #else -#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \ - uint64_t __ret; \ - uint64x2_t __s0 = __p0; \ - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \ - __ret; \ +#define vcopyq_lane_f32(__p0_293, __p1_293, __p2_293, __p3_293) __extension__ ({ \ + float32x4_t __ret_293; \ + float32x4_t __s0_293 = __p0_293; \ + float32x2_t __s2_293 = __p2_293; \ + float32x4_t __rev0_293; __rev0_293 = __builtin_shufflevector(__s0_293, __s0_293, 3, 2, 1, 0); \ + float32x2_t __rev2_293; __rev2_293 = __builtin_shufflevector(__s2_293, __s2_293, 1, 0); \ + __ret_293 = __noswap_vsetq_lane_f32(__noswap_vget_lane_f32(__rev2_293, __p3_293), __rev0_293, __p1_293); \ + __ret_293 = __builtin_shufflevector(__ret_293, __ret_293, 3, 2, 1, 0); \ + __ret_293; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \ - uint16_t __ret; \ - uint16x8_t __s0 = __p0; \ - __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \ - __ret; \ +#define vcopyq_lane_s32(__p0_294, __p1_294, __p2_294, __p3_294) __extension__ ({ \ + int32x4_t __ret_294; \ + int32x4_t __s0_294 = __p0_294; \ + int32x2_t __s2_294 = __p2_294; \ + __ret_294 = vsetq_lane_s32(vget_lane_s32(__s2_294, __p3_294), __s0_294, __p1_294); \ + __ret_294; \ }) #else -#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \ - uint16_t __ret; \ - uint16x8_t __s0 = __p0; \ - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, 
__p1); \ - __ret; \ +#define vcopyq_lane_s32(__p0_295, __p1_295, __p2_295, __p3_295) __extension__ ({ \ + int32x4_t __ret_295; \ + int32x4_t __s0_295 = __p0_295; \ + int32x2_t __s2_295 = __p2_295; \ + int32x4_t __rev0_295; __rev0_295 = __builtin_shufflevector(__s0_295, __s0_295, 3, 2, 1, 0); \ + int32x2_t __rev2_295; __rev2_295 = __builtin_shufflevector(__s2_295, __s2_295, 1, 0); \ + __ret_295 = __noswap_vsetq_lane_s32(__noswap_vget_lane_s32(__rev2_295, __p3_295), __rev0_295, __p1_295); \ + __ret_295 = __builtin_shufflevector(__ret_295, __ret_295, 3, 2, 1, 0); \ + __ret_295; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \ - int8_t __ret; \ - int8x16_t __s0 = __p0; \ - __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \ - __ret; \ +#define vcopyq_lane_s64(__p0_296, __p1_296, __p2_296, __p3_296) __extension__ ({ \ + int64x2_t __ret_296; \ + int64x2_t __s0_296 = __p0_296; \ + int64x1_t __s2_296 = __p2_296; \ + __ret_296 = vsetq_lane_s64(vget_lane_s64(__s2_296, __p3_296), __s0_296, __p1_296); \ + __ret_296; \ }) #else -#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \ - int8_t __ret; \ - int8x16_t __s0 = __p0; \ - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \ - __ret; \ +#define vcopyq_lane_s64(__p0_297, __p1_297, __p2_297, __p3_297) __extension__ ({ \ + int64x2_t __ret_297; \ + int64x2_t __s0_297 = __p0_297; \ + int64x1_t __s2_297 = __p2_297; \ + int64x2_t __rev0_297; __rev0_297 = __builtin_shufflevector(__s0_297, __s0_297, 1, 0); \ + __ret_297 = __noswap_vsetq_lane_s64(vget_lane_s64(__s2_297, __p3_297), __rev0_297, __p1_297); \ + __ret_297 = __builtin_shufflevector(__ret_297, __ret_297, 1, 0); \ + __ret_297; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \ - float64_t __ret; \ - float64x2_t __s0 = __p0; \ - 
__ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__s0, __p1); \ - __ret; \ +#define vcopyq_lane_s16(__p0_298, __p1_298, __p2_298, __p3_298) __extension__ ({ \ + int16x8_t __ret_298; \ + int16x8_t __s0_298 = __p0_298; \ + int16x4_t __s2_298 = __p2_298; \ + __ret_298 = vsetq_lane_s16(vget_lane_s16(__s2_298, __p3_298), __s0_298, __p1_298); \ + __ret_298; \ }) #else -#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \ - float64_t __ret; \ - float64x2_t __s0 = __p0; \ - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__rev0, __p1); \ - __ret; \ +#define vcopyq_lane_s16(__p0_299, __p1_299, __p2_299, __p3_299) __extension__ ({ \ + int16x8_t __ret_299; \ + int16x8_t __s0_299 = __p0_299; \ + int16x4_t __s2_299 = __p2_299; \ + int16x8_t __rev0_299; __rev0_299 = __builtin_shufflevector(__s0_299, __s0_299, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_299; __rev2_299 = __builtin_shufflevector(__s2_299, __s2_299, 3, 2, 1, 0); \ + __ret_299 = __noswap_vsetq_lane_s16(__noswap_vget_lane_s16(__rev2_299, __p3_299), __rev0_299, __p1_299); \ + __ret_299 = __builtin_shufflevector(__ret_299, __ret_299, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_299; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \ - float32_t __ret; \ - float32x4_t __s0 = __p0; \ - __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__s0, __p1); \ - __ret; \ +#define vcopy_lane_p8(__p0_300, __p1_300, __p2_300, __p3_300) __extension__ ({ \ + poly8x8_t __ret_300; \ + poly8x8_t __s0_300 = __p0_300; \ + poly8x8_t __s2_300 = __p2_300; \ + __ret_300 = vset_lane_p8(vget_lane_p8(__s2_300, __p3_300), __s0_300, __p1_300); \ + __ret_300; \ }) #else -#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \ - float32_t __ret; \ - float32x4_t __s0 = __p0; \ - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float32_t) 
__builtin_neon_vdups_laneq_f32((float32x4_t)__rev0, __p1); \ - __ret; \ +#define vcopy_lane_p8(__p0_301, __p1_301, __p2_301, __p3_301) __extension__ ({ \ + poly8x8_t __ret_301; \ + poly8x8_t __s0_301 = __p0_301; \ + poly8x8_t __s2_301 = __p2_301; \ + poly8x8_t __rev0_301; __rev0_301 = __builtin_shufflevector(__s0_301, __s0_301, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x8_t __rev2_301; __rev2_301 = __builtin_shufflevector(__s2_301, __s2_301, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_301 = __noswap_vset_lane_p8(__noswap_vget_lane_p8(__rev2_301, __p3_301), __rev0_301, __p1_301); \ + __ret_301 = __builtin_shufflevector(__ret_301, __ret_301, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_301; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \ - int32_t __ret; \ - int32x4_t __s0 = __p0; \ - __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \ - __ret; \ +#define vcopy_lane_p16(__p0_302, __p1_302, __p2_302, __p3_302) __extension__ ({ \ + poly16x4_t __ret_302; \ + poly16x4_t __s0_302 = __p0_302; \ + poly16x4_t __s2_302 = __p2_302; \ + __ret_302 = vset_lane_p16(vget_lane_p16(__s2_302, __p3_302), __s0_302, __p1_302); \ + __ret_302; \ }) #else -#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \ - int32_t __ret; \ - int32x4_t __s0 = __p0; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \ - __ret; \ +#define vcopy_lane_p16(__p0_303, __p1_303, __p2_303, __p3_303) __extension__ ({ \ + poly16x4_t __ret_303; \ + poly16x4_t __s0_303 = __p0_303; \ + poly16x4_t __s2_303 = __p2_303; \ + poly16x4_t __rev0_303; __rev0_303 = __builtin_shufflevector(__s0_303, __s0_303, 3, 2, 1, 0); \ + poly16x4_t __rev2_303; __rev2_303 = __builtin_shufflevector(__s2_303, __s2_303, 3, 2, 1, 0); \ + __ret_303 = __noswap_vset_lane_p16(__noswap_vget_lane_p16(__rev2_303, __p3_303), __rev0_303, __p1_303); \ + __ret_303 = __builtin_shufflevector(__ret_303, 
__ret_303, 3, 2, 1, 0); \ + __ret_303; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \ - int64_t __ret; \ - int64x2_t __s0 = __p0; \ - __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \ - __ret; \ +#define vcopy_lane_u8(__p0_304, __p1_304, __p2_304, __p3_304) __extension__ ({ \ + uint8x8_t __ret_304; \ + uint8x8_t __s0_304 = __p0_304; \ + uint8x8_t __s2_304 = __p2_304; \ + __ret_304 = vset_lane_u8(vget_lane_u8(__s2_304, __p3_304), __s0_304, __p1_304); \ + __ret_304; \ }) #else -#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \ - int64_t __ret; \ - int64x2_t __s0 = __p0; \ - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \ - __ret; \ +#define vcopy_lane_u8(__p0_305, __p1_305, __p2_305, __p3_305) __extension__ ({ \ + uint8x8_t __ret_305; \ + uint8x8_t __s0_305 = __p0_305; \ + uint8x8_t __s2_305 = __p2_305; \ + uint8x8_t __rev0_305; __rev0_305 = __builtin_shufflevector(__s0_305, __s0_305, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_305; __rev2_305 = __builtin_shufflevector(__s2_305, __s2_305, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_305 = __noswap_vset_lane_u8(__noswap_vget_lane_u8(__rev2_305, __p3_305), __rev0_305, __p1_305); \ + __ret_305 = __builtin_shufflevector(__ret_305, __ret_305, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_305; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \ - int16_t __ret; \ - int16x8_t __s0 = __p0; \ - __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \ - __ret; \ +#define vcopy_lane_u32(__p0_306, __p1_306, __p2_306, __p3_306) __extension__ ({ \ + uint32x2_t __ret_306; \ + uint32x2_t __s0_306 = __p0_306; \ + uint32x2_t __s2_306 = __p2_306; \ + __ret_306 = vset_lane_u32(vget_lane_u32(__s2_306, __p3_306), __s0_306, __p1_306); \ + __ret_306; \ }) #else -#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \ - int16_t 
__ret; \ - int16x8_t __s0 = __p0; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \ - __ret; \ +#define vcopy_lane_u32(__p0_307, __p1_307, __p2_307, __p3_307) __extension__ ({ \ + uint32x2_t __ret_307; \ + uint32x2_t __s0_307 = __p0_307; \ + uint32x2_t __s2_307 = __p2_307; \ + uint32x2_t __rev0_307; __rev0_307 = __builtin_shufflevector(__s0_307, __s0_307, 1, 0); \ + uint32x2_t __rev2_307; __rev2_307 = __builtin_shufflevector(__s2_307, __s2_307, 1, 0); \ + __ret_307 = __noswap_vset_lane_u32(__noswap_vget_lane_u32(__rev2_307, __p3_307), __rev0_307, __p1_307); \ + __ret_307 = __builtin_shufflevector(__ret_307, __ret_307, 1, 0); \ + __ret_307; \ }) #endif +#define vcopy_lane_u64(__p0_308, __p1_308, __p2_308, __p3_308) __extension__ ({ \ + uint64x1_t __ret_308; \ + uint64x1_t __s0_308 = __p0_308; \ + uint64x1_t __s2_308 = __p2_308; \ + __ret_308 = vset_lane_u64(vget_lane_u64(__s2_308, __p3_308), __s0_308, __p1_308); \ + __ret_308; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_p8(__p0_349, __p1_349) __extension__ ({ \ - poly8x8_t __ret_349; \ - poly8x16_t __s0_349 = __p0_349; \ - __ret_349 = splat_laneq_p8(__s0_349, __p1_349); \ - __ret_349; \ +#define vcopy_lane_u16(__p0_309, __p1_309, __p2_309, __p3_309) __extension__ ({ \ + uint16x4_t __ret_309; \ + uint16x4_t __s0_309 = __p0_309; \ + uint16x4_t __s2_309 = __p2_309; \ + __ret_309 = vset_lane_u16(vget_lane_u16(__s2_309, __p3_309), __s0_309, __p1_309); \ + __ret_309; \ }) #else -#define vdup_laneq_p8(__p0_350, __p1_350) __extension__ ({ \ - poly8x8_t __ret_350; \ - poly8x16_t __s0_350 = __p0_350; \ - poly8x16_t __rev0_350; __rev0_350 = __builtin_shufflevector(__s0_350, __s0_350, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_350 = __noswap_splat_laneq_p8(__rev0_350, __p1_350); \ - __ret_350 = __builtin_shufflevector(__ret_350, __ret_350, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_350; 
\ +#define vcopy_lane_u16(__p0_310, __p1_310, __p2_310, __p3_310) __extension__ ({ \ + uint16x4_t __ret_310; \ + uint16x4_t __s0_310 = __p0_310; \ + uint16x4_t __s2_310 = __p2_310; \ + uint16x4_t __rev0_310; __rev0_310 = __builtin_shufflevector(__s0_310, __s0_310, 3, 2, 1, 0); \ + uint16x4_t __rev2_310; __rev2_310 = __builtin_shufflevector(__s2_310, __s2_310, 3, 2, 1, 0); \ + __ret_310 = __noswap_vset_lane_u16(__noswap_vget_lane_u16(__rev2_310, __p3_310), __rev0_310, __p1_310); \ + __ret_310 = __builtin_shufflevector(__ret_310, __ret_310, 3, 2, 1, 0); \ + __ret_310; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_p64(__p0_351, __p1_351) __extension__ ({ \ - poly64x1_t __ret_351; \ - poly64x2_t __s0_351 = __p0_351; \ - __ret_351 = splat_laneq_p64(__s0_351, __p1_351); \ - __ret_351; \ +#define vcopy_lane_s8(__p0_311, __p1_311, __p2_311, __p3_311) __extension__ ({ \ + int8x8_t __ret_311; \ + int8x8_t __s0_311 = __p0_311; \ + int8x8_t __s2_311 = __p2_311; \ + __ret_311 = vset_lane_s8(vget_lane_s8(__s2_311, __p3_311), __s0_311, __p1_311); \ + __ret_311; \ }) #else -#define vdup_laneq_p64(__p0_352, __p1_352) __extension__ ({ \ - poly64x1_t __ret_352; \ - poly64x2_t __s0_352 = __p0_352; \ - poly64x2_t __rev0_352; __rev0_352 = __builtin_shufflevector(__s0_352, __s0_352, 1, 0); \ - __ret_352 = __noswap_splat_laneq_p64(__rev0_352, __p1_352); \ - __ret_352; \ +#define vcopy_lane_s8(__p0_312, __p1_312, __p2_312, __p3_312) __extension__ ({ \ + int8x8_t __ret_312; \ + int8x8_t __s0_312 = __p0_312; \ + int8x8_t __s2_312 = __p2_312; \ + int8x8_t __rev0_312; __rev0_312 = __builtin_shufflevector(__s0_312, __s0_312, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_312; __rev2_312 = __builtin_shufflevector(__s2_312, __s2_312, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_312 = __noswap_vset_lane_s8(__noswap_vget_lane_s8(__rev2_312, __p3_312), __rev0_312, __p1_312); \ + __ret_312 = __builtin_shufflevector(__ret_312, __ret_312, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_312; \ }) #endif #ifdef 
__LITTLE_ENDIAN__ -#define vdup_laneq_p16(__p0_353, __p1_353) __extension__ ({ \ - poly16x4_t __ret_353; \ - poly16x8_t __s0_353 = __p0_353; \ - __ret_353 = splat_laneq_p16(__s0_353, __p1_353); \ - __ret_353; \ +#define vcopy_lane_f32(__p0_313, __p1_313, __p2_313, __p3_313) __extension__ ({ \ + float32x2_t __ret_313; \ + float32x2_t __s0_313 = __p0_313; \ + float32x2_t __s2_313 = __p2_313; \ + __ret_313 = vset_lane_f32(vget_lane_f32(__s2_313, __p3_313), __s0_313, __p1_313); \ + __ret_313; \ }) #else -#define vdup_laneq_p16(__p0_354, __p1_354) __extension__ ({ \ - poly16x4_t __ret_354; \ - poly16x8_t __s0_354 = __p0_354; \ - poly16x8_t __rev0_354; __rev0_354 = __builtin_shufflevector(__s0_354, __s0_354, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_354 = __noswap_splat_laneq_p16(__rev0_354, __p1_354); \ - __ret_354 = __builtin_shufflevector(__ret_354, __ret_354, 3, 2, 1, 0); \ - __ret_354; \ +#define vcopy_lane_f32(__p0_314, __p1_314, __p2_314, __p3_314) __extension__ ({ \ + float32x2_t __ret_314; \ + float32x2_t __s0_314 = __p0_314; \ + float32x2_t __s2_314 = __p2_314; \ + float32x2_t __rev0_314; __rev0_314 = __builtin_shufflevector(__s0_314, __s0_314, 1, 0); \ + float32x2_t __rev2_314; __rev2_314 = __builtin_shufflevector(__s2_314, __s2_314, 1, 0); \ + __ret_314 = __noswap_vset_lane_f32(__noswap_vget_lane_f32(__rev2_314, __p3_314), __rev0_314, __p1_314); \ + __ret_314 = __builtin_shufflevector(__ret_314, __ret_314, 1, 0); \ + __ret_314; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_p8(__p0_355, __p1_355) __extension__ ({ \ - poly8x16_t __ret_355; \ - poly8x16_t __s0_355 = __p0_355; \ - __ret_355 = splatq_laneq_p8(__s0_355, __p1_355); \ - __ret_355; \ +#define vcopy_lane_s32(__p0_315, __p1_315, __p2_315, __p3_315) __extension__ ({ \ + int32x2_t __ret_315; \ + int32x2_t __s0_315 = __p0_315; \ + int32x2_t __s2_315 = __p2_315; \ + __ret_315 = vset_lane_s32(vget_lane_s32(__s2_315, __p3_315), __s0_315, __p1_315); \ + __ret_315; \ }) #else -#define 
vdupq_laneq_p8(__p0_356, __p1_356) __extension__ ({ \ - poly8x16_t __ret_356; \ - poly8x16_t __s0_356 = __p0_356; \ - poly8x16_t __rev0_356; __rev0_356 = __builtin_shufflevector(__s0_356, __s0_356, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_356 = __noswap_splatq_laneq_p8(__rev0_356, __p1_356); \ - __ret_356 = __builtin_shufflevector(__ret_356, __ret_356, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_356; \ +#define vcopy_lane_s32(__p0_316, __p1_316, __p2_316, __p3_316) __extension__ ({ \ + int32x2_t __ret_316; \ + int32x2_t __s0_316 = __p0_316; \ + int32x2_t __s2_316 = __p2_316; \ + int32x2_t __rev0_316; __rev0_316 = __builtin_shufflevector(__s0_316, __s0_316, 1, 0); \ + int32x2_t __rev2_316; __rev2_316 = __builtin_shufflevector(__s2_316, __s2_316, 1, 0); \ + __ret_316 = __noswap_vset_lane_s32(__noswap_vget_lane_s32(__rev2_316, __p3_316), __rev0_316, __p1_316); \ + __ret_316 = __builtin_shufflevector(__ret_316, __ret_316, 1, 0); \ + __ret_316; \ }) #endif +#define vcopy_lane_s64(__p0_317, __p1_317, __p2_317, __p3_317) __extension__ ({ \ + int64x1_t __ret_317; \ + int64x1_t __s0_317 = __p0_317; \ + int64x1_t __s2_317 = __p2_317; \ + __ret_317 = vset_lane_s64(vget_lane_s64(__s2_317, __p3_317), __s0_317, __p1_317); \ + __ret_317; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_p64(__p0_357, __p1_357) __extension__ ({ \ - poly64x2_t __ret_357; \ - poly64x2_t __s0_357 = __p0_357; \ - __ret_357 = splatq_laneq_p64(__s0_357, __p1_357); \ - __ret_357; \ +#define vcopy_lane_s16(__p0_318, __p1_318, __p2_318, __p3_318) __extension__ ({ \ + int16x4_t __ret_318; \ + int16x4_t __s0_318 = __p0_318; \ + int16x4_t __s2_318 = __p2_318; \ + __ret_318 = vset_lane_s16(vget_lane_s16(__s2_318, __p3_318), __s0_318, __p1_318); \ + __ret_318; \ }) #else -#define vdupq_laneq_p64(__p0_358, __p1_358) __extension__ ({ \ - poly64x2_t __ret_358; \ - poly64x2_t __s0_358 = __p0_358; \ - poly64x2_t __rev0_358; __rev0_358 = __builtin_shufflevector(__s0_358, 
__s0_358, 1, 0); \ - __ret_358 = __noswap_splatq_laneq_p64(__rev0_358, __p1_358); \ - __ret_358 = __builtin_shufflevector(__ret_358, __ret_358, 1, 0); \ - __ret_358; \ +#define vcopy_lane_s16(__p0_319, __p1_319, __p2_319, __p3_319) __extension__ ({ \ + int16x4_t __ret_319; \ + int16x4_t __s0_319 = __p0_319; \ + int16x4_t __s2_319 = __p2_319; \ + int16x4_t __rev0_319; __rev0_319 = __builtin_shufflevector(__s0_319, __s0_319, 3, 2, 1, 0); \ + int16x4_t __rev2_319; __rev2_319 = __builtin_shufflevector(__s2_319, __s2_319, 3, 2, 1, 0); \ + __ret_319 = __noswap_vset_lane_s16(__noswap_vget_lane_s16(__rev2_319, __p3_319), __rev0_319, __p1_319); \ + __ret_319 = __builtin_shufflevector(__ret_319, __ret_319, 3, 2, 1, 0); \ + __ret_319; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_p16(__p0_359, __p1_359) __extension__ ({ \ - poly16x8_t __ret_359; \ - poly16x8_t __s0_359 = __p0_359; \ - __ret_359 = splatq_laneq_p16(__s0_359, __p1_359); \ - __ret_359; \ +#define vcopyq_laneq_p8(__p0_320, __p1_320, __p2_320, __p3_320) __extension__ ({ \ + poly8x16_t __ret_320; \ + poly8x16_t __s0_320 = __p0_320; \ + poly8x16_t __s2_320 = __p2_320; \ + __ret_320 = vsetq_lane_p8(vgetq_lane_p8(__s2_320, __p3_320), __s0_320, __p1_320); \ + __ret_320; \ }) #else -#define vdupq_laneq_p16(__p0_360, __p1_360) __extension__ ({ \ - poly16x8_t __ret_360; \ - poly16x8_t __s0_360 = __p0_360; \ - poly16x8_t __rev0_360; __rev0_360 = __builtin_shufflevector(__s0_360, __s0_360, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_360 = __noswap_splatq_laneq_p16(__rev0_360, __p1_360); \ - __ret_360 = __builtin_shufflevector(__ret_360, __ret_360, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_360; \ +#define vcopyq_laneq_p8(__p0_321, __p1_321, __p2_321, __p3_321) __extension__ ({ \ + poly8x16_t __ret_321; \ + poly8x16_t __s0_321 = __p0_321; \ + poly8x16_t __s2_321 = __p2_321; \ + poly8x16_t __rev0_321; __rev0_321 = __builtin_shufflevector(__s0_321, __s0_321, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t 
__rev2_321; __rev2_321 = __builtin_shufflevector(__s2_321, __s2_321, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_321 = __noswap_vsetq_lane_p8(__noswap_vgetq_lane_p8(__rev2_321, __p3_321), __rev0_321, __p1_321); \ + __ret_321 = __builtin_shufflevector(__ret_321, __ret_321, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_321; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_u8(__p0_361, __p1_361) __extension__ ({ \ - uint8x16_t __ret_361; \ - uint8x16_t __s0_361 = __p0_361; \ - __ret_361 = splatq_laneq_u8(__s0_361, __p1_361); \ - __ret_361; \ +#define vcopyq_laneq_p16(__p0_322, __p1_322, __p2_322, __p3_322) __extension__ ({ \ + poly16x8_t __ret_322; \ + poly16x8_t __s0_322 = __p0_322; \ + poly16x8_t __s2_322 = __p2_322; \ + __ret_322 = vsetq_lane_p16(vgetq_lane_p16(__s2_322, __p3_322), __s0_322, __p1_322); \ + __ret_322; \ }) #else -#define vdupq_laneq_u8(__p0_362, __p1_362) __extension__ ({ \ - uint8x16_t __ret_362; \ - uint8x16_t __s0_362 = __p0_362; \ - uint8x16_t __rev0_362; __rev0_362 = __builtin_shufflevector(__s0_362, __s0_362, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_362 = __noswap_splatq_laneq_u8(__rev0_362, __p1_362); \ - __ret_362 = __builtin_shufflevector(__ret_362, __ret_362, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_362; \ +#define vcopyq_laneq_p16(__p0_323, __p1_323, __p2_323, __p3_323) __extension__ ({ \ + poly16x8_t __ret_323; \ + poly16x8_t __s0_323 = __p0_323; \ + poly16x8_t __s2_323 = __p2_323; \ + poly16x8_t __rev0_323; __rev0_323 = __builtin_shufflevector(__s0_323, __s0_323, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly16x8_t __rev2_323; __rev2_323 = __builtin_shufflevector(__s2_323, __s2_323, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_323 = __noswap_vsetq_lane_p16(__noswap_vgetq_lane_p16(__rev2_323, __p3_323), __rev0_323, __p1_323); \ + __ret_323 = __builtin_shufflevector(__ret_323, __ret_323, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_323; \ }) #endif #ifdef __LITTLE_ENDIAN__ 
-#define vdupq_laneq_u32(__p0_363, __p1_363) __extension__ ({ \ - uint32x4_t __ret_363; \ - uint32x4_t __s0_363 = __p0_363; \ - __ret_363 = splatq_laneq_u32(__s0_363, __p1_363); \ - __ret_363; \ +#define vcopyq_laneq_u8(__p0_324, __p1_324, __p2_324, __p3_324) __extension__ ({ \ + uint8x16_t __ret_324; \ + uint8x16_t __s0_324 = __p0_324; \ + uint8x16_t __s2_324 = __p2_324; \ + __ret_324 = vsetq_lane_u8(vgetq_lane_u8(__s2_324, __p3_324), __s0_324, __p1_324); \ + __ret_324; \ }) #else -#define vdupq_laneq_u32(__p0_364, __p1_364) __extension__ ({ \ - uint32x4_t __ret_364; \ - uint32x4_t __s0_364 = __p0_364; \ - uint32x4_t __rev0_364; __rev0_364 = __builtin_shufflevector(__s0_364, __s0_364, 3, 2, 1, 0); \ - __ret_364 = __noswap_splatq_laneq_u32(__rev0_364, __p1_364); \ - __ret_364 = __builtin_shufflevector(__ret_364, __ret_364, 3, 2, 1, 0); \ - __ret_364; \ +#define vcopyq_laneq_u8(__p0_325, __p1_325, __p2_325, __p3_325) __extension__ ({ \ + uint8x16_t __ret_325; \ + uint8x16_t __s0_325 = __p0_325; \ + uint8x16_t __s2_325 = __p2_325; \ + uint8x16_t __rev0_325; __rev0_325 = __builtin_shufflevector(__s0_325, __s0_325, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_325; __rev2_325 = __builtin_shufflevector(__s2_325, __s2_325, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_325 = __noswap_vsetq_lane_u8(__noswap_vgetq_lane_u8(__rev2_325, __p3_325), __rev0_325, __p1_325); \ + __ret_325 = __builtin_shufflevector(__ret_325, __ret_325, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_325; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_u64(__p0_365, __p1_365) __extension__ ({ \ - uint64x2_t __ret_365; \ - uint64x2_t __s0_365 = __p0_365; \ - __ret_365 = splatq_laneq_u64(__s0_365, __p1_365); \ - __ret_365; \ +#define vcopyq_laneq_u32(__p0_326, __p1_326, __p2_326, __p3_326) __extension__ ({ \ + uint32x4_t __ret_326; \ + uint32x4_t __s0_326 = __p0_326; \ + uint32x4_t __s2_326 = __p2_326; \ + __ret_326 = 
vsetq_lane_u32(vgetq_lane_u32(__s2_326, __p3_326), __s0_326, __p1_326); \ + __ret_326; \ }) #else -#define vdupq_laneq_u64(__p0_366, __p1_366) __extension__ ({ \ - uint64x2_t __ret_366; \ - uint64x2_t __s0_366 = __p0_366; \ - uint64x2_t __rev0_366; __rev0_366 = __builtin_shufflevector(__s0_366, __s0_366, 1, 0); \ - __ret_366 = __noswap_splatq_laneq_u64(__rev0_366, __p1_366); \ - __ret_366 = __builtin_shufflevector(__ret_366, __ret_366, 1, 0); \ - __ret_366; \ +#define vcopyq_laneq_u32(__p0_327, __p1_327, __p2_327, __p3_327) __extension__ ({ \ + uint32x4_t __ret_327; \ + uint32x4_t __s0_327 = __p0_327; \ + uint32x4_t __s2_327 = __p2_327; \ + uint32x4_t __rev0_327; __rev0_327 = __builtin_shufflevector(__s0_327, __s0_327, 3, 2, 1, 0); \ + uint32x4_t __rev2_327; __rev2_327 = __builtin_shufflevector(__s2_327, __s2_327, 3, 2, 1, 0); \ + __ret_327 = __noswap_vsetq_lane_u32(__noswap_vgetq_lane_u32(__rev2_327, __p3_327), __rev0_327, __p1_327); \ + __ret_327 = __builtin_shufflevector(__ret_327, __ret_327, 3, 2, 1, 0); \ + __ret_327; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_u16(__p0_367, __p1_367) __extension__ ({ \ - uint16x8_t __ret_367; \ - uint16x8_t __s0_367 = __p0_367; \ - __ret_367 = splatq_laneq_u16(__s0_367, __p1_367); \ - __ret_367; \ +#define vcopyq_laneq_u64(__p0_328, __p1_328, __p2_328, __p3_328) __extension__ ({ \ + uint64x2_t __ret_328; \ + uint64x2_t __s0_328 = __p0_328; \ + uint64x2_t __s2_328 = __p2_328; \ + __ret_328 = vsetq_lane_u64(vgetq_lane_u64(__s2_328, __p3_328), __s0_328, __p1_328); \ + __ret_328; \ }) #else -#define vdupq_laneq_u16(__p0_368, __p1_368) __extension__ ({ \ - uint16x8_t __ret_368; \ - uint16x8_t __s0_368 = __p0_368; \ - uint16x8_t __rev0_368; __rev0_368 = __builtin_shufflevector(__s0_368, __s0_368, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_368 = __noswap_splatq_laneq_u16(__rev0_368, __p1_368); \ - __ret_368 = __builtin_shufflevector(__ret_368, __ret_368, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_368; \ +#define 
vcopyq_laneq_u64(__p0_329, __p1_329, __p2_329, __p3_329) __extension__ ({ \ + uint64x2_t __ret_329; \ + uint64x2_t __s0_329 = __p0_329; \ + uint64x2_t __s2_329 = __p2_329; \ + uint64x2_t __rev0_329; __rev0_329 = __builtin_shufflevector(__s0_329, __s0_329, 1, 0); \ + uint64x2_t __rev2_329; __rev2_329 = __builtin_shufflevector(__s2_329, __s2_329, 1, 0); \ + __ret_329 = __noswap_vsetq_lane_u64(__noswap_vgetq_lane_u64(__rev2_329, __p3_329), __rev0_329, __p1_329); \ + __ret_329 = __builtin_shufflevector(__ret_329, __ret_329, 1, 0); \ + __ret_329; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_s8(__p0_369, __p1_369) __extension__ ({ \ - int8x16_t __ret_369; \ - int8x16_t __s0_369 = __p0_369; \ - __ret_369 = splatq_laneq_s8(__s0_369, __p1_369); \ - __ret_369; \ +#define vcopyq_laneq_u16(__p0_330, __p1_330, __p2_330, __p3_330) __extension__ ({ \ + uint16x8_t __ret_330; \ + uint16x8_t __s0_330 = __p0_330; \ + uint16x8_t __s2_330 = __p2_330; \ + __ret_330 = vsetq_lane_u16(vgetq_lane_u16(__s2_330, __p3_330), __s0_330, __p1_330); \ + __ret_330; \ }) #else -#define vdupq_laneq_s8(__p0_370, __p1_370) __extension__ ({ \ - int8x16_t __ret_370; \ - int8x16_t __s0_370 = __p0_370; \ - int8x16_t __rev0_370; __rev0_370 = __builtin_shufflevector(__s0_370, __s0_370, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_370 = __noswap_splatq_laneq_s8(__rev0_370, __p1_370); \ - __ret_370 = __builtin_shufflevector(__ret_370, __ret_370, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_370; \ +#define vcopyq_laneq_u16(__p0_331, __p1_331, __p2_331, __p3_331) __extension__ ({ \ + uint16x8_t __ret_331; \ + uint16x8_t __s0_331 = __p0_331; \ + uint16x8_t __s2_331 = __p2_331; \ + uint16x8_t __rev0_331; __rev0_331 = __builtin_shufflevector(__s0_331, __s0_331, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_331; __rev2_331 = __builtin_shufflevector(__s2_331, __s2_331, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_331 = 
__noswap_vsetq_lane_u16(__noswap_vgetq_lane_u16(__rev2_331, __p3_331), __rev0_331, __p1_331); \ + __ret_331 = __builtin_shufflevector(__ret_331, __ret_331, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_331; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_f64(__p0_371, __p1_371) __extension__ ({ \ - float64x2_t __ret_371; \ - float64x2_t __s0_371 = __p0_371; \ - __ret_371 = splatq_laneq_f64(__s0_371, __p1_371); \ - __ret_371; \ +#define vcopyq_laneq_s8(__p0_332, __p1_332, __p2_332, __p3_332) __extension__ ({ \ + int8x16_t __ret_332; \ + int8x16_t __s0_332 = __p0_332; \ + int8x16_t __s2_332 = __p2_332; \ + __ret_332 = vsetq_lane_s8(vgetq_lane_s8(__s2_332, __p3_332), __s0_332, __p1_332); \ + __ret_332; \ }) #else -#define vdupq_laneq_f64(__p0_372, __p1_372) __extension__ ({ \ - float64x2_t __ret_372; \ - float64x2_t __s0_372 = __p0_372; \ - float64x2_t __rev0_372; __rev0_372 = __builtin_shufflevector(__s0_372, __s0_372, 1, 0); \ - __ret_372 = __noswap_splatq_laneq_f64(__rev0_372, __p1_372); \ - __ret_372 = __builtin_shufflevector(__ret_372, __ret_372, 1, 0); \ - __ret_372; \ +#define vcopyq_laneq_s8(__p0_333, __p1_333, __p2_333, __p3_333) __extension__ ({ \ + int8x16_t __ret_333; \ + int8x16_t __s0_333 = __p0_333; \ + int8x16_t __s2_333 = __p2_333; \ + int8x16_t __rev0_333; __rev0_333 = __builtin_shufflevector(__s0_333, __s0_333, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_333; __rev2_333 = __builtin_shufflevector(__s2_333, __s2_333, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_333 = __noswap_vsetq_lane_s8(__noswap_vgetq_lane_s8(__rev2_333, __p3_333), __rev0_333, __p1_333); \ + __ret_333 = __builtin_shufflevector(__ret_333, __ret_333, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_333; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_f32(__p0_373, __p1_373) __extension__ ({ \ - float32x4_t __ret_373; \ - float32x4_t __s0_373 = __p0_373; \ - __ret_373 = splatq_laneq_f32(__s0_373, __p1_373); \ 
- __ret_373; \ +#define vcopyq_laneq_f32(__p0_334, __p1_334, __p2_334, __p3_334) __extension__ ({ \ + float32x4_t __ret_334; \ + float32x4_t __s0_334 = __p0_334; \ + float32x4_t __s2_334 = __p2_334; \ + __ret_334 = vsetq_lane_f32(vgetq_lane_f32(__s2_334, __p3_334), __s0_334, __p1_334); \ + __ret_334; \ }) #else -#define vdupq_laneq_f32(__p0_374, __p1_374) __extension__ ({ \ - float32x4_t __ret_374; \ - float32x4_t __s0_374 = __p0_374; \ - float32x4_t __rev0_374; __rev0_374 = __builtin_shufflevector(__s0_374, __s0_374, 3, 2, 1, 0); \ - __ret_374 = __noswap_splatq_laneq_f32(__rev0_374, __p1_374); \ - __ret_374 = __builtin_shufflevector(__ret_374, __ret_374, 3, 2, 1, 0); \ - __ret_374; \ +#define vcopyq_laneq_f32(__p0_335, __p1_335, __p2_335, __p3_335) __extension__ ({ \ + float32x4_t __ret_335; \ + float32x4_t __s0_335 = __p0_335; \ + float32x4_t __s2_335 = __p2_335; \ + float32x4_t __rev0_335; __rev0_335 = __builtin_shufflevector(__s0_335, __s0_335, 3, 2, 1, 0); \ + float32x4_t __rev2_335; __rev2_335 = __builtin_shufflevector(__s2_335, __s2_335, 3, 2, 1, 0); \ + __ret_335 = __noswap_vsetq_lane_f32(__noswap_vgetq_lane_f32(__rev2_335, __p3_335), __rev0_335, __p1_335); \ + __ret_335 = __builtin_shufflevector(__ret_335, __ret_335, 3, 2, 1, 0); \ + __ret_335; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_f16(__p0_375, __p1_375) __extension__ ({ \ - float16x8_t __ret_375; \ - float16x8_t __s0_375 = __p0_375; \ - __ret_375 = splatq_laneq_f16(__s0_375, __p1_375); \ - __ret_375; \ +#define vcopyq_laneq_s32(__p0_336, __p1_336, __p2_336, __p3_336) __extension__ ({ \ + int32x4_t __ret_336; \ + int32x4_t __s0_336 = __p0_336; \ + int32x4_t __s2_336 = __p2_336; \ + __ret_336 = vsetq_lane_s32(vgetq_lane_s32(__s2_336, __p3_336), __s0_336, __p1_336); \ + __ret_336; \ }) #else -#define vdupq_laneq_f16(__p0_376, __p1_376) __extension__ ({ \ - float16x8_t __ret_376; \ - float16x8_t __s0_376 = __p0_376; \ - float16x8_t __rev0_376; __rev0_376 = 
__builtin_shufflevector(__s0_376, __s0_376, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_376 = __noswap_splatq_laneq_f16(__rev0_376, __p1_376); \ - __ret_376 = __builtin_shufflevector(__ret_376, __ret_376, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_376; \ +#define vcopyq_laneq_s32(__p0_337, __p1_337, __p2_337, __p3_337) __extension__ ({ \ + int32x4_t __ret_337; \ + int32x4_t __s0_337 = __p0_337; \ + int32x4_t __s2_337 = __p2_337; \ + int32x4_t __rev0_337; __rev0_337 = __builtin_shufflevector(__s0_337, __s0_337, 3, 2, 1, 0); \ + int32x4_t __rev2_337; __rev2_337 = __builtin_shufflevector(__s2_337, __s2_337, 3, 2, 1, 0); \ + __ret_337 = __noswap_vsetq_lane_s32(__noswap_vgetq_lane_s32(__rev2_337, __p3_337), __rev0_337, __p1_337); \ + __ret_337 = __builtin_shufflevector(__ret_337, __ret_337, 3, 2, 1, 0); \ + __ret_337; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_s32(__p0_377, __p1_377) __extension__ ({ \ - int32x4_t __ret_377; \ - int32x4_t __s0_377 = __p0_377; \ - __ret_377 = splatq_laneq_s32(__s0_377, __p1_377); \ - __ret_377; \ +#define vcopyq_laneq_s64(__p0_338, __p1_338, __p2_338, __p3_338) __extension__ ({ \ + int64x2_t __ret_338; \ + int64x2_t __s0_338 = __p0_338; \ + int64x2_t __s2_338 = __p2_338; \ + __ret_338 = vsetq_lane_s64(vgetq_lane_s64(__s2_338, __p3_338), __s0_338, __p1_338); \ + __ret_338; \ }) #else -#define vdupq_laneq_s32(__p0_378, __p1_378) __extension__ ({ \ - int32x4_t __ret_378; \ - int32x4_t __s0_378 = __p0_378; \ - int32x4_t __rev0_378; __rev0_378 = __builtin_shufflevector(__s0_378, __s0_378, 3, 2, 1, 0); \ - __ret_378 = __noswap_splatq_laneq_s32(__rev0_378, __p1_378); \ - __ret_378 = __builtin_shufflevector(__ret_378, __ret_378, 3, 2, 1, 0); \ - __ret_378; \ +#define vcopyq_laneq_s64(__p0_339, __p1_339, __p2_339, __p3_339) __extension__ ({ \ + int64x2_t __ret_339; \ + int64x2_t __s0_339 = __p0_339; \ + int64x2_t __s2_339 = __p2_339; \ + int64x2_t __rev0_339; __rev0_339 = __builtin_shufflevector(__s0_339, __s0_339, 1, 0); \ + int64x2_t __rev2_339; 
__rev2_339 = __builtin_shufflevector(__s2_339, __s2_339, 1, 0); \ + __ret_339 = __noswap_vsetq_lane_s64(__noswap_vgetq_lane_s64(__rev2_339, __p3_339), __rev0_339, __p1_339); \ + __ret_339 = __builtin_shufflevector(__ret_339, __ret_339, 1, 0); \ + __ret_339; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_s64(__p0_379, __p1_379) __extension__ ({ \ - int64x2_t __ret_379; \ - int64x2_t __s0_379 = __p0_379; \ - __ret_379 = splatq_laneq_s64(__s0_379, __p1_379); \ - __ret_379; \ +#define vcopyq_laneq_s16(__p0_340, __p1_340, __p2_340, __p3_340) __extension__ ({ \ + int16x8_t __ret_340; \ + int16x8_t __s0_340 = __p0_340; \ + int16x8_t __s2_340 = __p2_340; \ + __ret_340 = vsetq_lane_s16(vgetq_lane_s16(__s2_340, __p3_340), __s0_340, __p1_340); \ + __ret_340; \ }) #else -#define vdupq_laneq_s64(__p0_380, __p1_380) __extension__ ({ \ - int64x2_t __ret_380; \ - int64x2_t __s0_380 = __p0_380; \ - int64x2_t __rev0_380; __rev0_380 = __builtin_shufflevector(__s0_380, __s0_380, 1, 0); \ - __ret_380 = __noswap_splatq_laneq_s64(__rev0_380, __p1_380); \ - __ret_380 = __builtin_shufflevector(__ret_380, __ret_380, 1, 0); \ - __ret_380; \ +#define vcopyq_laneq_s16(__p0_341, __p1_341, __p2_341, __p3_341) __extension__ ({ \ + int16x8_t __ret_341; \ + int16x8_t __s0_341 = __p0_341; \ + int16x8_t __s2_341 = __p2_341; \ + int16x8_t __rev0_341; __rev0_341 = __builtin_shufflevector(__s0_341, __s0_341, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_341; __rev2_341 = __builtin_shufflevector(__s2_341, __s2_341, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_341 = __noswap_vsetq_lane_s16(__noswap_vgetq_lane_s16(__rev2_341, __p3_341), __rev0_341, __p1_341); \ + __ret_341 = __builtin_shufflevector(__ret_341, __ret_341, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_341; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdupq_laneq_s16(__p0_381, __p1_381) __extension__ ({ \ - int16x8_t __ret_381; \ - int16x8_t __s0_381 = __p0_381; \ - __ret_381 = splatq_laneq_s16(__s0_381, __p1_381); \ - __ret_381; \ +#define 
vcopy_laneq_p8(__p0_342, __p1_342, __p2_342, __p3_342) __extension__ ({ \ + poly8x8_t __ret_342; \ + poly8x8_t __s0_342 = __p0_342; \ + poly8x16_t __s2_342 = __p2_342; \ + __ret_342 = vset_lane_p8(vgetq_lane_p8(__s2_342, __p3_342), __s0_342, __p1_342); \ + __ret_342; \ }) #else -#define vdupq_laneq_s16(__p0_382, __p1_382) __extension__ ({ \ - int16x8_t __ret_382; \ - int16x8_t __s0_382 = __p0_382; \ - int16x8_t __rev0_382; __rev0_382 = __builtin_shufflevector(__s0_382, __s0_382, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_382 = __noswap_splatq_laneq_s16(__rev0_382, __p1_382); \ - __ret_382 = __builtin_shufflevector(__ret_382, __ret_382, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_382; \ +#define vcopy_laneq_p8(__p0_343, __p1_343, __p2_343, __p3_343) __extension__ ({ \ + poly8x8_t __ret_343; \ + poly8x8_t __s0_343 = __p0_343; \ + poly8x16_t __s2_343 = __p2_343; \ + poly8x8_t __rev0_343; __rev0_343 = __builtin_shufflevector(__s0_343, __s0_343, 7, 6, 5, 4, 3, 2, 1, 0); \ + poly8x16_t __rev2_343; __rev2_343 = __builtin_shufflevector(__s2_343, __s2_343, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_343 = __noswap_vset_lane_p8(__noswap_vgetq_lane_p8(__rev2_343, __p3_343), __rev0_343, __p1_343); \ + __ret_343 = __builtin_shufflevector(__ret_343, __ret_343, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_343; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_u8(__p0_383, __p1_383) __extension__ ({ \ - uint8x8_t __ret_383; \ - uint8x16_t __s0_383 = __p0_383; \ - __ret_383 = splat_laneq_u8(__s0_383, __p1_383); \ - __ret_383; \ +#define vcopy_laneq_p16(__p0_344, __p1_344, __p2_344, __p3_344) __extension__ ({ \ + poly16x4_t __ret_344; \ + poly16x4_t __s0_344 = __p0_344; \ + poly16x8_t __s2_344 = __p2_344; \ + __ret_344 = vset_lane_p16(vgetq_lane_p16(__s2_344, __p3_344), __s0_344, __p1_344); \ + __ret_344; \ }) #else -#define vdup_laneq_u8(__p0_384, __p1_384) __extension__ ({ \ - uint8x8_t __ret_384; \ - uint8x16_t __s0_384 = __p0_384; \ - uint8x16_t __rev0_384; __rev0_384 = 
__builtin_shufflevector(__s0_384, __s0_384, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_384 = __noswap_splat_laneq_u8(__rev0_384, __p1_384); \ - __ret_384 = __builtin_shufflevector(__ret_384, __ret_384, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_384; \ +#define vcopy_laneq_p16(__p0_345, __p1_345, __p2_345, __p3_345) __extension__ ({ \ + poly16x4_t __ret_345; \ + poly16x4_t __s0_345 = __p0_345; \ + poly16x8_t __s2_345 = __p2_345; \ + poly16x4_t __rev0_345; __rev0_345 = __builtin_shufflevector(__s0_345, __s0_345, 3, 2, 1, 0); \ + poly16x8_t __rev2_345; __rev2_345 = __builtin_shufflevector(__s2_345, __s2_345, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_345 = __noswap_vset_lane_p16(__noswap_vgetq_lane_p16(__rev2_345, __p3_345), __rev0_345, __p1_345); \ + __ret_345 = __builtin_shufflevector(__ret_345, __ret_345, 3, 2, 1, 0); \ + __ret_345; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_u32(__p0_385, __p1_385) __extension__ ({ \ - uint32x2_t __ret_385; \ - uint32x4_t __s0_385 = __p0_385; \ - __ret_385 = splat_laneq_u32(__s0_385, __p1_385); \ - __ret_385; \ +#define vcopy_laneq_u8(__p0_346, __p1_346, __p2_346, __p3_346) __extension__ ({ \ + uint8x8_t __ret_346; \ + uint8x8_t __s0_346 = __p0_346; \ + uint8x16_t __s2_346 = __p2_346; \ + __ret_346 = vset_lane_u8(vgetq_lane_u8(__s2_346, __p3_346), __s0_346, __p1_346); \ + __ret_346; \ }) #else -#define vdup_laneq_u32(__p0_386, __p1_386) __extension__ ({ \ - uint32x2_t __ret_386; \ - uint32x4_t __s0_386 = __p0_386; \ - uint32x4_t __rev0_386; __rev0_386 = __builtin_shufflevector(__s0_386, __s0_386, 3, 2, 1, 0); \ - __ret_386 = __noswap_splat_laneq_u32(__rev0_386, __p1_386); \ - __ret_386 = __builtin_shufflevector(__ret_386, __ret_386, 1, 0); \ - __ret_386; \ +#define vcopy_laneq_u8(__p0_347, __p1_347, __p2_347, __p3_347) __extension__ ({ \ + uint8x8_t __ret_347; \ + uint8x8_t __s0_347 = __p0_347; \ + uint8x16_t __s2_347 = __p2_347; \ + uint8x8_t __rev0_347; __rev0_347 = __builtin_shufflevector(__s0_347, __s0_347, 
7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x16_t __rev2_347; __rev2_347 = __builtin_shufflevector(__s2_347, __s2_347, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_347 = __noswap_vset_lane_u8(__noswap_vgetq_lane_u8(__rev2_347, __p3_347), __rev0_347, __p1_347); \ + __ret_347 = __builtin_shufflevector(__ret_347, __ret_347, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_347; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_u64(__p0_387, __p1_387) __extension__ ({ \ - uint64x1_t __ret_387; \ - uint64x2_t __s0_387 = __p0_387; \ - __ret_387 = splat_laneq_u64(__s0_387, __p1_387); \ - __ret_387; \ +#define vcopy_laneq_u32(__p0_348, __p1_348, __p2_348, __p3_348) __extension__ ({ \ + uint32x2_t __ret_348; \ + uint32x2_t __s0_348 = __p0_348; \ + uint32x4_t __s2_348 = __p2_348; \ + __ret_348 = vset_lane_u32(vgetq_lane_u32(__s2_348, __p3_348), __s0_348, __p1_348); \ + __ret_348; \ }) #else -#define vdup_laneq_u64(__p0_388, __p1_388) __extension__ ({ \ - uint64x1_t __ret_388; \ - uint64x2_t __s0_388 = __p0_388; \ - uint64x2_t __rev0_388; __rev0_388 = __builtin_shufflevector(__s0_388, __s0_388, 1, 0); \ - __ret_388 = __noswap_splat_laneq_u64(__rev0_388, __p1_388); \ - __ret_388; \ +#define vcopy_laneq_u32(__p0_349, __p1_349, __p2_349, __p3_349) __extension__ ({ \ + uint32x2_t __ret_349; \ + uint32x2_t __s0_349 = __p0_349; \ + uint32x4_t __s2_349 = __p2_349; \ + uint32x2_t __rev0_349; __rev0_349 = __builtin_shufflevector(__s0_349, __s0_349, 1, 0); \ + uint32x4_t __rev2_349; __rev2_349 = __builtin_shufflevector(__s2_349, __s2_349, 3, 2, 1, 0); \ + __ret_349 = __noswap_vset_lane_u32(__noswap_vgetq_lane_u32(__rev2_349, __p3_349), __rev0_349, __p1_349); \ + __ret_349 = __builtin_shufflevector(__ret_349, __ret_349, 1, 0); \ + __ret_349; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_u16(__p0_389, __p1_389) __extension__ ({ \ - uint16x4_t __ret_389; \ - uint16x8_t __s0_389 = __p0_389; \ - __ret_389 = splat_laneq_u16(__s0_389, __p1_389); \ - __ret_389; \ +#define 
vcopy_laneq_u64(__p0_350, __p1_350, __p2_350, __p3_350) __extension__ ({ \ + uint64x1_t __ret_350; \ + uint64x1_t __s0_350 = __p0_350; \ + uint64x2_t __s2_350 = __p2_350; \ + __ret_350 = vset_lane_u64(vgetq_lane_u64(__s2_350, __p3_350), __s0_350, __p1_350); \ + __ret_350; \ }) #else -#define vdup_laneq_u16(__p0_390, __p1_390) __extension__ ({ \ - uint16x4_t __ret_390; \ - uint16x8_t __s0_390 = __p0_390; \ - uint16x8_t __rev0_390; __rev0_390 = __builtin_shufflevector(__s0_390, __s0_390, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_390 = __noswap_splat_laneq_u16(__rev0_390, __p1_390); \ - __ret_390 = __builtin_shufflevector(__ret_390, __ret_390, 3, 2, 1, 0); \ - __ret_390; \ +#define vcopy_laneq_u64(__p0_351, __p1_351, __p2_351, __p3_351) __extension__ ({ \ + uint64x1_t __ret_351; \ + uint64x1_t __s0_351 = __p0_351; \ + uint64x2_t __s2_351 = __p2_351; \ + uint64x2_t __rev2_351; __rev2_351 = __builtin_shufflevector(__s2_351, __s2_351, 1, 0); \ + __ret_351 = vset_lane_u64(__noswap_vgetq_lane_u64(__rev2_351, __p3_351), __s0_351, __p1_351); \ + __ret_351; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_s8(__p0_391, __p1_391) __extension__ ({ \ - int8x8_t __ret_391; \ - int8x16_t __s0_391 = __p0_391; \ - __ret_391 = splat_laneq_s8(__s0_391, __p1_391); \ - __ret_391; \ +#define vcopy_laneq_u16(__p0_352, __p1_352, __p2_352, __p3_352) __extension__ ({ \ + uint16x4_t __ret_352; \ + uint16x4_t __s0_352 = __p0_352; \ + uint16x8_t __s2_352 = __p2_352; \ + __ret_352 = vset_lane_u16(vgetq_lane_u16(__s2_352, __p3_352), __s0_352, __p1_352); \ + __ret_352; \ }) #else -#define vdup_laneq_s8(__p0_392, __p1_392) __extension__ ({ \ - int8x8_t __ret_392; \ - int8x16_t __s0_392 = __p0_392; \ - int8x16_t __rev0_392; __rev0_392 = __builtin_shufflevector(__s0_392, __s0_392, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_392 = __noswap_splat_laneq_s8(__rev0_392, __p1_392); \ - __ret_392 = __builtin_shufflevector(__ret_392, __ret_392, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_392; \ 
+#define vcopy_laneq_u16(__p0_353, __p1_353, __p2_353, __p3_353) __extension__ ({ \ + uint16x4_t __ret_353; \ + uint16x4_t __s0_353 = __p0_353; \ + uint16x8_t __s2_353 = __p2_353; \ + uint16x4_t __rev0_353; __rev0_353 = __builtin_shufflevector(__s0_353, __s0_353, 3, 2, 1, 0); \ + uint16x8_t __rev2_353; __rev2_353 = __builtin_shufflevector(__s2_353, __s2_353, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_353 = __noswap_vset_lane_u16(__noswap_vgetq_lane_u16(__rev2_353, __p3_353), __rev0_353, __p1_353); \ + __ret_353 = __builtin_shufflevector(__ret_353, __ret_353, 3, 2, 1, 0); \ + __ret_353; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_f64(__p0_393, __p1_393) __extension__ ({ \ - float64x1_t __ret_393; \ - float64x2_t __s0_393 = __p0_393; \ - __ret_393 = splat_laneq_f64(__s0_393, __p1_393); \ - __ret_393; \ +#define vcopy_laneq_s8(__p0_354, __p1_354, __p2_354, __p3_354) __extension__ ({ \ + int8x8_t __ret_354; \ + int8x8_t __s0_354 = __p0_354; \ + int8x16_t __s2_354 = __p2_354; \ + __ret_354 = vset_lane_s8(vgetq_lane_s8(__s2_354, __p3_354), __s0_354, __p1_354); \ + __ret_354; \ }) #else -#define vdup_laneq_f64(__p0_394, __p1_394) __extension__ ({ \ - float64x1_t __ret_394; \ - float64x2_t __s0_394 = __p0_394; \ - float64x2_t __rev0_394; __rev0_394 = __builtin_shufflevector(__s0_394, __s0_394, 1, 0); \ - __ret_394 = __noswap_splat_laneq_f64(__rev0_394, __p1_394); \ - __ret_394; \ +#define vcopy_laneq_s8(__p0_355, __p1_355, __p2_355, __p3_355) __extension__ ({ \ + int8x8_t __ret_355; \ + int8x8_t __s0_355 = __p0_355; \ + int8x16_t __s2_355 = __p2_355; \ + int8x8_t __rev0_355; __rev0_355 = __builtin_shufflevector(__s0_355, __s0_355, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x16_t __rev2_355; __rev2_355 = __builtin_shufflevector(__s2_355, __s2_355, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_355 = __noswap_vset_lane_s8(__noswap_vgetq_lane_s8(__rev2_355, __p3_355), __rev0_355, __p1_355); \ + __ret_355 = __builtin_shufflevector(__ret_355, __ret_355, 7, 6, 5, 4, 
3, 2, 1, 0); \ + __ret_355; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_f32(__p0_395, __p1_395) __extension__ ({ \ - float32x2_t __ret_395; \ - float32x4_t __s0_395 = __p0_395; \ - __ret_395 = splat_laneq_f32(__s0_395, __p1_395); \ - __ret_395; \ +#define vcopy_laneq_f32(__p0_356, __p1_356, __p2_356, __p3_356) __extension__ ({ \ + float32x2_t __ret_356; \ + float32x2_t __s0_356 = __p0_356; \ + float32x4_t __s2_356 = __p2_356; \ + __ret_356 = vset_lane_f32(vgetq_lane_f32(__s2_356, __p3_356), __s0_356, __p1_356); \ + __ret_356; \ }) #else -#define vdup_laneq_f32(__p0_396, __p1_396) __extension__ ({ \ - float32x2_t __ret_396; \ - float32x4_t __s0_396 = __p0_396; \ - float32x4_t __rev0_396; __rev0_396 = __builtin_shufflevector(__s0_396, __s0_396, 3, 2, 1, 0); \ - __ret_396 = __noswap_splat_laneq_f32(__rev0_396, __p1_396); \ - __ret_396 = __builtin_shufflevector(__ret_396, __ret_396, 1, 0); \ - __ret_396; \ +#define vcopy_laneq_f32(__p0_357, __p1_357, __p2_357, __p3_357) __extension__ ({ \ + float32x2_t __ret_357; \ + float32x2_t __s0_357 = __p0_357; \ + float32x4_t __s2_357 = __p2_357; \ + float32x2_t __rev0_357; __rev0_357 = __builtin_shufflevector(__s0_357, __s0_357, 1, 0); \ + float32x4_t __rev2_357; __rev2_357 = __builtin_shufflevector(__s2_357, __s2_357, 3, 2, 1, 0); \ + __ret_357 = __noswap_vset_lane_f32(__noswap_vgetq_lane_f32(__rev2_357, __p3_357), __rev0_357, __p1_357); \ + __ret_357 = __builtin_shufflevector(__ret_357, __ret_357, 1, 0); \ + __ret_357; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_f16(__p0_397, __p1_397) __extension__ ({ \ - float16x4_t __ret_397; \ - float16x8_t __s0_397 = __p0_397; \ - __ret_397 = splat_laneq_f16(__s0_397, __p1_397); \ - __ret_397; \ +#define vcopy_laneq_s32(__p0_358, __p1_358, __p2_358, __p3_358) __extension__ ({ \ + int32x2_t __ret_358; \ + int32x2_t __s0_358 = __p0_358; \ + int32x4_t __s2_358 = __p2_358; \ + __ret_358 = vset_lane_s32(vgetq_lane_s32(__s2_358, __p3_358), __s0_358, __p1_358); \ + 
__ret_358; \ }) #else -#define vdup_laneq_f16(__p0_398, __p1_398) __extension__ ({ \ - float16x4_t __ret_398; \ - float16x8_t __s0_398 = __p0_398; \ - float16x8_t __rev0_398; __rev0_398 = __builtin_shufflevector(__s0_398, __s0_398, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_398 = __noswap_splat_laneq_f16(__rev0_398, __p1_398); \ - __ret_398 = __builtin_shufflevector(__ret_398, __ret_398, 3, 2, 1, 0); \ - __ret_398; \ +#define vcopy_laneq_s32(__p0_359, __p1_359, __p2_359, __p3_359) __extension__ ({ \ + int32x2_t __ret_359; \ + int32x2_t __s0_359 = __p0_359; \ + int32x4_t __s2_359 = __p2_359; \ + int32x2_t __rev0_359; __rev0_359 = __builtin_shufflevector(__s0_359, __s0_359, 1, 0); \ + int32x4_t __rev2_359; __rev2_359 = __builtin_shufflevector(__s2_359, __s2_359, 3, 2, 1, 0); \ + __ret_359 = __noswap_vset_lane_s32(__noswap_vgetq_lane_s32(__rev2_359, __p3_359), __rev0_359, __p1_359); \ + __ret_359 = __builtin_shufflevector(__ret_359, __ret_359, 1, 0); \ + __ret_359; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_s32(__p0_399, __p1_399) __extension__ ({ \ - int32x2_t __ret_399; \ - int32x4_t __s0_399 = __p0_399; \ - __ret_399 = splat_laneq_s32(__s0_399, __p1_399); \ - __ret_399; \ +#define vcopy_laneq_s64(__p0_360, __p1_360, __p2_360, __p3_360) __extension__ ({ \ + int64x1_t __ret_360; \ + int64x1_t __s0_360 = __p0_360; \ + int64x2_t __s2_360 = __p2_360; \ + __ret_360 = vset_lane_s64(vgetq_lane_s64(__s2_360, __p3_360), __s0_360, __p1_360); \ + __ret_360; \ }) #else -#define vdup_laneq_s32(__p0_400, __p1_400) __extension__ ({ \ - int32x2_t __ret_400; \ - int32x4_t __s0_400 = __p0_400; \ - int32x4_t __rev0_400; __rev0_400 = __builtin_shufflevector(__s0_400, __s0_400, 3, 2, 1, 0); \ - __ret_400 = __noswap_splat_laneq_s32(__rev0_400, __p1_400); \ - __ret_400 = __builtin_shufflevector(__ret_400, __ret_400, 1, 0); \ - __ret_400; \ +#define vcopy_laneq_s64(__p0_361, __p1_361, __p2_361, __p3_361) __extension__ ({ \ + int64x1_t __ret_361; \ + int64x1_t __s0_361 = __p0_361; \ + 
int64x2_t __s2_361 = __p2_361; \ + int64x2_t __rev2_361; __rev2_361 = __builtin_shufflevector(__s2_361, __s2_361, 1, 0); \ + __ret_361 = vset_lane_s64(__noswap_vgetq_lane_s64(__rev2_361, __p3_361), __s0_361, __p1_361); \ + __ret_361; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_s64(__p0_401, __p1_401) __extension__ ({ \ - int64x1_t __ret_401; \ - int64x2_t __s0_401 = __p0_401; \ - __ret_401 = splat_laneq_s64(__s0_401, __p1_401); \ - __ret_401; \ +#define vcopy_laneq_s16(__p0_362, __p1_362, __p2_362, __p3_362) __extension__ ({ \ + int16x4_t __ret_362; \ + int16x4_t __s0_362 = __p0_362; \ + int16x8_t __s2_362 = __p2_362; \ + __ret_362 = vset_lane_s16(vgetq_lane_s16(__s2_362, __p3_362), __s0_362, __p1_362); \ + __ret_362; \ }) #else -#define vdup_laneq_s64(__p0_402, __p1_402) __extension__ ({ \ - int64x1_t __ret_402; \ - int64x2_t __s0_402 = __p0_402; \ - int64x2_t __rev0_402; __rev0_402 = __builtin_shufflevector(__s0_402, __s0_402, 1, 0); \ - __ret_402 = __noswap_splat_laneq_s64(__rev0_402, __p1_402); \ - __ret_402; \ +#define vcopy_laneq_s16(__p0_363, __p1_363, __p2_363, __p3_363) __extension__ ({ \ + int16x4_t __ret_363; \ + int16x4_t __s0_363 = __p0_363; \ + int16x8_t __s2_363 = __p2_363; \ + int16x4_t __rev0_363; __rev0_363 = __builtin_shufflevector(__s0_363, __s0_363, 3, 2, 1, 0); \ + int16x8_t __rev2_363; __rev2_363 = __builtin_shufflevector(__s2_363, __s2_363, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_363 = __noswap_vset_lane_s16(__noswap_vgetq_lane_s16(__rev2_363, __p3_363), __rev0_363, __p1_363); \ + __ret_363 = __builtin_shufflevector(__ret_363, __ret_363, 3, 2, 1, 0); \ + __ret_363; \ }) #endif -#ifdef __LITTLE_ENDIAN__ -#define vdup_laneq_s16(__p0_403, __p1_403) __extension__ ({ \ - int16x4_t __ret_403; \ - int16x8_t __s0_403 = __p0_403; \ - __ret_403 = splat_laneq_s16(__s0_403, __p1_403); \ - __ret_403; \ +#define vcreate_p64(__p0) __extension__ ({ \ + poly64x1_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (poly64x1_t)(__promote); \ + __ret; \ 
}) -#else -#define vdup_laneq_s16(__p0_404, __p1_404) __extension__ ({ \ - int16x4_t __ret_404; \ - int16x8_t __s0_404 = __p0_404; \ - int16x8_t __rev0_404; __rev0_404 = __builtin_shufflevector(__s0_404, __s0_404, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_404 = __noswap_splat_laneq_s16(__rev0_404, __p1_404); \ - __ret_404 = __builtin_shufflevector(__ret_404, __ret_404, 3, 2, 1, 0); \ - __ret_404; \ +#define vcreate_f64(__p0) __extension__ ({ \ + float64x1_t __ret; \ + uint64_t __promote = __p0; \ + __ret = (float64x1_t)(__promote); \ + __ret; \ }) +__ai __attribute__((target("neon"))) float32_t vcvts_f32_s32(int32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vcvts_f32_s32(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32_t vcvts_f32_u32(uint32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vcvts_f32_u32(__p0); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vcvt_f32_f64(float64x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vcvt_f32_f64(float64x2_t __p0) { + float32x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t __noswap_vcvt_f32_f64(float64x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvt_f32_f64((int8x16_t)__p0, 9); + return __ret; +} #endif -__ai poly64x1_t vdup_n_p64(poly64_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t) {__p0}; +__ai __attribute__((target("neon"))) float64_t vcvtd_f64_s64(int64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vcvtd_f64_s64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64_t 
vcvtd_f64_u64(uint64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vcvtd_f64_u64(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vdupq_n_p64(poly64_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t) {__p0, __p0}; +__ai __attribute__((target("neon"))) float64x2_t vcvtq_f64_u64(uint64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 51); return __ret; } #else -__ai poly64x2_t vdupq_n_p64(poly64_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t) {__p0, __p0}; +__ai __attribute__((target("neon"))) float64x2_t vcvtq_f64_u64(uint64x2_t __p0) { + float64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vdupq_n_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vcvtq_f64_s64(int64x2_t __p0) { float64x2_t __ret; - __ret = (float64x2_t) {__p0, __p0}; + __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__p0, 35); return __ret; } #else -__ai float64x2_t vdupq_n_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vcvtq_f64_s64(int64x2_t __p0) { float64x2_t __ret; - __ret = (float64x2_t) {__p0, __p0}; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcvtq_f64_v((int8x16_t)__rev0, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai float64x1_t vdup_n_f64(float64_t __p0) { +__ai __attribute__((target("neon"))) float64x1_t vcvt_f64_u64(uint64x1_t __p0) { float64x1_t __ret; - __ret = (float64x1_t) {__p0}; + __ret = (float64x1_t) __builtin_neon_vcvt_f64_v((int8x8_t)__p0, 19); + return __ret; +} +__ai __attribute__((target("neon"))) float64x1_t vcvt_f64_s64(int64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) 
__builtin_neon_vcvt_f64_v((int8x8_t)__p0, 3); return __ret; } -#define vext_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1_t __ret; \ - poly64x1_t __s0 = __p0; \ - poly64x1_t __s1 = __p1; \ - __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2_t __ret; \ - poly64x2_t __s0 = __p0; \ - poly64x2_t __s1 = __p1; \ - __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float64x2_t vcvt_f64_f32(float32x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42); + return __ret; +} #else -#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2_t __ret; \ - poly64x2_t __s0 = __p0; \ - poly64x2_t __s1 = __p1; \ - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float64x2_t vcvt_f64_f32(float32x2_t __p0) { + float64x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x2_t __noswap_vcvt_f64_f32(float32x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vcvt_f64_f32((int8x8_t)__p0, 42); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2_t __ret; \ - float64x2_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, 
(int8x16_t)__s1, __p2, 42); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) { + float16x8_t __ret; + __ret = vcombine_f16(__p0, vcvt_f16_f32(__p1)); + return __ret; +} #else -#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2_t __ret; \ - float64x2_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 42); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float16x8_t vcvt_high_f16_f32(float16x4_t __p0, float32x4_t __p1) { + float16x8_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vcombine_f16(__rev0, __noswap_vcvt_f16_f32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif -#define vext_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1_t __ret; \ - float64x1_t __s0 = __p0; \ - float64x1_t __s1 = __p1; \ - __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); +__ai __attribute__((target("neon"))) float32x4_t vcvt_high_f32_f16(float16x8_t __p0) { + float32x4_t __ret; + __ret = vcvt_f32_f16(vget_high_f16(__p0)); return __ret; } #else -__ai float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - 
float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) float32x4_t vcvt_high_f32_f16(float16x8_t __p0) { + float32x4_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcvt_f32_f16(__noswap_vget_high_f16(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai float64x2_t __noswap_vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { + float32x4_t __ret; + __ret = vcombine_f32(__p0, vcvt_f32_f64(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vcvt_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { + float32x4_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvt_f32_f64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif -__ai float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vcvt_high_f64_f32(float32x4_t __p0) { + float64x2_t __ret; + __ret = vcvt_f64_f32(vget_high_f32(__p0)); return __ret; } -#define vfmad_lane_f64(__p0, __p1, __p2, __p3) 
__extension__ ({ \ - float64_t __ret; \ - float64_t __s0 = __p0; \ - float64_t __s1 = __p1; \ - float64x1_t __s2 = __p2; \ - __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (float64x1_t)__s2, __p3); \ - __ret; \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32_t __ret; \ - float32_t __s0 = __p0; \ - float32_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \ - __ret; \ -}) #else -#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ +__ai __attribute__((target("neon"))) float64x2_t vcvt_high_f64_f32(float32x4_t __p0) { + float64x2_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vcvt_f64_f32(__noswap_vget_high_f32(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#define vcvts_n_f32_u32(__p0, __p1) __extension__ ({ \ float32_t __ret; \ - float32_t __s0 = __p0; \ - float32_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__rev2, __p3); \ + uint32_t __s0 = __p0; \ + __ret = (float32_t) __builtin_neon_vcvts_n_f32_u32(__s0, __p1); \ __ret; \ }) -#define __noswap_vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ +#define vcvts_n_f32_s32(__p0, __p1) __extension__ ({ \ float32_t __ret; \ - float32_t __s0 = __p0; \ - float32_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \ + int32_t __s0 = __p0; \ + __ret = (float32_t) __builtin_neon_vcvts_n_f32_s32(__s0, __p1); \ __ret; \ }) -#endif - #ifdef __LITTLE_ENDIAN__ -#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ +#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \ float64x2_t __ret; \ - 
float64x2_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x1_t __s2 = __p2; \ - __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \ + uint64x2_t __s0 = __p0; \ + __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 51); \ __ret; \ }) #else -#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ +#define vcvtq_n_f64_u64(__p0, __p1) __extension__ ({ \ float64x2_t __ret; \ - float64x2_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x1_t __s2 = __p2; \ - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__s2, __p3, 42); \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 51); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) -#define __noswap_vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64x2_t __ret; \ - float64x2_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x1_t __s2 = __p2; \ - __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \ - __ret; \ -}) #endif #ifdef __LITTLE_ENDIAN__ -#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x4_t __ret; \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \ +#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__s0, __p1, 35); \ __ret; \ }) #else -#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ 
- float32x4_t __ret; \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 41); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x4_t __ret; \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \ +#define vcvtq_n_f64_s64(__p0, __p1) __extension__ ({ \ + float64x2_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float64x2_t) __builtin_neon_vcvtq_n_f64_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif -#define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ +#define vcvt_n_f64_u64(__p0, __p1) __extension__ ({ \ float64x1_t __ret; \ - float64x1_t __s0 = __p0; \ - float64x1_t __s1 = __p1; \ - float64x1_t __s2 = __p2; \ - __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \ + uint64x1_t __s0 = __p0; \ + __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 19); \ __ret; \ }) -#ifdef __LITTLE_ENDIAN__ -#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x2_t __ret; \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \ +#define vcvt_n_f64_s64(__p0, 
__p1) __extension__ ({ \ + float64x1_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (float64x1_t) __builtin_neon_vcvt_n_f64_v((int8x8_t)__s0, __p1, 3); \ __ret; \ }) -#else -#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x2_t __ret; \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 9); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ +#define vcvtd_n_f64_u64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + uint64_t __s0 = __p0; \ + __ret = (float64_t) __builtin_neon_vcvtd_n_f64_u64(__s0, __p1); \ __ret; \ }) -#define __noswap_vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x2_t __ret; \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x2_t __s2 = __p2; \ - __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \ +#define vcvtd_n_f64_s64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (float64_t) __builtin_neon_vcvtd_n_f64_s64(__s0, __p1); \ __ret; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64_t __ret; \ - float64_t __s0 = __p0; \ - float64_t __s1 = __p1; \ - float64x2_t __s2 = __p2; \ - __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \ +#define vcvts_n_s32_f32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + float32_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vcvts_n_s32_f32(__s0, __p1); \ __ret; \ }) -#else -#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64_t __ret; \ - 
float64_t __s0 = __p0; \ - float64_t __s1 = __p1; \ - float64x2_t __s2 = __p2; \ - float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__rev2, __p3); \ +#ifdef __LITTLE_ENDIAN__ +#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__s0, __p1, 35); \ __ret; \ }) -#define __noswap_vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64_t __ret; \ - float64_t __s0 = __p0; \ - float64_t __s1 = __p1; \ - float64x2_t __s2 = __p2; \ - __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \ +#else +#define vcvtq_n_s64_f64(__p0, __p1) __extension__ ({ \ + int64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64x2_t) __builtin_neon_vcvtq_n_s64_v((int8x16_t)__rev0, __p1, 35); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif -#ifdef __LITTLE_ENDIAN__ -#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32_t __ret; \ - float32_t __s0 = __p0; \ - float32_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \ +#define vcvt_n_s64_f64(__p0, __p1) __extension__ ({ \ + int64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + __ret = (int64x1_t) __builtin_neon_vcvt_n_s64_v((int8x8_t)__s0, __p1, 3); \ __ret; \ }) -#else -#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32_t __ret; \ - float32_t __s0 = __p0; \ - float32_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__rev2, __p3); \ +#define vcvtd_n_s64_f64(__p0, __p1) __extension__ ({ \ + 
int64_t __ret; \ + float64_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vcvtd_n_s64_f64(__s0, __p1); \ __ret; \ }) -#define __noswap_vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32_t __ret; \ +#define vcvts_n_u32_f32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ float32_t __s0 = __p0; \ - float32_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \ + __ret = (uint32_t) __builtin_neon_vcvts_n_u32_f32(__s0, __p1); \ __ret; \ }) -#endif - #ifdef __LITTLE_ENDIAN__ -#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64x2_t __ret; \ +#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ float64x2_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x2_t __s2 = __p2; \ - __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \ + __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__s0, __p1, 51); \ __ret; \ }) #else -#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64x2_t __ret; \ +#define vcvtq_n_u64_f64(__p0, __p1) __extension__ ({ \ + uint64x2_t __ret; \ float64x2_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x2_t __s2 = __p2; \ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 42); \ + __ret = (uint64x2_t) __builtin_neon_vcvtq_n_u64_v((int8x16_t)__rev0, __p1, 51); \ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) -#define __noswap_vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64x2_t __ret; \ - float64x2_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x2_t __s2 = __p2; \ - 
__ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \ - __ret; \ -}) #endif -#ifdef __LITTLE_ENDIAN__ -#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x4_t __ret; \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \ - __ret; \ -}) -#else -#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x4_t __ret; \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 41); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ +#define vcvt_n_u64_f64(__p0, __p1) __extension__ ({ \ + uint64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + __ret = (uint64x1_t) __builtin_neon_vcvt_n_u64_v((int8x8_t)__s0, __p1, 19); \ __ret; \ }) -#define __noswap_vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x4_t __ret; \ - float32x4_t __s0 = __p0; \ - float32x4_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \ +#define vcvtd_n_u64_f64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + float64_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vcvtd_n_u64_f64(__s0, __p1); \ __ret; \ }) -#endif - +__ai __attribute__((target("neon"))) int32_t vcvts_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvts_s32_f32(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t 
vcvtd_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtd_s64_f64(__p0); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64x1_t __ret; \ - float64x1_t __s0 = __p0; \ - float64x1_t __s1 = __p1; \ - float64x2_t __s2 = __p2; \ - __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \ - __ret; \ -}) -#else -#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64x1_t __ret; \ - float64x1_t __s0 = __p0; \ - float64x1_t __s1 = __p1; \ - float64x2_t __s2 = __p2; \ - float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__rev2, __p3, 10); \ - __ret; \ -}) -#define __noswap_vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ - float64x1_t __ret; \ - float64x1_t __s0 = __p0; \ - float64x1_t __s1 = __p1; \ - float64x2_t __s2 = __p2; \ - __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x2_t __ret; \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \ - __ret; \ -}) -#else -#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x2_t __ret; \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, 
(int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 9); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ - float32x2_t __ret; \ - float32x2_t __s0 = __p0; \ - float32x2_t __s1 = __p1; \ - float32x4_t __s2 = __p2; \ - __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { - float64x2_t __ret; - __ret = vfmaq_f64(__p0, __p1, (float64x2_t) {__p2, __p2}); +__ai __attribute__((target("neon"))) int64x2_t vcvtq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__p0, 35); return __ret; } #else -__ai float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { - float64x2_t __ret; +__ai __attribute__((target("neon"))) int64x2_t vcvtq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __noswap_vfmaq_f64(__rev0, __rev1, (float64x2_t) {__p2, __p2}); + __ret = (int64x2_t) __builtin_neon_vcvtq_s64_v((int8x16_t)__rev0, 35); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai float64x1_t vfma_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) { - float64x1_t __ret; - __ret = vfma_f64(__p0, __p1, (float64x1_t) {__p2}); +__ai __attribute__((target("neon"))) int64x1_t vcvt_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvt_s64_v((int8x8_t)__p0, 3); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcvts_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvts_u32_f32(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcvtd_u64_f64(float64_t 
__p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtd_u64_f64(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - __ret = vfmaq_f64(__p0, -__p1, __p2); +__ai __attribute__((target("neon"))) uint64x2_t vcvtq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__p0, 51); return __ret; } #else -__ai float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; +__ai __attribute__((target("neon"))) uint64x2_t vcvtq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = __noswap_vfmaq_f64(__rev0, -__rev1, __rev2); + __ret = (uint64x2_t) __builtin_neon_vcvtq_u64_v((int8x16_t)__rev0, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai float64x1_t vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { - float64x1_t __ret; - __ret = vfma_f64(__p0, -__p1, __p2); +__ai __attribute__((target("neon"))) uint64x1_t vcvt_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvt_u64_v((int8x8_t)__p0, 19); + return __ret; +} +__ai __attribute__((target("neon"))) int32_t vcvtas_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvtas_s32_f32(__p0); return __ret; } -#define vfmsd_lane_f64(__p0_405, __p1_405, __p2_405, __p3_405) __extension__ ({ \ - float64_t __ret_405; \ - float64_t __s0_405 = __p0_405; \ - float64_t __s1_405 = __p1_405; \ - float64x1_t __s2_405 = __p2_405; \ - __ret_405 = vfmad_lane_f64(__s0_405, -__s1_405, __s2_405, __p3_405); \ - __ret_405; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vfmss_lane_f32(__p0_406, __p1_406, __p2_406, __p3_406) 
__extension__ ({ \ - float32_t __ret_406; \ - float32_t __s0_406 = __p0_406; \ - float32_t __s1_406 = __p1_406; \ - float32x2_t __s2_406 = __p2_406; \ - __ret_406 = vfmas_lane_f32(__s0_406, -__s1_406, __s2_406, __p3_406); \ - __ret_406; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vcvtaq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__p0, 35); + return __ret; +} #else -#define vfmss_lane_f32(__p0_407, __p1_407, __p2_407, __p3_407) __extension__ ({ \ - float32_t __ret_407; \ - float32_t __s0_407 = __p0_407; \ - float32_t __s1_407 = __p1_407; \ - float32x2_t __s2_407 = __p2_407; \ - float32x2_t __rev2_407; __rev2_407 = __builtin_shufflevector(__s2_407, __s2_407, 1, 0); \ - __ret_407 = __noswap_vfmas_lane_f32(__s0_407, -__s1_407, __rev2_407, __p3_407); \ - __ret_407; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vcvtaq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vcvtaq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) int64x1_t vcvta_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvta_s64_v((int8x8_t)__p0, 3); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vcvtad_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtad_s64_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcvtas_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvtas_u32_f32(__p0); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vfmsq_lane_f64(__p0_408, __p1_408, __p2_408, __p3_408) __extension__ ({ \ - float64x2_t __ret_408; \ - float64x2_t __s0_408 = __p0_408; \ - float64x2_t __s1_408 = __p1_408; \ - float64x1_t __s2_408 = __p2_408; \ - __ret_408 = 
vfmaq_lane_f64(__s0_408, -__s1_408, __s2_408, __p3_408); \ - __ret_408; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__p0, 51); + return __ret; +} #else -#define vfmsq_lane_f64(__p0_409, __p1_409, __p2_409, __p3_409) __extension__ ({ \ - float64x2_t __ret_409; \ - float64x2_t __s0_409 = __p0_409; \ - float64x2_t __s1_409 = __p1_409; \ - float64x1_t __s2_409 = __p2_409; \ - float64x2_t __rev0_409; __rev0_409 = __builtin_shufflevector(__s0_409, __s0_409, 1, 0); \ - float64x2_t __rev1_409; __rev1_409 = __builtin_shufflevector(__s1_409, __s1_409, 1, 0); \ - __ret_409 = __noswap_vfmaq_lane_f64(__rev0_409, -__rev1_409, __s2_409, __p3_409); \ - __ret_409 = __builtin_shufflevector(__ret_409, __ret_409, 1, 0); \ - __ret_409; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcvtaq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcvtaq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) uint64x1_t vcvta_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvta_u64_v((int8x8_t)__p0, 19); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcvtad_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtad_u64_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32_t vcvtms_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvtms_s32_f32(__p0); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vfmsq_lane_f32(__p0_410, __p1_410, __p2_410, __p3_410) __extension__ ({ \ - float32x4_t __ret_410; \ - float32x4_t __s0_410 = __p0_410; \ - float32x4_t __s1_410 = __p1_410; \ - float32x2_t __s2_410 = __p2_410; \ - __ret_410 = 
vfmaq_lane_f32(__s0_410, -__s1_410, __s2_410, __p3_410); \ - __ret_410; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vcvtmq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__p0, 35); + return __ret; +} #else -#define vfmsq_lane_f32(__p0_411, __p1_411, __p2_411, __p3_411) __extension__ ({ \ - float32x4_t __ret_411; \ - float32x4_t __s0_411 = __p0_411; \ - float32x4_t __s1_411 = __p1_411; \ - float32x2_t __s2_411 = __p2_411; \ - float32x4_t __rev0_411; __rev0_411 = __builtin_shufflevector(__s0_411, __s0_411, 3, 2, 1, 0); \ - float32x4_t __rev1_411; __rev1_411 = __builtin_shufflevector(__s1_411, __s1_411, 3, 2, 1, 0); \ - float32x2_t __rev2_411; __rev2_411 = __builtin_shufflevector(__s2_411, __s2_411, 1, 0); \ - __ret_411 = __noswap_vfmaq_lane_f32(__rev0_411, -__rev1_411, __rev2_411, __p3_411); \ - __ret_411 = __builtin_shufflevector(__ret_411, __ret_411, 3, 2, 1, 0); \ - __ret_411; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vcvtmq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vcvtmq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif -#define vfms_lane_f64(__p0_412, __p1_412, __p2_412, __p3_412) __extension__ ({ \ - float64x1_t __ret_412; \ - float64x1_t __s0_412 = __p0_412; \ - float64x1_t __s1_412 = __p1_412; \ - float64x1_t __s2_412 = __p2_412; \ - __ret_412 = vfma_lane_f64(__s0_412, -__s1_412, __s2_412, __p3_412); \ - __ret_412; \ -}) +__ai __attribute__((target("neon"))) int64x1_t vcvtm_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvtm_s64_v((int8x8_t)__p0, 3); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vcvtmd_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtmd_s64_f64(__p0); + return __ret; +} +__ai 
__attribute__((target("neon"))) uint32_t vcvtms_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvtms_u32_f32(__p0); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vfms_lane_f32(__p0_413, __p1_413, __p2_413, __p3_413) __extension__ ({ \ - float32x2_t __ret_413; \ - float32x2_t __s0_413 = __p0_413; \ - float32x2_t __s1_413 = __p1_413; \ - float32x2_t __s2_413 = __p2_413; \ - __ret_413 = vfma_lane_f32(__s0_413, -__s1_413, __s2_413, __p3_413); \ - __ret_413; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__p0, 51); + return __ret; +} #else -#define vfms_lane_f32(__p0_414, __p1_414, __p2_414, __p3_414) __extension__ ({ \ - float32x2_t __ret_414; \ - float32x2_t __s0_414 = __p0_414; \ - float32x2_t __s1_414 = __p1_414; \ - float32x2_t __s2_414 = __p2_414; \ - float32x2_t __rev0_414; __rev0_414 = __builtin_shufflevector(__s0_414, __s0_414, 1, 0); \ - float32x2_t __rev1_414; __rev1_414 = __builtin_shufflevector(__s1_414, __s1_414, 1, 0); \ - float32x2_t __rev2_414; __rev2_414 = __builtin_shufflevector(__s2_414, __s2_414, 1, 0); \ - __ret_414 = __noswap_vfma_lane_f32(__rev0_414, -__rev1_414, __rev2_414, __p3_414); \ - __ret_414 = __builtin_shufflevector(__ret_414, __ret_414, 1, 0); \ - __ret_414; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcvtmq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcvtmq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) uint64x1_t vcvtm_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvtm_u64_v((int8x8_t)__p0, 19); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcvtmd_u64_f64(float64_t __p0) { + uint64_t 
__ret; + __ret = (uint64_t) __builtin_neon_vcvtmd_u64_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32_t vcvtns_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvtns_s32_f32(__p0); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vfmsd_laneq_f64(__p0_415, __p1_415, __p2_415, __p3_415) __extension__ ({ \ - float64_t __ret_415; \ - float64_t __s0_415 = __p0_415; \ - float64_t __s1_415 = __p1_415; \ - float64x2_t __s2_415 = __p2_415; \ - __ret_415 = vfmad_laneq_f64(__s0_415, -__s1_415, __s2_415, __p3_415); \ - __ret_415; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vcvtnq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__p0, 35); + return __ret; +} #else -#define vfmsd_laneq_f64(__p0_416, __p1_416, __p2_416, __p3_416) __extension__ ({ \ - float64_t __ret_416; \ - float64_t __s0_416 = __p0_416; \ - float64_t __s1_416 = __p1_416; \ - float64x2_t __s2_416 = __p2_416; \ - float64x2_t __rev2_416; __rev2_416 = __builtin_shufflevector(__s2_416, __s2_416, 1, 0); \ - __ret_416 = __noswap_vfmad_laneq_f64(__s0_416, -__s1_416, __rev2_416, __p3_416); \ - __ret_416; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vcvtnq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vcvtnq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) int64x1_t vcvtn_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvtn_s64_v((int8x8_t)__p0, 3); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vcvtnd_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtnd_s64_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcvtns_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret 
= (uint32_t) __builtin_neon_vcvtns_u32_f32(__p0); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vfmss_laneq_f32(__p0_417, __p1_417, __p2_417, __p3_417) __extension__ ({ \ - float32_t __ret_417; \ - float32_t __s0_417 = __p0_417; \ - float32_t __s1_417 = __p1_417; \ - float32x4_t __s2_417 = __p2_417; \ - __ret_417 = vfmas_laneq_f32(__s0_417, -__s1_417, __s2_417, __p3_417); \ - __ret_417; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__p0, 51); + return __ret; +} #else -#define vfmss_laneq_f32(__p0_418, __p1_418, __p2_418, __p3_418) __extension__ ({ \ - float32_t __ret_418; \ - float32_t __s0_418 = __p0_418; \ - float32_t __s1_418 = __p1_418; \ - float32x4_t __s2_418 = __p2_418; \ - float32x4_t __rev2_418; __rev2_418 = __builtin_shufflevector(__s2_418, __s2_418, 3, 2, 1, 0); \ - __ret_418 = __noswap_vfmas_laneq_f32(__s0_418, -__s1_418, __rev2_418, __p3_418); \ - __ret_418; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcvtnq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcvtnq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) uint64x1_t vcvtn_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvtn_u64_v((int8x8_t)__p0, 19); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcvtnd_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtnd_u64_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32_t vcvtps_s32_f32(float32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vcvtps_s32_f32(__p0); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vfmsq_laneq_f64(__p0_419, __p1_419, __p2_419, __p3_419) 
__extension__ ({ \ - float64x2_t __ret_419; \ - float64x2_t __s0_419 = __p0_419; \ - float64x2_t __s1_419 = __p1_419; \ - float64x2_t __s2_419 = __p2_419; \ - __ret_419 = vfmaq_laneq_f64(__s0_419, -__s1_419, __s2_419, __p3_419); \ - __ret_419; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vcvtpq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__p0, 35); + return __ret; +} #else -#define vfmsq_laneq_f64(__p0_420, __p1_420, __p2_420, __p3_420) __extension__ ({ \ - float64x2_t __ret_420; \ - float64x2_t __s0_420 = __p0_420; \ - float64x2_t __s1_420 = __p1_420; \ - float64x2_t __s2_420 = __p2_420; \ - float64x2_t __rev0_420; __rev0_420 = __builtin_shufflevector(__s0_420, __s0_420, 1, 0); \ - float64x2_t __rev1_420; __rev1_420 = __builtin_shufflevector(__s1_420, __s1_420, 1, 0); \ - float64x2_t __rev2_420; __rev2_420 = __builtin_shufflevector(__s2_420, __s2_420, 1, 0); \ - __ret_420 = __noswap_vfmaq_laneq_f64(__rev0_420, -__rev1_420, __rev2_420, __p3_420); \ - __ret_420 = __builtin_shufflevector(__ret_420, __ret_420, 1, 0); \ - __ret_420; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vcvtpq_s64_f64(float64x2_t __p0) { + int64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vcvtpq_s64_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) int64x1_t vcvtp_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vcvtp_s64_v((int8x8_t)__p0, 3); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vcvtpd_s64_f64(float64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vcvtpd_s64_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vcvtps_u32_f32(float32_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vcvtps_u32_f32(__p0); + return __ret; +} #ifdef 
__LITTLE_ENDIAN__ -#define vfmsq_laneq_f32(__p0_421, __p1_421, __p2_421, __p3_421) __extension__ ({ \ - float32x4_t __ret_421; \ - float32x4_t __s0_421 = __p0_421; \ - float32x4_t __s1_421 = __p1_421; \ - float32x4_t __s2_421 = __p2_421; \ - __ret_421 = vfmaq_laneq_f32(__s0_421, -__s1_421, __s2_421, __p3_421); \ - __ret_421; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__p0, 51); + return __ret; +} #else -#define vfmsq_laneq_f32(__p0_422, __p1_422, __p2_422, __p3_422) __extension__ ({ \ - float32x4_t __ret_422; \ - float32x4_t __s0_422 = __p0_422; \ - float32x4_t __s1_422 = __p1_422; \ - float32x4_t __s2_422 = __p2_422; \ - float32x4_t __rev0_422; __rev0_422 = __builtin_shufflevector(__s0_422, __s0_422, 3, 2, 1, 0); \ - float32x4_t __rev1_422; __rev1_422 = __builtin_shufflevector(__s1_422, __s1_422, 3, 2, 1, 0); \ - float32x4_t __rev2_422; __rev2_422 = __builtin_shufflevector(__s2_422, __s2_422, 3, 2, 1, 0); \ - __ret_422 = __noswap_vfmaq_laneq_f32(__rev0_422, -__rev1_422, __rev2_422, __p3_422); \ - __ret_422 = __builtin_shufflevector(__ret_422, __ret_422, 3, 2, 1, 0); \ - __ret_422; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vcvtpq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vcvtpq_u64_v((int8x16_t)__rev0, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif +__ai __attribute__((target("neon"))) uint64x1_t vcvtp_u64_f64(float64x1_t __p0) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vcvtp_u64_v((int8x8_t)__p0, 19); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vcvtpd_u64_f64(float64_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vcvtpd_u64_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32_t 
vcvtxd_f32_f64(float64_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vcvtxd_f32_f64(__p0); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vfms_laneq_f64(__p0_423, __p1_423, __p2_423, __p3_423) __extension__ ({ \ - float64x1_t __ret_423; \ - float64x1_t __s0_423 = __p0_423; \ - float64x1_t __s1_423 = __p1_423; \ - float64x2_t __s2_423 = __p2_423; \ - __ret_423 = vfma_laneq_f64(__s0_423, -__s1_423, __s2_423, __p3_423); \ - __ret_423; \ -}) +__ai __attribute__((target("neon"))) float32x2_t vcvtx_f32_f64(float64x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42); + return __ret; +} #else -#define vfms_laneq_f64(__p0_424, __p1_424, __p2_424, __p3_424) __extension__ ({ \ - float64x1_t __ret_424; \ - float64x1_t __s0_424 = __p0_424; \ - float64x1_t __s1_424 = __p1_424; \ - float64x2_t __s2_424 = __p2_424; \ - float64x2_t __rev2_424; __rev2_424 = __builtin_shufflevector(__s2_424, __s2_424, 1, 0); \ - __ret_424 = __noswap_vfma_laneq_f64(__s0_424, -__s1_424, __rev2_424, __p3_424); \ - __ret_424; \ -}) +__ai __attribute__((target("neon"))) float32x2_t vcvtx_f32_f64(float64x2_t __p0) { + float32x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t __noswap_vcvtx_f32_f64(float64x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vcvtx_f32_v((int8x16_t)__p0, 42); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vfms_laneq_f32(__p0_425, __p1_425, __p2_425, __p3_425) __extension__ ({ \ - float32x2_t __ret_425; \ - float32x2_t __s0_425 = __p0_425; \ - float32x2_t __s1_425 = __p1_425; \ - float32x4_t __s2_425 = __p2_425; \ - __ret_425 = vfma_laneq_f32(__s0_425, -__s1_425, __s2_425, __p3_425); \ - __ret_425; \ -}) +__ai 
__attribute__((target("neon"))) float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { + float32x4_t __ret; + __ret = vcombine_f32(__p0, vcvtx_f32_f64(__p1)); + return __ret; +} #else -#define vfms_laneq_f32(__p0_426, __p1_426, __p2_426, __p3_426) __extension__ ({ \ - float32x2_t __ret_426; \ - float32x2_t __s0_426 = __p0_426; \ - float32x2_t __s1_426 = __p1_426; \ - float32x4_t __s2_426 = __p2_426; \ - float32x2_t __rev0_426; __rev0_426 = __builtin_shufflevector(__s0_426, __s0_426, 1, 0); \ - float32x2_t __rev1_426; __rev1_426 = __builtin_shufflevector(__s1_426, __s1_426, 1, 0); \ - float32x4_t __rev2_426; __rev2_426 = __builtin_shufflevector(__s2_426, __s2_426, 3, 2, 1, 0); \ - __ret_426 = __noswap_vfma_laneq_f32(__rev0_426, -__rev1_426, __rev2_426, __p3_426); \ - __ret_426 = __builtin_shufflevector(__ret_426, __ret_426, 1, 0); \ - __ret_426; \ -}) +__ai __attribute__((target("neon"))) float32x4_t vcvtx_high_f32_f64(float32x2_t __p0, float64x2_t __p1) { + float32x4_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vcombine_f32(__rev0, __noswap_vcvtx_f32_f64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { +__ai __attribute__((target("neon"))) float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; - __ret = vfmaq_f64(__p0, -__p1, (float64x2_t) {__p2, __p2}); + __ret = __p0 / __p1; return __ret; } #else -__ai float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { +__ai __attribute__((target("neon"))) float64x2_t vdivq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret 
= __noswap_vfmaq_f64(__rev0, -__rev1, (float64x2_t) {__p2, __p2}); + __ret = __rev0 / __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; - __ret = vfmaq_f32(__p0, -__p1, (float32x4_t) {__p2, __p2, __p2, __p2}); + __ret = __p0 / __p1; return __ret; } #else -__ai float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { +__ai __attribute__((target("neon"))) float32x4_t vdivq_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __noswap_vfmaq_f32(__rev0, -__rev1, (float32x4_t) {__p2, __p2, __p2, __p2}); + __ret = __rev0 / __rev1; __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif -__ai float64x1_t vfms_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) { +__ai __attribute__((target("neon"))) float64x1_t vdiv_f64(float64x1_t __p0, float64x1_t __p1) { float64x1_t __ret; - __ret = vfma_f64(__p0, -__p1, (float64x1_t) {__p2}); + __ret = __p0 / __p1; return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; - __ret = vfma_f32(__p0, -__p1, (float32x2_t) {__p2, __p2}); + __ret = __p0 / __p1; return __ret; } #else -__ai float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { +__ai __attribute__((target("neon"))) float32x2_t vdiv_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __noswap_vfma_f32(__rev0, -__rev1, (float32x2_t) {__p2, __p2}); + __ret = __rev0 / __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly64x1_t vget_high_p64(poly64x2_t __p0) { - poly64x1_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1); - return __ret; -} -#else -__ai poly64x1_t vget_high_p64(poly64x2_t __p0) { - poly64x1_t __ret; - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 1); - return __ret; -} -__ai poly64x1_t __noswap_vget_high_p64(poly64x2_t __p0) { - poly64x1_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vget_high_f64(float64x2_t __p0) { - float64x1_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 1); - return __ret; -} -#else -__ai float64x1_t vget_high_f64(float64x2_t __p0) { - float64x1_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 1); - return __ret; -} -#endif - -#define vget_lane_p64(__p0, __p1) __extension__ ({ \ - poly64_t __ret; \ - poly64x1_t __s0 = __p0; \ - __ret = (poly64_t) __builtin_neon_vget_lane_i64((poly64x1_t)__s0, __p1); \ - __ret; \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \ - poly64_t __ret; \ - poly64x2_t __s0 = __p0; \ - __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \ +#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x8_t __s0 = __p0; \ + __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__s0, __p1); \ __ret; \ }) #else -#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \ - poly64_t __ret; \ - poly64x2_t __s0 = __p0; \ - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (poly64_t) 
__builtin_neon_vgetq_lane_i64((poly64x2_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_p64(__p0, __p1) __extension__ ({ \ - poly64_t __ret; \ - poly64x2_t __s0 = __p0; \ - __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \ +#define vdupb_lane_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x8_t __s0 = __p0; \ + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8_t) __builtin_neon_vdupb_lane_i8((poly8x8_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \ - float64_t __ret; \ - float64x2_t __s0 = __p0; \ - __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \ +#define vduph_lane_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x4_t __s0 = __p0; \ + __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__s0, __p1); \ __ret; \ }) #else -#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \ - float64_t __ret; \ - float64x2_t __s0 = __p0; \ - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__rev0, __p1); \ - __ret; \ -}) -#define __noswap_vgetq_lane_f64(__p0, __p1) __extension__ ({ \ - float64_t __ret; \ - float64x2_t __s0 = __p0; \ - __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \ +#define vduph_lane_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x4_t __s0 = __p0; \ + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (poly16_t) __builtin_neon_vduph_lane_i16((poly16x4_t)__rev0, __p1); \ __ret; \ }) #endif -#define vget_lane_f64(__p0, __p1) __extension__ ({ \ - float64_t __ret; \ - float64x1_t __s0 = __p0; \ - __ret = (float64_t) __builtin_neon_vget_lane_f64((float64x1_t)__s0, __p1); \ +#ifdef __LITTLE_ENDIAN__ +#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + 
uint8x8_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \ __ret; \ }) -#ifdef __LITTLE_ENDIAN__ -__ai poly64x1_t vget_low_p64(poly64x2_t __p0) { - poly64x1_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0); - return __ret; -} #else -__ai poly64x1_t vget_low_p64(poly64x2_t __p0) { - poly64x1_t __ret; - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 0); - return __ret; -} +#define vdupb_lane_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x8_t __s0 = __p0; \ + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x1_t vget_low_f64(float64x2_t __p0) { - float64x1_t __ret; - __ret = __builtin_shufflevector(__p0, __p0, 0); - return __ret; -} +#define vdups_lane_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x2_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \ + __ret; \ +}) #else -__ai float64x1_t vget_low_f64(float64x2_t __p0) { - float64x1_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev0, 0); - return __ret; -} +#define vdups_lane_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x2_t __s0 = __p0; \ + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \ + __ret; \ +}) #endif -#define vld1_p64(__p0) __extension__ ({ \ - poly64x1_t __ret; \ - __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \ +#define vdupd_lane_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64x1_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define 
vld1q_p64(__p0) __extension__ ({ \ - poly64x2_t __ret; \ - __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \ +#define vduph_lane_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x4_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \ __ret; \ }) #else -#define vld1q_p64(__p0) __extension__ ({ \ - poly64x2_t __ret; \ - __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ +#define vduph_lane_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x4_t __s0 = __p0; \ + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_f64(__p0) __extension__ ({ \ - float64x2_t __ret; \ - __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \ +#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x8_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__s0, __p1); \ __ret; \ }) #else -#define vld1q_f64(__p0) __extension__ ({ \ - float64x2_t __ret; \ - __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ +#define vdupb_lane_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x8_t __s0 = __p0; \ + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8_t) __builtin_neon_vdupb_lane_i8((int8x8_t)__rev0, __p1); \ __ret; \ }) #endif -#define vld1_f64(__p0) __extension__ ({ \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \ - __ret; \ -}) -#define vld1_dup_p64(__p0) __extension__ ({ \ - poly64x1_t __ret; \ - __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \ +#define vdupd_lane_f64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + float64x1_t __s0 = __p0; \ + __ret = (float64_t) 
__builtin_neon_vdupd_lane_f64((float64x1_t)__s0, __p1); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_p64(__p0) __extension__ ({ \ - poly64x2_t __ret; \ - __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \ +#define vdups_lane_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x2_t __s0 = __p0; \ + __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__s0, __p1); \ __ret; \ }) #else -#define vld1q_dup_p64(__p0) __extension__ ({ \ - poly64x2_t __ret; \ - __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ +#define vdups_lane_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float32_t) __builtin_neon_vdups_lane_f32((float32x2_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_dup_f64(__p0) __extension__ ({ \ - float64x2_t __ret; \ - __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \ +#define vdups_lane_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x2_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__s0, __p1); \ __ret; \ }) #else -#define vld1q_dup_f64(__p0) __extension__ ({ \ - float64x2_t __ret; \ - __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ +#define vdups_lane_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int32_t) __builtin_neon_vdups_lane_i32((int32x2_t)__rev0, __p1); \ __ret; \ }) #endif -#define vld1_dup_f64(__p0) __extension__ ({ \ - float64x1_t __ret; \ - __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \ - __ret; \ -}) -#define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1_t __ret; \ - poly64x1_t __s1 = __p1; \ - __ret = 
(poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \ +#define vdupd_lane_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64x1_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vdupd_lane_i64((int64x1_t)__s0, __p1); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2_t __ret; \ - poly64x2_t __s1 = __p1; \ - __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \ +#define vduph_lane_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x4_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__s0, __p1); \ __ret; \ }) #else -#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2_t __ret; \ - poly64x2_t __s1 = __p1; \ - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ +#define vduph_lane_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int16_t) __builtin_neon_vduph_lane_i16((int16x4_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2_t __ret; \ - float64x2_t __s1 = __p1; \ - __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \ +#define vduph_lane_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__s0, __p1); \ __ret; \ }) #else -#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2_t __ret; \ - float64x2_t __s1 = __p1; \ - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \ 
- __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ +#define vduph_lane_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16x4_t __s0 = __p0; \ + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__rev0, __p1); \ __ret; \ }) #endif -#define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1_t __ret; \ - float64x1_t __s1 = __p1; \ - __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \ - __ret; \ -}) -#define vld1_p64_x2(__p0) __extension__ ({ \ - poly64x1x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \ - __ret; \ +#define vdup_lane_p64(__p0_364, __p1_364) __extension__ ({ \ + poly64x1_t __ret_364; \ + poly64x1_t __s0_364 = __p0_364; \ + __ret_364 = splat_lane_p64(__s0_364, __p1_364); \ + __ret_364; \ }) #ifdef __LITTLE_ENDIAN__ -#define vld1q_p64_x2(__p0) __extension__ ({ \ - poly64x2x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \ - __ret; \ +#define vdupq_lane_p64(__p0_365, __p1_365) __extension__ ({ \ + poly64x2_t __ret_365; \ + poly64x1_t __s0_365 = __p0_365; \ + __ret_365 = splatq_lane_p64(__s0_365, __p1_365); \ + __ret_365; \ }) #else -#define vld1q_p64_x2(__p0) __extension__ ({ \ - poly64x2x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ +#define vdupq_lane_p64(__p0_366, __p1_366) __extension__ ({ \ + poly64x2_t __ret_366; \ + poly64x1_t __s0_366 = __p0_366; \ + __ret_366 = __noswap_splatq_lane_p64(__s0_366, __p1_366); \ + __ret_366 = __builtin_shufflevector(__ret_366, __ret_366, 1, 0); \ + __ret_366; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_f64_x2(__p0) __extension__ ({ \ - float64x2x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \ - __ret; \ +#define 
vdupq_lane_f64(__p0_367, __p1_367) __extension__ ({ \ + float64x2_t __ret_367; \ + float64x1_t __s0_367 = __p0_367; \ + __ret_367 = splatq_lane_f64(__s0_367, __p1_367); \ + __ret_367; \ }) #else -#define vld1q_f64_x2(__p0) __extension__ ({ \ - float64x2x2_t __ret; \ - __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ +#define vdupq_lane_f64(__p0_368, __p1_368) __extension__ ({ \ + float64x2_t __ret_368; \ + float64x1_t __s0_368 = __p0_368; \ + __ret_368 = __noswap_splatq_lane_f64(__s0_368, __p1_368); \ + __ret_368 = __builtin_shufflevector(__ret_368, __ret_368, 1, 0); \ + __ret_368; \ }) #endif -#define vld1_f64_x2(__p0) __extension__ ({ \ - float64x1x2_t __ret; \ - __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \ - __ret; \ -}) -#define vld1_p64_x3(__p0) __extension__ ({ \ - poly64x1x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \ - __ret; \ +#define vdup_lane_f64(__p0_369, __p1_369) __extension__ ({ \ + float64x1_t __ret_369; \ + float64x1_t __s0_369 = __p0_369; \ + __ret_369 = splat_lane_f64(__s0_369, __p1_369); \ + __ret_369; \ }) #ifdef __LITTLE_ENDIAN__ -#define vld1q_p64_x3(__p0) __extension__ ({ \ - poly64x2x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \ +#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; \ + poly8x16_t __s0 = __p0; \ + __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__s0, __p1); \ __ret; \ }) #else -#define vld1q_p64_x3(__p0) __extension__ ({ \ - poly64x2x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ +#define vdupb_laneq_p8(__p0, __p1) __extension__ ({ \ + poly8_t __ret; 
\ + poly8x16_t __s0 = __p0; \ + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly8_t) __builtin_neon_vdupb_laneq_i8((poly8x16_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_f64_x3(__p0) __extension__ ({ \ - float64x2x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \ +#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x8_t __s0 = __p0; \ + __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__s0, __p1); \ __ret; \ }) #else -#define vld1q_f64_x3(__p0) __extension__ ({ \ - float64x2x3_t __ret; \ - __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ +#define vduph_laneq_p16(__p0, __p1) __extension__ ({ \ + poly16_t __ret; \ + poly16x8_t __s0 = __p0; \ + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (poly16_t) __builtin_neon_vduph_laneq_i16((poly16x8_t)__rev0, __p1); \ __ret; \ }) #endif -#define vld1_f64_x3(__p0) __extension__ ({ \ - float64x1x3_t __ret; \ - __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \ +#ifdef __LITTLE_ENDIAN__ +#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x16_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \ __ret; \ }) -#define vld1_p64_x4(__p0) __extension__ ({ \ - poly64x1x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \ +#else +#define vdupb_laneq_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8x16_t __s0 = __p0; \ + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint8_t) 
__builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \ __ret; \ }) +#endif + #ifdef __LITTLE_ENDIAN__ -#define vld1q_p64_x4(__p0) __extension__ ({ \ - poly64x2x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \ +#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x4_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \ __ret; \ }) #else -#define vld1q_p64_x4(__p0) __extension__ ({ \ - poly64x2x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ +#define vdups_laneq_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32x4_t __s0 = __p0; \ + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (uint32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld1q_f64_x4(__p0) __extension__ ({ \ - float64x2x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \ +#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64x2_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \ __ret; \ }) #else -#define vld1q_f64_x4(__p0) __extension__ ({ \ - float64x2x4_t __ret; \ - __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ +#define vdupd_laneq_u64(__p0, __p1) __extension__ ({ \ + uint64_t 
__ret; \ + uint64x2_t __s0 = __p0; \ + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (uint64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \ __ret; \ }) #endif -#define vld1_f64_x4(__p0) __extension__ ({ \ - float64x1x4_t __ret; \ - __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \ +#ifdef __LITTLE_ENDIAN__ +#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x8_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \ __ret; \ }) -#define vld2_p64(__p0) __extension__ ({ \ - poly64x1x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 6); \ +#else +#define vduph_laneq_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16x8_t __s0 = __p0; \ + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (uint16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \ __ret; \ }) +#endif + #ifdef __LITTLE_ENDIAN__ -#define vld2q_p64(__p0) __extension__ ({ \ - poly64x2x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 38); \ +#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x16_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__s0, __p1); \ __ret; \ }) #else -#define vld2q_p64(__p0) __extension__ ({ \ - poly64x2x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 38); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ +#define vdupb_laneq_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8x16_t __s0 = __p0; \ + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int8_t) __builtin_neon_vdupb_laneq_i8((int8x16_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_u64(__p0) __extension__ ({ \ - uint64x2x2_t __ret; \ - 
__builtin_neon_vld2q_v(&__ret, __p0, 51); \ +#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + float64x2_t __s0 = __p0; \ + __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__s0, __p1); \ __ret; \ }) #else -#define vld2q_u64(__p0) __extension__ ({ \ - uint64x2x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 51); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ +#define vdupd_laneq_f64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float64_t) __builtin_neon_vdupd_laneq_f64((float64x2_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_f64(__p0) __extension__ ({ \ - float64x2x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 42); \ +#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x4_t __s0 = __p0; \ + __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__s0, __p1); \ __ret; \ }) #else -#define vld2q_f64(__p0) __extension__ ({ \ - float64x2x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 42); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ +#define vdups_laneq_f32(__p0, __p1) __extension__ ({ \ + float32_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (float32_t) __builtin_neon_vdups_laneq_f32((float32x4_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_s64(__p0) __extension__ ({ \ - int64x2x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 35); \ +#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x4_t __s0 = __p0; \ + __ret = (int32_t) 
__builtin_neon_vdups_laneq_i32((int32x4_t)__s0, __p1); \ __ret; \ }) #else -#define vld2q_s64(__p0) __extension__ ({ \ - int64x2x2_t __ret; \ - __builtin_neon_vld2q_v(&__ret, __p0, 35); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ +#define vdups_laneq_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + __ret = (int32_t) __builtin_neon_vdups_laneq_i32((int32x4_t)__rev0, __p1); \ __ret; \ }) #endif -#define vld2_f64(__p0) __extension__ ({ \ - float64x1x2_t __ret; \ - __builtin_neon_vld2_v(&__ret, __p0, 10); \ +#ifdef __LITTLE_ENDIAN__ +#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64x2_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__s0, __p1); \ __ret; \ }) -#define vld2_dup_p64(__p0) __extension__ ({ \ - poly64x1x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \ +#else +#define vdupd_laneq_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64x2_t __s0 = __p0; \ + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (int64_t) __builtin_neon_vdupd_laneq_i64((int64x2_t)__rev0, __p1); \ __ret; \ }) +#endif + #ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_p64(__p0) __extension__ ({ \ - poly64x2x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \ +#define vduph_laneq_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x8_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__s0, __p1); \ __ret; \ }) #else -#define vld2q_dup_p64(__p0) __extension__ ({ \ - poly64x2x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ +#define 
vduph_laneq_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16_t) __builtin_neon_vduph_laneq_i16((int16x8_t)__rev0, __p1); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_dup_f64(__p0) __extension__ ({ \ - float64x2x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \ +#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__s0, __p1); \ __ret; \ }) #else -#define vld2q_dup_f64(__p0) __extension__ ({ \ - float64x2x2_t __ret; \ - __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ +#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \ + float16_t __ret; \ + float16x8_t __s0 = __p0; \ + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__rev0, __p1); \ __ret; \ }) #endif -#define vld2_dup_f64(__p0) __extension__ ({ \ - float64x1x2_t __ret; \ - __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \ - __ret; \ +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_p8(__p0_370, __p1_370) __extension__ ({ \ + poly8x8_t __ret_370; \ + poly8x16_t __s0_370 = __p0_370; \ + __ret_370 = splat_laneq_p8(__s0_370, __p1_370); \ + __ret_370; \ }) -#define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1x2_t __ret; \ - poly64x1x2_t __s1 = __p1; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \ - __ret; \ +#else +#define vdup_laneq_p8(__p0_371, __p1_371) __extension__ ({ \ + poly8x8_t __ret_371; \ + poly8x16_t __s0_371 = __p0_371; \ + poly8x16_t __rev0_371; __rev0_371 = __builtin_shufflevector(__s0_371, 
__s0_371, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_371 = __noswap_splat_laneq_p8(__rev0_371, __p1_371); \ + __ret_371 = __builtin_shufflevector(__ret_371, __ret_371, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_371; \ }) +#endif + #ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16x2_t __ret; \ - poly8x16x2_t __s1 = __p1; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \ - __ret; \ +#define vdup_laneq_p64(__p0_372, __p1_372) __extension__ ({ \ + poly64x1_t __ret_372; \ + poly64x2_t __s0_372 = __p0_372; \ + __ret_372 = splat_laneq_p64(__s0_372, __p1_372); \ + __ret_372; \ }) #else -#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16x2_t __ret; \ - poly8x16x2_t __s1 = __p1; \ - poly8x16x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ +#define vdup_laneq_p64(__p0_373, __p1_373) __extension__ ({ \ + poly64x1_t __ret_373; \ + poly64x2_t __s0_373 = __p0_373; \ + poly64x2_t __rev0_373; __rev0_373 = __builtin_shufflevector(__s0_373, __s0_373, 1, 0); \ + __ret_373 = __noswap_splat_laneq_p64(__rev0_373, __p1_373); \ + __ret_373; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2x2_t __ret; \ - poly64x2x2_t __s1 = __p1; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], 
(int8x16_t)__s1.val[1], __p2, 38); \ - __ret; \ +#define vdup_laneq_p16(__p0_374, __p1_374) __extension__ ({ \ + poly16x4_t __ret_374; \ + poly16x8_t __s0_374 = __p0_374; \ + __ret_374 = splat_laneq_p16(__s0_374, __p1_374); \ + __ret_374; \ }) #else -#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2x2_t __ret; \ - poly64x2x2_t __s1 = __p1; \ - poly64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ +#define vdup_laneq_p16(__p0_375, __p1_375) __extension__ ({ \ + poly16x4_t __ret_375; \ + poly16x8_t __s0_375 = __p0_375; \ + poly16x8_t __rev0_375; __rev0_375 = __builtin_shufflevector(__s0_375, __s0_375, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_375 = __noswap_splat_laneq_p16(__rev0_375, __p1_375); \ + __ret_375 = __builtin_shufflevector(__ret_375, __ret_375, 3, 2, 1, 0); \ + __ret_375; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16x2_t __ret; \ - uint8x16x2_t __s1 = __p1; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \ - __ret; \ +#define vdupq_laneq_p8(__p0_376, __p1_376) __extension__ ({ \ + poly8x16_t __ret_376; \ + poly8x16_t __s0_376 = __p0_376; \ + __ret_376 = splatq_laneq_p8(__s0_376, __p1_376); \ + __ret_376; \ }) #else -#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16x2_t __ret; \ - uint8x16x2_t __s1 = __p1; \ - uint8x16x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], 
__s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ +#define vdupq_laneq_p8(__p0_377, __p1_377) __extension__ ({ \ + poly8x16_t __ret_377; \ + poly8x16_t __s0_377 = __p0_377; \ + poly8x16_t __rev0_377; __rev0_377 = __builtin_shufflevector(__s0_377, __s0_377, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_377 = __noswap_splatq_laneq_p8(__rev0_377, __p1_377); \ + __ret_377 = __builtin_shufflevector(__ret_377, __ret_377, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_377; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2x2_t __ret; \ - uint64x2x2_t __s1 = __p1; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \ - __ret; \ +#define vdupq_laneq_p64(__p0_378, __p1_378) __extension__ ({ \ + poly64x2_t __ret_378; \ + poly64x2_t __s0_378 = __p0_378; \ + __ret_378 = splatq_laneq_p64(__s0_378, __p1_378); \ + __ret_378; \ }) #else -#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2x2_t __ret; \ - uint64x2x2_t __s1 = __p1; \ - uint64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ +#define vdupq_laneq_p64(__p0_379, __p1_379) 
__extension__ ({ \ + poly64x2_t __ret_379; \ + poly64x2_t __s0_379 = __p0_379; \ + poly64x2_t __rev0_379; __rev0_379 = __builtin_shufflevector(__s0_379, __s0_379, 1, 0); \ + __ret_379 = __noswap_splatq_laneq_p64(__rev0_379, __p1_379); \ + __ret_379 = __builtin_shufflevector(__ret_379, __ret_379, 1, 0); \ + __ret_379; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16x2_t __ret; \ - int8x16x2_t __s1 = __p1; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \ - __ret; \ +#define vdupq_laneq_p16(__p0_380, __p1_380) __extension__ ({ \ + poly16x8_t __ret_380; \ + poly16x8_t __s0_380 = __p0_380; \ + __ret_380 = splatq_laneq_p16(__s0_380, __p1_380); \ + __ret_380; \ }) #else -#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16x2_t __ret; \ - int8x16x2_t __s1 = __p1; \ - int8x16x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ +#define vdupq_laneq_p16(__p0_381, __p1_381) __extension__ ({ \ + poly16x8_t __ret_381; \ + poly16x8_t __s0_381 = __p0_381; \ + poly16x8_t __rev0_381; __rev0_381 = __builtin_shufflevector(__s0_381, __s0_381, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_381 = __noswap_splatq_laneq_p16(__rev0_381, __p1_381); \ + __ret_381 = __builtin_shufflevector(__ret_381, __ret_381, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_381; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define 
vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2x2_t __ret; \ - float64x2x2_t __s1 = __p1; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \ - __ret; \ +#define vdupq_laneq_u8(__p0_382, __p1_382) __extension__ ({ \ + uint8x16_t __ret_382; \ + uint8x16_t __s0_382 = __p0_382; \ + __ret_382 = splatq_laneq_u8(__s0_382, __p1_382); \ + __ret_382; \ }) #else -#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2x2_t __ret; \ - float64x2x2_t __s1 = __p1; \ - float64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ +#define vdupq_laneq_u8(__p0_383, __p1_383) __extension__ ({ \ + uint8x16_t __ret_383; \ + uint8x16_t __s0_383 = __p0_383; \ + uint8x16_t __rev0_383; __rev0_383 = __builtin_shufflevector(__s0_383, __s0_383, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_383 = __noswap_splatq_laneq_u8(__rev0_383, __p1_383); \ + __ret_383 = __builtin_shufflevector(__ret_383, __ret_383, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_383; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2x2_t __ret; \ - int64x2x2_t __s1 = __p1; \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \ - __ret; \ +#define vdupq_laneq_u32(__p0_384, __p1_384) __extension__ ({ \ + uint32x4_t __ret_384; \ + uint32x4_t __s0_384 = __p0_384; \ + __ret_384 = splatq_laneq_u32(__s0_384, __p1_384); \ + __ret_384; \ }) #else -#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - 
int64x2x2_t __ret; \ - int64x2x2_t __s1 = __p1; \ - int64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret; \ +#define vdupq_laneq_u32(__p0_385, __p1_385) __extension__ ({ \ + uint32x4_t __ret_385; \ + uint32x4_t __s0_385 = __p0_385; \ + uint32x4_t __rev0_385; __rev0_385 = __builtin_shufflevector(__s0_385, __s0_385, 3, 2, 1, 0); \ + __ret_385 = __noswap_splatq_laneq_u32(__rev0_385, __p1_385); \ + __ret_385 = __builtin_shufflevector(__ret_385, __ret_385, 3, 2, 1, 0); \ + __ret_385; \ }) #endif -#define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1x2_t __ret; \ - uint64x1x2_t __s1 = __p1; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \ - __ret; \ -}) -#define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1x2_t __ret; \ - float64x1x2_t __s1 = __p1; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \ - __ret; \ -}) -#define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1x2_t __ret; \ - int64x1x2_t __s1 = __p1; \ - __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \ - __ret; \ -}) -#define vld3_p64(__p0) __extension__ ({ \ - poly64x1x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 6); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vld3q_p64(__p0) __extension__ ({ \ - poly64x2x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 38); \ - __ret; \ +#define vdupq_laneq_u64(__p0_386, __p1_386) __extension__ ({ \ + uint64x2_t __ret_386; \ + uint64x2_t __s0_386 = __p0_386; \ + 
__ret_386 = splatq_laneq_u64(__s0_386, __p1_386); \ + __ret_386; \ }) #else -#define vld3q_p64(__p0) __extension__ ({ \ - poly64x2x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 38); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ +#define vdupq_laneq_u64(__p0_387, __p1_387) __extension__ ({ \ + uint64x2_t __ret_387; \ + uint64x2_t __s0_387 = __p0_387; \ + uint64x2_t __rev0_387; __rev0_387 = __builtin_shufflevector(__s0_387, __s0_387, 1, 0); \ + __ret_387 = __noswap_splatq_laneq_u64(__rev0_387, __p1_387); \ + __ret_387 = __builtin_shufflevector(__ret_387, __ret_387, 1, 0); \ + __ret_387; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_u64(__p0) __extension__ ({ \ - uint64x2x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 51); \ - __ret; \ +#define vdupq_laneq_u16(__p0_388, __p1_388) __extension__ ({ \ + uint16x8_t __ret_388; \ + uint16x8_t __s0_388 = __p0_388; \ + __ret_388 = splatq_laneq_u16(__s0_388, __p1_388); \ + __ret_388; \ }) #else -#define vld3q_u64(__p0) __extension__ ({ \ - uint64x2x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 51); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ +#define vdupq_laneq_u16(__p0_389, __p1_389) __extension__ ({ \ + uint16x8_t __ret_389; \ + uint16x8_t __s0_389 = __p0_389; \ + uint16x8_t __rev0_389; __rev0_389 = __builtin_shufflevector(__s0_389, __s0_389, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_389 = __noswap_splatq_laneq_u16(__rev0_389, __p1_389); \ + __ret_389 = __builtin_shufflevector(__ret_389, __ret_389, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_389; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_f64(__p0) 
__extension__ ({ \ - float64x2x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 42); \ - __ret; \ +#define vdupq_laneq_s8(__p0_390, __p1_390) __extension__ ({ \ + int8x16_t __ret_390; \ + int8x16_t __s0_390 = __p0_390; \ + __ret_390 = splatq_laneq_s8(__s0_390, __p1_390); \ + __ret_390; \ }) #else -#define vld3q_f64(__p0) __extension__ ({ \ - float64x2x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 42); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ +#define vdupq_laneq_s8(__p0_391, __p1_391) __extension__ ({ \ + int8x16_t __ret_391; \ + int8x16_t __s0_391 = __p0_391; \ + int8x16_t __rev0_391; __rev0_391 = __builtin_shufflevector(__s0_391, __s0_391, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_391 = __noswap_splatq_laneq_s8(__rev0_391, __p1_391); \ + __ret_391 = __builtin_shufflevector(__ret_391, __ret_391, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_391; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_s64(__p0) __extension__ ({ \ - int64x2x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 35); \ - __ret; \ +#define vdupq_laneq_f64(__p0_392, __p1_392) __extension__ ({ \ + float64x2_t __ret_392; \ + float64x2_t __s0_392 = __p0_392; \ + __ret_392 = splatq_laneq_f64(__s0_392, __p1_392); \ + __ret_392; \ }) #else -#define vld3q_s64(__p0) __extension__ ({ \ - int64x2x3_t __ret; \ - __builtin_neon_vld3q_v(&__ret, __p0, 35); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ +#define vdupq_laneq_f64(__p0_393, __p1_393) __extension__ ({ \ + float64x2_t __ret_393; \ + float64x2_t __s0_393 = __p0_393; \ + float64x2_t 
__rev0_393; __rev0_393 = __builtin_shufflevector(__s0_393, __s0_393, 1, 0); \ + __ret_393 = __noswap_splatq_laneq_f64(__rev0_393, __p1_393); \ + __ret_393 = __builtin_shufflevector(__ret_393, __ret_393, 1, 0); \ + __ret_393; \ }) #endif -#define vld3_f64(__p0) __extension__ ({ \ - float64x1x3_t __ret; \ - __builtin_neon_vld3_v(&__ret, __p0, 10); \ - __ret; \ -}) -#define vld3_dup_p64(__p0) __extension__ ({ \ - poly64x1x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \ - __ret; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_p64(__p0) __extension__ ({ \ - poly64x2x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \ - __ret; \ +#define vdupq_laneq_f32(__p0_394, __p1_394) __extension__ ({ \ + float32x4_t __ret_394; \ + float32x4_t __s0_394 = __p0_394; \ + __ret_394 = splatq_laneq_f32(__s0_394, __p1_394); \ + __ret_394; \ }) #else -#define vld3q_dup_p64(__p0) __extension__ ({ \ - poly64x2x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ +#define vdupq_laneq_f32(__p0_395, __p1_395) __extension__ ({ \ + float32x4_t __ret_395; \ + float32x4_t __s0_395 = __p0_395; \ + float32x4_t __rev0_395; __rev0_395 = __builtin_shufflevector(__s0_395, __s0_395, 3, 2, 1, 0); \ + __ret_395 = __noswap_splatq_laneq_f32(__rev0_395, __p1_395); \ + __ret_395 = __builtin_shufflevector(__ret_395, __ret_395, 3, 2, 1, 0); \ + __ret_395; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_dup_f64(__p0) __extension__ ({ \ - float64x2x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \ - __ret; \ +#define vdupq_laneq_f16(__p0_396, __p1_396) __extension__ ({ \ + float16x8_t __ret_396; \ + float16x8_t __s0_396 = __p0_396; \ + __ret_396 = splatq_laneq_f16(__s0_396, __p1_396); \ + __ret_396; \ }) #else -#define 
vld3q_dup_f64(__p0) __extension__ ({ \ - float64x2x3_t __ret; \ - __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ +#define vdupq_laneq_f16(__p0_397, __p1_397) __extension__ ({ \ + float16x8_t __ret_397; \ + float16x8_t __s0_397 = __p0_397; \ + float16x8_t __rev0_397; __rev0_397 = __builtin_shufflevector(__s0_397, __s0_397, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_397 = __noswap_splatq_laneq_f16(__rev0_397, __p1_397); \ + __ret_397 = __builtin_shufflevector(__ret_397, __ret_397, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_397; \ }) #endif -#define vld3_dup_f64(__p0) __extension__ ({ \ - float64x1x3_t __ret; \ - __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \ - __ret; \ +#ifdef __LITTLE_ENDIAN__ +#define vdupq_laneq_s32(__p0_398, __p1_398) __extension__ ({ \ + int32x4_t __ret_398; \ + int32x4_t __s0_398 = __p0_398; \ + __ret_398 = splatq_laneq_s32(__s0_398, __p1_398); \ + __ret_398; \ }) -#define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1x3_t __ret; \ - poly64x1x3_t __s1 = __p1; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \ - __ret; \ +#else +#define vdupq_laneq_s32(__p0_399, __p1_399) __extension__ ({ \ + int32x4_t __ret_399; \ + int32x4_t __s0_399 = __p0_399; \ + int32x4_t __rev0_399; __rev0_399 = __builtin_shufflevector(__s0_399, __s0_399, 3, 2, 1, 0); \ + __ret_399 = __noswap_splatq_laneq_s32(__rev0_399, __p1_399); \ + __ret_399 = __builtin_shufflevector(__ret_399, __ret_399, 3, 2, 1, 0); \ + __ret_399; \ }) +#endif + #ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16x3_t __ret; \ - poly8x16x3_t __s1 = __p1; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], 
(int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \ - __ret; \ +#define vdupq_laneq_s64(__p0_400, __p1_400) __extension__ ({ \ + int64x2_t __ret_400; \ + int64x2_t __s0_400 = __p0_400; \ + __ret_400 = splatq_laneq_s64(__s0_400, __p1_400); \ + __ret_400; \ }) #else -#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16x3_t __ret; \ - poly8x16x3_t __s1 = __p1; \ - poly8x16x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ +#define vdupq_laneq_s64(__p0_401, __p1_401) __extension__ ({ \ + int64x2_t __ret_401; \ + int64x2_t __s0_401 = __p0_401; \ + int64x2_t __rev0_401; __rev0_401 = __builtin_shufflevector(__s0_401, __s0_401, 1, 0); \ + __ret_401 = __noswap_splatq_laneq_s64(__rev0_401, __p1_401); \ + __ret_401 = __builtin_shufflevector(__ret_401, __ret_401, 1, 0); \ + __ret_401; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2x3_t __ret; \ - poly64x2x3_t __s1 = __p1; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \ - __ret; \ +#define 
vdupq_laneq_s16(__p0_402, __p1_402) __extension__ ({ \ + int16x8_t __ret_402; \ + int16x8_t __s0_402 = __p0_402; \ + __ret_402 = splatq_laneq_s16(__s0_402, __p1_402); \ + __ret_402; \ }) #else -#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2x3_t __ret; \ - poly64x2x3_t __s1 = __p1; \ - poly64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ +#define vdupq_laneq_s16(__p0_403, __p1_403) __extension__ ({ \ + int16x8_t __ret_403; \ + int16x8_t __s0_403 = __p0_403; \ + int16x8_t __rev0_403; __rev0_403 = __builtin_shufflevector(__s0_403, __s0_403, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_403 = __noswap_splatq_laneq_s16(__rev0_403, __p1_403); \ + __ret_403 = __builtin_shufflevector(__ret_403, __ret_403, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_403; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16x3_t __ret; \ - uint8x16x3_t __s1 = __p1; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \ - __ret; \ +#define vdup_laneq_u8(__p0_404, __p1_404) __extension__ ({ \ + uint8x8_t __ret_404; \ + uint8x16_t __s0_404 = __p0_404; \ + __ret_404 = splat_laneq_u8(__s0_404, __p1_404); \ + __ret_404; \ }) #else -#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16x3_t __ret; \ - uint8x16x3_t __s1 = __p1; \ - uint8x16x3_t __rev1; \ - __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ +#define vdup_laneq_u8(__p0_405, __p1_405) __extension__ ({ \ + uint8x8_t __ret_405; \ + uint8x16_t __s0_405 = __p0_405; \ + uint8x16_t __rev0_405; __rev0_405 = __builtin_shufflevector(__s0_405, __s0_405, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_405 = __noswap_splat_laneq_u8(__rev0_405, __p1_405); \ + __ret_405 = __builtin_shufflevector(__ret_405, __ret_405, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_405; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2x3_t __ret; \ - uint64x2x3_t __s1 = __p1; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \ - __ret; \ +#define vdup_laneq_u32(__p0_406, __p1_406) __extension__ ({ \ + uint32x2_t __ret_406; \ + uint32x4_t __s0_406 = __p0_406; \ + __ret_406 = splat_laneq_u32(__s0_406, __p1_406); \ + __ret_406; \ }) #else -#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2x3_t __ret; \ - uint64x2x3_t __s1 = __p1; \ - uint64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], 
__s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ +#define vdup_laneq_u32(__p0_407, __p1_407) __extension__ ({ \ + uint32x2_t __ret_407; \ + uint32x4_t __s0_407 = __p0_407; \ + uint32x4_t __rev0_407; __rev0_407 = __builtin_shufflevector(__s0_407, __s0_407, 3, 2, 1, 0); \ + __ret_407 = __noswap_splat_laneq_u32(__rev0_407, __p1_407); \ + __ret_407 = __builtin_shufflevector(__ret_407, __ret_407, 1, 0); \ + __ret_407; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16x3_t __ret; \ - int8x16x3_t __s1 = __p1; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \ - __ret; \ +#define vdup_laneq_u64(__p0_408, __p1_408) __extension__ ({ \ + uint64x1_t __ret_408; \ + uint64x2_t __s0_408 = __p0_408; \ + __ret_408 = splat_laneq_u64(__s0_408, __p1_408); \ + __ret_408; \ }) #else -#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16x3_t __ret; \ - int8x16x3_t __s1 = __p1; \ - int8x16x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], 
(int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ +#define vdup_laneq_u64(__p0_409, __p1_409) __extension__ ({ \ + uint64x1_t __ret_409; \ + uint64x2_t __s0_409 = __p0_409; \ + uint64x2_t __rev0_409; __rev0_409 = __builtin_shufflevector(__s0_409, __s0_409, 1, 0); \ + __ret_409 = __noswap_splat_laneq_u64(__rev0_409, __p1_409); \ + __ret_409; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2x3_t __ret; \ - float64x2x3_t __s1 = __p1; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \ - __ret; \ +#define vdup_laneq_u16(__p0_410, __p1_410) __extension__ ({ \ + uint16x4_t __ret_410; \ + uint16x8_t __s0_410 = __p0_410; \ + __ret_410 = splat_laneq_u16(__s0_410, __p1_410); \ + __ret_410; \ }) #else -#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2x3_t __ret; \ - float64x2x3_t __s1 = __p1; \ - float64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - 
__ret; \ +#define vdup_laneq_u16(__p0_411, __p1_411) __extension__ ({ \ + uint16x4_t __ret_411; \ + uint16x8_t __s0_411 = __p0_411; \ + uint16x8_t __rev0_411; __rev0_411 = __builtin_shufflevector(__s0_411, __s0_411, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_411 = __noswap_splat_laneq_u16(__rev0_411, __p1_411); \ + __ret_411 = __builtin_shufflevector(__ret_411, __ret_411, 3, 2, 1, 0); \ + __ret_411; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2x3_t __ret; \ - int64x2x3_t __s1 = __p1; \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \ - __ret; \ +#define vdup_laneq_s8(__p0_412, __p1_412) __extension__ ({ \ + int8x8_t __ret_412; \ + int8x16_t __s0_412 = __p0_412; \ + __ret_412 = splat_laneq_s8(__s0_412, __p1_412); \ + __ret_412; \ }) #else -#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2x3_t __ret; \ - int64x2x3_t __s1 = __p1; \ - int64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret; \ +#define vdup_laneq_s8(__p0_413, __p1_413) __extension__ ({ \ + int8x8_t __ret_413; \ + int8x16_t __s0_413 = __p0_413; \ + int8x16_t __rev0_413; __rev0_413 = __builtin_shufflevector(__s0_413, __s0_413, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_413 = __noswap_splat_laneq_s8(__rev0_413, __p1_413); \ + __ret_413 = 
__builtin_shufflevector(__ret_413, __ret_413, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_413; \ }) #endif -#define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1x3_t __ret; \ - uint64x1x3_t __s1 = __p1; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \ - __ret; \ +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_f64(__p0_414, __p1_414) __extension__ ({ \ + float64x1_t __ret_414; \ + float64x2_t __s0_414 = __p0_414; \ + __ret_414 = splat_laneq_f64(__s0_414, __p1_414); \ + __ret_414; \ }) -#define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1x3_t __ret; \ - float64x1x3_t __s1 = __p1; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \ - __ret; \ +#else +#define vdup_laneq_f64(__p0_415, __p1_415) __extension__ ({ \ + float64x1_t __ret_415; \ + float64x2_t __s0_415 = __p0_415; \ + float64x2_t __rev0_415; __rev0_415 = __builtin_shufflevector(__s0_415, __s0_415, 1, 0); \ + __ret_415 = __noswap_splat_laneq_f64(__rev0_415, __p1_415); \ + __ret_415; \ }) -#define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1x3_t __ret; \ - int64x1x3_t __s1 = __p1; \ - __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \ - __ret; \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vdup_laneq_f32(__p0_416, __p1_416) __extension__ ({ \ + float32x2_t __ret_416; \ + float32x4_t __s0_416 = __p0_416; \ + __ret_416 = splat_laneq_f32(__s0_416, __p1_416); \ + __ret_416; \ }) -#define vld4_p64(__p0) __extension__ ({ \ - poly64x1x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 6); \ - __ret; \ +#else +#define vdup_laneq_f32(__p0_417, __p1_417) __extension__ ({ \ + float32x2_t __ret_417; \ + float32x4_t __s0_417 = __p0_417; \ + float32x4_t __rev0_417; __rev0_417 = __builtin_shufflevector(__s0_417, __s0_417, 3, 2, 1, 0); \ + __ret_417 = 
__noswap_splat_laneq_f32(__rev0_417, __p1_417); \ + __ret_417 = __builtin_shufflevector(__ret_417, __ret_417, 1, 0); \ + __ret_417; \ }) +#endif + #ifdef __LITTLE_ENDIAN__ -#define vld4q_p64(__p0) __extension__ ({ \ - poly64x2x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 38); \ - __ret; \ +#define vdup_laneq_f16(__p0_418, __p1_418) __extension__ ({ \ + float16x4_t __ret_418; \ + float16x8_t __s0_418 = __p0_418; \ + __ret_418 = splat_laneq_f16(__s0_418, __p1_418); \ + __ret_418; \ }) #else -#define vld4q_p64(__p0) __extension__ ({ \ - poly64x2x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 38); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ +#define vdup_laneq_f16(__p0_419, __p1_419) __extension__ ({ \ + float16x4_t __ret_419; \ + float16x8_t __s0_419 = __p0_419; \ + float16x8_t __rev0_419; __rev0_419 = __builtin_shufflevector(__s0_419, __s0_419, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_419 = __noswap_splat_laneq_f16(__rev0_419, __p1_419); \ + __ret_419 = __builtin_shufflevector(__ret_419, __ret_419, 3, 2, 1, 0); \ + __ret_419; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_u64(__p0) __extension__ ({ \ - uint64x2x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 51); \ - __ret; \ +#define vdup_laneq_s32(__p0_420, __p1_420) __extension__ ({ \ + int32x2_t __ret_420; \ + int32x4_t __s0_420 = __p0_420; \ + __ret_420 = splat_laneq_s32(__s0_420, __p1_420); \ + __ret_420; \ }) #else -#define vld4q_u64(__p0) __extension__ ({ \ - uint64x2x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 51); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = 
__builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ +#define vdup_laneq_s32(__p0_421, __p1_421) __extension__ ({ \ + int32x2_t __ret_421; \ + int32x4_t __s0_421 = __p0_421; \ + int32x4_t __rev0_421; __rev0_421 = __builtin_shufflevector(__s0_421, __s0_421, 3, 2, 1, 0); \ + __ret_421 = __noswap_splat_laneq_s32(__rev0_421, __p1_421); \ + __ret_421 = __builtin_shufflevector(__ret_421, __ret_421, 1, 0); \ + __ret_421; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_f64(__p0) __extension__ ({ \ - float64x2x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 42); \ - __ret; \ +#define vdup_laneq_s64(__p0_422, __p1_422) __extension__ ({ \ + int64x1_t __ret_422; \ + int64x2_t __s0_422 = __p0_422; \ + __ret_422 = splat_laneq_s64(__s0_422, __p1_422); \ + __ret_422; \ }) #else -#define vld4q_f64(__p0) __extension__ ({ \ - float64x2x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 42); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ +#define vdup_laneq_s64(__p0_423, __p1_423) __extension__ ({ \ + int64x1_t __ret_423; \ + int64x2_t __s0_423 = __p0_423; \ + int64x2_t __rev0_423; __rev0_423 = __builtin_shufflevector(__s0_423, __s0_423, 1, 0); \ + __ret_423 = __noswap_splat_laneq_s64(__rev0_423, __p1_423); \ + __ret_423; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_s64(__p0) __extension__ ({ \ - int64x2x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 35); \ - __ret; \ +#define vdup_laneq_s16(__p0_424, __p1_424) __extension__ ({ \ + int16x4_t __ret_424; \ + int16x8_t __s0_424 = __p0_424; \ + __ret_424 = splat_laneq_s16(__s0_424, __p1_424); \ + __ret_424; \ }) #else -#define vld4q_s64(__p0) 
__extension__ ({ \ - int64x2x4_t __ret; \ - __builtin_neon_vld4q_v(&__ret, __p0, 35); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ - __ret; \ +#define vdup_laneq_s16(__p0_425, __p1_425) __extension__ ({ \ + int16x4_t __ret_425; \ + int16x8_t __s0_425 = __p0_425; \ + int16x8_t __rev0_425; __rev0_425 = __builtin_shufflevector(__s0_425, __s0_425, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_425 = __noswap_splat_laneq_s16(__rev0_425, __p1_425); \ + __ret_425 = __builtin_shufflevector(__ret_425, __ret_425, 3, 2, 1, 0); \ + __ret_425; \ }) #endif -#define vld4_f64(__p0) __extension__ ({ \ - float64x1x4_t __ret; \ - __builtin_neon_vld4_v(&__ret, __p0, 10); \ - __ret; \ -}) -#define vld4_dup_p64(__p0) __extension__ ({ \ - poly64x1x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \ +__ai __attribute__((target("neon"))) poly64x1_t vdup_n_p64(poly64_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t) {__p0}; + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly64x2_t vdupq_n_p64(poly64_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly64x2_t vdupq_n_p64(poly64_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vdupq_n_f64(float64_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) {__p0, __p0}; + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vdupq_n_f64(float64_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + 
return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vdup_n_f64(float64_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) {__p0}; + return __ret; +} +#define vext_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __ret; \ + poly64x1_t __s0 = __p0; \ + poly64x1_t __s1 = __p1; \ + __ret = (poly64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_p64(__p0) __extension__ ({ \ - poly64x2x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \ +#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \ __ret; \ }) #else -#define vld4q_dup_p64(__p0) __extension__ ({ \ - poly64x2x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ +#define vextq_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (poly64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_dup_f64(__p0) __extension__ ({ \ - float64x2x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \ +#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + __ret = 
(float64x2_t) __builtin_neon_vextq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 42); \ __ret; \ }) #else -#define vld4q_dup_f64(__p0) __extension__ ({ \ - float64x2x4_t __ret; \ - __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ +#define vextq_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (float64x2_t) __builtin_neon_vextq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ __ret; \ }) #endif -#define vld4_dup_f64(__p0) __extension__ ({ \ - float64x1x4_t __ret; \ - __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \ +#define vext_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + __ret = (float64x1_t) __builtin_neon_vext_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \ __ret; \ }) -#define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1x4_t __ret; \ - poly64x1x4_t __s1 = __p1; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \ - __ret; \ +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vfmaq_f64(float64x2_t 
__p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x2_t __noswap_vfmaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vfmaq_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); + return __ret; +} +#endif + +__ai __attribute__((target("neon"))) float64x1_t vfma_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vfma_v((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); + return __ret; +} +#define vfmad_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64_t __ret; \ + float64_t __s0 = __p0; \ + float64_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + __ret = (float64_t) __builtin_neon_vfmad_lane_f64(__s0, __s1, (float64x1_t)__s2, __p3); \ + __ret; \ }) #ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16x4_t __ret; \ - poly8x16x4_t __s1 = __p1; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \ +#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __ret; \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \ __ret; \ }) #else -#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16x4_t __ret; \ - poly8x16x4_t __s1 = __p1; \ - poly8x16x4_t __rev1; \ - 
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __ret; \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmas_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __ret; \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + __ret = (float32_t) __builtin_neon_vfmas_lane_f32(__s0, __s1, (float32x2_t)__s2, __p3); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2x4_t __ret; \ - poly64x2x4_t __s1 = __p1; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, 
(int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \ +#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \ __ret; \ }) #else -#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2x4_t __ret; \ - poly64x2x4_t __s1 = __p1; \ - poly64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ +#define vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__s2, __p3, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + 
float64x2_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + __ret = (float64x2_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 42); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16x4_t __ret; \ - uint8x16x4_t __s1 = __p1; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \ +#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \ __ret; \ }) #else -#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16x4_t __ret; \ - uint8x16x4_t __s1 = __p1; \ - uint8x16x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = 
__builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + __ret = (float32x4_t) __builtin_neon_vfmaq_lane_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 41); \ __ret; \ }) #endif +#define vfma_lane_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x1_t __s2 = __p2; \ + __ret = (float64x1_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 10); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2x4_t __ret; \ - uint64x2x4_t __s1 = __p1; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \ +#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \ __ret; \ }) #else -#define vld4q_lane_u64(__p0, __p1, __p2) 
__extension__ ({ \ - uint64x2x4_t __ret; \ - uint64x2x4_t __s1 = __p1; \ - uint64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ +#define vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vfma_lane_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x2_t __s2 = __p2; \ + __ret = (float32x2_t) __builtin_neon_vfma_lane_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 9); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16x4_t __ret; \ - int8x16x4_t __s1 = __p1; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], 
(int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \ +#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64_t __ret; \ + float64_t __s0 = __p0; \ + float64_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \ __ret; \ }) #else -#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16x4_t __ret; \ - int8x16x4_t __s1 = __p1; \ - int8x16x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ +#define vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64_t __ret; \ + float64_t __s0 = __p0; \ + float64_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, 
(float64x2_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmad_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64_t __ret; \ + float64_t __s0 = __p0; \ + float64_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + __ret = (float64_t) __builtin_neon_vfmad_laneq_f64(__s0, __s1, (float64x2_t)__s2, __p3); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2x4_t __ret; \ - float64x2x4_t __s1 = __p1; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \ +#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __ret; \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \ __ret; \ }) #else -#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2x4_t __ret; \ - float64x2x4_t __s1 = __p1; \ - float64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ +#define vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __ret; \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + 
float32x4_t __s2 = __p2; \ + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__rev2, __p3); \ + __ret; \ +}) +#define __noswap_vfmas_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32_t __ret; \ + float32_t __s0 = __p0; \ + float32_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + __ret = (float32_t) __builtin_neon_vfmas_laneq_f32(__s0, __s1, (float32x4_t)__s2, __p3); \ __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2x4_t __ret; \ - int64x2x4_t __s1 = __p1; \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \ +#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \ __ret; \ }) #else -#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2x4_t __ret; \ - int64x2x4_t __s1 = __p1; \ - int64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \ - \ - __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ - __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ - __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ - __ret.val[3] = 
__builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ +#define vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vfmaq_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + __ret = (float64x2_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 42); \ __ret; \ }) #endif -#define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1x4_t __ret; \ - uint64x1x4_t __s1 = __p1; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \ +#ifdef __LITTLE_ENDIAN__ +#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \ __ret; \ }) -#define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1x4_t __ret; \ - float64x1x4_t __s1 = __p1; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \ +#else +#define vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ 
+ float32x4_t __s2 = __p2; \ + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 41); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) -#define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1x4_t __ret; \ - int64x1x4_t __s1 = __p1; \ - __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \ +#define __noswap_vfmaq_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x4_t __ret; \ + float32x4_t __s0 = __p0; \ + float32x4_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + __ret = (float32x4_t) __builtin_neon_vfmaq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 41); \ __ret; \ }) -#define vldrq_p128(__p0) __extension__ ({ \ - poly128_t __ret; \ - __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \ + __ret; \ +}) +#else +#define vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__rev2, __p3, 10); \ + __ret; \ +}) +#define __noswap_vfma_laneq_f64(__p0, __p1, __p2, __p3) __extension__ ({ \ + float64x1_t 
__ret; \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + float64x2_t __s2 = __p2; \ + __ret = (float64x1_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 10); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \ + __ret; \ +}) +#else +#define vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + float32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 9); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vfma_laneq_f32(__p0, __p1, __p2, __p3) __extension__ ({ \ + float32x2_t __ret; \ + float32x2_t __s0 = __p0; \ + float32x2_t __s1 = __p1; \ + float32x4_t __s2 = __p2; \ + __ret = (float32x2_t) __builtin_neon_vfma_laneq_v((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 9); \ __ret; \ }) +#endif + #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + __ret = vfmaq_f64(__p0, __p1, (float64x2_t) {__p2, __p2}); return __ret; } #else -__ai float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) 
float64x2_t vfmaq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __noswap_vfmaq_f64(__rev0, __rev1, (float64x2_t) {__p2, __p2}); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) float64x1_t vfma_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) { float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + __ret = vfma_f64(__p0, __p1, (float64x1_t) {__p2}); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai float64_t vmaxnmvq_f64(float64x2_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__p0); +__ai __attribute__((target("neon"))) float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = vfmaq_f64(__p0, -__p1, __p2); return __ret; } #else -__ai float64_t vmaxnmvq_f64(float64x2_t __p0) { - float64_t __ret; +__ai __attribute__((target("neon"))) float64x2_t vfmsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__rev0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vfmaq_f64(__rev0, -__rev1, __rev2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vmaxnmvq_f32(float32x4_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__p0); +__ai __attribute__((target("neon"))) float64x1_t 
vfms_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = vfma_f64(__p0, -__p1, __p2); return __ret; } +#define vfmsd_lane_f64(__p0_426, __p1_426, __p2_426, __p3_426) __extension__ ({ \ + float64_t __ret_426; \ + float64_t __s0_426 = __p0_426; \ + float64_t __s1_426 = __p1_426; \ + float64x1_t __s2_426 = __p2_426; \ + __ret_426 = vfmad_lane_f64(__s0_426, -__s1_426, __s2_426, __p3_426); \ + __ret_426; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vfmss_lane_f32(__p0_427, __p1_427, __p2_427, __p3_427) __extension__ ({ \ + float32_t __ret_427; \ + float32_t __s0_427 = __p0_427; \ + float32_t __s1_427 = __p1_427; \ + float32x2_t __s2_427 = __p2_427; \ + __ret_427 = vfmas_lane_f32(__s0_427, -__s1_427, __s2_427, __p3_427); \ + __ret_427; \ +}) #else -__ai float32_t vmaxnmvq_f32(float32x4_t __p0) { - float32_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__rev0); - return __ret; -} +#define vfmss_lane_f32(__p0_428, __p1_428, __p2_428, __p3_428) __extension__ ({ \ + float32_t __ret_428; \ + float32_t __s0_428 = __p0_428; \ + float32_t __s1_428 = __p1_428; \ + float32x2_t __s2_428 = __p2_428; \ + float32x2_t __rev2_428; __rev2_428 = __builtin_shufflevector(__s2_428, __s2_428, 1, 0); \ + __ret_428 = __noswap_vfmas_lane_f32(__s0_428, -__s1_428, __rev2_428, __p3_428); \ + __ret_428; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32_t vmaxnmv_f32(float32x2_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__p0); - return __ret; -} +#define vfmsq_lane_f64(__p0_429, __p1_429, __p2_429, __p3_429) __extension__ ({ \ + float64x2_t __ret_429; \ + float64x2_t __s0_429 = __p0_429; \ + float64x2_t __s1_429 = __p1_429; \ + float64x1_t __s2_429 = __p2_429; \ + __ret_429 = vfmaq_lane_f64(__s0_429, -__s1_429, __s2_429, __p3_429); \ + __ret_429; \ +}) #else -__ai float32_t vmaxnmv_f32(float32x2_t __p0) { - float32_t __ret; - float32x2_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__rev0); - return __ret; -} +#define vfmsq_lane_f64(__p0_430, __p1_430, __p2_430, __p3_430) __extension__ ({ \ + float64x2_t __ret_430; \ + float64x2_t __s0_430 = __p0_430; \ + float64x2_t __s1_430 = __p1_430; \ + float64x1_t __s2_430 = __p2_430; \ + float64x2_t __rev0_430; __rev0_430 = __builtin_shufflevector(__s0_430, __s0_430, 1, 0); \ + float64x2_t __rev1_430; __rev1_430 = __builtin_shufflevector(__s1_430, __s1_430, 1, 0); \ + __ret_430 = __noswap_vfmaq_lane_f64(__rev0_430, -__rev1_430, __s2_430, __p3_430); \ + __ret_430 = __builtin_shufflevector(__ret_430, __ret_430, 1, 0); \ + __ret_430; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8_t vmaxvq_u8(uint8x16_t __p0) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__p0); - return __ret; -} +#define vfmsq_lane_f32(__p0_431, __p1_431, __p2_431, __p3_431) __extension__ ({ \ + float32x4_t __ret_431; \ + float32x4_t __s0_431 = __p0_431; \ + float32x4_t __s1_431 = __p1_431; \ + float32x2_t __s2_431 = __p2_431; \ + __ret_431 = vfmaq_lane_f32(__s0_431, -__s1_431, __s2_431, __p3_431); \ + __ret_431; \ +}) #else -__ai uint8_t vmaxvq_u8(uint8x16_t __p0) { - uint8_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__rev0); - return __ret; -} +#define vfmsq_lane_f32(__p0_432, __p1_432, __p2_432, __p3_432) __extension__ ({ \ + float32x4_t __ret_432; \ + float32x4_t __s0_432 = __p0_432; \ + float32x4_t __s1_432 = __p1_432; \ + float32x2_t __s2_432 = __p2_432; \ + float32x4_t __rev0_432; __rev0_432 = __builtin_shufflevector(__s0_432, __s0_432, 3, 2, 1, 0); \ + float32x4_t __rev1_432; __rev1_432 = __builtin_shufflevector(__s1_432, __s1_432, 3, 2, 1, 0); \ + float32x2_t __rev2_432; __rev2_432 = __builtin_shufflevector(__s2_432, __s2_432, 1, 0); \ + __ret_432 = 
__noswap_vfmaq_lane_f32(__rev0_432, -__rev1_432, __rev2_432, __p3_432); \ + __ret_432 = __builtin_shufflevector(__ret_432, __ret_432, 3, 2, 1, 0); \ + __ret_432; \ +}) #endif +#define vfms_lane_f64(__p0_433, __p1_433, __p2_433, __p3_433) __extension__ ({ \ + float64x1_t __ret_433; \ + float64x1_t __s0_433 = __p0_433; \ + float64x1_t __s1_433 = __p1_433; \ + float64x1_t __s2_433 = __p2_433; \ + __ret_433 = vfma_lane_f64(__s0_433, -__s1_433, __s2_433, __p3_433); \ + __ret_433; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai uint32_t vmaxvq_u32(uint32x4_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__p0); - return __ret; -} +#define vfms_lane_f32(__p0_434, __p1_434, __p2_434, __p3_434) __extension__ ({ \ + float32x2_t __ret_434; \ + float32x2_t __s0_434 = __p0_434; \ + float32x2_t __s1_434 = __p1_434; \ + float32x2_t __s2_434 = __p2_434; \ + __ret_434 = vfma_lane_f32(__s0_434, -__s1_434, __s2_434, __p3_434); \ + __ret_434; \ +}) #else -__ai uint32_t vmaxvq_u32(uint32x4_t __p0) { - uint32_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__rev0); - return __ret; -} +#define vfms_lane_f32(__p0_435, __p1_435, __p2_435, __p3_435) __extension__ ({ \ + float32x2_t __ret_435; \ + float32x2_t __s0_435 = __p0_435; \ + float32x2_t __s1_435 = __p1_435; \ + float32x2_t __s2_435 = __p2_435; \ + float32x2_t __rev0_435; __rev0_435 = __builtin_shufflevector(__s0_435, __s0_435, 1, 0); \ + float32x2_t __rev1_435; __rev1_435 = __builtin_shufflevector(__s1_435, __s1_435, 1, 0); \ + float32x2_t __rev2_435; __rev2_435 = __builtin_shufflevector(__s2_435, __s2_435, 1, 0); \ + __ret_435 = __noswap_vfma_lane_f32(__rev0_435, -__rev1_435, __rev2_435, __p3_435); \ + __ret_435 = __builtin_shufflevector(__ret_435, __ret_435, 1, 0); \ + __ret_435; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16_t vmaxvq_u16(uint16x8_t __p0) { - uint16_t __ret; - __ret = (uint16_t) 
__builtin_neon_vmaxvq_u16(__p0); - return __ret; -} -#else -__ai uint16_t vmaxvq_u16(uint16x8_t __p0) { - uint16_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8_t vmaxvq_s8(int8x16_t __p0) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vmaxvq_s8(__p0); - return __ret; -} +#define vfmsd_laneq_f64(__p0_436, __p1_436, __p2_436, __p3_436) __extension__ ({ \ + float64_t __ret_436; \ + float64_t __s0_436 = __p0_436; \ + float64_t __s1_436 = __p1_436; \ + float64x2_t __s2_436 = __p2_436; \ + __ret_436 = vfmad_laneq_f64(__s0_436, -__s1_436, __s2_436, __p3_436); \ + __ret_436; \ +}) #else -__ai int8_t vmaxvq_s8(int8x16_t __p0) { - int8_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8_t) __builtin_neon_vmaxvq_s8(__rev0); - return __ret; -} +#define vfmsd_laneq_f64(__p0_437, __p1_437, __p2_437, __p3_437) __extension__ ({ \ + float64_t __ret_437; \ + float64_t __s0_437 = __p0_437; \ + float64_t __s1_437 = __p1_437; \ + float64x2_t __s2_437 = __p2_437; \ + float64x2_t __rev2_437; __rev2_437 = __builtin_shufflevector(__s2_437, __s2_437, 1, 0); \ + __ret_437 = __noswap_vfmad_laneq_f64(__s0_437, -__s1_437, __rev2_437, __p3_437); \ + __ret_437; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float64_t vmaxvq_f64(float64x2_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vmaxvq_f64(__p0); - return __ret; -} +#define vfmss_laneq_f32(__p0_438, __p1_438, __p2_438, __p3_438) __extension__ ({ \ + float32_t __ret_438; \ + float32_t __s0_438 = __p0_438; \ + float32_t __s1_438 = __p1_438; \ + float32x4_t __s2_438 = __p2_438; \ + __ret_438 = vfmas_laneq_f32(__s0_438, -__s1_438, __s2_438, __p3_438); \ + __ret_438; \ +}) #else -__ai float64_t vmaxvq_f64(float64x2_t __p0) { - float64_t __ret; - float64x2_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64_t) __builtin_neon_vmaxvq_f64(__rev0); - return __ret; -} +#define vfmss_laneq_f32(__p0_439, __p1_439, __p2_439, __p3_439) __extension__ ({ \ + float32_t __ret_439; \ + float32_t __s0_439 = __p0_439; \ + float32_t __s1_439 = __p1_439; \ + float32x4_t __s2_439 = __p2_439; \ + float32x4_t __rev2_439; __rev2_439 = __builtin_shufflevector(__s2_439, __s2_439, 3, 2, 1, 0); \ + __ret_439 = __noswap_vfmas_laneq_f32(__s0_439, -__s1_439, __rev2_439, __p3_439); \ + __ret_439; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32_t vmaxvq_f32(float32x4_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vmaxvq_f32(__p0); - return __ret; -} +#define vfmsq_laneq_f64(__p0_440, __p1_440, __p2_440, __p3_440) __extension__ ({ \ + float64x2_t __ret_440; \ + float64x2_t __s0_440 = __p0_440; \ + float64x2_t __s1_440 = __p1_440; \ + float64x2_t __s2_440 = __p2_440; \ + __ret_440 = vfmaq_laneq_f64(__s0_440, -__s1_440, __s2_440, __p3_440); \ + __ret_440; \ +}) #else -__ai float32_t vmaxvq_f32(float32x4_t __p0) { - float32_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float32_t) __builtin_neon_vmaxvq_f32(__rev0); - return __ret; -} +#define vfmsq_laneq_f64(__p0_441, __p1_441, __p2_441, __p3_441) __extension__ ({ \ + float64x2_t __ret_441; \ + float64x2_t __s0_441 = __p0_441; \ + float64x2_t __s1_441 = __p1_441; \ + float64x2_t __s2_441 = __p2_441; \ + float64x2_t __rev0_441; __rev0_441 = __builtin_shufflevector(__s0_441, __s0_441, 1, 0); \ + float64x2_t __rev1_441; __rev1_441 = __builtin_shufflevector(__s1_441, __s1_441, 1, 0); \ + float64x2_t __rev2_441; __rev2_441 = __builtin_shufflevector(__s2_441, __s2_441, 1, 0); \ + __ret_441 = __noswap_vfmaq_laneq_f64(__rev0_441, -__rev1_441, __rev2_441, __p3_441); \ + __ret_441 = __builtin_shufflevector(__ret_441, __ret_441, 1, 0); \ + __ret_441; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32_t 
vmaxvq_s32(int32x4_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vmaxvq_s32(__p0); - return __ret; -} +#define vfmsq_laneq_f32(__p0_442, __p1_442, __p2_442, __p3_442) __extension__ ({ \ + float32x4_t __ret_442; \ + float32x4_t __s0_442 = __p0_442; \ + float32x4_t __s1_442 = __p1_442; \ + float32x4_t __s2_442 = __p2_442; \ + __ret_442 = vfmaq_laneq_f32(__s0_442, -__s1_442, __s2_442, __p3_442); \ + __ret_442; \ +}) #else -__ai int32_t vmaxvq_s32(int32x4_t __p0) { - int32_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int32_t) __builtin_neon_vmaxvq_s32(__rev0); - return __ret; -} +#define vfmsq_laneq_f32(__p0_443, __p1_443, __p2_443, __p3_443) __extension__ ({ \ + float32x4_t __ret_443; \ + float32x4_t __s0_443 = __p0_443; \ + float32x4_t __s1_443 = __p1_443; \ + float32x4_t __s2_443 = __p2_443; \ + float32x4_t __rev0_443; __rev0_443 = __builtin_shufflevector(__s0_443, __s0_443, 3, 2, 1, 0); \ + float32x4_t __rev1_443; __rev1_443 = __builtin_shufflevector(__s1_443, __s1_443, 3, 2, 1, 0); \ + float32x4_t __rev2_443; __rev2_443 = __builtin_shufflevector(__s2_443, __s2_443, 3, 2, 1, 0); \ + __ret_443 = __noswap_vfmaq_laneq_f32(__rev0_443, -__rev1_443, __rev2_443, __p3_443); \ + __ret_443 = __builtin_shufflevector(__ret_443, __ret_443, 3, 2, 1, 0); \ + __ret_443; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16_t vmaxvq_s16(int16x8_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vmaxvq_s16(__p0); - return __ret; -} +#define vfms_laneq_f64(__p0_444, __p1_444, __p2_444, __p3_444) __extension__ ({ \ + float64x1_t __ret_444; \ + float64x1_t __s0_444 = __p0_444; \ + float64x1_t __s1_444 = __p1_444; \ + float64x2_t __s2_444 = __p2_444; \ + __ret_444 = vfma_laneq_f64(__s0_444, -__s1_444, __s2_444, __p3_444); \ + __ret_444; \ +}) #else -__ai int16_t vmaxvq_s16(int16x8_t __p0) { - int16_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = 
(int16_t) __builtin_neon_vmaxvq_s16(__rev0); - return __ret; -} +#define vfms_laneq_f64(__p0_445, __p1_445, __p2_445, __p3_445) __extension__ ({ \ + float64x1_t __ret_445; \ + float64x1_t __s0_445 = __p0_445; \ + float64x1_t __s1_445 = __p1_445; \ + float64x2_t __s2_445 = __p2_445; \ + float64x2_t __rev2_445; __rev2_445 = __builtin_shufflevector(__s2_445, __s2_445, 1, 0); \ + __ret_445 = __noswap_vfma_laneq_f64(__s0_445, -__s1_445, __rev2_445, __p3_445); \ + __ret_445; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8_t vmaxv_u8(uint8x8_t __p0) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vmaxv_u8(__p0); - return __ret; -} +#define vfms_laneq_f32(__p0_446, __p1_446, __p2_446, __p3_446) __extension__ ({ \ + float32x2_t __ret_446; \ + float32x2_t __s0_446 = __p0_446; \ + float32x2_t __s1_446 = __p1_446; \ + float32x4_t __s2_446 = __p2_446; \ + __ret_446 = vfma_laneq_f32(__s0_446, -__s1_446, __s2_446, __p3_446); \ + __ret_446; \ +}) #else -__ai uint8_t vmaxv_u8(uint8x8_t __p0) { - uint8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8_t) __builtin_neon_vmaxv_u8(__rev0); - return __ret; -} +#define vfms_laneq_f32(__p0_447, __p1_447, __p2_447, __p3_447) __extension__ ({ \ + float32x2_t __ret_447; \ + float32x2_t __s0_447 = __p0_447; \ + float32x2_t __s1_447 = __p1_447; \ + float32x4_t __s2_447 = __p2_447; \ + float32x2_t __rev0_447; __rev0_447 = __builtin_shufflevector(__s0_447, __s0_447, 1, 0); \ + float32x2_t __rev1_447; __rev1_447 = __builtin_shufflevector(__s1_447, __s1_447, 1, 0); \ + float32x4_t __rev2_447; __rev2_447 = __builtin_shufflevector(__s2_447, __s2_447, 3, 2, 1, 0); \ + __ret_447 = __noswap_vfma_laneq_f32(__rev0_447, -__rev1_447, __rev2_447, __p3_447); \ + __ret_447 = __builtin_shufflevector(__ret_447, __ret_447, 1, 0); \ + __ret_447; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32_t vmaxv_u32(uint32x2_t __p0) { - uint32_t __ret; - __ret = (uint32_t) 
__builtin_neon_vmaxv_u32(__p0); +__ai __attribute__((target("neon"))) float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { + float64x2_t __ret; + __ret = vfmaq_f64(__p0, -__p1, (float64x2_t) {__p2, __p2}); return __ret; } #else -__ai uint32_t vmaxv_u32(uint32x2_t __p0) { - uint32_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint32_t) __builtin_neon_vmaxv_u32(__rev0); +__ai __attribute__((target("neon"))) float64x2_t vfmsq_n_f64(float64x2_t __p0, float64x2_t __p1, float64_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vfmaq_f64(__rev0, -__rev1, (float64x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16_t vmaxv_u16(uint16x4_t __p0) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vmaxv_u16(__p0); +__ai __attribute__((target("neon"))) float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + __ret = vfmaq_f32(__p0, -__p1, (float32x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #else -__ai uint16_t vmaxv_u16(uint16x4_t __p0) { - uint16_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16_t) __builtin_neon_vmaxv_u16(__rev0); +__ai __attribute__((target("neon"))) float32x4_t vfmsq_n_f32(float32x4_t __p0, float32x4_t __p1, float32_t __p2) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vfmaq_f32(__rev0, -__rev1, (float32x4_t) {__p2, __p2, __p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif -#ifdef __LITTLE_ENDIAN__ -__ai int8_t vmaxv_s8(int8x8_t __p0) { - int8_t __ret; 
- __ret = (int8_t) __builtin_neon_vmaxv_s8(__p0); - return __ret; -} -#else -__ai int8_t vmaxv_s8(int8x8_t __p0) { - int8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8_t) __builtin_neon_vmaxv_s8(__rev0); +__ai __attribute__((target("neon"))) float64x1_t vfms_n_f64(float64x1_t __p0, float64x1_t __p1, float64_t __p2) { + float64x1_t __ret; + __ret = vfma_f64(__p0, -__p1, (float64x1_t) {__p2}); return __ret; } -#endif - #ifdef __LITTLE_ENDIAN__ -__ai float32_t vmaxv_f32(float32x2_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vmaxv_f32(__p0); +__ai __attribute__((target("neon"))) float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; + __ret = vfma_f32(__p0, -__p1, (float32x2_t) {__p2, __p2}); return __ret; } #else -__ai float32_t vmaxv_f32(float32x2_t __p0) { - float32_t __ret; +__ai __attribute__((target("neon"))) float32x2_t vfms_n_f32(float32x2_t __p0, float32x2_t __p1, float32_t __p2) { + float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float32_t) __builtin_neon_vmaxv_f32(__rev0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vfma_f32(__rev0, -__rev1, (float32x2_t) {__p2, __p2}); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32_t vmaxv_s32(int32x2_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vmaxv_s32(__p0); +__ai __attribute__((target("neon"))) poly64x1_t vget_high_p64(poly64x2_t __p0) { + poly64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1); return __ret; } #else -__ai int32_t vmaxv_s32(int32x2_t __p0) { - int32_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int32_t) __builtin_neon_vmaxv_s32(__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16_t vmaxv_s16(int16x4_t __p0) { 
- int16_t __ret; - __ret = (int16_t) __builtin_neon_vmaxv_s16(__p0); +__ai __attribute__((target("neon"))) poly64x1_t vget_high_p64(poly64x2_t __p0) { + poly64x1_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1); return __ret; } -#else -__ai int16_t vmaxv_s16(int16x4_t __p0) { - int16_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int16_t) __builtin_neon_vmaxv_s16(__rev0); +__ai __attribute__((target("neon"))) poly64x1_t __noswap_vget_high_p64(poly64x2_t __p0) { + poly64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 1); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - return __ret; -} -#else -__ai float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) float64x1_t vget_high_f64(float64x2_t __p0) { float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai float64_t vminnmvq_f64(float64x2_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vminnmvq_f64(__p0); + __ret = __builtin_shufflevector(__p0, __p0, 1); return __ret; } #else -__ai float64_t vminnmvq_f64(float64x2_t __p0) { - float64_t __ret; +__ai __attribute__((target("neon"))) float64x1_t vget_high_f64(float64x2_t __p0) { + float64x1_t __ret; 
float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64_t) __builtin_neon_vminnmvq_f64(__rev0); + __ret = __builtin_shufflevector(__rev0, __rev0, 1); return __ret; } #endif +#define vget_lane_p64(__p0, __p1) __extension__ ({ \ + poly64_t __ret; \ + poly64x1_t __s0 = __p0; \ + __ret = (poly64_t) __builtin_neon_vget_lane_i64((poly64x1_t)__s0, __p1); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai float32_t vminnmvq_f32(float32x4_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vminnmvq_f32(__p0); - return __ret; -} +#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64_t __ret; \ + poly64x2_t __s0 = __p0; \ + __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \ + __ret; \ +}) #else -__ai float32_t vminnmvq_f32(float32x4_t __p0) { - float32_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float32_t) __builtin_neon_vminnmvq_f32(__rev0); - return __ret; -} +#define vgetq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64_t __ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_p64(__p0, __p1) __extension__ ({ \ + poly64_t __ret; \ + poly64x2_t __s0 = __p0; \ + __ret = (poly64_t) __builtin_neon_vgetq_lane_i64((poly64x2_t)__s0, __p1); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32_t vminnmv_f32(float32x2_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vminnmv_f32(__p0); - return __ret; -} +#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + float64x2_t __s0 = __p0; \ + __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \ + __ret; \ +}) #else -__ai float32_t vminnmv_f32(float32x2_t __p0) { - float32_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - 
__ret = (float32_t) __builtin_neon_vminnmv_f32(__rev0); - return __ret; -} +#define vgetq_lane_f64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + float64x2_t __s0 = __p0; \ + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__rev0, __p1); \ + __ret; \ +}) +#define __noswap_vgetq_lane_f64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + float64x2_t __s0 = __p0; \ + __ret = (float64_t) __builtin_neon_vgetq_lane_f64((float64x2_t)__s0, __p1); \ + __ret; \ +}) #endif +#define vget_lane_f64(__p0, __p1) __extension__ ({ \ + float64_t __ret; \ + float64x1_t __s0 = __p0; \ + __ret = (float64_t) __builtin_neon_vget_lane_f64((float64x1_t)__s0, __p1); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai uint8_t vminvq_u8(uint8x16_t __p0) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vminvq_u8(__p0); +__ai __attribute__((target("neon"))) poly64x1_t vget_low_p64(poly64x2_t __p0) { + poly64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0); return __ret; } #else -__ai uint8_t vminvq_u8(uint8x16_t __p0) { - uint8_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8_t) __builtin_neon_vminvq_u8(__rev0); +__ai __attribute__((target("neon"))) poly64x1_t vget_low_p64(poly64x2_t __p0) { + poly64x1_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32_t vminvq_u32(uint32x4_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vminvq_u32(__p0); +__ai __attribute__((target("neon"))) float64x1_t vget_low_f64(float64x2_t __p0) { + float64x1_t __ret; + __ret = __builtin_shufflevector(__p0, __p0, 0); return __ret; } #else -__ai uint32_t vminvq_u32(uint32x4_t __p0) { - uint32_t __ret; - uint32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint32_t) __builtin_neon_vminvq_u32(__rev0); +__ai __attribute__((target("neon"))) float64x1_t vget_low_f64(float64x2_t __p0) { + float64x1_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev0, 0); return __ret; } #endif +#define vld1_p64(__p0) __extension__ ({ \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vld1_v(__p0, 6); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai uint16_t vminvq_u16(uint16x8_t __p0) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vminvq_u16(__p0); - return __ret; -} +#define vld1q_p64(__p0) __extension__ ({ \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \ + __ret; \ +}) #else -__ai uint16_t vminvq_u16(uint16x8_t __p0) { - uint16_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16_t) __builtin_neon_vminvq_u16(__rev0); - return __ret; -} +#define vld1q_p64(__p0) __extension__ ({ \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_v(__p0, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8_t vminvq_s8(int8x16_t __p0) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vminvq_s8(__p0); - return __ret; -} +#define vld1q_f64(__p0) __extension__ ({ \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \ + __ret; \ +}) #else -__ai int8_t vminvq_s8(int8x16_t __p0) { - int8_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8_t) __builtin_neon_vminvq_s8(__rev0); - return __ret; -} +#define vld1q_f64(__p0) __extension__ ({ \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vld1q_v(__p0, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) #endif 
+#define vld1_f64(__p0) __extension__ ({ \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vld1_v(__p0, 10); \ + __ret; \ +}) +#define vld1_dup_p64(__p0) __extension__ ({ \ + poly64x1_t __ret; \ + __ret = (poly64x1_t) __builtin_neon_vld1_dup_v(__p0, 6); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai float64_t vminvq_f64(float64x2_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vminvq_f64(__p0); - return __ret; -} +#define vld1q_dup_p64(__p0) __extension__ ({ \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \ + __ret; \ +}) #else -__ai float64_t vminvq_f64(float64x2_t __p0) { - float64_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64_t) __builtin_neon_vminvq_f64(__rev0); - return __ret; -} +#define vld1q_dup_p64(__p0) __extension__ ({ \ + poly64x2_t __ret; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_dup_v(__p0, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32_t vminvq_f32(float32x4_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vminvq_f32(__p0); - return __ret; -} +#define vld1q_dup_f64(__p0) __extension__ ({ \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \ + __ret; \ +}) #else -__ai float32_t vminvq_f32(float32x4_t __p0) { - float32_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float32_t) __builtin_neon_vminvq_f32(__rev0); - return __ret; -} +#define vld1q_dup_f64(__p0) __extension__ ({ \ + float64x2_t __ret; \ + __ret = (float64x2_t) __builtin_neon_vld1q_dup_v(__p0, 42); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) #endif +#define vld1_dup_f64(__p0) __extension__ ({ \ + float64x1_t __ret; \ + __ret = (float64x1_t) __builtin_neon_vld1_dup_v(__p0, 10); \ + __ret; \ +}) +#define vld1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + 
poly64x1_t __ret; \ + poly64x1_t __s1 = __p1; \ + __ret = (poly64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai int32_t vminvq_s32(int32x4_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vminvq_s32(__p0); - return __ret; -} +#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s1 = __p1; \ + __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \ + __ret; \ +}) #else -__ai int32_t vminvq_s32(int32x4_t __p0) { - int32_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int32_t) __builtin_neon_vminvq_s32(__rev0); - return __ret; -} +#define vld1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (poly64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16_t vminvq_s16(int16x8_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vminvq_s16(__p0); - return __ret; -} +#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s1 = __p1; \ + __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \ + __ret; \ +}) #else -__ai int16_t vminvq_s16(int16x8_t __p0) { - int16_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16_t) __builtin_neon_vminvq_s16(__rev0); - return __ret; -} +#define vld1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __ret; \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (float64x2_t) __builtin_neon_vld1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \ + __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) #endif +#define vld1_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s1 = __p1; \ + __ret = (float64x1_t) __builtin_neon_vld1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \ + __ret; \ +}) +#define vld1_p64_x2(__p0) __extension__ ({ \ + poly64x1x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 6); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai uint8_t vminv_u8(uint8x8_t __p0) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vminv_u8(__p0); - return __ret; -} +#define vld1q_p64_x2(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \ + __ret; \ +}) #else -__ai uint8_t vminv_u8(uint8x8_t __p0) { - uint8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8_t) __builtin_neon_vminv_u8(__rev0); - return __ret; -} +#define vld1q_p64_x2(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32_t vminv_u32(uint32x2_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vminv_u32(__p0); - return __ret; -} +#define vld1q_f64_x2(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \ + __ret; \ +}) #else -__ai uint32_t vminv_u32(uint32x2_t __p0) { - uint32_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint32_t) __builtin_neon_vminv_u32(__rev0); - return __ret; -} +#define vld1q_f64_x2(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld1q_x2_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], 
__ret.val[1], 1, 0); \ + __ret; \ +}) #endif +#define vld1_f64_x2(__p0) __extension__ ({ \ + float64x1x2_t __ret; \ + __builtin_neon_vld1_x2_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld1_p64_x3(__p0) __extension__ ({ \ + poly64x1x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 6); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai uint16_t vminv_u16(uint16x4_t __p0) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vminv_u16(__p0); - return __ret; -} +#define vld1q_p64_x3(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \ + __ret; \ +}) #else -__ai uint16_t vminv_u16(uint16x4_t __p0) { - uint16_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (uint16_t) __builtin_neon_vminv_u16(__rev0); - return __ret; -} +#define vld1q_p64_x3(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8_t vminv_s8(int8x8_t __p0) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vminv_s8(__p0); - return __ret; -} -#else -__ai int8_t vminv_s8(int8x8_t __p0) { - int8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8_t) __builtin_neon_vminv_s8(__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32_t vminv_f32(float32x2_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vminv_f32(__p0); - return __ret; -} -#else -__ai float32_t vminv_f32(float32x2_t __p0) { - float32_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float32_t) __builtin_neon_vminv_f32(__rev0); - return __ret; -} -#endif - -#ifdef 
__LITTLE_ENDIAN__ -__ai int32_t vminv_s32(int32x2_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vminv_s32(__p0); - return __ret; -} -#else -__ai int32_t vminv_s32(int32x2_t __p0) { - int32_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int32_t) __builtin_neon_vminv_s32(__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16_t vminv_s16(int16x4_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vminv_s16(__p0); - return __ret; -} -#else -__ai int16_t vminv_s16(int16x4_t __p0) { - int16_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (int16_t) __builtin_neon_vminv_s16(__rev0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} -#else -__ai float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = __rev0 + __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { - float64x1_t __ret; - __ret = __p0 + __p1 * __p2; - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -#define vmlaq_laneq_u32(__p0_427, __p1_427, __p2_427, __p3_427) __extension__ ({ \ - uint32x4_t __ret_427; \ - uint32x4_t __s0_427 = __p0_427; \ - uint32x4_t __s1_427 = __p1_427; \ - uint32x4_t __s2_427 = __p2_427; \ - __ret_427 = __s0_427 + __s1_427 * splatq_laneq_u32(__s2_427, __p3_427); \ - __ret_427; \ +#define vld1q_f64_x3(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \ + 
__ret; \ }) #else -#define vmlaq_laneq_u32(__p0_428, __p1_428, __p2_428, __p3_428) __extension__ ({ \ - uint32x4_t __ret_428; \ - uint32x4_t __s0_428 = __p0_428; \ - uint32x4_t __s1_428 = __p1_428; \ - uint32x4_t __s2_428 = __p2_428; \ - uint32x4_t __rev0_428; __rev0_428 = __builtin_shufflevector(__s0_428, __s0_428, 3, 2, 1, 0); \ - uint32x4_t __rev1_428; __rev1_428 = __builtin_shufflevector(__s1_428, __s1_428, 3, 2, 1, 0); \ - uint32x4_t __rev2_428; __rev2_428 = __builtin_shufflevector(__s2_428, __s2_428, 3, 2, 1, 0); \ - __ret_428 = __rev0_428 + __rev1_428 * __noswap_splatq_laneq_u32(__rev2_428, __p3_428); \ - __ret_428 = __builtin_shufflevector(__ret_428, __ret_428, 3, 2, 1, 0); \ - __ret_428; \ +#define vld1q_f64_x3(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld1q_x3_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ }) #endif -#ifdef __LITTLE_ENDIAN__ -#define vmlaq_laneq_u16(__p0_429, __p1_429, __p2_429, __p3_429) __extension__ ({ \ - uint16x8_t __ret_429; \ - uint16x8_t __s0_429 = __p0_429; \ - uint16x8_t __s1_429 = __p1_429; \ - uint16x8_t __s2_429 = __p2_429; \ - __ret_429 = __s0_429 + __s1_429 * splatq_laneq_u16(__s2_429, __p3_429); \ - __ret_429; \ +#define vld1_f64_x3(__p0) __extension__ ({ \ + float64x1x3_t __ret; \ + __builtin_neon_vld1_x3_v(&__ret, __p0, 10); \ + __ret; \ }) -#else -#define vmlaq_laneq_u16(__p0_430, __p1_430, __p2_430, __p3_430) __extension__ ({ \ - uint16x8_t __ret_430; \ - uint16x8_t __s0_430 = __p0_430; \ - uint16x8_t __s1_430 = __p1_430; \ - uint16x8_t __s2_430 = __p2_430; \ - uint16x8_t __rev0_430; __rev0_430 = __builtin_shufflevector(__s0_430, __s0_430, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_430; __rev1_430 = __builtin_shufflevector(__s1_430, __s1_430, 7, 6, 5, 4, 3, 2, 1, 0); \ 
- uint16x8_t __rev2_430; __rev2_430 = __builtin_shufflevector(__s2_430, __s2_430, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_430 = __rev0_430 + __rev1_430 * __noswap_splatq_laneq_u16(__rev2_430, __p3_430); \ - __ret_430 = __builtin_shufflevector(__ret_430, __ret_430, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_430; \ +#define vld1_p64_x4(__p0) __extension__ ({ \ + poly64x1x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 6); \ + __ret; \ }) -#endif - #ifdef __LITTLE_ENDIAN__ -#define vmlaq_laneq_f32(__p0_431, __p1_431, __p2_431, __p3_431) __extension__ ({ \ - float32x4_t __ret_431; \ - float32x4_t __s0_431 = __p0_431; \ - float32x4_t __s1_431 = __p1_431; \ - float32x4_t __s2_431 = __p2_431; \ - __ret_431 = __s0_431 + __s1_431 * splatq_laneq_f32(__s2_431, __p3_431); \ - __ret_431; \ +#define vld1q_p64_x4(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \ + __ret; \ }) #else -#define vmlaq_laneq_f32(__p0_432, __p1_432, __p2_432, __p3_432) __extension__ ({ \ - float32x4_t __ret_432; \ - float32x4_t __s0_432 = __p0_432; \ - float32x4_t __s1_432 = __p1_432; \ - float32x4_t __s2_432 = __p2_432; \ - float32x4_t __rev0_432; __rev0_432 = __builtin_shufflevector(__s0_432, __s0_432, 3, 2, 1, 0); \ - float32x4_t __rev1_432; __rev1_432 = __builtin_shufflevector(__s1_432, __s1_432, 3, 2, 1, 0); \ - float32x4_t __rev2_432; __rev2_432 = __builtin_shufflevector(__s2_432, __s2_432, 3, 2, 1, 0); \ - __ret_432 = __rev0_432 + __rev1_432 * __noswap_splatq_laneq_f32(__rev2_432, __p3_432); \ - __ret_432 = __builtin_shufflevector(__ret_432, __ret_432, 3, 2, 1, 0); \ - __ret_432; \ +#define vld1q_p64_x4(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = 
__builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlaq_laneq_s32(__p0_433, __p1_433, __p2_433, __p3_433) __extension__ ({ \ - int32x4_t __ret_433; \ - int32x4_t __s0_433 = __p0_433; \ - int32x4_t __s1_433 = __p1_433; \ - int32x4_t __s2_433 = __p2_433; \ - __ret_433 = __s0_433 + __s1_433 * splatq_laneq_s32(__s2_433, __p3_433); \ - __ret_433; \ +#define vld1q_f64_x4(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \ + __ret; \ }) #else -#define vmlaq_laneq_s32(__p0_434, __p1_434, __p2_434, __p3_434) __extension__ ({ \ - int32x4_t __ret_434; \ - int32x4_t __s0_434 = __p0_434; \ - int32x4_t __s1_434 = __p1_434; \ - int32x4_t __s2_434 = __p2_434; \ - int32x4_t __rev0_434; __rev0_434 = __builtin_shufflevector(__s0_434, __s0_434, 3, 2, 1, 0); \ - int32x4_t __rev1_434; __rev1_434 = __builtin_shufflevector(__s1_434, __s1_434, 3, 2, 1, 0); \ - int32x4_t __rev2_434; __rev2_434 = __builtin_shufflevector(__s2_434, __s2_434, 3, 2, 1, 0); \ - __ret_434 = __rev0_434 + __rev1_434 * __noswap_splatq_laneq_s32(__rev2_434, __p3_434); \ - __ret_434 = __builtin_shufflevector(__ret_434, __ret_434, 3, 2, 1, 0); \ - __ret_434; \ +#define vld1q_f64_x4(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld1q_x4_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ }) #endif -#ifdef __LITTLE_ENDIAN__ -#define vmlaq_laneq_s16(__p0_435, __p1_435, __p2_435, __p3_435) __extension__ ({ \ - int16x8_t __ret_435; \ - int16x8_t __s0_435 = __p0_435; \ - int16x8_t __s1_435 = __p1_435; \ - int16x8_t __s2_435 = __p2_435; \ - __ret_435 = __s0_435 + __s1_435 * splatq_laneq_s16(__s2_435, 
__p3_435); \ - __ret_435; \ +#define vld1_f64_x4(__p0) __extension__ ({ \ + float64x1x4_t __ret; \ + __builtin_neon_vld1_x4_v(&__ret, __p0, 10); \ + __ret; \ }) -#else -#define vmlaq_laneq_s16(__p0_436, __p1_436, __p2_436, __p3_436) __extension__ ({ \ - int16x8_t __ret_436; \ - int16x8_t __s0_436 = __p0_436; \ - int16x8_t __s1_436 = __p1_436; \ - int16x8_t __s2_436 = __p2_436; \ - int16x8_t __rev0_436; __rev0_436 = __builtin_shufflevector(__s0_436, __s0_436, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_436; __rev1_436 = __builtin_shufflevector(__s1_436, __s1_436, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2_436; __rev2_436 = __builtin_shufflevector(__s2_436, __s2_436, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_436 = __rev0_436 + __rev1_436 * __noswap_splatq_laneq_s16(__rev2_436, __p3_436); \ - __ret_436 = __builtin_shufflevector(__ret_436, __ret_436, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_436; \ +#define vld2_p64(__p0) __extension__ ({ \ + poly64x1x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 6); \ + __ret; \ }) -#endif - #ifdef __LITTLE_ENDIAN__ -#define vmla_laneq_u32(__p0_437, __p1_437, __p2_437, __p3_437) __extension__ ({ \ - uint32x2_t __ret_437; \ - uint32x2_t __s0_437 = __p0_437; \ - uint32x2_t __s1_437 = __p1_437; \ - uint32x4_t __s2_437 = __p2_437; \ - __ret_437 = __s0_437 + __s1_437 * splat_laneq_u32(__s2_437, __p3_437); \ - __ret_437; \ +#define vld2q_p64(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 38); \ + __ret; \ }) #else -#define vmla_laneq_u32(__p0_438, __p1_438, __p2_438, __p3_438) __extension__ ({ \ - uint32x2_t __ret_438; \ - uint32x2_t __s0_438 = __p0_438; \ - uint32x2_t __s1_438 = __p1_438; \ - uint32x4_t __s2_438 = __p2_438; \ - uint32x2_t __rev0_438; __rev0_438 = __builtin_shufflevector(__s0_438, __s0_438, 1, 0); \ - uint32x2_t __rev1_438; __rev1_438 = __builtin_shufflevector(__s1_438, __s1_438, 1, 0); \ - uint32x4_t __rev2_438; __rev2_438 = __builtin_shufflevector(__s2_438, __s2_438, 3, 2, 1, 0); \ - 
__ret_438 = __rev0_438 + __rev1_438 * __noswap_splat_laneq_u32(__rev2_438, __p3_438); \ - __ret_438 = __builtin_shufflevector(__ret_438, __ret_438, 1, 0); \ - __ret_438; \ +#define vld2q_p64(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmla_laneq_u16(__p0_439, __p1_439, __p2_439, __p3_439) __extension__ ({ \ - uint16x4_t __ret_439; \ - uint16x4_t __s0_439 = __p0_439; \ - uint16x4_t __s1_439 = __p1_439; \ - uint16x8_t __s2_439 = __p2_439; \ - __ret_439 = __s0_439 + __s1_439 * splat_laneq_u16(__s2_439, __p3_439); \ - __ret_439; \ +#define vld2q_u64(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 51); \ + __ret; \ }) #else -#define vmla_laneq_u16(__p0_440, __p1_440, __p2_440, __p3_440) __extension__ ({ \ - uint16x4_t __ret_440; \ - uint16x4_t __s0_440 = __p0_440; \ - uint16x4_t __s1_440 = __p1_440; \ - uint16x8_t __s2_440 = __p2_440; \ - uint16x4_t __rev0_440; __rev0_440 = __builtin_shufflevector(__s0_440, __s0_440, 3, 2, 1, 0); \ - uint16x4_t __rev1_440; __rev1_440 = __builtin_shufflevector(__s1_440, __s1_440, 3, 2, 1, 0); \ - uint16x8_t __rev2_440; __rev2_440 = __builtin_shufflevector(__s2_440, __s2_440, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_440 = __rev0_440 + __rev1_440 * __noswap_splat_laneq_u16(__rev2_440, __p3_440); \ - __ret_440 = __builtin_shufflevector(__ret_440, __ret_440, 3, 2, 1, 0); \ - __ret_440; \ +#define vld2q_u64(__p0) __extension__ ({ \ + uint64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmla_laneq_f32(__p0_441, __p1_441, 
__p2_441, __p3_441) __extension__ ({ \ - float32x2_t __ret_441; \ - float32x2_t __s0_441 = __p0_441; \ - float32x2_t __s1_441 = __p1_441; \ - float32x4_t __s2_441 = __p2_441; \ - __ret_441 = __s0_441 + __s1_441 * splat_laneq_f32(__s2_441, __p3_441); \ - __ret_441; \ +#define vld2q_f64(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 42); \ + __ret; \ }) #else -#define vmla_laneq_f32(__p0_442, __p1_442, __p2_442, __p3_442) __extension__ ({ \ - float32x2_t __ret_442; \ - float32x2_t __s0_442 = __p0_442; \ - float32x2_t __s1_442 = __p1_442; \ - float32x4_t __s2_442 = __p2_442; \ - float32x2_t __rev0_442; __rev0_442 = __builtin_shufflevector(__s0_442, __s0_442, 1, 0); \ - float32x2_t __rev1_442; __rev1_442 = __builtin_shufflevector(__s1_442, __s1_442, 1, 0); \ - float32x4_t __rev2_442; __rev2_442 = __builtin_shufflevector(__s2_442, __s2_442, 3, 2, 1, 0); \ - __ret_442 = __rev0_442 + __rev1_442 * __noswap_splat_laneq_f32(__rev2_442, __p3_442); \ - __ret_442 = __builtin_shufflevector(__ret_442, __ret_442, 1, 0); \ - __ret_442; \ +#define vld2q_f64(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmla_laneq_s32(__p0_443, __p1_443, __p2_443, __p3_443) __extension__ ({ \ - int32x2_t __ret_443; \ - int32x2_t __s0_443 = __p0_443; \ - int32x2_t __s1_443 = __p1_443; \ - int32x4_t __s2_443 = __p2_443; \ - __ret_443 = __s0_443 + __s1_443 * splat_laneq_s32(__s2_443, __p3_443); \ - __ret_443; \ +#define vld2q_s64(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 35); \ + __ret; \ }) #else -#define vmla_laneq_s32(__p0_444, __p1_444, __p2_444, __p3_444) __extension__ ({ \ - int32x2_t __ret_444; \ - int32x2_t __s0_444 = __p0_444; \ - int32x2_t __s1_444 
= __p1_444; \ - int32x4_t __s2_444 = __p2_444; \ - int32x2_t __rev0_444; __rev0_444 = __builtin_shufflevector(__s0_444, __s0_444, 1, 0); \ - int32x2_t __rev1_444; __rev1_444 = __builtin_shufflevector(__s1_444, __s1_444, 1, 0); \ - int32x4_t __rev2_444; __rev2_444 = __builtin_shufflevector(__s2_444, __s2_444, 3, 2, 1, 0); \ - __ret_444 = __rev0_444 + __rev1_444 * __noswap_splat_laneq_s32(__rev2_444, __p3_444); \ - __ret_444 = __builtin_shufflevector(__ret_444, __ret_444, 1, 0); \ - __ret_444; \ +#define vld2q_s64(__p0) __extension__ ({ \ + int64x2x2_t __ret; \ + __builtin_neon_vld2q_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ }) #endif -#ifdef __LITTLE_ENDIAN__ -#define vmla_laneq_s16(__p0_445, __p1_445, __p2_445, __p3_445) __extension__ ({ \ - int16x4_t __ret_445; \ - int16x4_t __s0_445 = __p0_445; \ - int16x4_t __s1_445 = __p1_445; \ - int16x8_t __s2_445 = __p2_445; \ - __ret_445 = __s0_445 + __s1_445 * splat_laneq_s16(__s2_445, __p3_445); \ - __ret_445; \ +#define vld2_f64(__p0) __extension__ ({ \ + float64x1x2_t __ret; \ + __builtin_neon_vld2_v(&__ret, __p0, 10); \ + __ret; \ }) -#else -#define vmla_laneq_s16(__p0_446, __p1_446, __p2_446, __p3_446) __extension__ ({ \ - int16x4_t __ret_446; \ - int16x4_t __s0_446 = __p0_446; \ - int16x4_t __s1_446 = __p1_446; \ - int16x8_t __s2_446 = __p2_446; \ - int16x4_t __rev0_446; __rev0_446 = __builtin_shufflevector(__s0_446, __s0_446, 3, 2, 1, 0); \ - int16x4_t __rev1_446; __rev1_446 = __builtin_shufflevector(__s1_446, __s1_446, 3, 2, 1, 0); \ - int16x8_t __rev2_446; __rev2_446 = __builtin_shufflevector(__s2_446, __s2_446, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_446 = __rev0_446 + __rev1_446 * __noswap_splat_laneq_s16(__rev2_446, __p3_446); \ - __ret_446 = __builtin_shufflevector(__ret_446, __ret_446, 3, 2, 1, 0); \ - __ret_446; \ +#define vld2_dup_p64(__p0) __extension__ ({ \ + 
poly64x1x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 6); \ + __ret; \ }) -#endif - #ifdef __LITTLE_ENDIAN__ -#define vmlal_high_lane_u32(__p0_447, __p1_447, __p2_447, __p3_447) __extension__ ({ \ - uint64x2_t __ret_447; \ - uint64x2_t __s0_447 = __p0_447; \ - uint32x4_t __s1_447 = __p1_447; \ - uint32x2_t __s2_447 = __p2_447; \ - __ret_447 = __s0_447 + vmull_u32(vget_high_u32(__s1_447), splat_lane_u32(__s2_447, __p3_447)); \ - __ret_447; \ +#define vld2q_dup_p64(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \ + __ret; \ }) #else -#define vmlal_high_lane_u32(__p0_448, __p1_448, __p2_448, __p3_448) __extension__ ({ \ - uint64x2_t __ret_448; \ - uint64x2_t __s0_448 = __p0_448; \ - uint32x4_t __s1_448 = __p1_448; \ - uint32x2_t __s2_448 = __p2_448; \ - uint64x2_t __rev0_448; __rev0_448 = __builtin_shufflevector(__s0_448, __s0_448, 1, 0); \ - uint32x4_t __rev1_448; __rev1_448 = __builtin_shufflevector(__s1_448, __s1_448, 3, 2, 1, 0); \ - uint32x2_t __rev2_448; __rev2_448 = __builtin_shufflevector(__s2_448, __s2_448, 1, 0); \ - __ret_448 = __rev0_448 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_448), __noswap_splat_lane_u32(__rev2_448, __p3_448)); \ - __ret_448 = __builtin_shufflevector(__ret_448, __ret_448, 1, 0); \ - __ret_448; \ +#define vld2q_dup_p64(__p0) __extension__ ({ \ + poly64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_high_lane_u16(__p0_449, __p1_449, __p2_449, __p3_449) __extension__ ({ \ - uint32x4_t __ret_449; \ - uint32x4_t __s0_449 = __p0_449; \ - uint16x8_t __s1_449 = __p1_449; \ - uint16x4_t __s2_449 = __p2_449; \ - __ret_449 = __s0_449 + vmull_u16(vget_high_u16(__s1_449), splat_lane_u16(__s2_449, __p3_449)); \ - __ret_449; \ +#define 
vld2q_dup_f64(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \ + __ret; \ }) #else -#define vmlal_high_lane_u16(__p0_450, __p1_450, __p2_450, __p3_450) __extension__ ({ \ - uint32x4_t __ret_450; \ - uint32x4_t __s0_450 = __p0_450; \ - uint16x8_t __s1_450 = __p1_450; \ - uint16x4_t __s2_450 = __p2_450; \ - uint32x4_t __rev0_450; __rev0_450 = __builtin_shufflevector(__s0_450, __s0_450, 3, 2, 1, 0); \ - uint16x8_t __rev1_450; __rev1_450 = __builtin_shufflevector(__s1_450, __s1_450, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev2_450; __rev2_450 = __builtin_shufflevector(__s2_450, __s2_450, 3, 2, 1, 0); \ - __ret_450 = __rev0_450 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_450), __noswap_splat_lane_u16(__rev2_450, __p3_450)); \ - __ret_450 = __builtin_shufflevector(__ret_450, __ret_450, 3, 2, 1, 0); \ - __ret_450; \ +#define vld2q_dup_f64(__p0) __extension__ ({ \ + float64x2x2_t __ret; \ + __builtin_neon_vld2q_dup_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ }) #endif -#ifdef __LITTLE_ENDIAN__ -#define vmlal_high_lane_s32(__p0_451, __p1_451, __p2_451, __p3_451) __extension__ ({ \ - int64x2_t __ret_451; \ - int64x2_t __s0_451 = __p0_451; \ - int32x4_t __s1_451 = __p1_451; \ - int32x2_t __s2_451 = __p2_451; \ - __ret_451 = __s0_451 + vmull_s32(vget_high_s32(__s1_451), splat_lane_s32(__s2_451, __p3_451)); \ - __ret_451; \ +#define vld2_dup_f64(__p0) __extension__ ({ \ + float64x1x2_t __ret; \ + __builtin_neon_vld2_dup_v(&__ret, __p0, 10); \ + __ret; \ }) -#else -#define vmlal_high_lane_s32(__p0_452, __p1_452, __p2_452, __p3_452) __extension__ ({ \ - int64x2_t __ret_452; \ - int64x2_t __s0_452 = __p0_452; \ - int32x4_t __s1_452 = __p1_452; \ - int32x2_t __s2_452 = __p2_452; \ - int64x2_t __rev0_452; __rev0_452 = __builtin_shufflevector(__s0_452, __s0_452, 1, 0); \ - 
int32x4_t __rev1_452; __rev1_452 = __builtin_shufflevector(__s1_452, __s1_452, 3, 2, 1, 0); \ - int32x2_t __rev2_452; __rev2_452 = __builtin_shufflevector(__s2_452, __s2_452, 1, 0); \ - __ret_452 = __rev0_452 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_452), __noswap_splat_lane_s32(__rev2_452, __p3_452)); \ - __ret_452 = __builtin_shufflevector(__ret_452, __ret_452, 1, 0); \ - __ret_452; \ +#define vld2_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x2_t __ret; \ + poly64x1x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \ + __ret; \ }) -#endif - #ifdef __LITTLE_ENDIAN__ -#define vmlal_high_lane_s16(__p0_453, __p1_453, __p2_453, __p3_453) __extension__ ({ \ - int32x4_t __ret_453; \ - int32x4_t __s0_453 = __p0_453; \ - int16x8_t __s1_453 = __p1_453; \ - int16x4_t __s2_453 = __p2_453; \ - __ret_453 = __s0_453 + vmull_s16(vget_high_s16(__s1_453), splat_lane_s16(__s2_453, __p3_453)); \ - __ret_453; \ +#define vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x2_t __ret; \ + poly8x16x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \ + __ret; \ }) #else -#define vmlal_high_lane_s16(__p0_454, __p1_454, __p2_454, __p3_454) __extension__ ({ \ - int32x4_t __ret_454; \ - int32x4_t __s0_454 = __p0_454; \ - int16x8_t __s1_454 = __p1_454; \ - int16x4_t __s2_454 = __p2_454; \ - int32x4_t __rev0_454; __rev0_454 = __builtin_shufflevector(__s0_454, __s0_454, 3, 2, 1, 0); \ - int16x8_t __rev1_454; __rev1_454 = __builtin_shufflevector(__s1_454, __s1_454, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_454; __rev2_454 = __builtin_shufflevector(__s2_454, __s2_454, 3, 2, 1, 0); \ - __ret_454 = __rev0_454 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_454), __noswap_splat_lane_s16(__rev2_454, __p3_454)); \ - __ret_454 = __builtin_shufflevector(__ret_454, __ret_454, 3, 2, 1, 0); \ - __ret_454; \ +#define 
vld2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x2_t __ret; \ + poly8x16x2_t __s1 = __p1; \ + poly8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_high_laneq_u32(__p0_455, __p1_455, __p2_455, __p3_455) __extension__ ({ \ - uint64x2_t __ret_455; \ - uint64x2_t __s0_455 = __p0_455; \ - uint32x4_t __s1_455 = __p1_455; \ - uint32x4_t __s2_455 = __p2_455; \ - __ret_455 = __s0_455 + vmull_u32(vget_high_u32(__s1_455), splat_laneq_u32(__s2_455, __p3_455)); \ - __ret_455; \ +#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x2_t __ret; \ + poly64x2x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \ + __ret; \ }) #else -#define vmlal_high_laneq_u32(__p0_456, __p1_456, __p2_456, __p3_456) __extension__ ({ \ - uint64x2_t __ret_456; \ - uint64x2_t __s0_456 = __p0_456; \ - uint32x4_t __s1_456 = __p1_456; \ - uint32x4_t __s2_456 = __p2_456; \ - uint64x2_t __rev0_456; __rev0_456 = __builtin_shufflevector(__s0_456, __s0_456, 1, 0); \ - uint32x4_t __rev1_456; __rev1_456 = __builtin_shufflevector(__s1_456, __s1_456, 3, 2, 1, 0); \ - uint32x4_t __rev2_456; __rev2_456 = __builtin_shufflevector(__s2_456, __s2_456, 3, 2, 1, 0); \ - __ret_456 = __rev0_456 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_456), __noswap_splat_laneq_u32(__rev2_456, 
__p3_456)); \ - __ret_456 = __builtin_shufflevector(__ret_456, __ret_456, 1, 0); \ - __ret_456; \ +#define vld2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x2_t __ret; \ + poly64x2x2_t __s1 = __p1; \ + poly64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_high_laneq_u16(__p0_457, __p1_457, __p2_457, __p3_457) __extension__ ({ \ - uint32x4_t __ret_457; \ - uint32x4_t __s0_457 = __p0_457; \ - uint16x8_t __s1_457 = __p1_457; \ - uint16x8_t __s2_457 = __p2_457; \ - __ret_457 = __s0_457 + vmull_u16(vget_high_u16(__s1_457), splat_laneq_u16(__s2_457, __p3_457)); \ - __ret_457; \ +#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x2_t __ret; \ + uint8x16x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \ + __ret; \ }) #else -#define vmlal_high_laneq_u16(__p0_458, __p1_458, __p2_458, __p3_458) __extension__ ({ \ - uint32x4_t __ret_458; \ - uint32x4_t __s0_458 = __p0_458; \ - uint16x8_t __s1_458 = __p1_458; \ - uint16x8_t __s2_458 = __p2_458; \ - uint32x4_t __rev0_458; __rev0_458 = __builtin_shufflevector(__s0_458, __s0_458, 3, 2, 1, 0); \ - uint16x8_t __rev1_458; __rev1_458 = __builtin_shufflevector(__s1_458, __s1_458, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev2_458; __rev2_458 = __builtin_shufflevector(__s2_458, __s2_458, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_458 = __rev0_458 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_458), __noswap_splat_laneq_u16(__rev2_458, __p3_458)); \ - __ret_458 = 
__builtin_shufflevector(__ret_458, __ret_458, 3, 2, 1, 0); \ - __ret_458; \ +#define vld2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x2_t __ret; \ + uint8x16x2_t __s1 = __p1; \ + uint8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_high_laneq_s32(__p0_459, __p1_459, __p2_459, __p3_459) __extension__ ({ \ - int64x2_t __ret_459; \ - int64x2_t __s0_459 = __p0_459; \ - int32x4_t __s1_459 = __p1_459; \ - int32x4_t __s2_459 = __p2_459; \ - __ret_459 = __s0_459 + vmull_s32(vget_high_s32(__s1_459), splat_laneq_s32(__s2_459, __p3_459)); \ - __ret_459; \ +#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x2_t __ret; \ + uint64x2x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \ + __ret; \ }) #else -#define vmlal_high_laneq_s32(__p0_460, __p1_460, __p2_460, __p3_460) __extension__ ({ \ - int64x2_t __ret_460; \ - int64x2_t __s0_460 = __p0_460; \ - int32x4_t __s1_460 = __p1_460; \ - int32x4_t __s2_460 = __p2_460; \ - int64x2_t __rev0_460; __rev0_460 = __builtin_shufflevector(__s0_460, __s0_460, 1, 0); \ - int32x4_t __rev1_460; __rev1_460 = __builtin_shufflevector(__s1_460, __s1_460, 3, 2, 1, 0); \ - int32x4_t __rev2_460; __rev2_460 = __builtin_shufflevector(__s2_460, __s2_460, 3, 2, 1, 0); \ - __ret_460 = __rev0_460 + 
__noswap_vmull_s32(__noswap_vget_high_s32(__rev1_460), __noswap_splat_laneq_s32(__rev2_460, __p3_460)); \ - __ret_460 = __builtin_shufflevector(__ret_460, __ret_460, 1, 0); \ - __ret_460; \ +#define vld2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x2_t __ret; \ + uint64x2x2_t __s1 = __p1; \ + uint64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_high_laneq_s16(__p0_461, __p1_461, __p2_461, __p3_461) __extension__ ({ \ - int32x4_t __ret_461; \ - int32x4_t __s0_461 = __p0_461; \ - int16x8_t __s1_461 = __p1_461; \ - int16x8_t __s2_461 = __p2_461; \ - __ret_461 = __s0_461 + vmull_s16(vget_high_s16(__s1_461), splat_laneq_s16(__s2_461, __p3_461)); \ - __ret_461; \ +#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x2_t __ret; \ + int8x16x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \ + __ret; \ }) #else -#define vmlal_high_laneq_s16(__p0_462, __p1_462, __p2_462, __p3_462) __extension__ ({ \ - int32x4_t __ret_462; \ - int32x4_t __s0_462 = __p0_462; \ - int16x8_t __s1_462 = __p1_462; \ - int16x8_t __s2_462 = __p2_462; \ - int32x4_t __rev0_462; __rev0_462 = __builtin_shufflevector(__s0_462, __s0_462, 3, 2, 1, 0); \ - int16x8_t __rev1_462; __rev1_462 = __builtin_shufflevector(__s1_462, __s1_462, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2_462; __rev2_462 = __builtin_shufflevector(__s2_462, __s2_462, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_462 = __rev0_462 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_462), 
__noswap_splat_laneq_s16(__rev2_462, __p3_462)); \ - __ret_462 = __builtin_shufflevector(__ret_462, __ret_462, 3, 2, 1, 0); \ - __ret_462; \ +#define vld2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x2_t __ret; \ + int8x16x2_t __s1 = __p1; \ + int8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_laneq_u32(__p0_463, __p1_463, __p2_463, __p3_463) __extension__ ({ \ - uint64x2_t __ret_463; \ - uint64x2_t __s0_463 = __p0_463; \ - uint32x2_t __s1_463 = __p1_463; \ - uint32x4_t __s2_463 = __p2_463; \ - __ret_463 = __s0_463 + vmull_u32(__s1_463, splat_laneq_u32(__s2_463, __p3_463)); \ - __ret_463; \ +#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x2_t __ret; \ + float64x2x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \ + __ret; \ }) #else -#define vmlal_laneq_u32(__p0_464, __p1_464, __p2_464, __p3_464) __extension__ ({ \ - uint64x2_t __ret_464; \ - uint64x2_t __s0_464 = __p0_464; \ - uint32x2_t __s1_464 = __p1_464; \ - uint32x4_t __s2_464 = __p2_464; \ - uint64x2_t __rev0_464; __rev0_464 = __builtin_shufflevector(__s0_464, __s0_464, 1, 0); \ - uint32x2_t __rev1_464; __rev1_464 = __builtin_shufflevector(__s1_464, __s1_464, 1, 0); \ - uint32x4_t __rev2_464; __rev2_464 = __builtin_shufflevector(__s2_464, __s2_464, 3, 2, 1, 0); \ - 
__ret_464 = __rev0_464 + __noswap_vmull_u32(__rev1_464, __noswap_splat_laneq_u32(__rev2_464, __p3_464)); \ - __ret_464 = __builtin_shufflevector(__ret_464, __ret_464, 1, 0); \ - __ret_464; \ +#define vld2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x2_t __ret; \ + float64x2x2_t __s1 = __p1; \ + float64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_laneq_u16(__p0_465, __p1_465, __p2_465, __p3_465) __extension__ ({ \ - uint32x4_t __ret_465; \ - uint32x4_t __s0_465 = __p0_465; \ - uint16x4_t __s1_465 = __p1_465; \ - uint16x8_t __s2_465 = __p2_465; \ - __ret_465 = __s0_465 + vmull_u16(__s1_465, splat_laneq_u16(__s2_465, __p3_465)); \ - __ret_465; \ +#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x2_t __ret; \ + int64x2x2_t __s1 = __p1; \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \ + __ret; \ }) #else -#define vmlal_laneq_u16(__p0_466, __p1_466, __p2_466, __p3_466) __extension__ ({ \ - uint32x4_t __ret_466; \ - uint32x4_t __s0_466 = __p0_466; \ - uint16x4_t __s1_466 = __p1_466; \ - uint16x8_t __s2_466 = __p2_466; \ - uint32x4_t __rev0_466; __rev0_466 = __builtin_shufflevector(__s0_466, __s0_466, 3, 2, 1, 0); \ - uint16x4_t __rev1_466; __rev1_466 = __builtin_shufflevector(__s1_466, __s1_466, 3, 2, 1, 0); \ - uint16x8_t __rev2_466; __rev2_466 = __builtin_shufflevector(__s2_466, __s2_466, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_466 = __rev0_466 + __noswap_vmull_u16(__rev1_466, __noswap_splat_laneq_u16(__rev2_466, __p3_466)); \ - 
__ret_466 = __builtin_shufflevector(__ret_466, __ret_466, 3, 2, 1, 0); \ - __ret_466; \ +#define vld2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x2_t __ret; \ + int64x2x2_t __s1 = __p1; \ + int64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vld2q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret; \ }) #endif -#ifdef __LITTLE_ENDIAN__ -#define vmlal_laneq_s32(__p0_467, __p1_467, __p2_467, __p3_467) __extension__ ({ \ - int64x2_t __ret_467; \ - int64x2_t __s0_467 = __p0_467; \ - int32x2_t __s1_467 = __p1_467; \ - int32x4_t __s2_467 = __p2_467; \ - __ret_467 = __s0_467 + vmull_s32(__s1_467, splat_laneq_s32(__s2_467, __p3_467)); \ - __ret_467; \ +#define vld2_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x2_t __ret; \ + uint64x1x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \ + __ret; \ }) -#else -#define vmlal_laneq_s32(__p0_468, __p1_468, __p2_468, __p3_468) __extension__ ({ \ - int64x2_t __ret_468; \ - int64x2_t __s0_468 = __p0_468; \ - int32x2_t __s1_468 = __p1_468; \ - int32x4_t __s2_468 = __p2_468; \ - int64x2_t __rev0_468; __rev0_468 = __builtin_shufflevector(__s0_468, __s0_468, 1, 0); \ - int32x2_t __rev1_468; __rev1_468 = __builtin_shufflevector(__s1_468, __s1_468, 1, 0); \ - int32x4_t __rev2_468; __rev2_468 = __builtin_shufflevector(__s2_468, __s2_468, 3, 2, 1, 0); \ - __ret_468 = __rev0_468 + __noswap_vmull_s32(__rev1_468, __noswap_splat_laneq_s32(__rev2_468, __p3_468)); \ - __ret_468 = __builtin_shufflevector(__ret_468, __ret_468, 1, 0); \ - __ret_468; \ +#define vld2_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + 
float64x1x2_t __ret; \ + float64x1x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \ + __ret; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmlal_laneq_s16(__p0_469, __p1_469, __p2_469, __p3_469) __extension__ ({ \ - int32x4_t __ret_469; \ - int32x4_t __s0_469 = __p0_469; \ - int16x4_t __s1_469 = __p1_469; \ - int16x8_t __s2_469 = __p2_469; \ - __ret_469 = __s0_469 + vmull_s16(__s1_469, splat_laneq_s16(__s2_469, __p3_469)); \ - __ret_469; \ +#define vld2_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x2_t __ret; \ + int64x1x2_t __s1 = __p1; \ + __builtin_neon_vld2_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \ + __ret; \ }) -#else -#define vmlal_laneq_s16(__p0_470, __p1_470, __p2_470, __p3_470) __extension__ ({ \ - int32x4_t __ret_470; \ - int32x4_t __s0_470 = __p0_470; \ - int16x4_t __s1_470 = __p1_470; \ - int16x8_t __s2_470 = __p2_470; \ - int32x4_t __rev0_470; __rev0_470 = __builtin_shufflevector(__s0_470, __s0_470, 3, 2, 1, 0); \ - int16x4_t __rev1_470; __rev1_470 = __builtin_shufflevector(__s1_470, __s1_470, 3, 2, 1, 0); \ - int16x8_t __rev2_470; __rev2_470 = __builtin_shufflevector(__s2_470, __s2_470, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_470 = __rev0_470 + __noswap_vmull_s16(__rev1_470, __noswap_splat_laneq_s16(__rev2_470, __p3_470)); \ - __ret_470 = __builtin_shufflevector(__ret_470, __ret_470, 3, 2, 1, 0); \ - __ret_470; \ +#define vld3_p64(__p0) __extension__ ({ \ + poly64x1x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 6); \ + __ret; \ }) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} -#else -__ai float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 1, 0); - float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = __rev0 - __rev1 * __rev2; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { - float64x1_t __ret; - __ret = __p0 - __p1 * __p2; - return __ret; -} #ifdef __LITTLE_ENDIAN__ -#define vmlsq_laneq_u32(__p0_471, __p1_471, __p2_471, __p3_471) __extension__ ({ \ - uint32x4_t __ret_471; \ - uint32x4_t __s0_471 = __p0_471; \ - uint32x4_t __s1_471 = __p1_471; \ - uint32x4_t __s2_471 = __p2_471; \ - __ret_471 = __s0_471 - __s1_471 * splatq_laneq_u32(__s2_471, __p3_471); \ - __ret_471; \ +#define vld3q_p64(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 38); \ + __ret; \ }) #else -#define vmlsq_laneq_u32(__p0_472, __p1_472, __p2_472, __p3_472) __extension__ ({ \ - uint32x4_t __ret_472; \ - uint32x4_t __s0_472 = __p0_472; \ - uint32x4_t __s1_472 = __p1_472; \ - uint32x4_t __s2_472 = __p2_472; \ - uint32x4_t __rev0_472; __rev0_472 = __builtin_shufflevector(__s0_472, __s0_472, 3, 2, 1, 0); \ - uint32x4_t __rev1_472; __rev1_472 = __builtin_shufflevector(__s1_472, __s1_472, 3, 2, 1, 0); \ - uint32x4_t __rev2_472; __rev2_472 = __builtin_shufflevector(__s2_472, __s2_472, 3, 2, 1, 0); \ - __ret_472 = __rev0_472 - __rev1_472 * __noswap_splatq_laneq_u32(__rev2_472, __p3_472); \ - __ret_472 = __builtin_shufflevector(__ret_472, __ret_472, 3, 2, 1, 0); \ - __ret_472; \ +#define vld3q_p64(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsq_laneq_u16(__p0_473, __p1_473, 
__p2_473, __p3_473) __extension__ ({ \ - uint16x8_t __ret_473; \ - uint16x8_t __s0_473 = __p0_473; \ - uint16x8_t __s1_473 = __p1_473; \ - uint16x8_t __s2_473 = __p2_473; \ - __ret_473 = __s0_473 - __s1_473 * splatq_laneq_u16(__s2_473, __p3_473); \ - __ret_473; \ +#define vld3q_u64(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 51); \ + __ret; \ }) #else -#define vmlsq_laneq_u16(__p0_474, __p1_474, __p2_474, __p3_474) __extension__ ({ \ - uint16x8_t __ret_474; \ - uint16x8_t __s0_474 = __p0_474; \ - uint16x8_t __s1_474 = __p1_474; \ - uint16x8_t __s2_474 = __p2_474; \ - uint16x8_t __rev0_474; __rev0_474 = __builtin_shufflevector(__s0_474, __s0_474, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_474; __rev1_474 = __builtin_shufflevector(__s1_474, __s1_474, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev2_474; __rev2_474 = __builtin_shufflevector(__s2_474, __s2_474, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_474 = __rev0_474 - __rev1_474 * __noswap_splatq_laneq_u16(__rev2_474, __p3_474); \ - __ret_474 = __builtin_shufflevector(__ret_474, __ret_474, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_474; \ +#define vld3q_u64(__p0) __extension__ ({ \ + uint64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsq_laneq_f32(__p0_475, __p1_475, __p2_475, __p3_475) __extension__ ({ \ - float32x4_t __ret_475; \ - float32x4_t __s0_475 = __p0_475; \ - float32x4_t __s1_475 = __p1_475; \ - float32x4_t __s2_475 = __p2_475; \ - __ret_475 = __s0_475 - __s1_475 * splatq_laneq_f32(__s2_475, __p3_475); \ - __ret_475; \ +#define vld3q_f64(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 42); \ + __ret; \ }) #else -#define 
vmlsq_laneq_f32(__p0_476, __p1_476, __p2_476, __p3_476) __extension__ ({ \ - float32x4_t __ret_476; \ - float32x4_t __s0_476 = __p0_476; \ - float32x4_t __s1_476 = __p1_476; \ - float32x4_t __s2_476 = __p2_476; \ - float32x4_t __rev0_476; __rev0_476 = __builtin_shufflevector(__s0_476, __s0_476, 3, 2, 1, 0); \ - float32x4_t __rev1_476; __rev1_476 = __builtin_shufflevector(__s1_476, __s1_476, 3, 2, 1, 0); \ - float32x4_t __rev2_476; __rev2_476 = __builtin_shufflevector(__s2_476, __s2_476, 3, 2, 1, 0); \ - __ret_476 = __rev0_476 - __rev1_476 * __noswap_splatq_laneq_f32(__rev2_476, __p3_476); \ - __ret_476 = __builtin_shufflevector(__ret_476, __ret_476, 3, 2, 1, 0); \ - __ret_476; \ +#define vld3q_f64(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsq_laneq_s32(__p0_477, __p1_477, __p2_477, __p3_477) __extension__ ({ \ - int32x4_t __ret_477; \ - int32x4_t __s0_477 = __p0_477; \ - int32x4_t __s1_477 = __p1_477; \ - int32x4_t __s2_477 = __p2_477; \ - __ret_477 = __s0_477 - __s1_477 * splatq_laneq_s32(__s2_477, __p3_477); \ - __ret_477; \ +#define vld3q_s64(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 35); \ + __ret; \ }) #else -#define vmlsq_laneq_s32(__p0_478, __p1_478, __p2_478, __p3_478) __extension__ ({ \ - int32x4_t __ret_478; \ - int32x4_t __s0_478 = __p0_478; \ - int32x4_t __s1_478 = __p1_478; \ - int32x4_t __s2_478 = __p2_478; \ - int32x4_t __rev0_478; __rev0_478 = __builtin_shufflevector(__s0_478, __s0_478, 3, 2, 1, 0); \ - int32x4_t __rev1_478; __rev1_478 = __builtin_shufflevector(__s1_478, __s1_478, 3, 2, 1, 0); \ - int32x4_t __rev2_478; __rev2_478 = 
__builtin_shufflevector(__s2_478, __s2_478, 3, 2, 1, 0); \ - __ret_478 = __rev0_478 - __rev1_478 * __noswap_splatq_laneq_s32(__rev2_478, __p3_478); \ - __ret_478 = __builtin_shufflevector(__ret_478, __ret_478, 3, 2, 1, 0); \ - __ret_478; \ +#define vld3q_s64(__p0) __extension__ ({ \ + int64x2x3_t __ret; \ + __builtin_neon_vld3q_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ }) #endif -#ifdef __LITTLE_ENDIAN__ -#define vmlsq_laneq_s16(__p0_479, __p1_479, __p2_479, __p3_479) __extension__ ({ \ - int16x8_t __ret_479; \ - int16x8_t __s0_479 = __p0_479; \ - int16x8_t __s1_479 = __p1_479; \ - int16x8_t __s2_479 = __p2_479; \ - __ret_479 = __s0_479 - __s1_479 * splatq_laneq_s16(__s2_479, __p3_479); \ - __ret_479; \ +#define vld3_f64(__p0) __extension__ ({ \ + float64x1x3_t __ret; \ + __builtin_neon_vld3_v(&__ret, __p0, 10); \ + __ret; \ }) -#else -#define vmlsq_laneq_s16(__p0_480, __p1_480, __p2_480, __p3_480) __extension__ ({ \ - int16x8_t __ret_480; \ - int16x8_t __s0_480 = __p0_480; \ - int16x8_t __s1_480 = __p1_480; \ - int16x8_t __s2_480 = __p2_480; \ - int16x8_t __rev0_480; __rev0_480 = __builtin_shufflevector(__s0_480, __s0_480, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_480; __rev1_480 = __builtin_shufflevector(__s1_480, __s1_480, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2_480; __rev2_480 = __builtin_shufflevector(__s2_480, __s2_480, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_480 = __rev0_480 - __rev1_480 * __noswap_splatq_laneq_s16(__rev2_480, __p3_480); \ - __ret_480 = __builtin_shufflevector(__ret_480, __ret_480, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_480; \ +#define vld3_dup_p64(__p0) __extension__ ({ \ + poly64x1x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 6); \ + __ret; \ }) -#endif - #ifdef __LITTLE_ENDIAN__ -#define 
vmls_laneq_u32(__p0_481, __p1_481, __p2_481, __p3_481) __extension__ ({ \ - uint32x2_t __ret_481; \ - uint32x2_t __s0_481 = __p0_481; \ - uint32x2_t __s1_481 = __p1_481; \ - uint32x4_t __s2_481 = __p2_481; \ - __ret_481 = __s0_481 - __s1_481 * splat_laneq_u32(__s2_481, __p3_481); \ - __ret_481; \ +#define vld3q_dup_p64(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \ + __ret; \ }) #else -#define vmls_laneq_u32(__p0_482, __p1_482, __p2_482, __p3_482) __extension__ ({ \ - uint32x2_t __ret_482; \ - uint32x2_t __s0_482 = __p0_482; \ - uint32x2_t __s1_482 = __p1_482; \ - uint32x4_t __s2_482 = __p2_482; \ - uint32x2_t __rev0_482; __rev0_482 = __builtin_shufflevector(__s0_482, __s0_482, 1, 0); \ - uint32x2_t __rev1_482; __rev1_482 = __builtin_shufflevector(__s1_482, __s1_482, 1, 0); \ - uint32x4_t __rev2_482; __rev2_482 = __builtin_shufflevector(__s2_482, __s2_482, 3, 2, 1, 0); \ - __ret_482 = __rev0_482 - __rev1_482 * __noswap_splat_laneq_u32(__rev2_482, __p3_482); \ - __ret_482 = __builtin_shufflevector(__ret_482, __ret_482, 1, 0); \ - __ret_482; \ +#define vld3q_dup_p64(__p0) __extension__ ({ \ + poly64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmls_laneq_u16(__p0_483, __p1_483, __p2_483, __p3_483) __extension__ ({ \ - uint16x4_t __ret_483; \ - uint16x4_t __s0_483 = __p0_483; \ - uint16x4_t __s1_483 = __p1_483; \ - uint16x8_t __s2_483 = __p2_483; \ - __ret_483 = __s0_483 - __s1_483 * splat_laneq_u16(__s2_483, __p3_483); \ - __ret_483; \ +#define vld3q_dup_f64(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \ + __ret; \ }) #else -#define vmls_laneq_u16(__p0_484, 
__p1_484, __p2_484, __p3_484) __extension__ ({ \ - uint16x4_t __ret_484; \ - uint16x4_t __s0_484 = __p0_484; \ - uint16x4_t __s1_484 = __p1_484; \ - uint16x8_t __s2_484 = __p2_484; \ - uint16x4_t __rev0_484; __rev0_484 = __builtin_shufflevector(__s0_484, __s0_484, 3, 2, 1, 0); \ - uint16x4_t __rev1_484; __rev1_484 = __builtin_shufflevector(__s1_484, __s1_484, 3, 2, 1, 0); \ - uint16x8_t __rev2_484; __rev2_484 = __builtin_shufflevector(__s2_484, __s2_484, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_484 = __rev0_484 - __rev1_484 * __noswap_splat_laneq_u16(__rev2_484, __p3_484); \ - __ret_484 = __builtin_shufflevector(__ret_484, __ret_484, 3, 2, 1, 0); \ - __ret_484; \ +#define vld3q_dup_f64(__p0) __extension__ ({ \ + float64x2x3_t __ret; \ + __builtin_neon_vld3q_dup_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ }) #endif -#ifdef __LITTLE_ENDIAN__ -#define vmls_laneq_f32(__p0_485, __p1_485, __p2_485, __p3_485) __extension__ ({ \ - float32x2_t __ret_485; \ - float32x2_t __s0_485 = __p0_485; \ - float32x2_t __s1_485 = __p1_485; \ - float32x4_t __s2_485 = __p2_485; \ - __ret_485 = __s0_485 - __s1_485 * splat_laneq_f32(__s2_485, __p3_485); \ - __ret_485; \ +#define vld3_dup_f64(__p0) __extension__ ({ \ + float64x1x3_t __ret; \ + __builtin_neon_vld3_dup_v(&__ret, __p0, 10); \ + __ret; \ }) -#else -#define vmls_laneq_f32(__p0_486, __p1_486, __p2_486, __p3_486) __extension__ ({ \ - float32x2_t __ret_486; \ - float32x2_t __s0_486 = __p0_486; \ - float32x2_t __s1_486 = __p1_486; \ - float32x4_t __s2_486 = __p2_486; \ - float32x2_t __rev0_486; __rev0_486 = __builtin_shufflevector(__s0_486, __s0_486, 1, 0); \ - float32x2_t __rev1_486; __rev1_486 = __builtin_shufflevector(__s1_486, __s1_486, 1, 0); \ - float32x4_t __rev2_486; __rev2_486 = 
__builtin_shufflevector(__s2_486, __s2_486, 3, 2, 1, 0); \ - __ret_486 = __rev0_486 - __rev1_486 * __noswap_splat_laneq_f32(__rev2_486, __p3_486); \ - __ret_486 = __builtin_shufflevector(__ret_486, __ret_486, 1, 0); \ - __ret_486; \ +#define vld3_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x3_t __ret; \ + poly64x1x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \ + __ret; \ }) -#endif - #ifdef __LITTLE_ENDIAN__ -#define vmls_laneq_s32(__p0_487, __p1_487, __p2_487, __p3_487) __extension__ ({ \ - int32x2_t __ret_487; \ - int32x2_t __s0_487 = __p0_487; \ - int32x2_t __s1_487 = __p1_487; \ - int32x4_t __s2_487 = __p2_487; \ - __ret_487 = __s0_487 - __s1_487 * splat_laneq_s32(__s2_487, __p3_487); \ - __ret_487; \ +#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x3_t __ret; \ + poly8x16x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \ + __ret; \ }) #else -#define vmls_laneq_s32(__p0_488, __p1_488, __p2_488, __p3_488) __extension__ ({ \ - int32x2_t __ret_488; \ - int32x2_t __s0_488 = __p0_488; \ - int32x2_t __s1_488 = __p1_488; \ - int32x4_t __s2_488 = __p2_488; \ - int32x2_t __rev0_488; __rev0_488 = __builtin_shufflevector(__s0_488, __s0_488, 1, 0); \ - int32x2_t __rev1_488; __rev1_488 = __builtin_shufflevector(__s1_488, __s1_488, 1, 0); \ - int32x4_t __rev2_488; __rev2_488 = __builtin_shufflevector(__s2_488, __s2_488, 3, 2, 1, 0); \ - __ret_488 = __rev0_488 - __rev1_488 * __noswap_splat_laneq_s32(__rev2_488, __p3_488); \ - __ret_488 = __builtin_shufflevector(__ret_488, __ret_488, 1, 0); \ - __ret_488; \ +#define vld3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x3_t __ret; \ + poly8x16x3_t __s1 = __p1; \ + poly8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 
3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmls_laneq_s16(__p0_489, __p1_489, __p2_489, __p3_489) __extension__ ({ \ - int16x4_t __ret_489; \ - int16x4_t __s0_489 = __p0_489; \ - int16x4_t __s1_489 = __p1_489; \ - int16x8_t __s2_489 = __p2_489; \ - __ret_489 = __s0_489 - __s1_489 * splat_laneq_s16(__s2_489, __p3_489); \ - __ret_489; \ +#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x3_t __ret; \ + poly64x2x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \ + __ret; \ }) #else -#define vmls_laneq_s16(__p0_490, __p1_490, __p2_490, __p3_490) __extension__ ({ \ - int16x4_t __ret_490; \ - int16x4_t __s0_490 = __p0_490; \ - int16x4_t __s1_490 = __p1_490; \ - int16x8_t __s2_490 = __p2_490; \ - int16x4_t __rev0_490; __rev0_490 = __builtin_shufflevector(__s0_490, __s0_490, 3, 2, 1, 0); \ - int16x4_t __rev1_490; __rev1_490 = __builtin_shufflevector(__s1_490, __s1_490, 3, 2, 1, 0); \ - int16x8_t __rev2_490; __rev2_490 = __builtin_shufflevector(__s2_490, __s2_490, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_490 = __rev0_490 - __rev1_490 * __noswap_splat_laneq_s16(__rev2_490, __p3_490); \ - 
__ret_490 = __builtin_shufflevector(__ret_490, __ret_490, 3, 2, 1, 0); \ - __ret_490; \ +#define vld3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x3_t __ret; \ + poly64x2x3_t __s1 = __p1; \ + poly64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_high_lane_u32(__p0_491, __p1_491, __p2_491, __p3_491) __extension__ ({ \ - uint64x2_t __ret_491; \ - uint64x2_t __s0_491 = __p0_491; \ - uint32x4_t __s1_491 = __p1_491; \ - uint32x2_t __s2_491 = __p2_491; \ - __ret_491 = __s0_491 - vmull_u32(vget_high_u32(__s1_491), splat_lane_u32(__s2_491, __p3_491)); \ - __ret_491; \ +#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x3_t __ret; \ + uint8x16x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \ + __ret; \ }) #else -#define vmlsl_high_lane_u32(__p0_492, __p1_492, __p2_492, __p3_492) __extension__ ({ \ - uint64x2_t __ret_492; \ - uint64x2_t __s0_492 = __p0_492; \ - uint32x4_t __s1_492 = __p1_492; \ - uint32x2_t __s2_492 = __p2_492; \ - uint64x2_t __rev0_492; __rev0_492 = __builtin_shufflevector(__s0_492, __s0_492, 1, 0); \ - uint32x4_t __rev1_492; __rev1_492 = __builtin_shufflevector(__s1_492, __s1_492, 3, 2, 1, 0); \ - uint32x2_t __rev2_492; __rev2_492 = __builtin_shufflevector(__s2_492, __s2_492, 1, 0); \ - __ret_492 = 
__rev0_492 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_492), __noswap_splat_lane_u32(__rev2_492, __p3_492)); \ - __ret_492 = __builtin_shufflevector(__ret_492, __ret_492, 1, 0); \ - __ret_492; \ +#define vld3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x3_t __ret; \ + uint8x16x3_t __s1 = __p1; \ + uint8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_high_lane_u16(__p0_493, __p1_493, __p2_493, __p3_493) __extension__ ({ \ - uint32x4_t __ret_493; \ - uint32x4_t __s0_493 = __p0_493; \ - uint16x8_t __s1_493 = __p1_493; \ - uint16x4_t __s2_493 = __p2_493; \ - __ret_493 = __s0_493 - vmull_u16(vget_high_u16(__s1_493), splat_lane_u16(__s2_493, __p3_493)); \ - __ret_493; \ +#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x3_t __ret; \ + uint64x2x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \ + __ret; \ }) #else -#define vmlsl_high_lane_u16(__p0_494, __p1_494, __p2_494, __p3_494) __extension__ ({ \ - uint32x4_t __ret_494; \ - 
uint32x4_t __s0_494 = __p0_494; \ - uint16x8_t __s1_494 = __p1_494; \ - uint16x4_t __s2_494 = __p2_494; \ - uint32x4_t __rev0_494; __rev0_494 = __builtin_shufflevector(__s0_494, __s0_494, 3, 2, 1, 0); \ - uint16x8_t __rev1_494; __rev1_494 = __builtin_shufflevector(__s1_494, __s1_494, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev2_494; __rev2_494 = __builtin_shufflevector(__s2_494, __s2_494, 3, 2, 1, 0); \ - __ret_494 = __rev0_494 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_494), __noswap_splat_lane_u16(__rev2_494, __p3_494)); \ - __ret_494 = __builtin_shufflevector(__ret_494, __ret_494, 3, 2, 1, 0); \ - __ret_494; \ +#define vld3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x3_t __ret; \ + uint64x2x3_t __s1 = __p1; \ + uint64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_high_lane_s32(__p0_495, __p1_495, __p2_495, __p3_495) __extension__ ({ \ - int64x2_t __ret_495; \ - int64x2_t __s0_495 = __p0_495; \ - int32x4_t __s1_495 = __p1_495; \ - int32x2_t __s2_495 = __p2_495; \ - __ret_495 = __s0_495 - vmull_s32(vget_high_s32(__s1_495), splat_lane_s32(__s2_495, __p3_495)); \ - __ret_495; \ +#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x3_t __ret; \ + int8x16x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \ + 
__ret; \ }) #else -#define vmlsl_high_lane_s32(__p0_496, __p1_496, __p2_496, __p3_496) __extension__ ({ \ - int64x2_t __ret_496; \ - int64x2_t __s0_496 = __p0_496; \ - int32x4_t __s1_496 = __p1_496; \ - int32x2_t __s2_496 = __p2_496; \ - int64x2_t __rev0_496; __rev0_496 = __builtin_shufflevector(__s0_496, __s0_496, 1, 0); \ - int32x4_t __rev1_496; __rev1_496 = __builtin_shufflevector(__s1_496, __s1_496, 3, 2, 1, 0); \ - int32x2_t __rev2_496; __rev2_496 = __builtin_shufflevector(__s2_496, __s2_496, 1, 0); \ - __ret_496 = __rev0_496 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_496), __noswap_splat_lane_s32(__rev2_496, __p3_496)); \ - __ret_496 = __builtin_shufflevector(__ret_496, __ret_496, 1, 0); \ - __ret_496; \ +#define vld3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x3_t __ret; \ + int8x16x3_t __s1 = __p1; \ + int8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_high_lane_s16(__p0_497, __p1_497, __p2_497, __p3_497) __extension__ ({ \ - int32x4_t __ret_497; \ - int32x4_t __s0_497 = __p0_497; \ - int16x8_t __s1_497 = __p1_497; \ - int16x4_t 
__s2_497 = __p2_497; \ - __ret_497 = __s0_497 - vmull_s16(vget_high_s16(__s1_497), splat_lane_s16(__s2_497, __p3_497)); \ - __ret_497; \ +#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x3_t __ret; \ + float64x2x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \ + __ret; \ }) #else -#define vmlsl_high_lane_s16(__p0_498, __p1_498, __p2_498, __p3_498) __extension__ ({ \ - int32x4_t __ret_498; \ - int32x4_t __s0_498 = __p0_498; \ - int16x8_t __s1_498 = __p1_498; \ - int16x4_t __s2_498 = __p2_498; \ - int32x4_t __rev0_498; __rev0_498 = __builtin_shufflevector(__s0_498, __s0_498, 3, 2, 1, 0); \ - int16x8_t __rev1_498; __rev1_498 = __builtin_shufflevector(__s1_498, __s1_498, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_498; __rev2_498 = __builtin_shufflevector(__s2_498, __s2_498, 3, 2, 1, 0); \ - __ret_498 = __rev0_498 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_498), __noswap_splat_lane_s16(__rev2_498, __p3_498)); \ - __ret_498 = __builtin_shufflevector(__ret_498, __ret_498, 3, 2, 1, 0); \ - __ret_498; \ +#define vld3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x3_t __ret; \ + float64x2x3_t __s1 = __p1; \ + float64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_high_laneq_u32(__p0_499, __p1_499, __p2_499, 
__p3_499) __extension__ ({ \ - uint64x2_t __ret_499; \ - uint64x2_t __s0_499 = __p0_499; \ - uint32x4_t __s1_499 = __p1_499; \ - uint32x4_t __s2_499 = __p2_499; \ - __ret_499 = __s0_499 - vmull_u32(vget_high_u32(__s1_499), splat_laneq_u32(__s2_499, __p3_499)); \ - __ret_499; \ +#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x3_t __ret; \ + int64x2x3_t __s1 = __p1; \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \ + __ret; \ }) #else -#define vmlsl_high_laneq_u32(__p0_500, __p1_500, __p2_500, __p3_500) __extension__ ({ \ - uint64x2_t __ret_500; \ - uint64x2_t __s0_500 = __p0_500; \ - uint32x4_t __s1_500 = __p1_500; \ - uint32x4_t __s2_500 = __p2_500; \ - uint64x2_t __rev0_500; __rev0_500 = __builtin_shufflevector(__s0_500, __s0_500, 1, 0); \ - uint32x4_t __rev1_500; __rev1_500 = __builtin_shufflevector(__s1_500, __s1_500, 3, 2, 1, 0); \ - uint32x4_t __rev2_500; __rev2_500 = __builtin_shufflevector(__s2_500, __s2_500, 3, 2, 1, 0); \ - __ret_500 = __rev0_500 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_500), __noswap_splat_laneq_u32(__rev2_500, __p3_500)); \ - __ret_500 = __builtin_shufflevector(__ret_500, __ret_500, 1, 0); \ - __ret_500; \ +#define vld3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x3_t __ret; \ + int64x2x3_t __s1 = __p1; \ + int64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vld3q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 
0); \ + __ret; \ }) #endif -#ifdef __LITTLE_ENDIAN__ -#define vmlsl_high_laneq_u16(__p0_501, __p1_501, __p2_501, __p3_501) __extension__ ({ \ - uint32x4_t __ret_501; \ - uint32x4_t __s0_501 = __p0_501; \ - uint16x8_t __s1_501 = __p1_501; \ - uint16x8_t __s2_501 = __p2_501; \ - __ret_501 = __s0_501 - vmull_u16(vget_high_u16(__s1_501), splat_laneq_u16(__s2_501, __p3_501)); \ - __ret_501; \ +#define vld3_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x3_t __ret; \ + uint64x1x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \ + __ret; \ }) -#else -#define vmlsl_high_laneq_u16(__p0_502, __p1_502, __p2_502, __p3_502) __extension__ ({ \ - uint32x4_t __ret_502; \ - uint32x4_t __s0_502 = __p0_502; \ - uint16x8_t __s1_502 = __p1_502; \ - uint16x8_t __s2_502 = __p2_502; \ - uint32x4_t __rev0_502; __rev0_502 = __builtin_shufflevector(__s0_502, __s0_502, 3, 2, 1, 0); \ - uint16x8_t __rev1_502; __rev1_502 = __builtin_shufflevector(__s1_502, __s1_502, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev2_502; __rev2_502 = __builtin_shufflevector(__s2_502, __s2_502, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_502 = __rev0_502 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_502), __noswap_splat_laneq_u16(__rev2_502, __p3_502)); \ - __ret_502 = __builtin_shufflevector(__ret_502, __ret_502, 3, 2, 1, 0); \ - __ret_502; \ +#define vld3_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x3_t __ret; \ + float64x1x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \ + __ret; \ +}) +#define vld3_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x3_t __ret; \ + int64x1x3_t __s1 = __p1; \ + __builtin_neon_vld3_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \ + __ret; \ +}) +#define vld4_p64(__p0) __extension__ ({ \ + poly64x1x4_t __ret; \ + 
__builtin_neon_vld4_v(&__ret, __p0, 6); \ + __ret; \ }) -#endif - #ifdef __LITTLE_ENDIAN__ -#define vmlsl_high_laneq_s32(__p0_503, __p1_503, __p2_503, __p3_503) __extension__ ({ \ - int64x2_t __ret_503; \ - int64x2_t __s0_503 = __p0_503; \ - int32x4_t __s1_503 = __p1_503; \ - int32x4_t __s2_503 = __p2_503; \ - __ret_503 = __s0_503 - vmull_s32(vget_high_s32(__s1_503), splat_laneq_s32(__s2_503, __p3_503)); \ - __ret_503; \ +#define vld4q_p64(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 38); \ + __ret; \ }) #else -#define vmlsl_high_laneq_s32(__p0_504, __p1_504, __p2_504, __p3_504) __extension__ ({ \ - int64x2_t __ret_504; \ - int64x2_t __s0_504 = __p0_504; \ - int32x4_t __s1_504 = __p1_504; \ - int32x4_t __s2_504 = __p2_504; \ - int64x2_t __rev0_504; __rev0_504 = __builtin_shufflevector(__s0_504, __s0_504, 1, 0); \ - int32x4_t __rev1_504; __rev1_504 = __builtin_shufflevector(__s1_504, __s1_504, 3, 2, 1, 0); \ - int32x4_t __rev2_504; __rev2_504 = __builtin_shufflevector(__s2_504, __s2_504, 3, 2, 1, 0); \ - __ret_504 = __rev0_504 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_504), __noswap_splat_laneq_s32(__rev2_504, __p3_504)); \ - __ret_504 = __builtin_shufflevector(__ret_504, __ret_504, 1, 0); \ - __ret_504; \ +#define vld4q_p64(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_high_laneq_s16(__p0_505, __p1_505, __p2_505, __p3_505) __extension__ ({ \ - int32x4_t __ret_505; \ - int32x4_t __s0_505 = __p0_505; \ - int16x8_t __s1_505 = __p1_505; \ - int16x8_t __s2_505 = __p2_505; \ - __ret_505 = __s0_505 - 
vmull_s16(vget_high_s16(__s1_505), splat_laneq_s16(__s2_505, __p3_505)); \ - __ret_505; \ +#define vld4q_u64(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 51); \ + __ret; \ }) #else -#define vmlsl_high_laneq_s16(__p0_506, __p1_506, __p2_506, __p3_506) __extension__ ({ \ - int32x4_t __ret_506; \ - int32x4_t __s0_506 = __p0_506; \ - int16x8_t __s1_506 = __p1_506; \ - int16x8_t __s2_506 = __p2_506; \ - int32x4_t __rev0_506; __rev0_506 = __builtin_shufflevector(__s0_506, __s0_506, 3, 2, 1, 0); \ - int16x8_t __rev1_506; __rev1_506 = __builtin_shufflevector(__s1_506, __s1_506, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2_506; __rev2_506 = __builtin_shufflevector(__s2_506, __s2_506, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_506 = __rev0_506 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_506), __noswap_splat_laneq_s16(__rev2_506, __p3_506)); \ - __ret_506 = __builtin_shufflevector(__ret_506, __ret_506, 3, 2, 1, 0); \ - __ret_506; \ +#define vld4q_u64(__p0) __extension__ ({ \ + uint64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_laneq_u32(__p0_507, __p1_507, __p2_507, __p3_507) __extension__ ({ \ - uint64x2_t __ret_507; \ - uint64x2_t __s0_507 = __p0_507; \ - uint32x2_t __s1_507 = __p1_507; \ - uint32x4_t __s2_507 = __p2_507; \ - __ret_507 = __s0_507 - vmull_u32(__s1_507, splat_laneq_u32(__s2_507, __p3_507)); \ - __ret_507; \ +#define vld4q_f64(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 42); \ + __ret; \ }) #else -#define vmlsl_laneq_u32(__p0_508, __p1_508, __p2_508, __p3_508) __extension__ ({ \ - 
uint64x2_t __ret_508; \ - uint64x2_t __s0_508 = __p0_508; \ - uint32x2_t __s1_508 = __p1_508; \ - uint32x4_t __s2_508 = __p2_508; \ - uint64x2_t __rev0_508; __rev0_508 = __builtin_shufflevector(__s0_508, __s0_508, 1, 0); \ - uint32x2_t __rev1_508; __rev1_508 = __builtin_shufflevector(__s1_508, __s1_508, 1, 0); \ - uint32x4_t __rev2_508; __rev2_508 = __builtin_shufflevector(__s2_508, __s2_508, 3, 2, 1, 0); \ - __ret_508 = __rev0_508 - __noswap_vmull_u32(__rev1_508, __noswap_splat_laneq_u32(__rev2_508, __p3_508)); \ - __ret_508 = __builtin_shufflevector(__ret_508, __ret_508, 1, 0); \ - __ret_508; \ +#define vld4q_f64(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_laneq_u16(__p0_509, __p1_509, __p2_509, __p3_509) __extension__ ({ \ - uint32x4_t __ret_509; \ - uint32x4_t __s0_509 = __p0_509; \ - uint16x4_t __s1_509 = __p1_509; \ - uint16x8_t __s2_509 = __p2_509; \ - __ret_509 = __s0_509 - vmull_u16(__s1_509, splat_laneq_u16(__s2_509, __p3_509)); \ - __ret_509; \ +#define vld4q_s64(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 35); \ + __ret; \ }) #else -#define vmlsl_laneq_u16(__p0_510, __p1_510, __p2_510, __p3_510) __extension__ ({ \ - uint32x4_t __ret_510; \ - uint32x4_t __s0_510 = __p0_510; \ - uint16x4_t __s1_510 = __p1_510; \ - uint16x8_t __s2_510 = __p2_510; \ - uint32x4_t __rev0_510; __rev0_510 = __builtin_shufflevector(__s0_510, __s0_510, 3, 2, 1, 0); \ - uint16x4_t __rev1_510; __rev1_510 = __builtin_shufflevector(__s1_510, __s1_510, 3, 2, 1, 0); \ - uint16x8_t __rev2_510; __rev2_510 = 
__builtin_shufflevector(__s2_510, __s2_510, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_510 = __rev0_510 - __noswap_vmull_u16(__rev1_510, __noswap_splat_laneq_u16(__rev2_510, __p3_510)); \ - __ret_510 = __builtin_shufflevector(__ret_510, __ret_510, 3, 2, 1, 0); \ - __ret_510; \ +#define vld4q_s64(__p0) __extension__ ({ \ + int64x2x4_t __ret; \ + __builtin_neon_vld4q_v(&__ret, __p0, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ }) #endif +#define vld4_f64(__p0) __extension__ ({ \ + float64x1x4_t __ret; \ + __builtin_neon_vld4_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld4_dup_p64(__p0) __extension__ ({ \ + poly64x1x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 6); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vmlsl_laneq_s32(__p0_511, __p1_511, __p2_511, __p3_511) __extension__ ({ \ - int64x2_t __ret_511; \ - int64x2_t __s0_511 = __p0_511; \ - int32x2_t __s1_511 = __p1_511; \ - int32x4_t __s2_511 = __p2_511; \ - __ret_511 = __s0_511 - vmull_s32(__s1_511, splat_laneq_s32(__s2_511, __p3_511)); \ - __ret_511; \ +#define vld4q_dup_p64(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \ + __ret; \ }) #else -#define vmlsl_laneq_s32(__p0_512, __p1_512, __p2_512, __p3_512) __extension__ ({ \ - int64x2_t __ret_512; \ - int64x2_t __s0_512 = __p0_512; \ - int32x2_t __s1_512 = __p1_512; \ - int32x4_t __s2_512 = __p2_512; \ - int64x2_t __rev0_512; __rev0_512 = __builtin_shufflevector(__s0_512, __s0_512, 1, 0); \ - int32x2_t __rev1_512; __rev1_512 = __builtin_shufflevector(__s1_512, __s1_512, 1, 0); \ - int32x4_t __rev2_512; __rev2_512 = __builtin_shufflevector(__s2_512, __s2_512, 3, 2, 1, 0); \ - __ret_512 = __rev0_512 - 
__noswap_vmull_s32(__rev1_512, __noswap_splat_laneq_s32(__rev2_512, __p3_512)); \ - __ret_512 = __builtin_shufflevector(__ret_512, __ret_512, 1, 0); \ - __ret_512; \ +#define vld4q_dup_p64(__p0) __extension__ ({ \ + poly64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_laneq_s16(__p0_513, __p1_513, __p2_513, __p3_513) __extension__ ({ \ - int32x4_t __ret_513; \ - int32x4_t __s0_513 = __p0_513; \ - int16x4_t __s1_513 = __p1_513; \ - int16x8_t __s2_513 = __p2_513; \ - __ret_513 = __s0_513 - vmull_s16(__s1_513, splat_laneq_s16(__s2_513, __p3_513)); \ - __ret_513; \ +#define vld4q_dup_f64(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \ + __ret; \ }) #else -#define vmlsl_laneq_s16(__p0_514, __p1_514, __p2_514, __p3_514) __extension__ ({ \ - int32x4_t __ret_514; \ - int32x4_t __s0_514 = __p0_514; \ - int16x4_t __s1_514 = __p1_514; \ - int16x8_t __s2_514 = __p2_514; \ - int32x4_t __rev0_514; __rev0_514 = __builtin_shufflevector(__s0_514, __s0_514, 3, 2, 1, 0); \ - int16x4_t __rev1_514; __rev1_514 = __builtin_shufflevector(__s1_514, __s1_514, 3, 2, 1, 0); \ - int16x8_t __rev2_514; __rev2_514 = __builtin_shufflevector(__s2_514, __s2_514, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_514 = __rev0_514 - __noswap_vmull_s16(__rev1_514, __noswap_splat_laneq_s16(__rev2_514, __p3_514)); \ - __ret_514 = __builtin_shufflevector(__ret_514, __ret_514, 3, 2, 1, 0); \ - __ret_514; \ +#define vld4q_dup_f64(__p0) __extension__ ({ \ + float64x2x4_t __ret; \ + __builtin_neon_vld4q_dup_v(&__ret, __p0, 42); \ + \ + __ret.val[0] = 
__builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ }) #endif -__ai poly64x1_t vmov_n_p64(poly64_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t) {__p0}; - return __ret; -} +#define vld4_dup_f64(__p0) __extension__ ({ \ + float64x1x4_t __ret; \ + __builtin_neon_vld4_dup_v(&__ret, __p0, 10); \ + __ret; \ +}) +#define vld4_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x4_t __ret; \ + poly64x1x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vmovq_n_p64(poly64_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t) {__p0, __p0}; - return __ret; -} +#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x4_t __ret; \ + poly8x16x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \ + __ret; \ +}) #else -__ai poly64x2_t vmovq_n_p64(poly64_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t) {__p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x4_t __ret; \ + poly8x16x4_t __s1 = __p1; \ + poly8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = 
__builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vmovq_n_f64(float64_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) {__p0, __p0}; - return __ret; -} +#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x4_t __ret; \ + poly64x2x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \ + __ret; \ +}) #else -__ai float64x2_t vmovq_n_f64(float64_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) {__p0, __p0}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vld4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x4_t __ret; \ + poly64x2x4_t __s1 = __p1; \ + poly64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 
(int8x16_t)__rev1.val[3], __p2, 38); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) #endif -__ai float64x1_t vmov_n_f64(float64_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) {__p0}; - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_515) { - uint16x8_t __ret_515; - uint8x8_t __a1_515 = vget_high_u8(__p0_515); - __ret_515 = (uint16x8_t)(vshll_n_u8(__a1_515, 0)); - return __ret_515; -} +#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x4_t __ret; \ + uint8x16x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \ + __ret; \ +}) #else -__ai uint16x8_t vmovl_high_u8(uint8x16_t __p0_516) { - uint16x8_t __ret_516; - uint8x16_t __rev0_516; __rev0_516 = __builtin_shufflevector(__p0_516, __p0_516, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __a1_516 = __noswap_vget_high_u8(__rev0_516); - __ret_516 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_516, 0)); - __ret_516 = __builtin_shufflevector(__ret_516, __ret_516, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret_516; -} -__ai uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_517) { - uint16x8_t __ret_517; - uint8x8_t __a1_517 = __noswap_vget_high_u8(__p0_517); - __ret_517 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_517, 0)); - return __ret_517; -} +#define vld4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x4_t __ret; \ + uint8x16x4_t __s1 = __p1; \ + uint8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], 
__s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_518) { - uint64x2_t __ret_518; - uint32x2_t __a1_518 = vget_high_u32(__p0_518); - __ret_518 = (uint64x2_t)(vshll_n_u32(__a1_518, 0)); - return __ret_518; -} +#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x4_t __ret; \ + uint64x2x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \ + __ret; \ +}) #else -__ai uint64x2_t vmovl_high_u32(uint32x4_t __p0_519) { - uint64x2_t __ret_519; - uint32x4_t __rev0_519; __rev0_519 = __builtin_shufflevector(__p0_519, __p0_519, 3, 2, 1, 0); - uint32x2_t __a1_519 = __noswap_vget_high_u32(__rev0_519); - __ret_519 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_519, 0)); - __ret_519 = __builtin_shufflevector(__ret_519, __ret_519, 1, 0); - return __ret_519; -} -__ai uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_520) { - uint64x2_t 
__ret_520; - uint32x2_t __a1_520 = __noswap_vget_high_u32(__p0_520); - __ret_520 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_520, 0)); - return __ret_520; -} +#define vld4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x4_t __ret; \ + uint64x2x4_t __s1 = __p1; \ + uint64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_521) { - uint32x4_t __ret_521; - uint16x4_t __a1_521 = vget_high_u16(__p0_521); - __ret_521 = (uint32x4_t)(vshll_n_u16(__a1_521, 0)); - return __ret_521; -} +#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x4_t __ret; \ + int8x16x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \ + __ret; \ +}) #else -__ai uint32x4_t vmovl_high_u16(uint16x8_t __p0_522) { - uint32x4_t __ret_522; - uint16x8_t __rev0_522; __rev0_522 = __builtin_shufflevector(__p0_522, __p0_522, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x4_t __a1_522 = __noswap_vget_high_u16(__rev0_522); - __ret_522 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_522, 0)); - __ret_522 = __builtin_shufflevector(__ret_522, __ret_522, 3, 2, 1, 0); 
- return __ret_522; -} -__ai uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_523) { - uint32x4_t __ret_523; - uint16x4_t __a1_523 = __noswap_vget_high_u16(__p0_523); - __ret_523 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_523, 0)); - return __ret_523; -} +#define vld4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x4_t __ret; \ + int8x16x4_t __s1 = __p1; \ + int8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmovl_high_s8(int8x16_t __p0_524) { - int16x8_t __ret_524; - int8x8_t __a1_524 = vget_high_s8(__p0_524); - __ret_524 = (int16x8_t)(vshll_n_s8(__a1_524, 0)); - return __ret_524; -} +#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x4_t __ret; \ + float64x2x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], 
(int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \ + __ret; \ +}) #else -__ai int16x8_t vmovl_high_s8(int8x16_t __p0_525) { - int16x8_t __ret_525; - int8x16_t __rev0_525; __rev0_525 = __builtin_shufflevector(__p0_525, __p0_525, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __a1_525 = __noswap_vget_high_s8(__rev0_525); - __ret_525 = (int16x8_t)(__noswap_vshll_n_s8(__a1_525, 0)); - __ret_525 = __builtin_shufflevector(__ret_525, __ret_525, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret_525; -} -__ai int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_526) { - int16x8_t __ret_526; - int8x8_t __a1_526 = __noswap_vget_high_s8(__p0_526); - __ret_526 = (int16x8_t)(__noswap_vshll_n_s8(__a1_526, 0)); - return __ret_526; -} +#define vld4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x4_t __ret; \ + float64x2x4_t __s1 = __p1; \ + float64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmovl_high_s32(int32x4_t __p0_527) { - int64x2_t __ret_527; - int32x2_t __a1_527 = vget_high_s32(__p0_527); - __ret_527 = (int64x2_t)(vshll_n_s32(__a1_527, 0)); - return __ret_527; -} +#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ 
\ + int64x2x4_t __ret; \ + int64x2x4_t __s1 = __p1; \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \ + __ret; \ +}) #else -__ai int64x2_t vmovl_high_s32(int32x4_t __p0_528) { - int64x2_t __ret_528; - int32x4_t __rev0_528; __rev0_528 = __builtin_shufflevector(__p0_528, __p0_528, 3, 2, 1, 0); - int32x2_t __a1_528 = __noswap_vget_high_s32(__rev0_528); - __ret_528 = (int64x2_t)(__noswap_vshll_n_s32(__a1_528, 0)); - __ret_528 = __builtin_shufflevector(__ret_528, __ret_528, 1, 0); - return __ret_528; -} -__ai int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_529) { - int64x2_t __ret_529; - int32x2_t __a1_529 = __noswap_vget_high_s32(__p0_529); - __ret_529 = (int64x2_t)(__noswap_vshll_n_s32(__a1_529, 0)); - return __ret_529; -} +#define vld4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x4_t __ret; \ + int64x2x4_t __s1 = __p1; \ + int64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vld4q_lane_v(&__ret, __p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \ + \ + __ret.val[0] = __builtin_shufflevector(__ret.val[0], __ret.val[0], 1, 0); \ + __ret.val[1] = __builtin_shufflevector(__ret.val[1], __ret.val[1], 1, 0); \ + __ret.val[2] = __builtin_shufflevector(__ret.val[2], __ret.val[2], 1, 0); \ + __ret.val[3] = __builtin_shufflevector(__ret.val[3], __ret.val[3], 1, 0); \ + __ret; \ +}) #endif +#define vld4_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x4_t __ret; \ + uint64x1x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 
(int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \ + __ret; \ +}) +#define vld4_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x4_t __ret; \ + float64x1x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \ + __ret; \ +}) +#define vld4_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x4_t __ret; \ + int64x1x4_t __s1 = __p1; \ + __builtin_neon_vld4_lane_v(&__ret, __p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \ + __ret; \ +}) +#define vldrq_p128(__p0) __extension__ ({ \ + poly128_t __ret; \ + __ret = (poly128_t) __builtin_neon_vldrq_p128(__p0); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmovl_high_s16(int16x8_t __p0_530) { - int32x4_t __ret_530; - int16x4_t __a1_530 = vget_high_s16(__p0_530); - __ret_530 = (int32x4_t)(vshll_n_s16(__a1_530, 0)); - return __ret_530; +__ai __attribute__((target("neon"))) float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; } #else -__ai int32x4_t vmovl_high_s16(int16x8_t __p0_531) { - int32x4_t __ret_531; - int16x8_t __rev0_531; __rev0_531 = __builtin_shufflevector(__p0_531, __p0_531, 7, 6, 5, 4, 3, 2, 1, 0); - int16x4_t __a1_531 = __noswap_vget_high_s16(__rev0_531); - __ret_531 = (int32x4_t)(__noswap_vshll_n_s16(__a1_531, 0)); - __ret_531 = __builtin_shufflevector(__ret_531, __ret_531, 3, 2, 1, 0); - return __ret_531; -} -__ai int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_532) { - int32x4_t __ret_532; - int16x4_t __a1_532 = __noswap_vget_high_s16(__p0_532); - __ret_532 = (int32x4_t)(__noswap_vshll_n_s16(__a1_532, 0)); - return __ret_532; +__ai __attribute__((target("neon"))) float64x2_t vmaxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; } #endif +__ai __attribute__((target("neon"))) float64x1_t vmax_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmax_v((int8x8_t)__p0, (int8x8_t)__p1, 10); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { - uint16x8_t __ret; - __ret = vcombine_u16(__p0, vmovn_u32(__p1)); +__ai __attribute__((target("neon"))) float64_t vmaxnmvq_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__p0); return __ret; } #else -__ai uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { - uint16x8_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __noswap_vcombine_u16(__rev0, __noswap_vmovn_u32(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float64_t vmaxnmvq_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vmaxnmvq_f64(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { - uint32x4_t __ret; - __ret = vcombine_u32(__p0, vmovn_u64(__p1)); +__ai __attribute__((target("neon"))) float32_t vmaxnmvq_f32(float32x4_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__p0); return __ret; } #else -__ai uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { - uint32x4_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - 
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __noswap_vcombine_u32(__rev0, __noswap_vmovn_u64(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float32_t vmaxnmvq_f32(float32x4_t __p0) { + float32_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32_t) __builtin_neon_vmaxnmvq_f32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { - uint8x16_t __ret; - __ret = vcombine_u8(__p0, vmovn_u16(__p1)); +__ai __attribute__((target("neon"))) float32_t vmaxnmv_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__p0); return __ret; } #else -__ai uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { - uint8x16_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vcombine_u8(__rev0, __noswap_vmovn_u16(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float32_t vmaxnmv_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vmaxnmv_f32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { - int16x8_t __ret; - __ret = vcombine_s16(__p0, vmovn_s32(__p1)); +__ai __attribute__((target("neon"))) uint8_t vmaxvq_u8(uint8x16_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__p0); return __ret; } #else -__ai int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { - int16x8_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; 
__rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __noswap_vcombine_s16(__rev0, __noswap_vmovn_s32(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8_t vmaxvq_u8(uint8x16_t __p0) { + uint8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8_t) __builtin_neon_vmaxvq_u8(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { - int32x4_t __ret; - __ret = vcombine_s32(__p0, vmovn_s64(__p1)); +__ai __attribute__((target("neon"))) uint32_t vmaxvq_u32(uint32x4_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__p0); return __ret; } #else -__ai int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { - int32x4_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __noswap_vcombine_s32(__rev0, __noswap_vmovn_s64(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint32_t vmaxvq_u32(uint32x4_t __p0) { + uint32_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint32_t) __builtin_neon_vmaxvq_u32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { - int8x16_t __ret; - __ret = vcombine_s8(__p0, vmovn_s16(__p1)); +__ai __attribute__((target("neon"))) uint16_t vmaxvq_u16(uint16x8_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__p0); return __ret; } #else -__ai int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { - int8x16_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 
4, 3, 2, 1, 0); - __ret = __noswap_vcombine_s8(__rev0, __noswap_vmovn_s16(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint16_t vmaxvq_u16(uint16x8_t __p0) { + uint16_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16_t) __builtin_neon_vmaxvq_u16(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = __p0 * __p1; +__ai __attribute__((target("neon"))) int8_t vmaxvq_s8(int8x16_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vmaxvq_s8(__p0); return __ret; } #else -__ai float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 * __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) int8_t vmaxvq_s8(int8x16_t __p0) { + int8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8_t) __builtin_neon_vmaxvq_s8(__rev0); return __ret; } #endif -__ai float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = __p0 * __p1; +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64_t vmaxvq_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vmaxvq_f64(__p0); return __ret; } -#define vmuld_lane_f64(__p0_533, __p1_533, __p2_533) __extension__ ({ \ - float64_t __ret_533; \ - float64_t __s0_533 = __p0_533; \ - float64x1_t __s1_533 = __p1_533; \ - __ret_533 = __s0_533 * vget_lane_f64(__s1_533, __p2_533); \ - __ret_533; \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vmuls_lane_f32(__p0_534, __p1_534, __p2_534) __extension__ 
({ \ - float32_t __ret_534; \ - float32_t __s0_534 = __p0_534; \ - float32x2_t __s1_534 = __p1_534; \ - __ret_534 = __s0_534 * vget_lane_f32(__s1_534, __p2_534); \ - __ret_534; \ -}) -#else -#define vmuls_lane_f32(__p0_535, __p1_535, __p2_535) __extension__ ({ \ - float32_t __ret_535; \ - float32_t __s0_535 = __p0_535; \ - float32x2_t __s1_535 = __p1_535; \ - float32x2_t __rev1_535; __rev1_535 = __builtin_shufflevector(__s1_535, __s1_535, 1, 0); \ - __ret_535 = __s0_535 * __noswap_vget_lane_f32(__rev1_535, __p2_535); \ - __ret_535; \ -}) -#endif - -#define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1_t __ret; \ - float64x1_t __s0 = __p0; \ - float64x1_t __s1 = __p1; \ - __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \ - __ret; \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vmulq_lane_f64(__p0_536, __p1_536, __p2_536) __extension__ ({ \ - float64x2_t __ret_536; \ - float64x2_t __s0_536 = __p0_536; \ - float64x1_t __s1_536 = __p1_536; \ - __ret_536 = __s0_536 * splatq_lane_f64(__s1_536, __p2_536); \ - __ret_536; \ -}) -#else -#define vmulq_lane_f64(__p0_537, __p1_537, __p2_537) __extension__ ({ \ - float64x2_t __ret_537; \ - float64x2_t __s0_537 = __p0_537; \ - float64x1_t __s1_537 = __p1_537; \ - float64x2_t __rev0_537; __rev0_537 = __builtin_shufflevector(__s0_537, __s0_537, 1, 0); \ - __ret_537 = __rev0_537 * __noswap_splatq_lane_f64(__s1_537, __p2_537); \ - __ret_537 = __builtin_shufflevector(__ret_537, __ret_537, 1, 0); \ - __ret_537; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmuld_laneq_f64(__p0_538, __p1_538, __p2_538) __extension__ ({ \ - float64_t __ret_538; \ - float64_t __s0_538 = __p0_538; \ - float64x2_t __s1_538 = __p1_538; \ - __ret_538 = __s0_538 * vgetq_lane_f64(__s1_538, __p2_538); \ - __ret_538; \ -}) -#else -#define vmuld_laneq_f64(__p0_539, __p1_539, __p2_539) __extension__ ({ \ - float64_t __ret_539; \ - float64_t __s0_539 = __p0_539; \ - float64x2_t __s1_539 = __p1_539; \ - 
float64x2_t __rev1_539; __rev1_539 = __builtin_shufflevector(__s1_539, __s1_539, 1, 0); \ - __ret_539 = __s0_539 * __noswap_vgetq_lane_f64(__rev1_539, __p2_539); \ - __ret_539; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmuls_laneq_f32(__p0_540, __p1_540, __p2_540) __extension__ ({ \ - float32_t __ret_540; \ - float32_t __s0_540 = __p0_540; \ - float32x4_t __s1_540 = __p1_540; \ - __ret_540 = __s0_540 * vgetq_lane_f32(__s1_540, __p2_540); \ - __ret_540; \ -}) -#else -#define vmuls_laneq_f32(__p0_541, __p1_541, __p2_541) __extension__ ({ \ - float32_t __ret_541; \ - float32_t __s0_541 = __p0_541; \ - float32x4_t __s1_541 = __p1_541; \ - float32x4_t __rev1_541; __rev1_541 = __builtin_shufflevector(__s1_541, __s1_541, 3, 2, 1, 0); \ - __ret_541 = __s0_541 * __noswap_vgetq_lane_f32(__rev1_541, __p2_541); \ - __ret_541; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1_t __ret; \ - float64x1_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 10); \ - __ret; \ -}) #else -#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1_t __ret; \ - float64x1_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__rev1, __p2, 10); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float64_t vmaxvq_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vmaxvq_f64(__rev0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_u32(__p0_542, __p1_542, __p2_542) __extension__ ({ \ - uint32x4_t __ret_542; \ - uint32x4_t __s0_542 = __p0_542; \ - uint32x4_t __s1_542 = __p1_542; \ - __ret_542 = __s0_542 * splatq_laneq_u32(__s1_542, __p2_542); \ - 
__ret_542; \ -}) +__ai __attribute__((target("neon"))) float32_t vmaxvq_f32(float32x4_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxvq_f32(__p0); + return __ret; +} #else -#define vmulq_laneq_u32(__p0_543, __p1_543, __p2_543) __extension__ ({ \ - uint32x4_t __ret_543; \ - uint32x4_t __s0_543 = __p0_543; \ - uint32x4_t __s1_543 = __p1_543; \ - uint32x4_t __rev0_543; __rev0_543 = __builtin_shufflevector(__s0_543, __s0_543, 3, 2, 1, 0); \ - uint32x4_t __rev1_543; __rev1_543 = __builtin_shufflevector(__s1_543, __s1_543, 3, 2, 1, 0); \ - __ret_543 = __rev0_543 * __noswap_splatq_laneq_u32(__rev1_543, __p2_543); \ - __ret_543 = __builtin_shufflevector(__ret_543, __ret_543, 3, 2, 1, 0); \ - __ret_543; \ -}) +__ai __attribute__((target("neon"))) float32_t vmaxvq_f32(float32x4_t __p0) { + float32_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32_t) __builtin_neon_vmaxvq_f32(__rev0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_u16(__p0_544, __p1_544, __p2_544) __extension__ ({ \ - uint16x8_t __ret_544; \ - uint16x8_t __s0_544 = __p0_544; \ - uint16x8_t __s1_544 = __p1_544; \ - __ret_544 = __s0_544 * splatq_laneq_u16(__s1_544, __p2_544); \ - __ret_544; \ -}) +__ai __attribute__((target("neon"))) int32_t vmaxvq_s32(int32x4_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vmaxvq_s32(__p0); + return __ret; +} #else -#define vmulq_laneq_u16(__p0_545, __p1_545, __p2_545) __extension__ ({ \ - uint16x8_t __ret_545; \ - uint16x8_t __s0_545 = __p0_545; \ - uint16x8_t __s1_545 = __p1_545; \ - uint16x8_t __rev0_545; __rev0_545 = __builtin_shufflevector(__s0_545, __s0_545, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_545; __rev1_545 = __builtin_shufflevector(__s1_545, __s1_545, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_545 = __rev0_545 * __noswap_splatq_laneq_u16(__rev1_545, __p2_545); \ - __ret_545 = __builtin_shufflevector(__ret_545, __ret_545, 7, 6, 5, 4, 3, 2, 1, 0); \ 
- __ret_545; \ -}) +__ai __attribute__((target("neon"))) int32_t vmaxvq_s32(int32x4_t __p0) { + int32_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32_t) __builtin_neon_vmaxvq_s32(__rev0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_f64(__p0_546, __p1_546, __p2_546) __extension__ ({ \ - float64x2_t __ret_546; \ - float64x2_t __s0_546 = __p0_546; \ - float64x2_t __s1_546 = __p1_546; \ - __ret_546 = __s0_546 * splatq_laneq_f64(__s1_546, __p2_546); \ - __ret_546; \ -}) +__ai __attribute__((target("neon"))) int16_t vmaxvq_s16(int16x8_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vmaxvq_s16(__p0); + return __ret; +} #else -#define vmulq_laneq_f64(__p0_547, __p1_547, __p2_547) __extension__ ({ \ - float64x2_t __ret_547; \ - float64x2_t __s0_547 = __p0_547; \ - float64x2_t __s1_547 = __p1_547; \ - float64x2_t __rev0_547; __rev0_547 = __builtin_shufflevector(__s0_547, __s0_547, 1, 0); \ - float64x2_t __rev1_547; __rev1_547 = __builtin_shufflevector(__s1_547, __s1_547, 1, 0); \ - __ret_547 = __rev0_547 * __noswap_splatq_laneq_f64(__rev1_547, __p2_547); \ - __ret_547 = __builtin_shufflevector(__ret_547, __ret_547, 1, 0); \ - __ret_547; \ -}) +__ai __attribute__((target("neon"))) int16_t vmaxvq_s16(int16x8_t __p0) { + int16_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16_t) __builtin_neon_vmaxvq_s16(__rev0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_f32(__p0_548, __p1_548, __p2_548) __extension__ ({ \ - float32x4_t __ret_548; \ - float32x4_t __s0_548 = __p0_548; \ - float32x4_t __s1_548 = __p1_548; \ - __ret_548 = __s0_548 * splatq_laneq_f32(__s1_548, __p2_548); \ - __ret_548; \ -}) +__ai __attribute__((target("neon"))) uint8_t vmaxv_u8(uint8x8_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vmaxv_u8(__p0); + return __ret; +} #else -#define vmulq_laneq_f32(__p0_549, 
__p1_549, __p2_549) __extension__ ({ \ - float32x4_t __ret_549; \ - float32x4_t __s0_549 = __p0_549; \ - float32x4_t __s1_549 = __p1_549; \ - float32x4_t __rev0_549; __rev0_549 = __builtin_shufflevector(__s0_549, __s0_549, 3, 2, 1, 0); \ - float32x4_t __rev1_549; __rev1_549 = __builtin_shufflevector(__s1_549, __s1_549, 3, 2, 1, 0); \ - __ret_549 = __rev0_549 * __noswap_splatq_laneq_f32(__rev1_549, __p2_549); \ - __ret_549 = __builtin_shufflevector(__ret_549, __ret_549, 3, 2, 1, 0); \ - __ret_549; \ -}) +__ai __attribute__((target("neon"))) uint8_t vmaxv_u8(uint8x8_t __p0) { + uint8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8_t) __builtin_neon_vmaxv_u8(__rev0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_s32(__p0_550, __p1_550, __p2_550) __extension__ ({ \ - int32x4_t __ret_550; \ - int32x4_t __s0_550 = __p0_550; \ - int32x4_t __s1_550 = __p1_550; \ - __ret_550 = __s0_550 * splatq_laneq_s32(__s1_550, __p2_550); \ - __ret_550; \ -}) +__ai __attribute__((target("neon"))) uint32_t vmaxv_u32(uint32x2_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vmaxv_u32(__p0); + return __ret; +} #else -#define vmulq_laneq_s32(__p0_551, __p1_551, __p2_551) __extension__ ({ \ - int32x4_t __ret_551; \ - int32x4_t __s0_551 = __p0_551; \ - int32x4_t __s1_551 = __p1_551; \ - int32x4_t __rev0_551; __rev0_551 = __builtin_shufflevector(__s0_551, __s0_551, 3, 2, 1, 0); \ - int32x4_t __rev1_551; __rev1_551 = __builtin_shufflevector(__s1_551, __s1_551, 3, 2, 1, 0); \ - __ret_551 = __rev0_551 * __noswap_splatq_laneq_s32(__rev1_551, __p2_551); \ - __ret_551 = __builtin_shufflevector(__ret_551, __ret_551, 3, 2, 1, 0); \ - __ret_551; \ -}) +__ai __attribute__((target("neon"))) uint32_t vmaxv_u32(uint32x2_t __p0) { + uint32_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32_t) __builtin_neon_vmaxv_u32(__rev0); + return __ret; +} #endif 
#ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_s16(__p0_552, __p1_552, __p2_552) __extension__ ({ \ - int16x8_t __ret_552; \ - int16x8_t __s0_552 = __p0_552; \ - int16x8_t __s1_552 = __p1_552; \ - __ret_552 = __s0_552 * splatq_laneq_s16(__s1_552, __p2_552); \ - __ret_552; \ -}) +__ai __attribute__((target("neon"))) uint16_t vmaxv_u16(uint16x4_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vmaxv_u16(__p0); + return __ret; +} #else -#define vmulq_laneq_s16(__p0_553, __p1_553, __p2_553) __extension__ ({ \ - int16x8_t __ret_553; \ - int16x8_t __s0_553 = __p0_553; \ - int16x8_t __s1_553 = __p1_553; \ - int16x8_t __rev0_553; __rev0_553 = __builtin_shufflevector(__s0_553, __s0_553, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_553; __rev1_553 = __builtin_shufflevector(__s1_553, __s1_553, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_553 = __rev0_553 * __noswap_splatq_laneq_s16(__rev1_553, __p2_553); \ - __ret_553 = __builtin_shufflevector(__ret_553, __ret_553, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_553; \ -}) +__ai __attribute__((target("neon"))) uint16_t vmaxv_u16(uint16x4_t __p0) { + uint16_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16_t) __builtin_neon_vmaxv_u16(__rev0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_u32(__p0_554, __p1_554, __p2_554) __extension__ ({ \ - uint32x2_t __ret_554; \ - uint32x2_t __s0_554 = __p0_554; \ - uint32x4_t __s1_554 = __p1_554; \ - __ret_554 = __s0_554 * splat_laneq_u32(__s1_554, __p2_554); \ - __ret_554; \ -}) +__ai __attribute__((target("neon"))) int8_t vmaxv_s8(int8x8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vmaxv_s8(__p0); + return __ret; +} #else -#define vmul_laneq_u32(__p0_555, __p1_555, __p2_555) __extension__ ({ \ - uint32x2_t __ret_555; \ - uint32x2_t __s0_555 = __p0_555; \ - uint32x4_t __s1_555 = __p1_555; \ - uint32x2_t __rev0_555; __rev0_555 = __builtin_shufflevector(__s0_555, __s0_555, 1, 0); \ - uint32x4_t 
__rev1_555; __rev1_555 = __builtin_shufflevector(__s1_555, __s1_555, 3, 2, 1, 0); \ - __ret_555 = __rev0_555 * __noswap_splat_laneq_u32(__rev1_555, __p2_555); \ - __ret_555 = __builtin_shufflevector(__ret_555, __ret_555, 1, 0); \ - __ret_555; \ -}) +__ai __attribute__((target("neon"))) int8_t vmaxv_s8(int8x8_t __p0) { + int8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8_t) __builtin_neon_vmaxv_s8(__rev0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_u16(__p0_556, __p1_556, __p2_556) __extension__ ({ \ - uint16x4_t __ret_556; \ - uint16x4_t __s0_556 = __p0_556; \ - uint16x8_t __s1_556 = __p1_556; \ - __ret_556 = __s0_556 * splat_laneq_u16(__s1_556, __p2_556); \ - __ret_556; \ -}) +__ai __attribute__((target("neon"))) float32_t vmaxv_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmaxv_f32(__p0); + return __ret; +} #else -#define vmul_laneq_u16(__p0_557, __p1_557, __p2_557) __extension__ ({ \ - uint16x4_t __ret_557; \ - uint16x4_t __s0_557 = __p0_557; \ - uint16x8_t __s1_557 = __p1_557; \ - uint16x4_t __rev0_557; __rev0_557 = __builtin_shufflevector(__s0_557, __s0_557, 3, 2, 1, 0); \ - uint16x8_t __rev1_557; __rev1_557 = __builtin_shufflevector(__s1_557, __s1_557, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_557 = __rev0_557 * __noswap_splat_laneq_u16(__rev1_557, __p2_557); \ - __ret_557 = __builtin_shufflevector(__ret_557, __ret_557, 3, 2, 1, 0); \ - __ret_557; \ -}) +__ai __attribute__((target("neon"))) float32_t vmaxv_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vmaxv_f32(__rev0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_f32(__p0_558, __p1_558, __p2_558) __extension__ ({ \ - float32x2_t __ret_558; \ - float32x2_t __s0_558 = __p0_558; \ - float32x4_t __s1_558 = __p1_558; \ - __ret_558 = __s0_558 * 
splat_laneq_f32(__s1_558, __p2_558); \ - __ret_558; \ -}) +__ai __attribute__((target("neon"))) int32_t vmaxv_s32(int32x2_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vmaxv_s32(__p0); + return __ret; +} #else -#define vmul_laneq_f32(__p0_559, __p1_559, __p2_559) __extension__ ({ \ - float32x2_t __ret_559; \ - float32x2_t __s0_559 = __p0_559; \ - float32x4_t __s1_559 = __p1_559; \ - float32x2_t __rev0_559; __rev0_559 = __builtin_shufflevector(__s0_559, __s0_559, 1, 0); \ - float32x4_t __rev1_559; __rev1_559 = __builtin_shufflevector(__s1_559, __s1_559, 3, 2, 1, 0); \ - __ret_559 = __rev0_559 * __noswap_splat_laneq_f32(__rev1_559, __p2_559); \ - __ret_559 = __builtin_shufflevector(__ret_559, __ret_559, 1, 0); \ - __ret_559; \ -}) +__ai __attribute__((target("neon"))) int32_t vmaxv_s32(int32x2_t __p0) { + int32_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32_t) __builtin_neon_vmaxv_s32(__rev0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_s32(__p0_560, __p1_560, __p2_560) __extension__ ({ \ - int32x2_t __ret_560; \ - int32x2_t __s0_560 = __p0_560; \ - int32x4_t __s1_560 = __p1_560; \ - __ret_560 = __s0_560 * splat_laneq_s32(__s1_560, __p2_560); \ - __ret_560; \ -}) +__ai __attribute__((target("neon"))) int16_t vmaxv_s16(int16x4_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vmaxv_s16(__p0); + return __ret; +} #else -#define vmul_laneq_s32(__p0_561, __p1_561, __p2_561) __extension__ ({ \ - int32x2_t __ret_561; \ - int32x2_t __s0_561 = __p0_561; \ - int32x4_t __s1_561 = __p1_561; \ - int32x2_t __rev0_561; __rev0_561 = __builtin_shufflevector(__s0_561, __s0_561, 1, 0); \ - int32x4_t __rev1_561; __rev1_561 = __builtin_shufflevector(__s1_561, __s1_561, 3, 2, 1, 0); \ - __ret_561 = __rev0_561 * __noswap_splat_laneq_s32(__rev1_561, __p2_561); \ - __ret_561 = __builtin_shufflevector(__ret_561, __ret_561, 1, 0); \ - __ret_561; \ -}) +__ai __attribute__((target("neon"))) 
int16_t vmaxv_s16(int16x4_t __p0) { + int16_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16_t) __builtin_neon_vmaxv_s16(__rev0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_s16(__p0_562, __p1_562, __p2_562) __extension__ ({ \ - int16x4_t __ret_562; \ - int16x4_t __s0_562 = __p0_562; \ - int16x8_t __s1_562 = __p1_562; \ - __ret_562 = __s0_562 * splat_laneq_s16(__s1_562, __p2_562); \ - __ret_562; \ -}) +__ai __attribute__((target("neon"))) float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} #else -#define vmul_laneq_s16(__p0_563, __p1_563, __p2_563) __extension__ ({ \ - int16x4_t __ret_563; \ - int16x4_t __s0_563 = __p0_563; \ - int16x8_t __s1_563 = __p1_563; \ - int16x4_t __rev0_563; __rev0_563 = __builtin_shufflevector(__s0_563, __s0_563, 3, 2, 1, 0); \ - int16x8_t __rev1_563; __rev1_563 = __builtin_shufflevector(__s1_563, __s1_563, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_563 = __rev0_563 * __noswap_splat_laneq_s16(__rev1_563, __p2_563); \ - __ret_563 = __builtin_shufflevector(__ret_563, __ret_563, 3, 2, 1, 0); \ - __ret_563; \ -}) +__ai __attribute__((target("neon"))) float64x2_t vminq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif -__ai float64x1_t vmul_n_f64(float64x1_t __p0, float64_t __p1) { +__ai __attribute__((target("neon"))) float64x1_t vmin_f64(float64x1_t __p0, float64x1_t __p1) { float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vmul_n_f64((float64x1_t)__p0, __p1); + __ret = (float64x1_t) 
__builtin_neon_vmin_v((int8x8_t)__p0, (int8x8_t)__p1, 10); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) { - float64x2_t __ret; - __ret = __p0 * (float64x2_t) {__p1, __p1}; +__ai __attribute__((target("neon"))) float64_t vminnmvq_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vminnmvq_f64(__p0); return __ret; } #else -__ai float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) { - float64x2_t __ret; +__ai __attribute__((target("neon"))) float64_t vminnmvq_f64(float64x2_t __p0) { + float64_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = __rev0 * (float64x2_t) {__p1, __p1}; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + __ret = (float64_t) __builtin_neon_vminnmvq_f64(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly16x8_t __ret; - __ret = vmull_p8(vget_high_p8(__p0), vget_high_p8(__p1)); +__ai __attribute__((target("neon"))) float32_t vminnmvq_f32(float32x4_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminnmvq_f32(__p0); return __ret; } #else -__ai poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly16x8_t __ret; - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vmull_p8(__noswap_vget_high_p8(__rev0), __noswap_vget_high_p8(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float32_t vminnmvq_f32(float32x4_t __p0) { + float32_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32_t) __builtin_neon_vminnmvq_f32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai 
uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint16x8_t __ret; - __ret = vmull_u8(vget_high_u8(__p0), vget_high_u8(__p1)); +__ai __attribute__((target("neon"))) float32_t vminnmv_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminnmv_f32(__p0); return __ret; } #else -__ai uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint16x8_t __ret; +__ai __attribute__((target("neon"))) float32_t vminnmv_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vminnmv_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8_t vminvq_u8(uint8x16_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vminvq_u8(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8_t vminvq_u8(uint8x16_t __p0) { + uint8_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vmull_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8_t) __builtin_neon_vminvq_u8(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint64x2_t __ret; - __ret = vmull_u32(vget_high_u32(__p0), vget_high_u32(__p1)); +__ai __attribute__((target("neon"))) uint32_t vminvq_u32(uint32x4_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vminvq_u32(__p0); return __ret; } #else -__ai uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint64x2_t __ret; +__ai __attribute__((target("neon"))) uint32_t vminvq_u32(uint32x4_t __p0) { + uint32_t __ret; uint32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + __ret = (uint32_t) __builtin_neon_vminvq_u32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint32x4_t __ret; - __ret = vmull_u16(vget_high_u16(__p0), vget_high_u16(__p1)); +__ai __attribute__((target("neon"))) uint16_t vminvq_u16(uint16x8_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vminvq_u16(__p0); return __ret; } #else -__ai uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint32x4_t __ret; +__ai __attribute__((target("neon"))) uint16_t vminvq_u16(uint16x8_t __p0) { + uint16_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + __ret = (uint16_t) __builtin_neon_vminvq_u16(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) { - int16x8_t __ret; - __ret = vmull_s8(vget_high_s8(__p0), vget_high_s8(__p1)); +__ai __attribute__((target("neon"))) int8_t vminvq_s8(int8x16_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vminvq_s8(__p0); return __ret; } #else -__ai int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) { - int16x8_t __ret; +__ai __attribute__((target("neon"))) int8_t vminvq_s8(int8x16_t __p0) { + int8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 
12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vmull_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8_t) __builtin_neon_vminvq_s8(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) { - int64x2_t __ret; - __ret = vmull_s32(vget_high_s32(__p0), vget_high_s32(__p1)); +__ai __attribute__((target("neon"))) float64_t vminvq_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vminvq_f64(__p0); return __ret; } #else -__ai int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) { - int64x2_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) float64_t vminvq_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vminvq_f64(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) { - int32x4_t __ret; - __ret = vmull_s16(vget_high_s16(__p0), vget_high_s16(__p1)); +__ai __attribute__((target("neon"))) float32_t vminvq_f32(float32x4_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminvq_f32(__p0); return __ret; } #else -__ai int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) { - int32x4_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1)); - __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float32_t vminvq_f32(float32x4_t __p0) { + float32_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32_t) __builtin_neon_vminvq_f32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_high_lane_u32(__p0_564, __p1_564, __p2_564) __extension__ ({ \ - uint64x2_t __ret_564; \ - uint32x4_t __s0_564 = __p0_564; \ - uint32x2_t __s1_564 = __p1_564; \ - __ret_564 = vmull_u32(vget_high_u32(__s0_564), splat_lane_u32(__s1_564, __p2_564)); \ - __ret_564; \ -}) +__ai __attribute__((target("neon"))) int32_t vminvq_s32(int32x4_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vminvq_s32(__p0); + return __ret; +} #else -#define vmull_high_lane_u32(__p0_565, __p1_565, __p2_565) __extension__ ({ \ - uint64x2_t __ret_565; \ - uint32x4_t __s0_565 = __p0_565; \ - uint32x2_t __s1_565 = __p1_565; \ - uint32x4_t __rev0_565; __rev0_565 = __builtin_shufflevector(__s0_565, __s0_565, 3, 2, 1, 0); \ - uint32x2_t __rev1_565; __rev1_565 = __builtin_shufflevector(__s1_565, __s1_565, 1, 0); \ - __ret_565 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_565), __noswap_splat_lane_u32(__rev1_565, __p2_565)); \ - __ret_565 = __builtin_shufflevector(__ret_565, __ret_565, 1, 0); \ - __ret_565; \ -}) +__ai __attribute__((target("neon"))) int32_t vminvq_s32(int32x4_t __p0) { + int32_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int32_t) __builtin_neon_vminvq_s32(__rev0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_high_lane_u16(__p0_566, __p1_566, __p2_566) __extension__ ({ \ - uint32x4_t __ret_566; \ - uint16x8_t __s0_566 = __p0_566; \ - uint16x4_t __s1_566 = __p1_566; \ - __ret_566 = vmull_u16(vget_high_u16(__s0_566), splat_lane_u16(__s1_566, __p2_566)); \ - __ret_566; \ -}) +__ai __attribute__((target("neon"))) int16_t vminvq_s16(int16x8_t __p0) { + 
int16_t __ret; + __ret = (int16_t) __builtin_neon_vminvq_s16(__p0); + return __ret; +} #else -#define vmull_high_lane_u16(__p0_567, __p1_567, __p2_567) __extension__ ({ \ - uint32x4_t __ret_567; \ - uint16x8_t __s0_567 = __p0_567; \ - uint16x4_t __s1_567 = __p1_567; \ - uint16x8_t __rev0_567; __rev0_567 = __builtin_shufflevector(__s0_567, __s0_567, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x4_t __rev1_567; __rev1_567 = __builtin_shufflevector(__s1_567, __s1_567, 3, 2, 1, 0); \ - __ret_567 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_567), __noswap_splat_lane_u16(__rev1_567, __p2_567)); \ - __ret_567 = __builtin_shufflevector(__ret_567, __ret_567, 3, 2, 1, 0); \ - __ret_567; \ -}) +__ai __attribute__((target("neon"))) int16_t vminvq_s16(int16x8_t __p0) { + int16_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16_t) __builtin_neon_vminvq_s16(__rev0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_high_lane_s32(__p0_568, __p1_568, __p2_568) __extension__ ({ \ - int64x2_t __ret_568; \ - int32x4_t __s0_568 = __p0_568; \ - int32x2_t __s1_568 = __p1_568; \ - __ret_568 = vmull_s32(vget_high_s32(__s0_568), splat_lane_s32(__s1_568, __p2_568)); \ - __ret_568; \ -}) +__ai __attribute__((target("neon"))) uint8_t vminv_u8(uint8x8_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vminv_u8(__p0); + return __ret; +} #else -#define vmull_high_lane_s32(__p0_569, __p1_569, __p2_569) __extension__ ({ \ - int64x2_t __ret_569; \ - int32x4_t __s0_569 = __p0_569; \ - int32x2_t __s1_569 = __p1_569; \ - int32x4_t __rev0_569; __rev0_569 = __builtin_shufflevector(__s0_569, __s0_569, 3, 2, 1, 0); \ - int32x2_t __rev1_569; __rev1_569 = __builtin_shufflevector(__s1_569, __s1_569, 1, 0); \ - __ret_569 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_569), __noswap_splat_lane_s32(__rev1_569, __p2_569)); \ - __ret_569 = __builtin_shufflevector(__ret_569, __ret_569, 1, 0); \ - __ret_569; \ -}) +__ai 
__attribute__((target("neon"))) uint8_t vminv_u8(uint8x8_t __p0) { + uint8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8_t) __builtin_neon_vminv_u8(__rev0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_high_lane_s16(__p0_570, __p1_570, __p2_570) __extension__ ({ \ - int32x4_t __ret_570; \ - int16x8_t __s0_570 = __p0_570; \ - int16x4_t __s1_570 = __p1_570; \ - __ret_570 = vmull_s16(vget_high_s16(__s0_570), splat_lane_s16(__s1_570, __p2_570)); \ - __ret_570; \ -}) +__ai __attribute__((target("neon"))) uint32_t vminv_u32(uint32x2_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vminv_u32(__p0); + return __ret; +} #else -#define vmull_high_lane_s16(__p0_571, __p1_571, __p2_571) __extension__ ({ \ - int32x4_t __ret_571; \ - int16x8_t __s0_571 = __p0_571; \ - int16x4_t __s1_571 = __p1_571; \ - int16x8_t __rev0_571; __rev0_571 = __builtin_shufflevector(__s0_571, __s0_571, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev1_571; __rev1_571 = __builtin_shufflevector(__s1_571, __s1_571, 3, 2, 1, 0); \ - __ret_571 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_571), __noswap_splat_lane_s16(__rev1_571, __p2_571)); \ - __ret_571 = __builtin_shufflevector(__ret_571, __ret_571, 3, 2, 1, 0); \ - __ret_571; \ -}) +__ai __attribute__((target("neon"))) uint32_t vminv_u32(uint32x2_t __p0) { + uint32_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint32_t) __builtin_neon_vminv_u32(__rev0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_high_laneq_u32(__p0_572, __p1_572, __p2_572) __extension__ ({ \ - uint64x2_t __ret_572; \ - uint32x4_t __s0_572 = __p0_572; \ - uint32x4_t __s1_572 = __p1_572; \ - __ret_572 = vmull_u32(vget_high_u32(__s0_572), splat_laneq_u32(__s1_572, __p2_572)); \ - __ret_572; \ -}) +__ai __attribute__((target("neon"))) uint16_t vminv_u16(uint16x4_t __p0) { + uint16_t __ret; + __ret = (uint16_t) 
__builtin_neon_vminv_u16(__p0); + return __ret; +} #else -#define vmull_high_laneq_u32(__p0_573, __p1_573, __p2_573) __extension__ ({ \ - uint64x2_t __ret_573; \ - uint32x4_t __s0_573 = __p0_573; \ - uint32x4_t __s1_573 = __p1_573; \ - uint32x4_t __rev0_573; __rev0_573 = __builtin_shufflevector(__s0_573, __s0_573, 3, 2, 1, 0); \ - uint32x4_t __rev1_573; __rev1_573 = __builtin_shufflevector(__s1_573, __s1_573, 3, 2, 1, 0); \ - __ret_573 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_573), __noswap_splat_laneq_u32(__rev1_573, __p2_573)); \ - __ret_573 = __builtin_shufflevector(__ret_573, __ret_573, 1, 0); \ - __ret_573; \ -}) +__ai __attribute__((target("neon"))) uint16_t vminv_u16(uint16x4_t __p0) { + uint16_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (uint16_t) __builtin_neon_vminv_u16(__rev0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_high_laneq_u16(__p0_574, __p1_574, __p2_574) __extension__ ({ \ - uint32x4_t __ret_574; \ - uint16x8_t __s0_574 = __p0_574; \ - uint16x8_t __s1_574 = __p1_574; \ - __ret_574 = vmull_u16(vget_high_u16(__s0_574), splat_laneq_u16(__s1_574, __p2_574)); \ - __ret_574; \ -}) +__ai __attribute__((target("neon"))) int8_t vminv_s8(int8x8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vminv_s8(__p0); + return __ret; +} #else -#define vmull_high_laneq_u16(__p0_575, __p1_575, __p2_575) __extension__ ({ \ - uint32x4_t __ret_575; \ - uint16x8_t __s0_575 = __p0_575; \ - uint16x8_t __s1_575 = __p1_575; \ - uint16x8_t __rev0_575; __rev0_575 = __builtin_shufflevector(__s0_575, __s0_575, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_575; __rev1_575 = __builtin_shufflevector(__s1_575, __s1_575, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_575 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_575), __noswap_splat_laneq_u16(__rev1_575, __p2_575)); \ - __ret_575 = __builtin_shufflevector(__ret_575, __ret_575, 3, 2, 1, 0); \ - __ret_575; \ -}) +__ai 
__attribute__((target("neon"))) int8_t vminv_s8(int8x8_t __p0) { + int8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8_t) __builtin_neon_vminv_s8(__rev0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_high_laneq_s32(__p0_576, __p1_576, __p2_576) __extension__ ({ \ - int64x2_t __ret_576; \ - int32x4_t __s0_576 = __p0_576; \ - int32x4_t __s1_576 = __p1_576; \ - __ret_576 = vmull_s32(vget_high_s32(__s0_576), splat_laneq_s32(__s1_576, __p2_576)); \ - __ret_576; \ -}) -#else -#define vmull_high_laneq_s32(__p0_577, __p1_577, __p2_577) __extension__ ({ \ - int64x2_t __ret_577; \ - int32x4_t __s0_577 = __p0_577; \ - int32x4_t __s1_577 = __p1_577; \ - int32x4_t __rev0_577; __rev0_577 = __builtin_shufflevector(__s0_577, __s0_577, 3, 2, 1, 0); \ - int32x4_t __rev1_577; __rev1_577 = __builtin_shufflevector(__s1_577, __s1_577, 3, 2, 1, 0); \ - __ret_577 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_577), __noswap_splat_laneq_s32(__rev1_577, __p2_577)); \ - __ret_577 = __builtin_shufflevector(__ret_577, __ret_577, 1, 0); \ - __ret_577; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmull_high_laneq_s16(__p0_578, __p1_578, __p2_578) __extension__ ({ \ - int32x4_t __ret_578; \ - int16x8_t __s0_578 = __p0_578; \ - int16x8_t __s1_578 = __p1_578; \ - __ret_578 = vmull_s16(vget_high_s16(__s0_578), splat_laneq_s16(__s1_578, __p2_578)); \ - __ret_578; \ -}) -#else -#define vmull_high_laneq_s16(__p0_579, __p1_579, __p2_579) __extension__ ({ \ - int32x4_t __ret_579; \ - int16x8_t __s0_579 = __p0_579; \ - int16x8_t __s1_579 = __p1_579; \ - int16x8_t __rev0_579; __rev0_579 = __builtin_shufflevector(__s0_579, __s0_579, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_579; __rev1_579 = __builtin_shufflevector(__s1_579, __s1_579, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_579 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_579), __noswap_splat_laneq_s16(__rev1_579, __p2_579)); \ - __ret_579 = 
__builtin_shufflevector(__ret_579, __ret_579, 3, 2, 1, 0); \ - __ret_579; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) { - uint64x2_t __ret; - __ret = vmull_n_u32(vget_high_u32(__p0), __p1); +__ai __attribute__((target("neon"))) float32_t vminv_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vminv_f32(__p0); return __ret; } #else -__ai uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) { - uint64x2_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __noswap_vmull_n_u32(__noswap_vget_high_u32(__rev0), __p1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) float32_t vminv_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vminv_f32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) { - uint32x4_t __ret; - __ret = vmull_n_u16(vget_high_u16(__p0), __p1); +__ai __attribute__((target("neon"))) int32_t vminv_s32(int32x2_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vminv_s32(__p0); return __ret; } #else -__ai uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) { - uint32x4_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vmull_n_u16(__noswap_vget_high_u16(__rev0), __p1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32_t vminv_s32(int32x2_t __p0) { + int32_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int32_t) __builtin_neon_vminv_s32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) { - int64x2_t __ret; - __ret = vmull_n_s32(vget_high_s32(__p0), 
__p1); +__ai __attribute__((target("neon"))) int16_t vminv_s16(int16x4_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vminv_s16(__p0); return __ret; } #else -__ai int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) { - int64x2_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __noswap_vmull_n_s32(__noswap_vget_high_s32(__rev0), __p1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) int16_t vminv_s16(int16x4_t __p0) { + int16_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (int16_t) __builtin_neon_vminv_s16(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) { - int32x4_t __ret; - __ret = vmull_n_s16(vget_high_s16(__p0), __p1); +__ai __attribute__((target("neon"))) float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + __ret = __p0 + __p1 * __p2; return __ret; } #else -__ai int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) { - int32x4_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vmull_n_s16(__noswap_vget_high_s16(__rev0), __p1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float64x2_t vmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 + __rev1 * __rev2; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif +__ai __attribute__((target("neon"))) float64x1_t vmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = __p0 + __p1 * __p2; + 
return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vmull_laneq_u32(__p0_580, __p1_580, __p2_580) __extension__ ({ \ - uint64x2_t __ret_580; \ - uint32x2_t __s0_580 = __p0_580; \ - uint32x4_t __s1_580 = __p1_580; \ - __ret_580 = vmull_u32(__s0_580, splat_laneq_u32(__s1_580, __p2_580)); \ - __ret_580; \ +#define vmlaq_laneq_u32(__p0_448, __p1_448, __p2_448, __p3_448) __extension__ ({ \ + uint32x4_t __ret_448; \ + uint32x4_t __s0_448 = __p0_448; \ + uint32x4_t __s1_448 = __p1_448; \ + uint32x4_t __s2_448 = __p2_448; \ + __ret_448 = __s0_448 + __s1_448 * splatq_laneq_u32(__s2_448, __p3_448); \ + __ret_448; \ }) #else -#define vmull_laneq_u32(__p0_581, __p1_581, __p2_581) __extension__ ({ \ - uint64x2_t __ret_581; \ - uint32x2_t __s0_581 = __p0_581; \ - uint32x4_t __s1_581 = __p1_581; \ - uint32x2_t __rev0_581; __rev0_581 = __builtin_shufflevector(__s0_581, __s0_581, 1, 0); \ - uint32x4_t __rev1_581; __rev1_581 = __builtin_shufflevector(__s1_581, __s1_581, 3, 2, 1, 0); \ - __ret_581 = __noswap_vmull_u32(__rev0_581, __noswap_splat_laneq_u32(__rev1_581, __p2_581)); \ - __ret_581 = __builtin_shufflevector(__ret_581, __ret_581, 1, 0); \ - __ret_581; \ +#define vmlaq_laneq_u32(__p0_449, __p1_449, __p2_449, __p3_449) __extension__ ({ \ + uint32x4_t __ret_449; \ + uint32x4_t __s0_449 = __p0_449; \ + uint32x4_t __s1_449 = __p1_449; \ + uint32x4_t __s2_449 = __p2_449; \ + uint32x4_t __rev0_449; __rev0_449 = __builtin_shufflevector(__s0_449, __s0_449, 3, 2, 1, 0); \ + uint32x4_t __rev1_449; __rev1_449 = __builtin_shufflevector(__s1_449, __s1_449, 3, 2, 1, 0); \ + uint32x4_t __rev2_449; __rev2_449 = __builtin_shufflevector(__s2_449, __s2_449, 3, 2, 1, 0); \ + __ret_449 = __rev0_449 + __rev1_449 * __noswap_splatq_laneq_u32(__rev2_449, __p3_449); \ + __ret_449 = __builtin_shufflevector(__ret_449, __ret_449, 3, 2, 1, 0); \ + __ret_449; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_laneq_u16(__p0_582, __p1_582, __p2_582) __extension__ ({ \ - uint32x4_t __ret_582; \ - uint16x4_t 
__s0_582 = __p0_582; \ - uint16x8_t __s1_582 = __p1_582; \ - __ret_582 = vmull_u16(__s0_582, splat_laneq_u16(__s1_582, __p2_582)); \ - __ret_582; \ +#define vmlaq_laneq_u16(__p0_450, __p1_450, __p2_450, __p3_450) __extension__ ({ \ + uint16x8_t __ret_450; \ + uint16x8_t __s0_450 = __p0_450; \ + uint16x8_t __s1_450 = __p1_450; \ + uint16x8_t __s2_450 = __p2_450; \ + __ret_450 = __s0_450 + __s1_450 * splatq_laneq_u16(__s2_450, __p3_450); \ + __ret_450; \ }) #else -#define vmull_laneq_u16(__p0_583, __p1_583, __p2_583) __extension__ ({ \ - uint32x4_t __ret_583; \ - uint16x4_t __s0_583 = __p0_583; \ - uint16x8_t __s1_583 = __p1_583; \ - uint16x4_t __rev0_583; __rev0_583 = __builtin_shufflevector(__s0_583, __s0_583, 3, 2, 1, 0); \ - uint16x8_t __rev1_583; __rev1_583 = __builtin_shufflevector(__s1_583, __s1_583, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_583 = __noswap_vmull_u16(__rev0_583, __noswap_splat_laneq_u16(__rev1_583, __p2_583)); \ - __ret_583 = __builtin_shufflevector(__ret_583, __ret_583, 3, 2, 1, 0); \ - __ret_583; \ +#define vmlaq_laneq_u16(__p0_451, __p1_451, __p2_451, __p3_451) __extension__ ({ \ + uint16x8_t __ret_451; \ + uint16x8_t __s0_451 = __p0_451; \ + uint16x8_t __s1_451 = __p1_451; \ + uint16x8_t __s2_451 = __p2_451; \ + uint16x8_t __rev0_451; __rev0_451 = __builtin_shufflevector(__s0_451, __s0_451, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_451; __rev1_451 = __builtin_shufflevector(__s1_451, __s1_451, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_451; __rev2_451 = __builtin_shufflevector(__s2_451, __s2_451, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_451 = __rev0_451 + __rev1_451 * __noswap_splatq_laneq_u16(__rev2_451, __p3_451); \ + __ret_451 = __builtin_shufflevector(__ret_451, __ret_451, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_451; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_laneq_s32(__p0_584, __p1_584, __p2_584) __extension__ ({ \ - int64x2_t __ret_584; \ - int32x2_t __s0_584 = __p0_584; \ - int32x4_t __s1_584 = __p1_584; \ - __ret_584 = 
vmull_s32(__s0_584, splat_laneq_s32(__s1_584, __p2_584)); \ - __ret_584; \ +#define vmlaq_laneq_f32(__p0_452, __p1_452, __p2_452, __p3_452) __extension__ ({ \ + float32x4_t __ret_452; \ + float32x4_t __s0_452 = __p0_452; \ + float32x4_t __s1_452 = __p1_452; \ + float32x4_t __s2_452 = __p2_452; \ + __ret_452 = __s0_452 + __s1_452 * splatq_laneq_f32(__s2_452, __p3_452); \ + __ret_452; \ }) #else -#define vmull_laneq_s32(__p0_585, __p1_585, __p2_585) __extension__ ({ \ - int64x2_t __ret_585; \ - int32x2_t __s0_585 = __p0_585; \ - int32x4_t __s1_585 = __p1_585; \ - int32x2_t __rev0_585; __rev0_585 = __builtin_shufflevector(__s0_585, __s0_585, 1, 0); \ - int32x4_t __rev1_585; __rev1_585 = __builtin_shufflevector(__s1_585, __s1_585, 3, 2, 1, 0); \ - __ret_585 = __noswap_vmull_s32(__rev0_585, __noswap_splat_laneq_s32(__rev1_585, __p2_585)); \ - __ret_585 = __builtin_shufflevector(__ret_585, __ret_585, 1, 0); \ - __ret_585; \ +#define vmlaq_laneq_f32(__p0_453, __p1_453, __p2_453, __p3_453) __extension__ ({ \ + float32x4_t __ret_453; \ + float32x4_t __s0_453 = __p0_453; \ + float32x4_t __s1_453 = __p1_453; \ + float32x4_t __s2_453 = __p2_453; \ + float32x4_t __rev0_453; __rev0_453 = __builtin_shufflevector(__s0_453, __s0_453, 3, 2, 1, 0); \ + float32x4_t __rev1_453; __rev1_453 = __builtin_shufflevector(__s1_453, __s1_453, 3, 2, 1, 0); \ + float32x4_t __rev2_453; __rev2_453 = __builtin_shufflevector(__s2_453, __s2_453, 3, 2, 1, 0); \ + __ret_453 = __rev0_453 + __rev1_453 * __noswap_splatq_laneq_f32(__rev2_453, __p3_453); \ + __ret_453 = __builtin_shufflevector(__ret_453, __ret_453, 3, 2, 1, 0); \ + __ret_453; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmull_laneq_s16(__p0_586, __p1_586, __p2_586) __extension__ ({ \ - int32x4_t __ret_586; \ - int16x4_t __s0_586 = __p0_586; \ - int16x8_t __s1_586 = __p1_586; \ - __ret_586 = vmull_s16(__s0_586, splat_laneq_s16(__s1_586, __p2_586)); \ - __ret_586; \ +#define vmlaq_laneq_s32(__p0_454, __p1_454, __p2_454, __p3_454) 
__extension__ ({ \ + int32x4_t __ret_454; \ + int32x4_t __s0_454 = __p0_454; \ + int32x4_t __s1_454 = __p1_454; \ + int32x4_t __s2_454 = __p2_454; \ + __ret_454 = __s0_454 + __s1_454 * splatq_laneq_s32(__s2_454, __p3_454); \ + __ret_454; \ }) #else -#define vmull_laneq_s16(__p0_587, __p1_587, __p2_587) __extension__ ({ \ - int32x4_t __ret_587; \ - int16x4_t __s0_587 = __p0_587; \ - int16x8_t __s1_587 = __p1_587; \ - int16x4_t __rev0_587; __rev0_587 = __builtin_shufflevector(__s0_587, __s0_587, 3, 2, 1, 0); \ - int16x8_t __rev1_587; __rev1_587 = __builtin_shufflevector(__s1_587, __s1_587, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_587 = __noswap_vmull_s16(__rev0_587, __noswap_splat_laneq_s16(__rev1_587, __p2_587)); \ - __ret_587 = __builtin_shufflevector(__ret_587, __ret_587, 3, 2, 1, 0); \ - __ret_587; \ +#define vmlaq_laneq_s32(__p0_455, __p1_455, __p2_455, __p3_455) __extension__ ({ \ + int32x4_t __ret_455; \ + int32x4_t __s0_455 = __p0_455; \ + int32x4_t __s1_455 = __p1_455; \ + int32x4_t __s2_455 = __p2_455; \ + int32x4_t __rev0_455; __rev0_455 = __builtin_shufflevector(__s0_455, __s0_455, 3, 2, 1, 0); \ + int32x4_t __rev1_455; __rev1_455 = __builtin_shufflevector(__s1_455, __s1_455, 3, 2, 1, 0); \ + int32x4_t __rev2_455; __rev2_455 = __builtin_shufflevector(__s2_455, __s2_455, 3, 2, 1, 0); \ + __ret_455 = __rev0_455 + __rev1_455 * __noswap_splatq_laneq_s32(__rev2_455, __p3_455); \ + __ret_455 = __builtin_shufflevector(__ret_455, __ret_455, 3, 2, 1, 0); \ + __ret_455; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - return __ret; -} -#else -__ai float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float64x2_t) 
__builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} +#define vmlaq_laneq_s16(__p0_456, __p1_456, __p2_456, __p3_456) __extension__ ({ \ + int16x8_t __ret_456; \ + int16x8_t __s0_456 = __p0_456; \ + int16x8_t __s1_456 = __p1_456; \ + int16x8_t __s2_456 = __p2_456; \ + __ret_456 = __s0_456 + __s1_456 * splatq_laneq_s16(__s2_456, __p3_456); \ + __ret_456; \ +}) #else -__ai float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai float32x4_t __noswap_vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} +#define vmlaq_laneq_s16(__p0_457, __p1_457, __p2_457, __p3_457) __extension__ ({ \ + int16x8_t __ret_457; \ + int16x8_t __s0_457 = __p0_457; \ + int16x8_t __s1_457 = __p1_457; \ + int16x8_t __s2_457 = __p2_457; \ + int16x8_t __rev0_457; __rev0_457 = __builtin_shufflevector(__s0_457, __s0_457, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_457; __rev1_457 = __builtin_shufflevector(__s1_457, __s1_457, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_457; __rev2_457 = 
__builtin_shufflevector(__s2_457, __s2_457, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_457 = __rev0_457 + __rev1_457 * __noswap_splatq_laneq_s16(__rev2_457, __p3_457); \ + __ret_457 = __builtin_shufflevector(__ret_457, __ret_457, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_457; \ +}) #endif -__ai float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} +#define vmla_laneq_u32(__p0_458, __p1_458, __p2_458, __p3_458) __extension__ ({ \ + uint32x2_t __ret_458; \ + uint32x2_t __s0_458 = __p0_458; \ + uint32x2_t __s1_458 = __p1_458; \ + uint32x4_t __s2_458 = __p2_458; \ + __ret_458 = __s0_458 + __s1_458 * splat_laneq_u32(__s2_458, __p3_458); \ + __ret_458; \ +}) #else -__ai float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai float32x2_t __noswap_vmulx_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} +#define vmla_laneq_u32(__p0_459, __p1_459, __p2_459, __p3_459) __extension__ ({ \ + uint32x2_t __ret_459; \ + uint32x2_t __s0_459 = __p0_459; \ + uint32x2_t __s1_459 = __p1_459; \ + uint32x4_t __s2_459 = __p2_459; \ + uint32x2_t __rev0_459; __rev0_459 = __builtin_shufflevector(__s0_459, __s0_459, 1, 0); \ + uint32x2_t __rev1_459; __rev1_459 = __builtin_shufflevector(__s1_459, __s1_459, 1, 0); \ + 
uint32x4_t __rev2_459; __rev2_459 = __builtin_shufflevector(__s2_459, __s2_459, 3, 2, 1, 0); \ + __ret_459 = __rev0_459 + __rev1_459 * __noswap_splat_laneq_u32(__rev2_459, __p3_459); \ + __ret_459 = __builtin_shufflevector(__ret_459, __ret_459, 1, 0); \ + __ret_459; \ +}) #endif -__ai float64_t vmulxd_f64(float64_t __p0, float64_t __p1) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1); - return __ret; -} -__ai float32_t vmulxs_f32(float32_t __p0, float32_t __p1) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1); - return __ret; -} -#define vmulxd_lane_f64(__p0_588, __p1_588, __p2_588) __extension__ ({ \ - float64_t __ret_588; \ - float64_t __s0_588 = __p0_588; \ - float64x1_t __s1_588 = __p1_588; \ - __ret_588 = vmulxd_f64(__s0_588, vget_lane_f64(__s1_588, __p2_588)); \ - __ret_588; \ -}) #ifdef __LITTLE_ENDIAN__ -#define vmulxs_lane_f32(__p0_589, __p1_589, __p2_589) __extension__ ({ \ - float32_t __ret_589; \ - float32_t __s0_589 = __p0_589; \ - float32x2_t __s1_589 = __p1_589; \ - __ret_589 = vmulxs_f32(__s0_589, vget_lane_f32(__s1_589, __p2_589)); \ - __ret_589; \ +#define vmla_laneq_u16(__p0_460, __p1_460, __p2_460, __p3_460) __extension__ ({ \ + uint16x4_t __ret_460; \ + uint16x4_t __s0_460 = __p0_460; \ + uint16x4_t __s1_460 = __p1_460; \ + uint16x8_t __s2_460 = __p2_460; \ + __ret_460 = __s0_460 + __s1_460 * splat_laneq_u16(__s2_460, __p3_460); \ + __ret_460; \ }) #else -#define vmulxs_lane_f32(__p0_590, __p1_590, __p2_590) __extension__ ({ \ - float32_t __ret_590; \ - float32_t __s0_590 = __p0_590; \ - float32x2_t __s1_590 = __p1_590; \ - float32x2_t __rev1_590; __rev1_590 = __builtin_shufflevector(__s1_590, __s1_590, 1, 0); \ - __ret_590 = vmulxs_f32(__s0_590, __noswap_vget_lane_f32(__rev1_590, __p2_590)); \ - __ret_590; \ +#define vmla_laneq_u16(__p0_461, __p1_461, __p2_461, __p3_461) __extension__ ({ \ + uint16x4_t __ret_461; \ + uint16x4_t __s0_461 = __p0_461; \ + uint16x4_t __s1_461 = 
__p1_461; \ + uint16x8_t __s2_461 = __p2_461; \ + uint16x4_t __rev0_461; __rev0_461 = __builtin_shufflevector(__s0_461, __s0_461, 3, 2, 1, 0); \ + uint16x4_t __rev1_461; __rev1_461 = __builtin_shufflevector(__s1_461, __s1_461, 3, 2, 1, 0); \ + uint16x8_t __rev2_461; __rev2_461 = __builtin_shufflevector(__s2_461, __s2_461, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_461 = __rev0_461 + __rev1_461 * __noswap_splat_laneq_u16(__rev2_461, __p3_461); \ + __ret_461 = __builtin_shufflevector(__ret_461, __ret_461, 3, 2, 1, 0); \ + __ret_461; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulxq_lane_f64(__p0_591, __p1_591, __p2_591) __extension__ ({ \ - float64x2_t __ret_591; \ - float64x2_t __s0_591 = __p0_591; \ - float64x1_t __s1_591 = __p1_591; \ - __ret_591 = vmulxq_f64(__s0_591, splatq_lane_f64(__s1_591, __p2_591)); \ - __ret_591; \ +#define vmla_laneq_f32(__p0_462, __p1_462, __p2_462, __p3_462) __extension__ ({ \ + float32x2_t __ret_462; \ + float32x2_t __s0_462 = __p0_462; \ + float32x2_t __s1_462 = __p1_462; \ + float32x4_t __s2_462 = __p2_462; \ + __ret_462 = __s0_462 + __s1_462 * splat_laneq_f32(__s2_462, __p3_462); \ + __ret_462; \ }) #else -#define vmulxq_lane_f64(__p0_592, __p1_592, __p2_592) __extension__ ({ \ - float64x2_t __ret_592; \ - float64x2_t __s0_592 = __p0_592; \ - float64x1_t __s1_592 = __p1_592; \ - float64x2_t __rev0_592; __rev0_592 = __builtin_shufflevector(__s0_592, __s0_592, 1, 0); \ - __ret_592 = __noswap_vmulxq_f64(__rev0_592, __noswap_splatq_lane_f64(__s1_592, __p2_592)); \ - __ret_592 = __builtin_shufflevector(__ret_592, __ret_592, 1, 0); \ - __ret_592; \ +#define vmla_laneq_f32(__p0_463, __p1_463, __p2_463, __p3_463) __extension__ ({ \ + float32x2_t __ret_463; \ + float32x2_t __s0_463 = __p0_463; \ + float32x2_t __s1_463 = __p1_463; \ + float32x4_t __s2_463 = __p2_463; \ + float32x2_t __rev0_463; __rev0_463 = __builtin_shufflevector(__s0_463, __s0_463, 1, 0); \ + float32x2_t __rev1_463; __rev1_463 = __builtin_shufflevector(__s1_463, __s1_463, 1, 0); 
\ + float32x4_t __rev2_463; __rev2_463 = __builtin_shufflevector(__s2_463, __s2_463, 3, 2, 1, 0); \ + __ret_463 = __rev0_463 + __rev1_463 * __noswap_splat_laneq_f32(__rev2_463, __p3_463); \ + __ret_463 = __builtin_shufflevector(__ret_463, __ret_463, 1, 0); \ + __ret_463; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulxq_lane_f32(__p0_593, __p1_593, __p2_593) __extension__ ({ \ - float32x4_t __ret_593; \ - float32x4_t __s0_593 = __p0_593; \ - float32x2_t __s1_593 = __p1_593; \ - __ret_593 = vmulxq_f32(__s0_593, splatq_lane_f32(__s1_593, __p2_593)); \ - __ret_593; \ +#define vmla_laneq_s32(__p0_464, __p1_464, __p2_464, __p3_464) __extension__ ({ \ + int32x2_t __ret_464; \ + int32x2_t __s0_464 = __p0_464; \ + int32x2_t __s1_464 = __p1_464; \ + int32x4_t __s2_464 = __p2_464; \ + __ret_464 = __s0_464 + __s1_464 * splat_laneq_s32(__s2_464, __p3_464); \ + __ret_464; \ }) #else -#define vmulxq_lane_f32(__p0_594, __p1_594, __p2_594) __extension__ ({ \ - float32x4_t __ret_594; \ - float32x4_t __s0_594 = __p0_594; \ - float32x2_t __s1_594 = __p1_594; \ - float32x4_t __rev0_594; __rev0_594 = __builtin_shufflevector(__s0_594, __s0_594, 3, 2, 1, 0); \ - float32x2_t __rev1_594; __rev1_594 = __builtin_shufflevector(__s1_594, __s1_594, 1, 0); \ - __ret_594 = __noswap_vmulxq_f32(__rev0_594, __noswap_splatq_lane_f32(__rev1_594, __p2_594)); \ - __ret_594 = __builtin_shufflevector(__ret_594, __ret_594, 3, 2, 1, 0); \ - __ret_594; \ +#define vmla_laneq_s32(__p0_465, __p1_465, __p2_465, __p3_465) __extension__ ({ \ + int32x2_t __ret_465; \ + int32x2_t __s0_465 = __p0_465; \ + int32x2_t __s1_465 = __p1_465; \ + int32x4_t __s2_465 = __p2_465; \ + int32x2_t __rev0_465; __rev0_465 = __builtin_shufflevector(__s0_465, __s0_465, 1, 0); \ + int32x2_t __rev1_465; __rev1_465 = __builtin_shufflevector(__s1_465, __s1_465, 1, 0); \ + int32x4_t __rev2_465; __rev2_465 = __builtin_shufflevector(__s2_465, __s2_465, 3, 2, 1, 0); \ + __ret_465 = __rev0_465 + __rev1_465 * 
__noswap_splat_laneq_s32(__rev2_465, __p3_465); \ + __ret_465 = __builtin_shufflevector(__ret_465, __ret_465, 1, 0); \ + __ret_465; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulx_lane_f32(__p0_595, __p1_595, __p2_595) __extension__ ({ \ - float32x2_t __ret_595; \ - float32x2_t __s0_595 = __p0_595; \ - float32x2_t __s1_595 = __p1_595; \ - __ret_595 = vmulx_f32(__s0_595, splat_lane_f32(__s1_595, __p2_595)); \ - __ret_595; \ +#define vmla_laneq_s16(__p0_466, __p1_466, __p2_466, __p3_466) __extension__ ({ \ + int16x4_t __ret_466; \ + int16x4_t __s0_466 = __p0_466; \ + int16x4_t __s1_466 = __p1_466; \ + int16x8_t __s2_466 = __p2_466; \ + __ret_466 = __s0_466 + __s1_466 * splat_laneq_s16(__s2_466, __p3_466); \ + __ret_466; \ }) #else -#define vmulx_lane_f32(__p0_596, __p1_596, __p2_596) __extension__ ({ \ - float32x2_t __ret_596; \ - float32x2_t __s0_596 = __p0_596; \ - float32x2_t __s1_596 = __p1_596; \ - float32x2_t __rev0_596; __rev0_596 = __builtin_shufflevector(__s0_596, __s0_596, 1, 0); \ - float32x2_t __rev1_596; __rev1_596 = __builtin_shufflevector(__s1_596, __s1_596, 1, 0); \ - __ret_596 = __noswap_vmulx_f32(__rev0_596, __noswap_splat_lane_f32(__rev1_596, __p2_596)); \ - __ret_596 = __builtin_shufflevector(__ret_596, __ret_596, 1, 0); \ - __ret_596; \ +#define vmla_laneq_s16(__p0_467, __p1_467, __p2_467, __p3_467) __extension__ ({ \ + int16x4_t __ret_467; \ + int16x4_t __s0_467 = __p0_467; \ + int16x4_t __s1_467 = __p1_467; \ + int16x8_t __s2_467 = __p2_467; \ + int16x4_t __rev0_467; __rev0_467 = __builtin_shufflevector(__s0_467, __s0_467, 3, 2, 1, 0); \ + int16x4_t __rev1_467; __rev1_467 = __builtin_shufflevector(__s1_467, __s1_467, 3, 2, 1, 0); \ + int16x8_t __rev2_467; __rev2_467 = __builtin_shufflevector(__s2_467, __s2_467, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_467 = __rev0_467 + __rev1_467 * __noswap_splat_laneq_s16(__rev2_467, __p3_467); \ + __ret_467 = __builtin_shufflevector(__ret_467, __ret_467, 3, 2, 1, 0); \ + __ret_467; \ }) #endif #ifdef 
__LITTLE_ENDIAN__ -#define vmulxd_laneq_f64(__p0_597, __p1_597, __p2_597) __extension__ ({ \ - float64_t __ret_597; \ - float64_t __s0_597 = __p0_597; \ - float64x2_t __s1_597 = __p1_597; \ - __ret_597 = vmulxd_f64(__s0_597, vgetq_lane_f64(__s1_597, __p2_597)); \ - __ret_597; \ +#define vmlal_high_lane_u32(__p0_468, __p1_468, __p2_468, __p3_468) __extension__ ({ \ + uint64x2_t __ret_468; \ + uint64x2_t __s0_468 = __p0_468; \ + uint32x4_t __s1_468 = __p1_468; \ + uint32x2_t __s2_468 = __p2_468; \ + __ret_468 = __s0_468 + vmull_u32(vget_high_u32(__s1_468), splat_lane_u32(__s2_468, __p3_468)); \ + __ret_468; \ }) #else -#define vmulxd_laneq_f64(__p0_598, __p1_598, __p2_598) __extension__ ({ \ - float64_t __ret_598; \ - float64_t __s0_598 = __p0_598; \ - float64x2_t __s1_598 = __p1_598; \ - float64x2_t __rev1_598; __rev1_598 = __builtin_shufflevector(__s1_598, __s1_598, 1, 0); \ - __ret_598 = vmulxd_f64(__s0_598, __noswap_vgetq_lane_f64(__rev1_598, __p2_598)); \ - __ret_598; \ +#define vmlal_high_lane_u32(__p0_469, __p1_469, __p2_469, __p3_469) __extension__ ({ \ + uint64x2_t __ret_469; \ + uint64x2_t __s0_469 = __p0_469; \ + uint32x4_t __s1_469 = __p1_469; \ + uint32x2_t __s2_469 = __p2_469; \ + uint64x2_t __rev0_469; __rev0_469 = __builtin_shufflevector(__s0_469, __s0_469, 1, 0); \ + uint32x4_t __rev1_469; __rev1_469 = __builtin_shufflevector(__s1_469, __s1_469, 3, 2, 1, 0); \ + uint32x2_t __rev2_469; __rev2_469 = __builtin_shufflevector(__s2_469, __s2_469, 1, 0); \ + __ret_469 = __rev0_469 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_469), __noswap_splat_lane_u32(__rev2_469, __p3_469)); \ + __ret_469 = __builtin_shufflevector(__ret_469, __ret_469, 1, 0); \ + __ret_469; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulxs_laneq_f32(__p0_599, __p1_599, __p2_599) __extension__ ({ \ - float32_t __ret_599; \ - float32_t __s0_599 = __p0_599; \ - float32x4_t __s1_599 = __p1_599; \ - __ret_599 = vmulxs_f32(__s0_599, vgetq_lane_f32(__s1_599, __p2_599)); \ - __ret_599; 
\ +#define vmlal_high_lane_u16(__p0_470, __p1_470, __p2_470, __p3_470) __extension__ ({ \ + uint32x4_t __ret_470; \ + uint32x4_t __s0_470 = __p0_470; \ + uint16x8_t __s1_470 = __p1_470; \ + uint16x4_t __s2_470 = __p2_470; \ + __ret_470 = __s0_470 + vmull_u16(vget_high_u16(__s1_470), splat_lane_u16(__s2_470, __p3_470)); \ + __ret_470; \ }) #else -#define vmulxs_laneq_f32(__p0_600, __p1_600, __p2_600) __extension__ ({ \ - float32_t __ret_600; \ - float32_t __s0_600 = __p0_600; \ - float32x4_t __s1_600 = __p1_600; \ - float32x4_t __rev1_600; __rev1_600 = __builtin_shufflevector(__s1_600, __s1_600, 3, 2, 1, 0); \ - __ret_600 = vmulxs_f32(__s0_600, __noswap_vgetq_lane_f32(__rev1_600, __p2_600)); \ - __ret_600; \ +#define vmlal_high_lane_u16(__p0_471, __p1_471, __p2_471, __p3_471) __extension__ ({ \ + uint32x4_t __ret_471; \ + uint32x4_t __s0_471 = __p0_471; \ + uint16x8_t __s1_471 = __p1_471; \ + uint16x4_t __s2_471 = __p2_471; \ + uint32x4_t __rev0_471; __rev0_471 = __builtin_shufflevector(__s0_471, __s0_471, 3, 2, 1, 0); \ + uint16x8_t __rev1_471; __rev1_471 = __builtin_shufflevector(__s1_471, __s1_471, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_471; __rev2_471 = __builtin_shufflevector(__s2_471, __s2_471, 3, 2, 1, 0); \ + __ret_471 = __rev0_471 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_471), __noswap_splat_lane_u16(__rev2_471, __p3_471)); \ + __ret_471 = __builtin_shufflevector(__ret_471, __ret_471, 3, 2, 1, 0); \ + __ret_471; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulxq_laneq_f64(__p0_601, __p1_601, __p2_601) __extension__ ({ \ - float64x2_t __ret_601; \ - float64x2_t __s0_601 = __p0_601; \ - float64x2_t __s1_601 = __p1_601; \ - __ret_601 = vmulxq_f64(__s0_601, splatq_laneq_f64(__s1_601, __p2_601)); \ - __ret_601; \ +#define vmlal_high_lane_s32(__p0_472, __p1_472, __p2_472, __p3_472) __extension__ ({ \ + int64x2_t __ret_472; \ + int64x2_t __s0_472 = __p0_472; \ + int32x4_t __s1_472 = __p1_472; \ + int32x2_t __s2_472 = __p2_472; \ + __ret_472 = 
__s0_472 + vmull_s32(vget_high_s32(__s1_472), splat_lane_s32(__s2_472, __p3_472)); \ + __ret_472; \ }) #else -#define vmulxq_laneq_f64(__p0_602, __p1_602, __p2_602) __extension__ ({ \ - float64x2_t __ret_602; \ - float64x2_t __s0_602 = __p0_602; \ - float64x2_t __s1_602 = __p1_602; \ - float64x2_t __rev0_602; __rev0_602 = __builtin_shufflevector(__s0_602, __s0_602, 1, 0); \ - float64x2_t __rev1_602; __rev1_602 = __builtin_shufflevector(__s1_602, __s1_602, 1, 0); \ - __ret_602 = __noswap_vmulxq_f64(__rev0_602, __noswap_splatq_laneq_f64(__rev1_602, __p2_602)); \ - __ret_602 = __builtin_shufflevector(__ret_602, __ret_602, 1, 0); \ - __ret_602; \ +#define vmlal_high_lane_s32(__p0_473, __p1_473, __p2_473, __p3_473) __extension__ ({ \ + int64x2_t __ret_473; \ + int64x2_t __s0_473 = __p0_473; \ + int32x4_t __s1_473 = __p1_473; \ + int32x2_t __s2_473 = __p2_473; \ + int64x2_t __rev0_473; __rev0_473 = __builtin_shufflevector(__s0_473, __s0_473, 1, 0); \ + int32x4_t __rev1_473; __rev1_473 = __builtin_shufflevector(__s1_473, __s1_473, 3, 2, 1, 0); \ + int32x2_t __rev2_473; __rev2_473 = __builtin_shufflevector(__s2_473, __s2_473, 1, 0); \ + __ret_473 = __rev0_473 + __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_473), __noswap_splat_lane_s32(__rev2_473, __p3_473)); \ + __ret_473 = __builtin_shufflevector(__ret_473, __ret_473, 1, 0); \ + __ret_473; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulxq_laneq_f32(__p0_603, __p1_603, __p2_603) __extension__ ({ \ - float32x4_t __ret_603; \ - float32x4_t __s0_603 = __p0_603; \ - float32x4_t __s1_603 = __p1_603; \ - __ret_603 = vmulxq_f32(__s0_603, splatq_laneq_f32(__s1_603, __p2_603)); \ - __ret_603; \ +#define vmlal_high_lane_s16(__p0_474, __p1_474, __p2_474, __p3_474) __extension__ ({ \ + int32x4_t __ret_474; \ + int32x4_t __s0_474 = __p0_474; \ + int16x8_t __s1_474 = __p1_474; \ + int16x4_t __s2_474 = __p2_474; \ + __ret_474 = __s0_474 + vmull_s16(vget_high_s16(__s1_474), splat_lane_s16(__s2_474, __p3_474)); \ + __ret_474; \ }) 
#else -#define vmulxq_laneq_f32(__p0_604, __p1_604, __p2_604) __extension__ ({ \ - float32x4_t __ret_604; \ - float32x4_t __s0_604 = __p0_604; \ - float32x4_t __s1_604 = __p1_604; \ - float32x4_t __rev0_604; __rev0_604 = __builtin_shufflevector(__s0_604, __s0_604, 3, 2, 1, 0); \ - float32x4_t __rev1_604; __rev1_604 = __builtin_shufflevector(__s1_604, __s1_604, 3, 2, 1, 0); \ - __ret_604 = __noswap_vmulxq_f32(__rev0_604, __noswap_splatq_laneq_f32(__rev1_604, __p2_604)); \ - __ret_604 = __builtin_shufflevector(__ret_604, __ret_604, 3, 2, 1, 0); \ - __ret_604; \ +#define vmlal_high_lane_s16(__p0_475, __p1_475, __p2_475, __p3_475) __extension__ ({ \ + int32x4_t __ret_475; \ + int32x4_t __s0_475 = __p0_475; \ + int16x8_t __s1_475 = __p1_475; \ + int16x4_t __s2_475 = __p2_475; \ + int32x4_t __rev0_475; __rev0_475 = __builtin_shufflevector(__s0_475, __s0_475, 3, 2, 1, 0); \ + int16x8_t __rev1_475; __rev1_475 = __builtin_shufflevector(__s1_475, __s1_475, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_475; __rev2_475 = __builtin_shufflevector(__s2_475, __s2_475, 3, 2, 1, 0); \ + __ret_475 = __rev0_475 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_475), __noswap_splat_lane_s16(__rev2_475, __p3_475)); \ + __ret_475 = __builtin_shufflevector(__ret_475, __ret_475, 3, 2, 1, 0); \ + __ret_475; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmulx_laneq_f32(__p0_605, __p1_605, __p2_605) __extension__ ({ \ - float32x2_t __ret_605; \ - float32x2_t __s0_605 = __p0_605; \ - float32x4_t __s1_605 = __p1_605; \ - __ret_605 = vmulx_f32(__s0_605, splat_laneq_f32(__s1_605, __p2_605)); \ - __ret_605; \ +#define vmlal_high_laneq_u32(__p0_476, __p1_476, __p2_476, __p3_476) __extension__ ({ \ + uint64x2_t __ret_476; \ + uint64x2_t __s0_476 = __p0_476; \ + uint32x4_t __s1_476 = __p1_476; \ + uint32x4_t __s2_476 = __p2_476; \ + __ret_476 = __s0_476 + vmull_u32(vget_high_u32(__s1_476), splat_laneq_u32(__s2_476, __p3_476)); \ + __ret_476; \ }) #else -#define vmulx_laneq_f32(__p0_606, __p1_606, 
__p2_606) __extension__ ({ \ - float32x2_t __ret_606; \ - float32x2_t __s0_606 = __p0_606; \ - float32x4_t __s1_606 = __p1_606; \ - float32x2_t __rev0_606; __rev0_606 = __builtin_shufflevector(__s0_606, __s0_606, 1, 0); \ - float32x4_t __rev1_606; __rev1_606 = __builtin_shufflevector(__s1_606, __s1_606, 3, 2, 1, 0); \ - __ret_606 = __noswap_vmulx_f32(__rev0_606, __noswap_splat_laneq_f32(__rev1_606, __p2_606)); \ - __ret_606 = __builtin_shufflevector(__ret_606, __ret_606, 1, 0); \ - __ret_606; \ +#define vmlal_high_laneq_u32(__p0_477, __p1_477, __p2_477, __p3_477) __extension__ ({ \ + uint64x2_t __ret_477; \ + uint64x2_t __s0_477 = __p0_477; \ + uint32x4_t __s1_477 = __p1_477; \ + uint32x4_t __s2_477 = __p2_477; \ + uint64x2_t __rev0_477; __rev0_477 = __builtin_shufflevector(__s0_477, __s0_477, 1, 0); \ + uint32x4_t __rev1_477; __rev1_477 = __builtin_shufflevector(__s1_477, __s1_477, 3, 2, 1, 0); \ + uint32x4_t __rev2_477; __rev2_477 = __builtin_shufflevector(__s2_477, __s2_477, 3, 2, 1, 0); \ + __ret_477 = __rev0_477 + __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_477), __noswap_splat_laneq_u32(__rev2_477, __p3_477)); \ + __ret_477 = __builtin_shufflevector(__ret_477, __ret_477, 1, 0); \ + __ret_477; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vnegq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = -__p0; - return __ret; -} +#define vmlal_high_laneq_u16(__p0_478, __p1_478, __p2_478, __p3_478) __extension__ ({ \ + uint32x4_t __ret_478; \ + uint32x4_t __s0_478 = __p0_478; \ + uint16x8_t __s1_478 = __p1_478; \ + uint16x8_t __s2_478 = __p2_478; \ + __ret_478 = __s0_478 + vmull_u16(vget_high_u16(__s1_478), splat_laneq_u16(__s2_478, __p3_478)); \ + __ret_478; \ +}) #else -__ai float64x2_t vnegq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = -__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vmlal_high_laneq_u16(__p0_479, __p1_479, 
__p2_479, __p3_479) __extension__ ({ \ + uint32x4_t __ret_479; \ + uint32x4_t __s0_479 = __p0_479; \ + uint16x8_t __s1_479 = __p1_479; \ + uint16x8_t __s2_479 = __p2_479; \ + uint32x4_t __rev0_479; __rev0_479 = __builtin_shufflevector(__s0_479, __s0_479, 3, 2, 1, 0); \ + uint16x8_t __rev1_479; __rev1_479 = __builtin_shufflevector(__s1_479, __s1_479, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_479; __rev2_479 = __builtin_shufflevector(__s2_479, __s2_479, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_479 = __rev0_479 + __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_479), __noswap_splat_laneq_u16(__rev2_479, __p3_479)); \ + __ret_479 = __builtin_shufflevector(__ret_479, __ret_479, 3, 2, 1, 0); \ + __ret_479; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vnegq_s64(int64x2_t __p0) { - int64x2_t __ret; - __ret = -__p0; - return __ret; -} +#define vmlal_high_laneq_s32(__p0_480, __p1_480, __p2_480, __p3_480) __extension__ ({ \ + int64x2_t __ret_480; \ + int64x2_t __s0_480 = __p0_480; \ + int32x4_t __s1_480 = __p1_480; \ + int32x4_t __s2_480 = __p2_480; \ + __ret_480 = __s0_480 + vmull_s32(vget_high_s32(__s1_480), splat_laneq_s32(__s2_480, __p3_480)); \ + __ret_480; \ +}) #else -__ai int64x2_t vnegq_s64(int64x2_t __p0) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = -__rev0; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vmlal_high_laneq_s32(__p0_481, __p1_481, __p2_481, __p3_481) __extension__ ({ \ + int64x2_t __ret_481; \ + int64x2_t __s0_481 = __p0_481; \ + int32x4_t __s1_481 = __p1_481; \ + int32x4_t __s2_481 = __p2_481; \ + int64x2_t __rev0_481; __rev0_481 = __builtin_shufflevector(__s0_481, __s0_481, 1, 0); \ + int32x4_t __rev1_481; __rev1_481 = __builtin_shufflevector(__s1_481, __s1_481, 3, 2, 1, 0); \ + int32x4_t __rev2_481; __rev2_481 = __builtin_shufflevector(__s2_481, __s2_481, 3, 2, 1, 0); \ + __ret_481 = __rev0_481 + 
__noswap_vmull_s32(__noswap_vget_high_s32(__rev1_481), __noswap_splat_laneq_s32(__rev2_481, __p3_481)); \ + __ret_481 = __builtin_shufflevector(__ret_481, __ret_481, 1, 0); \ + __ret_481; \ +}) #endif -__ai float64x1_t vneg_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = -__p0; - return __ret; -} -__ai int64x1_t vneg_s64(int64x1_t __p0) { - int64x1_t __ret; - __ret = -__p0; - return __ret; -} -__ai int64_t vnegd_s64(int64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vnegd_s64(__p0); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} +#define vmlal_high_laneq_s16(__p0_482, __p1_482, __p2_482, __p3_482) __extension__ ({ \ + int32x4_t __ret_482; \ + int32x4_t __s0_482 = __p0_482; \ + int16x8_t __s1_482 = __p1_482; \ + int16x8_t __s2_482 = __p2_482; \ + __ret_482 = __s0_482 + vmull_s16(vget_high_s16(__s1_482), splat_laneq_s16(__s2_482, __p3_482)); \ + __ret_482; \ +}) #else -__ai uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vmlal_high_laneq_s16(__p0_483, __p1_483, __p2_483, __p3_483) __extension__ ({ \ + int32x4_t __ret_483; \ + int32x4_t __s0_483 = __p0_483; \ + int16x8_t __s1_483 = __p1_483; \ + int16x8_t __s2_483 = __p2_483; \ + int32x4_t __rev0_483; __rev0_483 = __builtin_shufflevector(__s0_483, __s0_483, 3, 2, 1, 0); \ + int16x8_t __rev1_483; __rev1_483 = 
__builtin_shufflevector(__s1_483, __s1_483, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_483; __rev2_483 = __builtin_shufflevector(__s2_483, __s2_483, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_483 = __rev0_483 + __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_483), __noswap_splat_laneq_s16(__rev2_483, __p3_483)); \ + __ret_483 = __builtin_shufflevector(__ret_483, __ret_483, 3, 2, 1, 0); \ + __ret_483; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} +#define vmlal_laneq_u32(__p0_484, __p1_484, __p2_484, __p3_484) __extension__ ({ \ + uint64x2_t __ret_484; \ + uint64x2_t __s0_484 = __p0_484; \ + uint32x2_t __s1_484 = __p1_484; \ + uint32x4_t __s2_484 = __p2_484; \ + __ret_484 = __s0_484 + vmull_u32(__s1_484, splat_laneq_u32(__s2_484, __p3_484)); \ + __ret_484; \ +}) #else -__ai uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vmlal_laneq_u32(__p0_485, __p1_485, __p2_485, __p3_485) __extension__ ({ \ + uint64x2_t __ret_485; \ + uint64x2_t __s0_485 = __p0_485; \ + uint32x2_t __s1_485 = __p1_485; \ + uint32x4_t __s2_485 = __p2_485; \ + uint64x2_t __rev0_485; __rev0_485 = __builtin_shufflevector(__s0_485, __s0_485, 1, 0); \ + uint32x2_t __rev1_485; __rev1_485 = __builtin_shufflevector(__s1_485, __s1_485, 1, 0); \ + uint32x4_t __rev2_485; __rev2_485 = __builtin_shufflevector(__s2_485, __s2_485, 3, 2, 1, 0); \ + __ret_485 = __rev0_485 + __noswap_vmull_u32(__rev1_485, __noswap_splat_laneq_u32(__rev2_485, __p3_485)); \ + __ret_485 = 
__builtin_shufflevector(__ret_485, __ret_485, 1, 0); \ + __ret_485; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); - return __ret; -} +#define vmlal_laneq_u16(__p0_486, __p1_486, __p2_486, __p3_486) __extension__ ({ \ + uint32x4_t __ret_486; \ + uint32x4_t __s0_486 = __p0_486; \ + uint16x4_t __s1_486 = __p1_486; \ + uint16x8_t __s2_486 = __p2_486; \ + __ret_486 = __s0_486 + vmull_u16(__s1_486, splat_laneq_u16(__s2_486, __p3_486)); \ + __ret_486; \ +}) #else -__ai uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vmlal_laneq_u16(__p0_487, __p1_487, __p2_487, __p3_487) __extension__ ({ \ + uint32x4_t __ret_487; \ + uint32x4_t __s0_487 = __p0_487; \ + uint16x4_t __s1_487 = __p1_487; \ + uint16x8_t __s2_487 = __p2_487; \ + uint32x4_t __rev0_487; __rev0_487 = __builtin_shufflevector(__s0_487, __s0_487, 3, 2, 1, 0); \ + uint16x4_t __rev1_487; __rev1_487 = __builtin_shufflevector(__s1_487, __s1_487, 3, 2, 1, 0); \ + uint16x8_t __rev2_487; __rev2_487 = __builtin_shufflevector(__s2_487, __s2_487, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_487 = __rev0_487 + __noswap_vmull_u16(__rev1_487, __noswap_splat_laneq_u16(__rev2_487, __p3_487)); \ + __ret_487 = __builtin_shufflevector(__ret_487, __ret_487, 3, 2, 1, 0); \ + __ret_487; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} +#define vmlal_laneq_s32(__p0_488, 
__p1_488, __p2_488, __p3_488) __extension__ ({ \ + int64x2_t __ret_488; \ + int64x2_t __s0_488 = __p0_488; \ + int32x2_t __s1_488 = __p1_488; \ + int32x4_t __s2_488 = __p2_488; \ + __ret_488 = __s0_488 + vmull_s32(__s1_488, splat_laneq_s32(__s2_488, __p3_488)); \ + __ret_488; \ +}) #else -__ai uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vmlal_laneq_s32(__p0_489, __p1_489, __p2_489, __p3_489) __extension__ ({ \ + int64x2_t __ret_489; \ + int64x2_t __s0_489 = __p0_489; \ + int32x2_t __s1_489 = __p1_489; \ + int32x4_t __s2_489 = __p2_489; \ + int64x2_t __rev0_489; __rev0_489 = __builtin_shufflevector(__s0_489, __s0_489, 1, 0); \ + int32x2_t __rev1_489; __rev1_489 = __builtin_shufflevector(__s1_489, __s1_489, 1, 0); \ + int32x4_t __rev2_489; __rev2_489 = __builtin_shufflevector(__s2_489, __s2_489, 3, 2, 1, 0); \ + __ret_489 = __rev0_489 + __noswap_vmull_s32(__rev1_489, __noswap_splat_laneq_s32(__rev2_489, __p3_489)); \ + __ret_489 = __builtin_shufflevector(__ret_489, __ret_489, 1, 0); \ + __ret_489; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} +#define vmlal_laneq_s16(__p0_490, __p1_490, __p2_490, __p3_490) __extension__ ({ \ + int32x4_t __ret_490; \ + int32x4_t __s0_490 = __p0_490; \ + int16x4_t __s1_490 = __p1_490; \ + int16x8_t __s2_490 = __p2_490; \ + __ret_490 = __s0_490 + vmull_s16(__s1_490, splat_laneq_s16(__s2_490, __p3_490)); \ + __ret_490; \ +}) #else -__ai int8x16_t vpaddq_s8(int8x16_t 
__p0, int8x16_t __p1) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vmlal_laneq_s16(__p0_491, __p1_491, __p2_491, __p3_491) __extension__ ({ \ + int32x4_t __ret_491; \ + int32x4_t __s0_491 = __p0_491; \ + int16x4_t __s1_491 = __p1_491; \ + int16x8_t __s2_491 = __p2_491; \ + int32x4_t __rev0_491; __rev0_491 = __builtin_shufflevector(__s0_491, __s0_491, 3, 2, 1, 0); \ + int16x4_t __rev1_491; __rev1_491 = __builtin_shufflevector(__s1_491, __s1_491, 3, 2, 1, 0); \ + int16x8_t __rev2_491; __rev2_491 = __builtin_shufflevector(__s2_491, __s2_491, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_491 = __rev0_491 + __noswap_vmull_s16(__rev1_491, __noswap_splat_laneq_s16(__rev2_491, __p3_491)); \ + __ret_491 = __builtin_shufflevector(__ret_491, __ret_491, 3, 2, 1, 0); \ + __ret_491; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + __ret = __p0 - __p1 * __p2; return __ret; } #else -__ai float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vmlsq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, 
(int8x16_t)__rev1, 42); + float64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __rev0 - __rev1 * __rev2; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); +__ai __attribute__((target("neon"))) float64x1_t vmls_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { + float64x1_t __ret; + __ret = __p0 - __p1 * __p2; return __ret; } +#ifdef __LITTLE_ENDIAN__ +#define vmlsq_laneq_u32(__p0_492, __p1_492, __p2_492, __p3_492) __extension__ ({ \ + uint32x4_t __ret_492; \ + uint32x4_t __s0_492 = __p0_492; \ + uint32x4_t __s1_492 = __p1_492; \ + uint32x4_t __s2_492 = __p2_492; \ + __ret_492 = __s0_492 - __s1_492 * splatq_laneq_u32(__s2_492, __p3_492); \ + __ret_492; \ +}) #else -__ai float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vmlsq_laneq_u32(__p0_493, __p1_493, __p2_493, __p3_493) __extension__ ({ \ + uint32x4_t __ret_493; \ + uint32x4_t __s0_493 = __p0_493; \ + uint32x4_t __s1_493 = __p1_493; \ + uint32x4_t __s2_493 = __p2_493; \ + uint32x4_t __rev0_493; __rev0_493 = __builtin_shufflevector(__s0_493, __s0_493, 3, 2, 1, 0); \ + uint32x4_t __rev1_493; __rev1_493 = __builtin_shufflevector(__s1_493, __s1_493, 3, 2, 1, 0); \ + uint32x4_t __rev2_493; __rev2_493 = __builtin_shufflevector(__s2_493, __s2_493, 3, 2, 1, 0); \ + __ret_493 = __rev0_493 - __rev1_493 * __noswap_splatq_laneq_u32(__rev2_493, __p3_493); \ + __ret_493 = 
__builtin_shufflevector(__ret_493, __ret_493, 3, 2, 1, 0); \ + __ret_493; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} +#define vmlsq_laneq_u16(__p0_494, __p1_494, __p2_494, __p3_494) __extension__ ({ \ + uint16x8_t __ret_494; \ + uint16x8_t __s0_494 = __p0_494; \ + uint16x8_t __s1_494 = __p1_494; \ + uint16x8_t __s2_494 = __p2_494; \ + __ret_494 = __s0_494 - __s1_494 * splatq_laneq_u16(__s2_494, __p3_494); \ + __ret_494; \ +}) #else -__ai int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vmlsq_laneq_u16(__p0_495, __p1_495, __p2_495, __p3_495) __extension__ ({ \ + uint16x8_t __ret_495; \ + uint16x8_t __s0_495 = __p0_495; \ + uint16x8_t __s1_495 = __p1_495; \ + uint16x8_t __s2_495 = __p2_495; \ + uint16x8_t __rev0_495; __rev0_495 = __builtin_shufflevector(__s0_495, __s0_495, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_495; __rev1_495 = __builtin_shufflevector(__s1_495, __s1_495, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_495; __rev2_495 = __builtin_shufflevector(__s2_495, __s2_495, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_495 = __rev0_495 - __rev1_495 * __noswap_splatq_laneq_u16(__rev2_495, __p3_495); \ + __ret_495 = __builtin_shufflevector(__ret_495, __ret_495, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_495; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); - return __ret; -} +#define 
vmlsq_laneq_f32(__p0_496, __p1_496, __p2_496, __p3_496) __extension__ ({ \ + float32x4_t __ret_496; \ + float32x4_t __s0_496 = __p0_496; \ + float32x4_t __s1_496 = __p1_496; \ + float32x4_t __s2_496 = __p2_496; \ + __ret_496 = __s0_496 - __s1_496 * splatq_laneq_f32(__s2_496, __p3_496); \ + __ret_496; \ +}) #else -__ai int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vmlsq_laneq_f32(__p0_497, __p1_497, __p2_497, __p3_497) __extension__ ({ \ + float32x4_t __ret_497; \ + float32x4_t __s0_497 = __p0_497; \ + float32x4_t __s1_497 = __p1_497; \ + float32x4_t __s2_497 = __p2_497; \ + float32x4_t __rev0_497; __rev0_497 = __builtin_shufflevector(__s0_497, __s0_497, 3, 2, 1, 0); \ + float32x4_t __rev1_497; __rev1_497 = __builtin_shufflevector(__s1_497, __s1_497, 3, 2, 1, 0); \ + float32x4_t __rev2_497; __rev2_497 = __builtin_shufflevector(__s2_497, __s2_497, 3, 2, 1, 0); \ + __ret_497 = __rev0_497 - __rev1_497 * __noswap_splatq_laneq_f32(__rev2_497, __p3_497); \ + __ret_497 = __builtin_shufflevector(__ret_497, __ret_497, 3, 2, 1, 0); \ + __ret_497; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} +#define vmlsq_laneq_s32(__p0_498, __p1_498, __p2_498, __p3_498) __extension__ ({ \ + int32x4_t __ret_498; \ + int32x4_t __s0_498 = __p0_498; \ + int32x4_t __s1_498 = __p1_498; \ + int32x4_t __s2_498 = __p2_498; \ + __ret_498 = __s0_498 - __s1_498 * splatq_laneq_s32(__s2_498, __p3_498); \ + __ret_498; \ +}) #else -__ai int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) { - 
int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vmlsq_laneq_s32(__p0_499, __p1_499, __p2_499, __p3_499) __extension__ ({ \ + int32x4_t __ret_499; \ + int32x4_t __s0_499 = __p0_499; \ + int32x4_t __s1_499 = __p1_499; \ + int32x4_t __s2_499 = __p2_499; \ + int32x4_t __rev0_499; __rev0_499 = __builtin_shufflevector(__s0_499, __s0_499, 3, 2, 1, 0); \ + int32x4_t __rev1_499; __rev1_499 = __builtin_shufflevector(__s1_499, __s1_499, 3, 2, 1, 0); \ + int32x4_t __rev2_499; __rev2_499 = __builtin_shufflevector(__s2_499, __s2_499, 3, 2, 1, 0); \ + __ret_499 = __rev0_499 - __rev1_499 * __noswap_splatq_laneq_s32(__rev2_499, __p3_499); \ + __ret_499 = __builtin_shufflevector(__ret_499, __ret_499, 3, 2, 1, 0); \ + __ret_499; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64_t vpaddd_u64(uint64x2_t __p0) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vpaddd_u64(__p0); - return __ret; -} +#define vmlsq_laneq_s16(__p0_500, __p1_500, __p2_500, __p3_500) __extension__ ({ \ + int16x8_t __ret_500; \ + int16x8_t __s0_500 = __p0_500; \ + int16x8_t __s1_500 = __p1_500; \ + int16x8_t __s2_500 = __p2_500; \ + __ret_500 = __s0_500 - __s1_500 * splatq_laneq_s16(__s2_500, __p3_500); \ + __ret_500; \ +}) #else -__ai uint64_t vpaddd_u64(uint64x2_t __p0) { - uint64_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (uint64_t) __builtin_neon_vpaddd_u64(__rev0); - return __ret; -} +#define vmlsq_laneq_s16(__p0_501, __p1_501, __p2_501, __p3_501) __extension__ ({ \ + int16x8_t __ret_501; \ + int16x8_t __s0_501 = __p0_501; \ + int16x8_t __s1_501 = __p1_501; \ + int16x8_t __s2_501 = __p2_501; \ + int16x8_t __rev0_501; 
__rev0_501 = __builtin_shufflevector(__s0_501, __s0_501, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_501; __rev1_501 = __builtin_shufflevector(__s1_501, __s1_501, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_501; __rev2_501 = __builtin_shufflevector(__s2_501, __s2_501, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_501 = __rev0_501 - __rev1_501 * __noswap_splatq_laneq_s16(__rev2_501, __p3_501); \ + __ret_501 = __builtin_shufflevector(__ret_501, __ret_501, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_501; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float64_t vpaddd_f64(float64x2_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vpaddd_f64(__p0); - return __ret; -} +#define vmls_laneq_u32(__p0_502, __p1_502, __p2_502, __p3_502) __extension__ ({ \ + uint32x2_t __ret_502; \ + uint32x2_t __s0_502 = __p0_502; \ + uint32x2_t __s1_502 = __p1_502; \ + uint32x4_t __s2_502 = __p2_502; \ + __ret_502 = __s0_502 - __s1_502 * splat_laneq_u32(__s2_502, __p3_502); \ + __ret_502; \ +}) #else -__ai float64_t vpaddd_f64(float64x2_t __p0) { - float64_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64_t) __builtin_neon_vpaddd_f64(__rev0); - return __ret; -} +#define vmls_laneq_u32(__p0_503, __p1_503, __p2_503, __p3_503) __extension__ ({ \ + uint32x2_t __ret_503; \ + uint32x2_t __s0_503 = __p0_503; \ + uint32x2_t __s1_503 = __p1_503; \ + uint32x4_t __s2_503 = __p2_503; \ + uint32x2_t __rev0_503; __rev0_503 = __builtin_shufflevector(__s0_503, __s0_503, 1, 0); \ + uint32x2_t __rev1_503; __rev1_503 = __builtin_shufflevector(__s1_503, __s1_503, 1, 0); \ + uint32x4_t __rev2_503; __rev2_503 = __builtin_shufflevector(__s2_503, __s2_503, 3, 2, 1, 0); \ + __ret_503 = __rev0_503 - __rev1_503 * __noswap_splat_laneq_u32(__rev2_503, __p3_503); \ + __ret_503 = __builtin_shufflevector(__ret_503, __ret_503, 1, 0); \ + __ret_503; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int64_t vpaddd_s64(int64x2_t __p0) { - int64_t __ret; - __ret = (int64_t) 
__builtin_neon_vpaddd_s64(__p0); - return __ret; -} +#define vmls_laneq_u16(__p0_504, __p1_504, __p2_504, __p3_504) __extension__ ({ \ + uint16x4_t __ret_504; \ + uint16x4_t __s0_504 = __p0_504; \ + uint16x4_t __s1_504 = __p1_504; \ + uint16x8_t __s2_504 = __p2_504; \ + __ret_504 = __s0_504 - __s1_504 * splat_laneq_u16(__s2_504, __p3_504); \ + __ret_504; \ +}) #else -__ai int64_t vpaddd_s64(int64x2_t __p0) { - int64_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int64_t) __builtin_neon_vpaddd_s64(__rev0); - return __ret; -} +#define vmls_laneq_u16(__p0_505, __p1_505, __p2_505, __p3_505) __extension__ ({ \ + uint16x4_t __ret_505; \ + uint16x4_t __s0_505 = __p0_505; \ + uint16x4_t __s1_505 = __p1_505; \ + uint16x8_t __s2_505 = __p2_505; \ + uint16x4_t __rev0_505; __rev0_505 = __builtin_shufflevector(__s0_505, __s0_505, 3, 2, 1, 0); \ + uint16x4_t __rev1_505; __rev1_505 = __builtin_shufflevector(__s1_505, __s1_505, 3, 2, 1, 0); \ + uint16x8_t __rev2_505; __rev2_505 = __builtin_shufflevector(__s2_505, __s2_505, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_505 = __rev0_505 - __rev1_505 * __noswap_splat_laneq_u16(__rev2_505, __p3_505); \ + __ret_505 = __builtin_shufflevector(__ret_505, __ret_505, 3, 2, 1, 0); \ + __ret_505; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32_t vpadds_f32(float32x2_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vpadds_f32(__p0); - return __ret; -} +#define vmls_laneq_f32(__p0_506, __p1_506, __p2_506, __p3_506) __extension__ ({ \ + float32x2_t __ret_506; \ + float32x2_t __s0_506 = __p0_506; \ + float32x2_t __s1_506 = __p1_506; \ + float32x4_t __s2_506 = __p2_506; \ + __ret_506 = __s0_506 - __s1_506 * splat_laneq_f32(__s2_506, __p3_506); \ + __ret_506; \ +}) #else -__ai float32_t vpadds_f32(float32x2_t __p0) { - float32_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float32_t) __builtin_neon_vpadds_f32(__rev0); - return __ret; -} +#define 
vmls_laneq_f32(__p0_507, __p1_507, __p2_507, __p3_507) __extension__ ({ \ + float32x2_t __ret_507; \ + float32x2_t __s0_507 = __p0_507; \ + float32x2_t __s1_507 = __p1_507; \ + float32x4_t __s2_507 = __p2_507; \ + float32x2_t __rev0_507; __rev0_507 = __builtin_shufflevector(__s0_507, __s0_507, 1, 0); \ + float32x2_t __rev1_507; __rev1_507 = __builtin_shufflevector(__s1_507, __s1_507, 1, 0); \ + float32x4_t __rev2_507; __rev2_507 = __builtin_shufflevector(__s2_507, __s2_507, 3, 2, 1, 0); \ + __ret_507 = __rev0_507 - __rev1_507 * __noswap_splat_laneq_f32(__rev2_507, __p3_507); \ + __ret_507 = __builtin_shufflevector(__ret_507, __ret_507, 1, 0); \ + __ret_507; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} +#define vmls_laneq_s32(__p0_508, __p1_508, __p2_508, __p3_508) __extension__ ({ \ + int32x2_t __ret_508; \ + int32x2_t __s0_508 = __p0_508; \ + int32x2_t __s1_508 = __p1_508; \ + int32x4_t __s2_508 = __p2_508; \ + __ret_508 = __s0_508 - __s1_508 * splat_laneq_s32(__s2_508, __p3_508); \ + __ret_508; \ +}) #else -__ai uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vmls_laneq_s32(__p0_509, __p1_509, __p2_509, __p3_509) __extension__ ({ \ + int32x2_t __ret_509; \ + int32x2_t __s0_509 = __p0_509; \ + int32x2_t __s1_509 = __p1_509; \ + int32x4_t __s2_509 = __p2_509; \ + int32x2_t __rev0_509; __rev0_509 = 
__builtin_shufflevector(__s0_509, __s0_509, 1, 0); \ + int32x2_t __rev1_509; __rev1_509 = __builtin_shufflevector(__s1_509, __s1_509, 1, 0); \ + int32x4_t __rev2_509; __rev2_509 = __builtin_shufflevector(__s2_509, __s2_509, 3, 2, 1, 0); \ + __ret_509 = __rev0_509 - __rev1_509 * __noswap_splat_laneq_s32(__rev2_509, __p3_509); \ + __ret_509 = __builtin_shufflevector(__ret_509, __ret_509, 1, 0); \ + __ret_509; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} +#define vmls_laneq_s16(__p0_510, __p1_510, __p2_510, __p3_510) __extension__ ({ \ + int16x4_t __ret_510; \ + int16x4_t __s0_510 = __p0_510; \ + int16x4_t __s1_510 = __p1_510; \ + int16x8_t __s2_510 = __p2_510; \ + __ret_510 = __s0_510 - __s1_510 * splat_laneq_s16(__s2_510, __p3_510); \ + __ret_510; \ +}) #else -__ai uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vmls_laneq_s16(__p0_511, __p1_511, __p2_511, __p3_511) __extension__ ({ \ + int16x4_t __ret_511; \ + int16x4_t __s0_511 = __p0_511; \ + int16x4_t __s1_511 = __p1_511; \ + int16x8_t __s2_511 = __p2_511; \ + int16x4_t __rev0_511; __rev0_511 = __builtin_shufflevector(__s0_511, __s0_511, 3, 2, 1, 0); \ + int16x4_t __rev1_511; __rev1_511 = __builtin_shufflevector(__s1_511, __s1_511, 3, 2, 1, 0); \ + int16x8_t __rev2_511; __rev2_511 = __builtin_shufflevector(__s2_511, __s2_511, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_511 = __rev0_511 - __rev1_511 * __noswap_splat_laneq_s16(__rev2_511, __p3_511); \ + __ret_511 = 
__builtin_shufflevector(__ret_511, __ret_511, 3, 2, 1, 0); \ + __ret_511; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} +#define vmlsl_high_lane_u32(__p0_512, __p1_512, __p2_512, __p3_512) __extension__ ({ \ + uint64x2_t __ret_512; \ + uint64x2_t __s0_512 = __p0_512; \ + uint32x4_t __s1_512 = __p1_512; \ + uint32x2_t __s2_512 = __p2_512; \ + __ret_512 = __s0_512 - vmull_u32(vget_high_u32(__s1_512), splat_lane_u32(__s2_512, __p3_512)); \ + __ret_512; \ +}) #else -__ai uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vmlsl_high_lane_u32(__p0_513, __p1_513, __p2_513, __p3_513) __extension__ ({ \ + uint64x2_t __ret_513; \ + uint64x2_t __s0_513 = __p0_513; \ + uint32x4_t __s1_513 = __p1_513; \ + uint32x2_t __s2_513 = __p2_513; \ + uint64x2_t __rev0_513; __rev0_513 = __builtin_shufflevector(__s0_513, __s0_513, 1, 0); \ + uint32x4_t __rev1_513; __rev1_513 = __builtin_shufflevector(__s1_513, __s1_513, 3, 2, 1, 0); \ + uint32x2_t __rev2_513; __rev2_513 = __builtin_shufflevector(__s2_513, __s2_513, 1, 0); \ + __ret_513 = __rev0_513 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_513), __noswap_splat_lane_u32(__rev2_513, __p3_513)); \ + __ret_513 = __builtin_shufflevector(__ret_513, __ret_513, 1, 0); \ + __ret_513; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, 
(int8x16_t)__p1, 32); - return __ret; -} +#define vmlsl_high_lane_u16(__p0_514, __p1_514, __p2_514, __p3_514) __extension__ ({ \ + uint32x4_t __ret_514; \ + uint32x4_t __s0_514 = __p0_514; \ + uint16x8_t __s1_514 = __p1_514; \ + uint16x4_t __s2_514 = __p2_514; \ + __ret_514 = __s0_514 - vmull_u16(vget_high_u16(__s1_514), splat_lane_u16(__s2_514, __p3_514)); \ + __ret_514; \ +}) #else -__ai int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vmlsl_high_lane_u16(__p0_515, __p1_515, __p2_515, __p3_515) __extension__ ({ \ + uint32x4_t __ret_515; \ + uint32x4_t __s0_515 = __p0_515; \ + uint16x8_t __s1_515 = __p1_515; \ + uint16x4_t __s2_515 = __p2_515; \ + uint32x4_t __rev0_515; __rev0_515 = __builtin_shufflevector(__s0_515, __s0_515, 3, 2, 1, 0); \ + uint16x8_t __rev1_515; __rev1_515 = __builtin_shufflevector(__s1_515, __s1_515, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev2_515; __rev2_515 = __builtin_shufflevector(__s2_515, __s2_515, 3, 2, 1, 0); \ + __ret_515 = __rev0_515 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_515), __noswap_splat_lane_u16(__rev2_515, __p3_515)); \ + __ret_515 = __builtin_shufflevector(__ret_515, __ret_515, 3, 2, 1, 0); \ + __ret_515; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - return __ret; -} +#define vmlsl_high_lane_s32(__p0_516, __p1_516, __p2_516, __p3_516) __extension__ ({ \ + int64x2_t 
__ret_516; \ + int64x2_t __s0_516 = __p0_516; \ + int32x4_t __s1_516 = __p1_516; \ + int32x2_t __s2_516 = __p2_516; \ + __ret_516 = __s0_516 - vmull_s32(vget_high_s32(__s1_516), splat_lane_s32(__s2_516, __p3_516)); \ + __ret_516; \ +}) #else -__ai float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vmlsl_high_lane_s32(__p0_517, __p1_517, __p2_517, __p3_517) __extension__ ({ \ + int64x2_t __ret_517; \ + int64x2_t __s0_517 = __p0_517; \ + int32x4_t __s1_517 = __p1_517; \ + int32x2_t __s2_517 = __p2_517; \ + int64x2_t __rev0_517; __rev0_517 = __builtin_shufflevector(__s0_517, __s0_517, 1, 0); \ + int32x4_t __rev1_517; __rev1_517 = __builtin_shufflevector(__s1_517, __s1_517, 3, 2, 1, 0); \ + int32x2_t __rev2_517; __rev2_517 = __builtin_shufflevector(__s2_517, __s2_517, 1, 0); \ + __ret_517 = __rev0_517 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_517), __noswap_splat_lane_s32(__rev2_517, __p3_517)); \ + __ret_517 = __builtin_shufflevector(__ret_517, __ret_517, 1, 0); \ + __ret_517; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); - return __ret; -} +#define vmlsl_high_lane_s16(__p0_518, __p1_518, __p2_518, __p3_518) __extension__ ({ \ + int32x4_t __ret_518; \ + int32x4_t __s0_518 = __p0_518; \ + int16x8_t __s1_518 = __p1_518; \ + int16x4_t __s2_518 = __p2_518; \ + __ret_518 = __s0_518 - vmull_s16(vget_high_s16(__s1_518), splat_lane_s16(__s2_518, __p3_518)); \ + __ret_518; \ +}) #else -__ai float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) { - 
float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vmlsl_high_lane_s16(__p0_519, __p1_519, __p2_519, __p3_519) __extension__ ({ \ + int32x4_t __ret_519; \ + int32x4_t __s0_519 = __p0_519; \ + int16x8_t __s1_519 = __p1_519; \ + int16x4_t __s2_519 = __p2_519; \ + int32x4_t __rev0_519; __rev0_519 = __builtin_shufflevector(__s0_519, __s0_519, 3, 2, 1, 0); \ + int16x8_t __rev1_519; __rev1_519 = __builtin_shufflevector(__s1_519, __s1_519, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_519; __rev2_519 = __builtin_shufflevector(__s2_519, __s2_519, 3, 2, 1, 0); \ + __ret_519 = __rev0_519 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_519), __noswap_splat_lane_s16(__rev2_519, __p3_519)); \ + __ret_519 = __builtin_shufflevector(__ret_519, __ret_519, 3, 2, 1, 0); \ + __ret_519; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} +#define vmlsl_high_laneq_u32(__p0_520, __p1_520, __p2_520, __p3_520) __extension__ ({ \ + uint64x2_t __ret_520; \ + uint64x2_t __s0_520 = __p0_520; \ + uint32x4_t __s1_520 = __p1_520; \ + uint32x4_t __s2_520 = __p2_520; \ + __ret_520 = __s0_520 - vmull_u32(vget_high_u32(__s1_520), splat_laneq_u32(__s2_520, __p3_520)); \ + __ret_520; \ +}) #else -__ai int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vmlsl_high_laneq_u32(__p0_521, __p1_521, __p2_521, __p3_521) __extension__ ({ \ + uint64x2_t __ret_521; \ + uint64x2_t __s0_521 = __p0_521; \ + uint32x4_t __s1_521 = __p1_521; \ + uint32x4_t __s2_521 = __p2_521; \ + uint64x2_t __rev0_521; __rev0_521 = __builtin_shufflevector(__s0_521, __s0_521, 1, 0); \ + uint32x4_t __rev1_521; __rev1_521 = __builtin_shufflevector(__s1_521, __s1_521, 3, 2, 1, 0); \ + uint32x4_t __rev2_521; __rev2_521 = __builtin_shufflevector(__s2_521, __s2_521, 3, 2, 1, 0); \ + __ret_521 = __rev0_521 - __noswap_vmull_u32(__noswap_vget_high_u32(__rev1_521), __noswap_splat_laneq_u32(__rev2_521, __p3_521)); \ + __ret_521 = __builtin_shufflevector(__ret_521, __ret_521, 1, 0); \ + __ret_521; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} +#define vmlsl_high_laneq_u16(__p0_522, __p1_522, __p2_522, __p3_522) __extension__ ({ \ + uint32x4_t __ret_522; \ + uint32x4_t __s0_522 = __p0_522; \ + uint16x8_t __s1_522 = __p1_522; \ + uint16x8_t __s2_522 = __p2_522; \ + __ret_522 = __s0_522 - vmull_u16(vget_high_u16(__s1_522), splat_laneq_u16(__s2_522, __p3_522)); \ + __ret_522; \ +}) #else -__ai int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vmlsl_high_laneq_u16(__p0_523, __p1_523, __p2_523, __p3_523) __extension__ ({ \ + uint32x4_t __ret_523; \ + uint32x4_t __s0_523 = __p0_523; \ + uint16x8_t __s1_523 = __p1_523; \ + 
uint16x8_t __s2_523 = __p2_523; \ + uint32x4_t __rev0_523; __rev0_523 = __builtin_shufflevector(__s0_523, __s0_523, 3, 2, 1, 0); \ + uint16x8_t __rev1_523; __rev1_523 = __builtin_shufflevector(__s1_523, __s1_523, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev2_523; __rev2_523 = __builtin_shufflevector(__s2_523, __s2_523, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_523 = __rev0_523 - __noswap_vmull_u16(__noswap_vget_high_u16(__rev1_523), __noswap_splat_laneq_u16(__rev2_523, __p3_523)); \ + __ret_523 = __builtin_shufflevector(__ret_523, __ret_523, 3, 2, 1, 0); \ + __ret_523; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float64_t vpmaxqd_f64(float64x2_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__p0); - return __ret; -} +#define vmlsl_high_laneq_s32(__p0_524, __p1_524, __p2_524, __p3_524) __extension__ ({ \ + int64x2_t __ret_524; \ + int64x2_t __s0_524 = __p0_524; \ + int32x4_t __s1_524 = __p1_524; \ + int32x4_t __s2_524 = __p2_524; \ + __ret_524 = __s0_524 - vmull_s32(vget_high_s32(__s1_524), splat_laneq_s32(__s2_524, __p3_524)); \ + __ret_524; \ +}) #else -__ai float64_t vpmaxqd_f64(float64x2_t __p0) { - float64_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__rev0); - return __ret; -} +#define vmlsl_high_laneq_s32(__p0_525, __p1_525, __p2_525, __p3_525) __extension__ ({ \ + int64x2_t __ret_525; \ + int64x2_t __s0_525 = __p0_525; \ + int32x4_t __s1_525 = __p1_525; \ + int32x4_t __s2_525 = __p2_525; \ + int64x2_t __rev0_525; __rev0_525 = __builtin_shufflevector(__s0_525, __s0_525, 1, 0); \ + int32x4_t __rev1_525; __rev1_525 = __builtin_shufflevector(__s1_525, __s1_525, 3, 2, 1, 0); \ + int32x4_t __rev2_525; __rev2_525 = __builtin_shufflevector(__s2_525, __s2_525, 3, 2, 1, 0); \ + __ret_525 = __rev0_525 - __noswap_vmull_s32(__noswap_vget_high_s32(__rev1_525), __noswap_splat_laneq_s32(__rev2_525, __p3_525)); \ + __ret_525 = __builtin_shufflevector(__ret_525, 
__ret_525, 1, 0); \ + __ret_525; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32_t vpmaxs_f32(float32x2_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vpmaxs_f32(__p0); - return __ret; -} -#else -__ai float32_t vpmaxs_f32(float32x2_t __p0) { - float32_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float32_t) __builtin_neon_vpmaxs_f32(__rev0); - return __ret; -} +#define vmlsl_high_laneq_s16(__p0_526, __p1_526, __p2_526, __p3_526) __extension__ ({ \ + int32x4_t __ret_526; \ + int32x4_t __s0_526 = __p0_526; \ + int16x8_t __s1_526 = __p1_526; \ + int16x8_t __s2_526 = __p2_526; \ + __ret_526 = __s0_526 - vmull_s16(vget_high_s16(__s1_526), splat_laneq_s16(__s2_526, __p3_526)); \ + __ret_526; \ +}) +#else +#define vmlsl_high_laneq_s16(__p0_527, __p1_527, __p2_527, __p3_527) __extension__ ({ \ + int32x4_t __ret_527; \ + int32x4_t __s0_527 = __p0_527; \ + int16x8_t __s1_527 = __p1_527; \ + int16x8_t __s2_527 = __p2_527; \ + int32x4_t __rev0_527; __rev0_527 = __builtin_shufflevector(__s0_527, __s0_527, 3, 2, 1, 0); \ + int16x8_t __rev1_527; __rev1_527 = __builtin_shufflevector(__s1_527, __s1_527, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_527; __rev2_527 = __builtin_shufflevector(__s2_527, __s2_527, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_527 = __rev0_527 - __noswap_vmull_s16(__noswap_vget_high_s16(__rev1_527), __noswap_splat_laneq_s16(__rev2_527, __p3_527)); \ + __ret_527 = __builtin_shufflevector(__ret_527, __ret_527, 3, 2, 1, 0); \ + __ret_527; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - return __ret; -} +#define vmlsl_laneq_u32(__p0_528, __p1_528, __p2_528, __p3_528) __extension__ ({ \ + uint64x2_t __ret_528; \ + uint64x2_t __s0_528 = __p0_528; \ + uint32x2_t __s1_528 = __p1_528; \ + uint32x4_t __s2_528 = __p2_528; \ + __ret_528 = 
__s0_528 - vmull_u32(__s1_528, splat_laneq_u32(__s2_528, __p3_528)); \ + __ret_528; \ +}) +#else +#define vmlsl_laneq_u32(__p0_529, __p1_529, __p2_529, __p3_529) __extension__ ({ \ + uint64x2_t __ret_529; \ + uint64x2_t __s0_529 = __p0_529; \ + uint32x2_t __s1_529 = __p1_529; \ + uint32x4_t __s2_529 = __p2_529; \ + uint64x2_t __rev0_529; __rev0_529 = __builtin_shufflevector(__s0_529, __s0_529, 1, 0); \ + uint32x2_t __rev1_529; __rev1_529 = __builtin_shufflevector(__s1_529, __s1_529, 1, 0); \ + uint32x4_t __rev2_529; __rev2_529 = __builtin_shufflevector(__s2_529, __s2_529, 3, 2, 1, 0); \ + __ret_529 = __rev0_529 - __noswap_vmull_u32(__rev1_529, __noswap_splat_laneq_u32(__rev2_529, __p3_529)); \ + __ret_529 = __builtin_shufflevector(__ret_529, __ret_529, 1, 0); \ + __ret_529; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_laneq_u16(__p0_530, __p1_530, __p2_530, __p3_530) __extension__ ({ \ + uint32x4_t __ret_530; \ + uint32x4_t __s0_530 = __p0_530; \ + uint16x4_t __s1_530 = __p1_530; \ + uint16x8_t __s2_530 = __p2_530; \ + __ret_530 = __s0_530 - vmull_u16(__s1_530, splat_laneq_u16(__s2_530, __p3_530)); \ + __ret_530; \ +}) +#else +#define vmlsl_laneq_u16(__p0_531, __p1_531, __p2_531, __p3_531) __extension__ ({ \ + uint32x4_t __ret_531; \ + uint32x4_t __s0_531 = __p0_531; \ + uint16x4_t __s1_531 = __p1_531; \ + uint16x8_t __s2_531 = __p2_531; \ + uint32x4_t __rev0_531; __rev0_531 = __builtin_shufflevector(__s0_531, __s0_531, 3, 2, 1, 0); \ + uint16x4_t __rev1_531; __rev1_531 = __builtin_shufflevector(__s1_531, __s1_531, 3, 2, 1, 0); \ + uint16x8_t __rev2_531; __rev2_531 = __builtin_shufflevector(__s2_531, __s2_531, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_531 = __rev0_531 - __noswap_vmull_u16(__rev1_531, __noswap_splat_laneq_u16(__rev2_531, __p3_531)); \ + __ret_531 = __builtin_shufflevector(__ret_531, __ret_531, 3, 2, 1, 0); \ + __ret_531; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_laneq_s32(__p0_532, __p1_532, __p2_532, __p3_532) __extension__ ({ \ 
+ int64x2_t __ret_532; \ + int64x2_t __s0_532 = __p0_532; \ + int32x2_t __s1_532 = __p1_532; \ + int32x4_t __s2_532 = __p2_532; \ + __ret_532 = __s0_532 - vmull_s32(__s1_532, splat_laneq_s32(__s2_532, __p3_532)); \ + __ret_532; \ +}) +#else +#define vmlsl_laneq_s32(__p0_533, __p1_533, __p2_533, __p3_533) __extension__ ({ \ + int64x2_t __ret_533; \ + int64x2_t __s0_533 = __p0_533; \ + int32x2_t __s1_533 = __p1_533; \ + int32x4_t __s2_533 = __p2_533; \ + int64x2_t __rev0_533; __rev0_533 = __builtin_shufflevector(__s0_533, __s0_533, 1, 0); \ + int32x2_t __rev1_533; __rev1_533 = __builtin_shufflevector(__s1_533, __s1_533, 1, 0); \ + int32x4_t __rev2_533; __rev2_533 = __builtin_shufflevector(__s2_533, __s2_533, 3, 2, 1, 0); \ + __ret_533 = __rev0_533 - __noswap_vmull_s32(__rev1_533, __noswap_splat_laneq_s32(__rev2_533, __p3_533)); \ + __ret_533 = __builtin_shufflevector(__ret_533, __ret_533, 1, 0); \ + __ret_533; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmlsl_laneq_s16(__p0_534, __p1_534, __p2_534, __p3_534) __extension__ ({ \ + int32x4_t __ret_534; \ + int32x4_t __s0_534 = __p0_534; \ + int16x4_t __s1_534 = __p1_534; \ + int16x8_t __s2_534 = __p2_534; \ + __ret_534 = __s0_534 - vmull_s16(__s1_534, splat_laneq_s16(__s2_534, __p3_534)); \ + __ret_534; \ +}) #else -__ai float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vmlsl_laneq_s16(__p0_535, __p1_535, __p2_535, __p3_535) __extension__ ({ \ + int32x4_t __ret_535; \ + int32x4_t __s0_535 = __p0_535; \ + int16x4_t __s1_535 = __p1_535; \ + int16x8_t __s2_535 = __p2_535; \ + int32x4_t __rev0_535; __rev0_535 = __builtin_shufflevector(__s0_535, __s0_535, 3, 2, 1, 
0); \ + int16x4_t __rev1_535; __rev1_535 = __builtin_shufflevector(__s1_535, __s1_535, 3, 2, 1, 0); \ + int16x8_t __rev2_535; __rev2_535 = __builtin_shufflevector(__s2_535, __s2_535, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_535 = __rev0_535 - __noswap_vmull_s16(__rev1_535, __noswap_splat_laneq_s16(__rev2_535, __p3_535)); \ + __ret_535 = __builtin_shufflevector(__ret_535, __ret_535, 3, 2, 1, 0); \ + __ret_535; \ +}) #endif +__ai __attribute__((target("neon"))) poly64x1_t vmov_n_p64(poly64_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t) {__p0}; + return __ret; +} #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); +__ai __attribute__((target("neon"))) poly64x2_t vmovq_n_p64(poly64_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t) {__p0, __p0}; return __ret; } #else -__ai float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly64x2_t vmovq_n_p64(poly64_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t) {__p0, __p0}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); +__ai __attribute__((target("neon"))) float64x2_t vmovq_n_f64(float64_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) {__p0, __p0}; return __ret; } #else -__ai float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - 
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); +__ai __attribute__((target("neon"))) float64x2_t vmovq_n_f64(float64_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) {__p0, __p0}; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -#ifdef __LITTLE_ENDIAN__ -__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__p0); +__ai __attribute__((target("neon"))) float64x1_t vmov_n_f64(float64_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) {__p0}; return __ret; } +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vmovl_high_u8(uint8x16_t __p0_536) { + uint16x8_t __ret_536; + uint8x8_t __a1_536 = vget_high_u8(__p0_536); + __ret_536 = (uint16x8_t)(vshll_n_u8(__a1_536, 0)); + return __ret_536; +} #else -__ai float64_t vpmaxnmqd_f64(float64x2_t __p0) { - float64_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__rev0); - return __ret; +__ai __attribute__((target("neon"))) uint16x8_t vmovl_high_u8(uint8x16_t __p0_537) { + uint16x8_t __ret_537; + uint8x16_t __rev0_537; __rev0_537 = __builtin_shufflevector(__p0_537, __p0_537, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __a1_537 = __noswap_vget_high_u8(__rev0_537); + __ret_537 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_537, 0)); + __ret_537 = __builtin_shufflevector(__ret_537, __ret_537, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret_537; +} +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vmovl_high_u8(uint8x16_t __p0_538) { + uint16x8_t __ret_538; + uint8x8_t __a1_538 = __noswap_vget_high_u8(__p0_538); + __ret_538 = (uint16x8_t)(__noswap_vshll_n_u8(__a1_538, 0)); + return __ret_538; } #endif #ifdef 
__LITTLE_ENDIAN__ -__ai float32_t vpmaxnms_f32(float32x2_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__p0); - return __ret; +__ai __attribute__((target("neon"))) uint64x2_t vmovl_high_u32(uint32x4_t __p0_539) { + uint64x2_t __ret_539; + uint32x2_t __a1_539 = vget_high_u32(__p0_539); + __ret_539 = (uint64x2_t)(vshll_n_u32(__a1_539, 0)); + return __ret_539; } #else -__ai float32_t vpmaxnms_f32(float32x2_t __p0) { - float32_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__rev0); - return __ret; +__ai __attribute__((target("neon"))) uint64x2_t vmovl_high_u32(uint32x4_t __p0_540) { + uint64x2_t __ret_540; + uint32x4_t __rev0_540; __rev0_540 = __builtin_shufflevector(__p0_540, __p0_540, 3, 2, 1, 0); + uint32x2_t __a1_540 = __noswap_vget_high_u32(__rev0_540); + __ret_540 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_540, 0)); + __ret_540 = __builtin_shufflevector(__ret_540, __ret_540, 1, 0); + return __ret_540; +} +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmovl_high_u32(uint32x4_t __p0_541) { + uint64x2_t __ret_541; + uint32x2_t __a1_541 = __noswap_vget_high_u32(__p0_541); + __ret_541 = (uint64x2_t)(__noswap_vshll_n_u32(__a1_541, 0)); + return __ret_541; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; +__ai __attribute__((target("neon"))) uint32x4_t vmovl_high_u16(uint16x8_t __p0_542) { + uint32x4_t __ret_542; + uint16x4_t __a1_542 = vget_high_u16(__p0_542); + __ret_542 = (uint32x4_t)(vshll_n_u16(__a1_542, 0)); + return __ret_542; } #else -__ai uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; +__ai __attribute__((target("neon"))) uint32x4_t vmovl_high_u16(uint16x8_t __p0_543) { + uint32x4_t __ret_543; + uint16x8_t __rev0_543; __rev0_543 = __builtin_shufflevector(__p0_543, __p0_543, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x4_t __a1_543 = __noswap_vget_high_u16(__rev0_543); + __ret_543 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_543, 0)); + __ret_543 = __builtin_shufflevector(__ret_543, __ret_543, 3, 2, 1, 0); + return __ret_543; +} +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmovl_high_u16(uint16x8_t __p0_544) { + uint32x4_t __ret_544; + uint16x4_t __a1_544 = __noswap_vget_high_u16(__p0_544); + __ret_544 = (uint32x4_t)(__noswap_vshll_n_u16(__a1_544, 0)); + return __ret_544; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; +__ai __attribute__((target("neon"))) int16x8_t vmovl_high_s8(int8x16_t __p0_545) { + int16x8_t __ret_545; + int8x8_t __a1_545 = vget_high_s8(__p0_545); + __ret_545 = (int16x8_t)(vshll_n_s8(__a1_545, 0)); + return __ret_545; } #else -__ai uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; +__ai __attribute__((target("neon"))) int16x8_t vmovl_high_s8(int8x16_t __p0_546) { + int16x8_t __ret_546; + int8x16_t __rev0_546; 
__rev0_546 = __builtin_shufflevector(__p0_546, __p0_546, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __a1_546 = __noswap_vget_high_s8(__rev0_546); + __ret_546 = (int16x8_t)(__noswap_vshll_n_s8(__a1_546, 0)); + __ret_546 = __builtin_shufflevector(__ret_546, __ret_546, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret_546; +} +__ai __attribute__((target("neon"))) int16x8_t __noswap_vmovl_high_s8(int8x16_t __p0_547) { + int16x8_t __ret_547; + int8x8_t __a1_547 = __noswap_vget_high_s8(__p0_547); + __ret_547 = (int16x8_t)(__noswap_vshll_n_s8(__a1_547, 0)); + return __ret_547; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; +__ai __attribute__((target("neon"))) int64x2_t vmovl_high_s32(int32x4_t __p0_548) { + int64x2_t __ret_548; + int32x2_t __a1_548 = vget_high_s32(__p0_548); + __ret_548 = (int64x2_t)(vshll_n_s32(__a1_548, 0)); + return __ret_548; } #else -__ai uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; +__ai __attribute__((target("neon"))) int64x2_t vmovl_high_s32(int32x4_t __p0_549) { + int64x2_t __ret_549; + int32x4_t __rev0_549; __rev0_549 = __builtin_shufflevector(__p0_549, __p0_549, 3, 2, 1, 0); + int32x2_t __a1_549 = __noswap_vget_high_s32(__rev0_549); + __ret_549 = (int64x2_t)(__noswap_vshll_n_s32(__a1_549, 0)); + __ret_549 = __builtin_shufflevector(__ret_549, __ret_549, 1, 0); + return __ret_549; +} +__ai __attribute__((target("neon"))) int64x2_t __noswap_vmovl_high_s32(int32x4_t __p0_550) { 
+ int64x2_t __ret_550; + int32x2_t __a1_550 = __noswap_vget_high_s32(__p0_550); + __ret_550 = (int64x2_t)(__noswap_vshll_n_s32(__a1_550, 0)); + return __ret_550; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; +__ai __attribute__((target("neon"))) int32x4_t vmovl_high_s16(int16x8_t __p0_551) { + int32x4_t __ret_551; + int16x4_t __a1_551 = vget_high_s16(__p0_551); + __ret_551 = (int32x4_t)(vshll_n_s16(__a1_551, 0)); + return __ret_551; } #else -__ai int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; +__ai __attribute__((target("neon"))) int32x4_t vmovl_high_s16(int16x8_t __p0_552) { + int32x4_t __ret_552; + int16x8_t __rev0_552; __rev0_552 = __builtin_shufflevector(__p0_552, __p0_552, 7, 6, 5, 4, 3, 2, 1, 0); + int16x4_t __a1_552 = __noswap_vget_high_s16(__rev0_552); + __ret_552 = (int32x4_t)(__noswap_vshll_n_s16(__a1_552, 0)); + __ret_552 = __builtin_shufflevector(__ret_552, __ret_552, 3, 2, 1, 0); + return __ret_552; +} +__ai __attribute__((target("neon"))) int32x4_t __noswap_vmovl_high_s16(int16x8_t __p0_553) { + int32x4_t __ret_553; + int16x4_t __a1_553 = __noswap_vget_high_s16(__p0_553); + __ret_553 = (int32x4_t)(__noswap_vshll_n_s16(__a1_553, 0)); + return __ret_553; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) 
__builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); +__ai __attribute__((target("neon"))) uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vmovn_u32(__p1)); return __ret; } #else -__ai float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint16x8_t vmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { + uint16x8_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vcombine_u16(__rev0, __noswap_vmovn_u32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); +__ai __attribute__((target("neon"))) uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vmovn_u64(__p1)); return __ret; } #else -__ai float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); +__ai __attribute__((target("neon"))) uint32x4_t vmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { + uint32x4_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + 
uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vcombine_u32(__rev0, __noswap_vmovn_u64(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); +__ai __attribute__((target("neon"))) uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vmovn_u16(__p1)); return __ret; } #else -__ai int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8x16_t vmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { + uint8x16_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_u8(__rev0, __noswap_vmovn_u16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + __ret = vcombine_s16(__p0, vmovn_s32(__p1)); return __ret; } #else -__ai int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { 
int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vcombine_s16(__rev0, __noswap_vmovn_s32(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float64_t vpminqd_f64(float64x2_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vpminqd_f64(__p0); +__ai __attribute__((target("neon"))) int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vmovn_s64(__p1)); return __ret; } #else -__ai float64_t vpminqd_f64(float64x2_t __p0) { - float64_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64_t) __builtin_neon_vpminqd_f64(__rev0); +__ai __attribute__((target("neon"))) int32x4_t vmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { + int32x4_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vcombine_s32(__rev0, __noswap_vmovn_s64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32_t vpmins_f32(float32x2_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vpmins_f32(__p0); +__ai __attribute__((target("neon"))) int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vmovn_s16(__p1)); return __ret; } #else -__ai float32_t vpmins_f32(float32x2_t __p0) { - float32_t __ret; - float32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float32_t) __builtin_neon_vpmins_f32(__rev0); +__ai __attribute__((target("neon"))) int8x16_t vmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { + int8x16_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_s8(__rev0, __noswap_vmovn_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + __ret = __p0 * __p1; return __ret; } #else -__ai float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vmulq_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __rev0 * __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); +__ai __attribute__((target("neon"))) float64x1_t vmul_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = __p0 * __p1; return __ret; } +#define vmuld_lane_f64(__p0_554, __p1_554, __p2_554) __extension__ ({ \ + float64_t __ret_554; \ + float64_t __s0_554 = __p0_554; \ + float64x1_t __s1_554 = __p1_554; \ + 
__ret_554 = __s0_554 * vget_lane_f64(__s1_554, __p2_554); \ + __ret_554; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vmuls_lane_f32(__p0_555, __p1_555, __p2_555) __extension__ ({ \ + float32_t __ret_555; \ + float32_t __s0_555 = __p0_555; \ + float32x2_t __s1_555 = __p1_555; \ + __ret_555 = __s0_555 * vget_lane_f32(__s1_555, __p2_555); \ + __ret_555; \ +}) #else -__ai float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vmuls_lane_f32(__p0_556, __p1_556, __p2_556) __extension__ ({ \ + float32_t __ret_556; \ + float32_t __s0_556 = __p0_556; \ + float32x2_t __s1_556 = __p1_556; \ + float32x2_t __rev1_556; __rev1_556 = __builtin_shufflevector(__s1_556, __s1_556, 1, 0); \ + __ret_556 = __s0_556 * __noswap_vget_lane_f32(__rev1_556, __p2_556); \ + __ret_556; \ +}) #endif +#define vmul_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + __ret = (float64x1_t) __builtin_neon_vmul_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 10); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); - return __ret; -} +#define vmulq_lane_f64(__p0_557, __p1_557, __p2_557) __extension__ ({ \ + float64x2_t __ret_557; \ + float64x2_t __s0_557 = __p0_557; \ + float64x1_t __s1_557 = __p1_557; \ + __ret_557 = __s0_557 * splatq_lane_f64(__s1_557, __p2_557); \ + __ret_557; \ +}) #else -__ai float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vmulq_lane_f64(__p0_558, __p1_558, __p2_558) __extension__ ({ \ + float64x2_t __ret_558; \ + float64x2_t __s0_558 = __p0_558; \ + float64x1_t __s1_558 = __p1_558; \ + float64x2_t __rev0_558; __rev0_558 = __builtin_shufflevector(__s0_558, __s0_558, 1, 0); \ + __ret_558 = __rev0_558 * __noswap_splatq_lane_f64(__s1_558, __p2_558); \ + __ret_558 = __builtin_shufflevector(__ret_558, __ret_558, 1, 0); \ + __ret_558; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float64_t vpminnmqd_f64(float64x2_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__p0); - return __ret; -} +#define vmuld_laneq_f64(__p0_559, __p1_559, __p2_559) __extension__ ({ \ + float64_t __ret_559; \ + float64_t __s0_559 = __p0_559; \ + float64x2_t __s1_559 = __p1_559; \ + __ret_559 = __s0_559 * vgetq_lane_f64(__s1_559, __p2_559); \ + __ret_559; \ +}) #else -__ai float64_t vpminnmqd_f64(float64x2_t __p0) { - float64_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__rev0); - return __ret; -} +#define vmuld_laneq_f64(__p0_560, __p1_560, __p2_560) __extension__ ({ \ + float64_t __ret_560; \ + float64_t __s0_560 = __p0_560; \ + float64x2_t __s1_560 = __p1_560; \ + float64x2_t __rev1_560; __rev1_560 = __builtin_shufflevector(__s1_560, __s1_560, 1, 0); \ + __ret_560 = __s0_560 * __noswap_vgetq_lane_f64(__rev1_560, __p2_560); \ + __ret_560; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32_t vpminnms_f32(float32x2_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vpminnms_f32(__p0); - return __ret; -} +#define vmuls_laneq_f32(__p0_561, __p1_561, __p2_561) __extension__ ({ \ + float32_t __ret_561; \ 
+ float32_t __s0_561 = __p0_561; \ + float32x4_t __s1_561 = __p1_561; \ + __ret_561 = __s0_561 * vgetq_lane_f32(__s1_561, __p2_561); \ + __ret_561; \ +}) #else -__ai float32_t vpminnms_f32(float32x2_t __p0) { - float32_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float32_t) __builtin_neon_vpminnms_f32(__rev0); - return __ret; -} +#define vmuls_laneq_f32(__p0_562, __p1_562, __p2_562) __extension__ ({ \ + float32_t __ret_562; \ + float32_t __s0_562 = __p0_562; \ + float32x4_t __s1_562 = __p1_562; \ + float32x4_t __rev1_562; __rev1_562 = __builtin_shufflevector(__s1_562, __s1_562, 3, 2, 1, 0); \ + __ret_562 = __s0_562 * __noswap_vgetq_lane_f32(__rev1_562, __p2_562); \ + __ret_562; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqabsq_s64(int64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 35); - return __ret; -} +#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 10); \ + __ret; \ +}) #else -__ai int64x2_t vqabsq_s64(int64x2_t __p0) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vmul_laneq_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __ret; \ + float64x1_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (float64x1_t) __builtin_neon_vmul_laneq_v((int8x8_t)__s0, (int8x16_t)__rev1, __p2, 10); \ + __ret; \ +}) #endif -__ai int64x1_t vqabs_s64(int64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3); - return __ret; -} -__ai int8_t vqabsb_s8(int8_t __p0) { - int8_t 
__ret; - __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0); - return __ret; -} -__ai int32_t vqabss_s32(int32_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqabss_s32(__p0); - return __ret; -} -__ai int64_t vqabsd_s64(int64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0); - return __ret; -} -__ai int16_t vqabsh_s16(int16_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0); - return __ret; -} -__ai uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1); - return __ret; -} -__ai uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1); - return __ret; -} -__ai uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1); - return __ret; -} -__ai uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1); - return __ret; -} -__ai int8_t vqaddb_s8(int8_t __p0, int8_t __p1) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1); - return __ret; -} -__ai int32_t vqadds_s32(int32_t __p0, int32_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1); - return __ret; -} -__ai int64_t vqaddd_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1); - return __ret; -} -__ai int16_t vqaddh_s16(int16_t __p0, int16_t __p1) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1); - return __ret; -} -__ai int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2); - return __ret; -} -__ai int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, 
__p2); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { - int64x2_t __ret; - __ret = vqdmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); - return __ret; -} +#define vmulq_laneq_u32(__p0_563, __p1_563, __p2_563) __extension__ ({ \ + uint32x4_t __ret_563; \ + uint32x4_t __s0_563 = __p0_563; \ + uint32x4_t __s1_563 = __p1_563; \ + __ret_563 = __s0_563 * splatq_laneq_u32(__s1_563, __p2_563); \ + __ret_563; \ +}) #else -__ai int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vmulq_laneq_u32(__p0_564, __p1_564, __p2_564) __extension__ ({ \ + uint32x4_t __ret_564; \ + uint32x4_t __s0_564 = __p0_564; \ + uint32x4_t __s1_564 = __p1_564; \ + uint32x4_t __rev0_564; __rev0_564 = __builtin_shufflevector(__s0_564, __s0_564, 3, 2, 1, 0); \ + uint32x4_t __rev1_564; __rev1_564 = __builtin_shufflevector(__s1_564, __s1_564, 3, 2, 1, 0); \ + __ret_564 = __rev0_564 * __noswap_splatq_laneq_u32(__rev1_564, __p2_564); \ + __ret_564 = __builtin_shufflevector(__ret_564, __ret_564, 3, 2, 1, 0); \ + __ret_564; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { - int32x4_t __ret; - __ret = vqdmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); - return __ret; -} +#define vmulq_laneq_u16(__p0_565, __p1_565, __p2_565) __extension__ ({ \ + uint16x8_t __ret_565; \ + uint16x8_t __s0_565 = __p0_565; \ + uint16x8_t __s1_565 = __p1_565; \ + __ret_565 = __s0_565 * splatq_laneq_u16(__s1_565, 
__p2_565); \ + __ret_565; \ +}) #else -__ai int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vmulq_laneq_u16(__p0_566, __p1_566, __p2_566) __extension__ ({ \ + uint16x8_t __ret_566; \ + uint16x8_t __s0_566 = __p0_566; \ + uint16x8_t __s1_566 = __p1_566; \ + uint16x8_t __rev0_566; __rev0_566 = __builtin_shufflevector(__s0_566, __s0_566, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_566; __rev1_566 = __builtin_shufflevector(__s1_566, __s1_566, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_566 = __rev0_566 * __noswap_splatq_laneq_u16(__rev1_566, __p2_566); \ + __ret_566 = __builtin_shufflevector(__ret_566, __ret_566, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_566; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlal_high_lane_s32(__p0_607, __p1_607, __p2_607, __p3_607) __extension__ ({ \ - int64x2_t __ret_607; \ - int64x2_t __s0_607 = __p0_607; \ - int32x4_t __s1_607 = __p1_607; \ - int32x2_t __s2_607 = __p2_607; \ - __ret_607 = vqdmlal_s32(__s0_607, vget_high_s32(__s1_607), splat_lane_s32(__s2_607, __p3_607)); \ - __ret_607; \ +#define vmulq_laneq_f64(__p0_567, __p1_567, __p2_567) __extension__ ({ \ + float64x2_t __ret_567; \ + float64x2_t __s0_567 = __p0_567; \ + float64x2_t __s1_567 = __p1_567; \ + __ret_567 = __s0_567 * splatq_laneq_f64(__s1_567, __p2_567); \ + __ret_567; \ }) #else -#define vqdmlal_high_lane_s32(__p0_608, __p1_608, __p2_608, __p3_608) __extension__ ({ \ - int64x2_t __ret_608; \ - int64x2_t __s0_608 = __p0_608; \ - int32x4_t __s1_608 = __p1_608; \ - int32x2_t __s2_608 = 
__p2_608; \ - int64x2_t __rev0_608; __rev0_608 = __builtin_shufflevector(__s0_608, __s0_608, 1, 0); \ - int32x4_t __rev1_608; __rev1_608 = __builtin_shufflevector(__s1_608, __s1_608, 3, 2, 1, 0); \ - int32x2_t __rev2_608; __rev2_608 = __builtin_shufflevector(__s2_608, __s2_608, 1, 0); \ - __ret_608 = __noswap_vqdmlal_s32(__rev0_608, __noswap_vget_high_s32(__rev1_608), __noswap_splat_lane_s32(__rev2_608, __p3_608)); \ - __ret_608 = __builtin_shufflevector(__ret_608, __ret_608, 1, 0); \ - __ret_608; \ +#define vmulq_laneq_f64(__p0_568, __p1_568, __p2_568) __extension__ ({ \ + float64x2_t __ret_568; \ + float64x2_t __s0_568 = __p0_568; \ + float64x2_t __s1_568 = __p1_568; \ + float64x2_t __rev0_568; __rev0_568 = __builtin_shufflevector(__s0_568, __s0_568, 1, 0); \ + float64x2_t __rev1_568; __rev1_568 = __builtin_shufflevector(__s1_568, __s1_568, 1, 0); \ + __ret_568 = __rev0_568 * __noswap_splatq_laneq_f64(__rev1_568, __p2_568); \ + __ret_568 = __builtin_shufflevector(__ret_568, __ret_568, 1, 0); \ + __ret_568; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlal_high_lane_s16(__p0_609, __p1_609, __p2_609, __p3_609) __extension__ ({ \ - int32x4_t __ret_609; \ - int32x4_t __s0_609 = __p0_609; \ - int16x8_t __s1_609 = __p1_609; \ - int16x4_t __s2_609 = __p2_609; \ - __ret_609 = vqdmlal_s16(__s0_609, vget_high_s16(__s1_609), splat_lane_s16(__s2_609, __p3_609)); \ - __ret_609; \ +#define vmulq_laneq_f32(__p0_569, __p1_569, __p2_569) __extension__ ({ \ + float32x4_t __ret_569; \ + float32x4_t __s0_569 = __p0_569; \ + float32x4_t __s1_569 = __p1_569; \ + __ret_569 = __s0_569 * splatq_laneq_f32(__s1_569, __p2_569); \ + __ret_569; \ }) #else -#define vqdmlal_high_lane_s16(__p0_610, __p1_610, __p2_610, __p3_610) __extension__ ({ \ - int32x4_t __ret_610; \ - int32x4_t __s0_610 = __p0_610; \ - int16x8_t __s1_610 = __p1_610; \ - int16x4_t __s2_610 = __p2_610; \ - int32x4_t __rev0_610; __rev0_610 = __builtin_shufflevector(__s0_610, __s0_610, 3, 2, 1, 0); \ - int16x8_t __rev1_610; 
__rev1_610 = __builtin_shufflevector(__s1_610, __s1_610, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_610; __rev2_610 = __builtin_shufflevector(__s2_610, __s2_610, 3, 2, 1, 0); \ - __ret_610 = __noswap_vqdmlal_s16(__rev0_610, __noswap_vget_high_s16(__rev1_610), __noswap_splat_lane_s16(__rev2_610, __p3_610)); \ - __ret_610 = __builtin_shufflevector(__ret_610, __ret_610, 3, 2, 1, 0); \ - __ret_610; \ +#define vmulq_laneq_f32(__p0_570, __p1_570, __p2_570) __extension__ ({ \ + float32x4_t __ret_570; \ + float32x4_t __s0_570 = __p0_570; \ + float32x4_t __s1_570 = __p1_570; \ + float32x4_t __rev0_570; __rev0_570 = __builtin_shufflevector(__s0_570, __s0_570, 3, 2, 1, 0); \ + float32x4_t __rev1_570; __rev1_570 = __builtin_shufflevector(__s1_570, __s1_570, 3, 2, 1, 0); \ + __ret_570 = __rev0_570 * __noswap_splatq_laneq_f32(__rev1_570, __p2_570); \ + __ret_570 = __builtin_shufflevector(__ret_570, __ret_570, 3, 2, 1, 0); \ + __ret_570; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlal_high_laneq_s32(__p0_611, __p1_611, __p2_611, __p3_611) __extension__ ({ \ - int64x2_t __ret_611; \ - int64x2_t __s0_611 = __p0_611; \ - int32x4_t __s1_611 = __p1_611; \ - int32x4_t __s2_611 = __p2_611; \ - __ret_611 = vqdmlal_s32(__s0_611, vget_high_s32(__s1_611), splat_laneq_s32(__s2_611, __p3_611)); \ - __ret_611; \ +#define vmulq_laneq_s32(__p0_571, __p1_571, __p2_571) __extension__ ({ \ + int32x4_t __ret_571; \ + int32x4_t __s0_571 = __p0_571; \ + int32x4_t __s1_571 = __p1_571; \ + __ret_571 = __s0_571 * splatq_laneq_s32(__s1_571, __p2_571); \ + __ret_571; \ }) #else -#define vqdmlal_high_laneq_s32(__p0_612, __p1_612, __p2_612, __p3_612) __extension__ ({ \ - int64x2_t __ret_612; \ - int64x2_t __s0_612 = __p0_612; \ - int32x4_t __s1_612 = __p1_612; \ - int32x4_t __s2_612 = __p2_612; \ - int64x2_t __rev0_612; __rev0_612 = __builtin_shufflevector(__s0_612, __s0_612, 1, 0); \ - int32x4_t __rev1_612; __rev1_612 = __builtin_shufflevector(__s1_612, __s1_612, 3, 2, 1, 0); \ - int32x4_t 
__rev2_612; __rev2_612 = __builtin_shufflevector(__s2_612, __s2_612, 3, 2, 1, 0); \ - __ret_612 = __noswap_vqdmlal_s32(__rev0_612, __noswap_vget_high_s32(__rev1_612), __noswap_splat_laneq_s32(__rev2_612, __p3_612)); \ - __ret_612 = __builtin_shufflevector(__ret_612, __ret_612, 1, 0); \ - __ret_612; \ +#define vmulq_laneq_s32(__p0_572, __p1_572, __p2_572) __extension__ ({ \ + int32x4_t __ret_572; \ + int32x4_t __s0_572 = __p0_572; \ + int32x4_t __s1_572 = __p1_572; \ + int32x4_t __rev0_572; __rev0_572 = __builtin_shufflevector(__s0_572, __s0_572, 3, 2, 1, 0); \ + int32x4_t __rev1_572; __rev1_572 = __builtin_shufflevector(__s1_572, __s1_572, 3, 2, 1, 0); \ + __ret_572 = __rev0_572 * __noswap_splatq_laneq_s32(__rev1_572, __p2_572); \ + __ret_572 = __builtin_shufflevector(__ret_572, __ret_572, 3, 2, 1, 0); \ + __ret_572; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlal_high_laneq_s16(__p0_613, __p1_613, __p2_613, __p3_613) __extension__ ({ \ - int32x4_t __ret_613; \ - int32x4_t __s0_613 = __p0_613; \ - int16x8_t __s1_613 = __p1_613; \ - int16x8_t __s2_613 = __p2_613; \ - __ret_613 = vqdmlal_s16(__s0_613, vget_high_s16(__s1_613), splat_laneq_s16(__s2_613, __p3_613)); \ - __ret_613; \ +#define vmulq_laneq_s16(__p0_573, __p1_573, __p2_573) __extension__ ({ \ + int16x8_t __ret_573; \ + int16x8_t __s0_573 = __p0_573; \ + int16x8_t __s1_573 = __p1_573; \ + __ret_573 = __s0_573 * splatq_laneq_s16(__s1_573, __p2_573); \ + __ret_573; \ }) #else -#define vqdmlal_high_laneq_s16(__p0_614, __p1_614, __p2_614, __p3_614) __extension__ ({ \ - int32x4_t __ret_614; \ - int32x4_t __s0_614 = __p0_614; \ - int16x8_t __s1_614 = __p1_614; \ - int16x8_t __s2_614 = __p2_614; \ - int32x4_t __rev0_614; __rev0_614 = __builtin_shufflevector(__s0_614, __s0_614, 3, 2, 1, 0); \ - int16x8_t __rev1_614; __rev1_614 = __builtin_shufflevector(__s1_614, __s1_614, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2_614; __rev2_614 = __builtin_shufflevector(__s2_614, __s2_614, 7, 6, 5, 4, 3, 2, 1, 0); \ - 
__ret_614 = __noswap_vqdmlal_s16(__rev0_614, __noswap_vget_high_s16(__rev1_614), __noswap_splat_laneq_s16(__rev2_614, __p3_614)); \ - __ret_614 = __builtin_shufflevector(__ret_614, __ret_614, 3, 2, 1, 0); \ - __ret_614; \ +#define vmulq_laneq_s16(__p0_574, __p1_574, __p2_574) __extension__ ({ \ + int16x8_t __ret_574; \ + int16x8_t __s0_574 = __p0_574; \ + int16x8_t __s1_574 = __p1_574; \ + int16x8_t __rev0_574; __rev0_574 = __builtin_shufflevector(__s0_574, __s0_574, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_574; __rev1_574 = __builtin_shufflevector(__s1_574, __s1_574, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_574 = __rev0_574 * __noswap_splatq_laneq_s16(__rev1_574, __p2_574); \ + __ret_574 = __builtin_shufflevector(__ret_574, __ret_574, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_574; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { - int64x2_t __ret; - __ret = vqdmlal_n_s32(__p0, vget_high_s32(__p1), __p2); - return __ret; -} +#define vmul_laneq_u32(__p0_575, __p1_575, __p2_575) __extension__ ({ \ + uint32x2_t __ret_575; \ + uint32x2_t __s0_575 = __p0_575; \ + uint32x4_t __s1_575 = __p1_575; \ + __ret_575 = __s0_575 * splat_laneq_u32(__s1_575, __p2_575); \ + __ret_575; \ +}) #else -__ai int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __noswap_vqdmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vmul_laneq_u32(__p0_576, __p1_576, __p2_576) __extension__ ({ \ + uint32x2_t __ret_576; \ + uint32x2_t __s0_576 = __p0_576; \ + uint32x4_t __s1_576 = __p1_576; \ + uint32x2_t __rev0_576; __rev0_576 = __builtin_shufflevector(__s0_576, __s0_576, 1, 0); \ + uint32x4_t __rev1_576; __rev1_576 = 
__builtin_shufflevector(__s1_576, __s1_576, 3, 2, 1, 0); \ + __ret_576 = __rev0_576 * __noswap_splat_laneq_u32(__rev1_576, __p2_576); \ + __ret_576 = __builtin_shufflevector(__ret_576, __ret_576, 1, 0); \ + __ret_576; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { - int32x4_t __ret; - __ret = vqdmlal_n_s16(__p0, vget_high_s16(__p1), __p2); - return __ret; -} +#define vmul_laneq_u16(__p0_577, __p1_577, __p2_577) __extension__ ({ \ + uint16x4_t __ret_577; \ + uint16x4_t __s0_577 = __p0_577; \ + uint16x8_t __s1_577 = __p1_577; \ + __ret_577 = __s0_577 * splat_laneq_u16(__s1_577, __p2_577); \ + __ret_577; \ +}) #else -__ai int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vqdmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vmul_laneq_u16(__p0_578, __p1_578, __p2_578) __extension__ ({ \ + uint16x4_t __ret_578; \ + uint16x4_t __s0_578 = __p0_578; \ + uint16x8_t __s1_578 = __p1_578; \ + uint16x4_t __rev0_578; __rev0_578 = __builtin_shufflevector(__s0_578, __s0_578, 3, 2, 1, 0); \ + uint16x8_t __rev1_578; __rev1_578 = __builtin_shufflevector(__s1_578, __s1_578, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_578 = __rev0_578 * __noswap_splat_laneq_u16(__rev1_578, __p2_578); \ + __ret_578 = __builtin_shufflevector(__ret_578, __ret_578, 3, 2, 1, 0); \ + __ret_578; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64_t __ret; \ - int64_t __s0 = __p0; \ - int32_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __s2, __p3); \ - __ret; \ +#define vmul_laneq_f32(__p0_579, 
__p1_579, __p2_579) __extension__ ({ \ + float32x2_t __ret_579; \ + float32x2_t __s0_579 = __p0_579; \ + float32x4_t __s1_579 = __p1_579; \ + __ret_579 = __s0_579 * splat_laneq_f32(__s1_579, __p2_579); \ + __ret_579; \ }) #else -#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64_t __ret; \ - int64_t __s0 = __p0; \ - int32_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __rev2, __p3); \ - __ret; \ +#define vmul_laneq_f32(__p0_580, __p1_580, __p2_580) __extension__ ({ \ + float32x2_t __ret_580; \ + float32x2_t __s0_580 = __p0_580; \ + float32x4_t __s1_580 = __p1_580; \ + float32x2_t __rev0_580; __rev0_580 = __builtin_shufflevector(__s0_580, __s0_580, 1, 0); \ + float32x4_t __rev1_580; __rev1_580 = __builtin_shufflevector(__s1_580, __s1_580, 3, 2, 1, 0); \ + __ret_580 = __rev0_580 * __noswap_splat_laneq_f32(__rev1_580, __p2_580); \ + __ret_580 = __builtin_shufflevector(__ret_580, __ret_580, 1, 0); \ + __ret_580; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32_t __ret; \ - int32_t __s0 = __p0; \ - int16_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __s2, __p3); \ - __ret; \ +#define vmul_laneq_s32(__p0_581, __p1_581, __p2_581) __extension__ ({ \ + int32x2_t __ret_581; \ + int32x2_t __s0_581 = __p0_581; \ + int32x4_t __s1_581 = __p1_581; \ + __ret_581 = __s0_581 * splat_laneq_s32(__s1_581, __p2_581); \ + __ret_581; \ }) #else -#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32_t __ret; \ - int32_t __s0 = __p0; \ - int16_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __rev2, __p3); \ - __ret; \ +#define 
vmul_laneq_s32(__p0_582, __p1_582, __p2_582) __extension__ ({ \ + int32x2_t __ret_582; \ + int32x2_t __s0_582 = __p0_582; \ + int32x4_t __s1_582 = __p1_582; \ + int32x2_t __rev0_582; __rev0_582 = __builtin_shufflevector(__s0_582, __s0_582, 1, 0); \ + int32x4_t __rev1_582; __rev1_582 = __builtin_shufflevector(__s1_582, __s1_582, 3, 2, 1, 0); \ + __ret_582 = __rev0_582 * __noswap_splat_laneq_s32(__rev1_582, __p2_582); \ + __ret_582 = __builtin_shufflevector(__ret_582, __ret_582, 1, 0); \ + __ret_582; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64_t __ret; \ - int64_t __s0 = __p0; \ - int32_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __s2, __p3); \ - __ret; \ +#define vmul_laneq_s16(__p0_583, __p1_583, __p2_583) __extension__ ({ \ + int16x4_t __ret_583; \ + int16x4_t __s0_583 = __p0_583; \ + int16x8_t __s1_583 = __p1_583; \ + __ret_583 = __s0_583 * splat_laneq_s16(__s1_583, __p2_583); \ + __ret_583; \ }) #else -#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64_t __ret; \ - int64_t __s0 = __p0; \ - int32_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __rev2, __p3); \ - __ret; \ +#define vmul_laneq_s16(__p0_584, __p1_584, __p2_584) __extension__ ({ \ + int16x4_t __ret_584; \ + int16x4_t __s0_584 = __p0_584; \ + int16x8_t __s1_584 = __p1_584; \ + int16x4_t __rev0_584; __rev0_584 = __builtin_shufflevector(__s0_584, __s0_584, 3, 2, 1, 0); \ + int16x8_t __rev1_584; __rev1_584 = __builtin_shufflevector(__s1_584, __s1_584, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_584 = __rev0_584 * __noswap_splat_laneq_s16(__rev1_584, __p2_584); \ + __ret_584 = __builtin_shufflevector(__ret_584, __ret_584, 3, 2, 1, 0); \ + __ret_584; \ }) #endif +__ai __attribute__((target("neon"))) float64x1_t 
vmul_n_f64(float64x1_t __p0, float64_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmul_n_f64((float64x1_t)__p0, __p1); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32_t __ret; \ - int32_t __s0 = __p0; \ - int16_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __s2, __p3); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) { + float64x2_t __ret; + __ret = __p0 * (float64x2_t) {__p1, __p1}; + return __ret; +} #else -#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32_t __ret; \ - int32_t __s0 = __p0; \ - int16_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __rev2, __p3); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float64x2_t vmulq_n_f64(float64x2_t __p0, float64_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = __rev0 * (float64x2_t) {__p1, __p1}; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlal_laneq_s32(__p0_615, __p1_615, __p2_615, __p3_615) __extension__ ({ \ - int64x2_t __ret_615; \ - int64x2_t __s0_615 = __p0_615; \ - int32x2_t __s1_615 = __p1_615; \ - int32x4_t __s2_615 = __p2_615; \ - __ret_615 = vqdmlal_s32(__s0_615, __s1_615, splat_laneq_s32(__s2_615, __p3_615)); \ - __ret_615; \ -}) +__ai __attribute__((target("neon"))) poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly16x8_t __ret; + __ret = vmull_p8(vget_high_p8(__p0), vget_high_p8(__p1)); + return __ret; +} #else -#define vqdmlal_laneq_s32(__p0_616, __p1_616, __p2_616, __p3_616) __extension__ ({ \ - int64x2_t __ret_616; \ - int64x2_t __s0_616 
= __p0_616; \ - int32x2_t __s1_616 = __p1_616; \ - int32x4_t __s2_616 = __p2_616; \ - int64x2_t __rev0_616; __rev0_616 = __builtin_shufflevector(__s0_616, __s0_616, 1, 0); \ - int32x2_t __rev1_616; __rev1_616 = __builtin_shufflevector(__s1_616, __s1_616, 1, 0); \ - int32x4_t __rev2_616; __rev2_616 = __builtin_shufflevector(__s2_616, __s2_616, 3, 2, 1, 0); \ - __ret_616 = __noswap_vqdmlal_s32(__rev0_616, __rev1_616, __noswap_splat_laneq_s32(__rev2_616, __p3_616)); \ - __ret_616 = __builtin_shufflevector(__ret_616, __ret_616, 1, 0); \ - __ret_616; \ -}) +__ai __attribute__((target("neon"))) poly16x8_t vmull_high_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly16x8_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmull_p8(__noswap_vget_high_p8(__rev0), __noswap_vget_high_p8(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlal_laneq_s16(__p0_617, __p1_617, __p2_617, __p3_617) __extension__ ({ \ - int32x4_t __ret_617; \ - int32x4_t __s0_617 = __p0_617; \ - int16x4_t __s1_617 = __p1_617; \ - int16x8_t __s2_617 = __p2_617; \ - __ret_617 = vqdmlal_s16(__s0_617, __s1_617, splat_laneq_s16(__s2_617, __p3_617)); \ - __ret_617; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = vmull_u8(vget_high_u8(__p0), vget_high_u8(__p1)); + return __ret; +} #else -#define vqdmlal_laneq_s16(__p0_618, __p1_618, __p2_618, __p3_618) __extension__ ({ \ - int32x4_t __ret_618; \ - int32x4_t __s0_618 = __p0_618; \ - int16x4_t __s1_618 = __p1_618; \ - int16x8_t __s2_618 = __p2_618; \ - int32x4_t __rev0_618; __rev0_618 = __builtin_shufflevector(__s0_618, __s0_618, 3, 2, 1, 0); \ - int16x4_t __rev1_618; 
__rev1_618 = __builtin_shufflevector(__s1_618, __s1_618, 3, 2, 1, 0); \ - int16x8_t __rev2_618; __rev2_618 = __builtin_shufflevector(__s2_618, __s2_618, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_618 = __noswap_vqdmlal_s16(__rev0_618, __rev1_618, __noswap_splat_laneq_s16(__rev2_618, __p3_618)); \ - __ret_618 = __builtin_shufflevector(__ret_618, __ret_618, 3, 2, 1, 0); \ - __ret_618; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vmull_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmull_u8(__noswap_vget_high_u8(__rev0), __noswap_vget_high_u8(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif -__ai int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2); +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = vmull_u32(vget_high_u32(__p0), vget_high_u32(__p1)); return __ret; } -__ai int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2); +#else +__ai __attribute__((target("neon"))) uint64x2_t vmull_high_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0), __noswap_vget_high_u32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) 
uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = vmull_u16(vget_high_u16(__p0), vget_high_u16(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmull_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0), __noswap_vget_high_u16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = vmull_s8(vget_high_s8(__p0), vget_high_s8(__p1)); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vmull_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmull_s8(__noswap_vget_high_s8(__rev0), __noswap_vget_high_s8(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } +#endif + #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; - __ret = vqdmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); + __ret = vmull_s32(vget_high_s32(__p0), vget_high_s32(__p1)); return __ret; } #else -__ai int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmull_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t 
__ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); + __ret = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; - __ret = vqdmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); + __ret = vmull_s16(vget_high_s16(__p0), vget_high_s16(__p1)); return __ret; } #else -__ai int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmull_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); + __ret = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_high_lane_s32(__p0_619, __p1_619, __p2_619, __p3_619) __extension__ ({ \ - int64x2_t __ret_619; \ - int64x2_t __s0_619 = __p0_619; \ - int32x4_t __s1_619 = __p1_619; \ - 
int32x2_t __s2_619 = __p2_619; \ - __ret_619 = vqdmlsl_s32(__s0_619, vget_high_s32(__s1_619), splat_lane_s32(__s2_619, __p3_619)); \ - __ret_619; \ +#define vmull_high_lane_u32(__p0_585, __p1_585, __p2_585) __extension__ ({ \ + uint64x2_t __ret_585; \ + uint32x4_t __s0_585 = __p0_585; \ + uint32x2_t __s1_585 = __p1_585; \ + __ret_585 = vmull_u32(vget_high_u32(__s0_585), splat_lane_u32(__s1_585, __p2_585)); \ + __ret_585; \ }) #else -#define vqdmlsl_high_lane_s32(__p0_620, __p1_620, __p2_620, __p3_620) __extension__ ({ \ - int64x2_t __ret_620; \ - int64x2_t __s0_620 = __p0_620; \ - int32x4_t __s1_620 = __p1_620; \ - int32x2_t __s2_620 = __p2_620; \ - int64x2_t __rev0_620; __rev0_620 = __builtin_shufflevector(__s0_620, __s0_620, 1, 0); \ - int32x4_t __rev1_620; __rev1_620 = __builtin_shufflevector(__s1_620, __s1_620, 3, 2, 1, 0); \ - int32x2_t __rev2_620; __rev2_620 = __builtin_shufflevector(__s2_620, __s2_620, 1, 0); \ - __ret_620 = __noswap_vqdmlsl_s32(__rev0_620, __noswap_vget_high_s32(__rev1_620), __noswap_splat_lane_s32(__rev2_620, __p3_620)); \ - __ret_620 = __builtin_shufflevector(__ret_620, __ret_620, 1, 0); \ - __ret_620; \ +#define vmull_high_lane_u32(__p0_586, __p1_586, __p2_586) __extension__ ({ \ + uint64x2_t __ret_586; \ + uint32x4_t __s0_586 = __p0_586; \ + uint32x2_t __s1_586 = __p1_586; \ + uint32x4_t __rev0_586; __rev0_586 = __builtin_shufflevector(__s0_586, __s0_586, 3, 2, 1, 0); \ + uint32x2_t __rev1_586; __rev1_586 = __builtin_shufflevector(__s1_586, __s1_586, 1, 0); \ + __ret_586 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_586), __noswap_splat_lane_u32(__rev1_586, __p2_586)); \ + __ret_586 = __builtin_shufflevector(__ret_586, __ret_586, 1, 0); \ + __ret_586; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_high_lane_s16(__p0_621, __p1_621, __p2_621, __p3_621) __extension__ ({ \ - int32x4_t __ret_621; \ - int32x4_t __s0_621 = __p0_621; \ - int16x8_t __s1_621 = __p1_621; \ - int16x4_t __s2_621 = __p2_621; \ - __ret_621 = 
vqdmlsl_s16(__s0_621, vget_high_s16(__s1_621), splat_lane_s16(__s2_621, __p3_621)); \ - __ret_621; \ +#define vmull_high_lane_u16(__p0_587, __p1_587, __p2_587) __extension__ ({ \ + uint32x4_t __ret_587; \ + uint16x8_t __s0_587 = __p0_587; \ + uint16x4_t __s1_587 = __p1_587; \ + __ret_587 = vmull_u16(vget_high_u16(__s0_587), splat_lane_u16(__s1_587, __p2_587)); \ + __ret_587; \ }) #else -#define vqdmlsl_high_lane_s16(__p0_622, __p1_622, __p2_622, __p3_622) __extension__ ({ \ - int32x4_t __ret_622; \ - int32x4_t __s0_622 = __p0_622; \ - int16x8_t __s1_622 = __p1_622; \ - int16x4_t __s2_622 = __p2_622; \ - int32x4_t __rev0_622; __rev0_622 = __builtin_shufflevector(__s0_622, __s0_622, 3, 2, 1, 0); \ - int16x8_t __rev1_622; __rev1_622 = __builtin_shufflevector(__s1_622, __s1_622, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev2_622; __rev2_622 = __builtin_shufflevector(__s2_622, __s2_622, 3, 2, 1, 0); \ - __ret_622 = __noswap_vqdmlsl_s16(__rev0_622, __noswap_vget_high_s16(__rev1_622), __noswap_splat_lane_s16(__rev2_622, __p3_622)); \ - __ret_622 = __builtin_shufflevector(__ret_622, __ret_622, 3, 2, 1, 0); \ - __ret_622; \ +#define vmull_high_lane_u16(__p0_588, __p1_588, __p2_588) __extension__ ({ \ + uint32x4_t __ret_588; \ + uint16x8_t __s0_588 = __p0_588; \ + uint16x4_t __s1_588 = __p1_588; \ + uint16x8_t __rev0_588; __rev0_588 = __builtin_shufflevector(__s0_588, __s0_588, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x4_t __rev1_588; __rev1_588 = __builtin_shufflevector(__s1_588, __s1_588, 3, 2, 1, 0); \ + __ret_588 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_588), __noswap_splat_lane_u16(__rev1_588, __p2_588)); \ + __ret_588 = __builtin_shufflevector(__ret_588, __ret_588, 3, 2, 1, 0); \ + __ret_588; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_high_laneq_s32(__p0_623, __p1_623, __p2_623, __p3_623) __extension__ ({ \ - int64x2_t __ret_623; \ - int64x2_t __s0_623 = __p0_623; \ - int32x4_t __s1_623 = __p1_623; \ - int32x4_t __s2_623 = __p2_623; \ - __ret_623 = 
vqdmlsl_s32(__s0_623, vget_high_s32(__s1_623), splat_laneq_s32(__s2_623, __p3_623)); \ - __ret_623; \ +#define vmull_high_lane_s32(__p0_589, __p1_589, __p2_589) __extension__ ({ \ + int64x2_t __ret_589; \ + int32x4_t __s0_589 = __p0_589; \ + int32x2_t __s1_589 = __p1_589; \ + __ret_589 = vmull_s32(vget_high_s32(__s0_589), splat_lane_s32(__s1_589, __p2_589)); \ + __ret_589; \ }) #else -#define vqdmlsl_high_laneq_s32(__p0_624, __p1_624, __p2_624, __p3_624) __extension__ ({ \ - int64x2_t __ret_624; \ - int64x2_t __s0_624 = __p0_624; \ - int32x4_t __s1_624 = __p1_624; \ - int32x4_t __s2_624 = __p2_624; \ - int64x2_t __rev0_624; __rev0_624 = __builtin_shufflevector(__s0_624, __s0_624, 1, 0); \ - int32x4_t __rev1_624; __rev1_624 = __builtin_shufflevector(__s1_624, __s1_624, 3, 2, 1, 0); \ - int32x4_t __rev2_624; __rev2_624 = __builtin_shufflevector(__s2_624, __s2_624, 3, 2, 1, 0); \ - __ret_624 = __noswap_vqdmlsl_s32(__rev0_624, __noswap_vget_high_s32(__rev1_624), __noswap_splat_laneq_s32(__rev2_624, __p3_624)); \ - __ret_624 = __builtin_shufflevector(__ret_624, __ret_624, 1, 0); \ - __ret_624; \ +#define vmull_high_lane_s32(__p0_590, __p1_590, __p2_590) __extension__ ({ \ + int64x2_t __ret_590; \ + int32x4_t __s0_590 = __p0_590; \ + int32x2_t __s1_590 = __p1_590; \ + int32x4_t __rev0_590; __rev0_590 = __builtin_shufflevector(__s0_590, __s0_590, 3, 2, 1, 0); \ + int32x2_t __rev1_590; __rev1_590 = __builtin_shufflevector(__s1_590, __s1_590, 1, 0); \ + __ret_590 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_590), __noswap_splat_lane_s32(__rev1_590, __p2_590)); \ + __ret_590 = __builtin_shufflevector(__ret_590, __ret_590, 1, 0); \ + __ret_590; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_high_laneq_s16(__p0_625, __p1_625, __p2_625, __p3_625) __extension__ ({ \ - int32x4_t __ret_625; \ - int32x4_t __s0_625 = __p0_625; \ - int16x8_t __s1_625 = __p1_625; \ - int16x8_t __s2_625 = __p2_625; \ - __ret_625 = vqdmlsl_s16(__s0_625, vget_high_s16(__s1_625), 
splat_laneq_s16(__s2_625, __p3_625)); \ - __ret_625; \ +#define vmull_high_lane_s16(__p0_591, __p1_591, __p2_591) __extension__ ({ \ + int32x4_t __ret_591; \ + int16x8_t __s0_591 = __p0_591; \ + int16x4_t __s1_591 = __p1_591; \ + __ret_591 = vmull_s16(vget_high_s16(__s0_591), splat_lane_s16(__s1_591, __p2_591)); \ + __ret_591; \ }) #else -#define vqdmlsl_high_laneq_s16(__p0_626, __p1_626, __p2_626, __p3_626) __extension__ ({ \ - int32x4_t __ret_626; \ - int32x4_t __s0_626 = __p0_626; \ - int16x8_t __s1_626 = __p1_626; \ - int16x8_t __s2_626 = __p2_626; \ - int32x4_t __rev0_626; __rev0_626 = __builtin_shufflevector(__s0_626, __s0_626, 3, 2, 1, 0); \ - int16x8_t __rev1_626; __rev1_626 = __builtin_shufflevector(__s1_626, __s1_626, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2_626; __rev2_626 = __builtin_shufflevector(__s2_626, __s2_626, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_626 = __noswap_vqdmlsl_s16(__rev0_626, __noswap_vget_high_s16(__rev1_626), __noswap_splat_laneq_s16(__rev2_626, __p3_626)); \ - __ret_626 = __builtin_shufflevector(__ret_626, __ret_626, 3, 2, 1, 0); \ - __ret_626; \ +#define vmull_high_lane_s16(__p0_592, __p1_592, __p2_592) __extension__ ({ \ + int32x4_t __ret_592; \ + int16x8_t __s0_592 = __p0_592; \ + int16x4_t __s1_592 = __p1_592; \ + int16x8_t __rev0_592; __rev0_592 = __builtin_shufflevector(__s0_592, __s0_592, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_592; __rev1_592 = __builtin_shufflevector(__s1_592, __s1_592, 3, 2, 1, 0); \ + __ret_592 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_592), __noswap_splat_lane_s16(__rev1_592, __p2_592)); \ + __ret_592 = __builtin_shufflevector(__ret_592, __ret_592, 3, 2, 1, 0); \ + __ret_592; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_laneq_u32(__p0_593, __p1_593, __p2_593) __extension__ ({ \ + uint64x2_t __ret_593; \ + uint32x4_t __s0_593 = __p0_593; \ + uint32x4_t __s1_593 = __p1_593; \ + __ret_593 = vmull_u32(vget_high_u32(__s0_593), splat_laneq_u32(__s1_593, __p2_593)); \ + 
__ret_593; \ +}) +#else +#define vmull_high_laneq_u32(__p0_594, __p1_594, __p2_594) __extension__ ({ \ + uint64x2_t __ret_594; \ + uint32x4_t __s0_594 = __p0_594; \ + uint32x4_t __s1_594 = __p1_594; \ + uint32x4_t __rev0_594; __rev0_594 = __builtin_shufflevector(__s0_594, __s0_594, 3, 2, 1, 0); \ + uint32x4_t __rev1_594; __rev1_594 = __builtin_shufflevector(__s1_594, __s1_594, 3, 2, 1, 0); \ + __ret_594 = __noswap_vmull_u32(__noswap_vget_high_u32(__rev0_594), __noswap_splat_laneq_u32(__rev1_594, __p2_594)); \ + __ret_594 = __builtin_shufflevector(__ret_594, __ret_594, 1, 0); \ + __ret_594; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_laneq_u16(__p0_595, __p1_595, __p2_595) __extension__ ({ \ + uint32x4_t __ret_595; \ + uint16x8_t __s0_595 = __p0_595; \ + uint16x8_t __s1_595 = __p1_595; \ + __ret_595 = vmull_u16(vget_high_u16(__s0_595), splat_laneq_u16(__s1_595, __p2_595)); \ + __ret_595; \ +}) +#else +#define vmull_high_laneq_u16(__p0_596, __p1_596, __p2_596) __extension__ ({ \ + uint32x4_t __ret_596; \ + uint16x8_t __s0_596 = __p0_596; \ + uint16x8_t __s1_596 = __p1_596; \ + uint16x8_t __rev0_596; __rev0_596 = __builtin_shufflevector(__s0_596, __s0_596, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_596; __rev1_596 = __builtin_shufflevector(__s1_596, __s1_596, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_596 = __noswap_vmull_u16(__noswap_vget_high_u16(__rev0_596), __noswap_splat_laneq_u16(__rev1_596, __p2_596)); \ + __ret_596 = __builtin_shufflevector(__ret_596, __ret_596, 3, 2, 1, 0); \ + __ret_596; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_laneq_s32(__p0_597, __p1_597, __p2_597) __extension__ ({ \ + int64x2_t __ret_597; \ + int32x4_t __s0_597 = __p0_597; \ + int32x4_t __s1_597 = __p1_597; \ + __ret_597 = vmull_s32(vget_high_s32(__s0_597), splat_laneq_s32(__s1_597, __p2_597)); \ + __ret_597; \ +}) +#else +#define vmull_high_laneq_s32(__p0_598, __p1_598, __p2_598) __extension__ ({ \ + int64x2_t __ret_598; \ + int32x4_t __s0_598 = 
__p0_598; \ + int32x4_t __s1_598 = __p1_598; \ + int32x4_t __rev0_598; __rev0_598 = __builtin_shufflevector(__s0_598, __s0_598, 3, 2, 1, 0); \ + int32x4_t __rev1_598; __rev1_598 = __builtin_shufflevector(__s1_598, __s1_598, 3, 2, 1, 0); \ + __ret_598 = __noswap_vmull_s32(__noswap_vget_high_s32(__rev0_598), __noswap_splat_laneq_s32(__rev1_598, __p2_598)); \ + __ret_598 = __builtin_shufflevector(__ret_598, __ret_598, 1, 0); \ + __ret_598; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmull_high_laneq_s16(__p0_599, __p1_599, __p2_599) __extension__ ({ \ + int32x4_t __ret_599; \ + int16x8_t __s0_599 = __p0_599; \ + int16x8_t __s1_599 = __p1_599; \ + __ret_599 = vmull_s16(vget_high_s16(__s0_599), splat_laneq_s16(__s1_599, __p2_599)); \ + __ret_599; \ +}) +#else +#define vmull_high_laneq_s16(__p0_600, __p1_600, __p2_600) __extension__ ({ \ + int32x4_t __ret_600; \ + int16x8_t __s0_600 = __p0_600; \ + int16x8_t __s1_600 = __p1_600; \ + int16x8_t __rev0_600; __rev0_600 = __builtin_shufflevector(__s0_600, __s0_600, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_600; __rev1_600 = __builtin_shufflevector(__s1_600, __s1_600, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_600 = __noswap_vmull_s16(__noswap_vget_high_s16(__rev0_600), __noswap_splat_laneq_s16(__rev1_600, __p2_600)); \ + __ret_600 = __builtin_shufflevector(__ret_600, __ret_600, 3, 2, 1, 0); \ + __ret_600; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) { + uint64x2_t __ret; + __ret = vmull_n_u32(vget_high_u32(__p0), __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vmull_high_n_u32(uint32x4_t __p0, uint32_t __p1) { + uint64x2_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vmull_n_u32(__noswap_vget_high_u32(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, 
__ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) { + uint32x4_t __ret; + __ret = vmull_n_u16(vget_high_u16(__p0), __p1); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vmull_high_n_u16(uint16x8_t __p0, uint16_t __p1) { + uint32x4_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmull_n_u16(__noswap_vget_high_u16(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) { int64x2_t __ret; - __ret = vqdmlsl_n_s32(__p0, vget_high_s32(__p1), __p2); + __ret = vmull_n_s32(vget_high_s32(__p0), __p1); return __ret; } #else -__ai int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmull_high_n_s32(int32x4_t __p0, int32_t __p1) { int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __noswap_vqdmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vmull_n_s32(__noswap_vget_high_s32(__rev0), __p1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) { int32x4_t __ret; - __ret = vqdmlsl_n_s16(__p0, vget_high_s16(__p1), __p2); + __ret = vmull_n_s16(vget_high_s16(__p0), __p1); return __ret; } #else -__ai int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { +__ai 
__attribute__((target("neon"))) int32x4_t vmull_high_n_s16(int16x8_t __p0, int16_t __p1) { int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vqdmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmull_n_s16(__noswap_vget_high_s16(__rev0), __p1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64_t __ret; \ - int64_t __s0 = __p0; \ - int32_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __s2, __p3); \ - __ret; \ +#define vmull_laneq_u32(__p0_601, __p1_601, __p2_601) __extension__ ({ \ + uint64x2_t __ret_601; \ + uint32x2_t __s0_601 = __p0_601; \ + uint32x4_t __s1_601 = __p1_601; \ + __ret_601 = vmull_u32(__s0_601, splat_laneq_u32(__s1_601, __p2_601)); \ + __ret_601; \ }) #else -#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64_t __ret; \ - int64_t __s0 = __p0; \ - int32_t __s1 = __p1; \ - int32x2_t __s2 = __p2; \ - int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ - __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __rev2, __p3); \ - __ret; \ +#define vmull_laneq_u32(__p0_602, __p1_602, __p2_602) __extension__ ({ \ + uint64x2_t __ret_602; \ + uint32x2_t __s0_602 = __p0_602; \ + uint32x4_t __s1_602 = __p1_602; \ + uint32x2_t __rev0_602; __rev0_602 = __builtin_shufflevector(__s0_602, __s0_602, 1, 0); \ + uint32x4_t __rev1_602; __rev1_602 = __builtin_shufflevector(__s1_602, __s1_602, 3, 2, 1, 0); \ + __ret_602 = __noswap_vmull_u32(__rev0_602, __noswap_splat_laneq_u32(__rev1_602, __p2_602)); \ + __ret_602 = __builtin_shufflevector(__ret_602, __ret_602, 
1, 0); \ + __ret_602; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32_t __ret; \ - int32_t __s0 = __p0; \ - int16_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __s2, __p3); \ - __ret; \ +#define vmull_laneq_u16(__p0_603, __p1_603, __p2_603) __extension__ ({ \ + uint32x4_t __ret_603; \ + uint16x4_t __s0_603 = __p0_603; \ + uint16x8_t __s1_603 = __p1_603; \ + __ret_603 = vmull_u16(__s0_603, splat_laneq_u16(__s1_603, __p2_603)); \ + __ret_603; \ }) #else -#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32_t __ret; \ - int32_t __s0 = __p0; \ - int16_t __s1 = __p1; \ - int16x4_t __s2 = __p2; \ - int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __rev2, __p3); \ - __ret; \ +#define vmull_laneq_u16(__p0_604, __p1_604, __p2_604) __extension__ ({ \ + uint32x4_t __ret_604; \ + uint16x4_t __s0_604 = __p0_604; \ + uint16x8_t __s1_604 = __p1_604; \ + uint16x4_t __rev0_604; __rev0_604 = __builtin_shufflevector(__s0_604, __s0_604, 3, 2, 1, 0); \ + uint16x8_t __rev1_604; __rev1_604 = __builtin_shufflevector(__s1_604, __s1_604, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_604 = __noswap_vmull_u16(__rev0_604, __noswap_splat_laneq_u16(__rev1_604, __p2_604)); \ + __ret_604 = __builtin_shufflevector(__ret_604, __ret_604, 3, 2, 1, 0); \ + __ret_604; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64_t __ret; \ - int64_t __s0 = __p0; \ - int32_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __s2, __p3); \ - __ret; \ +#define vmull_laneq_s32(__p0_605, __p1_605, __p2_605) __extension__ ({ \ + int64x2_t __ret_605; \ + int32x2_t __s0_605 = __p0_605; \ + int32x4_t __s1_605 = __p1_605; \ + __ret_605 = vmull_s32(__s0_605, 
splat_laneq_s32(__s1_605, __p2_605)); \ + __ret_605; \ }) #else -#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ - int64_t __ret; \ - int64_t __s0 = __p0; \ - int32_t __s1 = __p1; \ - int32x4_t __s2 = __p2; \ - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __rev2, __p3); \ - __ret; \ +#define vmull_laneq_s32(__p0_606, __p1_606, __p2_606) __extension__ ({ \ + int64x2_t __ret_606; \ + int32x2_t __s0_606 = __p0_606; \ + int32x4_t __s1_606 = __p1_606; \ + int32x2_t __rev0_606; __rev0_606 = __builtin_shufflevector(__s0_606, __s0_606, 1, 0); \ + int32x4_t __rev1_606; __rev1_606 = __builtin_shufflevector(__s1_606, __s1_606, 3, 2, 1, 0); \ + __ret_606 = __noswap_vmull_s32(__rev0_606, __noswap_splat_laneq_s32(__rev1_606, __p2_606)); \ + __ret_606 = __builtin_shufflevector(__ret_606, __ret_606, 1, 0); \ + __ret_606; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32_t __ret; \ - int32_t __s0 = __p0; \ - int16_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __s2, __p3); \ - __ret; \ +#define vmull_laneq_s16(__p0_607, __p1_607, __p2_607) __extension__ ({ \ + int32x4_t __ret_607; \ + int16x4_t __s0_607 = __p0_607; \ + int16x8_t __s1_607 = __p1_607; \ + __ret_607 = vmull_s16(__s0_607, splat_laneq_s16(__s1_607, __p2_607)); \ + __ret_607; \ }) #else -#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ - int32_t __ret; \ - int32_t __s0 = __p0; \ - int16_t __s1 = __p1; \ - int16x8_t __s2 = __p2; \ - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __rev2, __p3); \ - __ret; \ +#define vmull_laneq_s16(__p0_608, __p1_608, __p2_608) __extension__ ({ \ + int32x4_t __ret_608; \ + int16x4_t __s0_608 = __p0_608; \ + int16x8_t 
__s1_608 = __p1_608; \ + int16x4_t __rev0_608; __rev0_608 = __builtin_shufflevector(__s0_608, __s0_608, 3, 2, 1, 0); \ + int16x8_t __rev1_608; __rev1_608 = __builtin_shufflevector(__s1_608, __s1_608, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_608 = __noswap_vmull_s16(__rev0_608, __noswap_splat_laneq_s16(__rev1_608, __p2_608)); \ + __ret_608 = __builtin_shufflevector(__ret_608, __ret_608, 3, 2, 1, 0); \ + __ret_608; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_laneq_s32(__p0_627, __p1_627, __p2_627, __p3_627) __extension__ ({ \ - int64x2_t __ret_627; \ - int64x2_t __s0_627 = __p0_627; \ - int32x2_t __s1_627 = __p1_627; \ - int32x4_t __s2_627 = __p2_627; \ - __ret_627 = vqdmlsl_s32(__s0_627, __s1_627, splat_laneq_s32(__s2_627, __p3_627)); \ - __ret_627; \ -}) +__ai __attribute__((target("neon"))) float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} #else -#define vqdmlsl_laneq_s32(__p0_628, __p1_628, __p2_628, __p3_628) __extension__ ({ \ - int64x2_t __ret_628; \ - int64x2_t __s0_628 = __p0_628; \ - int32x2_t __s1_628 = __p1_628; \ - int32x4_t __s2_628 = __p2_628; \ - int64x2_t __rev0_628; __rev0_628 = __builtin_shufflevector(__s0_628, __s0_628, 1, 0); \ - int32x2_t __rev1_628; __rev1_628 = __builtin_shufflevector(__s1_628, __s1_628, 1, 0); \ - int32x4_t __rev2_628; __rev2_628 = __builtin_shufflevector(__s2_628, __s2_628, 3, 2, 1, 0); \ - __ret_628 = __noswap_vqdmlsl_s32(__rev0_628, __rev1_628, __noswap_splat_laneq_s32(__rev2_628, __p3_628)); \ - __ret_628 = __builtin_shufflevector(__ret_628, __ret_628, 1, 0); \ - __ret_628; \ -}) +__ai __attribute__((target("neon"))) float64x2_t vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) 
__builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x2_t __noswap_vmulxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmlsl_laneq_s16(__p0_629, __p1_629, __p2_629, __p3_629) __extension__ ({ \ - int32x4_t __ret_629; \ - int32x4_t __s0_629 = __p0_629; \ - int16x4_t __s1_629 = __p1_629; \ - int16x8_t __s2_629 = __p2_629; \ - __ret_629 = vqdmlsl_s16(__s0_629, __s1_629, splat_laneq_s16(__s2_629, __p3_629)); \ - __ret_629; \ -}) +__ai __attribute__((target("neon"))) float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} #else -#define vqdmlsl_laneq_s16(__p0_630, __p1_630, __p2_630, __p3_630) __extension__ ({ \ - int32x4_t __ret_630; \ - int32x4_t __s0_630 = __p0_630; \ - int16x4_t __s1_630 = __p1_630; \ - int16x8_t __s2_630 = __p2_630; \ - int32x4_t __rev0_630; __rev0_630 = __builtin_shufflevector(__s0_630, __s0_630, 3, 2, 1, 0); \ - int16x4_t __rev1_630; __rev1_630 = __builtin_shufflevector(__s1_630, __s1_630, 3, 2, 1, 0); \ - int16x8_t __rev2_630; __rev2_630 = __builtin_shufflevector(__s2_630, __s2_630, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_630 = __noswap_vqdmlsl_s16(__rev0_630, __rev1_630, __noswap_splat_laneq_s16(__rev2_630, __p3_630)); \ - __ret_630 = __builtin_shufflevector(__ret_630, __ret_630, 3, 2, 1, 0); \ - __ret_630; \ -}) +__ai __attribute__((target("neon"))) float32x4_t vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) 
__builtin_neon_vmulxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x4_t __noswap_vmulxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vmulxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} #endif -__ai int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqdmulhs_s32(__p0, __p1); +__ai __attribute__((target("neon"))) float64x1_t vmulx_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 10); return __ret; } -__ai int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1); +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } -#ifdef __LITTLE_ENDIAN__ -#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __ret; \ - int32x4_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \ - __ret; \ -}) #else -#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __ret; \ - int32x4_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x2_t vmulx_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + 
float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +__ai __attribute__((target("neon"))) float32x2_t __noswap_vmulx_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vmulx_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} #endif +__ai __attribute__((target("neon"))) float64_t vmulxd_f64(float64_t __p0, float64_t __p1) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vmulxd_f64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) float32_t vmulxs_f32(float32_t __p0, float32_t __p1) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vmulxs_f32(__p0, __p1); + return __ret; +} +#define vmulxd_lane_f64(__p0_609, __p1_609, __p2_609) __extension__ ({ \ + float64_t __ret_609; \ + float64_t __s0_609 = __p0_609; \ + float64x1_t __s1_609 = __p1_609; \ + __ret_609 = vmulxd_f64(__s0_609, vget_lane_f64(__s1_609, __p2_609)); \ + __ret_609; \ +}) #ifdef __LITTLE_ENDIAN__ -#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \ - __ret; \ +#define vmulxs_lane_f32(__p0_610, __p1_610, __p2_610) __extension__ ({ \ + float32_t __ret_610; \ + float32_t __s0_610 = __p0_610; \ + float32x2_t __s1_610 = __p1_610; \ + __ret_610 = vmulxs_f32(__s0_610, vget_lane_f32(__s1_610, __p2_610)); \ + __ret_610; \ }) #else -#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = 
__builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ +#define vmulxs_lane_f32(__p0_611, __p1_611, __p2_611) __extension__ ({ \ + float32_t __ret_611; \ + float32_t __s0_611 = __p0_611; \ + float32x2_t __s1_611 = __p1_611; \ + float32x2_t __rev1_611; __rev1_611 = __builtin_shufflevector(__s1_611, __s1_611, 1, 0); \ + __ret_611 = vmulxs_f32(__s0_611, __noswap_vget_lane_f32(__rev1_611, __p2_611)); \ + __ret_611; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __ret; \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ - __ret; \ +#define vmulxq_lane_f64(__p0_612, __p1_612, __p2_612) __extension__ ({ \ + float64x2_t __ret_612; \ + float64x2_t __s0_612 = __p0_612; \ + float64x1_t __s1_612 = __p1_612; \ + __ret_612 = vmulxq_f64(__s0_612, splatq_lane_f64(__s1_612, __p2_612)); \ + __ret_612; \ }) #else -#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __ret; \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ +#define vmulxq_lane_f64(__p0_613, __p1_613, __p2_613) __extension__ ({ \ + float64x2_t __ret_613; \ + float64x2_t __s0_613 = __p0_613; \ + float64x1_t __s1_613 = __p1_613; \ + float64x2_t __rev0_613; __rev0_613 = __builtin_shufflevector(__s0_613, __s0_613, 1, 0); \ + __ret_613 = __noswap_vmulxq_f64(__rev0_613, __noswap_splatq_lane_f64(__s1_613, __p2_613)); \ + __ret_613 = 
__builtin_shufflevector(__ret_613, __ret_613, 1, 0); \ + __ret_613; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ - __ret; \ +#define vmulxq_lane_f32(__p0_614, __p1_614, __p2_614) __extension__ ({ \ + float32x4_t __ret_614; \ + float32x4_t __s0_614 = __p0_614; \ + float32x2_t __s1_614 = __p1_614; \ + __ret_614 = vmulxq_f32(__s0_614, splatq_lane_f32(__s1_614, __p2_614)); \ + __ret_614; \ }) #else -#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ +#define vmulxq_lane_f32(__p0_615, __p1_615, __p2_615) __extension__ ({ \ + float32x4_t __ret_615; \ + float32x4_t __s0_615 = __p0_615; \ + float32x2_t __s1_615 = __p1_615; \ + float32x4_t __rev0_615; __rev0_615 = __builtin_shufflevector(__s0_615, __s0_615, 3, 2, 1, 0); \ + float32x2_t __rev1_615; __rev1_615 = __builtin_shufflevector(__s1_615, __s1_615, 1, 0); \ + __ret_615 = __noswap_vmulxq_f32(__rev0_615, __noswap_splatq_lane_f32(__rev1_615, __p2_615)); \ + __ret_615 = __builtin_shufflevector(__ret_615, __ret_615, 3, 2, 1, 0); \ + __ret_615; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulhs_lane_s32(__p0_631, __p1_631, __p2_631) __extension__ ({ \ - int32_t __ret_631; \ - int32_t __s0_631 = __p0_631; \ - int32x2_t __s1_631 = __p1_631; \ - __ret_631 = vqdmulhs_s32(__s0_631, vget_lane_s32(__s1_631, __p2_631)); \ - __ret_631; \ +#define vmulx_lane_f32(__p0_616, __p1_616, __p2_616) 
__extension__ ({ \ + float32x2_t __ret_616; \ + float32x2_t __s0_616 = __p0_616; \ + float32x2_t __s1_616 = __p1_616; \ + __ret_616 = vmulx_f32(__s0_616, splat_lane_f32(__s1_616, __p2_616)); \ + __ret_616; \ }) #else -#define vqdmulhs_lane_s32(__p0_632, __p1_632, __p2_632) __extension__ ({ \ - int32_t __ret_632; \ - int32_t __s0_632 = __p0_632; \ - int32x2_t __s1_632 = __p1_632; \ - int32x2_t __rev1_632; __rev1_632 = __builtin_shufflevector(__s1_632, __s1_632, 1, 0); \ - __ret_632 = vqdmulhs_s32(__s0_632, __noswap_vget_lane_s32(__rev1_632, __p2_632)); \ - __ret_632; \ +#define vmulx_lane_f32(__p0_617, __p1_617, __p2_617) __extension__ ({ \ + float32x2_t __ret_617; \ + float32x2_t __s0_617 = __p0_617; \ + float32x2_t __s1_617 = __p1_617; \ + float32x2_t __rev0_617; __rev0_617 = __builtin_shufflevector(__s0_617, __s0_617, 1, 0); \ + float32x2_t __rev1_617; __rev1_617 = __builtin_shufflevector(__s1_617, __s1_617, 1, 0); \ + __ret_617 = __noswap_vmulx_f32(__rev0_617, __noswap_splat_lane_f32(__rev1_617, __p2_617)); \ + __ret_617 = __builtin_shufflevector(__ret_617, __ret_617, 1, 0); \ + __ret_617; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulhh_lane_s16(__p0_633, __p1_633, __p2_633) __extension__ ({ \ - int16_t __ret_633; \ - int16_t __s0_633 = __p0_633; \ - int16x4_t __s1_633 = __p1_633; \ - __ret_633 = vqdmulhh_s16(__s0_633, vget_lane_s16(__s1_633, __p2_633)); \ - __ret_633; \ +#define vmulxd_laneq_f64(__p0_618, __p1_618, __p2_618) __extension__ ({ \ + float64_t __ret_618; \ + float64_t __s0_618 = __p0_618; \ + float64x2_t __s1_618 = __p1_618; \ + __ret_618 = vmulxd_f64(__s0_618, vgetq_lane_f64(__s1_618, __p2_618)); \ + __ret_618; \ }) #else -#define vqdmulhh_lane_s16(__p0_634, __p1_634, __p2_634) __extension__ ({ \ - int16_t __ret_634; \ - int16_t __s0_634 = __p0_634; \ - int16x4_t __s1_634 = __p1_634; \ - int16x4_t __rev1_634; __rev1_634 = __builtin_shufflevector(__s1_634, __s1_634, 3, 2, 1, 0); \ - __ret_634 = vqdmulhh_s16(__s0_634, 
__noswap_vget_lane_s16(__rev1_634, __p2_634)); \ - __ret_634; \ +#define vmulxd_laneq_f64(__p0_619, __p1_619, __p2_619) __extension__ ({ \ + float64_t __ret_619; \ + float64_t __s0_619 = __p0_619; \ + float64x2_t __s1_619 = __p1_619; \ + float64x2_t __rev1_619; __rev1_619 = __builtin_shufflevector(__s1_619, __s1_619, 1, 0); \ + __ret_619 = vmulxd_f64(__s0_619, __noswap_vgetq_lane_f64(__rev1_619, __p2_619)); \ + __ret_619; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulhs_laneq_s32(__p0_635, __p1_635, __p2_635) __extension__ ({ \ - int32_t __ret_635; \ - int32_t __s0_635 = __p0_635; \ - int32x4_t __s1_635 = __p1_635; \ - __ret_635 = vqdmulhs_s32(__s0_635, vgetq_lane_s32(__s1_635, __p2_635)); \ - __ret_635; \ +#define vmulxs_laneq_f32(__p0_620, __p1_620, __p2_620) __extension__ ({ \ + float32_t __ret_620; \ + float32_t __s0_620 = __p0_620; \ + float32x4_t __s1_620 = __p1_620; \ + __ret_620 = vmulxs_f32(__s0_620, vgetq_lane_f32(__s1_620, __p2_620)); \ + __ret_620; \ }) #else -#define vqdmulhs_laneq_s32(__p0_636, __p1_636, __p2_636) __extension__ ({ \ - int32_t __ret_636; \ - int32_t __s0_636 = __p0_636; \ - int32x4_t __s1_636 = __p1_636; \ - int32x4_t __rev1_636; __rev1_636 = __builtin_shufflevector(__s1_636, __s1_636, 3, 2, 1, 0); \ - __ret_636 = vqdmulhs_s32(__s0_636, __noswap_vgetq_lane_s32(__rev1_636, __p2_636)); \ - __ret_636; \ +#define vmulxs_laneq_f32(__p0_621, __p1_621, __p2_621) __extension__ ({ \ + float32_t __ret_621; \ + float32_t __s0_621 = __p0_621; \ + float32x4_t __s1_621 = __p1_621; \ + float32x4_t __rev1_621; __rev1_621 = __builtin_shufflevector(__s1_621, __s1_621, 3, 2, 1, 0); \ + __ret_621 = vmulxs_f32(__s0_621, __noswap_vgetq_lane_f32(__rev1_621, __p2_621)); \ + __ret_621; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulhh_laneq_s16(__p0_637, __p1_637, __p2_637) __extension__ ({ \ - int16_t __ret_637; \ - int16_t __s0_637 = __p0_637; \ - int16x8_t __s1_637 = __p1_637; \ - __ret_637 = vqdmulhh_s16(__s0_637, vgetq_lane_s16(__s1_637, 
__p2_637)); \ - __ret_637; \ +#define vmulxq_laneq_f64(__p0_622, __p1_622, __p2_622) __extension__ ({ \ + float64x2_t __ret_622; \ + float64x2_t __s0_622 = __p0_622; \ + float64x2_t __s1_622 = __p1_622; \ + __ret_622 = vmulxq_f64(__s0_622, splatq_laneq_f64(__s1_622, __p2_622)); \ + __ret_622; \ }) #else -#define vqdmulhh_laneq_s16(__p0_638, __p1_638, __p2_638) __extension__ ({ \ - int16_t __ret_638; \ - int16_t __s0_638 = __p0_638; \ - int16x8_t __s1_638 = __p1_638; \ - int16x8_t __rev1_638; __rev1_638 = __builtin_shufflevector(__s1_638, __s1_638, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_638 = vqdmulhh_s16(__s0_638, __noswap_vgetq_lane_s16(__rev1_638, __p2_638)); \ - __ret_638; \ +#define vmulxq_laneq_f64(__p0_623, __p1_623, __p2_623) __extension__ ({ \ + float64x2_t __ret_623; \ + float64x2_t __s0_623 = __p0_623; \ + float64x2_t __s1_623 = __p1_623; \ + float64x2_t __rev0_623; __rev0_623 = __builtin_shufflevector(__s0_623, __s0_623, 1, 0); \ + float64x2_t __rev1_623; __rev1_623 = __builtin_shufflevector(__s1_623, __s1_623, 1, 0); \ + __ret_623 = __noswap_vmulxq_f64(__rev0_623, __noswap_splatq_laneq_f64(__rev1_623, __p2_623)); \ + __ret_623 = __builtin_shufflevector(__ret_623, __ret_623, 1, 0); \ + __ret_623; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __ret; \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ - __ret; \ +#define vmulxq_laneq_f32(__p0_624, __p1_624, __p2_624) __extension__ ({ \ + float32x4_t __ret_624; \ + float32x4_t __s0_624 = __p0_624; \ + float32x4_t __s1_624 = __p1_624; \ + __ret_624 = vmulxq_f32(__s0_624, splatq_laneq_f32(__s1_624, __p2_624)); \ + __ret_624; \ }) #else -#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __ret; \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 
1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ +#define vmulxq_laneq_f32(__p0_625, __p1_625, __p2_625) __extension__ ({ \ + float32x4_t __ret_625; \ + float32x4_t __s0_625 = __p0_625; \ + float32x4_t __s1_625 = __p1_625; \ + float32x4_t __rev0_625; __rev0_625 = __builtin_shufflevector(__s0_625, __s0_625, 3, 2, 1, 0); \ + float32x4_t __rev1_625; __rev1_625 = __builtin_shufflevector(__s1_625, __s1_625, 3, 2, 1, 0); \ + __ret_625 = __noswap_vmulxq_f32(__rev0_625, __noswap_splatq_laneq_f32(__rev1_625, __p2_625)); \ + __ret_625 = __builtin_shufflevector(__ret_625, __ret_625, 3, 2, 1, 0); \ + __ret_625; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ - __ret; \ +#define vmulx_laneq_f32(__p0_626, __p1_626, __p2_626) __extension__ ({ \ + float32x2_t __ret_626; \ + float32x2_t __s0_626 = __p0_626; \ + float32x4_t __s1_626 = __p1_626; \ + __ret_626 = vmulx_f32(__s0_626, splat_laneq_f32(__s1_626, __p2_626)); \ + __ret_626; \ }) #else -#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ +#define vmulx_laneq_f32(__p0_627, __p1_627, __p2_627) __extension__ ({ \ + float32x2_t 
__ret_627; \ + float32x2_t __s0_627 = __p0_627; \ + float32x4_t __s1_627 = __p1_627; \ + float32x2_t __rev0_627; __rev0_627 = __builtin_shufflevector(__s0_627, __s0_627, 1, 0); \ + float32x4_t __rev1_627; __rev1_627 = __builtin_shufflevector(__s1_627, __s1_627, 3, 2, 1, 0); \ + __ret_627 = __noswap_vmulx_f32(__rev0_627, __noswap_splat_laneq_f32(__rev1_627, __p2_627)); \ + __ret_627 = __builtin_shufflevector(__ret_627, __ret_627, 1, 0); \ + __ret_627; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __ret; \ - int32x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float64x2_t vnegq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = -__p0; + return __ret; +} #else -#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __ret; \ - int32x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float64x2_t vnegq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vnegq_s64(int64x2_t __p0) { + int64x2_t 
__ret; + __ret = -__p0; + return __ret; +} #else -#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vnegq_s64(int64x2_t __p0) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = -__rev0; + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif -__ai int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1); +__ai __attribute__((target("neon"))) float64x1_t vneg_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = -__p0; return __ret; } -__ai int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1); +__ai __attribute__((target("neon"))) int64x1_t vneg_s64(int64x1_t __p0) { + int64x1_t __ret; + __ret = -__p0; + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vnegd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vnegd_s64(__p0); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) { - int64x2_t __ret; - __ret = vqdmull_s32(vget_high_s32(__p0), vget_high_s32(__p1)); +__ai __attribute__((target("neon"))) uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) { - int64x2_t __ret; - int32x4_t 
__rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint8x16_t vpaddq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) { - int32x4_t __ret; - __ret = vqdmull_s16(vget_high_s16(__p0), vget_high_s16(__p1)); +__ai __attribute__((target("neon"))) uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) { - int32x4_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1)); +__ai __attribute__((target("neon"))) uint32x4_t vpaddq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret 
= __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmull_high_lane_s32(__p0_639, __p1_639, __p2_639) __extension__ ({ \ - int64x2_t __ret_639; \ - int32x4_t __s0_639 = __p0_639; \ - int32x2_t __s1_639 = __p1_639; \ - __ret_639 = vqdmull_s32(vget_high_s32(__s0_639), splat_lane_s32(__s1_639, __p2_639)); \ - __ret_639; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); + return __ret; +} #else -#define vqdmull_high_lane_s32(__p0_640, __p1_640, __p2_640) __extension__ ({ \ - int64x2_t __ret_640; \ - int32x4_t __s0_640 = __p0_640; \ - int32x2_t __s1_640 = __p1_640; \ - int32x4_t __rev0_640; __rev0_640 = __builtin_shufflevector(__s0_640, __s0_640, 3, 2, 1, 0); \ - int32x2_t __rev1_640; __rev1_640 = __builtin_shufflevector(__s1_640, __s1_640, 1, 0); \ - __ret_640 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_640), __noswap_splat_lane_s32(__rev1_640, __p2_640)); \ - __ret_640 = __builtin_shufflevector(__ret_640, __ret_640, 1, 0); \ - __ret_640; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vpaddq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmull_high_lane_s16(__p0_641, __p1_641, __p2_641) __extension__ ({ \ - int32x4_t __ret_641; \ - int16x8_t __s0_641 = __p0_641; \ - int16x4_t __s1_641 = __p1_641; \ - __ret_641 = vqdmull_s16(vget_high_s16(__s0_641), splat_lane_s16(__s1_641, __p2_641)); \ - __ret_641; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vpaddq_u16(uint16x8_t 
__p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} #else -#define vqdmull_high_lane_s16(__p0_642, __p1_642, __p2_642) __extension__ ({ \ - int32x4_t __ret_642; \ - int16x8_t __s0_642 = __p0_642; \ - int16x4_t __s1_642 = __p1_642; \ - int16x8_t __rev0_642; __rev0_642 = __builtin_shufflevector(__s0_642, __s0_642, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev1_642; __rev1_642 = __builtin_shufflevector(__s1_642, __s1_642, 3, 2, 1, 0); \ - __ret_642 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_642), __noswap_splat_lane_s16(__rev1_642, __p2_642)); \ - __ret_642 = __builtin_shufflevector(__ret_642, __ret_642, 3, 2, 1, 0); \ - __ret_642; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vpaddq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmull_high_laneq_s32(__p0_643, __p1_643, __p2_643) __extension__ ({ \ - int64x2_t __ret_643; \ - int32x4_t __s0_643 = __p0_643; \ - int32x4_t __s1_643 = __p1_643; \ - __ret_643 = vqdmull_s32(vget_high_s32(__s0_643), splat_laneq_s32(__s1_643, __p2_643)); \ - __ret_643; \ -}) +__ai __attribute__((target("neon"))) int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} #else -#define vqdmull_high_laneq_s32(__p0_644, __p1_644, __p2_644) __extension__ ({ \ - int64x2_t __ret_644; \ - int32x4_t __s0_644 = __p0_644; \ - int32x4_t __s1_644 = __p1_644; \ - int32x4_t __rev0_644; __rev0_644 = 
__builtin_shufflevector(__s0_644, __s0_644, 3, 2, 1, 0); \ - int32x4_t __rev1_644; __rev1_644 = __builtin_shufflevector(__s1_644, __s1_644, 3, 2, 1, 0); \ - __ret_644 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_644), __noswap_splat_laneq_s32(__rev1_644, __p2_644)); \ - __ret_644 = __builtin_shufflevector(__ret_644, __ret_644, 1, 0); \ - __ret_644; \ -}) +__ai __attribute__((target("neon"))) int8x16_t vpaddq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmull_high_laneq_s16(__p0_645, __p1_645, __p2_645) __extension__ ({ \ - int32x4_t __ret_645; \ - int16x8_t __s0_645 = __p0_645; \ - int16x8_t __s1_645 = __p1_645; \ - __ret_645 = vqdmull_s16(vget_high_s16(__s0_645), splat_laneq_s16(__s1_645, __p2_645)); \ - __ret_645; \ -}) +__ai __attribute__((target("neon"))) float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} #else -#define vqdmull_high_laneq_s16(__p0_646, __p1_646, __p2_646) __extension__ ({ \ - int32x4_t __ret_646; \ - int16x8_t __s0_646 = __p0_646; \ - int16x8_t __s1_646 = __p1_646; \ - int16x8_t __rev0_646; __rev0_646 = __builtin_shufflevector(__s0_646, __s0_646, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_646; __rev1_646 = __builtin_shufflevector(__s1_646, __s1_646, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_646 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_646), __noswap_splat_laneq_s16(__rev1_646, __p2_646)); \ - 
__ret_646 = __builtin_shufflevector(__ret_646, __ret_646, 3, 2, 1, 0); \ - __ret_646; \ -}) +__ai __attribute__((target("neon"))) float64x2_t vpaddq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) { - int64x2_t __ret; - __ret = vqdmull_n_s32(vget_high_s32(__p0), __p1); +__ai __attribute__((target("neon"))) float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else -__ai int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) { - int64x2_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __noswap_vqdmull_n_s32(__noswap_vget_high_s32(__rev0), __p1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) float32x4_t vpaddq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; - __ret = vqdmull_n_s16(vget_high_s16(__p0), __p1); + __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, 
(int8x16_t)__p1, 34); return __ret; } #else -__ai int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vpaddq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vqdmull_n_s16(__noswap_vget_high_s16(__rev0), __p1); + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulls_lane_s32(__p0_647, __p1_647, __p2_647) __extension__ ({ \ - int64_t __ret_647; \ - int32_t __s0_647 = __p0_647; \ - int32x2_t __s1_647 = __p1_647; \ - __ret_647 = vqdmulls_s32(__s0_647, vget_lane_s32(__s1_647, __p2_647)); \ - __ret_647; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} #else -#define vqdmulls_lane_s32(__p0_648, __p1_648, __p2_648) __extension__ ({ \ - int64_t __ret_648; \ - int32_t __s0_648 = __p0_648; \ - int32x2_t __s1_648 = __p1_648; \ - int32x2_t __rev1_648; __rev1_648 = __builtin_shufflevector(__s1_648, __s1_648, 1, 0); \ - __ret_648 = vqdmulls_s32(__s0_648, __noswap_vget_lane_s32(__rev1_648, __p2_648)); \ - __ret_648; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vpaddq_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} 
#endif #ifdef __LITTLE_ENDIAN__ -#define vqdmullh_lane_s16(__p0_649, __p1_649, __p2_649) __extension__ ({ \ - int32_t __ret_649; \ - int16_t __s0_649 = __p0_649; \ - int16x4_t __s1_649 = __p1_649; \ - __ret_649 = vqdmullh_s16(__s0_649, vget_lane_s16(__s1_649, __p2_649)); \ - __ret_649; \ -}) +__ai __attribute__((target("neon"))) int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} #else -#define vqdmullh_lane_s16(__p0_650, __p1_650, __p2_650) __extension__ ({ \ - int32_t __ret_650; \ - int16_t __s0_650 = __p0_650; \ - int16x4_t __s1_650 = __p1_650; \ - int16x4_t __rev1_650; __rev1_650 = __builtin_shufflevector(__s1_650, __s1_650, 3, 2, 1, 0); \ - __ret_650 = vqdmullh_s16(__s0_650, __noswap_vget_lane_s16(__rev1_650, __p2_650)); \ - __ret_650; \ -}) +__ai __attribute__((target("neon"))) int16x8_t vpaddq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vpaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmulls_laneq_s32(__p0_651, __p1_651, __p2_651) __extension__ ({ \ - int64_t __ret_651; \ - int32_t __s0_651 = __p0_651; \ - int32x4_t __s1_651 = __p1_651; \ - __ret_651 = vqdmulls_s32(__s0_651, vgetq_lane_s32(__s1_651, __p2_651)); \ - __ret_651; \ -}) +__ai __attribute__((target("neon"))) uint64_t vpaddd_u64(uint64x2_t __p0) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vpaddd_u64(__p0); + return __ret; +} #else -#define vqdmulls_laneq_s32(__p0_652, __p1_652, __p2_652) __extension__ ({ \ - int64_t __ret_652; \ - int32_t __s0_652 = __p0_652; \ - int32x4_t __s1_652 = __p1_652; \ - 
int32x4_t __rev1_652; __rev1_652 = __builtin_shufflevector(__s1_652, __s1_652, 3, 2, 1, 0); \ - __ret_652 = vqdmulls_s32(__s0_652, __noswap_vgetq_lane_s32(__rev1_652, __p2_652)); \ - __ret_652; \ -}) +__ai __attribute__((target("neon"))) uint64_t vpaddd_u64(uint64x2_t __p0) { + uint64_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (uint64_t) __builtin_neon_vpaddd_u64(__rev0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmullh_laneq_s16(__p0_653, __p1_653, __p2_653) __extension__ ({ \ - int32_t __ret_653; \ - int16_t __s0_653 = __p0_653; \ - int16x8_t __s1_653 = __p1_653; \ - __ret_653 = vqdmullh_s16(__s0_653, vgetq_lane_s16(__s1_653, __p2_653)); \ - __ret_653; \ -}) +__ai __attribute__((target("neon"))) float64_t vpaddd_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpaddd_f64(__p0); + return __ret; +} #else -#define vqdmullh_laneq_s16(__p0_654, __p1_654, __p2_654) __extension__ ({ \ - int32_t __ret_654; \ - int16_t __s0_654 = __p0_654; \ - int16x8_t __s1_654 = __p1_654; \ - int16x8_t __rev1_654; __rev1_654 = __builtin_shufflevector(__s1_654, __s1_654, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_654 = vqdmullh_s16(__s0_654, __noswap_vgetq_lane_s16(__rev1_654, __p2_654)); \ - __ret_654; \ -}) +__ai __attribute__((target("neon"))) float64_t vpaddd_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vpaddd_f64(__rev0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmull_laneq_s32(__p0_655, __p1_655, __p2_655) __extension__ ({ \ - int64x2_t __ret_655; \ - int32x2_t __s0_655 = __p0_655; \ - int32x4_t __s1_655 = __p1_655; \ - __ret_655 = vqdmull_s32(__s0_655, splat_laneq_s32(__s1_655, __p2_655)); \ - __ret_655; \ -}) +__ai __attribute__((target("neon"))) int64_t vpaddd_s64(int64x2_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vpaddd_s64(__p0); + return 
__ret; +} #else -#define vqdmull_laneq_s32(__p0_656, __p1_656, __p2_656) __extension__ ({ \ - int64x2_t __ret_656; \ - int32x2_t __s0_656 = __p0_656; \ - int32x4_t __s1_656 = __p1_656; \ - int32x2_t __rev0_656; __rev0_656 = __builtin_shufflevector(__s0_656, __s0_656, 1, 0); \ - int32x4_t __rev1_656; __rev1_656 = __builtin_shufflevector(__s1_656, __s1_656, 3, 2, 1, 0); \ - __ret_656 = __noswap_vqdmull_s32(__rev0_656, __noswap_splat_laneq_s32(__rev1_656, __p2_656)); \ - __ret_656 = __builtin_shufflevector(__ret_656, __ret_656, 1, 0); \ - __ret_656; \ -}) +__ai __attribute__((target("neon"))) int64_t vpaddd_s64(int64x2_t __p0) { + int64_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64_t) __builtin_neon_vpaddd_s64(__rev0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqdmull_laneq_s16(__p0_657, __p1_657, __p2_657) __extension__ ({ \ - int32x4_t __ret_657; \ - int16x4_t __s0_657 = __p0_657; \ - int16x8_t __s1_657 = __p1_657; \ - __ret_657 = vqdmull_s16(__s0_657, splat_laneq_s16(__s1_657, __p2_657)); \ - __ret_657; \ -}) -#else -#define vqdmull_laneq_s16(__p0_658, __p1_658, __p2_658) __extension__ ({ \ - int32x4_t __ret_658; \ - int16x4_t __s0_658 = __p0_658; \ - int16x8_t __s1_658 = __p1_658; \ - int16x4_t __rev0_658; __rev0_658 = __builtin_shufflevector(__s0_658, __s0_658, 3, 2, 1, 0); \ - int16x8_t __rev1_658; __rev1_658 = __builtin_shufflevector(__s1_658, __s1_658, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_658 = __noswap_vqdmull_s16(__rev0_658, __noswap_splat_laneq_s16(__rev1_658, __p2_658)); \ - __ret_658 = __builtin_shufflevector(__ret_658, __ret_658, 3, 2, 1, 0); \ - __ret_658; \ -}) -#endif - -__ai int16_t vqmovns_s32(int32_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0); +__ai __attribute__((target("neon"))) float32_t vpadds_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpadds_f32(__p0); return __ret; } -__ai int32_t vqmovnd_s64(int64_t 
__p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0); +#else +__ai __attribute__((target("neon"))) float32_t vpadds_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vpadds_f32(__rev0); return __ret; } -__ai int8_t vqmovnh_s16(int16_t __p0) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } -__ai uint16_t vqmovns_u32(uint32_t __p0) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0); +#else +__ai __attribute__((target("neon"))) uint8x16_t vpmaxq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint32_t vqmovnd_u64(uint64_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } -__ai uint8_t vqmovnh_u16(uint16_t __p0) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0); +#else +__ai __attribute__((target("neon"))) uint32x4_t vpmaxq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + 
uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } +#endif + #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; - __ret = vcombine_u16(__p0, vqmovn_u32(__p1)); + __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vpmaxq_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __noswap_vcombine_u16(__rev0, __noswap_vqmovn_u32(__rev1)); + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { - uint32x4_t __ret; - __ret = vcombine_u32(__p0, vqmovn_u64(__p1)); +__ai __attribute__((target("neon"))) int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else -__ai uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { - uint32x4_t __ret; - uint32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __noswap_vcombine_u32(__rev0, __noswap_vqmovn_u64(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int8x16_t vpmaxq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { - uint8x16_t __ret; - __ret = vcombine_u8(__p0, vqmovn_u16(__p1)); +__ai __attribute__((target("neon"))) float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else -__ai uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { - uint8x16_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vcombine_u8(__rev0, __noswap_vqmovn_u16(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float64x2_t vpmaxq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 
42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { - int16x8_t __ret; - __ret = vcombine_s16(__p0, vqmovn_s32(__p1)); +__ai __attribute__((target("neon"))) float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else -__ai int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { - int16x8_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __noswap_vcombine_s16(__rev0, __noswap_vqmovn_s32(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float32x4_t vpmaxq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; - __ret = vcombine_s32(__p0, vqmovn_s64(__p1)); + __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); return __ret; } #else -__ai int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vpmaxq_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = 
__noswap_vcombine_s32(__rev0, __noswap_vqmovn_s64(__rev1)); + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { - int8x16_t __ret; - __ret = vcombine_s8(__p0, vqmovn_s16(__p1)); +__ai __attribute__((target("neon"))) int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); return __ret; } #else -__ai int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { - int8x16_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int16x8_t vpmaxq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vcombine_s8(__rev0, __noswap_vqmovn_s16(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vpmaxq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif -__ai uint16_t vqmovuns_s32(int32_t __p0) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vqmovuns_s32(__p0); - return __ret; -} -__ai uint32_t vqmovund_s64(int64_t __p0) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vqmovund_s64(__p0); +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64_t vpmaxqd_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) 
__builtin_neon_vpmaxqd_f64(__p0); return __ret; } -__ai uint8_t vqmovunh_s16(int16_t __p0) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vqmovunh_s16(__p0); +#else +__ai __attribute__((target("neon"))) float64_t vpmaxqd_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vpmaxqd_f64(__rev0); return __ret; } +#endif + #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) { - uint16x8_t __ret; - __ret = vcombine_u16((uint16x4_t)(__p0), vqmovun_s32(__p1)); +__ai __attribute__((target("neon"))) float32_t vpmaxs_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpmaxs_f32(__p0); return __ret; } #else -__ai uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) { - uint16x8_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __noswap_vcombine_u16((uint16x4_t)(__rev0), __noswap_vqmovun_s32(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float32_t vpmaxs_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vpmaxs_f32(__rev0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) { - uint32x4_t __ret; - __ret = vcombine_u32((uint32x2_t)(__p0), vqmovun_s64(__p1)); +__ai __attribute__((target("neon"))) float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else -__ai uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) { - uint32x4_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, 
__p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __noswap_vcombine_u32((uint32x2_t)(__rev0), __noswap_vqmovun_s64(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float64x2_t vpmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) { - uint8x16_t __ret; - __ret = vcombine_u8((uint8x8_t)(__p0), vqmovun_s16(__p1)); +__ai __attribute__((target("neon"))) float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); return __ret; } #else -__ai uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) { - uint8x16_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vcombine_u8((uint8x8_t)(__rev0), __noswap_vqmovun_s16(__rev1)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float32x4_t vpmaxnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vpmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef 
__LITTLE_ENDIAN__ -__ai int64x2_t vqnegq_s64(int64x2_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 35); +__ai __attribute__((target("neon"))) float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); return __ret; } #else -__ai int64x2_t vqnegq_s64(int64x2_t __p0) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 35); +__ai __attribute__((target("neon"))) float32x2_t vpmaxnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vpmaxnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -__ai int64x1_t vqneg_s64(int64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3); +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64_t vpmaxnmqd_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__p0); return __ret; } -__ai int8_t vqnegb_s8(int8_t __p0) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vqnegb_s8(__p0); +#else +__ai __attribute__((target("neon"))) float64_t vpmaxnmqd_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vpmaxnmqd_f64(__rev0); return __ret; } -__ai int32_t vqnegs_s32(int32_t __p0) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32_t vpmaxnms_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) 
__builtin_neon_vpmaxnms_f32(__p0); return __ret; } -__ai int64_t vqnegd_s64(int64_t __p0) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0); +#else +__ai __attribute__((target("neon"))) float32_t vpmaxnms_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vpmaxnms_f32(__rev0); return __ret; } -__ai int16_t vqnegh_s16(int16_t __p0) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } -__ai int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1); +#else +__ai __attribute__((target("neon"))) uint8x16_t vpminq_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } -#ifdef __LITTLE_ENDIAN__ -#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __ret; \ - 
int32x4_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \ - __ret; \ -}) #else -#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __ret; \ - int32x4_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vpminq_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \ - __ret; \ -}) -#else -#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t 
__ret; + __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vpminq_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __ret; \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); + return __ret; +} #else -#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __ret; \ - int32x2_t __s0 = __p0; \ - int32x2_t __s1 = __p1; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16_t vpminq_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 
32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} #else -#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - int16x4_t __s1 = __p1; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float64x2_t vpminq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmulhs_lane_s32(__p0_659, __p1_659, __p2_659) __extension__ ({ \ - int32_t __ret_659; \ - int32_t __s0_659 = __p0_659; \ - int32x2_t __s1_659 = __p1_659; \ - __ret_659 = vqrdmulhs_s32(__s0_659, vget_lane_s32(__s1_659, __p2_659)); \ - __ret_659; \ -}) +__ai __attribute__((target("neon"))) float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) 
__builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} #else -#define vqrdmulhs_lane_s32(__p0_660, __p1_660, __p2_660) __extension__ ({ \ - int32_t __ret_660; \ - int32_t __s0_660 = __p0_660; \ - int32x2_t __s1_660 = __p1_660; \ - int32x2_t __rev1_660; __rev1_660 = __builtin_shufflevector(__s1_660, __s1_660, 1, 0); \ - __ret_660 = vqrdmulhs_s32(__s0_660, __noswap_vget_lane_s32(__rev1_660, __p2_660)); \ - __ret_660; \ -}) +__ai __attribute__((target("neon"))) float32x4_t vpminq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmulhh_lane_s16(__p0_661, __p1_661, __p2_661) __extension__ ({ \ - int16_t __ret_661; \ - int16_t __s0_661 = __p0_661; \ - int16x4_t __s1_661 = __p1_661; \ - __ret_661 = vqrdmulhh_s16(__s0_661, vget_lane_s16(__s1_661, __p2_661)); \ - __ret_661; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} #else -#define vqrdmulhh_lane_s16(__p0_662, __p1_662, __p2_662) __extension__ ({ \ - int16_t __ret_662; \ - int16_t __s0_662 = __p0_662; \ - int16x4_t __s1_662 = __p1_662; \ - int16x4_t __rev1_662; __rev1_662 = __builtin_shufflevector(__s1_662, __s1_662, 3, 2, 1, 0); \ - __ret_662 = vqrdmulhh_s16(__s0_662, __noswap_vget_lane_s16(__rev1_662, __p2_662)); \ - __ret_662; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vpminq_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; 
__rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmulhs_laneq_s32(__p0_663, __p1_663, __p2_663) __extension__ ({ \ - int32_t __ret_663; \ - int32_t __s0_663 = __p0_663; \ - int32x4_t __s1_663 = __p1_663; \ - __ret_663 = vqrdmulhs_s32(__s0_663, vgetq_lane_s32(__s1_663, __p2_663)); \ - __ret_663; \ -}) +__ai __attribute__((target("neon"))) int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} #else -#define vqrdmulhs_laneq_s32(__p0_664, __p1_664, __p2_664) __extension__ ({ \ - int32_t __ret_664; \ - int32_t __s0_664 = __p0_664; \ - int32x4_t __s1_664 = __p1_664; \ - int32x4_t __rev1_664; __rev1_664 = __builtin_shufflevector(__s1_664, __s1_664, 3, 2, 1, 0); \ - __ret_664 = vqrdmulhs_s32(__s0_664, __noswap_vgetq_lane_s32(__rev1_664, __p2_664)); \ - __ret_664; \ -}) +__ai __attribute__((target("neon"))) int16x8_t vpminq_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vpminq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmulhh_laneq_s16(__p0_665, __p1_665, __p2_665) __extension__ ({ \ - int16_t __ret_665; \ - int16_t __s0_665 = __p0_665; \ - int16x8_t __s1_665 = __p1_665; \ - __ret_665 = vqrdmulhh_s16(__s0_665, vgetq_lane_s16(__s1_665, __p2_665)); \ - __ret_665; \ -}) +__ai __attribute__((target("neon"))) float64_t vpminqd_f64(float64x2_t __p0) { + float64_t __ret; + __ret = 
(float64_t) __builtin_neon_vpminqd_f64(__p0); + return __ret; +} #else -#define vqrdmulhh_laneq_s16(__p0_666, __p1_666, __p2_666) __extension__ ({ \ - int16_t __ret_666; \ - int16_t __s0_666 = __p0_666; \ - int16x8_t __s1_666 = __p1_666; \ - int16x8_t __rev1_666; __rev1_666 = __builtin_shufflevector(__s1_666, __s1_666, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_666 = vqrdmulhh_s16(__s0_666, __noswap_vgetq_lane_s16(__rev1_666, __p2_666)); \ - __ret_666; \ -}) +__ai __attribute__((target("neon"))) float64_t vpminqd_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vpminqd_f64(__rev0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __ret; \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32_t vpmins_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpmins_f32(__p0); + return __ret; +} #else -#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x4_t __ret; \ - int32x4_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32_t vpmins_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vpmins_f32(__rev0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ 
({ \ - int16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); + return __ret; +} #else -#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x8_t __ret; \ - int16x8_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float64x2_t vpminnmq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __ret; \ - int32x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 41); + return __ret; +} #else -#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ - int32x2_t __ret; \ 
- int32x2_t __s0 = __p0; \ - int32x4_t __s1 = __p1; \ - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x4_t vpminnmq_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vpminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 9); + return __ret; +} #else -#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ - int16x4_t __ret; \ - int16x4_t __s0 = __p0; \ - int16x8_t __s1 = __p1; \ - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x2_t vpminnm_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float32x2_t) __builtin_neon_vpminnm_v((int8x8_t)__rev0, (int8x8_t)__rev1, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64_t vpminnmqd_f64(float64x2_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64_t vpminnmqd_f64(float64x2_t __p0) { + float64_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64_t) __builtin_neon_vpminnmqd_f64(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32_t vpminnms_f32(float32x2_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vpminnms_f32(__p0); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32_t vpminnms_f32(float32x2_t __p0) { + float32_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32_t) __builtin_neon_vpminnms_f32(__rev0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vqabsq_s64(int64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__p0, 35); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vqabsq_s64(int64x2_t __p0) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vqabsq_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif -__ai uint8_t vqrshlb_u8(uint8_t __p0, int8_t __p1) { +__ai __attribute__((target("neon"))) int64x1_t vqabs_s64(int64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqabs_v((int8x8_t)__p0, 3); + return 
__ret; +} +__ai __attribute__((target("neon"))) int8_t vqabsb_s8(int8_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqabsb_s8(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32_t vqabss_s32(int32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqabss_s32(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vqabsd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqabsd_s64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16_t vqabsh_s16(int16_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqabsh_s16(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8_t vqaddb_u8(uint8_t __p0, uint8_t __p1) { uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1); + __ret = (uint8_t) __builtin_neon_vqaddb_u8(__p0, __p1); return __ret; } -__ai uint32_t vqrshls_u32(uint32_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) uint32_t vqadds_u32(uint32_t __p0, uint32_t __p1) { uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1); + __ret = (uint32_t) __builtin_neon_vqadds_u32(__p0, __p1); return __ret; } -__ai uint64_t vqrshld_u64(uint64_t __p0, int64_t __p1) { +__ai __attribute__((target("neon"))) uint64_t vqaddd_u64(uint64_t __p0, uint64_t __p1) { uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1); + __ret = (uint64_t) __builtin_neon_vqaddd_u64(__p0, __p1); return __ret; } -__ai uint16_t vqrshlh_u16(uint16_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) uint16_t vqaddh_u16(uint16_t __p0, uint16_t __p1) { uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1); + __ret = (uint16_t) __builtin_neon_vqaddh_u16(__p0, __p1); return __ret; } -__ai int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) { +__ai __attribute__((target("neon"))) int8_t vqaddb_s8(int8_t __p0, int8_t __p1) { int8_t __ret; - __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1); + 
__ret = (int8_t) __builtin_neon_vqaddb_s8(__p0, __p1); return __ret; } -__ai int32_t vqrshls_s32(int32_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32_t vqadds_s32(int32_t __p0, int32_t __p1) { int32_t __ret; - __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1); + __ret = (int32_t) __builtin_neon_vqadds_s32(__p0, __p1); return __ret; } -__ai int64_t vqrshld_s64(int64_t __p0, int64_t __p1) { +__ai __attribute__((target("neon"))) int64_t vqaddd_s64(int64_t __p0, int64_t __p1) { int64_t __ret; - __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1); + __ret = (int64_t) __builtin_neon_vqaddd_s64(__p0, __p1); return __ret; } -__ai int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) { +__ai __attribute__((target("neon"))) int16_t vqaddh_s16(int16_t __p0, int16_t __p1) { int16_t __ret; - __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1); + __ret = (int16_t) __builtin_neon_vqaddh_s16(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vqdmlals_s32(int64_t __p0, int32_t __p1, int32_t __p2) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqdmlals_s32(__p0, __p1, __p2); + return __ret; +} +__ai __attribute__((target("neon"))) int32_t vqdmlalh_s16(int32_t __p0, int16_t __p1, int16_t __p2) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqdmlalh_s16(__p0, __p1, __p2); return __ret; } #ifdef __LITTLE_ENDIAN__ -#define vqrshrn_high_n_u32(__p0_667, __p1_667, __p2_667) __extension__ ({ \ - uint16x8_t __ret_667; \ - uint16x4_t __s0_667 = __p0_667; \ - uint32x4_t __s1_667 = __p1_667; \ - __ret_667 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_667), (uint16x4_t)(vqrshrn_n_u32(__s1_667, __p2_667)))); \ - __ret_667; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + __ret = vqdmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); + return __ret; +} #else -#define vqrshrn_high_n_u32(__p0_668, __p1_668, __p2_668) __extension__ 
({ \ - uint16x8_t __ret_668; \ - uint16x4_t __s0_668 = __p0_668; \ - uint32x4_t __s1_668 = __p1_668; \ - uint16x4_t __rev0_668; __rev0_668 = __builtin_shufflevector(__s0_668, __s0_668, 3, 2, 1, 0); \ - uint32x4_t __rev1_668; __rev1_668 = __builtin_shufflevector(__s1_668, __s1_668, 3, 2, 1, 0); \ - __ret_668 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_668), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_668, __p2_668)))); \ - __ret_668 = __builtin_shufflevector(__ret_668, __ret_668, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_668; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vqdmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vqdmlal_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrn_high_n_u64(__p0_669, __p1_669, __p2_669) __extension__ ({ \ - uint32x4_t __ret_669; \ - uint32x2_t __s0_669 = __p0_669; \ - uint64x2_t __s1_669 = __p1_669; \ - __ret_669 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_669), (uint32x2_t)(vqrshrn_n_u64(__s1_669, __p2_669)))); \ - __ret_669; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + __ret = vqdmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); + return __ret; +} #else -#define vqrshrn_high_n_u64(__p0_670, __p1_670, __p2_670) __extension__ ({ \ - uint32x4_t __ret_670; \ - uint32x2_t __s0_670 = __p0_670; \ - uint64x2_t __s1_670 = __p1_670; \ - uint32x2_t __rev0_670; __rev0_670 = __builtin_shufflevector(__s0_670, __s0_670, 1, 0); \ - uint64x2_t __rev1_670; __rev1_670 = __builtin_shufflevector(__s1_670, 
__s1_670, 1, 0); \ - __ret_670 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_670), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_670, __p2_670)))); \ - __ret_670 = __builtin_shufflevector(__ret_670, __ret_670, 3, 2, 1, 0); \ - __ret_670; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vqdmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vqdmlal_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrn_high_n_u16(__p0_671, __p1_671, __p2_671) __extension__ ({ \ - uint8x16_t __ret_671; \ - uint8x8_t __s0_671 = __p0_671; \ - uint16x8_t __s1_671 = __p1_671; \ - __ret_671 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_671), (uint8x8_t)(vqrshrn_n_u16(__s1_671, __p2_671)))); \ - __ret_671; \ +#define vqdmlal_high_lane_s32(__p0_628, __p1_628, __p2_628, __p3_628) __extension__ ({ \ + int64x2_t __ret_628; \ + int64x2_t __s0_628 = __p0_628; \ + int32x4_t __s1_628 = __p1_628; \ + int32x2_t __s2_628 = __p2_628; \ + __ret_628 = vqdmlal_s32(__s0_628, vget_high_s32(__s1_628), splat_lane_s32(__s2_628, __p3_628)); \ + __ret_628; \ }) #else -#define vqrshrn_high_n_u16(__p0_672, __p1_672, __p2_672) __extension__ ({ \ - uint8x16_t __ret_672; \ - uint8x8_t __s0_672 = __p0_672; \ - uint16x8_t __s1_672 = __p1_672; \ - uint8x8_t __rev0_672; __rev0_672 = __builtin_shufflevector(__s0_672, __s0_672, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_672; __rev1_672 = __builtin_shufflevector(__s1_672, __s1_672, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_672 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_672), 
(uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_672, __p2_672)))); \ - __ret_672 = __builtin_shufflevector(__ret_672, __ret_672, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_672; \ +#define vqdmlal_high_lane_s32(__p0_629, __p1_629, __p2_629, __p3_629) __extension__ ({ \ + int64x2_t __ret_629; \ + int64x2_t __s0_629 = __p0_629; \ + int32x4_t __s1_629 = __p1_629; \ + int32x2_t __s2_629 = __p2_629; \ + int64x2_t __rev0_629; __rev0_629 = __builtin_shufflevector(__s0_629, __s0_629, 1, 0); \ + int32x4_t __rev1_629; __rev1_629 = __builtin_shufflevector(__s1_629, __s1_629, 3, 2, 1, 0); \ + int32x2_t __rev2_629; __rev2_629 = __builtin_shufflevector(__s2_629, __s2_629, 1, 0); \ + __ret_629 = __noswap_vqdmlal_s32(__rev0_629, __noswap_vget_high_s32(__rev1_629), __noswap_splat_lane_s32(__rev2_629, __p3_629)); \ + __ret_629 = __builtin_shufflevector(__ret_629, __ret_629, 1, 0); \ + __ret_629; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrn_high_n_s32(__p0_673, __p1_673, __p2_673) __extension__ ({ \ - int16x8_t __ret_673; \ - int16x4_t __s0_673 = __p0_673; \ - int32x4_t __s1_673 = __p1_673; \ - __ret_673 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_673), (int16x4_t)(vqrshrn_n_s32(__s1_673, __p2_673)))); \ - __ret_673; \ +#define vqdmlal_high_lane_s16(__p0_630, __p1_630, __p2_630, __p3_630) __extension__ ({ \ + int32x4_t __ret_630; \ + int32x4_t __s0_630 = __p0_630; \ + int16x8_t __s1_630 = __p1_630; \ + int16x4_t __s2_630 = __p2_630; \ + __ret_630 = vqdmlal_s16(__s0_630, vget_high_s16(__s1_630), splat_lane_s16(__s2_630, __p3_630)); \ + __ret_630; \ }) #else -#define vqrshrn_high_n_s32(__p0_674, __p1_674, __p2_674) __extension__ ({ \ - int16x8_t __ret_674; \ - int16x4_t __s0_674 = __p0_674; \ - int32x4_t __s1_674 = __p1_674; \ - int16x4_t __rev0_674; __rev0_674 = __builtin_shufflevector(__s0_674, __s0_674, 3, 2, 1, 0); \ - int32x4_t __rev1_674; __rev1_674 = __builtin_shufflevector(__s1_674, __s1_674, 3, 2, 1, 0); \ - __ret_674 = 
(int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_674), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_674, __p2_674)))); \ - __ret_674 = __builtin_shufflevector(__ret_674, __ret_674, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_674; \ +#define vqdmlal_high_lane_s16(__p0_631, __p1_631, __p2_631, __p3_631) __extension__ ({ \ + int32x4_t __ret_631; \ + int32x4_t __s0_631 = __p0_631; \ + int16x8_t __s1_631 = __p1_631; \ + int16x4_t __s2_631 = __p2_631; \ + int32x4_t __rev0_631; __rev0_631 = __builtin_shufflevector(__s0_631, __s0_631, 3, 2, 1, 0); \ + int16x8_t __rev1_631; __rev1_631 = __builtin_shufflevector(__s1_631, __s1_631, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_631; __rev2_631 = __builtin_shufflevector(__s2_631, __s2_631, 3, 2, 1, 0); \ + __ret_631 = __noswap_vqdmlal_s16(__rev0_631, __noswap_vget_high_s16(__rev1_631), __noswap_splat_lane_s16(__rev2_631, __p3_631)); \ + __ret_631 = __builtin_shufflevector(__ret_631, __ret_631, 3, 2, 1, 0); \ + __ret_631; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrn_high_n_s64(__p0_675, __p1_675, __p2_675) __extension__ ({ \ - int32x4_t __ret_675; \ - int32x2_t __s0_675 = __p0_675; \ - int64x2_t __s1_675 = __p1_675; \ - __ret_675 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_675), (int32x2_t)(vqrshrn_n_s64(__s1_675, __p2_675)))); \ - __ret_675; \ +#define vqdmlal_high_laneq_s32(__p0_632, __p1_632, __p2_632, __p3_632) __extension__ ({ \ + int64x2_t __ret_632; \ + int64x2_t __s0_632 = __p0_632; \ + int32x4_t __s1_632 = __p1_632; \ + int32x4_t __s2_632 = __p2_632; \ + __ret_632 = vqdmlal_s32(__s0_632, vget_high_s32(__s1_632), splat_laneq_s32(__s2_632, __p3_632)); \ + __ret_632; \ }) #else -#define vqrshrn_high_n_s64(__p0_676, __p1_676, __p2_676) __extension__ ({ \ - int32x4_t __ret_676; \ - int32x2_t __s0_676 = __p0_676; \ - int64x2_t __s1_676 = __p1_676; \ - int32x2_t __rev0_676; __rev0_676 = __builtin_shufflevector(__s0_676, __s0_676, 1, 0); \ - int64x2_t __rev1_676; __rev1_676 = __builtin_shufflevector(__s1_676, __s1_676, 1, 0); 
\ - __ret_676 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_676), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_676, __p2_676)))); \ - __ret_676 = __builtin_shufflevector(__ret_676, __ret_676, 3, 2, 1, 0); \ - __ret_676; \ +#define vqdmlal_high_laneq_s32(__p0_633, __p1_633, __p2_633, __p3_633) __extension__ ({ \ + int64x2_t __ret_633; \ + int64x2_t __s0_633 = __p0_633; \ + int32x4_t __s1_633 = __p1_633; \ + int32x4_t __s2_633 = __p2_633; \ + int64x2_t __rev0_633; __rev0_633 = __builtin_shufflevector(__s0_633, __s0_633, 1, 0); \ + int32x4_t __rev1_633; __rev1_633 = __builtin_shufflevector(__s1_633, __s1_633, 3, 2, 1, 0); \ + int32x4_t __rev2_633; __rev2_633 = __builtin_shufflevector(__s2_633, __s2_633, 3, 2, 1, 0); \ + __ret_633 = __noswap_vqdmlal_s32(__rev0_633, __noswap_vget_high_s32(__rev1_633), __noswap_splat_laneq_s32(__rev2_633, __p3_633)); \ + __ret_633 = __builtin_shufflevector(__ret_633, __ret_633, 1, 0); \ + __ret_633; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrn_high_n_s16(__p0_677, __p1_677, __p2_677) __extension__ ({ \ - int8x16_t __ret_677; \ - int8x8_t __s0_677 = __p0_677; \ - int16x8_t __s1_677 = __p1_677; \ - __ret_677 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_677), (int8x8_t)(vqrshrn_n_s16(__s1_677, __p2_677)))); \ - __ret_677; \ +#define vqdmlal_high_laneq_s16(__p0_634, __p1_634, __p2_634, __p3_634) __extension__ ({ \ + int32x4_t __ret_634; \ + int32x4_t __s0_634 = __p0_634; \ + int16x8_t __s1_634 = __p1_634; \ + int16x8_t __s2_634 = __p2_634; \ + __ret_634 = vqdmlal_s16(__s0_634, vget_high_s16(__s1_634), splat_laneq_s16(__s2_634, __p3_634)); \ + __ret_634; \ }) #else -#define vqrshrn_high_n_s16(__p0_678, __p1_678, __p2_678) __extension__ ({ \ - int8x16_t __ret_678; \ - int8x8_t __s0_678 = __p0_678; \ - int16x8_t __s1_678 = __p1_678; \ - int8x8_t __rev0_678; __rev0_678 = __builtin_shufflevector(__s0_678, __s0_678, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_678; __rev1_678 = __builtin_shufflevector(__s1_678, __s1_678, 7, 6, 5, 4, 
3, 2, 1, 0); \ - __ret_678 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_678), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_678, __p2_678)))); \ - __ret_678 = __builtin_shufflevector(__ret_678, __ret_678, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_678; \ +#define vqdmlal_high_laneq_s16(__p0_635, __p1_635, __p2_635, __p3_635) __extension__ ({ \ + int32x4_t __ret_635; \ + int32x4_t __s0_635 = __p0_635; \ + int16x8_t __s1_635 = __p1_635; \ + int16x8_t __s2_635 = __p2_635; \ + int32x4_t __rev0_635; __rev0_635 = __builtin_shufflevector(__s0_635, __s0_635, 3, 2, 1, 0); \ + int16x8_t __rev1_635; __rev1_635 = __builtin_shufflevector(__s1_635, __s1_635, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_635; __rev2_635 = __builtin_shufflevector(__s2_635, __s2_635, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_635 = __noswap_vqdmlal_s16(__rev0_635, __noswap_vget_high_s16(__rev1_635), __noswap_splat_laneq_s16(__rev2_635, __p3_635)); \ + __ret_635 = __builtin_shufflevector(__ret_635, __ret_635, 3, 2, 1, 0); \ + __ret_635; \ }) #endif -#define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \ - uint16_t __ret; \ - uint32_t __s0 = __p0; \ - __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \ +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = vqdmlal_n_s32(__p0, vget_high_s32(__p1), __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vqdmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vqdmlal_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t 
vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = vqdmlal_n_s16(__p0, vget_high_s16(__p1), __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vqdmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vqdmlal_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __s2, __p3); \ __ret; \ }) -#define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \ - uint32_t __ret; \ - uint64_t __s0 = __p0; \ - __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \ +#else +#define vqdmlals_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + __ret = (int64_t) __builtin_neon_vqdmlals_lane_s32(__s0, __s1, __rev2, __p3); \ __ret; \ }) -#define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \ - uint8_t __ret; \ - uint16_t __s0 = __p0; \ - __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __s2, __p3); \ __ret; \ }) -#define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \ - int16_t __ret; \ +#else +#define 
vqdmlalh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __ret; \ int32_t __s0 = __p0; \ - __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \ + int16_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (int32_t) __builtin_neon_vqdmlalh_lane_s16(__s0, __s1, __rev2, __p3); \ __ret; \ }) -#define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \ - int32_t __ret; \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __ret; \ int64_t __s0 = __p0; \ - __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \ + int32_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __s2, __p3); \ __ret; \ }) -#define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \ - int8_t __ret; \ - int16_t __s0 = __p0; \ - __ret = (int8_t) __builtin_neon_vqrshrnh_n_s16(__s0, __p1); \ +#else +#define vqdmlals_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (int64_t) __builtin_neon_vqdmlals_laneq_s32(__s0, __s1, __rev2, __p3); \ __ret; \ }) +#endif + #ifdef __LITTLE_ENDIAN__ -#define vqrshrun_high_n_s32(__p0_679, __p1_679, __p2_679) __extension__ ({ \ - int16x8_t __ret_679; \ - int16x4_t __s0_679 = __p0_679; \ - int32x4_t __s1_679 = __p1_679; \ - __ret_679 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_679), (int16x4_t)(vqrshrun_n_s32(__s1_679, __p2_679)))); \ - __ret_679; \ +#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __s2, __p3); \ + __ret; \ }) #else -#define vqrshrun_high_n_s32(__p0_680, __p1_680, 
__p2_680) __extension__ ({ \ - int16x8_t __ret_680; \ - int16x4_t __s0_680 = __p0_680; \ - int32x4_t __s1_680 = __p1_680; \ - int16x4_t __rev0_680; __rev0_680 = __builtin_shufflevector(__s0_680, __s0_680, 3, 2, 1, 0); \ - int32x4_t __rev1_680; __rev1_680 = __builtin_shufflevector(__s1_680, __s1_680, 3, 2, 1, 0); \ - __ret_680 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_680), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_680, __p2_680)))); \ - __ret_680 = __builtin_shufflevector(__ret_680, __ret_680, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_680; \ +#define vqdmlalh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int32_t) __builtin_neon_vqdmlalh_laneq_s16(__s0, __s1, __rev2, __p3); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrun_high_n_s64(__p0_681, __p1_681, __p2_681) __extension__ ({ \ - int32x4_t __ret_681; \ - int32x2_t __s0_681 = __p0_681; \ - int64x2_t __s1_681 = __p1_681; \ - __ret_681 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_681), (int32x2_t)(vqrshrun_n_s64(__s1_681, __p2_681)))); \ - __ret_681; \ +#define vqdmlal_laneq_s32(__p0_636, __p1_636, __p2_636, __p3_636) __extension__ ({ \ + int64x2_t __ret_636; \ + int64x2_t __s0_636 = __p0_636; \ + int32x2_t __s1_636 = __p1_636; \ + int32x4_t __s2_636 = __p2_636; \ + __ret_636 = vqdmlal_s32(__s0_636, __s1_636, splat_laneq_s32(__s2_636, __p3_636)); \ + __ret_636; \ }) #else -#define vqrshrun_high_n_s64(__p0_682, __p1_682, __p2_682) __extension__ ({ \ - int32x4_t __ret_682; \ - int32x2_t __s0_682 = __p0_682; \ - int64x2_t __s1_682 = __p1_682; \ - int32x2_t __rev0_682; __rev0_682 = __builtin_shufflevector(__s0_682, __s0_682, 1, 0); \ - int64x2_t __rev1_682; __rev1_682 = __builtin_shufflevector(__s1_682, __s1_682, 1, 0); \ - __ret_682 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_682), 
(int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_682, __p2_682)))); \ - __ret_682 = __builtin_shufflevector(__ret_682, __ret_682, 3, 2, 1, 0); \ - __ret_682; \ +#define vqdmlal_laneq_s32(__p0_637, __p1_637, __p2_637, __p3_637) __extension__ ({ \ + int64x2_t __ret_637; \ + int64x2_t __s0_637 = __p0_637; \ + int32x2_t __s1_637 = __p1_637; \ + int32x4_t __s2_637 = __p2_637; \ + int64x2_t __rev0_637; __rev0_637 = __builtin_shufflevector(__s0_637, __s0_637, 1, 0); \ + int32x2_t __rev1_637; __rev1_637 = __builtin_shufflevector(__s1_637, __s1_637, 1, 0); \ + int32x4_t __rev2_637; __rev2_637 = __builtin_shufflevector(__s2_637, __s2_637, 3, 2, 1, 0); \ + __ret_637 = __noswap_vqdmlal_s32(__rev0_637, __rev1_637, __noswap_splat_laneq_s32(__rev2_637, __p3_637)); \ + __ret_637 = __builtin_shufflevector(__ret_637, __ret_637, 1, 0); \ + __ret_637; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrshrun_high_n_s16(__p0_683, __p1_683, __p2_683) __extension__ ({ \ - int8x16_t __ret_683; \ - int8x8_t __s0_683 = __p0_683; \ - int16x8_t __s1_683 = __p1_683; \ - __ret_683 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_683), (int8x8_t)(vqrshrun_n_s16(__s1_683, __p2_683)))); \ - __ret_683; \ +#define vqdmlal_laneq_s16(__p0_638, __p1_638, __p2_638, __p3_638) __extension__ ({ \ + int32x4_t __ret_638; \ + int32x4_t __s0_638 = __p0_638; \ + int16x4_t __s1_638 = __p1_638; \ + int16x8_t __s2_638 = __p2_638; \ + __ret_638 = vqdmlal_s16(__s0_638, __s1_638, splat_laneq_s16(__s2_638, __p3_638)); \ + __ret_638; \ }) #else -#define vqrshrun_high_n_s16(__p0_684, __p1_684, __p2_684) __extension__ ({ \ - int8x16_t __ret_684; \ - int8x8_t __s0_684 = __p0_684; \ - int16x8_t __s1_684 = __p1_684; \ - int8x8_t __rev0_684; __rev0_684 = __builtin_shufflevector(__s0_684, __s0_684, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_684; __rev1_684 = __builtin_shufflevector(__s1_684, __s1_684, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_684 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_684), 
(int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_684, __p2_684)))); \ - __ret_684 = __builtin_shufflevector(__ret_684, __ret_684, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_684; \ +#define vqdmlal_laneq_s16(__p0_639, __p1_639, __p2_639, __p3_639) __extension__ ({ \ + int32x4_t __ret_639; \ + int32x4_t __s0_639 = __p0_639; \ + int16x4_t __s1_639 = __p1_639; \ + int16x8_t __s2_639 = __p2_639; \ + int32x4_t __rev0_639; __rev0_639 = __builtin_shufflevector(__s0_639, __s0_639, 3, 2, 1, 0); \ + int16x4_t __rev1_639; __rev1_639 = __builtin_shufflevector(__s1_639, __s1_639, 3, 2, 1, 0); \ + int16x8_t __rev2_639; __rev2_639 = __builtin_shufflevector(__s2_639, __s2_639, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_639 = __noswap_vqdmlal_s16(__rev0_639, __rev1_639, __noswap_splat_laneq_s16(__rev2_639, __p3_639)); \ + __ret_639 = __builtin_shufflevector(__ret_639, __ret_639, 3, 2, 1, 0); \ + __ret_639; \ }) #endif -#define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \ - int16_t __ret; \ - int32_t __s0 = __p0; \ - __ret = (int16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \ - __ret; \ -}) -#define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \ - int32_t __ret; \ - int64_t __s0 = __p0; \ - __ret = (int32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \ - __ret; \ -}) -#define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \ - int8_t __ret; \ - int16_t __s0 = __p0; \ - __ret = (int8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \ - __ret; \ -}) -__ai uint8_t vqshlb_u8(uint8_t __p0, int8_t __p1) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1); - return __ret; -} -__ai uint32_t vqshls_u32(uint32_t __p0, int32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1); - return __ret; -} -__ai uint64_t vqshld_u64(uint64_t __p0, int64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vqshld_u64(__p0, __p1); +__ai __attribute__((target("neon"))) int64_t vqdmlsls_s32(int64_t __p0, int32_t __p1, int32_t __p2) 
{ + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqdmlsls_s32(__p0, __p1, __p2); return __ret; } -__ai uint16_t vqshlh_u16(uint16_t __p0, int16_t __p1) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1); +__ai __attribute__((target("neon"))) int32_t vqdmlslh_s16(int32_t __p0, int16_t __p1, int16_t __p2) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqdmlslh_s16(__p0, __p1, __p2); return __ret; } -__ai int8_t vqshlb_s8(int8_t __p0, int8_t __p1) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1); +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + __ret = vqdmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); return __ret; } -__ai int32_t vqshls_s32(int32_t __p0, int32_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1); +#else +__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vqdmlsl_s32(__rev0, __noswap_vget_high_s32(__rev1), __noswap_vget_high_s32(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int64_t vqshld_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + __ret = vqdmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); return __ret; } -__ai int16_t vqshlh_s16(int16_t __p0, int16_t __p1) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, 
__p1); +#else +__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vqdmlsl_s16(__rev0, __noswap_vget_high_s16(__rev1), __noswap_vget_high_s16(__rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -#define vqshlb_n_u8(__p0, __p1) __extension__ ({ \ - uint8_t __ret; \ - uint8_t __s0 = __p0; \ - __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \ - __ret; \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_high_lane_s32(__p0_640, __p1_640, __p2_640, __p3_640) __extension__ ({ \ + int64x2_t __ret_640; \ + int64x2_t __s0_640 = __p0_640; \ + int32x4_t __s1_640 = __p1_640; \ + int32x2_t __s2_640 = __p2_640; \ + __ret_640 = vqdmlsl_s32(__s0_640, vget_high_s32(__s1_640), splat_lane_s32(__s2_640, __p3_640)); \ + __ret_640; \ }) -#define vqshls_n_u32(__p0, __p1) __extension__ ({ \ - uint32_t __ret; \ - uint32_t __s0 = __p0; \ - __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \ - __ret; \ +#else +#define vqdmlsl_high_lane_s32(__p0_641, __p1_641, __p2_641, __p3_641) __extension__ ({ \ + int64x2_t __ret_641; \ + int64x2_t __s0_641 = __p0_641; \ + int32x4_t __s1_641 = __p1_641; \ + int32x2_t __s2_641 = __p2_641; \ + int64x2_t __rev0_641; __rev0_641 = __builtin_shufflevector(__s0_641, __s0_641, 1, 0); \ + int32x4_t __rev1_641; __rev1_641 = __builtin_shufflevector(__s1_641, __s1_641, 3, 2, 1, 0); \ + int32x2_t __rev2_641; __rev2_641 = __builtin_shufflevector(__s2_641, __s2_641, 1, 0); \ + __ret_641 = __noswap_vqdmlsl_s32(__rev0_641, __noswap_vget_high_s32(__rev1_641), __noswap_splat_lane_s32(__rev2_641, __p3_641)); \ + __ret_641 = __builtin_shufflevector(__ret_641, __ret_641, 
1, 0); \ + __ret_641; \ }) -#define vqshld_n_u64(__p0, __p1) __extension__ ({ \ - uint64_t __ret; \ - uint64_t __s0 = __p0; \ - __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \ - __ret; \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_high_lane_s16(__p0_642, __p1_642, __p2_642, __p3_642) __extension__ ({ \ + int32x4_t __ret_642; \ + int32x4_t __s0_642 = __p0_642; \ + int16x8_t __s1_642 = __p1_642; \ + int16x4_t __s2_642 = __p2_642; \ + __ret_642 = vqdmlsl_s16(__s0_642, vget_high_s16(__s1_642), splat_lane_s16(__s2_642, __p3_642)); \ + __ret_642; \ }) -#define vqshlh_n_u16(__p0, __p1) __extension__ ({ \ - uint16_t __ret; \ - uint16_t __s0 = __p0; \ - __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \ - __ret; \ +#else +#define vqdmlsl_high_lane_s16(__p0_643, __p1_643, __p2_643, __p3_643) __extension__ ({ \ + int32x4_t __ret_643; \ + int32x4_t __s0_643 = __p0_643; \ + int16x8_t __s1_643 = __p1_643; \ + int16x4_t __s2_643 = __p2_643; \ + int32x4_t __rev0_643; __rev0_643 = __builtin_shufflevector(__s0_643, __s0_643, 3, 2, 1, 0); \ + int16x8_t __rev1_643; __rev1_643 = __builtin_shufflevector(__s1_643, __s1_643, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev2_643; __rev2_643 = __builtin_shufflevector(__s2_643, __s2_643, 3, 2, 1, 0); \ + __ret_643 = __noswap_vqdmlsl_s16(__rev0_643, __noswap_vget_high_s16(__rev1_643), __noswap_splat_lane_s16(__rev2_643, __p3_643)); \ + __ret_643 = __builtin_shufflevector(__ret_643, __ret_643, 3, 2, 1, 0); \ + __ret_643; \ }) -#define vqshlb_n_s8(__p0, __p1) __extension__ ({ \ - int8_t __ret; \ - int8_t __s0 = __p0; \ - __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \ - __ret; \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_high_laneq_s32(__p0_644, __p1_644, __p2_644, __p3_644) __extension__ ({ \ + int64x2_t __ret_644; \ + int64x2_t __s0_644 = __p0_644; \ + int32x4_t __s1_644 = __p1_644; \ + int32x4_t __s2_644 = __p2_644; \ + __ret_644 = vqdmlsl_s32(__s0_644, vget_high_s32(__s1_644), 
splat_laneq_s32(__s2_644, __p3_644)); \ + __ret_644; \ }) -#define vqshls_n_s32(__p0, __p1) __extension__ ({ \ - int32_t __ret; \ - int32_t __s0 = __p0; \ - __ret = (int32_t) __builtin_neon_vqshls_n_s32(__s0, __p1); \ - __ret; \ +#else +#define vqdmlsl_high_laneq_s32(__p0_645, __p1_645, __p2_645, __p3_645) __extension__ ({ \ + int64x2_t __ret_645; \ + int64x2_t __s0_645 = __p0_645; \ + int32x4_t __s1_645 = __p1_645; \ + int32x4_t __s2_645 = __p2_645; \ + int64x2_t __rev0_645; __rev0_645 = __builtin_shufflevector(__s0_645, __s0_645, 1, 0); \ + int32x4_t __rev1_645; __rev1_645 = __builtin_shufflevector(__s1_645, __s1_645, 3, 2, 1, 0); \ + int32x4_t __rev2_645; __rev2_645 = __builtin_shufflevector(__s2_645, __s2_645, 3, 2, 1, 0); \ + __ret_645 = __noswap_vqdmlsl_s32(__rev0_645, __noswap_vget_high_s32(__rev1_645), __noswap_splat_laneq_s32(__rev2_645, __p3_645)); \ + __ret_645 = __builtin_shufflevector(__ret_645, __ret_645, 1, 0); \ + __ret_645; \ }) -#define vqshld_n_s64(__p0, __p1) __extension__ ({ \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsl_high_laneq_s16(__p0_646, __p1_646, __p2_646, __p3_646) __extension__ ({ \ + int32x4_t __ret_646; \ + int32x4_t __s0_646 = __p0_646; \ + int16x8_t __s1_646 = __p1_646; \ + int16x8_t __s2_646 = __p2_646; \ + __ret_646 = vqdmlsl_s16(__s0_646, vget_high_s16(__s1_646), splat_laneq_s16(__s2_646, __p3_646)); \ + __ret_646; \ +}) +#else +#define vqdmlsl_high_laneq_s16(__p0_647, __p1_647, __p2_647, __p3_647) __extension__ ({ \ + int32x4_t __ret_647; \ + int32x4_t __s0_647 = __p0_647; \ + int16x8_t __s1_647 = __p1_647; \ + int16x8_t __s2_647 = __p2_647; \ + int32x4_t __rev0_647; __rev0_647 = __builtin_shufflevector(__s0_647, __s0_647, 3, 2, 1, 0); \ + int16x8_t __rev1_647; __rev1_647 = __builtin_shufflevector(__s1_647, __s1_647, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_647; __rev2_647 = __builtin_shufflevector(__s2_647, __s2_647, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_647 = __noswap_vqdmlsl_s16(__rev0_647, 
__noswap_vget_high_s16(__rev1_647), __noswap_splat_laneq_s16(__rev2_647, __p3_647)); \ + __ret_647 = __builtin_shufflevector(__ret_647, __ret_647, 3, 2, 1, 0); \ + __ret_647; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + __ret = vqdmlsl_n_s32(__p0, vget_high_s32(__p1), __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vqdmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vqdmlsl_n_s32(__rev0, __noswap_vget_high_s32(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + __ret = vqdmlsl_n_s16(__p0, vget_high_s16(__p1), __p2); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vqdmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vqdmlsl_n_s16(__rev0, __noswap_vget_high_s16(__rev1), __p2); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ int64_t __ret; \ int64_t __s0 = __p0; \ - __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \ + int32_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __s2, __p3); \ __ret; \ }) -#define vqshlh_n_s16(__p0, __p1) __extension__ ({ \ - 
int16_t __ret; \ - int16_t __s0 = __p0; \ - __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \ +#else +#define vqdmlsls_lane_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x2_t __s2 = __p2; \ + int32x2_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 1, 0); \ + __ret = (int64_t) __builtin_neon_vqdmlsls_lane_s32(__s0, __s1, __rev2, __p3); \ __ret; \ }) -#define vqshlub_n_s8(__p0, __p1) __extension__ ({ \ - int8_t __ret; \ - int8_t __s0 = __p0; \ - __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __s2, __p3); \ __ret; \ }) -#define vqshlus_n_s32(__p0, __p1) __extension__ ({ \ +#else +#define vqdmlslh_lane_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ int32_t __ret; \ int32_t __s0 = __p0; \ - __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \ + int16_t __s1 = __p1; \ + int16x4_t __s2 = __p2; \ + int16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (int32_t) __builtin_neon_vqdmlslh_lane_s16(__s0, __s1, __rev2, __p3); \ __ret; \ }) -#define vqshlud_n_s64(__p0, __p1) __extension__ ({ \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) __extension__ ({ \ int64_t __ret; \ int64_t __s0 = __p0; \ - __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \ + int32_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __s2, __p3); \ __ret; \ }) -#define vqshluh_n_s16(__p0, __p1) __extension__ ({ \ - int16_t __ret; \ - int16_t __s0 = __p0; \ - __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \ +#else +#define vqdmlsls_laneq_s32(__p0, __p1, __p2, __p3) 
__extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int32_t __s1 = __p1; \ + int32x4_t __s2 = __p2; \ + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ + __ret = (int64_t) __builtin_neon_vqdmlsls_laneq_s32(__s0, __s1, __rev2, __p3); \ __ret; \ }) +#endif + #ifdef __LITTLE_ENDIAN__ -#define vqshrn_high_n_u32(__p0_685, __p1_685, __p2_685) __extension__ ({ \ - uint16x8_t __ret_685; \ - uint16x4_t __s0_685 = __p0_685; \ - uint32x4_t __s1_685 = __p1_685; \ - __ret_685 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_685), (uint16x4_t)(vqshrn_n_u32(__s1_685, __p2_685)))); \ - __ret_685; \ +#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __s2, __p3); \ + __ret; \ }) #else -#define vqshrn_high_n_u32(__p0_686, __p1_686, __p2_686) __extension__ ({ \ - uint16x8_t __ret_686; \ - uint16x4_t __s0_686 = __p0_686; \ - uint32x4_t __s1_686 = __p1_686; \ - uint16x4_t __rev0_686; __rev0_686 = __builtin_shufflevector(__s0_686, __s0_686, 3, 2, 1, 0); \ - uint32x4_t __rev1_686; __rev1_686 = __builtin_shufflevector(__s1_686, __s1_686, 3, 2, 1, 0); \ - __ret_686 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_686), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_686, __p2_686)))); \ - __ret_686 = __builtin_shufflevector(__ret_686, __ret_686, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_686; \ +#define vqdmlslh_laneq_s16(__p0, __p1, __p2, __p3) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + int16_t __s1 = __p1; \ + int16x8_t __s2 = __p2; \ + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int32_t) __builtin_neon_vqdmlslh_laneq_s16(__s0, __s1, __rev2, __p3); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqshrn_high_n_u64(__p0_687, __p1_687, __p2_687) __extension__ ({ \ - uint32x4_t __ret_687; \ - 
uint32x2_t __s0_687 = __p0_687; \ - uint64x2_t __s1_687 = __p1_687; \ - __ret_687 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_687), (uint32x2_t)(vqshrn_n_u64(__s1_687, __p2_687)))); \ - __ret_687; \ +#define vqdmlsl_laneq_s32(__p0_648, __p1_648, __p2_648, __p3_648) __extension__ ({ \ + int64x2_t __ret_648; \ + int64x2_t __s0_648 = __p0_648; \ + int32x2_t __s1_648 = __p1_648; \ + int32x4_t __s2_648 = __p2_648; \ + __ret_648 = vqdmlsl_s32(__s0_648, __s1_648, splat_laneq_s32(__s2_648, __p3_648)); \ + __ret_648; \ }) #else -#define vqshrn_high_n_u64(__p0_688, __p1_688, __p2_688) __extension__ ({ \ - uint32x4_t __ret_688; \ - uint32x2_t __s0_688 = __p0_688; \ - uint64x2_t __s1_688 = __p1_688; \ - uint32x2_t __rev0_688; __rev0_688 = __builtin_shufflevector(__s0_688, __s0_688, 1, 0); \ - uint64x2_t __rev1_688; __rev1_688 = __builtin_shufflevector(__s1_688, __s1_688, 1, 0); \ - __ret_688 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_688), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_688, __p2_688)))); \ - __ret_688 = __builtin_shufflevector(__ret_688, __ret_688, 3, 2, 1, 0); \ - __ret_688; \ +#define vqdmlsl_laneq_s32(__p0_649, __p1_649, __p2_649, __p3_649) __extension__ ({ \ + int64x2_t __ret_649; \ + int64x2_t __s0_649 = __p0_649; \ + int32x2_t __s1_649 = __p1_649; \ + int32x4_t __s2_649 = __p2_649; \ + int64x2_t __rev0_649; __rev0_649 = __builtin_shufflevector(__s0_649, __s0_649, 1, 0); \ + int32x2_t __rev1_649; __rev1_649 = __builtin_shufflevector(__s1_649, __s1_649, 1, 0); \ + int32x4_t __rev2_649; __rev2_649 = __builtin_shufflevector(__s2_649, __s2_649, 3, 2, 1, 0); \ + __ret_649 = __noswap_vqdmlsl_s32(__rev0_649, __rev1_649, __noswap_splat_laneq_s32(__rev2_649, __p3_649)); \ + __ret_649 = __builtin_shufflevector(__ret_649, __ret_649, 1, 0); \ + __ret_649; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqshrn_high_n_u16(__p0_689, __p1_689, __p2_689) __extension__ ({ \ - uint8x16_t __ret_689; \ - uint8x8_t __s0_689 = __p0_689; \ - uint16x8_t __s1_689 = 
__p1_689; \ - __ret_689 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_689), (uint8x8_t)(vqshrn_n_u16(__s1_689, __p2_689)))); \ - __ret_689; \ +#define vqdmlsl_laneq_s16(__p0_650, __p1_650, __p2_650, __p3_650) __extension__ ({ \ + int32x4_t __ret_650; \ + int32x4_t __s0_650 = __p0_650; \ + int16x4_t __s1_650 = __p1_650; \ + int16x8_t __s2_650 = __p2_650; \ + __ret_650 = vqdmlsl_s16(__s0_650, __s1_650, splat_laneq_s16(__s2_650, __p3_650)); \ + __ret_650; \ }) #else -#define vqshrn_high_n_u16(__p0_690, __p1_690, __p2_690) __extension__ ({ \ - uint8x16_t __ret_690; \ - uint8x8_t __s0_690 = __p0_690; \ - uint16x8_t __s1_690 = __p1_690; \ - uint8x8_t __rev0_690; __rev0_690 = __builtin_shufflevector(__s0_690, __s0_690, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_690; __rev1_690 = __builtin_shufflevector(__s1_690, __s1_690, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_690 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_690), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_690, __p2_690)))); \ - __ret_690 = __builtin_shufflevector(__ret_690, __ret_690, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_690; \ +#define vqdmlsl_laneq_s16(__p0_651, __p1_651, __p2_651, __p3_651) __extension__ ({ \ + int32x4_t __ret_651; \ + int32x4_t __s0_651 = __p0_651; \ + int16x4_t __s1_651 = __p1_651; \ + int16x8_t __s2_651 = __p2_651; \ + int32x4_t __rev0_651; __rev0_651 = __builtin_shufflevector(__s0_651, __s0_651, 3, 2, 1, 0); \ + int16x4_t __rev1_651; __rev1_651 = __builtin_shufflevector(__s1_651, __s1_651, 3, 2, 1, 0); \ + int16x8_t __rev2_651; __rev2_651 = __builtin_shufflevector(__s2_651, __s2_651, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_651 = __noswap_vqdmlsl_s16(__rev0_651, __rev1_651, __noswap_splat_laneq_s16(__rev2_651, __p3_651)); \ + __ret_651 = __builtin_shufflevector(__ret_651, __ret_651, 3, 2, 1, 0); \ + __ret_651; \ }) #endif +__ai __attribute__((target("neon"))) int32_t vqdmulhs_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) 
__builtin_neon_vqdmulhs_s32(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int16_t vqdmulhh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqdmulhh_s16(__p0, __p1); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -#define vqshrn_high_n_s32(__p0_691, __p1_691, __p2_691) __extension__ ({ \ - int16x8_t __ret_691; \ - int16x4_t __s0_691 = __p0_691; \ - int32x4_t __s1_691 = __p1_691; \ - __ret_691 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_691), (int16x4_t)(vqshrn_n_s32(__s1_691, __p2_691)))); \ - __ret_691; \ +#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ }) #else -#define vqshrn_high_n_s32(__p0_692, __p1_692, __p2_692) __extension__ ({ \ - int16x8_t __ret_692; \ - int16x4_t __s0_692 = __p0_692; \ - int32x4_t __s1_692 = __p1_692; \ - int16x4_t __rev0_692; __rev0_692 = __builtin_shufflevector(__s0_692, __s0_692, 3, 2, 1, 0); \ - int32x4_t __rev1_692; __rev1_692 = __builtin_shufflevector(__s1_692, __s1_692, 3, 2, 1, 0); \ - __ret_692 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_692), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_692, __p2_692)))); \ - __ret_692 = __builtin_shufflevector(__ret_692, __ret_692, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_692; \ +#define vqdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqshrn_high_n_s64(__p0_693, __p1_693, __p2_693) 
__extension__ ({ \ - int32x4_t __ret_693; \ - int32x2_t __s0_693 = __p0_693; \ - int64x2_t __s1_693 = __p1_693; \ - __ret_693 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_693), (int32x2_t)(vqshrn_n_s64(__s1_693, __p2_693)))); \ - __ret_693; \ +#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ }) #else -#define vqshrn_high_n_s64(__p0_694, __p1_694, __p2_694) __extension__ ({ \ - int32x4_t __ret_694; \ - int32x2_t __s0_694 = __p0_694; \ - int64x2_t __s1_694 = __p1_694; \ - int32x2_t __rev0_694; __rev0_694 = __builtin_shufflevector(__s0_694, __s0_694, 1, 0); \ - int64x2_t __rev1_694; __rev1_694 = __builtin_shufflevector(__s1_694, __s1_694, 1, 0); \ - __ret_694 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_694), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_694, __p2_694)))); \ - __ret_694 = __builtin_shufflevector(__ret_694, __ret_694, 3, 2, 1, 0); \ - __ret_694; \ +#define vqdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vqdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqshrn_high_n_s16(__p0_695, __p1_695, __p2_695) __extension__ ({ \ - int8x16_t __ret_695; \ - int8x8_t __s0_695 = __p0_695; \ - int16x8_t __s1_695 = __p1_695; \ - __ret_695 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_695), (int8x8_t)(vqshrn_n_s16(__s1_695, __p2_695)))); \ - __ret_695; \ +#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t 
__s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ }) #else -#define vqshrn_high_n_s16(__p0_696, __p1_696, __p2_696) __extension__ ({ \ - int8x16_t __ret_696; \ - int8x8_t __s0_696 = __p0_696; \ - int16x8_t __s1_696 = __p1_696; \ - int8x8_t __rev0_696; __rev0_696 = __builtin_shufflevector(__s0_696, __s0_696, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_696; __rev1_696 = __builtin_shufflevector(__s1_696, __s1_696, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_696 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_696), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_696, __p2_696)))); \ - __ret_696 = __builtin_shufflevector(__ret_696, __ret_696, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_696; \ +#define vqdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ }) #endif -#define vqshrns_n_u32(__p0, __p1) __extension__ ({ \ - uint16_t __ret; \ - uint32_t __s0 = __p0; \ - __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \ +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ __ret; \ }) -#define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \ - uint32_t __ret; \ - uint64_t __s0 = __p0; \ - __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \ +#else +#define vqdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + 
int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vqdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ __ret; \ }) -#define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \ - uint8_t __ret; \ - uint16_t __s0 = __p0; \ - __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \ - __ret; \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhs_lane_s32(__p0_652, __p1_652, __p2_652) __extension__ ({ \ + int32_t __ret_652; \ + int32_t __s0_652 = __p0_652; \ + int32x2_t __s1_652 = __p1_652; \ + __ret_652 = vqdmulhs_s32(__s0_652, vget_lane_s32(__s1_652, __p2_652)); \ + __ret_652; \ }) -#define vqshrns_n_s32(__p0, __p1) __extension__ ({ \ - int16_t __ret; \ - int32_t __s0 = __p0; \ - __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \ - __ret; \ +#else +#define vqdmulhs_lane_s32(__p0_653, __p1_653, __p2_653) __extension__ ({ \ + int32_t __ret_653; \ + int32_t __s0_653 = __p0_653; \ + int32x2_t __s1_653 = __p1_653; \ + int32x2_t __rev1_653; __rev1_653 = __builtin_shufflevector(__s1_653, __s1_653, 1, 0); \ + __ret_653 = vqdmulhs_s32(__s0_653, __noswap_vget_lane_s32(__rev1_653, __p2_653)); \ + __ret_653; \ }) -#define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \ - int32_t __ret; \ - int64_t __s0 = __p0; \ - __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \ - __ret; \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhh_lane_s16(__p0_654, __p1_654, __p2_654) __extension__ ({ \ + int16_t __ret_654; \ + int16_t __s0_654 = __p0_654; \ + int16x4_t __s1_654 = __p1_654; \ + __ret_654 = vqdmulhh_s16(__s0_654, vget_lane_s16(__s1_654, __p2_654)); \ + __ret_654; \ }) -#define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \ - int8_t __ret; \ - int16_t __s0 = __p0; \ - __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \ 
- __ret; \ +#else +#define vqdmulhh_lane_s16(__p0_655, __p1_655, __p2_655) __extension__ ({ \ + int16_t __ret_655; \ + int16_t __s0_655 = __p0_655; \ + int16x4_t __s1_655 = __p1_655; \ + int16x4_t __rev1_655; __rev1_655 = __builtin_shufflevector(__s1_655, __s1_655, 3, 2, 1, 0); \ + __ret_655 = vqdmulhh_s16(__s0_655, __noswap_vget_lane_s16(__rev1_655, __p2_655)); \ + __ret_655; \ }) +#endif + #ifdef __LITTLE_ENDIAN__ -#define vqshrun_high_n_s32(__p0_697, __p1_697, __p2_697) __extension__ ({ \ - int16x8_t __ret_697; \ - int16x4_t __s0_697 = __p0_697; \ - int32x4_t __s1_697 = __p1_697; \ - __ret_697 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_697), (int16x4_t)(vqshrun_n_s32(__s1_697, __p2_697)))); \ - __ret_697; \ +#define vqdmulhs_laneq_s32(__p0_656, __p1_656, __p2_656) __extension__ ({ \ + int32_t __ret_656; \ + int32_t __s0_656 = __p0_656; \ + int32x4_t __s1_656 = __p1_656; \ + __ret_656 = vqdmulhs_s32(__s0_656, vgetq_lane_s32(__s1_656, __p2_656)); \ + __ret_656; \ }) #else -#define vqshrun_high_n_s32(__p0_698, __p1_698, __p2_698) __extension__ ({ \ - int16x8_t __ret_698; \ - int16x4_t __s0_698 = __p0_698; \ - int32x4_t __s1_698 = __p1_698; \ - int16x4_t __rev0_698; __rev0_698 = __builtin_shufflevector(__s0_698, __s0_698, 3, 2, 1, 0); \ - int32x4_t __rev1_698; __rev1_698 = __builtin_shufflevector(__s1_698, __s1_698, 3, 2, 1, 0); \ - __ret_698 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_698), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_698, __p2_698)))); \ - __ret_698 = __builtin_shufflevector(__ret_698, __ret_698, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_698; \ +#define vqdmulhs_laneq_s32(__p0_657, __p1_657, __p2_657) __extension__ ({ \ + int32_t __ret_657; \ + int32_t __s0_657 = __p0_657; \ + int32x4_t __s1_657 = __p1_657; \ + int32x4_t __rev1_657; __rev1_657 = __builtin_shufflevector(__s1_657, __s1_657, 3, 2, 1, 0); \ + __ret_657 = vqdmulhs_s32(__s0_657, __noswap_vgetq_lane_s32(__rev1_657, __p2_657)); \ + __ret_657; \ }) #endif #ifdef __LITTLE_ENDIAN__ 
-#define vqshrun_high_n_s64(__p0_699, __p1_699, __p2_699) __extension__ ({ \ - int32x4_t __ret_699; \ - int32x2_t __s0_699 = __p0_699; \ - int64x2_t __s1_699 = __p1_699; \ - __ret_699 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_699), (int32x2_t)(vqshrun_n_s64(__s1_699, __p2_699)))); \ - __ret_699; \ +#define vqdmulhh_laneq_s16(__p0_658, __p1_658, __p2_658) __extension__ ({ \ + int16_t __ret_658; \ + int16_t __s0_658 = __p0_658; \ + int16x8_t __s1_658 = __p1_658; \ + __ret_658 = vqdmulhh_s16(__s0_658, vgetq_lane_s16(__s1_658, __p2_658)); \ + __ret_658; \ }) #else -#define vqshrun_high_n_s64(__p0_700, __p1_700, __p2_700) __extension__ ({ \ - int32x4_t __ret_700; \ - int32x2_t __s0_700 = __p0_700; \ - int64x2_t __s1_700 = __p1_700; \ - int32x2_t __rev0_700; __rev0_700 = __builtin_shufflevector(__s0_700, __s0_700, 1, 0); \ - int64x2_t __rev1_700; __rev1_700 = __builtin_shufflevector(__s1_700, __s1_700, 1, 0); \ - __ret_700 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_700), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_700, __p2_700)))); \ - __ret_700 = __builtin_shufflevector(__ret_700, __ret_700, 3, 2, 1, 0); \ - __ret_700; \ +#define vqdmulhh_laneq_s16(__p0_659, __p1_659, __p2_659) __extension__ ({ \ + int16_t __ret_659; \ + int16_t __s0_659 = __p0_659; \ + int16x8_t __s1_659 = __p1_659; \ + int16x8_t __rev1_659; __rev1_659 = __builtin_shufflevector(__s1_659, __s1_659, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_659 = vqdmulhh_s16(__s0_659, __noswap_vgetq_lane_s16(__rev1_659, __p2_659)); \ + __ret_659; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqshrun_high_n_s16(__p0_701, __p1_701, __p2_701) __extension__ ({ \ - int8x16_t __ret_701; \ - int8x8_t __s0_701 = __p0_701; \ - int16x8_t __s1_701 = __p1_701; \ - __ret_701 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_701), (int8x8_t)(vqshrun_n_s16(__s1_701, __p2_701)))); \ - __ret_701; \ +#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + 
__ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ }) #else -#define vqshrun_high_n_s16(__p0_702, __p1_702, __p2_702) __extension__ ({ \ - int8x16_t __ret_702; \ - int8x8_t __s0_702 = __p0_702; \ - int16x8_t __s1_702 = __p1_702; \ - int8x8_t __rev0_702; __rev0_702 = __builtin_shufflevector(__s0_702, __s0_702, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_702; __rev1_702 = __builtin_shufflevector(__s1_702, __s1_702, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_702 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_702), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_702, __p2_702)))); \ - __ret_702 = __builtin_shufflevector(__ret_702, __ret_702, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_702; \ +#define vqdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ }) #endif -#define vqshruns_n_s32(__p0, __p1) __extension__ ({ \ - int16_t __ret; \ - int32_t __s0 = __p0; \ - __ret = (int16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \ +#ifdef __LITTLE_ENDIAN__ +#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ __ret; \ }) -#define vqshrund_n_s64(__p0, __p1) __extension__ ({ \ - int32_t __ret; \ - int64_t __s0 = __p0; \ - __ret = (int32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \ +#else +#define vqdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t 
__s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vqdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ __ret; \ }) -#define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \ - int8_t __ret; \ - int16_t __s0 = __p0; \ - __ret = (int8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \ +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \ __ret; \ }) -__ai uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1); - return __ret; -} -__ai uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1); - return __ret; -} -__ai uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1); - return __ret; -} -__ai uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1); - return __ret; -} -__ai int8_t vqsubb_s8(int8_t __p0, int8_t __p1) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1); +#else +#define vqdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \ + __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \ + __ret; \ +}) +#else +#define vqdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vqdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) +#endif + +__ai __attribute__((target("neon"))) int64_t vqdmulls_s32(int32_t __p0, int32_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqdmulls_s32(__p0, __p1); return __ret; } -__ai int32_t vqsubs_s32(int32_t __p0, int32_t __p1) { +__ai __attribute__((target("neon"))) int32_t vqdmullh_s16(int16_t __p0, int16_t __p1) { int32_t __ret; - __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1); + __ret = (int32_t) __builtin_neon_vqdmullh_s16(__p0, __p1); return __ret; } -__ai int64_t vqsubd_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1); +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = vqdmull_s32(vget_high_s32(__p0), vget_high_s32(__p1)); return __ret; } -__ai int16_t vqsubh_s16(int16_t __p0, int16_t __p1) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1); +#else +__ai __attribute__((target("neon"))) int64x2_t vqdmull_high_s32(int32x4_t __p0, int32x4_t __p1) { + int64x2_t __ret; + int32x4_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0), __noswap_vget_high_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } +#endif + #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 4); +__ai __attribute__((target("neon"))) int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = vqdmull_s16(vget_high_s16(__p0), vget_high_s16(__p1)); return __ret; } #else -__ai poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) { - poly8x8_t __ret; - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32x4_t vqdmull_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0), __noswap_vget_high_s16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) { - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 36); - return __ret; -} +#define vqdmull_high_lane_s32(__p0_660, __p1_660, __p2_660) __extension__ ({ \ + int64x2_t __ret_660; \ + int32x4_t __s0_660 = __p0_660; \ + int32x2_t 
__s1_660 = __p1_660; \ + __ret_660 = vqdmull_s32(vget_high_s32(__s0_660), splat_lane_s32(__s1_660, __p2_660)); \ + __ret_660; \ +}) #else -__ai poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) { - poly8x16_t __ret; - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqdmull_high_lane_s32(__p0_661, __p1_661, __p2_661) __extension__ ({ \ + int64x2_t __ret_661; \ + int32x4_t __s0_661 = __p0_661; \ + int32x2_t __s1_661 = __p1_661; \ + int32x4_t __rev0_661; __rev0_661 = __builtin_shufflevector(__s0_661, __s0_661, 3, 2, 1, 0); \ + int32x2_t __rev1_661; __rev1_661 = __builtin_shufflevector(__s1_661, __s1_661, 1, 0); \ + __ret_661 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_661), __noswap_splat_lane_s32(__rev1_661, __p2_661)); \ + __ret_661 = __builtin_shufflevector(__ret_661, __ret_661, 1, 0); \ + __ret_661; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} +#define vqdmull_high_lane_s16(__p0_662, __p1_662, __p2_662) __extension__ ({ \ + int32x4_t __ret_662; \ + int16x8_t __s0_662 = __p0_662; \ + int16x4_t __s1_662 = __p1_662; \ + __ret_662 = vqdmull_s16(vget_high_s16(__s0_662), splat_lane_s16(__s1_662, __p2_662)); \ + __ret_662; \ +}) #else -__ai uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqdmull_high_lane_s16(__p0_663, __p1_663, __p2_663) __extension__ ({ \ + int32x4_t __ret_663; \ + int16x8_t __s0_663 = __p0_663; \ + int16x4_t __s1_663 = __p1_663; \ + int16x8_t __rev0_663; __rev0_663 = __builtin_shufflevector(__s0_663, __s0_663, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1_663; __rev1_663 = __builtin_shufflevector(__s1_663, __s1_663, 3, 2, 1, 0); \ + __ret_663 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_663), __noswap_splat_lane_s16(__rev1_663, __p2_663)); \ + __ret_663 = __builtin_shufflevector(__ret_663, __ret_663, 3, 2, 1, 0); \ + __ret_663; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} +#define vqdmull_high_laneq_s32(__p0_664, __p1_664, __p2_664) __extension__ ({ \ + int64x2_t __ret_664; \ + int32x4_t __s0_664 = __p0_664; \ + int32x4_t __s1_664 = __p1_664; \ + __ret_664 = vqdmull_s32(vget_high_s32(__s0_664), splat_laneq_s32(__s1_664, __p2_664)); \ + __ret_664; \ +}) #else -__ai int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqdmull_high_laneq_s32(__p0_665, __p1_665, 
__p2_665) __extension__ ({ \ + int64x2_t __ret_665; \ + int32x4_t __s0_665 = __p0_665; \ + int32x4_t __s1_665 = __p1_665; \ + int32x4_t __rev0_665; __rev0_665 = __builtin_shufflevector(__s0_665, __s0_665, 3, 2, 1, 0); \ + int32x4_t __rev1_665; __rev1_665 = __builtin_shufflevector(__s1_665, __s1_665, 3, 2, 1, 0); \ + __ret_665 = __noswap_vqdmull_s32(__noswap_vget_high_s32(__rev0_665), __noswap_splat_laneq_s32(__rev1_665, __p2_665)); \ + __ret_665 = __builtin_shufflevector(__ret_665, __ret_665, 1, 0); \ + __ret_665; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} +#define vqdmull_high_laneq_s16(__p0_666, __p1_666, __p2_666) __extension__ ({ \ + int32x4_t __ret_666; \ + int16x8_t __s0_666 = __p0_666; \ + int16x8_t __s1_666 = __p1_666; \ + __ret_666 = vqdmull_s16(vget_high_s16(__s0_666), splat_laneq_s16(__s1_666, __p2_666)); \ + __ret_666; \ +}) #else -__ai uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqdmull_high_laneq_s16(__p0_667, __p1_667, __p2_667) __extension__ ({ \ + int32x4_t __ret_667; \ + int16x8_t __s0_667 = __p0_667; \ + int16x8_t __s1_667 = __p1_667; \ + int16x8_t __rev0_667; __rev0_667 = __builtin_shufflevector(__s0_667, __s0_667, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_667; __rev1_667 = __builtin_shufflevector(__s1_667, __s1_667, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_667 = __noswap_vqdmull_s16(__noswap_vget_high_s16(__rev0_667), 
__noswap_splat_laneq_s16(__rev1_667, __p2_667)); \ + __ret_667 = __builtin_shufflevector(__ret_667, __ret_667, 3, 2, 1, 0); \ + __ret_667; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 0); +__ai __attribute__((target("neon"))) int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) { + int64x2_t __ret; + __ret = vqdmull_n_s32(vget_high_s32(__p0), __p1); return __ret; } #else -__ai int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) { - int8x8_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int64x2_t vqdmull_high_n_s32(int32x4_t __p0, int32_t __p1) { + int64x2_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = __noswap_vqdmull_n_s32(__noswap_vget_high_s32(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 4); +__ai __attribute__((target("neon"))) int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) { + int32x4_t __ret; + __ret = vqdmull_n_s16(vget_high_s16(__p0), __p1); return __ret; } #else -__ai poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) { - poly8x8_t __ret; - poly8x16x2_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = 
__builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32x4_t vqdmull_high_n_s16(int16x8_t __p0, int16_t __p1) { + int32x4_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vqdmull_n_s16(__noswap_vget_high_s16(__rev0), __p1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) { - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 36); - return __ret; -} +#define vqdmulls_lane_s32(__p0_668, __p1_668, __p2_668) __extension__ ({ \ + int64_t __ret_668; \ + int32_t __s0_668 = __p0_668; \ + int32x2_t __s1_668 = __p1_668; \ + __ret_668 = vqdmulls_s32(__s0_668, vget_lane_s32(__s1_668, __p2_668)); \ + __ret_668; \ +}) #else -__ai poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) { - poly8x16_t __ret; - poly8x16x2_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 36); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define 
vqdmulls_lane_s32(__p0_669, __p1_669, __p2_669) __extension__ ({ \ + int64_t __ret_669; \ + int32_t __s0_669 = __p0_669; \ + int32x2_t __s1_669 = __p1_669; \ + int32x2_t __rev1_669; __rev1_669 = __builtin_shufflevector(__s1_669, __s1_669, 1, 0); \ + __ret_669 = vqdmulls_s32(__s0_669, __noswap_vget_lane_s32(__rev1_669, __p2_669)); \ + __ret_669; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 48); - return __ret; -} +#define vqdmullh_lane_s16(__p0_670, __p1_670, __p2_670) __extension__ ({ \ + int32_t __ret_670; \ + int16_t __s0_670 = __p0_670; \ + int16x4_t __s1_670 = __p1_670; \ + __ret_670 = vqdmullh_s16(__s0_670, vget_lane_s16(__s1_670, __p2_670)); \ + __ret_670; \ +}) #else -__ai uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16x2_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqdmullh_lane_s16(__p0_671, __p1_671, __p2_671) __extension__ ({ \ + int32_t __ret_671; \ + int16_t __s0_671 = __p0_671; \ + int16x4_t __s1_671 = __p1_671; \ + int16x4_t __rev1_671; __rev1_671 = __builtin_shufflevector(__s1_671, __s1_671, 3, 2, 1, 0); \ + __ret_671 = vqdmullh_s16(__s0_671, __noswap_vget_lane_s16(__rev1_671, __p2_671)); \ + __ret_671; \ +}) #endif #ifdef __LITTLE_ENDIAN__ 
-__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 32); - return __ret; -} +#define vqdmulls_laneq_s32(__p0_672, __p1_672, __p2_672) __extension__ ({ \ + int64_t __ret_672; \ + int32_t __s0_672 = __p0_672; \ + int32x4_t __s1_672 = __p1_672; \ + __ret_672 = vqdmulls_s32(__s0_672, vgetq_lane_s32(__s1_672, __p2_672)); \ + __ret_672; \ +}) #else -__ai int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) { - int8x16_t __ret; - int8x16x2_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqdmulls_laneq_s32(__p0_673, __p1_673, __p2_673) __extension__ ({ \ + int64_t __ret_673; \ + int32_t __s0_673 = __p0_673; \ + int32x4_t __s1_673 = __p1_673; \ + int32x4_t __rev1_673; __rev1_673 = __builtin_shufflevector(__s1_673, __s1_673, 3, 2, 1, 0); \ + __ret_673 = vqdmulls_s32(__s0_673, __noswap_vgetq_lane_s32(__rev1_673, __p2_673)); \ + __ret_673; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 16); - return __ret; -} +#define vqdmullh_laneq_s16(__p0_674, __p1_674, __p2_674) __extension__ ({ \ + int32_t __ret_674; \ + int16_t __s0_674 = __p0_674; \ + int16x8_t __s1_674 = __p1_674; \ + 
__ret_674 = vqdmullh_s16(__s0_674, vgetq_lane_s16(__s1_674, __p2_674)); \ + __ret_674; \ +}) #else -__ai uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x16x2_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqdmullh_laneq_s16(__p0_675, __p1_675, __p2_675) __extension__ ({ \ + int32_t __ret_675; \ + int16_t __s0_675 = __p0_675; \ + int16x8_t __s1_675 = __p1_675; \ + int16x8_t __rev1_675; __rev1_675 = __builtin_shufflevector(__s1_675, __s1_675, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_675 = vqdmullh_s16(__s0_675, __noswap_vgetq_lane_s16(__rev1_675, __p2_675)); \ + __ret_675; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 0); - return __ret; -} +#define vqdmull_laneq_s32(__p0_676, __p1_676, __p2_676) __extension__ ({ \ + int64x2_t __ret_676; \ + int32x2_t __s0_676 = __p0_676; \ + int32x4_t __s1_676 = __p1_676; \ + __ret_676 = vqdmull_s32(__s0_676, splat_laneq_s32(__s1_676, __p2_676)); \ + __ret_676; \ +}) #else -__ai int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) { - int8x8_t __ret; - int8x16x2_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - 
uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqdmull_laneq_s32(__p0_677, __p1_677, __p2_677) __extension__ ({ \ + int64x2_t __ret_677; \ + int32x2_t __s0_677 = __p0_677; \ + int32x4_t __s1_677 = __p1_677; \ + int32x2_t __rev0_677; __rev0_677 = __builtin_shufflevector(__s0_677, __s0_677, 1, 0); \ + int32x4_t __rev1_677; __rev1_677 = __builtin_shufflevector(__s1_677, __s1_677, 3, 2, 1, 0); \ + __ret_677 = __noswap_vqdmull_s32(__rev0_677, __noswap_splat_laneq_s32(__rev1_677, __p2_677)); \ + __ret_677 = __builtin_shufflevector(__ret_677, __ret_677, 1, 0); \ + __ret_677; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 4); - return __ret; -} +#define vqdmull_laneq_s16(__p0_678, __p1_678, __p2_678) __extension__ ({ \ + int32x4_t __ret_678; \ + int16x4_t __s0_678 = __p0_678; \ + int16x8_t __s1_678 = __p1_678; \ + __ret_678 = vqdmull_s16(__s0_678, splat_laneq_s16(__s1_678, __p2_678)); \ + __ret_678; \ +}) #else -__ai poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) { - poly8x8_t __ret; - poly8x16x3_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x8_t) 
__builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqdmull_laneq_s16(__p0_679, __p1_679, __p2_679) __extension__ ({ \ + int32x4_t __ret_679; \ + int16x4_t __s0_679 = __p0_679; \ + int16x8_t __s1_679 = __p1_679; \ + int16x4_t __rev0_679; __rev0_679 = __builtin_shufflevector(__s0_679, __s0_679, 3, 2, 1, 0); \ + int16x8_t __rev1_679; __rev1_679 = __builtin_shufflevector(__s1_679, __s1_679, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_679 = __noswap_vqdmull_s16(__rev0_679, __noswap_splat_laneq_s16(__rev1_679, __p2_679)); \ + __ret_679 = __builtin_shufflevector(__ret_679, __ret_679, 3, 2, 1, 0); \ + __ret_679; \ +}) #endif -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) { - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 36); +__ai __attribute__((target("neon"))) int16_t vqmovns_s32(int32_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqmovns_s32(__p0); return __ret; } -#else -__ai poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) { - poly8x16_t __ret; - poly8x16x3_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 36); - __ret = 
__builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32_t vqmovnd_s64(int64_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqmovnd_s64(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 48); +__ai __attribute__((target("neon"))) int8_t vqmovnh_s16(int16_t __p0) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqmovnh_s16(__p0); return __ret; } -#else -__ai uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16x3_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint16_t vqmovns_u32(uint32_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqmovns_u32(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 32); +__ai __attribute__((target("neon"))) 
uint32_t vqmovnd_u64(uint64_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqmovnd_u64(__p0); return __ret; } -#else -__ai int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) { - int8x16_t __ret; - int8x16x3_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8_t vqmovnh_u16(uint16_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqmovnh_u16(__p0); return __ret; } -#endif - #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 16); +__ai __attribute__((target("neon"))) uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vqmovn_u32(__p1)); return __ret; } #else -__ai uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x16x3_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], 
__p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 16); +__ai __attribute__((target("neon"))) uint16x8_t vqmovn_high_u32(uint16x4_t __p0, uint32x4_t __p1) { + uint16x8_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vcombine_u16(__rev0, __noswap_vqmovn_u32(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 0); +__ai __attribute__((target("neon"))) uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vqmovn_u64(__p1)); return __ret; } #else -__ai int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) { - int8x8_t __ret; - int8x16x3_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) 
uint32x4_t vqmovn_high_u64(uint32x2_t __p0, uint64x2_t __p1) { + uint32x4_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vcombine_u32(__rev0, __noswap_vqmovn_u64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 4); +__ai __attribute__((target("neon"))) uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vqmovn_u16(__p1)); return __ret; } #else -__ai poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) { - poly8x8_t __ret; - poly8x16x4_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8x16_t vqmovn_high_u16(uint8x8_t __p0, uint16x8_t __p1) { + uint8x16_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_u8(__rev0, __noswap_vqmovn_u16(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) { - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 36); +__ai __attribute__((target("neon"))) int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vqmovn_s32(__p1)); return __ret; } #else -__ai poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) { - poly8x16_t __ret; - poly8x16x4_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 36); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int16x8_t vqmovn_high_s32(int16x4_t __p0, int32x4_t __p1) { + int16x8_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 
3, 2, 1, 0); + __ret = __noswap_vcombine_s16(__rev0, __noswap_vqmovn_s32(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 48); +__ai __attribute__((target("neon"))) int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vqmovn_s64(__p1)); return __ret; } #else -__ai uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16x4_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32x4_t vqmovn_high_s64(int32x2_t __p0, int64x2_t __p1) { + int32x4_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vcombine_s32(__rev0, __noswap_vqmovn_s64(__rev1)); + __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 32); + __ret = vcombine_s8(__p0, vqmovn_s16(__p1)); return __ret; } #else -__ai int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vqmovn_high_s16(int8x8_t __p0, int16x8_t __p1) { int8x16_t __ret; - int8x16x4_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 32); + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_s8(__rev0, __noswap_vqmovn_s16(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = 
(uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x16x4_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint16_t vqmovuns_s32(int32_t __p0) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqmovuns_s32(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 0); +__ai __attribute__((target("neon"))) uint32_t vqmovund_s64(int64_t __p0) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqmovund_s64(__p0); return __ret; } -#else -__ai int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) { - int8x8_t __ret; - int8x16x4_t __rev0; - __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[1] = 
__builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8_t vqmovunh_s16(int16_t __p0) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqmovunh_s16(__p0); return __ret; } -#endif - #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 4); +__ai __attribute__((target("neon"))) uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) { + uint16x8_t __ret; + __ret = vcombine_u16((uint16x4_t)(__p0), vqmovun_s32(__p1)); return __ret; } #else -__ai poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) { - poly8x8_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 4); +__ai __attribute__((target("neon"))) uint16x8_t vqmovun_high_s32(uint16x4_t __p0, int32x4_t __p1) { + uint16x8_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t 
__rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vcombine_u16((uint16x4_t)(__rev0), __noswap_vqmovun_s32(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) { - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36); +__ai __attribute__((target("neon"))) uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) { + uint32x4_t __ret; + __ret = vcombine_u32((uint32x2_t)(__p0), vqmovun_s64(__p1)); return __ret; } #else -__ai poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) { - poly8x16_t __ret; - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint32x4_t vqmovun_high_s64(uint32x2_t __p0, int64x2_t __p1) { + uint32x4_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __noswap_vcombine_u32((uint32x2_t)(__rev0), __noswap_vqmovun_s64(__rev1)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) { 
uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); + __ret = vcombine_u8((uint8x8_t)(__p0), vqmovun_s16(__p1)); return __ret; } #else -__ai uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint8x16_t vqmovun_high_s16(uint8x8_t __p0, int16x8_t __p1) { uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_u8((uint8x8_t)(__rev0), __noswap_vqmovun_s16(__rev1)); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); +__ai __attribute__((target("neon"))) int64x2_t vqnegq_s64(int64x2_t __p0) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__p0, 35); return __ret; } #else -__ai int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 
3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int64x2_t vqnegq_s64(int64x2_t __p0) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (int64x2_t) __builtin_neon_vqnegq_v((int8x16_t)__rev0, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 16); - return __ret; -} -#else -__ai uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int64x1_t vqneg_s64(int64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vqneg_v((int8x8_t)__p0, 3); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 0); +__ai __attribute__((target("neon"))) int8_t vqnegb_s8(int8_t __p0) { + int8_t __ret; + __ret = (int8_t) 
__builtin_neon_vqnegb_s8(__p0); return __ret; } -#else -__ai int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32_t vqnegs_s32(int32_t __p0) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqnegs_s32(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 4); +__ai __attribute__((target("neon"))) int64_t vqnegd_s64(int64_t __p0) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqnegd_s64(__p0); return __ret; } -#else -__ai poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) { - poly8x8_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16x2_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai 
__attribute__((target("neon"))) int16_t vqnegh_s16(int16_t __p0) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqnegh_s16(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) { - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 36); +__ai __attribute__((target("neon"))) int32_t vqrdmulhs_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqrdmulhs_s32(__p0, __p1); return __ret; } -#else -__ai poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) { - poly8x16_t __ret; - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16x2_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 36); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int16_t vqrdmulhh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqrdmulhh_s16(__p0, __p1); return __ret; } -#endif - #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 48); - return __ret; -} +#define vqrdmulhq_lane_s32(__p0, __p1, 
__p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 2); \ + __ret; \ +}) #else -__ai uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16x2_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqrdmulhq_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 32); - return __ret; -} +#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + 
__ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) #else -__ai int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16x2_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqrdmulhq_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_lane_v((int8x16_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 16); - return __ret; -} +#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, 
__p2, 2); \ + __ret; \ +}) #else -__ai uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16x2_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqrdmulh_lane_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x2_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 0); - return __ret; -} +#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 1); \ + __ret; \ +}) #else -__ai int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16x2_t 
__rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqrdmulh_lane_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x4_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vqrdmulh_lane_v((int8x8_t)__rev0, (int8x8_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 4); - return __ret; -} +#define vqrdmulhs_lane_s32(__p0_680, __p1_680, __p2_680) __extension__ ({ \ + int32_t __ret_680; \ + int32_t __s0_680 = __p0_680; \ + int32x2_t __s1_680 = __p1_680; \ + __ret_680 = vqrdmulhs_s32(__s0_680, vget_lane_s32(__s1_680, __p2_680)); \ + __ret_680; \ +}) #else -__ai poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) { - poly8x8_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16x3_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = 
__builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqrdmulhs_lane_s32(__p0_681, __p1_681, __p2_681) __extension__ ({ \ + int32_t __ret_681; \ + int32_t __s0_681 = __p0_681; \ + int32x2_t __s1_681 = __p1_681; \ + int32x2_t __rev1_681; __rev1_681 = __builtin_shufflevector(__s1_681, __s1_681, 1, 0); \ + __ret_681 = vqrdmulhs_s32(__s0_681, __noswap_vget_lane_s32(__rev1_681, __p2_681)); \ + __ret_681; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) { - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 36); - return __ret; -} +#define vqrdmulhh_lane_s16(__p0_682, __p1_682, __p2_682) __extension__ ({ \ + int16_t __ret_682; \ + int16_t __s0_682 = __p0_682; \ + int16x4_t __s1_682 = __p1_682; \ + __ret_682 = vqrdmulhh_s16(__s0_682, vget_lane_s16(__s1_682, __p2_682)); \ + __ret_682; \ +}) #else -__ai poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) { - poly8x16_t __ret; - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16x3_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 
3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 36); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqrdmulhh_lane_s16(__p0_683, __p1_683, __p2_683) __extension__ ({ \ + int16_t __ret_683; \ + int16_t __s0_683 = __p0_683; \ + int16x4_t __s1_683 = __p1_683; \ + int16x4_t __rev1_683; __rev1_683 = __builtin_shufflevector(__s1_683, __s1_683, 3, 2, 1, 0); \ + __ret_683 = vqrdmulhh_s16(__s0_683, __noswap_vget_lane_s16(__rev1_683, __p2_683)); \ + __ret_683; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 48); - return __ret; -} +#define vqrdmulhs_laneq_s32(__p0_684, __p1_684, __p2_684) __extension__ ({ \ + int32_t __ret_684; \ + int32_t __s0_684 = __p0_684; \ + int32x4_t __s1_684 = __p1_684; \ + __ret_684 = vqrdmulhs_s32(__s0_684, vgetq_lane_s32(__s1_684, __p2_684)); \ + __ret_684; \ +}) #else -__ai uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16x3_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - 
__rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqrdmulhs_laneq_s32(__p0_685, __p1_685, __p2_685) __extension__ ({ \ + int32_t __ret_685; \ + int32_t __s0_685 = __p0_685; \ + int32x4_t __s1_685 = __p1_685; \ + int32x4_t __rev1_685; __rev1_685 = __builtin_shufflevector(__s1_685, __s1_685, 3, 2, 1, 0); \ + __ret_685 = vqrdmulhs_s32(__s0_685, __noswap_vgetq_lane_s32(__rev1_685, __p2_685)); \ + __ret_685; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 32); - return __ret; -} +#define vqrdmulhh_laneq_s16(__p0_686, __p1_686, __p2_686) __extension__ ({ \ + int16_t __ret_686; \ + int16_t __s0_686 = __p0_686; \ + int16x8_t __s1_686 = __p1_686; \ + __ret_686 = vqrdmulhh_s16(__s0_686, vgetq_lane_s16(__s1_686, __p2_686)); \ + __ret_686; \ +}) #else -__ai int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16x3_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = 
__builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqrdmulhh_laneq_s16(__p0_687, __p1_687, __p2_687) __extension__ ({ \ + int16_t __ret_687; \ + int16_t __s0_687 = __p0_687; \ + int16x8_t __s1_687 = __p1_687; \ + int16x8_t __rev1_687; __rev1_687 = __builtin_shufflevector(__s1_687, __s1_687, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_687 = vqrdmulhh_s16(__s0_687, __noswap_vgetq_lane_s16(__rev1_687, __p2_687)); \ + __ret_687; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 16); - return __ret; -} +#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 34); \ + __ret; \ +}) #else -__ai uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16x3_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 
14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqrdmulhq_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x4_t __ret; \ + int32x4_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x4_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 34); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 0); - return __ret; -} +#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 33); \ + __ret; \ +}) #else -__ai int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16x3_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 
5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqrdmulhq_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x8_t __ret; \ + int16x8_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x8_t) __builtin_neon_vqrdmulhq_laneq_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 33); \ + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 4); - return __ret; -} +#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 2); \ + __ret; \ +}) #else -__ai poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) { - poly8x8_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16x4_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 
15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqrdmulh_laneq_s32(__p0, __p1, __p2) __extension__ ({ \ + int32x2_t __ret; \ + int32x2_t __s0 = __p0; \ + int32x4_t __s1 = __p1; \ + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ + __ret = (int32x2_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) { - poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 36); - return __ret; -} +#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__s0, (int8x16_t)__s1, __p2, 1); \ + __ret; \ +}) #else -__ai poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) { - poly8x16_t __ret; - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16x4_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - 
__rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 36); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqrdmulh_laneq_s16(__p0, __p1, __p2) __extension__ ({ \ + int16x4_t __ret; \ + int16x4_t __s0 = __p0; \ + int16x8_t __s1 = __p1; \ + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret = (int16x4_t) __builtin_neon_vqrdmulh_laneq_v((int8x8_t)__rev0, (int8x16_t)__rev1, __p2, 1); \ + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ + __ret; \ +}) #endif -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 48); +__ai __attribute__((target("neon"))) uint8_t vqrshlb_u8(uint8_t __p0, int8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqrshlb_u8(__p0, __p1); return __ret; } -#else -__ai uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 
8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16x4_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint32_t vqrshls_u32(uint32_t __p0, int32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqrshls_u32(__p0, __p1); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 32); +__ai __attribute__((target("neon"))) uint64_t vqrshld_u64(uint64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vqrshld_u64(__p0, __p1); return __ret; } -#else -__ai int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16x4_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 
6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint16_t vqrshlh_u16(uint16_t __p0, int16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqrshlh_u16(__p0, __p1); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 16); +__ai __attribute__((target("neon"))) int8_t vqrshlb_s8(int8_t __p0, int8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqrshlb_s8(__p0, __p1); return __ret; } -#else -__ai uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16x4_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], 
__p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32_t vqrshls_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqrshls_s32(__p0, __p1); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 0); +__ai __attribute__((target("neon"))) int64_t vqrshld_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqrshld_s64(__p0, __p1); return __ret; } -#else -__ai int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16x4_t __rev1; - __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 
2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int16_t vqrshlh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqrshlh_s16(__p0, __p1); return __ret; } -#endif - #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint16x8_t __ret; - __ret = vcombine_u16(__p0, vraddhn_u32(__p1, __p2)); - return __ret; -} +#define vqrshrn_high_n_u32(__p0_688, __p1_688, __p2_688) __extension__ ({ \ + uint16x8_t __ret_688; \ + uint16x4_t __s0_688 = __p0_688; \ + uint32x4_t __s1_688 = __p1_688; \ + __ret_688 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_688), (uint16x4_t)(vqrshrn_n_u32(__s1_688, __p2_688)))); \ + __ret_688; \ +}) #else -__ai uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint16x8_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = __noswap_vcombine_u16(__rev0, __noswap_vraddhn_u32(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqrshrn_high_n_u32(__p0_689, __p1_689, __p2_689) __extension__ ({ \ + uint16x8_t __ret_689; \ + uint16x4_t __s0_689 = __p0_689; \ + uint32x4_t __s1_689 = __p1_689; \ + uint16x4_t __rev0_689; __rev0_689 = __builtin_shufflevector(__s0_689, __s0_689, 3, 2, 1, 0); \ + uint32x4_t __rev1_689; __rev1_689 = __builtin_shufflevector(__s1_689, __s1_689, 3, 2, 1, 0); \ + __ret_689 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_689), (uint16x4_t)(__noswap_vqrshrn_n_u32(__rev1_689, 
__p2_689)))); \ + __ret_689 = __builtin_shufflevector(__ret_689, __ret_689, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_689; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint32x4_t __ret; - __ret = vcombine_u32(__p0, vraddhn_u64(__p1, __p2)); - return __ret; -} +#define vqrshrn_high_n_u64(__p0_690, __p1_690, __p2_690) __extension__ ({ \ + uint32x4_t __ret_690; \ + uint32x2_t __s0_690 = __p0_690; \ + uint64x2_t __s1_690 = __p1_690; \ + __ret_690 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_690), (uint32x2_t)(vqrshrn_n_u64(__s1_690, __p2_690)))); \ + __ret_690; \ +}) #else -__ai uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint32x4_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = __noswap_vcombine_u32(__rev0, __noswap_vraddhn_u64(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vqrshrn_high_n_u64(__p0_691, __p1_691, __p2_691) __extension__ ({ \ + uint32x4_t __ret_691; \ + uint32x2_t __s0_691 = __p0_691; \ + uint64x2_t __s1_691 = __p1_691; \ + uint32x2_t __rev0_691; __rev0_691 = __builtin_shufflevector(__s0_691, __s0_691, 1, 0); \ + uint64x2_t __rev1_691; __rev1_691 = __builtin_shufflevector(__s1_691, __s1_691, 1, 0); \ + __ret_691 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_691), (uint32x2_t)(__noswap_vqrshrn_n_u64(__rev1_691, __p2_691)))); \ + __ret_691 = __builtin_shufflevector(__ret_691, __ret_691, 3, 2, 1, 0); \ + __ret_691; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint8x16_t __ret; - __ret = vcombine_u8(__p0, vraddhn_u16(__p1, __p2)); - return __ret; -} +#define vqrshrn_high_n_u16(__p0_692, __p1_692, __p2_692) 
__extension__ ({ \ + uint8x16_t __ret_692; \ + uint8x8_t __s0_692 = __p0_692; \ + uint16x8_t __s1_692 = __p1_692; \ + __ret_692 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_692), (uint8x8_t)(vqrshrn_n_u16(__s1_692, __p2_692)))); \ + __ret_692; \ +}) #else -__ai uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint8x16_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vcombine_u8(__rev0, __noswap_vraddhn_u16(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqrshrn_high_n_u16(__p0_693, __p1_693, __p2_693) __extension__ ({ \ + uint8x16_t __ret_693; \ + uint8x8_t __s0_693 = __p0_693; \ + uint16x8_t __s1_693 = __p1_693; \ + uint8x8_t __rev0_693; __rev0_693 = __builtin_shufflevector(__s0_693, __s0_693, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_693; __rev1_693 = __builtin_shufflevector(__s1_693, __s1_693, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_693 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_693), (uint8x8_t)(__noswap_vqrshrn_n_u16(__rev1_693, __p2_693)))); \ + __ret_693 = __builtin_shufflevector(__ret_693, __ret_693, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_693; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int16x8_t __ret; - __ret = vcombine_s16(__p0, vraddhn_s32(__p1, __p2)); - return __ret; -} +#define vqrshrn_high_n_s32(__p0_694, __p1_694, __p2_694) __extension__ ({ \ + int16x8_t __ret_694; \ + int16x4_t __s0_694 = __p0_694; \ + int32x4_t __s1_694 = __p1_694; \ + __ret_694 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_694), (int16x4_t)(vqrshrn_n_s32(__s1_694, __p2_694)))); \ + 
__ret_694; \ +}) #else -__ai int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int16x8_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = __noswap_vcombine_s16(__rev0, __noswap_vraddhn_s32(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqrshrn_high_n_s32(__p0_695, __p1_695, __p2_695) __extension__ ({ \ + int16x8_t __ret_695; \ + int16x4_t __s0_695 = __p0_695; \ + int32x4_t __s1_695 = __p1_695; \ + int16x4_t __rev0_695; __rev0_695 = __builtin_shufflevector(__s0_695, __s0_695, 3, 2, 1, 0); \ + int32x4_t __rev1_695; __rev1_695 = __builtin_shufflevector(__s1_695, __s1_695, 3, 2, 1, 0); \ + __ret_695 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_695), (int16x4_t)(__noswap_vqrshrn_n_s32(__rev1_695, __p2_695)))); \ + __ret_695 = __builtin_shufflevector(__ret_695, __ret_695, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_695; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { - int32x4_t __ret; - __ret = vcombine_s32(__p0, vraddhn_s64(__p1, __p2)); - return __ret; -} +#define vqrshrn_high_n_s64(__p0_696, __p1_696, __p2_696) __extension__ ({ \ + int32x4_t __ret_696; \ + int32x2_t __s0_696 = __p0_696; \ + int64x2_t __s1_696 = __p1_696; \ + __ret_696 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_696), (int32x2_t)(vqrshrn_n_s64(__s1_696, __p2_696)))); \ + __ret_696; \ +}) #else -__ai int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { - int32x4_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = 
__noswap_vcombine_s32(__rev0, __noswap_vraddhn_s64(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vqrshrn_high_n_s64(__p0_697, __p1_697, __p2_697) __extension__ ({ \ + int32x4_t __ret_697; \ + int32x2_t __s0_697 = __p0_697; \ + int64x2_t __s1_697 = __p1_697; \ + int32x2_t __rev0_697; __rev0_697 = __builtin_shufflevector(__s0_697, __s0_697, 1, 0); \ + int64x2_t __rev1_697; __rev1_697 = __builtin_shufflevector(__s1_697, __s1_697, 1, 0); \ + __ret_697 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_697), (int32x2_t)(__noswap_vqrshrn_n_s64(__rev1_697, __p2_697)))); \ + __ret_697 = __builtin_shufflevector(__ret_697, __ret_697, 3, 2, 1, 0); \ + __ret_697; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int8x16_t __ret; - __ret = vcombine_s8(__p0, vraddhn_s16(__p1, __p2)); - return __ret; -} +#define vqrshrn_high_n_s16(__p0_698, __p1_698, __p2_698) __extension__ ({ \ + int8x16_t __ret_698; \ + int8x8_t __s0_698 = __p0_698; \ + int16x8_t __s1_698 = __p1_698; \ + __ret_698 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_698), (int8x8_t)(vqrshrn_n_s16(__s1_698, __p2_698)))); \ + __ret_698; \ +}) #else -__ai int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int8x16_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vcombine_s8(__rev0, __noswap_vraddhn_s16(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vqrshrn_high_n_s16(__p0_699, __p1_699, __p2_699) __extension__ ({ \ + int8x16_t __ret_699; \ + int8x8_t __s0_699 = __p0_699; \ + int16x8_t __s1_699 = __p1_699; \ + int8x8_t 
__rev0_699; __rev0_699 = __builtin_shufflevector(__s0_699, __s0_699, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_699; __rev1_699 = __builtin_shufflevector(__s1_699, __s1_699, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_699 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_699), (int8x8_t)(__noswap_vqrshrn_n_s16(__rev1_699, __p2_699)))); \ + __ret_699 = __builtin_shufflevector(__ret_699, __ret_699, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_699; \ +}) #endif -#ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vrbit_p8(poly8x8_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 4); - return __ret; -} -#else -__ai poly8x8_t vrbit_p8(poly8x8_t __p0) { - poly8x8_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 4); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +#define vqrshrns_n_u32(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint32_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vqrshrns_n_u32(__s0, __p1); \ + __ret; \ +}) +#define vqrshrnd_n_u64(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint64_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vqrshrnd_n_u64(__s0, __p1); \ + __ret; \ +}) +#define vqrshrnh_n_u16(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint16_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vqrshrnh_n_u16(__s0, __p1); \ + __ret; \ +}) +#define vqrshrns_n_s32(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int32_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vqrshrns_n_s32(__s0, __p1); \ + __ret; \ +}) +#define vqrshrnd_n_s64(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vqrshrnd_n_s64(__s0, __p1); \ + __ret; \ +}) +#define vqrshrnh_n_s16(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int16_t __s0 = __p0; \ + __ret = (int8_t) 
__builtin_neon_vqrshrnh_n_s16(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vqrshrun_high_n_s32(__p0_700, __p1_700, __p2_700) __extension__ ({ \ + int16x8_t __ret_700; \ + int16x4_t __s0_700 = __p0_700; \ + int32x4_t __s1_700 = __p1_700; \ + __ret_700 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_700), (int16x4_t)(vqrshrun_n_s32(__s1_700, __p2_700)))); \ + __ret_700; \ +}) +#else +#define vqrshrun_high_n_s32(__p0_701, __p1_701, __p2_701) __extension__ ({ \ + int16x8_t __ret_701; \ + int16x4_t __s0_701 = __p0_701; \ + int32x4_t __s1_701 = __p1_701; \ + int16x4_t __rev0_701; __rev0_701 = __builtin_shufflevector(__s0_701, __s0_701, 3, 2, 1, 0); \ + int32x4_t __rev1_701; __rev1_701 = __builtin_shufflevector(__s1_701, __s1_701, 3, 2, 1, 0); \ + __ret_701 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_701), (int16x4_t)(__noswap_vqrshrun_n_s32(__rev1_701, __p2_701)))); \ + __ret_701 = __builtin_shufflevector(__ret_701, __ret_701, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_701; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqrshrun_high_n_s64(__p0_702, __p1_702, __p2_702) __extension__ ({ \ + int32x4_t __ret_702; \ + int32x2_t __s0_702 = __p0_702; \ + int64x2_t __s1_702 = __p1_702; \ + __ret_702 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_702), (int32x2_t)(vqrshrun_n_s64(__s1_702, __p2_702)))); \ + __ret_702; \ +}) +#else +#define vqrshrun_high_n_s64(__p0_703, __p1_703, __p2_703) __extension__ ({ \ + int32x4_t __ret_703; \ + int32x2_t __s0_703 = __p0_703; \ + int64x2_t __s1_703 = __p1_703; \ + int32x2_t __rev0_703; __rev0_703 = __builtin_shufflevector(__s0_703, __s0_703, 1, 0); \ + int64x2_t __rev1_703; __rev1_703 = __builtin_shufflevector(__s1_703, __s1_703, 1, 0); \ + __ret_703 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_703), (int32x2_t)(__noswap_vqrshrun_n_s64(__rev1_703, __p2_703)))); \ + __ret_703 = __builtin_shufflevector(__ret_703, __ret_703, 3, 2, 1, 0); \ + __ret_703; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define 
vqrshrun_high_n_s16(__p0_704, __p1_704, __p2_704) __extension__ ({ \ + int8x16_t __ret_704; \ + int8x8_t __s0_704 = __p0_704; \ + int16x8_t __s1_704 = __p1_704; \ + __ret_704 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_704), (int8x8_t)(vqrshrun_n_s16(__s1_704, __p2_704)))); \ + __ret_704; \ +}) +#else +#define vqrshrun_high_n_s16(__p0_705, __p1_705, __p2_705) __extension__ ({ \ + int8x16_t __ret_705; \ + int8x8_t __s0_705 = __p0_705; \ + int16x8_t __s1_705 = __p1_705; \ + int8x8_t __rev0_705; __rev0_705 = __builtin_shufflevector(__s0_705, __s0_705, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_705; __rev1_705 = __builtin_shufflevector(__s1_705, __s1_705, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_705 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_705), (int8x8_t)(__noswap_vqrshrun_n_s16(__rev1_705, __p2_705)))); \ + __ret_705 = __builtin_shufflevector(__ret_705, __ret_705, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_705; \ +}) +#endif + +#define vqrshruns_n_s32(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + int32_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vqrshruns_n_s32(__s0, __p1); \ + __ret; \ +}) +#define vqrshrund_n_s64(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vqrshrund_n_s64(__s0, __p1); \ + __ret; \ +}) +#define vqrshrunh_n_s16(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + int16_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vqrshrunh_n_s16(__s0, __p1); \ + __ret; \ +}) +__ai __attribute__((target("neon"))) uint8_t vqshlb_u8(uint8_t __p0, int8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqshlb_u8(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vqshls_u32(uint32_t __p0, int32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqshls_u32(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vqshld_u64(uint64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = 
(uint64_t) __builtin_neon_vqshld_u64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint16_t vqshlh_u16(uint16_t __p0, int16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqshlh_u16(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int8_t vqshlb_s8(int8_t __p0, int8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqshlb_s8(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int32_t vqshls_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqshls_s32(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vqshld_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqshld_s64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int16_t vqshlh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqshlh_s16(__p0, __p1); + return __ret; +} +#define vqshlb_n_u8(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint8_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vqshlb_n_u8(__s0, __p1); \ + __ret; \ +}) +#define vqshls_n_u32(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint32_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vqshls_n_u32(__s0, __p1); \ + __ret; \ +}) +#define vqshld_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vqshld_n_u64(__s0, __p1); \ + __ret; \ +}) +#define vqshlh_n_u16(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint16_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vqshlh_n_u16(__s0, __p1); \ + __ret; \ +}) +#define vqshlb_n_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vqshlb_n_s8(__s0, __p1); \ + __ret; \ +}) +#define vqshls_n_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + __ret = (int32_t) 
__builtin_neon_vqshls_n_s32(__s0, __p1); \ + __ret; \ +}) +#define vqshld_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vqshld_n_s64(__s0, __p1); \ + __ret; \ +}) +#define vqshlh_n_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vqshlh_n_s16(__s0, __p1); \ + __ret; \ +}) +#define vqshlub_n_s8(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int8_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vqshlub_n_s8(__s0, __p1); \ + __ret; \ +}) +#define vqshlus_n_s32(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int32_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vqshlus_n_s32(__s0, __p1); \ + __ret; \ +}) +#define vqshlud_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vqshlud_n_s64(__s0, __p1); \ + __ret; \ +}) +#define vqshluh_n_s16(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int16_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vqshluh_n_s16(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_u32(__p0_706, __p1_706, __p2_706) __extension__ ({ \ + uint16x8_t __ret_706; \ + uint16x4_t __s0_706 = __p0_706; \ + uint32x4_t __s1_706 = __p1_706; \ + __ret_706 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_706), (uint16x4_t)(vqshrn_n_u32(__s1_706, __p2_706)))); \ + __ret_706; \ +}) +#else +#define vqshrn_high_n_u32(__p0_707, __p1_707, __p2_707) __extension__ ({ \ + uint16x8_t __ret_707; \ + uint16x4_t __s0_707 = __p0_707; \ + uint32x4_t __s1_707 = __p1_707; \ + uint16x4_t __rev0_707; __rev0_707 = __builtin_shufflevector(__s0_707, __s0_707, 3, 2, 1, 0); \ + uint32x4_t __rev1_707; __rev1_707 = __builtin_shufflevector(__s1_707, __s1_707, 3, 2, 1, 0); \ + __ret_707 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_707), (uint16x4_t)(__noswap_vqshrn_n_u32(__rev1_707, __p2_707)))); \ + __ret_707 = 
__builtin_shufflevector(__ret_707, __ret_707, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_707; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_u64(__p0_708, __p1_708, __p2_708) __extension__ ({ \ + uint32x4_t __ret_708; \ + uint32x2_t __s0_708 = __p0_708; \ + uint64x2_t __s1_708 = __p1_708; \ + __ret_708 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_708), (uint32x2_t)(vqshrn_n_u64(__s1_708, __p2_708)))); \ + __ret_708; \ +}) +#else +#define vqshrn_high_n_u64(__p0_709, __p1_709, __p2_709) __extension__ ({ \ + uint32x4_t __ret_709; \ + uint32x2_t __s0_709 = __p0_709; \ + uint64x2_t __s1_709 = __p1_709; \ + uint32x2_t __rev0_709; __rev0_709 = __builtin_shufflevector(__s0_709, __s0_709, 1, 0); \ + uint64x2_t __rev1_709; __rev1_709 = __builtin_shufflevector(__s1_709, __s1_709, 1, 0); \ + __ret_709 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_709), (uint32x2_t)(__noswap_vqshrn_n_u64(__rev1_709, __p2_709)))); \ + __ret_709 = __builtin_shufflevector(__ret_709, __ret_709, 3, 2, 1, 0); \ + __ret_709; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_u16(__p0_710, __p1_710, __p2_710) __extension__ ({ \ + uint8x16_t __ret_710; \ + uint8x8_t __s0_710 = __p0_710; \ + uint16x8_t __s1_710 = __p1_710; \ + __ret_710 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_710), (uint8x8_t)(vqshrn_n_u16(__s1_710, __p2_710)))); \ + __ret_710; \ +}) +#else +#define vqshrn_high_n_u16(__p0_711, __p1_711, __p2_711) __extension__ ({ \ + uint8x16_t __ret_711; \ + uint8x8_t __s0_711 = __p0_711; \ + uint16x8_t __s1_711 = __p1_711; \ + uint8x8_t __rev0_711; __rev0_711 = __builtin_shufflevector(__s0_711, __s0_711, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_711; __rev1_711 = __builtin_shufflevector(__s1_711, __s1_711, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_711 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_711), (uint8x8_t)(__noswap_vqshrn_n_u16(__rev1_711, __p2_711)))); \ + __ret_711 = __builtin_shufflevector(__ret_711, __ret_711, 15, 14, 13, 12, 11, 10, 9, 8, 7, 
6, 5, 4, 3, 2, 1, 0); \ + __ret_711; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_s32(__p0_712, __p1_712, __p2_712) __extension__ ({ \ + int16x8_t __ret_712; \ + int16x4_t __s0_712 = __p0_712; \ + int32x4_t __s1_712 = __p1_712; \ + __ret_712 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_712), (int16x4_t)(vqshrn_n_s32(__s1_712, __p2_712)))); \ + __ret_712; \ +}) +#else +#define vqshrn_high_n_s32(__p0_713, __p1_713, __p2_713) __extension__ ({ \ + int16x8_t __ret_713; \ + int16x4_t __s0_713 = __p0_713; \ + int32x4_t __s1_713 = __p1_713; \ + int16x4_t __rev0_713; __rev0_713 = __builtin_shufflevector(__s0_713, __s0_713, 3, 2, 1, 0); \ + int32x4_t __rev1_713; __rev1_713 = __builtin_shufflevector(__s1_713, __s1_713, 3, 2, 1, 0); \ + __ret_713 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_713), (int16x4_t)(__noswap_vqshrn_n_s32(__rev1_713, __p2_713)))); \ + __ret_713 = __builtin_shufflevector(__ret_713, __ret_713, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_713; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_s64(__p0_714, __p1_714, __p2_714) __extension__ ({ \ + int32x4_t __ret_714; \ + int32x2_t __s0_714 = __p0_714; \ + int64x2_t __s1_714 = __p1_714; \ + __ret_714 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_714), (int32x2_t)(vqshrn_n_s64(__s1_714, __p2_714)))); \ + __ret_714; \ +}) +#else +#define vqshrn_high_n_s64(__p0_715, __p1_715, __p2_715) __extension__ ({ \ + int32x4_t __ret_715; \ + int32x2_t __s0_715 = __p0_715; \ + int64x2_t __s1_715 = __p1_715; \ + int32x2_t __rev0_715; __rev0_715 = __builtin_shufflevector(__s0_715, __s0_715, 1, 0); \ + int64x2_t __rev1_715; __rev1_715 = __builtin_shufflevector(__s1_715, __s1_715, 1, 0); \ + __ret_715 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_715), (int32x2_t)(__noswap_vqshrn_n_s64(__rev1_715, __p2_715)))); \ + __ret_715 = __builtin_shufflevector(__ret_715, __ret_715, 3, 2, 1, 0); \ + __ret_715; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrn_high_n_s16(__p0_716, 
__p1_716, __p2_716) __extension__ ({ \ + int8x16_t __ret_716; \ + int8x8_t __s0_716 = __p0_716; \ + int16x8_t __s1_716 = __p1_716; \ + __ret_716 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_716), (int8x8_t)(vqshrn_n_s16(__s1_716, __p2_716)))); \ + __ret_716; \ +}) +#else +#define vqshrn_high_n_s16(__p0_717, __p1_717, __p2_717) __extension__ ({ \ + int8x16_t __ret_717; \ + int8x8_t __s0_717 = __p0_717; \ + int16x8_t __s1_717 = __p1_717; \ + int8x8_t __rev0_717; __rev0_717 = __builtin_shufflevector(__s0_717, __s0_717, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_717; __rev1_717 = __builtin_shufflevector(__s1_717, __s1_717, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_717 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_717), (int8x8_t)(__noswap_vqshrn_n_s16(__rev1_717, __p2_717)))); \ + __ret_717 = __builtin_shufflevector(__ret_717, __ret_717, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_717; \ +}) +#endif + +#define vqshrns_n_u32(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + uint32_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vqshrns_n_u32(__s0, __p1); \ + __ret; \ +}) +#define vqshrnd_n_u64(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + uint64_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vqshrnd_n_u64(__s0, __p1); \ + __ret; \ +}) +#define vqshrnh_n_u16(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + uint16_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vqshrnh_n_u16(__s0, __p1); \ + __ret; \ +}) +#define vqshrns_n_s32(__p0, __p1) __extension__ ({ \ + int16_t __ret; \ + int32_t __s0 = __p0; \ + __ret = (int16_t) __builtin_neon_vqshrns_n_s32(__s0, __p1); \ + __ret; \ +}) +#define vqshrnd_n_s64(__p0, __p1) __extension__ ({ \ + int32_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (int32_t) __builtin_neon_vqshrnd_n_s64(__s0, __p1); \ + __ret; \ +}) +#define vqshrnh_n_s16(__p0, __p1) __extension__ ({ \ + int8_t __ret; \ + int16_t __s0 = __p0; \ + __ret = (int8_t) __builtin_neon_vqshrnh_n_s16(__s0, __p1); \ + __ret; \ +}) 
+#ifdef __LITTLE_ENDIAN__ +#define vqshrun_high_n_s32(__p0_718, __p1_718, __p2_718) __extension__ ({ \ + int16x8_t __ret_718; \ + int16x4_t __s0_718 = __p0_718; \ + int32x4_t __s1_718 = __p1_718; \ + __ret_718 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_718), (int16x4_t)(vqshrun_n_s32(__s1_718, __p2_718)))); \ + __ret_718; \ +}) +#else +#define vqshrun_high_n_s32(__p0_719, __p1_719, __p2_719) __extension__ ({ \ + int16x8_t __ret_719; \ + int16x4_t __s0_719 = __p0_719; \ + int32x4_t __s1_719 = __p1_719; \ + int16x4_t __rev0_719; __rev0_719 = __builtin_shufflevector(__s0_719, __s0_719, 3, 2, 1, 0); \ + int32x4_t __rev1_719; __rev1_719 = __builtin_shufflevector(__s1_719, __s1_719, 3, 2, 1, 0); \ + __ret_719 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_719), (int16x4_t)(__noswap_vqshrun_n_s32(__rev1_719, __p2_719)))); \ + __ret_719 = __builtin_shufflevector(__ret_719, __ret_719, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_719; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_high_n_s64(__p0_720, __p1_720, __p2_720) __extension__ ({ \ + int32x4_t __ret_720; \ + int32x2_t __s0_720 = __p0_720; \ + int64x2_t __s1_720 = __p1_720; \ + __ret_720 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_720), (int32x2_t)(vqshrun_n_s64(__s1_720, __p2_720)))); \ + __ret_720; \ +}) +#else +#define vqshrun_high_n_s64(__p0_721, __p1_721, __p2_721) __extension__ ({ \ + int32x4_t __ret_721; \ + int32x2_t __s0_721 = __p0_721; \ + int64x2_t __s1_721 = __p1_721; \ + int32x2_t __rev0_721; __rev0_721 = __builtin_shufflevector(__s0_721, __s0_721, 1, 0); \ + int64x2_t __rev1_721; __rev1_721 = __builtin_shufflevector(__s1_721, __s1_721, 1, 0); \ + __ret_721 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_721), (int32x2_t)(__noswap_vqshrun_n_s64(__rev1_721, __p2_721)))); \ + __ret_721 = __builtin_shufflevector(__ret_721, __ret_721, 3, 2, 1, 0); \ + __ret_721; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vqshrun_high_n_s16(__p0_722, __p1_722, __p2_722) __extension__ ({ \ + int8x16_t 
__ret_722; \ + int8x8_t __s0_722 = __p0_722; \ + int16x8_t __s1_722 = __p1_722; \ + __ret_722 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_722), (int8x8_t)(vqshrun_n_s16(__s1_722, __p2_722)))); \ + __ret_722; \ +}) +#else +#define vqshrun_high_n_s16(__p0_723, __p1_723, __p2_723) __extension__ ({ \ + int8x16_t __ret_723; \ + int8x8_t __s0_723 = __p0_723; \ + int16x8_t __s1_723 = __p1_723; \ + int8x8_t __rev0_723; __rev0_723 = __builtin_shufflevector(__s0_723, __s0_723, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_723; __rev1_723 = __builtin_shufflevector(__s1_723, __s1_723, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_723 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_723), (int8x8_t)(__noswap_vqshrun_n_s16(__rev1_723, __p2_723)))); \ + __ret_723 = __builtin_shufflevector(__ret_723, __ret_723, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_723; \ +}) +#endif + +#define vqshruns_n_s32(__p0, __p1) __extension__ ({ \ + uint16_t __ret; \ + int32_t __s0 = __p0; \ + __ret = (uint16_t) __builtin_neon_vqshruns_n_s32(__s0, __p1); \ + __ret; \ +}) +#define vqshrund_n_s64(__p0, __p1) __extension__ ({ \ + uint32_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (uint32_t) __builtin_neon_vqshrund_n_s64(__s0, __p1); \ + __ret; \ +}) +#define vqshrunh_n_s16(__p0, __p1) __extension__ ({ \ + uint8_t __ret; \ + int16_t __s0 = __p0; \ + __ret = (uint8_t) __builtin_neon_vqshrunh_n_s16(__s0, __p1); \ + __ret; \ +}) +__ai __attribute__((target("neon"))) uint8_t vqsubb_u8(uint8_t __p0, uint8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vqsubb_u8(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint32_t vqsubs_u32(uint32_t __p0, uint32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vqsubs_u32(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vqsubd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vqsubd_u64(__p0, __p1); + return __ret; +} +__ai 
__attribute__((target("neon"))) uint16_t vqsubh_u16(uint16_t __p0, uint16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vqsubh_u16(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int8_t vqsubb_s8(int8_t __p0, int8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vqsubb_s8(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int32_t vqsubs_s32(int32_t __p0, int32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vqsubs_s32(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vqsubd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vqsubd_s64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int16_t vqsubh_s16(int16_t __p0, int16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vqsubh_s16(__p0, __p1); + return __ret; +} +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 4); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x8_t vqtbl1_p8(poly8x16_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) { poly8x16_t __ret; - __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 36); + __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 36); 
return __ret; } #else -__ai poly8x16_t vrbitq_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vqtbl1q_p8(poly8x16_t __p0, uint8x16_t __p1) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 36); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 36); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 48); + __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16_t vrbitq_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) uint8x16_t vqtbl1q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 48); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vrbitq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) { int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 
32); + __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } #else -__ai int8x16_t vrbitq_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vqtbl1q_s8(int8x16_t __p0, uint8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 32); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqtbl1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vrbit_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) { uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 16); + __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai uint8x8_t vrbit_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vqtbl1_u8(uint8x16_t __p0, uint8x8_t __p1) { uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 16); + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 16); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vrbit_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) 
{ int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 0); + __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai int8x8_t vrbit_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vqtbl1_s8(int8x16_t __p0, uint8x8_t __p1) { int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 0); + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqtbl1_v((int8x16_t)__rev0, (int8x8_t)__rev1, 0); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrecpeq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 42); +__ai __attribute__((target("neon"))) poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 4); return __ret; } #else -__ai float64x2_t vrecpeq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) poly8x8_t vqtbl2_p8(poly8x16x2_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + poly8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif -__ai float64x1_t vrecpe_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10); - return __ret; -} -__ai float64_t vrecped_f64(float64_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vrecped_f64(__p0); +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 36); return __ret; } -__ai float32_t vrecpes_f32(float32_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0); +#else +__ai __attribute__((target("neon"))) poly8x16_t vqtbl2q_p8(poly8x16x2_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + poly8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } +#endif + #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); +__ai __attribute__((target("neon"))) uint8x16_t vqtbl2q_u8(uint8x16x2_t 
__p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 48); return __ret; } #else -__ai float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint8x16_t vqtbl2q_u8(uint8x16x2_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif -__ai float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10); - return __ret; -} -__ai float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1); +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p1, 32); return __ret; } -__ai float32_t vrecpss_f32(float32_t __p0, float32_t __p1) { - 
float32_t __ret; - __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1); +#else +__ai __attribute__((target("neon"))) int8x16_t vqtbl2q_s8(int8x16x2_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + int8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqtbl2q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai float64_t vrecpxd_f64(float64_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 16); return __ret; } -__ai float32_t vrecpxs_f32(float32_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0); +#else +__ai __attribute__((target("neon"))) uint8x8_t vqtbl2_u8(uint8x16x2_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, 
__ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x8_t)__p1, 0); return __ret; } -__ai poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); +#else +__ai __attribute__((target("neon"))) int8x8_t vqtbl2_s8(int8x16x2_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + int8x16x2_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqtbl2_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) { +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) { poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); + __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 4); return __ret; } -__ai poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) { +#else +__ai __attribute__((target("neon"))) poly8x8_t vqtbl3_p8(poly8x16x3_t __p0, uint8x8_t __p1) { poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); + poly8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = 
__builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 36); return __ret; } -__ai poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); +#else +__ai __attribute__((target("neon"))) poly8x16_t vqtbl3q_p8(poly8x16x3_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + poly8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly8x8_t 
vreinterpret_p8_s8(int8x8_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 48); return __ret; } -__ai poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); +#else +__ai __attribute__((target("neon"))) uint8x16_t vqtbl3q_u8(uint8x16x3_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p1, 32); return __ret; } -__ai poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); +#else +__ai __attribute__((target("neon"))) int8x16_t 
vqtbl3q_s8(int8x16x3_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + int8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqtbl3q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 16); return __ret; } -__ai poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); +#else +__ai __attribute__((target("neon"))) uint8x8_t vqtbl3_u8(uint8x16x3_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) 
__builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x8_t)__p1, 0); return __ret; } -__ai poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); +#else +__ai __attribute__((target("neon"))) int8x8_t vqtbl3_s8(int8x16x3_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + int8x16x3_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqtbl3_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 4); return __ret; } -__ai 
poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); +#else +__ai __attribute__((target("neon"))) poly8x8_t vqtbl4_p8(poly8x16x4_t __p0, uint8x8_t __p1) { + poly8x8_t __ret; + poly8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 36); return __ret; } -__ai poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); +#else +__ai __attribute__((target("neon"))) poly8x16_t vqtbl4q_p8(poly8x16x4_t __p0, uint8x16_t __p1) { + poly8x16_t __ret; + poly8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 
9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 48); return __ret; } -__ai poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); +#else +__ai __attribute__((target("neon"))) uint8x16_t vqtbl4q_u8(uint8x16x4_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 
0); + __ret = (uint8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x16_t)__p1, 32); return __ret; } -__ai poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); +#else +__ai __attribute__((target("neon"))) int8x16_t vqtbl4q_s8(int8x16x4_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + int8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqtbl4q_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x16_t)__rev1, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) { - poly64x1_t __ret; - __ret = 
(poly64x1_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 16); return __ret; } -__ai poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); +#else +__ai __attribute__((target("neon"))) uint8x8_t vqtbl4_u8(uint8x16x4_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__p0.val[0], (int8x16_t)__p0.val[1], (int8x16_t)__p0.val[2], (int8x16_t)__p0.val[3], (int8x8_t)__p1, 0); return __ret; } -__ai poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); +#else +__ai 
__attribute__((target("neon"))) int8x8_t vqtbl4_s8(int8x16x4_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + int8x16x4_t __rev0; + __rev0.val[0] = __builtin_shufflevector(__p0.val[0], __p0.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[1] = __builtin_shufflevector(__p0.val[1], __p0.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[2] = __builtin_shufflevector(__p0.val[2], __p0.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev0.val[3] = __builtin_shufflevector(__p0.val[3], __p0.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqtbl4_v((int8x16_t)__rev0.val[0], (int8x16_t)__rev0.val[1], (int8x16_t)__rev0.val[2], (int8x16_t)__rev0.val[3], (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 4); return __ret; } -__ai poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); +#else +__ai __attribute__((target("neon"))) poly8x8_t vqtbx1_p8(poly8x8_t __p0, poly8x16_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 
4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 36); return __ret; } -__ai poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); +#else +__ai __attribute__((target("neon"))) poly8x16_t vqtbx1q_p8(poly8x16_t __p0, poly8x16_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); return __ret; } -__ai poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); +#else +__ai __attribute__((target("neon"))) uint8x16_t vqtbx1q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly16x4_t vreinterpret_p16_s8(int8x8_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); return __ret; } -__ai poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); +#else +__ai __attribute__((target("neon"))) int8x16_t vqtbx1q_s8(int8x16_t __p0, int8x16_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqtbx1q_v((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vqtbx1_u8(uint8x8_t 
__p0, uint8x16_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 16); return __ret; } -__ai poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); +#else +__ai __attribute__((target("neon"))) uint8x8_t vqtbx1_u8(uint8x8_t __p0, uint8x16_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__p0, (int8x16_t)__p1, (int8x8_t)__p2, 0); return __ret; } -__ai poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); +#else +__ai __attribute__((target("neon"))) int8x8_t vqtbx1_s8(int8x8_t __p0, int8x16_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqtbx1_v((int8x8_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 
2, 1, 0); return __ret; } -__ai poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 4); return __ret; } -__ai poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); +#else +__ai __attribute__((target("neon"))) poly8x8_t vqtbx2_p8(poly8x8_t __p0, poly8x16x2_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) { +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) { poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); + __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 36); return __ret; } -__ai poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) { +#else +__ai __attribute__((target("neon"))) poly8x16_t vqtbx2q_p8(poly8x16_t __p0, poly8x16x2_t __p1, uint8x16_t __p2) { poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); + 
poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 48); return __ret; } -__ai poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); +#else +__ai __attribute__((target("neon"))) uint8x16_t vqtbx2q_u8(uint8x16_t __p0, uint8x16x2_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) 
__builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p2, 32); return __ret; } -__ai poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); +#else +__ai __attribute__((target("neon"))) int8x16_t vqtbx2q_s8(int8x16_t __p0, int8x16x2_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqtbx2q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) 
__builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 16); return __ret; } -__ai poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); +#else +__ai __attribute__((target("neon"))) uint8x8_t vqtbx2_u8(uint8x8_t __p0, uint8x16x2_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x8_t)__p2, 0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); +#else +__ai __attribute__((target("neon"))) int8x8_t vqtbx2_s8(int8x8_t __p0, int8x16x2_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x2_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], 
__p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqtbx2_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 4); return __ret; } -__ai poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); +#else +__ai __attribute__((target("neon"))) poly8x8_t vqtbx3_p8(poly8x8_t __p0, poly8x16x3_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) { +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai 
__attribute__((target("neon"))) poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) { poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai poly128_t vreinterpretq_p128_p8(poly8x16_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -__ai poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); + __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 36); return __ret; } -__ai poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); +#else +__ai __attribute__((target("neon"))) poly8x16_t vqtbx3q_p8(poly8x16_t __p0, poly8x16x3_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t 
__p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 48); return __ret; } -__ai poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); +#else +__ai __attribute__((target("neon"))) uint8x16_t vqtbx3q_u8(uint8x16_t __p0, uint8x16x3_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p2, 32); return __ret; } -__ai poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); +#else +__ai __attribute__((target("neon"))) int8x16_t 
vqtbx3q_s8(int8x16_t __p0, int8x16x3_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqtbx3q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly128_t vreinterpretq_p128_s8(int8x16_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 16); return __ret; } -__ai poly128_t vreinterpretq_p128_f64(float64x2_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); +#else +__ai __attribute__((target("neon"))) uint8x8_t vqtbx3_u8(uint8x8_t __p0, uint8x16x3_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly128_t vreinterpretq_p128_f32(float32x4_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x8_t)__p2, 0); return __ret; } -__ai poly128_t vreinterpretq_p128_f16(float16x8_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); +#else +__ai __attribute__((target("neon"))) int8x8_t vqtbx3_s8(int8x8_t __p0, int8x16x3_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x3_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqtbx3_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); 
return __ret; } -__ai poly128_t vreinterpretq_p128_s32(int32x4_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 4); return __ret; } -__ai poly128_t vreinterpretq_p128_s64(int64x2_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); +#else +__ai __attribute__((target("neon"))) poly8x8_t vqtbx4_p8(poly8x8_t __p0, poly8x16x4_t __p1, uint8x8_t __p2) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly128_t vreinterpretq_p128_s16(int16x8_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + __ret = (poly8x16_t) 
__builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 36); return __ret; } -__ai poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); +#else +__ai __attribute__((target("neon"))) poly8x16_t vqtbx4q_p8(poly8x16_t __p0, poly8x16x4_t __p1, uint8x16_t __p2) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 48); return __ret; } -__ai poly64x2_t 
vreinterpretq_p64_p16(poly16x8_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); +#else +__ai __attribute__((target("neon"))) uint8x16_t vqtbx4q_u8(uint8x16_t __p0, uint8x16x4_t __p1, uint8x16_t __p2) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t __p2) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x16_t)__p2, 32); return __ret; } -__ai poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); +#else +__ai __attribute__((target("neon"))) int8x16_t vqtbx4q_s8(int8x16_t __p0, int8x16x4_t __p1, uint8x16_t 
__p2) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vqtbx4q_v((int8x16_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x16_t)__rev2, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 16); return __ret; } -__ai poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); +#else +__ai __attribute__((target("neon"))) uint8x8_t vqtbx4_u8(uint8x8_t __p0, uint8x16x4_t __p1, uint8x8_t __p2) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 
13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = __builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__p0, (int8x16_t)__p1.val[0], (int8x16_t)__p1.val[1], (int8x16_t)__p1.val[2], (int8x16_t)__p1.val[3], (int8x8_t)__p2, 0); return __ret; } -__ai poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); +#else +__ai __attribute__((target("neon"))) int8x8_t vqtbx4_s8(int8x8_t __p0, int8x16x4_t __p1, uint8x8_t __p2) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16x4_t __rev1; + __rev1.val[0] = __builtin_shufflevector(__p1.val[0], __p1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[1] = __builtin_shufflevector(__p1.val[1], __p1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[2] = __builtin_shufflevector(__p1.val[2], __p1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __rev1.val[3] = 
__builtin_shufflevector(__p1.val[3], __p1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vqtbx4_v((int8x8_t)__rev0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], (int8x8_t)__rev2, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vraddhn_u32(__p1, __p2)); return __ret; } -__ai poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); +#else +__ai __attribute__((target("neon"))) uint16x8_t vraddhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vcombine_u16(__rev0, __noswap_vraddhn_u32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vraddhn_u64(__p1, __p2)); return __ret; } -__ai poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); +#else +__ai __attribute__((target("neon"))) 
uint32x4_t vraddhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vcombine_u32(__rev0, __noswap_vraddhn_u64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vraddhn_u16(__p1, __p2)); return __ret; } -__ai poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); +#else +__ai __attribute__((target("neon"))) uint8x16_t vraddhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_u8(__rev0, __noswap_vraddhn_u16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vraddhn_s32(__p1, __p2)); return __ret; } -__ai poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) { - poly16x8_t __ret; - __ret = 
(poly16x8_t)(__p0); +#else +__ai __attribute__((target("neon"))) int16x8_t vraddhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vcombine_s16(__rev0, __noswap_vraddhn_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vraddhn_s64(__p1, __p2)); return __ret; } -__ai poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); +#else +__ai __attribute__((target("neon"))) int32x4_t vraddhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vcombine_s32(__rev0, __noswap_vraddhn_s64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vraddhn_s16(__p1, __p2)); return __ret; } -__ai poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); +#else +__ai 
__attribute__((target("neon"))) int8x16_t vraddhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_s8(__rev0, __noswap_vraddhn_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x8_t vrbit_p8(poly8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 4); return __ret; } -__ai poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); +#else +__ai __attribute__((target("neon"))) poly8x8_t vrbit_p8(poly8x8_t __p0) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 4); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vrbitq_p8(poly8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 36); return __ret; } -__ai poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); +#else +__ai __attribute__((target("neon"))) poly8x16_t vrbitq_p8(poly8x16_t __p0) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 
9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (poly8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 36); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vrbitq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 48); return __ret; } -__ai poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); +#else +__ai __attribute__((target("neon"))) uint8x16_t vrbitq_u8(uint8x16_t __p0) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 48); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vrbitq_s8(int8x16_t __p0) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__p0, 32); return __ret; } -__ai uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); +#else +__ai __attribute__((target("neon"))) int8x16_t vrbitq_s8(int8x16_t __p0) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vrbitq_v((int8x16_t)__rev0, 32); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) { - uint8x16_t __ret; - __ret = 
(uint8x16_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vrbit_u8(uint8x8_t __p0) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 16); return __ret; } -__ai uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); +#else +__ai __attribute__((target("neon"))) uint8x8_t vrbit_u8(uint8x8_t __p0) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vrbit_s8(int8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__p0, 0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); +#else +__ai __attribute__((target("neon"))) int8x8_t vrbit_s8(int8x8_t __p0) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vrbit_v((int8x8_t)__rev0, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vrecpeq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__p0, 42); return __ret; } -__ai uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); +#else +__ai __attribute__((target("neon"))) float64x2_t vrecpeq_f64(float64x2_t __p0) { + float64x2_t __ret; 
+ float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrecpeq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); +#endif + +__ai __attribute__((target("neon"))) float64x1_t vrecpe_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrecpe_v((int8x8_t)__p0, 10); return __ret; } -__ai uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); +__ai __attribute__((target("neon"))) float64_t vrecped_f64(float64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrecped_f64(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); +__ai __attribute__((target("neon"))) float32_t vrecpes_f32(float32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrecpes_f32(__p0); return __ret; } -__ai uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } -__ai uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); +#else +__ai __attribute__((target("neon"))) float64x2_t vrecpsq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrecpsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint8x16_t 
vreinterpretq_u8_s64(int64x2_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); +#endif + +__ai __attribute__((target("neon"))) float64x1_t vrecps_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrecps_v((int8x8_t)__p0, (int8x8_t)__p1, 10); return __ret; } -__ai uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); +__ai __attribute__((target("neon"))) float64_t vrecpsd_f64(float64_t __p0, float64_t __p1) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrecpsd_f64(__p0, __p1); return __ret; } -__ai uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); +__ai __attribute__((target("neon"))) float32_t vrecpss_f32(float32_t __p0, float32_t __p1) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrecpss_f32(__p0, __p1); return __ret; } -__ai uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); +__ai __attribute__((target("neon"))) float64_t vrecpxd_f64(float64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrecpxd_f64(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); +__ai __attribute__((target("neon"))) float32_t vrecpxs_f32(float32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrecpxs_f32(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_p64(poly64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_p16(poly16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); return __ret; } -__ai 
uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u8(uint8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u32(uint32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_s8(int8x16_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u64(uint64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_u16(uint16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s8(int8x8_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_f64(float64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_f32(float32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_f16(float16x4_t __p0) { 
+ poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); return __ret; } -__ai uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s32(int32x2_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s64(int64x1_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); +__ai __attribute__((target("neon"))) poly8x8_t vreinterpret_p8_s16(int16x4_t __p0) { + poly8x8_t __ret; + __ret = (poly8x8_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_p8(poly8x8_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_p16(poly16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_u8(uint8x8_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_u32(uint32x2_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); +__ai 
__attribute__((target("neon"))) poly64x1_t vreinterpret_p64_u64(uint64x1_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_u16(uint16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_s8(int8x8_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_f64(float64x1_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_f32(float32x2_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_f16(float16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_s32(int32x2_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); return __ret; } -__ai uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_s64(int64x1_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); return __ret; } -__ai 
uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); +__ai __attribute__((target("neon"))) poly64x1_t vreinterpret_p64_s16(int16x4_t __p0) { + poly64x1_t __ret; + __ret = (poly64x1_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_p8(poly8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_p64(poly64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u8(uint8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u32(uint32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u64(uint64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_u16(uint16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); +__ai __attribute__((target("neon"))) poly16x4_t 
vreinterpret_p16_s8(int8x8_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_f64(float64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_f32(float32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_f16(float16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s32(int32x2_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s64(int64x1_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } -__ai uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); +__ai __attribute__((target("neon"))) poly16x4_t vreinterpret_p16_s16(int16x4_t __p0) { + poly16x4_t __ret; + __ret = (poly16x4_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_p128(poly128_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_p64(poly64x2_t __p0) { + poly8x16_t 
__ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_p16(poly16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u8(uint8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u32(uint32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u64(uint64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_u16(uint16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s8(int8x16_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_f64(float64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_f32(float32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_f16(float16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s32(int32x4_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s64(int64x2_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly8x16_t vreinterpretq_p8_s16(int16x8_t __p0) { + poly8x16_t __ret; + __ret = (poly8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t 
vreinterpretq_p128_p8(poly8x16_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_p64(poly64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_p16(poly16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_u8(uint8x16_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_u32(uint32x4_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_u64(uint64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_u16(uint16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_s8(int8x16_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_f64(float64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_f32(float32x4_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_f16(float16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_s32(int32x4_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly128_t vreinterpretq_p128_s64(int64x2_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai 
__attribute__((target("neon"))) poly128_t vreinterpretq_p128_s16(int16x8_t __p0) { + poly128_t __ret; + __ret = (poly128_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_p8(poly8x16_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_p128(poly128_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_p16(poly16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_u8(uint8x16_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_u32(uint32x4_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_u64(uint64x2_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_u16(uint16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_s8(int8x16_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_f64(float64x2_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_f32(float32x4_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_f16(float16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_s32(int32x4_t __p0) { + poly64x2_t 
__ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_s64(int64x2_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly64x2_t vreinterpretq_p64_s16(int16x8_t __p0) { + poly64x2_t __ret; + __ret = (poly64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_p8(poly8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_p128(poly128_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_p64(poly64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u8(uint8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u32(uint32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u64(uint64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_u16(uint16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s8(int8x16_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_f64(float64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_f32(float32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) 
poly16x8_t vreinterpretq_p16_f16(float16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s32(int32x4_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s64(int64x2_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) poly16x8_t vreinterpretq_p16_s16(int16x8_t __p0) { + poly16x8_t __ret; + __ret = (poly16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_p8(poly8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_p128(poly128_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_p64(poly64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_p16(poly16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_u32(uint32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_u64(uint64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_u16(uint16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s8(int8x16_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_f64(float64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + 
return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_f32(float32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_f16(float16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s32(int32x4_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s64(int64x2_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint8x16_t vreinterpretq_u8_s16(int16x8_t __p0) { + uint8x16_t __ret; + __ret = (uint8x16_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_p8(poly8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_p128(poly128_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_p64(poly64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_p16(poly16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_u8(uint8x16_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_u64(uint64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_u16(uint16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s8(int8x16_t 
__p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_f64(float64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_f32(float32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_f16(float16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s32(int32x4_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s64(int64x2_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint32x4_t vreinterpretq_u32_s16(int16x8_t __p0) { + uint32x4_t __ret; + __ret = (uint32x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_p8(poly8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_p128(poly128_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_p64(poly64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_p16(poly16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_u8(uint8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_u32(uint32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai 
__attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_u16(uint16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s8(int8x16_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_f64(float64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_f32(float32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_f16(float16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s32(int32x4_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s64(int64x2_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64x2_t vreinterpretq_u64_s16(int16x8_t __p0) { + uint64x2_t __ret; + __ret = (uint64x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_p8(poly8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_p128(poly128_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_p64(poly64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_p16(poly16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_u8(uint8x16_t __p0) { + 
uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_u32(uint32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_u64(uint64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s8(int8x16_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_f64(float64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_f32(float32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_f16(float16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s32(int32x4_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s64(int64x2_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint16x8_t vreinterpretq_u16_s16(int16x8_t __p0) { + uint16x8_t __ret; + __ret = (uint16x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_p8(poly8x16_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_p128(poly128_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_p128(poly128_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_p64(poly64x2_t __p0) { 
int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_p16(poly16x8_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u8(uint8x16_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u32(uint32x4_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u64(uint64x2_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_u16(uint16x8_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_f64(float64x2_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_f32(float32x4_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_f16(float16x8_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_s32(int32x4_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) 
int8x16_t vreinterpretq_s8_s64(int64x2_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int8x16_t vreinterpretq_s8_s16(int16x8_t __p0) { int8x16_t __ret; __ret = (int8x16_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_p8(poly8x16_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_p128(poly128_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_p128(poly128_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_p64(poly64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_p16(poly16x8_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_u8(uint8x16_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_u32(uint32x4_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_u64(uint64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_u16(uint16x8_t __p0) { float64x2_t __ret; __ret = 
(float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_s8(int8x16_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_f32(float32x4_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_f16(float16x8_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_s32(int32x4_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_s64(int64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) float64x2_t vreinterpretq_f64_s16(int16x8_t __p0) { float64x2_t __ret; __ret = (float64x2_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_p8(poly8x16_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_p128(poly128_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_p128(poly128_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_p64(poly64x2_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float32x4_t 
vreinterpretq_f32_p16(poly16x8_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_p16(poly16x8_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u8(uint8x16_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u32(uint32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u64(uint64x2_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_u16(uint16x8_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s8(int8x16_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_f64(float64x2_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_f16(float16x8_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s32(int32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) 
float32x4_t vreinterpretq_f32_s64(int64x2_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) float32x4_t vreinterpretq_f32_s16(int16x8_t __p0) { float32x4_t __ret; __ret = (float32x4_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_p8(poly8x16_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_p128(poly128_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_p128(poly128_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_p64(poly64x2_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_p16(poly16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u8(uint8x16_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u32(uint32x4_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u64(uint64x2_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_u16(uint16x8_t __p0) { float16x8_t __ret; __ret = 
(float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s8(int8x16_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_f64(float64x2_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_f32(float32x4_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s32(int32x4_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s64(int64x2_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) float16x8_t vreinterpretq_f16_s16(int16x8_t __p0) { float16x8_t __ret; __ret = (float16x8_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_p8(poly8x16_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_p128(poly128_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_p128(poly128_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_p64(poly64x2_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) { +__ai 
__attribute__((target("neon"))) int32x4_t vreinterpretq_s32_p16(poly16x8_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u8(uint8x16_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u32(uint32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u64(uint64x2_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_u16(uint16x8_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_s8(int8x16_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_f64(float64x2_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_f32(float32x4_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_f16(float16x8_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_s64(int64x2_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } 
-__ai int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int32x4_t vreinterpretq_s32_s16(int16x8_t __p0) { int32x4_t __ret; __ret = (int32x4_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_p8(poly8x16_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_p128(poly128_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_p128(poly128_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_p64(poly64x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_p16(poly16x8_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u8(uint8x16_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u32(uint32x4_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u64(uint64x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_u16(uint16x8_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_s8(int8x16_t __p0) { int64x2_t 
__ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_f64(float64x2_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_f32(float32x4_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_f16(float16x8_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_s32(int32x4_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) { +__ai __attribute__((target("neon"))) int64x2_t vreinterpretq_s64_s16(int16x8_t __p0) { int64x2_t __ret; __ret = (int64x2_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_p8(poly8x16_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_p128(poly128_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_p128(poly128_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_p64(poly64x2_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_p16(poly16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) { +__ai __attribute__((target("neon"))) 
int16x8_t vreinterpretq_s16_u8(uint8x16_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u32(uint32x4_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u64(uint64x2_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_u16(uint16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_s8(int8x16_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_f64(float64x2_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_f32(float32x4_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_f16(float16x8_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_s32(int32x4_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) { +__ai __attribute__((target("neon"))) int16x8_t vreinterpretq_s16_s64(int64x2_t __p0) { int16x8_t __ret; __ret = (int16x8_t)(__p0); return __ret; } -__ai uint8x8_t 
vreinterpret_u8_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_p8(poly8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } -__ai uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_p64(poly64x1_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } -__ai uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_p16(poly16x4_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } -__ai uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_u32(uint32x2_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } -__ai uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_u64(uint64x1_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } -__ai uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_u16(uint16x4_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } -__ai uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s8(int8x8_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } -__ai uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_f64(float64x1_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } -__ai uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_f32(float32x2_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } -__ai uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_f16(float16x4_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } 
-__ai uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s32(int32x2_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } -__ai uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s64(int64x1_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } -__ai uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) uint8x8_t vreinterpret_u8_s16(int16x4_t __p0) { uint8x8_t __ret; __ret = (uint8x8_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_p8(poly8x8_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_p64(poly64x1_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_p16(poly16x4_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_u8(uint8x8_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_u64(uint64x1_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_u16(uint16x4_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s8(int8x8_t __p0) { uint32x2_t __ret; 
__ret = (uint32x2_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_f64(float64x1_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_f32(float32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_f16(float16x4_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s32(int32x2_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s64(int64x1_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) uint32x2_t vreinterpret_u32_s16(int16x4_t __p0) { uint32x2_t __ret; __ret = (uint32x2_t)(__p0); return __ret; } -__ai uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_p8(poly8x8_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_p64(poly64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_p16(poly16x4_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) { +__ai 
__attribute__((target("neon"))) uint64x1_t vreinterpret_u64_u8(uint8x8_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_u32(uint32x2_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_u16(uint16x4_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s8(int8x8_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_f64(float64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_f32(float32x2_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_f16(float16x4_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s32(int32x2_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s64(int64x1_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return __ret; } -__ai uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vreinterpret_u64_s16(int16x4_t __p0) { uint64x1_t __ret; __ret = (uint64x1_t)(__p0); return 
__ret; } -__ai uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_p8(poly8x8_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_p64(poly64x1_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_p16(poly16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_u8(uint8x8_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_u32(uint32x2_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_u64(uint64x1_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s8(int8x8_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_f64(float64x1_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_f32(float32x2_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint16x4_t vreinterpret_u16_f16(float16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t 
vreinterpret_u16_f16(float16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s32(int32x2_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s64(int64x1_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { +__ai __attribute__((target("neon"))) uint16x4_t vreinterpret_u16_s16(int16x4_t __p0) { uint16x4_t __ret; __ret = (uint16x4_t)(__p0); return __ret; } -__ai int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_p8(poly8x8_t __p0) { int8x8_t __ret; __ret = (int8x8_t)(__p0); return __ret; } -__ai int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_f64(float64x1_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai 
int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_s8(int8x8_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_f32(float32x2_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_f16(float16x4_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_s32(int32x2_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_s64(int64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai float64x1_t vreinterpret_f64_s16(int16x4_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - 
return __ret; -} -__ai float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_p16(poly16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_f64(float64x1_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { 
- float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_f64(float64x1_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_f32(float32x2_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_s32(int32x2_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_s64(int64x1_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai float16x4_t vreinterpret_f16_s16(int16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t 
vreinterpret_s32_u64(uint64x1_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_s8(int8x8_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_f64(float64x1_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_f32(float32x2_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_f16(float16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_s64(int64x1_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int32x2_t vreinterpret_s32_s16(int16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_s8(int8x8_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_f64(float64x1_t __p0) { 
- int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_f32(float32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_f16(float16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_s32(int32x2_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int64x1_t vreinterpret_s64_s16(int16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_s8(int8x8_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_f64(float64x1_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_f32(float32x2_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_f16(float16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_s32(int32x2_t __p0) { - int16x4_t __ret; - __ret = 
(int16x4_t)(__p0); - return __ret; -} -__ai int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai uint64_t vrshld_u64(uint64_t __p0, int64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1); - return __ret; -} -__ai int64_t vrshld_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1); - return __ret; -} -#define vrshrd_n_u64(__p0, __p1) __extension__ ({ \ - uint64_t __ret; \ - uint64_t __s0 = __p0; \ - __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \ - __ret; \ -}) -#define vrshrd_n_s64(__p0, __p1) __extension__ ({ \ - int64_t __ret; \ - int64_t __s0 = __p0; \ - __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \ - __ret; \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vrshrn_high_n_u32(__p0_703, __p1_703, __p2_703) __extension__ ({ \ - uint16x8_t __ret_703; \ - uint16x4_t __s0_703 = __p0_703; \ - uint32x4_t __s1_703 = __p1_703; \ - __ret_703 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_703), (uint16x4_t)(vrshrn_n_u32(__s1_703, __p2_703)))); \ - __ret_703; \ -}) -#else -#define vrshrn_high_n_u32(__p0_704, __p1_704, __p2_704) __extension__ ({ \ - uint16x8_t __ret_704; \ - uint16x4_t __s0_704 = __p0_704; \ - uint32x4_t __s1_704 = __p1_704; \ - uint16x4_t __rev0_704; __rev0_704 = __builtin_shufflevector(__s0_704, __s0_704, 3, 2, 1, 0); \ - uint32x4_t __rev1_704; __rev1_704 = __builtin_shufflevector(__s1_704, __s1_704, 3, 2, 1, 0); \ - __ret_704 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_704), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_704, __p2_704)))); \ - __ret_704 = __builtin_shufflevector(__ret_704, __ret_704, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_704; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrn_high_n_u64(__p0_705, __p1_705, __p2_705) __extension__ ({ \ - uint32x4_t __ret_705; \ - uint32x2_t __s0_705 = __p0_705; \ - uint64x2_t __s1_705 = __p1_705; \ - __ret_705 = 
(uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_705), (uint32x2_t)(vrshrn_n_u64(__s1_705, __p2_705)))); \ - __ret_705; \ -}) -#else -#define vrshrn_high_n_u64(__p0_706, __p1_706, __p2_706) __extension__ ({ \ - uint32x4_t __ret_706; \ - uint32x2_t __s0_706 = __p0_706; \ - uint64x2_t __s1_706 = __p1_706; \ - uint32x2_t __rev0_706; __rev0_706 = __builtin_shufflevector(__s0_706, __s0_706, 1, 0); \ - uint64x2_t __rev1_706; __rev1_706 = __builtin_shufflevector(__s1_706, __s1_706, 1, 0); \ - __ret_706 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_706), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_706, __p2_706)))); \ - __ret_706 = __builtin_shufflevector(__ret_706, __ret_706, 3, 2, 1, 0); \ - __ret_706; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrn_high_n_u16(__p0_707, __p1_707, __p2_707) __extension__ ({ \ - uint8x16_t __ret_707; \ - uint8x8_t __s0_707 = __p0_707; \ - uint16x8_t __s1_707 = __p1_707; \ - __ret_707 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_707), (uint8x8_t)(vrshrn_n_u16(__s1_707, __p2_707)))); \ - __ret_707; \ -}) -#else -#define vrshrn_high_n_u16(__p0_708, __p1_708, __p2_708) __extension__ ({ \ - uint8x16_t __ret_708; \ - uint8x8_t __s0_708 = __p0_708; \ - uint16x8_t __s1_708 = __p1_708; \ - uint8x8_t __rev0_708; __rev0_708 = __builtin_shufflevector(__s0_708, __s0_708, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_708; __rev1_708 = __builtin_shufflevector(__s1_708, __s1_708, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_708 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_708), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_708, __p2_708)))); \ - __ret_708 = __builtin_shufflevector(__ret_708, __ret_708, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_708; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrn_high_n_s32(__p0_709, __p1_709, __p2_709) __extension__ ({ \ - int16x8_t __ret_709; \ - int16x4_t __s0_709 = __p0_709; \ - int32x4_t __s1_709 = __p1_709; \ - __ret_709 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_709), 
(int16x4_t)(vrshrn_n_s32(__s1_709, __p2_709)))); \ - __ret_709; \ -}) -#else -#define vrshrn_high_n_s32(__p0_710, __p1_710, __p2_710) __extension__ ({ \ - int16x8_t __ret_710; \ - int16x4_t __s0_710 = __p0_710; \ - int32x4_t __s1_710 = __p1_710; \ - int16x4_t __rev0_710; __rev0_710 = __builtin_shufflevector(__s0_710, __s0_710, 3, 2, 1, 0); \ - int32x4_t __rev1_710; __rev1_710 = __builtin_shufflevector(__s1_710, __s1_710, 3, 2, 1, 0); \ - __ret_710 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_710), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_710, __p2_710)))); \ - __ret_710 = __builtin_shufflevector(__ret_710, __ret_710, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_710; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrn_high_n_s64(__p0_711, __p1_711, __p2_711) __extension__ ({ \ - int32x4_t __ret_711; \ - int32x2_t __s0_711 = __p0_711; \ - int64x2_t __s1_711 = __p1_711; \ - __ret_711 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_711), (int32x2_t)(vrshrn_n_s64(__s1_711, __p2_711)))); \ - __ret_711; \ -}) -#else -#define vrshrn_high_n_s64(__p0_712, __p1_712, __p2_712) __extension__ ({ \ - int32x4_t __ret_712; \ - int32x2_t __s0_712 = __p0_712; \ - int64x2_t __s1_712 = __p1_712; \ - int32x2_t __rev0_712; __rev0_712 = __builtin_shufflevector(__s0_712, __s0_712, 1, 0); \ - int64x2_t __rev1_712; __rev1_712 = __builtin_shufflevector(__s1_712, __s1_712, 1, 0); \ - __ret_712 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_712), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_712, __p2_712)))); \ - __ret_712 = __builtin_shufflevector(__ret_712, __ret_712, 3, 2, 1, 0); \ - __ret_712; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vrshrn_high_n_s16(__p0_713, __p1_713, __p2_713) __extension__ ({ \ - int8x16_t __ret_713; \ - int8x8_t __s0_713 = __p0_713; \ - int16x8_t __s1_713 = __p1_713; \ - __ret_713 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_713), (int8x8_t)(vrshrn_n_s16(__s1_713, __p2_713)))); \ - __ret_713; \ -}) -#else -#define vrshrn_high_n_s16(__p0_714, __p1_714, 
__p2_714) __extension__ ({ \ - int8x16_t __ret_714; \ - int8x8_t __s0_714 = __p0_714; \ - int16x8_t __s1_714 = __p1_714; \ - int8x8_t __rev0_714; __rev0_714 = __builtin_shufflevector(__s0_714, __s0_714, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_714; __rev1_714 = __builtin_shufflevector(__s1_714, __s1_714, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_714 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_714), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_714, __p2_714)))); \ - __ret_714 = __builtin_shufflevector(__ret_714, __ret_714, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_714; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrsqrteq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 42); - return __ret; -} -#else -__ai float64x2_t vrsqrteq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai float64x1_t vrsqrte_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10); - return __ret; -} -__ai float64_t vrsqrted_f64(float64_t __p0) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0); - return __ret; -} -__ai float32_t vrsqrtes_f32(float32_t __p0) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - return __ret; -} -#else -__ai float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10); - return __ret; -} -__ai float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) { - float64_t __ret; - __ret = (float64_t) __builtin_neon_vrsqrtsd_f64(__p0, __p1); - return __ret; -} -__ai float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) { - float32_t __ret; - __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1); - return __ret; -} -#define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64_t __ret; \ - uint64_t __s0 = __p0; \ - uint64_t __s1 = __p1; \ - __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \ - __ret; \ -}) -#define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64_t __ret; \ - int64_t __s0 = __p0; \ - int64_t __s1 = __p1; \ - __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \ - __ret; \ -}) -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint16x8_t __ret; - __ret = vcombine_u16(__p0, vrsubhn_u32(__p1, __p2)); - return __ret; -} -#else -__ai uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint16x8_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = __noswap_vcombine_u16(__rev0, __noswap_vrsubhn_u32(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t 
__p1, uint64x2_t __p2) { - uint32x4_t __ret; - __ret = vcombine_u32(__p0, vrsubhn_u64(__p1, __p2)); - return __ret; -} -#else -__ai uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint32x4_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = __noswap_vcombine_u32(__rev0, __noswap_vrsubhn_u64(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint8x16_t __ret; - __ret = vcombine_u8(__p0, vrsubhn_u16(__p1, __p2)); - return __ret; -} -#else -__ai uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint8x16_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vcombine_u8(__rev0, __noswap_vrsubhn_u16(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int16x8_t __ret; - __ret = vcombine_s16(__p0, vrsubhn_s32(__p1, __p2)); - return __ret; -} -#else -__ai int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int16x8_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = 
__noswap_vcombine_s16(__rev0, __noswap_vrsubhn_s32(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { - int32x4_t __ret; - __ret = vcombine_s32(__p0, vrsubhn_s64(__p1, __p2)); - return __ret; -} -#else -__ai int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { - int32x4_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = __noswap_vcombine_s32(__rev0, __noswap_vrsubhn_s64(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int8x16_t __ret; - __ret = vcombine_s8(__p0, vrsubhn_s16(__p1, __p2)); - return __ret; -} -#else -__ai int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int8x16_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vcombine_s8(__rev0, __noswap_vrsubhn_s16(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1_t __ret; \ - poly64_t __s0 = __p0; \ - poly64x1_t __s1 = __p1; \ - __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (poly64x1_t)__s1, __p2); \ - __ret; \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2_t __ret; \ 
- poly64_t __s0 = __p0; \ - poly64x2_t __s1 = __p1; \ - __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2_t __ret; \ - poly64_t __s0 = __p0; \ - poly64x2_t __s1 = __p1; \ - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2_t __ret; \ - poly64_t __s0 = __p0; \ - poly64x2_t __s1 = __p1; \ - __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2_t __ret; \ - float64_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \ - __ret; \ -}) -#else -#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2_t __ret; \ - float64_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__rev1, __p2); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#define __noswap_vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2_t __ret; \ - float64_t __s0 = __p0; \ - float64x2_t __s1 = __p1; \ - __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \ - __ret; \ -}) -#endif - -#define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1_t __ret; \ - float64_t __s0 = __p0; \ - float64x1_t __s1 = __p1; \ - __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (float64x1_t)__s1, __p2); \ - __ret; \ -}) -__ai uint64_t vshld_u64(uint64_t __p0, int64_t 
__p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1); - return __ret; -} -__ai int64_t vshld_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1); - return __ret; -} -#define vshld_n_u64(__p0, __p1) __extension__ ({ \ - uint64_t __ret; \ - uint64_t __s0 = __p0; \ - __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \ - __ret; \ -}) -#define vshld_n_s64(__p0, __p1) __extension__ ({ \ - int64_t __ret; \ - int64_t __s0 = __p0; \ - __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \ - __ret; \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vshll_high_n_u8(__p0_715, __p1_715) __extension__ ({ \ - uint16x8_t __ret_715; \ - uint8x16_t __s0_715 = __p0_715; \ - __ret_715 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_715), __p1_715)); \ - __ret_715; \ -}) -#else -#define vshll_high_n_u8(__p0_716, __p1_716) __extension__ ({ \ - uint16x8_t __ret_716; \ - uint8x16_t __s0_716 = __p0_716; \ - uint8x16_t __rev0_716; __rev0_716 = __builtin_shufflevector(__s0_716, __s0_716, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_716 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_716), __p1_716)); \ - __ret_716 = __builtin_shufflevector(__ret_716, __ret_716, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_716; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshll_high_n_u32(__p0_717, __p1_717) __extension__ ({ \ - uint64x2_t __ret_717; \ - uint32x4_t __s0_717 = __p0_717; \ - __ret_717 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_717), __p1_717)); \ - __ret_717; \ -}) -#else -#define vshll_high_n_u32(__p0_718, __p1_718) __extension__ ({ \ - uint64x2_t __ret_718; \ - uint32x4_t __s0_718 = __p0_718; \ - uint32x4_t __rev0_718; __rev0_718 = __builtin_shufflevector(__s0_718, __s0_718, 3, 2, 1, 0); \ - __ret_718 = (uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_718), __p1_718)); \ - __ret_718 = __builtin_shufflevector(__ret_718, __ret_718, 1, 0); \ - __ret_718; \ -}) 
-#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshll_high_n_u16(__p0_719, __p1_719) __extension__ ({ \ - uint32x4_t __ret_719; \ - uint16x8_t __s0_719 = __p0_719; \ - __ret_719 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_719), __p1_719)); \ - __ret_719; \ -}) -#else -#define vshll_high_n_u16(__p0_720, __p1_720) __extension__ ({ \ - uint32x4_t __ret_720; \ - uint16x8_t __s0_720 = __p0_720; \ - uint16x8_t __rev0_720; __rev0_720 = __builtin_shufflevector(__s0_720, __s0_720, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_720 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_720), __p1_720)); \ - __ret_720 = __builtin_shufflevector(__ret_720, __ret_720, 3, 2, 1, 0); \ - __ret_720; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshll_high_n_s8(__p0_721, __p1_721) __extension__ ({ \ - int16x8_t __ret_721; \ - int8x16_t __s0_721 = __p0_721; \ - __ret_721 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_721), __p1_721)); \ - __ret_721; \ -}) -#else -#define vshll_high_n_s8(__p0_722, __p1_722) __extension__ ({ \ - int16x8_t __ret_722; \ - int8x16_t __s0_722 = __p0_722; \ - int8x16_t __rev0_722; __rev0_722 = __builtin_shufflevector(__s0_722, __s0_722, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_722 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_722), __p1_722)); \ - __ret_722 = __builtin_shufflevector(__ret_722, __ret_722, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_722; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshll_high_n_s32(__p0_723, __p1_723) __extension__ ({ \ - int64x2_t __ret_723; \ - int32x4_t __s0_723 = __p0_723; \ - __ret_723 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_723), __p1_723)); \ - __ret_723; \ -}) -#else -#define vshll_high_n_s32(__p0_724, __p1_724) __extension__ ({ \ - int64x2_t __ret_724; \ - int32x4_t __s0_724 = __p0_724; \ - int32x4_t __rev0_724; __rev0_724 = __builtin_shufflevector(__s0_724, __s0_724, 3, 2, 1, 0); \ - __ret_724 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_724), __p1_724)); 
\ - __ret_724 = __builtin_shufflevector(__ret_724, __ret_724, 1, 0); \ - __ret_724; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshll_high_n_s16(__p0_725, __p1_725) __extension__ ({ \ - int32x4_t __ret_725; \ - int16x8_t __s0_725 = __p0_725; \ - __ret_725 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_725), __p1_725)); \ - __ret_725; \ -}) -#else -#define vshll_high_n_s16(__p0_726, __p1_726) __extension__ ({ \ - int32x4_t __ret_726; \ - int16x8_t __s0_726 = __p0_726; \ - int16x8_t __rev0_726; __rev0_726 = __builtin_shufflevector(__s0_726, __s0_726, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_726 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_726), __p1_726)); \ - __ret_726 = __builtin_shufflevector(__ret_726, __ret_726, 3, 2, 1, 0); \ - __ret_726; \ -}) -#endif - -#define vshrd_n_u64(__p0, __p1) __extension__ ({ \ - uint64_t __ret; \ - uint64_t __s0 = __p0; \ - __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \ - __ret; \ -}) -#define vshrd_n_s64(__p0, __p1) __extension__ ({ \ - int64_t __ret; \ - int64_t __s0 = __p0; \ - __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \ - __ret; \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vshrn_high_n_u32(__p0_727, __p1_727, __p2_727) __extension__ ({ \ - uint16x8_t __ret_727; \ - uint16x4_t __s0_727 = __p0_727; \ - uint32x4_t __s1_727 = __p1_727; \ - __ret_727 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_727), (uint16x4_t)(vshrn_n_u32(__s1_727, __p2_727)))); \ - __ret_727; \ -}) -#else -#define vshrn_high_n_u32(__p0_728, __p1_728, __p2_728) __extension__ ({ \ - uint16x8_t __ret_728; \ - uint16x4_t __s0_728 = __p0_728; \ - uint32x4_t __s1_728 = __p1_728; \ - uint16x4_t __rev0_728; __rev0_728 = __builtin_shufflevector(__s0_728, __s0_728, 3, 2, 1, 0); \ - uint32x4_t __rev1_728; __rev1_728 = __builtin_shufflevector(__s1_728, __s1_728, 3, 2, 1, 0); \ - __ret_728 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_728), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_728, __p2_728)))); \ - __ret_728 = 
__builtin_shufflevector(__ret_728, __ret_728, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_728; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrn_high_n_u64(__p0_729, __p1_729, __p2_729) __extension__ ({ \ - uint32x4_t __ret_729; \ - uint32x2_t __s0_729 = __p0_729; \ - uint64x2_t __s1_729 = __p1_729; \ - __ret_729 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_729), (uint32x2_t)(vshrn_n_u64(__s1_729, __p2_729)))); \ - __ret_729; \ -}) -#else -#define vshrn_high_n_u64(__p0_730, __p1_730, __p2_730) __extension__ ({ \ - uint32x4_t __ret_730; \ - uint32x2_t __s0_730 = __p0_730; \ - uint64x2_t __s1_730 = __p1_730; \ - uint32x2_t __rev0_730; __rev0_730 = __builtin_shufflevector(__s0_730, __s0_730, 1, 0); \ - uint64x2_t __rev1_730; __rev1_730 = __builtin_shufflevector(__s1_730, __s1_730, 1, 0); \ - __ret_730 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_730), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_730, __p2_730)))); \ - __ret_730 = __builtin_shufflevector(__ret_730, __ret_730, 3, 2, 1, 0); \ - __ret_730; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrn_high_n_u16(__p0_731, __p1_731, __p2_731) __extension__ ({ \ - uint8x16_t __ret_731; \ - uint8x8_t __s0_731 = __p0_731; \ - uint16x8_t __s1_731 = __p1_731; \ - __ret_731 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_731), (uint8x8_t)(vshrn_n_u16(__s1_731, __p2_731)))); \ - __ret_731; \ -}) -#else -#define vshrn_high_n_u16(__p0_732, __p1_732, __p2_732) __extension__ ({ \ - uint8x16_t __ret_732; \ - uint8x8_t __s0_732 = __p0_732; \ - uint16x8_t __s1_732 = __p1_732; \ - uint8x8_t __rev0_732; __rev0_732 = __builtin_shufflevector(__s0_732, __s0_732, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint16x8_t __rev1_732; __rev1_732 = __builtin_shufflevector(__s1_732, __s1_732, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_732 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_732), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_732, __p2_732)))); \ - __ret_732 = __builtin_shufflevector(__ret_732, __ret_732, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 
3, 2, 1, 0); \ - __ret_732; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrn_high_n_s32(__p0_733, __p1_733, __p2_733) __extension__ ({ \ - int16x8_t __ret_733; \ - int16x4_t __s0_733 = __p0_733; \ - int32x4_t __s1_733 = __p1_733; \ - __ret_733 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_733), (int16x4_t)(vshrn_n_s32(__s1_733, __p2_733)))); \ - __ret_733; \ -}) -#else -#define vshrn_high_n_s32(__p0_734, __p1_734, __p2_734) __extension__ ({ \ - int16x8_t __ret_734; \ - int16x4_t __s0_734 = __p0_734; \ - int32x4_t __s1_734 = __p1_734; \ - int16x4_t __rev0_734; __rev0_734 = __builtin_shufflevector(__s0_734, __s0_734, 3, 2, 1, 0); \ - int32x4_t __rev1_734; __rev1_734 = __builtin_shufflevector(__s1_734, __s1_734, 3, 2, 1, 0); \ - __ret_734 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_734), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_734, __p2_734)))); \ - __ret_734 = __builtin_shufflevector(__ret_734, __ret_734, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_734; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrn_high_n_s64(__p0_735, __p1_735, __p2_735) __extension__ ({ \ - int32x4_t __ret_735; \ - int32x2_t __s0_735 = __p0_735; \ - int64x2_t __s1_735 = __p1_735; \ - __ret_735 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_735), (int32x2_t)(vshrn_n_s64(__s1_735, __p2_735)))); \ - __ret_735; \ -}) -#else -#define vshrn_high_n_s64(__p0_736, __p1_736, __p2_736) __extension__ ({ \ - int32x4_t __ret_736; \ - int32x2_t __s0_736 = __p0_736; \ - int64x2_t __s1_736 = __p1_736; \ - int32x2_t __rev0_736; __rev0_736 = __builtin_shufflevector(__s0_736, __s0_736, 1, 0); \ - int64x2_t __rev1_736; __rev1_736 = __builtin_shufflevector(__s1_736, __s1_736, 1, 0); \ - __ret_736 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_736), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_736, __p2_736)))); \ - __ret_736 = __builtin_shufflevector(__ret_736, __ret_736, 3, 2, 1, 0); \ - __ret_736; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vshrn_high_n_s16(__p0_737, __p1_737, __p2_737) 
__extension__ ({ \ - int8x16_t __ret_737; \ - int8x8_t __s0_737 = __p0_737; \ - int16x8_t __s1_737 = __p1_737; \ - __ret_737 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_737), (int8x8_t)(vshrn_n_s16(__s1_737, __p2_737)))); \ - __ret_737; \ -}) -#else -#define vshrn_high_n_s16(__p0_738, __p1_738, __p2_738) __extension__ ({ \ - int8x16_t __ret_738; \ - int8x8_t __s0_738 = __p0_738; \ - int16x8_t __s1_738 = __p1_738; \ - int8x8_t __rev0_738; __rev0_738 = __builtin_shufflevector(__s0_738, __s0_738, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_738; __rev1_738 = __builtin_shufflevector(__s1_738, __s1_738, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_738 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_738), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_738, __p2_738)))); \ - __ret_738 = __builtin_shufflevector(__ret_738, __ret_738, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_738; \ -}) -#endif - -#define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64_t __ret; \ - uint64_t __s0 = __p0; \ - uint64_t __s1 = __p1; \ - __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \ - __ret; \ -}) -#define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64_t __ret; \ - int64_t __s0 = __p0; \ - int64_t __s1 = __p1; \ - __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \ - __ret; \ -}) -#define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1_t __ret; \ - poly64x1_t __s0 = __p0; \ - poly64x1_t __s1 = __p1; \ - __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ - __ret; \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2_t __ret; \ - poly64x2_t __s0 = __p0; \ - poly64x2_t __s1 = __p1; \ - __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \ - __ret; \ -}) -#else -#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2_t __ret; \ - poly64x2_t __s0 = __p0; \ - poly64x2_t __s1 = __p1; \ - 
poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -__ai uint8_t vsqaddb_u8(uint8_t __p0, int8_t __p1) { - uint8_t __ret; - __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1); - return __ret; -} -__ai uint32_t vsqadds_u32(uint32_t __p0, int32_t __p1) { - uint32_t __ret; - __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1); - return __ret; -} -__ai uint64_t vsqaddd_u64(uint64_t __p0, int64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vsqaddd_u64(__p0, __p1); - return __ret; -} -__ai uint16_t vsqaddh_u16(uint16_t __p0, int16_t __p1) { - uint16_t __ret; - __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); - return __ret; -} -#else -__ai uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) { - uint32x4_t __ret; - __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); - return __ret; -} -#else -__ai uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) 
{ - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); - return __ret; -} -#else -__ai uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; - __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); - return __ret; -} -#else -__ai uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) { - uint8x8_t __ret; - __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); - return __ret; -} -#else -__ai uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; - __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); - return __ret; -} -#else -__ai uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -__ai uint64x1_t vsqadd_u64(uint64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) { - uint16x4_t __ret; - __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); - return __ret; -} -#else -__ai uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vsqrtq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 42); - return __ret; -} -#else -__ai float64x2_t 
vsqrtq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vsqrtq_f32(float32x4_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 41); - return __ret; -} -#else -__ai float32x4_t vsqrtq_f32(float32x4_t __p0) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -__ai float64x1_t vsqrt_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 10); - return __ret; -} -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vsqrt_f32(float32x2_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 9); - return __ret; -} -#else -__ai float32x2_t vsqrt_f32(float32x2_t __p0) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64_t __ret; \ - uint64_t __s0 = __p0; \ - uint64_t __s1 = __p1; \ - __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \ - __ret; \ -}) -#define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64_t __ret; \ - int64_t __s0 = __p0; \ - int64_t __s1 = __p1; \ - __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \ - __ret; \ -}) -#define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64_t __ret; \ - uint64_t __s0 = __p0; \ - uint64_t __s1 = __p1; \ - __ret = 
(uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \ - __ret; \ -}) -#define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \ - int64_t __ret; \ - int64_t __s0 = __p0; \ - int64_t __s1 = __p1; \ - __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \ - __ret; \ -}) -#define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1_t __ret; \ - poly64x1_t __s0 = __p0; \ - poly64x1_t __s1 = __p1; \ - __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ - __ret; \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2_t __ret; \ - poly64x2_t __s0 = __p0; \ - poly64x2_t __s1 = __p1; \ - __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \ - __ret; \ -}) -#else -#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2_t __ret; \ - poly64x2_t __s0 = __p0; \ - poly64x2_t __s1 = __p1; \ - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \ - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ - __ret; \ -}) -#endif - -#define vst1_p64(__p0, __p1) __extension__ ({ \ - poly64x1_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 6); \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vst1q_p64(__p0, __p1) __extension__ ({ \ - poly64x2_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 38); \ -}) -#else -#define vst1q_p64(__p0, __p1) __extension__ ({ \ - poly64x2_t __s1 = __p1; \ - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 38); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_f64(__p0, __p1) __extension__ ({ \ - float64x2_t __s1 = __p1; \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 42); \ -}) -#else -#define vst1q_f64(__p0, __p1) 
__extension__ ({ \ - float64x2_t __s1 = __p1; \ - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 42); \ -}) -#endif - -#define vst1_f64(__p0, __p1) __extension__ ({ \ - float64x1_t __s1 = __p1; \ - __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 10); \ -}) -#define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \ -}) -#else -#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2_t __s1 = __p1; \ - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2_t __s1 = __p1; \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \ -}) -#else -#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2_t __s1 = __p1; \ - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ - __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \ -}) -#endif - -#define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1_t __s1 = __p1; \ - __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \ -}) -#define vst1_p64_x2(__p0, __p1) __extension__ ({ \ - poly64x1x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \ - poly64x2x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \ -}) -#else -#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \ - poly64x2x2_t __s1 = __p1; \ - poly64x2x2_t 
__rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \ - float64x2x2_t __s1 = __p1; \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 42); \ -}) -#else -#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \ - float64x2x2_t __s1 = __p1; \ - float64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 42); \ -}) -#endif - -#define vst1_f64_x2(__p0, __p1) __extension__ ({ \ - float64x1x2_t __s1 = __p1; \ - __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 10); \ -}) -#define vst1_p64_x3(__p0, __p1) __extension__ ({ \ - poly64x1x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \ - poly64x2x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \ -}) -#else -#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \ - poly64x2x3_t __s1 = __p1; \ - poly64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_f64_x3(__p0, __p1) __extension__ 
({ \ - float64x2x3_t __s1 = __p1; \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 42); \ -}) -#else -#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \ - float64x2x3_t __s1 = __p1; \ - float64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 42); \ -}) -#endif - -#define vst1_f64_x3(__p0, __p1) __extension__ ({ \ - float64x1x3_t __s1 = __p1; \ - __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 10); \ -}) -#define vst1_p64_x4(__p0, __p1) __extension__ ({ \ - poly64x1x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \ - poly64x2x4_t __s1 = __p1; \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \ -}) -#else -#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \ - poly64x2x4_t __s1 = __p1; \ - poly64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \ - float64x2x4_t __s1 = __p1; \ - 
__builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 42); \ -}) -#else -#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \ - float64x2x4_t __s1 = __p1; \ - float64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 42); \ -}) -#endif - -#define vst1_f64_x4(__p0, __p1) __extension__ ({ \ - float64x1x4_t __s1 = __p1; \ - __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 10); \ -}) -#define vst2_p64(__p0, __p1) __extension__ ({ \ - poly64x1x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vst2q_p64(__p0, __p1) __extension__ ({ \ - poly64x2x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \ -}) -#else -#define vst2q_p64(__p0, __p1) __extension__ ({ \ - poly64x2x2_t __s1 = __p1; \ - poly64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_u64(__p0, __p1) __extension__ ({ \ - uint64x2x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \ -}) -#else -#define vst2q_u64(__p0, __p1) __extension__ ({ \ - uint64x2x2_t __s1 = __p1; \ - uint64x2x2_t __rev1; \ - __rev1.val[0] = 
__builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_f64(__p0, __p1) __extension__ ({ \ - float64x2x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 42); \ -}) -#else -#define vst2q_f64(__p0, __p1) __extension__ ({ \ - float64x2x2_t __s1 = __p1; \ - float64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 42); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_s64(__p0, __p1) __extension__ ({ \ - int64x2x2_t __s1 = __p1; \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 35); \ -}) -#else -#define vst2q_s64(__p0, __p1) __extension__ ({ \ - int64x2x2_t __s1 = __p1; \ - int64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 35); \ -}) -#endif - -#define vst2_f64(__p0, __p1) __extension__ ({ \ - float64x1x2_t __s1 = __p1; \ - __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 10); \ -}) -#define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \ -}) -#else -#define vst2q_lane_p8(__p0, __p1, __p2) 
__extension__ ({ \ - poly8x16x2_t __s1 = __p1; \ - poly8x16x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \ -}) -#else -#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2x2_t __s1 = __p1; \ - poly64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \ -}) -#else -#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16x2_t __s1 = __p1; \ - uint8x16x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \ -}) -#else -#define 
vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2x2_t __s1 = __p1; \ - uint64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \ -}) -#else -#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16x2_t __s1 = __p1; \ - int8x16x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \ -}) -#else -#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2x2_t __s1 = __p1; \ - float64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2x2_t __s1 = __p1; \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \ -}) -#else -#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2x2_t __s1 = 
__p1; \ - int64x2x2_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \ -}) -#endif - -#define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \ -}) -#define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \ -}) -#define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1x2_t __s1 = __p1; \ - __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \ -}) -#define vst3_p64(__p0, __p1) __extension__ ({ \ - poly64x1x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vst3q_p64(__p0, __p1) __extension__ ({ \ - poly64x2x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \ -}) -#else -#define vst3q_p64(__p0, __p1) __extension__ ({ \ - poly64x2x3_t __s1 = __p1; \ - poly64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_u64(__p0, __p1) __extension__ ({ \ - uint64x2x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \ -}) -#else -#define vst3q_u64(__p0, 
__p1) __extension__ ({ \ - uint64x2x3_t __s1 = __p1; \ - uint64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_f64(__p0, __p1) __extension__ ({ \ - float64x2x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 42); \ -}) -#else -#define vst3q_f64(__p0, __p1) __extension__ ({ \ - float64x2x3_t __s1 = __p1; \ - float64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 42); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_s64(__p0, __p1) __extension__ ({ \ - int64x2x3_t __s1 = __p1; \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 35); \ -}) -#else -#define vst3q_s64(__p0, __p1) __extension__ ({ \ - int64x2x3_t __s1 = __p1; \ - int64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 35); \ -}) -#endif - -#define vst3_f64(__p0, __p1) __extension__ ({ \ - float64x1x3_t __s1 = __p1; \ - __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 
(int8x8_t)__s1.val[2], 10); \ -}) -#define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \ -}) -#else -#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16x3_t __s1 = __p1; \ - poly8x16x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \ -}) -#else -#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2x3_t __s1 = __p1; \ - poly64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16x3_t __s1 = __p1; \ - 
__builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \ -}) -#else -#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16x3_t __s1 = __p1; \ - uint8x16x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \ -}) -#else -#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2x3_t __s1 = __p1; \ - uint64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \ -}) -#else -#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16x3_t __s1 = __p1; \ - int8x16x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] 
= __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \ -}) -#else -#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2x3_t __s1 = __p1; \ - float64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2x3_t __s1 = __p1; \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \ -}) -#else -#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2x3_t __s1 = __p1; \ - int64x2x3_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \ -}) -#endif - -#define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], 
(int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \ -}) -#define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \ -}) -#define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1x3_t __s1 = __p1; \ - __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \ -}) -#define vst4_p64(__p0, __p1) __extension__ ({ \ - poly64x1x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vst4q_p64(__p0, __p1) __extension__ ({ \ - poly64x2x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \ -}) -#else -#define vst4q_p64(__p0, __p1) __extension__ ({ \ - poly64x2x4_t __s1 = __p1; \ - poly64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_u64(__p0, __p1) __extension__ ({ \ - uint64x2x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \ -}) -#else -#define vst4q_u64(__p0, __p1) __extension__ ({ \ - uint64x2x4_t __s1 = __p1; \ - uint64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_f64(__p0, __p1) __extension__ ({ \ - float64x2x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 42); \ -}) -#else -#define vst4q_f64(__p0, __p1) __extension__ ({ \ - float64x2x4_t __s1 = __p1; \ - float64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 42); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_s64(__p0, __p1) __extension__ ({ \ - int64x2x4_t __s1 = __p1; \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 35); \ -}) -#else -#define vst4q_s64(__p0, __p1) __extension__ ({ \ - int64x2x4_t __s1 = __p1; \ - int64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 35); \ -}) 
-#endif - -#define vst4_f64(__p0, __p1) __extension__ ({ \ - float64x1x4_t __s1 = __p1; \ - __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 10); \ -}) -#define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x1x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 6); \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \ -}) -#else -#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ - poly8x16x4_t __s1 = __p1; \ - poly8x16x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \ -}) -#else -#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ - poly64x2x4_t __s1 = __p1; \ - poly64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \ -}) -#else -#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ - uint8x16x4_t __s1 = __p1; \ - uint8x16x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \ -}) -#else -#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x2x4_t __s1 = __p1; \ - uint64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = 
__builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \ -}) -#else -#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ - int8x16x4_t __s1 = __p1; \ - int8x16x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \ -}) -#else -#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x2x4_t __s1 = __p1; \ - float64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = 
__builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2x4_t __s1 = __p1; \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \ -}) -#else -#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x2x4_t __s1 = __p1; \ - int64x2x4_t __rev1; \ - __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ - __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ - __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ - __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ - __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \ -}) -#endif - -#define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \ - uint64x1x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \ -}) -#define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \ - float64x1x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \ -}) -#define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \ - int64x1x4_t __s1 = __p1; \ - __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \ -}) -#define vstrq_p128(__p0, __p1) __extension__ ({ \ - poly128_t __s1 = __p1; \ - __builtin_neon_vstrq_p128(__p0, __s1); \ -}) -__ai uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = 
(uint64_t) __builtin_neon_vsubd_u64(__p0, __p1); +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_p64(poly64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); return __ret; } -__ai int64_t vsubd_s64(int64_t __p0, int64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1); +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_p16(poly16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); return __ret; } -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = __p0 - __p1; +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u8(uint8x8_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); return __ret; } -#else -__ai float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __rev0 - __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u32(uint32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); return __ret; } -#endif - -__ai float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u64(uint64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_u16(uint16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_f64(float64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_f32(float32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_f16(float16x4_t __p0) { + 
int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_s32(int32x2_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_s64(int64x1_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int8x8_t vreinterpret_s8_s16(int16x4_t __p0) { + int8x8_t __ret; + __ret = (int8x8_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_p8(poly8x8_t __p0) { float64x1_t __ret; - __ret = __p0 - __p1; + __ret = (float64x1_t)(__p0); return __ret; } -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint16x8_t __ret; - __ret = vcombine_u16(__p0, vsubhn_u32(__p1, __p2)); +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_p64(poly64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); return __ret; } -#else -__ai uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { - uint16x8_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = __noswap_vcombine_u16(__rev0, __noswap_vsubhn_u32(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_p16(poly16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint32x4_t __ret; - __ret = vcombine_u32(__p0, vsubhn_u64(__p1, __p2)); +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_u8(uint8x8_t __p0) { + float64x1_t __ret; + 
__ret = (float64x1_t)(__p0); return __ret; } -#else -__ai uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { - uint32x4_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = __noswap_vcombine_u32(__rev0, __noswap_vsubhn_u64(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_u32(uint32x2_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint8x16_t __ret; - __ret = vcombine_u8(__p0, vsubhn_u16(__p1, __p2)); +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_u64(uint64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); return __ret; } -#else -__ai uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { - uint8x16_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vcombine_u8(__rev0, __noswap_vsubhn_u16(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_u16(uint16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int16x8_t __ret; - __ret = vcombine_s16(__p0, vsubhn_s32(__p1, __p2)); +__ai __attribute__((target("neon"))) float64x1_t 
vreinterpret_f64_s8(int8x8_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); return __ret; } -#else -__ai int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { - int16x8_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = __noswap_vcombine_s16(__rev0, __noswap_vsubhn_s32(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_f32(float32x2_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { - int32x4_t __ret; - __ret = vcombine_s32(__p0, vsubhn_s64(__p1, __p2)); +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_f16(float16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); return __ret; } -#else -__ai int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { - int32x4_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); - __ret = __noswap_vcombine_s32(__rev0, __noswap_vsubhn_s64(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_s32(int32x2_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int8x16_t __ret; - __ret = vcombine_s8(__p0, vsubhn_s16(__p1, __p2)); +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_s64(int64x1_t 
__p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); return __ret; } -#else -__ai int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { - int8x16_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vcombine_s8(__rev0, __noswap_vsubhn_s16(__rev1, __rev2)); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float64x1_t vreinterpret_f64_s16(int16x4_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint16x8_t __ret; - __ret = vmovl_high_u8(__p0) - vmovl_high_u8(__p1); +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_p8(poly8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -#else -__ai uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint16x8_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vmovl_high_u8(__rev0) - __noswap_vmovl_high_u8(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_p64(poly64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint64x2_t __ret; - __ret = vmovl_high_u32(__p0) - vmovl_high_u32(__p1); +__ai __attribute__((target("neon"))) float32x2_t 
vreinterpret_f32_p16(poly16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -#else -__ai uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint64x2_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __noswap_vmovl_high_u32(__rev0) - __noswap_vmovl_high_u32(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u8(uint8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint32x4_t __ret; - __ret = vmovl_high_u16(__p0) - vmovl_high_u16(__p1); +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u32(uint32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -#else -__ai uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint32x4_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vmovl_high_u16(__rev0) - __noswap_vmovl_high_u16(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u64(uint64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) { - int16x8_t __ret; - __ret = vmovl_high_s8(__p0) - vmovl_high_s8(__p1); +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_u16(uint16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -#else -__ai int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) { - int16x8_t __ret; - int8x16_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vmovl_high_s8(__rev0) - __noswap_vmovl_high_s8(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s8(int8x8_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) { - int64x2_t __ret; - __ret = vmovl_high_s32(__p0) - vmovl_high_s32(__p1); +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_f64(float64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -#else -__ai int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) { - int64x2_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __noswap_vmovl_high_s32(__rev0) - __noswap_vmovl_high_s32(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_f16(float16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) { - int32x4_t __ret; - __ret = vmovl_high_s16(__p0) - vmovl_high_s16(__p1); +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s32(int32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -#else -__ai int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) { - int32x4_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - 
__ret = __noswap_vmovl_high_s16(__rev0) - __noswap_vmovl_high_s16(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s64(int64x1_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { - uint16x8_t __ret; - __ret = __p0 - vmovl_high_u8(__p1); +__ai __attribute__((target("neon"))) float32x2_t vreinterpret_f32_s16(int16x4_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t)(__p0); return __ret; } -#else -__ai uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 - __noswap_vmovl_high_u8(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_p8(poly8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { - uint64x2_t __ret; - __ret = __p0 - vmovl_high_u32(__p1); +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_p64(poly64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); return __ret; } -#else -__ai uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 - __noswap_vmovl_high_u32(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_p16(poly16x4_t __p0) { + float16x4_t __ret; + __ret = 
(float16x4_t)(__p0); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { - uint32x4_t __ret; - __ret = __p0 - vmovl_high_u16(__p1); +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u8(uint8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); return __ret; } -#else -__ai uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 - __noswap_vmovl_high_u16(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u32(uint32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u64(uint64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_u16(uint16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s8(int8x8_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_f64(float64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_f32(float32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s32(int32x2_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s64(int64x1_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); 
+ return __ret; +} +__ai __attribute__((target("neon"))) float16x4_t vreinterpret_f16_s16(int16x4_t __p0) { + float16x4_t __ret; + __ret = (float16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_p8(poly8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_p64(poly64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_p16(poly16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u8(uint8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u32(uint32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u64(uint64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_u16(uint16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_s8(int8x8_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_f64(float64x1_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_f32(float32x2_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_f16(float16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_s64(int64x1_t __p0) { + int32x2_t __ret; + __ret = 
(int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int32x2_t vreinterpret_s32_s16(int16x4_t __p0) { + int32x2_t __ret; + __ret = (int32x2_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_p8(poly8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_p64(poly64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_p16(poly16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u8(uint8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); return __ret; } +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u32(uint32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u64(uint64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_u16(uint16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_s8(int8x8_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_f64(float64x1_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_f32(float32x2_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_f16(float16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_s32(int32x2_t __p0) { + int64x1_t __ret; + 
__ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int64x1_t vreinterpret_s64_s16(int16x4_t __p0) { + int64x1_t __ret; + __ret = (int64x1_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_p8(poly8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_p64(poly64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_p16(poly16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u8(uint8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u32(uint32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u64(uint64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_u16(uint16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_s8(int8x8_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_f64(float64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_f32(float32x2_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_f16(float16x4_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_s32(int32x2_t __p0) { + int16x4_t 
__ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) int16x4_t vreinterpret_s16_s64(int64x1_t __p0) { + int16x4_t __ret; + __ret = (int16x4_t)(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) uint64_t vrshld_u64(uint64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vrshld_u64(__p0, __p1); + return __ret; +} +__ai __attribute__((target("neon"))) int64_t vrshld_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vrshld_s64(__p0, __p1); + return __ret; +} +#define vrshrd_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vrshrd_n_u64(__s0, __p1); \ + __ret; \ +}) +#define vrshrd_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vrshrd_n_s64(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_u32(__p0_724, __p1_724, __p2_724) __extension__ ({ \ + uint16x8_t __ret_724; \ + uint16x4_t __s0_724 = __p0_724; \ + uint32x4_t __s1_724 = __p1_724; \ + __ret_724 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_724), (uint16x4_t)(vrshrn_n_u32(__s1_724, __p2_724)))); \ + __ret_724; \ +}) +#else +#define vrshrn_high_n_u32(__p0_725, __p1_725, __p2_725) __extension__ ({ \ + uint16x8_t __ret_725; \ + uint16x4_t __s0_725 = __p0_725; \ + uint32x4_t __s1_725 = __p1_725; \ + uint16x4_t __rev0_725; __rev0_725 = __builtin_shufflevector(__s0_725, __s0_725, 3, 2, 1, 0); \ + uint32x4_t __rev1_725; __rev1_725 = __builtin_shufflevector(__s1_725, __s1_725, 3, 2, 1, 0); \ + __ret_725 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_725), (uint16x4_t)(__noswap_vrshrn_n_u32(__rev1_725, __p2_725)))); \ + __ret_725 = __builtin_shufflevector(__ret_725, __ret_725, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_725; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) { - int16x8_t __ret; 
- __ret = __p0 - vmovl_high_s8(__p1); - return __ret; -} +#define vrshrn_high_n_u64(__p0_726, __p1_726, __p2_726) __extension__ ({ \ + uint32x4_t __ret_726; \ + uint32x2_t __s0_726 = __p0_726; \ + uint64x2_t __s1_726 = __p1_726; \ + __ret_726 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_726), (uint32x2_t)(vrshrn_n_u64(__s1_726, __p2_726)))); \ + __ret_726; \ +}) +#else +#define vrshrn_high_n_u64(__p0_727, __p1_727, __p2_727) __extension__ ({ \ + uint32x4_t __ret_727; \ + uint32x2_t __s0_727 = __p0_727; \ + uint64x2_t __s1_727 = __p1_727; \ + uint32x2_t __rev0_727; __rev0_727 = __builtin_shufflevector(__s0_727, __s0_727, 1, 0); \ + uint64x2_t __rev1_727; __rev1_727 = __builtin_shufflevector(__s1_727, __s1_727, 1, 0); \ + __ret_727 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_727), (uint32x2_t)(__noswap_vrshrn_n_u64(__rev1_727, __p2_727)))); \ + __ret_727 = __builtin_shufflevector(__ret_727, __ret_727, 3, 2, 1, 0); \ + __ret_727; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vrshrn_high_n_u16(__p0_728, __p1_728, __p2_728) __extension__ ({ \ + uint8x16_t __ret_728; \ + uint8x8_t __s0_728 = __p0_728; \ + uint16x8_t __s1_728 = __p1_728; \ + __ret_728 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_728), (uint8x8_t)(vrshrn_n_u16(__s1_728, __p2_728)))); \ + __ret_728; \ +}) #else -__ai int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 - __noswap_vmovl_high_s8(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vrshrn_high_n_u16(__p0_729, __p1_729, __p2_729) __extension__ ({ \ + uint8x16_t __ret_729; \ + uint8x8_t __s0_729 = __p0_729; \ + uint16x8_t __s1_729 = __p1_729; \ + uint8x8_t __rev0_729; __rev0_729 = __builtin_shufflevector(__s0_729, __s0_729, 
7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_729; __rev1_729 = __builtin_shufflevector(__s1_729, __s1_729, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_729 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_729), (uint8x8_t)(__noswap_vrshrn_n_u16(__rev1_729, __p2_729)))); \ + __ret_729 = __builtin_shufflevector(__ret_729, __ret_729, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_729; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) { - int64x2_t __ret; - __ret = __p0 - vmovl_high_s32(__p1); - return __ret; -} +#define vrshrn_high_n_s32(__p0_730, __p1_730, __p2_730) __extension__ ({ \ + int16x8_t __ret_730; \ + int16x4_t __s0_730 = __p0_730; \ + int32x4_t __s1_730 = __p1_730; \ + __ret_730 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_730), (int16x4_t)(vrshrn_n_s32(__s1_730, __p2_730)))); \ + __ret_730; \ +}) #else -__ai int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __rev0 - __noswap_vmovl_high_s32(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vrshrn_high_n_s32(__p0_731, __p1_731, __p2_731) __extension__ ({ \ + int16x8_t __ret_731; \ + int16x4_t __s0_731 = __p0_731; \ + int32x4_t __s1_731 = __p1_731; \ + int16x4_t __rev0_731; __rev0_731 = __builtin_shufflevector(__s0_731, __s0_731, 3, 2, 1, 0); \ + int32x4_t __rev1_731; __rev1_731 = __builtin_shufflevector(__s1_731, __s1_731, 3, 2, 1, 0); \ + __ret_731 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_731), (int16x4_t)(__noswap_vrshrn_n_s32(__rev1_731, __p2_731)))); \ + __ret_731 = __builtin_shufflevector(__ret_731, __ret_731, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_731; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) { - int32x4_t __ret; - __ret = __p0 - 
vmovl_high_s16(__p1); - return __ret; -} +#define vrshrn_high_n_s64(__p0_732, __p1_732, __p2_732) __extension__ ({ \ + int32x4_t __ret_732; \ + int32x2_t __s0_732 = __p0_732; \ + int64x2_t __s1_732 = __p1_732; \ + __ret_732 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_732), (int32x2_t)(vrshrn_n_s64(__s1_732, __p2_732)))); \ + __ret_732; \ +}) #else -__ai int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 - __noswap_vmovl_high_s16(__rev1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vrshrn_high_n_s64(__p0_733, __p1_733, __p2_733) __extension__ ({ \ + int32x4_t __ret_733; \ + int32x2_t __s0_733 = __p0_733; \ + int64x2_t __s1_733 = __p1_733; \ + int32x2_t __rev0_733; __rev0_733 = __builtin_shufflevector(__s0_733, __s0_733, 1, 0); \ + int64x2_t __rev1_733; __rev1_733 = __builtin_shufflevector(__s1_733, __s1_733, 1, 0); \ + __ret_733 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_733), (int32x2_t)(__noswap_vrshrn_n_s64(__rev1_733, __p2_733)))); \ + __ret_733 = __builtin_shufflevector(__ret_733, __ret_733, 3, 2, 1, 0); \ + __ret_733; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); - return __ret; -} +#define vrshrn_high_n_s16(__p0_734, __p1_734, __p2_734) __extension__ ({ \ + int8x16_t __ret_734; \ + int8x8_t __s0_734 = __p0_734; \ + int16x8_t __s1_734 = __p1_734; \ + __ret_734 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_734), (int8x8_t)(vrshrn_n_s16(__s1_734, __p2_734)))); \ + __ret_734; \ +}) #else -__ai poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - 
poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vrshrn_high_n_s16(__p0_735, __p1_735, __p2_735) __extension__ ({ \ + int8x16_t __ret_735; \ + int8x8_t __s0_735 = __p0_735; \ + int16x8_t __s1_735 = __p1_735; \ + int8x8_t __rev0_735; __rev0_735 = __builtin_shufflevector(__s0_735, __s0_735, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_735; __rev1_735 = __builtin_shufflevector(__s1_735, __s1_735, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_735 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_735), (int8x8_t)(__noswap_vrshrn_n_s16(__rev1_735, __p2_735)))); \ + __ret_735 = __builtin_shufflevector(__ret_735, __ret_735, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_735; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); +__ai __attribute__((target("neon"))) float64x2_t vrsqrteq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__p0, 42); return __ret; } #else -__ai poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __ret; - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float64x2_t vrsqrteq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrsqrteq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -#ifdef 
__LITTLE_ENDIAN__ -__ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); +__ai __attribute__((target("neon"))) float64x1_t vrsqrte_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrsqrte_v((int8x8_t)__p0, 10); return __ret; } -#else -__ai poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __ret; - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float64_t vrsqrted_f64(float64_t __p0) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrsqrted_f64(__p0); + return __ret; +} +__ai __attribute__((target("neon"))) float32_t vrsqrtes_f32(float32_t __p0) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrsqrtes_f32(__p0); return __ret; } -#endif - #ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); +__ai __attribute__((target("neon"))) float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else -__ai poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly64x2_t __ret; - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); +__ai 
__attribute__((target("neon"))) float64x2_t vrsqrtsq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (float64x2_t) __builtin_neon_vrsqrtsq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); +__ai __attribute__((target("neon"))) float64x1_t vrsqrts_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vrsqrts_v((int8x8_t)__p0, (int8x8_t)__p1, 10); return __ret; } -#else -__ai poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t __ret; - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float64_t vrsqrtsd_f64(float64_t __p0, float64_t __p1) { + float64_t __ret; + __ret = (float64_t) __builtin_neon_vrsqrtsd_f64(__p0, __p1); return __ret; } -#endif - +__ai __attribute__((target("neon"))) float32_t vrsqrtss_f32(float32_t __p0, float32_t __p1) { + float32_t __ret; + __ret = (float32_t) __builtin_neon_vrsqrtss_f32(__p0, __p1); + return __ret; +} +#define vrsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __ret; \ + uint64_t __s0 = __p0; \ + uint64_t __s1 = __p1; \ + __ret = (uint64_t) __builtin_neon_vrsrad_n_u64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vrsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int64_t __s1 = 
__p1; \ + __ret = (int64_t) __builtin_neon_vrsrad_n_s64(__s0, __s1, __p2); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); +__ai __attribute__((target("neon"))) uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vrsubhn_u32(__p1, __p2)); return __ret; } #else -__ai uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint16x8_t vrsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vcombine_u16(__rev0, __noswap_vrsubhn_u32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + __ret = vcombine_u32(__p0, vrsubhn_u64(__p1, __p2)); return __ret; } #else -__ai uint32x4_t 
vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vrsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vcombine_u32(__rev0, __noswap_vrsubhn_u64(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); +__ai __attribute__((target("neon"))) uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vrsubhn_u16(__p1, __p2)); return __ret; } #else -__ai uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint8x16_t vrsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_u8(__rev0, __noswap_vrsubhn_u16(__rev1, __rev2)); + __ret 
= __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); +__ai __attribute__((target("neon"))) int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vrsubhn_s32(__p1, __p2)); return __ret; } #else -__ai uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); +__ai __attribute__((target("neon"))) int16x8_t vrsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vcombine_s16(__rev0, __noswap_vrsubhn_s32(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); +__ai __attribute__((target("neon"))) int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vrsubhn_s64(__p1, __p2)); return __ret; } #else -__ai int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 
5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32x4_t vrsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vcombine_s32(__rev0, __noswap_vrsubhn_s64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); +__ai __attribute__((target("neon"))) int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vrsubhn_s16(__p1, __p2)); return __ret; } #else -__ai float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) int8x16_t vrsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = 
__noswap_vcombine_s8(__rev0, __noswap_vrsubhn_s16(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif +#define vset_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __ret; \ + poly64_t __s0 = __p0; \ + poly64x1_t __s1 = __p1; \ + __ret = (poly64x1_t) __builtin_neon_vset_lane_i64(__s0, (poly64x1_t)__s1, __p2); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); - return __ret; -} +#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \ + __ret; \ +}) #else -__ai float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + __ret = (poly64x2_t) __builtin_neon_vsetq_lane_i64(__s0, (poly64x2_t)__s1, __p2); \ + __ret; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = 
__builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); +#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __ret; \ + float64_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \ + __ret; \ +}) +#else +#define vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __ret; \ + float64_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__rev1, __p2); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) +#define __noswap_vsetq_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __ret; \ + float64_t __s0 = __p0; \ + float64x2_t __s1 = __p1; \ + __ret = (float64x2_t) __builtin_neon_vsetq_lane_f64(__s0, (float64x2_t)__s1, __p2); \ + __ret; \ +}) +#endif + +#define vset_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __ret; \ + float64_t __s0 = __p0; \ + float64x1_t __s1 = __p1; \ + __ret = (float64x1_t) __builtin_neon_vset_lane_f64(__s0, (float64x1_t)__s1, __p2); \ + __ret; \ +}) +__ai __attribute__((target("neon"))) uint64_t vshld_u64(uint64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vshld_u64(__p0, __p1); return __ret; } -#else -__ai int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int64_t vshld_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vshld_s64(__p0, __p1); return __ret; } +#define vshld_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64_t __s0 = 
__p0; \ + __ret = (uint64_t) __builtin_neon_vshld_n_u64(__s0, __p1); \ + __ret; \ +}) +#define vshld_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vshld_n_s64(__s0, __p1); \ + __ret; \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vshll_high_n_u8(__p0_736, __p1_736) __extension__ ({ \ + uint16x8_t __ret_736; \ + uint8x16_t __s0_736 = __p0_736; \ + __ret_736 = (uint16x8_t)(vshll_n_u8(vget_high_u8(__s0_736), __p1_736)); \ + __ret_736; \ +}) +#else +#define vshll_high_n_u8(__p0_737, __p1_737) __extension__ ({ \ + uint16x8_t __ret_737; \ + uint8x16_t __s0_737 = __p0_737; \ + uint8x16_t __rev0_737; __rev0_737 = __builtin_shufflevector(__s0_737, __s0_737, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_737 = (uint16x8_t)(__noswap_vshll_n_u8(__noswap_vget_high_u8(__rev0_737), __p1_737)); \ + __ret_737 = __builtin_shufflevector(__ret_737, __ret_737, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_737; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} +#define vshll_high_n_u32(__p0_738, __p1_738) __extension__ ({ \ + uint64x2_t __ret_738; \ + uint32x4_t __s0_738 = __p0_738; \ + __ret_738 = (uint64x2_t)(vshll_n_u32(vget_high_u32(__s0_738), __p1_738)); \ + __ret_738; \ +}) #else -__ai int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vshll_high_n_u32(__p0_739, __p1_739) __extension__ ({ \ + uint64x2_t __ret_739; \ + uint32x4_t __s0_739 = __p0_739; \ + uint32x4_t __rev0_739; __rev0_739 = __builtin_shufflevector(__s0_739, __s0_739, 3, 2, 1, 0); \ + __ret_739 = 
(uint64x2_t)(__noswap_vshll_n_u32(__noswap_vget_high_u32(__rev0_739), __p1_739)); \ + __ret_739 = __builtin_shufflevector(__ret_739, __ret_739, 1, 0); \ + __ret_739; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); - return __ret; -} +#define vshll_high_n_u16(__p0_740, __p1_740) __extension__ ({ \ + uint32x4_t __ret_740; \ + uint16x8_t __s0_740 = __p0_740; \ + __ret_740 = (uint32x4_t)(vshll_n_u16(vget_high_u16(__s0_740), __p1_740)); \ + __ret_740; \ +}) #else -__ai int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vshll_high_n_u16(__p0_741, __p1_741) __extension__ ({ \ + uint32x4_t __ret_741; \ + uint16x8_t __s0_741 = __p0_741; \ + uint16x8_t __rev0_741; __rev0_741 = __builtin_shufflevector(__s0_741, __s0_741, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_741 = (uint32x4_t)(__noswap_vshll_n_u16(__noswap_vget_high_u16(__rev0_741), __p1_741)); \ + __ret_741 = __builtin_shufflevector(__ret_741, __ret_741, 3, 2, 1, 0); \ + __ret_741; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); - return __ret; -} +#define vshll_high_n_s8(__p0_742, __p1_742) __extension__ ({ \ + int16x8_t __ret_742; \ + int8x16_t __s0_742 = __p0_742; \ + __ret_742 = (int16x8_t)(vshll_n_s8(vget_high_s8(__s0_742), __p1_742)); \ + __ret_742; \ +}) #else -__ai uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vshll_high_n_s8(__p0_743, __p1_743) __extension__ ({ \ + int16x8_t __ret_743; \ + int8x16_t __s0_743 = __p0_743; \ + int8x16_t __rev0_743; __rev0_743 = __builtin_shufflevector(__s0_743, __s0_743, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_743 = (int16x8_t)(__noswap_vshll_n_s8(__noswap_vget_high_s8(__rev0_743), __p1_743)); \ + __ret_743 = __builtin_shufflevector(__ret_743, __ret_743, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_743; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} +#define vshll_high_n_s32(__p0_744, __p1_744) __extension__ ({ \ + int64x2_t __ret_744; \ + int32x4_t __s0_744 = __p0_744; \ + __ret_744 = (int64x2_t)(vshll_n_s32(vget_high_s32(__s0_744), __p1_744)); \ + __ret_744; \ +}) #else -__ai uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vshll_high_n_s32(__p0_745, __p1_745) __extension__ ({ \ + int64x2_t __ret_745; \ + int32x4_t __s0_745 = __p0_745; \ + int32x4_t __rev0_745; __rev0_745 = __builtin_shufflevector(__s0_745, __s0_745, 3, 2, 1, 0); \ + __ret_745 = (int64x2_t)(__noswap_vshll_n_s32(__noswap_vget_high_s32(__rev0_745), __p1_745)); \ + __ret_745 = __builtin_shufflevector(__ret_745, __ret_745, 1, 0); \ + __ret_745; \ +}) #endif #ifdef __LITTLE_ENDIAN__ 
-__ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); - return __ret; -} +#define vshll_high_n_s16(__p0_746, __p1_746) __extension__ ({ \ + int32x4_t __ret_746; \ + int16x8_t __s0_746 = __p0_746; \ + __ret_746 = (int32x4_t)(vshll_n_s16(vget_high_s16(__s0_746), __p1_746)); \ + __ret_746; \ +}) #else -__ai uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vshll_high_n_s16(__p0_747, __p1_747) __extension__ ({ \ + int32x4_t __ret_747; \ + int16x8_t __s0_747 = __p0_747; \ + int16x8_t __rev0_747; __rev0_747 = __builtin_shufflevector(__s0_747, __s0_747, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_747 = (int32x4_t)(__noswap_vshll_n_s16(__noswap_vget_high_s16(__rev0_747), __p1_747)); \ + __ret_747 = __builtin_shufflevector(__ret_747, __ret_747, 3, 2, 1, 0); \ + __ret_747; \ +}) #endif +#define vshrd_n_u64(__p0, __p1) __extension__ ({ \ + uint64_t __ret; \ + uint64_t __s0 = __p0; \ + __ret = (uint64_t) __builtin_neon_vshrd_n_u64(__s0, __p1); \ + __ret; \ +}) +#define vshrd_n_s64(__p0, __p1) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + __ret = (int64_t) __builtin_neon_vshrd_n_s64(__s0, __p1); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); - return __ret; -} +#define vshrn_high_n_u32(__p0_748, __p1_748, __p2_748) __extension__ ({ \ + uint16x8_t __ret_748; \ + uint16x4_t __s0_748 = __p0_748; \ + uint32x4_t __s1_748 = __p1_748; \ + __ret_748 = (uint16x8_t)(vcombine_u16((uint16x4_t)(__s0_748), 
(uint16x4_t)(vshrn_n_u32(__s1_748, __p2_748)))); \ + __ret_748; \ +}) #else -__ai int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vshrn_high_n_u32(__p0_749, __p1_749, __p2_749) __extension__ ({ \ + uint16x8_t __ret_749; \ + uint16x4_t __s0_749 = __p0_749; \ + uint32x4_t __s1_749 = __p1_749; \ + uint16x4_t __rev0_749; __rev0_749 = __builtin_shufflevector(__s0_749, __s0_749, 3, 2, 1, 0); \ + uint32x4_t __rev1_749; __rev1_749 = __builtin_shufflevector(__s1_749, __s1_749, 3, 2, 1, 0); \ + __ret_749 = (uint16x8_t)(__noswap_vcombine_u16((uint16x4_t)(__rev0_749), (uint16x4_t)(__noswap_vshrn_n_u32(__rev1_749, __p2_749)))); \ + __ret_749 = __builtin_shufflevector(__ret_749, __ret_749, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_749; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} +#define vshrn_high_n_u64(__p0_750, __p1_750, __p2_750) __extension__ ({ \ + uint32x4_t __ret_750; \ + uint32x2_t __s0_750 = __p0_750; \ + uint64x2_t __s1_750 = __p1_750; \ + __ret_750 = (uint32x4_t)(vcombine_u32((uint32x2_t)(__s0_750), (uint32x2_t)(vshrn_n_u64(__s1_750, __p2_750)))); \ + __ret_750; \ +}) #else -__ai float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define 
vshrn_high_n_u64(__p0_751, __p1_751, __p2_751) __extension__ ({ \ + uint32x4_t __ret_751; \ + uint32x2_t __s0_751 = __p0_751; \ + uint64x2_t __s1_751 = __p1_751; \ + uint32x2_t __rev0_751; __rev0_751 = __builtin_shufflevector(__s0_751, __s0_751, 1, 0); \ + uint64x2_t __rev1_751; __rev1_751 = __builtin_shufflevector(__s1_751, __s1_751, 1, 0); \ + __ret_751 = (uint32x4_t)(__noswap_vcombine_u32((uint32x2_t)(__rev0_751), (uint32x2_t)(__noswap_vshrn_n_u64(__rev1_751, __p2_751)))); \ + __ret_751 = __builtin_shufflevector(__ret_751, __ret_751, 3, 2, 1, 0); \ + __ret_751; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} +#define vshrn_high_n_u16(__p0_752, __p1_752, __p2_752) __extension__ ({ \ + uint8x16_t __ret_752; \ + uint8x8_t __s0_752 = __p0_752; \ + uint16x8_t __s1_752 = __p1_752; \ + __ret_752 = (uint8x16_t)(vcombine_u8((uint8x8_t)(__s0_752), (uint8x8_t)(vshrn_n_u16(__s1_752, __p2_752)))); \ + __ret_752; \ +}) #else -__ai int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vshrn_high_n_u16(__p0_753, __p1_753, __p2_753) __extension__ ({ \ + uint8x16_t __ret_753; \ + uint8x8_t __s0_753 = __p0_753; \ + uint16x8_t __s1_753 = __p1_753; \ + uint8x8_t __rev0_753; __rev0_753 = __builtin_shufflevector(__s0_753, __s0_753, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint16x8_t __rev1_753; __rev1_753 = __builtin_shufflevector(__s1_753, __s1_753, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_753 = (uint8x16_t)(__noswap_vcombine_u8((uint8x8_t)(__rev0_753), (uint8x8_t)(__noswap_vshrn_n_u16(__rev1_753, __p2_753)))); \ + __ret_753 = __builtin_shufflevector(__ret_753, 
__ret_753, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_753; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); - return __ret; -} +#define vshrn_high_n_s32(__p0_754, __p1_754, __p2_754) __extension__ ({ \ + int16x8_t __ret_754; \ + int16x4_t __s0_754 = __p0_754; \ + int32x4_t __s1_754 = __p1_754; \ + __ret_754 = (int16x8_t)(vcombine_s16((int16x4_t)(__s0_754), (int16x4_t)(vshrn_n_s32(__s1_754, __p2_754)))); \ + __ret_754; \ +}) #else -__ai int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vshrn_high_n_s32(__p0_755, __p1_755, __p2_755) __extension__ ({ \ + int16x8_t __ret_755; \ + int16x4_t __s0_755 = __p0_755; \ + int32x4_t __s1_755 = __p1_755; \ + int16x4_t __rev0_755; __rev0_755 = __builtin_shufflevector(__s0_755, __s0_755, 3, 2, 1, 0); \ + int32x4_t __rev1_755; __rev1_755 = __builtin_shufflevector(__s1_755, __s1_755, 3, 2, 1, 0); \ + __ret_755 = (int16x8_t)(__noswap_vcombine_s16((int16x4_t)(__rev0_755), (int16x4_t)(__noswap_vshrn_n_s32(__rev1_755, __p2_755)))); \ + __ret_755 = __builtin_shufflevector(__ret_755, __ret_755, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_755; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); - return __ret; -} +#define vshrn_high_n_s64(__p0_756, __p1_756, __p2_756) __extension__ ({ \ + int32x4_t __ret_756; \ + int32x2_t __s0_756 = __p0_756; \ + int64x2_t __s1_756 = __p1_756; \ + __ret_756 = (int32x4_t)(vcombine_s32((int32x2_t)(__s0_756), 
(int32x2_t)(vshrn_n_s64(__s1_756, __p2_756)))); \ + __ret_756; \ +}) #else -__ai poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vshrn_high_n_s64(__p0_757, __p1_757, __p2_757) __extension__ ({ \ + int32x4_t __ret_757; \ + int32x2_t __s0_757 = __p0_757; \ + int64x2_t __s1_757 = __p1_757; \ + int32x2_t __rev0_757; __rev0_757 = __builtin_shufflevector(__s0_757, __s0_757, 1, 0); \ + int64x2_t __rev1_757; __rev1_757 = __builtin_shufflevector(__s1_757, __s1_757, 1, 0); \ + __ret_757 = (int32x4_t)(__noswap_vcombine_s32((int32x2_t)(__rev0_757), (int32x2_t)(__noswap_vshrn_n_s64(__rev1_757, __p2_757)))); \ + __ret_757 = __builtin_shufflevector(__ret_757, __ret_757, 3, 2, 1, 0); \ + __ret_757; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); - return __ret; -} +#define vshrn_high_n_s16(__p0_758, __p1_758, __p2_758) __extension__ ({ \ + int8x16_t __ret_758; \ + int8x8_t __s0_758 = __p0_758; \ + int16x8_t __s1_758 = __p1_758; \ + __ret_758 = (int8x16_t)(vcombine_s8((int8x8_t)(__s0_758), (int8x8_t)(vshrn_n_s16(__s1_758, __p2_758)))); \ + __ret_758; \ +}) #else -__ai poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __ret; - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vshrn_high_n_s16(__p0_759, 
__p1_759, __p2_759) __extension__ ({ \ + int8x16_t __ret_759; \ + int8x8_t __s0_759 = __p0_759; \ + int16x8_t __s1_759 = __p1_759; \ + int8x8_t __rev0_759; __rev0_759 = __builtin_shufflevector(__s0_759, __s0_759, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_759; __rev1_759 = __builtin_shufflevector(__s1_759, __s1_759, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_759 = (int8x16_t)(__noswap_vcombine_s8((int8x8_t)(__rev0_759), (int8x8_t)(__noswap_vshrn_n_s16(__rev1_759, __p2_759)))); \ + __ret_759 = __builtin_shufflevector(__ret_759, __ret_759, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_759; \ +}) #endif +#define vslid_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __ret; \ + uint64_t __s0 = __p0; \ + uint64_t __s1 = __p1; \ + __ret = (uint64_t) __builtin_neon_vslid_n_u64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vslid_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int64_t __s1 = __p1; \ + __ret = (int64_t) __builtin_neon_vslid_n_s64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vsli_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __ret; \ + poly64x1_t __s0 = __p0; \ + poly64x1_t __s1 = __p1; \ + __ret = (poly64x1_t) __builtin_neon_vsli_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); - return __ret; -} +#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \ + __ret; \ +}) #else -__ai poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __ret; - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; 
__rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vsliq_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (poly64x2_t) __builtin_neon_vsliq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) #endif -#ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); +__ai __attribute__((target("neon"))) uint8_t vsqaddb_u8(uint8_t __p0, int8_t __p1) { + uint8_t __ret; + __ret = (uint8_t) __builtin_neon_vsqaddb_u8(__p0, __p1); return __ret; } -#else -__ai poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly64x2_t __ret; - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) uint32_t vsqadds_u32(uint32_t __p0, int32_t __p1) { + uint32_t __ret; + __ret = (uint32_t) __builtin_neon_vsqadds_u32(__p0, __p1); return __ret; } -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); +__ai __attribute__((target("neon"))) uint64_t vsqaddd_u64(uint64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) 
__builtin_neon_vsqaddd_u64(__p0, __p1); return __ret; } -#else -__ai poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t __ret; - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint16_t vsqaddh_u16(uint16_t __p0, int16_t __p1) { + uint16_t __ret; + __ret = (uint16_t) __builtin_neon_vsqaddh_u16(__p0, __p1); return __ret; } -#endif - #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); + __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 48); return __ret; } #else -__ai uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vsqaddq_u8(uint8x16_t __p0, int8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x16_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 48); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t 
vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); + __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vsqaddq_u32(uint32x4_t __p0, int32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint32x4_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 50); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else -__ai uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vsqaddq_u64(uint64x2_t __p0, int64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai 
uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); + __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 49); return __ret; } #else -__ai uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vsqaddq_u16(uint16x8_t __p0, int16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint16x8_t) __builtin_neon_vsqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 49); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); +__ai __attribute__((target("neon"))) uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 16); return __ret; } #else -__ai int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 
7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8x8_t vsqadd_u8(uint8x8_t __p0, int8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (uint8x8_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 16); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); +__ai __attribute__((target("neon"))) uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 18); return __ret; } #else -__ai float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); +__ai __attribute__((target("neon"))) uint32x2_t vsqadd_u32(uint32x2_t __p0, int32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint32x2_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 18); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif +__ai __attribute__((target("neon"))) uint64x1_t vsqadd_u64(uint64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 19); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); +__ai 
__attribute__((target("neon"))) uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 17); return __ret; } #else -__ai float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); +__ai __attribute__((target("neon"))) uint16x4_t vsqadd_u16(uint16x4_t __p0, int16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (uint16x4_t) __builtin_neon_vsqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 17); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); +__ai __attribute__((target("neon"))) float64x2_t vsqrtq_f64(float64x2_t __p0) { + float64x2_t __ret; + __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 42); return __ret; } #else -__ai int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float64x2_t vsqrtq_f64(float64x2_t __p0) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float64x2_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 42); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return 
__ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); +__ai __attribute__((target("neon"))) float32x4_t vsqrtq_f32(float32x4_t __p0) { + float32x4_t __ret; + __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__p0, 41); return __ret; } #else -__ai int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) float32x4_t vsqrtq_f32(float32x4_t __p0) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + __ret = (float32x4_t) __builtin_neon_vsqrtq_v((int8x16_t)__rev0, 41); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif +__ai __attribute__((target("neon"))) float64x1_t vsqrt_f64(float64x1_t __p0) { + float64x1_t __ret; + __ret = (float64x1_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 10); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); +__ai __attribute__((target("neon"))) float32x2_t vsqrt_f32(float32x2_t __p0) { + float32x2_t __ret; + __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__p0, 9); return __ret; } #else -__ai int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 
2, 1, 0); +__ai __attribute__((target("neon"))) float32x2_t vsqrt_f32(float32x2_t __p0) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + __ret = (float32x2_t) __builtin_neon_vsqrt_v((int8x8_t)__rev0, 9); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif +#define vsrad_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __ret; \ + uint64_t __s0 = __p0; \ + uint64_t __s1 = __p1; \ + __ret = (uint64_t) __builtin_neon_vsrad_n_u64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vsrad_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int64_t __s1 = __p1; \ + __ret = (int64_t) __builtin_neon_vsrad_n_s64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vsrid_n_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64_t __ret; \ + uint64_t __s0 = __p0; \ + uint64_t __s1 = __p1; \ + __ret = (uint64_t) __builtin_neon_vsrid_n_u64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vsrid_n_s64(__p0, __p1, __p2) __extension__ ({ \ + int64_t __ret; \ + int64_t __s0 = __p0; \ + int64_t __s1 = __p1; \ + __ret = (int64_t) __builtin_neon_vsrid_n_s64(__s0, __s1, __p2); \ + __ret; \ +}) +#define vsri_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __ret; \ + poly64x1_t __s0 = __p0; \ + poly64x1_t __s1 = __p1; \ + __ret = (poly64x1_t) __builtin_neon_vsri_n_v((int8x8_t)__s0, (int8x8_t)__s1, __p2, 6); \ + __ret; \ +}) #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); - return __ret; -} +#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__s0, (int8x16_t)__s1, __p2, 38); \ + __ret; \ +}) #else -__ai uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vsriq_n_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __ret; \ + poly64x2_t __s0 = __p0; \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 1, 0); \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __ret = (poly64x2_t) __builtin_neon_vsriq_n_v((int8x16_t)__rev0, (int8x16_t)__rev1, __p2, 38); \ + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); \ + __ret; \ +}) #endif +#define vst1_p64(__p0, __p1) __extension__ ({ \ + poly64x1_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 6); \ +}) #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} +#define vst1q_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 38); \ +}) #else -__ai uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vst1q_p64(__p0, __p1) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 38); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); - return 
__ret; -} +#define vst1q_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__s1, 42); \ +}) #else -__ai uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vst1q_f64(__p0, __p1) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_v(__p0, (int8x16_t)__rev1, 42); \ +}) #endif +#define vst1_f64(__p0, __p1) __extension__ ({ \ + float64x1_t __s1 = __p1; \ + __builtin_neon_vst1_v(__p0, (int8x8_t)__s1, 10); \ +}) +#define vst1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 6); \ +}) #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); - return __ret; -} +#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 38); \ +}) #else -__ai int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst1q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2_t __s1 = __p1; \ + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + 
__builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 38); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} +#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__s1, __p2, 42); \ +}) #else -__ai float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vst1q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2_t __s1 = __p1; \ + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 1, 0); \ + __builtin_neon_vst1q_lane_v(__p0, (int8x16_t)__rev1, __p2, 42); \ +}) #endif +#define vst1_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1_t __s1 = __p1; \ + __builtin_neon_vst1_lane_v(__p0, (int8x8_t)__s1, __p2, 10); \ +}) +#define vst1_p64_x2(__p0, __p1) __extension__ ({ \ + poly64x1x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \ +}) #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} +#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \ +}) #else -__ai int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 
3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vst1q_p64_x2(__p0, __p1) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + poly64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); - return __ret; -} +#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 42); \ +}) #else -__ai int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vst1q_f64_x2(__p0, __p1) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + float64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst1q_x2_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 42); \ +}) +#endif + +#define vst1_f64_x2(__p0, __p1) __extension__ ({ \ + float64x1x2_t __s1 = __p1; \ + __builtin_neon_vst1_x2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 10); \ +}) +#define vst1_p64_x3(__p0, __p1) __extension__ ({ \ + poly64x1x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p64_x3(__p0, __p1) __extension__ 
({ \ + poly64x2x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \ +}) +#else +#define vst1q_p64_x3(__p0, __p1) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + poly64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 42); \ +}) +#else +#define vst1q_f64_x3(__p0, __p1) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + float64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst1q_x3_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 42); \ +}) +#endif + +#define vst1_f64_x3(__p0, __p1) __extension__ ({ \ + float64x1x3_t __s1 = __p1; \ + __builtin_neon_vst1_x3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 10); \ +}) +#define vst1_p64_x4(__p0, __p1) __extension__ ({ \ + poly64x1x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \ 
+}) +#else +#define vst1q_p64_x4(__p0, __p1) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + poly64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 42); \ +}) +#else +#define vst1q_f64_x4(__p0, __p1) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + float64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst1q_x4_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 42); \ +}) +#endif + +#define vst1_f64_x4(__p0, __p1) __extension__ ({ \ + float64x1x4_t __s1 = __p1; \ + __builtin_neon_vst1_x4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 10); \ +}) +#define vst2_p64(__p0, __p1) __extension__ ({ \ + poly64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst2q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 38); \ 
+}) +#else +#define vst2q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + poly64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 38); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 51); \ +}) +#else +#define vst2q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + uint64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 51); \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vst2q_f64(__p0, __p1) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 42); \ +}) +#else +#define vst2q_f64(__p0, __p1) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + float64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 42); \ +}) #endif -__ai uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); - return __ret; -} +#define vst2q_s64(__p0, __p1) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + 
__builtin_neon_vst2q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 35); \ +}) #else -__ai uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) { - uint64x2_t __ret; - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vst2q_s64(__p0, __p1) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + int64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], 35); \ +}) #endif +#define vst2_f64(__p0, __p1) __extension__ ({ \ + float64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], 10); \ +}) +#define vst2_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 6); \ +}) #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); - return __ret; -} +#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 36); \ +}) #else -__ai uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} 
+#define vst2q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x2_t __s1 = __p1; \ + poly8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 36); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); - return __ret; -} +#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 38); \ +}) #else -__ai uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) { - uint64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vst2q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x2_t __s1 = __p1; \ + poly64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 38); \ +}) #endif -__ai uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); - return __ret; -} -__ai uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) { - uint64x1_t __ret; - __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); 
- return __ret; -} -__ai uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1); - return __ret; -} -__ai uint64_t vtstd_s64(int64_t __p0, int64_t __p1) { - uint64_t __ret; - __ret = (uint64_t) __builtin_neon_vtstd_s64(__p0, __p1); - return __ret; -} -__ai int8_t vuqaddb_s8(int8_t __p0, uint8_t __p1) { - int8_t __ret; - __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1); - return __ret; -} -__ai int32_t vuqadds_s32(int32_t __p0, uint32_t __p1) { - int32_t __ret; - __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1); - return __ret; -} -__ai int64_t vuqaddd_s64(int64_t __p0, uint64_t __p1) { - int64_t __ret; - __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1); - return __ret; -} -__ai int16_t vuqaddh_s16(int16_t __p0, uint16_t __p1) { - int16_t __ret; - __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) { - int8x16_t __ret; - __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); - return __ret; -} +#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 48); \ +}) #else -__ai int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst2q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x2_t __s1 = __p1; \ + uint8x16x2_t __rev1; \ + 
__rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 48); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) { - int32x4_t __ret; - __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); - return __ret; -} +#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 51); \ +}) #else -__ai int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vst2q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x2_t __s1 = __p1; \ + uint64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 51); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) { - int64x2_t __ret; - __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); - return __ret; -} +#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 32); \ +}) #else -__ai int64x2_t 
vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vst2q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x2_t __s1 = __p1; \ + int8x16x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 32); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) { - int16x8_t __ret; - __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); - return __ret; -} +#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 42); \ +}) #else -__ai int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst2q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x2_t __s1 = __p1; \ + float64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + 
__builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 42); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) { - int8x8_t __ret; - __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); - return __ret; -} +#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], __p2, 35); \ +}) #else -__ai int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst2q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x2_t __s1 = __p1; \ + int64x2x2_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __builtin_neon_vst2q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], __p2, 35); \ +}) #endif +#define vst2_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 19); \ +}) +#define vst2_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 10); \ +}) +#define vst2_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x2_t __s1 = __p1; \ + __builtin_neon_vst2_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], __p2, 3); \ +}) +#define vst3_p64(__p0, __p1) __extension__ ({ \ + poly64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, 
(int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 6); \ +}) #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) { - int32x2_t __ret; - __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); - return __ret; -} +#define vst3q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 38); \ +}) #else -__ai int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vst3q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + poly64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 38); \ +}) #endif -__ai int64x1_t vuqadd_s64(int64x1_t __p0, uint64x1_t __p1) { - int64x1_t __ret; - __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) { - int16x4_t __ret; - __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); - return __ret; -} +#define vst3q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 51); \ +}) #else -__ai int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) { - int16x4_t 
__ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vst3q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + uint64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 51); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); - return __ret; -} +#define vst3q_f64(__p0, __p1) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 42); \ +}) #else -__ai poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3q_f64(__p0, __p1) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + float64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_v(__p0, 
(int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 42); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); - return __ret; -} +#define vst3q_s64(__p0, __p1) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], 35); \ +}) #else -__ai poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __ret; - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vst3q_s64(__p0, __p1) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + int64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], 35); \ +}) #endif +#define vst3_f64(__p0, __p1) __extension__ ({ \ + float64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 10); \ +}) +#define vst3_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 6); \ +}) #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); - return __ret; -} +#define vst3q_lane_p8(__p0, __p1, __p2) 
__extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 36); \ +}) #else -__ai poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __ret; - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x3_t __s1 = __p1; \ + poly8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 36); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} +#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 38); \ +}) #else -__ai poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly64x2_t __ret; - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - 
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vst3q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x3_t __s1 = __p1; \ + poly64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 38); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); - return __ret; -} +#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 48); \ +}) #else -__ai poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t __ret; - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x3_t __s1 = __p1; \ + uint8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, 
(int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 48); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); - return __ret; -} +#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 51); \ +}) #else -__ai uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst3q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x3_t __s1 = __p1; \ + uint64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 51); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); - return __ret; -} +#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 32); \ +}) 
#else -__ai uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vst3q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x3_t __s1 = __p1; \ + int8x16x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 32); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} +#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 42); \ +}) #else -__ai uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) { - uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vst3q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x3_t __s1 = __p1; \ + float64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = 
__builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 42); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); - return __ret; -} +#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], __p2, 35); \ +}) +#else +#define vst3q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x3_t __s1 = __p1; \ + int64x2x3_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __builtin_neon_vst3q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], __p2, 35); \ +}) +#endif + +#define vst3_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 19); \ +}) +#define vst3_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 10); \ +}) +#define vst3_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x3_t __s1 = __p1; \ + __builtin_neon_vst3_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], __p2, 3); \ +}) +#define vst4_p64(__p0, __p1) __extension__ ({ \ + poly64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 
(int8x8_t)__s1.val[3], 6); \ +}) +#ifdef __LITTLE_ENDIAN__ +#define vst4q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 38); \ +}) #else -__ai uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst4q_p64(__p0, __p1) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + poly64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 38); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); - return __ret; -} +#define vst4q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 51); \ +}) #else -__ai int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, 
__p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst4q_u64(__p0, __p1) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + uint64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 51); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} +#define vst4q_f64(__p0, __p1) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 42); \ +}) #else -__ai float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vst4q_f64(__p0, __p1) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + float64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = 
__builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 42); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); - return __ret; -} +#define vst4q_s64(__p0, __p1) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], 35); \ +}) #else -__ai float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vst4q_s64(__p0, __p1) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + int64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], 35); \ +}) #endif +#define vst4_f64(__p0, __p1) __extension__ ({ \ + float64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], 10); \ +}) +#define vst4_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], 
(int8x8_t)__s1.val[3], __p2, 6); \ +}) #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); - return __ret; -} +#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 36); \ +}) #else -__ai int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vst4q_lane_p8(__p0, __p1, __p2) __extension__ ({ \ + poly8x16x4_t __s1 = __p1; \ + poly8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 36); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} +#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], 
(int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 38); \ +}) #else -__ai int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) { - int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vst4q_lane_p64(__p0, __p1, __p2) __extension__ ({ \ + poly64x2x4_t __s1 = __p1; \ + poly64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 38); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); - return __ret; -} +#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 48); \ +}) #else -__ai int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst4q_lane_u8(__p0, __p1, __p2) __extension__ ({ \ + uint8x16x4_t __s1 = __p1; \ + uint8x16x4_t __rev1; 
\ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 48); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); - return __ret; -} +#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 51); \ +}) #else -__ai uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define vst4q_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x2x4_t __s1 = __p1; \ + uint64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, 
(int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 51); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); - return __ret; -} +#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 32); \ +}) #else -__ai uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vst4q_lane_s8(__p0, __p1, __p2) __extension__ ({ \ + int8x16x4_t __s1 = __p1; \ + int8x16x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 32); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); - return __ret; -} +#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + 
__builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 42); \ +}) #else -__ai uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vst4q_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x2x4_t __s1 = __p1; \ + float64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 42); \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); - return __ret; -} +#define vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__s1.val[0], (int8x16_t)__s1.val[1], (int8x16_t)__s1.val[2], (int8x16_t)__s1.val[3], __p2, 35); \ +}) #else -__ai int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} +#define 
vst4q_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x2x4_t __s1 = __p1; \ + int64x2x4_t __rev1; \ + __rev1.val[0] = __builtin_shufflevector(__s1.val[0], __s1.val[0], 1, 0); \ + __rev1.val[1] = __builtin_shufflevector(__s1.val[1], __s1.val[1], 1, 0); \ + __rev1.val[2] = __builtin_shufflevector(__s1.val[2], __s1.val[2], 1, 0); \ + __rev1.val[3] = __builtin_shufflevector(__s1.val[3], __s1.val[3], 1, 0); \ + __builtin_neon_vst4q_lane_v(__p0, (int8x16_t)__rev1.val[0], (int8x16_t)__rev1.val[1], (int8x16_t)__rev1.val[2], (int8x16_t)__rev1.val[3], __p2, 35); \ +}) #endif -#ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); +#define vst4_lane_u64(__p0, __p1, __p2) __extension__ ({ \ + uint64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 19); \ +}) +#define vst4_lane_f64(__p0, __p1, __p2) __extension__ ({ \ + float64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 10); \ +}) +#define vst4_lane_s64(__p0, __p1, __p2) __extension__ ({ \ + int64x1x4_t __s1 = __p1; \ + __builtin_neon_vst4_lane_v(__p0, (int8x8_t)__s1.val[0], (int8x8_t)__s1.val[1], (int8x8_t)__s1.val[2], (int8x8_t)__s1.val[3], __p2, 3); \ +}) +#define vstrq_p128(__p0, __p1) __extension__ ({ \ + poly128_t __s1 = __p1; \ + __builtin_neon_vstrq_p128(__p0, __s1); \ +}) +__ai __attribute__((target("neon"))) uint64_t vsubd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vsubd_u64(__p0, __p1); return __ret; } -#else -__ai float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - 
__ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) int64_t vsubd_s64(int64_t __p0, int64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vsubd_s64(__p0, __p1); return __ret; } -#endif - #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2); +__ai __attribute__((target("neon"))) float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __p0 - __p1; return __ret; } #else -__ai int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); +__ai __attribute__((target("neon"))) float64x2_t vsubq_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __rev0 - __rev1; __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif -#ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); - return __ret; -} -#else -__ai int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float64x1_t vsub_f64(float64x1_t __p0, float64x1_t __p1) { + float64x1_t __ret; + __ret = __p0 - __p1; return __ret; } -#endif - #ifdef 
__LITTLE_ENDIAN__ -__ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); +__ai __attribute__((target("neon"))) uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + __ret = vcombine_u16(__p0, vsubhn_u32(__p1, __p2)); return __ret; } #else -__ai poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) { - poly8x8_t __ret; - poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); +__ai __attribute__((target("neon"))) uint16x8_t vsubhn_high_u32(uint16x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { + uint16x8_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + uint32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vcombine_u16(__rev0, __noswap_vsubhn_u32(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); +__ai __attribute__((target("neon"))) uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + __ret = vcombine_u32(__p0, vsubhn_u64(__p1, __p2)); return __ret; } #else -__ai poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) { - poly16x4_t __ret; - poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); +__ai __attribute__((target("neon"))) 
uint32x4_t vsubhn_high_u64(uint32x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { + uint32x4_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + uint64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vcombine_u32(__rev0, __noswap_vsubhn_u64(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); +__ai __attribute__((target("neon"))) uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + __ret = vcombine_u8(__p0, vsubhn_u16(__p1, __p2)); return __ret; } #else -__ai poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) { - poly8x16_t __ret; - poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); +__ai __attribute__((target("neon"))) uint8x16_t vsubhn_high_u16(uint8x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { + uint8x16_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_u8(__rev0, __noswap_vsubhn_u16(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t 
vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); +__ai __attribute__((target("neon"))) int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + __ret = vcombine_s16(__p0, vsubhn_s32(__p1, __p2)); return __ret; } #else -__ai poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly64x2_t __ret; - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("neon"))) int16x8_t vsubhn_high_s32(int16x4_t __p0, int32x4_t __p1, int32x4_t __p2) { + int16x8_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + int32x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); + __ret = __noswap_vcombine_s16(__rev0, __noswap_vsubhn_s32(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); +__ai __attribute__((target("neon"))) int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + __ret = vcombine_s32(__p0, vsubhn_s64(__p1, __p2)); return __ret; } #else -__ai poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) { - poly16x8_t __ret; - poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 
6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32x4_t vsubhn_high_s64(int32x2_t __p0, int64x2_t __p1, int64x2_t __p2) { + int32x4_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + int64x2_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 1, 0); + __ret = __noswap_vcombine_s32(__rev0, __noswap_vsubhn_s64(__rev1, __rev2)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); +__ai __attribute__((target("neon"))) int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + __ret = vcombine_s8(__p0, vsubhn_s16(__p1, __p2)); return __ret; } #else -__ai uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) { - uint8x16_t __ret; - uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); +__ai __attribute__((target("neon"))) int8x16_t vsubhn_high_s16(int8x8_t __p0, int16x8_t __p1, int16x8_t __p2) { + int8x16_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcombine_s8(__rev0, __noswap_vsubhn_s16(__rev1, __rev2)); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef 
__LITTLE_ENDIAN__ -__ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); +__ai __attribute__((target("neon"))) uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = vmovl_high_u8(__p0) - vmovl_high_u8(__p1); return __ret; } #else -__ai uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) { - uint32x4_t __ret; - uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint16x8_t vsubl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_u8(__rev0) - __noswap_vmovl_high_u8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { uint64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + __ret = vmovl_high_u32(__p0) - vmovl_high_u32(__p1); return __ret; } #else -__ai uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vsubl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { uint64x2_t __ret; - uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + uint32x4_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_u32(__rev0) - __noswap_vmovl_high_u32(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); +__ai __attribute__((target("neon"))) uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = vmovl_high_u16(__p0) - vmovl_high_u16(__p1); return __ret; } #else -__ai uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) { - uint16x8_t __ret; +__ai __attribute__((target("neon"))) uint32x4_t vsubl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_u16(__rev0) - __noswap_vmovl_high_u16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); +__ai __attribute__((target("neon"))) int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = vmovl_high_s8(__p0) - vmovl_high_s8(__p1); return __ret; } #else -__ai int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) { - int8x16_t __ret; +__ai __attribute__((target("neon"))) int16x8_t vsubl_high_s8(int8x16_t __p0, int8x16_t __p1) { + int16x8_t __ret; int8x16_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); - __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} -#else -__ai float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); - return __ret; -} -#else -__ai float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); - return __ret; -} -#else -__ai int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) { - int32x4_t __ret; - int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int32x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_s8(__rev0) - __noswap_vmovl_high_s8(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + __ret = vmovl_high_s32(__p0) - vmovl_high_s32(__p1); return __ret; } #else -__ai int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vsubl_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; - int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_s32(__rev0) - __noswap_vmovl_high_s32(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); +__ai __attribute__((target("neon"))) int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = vmovl_high_s16(__p0) - vmovl_high_s16(__p1); return __ret; } #else -__ai int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) { - int16x8_t __ret; +__ai __attribute__((target("neon"))) int32x4_t vsubl_high_s16(int16x8_t __p0, int16x8_t __p1) { + int32x4_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 
4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vmovl_high_s16(__rev0) - __noswap_vmovl_high_s16(__rev1); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); +__ai __attribute__((target("neon"))) uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + __ret = __p0 - vmovl_high_u8(__p1); return __ret; } #else -__ai uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) { - uint8x8_t __ret; - uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); +__ai __attribute__((target("neon"))) uint16x8_t vsubw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_high_u8(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) { - uint32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); +__ai __attribute__((target("neon"))) uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + __ret = __p0 - vmovl_high_u32(__p1); return __ret; } #else -__ai uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) { - 
uint32x2_t __ret; - uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); +__ai __attribute__((target("neon"))) uint64x2_t vsubw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_high_u32(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); +__ai __attribute__((target("neon"))) uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + __ret = __p0 - vmovl_high_u16(__p1); return __ret; } #else -__ai uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) { - uint16x4_t __ret; - uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); +__ai __attribute__((target("neon"))) uint32x4_t vsubw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_high_u16(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); +__ai __attribute__((target("neon"))) int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __ret; + __ret = __p0 
- vmovl_high_s8(__p1); return __ret; } #else -__ai int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) { - int8x8_t __ret; - int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); +__ai __attribute__((target("neon"))) int16x8_t vsubw_high_s8(int16x8_t __p0, int8x16_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_high_s8(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); - return __ret; -} -#else -__ai float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3); +__ai __attribute__((target("neon"))) int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __ret; + __ret = __p0 - vmovl_high_s32(__p1); return __ret; } #else -__ai int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) { - int32x2_t __ret; - int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = 
__builtin_shufflevector(__rev0, __rev1, 1, 3); +__ai __attribute__((target("neon"))) int64x2_t vsubw_high_s32(int64x2_t __p0, int32x4_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_high_s32(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); +__ai __attribute__((target("neon"))) int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __ret; + __ret = __p0 - vmovl_high_s16(__p1); return __ret; } #else -__ai int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) { - int16x4_t __ret; - int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); +__ai __attribute__((target("neon"))) int32x4_t vsubw_high_s16(int32x4_t __p0, int16x8_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __rev0 - __noswap_vmovl_high_s16(__rev1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); return __ret; } #else -__ai poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vtrn1_p8(poly8x8_t __p0, poly8x8_t 
__p1) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); return __ret; } #else -__ai poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) poly16x4_t vtrn1_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); return __ret; } #else -__ai poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vtrn1q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, 
__p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) { +__ai __attribute__((target("neon"))) poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) { +__ai __attribute__((target("neon"))) poly64x2_t vtrn1q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -61812,64 +61842,64 @@ __ai poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); return __ret; } #else -__ai poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8_t vtrn1q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 
9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); return __ret; } #else -__ai uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vtrn1q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); return __ret; } #else -__ai uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vtrn1q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; 
__rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vtrn1q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -61880,47 +61910,47 @@ __ai uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); return __ret; } #else -__ai uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vtrn1q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vzip1q_s8(int8x16_t 
__p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); return __ret; } #else -__ai int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vtrn1q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vtrn1q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -61931,47 +61961,47 @@ __ai float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vtrn1q_f32(float32x4_t __p0, 
float32x4_t __p1) { float32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); return __ret; } #else -__ai float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vtrn1q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); return __ret; } #else -__ai int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vtrn1q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vtrn1q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vtrn1q_s64(int64x2_t __p0, 
int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -61982,47 +62012,47 @@ __ai int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); return __ret; } #else -__ai int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vtrn1q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); return __ret; } #else -__ai uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vtrn1_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 
10, 3, 11); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vtrn1_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -62033,47 +62063,47 @@ __ai uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); return __ret; } #else -__ai uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vtrn1_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 
3, 11); + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); return __ret; } #else -__ai int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vtrn1_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vtrn1_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -62084,13 +62114,13 @@ __ai float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vtrn1_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -62101,81 +62131,115 @@ 
__ai int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); return __ret; } #else -__ai int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vtrn1_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) { + 
float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); return __ret; } #else -__ai poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) { +__ai __attribute__((target("neon"))) poly8x8_t vtrn2_p8(poly8x8_t __p0, poly8x8_t __p1) { poly8x8_t __ret; poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); return __ret; } #else -__ai poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) { +__ai __attribute__((target("neon"))) poly16x4_t vtrn2_p16(poly16x4_t __p0, poly16x4_t __p1) { poly16x4_t __ret; poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, 
__rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); return __ret; } #else -__ai poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) { +__ai __attribute__((target("neon"))) poly8x16_t vtrn2q_p8(poly8x16_t __p0, poly8x16_t __p1) { poly8x16_t __ret; poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) { +__ai __attribute__((target("neon"))) poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) { +__ai __attribute__((target("neon"))) poly64x2_t vtrn2q_p64(poly64x2_t __p0, poly64x2_t __p1) { poly64x2_t __ret; poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -62186,64 +62250,64 @@ __ai 
poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); return __ret; } #else -__ai poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) { +__ai __attribute__((target("neon"))) poly16x8_t vtrn2q_p16(poly16x8_t __p0, poly16x8_t __p1) { poly16x8_t __ret; poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); return __ret; } #else -__ai uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint8x16_t vtrn2q_u8(uint8x16_t __p0, uint8x16_t __p1) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 
30, 15, 31); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); return __ret; } #else -__ai uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vtrn2q_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vtrn2q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -62254,47 +62318,47 @@ __ai uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t 
__p1) { uint16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); return __ret; } #else -__ai uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vtrn2q_u16(uint16x8_t __p0, uint16x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + __ret = __builtin_shufflevector(__p0, __p1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); return __ret; } #else -__ai int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int8x16_t vtrn2q_s8(int8x16_t __p0, int8x16_t __p1) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31); __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef 
__LITTLE_ENDIAN__ -__ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("neon"))) float64x2_t vtrn2q_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -62305,47 +62369,47 @@ __ai float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); return __ret; } #else -__ai float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) { +__ai __attribute__((target("neon"))) float32x4_t vtrn2q_f32(float32x4_t __p0, float32x4_t __p1) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); return __ret; } #else -__ai int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) { 
+__ai __attribute__((target("neon"))) int32x4_t vtrn2q_s32(int32x4_t __p0, int32x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vtrn2q_s64(int64x2_t __p0, int64x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -62356,47 +62420,47 @@ __ai int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); return __ret; } #else -__ai int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vtrn2q_s16(int16x8_t __p0, int16x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 
5, 13, 7, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); return __ret; } #else -__ai uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint8x8_t vtrn2_u8(uint8x8_t __p0, uint8x8_t __p1) { uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint32x2_t vtrn2_u32(uint32x2_t __p0, uint32x2_t __p1) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -62407,47 +62471,47 @@ __ai uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 
2, 6, 3, 7); + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); return __ret; } #else -__ai uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint16x4_t vtrn2_u16(uint16x4_t __p0, uint16x4_t __p1) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); return __ret; } #else -__ai int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int8x8_t vtrn2_s8(int8x8_t __p0, int8x8_t __p1) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vtrn2_f32(float32x2_t __p0, float32x2_t __p1) { float32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) { +__ai __attribute__((target("neon"))) float32x2_t vtrn2_f32(float32x2_t __p0, 
float32x2_t __p1) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -62458,13 +62522,13 @@ __ai float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 1, 3); return __ret; } #else -__ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int32x2_t vtrn2_s32(int32x2_t __p0, int32x2_t __p1) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -62475,1978 +62539,1474 @@ __ai int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); return __ret; } #else -__ai int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int16x4_t vtrn2_s16(int16x4_t __p0, int16x4_t __p1) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif -__ai __attribute__((target("aes"))) poly128_t vmull_p64(poly64_t __p0, poly64_t __p1) { - poly128_t __ret; - __ret = (poly128_t) __builtin_neon_vmull_p64(__p0, __p1); - return __ret; 
-} -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("aes"))) poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly128_t __ret; - __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1))); - return __ret; -} -#else -__ai __attribute__((target("aes"))) poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) { - poly128_t __ret; - poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1))); - return __ret; -} -#endif - #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_f32((int8x16_t)__p0, 43); +__ai __attribute__((target("neon"))) float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); return __ret; } #else -__ai __attribute__((target("bf16"))) bfloat16x8_t __a64_vcvtq_low_bf16_f32(float32x4_t __p0) { - bfloat16x8_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_f32((int8x16_t)__rev0, 43); +__ai __attribute__((target("neon"))) float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai __attribute__((target("bf16"))) bfloat16x8_t __noswap___a64_vcvtq_low_bf16_f32(float32x4_t __p0) { - bfloat16x8_t __ret; - __ret = 
(bfloat16x8_t) __builtin_neon___a64_vcvtq_low_bf16_f32((int8x16_t)__p0, 43); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_bf16(__p0_739, __p1_739, __p2_739, __p3_739) __extension__ ({ \ - bfloat16x8_t __ret_739; \ - bfloat16x8_t __s0_739 = __p0_739; \ - bfloat16x4_t __s2_739 = __p2_739; \ - __ret_739 = vsetq_lane_bf16(vget_lane_bf16(__s2_739, __p3_739), __s0_739, __p1_739); \ - __ret_739; \ -}) -#else -#define vcopyq_lane_bf16(__p0_740, __p1_740, __p2_740, __p3_740) __extension__ ({ \ - bfloat16x8_t __ret_740; \ - bfloat16x8_t __s0_740 = __p0_740; \ - bfloat16x4_t __s2_740 = __p2_740; \ - bfloat16x8_t __rev0_740; __rev0_740 = __builtin_shufflevector(__s0_740, __s0_740, 7, 6, 5, 4, 3, 2, 1, 0); \ - bfloat16x4_t __rev2_740; __rev2_740 = __builtin_shufflevector(__s2_740, __s2_740, 3, 2, 1, 0); \ - __ret_740 = __noswap_vsetq_lane_bf16(__noswap_vget_lane_bf16(__rev2_740, __p3_740), __rev0_740, __p1_740); \ - __ret_740 = __builtin_shufflevector(__ret_740, __ret_740, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_740; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_lane_bf16(__p0_741, __p1_741, __p2_741, __p3_741) __extension__ ({ \ - bfloat16x4_t __ret_741; \ - bfloat16x4_t __s0_741 = __p0_741; \ - bfloat16x4_t __s2_741 = __p2_741; \ - __ret_741 = vset_lane_bf16(vget_lane_bf16(__s2_741, __p3_741), __s0_741, __p1_741); \ - __ret_741; \ -}) -#else -#define vcopy_lane_bf16(__p0_742, __p1_742, __p2_742, __p3_742) __extension__ ({ \ - bfloat16x4_t __ret_742; \ - bfloat16x4_t __s0_742 = __p0_742; \ - bfloat16x4_t __s2_742 = __p2_742; \ - bfloat16x4_t __rev0_742; __rev0_742 = __builtin_shufflevector(__s0_742, __s0_742, 3, 2, 1, 0); \ - bfloat16x4_t __rev2_742; __rev2_742 = __builtin_shufflevector(__s2_742, __s2_742, 3, 2, 1, 0); \ - __ret_742 = __noswap_vset_lane_bf16(__noswap_vget_lane_bf16(__rev2_742, __p3_742), __rev0_742, __p1_742); \ - __ret_742 = __builtin_shufflevector(__ret_742, __ret_742, 3, 2, 1, 0); \ - __ret_742; \ -}) -#endif - -#ifdef 
__LITTLE_ENDIAN__ -#define vcopyq_laneq_bf16(__p0_743, __p1_743, __p2_743, __p3_743) __extension__ ({ \ - bfloat16x8_t __ret_743; \ - bfloat16x8_t __s0_743 = __p0_743; \ - bfloat16x8_t __s2_743 = __p2_743; \ - __ret_743 = vsetq_lane_bf16(vgetq_lane_bf16(__s2_743, __p3_743), __s0_743, __p1_743); \ - __ret_743; \ -}) -#else -#define vcopyq_laneq_bf16(__p0_744, __p1_744, __p2_744, __p3_744) __extension__ ({ \ - bfloat16x8_t __ret_744; \ - bfloat16x8_t __s0_744 = __p0_744; \ - bfloat16x8_t __s2_744 = __p2_744; \ - bfloat16x8_t __rev0_744; __rev0_744 = __builtin_shufflevector(__s0_744, __s0_744, 7, 6, 5, 4, 3, 2, 1, 0); \ - bfloat16x8_t __rev2_744; __rev2_744 = __builtin_shufflevector(__s2_744, __s2_744, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_744 = __noswap_vsetq_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_744, __p3_744), __rev0_744, __p1_744); \ - __ret_744 = __builtin_shufflevector(__ret_744, __ret_744, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_744; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_bf16(__p0_745, __p1_745, __p2_745, __p3_745) __extension__ ({ \ - bfloat16x4_t __ret_745; \ - bfloat16x4_t __s0_745 = __p0_745; \ - bfloat16x8_t __s2_745 = __p2_745; \ - __ret_745 = vset_lane_bf16(vgetq_lane_bf16(__s2_745, __p3_745), __s0_745, __p1_745); \ - __ret_745; \ -}) -#else -#define vcopy_laneq_bf16(__p0_746, __p1_746, __p2_746, __p3_746) __extension__ ({ \ - bfloat16x4_t __ret_746; \ - bfloat16x4_t __s0_746 = __p0_746; \ - bfloat16x8_t __s2_746 = __p2_746; \ - bfloat16x4_t __rev0_746; __rev0_746 = __builtin_shufflevector(__s0_746, __s0_746, 3, 2, 1, 0); \ - bfloat16x8_t __rev2_746; __rev2_746 = __builtin_shufflevector(__s2_746, __s2_746, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_746 = __noswap_vset_lane_bf16(__noswap_vgetq_lane_bf16(__rev2_746, __p3_746), __rev0_746, __p1_746); \ - __ret_746 = __builtin_shufflevector(__ret_746, __ret_746, 3, 2, 1, 0); \ - __ret_746; \ -}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) bfloat16x4_t 
vcvt_bf16_f32(float32x4_t __p0) { - bfloat16x4_t __ret; - __ret = vget_low_bf16(__a64_vcvtq_low_bf16_f32(__p0)); +__ai __attribute__((target("neon"))) float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); return __ret; } #else -__ai __attribute__((target("bf16"))) bfloat16x4_t vcvt_bf16_f32(float32x4_t __p0) { - bfloat16x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __noswap_vget_low_bf16(__noswap___a64_vcvtq_low_bf16_f32(__rev0)); +__ai __attribute__((target("neon"))) float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_f32((int8x16_t)__p0, (int8x16_t)__p1, 43); - return __ret; -} -#else -__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_high_bf16_f32(bfloat16x8_t __p0, float32x4_t __p1) { - bfloat16x8_t __ret; - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (bfloat16x8_t) __builtin_neon_vcvtq_high_bf16_f32((int8x16_t)__rev0, (int8x16_t)__rev1, 43); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { - bfloat16x8_t __ret; - __ret = __a64_vcvtq_low_bf16_f32(__p0); - return __ret; -} -#else 
-__ai __attribute__((target("bf16"))) bfloat16x8_t vcvtq_low_bf16_f32(float32x4_t __p0) { - bfloat16x8_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = __noswap___a64_vcvtq_low_bf16_f32(__rev0); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); - return __ret; -} -#endif - -__ai __attribute__((target("bf16"))) poly8x8_t vreinterpret_p8_bf16(bfloat16x4_t __p0) { - poly8x8_t __ret; - __ret = (poly8x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) poly64x1_t vreinterpret_p64_bf16(bfloat16x4_t __p0) { - poly64x1_t __ret; - __ret = (poly64x1_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) poly16x4_t vreinterpret_p16_bf16(bfloat16x4_t __p0) { - poly16x4_t __ret; - __ret = (poly16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) poly8x16_t vreinterpretq_p8_bf16(bfloat16x8_t __p0) { - poly8x16_t __ret; - __ret = (poly8x16_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) poly128_t vreinterpretq_p128_bf16(bfloat16x8_t __p0) { - poly128_t __ret; - __ret = (poly128_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) poly64x2_t vreinterpretq_p64_bf16(bfloat16x8_t __p0) { - poly64x2_t __ret; - __ret = (poly64x2_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) poly16x8_t vreinterpretq_p16_bf16(bfloat16x8_t __p0) { - poly16x8_t __ret; - __ret = (poly16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) uint8x16_t vreinterpretq_u8_bf16(bfloat16x8_t __p0) { - uint8x16_t __ret; - __ret = (uint8x16_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) uint32x4_t vreinterpretq_u32_bf16(bfloat16x8_t __p0) { - uint32x4_t __ret; - __ret = (uint32x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) uint64x2_t vreinterpretq_u64_bf16(bfloat16x8_t __p0) { - uint64x2_t __ret; - __ret = (uint64x2_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) 
uint16x8_t vreinterpretq_u16_bf16(bfloat16x8_t __p0) { - uint16x8_t __ret; - __ret = (uint16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) int8x16_t vreinterpretq_s8_bf16(bfloat16x8_t __p0) { - int8x16_t __ret; - __ret = (int8x16_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) float64x2_t vreinterpretq_f64_bf16(bfloat16x8_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) float32x4_t vreinterpretq_f32_bf16(bfloat16x8_t __p0) { - float32x4_t __ret; - __ret = (float32x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) float16x8_t vreinterpretq_f16_bf16(bfloat16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) int32x4_t vreinterpretq_s32_bf16(bfloat16x8_t __p0) { - int32x4_t __ret; - __ret = (int32x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) int64x2_t vreinterpretq_s64_bf16(bfloat16x8_t __p0) { - int64x2_t __ret; - __ret = (int64x2_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) int16x8_t vreinterpretq_s16_bf16(bfloat16x8_t __p0) { - int16x8_t __ret; - __ret = (int16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) uint8x8_t vreinterpret_u8_bf16(bfloat16x4_t __p0) { - uint8x8_t __ret; - __ret = (uint8x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) uint32x2_t vreinterpret_u32_bf16(bfloat16x4_t __p0) { - uint32x2_t __ret; - __ret = (uint32x2_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) uint64x1_t vreinterpret_u64_bf16(bfloat16x4_t __p0) { +__ai __attribute__((target("neon"))) uint64x1_t vtst_p64(poly64x1_t __p0, poly64x1_t __p1) { uint64x1_t __ret; - __ret = (uint64x1_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) uint16x4_t vreinterpret_u16_bf16(bfloat16x4_t __p0) { - uint16x4_t __ret; - __ret = (uint16x4_t)(__p0); - return __ret; -} -__ai 
__attribute__((target("bf16"))) int8x8_t vreinterpret_s8_bf16(bfloat16x4_t __p0) { - int8x8_t __ret; - __ret = (int8x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) float64x1_t vreinterpret_f64_bf16(bfloat16x4_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) float32x2_t vreinterpret_f32_bf16(bfloat16x4_t __p0) { - float32x2_t __ret; - __ret = (float32x2_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) float16x4_t vreinterpret_f16_bf16(bfloat16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) int32x2_t vreinterpret_s32_bf16(bfloat16x4_t __p0) { - int32x2_t __ret; - __ret = (int32x2_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) int64x1_t vreinterpret_s64_bf16(bfloat16x4_t __p0) { - int64x1_t __ret; - __ret = (int64x1_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) int16x4_t vreinterpret_s16_bf16(bfloat16x4_t __p0) { - int16x4_t __ret; - __ret = (int16x4_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p8(poly8x16_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p128(poly128_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p64(poly64x2_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_p16(poly16x8_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u8(uint8x16_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t 
vreinterpretq_bf16_u32(uint32x4_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u64(uint64x2_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_u16(uint16x8_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s8(int8x16_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_f64(float64x2_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_f32(float32x4_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_f16(float16x8_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); - return __ret; -} -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s32(int32x4_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); + __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s64(int64x2_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } -__ai __attribute__((target("bf16"))) bfloat16x8_t vreinterpretq_bf16_s16(int16x8_t __p0) { - bfloat16x8_t __ret; - __ret = (bfloat16x8_t)(__p0); +#else +__ai __attribute__((target("neon"))) uint64x2_t vtstq_p64(poly64x2_t __p0, poly64x2_t __p1) { + 
uint64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_p8(poly8x8_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_p64(poly64x1_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); +#else +__ai __attribute__((target("neon"))) uint64x2_t vtstq_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_p16(poly16x4_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u8(uint8x8_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); +#else +__ai __attribute__((target("neon"))) uint64x2_t vtstq_s64(int64x2_t __p0, int64x2_t __p1) { + uint64x2_t __ret; + int64x2_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (uint64x2_t) __builtin_neon_vtstq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 51); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u32(uint32x2_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); +#endif + +__ai __attribute__((target("neon"))) uint64x1_t vtst_u64(uint64x1_t __p0, uint64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u64(uint64x1_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); +__ai __attribute__((target("neon"))) uint64x1_t vtst_s64(int64x1_t __p0, int64x1_t __p1) { + uint64x1_t __ret; + __ret = (uint64x1_t) __builtin_neon_vtst_v((int8x8_t)__p0, (int8x8_t)__p1, 19); return __ret; } -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_u16(uint16x4_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); +__ai __attribute__((target("neon"))) uint64_t vtstd_u64(uint64_t __p0, uint64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vtstd_u64(__p0, __p1); return __ret; } -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s8(int8x8_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); +__ai __attribute__((target("neon"))) uint64_t vtstd_s64(int64_t __p0, int64_t __p1) { + uint64_t __ret; + __ret = (uint64_t) __builtin_neon_vtstd_s64(__p0, __p1); return __ret; } -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_f64(float64x1_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); +__ai __attribute__((target("neon"))) int8_t vuqaddb_s8(int8_t __p0, uint8_t __p1) { + int8_t __ret; + __ret = (int8_t) __builtin_neon_vuqaddb_s8(__p0, __p1); return __ret; } -__ai 
__attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_f32(float32x2_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); +__ai __attribute__((target("neon"))) int32_t vuqadds_s32(int32_t __p0, uint32_t __p1) { + int32_t __ret; + __ret = (int32_t) __builtin_neon_vuqadds_s32(__p0, __p1); return __ret; } -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_f16(float16x4_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); +__ai __attribute__((target("neon"))) int64_t vuqaddd_s64(int64_t __p0, uint64_t __p1) { + int64_t __ret; + __ret = (int64_t) __builtin_neon_vuqaddd_s64(__p0, __p1); return __ret; } -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s32(int32x2_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); +__ai __attribute__((target("neon"))) int16_t vuqaddh_s16(int16_t __p0, uint16_t __p1) { + int16_t __ret; + __ret = (int16_t) __builtin_neon_vuqaddh_s16(__p0, __p1); return __ret; } -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s64(int64x1_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 32); return __ret; } -__ai __attribute__((target("bf16"))) bfloat16x4_t vreinterpret_bf16_s16(int16x4_t __p0) { - bfloat16x4_t __ret; - __ret = (bfloat16x4_t)(__p0); +#else +__ai __attribute__((target("neon"))) int8x16_t vuqaddq_s8(int8x16_t __p0, uint8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x16_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 32); + __ret = 
__builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -#ifdef __LITTLE_ENDIAN__ -#define vdotq_laneq_u32(__p0_747, __p1_747, __p2_747, __p3_747) __extension__ ({ \ - uint32x4_t __ret_747; \ - uint32x4_t __s0_747 = __p0_747; \ - uint8x16_t __s1_747 = __p1_747; \ - uint8x16_t __s2_747 = __p2_747; \ -uint8x16_t __reint_747 = __s2_747; \ -uint32x4_t __reint1_747 = splatq_laneq_u32(*(uint32x4_t *) &__reint_747, __p3_747); \ - __ret_747 = vdotq_u32(__s0_747, __s1_747, *(uint8x16_t *) &__reint1_747); \ - __ret_747; \ -}) -#else -#define vdotq_laneq_u32(__p0_748, __p1_748, __p2_748, __p3_748) __extension__ ({ \ - uint32x4_t __ret_748; \ - uint32x4_t __s0_748 = __p0_748; \ - uint8x16_t __s1_748 = __p1_748; \ - uint8x16_t __s2_748 = __p2_748; \ - uint32x4_t __rev0_748; __rev0_748 = __builtin_shufflevector(__s0_748, __s0_748, 3, 2, 1, 0); \ - uint8x16_t __rev1_748; __rev1_748 = __builtin_shufflevector(__s1_748, __s1_748, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev2_748; __rev2_748 = __builtin_shufflevector(__s2_748, __s2_748, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ -uint8x16_t __reint_748 = __rev2_748; \ -uint32x4_t __reint1_748 = __noswap_splatq_laneq_u32(*(uint32x4_t *) &__reint_748, __p3_748); \ - __ret_748 = __noswap_vdotq_u32(__rev0_748, __rev1_748, *(uint8x16_t *) &__reint1_748); \ - __ret_748 = __builtin_shufflevector(__ret_748, __ret_748, 3, 2, 1, 0); \ - __ret_748; \ -}) #endif #ifdef __LITTLE_ENDIAN__ -#define vdotq_laneq_s32(__p0_749, __p1_749, __p2_749, __p3_749) __extension__ ({ \ - int32x4_t __ret_749; \ - int32x4_t __s0_749 = __p0_749; \ - int8x16_t __s1_749 = __p1_749; \ - int8x16_t __s2_749 = __p2_749; \ -int8x16_t __reint_749 = __s2_749; \ -int32x4_t __reint1_749 = splatq_laneq_s32(*(int32x4_t *) &__reint_749, __p3_749); \ - __ret_749 = vdotq_s32(__s0_749, __s1_749, *(int8x16_t *) &__reint1_749); \ - __ret_749; \ -}) +__ai __attribute__((target("neon"))) 
int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) { + int32x4_t __ret; + __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 34); + return __ret; +} #else -#define vdotq_laneq_s32(__p0_750, __p1_750, __p2_750, __p3_750) __extension__ ({ \ - int32x4_t __ret_750; \ - int32x4_t __s0_750 = __p0_750; \ - int8x16_t __s1_750 = __p1_750; \ - int8x16_t __s2_750 = __p2_750; \ - int32x4_t __rev0_750; __rev0_750 = __builtin_shufflevector(__s0_750, __s0_750, 3, 2, 1, 0); \ - int8x16_t __rev1_750; __rev1_750 = __builtin_shufflevector(__s1_750, __s1_750, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev2_750; __rev2_750 = __builtin_shufflevector(__s2_750, __s2_750, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ -int8x16_t __reint_750 = __rev2_750; \ -int32x4_t __reint1_750 = __noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_750, __p3_750); \ - __ret_750 = __noswap_vdotq_s32(__rev0_750, __rev1_750, *(int8x16_t *) &__reint1_750); \ - __ret_750 = __builtin_shufflevector(__ret_750, __ret_750, 3, 2, 1, 0); \ - __ret_750; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vuqaddq_s32(int32x4_t __p0, uint32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int32x4_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 34); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vdot_laneq_u32(__p0_751, __p1_751, __p2_751, __p3_751) __extension__ ({ \ - uint32x2_t __ret_751; \ - uint32x2_t __s0_751 = __p0_751; \ - uint8x8_t __s1_751 = __p1_751; \ - uint8x16_t __s2_751 = __p2_751; \ -uint8x16_t __reint_751 = __s2_751; \ -uint32x2_t __reint1_751 = splat_laneq_u32(*(uint32x4_t *) &__reint_751, __p3_751); \ - __ret_751 = vdot_u32(__s0_751, __s1_751, *(uint8x8_t *) &__reint1_751); \ - __ret_751; \ -}) 
+__ai __attribute__((target("neon"))) int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) { + int64x2_t __ret; + __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 35); + return __ret; +} #else -#define vdot_laneq_u32(__p0_752, __p1_752, __p2_752, __p3_752) __extension__ ({ \ - uint32x2_t __ret_752; \ - uint32x2_t __s0_752 = __p0_752; \ - uint8x8_t __s1_752 = __p1_752; \ - uint8x16_t __s2_752 = __p2_752; \ - uint32x2_t __rev0_752; __rev0_752 = __builtin_shufflevector(__s0_752, __s0_752, 1, 0); \ - uint8x8_t __rev1_752; __rev1_752 = __builtin_shufflevector(__s1_752, __s1_752, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev2_752; __rev2_752 = __builtin_shufflevector(__s2_752, __s2_752, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ -uint8x16_t __reint_752 = __rev2_752; \ -uint32x2_t __reint1_752 = __noswap_splat_laneq_u32(*(uint32x4_t *) &__reint_752, __p3_752); \ - __ret_752 = __noswap_vdot_u32(__rev0_752, __rev1_752, *(uint8x8_t *) &__reint1_752); \ - __ret_752 = __builtin_shufflevector(__ret_752, __ret_752, 1, 0); \ - __ret_752; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vuqaddq_s64(int64x2_t __p0, uint64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int64x2_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 35); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vdot_laneq_s32(__p0_753, __p1_753, __p2_753, __p3_753) __extension__ ({ \ - int32x2_t __ret_753; \ - int32x2_t __s0_753 = __p0_753; \ - int8x8_t __s1_753 = __p1_753; \ - int8x16_t __s2_753 = __p2_753; \ -int8x16_t __reint_753 = __s2_753; \ -int32x2_t __reint1_753 = splat_laneq_s32(*(int32x4_t *) &__reint_753, __p3_753); \ - __ret_753 = vdot_s32(__s0_753, __s1_753, *(int8x8_t *) &__reint1_753); \ - __ret_753; \ -}) +__ai 
__attribute__((target("neon"))) int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) { + int16x8_t __ret; + __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__p0, (int8x16_t)__p1, 33); + return __ret; +} #else -#define vdot_laneq_s32(__p0_754, __p1_754, __p2_754, __p3_754) __extension__ ({ \ - int32x2_t __ret_754; \ - int32x2_t __s0_754 = __p0_754; \ - int8x8_t __s1_754 = __p1_754; \ - int8x16_t __s2_754 = __p2_754; \ - int32x2_t __rev0_754; __rev0_754 = __builtin_shufflevector(__s0_754, __s0_754, 1, 0); \ - int8x8_t __rev1_754; __rev1_754 = __builtin_shufflevector(__s1_754, __s1_754, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev2_754; __rev2_754 = __builtin_shufflevector(__s2_754, __s2_754, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ -int8x16_t __reint_754 = __rev2_754; \ -int32x2_t __reint1_754 = __noswap_splat_laneq_s32(*(int32x4_t *) &__reint_754, __p3_754); \ - __ret_754 = __noswap_vdot_s32(__rev0_754, __rev1_754, *(int8x8_t *) &__reint1_754); \ - __ret_754 = __builtin_shufflevector(__ret_754, __ret_754, 1, 0); \ - __ret_754; \ -}) +__ai __attribute__((target("neon"))) int16x8_t vuqaddq_s16(int16x8_t __p0, uint16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int16x8_t) __builtin_neon_vuqaddq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 33); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fp16fml"))) float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vfmlalq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); +__ai __attribute__((target("neon"))) int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + __ret = (int8x8_t) 
__builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 0); return __ret; } #else -__ai __attribute__((target("fp16fml"))) float32x4_t vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vfmlalq_high_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} -__ai __attribute__((target("fp16fml"))) float32x4_t __noswap_vfmlalq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vfmlalq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); +__ai __attribute__((target("neon"))) int8x8_t vuqadd_s8(int8x8_t __p0, uint8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = (int8x8_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 0); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fp16fml"))) float32x2_t vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vfmlal_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); +__ai __attribute__((target("neon"))) int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) { + int32x2_t __ret; + __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 2); return __ret; } #else -__ai __attribute__((target("fp16fml"))) float32x2_t 
vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vfmlal_high_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); +__ai __attribute__((target("neon"))) int32x2_t vuqadd_s32(int32x2_t __p0, uint32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = (int32x2_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai __attribute__((target("fp16fml"))) float32x2_t __noswap_vfmlal_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vfmlal_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} #endif +__ai __attribute__((target("neon"))) int64x1_t vuqadd_s64(int64x1_t __p0, uint64x1_t __p1) { + int64x1_t __ret; + __ret = (int64x1_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 3); + return __ret; +} #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fp16fml"))) float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vfmlalq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); +__ai __attribute__((target("neon"))) int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) { + int16x4_t __ret; + __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__p0, (int8x8_t)__p1, 1); return __ret; } #else -__ai __attribute__((target("fp16fml"))) float32x4_t vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; 
- float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vfmlalq_low_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); +__ai __attribute__((target("neon"))) int16x4_t vuqadd_s16(int16x4_t __p0, uint16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = (int16x4_t) __builtin_neon_vuqadd_v((int8x8_t)__rev0, (int8x8_t)__rev1, 1); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai __attribute__((target("fp16fml"))) float32x4_t __noswap_vfmlalq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vfmlalq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); - return __ret; -} #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fp16fml"))) float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vfmlal_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); +__ai __attribute__((target("neon"))) poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); return __ret; } #else -__ai __attribute__((target("fp16fml"))) float32x2_t vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x2_t) 
__builtin_neon_vfmlal_low_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} -__ai __attribute__((target("fp16fml"))) float32x2_t __noswap_vfmlal_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vfmlal_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); +__ai __attribute__((target("neon"))) poly8x8_t vuzp1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fp16fml"))) float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vfmlslq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); +__ai __attribute__((target("neon"))) poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); return __ret; } #else -__ai __attribute__((target("fp16fml"))) float32x4_t vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vfmlslq_high_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); +__ai __attribute__((target("neon"))) poly16x4_t vuzp1_p16(poly16x4_t __p0, poly16x4_t __p1) { + 
poly16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai __attribute__((target("fp16fml"))) float32x4_t __noswap_vfmlslq_high_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vfmlslq_high_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly8x16_t vuzp1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fp16fml"))) float32x2_t vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vfmlsl_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); +__ai __attribute__((target("neon"))) poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai __attribute__((target("fp16fml"))) float32x2_t 
vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vfmlsl_high_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); +__ai __attribute__((target("neon"))) poly64x2_t vuzp1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai __attribute__((target("fp16fml"))) float32x2_t __noswap_vfmlsl_high_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vfmlsl_high_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai __attribute__((target("neon"))) poly16x8_t vuzp1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fp16fml"))) float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - __ret = 
(float32x4_t) __builtin_neon_vfmlslq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); +__ai __attribute__((target("neon"))) uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); return __ret; } #else -__ai __attribute__((target("fp16fml"))) float32x4_t vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float32x4_t) __builtin_neon_vfmlslq_low_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, 41); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8x16_t vuzp1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai __attribute__((target("fp16fml"))) float32x4_t __noswap_vfmlslq_low_f16(float32x4_t __p0, float16x8_t __p1, float16x8_t __p2) { - float32x4_t __ret; - __ret = (float32x4_t) __builtin_neon_vfmlslq_low_f16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 41); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + 
return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vuzp1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fp16fml"))) float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) __builtin_neon_vfmlsl_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); +__ai __attribute__((target("neon"))) uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai __attribute__((target("fp16fml"))) float32x2_t vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__p2, __p2, 3, 2, 1, 0); - __ret = (float32x2_t) __builtin_neon_vfmlsl_low_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, 9); +__ai __attribute__((target("neon"))) uint64x2_t vuzp1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai __attribute__((target("fp16fml"))) float32x2_t __noswap_vfmlsl_low_f16(float32x2_t __p0, float16x4_t __p1, float16x4_t __p2) { - float32x2_t __ret; - __ret = (float32x2_t) 
__builtin_neon_vfmlsl_low_f16((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 9); - return __ret; -} #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __p0 / __p1; +__ai __attribute__((target("neon"))) uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vdivq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __rev0 / __rev1; +__ai __attribute__((target("neon"))) uint16x8_t vuzp1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __p0 / __p1; +__ai __attribute__((target("neon"))) int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vdiv_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = 
__rev0 / __rev1; - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int8x16_t vuzp1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vduph_lane_f16(__p0, __p1) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__s0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} #else -#define vduph_lane_f16(__p0, __p1) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vduph_lane_f16((float16x4_t)__rev0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float64x2_t vuzp1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__s0, __p1); \ - __ret; 
\ -}) +__ai __attribute__((target("neon"))) float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} #else -#define vduph_laneq_f16(__p0, __p1) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vduph_laneq_f16((float16x8_t)__rev0, __p1); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x4_t vuzp1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} #else -#define vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__rev2, __p3); \ - __ret; \ -}) -#define __noswap_vfmah_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - __ret = 
(float16_t) __builtin_neon_vfmah_lane_f16(__s0, __s1, (float16x4_t)__s2, __p3); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vuzp1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - __ret = (float16x8_t) __builtin_neon_vfmaq_lane_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} #else -#define vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - __ret = (float16x8_t) __builtin_neon_vfmaq_lane_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x8_t)__rev2, __p3, 40); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vfmaq_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - __ret = (float16x8_t) __builtin_neon_vfmaq_lane_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x8_t)__s2, __p3, 40); 
\ - __ret; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vuzp1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - __ret = (float16x4_t) __builtin_neon_vfma_lane_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} #else -#define vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float16x4_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 3, 2, 1, 0); \ - __ret = (float16x4_t) __builtin_neon_vfma_lane_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x8_t)__rev2, __p3, 8); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vfma_lane_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __s2 = __p2; \ - __ret = (float16x4_t) __builtin_neon_vfma_lane_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x8_t)__s2, __p3, 8); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int16x8_t vuzp1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + 
int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vuzp1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} #else -#define vfmah_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__rev2, __p3); \ - __ret; \ -}) -#define __noswap_vfmah_laneq_f16(__p0, __p1, __p2, __p3) 
__extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - __ret = (float16_t) __builtin_neon_vfmah_laneq_f16(__s0, __s1, (float16x8_t)__s2, __p3); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x2_t vuzp1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} #else -#define vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, (int8x16_t)__rev2, __p3, 40); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vfmaq_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ 
- float16x8_t __s2 = __p2; \ - __ret = (float16x8_t) __builtin_neon_vfmaq_laneq_f16((int8x16_t)__s0, (int8x16_t)__s1, (int8x16_t)__s2, __p3, 40); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x4_t vuzp1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - __ret = (float16x4_t) __builtin_neon_vfma_laneq_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} #else -#define vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - float16x8_t __rev2; __rev2 = __builtin_shufflevector(__s2, __s2, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16x4_t) __builtin_neon_vfma_laneq_f16((int8x8_t)__rev0, (int8x8_t)__rev1, (int8x16_t)__rev2, __p3, 8); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) -#define __noswap_vfma_laneq_f16(__p0, __p1, __p2, __p3) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x8_t __s2 = __p2; \ - __ret = (float16x4_t) 
__builtin_neon_vfma_laneq_f16((int8x8_t)__s0, (int8x8_t)__s1, (int8x16_t)__s2, __p3, 8); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x8_t vuzp1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16_t __s2 = __p2; \ - __ret = vfmaq_f16(__s0, __s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} #else -#define vfmaq_n_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16_t __s2 = __p2; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = __noswap_vfmaq_f16(__rev0, __rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x2_t vuzp1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 
0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16_t __s2 = __p2; \ - __ret = vfma_f16(__s0, __s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); + return __ret; +} #else -#define vfma_n_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16_t __s2 = __p2; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = __noswap_vfma_f16(__rev0, __rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vuzp1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vfmsh_lane_f16(__p0_755, __p1_755, __p2_755, __p3_755) __extension__ ({ \ - float16_t __ret_755; \ - float16_t __s0_755 = __p0_755; \ - float16_t __s1_755 = __p1_755; \ - float16x4_t __s2_755 = __p2_755; \ - __ret_755 = vfmah_lane_f16(__s0_755, -__s1_755, __s2_755, __p3_755); \ - __ret_755; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} #else -#define vfmsh_lane_f16(__p0_756, __p1_756, __p2_756, __p3_756) __extension__ ({ \ - 
float16_t __ret_756; \ - float16_t __s0_756 = __p0_756; \ - float16_t __s1_756 = __p1_756; \ - float16x4_t __s2_756 = __p2_756; \ - float16x4_t __rev2_756; __rev2_756 = __builtin_shufflevector(__s2_756, __s2_756, 3, 2, 1, 0); \ - __ret_756 = __noswap_vfmah_lane_f16(__s0_756, -__s1_756, __rev2_756, __p3_756); \ - __ret_756; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vuzp1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vfmsq_lane_f16(__p0_757, __p1_757, __p2_757, __p3_757) __extension__ ({ \ - float16x8_t __ret_757; \ - float16x8_t __s0_757 = __p0_757; \ - float16x8_t __s1_757 = __p1_757; \ - float16x4_t __s2_757 = __p2_757; \ - __ret_757 = vfmaq_lane_f16(__s0_757, -__s1_757, __s2_757, __p3_757); \ - __ret_757; \ -}) +__ai __attribute__((target("neon"))) float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); + return __ret; +} #else -#define vfmsq_lane_f16(__p0_758, __p1_758, __p2_758, __p3_758) __extension__ ({ \ - float16x8_t __ret_758; \ - float16x8_t __s0_758 = __p0_758; \ - float16x8_t __s1_758 = __p1_758; \ - float16x4_t __s2_758 = __p2_758; \ - float16x8_t __rev0_758; __rev0_758 = __builtin_shufflevector(__s0_758, __s0_758, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_758; __rev1_758 = __builtin_shufflevector(__s1_758, __s1_758, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_758; __rev2_758 = __builtin_shufflevector(__s2_758, __s2_758, 3, 2, 1, 0); \ - __ret_758 = __noswap_vfmaq_lane_f16(__rev0_758, -__rev1_758, __rev2_758, __p3_758); \ - __ret_758 = __builtin_shufflevector(__ret_758, __ret_758, 7, 6, 
5, 4, 3, 2, 1, 0); \ - __ret_758; \ -}) +__ai __attribute__((target("neon"))) float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vfms_lane_f16(__p0_759, __p1_759, __p2_759, __p3_759) __extension__ ({ \ - float16x4_t __ret_759; \ - float16x4_t __s0_759 = __p0_759; \ - float16x4_t __s1_759 = __p1_759; \ - float16x4_t __s2_759 = __p2_759; \ - __ret_759 = vfma_lane_f16(__s0_759, -__s1_759, __s2_759, __p3_759); \ - __ret_759; \ -}) +__ai __attribute__((target("neon"))) float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); + return __ret; +} #else -#define vfms_lane_f16(__p0_760, __p1_760, __p2_760, __p3_760) __extension__ ({ \ - float16x4_t __ret_760; \ - float16x4_t __s0_760 = __p0_760; \ - float16x4_t __s1_760 = __p1_760; \ - float16x4_t __s2_760 = __p2_760; \ - float16x4_t __rev0_760; __rev0_760 = __builtin_shufflevector(__s0_760, __s0_760, 3, 2, 1, 0); \ - float16x4_t __rev1_760; __rev1_760 = __builtin_shufflevector(__s1_760, __s1_760, 3, 2, 1, 0); \ - float16x4_t __rev2_760; __rev2_760 = __builtin_shufflevector(__s2_760, __s2_760, 3, 2, 1, 0); \ - __ret_760 = __noswap_vfma_lane_f16(__rev0_760, -__rev1_760, __rev2_760, __p3_760); \ - __ret_760 = __builtin_shufflevector(__ret_760, __ret_760, 3, 2, 1, 0); \ - __ret_760; \ -}) +__ai __attribute__((target("neon"))) float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vfmsh_laneq_f16(__p0_761, __p1_761, __p2_761, __p3_761) __extension__ ({ \ - float16_t __ret_761; \ - float16_t __s0_761 = __p0_761; \ - float16_t __s1_761 = __p1_761; \ - float16x8_t __s2_761 = __p2_761; \ - __ret_761 = vfmah_laneq_f16(__s0_761, -__s1_761, __s2_761, __p3_761); \ - __ret_761; \ -}) +__ai __attribute__((target("neon"))) poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} #else -#define vfmsh_laneq_f16(__p0_762, __p1_762, __p2_762, __p3_762) __extension__ ({ \ - float16_t __ret_762; \ - float16_t __s0_762 = __p0_762; \ - float16_t __s1_762 = __p1_762; \ - float16x8_t __s2_762 = __p2_762; \ - float16x8_t __rev2_762; __rev2_762 = __builtin_shufflevector(__s2_762, __s2_762, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_762 = __noswap_vfmah_laneq_f16(__s0_762, -__s1_762, __rev2_762, __p3_762); \ - __ret_762; \ -}) +__ai __attribute__((target("neon"))) poly8x8_t vuzp2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vfmsq_laneq_f16(__p0_763, __p1_763, __p2_763, __p3_763) __extension__ ({ \ - float16x8_t __ret_763; \ - float16x8_t __s0_763 = __p0_763; \ - float16x8_t __s1_763 = __p1_763; \ - float16x8_t __s2_763 = __p2_763; \ - __ret_763 = vfmaq_laneq_f16(__s0_763, -__s1_763, __s2_763, __p3_763); \ - __ret_763; \ -}) +__ai 
__attribute__((target("neon"))) poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} #else -#define vfmsq_laneq_f16(__p0_764, __p1_764, __p2_764, __p3_764) __extension__ ({ \ - float16x8_t __ret_764; \ - float16x8_t __s0_764 = __p0_764; \ - float16x8_t __s1_764 = __p1_764; \ - float16x8_t __s2_764 = __p2_764; \ - float16x8_t __rev0_764; __rev0_764 = __builtin_shufflevector(__s0_764, __s0_764, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_764; __rev1_764 = __builtin_shufflevector(__s1_764, __s1_764, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_764; __rev2_764 = __builtin_shufflevector(__s2_764, __s2_764, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_764 = __noswap_vfmaq_laneq_f16(__rev0_764, -__rev1_764, __rev2_764, __p3_764); \ - __ret_764 = __builtin_shufflevector(__ret_764, __ret_764, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_764; \ -}) +__ai __attribute__((target("neon"))) poly16x4_t vuzp2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vfms_laneq_f16(__p0_765, __p1_765, __p2_765, __p3_765) __extension__ ({ \ - float16x4_t __ret_765; \ - float16x4_t __s0_765 = __p0_765; \ - float16x4_t __s1_765 = __p1_765; \ - float16x8_t __s2_765 = __p2_765; \ - __ret_765 = vfma_laneq_f16(__s0_765, -__s1_765, __s2_765, __p3_765); \ - __ret_765; \ -}) +__ai __attribute__((target("neon"))) poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + return __ret; +} #else -#define vfms_laneq_f16(__p0_766, __p1_766, 
__p2_766, __p3_766) __extension__ ({ \ - float16x4_t __ret_766; \ - float16x4_t __s0_766 = __p0_766; \ - float16x4_t __s1_766 = __p1_766; \ - float16x8_t __s2_766 = __p2_766; \ - float16x4_t __rev0_766; __rev0_766 = __builtin_shufflevector(__s0_766, __s0_766, 3, 2, 1, 0); \ - float16x4_t __rev1_766; __rev1_766 = __builtin_shufflevector(__s1_766, __s1_766, 3, 2, 1, 0); \ - float16x8_t __rev2_766; __rev2_766 = __builtin_shufflevector(__s2_766, __s2_766, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_766 = __noswap_vfma_laneq_f16(__rev0_766, -__rev1_766, __rev2_766, __p3_766); \ - __ret_766 = __builtin_shufflevector(__ret_766, __ret_766, 3, 2, 1, 0); \ - __ret_766; \ -}) +__ai __attribute__((target("neon"))) poly8x16_t vuzp2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16_t __s2 = __p2; \ - __ret = vfmaq_f16(__s0, -__s1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} #else -#define vfmsq_n_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16_t __s2 = __p2; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 
5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = __noswap_vfmaq_f16(__rev0, -__rev1, (float16x8_t) {__s2, __s2, __s2, __s2, __s2, __s2, __s2, __s2}); \ - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly64x2_t vuzp2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16_t __s2 = __p2; \ - __ret = vfma_f16(__s0, -__s1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} #else -#define vfms_n_f16(__p0, __p1, __p2) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16_t __s2 = __p2; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = __noswap_vfma_f16(__rev0, -__rev1, (float16x4_t) {__s2, __s2, __s2, __s2}); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly16x8_t vuzp2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 
1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmaxnmvq_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__s0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + return __ret; +} #else -#define vmaxnmvq_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vmaxnmvq_f16((int8x16_t)__rev0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vuzp2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmaxnmv_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__s0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} #else -#define vmaxnmv_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x4_t 
__s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vmaxnmv_f16((int8x8_t)__rev0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint32x4_t vuzp2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmaxvq_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__s0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} #else -#define vmaxvq_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vmaxvq_f16((int8x16_t)__rev0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint64x2_t vuzp2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmaxv_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__s0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + 
__ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} #else -#define vmaxv_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vmaxv_f16((int8x8_t)__rev0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) uint16x8_t vuzp2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vminnmvq_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__s0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31); + return __ret; +} #else -#define vminnmvq_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vminnmvq_f16((int8x16_t)__rev0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int8x16_t vuzp2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 
23, 25, 27, 29, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vminnmv_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__s0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} #else -#define vminnmv_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vminnmv_f16((int8x8_t)__rev0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float64x2_t vuzp2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vminvq_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__s0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} #else -#define vminvq_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x8_t __s0 = __p0; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vminvq_f16((int8x16_t)__rev0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x4_t vuzp2q_f32(float32x4_t __p0, float32x4_t __p1) 
{ + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vminv_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__s0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} #else -#define vminv_f16(__p0) __extension__ ({ \ - float16_t __ret; \ - float16x4_t __s0 = __p0; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vminv_f16((int8x8_t)__rev0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) int32x4_t vuzp2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmulq_laneq_f16(__p0_767, __p1_767, __p2_767) __extension__ ({ \ - float16x8_t __ret_767; \ - float16x8_t __s0_767 = __p0_767; \ - float16x8_t __s1_767 = __p1_767; \ - __ret_767 = __s0_767 * splatq_laneq_f16(__s1_767, __p2_767); \ - __ret_767; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} #else -#define vmulq_laneq_f16(__p0_768, __p1_768, __p2_768) __extension__ ({ \ - float16x8_t __ret_768; \ - float16x8_t 
__s0_768 = __p0_768; \ - float16x8_t __s1_768 = __p1_768; \ - float16x8_t __rev0_768; __rev0_768 = __builtin_shufflevector(__s0_768, __s0_768, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_768; __rev1_768 = __builtin_shufflevector(__s1_768, __s1_768, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_768 = __rev0_768 * __noswap_splatq_laneq_f16(__rev1_768, __p2_768); \ - __ret_768 = __builtin_shufflevector(__ret_768, __ret_768, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_768; \ -}) +__ai __attribute__((target("neon"))) int64x2_t vuzp2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmul_laneq_f16(__p0_769, __p1_769, __p2_769) __extension__ ({ \ - float16x4_t __ret_769; \ - float16x4_t __s0_769 = __p0_769; \ - float16x8_t __s1_769 = __p1_769; \ - __ret_769 = __s0_769 * splat_laneq_f16(__s1_769, __p2_769); \ - __ret_769; \ -}) +__ai __attribute__((target("neon"))) int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} #else -#define vmul_laneq_f16(__p0_770, __p1_770, __p2_770) __extension__ ({ \ - float16x4_t __ret_770; \ - float16x4_t __s0_770 = __p0_770; \ - float16x8_t __s1_770 = __p1_770; \ - float16x4_t __rev0_770; __rev0_770 = __builtin_shufflevector(__s0_770, __s0_770, 3, 2, 1, 0); \ - float16x8_t __rev1_770; __rev1_770 = __builtin_shufflevector(__s1_770, __s1_770, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_770 = __rev0_770 * __noswap_splat_laneq_f16(__rev1_770, __p2_770); \ - __ret_770 = __builtin_shufflevector(__ret_770, __ret_770, 3, 2, 1, 0); \ - __ret_770; \ -}) +__ai __attribute__((target("neon"))) int16x8_t vuzp2q_s16(int16x8_t __p0, int16x8_t __p1) { + 
int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vmulxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); +__ai __attribute__((target("neon"))) uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vmulxq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); +__ai __attribute__((target("neon"))) uint8x8_t vuzp2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai __attribute__((target("fullfp16"))) float16x8_t __noswap_vmulxq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vmulxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t 
vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vuzp2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vmulx_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); +__ai __attribute__((target("neon"))) uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vmulx_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vmulx_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); +__ai __attribute__((target("neon"))) uint16x4_t vuzp2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai __attribute__((target("fullfp16"))) float16x4_t __noswap_vmulx_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vmulx_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); +#endif + +#ifdef 
__LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vuzp2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} #else -#define vmulxh_lane_f16(__p0, __p1, __p2) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16x4_t __s1 = __p1; \ - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vmulxh_lane_f16(__s0, (float16x4_t)__rev1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float32x2_t vuzp2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmulxq_lane_f16(__p0_771, __p1_771, __p2_771) __extension__ ({ \ - float16x8_t __ret_771; 
\ - float16x8_t __s0_771 = __p0_771; \ - float16x4_t __s1_771 = __p1_771; \ - __ret_771 = vmulxq_f16(__s0_771, splatq_lane_f16(__s1_771, __p2_771)); \ - __ret_771; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} #else -#define vmulxq_lane_f16(__p0_772, __p1_772, __p2_772) __extension__ ({ \ - float16x8_t __ret_772; \ - float16x8_t __s0_772 = __p0_772; \ - float16x4_t __s1_772 = __p1_772; \ - float16x8_t __rev0_772; __rev0_772 = __builtin_shufflevector(__s0_772, __s0_772, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev1_772; __rev1_772 = __builtin_shufflevector(__s1_772, __s1_772, 3, 2, 1, 0); \ - __ret_772 = __noswap_vmulxq_f16(__rev0_772, __noswap_splatq_lane_f16(__rev1_772, __p2_772)); \ - __ret_772 = __builtin_shufflevector(__ret_772, __ret_772, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_772; \ -}) +__ai __attribute__((target("neon"))) int32x2_t vuzp2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmulx_lane_f16(__p0_773, __p1_773, __p2_773) __extension__ ({ \ - float16x4_t __ret_773; \ - float16x4_t __s0_773 = __p0_773; \ - float16x4_t __s1_773 = __p1_773; \ - __ret_773 = vmulx_f16(__s0_773, splat_lane_f16(__s1_773, __p2_773)); \ - __ret_773; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} #else -#define vmulx_lane_f16(__p0_774, __p1_774, __p2_774) __extension__ ({ \ - float16x4_t __ret_774; \ - float16x4_t __s0_774 = __p0_774; \ - float16x4_t __s1_774 = __p1_774; \ - 
float16x4_t __rev0_774; __rev0_774 = __builtin_shufflevector(__s0_774, __s0_774, 3, 2, 1, 0); \ - float16x4_t __rev1_774; __rev1_774 = __builtin_shufflevector(__s1_774, __s1_774, 3, 2, 1, 0); \ - __ret_774 = __noswap_vmulx_f16(__rev0_774, __noswap_splat_lane_f16(__rev1_774, __p2_774)); \ - __ret_774 = __builtin_shufflevector(__ret_774, __ret_774, 3, 2, 1, 0); \ - __ret_774; \ -}) +__ai __attribute__((target("neon"))) int16x4_t vuzp2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__s1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); + return __ret; +} #else -#define vmulxh_laneq_f16(__p0, __p1, __p2) __extension__ ({ \ - float16_t __ret; \ - float16_t __s0 = __p0; \ - float16x8_t __s1 = __p1; \ - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__s1, __s1, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = (float16_t) __builtin_neon_vmulxh_laneq_f16(__s0, (float16x8_t)__rev1, __p2); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 
13, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmulxq_laneq_f16(__p0_775, __p1_775, __p2_775) __extension__ ({ \ - float16x8_t __ret_775; \ - float16x8_t __s0_775 = __p0_775; \ - float16x8_t __s1_775 = __p1_775; \ - __ret_775 = vmulxq_f16(__s0_775, splatq_laneq_f16(__s1_775, __p2_775)); \ - __ret_775; \ -}) +__ai __attribute__((target("neon"))) float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); + return __ret; +} #else -#define vmulxq_laneq_f16(__p0_776, __p1_776, __p2_776) __extension__ ({ \ - float16x8_t __ret_776; \ - float16x8_t __s0_776 = __p0_776; \ - float16x8_t __s1_776 = __p1_776; \ - float16x8_t __rev0_776; __rev0_776 = __builtin_shufflevector(__s0_776, __s0_776, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev1_776; __rev1_776 = __builtin_shufflevector(__s1_776, __s1_776, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_776 = __noswap_vmulxq_f16(__rev0_776, __noswap_splatq_laneq_f16(__rev1_776, __p2_776)); \ - __ret_776 = __builtin_shufflevector(__ret_776, __ret_776, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_776; \ -}) +__ai __attribute__((target("neon"))) float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmulx_laneq_f16(__p0_777, __p1_777, __p2_777) __extension__ ({ \ - float16x4_t __ret_777; \ - float16x4_t __s0_777 = __p0_777; \ - float16x8_t __s1_777 = __p1_777; \ - __ret_777 = vmulx_f16(__s0_777, splat_laneq_f16(__s1_777, __p2_777)); \ - __ret_777; \ -}) +__ai __attribute__((target("neon"))) poly8x8_t vzip1_p8(poly8x8_t 
__p0, poly8x8_t __p1) { + poly8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); + return __ret; +} #else -#define vmulx_laneq_f16(__p0_778, __p1_778, __p2_778) __extension__ ({ \ - float16x4_t __ret_778; \ - float16x4_t __s0_778 = __p0_778; \ - float16x8_t __s1_778 = __p1_778; \ - float16x4_t __rev0_778; __rev0_778 = __builtin_shufflevector(__s0_778, __s0_778, 3, 2, 1, 0); \ - float16x8_t __rev1_778; __rev1_778 = __builtin_shufflevector(__s1_778, __s1_778, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_778 = __noswap_vmulx_f16(__rev0_778, __noswap_splat_laneq_f16(__rev1_778, __p2_778)); \ - __ret_778 = __builtin_shufflevector(__ret_778, __ret_778, 3, 2, 1, 0); \ - __ret_778; \ -}) +__ai __attribute__((target("neon"))) poly8x8_t vzip1_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - __ret = vmulxq_f16(__s0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); + return __ret; +} #else -#define vmulxq_n_f16(__p0, __p1) __extension__ ({ \ - float16x8_t __ret; \ - float16x8_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret = __noswap_vmulxq_f16(__rev0, (float16x8_t) {__s1, __s1, __s1, __s1, __s1, __s1, __s1, __s1}); \ - __ret = 
__builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly16x4_t vzip1_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vmulx_n_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - __ret = vmulx_f16(__s0, (float16x4_t) {__s1, __s1, __s1, __s1}); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + return __ret; +} #else -#define vmulx_n_f16(__p0, __p1) __extension__ ({ \ - float16x4_t __ret; \ - float16x4_t __s0 = __p0; \ - float16_t __s1 = __p1; \ - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__s0, __s0, 3, 2, 1, 0); \ - __ret = __noswap_vmulx_f16(__rev0, (float16x4_t) {__s1, __s1, __s1, __s1}); \ - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); \ - __ret; \ -}) +__ai __attribute__((target("neon"))) poly8x16_t vzip1q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -__ai 
__attribute__((target("fullfp16"))) float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vpaddq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); +__ai __attribute__((target("neon"))) poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vpaddq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vpaddq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly64x2_t vzip1q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vpmaxq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); +__ai __attribute__((target("neon"))) poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vpmaxq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, 
__p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vpmaxq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); +__ai __attribute__((target("neon"))) poly16x8_t vzip1q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vpmaxnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); +__ai __attribute__((target("neon"))) uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vpmaxnmq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vpmaxnmq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8x16_t vzip1q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 
21, 6, 22, 7, 23); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vpmaxnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); +__ai __attribute__((target("neon"))) uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vpmaxnm_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vpmaxnm_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); +__ai __attribute__((target("neon"))) uint32x4_t vzip1q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vpminq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); +__ai __attribute__((target("neon"))) uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vpminq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 
5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vpminq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint64x2_t vzip1q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vpminnmq_f16((int8x16_t)__p0, (int8x16_t)__p1, 40); +__ai __attribute__((target("neon"))) uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vpminnmq_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vpminnmq_f16((int8x16_t)__rev0, (int8x16_t)__rev1, 40); +__ai __attribute__((target("neon"))) uint16x8_t vzip1q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } 
#endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vpminnm_f16((int8x8_t)__p0, (int8x8_t)__p1, 8); +__ai __attribute__((target("neon"))) int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vpminnm_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vpminnm_f16((int8x8_t)__rev0, (int8x8_t)__rev1, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int8x16_t vzip1q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vrndiq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vrndiq_f16((int8x16_t)__p0, 40); +__ai __attribute__((target("neon"))) float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vrndiq_f16(float16x8_t __p0) { - float16x8_t 
__ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vrndiq_f16((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float64x2_t vzip1q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vrndi_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vrndi_f16((int8x8_t)__p0, 8); +__ai __attribute__((target("neon"))) float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vrndi_f16(float16x4_t __p0) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vrndi_f16((int8x8_t)__rev0, 8); +__ai __attribute__((target("neon"))) float32x4_t vzip1q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vsqrtq_f16(float16x8_t __p0) { - float16x8_t __ret; - __ret = (float16x8_t) __builtin_neon_vsqrtq_f16((int8x16_t)__p0, 40); +__ai __attribute__((target("neon"))) int32x4_t 
vzip1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vsqrtq_f16(float16x8_t __p0) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = (float16x8_t) __builtin_neon_vsqrtq_f16((int8x16_t)__rev0, 40); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32x4_t vzip1q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vsqrt_f16(float16x4_t __p0) { - float16x4_t __ret; - __ret = (float16x4_t) __builtin_neon_vsqrt_f16((int8x8_t)__p0, 8); +__ai __attribute__((target("neon"))) int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vsqrt_f16(float16x4_t __p0) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - __ret = (float16x4_t) __builtin_neon_vsqrt_f16((int8x8_t)__rev0, 8); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int64x2_t vzip1q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif 
#ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 2, 10, 4, 12, 6, 14); +__ai __attribute__((target("neon"))) int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vtrn1q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 2, 10, 4, 12, 6, 14); +__ai __attribute__((target("neon"))) int16x8_t vzip1q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 2, 6); +__ai __attribute__((target("neon"))) uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vtrn1_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = 
__builtin_shufflevector(__rev0, __rev1, 0, 4, 2, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint8x8_t vzip1_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 9, 3, 11, 5, 13, 7, 15); +__ai __attribute__((target("neon"))) uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vtrn2q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 9, 3, 11, 5, 13, 7, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) uint32x2_t vzip1_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - 
__ret = __builtin_shufflevector(__p0, __p1, 1, 5, 3, 7); +__ai __attribute__((target("neon"))) uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vtrn2_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 5, 3, 7); +__ai __attribute__((target("neon"))) uint16x4_t vzip1_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6, 8, 10, 12, 14); +__ai __attribute__((target("neon"))) int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vuzp1q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6, 8, 10, 12, 14); +__ai __attribute__((target("neon"))) int8x8_t vzip1_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 
6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 8, 1, 9, 2, 10, 3, 11); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 0, 2, 4, 6); +__ai __attribute__((target("neon"))) float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vuzp1_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2, 4, 6); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) float32x2_t vzip1_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7, 9, 11, 13, 15); +__ai __attribute__((target("neon"))) int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 2); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vuzp2q_f16(float16x8_t __p0, float16x8_t 
__p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7, 9, 11, 13, 15); - __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) int32x2_t vzip1_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 2); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - __ret = __builtin_shufflevector(__p0, __p1, 1, 3, 5, 7); +__ai __attribute__((target("neon"))) int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vuzp2_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); - __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3, 5, 7); +__ai __attribute__((target("neon"))) int16x4_t vzip1_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 0, 4, 1, 5); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t 
vzip1q_f16(float16x8_t __p0, float16x8_t __p1) { +__ai __attribute__((target("neon"))) float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 8, 1, 9, 2, 10, 3, 11); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) { +__ai __attribute__((target("neon"))) float16x8_t vzip1q_f16(float16x8_t __p0, float16x8_t __p1) { float16x8_t __ret; float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -64457,13 +64017,13 @@ __ai __attribute__((target("fullfp16"))) float16x8_t vzip1q_f16(float16x8_t __p0 #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) { +__ai __attribute__((target("neon"))) float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 0, 4, 1, 5); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) { +__ai __attribute__((target("neon"))) float16x4_t vzip1_f16(float16x4_t __p0, float16x4_t __p1) { float16x4_t __ret; float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -64474,16 +64034,16 @@ __ai __attribute__((target("fullfp16"))) float16x4_t vzip1_f16(float16x4_t __p0, #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; +__ai __attribute__((target("neon"))) poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x8_t 
vzip2q_f16(float16x8_t __p0, float16x8_t __p1) { - float16x8_t __ret; - float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly8x8_t vzip2_p8(poly8x8_t __p0, poly8x8_t __p1) { + poly8x8_t __ret; + poly8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; @@ -64491,16 +64051,16 @@ __ai __attribute__((target("fullfp16"))) float16x8_t vzip2q_f16(float16x8_t __p0 #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("fullfp16"))) float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; +__ai __attribute__((target("neon"))) poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); return __ret; } #else -__ai __attribute__((target("fullfp16"))) float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) { - float16x4_t __ret; - float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); - float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); +__ai __attribute__((target("neon"))) poly16x4_t vzip2_p16(poly16x4_t __p0, poly16x4_t __p1) { + poly16x4_t __ret; + poly16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + poly16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; @@ -64508,107 +64068,377 @@ __ai __attribute__((target("fullfp16"))) float16x4_t vzip2_f16(float16x4_t __p0, #endif #ifdef __LITTLE_ENDIAN__ 
-#define vsudotq_laneq_s32(__p0_779, __p1_779, __p2_779, __p3_779) __extension__ ({ \ - int32x4_t __ret_779; \ - int32x4_t __s0_779 = __p0_779; \ - int8x16_t __s1_779 = __p1_779; \ - uint8x16_t __s2_779 = __p2_779; \ -uint8x16_t __reint_779 = __s2_779; \ - __ret_779 = vusdotq_s32(__s0_779, (uint8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_779, __p3_779)), __s1_779); \ - __ret_779; \ -}) +__ai __attribute__((target("neon"))) poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + return __ret; +} #else -#define vsudotq_laneq_s32(__p0_780, __p1_780, __p2_780, __p3_780) __extension__ ({ \ - int32x4_t __ret_780; \ - int32x4_t __s0_780 = __p0_780; \ - int8x16_t __s1_780 = __p1_780; \ - uint8x16_t __s2_780 = __p2_780; \ - int32x4_t __rev0_780; __rev0_780 = __builtin_shufflevector(__s0_780, __s0_780, 3, 2, 1, 0); \ - int8x16_t __rev1_780; __rev1_780 = __builtin_shufflevector(__s1_780, __s1_780, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev2_780; __rev2_780 = __builtin_shufflevector(__s2_780, __s2_780, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ -uint8x16_t __reint_780 = __rev2_780; \ - __ret_780 = __noswap_vusdotq_s32(__rev0_780, (uint8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_780, __p3_780)), __rev1_780); \ - __ret_780 = __builtin_shufflevector(__ret_780, __ret_780, 3, 2, 1, 0); \ - __ret_780; \ -}) +__ai __attribute__((target("neon"))) poly8x16_t vzip2q_p8(poly8x16_t __p0, poly8x16_t __p1) { + poly8x16_t __ret; + poly8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + poly8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + __ret = 
__builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vsudot_laneq_s32(__p0_781, __p1_781, __p2_781, __p3_781) __extension__ ({ \ - int32x2_t __ret_781; \ - int32x2_t __s0_781 = __p0_781; \ - int8x8_t __s1_781 = __p1_781; \ - uint8x16_t __s2_781 = __p2_781; \ -uint8x16_t __reint_781 = __s2_781; \ - __ret_781 = vusdot_s32(__s0_781, (uint8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_781, __p3_781)), __s1_781); \ - __ret_781; \ -}) +__ai __attribute__((target("neon"))) poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} #else -#define vsudot_laneq_s32(__p0_782, __p1_782, __p2_782, __p3_782) __extension__ ({ \ - int32x2_t __ret_782; \ - int32x2_t __s0_782 = __p0_782; \ - int8x8_t __s1_782 = __p1_782; \ - uint8x16_t __s2_782 = __p2_782; \ - int32x2_t __rev0_782; __rev0_782 = __builtin_shufflevector(__s0_782, __s0_782, 1, 0); \ - int8x8_t __rev1_782; __rev1_782 = __builtin_shufflevector(__s1_782, __s1_782, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x16_t __rev2_782; __rev2_782 = __builtin_shufflevector(__s2_782, __s2_782, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ -uint8x16_t __reint_782 = __rev2_782; \ - __ret_782 = __noswap_vusdot_s32(__rev0_782, (uint8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_782, __p3_782)), __rev1_782); \ - __ret_782 = __builtin_shufflevector(__ret_782, __ret_782, 1, 0); \ - __ret_782; \ -}) +__ai __attribute__((target("neon"))) poly64x2_t vzip2q_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly64x2_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define 
vusdotq_laneq_s32(__p0_783, __p1_783, __p2_783, __p3_783) __extension__ ({ \ - int32x4_t __ret_783; \ - int32x4_t __s0_783 = __p0_783; \ - uint8x16_t __s1_783 = __p1_783; \ - int8x16_t __s2_783 = __p2_783; \ -int8x16_t __reint_783 = __s2_783; \ - __ret_783 = vusdotq_s32(__s0_783, __s1_783, (int8x16_t)(splatq_laneq_s32(*(int32x4_t *) &__reint_783, __p3_783))); \ - __ret_783; \ -}) +__ai __attribute__((target("neon"))) poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} #else -#define vusdotq_laneq_s32(__p0_784, __p1_784, __p2_784, __p3_784) __extension__ ({ \ - int32x4_t __ret_784; \ - int32x4_t __s0_784 = __p0_784; \ - uint8x16_t __s1_784 = __p1_784; \ - int8x16_t __s2_784 = __p2_784; \ - int32x4_t __rev0_784; __rev0_784 = __builtin_shufflevector(__s0_784, __s0_784, 3, 2, 1, 0); \ - uint8x16_t __rev1_784; __rev1_784 = __builtin_shufflevector(__s1_784, __s1_784, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev2_784; __rev2_784 = __builtin_shufflevector(__s2_784, __s2_784, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ -int8x16_t __reint_784 = __rev2_784; \ - __ret_784 = __noswap_vusdotq_s32(__rev0_784, __rev1_784, (int8x16_t)(__noswap_splatq_laneq_s32(*(int32x4_t *) &__reint_784, __p3_784))); \ - __ret_784 = __builtin_shufflevector(__ret_784, __ret_784, 3, 2, 1, 0); \ - __ret_784; \ -}) +__ai __attribute__((target("neon"))) poly16x8_t vzip2q_p16(poly16x8_t __p0, poly16x8_t __p1) { + poly16x8_t __ret; + poly16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + poly16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} #endif #ifdef __LITTLE_ENDIAN__ -#define vusdot_laneq_s32(__p0_785, 
__p1_785, __p2_785, __p3_785) __extension__ ({ \ - int32x2_t __ret_785; \ - int32x2_t __s0_785 = __p0_785; \ - uint8x8_t __s1_785 = __p1_785; \ - int8x16_t __s2_785 = __p2_785; \ -int8x16_t __reint_785 = __s2_785; \ - __ret_785 = vusdot_s32(__s0_785, __s1_785, (int8x8_t)(splat_laneq_s32(*(int32x4_t *) &__reint_785, __p3_785))); \ - __ret_785; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + return __ret; +} #else -#define vusdot_laneq_s32(__p0_786, __p1_786, __p2_786, __p3_786) __extension__ ({ \ - int32x2_t __ret_786; \ - int32x2_t __s0_786 = __p0_786; \ - uint8x8_t __s1_786 = __p1_786; \ - int8x16_t __s2_786 = __p2_786; \ - int32x2_t __rev0_786; __rev0_786 = __builtin_shufflevector(__s0_786, __s0_786, 1, 0); \ - uint8x8_t __rev1_786; __rev1_786 = __builtin_shufflevector(__s1_786, __s1_786, 7, 6, 5, 4, 3, 2, 1, 0); \ - int8x16_t __rev2_786; __rev2_786 = __builtin_shufflevector(__s2_786, __s2_786, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ -int8x16_t __reint_786 = __rev2_786; \ - __ret_786 = __noswap_vusdot_s32(__rev0_786, __rev1_786, (int8x8_t)(__noswap_splat_laneq_s32(*(int32x4_t *) &__reint_786, __p3_786))); \ - __ret_786 = __builtin_shufflevector(__ret_786, __ret_786, 1, 0); \ - __ret_786; \ -}) +__ai __attribute__((target("neon"))) uint8x16_t vzip2q_u8(uint8x16_t __p0, uint8x16_t __p1) { + uint8x16_t __ret; + uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return 
__ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x4_t vzip2q_u32(uint32x4_t __p0, uint32x4_t __p1) { + uint32x4_t __ret; + uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint64x2_t vzip2q_u64(uint64x2_t __p0, uint64x2_t __p1) { + uint64x2_t __ret; + uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x8_t vzip2q_u16(uint16x8_t __p0, uint16x8_t __p1) { + uint16x8_t __ret; + uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return 
__ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x16_t vzip2q_s8(int8x16_t __p0, int8x16_t __p1) { + int8x16_t __ret; + int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31); + __ret = __builtin_shufflevector(__ret, __ret, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float64x2_t vzip2q_f64(float64x2_t __p0, float64x2_t __p1) { + float64x2_t __ret; + float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x4_t vzip2q_f32(float32x4_t __p0, float32x4_t __p1) { + float32x4_t __ret; + float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float32x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x4_t vzip2q_s32(int32x4_t __p0, int32x4_t __p1) { + int32x4_t __ret; + int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int64x2_t vzip2q_s64(int64x2_t __p0, int64x2_t __p1) { + int64x2_t __ret; + int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x8_t vzip2q_s16(int16x8_t __p0, int16x8_t __p1) { + int16x8_t __ret; + int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret 
= __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint8x8_t vzip2_u8(uint8x8_t __p0, uint8x8_t __p1) { + uint8x8_t __ret; + uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint32x2_t vzip2_u32(uint32x2_t __p0, uint32x2_t __p1) { + uint32x2_t __ret; + uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) uint16x4_t vzip2_u16(uint16x4_t __p0, uint16x4_t __p1) { + uint16x4_t __ret; + uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 
0); + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int8x8_t vzip2_s8(int8x8_t __p0, int8x8_t __p1) { + int8x8_t __ret; + int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float32x2_t vzip2_f32(float32x2_t __p0, float32x2_t __p1) { + float32x2_t __ret; + float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + float32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 1, 3); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int32x2_t vzip2_s32(int32x2_t __p0, int32x2_t __p1) { + int32x2_t __ret; + int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = __builtin_shufflevector(__rev0, 
__rev1, 1, 3); + __ret = __builtin_shufflevector(__ret, __ret, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) int16x4_t vzip2_s16(int16x4_t __p0, int16x4_t __p1) { + int16x4_t __ret; + int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 4, 12, 5, 13, 6, 14, 7, 15); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x8_t vzip2q_f16(float16x8_t __p0, float16x8_t __p1) { + float16x8_t __ret; + float16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + float16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __builtin_shufflevector(__rev0, __rev1, 4, 12, 5, 13, 6, 14, 7, 15); + __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + __ret = __builtin_shufflevector(__p0, __p1, 2, 6, 3, 7); + return __ret; +} +#else +__ai __attribute__((target("neon"))) float16x4_t vzip2_f16(float16x4_t __p0, float16x4_t __p1) { + float16x4_t __ret; + float16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); + float16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); + __ret = 
__builtin_shufflevector(__rev0, __rev1, 2, 6, 3, 7); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); + return __ret; +} #endif #define vldap1_lane_p64(__p0, __p1, __p2) __extension__ ({ \ @@ -64776,13 +64606,13 @@ int8x16_t __reint_786 = __rev2_786; \ __builtin_neon_vstl1_lane_s64(__p0, (int8x8_t)__s1, __p2, 3); \ }) #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_vbcaxq_u8((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); return __ret; } #else -__ai __attribute__((target("sha3"))) uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -64794,13 +64624,13 @@ __ai __attribute__((target("sha3"))) uint8x16_t vbcaxq_u8(uint8x16_t __p0, uint8 #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vbcaxq_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else -__ai __attribute__((target("sha3"))) uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; 
__rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -64812,13 +64642,13 @@ __ai __attribute__((target("sha3"))) uint32x4_t vbcaxq_u32(uint32x4_t __p0, uint #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vbcaxq_u64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); return __ret; } #else -__ai __attribute__((target("sha3"))) uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -64830,13 +64660,13 @@ __ai __attribute__((target("sha3"))) uint64x2_t vbcaxq_u64(uint64x2_t __p0, uint #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_vbcaxq_u16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49); return __ret; } #else -__ai __attribute__((target("sha3"))) uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -64848,13 
+64678,13 @@ __ai __attribute__((target("sha3"))) uint16x8_t vbcaxq_u16(uint16x8_t __p0, uint #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("sha3,neon"))) int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_vbcaxq_s8((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); return __ret; } #else -__ai __attribute__((target("sha3"))) int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("sha3,neon"))) int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -64866,13 +64696,13 @@ __ai __attribute__((target("sha3"))) int8x16_t vbcaxq_s8(int8x16_t __p0, int8x16 #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("sha3,neon"))) int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_vbcaxq_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #else -__ai __attribute__((target("sha3"))) int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("sha3,neon"))) int32x4_t vbcaxq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -64884,13 +64714,13 @@ __ai __attribute__((target("sha3"))) int32x4_t vbcaxq_s32(int32x4_t __p0, int32x #endif #ifdef __LITTLE_ENDIAN__ 
-__ai __attribute__((target("sha3"))) int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_vbcaxq_s64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35); return __ret; } #else -__ai __attribute__((target("sha3"))) int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) int64x2_t vbcaxq_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -64902,13 +64732,13 @@ __ai __attribute__((target("sha3"))) int64x2_t vbcaxq_s64(int64x2_t __p0, int64x #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("sha3,neon"))) int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_vbcaxq_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); return __ret; } #else -__ai __attribute__((target("sha3"))) int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("sha3,neon"))) int16x8_t vbcaxq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -64920,13 +64750,13 @@ __ai __attribute__((target("sha3"))) int16x8_t vbcaxq_s16(int16x8_t __p0, int16x #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint8x16_t veor3q_u8(uint8x16_t __p0, 
uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; __ret = (uint8x16_t) __builtin_neon_veor3q_u8((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 48); return __ret; } #else -__ai __attribute__((target("sha3"))) uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint8x16_t veor3q_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -64938,13 +64768,13 @@ __ai __attribute__((target("sha3"))) uint8x16_t veor3q_u8(uint8x16_t __p0, uint8 #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_veor3q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else -__ai __attribute__((target("sha3"))) uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint32x4_t veor3q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -64956,13 +64786,13 @@ __ai __attribute__((target("sha3"))) uint32x4_t veor3q_u32(uint32x4_t __p0, uint #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; __ret = (uint64x2_t) 
__builtin_neon_veor3q_u64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); return __ret; } #else -__ai __attribute__((target("sha3"))) uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t veor3q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -64974,13 +64804,13 @@ __ai __attribute__((target("sha3"))) uint64x2_t veor3q_u64(uint64x2_t __p0, uint #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; __ret = (uint16x8_t) __builtin_neon_veor3q_u16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 49); return __ret; } #else -__ai __attribute__((target("sha3"))) uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint16x8_t veor3q_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -64992,13 +64822,13 @@ __ai __attribute__((target("sha3"))) uint16x8_t veor3q_u16(uint16x8_t __p0, uint #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("sha3,neon"))) int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; __ret = (int8x16_t) __builtin_neon_veor3q_s8((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 32); return __ret; } #else -__ai __attribute__((target("sha3"))) int8x16_t 
veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("sha3,neon"))) int8x16_t veor3q_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -65010,13 +64840,13 @@ __ai __attribute__((target("sha3"))) int8x16_t veor3q_s8(int8x16_t __p0, int8x16 #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("sha3,neon"))) int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; __ret = (int32x4_t) __builtin_neon_veor3q_s32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 34); return __ret; } #else -__ai __attribute__((target("sha3"))) int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("sha3,neon"))) int32x4_t veor3q_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -65028,13 +64858,13 @@ __ai __attribute__((target("sha3"))) int32x4_t veor3q_s32(int32x4_t __p0, int32x #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int64x2_t __ret; __ret = (int64x2_t) __builtin_neon_veor3q_s64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 35); return __ret; } #else -__ai __attribute__((target("sha3"))) int64x2_t veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) int64x2_t 
veor3q_s64(int64x2_t __p0, int64x2_t __p1, int64x2_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -65046,13 +64876,13 @@ __ai __attribute__((target("sha3"))) int64x2_t veor3q_s64(int64x2_t __p0, int64x #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("sha3,neon"))) int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; __ret = (int16x8_t) __builtin_neon_veor3q_s16((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 33); return __ret; } #else -__ai __attribute__((target("sha3"))) int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("sha3,neon"))) int16x8_t veor3q_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -65064,13 +64894,13 @@ __ai __attribute__((target("sha3"))) int16x8_t veor3q_s16(int16x8_t __p0, int16x #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vrax1q_u64((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else -__ai __attribute__((target("sha3"))) uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t vrax1q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -65081,13 +64911,13 @@ __ai 
__attribute__((target("sha3"))) uint64x2_t vrax1q_u64(uint64x2_t __p0, uint #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vsha512hq_u64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); return __ret; } #else -__ai __attribute__((target("sha3"))) uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512hq_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -65099,13 +64929,13 @@ __ai __attribute__((target("sha3"))) uint64x2_t vsha512hq_u64(uint64x2_t __p0, u #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vsha512h2q_u64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); return __ret; } #else -__ai __attribute__((target("sha3"))) uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512h2q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -65117,13 +64947,13 @@ __ai __attribute__((target("sha3"))) uint64x2_t vsha512h2q_u64(uint64x2_t __p0, #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint64x2_t 
vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vsha512su0q_u64((int8x16_t)__p0, (int8x16_t)__p1, 51); return __ret; } #else -__ai __attribute__((target("sha3"))) uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512su0q_u64(uint64x2_t __p0, uint64x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -65134,13 +64964,13 @@ __ai __attribute__((target("sha3"))) uint64x2_t vsha512su0q_u64(uint64x2_t __p0, #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sha3"))) uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; __ret = (uint64x2_t) __builtin_neon_vsha512su1q_u64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 51); return __ret; } #else -__ai __attribute__((target("sha3"))) uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { +__ai __attribute__((target("sha3,neon"))) uint64x2_t vsha512su1q_u64(uint64x2_t __p0, uint64x2_t __p1, uint64x2_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -65173,13 +65003,13 @@ __ai __attribute__((target("sha3"))) uint64x2_t vsha512su1q_u64(uint64x2_t __p0, #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sm4"))) uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = 
(uint32x4_t) __builtin_neon_vsm3partw1q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else -__ai __attribute__((target("sm4"))) uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -65191,13 +65021,13 @@ __ai __attribute__((target("sm4"))) uint32x4_t vsm3partw1q_u32(uint32x4_t __p0, #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sm4"))) uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsm3partw2q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else -__ai __attribute__((target("sm4"))) uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -65209,13 +65039,13 @@ __ai __attribute__((target("sm4"))) uint32x4_t vsm3partw2q_u32(uint32x4_t __p0, #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sm4"))) uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsm3ss1q_u32((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 50); return __ret; } #else -__ai 
__attribute__((target("sm4"))) uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -65323,13 +65153,13 @@ __ai __attribute__((target("sm4"))) uint32x4_t vsm3ss1q_u32(uint32x4_t __p0, uin #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sm4"))) uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsm4eq_u32((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai __attribute__((target("sm4"))) uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -65340,13 +65170,13 @@ __ai __attribute__((target("sm4"))) uint32x4_t vsm4eq_u32(uint32x4_t __p0, uint3 #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("sm4"))) uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t) __builtin_neon_vsm4ekeyq_u32((int8x16_t)__p0, (int8x16_t)__p1, 50); return __ret; } #else -__ai __attribute__((target("sm4"))) uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("sm4,neon"))) uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, uint32x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); 
uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -65356,394 +65186,394 @@ __ai __attribute__((target("sm4"))) uint32x4_t vsm4ekeyq_u32(uint32x4_t __p0, ui } #endif -__ai __attribute__((target("v8.1a"))) int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int32_t vqrdmlahs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqrdmlahs_s32(__p0, __p1, __p2); return __ret; } -__ai __attribute__((target("v8.1a"))) int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int16_t vqrdmlahh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { int16_t __ret; __ret = (int16_t) __builtin_neon_vqrdmlahh_s16(__p0, __p1, __p2); return __ret; } #ifdef __LITTLE_ENDIAN__ -#define vqrdmlahs_lane_s32(__p0_787, __p1_787, __p2_787, __p3_787) __extension__ ({ \ - int32_t __ret_787; \ - int32_t __s0_787 = __p0_787; \ - int32_t __s1_787 = __p1_787; \ - int32x2_t __s2_787 = __p2_787; \ - __ret_787 = vqrdmlahs_s32(__s0_787, __s1_787, vget_lane_s32(__s2_787, __p3_787)); \ - __ret_787; \ +#define vqrdmlahs_lane_s32(__p0_760, __p1_760, __p2_760, __p3_760) __extension__ ({ \ + int32_t __ret_760; \ + int32_t __s0_760 = __p0_760; \ + int32_t __s1_760 = __p1_760; \ + int32x2_t __s2_760 = __p2_760; \ + __ret_760 = vqrdmlahs_s32(__s0_760, __s1_760, vget_lane_s32(__s2_760, __p3_760)); \ + __ret_760; \ }) #else -#define vqrdmlahs_lane_s32(__p0_788, __p1_788, __p2_788, __p3_788) __extension__ ({ \ - int32_t __ret_788; \ - int32_t __s0_788 = __p0_788; \ - int32_t __s1_788 = __p1_788; \ - int32x2_t __s2_788 = __p2_788; \ - int32x2_t __rev2_788; __rev2_788 = __builtin_shufflevector(__s2_788, __s2_788, 1, 0); \ - __ret_788 = vqrdmlahs_s32(__s0_788, __s1_788, __noswap_vget_lane_s32(__rev2_788, __p3_788)); \ - __ret_788; \ +#define vqrdmlahs_lane_s32(__p0_761, __p1_761, __p2_761, __p3_761) __extension__ ({ \ + int32_t __ret_761; \ 
+ int32_t __s0_761 = __p0_761; \ + int32_t __s1_761 = __p1_761; \ + int32x2_t __s2_761 = __p2_761; \ + int32x2_t __rev2_761; __rev2_761 = __builtin_shufflevector(__s2_761, __s2_761, 1, 0); \ + __ret_761 = vqrdmlahs_s32(__s0_761, __s1_761, __noswap_vget_lane_s32(__rev2_761, __p3_761)); \ + __ret_761; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlahh_lane_s16(__p0_789, __p1_789, __p2_789, __p3_789) __extension__ ({ \ - int16_t __ret_789; \ - int16_t __s0_789 = __p0_789; \ - int16_t __s1_789 = __p1_789; \ - int16x4_t __s2_789 = __p2_789; \ - __ret_789 = vqrdmlahh_s16(__s0_789, __s1_789, vget_lane_s16(__s2_789, __p3_789)); \ - __ret_789; \ +#define vqrdmlahh_lane_s16(__p0_762, __p1_762, __p2_762, __p3_762) __extension__ ({ \ + int16_t __ret_762; \ + int16_t __s0_762 = __p0_762; \ + int16_t __s1_762 = __p1_762; \ + int16x4_t __s2_762 = __p2_762; \ + __ret_762 = vqrdmlahh_s16(__s0_762, __s1_762, vget_lane_s16(__s2_762, __p3_762)); \ + __ret_762; \ }) #else -#define vqrdmlahh_lane_s16(__p0_790, __p1_790, __p2_790, __p3_790) __extension__ ({ \ - int16_t __ret_790; \ - int16_t __s0_790 = __p0_790; \ - int16_t __s1_790 = __p1_790; \ - int16x4_t __s2_790 = __p2_790; \ - int16x4_t __rev2_790; __rev2_790 = __builtin_shufflevector(__s2_790, __s2_790, 3, 2, 1, 0); \ - __ret_790 = vqrdmlahh_s16(__s0_790, __s1_790, __noswap_vget_lane_s16(__rev2_790, __p3_790)); \ - __ret_790; \ +#define vqrdmlahh_lane_s16(__p0_763, __p1_763, __p2_763, __p3_763) __extension__ ({ \ + int16_t __ret_763; \ + int16_t __s0_763 = __p0_763; \ + int16_t __s1_763 = __p1_763; \ + int16x4_t __s2_763 = __p2_763; \ + int16x4_t __rev2_763; __rev2_763 = __builtin_shufflevector(__s2_763, __s2_763, 3, 2, 1, 0); \ + __ret_763 = vqrdmlahh_s16(__s0_763, __s1_763, __noswap_vget_lane_s16(__rev2_763, __p3_763)); \ + __ret_763; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlahs_laneq_s32(__p0_791, __p1_791, __p2_791, __p3_791) __extension__ ({ \ - int32_t __ret_791; \ - int32_t __s0_791 = __p0_791; \ - int32_t 
__s1_791 = __p1_791; \ - int32x4_t __s2_791 = __p2_791; \ - __ret_791 = vqrdmlahs_s32(__s0_791, __s1_791, vgetq_lane_s32(__s2_791, __p3_791)); \ - __ret_791; \ +#define vqrdmlahs_laneq_s32(__p0_764, __p1_764, __p2_764, __p3_764) __extension__ ({ \ + int32_t __ret_764; \ + int32_t __s0_764 = __p0_764; \ + int32_t __s1_764 = __p1_764; \ + int32x4_t __s2_764 = __p2_764; \ + __ret_764 = vqrdmlahs_s32(__s0_764, __s1_764, vgetq_lane_s32(__s2_764, __p3_764)); \ + __ret_764; \ }) #else -#define vqrdmlahs_laneq_s32(__p0_792, __p1_792, __p2_792, __p3_792) __extension__ ({ \ - int32_t __ret_792; \ - int32_t __s0_792 = __p0_792; \ - int32_t __s1_792 = __p1_792; \ - int32x4_t __s2_792 = __p2_792; \ - int32x4_t __rev2_792; __rev2_792 = __builtin_shufflevector(__s2_792, __s2_792, 3, 2, 1, 0); \ - __ret_792 = vqrdmlahs_s32(__s0_792, __s1_792, __noswap_vgetq_lane_s32(__rev2_792, __p3_792)); \ - __ret_792; \ +#define vqrdmlahs_laneq_s32(__p0_765, __p1_765, __p2_765, __p3_765) __extension__ ({ \ + int32_t __ret_765; \ + int32_t __s0_765 = __p0_765; \ + int32_t __s1_765 = __p1_765; \ + int32x4_t __s2_765 = __p2_765; \ + int32x4_t __rev2_765; __rev2_765 = __builtin_shufflevector(__s2_765, __s2_765, 3, 2, 1, 0); \ + __ret_765 = vqrdmlahs_s32(__s0_765, __s1_765, __noswap_vgetq_lane_s32(__rev2_765, __p3_765)); \ + __ret_765; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlahh_laneq_s16(__p0_793, __p1_793, __p2_793, __p3_793) __extension__ ({ \ - int16_t __ret_793; \ - int16_t __s0_793 = __p0_793; \ - int16_t __s1_793 = __p1_793; \ - int16x8_t __s2_793 = __p2_793; \ - __ret_793 = vqrdmlahh_s16(__s0_793, __s1_793, vgetq_lane_s16(__s2_793, __p3_793)); \ - __ret_793; \ +#define vqrdmlahh_laneq_s16(__p0_766, __p1_766, __p2_766, __p3_766) __extension__ ({ \ + int16_t __ret_766; \ + int16_t __s0_766 = __p0_766; \ + int16_t __s1_766 = __p1_766; \ + int16x8_t __s2_766 = __p2_766; \ + __ret_766 = vqrdmlahh_s16(__s0_766, __s1_766, vgetq_lane_s16(__s2_766, __p3_766)); \ + __ret_766; \ }) #else 
-#define vqrdmlahh_laneq_s16(__p0_794, __p1_794, __p2_794, __p3_794) __extension__ ({ \ - int16_t __ret_794; \ - int16_t __s0_794 = __p0_794; \ - int16_t __s1_794 = __p1_794; \ - int16x8_t __s2_794 = __p2_794; \ - int16x8_t __rev2_794; __rev2_794 = __builtin_shufflevector(__s2_794, __s2_794, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_794 = vqrdmlahh_s16(__s0_794, __s1_794, __noswap_vgetq_lane_s16(__rev2_794, __p3_794)); \ - __ret_794; \ +#define vqrdmlahh_laneq_s16(__p0_767, __p1_767, __p2_767, __p3_767) __extension__ ({ \ + int16_t __ret_767; \ + int16_t __s0_767 = __p0_767; \ + int16_t __s1_767 = __p1_767; \ + int16x8_t __s2_767 = __p2_767; \ + int16x8_t __rev2_767; __rev2_767 = __builtin_shufflevector(__s2_767, __s2_767, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_767 = vqrdmlahh_s16(__s0_767, __s1_767, __noswap_vgetq_lane_s16(__rev2_767, __p3_767)); \ + __ret_767; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlahq_laneq_s32(__p0_795, __p1_795, __p2_795, __p3_795) __extension__ ({ \ - int32x4_t __ret_795; \ - int32x4_t __s0_795 = __p0_795; \ - int32x4_t __s1_795 = __p1_795; \ - int32x4_t __s2_795 = __p2_795; \ - __ret_795 = vqrdmlahq_s32(__s0_795, __s1_795, splatq_laneq_s32(__s2_795, __p3_795)); \ - __ret_795; \ +#define vqrdmlahq_laneq_s32(__p0_768, __p1_768, __p2_768, __p3_768) __extension__ ({ \ + int32x4_t __ret_768; \ + int32x4_t __s0_768 = __p0_768; \ + int32x4_t __s1_768 = __p1_768; \ + int32x4_t __s2_768 = __p2_768; \ + __ret_768 = vqrdmlahq_s32(__s0_768, __s1_768, splatq_laneq_s32(__s2_768, __p3_768)); \ + __ret_768; \ }) #else -#define vqrdmlahq_laneq_s32(__p0_796, __p1_796, __p2_796, __p3_796) __extension__ ({ \ - int32x4_t __ret_796; \ - int32x4_t __s0_796 = __p0_796; \ - int32x4_t __s1_796 = __p1_796; \ - int32x4_t __s2_796 = __p2_796; \ - int32x4_t __rev0_796; __rev0_796 = __builtin_shufflevector(__s0_796, __s0_796, 3, 2, 1, 0); \ - int32x4_t __rev1_796; __rev1_796 = __builtin_shufflevector(__s1_796, __s1_796, 3, 2, 1, 0); \ - int32x4_t __rev2_796; __rev2_796 
= __builtin_shufflevector(__s2_796, __s2_796, 3, 2, 1, 0); \ - __ret_796 = __noswap_vqrdmlahq_s32(__rev0_796, __rev1_796, __noswap_splatq_laneq_s32(__rev2_796, __p3_796)); \ - __ret_796 = __builtin_shufflevector(__ret_796, __ret_796, 3, 2, 1, 0); \ - __ret_796; \ +#define vqrdmlahq_laneq_s32(__p0_769, __p1_769, __p2_769, __p3_769) __extension__ ({ \ + int32x4_t __ret_769; \ + int32x4_t __s0_769 = __p0_769; \ + int32x4_t __s1_769 = __p1_769; \ + int32x4_t __s2_769 = __p2_769; \ + int32x4_t __rev0_769; __rev0_769 = __builtin_shufflevector(__s0_769, __s0_769, 3, 2, 1, 0); \ + int32x4_t __rev1_769; __rev1_769 = __builtin_shufflevector(__s1_769, __s1_769, 3, 2, 1, 0); \ + int32x4_t __rev2_769; __rev2_769 = __builtin_shufflevector(__s2_769, __s2_769, 3, 2, 1, 0); \ + __ret_769 = __noswap_vqrdmlahq_s32(__rev0_769, __rev1_769, __noswap_splatq_laneq_s32(__rev2_769, __p3_769)); \ + __ret_769 = __builtin_shufflevector(__ret_769, __ret_769, 3, 2, 1, 0); \ + __ret_769; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlahq_laneq_s16(__p0_797, __p1_797, __p2_797, __p3_797) __extension__ ({ \ - int16x8_t __ret_797; \ - int16x8_t __s0_797 = __p0_797; \ - int16x8_t __s1_797 = __p1_797; \ - int16x8_t __s2_797 = __p2_797; \ - __ret_797 = vqrdmlahq_s16(__s0_797, __s1_797, splatq_laneq_s16(__s2_797, __p3_797)); \ - __ret_797; \ +#define vqrdmlahq_laneq_s16(__p0_770, __p1_770, __p2_770, __p3_770) __extension__ ({ \ + int16x8_t __ret_770; \ + int16x8_t __s0_770 = __p0_770; \ + int16x8_t __s1_770 = __p1_770; \ + int16x8_t __s2_770 = __p2_770; \ + __ret_770 = vqrdmlahq_s16(__s0_770, __s1_770, splatq_laneq_s16(__s2_770, __p3_770)); \ + __ret_770; \ }) #else -#define vqrdmlahq_laneq_s16(__p0_798, __p1_798, __p2_798, __p3_798) __extension__ ({ \ - int16x8_t __ret_798; \ - int16x8_t __s0_798 = __p0_798; \ - int16x8_t __s1_798 = __p1_798; \ - int16x8_t __s2_798 = __p2_798; \ - int16x8_t __rev0_798; __rev0_798 = __builtin_shufflevector(__s0_798, __s0_798, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t 
__rev1_798; __rev1_798 = __builtin_shufflevector(__s1_798, __s1_798, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2_798; __rev2_798 = __builtin_shufflevector(__s2_798, __s2_798, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_798 = __noswap_vqrdmlahq_s16(__rev0_798, __rev1_798, __noswap_splatq_laneq_s16(__rev2_798, __p3_798)); \ - __ret_798 = __builtin_shufflevector(__ret_798, __ret_798, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_798; \ +#define vqrdmlahq_laneq_s16(__p0_771, __p1_771, __p2_771, __p3_771) __extension__ ({ \ + int16x8_t __ret_771; \ + int16x8_t __s0_771 = __p0_771; \ + int16x8_t __s1_771 = __p1_771; \ + int16x8_t __s2_771 = __p2_771; \ + int16x8_t __rev0_771; __rev0_771 = __builtin_shufflevector(__s0_771, __s0_771, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_771; __rev1_771 = __builtin_shufflevector(__s1_771, __s1_771, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_771; __rev2_771 = __builtin_shufflevector(__s2_771, __s2_771, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_771 = __noswap_vqrdmlahq_s16(__rev0_771, __rev1_771, __noswap_splatq_laneq_s16(__rev2_771, __p3_771)); \ + __ret_771 = __builtin_shufflevector(__ret_771, __ret_771, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_771; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlah_laneq_s32(__p0_799, __p1_799, __p2_799, __p3_799) __extension__ ({ \ - int32x2_t __ret_799; \ - int32x2_t __s0_799 = __p0_799; \ - int32x2_t __s1_799 = __p1_799; \ - int32x4_t __s2_799 = __p2_799; \ - __ret_799 = vqrdmlah_s32(__s0_799, __s1_799, splat_laneq_s32(__s2_799, __p3_799)); \ - __ret_799; \ +#define vqrdmlah_laneq_s32(__p0_772, __p1_772, __p2_772, __p3_772) __extension__ ({ \ + int32x2_t __ret_772; \ + int32x2_t __s0_772 = __p0_772; \ + int32x2_t __s1_772 = __p1_772; \ + int32x4_t __s2_772 = __p2_772; \ + __ret_772 = vqrdmlah_s32(__s0_772, __s1_772, splat_laneq_s32(__s2_772, __p3_772)); \ + __ret_772; \ }) #else -#define vqrdmlah_laneq_s32(__p0_800, __p1_800, __p2_800, __p3_800) __extension__ ({ \ - int32x2_t __ret_800; \ - int32x2_t __s0_800 = 
__p0_800; \ - int32x2_t __s1_800 = __p1_800; \ - int32x4_t __s2_800 = __p2_800; \ - int32x2_t __rev0_800; __rev0_800 = __builtin_shufflevector(__s0_800, __s0_800, 1, 0); \ - int32x2_t __rev1_800; __rev1_800 = __builtin_shufflevector(__s1_800, __s1_800, 1, 0); \ - int32x4_t __rev2_800; __rev2_800 = __builtin_shufflevector(__s2_800, __s2_800, 3, 2, 1, 0); \ - __ret_800 = __noswap_vqrdmlah_s32(__rev0_800, __rev1_800, __noswap_splat_laneq_s32(__rev2_800, __p3_800)); \ - __ret_800 = __builtin_shufflevector(__ret_800, __ret_800, 1, 0); \ - __ret_800; \ +#define vqrdmlah_laneq_s32(__p0_773, __p1_773, __p2_773, __p3_773) __extension__ ({ \ + int32x2_t __ret_773; \ + int32x2_t __s0_773 = __p0_773; \ + int32x2_t __s1_773 = __p1_773; \ + int32x4_t __s2_773 = __p2_773; \ + int32x2_t __rev0_773; __rev0_773 = __builtin_shufflevector(__s0_773, __s0_773, 1, 0); \ + int32x2_t __rev1_773; __rev1_773 = __builtin_shufflevector(__s1_773, __s1_773, 1, 0); \ + int32x4_t __rev2_773; __rev2_773 = __builtin_shufflevector(__s2_773, __s2_773, 3, 2, 1, 0); \ + __ret_773 = __noswap_vqrdmlah_s32(__rev0_773, __rev1_773, __noswap_splat_laneq_s32(__rev2_773, __p3_773)); \ + __ret_773 = __builtin_shufflevector(__ret_773, __ret_773, 1, 0); \ + __ret_773; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlah_laneq_s16(__p0_801, __p1_801, __p2_801, __p3_801) __extension__ ({ \ - int16x4_t __ret_801; \ - int16x4_t __s0_801 = __p0_801; \ - int16x4_t __s1_801 = __p1_801; \ - int16x8_t __s2_801 = __p2_801; \ - __ret_801 = vqrdmlah_s16(__s0_801, __s1_801, splat_laneq_s16(__s2_801, __p3_801)); \ - __ret_801; \ +#define vqrdmlah_laneq_s16(__p0_774, __p1_774, __p2_774, __p3_774) __extension__ ({ \ + int16x4_t __ret_774; \ + int16x4_t __s0_774 = __p0_774; \ + int16x4_t __s1_774 = __p1_774; \ + int16x8_t __s2_774 = __p2_774; \ + __ret_774 = vqrdmlah_s16(__s0_774, __s1_774, splat_laneq_s16(__s2_774, __p3_774)); \ + __ret_774; \ }) #else -#define vqrdmlah_laneq_s16(__p0_802, __p1_802, __p2_802, __p3_802) 
__extension__ ({ \ - int16x4_t __ret_802; \ - int16x4_t __s0_802 = __p0_802; \ - int16x4_t __s1_802 = __p1_802; \ - int16x8_t __s2_802 = __p2_802; \ - int16x4_t __rev0_802; __rev0_802 = __builtin_shufflevector(__s0_802, __s0_802, 3, 2, 1, 0); \ - int16x4_t __rev1_802; __rev1_802 = __builtin_shufflevector(__s1_802, __s1_802, 3, 2, 1, 0); \ - int16x8_t __rev2_802; __rev2_802 = __builtin_shufflevector(__s2_802, __s2_802, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_802 = __noswap_vqrdmlah_s16(__rev0_802, __rev1_802, __noswap_splat_laneq_s16(__rev2_802, __p3_802)); \ - __ret_802 = __builtin_shufflevector(__ret_802, __ret_802, 3, 2, 1, 0); \ - __ret_802; \ +#define vqrdmlah_laneq_s16(__p0_775, __p1_775, __p2_775, __p3_775) __extension__ ({ \ + int16x4_t __ret_775; \ + int16x4_t __s0_775 = __p0_775; \ + int16x4_t __s1_775 = __p1_775; \ + int16x8_t __s2_775 = __p2_775; \ + int16x4_t __rev0_775; __rev0_775 = __builtin_shufflevector(__s0_775, __s0_775, 3, 2, 1, 0); \ + int16x4_t __rev1_775; __rev1_775 = __builtin_shufflevector(__s1_775, __s1_775, 3, 2, 1, 0); \ + int16x8_t __rev2_775; __rev2_775 = __builtin_shufflevector(__s2_775, __s2_775, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_775 = __noswap_vqrdmlah_s16(__rev0_775, __rev1_775, __noswap_splat_laneq_s16(__rev2_775, __p3_775)); \ + __ret_775 = __builtin_shufflevector(__ret_775, __ret_775, 3, 2, 1, 0); \ + __ret_775; \ }) #endif -__ai __attribute__((target("v8.1a"))) int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int32_t vqrdmlshs_s32(int32_t __p0, int32_t __p1, int32_t __p2) { int32_t __ret; __ret = (int32_t) __builtin_neon_vqrdmlshs_s32(__p0, __p1, __p2); return __ret; } -__ai __attribute__((target("v8.1a"))) int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { +__ai __attribute__((target("v8.1a,neon"))) int16_t vqrdmlshh_s16(int16_t __p0, int16_t __p1, int16_t __p2) { int16_t __ret; __ret = (int16_t) __builtin_neon_vqrdmlshh_s16(__p0, __p1, __p2); return __ret; 
} #ifdef __LITTLE_ENDIAN__ -#define vqrdmlshs_lane_s32(__p0_803, __p1_803, __p2_803, __p3_803) __extension__ ({ \ - int32_t __ret_803; \ - int32_t __s0_803 = __p0_803; \ - int32_t __s1_803 = __p1_803; \ - int32x2_t __s2_803 = __p2_803; \ - __ret_803 = vqrdmlshs_s32(__s0_803, __s1_803, vget_lane_s32(__s2_803, __p3_803)); \ - __ret_803; \ +#define vqrdmlshs_lane_s32(__p0_776, __p1_776, __p2_776, __p3_776) __extension__ ({ \ + int32_t __ret_776; \ + int32_t __s0_776 = __p0_776; \ + int32_t __s1_776 = __p1_776; \ + int32x2_t __s2_776 = __p2_776; \ + __ret_776 = vqrdmlshs_s32(__s0_776, __s1_776, vget_lane_s32(__s2_776, __p3_776)); \ + __ret_776; \ }) #else -#define vqrdmlshs_lane_s32(__p0_804, __p1_804, __p2_804, __p3_804) __extension__ ({ \ - int32_t __ret_804; \ - int32_t __s0_804 = __p0_804; \ - int32_t __s1_804 = __p1_804; \ - int32x2_t __s2_804 = __p2_804; \ - int32x2_t __rev2_804; __rev2_804 = __builtin_shufflevector(__s2_804, __s2_804, 1, 0); \ - __ret_804 = vqrdmlshs_s32(__s0_804, __s1_804, __noswap_vget_lane_s32(__rev2_804, __p3_804)); \ - __ret_804; \ +#define vqrdmlshs_lane_s32(__p0_777, __p1_777, __p2_777, __p3_777) __extension__ ({ \ + int32_t __ret_777; \ + int32_t __s0_777 = __p0_777; \ + int32_t __s1_777 = __p1_777; \ + int32x2_t __s2_777 = __p2_777; \ + int32x2_t __rev2_777; __rev2_777 = __builtin_shufflevector(__s2_777, __s2_777, 1, 0); \ + __ret_777 = vqrdmlshs_s32(__s0_777, __s1_777, __noswap_vget_lane_s32(__rev2_777, __p3_777)); \ + __ret_777; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlshh_lane_s16(__p0_805, __p1_805, __p2_805, __p3_805) __extension__ ({ \ - int16_t __ret_805; \ - int16_t __s0_805 = __p0_805; \ - int16_t __s1_805 = __p1_805; \ - int16x4_t __s2_805 = __p2_805; \ - __ret_805 = vqrdmlshh_s16(__s0_805, __s1_805, vget_lane_s16(__s2_805, __p3_805)); \ - __ret_805; \ +#define vqrdmlshh_lane_s16(__p0_778, __p1_778, __p2_778, __p3_778) __extension__ ({ \ + int16_t __ret_778; \ + int16_t __s0_778 = __p0_778; \ + int16_t __s1_778 = 
__p1_778; \ + int16x4_t __s2_778 = __p2_778; \ + __ret_778 = vqrdmlshh_s16(__s0_778, __s1_778, vget_lane_s16(__s2_778, __p3_778)); \ + __ret_778; \ }) #else -#define vqrdmlshh_lane_s16(__p0_806, __p1_806, __p2_806, __p3_806) __extension__ ({ \ - int16_t __ret_806; \ - int16_t __s0_806 = __p0_806; \ - int16_t __s1_806 = __p1_806; \ - int16x4_t __s2_806 = __p2_806; \ - int16x4_t __rev2_806; __rev2_806 = __builtin_shufflevector(__s2_806, __s2_806, 3, 2, 1, 0); \ - __ret_806 = vqrdmlshh_s16(__s0_806, __s1_806, __noswap_vget_lane_s16(__rev2_806, __p3_806)); \ - __ret_806; \ +#define vqrdmlshh_lane_s16(__p0_779, __p1_779, __p2_779, __p3_779) __extension__ ({ \ + int16_t __ret_779; \ + int16_t __s0_779 = __p0_779; \ + int16_t __s1_779 = __p1_779; \ + int16x4_t __s2_779 = __p2_779; \ + int16x4_t __rev2_779; __rev2_779 = __builtin_shufflevector(__s2_779, __s2_779, 3, 2, 1, 0); \ + __ret_779 = vqrdmlshh_s16(__s0_779, __s1_779, __noswap_vget_lane_s16(__rev2_779, __p3_779)); \ + __ret_779; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlshs_laneq_s32(__p0_807, __p1_807, __p2_807, __p3_807) __extension__ ({ \ - int32_t __ret_807; \ - int32_t __s0_807 = __p0_807; \ - int32_t __s1_807 = __p1_807; \ - int32x4_t __s2_807 = __p2_807; \ - __ret_807 = vqrdmlshs_s32(__s0_807, __s1_807, vgetq_lane_s32(__s2_807, __p3_807)); \ - __ret_807; \ +#define vqrdmlshs_laneq_s32(__p0_780, __p1_780, __p2_780, __p3_780) __extension__ ({ \ + int32_t __ret_780; \ + int32_t __s0_780 = __p0_780; \ + int32_t __s1_780 = __p1_780; \ + int32x4_t __s2_780 = __p2_780; \ + __ret_780 = vqrdmlshs_s32(__s0_780, __s1_780, vgetq_lane_s32(__s2_780, __p3_780)); \ + __ret_780; \ }) #else -#define vqrdmlshs_laneq_s32(__p0_808, __p1_808, __p2_808, __p3_808) __extension__ ({ \ - int32_t __ret_808; \ - int32_t __s0_808 = __p0_808; \ - int32_t __s1_808 = __p1_808; \ - int32x4_t __s2_808 = __p2_808; \ - int32x4_t __rev2_808; __rev2_808 = __builtin_shufflevector(__s2_808, __s2_808, 3, 2, 1, 0); \ - __ret_808 = 
vqrdmlshs_s32(__s0_808, __s1_808, __noswap_vgetq_lane_s32(__rev2_808, __p3_808)); \ - __ret_808; \ +#define vqrdmlshs_laneq_s32(__p0_781, __p1_781, __p2_781, __p3_781) __extension__ ({ \ + int32_t __ret_781; \ + int32_t __s0_781 = __p0_781; \ + int32_t __s1_781 = __p1_781; \ + int32x4_t __s2_781 = __p2_781; \ + int32x4_t __rev2_781; __rev2_781 = __builtin_shufflevector(__s2_781, __s2_781, 3, 2, 1, 0); \ + __ret_781 = vqrdmlshs_s32(__s0_781, __s1_781, __noswap_vgetq_lane_s32(__rev2_781, __p3_781)); \ + __ret_781; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlshh_laneq_s16(__p0_809, __p1_809, __p2_809, __p3_809) __extension__ ({ \ - int16_t __ret_809; \ - int16_t __s0_809 = __p0_809; \ - int16_t __s1_809 = __p1_809; \ - int16x8_t __s2_809 = __p2_809; \ - __ret_809 = vqrdmlshh_s16(__s0_809, __s1_809, vgetq_lane_s16(__s2_809, __p3_809)); \ - __ret_809; \ +#define vqrdmlshh_laneq_s16(__p0_782, __p1_782, __p2_782, __p3_782) __extension__ ({ \ + int16_t __ret_782; \ + int16_t __s0_782 = __p0_782; \ + int16_t __s1_782 = __p1_782; \ + int16x8_t __s2_782 = __p2_782; \ + __ret_782 = vqrdmlshh_s16(__s0_782, __s1_782, vgetq_lane_s16(__s2_782, __p3_782)); \ + __ret_782; \ }) #else -#define vqrdmlshh_laneq_s16(__p0_810, __p1_810, __p2_810, __p3_810) __extension__ ({ \ - int16_t __ret_810; \ - int16_t __s0_810 = __p0_810; \ - int16_t __s1_810 = __p1_810; \ - int16x8_t __s2_810 = __p2_810; \ - int16x8_t __rev2_810; __rev2_810 = __builtin_shufflevector(__s2_810, __s2_810, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_810 = vqrdmlshh_s16(__s0_810, __s1_810, __noswap_vgetq_lane_s16(__rev2_810, __p3_810)); \ - __ret_810; \ +#define vqrdmlshh_laneq_s16(__p0_783, __p1_783, __p2_783, __p3_783) __extension__ ({ \ + int16_t __ret_783; \ + int16_t __s0_783 = __p0_783; \ + int16_t __s1_783 = __p1_783; \ + int16x8_t __s2_783 = __p2_783; \ + int16x8_t __rev2_783; __rev2_783 = __builtin_shufflevector(__s2_783, __s2_783, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_783 = vqrdmlshh_s16(__s0_783, __s1_783, 
__noswap_vgetq_lane_s16(__rev2_783, __p3_783)); \ + __ret_783; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlshq_laneq_s32(__p0_811, __p1_811, __p2_811, __p3_811) __extension__ ({ \ - int32x4_t __ret_811; \ - int32x4_t __s0_811 = __p0_811; \ - int32x4_t __s1_811 = __p1_811; \ - int32x4_t __s2_811 = __p2_811; \ - __ret_811 = vqrdmlshq_s32(__s0_811, __s1_811, splatq_laneq_s32(__s2_811, __p3_811)); \ - __ret_811; \ +#define vqrdmlshq_laneq_s32(__p0_784, __p1_784, __p2_784, __p3_784) __extension__ ({ \ + int32x4_t __ret_784; \ + int32x4_t __s0_784 = __p0_784; \ + int32x4_t __s1_784 = __p1_784; \ + int32x4_t __s2_784 = __p2_784; \ + __ret_784 = vqrdmlshq_s32(__s0_784, __s1_784, splatq_laneq_s32(__s2_784, __p3_784)); \ + __ret_784; \ }) #else -#define vqrdmlshq_laneq_s32(__p0_812, __p1_812, __p2_812, __p3_812) __extension__ ({ \ - int32x4_t __ret_812; \ - int32x4_t __s0_812 = __p0_812; \ - int32x4_t __s1_812 = __p1_812; \ - int32x4_t __s2_812 = __p2_812; \ - int32x4_t __rev0_812; __rev0_812 = __builtin_shufflevector(__s0_812, __s0_812, 3, 2, 1, 0); \ - int32x4_t __rev1_812; __rev1_812 = __builtin_shufflevector(__s1_812, __s1_812, 3, 2, 1, 0); \ - int32x4_t __rev2_812; __rev2_812 = __builtin_shufflevector(__s2_812, __s2_812, 3, 2, 1, 0); \ - __ret_812 = __noswap_vqrdmlshq_s32(__rev0_812, __rev1_812, __noswap_splatq_laneq_s32(__rev2_812, __p3_812)); \ - __ret_812 = __builtin_shufflevector(__ret_812, __ret_812, 3, 2, 1, 0); \ - __ret_812; \ +#define vqrdmlshq_laneq_s32(__p0_785, __p1_785, __p2_785, __p3_785) __extension__ ({ \ + int32x4_t __ret_785; \ + int32x4_t __s0_785 = __p0_785; \ + int32x4_t __s1_785 = __p1_785; \ + int32x4_t __s2_785 = __p2_785; \ + int32x4_t __rev0_785; __rev0_785 = __builtin_shufflevector(__s0_785, __s0_785, 3, 2, 1, 0); \ + int32x4_t __rev1_785; __rev1_785 = __builtin_shufflevector(__s1_785, __s1_785, 3, 2, 1, 0); \ + int32x4_t __rev2_785; __rev2_785 = __builtin_shufflevector(__s2_785, __s2_785, 3, 2, 1, 0); \ + __ret_785 = 
__noswap_vqrdmlshq_s32(__rev0_785, __rev1_785, __noswap_splatq_laneq_s32(__rev2_785, __p3_785)); \ + __ret_785 = __builtin_shufflevector(__ret_785, __ret_785, 3, 2, 1, 0); \ + __ret_785; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlshq_laneq_s16(__p0_813, __p1_813, __p2_813, __p3_813) __extension__ ({ \ - int16x8_t __ret_813; \ - int16x8_t __s0_813 = __p0_813; \ - int16x8_t __s1_813 = __p1_813; \ - int16x8_t __s2_813 = __p2_813; \ - __ret_813 = vqrdmlshq_s16(__s0_813, __s1_813, splatq_laneq_s16(__s2_813, __p3_813)); \ - __ret_813; \ +#define vqrdmlshq_laneq_s16(__p0_786, __p1_786, __p2_786, __p3_786) __extension__ ({ \ + int16x8_t __ret_786; \ + int16x8_t __s0_786 = __p0_786; \ + int16x8_t __s1_786 = __p1_786; \ + int16x8_t __s2_786 = __p2_786; \ + __ret_786 = vqrdmlshq_s16(__s0_786, __s1_786, splatq_laneq_s16(__s2_786, __p3_786)); \ + __ret_786; \ }) #else -#define vqrdmlshq_laneq_s16(__p0_814, __p1_814, __p2_814, __p3_814) __extension__ ({ \ - int16x8_t __ret_814; \ - int16x8_t __s0_814 = __p0_814; \ - int16x8_t __s1_814 = __p1_814; \ - int16x8_t __s2_814 = __p2_814; \ - int16x8_t __rev0_814; __rev0_814 = __builtin_shufflevector(__s0_814, __s0_814, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev1_814; __rev1_814 = __builtin_shufflevector(__s1_814, __s1_814, 7, 6, 5, 4, 3, 2, 1, 0); \ - int16x8_t __rev2_814; __rev2_814 = __builtin_shufflevector(__s2_814, __s2_814, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_814 = __noswap_vqrdmlshq_s16(__rev0_814, __rev1_814, __noswap_splatq_laneq_s16(__rev2_814, __p3_814)); \ - __ret_814 = __builtin_shufflevector(__ret_814, __ret_814, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_814; \ +#define vqrdmlshq_laneq_s16(__p0_787, __p1_787, __p2_787, __p3_787) __extension__ ({ \ + int16x8_t __ret_787; \ + int16x8_t __s0_787 = __p0_787; \ + int16x8_t __s1_787 = __p1_787; \ + int16x8_t __s2_787 = __p2_787; \ + int16x8_t __rev0_787; __rev0_787 = __builtin_shufflevector(__s0_787, __s0_787, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev1_787; __rev1_787 = 
__builtin_shufflevector(__s1_787, __s1_787, 7, 6, 5, 4, 3, 2, 1, 0); \ + int16x8_t __rev2_787; __rev2_787 = __builtin_shufflevector(__s2_787, __s2_787, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_787 = __noswap_vqrdmlshq_s16(__rev0_787, __rev1_787, __noswap_splatq_laneq_s16(__rev2_787, __p3_787)); \ + __ret_787 = __builtin_shufflevector(__ret_787, __ret_787, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_787; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlsh_laneq_s32(__p0_815, __p1_815, __p2_815, __p3_815) __extension__ ({ \ - int32x2_t __ret_815; \ - int32x2_t __s0_815 = __p0_815; \ - int32x2_t __s1_815 = __p1_815; \ - int32x4_t __s2_815 = __p2_815; \ - __ret_815 = vqrdmlsh_s32(__s0_815, __s1_815, splat_laneq_s32(__s2_815, __p3_815)); \ - __ret_815; \ +#define vqrdmlsh_laneq_s32(__p0_788, __p1_788, __p2_788, __p3_788) __extension__ ({ \ + int32x2_t __ret_788; \ + int32x2_t __s0_788 = __p0_788; \ + int32x2_t __s1_788 = __p1_788; \ + int32x4_t __s2_788 = __p2_788; \ + __ret_788 = vqrdmlsh_s32(__s0_788, __s1_788, splat_laneq_s32(__s2_788, __p3_788)); \ + __ret_788; \ }) #else -#define vqrdmlsh_laneq_s32(__p0_816, __p1_816, __p2_816, __p3_816) __extension__ ({ \ - int32x2_t __ret_816; \ - int32x2_t __s0_816 = __p0_816; \ - int32x2_t __s1_816 = __p1_816; \ - int32x4_t __s2_816 = __p2_816; \ - int32x2_t __rev0_816; __rev0_816 = __builtin_shufflevector(__s0_816, __s0_816, 1, 0); \ - int32x2_t __rev1_816; __rev1_816 = __builtin_shufflevector(__s1_816, __s1_816, 1, 0); \ - int32x4_t __rev2_816; __rev2_816 = __builtin_shufflevector(__s2_816, __s2_816, 3, 2, 1, 0); \ - __ret_816 = __noswap_vqrdmlsh_s32(__rev0_816, __rev1_816, __noswap_splat_laneq_s32(__rev2_816, __p3_816)); \ - __ret_816 = __builtin_shufflevector(__ret_816, __ret_816, 1, 0); \ - __ret_816; \ +#define vqrdmlsh_laneq_s32(__p0_789, __p1_789, __p2_789, __p3_789) __extension__ ({ \ + int32x2_t __ret_789; \ + int32x2_t __s0_789 = __p0_789; \ + int32x2_t __s1_789 = __p1_789; \ + int32x4_t __s2_789 = __p2_789; \ + int32x2_t 
__rev0_789; __rev0_789 = __builtin_shufflevector(__s0_789, __s0_789, 1, 0); \ + int32x2_t __rev1_789; __rev1_789 = __builtin_shufflevector(__s1_789, __s1_789, 1, 0); \ + int32x4_t __rev2_789; __rev2_789 = __builtin_shufflevector(__s2_789, __s2_789, 3, 2, 1, 0); \ + __ret_789 = __noswap_vqrdmlsh_s32(__rev0_789, __rev1_789, __noswap_splat_laneq_s32(__rev2_789, __p3_789)); \ + __ret_789 = __builtin_shufflevector(__ret_789, __ret_789, 1, 0); \ + __ret_789; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vqrdmlsh_laneq_s16(__p0_817, __p1_817, __p2_817, __p3_817) __extension__ ({ \ - int16x4_t __ret_817; \ - int16x4_t __s0_817 = __p0_817; \ - int16x4_t __s1_817 = __p1_817; \ - int16x8_t __s2_817 = __p2_817; \ - __ret_817 = vqrdmlsh_s16(__s0_817, __s1_817, splat_laneq_s16(__s2_817, __p3_817)); \ - __ret_817; \ +#define vqrdmlsh_laneq_s16(__p0_790, __p1_790, __p2_790, __p3_790) __extension__ ({ \ + int16x4_t __ret_790; \ + int16x4_t __s0_790 = __p0_790; \ + int16x4_t __s1_790 = __p1_790; \ + int16x8_t __s2_790 = __p2_790; \ + __ret_790 = vqrdmlsh_s16(__s0_790, __s1_790, splat_laneq_s16(__s2_790, __p3_790)); \ + __ret_790; \ }) #else -#define vqrdmlsh_laneq_s16(__p0_818, __p1_818, __p2_818, __p3_818) __extension__ ({ \ - int16x4_t __ret_818; \ - int16x4_t __s0_818 = __p0_818; \ - int16x4_t __s1_818 = __p1_818; \ - int16x8_t __s2_818 = __p2_818; \ - int16x4_t __rev0_818; __rev0_818 = __builtin_shufflevector(__s0_818, __s0_818, 3, 2, 1, 0); \ - int16x4_t __rev1_818; __rev1_818 = __builtin_shufflevector(__s1_818, __s1_818, 3, 2, 1, 0); \ - int16x8_t __rev2_818; __rev2_818 = __builtin_shufflevector(__s2_818, __s2_818, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_818 = __noswap_vqrdmlsh_s16(__rev0_818, __rev1_818, __noswap_splat_laneq_s16(__rev2_818, __p3_818)); \ - __ret_818 = __builtin_shufflevector(__ret_818, __ret_818, 3, 2, 1, 0); \ - __ret_818; \ +#define vqrdmlsh_laneq_s16(__p0_791, __p1_791, __p2_791, __p3_791) __extension__ ({ \ + int16x4_t __ret_791; \ + int16x4_t __s0_791 = 
__p0_791; \ + int16x4_t __s1_791 = __p1_791; \ + int16x8_t __s2_791 = __p2_791; \ + int16x4_t __rev0_791; __rev0_791 = __builtin_shufflevector(__s0_791, __s0_791, 3, 2, 1, 0); \ + int16x4_t __rev1_791; __rev1_791 = __builtin_shufflevector(__s1_791, __s1_791, 3, 2, 1, 0); \ + int16x8_t __rev2_791; __rev2_791 = __builtin_shufflevector(__s2_791, __s2_791, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_791 = __noswap_vqrdmlsh_s16(__rev0_791, __rev1_791, __noswap_splat_laneq_s16(__rev2_791, __p3_791)); \ + __ret_791 = __builtin_shufflevector(__ret_791, __ret_791, 3, 2, 1, 0); \ + __ret_791; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcaddq_rot270_f64((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else -__ai __attribute__((target("v8.3a"))) float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcaddq_rot270_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -65754,13 +65584,13 @@ __ai __attribute__((target("v8.3a"))) float64x2_t vcaddq_rot270_f64(float64x2_t #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcaddq_rot90_f64((int8x16_t)__p0, (int8x16_t)__p1, 42); return __ret; } #else -__ai __attribute__((target("v8.3a"))) float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) { +__ai __attribute__((target("v8.3a,neon"))) 
float64x2_t vcaddq_rot90_f64(float64x2_t __p0, float64x2_t __p1) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -65771,13 +65601,13 @@ __ai __attribute__((target("v8.3a"))) float64x2_t vcaddq_rot90_f64(float64x2_t _ #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcmlaq_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #else -__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -65786,116 +65616,116 @@ __ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_f64(float64x2_t __p0, f __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai __attribute__((target("v8.3a"))) float64x2_t __noswap_vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t __noswap_vcmlaq_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcmlaq_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #endif -__ai __attribute__((target("v8.3a"))) float64x1_t vcmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x1_t vcmla_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { float64x1_t __ret; 
__ret = (float64x1_t) __builtin_neon_vcmla_f64((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); return __ret; } -#define vcmla_lane_f64(__p0_819, __p1_819, __p2_819, __p3_819) __extension__ ({ \ - float64x1_t __ret_819; \ - float64x1_t __s0_819 = __p0_819; \ - float64x1_t __s1_819 = __p1_819; \ - float64x1_t __s2_819 = __p2_819; \ -float64x1_t __reint_819 = __s2_819; \ -uint64x2_t __reint1_819 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_819, __p3_819), vgetq_lane_u64(*(uint64x2_t *) &__reint_819, __p3_819)}; \ - __ret_819 = vcmla_f64(__s0_819, __s1_819, *(float64x1_t *) &__reint1_819); \ - __ret_819; \ +#define vcmla_lane_f64(__p0_792, __p1_792, __p2_792, __p3_792) __extension__ ({ \ + float64x1_t __ret_792; \ + float64x1_t __s0_792 = __p0_792; \ + float64x1_t __s1_792 = __p1_792; \ + float64x1_t __s2_792 = __p2_792; \ +float64x1_t __reint_792 = __s2_792; \ +uint64x2_t __reint1_792 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_792, __p3_792), vgetq_lane_u64(*(uint64x2_t *) &__reint_792, __p3_792)}; \ + __ret_792 = vcmla_f64(__s0_792, __s1_792, *(float64x1_t *) &__reint1_792); \ + __ret_792; \ }) #ifdef __LITTLE_ENDIAN__ -#define vcmlaq_lane_f64(__p0_820, __p1_820, __p2_820, __p3_820) __extension__ ({ \ - float64x2_t __ret_820; \ - float64x2_t __s0_820 = __p0_820; \ - float64x2_t __s1_820 = __p1_820; \ - float64x1_t __s2_820 = __p2_820; \ -float64x1_t __reint_820 = __s2_820; \ -uint64x2_t __reint1_820 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_820, __p3_820), vgetq_lane_u64(*(uint64x2_t *) &__reint_820, __p3_820)}; \ - __ret_820 = vcmlaq_f64(__s0_820, __s1_820, *(float64x2_t *) &__reint1_820); \ - __ret_820; \ +#define vcmlaq_lane_f64(__p0_793, __p1_793, __p2_793, __p3_793) __extension__ ({ \ + float64x2_t __ret_793; \ + float64x2_t __s0_793 = __p0_793; \ + float64x2_t __s1_793 = __p1_793; \ + float64x1_t __s2_793 = __p2_793; \ +float64x1_t __reint_793 = __s2_793; \ +uint64x2_t __reint1_793 = (uint64x2_t) 
{vgetq_lane_u64(*(uint64x2_t *) &__reint_793, __p3_793), vgetq_lane_u64(*(uint64x2_t *) &__reint_793, __p3_793)}; \ + __ret_793 = vcmlaq_f64(__s0_793, __s1_793, *(float64x2_t *) &__reint1_793); \ + __ret_793; \ }) #else -#define vcmlaq_lane_f64(__p0_821, __p1_821, __p2_821, __p3_821) __extension__ ({ \ - float64x2_t __ret_821; \ - float64x2_t __s0_821 = __p0_821; \ - float64x2_t __s1_821 = __p1_821; \ - float64x1_t __s2_821 = __p2_821; \ - float64x2_t __rev0_821; __rev0_821 = __builtin_shufflevector(__s0_821, __s0_821, 1, 0); \ - float64x2_t __rev1_821; __rev1_821 = __builtin_shufflevector(__s1_821, __s1_821, 1, 0); \ -float64x1_t __reint_821 = __s2_821; \ -uint64x2_t __reint1_821 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_821, __p3_821), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_821, __p3_821)}; \ - __ret_821 = __noswap_vcmlaq_f64(__rev0_821, __rev1_821, *(float64x2_t *) &__reint1_821); \ - __ret_821 = __builtin_shufflevector(__ret_821, __ret_821, 1, 0); \ - __ret_821; \ +#define vcmlaq_lane_f64(__p0_794, __p1_794, __p2_794, __p3_794) __extension__ ({ \ + float64x2_t __ret_794; \ + float64x2_t __s0_794 = __p0_794; \ + float64x2_t __s1_794 = __p1_794; \ + float64x1_t __s2_794 = __p2_794; \ + float64x2_t __rev0_794; __rev0_794 = __builtin_shufflevector(__s0_794, __s0_794, 1, 0); \ + float64x2_t __rev1_794; __rev1_794 = __builtin_shufflevector(__s1_794, __s1_794, 1, 0); \ +float64x1_t __reint_794 = __s2_794; \ +uint64x2_t __reint1_794 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_794, __p3_794), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_794, __p3_794)}; \ + __ret_794 = __noswap_vcmlaq_f64(__rev0_794, __rev1_794, *(float64x2_t *) &__reint1_794); \ + __ret_794 = __builtin_shufflevector(__ret_794, __ret_794, 1, 0); \ + __ret_794; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmla_laneq_f64(__p0_822, __p1_822, __p2_822, __p3_822) __extension__ ({ \ - float64x1_t __ret_822; \ - float64x1_t __s0_822 = __p0_822; \ - 
float64x1_t __s1_822 = __p1_822; \ - float64x2_t __s2_822 = __p2_822; \ -float64x2_t __reint_822 = __s2_822; \ -uint64x2_t __reint1_822 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_822, __p3_822), vgetq_lane_u64(*(uint64x2_t *) &__reint_822, __p3_822)}; \ - __ret_822 = vcmla_f64(__s0_822, __s1_822, *(float64x1_t *) &__reint1_822); \ - __ret_822; \ +#define vcmla_laneq_f64(__p0_795, __p1_795, __p2_795, __p3_795) __extension__ ({ \ + float64x1_t __ret_795; \ + float64x1_t __s0_795 = __p0_795; \ + float64x1_t __s1_795 = __p1_795; \ + float64x2_t __s2_795 = __p2_795; \ +float64x2_t __reint_795 = __s2_795; \ +uint64x2_t __reint1_795 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_795, __p3_795), vgetq_lane_u64(*(uint64x2_t *) &__reint_795, __p3_795)}; \ + __ret_795 = vcmla_f64(__s0_795, __s1_795, *(float64x1_t *) &__reint1_795); \ + __ret_795; \ }) #else -#define vcmla_laneq_f64(__p0_823, __p1_823, __p2_823, __p3_823) __extension__ ({ \ - float64x1_t __ret_823; \ - float64x1_t __s0_823 = __p0_823; \ - float64x1_t __s1_823 = __p1_823; \ - float64x2_t __s2_823 = __p2_823; \ - float64x2_t __rev2_823; __rev2_823 = __builtin_shufflevector(__s2_823, __s2_823, 1, 0); \ -float64x2_t __reint_823 = __rev2_823; \ -uint64x2_t __reint1_823 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_823, __p3_823), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_823, __p3_823)}; \ - __ret_823 = vcmla_f64(__s0_823, __s1_823, *(float64x1_t *) &__reint1_823); \ - __ret_823; \ +#define vcmla_laneq_f64(__p0_796, __p1_796, __p2_796, __p3_796) __extension__ ({ \ + float64x1_t __ret_796; \ + float64x1_t __s0_796 = __p0_796; \ + float64x1_t __s1_796 = __p1_796; \ + float64x2_t __s2_796 = __p2_796; \ + float64x2_t __rev2_796; __rev2_796 = __builtin_shufflevector(__s2_796, __s2_796, 1, 0); \ +float64x2_t __reint_796 = __rev2_796; \ +uint64x2_t __reint1_796 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_796, __p3_796), 
__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_796, __p3_796)}; \ + __ret_796 = vcmla_f64(__s0_796, __s1_796, *(float64x1_t *) &__reint1_796); \ + __ret_796; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmlaq_laneq_f64(__p0_824, __p1_824, __p2_824, __p3_824) __extension__ ({ \ - float64x2_t __ret_824; \ - float64x2_t __s0_824 = __p0_824; \ - float64x2_t __s1_824 = __p1_824; \ - float64x2_t __s2_824 = __p2_824; \ -float64x2_t __reint_824 = __s2_824; \ -uint64x2_t __reint1_824 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_824, __p3_824), vgetq_lane_u64(*(uint64x2_t *) &__reint_824, __p3_824)}; \ - __ret_824 = vcmlaq_f64(__s0_824, __s1_824, *(float64x2_t *) &__reint1_824); \ - __ret_824; \ +#define vcmlaq_laneq_f64(__p0_797, __p1_797, __p2_797, __p3_797) __extension__ ({ \ + float64x2_t __ret_797; \ + float64x2_t __s0_797 = __p0_797; \ + float64x2_t __s1_797 = __p1_797; \ + float64x2_t __s2_797 = __p2_797; \ +float64x2_t __reint_797 = __s2_797; \ +uint64x2_t __reint1_797 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_797, __p3_797), vgetq_lane_u64(*(uint64x2_t *) &__reint_797, __p3_797)}; \ + __ret_797 = vcmlaq_f64(__s0_797, __s1_797, *(float64x2_t *) &__reint1_797); \ + __ret_797; \ }) #else -#define vcmlaq_laneq_f64(__p0_825, __p1_825, __p2_825, __p3_825) __extension__ ({ \ - float64x2_t __ret_825; \ - float64x2_t __s0_825 = __p0_825; \ - float64x2_t __s1_825 = __p1_825; \ - float64x2_t __s2_825 = __p2_825; \ - float64x2_t __rev0_825; __rev0_825 = __builtin_shufflevector(__s0_825, __s0_825, 1, 0); \ - float64x2_t __rev1_825; __rev1_825 = __builtin_shufflevector(__s1_825, __s1_825, 1, 0); \ - float64x2_t __rev2_825; __rev2_825 = __builtin_shufflevector(__s2_825, __s2_825, 1, 0); \ -float64x2_t __reint_825 = __rev2_825; \ -uint64x2_t __reint1_825 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_825, __p3_825), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_825, __p3_825)}; \ - __ret_825 = __noswap_vcmlaq_f64(__rev0_825, 
__rev1_825, *(float64x2_t *) &__reint1_825); \ - __ret_825 = __builtin_shufflevector(__ret_825, __ret_825, 1, 0); \ - __ret_825; \ +#define vcmlaq_laneq_f64(__p0_798, __p1_798, __p2_798, __p3_798) __extension__ ({ \ + float64x2_t __ret_798; \ + float64x2_t __s0_798 = __p0_798; \ + float64x2_t __s1_798 = __p1_798; \ + float64x2_t __s2_798 = __p2_798; \ + float64x2_t __rev0_798; __rev0_798 = __builtin_shufflevector(__s0_798, __s0_798, 1, 0); \ + float64x2_t __rev1_798; __rev1_798 = __builtin_shufflevector(__s1_798, __s1_798, 1, 0); \ + float64x2_t __rev2_798; __rev2_798 = __builtin_shufflevector(__s2_798, __s2_798, 1, 0); \ +float64x2_t __reint_798 = __rev2_798; \ +uint64x2_t __reint1_798 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_798, __p3_798), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_798, __p3_798)}; \ + __ret_798 = __noswap_vcmlaq_f64(__rev0_798, __rev1_798, *(float64x2_t *) &__reint1_798); \ + __ret_798 = __builtin_shufflevector(__ret_798, __ret_798, 1, 0); \ + __ret_798; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #else -__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -65904,116 +65734,116 @@ __ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot180_f64(float64x2_t __ret = 
__builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai __attribute__((target("v8.3a"))) float64x2_t __noswap_vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t __noswap_vcmlaq_rot180_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcmlaq_rot180_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #endif -__ai __attribute__((target("v8.3a"))) float64x1_t vcmla_rot180_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x1_t vcmla_rot180_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vcmla_rot180_f64((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); return __ret; } -#define vcmla_rot180_lane_f64(__p0_826, __p1_826, __p2_826, __p3_826) __extension__ ({ \ - float64x1_t __ret_826; \ - float64x1_t __s0_826 = __p0_826; \ - float64x1_t __s1_826 = __p1_826; \ - float64x1_t __s2_826 = __p2_826; \ -float64x1_t __reint_826 = __s2_826; \ -uint64x2_t __reint1_826 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_826, __p3_826), vgetq_lane_u64(*(uint64x2_t *) &__reint_826, __p3_826)}; \ - __ret_826 = vcmla_rot180_f64(__s0_826, __s1_826, *(float64x1_t *) &__reint1_826); \ - __ret_826; \ +#define vcmla_rot180_lane_f64(__p0_799, __p1_799, __p2_799, __p3_799) __extension__ ({ \ + float64x1_t __ret_799; \ + float64x1_t __s0_799 = __p0_799; \ + float64x1_t __s1_799 = __p1_799; \ + float64x1_t __s2_799 = __p2_799; \ +float64x1_t __reint_799 = __s2_799; \ +uint64x2_t __reint1_799 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_799, __p3_799), vgetq_lane_u64(*(uint64x2_t *) &__reint_799, __p3_799)}; \ + __ret_799 = vcmla_rot180_f64(__s0_799, __s1_799, *(float64x1_t *) &__reint1_799); \ + __ret_799; \ }) #ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot180_lane_f64(__p0_827, 
__p1_827, __p2_827, __p3_827) __extension__ ({ \ - float64x2_t __ret_827; \ - float64x2_t __s0_827 = __p0_827; \ - float64x2_t __s1_827 = __p1_827; \ - float64x1_t __s2_827 = __p2_827; \ -float64x1_t __reint_827 = __s2_827; \ -uint64x2_t __reint1_827 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_827, __p3_827), vgetq_lane_u64(*(uint64x2_t *) &__reint_827, __p3_827)}; \ - __ret_827 = vcmlaq_rot180_f64(__s0_827, __s1_827, *(float64x2_t *) &__reint1_827); \ - __ret_827; \ +#define vcmlaq_rot180_lane_f64(__p0_800, __p1_800, __p2_800, __p3_800) __extension__ ({ \ + float64x2_t __ret_800; \ + float64x2_t __s0_800 = __p0_800; \ + float64x2_t __s1_800 = __p1_800; \ + float64x1_t __s2_800 = __p2_800; \ +float64x1_t __reint_800 = __s2_800; \ +uint64x2_t __reint1_800 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_800, __p3_800), vgetq_lane_u64(*(uint64x2_t *) &__reint_800, __p3_800)}; \ + __ret_800 = vcmlaq_rot180_f64(__s0_800, __s1_800, *(float64x2_t *) &__reint1_800); \ + __ret_800; \ }) #else -#define vcmlaq_rot180_lane_f64(__p0_828, __p1_828, __p2_828, __p3_828) __extension__ ({ \ - float64x2_t __ret_828; \ - float64x2_t __s0_828 = __p0_828; \ - float64x2_t __s1_828 = __p1_828; \ - float64x1_t __s2_828 = __p2_828; \ - float64x2_t __rev0_828; __rev0_828 = __builtin_shufflevector(__s0_828, __s0_828, 1, 0); \ - float64x2_t __rev1_828; __rev1_828 = __builtin_shufflevector(__s1_828, __s1_828, 1, 0); \ -float64x1_t __reint_828 = __s2_828; \ -uint64x2_t __reint1_828 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_828, __p3_828), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_828, __p3_828)}; \ - __ret_828 = __noswap_vcmlaq_rot180_f64(__rev0_828, __rev1_828, *(float64x2_t *) &__reint1_828); \ - __ret_828 = __builtin_shufflevector(__ret_828, __ret_828, 1, 0); \ - __ret_828; \ +#define vcmlaq_rot180_lane_f64(__p0_801, __p1_801, __p2_801, __p3_801) __extension__ ({ \ + float64x2_t __ret_801; \ + float64x2_t __s0_801 = __p0_801; \ + float64x2_t 
__s1_801 = __p1_801; \ + float64x1_t __s2_801 = __p2_801; \ + float64x2_t __rev0_801; __rev0_801 = __builtin_shufflevector(__s0_801, __s0_801, 1, 0); \ + float64x2_t __rev1_801; __rev1_801 = __builtin_shufflevector(__s1_801, __s1_801, 1, 0); \ +float64x1_t __reint_801 = __s2_801; \ +uint64x2_t __reint1_801 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_801, __p3_801), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_801, __p3_801)}; \ + __ret_801 = __noswap_vcmlaq_rot180_f64(__rev0_801, __rev1_801, *(float64x2_t *) &__reint1_801); \ + __ret_801 = __builtin_shufflevector(__ret_801, __ret_801, 1, 0); \ + __ret_801; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmla_rot180_laneq_f64(__p0_829, __p1_829, __p2_829, __p3_829) __extension__ ({ \ - float64x1_t __ret_829; \ - float64x1_t __s0_829 = __p0_829; \ - float64x1_t __s1_829 = __p1_829; \ - float64x2_t __s2_829 = __p2_829; \ -float64x2_t __reint_829 = __s2_829; \ -uint64x2_t __reint1_829 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_829, __p3_829), vgetq_lane_u64(*(uint64x2_t *) &__reint_829, __p3_829)}; \ - __ret_829 = vcmla_rot180_f64(__s0_829, __s1_829, *(float64x1_t *) &__reint1_829); \ - __ret_829; \ +#define vcmla_rot180_laneq_f64(__p0_802, __p1_802, __p2_802, __p3_802) __extension__ ({ \ + float64x1_t __ret_802; \ + float64x1_t __s0_802 = __p0_802; \ + float64x1_t __s1_802 = __p1_802; \ + float64x2_t __s2_802 = __p2_802; \ +float64x2_t __reint_802 = __s2_802; \ +uint64x2_t __reint1_802 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_802, __p3_802), vgetq_lane_u64(*(uint64x2_t *) &__reint_802, __p3_802)}; \ + __ret_802 = vcmla_rot180_f64(__s0_802, __s1_802, *(float64x1_t *) &__reint1_802); \ + __ret_802; \ }) #else -#define vcmla_rot180_laneq_f64(__p0_830, __p1_830, __p2_830, __p3_830) __extension__ ({ \ - float64x1_t __ret_830; \ - float64x1_t __s0_830 = __p0_830; \ - float64x1_t __s1_830 = __p1_830; \ - float64x2_t __s2_830 = __p2_830; \ - float64x2_t __rev2_830; 
__rev2_830 = __builtin_shufflevector(__s2_830, __s2_830, 1, 0); \ -float64x2_t __reint_830 = __rev2_830; \ -uint64x2_t __reint1_830 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_830, __p3_830), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_830, __p3_830)}; \ - __ret_830 = vcmla_rot180_f64(__s0_830, __s1_830, *(float64x1_t *) &__reint1_830); \ - __ret_830; \ +#define vcmla_rot180_laneq_f64(__p0_803, __p1_803, __p2_803, __p3_803) __extension__ ({ \ + float64x1_t __ret_803; \ + float64x1_t __s0_803 = __p0_803; \ + float64x1_t __s1_803 = __p1_803; \ + float64x2_t __s2_803 = __p2_803; \ + float64x2_t __rev2_803; __rev2_803 = __builtin_shufflevector(__s2_803, __s2_803, 1, 0); \ +float64x2_t __reint_803 = __rev2_803; \ +uint64x2_t __reint1_803 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_803, __p3_803), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_803, __p3_803)}; \ + __ret_803 = vcmla_rot180_f64(__s0_803, __s1_803, *(float64x1_t *) &__reint1_803); \ + __ret_803; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot180_laneq_f64(__p0_831, __p1_831, __p2_831, __p3_831) __extension__ ({ \ - float64x2_t __ret_831; \ - float64x2_t __s0_831 = __p0_831; \ - float64x2_t __s1_831 = __p1_831; \ - float64x2_t __s2_831 = __p2_831; \ -float64x2_t __reint_831 = __s2_831; \ -uint64x2_t __reint1_831 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_831, __p3_831), vgetq_lane_u64(*(uint64x2_t *) &__reint_831, __p3_831)}; \ - __ret_831 = vcmlaq_rot180_f64(__s0_831, __s1_831, *(float64x2_t *) &__reint1_831); \ - __ret_831; \ +#define vcmlaq_rot180_laneq_f64(__p0_804, __p1_804, __p2_804, __p3_804) __extension__ ({ \ + float64x2_t __ret_804; \ + float64x2_t __s0_804 = __p0_804; \ + float64x2_t __s1_804 = __p1_804; \ + float64x2_t __s2_804 = __p2_804; \ +float64x2_t __reint_804 = __s2_804; \ +uint64x2_t __reint1_804 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_804, __p3_804), vgetq_lane_u64(*(uint64x2_t *) &__reint_804, 
__p3_804)}; \ + __ret_804 = vcmlaq_rot180_f64(__s0_804, __s1_804, *(float64x2_t *) &__reint1_804); \ + __ret_804; \ }) #else -#define vcmlaq_rot180_laneq_f64(__p0_832, __p1_832, __p2_832, __p3_832) __extension__ ({ \ - float64x2_t __ret_832; \ - float64x2_t __s0_832 = __p0_832; \ - float64x2_t __s1_832 = __p1_832; \ - float64x2_t __s2_832 = __p2_832; \ - float64x2_t __rev0_832; __rev0_832 = __builtin_shufflevector(__s0_832, __s0_832, 1, 0); \ - float64x2_t __rev1_832; __rev1_832 = __builtin_shufflevector(__s1_832, __s1_832, 1, 0); \ - float64x2_t __rev2_832; __rev2_832 = __builtin_shufflevector(__s2_832, __s2_832, 1, 0); \ -float64x2_t __reint_832 = __rev2_832; \ -uint64x2_t __reint1_832 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_832, __p3_832), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_832, __p3_832)}; \ - __ret_832 = __noswap_vcmlaq_rot180_f64(__rev0_832, __rev1_832, *(float64x2_t *) &__reint1_832); \ - __ret_832 = __builtin_shufflevector(__ret_832, __ret_832, 1, 0); \ - __ret_832; \ +#define vcmlaq_rot180_laneq_f64(__p0_805, __p1_805, __p2_805, __p3_805) __extension__ ({ \ + float64x2_t __ret_805; \ + float64x2_t __s0_805 = __p0_805; \ + float64x2_t __s1_805 = __p1_805; \ + float64x2_t __s2_805 = __p2_805; \ + float64x2_t __rev0_805; __rev0_805 = __builtin_shufflevector(__s0_805, __s0_805, 1, 0); \ + float64x2_t __rev1_805; __rev1_805 = __builtin_shufflevector(__s1_805, __s1_805, 1, 0); \ + float64x2_t __rev2_805; __rev2_805 = __builtin_shufflevector(__s2_805, __s2_805, 1, 0); \ +float64x2_t __reint_805 = __rev2_805; \ +uint64x2_t __reint1_805 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_805, __p3_805), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_805, __p3_805)}; \ + __ret_805 = __noswap_vcmlaq_rot180_f64(__rev0_805, __rev1_805, *(float64x2_t *) &__reint1_805); \ + __ret_805 = __builtin_shufflevector(__ret_805, __ret_805, 1, 0); \ + __ret_805; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai 
__attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #else -__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -66022,116 +65852,116 @@ __ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot270_f64(float64x2_t __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai __attribute__((target("v8.3a"))) float64x2_t __noswap_vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t __noswap_vcmlaq_rot270_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcmlaq_rot270_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #endif -__ai __attribute__((target("v8.3a"))) float64x1_t vcmla_rot270_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x1_t vcmla_rot270_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vcmla_rot270_f64((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); return __ret; } -#define vcmla_rot270_lane_f64(__p0_833, __p1_833, __p2_833, __p3_833) __extension__ ({ \ - float64x1_t __ret_833; \ - float64x1_t __s0_833 = __p0_833; \ - float64x1_t __s1_833 = 
__p1_833; \ - float64x1_t __s2_833 = __p2_833; \ -float64x1_t __reint_833 = __s2_833; \ -uint64x2_t __reint1_833 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_833, __p3_833), vgetq_lane_u64(*(uint64x2_t *) &__reint_833, __p3_833)}; \ - __ret_833 = vcmla_rot270_f64(__s0_833, __s1_833, *(float64x1_t *) &__reint1_833); \ - __ret_833; \ +#define vcmla_rot270_lane_f64(__p0_806, __p1_806, __p2_806, __p3_806) __extension__ ({ \ + float64x1_t __ret_806; \ + float64x1_t __s0_806 = __p0_806; \ + float64x1_t __s1_806 = __p1_806; \ + float64x1_t __s2_806 = __p2_806; \ +float64x1_t __reint_806 = __s2_806; \ +uint64x2_t __reint1_806 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_806, __p3_806), vgetq_lane_u64(*(uint64x2_t *) &__reint_806, __p3_806)}; \ + __ret_806 = vcmla_rot270_f64(__s0_806, __s1_806, *(float64x1_t *) &__reint1_806); \ + __ret_806; \ }) #ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot270_lane_f64(__p0_834, __p1_834, __p2_834, __p3_834) __extension__ ({ \ - float64x2_t __ret_834; \ - float64x2_t __s0_834 = __p0_834; \ - float64x2_t __s1_834 = __p1_834; \ - float64x1_t __s2_834 = __p2_834; \ -float64x1_t __reint_834 = __s2_834; \ -uint64x2_t __reint1_834 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_834, __p3_834), vgetq_lane_u64(*(uint64x2_t *) &__reint_834, __p3_834)}; \ - __ret_834 = vcmlaq_rot270_f64(__s0_834, __s1_834, *(float64x2_t *) &__reint1_834); \ - __ret_834; \ +#define vcmlaq_rot270_lane_f64(__p0_807, __p1_807, __p2_807, __p3_807) __extension__ ({ \ + float64x2_t __ret_807; \ + float64x2_t __s0_807 = __p0_807; \ + float64x2_t __s1_807 = __p1_807; \ + float64x1_t __s2_807 = __p2_807; \ +float64x1_t __reint_807 = __s2_807; \ +uint64x2_t __reint1_807 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_807, __p3_807), vgetq_lane_u64(*(uint64x2_t *) &__reint_807, __p3_807)}; \ + __ret_807 = vcmlaq_rot270_f64(__s0_807, __s1_807, *(float64x2_t *) &__reint1_807); \ + __ret_807; \ }) #else -#define vcmlaq_rot270_lane_f64(__p0_835, 
__p1_835, __p2_835, __p3_835) __extension__ ({ \ - float64x2_t __ret_835; \ - float64x2_t __s0_835 = __p0_835; \ - float64x2_t __s1_835 = __p1_835; \ - float64x1_t __s2_835 = __p2_835; \ - float64x2_t __rev0_835; __rev0_835 = __builtin_shufflevector(__s0_835, __s0_835, 1, 0); \ - float64x2_t __rev1_835; __rev1_835 = __builtin_shufflevector(__s1_835, __s1_835, 1, 0); \ -float64x1_t __reint_835 = __s2_835; \ -uint64x2_t __reint1_835 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_835, __p3_835), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_835, __p3_835)}; \ - __ret_835 = __noswap_vcmlaq_rot270_f64(__rev0_835, __rev1_835, *(float64x2_t *) &__reint1_835); \ - __ret_835 = __builtin_shufflevector(__ret_835, __ret_835, 1, 0); \ - __ret_835; \ +#define vcmlaq_rot270_lane_f64(__p0_808, __p1_808, __p2_808, __p3_808) __extension__ ({ \ + float64x2_t __ret_808; \ + float64x2_t __s0_808 = __p0_808; \ + float64x2_t __s1_808 = __p1_808; \ + float64x1_t __s2_808 = __p2_808; \ + float64x2_t __rev0_808; __rev0_808 = __builtin_shufflevector(__s0_808, __s0_808, 1, 0); \ + float64x2_t __rev1_808; __rev1_808 = __builtin_shufflevector(__s1_808, __s1_808, 1, 0); \ +float64x1_t __reint_808 = __s2_808; \ +uint64x2_t __reint1_808 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_808, __p3_808), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_808, __p3_808)}; \ + __ret_808 = __noswap_vcmlaq_rot270_f64(__rev0_808, __rev1_808, *(float64x2_t *) &__reint1_808); \ + __ret_808 = __builtin_shufflevector(__ret_808, __ret_808, 1, 0); \ + __ret_808; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmla_rot270_laneq_f64(__p0_836, __p1_836, __p2_836, __p3_836) __extension__ ({ \ - float64x1_t __ret_836; \ - float64x1_t __s0_836 = __p0_836; \ - float64x1_t __s1_836 = __p1_836; \ - float64x2_t __s2_836 = __p2_836; \ -float64x2_t __reint_836 = __s2_836; \ -uint64x2_t __reint1_836 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_836, __p3_836), 
vgetq_lane_u64(*(uint64x2_t *) &__reint_836, __p3_836)}; \ - __ret_836 = vcmla_rot270_f64(__s0_836, __s1_836, *(float64x1_t *) &__reint1_836); \ - __ret_836; \ -}) -#else -#define vcmla_rot270_laneq_f64(__p0_837, __p1_837, __p2_837, __p3_837) __extension__ ({ \ - float64x1_t __ret_837; \ - float64x1_t __s0_837 = __p0_837; \ - float64x1_t __s1_837 = __p1_837; \ - float64x2_t __s2_837 = __p2_837; \ - float64x2_t __rev2_837; __rev2_837 = __builtin_shufflevector(__s2_837, __s2_837, 1, 0); \ -float64x2_t __reint_837 = __rev2_837; \ -uint64x2_t __reint1_837 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_837, __p3_837), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_837, __p3_837)}; \ - __ret_837 = vcmla_rot270_f64(__s0_837, __s1_837, *(float64x1_t *) &__reint1_837); \ - __ret_837; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot270_laneq_f64(__p0_838, __p1_838, __p2_838, __p3_838) __extension__ ({ \ - float64x2_t __ret_838; \ - float64x2_t __s0_838 = __p0_838; \ - float64x2_t __s1_838 = __p1_838; \ - float64x2_t __s2_838 = __p2_838; \ -float64x2_t __reint_838 = __s2_838; \ -uint64x2_t __reint1_838 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_838, __p3_838), vgetq_lane_u64(*(uint64x2_t *) &__reint_838, __p3_838)}; \ - __ret_838 = vcmlaq_rot270_f64(__s0_838, __s1_838, *(float64x2_t *) &__reint1_838); \ - __ret_838; \ -}) -#else -#define vcmlaq_rot270_laneq_f64(__p0_839, __p1_839, __p2_839, __p3_839) __extension__ ({ \ - float64x2_t __ret_839; \ - float64x2_t __s0_839 = __p0_839; \ - float64x2_t __s1_839 = __p1_839; \ - float64x2_t __s2_839 = __p2_839; \ - float64x2_t __rev0_839; __rev0_839 = __builtin_shufflevector(__s0_839, __s0_839, 1, 0); \ - float64x2_t __rev1_839; __rev1_839 = __builtin_shufflevector(__s1_839, __s1_839, 1, 0); \ - float64x2_t __rev2_839; __rev2_839 = __builtin_shufflevector(__s2_839, __s2_839, 1, 0); \ -float64x2_t __reint_839 = __rev2_839; \ -uint64x2_t __reint1_839 = (uint64x2_t) 
{__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_839, __p3_839), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_839, __p3_839)}; \ - __ret_839 = __noswap_vcmlaq_rot270_f64(__rev0_839, __rev1_839, *(float64x2_t *) &__reint1_839); \ - __ret_839 = __builtin_shufflevector(__ret_839, __ret_839, 1, 0); \ - __ret_839; \ +#define vcmla_rot270_laneq_f64(__p0_809, __p1_809, __p2_809, __p3_809) __extension__ ({ \ + float64x1_t __ret_809; \ + float64x1_t __s0_809 = __p0_809; \ + float64x1_t __s1_809 = __p1_809; \ + float64x2_t __s2_809 = __p2_809; \ +float64x2_t __reint_809 = __s2_809; \ +uint64x2_t __reint1_809 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_809, __p3_809), vgetq_lane_u64(*(uint64x2_t *) &__reint_809, __p3_809)}; \ + __ret_809 = vcmla_rot270_f64(__s0_809, __s1_809, *(float64x1_t *) &__reint1_809); \ + __ret_809; \ +}) +#else +#define vcmla_rot270_laneq_f64(__p0_810, __p1_810, __p2_810, __p3_810) __extension__ ({ \ + float64x1_t __ret_810; \ + float64x1_t __s0_810 = __p0_810; \ + float64x1_t __s1_810 = __p1_810; \ + float64x2_t __s2_810 = __p2_810; \ + float64x2_t __rev2_810; __rev2_810 = __builtin_shufflevector(__s2_810, __s2_810, 1, 0); \ +float64x2_t __reint_810 = __rev2_810; \ +uint64x2_t __reint1_810 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_810, __p3_810), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_810, __p3_810)}; \ + __ret_810 = vcmla_rot270_f64(__s0_810, __s1_810, *(float64x1_t *) &__reint1_810); \ + __ret_810; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +#define vcmlaq_rot270_laneq_f64(__p0_811, __p1_811, __p2_811, __p3_811) __extension__ ({ \ + float64x2_t __ret_811; \ + float64x2_t __s0_811 = __p0_811; \ + float64x2_t __s1_811 = __p1_811; \ + float64x2_t __s2_811 = __p2_811; \ +float64x2_t __reint_811 = __s2_811; \ +uint64x2_t __reint1_811 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) 
&__reint_811, __p3_811), vgetq_lane_u64(*(uint64x2_t *) &__reint_811, __p3_811)}; \ + __ret_811 = vcmlaq_rot270_f64(__s0_811, __s1_811, *(float64x2_t *) &__reint1_811); \ + __ret_811; \ +}) +#else +#define vcmlaq_rot270_laneq_f64(__p0_812, __p1_812, __p2_812, __p3_812) __extension__ ({ \ + float64x2_t __ret_812; \ + float64x2_t __s0_812 = __p0_812; \ + float64x2_t __s1_812 = __p1_812; \ + float64x2_t __s2_812 = __p2_812; \ + float64x2_t __rev0_812; __rev0_812 = __builtin_shufflevector(__s0_812, __s0_812, 1, 0); \ + float64x2_t __rev1_812; __rev1_812 = __builtin_shufflevector(__s1_812, __s1_812, 1, 0); \ + float64x2_t __rev2_812; __rev2_812 = __builtin_shufflevector(__s2_812, __s2_812, 1, 0); \ +float64x2_t __reint_812 = __rev2_812; \ +uint64x2_t __reint1_812 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_812, __p3_812), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_812, __p3_812)}; \ + __ret_812 = __noswap_vcmlaq_rot270_f64(__rev0_812, __rev1_812, *(float64x2_t *) &__reint1_812); \ + __ret_812 = __builtin_shufflevector(__ret_812, __ret_812, 1, 0); \ + __ret_812; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #else -__ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -66140,116 +65970,116 @@ __ai __attribute__((target("v8.3a"))) float64x2_t vcmlaq_rot90_f64(float64x2_t _ __ret = __builtin_shufflevector(__ret, __ret, 1, 0); 
return __ret; } -__ai __attribute__((target("v8.3a"))) float64x2_t __noswap_vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x2_t __noswap_vcmlaq_rot90_f64(float64x2_t __p0, float64x2_t __p1, float64x2_t __p2) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vcmlaq_rot90_f64((int8x16_t)__p0, (int8x16_t)__p1, (int8x16_t)__p2, 42); return __ret; } #endif -__ai __attribute__((target("v8.3a"))) float64x1_t vcmla_rot90_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { +__ai __attribute__((target("v8.3a,neon"))) float64x1_t vcmla_rot90_f64(float64x1_t __p0, float64x1_t __p1, float64x1_t __p2) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vcmla_rot90_f64((int8x8_t)__p0, (int8x8_t)__p1, (int8x8_t)__p2, 10); return __ret; } -#define vcmla_rot90_lane_f64(__p0_840, __p1_840, __p2_840, __p3_840) __extension__ ({ \ - float64x1_t __ret_840; \ - float64x1_t __s0_840 = __p0_840; \ - float64x1_t __s1_840 = __p1_840; \ - float64x1_t __s2_840 = __p2_840; \ -float64x1_t __reint_840 = __s2_840; \ -uint64x2_t __reint1_840 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_840, __p3_840), vgetq_lane_u64(*(uint64x2_t *) &__reint_840, __p3_840)}; \ - __ret_840 = vcmla_rot90_f64(__s0_840, __s1_840, *(float64x1_t *) &__reint1_840); \ - __ret_840; \ +#define vcmla_rot90_lane_f64(__p0_813, __p1_813, __p2_813, __p3_813) __extension__ ({ \ + float64x1_t __ret_813; \ + float64x1_t __s0_813 = __p0_813; \ + float64x1_t __s1_813 = __p1_813; \ + float64x1_t __s2_813 = __p2_813; \ +float64x1_t __reint_813 = __s2_813; \ +uint64x2_t __reint1_813 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_813, __p3_813), vgetq_lane_u64(*(uint64x2_t *) &__reint_813, __p3_813)}; \ + __ret_813 = vcmla_rot90_f64(__s0_813, __s1_813, *(float64x1_t *) &__reint1_813); \ + __ret_813; \ }) #ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot90_lane_f64(__p0_841, __p1_841, __p2_841, __p3_841) __extension__ ({ \ - 
float64x2_t __ret_841; \ - float64x2_t __s0_841 = __p0_841; \ - float64x2_t __s1_841 = __p1_841; \ - float64x1_t __s2_841 = __p2_841; \ -float64x1_t __reint_841 = __s2_841; \ -uint64x2_t __reint1_841 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_841, __p3_841), vgetq_lane_u64(*(uint64x2_t *) &__reint_841, __p3_841)}; \ - __ret_841 = vcmlaq_rot90_f64(__s0_841, __s1_841, *(float64x2_t *) &__reint1_841); \ - __ret_841; \ +#define vcmlaq_rot90_lane_f64(__p0_814, __p1_814, __p2_814, __p3_814) __extension__ ({ \ + float64x2_t __ret_814; \ + float64x2_t __s0_814 = __p0_814; \ + float64x2_t __s1_814 = __p1_814; \ + float64x1_t __s2_814 = __p2_814; \ +float64x1_t __reint_814 = __s2_814; \ +uint64x2_t __reint1_814 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_814, __p3_814), vgetq_lane_u64(*(uint64x2_t *) &__reint_814, __p3_814)}; \ + __ret_814 = vcmlaq_rot90_f64(__s0_814, __s1_814, *(float64x2_t *) &__reint1_814); \ + __ret_814; \ }) #else -#define vcmlaq_rot90_lane_f64(__p0_842, __p1_842, __p2_842, __p3_842) __extension__ ({ \ - float64x2_t __ret_842; \ - float64x2_t __s0_842 = __p0_842; \ - float64x2_t __s1_842 = __p1_842; \ - float64x1_t __s2_842 = __p2_842; \ - float64x2_t __rev0_842; __rev0_842 = __builtin_shufflevector(__s0_842, __s0_842, 1, 0); \ - float64x2_t __rev1_842; __rev1_842 = __builtin_shufflevector(__s1_842, __s1_842, 1, 0); \ -float64x1_t __reint_842 = __s2_842; \ -uint64x2_t __reint1_842 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_842, __p3_842), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_842, __p3_842)}; \ - __ret_842 = __noswap_vcmlaq_rot90_f64(__rev0_842, __rev1_842, *(float64x2_t *) &__reint1_842); \ - __ret_842 = __builtin_shufflevector(__ret_842, __ret_842, 1, 0); \ - __ret_842; \ +#define vcmlaq_rot90_lane_f64(__p0_815, __p1_815, __p2_815, __p3_815) __extension__ ({ \ + float64x2_t __ret_815; \ + float64x2_t __s0_815 = __p0_815; \ + float64x2_t __s1_815 = __p1_815; \ + float64x1_t __s2_815 = __p2_815; 
\ + float64x2_t __rev0_815; __rev0_815 = __builtin_shufflevector(__s0_815, __s0_815, 1, 0); \ + float64x2_t __rev1_815; __rev1_815 = __builtin_shufflevector(__s1_815, __s1_815, 1, 0); \ +float64x1_t __reint_815 = __s2_815; \ +uint64x2_t __reint1_815 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_815, __p3_815), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_815, __p3_815)}; \ + __ret_815 = __noswap_vcmlaq_rot90_f64(__rev0_815, __rev1_815, *(float64x2_t *) &__reint1_815); \ + __ret_815 = __builtin_shufflevector(__ret_815, __ret_815, 1, 0); \ + __ret_815; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmla_rot90_laneq_f64(__p0_843, __p1_843, __p2_843, __p3_843) __extension__ ({ \ - float64x1_t __ret_843; \ - float64x1_t __s0_843 = __p0_843; \ - float64x1_t __s1_843 = __p1_843; \ - float64x2_t __s2_843 = __p2_843; \ -float64x2_t __reint_843 = __s2_843; \ -uint64x2_t __reint1_843 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_843, __p3_843), vgetq_lane_u64(*(uint64x2_t *) &__reint_843, __p3_843)}; \ - __ret_843 = vcmla_rot90_f64(__s0_843, __s1_843, *(float64x1_t *) &__reint1_843); \ - __ret_843; \ +#define vcmla_rot90_laneq_f64(__p0_816, __p1_816, __p2_816, __p3_816) __extension__ ({ \ + float64x1_t __ret_816; \ + float64x1_t __s0_816 = __p0_816; \ + float64x1_t __s1_816 = __p1_816; \ + float64x2_t __s2_816 = __p2_816; \ +float64x2_t __reint_816 = __s2_816; \ +uint64x2_t __reint1_816 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_816, __p3_816), vgetq_lane_u64(*(uint64x2_t *) &__reint_816, __p3_816)}; \ + __ret_816 = vcmla_rot90_f64(__s0_816, __s1_816, *(float64x1_t *) &__reint1_816); \ + __ret_816; \ }) #else -#define vcmla_rot90_laneq_f64(__p0_844, __p1_844, __p2_844, __p3_844) __extension__ ({ \ - float64x1_t __ret_844; \ - float64x1_t __s0_844 = __p0_844; \ - float64x1_t __s1_844 = __p1_844; \ - float64x2_t __s2_844 = __p2_844; \ - float64x2_t __rev2_844; __rev2_844 = __builtin_shufflevector(__s2_844, __s2_844, 1, 0); \ 
-float64x2_t __reint_844 = __rev2_844; \ -uint64x2_t __reint1_844 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_844, __p3_844), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_844, __p3_844)}; \ - __ret_844 = vcmla_rot90_f64(__s0_844, __s1_844, *(float64x1_t *) &__reint1_844); \ - __ret_844; \ +#define vcmla_rot90_laneq_f64(__p0_817, __p1_817, __p2_817, __p3_817) __extension__ ({ \ + float64x1_t __ret_817; \ + float64x1_t __s0_817 = __p0_817; \ + float64x1_t __s1_817 = __p1_817; \ + float64x2_t __s2_817 = __p2_817; \ + float64x2_t __rev2_817; __rev2_817 = __builtin_shufflevector(__s2_817, __s2_817, 1, 0); \ +float64x2_t __reint_817 = __rev2_817; \ +uint64x2_t __reint1_817 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_817, __p3_817), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_817, __p3_817)}; \ + __ret_817 = vcmla_rot90_f64(__s0_817, __s1_817, *(float64x1_t *) &__reint1_817); \ + __ret_817; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcmlaq_rot90_laneq_f64(__p0_845, __p1_845, __p2_845, __p3_845) __extension__ ({ \ - float64x2_t __ret_845; \ - float64x2_t __s0_845 = __p0_845; \ - float64x2_t __s1_845 = __p1_845; \ - float64x2_t __s2_845 = __p2_845; \ -float64x2_t __reint_845 = __s2_845; \ -uint64x2_t __reint1_845 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_845, __p3_845), vgetq_lane_u64(*(uint64x2_t *) &__reint_845, __p3_845)}; \ - __ret_845 = vcmlaq_rot90_f64(__s0_845, __s1_845, *(float64x2_t *) &__reint1_845); \ - __ret_845; \ +#define vcmlaq_rot90_laneq_f64(__p0_818, __p1_818, __p2_818, __p3_818) __extension__ ({ \ + float64x2_t __ret_818; \ + float64x2_t __s0_818 = __p0_818; \ + float64x2_t __s1_818 = __p1_818; \ + float64x2_t __s2_818 = __p2_818; \ +float64x2_t __reint_818 = __s2_818; \ +uint64x2_t __reint1_818 = (uint64x2_t) {vgetq_lane_u64(*(uint64x2_t *) &__reint_818, __p3_818), vgetq_lane_u64(*(uint64x2_t *) &__reint_818, __p3_818)}; \ + __ret_818 = vcmlaq_rot90_f64(__s0_818, __s1_818, *(float64x2_t 
*) &__reint1_818); \ + __ret_818; \ }) #else -#define vcmlaq_rot90_laneq_f64(__p0_846, __p1_846, __p2_846, __p3_846) __extension__ ({ \ - float64x2_t __ret_846; \ - float64x2_t __s0_846 = __p0_846; \ - float64x2_t __s1_846 = __p1_846; \ - float64x2_t __s2_846 = __p2_846; \ - float64x2_t __rev0_846; __rev0_846 = __builtin_shufflevector(__s0_846, __s0_846, 1, 0); \ - float64x2_t __rev1_846; __rev1_846 = __builtin_shufflevector(__s1_846, __s1_846, 1, 0); \ - float64x2_t __rev2_846; __rev2_846 = __builtin_shufflevector(__s2_846, __s2_846, 1, 0); \ -float64x2_t __reint_846 = __rev2_846; \ -uint64x2_t __reint1_846 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_846, __p3_846), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_846, __p3_846)}; \ - __ret_846 = __noswap_vcmlaq_rot90_f64(__rev0_846, __rev1_846, *(float64x2_t *) &__reint1_846); \ - __ret_846 = __builtin_shufflevector(__ret_846, __ret_846, 1, 0); \ - __ret_846; \ +#define vcmlaq_rot90_laneq_f64(__p0_819, __p1_819, __p2_819, __p3_819) __extension__ ({ \ + float64x2_t __ret_819; \ + float64x2_t __s0_819 = __p0_819; \ + float64x2_t __s1_819 = __p1_819; \ + float64x2_t __s2_819 = __p2_819; \ + float64x2_t __rev0_819; __rev0_819 = __builtin_shufflevector(__s0_819, __s0_819, 1, 0); \ + float64x2_t __rev1_819; __rev1_819 = __builtin_shufflevector(__s1_819, __s1_819, 1, 0); \ + float64x2_t __rev2_819; __rev2_819 = __builtin_shufflevector(__s2_819, __s2_819, 1, 0); \ +float64x2_t __reint_819 = __rev2_819; \ +uint64x2_t __reint1_819 = (uint64x2_t) {__noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_819, __p3_819), __noswap_vgetq_lane_u64(*(uint64x2_t *) &__reint_819, __p3_819)}; \ + __ret_819 = __noswap_vcmlaq_rot90_f64(__rev0_819, __rev1_819, *(float64x2_t *) &__reint1_819); \ + __ret_819 = __builtin_shufflevector(__ret_819, __ret_819, 1, 0); \ + __ret_819; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.5a"))) float32x4_t vrnd32xq_f32(float32x4_t __p0) { +__ai 
__attribute__((target("v8.5a,neon"))) float32x4_t vrnd32xq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrnd32xq_f32((int8x16_t)__p0, 41); return __ret; } #else -__ai __attribute__((target("v8.5a"))) float32x4_t vrnd32xq_f32(float32x4_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd32xq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrnd32xq_f32((int8x16_t)__rev0, 41); @@ -66259,13 +66089,13 @@ __ai __attribute__((target("v8.5a"))) float32x4_t vrnd32xq_f32(float32x4_t __p0) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.5a"))) float32x2_t vrnd32x_f32(float32x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd32x_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrnd32x_f32((int8x8_t)__p0, 9); return __ret; } #else -__ai __attribute__((target("v8.5a"))) float32x2_t vrnd32x_f32(float32x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd32x_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrnd32x_f32((int8x8_t)__rev0, 9); @@ -66275,13 +66105,13 @@ __ai __attribute__((target("v8.5a"))) float32x2_t vrnd32x_f32(float32x2_t __p0) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.5a"))) float64x2_t vrnd32xq_f64(float64x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd32xq_f64(float64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vrnd32xq_f64((int8x16_t)__p0, 42); return __ret; } #else -__ai __attribute__((target("v8.5a"))) float64x2_t vrnd32xq_f64(float64x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd32xq_f64(float64x2_t __p0) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = 
(float64x2_t) __builtin_neon_vrnd32xq_f64((int8x16_t)__rev0, 42); @@ -66290,19 +66120,19 @@ __ai __attribute__((target("v8.5a"))) float64x2_t vrnd32xq_f64(float64x2_t __p0) } #endif -__ai __attribute__((target("v8.5a"))) float64x1_t vrnd32x_f64(float64x1_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float64x1_t vrnd32x_f64(float64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vrnd32x_f64((int8x8_t)__p0, 10); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.5a"))) float32x4_t vrnd32zq_f32(float32x4_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd32zq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrnd32zq_f32((int8x16_t)__p0, 41); return __ret; } #else -__ai __attribute__((target("v8.5a"))) float32x4_t vrnd32zq_f32(float32x4_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd32zq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrnd32zq_f32((int8x16_t)__rev0, 41); @@ -66312,13 +66142,13 @@ __ai __attribute__((target("v8.5a"))) float32x4_t vrnd32zq_f32(float32x4_t __p0) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.5a"))) float32x2_t vrnd32z_f32(float32x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd32z_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrnd32z_f32((int8x8_t)__p0, 9); return __ret; } #else -__ai __attribute__((target("v8.5a"))) float32x2_t vrnd32z_f32(float32x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd32z_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrnd32z_f32((int8x8_t)__rev0, 9); @@ -66328,13 +66158,13 @@ __ai __attribute__((target("v8.5a"))) float32x2_t vrnd32z_f32(float32x2_t __p0) #endif #ifdef 
__LITTLE_ENDIAN__ -__ai __attribute__((target("v8.5a"))) float64x2_t vrnd32zq_f64(float64x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd32zq_f64(float64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vrnd32zq_f64((int8x16_t)__p0, 42); return __ret; } #else -__ai __attribute__((target("v8.5a"))) float64x2_t vrnd32zq_f64(float64x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd32zq_f64(float64x2_t __p0) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vrnd32zq_f64((int8x16_t)__rev0, 42); @@ -66343,19 +66173,19 @@ __ai __attribute__((target("v8.5a"))) float64x2_t vrnd32zq_f64(float64x2_t __p0) } #endif -__ai __attribute__((target("v8.5a"))) float64x1_t vrnd32z_f64(float64x1_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float64x1_t vrnd32z_f64(float64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vrnd32z_f64((int8x8_t)__p0, 10); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.5a"))) float32x4_t vrnd64xq_f32(float32x4_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd64xq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrnd64xq_f32((int8x16_t)__p0, 41); return __ret; } #else -__ai __attribute__((target("v8.5a"))) float32x4_t vrnd64xq_f32(float32x4_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd64xq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrnd64xq_f32((int8x16_t)__rev0, 41); @@ -66365,13 +66195,13 @@ __ai __attribute__((target("v8.5a"))) float32x4_t vrnd64xq_f32(float32x4_t __p0) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.5a"))) float32x2_t vrnd64x_f32(float32x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd64x_f32(float32x2_t __p0) { 
float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrnd64x_f32((int8x8_t)__p0, 9); return __ret; } #else -__ai __attribute__((target("v8.5a"))) float32x2_t vrnd64x_f32(float32x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd64x_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrnd64x_f32((int8x8_t)__rev0, 9); @@ -66381,13 +66211,13 @@ __ai __attribute__((target("v8.5a"))) float32x2_t vrnd64x_f32(float32x2_t __p0) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.5a"))) float64x2_t vrnd64xq_f64(float64x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd64xq_f64(float64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vrnd64xq_f64((int8x16_t)__p0, 42); return __ret; } #else -__ai __attribute__((target("v8.5a"))) float64x2_t vrnd64xq_f64(float64x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd64xq_f64(float64x2_t __p0) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vrnd64xq_f64((int8x16_t)__rev0, 42); @@ -66396,19 +66226,19 @@ __ai __attribute__((target("v8.5a"))) float64x2_t vrnd64xq_f64(float64x2_t __p0) } #endif -__ai __attribute__((target("v8.5a"))) float64x1_t vrnd64x_f64(float64x1_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float64x1_t vrnd64x_f64(float64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vrnd64x_f64((int8x8_t)__p0, 10); return __ret; } #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.5a"))) float32x4_t vrnd64zq_f32(float32x4_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x4_t vrnd64zq_f32(float32x4_t __p0) { float32x4_t __ret; __ret = (float32x4_t) __builtin_neon_vrnd64zq_f32((int8x16_t)__p0, 41); return __ret; } #else -__ai __attribute__((target("v8.5a"))) float32x4_t vrnd64zq_f32(float32x4_t __p0) { +__ai 
__attribute__((target("v8.5a,neon"))) float32x4_t vrnd64zq_f32(float32x4_t __p0) { float32x4_t __ret; float32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); __ret = (float32x4_t) __builtin_neon_vrnd64zq_f32((int8x16_t)__rev0, 41); @@ -66418,13 +66248,13 @@ __ai __attribute__((target("v8.5a"))) float32x4_t vrnd64zq_f32(float32x4_t __p0) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.5a"))) float32x2_t vrnd64z_f32(float32x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd64z_f32(float32x2_t __p0) { float32x2_t __ret; __ret = (float32x2_t) __builtin_neon_vrnd64z_f32((int8x8_t)__p0, 9); return __ret; } #else -__ai __attribute__((target("v8.5a"))) float32x2_t vrnd64z_f32(float32x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float32x2_t vrnd64z_f32(float32x2_t __p0) { float32x2_t __ret; float32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float32x2_t) __builtin_neon_vrnd64z_f32((int8x8_t)__rev0, 9); @@ -66434,13 +66264,13 @@ __ai __attribute__((target("v8.5a"))) float32x2_t vrnd64z_f32(float32x2_t __p0) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("v8.5a"))) float64x2_t vrnd64zq_f64(float64x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd64zq_f64(float64x2_t __p0) { float64x2_t __ret; __ret = (float64x2_t) __builtin_neon_vrnd64zq_f64((int8x16_t)__p0, 42); return __ret; } #else -__ai __attribute__((target("v8.5a"))) float64x2_t vrnd64zq_f64(float64x2_t __p0) { +__ai __attribute__((target("v8.5a,neon"))) float64x2_t vrnd64zq_f64(float64x2_t __p0) { float64x2_t __ret; float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); __ret = (float64x2_t) __builtin_neon_vrnd64zq_f64((int8x16_t)__rev0, 42); @@ -66449,215 +66279,545 @@ __ai __attribute__((target("v8.5a"))) float64x2_t vrnd64zq_f64(float64x2_t __p0) } #endif -__ai __attribute__((target("v8.5a"))) float64x1_t vrnd64z_f64(float64x1_t __p0) { +__ai 
__attribute__((target("v8.5a,neon"))) float64x1_t vrnd64z_f64(float64x1_t __p0) { float64x1_t __ret; __ret = (float64x1_t) __builtin_neon_vrnd64z_f64((int8x8_t)__p0, 10); return __ret; } #endif -#if defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING) #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__p0, 42); - return __ret; -} +#define vbfdotq_lane_f32(__p0_820, __p1_820, __p2_820, __p3_820) __extension__ ({ \ + float32x4_t __ret_820; \ + float32x4_t __s0_820 = __p0_820; \ + bfloat16x8_t __s1_820 = __p1_820; \ + bfloat16x4_t __s2_820 = __p2_820; \ +bfloat16x4_t __reint_820 = __s2_820; \ +float32x4_t __reint1_820 = splatq_lane_f32(*(float32x2_t *) &__reint_820, __p3_820); \ + __ret_820 = vbfdotq_f32(__s0_820, __s1_820, *(bfloat16x8_t *) &__reint1_820); \ + __ret_820; \ +}) #else -__ai float64x2_t vrndq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vrndq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vbfdotq_lane_f32(__p0_821, __p1_821, __p2_821, __p3_821) __extension__ ({ \ + float32x4_t __ret_821; \ + float32x4_t __s0_821 = __p0_821; \ + bfloat16x8_t __s1_821 = __p1_821; \ + bfloat16x4_t __s2_821 = __p2_821; \ + float32x4_t __rev0_821; __rev0_821 = __builtin_shufflevector(__s0_821, __s0_821, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_821; __rev1_821 = __builtin_shufflevector(__s1_821, __s1_821, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_821; __rev2_821 = __builtin_shufflevector(__s2_821, __s2_821, 3, 2, 1, 0); \ +bfloat16x4_t __reint_821 = __rev2_821; \ +float32x4_t __reint1_821 = __noswap_splatq_lane_f32(*(float32x2_t *) &__reint_821, __p3_821); \ + __ret_821 = __noswap_vbfdotq_f32(__rev0_821, __rev1_821, *(bfloat16x8_t *) &__reint1_821); \ + __ret_821 = 
__builtin_shufflevector(__ret_821, __ret_821, 3, 2, 1, 0); \ + __ret_821; \ +}) #endif -__ai float64x1_t vrnd_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrnd_v((int8x8_t)__p0, 10); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndaq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__p0, 42); - return __ret; -} +#define vbfdot_lane_f32(__p0_822, __p1_822, __p2_822, __p3_822) __extension__ ({ \ + float32x2_t __ret_822; \ + float32x2_t __s0_822 = __p0_822; \ + bfloat16x4_t __s1_822 = __p1_822; \ + bfloat16x4_t __s2_822 = __p2_822; \ +bfloat16x4_t __reint_822 = __s2_822; \ +float32x2_t __reint1_822 = splat_lane_f32(*(float32x2_t *) &__reint_822, __p3_822); \ + __ret_822 = vbfdot_f32(__s0_822, __s1_822, *(bfloat16x4_t *) &__reint1_822); \ + __ret_822; \ +}) #else -__ai float64x2_t vrndaq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vrndaq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vbfdot_lane_f32(__p0_823, __p1_823, __p2_823, __p3_823) __extension__ ({ \ + float32x2_t __ret_823; \ + float32x2_t __s0_823 = __p0_823; \ + bfloat16x4_t __s1_823 = __p1_823; \ + bfloat16x4_t __s2_823 = __p2_823; \ + float32x2_t __rev0_823; __rev0_823 = __builtin_shufflevector(__s0_823, __s0_823, 1, 0); \ + bfloat16x4_t __rev1_823; __rev1_823 = __builtin_shufflevector(__s1_823, __s1_823, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_823; __rev2_823 = __builtin_shufflevector(__s2_823, __s2_823, 3, 2, 1, 0); \ +bfloat16x4_t __reint_823 = __rev2_823; \ +float32x2_t __reint1_823 = __noswap_splat_lane_f32(*(float32x2_t *) &__reint_823, __p3_823); \ + __ret_823 = __noswap_vbfdot_f32(__rev0_823, __rev1_823, *(bfloat16x4_t *) &__reint1_823); \ + __ret_823 = __builtin_shufflevector(__ret_823, __ret_823, 1, 0); \ + __ret_823; 
\ +}) #endif -__ai float64x1_t vrnda_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrnda_v((int8x8_t)__p0, 10); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndiq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__p0, 42); - return __ret; -} +#define vbfdotq_laneq_f32(__p0_824, __p1_824, __p2_824, __p3_824) __extension__ ({ \ + float32x4_t __ret_824; \ + float32x4_t __s0_824 = __p0_824; \ + bfloat16x8_t __s1_824 = __p1_824; \ + bfloat16x8_t __s2_824 = __p2_824; \ +bfloat16x8_t __reint_824 = __s2_824; \ +float32x4_t __reint1_824 = splatq_laneq_f32(*(float32x4_t *) &__reint_824, __p3_824); \ + __ret_824 = vbfdotq_f32(__s0_824, __s1_824, *(bfloat16x8_t *) &__reint1_824); \ + __ret_824; \ +}) #else -__ai float64x2_t vrndiq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vrndiq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vbfdotq_laneq_f32(__p0_825, __p1_825, __p2_825, __p3_825) __extension__ ({ \ + float32x4_t __ret_825; \ + float32x4_t __s0_825 = __p0_825; \ + bfloat16x8_t __s1_825 = __p1_825; \ + bfloat16x8_t __s2_825 = __p2_825; \ + float32x4_t __rev0_825; __rev0_825 = __builtin_shufflevector(__s0_825, __s0_825, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_825; __rev1_825 = __builtin_shufflevector(__s1_825, __s1_825, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_825; __rev2_825 = __builtin_shufflevector(__s2_825, __s2_825, 7, 6, 5, 4, 3, 2, 1, 0); \ +bfloat16x8_t __reint_825 = __rev2_825; \ +float32x4_t __reint1_825 = __noswap_splatq_laneq_f32(*(float32x4_t *) &__reint_825, __p3_825); \ + __ret_825 = __noswap_vbfdotq_f32(__rev0_825, __rev1_825, *(bfloat16x8_t *) &__reint1_825); \ + __ret_825 = __builtin_shufflevector(__ret_825, __ret_825, 3, 2, 1, 0); \ + __ret_825; \ +}) #endif -__ai 
float64x1_t vrndi_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrndi_v((int8x8_t)__p0, 10); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndmq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__p0, 42); - return __ret; -} +#define vbfdot_laneq_f32(__p0_826, __p1_826, __p2_826, __p3_826) __extension__ ({ \ + float32x2_t __ret_826; \ + float32x2_t __s0_826 = __p0_826; \ + bfloat16x4_t __s1_826 = __p1_826; \ + bfloat16x8_t __s2_826 = __p2_826; \ +bfloat16x8_t __reint_826 = __s2_826; \ +float32x2_t __reint1_826 = splat_laneq_f32(*(float32x4_t *) &__reint_826, __p3_826); \ + __ret_826 = vbfdot_f32(__s0_826, __s1_826, *(bfloat16x4_t *) &__reint1_826); \ + __ret_826; \ +}) #else -__ai float64x2_t vrndmq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vrndmq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vbfdot_laneq_f32(__p0_827, __p1_827, __p2_827, __p3_827) __extension__ ({ \ + float32x2_t __ret_827; \ + float32x2_t __s0_827 = __p0_827; \ + bfloat16x4_t __s1_827 = __p1_827; \ + bfloat16x8_t __s2_827 = __p2_827; \ + float32x2_t __rev0_827; __rev0_827 = __builtin_shufflevector(__s0_827, __s0_827, 1, 0); \ + bfloat16x4_t __rev1_827; __rev1_827 = __builtin_shufflevector(__s1_827, __s1_827, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_827; __rev2_827 = __builtin_shufflevector(__s2_827, __s2_827, 7, 6, 5, 4, 3, 2, 1, 0); \ +bfloat16x8_t __reint_827 = __rev2_827; \ +float32x2_t __reint1_827 = __noswap_splat_laneq_f32(*(float32x4_t *) &__reint_827, __p3_827); \ + __ret_827 = __noswap_vbfdot_f32(__rev0_827, __rev1_827, *(bfloat16x4_t *) &__reint1_827); \ + __ret_827 = __builtin_shufflevector(__ret_827, __ret_827, 1, 0); \ + __ret_827; \ +}) #endif -__ai float64x1_t vrndm_f64(float64x1_t __p0) { - float64x1_t 
__ret; - __ret = (float64x1_t) __builtin_neon_vrndm_v((int8x8_t)__p0, 10); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndnq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__p0, 42); - return __ret; +#define vbfmlalbq_lane_f32(__p0_828, __p1_828, __p2_828, __p3_828) __extension__ ({ \ + float32x4_t __ret_828; \ + float32x4_t __s0_828 = __p0_828; \ + bfloat16x8_t __s1_828 = __p1_828; \ + bfloat16x4_t __s2_828 = __p2_828; \ + __ret_828 = vbfmlalbq_f32(__s0_828, __s1_828, (bfloat16x8_t) {vget_lane_bf16(__s2_828, __p3_828), vget_lane_bf16(__s2_828, __p3_828), vget_lane_bf16(__s2_828, __p3_828), vget_lane_bf16(__s2_828, __p3_828), vget_lane_bf16(__s2_828, __p3_828), vget_lane_bf16(__s2_828, __p3_828), vget_lane_bf16(__s2_828, __p3_828), vget_lane_bf16(__s2_828, __p3_828)}); \ + __ret_828; \ +}) +#else +#define vbfmlalbq_lane_f32(__p0_829, __p1_829, __p2_829, __p3_829) __extension__ ({ \ + float32x4_t __ret_829; \ + float32x4_t __s0_829 = __p0_829; \ + bfloat16x8_t __s1_829 = __p1_829; \ + bfloat16x4_t __s2_829 = __p2_829; \ + float32x4_t __rev0_829; __rev0_829 = __builtin_shufflevector(__s0_829, __s0_829, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_829; __rev1_829 = __builtin_shufflevector(__s1_829, __s1_829, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_829; __rev2_829 = __builtin_shufflevector(__s2_829, __s2_829, 3, 2, 1, 0); \ + __ret_829 = __noswap_vbfmlalbq_f32(__rev0_829, __rev1_829, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_829, __p3_829), __noswap_vget_lane_bf16(__rev2_829, __p3_829), __noswap_vget_lane_bf16(__rev2_829, __p3_829), __noswap_vget_lane_bf16(__rev2_829, __p3_829), __noswap_vget_lane_bf16(__rev2_829, __p3_829), __noswap_vget_lane_bf16(__rev2_829, __p3_829), __noswap_vget_lane_bf16(__rev2_829, __p3_829), __noswap_vget_lane_bf16(__rev2_829, __p3_829)}); \ + __ret_829 = __builtin_shufflevector(__ret_829, __ret_829, 3, 2, 1, 0); \ + __ret_829; \ +}) +#endif + +#ifdef 
__LITTLE_ENDIAN__ +#define vbfmlalbq_laneq_f32(__p0_830, __p1_830, __p2_830, __p3_830) __extension__ ({ \ + float32x4_t __ret_830; \ + float32x4_t __s0_830 = __p0_830; \ + bfloat16x8_t __s1_830 = __p1_830; \ + bfloat16x8_t __s2_830 = __p2_830; \ + __ret_830 = vbfmlalbq_f32(__s0_830, __s1_830, (bfloat16x8_t) {vgetq_lane_bf16(__s2_830, __p3_830), vgetq_lane_bf16(__s2_830, __p3_830), vgetq_lane_bf16(__s2_830, __p3_830), vgetq_lane_bf16(__s2_830, __p3_830), vgetq_lane_bf16(__s2_830, __p3_830), vgetq_lane_bf16(__s2_830, __p3_830), vgetq_lane_bf16(__s2_830, __p3_830), vgetq_lane_bf16(__s2_830, __p3_830)}); \ + __ret_830; \ +}) +#else +#define vbfmlalbq_laneq_f32(__p0_831, __p1_831, __p2_831, __p3_831) __extension__ ({ \ + float32x4_t __ret_831; \ + float32x4_t __s0_831 = __p0_831; \ + bfloat16x8_t __s1_831 = __p1_831; \ + bfloat16x8_t __s2_831 = __p2_831; \ + float32x4_t __rev0_831; __rev0_831 = __builtin_shufflevector(__s0_831, __s0_831, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_831; __rev1_831 = __builtin_shufflevector(__s1_831, __s1_831, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_831; __rev2_831 = __builtin_shufflevector(__s2_831, __s2_831, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_831 = __noswap_vbfmlalbq_f32(__rev0_831, __rev1_831, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_831, __p3_831), __noswap_vgetq_lane_bf16(__rev2_831, __p3_831), __noswap_vgetq_lane_bf16(__rev2_831, __p3_831), __noswap_vgetq_lane_bf16(__rev2_831, __p3_831), __noswap_vgetq_lane_bf16(__rev2_831, __p3_831), __noswap_vgetq_lane_bf16(__rev2_831, __p3_831), __noswap_vgetq_lane_bf16(__rev2_831, __p3_831), __noswap_vgetq_lane_bf16(__rev2_831, __p3_831)}); \ + __ret_831 = __builtin_shufflevector(__ret_831, __ret_831, 3, 2, 1, 0); \ + __ret_831; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfmlaltq_lane_f32(__p0_832, __p1_832, __p2_832, __p3_832) __extension__ ({ \ + float32x4_t __ret_832; \ + float32x4_t __s0_832 = __p0_832; \ + bfloat16x8_t __s1_832 = __p1_832; \ + bfloat16x4_t __s2_832 = 
__p2_832; \ + __ret_832 = vbfmlaltq_f32(__s0_832, __s1_832, (bfloat16x8_t) {vget_lane_bf16(__s2_832, __p3_832), vget_lane_bf16(__s2_832, __p3_832), vget_lane_bf16(__s2_832, __p3_832), vget_lane_bf16(__s2_832, __p3_832), vget_lane_bf16(__s2_832, __p3_832), vget_lane_bf16(__s2_832, __p3_832), vget_lane_bf16(__s2_832, __p3_832), vget_lane_bf16(__s2_832, __p3_832)}); \ + __ret_832; \ +}) +#else +#define vbfmlaltq_lane_f32(__p0_833, __p1_833, __p2_833, __p3_833) __extension__ ({ \ + float32x4_t __ret_833; \ + float32x4_t __s0_833 = __p0_833; \ + bfloat16x8_t __s1_833 = __p1_833; \ + bfloat16x4_t __s2_833 = __p2_833; \ + float32x4_t __rev0_833; __rev0_833 = __builtin_shufflevector(__s0_833, __s0_833, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_833; __rev1_833 = __builtin_shufflevector(__s1_833, __s1_833, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x4_t __rev2_833; __rev2_833 = __builtin_shufflevector(__s2_833, __s2_833, 3, 2, 1, 0); \ + __ret_833 = __noswap_vbfmlaltq_f32(__rev0_833, __rev1_833, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_833, __p3_833), __noswap_vget_lane_bf16(__rev2_833, __p3_833), __noswap_vget_lane_bf16(__rev2_833, __p3_833), __noswap_vget_lane_bf16(__rev2_833, __p3_833), __noswap_vget_lane_bf16(__rev2_833, __p3_833), __noswap_vget_lane_bf16(__rev2_833, __p3_833), __noswap_vget_lane_bf16(__rev2_833, __p3_833), __noswap_vget_lane_bf16(__rev2_833, __p3_833)}); \ + __ret_833 = __builtin_shufflevector(__ret_833, __ret_833, 3, 2, 1, 0); \ + __ret_833; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vbfmlaltq_laneq_f32(__p0_834, __p1_834, __p2_834, __p3_834) __extension__ ({ \ + float32x4_t __ret_834; \ + float32x4_t __s0_834 = __p0_834; \ + bfloat16x8_t __s1_834 = __p1_834; \ + bfloat16x8_t __s2_834 = __p2_834; \ + __ret_834 = vbfmlaltq_f32(__s0_834, __s1_834, (bfloat16x8_t) {vgetq_lane_bf16(__s2_834, __p3_834), vgetq_lane_bf16(__s2_834, __p3_834), vgetq_lane_bf16(__s2_834, __p3_834), vgetq_lane_bf16(__s2_834, __p3_834), vgetq_lane_bf16(__s2_834, __p3_834), 
vgetq_lane_bf16(__s2_834, __p3_834), vgetq_lane_bf16(__s2_834, __p3_834), vgetq_lane_bf16(__s2_834, __p3_834)}); \ + __ret_834; \ +}) +#else +#define vbfmlaltq_laneq_f32(__p0_835, __p1_835, __p2_835, __p3_835) __extension__ ({ \ + float32x4_t __ret_835; \ + float32x4_t __s0_835 = __p0_835; \ + bfloat16x8_t __s1_835 = __p1_835; \ + bfloat16x8_t __s2_835 = __p2_835; \ + float32x4_t __rev0_835; __rev0_835 = __builtin_shufflevector(__s0_835, __s0_835, 3, 2, 1, 0); \ + bfloat16x8_t __rev1_835; __rev1_835 = __builtin_shufflevector(__s1_835, __s1_835, 7, 6, 5, 4, 3, 2, 1, 0); \ + bfloat16x8_t __rev2_835; __rev2_835 = __builtin_shufflevector(__s2_835, __s2_835, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_835 = __noswap_vbfmlaltq_f32(__rev0_835, __rev1_835, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_835, __p3_835), __noswap_vgetq_lane_bf16(__rev2_835, __p3_835), __noswap_vgetq_lane_bf16(__rev2_835, __p3_835), __noswap_vgetq_lane_bf16(__rev2_835, __p3_835), __noswap_vgetq_lane_bf16(__rev2_835, __p3_835), __noswap_vgetq_lane_bf16(__rev2_835, __p3_835), __noswap_vgetq_lane_bf16(__rev2_835, __p3_835), __noswap_vgetq_lane_bf16(__rev2_835, __p3_835)}); \ + __ret_835 = __builtin_shufflevector(__ret_835, __ret_835, 3, 2, 1, 0); \ + __ret_835; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("bf16,neon"))) float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_836) { + float32x4_t __ret_836; +bfloat16x4_t __reint_836 = __p0_836; +int32x4_t __reint1_836 = vshll_n_s16(*(int16x4_t *) &__reint_836, 16); + __ret_836 = *(float32x4_t *) &__reint1_836; + return __ret_836; } #else -__ai float64x2_t vrndnq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vrndnq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; +__ai __attribute__((target("bf16,neon"))) float32x4_t vcvt_f32_bf16(bfloat16x4_t __p0_837) { + float32x4_t __ret_837; + 
bfloat16x4_t __rev0_837; __rev0_837 = __builtin_shufflevector(__p0_837, __p0_837, 3, 2, 1, 0); +bfloat16x4_t __reint_837 = __rev0_837; +int32x4_t __reint1_837 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_837, 16); + __ret_837 = *(float32x4_t *) &__reint1_837; + __ret_837 = __builtin_shufflevector(__ret_837, __ret_837, 3, 2, 1, 0); + return __ret_837; +} +__ai __attribute__((target("bf16,neon"))) float32x4_t __noswap_vcvt_f32_bf16(bfloat16x4_t __p0_838) { + float32x4_t __ret_838; +bfloat16x4_t __reint_838 = __p0_838; +int32x4_t __reint1_838 = __noswap_vshll_n_s16(*(int16x4_t *) &__reint_838, 16); + __ret_838 = *(float32x4_t *) &__reint1_838; + return __ret_838; } #endif -__ai float64x1_t vrndn_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrndn_v((int8x8_t)__p0, 10); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vrndpq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__p0, 42); +__ai __attribute__((target("bf16,neon"))) float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + __ret = vcvt_f32_bf16(vget_high_bf16(__p0)); return __ret; } #else -__ai float64x2_t vrndpq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vrndpq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("bf16,neon"))) float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcvt_f32_bf16(__noswap_vget_high_bf16(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif -__ai float64x1_t vrndp_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrndp_v((int8x8_t)__p0, 10); - return __ret; -} #ifdef __LITTLE_ENDIAN__ 
-__ai float64x2_t vrndxq_f64(float64x2_t __p0) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__p0, 42); +__ai __attribute__((target("bf16,neon"))) float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + __ret = vcvt_f32_bf16(vget_low_bf16(__p0)); return __ret; } #else -__ai float64x2_t vrndxq_f64(float64x2_t __p0) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - __ret = (float64x2_t) __builtin_neon_vrndxq_v((int8x16_t)__rev0, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); +__ai __attribute__((target("bf16,neon"))) float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) { + float32x4_t __ret; + bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); + __ret = __noswap_vcvt_f32_bf16(__noswap_vget_low_bf16(__rev0)); + __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } #endif -__ai float64x1_t vrndx_f64(float64x1_t __p0) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vrndx_v((int8x8_t)__p0, 10); - return __ret; -} +#ifdef __LITTLE_ENDIAN__ +#define vdotq_lane_u32(__p0_839, __p1_839, __p2_839, __p3_839) __extension__ ({ \ + uint32x4_t __ret_839; \ + uint32x4_t __s0_839 = __p0_839; \ + uint8x16_t __s1_839 = __p1_839; \ + uint8x8_t __s2_839 = __p2_839; \ +uint8x8_t __reint_839 = __s2_839; \ +uint32x4_t __reint1_839 = splatq_lane_u32(*(uint32x2_t *) &__reint_839, __p3_839); \ + __ret_839 = vdotq_u32(__s0_839, __s1_839, *(uint8x16_t *) &__reint1_839); \ + __ret_839; \ +}) +#else +#define vdotq_lane_u32(__p0_840, __p1_840, __p2_840, __p3_840) __extension__ ({ \ + uint32x4_t __ret_840; \ + uint32x4_t __s0_840 = __p0_840; \ + uint8x16_t __s1_840 = __p1_840; \ + uint8x8_t __s2_840 = __p2_840; \ + uint32x4_t __rev0_840; __rev0_840 = __builtin_shufflevector(__s0_840, __s0_840, 3, 2, 1, 0); \ + uint8x16_t __rev1_840; __rev1_840 = __builtin_shufflevector(__s1_840, __s1_840, 15, 14, 13, 12, 11, 10, 
9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_840; __rev2_840 = __builtin_shufflevector(__s2_840, __s2_840, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x8_t __reint_840 = __rev2_840; \ +uint32x4_t __reint1_840 = __noswap_splatq_lane_u32(*(uint32x2_t *) &__reint_840, __p3_840); \ + __ret_840 = __noswap_vdotq_u32(__rev0_840, __rev1_840, *(uint8x16_t *) &__reint1_840); \ + __ret_840 = __builtin_shufflevector(__ret_840, __ret_840, 3, 2, 1, 0); \ + __ret_840; \ +}) #endif -#if defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN) + #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - return __ret; -} +#define vdotq_lane_s32(__p0_841, __p1_841, __p2_841, __p3_841) __extension__ ({ \ + int32x4_t __ret_841; \ + int32x4_t __s0_841 = __p0_841; \ + int8x16_t __s1_841 = __p1_841; \ + int8x8_t __s2_841 = __p2_841; \ +int8x8_t __reint_841 = __s2_841; \ +int32x4_t __reint1_841 = splatq_lane_s32(*(int32x2_t *) &__reint_841, __p3_841); \ + __ret_841 = vdotq_s32(__s0_841, __s1_841, *(int8x16_t *) &__reint1_841); \ + __ret_841; \ +}) #else -__ai float64x2_t vmaxnmq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float64x2_t) __builtin_neon_vmaxnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vdotq_lane_s32(__p0_842, __p1_842, __p2_842, __p3_842) __extension__ ({ \ + int32x4_t __ret_842; \ + int32x4_t __s0_842 = __p0_842; \ + int8x16_t __s1_842 = __p1_842; \ + int8x8_t __s2_842 = __p2_842; \ + int32x4_t __rev0_842; __rev0_842 = __builtin_shufflevector(__s0_842, __s0_842, 3, 2, 1, 0); \ + int8x16_t __rev1_842; __rev1_842 = __builtin_shufflevector(__s1_842, __s1_842, 15, 14, 13, 12, 
11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_842; __rev2_842 = __builtin_shufflevector(__s2_842, __s2_842, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x8_t __reint_842 = __rev2_842; \ +int32x4_t __reint1_842 = __noswap_splatq_lane_s32(*(int32x2_t *) &__reint_842, __p3_842); \ + __ret_842 = __noswap_vdotq_s32(__rev0_842, __rev1_842, *(int8x16_t *) &__reint1_842); \ + __ret_842 = __builtin_shufflevector(__ret_842, __ret_842, 3, 2, 1, 0); \ + __ret_842; \ +}) #endif -__ai float64x1_t vmaxnm_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vmaxnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10); - return __ret; -} #ifdef __LITTLE_ENDIAN__ -__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__p0, (int8x16_t)__p1, 42); - return __ret; -} +#define vdot_lane_u32(__p0_843, __p1_843, __p2_843, __p3_843) __extension__ ({ \ + uint32x2_t __ret_843; \ + uint32x2_t __s0_843 = __p0_843; \ + uint8x8_t __s1_843 = __p1_843; \ + uint8x8_t __s2_843 = __p2_843; \ +uint8x8_t __reint_843 = __s2_843; \ +uint32x2_t __reint1_843 = splat_lane_u32(*(uint32x2_t *) &__reint_843, __p3_843); \ + __ret_843 = vdot_u32(__s0_843, __s1_843, *(uint8x8_t *) &__reint1_843); \ + __ret_843; \ +}) #else -__ai float64x2_t vminnmq_f64(float64x2_t __p0, float64x2_t __p1) { - float64x2_t __ret; - float64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); - float64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); - __ret = (float64x2_t) __builtin_neon_vminnmq_v((int8x16_t)__rev0, (int8x16_t)__rev1, 42); - __ret = __builtin_shufflevector(__ret, __ret, 1, 0); - return __ret; -} +#define vdot_lane_u32(__p0_844, __p1_844, __p2_844, __p3_844) __extension__ ({ \ + uint32x2_t __ret_844; \ + uint32x2_t __s0_844 = __p0_844; \ + uint8x8_t __s1_844 = __p1_844; \ + uint8x8_t __s2_844 = __p2_844; \ + uint32x2_t __rev0_844; __rev0_844 = 
__builtin_shufflevector(__s0_844, __s0_844, 1, 0); \ + uint8x8_t __rev1_844; __rev1_844 = __builtin_shufflevector(__s1_844, __s1_844, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_844; __rev2_844 = __builtin_shufflevector(__s2_844, __s2_844, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x8_t __reint_844 = __rev2_844; \ +uint32x2_t __reint1_844 = __noswap_splat_lane_u32(*(uint32x2_t *) &__reint_844, __p3_844); \ + __ret_844 = __noswap_vdot_u32(__rev0_844, __rev1_844, *(uint8x8_t *) &__reint1_844); \ + __ret_844 = __builtin_shufflevector(__ret_844, __ret_844, 1, 0); \ + __ret_844; \ +}) #endif -__ai float64x1_t vminnm_f64(float64x1_t __p0, float64x1_t __p1) { - float64x1_t __ret; - __ret = (float64x1_t) __builtin_neon_vminnm_v((int8x8_t)__p0, (int8x8_t)__p1, 10); - return __ret; -} +#ifdef __LITTLE_ENDIAN__ +#define vdot_lane_s32(__p0_845, __p1_845, __p2_845, __p3_845) __extension__ ({ \ + int32x2_t __ret_845; \ + int32x2_t __s0_845 = __p0_845; \ + int8x8_t __s1_845 = __p1_845; \ + int8x8_t __s2_845 = __p2_845; \ +int8x8_t __reint_845 = __s2_845; \ +int32x2_t __reint1_845 = splat_lane_s32(*(int32x2_t *) &__reint_845, __p3_845); \ + __ret_845 = vdot_s32(__s0_845, __s1_845, *(int8x8_t *) &__reint1_845); \ + __ret_845; \ +}) +#else +#define vdot_lane_s32(__p0_846, __p1_846, __p2_846, __p3_846) __extension__ ({ \ + int32x2_t __ret_846; \ + int32x2_t __s0_846 = __p0_846; \ + int8x8_t __s1_846 = __p1_846; \ + int8x8_t __s2_846 = __p2_846; \ + int32x2_t __rev0_846; __rev0_846 = __builtin_shufflevector(__s0_846, __s0_846, 1, 0); \ + int8x8_t __rev1_846; __rev1_846 = __builtin_shufflevector(__s1_846, __s1_846, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_846; __rev2_846 = __builtin_shufflevector(__s2_846, __s2_846, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x8_t __reint_846 = __rev2_846; \ +int32x2_t __reint1_846 = __noswap_splat_lane_s32(*(int32x2_t *) &__reint_846, __p3_846); \ + __ret_846 = __noswap_vdot_s32(__rev0_846, __rev1_846, *(int8x8_t *) &__reint1_846); \ + __ret_846 = 
__builtin_shufflevector(__ret_846, __ret_846, 1, 0); \ + __ret_846; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulq_lane_f16(__p0_847, __p1_847, __p2_847) __extension__ ({ \ + float16x8_t __ret_847; \ + float16x8_t __s0_847 = __p0_847; \ + float16x4_t __s1_847 = __p1_847; \ + __ret_847 = __s0_847 * splatq_lane_f16(__s1_847, __p2_847); \ + __ret_847; \ +}) +#else +#define vmulq_lane_f16(__p0_848, __p1_848, __p2_848) __extension__ ({ \ + float16x8_t __ret_848; \ + float16x8_t __s0_848 = __p0_848; \ + float16x4_t __s1_848 = __p1_848; \ + float16x8_t __rev0_848; __rev0_848 = __builtin_shufflevector(__s0_848, __s0_848, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev1_848; __rev1_848 = __builtin_shufflevector(__s1_848, __s1_848, 3, 2, 1, 0); \ + __ret_848 = __rev0_848 * __noswap_splatq_lane_f16(__rev1_848, __p2_848); \ + __ret_848 = __builtin_shufflevector(__ret_848, __ret_848, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_848; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmul_lane_f16(__p0_849, __p1_849, __p2_849) __extension__ ({ \ + float16x4_t __ret_849; \ + float16x4_t __s0_849 = __p0_849; \ + float16x4_t __s1_849 = __p1_849; \ + __ret_849 = __s0_849 * splat_lane_f16(__s1_849, __p2_849); \ + __ret_849; \ +}) +#else +#define vmul_lane_f16(__p0_850, __p1_850, __p2_850) __extension__ ({ \ + float16x4_t __ret_850; \ + float16x4_t __s0_850 = __p0_850; \ + float16x4_t __s1_850 = __p1_850; \ + float16x4_t __rev0_850; __rev0_850 = __builtin_shufflevector(__s0_850, __s0_850, 3, 2, 1, 0); \ + float16x4_t __rev1_850; __rev1_850 = __builtin_shufflevector(__s1_850, __s1_850, 3, 2, 1, 0); \ + __ret_850 = __rev0_850 * __noswap_splat_lane_f16(__rev1_850, __p2_850); \ + __ret_850 = __builtin_shufflevector(__ret_850, __ret_850, 3, 2, 1, 0); \ + __ret_850; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsudotq_lane_s32(__p0_851, __p1_851, __p2_851, __p3_851) __extension__ ({ \ + int32x4_t __ret_851; \ + int32x4_t __s0_851 = __p0_851; \ + int8x16_t __s1_851 = __p1_851; \ + 
uint8x8_t __s2_851 = __p2_851; \ +uint8x8_t __reint_851 = __s2_851; \ + __ret_851 = vusdotq_s32(__s0_851, (uint8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_851, __p3_851)), __s1_851); \ + __ret_851; \ +}) +#else +#define vsudotq_lane_s32(__p0_852, __p1_852, __p2_852, __p3_852) __extension__ ({ \ + int32x4_t __ret_852; \ + int32x4_t __s0_852 = __p0_852; \ + int8x16_t __s1_852 = __p1_852; \ + uint8x8_t __s2_852 = __p2_852; \ + int32x4_t __rev0_852; __rev0_852 = __builtin_shufflevector(__s0_852, __s0_852, 3, 2, 1, 0); \ + int8x16_t __rev1_852; __rev1_852 = __builtin_shufflevector(__s1_852, __s1_852, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_852; __rev2_852 = __builtin_shufflevector(__s2_852, __s2_852, 7, 6, 5, 4, 3, 2, 1, 0); \ +uint8x8_t __reint_852 = __rev2_852; \ + __ret_852 = __noswap_vusdotq_s32(__rev0_852, (uint8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_852, __p3_852)), __rev1_852); \ + __ret_852 = __builtin_shufflevector(__ret_852, __ret_852, 3, 2, 1, 0); \ + __ret_852; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vsudot_lane_s32(__p0_853, __p1_853, __p2_853, __p3_853) __extension__ ({ \ + int32x2_t __ret_853; \ + int32x2_t __s0_853 = __p0_853; \ + int8x8_t __s1_853 = __p1_853; \ + uint8x8_t __s2_853 = __p2_853; \ +uint8x8_t __reint_853 = __s2_853; \ + __ret_853 = vusdot_s32(__s0_853, (uint8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_853, __p3_853)), __s1_853); \ + __ret_853; \ +}) +#else +#define vsudot_lane_s32(__p0_854, __p1_854, __p2_854, __p3_854) __extension__ ({ \ + int32x2_t __ret_854; \ + int32x2_t __s0_854 = __p0_854; \ + int8x8_t __s1_854 = __p1_854; \ + uint8x8_t __s2_854 = __p2_854; \ + int32x2_t __rev0_854; __rev0_854 = __builtin_shufflevector(__s0_854, __s0_854, 1, 0); \ + int8x8_t __rev1_854; __rev1_854 = __builtin_shufflevector(__s1_854, __s1_854, 7, 6, 5, 4, 3, 2, 1, 0); \ + uint8x8_t __rev2_854; __rev2_854 = __builtin_shufflevector(__s2_854, __s2_854, 7, 6, 5, 4, 3, 2, 1, 0); \ 
+uint8x8_t __reint_854 = __rev2_854; \ + __ret_854 = __noswap_vusdot_s32(__rev0_854, (uint8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_854, __p3_854)), __rev1_854); \ + __ret_854 = __builtin_shufflevector(__ret_854, __ret_854, 1, 0); \ + __ret_854; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vusdotq_lane_s32(__p0_855, __p1_855, __p2_855, __p3_855) __extension__ ({ \ + int32x4_t __ret_855; \ + int32x4_t __s0_855 = __p0_855; \ + uint8x16_t __s1_855 = __p1_855; \ + int8x8_t __s2_855 = __p2_855; \ +int8x8_t __reint_855 = __s2_855; \ + __ret_855 = vusdotq_s32(__s0_855, __s1_855, (int8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_855, __p3_855))); \ + __ret_855; \ +}) +#else +#define vusdotq_lane_s32(__p0_856, __p1_856, __p2_856, __p3_856) __extension__ ({ \ + int32x4_t __ret_856; \ + int32x4_t __s0_856 = __p0_856; \ + uint8x16_t __s1_856 = __p1_856; \ + int8x8_t __s2_856 = __p2_856; \ + int32x4_t __rev0_856; __rev0_856 = __builtin_shufflevector(__s0_856, __s0_856, 3, 2, 1, 0); \ + uint8x16_t __rev1_856; __rev1_856 = __builtin_shufflevector(__s1_856, __s1_856, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_856; __rev2_856 = __builtin_shufflevector(__s2_856, __s2_856, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x8_t __reint_856 = __rev2_856; \ + __ret_856 = __noswap_vusdotq_s32(__rev0_856, __rev1_856, (int8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_856, __p3_856))); \ + __ret_856 = __builtin_shufflevector(__ret_856, __ret_856, 3, 2, 1, 0); \ + __ret_856; \ +}) #endif + #ifdef __LITTLE_ENDIAN__ -__ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +#define vusdot_lane_s32(__p0_857, __p1_857, __p2_857, __p3_857) __extension__ ({ \ + int32x2_t __ret_857; \ + int32x2_t __s0_857 = __p0_857; \ + uint8x8_t __s1_857 = __p1_857; \ + int8x8_t __s2_857 = __p2_857; \ +int8x8_t __reint_857 = __s2_857; \ + __ret_857 = vusdot_s32(__s0_857, __s1_857, (int8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_857, __p3_857))); 
\ + __ret_857; \ +}) +#else +#define vusdot_lane_s32(__p0_858, __p1_858, __p2_858, __p3_858) __extension__ ({ \ + int32x2_t __ret_858; \ + int32x2_t __s0_858 = __p0_858; \ + uint8x8_t __s1_858 = __p1_858; \ + int8x8_t __s2_858 = __p2_858; \ + int32x2_t __rev0_858; __rev0_858 = __builtin_shufflevector(__s0_858, __s0_858, 1, 0); \ + uint8x8_t __rev1_858; __rev1_858 = __builtin_shufflevector(__s1_858, __s1_858, 7, 6, 5, 4, 3, 2, 1, 0); \ + int8x8_t __rev2_858; __rev2_858 = __builtin_shufflevector(__s2_858, __s2_858, 7, 6, 5, 4, 3, 2, 1, 0); \ +int8x8_t __reint_858 = __rev2_858; \ + __ret_858 = __noswap_vusdot_s32(__rev0_858, __rev1_858, (int8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_858, __p3_858))); \ + __ret_858 = __builtin_shufflevector(__ret_858, __ret_858, 1, 0); \ + __ret_858; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; __ret = __p0 + vabdq_u8(__p1, __p2); return __ret; } #else -__ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint8x16_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -66669,13 +66829,13 @@ __ai uint8x16_t vabaq_u8(uint8x16_t __p0, uint8x16_t __p1, uint8x16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; __ret = __p0 + vabdq_u32(__p1, __p2); return __ret; } #else -__ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai 
__attribute__((target("neon"))) uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -66687,13 +66847,13 @@ __ai uint32x4_t vabaq_u32(uint32x4_t __p0, uint32x4_t __p1, uint32x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; __ret = __p0 + vabdq_u16(__p1, __p2); return __ret; } #else -__ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -66705,13 +66865,13 @@ __ai uint16x8_t vabaq_u16(uint16x8_t __p0, uint16x8_t __p1, uint16x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("neon"))) int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; __ret = __p0 + vabdq_s8(__p1, __p2); return __ret; } #else -__ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("neon"))) int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t __p2) { int8x16_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -66723,13 +66883,13 @@ __ai int8x16_t vabaq_s8(int8x16_t __p0, int8x16_t __p1, int8x16_t 
__p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; __ret = __p0 + vabdq_s32(__p1, __p2); return __ret; } #else -__ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -66741,13 +66901,13 @@ __ai int32x4_t vabaq_s32(int32x4_t __p0, int32x4_t __p1, int32x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; __ret = __p0 + vabdq_s16(__p1, __p2); return __ret; } #else -__ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -66759,13 +66919,13 @@ __ai int16x8_t vabaq_s16(int16x8_t __p0, int16x8_t __p1, int16x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint8x8_t __ret; __ret = __p0 + vabd_u8(__p1, __p2); return __ret; } #else -__ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { 
uint8x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -66777,13 +66937,13 @@ __ai uint8x8_t vaba_u8(uint8x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { +__ai __attribute__((target("neon"))) uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint32x2_t __ret; __ret = __p0 + vabd_u32(__p1, __p2); return __ret; } #else -__ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { +__ai __attribute__((target("neon"))) uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint32x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -66795,13 +66955,13 @@ __ai uint32x2_t vaba_u32(uint32x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { +__ai __attribute__((target("neon"))) uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint16x4_t __ret; __ret = __p0 + vabd_u16(__p1, __p2); return __ret; } #else -__ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { +__ai __attribute__((target("neon"))) uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint16x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -66813,13 +66973,13 @@ __ai uint16x4_t vaba_u16(uint16x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, 
int8x8_t __p2) { int8x8_t __ret; __ret = __p0 + vabd_s8(__p1, __p2); return __ret; } #else -__ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int8x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -66831,13 +66991,13 @@ __ai int8x8_t vaba_s8(int8x8_t __p0, int8x8_t __p1, int8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; __ret = __p0 + vabd_s32(__p1, __p2); return __ret; } #else -__ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int32x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -66849,13 +67009,13 @@ __ai int32x2_t vaba_s32(int32x2_t __p0, int32x2_t __p1, int32x2_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; __ret = __p0 + vabd_s16(__p1, __p2); return __ret; } #else -__ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int16x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -66867,13 +67027,13 @@ __ai int16x4_t vaba_s16(int16x4_t __p0, int16x4_t __p1, 
int16x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(vmovl_u8((uint8x8_t)(vabd_u8(__p0, __p1)))); return __ret; } #else -__ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -66881,7 +67041,7 @@ __ai uint16x8_t vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint16x8_t __noswap_vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; __ret = (uint16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_u8(__p0, __p1)))); return __ret; @@ -66889,13 +67049,13 @@ __ai uint16x8_t __noswap_vabdl_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(vmovl_u32((uint32x2_t)(vabd_u32(__p0, __p1)))); return __ret; } #else -__ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -66903,7 +67063,7 @@ __ai uint64x2_t vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint64x2_t 
__noswap_vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; __ret = (uint64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_u32(__p0, __p1)))); return __ret; @@ -66911,13 +67071,13 @@ __ai uint64x2_t __noswap_vabdl_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(vmovl_u16((uint16x4_t)(vabd_u16(__p0, __p1)))); return __ret; } #else -__ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -66925,7 +67085,7 @@ __ai uint32x4_t vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint32x4_t __noswap_vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; __ret = (uint32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_u16(__p0, __p1)))); return __ret; @@ -66933,13 +67093,13 @@ __ai uint32x4_t __noswap_vabdl_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t)(vmovl_u8((uint8x8_t)(vabd_s8(__p0, __p1)))); return __ret; } #else -__ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) { int16x8_t 
__ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -66947,7 +67107,7 @@ __ai int16x8_t vabdl_s8(int8x8_t __p0, int8x8_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int16x8_t __noswap_vabdl_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t __noswap_vabdl_s8(int8x8_t __p0, int8x8_t __p1) { int16x8_t __ret; __ret = (int16x8_t)(__noswap_vmovl_u8((uint8x8_t)(__noswap_vabd_s8(__p0, __p1)))); return __ret; @@ -66955,13 +67115,13 @@ __ai int16x8_t __noswap_vabdl_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t)(vmovl_u32((uint32x2_t)(vabd_s32(__p0, __p1)))); return __ret; } #else -__ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -66969,7 +67129,7 @@ __ai int64x2_t vabdl_s32(int32x2_t __p0, int32x2_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int64x2_t __noswap_vabdl_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t __noswap_vabdl_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; __ret = (int64x2_t)(__noswap_vmovl_u32((uint32x2_t)(__noswap_vabd_s32(__p0, __p1)))); return __ret; @@ -66977,13 +67137,13 @@ __ai int64x2_t __noswap_vabdl_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t 
vabdl_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t)(vmovl_u16((uint16x4_t)(vabd_s16(__p0, __p1)))); return __ret; } #else -__ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -66991,7 +67151,7 @@ __ai int32x4_t vabdl_s16(int16x4_t __p0, int16x4_t __p1) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vabdl_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vabdl_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; __ret = (int32x4_t)(__noswap_vmovl_u16((uint16x4_t)(__noswap_vabd_s16(__p0, __p1)))); return __ret; @@ -66999,13 +67159,13 @@ __ai int32x4_t __noswap_vabdl_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; __ret = vmovl_u8(__p0) + vmovl_u8(__p1); return __ret; } #else -__ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; uint8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -67016,13 +67176,13 @@ __ai uint16x8_t vaddl_u8(uint8x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; __ret = vmovl_u32(__p0) + vmovl_u32(__p1); return __ret; } #else -__ai uint64x2_t 
vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; uint32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -67033,13 +67193,13 @@ __ai uint64x2_t vaddl_u32(uint32x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; __ret = vmovl_u16(__p0) + vmovl_u16(__p1); return __ret; } #else -__ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; uint16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -67050,13 +67210,13 @@ __ai uint32x4_t vaddl_u16(uint16x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) { int16x8_t __ret; __ret = vmovl_s8(__p0) + vmovl_s8(__p1); return __ret; } #else -__ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) { int16x8_t __ret; int8x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -67067,13 +67227,13 @@ __ai int16x8_t vaddl_s8(int8x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; __ret = vmovl_s32(__p0) + 
vmovl_s32(__p1); return __ret; } #else -__ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) { int64x2_t __ret; int32x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -67084,13 +67244,13 @@ __ai int64x2_t vaddl_s32(int32x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; __ret = vmovl_s16(__p0) + vmovl_s16(__p1); return __ret; } #else -__ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) { int32x4_t __ret; int16x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -67101,13 +67261,13 @@ __ai int32x4_t vaddl_s16(int16x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; __ret = __p0 + vmovl_u8(__p1); return __ret; } #else -__ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -67118,13 +67278,13 @@ __ai uint16x8_t vaddw_u8(uint16x8_t __p0, uint8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) { uint64x2_t 
__ret; __ret = __p0 + vmovl_u32(__p1); return __ret; } #else -__ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -67135,13 +67295,13 @@ __ai uint64x2_t vaddw_u32(uint64x2_t __p0, uint32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; __ret = __p0 + vmovl_u16(__p1); return __ret; } #else -__ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -67152,13 +67312,13 @@ __ai uint32x4_t vaddw_u16(uint32x4_t __p0, uint16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) { int16x8_t __ret; __ret = __p0 + vmovl_s8(__p1); return __ret; } #else -__ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -67169,13 +67329,13 @@ __ai int16x8_t vaddw_s8(int16x8_t __p0, int8x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t 
__p1) { int64x2_t __ret; __ret = __p0 + vmovl_s32(__p1); return __ret; } #else -__ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -67186,13 +67346,13 @@ __ai int64x2_t vaddw_s32(int64x2_t __p0, int32x2_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) { int32x4_t __ret; __ret = __p0 + vmovl_s16(__p1); return __ret; } #else -__ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -67203,71 +67363,71 @@ __ai int32x4_t vaddw_s16(int32x4_t __p0, int16x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vget_lane_f16(__p0_847, __p1_847) __extension__ ({ \ - float16_t __ret_847; \ - float16x4_t __s0_847 = __p0_847; \ -float16x4_t __reint_847 = __s0_847; \ -int16_t __reint1_847 = vget_lane_s16(*(int16x4_t *) &__reint_847, __p1_847); \ - __ret_847 = *(float16_t *) &__reint1_847; \ - __ret_847; \ +#define vget_lane_f16(__p0_859, __p1_859) __extension__ ({ \ + float16_t __ret_859; \ + float16x4_t __s0_859 = __p0_859; \ +float16x4_t __reint_859 = __s0_859; \ +int16_t __reint1_859 = vget_lane_s16(*(int16x4_t *) &__reint_859, __p1_859); \ + __ret_859 = *(float16_t *) &__reint1_859; \ + __ret_859; \ }) #else -#define vget_lane_f16(__p0_848, __p1_848) __extension__ ({ \ - float16_t __ret_848; \ - float16x4_t __s0_848 = __p0_848; \ - float16x4_t __rev0_848; __rev0_848 = __builtin_shufflevector(__s0_848, __s0_848, 3, 2, 1, 
0); \ -float16x4_t __reint_848 = __rev0_848; \ -int16_t __reint1_848 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_848, __p1_848); \ - __ret_848 = *(float16_t *) &__reint1_848; \ - __ret_848; \ +#define vget_lane_f16(__p0_860, __p1_860) __extension__ ({ \ + float16_t __ret_860; \ + float16x4_t __s0_860 = __p0_860; \ + float16x4_t __rev0_860; __rev0_860 = __builtin_shufflevector(__s0_860, __s0_860, 3, 2, 1, 0); \ +float16x4_t __reint_860 = __rev0_860; \ +int16_t __reint1_860 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_860, __p1_860); \ + __ret_860 = *(float16_t *) &__reint1_860; \ + __ret_860; \ }) -#define __noswap_vget_lane_f16(__p0_849, __p1_849) __extension__ ({ \ - float16_t __ret_849; \ - float16x4_t __s0_849 = __p0_849; \ -float16x4_t __reint_849 = __s0_849; \ -int16_t __reint1_849 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_849, __p1_849); \ - __ret_849 = *(float16_t *) &__reint1_849; \ - __ret_849; \ +#define __noswap_vget_lane_f16(__p0_861, __p1_861) __extension__ ({ \ + float16_t __ret_861; \ + float16x4_t __s0_861 = __p0_861; \ +float16x4_t __reint_861 = __s0_861; \ +int16_t __reint1_861 = __noswap_vget_lane_s16(*(int16x4_t *) &__reint_861, __p1_861); \ + __ret_861 = *(float16_t *) &__reint1_861; \ + __ret_861; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vgetq_lane_f16(__p0_850, __p1_850) __extension__ ({ \ - float16_t __ret_850; \ - float16x8_t __s0_850 = __p0_850; \ -float16x8_t __reint_850 = __s0_850; \ -int16_t __reint1_850 = vgetq_lane_s16(*(int16x8_t *) &__reint_850, __p1_850); \ - __ret_850 = *(float16_t *) &__reint1_850; \ - __ret_850; \ +#define vgetq_lane_f16(__p0_862, __p1_862) __extension__ ({ \ + float16_t __ret_862; \ + float16x8_t __s0_862 = __p0_862; \ +float16x8_t __reint_862 = __s0_862; \ +int16_t __reint1_862 = vgetq_lane_s16(*(int16x8_t *) &__reint_862, __p1_862); \ + __ret_862 = *(float16_t *) &__reint1_862; \ + __ret_862; \ }) #else -#define vgetq_lane_f16(__p0_851, __p1_851) __extension__ ({ \ - float16_t __ret_851; 
\ - float16x8_t __s0_851 = __p0_851; \ - float16x8_t __rev0_851; __rev0_851 = __builtin_shufflevector(__s0_851, __s0_851, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16x8_t __reint_851 = __rev0_851; \ -int16_t __reint1_851 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_851, __p1_851); \ - __ret_851 = *(float16_t *) &__reint1_851; \ - __ret_851; \ +#define vgetq_lane_f16(__p0_863, __p1_863) __extension__ ({ \ + float16_t __ret_863; \ + float16x8_t __s0_863 = __p0_863; \ + float16x8_t __rev0_863; __rev0_863 = __builtin_shufflevector(__s0_863, __s0_863, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16x8_t __reint_863 = __rev0_863; \ +int16_t __reint1_863 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_863, __p1_863); \ + __ret_863 = *(float16_t *) &__reint1_863; \ + __ret_863; \ }) -#define __noswap_vgetq_lane_f16(__p0_852, __p1_852) __extension__ ({ \ - float16_t __ret_852; \ - float16x8_t __s0_852 = __p0_852; \ -float16x8_t __reint_852 = __s0_852; \ -int16_t __reint1_852 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_852, __p1_852); \ - __ret_852 = *(float16_t *) &__reint1_852; \ - __ret_852; \ +#define __noswap_vgetq_lane_f16(__p0_864, __p1_864) __extension__ ({ \ + float16_t __ret_864; \ + float16x8_t __s0_864 = __p0_864; \ +float16x8_t __reint_864 = __s0_864; \ +int16_t __reint1_864 = __noswap_vgetq_lane_s16(*(int16x8_t *) &__reint_864, __p1_864); \ + __ret_864 = *(float16_t *) &__reint1_864; \ + __ret_864; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint16x8_t __ret; __ret = __p0 + vmull_u8(__p1, __p2); return __ret; } #else -__ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 
2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -67276,7 +67436,7 @@ __ai uint16x8_t vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint16x8_t __noswap_vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint16x8_t __ret; __ret = __p0 + __noswap_vmull_u8(__p1, __p2); return __ret; @@ -67284,13 +67444,13 @@ __ai uint16x8_t __noswap_vmlal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint64x2_t __ret; __ret = __p0 + vmull_u32(__p1, __p2); return __ret; } #else -__ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -67299,7 +67459,7 @@ __ai uint64x2_t vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint64x2_t __noswap_vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint64x2_t __ret; __ret = __p0 + __noswap_vmull_u32(__p1, __p2); return __ret; @@ -67307,13 +67467,13 @@ __ai uint64x2_t __noswap_vmlal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t 
__p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint32x4_t __ret; __ret = __p0 + vmull_u16(__p1, __p2); return __ret; } #else -__ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -67322,7 +67482,7 @@ __ai uint32x4_t vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint32x4_t __noswap_vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint32x4_t __ret; __ret = __p0 + __noswap_vmull_u16(__p1, __p2); return __ret; @@ -67330,13 +67490,13 @@ __ai uint32x4_t __noswap_vmlal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int16x8_t __ret; __ret = __p0 + vmull_s8(__p1, __p2); return __ret; } #else -__ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -67345,7 +67505,7 @@ __ai int16x8_t vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int16x8_t 
__noswap_vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t __noswap_vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int16x8_t __ret; __ret = __p0 + __noswap_vmull_s8(__p1, __p2); return __ret; @@ -67353,13 +67513,13 @@ __ai int16x8_t __noswap_vmlal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; __ret = __p0 + vmull_s32(__p1, __p2); return __ret; } #else -__ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -67368,7 +67528,7 @@ __ai int64x2_t vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int64x2_t __noswap_vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t __noswap_vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; __ret = __p0 + __noswap_vmull_s32(__p1, __p2); return __ret; @@ -67376,13 +67536,13 @@ __ai int64x2_t __noswap_vmlal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; __ret = __p0 + vmull_s16(__p1, __p2); return __ret; } #else -__ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlal_s16(int32x4_t __p0, 
int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -67391,7 +67551,7 @@ __ai int32x4_t vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; __ret = __p0 + __noswap_vmull_s16(__p1, __p2); return __ret; @@ -67399,109 +67559,109 @@ __ai int32x4_t __noswap_vmlal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_lane_u32(__p0_853, __p1_853, __p2_853, __p3_853) __extension__ ({ \ - uint64x2_t __ret_853; \ - uint64x2_t __s0_853 = __p0_853; \ - uint32x2_t __s1_853 = __p1_853; \ - uint32x2_t __s2_853 = __p2_853; \ - __ret_853 = __s0_853 + vmull_u32(__s1_853, splat_lane_u32(__s2_853, __p3_853)); \ - __ret_853; \ +#define vmlal_lane_u32(__p0_865, __p1_865, __p2_865, __p3_865) __extension__ ({ \ + uint64x2_t __ret_865; \ + uint64x2_t __s0_865 = __p0_865; \ + uint32x2_t __s1_865 = __p1_865; \ + uint32x2_t __s2_865 = __p2_865; \ + __ret_865 = __s0_865 + vmull_u32(__s1_865, splat_lane_u32(__s2_865, __p3_865)); \ + __ret_865; \ }) #else -#define vmlal_lane_u32(__p0_854, __p1_854, __p2_854, __p3_854) __extension__ ({ \ - uint64x2_t __ret_854; \ - uint64x2_t __s0_854 = __p0_854; \ - uint32x2_t __s1_854 = __p1_854; \ - uint32x2_t __s2_854 = __p2_854; \ - uint64x2_t __rev0_854; __rev0_854 = __builtin_shufflevector(__s0_854, __s0_854, 1, 0); \ - uint32x2_t __rev1_854; __rev1_854 = __builtin_shufflevector(__s1_854, __s1_854, 1, 0); \ - uint32x2_t __rev2_854; __rev2_854 = __builtin_shufflevector(__s2_854, __s2_854, 1, 0); \ - __ret_854 = __rev0_854 + __noswap_vmull_u32(__rev1_854, 
__noswap_splat_lane_u32(__rev2_854, __p3_854)); \ - __ret_854 = __builtin_shufflevector(__ret_854, __ret_854, 1, 0); \ - __ret_854; \ +#define vmlal_lane_u32(__p0_866, __p1_866, __p2_866, __p3_866) __extension__ ({ \ + uint64x2_t __ret_866; \ + uint64x2_t __s0_866 = __p0_866; \ + uint32x2_t __s1_866 = __p1_866; \ + uint32x2_t __s2_866 = __p2_866; \ + uint64x2_t __rev0_866; __rev0_866 = __builtin_shufflevector(__s0_866, __s0_866, 1, 0); \ + uint32x2_t __rev1_866; __rev1_866 = __builtin_shufflevector(__s1_866, __s1_866, 1, 0); \ + uint32x2_t __rev2_866; __rev2_866 = __builtin_shufflevector(__s2_866, __s2_866, 1, 0); \ + __ret_866 = __rev0_866 + __noswap_vmull_u32(__rev1_866, __noswap_splat_lane_u32(__rev2_866, __p3_866)); \ + __ret_866 = __builtin_shufflevector(__ret_866, __ret_866, 1, 0); \ + __ret_866; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_lane_u16(__p0_855, __p1_855, __p2_855, __p3_855) __extension__ ({ \ - uint32x4_t __ret_855; \ - uint32x4_t __s0_855 = __p0_855; \ - uint16x4_t __s1_855 = __p1_855; \ - uint16x4_t __s2_855 = __p2_855; \ - __ret_855 = __s0_855 + vmull_u16(__s1_855, splat_lane_u16(__s2_855, __p3_855)); \ - __ret_855; \ +#define vmlal_lane_u16(__p0_867, __p1_867, __p2_867, __p3_867) __extension__ ({ \ + uint32x4_t __ret_867; \ + uint32x4_t __s0_867 = __p0_867; \ + uint16x4_t __s1_867 = __p1_867; \ + uint16x4_t __s2_867 = __p2_867; \ + __ret_867 = __s0_867 + vmull_u16(__s1_867, splat_lane_u16(__s2_867, __p3_867)); \ + __ret_867; \ }) #else -#define vmlal_lane_u16(__p0_856, __p1_856, __p2_856, __p3_856) __extension__ ({ \ - uint32x4_t __ret_856; \ - uint32x4_t __s0_856 = __p0_856; \ - uint16x4_t __s1_856 = __p1_856; \ - uint16x4_t __s2_856 = __p2_856; \ - uint32x4_t __rev0_856; __rev0_856 = __builtin_shufflevector(__s0_856, __s0_856, 3, 2, 1, 0); \ - uint16x4_t __rev1_856; __rev1_856 = __builtin_shufflevector(__s1_856, __s1_856, 3, 2, 1, 0); \ - uint16x4_t __rev2_856; __rev2_856 = __builtin_shufflevector(__s2_856, __s2_856, 3, 2, 1, 0); \ 
- __ret_856 = __rev0_856 + __noswap_vmull_u16(__rev1_856, __noswap_splat_lane_u16(__rev2_856, __p3_856)); \ - __ret_856 = __builtin_shufflevector(__ret_856, __ret_856, 3, 2, 1, 0); \ - __ret_856; \ +#define vmlal_lane_u16(__p0_868, __p1_868, __p2_868, __p3_868) __extension__ ({ \ + uint32x4_t __ret_868; \ + uint32x4_t __s0_868 = __p0_868; \ + uint16x4_t __s1_868 = __p1_868; \ + uint16x4_t __s2_868 = __p2_868; \ + uint32x4_t __rev0_868; __rev0_868 = __builtin_shufflevector(__s0_868, __s0_868, 3, 2, 1, 0); \ + uint16x4_t __rev1_868; __rev1_868 = __builtin_shufflevector(__s1_868, __s1_868, 3, 2, 1, 0); \ + uint16x4_t __rev2_868; __rev2_868 = __builtin_shufflevector(__s2_868, __s2_868, 3, 2, 1, 0); \ + __ret_868 = __rev0_868 + __noswap_vmull_u16(__rev1_868, __noswap_splat_lane_u16(__rev2_868, __p3_868)); \ + __ret_868 = __builtin_shufflevector(__ret_868, __ret_868, 3, 2, 1, 0); \ + __ret_868; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_lane_s32(__p0_857, __p1_857, __p2_857, __p3_857) __extension__ ({ \ - int64x2_t __ret_857; \ - int64x2_t __s0_857 = __p0_857; \ - int32x2_t __s1_857 = __p1_857; \ - int32x2_t __s2_857 = __p2_857; \ - __ret_857 = __s0_857 + vmull_s32(__s1_857, splat_lane_s32(__s2_857, __p3_857)); \ - __ret_857; \ +#define vmlal_lane_s32(__p0_869, __p1_869, __p2_869, __p3_869) __extension__ ({ \ + int64x2_t __ret_869; \ + int64x2_t __s0_869 = __p0_869; \ + int32x2_t __s1_869 = __p1_869; \ + int32x2_t __s2_869 = __p2_869; \ + __ret_869 = __s0_869 + vmull_s32(__s1_869, splat_lane_s32(__s2_869, __p3_869)); \ + __ret_869; \ }) #else -#define vmlal_lane_s32(__p0_858, __p1_858, __p2_858, __p3_858) __extension__ ({ \ - int64x2_t __ret_858; \ - int64x2_t __s0_858 = __p0_858; \ - int32x2_t __s1_858 = __p1_858; \ - int32x2_t __s2_858 = __p2_858; \ - int64x2_t __rev0_858; __rev0_858 = __builtin_shufflevector(__s0_858, __s0_858, 1, 0); \ - int32x2_t __rev1_858; __rev1_858 = __builtin_shufflevector(__s1_858, __s1_858, 1, 0); \ - int32x2_t __rev2_858; __rev2_858 
= __builtin_shufflevector(__s2_858, __s2_858, 1, 0); \ - __ret_858 = __rev0_858 + __noswap_vmull_s32(__rev1_858, __noswap_splat_lane_s32(__rev2_858, __p3_858)); \ - __ret_858 = __builtin_shufflevector(__ret_858, __ret_858, 1, 0); \ - __ret_858; \ +#define vmlal_lane_s32(__p0_870, __p1_870, __p2_870, __p3_870) __extension__ ({ \ + int64x2_t __ret_870; \ + int64x2_t __s0_870 = __p0_870; \ + int32x2_t __s1_870 = __p1_870; \ + int32x2_t __s2_870 = __p2_870; \ + int64x2_t __rev0_870; __rev0_870 = __builtin_shufflevector(__s0_870, __s0_870, 1, 0); \ + int32x2_t __rev1_870; __rev1_870 = __builtin_shufflevector(__s1_870, __s1_870, 1, 0); \ + int32x2_t __rev2_870; __rev2_870 = __builtin_shufflevector(__s2_870, __s2_870, 1, 0); \ + __ret_870 = __rev0_870 + __noswap_vmull_s32(__rev1_870, __noswap_splat_lane_s32(__rev2_870, __p3_870)); \ + __ret_870 = __builtin_shufflevector(__ret_870, __ret_870, 1, 0); \ + __ret_870; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlal_lane_s16(__p0_859, __p1_859, __p2_859, __p3_859) __extension__ ({ \ - int32x4_t __ret_859; \ - int32x4_t __s0_859 = __p0_859; \ - int16x4_t __s1_859 = __p1_859; \ - int16x4_t __s2_859 = __p2_859; \ - __ret_859 = __s0_859 + vmull_s16(__s1_859, splat_lane_s16(__s2_859, __p3_859)); \ - __ret_859; \ +#define vmlal_lane_s16(__p0_871, __p1_871, __p2_871, __p3_871) __extension__ ({ \ + int32x4_t __ret_871; \ + int32x4_t __s0_871 = __p0_871; \ + int16x4_t __s1_871 = __p1_871; \ + int16x4_t __s2_871 = __p2_871; \ + __ret_871 = __s0_871 + vmull_s16(__s1_871, splat_lane_s16(__s2_871, __p3_871)); \ + __ret_871; \ }) #else -#define vmlal_lane_s16(__p0_860, __p1_860, __p2_860, __p3_860) __extension__ ({ \ - int32x4_t __ret_860; \ - int32x4_t __s0_860 = __p0_860; \ - int16x4_t __s1_860 = __p1_860; \ - int16x4_t __s2_860 = __p2_860; \ - int32x4_t __rev0_860; __rev0_860 = __builtin_shufflevector(__s0_860, __s0_860, 3, 2, 1, 0); \ - int16x4_t __rev1_860; __rev1_860 = __builtin_shufflevector(__s1_860, __s1_860, 3, 2, 1, 0); \ - 
int16x4_t __rev2_860; __rev2_860 = __builtin_shufflevector(__s2_860, __s2_860, 3, 2, 1, 0); \ - __ret_860 = __rev0_860 + __noswap_vmull_s16(__rev1_860, __noswap_splat_lane_s16(__rev2_860, __p3_860)); \ - __ret_860 = __builtin_shufflevector(__ret_860, __ret_860, 3, 2, 1, 0); \ - __ret_860; \ +#define vmlal_lane_s16(__p0_872, __p1_872, __p2_872, __p3_872) __extension__ ({ \ + int32x4_t __ret_872; \ + int32x4_t __s0_872 = __p0_872; \ + int16x4_t __s1_872 = __p1_872; \ + int16x4_t __s2_872 = __p2_872; \ + int32x4_t __rev0_872; __rev0_872 = __builtin_shufflevector(__s0_872, __s0_872, 3, 2, 1, 0); \ + int16x4_t __rev1_872; __rev1_872 = __builtin_shufflevector(__s1_872, __s1_872, 3, 2, 1, 0); \ + int16x4_t __rev2_872; __rev2_872 = __builtin_shufflevector(__s2_872, __s2_872, 3, 2, 1, 0); \ + __ret_872 = __rev0_872 + __noswap_vmull_s16(__rev1_872, __noswap_splat_lane_s16(__rev2_872, __p3_872)); \ + __ret_872 = __builtin_shufflevector(__ret_872, __ret_872, 3, 2, 1, 0); \ + __ret_872; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint64x2_t __ret; __ret = __p0 + vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); return __ret; } #else -__ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -67509,7 +67669,7 @@ __ai uint64x2_t vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint64x2_t __noswap_vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t 
__noswap_vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint64x2_t __ret; __ret = __p0 + __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); return __ret; @@ -67517,13 +67677,13 @@ __ai uint64x2_t __noswap_vmlal_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint32x4_t __ret; __ret = __p0 + vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #else -__ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -67531,7 +67691,7 @@ __ai uint32x4_t vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint32x4_t __noswap_vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint32x4_t __ret; __ret = __p0 + __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); return __ret; @@ -67539,13 +67699,13 @@ __ai uint32x4_t __noswap_vmlal_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; __ret = __p0 + vmull_s32(__p1, (int32x2_t) {__p2, __p2}); return __ret; } #else -__ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { +__ai 
__attribute__((target("neon"))) int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -67553,7 +67713,7 @@ __ai int64x2_t vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int64x2_t __noswap_vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t __noswap_vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; __ret = __p0 + __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2}); return __ret; @@ -67561,13 +67721,13 @@ __ai int64x2_t __noswap_vmlal_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; __ret = __p0 + vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #else -__ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -67575,7 +67735,7 @@ __ai int32x4_t vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; __ret = __p0 + __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); return __ret; 
@@ -67583,13 +67743,13 @@ __ai int32x4_t __noswap_vmlal_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint16x8_t __ret; __ret = __p0 - vmull_u8(__p1, __p2); return __ret; } #else -__ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -67598,7 +67758,7 @@ __ai uint16x8_t vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint16x8_t __noswap_vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint16x8_t __ret; __ret = __p0 - __noswap_vmull_u8(__p1, __p2); return __ret; @@ -67606,13 +67766,13 @@ __ai uint16x8_t __noswap_vmlsl_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint64x2_t __ret; __ret = __p0 - vmull_u32(__p1, __p2); return __ret; } #else -__ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, 
__p1, 1, 0); @@ -67621,7 +67781,7 @@ __ai uint64x2_t vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint64x2_t __noswap_vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint64x2_t __ret; __ret = __p0 - __noswap_vmull_u32(__p1, __p2); return __ret; @@ -67629,13 +67789,13 @@ __ai uint64x2_t __noswap_vmlsl_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint32x4_t __ret; __ret = __p0 - vmull_u16(__p1, __p2); return __ret; } #else -__ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -67644,7 +67804,7 @@ __ai uint32x4_t vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint32x4_t __noswap_vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint32x4_t __ret; __ret = __p0 - __noswap_vmull_u16(__p1, __p2); return __ret; @@ -67652,13 +67812,13 @@ __ai uint32x4_t __noswap_vmlsl_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vmlsl_s8(int16x8_t __p0, 
int8x8_t __p1, int8x8_t __p2) { int16x8_t __ret; __ret = __p0 - vmull_s8(__p1, __p2); return __ret; } #else -__ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -67667,7 +67827,7 @@ __ai int16x8_t vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int16x8_t __noswap_vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t __noswap_vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int16x8_t __ret; __ret = __p0 - __noswap_vmull_s8(__p1, __p2); return __ret; @@ -67675,13 +67835,13 @@ __ai int16x8_t __noswap_vmlsl_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; __ret = __p0 - vmull_s32(__p1, __p2); return __ret; } #else -__ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -67690,7 +67850,7 @@ __ai int64x2_t vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int64x2_t __noswap_vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t 
__noswap_vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; __ret = __p0 - __noswap_vmull_s32(__p1, __p2); return __ret; @@ -67698,13 +67858,13 @@ __ai int64x2_t __noswap_vmlsl_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; __ret = __p0 - vmull_s16(__p1, __p2); return __ret; } #else -__ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -67713,7 +67873,7 @@ __ai int32x4_t vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; __ret = __p0 - __noswap_vmull_s16(__p1, __p2); return __ret; @@ -67721,109 +67881,109 @@ __ai int32x4_t __noswap_vmlsl_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_lane_u32(__p0_861, __p1_861, __p2_861, __p3_861) __extension__ ({ \ - uint64x2_t __ret_861; \ - uint64x2_t __s0_861 = __p0_861; \ - uint32x2_t __s1_861 = __p1_861; \ - uint32x2_t __s2_861 = __p2_861; \ - __ret_861 = __s0_861 - vmull_u32(__s1_861, splat_lane_u32(__s2_861, __p3_861)); \ - __ret_861; \ +#define vmlsl_lane_u32(__p0_873, __p1_873, __p2_873, __p3_873) __extension__ ({ \ + uint64x2_t __ret_873; \ + uint64x2_t __s0_873 = __p0_873; \ + uint32x2_t __s1_873 = __p1_873; \ + 
uint32x2_t __s2_873 = __p2_873; \ + __ret_873 = __s0_873 - vmull_u32(__s1_873, splat_lane_u32(__s2_873, __p3_873)); \ + __ret_873; \ }) #else -#define vmlsl_lane_u32(__p0_862, __p1_862, __p2_862, __p3_862) __extension__ ({ \ - uint64x2_t __ret_862; \ - uint64x2_t __s0_862 = __p0_862; \ - uint32x2_t __s1_862 = __p1_862; \ - uint32x2_t __s2_862 = __p2_862; \ - uint64x2_t __rev0_862; __rev0_862 = __builtin_shufflevector(__s0_862, __s0_862, 1, 0); \ - uint32x2_t __rev1_862; __rev1_862 = __builtin_shufflevector(__s1_862, __s1_862, 1, 0); \ - uint32x2_t __rev2_862; __rev2_862 = __builtin_shufflevector(__s2_862, __s2_862, 1, 0); \ - __ret_862 = __rev0_862 - __noswap_vmull_u32(__rev1_862, __noswap_splat_lane_u32(__rev2_862, __p3_862)); \ - __ret_862 = __builtin_shufflevector(__ret_862, __ret_862, 1, 0); \ - __ret_862; \ +#define vmlsl_lane_u32(__p0_874, __p1_874, __p2_874, __p3_874) __extension__ ({ \ + uint64x2_t __ret_874; \ + uint64x2_t __s0_874 = __p0_874; \ + uint32x2_t __s1_874 = __p1_874; \ + uint32x2_t __s2_874 = __p2_874; \ + uint64x2_t __rev0_874; __rev0_874 = __builtin_shufflevector(__s0_874, __s0_874, 1, 0); \ + uint32x2_t __rev1_874; __rev1_874 = __builtin_shufflevector(__s1_874, __s1_874, 1, 0); \ + uint32x2_t __rev2_874; __rev2_874 = __builtin_shufflevector(__s2_874, __s2_874, 1, 0); \ + __ret_874 = __rev0_874 - __noswap_vmull_u32(__rev1_874, __noswap_splat_lane_u32(__rev2_874, __p3_874)); \ + __ret_874 = __builtin_shufflevector(__ret_874, __ret_874, 1, 0); \ + __ret_874; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_lane_u16(__p0_863, __p1_863, __p2_863, __p3_863) __extension__ ({ \ - uint32x4_t __ret_863; \ - uint32x4_t __s0_863 = __p0_863; \ - uint16x4_t __s1_863 = __p1_863; \ - uint16x4_t __s2_863 = __p2_863; \ - __ret_863 = __s0_863 - vmull_u16(__s1_863, splat_lane_u16(__s2_863, __p3_863)); \ - __ret_863; \ +#define vmlsl_lane_u16(__p0_875, __p1_875, __p2_875, __p3_875) __extension__ ({ \ + uint32x4_t __ret_875; \ + uint32x4_t __s0_875 = __p0_875; 
\ + uint16x4_t __s1_875 = __p1_875; \ + uint16x4_t __s2_875 = __p2_875; \ + __ret_875 = __s0_875 - vmull_u16(__s1_875, splat_lane_u16(__s2_875, __p3_875)); \ + __ret_875; \ }) #else -#define vmlsl_lane_u16(__p0_864, __p1_864, __p2_864, __p3_864) __extension__ ({ \ - uint32x4_t __ret_864; \ - uint32x4_t __s0_864 = __p0_864; \ - uint16x4_t __s1_864 = __p1_864; \ - uint16x4_t __s2_864 = __p2_864; \ - uint32x4_t __rev0_864; __rev0_864 = __builtin_shufflevector(__s0_864, __s0_864, 3, 2, 1, 0); \ - uint16x4_t __rev1_864; __rev1_864 = __builtin_shufflevector(__s1_864, __s1_864, 3, 2, 1, 0); \ - uint16x4_t __rev2_864; __rev2_864 = __builtin_shufflevector(__s2_864, __s2_864, 3, 2, 1, 0); \ - __ret_864 = __rev0_864 - __noswap_vmull_u16(__rev1_864, __noswap_splat_lane_u16(__rev2_864, __p3_864)); \ - __ret_864 = __builtin_shufflevector(__ret_864, __ret_864, 3, 2, 1, 0); \ - __ret_864; \ +#define vmlsl_lane_u16(__p0_876, __p1_876, __p2_876, __p3_876) __extension__ ({ \ + uint32x4_t __ret_876; \ + uint32x4_t __s0_876 = __p0_876; \ + uint16x4_t __s1_876 = __p1_876; \ + uint16x4_t __s2_876 = __p2_876; \ + uint32x4_t __rev0_876; __rev0_876 = __builtin_shufflevector(__s0_876, __s0_876, 3, 2, 1, 0); \ + uint16x4_t __rev1_876; __rev1_876 = __builtin_shufflevector(__s1_876, __s1_876, 3, 2, 1, 0); \ + uint16x4_t __rev2_876; __rev2_876 = __builtin_shufflevector(__s2_876, __s2_876, 3, 2, 1, 0); \ + __ret_876 = __rev0_876 - __noswap_vmull_u16(__rev1_876, __noswap_splat_lane_u16(__rev2_876, __p3_876)); \ + __ret_876 = __builtin_shufflevector(__ret_876, __ret_876, 3, 2, 1, 0); \ + __ret_876; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_lane_s32(__p0_865, __p1_865, __p2_865, __p3_865) __extension__ ({ \ - int64x2_t __ret_865; \ - int64x2_t __s0_865 = __p0_865; \ - int32x2_t __s1_865 = __p1_865; \ - int32x2_t __s2_865 = __p2_865; \ - __ret_865 = __s0_865 - vmull_s32(__s1_865, splat_lane_s32(__s2_865, __p3_865)); \ - __ret_865; \ +#define vmlsl_lane_s32(__p0_877, __p1_877, __p2_877, 
__p3_877) __extension__ ({ \ + int64x2_t __ret_877; \ + int64x2_t __s0_877 = __p0_877; \ + int32x2_t __s1_877 = __p1_877; \ + int32x2_t __s2_877 = __p2_877; \ + __ret_877 = __s0_877 - vmull_s32(__s1_877, splat_lane_s32(__s2_877, __p3_877)); \ + __ret_877; \ }) #else -#define vmlsl_lane_s32(__p0_866, __p1_866, __p2_866, __p3_866) __extension__ ({ \ - int64x2_t __ret_866; \ - int64x2_t __s0_866 = __p0_866; \ - int32x2_t __s1_866 = __p1_866; \ - int32x2_t __s2_866 = __p2_866; \ - int64x2_t __rev0_866; __rev0_866 = __builtin_shufflevector(__s0_866, __s0_866, 1, 0); \ - int32x2_t __rev1_866; __rev1_866 = __builtin_shufflevector(__s1_866, __s1_866, 1, 0); \ - int32x2_t __rev2_866; __rev2_866 = __builtin_shufflevector(__s2_866, __s2_866, 1, 0); \ - __ret_866 = __rev0_866 - __noswap_vmull_s32(__rev1_866, __noswap_splat_lane_s32(__rev2_866, __p3_866)); \ - __ret_866 = __builtin_shufflevector(__ret_866, __ret_866, 1, 0); \ - __ret_866; \ +#define vmlsl_lane_s32(__p0_878, __p1_878, __p2_878, __p3_878) __extension__ ({ \ + int64x2_t __ret_878; \ + int64x2_t __s0_878 = __p0_878; \ + int32x2_t __s1_878 = __p1_878; \ + int32x2_t __s2_878 = __p2_878; \ + int64x2_t __rev0_878; __rev0_878 = __builtin_shufflevector(__s0_878, __s0_878, 1, 0); \ + int32x2_t __rev1_878; __rev1_878 = __builtin_shufflevector(__s1_878, __s1_878, 1, 0); \ + int32x2_t __rev2_878; __rev2_878 = __builtin_shufflevector(__s2_878, __s2_878, 1, 0); \ + __ret_878 = __rev0_878 - __noswap_vmull_s32(__rev1_878, __noswap_splat_lane_s32(__rev2_878, __p3_878)); \ + __ret_878 = __builtin_shufflevector(__ret_878, __ret_878, 1, 0); \ + __ret_878; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vmlsl_lane_s16(__p0_867, __p1_867, __p2_867, __p3_867) __extension__ ({ \ - int32x4_t __ret_867; \ - int32x4_t __s0_867 = __p0_867; \ - int16x4_t __s1_867 = __p1_867; \ - int16x4_t __s2_867 = __p2_867; \ - __ret_867 = __s0_867 - vmull_s16(__s1_867, splat_lane_s16(__s2_867, __p3_867)); \ - __ret_867; \ +#define vmlsl_lane_s16(__p0_879, 
__p1_879, __p2_879, __p3_879) __extension__ ({ \ + int32x4_t __ret_879; \ + int32x4_t __s0_879 = __p0_879; \ + int16x4_t __s1_879 = __p1_879; \ + int16x4_t __s2_879 = __p2_879; \ + __ret_879 = __s0_879 - vmull_s16(__s1_879, splat_lane_s16(__s2_879, __p3_879)); \ + __ret_879; \ }) #else -#define vmlsl_lane_s16(__p0_868, __p1_868, __p2_868, __p3_868) __extension__ ({ \ - int32x4_t __ret_868; \ - int32x4_t __s0_868 = __p0_868; \ - int16x4_t __s1_868 = __p1_868; \ - int16x4_t __s2_868 = __p2_868; \ - int32x4_t __rev0_868; __rev0_868 = __builtin_shufflevector(__s0_868, __s0_868, 3, 2, 1, 0); \ - int16x4_t __rev1_868; __rev1_868 = __builtin_shufflevector(__s1_868, __s1_868, 3, 2, 1, 0); \ - int16x4_t __rev2_868; __rev2_868 = __builtin_shufflevector(__s2_868, __s2_868, 3, 2, 1, 0); \ - __ret_868 = __rev0_868 - __noswap_vmull_s16(__rev1_868, __noswap_splat_lane_s16(__rev2_868, __p3_868)); \ - __ret_868 = __builtin_shufflevector(__ret_868, __ret_868, 3, 2, 1, 0); \ - __ret_868; \ +#define vmlsl_lane_s16(__p0_880, __p1_880, __p2_880, __p3_880) __extension__ ({ \ + int32x4_t __ret_880; \ + int32x4_t __s0_880 = __p0_880; \ + int16x4_t __s1_880 = __p1_880; \ + int16x4_t __s2_880 = __p2_880; \ + int32x4_t __rev0_880; __rev0_880 = __builtin_shufflevector(__s0_880, __s0_880, 3, 2, 1, 0); \ + int16x4_t __rev1_880; __rev1_880 = __builtin_shufflevector(__s1_880, __s1_880, 3, 2, 1, 0); \ + int16x4_t __rev2_880; __rev2_880 = __builtin_shufflevector(__s2_880, __s2_880, 3, 2, 1, 0); \ + __ret_880 = __rev0_880 - __noswap_vmull_s16(__rev1_880, __noswap_splat_lane_s16(__rev2_880, __p3_880)); \ + __ret_880 = __builtin_shufflevector(__ret_880, __ret_880, 3, 2, 1, 0); \ + __ret_880; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint64x2_t __ret; __ret = __p0 - vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); return 
__ret; } #else -__ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -67831,7 +67991,7 @@ __ai uint64x2_t vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint64x2_t __noswap_vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t __p2) { uint64x2_t __ret; __ret = __p0 - __noswap_vmull_u32(__p1, (uint32x2_t) {__p2, __p2}); return __ret; @@ -67839,13 +67999,13 @@ __ai uint64x2_t __noswap_vmlsl_n_u32(uint64x2_t __p0, uint32x2_t __p1, uint32_t #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint32x4_t __ret; __ret = __p0 - vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #else -__ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -67853,7 +68013,7 @@ __ai uint32x4_t vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint32x4_t __noswap_vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t 
__noswap_vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t __p2) { uint32x4_t __ret; __ret = __p0 - __noswap_vmull_u16(__p1, (uint16x4_t) {__p2, __p2, __p2, __p2}); return __ret; @@ -67861,13 +68021,13 @@ __ai uint32x4_t __noswap_vmlsl_n_u16(uint32x4_t __p0, uint16x4_t __p1, uint16_t #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; __ret = __p0 - vmull_s32(__p1, (int32x2_t) {__p2, __p2}); return __ret; } #else -__ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -67875,7 +68035,7 @@ __ai int64x2_t vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int64x2_t __noswap_vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t __noswap_vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2) { int64x2_t __ret; __ret = __p0 - __noswap_vmull_s32(__p1, (int32x2_t) {__p2, __p2}); return __ret; @@ -67883,13 +68043,13 @@ __ai int64x2_t __noswap_vmlsl_n_s32(int64x2_t __p0, int32x2_t __p1, int32_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; __ret = __p0 - vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); return __ret; } #else -__ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlsl_n_s16(int32x4_t __p0, 
int16x4_t __p1, int16_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -67897,7 +68057,7 @@ __ai int32x4_t vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2) { int32x4_t __ret; __ret = __p0 - __noswap_vmull_s16(__p1, (int16x4_t) {__p2, __p2, __p2, __p2}); return __ret; @@ -67905,246 +68065,504 @@ __ai int32x4_t __noswap_vmlsl_n_s16(int32x4_t __p0, int16x4_t __p1, int16_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -#define vset_lane_f16(__p0_869, __p1_869, __p2_869) __extension__ ({ \ - float16x4_t __ret_869; \ - float16_t __s0_869 = __p0_869; \ - float16x4_t __s1_869 = __p1_869; \ -float16_t __reint_869 = __s0_869; \ -float16x4_t __reint1_869 = __s1_869; \ -int16x4_t __reint2_869 = vset_lane_s16(*(int16_t *) &__reint_869, *(int16x4_t *) &__reint1_869, __p2_869); \ - __ret_869 = *(float16x4_t *) &__reint2_869; \ - __ret_869; \ +#define vset_lane_f16(__p0_881, __p1_881, __p2_881) __extension__ ({ \ + float16x4_t __ret_881; \ + float16_t __s0_881 = __p0_881; \ + float16x4_t __s1_881 = __p1_881; \ +float16_t __reint_881 = __s0_881; \ +float16x4_t __reint1_881 = __s1_881; \ +int16x4_t __reint2_881 = vset_lane_s16(*(int16_t *) &__reint_881, *(int16x4_t *) &__reint1_881, __p2_881); \ + __ret_881 = *(float16x4_t *) &__reint2_881; \ + __ret_881; \ }) #else -#define vset_lane_f16(__p0_870, __p1_870, __p2_870) __extension__ ({ \ - float16x4_t __ret_870; \ - float16_t __s0_870 = __p0_870; \ - float16x4_t __s1_870 = __p1_870; \ - float16x4_t __rev1_870; __rev1_870 = __builtin_shufflevector(__s1_870, __s1_870, 3, 2, 1, 0); \ -float16_t __reint_870 = __s0_870; \ -float16x4_t 
__reint1_870 = __rev1_870; \ -int16x4_t __reint2_870 = __noswap_vset_lane_s16(*(int16_t *) &__reint_870, *(int16x4_t *) &__reint1_870, __p2_870); \ - __ret_870 = *(float16x4_t *) &__reint2_870; \ - __ret_870 = __builtin_shufflevector(__ret_870, __ret_870, 3, 2, 1, 0); \ - __ret_870; \ +#define vset_lane_f16(__p0_882, __p1_882, __p2_882) __extension__ ({ \ + float16x4_t __ret_882; \ + float16_t __s0_882 = __p0_882; \ + float16x4_t __s1_882 = __p1_882; \ + float16x4_t __rev1_882; __rev1_882 = __builtin_shufflevector(__s1_882, __s1_882, 3, 2, 1, 0); \ +float16_t __reint_882 = __s0_882; \ +float16x4_t __reint1_882 = __rev1_882; \ +int16x4_t __reint2_882 = __noswap_vset_lane_s16(*(int16_t *) &__reint_882, *(int16x4_t *) &__reint1_882, __p2_882); \ + __ret_882 = *(float16x4_t *) &__reint2_882; \ + __ret_882 = __builtin_shufflevector(__ret_882, __ret_882, 3, 2, 1, 0); \ + __ret_882; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vsetq_lane_f16(__p0_871, __p1_871, __p2_871) __extension__ ({ \ - float16x8_t __ret_871; \ - float16_t __s0_871 = __p0_871; \ - float16x8_t __s1_871 = __p1_871; \ -float16_t __reint_871 = __s0_871; \ -float16x8_t __reint1_871 = __s1_871; \ -int16x8_t __reint2_871 = vsetq_lane_s16(*(int16_t *) &__reint_871, *(int16x8_t *) &__reint1_871, __p2_871); \ - __ret_871 = *(float16x8_t *) &__reint2_871; \ - __ret_871; \ +#define vsetq_lane_f16(__p0_883, __p1_883, __p2_883) __extension__ ({ \ + float16x8_t __ret_883; \ + float16_t __s0_883 = __p0_883; \ + float16x8_t __s1_883 = __p1_883; \ +float16_t __reint_883 = __s0_883; \ +float16x8_t __reint1_883 = __s1_883; \ +int16x8_t __reint2_883 = vsetq_lane_s16(*(int16_t *) &__reint_883, *(int16x8_t *) &__reint1_883, __p2_883); \ + __ret_883 = *(float16x8_t *) &__reint2_883; \ + __ret_883; \ }) #else -#define vsetq_lane_f16(__p0_872, __p1_872, __p2_872) __extension__ ({ \ - float16x8_t __ret_872; \ - float16_t __s0_872 = __p0_872; \ - float16x8_t __s1_872 = __p1_872; \ - float16x8_t __rev1_872; __rev1_872 = 
__builtin_shufflevector(__s1_872, __s1_872, 7, 6, 5, 4, 3, 2, 1, 0); \ -float16_t __reint_872 = __s0_872; \ -float16x8_t __reint1_872 = __rev1_872; \ -int16x8_t __reint2_872 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_872, *(int16x8_t *) &__reint1_872, __p2_872); \ - __ret_872 = *(float16x8_t *) &__reint2_872; \ - __ret_872 = __builtin_shufflevector(__ret_872, __ret_872, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_872; \ +#define vsetq_lane_f16(__p0_884, __p1_884, __p2_884) __extension__ ({ \ + float16x8_t __ret_884; \ + float16_t __s0_884 = __p0_884; \ + float16x8_t __s1_884 = __p1_884; \ + float16x8_t __rev1_884; __rev1_884 = __builtin_shufflevector(__s1_884, __s1_884, 7, 6, 5, 4, 3, 2, 1, 0); \ +float16_t __reint_884 = __s0_884; \ +float16x8_t __reint1_884 = __rev1_884; \ +int16x8_t __reint2_884 = __noswap_vsetq_lane_s16(*(int16_t *) &__reint_884, *(int16x8_t *) &__reint1_884, __p2_884); \ + __ret_884 = *(float16x8_t *) &__reint2_884; \ + __ret_884 = __builtin_shufflevector(__ret_884, __ret_884, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_884; \ }) #endif +#if defined(__aarch64__) || defined(__arm64ec__) #ifdef __LITTLE_ENDIAN__ -#define vbfmlalbq_lane_f32(__p0_873, __p1_873, __p2_873, __p3_873) __extension__ ({ \ - float32x4_t __ret_873; \ - float32x4_t __s0_873 = __p0_873; \ - bfloat16x8_t __s1_873 = __p1_873; \ - bfloat16x4_t __s2_873 = __p2_873; \ - __ret_873 = vbfmlalbq_f32(__s0_873, __s1_873, (bfloat16x8_t) {vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873), vget_lane_bf16(__s2_873, __p3_873)}); \ - __ret_873; \ +__ai __attribute__((target("aes,neon"))) poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly128_t __ret; + __ret = vmull_p64((poly64_t)(vget_high_p64(__p0)), (poly64_t)(vget_high_p64(__p1))); + return __ret; +} +#else +__ai 
__attribute__((target("aes,neon"))) poly128_t vmull_high_p64(poly64x2_t __p0, poly64x2_t __p1) { + poly128_t __ret; + poly64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); + poly64x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); + __ret = vmull_p64((poly64_t)(__noswap_vget_high_p64(__rev0)), (poly64_t)(__noswap_vget_high_p64(__rev1))); + return __ret; +} +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlalq_lane_high_f16(__p0_885, __p1_885, __p2_885, __p3_885) __extension__ ({ \ + float32x4_t __ret_885; \ + float32x4_t __s0_885 = __p0_885; \ + float16x8_t __s1_885 = __p1_885; \ + float16x4_t __s2_885 = __p2_885; \ + __ret_885 = vfmlalq_high_f16(__s0_885, __s1_885, (float16x8_t) {vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885), vget_lane_f16(__s2_885, __p3_885)}); \ + __ret_885; \ }) #else -#define vbfmlalbq_lane_f32(__p0_874, __p1_874, __p2_874, __p3_874) __extension__ ({ \ - float32x4_t __ret_874; \ - float32x4_t __s0_874 = __p0_874; \ - bfloat16x8_t __s1_874 = __p1_874; \ - bfloat16x4_t __s2_874 = __p2_874; \ - float32x4_t __rev0_874; __rev0_874 = __builtin_shufflevector(__s0_874, __s0_874, 3, 2, 1, 0); \ - bfloat16x8_t __rev1_874; __rev1_874 = __builtin_shufflevector(__s1_874, __s1_874, 7, 6, 5, 4, 3, 2, 1, 0); \ - bfloat16x4_t __rev2_874; __rev2_874 = __builtin_shufflevector(__s2_874, __s2_874, 3, 2, 1, 0); \ - __ret_874 = __noswap_vbfmlalbq_f32(__rev0_874, __rev1_874, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), __noswap_vget_lane_bf16(__rev2_874, __p3_874), 
__noswap_vget_lane_bf16(__rev2_874, __p3_874)}); \ - __ret_874 = __builtin_shufflevector(__ret_874, __ret_874, 3, 2, 1, 0); \ - __ret_874; \ +#define vfmlalq_lane_high_f16(__p0_886, __p1_886, __p2_886, __p3_886) __extension__ ({ \ + float32x4_t __ret_886; \ + float32x4_t __s0_886 = __p0_886; \ + float16x8_t __s1_886 = __p1_886; \ + float16x4_t __s2_886 = __p2_886; \ + float32x4_t __rev0_886; __rev0_886 = __builtin_shufflevector(__s0_886, __s0_886, 3, 2, 1, 0); \ + float16x8_t __rev1_886; __rev1_886 = __builtin_shufflevector(__s1_886, __s1_886, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_886; __rev2_886 = __builtin_shufflevector(__s2_886, __s2_886, 3, 2, 1, 0); \ + __ret_886 = __noswap_vfmlalq_high_f16(__rev0_886, __rev1_886, (float16x8_t) {__noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886), __noswap_vget_lane_f16(__rev2_886, __p3_886)}); \ + __ret_886 = __builtin_shufflevector(__ret_886, __ret_886, 3, 2, 1, 0); \ + __ret_886; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vbfmlalbq_laneq_f32(__p0_875, __p1_875, __p2_875, __p3_875) __extension__ ({ \ - float32x4_t __ret_875; \ - float32x4_t __s0_875 = __p0_875; \ - bfloat16x8_t __s1_875 = __p1_875; \ - bfloat16x8_t __s2_875 = __p2_875; \ - __ret_875 = vbfmlalbq_f32(__s0_875, __s1_875, (bfloat16x8_t) {vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875), vgetq_lane_bf16(__s2_875, __p3_875)}); \ - __ret_875; \ +#define vfmlal_lane_high_f16(__p0_887, __p1_887, __p2_887, __p3_887) __extension__ ({ \ + float32x2_t __ret_887; \ + float32x2_t __s0_887 = 
__p0_887; \ + float16x4_t __s1_887 = __p1_887; \ + float16x4_t __s2_887 = __p2_887; \ + __ret_887 = vfmlal_high_f16(__s0_887, __s1_887, (float16x4_t) {vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887), vget_lane_f16(__s2_887, __p3_887)}); \ + __ret_887; \ }) #else -#define vbfmlalbq_laneq_f32(__p0_876, __p1_876, __p2_876, __p3_876) __extension__ ({ \ - float32x4_t __ret_876; \ - float32x4_t __s0_876 = __p0_876; \ - bfloat16x8_t __s1_876 = __p1_876; \ - bfloat16x8_t __s2_876 = __p2_876; \ - float32x4_t __rev0_876; __rev0_876 = __builtin_shufflevector(__s0_876, __s0_876, 3, 2, 1, 0); \ - bfloat16x8_t __rev1_876; __rev1_876 = __builtin_shufflevector(__s1_876, __s1_876, 7, 6, 5, 4, 3, 2, 1, 0); \ - bfloat16x8_t __rev2_876; __rev2_876 = __builtin_shufflevector(__s2_876, __s2_876, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_876 = __noswap_vbfmlalbq_f32(__rev0_876, __rev1_876, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876), __noswap_vgetq_lane_bf16(__rev2_876, __p3_876)}); \ - __ret_876 = __builtin_shufflevector(__ret_876, __ret_876, 3, 2, 1, 0); \ - __ret_876; \ +#define vfmlal_lane_high_f16(__p0_888, __p1_888, __p2_888, __p3_888) __extension__ ({ \ + float32x2_t __ret_888; \ + float32x2_t __s0_888 = __p0_888; \ + float16x4_t __s1_888 = __p1_888; \ + float16x4_t __s2_888 = __p2_888; \ + float32x2_t __rev0_888; __rev0_888 = __builtin_shufflevector(__s0_888, __s0_888, 1, 0); \ + float16x4_t __rev1_888; __rev1_888 = __builtin_shufflevector(__s1_888, __s1_888, 3, 2, 1, 0); \ + float16x4_t __rev2_888; __rev2_888 = __builtin_shufflevector(__s2_888, __s2_888, 3, 2, 1, 0); \ + __ret_888 = __noswap_vfmlal_high_f16(__rev0_888, __rev1_888, 
(float16x4_t) {__noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888), __noswap_vget_lane_f16(__rev2_888, __p3_888)}); \ + __ret_888 = __builtin_shufflevector(__ret_888, __ret_888, 1, 0); \ + __ret_888; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vbfmlaltq_lane_f32(__p0_877, __p1_877, __p2_877, __p3_877) __extension__ ({ \ - float32x4_t __ret_877; \ - float32x4_t __s0_877 = __p0_877; \ - bfloat16x8_t __s1_877 = __p1_877; \ - bfloat16x4_t __s2_877 = __p2_877; \ - __ret_877 = vbfmlaltq_f32(__s0_877, __s1_877, (bfloat16x8_t) {vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877), vget_lane_bf16(__s2_877, __p3_877)}); \ - __ret_877; \ +#define vfmlalq_lane_low_f16(__p0_889, __p1_889, __p2_889, __p3_889) __extension__ ({ \ + float32x4_t __ret_889; \ + float32x4_t __s0_889 = __p0_889; \ + float16x8_t __s1_889 = __p1_889; \ + float16x4_t __s2_889 = __p2_889; \ + __ret_889 = vfmlalq_low_f16(__s0_889, __s1_889, (float16x8_t) {vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889), vget_lane_f16(__s2_889, __p3_889)}); \ + __ret_889; \ }) #else -#define vbfmlaltq_lane_f32(__p0_878, __p1_878, __p2_878, __p3_878) __extension__ ({ \ - float32x4_t __ret_878; \ - float32x4_t __s0_878 = __p0_878; \ - bfloat16x8_t __s1_878 = __p1_878; \ - bfloat16x4_t __s2_878 = __p2_878; \ - float32x4_t __rev0_878; __rev0_878 = __builtin_shufflevector(__s0_878, __s0_878, 3, 2, 1, 0); \ - bfloat16x8_t __rev1_878; __rev1_878 = __builtin_shufflevector(__s1_878, __s1_878, 7, 6, 5, 4, 3, 2, 1, 0); \ - bfloat16x4_t __rev2_878; __rev2_878 = 
__builtin_shufflevector(__s2_878, __s2_878, 3, 2, 1, 0); \ - __ret_878 = __noswap_vbfmlaltq_f32(__rev0_878, __rev1_878, (bfloat16x8_t) {__noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878), __noswap_vget_lane_bf16(__rev2_878, __p3_878)}); \ - __ret_878 = __builtin_shufflevector(__ret_878, __ret_878, 3, 2, 1, 0); \ - __ret_878; \ +#define vfmlalq_lane_low_f16(__p0_890, __p1_890, __p2_890, __p3_890) __extension__ ({ \ + float32x4_t __ret_890; \ + float32x4_t __s0_890 = __p0_890; \ + float16x8_t __s1_890 = __p1_890; \ + float16x4_t __s2_890 = __p2_890; \ + float32x4_t __rev0_890; __rev0_890 = __builtin_shufflevector(__s0_890, __s0_890, 3, 2, 1, 0); \ + float16x8_t __rev1_890; __rev1_890 = __builtin_shufflevector(__s1_890, __s1_890, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_890; __rev2_890 = __builtin_shufflevector(__s2_890, __s2_890, 3, 2, 1, 0); \ + __ret_890 = __noswap_vfmlalq_low_f16(__rev0_890, __rev1_890, (float16x8_t) {__noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890), __noswap_vget_lane_f16(__rev2_890, __p3_890)}); \ + __ret_890 = __builtin_shufflevector(__ret_890, __ret_890, 3, 2, 1, 0); \ + __ret_890; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vbfmlaltq_laneq_f32(__p0_879, __p1_879, __p2_879, __p3_879) __extension__ ({ \ - float32x4_t __ret_879; \ - float32x4_t __s0_879 = __p0_879; \ - bfloat16x8_t __s1_879 = __p1_879; \ - bfloat16x8_t __s2_879 = __p2_879; \ - __ret_879 = vbfmlaltq_f32(__s0_879, __s1_879, 
(bfloat16x8_t) {vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879), vgetq_lane_bf16(__s2_879, __p3_879)}); \ - __ret_879; \ +#define vfmlal_lane_low_f16(__p0_891, __p1_891, __p2_891, __p3_891) __extension__ ({ \ + float32x2_t __ret_891; \ + float32x2_t __s0_891 = __p0_891; \ + float16x4_t __s1_891 = __p1_891; \ + float16x4_t __s2_891 = __p2_891; \ + __ret_891 = vfmlal_low_f16(__s0_891, __s1_891, (float16x4_t) {vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891), vget_lane_f16(__s2_891, __p3_891)}); \ + __ret_891; \ }) #else -#define vbfmlaltq_laneq_f32(__p0_880, __p1_880, __p2_880, __p3_880) __extension__ ({ \ - float32x4_t __ret_880; \ - float32x4_t __s0_880 = __p0_880; \ - bfloat16x8_t __s1_880 = __p1_880; \ - bfloat16x8_t __s2_880 = __p2_880; \ - float32x4_t __rev0_880; __rev0_880 = __builtin_shufflevector(__s0_880, __s0_880, 3, 2, 1, 0); \ - bfloat16x8_t __rev1_880; __rev1_880 = __builtin_shufflevector(__s1_880, __s1_880, 7, 6, 5, 4, 3, 2, 1, 0); \ - bfloat16x8_t __rev2_880; __rev2_880 = __builtin_shufflevector(__s2_880, __s2_880, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_880 = __noswap_vbfmlaltq_f32(__rev0_880, __rev1_880, (bfloat16x8_t) {__noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880), __noswap_vgetq_lane_bf16(__rev2_880, __p3_880)}); \ - __ret_880 = __builtin_shufflevector(__ret_880, __ret_880, 3, 2, 1, 0); \ - __ret_880; \ +#define vfmlal_lane_low_f16(__p0_892, __p1_892, __p2_892, __p3_892) __extension__ ({ \ + float32x2_t 
__ret_892; \ + float32x2_t __s0_892 = __p0_892; \ + float16x4_t __s1_892 = __p1_892; \ + float16x4_t __s2_892 = __p2_892; \ + float32x2_t __rev0_892; __rev0_892 = __builtin_shufflevector(__s0_892, __s0_892, 1, 0); \ + float16x4_t __rev1_892; __rev1_892 = __builtin_shufflevector(__s1_892, __s1_892, 3, 2, 1, 0); \ + float16x4_t __rev2_892; __rev2_892 = __builtin_shufflevector(__s2_892, __s2_892, 3, 2, 1, 0); \ + __ret_892 = __noswap_vfmlal_low_f16(__rev0_892, __rev1_892, (float16x4_t) {__noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892), __noswap_vget_lane_f16(__rev2_892, __p3_892)}); \ + __ret_892 = __builtin_shufflevector(__ret_892, __ret_892, 1, 0); \ + __ret_892; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) { - float32x4_t __ret; - __ret = vcvt_f32_bf16(vget_high_bf16(__p0)); - return __ret; -} +#define vfmlalq_laneq_high_f16(__p0_893, __p1_893, __p2_893, __p3_893) __extension__ ({ \ + float32x4_t __ret_893; \ + float32x4_t __s0_893 = __p0_893; \ + float16x8_t __s1_893 = __p1_893; \ + float16x8_t __s2_893 = __p2_893; \ + __ret_893 = vfmlalq_high_f16(__s0_893, __s1_893, (float16x8_t) {vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893), vgetq_lane_f16(__s2_893, __p3_893)}); \ + __ret_893; \ +}) #else -__ai __attribute__((target("bf16"))) float32x4_t vcvtq_high_f32_bf16(bfloat16x8_t __p0) { - float32x4_t __ret; - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vcvt_f32_bf16(__noswap_vget_high_bf16(__rev0)); - __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vfmlalq_laneq_high_f16(__p0_894, __p1_894, __p2_894, 
__p3_894) __extension__ ({ \ + float32x4_t __ret_894; \ + float32x4_t __s0_894 = __p0_894; \ + float16x8_t __s1_894 = __p1_894; \ + float16x8_t __s2_894 = __p2_894; \ + float32x4_t __rev0_894; __rev0_894 = __builtin_shufflevector(__s0_894, __s0_894, 3, 2, 1, 0); \ + float16x8_t __rev1_894; __rev1_894 = __builtin_shufflevector(__s1_894, __s1_894, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_894; __rev2_894 = __builtin_shufflevector(__s2_894, __s2_894, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_894 = __noswap_vfmlalq_high_f16(__rev0_894, __rev1_894, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894), __noswap_vgetq_lane_f16(__rev2_894, __p3_894)}); \ + __ret_894 = __builtin_shufflevector(__ret_894, __ret_894, 3, 2, 1, 0); \ + __ret_894; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -__ai __attribute__((target("bf16"))) float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) { - float32x4_t __ret; - __ret = vcvt_f32_bf16(vget_low_bf16(__p0)); - return __ret; -} +#define vfmlal_laneq_high_f16(__p0_895, __p1_895, __p2_895, __p3_895) __extension__ ({ \ + float32x2_t __ret_895; \ + float32x2_t __s0_895 = __p0_895; \ + float16x4_t __s1_895 = __p1_895; \ + float16x8_t __s2_895 = __p2_895; \ + __ret_895 = vfmlal_high_f16(__s0_895, __s1_895, (float16x4_t) {vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895), vgetq_lane_f16(__s2_895, __p3_895)}); \ + __ret_895; \ +}) #else -__ai __attribute__((target("bf16"))) float32x4_t vcvtq_low_f32_bf16(bfloat16x8_t __p0) { - float32x4_t __ret; - bfloat16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); - __ret = __noswap_vcvt_f32_bf16(__noswap_vget_low_bf16(__rev0)); - __ret = 
__builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); - return __ret; -} +#define vfmlal_laneq_high_f16(__p0_896, __p1_896, __p2_896, __p3_896) __extension__ ({ \ + float32x2_t __ret_896; \ + float32x2_t __s0_896 = __p0_896; \ + float16x4_t __s1_896 = __p1_896; \ + float16x8_t __s2_896 = __p2_896; \ + float32x2_t __rev0_896; __rev0_896 = __builtin_shufflevector(__s0_896, __s0_896, 1, 0); \ + float16x4_t __rev1_896; __rev1_896 = __builtin_shufflevector(__s1_896, __s1_896, 3, 2, 1, 0); \ + float16x8_t __rev2_896; __rev2_896 = __builtin_shufflevector(__s2_896, __s2_896, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_896 = __noswap_vfmlal_high_f16(__rev0_896, __rev1_896, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896), __noswap_vgetq_lane_f16(__rev2_896, __p3_896)}); \ + __ret_896 = __builtin_shufflevector(__ret_896, __ret_896, 1, 0); \ + __ret_896; \ +}) #endif #ifdef __LITTLE_ENDIAN__ -#define vsudotq_lane_s32(__p0_881, __p1_881, __p2_881, __p3_881) __extension__ ({ \ - int32x4_t __ret_881; \ - int32x4_t __s0_881 = __p0_881; \ - int8x16_t __s1_881 = __p1_881; \ - uint8x8_t __s2_881 = __p2_881; \ -uint8x8_t __reint_881 = __s2_881; \ - __ret_881 = vusdotq_s32(__s0_881, (uint8x16_t)(splatq_lane_s32(*(int32x2_t *) &__reint_881, __p3_881)), __s1_881); \ - __ret_881; \ +#define vfmlalq_laneq_low_f16(__p0_897, __p1_897, __p2_897, __p3_897) __extension__ ({ \ + float32x4_t __ret_897; \ + float32x4_t __s0_897 = __p0_897; \ + float16x8_t __s1_897 = __p1_897; \ + float16x8_t __s2_897 = __p2_897; \ + __ret_897 = vfmlalq_low_f16(__s0_897, __s1_897, (float16x8_t) {vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897), vgetq_lane_f16(__s2_897, __p3_897)}); \ + __ret_897; \ }) #else -#define 
vsudotq_lane_s32(__p0_882, __p1_882, __p2_882, __p3_882) __extension__ ({ \ - int32x4_t __ret_882; \ - int32x4_t __s0_882 = __p0_882; \ - int8x16_t __s1_882 = __p1_882; \ - uint8x8_t __s2_882 = __p2_882; \ - int32x4_t __rev0_882; __rev0_882 = __builtin_shufflevector(__s0_882, __s0_882, 3, 2, 1, 0); \ - int8x16_t __rev1_882; __rev1_882 = __builtin_shufflevector(__s1_882, __s1_882, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev2_882; __rev2_882 = __builtin_shufflevector(__s2_882, __s2_882, 7, 6, 5, 4, 3, 2, 1, 0); \ -uint8x8_t __reint_882 = __rev2_882; \ - __ret_882 = __noswap_vusdotq_s32(__rev0_882, (uint8x16_t)(__noswap_splatq_lane_s32(*(int32x2_t *) &__reint_882, __p3_882)), __rev1_882); \ - __ret_882 = __builtin_shufflevector(__ret_882, __ret_882, 3, 2, 1, 0); \ - __ret_882; \ +#define vfmlalq_laneq_low_f16(__p0_898, __p1_898, __p2_898, __p3_898) __extension__ ({ \ + float32x4_t __ret_898; \ + float32x4_t __s0_898 = __p0_898; \ + float16x8_t __s1_898 = __p1_898; \ + float16x8_t __s2_898 = __p2_898; \ + float32x4_t __rev0_898; __rev0_898 = __builtin_shufflevector(__s0_898, __s0_898, 3, 2, 1, 0); \ + float16x8_t __rev1_898; __rev1_898 = __builtin_shufflevector(__s1_898, __s1_898, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_898; __rev2_898 = __builtin_shufflevector(__s2_898, __s2_898, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_898 = __noswap_vfmlalq_low_f16(__rev0_898, __rev1_898, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898), __noswap_vgetq_lane_f16(__rev2_898, __p3_898)}); \ + __ret_898 = __builtin_shufflevector(__ret_898, __ret_898, 3, 2, 1, 0); \ + __ret_898; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vsudot_lane_s32(__p0_883, __p1_883, 
__p2_883, __p3_883) __extension__ ({ \ - int32x2_t __ret_883; \ - int32x2_t __s0_883 = __p0_883; \ - int8x8_t __s1_883 = __p1_883; \ - uint8x8_t __s2_883 = __p2_883; \ -uint8x8_t __reint_883 = __s2_883; \ - __ret_883 = vusdot_s32(__s0_883, (uint8x8_t)(splat_lane_s32(*(int32x2_t *) &__reint_883, __p3_883)), __s1_883); \ - __ret_883; \ +#define vfmlal_laneq_low_f16(__p0_899, __p1_899, __p2_899, __p3_899) __extension__ ({ \ + float32x2_t __ret_899; \ + float32x2_t __s0_899 = __p0_899; \ + float16x4_t __s1_899 = __p1_899; \ + float16x8_t __s2_899 = __p2_899; \ + __ret_899 = vfmlal_low_f16(__s0_899, __s1_899, (float16x4_t) {vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899), vgetq_lane_f16(__s2_899, __p3_899)}); \ + __ret_899; \ }) #else -#define vsudot_lane_s32(__p0_884, __p1_884, __p2_884, __p3_884) __extension__ ({ \ - int32x2_t __ret_884; \ - int32x2_t __s0_884 = __p0_884; \ - int8x8_t __s1_884 = __p1_884; \ - uint8x8_t __s2_884 = __p2_884; \ - int32x2_t __rev0_884; __rev0_884 = __builtin_shufflevector(__s0_884, __s0_884, 1, 0); \ - int8x8_t __rev1_884; __rev1_884 = __builtin_shufflevector(__s1_884, __s1_884, 7, 6, 5, 4, 3, 2, 1, 0); \ - uint8x8_t __rev2_884; __rev2_884 = __builtin_shufflevector(__s2_884, __s2_884, 7, 6, 5, 4, 3, 2, 1, 0); \ -uint8x8_t __reint_884 = __rev2_884; \ - __ret_884 = __noswap_vusdot_s32(__rev0_884, (uint8x8_t)(__noswap_splat_lane_s32(*(int32x2_t *) &__reint_884, __p3_884)), __rev1_884); \ - __ret_884 = __builtin_shufflevector(__ret_884, __ret_884, 1, 0); \ - __ret_884; \ +#define vfmlal_laneq_low_f16(__p0_900, __p1_900, __p2_900, __p3_900) __extension__ ({ \ + float32x2_t __ret_900; \ + float32x2_t __s0_900 = __p0_900; \ + float16x4_t __s1_900 = __p1_900; \ + float16x8_t __s2_900 = __p2_900; \ + float32x2_t __rev0_900; __rev0_900 = __builtin_shufflevector(__s0_900, __s0_900, 1, 0); \ + float16x4_t __rev1_900; __rev1_900 = __builtin_shufflevector(__s1_900, __s1_900, 3, 2, 1, 0); \ + 
float16x8_t __rev2_900; __rev2_900 = __builtin_shufflevector(__s2_900, __s2_900, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_900 = __noswap_vfmlal_low_f16(__rev0_900, __rev1_900, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900), __noswap_vgetq_lane_f16(__rev2_900, __p3_900)}); \ + __ret_900 = __builtin_shufflevector(__ret_900, __ret_900, 1, 0); \ + __ret_900; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlslq_lane_high_f16(__p0_901, __p1_901, __p2_901, __p3_901) __extension__ ({ \ + float32x4_t __ret_901; \ + float32x4_t __s0_901 = __p0_901; \ + float16x8_t __s1_901 = __p1_901; \ + float16x4_t __s2_901 = __p2_901; \ + __ret_901 = vfmlslq_high_f16(__s0_901, __s1_901, (float16x8_t) {vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901), vget_lane_f16(__s2_901, __p3_901)}); \ + __ret_901; \ +}) +#else +#define vfmlslq_lane_high_f16(__p0_902, __p1_902, __p2_902, __p3_902) __extension__ ({ \ + float32x4_t __ret_902; \ + float32x4_t __s0_902 = __p0_902; \ + float16x8_t __s1_902 = __p1_902; \ + float16x4_t __s2_902 = __p2_902; \ + float32x4_t __rev0_902; __rev0_902 = __builtin_shufflevector(__s0_902, __s0_902, 3, 2, 1, 0); \ + float16x8_t __rev1_902; __rev1_902 = __builtin_shufflevector(__s1_902, __s1_902, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_902; __rev2_902 = __builtin_shufflevector(__s2_902, __s2_902, 3, 2, 1, 0); \ + __ret_902 = __noswap_vfmlslq_high_f16(__rev0_902, __rev1_902, (float16x8_t) {__noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902), 
__noswap_vget_lane_f16(__rev2_902, __p3_902), __noswap_vget_lane_f16(__rev2_902, __p3_902)}); \ + __ret_902 = __builtin_shufflevector(__ret_902, __ret_902, 3, 2, 1, 0); \ + __ret_902; \ }) #endif -#if defined(__aarch64__) #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { +#define vfmlsl_lane_high_f16(__p0_903, __p1_903, __p2_903, __p3_903) __extension__ ({ \ + float32x2_t __ret_903; \ + float32x2_t __s0_903 = __p0_903; \ + float16x4_t __s1_903 = __p1_903; \ + float16x4_t __s2_903 = __p2_903; \ + __ret_903 = vfmlsl_high_f16(__s0_903, __s1_903, (float16x4_t) {vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903), vget_lane_f16(__s2_903, __p3_903)}); \ + __ret_903; \ +}) +#else +#define vfmlsl_lane_high_f16(__p0_904, __p1_904, __p2_904, __p3_904) __extension__ ({ \ + float32x2_t __ret_904; \ + float32x2_t __s0_904 = __p0_904; \ + float16x4_t __s1_904 = __p1_904; \ + float16x4_t __s2_904 = __p2_904; \ + float32x2_t __rev0_904; __rev0_904 = __builtin_shufflevector(__s0_904, __s0_904, 1, 0); \ + float16x4_t __rev1_904; __rev1_904 = __builtin_shufflevector(__s1_904, __s1_904, 3, 2, 1, 0); \ + float16x4_t __rev2_904; __rev2_904 = __builtin_shufflevector(__s2_904, __s2_904, 3, 2, 1, 0); \ + __ret_904 = __noswap_vfmlsl_high_f16(__rev0_904, __rev1_904, (float16x4_t) {__noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904), __noswap_vget_lane_f16(__rev2_904, __p3_904)}); \ + __ret_904 = __builtin_shufflevector(__ret_904, __ret_904, 1, 0); \ + __ret_904; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlslq_lane_low_f16(__p0_905, __p1_905, __p2_905, __p3_905) __extension__ ({ \ + float32x4_t __ret_905; \ + float32x4_t __s0_905 = __p0_905; \ + float16x8_t __s1_905 = __p1_905; \ + float16x4_t __s2_905 = __p2_905; \ + __ret_905 = vfmlslq_low_f16(__s0_905, __s1_905, (float16x8_t) {vget_lane_f16(__s2_905, 
__p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905), vget_lane_f16(__s2_905, __p3_905)}); \ + __ret_905; \ +}) +#else +#define vfmlslq_lane_low_f16(__p0_906, __p1_906, __p2_906, __p3_906) __extension__ ({ \ + float32x4_t __ret_906; \ + float32x4_t __s0_906 = __p0_906; \ + float16x8_t __s1_906 = __p1_906; \ + float16x4_t __s2_906 = __p2_906; \ + float32x4_t __rev0_906; __rev0_906 = __builtin_shufflevector(__s0_906, __s0_906, 3, 2, 1, 0); \ + float16x8_t __rev1_906; __rev1_906 = __builtin_shufflevector(__s1_906, __s1_906, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x4_t __rev2_906; __rev2_906 = __builtin_shufflevector(__s2_906, __s2_906, 3, 2, 1, 0); \ + __ret_906 = __noswap_vfmlslq_low_f16(__rev0_906, __rev1_906, (float16x8_t) {__noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906), __noswap_vget_lane_f16(__rev2_906, __p3_906)}); \ + __ret_906 = __builtin_shufflevector(__ret_906, __ret_906, 3, 2, 1, 0); \ + __ret_906; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlsl_lane_low_f16(__p0_907, __p1_907, __p2_907, __p3_907) __extension__ ({ \ + float32x2_t __ret_907; \ + float32x2_t __s0_907 = __p0_907; \ + float16x4_t __s1_907 = __p1_907; \ + float16x4_t __s2_907 = __p2_907; \ + __ret_907 = vfmlsl_low_f16(__s0_907, __s1_907, (float16x4_t) {vget_lane_f16(__s2_907, __p3_907), vget_lane_f16(__s2_907, __p3_907), vget_lane_f16(__s2_907, __p3_907), vget_lane_f16(__s2_907, __p3_907)}); \ + __ret_907; \ +}) +#else +#define vfmlsl_lane_low_f16(__p0_908, __p1_908, __p2_908, __p3_908) __extension__ ({ \ + float32x2_t __ret_908; \ + float32x2_t __s0_908 = 
__p0_908; \ + float16x4_t __s1_908 = __p1_908; \ + float16x4_t __s2_908 = __p2_908; \ + float32x2_t __rev0_908; __rev0_908 = __builtin_shufflevector(__s0_908, __s0_908, 1, 0); \ + float16x4_t __rev1_908; __rev1_908 = __builtin_shufflevector(__s1_908, __s1_908, 3, 2, 1, 0); \ + float16x4_t __rev2_908; __rev2_908 = __builtin_shufflevector(__s2_908, __s2_908, 3, 2, 1, 0); \ + __ret_908 = __noswap_vfmlsl_low_f16(__rev0_908, __rev1_908, (float16x4_t) {__noswap_vget_lane_f16(__rev2_908, __p3_908), __noswap_vget_lane_f16(__rev2_908, __p3_908), __noswap_vget_lane_f16(__rev2_908, __p3_908), __noswap_vget_lane_f16(__rev2_908, __p3_908)}); \ + __ret_908 = __builtin_shufflevector(__ret_908, __ret_908, 1, 0); \ + __ret_908; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlslq_laneq_high_f16(__p0_909, __p1_909, __p2_909, __p3_909) __extension__ ({ \ + float32x4_t __ret_909; \ + float32x4_t __s0_909 = __p0_909; \ + float16x8_t __s1_909 = __p1_909; \ + float16x8_t __s2_909 = __p2_909; \ + __ret_909 = vfmlslq_high_f16(__s0_909, __s1_909, (float16x8_t) {vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909), vgetq_lane_f16(__s2_909, __p3_909)}); \ + __ret_909; \ +}) +#else +#define vfmlslq_laneq_high_f16(__p0_910, __p1_910, __p2_910, __p3_910) __extension__ ({ \ + float32x4_t __ret_910; \ + float32x4_t __s0_910 = __p0_910; \ + float16x8_t __s1_910 = __p1_910; \ + float16x8_t __s2_910 = __p2_910; \ + float32x4_t __rev0_910; __rev0_910 = __builtin_shufflevector(__s0_910, __s0_910, 3, 2, 1, 0); \ + float16x8_t __rev1_910; __rev1_910 = __builtin_shufflevector(__s1_910, __s1_910, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_910; __rev2_910 = __builtin_shufflevector(__s2_910, __s2_910, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_910 = __noswap_vfmlslq_high_f16(__rev0_910, __rev1_910, 
(float16x8_t) {__noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910), __noswap_vgetq_lane_f16(__rev2_910, __p3_910)}); \ + __ret_910 = __builtin_shufflevector(__ret_910, __ret_910, 3, 2, 1, 0); \ + __ret_910; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlsl_laneq_high_f16(__p0_911, __p1_911, __p2_911, __p3_911) __extension__ ({ \ + float32x2_t __ret_911; \ + float32x2_t __s0_911 = __p0_911; \ + float16x4_t __s1_911 = __p1_911; \ + float16x8_t __s2_911 = __p2_911; \ + __ret_911 = vfmlsl_high_f16(__s0_911, __s1_911, (float16x4_t) {vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911), vgetq_lane_f16(__s2_911, __p3_911)}); \ + __ret_911; \ +}) +#else +#define vfmlsl_laneq_high_f16(__p0_912, __p1_912, __p2_912, __p3_912) __extension__ ({ \ + float32x2_t __ret_912; \ + float32x2_t __s0_912 = __p0_912; \ + float16x4_t __s1_912 = __p1_912; \ + float16x8_t __s2_912 = __p2_912; \ + float32x2_t __rev0_912; __rev0_912 = __builtin_shufflevector(__s0_912, __s0_912, 1, 0); \ + float16x4_t __rev1_912; __rev1_912 = __builtin_shufflevector(__s1_912, __s1_912, 3, 2, 1, 0); \ + float16x8_t __rev2_912; __rev2_912 = __builtin_shufflevector(__s2_912, __s2_912, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_912 = __noswap_vfmlsl_high_f16(__rev0_912, __rev1_912, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912), __noswap_vgetq_lane_f16(__rev2_912, __p3_912)}); \ + __ret_912 = __builtin_shufflevector(__ret_912, __ret_912, 1, 0); \ + __ret_912; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlslq_laneq_low_f16(__p0_913, __p1_913, __p2_913, __p3_913) __extension__ 
({ \ + float32x4_t __ret_913; \ + float32x4_t __s0_913 = __p0_913; \ + float16x8_t __s1_913 = __p1_913; \ + float16x8_t __s2_913 = __p2_913; \ + __ret_913 = vfmlslq_low_f16(__s0_913, __s1_913, (float16x8_t) {vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913), vgetq_lane_f16(__s2_913, __p3_913)}); \ + __ret_913; \ +}) +#else +#define vfmlslq_laneq_low_f16(__p0_914, __p1_914, __p2_914, __p3_914) __extension__ ({ \ + float32x4_t __ret_914; \ + float32x4_t __s0_914 = __p0_914; \ + float16x8_t __s1_914 = __p1_914; \ + float16x8_t __s2_914 = __p2_914; \ + float32x4_t __rev0_914; __rev0_914 = __builtin_shufflevector(__s0_914, __s0_914, 3, 2, 1, 0); \ + float16x8_t __rev1_914; __rev1_914 = __builtin_shufflevector(__s1_914, __s1_914, 7, 6, 5, 4, 3, 2, 1, 0); \ + float16x8_t __rev2_914; __rev2_914 = __builtin_shufflevector(__s2_914, __s2_914, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_914 = __noswap_vfmlslq_low_f16(__rev0_914, __rev1_914, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914), __noswap_vgetq_lane_f16(__rev2_914, __p3_914)}); \ + __ret_914 = __builtin_shufflevector(__ret_914, __ret_914, 3, 2, 1, 0); \ + __ret_914; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vfmlsl_laneq_low_f16(__p0_915, __p1_915, __p2_915, __p3_915) __extension__ ({ \ + float32x2_t __ret_915; \ + float32x2_t __s0_915 = __p0_915; \ + float16x4_t __s1_915 = __p1_915; \ + float16x8_t __s2_915 = __p2_915; \ + __ret_915 = vfmlsl_low_f16(__s0_915, __s1_915, (float16x4_t) {vgetq_lane_f16(__s2_915, __p3_915), 
vgetq_lane_f16(__s2_915, __p3_915), vgetq_lane_f16(__s2_915, __p3_915), vgetq_lane_f16(__s2_915, __p3_915)}); \ + __ret_915; \ +}) +#else +#define vfmlsl_laneq_low_f16(__p0_916, __p1_916, __p2_916, __p3_916) __extension__ ({ \ + float32x2_t __ret_916; \ + float32x2_t __s0_916 = __p0_916; \ + float16x4_t __s1_916 = __p1_916; \ + float16x8_t __s2_916 = __p2_916; \ + float32x2_t __rev0_916; __rev0_916 = __builtin_shufflevector(__s0_916, __s0_916, 1, 0); \ + float16x4_t __rev1_916; __rev1_916 = __builtin_shufflevector(__s1_916, __s1_916, 3, 2, 1, 0); \ + float16x8_t __rev2_916; __rev2_916 = __builtin_shufflevector(__s2_916, __s2_916, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_916 = __noswap_vfmlsl_low_f16(__rev0_916, __rev1_916, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_916, __p3_916), __noswap_vgetq_lane_f16(__rev2_916, __p3_916), __noswap_vgetq_lane_f16(__rev2_916, __p3_916), __noswap_vgetq_lane_f16(__rev2_916, __p3_916)}); \ + __ret_916 = __builtin_shufflevector(__ret_916, __ret_916, 1, 0); \ + __ret_916; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulh_lane_f16(__p0_917, __p1_917, __p2_917) __extension__ ({ \ + float16_t __ret_917; \ + float16_t __s0_917 = __p0_917; \ + float16x4_t __s1_917 = __p1_917; \ + __ret_917 = __s0_917 * vget_lane_f16(__s1_917, __p2_917); \ + __ret_917; \ +}) +#else +#define vmulh_lane_f16(__p0_918, __p1_918, __p2_918) __extension__ ({ \ + float16_t __ret_918; \ + float16_t __s0_918 = __p0_918; \ + float16x4_t __s1_918 = __p1_918; \ + float16x4_t __rev1_918; __rev1_918 = __builtin_shufflevector(__s1_918, __s1_918, 3, 2, 1, 0); \ + __ret_918 = __s0_918 * __noswap_vget_lane_f16(__rev1_918, __p2_918); \ + __ret_918; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +#define vmulh_laneq_f16(__p0_919, __p1_919, __p2_919) __extension__ ({ \ + float16_t __ret_919; \ + float16_t __s0_919 = __p0_919; \ + float16x8_t __s1_919 = __p1_919; \ + __ret_919 = __s0_919 * vgetq_lane_f16(__s1_919, __p2_919); \ + __ret_919; \ +}) +#else +#define 
vmulh_laneq_f16(__p0_920, __p1_920, __p2_920) __extension__ ({ \ + float16_t __ret_920; \ + float16_t __s0_920 = __p0_920; \ + float16x8_t __s1_920 = __p1_920; \ + float16x8_t __rev1_920; __rev1_920 = __builtin_shufflevector(__s1_920, __s1_920, 7, 6, 5, 4, 3, 2, 1, 0); \ + __ret_920 = __s0_920 * __noswap_vgetq_lane_f16(__rev1_920, __p2_920); \ + __ret_920; \ +}) +#endif + +#ifdef __LITTLE_ENDIAN__ +__ai __attribute__((target("neon"))) uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { uint16x8_t __ret; __ret = vabdl_u8(vget_high_u8(__p0), vget_high_u8(__p1)); return __ret; } #else -__ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { uint16x8_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68155,13 +68573,13 @@ __ai uint16x8_t vabdl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { uint64x2_t __ret; __ret = vabdl_u32(vget_high_u32(__p0), vget_high_u32(__p1)); return __ret; } #else -__ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { uint64x2_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68172,13 +68590,13 @@ __ai uint64x2_t vabdl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t 
vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { uint32x4_t __ret; __ret = vabdl_u16(vget_high_u16(__p0), vget_high_u16(__p1)); return __ret; } #else -__ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { uint32x4_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68189,13 +68607,13 @@ __ai uint32x4_t vabdl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) { int16x8_t __ret; __ret = vabdl_s8(vget_high_s8(__p0), vget_high_s8(__p1)); return __ret; } #else -__ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) { int16x8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68206,13 +68624,13 @@ __ai int16x8_t vabdl_high_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; __ret = vabdl_s32(vget_high_s32(__p0), vget_high_s32(__p1)); return __ret; } #else -__ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = 
__builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68223,13 +68641,13 @@ __ai int64x2_t vabdl_high_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; __ret = vabdl_s16(vget_high_s16(__p0), vget_high_s16(__p1)); return __ret; } #else -__ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68240,13 +68658,13 @@ __ai int32x4_t vabdl_high_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { uint16x8_t __ret; __ret = vmovl_high_u8(__p0) + vmovl_high_u8(__p1); return __ret; } #else -__ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { uint16x8_t __ret; uint8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68257,13 +68675,13 @@ __ai uint16x8_t vaddl_high_u8(uint8x16_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { uint64x2_t __ret; __ret = vmovl_high_u32(__p0) + vmovl_high_u32(__p1); return __ret; } #else -__ai uint64x2_t 
vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { uint64x2_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68274,13 +68692,13 @@ __ai uint64x2_t vaddl_high_u32(uint32x4_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { uint32x4_t __ret; __ret = vmovl_high_u16(__p0) + vmovl_high_u16(__p1); return __ret; } #else -__ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { uint32x4_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68291,13 +68709,13 @@ __ai uint32x4_t vaddl_high_u16(uint16x8_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) { int16x8_t __ret; __ret = vmovl_high_s8(__p0) + vmovl_high_s8(__p1); return __ret; } #else -__ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) { int16x8_t __ret; int8x16_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68308,13 +68726,13 @@ __ai int16x8_t vaddl_high_s8(int8x16_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t 
vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; __ret = vmovl_high_s32(__p0) + vmovl_high_s32(__p1); return __ret; } #else -__ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) { int64x2_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68325,13 +68743,13 @@ __ai int64x2_t vaddl_high_s32(int32x4_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; __ret = vmovl_high_s16(__p0) + vmovl_high_s16(__p1); return __ret; } #else -__ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) { int32x4_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68342,13 +68760,13 @@ __ai int32x4_t vaddl_high_s16(int16x8_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { uint16x8_t __ret; __ret = __p0 + vmovl_high_u8(__p1); return __ret; } #else -__ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { +__ai __attribute__((target("neon"))) uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, 
__p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68359,13 +68777,13 @@ __ai uint16x8_t vaddw_high_u8(uint16x8_t __p0, uint8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { uint64x2_t __ret; __ret = __p0 + vmovl_high_u32(__p1); return __ret; } #else -__ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { +__ai __attribute__((target("neon"))) uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68376,13 +68794,13 @@ __ai uint64x2_t vaddw_high_u32(uint64x2_t __p0, uint32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { uint32x4_t __ret; __ret = __p0 + vmovl_high_u16(__p1); return __ret; } #else -__ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { +__ai __attribute__((target("neon"))) uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68393,13 +68811,13 @@ __ai uint32x4_t vaddw_high_u16(uint32x4_t __p0, uint16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) { int16x8_t __ret; __ret = __p0 + vmovl_high_s8(__p1); return __ret; } #else -__ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) { +__ai __attribute__((target("neon"))) int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t 
__p1) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68410,13 +68828,13 @@ __ai int16x8_t vaddw_high_s8(int16x8_t __p0, int8x16_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) { int64x2_t __ret; __ret = __p0 + vmovl_high_s32(__p1); return __ret; } #else -__ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) { +__ai __attribute__((target("neon"))) int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68427,13 +68845,13 @@ __ai int64x2_t vaddw_high_s32(int64x2_t __p0, int32x4_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) { int32x4_t __ret; __ret = __p0 + vmovl_high_s16(__p1); return __ret; } #else -__ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) { +__ai __attribute__((target("neon"))) int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68444,147 +68862,147 @@ __ai int32x4_t vaddw_high_s16(int32x4_t __p0, int16x8_t __p1) { #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_p64(__p0_885, __p1_885, __p2_885, __p3_885) __extension__ ({ \ - poly64x2_t __ret_885; \ - poly64x2_t __s0_885 = __p0_885; \ - poly64x1_t __s2_885 = __p2_885; \ - __ret_885 = vsetq_lane_p64(vget_lane_p64(__s2_885, __p3_885), __s0_885, 
__p1_885); \ - __ret_885; \ +#define vcopyq_lane_p64(__p0_921, __p1_921, __p2_921, __p3_921) __extension__ ({ \ + poly64x2_t __ret_921; \ + poly64x2_t __s0_921 = __p0_921; \ + poly64x1_t __s2_921 = __p2_921; \ + __ret_921 = vsetq_lane_p64(vget_lane_p64(__s2_921, __p3_921), __s0_921, __p1_921); \ + __ret_921; \ }) #else -#define vcopyq_lane_p64(__p0_886, __p1_886, __p2_886, __p3_886) __extension__ ({ \ - poly64x2_t __ret_886; \ - poly64x2_t __s0_886 = __p0_886; \ - poly64x1_t __s2_886 = __p2_886; \ - poly64x2_t __rev0_886; __rev0_886 = __builtin_shufflevector(__s0_886, __s0_886, 1, 0); \ - __ret_886 = __noswap_vsetq_lane_p64(vget_lane_p64(__s2_886, __p3_886), __rev0_886, __p1_886); \ - __ret_886 = __builtin_shufflevector(__ret_886, __ret_886, 1, 0); \ - __ret_886; \ +#define vcopyq_lane_p64(__p0_922, __p1_922, __p2_922, __p3_922) __extension__ ({ \ + poly64x2_t __ret_922; \ + poly64x2_t __s0_922 = __p0_922; \ + poly64x1_t __s2_922 = __p2_922; \ + poly64x2_t __rev0_922; __rev0_922 = __builtin_shufflevector(__s0_922, __s0_922, 1, 0); \ + __ret_922 = __noswap_vsetq_lane_p64(vget_lane_p64(__s2_922, __p3_922), __rev0_922, __p1_922); \ + __ret_922 = __builtin_shufflevector(__ret_922, __ret_922, 1, 0); \ + __ret_922; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_lane_f64(__p0_887, __p1_887, __p2_887, __p3_887) __extension__ ({ \ - float64x2_t __ret_887; \ - float64x2_t __s0_887 = __p0_887; \ - float64x1_t __s2_887 = __p2_887; \ - __ret_887 = vsetq_lane_f64(vget_lane_f64(__s2_887, __p3_887), __s0_887, __p1_887); \ - __ret_887; \ +#define vcopyq_lane_f64(__p0_923, __p1_923, __p2_923, __p3_923) __extension__ ({ \ + float64x2_t __ret_923; \ + float64x2_t __s0_923 = __p0_923; \ + float64x1_t __s2_923 = __p2_923; \ + __ret_923 = vsetq_lane_f64(vget_lane_f64(__s2_923, __p3_923), __s0_923, __p1_923); \ + __ret_923; \ }) #else -#define vcopyq_lane_f64(__p0_888, __p1_888, __p2_888, __p3_888) __extension__ ({ \ - float64x2_t __ret_888; \ - float64x2_t __s0_888 = __p0_888; \ - 
float64x1_t __s2_888 = __p2_888; \ - float64x2_t __rev0_888; __rev0_888 = __builtin_shufflevector(__s0_888, __s0_888, 1, 0); \ - __ret_888 = __noswap_vsetq_lane_f64(vget_lane_f64(__s2_888, __p3_888), __rev0_888, __p1_888); \ - __ret_888 = __builtin_shufflevector(__ret_888, __ret_888, 1, 0); \ - __ret_888; \ +#define vcopyq_lane_f64(__p0_924, __p1_924, __p2_924, __p3_924) __extension__ ({ \ + float64x2_t __ret_924; \ + float64x2_t __s0_924 = __p0_924; \ + float64x1_t __s2_924 = __p2_924; \ + float64x2_t __rev0_924; __rev0_924 = __builtin_shufflevector(__s0_924, __s0_924, 1, 0); \ + __ret_924 = __noswap_vsetq_lane_f64(vget_lane_f64(__s2_924, __p3_924), __rev0_924, __p1_924); \ + __ret_924 = __builtin_shufflevector(__ret_924, __ret_924, 1, 0); \ + __ret_924; \ }) #endif -#define vcopy_lane_p64(__p0_889, __p1_889, __p2_889, __p3_889) __extension__ ({ \ - poly64x1_t __ret_889; \ - poly64x1_t __s0_889 = __p0_889; \ - poly64x1_t __s2_889 = __p2_889; \ - __ret_889 = vset_lane_p64(vget_lane_p64(__s2_889, __p3_889), __s0_889, __p1_889); \ - __ret_889; \ +#define vcopy_lane_p64(__p0_925, __p1_925, __p2_925, __p3_925) __extension__ ({ \ + poly64x1_t __ret_925; \ + poly64x1_t __s0_925 = __p0_925; \ + poly64x1_t __s2_925 = __p2_925; \ + __ret_925 = vset_lane_p64(vget_lane_p64(__s2_925, __p3_925), __s0_925, __p1_925); \ + __ret_925; \ }) -#define vcopy_lane_f64(__p0_890, __p1_890, __p2_890, __p3_890) __extension__ ({ \ - float64x1_t __ret_890; \ - float64x1_t __s0_890 = __p0_890; \ - float64x1_t __s2_890 = __p2_890; \ - __ret_890 = vset_lane_f64(vget_lane_f64(__s2_890, __p3_890), __s0_890, __p1_890); \ - __ret_890; \ +#define vcopy_lane_f64(__p0_926, __p1_926, __p2_926, __p3_926) __extension__ ({ \ + float64x1_t __ret_926; \ + float64x1_t __s0_926 = __p0_926; \ + float64x1_t __s2_926 = __p2_926; \ + __ret_926 = vset_lane_f64(vget_lane_f64(__s2_926, __p3_926), __s0_926, __p1_926); \ + __ret_926; \ }) #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_p64(__p0_891, __p1_891, __p2_891, 
__p3_891) __extension__ ({ \ - poly64x2_t __ret_891; \ - poly64x2_t __s0_891 = __p0_891; \ - poly64x2_t __s2_891 = __p2_891; \ - __ret_891 = vsetq_lane_p64(vgetq_lane_p64(__s2_891, __p3_891), __s0_891, __p1_891); \ - __ret_891; \ +#define vcopyq_laneq_p64(__p0_927, __p1_927, __p2_927, __p3_927) __extension__ ({ \ + poly64x2_t __ret_927; \ + poly64x2_t __s0_927 = __p0_927; \ + poly64x2_t __s2_927 = __p2_927; \ + __ret_927 = vsetq_lane_p64(vgetq_lane_p64(__s2_927, __p3_927), __s0_927, __p1_927); \ + __ret_927; \ }) #else -#define vcopyq_laneq_p64(__p0_892, __p1_892, __p2_892, __p3_892) __extension__ ({ \ - poly64x2_t __ret_892; \ - poly64x2_t __s0_892 = __p0_892; \ - poly64x2_t __s2_892 = __p2_892; \ - poly64x2_t __rev0_892; __rev0_892 = __builtin_shufflevector(__s0_892, __s0_892, 1, 0); \ - poly64x2_t __rev2_892; __rev2_892 = __builtin_shufflevector(__s2_892, __s2_892, 1, 0); \ - __ret_892 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_892, __p3_892), __rev0_892, __p1_892); \ - __ret_892 = __builtin_shufflevector(__ret_892, __ret_892, 1, 0); \ - __ret_892; \ +#define vcopyq_laneq_p64(__p0_928, __p1_928, __p2_928, __p3_928) __extension__ ({ \ + poly64x2_t __ret_928; \ + poly64x2_t __s0_928 = __p0_928; \ + poly64x2_t __s2_928 = __p2_928; \ + poly64x2_t __rev0_928; __rev0_928 = __builtin_shufflevector(__s0_928, __s0_928, 1, 0); \ + poly64x2_t __rev2_928; __rev2_928 = __builtin_shufflevector(__s2_928, __s2_928, 1, 0); \ + __ret_928 = __noswap_vsetq_lane_p64(__noswap_vgetq_lane_p64(__rev2_928, __p3_928), __rev0_928, __p1_928); \ + __ret_928 = __builtin_shufflevector(__ret_928, __ret_928, 1, 0); \ + __ret_928; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopyq_laneq_f64(__p0_893, __p1_893, __p2_893, __p3_893) __extension__ ({ \ - float64x2_t __ret_893; \ - float64x2_t __s0_893 = __p0_893; \ - float64x2_t __s2_893 = __p2_893; \ - __ret_893 = vsetq_lane_f64(vgetq_lane_f64(__s2_893, __p3_893), __s0_893, __p1_893); \ - __ret_893; \ +#define 
vcopyq_laneq_f64(__p0_929, __p1_929, __p2_929, __p3_929) __extension__ ({ \ + float64x2_t __ret_929; \ + float64x2_t __s0_929 = __p0_929; \ + float64x2_t __s2_929 = __p2_929; \ + __ret_929 = vsetq_lane_f64(vgetq_lane_f64(__s2_929, __p3_929), __s0_929, __p1_929); \ + __ret_929; \ }) #else -#define vcopyq_laneq_f64(__p0_894, __p1_894, __p2_894, __p3_894) __extension__ ({ \ - float64x2_t __ret_894; \ - float64x2_t __s0_894 = __p0_894; \ - float64x2_t __s2_894 = __p2_894; \ - float64x2_t __rev0_894; __rev0_894 = __builtin_shufflevector(__s0_894, __s0_894, 1, 0); \ - float64x2_t __rev2_894; __rev2_894 = __builtin_shufflevector(__s2_894, __s2_894, 1, 0); \ - __ret_894 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_894, __p3_894), __rev0_894, __p1_894); \ - __ret_894 = __builtin_shufflevector(__ret_894, __ret_894, 1, 0); \ - __ret_894; \ +#define vcopyq_laneq_f64(__p0_930, __p1_930, __p2_930, __p3_930) __extension__ ({ \ + float64x2_t __ret_930; \ + float64x2_t __s0_930 = __p0_930; \ + float64x2_t __s2_930 = __p2_930; \ + float64x2_t __rev0_930; __rev0_930 = __builtin_shufflevector(__s0_930, __s0_930, 1, 0); \ + float64x2_t __rev2_930; __rev2_930 = __builtin_shufflevector(__s2_930, __s2_930, 1, 0); \ + __ret_930 = __noswap_vsetq_lane_f64(__noswap_vgetq_lane_f64(__rev2_930, __p3_930), __rev0_930, __p1_930); \ + __ret_930 = __builtin_shufflevector(__ret_930, __ret_930, 1, 0); \ + __ret_930; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_p64(__p0_895, __p1_895, __p2_895, __p3_895) __extension__ ({ \ - poly64x1_t __ret_895; \ - poly64x1_t __s0_895 = __p0_895; \ - poly64x2_t __s2_895 = __p2_895; \ - __ret_895 = vset_lane_p64(vgetq_lane_p64(__s2_895, __p3_895), __s0_895, __p1_895); \ - __ret_895; \ +#define vcopy_laneq_p64(__p0_931, __p1_931, __p2_931, __p3_931) __extension__ ({ \ + poly64x1_t __ret_931; \ + poly64x1_t __s0_931 = __p0_931; \ + poly64x2_t __s2_931 = __p2_931; \ + __ret_931 = vset_lane_p64(vgetq_lane_p64(__s2_931, __p3_931), __s0_931, 
__p1_931); \ + __ret_931; \ }) #else -#define vcopy_laneq_p64(__p0_896, __p1_896, __p2_896, __p3_896) __extension__ ({ \ - poly64x1_t __ret_896; \ - poly64x1_t __s0_896 = __p0_896; \ - poly64x2_t __s2_896 = __p2_896; \ - poly64x2_t __rev2_896; __rev2_896 = __builtin_shufflevector(__s2_896, __s2_896, 1, 0); \ - __ret_896 = vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_896, __p3_896), __s0_896, __p1_896); \ - __ret_896; \ +#define vcopy_laneq_p64(__p0_932, __p1_932, __p2_932, __p3_932) __extension__ ({ \ + poly64x1_t __ret_932; \ + poly64x1_t __s0_932 = __p0_932; \ + poly64x2_t __s2_932 = __p2_932; \ + poly64x2_t __rev2_932; __rev2_932 = __builtin_shufflevector(__s2_932, __s2_932, 1, 0); \ + __ret_932 = vset_lane_p64(__noswap_vgetq_lane_p64(__rev2_932, __p3_932), __s0_932, __p1_932); \ + __ret_932; \ }) #endif #ifdef __LITTLE_ENDIAN__ -#define vcopy_laneq_f64(__p0_897, __p1_897, __p2_897, __p3_897) __extension__ ({ \ - float64x1_t __ret_897; \ - float64x1_t __s0_897 = __p0_897; \ - float64x2_t __s2_897 = __p2_897; \ - __ret_897 = vset_lane_f64(vgetq_lane_f64(__s2_897, __p3_897), __s0_897, __p1_897); \ - __ret_897; \ +#define vcopy_laneq_f64(__p0_933, __p1_933, __p2_933, __p3_933) __extension__ ({ \ + float64x1_t __ret_933; \ + float64x1_t __s0_933 = __p0_933; \ + float64x2_t __s2_933 = __p2_933; \ + __ret_933 = vset_lane_f64(vgetq_lane_f64(__s2_933, __p3_933), __s0_933, __p1_933); \ + __ret_933; \ }) #else -#define vcopy_laneq_f64(__p0_898, __p1_898, __p2_898, __p3_898) __extension__ ({ \ - float64x1_t __ret_898; \ - float64x1_t __s0_898 = __p0_898; \ - float64x2_t __s2_898 = __p2_898; \ - float64x2_t __rev2_898; __rev2_898 = __builtin_shufflevector(__s2_898, __s2_898, 1, 0); \ - __ret_898 = vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_898, __p3_898), __s0_898, __p1_898); \ - __ret_898; \ +#define vcopy_laneq_f64(__p0_934, __p1_934, __p2_934, __p3_934) __extension__ ({ \ + float64x1_t __ret_934; \ + float64x1_t __s0_934 = __p0_934; \ + float64x2_t __s2_934 = __p2_934; 
\ + float64x2_t __rev2_934; __rev2_934 = __builtin_shufflevector(__s2_934, __s2_934, 1, 0); \ + __ret_934 = vset_lane_f64(__noswap_vgetq_lane_f64(__rev2_934, __p3_934), __s0_934, __p1_934); \ + __ret_934; \ }) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint16x8_t __ret; __ret = vmlal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2)); return __ret; } #else -__ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68596,13 +69014,13 @@ __ai uint16x8_t vmlal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint64x2_t __ret; __ret = vmlal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2)); return __ret; } #else -__ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68614,13 +69032,13 @@ __ai uint64x2_t vmlal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, 
uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint32x4_t __ret; __ret = vmlal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2)); return __ret; } #else -__ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68632,13 +69050,13 @@ __ai uint32x4_t vmlal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { int16x8_t __ret; __ret = vmlal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2)); return __ret; } #else -__ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68650,13 +69068,13 @@ __ai int16x8_t vmlal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { int64x2_t __ret; __ret = vmlal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); return __ret; } #else -__ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { 
+__ai __attribute__((target("neon"))) int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68668,13 +69086,13 @@ __ai int64x2_t vmlal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { int32x4_t __ret; __ret = vmlal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); return __ret; } #else -__ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68686,13 +69104,13 @@ __ai int32x4_t vmlal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { uint64x2_t __ret; __ret = vmlal_n_u32(__p0, vget_high_u32(__p1), __p2); return __ret; } #else -__ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68703,13 +69121,13 @@ __ai uint64x2_t vmlal_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2 
#endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { uint32x4_t __ret; __ret = vmlal_n_u16(__p0, vget_high_u16(__p1), __p2); return __ret; } #else -__ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68720,13 +69138,13 @@ __ai uint32x4_t vmlal_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { int64x2_t __ret; __ret = vmlal_n_s32(__p0, vget_high_s32(__p1), __p2); return __ret; } #else -__ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68737,13 +69155,13 @@ __ai int64x2_t vmlal_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { int32x4_t __ret; __ret = vmlal_n_s16(__p0, vget_high_s16(__p1), __p2); return __ret; } #else -__ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t 
__p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68754,13 +69172,13 @@ __ai int32x4_t vmlal_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint16x8_t __ret; __ret = vmlsl_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2)); return __ret; } #else -__ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68772,13 +69190,13 @@ __ai uint16x8_t vmlsl_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint64x2_t __ret; __ret = vmlsl_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2)); return __ret; } #else -__ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68790,13 
+69208,13 @@ __ai uint64x2_t vmlsl_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint32x4_t __ret; __ret = vmlsl_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2)); return __ret; } #else -__ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68808,13 +69226,13 @@ __ai uint32x4_t vmlsl_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { int16x8_t __ret; __ret = vmlsl_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2)); return __ret; } #else -__ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68826,13 +69244,13 @@ __ai int16x8_t vmlsl_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) 
{ int64x2_t __ret; __ret = vmlsl_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); return __ret; } #else -__ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68844,13 +69262,13 @@ __ai int64x2_t vmlsl_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { int32x4_t __ret; __ret = vmlsl_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); return __ret; } #else -__ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68862,13 +69280,13 @@ __ai int32x4_t vmlsl_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { uint64x2_t __ret; __ret = vmlsl_n_u32(__p0, vget_high_u32(__p1), __p2); return __ret; } #else -__ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 
0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68879,13 +69297,13 @@ __ai uint64x2_t vmlsl_high_n_u32(uint64x2_t __p0, uint32x4_t __p1, uint32_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { uint32x4_t __ret; __ret = vmlsl_n_u16(__p0, vget_high_u16(__p1), __p2); return __ret; } #else -__ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68896,13 +69314,13 @@ __ai uint32x4_t vmlsl_high_n_u16(uint32x4_t __p0, uint16x8_t __p1, uint16_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { int64x2_t __ret; __ret = vmlsl_n_s32(__p0, vget_high_s32(__p1), __p2); return __ret; } #else -__ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -68913,13 +69331,13 @@ __ai int64x2_t vmlsl_high_n_s32(int64x2_t __p0, int32x4_t __p1, int32_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t 
__p1, int16_t __p2) { int32x4_t __ret; __ret = vmlsl_n_s16(__p0, vget_high_s16(__p1), __p2); return __ret; } #else -__ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -68929,472 +69347,50 @@ __ai int32x4_t vmlsl_high_n_s16(int32x4_t __p0, int16x8_t __p1, int16_t __p2) { } #endif -#define vmulx_lane_f64(__p0_899, __p1_899, __p2_899) __extension__ ({ \ - float64x1_t __ret_899; \ - float64x1_t __s0_899 = __p0_899; \ - float64x1_t __s1_899 = __p1_899; \ - float64_t __x_899 = vget_lane_f64(__s0_899, 0); \ - float64_t __y_899 = vget_lane_f64(__s1_899, __p2_899); \ - float64_t __z_899 = vmulxd_f64(__x_899, __y_899); \ - __ret_899 = vset_lane_f64(__z_899, __s0_899, __p2_899); \ - __ret_899; \ -}) -#ifdef __LITTLE_ENDIAN__ -#define vmulx_laneq_f64(__p0_900, __p1_900, __p2_900) __extension__ ({ \ - float64x1_t __ret_900; \ - float64x1_t __s0_900 = __p0_900; \ - float64x2_t __s1_900 = __p1_900; \ - float64_t __x_900 = vget_lane_f64(__s0_900, 0); \ - float64_t __y_900 = vgetq_lane_f64(__s1_900, __p2_900); \ - float64_t __z_900 = vmulxd_f64(__x_900, __y_900); \ - __ret_900 = vset_lane_f64(__z_900, __s0_900, 0); \ - __ret_900; \ -}) -#else -#define vmulx_laneq_f64(__p0_901, __p1_901, __p2_901) __extension__ ({ \ - float64x1_t __ret_901; \ - float64x1_t __s0_901 = __p0_901; \ - float64x2_t __s1_901 = __p1_901; \ - float64x2_t __rev1_901; __rev1_901 = __builtin_shufflevector(__s1_901, __s1_901, 1, 0); \ - float64_t __x_901 = vget_lane_f64(__s0_901, 0); \ - float64_t __y_901 = __noswap_vgetq_lane_f64(__rev1_901, __p2_901); \ - float64_t __z_901 = vmulxd_f64(__x_901, __y_901); \ - __ret_901 = vset_lane_f64(__z_901, __s0_901, 0); \ - __ret_901; \ -}) -#endif - 
-#ifdef __LITTLE_ENDIAN__ -#define vfmlalq_lane_high_f16(__p0_902, __p1_902, __p2_902, __p3_902) __extension__ ({ \ - float32x4_t __ret_902; \ - float32x4_t __s0_902 = __p0_902; \ - float16x8_t __s1_902 = __p1_902; \ - float16x4_t __s2_902 = __p2_902; \ - __ret_902 = vfmlalq_high_f16(__s0_902, __s1_902, (float16x8_t) {vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902), vget_lane_f16(__s2_902, __p3_902)}); \ - __ret_902; \ -}) -#else -#define vfmlalq_lane_high_f16(__p0_903, __p1_903, __p2_903, __p3_903) __extension__ ({ \ - float32x4_t __ret_903; \ - float32x4_t __s0_903 = __p0_903; \ - float16x8_t __s1_903 = __p1_903; \ - float16x4_t __s2_903 = __p2_903; \ - float32x4_t __rev0_903; __rev0_903 = __builtin_shufflevector(__s0_903, __s0_903, 3, 2, 1, 0); \ - float16x8_t __rev1_903; __rev1_903 = __builtin_shufflevector(__s1_903, __s1_903, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_903; __rev2_903 = __builtin_shufflevector(__s2_903, __s2_903, 3, 2, 1, 0); \ - __ret_903 = __noswap_vfmlalq_high_f16(__rev0_903, __rev1_903, (float16x8_t) {__noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903), __noswap_vget_lane_f16(__rev2_903, __p3_903)}); \ - __ret_903 = __builtin_shufflevector(__ret_903, __ret_903, 3, 2, 1, 0); \ - __ret_903; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlal_lane_high_f16(__p0_904, __p1_904, __p2_904, __p3_904) __extension__ ({ \ - float32x2_t __ret_904; \ - float32x2_t __s0_904 = __p0_904; \ - float16x4_t __s1_904 = __p1_904; \ - float16x4_t __s2_904 = __p2_904; \ - __ret_904 = 
vfmlal_high_f16(__s0_904, __s1_904, (float16x4_t) {vget_lane_f16(__s2_904, __p3_904), vget_lane_f16(__s2_904, __p3_904), vget_lane_f16(__s2_904, __p3_904), vget_lane_f16(__s2_904, __p3_904)}); \ - __ret_904; \ -}) -#else -#define vfmlal_lane_high_f16(__p0_905, __p1_905, __p2_905, __p3_905) __extension__ ({ \ - float32x2_t __ret_905; \ - float32x2_t __s0_905 = __p0_905; \ - float16x4_t __s1_905 = __p1_905; \ - float16x4_t __s2_905 = __p2_905; \ - float32x2_t __rev0_905; __rev0_905 = __builtin_shufflevector(__s0_905, __s0_905, 1, 0); \ - float16x4_t __rev1_905; __rev1_905 = __builtin_shufflevector(__s1_905, __s1_905, 3, 2, 1, 0); \ - float16x4_t __rev2_905; __rev2_905 = __builtin_shufflevector(__s2_905, __s2_905, 3, 2, 1, 0); \ - __ret_905 = __noswap_vfmlal_high_f16(__rev0_905, __rev1_905, (float16x4_t) {__noswap_vget_lane_f16(__rev2_905, __p3_905), __noswap_vget_lane_f16(__rev2_905, __p3_905), __noswap_vget_lane_f16(__rev2_905, __p3_905), __noswap_vget_lane_f16(__rev2_905, __p3_905)}); \ - __ret_905 = __builtin_shufflevector(__ret_905, __ret_905, 1, 0); \ - __ret_905; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlalq_lane_low_f16(__p0_906, __p1_906, __p2_906, __p3_906) __extension__ ({ \ - float32x4_t __ret_906; \ - float32x4_t __s0_906 = __p0_906; \ - float16x8_t __s1_906 = __p1_906; \ - float16x4_t __s2_906 = __p2_906; \ - __ret_906 = vfmlalq_low_f16(__s0_906, __s1_906, (float16x8_t) {vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906), vget_lane_f16(__s2_906, __p3_906)}); \ - __ret_906; \ -}) -#else -#define vfmlalq_lane_low_f16(__p0_907, __p1_907, __p2_907, __p3_907) __extension__ ({ \ - float32x4_t __ret_907; \ - float32x4_t __s0_907 = __p0_907; \ - float16x8_t __s1_907 = __p1_907; \ - float16x4_t __s2_907 = __p2_907; \ - float32x4_t __rev0_907; __rev0_907 = 
__builtin_shufflevector(__s0_907, __s0_907, 3, 2, 1, 0); \ - float16x8_t __rev1_907; __rev1_907 = __builtin_shufflevector(__s1_907, __s1_907, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_907; __rev2_907 = __builtin_shufflevector(__s2_907, __s2_907, 3, 2, 1, 0); \ - __ret_907 = __noswap_vfmlalq_low_f16(__rev0_907, __rev1_907, (float16x8_t) {__noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907), __noswap_vget_lane_f16(__rev2_907, __p3_907)}); \ - __ret_907 = __builtin_shufflevector(__ret_907, __ret_907, 3, 2, 1, 0); \ - __ret_907; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlal_lane_low_f16(__p0_908, __p1_908, __p2_908, __p3_908) __extension__ ({ \ - float32x2_t __ret_908; \ - float32x2_t __s0_908 = __p0_908; \ - float16x4_t __s1_908 = __p1_908; \ - float16x4_t __s2_908 = __p2_908; \ - __ret_908 = vfmlal_low_f16(__s0_908, __s1_908, (float16x4_t) {vget_lane_f16(__s2_908, __p3_908), vget_lane_f16(__s2_908, __p3_908), vget_lane_f16(__s2_908, __p3_908), vget_lane_f16(__s2_908, __p3_908)}); \ - __ret_908; \ -}) -#else -#define vfmlal_lane_low_f16(__p0_909, __p1_909, __p2_909, __p3_909) __extension__ ({ \ - float32x2_t __ret_909; \ - float32x2_t __s0_909 = __p0_909; \ - float16x4_t __s1_909 = __p1_909; \ - float16x4_t __s2_909 = __p2_909; \ - float32x2_t __rev0_909; __rev0_909 = __builtin_shufflevector(__s0_909, __s0_909, 1, 0); \ - float16x4_t __rev1_909; __rev1_909 = __builtin_shufflevector(__s1_909, __s1_909, 3, 2, 1, 0); \ - float16x4_t __rev2_909; __rev2_909 = __builtin_shufflevector(__s2_909, __s2_909, 3, 2, 1, 0); \ - __ret_909 = __noswap_vfmlal_low_f16(__rev0_909, __rev1_909, (float16x4_t) {__noswap_vget_lane_f16(__rev2_909, __p3_909), __noswap_vget_lane_f16(__rev2_909, __p3_909), 
__noswap_vget_lane_f16(__rev2_909, __p3_909), __noswap_vget_lane_f16(__rev2_909, __p3_909)}); \ - __ret_909 = __builtin_shufflevector(__ret_909, __ret_909, 1, 0); \ - __ret_909; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlalq_laneq_high_f16(__p0_910, __p1_910, __p2_910, __p3_910) __extension__ ({ \ - float32x4_t __ret_910; \ - float32x4_t __s0_910 = __p0_910; \ - float16x8_t __s1_910 = __p1_910; \ - float16x8_t __s2_910 = __p2_910; \ - __ret_910 = vfmlalq_high_f16(__s0_910, __s1_910, (float16x8_t) {vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910), vgetq_lane_f16(__s2_910, __p3_910)}); \ - __ret_910; \ -}) -#else -#define vfmlalq_laneq_high_f16(__p0_911, __p1_911, __p2_911, __p3_911) __extension__ ({ \ - float32x4_t __ret_911; \ - float32x4_t __s0_911 = __p0_911; \ - float16x8_t __s1_911 = __p1_911; \ - float16x8_t __s2_911 = __p2_911; \ - float32x4_t __rev0_911; __rev0_911 = __builtin_shufflevector(__s0_911, __s0_911, 3, 2, 1, 0); \ - float16x8_t __rev1_911; __rev1_911 = __builtin_shufflevector(__s1_911, __s1_911, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_911; __rev2_911 = __builtin_shufflevector(__s2_911, __s2_911, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_911 = __noswap_vfmlalq_high_f16(__rev0_911, __rev1_911, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911), __noswap_vgetq_lane_f16(__rev2_911, __p3_911)}); \ - __ret_911 = __builtin_shufflevector(__ret_911, __ret_911, 3, 2, 1, 0); \ - __ret_911; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define 
vfmlal_laneq_high_f16(__p0_912, __p1_912, __p2_912, __p3_912) __extension__ ({ \ - float32x2_t __ret_912; \ - float32x2_t __s0_912 = __p0_912; \ - float16x4_t __s1_912 = __p1_912; \ - float16x8_t __s2_912 = __p2_912; \ - __ret_912 = vfmlal_high_f16(__s0_912, __s1_912, (float16x4_t) {vgetq_lane_f16(__s2_912, __p3_912), vgetq_lane_f16(__s2_912, __p3_912), vgetq_lane_f16(__s2_912, __p3_912), vgetq_lane_f16(__s2_912, __p3_912)}); \ - __ret_912; \ -}) -#else -#define vfmlal_laneq_high_f16(__p0_913, __p1_913, __p2_913, __p3_913) __extension__ ({ \ - float32x2_t __ret_913; \ - float32x2_t __s0_913 = __p0_913; \ - float16x4_t __s1_913 = __p1_913; \ - float16x8_t __s2_913 = __p2_913; \ - float32x2_t __rev0_913; __rev0_913 = __builtin_shufflevector(__s0_913, __s0_913, 1, 0); \ - float16x4_t __rev1_913; __rev1_913 = __builtin_shufflevector(__s1_913, __s1_913, 3, 2, 1, 0); \ - float16x8_t __rev2_913; __rev2_913 = __builtin_shufflevector(__s2_913, __s2_913, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_913 = __noswap_vfmlal_high_f16(__rev0_913, __rev1_913, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_913, __p3_913), __noswap_vgetq_lane_f16(__rev2_913, __p3_913), __noswap_vgetq_lane_f16(__rev2_913, __p3_913), __noswap_vgetq_lane_f16(__rev2_913, __p3_913)}); \ - __ret_913 = __builtin_shufflevector(__ret_913, __ret_913, 1, 0); \ - __ret_913; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlalq_laneq_low_f16(__p0_914, __p1_914, __p2_914, __p3_914) __extension__ ({ \ - float32x4_t __ret_914; \ - float32x4_t __s0_914 = __p0_914; \ - float16x8_t __s1_914 = __p1_914; \ - float16x8_t __s2_914 = __p2_914; \ - __ret_914 = vfmlalq_low_f16(__s0_914, __s1_914, (float16x8_t) {vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914), vgetq_lane_f16(__s2_914, __p3_914)}); \ - __ret_914; \ -}) -#else -#define 
vfmlalq_laneq_low_f16(__p0_915, __p1_915, __p2_915, __p3_915) __extension__ ({ \ - float32x4_t __ret_915; \ - float32x4_t __s0_915 = __p0_915; \ - float16x8_t __s1_915 = __p1_915; \ - float16x8_t __s2_915 = __p2_915; \ - float32x4_t __rev0_915; __rev0_915 = __builtin_shufflevector(__s0_915, __s0_915, 3, 2, 1, 0); \ - float16x8_t __rev1_915; __rev1_915 = __builtin_shufflevector(__s1_915, __s1_915, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_915; __rev2_915 = __builtin_shufflevector(__s2_915, __s2_915, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_915 = __noswap_vfmlalq_low_f16(__rev0_915, __rev1_915, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915), __noswap_vgetq_lane_f16(__rev2_915, __p3_915)}); \ - __ret_915 = __builtin_shufflevector(__ret_915, __ret_915, 3, 2, 1, 0); \ - __ret_915; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlal_laneq_low_f16(__p0_916, __p1_916, __p2_916, __p3_916) __extension__ ({ \ - float32x2_t __ret_916; \ - float32x2_t __s0_916 = __p0_916; \ - float16x4_t __s1_916 = __p1_916; \ - float16x8_t __s2_916 = __p2_916; \ - __ret_916 = vfmlal_low_f16(__s0_916, __s1_916, (float16x4_t) {vgetq_lane_f16(__s2_916, __p3_916), vgetq_lane_f16(__s2_916, __p3_916), vgetq_lane_f16(__s2_916, __p3_916), vgetq_lane_f16(__s2_916, __p3_916)}); \ - __ret_916; \ -}) -#else -#define vfmlal_laneq_low_f16(__p0_917, __p1_917, __p2_917, __p3_917) __extension__ ({ \ - float32x2_t __ret_917; \ - float32x2_t __s0_917 = __p0_917; \ - float16x4_t __s1_917 = __p1_917; \ - float16x8_t __s2_917 = __p2_917; \ - float32x2_t __rev0_917; __rev0_917 = __builtin_shufflevector(__s0_917, __s0_917, 1, 0); \ - float16x4_t __rev1_917; __rev1_917 = __builtin_shufflevector(__s1_917, __s1_917, 3, 2, 
1, 0); \ - float16x8_t __rev2_917; __rev2_917 = __builtin_shufflevector(__s2_917, __s2_917, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_917 = __noswap_vfmlal_low_f16(__rev0_917, __rev1_917, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_917, __p3_917), __noswap_vgetq_lane_f16(__rev2_917, __p3_917), __noswap_vgetq_lane_f16(__rev2_917, __p3_917), __noswap_vgetq_lane_f16(__rev2_917, __p3_917)}); \ - __ret_917 = __builtin_shufflevector(__ret_917, __ret_917, 1, 0); \ - __ret_917; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlslq_lane_high_f16(__p0_918, __p1_918, __p2_918, __p3_918) __extension__ ({ \ - float32x4_t __ret_918; \ - float32x4_t __s0_918 = __p0_918; \ - float16x8_t __s1_918 = __p1_918; \ - float16x4_t __s2_918 = __p2_918; \ - __ret_918 = vfmlslq_high_f16(__s0_918, __s1_918, (float16x8_t) {vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918), vget_lane_f16(__s2_918, __p3_918)}); \ - __ret_918; \ -}) -#else -#define vfmlslq_lane_high_f16(__p0_919, __p1_919, __p2_919, __p3_919) __extension__ ({ \ - float32x4_t __ret_919; \ - float32x4_t __s0_919 = __p0_919; \ - float16x8_t __s1_919 = __p1_919; \ - float16x4_t __s2_919 = __p2_919; \ - float32x4_t __rev0_919; __rev0_919 = __builtin_shufflevector(__s0_919, __s0_919, 3, 2, 1, 0); \ - float16x8_t __rev1_919; __rev1_919 = __builtin_shufflevector(__s1_919, __s1_919, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_919; __rev2_919 = __builtin_shufflevector(__s2_919, __s2_919, 3, 2, 1, 0); \ - __ret_919 = __noswap_vfmlslq_high_f16(__rev0_919, __rev1_919, (float16x8_t) {__noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, 
__p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919), __noswap_vget_lane_f16(__rev2_919, __p3_919)}); \ - __ret_919 = __builtin_shufflevector(__ret_919, __ret_919, 3, 2, 1, 0); \ - __ret_919; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlsl_lane_high_f16(__p0_920, __p1_920, __p2_920, __p3_920) __extension__ ({ \ - float32x2_t __ret_920; \ - float32x2_t __s0_920 = __p0_920; \ - float16x4_t __s1_920 = __p1_920; \ - float16x4_t __s2_920 = __p2_920; \ - __ret_920 = vfmlsl_high_f16(__s0_920, __s1_920, (float16x4_t) {vget_lane_f16(__s2_920, __p3_920), vget_lane_f16(__s2_920, __p3_920), vget_lane_f16(__s2_920, __p3_920), vget_lane_f16(__s2_920, __p3_920)}); \ - __ret_920; \ -}) -#else -#define vfmlsl_lane_high_f16(__p0_921, __p1_921, __p2_921, __p3_921) __extension__ ({ \ - float32x2_t __ret_921; \ - float32x2_t __s0_921 = __p0_921; \ - float16x4_t __s1_921 = __p1_921; \ - float16x4_t __s2_921 = __p2_921; \ - float32x2_t __rev0_921; __rev0_921 = __builtin_shufflevector(__s0_921, __s0_921, 1, 0); \ - float16x4_t __rev1_921; __rev1_921 = __builtin_shufflevector(__s1_921, __s1_921, 3, 2, 1, 0); \ - float16x4_t __rev2_921; __rev2_921 = __builtin_shufflevector(__s2_921, __s2_921, 3, 2, 1, 0); \ - __ret_921 = __noswap_vfmlsl_high_f16(__rev0_921, __rev1_921, (float16x4_t) {__noswap_vget_lane_f16(__rev2_921, __p3_921), __noswap_vget_lane_f16(__rev2_921, __p3_921), __noswap_vget_lane_f16(__rev2_921, __p3_921), __noswap_vget_lane_f16(__rev2_921, __p3_921)}); \ - __ret_921 = __builtin_shufflevector(__ret_921, __ret_921, 1, 0); \ - __ret_921; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlslq_lane_low_f16(__p0_922, __p1_922, __p2_922, __p3_922) __extension__ ({ \ - float32x4_t __ret_922; \ - float32x4_t __s0_922 = __p0_922; \ - float16x8_t __s1_922 = __p1_922; \ - float16x4_t __s2_922 = __p2_922; \ - __ret_922 = vfmlslq_low_f16(__s0_922, __s1_922, (float16x8_t) {vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, 
__p3_922), vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922), vget_lane_f16(__s2_922, __p3_922)}); \ - __ret_922; \ -}) -#else -#define vfmlslq_lane_low_f16(__p0_923, __p1_923, __p2_923, __p3_923) __extension__ ({ \ - float32x4_t __ret_923; \ - float32x4_t __s0_923 = __p0_923; \ - float16x8_t __s1_923 = __p1_923; \ - float16x4_t __s2_923 = __p2_923; \ - float32x4_t __rev0_923; __rev0_923 = __builtin_shufflevector(__s0_923, __s0_923, 3, 2, 1, 0); \ - float16x8_t __rev1_923; __rev1_923 = __builtin_shufflevector(__s1_923, __s1_923, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x4_t __rev2_923; __rev2_923 = __builtin_shufflevector(__s2_923, __s2_923, 3, 2, 1, 0); \ - __ret_923 = __noswap_vfmlslq_low_f16(__rev0_923, __rev1_923, (float16x8_t) {__noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923), __noswap_vget_lane_f16(__rev2_923, __p3_923)}); \ - __ret_923 = __builtin_shufflevector(__ret_923, __ret_923, 3, 2, 1, 0); \ - __ret_923; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlsl_lane_low_f16(__p0_924, __p1_924, __p2_924, __p3_924) __extension__ ({ \ - float32x2_t __ret_924; \ - float32x2_t __s0_924 = __p0_924; \ - float16x4_t __s1_924 = __p1_924; \ - float16x4_t __s2_924 = __p2_924; \ - __ret_924 = vfmlsl_low_f16(__s0_924, __s1_924, (float16x4_t) {vget_lane_f16(__s2_924, __p3_924), vget_lane_f16(__s2_924, __p3_924), vget_lane_f16(__s2_924, __p3_924), vget_lane_f16(__s2_924, __p3_924)}); \ - __ret_924; \ -}) -#else -#define vfmlsl_lane_low_f16(__p0_925, __p1_925, __p2_925, __p3_925) __extension__ ({ \ - float32x2_t __ret_925; \ - float32x2_t __s0_925 = __p0_925; \ - float16x4_t __s1_925 = __p1_925; \ - float16x4_t 
__s2_925 = __p2_925; \ - float32x2_t __rev0_925; __rev0_925 = __builtin_shufflevector(__s0_925, __s0_925, 1, 0); \ - float16x4_t __rev1_925; __rev1_925 = __builtin_shufflevector(__s1_925, __s1_925, 3, 2, 1, 0); \ - float16x4_t __rev2_925; __rev2_925 = __builtin_shufflevector(__s2_925, __s2_925, 3, 2, 1, 0); \ - __ret_925 = __noswap_vfmlsl_low_f16(__rev0_925, __rev1_925, (float16x4_t) {__noswap_vget_lane_f16(__rev2_925, __p3_925), __noswap_vget_lane_f16(__rev2_925, __p3_925), __noswap_vget_lane_f16(__rev2_925, __p3_925), __noswap_vget_lane_f16(__rev2_925, __p3_925)}); \ - __ret_925 = __builtin_shufflevector(__ret_925, __ret_925, 1, 0); \ - __ret_925; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlslq_laneq_high_f16(__p0_926, __p1_926, __p2_926, __p3_926) __extension__ ({ \ - float32x4_t __ret_926; \ - float32x4_t __s0_926 = __p0_926; \ - float16x8_t __s1_926 = __p1_926; \ - float16x8_t __s2_926 = __p2_926; \ - __ret_926 = vfmlslq_high_f16(__s0_926, __s1_926, (float16x8_t) {vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926), vgetq_lane_f16(__s2_926, __p3_926)}); \ - __ret_926; \ -}) -#else -#define vfmlslq_laneq_high_f16(__p0_927, __p1_927, __p2_927, __p3_927) __extension__ ({ \ - float32x4_t __ret_927; \ - float32x4_t __s0_927 = __p0_927; \ - float16x8_t __s1_927 = __p1_927; \ - float16x8_t __s2_927 = __p2_927; \ - float32x4_t __rev0_927; __rev0_927 = __builtin_shufflevector(__s0_927, __s0_927, 3, 2, 1, 0); \ - float16x8_t __rev1_927; __rev1_927 = __builtin_shufflevector(__s1_927, __s1_927, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_927; __rev2_927 = __builtin_shufflevector(__s2_927, __s2_927, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_927 = __noswap_vfmlslq_high_f16(__rev0_927, __rev1_927, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_927, __p3_927), 
__noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927), __noswap_vgetq_lane_f16(__rev2_927, __p3_927)}); \ - __ret_927 = __builtin_shufflevector(__ret_927, __ret_927, 3, 2, 1, 0); \ - __ret_927; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlsl_laneq_high_f16(__p0_928, __p1_928, __p2_928, __p3_928) __extension__ ({ \ - float32x2_t __ret_928; \ - float32x2_t __s0_928 = __p0_928; \ - float16x4_t __s1_928 = __p1_928; \ - float16x8_t __s2_928 = __p2_928; \ - __ret_928 = vfmlsl_high_f16(__s0_928, __s1_928, (float16x4_t) {vgetq_lane_f16(__s2_928, __p3_928), vgetq_lane_f16(__s2_928, __p3_928), vgetq_lane_f16(__s2_928, __p3_928), vgetq_lane_f16(__s2_928, __p3_928)}); \ - __ret_928; \ -}) -#else -#define vfmlsl_laneq_high_f16(__p0_929, __p1_929, __p2_929, __p3_929) __extension__ ({ \ - float32x2_t __ret_929; \ - float32x2_t __s0_929 = __p0_929; \ - float16x4_t __s1_929 = __p1_929; \ - float16x8_t __s2_929 = __p2_929; \ - float32x2_t __rev0_929; __rev0_929 = __builtin_shufflevector(__s0_929, __s0_929, 1, 0); \ - float16x4_t __rev1_929; __rev1_929 = __builtin_shufflevector(__s1_929, __s1_929, 3, 2, 1, 0); \ - float16x8_t __rev2_929; __rev2_929 = __builtin_shufflevector(__s2_929, __s2_929, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_929 = __noswap_vfmlsl_high_f16(__rev0_929, __rev1_929, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_929, __p3_929), __noswap_vgetq_lane_f16(__rev2_929, __p3_929), __noswap_vgetq_lane_f16(__rev2_929, __p3_929), __noswap_vgetq_lane_f16(__rev2_929, __p3_929)}); \ - __ret_929 = __builtin_shufflevector(__ret_929, __ret_929, 1, 0); \ - __ret_929; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlslq_laneq_low_f16(__p0_930, __p1_930, __p2_930, __p3_930) __extension__ ({ \ - float32x4_t __ret_930; \ - float32x4_t __s0_930 = 
__p0_930; \ - float16x8_t __s1_930 = __p1_930; \ - float16x8_t __s2_930 = __p2_930; \ - __ret_930 = vfmlslq_low_f16(__s0_930, __s1_930, (float16x8_t) {vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930), vgetq_lane_f16(__s2_930, __p3_930)}); \ - __ret_930; \ -}) -#else -#define vfmlslq_laneq_low_f16(__p0_931, __p1_931, __p2_931, __p3_931) __extension__ ({ \ - float32x4_t __ret_931; \ - float32x4_t __s0_931 = __p0_931; \ - float16x8_t __s1_931 = __p1_931; \ - float16x8_t __s2_931 = __p2_931; \ - float32x4_t __rev0_931; __rev0_931 = __builtin_shufflevector(__s0_931, __s0_931, 3, 2, 1, 0); \ - float16x8_t __rev1_931; __rev1_931 = __builtin_shufflevector(__s1_931, __s1_931, 7, 6, 5, 4, 3, 2, 1, 0); \ - float16x8_t __rev2_931; __rev2_931 = __builtin_shufflevector(__s2_931, __s2_931, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_931 = __noswap_vfmlslq_low_f16(__rev0_931, __rev1_931, (float16x8_t) {__noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931), __noswap_vgetq_lane_f16(__rev2_931, __p3_931)}); \ - __ret_931 = __builtin_shufflevector(__ret_931, __ret_931, 3, 2, 1, 0); \ - __ret_931; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vfmlsl_laneq_low_f16(__p0_932, __p1_932, __p2_932, __p3_932) __extension__ ({ \ - float32x2_t __ret_932; \ - float32x2_t __s0_932 = __p0_932; \ - float16x4_t __s1_932 = __p1_932; \ - float16x8_t __s2_932 = __p2_932; \ - __ret_932 = vfmlsl_low_f16(__s0_932, __s1_932, (float16x4_t) {vgetq_lane_f16(__s2_932, __p3_932), vgetq_lane_f16(__s2_932, __p3_932), vgetq_lane_f16(__s2_932, 
__p3_932), vgetq_lane_f16(__s2_932, __p3_932)}); \ - __ret_932; \ -}) -#else -#define vfmlsl_laneq_low_f16(__p0_933, __p1_933, __p2_933, __p3_933) __extension__ ({ \ - float32x2_t __ret_933; \ - float32x2_t __s0_933 = __p0_933; \ - float16x4_t __s1_933 = __p1_933; \ - float16x8_t __s2_933 = __p2_933; \ - float32x2_t __rev0_933; __rev0_933 = __builtin_shufflevector(__s0_933, __s0_933, 1, 0); \ - float16x4_t __rev1_933; __rev1_933 = __builtin_shufflevector(__s1_933, __s1_933, 3, 2, 1, 0); \ - float16x8_t __rev2_933; __rev2_933 = __builtin_shufflevector(__s2_933, __s2_933, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_933 = __noswap_vfmlsl_low_f16(__rev0_933, __rev1_933, (float16x4_t) {__noswap_vgetq_lane_f16(__rev2_933, __p3_933), __noswap_vgetq_lane_f16(__rev2_933, __p3_933), __noswap_vgetq_lane_f16(__rev2_933, __p3_933), __noswap_vgetq_lane_f16(__rev2_933, __p3_933)}); \ - __ret_933 = __builtin_shufflevector(__ret_933, __ret_933, 1, 0); \ - __ret_933; \ -}) -#endif - -#ifdef __LITTLE_ENDIAN__ -#define vmulh_lane_f16(__p0_934, __p1_934, __p2_934) __extension__ ({ \ - float16_t __ret_934; \ - float16_t __s0_934 = __p0_934; \ - float16x4_t __s1_934 = __p1_934; \ - __ret_934 = __s0_934 * vget_lane_f16(__s1_934, __p2_934); \ - __ret_934; \ -}) -#else -#define vmulh_lane_f16(__p0_935, __p1_935, __p2_935) __extension__ ({ \ - float16_t __ret_935; \ - float16_t __s0_935 = __p0_935; \ - float16x4_t __s1_935 = __p1_935; \ - float16x4_t __rev1_935; __rev1_935 = __builtin_shufflevector(__s1_935, __s1_935, 3, 2, 1, 0); \ - __ret_935 = __s0_935 * __noswap_vget_lane_f16(__rev1_935, __p2_935); \ +#define vmulx_lane_f64(__p0_935, __p1_935, __p2_935) __extension__ ({ \ + float64x1_t __ret_935; \ + float64x1_t __s0_935 = __p0_935; \ + float64x1_t __s1_935 = __p1_935; \ + float64_t __x_935 = vget_lane_f64(__s0_935, 0); \ + float64_t __y_935 = vget_lane_f64(__s1_935, __p2_935); \ + float64_t __z_935 = vmulxd_f64(__x_935, __y_935); \ + __ret_935 = vset_lane_f64(__z_935, __s0_935, __p2_935); \ 
__ret_935; \ }) -#endif - #ifdef __LITTLE_ENDIAN__ -#define vmulh_laneq_f16(__p0_936, __p1_936, __p2_936) __extension__ ({ \ - float16_t __ret_936; \ - float16_t __s0_936 = __p0_936; \ - float16x8_t __s1_936 = __p1_936; \ - __ret_936 = __s0_936 * vgetq_lane_f16(__s1_936, __p2_936); \ +#define vmulx_laneq_f64(__p0_936, __p1_936, __p2_936) __extension__ ({ \ + float64x1_t __ret_936; \ + float64x1_t __s0_936 = __p0_936; \ + float64x2_t __s1_936 = __p1_936; \ + float64_t __x_936 = vget_lane_f64(__s0_936, 0); \ + float64_t __y_936 = vgetq_lane_f64(__s1_936, __p2_936); \ + float64_t __z_936 = vmulxd_f64(__x_936, __y_936); \ + __ret_936 = vset_lane_f64(__z_936, __s0_936, 0); \ __ret_936; \ }) #else -#define vmulh_laneq_f16(__p0_937, __p1_937, __p2_937) __extension__ ({ \ - float16_t __ret_937; \ - float16_t __s0_937 = __p0_937; \ - float16x8_t __s1_937 = __p1_937; \ - float16x8_t __rev1_937; __rev1_937 = __builtin_shufflevector(__s1_937, __s1_937, 7, 6, 5, 4, 3, 2, 1, 0); \ - __ret_937 = __s0_937 * __noswap_vgetq_lane_f16(__rev1_937, __p2_937); \ +#define vmulx_laneq_f64(__p0_937, __p1_937, __p2_937) __extension__ ({ \ + float64x1_t __ret_937; \ + float64x1_t __s0_937 = __p0_937; \ + float64x2_t __s1_937 = __p1_937; \ + float64x2_t __rev1_937; __rev1_937 = __builtin_shufflevector(__s1_937, __s1_937, 1, 0); \ + float64_t __x_937 = vget_lane_f64(__s0_937, 0); \ + float64_t __y_937 = __noswap_vgetq_lane_f64(__rev1_937, __p2_937); \ + float64_t __z_937 = vmulxd_f64(__x_937, __y_937); \ + __ret_937 = vset_lane_f64(__z_937, __s0_937, 0); \ __ret_937; \ }) #endif #endif #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint16x8_t __ret; __ret = __p0 + vabdl_u8(__p1, __p2); return __ret; } #else -__ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t 
vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -69403,7 +69399,7 @@ __ai uint16x8_t vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai uint16x8_t __noswap_vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t __noswap_vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p2) { uint16x8_t __ret; __ret = __p0 + __noswap_vabdl_u8(__p1, __p2); return __ret; @@ -69411,13 +69407,13 @@ __ai uint16x8_t __noswap_vabal_u8(uint16x8_t __p0, uint8x8_t __p1, uint8x8_t __p #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint64x2_t __ret; __ret = __p0 + vabdl_u32(__p1, __p2); return __ret; } #else -__ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -69426,7 +69422,7 @@ __ai uint64x2_t vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai uint64x2_t __noswap_vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t __noswap_vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t __p2) { uint64x2_t __ret; __ret = __p0 + __noswap_vabdl_u32(__p1, __p2); return __ret; @@ -69434,13 +69430,13 @@ __ai uint64x2_t 
__noswap_vabal_u32(uint64x2_t __p0, uint32x2_t __p1, uint32x2_t #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint32x4_t __ret; __ret = __p0 + vabdl_u16(__p1, __p2); return __ret; } #else -__ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -69449,7 +69445,7 @@ __ai uint32x4_t vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai uint32x4_t __noswap_vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t __noswap_vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t __p2) { uint32x4_t __ret; __ret = __p0 + __noswap_vabdl_u16(__p1, __p2); return __ret; @@ -69457,13 +69453,13 @@ __ai uint32x4_t __noswap_vabal_u16(uint32x4_t __p0, uint16x4_t __p1, uint16x4_t #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int16x8_t __ret; __ret = __p0 + vabdl_s8(__p1, __p2); return __ret; } #else -__ai int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -69472,7 +69468,7 @@ __ai int16x8_t 
vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 7, 6, 5, 4, 3, 2, 1, 0); return __ret; } -__ai int16x8_t __noswap_vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t __noswap_vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { int16x8_t __ret; __ret = __p0 + __noswap_vabdl_s8(__p1, __p2); return __ret; @@ -69480,13 +69476,13 @@ __ai int16x8_t __noswap_vabal_s8(int16x8_t __p0, int8x8_t __p1, int8x8_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; __ret = __p0 + vabdl_s32(__p1, __p2); return __ret; } #else -__ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x2_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 1, 0); @@ -69495,7 +69491,7 @@ __ai int64x2_t vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 1, 0); return __ret; } -__ai int64x2_t __noswap_vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t __noswap_vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2) { int64x2_t __ret; __ret = __p0 + __noswap_vabdl_s32(__p1, __p2); return __ret; @@ -69503,13 +69499,13 @@ __ai int64x2_t __noswap_vabal_s32(int64x2_t __p0, int32x2_t __p1, int32x2_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; __ret = __p0 + vabdl_s16(__p1, __p2); return __ret; } 
#else -__ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -69518,22 +69514,22 @@ __ai int32x4_t vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { __ret = __builtin_shufflevector(__ret, __ret, 3, 2, 1, 0); return __ret; } -__ai int32x4_t __noswap_vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t __noswap_vabal_s16(int32x4_t __p0, int16x4_t __p1, int16x4_t __p2) { int32x4_t __ret; __ret = __p0 + __noswap_vabdl_s16(__p1, __p2); return __ret; } #endif -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(__arm64ec__) #ifdef __LITTLE_ENDIAN__ -__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint16x8_t __ret; __ret = vabal_u8(__p0, vget_high_u8(__p1), vget_high_u8(__p2)); return __ret; } #else -__ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { +__ai __attribute__((target("neon"))) uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) { uint16x8_t __ret; uint16x8_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); uint8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -69545,13 +69541,13 @@ __ai uint16x8_t vabal_high_u8(uint16x8_t __p0, uint8x16_t __p1, uint8x16_t __p2) #endif #ifdef __LITTLE_ENDIAN__ -__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { 
uint64x2_t __ret; __ret = vabal_u32(__p0, vget_high_u32(__p1), vget_high_u32(__p2)); return __ret; } #else -__ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { +__ai __attribute__((target("neon"))) uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2) { uint64x2_t __ret; uint64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); uint32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -69563,13 +69559,13 @@ __ai uint64x2_t vabal_high_u32(uint64x2_t __p0, uint32x4_t __p1, uint32x4_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint32x4_t __ret; __ret = vabal_u16(__p0, vget_high_u16(__p1), vget_high_u16(__p2)); return __ret; } #else -__ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { +__ai __attribute__((target("neon"))) uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2) { uint32x4_t __ret; uint32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); uint16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -69581,13 +69577,13 @@ __ai uint32x4_t vabal_high_u16(uint32x4_t __p0, uint16x8_t __p1, uint16x8_t __p2 #endif #ifdef __LITTLE_ENDIAN__ -__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { int16x8_t __ret; __ret = vabal_s8(__p0, vget_high_s8(__p1), vget_high_s8(__p2)); return __ret; } #else -__ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { +__ai __attribute__((target("neon"))) int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { int16x8_t __ret; int16x8_t __rev0; __rev0 = 
__builtin_shufflevector(__p0, __p0, 7, 6, 5, 4, 3, 2, 1, 0); int8x16_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0); @@ -69599,13 +69595,13 @@ __ai int16x8_t vabal_high_s8(int16x8_t __p0, int8x16_t __p1, int8x16_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { int64x2_t __ret; __ret = vabal_s32(__p0, vget_high_s32(__p1), vget_high_s32(__p2)); return __ret; } #else -__ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { +__ai __attribute__((target("neon"))) int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { int64x2_t __ret; int64x2_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 1, 0); int32x4_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 3, 2, 1, 0); @@ -69617,13 +69613,13 @@ __ai int64x2_t vabal_high_s32(int64x2_t __p0, int32x4_t __p1, int32x4_t __p2) { #endif #ifdef __LITTLE_ENDIAN__ -__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { int32x4_t __ret; __ret = vabal_s16(__p0, vget_high_s16(__p1), vget_high_s16(__p2)); return __ret; } #else -__ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { +__ai __attribute__((target("neon"))) int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { int32x4_t __ret; int32x4_t __rev0; __rev0 = __builtin_shufflevector(__p0, __p0, 3, 2, 1, 0); int16x8_t __rev1; __rev1 = __builtin_shufflevector(__p1, __p1, 7, 6, 5, 4, 3, 2, 1, 0); @@ -69640,4 +69636,3 @@ __ai int32x4_t vabal_high_s16(int32x4_t __p0, int16x8_t __p1, int16x8_t __p2) { #endif /* if !defined(__ARM_NEON) */ #endif /* ifndef __ARM_FP */ -#endif /* __ARM_NEON_H */ diff --git 
a/lib/include/arm_sme.h b/lib/include/arm_sme.h index 2ed316f26070..cbfea38fe457 100644 --- a/lib/include/arm_sme.h +++ b/lib/include/arm_sme.h @@ -16,6 +16,8 @@ #endif #include +#include + /* Function attributes */ #define __ai static __inline__ __attribute__((__always_inline__, __nodebug__)) @@ -39,6 +41,11 @@ __ai bool __arm_in_streaming_mode(void) __arm_streaming_compatible { return x0 & 1; } +void *__arm_sc_memcpy(void *dest, const void *src, size_t n) __arm_streaming_compatible; +void *__arm_sc_memmove(void *dest, const void *src, size_t n) __arm_streaming_compatible; +void *__arm_sc_memset(void *s, int c, size_t n) __arm_streaming_compatible; +void *__arm_sc_memchr(void *s, int c, size_t n) __arm_streaming_compatible; + __ai __attribute__((target("sme"))) void svundef_za(void) __arm_streaming_compatible __arm_out("za") { } __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za32_u32_m))) @@ -368,7 +375,7 @@ void svwrite_ver_za8_s8_m(uint64_t, uint32_t, svbool_t, svint8_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_mask_za))) void svzero_mask_za(uint64_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za))) -void svzero_za(); +void svzero_za(void); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za32_u32_m))) void svaddha_za32_m(uint64_t, svbool_t, svbool_t, svuint32_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svaddha_za32_s32_m))) @@ -597,6 +604,78 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_u8_ void svwrite_ver_za8_m(uint64_t, uint32_t, svbool_t, svuint8_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_ver_za8_s8_m))) void svwrite_ver_za8_m(uint64_t, uint32_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za16_f16_vg1x2))) +void svmla_single_za16_f16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za16_f16_vg1x4))) +void svmla_single_za16_f16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za16_f16_vg1x2))) +void svmla_lane_za16_f16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za16_f16_vg1x4))) +void svmla_lane_za16_f16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za16_f16_vg1x2))) +void svmla_za16_f16_vg1x2(uint32_t, svfloat16x2_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za16_f16_vg1x4))) +void svmla_za16_f16_vg1x4(uint32_t, svfloat16x4_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za16_f16_vg1x2))) +void svmls_single_za16_f16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za16_f16_vg1x4))) +void svmls_single_za16_f16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za16_f16_vg1x2))) +void svmls_lane_za16_f16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za16_f16_vg1x4))) +void svmls_lane_za16_f16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za16_f16_vg1x2))) +void svmls_za16_f16_vg1x2(uint32_t, svfloat16x2_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za16_f16_vg1x4))) +void svmls_za16_f16_vg1x4(uint32_t, svfloat16x4_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za16_f16_m))) +void svmopa_za16_f16_m(uint64_t, svbool_t, svbool_t, svfloat16_t, svfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za16_f16_m))) +void svmops_za16_f16_m(uint64_t, svbool_t, svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za16_f16_vg1x2))) +void svmla_za16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za16_f16_vg1x4))) +void svmla_za16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za16_f16_vg1x2))) +void svmla_lane_za16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za16_f16_vg1x4))) +void svmla_lane_za16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za16_f16_vg1x2))) +void svmla_za16_vg1x2(uint32_t, svfloat16x2_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za16_f16_vg1x4))) +void svmla_za16_vg1x4(uint32_t, svfloat16x4_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za16_f16_vg1x2))) +void svmls_za16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za16_f16_vg1x4))) +void svmls_za16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za16_f16_vg1x2))) +void svmls_lane_za16_vg1x2(uint32_t, svfloat16x2_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za16_f16_vg1x4))) +void svmls_lane_za16_vg1x4(uint32_t, svfloat16x4_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za16_f16_vg1x2))) +void svmls_za16_vg1x2(uint32_t, svfloat16x2_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za16_f16_vg1x4))) +void 
svmls_za16_vg1x4(uint32_t, svfloat16x4_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za16_f16_m))) +void svmopa_za16_m(uint64_t, svbool_t, svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za16_f16_m))) +void svmops_za16_m(uint64_t, svbool_t, svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_f16_vg1x2))) +void svadd_za16_f16_vg1x2(uint32_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_f16_vg1x4))) +void svadd_za16_f16_vg1x4(uint32_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za16_f16_vg1x2))) +void svsub_za16_f16_vg1x2(uint32_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za16_f16_vg1x4))) +void svsub_za16_f16_vg1x4(uint32_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_f16_vg1x2))) +void svadd_za16_vg1x2(uint32_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_f16_vg1x4))) +void svadd_za16_vg1x4(uint32_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za16_f16_vg1x2))) +void svsub_za16_vg1x2(uint32_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za16_f16_vg1x4))) +void svsub_za16_vg1x4(uint32_t, svfloat16x4_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za64_f64_m))) void svmopa_za64_f64_m(uint64_t, svbool_t, svbool_t, svfloat64_t, svfloat64_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za64_f64_m))) @@ -2059,6 +2138,78 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_u8_vg1x void svwrite_za8_vg1x4(uint32_t, svuint8x4_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svwrite_za8_s8_vg1x4))) void svwrite_za8_vg1x4(uint32_t, svint8x4_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_bf16_vg1x2))) +void svadd_za16_bf16_vg1x2(uint32_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_bf16_vg1x4))) +void svadd_za16_bf16_vg1x4(uint32_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za16_bf16_vg1x2))) +void svmla_single_za16_bf16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za16_bf16_vg1x4))) +void svmla_single_za16_bf16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za16_bf16_vg1x2))) +void svmla_lane_za16_bf16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za16_bf16_vg1x4))) +void svmla_lane_za16_bf16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za16_bf16_vg1x2))) +void svmla_za16_bf16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za16_bf16_vg1x4))) +void svmla_za16_bf16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za16_bf16_vg1x2))) +void svmls_single_za16_bf16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za16_bf16_vg1x4))) +void svmls_single_za16_bf16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za16_bf16_vg1x2))) +void svmls_lane_za16_bf16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za16_bf16_vg1x4))) +void svmls_lane_za16_bf16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za16_bf16_vg1x2))) +void svmls_za16_bf16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za16_bf16_vg1x4))) +void svmls_za16_bf16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za16_bf16_m))) +void svmopa_za16_bf16_m(uint64_t, svbool_t, svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za16_bf16_m))) +void svmops_za16_bf16_m(uint64_t, svbool_t, svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za16_bf16_vg1x2))) +void svsub_za16_bf16_vg1x2(uint32_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za16_bf16_vg1x4))) +void svsub_za16_bf16_vg1x4(uint32_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_bf16_vg1x2))) +void svadd_za16_vg1x2(uint32_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za16_bf16_vg1x4))) +void svadd_za16_vg1x4(uint32_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za16_bf16_vg1x2))) +void svmla_za16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_single_za16_bf16_vg1x4))) +void svmla_za16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za16_bf16_vg1x2))) +void svmla_lane_za16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_lane_za16_bf16_vg1x4))) +void svmla_lane_za16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za16_bf16_vg1x2))) +void svmla_za16_vg1x2(uint32_t, svbfloat16x2_t, 
svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmla_za16_bf16_vg1x4))) +void svmla_za16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za16_bf16_vg1x2))) +void svmls_za16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_single_za16_bf16_vg1x4))) +void svmls_za16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za16_bf16_vg1x2))) +void svmls_lane_za16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_lane_za16_bf16_vg1x4))) +void svmls_lane_za16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za16_bf16_vg1x2))) +void svmls_za16_vg1x2(uint32_t, svbfloat16x2_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmls_za16_bf16_vg1x4))) +void svmls_za16_vg1x4(uint32_t, svbfloat16x4_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmopa_za16_bf16_m))) +void svmopa_za16_m(uint64_t, svbool_t, svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svmops_za16_bf16_m))) +void svmops_za16_m(uint64_t, svbool_t, svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za16_bf16_vg1x2))) +void svsub_za16_vg1x2(uint32_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svsub_za16_bf16_vg1x4))) +void svsub_za16_vg1x4(uint32_t, svbfloat16x4_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za64_f64_vg1x2))) void svadd_za64_f64_vg1x2(uint32_t, svfloat64x2_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svadd_za64_f64_vg1x4))) @@ -2403,6 +2554,262 @@ __aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svvdot_lane_za64_s1 void svvdot_lane_za64_vg1x4(uint32_t, svint16x4_t, svint16_t, uint64_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sme_svvdot_lane_za64_u16_vg1x4))) void svvdot_lane_za64_vg1x4(uint32_t, svuint16x4_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_u8))) +svuint8_t svreadz_hor_za128_u8(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_u32))) +svuint32_t svreadz_hor_za128_u32(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_u64))) +svuint64_t svreadz_hor_za128_u64(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_u16))) +svuint16_t svreadz_hor_za128_u16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_bf16))) +svbfloat16_t svreadz_hor_za128_bf16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_s8))) +svint8_t svreadz_hor_za128_s8(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_f64))) +svfloat64_t svreadz_hor_za128_f64(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_f32))) +svfloat32_t svreadz_hor_za128_f32(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_f16))) +svfloat16_t svreadz_hor_za128_f16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_s32))) +svint32_t svreadz_hor_za128_s32(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_s64))) +svint64_t svreadz_hor_za128_s64(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za128_s16))) +svint16_t svreadz_hor_za128_s16(uint64_t, 
uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_u16))) +svuint16_t svreadz_hor_za16_u16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_bf16))) +svbfloat16_t svreadz_hor_za16_bf16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_f16))) +svfloat16_t svreadz_hor_za16_f16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_s16))) +svint16_t svreadz_hor_za16_s16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_u16_vg2))) +svuint16x2_t svreadz_hor_za16_u16_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_bf16_vg2))) +svbfloat16x2_t svreadz_hor_za16_bf16_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_f16_vg2))) +svfloat16x2_t svreadz_hor_za16_f16_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_s16_vg2))) +svint16x2_t svreadz_hor_za16_s16_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_u16_vg4))) +svuint16x4_t svreadz_hor_za16_u16_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_bf16_vg4))) +svbfloat16x4_t svreadz_hor_za16_bf16_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_f16_vg4))) +svfloat16x4_t svreadz_hor_za16_f16_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za16_s16_vg4))) +svint16x4_t svreadz_hor_za16_s16_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_u32))) +svuint32_t svreadz_hor_za32_u32(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_f32))) 
+svfloat32_t svreadz_hor_za32_f32(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_s32))) +svint32_t svreadz_hor_za32_s32(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_u32_vg2))) +svuint32x2_t svreadz_hor_za32_u32_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_f32_vg2))) +svfloat32x2_t svreadz_hor_za32_f32_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_s32_vg2))) +svint32x2_t svreadz_hor_za32_s32_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_u32_vg4))) +svuint32x4_t svreadz_hor_za32_u32_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_f32_vg4))) +svfloat32x4_t svreadz_hor_za32_f32_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za32_s32_vg4))) +svint32x4_t svreadz_hor_za32_s32_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_u64))) +svuint64_t svreadz_hor_za64_u64(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_f64))) +svfloat64_t svreadz_hor_za64_f64(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_s64))) +svint64_t svreadz_hor_za64_s64(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_u64_vg2))) +svuint64x2_t svreadz_hor_za64_u64_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_f64_vg2))) +svfloat64x2_t svreadz_hor_za64_f64_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_s64_vg2))) +svint64x2_t svreadz_hor_za64_s64_vg2(uint64_t, uint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_u64_vg4))) +svuint64x4_t svreadz_hor_za64_u64_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_f64_vg4))) +svfloat64x4_t svreadz_hor_za64_f64_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za64_s64_vg4))) +svint64x4_t svreadz_hor_za64_s64_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_u8))) +svuint8_t svreadz_hor_za8_u8(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_s8))) +svint8_t svreadz_hor_za8_s8(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_u8_vg2))) +svuint8x2_t svreadz_hor_za8_u8_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_s8_vg2))) +svint8x2_t svreadz_hor_za8_s8_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_u8_vg4))) +svuint8x4_t svreadz_hor_za8_u8_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_hor_za8_s8_vg4))) +svint8x4_t svreadz_hor_za8_s8_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_u8))) +svuint8_t svreadz_ver_za128_u8(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_u32))) +svuint32_t svreadz_ver_za128_u32(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_u64))) +svuint64_t svreadz_ver_za128_u64(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_u16))) +svuint16_t svreadz_ver_za128_u16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_bf16))) +svbfloat16_t svreadz_ver_za128_bf16(uint64_t, uint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_s8))) +svint8_t svreadz_ver_za128_s8(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_f64))) +svfloat64_t svreadz_ver_za128_f64(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_f32))) +svfloat32_t svreadz_ver_za128_f32(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_f16))) +svfloat16_t svreadz_ver_za128_f16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_s32))) +svint32_t svreadz_ver_za128_s32(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_s64))) +svint64_t svreadz_ver_za128_s64(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za128_s16))) +svint16_t svreadz_ver_za128_s16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_u16))) +svuint16_t svreadz_ver_za16_u16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_bf16))) +svbfloat16_t svreadz_ver_za16_bf16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_f16))) +svfloat16_t svreadz_ver_za16_f16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_s16))) +svint16_t svreadz_ver_za16_s16(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_u16_vg2))) +svuint16x2_t svreadz_ver_za16_u16_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_bf16_vg2))) +svbfloat16x2_t svreadz_ver_za16_bf16_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_f16_vg2))) +svfloat16x2_t svreadz_ver_za16_f16_vg2(uint64_t, uint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_s16_vg2))) +svint16x2_t svreadz_ver_za16_s16_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_u16_vg4))) +svuint16x4_t svreadz_ver_za16_u16_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_bf16_vg4))) +svbfloat16x4_t svreadz_ver_za16_bf16_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_f16_vg4))) +svfloat16x4_t svreadz_ver_za16_f16_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za16_s16_vg4))) +svint16x4_t svreadz_ver_za16_s16_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_u32))) +svuint32_t svreadz_ver_za32_u32(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_f32))) +svfloat32_t svreadz_ver_za32_f32(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_s32))) +svint32_t svreadz_ver_za32_s32(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_u32_vg2))) +svuint32x2_t svreadz_ver_za32_u32_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_f32_vg2))) +svfloat32x2_t svreadz_ver_za32_f32_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_s32_vg2))) +svint32x2_t svreadz_ver_za32_s32_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_u32_vg4))) +svuint32x4_t svreadz_ver_za32_u32_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_f32_vg4))) +svfloat32x4_t svreadz_ver_za32_f32_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za32_s32_vg4))) 
+svint32x4_t svreadz_ver_za32_s32_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_u64))) +svuint64_t svreadz_ver_za64_u64(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_f64))) +svfloat64_t svreadz_ver_za64_f64(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_s64))) +svint64_t svreadz_ver_za64_s64(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_u64_vg2))) +svuint64x2_t svreadz_ver_za64_u64_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_f64_vg2))) +svfloat64x2_t svreadz_ver_za64_f64_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_s64_vg2))) +svint64x2_t svreadz_ver_za64_s64_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_u64_vg4))) +svuint64x4_t svreadz_ver_za64_u64_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_f64_vg4))) +svfloat64x4_t svreadz_ver_za64_f64_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za64_s64_vg4))) +svint64x4_t svreadz_ver_za64_s64_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_u8))) +svuint8_t svreadz_ver_za8_u8(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_s8))) +svint8_t svreadz_ver_za8_s8(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_u8_vg2))) +svuint8x2_t svreadz_ver_za8_u8_vg2(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_s8_vg2))) +svint8x2_t svreadz_ver_za8_s8_vg2(uint64_t, uint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_u8_vg4))) +svuint8x4_t svreadz_ver_za8_u8_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_ver_za8_s8_vg4))) +svint8x4_t svreadz_ver_za8_s8_vg4(uint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_u16_vg1x2))) +svuint16x2_t svreadz_za16_u16_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_bf16_vg1x2))) +svbfloat16x2_t svreadz_za16_bf16_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_f16_vg1x2))) +svfloat16x2_t svreadz_za16_f16_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_s16_vg1x2))) +svint16x2_t svreadz_za16_s16_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_u16_vg1x4))) +svuint16x4_t svreadz_za16_u16_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_bf16_vg1x4))) +svbfloat16x4_t svreadz_za16_bf16_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_f16_vg1x4))) +svfloat16x4_t svreadz_za16_f16_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za16_s16_vg1x4))) +svint16x4_t svreadz_za16_s16_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za32_u32_vg1x2))) +svuint32x2_t svreadz_za32_u32_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za32_f32_vg1x2))) +svfloat32x2_t svreadz_za32_f32_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za32_s32_vg1x2))) +svint32x2_t svreadz_za32_s32_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za32_u32_vg1x4))) +svuint32x4_t svreadz_za32_u32_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za32_f32_vg1x4))) 
+svfloat32x4_t svreadz_za32_f32_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za32_s32_vg1x4))) +svint32x4_t svreadz_za32_s32_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za64_u64_vg1x2))) +svuint64x2_t svreadz_za64_u64_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za64_f64_vg1x2))) +svfloat64x2_t svreadz_za64_f64_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za64_s64_vg1x2))) +svint64x2_t svreadz_za64_s64_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za64_u64_vg1x4))) +svuint64x4_t svreadz_za64_u64_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za64_f64_vg1x4))) +svfloat64x4_t svreadz_za64_f64_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za64_s64_vg1x4))) +svint64x4_t svreadz_za64_s64_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za8_u8_vg1x2))) +svuint8x2_t svreadz_za8_u8_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za8_s8_vg1x2))) +svint8x2_t svreadz_za8_s8_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za8_u8_vg1x4))) +svuint8x4_t svreadz_za8_u8_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svreadz_za8_s8_vg1x4))) +svint8x4_t svreadz_za8_s8_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg1x2))) +void svzero_za64_vg1x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg1x4))) +void svzero_za64_vg1x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg2x1))) +void svzero_za64_vg2x1(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg2x2))) +void svzero_za64_vg2x2(uint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg2x4))) +void svzero_za64_vg2x4(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg4x1))) +void svzero_za64_vg4x1(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg4x2))) +void svzero_za64_vg4x2(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sme_svzero_za64_vg4x4))) +void svzero_za64_vg4x4(uint32_t); #ifdef __cplusplus } // extern "C" #endif diff --git a/lib/include/arm_sve.h b/lib/include/arm_sve.h index 3990f803c5a4..87691e03cecb 100644 --- a/lib/include/arm_sve.h +++ b/lib/include/arm_sve.h @@ -124,27532 +124,8062 @@ enum svprfop #define __aio static __inline__ __attribute__((__always_inline__, __nodebug__, __overloadable__)) -#define svreinterpret_s8_s8(...) __builtin_sve_reinterpret_s8_s8(__VA_ARGS__) -#define svreinterpret_s8_u8(...) __builtin_sve_reinterpret_s8_u8(__VA_ARGS__) -#define svreinterpret_s8_s16(...) __builtin_sve_reinterpret_s8_s16(__VA_ARGS__) -#define svreinterpret_s8_u16(...) __builtin_sve_reinterpret_s8_u16(__VA_ARGS__) -#define svreinterpret_s8_s32(...) __builtin_sve_reinterpret_s8_s32(__VA_ARGS__) -#define svreinterpret_s8_u32(...) __builtin_sve_reinterpret_s8_u32(__VA_ARGS__) -#define svreinterpret_s8_s64(...) __builtin_sve_reinterpret_s8_s64(__VA_ARGS__) -#define svreinterpret_s8_u64(...) __builtin_sve_reinterpret_s8_u64(__VA_ARGS__) -#define svreinterpret_s8_f16(...) __builtin_sve_reinterpret_s8_f16(__VA_ARGS__) -#define svreinterpret_s8_bf16(...) __builtin_sve_reinterpret_s8_bf16(__VA_ARGS__) -#define svreinterpret_s8_f32(...) __builtin_sve_reinterpret_s8_f32(__VA_ARGS__) -#define svreinterpret_s8_f64(...) __builtin_sve_reinterpret_s8_f64(__VA_ARGS__) -#define svreinterpret_u8_s8(...) __builtin_sve_reinterpret_u8_s8(__VA_ARGS__) -#define svreinterpret_u8_u8(...) __builtin_sve_reinterpret_u8_u8(__VA_ARGS__) -#define svreinterpret_u8_s16(...) 
__builtin_sve_reinterpret_u8_s16(__VA_ARGS__) -#define svreinterpret_u8_u16(...) __builtin_sve_reinterpret_u8_u16(__VA_ARGS__) -#define svreinterpret_u8_s32(...) __builtin_sve_reinterpret_u8_s32(__VA_ARGS__) -#define svreinterpret_u8_u32(...) __builtin_sve_reinterpret_u8_u32(__VA_ARGS__) -#define svreinterpret_u8_s64(...) __builtin_sve_reinterpret_u8_s64(__VA_ARGS__) -#define svreinterpret_u8_u64(...) __builtin_sve_reinterpret_u8_u64(__VA_ARGS__) -#define svreinterpret_u8_f16(...) __builtin_sve_reinterpret_u8_f16(__VA_ARGS__) -#define svreinterpret_u8_bf16(...) __builtin_sve_reinterpret_u8_bf16(__VA_ARGS__) -#define svreinterpret_u8_f32(...) __builtin_sve_reinterpret_u8_f32(__VA_ARGS__) -#define svreinterpret_u8_f64(...) __builtin_sve_reinterpret_u8_f64(__VA_ARGS__) -#define svreinterpret_s16_s8(...) __builtin_sve_reinterpret_s16_s8(__VA_ARGS__) -#define svreinterpret_s16_u8(...) __builtin_sve_reinterpret_s16_u8(__VA_ARGS__) -#define svreinterpret_s16_s16(...) __builtin_sve_reinterpret_s16_s16(__VA_ARGS__) -#define svreinterpret_s16_u16(...) __builtin_sve_reinterpret_s16_u16(__VA_ARGS__) -#define svreinterpret_s16_s32(...) __builtin_sve_reinterpret_s16_s32(__VA_ARGS__) -#define svreinterpret_s16_u32(...) __builtin_sve_reinterpret_s16_u32(__VA_ARGS__) -#define svreinterpret_s16_s64(...) __builtin_sve_reinterpret_s16_s64(__VA_ARGS__) -#define svreinterpret_s16_u64(...) __builtin_sve_reinterpret_s16_u64(__VA_ARGS__) -#define svreinterpret_s16_f16(...) __builtin_sve_reinterpret_s16_f16(__VA_ARGS__) -#define svreinterpret_s16_bf16(...) __builtin_sve_reinterpret_s16_bf16(__VA_ARGS__) -#define svreinterpret_s16_f32(...) __builtin_sve_reinterpret_s16_f32(__VA_ARGS__) -#define svreinterpret_s16_f64(...) __builtin_sve_reinterpret_s16_f64(__VA_ARGS__) -#define svreinterpret_u16_s8(...) __builtin_sve_reinterpret_u16_s8(__VA_ARGS__) -#define svreinterpret_u16_u8(...) __builtin_sve_reinterpret_u16_u8(__VA_ARGS__) -#define svreinterpret_u16_s16(...) 
__builtin_sve_reinterpret_u16_s16(__VA_ARGS__) -#define svreinterpret_u16_u16(...) __builtin_sve_reinterpret_u16_u16(__VA_ARGS__) -#define svreinterpret_u16_s32(...) __builtin_sve_reinterpret_u16_s32(__VA_ARGS__) -#define svreinterpret_u16_u32(...) __builtin_sve_reinterpret_u16_u32(__VA_ARGS__) -#define svreinterpret_u16_s64(...) __builtin_sve_reinterpret_u16_s64(__VA_ARGS__) -#define svreinterpret_u16_u64(...) __builtin_sve_reinterpret_u16_u64(__VA_ARGS__) -#define svreinterpret_u16_f16(...) __builtin_sve_reinterpret_u16_f16(__VA_ARGS__) -#define svreinterpret_u16_bf16(...) __builtin_sve_reinterpret_u16_bf16(__VA_ARGS__) -#define svreinterpret_u16_f32(...) __builtin_sve_reinterpret_u16_f32(__VA_ARGS__) -#define svreinterpret_u16_f64(...) __builtin_sve_reinterpret_u16_f64(__VA_ARGS__) -#define svreinterpret_s32_s8(...) __builtin_sve_reinterpret_s32_s8(__VA_ARGS__) -#define svreinterpret_s32_u8(...) __builtin_sve_reinterpret_s32_u8(__VA_ARGS__) -#define svreinterpret_s32_s16(...) __builtin_sve_reinterpret_s32_s16(__VA_ARGS__) -#define svreinterpret_s32_u16(...) __builtin_sve_reinterpret_s32_u16(__VA_ARGS__) -#define svreinterpret_s32_s32(...) __builtin_sve_reinterpret_s32_s32(__VA_ARGS__) -#define svreinterpret_s32_u32(...) __builtin_sve_reinterpret_s32_u32(__VA_ARGS__) -#define svreinterpret_s32_s64(...) __builtin_sve_reinterpret_s32_s64(__VA_ARGS__) -#define svreinterpret_s32_u64(...) __builtin_sve_reinterpret_s32_u64(__VA_ARGS__) -#define svreinterpret_s32_f16(...) __builtin_sve_reinterpret_s32_f16(__VA_ARGS__) -#define svreinterpret_s32_bf16(...) __builtin_sve_reinterpret_s32_bf16(__VA_ARGS__) -#define svreinterpret_s32_f32(...) __builtin_sve_reinterpret_s32_f32(__VA_ARGS__) -#define svreinterpret_s32_f64(...) __builtin_sve_reinterpret_s32_f64(__VA_ARGS__) -#define svreinterpret_u32_s8(...) __builtin_sve_reinterpret_u32_s8(__VA_ARGS__) -#define svreinterpret_u32_u8(...) __builtin_sve_reinterpret_u32_u8(__VA_ARGS__) -#define svreinterpret_u32_s16(...) 
__builtin_sve_reinterpret_u32_s16(__VA_ARGS__) -#define svreinterpret_u32_u16(...) __builtin_sve_reinterpret_u32_u16(__VA_ARGS__) -#define svreinterpret_u32_s32(...) __builtin_sve_reinterpret_u32_s32(__VA_ARGS__) -#define svreinterpret_u32_u32(...) __builtin_sve_reinterpret_u32_u32(__VA_ARGS__) -#define svreinterpret_u32_s64(...) __builtin_sve_reinterpret_u32_s64(__VA_ARGS__) -#define svreinterpret_u32_u64(...) __builtin_sve_reinterpret_u32_u64(__VA_ARGS__) -#define svreinterpret_u32_f16(...) __builtin_sve_reinterpret_u32_f16(__VA_ARGS__) -#define svreinterpret_u32_bf16(...) __builtin_sve_reinterpret_u32_bf16(__VA_ARGS__) -#define svreinterpret_u32_f32(...) __builtin_sve_reinterpret_u32_f32(__VA_ARGS__) -#define svreinterpret_u32_f64(...) __builtin_sve_reinterpret_u32_f64(__VA_ARGS__) -#define svreinterpret_s64_s8(...) __builtin_sve_reinterpret_s64_s8(__VA_ARGS__) -#define svreinterpret_s64_u8(...) __builtin_sve_reinterpret_s64_u8(__VA_ARGS__) -#define svreinterpret_s64_s16(...) __builtin_sve_reinterpret_s64_s16(__VA_ARGS__) -#define svreinterpret_s64_u16(...) __builtin_sve_reinterpret_s64_u16(__VA_ARGS__) -#define svreinterpret_s64_s32(...) __builtin_sve_reinterpret_s64_s32(__VA_ARGS__) -#define svreinterpret_s64_u32(...) __builtin_sve_reinterpret_s64_u32(__VA_ARGS__) -#define svreinterpret_s64_s64(...) __builtin_sve_reinterpret_s64_s64(__VA_ARGS__) -#define svreinterpret_s64_u64(...) __builtin_sve_reinterpret_s64_u64(__VA_ARGS__) -#define svreinterpret_s64_f16(...) __builtin_sve_reinterpret_s64_f16(__VA_ARGS__) -#define svreinterpret_s64_bf16(...) __builtin_sve_reinterpret_s64_bf16(__VA_ARGS__) -#define svreinterpret_s64_f32(...) __builtin_sve_reinterpret_s64_f32(__VA_ARGS__) -#define svreinterpret_s64_f64(...) __builtin_sve_reinterpret_s64_f64(__VA_ARGS__) -#define svreinterpret_u64_s8(...) __builtin_sve_reinterpret_u64_s8(__VA_ARGS__) -#define svreinterpret_u64_u8(...) __builtin_sve_reinterpret_u64_u8(__VA_ARGS__) -#define svreinterpret_u64_s16(...) 
__builtin_sve_reinterpret_u64_s16(__VA_ARGS__) -#define svreinterpret_u64_u16(...) __builtin_sve_reinterpret_u64_u16(__VA_ARGS__) -#define svreinterpret_u64_s32(...) __builtin_sve_reinterpret_u64_s32(__VA_ARGS__) -#define svreinterpret_u64_u32(...) __builtin_sve_reinterpret_u64_u32(__VA_ARGS__) -#define svreinterpret_u64_s64(...) __builtin_sve_reinterpret_u64_s64(__VA_ARGS__) -#define svreinterpret_u64_u64(...) __builtin_sve_reinterpret_u64_u64(__VA_ARGS__) -#define svreinterpret_u64_f16(...) __builtin_sve_reinterpret_u64_f16(__VA_ARGS__) -#define svreinterpret_u64_bf16(...) __builtin_sve_reinterpret_u64_bf16(__VA_ARGS__) -#define svreinterpret_u64_f32(...) __builtin_sve_reinterpret_u64_f32(__VA_ARGS__) -#define svreinterpret_u64_f64(...) __builtin_sve_reinterpret_u64_f64(__VA_ARGS__) -#define svreinterpret_f16_s8(...) __builtin_sve_reinterpret_f16_s8(__VA_ARGS__) -#define svreinterpret_f16_u8(...) __builtin_sve_reinterpret_f16_u8(__VA_ARGS__) -#define svreinterpret_f16_s16(...) __builtin_sve_reinterpret_f16_s16(__VA_ARGS__) -#define svreinterpret_f16_u16(...) __builtin_sve_reinterpret_f16_u16(__VA_ARGS__) -#define svreinterpret_f16_s32(...) __builtin_sve_reinterpret_f16_s32(__VA_ARGS__) -#define svreinterpret_f16_u32(...) __builtin_sve_reinterpret_f16_u32(__VA_ARGS__) -#define svreinterpret_f16_s64(...) __builtin_sve_reinterpret_f16_s64(__VA_ARGS__) -#define svreinterpret_f16_u64(...) __builtin_sve_reinterpret_f16_u64(__VA_ARGS__) -#define svreinterpret_f16_f16(...) __builtin_sve_reinterpret_f16_f16(__VA_ARGS__) -#define svreinterpret_f16_bf16(...) __builtin_sve_reinterpret_f16_bf16(__VA_ARGS__) -#define svreinterpret_f16_f32(...) __builtin_sve_reinterpret_f16_f32(__VA_ARGS__) -#define svreinterpret_f16_f64(...) __builtin_sve_reinterpret_f16_f64(__VA_ARGS__) -#define svreinterpret_bf16_s8(...) __builtin_sve_reinterpret_bf16_s8(__VA_ARGS__) -#define svreinterpret_bf16_u8(...) __builtin_sve_reinterpret_bf16_u8(__VA_ARGS__) -#define svreinterpret_bf16_s16(...) 
__builtin_sve_reinterpret_bf16_s16(__VA_ARGS__) -#define svreinterpret_bf16_u16(...) __builtin_sve_reinterpret_bf16_u16(__VA_ARGS__) -#define svreinterpret_bf16_s32(...) __builtin_sve_reinterpret_bf16_s32(__VA_ARGS__) -#define svreinterpret_bf16_u32(...) __builtin_sve_reinterpret_bf16_u32(__VA_ARGS__) -#define svreinterpret_bf16_s64(...) __builtin_sve_reinterpret_bf16_s64(__VA_ARGS__) -#define svreinterpret_bf16_u64(...) __builtin_sve_reinterpret_bf16_u64(__VA_ARGS__) -#define svreinterpret_bf16_f16(...) __builtin_sve_reinterpret_bf16_f16(__VA_ARGS__) -#define svreinterpret_bf16_bf16(...) __builtin_sve_reinterpret_bf16_bf16(__VA_ARGS__) -#define svreinterpret_bf16_f32(...) __builtin_sve_reinterpret_bf16_f32(__VA_ARGS__) -#define svreinterpret_bf16_f64(...) __builtin_sve_reinterpret_bf16_f64(__VA_ARGS__) -#define svreinterpret_f32_s8(...) __builtin_sve_reinterpret_f32_s8(__VA_ARGS__) -#define svreinterpret_f32_u8(...) __builtin_sve_reinterpret_f32_u8(__VA_ARGS__) -#define svreinterpret_f32_s16(...) __builtin_sve_reinterpret_f32_s16(__VA_ARGS__) -#define svreinterpret_f32_u16(...) __builtin_sve_reinterpret_f32_u16(__VA_ARGS__) -#define svreinterpret_f32_s32(...) __builtin_sve_reinterpret_f32_s32(__VA_ARGS__) -#define svreinterpret_f32_u32(...) __builtin_sve_reinterpret_f32_u32(__VA_ARGS__) -#define svreinterpret_f32_s64(...) __builtin_sve_reinterpret_f32_s64(__VA_ARGS__) -#define svreinterpret_f32_u64(...) __builtin_sve_reinterpret_f32_u64(__VA_ARGS__) -#define svreinterpret_f32_f16(...) __builtin_sve_reinterpret_f32_f16(__VA_ARGS__) -#define svreinterpret_f32_bf16(...) __builtin_sve_reinterpret_f32_bf16(__VA_ARGS__) -#define svreinterpret_f32_f32(...) __builtin_sve_reinterpret_f32_f32(__VA_ARGS__) -#define svreinterpret_f32_f64(...) __builtin_sve_reinterpret_f32_f64(__VA_ARGS__) -#define svreinterpret_f64_s8(...) __builtin_sve_reinterpret_f64_s8(__VA_ARGS__) -#define svreinterpret_f64_u8(...) 
__builtin_sve_reinterpret_f64_u8(__VA_ARGS__) -#define svreinterpret_f64_s16(...) __builtin_sve_reinterpret_f64_s16(__VA_ARGS__) -#define svreinterpret_f64_u16(...) __builtin_sve_reinterpret_f64_u16(__VA_ARGS__) -#define svreinterpret_f64_s32(...) __builtin_sve_reinterpret_f64_s32(__VA_ARGS__) -#define svreinterpret_f64_u32(...) __builtin_sve_reinterpret_f64_u32(__VA_ARGS__) -#define svreinterpret_f64_s64(...) __builtin_sve_reinterpret_f64_s64(__VA_ARGS__) -#define svreinterpret_f64_u64(...) __builtin_sve_reinterpret_f64_u64(__VA_ARGS__) -#define svreinterpret_f64_f16(...) __builtin_sve_reinterpret_f64_f16(__VA_ARGS__) -#define svreinterpret_f64_bf16(...) __builtin_sve_reinterpret_f64_bf16(__VA_ARGS__) -#define svreinterpret_f64_f32(...) __builtin_sve_reinterpret_f64_f32(__VA_ARGS__) -#define svreinterpret_f64_f64(...) __builtin_sve_reinterpret_f64_f64(__VA_ARGS__) -__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_s8(op); -} - -__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svuint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_u8(op); -} - -__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_s16(op); -} - -__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svuint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_u16(op); -} - -__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_s32(op); -} - -__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svuint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_u32(op); -} - -__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_s64(op); -} - -__aio 
__attribute__((target("sve"))) svint8_t svreinterpret_s8(svuint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_u64(op); -} - -__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_f16(op); -} - -__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svbfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_bf16(op); -} - -__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svfloat32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_f32(op); -} - -__aio __attribute__((target("sve"))) svint8_t svreinterpret_s8(svfloat64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_f64(op); -} - -__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_s8(op); -} - -__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svuint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_u8(op); -} - -__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_s16(op); -} - -__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svuint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_u16(op); -} - -__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_s32(op); -} - -__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svuint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_u32(op); -} - -__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_s64(op); -} - -__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svuint64_t op) 
__arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_u64(op); -} - -__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_f16(op); -} - -__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svbfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_bf16(op); -} - -__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svfloat32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_f32(op); -} - -__aio __attribute__((target("sve"))) svuint8_t svreinterpret_u8(svfloat64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_f64(op); -} - -__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_s8(op); -} - -__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svuint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_u8(op); -} - -__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_s16(op); -} - -__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svuint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_u16(op); -} - -__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_s32(op); -} - -__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svuint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_u32(op); -} - -__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_s64(op); -} - -__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svuint64_t op) __arm_streaming_compatible { - return 
__builtin_sve_reinterpret_s16_u64(op); -} - -__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_f16(op); -} - -__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svbfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_bf16(op); -} - -__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svfloat32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_f32(op); -} - -__aio __attribute__((target("sve"))) svint16_t svreinterpret_s16(svfloat64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_f64(op); -} - -__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_s8(op); -} - -__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svuint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_u8(op); -} - -__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_s16(op); -} - -__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svuint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_u16(op); -} - -__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_s32(op); -} - -__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svuint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_u32(op); -} - -__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_s64(op); -} - -__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svuint64_t op) __arm_streaming_compatible { - return 
__builtin_sve_reinterpret_u16_u64(op); -} - -__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_f16(op); -} - -__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svbfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_bf16(op); -} - -__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svfloat32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_f32(op); -} - -__aio __attribute__((target("sve"))) svuint16_t svreinterpret_u16(svfloat64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_f64(op); -} - -__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_s8(op); -} - -__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svuint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_u8(op); -} - -__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_s16(op); -} - -__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svuint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_u16(op); -} - -__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_s32(op); -} - -__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svuint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_u32(op); -} - -__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_s64(op); -} - -__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svuint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_u64(op); -} 
- -__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_f16(op); -} - -__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svbfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_bf16(op); -} - -__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svfloat32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_f32(op); -} - -__aio __attribute__((target("sve"))) svint32_t svreinterpret_s32(svfloat64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_f64(op); -} - -__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_s8(op); -} - -__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svuint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_u8(op); -} - -__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_s16(op); -} - -__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svuint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_u16(op); -} - -__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_s32(op); -} - -__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svuint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_u32(op); -} - -__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_s64(op); -} - -__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svuint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_u64(op); -} - -__aio 
__attribute__((target("sve"))) svuint32_t svreinterpret_u32(svfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_f16(op); -} - -__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svbfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_bf16(op); -} - -__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svfloat32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_f32(op); -} - -__aio __attribute__((target("sve"))) svuint32_t svreinterpret_u32(svfloat64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_f64(op); -} - -__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_s8(op); -} - -__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svuint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_u8(op); -} - -__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_s16(op); -} - -__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svuint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_u16(op); -} - -__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_s32(op); -} - -__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svuint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_u32(op); -} - -__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_s64(op); -} - -__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svuint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_u64(op); -} - -__aio __attribute__((target("sve"))) svint64_t 
svreinterpret_s64(svfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_f16(op); -} - -__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svbfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_bf16(op); -} - -__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svfloat32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_f32(op); -} - -__aio __attribute__((target("sve"))) svint64_t svreinterpret_s64(svfloat64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_f64(op); -} - -__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_s8(op); -} - -__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svuint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_u8(op); -} - -__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_s16(op); -} - -__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svuint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_u16(op); -} - -__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_s32(op); -} - -__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svuint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_u32(op); -} - -__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_s64(op); -} - -__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svuint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_u64(op); -} - -__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svfloat16_t op) 
__arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_f16(op); -} - -__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svbfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_bf16(op); -} - -__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svfloat32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_f32(op); -} - -__aio __attribute__((target("sve"))) svuint64_t svreinterpret_u64(svfloat64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_f64(op); -} - -__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_s8(op); -} - -__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svuint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_u8(op); -} - -__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_s16(op); -} - -__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svuint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_u16(op); -} - -__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_s32(op); -} - -__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svuint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_u32(op); -} - -__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_s64(op); -} - -__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svuint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_u64(op); -} - -__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svfloat16_t op) 
__arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_f16(op); -} - -__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svbfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_bf16(op); -} - -__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svfloat32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_f32(op); -} - -__aio __attribute__((target("sve"))) svfloat16_t svreinterpret_f16(svfloat64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_f64(op); -} - -__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_s8(op); -} - -__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svuint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_u8(op); -} - -__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_s16(op); -} - -__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svuint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_u16(op); -} - -__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_s32(op); -} - -__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svuint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_u32(op); -} - -__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_s64(op); -} - -__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svuint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_u64(op); -} - -__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svfloat16_t 
op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_f16(op); -} - -__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svbfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_bf16(op); -} - -__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svfloat32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_f32(op); -} - -__aio __attribute__((target("sve"))) svbfloat16_t svreinterpret_bf16(svfloat64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_f64(op); -} - -__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_s8(op); -} - -__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svuint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_u8(op); -} - -__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_s16(op); -} - -__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svuint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_u16(op); -} - -__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_s32(op); -} - -__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svuint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_u32(op); -} - -__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_s64(op); -} - -__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svuint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_u64(op); -} - -__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svfloat16_t op) 
__arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_f16(op); -} - -__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svbfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_bf16(op); -} - -__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svfloat32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_f32(op); -} - -__aio __attribute__((target("sve"))) svfloat32_t svreinterpret_f32(svfloat64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_f64(op); -} - -__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_s8(op); -} - -__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svuint8_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_u8(op); -} - -__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_s16(op); -} - -__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svuint16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_u16(op); -} - -__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_s32(op); -} - -__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svuint32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_u32(op); -} - -__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_s64(op); -} - -__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svuint64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_u64(op); -} - -__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svfloat16_t op) 
__arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_f16(op); -} - -__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svbfloat16_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_bf16(op); -} - -__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svfloat32_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_f32(op); -} - -__aio __attribute__((target("sve"))) svfloat64_t svreinterpret_f64(svfloat64_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_f64(op); -} - -#define svreinterpret_s8_s8_x2(...) __builtin_sve_reinterpret_s8_s8_x2(__VA_ARGS__) -#define svreinterpret_s8_u8_x2(...) __builtin_sve_reinterpret_s8_u8_x2(__VA_ARGS__) -#define svreinterpret_s8_s16_x2(...) __builtin_sve_reinterpret_s8_s16_x2(__VA_ARGS__) -#define svreinterpret_s8_u16_x2(...) __builtin_sve_reinterpret_s8_u16_x2(__VA_ARGS__) -#define svreinterpret_s8_s32_x2(...) __builtin_sve_reinterpret_s8_s32_x2(__VA_ARGS__) -#define svreinterpret_s8_u32_x2(...) __builtin_sve_reinterpret_s8_u32_x2(__VA_ARGS__) -#define svreinterpret_s8_s64_x2(...) __builtin_sve_reinterpret_s8_s64_x2(__VA_ARGS__) -#define svreinterpret_s8_u64_x2(...) __builtin_sve_reinterpret_s8_u64_x2(__VA_ARGS__) -#define svreinterpret_s8_f16_x2(...) __builtin_sve_reinterpret_s8_f16_x2(__VA_ARGS__) -#define svreinterpret_s8_bf16_x2(...) __builtin_sve_reinterpret_s8_bf16_x2(__VA_ARGS__) -#define svreinterpret_s8_f32_x2(...) __builtin_sve_reinterpret_s8_f32_x2(__VA_ARGS__) -#define svreinterpret_s8_f64_x2(...) __builtin_sve_reinterpret_s8_f64_x2(__VA_ARGS__) -#define svreinterpret_u8_s8_x2(...) __builtin_sve_reinterpret_u8_s8_x2(__VA_ARGS__) -#define svreinterpret_u8_u8_x2(...) __builtin_sve_reinterpret_u8_u8_x2(__VA_ARGS__) -#define svreinterpret_u8_s16_x2(...) __builtin_sve_reinterpret_u8_s16_x2(__VA_ARGS__) -#define svreinterpret_u8_u16_x2(...) 
__builtin_sve_reinterpret_u8_u16_x2(__VA_ARGS__) -#define svreinterpret_u8_s32_x2(...) __builtin_sve_reinterpret_u8_s32_x2(__VA_ARGS__) -#define svreinterpret_u8_u32_x2(...) __builtin_sve_reinterpret_u8_u32_x2(__VA_ARGS__) -#define svreinterpret_u8_s64_x2(...) __builtin_sve_reinterpret_u8_s64_x2(__VA_ARGS__) -#define svreinterpret_u8_u64_x2(...) __builtin_sve_reinterpret_u8_u64_x2(__VA_ARGS__) -#define svreinterpret_u8_f16_x2(...) __builtin_sve_reinterpret_u8_f16_x2(__VA_ARGS__) -#define svreinterpret_u8_bf16_x2(...) __builtin_sve_reinterpret_u8_bf16_x2(__VA_ARGS__) -#define svreinterpret_u8_f32_x2(...) __builtin_sve_reinterpret_u8_f32_x2(__VA_ARGS__) -#define svreinterpret_u8_f64_x2(...) __builtin_sve_reinterpret_u8_f64_x2(__VA_ARGS__) -#define svreinterpret_s16_s8_x2(...) __builtin_sve_reinterpret_s16_s8_x2(__VA_ARGS__) -#define svreinterpret_s16_u8_x2(...) __builtin_sve_reinterpret_s16_u8_x2(__VA_ARGS__) -#define svreinterpret_s16_s16_x2(...) __builtin_sve_reinterpret_s16_s16_x2(__VA_ARGS__) -#define svreinterpret_s16_u16_x2(...) __builtin_sve_reinterpret_s16_u16_x2(__VA_ARGS__) -#define svreinterpret_s16_s32_x2(...) __builtin_sve_reinterpret_s16_s32_x2(__VA_ARGS__) -#define svreinterpret_s16_u32_x2(...) __builtin_sve_reinterpret_s16_u32_x2(__VA_ARGS__) -#define svreinterpret_s16_s64_x2(...) __builtin_sve_reinterpret_s16_s64_x2(__VA_ARGS__) -#define svreinterpret_s16_u64_x2(...) __builtin_sve_reinterpret_s16_u64_x2(__VA_ARGS__) -#define svreinterpret_s16_f16_x2(...) __builtin_sve_reinterpret_s16_f16_x2(__VA_ARGS__) -#define svreinterpret_s16_bf16_x2(...) __builtin_sve_reinterpret_s16_bf16_x2(__VA_ARGS__) -#define svreinterpret_s16_f32_x2(...) __builtin_sve_reinterpret_s16_f32_x2(__VA_ARGS__) -#define svreinterpret_s16_f64_x2(...) __builtin_sve_reinterpret_s16_f64_x2(__VA_ARGS__) -#define svreinterpret_u16_s8_x2(...) __builtin_sve_reinterpret_u16_s8_x2(__VA_ARGS__) -#define svreinterpret_u16_u8_x2(...) 
__builtin_sve_reinterpret_u16_u8_x2(__VA_ARGS__) -#define svreinterpret_u16_s16_x2(...) __builtin_sve_reinterpret_u16_s16_x2(__VA_ARGS__) -#define svreinterpret_u16_u16_x2(...) __builtin_sve_reinterpret_u16_u16_x2(__VA_ARGS__) -#define svreinterpret_u16_s32_x2(...) __builtin_sve_reinterpret_u16_s32_x2(__VA_ARGS__) -#define svreinterpret_u16_u32_x2(...) __builtin_sve_reinterpret_u16_u32_x2(__VA_ARGS__) -#define svreinterpret_u16_s64_x2(...) __builtin_sve_reinterpret_u16_s64_x2(__VA_ARGS__) -#define svreinterpret_u16_u64_x2(...) __builtin_sve_reinterpret_u16_u64_x2(__VA_ARGS__) -#define svreinterpret_u16_f16_x2(...) __builtin_sve_reinterpret_u16_f16_x2(__VA_ARGS__) -#define svreinterpret_u16_bf16_x2(...) __builtin_sve_reinterpret_u16_bf16_x2(__VA_ARGS__) -#define svreinterpret_u16_f32_x2(...) __builtin_sve_reinterpret_u16_f32_x2(__VA_ARGS__) -#define svreinterpret_u16_f64_x2(...) __builtin_sve_reinterpret_u16_f64_x2(__VA_ARGS__) -#define svreinterpret_s32_s8_x2(...) __builtin_sve_reinterpret_s32_s8_x2(__VA_ARGS__) -#define svreinterpret_s32_u8_x2(...) __builtin_sve_reinterpret_s32_u8_x2(__VA_ARGS__) -#define svreinterpret_s32_s16_x2(...) __builtin_sve_reinterpret_s32_s16_x2(__VA_ARGS__) -#define svreinterpret_s32_u16_x2(...) __builtin_sve_reinterpret_s32_u16_x2(__VA_ARGS__) -#define svreinterpret_s32_s32_x2(...) __builtin_sve_reinterpret_s32_s32_x2(__VA_ARGS__) -#define svreinterpret_s32_u32_x2(...) __builtin_sve_reinterpret_s32_u32_x2(__VA_ARGS__) -#define svreinterpret_s32_s64_x2(...) __builtin_sve_reinterpret_s32_s64_x2(__VA_ARGS__) -#define svreinterpret_s32_u64_x2(...) __builtin_sve_reinterpret_s32_u64_x2(__VA_ARGS__) -#define svreinterpret_s32_f16_x2(...) __builtin_sve_reinterpret_s32_f16_x2(__VA_ARGS__) -#define svreinterpret_s32_bf16_x2(...) __builtin_sve_reinterpret_s32_bf16_x2(__VA_ARGS__) -#define svreinterpret_s32_f32_x2(...) __builtin_sve_reinterpret_s32_f32_x2(__VA_ARGS__) -#define svreinterpret_s32_f64_x2(...) 
__builtin_sve_reinterpret_s32_f64_x2(__VA_ARGS__) -#define svreinterpret_u32_s8_x2(...) __builtin_sve_reinterpret_u32_s8_x2(__VA_ARGS__) -#define svreinterpret_u32_u8_x2(...) __builtin_sve_reinterpret_u32_u8_x2(__VA_ARGS__) -#define svreinterpret_u32_s16_x2(...) __builtin_sve_reinterpret_u32_s16_x2(__VA_ARGS__) -#define svreinterpret_u32_u16_x2(...) __builtin_sve_reinterpret_u32_u16_x2(__VA_ARGS__) -#define svreinterpret_u32_s32_x2(...) __builtin_sve_reinterpret_u32_s32_x2(__VA_ARGS__) -#define svreinterpret_u32_u32_x2(...) __builtin_sve_reinterpret_u32_u32_x2(__VA_ARGS__) -#define svreinterpret_u32_s64_x2(...) __builtin_sve_reinterpret_u32_s64_x2(__VA_ARGS__) -#define svreinterpret_u32_u64_x2(...) __builtin_sve_reinterpret_u32_u64_x2(__VA_ARGS__) -#define svreinterpret_u32_f16_x2(...) __builtin_sve_reinterpret_u32_f16_x2(__VA_ARGS__) -#define svreinterpret_u32_bf16_x2(...) __builtin_sve_reinterpret_u32_bf16_x2(__VA_ARGS__) -#define svreinterpret_u32_f32_x2(...) __builtin_sve_reinterpret_u32_f32_x2(__VA_ARGS__) -#define svreinterpret_u32_f64_x2(...) __builtin_sve_reinterpret_u32_f64_x2(__VA_ARGS__) -#define svreinterpret_s64_s8_x2(...) __builtin_sve_reinterpret_s64_s8_x2(__VA_ARGS__) -#define svreinterpret_s64_u8_x2(...) __builtin_sve_reinterpret_s64_u8_x2(__VA_ARGS__) -#define svreinterpret_s64_s16_x2(...) __builtin_sve_reinterpret_s64_s16_x2(__VA_ARGS__) -#define svreinterpret_s64_u16_x2(...) __builtin_sve_reinterpret_s64_u16_x2(__VA_ARGS__) -#define svreinterpret_s64_s32_x2(...) __builtin_sve_reinterpret_s64_s32_x2(__VA_ARGS__) -#define svreinterpret_s64_u32_x2(...) __builtin_sve_reinterpret_s64_u32_x2(__VA_ARGS__) -#define svreinterpret_s64_s64_x2(...) __builtin_sve_reinterpret_s64_s64_x2(__VA_ARGS__) -#define svreinterpret_s64_u64_x2(...) __builtin_sve_reinterpret_s64_u64_x2(__VA_ARGS__) -#define svreinterpret_s64_f16_x2(...) __builtin_sve_reinterpret_s64_f16_x2(__VA_ARGS__) -#define svreinterpret_s64_bf16_x2(...) 
__builtin_sve_reinterpret_s64_bf16_x2(__VA_ARGS__) -#define svreinterpret_s64_f32_x2(...) __builtin_sve_reinterpret_s64_f32_x2(__VA_ARGS__) -#define svreinterpret_s64_f64_x2(...) __builtin_sve_reinterpret_s64_f64_x2(__VA_ARGS__) -#define svreinterpret_u64_s8_x2(...) __builtin_sve_reinterpret_u64_s8_x2(__VA_ARGS__) -#define svreinterpret_u64_u8_x2(...) __builtin_sve_reinterpret_u64_u8_x2(__VA_ARGS__) -#define svreinterpret_u64_s16_x2(...) __builtin_sve_reinterpret_u64_s16_x2(__VA_ARGS__) -#define svreinterpret_u64_u16_x2(...) __builtin_sve_reinterpret_u64_u16_x2(__VA_ARGS__) -#define svreinterpret_u64_s32_x2(...) __builtin_sve_reinterpret_u64_s32_x2(__VA_ARGS__) -#define svreinterpret_u64_u32_x2(...) __builtin_sve_reinterpret_u64_u32_x2(__VA_ARGS__) -#define svreinterpret_u64_s64_x2(...) __builtin_sve_reinterpret_u64_s64_x2(__VA_ARGS__) -#define svreinterpret_u64_u64_x2(...) __builtin_sve_reinterpret_u64_u64_x2(__VA_ARGS__) -#define svreinterpret_u64_f16_x2(...) __builtin_sve_reinterpret_u64_f16_x2(__VA_ARGS__) -#define svreinterpret_u64_bf16_x2(...) __builtin_sve_reinterpret_u64_bf16_x2(__VA_ARGS__) -#define svreinterpret_u64_f32_x2(...) __builtin_sve_reinterpret_u64_f32_x2(__VA_ARGS__) -#define svreinterpret_u64_f64_x2(...) __builtin_sve_reinterpret_u64_f64_x2(__VA_ARGS__) -#define svreinterpret_f16_s8_x2(...) __builtin_sve_reinterpret_f16_s8_x2(__VA_ARGS__) -#define svreinterpret_f16_u8_x2(...) __builtin_sve_reinterpret_f16_u8_x2(__VA_ARGS__) -#define svreinterpret_f16_s16_x2(...) __builtin_sve_reinterpret_f16_s16_x2(__VA_ARGS__) -#define svreinterpret_f16_u16_x2(...) __builtin_sve_reinterpret_f16_u16_x2(__VA_ARGS__) -#define svreinterpret_f16_s32_x2(...) __builtin_sve_reinterpret_f16_s32_x2(__VA_ARGS__) -#define svreinterpret_f16_u32_x2(...) __builtin_sve_reinterpret_f16_u32_x2(__VA_ARGS__) -#define svreinterpret_f16_s64_x2(...) __builtin_sve_reinterpret_f16_s64_x2(__VA_ARGS__) -#define svreinterpret_f16_u64_x2(...) 
__builtin_sve_reinterpret_f16_u64_x2(__VA_ARGS__) -#define svreinterpret_f16_f16_x2(...) __builtin_sve_reinterpret_f16_f16_x2(__VA_ARGS__) -#define svreinterpret_f16_bf16_x2(...) __builtin_sve_reinterpret_f16_bf16_x2(__VA_ARGS__) -#define svreinterpret_f16_f32_x2(...) __builtin_sve_reinterpret_f16_f32_x2(__VA_ARGS__) -#define svreinterpret_f16_f64_x2(...) __builtin_sve_reinterpret_f16_f64_x2(__VA_ARGS__) -#define svreinterpret_bf16_s8_x2(...) __builtin_sve_reinterpret_bf16_s8_x2(__VA_ARGS__) -#define svreinterpret_bf16_u8_x2(...) __builtin_sve_reinterpret_bf16_u8_x2(__VA_ARGS__) -#define svreinterpret_bf16_s16_x2(...) __builtin_sve_reinterpret_bf16_s16_x2(__VA_ARGS__) -#define svreinterpret_bf16_u16_x2(...) __builtin_sve_reinterpret_bf16_u16_x2(__VA_ARGS__) -#define svreinterpret_bf16_s32_x2(...) __builtin_sve_reinterpret_bf16_s32_x2(__VA_ARGS__) -#define svreinterpret_bf16_u32_x2(...) __builtin_sve_reinterpret_bf16_u32_x2(__VA_ARGS__) -#define svreinterpret_bf16_s64_x2(...) __builtin_sve_reinterpret_bf16_s64_x2(__VA_ARGS__) -#define svreinterpret_bf16_u64_x2(...) __builtin_sve_reinterpret_bf16_u64_x2(__VA_ARGS__) -#define svreinterpret_bf16_f16_x2(...) __builtin_sve_reinterpret_bf16_f16_x2(__VA_ARGS__) -#define svreinterpret_bf16_bf16_x2(...) __builtin_sve_reinterpret_bf16_bf16_x2(__VA_ARGS__) -#define svreinterpret_bf16_f32_x2(...) __builtin_sve_reinterpret_bf16_f32_x2(__VA_ARGS__) -#define svreinterpret_bf16_f64_x2(...) __builtin_sve_reinterpret_bf16_f64_x2(__VA_ARGS__) -#define svreinterpret_f32_s8_x2(...) __builtin_sve_reinterpret_f32_s8_x2(__VA_ARGS__) -#define svreinterpret_f32_u8_x2(...) __builtin_sve_reinterpret_f32_u8_x2(__VA_ARGS__) -#define svreinterpret_f32_s16_x2(...) __builtin_sve_reinterpret_f32_s16_x2(__VA_ARGS__) -#define svreinterpret_f32_u16_x2(...) __builtin_sve_reinterpret_f32_u16_x2(__VA_ARGS__) -#define svreinterpret_f32_s32_x2(...) __builtin_sve_reinterpret_f32_s32_x2(__VA_ARGS__) -#define svreinterpret_f32_u32_x2(...) 
__builtin_sve_reinterpret_f32_u32_x2(__VA_ARGS__) -#define svreinterpret_f32_s64_x2(...) __builtin_sve_reinterpret_f32_s64_x2(__VA_ARGS__) -#define svreinterpret_f32_u64_x2(...) __builtin_sve_reinterpret_f32_u64_x2(__VA_ARGS__) -#define svreinterpret_f32_f16_x2(...) __builtin_sve_reinterpret_f32_f16_x2(__VA_ARGS__) -#define svreinterpret_f32_bf16_x2(...) __builtin_sve_reinterpret_f32_bf16_x2(__VA_ARGS__) -#define svreinterpret_f32_f32_x2(...) __builtin_sve_reinterpret_f32_f32_x2(__VA_ARGS__) -#define svreinterpret_f32_f64_x2(...) __builtin_sve_reinterpret_f32_f64_x2(__VA_ARGS__) -#define svreinterpret_f64_s8_x2(...) __builtin_sve_reinterpret_f64_s8_x2(__VA_ARGS__) -#define svreinterpret_f64_u8_x2(...) __builtin_sve_reinterpret_f64_u8_x2(__VA_ARGS__) -#define svreinterpret_f64_s16_x2(...) __builtin_sve_reinterpret_f64_s16_x2(__VA_ARGS__) -#define svreinterpret_f64_u16_x2(...) __builtin_sve_reinterpret_f64_u16_x2(__VA_ARGS__) -#define svreinterpret_f64_s32_x2(...) __builtin_sve_reinterpret_f64_s32_x2(__VA_ARGS__) -#define svreinterpret_f64_u32_x2(...) __builtin_sve_reinterpret_f64_u32_x2(__VA_ARGS__) -#define svreinterpret_f64_s64_x2(...) __builtin_sve_reinterpret_f64_s64_x2(__VA_ARGS__) -#define svreinterpret_f64_u64_x2(...) __builtin_sve_reinterpret_f64_u64_x2(__VA_ARGS__) -#define svreinterpret_f64_f16_x2(...) __builtin_sve_reinterpret_f64_f16_x2(__VA_ARGS__) -#define svreinterpret_f64_bf16_x2(...) __builtin_sve_reinterpret_f64_bf16_x2(__VA_ARGS__) -#define svreinterpret_f64_f32_x2(...) __builtin_sve_reinterpret_f64_f32_x2(__VA_ARGS__) -#define svreinterpret_f64_f64_x2(...) 
__builtin_sve_reinterpret_f64_f64_x2(__VA_ARGS__) -__aio __attribute__((target("sve"))) svint8x2_t svreinterpret_s8(svint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_s8_x2(op); -} - -__aio __attribute__((target("sve"))) svint8x2_t svreinterpret_s8(svuint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_u8_x2(op); -} - -__aio __attribute__((target("sve"))) svint8x2_t svreinterpret_s8(svint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_s16_x2(op); -} - -__aio __attribute__((target("sve"))) svint8x2_t svreinterpret_s8(svuint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_u16_x2(op); -} - -__aio __attribute__((target("sve"))) svint8x2_t svreinterpret_s8(svint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_s32_x2(op); -} - -__aio __attribute__((target("sve"))) svint8x2_t svreinterpret_s8(svuint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_u32_x2(op); -} - -__aio __attribute__((target("sve"))) svint8x2_t svreinterpret_s8(svint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_s64_x2(op); -} - -__aio __attribute__((target("sve"))) svint8x2_t svreinterpret_s8(svuint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_u64_x2(op); -} - -__aio __attribute__((target("sve"))) svint8x2_t svreinterpret_s8(svfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_f16_x2(op); -} - -__aio __attribute__((target("sve"))) svint8x2_t svreinterpret_s8(svbfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_bf16_x2(op); -} - -__aio __attribute__((target("sve"))) svint8x2_t svreinterpret_s8(svfloat32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_f32_x2(op); -} - -__aio __attribute__((target("sve"))) svint8x2_t svreinterpret_s8(svfloat64x2_t op) __arm_streaming_compatible { - 
return __builtin_sve_reinterpret_s8_f64_x2(op); -} - -__aio __attribute__((target("sve"))) svuint8x2_t svreinterpret_u8(svint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_s8_x2(op); -} - -__aio __attribute__((target("sve"))) svuint8x2_t svreinterpret_u8(svuint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_u8_x2(op); -} - -__aio __attribute__((target("sve"))) svuint8x2_t svreinterpret_u8(svint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_s16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint8x2_t svreinterpret_u8(svuint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_u16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint8x2_t svreinterpret_u8(svint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_s32_x2(op); -} - -__aio __attribute__((target("sve"))) svuint8x2_t svreinterpret_u8(svuint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_u32_x2(op); -} - -__aio __attribute__((target("sve"))) svuint8x2_t svreinterpret_u8(svint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_s64_x2(op); -} - -__aio __attribute__((target("sve"))) svuint8x2_t svreinterpret_u8(svuint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_u64_x2(op); -} - -__aio __attribute__((target("sve"))) svuint8x2_t svreinterpret_u8(svfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_f16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint8x2_t svreinterpret_u8(svbfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_bf16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint8x2_t svreinterpret_u8(svfloat32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_f32_x2(op); -} - -__aio __attribute__((target("sve"))) svuint8x2_t svreinterpret_u8(svfloat64x2_t op) 
__arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_f64_x2(op); -} - -__aio __attribute__((target("sve"))) svint16x2_t svreinterpret_s16(svint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_s8_x2(op); -} - -__aio __attribute__((target("sve"))) svint16x2_t svreinterpret_s16(svuint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_u8_x2(op); -} - -__aio __attribute__((target("sve"))) svint16x2_t svreinterpret_s16(svint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_s16_x2(op); -} - -__aio __attribute__((target("sve"))) svint16x2_t svreinterpret_s16(svuint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_u16_x2(op); -} - -__aio __attribute__((target("sve"))) svint16x2_t svreinterpret_s16(svint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_s32_x2(op); -} - -__aio __attribute__((target("sve"))) svint16x2_t svreinterpret_s16(svuint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_u32_x2(op); -} - -__aio __attribute__((target("sve"))) svint16x2_t svreinterpret_s16(svint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_s64_x2(op); -} - -__aio __attribute__((target("sve"))) svint16x2_t svreinterpret_s16(svuint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_u64_x2(op); -} - -__aio __attribute__((target("sve"))) svint16x2_t svreinterpret_s16(svfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_f16_x2(op); -} - -__aio __attribute__((target("sve"))) svint16x2_t svreinterpret_s16(svbfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_bf16_x2(op); -} - -__aio __attribute__((target("sve"))) svint16x2_t svreinterpret_s16(svfloat32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_f32_x2(op); -} - -__aio __attribute__((target("sve"))) 
svint16x2_t svreinterpret_s16(svfloat64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_f64_x2(op); -} - -__aio __attribute__((target("sve"))) svuint16x2_t svreinterpret_u16(svint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_s8_x2(op); -} - -__aio __attribute__((target("sve"))) svuint16x2_t svreinterpret_u16(svuint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_u8_x2(op); -} - -__aio __attribute__((target("sve"))) svuint16x2_t svreinterpret_u16(svint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_s16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint16x2_t svreinterpret_u16(svuint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_u16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint16x2_t svreinterpret_u16(svint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_s32_x2(op); -} - -__aio __attribute__((target("sve"))) svuint16x2_t svreinterpret_u16(svuint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_u32_x2(op); -} - -__aio __attribute__((target("sve"))) svuint16x2_t svreinterpret_u16(svint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_s64_x2(op); -} - -__aio __attribute__((target("sve"))) svuint16x2_t svreinterpret_u16(svuint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_u64_x2(op); -} - -__aio __attribute__((target("sve"))) svuint16x2_t svreinterpret_u16(svfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_f16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint16x2_t svreinterpret_u16(svbfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_bf16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint16x2_t svreinterpret_u16(svfloat32x2_t op) __arm_streaming_compatible { - return 
__builtin_sve_reinterpret_u16_f32_x2(op); -} - -__aio __attribute__((target("sve"))) svuint16x2_t svreinterpret_u16(svfloat64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_f64_x2(op); -} - -__aio __attribute__((target("sve"))) svint32x2_t svreinterpret_s32(svint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_s8_x2(op); -} - -__aio __attribute__((target("sve"))) svint32x2_t svreinterpret_s32(svuint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_u8_x2(op); -} - -__aio __attribute__((target("sve"))) svint32x2_t svreinterpret_s32(svint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_s16_x2(op); -} - -__aio __attribute__((target("sve"))) svint32x2_t svreinterpret_s32(svuint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_u16_x2(op); -} - -__aio __attribute__((target("sve"))) svint32x2_t svreinterpret_s32(svint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_s32_x2(op); -} - -__aio __attribute__((target("sve"))) svint32x2_t svreinterpret_s32(svuint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_u32_x2(op); -} - -__aio __attribute__((target("sve"))) svint32x2_t svreinterpret_s32(svint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_s64_x2(op); -} - -__aio __attribute__((target("sve"))) svint32x2_t svreinterpret_s32(svuint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_u64_x2(op); -} - -__aio __attribute__((target("sve"))) svint32x2_t svreinterpret_s32(svfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_f16_x2(op); -} - -__aio __attribute__((target("sve"))) svint32x2_t svreinterpret_s32(svbfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_bf16_x2(op); -} - -__aio __attribute__((target("sve"))) svint32x2_t svreinterpret_s32(svfloat32x2_t 
op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_f32_x2(op); -} - -__aio __attribute__((target("sve"))) svint32x2_t svreinterpret_s32(svfloat64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_f64_x2(op); -} - -__aio __attribute__((target("sve"))) svuint32x2_t svreinterpret_u32(svint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_s8_x2(op); -} - -__aio __attribute__((target("sve"))) svuint32x2_t svreinterpret_u32(svuint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_u8_x2(op); -} - -__aio __attribute__((target("sve"))) svuint32x2_t svreinterpret_u32(svint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_s16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint32x2_t svreinterpret_u32(svuint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_u16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint32x2_t svreinterpret_u32(svint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_s32_x2(op); -} - -__aio __attribute__((target("sve"))) svuint32x2_t svreinterpret_u32(svuint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_u32_x2(op); -} - -__aio __attribute__((target("sve"))) svuint32x2_t svreinterpret_u32(svint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_s64_x2(op); -} - -__aio __attribute__((target("sve"))) svuint32x2_t svreinterpret_u32(svuint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_u64_x2(op); -} - -__aio __attribute__((target("sve"))) svuint32x2_t svreinterpret_u32(svfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_f16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint32x2_t svreinterpret_u32(svbfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_bf16_x2(op); -} - -__aio 
__attribute__((target("sve"))) svuint32x2_t svreinterpret_u32(svfloat32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_f32_x2(op); -} - -__aio __attribute__((target("sve"))) svuint32x2_t svreinterpret_u32(svfloat64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_f64_x2(op); -} - -__aio __attribute__((target("sve"))) svint64x2_t svreinterpret_s64(svint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_s8_x2(op); -} - -__aio __attribute__((target("sve"))) svint64x2_t svreinterpret_s64(svuint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_u8_x2(op); -} - -__aio __attribute__((target("sve"))) svint64x2_t svreinterpret_s64(svint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_s16_x2(op); -} - -__aio __attribute__((target("sve"))) svint64x2_t svreinterpret_s64(svuint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_u16_x2(op); -} - -__aio __attribute__((target("sve"))) svint64x2_t svreinterpret_s64(svint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_s32_x2(op); -} - -__aio __attribute__((target("sve"))) svint64x2_t svreinterpret_s64(svuint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_u32_x2(op); -} - -__aio __attribute__((target("sve"))) svint64x2_t svreinterpret_s64(svint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_s64_x2(op); -} - -__aio __attribute__((target("sve"))) svint64x2_t svreinterpret_s64(svuint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_u64_x2(op); -} - -__aio __attribute__((target("sve"))) svint64x2_t svreinterpret_s64(svfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_f16_x2(op); -} - -__aio __attribute__((target("sve"))) svint64x2_t svreinterpret_s64(svbfloat16x2_t op) __arm_streaming_compatible { - return 
__builtin_sve_reinterpret_s64_bf16_x2(op); -} - -__aio __attribute__((target("sve"))) svint64x2_t svreinterpret_s64(svfloat32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_f32_x2(op); -} - -__aio __attribute__((target("sve"))) svint64x2_t svreinterpret_s64(svfloat64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_f64_x2(op); -} - -__aio __attribute__((target("sve"))) svuint64x2_t svreinterpret_u64(svint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_s8_x2(op); -} - -__aio __attribute__((target("sve"))) svuint64x2_t svreinterpret_u64(svuint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_u8_x2(op); -} - -__aio __attribute__((target("sve"))) svuint64x2_t svreinterpret_u64(svint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_s16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint64x2_t svreinterpret_u64(svuint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_u16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint64x2_t svreinterpret_u64(svint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_s32_x2(op); -} - -__aio __attribute__((target("sve"))) svuint64x2_t svreinterpret_u64(svuint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_u32_x2(op); -} - -__aio __attribute__((target("sve"))) svuint64x2_t svreinterpret_u64(svint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_s64_x2(op); -} - -__aio __attribute__((target("sve"))) svuint64x2_t svreinterpret_u64(svuint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_u64_x2(op); -} - -__aio __attribute__((target("sve"))) svuint64x2_t svreinterpret_u64(svfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_f16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint64x2_t 
svreinterpret_u64(svbfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_bf16_x2(op); -} - -__aio __attribute__((target("sve"))) svuint64x2_t svreinterpret_u64(svfloat32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_f32_x2(op); -} - -__aio __attribute__((target("sve"))) svuint64x2_t svreinterpret_u64(svfloat64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_f64_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat16x2_t svreinterpret_f16(svint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_s8_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat16x2_t svreinterpret_f16(svuint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_u8_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat16x2_t svreinterpret_f16(svint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_s16_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat16x2_t svreinterpret_f16(svuint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_u16_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat16x2_t svreinterpret_f16(svint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_s32_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat16x2_t svreinterpret_f16(svuint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_u32_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat16x2_t svreinterpret_f16(svint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_s64_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat16x2_t svreinterpret_f16(svuint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_u64_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat16x2_t svreinterpret_f16(svfloat16x2_t op) __arm_streaming_compatible { - return 
__builtin_sve_reinterpret_f16_f16_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat16x2_t svreinterpret_f16(svbfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_bf16_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat16x2_t svreinterpret_f16(svfloat32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_f32_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat16x2_t svreinterpret_f16(svfloat64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_f64_x2(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x2_t svreinterpret_bf16(svint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_s8_x2(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x2_t svreinterpret_bf16(svuint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_u8_x2(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x2_t svreinterpret_bf16(svint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_s16_x2(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x2_t svreinterpret_bf16(svuint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_u16_x2(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x2_t svreinterpret_bf16(svint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_s32_x2(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x2_t svreinterpret_bf16(svuint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_u32_x2(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x2_t svreinterpret_bf16(svint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_s64_x2(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x2_t svreinterpret_bf16(svuint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_u64_x2(op); -} - -__aio __attribute__((target("sve"))) 
svbfloat16x2_t svreinterpret_bf16(svfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_f16_x2(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x2_t svreinterpret_bf16(svbfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_bf16_x2(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x2_t svreinterpret_bf16(svfloat32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_f32_x2(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x2_t svreinterpret_bf16(svfloat64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_f64_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat32x2_t svreinterpret_f32(svint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_s8_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat32x2_t svreinterpret_f32(svuint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_u8_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat32x2_t svreinterpret_f32(svint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_s16_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat32x2_t svreinterpret_f32(svuint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_u16_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat32x2_t svreinterpret_f32(svint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_s32_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat32x2_t svreinterpret_f32(svuint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_u32_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat32x2_t svreinterpret_f32(svint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_s64_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat32x2_t svreinterpret_f32(svuint64x2_t op) __arm_streaming_compatible { - return 
__builtin_sve_reinterpret_f32_u64_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat32x2_t svreinterpret_f32(svfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_f16_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat32x2_t svreinterpret_f32(svbfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_bf16_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat32x2_t svreinterpret_f32(svfloat32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_f32_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat32x2_t svreinterpret_f32(svfloat64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_f64_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat64x2_t svreinterpret_f64(svint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_s8_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat64x2_t svreinterpret_f64(svuint8x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_u8_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat64x2_t svreinterpret_f64(svint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_s16_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat64x2_t svreinterpret_f64(svuint16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_u16_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat64x2_t svreinterpret_f64(svint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_s32_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat64x2_t svreinterpret_f64(svuint32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_u32_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat64x2_t svreinterpret_f64(svint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_s64_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat64x2_t 
svreinterpret_f64(svuint64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_u64_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat64x2_t svreinterpret_f64(svfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_f16_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat64x2_t svreinterpret_f64(svbfloat16x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_bf16_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat64x2_t svreinterpret_f64(svfloat32x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_f32_x2(op); -} - -__aio __attribute__((target("sve"))) svfloat64x2_t svreinterpret_f64(svfloat64x2_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_f64_x2(op); -} - -#define svreinterpret_s8_s8_x3(...) __builtin_sve_reinterpret_s8_s8_x3(__VA_ARGS__) -#define svreinterpret_s8_u8_x3(...) __builtin_sve_reinterpret_s8_u8_x3(__VA_ARGS__) -#define svreinterpret_s8_s16_x3(...) __builtin_sve_reinterpret_s8_s16_x3(__VA_ARGS__) -#define svreinterpret_s8_u16_x3(...) __builtin_sve_reinterpret_s8_u16_x3(__VA_ARGS__) -#define svreinterpret_s8_s32_x3(...) __builtin_sve_reinterpret_s8_s32_x3(__VA_ARGS__) -#define svreinterpret_s8_u32_x3(...) __builtin_sve_reinterpret_s8_u32_x3(__VA_ARGS__) -#define svreinterpret_s8_s64_x3(...) __builtin_sve_reinterpret_s8_s64_x3(__VA_ARGS__) -#define svreinterpret_s8_u64_x3(...) __builtin_sve_reinterpret_s8_u64_x3(__VA_ARGS__) -#define svreinterpret_s8_f16_x3(...) __builtin_sve_reinterpret_s8_f16_x3(__VA_ARGS__) -#define svreinterpret_s8_bf16_x3(...) __builtin_sve_reinterpret_s8_bf16_x3(__VA_ARGS__) -#define svreinterpret_s8_f32_x3(...) __builtin_sve_reinterpret_s8_f32_x3(__VA_ARGS__) -#define svreinterpret_s8_f64_x3(...) __builtin_sve_reinterpret_s8_f64_x3(__VA_ARGS__) -#define svreinterpret_u8_s8_x3(...) __builtin_sve_reinterpret_u8_s8_x3(__VA_ARGS__) -#define svreinterpret_u8_u8_x3(...) 
__builtin_sve_reinterpret_u8_u8_x3(__VA_ARGS__) -#define svreinterpret_u8_s16_x3(...) __builtin_sve_reinterpret_u8_s16_x3(__VA_ARGS__) -#define svreinterpret_u8_u16_x3(...) __builtin_sve_reinterpret_u8_u16_x3(__VA_ARGS__) -#define svreinterpret_u8_s32_x3(...) __builtin_sve_reinterpret_u8_s32_x3(__VA_ARGS__) -#define svreinterpret_u8_u32_x3(...) __builtin_sve_reinterpret_u8_u32_x3(__VA_ARGS__) -#define svreinterpret_u8_s64_x3(...) __builtin_sve_reinterpret_u8_s64_x3(__VA_ARGS__) -#define svreinterpret_u8_u64_x3(...) __builtin_sve_reinterpret_u8_u64_x3(__VA_ARGS__) -#define svreinterpret_u8_f16_x3(...) __builtin_sve_reinterpret_u8_f16_x3(__VA_ARGS__) -#define svreinterpret_u8_bf16_x3(...) __builtin_sve_reinterpret_u8_bf16_x3(__VA_ARGS__) -#define svreinterpret_u8_f32_x3(...) __builtin_sve_reinterpret_u8_f32_x3(__VA_ARGS__) -#define svreinterpret_u8_f64_x3(...) __builtin_sve_reinterpret_u8_f64_x3(__VA_ARGS__) -#define svreinterpret_s16_s8_x3(...) __builtin_sve_reinterpret_s16_s8_x3(__VA_ARGS__) -#define svreinterpret_s16_u8_x3(...) __builtin_sve_reinterpret_s16_u8_x3(__VA_ARGS__) -#define svreinterpret_s16_s16_x3(...) __builtin_sve_reinterpret_s16_s16_x3(__VA_ARGS__) -#define svreinterpret_s16_u16_x3(...) __builtin_sve_reinterpret_s16_u16_x3(__VA_ARGS__) -#define svreinterpret_s16_s32_x3(...) __builtin_sve_reinterpret_s16_s32_x3(__VA_ARGS__) -#define svreinterpret_s16_u32_x3(...) __builtin_sve_reinterpret_s16_u32_x3(__VA_ARGS__) -#define svreinterpret_s16_s64_x3(...) __builtin_sve_reinterpret_s16_s64_x3(__VA_ARGS__) -#define svreinterpret_s16_u64_x3(...) __builtin_sve_reinterpret_s16_u64_x3(__VA_ARGS__) -#define svreinterpret_s16_f16_x3(...) __builtin_sve_reinterpret_s16_f16_x3(__VA_ARGS__) -#define svreinterpret_s16_bf16_x3(...) __builtin_sve_reinterpret_s16_bf16_x3(__VA_ARGS__) -#define svreinterpret_s16_f32_x3(...) __builtin_sve_reinterpret_s16_f32_x3(__VA_ARGS__) -#define svreinterpret_s16_f64_x3(...) 
__builtin_sve_reinterpret_s16_f64_x3(__VA_ARGS__) -#define svreinterpret_u16_s8_x3(...) __builtin_sve_reinterpret_u16_s8_x3(__VA_ARGS__) -#define svreinterpret_u16_u8_x3(...) __builtin_sve_reinterpret_u16_u8_x3(__VA_ARGS__) -#define svreinterpret_u16_s16_x3(...) __builtin_sve_reinterpret_u16_s16_x3(__VA_ARGS__) -#define svreinterpret_u16_u16_x3(...) __builtin_sve_reinterpret_u16_u16_x3(__VA_ARGS__) -#define svreinterpret_u16_s32_x3(...) __builtin_sve_reinterpret_u16_s32_x3(__VA_ARGS__) -#define svreinterpret_u16_u32_x3(...) __builtin_sve_reinterpret_u16_u32_x3(__VA_ARGS__) -#define svreinterpret_u16_s64_x3(...) __builtin_sve_reinterpret_u16_s64_x3(__VA_ARGS__) -#define svreinterpret_u16_u64_x3(...) __builtin_sve_reinterpret_u16_u64_x3(__VA_ARGS__) -#define svreinterpret_u16_f16_x3(...) __builtin_sve_reinterpret_u16_f16_x3(__VA_ARGS__) -#define svreinterpret_u16_bf16_x3(...) __builtin_sve_reinterpret_u16_bf16_x3(__VA_ARGS__) -#define svreinterpret_u16_f32_x3(...) __builtin_sve_reinterpret_u16_f32_x3(__VA_ARGS__) -#define svreinterpret_u16_f64_x3(...) __builtin_sve_reinterpret_u16_f64_x3(__VA_ARGS__) -#define svreinterpret_s32_s8_x3(...) __builtin_sve_reinterpret_s32_s8_x3(__VA_ARGS__) -#define svreinterpret_s32_u8_x3(...) __builtin_sve_reinterpret_s32_u8_x3(__VA_ARGS__) -#define svreinterpret_s32_s16_x3(...) __builtin_sve_reinterpret_s32_s16_x3(__VA_ARGS__) -#define svreinterpret_s32_u16_x3(...) __builtin_sve_reinterpret_s32_u16_x3(__VA_ARGS__) -#define svreinterpret_s32_s32_x3(...) __builtin_sve_reinterpret_s32_s32_x3(__VA_ARGS__) -#define svreinterpret_s32_u32_x3(...) __builtin_sve_reinterpret_s32_u32_x3(__VA_ARGS__) -#define svreinterpret_s32_s64_x3(...) __builtin_sve_reinterpret_s32_s64_x3(__VA_ARGS__) -#define svreinterpret_s32_u64_x3(...) __builtin_sve_reinterpret_s32_u64_x3(__VA_ARGS__) -#define svreinterpret_s32_f16_x3(...) __builtin_sve_reinterpret_s32_f16_x3(__VA_ARGS__) -#define svreinterpret_s32_bf16_x3(...) 
__builtin_sve_reinterpret_s32_bf16_x3(__VA_ARGS__) -#define svreinterpret_s32_f32_x3(...) __builtin_sve_reinterpret_s32_f32_x3(__VA_ARGS__) -#define svreinterpret_s32_f64_x3(...) __builtin_sve_reinterpret_s32_f64_x3(__VA_ARGS__) -#define svreinterpret_u32_s8_x3(...) __builtin_sve_reinterpret_u32_s8_x3(__VA_ARGS__) -#define svreinterpret_u32_u8_x3(...) __builtin_sve_reinterpret_u32_u8_x3(__VA_ARGS__) -#define svreinterpret_u32_s16_x3(...) __builtin_sve_reinterpret_u32_s16_x3(__VA_ARGS__) -#define svreinterpret_u32_u16_x3(...) __builtin_sve_reinterpret_u32_u16_x3(__VA_ARGS__) -#define svreinterpret_u32_s32_x3(...) __builtin_sve_reinterpret_u32_s32_x3(__VA_ARGS__) -#define svreinterpret_u32_u32_x3(...) __builtin_sve_reinterpret_u32_u32_x3(__VA_ARGS__) -#define svreinterpret_u32_s64_x3(...) __builtin_sve_reinterpret_u32_s64_x3(__VA_ARGS__) -#define svreinterpret_u32_u64_x3(...) __builtin_sve_reinterpret_u32_u64_x3(__VA_ARGS__) -#define svreinterpret_u32_f16_x3(...) __builtin_sve_reinterpret_u32_f16_x3(__VA_ARGS__) -#define svreinterpret_u32_bf16_x3(...) __builtin_sve_reinterpret_u32_bf16_x3(__VA_ARGS__) -#define svreinterpret_u32_f32_x3(...) __builtin_sve_reinterpret_u32_f32_x3(__VA_ARGS__) -#define svreinterpret_u32_f64_x3(...) __builtin_sve_reinterpret_u32_f64_x3(__VA_ARGS__) -#define svreinterpret_s64_s8_x3(...) __builtin_sve_reinterpret_s64_s8_x3(__VA_ARGS__) -#define svreinterpret_s64_u8_x3(...) __builtin_sve_reinterpret_s64_u8_x3(__VA_ARGS__) -#define svreinterpret_s64_s16_x3(...) __builtin_sve_reinterpret_s64_s16_x3(__VA_ARGS__) -#define svreinterpret_s64_u16_x3(...) __builtin_sve_reinterpret_s64_u16_x3(__VA_ARGS__) -#define svreinterpret_s64_s32_x3(...) __builtin_sve_reinterpret_s64_s32_x3(__VA_ARGS__) -#define svreinterpret_s64_u32_x3(...) __builtin_sve_reinterpret_s64_u32_x3(__VA_ARGS__) -#define svreinterpret_s64_s64_x3(...) __builtin_sve_reinterpret_s64_s64_x3(__VA_ARGS__) -#define svreinterpret_s64_u64_x3(...) 
__builtin_sve_reinterpret_s64_u64_x3(__VA_ARGS__) -#define svreinterpret_s64_f16_x3(...) __builtin_sve_reinterpret_s64_f16_x3(__VA_ARGS__) -#define svreinterpret_s64_bf16_x3(...) __builtin_sve_reinterpret_s64_bf16_x3(__VA_ARGS__) -#define svreinterpret_s64_f32_x3(...) __builtin_sve_reinterpret_s64_f32_x3(__VA_ARGS__) -#define svreinterpret_s64_f64_x3(...) __builtin_sve_reinterpret_s64_f64_x3(__VA_ARGS__) -#define svreinterpret_u64_s8_x3(...) __builtin_sve_reinterpret_u64_s8_x3(__VA_ARGS__) -#define svreinterpret_u64_u8_x3(...) __builtin_sve_reinterpret_u64_u8_x3(__VA_ARGS__) -#define svreinterpret_u64_s16_x3(...) __builtin_sve_reinterpret_u64_s16_x3(__VA_ARGS__) -#define svreinterpret_u64_u16_x3(...) __builtin_sve_reinterpret_u64_u16_x3(__VA_ARGS__) -#define svreinterpret_u64_s32_x3(...) __builtin_sve_reinterpret_u64_s32_x3(__VA_ARGS__) -#define svreinterpret_u64_u32_x3(...) __builtin_sve_reinterpret_u64_u32_x3(__VA_ARGS__) -#define svreinterpret_u64_s64_x3(...) __builtin_sve_reinterpret_u64_s64_x3(__VA_ARGS__) -#define svreinterpret_u64_u64_x3(...) __builtin_sve_reinterpret_u64_u64_x3(__VA_ARGS__) -#define svreinterpret_u64_f16_x3(...) __builtin_sve_reinterpret_u64_f16_x3(__VA_ARGS__) -#define svreinterpret_u64_bf16_x3(...) __builtin_sve_reinterpret_u64_bf16_x3(__VA_ARGS__) -#define svreinterpret_u64_f32_x3(...) __builtin_sve_reinterpret_u64_f32_x3(__VA_ARGS__) -#define svreinterpret_u64_f64_x3(...) __builtin_sve_reinterpret_u64_f64_x3(__VA_ARGS__) -#define svreinterpret_f16_s8_x3(...) __builtin_sve_reinterpret_f16_s8_x3(__VA_ARGS__) -#define svreinterpret_f16_u8_x3(...) __builtin_sve_reinterpret_f16_u8_x3(__VA_ARGS__) -#define svreinterpret_f16_s16_x3(...) __builtin_sve_reinterpret_f16_s16_x3(__VA_ARGS__) -#define svreinterpret_f16_u16_x3(...) __builtin_sve_reinterpret_f16_u16_x3(__VA_ARGS__) -#define svreinterpret_f16_s32_x3(...) __builtin_sve_reinterpret_f16_s32_x3(__VA_ARGS__) -#define svreinterpret_f16_u32_x3(...) 
__builtin_sve_reinterpret_f16_u32_x3(__VA_ARGS__) -#define svreinterpret_f16_s64_x3(...) __builtin_sve_reinterpret_f16_s64_x3(__VA_ARGS__) -#define svreinterpret_f16_u64_x3(...) __builtin_sve_reinterpret_f16_u64_x3(__VA_ARGS__) -#define svreinterpret_f16_f16_x3(...) __builtin_sve_reinterpret_f16_f16_x3(__VA_ARGS__) -#define svreinterpret_f16_bf16_x3(...) __builtin_sve_reinterpret_f16_bf16_x3(__VA_ARGS__) -#define svreinterpret_f16_f32_x3(...) __builtin_sve_reinterpret_f16_f32_x3(__VA_ARGS__) -#define svreinterpret_f16_f64_x3(...) __builtin_sve_reinterpret_f16_f64_x3(__VA_ARGS__) -#define svreinterpret_bf16_s8_x3(...) __builtin_sve_reinterpret_bf16_s8_x3(__VA_ARGS__) -#define svreinterpret_bf16_u8_x3(...) __builtin_sve_reinterpret_bf16_u8_x3(__VA_ARGS__) -#define svreinterpret_bf16_s16_x3(...) __builtin_sve_reinterpret_bf16_s16_x3(__VA_ARGS__) -#define svreinterpret_bf16_u16_x3(...) __builtin_sve_reinterpret_bf16_u16_x3(__VA_ARGS__) -#define svreinterpret_bf16_s32_x3(...) __builtin_sve_reinterpret_bf16_s32_x3(__VA_ARGS__) -#define svreinterpret_bf16_u32_x3(...) __builtin_sve_reinterpret_bf16_u32_x3(__VA_ARGS__) -#define svreinterpret_bf16_s64_x3(...) __builtin_sve_reinterpret_bf16_s64_x3(__VA_ARGS__) -#define svreinterpret_bf16_u64_x3(...) __builtin_sve_reinterpret_bf16_u64_x3(__VA_ARGS__) -#define svreinterpret_bf16_f16_x3(...) __builtin_sve_reinterpret_bf16_f16_x3(__VA_ARGS__) -#define svreinterpret_bf16_bf16_x3(...) __builtin_sve_reinterpret_bf16_bf16_x3(__VA_ARGS__) -#define svreinterpret_bf16_f32_x3(...) __builtin_sve_reinterpret_bf16_f32_x3(__VA_ARGS__) -#define svreinterpret_bf16_f64_x3(...) __builtin_sve_reinterpret_bf16_f64_x3(__VA_ARGS__) -#define svreinterpret_f32_s8_x3(...) __builtin_sve_reinterpret_f32_s8_x3(__VA_ARGS__) -#define svreinterpret_f32_u8_x3(...) __builtin_sve_reinterpret_f32_u8_x3(__VA_ARGS__) -#define svreinterpret_f32_s16_x3(...) __builtin_sve_reinterpret_f32_s16_x3(__VA_ARGS__) -#define svreinterpret_f32_u16_x3(...) 
__builtin_sve_reinterpret_f32_u16_x3(__VA_ARGS__) -#define svreinterpret_f32_s32_x3(...) __builtin_sve_reinterpret_f32_s32_x3(__VA_ARGS__) -#define svreinterpret_f32_u32_x3(...) __builtin_sve_reinterpret_f32_u32_x3(__VA_ARGS__) -#define svreinterpret_f32_s64_x3(...) __builtin_sve_reinterpret_f32_s64_x3(__VA_ARGS__) -#define svreinterpret_f32_u64_x3(...) __builtin_sve_reinterpret_f32_u64_x3(__VA_ARGS__) -#define svreinterpret_f32_f16_x3(...) __builtin_sve_reinterpret_f32_f16_x3(__VA_ARGS__) -#define svreinterpret_f32_bf16_x3(...) __builtin_sve_reinterpret_f32_bf16_x3(__VA_ARGS__) -#define svreinterpret_f32_f32_x3(...) __builtin_sve_reinterpret_f32_f32_x3(__VA_ARGS__) -#define svreinterpret_f32_f64_x3(...) __builtin_sve_reinterpret_f32_f64_x3(__VA_ARGS__) -#define svreinterpret_f64_s8_x3(...) __builtin_sve_reinterpret_f64_s8_x3(__VA_ARGS__) -#define svreinterpret_f64_u8_x3(...) __builtin_sve_reinterpret_f64_u8_x3(__VA_ARGS__) -#define svreinterpret_f64_s16_x3(...) __builtin_sve_reinterpret_f64_s16_x3(__VA_ARGS__) -#define svreinterpret_f64_u16_x3(...) __builtin_sve_reinterpret_f64_u16_x3(__VA_ARGS__) -#define svreinterpret_f64_s32_x3(...) __builtin_sve_reinterpret_f64_s32_x3(__VA_ARGS__) -#define svreinterpret_f64_u32_x3(...) __builtin_sve_reinterpret_f64_u32_x3(__VA_ARGS__) -#define svreinterpret_f64_s64_x3(...) __builtin_sve_reinterpret_f64_s64_x3(__VA_ARGS__) -#define svreinterpret_f64_u64_x3(...) __builtin_sve_reinterpret_f64_u64_x3(__VA_ARGS__) -#define svreinterpret_f64_f16_x3(...) __builtin_sve_reinterpret_f64_f16_x3(__VA_ARGS__) -#define svreinterpret_f64_bf16_x3(...) __builtin_sve_reinterpret_f64_bf16_x3(__VA_ARGS__) -#define svreinterpret_f64_f32_x3(...) __builtin_sve_reinterpret_f64_f32_x3(__VA_ARGS__) -#define svreinterpret_f64_f64_x3(...) 
__builtin_sve_reinterpret_f64_f64_x3(__VA_ARGS__) -__aio __attribute__((target("sve"))) svint8x3_t svreinterpret_s8(svint8x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_s8_x3(op); -} - -__aio __attribute__((target("sve"))) svint8x3_t svreinterpret_s8(svuint8x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_u8_x3(op); -} - -__aio __attribute__((target("sve"))) svint8x3_t svreinterpret_s8(svint16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_s16_x3(op); -} - -__aio __attribute__((target("sve"))) svint8x3_t svreinterpret_s8(svuint16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_u16_x3(op); -} - -__aio __attribute__((target("sve"))) svint8x3_t svreinterpret_s8(svint32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_s32_x3(op); -} - -__aio __attribute__((target("sve"))) svint8x3_t svreinterpret_s8(svuint32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_u32_x3(op); -} - -__aio __attribute__((target("sve"))) svint8x3_t svreinterpret_s8(svint64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_s64_x3(op); -} - -__aio __attribute__((target("sve"))) svint8x3_t svreinterpret_s8(svuint64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_u64_x3(op); -} - -__aio __attribute__((target("sve"))) svint8x3_t svreinterpret_s8(svfloat16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_f16_x3(op); -} - -__aio __attribute__((target("sve"))) svint8x3_t svreinterpret_s8(svbfloat16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_bf16_x3(op); -} - -__aio __attribute__((target("sve"))) svint8x3_t svreinterpret_s8(svfloat32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_f32_x3(op); -} - -__aio __attribute__((target("sve"))) svint8x3_t svreinterpret_s8(svfloat64x3_t op) __arm_streaming_compatible { - 
return __builtin_sve_reinterpret_s8_f64_x3(op); -} - -__aio __attribute__((target("sve"))) svuint8x3_t svreinterpret_u8(svint8x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_s8_x3(op); -} - -__aio __attribute__((target("sve"))) svuint8x3_t svreinterpret_u8(svuint8x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_u8_x3(op); -} - -__aio __attribute__((target("sve"))) svuint8x3_t svreinterpret_u8(svint16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_s16_x3(op); -} - -__aio __attribute__((target("sve"))) svuint8x3_t svreinterpret_u8(svuint16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_u16_x3(op); -} - -__aio __attribute__((target("sve"))) svuint8x3_t svreinterpret_u8(svint32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_s32_x3(op); -} - -__aio __attribute__((target("sve"))) svuint8x3_t svreinterpret_u8(svuint32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_u32_x3(op); -} - -__aio __attribute__((target("sve"))) svuint8x3_t svreinterpret_u8(svint64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_s64_x3(op); -} - -__aio __attribute__((target("sve"))) svuint8x3_t svreinterpret_u8(svuint64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_u64_x3(op); -} - -__aio __attribute__((target("sve"))) svuint8x3_t svreinterpret_u8(svfloat16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_f16_x3(op); -} - -__aio __attribute__((target("sve"))) svuint8x3_t svreinterpret_u8(svbfloat16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_bf16_x3(op); -} - -__aio __attribute__((target("sve"))) svuint8x3_t svreinterpret_u8(svfloat32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_f32_x3(op); -} - -__aio __attribute__((target("sve"))) svuint8x3_t svreinterpret_u8(svfloat64x3_t op) 
__arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_f64_x3(op); -} - -__aio __attribute__((target("sve"))) svint16x3_t svreinterpret_s16(svint8x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_s8_x3(op); -} - -__aio __attribute__((target("sve"))) svint16x3_t svreinterpret_s16(svuint8x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_u8_x3(op); -} - -__aio __attribute__((target("sve"))) svint16x3_t svreinterpret_s16(svint16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_s16_x3(op); -} - -__aio __attribute__((target("sve"))) svint16x3_t svreinterpret_s16(svuint16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_u16_x3(op); -} - -__aio __attribute__((target("sve"))) svint16x3_t svreinterpret_s16(svint32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_s32_x3(op); -} - -__aio __attribute__((target("sve"))) svint16x3_t svreinterpret_s16(svuint32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_u32_x3(op); -} - -__aio __attribute__((target("sve"))) svint16x3_t svreinterpret_s16(svint64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_s64_x3(op); -} - -__aio __attribute__((target("sve"))) svint16x3_t svreinterpret_s16(svuint64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_u64_x3(op); -} - -__aio __attribute__((target("sve"))) svint16x3_t svreinterpret_s16(svfloat16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_f16_x3(op); -} - -__aio __attribute__((target("sve"))) svint16x3_t svreinterpret_s16(svbfloat16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_bf16_x3(op); -} - -__aio __attribute__((target("sve"))) svint16x3_t svreinterpret_s16(svfloat32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_f32_x3(op); -} - -__aio __attribute__((target("sve"))) 
svint16x3_t svreinterpret_s16(svfloat64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_f64_x3(op); -} - -__aio __attribute__((target("sve"))) svuint16x3_t svreinterpret_u16(svint8x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_s8_x3(op); -} - -__aio __attribute__((target("sve"))) svuint16x3_t svreinterpret_u16(svuint8x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_u8_x3(op); -} - -__aio __attribute__((target("sve"))) svuint16x3_t svreinterpret_u16(svint16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_s16_x3(op); -} - -__aio __attribute__((target("sve"))) svuint16x3_t svreinterpret_u16(svuint16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_u16_x3(op); -} - -__aio __attribute__((target("sve"))) svuint16x3_t svreinterpret_u16(svint32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_s32_x3(op); -} - -__aio __attribute__((target("sve"))) svuint16x3_t svreinterpret_u16(svuint32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_u32_x3(op); -} - -__aio __attribute__((target("sve"))) svuint16x3_t svreinterpret_u16(svint64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_s64_x3(op); -} - -__aio __attribute__((target("sve"))) svuint16x3_t svreinterpret_u16(svuint64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_u64_x3(op); -} - -__aio __attribute__((target("sve"))) svuint16x3_t svreinterpret_u16(svfloat16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_f16_x3(op); -} - -__aio __attribute__((target("sve"))) svuint16x3_t svreinterpret_u16(svbfloat16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_bf16_x3(op); -} - -__aio __attribute__((target("sve"))) svuint16x3_t svreinterpret_u16(svfloat32x3_t op) __arm_streaming_compatible { - return 
__builtin_sve_reinterpret_u16_f32_x3(op); -} - -__aio __attribute__((target("sve"))) svuint16x3_t svreinterpret_u16(svfloat64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_f64_x3(op); -} - -__aio __attribute__((target("sve"))) svint32x3_t svreinterpret_s32(svint8x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_s8_x3(op); -} - -__aio __attribute__((target("sve"))) svint32x3_t svreinterpret_s32(svuint8x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_u8_x3(op); -} - -__aio __attribute__((target("sve"))) svint32x3_t svreinterpret_s32(svint16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_s16_x3(op); -} - -__aio __attribute__((target("sve"))) svint32x3_t svreinterpret_s32(svuint16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_u16_x3(op); -} - -__aio __attribute__((target("sve"))) svint32x3_t svreinterpret_s32(svint32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_s32_x3(op); -} - -__aio __attribute__((target("sve"))) svint32x3_t svreinterpret_s32(svuint32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_u32_x3(op); -} - -__aio __attribute__((target("sve"))) svint32x3_t svreinterpret_s32(svint64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_s64_x3(op); -} - -__aio __attribute__((target("sve"))) svint32x3_t svreinterpret_s32(svuint64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_u64_x3(op); -} - -__aio __attribute__((target("sve"))) svint32x3_t svreinterpret_s32(svfloat16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_f16_x3(op); -} - -__aio __attribute__((target("sve"))) svint32x3_t svreinterpret_s32(svbfloat16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_bf16_x3(op); -} - -__aio __attribute__((target("sve"))) svint32x3_t svreinterpret_s32(svfloat32x3_t 
op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_f32_x3(op); -} - -__aio __attribute__((target("sve"))) svint32x3_t svreinterpret_s32(svfloat64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_f64_x3(op); -} - -__aio __attribute__((target("sve"))) svuint32x3_t svreinterpret_u32(svint8x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_s8_x3(op); -} - -__aio __attribute__((target("sve"))) svuint32x3_t svreinterpret_u32(svuint8x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_u8_x3(op); -} - -__aio __attribute__((target("sve"))) svuint32x3_t svreinterpret_u32(svint16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_s16_x3(op); -} - -__aio __attribute__((target("sve"))) svuint32x3_t svreinterpret_u32(svuint16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_u16_x3(op); -} - -__aio __attribute__((target("sve"))) svuint32x3_t svreinterpret_u32(svint32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_s32_x3(op); -} - -__aio __attribute__((target("sve"))) svuint32x3_t svreinterpret_u32(svuint32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_u32_x3(op); -} - -__aio __attribute__((target("sve"))) svuint32x3_t svreinterpret_u32(svint64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_s64_x3(op); -} - -__aio __attribute__((target("sve"))) svuint32x3_t svreinterpret_u32(svuint64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_u64_x3(op); -} - -__aio __attribute__((target("sve"))) svuint32x3_t svreinterpret_u32(svfloat16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_f16_x3(op); -} - -__aio __attribute__((target("sve"))) svuint32x3_t svreinterpret_u32(svbfloat16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_bf16_x3(op); -} - -__aio 
__attribute__((target("sve"))) svuint32x3_t svreinterpret_u32(svfloat32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_f32_x3(op); -} - -__aio __attribute__((target("sve"))) svuint32x3_t svreinterpret_u32(svfloat64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_f64_x3(op); -} - -__aio __attribute__((target("sve"))) svint64x3_t svreinterpret_s64(svint8x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_s8_x3(op); -} - -__aio __attribute__((target("sve"))) svint64x3_t svreinterpret_s64(svuint8x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_u8_x3(op); -} - -__aio __attribute__((target("sve"))) svint64x3_t svreinterpret_s64(svint16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_s16_x3(op); -} - -__aio __attribute__((target("sve"))) svint64x3_t svreinterpret_s64(svuint16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_u16_x3(op); -} - -__aio __attribute__((target("sve"))) svint64x3_t svreinterpret_s64(svint32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_s32_x3(op); -} - -__aio __attribute__((target("sve"))) svint64x3_t svreinterpret_s64(svuint32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_u32_x3(op); -} - -__aio __attribute__((target("sve"))) svint64x3_t svreinterpret_s64(svint64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_s64_x3(op); -} - -__aio __attribute__((target("sve"))) svint64x3_t svreinterpret_s64(svuint64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_u64_x3(op); -} - -__aio __attribute__((target("sve"))) svint64x3_t svreinterpret_s64(svfloat16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_f16_x3(op); -} - -__aio __attribute__((target("sve"))) svint64x3_t svreinterpret_s64(svbfloat16x3_t op) __arm_streaming_compatible { - return 
__builtin_sve_reinterpret_s64_bf16_x3(op); -} - -__aio __attribute__((target("sve"))) svint64x3_t svreinterpret_s64(svfloat32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_f32_x3(op); -} - -__aio __attribute__((target("sve"))) svint64x3_t svreinterpret_s64(svfloat64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_f64_x3(op); -} - -__aio __attribute__((target("sve"))) svuint64x3_t svreinterpret_u64(svint8x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_s8_x3(op); -} - -__aio __attribute__((target("sve"))) svuint64x3_t svreinterpret_u64(svuint8x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_u8_x3(op); -} - -__aio __attribute__((target("sve"))) svuint64x3_t svreinterpret_u64(svint16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_s16_x3(op); -} - -__aio __attribute__((target("sve"))) svuint64x3_t svreinterpret_u64(svuint16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_u16_x3(op); -} - -__aio __attribute__((target("sve"))) svuint64x3_t svreinterpret_u64(svint32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_s32_x3(op); -} - -__aio __attribute__((target("sve"))) svuint64x3_t svreinterpret_u64(svuint32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_u32_x3(op); -} - -__aio __attribute__((target("sve"))) svuint64x3_t svreinterpret_u64(svint64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_s64_x3(op); -} - -__aio __attribute__((target("sve"))) svuint64x3_t svreinterpret_u64(svuint64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_u64_x3(op); -} - -__aio __attribute__((target("sve"))) svuint64x3_t svreinterpret_u64(svfloat16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_f16_x3(op); -} - -__aio __attribute__((target("sve"))) svuint64x3_t 
svreinterpret_u64(svbfloat16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_bf16_x3(op); -} - -__aio __attribute__((target("sve"))) svuint64x3_t svreinterpret_u64(svfloat32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_f32_x3(op); -} - -__aio __attribute__((target("sve"))) svuint64x3_t svreinterpret_u64(svfloat64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_f64_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat16x3_t svreinterpret_f16(svint8x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_s8_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat16x3_t svreinterpret_f16(svuint8x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_u8_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat16x3_t svreinterpret_f16(svint16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_s16_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat16x3_t svreinterpret_f16(svuint16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_u16_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat16x3_t svreinterpret_f16(svint32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_s32_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat16x3_t svreinterpret_f16(svuint32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_u32_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat16x3_t svreinterpret_f16(svint64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_s64_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat16x3_t svreinterpret_f16(svuint64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_u64_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat16x3_t svreinterpret_f16(svfloat16x3_t op) __arm_streaming_compatible { - return 
__builtin_sve_reinterpret_f16_f16_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat16x3_t svreinterpret_f16(svbfloat16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_bf16_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat16x3_t svreinterpret_f16(svfloat32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_f32_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat16x3_t svreinterpret_f16(svfloat64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_f64_x3(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x3_t svreinterpret_bf16(svint8x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_s8_x3(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x3_t svreinterpret_bf16(svuint8x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_u8_x3(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x3_t svreinterpret_bf16(svint16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_s16_x3(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x3_t svreinterpret_bf16(svuint16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_u16_x3(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x3_t svreinterpret_bf16(svint32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_s32_x3(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x3_t svreinterpret_bf16(svuint32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_u32_x3(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x3_t svreinterpret_bf16(svint64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_s64_x3(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x3_t svreinterpret_bf16(svuint64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_u64_x3(op); -} - -__aio __attribute__((target("sve"))) 
svbfloat16x3_t svreinterpret_bf16(svfloat16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_f16_x3(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x3_t svreinterpret_bf16(svbfloat16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_bf16_x3(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x3_t svreinterpret_bf16(svfloat32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_f32_x3(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x3_t svreinterpret_bf16(svfloat64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_f64_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat32x3_t svreinterpret_f32(svint8x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_s8_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat32x3_t svreinterpret_f32(svuint8x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_u8_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat32x3_t svreinterpret_f32(svint16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_s16_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat32x3_t svreinterpret_f32(svuint16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_u16_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat32x3_t svreinterpret_f32(svint32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_s32_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat32x3_t svreinterpret_f32(svuint32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_u32_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat32x3_t svreinterpret_f32(svint64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_s64_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat32x3_t svreinterpret_f32(svuint64x3_t op) __arm_streaming_compatible { - return 
__builtin_sve_reinterpret_f32_u64_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat32x3_t svreinterpret_f32(svfloat16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_f16_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat32x3_t svreinterpret_f32(svbfloat16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_bf16_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat32x3_t svreinterpret_f32(svfloat32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_f32_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat32x3_t svreinterpret_f32(svfloat64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_f64_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat64x3_t svreinterpret_f64(svint8x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_s8_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat64x3_t svreinterpret_f64(svuint8x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_u8_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat64x3_t svreinterpret_f64(svint16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_s16_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat64x3_t svreinterpret_f64(svuint16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_u16_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat64x3_t svreinterpret_f64(svint32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_s32_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat64x3_t svreinterpret_f64(svuint32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_u32_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat64x3_t svreinterpret_f64(svint64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_s64_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat64x3_t 
svreinterpret_f64(svuint64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_u64_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat64x3_t svreinterpret_f64(svfloat16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_f16_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat64x3_t svreinterpret_f64(svbfloat16x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_bf16_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat64x3_t svreinterpret_f64(svfloat32x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_f32_x3(op); -} - -__aio __attribute__((target("sve"))) svfloat64x3_t svreinterpret_f64(svfloat64x3_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_f64_x3(op); -} - -#define svreinterpret_s8_s8_x4(...) __builtin_sve_reinterpret_s8_s8_x4(__VA_ARGS__) -#define svreinterpret_s8_u8_x4(...) __builtin_sve_reinterpret_s8_u8_x4(__VA_ARGS__) -#define svreinterpret_s8_s16_x4(...) __builtin_sve_reinterpret_s8_s16_x4(__VA_ARGS__) -#define svreinterpret_s8_u16_x4(...) __builtin_sve_reinterpret_s8_u16_x4(__VA_ARGS__) -#define svreinterpret_s8_s32_x4(...) __builtin_sve_reinterpret_s8_s32_x4(__VA_ARGS__) -#define svreinterpret_s8_u32_x4(...) __builtin_sve_reinterpret_s8_u32_x4(__VA_ARGS__) -#define svreinterpret_s8_s64_x4(...) __builtin_sve_reinterpret_s8_s64_x4(__VA_ARGS__) -#define svreinterpret_s8_u64_x4(...) __builtin_sve_reinterpret_s8_u64_x4(__VA_ARGS__) -#define svreinterpret_s8_f16_x4(...) __builtin_sve_reinterpret_s8_f16_x4(__VA_ARGS__) -#define svreinterpret_s8_bf16_x4(...) __builtin_sve_reinterpret_s8_bf16_x4(__VA_ARGS__) -#define svreinterpret_s8_f32_x4(...) __builtin_sve_reinterpret_s8_f32_x4(__VA_ARGS__) -#define svreinterpret_s8_f64_x4(...) __builtin_sve_reinterpret_s8_f64_x4(__VA_ARGS__) -#define svreinterpret_u8_s8_x4(...) __builtin_sve_reinterpret_u8_s8_x4(__VA_ARGS__) -#define svreinterpret_u8_u8_x4(...) 
__builtin_sve_reinterpret_u8_u8_x4(__VA_ARGS__) -#define svreinterpret_u8_s16_x4(...) __builtin_sve_reinterpret_u8_s16_x4(__VA_ARGS__) -#define svreinterpret_u8_u16_x4(...) __builtin_sve_reinterpret_u8_u16_x4(__VA_ARGS__) -#define svreinterpret_u8_s32_x4(...) __builtin_sve_reinterpret_u8_s32_x4(__VA_ARGS__) -#define svreinterpret_u8_u32_x4(...) __builtin_sve_reinterpret_u8_u32_x4(__VA_ARGS__) -#define svreinterpret_u8_s64_x4(...) __builtin_sve_reinterpret_u8_s64_x4(__VA_ARGS__) -#define svreinterpret_u8_u64_x4(...) __builtin_sve_reinterpret_u8_u64_x4(__VA_ARGS__) -#define svreinterpret_u8_f16_x4(...) __builtin_sve_reinterpret_u8_f16_x4(__VA_ARGS__) -#define svreinterpret_u8_bf16_x4(...) __builtin_sve_reinterpret_u8_bf16_x4(__VA_ARGS__) -#define svreinterpret_u8_f32_x4(...) __builtin_sve_reinterpret_u8_f32_x4(__VA_ARGS__) -#define svreinterpret_u8_f64_x4(...) __builtin_sve_reinterpret_u8_f64_x4(__VA_ARGS__) -#define svreinterpret_s16_s8_x4(...) __builtin_sve_reinterpret_s16_s8_x4(__VA_ARGS__) -#define svreinterpret_s16_u8_x4(...) __builtin_sve_reinterpret_s16_u8_x4(__VA_ARGS__) -#define svreinterpret_s16_s16_x4(...) __builtin_sve_reinterpret_s16_s16_x4(__VA_ARGS__) -#define svreinterpret_s16_u16_x4(...) __builtin_sve_reinterpret_s16_u16_x4(__VA_ARGS__) -#define svreinterpret_s16_s32_x4(...) __builtin_sve_reinterpret_s16_s32_x4(__VA_ARGS__) -#define svreinterpret_s16_u32_x4(...) __builtin_sve_reinterpret_s16_u32_x4(__VA_ARGS__) -#define svreinterpret_s16_s64_x4(...) __builtin_sve_reinterpret_s16_s64_x4(__VA_ARGS__) -#define svreinterpret_s16_u64_x4(...) __builtin_sve_reinterpret_s16_u64_x4(__VA_ARGS__) -#define svreinterpret_s16_f16_x4(...) __builtin_sve_reinterpret_s16_f16_x4(__VA_ARGS__) -#define svreinterpret_s16_bf16_x4(...) __builtin_sve_reinterpret_s16_bf16_x4(__VA_ARGS__) -#define svreinterpret_s16_f32_x4(...) __builtin_sve_reinterpret_s16_f32_x4(__VA_ARGS__) -#define svreinterpret_s16_f64_x4(...) 
__builtin_sve_reinterpret_s16_f64_x4(__VA_ARGS__) -#define svreinterpret_u16_s8_x4(...) __builtin_sve_reinterpret_u16_s8_x4(__VA_ARGS__) -#define svreinterpret_u16_u8_x4(...) __builtin_sve_reinterpret_u16_u8_x4(__VA_ARGS__) -#define svreinterpret_u16_s16_x4(...) __builtin_sve_reinterpret_u16_s16_x4(__VA_ARGS__) -#define svreinterpret_u16_u16_x4(...) __builtin_sve_reinterpret_u16_u16_x4(__VA_ARGS__) -#define svreinterpret_u16_s32_x4(...) __builtin_sve_reinterpret_u16_s32_x4(__VA_ARGS__) -#define svreinterpret_u16_u32_x4(...) __builtin_sve_reinterpret_u16_u32_x4(__VA_ARGS__) -#define svreinterpret_u16_s64_x4(...) __builtin_sve_reinterpret_u16_s64_x4(__VA_ARGS__) -#define svreinterpret_u16_u64_x4(...) __builtin_sve_reinterpret_u16_u64_x4(__VA_ARGS__) -#define svreinterpret_u16_f16_x4(...) __builtin_sve_reinterpret_u16_f16_x4(__VA_ARGS__) -#define svreinterpret_u16_bf16_x4(...) __builtin_sve_reinterpret_u16_bf16_x4(__VA_ARGS__) -#define svreinterpret_u16_f32_x4(...) __builtin_sve_reinterpret_u16_f32_x4(__VA_ARGS__) -#define svreinterpret_u16_f64_x4(...) __builtin_sve_reinterpret_u16_f64_x4(__VA_ARGS__) -#define svreinterpret_s32_s8_x4(...) __builtin_sve_reinterpret_s32_s8_x4(__VA_ARGS__) -#define svreinterpret_s32_u8_x4(...) __builtin_sve_reinterpret_s32_u8_x4(__VA_ARGS__) -#define svreinterpret_s32_s16_x4(...) __builtin_sve_reinterpret_s32_s16_x4(__VA_ARGS__) -#define svreinterpret_s32_u16_x4(...) __builtin_sve_reinterpret_s32_u16_x4(__VA_ARGS__) -#define svreinterpret_s32_s32_x4(...) __builtin_sve_reinterpret_s32_s32_x4(__VA_ARGS__) -#define svreinterpret_s32_u32_x4(...) __builtin_sve_reinterpret_s32_u32_x4(__VA_ARGS__) -#define svreinterpret_s32_s64_x4(...) __builtin_sve_reinterpret_s32_s64_x4(__VA_ARGS__) -#define svreinterpret_s32_u64_x4(...) __builtin_sve_reinterpret_s32_u64_x4(__VA_ARGS__) -#define svreinterpret_s32_f16_x4(...) __builtin_sve_reinterpret_s32_f16_x4(__VA_ARGS__) -#define svreinterpret_s32_bf16_x4(...) 
__builtin_sve_reinterpret_s32_bf16_x4(__VA_ARGS__) -#define svreinterpret_s32_f32_x4(...) __builtin_sve_reinterpret_s32_f32_x4(__VA_ARGS__) -#define svreinterpret_s32_f64_x4(...) __builtin_sve_reinterpret_s32_f64_x4(__VA_ARGS__) -#define svreinterpret_u32_s8_x4(...) __builtin_sve_reinterpret_u32_s8_x4(__VA_ARGS__) -#define svreinterpret_u32_u8_x4(...) __builtin_sve_reinterpret_u32_u8_x4(__VA_ARGS__) -#define svreinterpret_u32_s16_x4(...) __builtin_sve_reinterpret_u32_s16_x4(__VA_ARGS__) -#define svreinterpret_u32_u16_x4(...) __builtin_sve_reinterpret_u32_u16_x4(__VA_ARGS__) -#define svreinterpret_u32_s32_x4(...) __builtin_sve_reinterpret_u32_s32_x4(__VA_ARGS__) -#define svreinterpret_u32_u32_x4(...) __builtin_sve_reinterpret_u32_u32_x4(__VA_ARGS__) -#define svreinterpret_u32_s64_x4(...) __builtin_sve_reinterpret_u32_s64_x4(__VA_ARGS__) -#define svreinterpret_u32_u64_x4(...) __builtin_sve_reinterpret_u32_u64_x4(__VA_ARGS__) -#define svreinterpret_u32_f16_x4(...) __builtin_sve_reinterpret_u32_f16_x4(__VA_ARGS__) -#define svreinterpret_u32_bf16_x4(...) __builtin_sve_reinterpret_u32_bf16_x4(__VA_ARGS__) -#define svreinterpret_u32_f32_x4(...) __builtin_sve_reinterpret_u32_f32_x4(__VA_ARGS__) -#define svreinterpret_u32_f64_x4(...) __builtin_sve_reinterpret_u32_f64_x4(__VA_ARGS__) -#define svreinterpret_s64_s8_x4(...) __builtin_sve_reinterpret_s64_s8_x4(__VA_ARGS__) -#define svreinterpret_s64_u8_x4(...) __builtin_sve_reinterpret_s64_u8_x4(__VA_ARGS__) -#define svreinterpret_s64_s16_x4(...) __builtin_sve_reinterpret_s64_s16_x4(__VA_ARGS__) -#define svreinterpret_s64_u16_x4(...) __builtin_sve_reinterpret_s64_u16_x4(__VA_ARGS__) -#define svreinterpret_s64_s32_x4(...) __builtin_sve_reinterpret_s64_s32_x4(__VA_ARGS__) -#define svreinterpret_s64_u32_x4(...) __builtin_sve_reinterpret_s64_u32_x4(__VA_ARGS__) -#define svreinterpret_s64_s64_x4(...) __builtin_sve_reinterpret_s64_s64_x4(__VA_ARGS__) -#define svreinterpret_s64_u64_x4(...) 
__builtin_sve_reinterpret_s64_u64_x4(__VA_ARGS__) -#define svreinterpret_s64_f16_x4(...) __builtin_sve_reinterpret_s64_f16_x4(__VA_ARGS__) -#define svreinterpret_s64_bf16_x4(...) __builtin_sve_reinterpret_s64_bf16_x4(__VA_ARGS__) -#define svreinterpret_s64_f32_x4(...) __builtin_sve_reinterpret_s64_f32_x4(__VA_ARGS__) -#define svreinterpret_s64_f64_x4(...) __builtin_sve_reinterpret_s64_f64_x4(__VA_ARGS__) -#define svreinterpret_u64_s8_x4(...) __builtin_sve_reinterpret_u64_s8_x4(__VA_ARGS__) -#define svreinterpret_u64_u8_x4(...) __builtin_sve_reinterpret_u64_u8_x4(__VA_ARGS__) -#define svreinterpret_u64_s16_x4(...) __builtin_sve_reinterpret_u64_s16_x4(__VA_ARGS__) -#define svreinterpret_u64_u16_x4(...) __builtin_sve_reinterpret_u64_u16_x4(__VA_ARGS__) -#define svreinterpret_u64_s32_x4(...) __builtin_sve_reinterpret_u64_s32_x4(__VA_ARGS__) -#define svreinterpret_u64_u32_x4(...) __builtin_sve_reinterpret_u64_u32_x4(__VA_ARGS__) -#define svreinterpret_u64_s64_x4(...) __builtin_sve_reinterpret_u64_s64_x4(__VA_ARGS__) -#define svreinterpret_u64_u64_x4(...) __builtin_sve_reinterpret_u64_u64_x4(__VA_ARGS__) -#define svreinterpret_u64_f16_x4(...) __builtin_sve_reinterpret_u64_f16_x4(__VA_ARGS__) -#define svreinterpret_u64_bf16_x4(...) __builtin_sve_reinterpret_u64_bf16_x4(__VA_ARGS__) -#define svreinterpret_u64_f32_x4(...) __builtin_sve_reinterpret_u64_f32_x4(__VA_ARGS__) -#define svreinterpret_u64_f64_x4(...) __builtin_sve_reinterpret_u64_f64_x4(__VA_ARGS__) -#define svreinterpret_f16_s8_x4(...) __builtin_sve_reinterpret_f16_s8_x4(__VA_ARGS__) -#define svreinterpret_f16_u8_x4(...) __builtin_sve_reinterpret_f16_u8_x4(__VA_ARGS__) -#define svreinterpret_f16_s16_x4(...) __builtin_sve_reinterpret_f16_s16_x4(__VA_ARGS__) -#define svreinterpret_f16_u16_x4(...) __builtin_sve_reinterpret_f16_u16_x4(__VA_ARGS__) -#define svreinterpret_f16_s32_x4(...) __builtin_sve_reinterpret_f16_s32_x4(__VA_ARGS__) -#define svreinterpret_f16_u32_x4(...) 
__builtin_sve_reinterpret_f16_u32_x4(__VA_ARGS__) -#define svreinterpret_f16_s64_x4(...) __builtin_sve_reinterpret_f16_s64_x4(__VA_ARGS__) -#define svreinterpret_f16_u64_x4(...) __builtin_sve_reinterpret_f16_u64_x4(__VA_ARGS__) -#define svreinterpret_f16_f16_x4(...) __builtin_sve_reinterpret_f16_f16_x4(__VA_ARGS__) -#define svreinterpret_f16_bf16_x4(...) __builtin_sve_reinterpret_f16_bf16_x4(__VA_ARGS__) -#define svreinterpret_f16_f32_x4(...) __builtin_sve_reinterpret_f16_f32_x4(__VA_ARGS__) -#define svreinterpret_f16_f64_x4(...) __builtin_sve_reinterpret_f16_f64_x4(__VA_ARGS__) -#define svreinterpret_bf16_s8_x4(...) __builtin_sve_reinterpret_bf16_s8_x4(__VA_ARGS__) -#define svreinterpret_bf16_u8_x4(...) __builtin_sve_reinterpret_bf16_u8_x4(__VA_ARGS__) -#define svreinterpret_bf16_s16_x4(...) __builtin_sve_reinterpret_bf16_s16_x4(__VA_ARGS__) -#define svreinterpret_bf16_u16_x4(...) __builtin_sve_reinterpret_bf16_u16_x4(__VA_ARGS__) -#define svreinterpret_bf16_s32_x4(...) __builtin_sve_reinterpret_bf16_s32_x4(__VA_ARGS__) -#define svreinterpret_bf16_u32_x4(...) __builtin_sve_reinterpret_bf16_u32_x4(__VA_ARGS__) -#define svreinterpret_bf16_s64_x4(...) __builtin_sve_reinterpret_bf16_s64_x4(__VA_ARGS__) -#define svreinterpret_bf16_u64_x4(...) __builtin_sve_reinterpret_bf16_u64_x4(__VA_ARGS__) -#define svreinterpret_bf16_f16_x4(...) __builtin_sve_reinterpret_bf16_f16_x4(__VA_ARGS__) -#define svreinterpret_bf16_bf16_x4(...) __builtin_sve_reinterpret_bf16_bf16_x4(__VA_ARGS__) -#define svreinterpret_bf16_f32_x4(...) __builtin_sve_reinterpret_bf16_f32_x4(__VA_ARGS__) -#define svreinterpret_bf16_f64_x4(...) __builtin_sve_reinterpret_bf16_f64_x4(__VA_ARGS__) -#define svreinterpret_f32_s8_x4(...) __builtin_sve_reinterpret_f32_s8_x4(__VA_ARGS__) -#define svreinterpret_f32_u8_x4(...) __builtin_sve_reinterpret_f32_u8_x4(__VA_ARGS__) -#define svreinterpret_f32_s16_x4(...) __builtin_sve_reinterpret_f32_s16_x4(__VA_ARGS__) -#define svreinterpret_f32_u16_x4(...) 
__builtin_sve_reinterpret_f32_u16_x4(__VA_ARGS__) -#define svreinterpret_f32_s32_x4(...) __builtin_sve_reinterpret_f32_s32_x4(__VA_ARGS__) -#define svreinterpret_f32_u32_x4(...) __builtin_sve_reinterpret_f32_u32_x4(__VA_ARGS__) -#define svreinterpret_f32_s64_x4(...) __builtin_sve_reinterpret_f32_s64_x4(__VA_ARGS__) -#define svreinterpret_f32_u64_x4(...) __builtin_sve_reinterpret_f32_u64_x4(__VA_ARGS__) -#define svreinterpret_f32_f16_x4(...) __builtin_sve_reinterpret_f32_f16_x4(__VA_ARGS__) -#define svreinterpret_f32_bf16_x4(...) __builtin_sve_reinterpret_f32_bf16_x4(__VA_ARGS__) -#define svreinterpret_f32_f32_x4(...) __builtin_sve_reinterpret_f32_f32_x4(__VA_ARGS__) -#define svreinterpret_f32_f64_x4(...) __builtin_sve_reinterpret_f32_f64_x4(__VA_ARGS__) -#define svreinterpret_f64_s8_x4(...) __builtin_sve_reinterpret_f64_s8_x4(__VA_ARGS__) -#define svreinterpret_f64_u8_x4(...) __builtin_sve_reinterpret_f64_u8_x4(__VA_ARGS__) -#define svreinterpret_f64_s16_x4(...) __builtin_sve_reinterpret_f64_s16_x4(__VA_ARGS__) -#define svreinterpret_f64_u16_x4(...) __builtin_sve_reinterpret_f64_u16_x4(__VA_ARGS__) -#define svreinterpret_f64_s32_x4(...) __builtin_sve_reinterpret_f64_s32_x4(__VA_ARGS__) -#define svreinterpret_f64_u32_x4(...) __builtin_sve_reinterpret_f64_u32_x4(__VA_ARGS__) -#define svreinterpret_f64_s64_x4(...) __builtin_sve_reinterpret_f64_s64_x4(__VA_ARGS__) -#define svreinterpret_f64_u64_x4(...) __builtin_sve_reinterpret_f64_u64_x4(__VA_ARGS__) -#define svreinterpret_f64_f16_x4(...) __builtin_sve_reinterpret_f64_f16_x4(__VA_ARGS__) -#define svreinterpret_f64_bf16_x4(...) __builtin_sve_reinterpret_f64_bf16_x4(__VA_ARGS__) -#define svreinterpret_f64_f32_x4(...) __builtin_sve_reinterpret_f64_f32_x4(__VA_ARGS__) -#define svreinterpret_f64_f64_x4(...) 
__builtin_sve_reinterpret_f64_f64_x4(__VA_ARGS__) -__aio __attribute__((target("sve"))) svint8x4_t svreinterpret_s8(svint8x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_s8_x4(op); -} - -__aio __attribute__((target("sve"))) svint8x4_t svreinterpret_s8(svuint8x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_u8_x4(op); -} - -__aio __attribute__((target("sve"))) svint8x4_t svreinterpret_s8(svint16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_s16_x4(op); -} - -__aio __attribute__((target("sve"))) svint8x4_t svreinterpret_s8(svuint16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_u16_x4(op); -} - -__aio __attribute__((target("sve"))) svint8x4_t svreinterpret_s8(svint32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_s32_x4(op); -} - -__aio __attribute__((target("sve"))) svint8x4_t svreinterpret_s8(svuint32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_u32_x4(op); -} - -__aio __attribute__((target("sve"))) svint8x4_t svreinterpret_s8(svint64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_s64_x4(op); -} - -__aio __attribute__((target("sve"))) svint8x4_t svreinterpret_s8(svuint64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_u64_x4(op); -} - -__aio __attribute__((target("sve"))) svint8x4_t svreinterpret_s8(svfloat16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_f16_x4(op); -} - -__aio __attribute__((target("sve"))) svint8x4_t svreinterpret_s8(svbfloat16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_bf16_x4(op); -} - -__aio __attribute__((target("sve"))) svint8x4_t svreinterpret_s8(svfloat32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s8_f32_x4(op); -} - -__aio __attribute__((target("sve"))) svint8x4_t svreinterpret_s8(svfloat64x4_t op) __arm_streaming_compatible { - 
return __builtin_sve_reinterpret_s8_f64_x4(op); -} - -__aio __attribute__((target("sve"))) svuint8x4_t svreinterpret_u8(svint8x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_s8_x4(op); -} - -__aio __attribute__((target("sve"))) svuint8x4_t svreinterpret_u8(svuint8x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_u8_x4(op); -} - -__aio __attribute__((target("sve"))) svuint8x4_t svreinterpret_u8(svint16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_s16_x4(op); -} - -__aio __attribute__((target("sve"))) svuint8x4_t svreinterpret_u8(svuint16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_u16_x4(op); -} - -__aio __attribute__((target("sve"))) svuint8x4_t svreinterpret_u8(svint32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_s32_x4(op); -} - -__aio __attribute__((target("sve"))) svuint8x4_t svreinterpret_u8(svuint32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_u32_x4(op); -} - -__aio __attribute__((target("sve"))) svuint8x4_t svreinterpret_u8(svint64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_s64_x4(op); -} - -__aio __attribute__((target("sve"))) svuint8x4_t svreinterpret_u8(svuint64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_u64_x4(op); -} - -__aio __attribute__((target("sve"))) svuint8x4_t svreinterpret_u8(svfloat16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_f16_x4(op); -} - -__aio __attribute__((target("sve"))) svuint8x4_t svreinterpret_u8(svbfloat16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_bf16_x4(op); -} - -__aio __attribute__((target("sve"))) svuint8x4_t svreinterpret_u8(svfloat32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_f32_x4(op); -} - -__aio __attribute__((target("sve"))) svuint8x4_t svreinterpret_u8(svfloat64x4_t op) 
__arm_streaming_compatible { - return __builtin_sve_reinterpret_u8_f64_x4(op); -} - -__aio __attribute__((target("sve"))) svint16x4_t svreinterpret_s16(svint8x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_s8_x4(op); -} - -__aio __attribute__((target("sve"))) svint16x4_t svreinterpret_s16(svuint8x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_u8_x4(op); -} - -__aio __attribute__((target("sve"))) svint16x4_t svreinterpret_s16(svint16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_s16_x4(op); -} - -__aio __attribute__((target("sve"))) svint16x4_t svreinterpret_s16(svuint16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_u16_x4(op); -} - -__aio __attribute__((target("sve"))) svint16x4_t svreinterpret_s16(svint32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_s32_x4(op); -} - -__aio __attribute__((target("sve"))) svint16x4_t svreinterpret_s16(svuint32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_u32_x4(op); -} - -__aio __attribute__((target("sve"))) svint16x4_t svreinterpret_s16(svint64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_s64_x4(op); -} - -__aio __attribute__((target("sve"))) svint16x4_t svreinterpret_s16(svuint64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_u64_x4(op); -} - -__aio __attribute__((target("sve"))) svint16x4_t svreinterpret_s16(svfloat16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_f16_x4(op); -} - -__aio __attribute__((target("sve"))) svint16x4_t svreinterpret_s16(svbfloat16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_bf16_x4(op); -} - -__aio __attribute__((target("sve"))) svint16x4_t svreinterpret_s16(svfloat32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_f32_x4(op); -} - -__aio __attribute__((target("sve"))) 
svint16x4_t svreinterpret_s16(svfloat64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s16_f64_x4(op); -} - -__aio __attribute__((target("sve"))) svuint16x4_t svreinterpret_u16(svint8x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_s8_x4(op); -} - -__aio __attribute__((target("sve"))) svuint16x4_t svreinterpret_u16(svuint8x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_u8_x4(op); -} - -__aio __attribute__((target("sve"))) svuint16x4_t svreinterpret_u16(svint16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_s16_x4(op); -} - -__aio __attribute__((target("sve"))) svuint16x4_t svreinterpret_u16(svuint16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_u16_x4(op); -} - -__aio __attribute__((target("sve"))) svuint16x4_t svreinterpret_u16(svint32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_s32_x4(op); -} - -__aio __attribute__((target("sve"))) svuint16x4_t svreinterpret_u16(svuint32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_u32_x4(op); -} - -__aio __attribute__((target("sve"))) svuint16x4_t svreinterpret_u16(svint64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_s64_x4(op); -} - -__aio __attribute__((target("sve"))) svuint16x4_t svreinterpret_u16(svuint64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_u64_x4(op); -} - -__aio __attribute__((target("sve"))) svuint16x4_t svreinterpret_u16(svfloat16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_f16_x4(op); -} - -__aio __attribute__((target("sve"))) svuint16x4_t svreinterpret_u16(svbfloat16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_bf16_x4(op); -} - -__aio __attribute__((target("sve"))) svuint16x4_t svreinterpret_u16(svfloat32x4_t op) __arm_streaming_compatible { - return 
__builtin_sve_reinterpret_u16_f32_x4(op); -} - -__aio __attribute__((target("sve"))) svuint16x4_t svreinterpret_u16(svfloat64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u16_f64_x4(op); -} - -__aio __attribute__((target("sve"))) svint32x4_t svreinterpret_s32(svint8x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_s8_x4(op); -} - -__aio __attribute__((target("sve"))) svint32x4_t svreinterpret_s32(svuint8x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_u8_x4(op); -} - -__aio __attribute__((target("sve"))) svint32x4_t svreinterpret_s32(svint16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_s16_x4(op); -} - -__aio __attribute__((target("sve"))) svint32x4_t svreinterpret_s32(svuint16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_u16_x4(op); -} - -__aio __attribute__((target("sve"))) svint32x4_t svreinterpret_s32(svint32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_s32_x4(op); -} - -__aio __attribute__((target("sve"))) svint32x4_t svreinterpret_s32(svuint32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_u32_x4(op); -} - -__aio __attribute__((target("sve"))) svint32x4_t svreinterpret_s32(svint64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_s64_x4(op); -} - -__aio __attribute__((target("sve"))) svint32x4_t svreinterpret_s32(svuint64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_u64_x4(op); -} - -__aio __attribute__((target("sve"))) svint32x4_t svreinterpret_s32(svfloat16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_f16_x4(op); -} - -__aio __attribute__((target("sve"))) svint32x4_t svreinterpret_s32(svbfloat16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_bf16_x4(op); -} - -__aio __attribute__((target("sve"))) svint32x4_t svreinterpret_s32(svfloat32x4_t 
op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_f32_x4(op); -} - -__aio __attribute__((target("sve"))) svint32x4_t svreinterpret_s32(svfloat64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s32_f64_x4(op); -} - -__aio __attribute__((target("sve"))) svuint32x4_t svreinterpret_u32(svint8x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_s8_x4(op); -} - -__aio __attribute__((target("sve"))) svuint32x4_t svreinterpret_u32(svuint8x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_u8_x4(op); -} - -__aio __attribute__((target("sve"))) svuint32x4_t svreinterpret_u32(svint16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_s16_x4(op); -} - -__aio __attribute__((target("sve"))) svuint32x4_t svreinterpret_u32(svuint16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_u16_x4(op); -} - -__aio __attribute__((target("sve"))) svuint32x4_t svreinterpret_u32(svint32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_s32_x4(op); -} - -__aio __attribute__((target("sve"))) svuint32x4_t svreinterpret_u32(svuint32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_u32_x4(op); -} - -__aio __attribute__((target("sve"))) svuint32x4_t svreinterpret_u32(svint64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_s64_x4(op); -} - -__aio __attribute__((target("sve"))) svuint32x4_t svreinterpret_u32(svuint64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_u64_x4(op); -} - -__aio __attribute__((target("sve"))) svuint32x4_t svreinterpret_u32(svfloat16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_f16_x4(op); -} - -__aio __attribute__((target("sve"))) svuint32x4_t svreinterpret_u32(svbfloat16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_bf16_x4(op); -} - -__aio 
__attribute__((target("sve"))) svuint32x4_t svreinterpret_u32(svfloat32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_f32_x4(op); -} - -__aio __attribute__((target("sve"))) svuint32x4_t svreinterpret_u32(svfloat64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u32_f64_x4(op); -} - -__aio __attribute__((target("sve"))) svint64x4_t svreinterpret_s64(svint8x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_s8_x4(op); -} - -__aio __attribute__((target("sve"))) svint64x4_t svreinterpret_s64(svuint8x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_u8_x4(op); -} - -__aio __attribute__((target("sve"))) svint64x4_t svreinterpret_s64(svint16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_s16_x4(op); -} - -__aio __attribute__((target("sve"))) svint64x4_t svreinterpret_s64(svuint16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_u16_x4(op); -} - -__aio __attribute__((target("sve"))) svint64x4_t svreinterpret_s64(svint32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_s32_x4(op); -} - -__aio __attribute__((target("sve"))) svint64x4_t svreinterpret_s64(svuint32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_u32_x4(op); -} - -__aio __attribute__((target("sve"))) svint64x4_t svreinterpret_s64(svint64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_s64_x4(op); -} - -__aio __attribute__((target("sve"))) svint64x4_t svreinterpret_s64(svuint64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_u64_x4(op); -} - -__aio __attribute__((target("sve"))) svint64x4_t svreinterpret_s64(svfloat16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_f16_x4(op); -} - -__aio __attribute__((target("sve"))) svint64x4_t svreinterpret_s64(svbfloat16x4_t op) __arm_streaming_compatible { - return 
__builtin_sve_reinterpret_s64_bf16_x4(op); -} - -__aio __attribute__((target("sve"))) svint64x4_t svreinterpret_s64(svfloat32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_f32_x4(op); -} - -__aio __attribute__((target("sve"))) svint64x4_t svreinterpret_s64(svfloat64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_s64_f64_x4(op); -} - -__aio __attribute__((target("sve"))) svuint64x4_t svreinterpret_u64(svint8x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_s8_x4(op); -} - -__aio __attribute__((target("sve"))) svuint64x4_t svreinterpret_u64(svuint8x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_u8_x4(op); -} - -__aio __attribute__((target("sve"))) svuint64x4_t svreinterpret_u64(svint16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_s16_x4(op); -} - -__aio __attribute__((target("sve"))) svuint64x4_t svreinterpret_u64(svuint16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_u16_x4(op); -} - -__aio __attribute__((target("sve"))) svuint64x4_t svreinterpret_u64(svint32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_s32_x4(op); -} - -__aio __attribute__((target("sve"))) svuint64x4_t svreinterpret_u64(svuint32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_u32_x4(op); -} - -__aio __attribute__((target("sve"))) svuint64x4_t svreinterpret_u64(svint64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_s64_x4(op); -} - -__aio __attribute__((target("sve"))) svuint64x4_t svreinterpret_u64(svuint64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_u64_x4(op); -} - -__aio __attribute__((target("sve"))) svuint64x4_t svreinterpret_u64(svfloat16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_f16_x4(op); -} - -__aio __attribute__((target("sve"))) svuint64x4_t 
svreinterpret_u64(svbfloat16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_bf16_x4(op); -} - -__aio __attribute__((target("sve"))) svuint64x4_t svreinterpret_u64(svfloat32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_f32_x4(op); -} - -__aio __attribute__((target("sve"))) svuint64x4_t svreinterpret_u64(svfloat64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_u64_f64_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat16x4_t svreinterpret_f16(svint8x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_s8_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat16x4_t svreinterpret_f16(svuint8x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_u8_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat16x4_t svreinterpret_f16(svint16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_s16_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat16x4_t svreinterpret_f16(svuint16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_u16_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat16x4_t svreinterpret_f16(svint32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_s32_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat16x4_t svreinterpret_f16(svuint32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_u32_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat16x4_t svreinterpret_f16(svint64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_s64_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat16x4_t svreinterpret_f16(svuint64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_u64_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat16x4_t svreinterpret_f16(svfloat16x4_t op) __arm_streaming_compatible { - return 
__builtin_sve_reinterpret_f16_f16_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat16x4_t svreinterpret_f16(svbfloat16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_bf16_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat16x4_t svreinterpret_f16(svfloat32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_f32_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat16x4_t svreinterpret_f16(svfloat64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f16_f64_x4(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x4_t svreinterpret_bf16(svint8x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_s8_x4(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x4_t svreinterpret_bf16(svuint8x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_u8_x4(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x4_t svreinterpret_bf16(svint16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_s16_x4(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x4_t svreinterpret_bf16(svuint16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_u16_x4(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x4_t svreinterpret_bf16(svint32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_s32_x4(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x4_t svreinterpret_bf16(svuint32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_u32_x4(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x4_t svreinterpret_bf16(svint64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_s64_x4(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x4_t svreinterpret_bf16(svuint64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_u64_x4(op); -} - -__aio __attribute__((target("sve"))) 
svbfloat16x4_t svreinterpret_bf16(svfloat16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_f16_x4(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x4_t svreinterpret_bf16(svbfloat16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_bf16_x4(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x4_t svreinterpret_bf16(svfloat32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_f32_x4(op); -} - -__aio __attribute__((target("sve"))) svbfloat16x4_t svreinterpret_bf16(svfloat64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_bf16_f64_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat32x4_t svreinterpret_f32(svint8x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_s8_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat32x4_t svreinterpret_f32(svuint8x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_u8_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat32x4_t svreinterpret_f32(svint16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_s16_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat32x4_t svreinterpret_f32(svuint16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_u16_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat32x4_t svreinterpret_f32(svint32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_s32_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat32x4_t svreinterpret_f32(svuint32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_u32_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat32x4_t svreinterpret_f32(svint64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_s64_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat32x4_t svreinterpret_f32(svuint64x4_t op) __arm_streaming_compatible { - return 
__builtin_sve_reinterpret_f32_u64_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat32x4_t svreinterpret_f32(svfloat16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_f16_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat32x4_t svreinterpret_f32(svbfloat16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_bf16_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat32x4_t svreinterpret_f32(svfloat32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_f32_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat32x4_t svreinterpret_f32(svfloat64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f32_f64_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat64x4_t svreinterpret_f64(svint8x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_s8_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat64x4_t svreinterpret_f64(svuint8x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_u8_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat64x4_t svreinterpret_f64(svint16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_s16_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat64x4_t svreinterpret_f64(svuint16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_u16_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat64x4_t svreinterpret_f64(svint32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_s32_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat64x4_t svreinterpret_f64(svuint32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_u32_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat64x4_t svreinterpret_f64(svint64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_s64_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat64x4_t 
svreinterpret_f64(svuint64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_u64_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat64x4_t svreinterpret_f64(svfloat16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_f16_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat64x4_t svreinterpret_f64(svbfloat16x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_bf16_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat64x4_t svreinterpret_f64(svfloat32x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_f32_x4(op); -} - -__aio __attribute__((target("sve"))) svfloat64x4_t svreinterpret_f64(svfloat64x4_t op) __arm_streaming_compatible { - return __builtin_sve_reinterpret_f64_f64_x4(op); -} - -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_m))) -svbfloat16_t svadd_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_x))) -svbfloat16_t svadd_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_z))) -svbfloat16_t svadd_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_m))) -svbfloat16_t svadd_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_x))) -svbfloat16_t svadd_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_z))) -svbfloat16_t svadd_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_bf16))) -svbfloat16_t svclamp_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_m))) -svbfloat16_t svmax_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_x))) -svbfloat16_t svmax_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_z))) -svbfloat16_t svmax_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_m))) -svbfloat16_t svmax_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_x))) -svbfloat16_t svmax_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_z))) -svbfloat16_t svmax_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_m))) -svbfloat16_t svmaxnm_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_x))) -svbfloat16_t svmaxnm_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_z))) -svbfloat16_t svmaxnm_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_m))) -svbfloat16_t svmaxnm_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_x))) -svbfloat16_t svmaxnm_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_z))) -svbfloat16_t svmaxnm_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_m))) -svbfloat16_t svmin_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_x))) -svbfloat16_t svmin_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_z))) -svbfloat16_t 
svmin_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_m))) -svbfloat16_t svmin_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_x))) -svbfloat16_t svmin_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_z))) -svbfloat16_t svmin_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_m))) -svbfloat16_t svminnm_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_x))) -svbfloat16_t svminnm_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_z))) -svbfloat16_t svminnm_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_m))) -svbfloat16_t svminnm_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_x))) -svbfloat16_t svminnm_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_z))) -svbfloat16_t svminnm_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_m))) -svbfloat16_t svmla_n_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_x))) -svbfloat16_t svmla_n_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_z))) -svbfloat16_t svmla_n_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_m))) -svbfloat16_t svmla_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t, 
svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_x))) -svbfloat16_t svmla_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_z))) -svbfloat16_t svmla_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_bf16))) -svbfloat16_t svmla_lane_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_m))) -svbfloat16_t svmls_n_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_x))) -svbfloat16_t svmls_n_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_z))) -svbfloat16_t svmls_n_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_m))) -svbfloat16_t svmls_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_x))) -svbfloat16_t svmls_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_z))) -svbfloat16_t svmls_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_bf16))) -svbfloat16_t svmls_lane_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_m))) -svbfloat16_t svmul_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_x))) -svbfloat16_t svmul_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_z))) -svbfloat16_t 
svmul_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_m))) -svbfloat16_t svmul_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_x))) -svbfloat16_t svmul_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_z))) -svbfloat16_t svmul_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_bf16))) -svbfloat16_t svmul_lane_bf16(svbfloat16_t, svbfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_m))) -svbfloat16_t svsub_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_x))) -svbfloat16_t svsub_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_z))) -svbfloat16_t svsub_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_m))) -svbfloat16_t svsub_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_x))) -svbfloat16_t svsub_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_z))) -svbfloat16_t svsub_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_m))) -svbfloat16_t svadd_m(svbool_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_x))) -svbfloat16_t svadd_x(svbool_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_z))) -svbfloat16_t svadd_z(svbool_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_m))) 
-svbfloat16_t svadd_m(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_x))) -svbfloat16_t svadd_x(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_z))) -svbfloat16_t svadd_z(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_bf16))) -svbfloat16_t svclamp(svbfloat16_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_m))) -svbfloat16_t svmax_m(svbool_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_x))) -svbfloat16_t svmax_x(svbool_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_z))) -svbfloat16_t svmax_z(svbool_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_m))) -svbfloat16_t svmax_m(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_x))) -svbfloat16_t svmax_x(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_z))) -svbfloat16_t svmax_z(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_m))) -svbfloat16_t svmaxnm_m(svbool_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_x))) -svbfloat16_t svmaxnm_x(svbool_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_z))) -svbfloat16_t svmaxnm_z(svbool_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_m))) -svbfloat16_t svmaxnm_m(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_x))) -svbfloat16_t 
svmaxnm_x(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_z))) -svbfloat16_t svmaxnm_z(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_m))) -svbfloat16_t svmin_m(svbool_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_x))) -svbfloat16_t svmin_x(svbool_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_z))) -svbfloat16_t svmin_z(svbool_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_m))) -svbfloat16_t svmin_m(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_x))) -svbfloat16_t svmin_x(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_z))) -svbfloat16_t svmin_z(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_m))) -svbfloat16_t svminnm_m(svbool_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_x))) -svbfloat16_t svminnm_x(svbool_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_z))) -svbfloat16_t svminnm_z(svbool_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_m))) -svbfloat16_t svminnm_m(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_x))) -svbfloat16_t svminnm_x(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_z))) -svbfloat16_t svminnm_z(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_m))) -svbfloat16_t svmla_m(svbool_t, 
svbfloat16_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_x))) -svbfloat16_t svmla_x(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_z))) -svbfloat16_t svmla_z(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_m))) -svbfloat16_t svmla_m(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_x))) -svbfloat16_t svmla_x(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_z))) -svbfloat16_t svmla_z(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_bf16))) -svbfloat16_t svmla_lane(svbfloat16_t, svbfloat16_t, svbfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_m))) -svbfloat16_t svmls_m(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_x))) -svbfloat16_t svmls_x(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_z))) -svbfloat16_t svmls_z(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_m))) -svbfloat16_t svmls_m(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_x))) -svbfloat16_t svmls_x(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_z))) -svbfloat16_t svmls_z(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_bf16))) -svbfloat16_t 
svmls_lane(svbfloat16_t, svbfloat16_t, svbfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_m))) -svbfloat16_t svmul_m(svbool_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_x))) -svbfloat16_t svmul_x(svbool_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_z))) -svbfloat16_t svmul_z(svbool_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_m))) -svbfloat16_t svmul_m(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_x))) -svbfloat16_t svmul_x(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_z))) -svbfloat16_t svmul_z(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_bf16))) -svbfloat16_t svmul_lane(svbfloat16_t, svbfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_m))) -svbfloat16_t svsub_m(svbool_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_x))) -svbfloat16_t svsub_x(svbool_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_z))) -svbfloat16_t svsub_z(svbool_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_m))) -svbfloat16_t svsub_m(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_x))) -svbfloat16_t svsub_x(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_z))) -svbfloat16_t svsub_z(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u8_x2))) -svuint8x2_t 
svadd_single_u8_x2(svuint8x2_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u32_x2))) -svuint32x2_t svadd_single_u32_x2(svuint32x2_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u64_x2))) -svuint64x2_t svadd_single_u64_x2(svuint64x2_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u16_x2))) -svuint16x2_t svadd_single_u16_x2(svuint16x2_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s8_x2))) -svint8x2_t svadd_single_s8_x2(svint8x2_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s32_x2))) -svint32x2_t svadd_single_s32_x2(svint32x2_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s64_x2))) -svint64x2_t svadd_single_s64_x2(svint64x2_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s16_x2))) -svint16x2_t svadd_single_s16_x2(svint16x2_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u8_x4))) -svuint8x4_t svadd_single_u8_x4(svuint8x4_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u32_x4))) -svuint32x4_t svadd_single_u32_x4(svuint32x4_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u64_x4))) -svuint64x4_t svadd_single_u64_x4(svuint64x4_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u16_x4))) -svuint16x4_t svadd_single_u16_x4(svuint16x4_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s8_x4))) -svint8x4_t svadd_single_s8_x4(svint8x4_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s32_x4))) -svint32x4_t svadd_single_s32_x4(svint32x4_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s64_x4))) -svint64x4_t 
svadd_single_s64_x4(svint64x4_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s16_x4))) -svint16x4_t svadd_single_s16_x4(svint16x4_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f64_x2))) -svfloat64x2_t svclamp_single_f64_x2(svfloat64x2_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f32_x2))) -svfloat32x2_t svclamp_single_f32_x2(svfloat32x2_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f16_x2))) -svfloat16x2_t svclamp_single_f16_x2(svfloat16x2_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s8_x2))) -svint8x2_t svclamp_single_s8_x2(svint8x2_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s32_x2))) -svint32x2_t svclamp_single_s32_x2(svint32x2_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s64_x2))) -svint64x2_t svclamp_single_s64_x2(svint64x2_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s16_x2))) -svint16x2_t svclamp_single_s16_x2(svint16x2_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u8_x2))) -svuint8x2_t svclamp_single_u8_x2(svuint8x2_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u32_x2))) -svuint32x2_t svclamp_single_u32_x2(svuint32x2_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u64_x2))) -svuint64x2_t svclamp_single_u64_x2(svuint64x2_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u16_x2))) -svuint16x2_t svclamp_single_u16_x2(svuint16x2_t, svuint16_t, svuint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f64_x4))) -svfloat64x4_t svclamp_single_f64_x4(svfloat64x4_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f32_x4))) -svfloat32x4_t svclamp_single_f32_x4(svfloat32x4_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f16_x4))) -svfloat16x4_t svclamp_single_f16_x4(svfloat16x4_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s8_x4))) -svint8x4_t svclamp_single_s8_x4(svint8x4_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s32_x4))) -svint32x4_t svclamp_single_s32_x4(svint32x4_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s64_x4))) -svint64x4_t svclamp_single_s64_x4(svint64x4_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s16_x4))) -svint16x4_t svclamp_single_s16_x4(svint16x4_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u8_x4))) -svuint8x4_t svclamp_single_u8_x4(svuint8x4_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u32_x4))) -svuint32x4_t svclamp_single_u32_x4(svuint32x4_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u64_x4))) -svuint64x4_t svclamp_single_u64_x4(svuint64x4_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u16_x4))) -svuint16x4_t svclamp_single_u16_x4(svuint16x4_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x2))) -svbfloat16_t svcvt_bf16_f32_x2(svfloat32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_x2))) -svfloat16_t 
svcvt_f16_f32_x2(svfloat32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x2))) -svint32x2_t svcvt_s32_f32_x2(svfloat32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x2))) -svuint32x2_t svcvt_u32_f32_x2(svfloat32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x4))) -svint32x4_t svcvt_s32_f32_x4(svfloat32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x4))) -svuint32x4_t svcvt_u32_f32_x4(svfloat32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x2))) -svfloat32x2_t svcvt_f32_s32_x2(svint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x4))) -svfloat32x4_t svcvt_f32_s32_x4(svint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x2))) -svfloat32x2_t svcvt_f32_u32_x2(svuint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x4))) -svfloat32x4_t svcvt_f32_u32_x4(svuint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtn_bf16_f32_x2))) -svbfloat16_t svcvtn_bf16_f32_x2(svfloat32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtn_f16_f32_x2))) -svfloat16_t svcvtn_f16_f32_x2(svfloat32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f64_x2))) -svfloat64x2_t svmax_single_f64_x2(svfloat64x2_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f32_x2))) -svfloat32x2_t svmax_single_f32_x2(svfloat32x2_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f16_x2))) -svfloat16x2_t svmax_single_f16_x2(svfloat16x2_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s8_x2))) -svint8x2_t svmax_single_s8_x2(svint8x2_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s32_x2))) -svint32x2_t 
svmax_single_s32_x2(svint32x2_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s64_x2))) -svint64x2_t svmax_single_s64_x2(svint64x2_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s16_x2))) -svint16x2_t svmax_single_s16_x2(svint16x2_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u8_x2))) -svuint8x2_t svmax_single_u8_x2(svuint8x2_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u32_x2))) -svuint32x2_t svmax_single_u32_x2(svuint32x2_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u64_x2))) -svuint64x2_t svmax_single_u64_x2(svuint64x2_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u16_x2))) -svuint16x2_t svmax_single_u16_x2(svuint16x2_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f64_x4))) -svfloat64x4_t svmax_single_f64_x4(svfloat64x4_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f32_x4))) -svfloat32x4_t svmax_single_f32_x4(svfloat32x4_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f16_x4))) -svfloat16x4_t svmax_single_f16_x4(svfloat16x4_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s8_x4))) -svint8x4_t svmax_single_s8_x4(svint8x4_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s32_x4))) -svint32x4_t svmax_single_s32_x4(svint32x4_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s64_x4))) -svint64x4_t svmax_single_s64_x4(svint64x4_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s16_x4))) -svint16x4_t svmax_single_s16_x4(svint16x4_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u8_x4))) 
-svuint8x4_t svmax_single_u8_x4(svuint8x4_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u32_x4))) -svuint32x4_t svmax_single_u32_x4(svuint32x4_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u64_x4))) -svuint64x4_t svmax_single_u64_x4(svuint64x4_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u16_x4))) -svuint16x4_t svmax_single_u16_x4(svuint16x4_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x2))) -svfloat64x2_t svmax_f64_x2(svfloat64x2_t, svfloat64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x2))) -svfloat32x2_t svmax_f32_x2(svfloat32x2_t, svfloat32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x2))) -svfloat16x2_t svmax_f16_x2(svfloat16x2_t, svfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x2))) -svint8x2_t svmax_s8_x2(svint8x2_t, svint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x2))) -svint32x2_t svmax_s32_x2(svint32x2_t, svint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x2))) -svint64x2_t svmax_s64_x2(svint64x2_t, svint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x2))) -svint16x2_t svmax_s16_x2(svint16x2_t, svint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x2))) -svuint8x2_t svmax_u8_x2(svuint8x2_t, svuint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x2))) -svuint32x2_t svmax_u32_x2(svuint32x2_t, svuint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x2))) -svuint64x2_t svmax_u64_x2(svuint64x2_t, svuint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x2))) -svuint16x2_t svmax_u16_x2(svuint16x2_t, svuint16x2_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x4))) -svfloat64x4_t svmax_f64_x4(svfloat64x4_t, svfloat64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x4))) -svfloat32x4_t svmax_f32_x4(svfloat32x4_t, svfloat32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x4))) -svfloat16x4_t svmax_f16_x4(svfloat16x4_t, svfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x4))) -svint8x4_t svmax_s8_x4(svint8x4_t, svint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x4))) -svint32x4_t svmax_s32_x4(svint32x4_t, svint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x4))) -svint64x4_t svmax_s64_x4(svint64x4_t, svint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x4))) -svint16x4_t svmax_s16_x4(svint16x4_t, svint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x4))) -svuint8x4_t svmax_u8_x4(svuint8x4_t, svuint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x4))) -svuint32x4_t svmax_u32_x4(svuint32x4_t, svuint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x4))) -svuint64x4_t svmax_u64_x4(svuint64x4_t, svuint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x4))) -svuint16x4_t svmax_u16_x4(svuint16x4_t, svuint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f64_x2))) -svfloat64x2_t svmaxnm_single_f64_x2(svfloat64x2_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f32_x2))) -svfloat32x2_t svmaxnm_single_f32_x2(svfloat32x2_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f16_x2))) -svfloat16x2_t svmaxnm_single_f16_x2(svfloat16x2_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f64_x4))) 
-svfloat64x4_t svmaxnm_single_f64_x4(svfloat64x4_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f32_x4))) -svfloat32x4_t svmaxnm_single_f32_x4(svfloat32x4_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f16_x4))) -svfloat16x4_t svmaxnm_single_f16_x4(svfloat16x4_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x2))) -svfloat64x2_t svmaxnm_f64_x2(svfloat64x2_t, svfloat64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x2))) -svfloat32x2_t svmaxnm_f32_x2(svfloat32x2_t, svfloat32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x2))) -svfloat16x2_t svmaxnm_f16_x2(svfloat16x2_t, svfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x4))) -svfloat64x4_t svmaxnm_f64_x4(svfloat64x4_t, svfloat64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x4))) -svfloat32x4_t svmaxnm_f32_x4(svfloat32x4_t, svfloat32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x4))) -svfloat16x4_t svmaxnm_f16_x4(svfloat16x4_t, svfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f64_x2))) -svfloat64x2_t svmin_single_f64_x2(svfloat64x2_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f32_x2))) -svfloat32x2_t svmin_single_f32_x2(svfloat32x2_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f16_x2))) -svfloat16x2_t svmin_single_f16_x2(svfloat16x2_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s8_x2))) -svint8x2_t svmin_single_s8_x2(svint8x2_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s32_x2))) -svint32x2_t svmin_single_s32_x2(svint32x2_t, svint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s64_x2))) -svint64x2_t svmin_single_s64_x2(svint64x2_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s16_x2))) -svint16x2_t svmin_single_s16_x2(svint16x2_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u8_x2))) -svuint8x2_t svmin_single_u8_x2(svuint8x2_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u32_x2))) -svuint32x2_t svmin_single_u32_x2(svuint32x2_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u64_x2))) -svuint64x2_t svmin_single_u64_x2(svuint64x2_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u16_x2))) -svuint16x2_t svmin_single_u16_x2(svuint16x2_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f64_x4))) -svfloat64x4_t svmin_single_f64_x4(svfloat64x4_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f32_x4))) -svfloat32x4_t svmin_single_f32_x4(svfloat32x4_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f16_x4))) -svfloat16x4_t svmin_single_f16_x4(svfloat16x4_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s8_x4))) -svint8x4_t svmin_single_s8_x4(svint8x4_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s32_x4))) -svint32x4_t svmin_single_s32_x4(svint32x4_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s64_x4))) -svint64x4_t svmin_single_s64_x4(svint64x4_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s16_x4))) -svint16x4_t svmin_single_s16_x4(svint16x4_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u8_x4))) -svuint8x4_t svmin_single_u8_x4(svuint8x4_t, svuint8_t); 
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u32_x4))) -svuint32x4_t svmin_single_u32_x4(svuint32x4_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u64_x4))) -svuint64x4_t svmin_single_u64_x4(svuint64x4_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u16_x4))) -svuint16x4_t svmin_single_u16_x4(svuint16x4_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x2))) -svfloat64x2_t svmin_f64_x2(svfloat64x2_t, svfloat64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x2))) -svfloat32x2_t svmin_f32_x2(svfloat32x2_t, svfloat32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x2))) -svfloat16x2_t svmin_f16_x2(svfloat16x2_t, svfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x2))) -svint8x2_t svmin_s8_x2(svint8x2_t, svint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x2))) -svint32x2_t svmin_s32_x2(svint32x2_t, svint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x2))) -svint64x2_t svmin_s64_x2(svint64x2_t, svint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x2))) -svint16x2_t svmin_s16_x2(svint16x2_t, svint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x2))) -svuint8x2_t svmin_u8_x2(svuint8x2_t, svuint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x2))) -svuint32x2_t svmin_u32_x2(svuint32x2_t, svuint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x2))) -svuint64x2_t svmin_u64_x2(svuint64x2_t, svuint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x2))) -svuint16x2_t svmin_u16_x2(svuint16x2_t, svuint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x4))) -svfloat64x4_t 
svmin_f64_x4(svfloat64x4_t, svfloat64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x4))) -svfloat32x4_t svmin_f32_x4(svfloat32x4_t, svfloat32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x4))) -svfloat16x4_t svmin_f16_x4(svfloat16x4_t, svfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x4))) -svint8x4_t svmin_s8_x4(svint8x4_t, svint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x4))) -svint32x4_t svmin_s32_x4(svint32x4_t, svint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x4))) -svint64x4_t svmin_s64_x4(svint64x4_t, svint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x4))) -svint16x4_t svmin_s16_x4(svint16x4_t, svint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x4))) -svuint8x4_t svmin_u8_x4(svuint8x4_t, svuint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x4))) -svuint32x4_t svmin_u32_x4(svuint32x4_t, svuint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x4))) -svuint64x4_t svmin_u64_x4(svuint64x4_t, svuint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x4))) -svuint16x4_t svmin_u16_x4(svuint16x4_t, svuint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f64_x2))) -svfloat64x2_t svminnm_single_f64_x2(svfloat64x2_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f32_x2))) -svfloat32x2_t svminnm_single_f32_x2(svfloat32x2_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f16_x2))) -svfloat16x2_t svminnm_single_f16_x2(svfloat16x2_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f64_x4))) -svfloat64x4_t svminnm_single_f64_x4(svfloat64x4_t, svfloat64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f32_x4))) -svfloat32x4_t svminnm_single_f32_x4(svfloat32x4_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f16_x4))) -svfloat16x4_t svminnm_single_f16_x4(svfloat16x4_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x2))) -svfloat64x2_t svminnm_f64_x2(svfloat64x2_t, svfloat64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x2))) -svfloat32x2_t svminnm_f32_x2(svfloat32x2_t, svfloat32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x2))) -svfloat16x2_t svminnm_f16_x2(svfloat16x2_t, svfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x4))) -svfloat64x4_t svminnm_f64_x4(svfloat64x4_t, svfloat64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x4))) -svfloat32x4_t svminnm_f32_x4(svfloat32x4_t, svfloat32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x4))) -svfloat16x4_t svminnm_f16_x4(svfloat16x4_t, svfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_s16_s32_x2))) -svint16_t svqcvt_s16_s32_x2(svint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_s16_s64_x4))) -svint16_t svqcvt_s16_s64_x4(svint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_s8_s32_x4))) -svint8_t svqcvt_s8_s32_x4(svint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u16_s32_x2))) -svuint16_t svqcvt_u16_s32_x2(svint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u16_u32_x2))) -svuint16_t svqcvt_u16_u32_x2(svuint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u16_s64_x4))) -svuint16_t svqcvt_u16_s64_x4(svint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u16_u64_x4))) -svuint16_t 
svqcvt_u16_u64_x4(svuint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u8_s32_x4))) -svuint8_t svqcvt_u8_s32_x4(svint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u8_u32_x4))) -svuint8_t svqcvt_u8_u32_x4(svuint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_s16_s64_x4))) -svint16_t svqcvtn_s16_s64_x4(svint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_s8_s32_x4))) -svint8_t svqcvtn_s8_s32_x4(svint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_s64_x4))) -svuint16_t svqcvtn_u16_s64_x4(svint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_u64_x4))) -svuint16_t svqcvtn_u16_u64_x4(svuint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u8_s32_x4))) -svuint8_t svqcvtn_u8_s32_x4(svint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u8_u32_x4))) -svuint8_t svqcvtn_u8_u32_x4(svuint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s8_x2))) -svint8x2_t svqdmulh_single_s8_x2(svint8x2_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s32_x2))) -svint32x2_t svqdmulh_single_s32_x2(svint32x2_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s64_x2))) -svint64x2_t svqdmulh_single_s64_x2(svint64x2_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s16_x2))) -svint16x2_t svqdmulh_single_s16_x2(svint16x2_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s8_x4))) -svint8x4_t svqdmulh_single_s8_x4(svint8x4_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s32_x4))) -svint32x4_t svqdmulh_single_s32_x4(svint32x4_t, svint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s64_x4))) -svint64x4_t svqdmulh_single_s64_x4(svint64x4_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s16_x4))) -svint16x4_t svqdmulh_single_s16_x4(svint16x4_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8_x2))) -svint8x2_t svqdmulh_s8_x2(svint8x2_t, svint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32_x2))) -svint32x2_t svqdmulh_s32_x2(svint32x2_t, svint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64_x2))) -svint64x2_t svqdmulh_s64_x2(svint64x2_t, svint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16_x2))) -svint16x2_t svqdmulh_s16_x2(svint16x2_t, svint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8_x4))) -svint8x4_t svqdmulh_s8_x4(svint8x4_t, svint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32_x4))) -svint32x4_t svqdmulh_s32_x4(svint32x4_t, svint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64_x4))) -svint64x4_t svqdmulh_s64_x4(svint64x4_t, svint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16_x4))) -svint16x4_t svqdmulh_s16_x4(svint16x4_t, svint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_s16_s32_x2))) -svint16_t svqrshr_n_s16_s32_x2(svint32x2_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_u16_u32_x2))) -svuint16_t svqrshr_n_u16_u32_x2(svuint32x2_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_s8_s32_x4))) -svint8_t svqrshr_n_s8_s32_x4(svint32x4_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_s16_s64_x4))) -svint16_t svqrshr_n_s16_s64_x4(svint64x4_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_u8_u32_x4))) -svuint8_t svqrshr_n_u8_u32_x4(svuint32x4_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_u16_u64_x4))) -svuint16_t svqrshr_n_u16_u64_x4(svuint64x4_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_s8_s32_x4))) -svint8_t svqrshrn_n_s8_s32_x4(svint32x4_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_s16_s64_x4))) -svint16_t svqrshrn_n_s16_s64_x4(svint64x4_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_u8_u32_x4))) -svuint8_t svqrshrn_n_u8_u32_x4(svuint32x4_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_u16_u64_x4))) -svuint16_t svqrshrn_n_u16_u64_x4(svuint64x4_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshru_n_u16_s32_x2))) -svuint16_t svqrshru_n_u16_s32_x2(svint32x2_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshru_n_u8_s32_x4))) -svuint8_t svqrshru_n_u8_s32_x4(svint32x4_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshru_n_u16_s64_x4))) -svuint16_t svqrshru_n_u16_s64_x4(svint64x4_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrun_n_u8_s32_x4))) -svuint8_t svqrshrun_n_u8_s32_x4(svint32x4_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrun_n_u16_s64_x4))) -svuint16_t svqrshrun_n_u16_s64_x4(svint64x4_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svreinterpret_b))) -svbool_t svreinterpret_b(svcount_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svreinterpret_c))) -svcount_t svreinterpret_c(svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x2))) -svfloat32x2_t svrinta_f32_x2(svfloat32x2_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x4))) -svfloat32x4_t svrinta_f32_x4(svfloat32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x2))) -svfloat32x2_t svrintm_f32_x2(svfloat32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x4))) -svfloat32x4_t svrintm_f32_x4(svfloat32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x2))) -svfloat32x2_t svrintn_f32_x2(svfloat32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x4))) -svfloat32x4_t svrintn_f32_x4(svfloat32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x2))) -svfloat32x2_t svrintp_f32_x2(svfloat32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x4))) -svfloat32x4_t svrintp_f32_x4(svfloat32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s8_x2))) -svint8x2_t svrshl_single_s8_x2(svint8x2_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s32_x2))) -svint32x2_t svrshl_single_s32_x2(svint32x2_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s64_x2))) -svint64x2_t svrshl_single_s64_x2(svint64x2_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s16_x2))) -svint16x2_t svrshl_single_s16_x2(svint16x2_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u8_x2))) -svuint8x2_t svrshl_single_u8_x2(svuint8x2_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u32_x2))) -svuint32x2_t svrshl_single_u32_x2(svuint32x2_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u64_x2))) -svuint64x2_t svrshl_single_u64_x2(svuint64x2_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u16_x2))) -svuint16x2_t 
svrshl_single_u16_x2(svuint16x2_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s8_x4))) -svint8x4_t svrshl_single_s8_x4(svint8x4_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s32_x4))) -svint32x4_t svrshl_single_s32_x4(svint32x4_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s64_x4))) -svint64x4_t svrshl_single_s64_x4(svint64x4_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s16_x4))) -svint16x4_t svrshl_single_s16_x4(svint16x4_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u8_x4))) -svuint8x4_t svrshl_single_u8_x4(svuint8x4_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u32_x4))) -svuint32x4_t svrshl_single_u32_x4(svuint32x4_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u64_x4))) -svuint64x4_t svrshl_single_u64_x4(svuint64x4_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u16_x4))) -svuint16x4_t svrshl_single_u16_x4(svuint16x4_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x2))) -svint8x2_t svrshl_s8_x2(svint8x2_t, svint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x2))) -svint32x2_t svrshl_s32_x2(svint32x2_t, svint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x2))) -svint64x2_t svrshl_s64_x2(svint64x2_t, svint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x2))) -svint16x2_t svrshl_s16_x2(svint16x2_t, svint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x2))) -svuint8x2_t svrshl_u8_x2(svuint8x2_t, svuint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x2))) -svuint32x2_t svrshl_u32_x2(svuint32x2_t, svuint32x2_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x2))) -svuint64x2_t svrshl_u64_x2(svuint64x2_t, svuint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x2))) -svuint16x2_t svrshl_u16_x2(svuint16x2_t, svuint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x4))) -svint8x4_t svrshl_s8_x4(svint8x4_t, svint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x4))) -svint32x4_t svrshl_s32_x4(svint32x4_t, svint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x4))) -svint64x4_t svrshl_s64_x4(svint64x4_t, svint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x4))) -svint16x4_t svrshl_s16_x4(svint16x4_t, svint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x4))) -svuint8x4_t svrshl_u8_x4(svuint8x4_t, svuint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x4))) -svuint32x4_t svrshl_u32_x4(svuint32x4_t, svuint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x4))) -svuint64x4_t svrshl_u64_x4(svuint64x4_t, svuint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x4))) -svuint16x4_t svrshl_u16_x4(svuint16x4_t, svuint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8_x2))) -svuint8x2_t svsel_u8_x2(svcount_t, svuint8x2_t, svuint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32_x2))) -svuint32x2_t svsel_u32_x2(svcount_t, svuint32x2_t, svuint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64_x2))) -svuint64x2_t svsel_u64_x2(svcount_t, svuint64x2_t, svuint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16_x2))) -svuint16x2_t svsel_u16_x2(svcount_t, svuint16x2_t, svuint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16_x2))) -svbfloat16x2_t 
svsel_bf16_x2(svcount_t, svbfloat16x2_t, svbfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8_x2))) -svint8x2_t svsel_s8_x2(svcount_t, svint8x2_t, svint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64_x2))) -svfloat64x2_t svsel_f64_x2(svcount_t, svfloat64x2_t, svfloat64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32_x2))) -svfloat32x2_t svsel_f32_x2(svcount_t, svfloat32x2_t, svfloat32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16_x2))) -svfloat16x2_t svsel_f16_x2(svcount_t, svfloat16x2_t, svfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32_x2))) -svint32x2_t svsel_s32_x2(svcount_t, svint32x2_t, svint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64_x2))) -svint64x2_t svsel_s64_x2(svcount_t, svint64x2_t, svint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16_x2))) -svint16x2_t svsel_s16_x2(svcount_t, svint16x2_t, svint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8_x4))) -svuint8x4_t svsel_u8_x4(svcount_t, svuint8x4_t, svuint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32_x4))) -svuint32x4_t svsel_u32_x4(svcount_t, svuint32x4_t, svuint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64_x4))) -svuint64x4_t svsel_u64_x4(svcount_t, svuint64x4_t, svuint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16_x4))) -svuint16x4_t svsel_u16_x4(svcount_t, svuint16x4_t, svuint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16_x4))) -svbfloat16x4_t svsel_bf16_x4(svcount_t, svbfloat16x4_t, svbfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8_x4))) -svint8x4_t svsel_s8_x4(svcount_t, svint8x4_t, svint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64_x4))) 
-svfloat64x4_t svsel_f64_x4(svcount_t, svfloat64x4_t, svfloat64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32_x4))) -svfloat32x4_t svsel_f32_x4(svcount_t, svfloat32x4_t, svfloat32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16_x4))) -svfloat16x4_t svsel_f16_x4(svcount_t, svfloat16x4_t, svfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32_x4))) -svint32x4_t svsel_s32_x4(svcount_t, svint32x4_t, svint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64_x4))) -svint64x4_t svsel_s64_x4(svcount_t, svint64x4_t, svint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16_x4))) -svint16x4_t svsel_s16_x4(svcount_t, svint16x4_t, svint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s32_s16_x2))) -svint32x2_t svunpk_s32_s16_x2(svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s64_s32_x2))) -svint64x2_t svunpk_s64_s32_x2(svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s16_s8_x2))) -svint16x2_t svunpk_s16_s8_x2(svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u32_u16_x2))) -svuint32x2_t svunpk_u32_u16_x2(svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u64_u32_x2))) -svuint64x2_t svunpk_u64_u32_x2(svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u16_u8_x2))) -svuint16x2_t svunpk_u16_u8_x2(svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s32_s16_x4))) -svint32x4_t svunpk_s32_s16_x4(svint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s64_s32_x4))) -svint64x4_t svunpk_s64_s32_x4(svint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s16_s8_x4))) -svint16x4_t svunpk_s16_s8_x4(svint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u32_u16_x4))) 
-svuint32x4_t svunpk_u32_u16_x4(svuint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u64_u32_x4))) -svuint64x4_t svunpk_u64_u32_x4(svuint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u16_u8_x4))) -svuint16x4_t svunpk_u16_u8_x4(svuint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u8_x2))) -svuint8x2_t svuzp_u8_x2(svuint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u32_x2))) -svuint32x2_t svuzp_u32_x2(svuint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u64_x2))) -svuint64x2_t svuzp_u64_x2(svuint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u16_x2))) -svuint16x2_t svuzp_u16_x2(svuint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_bf16_x2))) -svbfloat16x2_t svuzp_bf16_x2(svbfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s8_x2))) -svint8x2_t svuzp_s8_x2(svint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f64_x2))) -svfloat64x2_t svuzp_f64_x2(svfloat64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f32_x2))) -svfloat32x2_t svuzp_f32_x2(svfloat32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f16_x2))) -svfloat16x2_t svuzp_f16_x2(svfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s32_x2))) -svint32x2_t svuzp_s32_x2(svint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s64_x2))) -svint64x2_t svuzp_s64_x2(svint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s16_x2))) -svint16x2_t svuzp_s16_x2(svint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u8_x4))) -svuint8x4_t svuzp_u8_x4(svuint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u32_x4))) -svuint32x4_t svuzp_u32_x4(svuint32x4_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u64_x4))) -svuint64x4_t svuzp_u64_x4(svuint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u16_x4))) -svuint16x4_t svuzp_u16_x4(svuint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_bf16_x4))) -svbfloat16x4_t svuzp_bf16_x4(svbfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s8_x4))) -svint8x4_t svuzp_s8_x4(svint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f64_x4))) -svfloat64x4_t svuzp_f64_x4(svfloat64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f32_x4))) -svfloat32x4_t svuzp_f32_x4(svfloat32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f16_x4))) -svfloat16x4_t svuzp_f16_x4(svfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s32_x4))) -svint32x4_t svuzp_s32_x4(svint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s64_x4))) -svint64x4_t svuzp_s64_x4(svint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s16_x4))) -svint16x4_t svuzp_s16_x4(svint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u8_x2))) -svuint8x2_t svuzpq_u8_x2(svuint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u32_x2))) -svuint32x2_t svuzpq_u32_x2(svuint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u64_x2))) -svuint64x2_t svuzpq_u64_x2(svuint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u16_x2))) -svuint16x2_t svuzpq_u16_x2(svuint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_bf16_x2))) -svbfloat16x2_t svuzpq_bf16_x2(svbfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s8_x2))) -svint8x2_t svuzpq_s8_x2(svint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f64_x2))) -svfloat64x2_t 
svuzpq_f64_x2(svfloat64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f32_x2))) -svfloat32x2_t svuzpq_f32_x2(svfloat32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f16_x2))) -svfloat16x2_t svuzpq_f16_x2(svfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s32_x2))) -svint32x2_t svuzpq_s32_x2(svint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s64_x2))) -svint64x2_t svuzpq_s64_x2(svint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s16_x2))) -svint16x2_t svuzpq_s16_x2(svint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u8_x4))) -svuint8x4_t svuzpq_u8_x4(svuint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u32_x4))) -svuint32x4_t svuzpq_u32_x4(svuint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u64_x4))) -svuint64x4_t svuzpq_u64_x4(svuint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u16_x4))) -svuint16x4_t svuzpq_u16_x4(svuint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_bf16_x4))) -svbfloat16x4_t svuzpq_bf16_x4(svbfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s8_x4))) -svint8x4_t svuzpq_s8_x4(svint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f64_x4))) -svfloat64x4_t svuzpq_f64_x4(svfloat64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f32_x4))) -svfloat32x4_t svuzpq_f32_x4(svfloat32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f16_x4))) -svfloat16x4_t svuzpq_f16_x4(svfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s32_x4))) -svint32x4_t svuzpq_s32_x4(svint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s64_x4))) -svint64x4_t svuzpq_s64_x4(svint64x4_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s16_x4))) -svint16x4_t svuzpq_s16_x4(svint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u8_x2))) -svuint8x2_t svzip_u8_x2(svuint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u32_x2))) -svuint32x2_t svzip_u32_x2(svuint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u64_x2))) -svuint64x2_t svzip_u64_x2(svuint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u16_x2))) -svuint16x2_t svzip_u16_x2(svuint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_bf16_x2))) -svbfloat16x2_t svzip_bf16_x2(svbfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s8_x2))) -svint8x2_t svzip_s8_x2(svint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f64_x2))) -svfloat64x2_t svzip_f64_x2(svfloat64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f32_x2))) -svfloat32x2_t svzip_f32_x2(svfloat32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f16_x2))) -svfloat16x2_t svzip_f16_x2(svfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s32_x2))) -svint32x2_t svzip_s32_x2(svint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s64_x2))) -svint64x2_t svzip_s64_x2(svint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s16_x2))) -svint16x2_t svzip_s16_x2(svint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u8_x4))) -svuint8x4_t svzip_u8_x4(svuint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u32_x4))) -svuint32x4_t svzip_u32_x4(svuint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u64_x4))) -svuint64x4_t svzip_u64_x4(svuint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u16_x4))) -svuint16x4_t 
svzip_u16_x4(svuint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_bf16_x4))) -svbfloat16x4_t svzip_bf16_x4(svbfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s8_x4))) -svint8x4_t svzip_s8_x4(svint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f64_x4))) -svfloat64x4_t svzip_f64_x4(svfloat64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f32_x4))) -svfloat32x4_t svzip_f32_x4(svfloat32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f16_x4))) -svfloat16x4_t svzip_f16_x4(svfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s32_x4))) -svint32x4_t svzip_s32_x4(svint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s64_x4))) -svint64x4_t svzip_s64_x4(svint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s16_x4))) -svint16x4_t svzip_s16_x4(svint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u8_x2))) -svuint8x2_t svzipq_u8_x2(svuint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u32_x2))) -svuint32x2_t svzipq_u32_x2(svuint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u64_x2))) -svuint64x2_t svzipq_u64_x2(svuint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u16_x2))) -svuint16x2_t svzipq_u16_x2(svuint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_bf16_x2))) -svbfloat16x2_t svzipq_bf16_x2(svbfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s8_x2))) -svint8x2_t svzipq_s8_x2(svint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f64_x2))) -svfloat64x2_t svzipq_f64_x2(svfloat64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f32_x2))) -svfloat32x2_t svzipq_f32_x2(svfloat32x2_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f16_x2))) -svfloat16x2_t svzipq_f16_x2(svfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s32_x2))) -svint32x2_t svzipq_s32_x2(svint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s64_x2))) -svint64x2_t svzipq_s64_x2(svint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s16_x2))) -svint16x2_t svzipq_s16_x2(svint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u8_x4))) -svuint8x4_t svzipq_u8_x4(svuint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u32_x4))) -svuint32x4_t svzipq_u32_x4(svuint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u64_x4))) -svuint64x4_t svzipq_u64_x4(svuint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u16_x4))) -svuint16x4_t svzipq_u16_x4(svuint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_bf16_x4))) -svbfloat16x4_t svzipq_bf16_x4(svbfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s8_x4))) -svint8x4_t svzipq_s8_x4(svint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f64_x4))) -svfloat64x4_t svzipq_f64_x4(svfloat64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f32_x4))) -svfloat32x4_t svzipq_f32_x4(svfloat32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f16_x4))) -svfloat16x4_t svzipq_f16_x4(svfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s32_x4))) -svint32x4_t svzipq_s32_x4(svint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s64_x4))) -svint64x4_t svzipq_s64_x4(svint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s16_x4))) -svint16x4_t svzipq_s16_x4(svint16x4_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u8_x2))) -svuint8x2_t svadd(svuint8x2_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u32_x2))) -svuint32x2_t svadd(svuint32x2_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u64_x2))) -svuint64x2_t svadd(svuint64x2_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u16_x2))) -svuint16x2_t svadd(svuint16x2_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s8_x2))) -svint8x2_t svadd(svint8x2_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s32_x2))) -svint32x2_t svadd(svint32x2_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s64_x2))) -svint64x2_t svadd(svint64x2_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s16_x2))) -svint16x2_t svadd(svint16x2_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u8_x4))) -svuint8x4_t svadd(svuint8x4_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u32_x4))) -svuint32x4_t svadd(svuint32x4_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u64_x4))) -svuint64x4_t svadd(svuint64x4_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u16_x4))) -svuint16x4_t svadd(svuint16x4_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s8_x4))) -svint8x4_t svadd(svint8x4_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s32_x4))) -svint32x4_t svadd(svint32x4_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s64_x4))) -svint64x4_t svadd(svint64x4_t, svint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s16_x4))) -svint16x4_t svadd(svint16x4_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f64_x2))) -svfloat64x2_t svclamp(svfloat64x2_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f32_x2))) -svfloat32x2_t svclamp(svfloat32x2_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f16_x2))) -svfloat16x2_t svclamp(svfloat16x2_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s8_x2))) -svint8x2_t svclamp(svint8x2_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s32_x2))) -svint32x2_t svclamp(svint32x2_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s64_x2))) -svint64x2_t svclamp(svint64x2_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s16_x2))) -svint16x2_t svclamp(svint16x2_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u8_x2))) -svuint8x2_t svclamp(svuint8x2_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u32_x2))) -svuint32x2_t svclamp(svuint32x2_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u64_x2))) -svuint64x2_t svclamp(svuint64x2_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u16_x2))) -svuint16x2_t svclamp(svuint16x2_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f64_x4))) -svfloat64x4_t svclamp(svfloat64x4_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f32_x4))) -svfloat32x4_t 
svclamp(svfloat32x4_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f16_x4))) -svfloat16x4_t svclamp(svfloat16x4_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s8_x4))) -svint8x4_t svclamp(svint8x4_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s32_x4))) -svint32x4_t svclamp(svint32x4_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s64_x4))) -svint64x4_t svclamp(svint64x4_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s16_x4))) -svint16x4_t svclamp(svint16x4_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u8_x4))) -svuint8x4_t svclamp(svuint8x4_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u32_x4))) -svuint32x4_t svclamp(svuint32x4_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u64_x4))) -svuint64x4_t svclamp(svuint64x4_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u16_x4))) -svuint16x4_t svclamp(svuint16x4_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x2))) -svbfloat16_t svcvt_bf16(svfloat32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_x2))) -svfloat16_t svcvt_f16(svfloat32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x2))) -svint32x2_t svcvt_s32(svfloat32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x2))) -svuint32x2_t svcvt_u32(svfloat32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x4))) -svint32x4_t svcvt_s32(svfloat32x4_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x4))) -svuint32x4_t svcvt_u32(svfloat32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x2))) -svfloat32x2_t svcvt_f32(svint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x4))) -svfloat32x4_t svcvt_f32(svint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x2))) -svfloat32x2_t svcvt_f32(svuint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x4))) -svfloat32x4_t svcvt_f32(svuint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtn_bf16_f32_x2))) -svbfloat16_t svcvtn_bf16(svfloat32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtn_f16_f32_x2))) -svfloat16_t svcvtn_f16(svfloat32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f64_x2))) -svfloat64x2_t svmax(svfloat64x2_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f32_x2))) -svfloat32x2_t svmax(svfloat32x2_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f16_x2))) -svfloat16x2_t svmax(svfloat16x2_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s8_x2))) -svint8x2_t svmax(svint8x2_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s32_x2))) -svint32x2_t svmax(svint32x2_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s64_x2))) -svint64x2_t svmax(svint64x2_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s16_x2))) -svint16x2_t svmax(svint16x2_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u8_x2))) -svuint8x2_t svmax(svuint8x2_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u32_x2))) -svuint32x2_t 
svmax(svuint32x2_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u64_x2))) -svuint64x2_t svmax(svuint64x2_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u16_x2))) -svuint16x2_t svmax(svuint16x2_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f64_x4))) -svfloat64x4_t svmax(svfloat64x4_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f32_x4))) -svfloat32x4_t svmax(svfloat32x4_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f16_x4))) -svfloat16x4_t svmax(svfloat16x4_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s8_x4))) -svint8x4_t svmax(svint8x4_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s32_x4))) -svint32x4_t svmax(svint32x4_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s64_x4))) -svint64x4_t svmax(svint64x4_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s16_x4))) -svint16x4_t svmax(svint16x4_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u8_x4))) -svuint8x4_t svmax(svuint8x4_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u32_x4))) -svuint32x4_t svmax(svuint32x4_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u64_x4))) -svuint64x4_t svmax(svuint64x4_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u16_x4))) -svuint16x4_t svmax(svuint16x4_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x2))) -svfloat64x2_t svmax(svfloat64x2_t, svfloat64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x2))) -svfloat32x2_t svmax(svfloat32x2_t, svfloat32x2_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x2))) -svfloat16x2_t svmax(svfloat16x2_t, svfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x2))) -svint8x2_t svmax(svint8x2_t, svint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x2))) -svint32x2_t svmax(svint32x2_t, svint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x2))) -svint64x2_t svmax(svint64x2_t, svint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x2))) -svint16x2_t svmax(svint16x2_t, svint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x2))) -svuint8x2_t svmax(svuint8x2_t, svuint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x2))) -svuint32x2_t svmax(svuint32x2_t, svuint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x2))) -svuint64x2_t svmax(svuint64x2_t, svuint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x2))) -svuint16x2_t svmax(svuint16x2_t, svuint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x4))) -svfloat64x4_t svmax(svfloat64x4_t, svfloat64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x4))) -svfloat32x4_t svmax(svfloat32x4_t, svfloat32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x4))) -svfloat16x4_t svmax(svfloat16x4_t, svfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x4))) -svint8x4_t svmax(svint8x4_t, svint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x4))) -svint32x4_t svmax(svint32x4_t, svint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x4))) -svint64x4_t svmax(svint64x4_t, svint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x4))) -svint16x4_t svmax(svint16x4_t, 
svint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x4))) -svuint8x4_t svmax(svuint8x4_t, svuint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x4))) -svuint32x4_t svmax(svuint32x4_t, svuint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x4))) -svuint64x4_t svmax(svuint64x4_t, svuint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x4))) -svuint16x4_t svmax(svuint16x4_t, svuint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f64_x2))) -svfloat64x2_t svmaxnm(svfloat64x2_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f32_x2))) -svfloat32x2_t svmaxnm(svfloat32x2_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f16_x2))) -svfloat16x2_t svmaxnm(svfloat16x2_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f64_x4))) -svfloat64x4_t svmaxnm(svfloat64x4_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f32_x4))) -svfloat32x4_t svmaxnm(svfloat32x4_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f16_x4))) -svfloat16x4_t svmaxnm(svfloat16x4_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x2))) -svfloat64x2_t svmaxnm(svfloat64x2_t, svfloat64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x2))) -svfloat32x2_t svmaxnm(svfloat32x2_t, svfloat32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x2))) -svfloat16x2_t svmaxnm(svfloat16x2_t, svfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x4))) -svfloat64x4_t svmaxnm(svfloat64x4_t, svfloat64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x4))) -svfloat32x4_t 
svmaxnm(svfloat32x4_t, svfloat32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x4))) -svfloat16x4_t svmaxnm(svfloat16x4_t, svfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f64_x2))) -svfloat64x2_t svmin(svfloat64x2_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f32_x2))) -svfloat32x2_t svmin(svfloat32x2_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f16_x2))) -svfloat16x2_t svmin(svfloat16x2_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s8_x2))) -svint8x2_t svmin(svint8x2_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s32_x2))) -svint32x2_t svmin(svint32x2_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s64_x2))) -svint64x2_t svmin(svint64x2_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s16_x2))) -svint16x2_t svmin(svint16x2_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u8_x2))) -svuint8x2_t svmin(svuint8x2_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u32_x2))) -svuint32x2_t svmin(svuint32x2_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u64_x2))) -svuint64x2_t svmin(svuint64x2_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u16_x2))) -svuint16x2_t svmin(svuint16x2_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f64_x4))) -svfloat64x4_t svmin(svfloat64x4_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f32_x4))) -svfloat32x4_t svmin(svfloat32x4_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f16_x4))) -svfloat16x4_t 
svmin(svfloat16x4_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s8_x4))) -svint8x4_t svmin(svint8x4_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s32_x4))) -svint32x4_t svmin(svint32x4_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s64_x4))) -svint64x4_t svmin(svint64x4_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s16_x4))) -svint16x4_t svmin(svint16x4_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u8_x4))) -svuint8x4_t svmin(svuint8x4_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u32_x4))) -svuint32x4_t svmin(svuint32x4_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u64_x4))) -svuint64x4_t svmin(svuint64x4_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u16_x4))) -svuint16x4_t svmin(svuint16x4_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x2))) -svfloat64x2_t svmin(svfloat64x2_t, svfloat64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x2))) -svfloat32x2_t svmin(svfloat32x2_t, svfloat32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x2))) -svfloat16x2_t svmin(svfloat16x2_t, svfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x2))) -svint8x2_t svmin(svint8x2_t, svint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x2))) -svint32x2_t svmin(svint32x2_t, svint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x2))) -svint64x2_t svmin(svint64x2_t, svint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x2))) -svint16x2_t svmin(svint16x2_t, svint16x2_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x2))) -svuint8x2_t svmin(svuint8x2_t, svuint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x2))) -svuint32x2_t svmin(svuint32x2_t, svuint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x2))) -svuint64x2_t svmin(svuint64x2_t, svuint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x2))) -svuint16x2_t svmin(svuint16x2_t, svuint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x4))) -svfloat64x4_t svmin(svfloat64x4_t, svfloat64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x4))) -svfloat32x4_t svmin(svfloat32x4_t, svfloat32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x4))) -svfloat16x4_t svmin(svfloat16x4_t, svfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x4))) -svint8x4_t svmin(svint8x4_t, svint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x4))) -svint32x4_t svmin(svint32x4_t, svint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x4))) -svint64x4_t svmin(svint64x4_t, svint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x4))) -svint16x4_t svmin(svint16x4_t, svint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x4))) -svuint8x4_t svmin(svuint8x4_t, svuint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x4))) -svuint32x4_t svmin(svuint32x4_t, svuint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x4))) -svuint64x4_t svmin(svuint64x4_t, svuint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x4))) -svuint16x4_t svmin(svuint16x4_t, svuint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f64_x2))) -svfloat64x2_t 
svminnm(svfloat64x2_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f32_x2))) -svfloat32x2_t svminnm(svfloat32x2_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f16_x2))) -svfloat16x2_t svminnm(svfloat16x2_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f64_x4))) -svfloat64x4_t svminnm(svfloat64x4_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f32_x4))) -svfloat32x4_t svminnm(svfloat32x4_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f16_x4))) -svfloat16x4_t svminnm(svfloat16x4_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x2))) -svfloat64x2_t svminnm(svfloat64x2_t, svfloat64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x2))) -svfloat32x2_t svminnm(svfloat32x2_t, svfloat32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x2))) -svfloat16x2_t svminnm(svfloat16x2_t, svfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x4))) -svfloat64x4_t svminnm(svfloat64x4_t, svfloat64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x4))) -svfloat32x4_t svminnm(svfloat32x4_t, svfloat32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x4))) -svfloat16x4_t svminnm(svfloat16x4_t, svfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_s16_s32_x2))) -svint16_t svqcvt_s16(svint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_s16_s64_x4))) -svint16_t svqcvt_s16(svint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_s8_s32_x4))) -svint8_t svqcvt_s8(svint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u16_s32_x2))) -svuint16_t 
svqcvt_u16(svint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u16_u32_x2))) -svuint16_t svqcvt_u16(svuint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u16_s64_x4))) -svuint16_t svqcvt_u16(svint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u16_u64_x4))) -svuint16_t svqcvt_u16(svuint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u8_s32_x4))) -svuint8_t svqcvt_u8(svint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u8_u32_x4))) -svuint8_t svqcvt_u8(svuint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_s16_s64_x4))) -svint16_t svqcvtn_s16(svint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_s8_s32_x4))) -svint8_t svqcvtn_s8(svint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_s64_x4))) -svuint16_t svqcvtn_u16(svint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_u64_x4))) -svuint16_t svqcvtn_u16(svuint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u8_s32_x4))) -svuint8_t svqcvtn_u8(svint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u8_u32_x4))) -svuint8_t svqcvtn_u8(svuint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s8_x2))) -svint8x2_t svqdmulh(svint8x2_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s32_x2))) -svint32x2_t svqdmulh(svint32x2_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s64_x2))) -svint64x2_t svqdmulh(svint64x2_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s16_x2))) -svint16x2_t svqdmulh(svint16x2_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s8_x4))) -svint8x4_t 
svqdmulh(svint8x4_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s32_x4))) -svint32x4_t svqdmulh(svint32x4_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s64_x4))) -svint64x4_t svqdmulh(svint64x4_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s16_x4))) -svint16x4_t svqdmulh(svint16x4_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8_x2))) -svint8x2_t svqdmulh(svint8x2_t, svint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32_x2))) -svint32x2_t svqdmulh(svint32x2_t, svint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64_x2))) -svint64x2_t svqdmulh(svint64x2_t, svint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16_x2))) -svint16x2_t svqdmulh(svint16x2_t, svint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8_x4))) -svint8x4_t svqdmulh(svint8x4_t, svint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32_x4))) -svint32x4_t svqdmulh(svint32x4_t, svint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64_x4))) -svint64x4_t svqdmulh(svint64x4_t, svint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16_x4))) -svint16x4_t svqdmulh(svint16x4_t, svint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_s16_s32_x2))) -svint16_t svqrshr_s16(svint32x2_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_u16_u32_x2))) -svuint16_t svqrshr_u16(svuint32x2_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_s8_s32_x4))) -svint8_t svqrshr_s8(svint32x4_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_s16_s64_x4))) -svint16_t svqrshr_s16(svint64x4_t, uint64_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_u8_u32_x4))) -svuint8_t svqrshr_u8(svuint32x4_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_u16_u64_x4))) -svuint16_t svqrshr_u16(svuint64x4_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_s8_s32_x4))) -svint8_t svqrshrn_s8(svint32x4_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_s16_s64_x4))) -svint16_t svqrshrn_s16(svint64x4_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_u8_u32_x4))) -svuint8_t svqrshrn_u8(svuint32x4_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_u16_u64_x4))) -svuint16_t svqrshrn_u16(svuint64x4_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshru_n_u16_s32_x2))) -svuint16_t svqrshru_u16(svint32x2_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshru_n_u8_s32_x4))) -svuint8_t svqrshru_u8(svint32x4_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshru_n_u16_s64_x4))) -svuint16_t svqrshru_u16(svint64x4_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrun_n_u8_s32_x4))) -svuint8_t svqrshrun_u8(svint32x4_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrun_n_u16_s64_x4))) -svuint16_t svqrshrun_u16(svint64x4_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svreinterpret_b))) -svbool_t svreinterpret(svcount_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svreinterpret_c))) -svcount_t svreinterpret(svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x2))) -svfloat32x2_t svrinta(svfloat32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x4))) -svfloat32x4_t svrinta(svfloat32x4_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x2))) -svfloat32x2_t svrintm(svfloat32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x4))) -svfloat32x4_t svrintm(svfloat32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x2))) -svfloat32x2_t svrintn(svfloat32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x4))) -svfloat32x4_t svrintn(svfloat32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x2))) -svfloat32x2_t svrintp(svfloat32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x4))) -svfloat32x4_t svrintp(svfloat32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s8_x2))) -svint8x2_t svrshl(svint8x2_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s32_x2))) -svint32x2_t svrshl(svint32x2_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s64_x2))) -svint64x2_t svrshl(svint64x2_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s16_x2))) -svint16x2_t svrshl(svint16x2_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u8_x2))) -svuint8x2_t svrshl(svuint8x2_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u32_x2))) -svuint32x2_t svrshl(svuint32x2_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u64_x2))) -svuint64x2_t svrshl(svuint64x2_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u16_x2))) -svuint16x2_t svrshl(svuint16x2_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s8_x4))) -svint8x4_t svrshl(svint8x4_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s32_x4))) -svint32x4_t 
svrshl(svint32x4_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s64_x4))) -svint64x4_t svrshl(svint64x4_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s16_x4))) -svint16x4_t svrshl(svint16x4_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u8_x4))) -svuint8x4_t svrshl(svuint8x4_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u32_x4))) -svuint32x4_t svrshl(svuint32x4_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u64_x4))) -svuint64x4_t svrshl(svuint64x4_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u16_x4))) -svuint16x4_t svrshl(svuint16x4_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x2))) -svint8x2_t svrshl(svint8x2_t, svint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x2))) -svint32x2_t svrshl(svint32x2_t, svint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x2))) -svint64x2_t svrshl(svint64x2_t, svint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x2))) -svint16x2_t svrshl(svint16x2_t, svint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x2))) -svuint8x2_t svrshl(svuint8x2_t, svuint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x2))) -svuint32x2_t svrshl(svuint32x2_t, svuint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x2))) -svuint64x2_t svrshl(svuint64x2_t, svuint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x2))) -svuint16x2_t svrshl(svuint16x2_t, svuint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x4))) -svint8x4_t svrshl(svint8x4_t, svint8x4_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x4))) -svint32x4_t svrshl(svint32x4_t, svint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x4))) -svint64x4_t svrshl(svint64x4_t, svint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x4))) -svint16x4_t svrshl(svint16x4_t, svint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x4))) -svuint8x4_t svrshl(svuint8x4_t, svuint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x4))) -svuint32x4_t svrshl(svuint32x4_t, svuint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x4))) -svuint64x4_t svrshl(svuint64x4_t, svuint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x4))) -svuint16x4_t svrshl(svuint16x4_t, svuint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8_x2))) -svuint8x2_t svsel(svcount_t, svuint8x2_t, svuint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32_x2))) -svuint32x2_t svsel(svcount_t, svuint32x2_t, svuint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64_x2))) -svuint64x2_t svsel(svcount_t, svuint64x2_t, svuint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16_x2))) -svuint16x2_t svsel(svcount_t, svuint16x2_t, svuint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16_x2))) -svbfloat16x2_t svsel(svcount_t, svbfloat16x2_t, svbfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8_x2))) -svint8x2_t svsel(svcount_t, svint8x2_t, svint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64_x2))) -svfloat64x2_t svsel(svcount_t, svfloat64x2_t, svfloat64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32_x2))) -svfloat32x2_t svsel(svcount_t, svfloat32x2_t, svfloat32x2_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16_x2))) -svfloat16x2_t svsel(svcount_t, svfloat16x2_t, svfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32_x2))) -svint32x2_t svsel(svcount_t, svint32x2_t, svint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64_x2))) -svint64x2_t svsel(svcount_t, svint64x2_t, svint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16_x2))) -svint16x2_t svsel(svcount_t, svint16x2_t, svint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8_x4))) -svuint8x4_t svsel(svcount_t, svuint8x4_t, svuint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32_x4))) -svuint32x4_t svsel(svcount_t, svuint32x4_t, svuint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64_x4))) -svuint64x4_t svsel(svcount_t, svuint64x4_t, svuint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16_x4))) -svuint16x4_t svsel(svcount_t, svuint16x4_t, svuint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16_x4))) -svbfloat16x4_t svsel(svcount_t, svbfloat16x4_t, svbfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8_x4))) -svint8x4_t svsel(svcount_t, svint8x4_t, svint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64_x4))) -svfloat64x4_t svsel(svcount_t, svfloat64x4_t, svfloat64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32_x4))) -svfloat32x4_t svsel(svcount_t, svfloat32x4_t, svfloat32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16_x4))) -svfloat16x4_t svsel(svcount_t, svfloat16x4_t, svfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32_x4))) -svint32x4_t svsel(svcount_t, svint32x4_t, svint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64_x4))) 
-svint64x4_t svsel(svcount_t, svint64x4_t, svint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16_x4))) -svint16x4_t svsel(svcount_t, svint16x4_t, svint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s32_s16_x2))) -svint32x2_t svunpk_s32(svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s64_s32_x2))) -svint64x2_t svunpk_s64(svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s16_s8_x2))) -svint16x2_t svunpk_s16(svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u32_u16_x2))) -svuint32x2_t svunpk_u32(svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u64_u32_x2))) -svuint64x2_t svunpk_u64(svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u16_u8_x2))) -svuint16x2_t svunpk_u16(svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s32_s16_x4))) -svint32x4_t svunpk_s32(svint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s64_s32_x4))) -svint64x4_t svunpk_s64(svint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s16_s8_x4))) -svint16x4_t svunpk_s16(svint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u32_u16_x4))) -svuint32x4_t svunpk_u32(svuint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u64_u32_x4))) -svuint64x4_t svunpk_u64(svuint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u16_u8_x4))) -svuint16x4_t svunpk_u16(svuint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u8_x2))) -svuint8x2_t svuzp(svuint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u32_x2))) -svuint32x2_t svuzp(svuint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u64_x2))) -svuint64x2_t svuzp(svuint64x2_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u16_x2))) -svuint16x2_t svuzp(svuint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_bf16_x2))) -svbfloat16x2_t svuzp(svbfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s8_x2))) -svint8x2_t svuzp(svint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f64_x2))) -svfloat64x2_t svuzp(svfloat64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f32_x2))) -svfloat32x2_t svuzp(svfloat32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f16_x2))) -svfloat16x2_t svuzp(svfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s32_x2))) -svint32x2_t svuzp(svint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s64_x2))) -svint64x2_t svuzp(svint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s16_x2))) -svint16x2_t svuzp(svint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u8_x4))) -svuint8x4_t svuzp(svuint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u32_x4))) -svuint32x4_t svuzp(svuint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u64_x4))) -svuint64x4_t svuzp(svuint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u16_x4))) -svuint16x4_t svuzp(svuint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_bf16_x4))) -svbfloat16x4_t svuzp(svbfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s8_x4))) -svint8x4_t svuzp(svint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f64_x4))) -svfloat64x4_t svuzp(svfloat64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f32_x4))) -svfloat32x4_t svuzp(svfloat32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f16_x4))) 
-svfloat16x4_t svuzp(svfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s32_x4))) -svint32x4_t svuzp(svint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s64_x4))) -svint64x4_t svuzp(svint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s16_x4))) -svint16x4_t svuzp(svint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u8_x2))) -svuint8x2_t svuzpq(svuint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u32_x2))) -svuint32x2_t svuzpq(svuint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u64_x2))) -svuint64x2_t svuzpq(svuint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u16_x2))) -svuint16x2_t svuzpq(svuint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_bf16_x2))) -svbfloat16x2_t svuzpq(svbfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s8_x2))) -svint8x2_t svuzpq(svint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f64_x2))) -svfloat64x2_t svuzpq(svfloat64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f32_x2))) -svfloat32x2_t svuzpq(svfloat32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f16_x2))) -svfloat16x2_t svuzpq(svfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s32_x2))) -svint32x2_t svuzpq(svint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s64_x2))) -svint64x2_t svuzpq(svint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s16_x2))) -svint16x2_t svuzpq(svint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u8_x4))) -svuint8x4_t svuzpq(svuint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u32_x4))) -svuint32x4_t svuzpq(svuint32x4_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u64_x4))) -svuint64x4_t svuzpq(svuint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u16_x4))) -svuint16x4_t svuzpq(svuint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_bf16_x4))) -svbfloat16x4_t svuzpq(svbfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s8_x4))) -svint8x4_t svuzpq(svint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f64_x4))) -svfloat64x4_t svuzpq(svfloat64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f32_x4))) -svfloat32x4_t svuzpq(svfloat32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f16_x4))) -svfloat16x4_t svuzpq(svfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s32_x4))) -svint32x4_t svuzpq(svint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s64_x4))) -svint64x4_t svuzpq(svint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s16_x4))) -svint16x4_t svuzpq(svint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u8_x2))) -svuint8x2_t svzip(svuint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u32_x2))) -svuint32x2_t svzip(svuint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u64_x2))) -svuint64x2_t svzip(svuint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u16_x2))) -svuint16x2_t svzip(svuint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_bf16_x2))) -svbfloat16x2_t svzip(svbfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s8_x2))) -svint8x2_t svzip(svint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f64_x2))) -svfloat64x2_t svzip(svfloat64x2_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f32_x2))) -svfloat32x2_t svzip(svfloat32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f16_x2))) -svfloat16x2_t svzip(svfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s32_x2))) -svint32x2_t svzip(svint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s64_x2))) -svint64x2_t svzip(svint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s16_x2))) -svint16x2_t svzip(svint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u8_x4))) -svuint8x4_t svzip(svuint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u32_x4))) -svuint32x4_t svzip(svuint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u64_x4))) -svuint64x4_t svzip(svuint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u16_x4))) -svuint16x4_t svzip(svuint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_bf16_x4))) -svbfloat16x4_t svzip(svbfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s8_x4))) -svint8x4_t svzip(svint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f64_x4))) -svfloat64x4_t svzip(svfloat64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f32_x4))) -svfloat32x4_t svzip(svfloat32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f16_x4))) -svfloat16x4_t svzip(svfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s32_x4))) -svint32x4_t svzip(svint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s64_x4))) -svint64x4_t svzip(svint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s16_x4))) -svint16x4_t svzip(svint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u8_x2))) -svuint8x2_t 
svzipq(svuint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u32_x2))) -svuint32x2_t svzipq(svuint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u64_x2))) -svuint64x2_t svzipq(svuint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u16_x2))) -svuint16x2_t svzipq(svuint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_bf16_x2))) -svbfloat16x2_t svzipq(svbfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s8_x2))) -svint8x2_t svzipq(svint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f64_x2))) -svfloat64x2_t svzipq(svfloat64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f32_x2))) -svfloat32x2_t svzipq(svfloat32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f16_x2))) -svfloat16x2_t svzipq(svfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s32_x2))) -svint32x2_t svzipq(svint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s64_x2))) -svint64x2_t svzipq(svint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s16_x2))) -svint16x2_t svzipq(svint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u8_x4))) -svuint8x4_t svzipq(svuint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u32_x4))) -svuint32x4_t svzipq(svuint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u64_x4))) -svuint64x4_t svzipq(svuint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u16_x4))) -svuint16x4_t svzipq(svuint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_bf16_x4))) -svbfloat16x4_t svzipq(svbfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s8_x4))) -svint8x4_t svzipq(svint8x4_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f64_x4))) -svfloat64x4_t svzipq(svfloat64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f32_x4))) -svfloat32x4_t svzipq(svfloat32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f16_x4))) -svfloat16x4_t svzipq(svfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s32_x4))) -svint32x4_t svzipq(svint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s64_x4))) -svint64x4_t svzipq(svint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s16_x4))) -svint16x4_t svzipq(svint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_s16_s32_x2))) -svint16_t svqcvtn_s16_s32_x2(svint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_s32_x2))) -svuint16_t svqcvtn_u16_s32_x2(svint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_u32_x2))) -svuint16_t svqcvtn_u16_u32_x2(svuint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_s16_s32_x2))) -svint16_t svqcvtn_s16(svint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_s32_x2))) -svuint16_t svqcvtn_u16(svint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_u32_x2))) -svuint16_t svqcvtn_u16(svuint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_m))) -svfloat64_t svabd_n_f64_m(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_m))) -svfloat32_t svabd_n_f32_m(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_m))) -svfloat16_t svabd_n_f16_m(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_x))) -svfloat64_t svabd_n_f64_x(svbool_t, svfloat64_t, float64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_x))) -svfloat32_t svabd_n_f32_x(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_x))) -svfloat16_t svabd_n_f16_x(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_z))) -svfloat64_t svabd_n_f64_z(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_z))) -svfloat32_t svabd_n_f32_z(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_z))) -svfloat16_t svabd_n_f16_z(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_m))) -svint8_t svabd_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_m))) -svint32_t svabd_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_m))) -svint64_t svabd_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_m))) -svint16_t svabd_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_x))) -svint8_t svabd_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_x))) -svint32_t svabd_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_x))) -svint64_t svabd_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_x))) -svint16_t svabd_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_z))) -svint8_t svabd_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_z))) -svint32_t svabd_n_s32_z(svbool_t, 
svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_z))) -svint64_t svabd_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_z))) -svint16_t svabd_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_m))) -svuint8_t svabd_n_u8_m(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_m))) -svuint32_t svabd_n_u32_m(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_m))) -svuint64_t svabd_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_m))) -svuint16_t svabd_n_u16_m(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_x))) -svuint8_t svabd_n_u8_x(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_x))) -svuint32_t svabd_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_x))) -svuint64_t svabd_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_x))) -svuint16_t svabd_n_u16_x(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_z))) -svuint8_t svabd_n_u8_z(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_z))) -svuint32_t svabd_n_u32_z(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_z))) -svuint64_t svabd_n_u64_z(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_z))) -svuint16_t svabd_n_u16_z(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_m))) 
-svfloat64_t svabd_f64_m(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_m))) -svfloat32_t svabd_f32_m(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_m))) -svfloat16_t svabd_f16_m(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_x))) -svfloat64_t svabd_f64_x(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_x))) -svfloat32_t svabd_f32_x(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_x))) -svfloat16_t svabd_f16_x(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_z))) -svfloat64_t svabd_f64_z(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_z))) -svfloat32_t svabd_f32_z(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_z))) -svfloat16_t svabd_f16_z(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_m))) -svint8_t svabd_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_m))) -svint32_t svabd_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_m))) -svint64_t svabd_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_m))) -svint16_t svabd_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_x))) -svint8_t svabd_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_x))) -svint32_t svabd_s32_x(svbool_t, svint32_t, svint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_x))) -svint64_t svabd_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_x))) -svint16_t svabd_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_z))) -svint8_t svabd_s8_z(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_z))) -svint32_t svabd_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_z))) -svint64_t svabd_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_z))) -svint16_t svabd_s16_z(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_m))) -svuint8_t svabd_u8_m(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_m))) -svuint32_t svabd_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_m))) -svuint64_t svabd_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_m))) -svuint16_t svabd_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_x))) -svuint8_t svabd_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_x))) -svuint32_t svabd_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_x))) -svuint64_t svabd_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_x))) -svuint16_t svabd_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_z))) -svuint8_t svabd_u8_z(svbool_t, svuint8_t, svuint8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_z))) -svuint32_t svabd_u32_z(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_z))) -svuint64_t svabd_u64_z(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_z))) -svuint16_t svabd_u16_z(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_m))) -svfloat64_t svabs_f64_m(svfloat64_t, svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_m))) -svfloat32_t svabs_f32_m(svfloat32_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_m))) -svfloat16_t svabs_f16_m(svfloat16_t, svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_x))) -svfloat64_t svabs_f64_x(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_x))) -svfloat32_t svabs_f32_x(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_x))) -svfloat16_t svabs_f16_x(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_z))) -svfloat64_t svabs_f64_z(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_z))) -svfloat32_t svabs_f32_z(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_z))) -svfloat16_t svabs_f16_z(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_m))) -svint8_t svabs_s8_m(svint8_t, svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_m))) -svint32_t svabs_s32_m(svint32_t, svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_m))) -svint64_t svabs_s64_m(svint64_t, svbool_t, svint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_m))) -svint16_t svabs_s16_m(svint16_t, svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_x))) -svint8_t svabs_s8_x(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_x))) -svint32_t svabs_s32_x(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_x))) -svint64_t svabs_s64_x(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_x))) -svint16_t svabs_s16_x(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_z))) -svint8_t svabs_s8_z(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_z))) -svint32_t svabs_s32_z(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_z))) -svint64_t svabs_s64_z(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_z))) -svint16_t svabs_s16_z(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f64))) -svbool_t svacge_n_f64(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f32))) -svbool_t svacge_n_f32(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f16))) -svbool_t svacge_n_f16(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f64))) -svbool_t svacge_f64(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f32))) -svbool_t svacge_f32(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f16))) -svbool_t svacge_f16(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f64))) -svbool_t svacgt_n_f64(svbool_t, 
svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f32))) -svbool_t svacgt_n_f32(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f16))) -svbool_t svacgt_n_f16(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f64))) -svbool_t svacgt_f64(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f32))) -svbool_t svacgt_f32(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f16))) -svbool_t svacgt_f16(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f64))) -svbool_t svacle_n_f64(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f32))) -svbool_t svacle_n_f32(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f16))) -svbool_t svacle_n_f16(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f64))) -svbool_t svacle_f64(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f32))) -svbool_t svacle_f32(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f16))) -svbool_t svacle_f16(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f64))) -svbool_t svaclt_n_f64(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f32))) -svbool_t svaclt_n_f32(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f16))) -svbool_t svaclt_n_f16(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f64))) -svbool_t 
svaclt_f64(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f32))) -svbool_t svaclt_f32(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f16))) -svbool_t svaclt_f16(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_m))) -svfloat64_t svadd_n_f64_m(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_m))) -svfloat32_t svadd_n_f32_m(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_m))) -svfloat16_t svadd_n_f16_m(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_x))) -svfloat64_t svadd_n_f64_x(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_x))) -svfloat32_t svadd_n_f32_x(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_x))) -svfloat16_t svadd_n_f16_x(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_z))) -svfloat64_t svadd_n_f64_z(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_z))) -svfloat32_t svadd_n_f32_z(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_z))) -svfloat16_t svadd_n_f16_z(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_m))) -svuint8_t svadd_n_u8_m(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_m))) -svuint32_t svadd_n_u32_m(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_m))) -svuint64_t svadd_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_m))) -svuint16_t svadd_n_u16_m(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_m))) -svint8_t svadd_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_m))) -svint32_t svadd_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_m))) -svint64_t svadd_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_m))) -svint16_t svadd_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_x))) -svuint8_t svadd_n_u8_x(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_x))) -svuint32_t svadd_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_x))) -svuint64_t svadd_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_x))) -svuint16_t svadd_n_u16_x(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_x))) -svint8_t svadd_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_x))) -svint32_t svadd_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_x))) -svint64_t svadd_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_x))) -svint16_t svadd_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_z))) -svuint8_t svadd_n_u8_z(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_z))) -svuint32_t svadd_n_u32_z(svbool_t, svuint32_t, 
uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_z))) -svuint64_t svadd_n_u64_z(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_z))) -svuint16_t svadd_n_u16_z(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_z))) -svint8_t svadd_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_z))) -svint32_t svadd_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_z))) -svint64_t svadd_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_z))) -svint16_t svadd_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_m))) -svfloat64_t svadd_f64_m(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_m))) -svfloat32_t svadd_f32_m(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_m))) -svfloat16_t svadd_f16_m(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_x))) -svfloat64_t svadd_f64_x(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_x))) -svfloat32_t svadd_f32_x(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_x))) -svfloat16_t svadd_f16_x(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_z))) -svfloat64_t svadd_f64_z(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_z))) -svfloat32_t svadd_f32_z(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_z))) 
-svfloat16_t svadd_f16_z(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_m))) -svuint8_t svadd_u8_m(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_m))) -svuint32_t svadd_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_m))) -svuint64_t svadd_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_m))) -svuint16_t svadd_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_m))) -svint8_t svadd_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_m))) -svint32_t svadd_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_m))) -svint64_t svadd_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_m))) -svint16_t svadd_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_x))) -svuint8_t svadd_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_x))) -svuint32_t svadd_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_x))) -svuint64_t svadd_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_x))) -svuint16_t svadd_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_x))) -svint8_t svadd_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_x))) -svint32_t svadd_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_x))) 
-svint64_t svadd_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_x))) -svint16_t svadd_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_z))) -svuint8_t svadd_u8_z(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_z))) -svuint32_t svadd_u32_z(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_z))) -svuint64_t svadd_u64_z(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_z))) -svuint16_t svadd_u16_z(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_z))) -svint8_t svadd_s8_z(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_z))) -svint32_t svadd_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_z))) -svint64_t svadd_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_z))) -svint16_t svadd_s16_z(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f64))) -float64_t svadda_f64(svbool_t, float64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f32))) -float32_t svadda_f32(svbool_t, float32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f16))) -float16_t svadda_f16(svbool_t, float16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s8))) -int64_t svaddv_s8(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s32))) -int64_t svaddv_s32(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s64))) -int64_t svaddv_s64(svbool_t, svint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s16))) -int64_t svaddv_s16(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u8))) -uint64_t svaddv_u8(svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u32))) -uint64_t svaddv_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u64))) -uint64_t svaddv_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u16))) -uint64_t svaddv_u16(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f64))) -float64_t svaddv_f64(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f32))) -float32_t svaddv_f32(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f16))) -float16_t svaddv_f16(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_u32offset))) -svuint32_t svadrb_u32base_u32offset(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_u64offset))) -svuint64_t svadrb_u64base_u64offset(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_s32offset))) -svuint32_t svadrb_u32base_s32offset(svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_s64offset))) -svuint64_t svadrb_u64base_s64offset(svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_u32index))) -svuint32_t svadrd_u32base_u32index(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_u64index))) -svuint64_t svadrd_u64base_u64index(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_s32index))) -svuint32_t svadrd_u32base_s32index(svuint32_t, svint32_t); 
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_s64index))) -svuint64_t svadrd_u64base_s64index(svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_u32index))) -svuint32_t svadrh_u32base_u32index(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_u64index))) -svuint64_t svadrh_u64base_u64index(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_s32index))) -svuint32_t svadrh_u32base_s32index(svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_s64index))) -svuint64_t svadrh_u64base_s64index(svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_u32index))) -svuint32_t svadrw_u32base_u32index(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_u64index))) -svuint64_t svadrw_u64base_u64index(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_s32index))) -svuint32_t svadrw_u32base_s32index(svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_s64index))) -svuint64_t svadrw_u64base_s64index(svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_b_z))) -svbool_t svand_b_z(svbool_t, svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_m))) -svuint8_t svand_n_u8_m(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_m))) -svuint32_t svand_n_u32_m(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_m))) -svuint64_t svand_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_m))) -svuint16_t svand_n_u16_m(svbool_t, svuint16_t, 
uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_m))) -svint8_t svand_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_m))) -svint32_t svand_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_m))) -svint64_t svand_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_m))) -svint16_t svand_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_x))) -svuint8_t svand_n_u8_x(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_x))) -svuint32_t svand_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_x))) -svuint64_t svand_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_x))) -svuint16_t svand_n_u16_x(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_x))) -svint8_t svand_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_x))) -svint32_t svand_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_x))) -svint64_t svand_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_x))) -svint16_t svand_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_z))) -svuint8_t svand_n_u8_z(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_z))) -svuint32_t svand_n_u32_z(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_z))) -svuint64_t svand_n_u64_z(svbool_t, 
svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_z))) -svuint16_t svand_n_u16_z(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_z))) -svint8_t svand_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_z))) -svint32_t svand_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_z))) -svint64_t svand_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_z))) -svint16_t svand_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_m))) -svuint8_t svand_u8_m(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_m))) -svuint32_t svand_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_m))) -svuint64_t svand_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_m))) -svuint16_t svand_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_m))) -svint8_t svand_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_m))) -svint32_t svand_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_m))) -svint64_t svand_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_m))) -svint16_t svand_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_x))) -svuint8_t svand_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_x))) -svuint32_t svand_u32_x(svbool_t, 
svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_x))) -svuint64_t svand_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_x))) -svuint16_t svand_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_x))) -svint8_t svand_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_x))) -svint32_t svand_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_x))) -svint64_t svand_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_x))) -svint16_t svand_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_z))) -svuint8_t svand_u8_z(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_z))) -svuint32_t svand_u32_z(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_z))) -svuint64_t svand_u64_z(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_z))) -svuint16_t svand_u16_z(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_z))) -svint8_t svand_s8_z(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_z))) -svint32_t svand_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_z))) -svint64_t svand_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_z))) -svint16_t svand_s16_z(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u8))) -uint8_t svandv_u8(svbool_t, svuint8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u32))) -uint32_t svandv_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u64))) -uint64_t svandv_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u16))) -uint16_t svandv_u16(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s8))) -int8_t svandv_s8(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s32))) -int32_t svandv_s32(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s64))) -int64_t svandv_s64(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s16))) -int16_t svandv_s16(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_m))) -svint8_t svasr_n_s8_m(svbool_t, svint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_m))) -svint32_t svasr_n_s32_m(svbool_t, svint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_m))) -svint64_t svasr_n_s64_m(svbool_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_m))) -svint16_t svasr_n_s16_m(svbool_t, svint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_x))) -svint8_t svasr_n_s8_x(svbool_t, svint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_x))) -svint32_t svasr_n_s32_x(svbool_t, svint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_x))) -svint64_t svasr_n_s64_x(svbool_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_x))) -svint16_t svasr_n_s16_x(svbool_t, svint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_z))) -svint8_t svasr_n_s8_z(svbool_t, 
svint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_z))) -svint32_t svasr_n_s32_z(svbool_t, svint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_z))) -svint64_t svasr_n_s64_z(svbool_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_z))) -svint16_t svasr_n_s16_z(svbool_t, svint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_m))) -svint8_t svasr_s8_m(svbool_t, svint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_m))) -svint32_t svasr_s32_m(svbool_t, svint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_m))) -svint64_t svasr_s64_m(svbool_t, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_m))) -svint16_t svasr_s16_m(svbool_t, svint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_x))) -svint8_t svasr_s8_x(svbool_t, svint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_x))) -svint32_t svasr_s32_x(svbool_t, svint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_x))) -svint64_t svasr_s64_x(svbool_t, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_x))) -svint16_t svasr_s16_x(svbool_t, svint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_z))) -svint8_t svasr_s8_z(svbool_t, svint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_z))) -svint32_t svasr_s32_z(svbool_t, svint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_z))) -svint64_t svasr_s64_z(svbool_t, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_z))) -svint16_t svasr_s16_z(svbool_t, svint16_t, 
svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_m))) -svint8_t svasr_wide_n_s8_m(svbool_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_m))) -svint32_t svasr_wide_n_s32_m(svbool_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_m))) -svint16_t svasr_wide_n_s16_m(svbool_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_x))) -svint8_t svasr_wide_n_s8_x(svbool_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_x))) -svint32_t svasr_wide_n_s32_x(svbool_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_x))) -svint16_t svasr_wide_n_s16_x(svbool_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_z))) -svint8_t svasr_wide_n_s8_z(svbool_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_z))) -svint32_t svasr_wide_n_s32_z(svbool_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_z))) -svint16_t svasr_wide_n_s16_z(svbool_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_m))) -svint8_t svasr_wide_s8_m(svbool_t, svint8_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_m))) -svint32_t svasr_wide_s32_m(svbool_t, svint32_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_m))) -svint16_t svasr_wide_s16_m(svbool_t, svint16_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_x))) -svint8_t svasr_wide_s8_x(svbool_t, svint8_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_x))) -svint32_t svasr_wide_s32_x(svbool_t, svint32_t, 
svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_x))) -svint16_t svasr_wide_s16_x(svbool_t, svint16_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_z))) -svint8_t svasr_wide_s8_z(svbool_t, svint8_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_z))) -svint32_t svasr_wide_s32_z(svbool_t, svint32_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_z))) -svint16_t svasr_wide_s16_z(svbool_t, svint16_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_m))) -svint8_t svasrd_n_s8_m(svbool_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_m))) -svint32_t svasrd_n_s32_m(svbool_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_m))) -svint64_t svasrd_n_s64_m(svbool_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_m))) -svint16_t svasrd_n_s16_m(svbool_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_x))) -svint8_t svasrd_n_s8_x(svbool_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_x))) -svint32_t svasrd_n_s32_x(svbool_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_x))) -svint64_t svasrd_n_s64_x(svbool_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_x))) -svint16_t svasrd_n_s16_x(svbool_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_z))) -svint8_t svasrd_n_s8_z(svbool_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_z))) -svint32_t svasrd_n_s32_z(svbool_t, svint32_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_z))) -svint64_t svasrd_n_s64_z(svbool_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_z))) -svint16_t svasrd_n_s16_z(svbool_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_b_z))) -svbool_t svbic_b_z(svbool_t, svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_m))) -svuint8_t svbic_n_u8_m(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_m))) -svuint32_t svbic_n_u32_m(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_m))) -svuint64_t svbic_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_m))) -svuint16_t svbic_n_u16_m(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_m))) -svint8_t svbic_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_m))) -svint32_t svbic_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_m))) -svint64_t svbic_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_m))) -svint16_t svbic_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_x))) -svuint8_t svbic_n_u8_x(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_x))) -svuint32_t svbic_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_x))) -svuint64_t svbic_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_x))) -svuint16_t svbic_n_u16_x(svbool_t, svuint16_t, 
uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_x))) -svint8_t svbic_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_x))) -svint32_t svbic_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_x))) -svint64_t svbic_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_x))) -svint16_t svbic_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_z))) -svuint8_t svbic_n_u8_z(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_z))) -svuint32_t svbic_n_u32_z(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_z))) -svuint64_t svbic_n_u64_z(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_z))) -svuint16_t svbic_n_u16_z(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_z))) -svint8_t svbic_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_z))) -svint32_t svbic_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_z))) -svint64_t svbic_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_z))) -svint16_t svbic_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_m))) -svuint8_t svbic_u8_m(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_m))) -svuint32_t svbic_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_m))) -svuint64_t svbic_u64_m(svbool_t, 
svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_m))) -svuint16_t svbic_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_m))) -svint8_t svbic_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_m))) -svint32_t svbic_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_m))) -svint64_t svbic_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_m))) -svint16_t svbic_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_x))) -svuint8_t svbic_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_x))) -svuint32_t svbic_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_x))) -svuint64_t svbic_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_x))) -svuint16_t svbic_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_x))) -svint8_t svbic_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_x))) -svint32_t svbic_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_x))) -svint64_t svbic_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_x))) -svint16_t svbic_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_z))) -svuint8_t svbic_u8_z(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_z))) -svuint32_t svbic_u32_z(svbool_t, svuint32_t, 
svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_z))) -svuint64_t svbic_u64_z(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_z))) -svuint16_t svbic_u16_z(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_z))) -svint8_t svbic_s8_z(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_z))) -svint32_t svbic_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_z))) -svint64_t svbic_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_z))) -svint16_t svbic_s16_z(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_m))) -svbool_t svbrka_b_m(svbool_t, svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_z))) -svbool_t svbrka_b_z(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_m))) -svbool_t svbrkb_b_m(svbool_t, svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_z))) -svbool_t svbrkb_b_z(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkn_b_z))) -svbool_t svbrkn_b_z(svbool_t, svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpa_b_z))) -svbool_t svbrkpa_b_z(svbool_t, svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpb_b_z))) -svbool_t svbrkpb_b_z(svbool_t, svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_m))) -svfloat64_t svcadd_f64_m(svbool_t, svfloat64_t, svfloat64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_m))) -svfloat32_t svcadd_f32_m(svbool_t, svfloat32_t, svfloat32_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_m))) -svfloat16_t svcadd_f16_m(svbool_t, svfloat16_t, svfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_x))) -svfloat64_t svcadd_f64_x(svbool_t, svfloat64_t, svfloat64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_x))) -svfloat32_t svcadd_f32_x(svbool_t, svfloat32_t, svfloat32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_x))) -svfloat16_t svcadd_f16_x(svbool_t, svfloat16_t, svfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_z))) -svfloat64_t svcadd_f64_z(svbool_t, svfloat64_t, svfloat64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_z))) -svfloat32_t svcadd_f32_z(svbool_t, svfloat32_t, svfloat32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_z))) -svfloat16_t svcadd_f16_z(svbool_t, svfloat16_t, svfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u8))) -uint8_t svclasta_n_u8(svbool_t, uint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u32))) -uint32_t svclasta_n_u32(svbool_t, uint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u64))) -uint64_t svclasta_n_u64(svbool_t, uint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u16))) -uint16_t svclasta_n_u16(svbool_t, uint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s8))) -int8_t svclasta_n_s8(svbool_t, int8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f64))) -float64_t svclasta_n_f64(svbool_t, float64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f32))) -float32_t svclasta_n_f32(svbool_t, float32_t, svfloat32_t); 
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f16))) -float16_t svclasta_n_f16(svbool_t, float16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s32))) -int32_t svclasta_n_s32(svbool_t, int32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s64))) -int64_t svclasta_n_s64(svbool_t, int64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s16))) -int16_t svclasta_n_s16(svbool_t, int16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u8))) -svuint8_t svclasta_u8(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u32))) -svuint32_t svclasta_u32(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u64))) -svuint64_t svclasta_u64(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u16))) -svuint16_t svclasta_u16(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s8))) -svint8_t svclasta_s8(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f64))) -svfloat64_t svclasta_f64(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f32))) -svfloat32_t svclasta_f32(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f16))) -svfloat16_t svclasta_f16(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s32))) -svint32_t svclasta_s32(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s64))) -svint64_t svclasta_s64(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s16))) -svint16_t 
svclasta_s16(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u8))) -uint8_t svclastb_n_u8(svbool_t, uint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u32))) -uint32_t svclastb_n_u32(svbool_t, uint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u64))) -uint64_t svclastb_n_u64(svbool_t, uint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u16))) -uint16_t svclastb_n_u16(svbool_t, uint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s8))) -int8_t svclastb_n_s8(svbool_t, int8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f64))) -float64_t svclastb_n_f64(svbool_t, float64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f32))) -float32_t svclastb_n_f32(svbool_t, float32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f16))) -float16_t svclastb_n_f16(svbool_t, float16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s32))) -int32_t svclastb_n_s32(svbool_t, int32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s64))) -int64_t svclastb_n_s64(svbool_t, int64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s16))) -int16_t svclastb_n_s16(svbool_t, int16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u8))) -svuint8_t svclastb_u8(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u32))) -svuint32_t svclastb_u32(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u64))) -svuint64_t svclastb_u64(svbool_t, svuint64_t, svuint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u16))) -svuint16_t svclastb_u16(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s8))) -svint8_t svclastb_s8(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f64))) -svfloat64_t svclastb_f64(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f32))) -svfloat32_t svclastb_f32(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f16))) -svfloat16_t svclastb_f16(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s32))) -svint32_t svclastb_s32(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s64))) -svint64_t svclastb_s64(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s16))) -svint16_t svclastb_s16(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_m))) -svuint8_t svcls_s8_m(svuint8_t, svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_m))) -svuint32_t svcls_s32_m(svuint32_t, svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_m))) -svuint64_t svcls_s64_m(svuint64_t, svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_m))) -svuint16_t svcls_s16_m(svuint16_t, svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_x))) -svuint8_t svcls_s8_x(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_x))) -svuint32_t svcls_s32_x(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_x))) -svuint64_t svcls_s64_x(svbool_t, svint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_x))) -svuint16_t svcls_s16_x(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_z))) -svuint8_t svcls_s8_z(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_z))) -svuint32_t svcls_s32_z(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_z))) -svuint64_t svcls_s64_z(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_z))) -svuint16_t svcls_s16_z(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_m))) -svuint8_t svclz_u8_m(svuint8_t, svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_m))) -svuint32_t svclz_u32_m(svuint32_t, svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_m))) -svuint64_t svclz_u64_m(svuint64_t, svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_m))) -svuint16_t svclz_u16_m(svuint16_t, svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_m))) -svuint8_t svclz_s8_m(svuint8_t, svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_m))) -svuint32_t svclz_s32_m(svuint32_t, svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_m))) -svuint64_t svclz_s64_m(svuint64_t, svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_m))) -svuint16_t svclz_s16_m(svuint16_t, svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_x))) -svuint8_t svclz_u8_x(svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_x))) -svuint32_t svclz_u32_x(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_x))) -svuint64_t 
svclz_u64_x(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_x))) -svuint16_t svclz_u16_x(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_x))) -svuint8_t svclz_s8_x(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_x))) -svuint32_t svclz_s32_x(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_x))) -svuint64_t svclz_s64_x(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_x))) -svuint16_t svclz_s16_x(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_z))) -svuint8_t svclz_u8_z(svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_z))) -svuint32_t svclz_u32_z(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_z))) -svuint64_t svclz_u64_z(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_z))) -svuint16_t svclz_u16_z(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_z))) -svuint8_t svclz_s8_z(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_z))) -svuint32_t svclz_s32_z(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_z))) -svuint64_t svclz_s64_z(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_z))) -svuint16_t svclz_s16_z(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_m))) -svfloat64_t svcmla_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_m))) -svfloat32_t svcmla_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_m))) -svfloat16_t svcmla_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_x))) -svfloat64_t svcmla_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_x))) -svfloat32_t svcmla_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_x))) -svfloat16_t svcmla_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_z))) -svfloat64_t svcmla_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_z))) -svfloat32_t svcmla_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_z))) -svfloat16_t svcmla_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f32))) -svfloat32_t svcmla_lane_f32(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f16))) -svfloat16_t svcmla_lane_f16(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f64))) -svbool_t svcmpeq_n_f64(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f32))) -svbool_t svcmpeq_n_f32(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f16))) -svbool_t svcmpeq_n_f16(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u8))) -svbool_t svcmpeq_n_u8(svbool_t, 
svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u32))) -svbool_t svcmpeq_n_u32(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u64))) -svbool_t svcmpeq_n_u64(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u16))) -svbool_t svcmpeq_n_u16(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s8))) -svbool_t svcmpeq_n_s8(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s32))) -svbool_t svcmpeq_n_s32(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s64))) -svbool_t svcmpeq_n_s64(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s16))) -svbool_t svcmpeq_n_s16(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u8))) -svbool_t svcmpeq_u8(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u32))) -svbool_t svcmpeq_u32(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u64))) -svbool_t svcmpeq_u64(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u16))) -svbool_t svcmpeq_u16(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s8))) -svbool_t svcmpeq_s8(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s32))) -svbool_t svcmpeq_s32(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s64))) -svbool_t svcmpeq_s64(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s16))) -svbool_t svcmpeq_s16(svbool_t, svint16_t, 
svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f64))) -svbool_t svcmpeq_f64(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f32))) -svbool_t svcmpeq_f32(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f16))) -svbool_t svcmpeq_f16(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s8))) -svbool_t svcmpeq_wide_n_s8(svbool_t, svint8_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s32))) -svbool_t svcmpeq_wide_n_s32(svbool_t, svint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s16))) -svbool_t svcmpeq_wide_n_s16(svbool_t, svint16_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s8))) -svbool_t svcmpeq_wide_s8(svbool_t, svint8_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s32))) -svbool_t svcmpeq_wide_s32(svbool_t, svint32_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s16))) -svbool_t svcmpeq_wide_s16(svbool_t, svint16_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f64))) -svbool_t svcmpge_n_f64(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f32))) -svbool_t svcmpge_n_f32(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f16))) -svbool_t svcmpge_n_f16(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s8))) -svbool_t svcmpge_n_s8(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s32))) -svbool_t svcmpge_n_s32(svbool_t, svint32_t, int32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s64))) -svbool_t svcmpge_n_s64(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s16))) -svbool_t svcmpge_n_s16(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u8))) -svbool_t svcmpge_n_u8(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u32))) -svbool_t svcmpge_n_u32(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u64))) -svbool_t svcmpge_n_u64(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u16))) -svbool_t svcmpge_n_u16(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s8))) -svbool_t svcmpge_s8(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s32))) -svbool_t svcmpge_s32(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s64))) -svbool_t svcmpge_s64(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s16))) -svbool_t svcmpge_s16(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f64))) -svbool_t svcmpge_f64(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f32))) -svbool_t svcmpge_f32(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f16))) -svbool_t svcmpge_f16(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u8))) -svbool_t svcmpge_u8(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u32))) -svbool_t svcmpge_u32(svbool_t, svuint32_t, svuint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u64))) -svbool_t svcmpge_u64(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u16))) -svbool_t svcmpge_u16(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s8))) -svbool_t svcmpge_wide_n_s8(svbool_t, svint8_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s32))) -svbool_t svcmpge_wide_n_s32(svbool_t, svint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s16))) -svbool_t svcmpge_wide_n_s16(svbool_t, svint16_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u8))) -svbool_t svcmpge_wide_n_u8(svbool_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u32))) -svbool_t svcmpge_wide_n_u32(svbool_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u16))) -svbool_t svcmpge_wide_n_u16(svbool_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s8))) -svbool_t svcmpge_wide_s8(svbool_t, svint8_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s32))) -svbool_t svcmpge_wide_s32(svbool_t, svint32_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s16))) -svbool_t svcmpge_wide_s16(svbool_t, svint16_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u8))) -svbool_t svcmpge_wide_u8(svbool_t, svuint8_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u32))) -svbool_t svcmpge_wide_u32(svbool_t, svuint32_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u16))) -svbool_t svcmpge_wide_u16(svbool_t, svuint16_t, svuint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f64))) -svbool_t svcmpgt_n_f64(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f32))) -svbool_t svcmpgt_n_f32(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f16))) -svbool_t svcmpgt_n_f16(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s8))) -svbool_t svcmpgt_n_s8(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s32))) -svbool_t svcmpgt_n_s32(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s64))) -svbool_t svcmpgt_n_s64(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s16))) -svbool_t svcmpgt_n_s16(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u8))) -svbool_t svcmpgt_n_u8(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u32))) -svbool_t svcmpgt_n_u32(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u64))) -svbool_t svcmpgt_n_u64(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u16))) -svbool_t svcmpgt_n_u16(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s8))) -svbool_t svcmpgt_s8(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s32))) -svbool_t svcmpgt_s32(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s64))) -svbool_t svcmpgt_s64(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s16))) -svbool_t svcmpgt_s16(svbool_t, svint16_t, svint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f64))) -svbool_t svcmpgt_f64(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f32))) -svbool_t svcmpgt_f32(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f16))) -svbool_t svcmpgt_f16(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u8))) -svbool_t svcmpgt_u8(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u32))) -svbool_t svcmpgt_u32(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u64))) -svbool_t svcmpgt_u64(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u16))) -svbool_t svcmpgt_u16(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s8))) -svbool_t svcmpgt_wide_n_s8(svbool_t, svint8_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s32))) -svbool_t svcmpgt_wide_n_s32(svbool_t, svint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s16))) -svbool_t svcmpgt_wide_n_s16(svbool_t, svint16_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u8))) -svbool_t svcmpgt_wide_n_u8(svbool_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u32))) -svbool_t svcmpgt_wide_n_u32(svbool_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u16))) -svbool_t svcmpgt_wide_n_u16(svbool_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s8))) -svbool_t svcmpgt_wide_s8(svbool_t, svint8_t, svint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s32))) -svbool_t svcmpgt_wide_s32(svbool_t, svint32_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s16))) -svbool_t svcmpgt_wide_s16(svbool_t, svint16_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u8))) -svbool_t svcmpgt_wide_u8(svbool_t, svuint8_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u32))) -svbool_t svcmpgt_wide_u32(svbool_t, svuint32_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u16))) -svbool_t svcmpgt_wide_u16(svbool_t, svuint16_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f64))) -svbool_t svcmple_n_f64(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f32))) -svbool_t svcmple_n_f32(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f16))) -svbool_t svcmple_n_f16(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s8))) -svbool_t svcmple_n_s8(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s32))) -svbool_t svcmple_n_s32(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s64))) -svbool_t svcmple_n_s64(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s16))) -svbool_t svcmple_n_s16(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u8))) -svbool_t svcmple_n_u8(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u32))) -svbool_t svcmple_n_u32(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u64))) -svbool_t 
svcmple_n_u64(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u16))) -svbool_t svcmple_n_u16(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s8))) -svbool_t svcmple_s8(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s32))) -svbool_t svcmple_s32(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s64))) -svbool_t svcmple_s64(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s16))) -svbool_t svcmple_s16(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f64))) -svbool_t svcmple_f64(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f32))) -svbool_t svcmple_f32(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f16))) -svbool_t svcmple_f16(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u8))) -svbool_t svcmple_u8(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u32))) -svbool_t svcmple_u32(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u64))) -svbool_t svcmple_u64(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u16))) -svbool_t svcmple_u16(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s8))) -svbool_t svcmple_wide_n_s8(svbool_t, svint8_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s32))) -svbool_t svcmple_wide_n_s32(svbool_t, svint32_t, int64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s16))) -svbool_t svcmple_wide_n_s16(svbool_t, svint16_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u8))) -svbool_t svcmple_wide_n_u8(svbool_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u32))) -svbool_t svcmple_wide_n_u32(svbool_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u16))) -svbool_t svcmple_wide_n_u16(svbool_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s8))) -svbool_t svcmple_wide_s8(svbool_t, svint8_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s32))) -svbool_t svcmple_wide_s32(svbool_t, svint32_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s16))) -svbool_t svcmple_wide_s16(svbool_t, svint16_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u8))) -svbool_t svcmple_wide_u8(svbool_t, svuint8_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u32))) -svbool_t svcmple_wide_u32(svbool_t, svuint32_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u16))) -svbool_t svcmple_wide_u16(svbool_t, svuint16_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u8))) -svbool_t svcmplt_n_u8(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u32))) -svbool_t svcmplt_n_u32(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u64))) -svbool_t svcmplt_n_u64(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u16))) -svbool_t svcmplt_n_u16(svbool_t, svuint16_t, uint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f64))) -svbool_t svcmplt_n_f64(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f32))) -svbool_t svcmplt_n_f32(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f16))) -svbool_t svcmplt_n_f16(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s8))) -svbool_t svcmplt_n_s8(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s32))) -svbool_t svcmplt_n_s32(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s64))) -svbool_t svcmplt_n_s64(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s16))) -svbool_t svcmplt_n_s16(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u8))) -svbool_t svcmplt_u8(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u32))) -svbool_t svcmplt_u32(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u64))) -svbool_t svcmplt_u64(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u16))) -svbool_t svcmplt_u16(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s8))) -svbool_t svcmplt_s8(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s32))) -svbool_t svcmplt_s32(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s64))) -svbool_t svcmplt_s64(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s16))) -svbool_t svcmplt_s16(svbool_t, svint16_t, svint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f64))) -svbool_t svcmplt_f64(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f32))) -svbool_t svcmplt_f32(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f16))) -svbool_t svcmplt_f16(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u8))) -svbool_t svcmplt_wide_n_u8(svbool_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u32))) -svbool_t svcmplt_wide_n_u32(svbool_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u16))) -svbool_t svcmplt_wide_n_u16(svbool_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s8))) -svbool_t svcmplt_wide_n_s8(svbool_t, svint8_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s32))) -svbool_t svcmplt_wide_n_s32(svbool_t, svint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s16))) -svbool_t svcmplt_wide_n_s16(svbool_t, svint16_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u8))) -svbool_t svcmplt_wide_u8(svbool_t, svuint8_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u32))) -svbool_t svcmplt_wide_u32(svbool_t, svuint32_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u16))) -svbool_t svcmplt_wide_u16(svbool_t, svuint16_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s8))) -svbool_t svcmplt_wide_s8(svbool_t, svint8_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s32))) -svbool_t svcmplt_wide_s32(svbool_t, svint32_t, svint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s16))) -svbool_t svcmplt_wide_s16(svbool_t, svint16_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f64))) -svbool_t svcmpne_n_f64(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f32))) -svbool_t svcmpne_n_f32(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f16))) -svbool_t svcmpne_n_f16(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u8))) -svbool_t svcmpne_n_u8(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u32))) -svbool_t svcmpne_n_u32(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u64))) -svbool_t svcmpne_n_u64(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u16))) -svbool_t svcmpne_n_u16(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s8))) -svbool_t svcmpne_n_s8(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s32))) -svbool_t svcmpne_n_s32(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s64))) -svbool_t svcmpne_n_s64(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s16))) -svbool_t svcmpne_n_s16(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u8))) -svbool_t svcmpne_u8(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u32))) -svbool_t svcmpne_u32(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u64))) -svbool_t svcmpne_u64(svbool_t, svuint64_t, 
svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u16))) -svbool_t svcmpne_u16(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s8))) -svbool_t svcmpne_s8(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s32))) -svbool_t svcmpne_s32(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s64))) -svbool_t svcmpne_s64(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s16))) -svbool_t svcmpne_s16(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f64))) -svbool_t svcmpne_f64(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f32))) -svbool_t svcmpne_f32(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f16))) -svbool_t svcmpne_f16(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s8))) -svbool_t svcmpne_wide_n_s8(svbool_t, svint8_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s32))) -svbool_t svcmpne_wide_n_s32(svbool_t, svint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s16))) -svbool_t svcmpne_wide_n_s16(svbool_t, svint16_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s8))) -svbool_t svcmpne_wide_s8(svbool_t, svint8_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s32))) -svbool_t svcmpne_wide_s32(svbool_t, svint32_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s16))) -svbool_t svcmpne_wide_s16(svbool_t, svint16_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f64))) 
-svbool_t svcmpuo_n_f64(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f32))) -svbool_t svcmpuo_n_f32(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f16))) -svbool_t svcmpuo_n_f16(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f64))) -svbool_t svcmpuo_f64(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f32))) -svbool_t svcmpuo_f32(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f16))) -svbool_t svcmpuo_f16(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_m))) -svuint8_t svcnot_u8_m(svuint8_t, svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_m))) -svuint32_t svcnot_u32_m(svuint32_t, svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_m))) -svuint64_t svcnot_u64_m(svuint64_t, svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_m))) -svuint16_t svcnot_u16_m(svuint16_t, svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_m))) -svint8_t svcnot_s8_m(svint8_t, svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_m))) -svint32_t svcnot_s32_m(svint32_t, svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_m))) -svint64_t svcnot_s64_m(svint64_t, svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_m))) -svint16_t svcnot_s16_m(svint16_t, svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_x))) -svuint8_t svcnot_u8_x(svbool_t, svuint8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_x))) -svuint32_t svcnot_u32_x(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_x))) -svuint64_t svcnot_u64_x(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_x))) -svuint16_t svcnot_u16_x(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_x))) -svint8_t svcnot_s8_x(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_x))) -svint32_t svcnot_s32_x(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_x))) -svint64_t svcnot_s64_x(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_x))) -svint16_t svcnot_s16_x(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_z))) -svuint8_t svcnot_u8_z(svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_z))) -svuint32_t svcnot_u32_z(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_z))) -svuint64_t svcnot_u64_z(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_z))) -svuint16_t svcnot_u16_z(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_z))) -svint8_t svcnot_s8_z(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_z))) -svint32_t svcnot_s32_z(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_z))) -svint64_t svcnot_s64_z(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_z))) -svint16_t svcnot_s16_z(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_m))) -svuint8_t svcnt_u8_m(svuint8_t, svbool_t, svuint8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_m))) -svuint32_t svcnt_u32_m(svuint32_t, svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_m))) -svuint64_t svcnt_u64_m(svuint64_t, svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_m))) -svuint16_t svcnt_u16_m(svuint16_t, svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_m))) -svuint8_t svcnt_s8_m(svuint8_t, svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_m))) -svuint64_t svcnt_f64_m(svuint64_t, svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_m))) -svuint32_t svcnt_f32_m(svuint32_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_m))) -svuint16_t svcnt_f16_m(svuint16_t, svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_m))) -svuint32_t svcnt_s32_m(svuint32_t, svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_m))) -svuint64_t svcnt_s64_m(svuint64_t, svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_m))) -svuint16_t svcnt_s16_m(svuint16_t, svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_x))) -svuint8_t svcnt_u8_x(svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_x))) -svuint32_t svcnt_u32_x(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_x))) -svuint64_t svcnt_u64_x(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_x))) -svuint16_t svcnt_u16_x(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_x))) -svuint8_t svcnt_s8_x(svbool_t, svint8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_x))) -svuint64_t svcnt_f64_x(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_x))) -svuint32_t svcnt_f32_x(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_x))) -svuint16_t svcnt_f16_x(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_x))) -svuint32_t svcnt_s32_x(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_x))) -svuint64_t svcnt_s64_x(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_x))) -svuint16_t svcnt_s16_x(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_z))) -svuint8_t svcnt_u8_z(svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_z))) -svuint32_t svcnt_u32_z(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_z))) -svuint64_t svcnt_u64_z(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_z))) -svuint16_t svcnt_u16_z(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_z))) -svuint8_t svcnt_s8_z(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_z))) -svuint64_t svcnt_f64_z(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_z))) -svuint32_t svcnt_f32_z(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_z))) -svuint16_t svcnt_f16_z(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_z))) -svuint32_t svcnt_s32_z(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_z))) -svuint64_t svcnt_s64_z(svbool_t, svint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_z))) -svuint16_t svcnt_s16_z(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntb))) -uint64_t svcntb(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntb_pat))) -uint64_t svcntb_pat(enum svpattern); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntd))) -uint64_t svcntd(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntd_pat))) -uint64_t svcntd_pat(enum svpattern); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnth))) -uint64_t svcnth(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnth_pat))) -uint64_t svcnth_pat(enum svpattern); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b8))) -uint64_t svcntp_b8(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b32))) -uint64_t svcntp_b32(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b64))) -uint64_t svcntp_b64(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b16))) -uint64_t svcntp_b16(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntw))) -uint64_t svcntw(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntw_pat))) -uint64_t svcntw_pat(enum svpattern); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u32))) -svuint32_t svcompact_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u64))) -svuint64_t svcompact_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f64))) -svfloat64_t svcompact_f64(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f32))) -svfloat32_t svcompact_f32(svbool_t, svfloat32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s32))) -svint32_t svcompact_s32(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s64))) -svint64_t svcompact_s64(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u8))) -svuint8x2_t svcreate2_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u32))) -svuint32x2_t svcreate2_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u64))) -svuint64x2_t svcreate2_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u16))) -svuint16x2_t svcreate2_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s8))) -svint8x2_t svcreate2_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f64))) -svfloat64x2_t svcreate2_f64(svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f32))) -svfloat32x2_t svcreate2_f32(svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f16))) -svfloat16x2_t svcreate2_f16(svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s32))) -svint32x2_t svcreate2_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s64))) -svint64x2_t svcreate2_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s16))) -svint16x2_t svcreate2_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u8))) -svuint8x3_t svcreate3_u8(svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u32))) -svuint32x3_t svcreate3_u32(svuint32_t, svuint32_t, svuint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u64))) -svuint64x3_t svcreate3_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u16))) -svuint16x3_t svcreate3_u16(svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s8))) -svint8x3_t svcreate3_s8(svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f64))) -svfloat64x3_t svcreate3_f64(svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f32))) -svfloat32x3_t svcreate3_f32(svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f16))) -svfloat16x3_t svcreate3_f16(svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s32))) -svint32x3_t svcreate3_s32(svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s64))) -svint64x3_t svcreate3_s64(svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s16))) -svint16x3_t svcreate3_s16(svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u8))) -svuint8x4_t svcreate4_u8(svuint8_t, svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u32))) -svuint32x4_t svcreate4_u32(svuint32_t, svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u64))) -svuint64x4_t svcreate4_u64(svuint64_t, svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u16))) -svuint16x4_t svcreate4_u16(svuint16_t, svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s8))) -svint8x4_t svcreate4_s8(svint8_t, 
svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f64))) -svfloat64x4_t svcreate4_f64(svfloat64_t, svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f32))) -svfloat32x4_t svcreate4_f32(svfloat32_t, svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f16))) -svfloat16x4_t svcreate4_f16(svfloat16_t, svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s32))) -svint32x4_t svcreate4_s32(svint32_t, svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s64))) -svint64x4_t svcreate4_s64(svint64_t, svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s16))) -svint16x4_t svcreate4_s16(svint16_t, svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_m))) -svfloat16_t svcvt_f16_f32_m(svfloat16_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_x))) -svfloat16_t svcvt_f16_f32_x(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_z))) -svfloat16_t svcvt_f16_f32_z(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_m))) -svfloat16_t svcvt_f16_f64_m(svfloat16_t, svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_x))) -svfloat16_t svcvt_f16_f64_x(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_z))) -svfloat16_t svcvt_f16_f64_z(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_m))) -svfloat16_t svcvt_f16_s16_m(svfloat16_t, svbool_t, svint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_x))) -svfloat16_t svcvt_f16_s16_x(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_z))) -svfloat16_t svcvt_f16_s16_z(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_m))) -svfloat16_t svcvt_f16_s32_m(svfloat16_t, svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_x))) -svfloat16_t svcvt_f16_s32_x(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_z))) -svfloat16_t svcvt_f16_s32_z(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_m))) -svfloat16_t svcvt_f16_s64_m(svfloat16_t, svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_x))) -svfloat16_t svcvt_f16_s64_x(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_z))) -svfloat16_t svcvt_f16_s64_z(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_m))) -svfloat16_t svcvt_f16_u16_m(svfloat16_t, svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_x))) -svfloat16_t svcvt_f16_u16_x(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_z))) -svfloat16_t svcvt_f16_u16_z(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_m))) -svfloat16_t svcvt_f16_u32_m(svfloat16_t, svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_x))) -svfloat16_t svcvt_f16_u32_x(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_z))) -svfloat16_t svcvt_f16_u32_z(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_m))) -svfloat16_t 
svcvt_f16_u64_m(svfloat16_t, svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_x))) -svfloat16_t svcvt_f16_u64_x(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_z))) -svfloat16_t svcvt_f16_u64_z(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_m))) -svfloat32_t svcvt_f32_f16_m(svfloat32_t, svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_x))) -svfloat32_t svcvt_f32_f16_x(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_z))) -svfloat32_t svcvt_f32_f16_z(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_m))) -svfloat32_t svcvt_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_x))) -svfloat32_t svcvt_f32_f64_x(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_z))) -svfloat32_t svcvt_f32_f64_z(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_m))) -svfloat32_t svcvt_f32_s32_m(svfloat32_t, svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x))) -svfloat32_t svcvt_f32_s32_x(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_z))) -svfloat32_t svcvt_f32_s32_z(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_m))) -svfloat32_t svcvt_f32_s64_m(svfloat32_t, svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_x))) -svfloat32_t svcvt_f32_s64_x(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_z))) -svfloat32_t svcvt_f32_s64_z(svbool_t, svint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_m))) -svfloat32_t svcvt_f32_u32_m(svfloat32_t, svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x))) -svfloat32_t svcvt_f32_u32_x(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_z))) -svfloat32_t svcvt_f32_u32_z(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_m))) -svfloat32_t svcvt_f32_u64_m(svfloat32_t, svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_x))) -svfloat32_t svcvt_f32_u64_x(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_z))) -svfloat32_t svcvt_f32_u64_z(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_m))) -svfloat64_t svcvt_f64_f16_m(svfloat64_t, svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_x))) -svfloat64_t svcvt_f64_f16_x(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_z))) -svfloat64_t svcvt_f64_f16_z(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_m))) -svfloat64_t svcvt_f64_f32_m(svfloat64_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_x))) -svfloat64_t svcvt_f64_f32_x(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_z))) -svfloat64_t svcvt_f64_f32_z(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_m))) -svfloat64_t svcvt_f64_s32_m(svfloat64_t, svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_x))) -svfloat64_t svcvt_f64_s32_x(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_z))) 
-svfloat64_t svcvt_f64_s32_z(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_m))) -svfloat64_t svcvt_f64_s64_m(svfloat64_t, svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_x))) -svfloat64_t svcvt_f64_s64_x(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_z))) -svfloat64_t svcvt_f64_s64_z(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_m))) -svfloat64_t svcvt_f64_u32_m(svfloat64_t, svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_x))) -svfloat64_t svcvt_f64_u32_x(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_z))) -svfloat64_t svcvt_f64_u32_z(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_m))) -svfloat64_t svcvt_f64_u64_m(svfloat64_t, svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_x))) -svfloat64_t svcvt_f64_u64_x(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_z))) -svfloat64_t svcvt_f64_u64_z(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_m))) -svint16_t svcvt_s16_f16_m(svint16_t, svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_x))) -svint16_t svcvt_s16_f16_x(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_z))) -svint16_t svcvt_s16_f16_z(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_m))) -svint32_t svcvt_s32_f16_m(svint32_t, svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_x))) -svint32_t svcvt_s32_f16_x(svbool_t, svfloat16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_z))) -svint32_t svcvt_s32_f16_z(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_m))) -svint32_t svcvt_s32_f32_m(svint32_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x))) -svint32_t svcvt_s32_f32_x(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_z))) -svint32_t svcvt_s32_f32_z(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_m))) -svint32_t svcvt_s32_f64_m(svint32_t, svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_x))) -svint32_t svcvt_s32_f64_x(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_z))) -svint32_t svcvt_s32_f64_z(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_m))) -svint64_t svcvt_s64_f16_m(svint64_t, svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_x))) -svint64_t svcvt_s64_f16_x(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_z))) -svint64_t svcvt_s64_f16_z(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_m))) -svint64_t svcvt_s64_f32_m(svint64_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_x))) -svint64_t svcvt_s64_f32_x(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_z))) -svint64_t svcvt_s64_f32_z(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_m))) -svint64_t svcvt_s64_f64_m(svint64_t, svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_x))) -svint64_t svcvt_s64_f64_x(svbool_t, 
svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_z))) -svint64_t svcvt_s64_f64_z(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_m))) -svuint16_t svcvt_u16_f16_m(svuint16_t, svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_x))) -svuint16_t svcvt_u16_f16_x(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_z))) -svuint16_t svcvt_u16_f16_z(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_m))) -svuint32_t svcvt_u32_f16_m(svuint32_t, svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_x))) -svuint32_t svcvt_u32_f16_x(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_z))) -svuint32_t svcvt_u32_f16_z(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_m))) -svuint32_t svcvt_u32_f32_m(svuint32_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x))) -svuint32_t svcvt_u32_f32_x(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_z))) -svuint32_t svcvt_u32_f32_z(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_m))) -svuint32_t svcvt_u32_f64_m(svuint32_t, svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_x))) -svuint32_t svcvt_u32_f64_x(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_z))) -svuint32_t svcvt_u32_f64_z(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_m))) -svuint64_t svcvt_u64_f16_m(svuint64_t, svbool_t, svfloat16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_x))) -svuint64_t svcvt_u64_f16_x(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_z))) -svuint64_t svcvt_u64_f16_z(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_m))) -svuint64_t svcvt_u64_f32_m(svuint64_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_x))) -svuint64_t svcvt_u64_f32_x(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_z))) -svuint64_t svcvt_u64_f32_z(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_m))) -svuint64_t svcvt_u64_f64_m(svuint64_t, svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_x))) -svuint64_t svcvt_u64_f64_x(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_z))) -svuint64_t svcvt_u64_f64_z(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_m))) -svfloat64_t svdiv_n_f64_m(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_m))) -svfloat32_t svdiv_n_f32_m(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_m))) -svfloat16_t svdiv_n_f16_m(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_x))) -svfloat64_t svdiv_n_f64_x(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_x))) -svfloat32_t svdiv_n_f32_x(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_x))) -svfloat16_t svdiv_n_f16_x(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_z))) 
-svfloat64_t svdiv_n_f64_z(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_z))) -svfloat32_t svdiv_n_f32_z(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_z))) -svfloat16_t svdiv_n_f16_z(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_m))) -svint32_t svdiv_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_m))) -svint64_t svdiv_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_x))) -svint32_t svdiv_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_x))) -svint64_t svdiv_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_z))) -svint32_t svdiv_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_z))) -svint64_t svdiv_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_m))) -svuint32_t svdiv_n_u32_m(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_m))) -svuint64_t svdiv_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_x))) -svuint32_t svdiv_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_x))) -svuint64_t svdiv_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_z))) -svuint32_t svdiv_n_u32_z(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_z))) -svuint64_t svdiv_n_u64_z(svbool_t, svuint64_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_m))) -svfloat64_t svdiv_f64_m(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_m))) -svfloat32_t svdiv_f32_m(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_m))) -svfloat16_t svdiv_f16_m(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_x))) -svfloat64_t svdiv_f64_x(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_x))) -svfloat32_t svdiv_f32_x(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_x))) -svfloat16_t svdiv_f16_x(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_z))) -svfloat64_t svdiv_f64_z(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_z))) -svfloat32_t svdiv_f32_z(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_z))) -svfloat16_t svdiv_f16_z(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_m))) -svint32_t svdiv_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_m))) -svint64_t svdiv_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_x))) -svint32_t svdiv_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_x))) -svint64_t svdiv_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_z))) -svint32_t svdiv_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_z))) -svint64_t 
svdiv_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_m))) -svuint32_t svdiv_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_m))) -svuint64_t svdiv_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_x))) -svuint32_t svdiv_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_x))) -svuint64_t svdiv_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_z))) -svuint32_t svdiv_u32_z(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_z))) -svuint64_t svdiv_u64_z(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_m))) -svfloat64_t svdivr_n_f64_m(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_m))) -svfloat32_t svdivr_n_f32_m(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_m))) -svfloat16_t svdivr_n_f16_m(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_x))) -svfloat64_t svdivr_n_f64_x(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_x))) -svfloat32_t svdivr_n_f32_x(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_x))) -svfloat16_t svdivr_n_f16_x(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_z))) -svfloat64_t svdivr_n_f64_z(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_z))) -svfloat32_t svdivr_n_f32_z(svbool_t, svfloat32_t, float32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_z))) -svfloat16_t svdivr_n_f16_z(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_m))) -svint32_t svdivr_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_m))) -svint64_t svdivr_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_x))) -svint32_t svdivr_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_x))) -svint64_t svdivr_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_z))) -svint32_t svdivr_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_z))) -svint64_t svdivr_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_m))) -svuint32_t svdivr_n_u32_m(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_m))) -svuint64_t svdivr_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_x))) -svuint32_t svdivr_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_x))) -svuint64_t svdivr_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_z))) -svuint32_t svdivr_n_u32_z(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_z))) -svuint64_t svdivr_n_u64_z(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_m))) -svfloat64_t svdivr_f64_m(svbool_t, svfloat64_t, svfloat64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_m))) -svfloat32_t svdivr_f32_m(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_m))) -svfloat16_t svdivr_f16_m(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_x))) -svfloat64_t svdivr_f64_x(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_x))) -svfloat32_t svdivr_f32_x(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_x))) -svfloat16_t svdivr_f16_x(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_z))) -svfloat64_t svdivr_f64_z(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_z))) -svfloat32_t svdivr_f32_z(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_z))) -svfloat16_t svdivr_f16_z(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_m))) -svint32_t svdivr_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_m))) -svint64_t svdivr_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_x))) -svint32_t svdivr_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_x))) -svint64_t svdivr_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_z))) -svint32_t svdivr_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_z))) -svint64_t svdivr_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_m))) 
-svuint32_t svdivr_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_m))) -svuint64_t svdivr_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_x))) -svuint32_t svdivr_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_x))) -svuint64_t svdivr_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_z))) -svuint32_t svdivr_u32_z(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_z))) -svuint64_t svdivr_u64_z(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s32))) -svint32_t svdot_n_s32(svint32_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s64))) -svint64_t svdot_n_s64(svint64_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u32))) -svuint32_t svdot_n_u32(svuint32_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u64))) -svuint64_t svdot_n_u64(svuint64_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s32))) -svint32_t svdot_s32(svint32_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s64))) -svint64_t svdot_s64(svint64_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u32))) -svuint32_t svdot_u32(svuint32_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u64))) -svuint64_t svdot_u64(svuint64_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s32))) -svint32_t svdot_lane_s32(svint32_t, svint8_t, svint8_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s64))) -svint64_t svdot_lane_s64(svint64_t, svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u32))) -svuint32_t svdot_lane_u32(svuint32_t, svuint8_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u64))) -svuint64_t svdot_lane_u64(svuint64_t, svuint16_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8))) -svuint8_t svdup_n_u8(uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32))) -svuint32_t svdup_n_u32(uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64))) -svuint64_t svdup_n_u64(uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16))) -svuint16_t svdup_n_u16(uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8))) -svint8_t svdup_n_s8(int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64))) -svfloat64_t svdup_n_f64(float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32))) -svfloat32_t svdup_n_f32(float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16))) -svfloat16_t svdup_n_f16(float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32))) -svint32_t svdup_n_s32(int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64))) -svint64_t svdup_n_s64(int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16))) -svint16_t svdup_n_s16(int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_m))) -svuint8_t svdup_n_u8_m(svuint8_t, svbool_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_m))) -svuint32_t svdup_n_u32_m(svuint32_t, svbool_t, uint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_m))) -svuint64_t svdup_n_u64_m(svuint64_t, svbool_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_m))) -svuint16_t svdup_n_u16_m(svuint16_t, svbool_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_m))) -svint8_t svdup_n_s8_m(svint8_t, svbool_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_m))) -svfloat64_t svdup_n_f64_m(svfloat64_t, svbool_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_m))) -svfloat32_t svdup_n_f32_m(svfloat32_t, svbool_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_m))) -svfloat16_t svdup_n_f16_m(svfloat16_t, svbool_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_m))) -svint32_t svdup_n_s32_m(svint32_t, svbool_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_m))) -svint64_t svdup_n_s64_m(svint64_t, svbool_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_m))) -svint16_t svdup_n_s16_m(svint16_t, svbool_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b8))) -svbool_t svdup_n_b8(bool); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b32))) -svbool_t svdup_n_b32(bool); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b64))) -svbool_t svdup_n_b64(bool); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b16))) -svbool_t svdup_n_b16(bool); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_x))) -svuint8_t svdup_n_u8_x(svbool_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_x))) -svuint32_t svdup_n_u32_x(svbool_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_x))) -svuint64_t svdup_n_u64_x(svbool_t, 
uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_x))) -svuint16_t svdup_n_u16_x(svbool_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_x))) -svint8_t svdup_n_s8_x(svbool_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_x))) -svfloat64_t svdup_n_f64_x(svbool_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_x))) -svfloat32_t svdup_n_f32_x(svbool_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_x))) -svfloat16_t svdup_n_f16_x(svbool_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_x))) -svint32_t svdup_n_s32_x(svbool_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_x))) -svint64_t svdup_n_s64_x(svbool_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_x))) -svint16_t svdup_n_s16_x(svbool_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_z))) -svuint8_t svdup_n_u8_z(svbool_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_z))) -svuint32_t svdup_n_u32_z(svbool_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_z))) -svuint64_t svdup_n_u64_z(svbool_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_z))) -svuint16_t svdup_n_u16_z(svbool_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_z))) -svint8_t svdup_n_s8_z(svbool_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_z))) -svfloat64_t svdup_n_f64_z(svbool_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_z))) -svfloat32_t svdup_n_f32_z(svbool_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_z))) -svfloat16_t svdup_n_f16_z(svbool_t, 
float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_z))) -svint32_t svdup_n_s32_z(svbool_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_z))) -svint64_t svdup_n_s64_z(svbool_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_z))) -svint16_t svdup_n_s16_z(svbool_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u8))) -svuint8_t svdup_lane_u8(svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u32))) -svuint32_t svdup_lane_u32(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u64))) -svuint64_t svdup_lane_u64(svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u16))) -svuint16_t svdup_lane_u16(svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s8))) -svint8_t svdup_lane_s8(svint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f64))) -svfloat64_t svdup_lane_f64(svfloat64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f32))) -svfloat32_t svdup_lane_f32(svfloat32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f16))) -svfloat16_t svdup_lane_f16(svfloat16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s32))) -svint32_t svdup_lane_s32(svint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s64))) -svint64_t svdup_lane_s64(svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s16))) -svint16_t svdup_lane_s16(svint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u8))) -svuint8_t svdupq_n_u8(uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, 
uint8_t, uint8_t, uint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s8))) -svint8_t svdupq_n_s8(int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u16))) -svuint16_t svdupq_n_u16(uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f16))) -svfloat16_t svdupq_n_f16(float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s16))) -svint16_t svdupq_n_s16(int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u32))) -svuint32_t svdupq_n_u32(uint32_t, uint32_t, uint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f32))) -svfloat32_t svdupq_n_f32(float32_t, float32_t, float32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s32))) -svint32_t svdupq_n_s32(int32_t, int32_t, int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u64))) -svuint64_t svdupq_n_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f64))) -svfloat64_t svdupq_n_f64(float64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s64))) -svint64_t svdupq_n_s64(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b8))) -svbool_t svdupq_n_b8(bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b16))) -svbool_t svdupq_n_b16(bool, bool, bool, bool, bool, bool, bool, bool); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b32))) -svbool_t svdupq_n_b32(bool, bool, bool, bool); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b64))) -svbool_t svdupq_n_b64(bool, bool); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u8))) -svuint8_t svdupq_lane_u8(svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u32))) -svuint32_t svdupq_lane_u32(svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u64))) -svuint64_t svdupq_lane_u64(svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u16))) -svuint16_t svdupq_lane_u16(svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s8))) -svint8_t svdupq_lane_s8(svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f64))) -svfloat64_t svdupq_lane_f64(svfloat64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f32))) -svfloat32_t svdupq_lane_f32(svfloat32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f16))) -svfloat16_t svdupq_lane_f16(svfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s32))) -svint32_t svdupq_lane_s32(svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s64))) -svint64_t svdupq_lane_s64(svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s16))) -svint16_t svdupq_lane_s16(svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_b_z))) -svbool_t sveor_b_z(svbool_t, svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_m))) -svuint8_t sveor_n_u8_m(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_m))) 
-svuint32_t sveor_n_u32_m(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_m))) -svuint64_t sveor_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_m))) -svuint16_t sveor_n_u16_m(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_m))) -svint8_t sveor_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_m))) -svint32_t sveor_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_m))) -svint64_t sveor_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_m))) -svint16_t sveor_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_x))) -svuint8_t sveor_n_u8_x(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_x))) -svuint32_t sveor_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_x))) -svuint64_t sveor_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_x))) -svuint16_t sveor_n_u16_x(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_x))) -svint8_t sveor_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_x))) -svint32_t sveor_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_x))) -svint64_t sveor_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_x))) -svint16_t sveor_n_s16_x(svbool_t, svint16_t, int16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_z))) -svuint8_t sveor_n_u8_z(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_z))) -svuint32_t sveor_n_u32_z(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_z))) -svuint64_t sveor_n_u64_z(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_z))) -svuint16_t sveor_n_u16_z(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_z))) -svint8_t sveor_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_z))) -svint32_t sveor_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_z))) -svint64_t sveor_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_z))) -svint16_t sveor_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_m))) -svuint8_t sveor_u8_m(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_m))) -svuint32_t sveor_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_m))) -svuint64_t sveor_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_m))) -svuint16_t sveor_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_m))) -svint8_t sveor_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_m))) -svint32_t sveor_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_m))) -svint64_t sveor_s64_m(svbool_t, svint64_t, svint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_m))) -svint16_t sveor_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_x))) -svuint8_t sveor_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_x))) -svuint32_t sveor_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_x))) -svuint64_t sveor_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_x))) -svuint16_t sveor_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_x))) -svint8_t sveor_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_x))) -svint32_t sveor_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_x))) -svint64_t sveor_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_x))) -svint16_t sveor_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_z))) -svuint8_t sveor_u8_z(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_z))) -svuint32_t sveor_u32_z(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_z))) -svuint64_t sveor_u64_z(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_z))) -svuint16_t sveor_u16_z(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_z))) -svint8_t sveor_s8_z(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_z))) -svint32_t sveor_s32_z(svbool_t, svint32_t, svint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_z))) -svint64_t sveor_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_z))) -svint16_t sveor_s16_z(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u8))) -uint8_t sveorv_u8(svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u32))) -uint32_t sveorv_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u64))) -uint64_t sveorv_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u16))) -uint16_t sveorv_u16(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s8))) -int8_t sveorv_s8(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s32))) -int32_t sveorv_s32(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s64))) -int64_t sveorv_s64(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s16))) -int16_t sveorv_s16(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f64))) -svfloat64_t svexpa_f64(svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f32))) -svfloat32_t svexpa_f32(svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f16))) -svfloat16_t svexpa_f16(svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u8))) -svuint8_t svext_u8(svuint8_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u32))) -svuint32_t svext_u32(svuint32_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u64))) -svuint64_t svext_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u16))) 
-svuint16_t svext_u16(svuint16_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s8))) -svint8_t svext_s8(svint8_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f64))) -svfloat64_t svext_f64(svfloat64_t, svfloat64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f32))) -svfloat32_t svext_f32(svfloat32_t, svfloat32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f16))) -svfloat16_t svext_f16(svfloat16_t, svfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s32))) -svint32_t svext_s32(svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s64))) -svint64_t svext_s64(svint64_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s16))) -svint16_t svext_s16(svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_m))) -svint32_t svextb_s32_m(svint32_t, svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_m))) -svint64_t svextb_s64_m(svint64_t, svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_m))) -svint16_t svextb_s16_m(svint16_t, svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_x))) -svint32_t svextb_s32_x(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_x))) -svint64_t svextb_s64_x(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_x))) -svint16_t svextb_s16_x(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_z))) -svint32_t svextb_s32_z(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_z))) -svint64_t svextb_s64_z(svbool_t, svint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_z))) -svint16_t svextb_s16_z(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_m))) -svuint32_t svextb_u32_m(svuint32_t, svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_m))) -svuint64_t svextb_u64_m(svuint64_t, svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_m))) -svuint16_t svextb_u16_m(svuint16_t, svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_x))) -svuint32_t svextb_u32_x(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_x))) -svuint64_t svextb_u64_x(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_x))) -svuint16_t svextb_u16_x(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_z))) -svuint32_t svextb_u32_z(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_z))) -svuint64_t svextb_u64_z(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_z))) -svuint16_t svextb_u16_z(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_m))) -svint32_t svexth_s32_m(svint32_t, svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_m))) -svint64_t svexth_s64_m(svint64_t, svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_x))) -svint32_t svexth_s32_x(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_x))) -svint64_t svexth_s64_x(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_z))) -svint32_t svexth_s32_z(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_z))) 
-svint64_t svexth_s64_z(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_m))) -svuint32_t svexth_u32_m(svuint32_t, svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_m))) -svuint64_t svexth_u64_m(svuint64_t, svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_x))) -svuint32_t svexth_u32_x(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_x))) -svuint64_t svexth_u64_x(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_z))) -svuint32_t svexth_u32_z(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_z))) -svuint64_t svexth_u64_z(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_m))) -svint64_t svextw_s64_m(svint64_t, svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_x))) -svint64_t svextw_s64_x(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_z))) -svint64_t svextw_s64_z(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_m))) -svuint64_t svextw_u64_m(svuint64_t, svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_x))) -svuint64_t svextw_u64_x(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_z))) -svuint64_t svextw_u64_z(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u8))) -svuint8_t svget2_u8(svuint8x2_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u32))) -svuint32_t svget2_u32(svuint32x2_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u64))) -svuint64_t svget2_u64(svuint64x2_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u16))) -svuint16_t svget2_u16(svuint16x2_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s8))) -svint8_t svget2_s8(svint8x2_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f64))) -svfloat64_t svget2_f64(svfloat64x2_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f32))) -svfloat32_t svget2_f32(svfloat32x2_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f16))) -svfloat16_t svget2_f16(svfloat16x2_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s32))) -svint32_t svget2_s32(svint32x2_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s64))) -svint64_t svget2_s64(svint64x2_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s16))) -svint16_t svget2_s16(svint16x2_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u8))) -svuint8_t svget3_u8(svuint8x3_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u32))) -svuint32_t svget3_u32(svuint32x3_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u64))) -svuint64_t svget3_u64(svuint64x3_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u16))) -svuint16_t svget3_u16(svuint16x3_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s8))) -svint8_t svget3_s8(svint8x3_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f64))) -svfloat64_t svget3_f64(svfloat64x3_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f32))) -svfloat32_t svget3_f32(svfloat32x3_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f16))) -svfloat16_t svget3_f16(svfloat16x3_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s32))) -svint32_t svget3_s32(svint32x3_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s64))) -svint64_t svget3_s64(svint64x3_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s16))) -svint16_t svget3_s16(svint16x3_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u8))) -svuint8_t svget4_u8(svuint8x4_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u32))) -svuint32_t svget4_u32(svuint32x4_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u64))) -svuint64_t svget4_u64(svuint64x4_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u16))) -svuint16_t svget4_u16(svuint16x4_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s8))) -svint8_t svget4_s8(svint8x4_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f64))) -svfloat64_t svget4_f64(svfloat64x4_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f32))) -svfloat32_t svget4_f32(svfloat32x4_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f16))) -svfloat16_t svget4_f16(svfloat16x4_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s32))) -svint32_t svget4_s32(svint32x4_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s64))) -svint64_t svget4_s64(svint64x4_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s16))) -svint16_t svget4_s16(svint16x4_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_u8))) -svuint8_t svindex_u8(uint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_u32))) -svuint32_t svindex_u32(uint32_t, uint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_u64))) -svuint64_t svindex_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_u16))) -svuint16_t svindex_u16(uint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_s8))) -svint8_t svindex_s8(int8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_s32))) -svint32_t svindex_s32(int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_s64))) -svint64_t svindex_s64(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_s16))) -svint16_t svindex_s16(int16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u8))) -svuint8_t svinsr_n_u8(svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u32))) -svuint32_t svinsr_n_u32(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u64))) -svuint64_t svinsr_n_u64(svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u16))) -svuint16_t svinsr_n_u16(svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s8))) -svint8_t svinsr_n_s8(svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f64))) -svfloat64_t svinsr_n_f64(svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f32))) -svfloat32_t svinsr_n_f32(svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f16))) -svfloat16_t svinsr_n_f16(svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s32))) -svint32_t svinsr_n_s32(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s64))) -svint64_t svinsr_n_s64(svint64_t, int64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s16))) -svint16_t svinsr_n_s16(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u8))) -uint8_t svlasta_u8(svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u32))) -uint32_t svlasta_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u64))) -uint64_t svlasta_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u16))) -uint16_t svlasta_u16(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s8))) -int8_t svlasta_s8(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f64))) -float64_t svlasta_f64(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f32))) -float32_t svlasta_f32(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f16))) -float16_t svlasta_f16(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s32))) -int32_t svlasta_s32(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s64))) -int64_t svlasta_s64(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s16))) -int16_t svlasta_s16(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u8))) -uint8_t svlastb_u8(svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u32))) -uint32_t svlastb_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u64))) -uint64_t svlastb_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u16))) -uint16_t svlastb_u16(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s8))) -int8_t 
svlastb_s8(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f64))) -float64_t svlastb_f64(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f32))) -float32_t svlastb_f32(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f16))) -float16_t svlastb_f16(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s32))) -int32_t svlastb_s32(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s64))) -int64_t svlastb_s64(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s16))) -int16_t svlastb_s16(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8))) -svuint8_t svld1_u8(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32))) -svuint32_t svld1_u32(svbool_t, uint32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64))) -svuint64_t svld1_u64(svbool_t, uint64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16))) -svuint16_t svld1_u16(svbool_t, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8))) -svint8_t svld1_s8(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64))) -svfloat64_t svld1_f64(svbool_t, float64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32))) -svfloat32_t svld1_f32(svbool_t, float32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16))) -svfloat16_t svld1_f16(svbool_t, float16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32))) -svint32_t svld1_s32(svbool_t, int32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64))) -svint64_t svld1_s64(svbool_t, int64_t const *); 
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16))) -svint16_t svld1_s16(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_u32))) -svuint32_t svld1_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_u64))) -svuint64_t svld1_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_f64))) -svfloat64_t svld1_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_f32))) -svfloat32_t svld1_gather_u32base_index_f32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_s32))) -svint32_t svld1_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_s64))) -svint64_t svld1_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_u32))) -svuint32_t svld1_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_u64))) -svuint64_t svld1_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_f64))) -svfloat64_t svld1_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_f32))) -svfloat32_t svld1_gather_u32base_offset_f32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_s32))) -svint32_t svld1_gather_u32base_offset_s32(svbool_t, svuint32_t, 
int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_s64))) -svint64_t svld1_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_u32))) -svuint32_t svld1_gather_u32base_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_u64))) -svuint64_t svld1_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_f64))) -svfloat64_t svld1_gather_u64base_f64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_f32))) -svfloat32_t svld1_gather_u32base_f32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_s32))) -svint32_t svld1_gather_u32base_s32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_s64))) -svint64_t svld1_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_u32))) -svuint32_t svld1_gather_s32index_u32(svbool_t, uint32_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_f32))) -svfloat32_t svld1_gather_s32index_f32(svbool_t, float32_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_s32))) -svint32_t svld1_gather_s32index_s32(svbool_t, int32_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_u32))) -svuint32_t svld1_gather_u32index_u32(svbool_t, uint32_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_f32))) -svfloat32_t svld1_gather_u32index_f32(svbool_t, float32_t const *, svuint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_s32))) -svint32_t svld1_gather_u32index_s32(svbool_t, int32_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_u64))) -svuint64_t svld1_gather_s64index_u64(svbool_t, uint64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_f64))) -svfloat64_t svld1_gather_s64index_f64(svbool_t, float64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_s64))) -svint64_t svld1_gather_s64index_s64(svbool_t, int64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_u64))) -svuint64_t svld1_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_f64))) -svfloat64_t svld1_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_s64))) -svint64_t svld1_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_u32))) -svuint32_t svld1_gather_s32offset_u32(svbool_t, uint32_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_f32))) -svfloat32_t svld1_gather_s32offset_f32(svbool_t, float32_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_s32))) -svint32_t svld1_gather_s32offset_s32(svbool_t, int32_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_u32))) -svuint32_t svld1_gather_u32offset_u32(svbool_t, uint32_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_f32))) -svfloat32_t svld1_gather_u32offset_f32(svbool_t, 
float32_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_s32))) -svint32_t svld1_gather_u32offset_s32(svbool_t, int32_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_u64))) -svuint64_t svld1_gather_s64offset_u64(svbool_t, uint64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_f64))) -svfloat64_t svld1_gather_s64offset_f64(svbool_t, float64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_s64))) -svint64_t svld1_gather_s64offset_s64(svbool_t, int64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_u64))) -svuint64_t svld1_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_f64))) -svfloat64_t svld1_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_s64))) -svint64_t svld1_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8))) -svuint8_t svld1_vnum_u8(svbool_t, uint8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32))) -svuint32_t svld1_vnum_u32(svbool_t, uint32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64))) -svuint64_t svld1_vnum_u64(svbool_t, uint64_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16))) -svuint16_t svld1_vnum_u16(svbool_t, uint16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8))) -svint8_t svld1_vnum_s8(svbool_t, int8_t const *, int64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64))) -svfloat64_t svld1_vnum_f64(svbool_t, float64_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32))) -svfloat32_t svld1_vnum_f32(svbool_t, float32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16))) -svfloat16_t svld1_vnum_f16(svbool_t, float16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32))) -svint32_t svld1_vnum_s32(svbool_t, int32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64))) -svint64_t svld1_vnum_s64(svbool_t, int64_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16))) -svint16_t svld1_vnum_s16(svbool_t, int16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u8))) -svuint8_t svld1rq_u8(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u32))) -svuint32_t svld1rq_u32(svbool_t, uint32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u64))) -svuint64_t svld1rq_u64(svbool_t, uint64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u16))) -svuint16_t svld1rq_u16(svbool_t, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s8))) -svint8_t svld1rq_s8(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f64))) -svfloat64_t svld1rq_f64(svbool_t, float64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f32))) -svfloat32_t svld1rq_f32(svbool_t, float32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f16))) -svfloat16_t svld1rq_f16(svbool_t, float16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s32))) -svint32_t svld1rq_s32(svbool_t, 
int32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s64))) -svint64_t svld1rq_s64(svbool_t, int64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s16))) -svint16_t svld1rq_s16(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_u32))) -svuint32_t svld1sb_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_u64))) -svuint64_t svld1sb_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_s32))) -svint32_t svld1sb_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_s64))) -svint64_t svld1sb_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_u32))) -svuint32_t svld1sb_gather_u32base_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_u64))) -svuint64_t svld1sb_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_s32))) -svint32_t svld1sb_gather_u32base_s32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_s64))) -svint64_t svld1sb_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_u32))) -svuint32_t svld1sb_gather_s32offset_u32(svbool_t, int8_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_s32))) -svint32_t svld1sb_gather_s32offset_s32(svbool_t, int8_t const *, svint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_u32))) -svuint32_t svld1sb_gather_u32offset_u32(svbool_t, int8_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_s32))) -svint32_t svld1sb_gather_u32offset_s32(svbool_t, int8_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_u64))) -svuint64_t svld1sb_gather_s64offset_u64(svbool_t, int8_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_s64))) -svint64_t svld1sb_gather_s64offset_s64(svbool_t, int8_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_u64))) -svuint64_t svld1sb_gather_u64offset_u64(svbool_t, int8_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_s64))) -svint64_t svld1sb_gather_u64offset_s64(svbool_t, int8_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_u32))) -svuint32_t svld1sb_vnum_u32(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_u64))) -svuint64_t svld1sb_vnum_u64(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_u16))) -svuint16_t svld1sb_vnum_u16(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_s32))) -svint32_t svld1sb_vnum_s32(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_s64))) -svint64_t svld1sb_vnum_s64(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_s16))) -svint16_t svld1sb_vnum_s16(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_u32))) -svuint32_t 
svld1sb_u32(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_u64))) -svuint64_t svld1sb_u64(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_u16))) -svuint16_t svld1sb_u16(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_s32))) -svint32_t svld1sb_s32(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_s64))) -svint64_t svld1sb_s64(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_s16))) -svint16_t svld1sb_s16(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_u32))) -svuint32_t svld1sh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_u64))) -svuint64_t svld1sh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_s32))) -svint32_t svld1sh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_s64))) -svint64_t svld1sh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_u32))) -svuint32_t svld1sh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_u64))) -svuint64_t svld1sh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_s32))) -svint32_t svld1sh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_s64))) 
-svint64_t svld1sh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_u32))) -svuint32_t svld1sh_gather_u32base_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_u64))) -svuint64_t svld1sh_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_s32))) -svint32_t svld1sh_gather_u32base_s32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_s64))) -svint64_t svld1sh_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_u32))) -svuint32_t svld1sh_gather_s32index_u32(svbool_t, int16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_s32))) -svint32_t svld1sh_gather_s32index_s32(svbool_t, int16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_u32))) -svuint32_t svld1sh_gather_u32index_u32(svbool_t, int16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_s32))) -svint32_t svld1sh_gather_u32index_s32(svbool_t, int16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_u64))) -svuint64_t svld1sh_gather_s64index_u64(svbool_t, int16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_s64))) -svint64_t svld1sh_gather_s64index_s64(svbool_t, int16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_u64))) -svuint64_t svld1sh_gather_u64index_u64(svbool_t, int16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_s64))) -svint64_t 
svld1sh_gather_u64index_s64(svbool_t, int16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_u32))) -svuint32_t svld1sh_gather_s32offset_u32(svbool_t, int16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_s32))) -svint32_t svld1sh_gather_s32offset_s32(svbool_t, int16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_u32))) -svuint32_t svld1sh_gather_u32offset_u32(svbool_t, int16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_s32))) -svint32_t svld1sh_gather_u32offset_s32(svbool_t, int16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_u64))) -svuint64_t svld1sh_gather_s64offset_u64(svbool_t, int16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_s64))) -svint64_t svld1sh_gather_s64offset_s64(svbool_t, int16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_u64))) -svuint64_t svld1sh_gather_u64offset_u64(svbool_t, int16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_s64))) -svint64_t svld1sh_gather_u64offset_s64(svbool_t, int16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_u32))) -svuint32_t svld1sh_vnum_u32(svbool_t, int16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_u64))) -svuint64_t svld1sh_vnum_u64(svbool_t, int16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_s32))) -svint32_t svld1sh_vnum_s32(svbool_t, int16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_s64))) -svint64_t 
svld1sh_vnum_s64(svbool_t, int16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_u32))) -svuint32_t svld1sh_u32(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_u64))) -svuint64_t svld1sh_u64(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_s32))) -svint32_t svld1sh_s32(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_s64))) -svint64_t svld1sh_s64(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_u64))) -svuint64_t svld1sw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_s64))) -svint64_t svld1sw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_u64))) -svuint64_t svld1sw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_s64))) -svint64_t svld1sw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_u64))) -svuint64_t svld1sw_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_s64))) -svint64_t svld1sw_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_u64))) -svuint64_t svld1sw_gather_s64index_u64(svbool_t, int32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_s64))) -svint64_t svld1sw_gather_s64index_s64(svbool_t, int32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_u64))) 
-svuint64_t svld1sw_gather_u64index_u64(svbool_t, int32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_s64))) -svint64_t svld1sw_gather_u64index_s64(svbool_t, int32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_u64))) -svuint64_t svld1sw_gather_s64offset_u64(svbool_t, int32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_s64))) -svint64_t svld1sw_gather_s64offset_s64(svbool_t, int32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_u64))) -svuint64_t svld1sw_gather_u64offset_u64(svbool_t, int32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_s64))) -svint64_t svld1sw_gather_u64offset_s64(svbool_t, int32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_vnum_u64))) -svuint64_t svld1sw_vnum_u64(svbool_t, int32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_vnum_s64))) -svint64_t svld1sw_vnum_s64(svbool_t, int32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_u64))) -svuint64_t svld1sw_u64(svbool_t, int32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_s64))) -svint64_t svld1sw_s64(svbool_t, int32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_u32))) -svuint32_t svld1ub_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_u64))) -svuint64_t svld1ub_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_s32))) -svint32_t svld1ub_gather_u32base_offset_s32(svbool_t, 
svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_s64))) -svint64_t svld1ub_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_u32))) -svuint32_t svld1ub_gather_u32base_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_u64))) -svuint64_t svld1ub_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_s32))) -svint32_t svld1ub_gather_u32base_s32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_s64))) -svint64_t svld1ub_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_u32))) -svuint32_t svld1ub_gather_s32offset_u32(svbool_t, uint8_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_s32))) -svint32_t svld1ub_gather_s32offset_s32(svbool_t, uint8_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_u32))) -svuint32_t svld1ub_gather_u32offset_u32(svbool_t, uint8_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_s32))) -svint32_t svld1ub_gather_u32offset_s32(svbool_t, uint8_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_u64))) -svuint64_t svld1ub_gather_s64offset_u64(svbool_t, uint8_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_s64))) -svint64_t svld1ub_gather_s64offset_s64(svbool_t, uint8_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_u64))) -svuint64_t svld1ub_gather_u64offset_u64(svbool_t, uint8_t 
const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_s64))) -svint64_t svld1ub_gather_u64offset_s64(svbool_t, uint8_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_u32))) -svuint32_t svld1ub_vnum_u32(svbool_t, uint8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_u64))) -svuint64_t svld1ub_vnum_u64(svbool_t, uint8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_u16))) -svuint16_t svld1ub_vnum_u16(svbool_t, uint8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_s32))) -svint32_t svld1ub_vnum_s32(svbool_t, uint8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_s64))) -svint64_t svld1ub_vnum_s64(svbool_t, uint8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_s16))) -svint16_t svld1ub_vnum_s16(svbool_t, uint8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_u32))) -svuint32_t svld1ub_u32(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_u64))) -svuint64_t svld1ub_u64(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_u16))) -svuint16_t svld1ub_u16(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_s32))) -svint32_t svld1ub_s32(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_s64))) -svint64_t svld1ub_s64(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_s16))) -svint16_t svld1ub_s16(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_u32))) -svuint32_t svld1uh_gather_u32base_index_u32(svbool_t, 
svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_u64))) -svuint64_t svld1uh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_s32))) -svint32_t svld1uh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_s64))) -svint64_t svld1uh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_u32))) -svuint32_t svld1uh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_u64))) -svuint64_t svld1uh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_s32))) -svint32_t svld1uh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_s64))) -svint64_t svld1uh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_u32))) -svuint32_t svld1uh_gather_u32base_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_u64))) -svuint64_t svld1uh_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_s32))) -svint32_t svld1uh_gather_u32base_s32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_s64))) -svint64_t svld1uh_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_u32))) -svuint32_t svld1uh_gather_s32index_u32(svbool_t, 
uint16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_s32))) -svint32_t svld1uh_gather_s32index_s32(svbool_t, uint16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_u32))) -svuint32_t svld1uh_gather_u32index_u32(svbool_t, uint16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_s32))) -svint32_t svld1uh_gather_u32index_s32(svbool_t, uint16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_u64))) -svuint64_t svld1uh_gather_s64index_u64(svbool_t, uint16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_s64))) -svint64_t svld1uh_gather_s64index_s64(svbool_t, uint16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_u64))) -svuint64_t svld1uh_gather_u64index_u64(svbool_t, uint16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_s64))) -svint64_t svld1uh_gather_u64index_s64(svbool_t, uint16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_u32))) -svuint32_t svld1uh_gather_s32offset_u32(svbool_t, uint16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_s32))) -svint32_t svld1uh_gather_s32offset_s32(svbool_t, uint16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_u32))) -svuint32_t svld1uh_gather_u32offset_u32(svbool_t, uint16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_s32))) -svint32_t svld1uh_gather_u32offset_s32(svbool_t, uint16_t const *, svuint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_u64))) -svuint64_t svld1uh_gather_s64offset_u64(svbool_t, uint16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_s64))) -svint64_t svld1uh_gather_s64offset_s64(svbool_t, uint16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_u64))) -svuint64_t svld1uh_gather_u64offset_u64(svbool_t, uint16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_s64))) -svint64_t svld1uh_gather_u64offset_s64(svbool_t, uint16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_u32))) -svuint32_t svld1uh_vnum_u32(svbool_t, uint16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_u64))) -svuint64_t svld1uh_vnum_u64(svbool_t, uint16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_s32))) -svint32_t svld1uh_vnum_s32(svbool_t, uint16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_s64))) -svint64_t svld1uh_vnum_s64(svbool_t, uint16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_u32))) -svuint32_t svld1uh_u32(svbool_t, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_u64))) -svuint64_t svld1uh_u64(svbool_t, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_s32))) -svint32_t svld1uh_s32(svbool_t, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_s64))) -svint64_t svld1uh_s64(svbool_t, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_u64))) -svuint64_t svld1uw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_s64))) -svint64_t svld1uw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_u64))) -svuint64_t svld1uw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_s64))) -svint64_t svld1uw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_u64))) -svuint64_t svld1uw_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_s64))) -svint64_t svld1uw_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_u64))) -svuint64_t svld1uw_gather_s64index_u64(svbool_t, uint32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_s64))) -svint64_t svld1uw_gather_s64index_s64(svbool_t, uint32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_u64))) -svuint64_t svld1uw_gather_u64index_u64(svbool_t, uint32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_s64))) -svint64_t svld1uw_gather_u64index_s64(svbool_t, uint32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_u64))) -svuint64_t svld1uw_gather_s64offset_u64(svbool_t, uint32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_s64))) -svint64_t svld1uw_gather_s64offset_s64(svbool_t, uint32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_u64))) -svuint64_t 
svld1uw_gather_u64offset_u64(svbool_t, uint32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_s64))) -svint64_t svld1uw_gather_u64offset_s64(svbool_t, uint32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_vnum_u64))) -svuint64_t svld1uw_vnum_u64(svbool_t, uint32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_vnum_s64))) -svint64_t svld1uw_vnum_s64(svbool_t, uint32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_u64))) -svuint64_t svld1uw_u64(svbool_t, uint32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_s64))) -svint64_t svld1uw_s64(svbool_t, uint32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u8))) -svuint8x2_t svld2_u8(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u32))) -svuint32x2_t svld2_u32(svbool_t, uint32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u64))) -svuint64x2_t svld2_u64(svbool_t, uint64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u16))) -svuint16x2_t svld2_u16(svbool_t, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s8))) -svint8x2_t svld2_s8(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f64))) -svfloat64x2_t svld2_f64(svbool_t, float64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f32))) -svfloat32x2_t svld2_f32(svbool_t, float32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f16))) -svfloat16x2_t svld2_f16(svbool_t, float16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s32))) -svint32x2_t svld2_s32(svbool_t, int32_t const *); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s64))) -svint64x2_t svld2_s64(svbool_t, int64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s16))) -svint16x2_t svld2_s16(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u8))) -svuint8x2_t svld2_vnum_u8(svbool_t, uint8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u32))) -svuint32x2_t svld2_vnum_u32(svbool_t, uint32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u64))) -svuint64x2_t svld2_vnum_u64(svbool_t, uint64_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u16))) -svuint16x2_t svld2_vnum_u16(svbool_t, uint16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s8))) -svint8x2_t svld2_vnum_s8(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f64))) -svfloat64x2_t svld2_vnum_f64(svbool_t, float64_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f32))) -svfloat32x2_t svld2_vnum_f32(svbool_t, float32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f16))) -svfloat16x2_t svld2_vnum_f16(svbool_t, float16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s32))) -svint32x2_t svld2_vnum_s32(svbool_t, int32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s64))) -svint64x2_t svld2_vnum_s64(svbool_t, int64_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s16))) -svint16x2_t svld2_vnum_s16(svbool_t, int16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u8))) -svuint8x3_t svld3_u8(svbool_t, uint8_t const *); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u32))) -svuint32x3_t svld3_u32(svbool_t, uint32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u64))) -svuint64x3_t svld3_u64(svbool_t, uint64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u16))) -svuint16x3_t svld3_u16(svbool_t, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s8))) -svint8x3_t svld3_s8(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f64))) -svfloat64x3_t svld3_f64(svbool_t, float64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f32))) -svfloat32x3_t svld3_f32(svbool_t, float32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f16))) -svfloat16x3_t svld3_f16(svbool_t, float16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s32))) -svint32x3_t svld3_s32(svbool_t, int32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s64))) -svint64x3_t svld3_s64(svbool_t, int64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s16))) -svint16x3_t svld3_s16(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u8))) -svuint8x3_t svld3_vnum_u8(svbool_t, uint8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u32))) -svuint32x3_t svld3_vnum_u32(svbool_t, uint32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u64))) -svuint64x3_t svld3_vnum_u64(svbool_t, uint64_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u16))) -svuint16x3_t svld3_vnum_u16(svbool_t, uint16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s8))) -svint8x3_t svld3_vnum_s8(svbool_t, int8_t const *, int64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f64))) -svfloat64x3_t svld3_vnum_f64(svbool_t, float64_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f32))) -svfloat32x3_t svld3_vnum_f32(svbool_t, float32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f16))) -svfloat16x3_t svld3_vnum_f16(svbool_t, float16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s32))) -svint32x3_t svld3_vnum_s32(svbool_t, int32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s64))) -svint64x3_t svld3_vnum_s64(svbool_t, int64_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s16))) -svint16x3_t svld3_vnum_s16(svbool_t, int16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u8))) -svuint8x4_t svld4_u8(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u32))) -svuint32x4_t svld4_u32(svbool_t, uint32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u64))) -svuint64x4_t svld4_u64(svbool_t, uint64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u16))) -svuint16x4_t svld4_u16(svbool_t, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s8))) -svint8x4_t svld4_s8(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f64))) -svfloat64x4_t svld4_f64(svbool_t, float64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f32))) -svfloat32x4_t svld4_f32(svbool_t, float32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f16))) -svfloat16x4_t svld4_f16(svbool_t, float16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s32))) -svint32x4_t svld4_s32(svbool_t, int32_t 
const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s64))) -svint64x4_t svld4_s64(svbool_t, int64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s16))) -svint16x4_t svld4_s16(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u8))) -svuint8x4_t svld4_vnum_u8(svbool_t, uint8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u32))) -svuint32x4_t svld4_vnum_u32(svbool_t, uint32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u64))) -svuint64x4_t svld4_vnum_u64(svbool_t, uint64_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u16))) -svuint16x4_t svld4_vnum_u16(svbool_t, uint16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s8))) -svint8x4_t svld4_vnum_s8(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f64))) -svfloat64x4_t svld4_vnum_f64(svbool_t, float64_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f32))) -svfloat32x4_t svld4_vnum_f32(svbool_t, float32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f16))) -svfloat16x4_t svld4_vnum_f16(svbool_t, float16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s32))) -svint32x4_t svld4_vnum_s32(svbool_t, int32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s64))) -svint64x4_t svld4_vnum_s64(svbool_t, int64_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s16))) -svint16x4_t svld4_vnum_s16(svbool_t, int16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u8))) -svuint8_t svldff1_u8(svbool_t, uint8_t const *); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u32))) -svuint32_t svldff1_u32(svbool_t, uint32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u64))) -svuint64_t svldff1_u64(svbool_t, uint64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u16))) -svuint16_t svldff1_u16(svbool_t, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s8))) -svint8_t svldff1_s8(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f64))) -svfloat64_t svldff1_f64(svbool_t, float64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f32))) -svfloat32_t svldff1_f32(svbool_t, float32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f16))) -svfloat16_t svldff1_f16(svbool_t, float16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s32))) -svint32_t svldff1_s32(svbool_t, int32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s64))) -svint64_t svldff1_s64(svbool_t, int64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s16))) -svint16_t svldff1_s16(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_u32))) -svuint32_t svldff1_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_u64))) -svuint64_t svldff1_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_f64))) -svfloat64_t svldff1_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_f32))) -svfloat32_t svldff1_gather_u32base_index_f32(svbool_t, svuint32_t, int64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_s32))) -svint32_t svldff1_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_s64))) -svint64_t svldff1_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_u32))) -svuint32_t svldff1_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_u64))) -svuint64_t svldff1_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_f64))) -svfloat64_t svldff1_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_f32))) -svfloat32_t svldff1_gather_u32base_offset_f32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_s32))) -svint32_t svldff1_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_s64))) -svint64_t svldff1_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_u32))) -svuint32_t svldff1_gather_u32base_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_u64))) -svuint64_t svldff1_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_f64))) -svfloat64_t svldff1_gather_u64base_f64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_f32))) -svfloat32_t svldff1_gather_u32base_f32(svbool_t, 
svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_s32))) -svint32_t svldff1_gather_u32base_s32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_s64))) -svint64_t svldff1_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_u32))) -svuint32_t svldff1_gather_s32index_u32(svbool_t, uint32_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_f32))) -svfloat32_t svldff1_gather_s32index_f32(svbool_t, float32_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_s32))) -svint32_t svldff1_gather_s32index_s32(svbool_t, int32_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_u32))) -svuint32_t svldff1_gather_u32index_u32(svbool_t, uint32_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_f32))) -svfloat32_t svldff1_gather_u32index_f32(svbool_t, float32_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_s32))) -svint32_t svldff1_gather_u32index_s32(svbool_t, int32_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_u64))) -svuint64_t svldff1_gather_s64index_u64(svbool_t, uint64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_f64))) -svfloat64_t svldff1_gather_s64index_f64(svbool_t, float64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_s64))) -svint64_t svldff1_gather_s64index_s64(svbool_t, int64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_u64))) -svuint64_t 
svldff1_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_f64))) -svfloat64_t svldff1_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_s64))) -svint64_t svldff1_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_u32))) -svuint32_t svldff1_gather_s32offset_u32(svbool_t, uint32_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_f32))) -svfloat32_t svldff1_gather_s32offset_f32(svbool_t, float32_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_s32))) -svint32_t svldff1_gather_s32offset_s32(svbool_t, int32_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_u32))) -svuint32_t svldff1_gather_u32offset_u32(svbool_t, uint32_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_f32))) -svfloat32_t svldff1_gather_u32offset_f32(svbool_t, float32_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_s32))) -svint32_t svldff1_gather_u32offset_s32(svbool_t, int32_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_u64))) -svuint64_t svldff1_gather_s64offset_u64(svbool_t, uint64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_f64))) -svfloat64_t svldff1_gather_s64offset_f64(svbool_t, float64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_s64))) -svint64_t svldff1_gather_s64offset_s64(svbool_t, int64_t const *, svint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_u64))) -svuint64_t svldff1_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_f64))) -svfloat64_t svldff1_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_s64))) -svint64_t svldff1_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u8))) -svuint8_t svldff1_vnum_u8(svbool_t, uint8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u32))) -svuint32_t svldff1_vnum_u32(svbool_t, uint32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u64))) -svuint64_t svldff1_vnum_u64(svbool_t, uint64_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u16))) -svuint16_t svldff1_vnum_u16(svbool_t, uint16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s8))) -svint8_t svldff1_vnum_s8(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f64))) -svfloat64_t svldff1_vnum_f64(svbool_t, float64_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f32))) -svfloat32_t svldff1_vnum_f32(svbool_t, float32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f16))) -svfloat16_t svldff1_vnum_f16(svbool_t, float16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s32))) -svint32_t svldff1_vnum_s32(svbool_t, int32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s64))) -svint64_t svldff1_vnum_s64(svbool_t, int64_t const *, int64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s16))) -svint16_t svldff1_vnum_s16(svbool_t, int16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_u32))) -svuint32_t svldff1sb_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_u64))) -svuint64_t svldff1sb_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_s32))) -svint32_t svldff1sb_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_s64))) -svint64_t svldff1sb_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_u32))) -svuint32_t svldff1sb_gather_u32base_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_u64))) -svuint64_t svldff1sb_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_s32))) -svint32_t svldff1sb_gather_u32base_s32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_s64))) -svint64_t svldff1sb_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_u32))) -svuint32_t svldff1sb_gather_s32offset_u32(svbool_t, int8_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_s32))) -svint32_t svldff1sb_gather_s32offset_s32(svbool_t, int8_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_u32))) -svuint32_t svldff1sb_gather_u32offset_u32(svbool_t, int8_t const *, 
svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_s32))) -svint32_t svldff1sb_gather_u32offset_s32(svbool_t, int8_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_u64))) -svuint64_t svldff1sb_gather_s64offset_u64(svbool_t, int8_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_s64))) -svint64_t svldff1sb_gather_s64offset_s64(svbool_t, int8_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_u64))) -svuint64_t svldff1sb_gather_u64offset_u64(svbool_t, int8_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_s64))) -svint64_t svldff1sb_gather_u64offset_s64(svbool_t, int8_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_u32))) -svuint32_t svldff1sb_vnum_u32(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_u64))) -svuint64_t svldff1sb_vnum_u64(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_u16))) -svuint16_t svldff1sb_vnum_u16(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_s32))) -svint32_t svldff1sb_vnum_s32(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_s64))) -svint64_t svldff1sb_vnum_s64(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_s16))) -svint16_t svldff1sb_vnum_s16(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_u32))) -svuint32_t svldff1sb_u32(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_u64))) 
-svuint64_t svldff1sb_u64(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_u16))) -svuint16_t svldff1sb_u16(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_s32))) -svint32_t svldff1sb_s32(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_s64))) -svint64_t svldff1sb_s64(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_s16))) -svint16_t svldff1sb_s16(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_u32))) -svuint32_t svldff1sh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_u64))) -svuint64_t svldff1sh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_s32))) -svint32_t svldff1sh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_s64))) -svint64_t svldff1sh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_u32))) -svuint32_t svldff1sh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_u64))) -svuint64_t svldff1sh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_s32))) -svint32_t svldff1sh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_s64))) -svint64_t svldff1sh_gather_u64base_offset_s64(svbool_t, svuint64_t, 
int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_u32))) -svuint32_t svldff1sh_gather_u32base_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_u64))) -svuint64_t svldff1sh_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_s32))) -svint32_t svldff1sh_gather_u32base_s32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_s64))) -svint64_t svldff1sh_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_u32))) -svuint32_t svldff1sh_gather_s32index_u32(svbool_t, int16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_s32))) -svint32_t svldff1sh_gather_s32index_s32(svbool_t, int16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_u32))) -svuint32_t svldff1sh_gather_u32index_u32(svbool_t, int16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_s32))) -svint32_t svldff1sh_gather_u32index_s32(svbool_t, int16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_u64))) -svuint64_t svldff1sh_gather_s64index_u64(svbool_t, int16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_s64))) -svint64_t svldff1sh_gather_s64index_s64(svbool_t, int16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_u64))) -svuint64_t svldff1sh_gather_u64index_u64(svbool_t, int16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_s64))) -svint64_t 
svldff1sh_gather_u64index_s64(svbool_t, int16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_u32))) -svuint32_t svldff1sh_gather_s32offset_u32(svbool_t, int16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_s32))) -svint32_t svldff1sh_gather_s32offset_s32(svbool_t, int16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_u32))) -svuint32_t svldff1sh_gather_u32offset_u32(svbool_t, int16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_s32))) -svint32_t svldff1sh_gather_u32offset_s32(svbool_t, int16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_u64))) -svuint64_t svldff1sh_gather_s64offset_u64(svbool_t, int16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_s64))) -svint64_t svldff1sh_gather_s64offset_s64(svbool_t, int16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_u64))) -svuint64_t svldff1sh_gather_u64offset_u64(svbool_t, int16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_s64))) -svint64_t svldff1sh_gather_u64offset_s64(svbool_t, int16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_u32))) -svuint32_t svldff1sh_vnum_u32(svbool_t, int16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_u64))) -svuint64_t svldff1sh_vnum_u64(svbool_t, int16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_s32))) -svint32_t svldff1sh_vnum_s32(svbool_t, int16_t const *, int64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_s64))) -svint64_t svldff1sh_vnum_s64(svbool_t, int16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_u32))) -svuint32_t svldff1sh_u32(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_u64))) -svuint64_t svldff1sh_u64(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_s32))) -svint32_t svldff1sh_s32(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_s64))) -svint64_t svldff1sh_s64(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_u64))) -svuint64_t svldff1sw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_s64))) -svint64_t svldff1sw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_u64))) -svuint64_t svldff1sw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_s64))) -svint64_t svldff1sw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_u64))) -svuint64_t svldff1sw_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_s64))) -svint64_t svldff1sw_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_u64))) -svuint64_t svldff1sw_gather_s64index_u64(svbool_t, int32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_s64))) -svint64_t 
svldff1sw_gather_s64index_s64(svbool_t, int32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_u64))) -svuint64_t svldff1sw_gather_u64index_u64(svbool_t, int32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_s64))) -svint64_t svldff1sw_gather_u64index_s64(svbool_t, int32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_u64))) -svuint64_t svldff1sw_gather_s64offset_u64(svbool_t, int32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_s64))) -svint64_t svldff1sw_gather_s64offset_s64(svbool_t, int32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_u64))) -svuint64_t svldff1sw_gather_u64offset_u64(svbool_t, int32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_s64))) -svint64_t svldff1sw_gather_u64offset_s64(svbool_t, int32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_vnum_u64))) -svuint64_t svldff1sw_vnum_u64(svbool_t, int32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_vnum_s64))) -svint64_t svldff1sw_vnum_s64(svbool_t, int32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_u64))) -svuint64_t svldff1sw_u64(svbool_t, int32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_s64))) -svint64_t svldff1sw_s64(svbool_t, int32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_u32))) -svuint32_t svldff1ub_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_u64))) -svuint64_t 
svldff1ub_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_s32))) -svint32_t svldff1ub_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_s64))) -svint64_t svldff1ub_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_u32))) -svuint32_t svldff1ub_gather_u32base_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_u64))) -svuint64_t svldff1ub_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_s32))) -svint32_t svldff1ub_gather_u32base_s32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_s64))) -svint64_t svldff1ub_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_u32))) -svuint32_t svldff1ub_gather_s32offset_u32(svbool_t, uint8_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_s32))) -svint32_t svldff1ub_gather_s32offset_s32(svbool_t, uint8_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_u32))) -svuint32_t svldff1ub_gather_u32offset_u32(svbool_t, uint8_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_s32))) -svint32_t svldff1ub_gather_u32offset_s32(svbool_t, uint8_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_u64))) -svuint64_t svldff1ub_gather_s64offset_u64(svbool_t, uint8_t const *, svint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_s64))) -svint64_t svldff1ub_gather_s64offset_s64(svbool_t, uint8_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_u64))) -svuint64_t svldff1ub_gather_u64offset_u64(svbool_t, uint8_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_s64))) -svint64_t svldff1ub_gather_u64offset_s64(svbool_t, uint8_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_u32))) -svuint32_t svldff1ub_vnum_u32(svbool_t, uint8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_u64))) -svuint64_t svldff1ub_vnum_u64(svbool_t, uint8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_u16))) -svuint16_t svldff1ub_vnum_u16(svbool_t, uint8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_s32))) -svint32_t svldff1ub_vnum_s32(svbool_t, uint8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_s64))) -svint64_t svldff1ub_vnum_s64(svbool_t, uint8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_s16))) -svint16_t svldff1ub_vnum_s16(svbool_t, uint8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_u32))) -svuint32_t svldff1ub_u32(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_u64))) -svuint64_t svldff1ub_u64(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_u16))) -svuint16_t svldff1ub_u16(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_s32))) -svint32_t svldff1ub_s32(svbool_t, uint8_t const *); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_s64))) -svint64_t svldff1ub_s64(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_s16))) -svint16_t svldff1ub_s16(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_u32))) -svuint32_t svldff1uh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_u64))) -svuint64_t svldff1uh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_s32))) -svint32_t svldff1uh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_s64))) -svint64_t svldff1uh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_u32))) -svuint32_t svldff1uh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_u64))) -svuint64_t svldff1uh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_s32))) -svint32_t svldff1uh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_s64))) -svint64_t svldff1uh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_u32))) -svuint32_t svldff1uh_gather_u32base_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_u64))) -svuint64_t svldff1uh_gather_u64base_u64(svbool_t, svuint64_t); 
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_s32))) -svint32_t svldff1uh_gather_u32base_s32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_s64))) -svint64_t svldff1uh_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_u32))) -svuint32_t svldff1uh_gather_s32index_u32(svbool_t, uint16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_s32))) -svint32_t svldff1uh_gather_s32index_s32(svbool_t, uint16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_u32))) -svuint32_t svldff1uh_gather_u32index_u32(svbool_t, uint16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_s32))) -svint32_t svldff1uh_gather_u32index_s32(svbool_t, uint16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_u64))) -svuint64_t svldff1uh_gather_s64index_u64(svbool_t, uint16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_s64))) -svint64_t svldff1uh_gather_s64index_s64(svbool_t, uint16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_u64))) -svuint64_t svldff1uh_gather_u64index_u64(svbool_t, uint16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_s64))) -svint64_t svldff1uh_gather_u64index_s64(svbool_t, uint16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_u32))) -svuint32_t svldff1uh_gather_s32offset_u32(svbool_t, uint16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_s32))) 
-svint32_t svldff1uh_gather_s32offset_s32(svbool_t, uint16_t const *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_u32))) -svuint32_t svldff1uh_gather_u32offset_u32(svbool_t, uint16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_s32))) -svint32_t svldff1uh_gather_u32offset_s32(svbool_t, uint16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_u64))) -svuint64_t svldff1uh_gather_s64offset_u64(svbool_t, uint16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_s64))) -svint64_t svldff1uh_gather_s64offset_s64(svbool_t, uint16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_u64))) -svuint64_t svldff1uh_gather_u64offset_u64(svbool_t, uint16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_s64))) -svint64_t svldff1uh_gather_u64offset_s64(svbool_t, uint16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_u32))) -svuint32_t svldff1uh_vnum_u32(svbool_t, uint16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_u64))) -svuint64_t svldff1uh_vnum_u64(svbool_t, uint16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_s32))) -svint32_t svldff1uh_vnum_s32(svbool_t, uint16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_s64))) -svint64_t svldff1uh_vnum_s64(svbool_t, uint16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_u32))) -svuint32_t svldff1uh_u32(svbool_t, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_u64))) -svuint64_t 
svldff1uh_u64(svbool_t, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_s32))) -svint32_t svldff1uh_s32(svbool_t, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_s64))) -svint64_t svldff1uh_s64(svbool_t, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_u64))) -svuint64_t svldff1uw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_s64))) -svint64_t svldff1uw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_u64))) -svuint64_t svldff1uw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_s64))) -svint64_t svldff1uw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_u64))) -svuint64_t svldff1uw_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_s64))) -svint64_t svldff1uw_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_u64))) -svuint64_t svldff1uw_gather_s64index_u64(svbool_t, uint32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_s64))) -svint64_t svldff1uw_gather_s64index_s64(svbool_t, uint32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_u64))) -svuint64_t svldff1uw_gather_u64index_u64(svbool_t, uint32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_s64))) -svint64_t 
svldff1uw_gather_u64index_s64(svbool_t, uint32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_u64))) -svuint64_t svldff1uw_gather_s64offset_u64(svbool_t, uint32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_s64))) -svint64_t svldff1uw_gather_s64offset_s64(svbool_t, uint32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_u64))) -svuint64_t svldff1uw_gather_u64offset_u64(svbool_t, uint32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_s64))) -svint64_t svldff1uw_gather_u64offset_s64(svbool_t, uint32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_vnum_u64))) -svuint64_t svldff1uw_vnum_u64(svbool_t, uint32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_vnum_s64))) -svint64_t svldff1uw_vnum_s64(svbool_t, uint32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_u64))) -svuint64_t svldff1uw_u64(svbool_t, uint32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_s64))) -svint64_t svldff1uw_s64(svbool_t, uint32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u8))) -svuint8_t svldnf1_u8(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u32))) -svuint32_t svldnf1_u32(svbool_t, uint32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u64))) -svuint64_t svldnf1_u64(svbool_t, uint64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u16))) -svuint16_t svldnf1_u16(svbool_t, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s8))) -svint8_t svldnf1_s8(svbool_t, int8_t const *); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f64))) -svfloat64_t svldnf1_f64(svbool_t, float64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f32))) -svfloat32_t svldnf1_f32(svbool_t, float32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f16))) -svfloat16_t svldnf1_f16(svbool_t, float16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s32))) -svint32_t svldnf1_s32(svbool_t, int32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s64))) -svint64_t svldnf1_s64(svbool_t, int64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s16))) -svint16_t svldnf1_s16(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u8))) -svuint8_t svldnf1_vnum_u8(svbool_t, uint8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u32))) -svuint32_t svldnf1_vnum_u32(svbool_t, uint32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u64))) -svuint64_t svldnf1_vnum_u64(svbool_t, uint64_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u16))) -svuint16_t svldnf1_vnum_u16(svbool_t, uint16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s8))) -svint8_t svldnf1_vnum_s8(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f64))) -svfloat64_t svldnf1_vnum_f64(svbool_t, float64_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f32))) -svfloat32_t svldnf1_vnum_f32(svbool_t, float32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f16))) -svfloat16_t svldnf1_vnum_f16(svbool_t, float16_t const *, int64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s32))) -svint32_t svldnf1_vnum_s32(svbool_t, int32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s64))) -svint64_t svldnf1_vnum_s64(svbool_t, int64_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s16))) -svint16_t svldnf1_vnum_s16(svbool_t, int16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_u32))) -svuint32_t svldnf1sb_vnum_u32(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_u64))) -svuint64_t svldnf1sb_vnum_u64(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_u16))) -svuint16_t svldnf1sb_vnum_u16(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_s32))) -svint32_t svldnf1sb_vnum_s32(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_s64))) -svint64_t svldnf1sb_vnum_s64(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_s16))) -svint16_t svldnf1sb_vnum_s16(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_u32))) -svuint32_t svldnf1sb_u32(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_u64))) -svuint64_t svldnf1sb_u64(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_u16))) -svuint16_t svldnf1sb_u16(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_s32))) -svint32_t svldnf1sb_s32(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_s64))) -svint64_t svldnf1sb_s64(svbool_t, int8_t const *); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_s16))) -svint16_t svldnf1sb_s16(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_u32))) -svuint32_t svldnf1sh_vnum_u32(svbool_t, int16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_u64))) -svuint64_t svldnf1sh_vnum_u64(svbool_t, int16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_s32))) -svint32_t svldnf1sh_vnum_s32(svbool_t, int16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_s64))) -svint64_t svldnf1sh_vnum_s64(svbool_t, int16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_u32))) -svuint32_t svldnf1sh_u32(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_u64))) -svuint64_t svldnf1sh_u64(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_s32))) -svint32_t svldnf1sh_s32(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_s64))) -svint64_t svldnf1sh_s64(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_vnum_u64))) -svuint64_t svldnf1sw_vnum_u64(svbool_t, int32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_vnum_s64))) -svint64_t svldnf1sw_vnum_s64(svbool_t, int32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_u64))) -svuint64_t svldnf1sw_u64(svbool_t, int32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_s64))) -svint64_t svldnf1sw_s64(svbool_t, int32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_u32))) -svuint32_t svldnf1ub_vnum_u32(svbool_t, uint8_t const *, int64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_u64))) -svuint64_t svldnf1ub_vnum_u64(svbool_t, uint8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_u16))) -svuint16_t svldnf1ub_vnum_u16(svbool_t, uint8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_s32))) -svint32_t svldnf1ub_vnum_s32(svbool_t, uint8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_s64))) -svint64_t svldnf1ub_vnum_s64(svbool_t, uint8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_s16))) -svint16_t svldnf1ub_vnum_s16(svbool_t, uint8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_u32))) -svuint32_t svldnf1ub_u32(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_u64))) -svuint64_t svldnf1ub_u64(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_u16))) -svuint16_t svldnf1ub_u16(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_s32))) -svint32_t svldnf1ub_s32(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_s64))) -svint64_t svldnf1ub_s64(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_s16))) -svint16_t svldnf1ub_s16(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_u32))) -svuint32_t svldnf1uh_vnum_u32(svbool_t, uint16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_u64))) -svuint64_t svldnf1uh_vnum_u64(svbool_t, uint16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_s32))) -svint32_t svldnf1uh_vnum_s32(svbool_t, uint16_t const *, int64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_s64))) -svint64_t svldnf1uh_vnum_s64(svbool_t, uint16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_u32))) -svuint32_t svldnf1uh_u32(svbool_t, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_u64))) -svuint64_t svldnf1uh_u64(svbool_t, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_s32))) -svint32_t svldnf1uh_s32(svbool_t, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_s64))) -svint64_t svldnf1uh_s64(svbool_t, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_vnum_u64))) -svuint64_t svldnf1uw_vnum_u64(svbool_t, uint32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_vnum_s64))) -svint64_t svldnf1uw_vnum_s64(svbool_t, uint32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_u64))) -svuint64_t svldnf1uw_u64(svbool_t, uint32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_s64))) -svint64_t svldnf1uw_s64(svbool_t, uint32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8))) -svuint8_t svldnt1_u8(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32))) -svuint32_t svldnt1_u32(svbool_t, uint32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64))) -svuint64_t svldnt1_u64(svbool_t, uint64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16))) -svuint16_t svldnt1_u16(svbool_t, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8))) -svint8_t svldnt1_s8(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64))) -svfloat64_t svldnt1_f64(svbool_t, float64_t 
const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32))) -svfloat32_t svldnt1_f32(svbool_t, float32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16))) -svfloat16_t svldnt1_f16(svbool_t, float16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32))) -svint32_t svldnt1_s32(svbool_t, int32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64))) -svint64_t svldnt1_s64(svbool_t, int64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16))) -svint16_t svldnt1_s16(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8))) -svuint8_t svldnt1_vnum_u8(svbool_t, uint8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32))) -svuint32_t svldnt1_vnum_u32(svbool_t, uint32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64))) -svuint64_t svldnt1_vnum_u64(svbool_t, uint64_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16))) -svuint16_t svldnt1_vnum_u16(svbool_t, uint16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8))) -svint8_t svldnt1_vnum_s8(svbool_t, int8_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64))) -svfloat64_t svldnt1_vnum_f64(svbool_t, float64_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32))) -svfloat32_t svldnt1_vnum_f32(svbool_t, float32_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16))) -svfloat16_t svldnt1_vnum_f16(svbool_t, float16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32))) -svint32_t svldnt1_vnum_s32(svbool_t, int32_t const *, int64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64))) -svint64_t svldnt1_vnum_s64(svbool_t, int64_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16))) -svint16_t svldnt1_vnum_s16(svbool_t, int16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u8))) -uint64_t svlen_u8(svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u32))) -uint64_t svlen_u32(svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u64))) -uint64_t svlen_u64(svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u16))) -uint64_t svlen_u16(svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s8))) -uint64_t svlen_s8(svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f64))) -uint64_t svlen_f64(svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f32))) -uint64_t svlen_f32(svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f16))) -uint64_t svlen_f16(svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s32))) -uint64_t svlen_s32(svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s64))) -uint64_t svlen_s64(svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s16))) -uint64_t svlen_s16(svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_m))) -svuint8_t svlsl_n_u8_m(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_m))) -svuint32_t svlsl_n_u32_m(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_m))) -svuint64_t svlsl_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_m))) -svuint16_t svlsl_n_u16_m(svbool_t, svuint16_t, uint16_t); 
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_m))) -svint8_t svlsl_n_s8_m(svbool_t, svint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_m))) -svint32_t svlsl_n_s32_m(svbool_t, svint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_m))) -svint64_t svlsl_n_s64_m(svbool_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_m))) -svint16_t svlsl_n_s16_m(svbool_t, svint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_x))) -svuint8_t svlsl_n_u8_x(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_x))) -svuint32_t svlsl_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_x))) -svuint64_t svlsl_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_x))) -svuint16_t svlsl_n_u16_x(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_x))) -svint8_t svlsl_n_s8_x(svbool_t, svint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_x))) -svint32_t svlsl_n_s32_x(svbool_t, svint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_x))) -svint64_t svlsl_n_s64_x(svbool_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_x))) -svint16_t svlsl_n_s16_x(svbool_t, svint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_z))) -svuint8_t svlsl_n_u8_z(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_z))) -svuint32_t svlsl_n_u32_z(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_z))) -svuint64_t svlsl_n_u64_z(svbool_t, 
svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_z))) -svuint16_t svlsl_n_u16_z(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_z))) -svint8_t svlsl_n_s8_z(svbool_t, svint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_z))) -svint32_t svlsl_n_s32_z(svbool_t, svint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_z))) -svint64_t svlsl_n_s64_z(svbool_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_z))) -svint16_t svlsl_n_s16_z(svbool_t, svint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_m))) -svuint8_t svlsl_u8_m(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_m))) -svuint32_t svlsl_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_m))) -svuint64_t svlsl_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_m))) -svuint16_t svlsl_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_m))) -svint8_t svlsl_s8_m(svbool_t, svint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_m))) -svint32_t svlsl_s32_m(svbool_t, svint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_m))) -svint64_t svlsl_s64_m(svbool_t, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_m))) -svint16_t svlsl_s16_m(svbool_t, svint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_x))) -svuint8_t svlsl_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_x))) -svuint32_t svlsl_u32_x(svbool_t, 
svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_x))) -svuint64_t svlsl_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_x))) -svuint16_t svlsl_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_x))) -svint8_t svlsl_s8_x(svbool_t, svint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_x))) -svint32_t svlsl_s32_x(svbool_t, svint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_x))) -svint64_t svlsl_s64_x(svbool_t, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_x))) -svint16_t svlsl_s16_x(svbool_t, svint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_z))) -svuint8_t svlsl_u8_z(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_z))) -svuint32_t svlsl_u32_z(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_z))) -svuint64_t svlsl_u64_z(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_z))) -svuint16_t svlsl_u16_z(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_z))) -svint8_t svlsl_s8_z(svbool_t, svint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_z))) -svint32_t svlsl_s32_z(svbool_t, svint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_z))) -svint64_t svlsl_s64_z(svbool_t, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_z))) -svint16_t svlsl_s16_z(svbool_t, svint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_m))) -svuint8_t 
svlsl_wide_n_u8_m(svbool_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_m))) -svuint32_t svlsl_wide_n_u32_m(svbool_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_m))) -svuint16_t svlsl_wide_n_u16_m(svbool_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_m))) -svint8_t svlsl_wide_n_s8_m(svbool_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_m))) -svint32_t svlsl_wide_n_s32_m(svbool_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_m))) -svint16_t svlsl_wide_n_s16_m(svbool_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_x))) -svuint8_t svlsl_wide_n_u8_x(svbool_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_x))) -svuint32_t svlsl_wide_n_u32_x(svbool_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_x))) -svuint16_t svlsl_wide_n_u16_x(svbool_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_x))) -svint8_t svlsl_wide_n_s8_x(svbool_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_x))) -svint32_t svlsl_wide_n_s32_x(svbool_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_x))) -svint16_t svlsl_wide_n_s16_x(svbool_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_z))) -svuint8_t svlsl_wide_n_u8_z(svbool_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_z))) -svuint32_t svlsl_wide_n_u32_z(svbool_t, svuint32_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_z))) -svuint16_t svlsl_wide_n_u16_z(svbool_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_z))) -svint8_t svlsl_wide_n_s8_z(svbool_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_z))) -svint32_t svlsl_wide_n_s32_z(svbool_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_z))) -svint16_t svlsl_wide_n_s16_z(svbool_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_m))) -svuint8_t svlsl_wide_u8_m(svbool_t, svuint8_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_m))) -svuint32_t svlsl_wide_u32_m(svbool_t, svuint32_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_m))) -svuint16_t svlsl_wide_u16_m(svbool_t, svuint16_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_m))) -svint8_t svlsl_wide_s8_m(svbool_t, svint8_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_m))) -svint32_t svlsl_wide_s32_m(svbool_t, svint32_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_m))) -svint16_t svlsl_wide_s16_m(svbool_t, svint16_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_x))) -svuint8_t svlsl_wide_u8_x(svbool_t, svuint8_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_x))) -svuint32_t svlsl_wide_u32_x(svbool_t, svuint32_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_x))) -svuint16_t svlsl_wide_u16_x(svbool_t, svuint16_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_x))) -svint8_t svlsl_wide_s8_x(svbool_t, svint8_t, svuint64_t); 
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_x))) -svint32_t svlsl_wide_s32_x(svbool_t, svint32_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_x))) -svint16_t svlsl_wide_s16_x(svbool_t, svint16_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_z))) -svuint8_t svlsl_wide_u8_z(svbool_t, svuint8_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_z))) -svuint32_t svlsl_wide_u32_z(svbool_t, svuint32_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_z))) -svuint16_t svlsl_wide_u16_z(svbool_t, svuint16_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_z))) -svint8_t svlsl_wide_s8_z(svbool_t, svint8_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_z))) -svint32_t svlsl_wide_s32_z(svbool_t, svint32_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_z))) -svint16_t svlsl_wide_s16_z(svbool_t, svint16_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_m))) -svuint8_t svlsr_n_u8_m(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_m))) -svuint32_t svlsr_n_u32_m(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_m))) -svuint64_t svlsr_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_m))) -svuint16_t svlsr_n_u16_m(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_x))) -svuint8_t svlsr_n_u8_x(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_x))) -svuint32_t svlsr_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_x))) -svuint64_t svlsr_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_x))) -svuint16_t svlsr_n_u16_x(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_z))) -svuint8_t svlsr_n_u8_z(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_z))) -svuint32_t svlsr_n_u32_z(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_z))) -svuint64_t svlsr_n_u64_z(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_z))) -svuint16_t svlsr_n_u16_z(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_m))) -svuint8_t svlsr_u8_m(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_m))) -svuint32_t svlsr_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_m))) -svuint64_t svlsr_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_m))) -svuint16_t svlsr_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_x))) -svuint8_t svlsr_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_x))) -svuint32_t svlsr_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_x))) -svuint64_t svlsr_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_x))) -svuint16_t svlsr_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_z))) -svuint8_t svlsr_u8_z(svbool_t, svuint8_t, 
svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_z))) -svuint32_t svlsr_u32_z(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_z))) -svuint64_t svlsr_u64_z(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_z))) -svuint16_t svlsr_u16_z(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_m))) -svuint8_t svlsr_wide_n_u8_m(svbool_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_m))) -svuint32_t svlsr_wide_n_u32_m(svbool_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_m))) -svuint16_t svlsr_wide_n_u16_m(svbool_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_x))) -svuint8_t svlsr_wide_n_u8_x(svbool_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_x))) -svuint32_t svlsr_wide_n_u32_x(svbool_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_x))) -svuint16_t svlsr_wide_n_u16_x(svbool_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_z))) -svuint8_t svlsr_wide_n_u8_z(svbool_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_z))) -svuint32_t svlsr_wide_n_u32_z(svbool_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_z))) -svuint16_t svlsr_wide_n_u16_z(svbool_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_m))) -svuint8_t svlsr_wide_u8_m(svbool_t, svuint8_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_m))) -svuint32_t svlsr_wide_u32_m(svbool_t, 
svuint32_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_m))) -svuint16_t svlsr_wide_u16_m(svbool_t, svuint16_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_x))) -svuint8_t svlsr_wide_u8_x(svbool_t, svuint8_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_x))) -svuint32_t svlsr_wide_u32_x(svbool_t, svuint32_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_x))) -svuint16_t svlsr_wide_u16_x(svbool_t, svuint16_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_z))) -svuint8_t svlsr_wide_u8_z(svbool_t, svuint8_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_z))) -svuint32_t svlsr_wide_u32_z(svbool_t, svuint32_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_z))) -svuint16_t svlsr_wide_u16_z(svbool_t, svuint16_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_m))) -svfloat64_t svmad_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_m))) -svfloat32_t svmad_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_m))) -svfloat16_t svmad_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_x))) -svfloat64_t svmad_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_x))) -svfloat32_t svmad_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_x))) -svfloat16_t svmad_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_z))) -svfloat64_t svmad_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_z))) -svfloat32_t svmad_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_z))) -svfloat16_t svmad_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_m))) -svuint8_t svmad_n_u8_m(svbool_t, svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_m))) -svuint32_t svmad_n_u32_m(svbool_t, svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_m))) -svuint64_t svmad_n_u64_m(svbool_t, svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_m))) -svuint16_t svmad_n_u16_m(svbool_t, svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_m))) -svint8_t svmad_n_s8_m(svbool_t, svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_m))) -svint32_t svmad_n_s32_m(svbool_t, svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_m))) -svint64_t svmad_n_s64_m(svbool_t, svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_m))) -svint16_t svmad_n_s16_m(svbool_t, svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_x))) -svuint8_t svmad_n_u8_x(svbool_t, svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_x))) -svuint32_t svmad_n_u32_x(svbool_t, svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_x))) -svuint64_t 
svmad_n_u64_x(svbool_t, svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_x))) -svuint16_t svmad_n_u16_x(svbool_t, svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_x))) -svint8_t svmad_n_s8_x(svbool_t, svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_x))) -svint32_t svmad_n_s32_x(svbool_t, svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_x))) -svint64_t svmad_n_s64_x(svbool_t, svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_x))) -svint16_t svmad_n_s16_x(svbool_t, svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_z))) -svuint8_t svmad_n_u8_z(svbool_t, svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_z))) -svuint32_t svmad_n_u32_z(svbool_t, svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_z))) -svuint64_t svmad_n_u64_z(svbool_t, svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_z))) -svuint16_t svmad_n_u16_z(svbool_t, svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_z))) -svint8_t svmad_n_s8_z(svbool_t, svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_z))) -svint32_t svmad_n_s32_z(svbool_t, svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_z))) -svint64_t svmad_n_s64_z(svbool_t, svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_z))) -svint16_t svmad_n_s16_z(svbool_t, svint16_t, svint16_t, int16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_m))) -svfloat64_t svmad_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_m))) -svfloat32_t svmad_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_m))) -svfloat16_t svmad_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_x))) -svfloat64_t svmad_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_x))) -svfloat32_t svmad_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_x))) -svfloat16_t svmad_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_z))) -svfloat64_t svmad_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_z))) -svfloat32_t svmad_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_z))) -svfloat16_t svmad_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_m))) -svuint8_t svmad_u8_m(svbool_t, svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_m))) -svuint32_t svmad_u32_m(svbool_t, svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_m))) -svuint64_t svmad_u64_m(svbool_t, svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_m))) -svuint16_t svmad_u16_m(svbool_t, svuint16_t, svuint16_t, svuint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_m))) -svint8_t svmad_s8_m(svbool_t, svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_m))) -svint32_t svmad_s32_m(svbool_t, svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_m))) -svint64_t svmad_s64_m(svbool_t, svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_m))) -svint16_t svmad_s16_m(svbool_t, svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_x))) -svuint8_t svmad_u8_x(svbool_t, svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_x))) -svuint32_t svmad_u32_x(svbool_t, svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_x))) -svuint64_t svmad_u64_x(svbool_t, svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_x))) -svuint16_t svmad_u16_x(svbool_t, svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_x))) -svint8_t svmad_s8_x(svbool_t, svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_x))) -svint32_t svmad_s32_x(svbool_t, svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_x))) -svint64_t svmad_s64_x(svbool_t, svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_x))) -svint16_t svmad_s16_x(svbool_t, svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_z))) -svuint8_t svmad_u8_z(svbool_t, svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_z))) -svuint32_t svmad_u32_z(svbool_t, svuint32_t, svuint32_t, svuint32_t); 
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_z))) -svuint64_t svmad_u64_z(svbool_t, svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_z))) -svuint16_t svmad_u16_z(svbool_t, svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_z))) -svint8_t svmad_s8_z(svbool_t, svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_z))) -svint32_t svmad_s32_z(svbool_t, svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_z))) -svint64_t svmad_s64_z(svbool_t, svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_z))) -svint16_t svmad_s16_z(svbool_t, svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_m))) -svfloat64_t svmax_n_f64_m(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_m))) -svfloat32_t svmax_n_f32_m(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_m))) -svfloat16_t svmax_n_f16_m(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_x))) -svfloat64_t svmax_n_f64_x(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_x))) -svfloat32_t svmax_n_f32_x(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_x))) -svfloat16_t svmax_n_f16_x(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_z))) -svfloat64_t svmax_n_f64_z(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_z))) -svfloat32_t svmax_n_f32_z(svbool_t, svfloat32_t, float32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_z))) -svfloat16_t svmax_n_f16_z(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_m))) -svint8_t svmax_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_m))) -svint32_t svmax_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_m))) -svint64_t svmax_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_m))) -svint16_t svmax_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_x))) -svint8_t svmax_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_x))) -svint32_t svmax_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_x))) -svint64_t svmax_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_x))) -svint16_t svmax_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_z))) -svint8_t svmax_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_z))) -svint32_t svmax_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_z))) -svint64_t svmax_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_z))) -svint16_t svmax_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_m))) -svuint8_t svmax_n_u8_m(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_m))) -svuint32_t svmax_n_u32_m(svbool_t, svuint32_t, uint32_t); 
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_m))) -svuint64_t svmax_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_m))) -svuint16_t svmax_n_u16_m(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_x))) -svuint8_t svmax_n_u8_x(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_x))) -svuint32_t svmax_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_x))) -svuint64_t svmax_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_x))) -svuint16_t svmax_n_u16_x(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_z))) -svuint8_t svmax_n_u8_z(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_z))) -svuint32_t svmax_n_u32_z(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_z))) -svuint64_t svmax_n_u64_z(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_z))) -svuint16_t svmax_n_u16_z(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_m))) -svfloat64_t svmax_f64_m(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_m))) -svfloat32_t svmax_f32_m(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_m))) -svfloat16_t svmax_f16_m(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x))) -svfloat64_t svmax_f64_x(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x))) 
-svfloat32_t svmax_f32_x(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x))) -svfloat16_t svmax_f16_x(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_z))) -svfloat64_t svmax_f64_z(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_z))) -svfloat32_t svmax_f32_z(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_z))) -svfloat16_t svmax_f16_z(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_m))) -svint8_t svmax_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_m))) -svint32_t svmax_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_m))) -svint64_t svmax_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_m))) -svint16_t svmax_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x))) -svint8_t svmax_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x))) -svint32_t svmax_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x))) -svint64_t svmax_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x))) -svint16_t svmax_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_z))) -svint8_t svmax_s8_z(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_z))) -svint32_t svmax_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_z))) 
-svint64_t svmax_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_z))) -svint16_t svmax_s16_z(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_m))) -svuint8_t svmax_u8_m(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_m))) -svuint32_t svmax_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_m))) -svuint64_t svmax_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_m))) -svuint16_t svmax_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x))) -svuint8_t svmax_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x))) -svuint32_t svmax_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x))) -svuint64_t svmax_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x))) -svuint16_t svmax_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_z))) -svuint8_t svmax_u8_z(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_z))) -svuint32_t svmax_u32_z(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_z))) -svuint64_t svmax_u64_z(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_z))) -svuint16_t svmax_u16_z(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_m))) -svfloat64_t svmaxnm_n_f64_m(svbool_t, svfloat64_t, float64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_m))) -svfloat32_t svmaxnm_n_f32_m(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_m))) -svfloat16_t svmaxnm_n_f16_m(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_x))) -svfloat64_t svmaxnm_n_f64_x(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_x))) -svfloat32_t svmaxnm_n_f32_x(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_x))) -svfloat16_t svmaxnm_n_f16_x(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_z))) -svfloat64_t svmaxnm_n_f64_z(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_z))) -svfloat32_t svmaxnm_n_f32_z(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_z))) -svfloat16_t svmaxnm_n_f16_z(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_m))) -svfloat64_t svmaxnm_f64_m(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_m))) -svfloat32_t svmaxnm_f32_m(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_m))) -svfloat16_t svmaxnm_f16_m(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x))) -svfloat64_t svmaxnm_f64_x(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x))) -svfloat32_t svmaxnm_f32_x(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x))) -svfloat16_t svmaxnm_f16_x(svbool_t, svfloat16_t, svfloat16_t); 
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_z))) -svfloat64_t svmaxnm_f64_z(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_z))) -svfloat32_t svmaxnm_f32_z(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_z))) -svfloat16_t svmaxnm_f16_z(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f64))) -float64_t svmaxnmv_f64(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f32))) -float32_t svmaxnmv_f32(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f16))) -float16_t svmaxnmv_f16(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f64))) -float64_t svmaxv_f64(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f32))) -float32_t svmaxv_f32(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f16))) -float16_t svmaxv_f16(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s8))) -int8_t svmaxv_s8(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s32))) -int32_t svmaxv_s32(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s64))) -int64_t svmaxv_s64(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s16))) -int16_t svmaxv_s16(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u8))) -uint8_t svmaxv_u8(svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u32))) -uint32_t svmaxv_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u64))) -uint64_t svmaxv_u64(svbool_t, svuint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u16))) -uint16_t svmaxv_u16(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_m))) -svfloat64_t svmin_n_f64_m(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_m))) -svfloat32_t svmin_n_f32_m(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_m))) -svfloat16_t svmin_n_f16_m(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_x))) -svfloat64_t svmin_n_f64_x(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_x))) -svfloat32_t svmin_n_f32_x(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_x))) -svfloat16_t svmin_n_f16_x(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_z))) -svfloat64_t svmin_n_f64_z(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_z))) -svfloat32_t svmin_n_f32_z(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_z))) -svfloat16_t svmin_n_f16_z(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_m))) -svint8_t svmin_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_m))) -svint32_t svmin_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_m))) -svint64_t svmin_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_m))) -svint16_t svmin_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_x))) -svint8_t 
svmin_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_x))) -svint32_t svmin_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_x))) -svint64_t svmin_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_x))) -svint16_t svmin_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_z))) -svint8_t svmin_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_z))) -svint32_t svmin_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_z))) -svint64_t svmin_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_z))) -svint16_t svmin_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_m))) -svuint8_t svmin_n_u8_m(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_m))) -svuint32_t svmin_n_u32_m(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_m))) -svuint64_t svmin_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_m))) -svuint16_t svmin_n_u16_m(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_x))) -svuint8_t svmin_n_u8_x(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_x))) -svuint32_t svmin_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_x))) -svuint64_t svmin_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_x))) -svuint16_t svmin_n_u16_x(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_z))) -svuint8_t svmin_n_u8_z(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_z))) -svuint32_t svmin_n_u32_z(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_z))) -svuint64_t svmin_n_u64_z(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_z))) -svuint16_t svmin_n_u16_z(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_m))) -svfloat64_t svmin_f64_m(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_m))) -svfloat32_t svmin_f32_m(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_m))) -svfloat16_t svmin_f16_m(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x))) -svfloat64_t svmin_f64_x(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x))) -svfloat32_t svmin_f32_x(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x))) -svfloat16_t svmin_f16_x(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_z))) -svfloat64_t svmin_f64_z(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_z))) -svfloat32_t svmin_f32_z(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_z))) -svfloat16_t svmin_f16_z(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_m))) 
-svint8_t svmin_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_m))) -svint32_t svmin_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_m))) -svint64_t svmin_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_m))) -svint16_t svmin_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x))) -svint8_t svmin_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x))) -svint32_t svmin_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x))) -svint64_t svmin_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x))) -svint16_t svmin_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_z))) -svint8_t svmin_s8_z(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_z))) -svint32_t svmin_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_z))) -svint64_t svmin_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_z))) -svint16_t svmin_s16_z(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_m))) -svuint8_t svmin_u8_m(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_m))) -svuint32_t svmin_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_m))) -svuint64_t svmin_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_m))) -svuint16_t 
svmin_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x))) -svuint8_t svmin_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x))) -svuint32_t svmin_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x))) -svuint64_t svmin_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x))) -svuint16_t svmin_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_z))) -svuint8_t svmin_u8_z(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_z))) -svuint32_t svmin_u32_z(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_z))) -svuint64_t svmin_u64_z(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_z))) -svuint16_t svmin_u16_z(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_m))) -svfloat64_t svminnm_n_f64_m(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_m))) -svfloat32_t svminnm_n_f32_m(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_m))) -svfloat16_t svminnm_n_f16_m(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_x))) -svfloat64_t svminnm_n_f64_x(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_x))) -svfloat32_t svminnm_n_f32_x(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_x))) -svfloat16_t svminnm_n_f16_x(svbool_t, svfloat16_t, float16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_z))) -svfloat64_t svminnm_n_f64_z(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_z))) -svfloat32_t svminnm_n_f32_z(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_z))) -svfloat16_t svminnm_n_f16_z(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_m))) -svfloat64_t svminnm_f64_m(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_m))) -svfloat32_t svminnm_f32_m(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_m))) -svfloat16_t svminnm_f16_m(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x))) -svfloat64_t svminnm_f64_x(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x))) -svfloat32_t svminnm_f32_x(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x))) -svfloat16_t svminnm_f16_x(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_z))) -svfloat64_t svminnm_f64_z(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_z))) -svfloat32_t svminnm_f32_z(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_z))) -svfloat16_t svminnm_f16_z(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f64))) -float64_t svminnmv_f64(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f32))) -float32_t svminnmv_f32(svbool_t, svfloat32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f16))) -float16_t svminnmv_f16(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f64))) -float64_t svminv_f64(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f32))) -float32_t svminv_f32(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f16))) -float16_t svminv_f16(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s8))) -int8_t svminv_s8(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s32))) -int32_t svminv_s32(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s64))) -int64_t svminv_s64(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s16))) -int16_t svminv_s16(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u8))) -uint8_t svminv_u8(svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u32))) -uint32_t svminv_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u64))) -uint64_t svminv_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u16))) -uint16_t svminv_u16(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_m))) -svfloat64_t svmla_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_m))) -svfloat32_t svmla_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_m))) -svfloat16_t svmla_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_x))) -svfloat64_t svmla_n_f64_x(svbool_t, 
svfloat64_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_x))) -svfloat32_t svmla_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_x))) -svfloat16_t svmla_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_z))) -svfloat64_t svmla_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_z))) -svfloat32_t svmla_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_z))) -svfloat16_t svmla_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_m))) -svuint8_t svmla_n_u8_m(svbool_t, svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_m))) -svuint32_t svmla_n_u32_m(svbool_t, svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_m))) -svuint64_t svmla_n_u64_m(svbool_t, svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_m))) -svuint16_t svmla_n_u16_m(svbool_t, svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_m))) -svint8_t svmla_n_s8_m(svbool_t, svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_m))) -svint32_t svmla_n_s32_m(svbool_t, svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_m))) -svint64_t svmla_n_s64_m(svbool_t, svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_m))) -svint16_t svmla_n_s16_m(svbool_t, svint16_t, svint16_t, int16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_x))) -svuint8_t svmla_n_u8_x(svbool_t, svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_x))) -svuint32_t svmla_n_u32_x(svbool_t, svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_x))) -svuint64_t svmla_n_u64_x(svbool_t, svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_x))) -svuint16_t svmla_n_u16_x(svbool_t, svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_x))) -svint8_t svmla_n_s8_x(svbool_t, svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_x))) -svint32_t svmla_n_s32_x(svbool_t, svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_x))) -svint64_t svmla_n_s64_x(svbool_t, svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_x))) -svint16_t svmla_n_s16_x(svbool_t, svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_z))) -svuint8_t svmla_n_u8_z(svbool_t, svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_z))) -svuint32_t svmla_n_u32_z(svbool_t, svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_z))) -svuint64_t svmla_n_u64_z(svbool_t, svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_z))) -svuint16_t svmla_n_u16_z(svbool_t, svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_z))) -svint8_t svmla_n_s8_z(svbool_t, svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_z))) -svint32_t svmla_n_s32_z(svbool_t, 
svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_z))) -svint64_t svmla_n_s64_z(svbool_t, svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_z))) -svint16_t svmla_n_s16_z(svbool_t, svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_m))) -svfloat64_t svmla_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_m))) -svfloat32_t svmla_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_m))) -svfloat16_t svmla_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_x))) -svfloat64_t svmla_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_x))) -svfloat32_t svmla_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_x))) -svfloat16_t svmla_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_z))) -svfloat64_t svmla_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_z))) -svfloat32_t svmla_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_z))) -svfloat16_t svmla_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_m))) -svuint8_t svmla_u8_m(svbool_t, svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_m))) -svuint32_t svmla_u32_m(svbool_t, svuint32_t, svuint32_t, svuint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_m))) -svuint64_t svmla_u64_m(svbool_t, svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_m))) -svuint16_t svmla_u16_m(svbool_t, svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_m))) -svint8_t svmla_s8_m(svbool_t, svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_m))) -svint32_t svmla_s32_m(svbool_t, svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_m))) -svint64_t svmla_s64_m(svbool_t, svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_m))) -svint16_t svmla_s16_m(svbool_t, svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_x))) -svuint8_t svmla_u8_x(svbool_t, svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_x))) -svuint32_t svmla_u32_x(svbool_t, svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_x))) -svuint64_t svmla_u64_x(svbool_t, svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_x))) -svuint16_t svmla_u16_x(svbool_t, svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_x))) -svint8_t svmla_s8_x(svbool_t, svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_x))) -svint32_t svmla_s32_x(svbool_t, svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_x))) -svint64_t svmla_s64_x(svbool_t, svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_x))) -svint16_t svmla_s16_x(svbool_t, svint16_t, svint16_t, 
svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_z))) -svuint8_t svmla_u8_z(svbool_t, svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_z))) -svuint32_t svmla_u32_z(svbool_t, svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_z))) -svuint64_t svmla_u64_z(svbool_t, svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_z))) -svuint16_t svmla_u16_z(svbool_t, svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_z))) -svint8_t svmla_s8_z(svbool_t, svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_z))) -svint32_t svmla_s32_z(svbool_t, svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_z))) -svint64_t svmla_s64_z(svbool_t, svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_z))) -svint16_t svmla_s16_z(svbool_t, svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f64))) -svfloat64_t svmla_lane_f64(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f32))) -svfloat32_t svmla_lane_f32(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f16))) -svfloat16_t svmla_lane_f16(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_m))) -svfloat64_t svmls_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_m))) -svfloat32_t svmls_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_m))) -svfloat16_t svmls_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_x))) -svfloat64_t svmls_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_x))) -svfloat32_t svmls_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_x))) -svfloat16_t svmls_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_z))) -svfloat64_t svmls_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_z))) -svfloat32_t svmls_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_z))) -svfloat16_t svmls_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_m))) -svuint8_t svmls_n_u8_m(svbool_t, svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_m))) -svuint32_t svmls_n_u32_m(svbool_t, svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_m))) -svuint64_t svmls_n_u64_m(svbool_t, svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_m))) -svuint16_t svmls_n_u16_m(svbool_t, svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_m))) -svint8_t svmls_n_s8_m(svbool_t, svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_m))) -svint32_t svmls_n_s32_m(svbool_t, svint32_t, svint32_t, int32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_m))) -svint64_t svmls_n_s64_m(svbool_t, svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_m))) -svint16_t svmls_n_s16_m(svbool_t, svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_x))) -svuint8_t svmls_n_u8_x(svbool_t, svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_x))) -svuint32_t svmls_n_u32_x(svbool_t, svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_x))) -svuint64_t svmls_n_u64_x(svbool_t, svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_x))) -svuint16_t svmls_n_u16_x(svbool_t, svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_x))) -svint8_t svmls_n_s8_x(svbool_t, svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_x))) -svint32_t svmls_n_s32_x(svbool_t, svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_x))) -svint64_t svmls_n_s64_x(svbool_t, svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_x))) -svint16_t svmls_n_s16_x(svbool_t, svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_z))) -svuint8_t svmls_n_u8_z(svbool_t, svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_z))) -svuint32_t svmls_n_u32_z(svbool_t, svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_z))) -svuint64_t svmls_n_u64_z(svbool_t, svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_z))) -svuint16_t 
svmls_n_u16_z(svbool_t, svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_z))) -svint8_t svmls_n_s8_z(svbool_t, svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_z))) -svint32_t svmls_n_s32_z(svbool_t, svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_z))) -svint64_t svmls_n_s64_z(svbool_t, svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_z))) -svint16_t svmls_n_s16_z(svbool_t, svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_m))) -svfloat64_t svmls_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_m))) -svfloat32_t svmls_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_m))) -svfloat16_t svmls_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_x))) -svfloat64_t svmls_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_x))) -svfloat32_t svmls_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_x))) -svfloat16_t svmls_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_z))) -svfloat64_t svmls_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_z))) -svfloat32_t svmls_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_z))) -svfloat16_t svmls_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_m))) -svuint8_t svmls_u8_m(svbool_t, svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_m))) -svuint32_t svmls_u32_m(svbool_t, svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_m))) -svuint64_t svmls_u64_m(svbool_t, svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_m))) -svuint16_t svmls_u16_m(svbool_t, svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_m))) -svint8_t svmls_s8_m(svbool_t, svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_m))) -svint32_t svmls_s32_m(svbool_t, svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_m))) -svint64_t svmls_s64_m(svbool_t, svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_m))) -svint16_t svmls_s16_m(svbool_t, svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_x))) -svuint8_t svmls_u8_x(svbool_t, svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_x))) -svuint32_t svmls_u32_x(svbool_t, svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_x))) -svuint64_t svmls_u64_x(svbool_t, svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_x))) -svuint16_t svmls_u16_x(svbool_t, svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_x))) -svint8_t svmls_s8_x(svbool_t, svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_x))) -svint32_t svmls_s32_x(svbool_t, svint32_t, svint32_t, 
svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_x))) -svint64_t svmls_s64_x(svbool_t, svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_x))) -svint16_t svmls_s16_x(svbool_t, svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_z))) -svuint8_t svmls_u8_z(svbool_t, svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_z))) -svuint32_t svmls_u32_z(svbool_t, svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_z))) -svuint64_t svmls_u64_z(svbool_t, svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_z))) -svuint16_t svmls_u16_z(svbool_t, svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_z))) -svint8_t svmls_s8_z(svbool_t, svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_z))) -svint32_t svmls_s32_z(svbool_t, svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_z))) -svint64_t svmls_s64_z(svbool_t, svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_z))) -svint16_t svmls_s16_z(svbool_t, svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f64))) -svfloat64_t svmls_lane_f64(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f32))) -svfloat32_t svmls_lane_f32(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f16))) -svfloat16_t svmls_lane_f16(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmov_b_z))) 
-svbool_t svmov_b_z(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_m))) -svfloat64_t svmsb_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_m))) -svfloat32_t svmsb_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_m))) -svfloat16_t svmsb_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_x))) -svfloat64_t svmsb_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_x))) -svfloat32_t svmsb_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_x))) -svfloat16_t svmsb_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_z))) -svfloat64_t svmsb_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_z))) -svfloat32_t svmsb_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_z))) -svfloat16_t svmsb_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_m))) -svuint8_t svmsb_n_u8_m(svbool_t, svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_m))) -svuint32_t svmsb_n_u32_m(svbool_t, svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_m))) -svuint64_t svmsb_n_u64_m(svbool_t, svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_m))) -svuint16_t svmsb_n_u16_m(svbool_t, svuint16_t, svuint16_t, 
uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_m))) -svint8_t svmsb_n_s8_m(svbool_t, svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_m))) -svint32_t svmsb_n_s32_m(svbool_t, svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_m))) -svint64_t svmsb_n_s64_m(svbool_t, svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_m))) -svint16_t svmsb_n_s16_m(svbool_t, svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_x))) -svuint8_t svmsb_n_u8_x(svbool_t, svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_x))) -svuint32_t svmsb_n_u32_x(svbool_t, svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_x))) -svuint64_t svmsb_n_u64_x(svbool_t, svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_x))) -svuint16_t svmsb_n_u16_x(svbool_t, svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_x))) -svint8_t svmsb_n_s8_x(svbool_t, svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_x))) -svint32_t svmsb_n_s32_x(svbool_t, svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_x))) -svint64_t svmsb_n_s64_x(svbool_t, svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_x))) -svint16_t svmsb_n_s16_x(svbool_t, svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_z))) -svuint8_t svmsb_n_u8_z(svbool_t, svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_z))) -svuint32_t 
svmsb_n_u32_z(svbool_t, svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_z))) -svuint64_t svmsb_n_u64_z(svbool_t, svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_z))) -svuint16_t svmsb_n_u16_z(svbool_t, svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_z))) -svint8_t svmsb_n_s8_z(svbool_t, svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_z))) -svint32_t svmsb_n_s32_z(svbool_t, svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_z))) -svint64_t svmsb_n_s64_z(svbool_t, svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_z))) -svint16_t svmsb_n_s16_z(svbool_t, svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_m))) -svfloat64_t svmsb_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_m))) -svfloat32_t svmsb_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_m))) -svfloat16_t svmsb_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_x))) -svfloat64_t svmsb_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_x))) -svfloat32_t svmsb_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_x))) -svfloat16_t svmsb_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_z))) -svfloat64_t svmsb_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_z))) -svfloat32_t svmsb_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_z))) -svfloat16_t svmsb_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_m))) -svuint8_t svmsb_u8_m(svbool_t, svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_m))) -svuint32_t svmsb_u32_m(svbool_t, svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_m))) -svuint64_t svmsb_u64_m(svbool_t, svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_m))) -svuint16_t svmsb_u16_m(svbool_t, svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_m))) -svint8_t svmsb_s8_m(svbool_t, svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_m))) -svint32_t svmsb_s32_m(svbool_t, svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_m))) -svint64_t svmsb_s64_m(svbool_t, svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_m))) -svint16_t svmsb_s16_m(svbool_t, svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_x))) -svuint8_t svmsb_u8_x(svbool_t, svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_x))) -svuint32_t svmsb_u32_x(svbool_t, svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_x))) -svuint64_t svmsb_u64_x(svbool_t, svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_x))) -svuint16_t svmsb_u16_x(svbool_t, 
svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_x))) -svint8_t svmsb_s8_x(svbool_t, svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_x))) -svint32_t svmsb_s32_x(svbool_t, svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_x))) -svint64_t svmsb_s64_x(svbool_t, svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_x))) -svint16_t svmsb_s16_x(svbool_t, svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_z))) -svuint8_t svmsb_u8_z(svbool_t, svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_z))) -svuint32_t svmsb_u32_z(svbool_t, svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_z))) -svuint64_t svmsb_u64_z(svbool_t, svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_z))) -svuint16_t svmsb_u16_z(svbool_t, svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_z))) -svint8_t svmsb_s8_z(svbool_t, svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_z))) -svint32_t svmsb_s32_z(svbool_t, svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_z))) -svint64_t svmsb_s64_z(svbool_t, svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_z))) -svint16_t svmsb_s16_z(svbool_t, svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_m))) -svfloat64_t svmul_n_f64_m(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_m))) -svfloat32_t 
svmul_n_f32_m(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_m))) -svfloat16_t svmul_n_f16_m(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_x))) -svfloat64_t svmul_n_f64_x(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_x))) -svfloat32_t svmul_n_f32_x(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_x))) -svfloat16_t svmul_n_f16_x(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_z))) -svfloat64_t svmul_n_f64_z(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_z))) -svfloat32_t svmul_n_f32_z(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_z))) -svfloat16_t svmul_n_f16_z(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_m))) -svuint8_t svmul_n_u8_m(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_m))) -svuint32_t svmul_n_u32_m(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_m))) -svuint64_t svmul_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_m))) -svuint16_t svmul_n_u16_m(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_m))) -svint8_t svmul_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_m))) -svint32_t svmul_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_m))) -svint64_t svmul_n_s64_m(svbool_t, svint64_t, int64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_m))) -svint16_t svmul_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_x))) -svuint8_t svmul_n_u8_x(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_x))) -svuint32_t svmul_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_x))) -svuint64_t svmul_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_x))) -svuint16_t svmul_n_u16_x(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_x))) -svint8_t svmul_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_x))) -svint32_t svmul_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_x))) -svint64_t svmul_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_x))) -svint16_t svmul_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_z))) -svuint8_t svmul_n_u8_z(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_z))) -svuint32_t svmul_n_u32_z(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_z))) -svuint64_t svmul_n_u64_z(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_z))) -svuint16_t svmul_n_u16_z(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_z))) -svint8_t svmul_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_z))) -svint32_t svmul_n_s32_z(svbool_t, svint32_t, 
int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_z))) -svint64_t svmul_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_z))) -svint16_t svmul_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_m))) -svfloat64_t svmul_f64_m(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_m))) -svfloat32_t svmul_f32_m(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_m))) -svfloat16_t svmul_f16_m(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_x))) -svfloat64_t svmul_f64_x(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_x))) -svfloat32_t svmul_f32_x(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_x))) -svfloat16_t svmul_f16_x(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_z))) -svfloat64_t svmul_f64_z(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_z))) -svfloat32_t svmul_f32_z(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_z))) -svfloat16_t svmul_f16_z(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_m))) -svuint8_t svmul_u8_m(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_m))) -svuint32_t svmul_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_m))) -svuint64_t svmul_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_m))) 
-svuint16_t svmul_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_m))) -svint8_t svmul_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_m))) -svint32_t svmul_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_m))) -svint64_t svmul_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_m))) -svint16_t svmul_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_x))) -svuint8_t svmul_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_x))) -svuint32_t svmul_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_x))) -svuint64_t svmul_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_x))) -svuint16_t svmul_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_x))) -svint8_t svmul_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_x))) -svint32_t svmul_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_x))) -svint64_t svmul_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_x))) -svint16_t svmul_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_z))) -svuint8_t svmul_u8_z(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_z))) -svuint32_t svmul_u32_z(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_z))) -svuint64_t 
svmul_u64_z(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_z))) -svuint16_t svmul_u16_z(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_z))) -svint8_t svmul_s8_z(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_z))) -svint32_t svmul_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_z))) -svint64_t svmul_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_z))) -svint16_t svmul_s16_z(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f64))) -svfloat64_t svmul_lane_f64(svfloat64_t, svfloat64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f32))) -svfloat32_t svmul_lane_f32(svfloat32_t, svfloat32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f16))) -svfloat16_t svmul_lane_f16(svfloat16_t, svfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_m))) -svint8_t svmulh_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_m))) -svint32_t svmulh_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_m))) -svint64_t svmulh_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_m))) -svint16_t svmulh_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_x))) -svint8_t svmulh_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_x))) -svint32_t svmulh_n_s32_x(svbool_t, svint32_t, int32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_x))) -svint64_t svmulh_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_x))) -svint16_t svmulh_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_z))) -svint8_t svmulh_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_z))) -svint32_t svmulh_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_z))) -svint64_t svmulh_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_z))) -svint16_t svmulh_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_m))) -svuint8_t svmulh_n_u8_m(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_m))) -svuint32_t svmulh_n_u32_m(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_m))) -svuint64_t svmulh_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_m))) -svuint16_t svmulh_n_u16_m(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_x))) -svuint8_t svmulh_n_u8_x(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_x))) -svuint32_t svmulh_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_x))) -svuint64_t svmulh_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_x))) -svuint16_t svmulh_n_u16_x(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_z))) -svuint8_t 
svmulh_n_u8_z(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_z))) -svuint32_t svmulh_n_u32_z(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_z))) -svuint64_t svmulh_n_u64_z(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_z))) -svuint16_t svmulh_n_u16_z(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_m))) -svint8_t svmulh_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_m))) -svint32_t svmulh_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_m))) -svint64_t svmulh_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_m))) -svint16_t svmulh_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_x))) -svint8_t svmulh_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_x))) -svint32_t svmulh_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_x))) -svint64_t svmulh_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_x))) -svint16_t svmulh_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_z))) -svint8_t svmulh_s8_z(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_z))) -svint32_t svmulh_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_z))) -svint64_t svmulh_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_z))) 
-svint16_t svmulh_s16_z(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_m))) -svuint8_t svmulh_u8_m(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_m))) -svuint32_t svmulh_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_m))) -svuint64_t svmulh_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_m))) -svuint16_t svmulh_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_x))) -svuint8_t svmulh_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_x))) -svuint32_t svmulh_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_x))) -svuint64_t svmulh_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_x))) -svuint16_t svmulh_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_z))) -svuint8_t svmulh_u8_z(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_z))) -svuint32_t svmulh_u32_z(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_z))) -svuint64_t svmulh_u64_z(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_z))) -svuint16_t svmulh_u16_z(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_m))) -svfloat64_t svmulx_n_f64_m(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_m))) -svfloat32_t svmulx_n_f32_m(svbool_t, svfloat32_t, float32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_m))) -svfloat16_t svmulx_n_f16_m(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_x))) -svfloat64_t svmulx_n_f64_x(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_x))) -svfloat32_t svmulx_n_f32_x(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_x))) -svfloat16_t svmulx_n_f16_x(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_z))) -svfloat64_t svmulx_n_f64_z(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_z))) -svfloat32_t svmulx_n_f32_z(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_z))) -svfloat16_t svmulx_n_f16_z(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_m))) -svfloat64_t svmulx_f64_m(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_m))) -svfloat32_t svmulx_f32_m(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_m))) -svfloat16_t svmulx_f16_m(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_x))) -svfloat64_t svmulx_f64_x(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_x))) -svfloat32_t svmulx_f32_x(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_x))) -svfloat16_t svmulx_f16_x(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_z))) -svfloat64_t svmulx_f64_z(svbool_t, svfloat64_t, svfloat64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_z))) -svfloat32_t svmulx_f32_z(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_z))) -svfloat16_t svmulx_f16_z(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnand_b_z))) -svbool_t svnand_b_z(svbool_t, svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_m))) -svfloat64_t svneg_f64_m(svfloat64_t, svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_m))) -svfloat32_t svneg_f32_m(svfloat32_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_m))) -svfloat16_t svneg_f16_m(svfloat16_t, svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_x))) -svfloat64_t svneg_f64_x(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_x))) -svfloat32_t svneg_f32_x(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_x))) -svfloat16_t svneg_f16_x(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_z))) -svfloat64_t svneg_f64_z(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_z))) -svfloat32_t svneg_f32_z(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_z))) -svfloat16_t svneg_f16_z(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_m))) -svint8_t svneg_s8_m(svint8_t, svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_m))) -svint32_t svneg_s32_m(svint32_t, svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_m))) -svint64_t svneg_s64_m(svint64_t, svbool_t, svint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_m))) -svint16_t svneg_s16_m(svint16_t, svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_x))) -svint8_t svneg_s8_x(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_x))) -svint32_t svneg_s32_x(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_x))) -svint64_t svneg_s64_x(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_x))) -svint16_t svneg_s16_x(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_z))) -svint8_t svneg_s8_z(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_z))) -svint32_t svneg_s32_z(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_z))) -svint64_t svneg_s64_z(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_z))) -svint16_t svneg_s16_z(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_m))) -svfloat64_t svnmad_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_m))) -svfloat32_t svnmad_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_m))) -svfloat16_t svnmad_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_x))) -svfloat64_t svnmad_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_x))) -svfloat32_t svnmad_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_x))) -svfloat16_t svnmad_n_f16_x(svbool_t, svfloat16_t, 
svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_z))) -svfloat64_t svnmad_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_z))) -svfloat32_t svnmad_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_z))) -svfloat16_t svnmad_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_m))) -svfloat64_t svnmad_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_m))) -svfloat32_t svnmad_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_m))) -svfloat16_t svnmad_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_x))) -svfloat64_t svnmad_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_x))) -svfloat32_t svnmad_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_x))) -svfloat16_t svnmad_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_z))) -svfloat64_t svnmad_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_z))) -svfloat32_t svnmad_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_z))) -svfloat16_t svnmad_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_m))) -svfloat64_t svnmla_n_f64_m(svbool_t, svfloat64_t, 
svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_m))) -svfloat32_t svnmla_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_m))) -svfloat16_t svnmla_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_x))) -svfloat64_t svnmla_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_x))) -svfloat32_t svnmla_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_x))) -svfloat16_t svnmla_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_z))) -svfloat64_t svnmla_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_z))) -svfloat32_t svnmla_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_z))) -svfloat16_t svnmla_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_m))) -svfloat64_t svnmla_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_m))) -svfloat32_t svnmla_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_m))) -svfloat16_t svnmla_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_x))) -svfloat64_t svnmla_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_x))) -svfloat32_t svnmla_f32_x(svbool_t, 
svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_x))) -svfloat16_t svnmla_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_z))) -svfloat64_t svnmla_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_z))) -svfloat32_t svnmla_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_z))) -svfloat16_t svnmla_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_m))) -svfloat64_t svnmls_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_m))) -svfloat32_t svnmls_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_m))) -svfloat16_t svnmls_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_x))) -svfloat64_t svnmls_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_x))) -svfloat32_t svnmls_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_x))) -svfloat16_t svnmls_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_z))) -svfloat64_t svnmls_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_z))) -svfloat32_t svnmls_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_z))) -svfloat16_t 
svnmls_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_m))) -svfloat64_t svnmls_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_m))) -svfloat32_t svnmls_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_m))) -svfloat16_t svnmls_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_x))) -svfloat64_t svnmls_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_x))) -svfloat32_t svnmls_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_x))) -svfloat16_t svnmls_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_z))) -svfloat64_t svnmls_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_z))) -svfloat32_t svnmls_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_z))) -svfloat16_t svnmls_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_m))) -svfloat64_t svnmsb_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_m))) -svfloat32_t svnmsb_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_m))) -svfloat16_t svnmsb_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_x))) -svfloat64_t 
svnmsb_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_x))) -svfloat32_t svnmsb_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_x))) -svfloat16_t svnmsb_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_z))) -svfloat64_t svnmsb_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_z))) -svfloat32_t svnmsb_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_z))) -svfloat16_t svnmsb_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_m))) -svfloat64_t svnmsb_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_m))) -svfloat32_t svnmsb_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_m))) -svfloat16_t svnmsb_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_x))) -svfloat64_t svnmsb_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_x))) -svfloat32_t svnmsb_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_x))) -svfloat16_t svnmsb_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_z))) -svfloat64_t svnmsb_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_z))) -svfloat32_t 
svnmsb_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_z))) -svfloat16_t svnmsb_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnor_b_z))) -svbool_t svnor_b_z(svbool_t, svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_b_z))) -svbool_t svnot_b_z(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_m))) -svuint8_t svnot_u8_m(svuint8_t, svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_m))) -svuint32_t svnot_u32_m(svuint32_t, svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_m))) -svuint64_t svnot_u64_m(svuint64_t, svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_m))) -svuint16_t svnot_u16_m(svuint16_t, svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_m))) -svint8_t svnot_s8_m(svint8_t, svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_m))) -svint32_t svnot_s32_m(svint32_t, svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_m))) -svint64_t svnot_s64_m(svint64_t, svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_m))) -svint16_t svnot_s16_m(svint16_t, svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_x))) -svuint8_t svnot_u8_x(svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_x))) -svuint32_t svnot_u32_x(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_x))) -svuint64_t svnot_u64_x(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_x))) -svuint16_t svnot_u16_x(svbool_t, 
svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_x))) -svint8_t svnot_s8_x(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_x))) -svint32_t svnot_s32_x(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_x))) -svint64_t svnot_s64_x(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_x))) -svint16_t svnot_s16_x(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_z))) -svuint8_t svnot_u8_z(svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_z))) -svuint32_t svnot_u32_z(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_z))) -svuint64_t svnot_u64_z(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_z))) -svuint16_t svnot_u16_z(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_z))) -svint8_t svnot_s8_z(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_z))) -svint32_t svnot_s32_z(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_z))) -svint64_t svnot_s64_z(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_z))) -svint16_t svnot_s16_z(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorn_b_z))) -svbool_t svorn_b_z(svbool_t, svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_b_z))) -svbool_t svorr_b_z(svbool_t, svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_m))) -svuint8_t svorr_n_u8_m(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_m))) -svuint32_t svorr_n_u32_m(svbool_t, svuint32_t, uint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_m))) -svuint64_t svorr_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_m))) -svuint16_t svorr_n_u16_m(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_m))) -svint8_t svorr_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_m))) -svint32_t svorr_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_m))) -svint64_t svorr_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_m))) -svint16_t svorr_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_x))) -svuint8_t svorr_n_u8_x(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_x))) -svuint32_t svorr_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_x))) -svuint64_t svorr_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_x))) -svuint16_t svorr_n_u16_x(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_x))) -svint8_t svorr_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_x))) -svint32_t svorr_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_x))) -svint64_t svorr_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_x))) -svint16_t svorr_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_z))) -svuint8_t svorr_n_u8_z(svbool_t, svuint8_t, 
uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_z))) -svuint32_t svorr_n_u32_z(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_z))) -svuint64_t svorr_n_u64_z(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_z))) -svuint16_t svorr_n_u16_z(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_z))) -svint8_t svorr_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_z))) -svint32_t svorr_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_z))) -svint64_t svorr_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_z))) -svint16_t svorr_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_m))) -svuint8_t svorr_u8_m(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_m))) -svuint32_t svorr_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_m))) -svuint64_t svorr_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_m))) -svuint16_t svorr_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_m))) -svint8_t svorr_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_m))) -svint32_t svorr_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_m))) -svint64_t svorr_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_m))) -svint16_t svorr_s16_m(svbool_t, svint16_t, 
svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_x))) -svuint8_t svorr_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_x))) -svuint32_t svorr_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_x))) -svuint64_t svorr_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_x))) -svuint16_t svorr_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_x))) -svint8_t svorr_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_x))) -svint32_t svorr_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_x))) -svint64_t svorr_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_x))) -svint16_t svorr_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_z))) -svuint8_t svorr_u8_z(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_z))) -svuint32_t svorr_u32_z(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_z))) -svuint64_t svorr_u64_z(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_z))) -svuint16_t svorr_u16_z(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_z))) -svint8_t svorr_s8_z(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_z))) -svint32_t svorr_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_z))) -svint64_t svorr_s64_z(svbool_t, svint64_t, svint64_t); 
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_z))) -svint16_t svorr_s16_z(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u8))) -uint8_t svorv_u8(svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u32))) -uint32_t svorv_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u64))) -uint64_t svorv_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u16))) -uint16_t svorv_u16(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s8))) -int8_t svorv_s8(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s32))) -int32_t svorv_s32(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s64))) -int64_t svorv_s64(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s16))) -int16_t svorv_s16(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfalse_b))) -svbool_t svpfalse_b(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfirst_b))) -svbool_t svpfirst_b(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b8))) -svbool_t svpnext_b8(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b32))) -svbool_t svpnext_b32(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b64))) -svbool_t svpnext_b64(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b16))) -svbool_t svpnext_b16(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb))) -void svprfb(svbool_t, void const *, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base))) -void svprfb_gather_u32base(svbool_t, 
svuint32_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base))) -void svprfb_gather_u64base(svbool_t, svuint64_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base_offset))) -void svprfb_gather_u32base_offset(svbool_t, svuint32_t, int64_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base_offset))) -void svprfb_gather_u64base_offset(svbool_t, svuint64_t, int64_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s32offset))) -void svprfb_gather_s32offset(svbool_t, void const *, svint32_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32offset))) -void svprfb_gather_u32offset(svbool_t, void const *, svuint32_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s64offset))) -void svprfb_gather_s64offset(svbool_t, void const *, svint64_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64offset))) -void svprfb_gather_u64offset(svbool_t, void const *, svuint64_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_vnum))) -void svprfb_vnum(svbool_t, void const *, int64_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd))) -void svprfd(svbool_t, void const *, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base))) -void svprfd_gather_u32base(svbool_t, svuint32_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base))) -void svprfd_gather_u64base(svbool_t, svuint64_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base_index))) -void svprfd_gather_u32base_index(svbool_t, svuint32_t, int64_t, enum svprfop); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base_index))) -void svprfd_gather_u64base_index(svbool_t, svuint64_t, int64_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s32index))) -void svprfd_gather_s32index(svbool_t, void const *, svint32_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32index))) -void svprfd_gather_u32index(svbool_t, void const *, svuint32_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s64index))) -void svprfd_gather_s64index(svbool_t, void const *, svint64_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64index))) -void svprfd_gather_u64index(svbool_t, void const *, svuint64_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_vnum))) -void svprfd_vnum(svbool_t, void const *, int64_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh))) -void svprfh(svbool_t, void const *, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base))) -void svprfh_gather_u32base(svbool_t, svuint32_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base))) -void svprfh_gather_u64base(svbool_t, svuint64_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base_index))) -void svprfh_gather_u32base_index(svbool_t, svuint32_t, int64_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base_index))) -void svprfh_gather_u64base_index(svbool_t, svuint64_t, int64_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s32index))) -void svprfh_gather_s32index(svbool_t, void const *, svint32_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32index))) 
-void svprfh_gather_u32index(svbool_t, void const *, svuint32_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s64index))) -void svprfh_gather_s64index(svbool_t, void const *, svint64_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64index))) -void svprfh_gather_u64index(svbool_t, void const *, svuint64_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_vnum))) -void svprfh_vnum(svbool_t, void const *, int64_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw))) -void svprfw(svbool_t, void const *, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base))) -void svprfw_gather_u32base(svbool_t, svuint32_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base))) -void svprfw_gather_u64base(svbool_t, svuint64_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base_index))) -void svprfw_gather_u32base_index(svbool_t, svuint32_t, int64_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base_index))) -void svprfw_gather_u64base_index(svbool_t, svuint64_t, int64_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s32index))) -void svprfw_gather_s32index(svbool_t, void const *, svint32_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32index))) -void svprfw_gather_u32index(svbool_t, void const *, svuint32_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s64index))) -void svprfw_gather_s64index(svbool_t, void const *, svint64_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64index))) -void svprfw_gather_u64index(svbool_t, void const *, svuint64_t, enum svprfop); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_vnum))) -void svprfw_vnum(svbool_t, void const *, int64_t, enum svprfop); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptest_any))) -bool svptest_any(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptest_first))) -bool svptest_first(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptest_last))) -bool svptest_last(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b8))) -svbool_t svptrue_pat_b8(enum svpattern); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b32))) -svbool_t svptrue_pat_b32(enum svpattern); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b64))) -svbool_t svptrue_pat_b64(enum svpattern); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b16))) -svbool_t svptrue_pat_b16(enum svpattern); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b8))) -svbool_t svptrue_b8(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b32))) -svbool_t svptrue_b32(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b64))) -svbool_t svptrue_b64(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b16))) -svbool_t svptrue_b16(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8))) -svint8_t svqadd_n_s8(svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32))) -svint32_t svqadd_n_s32(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64))) -svint64_t svqadd_n_s64(svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16))) -svint16_t svqadd_n_s16(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8))) -svuint8_t svqadd_n_u8(svuint8_t, uint8_t); 
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32))) -svuint32_t svqadd_n_u32(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64))) -svuint64_t svqadd_n_u64(svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16))) -svuint16_t svqadd_n_u16(svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8))) -svint8_t svqadd_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32))) -svint32_t svqadd_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64))) -svint64_t svqadd_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16))) -svint16_t svqadd_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8))) -svuint8_t svqadd_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32))) -svuint32_t svqadd_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64))) -svuint64_t svqadd_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16))) -svuint16_t svqadd_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s32))) -int32_t svqdecb_n_s32(int32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s64))) -int64_t svqdecb_n_s64(int64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u32))) -uint32_t svqdecb_n_u32(uint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u64))) -uint64_t svqdecb_n_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s32))) -int32_t svqdecb_pat_n_s32(int32_t, enum svpattern, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s64))) -int64_t svqdecb_pat_n_s64(int64_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u32))) -uint32_t svqdecb_pat_n_u32(uint32_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u64))) -uint64_t svqdecb_pat_n_u64(uint64_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s32))) -int32_t svqdecd_n_s32(int32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s64))) -int64_t svqdecd_n_s64(int64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u32))) -uint32_t svqdecd_n_u32(uint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u64))) -uint64_t svqdecd_n_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_s64))) -svint64_t svqdecd_s64(svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_u64))) -svuint64_t svqdecd_u64(svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s32))) -int32_t svqdecd_pat_n_s32(int32_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s64))) -int64_t svqdecd_pat_n_s64(int64_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u32))) -uint32_t svqdecd_pat_n_u32(uint32_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u64))) -uint64_t svqdecd_pat_n_u64(uint64_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_s64))) -svint64_t svqdecd_pat_s64(svint64_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_u64))) -svuint64_t 
svqdecd_pat_u64(svuint64_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s32))) -int32_t svqdech_n_s32(int32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s64))) -int64_t svqdech_n_s64(int64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u32))) -uint32_t svqdech_n_u32(uint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u64))) -uint64_t svqdech_n_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_s16))) -svint16_t svqdech_s16(svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_u16))) -svuint16_t svqdech_u16(svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s32))) -int32_t svqdech_pat_n_s32(int32_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s64))) -int64_t svqdech_pat_n_s64(int64_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u32))) -uint32_t svqdech_pat_n_u32(uint32_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u64))) -uint64_t svqdech_pat_n_u64(uint64_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_s16))) -svint16_t svqdech_pat_s16(svint16_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_u16))) -svuint16_t svqdech_pat_u16(svuint16_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b8))) -int32_t svqdecp_n_s32_b8(int32_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b32))) -int32_t svqdecp_n_s32_b32(int32_t, svbool_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b64))) -int32_t svqdecp_n_s32_b64(int32_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b16))) -int32_t svqdecp_n_s32_b16(int32_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b8))) -int64_t svqdecp_n_s64_b8(int64_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b32))) -int64_t svqdecp_n_s64_b32(int64_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b64))) -int64_t svqdecp_n_s64_b64(int64_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b16))) -int64_t svqdecp_n_s64_b16(int64_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b8))) -uint32_t svqdecp_n_u32_b8(uint32_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b32))) -uint32_t svqdecp_n_u32_b32(uint32_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b64))) -uint32_t svqdecp_n_u32_b64(uint32_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b16))) -uint32_t svqdecp_n_u32_b16(uint32_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b8))) -uint64_t svqdecp_n_u64_b8(uint64_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b32))) -uint64_t svqdecp_n_u64_b32(uint64_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b64))) -uint64_t svqdecp_n_u64_b64(uint64_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b16))) -uint64_t svqdecp_n_u64_b16(uint64_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s32))) -svint32_t svqdecp_s32(svint32_t, svbool_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s64))) -svint64_t svqdecp_s64(svint64_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s16))) -svint16_t svqdecp_s16(svint16_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u32))) -svuint32_t svqdecp_u32(svuint32_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u64))) -svuint64_t svqdecp_u64(svuint64_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u16))) -svuint16_t svqdecp_u16(svuint16_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s32))) -int32_t svqdecw_n_s32(int32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s64))) -int64_t svqdecw_n_s64(int64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u32))) -uint32_t svqdecw_n_u32(uint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u64))) -uint64_t svqdecw_n_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_s32))) -svint32_t svqdecw_s32(svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_u32))) -svuint32_t svqdecw_u32(svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s32))) -int32_t svqdecw_pat_n_s32(int32_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s64))) -int64_t svqdecw_pat_n_s64(int64_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u32))) -uint32_t svqdecw_pat_n_u32(uint32_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u64))) -uint64_t svqdecw_pat_n_u64(uint64_t, enum svpattern, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_s32))) -svint32_t svqdecw_pat_s32(svint32_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_u32))) -svuint32_t svqdecw_pat_u32(svuint32_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s32))) -int32_t svqincb_n_s32(int32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s64))) -int64_t svqincb_n_s64(int64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u32))) -uint32_t svqincb_n_u32(uint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u64))) -uint64_t svqincb_n_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s32))) -int32_t svqincb_pat_n_s32(int32_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s64))) -int64_t svqincb_pat_n_s64(int64_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u32))) -uint32_t svqincb_pat_n_u32(uint32_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u64))) -uint64_t svqincb_pat_n_u64(uint64_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s32))) -int32_t svqincd_n_s32(int32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s64))) -int64_t svqincd_n_s64(int64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u32))) -uint32_t svqincd_n_u32(uint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u64))) -uint64_t svqincd_n_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_s64))) -svint64_t svqincd_s64(svint64_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_u64))) -svuint64_t svqincd_u64(svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s32))) -int32_t svqincd_pat_n_s32(int32_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s64))) -int64_t svqincd_pat_n_s64(int64_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u32))) -uint32_t svqincd_pat_n_u32(uint32_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u64))) -uint64_t svqincd_pat_n_u64(uint64_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_s64))) -svint64_t svqincd_pat_s64(svint64_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_u64))) -svuint64_t svqincd_pat_u64(svuint64_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s32))) -int32_t svqinch_n_s32(int32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s64))) -int64_t svqinch_n_s64(int64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u32))) -uint32_t svqinch_n_u32(uint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u64))) -uint64_t svqinch_n_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_s16))) -svint16_t svqinch_s16(svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_u16))) -svuint16_t svqinch_u16(svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s32))) -int32_t svqinch_pat_n_s32(int32_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s64))) -int64_t svqinch_pat_n_s64(int64_t, enum 
svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u32))) -uint32_t svqinch_pat_n_u32(uint32_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u64))) -uint64_t svqinch_pat_n_u64(uint64_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_s16))) -svint16_t svqinch_pat_s16(svint16_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_u16))) -svuint16_t svqinch_pat_u16(svuint16_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b8))) -int32_t svqincp_n_s32_b8(int32_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b32))) -int32_t svqincp_n_s32_b32(int32_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b64))) -int32_t svqincp_n_s32_b64(int32_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b16))) -int32_t svqincp_n_s32_b16(int32_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b8))) -int64_t svqincp_n_s64_b8(int64_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b32))) -int64_t svqincp_n_s64_b32(int64_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b64))) -int64_t svqincp_n_s64_b64(int64_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b16))) -int64_t svqincp_n_s64_b16(int64_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b8))) -uint32_t svqincp_n_u32_b8(uint32_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b32))) -uint32_t svqincp_n_u32_b32(uint32_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b64))) -uint32_t 
svqincp_n_u32_b64(uint32_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b16))) -uint32_t svqincp_n_u32_b16(uint32_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b8))) -uint64_t svqincp_n_u64_b8(uint64_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b32))) -uint64_t svqincp_n_u64_b32(uint64_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b64))) -uint64_t svqincp_n_u64_b64(uint64_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b16))) -uint64_t svqincp_n_u64_b16(uint64_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s32))) -svint32_t svqincp_s32(svint32_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s64))) -svint64_t svqincp_s64(svint64_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s16))) -svint16_t svqincp_s16(svint16_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u32))) -svuint32_t svqincp_u32(svuint32_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u64))) -svuint64_t svqincp_u64(svuint64_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u16))) -svuint16_t svqincp_u16(svuint16_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s32))) -int32_t svqincw_n_s32(int32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s64))) -int64_t svqincw_n_s64(int64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u32))) -uint32_t svqincw_n_u32(uint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u64))) -uint64_t svqincw_n_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_s32))) -svint32_t 
svqincw_s32(svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_u32))) -svuint32_t svqincw_u32(svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s32))) -int32_t svqincw_pat_n_s32(int32_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s64))) -int64_t svqincw_pat_n_s64(int64_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u32))) -uint32_t svqincw_pat_n_u32(uint32_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u64))) -uint64_t svqincw_pat_n_u64(uint64_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_s32))) -svint32_t svqincw_pat_s32(svint32_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_u32))) -svuint32_t svqincw_pat_u32(svuint32_t, enum svpattern, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8))) -svint8_t svqsub_n_s8(svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32))) -svint32_t svqsub_n_s32(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64))) -svint64_t svqsub_n_s64(svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16))) -svint16_t svqsub_n_s16(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8))) -svuint8_t svqsub_n_u8(svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32))) -svuint32_t svqsub_n_u32(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64))) -svuint64_t svqsub_n_u64(svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16))) -svuint16_t svqsub_n_u16(svuint16_t, 
uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8))) -svint8_t svqsub_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32))) -svint32_t svqsub_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64))) -svint64_t svqsub_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16))) -svint16_t svqsub_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8))) -svuint8_t svqsub_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32))) -svuint32_t svqsub_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64))) -svuint64_t svqsub_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16))) -svuint16_t svqsub_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_m))) -svuint8_t svrbit_u8_m(svuint8_t, svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_m))) -svuint32_t svrbit_u32_m(svuint32_t, svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_m))) -svuint64_t svrbit_u64_m(svuint64_t, svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_m))) -svuint16_t svrbit_u16_m(svuint16_t, svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_m))) -svint8_t svrbit_s8_m(svint8_t, svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_m))) -svint32_t svrbit_s32_m(svint32_t, svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_m))) -svint64_t svrbit_s64_m(svint64_t, svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_m))) 
-svint16_t svrbit_s16_m(svint16_t, svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_x))) -svuint8_t svrbit_u8_x(svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_x))) -svuint32_t svrbit_u32_x(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_x))) -svuint64_t svrbit_u64_x(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_x))) -svuint16_t svrbit_u16_x(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_x))) -svint8_t svrbit_s8_x(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_x))) -svint32_t svrbit_s32_x(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_x))) -svint64_t svrbit_s64_x(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_x))) -svint16_t svrbit_s16_x(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_z))) -svuint8_t svrbit_u8_z(svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_z))) -svuint32_t svrbit_u32_z(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_z))) -svuint64_t svrbit_u64_z(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_z))) -svuint16_t svrbit_u16_z(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_z))) -svint8_t svrbit_s8_z(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_z))) -svint32_t svrbit_s32_z(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_z))) -svint64_t svrbit_s64_z(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_z))) -svint16_t 
svrbit_s16_z(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrdffr))) -svbool_t svrdffr(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrdffr_z))) -svbool_t svrdffr_z(svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f64))) -svfloat64_t svrecpe_f64(svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f32))) -svfloat32_t svrecpe_f32(svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f16))) -svfloat16_t svrecpe_f16(svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f64))) -svfloat64_t svrecps_f64(svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f32))) -svfloat32_t svrecps_f32(svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f16))) -svfloat16_t svrecps_f16(svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_m))) -svfloat64_t svrecpx_f64_m(svfloat64_t, svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_m))) -svfloat32_t svrecpx_f32_m(svfloat32_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_m))) -svfloat16_t svrecpx_f16_m(svfloat16_t, svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_x))) -svfloat64_t svrecpx_f64_x(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_x))) -svfloat32_t svrecpx_f32_x(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_x))) -svfloat16_t svrecpx_f16_x(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_z))) -svfloat64_t svrecpx_f64_z(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_z))) -svfloat32_t 
svrecpx_f32_z(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_z))) -svfloat16_t svrecpx_f16_z(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u8))) -svuint8_t svrev_u8(svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u32))) -svuint32_t svrev_u32(svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u64))) -svuint64_t svrev_u64(svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u16))) -svuint16_t svrev_u16(svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s8))) -svint8_t svrev_s8(svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f64))) -svfloat64_t svrev_f64(svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f32))) -svfloat32_t svrev_f32(svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f16))) -svfloat16_t svrev_f16(svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s32))) -svint32_t svrev_s32(svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s64))) -svint64_t svrev_s64(svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s16))) -svint16_t svrev_s16(svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_b16))) -svbool_t svrev_b16(svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_b32))) -svbool_t svrev_b32(svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_b64))) -svbool_t svrev_b64(svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_b8))) -svbool_t svrev_b8(svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_m))) -svuint32_t svrevb_u32_m(svuint32_t, svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_m))) -svuint64_t 
svrevb_u64_m(svuint64_t, svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_m))) -svuint16_t svrevb_u16_m(svuint16_t, svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_m))) -svint32_t svrevb_s32_m(svint32_t, svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_m))) -svint64_t svrevb_s64_m(svint64_t, svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_m))) -svint16_t svrevb_s16_m(svint16_t, svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_x))) -svuint32_t svrevb_u32_x(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_x))) -svuint64_t svrevb_u64_x(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_x))) -svuint16_t svrevb_u16_x(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_x))) -svint32_t svrevb_s32_x(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_x))) -svint64_t svrevb_s64_x(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_x))) -svint16_t svrevb_s16_x(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_z))) -svuint32_t svrevb_u32_z(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_z))) -svuint64_t svrevb_u64_z(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_z))) -svuint16_t svrevb_u16_z(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_z))) -svint32_t svrevb_s32_z(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_z))) -svint64_t svrevb_s64_z(svbool_t, svint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_z))) -svint16_t svrevb_s16_z(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_m))) -svuint32_t svrevh_u32_m(svuint32_t, svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_m))) -svuint64_t svrevh_u64_m(svuint64_t, svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_m))) -svint32_t svrevh_s32_m(svint32_t, svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_m))) -svint64_t svrevh_s64_m(svint64_t, svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_x))) -svuint32_t svrevh_u32_x(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_x))) -svuint64_t svrevh_u64_x(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_x))) -svint32_t svrevh_s32_x(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_x))) -svint64_t svrevh_s64_x(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_z))) -svuint32_t svrevh_u32_z(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_z))) -svuint64_t svrevh_u64_z(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_z))) -svint32_t svrevh_s32_z(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_z))) -svint64_t svrevh_s64_z(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_m))) -svuint64_t svrevw_u64_m(svuint64_t, svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_m))) -svint64_t svrevw_s64_m(svint64_t, svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_x))) 
-svuint64_t svrevw_u64_x(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_x))) -svint64_t svrevw_s64_x(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_z))) -svuint64_t svrevw_u64_z(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_z))) -svint64_t svrevw_s64_z(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_m))) -svfloat64_t svrinta_f64_m(svfloat64_t, svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_m))) -svfloat32_t svrinta_f32_m(svfloat32_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_m))) -svfloat16_t svrinta_f16_m(svfloat16_t, svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_x))) -svfloat64_t svrinta_f64_x(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x))) -svfloat32_t svrinta_f32_x(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_x))) -svfloat16_t svrinta_f16_x(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_z))) -svfloat64_t svrinta_f64_z(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_z))) -svfloat32_t svrinta_f32_z(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_z))) -svfloat16_t svrinta_f16_z(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_m))) -svfloat64_t svrinti_f64_m(svfloat64_t, svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_m))) -svfloat32_t svrinti_f32_m(svfloat32_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_m))) -svfloat16_t 
svrinti_f16_m(svfloat16_t, svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_x))) -svfloat64_t svrinti_f64_x(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_x))) -svfloat32_t svrinti_f32_x(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_x))) -svfloat16_t svrinti_f16_x(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_z))) -svfloat64_t svrinti_f64_z(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_z))) -svfloat32_t svrinti_f32_z(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_z))) -svfloat16_t svrinti_f16_z(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_m))) -svfloat64_t svrintm_f64_m(svfloat64_t, svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_m))) -svfloat32_t svrintm_f32_m(svfloat32_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_m))) -svfloat16_t svrintm_f16_m(svfloat16_t, svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_x))) -svfloat64_t svrintm_f64_x(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x))) -svfloat32_t svrintm_f32_x(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_x))) -svfloat16_t svrintm_f16_x(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_z))) -svfloat64_t svrintm_f64_z(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_z))) -svfloat32_t svrintm_f32_z(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_z))) -svfloat16_t 
svrintm_f16_z(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_m))) -svfloat64_t svrintn_f64_m(svfloat64_t, svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_m))) -svfloat32_t svrintn_f32_m(svfloat32_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_m))) -svfloat16_t svrintn_f16_m(svfloat16_t, svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_x))) -svfloat64_t svrintn_f64_x(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x))) -svfloat32_t svrintn_f32_x(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_x))) -svfloat16_t svrintn_f16_x(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_z))) -svfloat64_t svrintn_f64_z(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_z))) -svfloat32_t svrintn_f32_z(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_z))) -svfloat16_t svrintn_f16_z(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_m))) -svfloat64_t svrintp_f64_m(svfloat64_t, svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_m))) -svfloat32_t svrintp_f32_m(svfloat32_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_m))) -svfloat16_t svrintp_f16_m(svfloat16_t, svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_x))) -svfloat64_t svrintp_f64_x(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x))) -svfloat32_t svrintp_f32_x(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_x))) 
-svfloat16_t svrintp_f16_x(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_z))) -svfloat64_t svrintp_f64_z(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_z))) -svfloat32_t svrintp_f32_z(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_z))) -svfloat16_t svrintp_f16_z(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_m))) -svfloat64_t svrintx_f64_m(svfloat64_t, svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_m))) -svfloat32_t svrintx_f32_m(svfloat32_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_m))) -svfloat16_t svrintx_f16_m(svfloat16_t, svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_x))) -svfloat64_t svrintx_f64_x(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_x))) -svfloat32_t svrintx_f32_x(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_x))) -svfloat16_t svrintx_f16_x(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_z))) -svfloat64_t svrintx_f64_z(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_z))) -svfloat32_t svrintx_f32_z(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_z))) -svfloat16_t svrintx_f16_z(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_m))) -svfloat64_t svrintz_f64_m(svfloat64_t, svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_m))) -svfloat32_t svrintz_f32_m(svfloat32_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_m))) 
-svfloat16_t svrintz_f16_m(svfloat16_t, svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_x))) -svfloat64_t svrintz_f64_x(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_x))) -svfloat32_t svrintz_f32_x(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_x))) -svfloat16_t svrintz_f16_x(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_z))) -svfloat64_t svrintz_f64_z(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_z))) -svfloat32_t svrintz_f32_z(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_z))) -svfloat16_t svrintz_f16_z(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f64))) -svfloat64_t svrsqrte_f64(svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f32))) -svfloat32_t svrsqrte_f32(svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f16))) -svfloat16_t svrsqrte_f16(svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f64))) -svfloat64_t svrsqrts_f64(svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f32))) -svfloat32_t svrsqrts_f32(svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f16))) -svfloat16_t svrsqrts_f16(svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_m))) -svfloat64_t svscale_n_f64_m(svbool_t, svfloat64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_m))) -svfloat32_t svscale_n_f32_m(svbool_t, svfloat32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_m))) -svfloat16_t svscale_n_f16_m(svbool_t, svfloat16_t, 
int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_x))) -svfloat64_t svscale_n_f64_x(svbool_t, svfloat64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_x))) -svfloat32_t svscale_n_f32_x(svbool_t, svfloat32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_x))) -svfloat16_t svscale_n_f16_x(svbool_t, svfloat16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_z))) -svfloat64_t svscale_n_f64_z(svbool_t, svfloat64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_z))) -svfloat32_t svscale_n_f32_z(svbool_t, svfloat32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_z))) -svfloat16_t svscale_n_f16_z(svbool_t, svfloat16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_m))) -svfloat64_t svscale_f64_m(svbool_t, svfloat64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_m))) -svfloat32_t svscale_f32_m(svbool_t, svfloat32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_m))) -svfloat16_t svscale_f16_m(svbool_t, svfloat16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_x))) -svfloat64_t svscale_f64_x(svbool_t, svfloat64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_x))) -svfloat32_t svscale_f32_x(svbool_t, svfloat32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_x))) -svfloat16_t svscale_f16_x(svbool_t, svfloat16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_z))) -svfloat64_t svscale_f64_z(svbool_t, svfloat64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_z))) -svfloat32_t svscale_f32_z(svbool_t, svfloat32_t, svint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_z))) -svfloat16_t svscale_f16_z(svbool_t, svfloat16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_b))) -svbool_t svsel_b(svbool_t, svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8))) -svuint8_t svsel_u8(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32))) -svuint32_t svsel_u32(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64))) -svuint64_t svsel_u64(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16))) -svuint16_t svsel_u16(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8))) -svint8_t svsel_s8(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64))) -svfloat64_t svsel_f64(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32))) -svfloat32_t svsel_f32(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16))) -svfloat16_t svsel_f16(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32))) -svint32_t svsel_s32(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64))) -svint64_t svsel_s64(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16))) -svint16_t svsel_s16(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u8))) -svuint8x2_t svset2_u8(svuint8x2_t, uint64_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u32))) -svuint32x2_t svset2_u32(svuint32x2_t, uint64_t, svuint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u64))) -svuint64x2_t svset2_u64(svuint64x2_t, uint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u16))) -svuint16x2_t svset2_u16(svuint16x2_t, uint64_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s8))) -svint8x2_t svset2_s8(svint8x2_t, uint64_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f64))) -svfloat64x2_t svset2_f64(svfloat64x2_t, uint64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f32))) -svfloat32x2_t svset2_f32(svfloat32x2_t, uint64_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f16))) -svfloat16x2_t svset2_f16(svfloat16x2_t, uint64_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s32))) -svint32x2_t svset2_s32(svint32x2_t, uint64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s64))) -svint64x2_t svset2_s64(svint64x2_t, uint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s16))) -svint16x2_t svset2_s16(svint16x2_t, uint64_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u8))) -svuint8x3_t svset3_u8(svuint8x3_t, uint64_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u32))) -svuint32x3_t svset3_u32(svuint32x3_t, uint64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u64))) -svuint64x3_t svset3_u64(svuint64x3_t, uint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u16))) -svuint16x3_t svset3_u16(svuint16x3_t, uint64_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s8))) -svint8x3_t svset3_s8(svint8x3_t, uint64_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f64))) -svfloat64x3_t 
svset3_f64(svfloat64x3_t, uint64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f32))) -svfloat32x3_t svset3_f32(svfloat32x3_t, uint64_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f16))) -svfloat16x3_t svset3_f16(svfloat16x3_t, uint64_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s32))) -svint32x3_t svset3_s32(svint32x3_t, uint64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s64))) -svint64x3_t svset3_s64(svint64x3_t, uint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s16))) -svint16x3_t svset3_s16(svint16x3_t, uint64_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u8))) -svuint8x4_t svset4_u8(svuint8x4_t, uint64_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u32))) -svuint32x4_t svset4_u32(svuint32x4_t, uint64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u64))) -svuint64x4_t svset4_u64(svuint64x4_t, uint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u16))) -svuint16x4_t svset4_u16(svuint16x4_t, uint64_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s8))) -svint8x4_t svset4_s8(svint8x4_t, uint64_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f64))) -svfloat64x4_t svset4_f64(svfloat64x4_t, uint64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f32))) -svfloat32x4_t svset4_f32(svfloat32x4_t, uint64_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f16))) -svfloat16x4_t svset4_f16(svfloat16x4_t, uint64_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s32))) -svint32x4_t svset4_s32(svint32x4_t, uint64_t, svint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s64))) -svint64x4_t svset4_s64(svint64x4_t, uint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s16))) -svint16x4_t svset4_s16(svint16x4_t, uint64_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsetffr))) -void svsetffr(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u8))) -svuint8_t svsplice_u8(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u32))) -svuint32_t svsplice_u32(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u64))) -svuint64_t svsplice_u64(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u16))) -svuint16_t svsplice_u16(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s8))) -svint8_t svsplice_s8(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f64))) -svfloat64_t svsplice_f64(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f32))) -svfloat32_t svsplice_f32(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f16))) -svfloat16_t svsplice_f16(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s32))) -svint32_t svsplice_s32(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s64))) -svint64_t svsplice_s64(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s16))) -svint16_t svsplice_s16(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_m))) -svfloat64_t svsqrt_f64_m(svfloat64_t, svbool_t, svfloat64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_m))) -svfloat32_t svsqrt_f32_m(svfloat32_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_m))) -svfloat16_t svsqrt_f16_m(svfloat16_t, svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_x))) -svfloat64_t svsqrt_f64_x(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_x))) -svfloat32_t svsqrt_f32_x(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_x))) -svfloat16_t svsqrt_f16_x(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_z))) -svfloat64_t svsqrt_f64_z(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_z))) -svfloat32_t svsqrt_f32_z(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_z))) -svfloat16_t svsqrt_f16_z(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8))) -void svst1_u8(svbool_t, uint8_t *, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32))) -void svst1_u32(svbool_t, uint32_t *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64))) -void svst1_u64(svbool_t, uint64_t *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16))) -void svst1_u16(svbool_t, uint16_t *, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8))) -void svst1_s8(svbool_t, int8_t *, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64))) -void svst1_f64(svbool_t, float64_t *, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32))) -void svst1_f32(svbool_t, float32_t *, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16))) -void 
svst1_f16(svbool_t, float16_t *, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32))) -void svst1_s32(svbool_t, int32_t *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64))) -void svst1_s64(svbool_t, int64_t *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16))) -void svst1_s16(svbool_t, int16_t *, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_u32))) -void svst1_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_u64))) -void svst1_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_f64))) -void svst1_scatter_u64base_index_f64(svbool_t, svuint64_t, int64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_f32))) -void svst1_scatter_u32base_index_f32(svbool_t, svuint32_t, int64_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_s32))) -void svst1_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_s64))) -void svst1_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_u32))) -void svst1_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_u64))) -void svst1_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_f64))) -void svst1_scatter_u64base_offset_f64(svbool_t, 
svuint64_t, int64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_f32))) -void svst1_scatter_u32base_offset_f32(svbool_t, svuint32_t, int64_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_s32))) -void svst1_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_s64))) -void svst1_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_u32))) -void svst1_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_u64))) -void svst1_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_f64))) -void svst1_scatter_u64base_f64(svbool_t, svuint64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_f32))) -void svst1_scatter_u32base_f32(svbool_t, svuint32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_s32))) -void svst1_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_s64))) -void svst1_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_u32))) -void svst1_scatter_s32index_u32(svbool_t, uint32_t *, svint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_f32))) -void svst1_scatter_s32index_f32(svbool_t, float32_t *, svint32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_s32))) -void svst1_scatter_s32index_s32(svbool_t, int32_t 
*, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_u32))) -void svst1_scatter_u32index_u32(svbool_t, uint32_t *, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_f32))) -void svst1_scatter_u32index_f32(svbool_t, float32_t *, svuint32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_s32))) -void svst1_scatter_u32index_s32(svbool_t, int32_t *, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_u64))) -void svst1_scatter_s64index_u64(svbool_t, uint64_t *, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_f64))) -void svst1_scatter_s64index_f64(svbool_t, float64_t *, svint64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_s64))) -void svst1_scatter_s64index_s64(svbool_t, int64_t *, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_u64))) -void svst1_scatter_u64index_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_f64))) -void svst1_scatter_u64index_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_s64))) -void svst1_scatter_u64index_s64(svbool_t, int64_t *, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_u32))) -void svst1_scatter_s32offset_u32(svbool_t, uint32_t *, svint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_f32))) -void svst1_scatter_s32offset_f32(svbool_t, float32_t *, svint32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_s32))) 
-void svst1_scatter_s32offset_s32(svbool_t, int32_t *, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_u32))) -void svst1_scatter_u32offset_u32(svbool_t, uint32_t *, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_f32))) -void svst1_scatter_u32offset_f32(svbool_t, float32_t *, svuint32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_s32))) -void svst1_scatter_u32offset_s32(svbool_t, int32_t *, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_u64))) -void svst1_scatter_s64offset_u64(svbool_t, uint64_t *, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_f64))) -void svst1_scatter_s64offset_f64(svbool_t, float64_t *, svint64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_s64))) -void svst1_scatter_s64offset_s64(svbool_t, int64_t *, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_u64))) -void svst1_scatter_u64offset_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_f64))) -void svst1_scatter_u64offset_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_s64))) -void svst1_scatter_u64offset_s64(svbool_t, int64_t *, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8))) -void svst1_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32))) -void svst1_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64))) 
-void svst1_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16))) -void svst1_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8))) -void svst1_vnum_s8(svbool_t, int8_t *, int64_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64))) -void svst1_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32))) -void svst1_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16))) -void svst1_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32))) -void svst1_vnum_s32(svbool_t, int32_t *, int64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64))) -void svst1_vnum_s64(svbool_t, int64_t *, int64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16))) -void svst1_vnum_s16(svbool_t, int16_t *, int64_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s32))) -void svst1b_s32(svbool_t, int8_t *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s64))) -void svst1b_s64(svbool_t, int8_t *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s16))) -void svst1b_s16(svbool_t, int8_t *, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u32))) -void svst1b_u32(svbool_t, uint8_t *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u64))) -void svst1b_u64(svbool_t, uint8_t *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u16))) -void svst1b_u16(svbool_t, uint8_t *, svuint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_u32))) -void svst1b_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_u64))) -void svst1b_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_s32))) -void svst1b_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_s64))) -void svst1b_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_u32))) -void svst1b_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_u64))) -void svst1b_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_s32))) -void svst1b_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_s64))) -void svst1b_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_s32))) -void svst1b_scatter_s32offset_s32(svbool_t, int8_t *, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_u32))) -void svst1b_scatter_s32offset_u32(svbool_t, uint8_t *, svint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_s32))) -void svst1b_scatter_u32offset_s32(svbool_t, int8_t *, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_u32))) -void 
svst1b_scatter_u32offset_u32(svbool_t, uint8_t *, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_s64))) -void svst1b_scatter_s64offset_s64(svbool_t, int8_t *, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_u64))) -void svst1b_scatter_s64offset_u64(svbool_t, uint8_t *, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_s64))) -void svst1b_scatter_u64offset_s64(svbool_t, int8_t *, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_u64))) -void svst1b_scatter_u64offset_u64(svbool_t, uint8_t *, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s32))) -void svst1b_vnum_s32(svbool_t, int8_t *, int64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s64))) -void svst1b_vnum_s64(svbool_t, int8_t *, int64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s16))) -void svst1b_vnum_s16(svbool_t, int8_t *, int64_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u32))) -void svst1b_vnum_u32(svbool_t, uint8_t *, int64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u64))) -void svst1b_vnum_u64(svbool_t, uint8_t *, int64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u16))) -void svst1b_vnum_u16(svbool_t, uint8_t *, int64_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s32))) -void svst1h_s32(svbool_t, int16_t *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s64))) -void svst1h_s64(svbool_t, int16_t *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u32))) -void svst1h_u32(svbool_t, uint16_t *, 
svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u64))) -void svst1h_u64(svbool_t, uint16_t *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_u32))) -void svst1h_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_u64))) -void svst1h_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_s32))) -void svst1h_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_s64))) -void svst1h_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_u32))) -void svst1h_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_u64))) -void svst1h_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_s32))) -void svst1h_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_s64))) -void svst1h_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_u32))) -void svst1h_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_u64))) -void svst1h_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_s32))) -void 
svst1h_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_s64))) -void svst1h_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_s32))) -void svst1h_scatter_s32index_s32(svbool_t, int16_t *, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_u32))) -void svst1h_scatter_s32index_u32(svbool_t, uint16_t *, svint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_s32))) -void svst1h_scatter_u32index_s32(svbool_t, int16_t *, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_u32))) -void svst1h_scatter_u32index_u32(svbool_t, uint16_t *, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_s64))) -void svst1h_scatter_s64index_s64(svbool_t, int16_t *, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_u64))) -void svst1h_scatter_s64index_u64(svbool_t, uint16_t *, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_s64))) -void svst1h_scatter_u64index_s64(svbool_t, int16_t *, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_u64))) -void svst1h_scatter_u64index_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_s32))) -void svst1h_scatter_s32offset_s32(svbool_t, int16_t *, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_u32))) -void svst1h_scatter_s32offset_u32(svbool_t, uint16_t *, svint32_t, svuint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_s32))) -void svst1h_scatter_u32offset_s32(svbool_t, int16_t *, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_u32))) -void svst1h_scatter_u32offset_u32(svbool_t, uint16_t *, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_s64))) -void svst1h_scatter_s64offset_s64(svbool_t, int16_t *, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_u64))) -void svst1h_scatter_s64offset_u64(svbool_t, uint16_t *, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_s64))) -void svst1h_scatter_u64offset_s64(svbool_t, int16_t *, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_u64))) -void svst1h_scatter_u64offset_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s32))) -void svst1h_vnum_s32(svbool_t, int16_t *, int64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s64))) -void svst1h_vnum_s64(svbool_t, int16_t *, int64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u32))) -void svst1h_vnum_u32(svbool_t, uint16_t *, int64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u64))) -void svst1h_vnum_u64(svbool_t, uint16_t *, int64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_s64))) -void svst1w_s64(svbool_t, int32_t *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_u64))) -void svst1w_u64(svbool_t, uint32_t *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_u64))) -void 
svst1w_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_s64))) -void svst1w_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_u64))) -void svst1w_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_s64))) -void svst1w_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_u64))) -void svst1w_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_s64))) -void svst1w_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_s64))) -void svst1w_scatter_s64index_s64(svbool_t, int32_t *, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_u64))) -void svst1w_scatter_s64index_u64(svbool_t, uint32_t *, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_s64))) -void svst1w_scatter_u64index_s64(svbool_t, int32_t *, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_u64))) -void svst1w_scatter_u64index_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_s64))) -void svst1w_scatter_s64offset_s64(svbool_t, int32_t *, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_u64))) -void svst1w_scatter_s64offset_u64(svbool_t, uint32_t *, svint64_t, svuint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_s64))) -void svst1w_scatter_u64offset_s64(svbool_t, int32_t *, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_u64))) -void svst1w_scatter_u64offset_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_s64))) -void svst1w_vnum_s64(svbool_t, int32_t *, int64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_u64))) -void svst1w_vnum_u64(svbool_t, uint32_t *, int64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u8))) -void svst2_u8(svbool_t, uint8_t *, svuint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u32))) -void svst2_u32(svbool_t, uint32_t *, svuint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u64))) -void svst2_u64(svbool_t, uint64_t *, svuint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u16))) -void svst2_u16(svbool_t, uint16_t *, svuint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s8))) -void svst2_s8(svbool_t, int8_t *, svint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f64))) -void svst2_f64(svbool_t, float64_t *, svfloat64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f32))) -void svst2_f32(svbool_t, float32_t *, svfloat32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f16))) -void svst2_f16(svbool_t, float16_t *, svfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s32))) -void svst2_s32(svbool_t, int32_t *, svint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s64))) -void svst2_s64(svbool_t, int64_t *, svint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s16))) -void svst2_s16(svbool_t, 
int16_t *, svint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u8))) -void svst2_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u32))) -void svst2_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u64))) -void svst2_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u16))) -void svst2_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s8))) -void svst2_vnum_s8(svbool_t, int8_t *, int64_t, svint8x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f64))) -void svst2_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f32))) -void svst2_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f16))) -void svst2_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s32))) -void svst2_vnum_s32(svbool_t, int32_t *, int64_t, svint32x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s64))) -void svst2_vnum_s64(svbool_t, int64_t *, int64_t, svint64x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s16))) -void svst2_vnum_s16(svbool_t, int16_t *, int64_t, svint16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u8))) -void svst3_u8(svbool_t, uint8_t *, svuint8x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u32))) -void svst3_u32(svbool_t, uint32_t *, svuint32x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u64))) -void svst3_u64(svbool_t, uint64_t *, svuint64x3_t); 
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u16))) -void svst3_u16(svbool_t, uint16_t *, svuint16x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s8))) -void svst3_s8(svbool_t, int8_t *, svint8x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f64))) -void svst3_f64(svbool_t, float64_t *, svfloat64x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f32))) -void svst3_f32(svbool_t, float32_t *, svfloat32x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f16))) -void svst3_f16(svbool_t, float16_t *, svfloat16x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s32))) -void svst3_s32(svbool_t, int32_t *, svint32x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s64))) -void svst3_s64(svbool_t, int64_t *, svint64x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s16))) -void svst3_s16(svbool_t, int16_t *, svint16x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u8))) -void svst3_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u32))) -void svst3_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u64))) -void svst3_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u16))) -void svst3_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s8))) -void svst3_vnum_s8(svbool_t, int8_t *, int64_t, svint8x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f64))) -void svst3_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f32))) -void svst3_vnum_f32(svbool_t, float32_t 
*, int64_t, svfloat32x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f16))) -void svst3_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s32))) -void svst3_vnum_s32(svbool_t, int32_t *, int64_t, svint32x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s64))) -void svst3_vnum_s64(svbool_t, int64_t *, int64_t, svint64x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s16))) -void svst3_vnum_s16(svbool_t, int16_t *, int64_t, svint16x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u8))) -void svst4_u8(svbool_t, uint8_t *, svuint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u32))) -void svst4_u32(svbool_t, uint32_t *, svuint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u64))) -void svst4_u64(svbool_t, uint64_t *, svuint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u16))) -void svst4_u16(svbool_t, uint16_t *, svuint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s8))) -void svst4_s8(svbool_t, int8_t *, svint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f64))) -void svst4_f64(svbool_t, float64_t *, svfloat64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f32))) -void svst4_f32(svbool_t, float32_t *, svfloat32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f16))) -void svst4_f16(svbool_t, float16_t *, svfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s32))) -void svst4_s32(svbool_t, int32_t *, svint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s64))) -void svst4_s64(svbool_t, int64_t *, svint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s16))) -void svst4_s16(svbool_t, int16_t *, svint16x4_t); 
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u8))) -void svst4_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u32))) -void svst4_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u64))) -void svst4_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u16))) -void svst4_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s8))) -void svst4_vnum_s8(svbool_t, int8_t *, int64_t, svint8x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f64))) -void svst4_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f32))) -void svst4_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f16))) -void svst4_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s32))) -void svst4_vnum_s32(svbool_t, int32_t *, int64_t, svint32x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s64))) -void svst4_vnum_s64(svbool_t, int64_t *, int64_t, svint64x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s16))) -void svst4_vnum_s16(svbool_t, int16_t *, int64_t, svint16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8))) -void svstnt1_u8(svbool_t, uint8_t *, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32))) -void svstnt1_u32(svbool_t, uint32_t *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64))) -void svstnt1_u64(svbool_t, uint64_t *, svuint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16))) -void svstnt1_u16(svbool_t, uint16_t *, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8))) -void svstnt1_s8(svbool_t, int8_t *, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64))) -void svstnt1_f64(svbool_t, float64_t *, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32))) -void svstnt1_f32(svbool_t, float32_t *, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16))) -void svstnt1_f16(svbool_t, float16_t *, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32))) -void svstnt1_s32(svbool_t, int32_t *, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64))) -void svstnt1_s64(svbool_t, int64_t *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16))) -void svstnt1_s16(svbool_t, int16_t *, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8))) -void svstnt1_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32))) -void svstnt1_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64))) -void svstnt1_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16))) -void svstnt1_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8))) -void svstnt1_vnum_s8(svbool_t, int8_t *, int64_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64))) -void svstnt1_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32))) -void 
svstnt1_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16))) -void svstnt1_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32))) -void svstnt1_vnum_s32(svbool_t, int32_t *, int64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64))) -void svstnt1_vnum_s64(svbool_t, int64_t *, int64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16))) -void svstnt1_vnum_s16(svbool_t, int16_t *, int64_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_m))) -svfloat64_t svsub_n_f64_m(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_m))) -svfloat32_t svsub_n_f32_m(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_m))) -svfloat16_t svsub_n_f16_m(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_x))) -svfloat64_t svsub_n_f64_x(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_x))) -svfloat32_t svsub_n_f32_x(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_x))) -svfloat16_t svsub_n_f16_x(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_z))) -svfloat64_t svsub_n_f64_z(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_z))) -svfloat32_t svsub_n_f32_z(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_z))) -svfloat16_t svsub_n_f16_z(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_m))) -svuint8_t 
svsub_n_u8_m(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_m))) -svuint32_t svsub_n_u32_m(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_m))) -svuint64_t svsub_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_m))) -svuint16_t svsub_n_u16_m(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_m))) -svint8_t svsub_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_m))) -svint32_t svsub_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_m))) -svint64_t svsub_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_m))) -svint16_t svsub_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_x))) -svuint8_t svsub_n_u8_x(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_x))) -svuint32_t svsub_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_x))) -svuint64_t svsub_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_x))) -svuint16_t svsub_n_u16_x(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_x))) -svint8_t svsub_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_x))) -svint32_t svsub_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_x))) -svint64_t svsub_n_s64_x(svbool_t, svint64_t, int64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_x))) -svint16_t svsub_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_z))) -svuint8_t svsub_n_u8_z(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_z))) -svuint32_t svsub_n_u32_z(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_z))) -svuint64_t svsub_n_u64_z(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_z))) -svuint16_t svsub_n_u16_z(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_z))) -svint8_t svsub_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_z))) -svint32_t svsub_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_z))) -svint64_t svsub_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_z))) -svint16_t svsub_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_m))) -svfloat64_t svsub_f64_m(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_m))) -svfloat32_t svsub_f32_m(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_m))) -svfloat16_t svsub_f16_m(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_x))) -svfloat64_t svsub_f64_x(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_x))) -svfloat32_t svsub_f32_x(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_x))) -svfloat16_t 
svsub_f16_x(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_z))) -svfloat64_t svsub_f64_z(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_z))) -svfloat32_t svsub_f32_z(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_z))) -svfloat16_t svsub_f16_z(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_m))) -svuint8_t svsub_u8_m(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_m))) -svuint32_t svsub_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_m))) -svuint64_t svsub_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_m))) -svuint16_t svsub_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_m))) -svint8_t svsub_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_m))) -svint32_t svsub_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_m))) -svint64_t svsub_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_m))) -svint16_t svsub_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_x))) -svuint8_t svsub_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_x))) -svuint32_t svsub_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_x))) -svuint64_t svsub_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_x))) 
-svuint16_t svsub_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_x))) -svint8_t svsub_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_x))) -svint32_t svsub_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_x))) -svint64_t svsub_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_x))) -svint16_t svsub_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_z))) -svuint8_t svsub_u8_z(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_z))) -svuint32_t svsub_u32_z(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_z))) -svuint64_t svsub_u64_z(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_z))) -svuint16_t svsub_u16_z(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_z))) -svint8_t svsub_s8_z(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_z))) -svint32_t svsub_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_z))) -svint64_t svsub_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_z))) -svint16_t svsub_s16_z(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_m))) -svfloat64_t svsubr_n_f64_m(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_m))) -svfloat32_t svsubr_n_f32_m(svbool_t, svfloat32_t, float32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_m))) -svfloat16_t svsubr_n_f16_m(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_x))) -svfloat64_t svsubr_n_f64_x(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_x))) -svfloat32_t svsubr_n_f32_x(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_x))) -svfloat16_t svsubr_n_f16_x(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_z))) -svfloat64_t svsubr_n_f64_z(svbool_t, svfloat64_t, float64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_z))) -svfloat32_t svsubr_n_f32_z(svbool_t, svfloat32_t, float32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_z))) -svfloat16_t svsubr_n_f16_z(svbool_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_m))) -svuint8_t svsubr_n_u8_m(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_m))) -svuint32_t svsubr_n_u32_m(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_m))) -svuint64_t svsubr_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_m))) -svuint16_t svsubr_n_u16_m(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_m))) -svint8_t svsubr_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_m))) -svint32_t svsubr_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_m))) -svint64_t svsubr_n_s64_m(svbool_t, svint64_t, int64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_m))) -svint16_t svsubr_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_x))) -svuint8_t svsubr_n_u8_x(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_x))) -svuint32_t svsubr_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_x))) -svuint64_t svsubr_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_x))) -svuint16_t svsubr_n_u16_x(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_x))) -svint8_t svsubr_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_x))) -svint32_t svsubr_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_x))) -svint64_t svsubr_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_x))) -svint16_t svsubr_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_z))) -svuint8_t svsubr_n_u8_z(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_z))) -svuint32_t svsubr_n_u32_z(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_z))) -svuint64_t svsubr_n_u64_z(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_z))) -svuint16_t svsubr_n_u16_z(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_z))) -svint8_t svsubr_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_z))) -svint32_t 
svsubr_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_z))) -svint64_t svsubr_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_z))) -svint16_t svsubr_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_m))) -svfloat64_t svsubr_f64_m(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_m))) -svfloat32_t svsubr_f32_m(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_m))) -svfloat16_t svsubr_f16_m(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_x))) -svfloat64_t svsubr_f64_x(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_x))) -svfloat32_t svsubr_f32_x(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_x))) -svfloat16_t svsubr_f16_x(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_z))) -svfloat64_t svsubr_f64_z(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_z))) -svfloat32_t svsubr_f32_z(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_z))) -svfloat16_t svsubr_f16_z(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_m))) -svuint8_t svsubr_u8_m(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_m))) -svuint32_t svsubr_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_m))) -svuint64_t svsubr_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_m))) -svuint16_t svsubr_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_m))) -svint8_t svsubr_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_m))) -svint32_t svsubr_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_m))) -svint64_t svsubr_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_m))) -svint16_t svsubr_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_x))) -svuint8_t svsubr_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_x))) -svuint32_t svsubr_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_x))) -svuint64_t svsubr_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_x))) -svuint16_t svsubr_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_x))) -svint8_t svsubr_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_x))) -svint32_t svsubr_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_x))) -svint64_t svsubr_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_x))) -svint16_t svsubr_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_z))) -svuint8_t svsubr_u8_z(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_z))) -svuint32_t svsubr_u32_z(svbool_t, svuint32_t, 
svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_z))) -svuint64_t svsubr_u64_z(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_z))) -svuint16_t svsubr_u16_z(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_z))) -svint8_t svsubr_s8_z(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_z))) -svint32_t svsubr_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_z))) -svint64_t svsubr_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_z))) -svint16_t svsubr_s16_z(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u8))) -svuint8_t svtbl_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u32))) -svuint32_t svtbl_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u64))) -svuint64_t svtbl_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u16))) -svuint16_t svtbl_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s8))) -svint8_t svtbl_s8(svint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f64))) -svfloat64_t svtbl_f64(svfloat64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f32))) -svfloat32_t svtbl_f32(svfloat32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f16))) -svfloat16_t svtbl_f16(svfloat16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s32))) -svint32_t svtbl_s32(svint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s64))) -svint64_t 
svtbl_s64(svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s16))) -svint16_t svtbl_s16(svint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f64))) -svfloat64_t svtmad_f64(svfloat64_t, svfloat64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f32))) -svfloat32_t svtmad_f32(svfloat32_t, svfloat32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f16))) -svfloat16_t svtmad_f16(svfloat16_t, svfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u8))) -svuint8_t svtrn1_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u32))) -svuint32_t svtrn1_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u64))) -svuint64_t svtrn1_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u16))) -svuint16_t svtrn1_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s8))) -svint8_t svtrn1_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f64))) -svfloat64_t svtrn1_f64(svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f32))) -svfloat32_t svtrn1_f32(svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f16))) -svfloat16_t svtrn1_f16(svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s32))) -svint32_t svtrn1_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s64))) -svint64_t svtrn1_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s16))) -svint16_t svtrn1_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_b16))) -svbool_t 
svtrn1_b16(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_b32))) -svbool_t svtrn1_b32(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_b64))) -svbool_t svtrn1_b64(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_b8))) -svbool_t svtrn1_b8(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u8))) -svuint8_t svtrn2_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u32))) -svuint32_t svtrn2_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u64))) -svuint64_t svtrn2_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u16))) -svuint16_t svtrn2_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s8))) -svint8_t svtrn2_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f64))) -svfloat64_t svtrn2_f64(svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f32))) -svfloat32_t svtrn2_f32(svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f16))) -svfloat16_t svtrn2_f16(svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s32))) -svint32_t svtrn2_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s64))) -svint64_t svtrn2_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s16))) -svint16_t svtrn2_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b16))) -svbool_t svtrn2_b16(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b32))) -svbool_t svtrn2_b32(svbool_t, svbool_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b64))) -svbool_t svtrn2_b64(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b8))) -svbool_t svtrn2_b8(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f64))) -svfloat64_t svtsmul_f64(svfloat64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f32))) -svfloat32_t svtsmul_f32(svfloat32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f16))) -svfloat16_t svtsmul_f16(svfloat16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f64))) -svfloat64_t svtssel_f64(svfloat64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f32))) -svfloat32_t svtssel_f32(svfloat32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f16))) -svfloat16_t svtssel_f16(svfloat16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u8))) -svuint8x2_t svundef2_u8(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u32))) -svuint32x2_t svundef2_u32(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u64))) -svuint64x2_t svundef2_u64(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u16))) -svuint16x2_t svundef2_u16(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s8))) -svint8x2_t svundef2_s8(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_f64))) -svfloat64x2_t svundef2_f64(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_f32))) -svfloat32x2_t svundef2_f32(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_f16))) -svfloat16x2_t svundef2_f16(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s32))) -svint32x2_t svundef2_s32(void); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s64))) -svint64x2_t svundef2_s64(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s16))) -svint16x2_t svundef2_s16(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u8))) -svuint8x3_t svundef3_u8(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u32))) -svuint32x3_t svundef3_u32(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u64))) -svuint64x3_t svundef3_u64(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u16))) -svuint16x3_t svundef3_u16(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s8))) -svint8x3_t svundef3_s8(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_f64))) -svfloat64x3_t svundef3_f64(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_f32))) -svfloat32x3_t svundef3_f32(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_f16))) -svfloat16x3_t svundef3_f16(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s32))) -svint32x3_t svundef3_s32(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s64))) -svint64x3_t svundef3_s64(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s16))) -svint16x3_t svundef3_s16(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u8))) -svuint8x4_t svundef4_u8(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u32))) -svuint32x4_t svundef4_u32(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u64))) -svuint64x4_t svundef4_u64(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u16))) -svuint16x4_t svundef4_u16(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s8))) -svint8x4_t svundef4_s8(void); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_f64))) -svfloat64x4_t svundef4_f64(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_f32))) -svfloat32x4_t svundef4_f32(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_f16))) -svfloat16x4_t svundef4_f16(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s32))) -svint32x4_t svundef4_s32(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s64))) -svint64x4_t svundef4_s64(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s16))) -svint16x4_t svundef4_s16(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u8))) -svuint8_t svundef_u8(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u32))) -svuint32_t svundef_u32(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u64))) -svuint64_t svundef_u64(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u16))) -svuint16_t svundef_u16(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s8))) -svint8_t svundef_s8(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_f64))) -svfloat64_t svundef_f64(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_f32))) -svfloat32_t svundef_f32(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_f16))) -svfloat16_t svundef_f16(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s32))) -svint32_t svundef_s32(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s64))) -svint64_t svundef_s64(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s16))) -svint16_t svundef_s16(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_b))) -svbool_t svunpkhi_b(svbool_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s32))) -svint32_t svunpkhi_s32(svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s64))) -svint64_t svunpkhi_s64(svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s16))) -svint16_t svunpkhi_s16(svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u32))) -svuint32_t svunpkhi_u32(svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u64))) -svuint64_t svunpkhi_u64(svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u16))) -svuint16_t svunpkhi_u16(svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_b))) -svbool_t svunpklo_b(svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s32))) -svint32_t svunpklo_s32(svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s64))) -svint64_t svunpklo_s64(svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s16))) -svint16_t svunpklo_s16(svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u32))) -svuint32_t svunpklo_u32(svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u64))) -svuint64_t svunpklo_u64(svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u16))) -svuint16_t svunpklo_u16(svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u8))) -svuint8_t svuzp1_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u32))) -svuint32_t svuzp1_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u64))) -svuint64_t svuzp1_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u16))) -svuint16_t svuzp1_u16(svuint16_t, svuint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s8))) -svint8_t svuzp1_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f64))) -svfloat64_t svuzp1_f64(svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f32))) -svfloat32_t svuzp1_f32(svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f16))) -svfloat16_t svuzp1_f16(svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s32))) -svint32_t svuzp1_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s64))) -svint64_t svuzp1_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s16))) -svint16_t svuzp1_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_b16))) -svbool_t svuzp1_b16(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_b32))) -svbool_t svuzp1_b32(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_b64))) -svbool_t svuzp1_b64(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_b8))) -svbool_t svuzp1_b8(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u8))) -svuint8_t svuzp2_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u32))) -svuint32_t svuzp2_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u64))) -svuint64_t svuzp2_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u16))) -svuint16_t svuzp2_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s8))) -svint8_t svuzp2_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f64))) 
-svfloat64_t svuzp2_f64(svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f32))) -svfloat32_t svuzp2_f32(svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f16))) -svfloat16_t svuzp2_f16(svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s32))) -svint32_t svuzp2_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s64))) -svint64_t svuzp2_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s16))) -svint16_t svuzp2_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_b16))) -svbool_t svuzp2_b16(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_b32))) -svbool_t svuzp2_b32(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_b64))) -svbool_t svuzp2_b64(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_b8))) -svbool_t svuzp2_b8(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s32))) -svbool_t svwhilele_b8_s32(int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s32))) -svbool_t svwhilele_b32_s32(int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s32))) -svbool_t svwhilele_b64_s32(int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s32))) -svbool_t svwhilele_b16_s32(int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s64))) -svbool_t svwhilele_b8_s64(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s64))) -svbool_t svwhilele_b32_s64(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s64))) -svbool_t 
svwhilele_b64_s64(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s64))) -svbool_t svwhilele_b16_s64(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u32))) -svbool_t svwhilele_b8_u32(uint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u32))) -svbool_t svwhilele_b32_u32(uint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u32))) -svbool_t svwhilele_b64_u32(uint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u32))) -svbool_t svwhilele_b16_u32(uint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u64))) -svbool_t svwhilele_b8_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u64))) -svbool_t svwhilele_b32_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u64))) -svbool_t svwhilele_b64_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u64))) -svbool_t svwhilele_b16_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u32))) -svbool_t svwhilelt_b8_u32(uint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u32))) -svbool_t svwhilelt_b32_u32(uint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u32))) -svbool_t svwhilelt_b64_u32(uint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u32))) -svbool_t svwhilelt_b16_u32(uint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u64))) -svbool_t svwhilelt_b8_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u64))) -svbool_t svwhilelt_b32_u64(uint64_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u64))) -svbool_t svwhilelt_b64_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u64))) -svbool_t svwhilelt_b16_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s32))) -svbool_t svwhilelt_b8_s32(int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s32))) -svbool_t svwhilelt_b32_s32(int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s32))) -svbool_t svwhilelt_b64_s32(int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s32))) -svbool_t svwhilelt_b16_s32(int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s64))) -svbool_t svwhilelt_b8_s64(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s64))) -svbool_t svwhilelt_b32_s64(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s64))) -svbool_t svwhilelt_b64_s64(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s64))) -svbool_t svwhilelt_b16_s64(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwrffr))) -void svwrffr(svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u8))) -svuint8_t svzip1_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u32))) -svuint32_t svzip1_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u64))) -svuint64_t svzip1_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u16))) -svuint16_t svzip1_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s8))) -svint8_t svzip1_s8(svint8_t, svint8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f64))) -svfloat64_t svzip1_f64(svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f32))) -svfloat32_t svzip1_f32(svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f16))) -svfloat16_t svzip1_f16(svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s32))) -svint32_t svzip1_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s64))) -svint64_t svzip1_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s16))) -svint16_t svzip1_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_b16))) -svbool_t svzip1_b16(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_b32))) -svbool_t svzip1_b32(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_b64))) -svbool_t svzip1_b64(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_b8))) -svbool_t svzip1_b8(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u8))) -svuint8_t svzip2_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u32))) -svuint32_t svzip2_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u64))) -svuint64_t svzip2_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u16))) -svuint16_t svzip2_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s8))) -svint8_t svzip2_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f64))) -svfloat64_t svzip2_f64(svfloat64_t, svfloat64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f32))) -svfloat32_t svzip2_f32(svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f16))) -svfloat16_t svzip2_f16(svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s32))) -svint32_t svzip2_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s64))) -svint64_t svzip2_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s16))) -svint16_t svzip2_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_b16))) -svbool_t svzip2_b16(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_b32))) -svbool_t svzip2_b32(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_b64))) -svbool_t svzip2_b64(svbool_t, svbool_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_b8))) -svbool_t svzip2_b8(svbool_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_m))) -svfloat64_t svabd_m(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_m))) -svfloat32_t svabd_m(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_m))) -svfloat16_t svabd_m(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_x))) -svfloat64_t svabd_x(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_x))) -svfloat32_t svabd_x(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_x))) -svfloat16_t svabd_x(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_z))) -svfloat64_t svabd_z(svbool_t, 
svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_z))) -svfloat32_t svabd_z(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_z))) -svfloat16_t svabd_z(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_m))) -svint8_t svabd_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_m))) -svint32_t svabd_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_m))) -svint64_t svabd_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_m))) -svint16_t svabd_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_x))) -svint8_t svabd_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_x))) -svint32_t svabd_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_x))) -svint64_t svabd_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_x))) -svint16_t svabd_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_z))) -svint8_t svabd_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_z))) -svint32_t svabd_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_z))) -svint64_t svabd_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_z))) -svint16_t svabd_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_m))) -svuint8_t svabd_m(svbool_t, svuint8_t, uint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_m))) -svuint32_t svabd_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_m))) -svuint64_t svabd_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_m))) -svuint16_t svabd_m(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_x))) -svuint8_t svabd_x(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_x))) -svuint32_t svabd_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_x))) -svuint64_t svabd_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_x))) -svuint16_t svabd_x(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_z))) -svuint8_t svabd_z(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_z))) -svuint32_t svabd_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_z))) -svuint64_t svabd_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_z))) -svuint16_t svabd_z(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_m))) -svfloat64_t svabd_m(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_m))) -svfloat32_t svabd_m(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_m))) -svfloat16_t svabd_m(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_x))) -svfloat64_t svabd_x(svbool_t, svfloat64_t, svfloat64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_x))) -svfloat32_t svabd_x(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_x))) -svfloat16_t svabd_x(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_z))) -svfloat64_t svabd_z(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_z))) -svfloat32_t svabd_z(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_z))) -svfloat16_t svabd_z(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_m))) -svint8_t svabd_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_m))) -svint32_t svabd_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_m))) -svint64_t svabd_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_m))) -svint16_t svabd_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_x))) -svint8_t svabd_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_x))) -svint32_t svabd_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_x))) -svint64_t svabd_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_x))) -svint16_t svabd_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_z))) -svint8_t svabd_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_z))) -svint32_t svabd_z(svbool_t, svint32_t, svint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_z))) -svint64_t svabd_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_z))) -svint16_t svabd_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_m))) -svuint8_t svabd_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_m))) -svuint32_t svabd_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_m))) -svuint64_t svabd_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_m))) -svuint16_t svabd_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_x))) -svuint8_t svabd_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_x))) -svuint32_t svabd_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_x))) -svuint64_t svabd_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_x))) -svuint16_t svabd_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_z))) -svuint8_t svabd_z(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_z))) -svuint32_t svabd_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_z))) -svuint64_t svabd_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_z))) -svuint16_t svabd_z(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_m))) -svfloat64_t svabs_m(svfloat64_t, svbool_t, svfloat64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_m))) -svfloat32_t svabs_m(svfloat32_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_m))) -svfloat16_t svabs_m(svfloat16_t, svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_x))) -svfloat64_t svabs_x(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_x))) -svfloat32_t svabs_x(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_x))) -svfloat16_t svabs_x(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_z))) -svfloat64_t svabs_z(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_z))) -svfloat32_t svabs_z(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_z))) -svfloat16_t svabs_z(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_m))) -svint8_t svabs_m(svint8_t, svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_m))) -svint32_t svabs_m(svint32_t, svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_m))) -svint64_t svabs_m(svint64_t, svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_m))) -svint16_t svabs_m(svint16_t, svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_x))) -svint8_t svabs_x(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_x))) -svint32_t svabs_x(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_x))) -svint64_t svabs_x(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_x))) -svint16_t svabs_x(svbool_t, svint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_z))) -svint8_t svabs_z(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_z))) -svint32_t svabs_z(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_z))) -svint64_t svabs_z(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_z))) -svint16_t svabs_z(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f64))) -svbool_t svacge(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f32))) -svbool_t svacge(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f16))) -svbool_t svacge(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f64))) -svbool_t svacge(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f32))) -svbool_t svacge(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f16))) -svbool_t svacge(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f64))) -svbool_t svacgt(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f32))) -svbool_t svacgt(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f16))) -svbool_t svacgt(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f64))) -svbool_t svacgt(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f32))) -svbool_t svacgt(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f16))) -svbool_t svacgt(svbool_t, 
svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f64))) -svbool_t svacle(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f32))) -svbool_t svacle(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f16))) -svbool_t svacle(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f64))) -svbool_t svacle(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f32))) -svbool_t svacle(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f16))) -svbool_t svacle(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f64))) -svbool_t svaclt(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f32))) -svbool_t svaclt(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f16))) -svbool_t svaclt(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f64))) -svbool_t svaclt(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f32))) -svbool_t svaclt(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f16))) -svbool_t svaclt(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_m))) -svfloat64_t svadd_m(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_m))) -svfloat32_t svadd_m(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_m))) -svfloat16_t svadd_m(svbool_t, svfloat16_t, float16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_x))) -svfloat64_t svadd_x(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_x))) -svfloat32_t svadd_x(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_x))) -svfloat16_t svadd_x(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_z))) -svfloat64_t svadd_z(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_z))) -svfloat32_t svadd_z(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_z))) -svfloat16_t svadd_z(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_m))) -svuint8_t svadd_m(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_m))) -svuint32_t svadd_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_m))) -svuint64_t svadd_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_m))) -svuint16_t svadd_m(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_m))) -svint8_t svadd_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_m))) -svint32_t svadd_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_m))) -svint64_t svadd_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_m))) -svint16_t svadd_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_x))) -svuint8_t svadd_x(svbool_t, svuint8_t, uint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_x))) -svuint32_t svadd_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_x))) -svuint64_t svadd_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_x))) -svuint16_t svadd_x(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_x))) -svint8_t svadd_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_x))) -svint32_t svadd_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_x))) -svint64_t svadd_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_x))) -svint16_t svadd_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_z))) -svuint8_t svadd_z(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_z))) -svuint32_t svadd_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_z))) -svuint64_t svadd_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_z))) -svuint16_t svadd_z(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_z))) -svint8_t svadd_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_z))) -svint32_t svadd_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_z))) -svint64_t svadd_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_z))) -svint16_t svadd_z(svbool_t, svint16_t, int16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_m))) -svfloat64_t svadd_m(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_m))) -svfloat32_t svadd_m(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_m))) -svfloat16_t svadd_m(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_x))) -svfloat64_t svadd_x(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_x))) -svfloat32_t svadd_x(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_x))) -svfloat16_t svadd_x(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_z))) -svfloat64_t svadd_z(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_z))) -svfloat32_t svadd_z(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_z))) -svfloat16_t svadd_z(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_m))) -svuint8_t svadd_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_m))) -svuint32_t svadd_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_m))) -svuint64_t svadd_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_m))) -svuint16_t svadd_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_m))) -svint8_t svadd_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_m))) -svint32_t svadd_m(svbool_t, svint32_t, svint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_m))) -svint64_t svadd_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_m))) -svint16_t svadd_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_x))) -svuint8_t svadd_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_x))) -svuint32_t svadd_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_x))) -svuint64_t svadd_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_x))) -svuint16_t svadd_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_x))) -svint8_t svadd_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_x))) -svint32_t svadd_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_x))) -svint64_t svadd_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_x))) -svint16_t svadd_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_z))) -svuint8_t svadd_z(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_z))) -svuint32_t svadd_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_z))) -svuint64_t svadd_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_z))) -svuint16_t svadd_z(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_z))) -svint8_t svadd_z(svbool_t, svint8_t, svint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_z))) -svint32_t svadd_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_z))) -svint64_t svadd_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_z))) -svint16_t svadd_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f64))) -float64_t svadda(svbool_t, float64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f32))) -float32_t svadda(svbool_t, float32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f16))) -float16_t svadda(svbool_t, float16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s8))) -int64_t svaddv(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s32))) -int64_t svaddv(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s64))) -int64_t svaddv(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s16))) -int64_t svaddv(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u8))) -uint64_t svaddv(svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u32))) -uint64_t svaddv(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u64))) -uint64_t svaddv(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u16))) -uint64_t svaddv(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f64))) -float64_t svaddv(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f32))) -float32_t svaddv(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f16))) 
-float16_t svaddv(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_u32offset))) -svuint32_t svadrb_offset(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_u64offset))) -svuint64_t svadrb_offset(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_s32offset))) -svuint32_t svadrb_offset(svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_s64offset))) -svuint64_t svadrb_offset(svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_u32index))) -svuint32_t svadrd_index(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_u64index))) -svuint64_t svadrd_index(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_s32index))) -svuint32_t svadrd_index(svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_s64index))) -svuint64_t svadrd_index(svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_u32index))) -svuint32_t svadrh_index(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_u64index))) -svuint64_t svadrh_index(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_s32index))) -svuint32_t svadrh_index(svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_s64index))) -svuint64_t svadrh_index(svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_u32index))) -svuint32_t svadrw_index(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_u64index))) -svuint64_t svadrw_index(svuint64_t, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_s32index))) -svuint32_t svadrw_index(svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_s64index))) -svuint64_t svadrw_index(svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_b_z))) -svbool_t svand_z(svbool_t, svbool_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_m))) -svuint8_t svand_m(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_m))) -svuint32_t svand_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_m))) -svuint64_t svand_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_m))) -svuint16_t svand_m(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_m))) -svint8_t svand_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_m))) -svint32_t svand_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_m))) -svint64_t svand_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_m))) -svint16_t svand_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_x))) -svuint8_t svand_x(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_x))) -svuint32_t svand_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_x))) -svuint64_t svand_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_x))) -svuint16_t svand_x(svbool_t, svuint16_t, uint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_x))) -svint8_t svand_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_x))) -svint32_t svand_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_x))) -svint64_t svand_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_x))) -svint16_t svand_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_z))) -svuint8_t svand_z(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_z))) -svuint32_t svand_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_z))) -svuint64_t svand_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_z))) -svuint16_t svand_z(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_z))) -svint8_t svand_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_z))) -svint32_t svand_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_z))) -svint64_t svand_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_z))) -svint16_t svand_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_m))) -svuint8_t svand_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_m))) -svuint32_t svand_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_m))) -svuint64_t svand_m(svbool_t, svuint64_t, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_m))) -svuint16_t svand_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_m))) -svint8_t svand_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_m))) -svint32_t svand_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_m))) -svint64_t svand_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_m))) -svint16_t svand_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_x))) -svuint8_t svand_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_x))) -svuint32_t svand_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_x))) -svuint64_t svand_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_x))) -svuint16_t svand_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_x))) -svint8_t svand_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_x))) -svint32_t svand_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_x))) -svint64_t svand_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_x))) -svint16_t svand_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_z))) -svuint8_t svand_z(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_z))) -svuint32_t svand_z(svbool_t, svuint32_t, svuint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_z))) -svuint64_t svand_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_z))) -svuint16_t svand_z(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_z))) -svint8_t svand_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_z))) -svint32_t svand_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_z))) -svint64_t svand_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_z))) -svint16_t svand_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u8))) -uint8_t svandv(svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u32))) -uint32_t svandv(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u64))) -uint64_t svandv(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u16))) -uint16_t svandv(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s8))) -int8_t svandv(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s32))) -int32_t svandv(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s64))) -int64_t svandv(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s16))) -int16_t svandv(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_m))) -svint8_t svasr_m(svbool_t, svint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_m))) -svint32_t svasr_m(svbool_t, svint32_t, uint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_m))) -svint64_t svasr_m(svbool_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_m))) -svint16_t svasr_m(svbool_t, svint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_x))) -svint8_t svasr_x(svbool_t, svint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_x))) -svint32_t svasr_x(svbool_t, svint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_x))) -svint64_t svasr_x(svbool_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_x))) -svint16_t svasr_x(svbool_t, svint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_z))) -svint8_t svasr_z(svbool_t, svint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_z))) -svint32_t svasr_z(svbool_t, svint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_z))) -svint64_t svasr_z(svbool_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_z))) -svint16_t svasr_z(svbool_t, svint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_m))) -svint8_t svasr_m(svbool_t, svint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_m))) -svint32_t svasr_m(svbool_t, svint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_m))) -svint64_t svasr_m(svbool_t, svint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_m))) -svint16_t svasr_m(svbool_t, svint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_x))) -svint8_t svasr_x(svbool_t, svint8_t, svuint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_x))) -svint32_t svasr_x(svbool_t, svint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_x))) -svint64_t svasr_x(svbool_t, svint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_x))) -svint16_t svasr_x(svbool_t, svint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_z))) -svint8_t svasr_z(svbool_t, svint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_z))) -svint32_t svasr_z(svbool_t, svint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_z))) -svint64_t svasr_z(svbool_t, svint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_z))) -svint16_t svasr_z(svbool_t, svint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_m))) -svint8_t svasr_wide_m(svbool_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_m))) -svint32_t svasr_wide_m(svbool_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_m))) -svint16_t svasr_wide_m(svbool_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_x))) -svint8_t svasr_wide_x(svbool_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_x))) -svint32_t svasr_wide_x(svbool_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_x))) -svint16_t svasr_wide_x(svbool_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_z))) -svint8_t svasr_wide_z(svbool_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_z))) -svint32_t svasr_wide_z(svbool_t, svint32_t, 
uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_z))) -svint16_t svasr_wide_z(svbool_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_m))) -svint8_t svasr_wide_m(svbool_t, svint8_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_m))) -svint32_t svasr_wide_m(svbool_t, svint32_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_m))) -svint16_t svasr_wide_m(svbool_t, svint16_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_x))) -svint8_t svasr_wide_x(svbool_t, svint8_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_x))) -svint32_t svasr_wide_x(svbool_t, svint32_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_x))) -svint16_t svasr_wide_x(svbool_t, svint16_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_z))) -svint8_t svasr_wide_z(svbool_t, svint8_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_z))) -svint32_t svasr_wide_z(svbool_t, svint32_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_z))) -svint16_t svasr_wide_z(svbool_t, svint16_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_m))) -svint8_t svasrd_m(svbool_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_m))) -svint32_t svasrd_m(svbool_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_m))) -svint64_t svasrd_m(svbool_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_m))) -svint16_t svasrd_m(svbool_t, svint16_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_x))) -svint8_t svasrd_x(svbool_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_x))) -svint32_t svasrd_x(svbool_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_x))) -svint64_t svasrd_x(svbool_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_x))) -svint16_t svasrd_x(svbool_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_z))) -svint8_t svasrd_z(svbool_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_z))) -svint32_t svasrd_z(svbool_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_z))) -svint64_t svasrd_z(svbool_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_z))) -svint16_t svasrd_z(svbool_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_b_z))) -svbool_t svbic_z(svbool_t, svbool_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_m))) -svuint8_t svbic_m(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_m))) -svuint32_t svbic_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_m))) -svuint64_t svbic_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_m))) -svuint16_t svbic_m(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_m))) -svint8_t svbic_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_m))) -svint32_t svbic_m(svbool_t, svint32_t, int32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_m))) -svint64_t svbic_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_m))) -svint16_t svbic_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_x))) -svuint8_t svbic_x(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_x))) -svuint32_t svbic_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_x))) -svuint64_t svbic_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_x))) -svuint16_t svbic_x(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_x))) -svint8_t svbic_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_x))) -svint32_t svbic_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_x))) -svint64_t svbic_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_x))) -svint16_t svbic_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_z))) -svuint8_t svbic_z(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_z))) -svuint32_t svbic_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_z))) -svuint64_t svbic_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_z))) -svuint16_t svbic_z(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_z))) -svint8_t svbic_z(svbool_t, svint8_t, int8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_z))) -svint32_t svbic_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_z))) -svint64_t svbic_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_z))) -svint16_t svbic_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_m))) -svuint8_t svbic_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_m))) -svuint32_t svbic_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_m))) -svuint64_t svbic_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_m))) -svuint16_t svbic_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_m))) -svint8_t svbic_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_m))) -svint32_t svbic_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_m))) -svint64_t svbic_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_m))) -svint16_t svbic_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_x))) -svuint8_t svbic_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_x))) -svuint32_t svbic_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_x))) -svuint64_t svbic_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_x))) -svuint16_t svbic_x(svbool_t, svuint16_t, svuint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_x))) -svint8_t svbic_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_x))) -svint32_t svbic_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_x))) -svint64_t svbic_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_x))) -svint16_t svbic_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_z))) -svuint8_t svbic_z(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_z))) -svuint32_t svbic_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_z))) -svuint64_t svbic_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_z))) -svuint16_t svbic_z(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_z))) -svint8_t svbic_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_z))) -svint32_t svbic_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_z))) -svint64_t svbic_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_z))) -svint16_t svbic_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_m))) -svbool_t svbrka_m(svbool_t, svbool_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_z))) -svbool_t svbrka_z(svbool_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_m))) -svbool_t svbrkb_m(svbool_t, svbool_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_z))) -svbool_t 
svbrkb_z(svbool_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkn_b_z))) -svbool_t svbrkn_z(svbool_t, svbool_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpa_b_z))) -svbool_t svbrkpa_z(svbool_t, svbool_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpb_b_z))) -svbool_t svbrkpb_z(svbool_t, svbool_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_m))) -svfloat64_t svcadd_m(svbool_t, svfloat64_t, svfloat64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_m))) -svfloat32_t svcadd_m(svbool_t, svfloat32_t, svfloat32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_m))) -svfloat16_t svcadd_m(svbool_t, svfloat16_t, svfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_x))) -svfloat64_t svcadd_x(svbool_t, svfloat64_t, svfloat64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_x))) -svfloat32_t svcadd_x(svbool_t, svfloat32_t, svfloat32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_x))) -svfloat16_t svcadd_x(svbool_t, svfloat16_t, svfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_z))) -svfloat64_t svcadd_z(svbool_t, svfloat64_t, svfloat64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_z))) -svfloat32_t svcadd_z(svbool_t, svfloat32_t, svfloat32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_z))) -svfloat16_t svcadd_z(svbool_t, svfloat16_t, svfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u8))) -uint8_t svclasta(svbool_t, uint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u32))) -uint32_t svclasta(svbool_t, uint32_t, svuint32_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u64))) -uint64_t svclasta(svbool_t, uint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u16))) -uint16_t svclasta(svbool_t, uint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s8))) -int8_t svclasta(svbool_t, int8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f64))) -float64_t svclasta(svbool_t, float64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f32))) -float32_t svclasta(svbool_t, float32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f16))) -float16_t svclasta(svbool_t, float16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s32))) -int32_t svclasta(svbool_t, int32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s64))) -int64_t svclasta(svbool_t, int64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s16))) -int16_t svclasta(svbool_t, int16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u8))) -svuint8_t svclasta(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u32))) -svuint32_t svclasta(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u64))) -svuint64_t svclasta(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u16))) -svuint16_t svclasta(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s8))) -svint8_t svclasta(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f64))) -svfloat64_t svclasta(svbool_t, svfloat64_t, svfloat64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f32))) -svfloat32_t svclasta(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f16))) -svfloat16_t svclasta(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s32))) -svint32_t svclasta(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s64))) -svint64_t svclasta(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s16))) -svint16_t svclasta(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u8))) -uint8_t svclastb(svbool_t, uint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u32))) -uint32_t svclastb(svbool_t, uint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u64))) -uint64_t svclastb(svbool_t, uint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u16))) -uint16_t svclastb(svbool_t, uint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s8))) -int8_t svclastb(svbool_t, int8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f64))) -float64_t svclastb(svbool_t, float64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f32))) -float32_t svclastb(svbool_t, float32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f16))) -float16_t svclastb(svbool_t, float16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s32))) -int32_t svclastb(svbool_t, int32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s64))) -int64_t svclastb(svbool_t, int64_t, svint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s16))) -int16_t svclastb(svbool_t, int16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u8))) -svuint8_t svclastb(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u32))) -svuint32_t svclastb(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u64))) -svuint64_t svclastb(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u16))) -svuint16_t svclastb(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s8))) -svint8_t svclastb(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f64))) -svfloat64_t svclastb(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f32))) -svfloat32_t svclastb(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f16))) -svfloat16_t svclastb(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s32))) -svint32_t svclastb(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s64))) -svint64_t svclastb(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s16))) -svint16_t svclastb(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_m))) -svuint8_t svcls_m(svuint8_t, svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_m))) -svuint32_t svcls_m(svuint32_t, svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_m))) -svuint64_t svcls_m(svuint64_t, svbool_t, svint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_m))) -svuint16_t svcls_m(svuint16_t, svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_x))) -svuint8_t svcls_x(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_x))) -svuint32_t svcls_x(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_x))) -svuint64_t svcls_x(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_x))) -svuint16_t svcls_x(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_z))) -svuint8_t svcls_z(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_z))) -svuint32_t svcls_z(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_z))) -svuint64_t svcls_z(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_z))) -svuint16_t svcls_z(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_m))) -svuint8_t svclz_m(svuint8_t, svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_m))) -svuint32_t svclz_m(svuint32_t, svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_m))) -svuint64_t svclz_m(svuint64_t, svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_m))) -svuint16_t svclz_m(svuint16_t, svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_m))) -svuint8_t svclz_m(svuint8_t, svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_m))) -svuint32_t svclz_m(svuint32_t, svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_m))) -svuint64_t svclz_m(svuint64_t, svbool_t, svint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_m))) -svuint16_t svclz_m(svuint16_t, svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_x))) -svuint8_t svclz_x(svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_x))) -svuint32_t svclz_x(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_x))) -svuint64_t svclz_x(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_x))) -svuint16_t svclz_x(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_x))) -svuint8_t svclz_x(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_x))) -svuint32_t svclz_x(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_x))) -svuint64_t svclz_x(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_x))) -svuint16_t svclz_x(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_z))) -svuint8_t svclz_z(svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_z))) -svuint32_t svclz_z(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_z))) -svuint64_t svclz_z(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_z))) -svuint16_t svclz_z(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_z))) -svuint8_t svclz_z(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_z))) -svuint32_t svclz_z(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_z))) -svuint64_t svclz_z(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_z))) -svuint16_t 
svclz_z(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_m))) -svfloat64_t svcmla_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_m))) -svfloat32_t svcmla_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_m))) -svfloat16_t svcmla_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_x))) -svfloat64_t svcmla_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_x))) -svfloat32_t svcmla_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_x))) -svfloat16_t svcmla_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_z))) -svfloat64_t svcmla_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_z))) -svfloat32_t svcmla_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_z))) -svfloat16_t svcmla_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f32))) -svfloat32_t svcmla_lane(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f16))) -svfloat16_t svcmla_lane(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f64))) -svbool_t svcmpeq(svbool_t, svfloat64_t, float64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f32))) -svbool_t svcmpeq(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f16))) -svbool_t svcmpeq(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u8))) -svbool_t svcmpeq(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u32))) -svbool_t svcmpeq(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u64))) -svbool_t svcmpeq(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u16))) -svbool_t svcmpeq(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s8))) -svbool_t svcmpeq(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s32))) -svbool_t svcmpeq(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s64))) -svbool_t svcmpeq(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s16))) -svbool_t svcmpeq(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u8))) -svbool_t svcmpeq(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u32))) -svbool_t svcmpeq(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u64))) -svbool_t svcmpeq(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u16))) -svbool_t svcmpeq(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s8))) -svbool_t svcmpeq(svbool_t, svint8_t, svint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s32))) -svbool_t svcmpeq(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s64))) -svbool_t svcmpeq(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s16))) -svbool_t svcmpeq(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f64))) -svbool_t svcmpeq(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f32))) -svbool_t svcmpeq(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f16))) -svbool_t svcmpeq(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s8))) -svbool_t svcmpeq_wide(svbool_t, svint8_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s32))) -svbool_t svcmpeq_wide(svbool_t, svint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s16))) -svbool_t svcmpeq_wide(svbool_t, svint16_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s8))) -svbool_t svcmpeq_wide(svbool_t, svint8_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s32))) -svbool_t svcmpeq_wide(svbool_t, svint32_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s16))) -svbool_t svcmpeq_wide(svbool_t, svint16_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f64))) -svbool_t svcmpge(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f32))) -svbool_t svcmpge(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f16))) -svbool_t svcmpge(svbool_t, svfloat16_t, float16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s8))) -svbool_t svcmpge(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s32))) -svbool_t svcmpge(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s64))) -svbool_t svcmpge(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s16))) -svbool_t svcmpge(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u8))) -svbool_t svcmpge(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u32))) -svbool_t svcmpge(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u64))) -svbool_t svcmpge(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u16))) -svbool_t svcmpge(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s8))) -svbool_t svcmpge(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s32))) -svbool_t svcmpge(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s64))) -svbool_t svcmpge(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s16))) -svbool_t svcmpge(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f64))) -svbool_t svcmpge(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f32))) -svbool_t svcmpge(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f16))) -svbool_t svcmpge(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u8))) 
-svbool_t svcmpge(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u32))) -svbool_t svcmpge(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u64))) -svbool_t svcmpge(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u16))) -svbool_t svcmpge(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s8))) -svbool_t svcmpge_wide(svbool_t, svint8_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s32))) -svbool_t svcmpge_wide(svbool_t, svint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s16))) -svbool_t svcmpge_wide(svbool_t, svint16_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u8))) -svbool_t svcmpge_wide(svbool_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u32))) -svbool_t svcmpge_wide(svbool_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u16))) -svbool_t svcmpge_wide(svbool_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s8))) -svbool_t svcmpge_wide(svbool_t, svint8_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s32))) -svbool_t svcmpge_wide(svbool_t, svint32_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s16))) -svbool_t svcmpge_wide(svbool_t, svint16_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u8))) -svbool_t svcmpge_wide(svbool_t, svuint8_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u32))) -svbool_t svcmpge_wide(svbool_t, svuint32_t, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u16))) -svbool_t svcmpge_wide(svbool_t, svuint16_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f64))) -svbool_t svcmpgt(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f32))) -svbool_t svcmpgt(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f16))) -svbool_t svcmpgt(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s8))) -svbool_t svcmpgt(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s32))) -svbool_t svcmpgt(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s64))) -svbool_t svcmpgt(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s16))) -svbool_t svcmpgt(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u8))) -svbool_t svcmpgt(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u32))) -svbool_t svcmpgt(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u64))) -svbool_t svcmpgt(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u16))) -svbool_t svcmpgt(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s8))) -svbool_t svcmpgt(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s32))) -svbool_t svcmpgt(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s64))) -svbool_t svcmpgt(svbool_t, svint64_t, svint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s16))) -svbool_t svcmpgt(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f64))) -svbool_t svcmpgt(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f32))) -svbool_t svcmpgt(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f16))) -svbool_t svcmpgt(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u8))) -svbool_t svcmpgt(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u32))) -svbool_t svcmpgt(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u64))) -svbool_t svcmpgt(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u16))) -svbool_t svcmpgt(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s8))) -svbool_t svcmpgt_wide(svbool_t, svint8_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s32))) -svbool_t svcmpgt_wide(svbool_t, svint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s16))) -svbool_t svcmpgt_wide(svbool_t, svint16_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u8))) -svbool_t svcmpgt_wide(svbool_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u32))) -svbool_t svcmpgt_wide(svbool_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u16))) -svbool_t svcmpgt_wide(svbool_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s8))) -svbool_t svcmpgt_wide(svbool_t, svint8_t, svint64_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s32))) -svbool_t svcmpgt_wide(svbool_t, svint32_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s16))) -svbool_t svcmpgt_wide(svbool_t, svint16_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u8))) -svbool_t svcmpgt_wide(svbool_t, svuint8_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u32))) -svbool_t svcmpgt_wide(svbool_t, svuint32_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u16))) -svbool_t svcmpgt_wide(svbool_t, svuint16_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f64))) -svbool_t svcmple(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f32))) -svbool_t svcmple(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f16))) -svbool_t svcmple(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s8))) -svbool_t svcmple(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s32))) -svbool_t svcmple(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s64))) -svbool_t svcmple(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s16))) -svbool_t svcmple(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u8))) -svbool_t svcmple(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u32))) -svbool_t svcmple(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u64))) -svbool_t svcmple(svbool_t, svuint64_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u16))) -svbool_t svcmple(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s8))) -svbool_t svcmple(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s32))) -svbool_t svcmple(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s64))) -svbool_t svcmple(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s16))) -svbool_t svcmple(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f64))) -svbool_t svcmple(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f32))) -svbool_t svcmple(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f16))) -svbool_t svcmple(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u8))) -svbool_t svcmple(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u32))) -svbool_t svcmple(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u64))) -svbool_t svcmple(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u16))) -svbool_t svcmple(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s8))) -svbool_t svcmple_wide(svbool_t, svint8_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s32))) -svbool_t svcmple_wide(svbool_t, svint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s16))) -svbool_t svcmple_wide(svbool_t, svint16_t, int64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u8))) -svbool_t svcmple_wide(svbool_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u32))) -svbool_t svcmple_wide(svbool_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u16))) -svbool_t svcmple_wide(svbool_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s8))) -svbool_t svcmple_wide(svbool_t, svint8_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s32))) -svbool_t svcmple_wide(svbool_t, svint32_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s16))) -svbool_t svcmple_wide(svbool_t, svint16_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u8))) -svbool_t svcmple_wide(svbool_t, svuint8_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u32))) -svbool_t svcmple_wide(svbool_t, svuint32_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u16))) -svbool_t svcmple_wide(svbool_t, svuint16_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u8))) -svbool_t svcmplt(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u32))) -svbool_t svcmplt(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u64))) -svbool_t svcmplt(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u16))) -svbool_t svcmplt(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f64))) -svbool_t svcmplt(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f32))) -svbool_t svcmplt(svbool_t, 
svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f16))) -svbool_t svcmplt(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s8))) -svbool_t svcmplt(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s32))) -svbool_t svcmplt(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s64))) -svbool_t svcmplt(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s16))) -svbool_t svcmplt(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u8))) -svbool_t svcmplt(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u32))) -svbool_t svcmplt(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u64))) -svbool_t svcmplt(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u16))) -svbool_t svcmplt(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s8))) -svbool_t svcmplt(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s32))) -svbool_t svcmplt(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s64))) -svbool_t svcmplt(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s16))) -svbool_t svcmplt(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f64))) -svbool_t svcmplt(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f32))) -svbool_t svcmplt(svbool_t, svfloat32_t, svfloat32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f16))) -svbool_t svcmplt(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u8))) -svbool_t svcmplt_wide(svbool_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u32))) -svbool_t svcmplt_wide(svbool_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u16))) -svbool_t svcmplt_wide(svbool_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s8))) -svbool_t svcmplt_wide(svbool_t, svint8_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s32))) -svbool_t svcmplt_wide(svbool_t, svint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s16))) -svbool_t svcmplt_wide(svbool_t, svint16_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u8))) -svbool_t svcmplt_wide(svbool_t, svuint8_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u32))) -svbool_t svcmplt_wide(svbool_t, svuint32_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u16))) -svbool_t svcmplt_wide(svbool_t, svuint16_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s8))) -svbool_t svcmplt_wide(svbool_t, svint8_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s32))) -svbool_t svcmplt_wide(svbool_t, svint32_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s16))) -svbool_t svcmplt_wide(svbool_t, svint16_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f64))) -svbool_t svcmpne(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f32))) 
-svbool_t svcmpne(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f16))) -svbool_t svcmpne(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u8))) -svbool_t svcmpne(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u32))) -svbool_t svcmpne(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u64))) -svbool_t svcmpne(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u16))) -svbool_t svcmpne(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s8))) -svbool_t svcmpne(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s32))) -svbool_t svcmpne(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s64))) -svbool_t svcmpne(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s16))) -svbool_t svcmpne(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u8))) -svbool_t svcmpne(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u32))) -svbool_t svcmpne(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u64))) -svbool_t svcmpne(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u16))) -svbool_t svcmpne(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s8))) -svbool_t svcmpne(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s32))) -svbool_t svcmpne(svbool_t, svint32_t, svint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s64))) -svbool_t svcmpne(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s16))) -svbool_t svcmpne(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f64))) -svbool_t svcmpne(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f32))) -svbool_t svcmpne(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f16))) -svbool_t svcmpne(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s8))) -svbool_t svcmpne_wide(svbool_t, svint8_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s32))) -svbool_t svcmpne_wide(svbool_t, svint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s16))) -svbool_t svcmpne_wide(svbool_t, svint16_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s8))) -svbool_t svcmpne_wide(svbool_t, svint8_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s32))) -svbool_t svcmpne_wide(svbool_t, svint32_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s16))) -svbool_t svcmpne_wide(svbool_t, svint16_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f64))) -svbool_t svcmpuo(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f32))) -svbool_t svcmpuo(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f16))) -svbool_t svcmpuo(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f64))) -svbool_t svcmpuo(svbool_t, svfloat64_t, svfloat64_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f32))) -svbool_t svcmpuo(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f16))) -svbool_t svcmpuo(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_m))) -svuint8_t svcnot_m(svuint8_t, svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_m))) -svuint32_t svcnot_m(svuint32_t, svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_m))) -svuint64_t svcnot_m(svuint64_t, svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_m))) -svuint16_t svcnot_m(svuint16_t, svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_m))) -svint8_t svcnot_m(svint8_t, svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_m))) -svint32_t svcnot_m(svint32_t, svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_m))) -svint64_t svcnot_m(svint64_t, svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_m))) -svint16_t svcnot_m(svint16_t, svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_x))) -svuint8_t svcnot_x(svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_x))) -svuint32_t svcnot_x(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_x))) -svuint64_t svcnot_x(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_x))) -svuint16_t svcnot_x(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_x))) -svint8_t svcnot_x(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_x))) 
-svint32_t svcnot_x(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_x))) -svint64_t svcnot_x(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_x))) -svint16_t svcnot_x(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_z))) -svuint8_t svcnot_z(svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_z))) -svuint32_t svcnot_z(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_z))) -svuint64_t svcnot_z(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_z))) -svuint16_t svcnot_z(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_z))) -svint8_t svcnot_z(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_z))) -svint32_t svcnot_z(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_z))) -svint64_t svcnot_z(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_z))) -svint16_t svcnot_z(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_m))) -svuint8_t svcnt_m(svuint8_t, svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_m))) -svuint32_t svcnt_m(svuint32_t, svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_m))) -svuint64_t svcnt_m(svuint64_t, svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_m))) -svuint16_t svcnt_m(svuint16_t, svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_m))) -svuint8_t svcnt_m(svuint8_t, svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_m))) -svuint64_t 
svcnt_m(svuint64_t, svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_m))) -svuint32_t svcnt_m(svuint32_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_m))) -svuint16_t svcnt_m(svuint16_t, svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_m))) -svuint32_t svcnt_m(svuint32_t, svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_m))) -svuint64_t svcnt_m(svuint64_t, svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_m))) -svuint16_t svcnt_m(svuint16_t, svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_x))) -svuint8_t svcnt_x(svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_x))) -svuint32_t svcnt_x(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_x))) -svuint64_t svcnt_x(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_x))) -svuint16_t svcnt_x(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_x))) -svuint8_t svcnt_x(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_x))) -svuint64_t svcnt_x(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_x))) -svuint32_t svcnt_x(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_x))) -svuint16_t svcnt_x(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_x))) -svuint32_t svcnt_x(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_x))) -svuint64_t svcnt_x(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_x))) -svuint16_t 
svcnt_x(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_z))) -svuint8_t svcnt_z(svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_z))) -svuint32_t svcnt_z(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_z))) -svuint64_t svcnt_z(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_z))) -svuint16_t svcnt_z(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_z))) -svuint8_t svcnt_z(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_z))) -svuint64_t svcnt_z(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_z))) -svuint32_t svcnt_z(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_z))) -svuint16_t svcnt_z(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_z))) -svuint32_t svcnt_z(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_z))) -svuint64_t svcnt_z(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_z))) -svuint16_t svcnt_z(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u32))) -svuint32_t svcompact(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u64))) -svuint64_t svcompact(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f64))) -svfloat64_t svcompact(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f32))) -svfloat32_t svcompact(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s32))) -svint32_t svcompact(svbool_t, svint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s64))) -svint64_t svcompact(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u8))) -svuint8x2_t svcreate2(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u32))) -svuint32x2_t svcreate2(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u64))) -svuint64x2_t svcreate2(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u16))) -svuint16x2_t svcreate2(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s8))) -svint8x2_t svcreate2(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f64))) -svfloat64x2_t svcreate2(svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f32))) -svfloat32x2_t svcreate2(svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f16))) -svfloat16x2_t svcreate2(svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s32))) -svint32x2_t svcreate2(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s64))) -svint64x2_t svcreate2(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s16))) -svint16x2_t svcreate2(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u8))) -svuint8x3_t svcreate3(svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u32))) -svuint32x3_t svcreate3(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u64))) -svuint64x3_t svcreate3(svuint64_t, svuint64_t, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u16))) -svuint16x3_t svcreate3(svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s8))) -svint8x3_t svcreate3(svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f64))) -svfloat64x3_t svcreate3(svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f32))) -svfloat32x3_t svcreate3(svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f16))) -svfloat16x3_t svcreate3(svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s32))) -svint32x3_t svcreate3(svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s64))) -svint64x3_t svcreate3(svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s16))) -svint16x3_t svcreate3(svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u8))) -svuint8x4_t svcreate4(svuint8_t, svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u32))) -svuint32x4_t svcreate4(svuint32_t, svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u64))) -svuint64x4_t svcreate4(svuint64_t, svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u16))) -svuint16x4_t svcreate4(svuint16_t, svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s8))) -svint8x4_t svcreate4(svint8_t, svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f64))) -svfloat64x4_t svcreate4(svfloat64_t, svfloat64_t, svfloat64_t, 
svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f32))) -svfloat32x4_t svcreate4(svfloat32_t, svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f16))) -svfloat16x4_t svcreate4(svfloat16_t, svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s32))) -svint32x4_t svcreate4(svint32_t, svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s64))) -svint64x4_t svcreate4(svint64_t, svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s16))) -svint16x4_t svcreate4(svint16_t, svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_m))) -svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_x))) -svfloat16_t svcvt_f16_x(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_z))) -svfloat16_t svcvt_f16_z(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_m))) -svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_x))) -svfloat16_t svcvt_f16_x(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_z))) -svfloat16_t svcvt_f16_z(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_m))) -svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_x))) -svfloat16_t svcvt_f16_x(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_z))) -svfloat16_t svcvt_f16_z(svbool_t, svint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_m))) -svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_x))) -svfloat16_t svcvt_f16_x(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_z))) -svfloat16_t svcvt_f16_z(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_m))) -svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_x))) -svfloat16_t svcvt_f16_x(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_z))) -svfloat16_t svcvt_f16_z(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_m))) -svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_x))) -svfloat16_t svcvt_f16_x(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_z))) -svfloat16_t svcvt_f16_z(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_m))) -svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_x))) -svfloat16_t svcvt_f16_x(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_z))) -svfloat16_t svcvt_f16_z(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_m))) -svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_x))) -svfloat16_t svcvt_f16_x(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_z))) -svfloat16_t svcvt_f16_z(svbool_t, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_m))) -svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_x))) -svfloat32_t svcvt_f32_x(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_z))) -svfloat32_t svcvt_f32_z(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_m))) -svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_x))) -svfloat32_t svcvt_f32_x(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_z))) -svfloat32_t svcvt_f32_z(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_m))) -svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x))) -svfloat32_t svcvt_f32_x(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_z))) -svfloat32_t svcvt_f32_z(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_m))) -svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_x))) -svfloat32_t svcvt_f32_x(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_z))) -svfloat32_t svcvt_f32_z(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_m))) -svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x))) -svfloat32_t svcvt_f32_x(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_z))) -svfloat32_t svcvt_f32_z(svbool_t, svuint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_m))) -svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_x))) -svfloat32_t svcvt_f32_x(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_z))) -svfloat32_t svcvt_f32_z(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_m))) -svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_x))) -svfloat64_t svcvt_f64_x(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_z))) -svfloat64_t svcvt_f64_z(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_m))) -svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_x))) -svfloat64_t svcvt_f64_x(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_z))) -svfloat64_t svcvt_f64_z(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_m))) -svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_x))) -svfloat64_t svcvt_f64_x(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_z))) -svfloat64_t svcvt_f64_z(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_m))) -svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_x))) -svfloat64_t svcvt_f64_x(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_z))) -svfloat64_t svcvt_f64_z(svbool_t, svint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_m))) -svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_x))) -svfloat64_t svcvt_f64_x(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_z))) -svfloat64_t svcvt_f64_z(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_m))) -svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_x))) -svfloat64_t svcvt_f64_x(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_z))) -svfloat64_t svcvt_f64_z(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_m))) -svint16_t svcvt_s16_m(svint16_t, svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_x))) -svint16_t svcvt_s16_x(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_z))) -svint16_t svcvt_s16_z(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_m))) -svint32_t svcvt_s32_m(svint32_t, svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_x))) -svint32_t svcvt_s32_x(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_z))) -svint32_t svcvt_s32_z(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_m))) -svint32_t svcvt_s32_m(svint32_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x))) -svint32_t svcvt_s32_x(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_z))) -svint32_t svcvt_s32_z(svbool_t, svfloat32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_m))) -svint32_t svcvt_s32_m(svint32_t, svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_x))) -svint32_t svcvt_s32_x(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_z))) -svint32_t svcvt_s32_z(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_m))) -svint64_t svcvt_s64_m(svint64_t, svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_x))) -svint64_t svcvt_s64_x(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_z))) -svint64_t svcvt_s64_z(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_m))) -svint64_t svcvt_s64_m(svint64_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_x))) -svint64_t svcvt_s64_x(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_z))) -svint64_t svcvt_s64_z(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_m))) -svint64_t svcvt_s64_m(svint64_t, svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_x))) -svint64_t svcvt_s64_x(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_z))) -svint64_t svcvt_s64_z(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_m))) -svuint16_t svcvt_u16_m(svuint16_t, svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_x))) -svuint16_t svcvt_u16_x(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_z))) -svuint16_t svcvt_u16_z(svbool_t, svfloat16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_m))) -svuint32_t svcvt_u32_m(svuint32_t, svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_x))) -svuint32_t svcvt_u32_x(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_z))) -svuint32_t svcvt_u32_z(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_m))) -svuint32_t svcvt_u32_m(svuint32_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x))) -svuint32_t svcvt_u32_x(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_z))) -svuint32_t svcvt_u32_z(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_m))) -svuint32_t svcvt_u32_m(svuint32_t, svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_x))) -svuint32_t svcvt_u32_x(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_z))) -svuint32_t svcvt_u32_z(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_m))) -svuint64_t svcvt_u64_m(svuint64_t, svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_x))) -svuint64_t svcvt_u64_x(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_z))) -svuint64_t svcvt_u64_z(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_m))) -svuint64_t svcvt_u64_m(svuint64_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_x))) -svuint64_t svcvt_u64_x(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_z))) -svuint64_t svcvt_u64_z(svbool_t, svfloat32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_m))) -svuint64_t svcvt_u64_m(svuint64_t, svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_x))) -svuint64_t svcvt_u64_x(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_z))) -svuint64_t svcvt_u64_z(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_m))) -svfloat64_t svdiv_m(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_m))) -svfloat32_t svdiv_m(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_m))) -svfloat16_t svdiv_m(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_x))) -svfloat64_t svdiv_x(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_x))) -svfloat32_t svdiv_x(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_x))) -svfloat16_t svdiv_x(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_z))) -svfloat64_t svdiv_z(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_z))) -svfloat32_t svdiv_z(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_z))) -svfloat16_t svdiv_z(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_m))) -svint32_t svdiv_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_m))) -svint64_t svdiv_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_x))) -svint32_t svdiv_x(svbool_t, svint32_t, int32_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_x))) -svint64_t svdiv_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_z))) -svint32_t svdiv_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_z))) -svint64_t svdiv_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_m))) -svuint32_t svdiv_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_m))) -svuint64_t svdiv_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_x))) -svuint32_t svdiv_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_x))) -svuint64_t svdiv_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_z))) -svuint32_t svdiv_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_z))) -svuint64_t svdiv_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_m))) -svfloat64_t svdiv_m(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_m))) -svfloat32_t svdiv_m(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_m))) -svfloat16_t svdiv_m(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_x))) -svfloat64_t svdiv_x(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_x))) -svfloat32_t svdiv_x(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_x))) -svfloat16_t svdiv_x(svbool_t, svfloat16_t, svfloat16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_z))) -svfloat64_t svdiv_z(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_z))) -svfloat32_t svdiv_z(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_z))) -svfloat16_t svdiv_z(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_m))) -svint32_t svdiv_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_m))) -svint64_t svdiv_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_x))) -svint32_t svdiv_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_x))) -svint64_t svdiv_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_z))) -svint32_t svdiv_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_z))) -svint64_t svdiv_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_m))) -svuint32_t svdiv_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_m))) -svuint64_t svdiv_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_x))) -svuint32_t svdiv_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_x))) -svuint64_t svdiv_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_z))) -svuint32_t svdiv_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_z))) -svuint64_t svdiv_z(svbool_t, svuint64_t, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_m))) -svfloat64_t svdivr_m(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_m))) -svfloat32_t svdivr_m(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_m))) -svfloat16_t svdivr_m(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_x))) -svfloat64_t svdivr_x(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_x))) -svfloat32_t svdivr_x(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_x))) -svfloat16_t svdivr_x(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_z))) -svfloat64_t svdivr_z(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_z))) -svfloat32_t svdivr_z(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_z))) -svfloat16_t svdivr_z(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_m))) -svint32_t svdivr_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_m))) -svint64_t svdivr_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_x))) -svint32_t svdivr_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_x))) -svint64_t svdivr_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_z))) -svint32_t svdivr_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_z))) -svint64_t svdivr_z(svbool_t, 
svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_m))) -svuint32_t svdivr_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_m))) -svuint64_t svdivr_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_x))) -svuint32_t svdivr_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_x))) -svuint64_t svdivr_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_z))) -svuint32_t svdivr_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_z))) -svuint64_t svdivr_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_m))) -svfloat64_t svdivr_m(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_m))) -svfloat32_t svdivr_m(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_m))) -svfloat16_t svdivr_m(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_x))) -svfloat64_t svdivr_x(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_x))) -svfloat32_t svdivr_x(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_x))) -svfloat16_t svdivr_x(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_z))) -svfloat64_t svdivr_z(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_z))) -svfloat32_t svdivr_z(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_z))) 
-svfloat16_t svdivr_z(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_m))) -svint32_t svdivr_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_m))) -svint64_t svdivr_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_x))) -svint32_t svdivr_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_x))) -svint64_t svdivr_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_z))) -svint32_t svdivr_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_z))) -svint64_t svdivr_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_m))) -svuint32_t svdivr_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_m))) -svuint64_t svdivr_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_x))) -svuint32_t svdivr_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_x))) -svuint64_t svdivr_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_z))) -svuint32_t svdivr_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_z))) -svuint64_t svdivr_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s32))) -svint32_t svdot(svint32_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s64))) -svint64_t svdot(svint64_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u32))) -svuint32_t 
svdot(svuint32_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u64))) -svuint64_t svdot(svuint64_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s32))) -svint32_t svdot(svint32_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s64))) -svint64_t svdot(svint64_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u32))) -svuint32_t svdot(svuint32_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u64))) -svuint64_t svdot(svuint64_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s32))) -svint32_t svdot_lane(svint32_t, svint8_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s64))) -svint64_t svdot_lane(svint64_t, svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u32))) -svuint32_t svdot_lane(svuint32_t, svuint8_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u64))) -svuint64_t svdot_lane(svuint64_t, svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8))) -svuint8_t svdup_u8(uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32))) -svuint32_t svdup_u32(uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64))) -svuint64_t svdup_u64(uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16))) -svuint16_t svdup_u16(uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8))) -svint8_t svdup_s8(int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64))) -svfloat64_t svdup_f64(float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32))) 
-svfloat32_t svdup_f32(float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16))) -svfloat16_t svdup_f16(float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32))) -svint32_t svdup_s32(int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64))) -svint64_t svdup_s64(int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16))) -svint16_t svdup_s16(int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_m))) -svuint8_t svdup_u8_m(svuint8_t, svbool_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_m))) -svuint32_t svdup_u32_m(svuint32_t, svbool_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_m))) -svuint64_t svdup_u64_m(svuint64_t, svbool_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_m))) -svuint16_t svdup_u16_m(svuint16_t, svbool_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_m))) -svint8_t svdup_s8_m(svint8_t, svbool_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_m))) -svfloat64_t svdup_f64_m(svfloat64_t, svbool_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_m))) -svfloat32_t svdup_f32_m(svfloat32_t, svbool_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_m))) -svfloat16_t svdup_f16_m(svfloat16_t, svbool_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_m))) -svint32_t svdup_s32_m(svint32_t, svbool_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_m))) -svint64_t svdup_s64_m(svint64_t, svbool_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_m))) -svint16_t svdup_s16_m(svint16_t, svbool_t, int16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b8))) -svbool_t svdup_b8(bool); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b32))) -svbool_t svdup_b32(bool); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b64))) -svbool_t svdup_b64(bool); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b16))) -svbool_t svdup_b16(bool); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_x))) -svuint8_t svdup_u8_x(svbool_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_x))) -svuint32_t svdup_u32_x(svbool_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_x))) -svuint64_t svdup_u64_x(svbool_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_x))) -svuint16_t svdup_u16_x(svbool_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_x))) -svint8_t svdup_s8_x(svbool_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_x))) -svfloat64_t svdup_f64_x(svbool_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_x))) -svfloat32_t svdup_f32_x(svbool_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_x))) -svfloat16_t svdup_f16_x(svbool_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_x))) -svint32_t svdup_s32_x(svbool_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_x))) -svint64_t svdup_s64_x(svbool_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_x))) -svint16_t svdup_s16_x(svbool_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_z))) -svuint8_t svdup_u8_z(svbool_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_z))) -svuint32_t svdup_u32_z(svbool_t, 
uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_z))) -svuint64_t svdup_u64_z(svbool_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_z))) -svuint16_t svdup_u16_z(svbool_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_z))) -svint8_t svdup_s8_z(svbool_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_z))) -svfloat64_t svdup_f64_z(svbool_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_z))) -svfloat32_t svdup_f32_z(svbool_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_z))) -svfloat16_t svdup_f16_z(svbool_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_z))) -svint32_t svdup_s32_z(svbool_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_z))) -svint64_t svdup_s64_z(svbool_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_z))) -svint16_t svdup_s16_z(svbool_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u8))) -svuint8_t svdup_lane(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u32))) -svuint32_t svdup_lane(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u64))) -svuint64_t svdup_lane(svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u16))) -svuint16_t svdup_lane(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s8))) -svint8_t svdup_lane(svint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f64))) -svfloat64_t svdup_lane(svfloat64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f32))) -svfloat32_t svdup_lane(svfloat32_t, uint32_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f16))) -svfloat16_t svdup_lane(svfloat16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s32))) -svint32_t svdup_lane(svint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s64))) -svint64_t svdup_lane(svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s16))) -svint16_t svdup_lane(svint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u8))) -svuint8_t svdupq_u8(uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s8))) -svint8_t svdupq_s8(int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u16))) -svuint16_t svdupq_u16(uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f16))) -svfloat16_t svdupq_f16(float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s16))) -svint16_t svdupq_s16(int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u32))) -svuint32_t svdupq_u32(uint32_t, uint32_t, uint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f32))) -svfloat32_t svdupq_f32(float32_t, float32_t, float32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s32))) -svint32_t svdupq_s32(int32_t, int32_t, int32_t, int32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u64))) -svuint64_t svdupq_u64(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f64))) -svfloat64_t svdupq_f64(float64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s64))) -svint64_t svdupq_s64(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b8))) -svbool_t svdupq_b8(bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b16))) -svbool_t svdupq_b16(bool, bool, bool, bool, bool, bool, bool, bool); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b32))) -svbool_t svdupq_b32(bool, bool, bool, bool); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b64))) -svbool_t svdupq_b64(bool, bool); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u8))) -svuint8_t svdupq_lane(svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u32))) -svuint32_t svdupq_lane(svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u64))) -svuint64_t svdupq_lane(svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u16))) -svuint16_t svdupq_lane(svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s8))) -svint8_t svdupq_lane(svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f64))) -svfloat64_t svdupq_lane(svfloat64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f32))) -svfloat32_t svdupq_lane(svfloat32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f16))) -svfloat16_t svdupq_lane(svfloat16_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s32))) -svint32_t svdupq_lane(svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s64))) -svint64_t svdupq_lane(svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s16))) -svint16_t svdupq_lane(svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_b_z))) -svbool_t sveor_z(svbool_t, svbool_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_m))) -svuint8_t sveor_m(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_m))) -svuint32_t sveor_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_m))) -svuint64_t sveor_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_m))) -svuint16_t sveor_m(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_m))) -svint8_t sveor_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_m))) -svint32_t sveor_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_m))) -svint64_t sveor_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_m))) -svint16_t sveor_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_x))) -svuint8_t sveor_x(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_x))) -svuint32_t sveor_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_x))) -svuint64_t sveor_x(svbool_t, svuint64_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_x))) -svuint16_t sveor_x(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_x))) -svint8_t sveor_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_x))) -svint32_t sveor_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_x))) -svint64_t sveor_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_x))) -svint16_t sveor_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_z))) -svuint8_t sveor_z(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_z))) -svuint32_t sveor_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_z))) -svuint64_t sveor_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_z))) -svuint16_t sveor_z(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_z))) -svint8_t sveor_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_z))) -svint32_t sveor_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_z))) -svint64_t sveor_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_z))) -svint16_t sveor_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_m))) -svuint8_t sveor_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_m))) -svuint32_t sveor_m(svbool_t, svuint32_t, svuint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_m))) -svuint64_t sveor_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_m))) -svuint16_t sveor_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_m))) -svint8_t sveor_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_m))) -svint32_t sveor_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_m))) -svint64_t sveor_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_m))) -svint16_t sveor_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_x))) -svuint8_t sveor_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_x))) -svuint32_t sveor_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_x))) -svuint64_t sveor_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_x))) -svuint16_t sveor_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_x))) -svint8_t sveor_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_x))) -svint32_t sveor_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_x))) -svint64_t sveor_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_x))) -svint16_t sveor_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_z))) -svuint8_t sveor_z(svbool_t, svuint8_t, svuint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_z))) -svuint32_t sveor_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_z))) -svuint64_t sveor_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_z))) -svuint16_t sveor_z(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_z))) -svint8_t sveor_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_z))) -svint32_t sveor_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_z))) -svint64_t sveor_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_z))) -svint16_t sveor_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u8))) -uint8_t sveorv(svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u32))) -uint32_t sveorv(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u64))) -uint64_t sveorv(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u16))) -uint16_t sveorv(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s8))) -int8_t sveorv(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s32))) -int32_t sveorv(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s64))) -int64_t sveorv(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s16))) -int16_t sveorv(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f64))) -svfloat64_t svexpa(svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f32))) 
-svfloat32_t svexpa(svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f16))) -svfloat16_t svexpa(svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u8))) -svuint8_t svext(svuint8_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u32))) -svuint32_t svext(svuint32_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u64))) -svuint64_t svext(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u16))) -svuint16_t svext(svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s8))) -svint8_t svext(svint8_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f64))) -svfloat64_t svext(svfloat64_t, svfloat64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f32))) -svfloat32_t svext(svfloat32_t, svfloat32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f16))) -svfloat16_t svext(svfloat16_t, svfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s32))) -svint32_t svext(svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s64))) -svint64_t svext(svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s16))) -svint16_t svext(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_m))) -svint32_t svextb_m(svint32_t, svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_m))) -svint64_t svextb_m(svint64_t, svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_m))) -svint16_t svextb_m(svint16_t, svbool_t, svint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_x))) -svint32_t svextb_x(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_x))) -svint64_t svextb_x(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_x))) -svint16_t svextb_x(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_z))) -svint32_t svextb_z(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_z))) -svint64_t svextb_z(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_z))) -svint16_t svextb_z(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_m))) -svuint32_t svextb_m(svuint32_t, svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_m))) -svuint64_t svextb_m(svuint64_t, svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_m))) -svuint16_t svextb_m(svuint16_t, svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_x))) -svuint32_t svextb_x(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_x))) -svuint64_t svextb_x(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_x))) -svuint16_t svextb_x(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_z))) -svuint32_t svextb_z(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_z))) -svuint64_t svextb_z(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_z))) -svuint16_t svextb_z(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_m))) -svint32_t svexth_m(svint32_t, svbool_t, svint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_m))) -svint64_t svexth_m(svint64_t, svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_x))) -svint32_t svexth_x(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_x))) -svint64_t svexth_x(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_z))) -svint32_t svexth_z(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_z))) -svint64_t svexth_z(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_m))) -svuint32_t svexth_m(svuint32_t, svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_m))) -svuint64_t svexth_m(svuint64_t, svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_x))) -svuint32_t svexth_x(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_x))) -svuint64_t svexth_x(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_z))) -svuint32_t svexth_z(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_z))) -svuint64_t svexth_z(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_m))) -svint64_t svextw_m(svint64_t, svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_x))) -svint64_t svextw_x(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_z))) -svint64_t svextw_z(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_m))) -svuint64_t svextw_m(svuint64_t, svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_x))) -svuint64_t svextw_x(svbool_t, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_z))) -svuint64_t svextw_z(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u8))) -svuint8_t svget2(svuint8x2_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u32))) -svuint32_t svget2(svuint32x2_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u64))) -svuint64_t svget2(svuint64x2_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u16))) -svuint16_t svget2(svuint16x2_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s8))) -svint8_t svget2(svint8x2_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f64))) -svfloat64_t svget2(svfloat64x2_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f32))) -svfloat32_t svget2(svfloat32x2_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f16))) -svfloat16_t svget2(svfloat16x2_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s32))) -svint32_t svget2(svint32x2_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s64))) -svint64_t svget2(svint64x2_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s16))) -svint16_t svget2(svint16x2_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u8))) -svuint8_t svget3(svuint8x3_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u32))) -svuint32_t svget3(svuint32x3_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u64))) -svuint64_t svget3(svuint64x3_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u16))) -svuint16_t svget3(svuint16x3_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s8))) -svint8_t 
svget3(svint8x3_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f64))) -svfloat64_t svget3(svfloat64x3_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f32))) -svfloat32_t svget3(svfloat32x3_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f16))) -svfloat16_t svget3(svfloat16x3_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s32))) -svint32_t svget3(svint32x3_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s64))) -svint64_t svget3(svint64x3_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s16))) -svint16_t svget3(svint16x3_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u8))) -svuint8_t svget4(svuint8x4_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u32))) -svuint32_t svget4(svuint32x4_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u64))) -svuint64_t svget4(svuint64x4_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u16))) -svuint16_t svget4(svuint16x4_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s8))) -svint8_t svget4(svint8x4_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f64))) -svfloat64_t svget4(svfloat64x4_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f32))) -svfloat32_t svget4(svfloat32x4_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f16))) -svfloat16_t svget4(svfloat16x4_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s32))) -svint32_t svget4(svint32x4_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s64))) -svint64_t svget4(svint64x4_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s16))) -svint16_t svget4(svint16x4_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u8))) -svuint8_t svinsr(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u32))) -svuint32_t svinsr(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u64))) -svuint64_t svinsr(svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u16))) -svuint16_t svinsr(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s8))) -svint8_t svinsr(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f64))) -svfloat64_t svinsr(svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f32))) -svfloat32_t svinsr(svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f16))) -svfloat16_t svinsr(svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s32))) -svint32_t svinsr(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s64))) -svint64_t svinsr(svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s16))) -svint16_t svinsr(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u8))) -uint8_t svlasta(svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u32))) -uint32_t svlasta(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u64))) -uint64_t svlasta(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u16))) -uint16_t svlasta(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s8))) -int8_t svlasta(svbool_t, 
svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f64))) -float64_t svlasta(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f32))) -float32_t svlasta(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f16))) -float16_t svlasta(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s32))) -int32_t svlasta(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s64))) -int64_t svlasta(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s16))) -int16_t svlasta(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u8))) -uint8_t svlastb(svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u32))) -uint32_t svlastb(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u64))) -uint64_t svlastb(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u16))) -uint16_t svlastb(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s8))) -int8_t svlastb(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f64))) -float64_t svlastb(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f32))) -float32_t svlastb(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f16))) -float16_t svlastb(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s32))) -int32_t svlastb(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s64))) -int64_t svlastb(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s16))) -int16_t svlastb(svbool_t, 
svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8))) -svuint8_t svld1(svbool_t, uint8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32))) -svuint32_t svld1(svbool_t, uint32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64))) -svuint64_t svld1(svbool_t, uint64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16))) -svuint16_t svld1(svbool_t, uint16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8))) -svint8_t svld1(svbool_t, int8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64))) -svfloat64_t svld1(svbool_t, float64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32))) -svfloat32_t svld1(svbool_t, float32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16))) -svfloat16_t svld1(svbool_t, float16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32))) -svint32_t svld1(svbool_t, int32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64))) -svint64_t svld1(svbool_t, int64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16))) -svint16_t svld1(svbool_t, int16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_u32))) -svuint32_t svld1_gather_index_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_u64))) -svuint64_t svld1_gather_index_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_f64))) -svfloat64_t svld1_gather_index_f64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_f32))) -svfloat32_t svld1_gather_index_f32(svbool_t, svuint32_t, int64_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_s32))) -svint32_t svld1_gather_index_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_s64))) -svint64_t svld1_gather_index_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_u32))) -svuint32_t svld1_gather_offset_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_u64))) -svuint64_t svld1_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_f64))) -svfloat64_t svld1_gather_offset_f64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_f32))) -svfloat32_t svld1_gather_offset_f32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_s32))) -svint32_t svld1_gather_offset_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_s64))) -svint64_t svld1_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_u32))) -svuint32_t svld1_gather_u32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_u64))) -svuint64_t svld1_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_f64))) -svfloat64_t svld1_gather_f64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_f32))) -svfloat32_t svld1_gather_f32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_s32))) -svint32_t 
svld1_gather_s32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_s64))) -svint64_t svld1_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_u32))) -svuint32_t svld1_gather_index(svbool_t, uint32_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_f32))) -svfloat32_t svld1_gather_index(svbool_t, float32_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_s32))) -svint32_t svld1_gather_index(svbool_t, int32_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_u32))) -svuint32_t svld1_gather_index(svbool_t, uint32_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_f32))) -svfloat32_t svld1_gather_index(svbool_t, float32_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_s32))) -svint32_t svld1_gather_index(svbool_t, int32_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_u64))) -svuint64_t svld1_gather_index(svbool_t, uint64_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_f64))) -svfloat64_t svld1_gather_index(svbool_t, float64_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_s64))) -svint64_t svld1_gather_index(svbool_t, int64_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_u64))) -svuint64_t svld1_gather_index(svbool_t, uint64_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_f64))) -svfloat64_t svld1_gather_index(svbool_t, float64_t const *, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_s64))) -svint64_t svld1_gather_index(svbool_t, int64_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_u32))) -svuint32_t svld1_gather_offset(svbool_t, uint32_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_f32))) -svfloat32_t svld1_gather_offset(svbool_t, float32_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_s32))) -svint32_t svld1_gather_offset(svbool_t, int32_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_u32))) -svuint32_t svld1_gather_offset(svbool_t, uint32_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_f32))) -svfloat32_t svld1_gather_offset(svbool_t, float32_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_s32))) -svint32_t svld1_gather_offset(svbool_t, int32_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_u64))) -svuint64_t svld1_gather_offset(svbool_t, uint64_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_f64))) -svfloat64_t svld1_gather_offset(svbool_t, float64_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_s64))) -svint64_t svld1_gather_offset(svbool_t, int64_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_u64))) -svuint64_t svld1_gather_offset(svbool_t, uint64_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_f64))) -svfloat64_t svld1_gather_offset(svbool_t, float64_t const *, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_s64))) -svint64_t svld1_gather_offset(svbool_t, int64_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8))) -svuint8_t svld1_vnum(svbool_t, uint8_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32))) -svuint32_t svld1_vnum(svbool_t, uint32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64))) -svuint64_t svld1_vnum(svbool_t, uint64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16))) -svuint16_t svld1_vnum(svbool_t, uint16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8))) -svint8_t svld1_vnum(svbool_t, int8_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64))) -svfloat64_t svld1_vnum(svbool_t, float64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32))) -svfloat32_t svld1_vnum(svbool_t, float32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16))) -svfloat16_t svld1_vnum(svbool_t, float16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32))) -svint32_t svld1_vnum(svbool_t, int32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64))) -svint64_t svld1_vnum(svbool_t, int64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16))) -svint16_t svld1_vnum(svbool_t, int16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u8))) -svuint8_t svld1rq(svbool_t, uint8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u32))) -svuint32_t svld1rq(svbool_t, uint32_t const *); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u64))) -svuint64_t svld1rq(svbool_t, uint64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u16))) -svuint16_t svld1rq(svbool_t, uint16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s8))) -svint8_t svld1rq(svbool_t, int8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f64))) -svfloat64_t svld1rq(svbool_t, float64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f32))) -svfloat32_t svld1rq(svbool_t, float32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f16))) -svfloat16_t svld1rq(svbool_t, float16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s32))) -svint32_t svld1rq(svbool_t, int32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s64))) -svint64_t svld1rq(svbool_t, int64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s16))) -svint16_t svld1rq(svbool_t, int16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_u32))) -svuint32_t svld1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_u64))) -svuint64_t svld1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_s32))) -svint32_t svld1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_s64))) -svint64_t svld1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_u32))) -svuint32_t svld1sb_gather_u32(svbool_t, svuint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_u64))) -svuint64_t svld1sb_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_s32))) -svint32_t svld1sb_gather_s32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_s64))) -svint64_t svld1sb_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_u32))) -svuint32_t svld1sb_gather_offset_u32(svbool_t, int8_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_s32))) -svint32_t svld1sb_gather_offset_s32(svbool_t, int8_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_u32))) -svuint32_t svld1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_s32))) -svint32_t svld1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_u64))) -svuint64_t svld1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_s64))) -svint64_t svld1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_u64))) -svuint64_t svld1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_s64))) -svint64_t svld1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_u32))) -svuint32_t svld1sh_gather_index_u32(svbool_t, svuint32_t, int64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_u64))) -svuint64_t svld1sh_gather_index_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_s32))) -svint32_t svld1sh_gather_index_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_s64))) -svint64_t svld1sh_gather_index_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_u32))) -svuint32_t svld1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_u64))) -svuint64_t svld1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_s32))) -svint32_t svld1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_s64))) -svint64_t svld1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_u32))) -svuint32_t svld1sh_gather_u32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_u64))) -svuint64_t svld1sh_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_s32))) -svint32_t svld1sh_gather_s32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_s64))) -svint64_t svld1sh_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_u32))) -svuint32_t svld1sh_gather_index_u32(svbool_t, int16_t const *, svint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_s32))) -svint32_t svld1sh_gather_index_s32(svbool_t, int16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_u32))) -svuint32_t svld1sh_gather_index_u32(svbool_t, int16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_s32))) -svint32_t svld1sh_gather_index_s32(svbool_t, int16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_u64))) -svuint64_t svld1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_s64))) -svint64_t svld1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_u64))) -svuint64_t svld1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_s64))) -svint64_t svld1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_u32))) -svuint32_t svld1sh_gather_offset_u32(svbool_t, int16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_s32))) -svint32_t svld1sh_gather_offset_s32(svbool_t, int16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_u32))) -svuint32_t svld1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_s32))) -svint32_t svld1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_u64))) -svuint64_t 
svld1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_s64))) -svint64_t svld1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_u64))) -svuint64_t svld1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_s64))) -svint64_t svld1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_u64))) -svuint64_t svld1sw_gather_index_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_s64))) -svint64_t svld1sw_gather_index_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_u64))) -svuint64_t svld1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_s64))) -svint64_t svld1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_u64))) -svuint64_t svld1sw_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_s64))) -svint64_t svld1sw_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_u64))) -svuint64_t svld1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_s64))) -svint64_t svld1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_u64))) -svuint64_t 
svld1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_s64))) -svint64_t svld1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_u64))) -svuint64_t svld1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_s64))) -svint64_t svld1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_u64))) -svuint64_t svld1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_s64))) -svint64_t svld1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_u32))) -svuint32_t svld1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_u64))) -svuint64_t svld1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_s32))) -svint32_t svld1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_s64))) -svint64_t svld1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_u32))) -svuint32_t svld1ub_gather_u32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_u64))) -svuint64_t svld1ub_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_s32))) -svint32_t 
svld1ub_gather_s32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_s64))) -svint64_t svld1ub_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_u32))) -svuint32_t svld1ub_gather_offset_u32(svbool_t, uint8_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_s32))) -svint32_t svld1ub_gather_offset_s32(svbool_t, uint8_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_u32))) -svuint32_t svld1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_s32))) -svint32_t svld1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_u64))) -svuint64_t svld1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_s64))) -svint64_t svld1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_u64))) -svuint64_t svld1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_s64))) -svint64_t svld1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_u32))) -svuint32_t svld1uh_gather_index_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_u64))) -svuint64_t svld1uh_gather_index_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_s32))) -svint32_t 
svld1uh_gather_index_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_s64))) -svint64_t svld1uh_gather_index_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_u32))) -svuint32_t svld1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_u64))) -svuint64_t svld1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_s32))) -svint32_t svld1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_s64))) -svint64_t svld1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_u32))) -svuint32_t svld1uh_gather_u32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_u64))) -svuint64_t svld1uh_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_s32))) -svint32_t svld1uh_gather_s32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_s64))) -svint64_t svld1uh_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_u32))) -svuint32_t svld1uh_gather_index_u32(svbool_t, uint16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_s32))) -svint32_t svld1uh_gather_index_s32(svbool_t, uint16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_u32))) -svuint32_t svld1uh_gather_index_u32(svbool_t, uint16_t const *, svuint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_s32))) -svint32_t svld1uh_gather_index_s32(svbool_t, uint16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_u64))) -svuint64_t svld1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_s64))) -svint64_t svld1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_u64))) -svuint64_t svld1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_s64))) -svint64_t svld1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_u32))) -svuint32_t svld1uh_gather_offset_u32(svbool_t, uint16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_s32))) -svint32_t svld1uh_gather_offset_s32(svbool_t, uint16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_u32))) -svuint32_t svld1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_s32))) -svint32_t svld1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_u64))) -svuint64_t svld1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_s64))) -svint64_t svld1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_u64))) -svuint64_t 
svld1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_s64))) -svint64_t svld1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_u64))) -svuint64_t svld1uw_gather_index_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_s64))) -svint64_t svld1uw_gather_index_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_u64))) -svuint64_t svld1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_s64))) -svint64_t svld1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_u64))) -svuint64_t svld1uw_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_s64))) -svint64_t svld1uw_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_u64))) -svuint64_t svld1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_s64))) -svint64_t svld1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_u64))) -svuint64_t svld1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_s64))) -svint64_t svld1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_u64))) -svuint64_t 
svld1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_s64))) -svint64_t svld1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_u64))) -svuint64_t svld1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_s64))) -svint64_t svld1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u8))) -svuint8x2_t svld2(svbool_t, uint8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u32))) -svuint32x2_t svld2(svbool_t, uint32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u64))) -svuint64x2_t svld2(svbool_t, uint64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u16))) -svuint16x2_t svld2(svbool_t, uint16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s8))) -svint8x2_t svld2(svbool_t, int8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f64))) -svfloat64x2_t svld2(svbool_t, float64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f32))) -svfloat32x2_t svld2(svbool_t, float32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f16))) -svfloat16x2_t svld2(svbool_t, float16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s32))) -svint32x2_t svld2(svbool_t, int32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s64))) -svint64x2_t svld2(svbool_t, int64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s16))) -svint16x2_t svld2(svbool_t, int16_t const *); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u8))) -svuint8x2_t svld2_vnum(svbool_t, uint8_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u32))) -svuint32x2_t svld2_vnum(svbool_t, uint32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u64))) -svuint64x2_t svld2_vnum(svbool_t, uint64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u16))) -svuint16x2_t svld2_vnum(svbool_t, uint16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s8))) -svint8x2_t svld2_vnum(svbool_t, int8_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f64))) -svfloat64x2_t svld2_vnum(svbool_t, float64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f32))) -svfloat32x2_t svld2_vnum(svbool_t, float32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f16))) -svfloat16x2_t svld2_vnum(svbool_t, float16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s32))) -svint32x2_t svld2_vnum(svbool_t, int32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s64))) -svint64x2_t svld2_vnum(svbool_t, int64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s16))) -svint16x2_t svld2_vnum(svbool_t, int16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u8))) -svuint8x3_t svld3(svbool_t, uint8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u32))) -svuint32x3_t svld3(svbool_t, uint32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u64))) -svuint64x3_t svld3(svbool_t, uint64_t const *); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u16))) -svuint16x3_t svld3(svbool_t, uint16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s8))) -svint8x3_t svld3(svbool_t, int8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f64))) -svfloat64x3_t svld3(svbool_t, float64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f32))) -svfloat32x3_t svld3(svbool_t, float32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f16))) -svfloat16x3_t svld3(svbool_t, float16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s32))) -svint32x3_t svld3(svbool_t, int32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s64))) -svint64x3_t svld3(svbool_t, int64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s16))) -svint16x3_t svld3(svbool_t, int16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u8))) -svuint8x3_t svld3_vnum(svbool_t, uint8_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u32))) -svuint32x3_t svld3_vnum(svbool_t, uint32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u64))) -svuint64x3_t svld3_vnum(svbool_t, uint64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u16))) -svuint16x3_t svld3_vnum(svbool_t, uint16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s8))) -svint8x3_t svld3_vnum(svbool_t, int8_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f64))) -svfloat64x3_t svld3_vnum(svbool_t, float64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f32))) -svfloat32x3_t svld3_vnum(svbool_t, float32_t const *, int64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f16))) -svfloat16x3_t svld3_vnum(svbool_t, float16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s32))) -svint32x3_t svld3_vnum(svbool_t, int32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s64))) -svint64x3_t svld3_vnum(svbool_t, int64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s16))) -svint16x3_t svld3_vnum(svbool_t, int16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u8))) -svuint8x4_t svld4(svbool_t, uint8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u32))) -svuint32x4_t svld4(svbool_t, uint32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u64))) -svuint64x4_t svld4(svbool_t, uint64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u16))) -svuint16x4_t svld4(svbool_t, uint16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s8))) -svint8x4_t svld4(svbool_t, int8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f64))) -svfloat64x4_t svld4(svbool_t, float64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f32))) -svfloat32x4_t svld4(svbool_t, float32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f16))) -svfloat16x4_t svld4(svbool_t, float16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s32))) -svint32x4_t svld4(svbool_t, int32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s64))) -svint64x4_t svld4(svbool_t, int64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s16))) -svint16x4_t svld4(svbool_t, int16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u8))) 
-svuint8x4_t svld4_vnum(svbool_t, uint8_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u32))) -svuint32x4_t svld4_vnum(svbool_t, uint32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u64))) -svuint64x4_t svld4_vnum(svbool_t, uint64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u16))) -svuint16x4_t svld4_vnum(svbool_t, uint16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s8))) -svint8x4_t svld4_vnum(svbool_t, int8_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f64))) -svfloat64x4_t svld4_vnum(svbool_t, float64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f32))) -svfloat32x4_t svld4_vnum(svbool_t, float32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f16))) -svfloat16x4_t svld4_vnum(svbool_t, float16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s32))) -svint32x4_t svld4_vnum(svbool_t, int32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s64))) -svint64x4_t svld4_vnum(svbool_t, int64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s16))) -svint16x4_t svld4_vnum(svbool_t, int16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u8))) -svuint8_t svldff1(svbool_t, uint8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u32))) -svuint32_t svldff1(svbool_t, uint32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u64))) -svuint64_t svldff1(svbool_t, uint64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u16))) -svuint16_t svldff1(svbool_t, uint16_t const *); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s8))) -svint8_t svldff1(svbool_t, int8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f64))) -svfloat64_t svldff1(svbool_t, float64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f32))) -svfloat32_t svldff1(svbool_t, float32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f16))) -svfloat16_t svldff1(svbool_t, float16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s32))) -svint32_t svldff1(svbool_t, int32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s64))) -svint64_t svldff1(svbool_t, int64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s16))) -svint16_t svldff1(svbool_t, int16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_u32))) -svuint32_t svldff1_gather_index_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_u64))) -svuint64_t svldff1_gather_index_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_f64))) -svfloat64_t svldff1_gather_index_f64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_f32))) -svfloat32_t svldff1_gather_index_f32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_s32))) -svint32_t svldff1_gather_index_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_s64))) -svint64_t svldff1_gather_index_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_u32))) -svuint32_t 
svldff1_gather_offset_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_u64))) -svuint64_t svldff1_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_f64))) -svfloat64_t svldff1_gather_offset_f64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_f32))) -svfloat32_t svldff1_gather_offset_f32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_s32))) -svint32_t svldff1_gather_offset_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_s64))) -svint64_t svldff1_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_u32))) -svuint32_t svldff1_gather_u32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_u64))) -svuint64_t svldff1_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_f64))) -svfloat64_t svldff1_gather_f64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_f32))) -svfloat32_t svldff1_gather_f32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_s32))) -svint32_t svldff1_gather_s32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_s64))) -svint64_t svldff1_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_u32))) -svuint32_t svldff1_gather_index(svbool_t, uint32_t const *, svint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_f32))) -svfloat32_t svldff1_gather_index(svbool_t, float32_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_s32))) -svint32_t svldff1_gather_index(svbool_t, int32_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_u32))) -svuint32_t svldff1_gather_index(svbool_t, uint32_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_f32))) -svfloat32_t svldff1_gather_index(svbool_t, float32_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_s32))) -svint32_t svldff1_gather_index(svbool_t, int32_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_u64))) -svuint64_t svldff1_gather_index(svbool_t, uint64_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_f64))) -svfloat64_t svldff1_gather_index(svbool_t, float64_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_s64))) -svint64_t svldff1_gather_index(svbool_t, int64_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_u64))) -svuint64_t svldff1_gather_index(svbool_t, uint64_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_f64))) -svfloat64_t svldff1_gather_index(svbool_t, float64_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_s64))) -svint64_t svldff1_gather_index(svbool_t, int64_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_u32))) -svuint32_t svldff1_gather_offset(svbool_t, uint32_t const *, svint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_f32))) -svfloat32_t svldff1_gather_offset(svbool_t, float32_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_s32))) -svint32_t svldff1_gather_offset(svbool_t, int32_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_u32))) -svuint32_t svldff1_gather_offset(svbool_t, uint32_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_f32))) -svfloat32_t svldff1_gather_offset(svbool_t, float32_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_s32))) -svint32_t svldff1_gather_offset(svbool_t, int32_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_u64))) -svuint64_t svldff1_gather_offset(svbool_t, uint64_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_f64))) -svfloat64_t svldff1_gather_offset(svbool_t, float64_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_s64))) -svint64_t svldff1_gather_offset(svbool_t, int64_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_u64))) -svuint64_t svldff1_gather_offset(svbool_t, uint64_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_f64))) -svfloat64_t svldff1_gather_offset(svbool_t, float64_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_s64))) -svint64_t svldff1_gather_offset(svbool_t, int64_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u8))) -svuint8_t svldff1_vnum(svbool_t, uint8_t const *, int64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u32))) -svuint32_t svldff1_vnum(svbool_t, uint32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u64))) -svuint64_t svldff1_vnum(svbool_t, uint64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u16))) -svuint16_t svldff1_vnum(svbool_t, uint16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s8))) -svint8_t svldff1_vnum(svbool_t, int8_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f64))) -svfloat64_t svldff1_vnum(svbool_t, float64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f32))) -svfloat32_t svldff1_vnum(svbool_t, float32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f16))) -svfloat16_t svldff1_vnum(svbool_t, float16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s32))) -svint32_t svldff1_vnum(svbool_t, int32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s64))) -svint64_t svldff1_vnum(svbool_t, int64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s16))) -svint16_t svldff1_vnum(svbool_t, int16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_u32))) -svuint32_t svldff1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_u64))) -svuint64_t svldff1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_s32))) -svint32_t svldff1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_s64))) -svint64_t svldff1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_u32))) -svuint32_t svldff1sb_gather_u32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_u64))) -svuint64_t svldff1sb_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_s32))) -svint32_t svldff1sb_gather_s32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_s64))) -svint64_t svldff1sb_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_u32))) -svuint32_t svldff1sb_gather_offset_u32(svbool_t, int8_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_s32))) -svint32_t svldff1sb_gather_offset_s32(svbool_t, int8_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_u32))) -svuint32_t svldff1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_s32))) -svint32_t svldff1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_u64))) -svuint64_t svldff1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_s64))) -svint64_t svldff1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_u64))) -svuint64_t svldff1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_s64))) -svint64_t svldff1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_u32))) -svuint32_t svldff1sh_gather_index_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_u64))) -svuint64_t svldff1sh_gather_index_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_s32))) -svint32_t svldff1sh_gather_index_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_s64))) -svint64_t svldff1sh_gather_index_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_u32))) -svuint32_t svldff1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_u64))) -svuint64_t svldff1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_s32))) -svint32_t svldff1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_s64))) -svint64_t svldff1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_u32))) -svuint32_t svldff1sh_gather_u32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_u64))) -svuint64_t svldff1sh_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_s32))) -svint32_t svldff1sh_gather_s32(svbool_t, svuint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_s64))) -svint64_t svldff1sh_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_u32))) -svuint32_t svldff1sh_gather_index_u32(svbool_t, int16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_s32))) -svint32_t svldff1sh_gather_index_s32(svbool_t, int16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_u32))) -svuint32_t svldff1sh_gather_index_u32(svbool_t, int16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_s32))) -svint32_t svldff1sh_gather_index_s32(svbool_t, int16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_u64))) -svuint64_t svldff1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_s64))) -svint64_t svldff1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_u64))) -svuint64_t svldff1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_s64))) -svint64_t svldff1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_u32))) -svuint32_t svldff1sh_gather_offset_u32(svbool_t, int16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_s32))) -svint32_t svldff1sh_gather_offset_s32(svbool_t, int16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_u32))) -svuint32_t 
svldff1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_s32))) -svint32_t svldff1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_u64))) -svuint64_t svldff1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_s64))) -svint64_t svldff1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_u64))) -svuint64_t svldff1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_s64))) -svint64_t svldff1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_u64))) -svuint64_t svldff1sw_gather_index_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_s64))) -svint64_t svldff1sw_gather_index_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_u64))) -svuint64_t svldff1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_s64))) -svint64_t svldff1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_u64))) -svuint64_t svldff1sw_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_s64))) -svint64_t svldff1sw_gather_s64(svbool_t, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_u64))) -svuint64_t svldff1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_s64))) -svint64_t svldff1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_u64))) -svuint64_t svldff1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_s64))) -svint64_t svldff1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_u64))) -svuint64_t svldff1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_s64))) -svint64_t svldff1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_u64))) -svuint64_t svldff1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_s64))) -svint64_t svldff1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_u32))) -svuint32_t svldff1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_u64))) -svuint64_t svldff1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_s32))) -svint32_t svldff1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_s64))) -svint64_t svldff1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_u32))) -svuint32_t svldff1ub_gather_u32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_u64))) -svuint64_t svldff1ub_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_s32))) -svint32_t svldff1ub_gather_s32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_s64))) -svint64_t svldff1ub_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_u32))) -svuint32_t svldff1ub_gather_offset_u32(svbool_t, uint8_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_s32))) -svint32_t svldff1ub_gather_offset_s32(svbool_t, uint8_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_u32))) -svuint32_t svldff1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_s32))) -svint32_t svldff1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_u64))) -svuint64_t svldff1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_s64))) -svint64_t svldff1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_u64))) -svuint64_t svldff1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_s64))) -svint64_t svldff1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_u32))) -svuint32_t svldff1uh_gather_index_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_u64))) -svuint64_t svldff1uh_gather_index_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_s32))) -svint32_t svldff1uh_gather_index_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_s64))) -svint64_t svldff1uh_gather_index_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_u32))) -svuint32_t svldff1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_u64))) -svuint64_t svldff1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_s32))) -svint32_t svldff1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_s64))) -svint64_t svldff1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_u32))) -svuint32_t svldff1uh_gather_u32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_u64))) -svuint64_t svldff1uh_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_s32))) -svint32_t svldff1uh_gather_s32(svbool_t, svuint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_s64))) -svint64_t svldff1uh_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_u32))) -svuint32_t svldff1uh_gather_index_u32(svbool_t, uint16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_s32))) -svint32_t svldff1uh_gather_index_s32(svbool_t, uint16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_u32))) -svuint32_t svldff1uh_gather_index_u32(svbool_t, uint16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_s32))) -svint32_t svldff1uh_gather_index_s32(svbool_t, uint16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_u64))) -svuint64_t svldff1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_s64))) -svint64_t svldff1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_u64))) -svuint64_t svldff1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_s64))) -svint64_t svldff1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_u32))) -svuint32_t svldff1uh_gather_offset_u32(svbool_t, uint16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_s32))) -svint32_t svldff1uh_gather_offset_s32(svbool_t, uint16_t const *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_u32))) -svuint32_t 
svldff1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_s32))) -svint32_t svldff1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_u64))) -svuint64_t svldff1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_s64))) -svint64_t svldff1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_u64))) -svuint64_t svldff1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_s64))) -svint64_t svldff1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_u64))) -svuint64_t svldff1uw_gather_index_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_s64))) -svint64_t svldff1uw_gather_index_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_u64))) -svuint64_t svldff1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_s64))) -svint64_t svldff1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_u64))) -svuint64_t svldff1uw_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_s64))) -svint64_t svldff1uw_gather_s64(svbool_t, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_u64))) -svuint64_t svldff1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_s64))) -svint64_t svldff1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_u64))) -svuint64_t svldff1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_s64))) -svint64_t svldff1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_u64))) -svuint64_t svldff1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_s64))) -svint64_t svldff1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_u64))) -svuint64_t svldff1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_s64))) -svint64_t svldff1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u8))) -svuint8_t svldnf1(svbool_t, uint8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u32))) -svuint32_t svldnf1(svbool_t, uint32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u64))) -svuint64_t svldnf1(svbool_t, uint64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u16))) -svuint16_t svldnf1(svbool_t, uint16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s8))) -svint8_t svldnf1(svbool_t, 
int8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f64))) -svfloat64_t svldnf1(svbool_t, float64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f32))) -svfloat32_t svldnf1(svbool_t, float32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f16))) -svfloat16_t svldnf1(svbool_t, float16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s32))) -svint32_t svldnf1(svbool_t, int32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s64))) -svint64_t svldnf1(svbool_t, int64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s16))) -svint16_t svldnf1(svbool_t, int16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u8))) -svuint8_t svldnf1_vnum(svbool_t, uint8_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u32))) -svuint32_t svldnf1_vnum(svbool_t, uint32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u64))) -svuint64_t svldnf1_vnum(svbool_t, uint64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u16))) -svuint16_t svldnf1_vnum(svbool_t, uint16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s8))) -svint8_t svldnf1_vnum(svbool_t, int8_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f64))) -svfloat64_t svldnf1_vnum(svbool_t, float64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f32))) -svfloat32_t svldnf1_vnum(svbool_t, float32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f16))) -svfloat16_t svldnf1_vnum(svbool_t, float16_t const *, int64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s32))) -svint32_t svldnf1_vnum(svbool_t, int32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s64))) -svint64_t svldnf1_vnum(svbool_t, int64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s16))) -svint16_t svldnf1_vnum(svbool_t, int16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8))) -svuint8_t svldnt1(svbool_t, uint8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32))) -svuint32_t svldnt1(svbool_t, uint32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64))) -svuint64_t svldnt1(svbool_t, uint64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16))) -svuint16_t svldnt1(svbool_t, uint16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8))) -svint8_t svldnt1(svbool_t, int8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64))) -svfloat64_t svldnt1(svbool_t, float64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32))) -svfloat32_t svldnt1(svbool_t, float32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16))) -svfloat16_t svldnt1(svbool_t, float16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32))) -svint32_t svldnt1(svbool_t, int32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64))) -svint64_t svldnt1(svbool_t, int64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16))) -svint16_t svldnt1(svbool_t, int16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8))) -svuint8_t svldnt1_vnum(svbool_t, uint8_t const *, int64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32))) -svuint32_t svldnt1_vnum(svbool_t, uint32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64))) -svuint64_t svldnt1_vnum(svbool_t, uint64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16))) -svuint16_t svldnt1_vnum(svbool_t, uint16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8))) -svint8_t svldnt1_vnum(svbool_t, int8_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64))) -svfloat64_t svldnt1_vnum(svbool_t, float64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32))) -svfloat32_t svldnt1_vnum(svbool_t, float32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16))) -svfloat16_t svldnt1_vnum(svbool_t, float16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32))) -svint32_t svldnt1_vnum(svbool_t, int32_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64))) -svint64_t svldnt1_vnum(svbool_t, int64_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16))) -svint16_t svldnt1_vnum(svbool_t, int16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u8))) -uint64_t svlen(svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u32))) -uint64_t svlen(svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u64))) -uint64_t svlen(svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u16))) -uint64_t svlen(svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s8))) -uint64_t svlen(svint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f64))) -uint64_t svlen(svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f32))) -uint64_t svlen(svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f16))) -uint64_t svlen(svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s32))) -uint64_t svlen(svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s64))) -uint64_t svlen(svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s16))) -uint64_t svlen(svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_m))) -svuint8_t svlsl_m(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_m))) -svuint32_t svlsl_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_m))) -svuint64_t svlsl_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_m))) -svuint16_t svlsl_m(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_m))) -svint8_t svlsl_m(svbool_t, svint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_m))) -svint32_t svlsl_m(svbool_t, svint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_m))) -svint64_t svlsl_m(svbool_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_m))) -svint16_t svlsl_m(svbool_t, svint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_x))) -svuint8_t svlsl_x(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_x))) -svuint32_t svlsl_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_x))) 
-svuint64_t svlsl_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_x))) -svuint16_t svlsl_x(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_x))) -svint8_t svlsl_x(svbool_t, svint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_x))) -svint32_t svlsl_x(svbool_t, svint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_x))) -svint64_t svlsl_x(svbool_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_x))) -svint16_t svlsl_x(svbool_t, svint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_z))) -svuint8_t svlsl_z(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_z))) -svuint32_t svlsl_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_z))) -svuint64_t svlsl_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_z))) -svuint16_t svlsl_z(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_z))) -svint8_t svlsl_z(svbool_t, svint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_z))) -svint32_t svlsl_z(svbool_t, svint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_z))) -svint64_t svlsl_z(svbool_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_z))) -svint16_t svlsl_z(svbool_t, svint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_m))) -svuint8_t svlsl_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_m))) -svuint32_t svlsl_m(svbool_t, svuint32_t, 
svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_m))) -svuint64_t svlsl_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_m))) -svuint16_t svlsl_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_m))) -svint8_t svlsl_m(svbool_t, svint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_m))) -svint32_t svlsl_m(svbool_t, svint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_m))) -svint64_t svlsl_m(svbool_t, svint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_m))) -svint16_t svlsl_m(svbool_t, svint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_x))) -svuint8_t svlsl_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_x))) -svuint32_t svlsl_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_x))) -svuint64_t svlsl_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_x))) -svuint16_t svlsl_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_x))) -svint8_t svlsl_x(svbool_t, svint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_x))) -svint32_t svlsl_x(svbool_t, svint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_x))) -svint64_t svlsl_x(svbool_t, svint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_x))) -svint16_t svlsl_x(svbool_t, svint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_z))) -svuint8_t svlsl_z(svbool_t, svuint8_t, svuint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_z))) -svuint32_t svlsl_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_z))) -svuint64_t svlsl_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_z))) -svuint16_t svlsl_z(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_z))) -svint8_t svlsl_z(svbool_t, svint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_z))) -svint32_t svlsl_z(svbool_t, svint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_z))) -svint64_t svlsl_z(svbool_t, svint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_z))) -svint16_t svlsl_z(svbool_t, svint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_m))) -svuint8_t svlsl_wide_m(svbool_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_m))) -svuint32_t svlsl_wide_m(svbool_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_m))) -svuint16_t svlsl_wide_m(svbool_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_m))) -svint8_t svlsl_wide_m(svbool_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_m))) -svint32_t svlsl_wide_m(svbool_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_m))) -svint16_t svlsl_wide_m(svbool_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_x))) -svuint8_t svlsl_wide_x(svbool_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_x))) -svuint32_t 
svlsl_wide_x(svbool_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_x))) -svuint16_t svlsl_wide_x(svbool_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_x))) -svint8_t svlsl_wide_x(svbool_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_x))) -svint32_t svlsl_wide_x(svbool_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_x))) -svint16_t svlsl_wide_x(svbool_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_z))) -svuint8_t svlsl_wide_z(svbool_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_z))) -svuint32_t svlsl_wide_z(svbool_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_z))) -svuint16_t svlsl_wide_z(svbool_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_z))) -svint8_t svlsl_wide_z(svbool_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_z))) -svint32_t svlsl_wide_z(svbool_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_z))) -svint16_t svlsl_wide_z(svbool_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_m))) -svuint8_t svlsl_wide_m(svbool_t, svuint8_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_m))) -svuint32_t svlsl_wide_m(svbool_t, svuint32_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_m))) -svuint16_t svlsl_wide_m(svbool_t, svuint16_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_m))) -svint8_t svlsl_wide_m(svbool_t, svint8_t, 
svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_m))) -svint32_t svlsl_wide_m(svbool_t, svint32_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_m))) -svint16_t svlsl_wide_m(svbool_t, svint16_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_x))) -svuint8_t svlsl_wide_x(svbool_t, svuint8_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_x))) -svuint32_t svlsl_wide_x(svbool_t, svuint32_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_x))) -svuint16_t svlsl_wide_x(svbool_t, svuint16_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_x))) -svint8_t svlsl_wide_x(svbool_t, svint8_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_x))) -svint32_t svlsl_wide_x(svbool_t, svint32_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_x))) -svint16_t svlsl_wide_x(svbool_t, svint16_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_z))) -svuint8_t svlsl_wide_z(svbool_t, svuint8_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_z))) -svuint32_t svlsl_wide_z(svbool_t, svuint32_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_z))) -svuint16_t svlsl_wide_z(svbool_t, svuint16_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_z))) -svint8_t svlsl_wide_z(svbool_t, svint8_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_z))) -svint32_t svlsl_wide_z(svbool_t, svint32_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_z))) -svint16_t svlsl_wide_z(svbool_t, svint16_t, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_m))) -svuint8_t svlsr_m(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_m))) -svuint32_t svlsr_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_m))) -svuint64_t svlsr_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_m))) -svuint16_t svlsr_m(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_x))) -svuint8_t svlsr_x(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_x))) -svuint32_t svlsr_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_x))) -svuint64_t svlsr_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_x))) -svuint16_t svlsr_x(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_z))) -svuint8_t svlsr_z(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_z))) -svuint32_t svlsr_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_z))) -svuint64_t svlsr_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_z))) -svuint16_t svlsr_z(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_m))) -svuint8_t svlsr_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_m))) -svuint32_t svlsr_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_m))) -svuint64_t svlsr_m(svbool_t, svuint64_t, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_m))) -svuint16_t svlsr_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_x))) -svuint8_t svlsr_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_x))) -svuint32_t svlsr_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_x))) -svuint64_t svlsr_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_x))) -svuint16_t svlsr_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_z))) -svuint8_t svlsr_z(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_z))) -svuint32_t svlsr_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_z))) -svuint64_t svlsr_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_z))) -svuint16_t svlsr_z(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_m))) -svuint8_t svlsr_wide_m(svbool_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_m))) -svuint32_t svlsr_wide_m(svbool_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_m))) -svuint16_t svlsr_wide_m(svbool_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_x))) -svuint8_t svlsr_wide_x(svbool_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_x))) -svuint32_t svlsr_wide_x(svbool_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_x))) -svuint16_t svlsr_wide_x(svbool_t, 
svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_z))) -svuint8_t svlsr_wide_z(svbool_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_z))) -svuint32_t svlsr_wide_z(svbool_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_z))) -svuint16_t svlsr_wide_z(svbool_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_m))) -svuint8_t svlsr_wide_m(svbool_t, svuint8_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_m))) -svuint32_t svlsr_wide_m(svbool_t, svuint32_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_m))) -svuint16_t svlsr_wide_m(svbool_t, svuint16_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_x))) -svuint8_t svlsr_wide_x(svbool_t, svuint8_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_x))) -svuint32_t svlsr_wide_x(svbool_t, svuint32_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_x))) -svuint16_t svlsr_wide_x(svbool_t, svuint16_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_z))) -svuint8_t svlsr_wide_z(svbool_t, svuint8_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_z))) -svuint32_t svlsr_wide_z(svbool_t, svuint32_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_z))) -svuint16_t svlsr_wide_z(svbool_t, svuint16_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_m))) -svfloat64_t svmad_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_m))) -svfloat32_t svmad_m(svbool_t, svfloat32_t, svfloat32_t, 
float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_m))) -svfloat16_t svmad_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_x))) -svfloat64_t svmad_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_x))) -svfloat32_t svmad_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_x))) -svfloat16_t svmad_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_z))) -svfloat64_t svmad_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_z))) -svfloat32_t svmad_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_z))) -svfloat16_t svmad_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_m))) -svuint8_t svmad_m(svbool_t, svuint8_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_m))) -svuint32_t svmad_m(svbool_t, svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_m))) -svuint64_t svmad_m(svbool_t, svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_m))) -svuint16_t svmad_m(svbool_t, svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_m))) -svint8_t svmad_m(svbool_t, svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_m))) -svint32_t svmad_m(svbool_t, svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_m))) -svint64_t 
svmad_m(svbool_t, svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_m))) -svint16_t svmad_m(svbool_t, svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_x))) -svuint8_t svmad_x(svbool_t, svuint8_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_x))) -svuint32_t svmad_x(svbool_t, svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_x))) -svuint64_t svmad_x(svbool_t, svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_x))) -svuint16_t svmad_x(svbool_t, svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_x))) -svint8_t svmad_x(svbool_t, svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_x))) -svint32_t svmad_x(svbool_t, svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_x))) -svint64_t svmad_x(svbool_t, svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_x))) -svint16_t svmad_x(svbool_t, svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_z))) -svuint8_t svmad_z(svbool_t, svuint8_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_z))) -svuint32_t svmad_z(svbool_t, svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_z))) -svuint64_t svmad_z(svbool_t, svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_z))) -svuint16_t svmad_z(svbool_t, svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_z))) -svint8_t svmad_z(svbool_t, 
svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_z))) -svint32_t svmad_z(svbool_t, svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_z))) -svint64_t svmad_z(svbool_t, svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_z))) -svint16_t svmad_z(svbool_t, svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_m))) -svfloat64_t svmad_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_m))) -svfloat32_t svmad_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_m))) -svfloat16_t svmad_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_x))) -svfloat64_t svmad_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_x))) -svfloat32_t svmad_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_x))) -svfloat16_t svmad_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_z))) -svfloat64_t svmad_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_z))) -svfloat32_t svmad_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_z))) -svfloat16_t svmad_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_m))) -svuint8_t svmad_m(svbool_t, svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_m))) 
-svuint32_t svmad_m(svbool_t, svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_m))) -svuint64_t svmad_m(svbool_t, svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_m))) -svuint16_t svmad_m(svbool_t, svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_m))) -svint8_t svmad_m(svbool_t, svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_m))) -svint32_t svmad_m(svbool_t, svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_m))) -svint64_t svmad_m(svbool_t, svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_m))) -svint16_t svmad_m(svbool_t, svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_x))) -svuint8_t svmad_x(svbool_t, svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_x))) -svuint32_t svmad_x(svbool_t, svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_x))) -svuint64_t svmad_x(svbool_t, svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_x))) -svuint16_t svmad_x(svbool_t, svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_x))) -svint8_t svmad_x(svbool_t, svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_x))) -svint32_t svmad_x(svbool_t, svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_x))) -svint64_t svmad_x(svbool_t, svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_x))) -svint16_t 
svmad_x(svbool_t, svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_z))) -svuint8_t svmad_z(svbool_t, svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_z))) -svuint32_t svmad_z(svbool_t, svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_z))) -svuint64_t svmad_z(svbool_t, svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_z))) -svuint16_t svmad_z(svbool_t, svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_z))) -svint8_t svmad_z(svbool_t, svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_z))) -svint32_t svmad_z(svbool_t, svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_z))) -svint64_t svmad_z(svbool_t, svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_z))) -svint16_t svmad_z(svbool_t, svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_m))) -svfloat64_t svmax_m(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_m))) -svfloat32_t svmax_m(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_m))) -svfloat16_t svmax_m(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_x))) -svfloat64_t svmax_x(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_x))) -svfloat32_t svmax_x(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_x))) -svfloat16_t svmax_x(svbool_t, svfloat16_t, float16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_z))) -svfloat64_t svmax_z(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_z))) -svfloat32_t svmax_z(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_z))) -svfloat16_t svmax_z(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_m))) -svint8_t svmax_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_m))) -svint32_t svmax_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_m))) -svint64_t svmax_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_m))) -svint16_t svmax_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_x))) -svint8_t svmax_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_x))) -svint32_t svmax_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_x))) -svint64_t svmax_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_x))) -svint16_t svmax_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_z))) -svint8_t svmax_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_z))) -svint32_t svmax_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_z))) -svint64_t svmax_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_z))) -svint16_t svmax_z(svbool_t, svint16_t, int16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_m))) -svuint8_t svmax_m(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_m))) -svuint32_t svmax_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_m))) -svuint64_t svmax_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_m))) -svuint16_t svmax_m(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_x))) -svuint8_t svmax_x(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_x))) -svuint32_t svmax_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_x))) -svuint64_t svmax_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_x))) -svuint16_t svmax_x(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_z))) -svuint8_t svmax_z(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_z))) -svuint32_t svmax_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_z))) -svuint64_t svmax_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_z))) -svuint16_t svmax_z(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_m))) -svfloat64_t svmax_m(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_m))) -svfloat32_t svmax_m(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_m))) -svfloat16_t svmax_m(svbool_t, svfloat16_t, svfloat16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x))) -svfloat64_t svmax_x(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x))) -svfloat32_t svmax_x(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x))) -svfloat16_t svmax_x(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_z))) -svfloat64_t svmax_z(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_z))) -svfloat32_t svmax_z(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_z))) -svfloat16_t svmax_z(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_m))) -svint8_t svmax_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_m))) -svint32_t svmax_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_m))) -svint64_t svmax_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_m))) -svint16_t svmax_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x))) -svint8_t svmax_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x))) -svint32_t svmax_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x))) -svint64_t svmax_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x))) -svint16_t svmax_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_z))) -svint8_t svmax_z(svbool_t, svint8_t, svint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_z))) -svint32_t svmax_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_z))) -svint64_t svmax_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_z))) -svint16_t svmax_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_m))) -svuint8_t svmax_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_m))) -svuint32_t svmax_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_m))) -svuint64_t svmax_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_m))) -svuint16_t svmax_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x))) -svuint8_t svmax_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x))) -svuint32_t svmax_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x))) -svuint64_t svmax_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x))) -svuint16_t svmax_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_z))) -svuint8_t svmax_z(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_z))) -svuint32_t svmax_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_z))) -svuint64_t svmax_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_z))) -svuint16_t svmax_z(svbool_t, svuint16_t, svuint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_m))) -svfloat64_t svmaxnm_m(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_m))) -svfloat32_t svmaxnm_m(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_m))) -svfloat16_t svmaxnm_m(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_x))) -svfloat64_t svmaxnm_x(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_x))) -svfloat32_t svmaxnm_x(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_x))) -svfloat16_t svmaxnm_x(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_z))) -svfloat64_t svmaxnm_z(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_z))) -svfloat32_t svmaxnm_z(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_z))) -svfloat16_t svmaxnm_z(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_m))) -svfloat64_t svmaxnm_m(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_m))) -svfloat32_t svmaxnm_m(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_m))) -svfloat16_t svmaxnm_m(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x))) -svfloat64_t svmaxnm_x(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x))) -svfloat32_t svmaxnm_x(svbool_t, svfloat32_t, svfloat32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x))) -svfloat16_t svmaxnm_x(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_z))) -svfloat64_t svmaxnm_z(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_z))) -svfloat32_t svmaxnm_z(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_z))) -svfloat16_t svmaxnm_z(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f64))) -float64_t svmaxnmv(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f32))) -float32_t svmaxnmv(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f16))) -float16_t svmaxnmv(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f64))) -float64_t svmaxv(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f32))) -float32_t svmaxv(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f16))) -float16_t svmaxv(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s8))) -int8_t svmaxv(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s32))) -int32_t svmaxv(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s64))) -int64_t svmaxv(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s16))) -int16_t svmaxv(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u8))) -uint8_t svmaxv(svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u32))) -uint32_t svmaxv(svbool_t, svuint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u64))) -uint64_t svmaxv(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u16))) -uint16_t svmaxv(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_m))) -svfloat64_t svmin_m(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_m))) -svfloat32_t svmin_m(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_m))) -svfloat16_t svmin_m(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_x))) -svfloat64_t svmin_x(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_x))) -svfloat32_t svmin_x(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_x))) -svfloat16_t svmin_x(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_z))) -svfloat64_t svmin_z(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_z))) -svfloat32_t svmin_z(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_z))) -svfloat16_t svmin_z(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_m))) -svint8_t svmin_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_m))) -svint32_t svmin_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_m))) -svint64_t svmin_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_m))) -svint16_t svmin_m(svbool_t, svint16_t, int16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_x))) -svint8_t svmin_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_x))) -svint32_t svmin_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_x))) -svint64_t svmin_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_x))) -svint16_t svmin_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_z))) -svint8_t svmin_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_z))) -svint32_t svmin_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_z))) -svint64_t svmin_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_z))) -svint16_t svmin_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_m))) -svuint8_t svmin_m(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_m))) -svuint32_t svmin_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_m))) -svuint64_t svmin_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_m))) -svuint16_t svmin_m(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_x))) -svuint8_t svmin_x(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_x))) -svuint32_t svmin_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_x))) -svuint64_t svmin_x(svbool_t, svuint64_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_x))) -svuint16_t svmin_x(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_z))) -svuint8_t svmin_z(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_z))) -svuint32_t svmin_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_z))) -svuint64_t svmin_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_z))) -svuint16_t svmin_z(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_m))) -svfloat64_t svmin_m(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_m))) -svfloat32_t svmin_m(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_m))) -svfloat16_t svmin_m(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x))) -svfloat64_t svmin_x(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x))) -svfloat32_t svmin_x(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x))) -svfloat16_t svmin_x(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_z))) -svfloat64_t svmin_z(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_z))) -svfloat32_t svmin_z(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_z))) -svfloat16_t svmin_z(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_m))) -svint8_t svmin_m(svbool_t, svint8_t, svint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_m))) -svint32_t svmin_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_m))) -svint64_t svmin_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_m))) -svint16_t svmin_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x))) -svint8_t svmin_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x))) -svint32_t svmin_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x))) -svint64_t svmin_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x))) -svint16_t svmin_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_z))) -svint8_t svmin_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_z))) -svint32_t svmin_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_z))) -svint64_t svmin_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_z))) -svint16_t svmin_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_m))) -svuint8_t svmin_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_m))) -svuint32_t svmin_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_m))) -svuint64_t svmin_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_m))) -svuint16_t svmin_m(svbool_t, svuint16_t, svuint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x))) -svuint8_t svmin_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x))) -svuint32_t svmin_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x))) -svuint64_t svmin_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x))) -svuint16_t svmin_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_z))) -svuint8_t svmin_z(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_z))) -svuint32_t svmin_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_z))) -svuint64_t svmin_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_z))) -svuint16_t svmin_z(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_m))) -svfloat64_t svminnm_m(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_m))) -svfloat32_t svminnm_m(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_m))) -svfloat16_t svminnm_m(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_x))) -svfloat64_t svminnm_x(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_x))) -svfloat32_t svminnm_x(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_x))) -svfloat16_t svminnm_x(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_z))) -svfloat64_t svminnm_z(svbool_t, 
svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_z))) -svfloat32_t svminnm_z(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_z))) -svfloat16_t svminnm_z(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_m))) -svfloat64_t svminnm_m(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_m))) -svfloat32_t svminnm_m(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_m))) -svfloat16_t svminnm_m(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x))) -svfloat64_t svminnm_x(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x))) -svfloat32_t svminnm_x(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x))) -svfloat16_t svminnm_x(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_z))) -svfloat64_t svminnm_z(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_z))) -svfloat32_t svminnm_z(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_z))) -svfloat16_t svminnm_z(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f64))) -float64_t svminnmv(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f32))) -float32_t svminnmv(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f16))) -float16_t svminnmv(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f64))) 
-float64_t svminv(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f32))) -float32_t svminv(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f16))) -float16_t svminv(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s8))) -int8_t svminv(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s32))) -int32_t svminv(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s64))) -int64_t svminv(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s16))) -int16_t svminv(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u8))) -uint8_t svminv(svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u32))) -uint32_t svminv(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u64))) -uint64_t svminv(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u16))) -uint16_t svminv(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_m))) -svfloat64_t svmla_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_m))) -svfloat32_t svmla_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_m))) -svfloat16_t svmla_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_x))) -svfloat64_t svmla_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_x))) -svfloat32_t svmla_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_x))) -svfloat16_t svmla_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_z))) -svfloat64_t svmla_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_z))) -svfloat32_t svmla_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_z))) -svfloat16_t svmla_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_m))) -svuint8_t svmla_m(svbool_t, svuint8_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_m))) -svuint32_t svmla_m(svbool_t, svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_m))) -svuint64_t svmla_m(svbool_t, svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_m))) -svuint16_t svmla_m(svbool_t, svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_m))) -svint8_t svmla_m(svbool_t, svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_m))) -svint32_t svmla_m(svbool_t, svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_m))) -svint64_t svmla_m(svbool_t, svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_m))) -svint16_t svmla_m(svbool_t, svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_x))) -svuint8_t svmla_x(svbool_t, svuint8_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_x))) -svuint32_t svmla_x(svbool_t, svuint32_t, svuint32_t, uint32_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_x))) -svuint64_t svmla_x(svbool_t, svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_x))) -svuint16_t svmla_x(svbool_t, svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_x))) -svint8_t svmla_x(svbool_t, svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_x))) -svint32_t svmla_x(svbool_t, svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_x))) -svint64_t svmla_x(svbool_t, svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_x))) -svint16_t svmla_x(svbool_t, svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_z))) -svuint8_t svmla_z(svbool_t, svuint8_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_z))) -svuint32_t svmla_z(svbool_t, svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_z))) -svuint64_t svmla_z(svbool_t, svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_z))) -svuint16_t svmla_z(svbool_t, svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_z))) -svint8_t svmla_z(svbool_t, svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_z))) -svint32_t svmla_z(svbool_t, svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_z))) -svint64_t svmla_z(svbool_t, svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_z))) -svint16_t svmla_z(svbool_t, svint16_t, svint16_t, int16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_m))) -svfloat64_t svmla_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_m))) -svfloat32_t svmla_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_m))) -svfloat16_t svmla_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_x))) -svfloat64_t svmla_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_x))) -svfloat32_t svmla_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_x))) -svfloat16_t svmla_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_z))) -svfloat64_t svmla_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_z))) -svfloat32_t svmla_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_z))) -svfloat16_t svmla_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_m))) -svuint8_t svmla_m(svbool_t, svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_m))) -svuint32_t svmla_m(svbool_t, svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_m))) -svuint64_t svmla_m(svbool_t, svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_m))) -svuint16_t svmla_m(svbool_t, svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_m))) -svint8_t svmla_m(svbool_t, 
svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_m))) -svint32_t svmla_m(svbool_t, svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_m))) -svint64_t svmla_m(svbool_t, svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_m))) -svint16_t svmla_m(svbool_t, svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_x))) -svuint8_t svmla_x(svbool_t, svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_x))) -svuint32_t svmla_x(svbool_t, svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_x))) -svuint64_t svmla_x(svbool_t, svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_x))) -svuint16_t svmla_x(svbool_t, svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_x))) -svint8_t svmla_x(svbool_t, svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_x))) -svint32_t svmla_x(svbool_t, svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_x))) -svint64_t svmla_x(svbool_t, svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_x))) -svint16_t svmla_x(svbool_t, svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_z))) -svuint8_t svmla_z(svbool_t, svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_z))) -svuint32_t svmla_z(svbool_t, svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_z))) -svuint64_t svmla_z(svbool_t, svuint64_t, svuint64_t, 
svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_z))) -svuint16_t svmla_z(svbool_t, svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_z))) -svint8_t svmla_z(svbool_t, svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_z))) -svint32_t svmla_z(svbool_t, svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_z))) -svint64_t svmla_z(svbool_t, svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_z))) -svint16_t svmla_z(svbool_t, svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f64))) -svfloat64_t svmla_lane(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f32))) -svfloat32_t svmla_lane(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f16))) -svfloat16_t svmla_lane(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_m))) -svfloat64_t svmls_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_m))) -svfloat32_t svmls_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_m))) -svfloat16_t svmls_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_x))) -svfloat64_t svmls_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_x))) -svfloat32_t svmls_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_x))) 
-svfloat16_t svmls_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_z))) -svfloat64_t svmls_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_z))) -svfloat32_t svmls_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_z))) -svfloat16_t svmls_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_m))) -svuint8_t svmls_m(svbool_t, svuint8_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_m))) -svuint32_t svmls_m(svbool_t, svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_m))) -svuint64_t svmls_m(svbool_t, svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_m))) -svuint16_t svmls_m(svbool_t, svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_m))) -svint8_t svmls_m(svbool_t, svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_m))) -svint32_t svmls_m(svbool_t, svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_m))) -svint64_t svmls_m(svbool_t, svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_m))) -svint16_t svmls_m(svbool_t, svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_x))) -svuint8_t svmls_x(svbool_t, svuint8_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_x))) -svuint32_t svmls_x(svbool_t, svuint32_t, svuint32_t, uint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_x))) -svuint64_t svmls_x(svbool_t, svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_x))) -svuint16_t svmls_x(svbool_t, svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_x))) -svint8_t svmls_x(svbool_t, svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_x))) -svint32_t svmls_x(svbool_t, svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_x))) -svint64_t svmls_x(svbool_t, svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_x))) -svint16_t svmls_x(svbool_t, svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_z))) -svuint8_t svmls_z(svbool_t, svuint8_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_z))) -svuint32_t svmls_z(svbool_t, svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_z))) -svuint64_t svmls_z(svbool_t, svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_z))) -svuint16_t svmls_z(svbool_t, svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_z))) -svint8_t svmls_z(svbool_t, svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_z))) -svint32_t svmls_z(svbool_t, svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_z))) -svint64_t svmls_z(svbool_t, svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_z))) -svint16_t svmls_z(svbool_t, svint16_t, svint16_t, int16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_m))) -svfloat64_t svmls_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_m))) -svfloat32_t svmls_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_m))) -svfloat16_t svmls_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_x))) -svfloat64_t svmls_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_x))) -svfloat32_t svmls_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_x))) -svfloat16_t svmls_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_z))) -svfloat64_t svmls_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_z))) -svfloat32_t svmls_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_z))) -svfloat16_t svmls_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_m))) -svuint8_t svmls_m(svbool_t, svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_m))) -svuint32_t svmls_m(svbool_t, svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_m))) -svuint64_t svmls_m(svbool_t, svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_m))) -svuint16_t svmls_m(svbool_t, svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_m))) -svint8_t svmls_m(svbool_t, 
svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_m))) -svint32_t svmls_m(svbool_t, svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_m))) -svint64_t svmls_m(svbool_t, svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_m))) -svint16_t svmls_m(svbool_t, svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_x))) -svuint8_t svmls_x(svbool_t, svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_x))) -svuint32_t svmls_x(svbool_t, svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_x))) -svuint64_t svmls_x(svbool_t, svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_x))) -svuint16_t svmls_x(svbool_t, svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_x))) -svint8_t svmls_x(svbool_t, svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_x))) -svint32_t svmls_x(svbool_t, svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_x))) -svint64_t svmls_x(svbool_t, svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_x))) -svint16_t svmls_x(svbool_t, svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_z))) -svuint8_t svmls_z(svbool_t, svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_z))) -svuint32_t svmls_z(svbool_t, svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_z))) -svuint64_t svmls_z(svbool_t, svuint64_t, svuint64_t, 
svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_z))) -svuint16_t svmls_z(svbool_t, svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_z))) -svint8_t svmls_z(svbool_t, svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_z))) -svint32_t svmls_z(svbool_t, svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_z))) -svint64_t svmls_z(svbool_t, svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_z))) -svint16_t svmls_z(svbool_t, svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f64))) -svfloat64_t svmls_lane(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f32))) -svfloat32_t svmls_lane(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f16))) -svfloat16_t svmls_lane(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmov_b_z))) -svbool_t svmov_z(svbool_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_m))) -svfloat64_t svmsb_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_m))) -svfloat32_t svmsb_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_m))) -svfloat16_t svmsb_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_x))) -svfloat64_t svmsb_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_x))) -svfloat32_t svmsb_x(svbool_t, 
svfloat32_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_x))) -svfloat16_t svmsb_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_z))) -svfloat64_t svmsb_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_z))) -svfloat32_t svmsb_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_z))) -svfloat16_t svmsb_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_m))) -svuint8_t svmsb_m(svbool_t, svuint8_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_m))) -svuint32_t svmsb_m(svbool_t, svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_m))) -svuint64_t svmsb_m(svbool_t, svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_m))) -svuint16_t svmsb_m(svbool_t, svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_m))) -svint8_t svmsb_m(svbool_t, svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_m))) -svint32_t svmsb_m(svbool_t, svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_m))) -svint64_t svmsb_m(svbool_t, svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_m))) -svint16_t svmsb_m(svbool_t, svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_x))) -svuint8_t svmsb_x(svbool_t, svuint8_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_x))) -svuint32_t 
svmsb_x(svbool_t, svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_x))) -svuint64_t svmsb_x(svbool_t, svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_x))) -svuint16_t svmsb_x(svbool_t, svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_x))) -svint8_t svmsb_x(svbool_t, svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_x))) -svint32_t svmsb_x(svbool_t, svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_x))) -svint64_t svmsb_x(svbool_t, svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_x))) -svint16_t svmsb_x(svbool_t, svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_z))) -svuint8_t svmsb_z(svbool_t, svuint8_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_z))) -svuint32_t svmsb_z(svbool_t, svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_z))) -svuint64_t svmsb_z(svbool_t, svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_z))) -svuint16_t svmsb_z(svbool_t, svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_z))) -svint8_t svmsb_z(svbool_t, svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_z))) -svint32_t svmsb_z(svbool_t, svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_z))) -svint64_t svmsb_z(svbool_t, svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_z))) -svint16_t svmsb_z(svbool_t, 
svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_m))) -svfloat64_t svmsb_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_m))) -svfloat32_t svmsb_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_m))) -svfloat16_t svmsb_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_x))) -svfloat64_t svmsb_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_x))) -svfloat32_t svmsb_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_x))) -svfloat16_t svmsb_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_z))) -svfloat64_t svmsb_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_z))) -svfloat32_t svmsb_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_z))) -svfloat16_t svmsb_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_m))) -svuint8_t svmsb_m(svbool_t, svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_m))) -svuint32_t svmsb_m(svbool_t, svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_m))) -svuint64_t svmsb_m(svbool_t, svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_m))) -svuint16_t svmsb_m(svbool_t, svuint16_t, svuint16_t, svuint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_m))) -svint8_t svmsb_m(svbool_t, svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_m))) -svint32_t svmsb_m(svbool_t, svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_m))) -svint64_t svmsb_m(svbool_t, svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_m))) -svint16_t svmsb_m(svbool_t, svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_x))) -svuint8_t svmsb_x(svbool_t, svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_x))) -svuint32_t svmsb_x(svbool_t, svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_x))) -svuint64_t svmsb_x(svbool_t, svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_x))) -svuint16_t svmsb_x(svbool_t, svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_x))) -svint8_t svmsb_x(svbool_t, svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_x))) -svint32_t svmsb_x(svbool_t, svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_x))) -svint64_t svmsb_x(svbool_t, svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_x))) -svint16_t svmsb_x(svbool_t, svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_z))) -svuint8_t svmsb_z(svbool_t, svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_z))) -svuint32_t svmsb_z(svbool_t, svuint32_t, svuint32_t, svuint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_z))) -svuint64_t svmsb_z(svbool_t, svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_z))) -svuint16_t svmsb_z(svbool_t, svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_z))) -svint8_t svmsb_z(svbool_t, svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_z))) -svint32_t svmsb_z(svbool_t, svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_z))) -svint64_t svmsb_z(svbool_t, svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_z))) -svint16_t svmsb_z(svbool_t, svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_m))) -svfloat64_t svmul_m(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_m))) -svfloat32_t svmul_m(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_m))) -svfloat16_t svmul_m(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_x))) -svfloat64_t svmul_x(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_x))) -svfloat32_t svmul_x(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_x))) -svfloat16_t svmul_x(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_z))) -svfloat64_t svmul_z(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_z))) -svfloat32_t svmul_z(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_z))) 
-svfloat16_t svmul_z(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_m))) -svuint8_t svmul_m(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_m))) -svuint32_t svmul_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_m))) -svuint64_t svmul_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_m))) -svuint16_t svmul_m(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_m))) -svint8_t svmul_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_m))) -svint32_t svmul_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_m))) -svint64_t svmul_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_m))) -svint16_t svmul_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_x))) -svuint8_t svmul_x(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_x))) -svuint32_t svmul_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_x))) -svuint64_t svmul_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_x))) -svuint16_t svmul_x(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_x))) -svint8_t svmul_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_x))) -svint32_t svmul_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_x))) -svint64_t svmul_x(svbool_t, svint64_t, int64_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_x))) -svint16_t svmul_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_z))) -svuint8_t svmul_z(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_z))) -svuint32_t svmul_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_z))) -svuint64_t svmul_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_z))) -svuint16_t svmul_z(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_z))) -svint8_t svmul_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_z))) -svint32_t svmul_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_z))) -svint64_t svmul_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_z))) -svint16_t svmul_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_m))) -svfloat64_t svmul_m(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_m))) -svfloat32_t svmul_m(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_m))) -svfloat16_t svmul_m(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_x))) -svfloat64_t svmul_x(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_x))) -svfloat32_t svmul_x(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_x))) -svfloat16_t svmul_x(svbool_t, svfloat16_t, svfloat16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_z))) -svfloat64_t svmul_z(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_z))) -svfloat32_t svmul_z(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_z))) -svfloat16_t svmul_z(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_m))) -svuint8_t svmul_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_m))) -svuint32_t svmul_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_m))) -svuint64_t svmul_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_m))) -svuint16_t svmul_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_m))) -svint8_t svmul_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_m))) -svint32_t svmul_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_m))) -svint64_t svmul_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_m))) -svint16_t svmul_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_x))) -svuint8_t svmul_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_x))) -svuint32_t svmul_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_x))) -svuint64_t svmul_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_x))) -svuint16_t svmul_x(svbool_t, svuint16_t, svuint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_x))) -svint8_t svmul_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_x))) -svint32_t svmul_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_x))) -svint64_t svmul_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_x))) -svint16_t svmul_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_z))) -svuint8_t svmul_z(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_z))) -svuint32_t svmul_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_z))) -svuint64_t svmul_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_z))) -svuint16_t svmul_z(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_z))) -svint8_t svmul_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_z))) -svint32_t svmul_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_z))) -svint64_t svmul_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_z))) -svint16_t svmul_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f64))) -svfloat64_t svmul_lane(svfloat64_t, svfloat64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f32))) -svfloat32_t svmul_lane(svfloat32_t, svfloat32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f16))) -svfloat16_t svmul_lane(svfloat16_t, svfloat16_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_m))) -svint8_t svmulh_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_m))) -svint32_t svmulh_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_m))) -svint64_t svmulh_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_m))) -svint16_t svmulh_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_x))) -svint8_t svmulh_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_x))) -svint32_t svmulh_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_x))) -svint64_t svmulh_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_x))) -svint16_t svmulh_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_z))) -svint8_t svmulh_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_z))) -svint32_t svmulh_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_z))) -svint64_t svmulh_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_z))) -svint16_t svmulh_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_m))) -svuint8_t svmulh_m(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_m))) -svuint32_t svmulh_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_m))) -svuint64_t svmulh_m(svbool_t, svuint64_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_m))) -svuint16_t svmulh_m(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_x))) -svuint8_t svmulh_x(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_x))) -svuint32_t svmulh_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_x))) -svuint64_t svmulh_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_x))) -svuint16_t svmulh_x(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_z))) -svuint8_t svmulh_z(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_z))) -svuint32_t svmulh_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_z))) -svuint64_t svmulh_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_z))) -svuint16_t svmulh_z(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_m))) -svint8_t svmulh_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_m))) -svint32_t svmulh_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_m))) -svint64_t svmulh_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_m))) -svint16_t svmulh_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_x))) -svint8_t svmulh_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_x))) -svint32_t svmulh_x(svbool_t, svint32_t, svint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_x))) -svint64_t svmulh_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_x))) -svint16_t svmulh_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_z))) -svint8_t svmulh_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_z))) -svint32_t svmulh_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_z))) -svint64_t svmulh_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_z))) -svint16_t svmulh_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_m))) -svuint8_t svmulh_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_m))) -svuint32_t svmulh_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_m))) -svuint64_t svmulh_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_m))) -svuint16_t svmulh_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_x))) -svuint8_t svmulh_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_x))) -svuint32_t svmulh_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_x))) -svuint64_t svmulh_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_x))) -svuint16_t svmulh_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_z))) -svuint8_t svmulh_z(svbool_t, svuint8_t, svuint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_z))) -svuint32_t svmulh_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_z))) -svuint64_t svmulh_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_z))) -svuint16_t svmulh_z(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_m))) -svfloat64_t svmulx_m(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_m))) -svfloat32_t svmulx_m(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_m))) -svfloat16_t svmulx_m(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_x))) -svfloat64_t svmulx_x(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_x))) -svfloat32_t svmulx_x(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_x))) -svfloat16_t svmulx_x(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_z))) -svfloat64_t svmulx_z(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_z))) -svfloat32_t svmulx_z(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_z))) -svfloat16_t svmulx_z(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_m))) -svfloat64_t svmulx_m(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_m))) -svfloat32_t svmulx_m(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_m))) -svfloat16_t 
svmulx_m(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_x))) -svfloat64_t svmulx_x(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_x))) -svfloat32_t svmulx_x(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_x))) -svfloat16_t svmulx_x(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_z))) -svfloat64_t svmulx_z(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_z))) -svfloat32_t svmulx_z(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_z))) -svfloat16_t svmulx_z(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnand_b_z))) -svbool_t svnand_z(svbool_t, svbool_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_m))) -svfloat64_t svneg_m(svfloat64_t, svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_m))) -svfloat32_t svneg_m(svfloat32_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_m))) -svfloat16_t svneg_m(svfloat16_t, svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_x))) -svfloat64_t svneg_x(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_x))) -svfloat32_t svneg_x(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_x))) -svfloat16_t svneg_x(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_z))) -svfloat64_t svneg_z(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_z))) -svfloat32_t svneg_z(svbool_t, 
svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_z))) -svfloat16_t svneg_z(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_m))) -svint8_t svneg_m(svint8_t, svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_m))) -svint32_t svneg_m(svint32_t, svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_m))) -svint64_t svneg_m(svint64_t, svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_m))) -svint16_t svneg_m(svint16_t, svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_x))) -svint8_t svneg_x(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_x))) -svint32_t svneg_x(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_x))) -svint64_t svneg_x(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_x))) -svint16_t svneg_x(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_z))) -svint8_t svneg_z(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_z))) -svint32_t svneg_z(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_z))) -svint64_t svneg_z(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_z))) -svint16_t svneg_z(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_m))) -svfloat64_t svnmad_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_m))) -svfloat32_t svnmad_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_m))) -svfloat16_t svnmad_m(svbool_t, 
svfloat16_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_x))) -svfloat64_t svnmad_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_x))) -svfloat32_t svnmad_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_x))) -svfloat16_t svnmad_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_z))) -svfloat64_t svnmad_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_z))) -svfloat32_t svnmad_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_z))) -svfloat16_t svnmad_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_m))) -svfloat64_t svnmad_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_m))) -svfloat32_t svnmad_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_m))) -svfloat16_t svnmad_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_x))) -svfloat64_t svnmad_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_x))) -svfloat32_t svnmad_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_x))) -svfloat16_t svnmad_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_z))) -svfloat64_t svnmad_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_z))) -svfloat32_t svnmad_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_z))) -svfloat16_t svnmad_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_m))) -svfloat64_t svnmla_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_m))) -svfloat32_t svnmla_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_m))) -svfloat16_t svnmla_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_x))) -svfloat64_t svnmla_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_x))) -svfloat32_t svnmla_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_x))) -svfloat16_t svnmla_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_z))) -svfloat64_t svnmla_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_z))) -svfloat32_t svnmla_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_z))) -svfloat16_t svnmla_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_m))) -svfloat64_t svnmla_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_m))) -svfloat32_t svnmla_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_m))) -svfloat16_t svnmla_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_x))) -svfloat64_t svnmla_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_x))) -svfloat32_t svnmla_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_x))) -svfloat16_t svnmla_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_z))) -svfloat64_t svnmla_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_z))) -svfloat32_t svnmla_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_z))) -svfloat16_t svnmla_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_m))) -svfloat64_t svnmls_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_m))) -svfloat32_t svnmls_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_m))) -svfloat16_t svnmls_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_x))) -svfloat64_t svnmls_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_x))) -svfloat32_t svnmls_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_x))) -svfloat16_t svnmls_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_z))) -svfloat64_t svnmls_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_z))) -svfloat32_t svnmls_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_z))) -svfloat16_t svnmls_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_m))) -svfloat64_t svnmls_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_m))) -svfloat32_t svnmls_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_m))) -svfloat16_t svnmls_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_x))) -svfloat64_t svnmls_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_x))) -svfloat32_t svnmls_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_x))) -svfloat16_t svnmls_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_z))) -svfloat64_t svnmls_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_z))) -svfloat32_t svnmls_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_z))) -svfloat16_t svnmls_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_m))) -svfloat64_t svnmsb_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_m))) -svfloat32_t svnmsb_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_m))) -svfloat16_t svnmsb_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_x))) -svfloat64_t svnmsb_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_x))) -svfloat32_t svnmsb_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_x))) -svfloat16_t svnmsb_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_z))) -svfloat64_t svnmsb_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_z))) -svfloat32_t svnmsb_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_z))) -svfloat16_t svnmsb_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_m))) -svfloat64_t svnmsb_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_m))) -svfloat32_t svnmsb_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_m))) -svfloat16_t svnmsb_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_x))) -svfloat64_t svnmsb_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_x))) -svfloat32_t svnmsb_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_x))) -svfloat16_t svnmsb_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_z))) -svfloat64_t svnmsb_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_z))) -svfloat32_t svnmsb_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_z))) -svfloat16_t svnmsb_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnor_b_z))) -svbool_t svnor_z(svbool_t, svbool_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_b_z))) -svbool_t svnot_z(svbool_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_m))) -svuint8_t svnot_m(svuint8_t, svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_m))) -svuint32_t svnot_m(svuint32_t, svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_m))) -svuint64_t svnot_m(svuint64_t, svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_m))) -svuint16_t svnot_m(svuint16_t, svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_m))) -svint8_t svnot_m(svint8_t, svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_m))) -svint32_t svnot_m(svint32_t, svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_m))) -svint64_t svnot_m(svint64_t, svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_m))) -svint16_t svnot_m(svint16_t, svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_x))) -svuint8_t svnot_x(svbool_t, svuint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_x))) -svuint32_t svnot_x(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_x))) -svuint64_t svnot_x(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_x))) -svuint16_t svnot_x(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_x))) -svint8_t svnot_x(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_x))) -svint32_t svnot_x(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_x))) -svint64_t svnot_x(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_x))) -svint16_t svnot_x(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_z))) -svuint8_t svnot_z(svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_z))) -svuint32_t svnot_z(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_z))) -svuint64_t svnot_z(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_z))) -svuint16_t svnot_z(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_z))) -svint8_t svnot_z(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_z))) -svint32_t svnot_z(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_z))) -svint64_t svnot_z(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_z))) -svint16_t svnot_z(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorn_b_z))) -svbool_t svorn_z(svbool_t, svbool_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_b_z))) -svbool_t svorr_z(svbool_t, 
svbool_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_m))) -svuint8_t svorr_m(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_m))) -svuint32_t svorr_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_m))) -svuint64_t svorr_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_m))) -svuint16_t svorr_m(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_m))) -svint8_t svorr_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_m))) -svint32_t svorr_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_m))) -svint64_t svorr_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_m))) -svint16_t svorr_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_x))) -svuint8_t svorr_x(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_x))) -svuint32_t svorr_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_x))) -svuint64_t svorr_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_x))) -svuint16_t svorr_x(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_x))) -svint8_t svorr_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_x))) -svint32_t svorr_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_x))) -svint64_t svorr_x(svbool_t, svint64_t, int64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_x))) -svint16_t svorr_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_z))) -svuint8_t svorr_z(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_z))) -svuint32_t svorr_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_z))) -svuint64_t svorr_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_z))) -svuint16_t svorr_z(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_z))) -svint8_t svorr_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_z))) -svint32_t svorr_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_z))) -svint64_t svorr_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_z))) -svint16_t svorr_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_m))) -svuint8_t svorr_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_m))) -svuint32_t svorr_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_m))) -svuint64_t svorr_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_m))) -svuint16_t svorr_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_m))) -svint8_t svorr_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_m))) -svint32_t svorr_m(svbool_t, svint32_t, svint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_m))) -svint64_t svorr_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_m))) -svint16_t svorr_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_x))) -svuint8_t svorr_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_x))) -svuint32_t svorr_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_x))) -svuint64_t svorr_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_x))) -svuint16_t svorr_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_x))) -svint8_t svorr_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_x))) -svint32_t svorr_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_x))) -svint64_t svorr_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_x))) -svint16_t svorr_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_z))) -svuint8_t svorr_z(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_z))) -svuint32_t svorr_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_z))) -svuint64_t svorr_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_z))) -svuint16_t svorr_z(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_z))) -svint8_t svorr_z(svbool_t, svint8_t, svint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_z))) -svint32_t svorr_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_z))) -svint64_t svorr_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_z))) -svint16_t svorr_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u8))) -uint8_t svorv(svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u32))) -uint32_t svorv(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u64))) -uint64_t svorv(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u16))) -uint16_t svorv(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s8))) -int8_t svorv(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s32))) -int32_t svorv(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s64))) -int64_t svorv(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s16))) -int16_t svorv(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfalse_b))) -svbool_t svpfalse(void); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfirst_b))) -svbool_t svpfirst(svbool_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base))) -void svprfb_gather(svbool_t, svuint32_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base))) -void svprfb_gather(svbool_t, svuint64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base_offset))) -void svprfb_gather_offset(svbool_t, svuint32_t, int64_t, enum svprfop); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base_offset))) -void svprfb_gather_offset(svbool_t, svuint64_t, int64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s32offset))) -void svprfb_gather_offset(svbool_t, void const *, svint32_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32offset))) -void svprfb_gather_offset(svbool_t, void const *, svuint32_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s64offset))) -void svprfb_gather_offset(svbool_t, void const *, svint64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64offset))) -void svprfb_gather_offset(svbool_t, void const *, svuint64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base))) -void svprfd_gather(svbool_t, svuint32_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base))) -void svprfd_gather(svbool_t, svuint64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base_index))) -void svprfd_gather_index(svbool_t, svuint32_t, int64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base_index))) -void svprfd_gather_index(svbool_t, svuint64_t, int64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s32index))) -void svprfd_gather_index(svbool_t, void const *, svint32_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32index))) -void svprfd_gather_index(svbool_t, void const *, svuint32_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s64index))) -void svprfd_gather_index(svbool_t, void const *, svint64_t, enum svprfop); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64index))) -void svprfd_gather_index(svbool_t, void const *, svuint64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base))) -void svprfh_gather(svbool_t, svuint32_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base))) -void svprfh_gather(svbool_t, svuint64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base_index))) -void svprfh_gather_index(svbool_t, svuint32_t, int64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base_index))) -void svprfh_gather_index(svbool_t, svuint64_t, int64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s32index))) -void svprfh_gather_index(svbool_t, void const *, svint32_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32index))) -void svprfh_gather_index(svbool_t, void const *, svuint32_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s64index))) -void svprfh_gather_index(svbool_t, void const *, svint64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64index))) -void svprfh_gather_index(svbool_t, void const *, svuint64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base))) -void svprfw_gather(svbool_t, svuint32_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base))) -void svprfw_gather(svbool_t, svuint64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base_index))) -void svprfw_gather_index(svbool_t, svuint32_t, int64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base_index))) -void 
svprfw_gather_index(svbool_t, svuint64_t, int64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s32index))) -void svprfw_gather_index(svbool_t, void const *, svint32_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32index))) -void svprfw_gather_index(svbool_t, void const *, svuint32_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s64index))) -void svprfw_gather_index(svbool_t, void const *, svint64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64index))) -void svprfw_gather_index(svbool_t, void const *, svuint64_t, enum svprfop); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8))) -svint8_t svqadd(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32))) -svint32_t svqadd(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64))) -svint64_t svqadd(svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16))) -svint16_t svqadd(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8))) -svuint8_t svqadd(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32))) -svuint32_t svqadd(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64))) -svuint64_t svqadd(svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16))) -svuint16_t svqadd(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8))) -svint8_t svqadd(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32))) -svint32_t svqadd(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64))) -svint64_t svqadd(svint64_t, 
svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16))) -svint16_t svqadd(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8))) -svuint8_t svqadd(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32))) -svuint32_t svqadd(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64))) -svuint64_t svqadd(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16))) -svuint16_t svqadd(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s32))) -int32_t svqdecb(int32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s64))) -int64_t svqdecb(int64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u32))) -uint32_t svqdecb(uint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u64))) -uint64_t svqdecb(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s32))) -int32_t svqdecb_pat(int32_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s64))) -int64_t svqdecb_pat(int64_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u32))) -uint32_t svqdecb_pat(uint32_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u64))) -uint64_t svqdecb_pat(uint64_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s32))) -int32_t svqdecd(int32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s64))) -int64_t svqdecd(int64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u32))) -uint32_t svqdecd(uint32_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u64))) -uint64_t svqdecd(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_s64))) -svint64_t svqdecd(svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_u64))) -svuint64_t svqdecd(svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s32))) -int32_t svqdecd_pat(int32_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s64))) -int64_t svqdecd_pat(int64_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u32))) -uint32_t svqdecd_pat(uint32_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u64))) -uint64_t svqdecd_pat(uint64_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_s64))) -svint64_t svqdecd_pat(svint64_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_u64))) -svuint64_t svqdecd_pat(svuint64_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s32))) -int32_t svqdech(int32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s64))) -int64_t svqdech(int64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u32))) -uint32_t svqdech(uint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u64))) -uint64_t svqdech(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_s16))) -svint16_t svqdech(svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_u16))) -svuint16_t svqdech(svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s32))) -int32_t 
svqdech_pat(int32_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s64))) -int64_t svqdech_pat(int64_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u32))) -uint32_t svqdech_pat(uint32_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u64))) -uint64_t svqdech_pat(uint64_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_s16))) -svint16_t svqdech_pat(svint16_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_u16))) -svuint16_t svqdech_pat(svuint16_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b8))) -int32_t svqdecp_b8(int32_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b32))) -int32_t svqdecp_b32(int32_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b64))) -int32_t svqdecp_b64(int32_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b16))) -int32_t svqdecp_b16(int32_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b8))) -int64_t svqdecp_b8(int64_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b32))) -int64_t svqdecp_b32(int64_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b64))) -int64_t svqdecp_b64(int64_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b16))) -int64_t svqdecp_b16(int64_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b8))) -uint32_t svqdecp_b8(uint32_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b32))) -uint32_t svqdecp_b32(uint32_t, svbool_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b64))) -uint32_t svqdecp_b64(uint32_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b16))) -uint32_t svqdecp_b16(uint32_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b8))) -uint64_t svqdecp_b8(uint64_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b32))) -uint64_t svqdecp_b32(uint64_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b64))) -uint64_t svqdecp_b64(uint64_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b16))) -uint64_t svqdecp_b16(uint64_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s32))) -svint32_t svqdecp(svint32_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s64))) -svint64_t svqdecp(svint64_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s16))) -svint16_t svqdecp(svint16_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u32))) -svuint32_t svqdecp(svuint32_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u64))) -svuint64_t svqdecp(svuint64_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u16))) -svuint16_t svqdecp(svuint16_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s32))) -int32_t svqdecw(int32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s64))) -int64_t svqdecw(int64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u32))) -uint32_t svqdecw(uint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u64))) -uint64_t svqdecw(uint64_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_s32))) -svint32_t svqdecw(svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_u32))) -svuint32_t svqdecw(svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s32))) -int32_t svqdecw_pat(int32_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s64))) -int64_t svqdecw_pat(int64_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u32))) -uint32_t svqdecw_pat(uint32_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u64))) -uint64_t svqdecw_pat(uint64_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_s32))) -svint32_t svqdecw_pat(svint32_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_u32))) -svuint32_t svqdecw_pat(svuint32_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s32))) -int32_t svqincb(int32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s64))) -int64_t svqincb(int64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u32))) -uint32_t svqincb(uint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u64))) -uint64_t svqincb(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s32))) -int32_t svqincb_pat(int32_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s64))) -int64_t svqincb_pat(int64_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u32))) -uint32_t svqincb_pat(uint32_t, enum svpattern, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u64))) -uint64_t svqincb_pat(uint64_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s32))) -int32_t svqincd(int32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s64))) -int64_t svqincd(int64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u32))) -uint32_t svqincd(uint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u64))) -uint64_t svqincd(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_s64))) -svint64_t svqincd(svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_u64))) -svuint64_t svqincd(svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s32))) -int32_t svqincd_pat(int32_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s64))) -int64_t svqincd_pat(int64_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u32))) -uint32_t svqincd_pat(uint32_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u64))) -uint64_t svqincd_pat(uint64_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_s64))) -svint64_t svqincd_pat(svint64_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_u64))) -svuint64_t svqincd_pat(svuint64_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s32))) -int32_t svqinch(int32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s64))) -int64_t svqinch(int64_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u32))) -uint32_t svqinch(uint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u64))) -uint64_t svqinch(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_s16))) -svint16_t svqinch(svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_u16))) -svuint16_t svqinch(svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s32))) -int32_t svqinch_pat(int32_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s64))) -int64_t svqinch_pat(int64_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u32))) -uint32_t svqinch_pat(uint32_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u64))) -uint64_t svqinch_pat(uint64_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_s16))) -svint16_t svqinch_pat(svint16_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_u16))) -svuint16_t svqinch_pat(svuint16_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b8))) -int32_t svqincp_b8(int32_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b32))) -int32_t svqincp_b32(int32_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b64))) -int32_t svqincp_b64(int32_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b16))) -int32_t svqincp_b16(int32_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b8))) -int64_t svqincp_b8(int64_t, svbool_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b32))) -int64_t svqincp_b32(int64_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b64))) -int64_t svqincp_b64(int64_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b16))) -int64_t svqincp_b16(int64_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b8))) -uint32_t svqincp_b8(uint32_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b32))) -uint32_t svqincp_b32(uint32_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b64))) -uint32_t svqincp_b64(uint32_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b16))) -uint32_t svqincp_b16(uint32_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b8))) -uint64_t svqincp_b8(uint64_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b32))) -uint64_t svqincp_b32(uint64_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b64))) -uint64_t svqincp_b64(uint64_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b16))) -uint64_t svqincp_b16(uint64_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s32))) -svint32_t svqincp(svint32_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s64))) -svint64_t svqincp(svint64_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s16))) -svint16_t svqincp(svint16_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u32))) -svuint32_t svqincp(svuint32_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u64))) -svuint64_t svqincp(svuint64_t, svbool_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u16))) -svuint16_t svqincp(svuint16_t, svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s32))) -int32_t svqincw(int32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s64))) -int64_t svqincw(int64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u32))) -uint32_t svqincw(uint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u64))) -uint64_t svqincw(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_s32))) -svint32_t svqincw(svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_u32))) -svuint32_t svqincw(svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s32))) -int32_t svqincw_pat(int32_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s64))) -int64_t svqincw_pat(int64_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u32))) -uint32_t svqincw_pat(uint32_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u64))) -uint64_t svqincw_pat(uint64_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_s32))) -svint32_t svqincw_pat(svint32_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_u32))) -svuint32_t svqincw_pat(svuint32_t, enum svpattern, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8))) -svint8_t svqsub(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32))) -svint32_t svqsub(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64))) -svint64_t 
svqsub(svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16))) -svint16_t svqsub(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8))) -svuint8_t svqsub(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32))) -svuint32_t svqsub(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64))) -svuint64_t svqsub(svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16))) -svuint16_t svqsub(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8))) -svint8_t svqsub(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32))) -svint32_t svqsub(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64))) -svint64_t svqsub(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16))) -svint16_t svqsub(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8))) -svuint8_t svqsub(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32))) -svuint32_t svqsub(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64))) -svuint64_t svqsub(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16))) -svuint16_t svqsub(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_m))) -svuint8_t svrbit_m(svuint8_t, svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_m))) -svuint32_t svrbit_m(svuint32_t, svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_m))) -svuint64_t svrbit_m(svuint64_t, svbool_t, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_m))) -svuint16_t svrbit_m(svuint16_t, svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_m))) -svint8_t svrbit_m(svint8_t, svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_m))) -svint32_t svrbit_m(svint32_t, svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_m))) -svint64_t svrbit_m(svint64_t, svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_m))) -svint16_t svrbit_m(svint16_t, svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_x))) -svuint8_t svrbit_x(svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_x))) -svuint32_t svrbit_x(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_x))) -svuint64_t svrbit_x(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_x))) -svuint16_t svrbit_x(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_x))) -svint8_t svrbit_x(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_x))) -svint32_t svrbit_x(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_x))) -svint64_t svrbit_x(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_x))) -svint16_t svrbit_x(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_z))) -svuint8_t svrbit_z(svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_z))) -svuint32_t svrbit_z(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_z))) -svuint64_t svrbit_z(svbool_t, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_z))) -svuint16_t svrbit_z(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_z))) -svint8_t svrbit_z(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_z))) -svint32_t svrbit_z(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_z))) -svint64_t svrbit_z(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_z))) -svint16_t svrbit_z(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f64))) -svfloat64_t svrecpe(svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f32))) -svfloat32_t svrecpe(svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f16))) -svfloat16_t svrecpe(svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f64))) -svfloat64_t svrecps(svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f32))) -svfloat32_t svrecps(svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f16))) -svfloat16_t svrecps(svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_m))) -svfloat64_t svrecpx_m(svfloat64_t, svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_m))) -svfloat32_t svrecpx_m(svfloat32_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_m))) -svfloat16_t svrecpx_m(svfloat16_t, svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_x))) -svfloat64_t svrecpx_x(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_x))) -svfloat32_t svrecpx_x(svbool_t, svfloat32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_x))) -svfloat16_t svrecpx_x(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_z))) -svfloat64_t svrecpx_z(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_z))) -svfloat32_t svrecpx_z(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_z))) -svfloat16_t svrecpx_z(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u8))) -svuint8_t svrev(svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u32))) -svuint32_t svrev(svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u64))) -svuint64_t svrev(svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u16))) -svuint16_t svrev(svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s8))) -svint8_t svrev(svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f64))) -svfloat64_t svrev(svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f32))) -svfloat32_t svrev(svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f16))) -svfloat16_t svrev(svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s32))) -svint32_t svrev(svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s64))) -svint64_t svrev(svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s16))) -svint16_t svrev(svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_m))) -svuint32_t svrevb_m(svuint32_t, svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_m))) -svuint64_t svrevb_m(svuint64_t, svbool_t, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_m))) -svuint16_t svrevb_m(svuint16_t, svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_m))) -svint32_t svrevb_m(svint32_t, svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_m))) -svint64_t svrevb_m(svint64_t, svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_m))) -svint16_t svrevb_m(svint16_t, svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_x))) -svuint32_t svrevb_x(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_x))) -svuint64_t svrevb_x(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_x))) -svuint16_t svrevb_x(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_x))) -svint32_t svrevb_x(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_x))) -svint64_t svrevb_x(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_x))) -svint16_t svrevb_x(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_z))) -svuint32_t svrevb_z(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_z))) -svuint64_t svrevb_z(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_z))) -svuint16_t svrevb_z(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_z))) -svint32_t svrevb_z(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_z))) -svint64_t svrevb_z(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_z))) -svint16_t svrevb_z(svbool_t, svint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_m))) -svuint32_t svrevh_m(svuint32_t, svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_m))) -svuint64_t svrevh_m(svuint64_t, svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_m))) -svint32_t svrevh_m(svint32_t, svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_m))) -svint64_t svrevh_m(svint64_t, svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_x))) -svuint32_t svrevh_x(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_x))) -svuint64_t svrevh_x(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_x))) -svint32_t svrevh_x(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_x))) -svint64_t svrevh_x(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_z))) -svuint32_t svrevh_z(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_z))) -svuint64_t svrevh_z(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_z))) -svint32_t svrevh_z(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_z))) -svint64_t svrevh_z(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_m))) -svuint64_t svrevw_m(svuint64_t, svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_m))) -svint64_t svrevw_m(svint64_t, svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_x))) -svuint64_t svrevw_x(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_x))) -svint64_t svrevw_x(svbool_t, svint64_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_z))) -svuint64_t svrevw_z(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_z))) -svint64_t svrevw_z(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_m))) -svfloat64_t svrinta_m(svfloat64_t, svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_m))) -svfloat32_t svrinta_m(svfloat32_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_m))) -svfloat16_t svrinta_m(svfloat16_t, svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_x))) -svfloat64_t svrinta_x(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x))) -svfloat32_t svrinta_x(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_x))) -svfloat16_t svrinta_x(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_z))) -svfloat64_t svrinta_z(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_z))) -svfloat32_t svrinta_z(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_z))) -svfloat16_t svrinta_z(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_m))) -svfloat64_t svrinti_m(svfloat64_t, svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_m))) -svfloat32_t svrinti_m(svfloat32_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_m))) -svfloat16_t svrinti_m(svfloat16_t, svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_x))) -svfloat64_t svrinti_x(svbool_t, svfloat64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_x))) -svfloat32_t svrinti_x(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_x))) -svfloat16_t svrinti_x(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_z))) -svfloat64_t svrinti_z(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_z))) -svfloat32_t svrinti_z(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_z))) -svfloat16_t svrinti_z(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_m))) -svfloat64_t svrintm_m(svfloat64_t, svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_m))) -svfloat32_t svrintm_m(svfloat32_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_m))) -svfloat16_t svrintm_m(svfloat16_t, svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_x))) -svfloat64_t svrintm_x(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x))) -svfloat32_t svrintm_x(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_x))) -svfloat16_t svrintm_x(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_z))) -svfloat64_t svrintm_z(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_z))) -svfloat32_t svrintm_z(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_z))) -svfloat16_t svrintm_z(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_m))) -svfloat64_t svrintn_m(svfloat64_t, svbool_t, svfloat64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_m))) -svfloat32_t svrintn_m(svfloat32_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_m))) -svfloat16_t svrintn_m(svfloat16_t, svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_x))) -svfloat64_t svrintn_x(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x))) -svfloat32_t svrintn_x(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_x))) -svfloat16_t svrintn_x(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_z))) -svfloat64_t svrintn_z(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_z))) -svfloat32_t svrintn_z(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_z))) -svfloat16_t svrintn_z(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_m))) -svfloat64_t svrintp_m(svfloat64_t, svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_m))) -svfloat32_t svrintp_m(svfloat32_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_m))) -svfloat16_t svrintp_m(svfloat16_t, svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_x))) -svfloat64_t svrintp_x(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x))) -svfloat32_t svrintp_x(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_x))) -svfloat16_t svrintp_x(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_z))) -svfloat64_t svrintp_z(svbool_t, svfloat64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_z))) -svfloat32_t svrintp_z(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_z))) -svfloat16_t svrintp_z(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_m))) -svfloat64_t svrintx_m(svfloat64_t, svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_m))) -svfloat32_t svrintx_m(svfloat32_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_m))) -svfloat16_t svrintx_m(svfloat16_t, svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_x))) -svfloat64_t svrintx_x(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_x))) -svfloat32_t svrintx_x(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_x))) -svfloat16_t svrintx_x(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_z))) -svfloat64_t svrintx_z(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_z))) -svfloat32_t svrintx_z(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_z))) -svfloat16_t svrintx_z(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_m))) -svfloat64_t svrintz_m(svfloat64_t, svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_m))) -svfloat32_t svrintz_m(svfloat32_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_m))) -svfloat16_t svrintz_m(svfloat16_t, svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_x))) -svfloat64_t svrintz_x(svbool_t, svfloat64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_x))) -svfloat32_t svrintz_x(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_x))) -svfloat16_t svrintz_x(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_z))) -svfloat64_t svrintz_z(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_z))) -svfloat32_t svrintz_z(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_z))) -svfloat16_t svrintz_z(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f64))) -svfloat64_t svrsqrte(svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f32))) -svfloat32_t svrsqrte(svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f16))) -svfloat16_t svrsqrte(svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f64))) -svfloat64_t svrsqrts(svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f32))) -svfloat32_t svrsqrts(svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f16))) -svfloat16_t svrsqrts(svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_m))) -svfloat64_t svscale_m(svbool_t, svfloat64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_m))) -svfloat32_t svscale_m(svbool_t, svfloat32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_m))) -svfloat16_t svscale_m(svbool_t, svfloat16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_x))) -svfloat64_t svscale_x(svbool_t, svfloat64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_x))) -svfloat32_t 
svscale_x(svbool_t, svfloat32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_x))) -svfloat16_t svscale_x(svbool_t, svfloat16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_z))) -svfloat64_t svscale_z(svbool_t, svfloat64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_z))) -svfloat32_t svscale_z(svbool_t, svfloat32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_z))) -svfloat16_t svscale_z(svbool_t, svfloat16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_m))) -svfloat64_t svscale_m(svbool_t, svfloat64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_m))) -svfloat32_t svscale_m(svbool_t, svfloat32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_m))) -svfloat16_t svscale_m(svbool_t, svfloat16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_x))) -svfloat64_t svscale_x(svbool_t, svfloat64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_x))) -svfloat32_t svscale_x(svbool_t, svfloat32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_x))) -svfloat16_t svscale_x(svbool_t, svfloat16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_z))) -svfloat64_t svscale_z(svbool_t, svfloat64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_z))) -svfloat32_t svscale_z(svbool_t, svfloat32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_z))) -svfloat16_t svscale_z(svbool_t, svfloat16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_b))) -svbool_t svsel(svbool_t, svbool_t, svbool_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8))) -svuint8_t svsel(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32))) -svuint32_t svsel(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64))) -svuint64_t svsel(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16))) -svuint16_t svsel(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8))) -svint8_t svsel(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64))) -svfloat64_t svsel(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32))) -svfloat32_t svsel(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16))) -svfloat16_t svsel(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32))) -svint32_t svsel(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64))) -svint64_t svsel(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16))) -svint16_t svsel(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u8))) -svuint8x2_t svset2(svuint8x2_t, uint64_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u32))) -svuint32x2_t svset2(svuint32x2_t, uint64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u64))) -svuint64x2_t svset2(svuint64x2_t, uint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u16))) -svuint16x2_t svset2(svuint16x2_t, uint64_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s8))) 
-svint8x2_t svset2(svint8x2_t, uint64_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f64))) -svfloat64x2_t svset2(svfloat64x2_t, uint64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f32))) -svfloat32x2_t svset2(svfloat32x2_t, uint64_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f16))) -svfloat16x2_t svset2(svfloat16x2_t, uint64_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s32))) -svint32x2_t svset2(svint32x2_t, uint64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s64))) -svint64x2_t svset2(svint64x2_t, uint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s16))) -svint16x2_t svset2(svint16x2_t, uint64_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u8))) -svuint8x3_t svset3(svuint8x3_t, uint64_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u32))) -svuint32x3_t svset3(svuint32x3_t, uint64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u64))) -svuint64x3_t svset3(svuint64x3_t, uint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u16))) -svuint16x3_t svset3(svuint16x3_t, uint64_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s8))) -svint8x3_t svset3(svint8x3_t, uint64_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f64))) -svfloat64x3_t svset3(svfloat64x3_t, uint64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f32))) -svfloat32x3_t svset3(svfloat32x3_t, uint64_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f16))) -svfloat16x3_t svset3(svfloat16x3_t, uint64_t, svfloat16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s32))) -svint32x3_t svset3(svint32x3_t, uint64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s64))) -svint64x3_t svset3(svint64x3_t, uint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s16))) -svint16x3_t svset3(svint16x3_t, uint64_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u8))) -svuint8x4_t svset4(svuint8x4_t, uint64_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u32))) -svuint32x4_t svset4(svuint32x4_t, uint64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u64))) -svuint64x4_t svset4(svuint64x4_t, uint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u16))) -svuint16x4_t svset4(svuint16x4_t, uint64_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s8))) -svint8x4_t svset4(svint8x4_t, uint64_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f64))) -svfloat64x4_t svset4(svfloat64x4_t, uint64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f32))) -svfloat32x4_t svset4(svfloat32x4_t, uint64_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f16))) -svfloat16x4_t svset4(svfloat16x4_t, uint64_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s32))) -svint32x4_t svset4(svint32x4_t, uint64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s64))) -svint64x4_t svset4(svint64x4_t, uint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s16))) -svint16x4_t svset4(svint16x4_t, uint64_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u8))) -svuint8_t svsplice(svbool_t, svuint8_t, svuint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u32))) -svuint32_t svsplice(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u64))) -svuint64_t svsplice(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u16))) -svuint16_t svsplice(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s8))) -svint8_t svsplice(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f64))) -svfloat64_t svsplice(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f32))) -svfloat32_t svsplice(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f16))) -svfloat16_t svsplice(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s32))) -svint32_t svsplice(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s64))) -svint64_t svsplice(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s16))) -svint16_t svsplice(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_m))) -svfloat64_t svsqrt_m(svfloat64_t, svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_m))) -svfloat32_t svsqrt_m(svfloat32_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_m))) -svfloat16_t svsqrt_m(svfloat16_t, svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_x))) -svfloat64_t svsqrt_x(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_x))) -svfloat32_t svsqrt_x(svbool_t, svfloat32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_x))) -svfloat16_t svsqrt_x(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_z))) -svfloat64_t svsqrt_z(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_z))) -svfloat32_t svsqrt_z(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_z))) -svfloat16_t svsqrt_z(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8))) -void svst1(svbool_t, uint8_t *, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32))) -void svst1(svbool_t, uint32_t *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64))) -void svst1(svbool_t, uint64_t *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16))) -void svst1(svbool_t, uint16_t *, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8))) -void svst1(svbool_t, int8_t *, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64))) -void svst1(svbool_t, float64_t *, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32))) -void svst1(svbool_t, float32_t *, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16))) -void svst1(svbool_t, float16_t *, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32))) -void svst1(svbool_t, int32_t *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64))) -void svst1(svbool_t, int64_t *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16))) -void svst1(svbool_t, int16_t *, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_u32))) -void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_u64))) -void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_f64))) -void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_f32))) -void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_s32))) -void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_s64))) -void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_u32))) -void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_u64))) -void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_f64))) -void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_f32))) -void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_s32))) -void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_s64))) -void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_u32))) -void svst1_scatter(svbool_t, svuint32_t, svuint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_u64))) -void svst1_scatter(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_f64))) -void svst1_scatter(svbool_t, svuint64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_f32))) -void svst1_scatter(svbool_t, svuint32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_s32))) -void svst1_scatter(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_s64))) -void svst1_scatter(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_u32))) -void svst1_scatter_index(svbool_t, uint32_t *, svint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_f32))) -void svst1_scatter_index(svbool_t, float32_t *, svint32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_s32))) -void svst1_scatter_index(svbool_t, int32_t *, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_u32))) -void svst1_scatter_index(svbool_t, uint32_t *, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_f32))) -void svst1_scatter_index(svbool_t, float32_t *, svuint32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_s32))) -void svst1_scatter_index(svbool_t, int32_t *, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_u64))) -void svst1_scatter_index(svbool_t, uint64_t *, svint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_f64))) -void 
svst1_scatter_index(svbool_t, float64_t *, svint64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_s64))) -void svst1_scatter_index(svbool_t, int64_t *, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_u64))) -void svst1_scatter_index(svbool_t, uint64_t *, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_f64))) -void svst1_scatter_index(svbool_t, float64_t *, svuint64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_s64))) -void svst1_scatter_index(svbool_t, int64_t *, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_u32))) -void svst1_scatter_offset(svbool_t, uint32_t *, svint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_f32))) -void svst1_scatter_offset(svbool_t, float32_t *, svint32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_s32))) -void svst1_scatter_offset(svbool_t, int32_t *, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_u32))) -void svst1_scatter_offset(svbool_t, uint32_t *, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_f32))) -void svst1_scatter_offset(svbool_t, float32_t *, svuint32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_s32))) -void svst1_scatter_offset(svbool_t, int32_t *, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_u64))) -void svst1_scatter_offset(svbool_t, uint64_t *, svint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_f64))) -void 
svst1_scatter_offset(svbool_t, float64_t *, svint64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_s64))) -void svst1_scatter_offset(svbool_t, int64_t *, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_u64))) -void svst1_scatter_offset(svbool_t, uint64_t *, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_f64))) -void svst1_scatter_offset(svbool_t, float64_t *, svuint64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_s64))) -void svst1_scatter_offset(svbool_t, int64_t *, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8))) -void svst1_vnum(svbool_t, uint8_t *, int64_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32))) -void svst1_vnum(svbool_t, uint32_t *, int64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64))) -void svst1_vnum(svbool_t, uint64_t *, int64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16))) -void svst1_vnum(svbool_t, uint16_t *, int64_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8))) -void svst1_vnum(svbool_t, int8_t *, int64_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64))) -void svst1_vnum(svbool_t, float64_t *, int64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32))) -void svst1_vnum(svbool_t, float32_t *, int64_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16))) -void svst1_vnum(svbool_t, float16_t *, int64_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32))) -void svst1_vnum(svbool_t, int32_t *, int64_t, svint32_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64))) -void svst1_vnum(svbool_t, int64_t *, int64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16))) -void svst1_vnum(svbool_t, int16_t *, int64_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s32))) -void svst1b(svbool_t, int8_t *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s64))) -void svst1b(svbool_t, int8_t *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s16))) -void svst1b(svbool_t, int8_t *, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u32))) -void svst1b(svbool_t, uint8_t *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u64))) -void svst1b(svbool_t, uint8_t *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u16))) -void svst1b(svbool_t, uint8_t *, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_u32))) -void svst1b_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_u64))) -void svst1b_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_s32))) -void svst1b_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_s64))) -void svst1b_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_u32))) -void svst1b_scatter(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_u64))) -void svst1b_scatter(svbool_t, svuint64_t, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_s32))) -void svst1b_scatter(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_s64))) -void svst1b_scatter(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_s32))) -void svst1b_scatter_offset(svbool_t, int8_t *, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_u32))) -void svst1b_scatter_offset(svbool_t, uint8_t *, svint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_s32))) -void svst1b_scatter_offset(svbool_t, int8_t *, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_u32))) -void svst1b_scatter_offset(svbool_t, uint8_t *, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_s64))) -void svst1b_scatter_offset(svbool_t, int8_t *, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_u64))) -void svst1b_scatter_offset(svbool_t, uint8_t *, svint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_s64))) -void svst1b_scatter_offset(svbool_t, int8_t *, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_u64))) -void svst1b_scatter_offset(svbool_t, uint8_t *, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s32))) -void svst1b_vnum(svbool_t, int8_t *, int64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s64))) -void svst1b_vnum(svbool_t, int8_t *, int64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s16))) -void 
svst1b_vnum(svbool_t, int8_t *, int64_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u32))) -void svst1b_vnum(svbool_t, uint8_t *, int64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u64))) -void svst1b_vnum(svbool_t, uint8_t *, int64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u16))) -void svst1b_vnum(svbool_t, uint8_t *, int64_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s32))) -void svst1h(svbool_t, int16_t *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s64))) -void svst1h(svbool_t, int16_t *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u32))) -void svst1h(svbool_t, uint16_t *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u64))) -void svst1h(svbool_t, uint16_t *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_u32))) -void svst1h_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_u64))) -void svst1h_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_s32))) -void svst1h_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_s64))) -void svst1h_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_u32))) -void svst1h_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_u64))) -void svst1h_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_s32))) -void svst1h_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_s64))) -void svst1h_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_u32))) -void svst1h_scatter(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_u64))) -void svst1h_scatter(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_s32))) -void svst1h_scatter(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_s64))) -void svst1h_scatter(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_s32))) -void svst1h_scatter_index(svbool_t, int16_t *, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_u32))) -void svst1h_scatter_index(svbool_t, uint16_t *, svint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_s32))) -void svst1h_scatter_index(svbool_t, int16_t *, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_u32))) -void svst1h_scatter_index(svbool_t, uint16_t *, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_s64))) -void svst1h_scatter_index(svbool_t, int16_t *, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_u64))) -void svst1h_scatter_index(svbool_t, uint16_t *, svint64_t, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_s64))) -void svst1h_scatter_index(svbool_t, int16_t *, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_u64))) -void svst1h_scatter_index(svbool_t, uint16_t *, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_s32))) -void svst1h_scatter_offset(svbool_t, int16_t *, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_u32))) -void svst1h_scatter_offset(svbool_t, uint16_t *, svint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_s32))) -void svst1h_scatter_offset(svbool_t, int16_t *, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_u32))) -void svst1h_scatter_offset(svbool_t, uint16_t *, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_s64))) -void svst1h_scatter_offset(svbool_t, int16_t *, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_u64))) -void svst1h_scatter_offset(svbool_t, uint16_t *, svint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_s64))) -void svst1h_scatter_offset(svbool_t, int16_t *, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_u64))) -void svst1h_scatter_offset(svbool_t, uint16_t *, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s32))) -void svst1h_vnum(svbool_t, int16_t *, int64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s64))) -void svst1h_vnum(svbool_t, int16_t *, int64_t, svint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u32))) -void svst1h_vnum(svbool_t, uint16_t *, int64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u64))) -void svst1h_vnum(svbool_t, uint16_t *, int64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_s64))) -void svst1w(svbool_t, int32_t *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_u64))) -void svst1w(svbool_t, uint32_t *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_u64))) -void svst1w_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_s64))) -void svst1w_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_u64))) -void svst1w_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_s64))) -void svst1w_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_u64))) -void svst1w_scatter(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_s64))) -void svst1w_scatter(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_s64))) -void svst1w_scatter_index(svbool_t, int32_t *, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_u64))) -void svst1w_scatter_index(svbool_t, uint32_t *, svint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_s64))) -void svst1w_scatter_index(svbool_t, int32_t *, svuint64_t, svint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_u64))) -void svst1w_scatter_index(svbool_t, uint32_t *, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_s64))) -void svst1w_scatter_offset(svbool_t, int32_t *, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_u64))) -void svst1w_scatter_offset(svbool_t, uint32_t *, svint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_s64))) -void svst1w_scatter_offset(svbool_t, int32_t *, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_u64))) -void svst1w_scatter_offset(svbool_t, uint32_t *, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_s64))) -void svst1w_vnum(svbool_t, int32_t *, int64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_u64))) -void svst1w_vnum(svbool_t, uint32_t *, int64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u8))) -void svst2(svbool_t, uint8_t *, svuint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u32))) -void svst2(svbool_t, uint32_t *, svuint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u64))) -void svst2(svbool_t, uint64_t *, svuint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u16))) -void svst2(svbool_t, uint16_t *, svuint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s8))) -void svst2(svbool_t, int8_t *, svint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f64))) -void svst2(svbool_t, float64_t *, svfloat64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f32))) -void svst2(svbool_t, float32_t *, svfloat32x2_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f16))) -void svst2(svbool_t, float16_t *, svfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s32))) -void svst2(svbool_t, int32_t *, svint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s64))) -void svst2(svbool_t, int64_t *, svint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s16))) -void svst2(svbool_t, int16_t *, svint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u8))) -void svst2_vnum(svbool_t, uint8_t *, int64_t, svuint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u32))) -void svst2_vnum(svbool_t, uint32_t *, int64_t, svuint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u64))) -void svst2_vnum(svbool_t, uint64_t *, int64_t, svuint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u16))) -void svst2_vnum(svbool_t, uint16_t *, int64_t, svuint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s8))) -void svst2_vnum(svbool_t, int8_t *, int64_t, svint8x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f64))) -void svst2_vnum(svbool_t, float64_t *, int64_t, svfloat64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f32))) -void svst2_vnum(svbool_t, float32_t *, int64_t, svfloat32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f16))) -void svst2_vnum(svbool_t, float16_t *, int64_t, svfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s32))) -void svst2_vnum(svbool_t, int32_t *, int64_t, svint32x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s64))) -void svst2_vnum(svbool_t, int64_t *, int64_t, svint64x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s16))) -void 
svst2_vnum(svbool_t, int16_t *, int64_t, svint16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u8))) -void svst3(svbool_t, uint8_t *, svuint8x3_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u32))) -void svst3(svbool_t, uint32_t *, svuint32x3_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u64))) -void svst3(svbool_t, uint64_t *, svuint64x3_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u16))) -void svst3(svbool_t, uint16_t *, svuint16x3_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s8))) -void svst3(svbool_t, int8_t *, svint8x3_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f64))) -void svst3(svbool_t, float64_t *, svfloat64x3_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f32))) -void svst3(svbool_t, float32_t *, svfloat32x3_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f16))) -void svst3(svbool_t, float16_t *, svfloat16x3_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s32))) -void svst3(svbool_t, int32_t *, svint32x3_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s64))) -void svst3(svbool_t, int64_t *, svint64x3_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s16))) -void svst3(svbool_t, int16_t *, svint16x3_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u8))) -void svst3_vnum(svbool_t, uint8_t *, int64_t, svuint8x3_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u32))) -void svst3_vnum(svbool_t, uint32_t *, int64_t, svuint32x3_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u64))) -void svst3_vnum(svbool_t, uint64_t *, int64_t, svuint64x3_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u16))) -void svst3_vnum(svbool_t, uint16_t *, int64_t, svuint16x3_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s8))) -void svst3_vnum(svbool_t, int8_t *, int64_t, svint8x3_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f64))) -void svst3_vnum(svbool_t, float64_t *, int64_t, svfloat64x3_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f32))) -void svst3_vnum(svbool_t, float32_t *, int64_t, svfloat32x3_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f16))) -void svst3_vnum(svbool_t, float16_t *, int64_t, svfloat16x3_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s32))) -void svst3_vnum(svbool_t, int32_t *, int64_t, svint32x3_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s64))) -void svst3_vnum(svbool_t, int64_t *, int64_t, svint64x3_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s16))) -void svst3_vnum(svbool_t, int16_t *, int64_t, svint16x3_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u8))) -void svst4(svbool_t, uint8_t *, svuint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u32))) -void svst4(svbool_t, uint32_t *, svuint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u64))) -void svst4(svbool_t, uint64_t *, svuint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u16))) -void svst4(svbool_t, uint16_t *, svuint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s8))) -void svst4(svbool_t, int8_t *, svint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f64))) -void svst4(svbool_t, float64_t *, svfloat64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f32))) -void svst4(svbool_t, float32_t *, svfloat32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f16))) -void svst4(svbool_t, float16_t *, svfloat16x4_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s32))) -void svst4(svbool_t, int32_t *, svint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s64))) -void svst4(svbool_t, int64_t *, svint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s16))) -void svst4(svbool_t, int16_t *, svint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u8))) -void svst4_vnum(svbool_t, uint8_t *, int64_t, svuint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u32))) -void svst4_vnum(svbool_t, uint32_t *, int64_t, svuint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u64))) -void svst4_vnum(svbool_t, uint64_t *, int64_t, svuint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u16))) -void svst4_vnum(svbool_t, uint16_t *, int64_t, svuint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s8))) -void svst4_vnum(svbool_t, int8_t *, int64_t, svint8x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f64))) -void svst4_vnum(svbool_t, float64_t *, int64_t, svfloat64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f32))) -void svst4_vnum(svbool_t, float32_t *, int64_t, svfloat32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f16))) -void svst4_vnum(svbool_t, float16_t *, int64_t, svfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s32))) -void svst4_vnum(svbool_t, int32_t *, int64_t, svint32x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s64))) -void svst4_vnum(svbool_t, int64_t *, int64_t, svint64x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s16))) -void svst4_vnum(svbool_t, int16_t *, int64_t, svint16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8))) 
-void svstnt1(svbool_t, uint8_t *, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32))) -void svstnt1(svbool_t, uint32_t *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64))) -void svstnt1(svbool_t, uint64_t *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16))) -void svstnt1(svbool_t, uint16_t *, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8))) -void svstnt1(svbool_t, int8_t *, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64))) -void svstnt1(svbool_t, float64_t *, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32))) -void svstnt1(svbool_t, float32_t *, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16))) -void svstnt1(svbool_t, float16_t *, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32))) -void svstnt1(svbool_t, int32_t *, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64))) -void svstnt1(svbool_t, int64_t *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16))) -void svstnt1(svbool_t, int16_t *, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8))) -void svstnt1_vnum(svbool_t, uint8_t *, int64_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32))) -void svstnt1_vnum(svbool_t, uint32_t *, int64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64))) -void svstnt1_vnum(svbool_t, uint64_t *, int64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16))) -void svstnt1_vnum(svbool_t, uint16_t *, int64_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8))) -void svstnt1_vnum(svbool_t, int8_t *, 
int64_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64))) -void svstnt1_vnum(svbool_t, float64_t *, int64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32))) -void svstnt1_vnum(svbool_t, float32_t *, int64_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16))) -void svstnt1_vnum(svbool_t, float16_t *, int64_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32))) -void svstnt1_vnum(svbool_t, int32_t *, int64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64))) -void svstnt1_vnum(svbool_t, int64_t *, int64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16))) -void svstnt1_vnum(svbool_t, int16_t *, int64_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_m))) -svfloat64_t svsub_m(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_m))) -svfloat32_t svsub_m(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_m))) -svfloat16_t svsub_m(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_x))) -svfloat64_t svsub_x(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_x))) -svfloat32_t svsub_x(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_x))) -svfloat16_t svsub_x(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_z))) -svfloat64_t svsub_z(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_z))) -svfloat32_t svsub_z(svbool_t, svfloat32_t, float32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_z))) -svfloat16_t svsub_z(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_m))) -svuint8_t svsub_m(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_m))) -svuint32_t svsub_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_m))) -svuint64_t svsub_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_m))) -svuint16_t svsub_m(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_m))) -svint8_t svsub_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_m))) -svint32_t svsub_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_m))) -svint64_t svsub_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_m))) -svint16_t svsub_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_x))) -svuint8_t svsub_x(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_x))) -svuint32_t svsub_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_x))) -svuint64_t svsub_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_x))) -svuint16_t svsub_x(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_x))) -svint8_t svsub_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_x))) -svint32_t svsub_x(svbool_t, svint32_t, int32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_x))) -svint64_t svsub_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_x))) -svint16_t svsub_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_z))) -svuint8_t svsub_z(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_z))) -svuint32_t svsub_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_z))) -svuint64_t svsub_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_z))) -svuint16_t svsub_z(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_z))) -svint8_t svsub_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_z))) -svint32_t svsub_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_z))) -svint64_t svsub_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_z))) -svint16_t svsub_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_m))) -svfloat64_t svsub_m(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_m))) -svfloat32_t svsub_m(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_m))) -svfloat16_t svsub_m(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_x))) -svfloat64_t svsub_x(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_x))) -svfloat32_t svsub_x(svbool_t, svfloat32_t, svfloat32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_x))) -svfloat16_t svsub_x(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_z))) -svfloat64_t svsub_z(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_z))) -svfloat32_t svsub_z(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_z))) -svfloat16_t svsub_z(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_m))) -svuint8_t svsub_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_m))) -svuint32_t svsub_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_m))) -svuint64_t svsub_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_m))) -svuint16_t svsub_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_m))) -svint8_t svsub_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_m))) -svint32_t svsub_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_m))) -svint64_t svsub_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_m))) -svint16_t svsub_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_x))) -svuint8_t svsub_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_x))) -svuint32_t svsub_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_x))) -svuint64_t svsub_x(svbool_t, svuint64_t, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_x))) -svuint16_t svsub_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_x))) -svint8_t svsub_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_x))) -svint32_t svsub_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_x))) -svint64_t svsub_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_x))) -svint16_t svsub_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_z))) -svuint8_t svsub_z(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_z))) -svuint32_t svsub_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_z))) -svuint64_t svsub_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_z))) -svuint16_t svsub_z(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_z))) -svint8_t svsub_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_z))) -svint32_t svsub_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_z))) -svint64_t svsub_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_z))) -svint16_t svsub_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_m))) -svfloat64_t svsubr_m(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_m))) -svfloat32_t svsubr_m(svbool_t, svfloat32_t, float32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_m))) -svfloat16_t svsubr_m(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_x))) -svfloat64_t svsubr_x(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_x))) -svfloat32_t svsubr_x(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_x))) -svfloat16_t svsubr_x(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_z))) -svfloat64_t svsubr_z(svbool_t, svfloat64_t, float64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_z))) -svfloat32_t svsubr_z(svbool_t, svfloat32_t, float32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_z))) -svfloat16_t svsubr_z(svbool_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_m))) -svuint8_t svsubr_m(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_m))) -svuint32_t svsubr_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_m))) -svuint64_t svsubr_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_m))) -svuint16_t svsubr_m(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_m))) -svint8_t svsubr_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_m))) -svint32_t svsubr_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_m))) -svint64_t svsubr_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_m))) -svint16_t svsubr_m(svbool_t, svint16_t, 
int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_x))) -svuint8_t svsubr_x(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_x))) -svuint32_t svsubr_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_x))) -svuint64_t svsubr_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_x))) -svuint16_t svsubr_x(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_x))) -svint8_t svsubr_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_x))) -svint32_t svsubr_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_x))) -svint64_t svsubr_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_x))) -svint16_t svsubr_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_z))) -svuint8_t svsubr_z(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_z))) -svuint32_t svsubr_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_z))) -svuint64_t svsubr_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_z))) -svuint16_t svsubr_z(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_z))) -svint8_t svsubr_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_z))) -svint32_t svsubr_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_z))) -svint64_t svsubr_z(svbool_t, svint64_t, int64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_z))) -svint16_t svsubr_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_m))) -svfloat64_t svsubr_m(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_m))) -svfloat32_t svsubr_m(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_m))) -svfloat16_t svsubr_m(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_x))) -svfloat64_t svsubr_x(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_x))) -svfloat32_t svsubr_x(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_x))) -svfloat16_t svsubr_x(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_z))) -svfloat64_t svsubr_z(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_z))) -svfloat32_t svsubr_z(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_z))) -svfloat16_t svsubr_z(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_m))) -svuint8_t svsubr_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_m))) -svuint32_t svsubr_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_m))) -svuint64_t svsubr_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_m))) -svuint16_t svsubr_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_m))) -svint8_t svsubr_m(svbool_t, 
svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_m))) -svint32_t svsubr_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_m))) -svint64_t svsubr_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_m))) -svint16_t svsubr_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_x))) -svuint8_t svsubr_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_x))) -svuint32_t svsubr_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_x))) -svuint64_t svsubr_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_x))) -svuint16_t svsubr_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_x))) -svint8_t svsubr_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_x))) -svint32_t svsubr_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_x))) -svint64_t svsubr_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_x))) -svint16_t svsubr_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_z))) -svuint8_t svsubr_z(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_z))) -svuint32_t svsubr_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_z))) -svuint64_t svsubr_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_z))) -svuint16_t svsubr_z(svbool_t, svuint16_t, svuint16_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_z))) -svint8_t svsubr_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_z))) -svint32_t svsubr_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_z))) -svint64_t svsubr_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_z))) -svint16_t svsubr_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u8))) -svuint8_t svtbl(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u32))) -svuint32_t svtbl(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u64))) -svuint64_t svtbl(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u16))) -svuint16_t svtbl(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s8))) -svint8_t svtbl(svint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f64))) -svfloat64_t svtbl(svfloat64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f32))) -svfloat32_t svtbl(svfloat32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f16))) -svfloat16_t svtbl(svfloat16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s32))) -svint32_t svtbl(svint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s64))) -svint64_t svtbl(svint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s16))) -svint16_t svtbl(svint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f64))) -svfloat64_t svtmad(svfloat64_t, svfloat64_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f32))) -svfloat32_t svtmad(svfloat32_t, svfloat32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f16))) -svfloat16_t svtmad(svfloat16_t, svfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u8))) -svuint8_t svtrn1(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u32))) -svuint32_t svtrn1(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u64))) -svuint64_t svtrn1(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u16))) -svuint16_t svtrn1(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s8))) -svint8_t svtrn1(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f64))) -svfloat64_t svtrn1(svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f32))) -svfloat32_t svtrn1(svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f16))) -svfloat16_t svtrn1(svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s32))) -svint32_t svtrn1(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s64))) -svint64_t svtrn1(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s16))) -svint16_t svtrn1(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u8))) -svuint8_t svtrn2(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u32))) -svuint32_t svtrn2(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u64))) -svuint64_t svtrn2(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u16))) 
-svuint16_t svtrn2(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s8))) -svint8_t svtrn2(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f64))) -svfloat64_t svtrn2(svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f32))) -svfloat32_t svtrn2(svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f16))) -svfloat16_t svtrn2(svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s32))) -svint32_t svtrn2(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s64))) -svint64_t svtrn2(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s16))) -svint16_t svtrn2(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f64))) -svfloat64_t svtsmul(svfloat64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f32))) -svfloat32_t svtsmul(svfloat32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f16))) -svfloat16_t svtsmul(svfloat16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f64))) -svfloat64_t svtssel(svfloat64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f32))) -svfloat32_t svtssel(svfloat32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f16))) -svfloat16_t svtssel(svfloat16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_b))) -svbool_t svunpkhi(svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s32))) -svint32_t svunpkhi(svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s64))) -svint64_t svunpkhi(svint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s16))) -svint16_t svunpkhi(svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u32))) -svuint32_t svunpkhi(svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u64))) -svuint64_t svunpkhi(svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u16))) -svuint16_t svunpkhi(svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_b))) -svbool_t svunpklo(svbool_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s32))) -svint32_t svunpklo(svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s64))) -svint64_t svunpklo(svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s16))) -svint16_t svunpklo(svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u32))) -svuint32_t svunpklo(svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u64))) -svuint64_t svunpklo(svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u16))) -svuint16_t svunpklo(svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u8))) -svuint8_t svuzp1(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u32))) -svuint32_t svuzp1(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u64))) -svuint64_t svuzp1(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u16))) -svuint16_t svuzp1(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s8))) -svint8_t svuzp1(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f64))) -svfloat64_t svuzp1(svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f32))) 
-svfloat32_t svuzp1(svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f16))) -svfloat16_t svuzp1(svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s32))) -svint32_t svuzp1(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s64))) -svint64_t svuzp1(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s16))) -svint16_t svuzp1(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u8))) -svuint8_t svuzp2(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u32))) -svuint32_t svuzp2(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u64))) -svuint64_t svuzp2(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u16))) -svuint16_t svuzp2(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s8))) -svint8_t svuzp2(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f64))) -svfloat64_t svuzp2(svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f32))) -svfloat32_t svuzp2(svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f16))) -svfloat16_t svuzp2(svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s32))) -svint32_t svuzp2(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s64))) -svint64_t svuzp2(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s16))) -svint16_t svuzp2(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s32))) -svbool_t svwhilele_b8(int32_t, int32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s32))) -svbool_t svwhilele_b32(int32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s32))) -svbool_t svwhilele_b64(int32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s32))) -svbool_t svwhilele_b16(int32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s64))) -svbool_t svwhilele_b8(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s64))) -svbool_t svwhilele_b32(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s64))) -svbool_t svwhilele_b64(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s64))) -svbool_t svwhilele_b16(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u32))) -svbool_t svwhilele_b8(uint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u32))) -svbool_t svwhilele_b32(uint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u32))) -svbool_t svwhilele_b64(uint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u32))) -svbool_t svwhilele_b16(uint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u64))) -svbool_t svwhilele_b8(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u64))) -svbool_t svwhilele_b32(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u64))) -svbool_t svwhilele_b64(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u64))) -svbool_t svwhilele_b16(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u32))) -svbool_t 
svwhilelt_b8(uint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u32))) -svbool_t svwhilelt_b32(uint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u32))) -svbool_t svwhilelt_b64(uint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u32))) -svbool_t svwhilelt_b16(uint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u64))) -svbool_t svwhilelt_b8(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u64))) -svbool_t svwhilelt_b32(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u64))) -svbool_t svwhilelt_b64(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u64))) -svbool_t svwhilelt_b16(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s32))) -svbool_t svwhilelt_b8(int32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s32))) -svbool_t svwhilelt_b32(int32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s32))) -svbool_t svwhilelt_b64(int32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s32))) -svbool_t svwhilelt_b16(int32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s64))) -svbool_t svwhilelt_b8(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s64))) -svbool_t svwhilelt_b32(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s64))) -svbool_t svwhilelt_b64(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s64))) -svbool_t svwhilelt_b16(int64_t, int64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u8))) -svuint8_t svzip1(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u32))) -svuint32_t svzip1(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u64))) -svuint64_t svzip1(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u16))) -svuint16_t svzip1(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s8))) -svint8_t svzip1(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f64))) -svfloat64_t svzip1(svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f32))) -svfloat32_t svzip1(svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f16))) -svfloat16_t svzip1(svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s32))) -svint32_t svzip1(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s64))) -svint64_t svzip1(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s16))) -svint16_t svzip1(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u8))) -svuint8_t svzip2(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u32))) -svuint32_t svzip2(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u64))) -svuint64_t svzip2(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u16))) -svuint16_t svzip2(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s8))) -svint8_t svzip2(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f64))) -svfloat64_t svzip2(svfloat64_t, 
svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f32))) -svfloat32_t svzip2(svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f16))) -svfloat16_t svzip2(svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s32))) -svint32_t svzip2(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s64))) -svint64_t svzip2(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s16))) -svint16_t svzip2(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_n_f32))) -svfloat32_t svbfdot_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_f32))) -svfloat32_t svbfdot_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_lane_f32))) -svfloat32_t svbfdot_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_n_f32))) -svfloat32_t svbfmlalb_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_f32))) -svfloat32_t svbfmlalb_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_lane_f32))) -svfloat32_t svbfmlalb_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_n_f32))) -svfloat32_t svbfmlalt_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_f32))) -svfloat32_t svbfmlalt_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_lane_f32))) -svfloat32_t svbfmlalt_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmmla_f32))) -svfloat32_t svbfmmla_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_bf16))) -bfloat16_t svclasta_n_bf16(svbool_t, bfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_bf16))) -svbfloat16_t svclasta_bf16(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_bf16))) -bfloat16_t svclastb_n_bf16(svbool_t, bfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_bf16))) -svbfloat16_t svclastb_bf16(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_m))) -svuint16_t svcnt_bf16_m(svuint16_t, svbool_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_x))) -svuint16_t svcnt_bf16_x(svbool_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_z))) -svuint16_t svcnt_bf16_z(svbool_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_bf16))) -svbfloat16x2_t svcreate2_bf16(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_bf16))) -svbfloat16x3_t svcreate3_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_bf16))) -svbfloat16x4_t svcreate4_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_m))) -svbfloat16_t svcvt_bf16_f32_m(svbfloat16_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x))) -svbfloat16_t svcvt_bf16_f32_x(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_z))) -svbfloat16_t svcvt_bf16_f32_z(svbool_t, svfloat32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_bf16_f32_m))) -svbfloat16_t svcvtnt_bf16_f32_m(svbfloat16_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16))) -svbfloat16_t svdup_n_bf16(bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_m))) -svbfloat16_t svdup_n_bf16_m(svbfloat16_t, svbool_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_x))) -svbfloat16_t svdup_n_bf16_x(svbool_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_z))) -svbfloat16_t svdup_n_bf16_z(svbool_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_bf16))) -svbfloat16_t svdup_lane_bf16(svbfloat16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_bf16))) -svbfloat16_t svdupq_n_bf16(bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_bf16))) -svbfloat16_t svdupq_lane_bf16(svbfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_bf16))) -svbfloat16_t svext_bf16(svbfloat16_t, svbfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_bf16))) -svbfloat16_t svget2_bf16(svbfloat16x2_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_bf16))) -svbfloat16_t svget3_bf16(svbfloat16x3_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_bf16))) -svbfloat16_t svget4_bf16(svbfloat16x4_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_bf16))) -svbfloat16_t svinsr_n_bf16(svbfloat16_t, bfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_bf16))) -bfloat16_t svlasta_bf16(svbool_t, svbfloat16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_bf16))) -bfloat16_t svlastb_bf16(svbool_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16))) -svbfloat16_t svld1_bf16(svbool_t, bfloat16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16))) -svbfloat16_t svld1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_bf16))) -svbfloat16_t svld1rq_bf16(svbool_t, bfloat16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_bf16))) -svbfloat16x2_t svld2_bf16(svbool_t, bfloat16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_bf16))) -svbfloat16x2_t svld2_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_bf16))) -svbfloat16x3_t svld3_bf16(svbool_t, bfloat16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_bf16))) -svbfloat16x3_t svld3_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_bf16))) -svbfloat16x4_t svld4_bf16(svbool_t, bfloat16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_bf16))) -svbfloat16x4_t svld4_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_bf16))) -svbfloat16_t svldff1_bf16(svbool_t, bfloat16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_bf16))) -svbfloat16_t svldff1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_bf16))) -svbfloat16_t svldnf1_bf16(svbool_t, bfloat16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_bf16))) -svbfloat16_t svldnf1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16))) -svbfloat16_t svldnt1_bf16(svbool_t, bfloat16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16))) -svbfloat16_t svldnt1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_bf16))) -uint64_t svlen_bf16(svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_bf16))) -svbfloat16_t svrev_bf16(svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16))) -svbfloat16_t svsel_bf16(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_bf16))) -svbfloat16x2_t svset2_bf16(svbfloat16x2_t, uint64_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_bf16))) -svbfloat16x3_t svset3_bf16(svbfloat16x3_t, uint64_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_bf16))) -svbfloat16x4_t svset4_bf16(svbfloat16x4_t, uint64_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_bf16))) -svbfloat16_t svsplice_bf16(svbool_t, svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16))) -void svst1_bf16(svbool_t, bfloat16_t *, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16))) -void svst1_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_bf16))) -void svst2_bf16(svbool_t, bfloat16_t *, svbfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_bf16))) -void svst2_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x2_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_bf16))) -void svst3_bf16(svbool_t, bfloat16_t *, svbfloat16x3_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_bf16))) -void svst3_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x3_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_bf16))) -void svst4_bf16(svbool_t, bfloat16_t *, svbfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_bf16))) -void svst4_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x4_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16))) -void svstnt1_bf16(svbool_t, bfloat16_t *, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16))) -void svstnt1_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_bf16))) -svbfloat16_t svtbl_bf16(svbfloat16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_bf16))) -svbfloat16_t svtrn1_bf16(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_bf16))) -svbfloat16_t svtrn2_bf16(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_bf16))) -svbfloat16x2_t svundef2_bf16(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_bf16))) -svbfloat16x3_t svundef3_bf16(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_bf16))) -svbfloat16x4_t svundef4_bf16(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_bf16))) -svbfloat16_t svundef_bf16(void); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_bf16))) -svbfloat16_t svuzp1_bf16(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_bf16))) -svbfloat16_t svuzp2_bf16(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_bf16))) -svbfloat16_t svzip1_bf16(svbfloat16_t, svbfloat16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_bf16))) -svbfloat16_t svzip2_bf16(svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_n_f32))) -svfloat32_t svbfdot(svfloat32_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_f32))) -svfloat32_t svbfdot(svfloat32_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_lane_f32))) -svfloat32_t svbfdot_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_n_f32))) -svfloat32_t svbfmlalb(svfloat32_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_f32))) -svfloat32_t svbfmlalb(svfloat32_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_lane_f32))) -svfloat32_t svbfmlalb_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_n_f32))) -svfloat32_t svbfmlalt(svfloat32_t, svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_f32))) -svfloat32_t svbfmlalt(svfloat32_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_lane_f32))) -svfloat32_t svbfmlalt_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmmla_f32))) -svfloat32_t svbfmmla(svfloat32_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_bf16))) -bfloat16_t svclasta(svbool_t, bfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_bf16))) -svbfloat16_t svclasta(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_bf16))) -bfloat16_t 
svclastb(svbool_t, bfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_bf16))) -svbfloat16_t svclastb(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_m))) -svuint16_t svcnt_m(svuint16_t, svbool_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_x))) -svuint16_t svcnt_x(svbool_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_z))) -svuint16_t svcnt_z(svbool_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_bf16))) -svbfloat16x2_t svcreate2(svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_bf16))) -svbfloat16x3_t svcreate3(svbfloat16_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_bf16))) -svbfloat16x4_t svcreate4(svbfloat16_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_m))) -svbfloat16_t svcvt_bf16_m(svbfloat16_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x))) -svbfloat16_t svcvt_bf16_x(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_z))) -svbfloat16_t svcvt_bf16_z(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_bf16_f32_m))) -svbfloat16_t svcvtnt_bf16_m(svbfloat16_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16))) -svbfloat16_t svdup_bf16(bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_m))) -svbfloat16_t svdup_bf16_m(svbfloat16_t, svbool_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_x))) -svbfloat16_t svdup_bf16_x(svbool_t, bfloat16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_z))) -svbfloat16_t svdup_bf16_z(svbool_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_bf16))) -svbfloat16_t svdup_lane(svbfloat16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_bf16))) -svbfloat16_t svdupq_bf16(bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_bf16))) -svbfloat16_t svdupq_lane(svbfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_bf16))) -svbfloat16_t svext(svbfloat16_t, svbfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_bf16))) -svbfloat16_t svget2(svbfloat16x2_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_bf16))) -svbfloat16_t svget3(svbfloat16x3_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_bf16))) -svbfloat16_t svget4(svbfloat16x4_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_bf16))) -svbfloat16_t svinsr(svbfloat16_t, bfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_bf16))) -bfloat16_t svlasta(svbool_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_bf16))) -bfloat16_t svlastb(svbool_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16))) -svbfloat16_t svld1(svbool_t, bfloat16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16))) -svbfloat16_t svld1_vnum(svbool_t, bfloat16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_bf16))) -svbfloat16_t svld1rq(svbool_t, bfloat16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_bf16))) -svbfloat16x2_t svld2(svbool_t, bfloat16_t 
const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_bf16))) -svbfloat16x2_t svld2_vnum(svbool_t, bfloat16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_bf16))) -svbfloat16x3_t svld3(svbool_t, bfloat16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_bf16))) -svbfloat16x3_t svld3_vnum(svbool_t, bfloat16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_bf16))) -svbfloat16x4_t svld4(svbool_t, bfloat16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_bf16))) -svbfloat16x4_t svld4_vnum(svbool_t, bfloat16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_bf16))) -svbfloat16_t svldff1(svbool_t, bfloat16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_bf16))) -svbfloat16_t svldff1_vnum(svbool_t, bfloat16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_bf16))) -svbfloat16_t svldnf1(svbool_t, bfloat16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_bf16))) -svbfloat16_t svldnf1_vnum(svbool_t, bfloat16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16))) -svbfloat16_t svldnt1(svbool_t, bfloat16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16))) -svbfloat16_t svldnt1_vnum(svbool_t, bfloat16_t const *, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_bf16))) -uint64_t svlen(svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_bf16))) -svbfloat16_t svrev(svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16))) -svbfloat16_t svsel(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_bf16))) 
-svbfloat16x2_t svset2(svbfloat16x2_t, uint64_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_bf16))) -svbfloat16x3_t svset3(svbfloat16x3_t, uint64_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_bf16))) -svbfloat16x4_t svset4(svbfloat16x4_t, uint64_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_bf16))) -svbfloat16_t svsplice(svbool_t, svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16))) -void svst1(svbool_t, bfloat16_t *, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16))) -void svst1_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_bf16))) -void svst2(svbool_t, bfloat16_t *, svbfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_bf16))) -void svst2_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x2_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_bf16))) -void svst3(svbool_t, bfloat16_t *, svbfloat16x3_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_bf16))) -void svst3_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x3_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_bf16))) -void svst4(svbool_t, bfloat16_t *, svbfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_bf16))) -void svst4_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x4_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16))) -void svstnt1(svbool_t, bfloat16_t *, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16))) -void svstnt1_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_bf16))) -svbfloat16_t svtbl(svbfloat16_t, svuint16_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_bf16))) -svbfloat16_t svtrn1(svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_bf16))) -svbfloat16_t svtrn2(svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_bf16))) -svbfloat16_t svuzp1(svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_bf16))) -svbfloat16_t svuzp2(svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_bf16))) -svbfloat16_t svzip1(svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_bf16))) -svbfloat16_t svzip2(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_bf16))) -svbfloat16_t svtrn1q_bf16(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_bf16))) -svbfloat16_t svtrn2q_bf16(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_bf16))) -svbfloat16_t svuzp1q_bf16(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_bf16))) -svbfloat16_t svuzp2q_bf16(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_bf16))) -svbfloat16_t svzip1q_bf16(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_bf16))) -svbfloat16_t svzip2q_bf16(svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_bf16))) -svbfloat16_t svtrn1q(svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_bf16))) -svbfloat16_t svtrn2q(svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_bf16))) -svbfloat16_t svuzp1q(svbfloat16_t, svbfloat16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_bf16))) -svbfloat16_t svuzp2q(svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_bf16))) -svbfloat16_t svzip1q(svbfloat16_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_bf16))) -svbfloat16_t svzip2q(svbfloat16_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f32))) -svfloat32_t svmmla_f32(svfloat32_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f32))) -svfloat32_t svmmla(svfloat32_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u8))) -svuint8_t svld1ro_u8(svbool_t, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u32))) -svuint32_t svld1ro_u32(svbool_t, uint32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u64))) -svuint64_t svld1ro_u64(svbool_t, uint64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u16))) -svuint16_t svld1ro_u16(svbool_t, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s8))) -svint8_t svld1ro_s8(svbool_t, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f64))) -svfloat64_t svld1ro_f64(svbool_t, float64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f32))) -svfloat32_t svld1ro_f32(svbool_t, float32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f16))) -svfloat16_t svld1ro_f16(svbool_t, float16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s32))) -svint32_t svld1ro_s32(svbool_t, int32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s64))) -svint64_t svld1ro_s64(svbool_t, int64_t const *); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s16))) -svint16_t svld1ro_s16(svbool_t, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f64))) -svfloat64_t svmmla_f64(svfloat64_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u8))) -svuint8_t svtrn1q_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u32))) -svuint32_t svtrn1q_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u64))) -svuint64_t svtrn1q_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u16))) -svuint16_t svtrn1q_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s8))) -svint8_t svtrn1q_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f64))) -svfloat64_t svtrn1q_f64(svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f32))) -svfloat32_t svtrn1q_f32(svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f16))) -svfloat16_t svtrn1q_f16(svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s32))) -svint32_t svtrn1q_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s64))) -svint64_t svtrn1q_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s16))) -svint16_t svtrn1q_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u8))) -svuint8_t svtrn2q_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u32))) -svuint32_t svtrn2q_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u64))) -svuint64_t svtrn2q_u64(svuint64_t, svuint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u16))) -svuint16_t svtrn2q_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s8))) -svint8_t svtrn2q_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f64))) -svfloat64_t svtrn2q_f64(svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f32))) -svfloat32_t svtrn2q_f32(svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f16))) -svfloat16_t svtrn2q_f16(svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s32))) -svint32_t svtrn2q_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s64))) -svint64_t svtrn2q_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s16))) -svint16_t svtrn2q_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u8))) -svuint8_t svuzp1q_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u32))) -svuint32_t svuzp1q_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u64))) -svuint64_t svuzp1q_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u16))) -svuint16_t svuzp1q_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s8))) -svint8_t svuzp1q_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f64))) -svfloat64_t svuzp1q_f64(svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f32))) -svfloat32_t svuzp1q_f32(svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f16))) -svfloat16_t svuzp1q_f16(svfloat16_t, svfloat16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s32))) -svint32_t svuzp1q_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s64))) -svint64_t svuzp1q_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s16))) -svint16_t svuzp1q_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u8))) -svuint8_t svuzp2q_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u32))) -svuint32_t svuzp2q_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u64))) -svuint64_t svuzp2q_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u16))) -svuint16_t svuzp2q_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s8))) -svint8_t svuzp2q_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f64))) -svfloat64_t svuzp2q_f64(svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f32))) -svfloat32_t svuzp2q_f32(svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f16))) -svfloat16_t svuzp2q_f16(svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s32))) -svint32_t svuzp2q_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s64))) -svint64_t svuzp2q_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s16))) -svint16_t svuzp2q_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u8))) -svuint8_t svzip1q_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u32))) -svuint32_t svzip1q_u32(svuint32_t, svuint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u64))) -svuint64_t svzip1q_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u16))) -svuint16_t svzip1q_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s8))) -svint8_t svzip1q_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f64))) -svfloat64_t svzip1q_f64(svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f32))) -svfloat32_t svzip1q_f32(svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f16))) -svfloat16_t svzip1q_f16(svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s32))) -svint32_t svzip1q_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s64))) -svint64_t svzip1q_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s16))) -svint16_t svzip1q_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u8))) -svuint8_t svzip2q_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u32))) -svuint32_t svzip2q_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u64))) -svuint64_t svzip2q_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u16))) -svuint16_t svzip2q_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s8))) -svint8_t svzip2q_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f64))) -svfloat64_t svzip2q_f64(svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f32))) -svfloat32_t svzip2q_f32(svfloat32_t, svfloat32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f16))) -svfloat16_t svzip2q_f16(svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s32))) -svint32_t svzip2q_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s64))) -svint64_t svzip2q_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s16))) -svint16_t svzip2q_s16(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u8))) -svuint8_t svld1ro(svbool_t, uint8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u32))) -svuint32_t svld1ro(svbool_t, uint32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u64))) -svuint64_t svld1ro(svbool_t, uint64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u16))) -svuint16_t svld1ro(svbool_t, uint16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s8))) -svint8_t svld1ro(svbool_t, int8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f64))) -svfloat64_t svld1ro(svbool_t, float64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f32))) -svfloat32_t svld1ro(svbool_t, float32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f16))) -svfloat16_t svld1ro(svbool_t, float16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s32))) -svint32_t svld1ro(svbool_t, int32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s64))) -svint64_t svld1ro(svbool_t, int64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s16))) -svint16_t svld1ro(svbool_t, int16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f64))) -svfloat64_t svmmla(svfloat64_t, svfloat64_t, 
svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u8))) -svuint8_t svtrn1q(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u32))) -svuint32_t svtrn1q(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u64))) -svuint64_t svtrn1q(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u16))) -svuint16_t svtrn1q(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s8))) -svint8_t svtrn1q(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f64))) -svfloat64_t svtrn1q(svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f32))) -svfloat32_t svtrn1q(svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f16))) -svfloat16_t svtrn1q(svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s32))) -svint32_t svtrn1q(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s64))) -svint64_t svtrn1q(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s16))) -svint16_t svtrn1q(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u8))) -svuint8_t svtrn2q(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u32))) -svuint32_t svtrn2q(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u64))) -svuint64_t svtrn2q(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u16))) -svuint16_t svtrn2q(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s8))) -svint8_t svtrn2q(svint8_t, svint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f64))) -svfloat64_t svtrn2q(svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f32))) -svfloat32_t svtrn2q(svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f16))) -svfloat16_t svtrn2q(svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s32))) -svint32_t svtrn2q(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s64))) -svint64_t svtrn2q(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s16))) -svint16_t svtrn2q(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u8))) -svuint8_t svuzp1q(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u32))) -svuint32_t svuzp1q(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u64))) -svuint64_t svuzp1q(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u16))) -svuint16_t svuzp1q(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s8))) -svint8_t svuzp1q(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f64))) -svfloat64_t svuzp1q(svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f32))) -svfloat32_t svuzp1q(svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f16))) -svfloat16_t svuzp1q(svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s32))) -svint32_t svuzp1q(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s64))) -svint64_t svuzp1q(svint64_t, svint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s16))) -svint16_t svuzp1q(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u8))) -svuint8_t svuzp2q(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u32))) -svuint32_t svuzp2q(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u64))) -svuint64_t svuzp2q(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u16))) -svuint16_t svuzp2q(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s8))) -svint8_t svuzp2q(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f64))) -svfloat64_t svuzp2q(svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f32))) -svfloat32_t svuzp2q(svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f16))) -svfloat16_t svuzp2q(svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s32))) -svint32_t svuzp2q(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s64))) -svint64_t svuzp2q(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s16))) -svint16_t svuzp2q(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u8))) -svuint8_t svzip1q(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u32))) -svuint32_t svzip1q(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u64))) -svuint64_t svzip1q(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u16))) -svuint16_t svzip1q(svuint16_t, svuint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s8))) -svint8_t svzip1q(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f64))) -svfloat64_t svzip1q(svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f32))) -svfloat32_t svzip1q(svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f16))) -svfloat16_t svzip1q(svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s32))) -svint32_t svzip1q(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s64))) -svint64_t svzip1q(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s16))) -svint16_t svzip1q(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u8))) -svuint8_t svzip2q(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u32))) -svuint32_t svzip2q(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u64))) -svuint64_t svzip2q(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u16))) -svuint16_t svzip2q(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s8))) -svint8_t svzip2q(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f64))) -svfloat64_t svzip2q(svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f32))) -svfloat32_t svzip2q(svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f16))) -svfloat16_t svzip2q(svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s32))) -svint32_t svzip2q(svint32_t, svint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s64))) -svint64_t svzip2q(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s16))) -svint16_t svzip2q(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_bf16))) -svbfloat16_t svld1ro_bf16(svbool_t, bfloat16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_bf16))) -svbfloat16_t svld1ro(svbool_t, bfloat16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_s32))) -svint32_t svmmla_s32(svint32_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_u32))) -svuint32_t svmmla_u32(svuint32_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_n_s32))) -svint32_t svsudot_n_s32(svint32_t, svint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_s32))) -svint32_t svsudot_s32(svint32_t, svint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_lane_s32))) -svint32_t svsudot_lane_s32(svint32_t, svint8_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_n_s32))) -svint32_t svusdot_n_s32(svint32_t, svuint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_s32))) -svint32_t svusdot_s32(svint32_t, svuint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_lane_s32))) -svint32_t svusdot_lane_s32(svint32_t, svuint8_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusmmla_s32))) -svint32_t svusmmla_s32(svint32_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_s32))) -svint32_t svmmla(svint32_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_u32))) -svuint32_t svmmla(svuint32_t, svuint8_t, svuint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_n_s32))) -svint32_t svsudot(svint32_t, svint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_s32))) -svint32_t svsudot(svint32_t, svint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_lane_s32))) -svint32_t svsudot_lane(svint32_t, svint8_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_n_s32))) -svint32_t svusdot(svint32_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_s32))) -svint32_t svusdot(svint32_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_lane_s32))) -svint32_t svusdot_lane(svint32_t, svuint8_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusmmla_s32))) -svint32_t svusmmla(svint32_t, svuint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s8))) -svint8_t svaba_n_s8(svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s32))) -svint32_t svaba_n_s32(svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s64))) -svint64_t svaba_n_s64(svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s16))) -svint16_t svaba_n_s16(svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u8))) -svuint8_t svaba_n_u8(svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u32))) -svuint32_t svaba_n_u32(svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u64))) -svuint64_t svaba_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u16))) -svuint16_t svaba_n_u16(svuint16_t, svuint16_t, uint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s8))) -svint8_t svaba_s8(svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s32))) -svint32_t svaba_s32(svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s64))) -svint64_t svaba_s64(svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s16))) -svint16_t svaba_s16(svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u8))) -svuint8_t svaba_u8(svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u32))) -svuint32_t svaba_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u64))) -svuint64_t svaba_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u16))) -svuint16_t svaba_u16(svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s32))) -svint32_t svabalb_n_s32(svint32_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s64))) -svint64_t svabalb_n_s64(svint64_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s16))) -svint16_t svabalb_n_s16(svint16_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u32))) -svuint32_t svabalb_n_u32(svuint32_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u64))) -svuint64_t svabalb_n_u64(svuint64_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u16))) -svuint16_t svabalb_n_u16(svuint16_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s32))) -svint32_t svabalb_s32(svint32_t, svint16_t, svint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s64))) -svint64_t svabalb_s64(svint64_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s16))) -svint16_t svabalb_s16(svint16_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u32))) -svuint32_t svabalb_u32(svuint32_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u64))) -svuint64_t svabalb_u64(svuint64_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u16))) -svuint16_t svabalb_u16(svuint16_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s32))) -svint32_t svabalt_n_s32(svint32_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s64))) -svint64_t svabalt_n_s64(svint64_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s16))) -svint16_t svabalt_n_s16(svint16_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u32))) -svuint32_t svabalt_n_u32(svuint32_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u64))) -svuint64_t svabalt_n_u64(svuint64_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u16))) -svuint16_t svabalt_n_u16(svuint16_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s32))) -svint32_t svabalt_s32(svint32_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s64))) -svint64_t svabalt_s64(svint64_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s16))) -svint16_t svabalt_s16(svint16_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u32))) -svuint32_t svabalt_u32(svuint32_t, 
svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u64))) -svuint64_t svabalt_u64(svuint64_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u16))) -svuint16_t svabalt_u16(svuint16_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s32))) -svint32_t svabdlb_n_s32(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s64))) -svint64_t svabdlb_n_s64(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s16))) -svint16_t svabdlb_n_s16(svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u32))) -svuint32_t svabdlb_n_u32(svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u64))) -svuint64_t svabdlb_n_u64(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u16))) -svuint16_t svabdlb_n_u16(svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s32))) -svint32_t svabdlb_s32(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s64))) -svint64_t svabdlb_s64(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s16))) -svint16_t svabdlb_s16(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u32))) -svuint32_t svabdlb_u32(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u64))) -svuint64_t svabdlb_u64(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u16))) -svuint16_t svabdlb_u16(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s32))) -svint32_t svabdlt_n_s32(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s64))) -svint64_t 
svabdlt_n_s64(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s16))) -svint16_t svabdlt_n_s16(svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u32))) -svuint32_t svabdlt_n_u32(svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u64))) -svuint64_t svabdlt_n_u64(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u16))) -svuint16_t svabdlt_n_u16(svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s32))) -svint32_t svabdlt_s32(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s64))) -svint64_t svabdlt_s64(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s16))) -svint16_t svabdlt_s16(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u32))) -svuint32_t svabdlt_u32(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u64))) -svuint64_t svabdlt_u64(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u16))) -svuint16_t svabdlt_u16(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_m))) -svint32_t svadalp_s32_m(svbool_t, svint32_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_m))) -svint64_t svadalp_s64_m(svbool_t, svint64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_m))) -svint16_t svadalp_s16_m(svbool_t, svint16_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_x))) -svint32_t svadalp_s32_x(svbool_t, svint32_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_x))) -svint64_t svadalp_s64_x(svbool_t, svint64_t, svint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_x))) -svint16_t svadalp_s16_x(svbool_t, svint16_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_z))) -svint32_t svadalp_s32_z(svbool_t, svint32_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_z))) -svint64_t svadalp_s64_z(svbool_t, svint64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_z))) -svint16_t svadalp_s16_z(svbool_t, svint16_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_m))) -svuint32_t svadalp_u32_m(svbool_t, svuint32_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_m))) -svuint64_t svadalp_u64_m(svbool_t, svuint64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_m))) -svuint16_t svadalp_u16_m(svbool_t, svuint16_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_x))) -svuint32_t svadalp_u32_x(svbool_t, svuint32_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_x))) -svuint64_t svadalp_u64_x(svbool_t, svuint64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_x))) -svuint16_t svadalp_u16_x(svbool_t, svuint16_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_z))) -svuint32_t svadalp_u32_z(svbool_t, svuint32_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_z))) -svuint64_t svadalp_u64_z(svbool_t, svuint64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_z))) -svuint16_t svadalp_u16_z(svbool_t, svuint16_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u32))) -svuint32_t svadclb_n_u32(svuint32_t, svuint32_t, uint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u64))) -svuint64_t svadclb_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u32))) -svuint32_t svadclb_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u64))) -svuint64_t svadclb_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u32))) -svuint32_t svadclt_n_u32(svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u64))) -svuint64_t svadclt_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u32))) -svuint32_t svadclt_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u64))) -svuint64_t svadclt_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u32))) -svuint16_t svaddhnb_n_u32(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u64))) -svuint32_t svaddhnb_n_u64(svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u16))) -svuint8_t svaddhnb_n_u16(svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s32))) -svint16_t svaddhnb_n_s32(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s64))) -svint32_t svaddhnb_n_s64(svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s16))) -svint8_t svaddhnb_n_s16(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u32))) -svuint16_t svaddhnb_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u64))) -svuint32_t svaddhnb_u64(svuint64_t, svuint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u16))) -svuint8_t svaddhnb_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s32))) -svint16_t svaddhnb_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s64))) -svint32_t svaddhnb_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s16))) -svint8_t svaddhnb_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u32))) -svuint16_t svaddhnt_n_u32(svuint16_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u64))) -svuint32_t svaddhnt_n_u64(svuint32_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u16))) -svuint8_t svaddhnt_n_u16(svuint8_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s32))) -svint16_t svaddhnt_n_s32(svint16_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s64))) -svint32_t svaddhnt_n_s64(svint32_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s16))) -svint8_t svaddhnt_n_s16(svint8_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u32))) -svuint16_t svaddhnt_u32(svuint16_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u64))) -svuint32_t svaddhnt_u64(svuint32_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u16))) -svuint8_t svaddhnt_u16(svuint8_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s32))) -svint16_t svaddhnt_s32(svint16_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s64))) -svint32_t svaddhnt_s64(svint32_t, svint64_t, 
svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s16))) -svint8_t svaddhnt_s16(svint8_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s32))) -svint32_t svaddlb_n_s32(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s64))) -svint64_t svaddlb_n_s64(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s16))) -svint16_t svaddlb_n_s16(svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u32))) -svuint32_t svaddlb_n_u32(svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u64))) -svuint64_t svaddlb_n_u64(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u16))) -svuint16_t svaddlb_n_u16(svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s32))) -svint32_t svaddlb_s32(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s64))) -svint64_t svaddlb_s64(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s16))) -svint16_t svaddlb_s16(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u32))) -svuint32_t svaddlb_u32(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u64))) -svuint64_t svaddlb_u64(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u16))) -svuint16_t svaddlb_u16(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s32))) -svint32_t svaddlbt_n_s32(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s64))) -svint64_t svaddlbt_n_s64(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s16))) -svint16_t svaddlbt_n_s16(svint8_t, 
int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s32))) -svint32_t svaddlbt_s32(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s64))) -svint64_t svaddlbt_s64(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s16))) -svint16_t svaddlbt_s16(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s32))) -svint32_t svaddlt_n_s32(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s64))) -svint64_t svaddlt_n_s64(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s16))) -svint16_t svaddlt_n_s16(svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u32))) -svuint32_t svaddlt_n_u32(svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u64))) -svuint64_t svaddlt_n_u64(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u16))) -svuint16_t svaddlt_n_u16(svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s32))) -svint32_t svaddlt_s32(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s64))) -svint64_t svaddlt_s64(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s16))) -svint16_t svaddlt_s16(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u32))) -svuint32_t svaddlt_u32(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u64))) -svuint64_t svaddlt_u64(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u16))) -svuint16_t svaddlt_u16(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_m))) -svfloat64_t svaddp_f64_m(svbool_t, svfloat64_t, 
svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_m))) -svfloat32_t svaddp_f32_m(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_m))) -svfloat16_t svaddp_f16_m(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_x))) -svfloat64_t svaddp_f64_x(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_x))) -svfloat32_t svaddp_f32_x(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_x))) -svfloat16_t svaddp_f16_x(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_m))) -svuint8_t svaddp_u8_m(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_m))) -svuint32_t svaddp_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_m))) -svuint64_t svaddp_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_m))) -svuint16_t svaddp_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_m))) -svint8_t svaddp_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_m))) -svint32_t svaddp_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_m))) -svint64_t svaddp_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_m))) -svint16_t svaddp_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_x))) -svuint8_t svaddp_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_x))) 
-svuint32_t svaddp_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_x))) -svuint64_t svaddp_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_x))) -svuint16_t svaddp_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_x))) -svint8_t svaddp_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_x))) -svint32_t svaddp_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_x))) -svint64_t svaddp_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_x))) -svint16_t svaddp_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s32))) -svint32_t svaddwb_n_s32(svint32_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s64))) -svint64_t svaddwb_n_s64(svint64_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s16))) -svint16_t svaddwb_n_s16(svint16_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u32))) -svuint32_t svaddwb_n_u32(svuint32_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u64))) -svuint64_t svaddwb_n_u64(svuint64_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u16))) -svuint16_t svaddwb_n_u16(svuint16_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s32))) -svint32_t svaddwb_s32(svint32_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s64))) -svint64_t svaddwb_s64(svint64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s16))) -svint16_t svaddwb_s16(svint16_t, svint8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u32))) -svuint32_t svaddwb_u32(svuint32_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u64))) -svuint64_t svaddwb_u64(svuint64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u16))) -svuint16_t svaddwb_u16(svuint16_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s32))) -svint32_t svaddwt_n_s32(svint32_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s64))) -svint64_t svaddwt_n_s64(svint64_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s16))) -svint16_t svaddwt_n_s16(svint16_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u32))) -svuint32_t svaddwt_n_u32(svuint32_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u64))) -svuint64_t svaddwt_n_u64(svuint64_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u16))) -svuint16_t svaddwt_n_u16(svuint16_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s32))) -svint32_t svaddwt_s32(svint32_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s64))) -svint64_t svaddwt_s64(svint64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s16))) -svint16_t svaddwt_s16(svint16_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u32))) -svuint32_t svaddwt_u32(svuint32_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u64))) -svuint64_t svaddwt_u64(svuint64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u16))) -svuint16_t svaddwt_u16(svuint16_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u8))) -svuint8_t svbcax_n_u8(svuint8_t, svuint8_t, uint8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u32))) -svuint32_t svbcax_n_u32(svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u64))) -svuint64_t svbcax_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u16))) -svuint16_t svbcax_n_u16(svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s8))) -svint8_t svbcax_n_s8(svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s32))) -svint32_t svbcax_n_s32(svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s64))) -svint64_t svbcax_n_s64(svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s16))) -svint16_t svbcax_n_s16(svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u8))) -svuint8_t svbcax_u8(svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u32))) -svuint32_t svbcax_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u64))) -svuint64_t svbcax_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u16))) -svuint16_t svbcax_u16(svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s8))) -svint8_t svbcax_s8(svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s32))) -svint32_t svbcax_s32(svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s64))) -svint64_t svbcax_s64(svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s16))) -svint16_t svbcax_s16(svint16_t, svint16_t, svint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u8))) -svuint8_t svbsl1n_n_u8(svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u32))) -svuint32_t svbsl1n_n_u32(svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u64))) -svuint64_t svbsl1n_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u16))) -svuint16_t svbsl1n_n_u16(svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s8))) -svint8_t svbsl1n_n_s8(svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s32))) -svint32_t svbsl1n_n_s32(svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s64))) -svint64_t svbsl1n_n_s64(svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s16))) -svint16_t svbsl1n_n_s16(svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u8))) -svuint8_t svbsl1n_u8(svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u32))) -svuint32_t svbsl1n_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u64))) -svuint64_t svbsl1n_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u16))) -svuint16_t svbsl1n_u16(svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s8))) -svint8_t svbsl1n_s8(svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s32))) -svint32_t svbsl1n_s32(svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s64))) -svint64_t svbsl1n_s64(svint64_t, 
svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s16))) -svint16_t svbsl1n_s16(svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u8))) -svuint8_t svbsl2n_n_u8(svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u32))) -svuint32_t svbsl2n_n_u32(svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u64))) -svuint64_t svbsl2n_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u16))) -svuint16_t svbsl2n_n_u16(svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s8))) -svint8_t svbsl2n_n_s8(svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s32))) -svint32_t svbsl2n_n_s32(svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s64))) -svint64_t svbsl2n_n_s64(svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s16))) -svint16_t svbsl2n_n_s16(svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u8))) -svuint8_t svbsl2n_u8(svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u32))) -svuint32_t svbsl2n_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u64))) -svuint64_t svbsl2n_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u16))) -svuint16_t svbsl2n_u16(svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s8))) -svint8_t svbsl2n_s8(svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s32))) -svint32_t 
svbsl2n_s32(svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s64))) -svint64_t svbsl2n_s64(svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s16))) -svint16_t svbsl2n_s16(svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u8))) -svuint8_t svbsl_n_u8(svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u32))) -svuint32_t svbsl_n_u32(svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u64))) -svuint64_t svbsl_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u16))) -svuint16_t svbsl_n_u16(svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s8))) -svint8_t svbsl_n_s8(svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s32))) -svint32_t svbsl_n_s32(svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s64))) -svint64_t svbsl_n_s64(svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s16))) -svint16_t svbsl_n_s16(svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u8))) -svuint8_t svbsl_u8(svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u32))) -svuint32_t svbsl_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u64))) -svuint64_t svbsl_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u16))) -svuint16_t svbsl_u16(svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s8))) -svint8_t svbsl_s8(svint8_t, 
svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s32))) -svint32_t svbsl_s32(svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s64))) -svint64_t svbsl_s64(svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s16))) -svint16_t svbsl_s16(svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u8))) -svuint8_t svcadd_u8(svuint8_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u32))) -svuint32_t svcadd_u32(svuint32_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u64))) -svuint64_t svcadd_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u16))) -svuint16_t svcadd_u16(svuint16_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s8))) -svint8_t svcadd_s8(svint8_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s32))) -svint32_t svcadd_s32(svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s64))) -svint64_t svcadd_s64(svint64_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s16))) -svint16_t svcadd_s16(svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s32))) -svint32_t svcdot_s32(svint32_t, svint8_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s64))) -svint64_t svcdot_s64(svint64_t, svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s32))) -svint32_t svcdot_lane_s32(svint32_t, svint8_t, svint8_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s64))) -svint64_t 
svcdot_lane_s64(svint64_t, svint16_t, svint16_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u8))) -svuint8_t svcmla_u8(svuint8_t, svuint8_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u32))) -svuint32_t svcmla_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u64))) -svuint64_t svcmla_u64(svuint64_t, svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u16))) -svuint16_t svcmla_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s8))) -svint8_t svcmla_s8(svint8_t, svint8_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s32))) -svint32_t svcmla_s32(svint32_t, svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s64))) -svint64_t svcmla_s64(svint64_t, svint64_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s16))) -svint16_t svcmla_s16(svint16_t, svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u32))) -svuint32_t svcmla_lane_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u16))) -svuint16_t svcmla_lane_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s32))) -svint32_t svcmla_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s16))) -svint16_t svcmla_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_m))) -svfloat32_t svcvtlt_f32_f16_m(svfloat32_t, svbool_t, 
svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_x))) -svfloat32_t svcvtlt_f32_f16_x(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_m))) -svfloat64_t svcvtlt_f64_f32_m(svfloat64_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_x))) -svfloat64_t svcvtlt_f64_f32_x(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f16_f32_m))) -svfloat16_t svcvtnt_f16_f32_m(svfloat16_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f32_f64_m))) -svfloat32_t svcvtnt_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_m))) -svfloat32_t svcvtx_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_x))) -svfloat32_t svcvtx_f32_f64_x(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_z))) -svfloat32_t svcvtx_f32_f64_z(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtxnt_f32_f64_m))) -svfloat32_t svcvtxnt_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u8))) -svuint8_t sveor3_n_u8(svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u32))) -svuint32_t sveor3_n_u32(svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u64))) -svuint64_t sveor3_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u16))) -svuint16_t sveor3_n_u16(svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s8))) -svint8_t sveor3_n_s8(svint8_t, svint8_t, int8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s32))) -svint32_t sveor3_n_s32(svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s64))) -svint64_t sveor3_n_s64(svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s16))) -svint16_t sveor3_n_s16(svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u8))) -svuint8_t sveor3_u8(svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u32))) -svuint32_t sveor3_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u64))) -svuint64_t sveor3_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u16))) -svuint16_t sveor3_u16(svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s8))) -svint8_t sveor3_s8(svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s32))) -svint32_t sveor3_s32(svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s64))) -svint64_t sveor3_s64(svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s16))) -svint16_t sveor3_s16(svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u8))) -svuint8_t sveorbt_n_u8(svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u32))) -svuint32_t sveorbt_n_u32(svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u64))) -svuint64_t sveorbt_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u16))) -svuint16_t sveorbt_n_u16(svuint16_t, svuint16_t, uint16_t); 
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s8))) -svint8_t sveorbt_n_s8(svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s32))) -svint32_t sveorbt_n_s32(svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s64))) -svint64_t sveorbt_n_s64(svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s16))) -svint16_t sveorbt_n_s16(svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u8))) -svuint8_t sveorbt_u8(svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u32))) -svuint32_t sveorbt_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u64))) -svuint64_t sveorbt_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u16))) -svuint16_t sveorbt_u16(svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s8))) -svint8_t sveorbt_s8(svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s32))) -svint32_t sveorbt_s32(svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s64))) -svint64_t sveorbt_s64(svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s16))) -svint16_t sveorbt_s16(svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u8))) -svuint8_t sveortb_n_u8(svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u32))) -svuint32_t sveortb_n_u32(svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u64))) -svuint64_t sveortb_n_u64(svuint64_t, 
svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u16))) -svuint16_t sveortb_n_u16(svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s8))) -svint8_t sveortb_n_s8(svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s32))) -svint32_t sveortb_n_s32(svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s64))) -svint64_t sveortb_n_s64(svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s16))) -svint16_t sveortb_n_s16(svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u8))) -svuint8_t sveortb_u8(svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u32))) -svuint32_t sveortb_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u64))) -svuint64_t sveortb_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u16))) -svuint16_t sveortb_u16(svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s8))) -svint8_t sveortb_s8(svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s32))) -svint32_t sveortb_s32(svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s64))) -svint64_t sveortb_s64(svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s16))) -svint16_t sveortb_s16(svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_m))) -svint8_t svhadd_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_m))) -svint32_t 
svhadd_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_m))) -svint64_t svhadd_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_m))) -svint16_t svhadd_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_x))) -svint8_t svhadd_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_x))) -svint32_t svhadd_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_x))) -svint64_t svhadd_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_x))) -svint16_t svhadd_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_z))) -svint8_t svhadd_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_z))) -svint32_t svhadd_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_z))) -svint64_t svhadd_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_z))) -svint16_t svhadd_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_m))) -svuint8_t svhadd_n_u8_m(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_m))) -svuint32_t svhadd_n_u32_m(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_m))) -svuint64_t svhadd_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_m))) -svuint16_t svhadd_n_u16_m(svbool_t, svuint16_t, uint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_x))) -svuint8_t svhadd_n_u8_x(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_x))) -svuint32_t svhadd_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_x))) -svuint64_t svhadd_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_x))) -svuint16_t svhadd_n_u16_x(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_z))) -svuint8_t svhadd_n_u8_z(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_z))) -svuint32_t svhadd_n_u32_z(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_z))) -svuint64_t svhadd_n_u64_z(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_z))) -svuint16_t svhadd_n_u16_z(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_m))) -svint8_t svhadd_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_m))) -svint32_t svhadd_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_m))) -svint64_t svhadd_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_m))) -svint16_t svhadd_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_x))) -svint8_t svhadd_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_x))) -svint32_t svhadd_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_x))) -svint64_t 
svhadd_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_x))) -svint16_t svhadd_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_z))) -svint8_t svhadd_s8_z(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_z))) -svint32_t svhadd_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_z))) -svint64_t svhadd_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_z))) -svint16_t svhadd_s16_z(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_m))) -svuint8_t svhadd_u8_m(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_m))) -svuint32_t svhadd_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_m))) -svuint64_t svhadd_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_m))) -svuint16_t svhadd_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_x))) -svuint8_t svhadd_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_x))) -svuint32_t svhadd_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_x))) -svuint64_t svhadd_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_x))) -svuint16_t svhadd_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_z))) -svuint8_t svhadd_u8_z(svbool_t, svuint8_t, svuint8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_z))) -svuint32_t svhadd_u32_z(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_z))) -svuint64_t svhadd_u64_z(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_z))) -svuint16_t svhadd_u16_z(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u32_z))) -svuint32_t svhistcnt_u32_z(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u64_z))) -svuint64_t svhistcnt_u64_z(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s32_z))) -svuint32_t svhistcnt_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s64_z))) -svuint64_t svhistcnt_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_u8))) -svuint8_t svhistseg_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_s8))) -svuint8_t svhistseg_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_m))) -svint8_t svhsub_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_m))) -svint32_t svhsub_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_m))) -svint64_t svhsub_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_m))) -svint16_t svhsub_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_x))) -svint8_t svhsub_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_x))) -svint32_t 
svhsub_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_x))) -svint64_t svhsub_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_x))) -svint16_t svhsub_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_z))) -svint8_t svhsub_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_z))) -svint32_t svhsub_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_z))) -svint64_t svhsub_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_z))) -svint16_t svhsub_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_m))) -svuint8_t svhsub_n_u8_m(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_m))) -svuint32_t svhsub_n_u32_m(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_m))) -svuint64_t svhsub_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_m))) -svuint16_t svhsub_n_u16_m(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_x))) -svuint8_t svhsub_n_u8_x(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_x))) -svuint32_t svhsub_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_x))) -svuint64_t svhsub_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_x))) -svuint16_t svhsub_n_u16_x(svbool_t, svuint16_t, uint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_z))) -svuint8_t svhsub_n_u8_z(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_z))) -svuint32_t svhsub_n_u32_z(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_z))) -svuint64_t svhsub_n_u64_z(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_z))) -svuint16_t svhsub_n_u16_z(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_m))) -svint8_t svhsub_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_m))) -svint32_t svhsub_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_m))) -svint64_t svhsub_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_m))) -svint16_t svhsub_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_x))) -svint8_t svhsub_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_x))) -svint32_t svhsub_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_x))) -svint64_t svhsub_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_x))) -svint16_t svhsub_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_z))) -svint8_t svhsub_s8_z(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_z))) -svint32_t svhsub_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_z))) -svint64_t svhsub_s64_z(svbool_t, svint64_t, 
svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_z))) -svint16_t svhsub_s16_z(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_m))) -svuint8_t svhsub_u8_m(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_m))) -svuint32_t svhsub_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_m))) -svuint64_t svhsub_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_m))) -svuint16_t svhsub_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_x))) -svuint8_t svhsub_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_x))) -svuint32_t svhsub_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_x))) -svuint64_t svhsub_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_x))) -svuint16_t svhsub_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_z))) -svuint8_t svhsub_u8_z(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_z))) -svuint32_t svhsub_u32_z(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_z))) -svuint64_t svhsub_u64_z(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_z))) -svuint16_t svhsub_u16_z(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_m))) -svint8_t svhsubr_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_m))) -svint32_t 
svhsubr_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_m))) -svint64_t svhsubr_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_m))) -svint16_t svhsubr_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_x))) -svint8_t svhsubr_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_x))) -svint32_t svhsubr_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_x))) -svint64_t svhsubr_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_x))) -svint16_t svhsubr_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_z))) -svint8_t svhsubr_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_z))) -svint32_t svhsubr_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_z))) -svint64_t svhsubr_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_z))) -svint16_t svhsubr_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_m))) -svuint8_t svhsubr_n_u8_m(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_m))) -svuint32_t svhsubr_n_u32_m(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_m))) -svuint64_t svhsubr_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_m))) -svuint16_t svhsubr_n_u16_m(svbool_t, svuint16_t, uint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_x))) -svuint8_t svhsubr_n_u8_x(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_x))) -svuint32_t svhsubr_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_x))) -svuint64_t svhsubr_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_x))) -svuint16_t svhsubr_n_u16_x(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_z))) -svuint8_t svhsubr_n_u8_z(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_z))) -svuint32_t svhsubr_n_u32_z(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_z))) -svuint64_t svhsubr_n_u64_z(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_z))) -svuint16_t svhsubr_n_u16_z(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_m))) -svint8_t svhsubr_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_m))) -svint32_t svhsubr_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_m))) -svint64_t svhsubr_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_m))) -svint16_t svhsubr_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_x))) -svint8_t svhsubr_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_x))) -svint32_t svhsubr_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_x))) 
-svint64_t svhsubr_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_x))) -svint16_t svhsubr_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_z))) -svint8_t svhsubr_s8_z(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_z))) -svint32_t svhsubr_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_z))) -svint64_t svhsubr_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_z))) -svint16_t svhsubr_s16_z(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_m))) -svuint8_t svhsubr_u8_m(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_m))) -svuint32_t svhsubr_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_m))) -svuint64_t svhsubr_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_m))) -svuint16_t svhsubr_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_x))) -svuint8_t svhsubr_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_x))) -svuint32_t svhsubr_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_x))) -svuint64_t svhsubr_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_x))) -svuint16_t svhsubr_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_z))) -svuint8_t svhsubr_u8_z(svbool_t, svuint8_t, svuint8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_z))) -svuint32_t svhsubr_u32_z(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_z))) -svuint64_t svhsubr_u64_z(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_z))) -svuint16_t svhsubr_u16_z(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_u32))) -svuint32_t svldnt1_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_u64))) -svuint64_t svldnt1_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_f64))) -svfloat64_t svldnt1_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_f32))) -svfloat32_t svldnt1_gather_u32base_index_f32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_s32))) -svint32_t svldnt1_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_s64))) -svint64_t svldnt1_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_u32))) -svuint32_t svldnt1_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_u64))) -svuint64_t svldnt1_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_f64))) -svfloat64_t svldnt1_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_f32))) -svfloat32_t svldnt1_gather_u32base_offset_f32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_s32))) -svint32_t svldnt1_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_s64))) -svint64_t svldnt1_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_u32))) -svuint32_t svldnt1_gather_u32base_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_u64))) -svuint64_t svldnt1_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_f64))) -svfloat64_t svldnt1_gather_u64base_f64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_f32))) -svfloat32_t svldnt1_gather_u32base_f32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_s32))) -svint32_t svldnt1_gather_u32base_s32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_s64))) -svint64_t svldnt1_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_u64))) -svuint64_t svldnt1_gather_s64index_u64(svbool_t, uint64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_f64))) -svfloat64_t svldnt1_gather_s64index_f64(svbool_t, float64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_s64))) -svint64_t svldnt1_gather_s64index_s64(svbool_t, int64_t const *, svint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_u64))) -svuint64_t svldnt1_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_f64))) -svfloat64_t svldnt1_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_s64))) -svint64_t svldnt1_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_u32))) -svuint32_t svldnt1_gather_u32offset_u32(svbool_t, uint32_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_f32))) -svfloat32_t svldnt1_gather_u32offset_f32(svbool_t, float32_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_s32))) -svint32_t svldnt1_gather_u32offset_s32(svbool_t, int32_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_u64))) -svuint64_t svldnt1_gather_s64offset_u64(svbool_t, uint64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_f64))) -svfloat64_t svldnt1_gather_s64offset_f64(svbool_t, float64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_s64))) -svint64_t svldnt1_gather_s64offset_s64(svbool_t, int64_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_u64))) -svuint64_t svldnt1_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_f64))) -svfloat64_t svldnt1_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_s64))) -svint64_t svldnt1_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_u32))) -svuint32_t svldnt1sb_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_u64))) -svuint64_t svldnt1sb_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_s32))) -svint32_t svldnt1sb_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_s64))) -svint64_t svldnt1sb_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_u32))) -svuint32_t svldnt1sb_gather_u32base_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_u64))) -svuint64_t svldnt1sb_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_s32))) -svint32_t svldnt1sb_gather_u32base_s32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_s64))) -svint64_t svldnt1sb_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_u32))) -svuint32_t svldnt1sb_gather_u32offset_u32(svbool_t, int8_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_s32))) -svint32_t svldnt1sb_gather_u32offset_s32(svbool_t, int8_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_u64))) -svuint64_t 
svldnt1sb_gather_s64offset_u64(svbool_t, int8_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_s64))) -svint64_t svldnt1sb_gather_s64offset_s64(svbool_t, int8_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_u64))) -svuint64_t svldnt1sb_gather_u64offset_u64(svbool_t, int8_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_s64))) -svint64_t svldnt1sb_gather_u64offset_s64(svbool_t, int8_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_u32))) -svuint32_t svldnt1sh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_u64))) -svuint64_t svldnt1sh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_s32))) -svint32_t svldnt1sh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_s64))) -svint64_t svldnt1sh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_u32))) -svuint32_t svldnt1sh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_u64))) -svuint64_t svldnt1sh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_s32))) -svint32_t svldnt1sh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_s64))) -svint64_t 
svldnt1sh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_u32))) -svuint32_t svldnt1sh_gather_u32base_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_u64))) -svuint64_t svldnt1sh_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_s32))) -svint32_t svldnt1sh_gather_u32base_s32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_s64))) -svint64_t svldnt1sh_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_u64))) -svuint64_t svldnt1sh_gather_s64index_u64(svbool_t, int16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_s64))) -svint64_t svldnt1sh_gather_s64index_s64(svbool_t, int16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_u64))) -svuint64_t svldnt1sh_gather_u64index_u64(svbool_t, int16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_s64))) -svint64_t svldnt1sh_gather_u64index_s64(svbool_t, int16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_u32))) -svuint32_t svldnt1sh_gather_u32offset_u32(svbool_t, int16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_s32))) -svint32_t svldnt1sh_gather_u32offset_s32(svbool_t, int16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_u64))) -svuint64_t svldnt1sh_gather_s64offset_u64(svbool_t, int16_t const *, svint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_s64))) -svint64_t svldnt1sh_gather_s64offset_s64(svbool_t, int16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_u64))) -svuint64_t svldnt1sh_gather_u64offset_u64(svbool_t, int16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_s64))) -svint64_t svldnt1sh_gather_u64offset_s64(svbool_t, int16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_u64))) -svuint64_t svldnt1sw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_s64))) -svint64_t svldnt1sw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_u64))) -svuint64_t svldnt1sw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_s64))) -svint64_t svldnt1sw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_u64))) -svuint64_t svldnt1sw_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_s64))) -svint64_t svldnt1sw_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_u64))) -svuint64_t svldnt1sw_gather_s64index_u64(svbool_t, int32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_s64))) -svint64_t svldnt1sw_gather_s64index_s64(svbool_t, int32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_u64))) 
-svuint64_t svldnt1sw_gather_u64index_u64(svbool_t, int32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_s64))) -svint64_t svldnt1sw_gather_u64index_s64(svbool_t, int32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_u64))) -svuint64_t svldnt1sw_gather_s64offset_u64(svbool_t, int32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_s64))) -svint64_t svldnt1sw_gather_s64offset_s64(svbool_t, int32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_u64))) -svuint64_t svldnt1sw_gather_u64offset_u64(svbool_t, int32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_s64))) -svint64_t svldnt1sw_gather_u64offset_s64(svbool_t, int32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_u32))) -svuint32_t svldnt1ub_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_u64))) -svuint64_t svldnt1ub_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_s32))) -svint32_t svldnt1ub_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_s64))) -svint64_t svldnt1ub_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_u32))) -svuint32_t svldnt1ub_gather_u32base_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_u64))) -svuint64_t svldnt1ub_gather_u64base_u64(svbool_t, svuint64_t); 
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_s32))) -svint32_t svldnt1ub_gather_u32base_s32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_s64))) -svint64_t svldnt1ub_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_u32))) -svuint32_t svldnt1ub_gather_u32offset_u32(svbool_t, uint8_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_s32))) -svint32_t svldnt1ub_gather_u32offset_s32(svbool_t, uint8_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_u64))) -svuint64_t svldnt1ub_gather_s64offset_u64(svbool_t, uint8_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_s64))) -svint64_t svldnt1ub_gather_s64offset_s64(svbool_t, uint8_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_u64))) -svuint64_t svldnt1ub_gather_u64offset_u64(svbool_t, uint8_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_s64))) -svint64_t svldnt1ub_gather_u64offset_s64(svbool_t, uint8_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_u32))) -svuint32_t svldnt1uh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_u64))) -svuint64_t svldnt1uh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_s32))) -svint32_t svldnt1uh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_s64))) -svint64_t svldnt1uh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_u32))) -svuint32_t svldnt1uh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_u64))) -svuint64_t svldnt1uh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_s32))) -svint32_t svldnt1uh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_s64))) -svint64_t svldnt1uh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_u32))) -svuint32_t svldnt1uh_gather_u32base_u32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_u64))) -svuint64_t svldnt1uh_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_s32))) -svint32_t svldnt1uh_gather_u32base_s32(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_s64))) -svint64_t svldnt1uh_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_u64))) -svuint64_t svldnt1uh_gather_s64index_u64(svbool_t, uint16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_s64))) -svint64_t svldnt1uh_gather_s64index_s64(svbool_t, uint16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_u64))) -svuint64_t 
svldnt1uh_gather_u64index_u64(svbool_t, uint16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_s64))) -svint64_t svldnt1uh_gather_u64index_s64(svbool_t, uint16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_u32))) -svuint32_t svldnt1uh_gather_u32offset_u32(svbool_t, uint16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_s32))) -svint32_t svldnt1uh_gather_u32offset_s32(svbool_t, uint16_t const *, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_u64))) -svuint64_t svldnt1uh_gather_s64offset_u64(svbool_t, uint16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_s64))) -svint64_t svldnt1uh_gather_s64offset_s64(svbool_t, uint16_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_u64))) -svuint64_t svldnt1uh_gather_u64offset_u64(svbool_t, uint16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_s64))) -svint64_t svldnt1uh_gather_u64offset_s64(svbool_t, uint16_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_u64))) -svuint64_t svldnt1uw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_s64))) -svint64_t svldnt1uw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_u64))) -svuint64_t svldnt1uw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_s64))) -svint64_t 
svldnt1uw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_u64))) -svuint64_t svldnt1uw_gather_u64base_u64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_s64))) -svint64_t svldnt1uw_gather_u64base_s64(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_u64))) -svuint64_t svldnt1uw_gather_s64index_u64(svbool_t, uint32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_s64))) -svint64_t svldnt1uw_gather_s64index_s64(svbool_t, uint32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_u64))) -svuint64_t svldnt1uw_gather_u64index_u64(svbool_t, uint32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_s64))) -svint64_t svldnt1uw_gather_u64index_s64(svbool_t, uint32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_u64))) -svuint64_t svldnt1uw_gather_s64offset_u64(svbool_t, uint32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_s64))) -svint64_t svldnt1uw_gather_s64offset_s64(svbool_t, uint32_t const *, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_u64))) -svuint64_t svldnt1uw_gather_u64offset_u64(svbool_t, uint32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_s64))) -svint64_t svldnt1uw_gather_u64offset_s64(svbool_t, uint32_t const *, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_m))) -svint64_t svlogb_f64_m(svint64_t, svbool_t, svfloat64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_m))) -svint32_t svlogb_f32_m(svint32_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_m))) -svint16_t svlogb_f16_m(svint16_t, svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_x))) -svint64_t svlogb_f64_x(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_x))) -svint32_t svlogb_f32_x(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_x))) -svint16_t svlogb_f16_x(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_z))) -svint64_t svlogb_f64_z(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_z))) -svint32_t svlogb_f32_z(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_z))) -svint16_t svlogb_f16_z(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u8))) -svbool_t svmatch_u8(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u16))) -svbool_t svmatch_u16(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s8))) -svbool_t svmatch_s8(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s16))) -svbool_t svmatch_s16(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_m))) -svfloat64_t svmaxnmp_f64_m(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_m))) -svfloat32_t svmaxnmp_f32_m(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_m))) -svfloat16_t svmaxnmp_f16_m(svbool_t, svfloat16_t, svfloat16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_x))) -svfloat64_t svmaxnmp_f64_x(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_x))) -svfloat32_t svmaxnmp_f32_x(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_x))) -svfloat16_t svmaxnmp_f16_x(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_m))) -svfloat64_t svmaxp_f64_m(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_m))) -svfloat32_t svmaxp_f32_m(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_m))) -svfloat16_t svmaxp_f16_m(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_x))) -svfloat64_t svmaxp_f64_x(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_x))) -svfloat32_t svmaxp_f32_x(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_x))) -svfloat16_t svmaxp_f16_x(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_m))) -svint8_t svmaxp_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_m))) -svint32_t svmaxp_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_m))) -svint64_t svmaxp_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_m))) -svint16_t svmaxp_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_x))) -svint8_t svmaxp_s8_x(svbool_t, svint8_t, svint8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_x))) -svint32_t svmaxp_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_x))) -svint64_t svmaxp_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_x))) -svint16_t svmaxp_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_m))) -svuint8_t svmaxp_u8_m(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_m))) -svuint32_t svmaxp_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_m))) -svuint64_t svmaxp_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_m))) -svuint16_t svmaxp_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_x))) -svuint8_t svmaxp_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_x))) -svuint32_t svmaxp_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_x))) -svuint64_t svmaxp_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_x))) -svuint16_t svmaxp_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_m))) -svfloat64_t svminnmp_f64_m(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_m))) -svfloat32_t svminnmp_f32_m(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_m))) -svfloat16_t svminnmp_f16_m(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_x))) 
-svfloat64_t svminnmp_f64_x(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_x))) -svfloat32_t svminnmp_f32_x(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_x))) -svfloat16_t svminnmp_f16_x(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_m))) -svfloat64_t svminp_f64_m(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_m))) -svfloat32_t svminp_f32_m(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_m))) -svfloat16_t svminp_f16_m(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_x))) -svfloat64_t svminp_f64_x(svbool_t, svfloat64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_x))) -svfloat32_t svminp_f32_x(svbool_t, svfloat32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_x))) -svfloat16_t svminp_f16_x(svbool_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_m))) -svint8_t svminp_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_m))) -svint32_t svminp_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_m))) -svint64_t svminp_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_m))) -svint16_t svminp_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_x))) -svint8_t svminp_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_x))) -svint32_t svminp_s32_x(svbool_t, svint32_t, svint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_x))) -svint64_t svminp_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_x))) -svint16_t svminp_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_m))) -svuint8_t svminp_u8_m(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_m))) -svuint32_t svminp_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_m))) -svuint64_t svminp_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_m))) -svuint16_t svminp_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_x))) -svuint8_t svminp_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_x))) -svuint32_t svminp_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_x))) -svuint64_t svminp_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_x))) -svuint16_t svminp_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u32))) -svuint32_t svmla_lane_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u64))) -svuint64_t svmla_lane_u64(svuint64_t, svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u16))) -svuint16_t svmla_lane_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s32))) -svint32_t svmla_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s64))) -svint64_t svmla_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s16))) -svint16_t svmla_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_f32))) -svfloat32_t svmlalb_n_f32(svfloat32_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s32))) -svint32_t svmlalb_n_s32(svint32_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s64))) -svint64_t svmlalb_n_s64(svint64_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s16))) -svint16_t svmlalb_n_s16(svint16_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u32))) -svuint32_t svmlalb_n_u32(svuint32_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u64))) -svuint64_t svmlalb_n_u64(svuint64_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u16))) -svuint16_t svmlalb_n_u16(svuint16_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_f32))) -svfloat32_t svmlalb_f32(svfloat32_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s32))) -svint32_t svmlalb_s32(svint32_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s64))) -svint64_t svmlalb_s64(svint64_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s16))) -svint16_t svmlalb_s16(svint16_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u32))) -svuint32_t svmlalb_u32(svuint32_t, svuint16_t, svuint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u64))) -svuint64_t svmlalb_u64(svuint64_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u16))) -svuint16_t svmlalb_u16(svuint16_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_f32))) -svfloat32_t svmlalb_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s32))) -svint32_t svmlalb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s64))) -svint64_t svmlalb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u32))) -svuint32_t svmlalb_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u64))) -svuint64_t svmlalb_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_f32))) -svfloat32_t svmlalt_n_f32(svfloat32_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s32))) -svint32_t svmlalt_n_s32(svint32_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s64))) -svint64_t svmlalt_n_s64(svint64_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s16))) -svint16_t svmlalt_n_s16(svint16_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u32))) -svuint32_t svmlalt_n_u32(svuint32_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u64))) -svuint64_t svmlalt_n_u64(svuint64_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u16))) -svuint16_t svmlalt_n_u16(svuint16_t, svuint8_t, 
uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_f32))) -svfloat32_t svmlalt_f32(svfloat32_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s32))) -svint32_t svmlalt_s32(svint32_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s64))) -svint64_t svmlalt_s64(svint64_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s16))) -svint16_t svmlalt_s16(svint16_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u32))) -svuint32_t svmlalt_u32(svuint32_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u64))) -svuint64_t svmlalt_u64(svuint64_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u16))) -svuint16_t svmlalt_u16(svuint16_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_f32))) -svfloat32_t svmlalt_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s32))) -svint32_t svmlalt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s64))) -svint64_t svmlalt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u32))) -svuint32_t svmlalt_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u64))) -svuint64_t svmlalt_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u32))) -svuint32_t svmls_lane_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u64))) -svuint64_t 
svmls_lane_u64(svuint64_t, svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u16))) -svuint16_t svmls_lane_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s32))) -svint32_t svmls_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s64))) -svint64_t svmls_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s16))) -svint16_t svmls_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_f32))) -svfloat32_t svmlslb_n_f32(svfloat32_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s32))) -svint32_t svmlslb_n_s32(svint32_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s64))) -svint64_t svmlslb_n_s64(svint64_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s16))) -svint16_t svmlslb_n_s16(svint16_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u32))) -svuint32_t svmlslb_n_u32(svuint32_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u64))) -svuint64_t svmlslb_n_u64(svuint64_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u16))) -svuint16_t svmlslb_n_u16(svuint16_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_f32))) -svfloat32_t svmlslb_f32(svfloat32_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s32))) -svint32_t svmlslb_s32(svint32_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s64))) -svint64_t 
svmlslb_s64(svint64_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s16))) -svint16_t svmlslb_s16(svint16_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u32))) -svuint32_t svmlslb_u32(svuint32_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u64))) -svuint64_t svmlslb_u64(svuint64_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u16))) -svuint16_t svmlslb_u16(svuint16_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_f32))) -svfloat32_t svmlslb_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s32))) -svint32_t svmlslb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s64))) -svint64_t svmlslb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u32))) -svuint32_t svmlslb_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u64))) -svuint64_t svmlslb_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_f32))) -svfloat32_t svmlslt_n_f32(svfloat32_t, svfloat16_t, float16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s32))) -svint32_t svmlslt_n_s32(svint32_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s64))) -svint64_t svmlslt_n_s64(svint64_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s16))) -svint16_t svmlslt_n_s16(svint16_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u32))) 
-svuint32_t svmlslt_n_u32(svuint32_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u64))) -svuint64_t svmlslt_n_u64(svuint64_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u16))) -svuint16_t svmlslt_n_u16(svuint16_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_f32))) -svfloat32_t svmlslt_f32(svfloat32_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s32))) -svint32_t svmlslt_s32(svint32_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s64))) -svint64_t svmlslt_s64(svint64_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s16))) -svint16_t svmlslt_s16(svint16_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u32))) -svuint32_t svmlslt_u32(svuint32_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u64))) -svuint64_t svmlslt_u64(svuint64_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u16))) -svuint16_t svmlslt_u16(svuint16_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_f32))) -svfloat32_t svmlslt_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s32))) -svint32_t svmlslt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s64))) -svint64_t svmlslt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u32))) -svuint32_t svmlslt_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u64))) 
-svuint64_t svmlslt_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s32))) -svint32_t svmovlb_s32(svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s64))) -svint64_t svmovlb_s64(svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s16))) -svint16_t svmovlb_s16(svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u32))) -svuint32_t svmovlb_u32(svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u64))) -svuint64_t svmovlb_u64(svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u16))) -svuint16_t svmovlb_u16(svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s32))) -svint32_t svmovlt_s32(svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s64))) -svint64_t svmovlt_s64(svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s16))) -svint16_t svmovlt_s16(svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u32))) -svuint32_t svmovlt_u32(svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u64))) -svuint64_t svmovlt_u64(svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u16))) -svuint16_t svmovlt_u16(svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u32))) -svuint32_t svmul_lane_u32(svuint32_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u64))) -svuint64_t svmul_lane_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u16))) -svuint16_t svmul_lane_u16(svuint16_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s32))) -svint32_t svmul_lane_s32(svint32_t, svint32_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s64))) -svint64_t svmul_lane_s64(svint64_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s16))) -svint16_t svmul_lane_s16(svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s32))) -svint32_t svmullb_n_s32(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s64))) -svint64_t svmullb_n_s64(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s16))) -svint16_t svmullb_n_s16(svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u32))) -svuint32_t svmullb_n_u32(svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u64))) -svuint64_t svmullb_n_u64(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u16))) -svuint16_t svmullb_n_u16(svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s32))) -svint32_t svmullb_s32(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s64))) -svint64_t svmullb_s64(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s16))) -svint16_t svmullb_s16(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u32))) -svuint32_t svmullb_u32(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u64))) -svuint64_t svmullb_u64(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u16))) -svuint16_t svmullb_u16(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s32))) -svint32_t svmullb_lane_s32(svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s64))) -svint64_t 
svmullb_lane_s64(svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u32))) -svuint32_t svmullb_lane_u32(svuint16_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u64))) -svuint64_t svmullb_lane_u64(svuint32_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s32))) -svint32_t svmullt_n_s32(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s64))) -svint64_t svmullt_n_s64(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s16))) -svint16_t svmullt_n_s16(svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u32))) -svuint32_t svmullt_n_u32(svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u64))) -svuint64_t svmullt_n_u64(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u16))) -svuint16_t svmullt_n_u16(svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s32))) -svint32_t svmullt_s32(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s64))) -svint64_t svmullt_s64(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s16))) -svint16_t svmullt_s16(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u32))) -svuint32_t svmullt_u32(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u64))) -svuint64_t svmullt_u64(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u16))) -svuint16_t svmullt_u16(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s32))) -svint32_t svmullt_lane_s32(svint16_t, svint16_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s64))) -svint64_t svmullt_lane_s64(svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u32))) -svuint32_t svmullt_lane_u32(svuint16_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u64))) -svuint64_t svmullt_lane_u64(svuint32_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u8))) -svuint8_t svnbsl_n_u8(svuint8_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u32))) -svuint32_t svnbsl_n_u32(svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u64))) -svuint64_t svnbsl_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u16))) -svuint16_t svnbsl_n_u16(svuint16_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s8))) -svint8_t svnbsl_n_s8(svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s32))) -svint32_t svnbsl_n_s32(svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s64))) -svint64_t svnbsl_n_s64(svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s16))) -svint16_t svnbsl_n_s16(svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u8))) -svuint8_t svnbsl_u8(svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u32))) -svuint32_t svnbsl_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u64))) -svuint64_t svnbsl_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u16))) -svuint16_t 
svnbsl_u16(svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s8))) -svint8_t svnbsl_s8(svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s32))) -svint32_t svnbsl_s32(svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s64))) -svint64_t svnbsl_s64(svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s16))) -svint16_t svnbsl_s16(svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u8))) -svbool_t svnmatch_u8(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u16))) -svbool_t svnmatch_u16(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s8))) -svbool_t svnmatch_s8(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s16))) -svbool_t svnmatch_s16(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_n_u8))) -svuint8_t svpmul_n_u8(svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_u8))) -svuint8_t svpmul_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u64))) -svuint64_t svpmullb_n_u64(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u16))) -svuint16_t svpmullb_n_u16(svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u64))) -svuint64_t svpmullb_u64(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u16))) -svuint16_t svpmullb_u16(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u8))) -svuint8_t svpmullb_pair_n_u8(svuint8_t, uint8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u32))) -svuint32_t svpmullb_pair_n_u32(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u8))) -svuint8_t svpmullb_pair_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u32))) -svuint32_t svpmullb_pair_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u64))) -svuint64_t svpmullt_n_u64(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u16))) -svuint16_t svpmullt_n_u16(svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u64))) -svuint64_t svpmullt_u64(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u16))) -svuint16_t svpmullt_u16(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u8))) -svuint8_t svpmullt_pair_n_u8(svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u32))) -svuint32_t svpmullt_pair_n_u32(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u8))) -svuint8_t svpmullt_pair_u8(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u32))) -svuint32_t svpmullt_pair_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_m))) -svint8_t svqabs_s8_m(svint8_t, svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_m))) -svint32_t svqabs_s32_m(svint32_t, svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_m))) -svint64_t svqabs_s64_m(svint64_t, svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_m))) -svint16_t svqabs_s16_m(svint16_t, svbool_t, svint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_x))) -svint8_t svqabs_s8_x(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_x))) -svint32_t svqabs_s32_x(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_x))) -svint64_t svqabs_s64_x(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_x))) -svint16_t svqabs_s16_x(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_z))) -svint8_t svqabs_s8_z(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_z))) -svint32_t svqabs_s32_z(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_z))) -svint64_t svqabs_s64_z(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_z))) -svint16_t svqabs_s16_z(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_m))) -svint8_t svqadd_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_m))) -svint32_t svqadd_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_m))) -svint64_t svqadd_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_m))) -svint16_t svqadd_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_x))) -svint8_t svqadd_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_x))) -svint32_t svqadd_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_x))) -svint64_t svqadd_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_x))) 
-svint16_t svqadd_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_z))) -svint8_t svqadd_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_z))) -svint32_t svqadd_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_z))) -svint64_t svqadd_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_z))) -svint16_t svqadd_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_m))) -svuint8_t svqadd_n_u8_m(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_m))) -svuint32_t svqadd_n_u32_m(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_m))) -svuint64_t svqadd_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_m))) -svuint16_t svqadd_n_u16_m(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_x))) -svuint8_t svqadd_n_u8_x(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_x))) -svuint32_t svqadd_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_x))) -svuint64_t svqadd_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_x))) -svuint16_t svqadd_n_u16_x(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_z))) -svuint8_t svqadd_n_u8_z(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_z))) -svuint32_t svqadd_n_u32_z(svbool_t, svuint32_t, uint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_z))) -svuint64_t svqadd_n_u64_z(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_z))) -svuint16_t svqadd_n_u16_z(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_m))) -svint8_t svqadd_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_m))) -svint32_t svqadd_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_m))) -svint64_t svqadd_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_m))) -svint16_t svqadd_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_x))) -svint8_t svqadd_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_x))) -svint32_t svqadd_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_x))) -svint64_t svqadd_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_x))) -svint16_t svqadd_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_z))) -svint8_t svqadd_s8_z(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_z))) -svint32_t svqadd_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_z))) -svint64_t svqadd_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_z))) -svint16_t svqadd_s16_z(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_m))) -svuint8_t svqadd_u8_m(svbool_t, svuint8_t, svuint8_t); 
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_m))) -svuint32_t svqadd_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_m))) -svuint64_t svqadd_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_m))) -svuint16_t svqadd_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_x))) -svuint8_t svqadd_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_x))) -svuint32_t svqadd_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_x))) -svuint64_t svqadd_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_x))) -svuint16_t svqadd_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_z))) -svuint8_t svqadd_u8_z(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_z))) -svuint32_t svqadd_u32_z(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_z))) -svuint64_t svqadd_u64_z(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_z))) -svuint16_t svqadd_u16_z(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s8))) -svint8_t svqcadd_s8(svint8_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s32))) -svint32_t svqcadd_s32(svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s64))) -svint64_t svqcadd_s64(svint64_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s16))) -svint16_t svqcadd_s16(svint16_t, 
svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s32))) -svint32_t svqdmlalb_n_s32(svint32_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s64))) -svint64_t svqdmlalb_n_s64(svint64_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s16))) -svint16_t svqdmlalb_n_s16(svint16_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s32))) -svint32_t svqdmlalb_s32(svint32_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s64))) -svint64_t svqdmlalb_s64(svint64_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s16))) -svint16_t svqdmlalb_s16(svint16_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s32))) -svint32_t svqdmlalb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s64))) -svint64_t svqdmlalb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s32))) -svint32_t svqdmlalbt_n_s32(svint32_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s64))) -svint64_t svqdmlalbt_n_s64(svint64_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s16))) -svint16_t svqdmlalbt_n_s16(svint16_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s32))) -svint32_t svqdmlalbt_s32(svint32_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s64))) -svint64_t svqdmlalbt_s64(svint64_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s16))) -svint16_t svqdmlalbt_s16(svint16_t, svint8_t, svint8_t); 
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s32))) -svint32_t svqdmlalt_n_s32(svint32_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s64))) -svint64_t svqdmlalt_n_s64(svint64_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s16))) -svint16_t svqdmlalt_n_s16(svint16_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s32))) -svint32_t svqdmlalt_s32(svint32_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s64))) -svint64_t svqdmlalt_s64(svint64_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s16))) -svint16_t svqdmlalt_s16(svint16_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s32))) -svint32_t svqdmlalt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s64))) -svint64_t svqdmlalt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s32))) -svint32_t svqdmlslb_n_s32(svint32_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s64))) -svint64_t svqdmlslb_n_s64(svint64_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s16))) -svint16_t svqdmlslb_n_s16(svint16_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s32))) -svint32_t svqdmlslb_s32(svint32_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s64))) -svint64_t svqdmlslb_s64(svint64_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s16))) -svint16_t svqdmlslb_s16(svint16_t, svint8_t, svint8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s32))) -svint32_t svqdmlslb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s64))) -svint64_t svqdmlslb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s32))) -svint32_t svqdmlslbt_n_s32(svint32_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s64))) -svint64_t svqdmlslbt_n_s64(svint64_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s16))) -svint16_t svqdmlslbt_n_s16(svint16_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s32))) -svint32_t svqdmlslbt_s32(svint32_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s64))) -svint64_t svqdmlslbt_s64(svint64_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s16))) -svint16_t svqdmlslbt_s16(svint16_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s32))) -svint32_t svqdmlslt_n_s32(svint32_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s64))) -svint64_t svqdmlslt_n_s64(svint64_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s16))) -svint16_t svqdmlslt_n_s16(svint16_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s32))) -svint32_t svqdmlslt_s32(svint32_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s64))) -svint64_t svqdmlslt_s64(svint64_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s16))) -svint16_t svqdmlslt_s16(svint16_t, svint8_t, svint8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s32))) -svint32_t svqdmlslt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s64))) -svint64_t svqdmlslt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s8))) -svint8_t svqdmulh_n_s8(svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s32))) -svint32_t svqdmulh_n_s32(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s64))) -svint64_t svqdmulh_n_s64(svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s16))) -svint16_t svqdmulh_n_s16(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8))) -svint8_t svqdmulh_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32))) -svint32_t svqdmulh_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64))) -svint64_t svqdmulh_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16))) -svint16_t svqdmulh_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s32))) -svint32_t svqdmulh_lane_s32(svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s64))) -svint64_t svqdmulh_lane_s64(svint64_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s16))) -svint16_t svqdmulh_lane_s16(svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s32))) -svint32_t svqdmullb_n_s32(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s64))) -svint64_t svqdmullb_n_s64(svint32_t, int32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s16))) -svint16_t svqdmullb_n_s16(svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s32))) -svint32_t svqdmullb_s32(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s64))) -svint64_t svqdmullb_s64(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s16))) -svint16_t svqdmullb_s16(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s32))) -svint32_t svqdmullb_lane_s32(svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s64))) -svint64_t svqdmullb_lane_s64(svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s32))) -svint32_t svqdmullt_n_s32(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s64))) -svint64_t svqdmullt_n_s64(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s16))) -svint16_t svqdmullt_n_s16(svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s32))) -svint32_t svqdmullt_s32(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s64))) -svint64_t svqdmullt_s64(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s16))) -svint16_t svqdmullt_s16(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s32))) -svint32_t svqdmullt_lane_s32(svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s64))) -svint64_t svqdmullt_lane_s64(svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_m))) -svint8_t svqneg_s8_m(svint8_t, svbool_t, svint8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_m))) -svint32_t svqneg_s32_m(svint32_t, svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_m))) -svint64_t svqneg_s64_m(svint64_t, svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_m))) -svint16_t svqneg_s16_m(svint16_t, svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_x))) -svint8_t svqneg_s8_x(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_x))) -svint32_t svqneg_s32_x(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_x))) -svint64_t svqneg_s64_x(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_x))) -svint16_t svqneg_s16_x(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_z))) -svint8_t svqneg_s8_z(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_z))) -svint32_t svqneg_s32_z(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_z))) -svint64_t svqneg_s64_z(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_z))) -svint16_t svqneg_s16_z(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s8))) -svint8_t svqrdcmlah_s8(svint8_t, svint8_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s32))) -svint32_t svqrdcmlah_s32(svint32_t, svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s64))) -svint64_t svqrdcmlah_s64(svint64_t, svint64_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s16))) -svint16_t svqrdcmlah_s16(svint16_t, svint16_t, svint16_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s32))) -svint32_t svqrdcmlah_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s16))) -svint16_t svqrdcmlah_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s8))) -svint8_t svqrdmlah_n_s8(svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s32))) -svint32_t svqrdmlah_n_s32(svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s64))) -svint64_t svqrdmlah_n_s64(svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s16))) -svint16_t svqrdmlah_n_s16(svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s8))) -svint8_t svqrdmlah_s8(svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s32))) -svint32_t svqrdmlah_s32(svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s64))) -svint64_t svqrdmlah_s64(svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s16))) -svint16_t svqrdmlah_s16(svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s32))) -svint32_t svqrdmlah_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s64))) -svint64_t svqrdmlah_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s16))) -svint16_t svqrdmlah_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s8))) -svint8_t 
svqrdmlsh_n_s8(svint8_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s32))) -svint32_t svqrdmlsh_n_s32(svint32_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s64))) -svint64_t svqrdmlsh_n_s64(svint64_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s16))) -svint16_t svqrdmlsh_n_s16(svint16_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s8))) -svint8_t svqrdmlsh_s8(svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s32))) -svint32_t svqrdmlsh_s32(svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s64))) -svint64_t svqrdmlsh_s64(svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s16))) -svint16_t svqrdmlsh_s16(svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s32))) -svint32_t svqrdmlsh_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s64))) -svint64_t svqrdmlsh_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s16))) -svint16_t svqrdmlsh_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s8))) -svint8_t svqrdmulh_n_s8(svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s32))) -svint32_t svqrdmulh_n_s32(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s64))) -svint64_t svqrdmulh_n_s64(svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s16))) -svint16_t svqrdmulh_n_s16(svint16_t, int16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s8))) -svint8_t svqrdmulh_s8(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s32))) -svint32_t svqrdmulh_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s64))) -svint64_t svqrdmulh_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s16))) -svint16_t svqrdmulh_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s32))) -svint32_t svqrdmulh_lane_s32(svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s64))) -svint64_t svqrdmulh_lane_s64(svint64_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s16))) -svint16_t svqrdmulh_lane_s16(svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_m))) -svint8_t svqrshl_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_m))) -svint32_t svqrshl_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_m))) -svint64_t svqrshl_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_m))) -svint16_t svqrshl_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_x))) -svint8_t svqrshl_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_x))) -svint32_t svqrshl_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_x))) -svint64_t svqrshl_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_x))) -svint16_t 
svqrshl_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_z))) -svint8_t svqrshl_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_z))) -svint32_t svqrshl_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_z))) -svint64_t svqrshl_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_z))) -svint16_t svqrshl_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_m))) -svuint8_t svqrshl_n_u8_m(svbool_t, svuint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_m))) -svuint32_t svqrshl_n_u32_m(svbool_t, svuint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_m))) -svuint64_t svqrshl_n_u64_m(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_m))) -svuint16_t svqrshl_n_u16_m(svbool_t, svuint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_x))) -svuint8_t svqrshl_n_u8_x(svbool_t, svuint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_x))) -svuint32_t svqrshl_n_u32_x(svbool_t, svuint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_x))) -svuint64_t svqrshl_n_u64_x(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_x))) -svuint16_t svqrshl_n_u16_x(svbool_t, svuint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_z))) -svuint8_t svqrshl_n_u8_z(svbool_t, svuint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_z))) -svuint32_t svqrshl_n_u32_z(svbool_t, svuint32_t, int32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_z))) -svuint64_t svqrshl_n_u64_z(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_z))) -svuint16_t svqrshl_n_u16_z(svbool_t, svuint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_m))) -svint8_t svqrshl_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_m))) -svint32_t svqrshl_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_m))) -svint64_t svqrshl_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_m))) -svint16_t svqrshl_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_x))) -svint8_t svqrshl_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_x))) -svint32_t svqrshl_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_x))) -svint64_t svqrshl_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_x))) -svint16_t svqrshl_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_z))) -svint8_t svqrshl_s8_z(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_z))) -svint32_t svqrshl_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_z))) -svint64_t svqrshl_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_z))) -svint16_t svqrshl_s16_z(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_m))) -svuint8_t 
svqrshl_u8_m(svbool_t, svuint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_m))) -svuint32_t svqrshl_u32_m(svbool_t, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_m))) -svuint64_t svqrshl_u64_m(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_m))) -svuint16_t svqrshl_u16_m(svbool_t, svuint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_x))) -svuint8_t svqrshl_u8_x(svbool_t, svuint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_x))) -svuint32_t svqrshl_u32_x(svbool_t, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_x))) -svuint64_t svqrshl_u64_x(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_x))) -svuint16_t svqrshl_u16_x(svbool_t, svuint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_z))) -svuint8_t svqrshl_u8_z(svbool_t, svuint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_z))) -svuint32_t svqrshl_u32_z(svbool_t, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_z))) -svuint64_t svqrshl_u64_z(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_z))) -svuint16_t svqrshl_u16_z(svbool_t, svuint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s32))) -svint16_t svqrshrnb_n_s32(svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s64))) -svint32_t svqrshrnb_n_s64(svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s16))) -svint8_t svqrshrnb_n_s16(svint16_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u32))) -svuint16_t svqrshrnb_n_u32(svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u64))) -svuint32_t svqrshrnb_n_u64(svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u16))) -svuint8_t svqrshrnb_n_u16(svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s32))) -svint16_t svqrshrnt_n_s32(svint16_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s64))) -svint32_t svqrshrnt_n_s64(svint32_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s16))) -svint8_t svqrshrnt_n_s16(svint8_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u32))) -svuint16_t svqrshrnt_n_u32(svuint16_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u64))) -svuint32_t svqrshrnt_n_u64(svuint32_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u16))) -svuint8_t svqrshrnt_n_u16(svuint8_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s32))) -svuint16_t svqrshrunb_n_s32(svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s64))) -svuint32_t svqrshrunb_n_s64(svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s16))) -svuint8_t svqrshrunb_n_s16(svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s32))) -svuint16_t svqrshrunt_n_s32(svuint16_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s64))) -svuint32_t svqrshrunt_n_s64(svuint32_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s16))) 
-svuint8_t svqrshrunt_n_s16(svuint8_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_m))) -svint8_t svqshl_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_m))) -svint32_t svqshl_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_m))) -svint64_t svqshl_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_m))) -svint16_t svqshl_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_x))) -svint8_t svqshl_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_x))) -svint32_t svqshl_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_x))) -svint64_t svqshl_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_x))) -svint16_t svqshl_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_z))) -svint8_t svqshl_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_z))) -svint32_t svqshl_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_z))) -svint64_t svqshl_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_z))) -svint16_t svqshl_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_m))) -svuint8_t svqshl_n_u8_m(svbool_t, svuint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_m))) -svuint32_t svqshl_n_u32_m(svbool_t, svuint32_t, int32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_m))) -svuint64_t svqshl_n_u64_m(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_m))) -svuint16_t svqshl_n_u16_m(svbool_t, svuint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_x))) -svuint8_t svqshl_n_u8_x(svbool_t, svuint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_x))) -svuint32_t svqshl_n_u32_x(svbool_t, svuint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_x))) -svuint64_t svqshl_n_u64_x(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_x))) -svuint16_t svqshl_n_u16_x(svbool_t, svuint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_z))) -svuint8_t svqshl_n_u8_z(svbool_t, svuint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_z))) -svuint32_t svqshl_n_u32_z(svbool_t, svuint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_z))) -svuint64_t svqshl_n_u64_z(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_z))) -svuint16_t svqshl_n_u16_z(svbool_t, svuint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_m))) -svint8_t svqshl_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_m))) -svint32_t svqshl_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_m))) -svint64_t svqshl_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_m))) -svint16_t svqshl_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_x))) -svint8_t 
svqshl_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_x))) -svint32_t svqshl_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_x))) -svint64_t svqshl_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_x))) -svint16_t svqshl_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_z))) -svint8_t svqshl_s8_z(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_z))) -svint32_t svqshl_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_z))) -svint64_t svqshl_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_z))) -svint16_t svqshl_s16_z(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_m))) -svuint8_t svqshl_u8_m(svbool_t, svuint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_m))) -svuint32_t svqshl_u32_m(svbool_t, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_m))) -svuint64_t svqshl_u64_m(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_m))) -svuint16_t svqshl_u16_m(svbool_t, svuint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_x))) -svuint8_t svqshl_u8_x(svbool_t, svuint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_x))) -svuint32_t svqshl_u32_x(svbool_t, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_x))) -svuint64_t svqshl_u64_x(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_x))) 
-svuint16_t svqshl_u16_x(svbool_t, svuint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_z))) -svuint8_t svqshl_u8_z(svbool_t, svuint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_z))) -svuint32_t svqshl_u32_z(svbool_t, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_z))) -svuint64_t svqshl_u64_z(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_z))) -svuint16_t svqshl_u16_z(svbool_t, svuint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_m))) -svuint8_t svqshlu_n_s8_m(svbool_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_m))) -svuint32_t svqshlu_n_s32_m(svbool_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_m))) -svuint64_t svqshlu_n_s64_m(svbool_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_m))) -svuint16_t svqshlu_n_s16_m(svbool_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_x))) -svuint8_t svqshlu_n_s8_x(svbool_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_x))) -svuint32_t svqshlu_n_s32_x(svbool_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_x))) -svuint64_t svqshlu_n_s64_x(svbool_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_x))) -svuint16_t svqshlu_n_s16_x(svbool_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_z))) -svuint8_t svqshlu_n_s8_z(svbool_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_z))) -svuint32_t svqshlu_n_s32_z(svbool_t, svint32_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_z))) -svuint64_t svqshlu_n_s64_z(svbool_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_z))) -svuint16_t svqshlu_n_s16_z(svbool_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s32))) -svint16_t svqshrnb_n_s32(svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s64))) -svint32_t svqshrnb_n_s64(svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s16))) -svint8_t svqshrnb_n_s16(svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u32))) -svuint16_t svqshrnb_n_u32(svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u64))) -svuint32_t svqshrnb_n_u64(svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u16))) -svuint8_t svqshrnb_n_u16(svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s32))) -svint16_t svqshrnt_n_s32(svint16_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s64))) -svint32_t svqshrnt_n_s64(svint32_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s16))) -svint8_t svqshrnt_n_s16(svint8_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u32))) -svuint16_t svqshrnt_n_u32(svuint16_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u64))) -svuint32_t svqshrnt_n_u64(svuint32_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u16))) -svuint8_t svqshrnt_n_u16(svuint8_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s32))) -svuint16_t svqshrunb_n_s32(svint32_t, 
uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s64))) -svuint32_t svqshrunb_n_s64(svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s16))) -svuint8_t svqshrunb_n_s16(svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s32))) -svuint16_t svqshrunt_n_s32(svuint16_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s64))) -svuint32_t svqshrunt_n_s64(svuint32_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s16))) -svuint8_t svqshrunt_n_s16(svuint8_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_m))) -svint8_t svqsub_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_m))) -svint32_t svqsub_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_m))) -svint64_t svqsub_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_m))) -svint16_t svqsub_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_x))) -svint8_t svqsub_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_x))) -svint32_t svqsub_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_x))) -svint64_t svqsub_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_x))) -svint16_t svqsub_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_z))) -svint8_t svqsub_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_z))) -svint32_t 
svqsub_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_z))) -svint64_t svqsub_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_z))) -svint16_t svqsub_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_m))) -svuint8_t svqsub_n_u8_m(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_m))) -svuint32_t svqsub_n_u32_m(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_m))) -svuint64_t svqsub_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_m))) -svuint16_t svqsub_n_u16_m(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_x))) -svuint8_t svqsub_n_u8_x(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_x))) -svuint32_t svqsub_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_x))) -svuint64_t svqsub_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_x))) -svuint16_t svqsub_n_u16_x(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_z))) -svuint8_t svqsub_n_u8_z(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_z))) -svuint32_t svqsub_n_u32_z(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_z))) -svuint64_t svqsub_n_u64_z(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_z))) -svuint16_t svqsub_n_u16_z(svbool_t, svuint16_t, uint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_m))) -svint8_t svqsub_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_m))) -svint32_t svqsub_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_m))) -svint64_t svqsub_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_m))) -svint16_t svqsub_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_x))) -svint8_t svqsub_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_x))) -svint32_t svqsub_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_x))) -svint64_t svqsub_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_x))) -svint16_t svqsub_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_z))) -svint8_t svqsub_s8_z(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_z))) -svint32_t svqsub_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_z))) -svint64_t svqsub_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_z))) -svint16_t svqsub_s16_z(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_m))) -svuint8_t svqsub_u8_m(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_m))) -svuint32_t svqsub_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_m))) -svuint64_t svqsub_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_m))) -svuint16_t svqsub_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_x))) -svuint8_t svqsub_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_x))) -svuint32_t svqsub_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_x))) -svuint64_t svqsub_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_x))) -svuint16_t svqsub_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_z))) -svuint8_t svqsub_u8_z(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_z))) -svuint32_t svqsub_u32_z(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_z))) -svuint64_t svqsub_u64_z(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_z))) -svuint16_t svqsub_u16_z(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_m))) -svint8_t svqsubr_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_m))) -svint32_t svqsubr_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_m))) -svint64_t svqsubr_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_m))) -svint16_t svqsubr_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_x))) -svint8_t svqsubr_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_x))) -svint32_t 
svqsubr_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_x))) -svint64_t svqsubr_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_x))) -svint16_t svqsubr_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_z))) -svint8_t svqsubr_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_z))) -svint32_t svqsubr_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_z))) -svint64_t svqsubr_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_z))) -svint16_t svqsubr_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_m))) -svuint8_t svqsubr_n_u8_m(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_m))) -svuint32_t svqsubr_n_u32_m(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_m))) -svuint64_t svqsubr_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_m))) -svuint16_t svqsubr_n_u16_m(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_x))) -svuint8_t svqsubr_n_u8_x(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_x))) -svuint32_t svqsubr_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_x))) -svuint64_t svqsubr_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_x))) -svuint16_t svqsubr_n_u16_x(svbool_t, svuint16_t, uint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_z))) -svuint8_t svqsubr_n_u8_z(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_z))) -svuint32_t svqsubr_n_u32_z(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_z))) -svuint64_t svqsubr_n_u64_z(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_z))) -svuint16_t svqsubr_n_u16_z(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_m))) -svint8_t svqsubr_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_m))) -svint32_t svqsubr_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_m))) -svint64_t svqsubr_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_m))) -svint16_t svqsubr_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_x))) -svint8_t svqsubr_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_x))) -svint32_t svqsubr_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_x))) -svint64_t svqsubr_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_x))) -svint16_t svqsubr_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_z))) -svint8_t svqsubr_s8_z(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_z))) -svint32_t svqsubr_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_z))) -svint64_t 
svqsubr_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_z))) -svint16_t svqsubr_s16_z(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_m))) -svuint8_t svqsubr_u8_m(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_m))) -svuint32_t svqsubr_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_m))) -svuint64_t svqsubr_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_m))) -svuint16_t svqsubr_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_x))) -svuint8_t svqsubr_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_x))) -svuint32_t svqsubr_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_x))) -svuint64_t svqsubr_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_x))) -svuint16_t svqsubr_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_z))) -svuint8_t svqsubr_u8_z(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_z))) -svuint32_t svqsubr_u32_z(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_z))) -svuint64_t svqsubr_u64_z(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_z))) -svuint16_t svqsubr_u16_z(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s32))) -svint16_t svqxtnb_s32(svint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s64))) -svint32_t svqxtnb_s64(svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s16))) -svint8_t svqxtnb_s16(svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u32))) -svuint16_t svqxtnb_u32(svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u64))) -svuint32_t svqxtnb_u64(svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u16))) -svuint8_t svqxtnb_u16(svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s32))) -svint16_t svqxtnt_s32(svint16_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s64))) -svint32_t svqxtnt_s64(svint32_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s16))) -svint8_t svqxtnt_s16(svint8_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u32))) -svuint16_t svqxtnt_u32(svuint16_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u64))) -svuint32_t svqxtnt_u64(svuint32_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u16))) -svuint8_t svqxtnt_u16(svuint8_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s32))) -svuint16_t svqxtunb_s32(svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s64))) -svuint32_t svqxtunb_s64(svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s16))) -svuint8_t svqxtunb_s16(svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s32))) -svuint16_t svqxtunt_s32(svuint16_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s64))) -svuint32_t svqxtunt_s64(svuint32_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s16))) -svuint8_t svqxtunt_s16(svuint8_t, 
svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u32))) -svuint16_t svraddhnb_n_u32(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u64))) -svuint32_t svraddhnb_n_u64(svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u16))) -svuint8_t svraddhnb_n_u16(svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s32))) -svint16_t svraddhnb_n_s32(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s64))) -svint32_t svraddhnb_n_s64(svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s16))) -svint8_t svraddhnb_n_s16(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u32))) -svuint16_t svraddhnb_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u64))) -svuint32_t svraddhnb_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u16))) -svuint8_t svraddhnb_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s32))) -svint16_t svraddhnb_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s64))) -svint32_t svraddhnb_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s16))) -svint8_t svraddhnb_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u32))) -svuint16_t svraddhnt_n_u32(svuint16_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u64))) -svuint32_t svraddhnt_n_u64(svuint32_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u16))) -svuint8_t svraddhnt_n_u16(svuint8_t, svuint16_t, uint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s32))) -svint16_t svraddhnt_n_s32(svint16_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s64))) -svint32_t svraddhnt_n_s64(svint32_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s16))) -svint8_t svraddhnt_n_s16(svint8_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u32))) -svuint16_t svraddhnt_u32(svuint16_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u64))) -svuint32_t svraddhnt_u64(svuint32_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u16))) -svuint8_t svraddhnt_u16(svuint8_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s32))) -svint16_t svraddhnt_s32(svint16_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s64))) -svint32_t svraddhnt_s64(svint32_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s16))) -svint8_t svraddhnt_s16(svint8_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_m))) -svuint32_t svrecpe_u32_m(svuint32_t, svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_x))) -svuint32_t svrecpe_u32_x(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_z))) -svuint32_t svrecpe_u32_z(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_m))) -svint8_t svrhadd_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_m))) -svint32_t svrhadd_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_m))) -svint64_t 
svrhadd_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_m))) -svint16_t svrhadd_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_x))) -svint8_t svrhadd_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_x))) -svint32_t svrhadd_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_x))) -svint64_t svrhadd_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_x))) -svint16_t svrhadd_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_z))) -svint8_t svrhadd_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_z))) -svint32_t svrhadd_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_z))) -svint64_t svrhadd_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_z))) -svint16_t svrhadd_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_m))) -svuint8_t svrhadd_n_u8_m(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_m))) -svuint32_t svrhadd_n_u32_m(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_m))) -svuint64_t svrhadd_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_m))) -svuint16_t svrhadd_n_u16_m(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_x))) -svuint8_t svrhadd_n_u8_x(svbool_t, svuint8_t, uint8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_x))) -svuint32_t svrhadd_n_u32_x(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_x))) -svuint64_t svrhadd_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_x))) -svuint16_t svrhadd_n_u16_x(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_z))) -svuint8_t svrhadd_n_u8_z(svbool_t, svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_z))) -svuint32_t svrhadd_n_u32_z(svbool_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_z))) -svuint64_t svrhadd_n_u64_z(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_z))) -svuint16_t svrhadd_n_u16_z(svbool_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_m))) -svint8_t svrhadd_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_m))) -svint32_t svrhadd_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_m))) -svint64_t svrhadd_s64_m(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_m))) -svint16_t svrhadd_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_x))) -svint8_t svrhadd_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_x))) -svint32_t svrhadd_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_x))) -svint64_t svrhadd_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_x))) 
-svint16_t svrhadd_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_z))) -svint8_t svrhadd_s8_z(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_z))) -svint32_t svrhadd_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_z))) -svint64_t svrhadd_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_z))) -svint16_t svrhadd_s16_z(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_m))) -svuint8_t svrhadd_u8_m(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_m))) -svuint32_t svrhadd_u32_m(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_m))) -svuint64_t svrhadd_u64_m(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_m))) -svuint16_t svrhadd_u16_m(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_x))) -svuint8_t svrhadd_u8_x(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_x))) -svuint32_t svrhadd_u32_x(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_x))) -svuint64_t svrhadd_u64_x(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_x))) -svuint16_t svrhadd_u16_x(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_z))) -svuint8_t svrhadd_u8_z(svbool_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_z))) -svuint32_t svrhadd_u32_z(svbool_t, svuint32_t, svuint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_z))) -svuint64_t svrhadd_u64_z(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_z))) -svuint16_t svrhadd_u16_z(svbool_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_m))) -svint8_t svrshl_n_s8_m(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_m))) -svint32_t svrshl_n_s32_m(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_m))) -svint64_t svrshl_n_s64_m(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_m))) -svint16_t svrshl_n_s16_m(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_x))) -svint8_t svrshl_n_s8_x(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_x))) -svint32_t svrshl_n_s32_x(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_x))) -svint64_t svrshl_n_s64_x(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_x))) -svint16_t svrshl_n_s16_x(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_z))) -svint8_t svrshl_n_s8_z(svbool_t, svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_z))) -svint32_t svrshl_n_s32_z(svbool_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_z))) -svint64_t svrshl_n_s64_z(svbool_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_z))) -svint16_t svrshl_n_s16_z(svbool_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_m))) -svuint8_t 
svrshl_n_u8_m(svbool_t, svuint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_m))) -svuint32_t svrshl_n_u32_m(svbool_t, svuint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_m))) -svuint64_t svrshl_n_u64_m(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_m))) -svuint16_t svrshl_n_u16_m(svbool_t, svuint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_x))) -svuint8_t svrshl_n_u8_x(svbool_t, svuint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_x))) -svuint32_t svrshl_n_u32_x(svbool_t, svuint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_x))) -svuint64_t svrshl_n_u64_x(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_x))) -svuint16_t svrshl_n_u16_x(svbool_t, svuint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_z))) -svuint8_t svrshl_n_u8_z(svbool_t, svuint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_z))) -svuint32_t svrshl_n_u32_z(svbool_t, svuint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_z))) -svuint64_t svrshl_n_u64_z(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_z))) -svuint16_t svrshl_n_u16_z(svbool_t, svuint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_m))) -svint8_t svrshl_s8_m(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_m))) -svint32_t svrshl_s32_m(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_m))) -svint64_t svrshl_s64_m(svbool_t, svint64_t, svint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_m))) -svint16_t svrshl_s16_m(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x))) -svint8_t svrshl_s8_x(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x))) -svint32_t svrshl_s32_x(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x))) -svint64_t svrshl_s64_x(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x))) -svint16_t svrshl_s16_x(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_z))) -svint8_t svrshl_s8_z(svbool_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_z))) -svint32_t svrshl_s32_z(svbool_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_z))) -svint64_t svrshl_s64_z(svbool_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_z))) -svint16_t svrshl_s16_z(svbool_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_m))) -svuint8_t svrshl_u8_m(svbool_t, svuint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_m))) -svuint32_t svrshl_u32_m(svbool_t, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_m))) -svuint64_t svrshl_u64_m(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_m))) -svuint16_t svrshl_u16_m(svbool_t, svuint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x))) -svuint8_t svrshl_u8_x(svbool_t, svuint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x))) -svuint32_t svrshl_u32_x(svbool_t, svuint32_t, svint32_t); 
-__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x))) -svuint64_t svrshl_u64_x(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x))) -svuint16_t svrshl_u16_x(svbool_t, svuint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_z))) -svuint8_t svrshl_u8_z(svbool_t, svuint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_z))) -svuint32_t svrshl_u32_z(svbool_t, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_z))) -svuint64_t svrshl_u64_z(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_z))) -svuint16_t svrshl_u16_z(svbool_t, svuint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_m))) -svint8_t svrshr_n_s8_m(svbool_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_m))) -svint32_t svrshr_n_s32_m(svbool_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_m))) -svint64_t svrshr_n_s64_m(svbool_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_m))) -svint16_t svrshr_n_s16_m(svbool_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_m))) -svuint8_t svrshr_n_u8_m(svbool_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_m))) -svuint32_t svrshr_n_u32_m(svbool_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_m))) -svuint64_t svrshr_n_u64_m(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_m))) -svuint16_t svrshr_n_u16_m(svbool_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_x))) -svint8_t 
svrshr_n_s8_x(svbool_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_x))) -svint32_t svrshr_n_s32_x(svbool_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_x))) -svint64_t svrshr_n_s64_x(svbool_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_x))) -svint16_t svrshr_n_s16_x(svbool_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_x))) -svuint8_t svrshr_n_u8_x(svbool_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_x))) -svuint32_t svrshr_n_u32_x(svbool_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_x))) -svuint64_t svrshr_n_u64_x(svbool_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_x))) -svuint16_t svrshr_n_u16_x(svbool_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_z))) -svint8_t svrshr_n_s8_z(svbool_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_z))) -svint32_t svrshr_n_s32_z(svbool_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_z))) -svint64_t svrshr_n_s64_z(svbool_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_z))) -svint16_t svrshr_n_s16_z(svbool_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_z))) -svuint8_t svrshr_n_u8_z(svbool_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_z))) -svuint32_t svrshr_n_u32_z(svbool_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_z))) -svuint64_t svrshr_n_u64_z(svbool_t, svuint64_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_z))) -svuint16_t svrshr_n_u16_z(svbool_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u32))) -svuint16_t svrshrnb_n_u32(svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u64))) -svuint32_t svrshrnb_n_u64(svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u16))) -svuint8_t svrshrnb_n_u16(svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s32))) -svint16_t svrshrnb_n_s32(svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s64))) -svint32_t svrshrnb_n_s64(svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s16))) -svint8_t svrshrnb_n_s16(svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u32))) -svuint16_t svrshrnt_n_u32(svuint16_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u64))) -svuint32_t svrshrnt_n_u64(svuint32_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u16))) -svuint8_t svrshrnt_n_u16(svuint8_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s32))) -svint16_t svrshrnt_n_s32(svint16_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s64))) -svint32_t svrshrnt_n_s64(svint32_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s16))) -svint8_t svrshrnt_n_s16(svint8_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_m))) -svuint32_t svrsqrte_u32_m(svuint32_t, svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_x))) -svuint32_t svrsqrte_u32_x(svbool_t, 
svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_z))) -svuint32_t svrsqrte_u32_z(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s8))) -svint8_t svrsra_n_s8(svint8_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s32))) -svint32_t svrsra_n_s32(svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s64))) -svint64_t svrsra_n_s64(svint64_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s16))) -svint16_t svrsra_n_s16(svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u8))) -svuint8_t svrsra_n_u8(svuint8_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u32))) -svuint32_t svrsra_n_u32(svuint32_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u64))) -svuint64_t svrsra_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u16))) -svuint16_t svrsra_n_u16(svuint16_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u32))) -svuint16_t svrsubhnb_n_u32(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u64))) -svuint32_t svrsubhnb_n_u64(svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u16))) -svuint8_t svrsubhnb_n_u16(svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s32))) -svint16_t svrsubhnb_n_s32(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s64))) -svint32_t svrsubhnb_n_s64(svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s16))) -svint8_t svrsubhnb_n_s16(svint16_t, int16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u32))) -svuint16_t svrsubhnb_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u64))) -svuint32_t svrsubhnb_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u16))) -svuint8_t svrsubhnb_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s32))) -svint16_t svrsubhnb_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s64))) -svint32_t svrsubhnb_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s16))) -svint8_t svrsubhnb_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u32))) -svuint16_t svrsubhnt_n_u32(svuint16_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u64))) -svuint32_t svrsubhnt_n_u64(svuint32_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u16))) -svuint8_t svrsubhnt_n_u16(svuint8_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s32))) -svint16_t svrsubhnt_n_s32(svint16_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s64))) -svint32_t svrsubhnt_n_s64(svint32_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s16))) -svint8_t svrsubhnt_n_s16(svint8_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u32))) -svuint16_t svrsubhnt_u32(svuint16_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u64))) -svuint32_t svrsubhnt_u64(svuint32_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u16))) -svuint8_t svrsubhnt_u16(svuint8_t, 
svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s32))) -svint16_t svrsubhnt_s32(svint16_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s64))) -svint32_t svrsubhnt_s64(svint32_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s16))) -svint8_t svrsubhnt_s16(svint8_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u32))) -svuint32_t svsbclb_n_u32(svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u64))) -svuint64_t svsbclb_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u32))) -svuint32_t svsbclb_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u64))) -svuint64_t svsbclb_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u32))) -svuint32_t svsbclt_n_u32(svuint32_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u64))) -svuint64_t svsbclt_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u32))) -svuint32_t svsbclt_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u64))) -svuint64_t svsbclt_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s32))) -svint32_t svshllb_n_s32(svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s64))) -svint64_t svshllb_n_s64(svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s16))) -svint16_t svshllb_n_s16(svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u32))) 
-svuint32_t svshllb_n_u32(svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u64))) -svuint64_t svshllb_n_u64(svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u16))) -svuint16_t svshllb_n_u16(svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s32))) -svint32_t svshllt_n_s32(svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s64))) -svint64_t svshllt_n_s64(svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s16))) -svint16_t svshllt_n_s16(svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u32))) -svuint32_t svshllt_n_u32(svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u64))) -svuint64_t svshllt_n_u64(svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u16))) -svuint16_t svshllt_n_u16(svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u32))) -svuint16_t svshrnb_n_u32(svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u64))) -svuint32_t svshrnb_n_u64(svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u16))) -svuint8_t svshrnb_n_u16(svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s32))) -svint16_t svshrnb_n_s32(svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s64))) -svint32_t svshrnb_n_s64(svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s16))) -svint8_t svshrnb_n_s16(svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u32))) -svuint16_t svshrnt_n_u32(svuint16_t, svuint32_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u64))) -svuint32_t svshrnt_n_u64(svuint32_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u16))) -svuint8_t svshrnt_n_u16(svuint8_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s32))) -svint16_t svshrnt_n_s32(svint16_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s64))) -svint32_t svshrnt_n_s64(svint32_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s16))) -svint8_t svshrnt_n_s16(svint8_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u8))) -svuint8_t svsli_n_u8(svuint8_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u32))) -svuint32_t svsli_n_u32(svuint32_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u64))) -svuint64_t svsli_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u16))) -svuint16_t svsli_n_u16(svuint16_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s8))) -svint8_t svsli_n_s8(svint8_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s32))) -svint32_t svsli_n_s32(svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s64))) -svint64_t svsli_n_s64(svint64_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s16))) -svint16_t svsli_n_s16(svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_m))) -svuint8_t svsqadd_n_u8_m(svbool_t, svuint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_m))) -svuint32_t svsqadd_n_u32_m(svbool_t, svuint32_t, 
int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_m))) -svuint64_t svsqadd_n_u64_m(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_m))) -svuint16_t svsqadd_n_u16_m(svbool_t, svuint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_x))) -svuint8_t svsqadd_n_u8_x(svbool_t, svuint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_x))) -svuint32_t svsqadd_n_u32_x(svbool_t, svuint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_x))) -svuint64_t svsqadd_n_u64_x(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_x))) -svuint16_t svsqadd_n_u16_x(svbool_t, svuint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_z))) -svuint8_t svsqadd_n_u8_z(svbool_t, svuint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_z))) -svuint32_t svsqadd_n_u32_z(svbool_t, svuint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_z))) -svuint64_t svsqadd_n_u64_z(svbool_t, svuint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_z))) -svuint16_t svsqadd_n_u16_z(svbool_t, svuint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_m))) -svuint8_t svsqadd_u8_m(svbool_t, svuint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_m))) -svuint32_t svsqadd_u32_m(svbool_t, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_m))) -svuint64_t svsqadd_u64_m(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_m))) -svuint16_t svsqadd_u16_m(svbool_t, svuint16_t, svint16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_x))) -svuint8_t svsqadd_u8_x(svbool_t, svuint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_x))) -svuint32_t svsqadd_u32_x(svbool_t, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_x))) -svuint64_t svsqadd_u64_x(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_x))) -svuint16_t svsqadd_u16_x(svbool_t, svuint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_z))) -svuint8_t svsqadd_u8_z(svbool_t, svuint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_z))) -svuint32_t svsqadd_u32_z(svbool_t, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_z))) -svuint64_t svsqadd_u64_z(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_z))) -svuint16_t svsqadd_u16_z(svbool_t, svuint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s8))) -svint8_t svsra_n_s8(svint8_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s32))) -svint32_t svsra_n_s32(svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s64))) -svint64_t svsra_n_s64(svint64_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s16))) -svint16_t svsra_n_s16(svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u8))) -svuint8_t svsra_n_u8(svuint8_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u32))) -svuint32_t svsra_n_u32(svuint32_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u64))) -svuint64_t svsra_n_u64(svuint64_t, svuint64_t, 
uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u16))) -svuint16_t svsra_n_u16(svuint16_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u8))) -svuint8_t svsri_n_u8(svuint8_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u32))) -svuint32_t svsri_n_u32(svuint32_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u64))) -svuint64_t svsri_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u16))) -svuint16_t svsri_n_u16(svuint16_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s8))) -svint8_t svsri_n_s8(svint8_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s32))) -svint32_t svsri_n_s32(svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s64))) -svint64_t svsri_n_s64(svint64_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s16))) -svint16_t svsri_n_s16(svint16_t, svint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_u32))) -void svstnt1_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_u64))) -void svstnt1_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_f64))) -void svstnt1_scatter_u64base_index_f64(svbool_t, svuint64_t, int64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_f32))) -void svstnt1_scatter_u32base_index_f32(svbool_t, svuint32_t, int64_t, svfloat32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_s32))) -void svstnt1_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_s64))) -void svstnt1_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_u32))) -void svstnt1_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_u64))) -void svstnt1_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_f64))) -void svstnt1_scatter_u64base_offset_f64(svbool_t, svuint64_t, int64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_f32))) -void svstnt1_scatter_u32base_offset_f32(svbool_t, svuint32_t, int64_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_s32))) -void svstnt1_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_s64))) -void svstnt1_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_u32))) -void svstnt1_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_u64))) -void svstnt1_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_f64))) -void svstnt1_scatter_u64base_f64(svbool_t, svuint64_t, svfloat64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_f32))) -void svstnt1_scatter_u32base_f32(svbool_t, svuint32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_s32))) -void svstnt1_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_s64))) -void svstnt1_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_u64))) -void svstnt1_scatter_s64index_u64(svbool_t, uint64_t *, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_f64))) -void svstnt1_scatter_s64index_f64(svbool_t, float64_t *, svint64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_s64))) -void svstnt1_scatter_s64index_s64(svbool_t, int64_t *, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_u64))) -void svstnt1_scatter_u64index_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_f64))) -void svstnt1_scatter_u64index_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_s64))) -void svstnt1_scatter_u64index_s64(svbool_t, int64_t *, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_u32))) -void svstnt1_scatter_u32offset_u32(svbool_t, uint32_t *, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_f32))) -void svstnt1_scatter_u32offset_f32(svbool_t, float32_t *, svuint32_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_s32))) -void 
svstnt1_scatter_u32offset_s32(svbool_t, int32_t *, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_u64))) -void svstnt1_scatter_s64offset_u64(svbool_t, uint64_t *, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_f64))) -void svstnt1_scatter_s64offset_f64(svbool_t, float64_t *, svint64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_s64))) -void svstnt1_scatter_s64offset_s64(svbool_t, int64_t *, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_u64))) -void svstnt1_scatter_u64offset_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_f64))) -void svstnt1_scatter_u64offset_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_s64))) -void svstnt1_scatter_u64offset_s64(svbool_t, int64_t *, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_u32))) -void svstnt1b_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_u64))) -void svstnt1b_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_s32))) -void svstnt1b_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_s64))) -void svstnt1b_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_u32))) -void 
svstnt1b_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_u64))) -void svstnt1b_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_s32))) -void svstnt1b_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_s64))) -void svstnt1b_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_s32))) -void svstnt1b_scatter_u32offset_s32(svbool_t, int8_t *, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_u32))) -void svstnt1b_scatter_u32offset_u32(svbool_t, uint8_t *, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_s64))) -void svstnt1b_scatter_s64offset_s64(svbool_t, int8_t *, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_u64))) -void svstnt1b_scatter_s64offset_u64(svbool_t, uint8_t *, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_s64))) -void svstnt1b_scatter_u64offset_s64(svbool_t, int8_t *, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_u64))) -void svstnt1b_scatter_u64offset_u64(svbool_t, uint8_t *, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_u32))) -void svstnt1h_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_u64))) -void svstnt1h_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_s32))) -void svstnt1h_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_s64))) -void svstnt1h_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_u32))) -void svstnt1h_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_u64))) -void svstnt1h_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_s32))) -void svstnt1h_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_s64))) -void svstnt1h_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_u32))) -void svstnt1h_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_u64))) -void svstnt1h_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_s32))) -void svstnt1h_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_s64))) -void svstnt1h_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_s64))) -void svstnt1h_scatter_s64index_s64(svbool_t, int16_t *, svint64_t, svint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_u64))) -void svstnt1h_scatter_s64index_u64(svbool_t, uint16_t *, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_s64))) -void svstnt1h_scatter_u64index_s64(svbool_t, int16_t *, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_u64))) -void svstnt1h_scatter_u64index_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_s32))) -void svstnt1h_scatter_u32offset_s32(svbool_t, int16_t *, svuint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_u32))) -void svstnt1h_scatter_u32offset_u32(svbool_t, uint16_t *, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_s64))) -void svstnt1h_scatter_s64offset_s64(svbool_t, int16_t *, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_u64))) -void svstnt1h_scatter_s64offset_u64(svbool_t, uint16_t *, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_s64))) -void svstnt1h_scatter_u64offset_s64(svbool_t, int16_t *, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_u64))) -void svstnt1h_scatter_u64offset_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_u64))) -void svstnt1w_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_s64))) -void svstnt1w_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_u64))) -void svstnt1w_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_s64))) -void svstnt1w_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_u64))) -void svstnt1w_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_s64))) -void svstnt1w_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_s64))) -void svstnt1w_scatter_s64index_s64(svbool_t, int32_t *, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_u64))) -void svstnt1w_scatter_s64index_u64(svbool_t, uint32_t *, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_s64))) -void svstnt1w_scatter_u64index_s64(svbool_t, int32_t *, svuint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_u64))) -void svstnt1w_scatter_u64index_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_s64))) -void svstnt1w_scatter_s64offset_s64(svbool_t, int32_t *, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_u64))) -void svstnt1w_scatter_s64offset_u64(svbool_t, uint32_t *, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_s64))) -void svstnt1w_scatter_u64offset_s64(svbool_t, int32_t *, svuint64_t, svint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_u64))) -void svstnt1w_scatter_u64offset_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u32))) -svuint16_t svsubhnb_n_u32(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u64))) -svuint32_t svsubhnb_n_u64(svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u16))) -svuint8_t svsubhnb_n_u16(svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s32))) -svint16_t svsubhnb_n_s32(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s64))) -svint32_t svsubhnb_n_s64(svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s16))) -svint8_t svsubhnb_n_s16(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u32))) -svuint16_t svsubhnb_u32(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u64))) -svuint32_t svsubhnb_u64(svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u16))) -svuint8_t svsubhnb_u16(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s32))) -svint16_t svsubhnb_s32(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s64))) -svint32_t svsubhnb_s64(svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s16))) -svint8_t svsubhnb_s16(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u32))) -svuint16_t svsubhnt_n_u32(svuint16_t, svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u64))) -svuint32_t svsubhnt_n_u64(svuint32_t, svuint64_t, uint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u16))) -svuint8_t svsubhnt_n_u16(svuint8_t, svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s32))) -svint16_t svsubhnt_n_s32(svint16_t, svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s64))) -svint32_t svsubhnt_n_s64(svint32_t, svint64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s16))) -svint8_t svsubhnt_n_s16(svint8_t, svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u32))) -svuint16_t svsubhnt_u32(svuint16_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u64))) -svuint32_t svsubhnt_u64(svuint32_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u16))) -svuint8_t svsubhnt_u16(svuint8_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s32))) -svint16_t svsubhnt_s32(svint16_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s64))) -svint32_t svsubhnt_s64(svint32_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s16))) -svint8_t svsubhnt_s16(svint8_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s32))) -svint32_t svsublb_n_s32(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s64))) -svint64_t svsublb_n_s64(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s16))) -svint16_t svsublb_n_s16(svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u32))) -svuint32_t svsublb_n_u32(svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u64))) -svuint64_t svsublb_n_u64(svuint32_t, uint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u16))) -svuint16_t svsublb_n_u16(svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s32))) -svint32_t svsublb_s32(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s64))) -svint64_t svsublb_s64(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s16))) -svint16_t svsublb_s16(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u32))) -svuint32_t svsublb_u32(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u64))) -svuint64_t svsublb_u64(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u16))) -svuint16_t svsublb_u16(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s32))) -svint32_t svsublbt_n_s32(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s64))) -svint64_t svsublbt_n_s64(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s16))) -svint16_t svsublbt_n_s16(svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s32))) -svint32_t svsublbt_s32(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s64))) -svint64_t svsublbt_s64(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s16))) -svint16_t svsublbt_s16(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s32))) -svint32_t svsublt_n_s32(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s64))) -svint64_t svsublt_n_s64(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s16))) -svint16_t svsublt_n_s16(svint8_t, int8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u32))) -svuint32_t svsublt_n_u32(svuint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u64))) -svuint64_t svsublt_n_u64(svuint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u16))) -svuint16_t svsublt_n_u16(svuint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s32))) -svint32_t svsublt_s32(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s64))) -svint64_t svsublt_s64(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s16))) -svint16_t svsublt_s16(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u32))) -svuint32_t svsublt_u32(svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u64))) -svuint64_t svsublt_u64(svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u16))) -svuint16_t svsublt_u16(svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s32))) -svint32_t svsubltb_n_s32(svint16_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s64))) -svint64_t svsubltb_n_s64(svint32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s16))) -svint16_t svsubltb_n_s16(svint8_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s32))) -svint32_t svsubltb_s32(svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s64))) -svint64_t svsubltb_s64(svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s16))) -svint16_t svsubltb_s16(svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s32))) -svint32_t svsubwb_n_s32(svint32_t, int16_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s64))) -svint64_t svsubwb_n_s64(svint64_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s16))) -svint16_t svsubwb_n_s16(svint16_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u32))) -svuint32_t svsubwb_n_u32(svuint32_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u64))) -svuint64_t svsubwb_n_u64(svuint64_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u16))) -svuint16_t svsubwb_n_u16(svuint16_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s32))) -svint32_t svsubwb_s32(svint32_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s64))) -svint64_t svsubwb_s64(svint64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s16))) -svint16_t svsubwb_s16(svint16_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u32))) -svuint32_t svsubwb_u32(svuint32_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u64))) -svuint64_t svsubwb_u64(svuint64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u16))) -svuint16_t svsubwb_u16(svuint16_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s32))) -svint32_t svsubwt_n_s32(svint32_t, int16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s64))) -svint64_t svsubwt_n_s64(svint64_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s16))) -svint16_t svsubwt_n_s16(svint16_t, int8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u32))) -svuint32_t svsubwt_n_u32(svuint32_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u64))) -svuint64_t svsubwt_n_u64(svuint64_t, uint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u16))) -svuint16_t svsubwt_n_u16(svuint16_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s32))) -svint32_t svsubwt_s32(svint32_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s64))) -svint64_t svsubwt_s64(svint64_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s16))) -svint16_t svsubwt_s16(svint16_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u32))) -svuint32_t svsubwt_u32(svuint32_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u64))) -svuint64_t svsubwt_u64(svuint64_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u16))) -svuint16_t svsubwt_u16(svuint16_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u8))) -svuint8_t svtbl2_u8(svuint8x2_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u32))) -svuint32_t svtbl2_u32(svuint32x2_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u64))) -svuint64_t svtbl2_u64(svuint64x2_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u16))) -svuint16_t svtbl2_u16(svuint16x2_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s8))) -svint8_t svtbl2_s8(svint8x2_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f64))) -svfloat64_t svtbl2_f64(svfloat64x2_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f32))) -svfloat32_t svtbl2_f32(svfloat32x2_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f16))) -svfloat16_t svtbl2_f16(svfloat16x2_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s32))) -svint32_t svtbl2_s32(svint32x2_t, svuint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s64))) -svint64_t svtbl2_s64(svint64x2_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s16))) -svint16_t svtbl2_s16(svint16x2_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u8))) -svuint8_t svtbx_u8(svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u32))) -svuint32_t svtbx_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u64))) -svuint64_t svtbx_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u16))) -svuint16_t svtbx_u16(svuint16_t, svuint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s8))) -svint8_t svtbx_s8(svint8_t, svint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f64))) -svfloat64_t svtbx_f64(svfloat64_t, svfloat64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f32))) -svfloat32_t svtbx_f32(svfloat32_t, svfloat32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f16))) -svfloat16_t svtbx_f16(svfloat16_t, svfloat16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s32))) -svint32_t svtbx_s32(svint32_t, svint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s64))) -svint64_t svtbx_s64(svint64_t, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s16))) -svint16_t svtbx_s16(svint16_t, svint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_m))) -svint8_t svuqadd_n_s8_m(svbool_t, svint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_m))) -svint32_t svuqadd_n_s32_m(svbool_t, svint32_t, uint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_m))) -svint64_t svuqadd_n_s64_m(svbool_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_m))) -svint16_t svuqadd_n_s16_m(svbool_t, svint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_x))) -svint8_t svuqadd_n_s8_x(svbool_t, svint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_x))) -svint32_t svuqadd_n_s32_x(svbool_t, svint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_x))) -svint64_t svuqadd_n_s64_x(svbool_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_x))) -svint16_t svuqadd_n_s16_x(svbool_t, svint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_z))) -svint8_t svuqadd_n_s8_z(svbool_t, svint8_t, uint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_z))) -svint32_t svuqadd_n_s32_z(svbool_t, svint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_z))) -svint64_t svuqadd_n_s64_z(svbool_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_z))) -svint16_t svuqadd_n_s16_z(svbool_t, svint16_t, uint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_m))) -svint8_t svuqadd_s8_m(svbool_t, svint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_m))) -svint32_t svuqadd_s32_m(svbool_t, svint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_m))) -svint64_t svuqadd_s64_m(svbool_t, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_m))) -svint16_t svuqadd_s16_m(svbool_t, svint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_x))) 
-svint8_t svuqadd_s8_x(svbool_t, svint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_x))) -svint32_t svuqadd_s32_x(svbool_t, svint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_x))) -svint64_t svuqadd_s64_x(svbool_t, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_x))) -svint16_t svuqadd_s16_x(svbool_t, svint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_z))) -svint8_t svuqadd_s8_z(svbool_t, svint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_z))) -svint32_t svuqadd_s32_z(svbool_t, svint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_z))) -svint64_t svuqadd_s64_z(svbool_t, svint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_z))) -svint16_t svuqadd_s16_z(svbool_t, svint16_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s32))) -svbool_t svwhilege_b8_s32(int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s32))) -svbool_t svwhilege_b32_s32(int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s32))) -svbool_t svwhilege_b64_s32(int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s32))) -svbool_t svwhilege_b16_s32(int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s64))) -svbool_t svwhilege_b8_s64(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s64))) -svbool_t svwhilege_b32_s64(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s64))) -svbool_t svwhilege_b64_s64(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s64))) -svbool_t 
svwhilege_b16_s64(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u32))) -svbool_t svwhilege_b8_u32(uint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u32))) -svbool_t svwhilege_b32_u32(uint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u32))) -svbool_t svwhilege_b64_u32(uint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u32))) -svbool_t svwhilege_b16_u32(uint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u64))) -svbool_t svwhilege_b8_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u64))) -svbool_t svwhilege_b32_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u64))) -svbool_t svwhilege_b64_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u64))) -svbool_t svwhilege_b16_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s32))) -svbool_t svwhilegt_b8_s32(int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s32))) -svbool_t svwhilegt_b32_s32(int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s32))) -svbool_t svwhilegt_b64_s32(int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s32))) -svbool_t svwhilegt_b16_s32(int32_t, int32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s64))) -svbool_t svwhilegt_b8_s64(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s64))) -svbool_t svwhilegt_b32_s64(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s64))) -svbool_t svwhilegt_b64_s64(int64_t, int64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s64))) -svbool_t svwhilegt_b16_s64(int64_t, int64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u32))) -svbool_t svwhilegt_b8_u32(uint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u32))) -svbool_t svwhilegt_b32_u32(uint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u32))) -svbool_t svwhilegt_b64_u32(uint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u32))) -svbool_t svwhilegt_b16_u32(uint32_t, uint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u64))) -svbool_t svwhilegt_b8_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u64))) -svbool_t svwhilegt_b32_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u64))) -svbool_t svwhilegt_b64_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u64))) -svbool_t svwhilegt_b16_u64(uint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u8))) -svbool_t svwhilerw_u8(uint8_t const *, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s8))) -svbool_t svwhilerw_s8(int8_t const *, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u64))) -svbool_t svwhilerw_u64(uint64_t const *, uint64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f64))) -svbool_t svwhilerw_f64(float64_t const *, float64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s64))) -svbool_t svwhilerw_s64(int64_t const *, int64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u16))) -svbool_t svwhilerw_u16(uint16_t const *, uint16_t const *); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f16))) -svbool_t svwhilerw_f16(float16_t const *, float16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s16))) -svbool_t svwhilerw_s16(int16_t const *, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u32))) -svbool_t svwhilerw_u32(uint32_t const *, uint32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f32))) -svbool_t svwhilerw_f32(float32_t const *, float32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s32))) -svbool_t svwhilerw_s32(int32_t const *, int32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u8))) -svbool_t svwhilewr_u8(uint8_t const *, uint8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s8))) -svbool_t svwhilewr_s8(int8_t const *, int8_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u64))) -svbool_t svwhilewr_u64(uint64_t const *, uint64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f64))) -svbool_t svwhilewr_f64(float64_t const *, float64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s64))) -svbool_t svwhilewr_s64(int64_t const *, int64_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u16))) -svbool_t svwhilewr_u16(uint16_t const *, uint16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f16))) -svbool_t svwhilewr_f16(float16_t const *, float16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s16))) -svbool_t svwhilewr_s16(int16_t const *, int16_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u32))) -svbool_t svwhilewr_u32(uint32_t const *, uint32_t const *); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f32))) -svbool_t svwhilewr_f32(float32_t const *, float32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s32))) -svbool_t svwhilewr_s32(int32_t const *, int32_t const *); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u8))) -svuint8_t svxar_n_u8(svuint8_t, svuint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u32))) -svuint32_t svxar_n_u32(svuint32_t, svuint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u64))) -svuint64_t svxar_n_u64(svuint64_t, svuint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u16))) -svuint16_t svxar_n_u16(svuint16_t, svuint16_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s8))) -svint8_t svxar_n_s8(svint8_t, svint8_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s32))) -svint32_t svxar_n_s32(svint32_t, svint32_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s64))) -svint64_t svxar_n_s64(svint64_t, svint64_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s16))) -svint16_t svxar_n_s16(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s8))) -svint8_t svaba(svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s32))) -svint32_t svaba(svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s64))) -svint64_t svaba(svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s16))) -svint16_t svaba(svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u8))) -svuint8_t svaba(svuint8_t, svuint8_t, uint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u32))) -svuint32_t svaba(svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u64))) -svuint64_t svaba(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u16))) -svuint16_t svaba(svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s8))) -svint8_t svaba(svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s32))) -svint32_t svaba(svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s64))) -svint64_t svaba(svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s16))) -svint16_t svaba(svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u8))) -svuint8_t svaba(svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u32))) -svuint32_t svaba(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u64))) -svuint64_t svaba(svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u16))) -svuint16_t svaba(svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s32))) -svint32_t svabalb(svint32_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s64))) -svint64_t svabalb(svint64_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s16))) -svint16_t svabalb(svint16_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u32))) -svuint32_t svabalb(svuint32_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u64))) 
-svuint64_t svabalb(svuint64_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u16))) -svuint16_t svabalb(svuint16_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s32))) -svint32_t svabalb(svint32_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s64))) -svint64_t svabalb(svint64_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s16))) -svint16_t svabalb(svint16_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u32))) -svuint32_t svabalb(svuint32_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u64))) -svuint64_t svabalb(svuint64_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u16))) -svuint16_t svabalb(svuint16_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s32))) -svint32_t svabalt(svint32_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s64))) -svint64_t svabalt(svint64_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s16))) -svint16_t svabalt(svint16_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u32))) -svuint32_t svabalt(svuint32_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u64))) -svuint64_t svabalt(svuint64_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u16))) -svuint16_t svabalt(svuint16_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s32))) -svint32_t svabalt(svint32_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s64))) -svint64_t svabalt(svint64_t, 
svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s16))) -svint16_t svabalt(svint16_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u32))) -svuint32_t svabalt(svuint32_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u64))) -svuint64_t svabalt(svuint64_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u16))) -svuint16_t svabalt(svuint16_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s32))) -svint32_t svabdlb(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s64))) -svint64_t svabdlb(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s16))) -svint16_t svabdlb(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u32))) -svuint32_t svabdlb(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u64))) -svuint64_t svabdlb(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u16))) -svuint16_t svabdlb(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s32))) -svint32_t svabdlb(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s64))) -svint64_t svabdlb(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s16))) -svint16_t svabdlb(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u32))) -svuint32_t svabdlb(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u64))) -svuint64_t svabdlb(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u16))) -svuint16_t svabdlb(svuint8_t, svuint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s32))) -svint32_t svabdlt(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s64))) -svint64_t svabdlt(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s16))) -svint16_t svabdlt(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u32))) -svuint32_t svabdlt(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u64))) -svuint64_t svabdlt(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u16))) -svuint16_t svabdlt(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s32))) -svint32_t svabdlt(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s64))) -svint64_t svabdlt(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s16))) -svint16_t svabdlt(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u32))) -svuint32_t svabdlt(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u64))) -svuint64_t svabdlt(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u16))) -svuint16_t svabdlt(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_m))) -svint32_t svadalp_m(svbool_t, svint32_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_m))) -svint64_t svadalp_m(svbool_t, svint64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_m))) -svint16_t svadalp_m(svbool_t, svint16_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_x))) -svint32_t svadalp_x(svbool_t, svint32_t, svint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_x))) -svint64_t svadalp_x(svbool_t, svint64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_x))) -svint16_t svadalp_x(svbool_t, svint16_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_z))) -svint32_t svadalp_z(svbool_t, svint32_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_z))) -svint64_t svadalp_z(svbool_t, svint64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_z))) -svint16_t svadalp_z(svbool_t, svint16_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_m))) -svuint32_t svadalp_m(svbool_t, svuint32_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_m))) -svuint64_t svadalp_m(svbool_t, svuint64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_m))) -svuint16_t svadalp_m(svbool_t, svuint16_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_x))) -svuint32_t svadalp_x(svbool_t, svuint32_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_x))) -svuint64_t svadalp_x(svbool_t, svuint64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_x))) -svuint16_t svadalp_x(svbool_t, svuint16_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_z))) -svuint32_t svadalp_z(svbool_t, svuint32_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_z))) -svuint64_t svadalp_z(svbool_t, svuint64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_z))) -svuint16_t svadalp_z(svbool_t, svuint16_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u32))) -svuint32_t svadclb(svuint32_t, svuint32_t, 
uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u64))) -svuint64_t svadclb(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u32))) -svuint32_t svadclb(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u64))) -svuint64_t svadclb(svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u32))) -svuint32_t svadclt(svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u64))) -svuint64_t svadclt(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u32))) -svuint32_t svadclt(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u64))) -svuint64_t svadclt(svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u32))) -svuint16_t svaddhnb(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u64))) -svuint32_t svaddhnb(svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u16))) -svuint8_t svaddhnb(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s32))) -svint16_t svaddhnb(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s64))) -svint32_t svaddhnb(svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s16))) -svint8_t svaddhnb(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u32))) -svuint16_t svaddhnb(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u64))) -svuint32_t svaddhnb(svuint64_t, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u16))) -svuint8_t svaddhnb(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s32))) -svint16_t svaddhnb(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s64))) -svint32_t svaddhnb(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s16))) -svint8_t svaddhnb(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u32))) -svuint16_t svaddhnt(svuint16_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u64))) -svuint32_t svaddhnt(svuint32_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u16))) -svuint8_t svaddhnt(svuint8_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s32))) -svint16_t svaddhnt(svint16_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s64))) -svint32_t svaddhnt(svint32_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s16))) -svint8_t svaddhnt(svint8_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u32))) -svuint16_t svaddhnt(svuint16_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u64))) -svuint32_t svaddhnt(svuint32_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u16))) -svuint8_t svaddhnt(svuint8_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s32))) -svint16_t svaddhnt(svint16_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s64))) -svint32_t svaddhnt(svint32_t, svint64_t, svint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s16))) -svint8_t svaddhnt(svint8_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s32))) -svint32_t svaddlb(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s64))) -svint64_t svaddlb(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s16))) -svint16_t svaddlb(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u32))) -svuint32_t svaddlb(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u64))) -svuint64_t svaddlb(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u16))) -svuint16_t svaddlb(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s32))) -svint32_t svaddlb(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s64))) -svint64_t svaddlb(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s16))) -svint16_t svaddlb(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u32))) -svuint32_t svaddlb(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u64))) -svuint64_t svaddlb(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u16))) -svuint16_t svaddlb(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s32))) -svint32_t svaddlbt(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s64))) -svint64_t svaddlbt(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s16))) -svint16_t svaddlbt(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s32))) 
-svint32_t svaddlbt(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s64))) -svint64_t svaddlbt(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s16))) -svint16_t svaddlbt(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s32))) -svint32_t svaddlt(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s64))) -svint64_t svaddlt(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s16))) -svint16_t svaddlt(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u32))) -svuint32_t svaddlt(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u64))) -svuint64_t svaddlt(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u16))) -svuint16_t svaddlt(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s32))) -svint32_t svaddlt(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s64))) -svint64_t svaddlt(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s16))) -svint16_t svaddlt(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u32))) -svuint32_t svaddlt(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u64))) -svuint64_t svaddlt(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u16))) -svuint16_t svaddlt(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_m))) -svfloat64_t svaddp_m(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_m))) -svfloat32_t svaddp_m(svbool_t, svfloat32_t, svfloat32_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_m))) -svfloat16_t svaddp_m(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_x))) -svfloat64_t svaddp_x(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_x))) -svfloat32_t svaddp_x(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_x))) -svfloat16_t svaddp_x(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_m))) -svuint8_t svaddp_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_m))) -svuint32_t svaddp_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_m))) -svuint64_t svaddp_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_m))) -svuint16_t svaddp_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_m))) -svint8_t svaddp_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_m))) -svint32_t svaddp_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_m))) -svint64_t svaddp_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_m))) -svint16_t svaddp_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_x))) -svuint8_t svaddp_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_x))) -svuint32_t svaddp_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_x))) -svuint64_t svaddp_x(svbool_t, svuint64_t, svuint64_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_x))) -svuint16_t svaddp_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_x))) -svint8_t svaddp_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_x))) -svint32_t svaddp_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_x))) -svint64_t svaddp_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_x))) -svint16_t svaddp_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s32))) -svint32_t svaddwb(svint32_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s64))) -svint64_t svaddwb(svint64_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s16))) -svint16_t svaddwb(svint16_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u32))) -svuint32_t svaddwb(svuint32_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u64))) -svuint64_t svaddwb(svuint64_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u16))) -svuint16_t svaddwb(svuint16_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s32))) -svint32_t svaddwb(svint32_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s64))) -svint64_t svaddwb(svint64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s16))) -svint16_t svaddwb(svint16_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u32))) -svuint32_t svaddwb(svuint32_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u64))) -svuint64_t svaddwb(svuint64_t, svuint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u16))) -svuint16_t svaddwb(svuint16_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s32))) -svint32_t svaddwt(svint32_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s64))) -svint64_t svaddwt(svint64_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s16))) -svint16_t svaddwt(svint16_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u32))) -svuint32_t svaddwt(svuint32_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u64))) -svuint64_t svaddwt(svuint64_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u16))) -svuint16_t svaddwt(svuint16_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s32))) -svint32_t svaddwt(svint32_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s64))) -svint64_t svaddwt(svint64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s16))) -svint16_t svaddwt(svint16_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u32))) -svuint32_t svaddwt(svuint32_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u64))) -svuint64_t svaddwt(svuint64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u16))) -svuint16_t svaddwt(svuint16_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u8))) -svuint8_t svbcax(svuint8_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u32))) -svuint32_t svbcax(svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u64))) -svuint64_t svbcax(svuint64_t, svuint64_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u16))) -svuint16_t svbcax(svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s8))) -svint8_t svbcax(svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s32))) -svint32_t svbcax(svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s64))) -svint64_t svbcax(svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s16))) -svint16_t svbcax(svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u8))) -svuint8_t svbcax(svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u32))) -svuint32_t svbcax(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u64))) -svuint64_t svbcax(svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u16))) -svuint16_t svbcax(svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s8))) -svint8_t svbcax(svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s32))) -svint32_t svbcax(svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s64))) -svint64_t svbcax(svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s16))) -svint16_t svbcax(svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u8))) -svuint8_t svbsl1n(svuint8_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u32))) -svuint32_t svbsl1n(svuint32_t, svuint32_t, uint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u64))) -svuint64_t svbsl1n(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u16))) -svuint16_t svbsl1n(svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s8))) -svint8_t svbsl1n(svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s32))) -svint32_t svbsl1n(svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s64))) -svint64_t svbsl1n(svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s16))) -svint16_t svbsl1n(svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u8))) -svuint8_t svbsl1n(svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u32))) -svuint32_t svbsl1n(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u64))) -svuint64_t svbsl1n(svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u16))) -svuint16_t svbsl1n(svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s8))) -svint8_t svbsl1n(svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s32))) -svint32_t svbsl1n(svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s64))) -svint64_t svbsl1n(svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s16))) -svint16_t svbsl1n(svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u8))) -svuint8_t svbsl2n(svuint8_t, svuint8_t, uint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u32))) -svuint32_t svbsl2n(svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u64))) -svuint64_t svbsl2n(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u16))) -svuint16_t svbsl2n(svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s8))) -svint8_t svbsl2n(svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s32))) -svint32_t svbsl2n(svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s64))) -svint64_t svbsl2n(svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s16))) -svint16_t svbsl2n(svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u8))) -svuint8_t svbsl2n(svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u32))) -svuint32_t svbsl2n(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u64))) -svuint64_t svbsl2n(svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u16))) -svuint16_t svbsl2n(svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s8))) -svint8_t svbsl2n(svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s32))) -svint32_t svbsl2n(svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s64))) -svint64_t svbsl2n(svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s16))) -svint16_t svbsl2n(svint16_t, svint16_t, svint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u8))) -svuint8_t svbsl(svuint8_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u32))) -svuint32_t svbsl(svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u64))) -svuint64_t svbsl(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u16))) -svuint16_t svbsl(svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s8))) -svint8_t svbsl(svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s32))) -svint32_t svbsl(svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s64))) -svint64_t svbsl(svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s16))) -svint16_t svbsl(svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u8))) -svuint8_t svbsl(svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u32))) -svuint32_t svbsl(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u64))) -svuint64_t svbsl(svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u16))) -svuint16_t svbsl(svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s8))) -svint8_t svbsl(svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s32))) -svint32_t svbsl(svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s64))) -svint64_t svbsl(svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s16))) -svint16_t svbsl(svint16_t, 
svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u8))) -svuint8_t svcadd(svuint8_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u32))) -svuint32_t svcadd(svuint32_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u64))) -svuint64_t svcadd(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u16))) -svuint16_t svcadd(svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s8))) -svint8_t svcadd(svint8_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s32))) -svint32_t svcadd(svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s64))) -svint64_t svcadd(svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s16))) -svint16_t svcadd(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s32))) -svint32_t svcdot(svint32_t, svint8_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s64))) -svint64_t svcdot(svint64_t, svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s32))) -svint32_t svcdot_lane(svint32_t, svint8_t, svint8_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s64))) -svint64_t svcdot_lane(svint64_t, svint16_t, svint16_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u8))) -svuint8_t svcmla(svuint8_t, svuint8_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u32))) -svuint32_t svcmla(svuint32_t, svuint32_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u64))) -svuint64_t 
svcmla(svuint64_t, svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u16))) -svuint16_t svcmla(svuint16_t, svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s8))) -svint8_t svcmla(svint8_t, svint8_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s32))) -svint32_t svcmla(svint32_t, svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s64))) -svint64_t svcmla(svint64_t, svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s16))) -svint16_t svcmla(svint16_t, svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u32))) -svuint32_t svcmla_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u16))) -svuint16_t svcmla_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s32))) -svint32_t svcmla_lane(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s16))) -svint16_t svcmla_lane(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_m))) -svfloat32_t svcvtlt_f32_m(svfloat32_t, svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_x))) -svfloat32_t svcvtlt_f32_x(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_m))) -svfloat64_t svcvtlt_f64_m(svfloat64_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_x))) -svfloat64_t svcvtlt_f64_x(svbool_t, svfloat32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f16_f32_m))) -svfloat16_t svcvtnt_f16_m(svfloat16_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f32_f64_m))) -svfloat32_t svcvtnt_f32_m(svfloat32_t, svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_m))) -svfloat32_t svcvtx_f32_m(svfloat32_t, svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_x))) -svfloat32_t svcvtx_f32_x(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_z))) -svfloat32_t svcvtx_f32_z(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtxnt_f32_f64_m))) -svfloat32_t svcvtxnt_f32_m(svfloat32_t, svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u8))) -svuint8_t sveor3(svuint8_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u32))) -svuint32_t sveor3(svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u64))) -svuint64_t sveor3(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u16))) -svuint16_t sveor3(svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s8))) -svint8_t sveor3(svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s32))) -svint32_t sveor3(svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s64))) -svint64_t sveor3(svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s16))) -svint16_t sveor3(svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u8))) -svuint8_t sveor3(svuint8_t, svuint8_t, 
svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u32))) -svuint32_t sveor3(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u64))) -svuint64_t sveor3(svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u16))) -svuint16_t sveor3(svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s8))) -svint8_t sveor3(svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s32))) -svint32_t sveor3(svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s64))) -svint64_t sveor3(svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s16))) -svint16_t sveor3(svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u8))) -svuint8_t sveorbt(svuint8_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u32))) -svuint32_t sveorbt(svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u64))) -svuint64_t sveorbt(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u16))) -svuint16_t sveorbt(svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s8))) -svint8_t sveorbt(svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s32))) -svint32_t sveorbt(svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s64))) -svint64_t sveorbt(svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s16))) -svint16_t sveorbt(svint16_t, svint16_t, int16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u8))) -svuint8_t sveorbt(svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u32))) -svuint32_t sveorbt(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u64))) -svuint64_t sveorbt(svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u16))) -svuint16_t sveorbt(svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s8))) -svint8_t sveorbt(svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s32))) -svint32_t sveorbt(svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s64))) -svint64_t sveorbt(svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s16))) -svint16_t sveorbt(svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u8))) -svuint8_t sveortb(svuint8_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u32))) -svuint32_t sveortb(svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u64))) -svuint64_t sveortb(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u16))) -svuint16_t sveortb(svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s8))) -svint8_t sveortb(svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s32))) -svint32_t sveortb(svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s64))) -svint64_t sveortb(svint64_t, svint64_t, int64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s16))) -svint16_t sveortb(svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u8))) -svuint8_t sveortb(svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u32))) -svuint32_t sveortb(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u64))) -svuint64_t sveortb(svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u16))) -svuint16_t sveortb(svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s8))) -svint8_t sveortb(svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s32))) -svint32_t sveortb(svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s64))) -svint64_t sveortb(svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s16))) -svint16_t sveortb(svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_m))) -svint8_t svhadd_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_m))) -svint32_t svhadd_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_m))) -svint64_t svhadd_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_m))) -svint16_t svhadd_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_x))) -svint8_t svhadd_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_x))) -svint32_t svhadd_x(svbool_t, svint32_t, int32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_x))) -svint64_t svhadd_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_x))) -svint16_t svhadd_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_z))) -svint8_t svhadd_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_z))) -svint32_t svhadd_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_z))) -svint64_t svhadd_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_z))) -svint16_t svhadd_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_m))) -svuint8_t svhadd_m(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_m))) -svuint32_t svhadd_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_m))) -svuint64_t svhadd_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_m))) -svuint16_t svhadd_m(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_x))) -svuint8_t svhadd_x(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_x))) -svuint32_t svhadd_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_x))) -svuint64_t svhadd_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_x))) -svuint16_t svhadd_x(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_z))) -svuint8_t svhadd_z(svbool_t, svuint8_t, uint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_z))) -svuint32_t svhadd_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_z))) -svuint64_t svhadd_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_z))) -svuint16_t svhadd_z(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_m))) -svint8_t svhadd_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_m))) -svint32_t svhadd_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_m))) -svint64_t svhadd_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_m))) -svint16_t svhadd_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_x))) -svint8_t svhadd_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_x))) -svint32_t svhadd_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_x))) -svint64_t svhadd_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_x))) -svint16_t svhadd_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_z))) -svint8_t svhadd_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_z))) -svint32_t svhadd_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_z))) -svint64_t svhadd_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_z))) -svint16_t svhadd_z(svbool_t, svint16_t, svint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_m))) -svuint8_t svhadd_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_m))) -svuint32_t svhadd_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_m))) -svuint64_t svhadd_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_m))) -svuint16_t svhadd_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_x))) -svuint8_t svhadd_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_x))) -svuint32_t svhadd_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_x))) -svuint64_t svhadd_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_x))) -svuint16_t svhadd_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_z))) -svuint8_t svhadd_z(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_z))) -svuint32_t svhadd_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_z))) -svuint64_t svhadd_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_z))) -svuint16_t svhadd_z(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u32_z))) -svuint32_t svhistcnt_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u64_z))) -svuint64_t svhistcnt_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s32_z))) -svuint32_t svhistcnt_z(svbool_t, svint32_t, 
svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s64_z))) -svuint64_t svhistcnt_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_u8))) -svuint8_t svhistseg(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_s8))) -svuint8_t svhistseg(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_m))) -svint8_t svhsub_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_m))) -svint32_t svhsub_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_m))) -svint64_t svhsub_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_m))) -svint16_t svhsub_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_x))) -svint8_t svhsub_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_x))) -svint32_t svhsub_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_x))) -svint64_t svhsub_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_x))) -svint16_t svhsub_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_z))) -svint8_t svhsub_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_z))) -svint32_t svhsub_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_z))) -svint64_t svhsub_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_z))) -svint16_t svhsub_z(svbool_t, svint16_t, int16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_m))) -svuint8_t svhsub_m(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_m))) -svuint32_t svhsub_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_m))) -svuint64_t svhsub_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_m))) -svuint16_t svhsub_m(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_x))) -svuint8_t svhsub_x(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_x))) -svuint32_t svhsub_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_x))) -svuint64_t svhsub_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_x))) -svuint16_t svhsub_x(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_z))) -svuint8_t svhsub_z(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_z))) -svuint32_t svhsub_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_z))) -svuint64_t svhsub_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_z))) -svuint16_t svhsub_z(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_m))) -svint8_t svhsub_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_m))) -svint32_t svhsub_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_m))) -svint64_t svhsub_m(svbool_t, svint64_t, svint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_m))) -svint16_t svhsub_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_x))) -svint8_t svhsub_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_x))) -svint32_t svhsub_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_x))) -svint64_t svhsub_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_x))) -svint16_t svhsub_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_z))) -svint8_t svhsub_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_z))) -svint32_t svhsub_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_z))) -svint64_t svhsub_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_z))) -svint16_t svhsub_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_m))) -svuint8_t svhsub_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_m))) -svuint32_t svhsub_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_m))) -svuint64_t svhsub_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_m))) -svuint16_t svhsub_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_x))) -svuint8_t svhsub_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_x))) -svuint32_t svhsub_x(svbool_t, svuint32_t, svuint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_x))) -svuint64_t svhsub_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_x))) -svuint16_t svhsub_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_z))) -svuint8_t svhsub_z(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_z))) -svuint32_t svhsub_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_z))) -svuint64_t svhsub_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_z))) -svuint16_t svhsub_z(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_m))) -svint8_t svhsubr_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_m))) -svint32_t svhsubr_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_m))) -svint64_t svhsubr_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_m))) -svint16_t svhsubr_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_x))) -svint8_t svhsubr_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_x))) -svint32_t svhsubr_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_x))) -svint64_t svhsubr_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_x))) -svint16_t svhsubr_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_z))) -svint8_t svhsubr_z(svbool_t, svint8_t, int8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_z))) -svint32_t svhsubr_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_z))) -svint64_t svhsubr_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_z))) -svint16_t svhsubr_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_m))) -svuint8_t svhsubr_m(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_m))) -svuint32_t svhsubr_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_m))) -svuint64_t svhsubr_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_m))) -svuint16_t svhsubr_m(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_x))) -svuint8_t svhsubr_x(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_x))) -svuint32_t svhsubr_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_x))) -svuint64_t svhsubr_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_x))) -svuint16_t svhsubr_x(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_z))) -svuint8_t svhsubr_z(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_z))) -svuint32_t svhsubr_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_z))) -svuint64_t svhsubr_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_z))) -svuint16_t svhsubr_z(svbool_t, 
svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_m))) -svint8_t svhsubr_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_m))) -svint32_t svhsubr_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_m))) -svint64_t svhsubr_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_m))) -svint16_t svhsubr_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_x))) -svint8_t svhsubr_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_x))) -svint32_t svhsubr_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_x))) -svint64_t svhsubr_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_x))) -svint16_t svhsubr_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_z))) -svint8_t svhsubr_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_z))) -svint32_t svhsubr_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_z))) -svint64_t svhsubr_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_z))) -svint16_t svhsubr_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_m))) -svuint8_t svhsubr_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_m))) -svuint32_t svhsubr_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_m))) -svuint64_t svhsubr_m(svbool_t, svuint64_t, 
svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_m))) -svuint16_t svhsubr_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_x))) -svuint8_t svhsubr_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_x))) -svuint32_t svhsubr_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_x))) -svuint64_t svhsubr_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_x))) -svuint16_t svhsubr_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_z))) -svuint8_t svhsubr_z(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_z))) -svuint32_t svhsubr_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_z))) -svuint64_t svhsubr_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_z))) -svuint16_t svhsubr_z(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_u32))) -svuint32_t svldnt1_gather_index_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_u64))) -svuint64_t svldnt1_gather_index_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_f64))) -svfloat64_t svldnt1_gather_index_f64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_f32))) -svfloat32_t svldnt1_gather_index_f32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_s32))) 
-svint32_t svldnt1_gather_index_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_s64))) -svint64_t svldnt1_gather_index_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_u32))) -svuint32_t svldnt1_gather_offset_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_u64))) -svuint64_t svldnt1_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_f64))) -svfloat64_t svldnt1_gather_offset_f64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_f32))) -svfloat32_t svldnt1_gather_offset_f32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_s32))) -svint32_t svldnt1_gather_offset_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_s64))) -svint64_t svldnt1_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_u32))) -svuint32_t svldnt1_gather_u32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_u64))) -svuint64_t svldnt1_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_f64))) -svfloat64_t svldnt1_gather_f64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_f32))) -svfloat32_t svldnt1_gather_f32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_s32))) -svint32_t svldnt1_gather_s32(svbool_t, svuint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_s64))) -svint64_t svldnt1_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_u64))) -svuint64_t svldnt1_gather_index(svbool_t, uint64_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_f64))) -svfloat64_t svldnt1_gather_index(svbool_t, float64_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_s64))) -svint64_t svldnt1_gather_index(svbool_t, int64_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_u64))) -svuint64_t svldnt1_gather_index(svbool_t, uint64_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_f64))) -svfloat64_t svldnt1_gather_index(svbool_t, float64_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_s64))) -svint64_t svldnt1_gather_index(svbool_t, int64_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_u32))) -svuint32_t svldnt1_gather_offset(svbool_t, uint32_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_f32))) -svfloat32_t svldnt1_gather_offset(svbool_t, float32_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_s32))) -svint32_t svldnt1_gather_offset(svbool_t, int32_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_u64))) -svuint64_t svldnt1_gather_offset(svbool_t, uint64_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_f64))) -svfloat64_t svldnt1_gather_offset(svbool_t, float64_t const *, svint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_s64))) -svint64_t svldnt1_gather_offset(svbool_t, int64_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_u64))) -svuint64_t svldnt1_gather_offset(svbool_t, uint64_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_f64))) -svfloat64_t svldnt1_gather_offset(svbool_t, float64_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_s64))) -svint64_t svldnt1_gather_offset(svbool_t, int64_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_u32))) -svuint32_t svldnt1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_u64))) -svuint64_t svldnt1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_s32))) -svint32_t svldnt1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_s64))) -svint64_t svldnt1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_u32))) -svuint32_t svldnt1sb_gather_u32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_u64))) -svuint64_t svldnt1sb_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_s32))) -svint32_t svldnt1sb_gather_s32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_s64))) -svint64_t svldnt1sb_gather_s64(svbool_t, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_u32))) -svuint32_t svldnt1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_s32))) -svint32_t svldnt1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_u64))) -svuint64_t svldnt1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_s64))) -svint64_t svldnt1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_u64))) -svuint64_t svldnt1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_s64))) -svint64_t svldnt1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_u32))) -svuint32_t svldnt1sh_gather_index_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_u64))) -svuint64_t svldnt1sh_gather_index_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_s32))) -svint32_t svldnt1sh_gather_index_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_s64))) -svint64_t svldnt1sh_gather_index_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_u32))) -svuint32_t svldnt1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_u64))) 
-svuint64_t svldnt1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_s32))) -svint32_t svldnt1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_s64))) -svint64_t svldnt1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_u32))) -svuint32_t svldnt1sh_gather_u32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_u64))) -svuint64_t svldnt1sh_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_s32))) -svint32_t svldnt1sh_gather_s32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_s64))) -svint64_t svldnt1sh_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_u64))) -svuint64_t svldnt1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_s64))) -svint64_t svldnt1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_u64))) -svuint64_t svldnt1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_s64))) -svint64_t svldnt1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_u32))) -svuint32_t svldnt1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_s32))) -svint32_t 
svldnt1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_u64))) -svuint64_t svldnt1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_s64))) -svint64_t svldnt1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_u64))) -svuint64_t svldnt1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_s64))) -svint64_t svldnt1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_u64))) -svuint64_t svldnt1sw_gather_index_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_s64))) -svint64_t svldnt1sw_gather_index_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_u64))) -svuint64_t svldnt1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_s64))) -svint64_t svldnt1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_u64))) -svuint64_t svldnt1sw_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_s64))) -svint64_t svldnt1sw_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_u64))) -svuint64_t svldnt1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_s64))) -svint64_t svldnt1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_u64))) -svuint64_t svldnt1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_s64))) -svint64_t svldnt1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_u64))) -svuint64_t svldnt1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_s64))) -svint64_t svldnt1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_u64))) -svuint64_t svldnt1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_s64))) -svint64_t svldnt1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_u32))) -svuint32_t svldnt1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_u64))) -svuint64_t svldnt1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_s32))) -svint32_t svldnt1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_s64))) -svint64_t svldnt1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_u32))) 
-svuint32_t svldnt1ub_gather_u32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_u64))) -svuint64_t svldnt1ub_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_s32))) -svint32_t svldnt1ub_gather_s32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_s64))) -svint64_t svldnt1ub_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_u32))) -svuint32_t svldnt1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_s32))) -svint32_t svldnt1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_u64))) -svuint64_t svldnt1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_s64))) -svint64_t svldnt1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_u64))) -svuint64_t svldnt1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_s64))) -svint64_t svldnt1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_u32))) -svuint32_t svldnt1uh_gather_index_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_u64))) -svuint64_t svldnt1uh_gather_index_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_s32))) 
-svint32_t svldnt1uh_gather_index_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_s64))) -svint64_t svldnt1uh_gather_index_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_u32))) -svuint32_t svldnt1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_u64))) -svuint64_t svldnt1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_s32))) -svint32_t svldnt1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_s64))) -svint64_t svldnt1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_u32))) -svuint32_t svldnt1uh_gather_u32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_u64))) -svuint64_t svldnt1uh_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_s32))) -svint32_t svldnt1uh_gather_s32(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_s64))) -svint64_t svldnt1uh_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_u64))) -svuint64_t svldnt1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_s64))) -svint64_t svldnt1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_u64))) -svuint64_t 
svldnt1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_s64))) -svint64_t svldnt1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_u32))) -svuint32_t svldnt1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_s32))) -svint32_t svldnt1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_u64))) -svuint64_t svldnt1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_s64))) -svint64_t svldnt1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_u64))) -svuint64_t svldnt1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_s64))) -svint64_t svldnt1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_u64))) -svuint64_t svldnt1uw_gather_index_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_s64))) -svint64_t svldnt1uw_gather_index_s64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_u64))) -svuint64_t svldnt1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_s64))) -svint64_t svldnt1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_u64))) -svuint64_t svldnt1uw_gather_u64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_s64))) -svint64_t svldnt1uw_gather_s64(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_u64))) -svuint64_t svldnt1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_s64))) -svint64_t svldnt1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_u64))) -svuint64_t svldnt1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_s64))) -svint64_t svldnt1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_u64))) -svuint64_t svldnt1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_s64))) -svint64_t svldnt1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_u64))) -svuint64_t svldnt1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_s64))) -svint64_t svldnt1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_m))) -svint64_t svlogb_m(svint64_t, svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_m))) -svint32_t svlogb_m(svint32_t, svbool_t, svfloat32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_m))) -svint16_t svlogb_m(svint16_t, svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_x))) -svint64_t svlogb_x(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_x))) -svint32_t svlogb_x(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_x))) -svint16_t svlogb_x(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_z))) -svint64_t svlogb_z(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_z))) -svint32_t svlogb_z(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_z))) -svint16_t svlogb_z(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u8))) -svbool_t svmatch(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u16))) -svbool_t svmatch(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s8))) -svbool_t svmatch(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s16))) -svbool_t svmatch(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_m))) -svfloat64_t svmaxnmp_m(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_m))) -svfloat32_t svmaxnmp_m(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_m))) -svfloat16_t svmaxnmp_m(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_x))) -svfloat64_t svmaxnmp_x(svbool_t, svfloat64_t, svfloat64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_x))) -svfloat32_t svmaxnmp_x(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_x))) -svfloat16_t svmaxnmp_x(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_m))) -svfloat64_t svmaxp_m(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_m))) -svfloat32_t svmaxp_m(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_m))) -svfloat16_t svmaxp_m(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_x))) -svfloat64_t svmaxp_x(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_x))) -svfloat32_t svmaxp_x(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_x))) -svfloat16_t svmaxp_x(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_m))) -svint8_t svmaxp_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_m))) -svint32_t svmaxp_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_m))) -svint64_t svmaxp_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_m))) -svint16_t svmaxp_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_x))) -svint8_t svmaxp_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_x))) -svint32_t svmaxp_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_x))) -svint64_t svmaxp_x(svbool_t, svint64_t, 
svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_x))) -svint16_t svmaxp_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_m))) -svuint8_t svmaxp_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_m))) -svuint32_t svmaxp_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_m))) -svuint64_t svmaxp_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_m))) -svuint16_t svmaxp_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_x))) -svuint8_t svmaxp_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_x))) -svuint32_t svmaxp_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_x))) -svuint64_t svmaxp_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_x))) -svuint16_t svmaxp_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_m))) -svfloat64_t svminnmp_m(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_m))) -svfloat32_t svminnmp_m(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_m))) -svfloat16_t svminnmp_m(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_x))) -svfloat64_t svminnmp_x(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_x))) -svfloat32_t svminnmp_x(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_x))) 
-svfloat16_t svminnmp_x(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_m))) -svfloat64_t svminp_m(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_m))) -svfloat32_t svminp_m(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_m))) -svfloat16_t svminp_m(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_x))) -svfloat64_t svminp_x(svbool_t, svfloat64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_x))) -svfloat32_t svminp_x(svbool_t, svfloat32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_x))) -svfloat16_t svminp_x(svbool_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_m))) -svint8_t svminp_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_m))) -svint32_t svminp_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_m))) -svint64_t svminp_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_m))) -svint16_t svminp_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_x))) -svint8_t svminp_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_x))) -svint32_t svminp_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_x))) -svint64_t svminp_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_x))) -svint16_t svminp_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_m))) 
-svuint8_t svminp_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_m))) -svuint32_t svminp_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_m))) -svuint64_t svminp_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_m))) -svuint16_t svminp_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_x))) -svuint8_t svminp_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_x))) -svuint32_t svminp_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_x))) -svuint64_t svminp_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_x))) -svuint16_t svminp_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u32))) -svuint32_t svmla_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u64))) -svuint64_t svmla_lane(svuint64_t, svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u16))) -svuint16_t svmla_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s32))) -svint32_t svmla_lane(svint32_t, svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s64))) -svint64_t svmla_lane(svint64_t, svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s16))) -svint16_t svmla_lane(svint16_t, svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_f32))) -svfloat32_t svmlalb(svfloat32_t, 
svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s32))) -svint32_t svmlalb(svint32_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s64))) -svint64_t svmlalb(svint64_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s16))) -svint16_t svmlalb(svint16_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u32))) -svuint32_t svmlalb(svuint32_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u64))) -svuint64_t svmlalb(svuint64_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u16))) -svuint16_t svmlalb(svuint16_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_f32))) -svfloat32_t svmlalb(svfloat32_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s32))) -svint32_t svmlalb(svint32_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s64))) -svint64_t svmlalb(svint64_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s16))) -svint16_t svmlalb(svint16_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u32))) -svuint32_t svmlalb(svuint32_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u64))) -svuint64_t svmlalb(svuint64_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u16))) -svuint16_t svmlalb(svuint16_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_f32))) -svfloat32_t svmlalb_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s32))) -svint32_t 
svmlalb_lane(svint32_t, svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s64))) -svint64_t svmlalb_lane(svint64_t, svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u32))) -svuint32_t svmlalb_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u64))) -svuint64_t svmlalb_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_f32))) -svfloat32_t svmlalt(svfloat32_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s32))) -svint32_t svmlalt(svint32_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s64))) -svint64_t svmlalt(svint64_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s16))) -svint16_t svmlalt(svint16_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u32))) -svuint32_t svmlalt(svuint32_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u64))) -svuint64_t svmlalt(svuint64_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u16))) -svuint16_t svmlalt(svuint16_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_f32))) -svfloat32_t svmlalt(svfloat32_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s32))) -svint32_t svmlalt(svint32_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s64))) -svint64_t svmlalt(svint64_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s16))) -svint16_t svmlalt(svint16_t, svint8_t, svint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u32))) -svuint32_t svmlalt(svuint32_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u64))) -svuint64_t svmlalt(svuint64_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u16))) -svuint16_t svmlalt(svuint16_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_f32))) -svfloat32_t svmlalt_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s32))) -svint32_t svmlalt_lane(svint32_t, svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s64))) -svint64_t svmlalt_lane(svint64_t, svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u32))) -svuint32_t svmlalt_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u64))) -svuint64_t svmlalt_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u32))) -svuint32_t svmls_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u64))) -svuint64_t svmls_lane(svuint64_t, svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u16))) -svuint16_t svmls_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s32))) -svint32_t svmls_lane(svint32_t, svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s64))) -svint64_t svmls_lane(svint64_t, svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s16))) -svint16_t 
svmls_lane(svint16_t, svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_f32))) -svfloat32_t svmlslb(svfloat32_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s32))) -svint32_t svmlslb(svint32_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s64))) -svint64_t svmlslb(svint64_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s16))) -svint16_t svmlslb(svint16_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u32))) -svuint32_t svmlslb(svuint32_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u64))) -svuint64_t svmlslb(svuint64_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u16))) -svuint16_t svmlslb(svuint16_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_f32))) -svfloat32_t svmlslb(svfloat32_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s32))) -svint32_t svmlslb(svint32_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s64))) -svint64_t svmlslb(svint64_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s16))) -svint16_t svmlslb(svint16_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u32))) -svuint32_t svmlslb(svuint32_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u64))) -svuint64_t svmlslb(svuint64_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u16))) -svuint16_t svmlslb(svuint16_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_f32))) 
-svfloat32_t svmlslb_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s32))) -svint32_t svmlslb_lane(svint32_t, svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s64))) -svint64_t svmlslb_lane(svint64_t, svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u32))) -svuint32_t svmlslb_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u64))) -svuint64_t svmlslb_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_f32))) -svfloat32_t svmlslt(svfloat32_t, svfloat16_t, float16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s32))) -svint32_t svmlslt(svint32_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s64))) -svint64_t svmlslt(svint64_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s16))) -svint16_t svmlslt(svint16_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u32))) -svuint32_t svmlslt(svuint32_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u64))) -svuint64_t svmlslt(svuint64_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u16))) -svuint16_t svmlslt(svuint16_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_f32))) -svfloat32_t svmlslt(svfloat32_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s32))) -svint32_t svmlslt(svint32_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s64))) -svint64_t svmlslt(svint64_t, svint32_t, 
svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s16))) -svint16_t svmlslt(svint16_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u32))) -svuint32_t svmlslt(svuint32_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u64))) -svuint64_t svmlslt(svuint64_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u16))) -svuint16_t svmlslt(svuint16_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_f32))) -svfloat32_t svmlslt_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s32))) -svint32_t svmlslt_lane(svint32_t, svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s64))) -svint64_t svmlslt_lane(svint64_t, svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u32))) -svuint32_t svmlslt_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u64))) -svuint64_t svmlslt_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s32))) -svint32_t svmovlb(svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s64))) -svint64_t svmovlb(svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s16))) -svint16_t svmovlb(svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u32))) -svuint32_t svmovlb(svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u64))) -svuint64_t svmovlb(svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u16))) -svuint16_t svmovlb(svuint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s32))) -svint32_t svmovlt(svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s64))) -svint64_t svmovlt(svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s16))) -svint16_t svmovlt(svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u32))) -svuint32_t svmovlt(svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u64))) -svuint64_t svmovlt(svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u16))) -svuint16_t svmovlt(svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u32))) -svuint32_t svmul_lane(svuint32_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u64))) -svuint64_t svmul_lane(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u16))) -svuint16_t svmul_lane(svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s32))) -svint32_t svmul_lane(svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s64))) -svint64_t svmul_lane(svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s16))) -svint16_t svmul_lane(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s32))) -svint32_t svmullb(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s64))) -svint64_t svmullb(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s16))) -svint16_t svmullb(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u32))) -svuint32_t svmullb(svuint16_t, uint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u64))) -svuint64_t svmullb(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u16))) -svuint16_t svmullb(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s32))) -svint32_t svmullb(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s64))) -svint64_t svmullb(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s16))) -svint16_t svmullb(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u32))) -svuint32_t svmullb(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u64))) -svuint64_t svmullb(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u16))) -svuint16_t svmullb(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s32))) -svint32_t svmullb_lane(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s64))) -svint64_t svmullb_lane(svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u32))) -svuint32_t svmullb_lane(svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u64))) -svuint64_t svmullb_lane(svuint32_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s32))) -svint32_t svmullt(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s64))) -svint64_t svmullt(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s16))) -svint16_t svmullt(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u32))) -svuint32_t svmullt(svuint16_t, uint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u64))) -svuint64_t svmullt(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u16))) -svuint16_t svmullt(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s32))) -svint32_t svmullt(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s64))) -svint64_t svmullt(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s16))) -svint16_t svmullt(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u32))) -svuint32_t svmullt(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u64))) -svuint64_t svmullt(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u16))) -svuint16_t svmullt(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s32))) -svint32_t svmullt_lane(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s64))) -svint64_t svmullt_lane(svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u32))) -svuint32_t svmullt_lane(svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u64))) -svuint64_t svmullt_lane(svuint32_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u8))) -svuint8_t svnbsl(svuint8_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u32))) -svuint32_t svnbsl(svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u64))) -svuint64_t svnbsl(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u16))) -svuint16_t 
svnbsl(svuint16_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s8))) -svint8_t svnbsl(svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s32))) -svint32_t svnbsl(svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s64))) -svint64_t svnbsl(svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s16))) -svint16_t svnbsl(svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u8))) -svuint8_t svnbsl(svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u32))) -svuint32_t svnbsl(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u64))) -svuint64_t svnbsl(svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u16))) -svuint16_t svnbsl(svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s8))) -svint8_t svnbsl(svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s32))) -svint32_t svnbsl(svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s64))) -svint64_t svnbsl(svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s16))) -svint16_t svnbsl(svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u8))) -svbool_t svnmatch(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u16))) -svbool_t svnmatch(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s8))) -svbool_t svnmatch(svbool_t, svint8_t, svint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s16))) -svbool_t svnmatch(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_n_u8))) -svuint8_t svpmul(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_u8))) -svuint8_t svpmul(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u64))) -svuint64_t svpmullb(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u16))) -svuint16_t svpmullb(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u64))) -svuint64_t svpmullb(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u16))) -svuint16_t svpmullb(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u8))) -svuint8_t svpmullb_pair(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u32))) -svuint32_t svpmullb_pair(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u8))) -svuint8_t svpmullb_pair(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u32))) -svuint32_t svpmullb_pair(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u64))) -svuint64_t svpmullt(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u16))) -svuint16_t svpmullt(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u64))) -svuint64_t svpmullt(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u16))) -svuint16_t svpmullt(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u8))) -svuint8_t svpmullt_pair(svuint8_t, uint8_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u32))) -svuint32_t svpmullt_pair(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u8))) -svuint8_t svpmullt_pair(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u32))) -svuint32_t svpmullt_pair(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_m))) -svint8_t svqabs_m(svint8_t, svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_m))) -svint32_t svqabs_m(svint32_t, svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_m))) -svint64_t svqabs_m(svint64_t, svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_m))) -svint16_t svqabs_m(svint16_t, svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_x))) -svint8_t svqabs_x(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_x))) -svint32_t svqabs_x(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_x))) -svint64_t svqabs_x(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_x))) -svint16_t svqabs_x(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_z))) -svint8_t svqabs_z(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_z))) -svint32_t svqabs_z(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_z))) -svint64_t svqabs_z(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_z))) -svint16_t svqabs_z(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_m))) -svint8_t svqadd_m(svbool_t, svint8_t, int8_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_m))) -svint32_t svqadd_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_m))) -svint64_t svqadd_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_m))) -svint16_t svqadd_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_x))) -svint8_t svqadd_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_x))) -svint32_t svqadd_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_x))) -svint64_t svqadd_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_x))) -svint16_t svqadd_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_z))) -svint8_t svqadd_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_z))) -svint32_t svqadd_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_z))) -svint64_t svqadd_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_z))) -svint16_t svqadd_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_m))) -svuint8_t svqadd_m(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_m))) -svuint32_t svqadd_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_m))) -svuint64_t svqadd_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_m))) -svuint16_t svqadd_m(svbool_t, svuint16_t, uint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_x))) -svuint8_t svqadd_x(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_x))) -svuint32_t svqadd_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_x))) -svuint64_t svqadd_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_x))) -svuint16_t svqadd_x(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_z))) -svuint8_t svqadd_z(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_z))) -svuint32_t svqadd_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_z))) -svuint64_t svqadd_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_z))) -svuint16_t svqadd_z(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_m))) -svint8_t svqadd_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_m))) -svint32_t svqadd_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_m))) -svint64_t svqadd_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_m))) -svint16_t svqadd_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_x))) -svint8_t svqadd_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_x))) -svint32_t svqadd_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_x))) -svint64_t svqadd_x(svbool_t, svint64_t, svint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_x))) -svint16_t svqadd_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_z))) -svint8_t svqadd_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_z))) -svint32_t svqadd_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_z))) -svint64_t svqadd_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_z))) -svint16_t svqadd_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_m))) -svuint8_t svqadd_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_m))) -svuint32_t svqadd_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_m))) -svuint64_t svqadd_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_m))) -svuint16_t svqadd_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_x))) -svuint8_t svqadd_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_x))) -svuint32_t svqadd_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_x))) -svuint64_t svqadd_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_x))) -svuint16_t svqadd_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_z))) -svuint8_t svqadd_z(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_z))) -svuint32_t svqadd_z(svbool_t, svuint32_t, svuint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_z))) -svuint64_t svqadd_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_z))) -svuint16_t svqadd_z(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s8))) -svint8_t svqcadd(svint8_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s32))) -svint32_t svqcadd(svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s64))) -svint64_t svqcadd(svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s16))) -svint16_t svqcadd(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s32))) -svint32_t svqdmlalb(svint32_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s64))) -svint64_t svqdmlalb(svint64_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s16))) -svint16_t svqdmlalb(svint16_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s32))) -svint32_t svqdmlalb(svint32_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s64))) -svint64_t svqdmlalb(svint64_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s16))) -svint16_t svqdmlalb(svint16_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s32))) -svint32_t svqdmlalb_lane(svint32_t, svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s64))) -svint64_t svqdmlalb_lane(svint64_t, svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s32))) -svint32_t svqdmlalbt(svint32_t, 
svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s64))) -svint64_t svqdmlalbt(svint64_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s16))) -svint16_t svqdmlalbt(svint16_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s32))) -svint32_t svqdmlalbt(svint32_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s64))) -svint64_t svqdmlalbt(svint64_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s16))) -svint16_t svqdmlalbt(svint16_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s32))) -svint32_t svqdmlalt(svint32_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s64))) -svint64_t svqdmlalt(svint64_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s16))) -svint16_t svqdmlalt(svint16_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s32))) -svint32_t svqdmlalt(svint32_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s64))) -svint64_t svqdmlalt(svint64_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s16))) -svint16_t svqdmlalt(svint16_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s32))) -svint32_t svqdmlalt_lane(svint32_t, svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s64))) -svint64_t svqdmlalt_lane(svint64_t, svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s32))) -svint32_t svqdmlslb(svint32_t, svint16_t, int16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s64))) -svint64_t svqdmlslb(svint64_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s16))) -svint16_t svqdmlslb(svint16_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s32))) -svint32_t svqdmlslb(svint32_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s64))) -svint64_t svqdmlslb(svint64_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s16))) -svint16_t svqdmlslb(svint16_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s32))) -svint32_t svqdmlslb_lane(svint32_t, svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s64))) -svint64_t svqdmlslb_lane(svint64_t, svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s32))) -svint32_t svqdmlslbt(svint32_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s64))) -svint64_t svqdmlslbt(svint64_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s16))) -svint16_t svqdmlslbt(svint16_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s32))) -svint32_t svqdmlslbt(svint32_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s64))) -svint64_t svqdmlslbt(svint64_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s16))) -svint16_t svqdmlslbt(svint16_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s32))) -svint32_t svqdmlslt(svint32_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s64))) 
-svint64_t svqdmlslt(svint64_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s16))) -svint16_t svqdmlslt(svint16_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s32))) -svint32_t svqdmlslt(svint32_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s64))) -svint64_t svqdmlslt(svint64_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s16))) -svint16_t svqdmlslt(svint16_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s32))) -svint32_t svqdmlslt_lane(svint32_t, svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s64))) -svint64_t svqdmlslt_lane(svint64_t, svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s8))) -svint8_t svqdmulh(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s32))) -svint32_t svqdmulh(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s64))) -svint64_t svqdmulh(svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s16))) -svint16_t svqdmulh(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8))) -svint8_t svqdmulh(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32))) -svint32_t svqdmulh(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64))) -svint64_t svqdmulh(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16))) -svint16_t svqdmulh(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s32))) -svint32_t svqdmulh_lane(svint32_t, svint32_t, uint64_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s64))) -svint64_t svqdmulh_lane(svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s16))) -svint16_t svqdmulh_lane(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s32))) -svint32_t svqdmullb(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s64))) -svint64_t svqdmullb(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s16))) -svint16_t svqdmullb(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s32))) -svint32_t svqdmullb(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s64))) -svint64_t svqdmullb(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s16))) -svint16_t svqdmullb(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s32))) -svint32_t svqdmullb_lane(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s64))) -svint64_t svqdmullb_lane(svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s32))) -svint32_t svqdmullt(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s64))) -svint64_t svqdmullt(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s16))) -svint16_t svqdmullt(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s32))) -svint32_t svqdmullt(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s64))) -svint64_t svqdmullt(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s16))) 
-svint16_t svqdmullt(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s32))) -svint32_t svqdmullt_lane(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s64))) -svint64_t svqdmullt_lane(svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_m))) -svint8_t svqneg_m(svint8_t, svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_m))) -svint32_t svqneg_m(svint32_t, svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_m))) -svint64_t svqneg_m(svint64_t, svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_m))) -svint16_t svqneg_m(svint16_t, svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_x))) -svint8_t svqneg_x(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_x))) -svint32_t svqneg_x(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_x))) -svint64_t svqneg_x(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_x))) -svint16_t svqneg_x(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_z))) -svint8_t svqneg_z(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_z))) -svint32_t svqneg_z(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_z))) -svint64_t svqneg_z(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_z))) -svint16_t svqneg_z(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s8))) -svint8_t svqrdcmlah(svint8_t, svint8_t, svint8_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s32))) -svint32_t svqrdcmlah(svint32_t, svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s64))) -svint64_t svqrdcmlah(svint64_t, svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s16))) -svint16_t svqrdcmlah(svint16_t, svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s32))) -svint32_t svqrdcmlah_lane(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s16))) -svint16_t svqrdcmlah_lane(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s8))) -svint8_t svqrdmlah(svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s32))) -svint32_t svqrdmlah(svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s64))) -svint64_t svqrdmlah(svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s16))) -svint16_t svqrdmlah(svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s8))) -svint8_t svqrdmlah(svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s32))) -svint32_t svqrdmlah(svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s64))) -svint64_t svqrdmlah(svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s16))) -svint16_t svqrdmlah(svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s32))) -svint32_t svqrdmlah_lane(svint32_t, svint32_t, svint32_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s64))) -svint64_t svqrdmlah_lane(svint64_t, svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s16))) -svint16_t svqrdmlah_lane(svint16_t, svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s8))) -svint8_t svqrdmlsh(svint8_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s32))) -svint32_t svqrdmlsh(svint32_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s64))) -svint64_t svqrdmlsh(svint64_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s16))) -svint16_t svqrdmlsh(svint16_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s8))) -svint8_t svqrdmlsh(svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s32))) -svint32_t svqrdmlsh(svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s64))) -svint64_t svqrdmlsh(svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s16))) -svint16_t svqrdmlsh(svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s32))) -svint32_t svqrdmlsh_lane(svint32_t, svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s64))) -svint64_t svqrdmlsh_lane(svint64_t, svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s16))) -svint16_t svqrdmlsh_lane(svint16_t, svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s8))) -svint8_t svqrdmulh(svint8_t, int8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s32))) -svint32_t svqrdmulh(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s64))) -svint64_t svqrdmulh(svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s16))) -svint16_t svqrdmulh(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s8))) -svint8_t svqrdmulh(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s32))) -svint32_t svqrdmulh(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s64))) -svint64_t svqrdmulh(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s16))) -svint16_t svqrdmulh(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s32))) -svint32_t svqrdmulh_lane(svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s64))) -svint64_t svqrdmulh_lane(svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s16))) -svint16_t svqrdmulh_lane(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_m))) -svint8_t svqrshl_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_m))) -svint32_t svqrshl_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_m))) -svint64_t svqrshl_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_m))) -svint16_t svqrshl_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_x))) -svint8_t svqrshl_x(svbool_t, svint8_t, int8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_x))) -svint32_t svqrshl_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_x))) -svint64_t svqrshl_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_x))) -svint16_t svqrshl_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_z))) -svint8_t svqrshl_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_z))) -svint32_t svqrshl_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_z))) -svint64_t svqrshl_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_z))) -svint16_t svqrshl_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_m))) -svuint8_t svqrshl_m(svbool_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_m))) -svuint32_t svqrshl_m(svbool_t, svuint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_m))) -svuint64_t svqrshl_m(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_m))) -svuint16_t svqrshl_m(svbool_t, svuint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_x))) -svuint8_t svqrshl_x(svbool_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_x))) -svuint32_t svqrshl_x(svbool_t, svuint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_x))) -svuint64_t svqrshl_x(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_x))) -svuint16_t svqrshl_x(svbool_t, svuint16_t, int16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_z))) -svuint8_t svqrshl_z(svbool_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_z))) -svuint32_t svqrshl_z(svbool_t, svuint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_z))) -svuint64_t svqrshl_z(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_z))) -svuint16_t svqrshl_z(svbool_t, svuint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_m))) -svint8_t svqrshl_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_m))) -svint32_t svqrshl_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_m))) -svint64_t svqrshl_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_m))) -svint16_t svqrshl_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_x))) -svint8_t svqrshl_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_x))) -svint32_t svqrshl_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_x))) -svint64_t svqrshl_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_x))) -svint16_t svqrshl_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_z))) -svint8_t svqrshl_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_z))) -svint32_t svqrshl_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_z))) -svint64_t svqrshl_z(svbool_t, svint64_t, svint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_z))) -svint16_t svqrshl_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_m))) -svuint8_t svqrshl_m(svbool_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_m))) -svuint32_t svqrshl_m(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_m))) -svuint64_t svqrshl_m(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_m))) -svuint16_t svqrshl_m(svbool_t, svuint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_x))) -svuint8_t svqrshl_x(svbool_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_x))) -svuint32_t svqrshl_x(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_x))) -svuint64_t svqrshl_x(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_x))) -svuint16_t svqrshl_x(svbool_t, svuint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_z))) -svuint8_t svqrshl_z(svbool_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_z))) -svuint32_t svqrshl_z(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_z))) -svuint64_t svqrshl_z(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_z))) -svuint16_t svqrshl_z(svbool_t, svuint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s32))) -svint16_t svqrshrnb(svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s64))) -svint32_t svqrshrnb(svint64_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s16))) -svint8_t svqrshrnb(svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u32))) -svuint16_t svqrshrnb(svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u64))) -svuint32_t svqrshrnb(svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u16))) -svuint8_t svqrshrnb(svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s32))) -svint16_t svqrshrnt(svint16_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s64))) -svint32_t svqrshrnt(svint32_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s16))) -svint8_t svqrshrnt(svint8_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u32))) -svuint16_t svqrshrnt(svuint16_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u64))) -svuint32_t svqrshrnt(svuint32_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u16))) -svuint8_t svqrshrnt(svuint8_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s32))) -svuint16_t svqrshrunb(svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s64))) -svuint32_t svqrshrunb(svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s16))) -svuint8_t svqrshrunb(svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s32))) -svuint16_t svqrshrunt(svuint16_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s64))) -svuint32_t svqrshrunt(svuint32_t, svint64_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s16))) -svuint8_t svqrshrunt(svuint8_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_m))) -svint8_t svqshl_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_m))) -svint32_t svqshl_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_m))) -svint64_t svqshl_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_m))) -svint16_t svqshl_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_x))) -svint8_t svqshl_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_x))) -svint32_t svqshl_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_x))) -svint64_t svqshl_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_x))) -svint16_t svqshl_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_z))) -svint8_t svqshl_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_z))) -svint32_t svqshl_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_z))) -svint64_t svqshl_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_z))) -svint16_t svqshl_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_m))) -svuint8_t svqshl_m(svbool_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_m))) -svuint32_t svqshl_m(svbool_t, svuint32_t, int32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_m))) -svuint64_t svqshl_m(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_m))) -svuint16_t svqshl_m(svbool_t, svuint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_x))) -svuint8_t svqshl_x(svbool_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_x))) -svuint32_t svqshl_x(svbool_t, svuint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_x))) -svuint64_t svqshl_x(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_x))) -svuint16_t svqshl_x(svbool_t, svuint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_z))) -svuint8_t svqshl_z(svbool_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_z))) -svuint32_t svqshl_z(svbool_t, svuint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_z))) -svuint64_t svqshl_z(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_z))) -svuint16_t svqshl_z(svbool_t, svuint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_m))) -svint8_t svqshl_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_m))) -svint32_t svqshl_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_m))) -svint64_t svqshl_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_m))) -svint16_t svqshl_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_x))) -svint8_t svqshl_x(svbool_t, svint8_t, svint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_x))) -svint32_t svqshl_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_x))) -svint64_t svqshl_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_x))) -svint16_t svqshl_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_z))) -svint8_t svqshl_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_z))) -svint32_t svqshl_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_z))) -svint64_t svqshl_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_z))) -svint16_t svqshl_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_m))) -svuint8_t svqshl_m(svbool_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_m))) -svuint32_t svqshl_m(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_m))) -svuint64_t svqshl_m(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_m))) -svuint16_t svqshl_m(svbool_t, svuint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_x))) -svuint8_t svqshl_x(svbool_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_x))) -svuint32_t svqshl_x(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_x))) -svuint64_t svqshl_x(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_x))) -svuint16_t svqshl_x(svbool_t, svuint16_t, svint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_z))) -svuint8_t svqshl_z(svbool_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_z))) -svuint32_t svqshl_z(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_z))) -svuint64_t svqshl_z(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_z))) -svuint16_t svqshl_z(svbool_t, svuint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_m))) -svuint8_t svqshlu_m(svbool_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_m))) -svuint32_t svqshlu_m(svbool_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_m))) -svuint64_t svqshlu_m(svbool_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_m))) -svuint16_t svqshlu_m(svbool_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_x))) -svuint8_t svqshlu_x(svbool_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_x))) -svuint32_t svqshlu_x(svbool_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_x))) -svuint64_t svqshlu_x(svbool_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_x))) -svuint16_t svqshlu_x(svbool_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_z))) -svuint8_t svqshlu_z(svbool_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_z))) -svuint32_t svqshlu_z(svbool_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_z))) -svuint64_t svqshlu_z(svbool_t, svint64_t, uint64_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_z))) -svuint16_t svqshlu_z(svbool_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s32))) -svint16_t svqshrnb(svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s64))) -svint32_t svqshrnb(svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s16))) -svint8_t svqshrnb(svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u32))) -svuint16_t svqshrnb(svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u64))) -svuint32_t svqshrnb(svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u16))) -svuint8_t svqshrnb(svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s32))) -svint16_t svqshrnt(svint16_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s64))) -svint32_t svqshrnt(svint32_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s16))) -svint8_t svqshrnt(svint8_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u32))) -svuint16_t svqshrnt(svuint16_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u64))) -svuint32_t svqshrnt(svuint32_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u16))) -svuint8_t svqshrnt(svuint8_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s32))) -svuint16_t svqshrunb(svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s64))) -svuint32_t svqshrunb(svint64_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s16))) -svuint8_t svqshrunb(svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s32))) -svuint16_t svqshrunt(svuint16_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s64))) -svuint32_t svqshrunt(svuint32_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s16))) -svuint8_t svqshrunt(svuint8_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_m))) -svint8_t svqsub_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_m))) -svint32_t svqsub_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_m))) -svint64_t svqsub_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_m))) -svint16_t svqsub_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_x))) -svint8_t svqsub_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_x))) -svint32_t svqsub_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_x))) -svint64_t svqsub_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_x))) -svint16_t svqsub_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_z))) -svint8_t svqsub_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_z))) -svint32_t svqsub_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_z))) -svint64_t svqsub_z(svbool_t, svint64_t, int64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_z))) -svint16_t svqsub_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_m))) -svuint8_t svqsub_m(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_m))) -svuint32_t svqsub_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_m))) -svuint64_t svqsub_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_m))) -svuint16_t svqsub_m(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_x))) -svuint8_t svqsub_x(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_x))) -svuint32_t svqsub_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_x))) -svuint64_t svqsub_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_x))) -svuint16_t svqsub_x(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_z))) -svuint8_t svqsub_z(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_z))) -svuint32_t svqsub_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_z))) -svuint64_t svqsub_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_z))) -svuint16_t svqsub_z(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_m))) -svint8_t svqsub_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_m))) -svint32_t svqsub_m(svbool_t, svint32_t, svint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_m))) -svint64_t svqsub_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_m))) -svint16_t svqsub_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_x))) -svint8_t svqsub_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_x))) -svint32_t svqsub_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_x))) -svint64_t svqsub_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_x))) -svint16_t svqsub_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_z))) -svint8_t svqsub_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_z))) -svint32_t svqsub_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_z))) -svint64_t svqsub_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_z))) -svint16_t svqsub_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_m))) -svuint8_t svqsub_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_m))) -svuint32_t svqsub_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_m))) -svuint64_t svqsub_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_m))) -svuint16_t svqsub_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_x))) -svuint8_t svqsub_x(svbool_t, svuint8_t, svuint8_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_x))) -svuint32_t svqsub_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_x))) -svuint64_t svqsub_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_x))) -svuint16_t svqsub_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_z))) -svuint8_t svqsub_z(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_z))) -svuint32_t svqsub_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_z))) -svuint64_t svqsub_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_z))) -svuint16_t svqsub_z(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_m))) -svint8_t svqsubr_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_m))) -svint32_t svqsubr_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_m))) -svint64_t svqsubr_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_m))) -svint16_t svqsubr_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_x))) -svint8_t svqsubr_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_x))) -svint32_t svqsubr_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_x))) -svint64_t svqsubr_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_x))) -svint16_t svqsubr_x(svbool_t, svint16_t, int16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_z))) -svint8_t svqsubr_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_z))) -svint32_t svqsubr_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_z))) -svint64_t svqsubr_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_z))) -svint16_t svqsubr_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_m))) -svuint8_t svqsubr_m(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_m))) -svuint32_t svqsubr_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_m))) -svuint64_t svqsubr_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_m))) -svuint16_t svqsubr_m(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_x))) -svuint8_t svqsubr_x(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_x))) -svuint32_t svqsubr_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_x))) -svuint64_t svqsubr_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_x))) -svuint16_t svqsubr_x(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_z))) -svuint8_t svqsubr_z(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_z))) -svuint32_t svqsubr_z(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_z))) -svuint64_t svqsubr_z(svbool_t, svuint64_t, 
uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_z))) -svuint16_t svqsubr_z(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_m))) -svint8_t svqsubr_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_m))) -svint32_t svqsubr_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_m))) -svint64_t svqsubr_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_m))) -svint16_t svqsubr_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_x))) -svint8_t svqsubr_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_x))) -svint32_t svqsubr_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_x))) -svint64_t svqsubr_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_x))) -svint16_t svqsubr_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_z))) -svint8_t svqsubr_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_z))) -svint32_t svqsubr_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_z))) -svint64_t svqsubr_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_z))) -svint16_t svqsubr_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_m))) -svuint8_t svqsubr_m(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_m))) -svuint32_t svqsubr_m(svbool_t, svuint32_t, svuint32_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_m))) -svuint64_t svqsubr_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_m))) -svuint16_t svqsubr_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_x))) -svuint8_t svqsubr_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_x))) -svuint32_t svqsubr_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_x))) -svuint64_t svqsubr_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_x))) -svuint16_t svqsubr_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_z))) -svuint8_t svqsubr_z(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_z))) -svuint32_t svqsubr_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_z))) -svuint64_t svqsubr_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_z))) -svuint16_t svqsubr_z(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s32))) -svint16_t svqxtnb(svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s64))) -svint32_t svqxtnb(svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s16))) -svint8_t svqxtnb(svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u32))) -svuint16_t svqxtnb(svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u64))) -svuint32_t svqxtnb(svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u16))) -svuint8_t 
svqxtnb(svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s32))) -svint16_t svqxtnt(svint16_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s64))) -svint32_t svqxtnt(svint32_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s16))) -svint8_t svqxtnt(svint8_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u32))) -svuint16_t svqxtnt(svuint16_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u64))) -svuint32_t svqxtnt(svuint32_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u16))) -svuint8_t svqxtnt(svuint8_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s32))) -svuint16_t svqxtunb(svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s64))) -svuint32_t svqxtunb(svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s16))) -svuint8_t svqxtunb(svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s32))) -svuint16_t svqxtunt(svuint16_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s64))) -svuint32_t svqxtunt(svuint32_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s16))) -svuint8_t svqxtunt(svuint8_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u32))) -svuint16_t svraddhnb(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u64))) -svuint32_t svraddhnb(svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u16))) -svuint8_t svraddhnb(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s32))) -svint16_t svraddhnb(svint32_t, int32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s64))) -svint32_t svraddhnb(svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s16))) -svint8_t svraddhnb(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u32))) -svuint16_t svraddhnb(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u64))) -svuint32_t svraddhnb(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u16))) -svuint8_t svraddhnb(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s32))) -svint16_t svraddhnb(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s64))) -svint32_t svraddhnb(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s16))) -svint8_t svraddhnb(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u32))) -svuint16_t svraddhnt(svuint16_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u64))) -svuint32_t svraddhnt(svuint32_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u16))) -svuint8_t svraddhnt(svuint8_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s32))) -svint16_t svraddhnt(svint16_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s64))) -svint32_t svraddhnt(svint32_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s16))) -svint8_t svraddhnt(svint8_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u32))) -svuint16_t svraddhnt(svuint16_t, svuint32_t, svuint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u64))) -svuint32_t svraddhnt(svuint32_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u16))) -svuint8_t svraddhnt(svuint8_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s32))) -svint16_t svraddhnt(svint16_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s64))) -svint32_t svraddhnt(svint32_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s16))) -svint8_t svraddhnt(svint8_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_m))) -svuint32_t svrecpe_m(svuint32_t, svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_x))) -svuint32_t svrecpe_x(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_z))) -svuint32_t svrecpe_z(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_m))) -svint8_t svrhadd_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_m))) -svint32_t svrhadd_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_m))) -svint64_t svrhadd_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_m))) -svint16_t svrhadd_m(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_x))) -svint8_t svrhadd_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_x))) -svint32_t svrhadd_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_x))) -svint64_t svrhadd_x(svbool_t, svint64_t, int64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_x))) -svint16_t svrhadd_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_z))) -svint8_t svrhadd_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_z))) -svint32_t svrhadd_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_z))) -svint64_t svrhadd_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_z))) -svint16_t svrhadd_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_m))) -svuint8_t svrhadd_m(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_m))) -svuint32_t svrhadd_m(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_m))) -svuint64_t svrhadd_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_m))) -svuint16_t svrhadd_m(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_x))) -svuint8_t svrhadd_x(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_x))) -svuint32_t svrhadd_x(svbool_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_x))) -svuint64_t svrhadd_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_x))) -svuint16_t svrhadd_x(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_z))) -svuint8_t svrhadd_z(svbool_t, svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_z))) -svuint32_t svrhadd_z(svbool_t, svuint32_t, 
uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_z))) -svuint64_t svrhadd_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_z))) -svuint16_t svrhadd_z(svbool_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_m))) -svint8_t svrhadd_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_m))) -svint32_t svrhadd_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_m))) -svint64_t svrhadd_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_m))) -svint16_t svrhadd_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_x))) -svint8_t svrhadd_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_x))) -svint32_t svrhadd_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_x))) -svint64_t svrhadd_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_x))) -svint16_t svrhadd_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_z))) -svint8_t svrhadd_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_z))) -svint32_t svrhadd_z(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_z))) -svint64_t svrhadd_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_z))) -svint16_t svrhadd_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_m))) -svuint8_t svrhadd_m(svbool_t, svuint8_t, svuint8_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_m))) -svuint32_t svrhadd_m(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_m))) -svuint64_t svrhadd_m(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_m))) -svuint16_t svrhadd_m(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_x))) -svuint8_t svrhadd_x(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_x))) -svuint32_t svrhadd_x(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_x))) -svuint64_t svrhadd_x(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_x))) -svuint16_t svrhadd_x(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_z))) -svuint8_t svrhadd_z(svbool_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_z))) -svuint32_t svrhadd_z(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_z))) -svuint64_t svrhadd_z(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_z))) -svuint16_t svrhadd_z(svbool_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_m))) -svint8_t svrshl_m(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_m))) -svint32_t svrshl_m(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_m))) -svint64_t svrshl_m(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_m))) -svint16_t svrshl_m(svbool_t, svint16_t, 
int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_x))) -svint8_t svrshl_x(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_x))) -svint32_t svrshl_x(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_x))) -svint64_t svrshl_x(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_x))) -svint16_t svrshl_x(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_z))) -svint8_t svrshl_z(svbool_t, svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_z))) -svint32_t svrshl_z(svbool_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_z))) -svint64_t svrshl_z(svbool_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_z))) -svint16_t svrshl_z(svbool_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_m))) -svuint8_t svrshl_m(svbool_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_m))) -svuint32_t svrshl_m(svbool_t, svuint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_m))) -svuint64_t svrshl_m(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_m))) -svuint16_t svrshl_m(svbool_t, svuint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_x))) -svuint8_t svrshl_x(svbool_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_x))) -svuint32_t svrshl_x(svbool_t, svuint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_x))) -svuint64_t svrshl_x(svbool_t, svuint64_t, int64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_x))) -svuint16_t svrshl_x(svbool_t, svuint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_z))) -svuint8_t svrshl_z(svbool_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_z))) -svuint32_t svrshl_z(svbool_t, svuint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_z))) -svuint64_t svrshl_z(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_z))) -svuint16_t svrshl_z(svbool_t, svuint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_m))) -svint8_t svrshl_m(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_m))) -svint32_t svrshl_m(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_m))) -svint64_t svrshl_m(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_m))) -svint16_t svrshl_m(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x))) -svint8_t svrshl_x(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x))) -svint32_t svrshl_x(svbool_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x))) -svint64_t svrshl_x(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x))) -svint16_t svrshl_x(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_z))) -svint8_t svrshl_z(svbool_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_z))) -svint32_t svrshl_z(svbool_t, svint32_t, svint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_z))) -svint64_t svrshl_z(svbool_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_z))) -svint16_t svrshl_z(svbool_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_m))) -svuint8_t svrshl_m(svbool_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_m))) -svuint32_t svrshl_m(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_m))) -svuint64_t svrshl_m(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_m))) -svuint16_t svrshl_m(svbool_t, svuint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x))) -svuint8_t svrshl_x(svbool_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x))) -svuint32_t svrshl_x(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x))) -svuint64_t svrshl_x(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x))) -svuint16_t svrshl_x(svbool_t, svuint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_z))) -svuint8_t svrshl_z(svbool_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_z))) -svuint32_t svrshl_z(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_z))) -svuint64_t svrshl_z(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_z))) -svuint16_t svrshl_z(svbool_t, svuint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_m))) -svint8_t svrshr_m(svbool_t, svint8_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_m))) -svint32_t svrshr_m(svbool_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_m))) -svint64_t svrshr_m(svbool_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_m))) -svint16_t svrshr_m(svbool_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_m))) -svuint8_t svrshr_m(svbool_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_m))) -svuint32_t svrshr_m(svbool_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_m))) -svuint64_t svrshr_m(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_m))) -svuint16_t svrshr_m(svbool_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_x))) -svint8_t svrshr_x(svbool_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_x))) -svint32_t svrshr_x(svbool_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_x))) -svint64_t svrshr_x(svbool_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_x))) -svint16_t svrshr_x(svbool_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_x))) -svuint8_t svrshr_x(svbool_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_x))) -svuint32_t svrshr_x(svbool_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_x))) -svuint64_t svrshr_x(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_x))) -svuint16_t svrshr_x(svbool_t, svuint16_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_z))) -svint8_t svrshr_z(svbool_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_z))) -svint32_t svrshr_z(svbool_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_z))) -svint64_t svrshr_z(svbool_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_z))) -svint16_t svrshr_z(svbool_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_z))) -svuint8_t svrshr_z(svbool_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_z))) -svuint32_t svrshr_z(svbool_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_z))) -svuint64_t svrshr_z(svbool_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_z))) -svuint16_t svrshr_z(svbool_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u32))) -svuint16_t svrshrnb(svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u64))) -svuint32_t svrshrnb(svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u16))) -svuint8_t svrshrnb(svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s32))) -svint16_t svrshrnb(svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s64))) -svint32_t svrshrnb(svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s16))) -svint8_t svrshrnb(svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u32))) -svuint16_t svrshrnt(svuint16_t, svuint32_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u64))) -svuint32_t svrshrnt(svuint32_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u16))) -svuint8_t svrshrnt(svuint8_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s32))) -svint16_t svrshrnt(svint16_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s64))) -svint32_t svrshrnt(svint32_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s16))) -svint8_t svrshrnt(svint8_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_m))) -svuint32_t svrsqrte_m(svuint32_t, svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_x))) -svuint32_t svrsqrte_x(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_z))) -svuint32_t svrsqrte_z(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s8))) -svint8_t svrsra(svint8_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s32))) -svint32_t svrsra(svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s64))) -svint64_t svrsra(svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s16))) -svint16_t svrsra(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u8))) -svuint8_t svrsra(svuint8_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u32))) -svuint32_t svrsra(svuint32_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u64))) -svuint64_t svrsra(svuint64_t, svuint64_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u16))) -svuint16_t svrsra(svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u32))) -svuint16_t svrsubhnb(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u64))) -svuint32_t svrsubhnb(svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u16))) -svuint8_t svrsubhnb(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s32))) -svint16_t svrsubhnb(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s64))) -svint32_t svrsubhnb(svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s16))) -svint8_t svrsubhnb(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u32))) -svuint16_t svrsubhnb(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u64))) -svuint32_t svrsubhnb(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u16))) -svuint8_t svrsubhnb(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s32))) -svint16_t svrsubhnb(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s64))) -svint32_t svrsubhnb(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s16))) -svint8_t svrsubhnb(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u32))) -svuint16_t svrsubhnt(svuint16_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u64))) -svuint32_t svrsubhnt(svuint32_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u16))) -svuint8_t 
svrsubhnt(svuint8_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s32))) -svint16_t svrsubhnt(svint16_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s64))) -svint32_t svrsubhnt(svint32_t, svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s16))) -svint8_t svrsubhnt(svint8_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u32))) -svuint16_t svrsubhnt(svuint16_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u64))) -svuint32_t svrsubhnt(svuint32_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u16))) -svuint8_t svrsubhnt(svuint8_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s32))) -svint16_t svrsubhnt(svint16_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s64))) -svint32_t svrsubhnt(svint32_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s16))) -svint8_t svrsubhnt(svint8_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u32))) -svuint32_t svsbclb(svuint32_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u64))) -svuint64_t svsbclb(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u32))) -svuint32_t svsbclb(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u64))) -svuint64_t svsbclb(svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u32))) -svuint32_t svsbclt(svuint32_t, svuint32_t, uint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u64))) -svuint64_t svsbclt(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u32))) -svuint32_t svsbclt(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u64))) -svuint64_t svsbclt(svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s32))) -svint32_t svshllb(svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s64))) -svint64_t svshllb(svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s16))) -svint16_t svshllb(svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u32))) -svuint32_t svshllb(svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u64))) -svuint64_t svshllb(svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u16))) -svuint16_t svshllb(svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s32))) -svint32_t svshllt(svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s64))) -svint64_t svshllt(svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s16))) -svint16_t svshllt(svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u32))) -svuint32_t svshllt(svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u64))) -svuint64_t svshllt(svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u16))) -svuint16_t svshllt(svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u32))) -svuint16_t svshrnb(svuint32_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u64))) -svuint32_t svshrnb(svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u16))) -svuint8_t svshrnb(svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s32))) -svint16_t svshrnb(svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s64))) -svint32_t svshrnb(svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s16))) -svint8_t svshrnb(svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u32))) -svuint16_t svshrnt(svuint16_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u64))) -svuint32_t svshrnt(svuint32_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u16))) -svuint8_t svshrnt(svuint8_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s32))) -svint16_t svshrnt(svint16_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s64))) -svint32_t svshrnt(svint32_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s16))) -svint8_t svshrnt(svint8_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u8))) -svuint8_t svsli(svuint8_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u32))) -svuint32_t svsli(svuint32_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u64))) -svuint64_t svsli(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u16))) -svuint16_t svsli(svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s8))) -svint8_t 
svsli(svint8_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s32))) -svint32_t svsli(svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s64))) -svint64_t svsli(svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s16))) -svint16_t svsli(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_m))) -svuint8_t svsqadd_m(svbool_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_m))) -svuint32_t svsqadd_m(svbool_t, svuint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_m))) -svuint64_t svsqadd_m(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_m))) -svuint16_t svsqadd_m(svbool_t, svuint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_x))) -svuint8_t svsqadd_x(svbool_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_x))) -svuint32_t svsqadd_x(svbool_t, svuint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_x))) -svuint64_t svsqadd_x(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_x))) -svuint16_t svsqadd_x(svbool_t, svuint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_z))) -svuint8_t svsqadd_z(svbool_t, svuint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_z))) -svuint32_t svsqadd_z(svbool_t, svuint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_z))) -svuint64_t svsqadd_z(svbool_t, svuint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_z))) -svuint16_t 
svsqadd_z(svbool_t, svuint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_m))) -svuint8_t svsqadd_m(svbool_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_m))) -svuint32_t svsqadd_m(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_m))) -svuint64_t svsqadd_m(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_m))) -svuint16_t svsqadd_m(svbool_t, svuint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_x))) -svuint8_t svsqadd_x(svbool_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_x))) -svuint32_t svsqadd_x(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_x))) -svuint64_t svsqadd_x(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_x))) -svuint16_t svsqadd_x(svbool_t, svuint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_z))) -svuint8_t svsqadd_z(svbool_t, svuint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_z))) -svuint32_t svsqadd_z(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_z))) -svuint64_t svsqadd_z(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_z))) -svuint16_t svsqadd_z(svbool_t, svuint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s8))) -svint8_t svsra(svint8_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s32))) -svint32_t svsra(svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s64))) -svint64_t 
svsra(svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s16))) -svint16_t svsra(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u8))) -svuint8_t svsra(svuint8_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u32))) -svuint32_t svsra(svuint32_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u64))) -svuint64_t svsra(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u16))) -svuint16_t svsra(svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u8))) -svuint8_t svsri(svuint8_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u32))) -svuint32_t svsri(svuint32_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u64))) -svuint64_t svsri(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u16))) -svuint16_t svsri(svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s8))) -svint8_t svsri(svint8_t, svint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s32))) -svint32_t svsri(svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s64))) -svint64_t svsri(svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s16))) -svint16_t svsri(svint16_t, svint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_u32))) -void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_u64))) -void 
svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_f64))) -void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_f32))) -void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_s32))) -void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_s64))) -void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_u32))) -void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_u64))) -void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_f64))) -void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_f32))) -void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_s32))) -void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_s64))) -void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_u32))) -void svstnt1_scatter(svbool_t, svuint32_t, svuint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_u64))) -void svstnt1_scatter(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_f64))) -void svstnt1_scatter(svbool_t, svuint64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_f32))) -void svstnt1_scatter(svbool_t, svuint32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_s32))) -void svstnt1_scatter(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_s64))) -void svstnt1_scatter(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_u64))) -void svstnt1_scatter_index(svbool_t, uint64_t *, svint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_f64))) -void svstnt1_scatter_index(svbool_t, float64_t *, svint64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_s64))) -void svstnt1_scatter_index(svbool_t, int64_t *, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_u64))) -void svstnt1_scatter_index(svbool_t, uint64_t *, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_f64))) -void svstnt1_scatter_index(svbool_t, float64_t *, svuint64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_s64))) -void svstnt1_scatter_index(svbool_t, int64_t *, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_u32))) -void svstnt1_scatter_offset(svbool_t, uint32_t *, svuint32_t, svuint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_f32))) -void svstnt1_scatter_offset(svbool_t, float32_t *, svuint32_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_s32))) -void svstnt1_scatter_offset(svbool_t, int32_t *, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_u64))) -void svstnt1_scatter_offset(svbool_t, uint64_t *, svint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_f64))) -void svstnt1_scatter_offset(svbool_t, float64_t *, svint64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_s64))) -void svstnt1_scatter_offset(svbool_t, int64_t *, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_u64))) -void svstnt1_scatter_offset(svbool_t, uint64_t *, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_f64))) -void svstnt1_scatter_offset(svbool_t, float64_t *, svuint64_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_s64))) -void svstnt1_scatter_offset(svbool_t, int64_t *, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_u32))) -void svstnt1b_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_u64))) -void svstnt1b_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_s32))) -void svstnt1b_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_s64))) -void 
svstnt1b_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_u32))) -void svstnt1b_scatter(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_u64))) -void svstnt1b_scatter(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_s32))) -void svstnt1b_scatter(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_s64))) -void svstnt1b_scatter(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_s32))) -void svstnt1b_scatter_offset(svbool_t, int8_t *, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_u32))) -void svstnt1b_scatter_offset(svbool_t, uint8_t *, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_s64))) -void svstnt1b_scatter_offset(svbool_t, int8_t *, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_u64))) -void svstnt1b_scatter_offset(svbool_t, uint8_t *, svint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_s64))) -void svstnt1b_scatter_offset(svbool_t, int8_t *, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_u64))) -void svstnt1b_scatter_offset(svbool_t, uint8_t *, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_u32))) -void svstnt1h_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_u64))) -void 
svstnt1h_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_s32))) -void svstnt1h_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_s64))) -void svstnt1h_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_u32))) -void svstnt1h_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_u64))) -void svstnt1h_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_s32))) -void svstnt1h_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_s64))) -void svstnt1h_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_u32))) -void svstnt1h_scatter(svbool_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_u64))) -void svstnt1h_scatter(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_s32))) -void svstnt1h_scatter(svbool_t, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_s64))) -void svstnt1h_scatter(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_s64))) -void svstnt1h_scatter_index(svbool_t, int16_t *, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_u64))) -void 
svstnt1h_scatter_index(svbool_t, uint16_t *, svint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_s64))) -void svstnt1h_scatter_index(svbool_t, int16_t *, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_u64))) -void svstnt1h_scatter_index(svbool_t, uint16_t *, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_s32))) -void svstnt1h_scatter_offset(svbool_t, int16_t *, svuint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_u32))) -void svstnt1h_scatter_offset(svbool_t, uint16_t *, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_s64))) -void svstnt1h_scatter_offset(svbool_t, int16_t *, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_u64))) -void svstnt1h_scatter_offset(svbool_t, uint16_t *, svint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_s64))) -void svstnt1h_scatter_offset(svbool_t, int16_t *, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_u64))) -void svstnt1h_scatter_offset(svbool_t, uint16_t *, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_u64))) -void svstnt1w_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_s64))) -void svstnt1w_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_u64))) -void svstnt1w_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_s64))) -void svstnt1w_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_u64))) -void svstnt1w_scatter(svbool_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_s64))) -void svstnt1w_scatter(svbool_t, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_s64))) -void svstnt1w_scatter_index(svbool_t, int32_t *, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_u64))) -void svstnt1w_scatter_index(svbool_t, uint32_t *, svint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_s64))) -void svstnt1w_scatter_index(svbool_t, int32_t *, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_u64))) -void svstnt1w_scatter_index(svbool_t, uint32_t *, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_s64))) -void svstnt1w_scatter_offset(svbool_t, int32_t *, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_u64))) -void svstnt1w_scatter_offset(svbool_t, uint32_t *, svint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_s64))) -void svstnt1w_scatter_offset(svbool_t, int32_t *, svuint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_u64))) -void svstnt1w_scatter_offset(svbool_t, uint32_t *, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u32))) -svuint16_t svsubhnb(svuint32_t, uint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u64))) -svuint32_t svsubhnb(svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u16))) -svuint8_t svsubhnb(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s32))) -svint16_t svsubhnb(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s64))) -svint32_t svsubhnb(svint64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s16))) -svint8_t svsubhnb(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u32))) -svuint16_t svsubhnb(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u64))) -svuint32_t svsubhnb(svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u16))) -svuint8_t svsubhnb(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s32))) -svint16_t svsubhnb(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s64))) -svint32_t svsubhnb(svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s16))) -svint8_t svsubhnb(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u32))) -svuint16_t svsubhnt(svuint16_t, svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u64))) -svuint32_t svsubhnt(svuint32_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u16))) -svuint8_t svsubhnt(svuint8_t, svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s32))) -svint16_t svsubhnt(svint16_t, svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s64))) -svint32_t svsubhnt(svint32_t, svint64_t, 
int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s16))) -svint8_t svsubhnt(svint8_t, svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u32))) -svuint16_t svsubhnt(svuint16_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u64))) -svuint32_t svsubhnt(svuint32_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u16))) -svuint8_t svsubhnt(svuint8_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s32))) -svint16_t svsubhnt(svint16_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s64))) -svint32_t svsubhnt(svint32_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s16))) -svint8_t svsubhnt(svint8_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s32))) -svint32_t svsublb(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s64))) -svint64_t svsublb(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s16))) -svint16_t svsublb(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u32))) -svuint32_t svsublb(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u64))) -svuint64_t svsublb(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u16))) -svuint16_t svsublb(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s32))) -svint32_t svsublb(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s64))) -svint64_t svsublb(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s16))) -svint16_t 
svsublb(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u32))) -svuint32_t svsublb(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u64))) -svuint64_t svsublb(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u16))) -svuint16_t svsublb(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s32))) -svint32_t svsublbt(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s64))) -svint64_t svsublbt(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s16))) -svint16_t svsublbt(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s32))) -svint32_t svsublbt(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s64))) -svint64_t svsublbt(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s16))) -svint16_t svsublbt(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s32))) -svint32_t svsublt(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s64))) -svint64_t svsublt(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s16))) -svint16_t svsublt(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u32))) -svuint32_t svsublt(svuint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u64))) -svuint64_t svsublt(svuint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u16))) -svuint16_t svsublt(svuint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s32))) -svint32_t svsublt(svint16_t, svint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s64))) -svint64_t svsublt(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s16))) -svint16_t svsublt(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u32))) -svuint32_t svsublt(svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u64))) -svuint64_t svsublt(svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u16))) -svuint16_t svsublt(svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s32))) -svint32_t svsubltb(svint16_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s64))) -svint64_t svsubltb(svint32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s16))) -svint16_t svsubltb(svint8_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s32))) -svint32_t svsubltb(svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s64))) -svint64_t svsubltb(svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s16))) -svint16_t svsubltb(svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s32))) -svint32_t svsubwb(svint32_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s64))) -svint64_t svsubwb(svint64_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s16))) -svint16_t svsubwb(svint16_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u32))) -svuint32_t svsubwb(svuint32_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u64))) -svuint64_t svsubwb(svuint64_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u16))) 
-svuint16_t svsubwb(svuint16_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s32))) -svint32_t svsubwb(svint32_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s64))) -svint64_t svsubwb(svint64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s16))) -svint16_t svsubwb(svint16_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u32))) -svuint32_t svsubwb(svuint32_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u64))) -svuint64_t svsubwb(svuint64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u16))) -svuint16_t svsubwb(svuint16_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s32))) -svint32_t svsubwt(svint32_t, int16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s64))) -svint64_t svsubwt(svint64_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s16))) -svint16_t svsubwt(svint16_t, int8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u32))) -svuint32_t svsubwt(svuint32_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u64))) -svuint64_t svsubwt(svuint64_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u16))) -svuint16_t svsubwt(svuint16_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s32))) -svint32_t svsubwt(svint32_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s64))) -svint64_t svsubwt(svint64_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s16))) -svint16_t svsubwt(svint16_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u32))) -svuint32_t svsubwt(svuint32_t, svuint16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u64))) -svuint64_t svsubwt(svuint64_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u16))) -svuint16_t svsubwt(svuint16_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u8))) -svuint8_t svtbl2(svuint8x2_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u32))) -svuint32_t svtbl2(svuint32x2_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u64))) -svuint64_t svtbl2(svuint64x2_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u16))) -svuint16_t svtbl2(svuint16x2_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s8))) -svint8_t svtbl2(svint8x2_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f64))) -svfloat64_t svtbl2(svfloat64x2_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f32))) -svfloat32_t svtbl2(svfloat32x2_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f16))) -svfloat16_t svtbl2(svfloat16x2_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s32))) -svint32_t svtbl2(svint32x2_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s64))) -svint64_t svtbl2(svint64x2_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s16))) -svint16_t svtbl2(svint16x2_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u8))) -svuint8_t svtbx(svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u32))) -svuint32_t svtbx(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u64))) -svuint64_t svtbx(svuint64_t, svuint64_t, svuint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u16))) -svuint16_t svtbx(svuint16_t, svuint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s8))) -svint8_t svtbx(svint8_t, svint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f64))) -svfloat64_t svtbx(svfloat64_t, svfloat64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f32))) -svfloat32_t svtbx(svfloat32_t, svfloat32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f16))) -svfloat16_t svtbx(svfloat16_t, svfloat16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s32))) -svint32_t svtbx(svint32_t, svint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s64))) -svint64_t svtbx(svint64_t, svint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s16))) -svint16_t svtbx(svint16_t, svint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_m))) -svint8_t svuqadd_m(svbool_t, svint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_m))) -svint32_t svuqadd_m(svbool_t, svint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_m))) -svint64_t svuqadd_m(svbool_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_m))) -svint16_t svuqadd_m(svbool_t, svint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_x))) -svint8_t svuqadd_x(svbool_t, svint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_x))) -svint32_t svuqadd_x(svbool_t, svint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_x))) -svint64_t svuqadd_x(svbool_t, svint64_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_x))) -svint16_t svuqadd_x(svbool_t, svint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_z))) -svint8_t svuqadd_z(svbool_t, svint8_t, uint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_z))) -svint32_t svuqadd_z(svbool_t, svint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_z))) -svint64_t svuqadd_z(svbool_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_z))) -svint16_t svuqadd_z(svbool_t, svint16_t, uint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_m))) -svint8_t svuqadd_m(svbool_t, svint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_m))) -svint32_t svuqadd_m(svbool_t, svint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_m))) -svint64_t svuqadd_m(svbool_t, svint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_m))) -svint16_t svuqadd_m(svbool_t, svint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_x))) -svint8_t svuqadd_x(svbool_t, svint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_x))) -svint32_t svuqadd_x(svbool_t, svint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_x))) -svint64_t svuqadd_x(svbool_t, svint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_x))) -svint16_t svuqadd_x(svbool_t, svint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_z))) -svint8_t svuqadd_z(svbool_t, svint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_z))) -svint32_t svuqadd_z(svbool_t, svint32_t, svuint32_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_z))) -svint64_t svuqadd_z(svbool_t, svint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_z))) -svint16_t svuqadd_z(svbool_t, svint16_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s32))) -svbool_t svwhilege_b8(int32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s32))) -svbool_t svwhilege_b32(int32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s32))) -svbool_t svwhilege_b64(int32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s32))) -svbool_t svwhilege_b16(int32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s64))) -svbool_t svwhilege_b8(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s64))) -svbool_t svwhilege_b32(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s64))) -svbool_t svwhilege_b64(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s64))) -svbool_t svwhilege_b16(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u32))) -svbool_t svwhilege_b8(uint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u32))) -svbool_t svwhilege_b32(uint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u32))) -svbool_t svwhilege_b64(uint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u32))) -svbool_t svwhilege_b16(uint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u64))) -svbool_t svwhilege_b8(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u64))) -svbool_t 
svwhilege_b32(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u64))) -svbool_t svwhilege_b64(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u64))) -svbool_t svwhilege_b16(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s32))) -svbool_t svwhilegt_b8(int32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s32))) -svbool_t svwhilegt_b32(int32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s32))) -svbool_t svwhilegt_b64(int32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s32))) -svbool_t svwhilegt_b16(int32_t, int32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s64))) -svbool_t svwhilegt_b8(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s64))) -svbool_t svwhilegt_b32(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s64))) -svbool_t svwhilegt_b64(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s64))) -svbool_t svwhilegt_b16(int64_t, int64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u32))) -svbool_t svwhilegt_b8(uint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u32))) -svbool_t svwhilegt_b32(uint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u32))) -svbool_t svwhilegt_b64(uint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u32))) -svbool_t svwhilegt_b16(uint32_t, uint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u64))) -svbool_t svwhilegt_b8(uint64_t, uint64_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u64))) -svbool_t svwhilegt_b32(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u64))) -svbool_t svwhilegt_b64(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u64))) -svbool_t svwhilegt_b16(uint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u8))) -svbool_t svwhilerw(uint8_t const *, uint8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s8))) -svbool_t svwhilerw(int8_t const *, int8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u64))) -svbool_t svwhilerw(uint64_t const *, uint64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f64))) -svbool_t svwhilerw(float64_t const *, float64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s64))) -svbool_t svwhilerw(int64_t const *, int64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u16))) -svbool_t svwhilerw(uint16_t const *, uint16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f16))) -svbool_t svwhilerw(float16_t const *, float16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s16))) -svbool_t svwhilerw(int16_t const *, int16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u32))) -svbool_t svwhilerw(uint32_t const *, uint32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f32))) -svbool_t svwhilerw(float32_t const *, float32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s32))) -svbool_t svwhilerw(int32_t const *, int32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u8))) -svbool_t svwhilewr(uint8_t const *, uint8_t const *); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s8))) -svbool_t svwhilewr(int8_t const *, int8_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u64))) -svbool_t svwhilewr(uint64_t const *, uint64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f64))) -svbool_t svwhilewr(float64_t const *, float64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s64))) -svbool_t svwhilewr(int64_t const *, int64_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u16))) -svbool_t svwhilewr(uint16_t const *, uint16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f16))) -svbool_t svwhilewr(float16_t const *, float16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s16))) -svbool_t svwhilewr(int16_t const *, int16_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u32))) -svbool_t svwhilewr(uint32_t const *, uint32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f32))) -svbool_t svwhilewr(float32_t const *, float32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s32))) -svbool_t svwhilewr(int32_t const *, int32_t const *); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u8))) -svuint8_t svxar(svuint8_t, svuint8_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u32))) -svuint32_t svxar(svuint32_t, svuint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u64))) -svuint64_t svxar(svuint64_t, svuint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u16))) -svuint16_t svxar(svuint16_t, svuint16_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s8))) -svint8_t svxar(svint8_t, svint8_t, uint64_t); 
-__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s32))) -svint32_t svxar(svint32_t, svint32_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s64))) -svint64_t svxar(svint64_t, svint64_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s16))) -svint16_t svxar(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s8))) +svint8_t svreinterpret_s8_s8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u8))) +svint8_t svreinterpret_s8_u8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s16))) +svint8_t svreinterpret_s8_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u16))) +svint8_t svreinterpret_s8_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s32))) +svint8_t svreinterpret_s8_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u32))) +svint8_t svreinterpret_s8_u32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s64))) +svint8_t svreinterpret_s8_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u64))) +svint8_t svreinterpret_s8_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f16))) +svint8_t svreinterpret_s8_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_bf16))) +svint8_t svreinterpret_s8_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f32))) +svint8_t svreinterpret_s8_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f64))) +svint8_t svreinterpret_s8_f64(svfloat64_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s8))) +svuint8_t svreinterpret_u8_s8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u8))) +svuint8_t svreinterpret_u8_u8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s16))) +svuint8_t svreinterpret_u8_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u16))) +svuint8_t svreinterpret_u8_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s32))) +svuint8_t svreinterpret_u8_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u32))) +svuint8_t svreinterpret_u8_u32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s64))) +svuint8_t svreinterpret_u8_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u64))) +svuint8_t svreinterpret_u8_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f16))) +svuint8_t svreinterpret_u8_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_bf16))) +svuint8_t svreinterpret_u8_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f32))) +svuint8_t svreinterpret_u8_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f64))) +svuint8_t svreinterpret_u8_f64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s8))) +svint16_t svreinterpret_s16_s8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u8))) +svint16_t svreinterpret_s16_u8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s16))) +svint16_t svreinterpret_s16_s16(svint16_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u16))) +svint16_t svreinterpret_s16_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s32))) +svint16_t svreinterpret_s16_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u32))) +svint16_t svreinterpret_s16_u32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s64))) +svint16_t svreinterpret_s16_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u64))) +svint16_t svreinterpret_s16_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f16))) +svint16_t svreinterpret_s16_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_bf16))) +svint16_t svreinterpret_s16_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f32))) +svint16_t svreinterpret_s16_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f64))) +svint16_t svreinterpret_s16_f64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s8))) +svuint16_t svreinterpret_u16_s8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u8))) +svuint16_t svreinterpret_u16_u8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s16))) +svuint16_t svreinterpret_u16_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u16))) +svuint16_t svreinterpret_u16_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s32))) +svuint16_t svreinterpret_u16_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u32))) +svuint16_t svreinterpret_u16_u32(svuint32_t 
op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s64))) +svuint16_t svreinterpret_u16_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u64))) +svuint16_t svreinterpret_u16_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f16))) +svuint16_t svreinterpret_u16_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_bf16))) +svuint16_t svreinterpret_u16_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f32))) +svuint16_t svreinterpret_u16_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f64))) +svuint16_t svreinterpret_u16_f64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s8))) +svint32_t svreinterpret_s32_s8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u8))) +svint32_t svreinterpret_s32_u8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s16))) +svint32_t svreinterpret_s32_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u16))) +svint32_t svreinterpret_s32_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s32))) +svint32_t svreinterpret_s32_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u32))) +svint32_t svreinterpret_s32_u32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s64))) +svint32_t svreinterpret_s32_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u64))) +svint32_t svreinterpret_s32_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f16))) +svint32_t 
svreinterpret_s32_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_bf16))) +svint32_t svreinterpret_s32_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f32))) +svint32_t svreinterpret_s32_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f64))) +svint32_t svreinterpret_s32_f64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s8))) +svuint32_t svreinterpret_u32_s8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u8))) +svuint32_t svreinterpret_u32_u8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s16))) +svuint32_t svreinterpret_u32_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u16))) +svuint32_t svreinterpret_u32_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s32))) +svuint32_t svreinterpret_u32_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u32))) +svuint32_t svreinterpret_u32_u32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s64))) +svuint32_t svreinterpret_u32_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u64))) +svuint32_t svreinterpret_u32_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f16))) +svuint32_t svreinterpret_u32_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_bf16))) +svuint32_t svreinterpret_u32_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f32))) +svuint32_t svreinterpret_u32_f32(svfloat32_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f64))) +svuint32_t svreinterpret_u32_f64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s8))) +svint64_t svreinterpret_s64_s8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u8))) +svint64_t svreinterpret_s64_u8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s16))) +svint64_t svreinterpret_s64_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u16))) +svint64_t svreinterpret_s64_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s32))) +svint64_t svreinterpret_s64_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u32))) +svint64_t svreinterpret_s64_u32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s64))) +svint64_t svreinterpret_s64_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u64))) +svint64_t svreinterpret_s64_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f16))) +svint64_t svreinterpret_s64_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_bf16))) +svint64_t svreinterpret_s64_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f32))) +svint64_t svreinterpret_s64_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f64))) +svint64_t svreinterpret_s64_f64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s8))) +svuint64_t svreinterpret_u64_s8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u8))) +svuint64_t svreinterpret_u64_u8(svuint8_t op); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s16))) +svuint64_t svreinterpret_u64_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u16))) +svuint64_t svreinterpret_u64_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s32))) +svuint64_t svreinterpret_u64_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u32))) +svuint64_t svreinterpret_u64_u32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s64))) +svuint64_t svreinterpret_u64_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u64))) +svuint64_t svreinterpret_u64_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f16))) +svuint64_t svreinterpret_u64_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_bf16))) +svuint64_t svreinterpret_u64_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f32))) +svuint64_t svreinterpret_u64_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f64))) +svuint64_t svreinterpret_u64_f64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s8))) +svfloat16_t svreinterpret_f16_s8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u8))) +svfloat16_t svreinterpret_f16_u8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s16))) +svfloat16_t svreinterpret_f16_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u16))) +svfloat16_t svreinterpret_f16_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s32))) +svfloat16_t 
svreinterpret_f16_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u32))) +svfloat16_t svreinterpret_f16_u32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s64))) +svfloat16_t svreinterpret_f16_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u64))) +svfloat16_t svreinterpret_f16_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f16))) +svfloat16_t svreinterpret_f16_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_bf16))) +svfloat16_t svreinterpret_f16_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f32))) +svfloat16_t svreinterpret_f16_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f64))) +svfloat16_t svreinterpret_f16_f64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s8))) +svbfloat16_t svreinterpret_bf16_s8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u8))) +svbfloat16_t svreinterpret_bf16_u8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s16))) +svbfloat16_t svreinterpret_bf16_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u16))) +svbfloat16_t svreinterpret_bf16_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s32))) +svbfloat16_t svreinterpret_bf16_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u32))) +svbfloat16_t svreinterpret_bf16_u32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s64))) +svbfloat16_t svreinterpret_bf16_s64(svint64_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u64))) +svbfloat16_t svreinterpret_bf16_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f16))) +svbfloat16_t svreinterpret_bf16_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_bf16))) +svbfloat16_t svreinterpret_bf16_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f32))) +svbfloat16_t svreinterpret_bf16_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f64))) +svbfloat16_t svreinterpret_bf16_f64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s8))) +svfloat32_t svreinterpret_f32_s8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u8))) +svfloat32_t svreinterpret_f32_u8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s16))) +svfloat32_t svreinterpret_f32_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u16))) +svfloat32_t svreinterpret_f32_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s32))) +svfloat32_t svreinterpret_f32_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u32))) +svfloat32_t svreinterpret_f32_u32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s64))) +svfloat32_t svreinterpret_f32_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u64))) +svfloat32_t svreinterpret_f32_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f16))) +svfloat32_t svreinterpret_f32_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_bf16))) 
+svfloat32_t svreinterpret_f32_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f32))) +svfloat32_t svreinterpret_f32_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f64))) +svfloat32_t svreinterpret_f32_f64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s8))) +svfloat64_t svreinterpret_f64_s8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u8))) +svfloat64_t svreinterpret_f64_u8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s16))) +svfloat64_t svreinterpret_f64_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u16))) +svfloat64_t svreinterpret_f64_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s32))) +svfloat64_t svreinterpret_f64_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u32))) +svfloat64_t svreinterpret_f64_u32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s64))) +svfloat64_t svreinterpret_f64_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u64))) +svfloat64_t svreinterpret_f64_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f16))) +svfloat64_t svreinterpret_f64_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_bf16))) +svfloat64_t svreinterpret_f64_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f32))) +svfloat64_t svreinterpret_f64_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f64))) +svfloat64_t svreinterpret_f64_f64(svfloat64_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s8))) +svint8_t svreinterpret_s8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u8))) +svint8_t svreinterpret_s8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s16))) +svint8_t svreinterpret_s8(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u16))) +svint8_t svreinterpret_s8(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s32))) +svint8_t svreinterpret_s8(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u32))) +svint8_t svreinterpret_s8(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s64))) +svint8_t svreinterpret_s8(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u64))) +svint8_t svreinterpret_s8(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f16))) +svint8_t svreinterpret_s8(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_bf16))) +svint8_t svreinterpret_s8(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f32))) +svint8_t svreinterpret_s8(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f64))) +svint8_t svreinterpret_s8(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s8))) +svuint8_t svreinterpret_u8(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u8))) +svuint8_t svreinterpret_u8(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s16))) +svuint8_t svreinterpret_u8(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u16))) +svuint8_t 
svreinterpret_u8(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s32))) +svuint8_t svreinterpret_u8(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u32))) +svuint8_t svreinterpret_u8(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s64))) +svuint8_t svreinterpret_u8(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u64))) +svuint8_t svreinterpret_u8(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f16))) +svuint8_t svreinterpret_u8(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_bf16))) +svuint8_t svreinterpret_u8(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f32))) +svuint8_t svreinterpret_u8(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f64))) +svuint8_t svreinterpret_u8(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s8))) +svint16_t svreinterpret_s16(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u8))) +svint16_t svreinterpret_s16(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s16))) +svint16_t svreinterpret_s16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u16))) +svint16_t svreinterpret_s16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s32))) +svint16_t svreinterpret_s16(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u32))) +svint16_t svreinterpret_s16(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s64))) +svint16_t svreinterpret_s16(svint64_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u64))) +svint16_t svreinterpret_s16(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f16))) +svint16_t svreinterpret_s16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_bf16))) +svint16_t svreinterpret_s16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f32))) +svint16_t svreinterpret_s16(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f64))) +svint16_t svreinterpret_s16(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s8))) +svuint16_t svreinterpret_u16(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u8))) +svuint16_t svreinterpret_u16(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s16))) +svuint16_t svreinterpret_u16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u16))) +svuint16_t svreinterpret_u16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s32))) +svuint16_t svreinterpret_u16(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u32))) +svuint16_t svreinterpret_u16(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s64))) +svuint16_t svreinterpret_u16(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u64))) +svuint16_t svreinterpret_u16(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f16))) +svuint16_t svreinterpret_u16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_bf16))) +svuint16_t svreinterpret_u16(svbfloat16_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f32))) +svuint16_t svreinterpret_u16(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f64))) +svuint16_t svreinterpret_u16(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s8))) +svint32_t svreinterpret_s32(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u8))) +svint32_t svreinterpret_s32(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s16))) +svint32_t svreinterpret_s32(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u16))) +svint32_t svreinterpret_s32(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s32))) +svint32_t svreinterpret_s32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u32))) +svint32_t svreinterpret_s32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s64))) +svint32_t svreinterpret_s32(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u64))) +svint32_t svreinterpret_s32(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f16))) +svint32_t svreinterpret_s32(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_bf16))) +svint32_t svreinterpret_s32(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f32))) +svint32_t svreinterpret_s32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f64))) +svint32_t svreinterpret_s32(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s8))) +svuint32_t svreinterpret_u32(svint8_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u8))) +svuint32_t svreinterpret_u32(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s16))) +svuint32_t svreinterpret_u32(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u16))) +svuint32_t svreinterpret_u32(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s32))) +svuint32_t svreinterpret_u32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u32))) +svuint32_t svreinterpret_u32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s64))) +svuint32_t svreinterpret_u32(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u64))) +svuint32_t svreinterpret_u32(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f16))) +svuint32_t svreinterpret_u32(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_bf16))) +svuint32_t svreinterpret_u32(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f32))) +svuint32_t svreinterpret_u32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f64))) +svuint32_t svreinterpret_u32(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s8))) +svint64_t svreinterpret_s64(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u8))) +svint64_t svreinterpret_s64(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s16))) +svint64_t svreinterpret_s64(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u16))) +svint64_t svreinterpret_s64(svuint16_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s32))) +svint64_t svreinterpret_s64(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u32))) +svint64_t svreinterpret_s64(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s64))) +svint64_t svreinterpret_s64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u64))) +svint64_t svreinterpret_s64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f16))) +svint64_t svreinterpret_s64(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_bf16))) +svint64_t svreinterpret_s64(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f32))) +svint64_t svreinterpret_s64(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f64))) +svint64_t svreinterpret_s64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s8))) +svuint64_t svreinterpret_u64(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u8))) +svuint64_t svreinterpret_u64(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s16))) +svuint64_t svreinterpret_u64(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u16))) +svuint64_t svreinterpret_u64(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s32))) +svuint64_t svreinterpret_u64(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u32))) +svuint64_t svreinterpret_u64(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s64))) +svuint64_t svreinterpret_u64(svint64_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u64))) +svuint64_t svreinterpret_u64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f16))) +svuint64_t svreinterpret_u64(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_bf16))) +svuint64_t svreinterpret_u64(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f32))) +svuint64_t svreinterpret_u64(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f64))) +svuint64_t svreinterpret_u64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s8))) +svfloat16_t svreinterpret_f16(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u8))) +svfloat16_t svreinterpret_f16(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s16))) +svfloat16_t svreinterpret_f16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u16))) +svfloat16_t svreinterpret_f16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s32))) +svfloat16_t svreinterpret_f16(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u32))) +svfloat16_t svreinterpret_f16(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s64))) +svfloat16_t svreinterpret_f16(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u64))) +svfloat16_t svreinterpret_f16(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f16))) +svfloat16_t svreinterpret_f16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_bf16))) +svfloat16_t svreinterpret_f16(svbfloat16_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f32))) +svfloat16_t svreinterpret_f16(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f64))) +svfloat16_t svreinterpret_f16(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s8))) +svbfloat16_t svreinterpret_bf16(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u8))) +svbfloat16_t svreinterpret_bf16(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s16))) +svbfloat16_t svreinterpret_bf16(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u16))) +svbfloat16_t svreinterpret_bf16(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s32))) +svbfloat16_t svreinterpret_bf16(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u32))) +svbfloat16_t svreinterpret_bf16(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s64))) +svbfloat16_t svreinterpret_bf16(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u64))) +svbfloat16_t svreinterpret_bf16(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f16))) +svbfloat16_t svreinterpret_bf16(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_bf16))) +svbfloat16_t svreinterpret_bf16(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f32))) +svbfloat16_t svreinterpret_bf16(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f64))) +svbfloat16_t svreinterpret_bf16(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s8))) +svfloat32_t 
svreinterpret_f32(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u8))) +svfloat32_t svreinterpret_f32(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s16))) +svfloat32_t svreinterpret_f32(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u16))) +svfloat32_t svreinterpret_f32(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s32))) +svfloat32_t svreinterpret_f32(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u32))) +svfloat32_t svreinterpret_f32(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s64))) +svfloat32_t svreinterpret_f32(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u64))) +svfloat32_t svreinterpret_f32(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f16))) +svfloat32_t svreinterpret_f32(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_bf16))) +svfloat32_t svreinterpret_f32(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f32))) +svfloat32_t svreinterpret_f32(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f64))) +svfloat32_t svreinterpret_f32(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s8))) +svfloat64_t svreinterpret_f64(svint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u8))) +svfloat64_t svreinterpret_f64(svuint8_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s16))) +svfloat64_t svreinterpret_f64(svint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u16))) +svfloat64_t 
svreinterpret_f64(svuint16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s32))) +svfloat64_t svreinterpret_f64(svint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u32))) +svfloat64_t svreinterpret_f64(svuint32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s64))) +svfloat64_t svreinterpret_f64(svint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u64))) +svfloat64_t svreinterpret_f64(svuint64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f16))) +svfloat64_t svreinterpret_f64(svfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_bf16))) +svfloat64_t svreinterpret_f64(svbfloat16_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f32))) +svfloat64_t svreinterpret_f64(svfloat32_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f64))) +svfloat64_t svreinterpret_f64(svfloat64_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s8_x2))) +svint8x2_t svreinterpret_s8_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u8_x2))) +svint8x2_t svreinterpret_s8_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s16_x2))) +svint8x2_t svreinterpret_s8_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u16_x2))) +svint8x2_t svreinterpret_s8_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s32_x2))) +svint8x2_t svreinterpret_s8_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u32_x2))) +svint8x2_t svreinterpret_s8_u32_x2(svuint32x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s64_x2))) +svint8x2_t svreinterpret_s8_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u64_x2))) +svint8x2_t svreinterpret_s8_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f16_x2))) +svint8x2_t svreinterpret_s8_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_bf16_x2))) +svint8x2_t svreinterpret_s8_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f32_x2))) +svint8x2_t svreinterpret_s8_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f64_x2))) +svint8x2_t svreinterpret_s8_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s8_x2))) +svuint8x2_t svreinterpret_u8_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u8_x2))) +svuint8x2_t svreinterpret_u8_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s16_x2))) +svuint8x2_t svreinterpret_u8_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u16_x2))) +svuint8x2_t svreinterpret_u8_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s32_x2))) +svuint8x2_t svreinterpret_u8_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u32_x2))) +svuint8x2_t svreinterpret_u8_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s64_x2))) +svuint8x2_t svreinterpret_u8_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u64_x2))) +svuint8x2_t svreinterpret_u8_u64_x2(svuint64x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f16_x2))) +svuint8x2_t svreinterpret_u8_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_bf16_x2))) +svuint8x2_t svreinterpret_u8_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f32_x2))) +svuint8x2_t svreinterpret_u8_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f64_x2))) +svuint8x2_t svreinterpret_u8_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s8_x2))) +svint16x2_t svreinterpret_s16_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u8_x2))) +svint16x2_t svreinterpret_s16_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s16_x2))) +svint16x2_t svreinterpret_s16_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u16_x2))) +svint16x2_t svreinterpret_s16_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s32_x2))) +svint16x2_t svreinterpret_s16_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u32_x2))) +svint16x2_t svreinterpret_s16_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s64_x2))) +svint16x2_t svreinterpret_s16_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u64_x2))) +svint16x2_t svreinterpret_s16_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f16_x2))) +svint16x2_t svreinterpret_s16_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_bf16_x2))) +svint16x2_t svreinterpret_s16_bf16_x2(svbfloat16x2_t 
op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f32_x2))) +svint16x2_t svreinterpret_s16_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f64_x2))) +svint16x2_t svreinterpret_s16_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s8_x2))) +svuint16x2_t svreinterpret_u16_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u8_x2))) +svuint16x2_t svreinterpret_u16_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s16_x2))) +svuint16x2_t svreinterpret_u16_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u16_x2))) +svuint16x2_t svreinterpret_u16_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s32_x2))) +svuint16x2_t svreinterpret_u16_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u32_x2))) +svuint16x2_t svreinterpret_u16_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s64_x2))) +svuint16x2_t svreinterpret_u16_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u64_x2))) +svuint16x2_t svreinterpret_u16_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f16_x2))) +svuint16x2_t svreinterpret_u16_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_bf16_x2))) +svuint16x2_t svreinterpret_u16_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f32_x2))) +svuint16x2_t svreinterpret_u16_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f64_x2))) +svuint16x2_t 
svreinterpret_u16_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s8_x2))) +svint32x2_t svreinterpret_s32_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u8_x2))) +svint32x2_t svreinterpret_s32_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s16_x2))) +svint32x2_t svreinterpret_s32_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u16_x2))) +svint32x2_t svreinterpret_s32_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s32_x2))) +svint32x2_t svreinterpret_s32_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u32_x2))) +svint32x2_t svreinterpret_s32_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s64_x2))) +svint32x2_t svreinterpret_s32_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u64_x2))) +svint32x2_t svreinterpret_s32_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f16_x2))) +svint32x2_t svreinterpret_s32_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_bf16_x2))) +svint32x2_t svreinterpret_s32_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f32_x2))) +svint32x2_t svreinterpret_s32_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f64_x2))) +svint32x2_t svreinterpret_s32_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s8_x2))) +svuint32x2_t svreinterpret_u32_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u8_x2))) 
+svuint32x2_t svreinterpret_u32_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s16_x2))) +svuint32x2_t svreinterpret_u32_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u16_x2))) +svuint32x2_t svreinterpret_u32_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s32_x2))) +svuint32x2_t svreinterpret_u32_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u32_x2))) +svuint32x2_t svreinterpret_u32_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s64_x2))) +svuint32x2_t svreinterpret_u32_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u64_x2))) +svuint32x2_t svreinterpret_u32_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f16_x2))) +svuint32x2_t svreinterpret_u32_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_bf16_x2))) +svuint32x2_t svreinterpret_u32_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f32_x2))) +svuint32x2_t svreinterpret_u32_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f64_x2))) +svuint32x2_t svreinterpret_u32_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s8_x2))) +svint64x2_t svreinterpret_s64_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u8_x2))) +svint64x2_t svreinterpret_s64_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s16_x2))) +svint64x2_t svreinterpret_s64_s16_x2(svint16x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u16_x2))) +svint64x2_t svreinterpret_s64_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s32_x2))) +svint64x2_t svreinterpret_s64_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u32_x2))) +svint64x2_t svreinterpret_s64_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s64_x2))) +svint64x2_t svreinterpret_s64_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u64_x2))) +svint64x2_t svreinterpret_s64_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f16_x2))) +svint64x2_t svreinterpret_s64_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_bf16_x2))) +svint64x2_t svreinterpret_s64_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f32_x2))) +svint64x2_t svreinterpret_s64_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f64_x2))) +svint64x2_t svreinterpret_s64_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s8_x2))) +svuint64x2_t svreinterpret_u64_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u8_x2))) +svuint64x2_t svreinterpret_u64_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s16_x2))) +svuint64x2_t svreinterpret_u64_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u16_x2))) +svuint64x2_t svreinterpret_u64_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s32_x2))) +svuint64x2_t 
svreinterpret_u64_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u32_x2))) +svuint64x2_t svreinterpret_u64_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s64_x2))) +svuint64x2_t svreinterpret_u64_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u64_x2))) +svuint64x2_t svreinterpret_u64_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f16_x2))) +svuint64x2_t svreinterpret_u64_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_bf16_x2))) +svuint64x2_t svreinterpret_u64_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f32_x2))) +svuint64x2_t svreinterpret_u64_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f64_x2))) +svuint64x2_t svreinterpret_u64_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s8_x2))) +svfloat16x2_t svreinterpret_f16_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u8_x2))) +svfloat16x2_t svreinterpret_f16_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s16_x2))) +svfloat16x2_t svreinterpret_f16_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u16_x2))) +svfloat16x2_t svreinterpret_f16_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s32_x2))) +svfloat16x2_t svreinterpret_f16_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u32_x2))) +svfloat16x2_t svreinterpret_f16_u32_x2(svuint32x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s64_x2))) +svfloat16x2_t svreinterpret_f16_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u64_x2))) +svfloat16x2_t svreinterpret_f16_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f16_x2))) +svfloat16x2_t svreinterpret_f16_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_bf16_x2))) +svfloat16x2_t svreinterpret_f16_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f32_x2))) +svfloat16x2_t svreinterpret_f16_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f64_x2))) +svfloat16x2_t svreinterpret_f16_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s8_x2))) +svbfloat16x2_t svreinterpret_bf16_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u8_x2))) +svbfloat16x2_t svreinterpret_bf16_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s16_x2))) +svbfloat16x2_t svreinterpret_bf16_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u16_x2))) +svbfloat16x2_t svreinterpret_bf16_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s32_x2))) +svbfloat16x2_t svreinterpret_bf16_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u32_x2))) +svbfloat16x2_t svreinterpret_bf16_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s64_x2))) +svbfloat16x2_t svreinterpret_bf16_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u64_x2))) 
+svbfloat16x2_t svreinterpret_bf16_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f16_x2))) +svbfloat16x2_t svreinterpret_bf16_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_bf16_x2))) +svbfloat16x2_t svreinterpret_bf16_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f32_x2))) +svbfloat16x2_t svreinterpret_bf16_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f64_x2))) +svbfloat16x2_t svreinterpret_bf16_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s8_x2))) +svfloat32x2_t svreinterpret_f32_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u8_x2))) +svfloat32x2_t svreinterpret_f32_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s16_x2))) +svfloat32x2_t svreinterpret_f32_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u16_x2))) +svfloat32x2_t svreinterpret_f32_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s32_x2))) +svfloat32x2_t svreinterpret_f32_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u32_x2))) +svfloat32x2_t svreinterpret_f32_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s64_x2))) +svfloat32x2_t svreinterpret_f32_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u64_x2))) +svfloat32x2_t svreinterpret_f32_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f16_x2))) +svfloat32x2_t svreinterpret_f32_f16_x2(svfloat16x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_bf16_x2))) +svfloat32x2_t svreinterpret_f32_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f32_x2))) +svfloat32x2_t svreinterpret_f32_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f64_x2))) +svfloat32x2_t svreinterpret_f32_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s8_x2))) +svfloat64x2_t svreinterpret_f64_s8_x2(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u8_x2))) +svfloat64x2_t svreinterpret_f64_u8_x2(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s16_x2))) +svfloat64x2_t svreinterpret_f64_s16_x2(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u16_x2))) +svfloat64x2_t svreinterpret_f64_u16_x2(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s32_x2))) +svfloat64x2_t svreinterpret_f64_s32_x2(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u32_x2))) +svfloat64x2_t svreinterpret_f64_u32_x2(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s64_x2))) +svfloat64x2_t svreinterpret_f64_s64_x2(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u64_x2))) +svfloat64x2_t svreinterpret_f64_u64_x2(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f16_x2))) +svfloat64x2_t svreinterpret_f64_f16_x2(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_bf16_x2))) +svfloat64x2_t svreinterpret_f64_bf16_x2(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f32_x2))) +svfloat64x2_t 
svreinterpret_f64_f32_x2(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f64_x2))) +svfloat64x2_t svreinterpret_f64_f64_x2(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s8_x2))) +svint8x2_t svreinterpret_s8(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u8_x2))) +svint8x2_t svreinterpret_s8(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s16_x2))) +svint8x2_t svreinterpret_s8(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u16_x2))) +svint8x2_t svreinterpret_s8(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s32_x2))) +svint8x2_t svreinterpret_s8(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u32_x2))) +svint8x2_t svreinterpret_s8(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s64_x2))) +svint8x2_t svreinterpret_s8(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u64_x2))) +svint8x2_t svreinterpret_s8(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f16_x2))) +svint8x2_t svreinterpret_s8(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_bf16_x2))) +svint8x2_t svreinterpret_s8(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f32_x2))) +svint8x2_t svreinterpret_s8(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f64_x2))) +svint8x2_t svreinterpret_s8(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s8_x2))) +svuint8x2_t svreinterpret_u8(svint8x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u8_x2))) +svuint8x2_t svreinterpret_u8(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s16_x2))) +svuint8x2_t svreinterpret_u8(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u16_x2))) +svuint8x2_t svreinterpret_u8(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s32_x2))) +svuint8x2_t svreinterpret_u8(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u32_x2))) +svuint8x2_t svreinterpret_u8(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s64_x2))) +svuint8x2_t svreinterpret_u8(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u64_x2))) +svuint8x2_t svreinterpret_u8(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f16_x2))) +svuint8x2_t svreinterpret_u8(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_bf16_x2))) +svuint8x2_t svreinterpret_u8(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f32_x2))) +svuint8x2_t svreinterpret_u8(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f64_x2))) +svuint8x2_t svreinterpret_u8(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s8_x2))) +svint16x2_t svreinterpret_s16(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u8_x2))) +svint16x2_t svreinterpret_s16(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s16_x2))) +svint16x2_t svreinterpret_s16(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u16_x2))) +svint16x2_t 
svreinterpret_s16(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s32_x2))) +svint16x2_t svreinterpret_s16(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u32_x2))) +svint16x2_t svreinterpret_s16(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s64_x2))) +svint16x2_t svreinterpret_s16(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u64_x2))) +svint16x2_t svreinterpret_s16(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f16_x2))) +svint16x2_t svreinterpret_s16(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_bf16_x2))) +svint16x2_t svreinterpret_s16(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f32_x2))) +svint16x2_t svreinterpret_s16(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f64_x2))) +svint16x2_t svreinterpret_s16(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s8_x2))) +svuint16x2_t svreinterpret_u16(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u8_x2))) +svuint16x2_t svreinterpret_u16(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s16_x2))) +svuint16x2_t svreinterpret_u16(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u16_x2))) +svuint16x2_t svreinterpret_u16(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s32_x2))) +svuint16x2_t svreinterpret_u16(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u32_x2))) +svuint16x2_t svreinterpret_u16(svuint32x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s64_x2))) +svuint16x2_t svreinterpret_u16(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u64_x2))) +svuint16x2_t svreinterpret_u16(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f16_x2))) +svuint16x2_t svreinterpret_u16(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_bf16_x2))) +svuint16x2_t svreinterpret_u16(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f32_x2))) +svuint16x2_t svreinterpret_u16(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f64_x2))) +svuint16x2_t svreinterpret_u16(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s8_x2))) +svint32x2_t svreinterpret_s32(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u8_x2))) +svint32x2_t svreinterpret_s32(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s16_x2))) +svint32x2_t svreinterpret_s32(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u16_x2))) +svint32x2_t svreinterpret_s32(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s32_x2))) +svint32x2_t svreinterpret_s32(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u32_x2))) +svint32x2_t svreinterpret_s32(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s64_x2))) +svint32x2_t svreinterpret_s32(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u64_x2))) +svint32x2_t svreinterpret_s32(svuint64x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f16_x2))) +svint32x2_t svreinterpret_s32(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_bf16_x2))) +svint32x2_t svreinterpret_s32(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f32_x2))) +svint32x2_t svreinterpret_s32(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f64_x2))) +svint32x2_t svreinterpret_s32(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s8_x2))) +svuint32x2_t svreinterpret_u32(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u8_x2))) +svuint32x2_t svreinterpret_u32(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s16_x2))) +svuint32x2_t svreinterpret_u32(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u16_x2))) +svuint32x2_t svreinterpret_u32(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s32_x2))) +svuint32x2_t svreinterpret_u32(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u32_x2))) +svuint32x2_t svreinterpret_u32(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s64_x2))) +svuint32x2_t svreinterpret_u32(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u64_x2))) +svuint32x2_t svreinterpret_u32(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f16_x2))) +svuint32x2_t svreinterpret_u32(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_bf16_x2))) +svuint32x2_t svreinterpret_u32(svbfloat16x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f32_x2))) +svuint32x2_t svreinterpret_u32(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f64_x2))) +svuint32x2_t svreinterpret_u32(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s8_x2))) +svint64x2_t svreinterpret_s64(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u8_x2))) +svint64x2_t svreinterpret_s64(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s16_x2))) +svint64x2_t svreinterpret_s64(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u16_x2))) +svint64x2_t svreinterpret_s64(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s32_x2))) +svint64x2_t svreinterpret_s64(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u32_x2))) +svint64x2_t svreinterpret_s64(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s64_x2))) +svint64x2_t svreinterpret_s64(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u64_x2))) +svint64x2_t svreinterpret_s64(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f16_x2))) +svint64x2_t svreinterpret_s64(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_bf16_x2))) +svint64x2_t svreinterpret_s64(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f32_x2))) +svint64x2_t svreinterpret_s64(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f64_x2))) +svint64x2_t svreinterpret_s64(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s8_x2))) 
+svuint64x2_t svreinterpret_u64(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u8_x2))) +svuint64x2_t svreinterpret_u64(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s16_x2))) +svuint64x2_t svreinterpret_u64(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u16_x2))) +svuint64x2_t svreinterpret_u64(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s32_x2))) +svuint64x2_t svreinterpret_u64(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u32_x2))) +svuint64x2_t svreinterpret_u64(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s64_x2))) +svuint64x2_t svreinterpret_u64(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u64_x2))) +svuint64x2_t svreinterpret_u64(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f16_x2))) +svuint64x2_t svreinterpret_u64(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_bf16_x2))) +svuint64x2_t svreinterpret_u64(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f32_x2))) +svuint64x2_t svreinterpret_u64(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f64_x2))) +svuint64x2_t svreinterpret_u64(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s8_x2))) +svfloat16x2_t svreinterpret_f16(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u8_x2))) +svfloat16x2_t svreinterpret_f16(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s16_x2))) +svfloat16x2_t svreinterpret_f16(svint16x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u16_x2))) +svfloat16x2_t svreinterpret_f16(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s32_x2))) +svfloat16x2_t svreinterpret_f16(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u32_x2))) +svfloat16x2_t svreinterpret_f16(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s64_x2))) +svfloat16x2_t svreinterpret_f16(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u64_x2))) +svfloat16x2_t svreinterpret_f16(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f16_x2))) +svfloat16x2_t svreinterpret_f16(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_bf16_x2))) +svfloat16x2_t svreinterpret_f16(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f32_x2))) +svfloat16x2_t svreinterpret_f16(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f64_x2))) +svfloat16x2_t svreinterpret_f16(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s8_x2))) +svbfloat16x2_t svreinterpret_bf16(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u8_x2))) +svbfloat16x2_t svreinterpret_bf16(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s16_x2))) +svbfloat16x2_t svreinterpret_bf16(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u16_x2))) +svbfloat16x2_t svreinterpret_bf16(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s32_x2))) +svbfloat16x2_t svreinterpret_bf16(svint32x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u32_x2))) +svbfloat16x2_t svreinterpret_bf16(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s64_x2))) +svbfloat16x2_t svreinterpret_bf16(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u64_x2))) +svbfloat16x2_t svreinterpret_bf16(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f16_x2))) +svbfloat16x2_t svreinterpret_bf16(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_bf16_x2))) +svbfloat16x2_t svreinterpret_bf16(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f32_x2))) +svbfloat16x2_t svreinterpret_bf16(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f64_x2))) +svbfloat16x2_t svreinterpret_bf16(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s8_x2))) +svfloat32x2_t svreinterpret_f32(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u8_x2))) +svfloat32x2_t svreinterpret_f32(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s16_x2))) +svfloat32x2_t svreinterpret_f32(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u16_x2))) +svfloat32x2_t svreinterpret_f32(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s32_x2))) +svfloat32x2_t svreinterpret_f32(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u32_x2))) +svfloat32x2_t svreinterpret_f32(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s64_x2))) +svfloat32x2_t svreinterpret_f32(svint64x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u64_x2))) +svfloat32x2_t svreinterpret_f32(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f16_x2))) +svfloat32x2_t svreinterpret_f32(svfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_bf16_x2))) +svfloat32x2_t svreinterpret_f32(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f32_x2))) +svfloat32x2_t svreinterpret_f32(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f64_x2))) +svfloat32x2_t svreinterpret_f32(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s8_x2))) +svfloat64x2_t svreinterpret_f64(svint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u8_x2))) +svfloat64x2_t svreinterpret_f64(svuint8x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s16_x2))) +svfloat64x2_t svreinterpret_f64(svint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u16_x2))) +svfloat64x2_t svreinterpret_f64(svuint16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s32_x2))) +svfloat64x2_t svreinterpret_f64(svint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u32_x2))) +svfloat64x2_t svreinterpret_f64(svuint32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s64_x2))) +svfloat64x2_t svreinterpret_f64(svint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u64_x2))) +svfloat64x2_t svreinterpret_f64(svuint64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f16_x2))) +svfloat64x2_t svreinterpret_f64(svfloat16x2_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_bf16_x2))) +svfloat64x2_t svreinterpret_f64(svbfloat16x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f32_x2))) +svfloat64x2_t svreinterpret_f64(svfloat32x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f64_x2))) +svfloat64x2_t svreinterpret_f64(svfloat64x2_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s8_x3))) +svint8x3_t svreinterpret_s8_s8_x3(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u8_x3))) +svint8x3_t svreinterpret_s8_u8_x3(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s16_x3))) +svint8x3_t svreinterpret_s8_s16_x3(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u16_x3))) +svint8x3_t svreinterpret_s8_u16_x3(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s32_x3))) +svint8x3_t svreinterpret_s8_s32_x3(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u32_x3))) +svint8x3_t svreinterpret_s8_u32_x3(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s64_x3))) +svint8x3_t svreinterpret_s8_s64_x3(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u64_x3))) +svint8x3_t svreinterpret_s8_u64_x3(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f16_x3))) +svint8x3_t svreinterpret_s8_f16_x3(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_bf16_x3))) +svint8x3_t svreinterpret_s8_bf16_x3(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f32_x3))) +svint8x3_t svreinterpret_s8_f32_x3(svfloat32x3_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f64_x3))) +svint8x3_t svreinterpret_s8_f64_x3(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s8_x3))) +svuint8x3_t svreinterpret_u8_s8_x3(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u8_x3))) +svuint8x3_t svreinterpret_u8_u8_x3(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s16_x3))) +svuint8x3_t svreinterpret_u8_s16_x3(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u16_x3))) +svuint8x3_t svreinterpret_u8_u16_x3(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s32_x3))) +svuint8x3_t svreinterpret_u8_s32_x3(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u32_x3))) +svuint8x3_t svreinterpret_u8_u32_x3(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s64_x3))) +svuint8x3_t svreinterpret_u8_s64_x3(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u64_x3))) +svuint8x3_t svreinterpret_u8_u64_x3(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f16_x3))) +svuint8x3_t svreinterpret_u8_f16_x3(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_bf16_x3))) +svuint8x3_t svreinterpret_u8_bf16_x3(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f32_x3))) +svuint8x3_t svreinterpret_u8_f32_x3(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f64_x3))) +svuint8x3_t svreinterpret_u8_f64_x3(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s8_x3))) +svint16x3_t svreinterpret_s16_s8_x3(svint8x3_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u8_x3))) +svint16x3_t svreinterpret_s16_u8_x3(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s16_x3))) +svint16x3_t svreinterpret_s16_s16_x3(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u16_x3))) +svint16x3_t svreinterpret_s16_u16_x3(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s32_x3))) +svint16x3_t svreinterpret_s16_s32_x3(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u32_x3))) +svint16x3_t svreinterpret_s16_u32_x3(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s64_x3))) +svint16x3_t svreinterpret_s16_s64_x3(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u64_x3))) +svint16x3_t svreinterpret_s16_u64_x3(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f16_x3))) +svint16x3_t svreinterpret_s16_f16_x3(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_bf16_x3))) +svint16x3_t svreinterpret_s16_bf16_x3(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f32_x3))) +svint16x3_t svreinterpret_s16_f32_x3(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f64_x3))) +svint16x3_t svreinterpret_s16_f64_x3(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s8_x3))) +svuint16x3_t svreinterpret_u16_s8_x3(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u8_x3))) +svuint16x3_t svreinterpret_u16_u8_x3(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s16_x3))) +svuint16x3_t svreinterpret_u16_s16_x3(svint16x3_t 
op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u16_x3))) +svuint16x3_t svreinterpret_u16_u16_x3(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s32_x3))) +svuint16x3_t svreinterpret_u16_s32_x3(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u32_x3))) +svuint16x3_t svreinterpret_u16_u32_x3(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s64_x3))) +svuint16x3_t svreinterpret_u16_s64_x3(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u64_x3))) +svuint16x3_t svreinterpret_u16_u64_x3(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f16_x3))) +svuint16x3_t svreinterpret_u16_f16_x3(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_bf16_x3))) +svuint16x3_t svreinterpret_u16_bf16_x3(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f32_x3))) +svuint16x3_t svreinterpret_u16_f32_x3(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f64_x3))) +svuint16x3_t svreinterpret_u16_f64_x3(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s8_x3))) +svint32x3_t svreinterpret_s32_s8_x3(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u8_x3))) +svint32x3_t svreinterpret_s32_u8_x3(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s16_x3))) +svint32x3_t svreinterpret_s32_s16_x3(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u16_x3))) +svint32x3_t svreinterpret_s32_u16_x3(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s32_x3))) +svint32x3_t 
svreinterpret_s32_s32_x3(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u32_x3))) +svint32x3_t svreinterpret_s32_u32_x3(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s64_x3))) +svint32x3_t svreinterpret_s32_s64_x3(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u64_x3))) +svint32x3_t svreinterpret_s32_u64_x3(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f16_x3))) +svint32x3_t svreinterpret_s32_f16_x3(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_bf16_x3))) +svint32x3_t svreinterpret_s32_bf16_x3(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f32_x3))) +svint32x3_t svreinterpret_s32_f32_x3(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f64_x3))) +svint32x3_t svreinterpret_s32_f64_x3(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s8_x3))) +svuint32x3_t svreinterpret_u32_s8_x3(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u8_x3))) +svuint32x3_t svreinterpret_u32_u8_x3(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s16_x3))) +svuint32x3_t svreinterpret_u32_s16_x3(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u16_x3))) +svuint32x3_t svreinterpret_u32_u16_x3(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s32_x3))) +svuint32x3_t svreinterpret_u32_s32_x3(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u32_x3))) +svuint32x3_t svreinterpret_u32_u32_x3(svuint32x3_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s64_x3))) +svuint32x3_t svreinterpret_u32_s64_x3(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u64_x3))) +svuint32x3_t svreinterpret_u32_u64_x3(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f16_x3))) +svuint32x3_t svreinterpret_u32_f16_x3(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_bf16_x3))) +svuint32x3_t svreinterpret_u32_bf16_x3(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f32_x3))) +svuint32x3_t svreinterpret_u32_f32_x3(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f64_x3))) +svuint32x3_t svreinterpret_u32_f64_x3(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s8_x3))) +svint64x3_t svreinterpret_s64_s8_x3(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u8_x3))) +svint64x3_t svreinterpret_s64_u8_x3(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s16_x3))) +svint64x3_t svreinterpret_s64_s16_x3(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u16_x3))) +svint64x3_t svreinterpret_s64_u16_x3(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s32_x3))) +svint64x3_t svreinterpret_s64_s32_x3(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u32_x3))) +svint64x3_t svreinterpret_s64_u32_x3(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s64_x3))) +svint64x3_t svreinterpret_s64_s64_x3(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u64_x3))) +svint64x3_t 
svreinterpret_s64_u64_x3(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f16_x3))) +svint64x3_t svreinterpret_s64_f16_x3(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_bf16_x3))) +svint64x3_t svreinterpret_s64_bf16_x3(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f32_x3))) +svint64x3_t svreinterpret_s64_f32_x3(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f64_x3))) +svint64x3_t svreinterpret_s64_f64_x3(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s8_x3))) +svuint64x3_t svreinterpret_u64_s8_x3(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u8_x3))) +svuint64x3_t svreinterpret_u64_u8_x3(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s16_x3))) +svuint64x3_t svreinterpret_u64_s16_x3(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u16_x3))) +svuint64x3_t svreinterpret_u64_u16_x3(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s32_x3))) +svuint64x3_t svreinterpret_u64_s32_x3(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u32_x3))) +svuint64x3_t svreinterpret_u64_u32_x3(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s64_x3))) +svuint64x3_t svreinterpret_u64_s64_x3(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u64_x3))) +svuint64x3_t svreinterpret_u64_u64_x3(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f16_x3))) +svuint64x3_t svreinterpret_u64_f16_x3(svfloat16x3_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_bf16_x3))) +svuint64x3_t svreinterpret_u64_bf16_x3(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f32_x3))) +svuint64x3_t svreinterpret_u64_f32_x3(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f64_x3))) +svuint64x3_t svreinterpret_u64_f64_x3(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s8_x3))) +svfloat16x3_t svreinterpret_f16_s8_x3(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u8_x3))) +svfloat16x3_t svreinterpret_f16_u8_x3(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s16_x3))) +svfloat16x3_t svreinterpret_f16_s16_x3(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u16_x3))) +svfloat16x3_t svreinterpret_f16_u16_x3(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s32_x3))) +svfloat16x3_t svreinterpret_f16_s32_x3(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u32_x3))) +svfloat16x3_t svreinterpret_f16_u32_x3(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s64_x3))) +svfloat16x3_t svreinterpret_f16_s64_x3(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u64_x3))) +svfloat16x3_t svreinterpret_f16_u64_x3(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f16_x3))) +svfloat16x3_t svreinterpret_f16_f16_x3(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_bf16_x3))) +svfloat16x3_t svreinterpret_f16_bf16_x3(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f32_x3))) +svfloat16x3_t 
svreinterpret_f16_f32_x3(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f64_x3))) +svfloat16x3_t svreinterpret_f16_f64_x3(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s8_x3))) +svbfloat16x3_t svreinterpret_bf16_s8_x3(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u8_x3))) +svbfloat16x3_t svreinterpret_bf16_u8_x3(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s16_x3))) +svbfloat16x3_t svreinterpret_bf16_s16_x3(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u16_x3))) +svbfloat16x3_t svreinterpret_bf16_u16_x3(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s32_x3))) +svbfloat16x3_t svreinterpret_bf16_s32_x3(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u32_x3))) +svbfloat16x3_t svreinterpret_bf16_u32_x3(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s64_x3))) +svbfloat16x3_t svreinterpret_bf16_s64_x3(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u64_x3))) +svbfloat16x3_t svreinterpret_bf16_u64_x3(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f16_x3))) +svbfloat16x3_t svreinterpret_bf16_f16_x3(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_bf16_x3))) +svbfloat16x3_t svreinterpret_bf16_bf16_x3(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f32_x3))) +svbfloat16x3_t svreinterpret_bf16_f32_x3(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f64_x3))) +svbfloat16x3_t svreinterpret_bf16_f64_x3(svfloat64x3_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s8_x3))) +svfloat32x3_t svreinterpret_f32_s8_x3(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u8_x3))) +svfloat32x3_t svreinterpret_f32_u8_x3(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s16_x3))) +svfloat32x3_t svreinterpret_f32_s16_x3(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u16_x3))) +svfloat32x3_t svreinterpret_f32_u16_x3(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s32_x3))) +svfloat32x3_t svreinterpret_f32_s32_x3(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u32_x3))) +svfloat32x3_t svreinterpret_f32_u32_x3(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s64_x3))) +svfloat32x3_t svreinterpret_f32_s64_x3(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u64_x3))) +svfloat32x3_t svreinterpret_f32_u64_x3(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f16_x3))) +svfloat32x3_t svreinterpret_f32_f16_x3(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_bf16_x3))) +svfloat32x3_t svreinterpret_f32_bf16_x3(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f32_x3))) +svfloat32x3_t svreinterpret_f32_f32_x3(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f64_x3))) +svfloat32x3_t svreinterpret_f32_f64_x3(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s8_x3))) +svfloat64x3_t svreinterpret_f64_s8_x3(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u8_x3))) +svfloat64x3_t 
svreinterpret_f64_u8_x3(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s16_x3))) +svfloat64x3_t svreinterpret_f64_s16_x3(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u16_x3))) +svfloat64x3_t svreinterpret_f64_u16_x3(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s32_x3))) +svfloat64x3_t svreinterpret_f64_s32_x3(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u32_x3))) +svfloat64x3_t svreinterpret_f64_u32_x3(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s64_x3))) +svfloat64x3_t svreinterpret_f64_s64_x3(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u64_x3))) +svfloat64x3_t svreinterpret_f64_u64_x3(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f16_x3))) +svfloat64x3_t svreinterpret_f64_f16_x3(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_bf16_x3))) +svfloat64x3_t svreinterpret_f64_bf16_x3(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f32_x3))) +svfloat64x3_t svreinterpret_f64_f32_x3(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f64_x3))) +svfloat64x3_t svreinterpret_f64_f64_x3(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s8_x3))) +svint8x3_t svreinterpret_s8(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u8_x3))) +svint8x3_t svreinterpret_s8(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s16_x3))) +svint8x3_t svreinterpret_s8(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u16_x3))) +svint8x3_t 
svreinterpret_s8(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s32_x3))) +svint8x3_t svreinterpret_s8(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u32_x3))) +svint8x3_t svreinterpret_s8(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s64_x3))) +svint8x3_t svreinterpret_s8(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u64_x3))) +svint8x3_t svreinterpret_s8(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f16_x3))) +svint8x3_t svreinterpret_s8(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_bf16_x3))) +svint8x3_t svreinterpret_s8(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f32_x3))) +svint8x3_t svreinterpret_s8(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f64_x3))) +svint8x3_t svreinterpret_s8(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s8_x3))) +svuint8x3_t svreinterpret_u8(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u8_x3))) +svuint8x3_t svreinterpret_u8(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s16_x3))) +svuint8x3_t svreinterpret_u8(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u16_x3))) +svuint8x3_t svreinterpret_u8(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s32_x3))) +svuint8x3_t svreinterpret_u8(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u32_x3))) +svuint8x3_t svreinterpret_u8(svuint32x3_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s64_x3))) +svuint8x3_t svreinterpret_u8(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u64_x3))) +svuint8x3_t svreinterpret_u8(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f16_x3))) +svuint8x3_t svreinterpret_u8(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_bf16_x3))) +svuint8x3_t svreinterpret_u8(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f32_x3))) +svuint8x3_t svreinterpret_u8(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f64_x3))) +svuint8x3_t svreinterpret_u8(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s8_x3))) +svint16x3_t svreinterpret_s16(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u8_x3))) +svint16x3_t svreinterpret_s16(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s16_x3))) +svint16x3_t svreinterpret_s16(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u16_x3))) +svint16x3_t svreinterpret_s16(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s32_x3))) +svint16x3_t svreinterpret_s16(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u32_x3))) +svint16x3_t svreinterpret_s16(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s64_x3))) +svint16x3_t svreinterpret_s16(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u64_x3))) +svint16x3_t svreinterpret_s16(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f16_x3))) +svint16x3_t 
svreinterpret_s16(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_bf16_x3))) +svint16x3_t svreinterpret_s16(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f32_x3))) +svint16x3_t svreinterpret_s16(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f64_x3))) +svint16x3_t svreinterpret_s16(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s8_x3))) +svuint16x3_t svreinterpret_u16(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u8_x3))) +svuint16x3_t svreinterpret_u16(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s16_x3))) +svuint16x3_t svreinterpret_u16(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u16_x3))) +svuint16x3_t svreinterpret_u16(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s32_x3))) +svuint16x3_t svreinterpret_u16(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u32_x3))) +svuint16x3_t svreinterpret_u16(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s64_x3))) +svuint16x3_t svreinterpret_u16(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u64_x3))) +svuint16x3_t svreinterpret_u16(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f16_x3))) +svuint16x3_t svreinterpret_u16(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_bf16_x3))) +svuint16x3_t svreinterpret_u16(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f32_x3))) +svuint16x3_t svreinterpret_u16(svfloat32x3_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f64_x3))) +svuint16x3_t svreinterpret_u16(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s8_x3))) +svint32x3_t svreinterpret_s32(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u8_x3))) +svint32x3_t svreinterpret_s32(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s16_x3))) +svint32x3_t svreinterpret_s32(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u16_x3))) +svint32x3_t svreinterpret_s32(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s32_x3))) +svint32x3_t svreinterpret_s32(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u32_x3))) +svint32x3_t svreinterpret_s32(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s64_x3))) +svint32x3_t svreinterpret_s32(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u64_x3))) +svint32x3_t svreinterpret_s32(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f16_x3))) +svint32x3_t svreinterpret_s32(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_bf16_x3))) +svint32x3_t svreinterpret_s32(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f32_x3))) +svint32x3_t svreinterpret_s32(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f64_x3))) +svint32x3_t svreinterpret_s32(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s8_x3))) +svuint32x3_t svreinterpret_u32(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u8_x3))) 
+svuint32x3_t svreinterpret_u32(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s16_x3))) +svuint32x3_t svreinterpret_u32(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u16_x3))) +svuint32x3_t svreinterpret_u32(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s32_x3))) +svuint32x3_t svreinterpret_u32(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u32_x3))) +svuint32x3_t svreinterpret_u32(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s64_x3))) +svuint32x3_t svreinterpret_u32(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u64_x3))) +svuint32x3_t svreinterpret_u32(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f16_x3))) +svuint32x3_t svreinterpret_u32(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_bf16_x3))) +svuint32x3_t svreinterpret_u32(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f32_x3))) +svuint32x3_t svreinterpret_u32(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f64_x3))) +svuint32x3_t svreinterpret_u32(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s8_x3))) +svint64x3_t svreinterpret_s64(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u8_x3))) +svint64x3_t svreinterpret_s64(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s16_x3))) +svint64x3_t svreinterpret_s64(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u16_x3))) +svint64x3_t svreinterpret_s64(svuint16x3_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s32_x3))) +svint64x3_t svreinterpret_s64(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u32_x3))) +svint64x3_t svreinterpret_s64(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s64_x3))) +svint64x3_t svreinterpret_s64(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u64_x3))) +svint64x3_t svreinterpret_s64(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f16_x3))) +svint64x3_t svreinterpret_s64(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_bf16_x3))) +svint64x3_t svreinterpret_s64(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f32_x3))) +svint64x3_t svreinterpret_s64(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f64_x3))) +svint64x3_t svreinterpret_s64(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s8_x3))) +svuint64x3_t svreinterpret_u64(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u8_x3))) +svuint64x3_t svreinterpret_u64(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s16_x3))) +svuint64x3_t svreinterpret_u64(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u16_x3))) +svuint64x3_t svreinterpret_u64(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s32_x3))) +svuint64x3_t svreinterpret_u64(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u32_x3))) +svuint64x3_t svreinterpret_u64(svuint32x3_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s64_x3))) +svuint64x3_t svreinterpret_u64(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u64_x3))) +svuint64x3_t svreinterpret_u64(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f16_x3))) +svuint64x3_t svreinterpret_u64(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_bf16_x3))) +svuint64x3_t svreinterpret_u64(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f32_x3))) +svuint64x3_t svreinterpret_u64(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f64_x3))) +svuint64x3_t svreinterpret_u64(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s8_x3))) +svfloat16x3_t svreinterpret_f16(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u8_x3))) +svfloat16x3_t svreinterpret_f16(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s16_x3))) +svfloat16x3_t svreinterpret_f16(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u16_x3))) +svfloat16x3_t svreinterpret_f16(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s32_x3))) +svfloat16x3_t svreinterpret_f16(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u32_x3))) +svfloat16x3_t svreinterpret_f16(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s64_x3))) +svfloat16x3_t svreinterpret_f16(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u64_x3))) +svfloat16x3_t svreinterpret_f16(svuint64x3_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f16_x3))) +svfloat16x3_t svreinterpret_f16(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_bf16_x3))) +svfloat16x3_t svreinterpret_f16(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f32_x3))) +svfloat16x3_t svreinterpret_f16(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f64_x3))) +svfloat16x3_t svreinterpret_f16(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s8_x3))) +svbfloat16x3_t svreinterpret_bf16(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u8_x3))) +svbfloat16x3_t svreinterpret_bf16(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s16_x3))) +svbfloat16x3_t svreinterpret_bf16(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u16_x3))) +svbfloat16x3_t svreinterpret_bf16(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s32_x3))) +svbfloat16x3_t svreinterpret_bf16(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u32_x3))) +svbfloat16x3_t svreinterpret_bf16(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s64_x3))) +svbfloat16x3_t svreinterpret_bf16(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u64_x3))) +svbfloat16x3_t svreinterpret_bf16(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f16_x3))) +svbfloat16x3_t svreinterpret_bf16(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_bf16_x3))) +svbfloat16x3_t svreinterpret_bf16(svbfloat16x3_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f32_x3))) +svbfloat16x3_t svreinterpret_bf16(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f64_x3))) +svbfloat16x3_t svreinterpret_bf16(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s8_x3))) +svfloat32x3_t svreinterpret_f32(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u8_x3))) +svfloat32x3_t svreinterpret_f32(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s16_x3))) +svfloat32x3_t svreinterpret_f32(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u16_x3))) +svfloat32x3_t svreinterpret_f32(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s32_x3))) +svfloat32x3_t svreinterpret_f32(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u32_x3))) +svfloat32x3_t svreinterpret_f32(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s64_x3))) +svfloat32x3_t svreinterpret_f32(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u64_x3))) +svfloat32x3_t svreinterpret_f32(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f16_x3))) +svfloat32x3_t svreinterpret_f32(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_bf16_x3))) +svfloat32x3_t svreinterpret_f32(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f32_x3))) +svfloat32x3_t svreinterpret_f32(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f64_x3))) +svfloat32x3_t svreinterpret_f32(svfloat64x3_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s8_x3))) +svfloat64x3_t svreinterpret_f64(svint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u8_x3))) +svfloat64x3_t svreinterpret_f64(svuint8x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s16_x3))) +svfloat64x3_t svreinterpret_f64(svint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u16_x3))) +svfloat64x3_t svreinterpret_f64(svuint16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s32_x3))) +svfloat64x3_t svreinterpret_f64(svint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u32_x3))) +svfloat64x3_t svreinterpret_f64(svuint32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s64_x3))) +svfloat64x3_t svreinterpret_f64(svint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u64_x3))) +svfloat64x3_t svreinterpret_f64(svuint64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f16_x3))) +svfloat64x3_t svreinterpret_f64(svfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_bf16_x3))) +svfloat64x3_t svreinterpret_f64(svbfloat16x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f32_x3))) +svfloat64x3_t svreinterpret_f64(svfloat32x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f64_x3))) +svfloat64x3_t svreinterpret_f64(svfloat64x3_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s8_x4))) +svint8x4_t svreinterpret_s8_s8_x4(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u8_x4))) +svint8x4_t svreinterpret_s8_u8_x4(svuint8x4_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s16_x4))) +svint8x4_t svreinterpret_s8_s16_x4(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u16_x4))) +svint8x4_t svreinterpret_s8_u16_x4(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s32_x4))) +svint8x4_t svreinterpret_s8_s32_x4(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u32_x4))) +svint8x4_t svreinterpret_s8_u32_x4(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s64_x4))) +svint8x4_t svreinterpret_s8_s64_x4(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u64_x4))) +svint8x4_t svreinterpret_s8_u64_x4(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f16_x4))) +svint8x4_t svreinterpret_s8_f16_x4(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_bf16_x4))) +svint8x4_t svreinterpret_s8_bf16_x4(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f32_x4))) +svint8x4_t svreinterpret_s8_f32_x4(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f64_x4))) +svint8x4_t svreinterpret_s8_f64_x4(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s8_x4))) +svuint8x4_t svreinterpret_u8_s8_x4(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u8_x4))) +svuint8x4_t svreinterpret_u8_u8_x4(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s16_x4))) +svuint8x4_t svreinterpret_u8_s16_x4(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u16_x4))) +svuint8x4_t svreinterpret_u8_u16_x4(svuint16x4_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s32_x4))) +svuint8x4_t svreinterpret_u8_s32_x4(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u32_x4))) +svuint8x4_t svreinterpret_u8_u32_x4(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s64_x4))) +svuint8x4_t svreinterpret_u8_s64_x4(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u64_x4))) +svuint8x4_t svreinterpret_u8_u64_x4(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f16_x4))) +svuint8x4_t svreinterpret_u8_f16_x4(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_bf16_x4))) +svuint8x4_t svreinterpret_u8_bf16_x4(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f32_x4))) +svuint8x4_t svreinterpret_u8_f32_x4(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f64_x4))) +svuint8x4_t svreinterpret_u8_f64_x4(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s8_x4))) +svint16x4_t svreinterpret_s16_s8_x4(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u8_x4))) +svint16x4_t svreinterpret_s16_u8_x4(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s16_x4))) +svint16x4_t svreinterpret_s16_s16_x4(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u16_x4))) +svint16x4_t svreinterpret_s16_u16_x4(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s32_x4))) +svint16x4_t svreinterpret_s16_s32_x4(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u32_x4))) +svint16x4_t svreinterpret_s16_u32_x4(svuint32x4_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s64_x4))) +svint16x4_t svreinterpret_s16_s64_x4(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u64_x4))) +svint16x4_t svreinterpret_s16_u64_x4(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f16_x4))) +svint16x4_t svreinterpret_s16_f16_x4(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_bf16_x4))) +svint16x4_t svreinterpret_s16_bf16_x4(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f32_x4))) +svint16x4_t svreinterpret_s16_f32_x4(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f64_x4))) +svint16x4_t svreinterpret_s16_f64_x4(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s8_x4))) +svuint16x4_t svreinterpret_u16_s8_x4(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u8_x4))) +svuint16x4_t svreinterpret_u16_u8_x4(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s16_x4))) +svuint16x4_t svreinterpret_u16_s16_x4(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u16_x4))) +svuint16x4_t svreinterpret_u16_u16_x4(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s32_x4))) +svuint16x4_t svreinterpret_u16_s32_x4(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u32_x4))) +svuint16x4_t svreinterpret_u16_u32_x4(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s64_x4))) +svuint16x4_t svreinterpret_u16_s64_x4(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u64_x4))) +svuint16x4_t 
svreinterpret_u16_u64_x4(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f16_x4))) +svuint16x4_t svreinterpret_u16_f16_x4(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_bf16_x4))) +svuint16x4_t svreinterpret_u16_bf16_x4(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f32_x4))) +svuint16x4_t svreinterpret_u16_f32_x4(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f64_x4))) +svuint16x4_t svreinterpret_u16_f64_x4(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s8_x4))) +svint32x4_t svreinterpret_s32_s8_x4(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u8_x4))) +svint32x4_t svreinterpret_s32_u8_x4(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s16_x4))) +svint32x4_t svreinterpret_s32_s16_x4(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u16_x4))) +svint32x4_t svreinterpret_s32_u16_x4(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s32_x4))) +svint32x4_t svreinterpret_s32_s32_x4(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u32_x4))) +svint32x4_t svreinterpret_s32_u32_x4(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s64_x4))) +svint32x4_t svreinterpret_s32_s64_x4(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u64_x4))) +svint32x4_t svreinterpret_s32_u64_x4(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f16_x4))) +svint32x4_t svreinterpret_s32_f16_x4(svfloat16x4_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_bf16_x4))) +svint32x4_t svreinterpret_s32_bf16_x4(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f32_x4))) +svint32x4_t svreinterpret_s32_f32_x4(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f64_x4))) +svint32x4_t svreinterpret_s32_f64_x4(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s8_x4))) +svuint32x4_t svreinterpret_u32_s8_x4(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u8_x4))) +svuint32x4_t svreinterpret_u32_u8_x4(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s16_x4))) +svuint32x4_t svreinterpret_u32_s16_x4(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u16_x4))) +svuint32x4_t svreinterpret_u32_u16_x4(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s32_x4))) +svuint32x4_t svreinterpret_u32_s32_x4(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u32_x4))) +svuint32x4_t svreinterpret_u32_u32_x4(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s64_x4))) +svuint32x4_t svreinterpret_u32_s64_x4(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u64_x4))) +svuint32x4_t svreinterpret_u32_u64_x4(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f16_x4))) +svuint32x4_t svreinterpret_u32_f16_x4(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_bf16_x4))) +svuint32x4_t svreinterpret_u32_bf16_x4(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f32_x4))) +svuint32x4_t 
svreinterpret_u32_f32_x4(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f64_x4))) +svuint32x4_t svreinterpret_u32_f64_x4(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s8_x4))) +svint64x4_t svreinterpret_s64_s8_x4(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u8_x4))) +svint64x4_t svreinterpret_s64_u8_x4(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s16_x4))) +svint64x4_t svreinterpret_s64_s16_x4(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u16_x4))) +svint64x4_t svreinterpret_s64_u16_x4(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s32_x4))) +svint64x4_t svreinterpret_s64_s32_x4(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u32_x4))) +svint64x4_t svreinterpret_s64_u32_x4(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s64_x4))) +svint64x4_t svreinterpret_s64_s64_x4(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u64_x4))) +svint64x4_t svreinterpret_s64_u64_x4(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f16_x4))) +svint64x4_t svreinterpret_s64_f16_x4(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_bf16_x4))) +svint64x4_t svreinterpret_s64_bf16_x4(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f32_x4))) +svint64x4_t svreinterpret_s64_f32_x4(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f64_x4))) +svint64x4_t svreinterpret_s64_f64_x4(svfloat64x4_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s8_x4))) +svuint64x4_t svreinterpret_u64_s8_x4(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u8_x4))) +svuint64x4_t svreinterpret_u64_u8_x4(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s16_x4))) +svuint64x4_t svreinterpret_u64_s16_x4(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u16_x4))) +svuint64x4_t svreinterpret_u64_u16_x4(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s32_x4))) +svuint64x4_t svreinterpret_u64_s32_x4(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u32_x4))) +svuint64x4_t svreinterpret_u64_u32_x4(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s64_x4))) +svuint64x4_t svreinterpret_u64_s64_x4(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u64_x4))) +svuint64x4_t svreinterpret_u64_u64_x4(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f16_x4))) +svuint64x4_t svreinterpret_u64_f16_x4(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_bf16_x4))) +svuint64x4_t svreinterpret_u64_bf16_x4(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f32_x4))) +svuint64x4_t svreinterpret_u64_f32_x4(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f64_x4))) +svuint64x4_t svreinterpret_u64_f64_x4(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s8_x4))) +svfloat16x4_t svreinterpret_f16_s8_x4(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u8_x4))) +svfloat16x4_t 
svreinterpret_f16_u8_x4(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s16_x4))) +svfloat16x4_t svreinterpret_f16_s16_x4(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u16_x4))) +svfloat16x4_t svreinterpret_f16_u16_x4(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s32_x4))) +svfloat16x4_t svreinterpret_f16_s32_x4(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u32_x4))) +svfloat16x4_t svreinterpret_f16_u32_x4(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s64_x4))) +svfloat16x4_t svreinterpret_f16_s64_x4(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u64_x4))) +svfloat16x4_t svreinterpret_f16_u64_x4(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f16_x4))) +svfloat16x4_t svreinterpret_f16_f16_x4(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_bf16_x4))) +svfloat16x4_t svreinterpret_f16_bf16_x4(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f32_x4))) +svfloat16x4_t svreinterpret_f16_f32_x4(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f64_x4))) +svfloat16x4_t svreinterpret_f16_f64_x4(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s8_x4))) +svbfloat16x4_t svreinterpret_bf16_s8_x4(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u8_x4))) +svbfloat16x4_t svreinterpret_bf16_u8_x4(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s16_x4))) +svbfloat16x4_t svreinterpret_bf16_s16_x4(svint16x4_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u16_x4))) +svbfloat16x4_t svreinterpret_bf16_u16_x4(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s32_x4))) +svbfloat16x4_t svreinterpret_bf16_s32_x4(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u32_x4))) +svbfloat16x4_t svreinterpret_bf16_u32_x4(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s64_x4))) +svbfloat16x4_t svreinterpret_bf16_s64_x4(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u64_x4))) +svbfloat16x4_t svreinterpret_bf16_u64_x4(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f16_x4))) +svbfloat16x4_t svreinterpret_bf16_f16_x4(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_bf16_x4))) +svbfloat16x4_t svreinterpret_bf16_bf16_x4(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f32_x4))) +svbfloat16x4_t svreinterpret_bf16_f32_x4(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f64_x4))) +svbfloat16x4_t svreinterpret_bf16_f64_x4(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s8_x4))) +svfloat32x4_t svreinterpret_f32_s8_x4(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u8_x4))) +svfloat32x4_t svreinterpret_f32_u8_x4(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s16_x4))) +svfloat32x4_t svreinterpret_f32_s16_x4(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u16_x4))) +svfloat32x4_t svreinterpret_f32_u16_x4(svuint16x4_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s32_x4))) +svfloat32x4_t svreinterpret_f32_s32_x4(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u32_x4))) +svfloat32x4_t svreinterpret_f32_u32_x4(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s64_x4))) +svfloat32x4_t svreinterpret_f32_s64_x4(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u64_x4))) +svfloat32x4_t svreinterpret_f32_u64_x4(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f16_x4))) +svfloat32x4_t svreinterpret_f32_f16_x4(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_bf16_x4))) +svfloat32x4_t svreinterpret_f32_bf16_x4(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f32_x4))) +svfloat32x4_t svreinterpret_f32_f32_x4(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f64_x4))) +svfloat32x4_t svreinterpret_f32_f64_x4(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s8_x4))) +svfloat64x4_t svreinterpret_f64_s8_x4(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u8_x4))) +svfloat64x4_t svreinterpret_f64_u8_x4(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s16_x4))) +svfloat64x4_t svreinterpret_f64_s16_x4(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u16_x4))) +svfloat64x4_t svreinterpret_f64_u16_x4(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s32_x4))) +svfloat64x4_t svreinterpret_f64_s32_x4(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u32_x4))) +svfloat64x4_t 
svreinterpret_f64_u32_x4(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s64_x4))) +svfloat64x4_t svreinterpret_f64_s64_x4(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u64_x4))) +svfloat64x4_t svreinterpret_f64_u64_x4(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f16_x4))) +svfloat64x4_t svreinterpret_f64_f16_x4(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_bf16_x4))) +svfloat64x4_t svreinterpret_f64_bf16_x4(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f32_x4))) +svfloat64x4_t svreinterpret_f64_f32_x4(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f64_x4))) +svfloat64x4_t svreinterpret_f64_f64_x4(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s8_x4))) +svint8x4_t svreinterpret_s8(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u8_x4))) +svint8x4_t svreinterpret_s8(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s16_x4))) +svint8x4_t svreinterpret_s8(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u16_x4))) +svint8x4_t svreinterpret_s8(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s32_x4))) +svint8x4_t svreinterpret_s8(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u32_x4))) +svint8x4_t svreinterpret_s8(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_s64_x4))) +svint8x4_t svreinterpret_s8(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_u64_x4))) +svint8x4_t svreinterpret_s8(svuint64x4_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f16_x4))) +svint8x4_t svreinterpret_s8(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_bf16_x4))) +svint8x4_t svreinterpret_s8(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f32_x4))) +svint8x4_t svreinterpret_s8(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s8_f64_x4))) +svint8x4_t svreinterpret_s8(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s8_x4))) +svuint8x4_t svreinterpret_u8(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u8_x4))) +svuint8x4_t svreinterpret_u8(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s16_x4))) +svuint8x4_t svreinterpret_u8(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u16_x4))) +svuint8x4_t svreinterpret_u8(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s32_x4))) +svuint8x4_t svreinterpret_u8(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u32_x4))) +svuint8x4_t svreinterpret_u8(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_s64_x4))) +svuint8x4_t svreinterpret_u8(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_u64_x4))) +svuint8x4_t svreinterpret_u8(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f16_x4))) +svuint8x4_t svreinterpret_u8(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_bf16_x4))) +svuint8x4_t svreinterpret_u8(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f32_x4))) +svuint8x4_t 
svreinterpret_u8(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u8_f64_x4))) +svuint8x4_t svreinterpret_u8(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s8_x4))) +svint16x4_t svreinterpret_s16(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u8_x4))) +svint16x4_t svreinterpret_s16(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s16_x4))) +svint16x4_t svreinterpret_s16(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u16_x4))) +svint16x4_t svreinterpret_s16(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s32_x4))) +svint16x4_t svreinterpret_s16(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u32_x4))) +svint16x4_t svreinterpret_s16(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_s64_x4))) +svint16x4_t svreinterpret_s16(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_u64_x4))) +svint16x4_t svreinterpret_s16(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f16_x4))) +svint16x4_t svreinterpret_s16(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_bf16_x4))) +svint16x4_t svreinterpret_s16(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f32_x4))) +svint16x4_t svreinterpret_s16(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s16_f64_x4))) +svint16x4_t svreinterpret_s16(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s8_x4))) +svuint16x4_t svreinterpret_u16(svint8x4_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u8_x4))) +svuint16x4_t svreinterpret_u16(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s16_x4))) +svuint16x4_t svreinterpret_u16(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u16_x4))) +svuint16x4_t svreinterpret_u16(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s32_x4))) +svuint16x4_t svreinterpret_u16(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u32_x4))) +svuint16x4_t svreinterpret_u16(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_s64_x4))) +svuint16x4_t svreinterpret_u16(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_u64_x4))) +svuint16x4_t svreinterpret_u16(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f16_x4))) +svuint16x4_t svreinterpret_u16(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_bf16_x4))) +svuint16x4_t svreinterpret_u16(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f32_x4))) +svuint16x4_t svreinterpret_u16(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u16_f64_x4))) +svuint16x4_t svreinterpret_u16(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s8_x4))) +svint32x4_t svreinterpret_s32(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u8_x4))) +svint32x4_t svreinterpret_s32(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s16_x4))) +svint32x4_t svreinterpret_s32(svint16x4_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u16_x4))) +svint32x4_t svreinterpret_s32(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s32_x4))) +svint32x4_t svreinterpret_s32(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u32_x4))) +svint32x4_t svreinterpret_s32(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_s64_x4))) +svint32x4_t svreinterpret_s32(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_u64_x4))) +svint32x4_t svreinterpret_s32(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f16_x4))) +svint32x4_t svreinterpret_s32(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_bf16_x4))) +svint32x4_t svreinterpret_s32(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f32_x4))) +svint32x4_t svreinterpret_s32(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s32_f64_x4))) +svint32x4_t svreinterpret_s32(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s8_x4))) +svuint32x4_t svreinterpret_u32(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u8_x4))) +svuint32x4_t svreinterpret_u32(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s16_x4))) +svuint32x4_t svreinterpret_u32(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u16_x4))) +svuint32x4_t svreinterpret_u32(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s32_x4))) +svuint32x4_t svreinterpret_u32(svint32x4_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u32_x4))) +svuint32x4_t svreinterpret_u32(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_s64_x4))) +svuint32x4_t svreinterpret_u32(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_u64_x4))) +svuint32x4_t svreinterpret_u32(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f16_x4))) +svuint32x4_t svreinterpret_u32(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_bf16_x4))) +svuint32x4_t svreinterpret_u32(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f32_x4))) +svuint32x4_t svreinterpret_u32(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u32_f64_x4))) +svuint32x4_t svreinterpret_u32(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s8_x4))) +svint64x4_t svreinterpret_s64(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u8_x4))) +svint64x4_t svreinterpret_s64(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s16_x4))) +svint64x4_t svreinterpret_s64(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u16_x4))) +svint64x4_t svreinterpret_s64(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s32_x4))) +svint64x4_t svreinterpret_s64(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u32_x4))) +svint64x4_t svreinterpret_s64(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_s64_x4))) +svint64x4_t svreinterpret_s64(svint64x4_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_u64_x4))) +svint64x4_t svreinterpret_s64(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f16_x4))) +svint64x4_t svreinterpret_s64(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_bf16_x4))) +svint64x4_t svreinterpret_s64(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f32_x4))) +svint64x4_t svreinterpret_s64(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_s64_f64_x4))) +svint64x4_t svreinterpret_s64(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s8_x4))) +svuint64x4_t svreinterpret_u64(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u8_x4))) +svuint64x4_t svreinterpret_u64(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s16_x4))) +svuint64x4_t svreinterpret_u64(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u16_x4))) +svuint64x4_t svreinterpret_u64(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s32_x4))) +svuint64x4_t svreinterpret_u64(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u32_x4))) +svuint64x4_t svreinterpret_u64(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_s64_x4))) +svuint64x4_t svreinterpret_u64(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_u64_x4))) +svuint64x4_t svreinterpret_u64(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f16_x4))) +svuint64x4_t svreinterpret_u64(svfloat16x4_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_bf16_x4))) +svuint64x4_t svreinterpret_u64(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f32_x4))) +svuint64x4_t svreinterpret_u64(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_u64_f64_x4))) +svuint64x4_t svreinterpret_u64(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s8_x4))) +svfloat16x4_t svreinterpret_f16(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u8_x4))) +svfloat16x4_t svreinterpret_f16(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s16_x4))) +svfloat16x4_t svreinterpret_f16(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u16_x4))) +svfloat16x4_t svreinterpret_f16(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s32_x4))) +svfloat16x4_t svreinterpret_f16(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u32_x4))) +svfloat16x4_t svreinterpret_f16(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_s64_x4))) +svfloat16x4_t svreinterpret_f16(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_u64_x4))) +svfloat16x4_t svreinterpret_f16(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f16_x4))) +svfloat16x4_t svreinterpret_f16(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_bf16_x4))) +svfloat16x4_t svreinterpret_f16(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f32_x4))) +svfloat16x4_t svreinterpret_f16(svfloat32x4_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f16_f64_x4))) +svfloat16x4_t svreinterpret_f16(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s8_x4))) +svbfloat16x4_t svreinterpret_bf16(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u8_x4))) +svbfloat16x4_t svreinterpret_bf16(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s16_x4))) +svbfloat16x4_t svreinterpret_bf16(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u16_x4))) +svbfloat16x4_t svreinterpret_bf16(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s32_x4))) +svbfloat16x4_t svreinterpret_bf16(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u32_x4))) +svbfloat16x4_t svreinterpret_bf16(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_s64_x4))) +svbfloat16x4_t svreinterpret_bf16(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_u64_x4))) +svbfloat16x4_t svreinterpret_bf16(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f16_x4))) +svbfloat16x4_t svreinterpret_bf16(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_bf16_x4))) +svbfloat16x4_t svreinterpret_bf16(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f32_x4))) +svbfloat16x4_t svreinterpret_bf16(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_bf16_f64_x4))) +svbfloat16x4_t svreinterpret_bf16(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s8_x4))) +svfloat32x4_t svreinterpret_f32(svint8x4_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u8_x4))) +svfloat32x4_t svreinterpret_f32(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s16_x4))) +svfloat32x4_t svreinterpret_f32(svint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u16_x4))) +svfloat32x4_t svreinterpret_f32(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s32_x4))) +svfloat32x4_t svreinterpret_f32(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u32_x4))) +svfloat32x4_t svreinterpret_f32(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_s64_x4))) +svfloat32x4_t svreinterpret_f32(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_u64_x4))) +svfloat32x4_t svreinterpret_f32(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f16_x4))) +svfloat32x4_t svreinterpret_f32(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_bf16_x4))) +svfloat32x4_t svreinterpret_f32(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f32_x4))) +svfloat32x4_t svreinterpret_f32(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f32_f64_x4))) +svfloat32x4_t svreinterpret_f32(svfloat64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s8_x4))) +svfloat64x4_t svreinterpret_f64(svint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u8_x4))) +svfloat64x4_t svreinterpret_f64(svuint8x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s16_x4))) +svfloat64x4_t svreinterpret_f64(svint16x4_t op); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u16_x4))) +svfloat64x4_t svreinterpret_f64(svuint16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s32_x4))) +svfloat64x4_t svreinterpret_f64(svint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u32_x4))) +svfloat64x4_t svreinterpret_f64(svuint32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_s64_x4))) +svfloat64x4_t svreinterpret_f64(svint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_u64_x4))) +svfloat64x4_t svreinterpret_f64(svuint64x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f16_x4))) +svfloat64x4_t svreinterpret_f64(svfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_bf16_x4))) +svfloat64x4_t svreinterpret_f64(svbfloat16x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f32_x4))) +svfloat64x4_t svreinterpret_f64(svfloat32x4_t op); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_reinterpret_f64_f64_x4))) +svfloat64x4_t svreinterpret_f64(svfloat64x4_t op); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_x2))) +svfloat32x2_t svcvt_f32_f16_x2(svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtl_f32_f16_x2))) +svfloat32x2_t svcvtl_f32_f16_x2(svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_x2))) +svfloat32x2_t svcvt_f32(svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtl_f32_f16_x2))) +svfloat32x2_t svcvtl_f32(svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u8_x2))) +svuint8x2_t svadd_single_u8_x2(svuint8x2_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u32_x2))) +svuint32x2_t 
svadd_single_u32_x2(svuint32x2_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u64_x2))) +svuint64x2_t svadd_single_u64_x2(svuint64x2_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u16_x2))) +svuint16x2_t svadd_single_u16_x2(svuint16x2_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s8_x2))) +svint8x2_t svadd_single_s8_x2(svint8x2_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s32_x2))) +svint32x2_t svadd_single_s32_x2(svint32x2_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s64_x2))) +svint64x2_t svadd_single_s64_x2(svint64x2_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s16_x2))) +svint16x2_t svadd_single_s16_x2(svint16x2_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u8_x4))) +svuint8x4_t svadd_single_u8_x4(svuint8x4_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u32_x4))) +svuint32x4_t svadd_single_u32_x4(svuint32x4_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u64_x4))) +svuint64x4_t svadd_single_u64_x4(svuint64x4_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u16_x4))) +svuint16x4_t svadd_single_u16_x4(svuint16x4_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s8_x4))) +svint8x4_t svadd_single_s8_x4(svint8x4_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s32_x4))) +svint32x4_t svadd_single_s32_x4(svint32x4_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s64_x4))) +svint64x4_t svadd_single_s64_x4(svint64x4_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s16_x4))) +svint16x4_t 
svadd_single_s16_x4(svint16x4_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f64_x2))) +svfloat64x2_t svclamp_single_f64_x2(svfloat64x2_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f32_x2))) +svfloat32x2_t svclamp_single_f32_x2(svfloat32x2_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f16_x2))) +svfloat16x2_t svclamp_single_f16_x2(svfloat16x2_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s8_x2))) +svint8x2_t svclamp_single_s8_x2(svint8x2_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s32_x2))) +svint32x2_t svclamp_single_s32_x2(svint32x2_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s64_x2))) +svint64x2_t svclamp_single_s64_x2(svint64x2_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s16_x2))) +svint16x2_t svclamp_single_s16_x2(svint16x2_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u8_x2))) +svuint8x2_t svclamp_single_u8_x2(svuint8x2_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u32_x2))) +svuint32x2_t svclamp_single_u32_x2(svuint32x2_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u64_x2))) +svuint64x2_t svclamp_single_u64_x2(svuint64x2_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u16_x2))) +svuint16x2_t svclamp_single_u16_x2(svuint16x2_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f64_x4))) +svfloat64x4_t svclamp_single_f64_x4(svfloat64x4_t, svfloat64_t, svfloat64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f32_x4))) +svfloat32x4_t svclamp_single_f32_x4(svfloat32x4_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f16_x4))) +svfloat16x4_t svclamp_single_f16_x4(svfloat16x4_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s8_x4))) +svint8x4_t svclamp_single_s8_x4(svint8x4_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s32_x4))) +svint32x4_t svclamp_single_s32_x4(svint32x4_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s64_x4))) +svint64x4_t svclamp_single_s64_x4(svint64x4_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s16_x4))) +svint16x4_t svclamp_single_s16_x4(svint16x4_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u8_x4))) +svuint8x4_t svclamp_single_u8_x4(svuint8x4_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u32_x4))) +svuint32x4_t svclamp_single_u32_x4(svuint32x4_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u64_x4))) +svuint64x4_t svclamp_single_u64_x4(svuint64x4_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u16_x4))) +svuint16x4_t svclamp_single_u16_x4(svuint16x4_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x2))) +svbfloat16_t svcvt_bf16_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_x2))) +svfloat16_t svcvt_f16_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x2))) +svint32x2_t svcvt_s32_f32_x2(svfloat32x2_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x2))) +svuint32x2_t svcvt_u32_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x4))) +svint32x4_t svcvt_s32_f32_x4(svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x4))) +svuint32x4_t svcvt_u32_f32_x4(svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x2))) +svfloat32x2_t svcvt_f32_s32_x2(svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x4))) +svfloat32x4_t svcvt_f32_s32_x4(svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x2))) +svfloat32x2_t svcvt_f32_u32_x2(svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x4))) +svfloat32x4_t svcvt_f32_u32_x4(svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtn_bf16_f32_x2))) +svbfloat16_t svcvtn_bf16_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtn_f16_f32_x2))) +svfloat16_t svcvtn_f16_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f64_x2))) +svfloat64x2_t svmax_single_f64_x2(svfloat64x2_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f32_x2))) +svfloat32x2_t svmax_single_f32_x2(svfloat32x2_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f16_x2))) +svfloat16x2_t svmax_single_f16_x2(svfloat16x2_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s8_x2))) +svint8x2_t svmax_single_s8_x2(svint8x2_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s32_x2))) +svint32x2_t svmax_single_s32_x2(svint32x2_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s64_x2))) +svint64x2_t 
svmax_single_s64_x2(svint64x2_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s16_x2))) +svint16x2_t svmax_single_s16_x2(svint16x2_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u8_x2))) +svuint8x2_t svmax_single_u8_x2(svuint8x2_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u32_x2))) +svuint32x2_t svmax_single_u32_x2(svuint32x2_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u64_x2))) +svuint64x2_t svmax_single_u64_x2(svuint64x2_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u16_x2))) +svuint16x2_t svmax_single_u16_x2(svuint16x2_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f64_x4))) +svfloat64x4_t svmax_single_f64_x4(svfloat64x4_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f32_x4))) +svfloat32x4_t svmax_single_f32_x4(svfloat32x4_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f16_x4))) +svfloat16x4_t svmax_single_f16_x4(svfloat16x4_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s8_x4))) +svint8x4_t svmax_single_s8_x4(svint8x4_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s32_x4))) +svint32x4_t svmax_single_s32_x4(svint32x4_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s64_x4))) +svint64x4_t svmax_single_s64_x4(svint64x4_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s16_x4))) +svint16x4_t svmax_single_s16_x4(svint16x4_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u8_x4))) +svuint8x4_t svmax_single_u8_x4(svuint8x4_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u32_x4))) 
+svuint32x4_t svmax_single_u32_x4(svuint32x4_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u64_x4))) +svuint64x4_t svmax_single_u64_x4(svuint64x4_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u16_x4))) +svuint16x4_t svmax_single_u16_x4(svuint16x4_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x2))) +svfloat64x2_t svmax_f64_x2(svfloat64x2_t, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x2))) +svfloat32x2_t svmax_f32_x2(svfloat32x2_t, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x2))) +svfloat16x2_t svmax_f16_x2(svfloat16x2_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x2))) +svint8x2_t svmax_s8_x2(svint8x2_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x2))) +svint32x2_t svmax_s32_x2(svint32x2_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x2))) +svint64x2_t svmax_s64_x2(svint64x2_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x2))) +svint16x2_t svmax_s16_x2(svint16x2_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x2))) +svuint8x2_t svmax_u8_x2(svuint8x2_t, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x2))) +svuint32x2_t svmax_u32_x2(svuint32x2_t, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x2))) +svuint64x2_t svmax_u64_x2(svuint64x2_t, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x2))) +svuint16x2_t svmax_u16_x2(svuint16x2_t, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x4))) +svfloat64x4_t svmax_f64_x4(svfloat64x4_t, svfloat64x4_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x4))) +svfloat32x4_t svmax_f32_x4(svfloat32x4_t, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x4))) +svfloat16x4_t svmax_f16_x4(svfloat16x4_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x4))) +svint8x4_t svmax_s8_x4(svint8x4_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x4))) +svint32x4_t svmax_s32_x4(svint32x4_t, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x4))) +svint64x4_t svmax_s64_x4(svint64x4_t, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x4))) +svint16x4_t svmax_s16_x4(svint16x4_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x4))) +svuint8x4_t svmax_u8_x4(svuint8x4_t, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x4))) +svuint32x4_t svmax_u32_x4(svuint32x4_t, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x4))) +svuint64x4_t svmax_u64_x4(svuint64x4_t, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x4))) +svuint16x4_t svmax_u16_x4(svuint16x4_t, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f64_x2))) +svfloat64x2_t svmaxnm_single_f64_x2(svfloat64x2_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f32_x2))) +svfloat32x2_t svmaxnm_single_f32_x2(svfloat32x2_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f16_x2))) +svfloat16x2_t svmaxnm_single_f16_x2(svfloat16x2_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f64_x4))) +svfloat64x4_t svmaxnm_single_f64_x4(svfloat64x4_t, svfloat64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f32_x4))) +svfloat32x4_t svmaxnm_single_f32_x4(svfloat32x4_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f16_x4))) +svfloat16x4_t svmaxnm_single_f16_x4(svfloat16x4_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x2))) +svfloat64x2_t svmaxnm_f64_x2(svfloat64x2_t, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x2))) +svfloat32x2_t svmaxnm_f32_x2(svfloat32x2_t, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x2))) +svfloat16x2_t svmaxnm_f16_x2(svfloat16x2_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x4))) +svfloat64x4_t svmaxnm_f64_x4(svfloat64x4_t, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x4))) +svfloat32x4_t svmaxnm_f32_x4(svfloat32x4_t, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x4))) +svfloat16x4_t svmaxnm_f16_x4(svfloat16x4_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f64_x2))) +svfloat64x2_t svmin_single_f64_x2(svfloat64x2_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f32_x2))) +svfloat32x2_t svmin_single_f32_x2(svfloat32x2_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f16_x2))) +svfloat16x2_t svmin_single_f16_x2(svfloat16x2_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s8_x2))) +svint8x2_t svmin_single_s8_x2(svint8x2_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s32_x2))) +svint32x2_t svmin_single_s32_x2(svint32x2_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s64_x2))) +svint64x2_t svmin_single_s64_x2(svint64x2_t, 
svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s16_x2))) +svint16x2_t svmin_single_s16_x2(svint16x2_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u8_x2))) +svuint8x2_t svmin_single_u8_x2(svuint8x2_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u32_x2))) +svuint32x2_t svmin_single_u32_x2(svuint32x2_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u64_x2))) +svuint64x2_t svmin_single_u64_x2(svuint64x2_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u16_x2))) +svuint16x2_t svmin_single_u16_x2(svuint16x2_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f64_x4))) +svfloat64x4_t svmin_single_f64_x4(svfloat64x4_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f32_x4))) +svfloat32x4_t svmin_single_f32_x4(svfloat32x4_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f16_x4))) +svfloat16x4_t svmin_single_f16_x4(svfloat16x4_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s8_x4))) +svint8x4_t svmin_single_s8_x4(svint8x4_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s32_x4))) +svint32x4_t svmin_single_s32_x4(svint32x4_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s64_x4))) +svint64x4_t svmin_single_s64_x4(svint64x4_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s16_x4))) +svint16x4_t svmin_single_s16_x4(svint16x4_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u8_x4))) +svuint8x4_t svmin_single_u8_x4(svuint8x4_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u32_x4))) +svuint32x4_t 
svmin_single_u32_x4(svuint32x4_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u64_x4))) +svuint64x4_t svmin_single_u64_x4(svuint64x4_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u16_x4))) +svuint16x4_t svmin_single_u16_x4(svuint16x4_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x2))) +svfloat64x2_t svmin_f64_x2(svfloat64x2_t, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x2))) +svfloat32x2_t svmin_f32_x2(svfloat32x2_t, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x2))) +svfloat16x2_t svmin_f16_x2(svfloat16x2_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x2))) +svint8x2_t svmin_s8_x2(svint8x2_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x2))) +svint32x2_t svmin_s32_x2(svint32x2_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x2))) +svint64x2_t svmin_s64_x2(svint64x2_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x2))) +svint16x2_t svmin_s16_x2(svint16x2_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x2))) +svuint8x2_t svmin_u8_x2(svuint8x2_t, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x2))) +svuint32x2_t svmin_u32_x2(svuint32x2_t, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x2))) +svuint64x2_t svmin_u64_x2(svuint64x2_t, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x2))) +svuint16x2_t svmin_u16_x2(svuint16x2_t, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x4))) +svfloat64x4_t svmin_f64_x4(svfloat64x4_t, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x4))) 
+svfloat32x4_t svmin_f32_x4(svfloat32x4_t, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x4))) +svfloat16x4_t svmin_f16_x4(svfloat16x4_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x4))) +svint8x4_t svmin_s8_x4(svint8x4_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x4))) +svint32x4_t svmin_s32_x4(svint32x4_t, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x4))) +svint64x4_t svmin_s64_x4(svint64x4_t, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x4))) +svint16x4_t svmin_s16_x4(svint16x4_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x4))) +svuint8x4_t svmin_u8_x4(svuint8x4_t, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x4))) +svuint32x4_t svmin_u32_x4(svuint32x4_t, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x4))) +svuint64x4_t svmin_u64_x4(svuint64x4_t, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x4))) +svuint16x4_t svmin_u16_x4(svuint16x4_t, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f64_x2))) +svfloat64x2_t svminnm_single_f64_x2(svfloat64x2_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f32_x2))) +svfloat32x2_t svminnm_single_f32_x2(svfloat32x2_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f16_x2))) +svfloat16x2_t svminnm_single_f16_x2(svfloat16x2_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f64_x4))) +svfloat64x4_t svminnm_single_f64_x4(svfloat64x4_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f32_x4))) +svfloat32x4_t svminnm_single_f32_x4(svfloat32x4_t, svfloat32_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f16_x4))) +svfloat16x4_t svminnm_single_f16_x4(svfloat16x4_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x2))) +svfloat64x2_t svminnm_f64_x2(svfloat64x2_t, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x2))) +svfloat32x2_t svminnm_f32_x2(svfloat32x2_t, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x2))) +svfloat16x2_t svminnm_f16_x2(svfloat16x2_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x4))) +svfloat64x4_t svminnm_f64_x4(svfloat64x4_t, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x4))) +svfloat32x4_t svminnm_f32_x4(svfloat32x4_t, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x4))) +svfloat16x4_t svminnm_f16_x4(svfloat16x4_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_s16_s32_x2))) +svint16_t svqcvt_s16_s32_x2(svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_s16_s64_x4))) +svint16_t svqcvt_s16_s64_x4(svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_s8_s32_x4))) +svint8_t svqcvt_s8_s32_x4(svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u16_s32_x2))) +svuint16_t svqcvt_u16_s32_x2(svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u16_u32_x2))) +svuint16_t svqcvt_u16_u32_x2(svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u16_s64_x4))) +svuint16_t svqcvt_u16_s64_x4(svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u16_u64_x4))) +svuint16_t svqcvt_u16_u64_x4(svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u8_s32_x4))) +svuint8_t svqcvt_u8_s32_x4(svint32x4_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u8_u32_x4))) +svuint8_t svqcvt_u8_u32_x4(svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_s16_s64_x4))) +svint16_t svqcvtn_s16_s64_x4(svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_s8_s32_x4))) +svint8_t svqcvtn_s8_s32_x4(svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_s64_x4))) +svuint16_t svqcvtn_u16_s64_x4(svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_u64_x4))) +svuint16_t svqcvtn_u16_u64_x4(svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u8_s32_x4))) +svuint8_t svqcvtn_u8_s32_x4(svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u8_u32_x4))) +svuint8_t svqcvtn_u8_u32_x4(svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s8_x2))) +svint8x2_t svqdmulh_single_s8_x2(svint8x2_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s32_x2))) +svint32x2_t svqdmulh_single_s32_x2(svint32x2_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s64_x2))) +svint64x2_t svqdmulh_single_s64_x2(svint64x2_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s16_x2))) +svint16x2_t svqdmulh_single_s16_x2(svint16x2_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s8_x4))) +svint8x4_t svqdmulh_single_s8_x4(svint8x4_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s32_x4))) +svint32x4_t svqdmulh_single_s32_x4(svint32x4_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s64_x4))) +svint64x4_t svqdmulh_single_s64_x4(svint64x4_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s16_x4))) 
+svint16x4_t svqdmulh_single_s16_x4(svint16x4_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8_x2))) +svint8x2_t svqdmulh_s8_x2(svint8x2_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32_x2))) +svint32x2_t svqdmulh_s32_x2(svint32x2_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64_x2))) +svint64x2_t svqdmulh_s64_x2(svint64x2_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16_x2))) +svint16x2_t svqdmulh_s16_x2(svint16x2_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8_x4))) +svint8x4_t svqdmulh_s8_x4(svint8x4_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32_x4))) +svint32x4_t svqdmulh_s32_x4(svint32x4_t, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64_x4))) +svint64x4_t svqdmulh_s64_x4(svint64x4_t, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16_x4))) +svint16x4_t svqdmulh_s16_x4(svint16x4_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_s16_s32_x2))) +svint16_t svqrshr_n_s16_s32_x2(svint32x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_u16_u32_x2))) +svuint16_t svqrshr_n_u16_u32_x2(svuint32x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_s8_s32_x4))) +svint8_t svqrshr_n_s8_s32_x4(svint32x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_s16_s64_x4))) +svint16_t svqrshr_n_s16_s64_x4(svint64x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_u8_u32_x4))) +svuint8_t svqrshr_n_u8_u32_x4(svuint32x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_u16_u64_x4))) +svuint16_t svqrshr_n_u16_u64_x4(svuint64x4_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_s8_s32_x4))) +svint8_t svqrshrn_n_s8_s32_x4(svint32x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_s16_s64_x4))) +svint16_t svqrshrn_n_s16_s64_x4(svint64x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_u8_u32_x4))) +svuint8_t svqrshrn_n_u8_u32_x4(svuint32x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_u16_u64_x4))) +svuint16_t svqrshrn_n_u16_u64_x4(svuint64x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshru_n_u16_s32_x2))) +svuint16_t svqrshru_n_u16_s32_x2(svint32x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshru_n_u8_s32_x4))) +svuint8_t svqrshru_n_u8_s32_x4(svint32x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshru_n_u16_s64_x4))) +svuint16_t svqrshru_n_u16_s64_x4(svint64x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrun_n_u8_s32_x4))) +svuint8_t svqrshrun_n_u8_s32_x4(svint32x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrun_n_u16_s64_x4))) +svuint16_t svqrshrun_n_u16_s64_x4(svint64x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x2))) +svfloat32x2_t svrinta_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x4))) +svfloat32x4_t svrinta_f32_x4(svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x2))) +svfloat32x2_t svrintm_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x4))) +svfloat32x4_t svrintm_f32_x4(svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x2))) +svfloat32x2_t svrintn_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x4))) 
+svfloat32x4_t svrintn_f32_x4(svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x2))) +svfloat32x2_t svrintp_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x4))) +svfloat32x4_t svrintp_f32_x4(svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s8_x2))) +svint8x2_t svrshl_single_s8_x2(svint8x2_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s32_x2))) +svint32x2_t svrshl_single_s32_x2(svint32x2_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s64_x2))) +svint64x2_t svrshl_single_s64_x2(svint64x2_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s16_x2))) +svint16x2_t svrshl_single_s16_x2(svint16x2_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u8_x2))) +svuint8x2_t svrshl_single_u8_x2(svuint8x2_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u32_x2))) +svuint32x2_t svrshl_single_u32_x2(svuint32x2_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u64_x2))) +svuint64x2_t svrshl_single_u64_x2(svuint64x2_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u16_x2))) +svuint16x2_t svrshl_single_u16_x2(svuint16x2_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s8_x4))) +svint8x4_t svrshl_single_s8_x4(svint8x4_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s32_x4))) +svint32x4_t svrshl_single_s32_x4(svint32x4_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s64_x4))) +svint64x4_t svrshl_single_s64_x4(svint64x4_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s16_x4))) +svint16x4_t 
svrshl_single_s16_x4(svint16x4_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u8_x4))) +svuint8x4_t svrshl_single_u8_x4(svuint8x4_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u32_x4))) +svuint32x4_t svrshl_single_u32_x4(svuint32x4_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u64_x4))) +svuint64x4_t svrshl_single_u64_x4(svuint64x4_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u16_x4))) +svuint16x4_t svrshl_single_u16_x4(svuint16x4_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x2))) +svint8x2_t svrshl_s8_x2(svint8x2_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x2))) +svint32x2_t svrshl_s32_x2(svint32x2_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x2))) +svint64x2_t svrshl_s64_x2(svint64x2_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x2))) +svint16x2_t svrshl_s16_x2(svint16x2_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x2))) +svuint8x2_t svrshl_u8_x2(svuint8x2_t, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x2))) +svuint32x2_t svrshl_u32_x2(svuint32x2_t, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x2))) +svuint64x2_t svrshl_u64_x2(svuint64x2_t, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x2))) +svuint16x2_t svrshl_u16_x2(svuint16x2_t, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x4))) +svint8x4_t svrshl_s8_x4(svint8x4_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x4))) +svint32x4_t svrshl_s32_x4(svint32x4_t, svint32x4_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x4))) +svint64x4_t svrshl_s64_x4(svint64x4_t, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x4))) +svint16x4_t svrshl_s16_x4(svint16x4_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x4))) +svuint8x4_t svrshl_u8_x4(svuint8x4_t, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x4))) +svuint32x4_t svrshl_u32_x4(svuint32x4_t, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x4))) +svuint64x4_t svrshl_u64_x4(svuint64x4_t, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x4))) +svuint16x4_t svrshl_u16_x4(svuint16x4_t, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8_x2))) +svuint8x2_t svsel_u8_x2(svcount_t, svuint8x2_t, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32_x2))) +svuint32x2_t svsel_u32_x2(svcount_t, svuint32x2_t, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64_x2))) +svuint64x2_t svsel_u64_x2(svcount_t, svuint64x2_t, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16_x2))) +svuint16x2_t svsel_u16_x2(svcount_t, svuint16x2_t, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16_x2))) +svbfloat16x2_t svsel_bf16_x2(svcount_t, svbfloat16x2_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8_x2))) +svint8x2_t svsel_s8_x2(svcount_t, svint8x2_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64_x2))) +svfloat64x2_t svsel_f64_x2(svcount_t, svfloat64x2_t, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32_x2))) +svfloat32x2_t svsel_f32_x2(svcount_t, svfloat32x2_t, svfloat32x2_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16_x2))) +svfloat16x2_t svsel_f16_x2(svcount_t, svfloat16x2_t, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32_x2))) +svint32x2_t svsel_s32_x2(svcount_t, svint32x2_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64_x2))) +svint64x2_t svsel_s64_x2(svcount_t, svint64x2_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16_x2))) +svint16x2_t svsel_s16_x2(svcount_t, svint16x2_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8_x4))) +svuint8x4_t svsel_u8_x4(svcount_t, svuint8x4_t, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32_x4))) +svuint32x4_t svsel_u32_x4(svcount_t, svuint32x4_t, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64_x4))) +svuint64x4_t svsel_u64_x4(svcount_t, svuint64x4_t, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16_x4))) +svuint16x4_t svsel_u16_x4(svcount_t, svuint16x4_t, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16_x4))) +svbfloat16x4_t svsel_bf16_x4(svcount_t, svbfloat16x4_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8_x4))) +svint8x4_t svsel_s8_x4(svcount_t, svint8x4_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64_x4))) +svfloat64x4_t svsel_f64_x4(svcount_t, svfloat64x4_t, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32_x4))) +svfloat32x4_t svsel_f32_x4(svcount_t, svfloat32x4_t, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16_x4))) +svfloat16x4_t svsel_f16_x4(svcount_t, svfloat16x4_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32_x4))) +svint32x4_t svsel_s32_x4(svcount_t, svint32x4_t, 
svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64_x4))) +svint64x4_t svsel_s64_x4(svcount_t, svint64x4_t, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16_x4))) +svint16x4_t svsel_s16_x4(svcount_t, svint16x4_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s32_s16_x2))) +svint32x2_t svunpk_s32_s16_x2(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s64_s32_x2))) +svint64x2_t svunpk_s64_s32_x2(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s16_s8_x2))) +svint16x2_t svunpk_s16_s8_x2(svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u32_u16_x2))) +svuint32x2_t svunpk_u32_u16_x2(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u64_u32_x2))) +svuint64x2_t svunpk_u64_u32_x2(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u16_u8_x2))) +svuint16x2_t svunpk_u16_u8_x2(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s32_s16_x4))) +svint32x4_t svunpk_s32_s16_x4(svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s64_s32_x4))) +svint64x4_t svunpk_s64_s32_x4(svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s16_s8_x4))) +svint16x4_t svunpk_s16_s8_x4(svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u32_u16_x4))) +svuint32x4_t svunpk_u32_u16_x4(svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u64_u32_x4))) +svuint64x4_t svunpk_u64_u32_x4(svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u16_u8_x4))) +svuint16x4_t svunpk_u16_u8_x4(svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u8_x2))) +svuint8x2_t svuzp_u8_x2(svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u32_x2))) 
+svuint32x2_t svuzp_u32_x2(svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u64_x2))) +svuint64x2_t svuzp_u64_x2(svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u16_x2))) +svuint16x2_t svuzp_u16_x2(svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_bf16_x2))) +svbfloat16x2_t svuzp_bf16_x2(svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s8_x2))) +svint8x2_t svuzp_s8_x2(svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f64_x2))) +svfloat64x2_t svuzp_f64_x2(svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f32_x2))) +svfloat32x2_t svuzp_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f16_x2))) +svfloat16x2_t svuzp_f16_x2(svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s32_x2))) +svint32x2_t svuzp_s32_x2(svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s64_x2))) +svint64x2_t svuzp_s64_x2(svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s16_x2))) +svint16x2_t svuzp_s16_x2(svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u8_x4))) +svuint8x4_t svuzp_u8_x4(svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u32_x4))) +svuint32x4_t svuzp_u32_x4(svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u64_x4))) +svuint64x4_t svuzp_u64_x4(svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u16_x4))) +svuint16x4_t svuzp_u16_x4(svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_bf16_x4))) +svbfloat16x4_t svuzp_bf16_x4(svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s8_x4))) +svint8x4_t svuzp_s8_x4(svint8x4_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f64_x4))) +svfloat64x4_t svuzp_f64_x4(svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f32_x4))) +svfloat32x4_t svuzp_f32_x4(svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f16_x4))) +svfloat16x4_t svuzp_f16_x4(svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s32_x4))) +svint32x4_t svuzp_s32_x4(svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s64_x4))) +svint64x4_t svuzp_s64_x4(svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s16_x4))) +svint16x4_t svuzp_s16_x4(svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u8_x2))) +svuint8x2_t svuzpq_u8_x2(svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u32_x2))) +svuint32x2_t svuzpq_u32_x2(svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u64_x2))) +svuint64x2_t svuzpq_u64_x2(svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u16_x2))) +svuint16x2_t svuzpq_u16_x2(svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_bf16_x2))) +svbfloat16x2_t svuzpq_bf16_x2(svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s8_x2))) +svint8x2_t svuzpq_s8_x2(svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f64_x2))) +svfloat64x2_t svuzpq_f64_x2(svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f32_x2))) +svfloat32x2_t svuzpq_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f16_x2))) +svfloat16x2_t svuzpq_f16_x2(svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s32_x2))) +svint32x2_t svuzpq_s32_x2(svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s64_x2))) 
+svint64x2_t svuzpq_s64_x2(svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s16_x2))) +svint16x2_t svuzpq_s16_x2(svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u8_x4))) +svuint8x4_t svuzpq_u8_x4(svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u32_x4))) +svuint32x4_t svuzpq_u32_x4(svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u64_x4))) +svuint64x4_t svuzpq_u64_x4(svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u16_x4))) +svuint16x4_t svuzpq_u16_x4(svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_bf16_x4))) +svbfloat16x4_t svuzpq_bf16_x4(svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s8_x4))) +svint8x4_t svuzpq_s8_x4(svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f64_x4))) +svfloat64x4_t svuzpq_f64_x4(svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f32_x4))) +svfloat32x4_t svuzpq_f32_x4(svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f16_x4))) +svfloat16x4_t svuzpq_f16_x4(svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s32_x4))) +svint32x4_t svuzpq_s32_x4(svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s64_x4))) +svint64x4_t svuzpq_s64_x4(svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s16_x4))) +svint16x4_t svuzpq_s16_x4(svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u8_x2))) +svuint8x2_t svzip_u8_x2(svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u32_x2))) +svuint32x2_t svzip_u32_x2(svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u64_x2))) +svuint64x2_t svzip_u64_x2(svuint64x2_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u16_x2))) +svuint16x2_t svzip_u16_x2(svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_bf16_x2))) +svbfloat16x2_t svzip_bf16_x2(svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s8_x2))) +svint8x2_t svzip_s8_x2(svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f64_x2))) +svfloat64x2_t svzip_f64_x2(svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f32_x2))) +svfloat32x2_t svzip_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f16_x2))) +svfloat16x2_t svzip_f16_x2(svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s32_x2))) +svint32x2_t svzip_s32_x2(svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s64_x2))) +svint64x2_t svzip_s64_x2(svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s16_x2))) +svint16x2_t svzip_s16_x2(svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u8_x4))) +svuint8x4_t svzip_u8_x4(svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u32_x4))) +svuint32x4_t svzip_u32_x4(svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u64_x4))) +svuint64x4_t svzip_u64_x4(svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u16_x4))) +svuint16x4_t svzip_u16_x4(svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_bf16_x4))) +svbfloat16x4_t svzip_bf16_x4(svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s8_x4))) +svint8x4_t svzip_s8_x4(svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f64_x4))) +svfloat64x4_t svzip_f64_x4(svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f32_x4))) +svfloat32x4_t 
svzip_f32_x4(svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f16_x4))) +svfloat16x4_t svzip_f16_x4(svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s32_x4))) +svint32x4_t svzip_s32_x4(svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s64_x4))) +svint64x4_t svzip_s64_x4(svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s16_x4))) +svint16x4_t svzip_s16_x4(svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u8_x2))) +svuint8x2_t svzipq_u8_x2(svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u32_x2))) +svuint32x2_t svzipq_u32_x2(svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u64_x2))) +svuint64x2_t svzipq_u64_x2(svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u16_x2))) +svuint16x2_t svzipq_u16_x2(svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_bf16_x2))) +svbfloat16x2_t svzipq_bf16_x2(svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s8_x2))) +svint8x2_t svzipq_s8_x2(svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f64_x2))) +svfloat64x2_t svzipq_f64_x2(svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f32_x2))) +svfloat32x2_t svzipq_f32_x2(svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f16_x2))) +svfloat16x2_t svzipq_f16_x2(svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s32_x2))) +svint32x2_t svzipq_s32_x2(svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s64_x2))) +svint64x2_t svzipq_s64_x2(svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s16_x2))) +svint16x2_t svzipq_s16_x2(svint16x2_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u8_x4))) +svuint8x4_t svzipq_u8_x4(svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u32_x4))) +svuint32x4_t svzipq_u32_x4(svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u64_x4))) +svuint64x4_t svzipq_u64_x4(svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u16_x4))) +svuint16x4_t svzipq_u16_x4(svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_bf16_x4))) +svbfloat16x4_t svzipq_bf16_x4(svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s8_x4))) +svint8x4_t svzipq_s8_x4(svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f64_x4))) +svfloat64x4_t svzipq_f64_x4(svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f32_x4))) +svfloat32x4_t svzipq_f32_x4(svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f16_x4))) +svfloat16x4_t svzipq_f16_x4(svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s32_x4))) +svint32x4_t svzipq_s32_x4(svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s64_x4))) +svint64x4_t svzipq_s64_x4(svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s16_x4))) +svint16x4_t svzipq_s16_x4(svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u8_x2))) +svuint8x2_t svadd(svuint8x2_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u32_x2))) +svuint32x2_t svadd(svuint32x2_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u64_x2))) +svuint64x2_t svadd(svuint64x2_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u16_x2))) +svuint16x2_t svadd(svuint16x2_t, svuint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s8_x2))) +svint8x2_t svadd(svint8x2_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s32_x2))) +svint32x2_t svadd(svint32x2_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s64_x2))) +svint64x2_t svadd(svint64x2_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s16_x2))) +svint16x2_t svadd(svint16x2_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u8_x4))) +svuint8x4_t svadd(svuint8x4_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u32_x4))) +svuint32x4_t svadd(svuint32x4_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u64_x4))) +svuint64x4_t svadd(svuint64x4_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_u16_x4))) +svuint16x4_t svadd(svuint16x4_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s8_x4))) +svint8x4_t svadd(svint8x4_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s32_x4))) +svint32x4_t svadd(svint32x4_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s64_x4))) +svint64x4_t svadd(svint64x4_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_single_s16_x4))) +svint16x4_t svadd(svint16x4_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f64_x2))) +svfloat64x2_t svclamp(svfloat64x2_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f32_x2))) +svfloat32x2_t svclamp(svfloat32x2_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f16_x2))) +svfloat16x2_t svclamp(svfloat16x2_t, svfloat16_t, svfloat16_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s8_x2))) +svint8x2_t svclamp(svint8x2_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s32_x2))) +svint32x2_t svclamp(svint32x2_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s64_x2))) +svint64x2_t svclamp(svint64x2_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s16_x2))) +svint16x2_t svclamp(svint16x2_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u8_x2))) +svuint8x2_t svclamp(svuint8x2_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u32_x2))) +svuint32x2_t svclamp(svuint32x2_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u64_x2))) +svuint64x2_t svclamp(svuint64x2_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u16_x2))) +svuint16x2_t svclamp(svuint16x2_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f64_x4))) +svfloat64x4_t svclamp(svfloat64x4_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f32_x4))) +svfloat32x4_t svclamp(svfloat32x4_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_f16_x4))) +svfloat16x4_t svclamp(svfloat16x4_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s8_x4))) +svint8x4_t svclamp(svint8x4_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s32_x4))) +svint32x4_t svclamp(svint32x4_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s64_x4))) +svint64x4_t 
svclamp(svint64x4_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_s16_x4))) +svint16x4_t svclamp(svint16x4_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u8_x4))) +svuint8x4_t svclamp(svuint8x4_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u32_x4))) +svuint32x4_t svclamp(svuint32x4_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u64_x4))) +svuint64x4_t svclamp(svuint64x4_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_u16_x4))) +svuint16x4_t svclamp(svuint16x4_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x2))) +svbfloat16_t svcvt_bf16(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_x2))) +svfloat16_t svcvt_f16(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x2))) +svint32x2_t svcvt_s32(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x2))) +svuint32x2_t svcvt_u32(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x4))) +svint32x4_t svcvt_s32(svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x4))) +svuint32x4_t svcvt_u32(svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x2))) +svfloat32x2_t svcvt_f32(svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x4))) +svfloat32x4_t svcvt_f32(svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x2))) +svfloat32x2_t svcvt_f32(svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x4))) +svfloat32x4_t svcvt_f32(svuint32x4_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtn_bf16_f32_x2))) +svbfloat16_t svcvtn_bf16(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtn_f16_f32_x2))) +svfloat16_t svcvtn_f16(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f64_x2))) +svfloat64x2_t svmax(svfloat64x2_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f32_x2))) +svfloat32x2_t svmax(svfloat32x2_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f16_x2))) +svfloat16x2_t svmax(svfloat16x2_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s8_x2))) +svint8x2_t svmax(svint8x2_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s32_x2))) +svint32x2_t svmax(svint32x2_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s64_x2))) +svint64x2_t svmax(svint64x2_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s16_x2))) +svint16x2_t svmax(svint16x2_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u8_x2))) +svuint8x2_t svmax(svuint8x2_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u32_x2))) +svuint32x2_t svmax(svuint32x2_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u64_x2))) +svuint64x2_t svmax(svuint64x2_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u16_x2))) +svuint16x2_t svmax(svuint16x2_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f64_x4))) +svfloat64x4_t svmax(svfloat64x4_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f32_x4))) +svfloat32x4_t svmax(svfloat32x4_t, svfloat32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_f16_x4))) +svfloat16x4_t svmax(svfloat16x4_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s8_x4))) +svint8x4_t svmax(svint8x4_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s32_x4))) +svint32x4_t svmax(svint32x4_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s64_x4))) +svint64x4_t svmax(svint64x4_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_s16_x4))) +svint16x4_t svmax(svint16x4_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u8_x4))) +svuint8x4_t svmax(svuint8x4_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u32_x4))) +svuint32x4_t svmax(svuint32x4_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u64_x4))) +svuint64x4_t svmax(svuint64x4_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_u16_x4))) +svuint16x4_t svmax(svuint16x4_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x2))) +svfloat64x2_t svmax(svfloat64x2_t, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x2))) +svfloat32x2_t svmax(svfloat32x2_t, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x2))) +svfloat16x2_t svmax(svfloat16x2_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x2))) +svint8x2_t svmax(svint8x2_t, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x2))) +svint32x2_t svmax(svint32x2_t, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x2))) +svint64x2_t svmax(svint64x2_t, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x2))) 
+svint16x2_t svmax(svint16x2_t, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x2))) +svuint8x2_t svmax(svuint8x2_t, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x2))) +svuint32x2_t svmax(svuint32x2_t, svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x2))) +svuint64x2_t svmax(svuint64x2_t, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x2))) +svuint16x2_t svmax(svuint16x2_t, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x4))) +svfloat64x4_t svmax(svfloat64x4_t, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x4))) +svfloat32x4_t svmax(svfloat32x4_t, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x4))) +svfloat16x4_t svmax(svfloat16x4_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x4))) +svint8x4_t svmax(svint8x4_t, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x4))) +svint32x4_t svmax(svint32x4_t, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x4))) +svint64x4_t svmax(svint64x4_t, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x4))) +svint16x4_t svmax(svint16x4_t, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x4))) +svuint8x4_t svmax(svuint8x4_t, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x4))) +svuint32x4_t svmax(svuint32x4_t, svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x4))) +svuint64x4_t svmax(svuint64x4_t, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x4))) +svuint16x4_t svmax(svuint16x4_t, svuint16x4_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f64_x2))) +svfloat64x2_t svmaxnm(svfloat64x2_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f32_x2))) +svfloat32x2_t svmaxnm(svfloat32x2_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f16_x2))) +svfloat16x2_t svmaxnm(svfloat16x2_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f64_x4))) +svfloat64x4_t svmaxnm(svfloat64x4_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f32_x4))) +svfloat32x4_t svmaxnm(svfloat32x4_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_f16_x4))) +svfloat16x4_t svmaxnm(svfloat16x4_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x2))) +svfloat64x2_t svmaxnm(svfloat64x2_t, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x2))) +svfloat32x2_t svmaxnm(svfloat32x2_t, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x2))) +svfloat16x2_t svmaxnm(svfloat16x2_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x4))) +svfloat64x4_t svmaxnm(svfloat64x4_t, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x4))) +svfloat32x4_t svmaxnm(svfloat32x4_t, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x4))) +svfloat16x4_t svmaxnm(svfloat16x4_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f64_x2))) +svfloat64x2_t svmin(svfloat64x2_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f32_x2))) +svfloat32x2_t svmin(svfloat32x2_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f16_x2))) 
+svfloat16x2_t svmin(svfloat16x2_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s8_x2))) +svint8x2_t svmin(svint8x2_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s32_x2))) +svint32x2_t svmin(svint32x2_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s64_x2))) +svint64x2_t svmin(svint64x2_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s16_x2))) +svint16x2_t svmin(svint16x2_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u8_x2))) +svuint8x2_t svmin(svuint8x2_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u32_x2))) +svuint32x2_t svmin(svuint32x2_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u64_x2))) +svuint64x2_t svmin(svuint64x2_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u16_x2))) +svuint16x2_t svmin(svuint16x2_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f64_x4))) +svfloat64x4_t svmin(svfloat64x4_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f32_x4))) +svfloat32x4_t svmin(svfloat32x4_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_f16_x4))) +svfloat16x4_t svmin(svfloat16x4_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s8_x4))) +svint8x4_t svmin(svint8x4_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s32_x4))) +svint32x4_t svmin(svint32x4_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s64_x4))) +svint64x4_t svmin(svint64x4_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_s16_x4))) +svint16x4_t svmin(svint16x4_t, 
svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u8_x4))) +svuint8x4_t svmin(svuint8x4_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u32_x4))) +svuint32x4_t svmin(svuint32x4_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u64_x4))) +svuint64x4_t svmin(svuint64x4_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_u16_x4))) +svuint16x4_t svmin(svuint16x4_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x2))) +svfloat64x2_t svmin(svfloat64x2_t, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x2))) +svfloat32x2_t svmin(svfloat32x2_t, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x2))) +svfloat16x2_t svmin(svfloat16x2_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x2))) +svint8x2_t svmin(svint8x2_t, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x2))) +svint32x2_t svmin(svint32x2_t, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x2))) +svint64x2_t svmin(svint64x2_t, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x2))) +svint16x2_t svmin(svint16x2_t, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x2))) +svuint8x2_t svmin(svuint8x2_t, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x2))) +svuint32x2_t svmin(svuint32x2_t, svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x2))) +svuint64x2_t svmin(svuint64x2_t, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x2))) +svuint16x2_t svmin(svuint16x2_t, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x4))) 
+svfloat64x4_t svmin(svfloat64x4_t, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x4))) +svfloat32x4_t svmin(svfloat32x4_t, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x4))) +svfloat16x4_t svmin(svfloat16x4_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x4))) +svint8x4_t svmin(svint8x4_t, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x4))) +svint32x4_t svmin(svint32x4_t, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x4))) +svint64x4_t svmin(svint64x4_t, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x4))) +svint16x4_t svmin(svint16x4_t, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x4))) +svuint8x4_t svmin(svuint8x4_t, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x4))) +svuint32x4_t svmin(svuint32x4_t, svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x4))) +svuint64x4_t svmin(svuint64x4_t, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x4))) +svuint16x4_t svmin(svuint16x4_t, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f64_x2))) +svfloat64x2_t svminnm(svfloat64x2_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f32_x2))) +svfloat32x2_t svminnm(svfloat32x2_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f16_x2))) +svfloat16x2_t svminnm(svfloat16x2_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f64_x4))) +svfloat64x4_t svminnm(svfloat64x4_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f32_x4))) +svfloat32x4_t svminnm(svfloat32x4_t, 
svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_f16_x4))) +svfloat16x4_t svminnm(svfloat16x4_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x2))) +svfloat64x2_t svminnm(svfloat64x2_t, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x2))) +svfloat32x2_t svminnm(svfloat32x2_t, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x2))) +svfloat16x2_t svminnm(svfloat16x2_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x4))) +svfloat64x4_t svminnm(svfloat64x4_t, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x4))) +svfloat32x4_t svminnm(svfloat32x4_t, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x4))) +svfloat16x4_t svminnm(svfloat16x4_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_s16_s32_x2))) +svint16_t svqcvt_s16(svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_s16_s64_x4))) +svint16_t svqcvt_s16(svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_s8_s32_x4))) +svint8_t svqcvt_s8(svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u16_s32_x2))) +svuint16_t svqcvt_u16(svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u16_u32_x2))) +svuint16_t svqcvt_u16(svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u16_s64_x4))) +svuint16_t svqcvt_u16(svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u16_u64_x4))) +svuint16_t svqcvt_u16(svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u8_s32_x4))) +svuint8_t svqcvt_u8(svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvt_u8_u32_x4))) 
+svuint8_t svqcvt_u8(svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_s16_s64_x4))) +svint16_t svqcvtn_s16(svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_s8_s32_x4))) +svint8_t svqcvtn_s8(svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_s64_x4))) +svuint16_t svqcvtn_u16(svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_u64_x4))) +svuint16_t svqcvtn_u16(svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u8_s32_x4))) +svuint8_t svqcvtn_u8(svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u8_u32_x4))) +svuint8_t svqcvtn_u8(svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s8_x2))) +svint8x2_t svqdmulh(svint8x2_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s32_x2))) +svint32x2_t svqdmulh(svint32x2_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s64_x2))) +svint64x2_t svqdmulh(svint64x2_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s16_x2))) +svint16x2_t svqdmulh(svint16x2_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s8_x4))) +svint8x4_t svqdmulh(svint8x4_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s32_x4))) +svint32x4_t svqdmulh(svint32x4_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s64_x4))) +svint64x4_t svqdmulh(svint64x4_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_single_s16_x4))) +svint16x4_t svqdmulh(svint16x4_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8_x2))) +svint8x2_t svqdmulh(svint8x2_t, svint8x2_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32_x2))) +svint32x2_t svqdmulh(svint32x2_t, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64_x2))) +svint64x2_t svqdmulh(svint64x2_t, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16_x2))) +svint16x2_t svqdmulh(svint16x2_t, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8_x4))) +svint8x4_t svqdmulh(svint8x4_t, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32_x4))) +svint32x4_t svqdmulh(svint32x4_t, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64_x4))) +svint64x4_t svqdmulh(svint64x4_t, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16_x4))) +svint16x4_t svqdmulh(svint16x4_t, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_s16_s32_x2))) +svint16_t svqrshr_s16(svint32x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_u16_u32_x2))) +svuint16_t svqrshr_u16(svuint32x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_s8_s32_x4))) +svint8_t svqrshr_s8(svint32x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_s16_s64_x4))) +svint16_t svqrshr_s16(svint64x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_u8_u32_x4))) +svuint8_t svqrshr_u8(svuint32x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshr_n_u16_u64_x4))) +svuint16_t svqrshr_u16(svuint64x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_s8_s32_x4))) +svint8_t svqrshrn_s8(svint32x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_s16_s64_x4))) +svint16_t svqrshrn_s16(svint64x4_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_u8_u32_x4))) +svuint8_t svqrshrn_u8(svuint32x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_u16_u64_x4))) +svuint16_t svqrshrn_u16(svuint64x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshru_n_u16_s32_x2))) +svuint16_t svqrshru_u16(svint32x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshru_n_u8_s32_x4))) +svuint8_t svqrshru_u8(svint32x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshru_n_u16_s64_x4))) +svuint16_t svqrshru_u16(svint64x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrun_n_u8_s32_x4))) +svuint8_t svqrshrun_u8(svint32x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrun_n_u16_s64_x4))) +svuint16_t svqrshrun_u16(svint64x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x2))) +svfloat32x2_t svrinta(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x4))) +svfloat32x4_t svrinta(svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x2))) +svfloat32x2_t svrintm(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x4))) +svfloat32x4_t svrintm(svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x2))) +svfloat32x2_t svrintn(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x4))) +svfloat32x4_t svrintn(svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x2))) +svfloat32x2_t svrintp(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x4))) +svfloat32x4_t svrintp(svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s8_x2))) +svint8x2_t 
svrshl(svint8x2_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s32_x2))) +svint32x2_t svrshl(svint32x2_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s64_x2))) +svint64x2_t svrshl(svint64x2_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s16_x2))) +svint16x2_t svrshl(svint16x2_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u8_x2))) +svuint8x2_t svrshl(svuint8x2_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u32_x2))) +svuint32x2_t svrshl(svuint32x2_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u64_x2))) +svuint64x2_t svrshl(svuint64x2_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u16_x2))) +svuint16x2_t svrshl(svuint16x2_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s8_x4))) +svint8x4_t svrshl(svint8x4_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s32_x4))) +svint32x4_t svrshl(svint32x4_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s64_x4))) +svint64x4_t svrshl(svint64x4_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_s16_x4))) +svint16x4_t svrshl(svint16x4_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u8_x4))) +svuint8x4_t svrshl(svuint8x4_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u32_x4))) +svuint32x4_t svrshl(svuint32x4_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u64_x4))) +svuint64x4_t svrshl(svuint64x4_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_single_u16_x4))) +svuint16x4_t svrshl(svuint16x4_t, 
svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x2))) +svint8x2_t svrshl(svint8x2_t, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x2))) +svint32x2_t svrshl(svint32x2_t, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x2))) +svint64x2_t svrshl(svint64x2_t, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x2))) +svint16x2_t svrshl(svint16x2_t, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x2))) +svuint8x2_t svrshl(svuint8x2_t, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x2))) +svuint32x2_t svrshl(svuint32x2_t, svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x2))) +svuint64x2_t svrshl(svuint64x2_t, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x2))) +svuint16x2_t svrshl(svuint16x2_t, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x4))) +svint8x4_t svrshl(svint8x4_t, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x4))) +svint32x4_t svrshl(svint32x4_t, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x4))) +svint64x4_t svrshl(svint64x4_t, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x4))) +svint16x4_t svrshl(svint16x4_t, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x4))) +svuint8x4_t svrshl(svuint8x4_t, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x4))) +svuint32x4_t svrshl(svuint32x4_t, svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x4))) +svuint64x4_t svrshl(svuint64x4_t, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x4))) 
+svuint16x4_t svrshl(svuint16x4_t, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8_x2))) +svuint8x2_t svsel(svcount_t, svuint8x2_t, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32_x2))) +svuint32x2_t svsel(svcount_t, svuint32x2_t, svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64_x2))) +svuint64x2_t svsel(svcount_t, svuint64x2_t, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16_x2))) +svuint16x2_t svsel(svcount_t, svuint16x2_t, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16_x2))) +svbfloat16x2_t svsel(svcount_t, svbfloat16x2_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8_x2))) +svint8x2_t svsel(svcount_t, svint8x2_t, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64_x2))) +svfloat64x2_t svsel(svcount_t, svfloat64x2_t, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32_x2))) +svfloat32x2_t svsel(svcount_t, svfloat32x2_t, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16_x2))) +svfloat16x2_t svsel(svcount_t, svfloat16x2_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32_x2))) +svint32x2_t svsel(svcount_t, svint32x2_t, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64_x2))) +svint64x2_t svsel(svcount_t, svint64x2_t, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16_x2))) +svint16x2_t svsel(svcount_t, svint16x2_t, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8_x4))) +svuint8x4_t svsel(svcount_t, svuint8x4_t, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32_x4))) +svuint32x4_t svsel(svcount_t, svuint32x4_t, svuint32x4_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64_x4))) +svuint64x4_t svsel(svcount_t, svuint64x4_t, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16_x4))) +svuint16x4_t svsel(svcount_t, svuint16x4_t, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16_x4))) +svbfloat16x4_t svsel(svcount_t, svbfloat16x4_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8_x4))) +svint8x4_t svsel(svcount_t, svint8x4_t, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64_x4))) +svfloat64x4_t svsel(svcount_t, svfloat64x4_t, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32_x4))) +svfloat32x4_t svsel(svcount_t, svfloat32x4_t, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16_x4))) +svfloat16x4_t svsel(svcount_t, svfloat16x4_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32_x4))) +svint32x4_t svsel(svcount_t, svint32x4_t, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64_x4))) +svint64x4_t svsel(svcount_t, svint64x4_t, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16_x4))) +svint16x4_t svsel(svcount_t, svint16x4_t, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s32_s16_x2))) +svint32x2_t svunpk_s32(svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s64_s32_x2))) +svint64x2_t svunpk_s64(svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s16_s8_x2))) +svint16x2_t svunpk_s16(svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u32_u16_x2))) +svuint32x2_t svunpk_u32(svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u64_u32_x2))) +svuint64x2_t svunpk_u64(svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u16_u8_x2))) +svuint16x2_t svunpk_u16(svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s32_s16_x4))) +svint32x4_t svunpk_s32(svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s64_s32_x4))) +svint64x4_t svunpk_s64(svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_s16_s8_x4))) +svint16x4_t svunpk_s16(svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u32_u16_x4))) +svuint32x4_t svunpk_u32(svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u64_u32_x4))) +svuint64x4_t svunpk_u64(svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpk_u16_u8_x4))) +svuint16x4_t svunpk_u16(svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u8_x2))) +svuint8x2_t svuzp(svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u32_x2))) +svuint32x2_t svuzp(svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u64_x2))) +svuint64x2_t svuzp(svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u16_x2))) +svuint16x2_t svuzp(svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_bf16_x2))) +svbfloat16x2_t svuzp(svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s8_x2))) +svint8x2_t svuzp(svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f64_x2))) +svfloat64x2_t svuzp(svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f32_x2))) +svfloat32x2_t svuzp(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f16_x2))) +svfloat16x2_t svuzp(svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s32_x2))) +svint32x2_t svuzp(svint32x2_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s64_x2))) +svint64x2_t svuzp(svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s16_x2))) +svint16x2_t svuzp(svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u8_x4))) +svuint8x4_t svuzp(svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u32_x4))) +svuint32x4_t svuzp(svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u64_x4))) +svuint64x4_t svuzp(svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_u16_x4))) +svuint16x4_t svuzp(svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_bf16_x4))) +svbfloat16x4_t svuzp(svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s8_x4))) +svint8x4_t svuzp(svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f64_x4))) +svfloat64x4_t svuzp(svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f32_x4))) +svfloat32x4_t svuzp(svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_f16_x4))) +svfloat16x4_t svuzp(svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s32_x4))) +svint32x4_t svuzp(svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s64_x4))) +svint64x4_t svuzp(svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp_s16_x4))) +svint16x4_t svuzp(svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u8_x2))) +svuint8x2_t svuzpq(svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u32_x2))) +svuint32x2_t svuzpq(svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u64_x2))) +svuint64x2_t svuzpq(svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u16_x2))) 
+svuint16x2_t svuzpq(svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_bf16_x2))) +svbfloat16x2_t svuzpq(svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s8_x2))) +svint8x2_t svuzpq(svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f64_x2))) +svfloat64x2_t svuzpq(svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f32_x2))) +svfloat32x2_t svuzpq(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f16_x2))) +svfloat16x2_t svuzpq(svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s32_x2))) +svint32x2_t svuzpq(svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s64_x2))) +svint64x2_t svuzpq(svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s16_x2))) +svint16x2_t svuzpq(svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u8_x4))) +svuint8x4_t svuzpq(svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u32_x4))) +svuint32x4_t svuzpq(svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u64_x4))) +svuint64x4_t svuzpq(svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_u16_x4))) +svuint16x4_t svuzpq(svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_bf16_x4))) +svbfloat16x4_t svuzpq(svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s8_x4))) +svint8x4_t svuzpq(svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f64_x4))) +svfloat64x4_t svuzpq(svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f32_x4))) +svfloat32x4_t svuzpq(svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_f16_x4))) +svfloat16x4_t svuzpq(svfloat16x4_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s32_x4))) +svint32x4_t svuzpq(svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s64_x4))) +svint64x4_t svuzpq(svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzpq_s16_x4))) +svint16x4_t svuzpq(svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u8_x2))) +svuint8x2_t svzip(svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u32_x2))) +svuint32x2_t svzip(svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u64_x2))) +svuint64x2_t svzip(svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u16_x2))) +svuint16x2_t svzip(svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_bf16_x2))) +svbfloat16x2_t svzip(svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s8_x2))) +svint8x2_t svzip(svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f64_x2))) +svfloat64x2_t svzip(svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f32_x2))) +svfloat32x2_t svzip(svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f16_x2))) +svfloat16x2_t svzip(svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s32_x2))) +svint32x2_t svzip(svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s64_x2))) +svint64x2_t svzip(svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s16_x2))) +svint16x2_t svzip(svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u8_x4))) +svuint8x4_t svzip(svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u32_x4))) +svuint32x4_t svzip(svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u64_x4))) 
+svuint64x4_t svzip(svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_u16_x4))) +svuint16x4_t svzip(svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_bf16_x4))) +svbfloat16x4_t svzip(svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s8_x4))) +svint8x4_t svzip(svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f64_x4))) +svfloat64x4_t svzip(svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f32_x4))) +svfloat32x4_t svzip(svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_f16_x4))) +svfloat16x4_t svzip(svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s32_x4))) +svint32x4_t svzip(svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s64_x4))) +svint64x4_t svzip(svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip_s16_x4))) +svint16x4_t svzip(svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u8_x2))) +svuint8x2_t svzipq(svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u32_x2))) +svuint32x2_t svzipq(svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u64_x2))) +svuint64x2_t svzipq(svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u16_x2))) +svuint16x2_t svzipq(svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_bf16_x2))) +svbfloat16x2_t svzipq(svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s8_x2))) +svint8x2_t svzipq(svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f64_x2))) +svfloat64x2_t svzipq(svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f32_x2))) +svfloat32x2_t svzipq(svfloat32x2_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f16_x2))) +svfloat16x2_t svzipq(svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s32_x2))) +svint32x2_t svzipq(svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s64_x2))) +svint64x2_t svzipq(svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s16_x2))) +svint16x2_t svzipq(svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u8_x4))) +svuint8x4_t svzipq(svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u32_x4))) +svuint32x4_t svzipq(svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u64_x4))) +svuint64x4_t svzipq(svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_u16_x4))) +svuint16x4_t svzipq(svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_bf16_x4))) +svbfloat16x4_t svzipq(svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s8_x4))) +svint8x4_t svzipq(svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f64_x4))) +svfloat64x4_t svzipq(svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f32_x4))) +svfloat32x4_t svzipq(svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_f16_x4))) +svfloat16x4_t svzipq(svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s32_x4))) +svint32x4_t svzipq(svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s64_x4))) +svint64x4_t svzipq(svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq_s16_x4))) +svint16x4_t svzipq(svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_bf16_x2))) +svbfloat16x2_t svclamp_single_bf16_x2(svbfloat16x2_t, svbfloat16_t, svbfloat16_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_bf16_x4))) +svbfloat16x4_t svclamp_single_bf16_x4(svbfloat16x4_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_bf16_x2))) +svbfloat16x2_t svmax_single_bf16_x2(svbfloat16x2_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_bf16_x4))) +svbfloat16x4_t svmax_single_bf16_x4(svbfloat16x4_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_x2))) +svbfloat16x2_t svmax_bf16_x2(svbfloat16x2_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_x4))) +svbfloat16x4_t svmax_bf16_x4(svbfloat16x4_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_bf16_x2))) +svbfloat16x2_t svmaxnm_single_bf16_x2(svbfloat16x2_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_bf16_x4))) +svbfloat16x4_t svmaxnm_single_bf16_x4(svbfloat16x4_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_x2))) +svbfloat16x2_t svmaxnm_bf16_x2(svbfloat16x2_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_x4))) +svbfloat16x4_t svmaxnm_bf16_x4(svbfloat16x4_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_bf16_x2))) +svbfloat16x2_t svmin_single_bf16_x2(svbfloat16x2_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_bf16_x4))) +svbfloat16x4_t svmin_single_bf16_x4(svbfloat16x4_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_x2))) +svbfloat16x2_t svmin_bf16_x2(svbfloat16x2_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_x4))) +svbfloat16x4_t svmin_bf16_x4(svbfloat16x4_t, svbfloat16x4_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_bf16_x2))) +svbfloat16x2_t svminnm_single_bf16_x2(svbfloat16x2_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_bf16_x4))) +svbfloat16x4_t svminnm_single_bf16_x4(svbfloat16x4_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_x2))) +svbfloat16x2_t svminnm_bf16_x2(svbfloat16x2_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_x4))) +svbfloat16x4_t svminnm_bf16_x4(svbfloat16x4_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_bf16_x2))) +svbfloat16x2_t svclamp(svbfloat16x2_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_single_bf16_x4))) +svbfloat16x4_t svclamp(svbfloat16x4_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_bf16_x2))) +svbfloat16x2_t svmax(svbfloat16x2_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_single_bf16_x4))) +svbfloat16x4_t svmax(svbfloat16x4_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_x2))) +svbfloat16x2_t svmax(svbfloat16x2_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_x4))) +svbfloat16x4_t svmax(svbfloat16x4_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_bf16_x2))) +svbfloat16x2_t svmaxnm(svbfloat16x2_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_single_bf16_x4))) +svbfloat16x4_t svmaxnm(svbfloat16x4_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_x2))) +svbfloat16x2_t svmaxnm(svbfloat16x2_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_x4))) +svbfloat16x4_t 
svmaxnm(svbfloat16x4_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_bf16_x2))) +svbfloat16x2_t svmin(svbfloat16x2_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_single_bf16_x4))) +svbfloat16x4_t svmin(svbfloat16x4_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_x2))) +svbfloat16x2_t svmin(svbfloat16x2_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_x4))) +svbfloat16x4_t svmin(svbfloat16x4_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_bf16_x2))) +svbfloat16x2_t svminnm(svbfloat16x2_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_single_bf16_x4))) +svbfloat16x4_t svminnm(svbfloat16x4_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_x2))) +svbfloat16x2_t svminnm(svbfloat16x2_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_x4))) +svbfloat16x4_t svminnm(svbfloat16x4_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_u32offset))) +svuint32_t svadrb_u32base_u32offset(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_u64offset))) +svuint64_t svadrb_u64base_u64offset(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_s32offset))) +svuint32_t svadrb_u32base_s32offset(svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_s64offset))) +svuint64_t svadrb_u64base_s64offset(svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_u32index))) +svuint32_t svadrd_u32base_u32index(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_u64index))) +svuint64_t 
svadrd_u64base_u64index(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_s32index))) +svuint32_t svadrd_u32base_s32index(svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_s64index))) +svuint64_t svadrd_u64base_s64index(svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_u32index))) +svuint32_t svadrh_u32base_u32index(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_u64index))) +svuint64_t svadrh_u64base_u64index(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_s32index))) +svuint32_t svadrh_u32base_s32index(svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_s64index))) +svuint64_t svadrh_u64base_s64index(svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_u32index))) +svuint32_t svadrw_u32base_u32index(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_u64index))) +svuint64_t svadrw_u64base_u64index(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_s32index))) +svuint32_t svadrw_u32base_s32index(svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_s64index))) +svuint64_t svadrw_u64base_s64index(svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u32))) +svuint32_t svcompact_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u64))) +svuint64_t svcompact_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f64))) +svfloat64_t svcompact_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f32))) 
+svfloat32_t svcompact_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s32))) +svint32_t svcompact_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s64))) +svint64_t svcompact_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f64))) +svfloat64_t svexpa_f64(svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f32))) +svfloat32_t svexpa_f32(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f16))) +svfloat16_t svexpa_f16(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_u32))) +svuint32_t svld1_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_u64))) +svuint64_t svld1_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_f64))) +svfloat64_t svld1_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_f32))) +svfloat32_t svld1_gather_u32base_index_f32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_s32))) +svint32_t svld1_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_s64))) +svint64_t svld1_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_u32))) +svuint32_t svld1_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_u64))) +svuint64_t svld1_gather_u64base_offset_u64(svbool_t, svuint64_t, 
int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_f64))) +svfloat64_t svld1_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_f32))) +svfloat32_t svld1_gather_u32base_offset_f32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_s32))) +svint32_t svld1_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_s64))) +svint64_t svld1_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_u32))) +svuint32_t svld1_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_u64))) +svuint64_t svld1_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_f64))) +svfloat64_t svld1_gather_u64base_f64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_f32))) +svfloat32_t svld1_gather_u32base_f32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_s32))) +svint32_t svld1_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_s64))) +svint64_t svld1_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_u32))) +svuint32_t svld1_gather_s32index_u32(svbool_t, uint32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_f32))) +svfloat32_t svld1_gather_s32index_f32(svbool_t, float32_t const *, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_s32))) +svint32_t svld1_gather_s32index_s32(svbool_t, int32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_u32))) +svuint32_t svld1_gather_u32index_u32(svbool_t, uint32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_f32))) +svfloat32_t svld1_gather_u32index_f32(svbool_t, float32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_s32))) +svint32_t svld1_gather_u32index_s32(svbool_t, int32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_u64))) +svuint64_t svld1_gather_s64index_u64(svbool_t, uint64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_f64))) +svfloat64_t svld1_gather_s64index_f64(svbool_t, float64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_s64))) +svint64_t svld1_gather_s64index_s64(svbool_t, int64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_u64))) +svuint64_t svld1_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_f64))) +svfloat64_t svld1_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_s64))) +svint64_t svld1_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_u32))) +svuint32_t svld1_gather_s32offset_u32(svbool_t, uint32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_f32))) +svfloat32_t svld1_gather_s32offset_f32(svbool_t, float32_t 
const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_s32))) +svint32_t svld1_gather_s32offset_s32(svbool_t, int32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_u32))) +svuint32_t svld1_gather_u32offset_u32(svbool_t, uint32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_f32))) +svfloat32_t svld1_gather_u32offset_f32(svbool_t, float32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_s32))) +svint32_t svld1_gather_u32offset_s32(svbool_t, int32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_u64))) +svuint64_t svld1_gather_s64offset_u64(svbool_t, uint64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_f64))) +svfloat64_t svld1_gather_s64offset_f64(svbool_t, float64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_s64))) +svint64_t svld1_gather_s64offset_s64(svbool_t, int64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_u64))) +svuint64_t svld1_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_f64))) +svfloat64_t svld1_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_s64))) +svint64_t svld1_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_u32))) +svuint32_t svld1sb_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_u64))) 
+svuint64_t svld1sb_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_s32))) +svint32_t svld1sb_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_s64))) +svint64_t svld1sb_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_u32))) +svuint32_t svld1sb_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_u64))) +svuint64_t svld1sb_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_s32))) +svint32_t svld1sb_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_s64))) +svint64_t svld1sb_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_u32))) +svuint32_t svld1sb_gather_s32offset_u32(svbool_t, int8_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_s32))) +svint32_t svld1sb_gather_s32offset_s32(svbool_t, int8_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_u32))) +svuint32_t svld1sb_gather_u32offset_u32(svbool_t, int8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_s32))) +svint32_t svld1sb_gather_u32offset_s32(svbool_t, int8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_u64))) +svuint64_t svld1sb_gather_s64offset_u64(svbool_t, int8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_s64))) 
+svint64_t svld1sb_gather_s64offset_s64(svbool_t, int8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_u64))) +svuint64_t svld1sb_gather_u64offset_u64(svbool_t, int8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_s64))) +svint64_t svld1sb_gather_u64offset_s64(svbool_t, int8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_u32))) +svuint32_t svld1sh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_u64))) +svuint64_t svld1sh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_s32))) +svint32_t svld1sh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_s64))) +svint64_t svld1sh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_u32))) +svuint32_t svld1sh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_u64))) +svuint64_t svld1sh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_s32))) +svint32_t svld1sh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_s64))) +svint64_t svld1sh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_u32))) +svuint32_t svld1sh_gather_u32base_u32(svbool_t, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_u64))) +svuint64_t svld1sh_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_s32))) +svint32_t svld1sh_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_s64))) +svint64_t svld1sh_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_u32))) +svuint32_t svld1sh_gather_s32index_u32(svbool_t, int16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_s32))) +svint32_t svld1sh_gather_s32index_s32(svbool_t, int16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_u32))) +svuint32_t svld1sh_gather_u32index_u32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_s32))) +svint32_t svld1sh_gather_u32index_s32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_u64))) +svuint64_t svld1sh_gather_s64index_u64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_s64))) +svint64_t svld1sh_gather_s64index_s64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_u64))) +svuint64_t svld1sh_gather_u64index_u64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_s64))) +svint64_t svld1sh_gather_u64index_s64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_u32))) +svuint32_t svld1sh_gather_s32offset_u32(svbool_t, int16_t const *, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_s32))) +svint32_t svld1sh_gather_s32offset_s32(svbool_t, int16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_u32))) +svuint32_t svld1sh_gather_u32offset_u32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_s32))) +svint32_t svld1sh_gather_u32offset_s32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_u64))) +svuint64_t svld1sh_gather_s64offset_u64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_s64))) +svint64_t svld1sh_gather_s64offset_s64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_u64))) +svuint64_t svld1sh_gather_u64offset_u64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_s64))) +svint64_t svld1sh_gather_u64offset_s64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_u64))) +svuint64_t svld1sw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_s64))) +svint64_t svld1sw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_u64))) +svuint64_t svld1sw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_s64))) +svint64_t svld1sw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_u64))) 
+svuint64_t svld1sw_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_s64))) +svint64_t svld1sw_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_u64))) +svuint64_t svld1sw_gather_s64index_u64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_s64))) +svint64_t svld1sw_gather_s64index_s64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_u64))) +svuint64_t svld1sw_gather_u64index_u64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_s64))) +svint64_t svld1sw_gather_u64index_s64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_u64))) +svuint64_t svld1sw_gather_s64offset_u64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_s64))) +svint64_t svld1sw_gather_s64offset_s64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_u64))) +svuint64_t svld1sw_gather_u64offset_u64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_s64))) +svint64_t svld1sw_gather_u64offset_s64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_u32))) +svuint32_t svld1ub_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_u64))) +svuint64_t svld1ub_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_s32))) +svint32_t svld1ub_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_s64))) +svint64_t svld1ub_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_u32))) +svuint32_t svld1ub_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_u64))) +svuint64_t svld1ub_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_s32))) +svint32_t svld1ub_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_s64))) +svint64_t svld1ub_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_u32))) +svuint32_t svld1ub_gather_s32offset_u32(svbool_t, uint8_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_s32))) +svint32_t svld1ub_gather_s32offset_s32(svbool_t, uint8_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_u32))) +svuint32_t svld1ub_gather_u32offset_u32(svbool_t, uint8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_s32))) +svint32_t svld1ub_gather_u32offset_s32(svbool_t, uint8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_u64))) +svuint64_t svld1ub_gather_s64offset_u64(svbool_t, uint8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_s64))) +svint64_t svld1ub_gather_s64offset_s64(svbool_t, uint8_t const *, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_u64))) +svuint64_t svld1ub_gather_u64offset_u64(svbool_t, uint8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_s64))) +svint64_t svld1ub_gather_u64offset_s64(svbool_t, uint8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_u32))) +svuint32_t svld1uh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_u64))) +svuint64_t svld1uh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_s32))) +svint32_t svld1uh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_s64))) +svint64_t svld1uh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_u32))) +svuint32_t svld1uh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_u64))) +svuint64_t svld1uh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_s32))) +svint32_t svld1uh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_s64))) +svint64_t svld1uh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_u32))) +svuint32_t svld1uh_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_u64))) +svuint64_t 
svld1uh_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_s32))) +svint32_t svld1uh_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_s64))) +svint64_t svld1uh_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_u32))) +svuint32_t svld1uh_gather_s32index_u32(svbool_t, uint16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_s32))) +svint32_t svld1uh_gather_s32index_s32(svbool_t, uint16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_u32))) +svuint32_t svld1uh_gather_u32index_u32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_s32))) +svint32_t svld1uh_gather_u32index_s32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_u64))) +svuint64_t svld1uh_gather_s64index_u64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_s64))) +svint64_t svld1uh_gather_s64index_s64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_u64))) +svuint64_t svld1uh_gather_u64index_u64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_s64))) +svint64_t svld1uh_gather_u64index_s64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_u32))) +svuint32_t svld1uh_gather_s32offset_u32(svbool_t, uint16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_s32))) 
+svint32_t svld1uh_gather_s32offset_s32(svbool_t, uint16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_u32))) +svuint32_t svld1uh_gather_u32offset_u32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_s32))) +svint32_t svld1uh_gather_u32offset_s32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_u64))) +svuint64_t svld1uh_gather_s64offset_u64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_s64))) +svint64_t svld1uh_gather_s64offset_s64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_u64))) +svuint64_t svld1uh_gather_u64offset_u64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_s64))) +svint64_t svld1uh_gather_u64offset_s64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_u64))) +svuint64_t svld1uw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_s64))) +svint64_t svld1uw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_u64))) +svuint64_t svld1uw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_s64))) +svint64_t svld1uw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_u64))) +svuint64_t svld1uw_gather_u64base_u64(svbool_t, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_s64))) +svint64_t svld1uw_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_u64))) +svuint64_t svld1uw_gather_s64index_u64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_s64))) +svint64_t svld1uw_gather_s64index_s64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_u64))) +svuint64_t svld1uw_gather_u64index_u64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_s64))) +svint64_t svld1uw_gather_u64index_s64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_u64))) +svuint64_t svld1uw_gather_s64offset_u64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_s64))) +svint64_t svld1uw_gather_s64offset_s64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_u64))) +svuint64_t svld1uw_gather_u64offset_u64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_s64))) +svint64_t svld1uw_gather_u64offset_s64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u8))) +svuint8_t svldff1_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u32))) +svuint32_t svldff1_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u64))) +svuint64_t svldff1_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u16))) 
+svuint16_t svldff1_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s8))) +svint8_t svldff1_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f64))) +svfloat64_t svldff1_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f32))) +svfloat32_t svldff1_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f16))) +svfloat16_t svldff1_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s32))) +svint32_t svldff1_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s64))) +svint64_t svldff1_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s16))) +svint16_t svldff1_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_u32))) +svuint32_t svldff1_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_u64))) +svuint64_t svldff1_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_f64))) +svfloat64_t svldff1_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_f32))) +svfloat32_t svldff1_gather_u32base_index_f32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_s32))) +svint32_t svldff1_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_s64))) +svint64_t svldff1_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_u32))) +svuint32_t svldff1_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_u64))) +svuint64_t svldff1_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_f64))) +svfloat64_t svldff1_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_f32))) +svfloat32_t svldff1_gather_u32base_offset_f32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_s32))) +svint32_t svldff1_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_s64))) +svint64_t svldff1_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_u32))) +svuint32_t svldff1_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_u64))) +svuint64_t svldff1_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_f64))) +svfloat64_t svldff1_gather_u64base_f64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_f32))) +svfloat32_t svldff1_gather_u32base_f32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_s32))) +svint32_t svldff1_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_s64))) +svint64_t svldff1_gather_u64base_s64(svbool_t, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_u32))) +svuint32_t svldff1_gather_s32index_u32(svbool_t, uint32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_f32))) +svfloat32_t svldff1_gather_s32index_f32(svbool_t, float32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_s32))) +svint32_t svldff1_gather_s32index_s32(svbool_t, int32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_u32))) +svuint32_t svldff1_gather_u32index_u32(svbool_t, uint32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_f32))) +svfloat32_t svldff1_gather_u32index_f32(svbool_t, float32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_s32))) +svint32_t svldff1_gather_u32index_s32(svbool_t, int32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_u64))) +svuint64_t svldff1_gather_s64index_u64(svbool_t, uint64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_f64))) +svfloat64_t svldff1_gather_s64index_f64(svbool_t, float64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_s64))) +svint64_t svldff1_gather_s64index_s64(svbool_t, int64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_u64))) +svuint64_t svldff1_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_f64))) +svfloat64_t svldff1_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_s64))) +svint64_t 
svldff1_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_u32))) +svuint32_t svldff1_gather_s32offset_u32(svbool_t, uint32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_f32))) +svfloat32_t svldff1_gather_s32offset_f32(svbool_t, float32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_s32))) +svint32_t svldff1_gather_s32offset_s32(svbool_t, int32_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_u32))) +svuint32_t svldff1_gather_u32offset_u32(svbool_t, uint32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_f32))) +svfloat32_t svldff1_gather_u32offset_f32(svbool_t, float32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_s32))) +svint32_t svldff1_gather_u32offset_s32(svbool_t, int32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_u64))) +svuint64_t svldff1_gather_s64offset_u64(svbool_t, uint64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_f64))) +svfloat64_t svldff1_gather_s64offset_f64(svbool_t, float64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_s64))) +svint64_t svldff1_gather_s64offset_s64(svbool_t, int64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_u64))) +svuint64_t svldff1_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_f64))) +svfloat64_t svldff1_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_s64))) +svint64_t svldff1_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u8))) +svuint8_t svldff1_vnum_u8(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u32))) +svuint32_t svldff1_vnum_u32(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u64))) +svuint64_t svldff1_vnum_u64(svbool_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u16))) +svuint16_t svldff1_vnum_u16(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s8))) +svint8_t svldff1_vnum_s8(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f64))) +svfloat64_t svldff1_vnum_f64(svbool_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f32))) +svfloat32_t svldff1_vnum_f32(svbool_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f16))) +svfloat16_t svldff1_vnum_f16(svbool_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s32))) +svint32_t svldff1_vnum_s32(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s64))) +svint64_t svldff1_vnum_s64(svbool_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s16))) +svint16_t svldff1_vnum_s16(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_u32))) +svuint32_t svldff1sb_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_u64))) +svuint64_t svldff1sb_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_s32))) +svint32_t svldff1sb_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_s64))) +svint64_t svldff1sb_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_u32))) +svuint32_t svldff1sb_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_u64))) +svuint64_t svldff1sb_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_s32))) +svint32_t svldff1sb_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_s64))) +svint64_t svldff1sb_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_u32))) +svuint32_t svldff1sb_gather_s32offset_u32(svbool_t, int8_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_s32))) +svint32_t svldff1sb_gather_s32offset_s32(svbool_t, int8_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_u32))) +svuint32_t svldff1sb_gather_u32offset_u32(svbool_t, int8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_s32))) +svint32_t svldff1sb_gather_u32offset_s32(svbool_t, int8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_u64))) +svuint64_t 
svldff1sb_gather_s64offset_u64(svbool_t, int8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_s64))) +svint64_t svldff1sb_gather_s64offset_s64(svbool_t, int8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_u64))) +svuint64_t svldff1sb_gather_u64offset_u64(svbool_t, int8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_s64))) +svint64_t svldff1sb_gather_u64offset_s64(svbool_t, int8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_u32))) +svuint32_t svldff1sb_vnum_u32(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_u64))) +svuint64_t svldff1sb_vnum_u64(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_u16))) +svuint16_t svldff1sb_vnum_u16(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_s32))) +svint32_t svldff1sb_vnum_s32(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_s64))) +svint64_t svldff1sb_vnum_s64(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_vnum_s16))) +svint16_t svldff1sb_vnum_s16(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_u32))) +svuint32_t svldff1sb_u32(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_u64))) +svuint64_t svldff1sb_u64(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_u16))) +svuint16_t svldff1sb_u16(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_s32))) +svint32_t svldff1sb_s32(svbool_t, 
int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_s64))) +svint64_t svldff1sb_s64(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_s16))) +svint16_t svldff1sb_s16(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_u32))) +svuint32_t svldff1sh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_u64))) +svuint64_t svldff1sh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_s32))) +svint32_t svldff1sh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_s64))) +svint64_t svldff1sh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_u32))) +svuint32_t svldff1sh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_u64))) +svuint64_t svldff1sh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_s32))) +svint32_t svldff1sh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_s64))) +svint64_t svldff1sh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_u32))) +svuint32_t svldff1sh_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_u64))) +svuint64_t 
svldff1sh_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_s32))) +svint32_t svldff1sh_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_s64))) +svint64_t svldff1sh_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_u32))) +svuint32_t svldff1sh_gather_s32index_u32(svbool_t, int16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_s32))) +svint32_t svldff1sh_gather_s32index_s32(svbool_t, int16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_u32))) +svuint32_t svldff1sh_gather_u32index_u32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_s32))) +svint32_t svldff1sh_gather_u32index_s32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_u64))) +svuint64_t svldff1sh_gather_s64index_u64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_s64))) +svint64_t svldff1sh_gather_s64index_s64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_u64))) +svuint64_t svldff1sh_gather_u64index_u64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_s64))) +svint64_t svldff1sh_gather_u64index_s64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_u32))) +svuint32_t svldff1sh_gather_s32offset_u32(svbool_t, int16_t const *, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_s32))) +svint32_t svldff1sh_gather_s32offset_s32(svbool_t, int16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_u32))) +svuint32_t svldff1sh_gather_u32offset_u32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_s32))) +svint32_t svldff1sh_gather_u32offset_s32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_u64))) +svuint64_t svldff1sh_gather_s64offset_u64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_s64))) +svint64_t svldff1sh_gather_s64offset_s64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_u64))) +svuint64_t svldff1sh_gather_u64offset_u64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_s64))) +svint64_t svldff1sh_gather_u64offset_s64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_u32))) +svuint32_t svldff1sh_vnum_u32(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_u64))) +svuint64_t svldff1sh_vnum_u64(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_s32))) +svint32_t svldff1sh_vnum_s32(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_vnum_s64))) +svint64_t svldff1sh_vnum_s64(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_u32))) +svuint32_t svldff1sh_u32(svbool_t, int16_t const *); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_u64))) +svuint64_t svldff1sh_u64(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_s32))) +svint32_t svldff1sh_s32(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_s64))) +svint64_t svldff1sh_s64(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_u64))) +svuint64_t svldff1sw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_s64))) +svint64_t svldff1sw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_u64))) +svuint64_t svldff1sw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_s64))) +svint64_t svldff1sw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_u64))) +svuint64_t svldff1sw_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_s64))) +svint64_t svldff1sw_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_u64))) +svuint64_t svldff1sw_gather_s64index_u64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_s64))) +svint64_t svldff1sw_gather_s64index_s64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_u64))) +svuint64_t svldff1sw_gather_u64index_u64(svbool_t, int32_t const *, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_s64))) +svint64_t svldff1sw_gather_u64index_s64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_u64))) +svuint64_t svldff1sw_gather_s64offset_u64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_s64))) +svint64_t svldff1sw_gather_s64offset_s64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_u64))) +svuint64_t svldff1sw_gather_u64offset_u64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_s64))) +svint64_t svldff1sw_gather_u64offset_s64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_vnum_u64))) +svuint64_t svldff1sw_vnum_u64(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_vnum_s64))) +svint64_t svldff1sw_vnum_s64(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_u64))) +svuint64_t svldff1sw_u64(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_s64))) +svint64_t svldff1sw_s64(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_u32))) +svuint32_t svldff1ub_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_u64))) +svuint64_t svldff1ub_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_s32))) +svint32_t svldff1ub_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_s64))) +svint64_t svldff1ub_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_u32))) +svuint32_t svldff1ub_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_u64))) +svuint64_t svldff1ub_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_s32))) +svint32_t svldff1ub_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_s64))) +svint64_t svldff1ub_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_u32))) +svuint32_t svldff1ub_gather_s32offset_u32(svbool_t, uint8_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_s32))) +svint32_t svldff1ub_gather_s32offset_s32(svbool_t, uint8_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_u32))) +svuint32_t svldff1ub_gather_u32offset_u32(svbool_t, uint8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_s32))) +svint32_t svldff1ub_gather_u32offset_s32(svbool_t, uint8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_u64))) +svuint64_t svldff1ub_gather_s64offset_u64(svbool_t, uint8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_s64))) +svint64_t svldff1ub_gather_s64offset_s64(svbool_t, uint8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_u64))) +svuint64_t 
svldff1ub_gather_u64offset_u64(svbool_t, uint8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_s64))) +svint64_t svldff1ub_gather_u64offset_s64(svbool_t, uint8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_u32))) +svuint32_t svldff1ub_vnum_u32(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_u64))) +svuint64_t svldff1ub_vnum_u64(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_u16))) +svuint16_t svldff1ub_vnum_u16(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_s32))) +svint32_t svldff1ub_vnum_s32(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_s64))) +svint64_t svldff1ub_vnum_s64(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_vnum_s16))) +svint16_t svldff1ub_vnum_s16(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_u32))) +svuint32_t svldff1ub_u32(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_u64))) +svuint64_t svldff1ub_u64(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_u16))) +svuint16_t svldff1ub_u16(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_s32))) +svint32_t svldff1ub_s32(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_s64))) +svint64_t svldff1ub_s64(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_s16))) +svint16_t svldff1ub_s16(svbool_t, uint8_t const *); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_u32))) +svuint32_t svldff1uh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_u64))) +svuint64_t svldff1uh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_s32))) +svint32_t svldff1uh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_s64))) +svint64_t svldff1uh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_u32))) +svuint32_t svldff1uh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_u64))) +svuint64_t svldff1uh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_s32))) +svint32_t svldff1uh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_s64))) +svint64_t svldff1uh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_u32))) +svuint32_t svldff1uh_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_u64))) +svuint64_t svldff1uh_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_s32))) +svint32_t svldff1uh_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_s64))) +svint64_t 
svldff1uh_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_u32))) +svuint32_t svldff1uh_gather_s32index_u32(svbool_t, uint16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_s32))) +svint32_t svldff1uh_gather_s32index_s32(svbool_t, uint16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_u32))) +svuint32_t svldff1uh_gather_u32index_u32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_s32))) +svint32_t svldff1uh_gather_u32index_s32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_u64))) +svuint64_t svldff1uh_gather_s64index_u64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_s64))) +svint64_t svldff1uh_gather_s64index_s64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_u64))) +svuint64_t svldff1uh_gather_u64index_u64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_s64))) +svint64_t svldff1uh_gather_u64index_s64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_u32))) +svuint32_t svldff1uh_gather_s32offset_u32(svbool_t, uint16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_s32))) +svint32_t svldff1uh_gather_s32offset_s32(svbool_t, uint16_t const *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_u32))) +svuint32_t svldff1uh_gather_u32offset_u32(svbool_t, uint16_t const *, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_s32))) +svint32_t svldff1uh_gather_u32offset_s32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_u64))) +svuint64_t svldff1uh_gather_s64offset_u64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_s64))) +svint64_t svldff1uh_gather_s64offset_s64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_u64))) +svuint64_t svldff1uh_gather_u64offset_u64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_s64))) +svint64_t svldff1uh_gather_u64offset_s64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_u32))) +svuint32_t svldff1uh_vnum_u32(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_u64))) +svuint64_t svldff1uh_vnum_u64(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_s32))) +svint32_t svldff1uh_vnum_s32(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_vnum_s64))) +svint64_t svldff1uh_vnum_s64(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_u32))) +svuint32_t svldff1uh_u32(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_u64))) +svuint64_t svldff1uh_u64(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_s32))) +svint32_t svldff1uh_s32(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_s64))) +svint64_t 
svldff1uh_s64(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_u64))) +svuint64_t svldff1uw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_s64))) +svint64_t svldff1uw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_u64))) +svuint64_t svldff1uw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_s64))) +svint64_t svldff1uw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_u64))) +svuint64_t svldff1uw_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_s64))) +svint64_t svldff1uw_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_u64))) +svuint64_t svldff1uw_gather_s64index_u64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_s64))) +svint64_t svldff1uw_gather_s64index_s64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_u64))) +svuint64_t svldff1uw_gather_u64index_u64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_s64))) +svint64_t svldff1uw_gather_u64index_s64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_u64))) +svuint64_t svldff1uw_gather_s64offset_u64(svbool_t, uint32_t const *, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_s64))) +svint64_t svldff1uw_gather_s64offset_s64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_u64))) +svuint64_t svldff1uw_gather_u64offset_u64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_s64))) +svint64_t svldff1uw_gather_u64offset_s64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_vnum_u64))) +svuint64_t svldff1uw_vnum_u64(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_vnum_s64))) +svint64_t svldff1uw_vnum_s64(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_u64))) +svuint64_t svldff1uw_u64(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_s64))) +svint64_t svldff1uw_s64(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u8))) +svuint8_t svldnf1_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u32))) +svuint32_t svldnf1_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u64))) +svuint64_t svldnf1_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u16))) +svuint16_t svldnf1_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s8))) +svint8_t svldnf1_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f64))) +svfloat64_t svldnf1_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f32))) +svfloat32_t svldnf1_f32(svbool_t, float32_t const *); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f16))) +svfloat16_t svldnf1_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s32))) +svint32_t svldnf1_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s64))) +svint64_t svldnf1_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s16))) +svint16_t svldnf1_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u8))) +svuint8_t svldnf1_vnum_u8(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u32))) +svuint32_t svldnf1_vnum_u32(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u64))) +svuint64_t svldnf1_vnum_u64(svbool_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u16))) +svuint16_t svldnf1_vnum_u16(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s8))) +svint8_t svldnf1_vnum_s8(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f64))) +svfloat64_t svldnf1_vnum_f64(svbool_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f32))) +svfloat32_t svldnf1_vnum_f32(svbool_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f16))) +svfloat16_t svldnf1_vnum_f16(svbool_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s32))) +svint32_t svldnf1_vnum_s32(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s64))) +svint64_t svldnf1_vnum_s64(svbool_t, int64_t const *, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s16))) +svint16_t svldnf1_vnum_s16(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_u32))) +svuint32_t svldnf1sb_vnum_u32(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_u64))) +svuint64_t svldnf1sb_vnum_u64(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_u16))) +svuint16_t svldnf1sb_vnum_u16(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_s32))) +svint32_t svldnf1sb_vnum_s32(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_s64))) +svint64_t svldnf1sb_vnum_s64(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_vnum_s16))) +svint16_t svldnf1sb_vnum_s16(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_u32))) +svuint32_t svldnf1sb_u32(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_u64))) +svuint64_t svldnf1sb_u64(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_u16))) +svuint16_t svldnf1sb_u16(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_s32))) +svint32_t svldnf1sb_s32(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_s64))) +svint64_t svldnf1sb_s64(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sb_s16))) +svint16_t svldnf1sb_s16(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_u32))) +svuint32_t svldnf1sh_vnum_u32(svbool_t, int16_t const *, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_u64))) +svuint64_t svldnf1sh_vnum_u64(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_s32))) +svint32_t svldnf1sh_vnum_s32(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_vnum_s64))) +svint64_t svldnf1sh_vnum_s64(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_u32))) +svuint32_t svldnf1sh_u32(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_u64))) +svuint64_t svldnf1sh_u64(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_s32))) +svint32_t svldnf1sh_s32(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sh_s64))) +svint64_t svldnf1sh_s64(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_vnum_u64))) +svuint64_t svldnf1sw_vnum_u64(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_vnum_s64))) +svint64_t svldnf1sw_vnum_s64(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_u64))) +svuint64_t svldnf1sw_u64(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1sw_s64))) +svint64_t svldnf1sw_s64(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_u32))) +svuint32_t svldnf1ub_vnum_u32(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_u64))) +svuint64_t svldnf1ub_vnum_u64(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_u16))) +svuint16_t svldnf1ub_vnum_u16(svbool_t, uint8_t const *, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_s32))) +svint32_t svldnf1ub_vnum_s32(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_s64))) +svint64_t svldnf1ub_vnum_s64(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_vnum_s16))) +svint16_t svldnf1ub_vnum_s16(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_u32))) +svuint32_t svldnf1ub_u32(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_u64))) +svuint64_t svldnf1ub_u64(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_u16))) +svuint16_t svldnf1ub_u16(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_s32))) +svint32_t svldnf1ub_s32(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_s64))) +svint64_t svldnf1ub_s64(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1ub_s16))) +svint16_t svldnf1ub_s16(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_u32))) +svuint32_t svldnf1uh_vnum_u32(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_u64))) +svuint64_t svldnf1uh_vnum_u64(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_s32))) +svint32_t svldnf1uh_vnum_s32(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_vnum_s64))) +svint64_t svldnf1uh_vnum_s64(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_u32))) +svuint32_t svldnf1uh_u32(svbool_t, uint16_t const *); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_u64))) +svuint64_t svldnf1uh_u64(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_s32))) +svint32_t svldnf1uh_s32(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uh_s64))) +svint64_t svldnf1uh_s64(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_vnum_u64))) +svuint64_t svldnf1uw_vnum_u64(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_vnum_s64))) +svint64_t svldnf1uw_vnum_s64(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_u64))) +svuint64_t svldnf1uw_u64(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1uw_s64))) +svint64_t svldnf1uw_s64(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base))) +void svprfb_gather_u32base(svbool_t, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base))) +void svprfb_gather_u64base(svbool_t, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base_offset))) +void svprfb_gather_u32base_offset(svbool_t, svuint32_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base_offset))) +void svprfb_gather_u64base_offset(svbool_t, svuint64_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s32offset))) +void svprfb_gather_s32offset(svbool_t, void const *, svint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32offset))) +void svprfb_gather_u32offset(svbool_t, void const *, svuint32_t, enum svprfop); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s64offset))) +void svprfb_gather_s64offset(svbool_t, void const *, svint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64offset))) +void svprfb_gather_u64offset(svbool_t, void const *, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base))) +void svprfd_gather_u32base(svbool_t, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base))) +void svprfd_gather_u64base(svbool_t, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base_index))) +void svprfd_gather_u32base_index(svbool_t, svuint32_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base_index))) +void svprfd_gather_u64base_index(svbool_t, svuint64_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s32index))) +void svprfd_gather_s32index(svbool_t, void const *, svint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32index))) +void svprfd_gather_u32index(svbool_t, void const *, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s64index))) +void svprfd_gather_s64index(svbool_t, void const *, svint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64index))) +void svprfd_gather_u64index(svbool_t, void const *, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base))) +void svprfh_gather_u32base(svbool_t, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base))) +void svprfh_gather_u64base(svbool_t, svuint64_t, enum svprfop); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base_index))) +void svprfh_gather_u32base_index(svbool_t, svuint32_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base_index))) +void svprfh_gather_u64base_index(svbool_t, svuint64_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s32index))) +void svprfh_gather_s32index(svbool_t, void const *, svint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32index))) +void svprfh_gather_u32index(svbool_t, void const *, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s64index))) +void svprfh_gather_s64index(svbool_t, void const *, svint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64index))) +void svprfh_gather_u64index(svbool_t, void const *, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base))) +void svprfw_gather_u32base(svbool_t, svuint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base))) +void svprfw_gather_u64base(svbool_t, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base_index))) +void svprfw_gather_u32base_index(svbool_t, svuint32_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base_index))) +void svprfw_gather_u64base_index(svbool_t, svuint64_t, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s32index))) +void svprfw_gather_s32index(svbool_t, void const *, svint32_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32index))) +void svprfw_gather_u32index(svbool_t, void const *, svuint32_t, enum svprfop); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s64index))) +void svprfw_gather_s64index(svbool_t, void const *, svint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64index))) +void svprfw_gather_u64index(svbool_t, void const *, svuint64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrdffr))) +svbool_t svrdffr(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrdffr_z))) +svbool_t svrdffr_z(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsetffr))) +void svsetffr(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_u32))) +void svst1_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_u64))) +void svst1_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_f64))) +void svst1_scatter_u64base_index_f64(svbool_t, svuint64_t, int64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_f32))) +void svst1_scatter_u32base_index_f32(svbool_t, svuint32_t, int64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_s32))) +void svst1_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_s64))) +void svst1_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_u32))) +void svst1_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_u64))) +void 
svst1_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_f64))) +void svst1_scatter_u64base_offset_f64(svbool_t, svuint64_t, int64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_f32))) +void svst1_scatter_u32base_offset_f32(svbool_t, svuint32_t, int64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_s32))) +void svst1_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_s64))) +void svst1_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_u32))) +void svst1_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_u64))) +void svst1_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_f64))) +void svst1_scatter_u64base_f64(svbool_t, svuint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_f32))) +void svst1_scatter_u32base_f32(svbool_t, svuint32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_s32))) +void svst1_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_s64))) +void svst1_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_u32))) +void svst1_scatter_s32index_u32(svbool_t, uint32_t *, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_f32))) 
+void svst1_scatter_s32index_f32(svbool_t, float32_t *, svint32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_s32))) +void svst1_scatter_s32index_s32(svbool_t, int32_t *, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_u32))) +void svst1_scatter_u32index_u32(svbool_t, uint32_t *, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_f32))) +void svst1_scatter_u32index_f32(svbool_t, float32_t *, svuint32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_s32))) +void svst1_scatter_u32index_s32(svbool_t, int32_t *, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_u64))) +void svst1_scatter_s64index_u64(svbool_t, uint64_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_f64))) +void svst1_scatter_s64index_f64(svbool_t, float64_t *, svint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_s64))) +void svst1_scatter_s64index_s64(svbool_t, int64_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_u64))) +void svst1_scatter_u64index_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_f64))) +void svst1_scatter_u64index_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_s64))) +void svst1_scatter_u64index_s64(svbool_t, int64_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_u32))) +void svst1_scatter_s32offset_u32(svbool_t, uint32_t *, svint32_t, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_f32))) +void svst1_scatter_s32offset_f32(svbool_t, float32_t *, svint32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_s32))) +void svst1_scatter_s32offset_s32(svbool_t, int32_t *, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_u32))) +void svst1_scatter_u32offset_u32(svbool_t, uint32_t *, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_f32))) +void svst1_scatter_u32offset_f32(svbool_t, float32_t *, svuint32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_s32))) +void svst1_scatter_u32offset_s32(svbool_t, int32_t *, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_u64))) +void svst1_scatter_s64offset_u64(svbool_t, uint64_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_f64))) +void svst1_scatter_s64offset_f64(svbool_t, float64_t *, svint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_s64))) +void svst1_scatter_s64offset_s64(svbool_t, int64_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_u64))) +void svst1_scatter_u64offset_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_f64))) +void svst1_scatter_u64offset_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_s64))) +void svst1_scatter_u64offset_s64(svbool_t, int64_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_u32))) +void 
svst1b_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_u64))) +void svst1b_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_s32))) +void svst1b_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_s64))) +void svst1b_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_u32))) +void svst1b_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_u64))) +void svst1b_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_s32))) +void svst1b_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_s64))) +void svst1b_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_s32))) +void svst1b_scatter_s32offset_s32(svbool_t, int8_t *, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_u32))) +void svst1b_scatter_s32offset_u32(svbool_t, uint8_t *, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_s32))) +void svst1b_scatter_u32offset_s32(svbool_t, int8_t *, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_u32))) +void svst1b_scatter_u32offset_u32(svbool_t, uint8_t *, svuint32_t, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_s64))) +void svst1b_scatter_s64offset_s64(svbool_t, int8_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_u64))) +void svst1b_scatter_s64offset_u64(svbool_t, uint8_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_s64))) +void svst1b_scatter_u64offset_s64(svbool_t, int8_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_u64))) +void svst1b_scatter_u64offset_u64(svbool_t, uint8_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_u32))) +void svst1h_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_u64))) +void svst1h_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_s32))) +void svst1h_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_s64))) +void svst1h_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_u32))) +void svst1h_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_u64))) +void svst1h_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_s32))) +void svst1h_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_s64))) +void svst1h_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_u32))) +void svst1h_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_u64))) +void svst1h_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_s32))) +void svst1h_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_s64))) +void svst1h_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_s32))) +void svst1h_scatter_s32index_s32(svbool_t, int16_t *, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_u32))) +void svst1h_scatter_s32index_u32(svbool_t, uint16_t *, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_s32))) +void svst1h_scatter_u32index_s32(svbool_t, int16_t *, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_u32))) +void svst1h_scatter_u32index_u32(svbool_t, uint16_t *, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_s64))) +void svst1h_scatter_s64index_s64(svbool_t, int16_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_u64))) +void svst1h_scatter_s64index_u64(svbool_t, uint16_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_s64))) +void svst1h_scatter_u64index_s64(svbool_t, int16_t *, svuint64_t, 
svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_u64))) +void svst1h_scatter_u64index_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_s32))) +void svst1h_scatter_s32offset_s32(svbool_t, int16_t *, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_u32))) +void svst1h_scatter_s32offset_u32(svbool_t, uint16_t *, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_s32))) +void svst1h_scatter_u32offset_s32(svbool_t, int16_t *, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_u32))) +void svst1h_scatter_u32offset_u32(svbool_t, uint16_t *, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_s64))) +void svst1h_scatter_s64offset_s64(svbool_t, int16_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_u64))) +void svst1h_scatter_s64offset_u64(svbool_t, uint16_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_s64))) +void svst1h_scatter_u64offset_s64(svbool_t, int16_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_u64))) +void svst1h_scatter_u64offset_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_u64))) +void svst1w_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_s64))) +void svst1w_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_u64))) +void svst1w_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_s64))) +void svst1w_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_u64))) +void svst1w_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_s64))) +void svst1w_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_s64))) +void svst1w_scatter_s64index_s64(svbool_t, int32_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_u64))) +void svst1w_scatter_s64index_u64(svbool_t, uint32_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_s64))) +void svst1w_scatter_u64index_s64(svbool_t, int32_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_u64))) +void svst1w_scatter_u64index_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_s64))) +void svst1w_scatter_s64offset_s64(svbool_t, int32_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_u64))) +void svst1w_scatter_s64offset_u64(svbool_t, uint32_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_s64))) +void svst1w_scatter_u64offset_s64(svbool_t, int32_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_u64))) +void 
svst1w_scatter_u64offset_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f64))) +svfloat64_t svtmad_f64(svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f32))) +svfloat32_t svtmad_f32(svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f16))) +svfloat16_t svtmad_f16(svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f64))) +svfloat64_t svtsmul_f64(svfloat64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f32))) +svfloat32_t svtsmul_f32(svfloat32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f16))) +svfloat16_t svtsmul_f16(svfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f64))) +svfloat64_t svtssel_f64(svfloat64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f32))) +svfloat32_t svtssel_f32(svfloat32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f16))) +svfloat16_t svtssel_f16(svfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwrffr))) +void svwrffr(svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_u32offset))) +svuint32_t svadrb_offset(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_u64offset))) +svuint64_t svadrb_offset(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u32base_s32offset))) +svuint32_t svadrb_offset(svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrb_u64base_s64offset))) +svuint64_t svadrb_offset(svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_u32index))) 
+svuint32_t svadrd_index(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_u64index))) +svuint64_t svadrd_index(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u32base_s32index))) +svuint32_t svadrd_index(svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrd_u64base_s64index))) +svuint64_t svadrd_index(svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_u32index))) +svuint32_t svadrh_index(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_u64index))) +svuint64_t svadrh_index(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u32base_s32index))) +svuint32_t svadrh_index(svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrh_u64base_s64index))) +svuint64_t svadrh_index(svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_u32index))) +svuint32_t svadrw_index(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_u64index))) +svuint64_t svadrw_index(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u32base_s32index))) +svuint32_t svadrw_index(svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadrw_u64base_s64index))) +svuint64_t svadrw_index(svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u32))) +svuint32_t svcompact(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_u64))) +svuint64_t svcompact(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f64))) +svfloat64_t svcompact(svbool_t, svfloat64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_f32))) +svfloat32_t svcompact(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s32))) +svint32_t svcompact(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcompact_s64))) +svint64_t svcompact(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f64))) +svfloat64_t svexpa(svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f32))) +svfloat32_t svexpa(svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexpa_f16))) +svfloat16_t svexpa(svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_u32))) +svuint32_t svld1_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_u64))) +svuint64_t svld1_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_f64))) +svfloat64_t svld1_gather_index_f64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_f32))) +svfloat32_t svld1_gather_index_f32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_index_s32))) +svint32_t svld1_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_index_s64))) +svint64_t svld1_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_u32))) +svuint32_t svld1_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_u64))) +svuint64_t svld1_gather_offset_u64(svbool_t, svuint64_t, int64_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_f64))) +svfloat64_t svld1_gather_offset_f64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_f32))) +svfloat32_t svld1_gather_offset_f32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_offset_s32))) +svint32_t svld1_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_offset_s64))) +svint64_t svld1_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_u32))) +svuint32_t svld1_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_u64))) +svuint64_t svld1_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_f64))) +svfloat64_t svld1_gather_f64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_f32))) +svfloat32_t svld1_gather_f32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32base_s32))) +svint32_t svld1_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64base_s64))) +svint64_t svld1_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_u32))) +svuint32_t svld1_gather_index(svbool_t, uint32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_f32))) +svfloat32_t svld1_gather_index(svbool_t, float32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32index_s32))) +svint32_t svld1_gather_index(svbool_t, int32_t const *, svint32_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_u32))) +svuint32_t svld1_gather_index(svbool_t, uint32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_f32))) +svfloat32_t svld1_gather_index(svbool_t, float32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32index_s32))) +svint32_t svld1_gather_index(svbool_t, int32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_u64))) +svuint64_t svld1_gather_index(svbool_t, uint64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_f64))) +svfloat64_t svld1_gather_index(svbool_t, float64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64index_s64))) +svint64_t svld1_gather_index(svbool_t, int64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_u64))) +svuint64_t svld1_gather_index(svbool_t, uint64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_f64))) +svfloat64_t svld1_gather_index(svbool_t, float64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64index_s64))) +svint64_t svld1_gather_index(svbool_t, int64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_u32))) +svuint32_t svld1_gather_offset(svbool_t, uint32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_f32))) +svfloat32_t svld1_gather_offset(svbool_t, float32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s32offset_s32))) +svint32_t svld1_gather_offset(svbool_t, int32_t const *, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_u32))) +svuint32_t svld1_gather_offset(svbool_t, uint32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_f32))) +svfloat32_t svld1_gather_offset(svbool_t, float32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u32offset_s32))) +svint32_t svld1_gather_offset(svbool_t, int32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_u64))) +svuint64_t svld1_gather_offset(svbool_t, uint64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_f64))) +svfloat64_t svld1_gather_offset(svbool_t, float64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_s64offset_s64))) +svint64_t svld1_gather_offset(svbool_t, int64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_u64))) +svuint64_t svld1_gather_offset(svbool_t, uint64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_f64))) +svfloat64_t svld1_gather_offset(svbool_t, float64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_gather_u64offset_s64))) +svint64_t svld1_gather_offset(svbool_t, int64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_u32))) +svuint32_t svld1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_u64))) +svuint64_t svld1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_offset_s32))) +svint32_t svld1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_offset_s64))) +svint64_t svld1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_u32))) +svuint32_t svld1sb_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_u64))) +svuint64_t svld1sb_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32base_s32))) +svint32_t svld1sb_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64base_s64))) +svint64_t svld1sb_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_u32))) +svuint32_t svld1sb_gather_offset_u32(svbool_t, int8_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s32offset_s32))) +svint32_t svld1sb_gather_offset_s32(svbool_t, int8_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_u32))) +svuint32_t svld1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u32offset_s32))) +svint32_t svld1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_u64))) +svuint64_t svld1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_s64offset_s64))) +svint64_t svld1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_u64))) +svuint64_t svld1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_gather_u64offset_s64))) +svint64_t svld1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_u32))) +svuint32_t svld1sh_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_u64))) +svuint64_t svld1sh_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_index_s32))) +svint32_t svld1sh_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_index_s64))) +svint64_t svld1sh_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_u32))) +svuint32_t svld1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_u64))) +svuint64_t svld1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_offset_s32))) +svint32_t svld1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_offset_s64))) +svint64_t svld1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_u32))) +svuint32_t svld1sh_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_u64))) +svuint64_t svld1sh_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32base_s32))) +svint32_t svld1sh_gather_s32(svbool_t, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64base_s64))) +svint64_t svld1sh_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_u32))) +svuint32_t svld1sh_gather_index_u32(svbool_t, int16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32index_s32))) +svint32_t svld1sh_gather_index_s32(svbool_t, int16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_u32))) +svuint32_t svld1sh_gather_index_u32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32index_s32))) +svint32_t svld1sh_gather_index_s32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_u64))) +svuint64_t svld1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64index_s64))) +svint64_t svld1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_u64))) +svuint64_t svld1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64index_s64))) +svint64_t svld1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_u32))) +svuint32_t svld1sh_gather_offset_u32(svbool_t, int16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s32offset_s32))) +svint32_t svld1sh_gather_offset_s32(svbool_t, int16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_u32))) +svuint32_t svld1sh_gather_offset_u32(svbool_t, int16_t const *, 
svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u32offset_s32))) +svint32_t svld1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_u64))) +svuint64_t svld1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_s64offset_s64))) +svint64_t svld1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_u64))) +svuint64_t svld1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_gather_u64offset_s64))) +svint64_t svld1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_u64))) +svuint64_t svld1sw_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_index_s64))) +svint64_t svld1sw_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_u64))) +svuint64_t svld1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_offset_s64))) +svint64_t svld1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_u64))) +svuint64_t svld1sw_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64base_s64))) +svint64_t svld1sw_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_u64))) +svuint64_t svld1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64index_s64))) +svint64_t svld1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_u64))) +svuint64_t svld1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64index_s64))) +svint64_t svld1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_u64))) +svuint64_t svld1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_s64offset_s64))) +svint64_t svld1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_u64))) +svuint64_t svld1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_gather_u64offset_s64))) +svint64_t svld1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_u32))) +svuint32_t svld1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_u64))) +svuint64_t svld1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_offset_s32))) +svint32_t svld1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_offset_s64))) +svint64_t svld1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_u32))) +svuint32_t svld1ub_gather_u32(svbool_t, 
svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_u64))) +svuint64_t svld1ub_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32base_s32))) +svint32_t svld1ub_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64base_s64))) +svint64_t svld1ub_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_u32))) +svuint32_t svld1ub_gather_offset_u32(svbool_t, uint8_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s32offset_s32))) +svint32_t svld1ub_gather_offset_s32(svbool_t, uint8_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_u32))) +svuint32_t svld1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u32offset_s32))) +svint32_t svld1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_u64))) +svuint64_t svld1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_s64offset_s64))) +svint64_t svld1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_u64))) +svuint64_t svld1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_gather_u64offset_s64))) +svint64_t svld1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_u32))) +svuint32_t svld1uh_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_u64))) +svuint64_t svld1uh_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_index_s32))) +svint32_t svld1uh_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_index_s64))) +svint64_t svld1uh_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_u32))) +svuint32_t svld1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_u64))) +svuint64_t svld1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_offset_s32))) +svint32_t svld1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_offset_s64))) +svint64_t svld1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_u32))) +svuint32_t svld1uh_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_u64))) +svuint64_t svld1uh_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32base_s32))) +svint32_t svld1uh_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64base_s64))) +svint64_t svld1uh_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_u32))) +svuint32_t svld1uh_gather_index_u32(svbool_t, uint16_t const *, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32index_s32))) +svint32_t svld1uh_gather_index_s32(svbool_t, uint16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_u32))) +svuint32_t svld1uh_gather_index_u32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32index_s32))) +svint32_t svld1uh_gather_index_s32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_u64))) +svuint64_t svld1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64index_s64))) +svint64_t svld1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_u64))) +svuint64_t svld1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64index_s64))) +svint64_t svld1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_u32))) +svuint32_t svld1uh_gather_offset_u32(svbool_t, uint16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s32offset_s32))) +svint32_t svld1uh_gather_offset_s32(svbool_t, uint16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_u32))) +svuint32_t svld1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u32offset_s32))) +svint32_t svld1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_u64))) +svuint64_t 
svld1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_s64offset_s64))) +svint64_t svld1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_u64))) +svuint64_t svld1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_gather_u64offset_s64))) +svint64_t svld1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_u64))) +svuint64_t svld1uw_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_index_s64))) +svint64_t svld1uw_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_u64))) +svuint64_t svld1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_offset_s64))) +svint64_t svld1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_u64))) +svuint64_t svld1uw_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64base_s64))) +svint64_t svld1uw_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_u64))) +svuint64_t svld1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64index_s64))) +svint64_t svld1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_u64))) +svuint64_t 
svld1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64index_s64))) +svint64_t svld1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_u64))) +svuint64_t svld1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_s64offset_s64))) +svint64_t svld1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_u64))) +svuint64_t svld1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_gather_u64offset_s64))) +svint64_t svld1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u8))) +svuint8_t svldff1(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u32))) +svuint32_t svldff1(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u64))) +svuint64_t svldff1(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_u16))) +svuint16_t svldff1(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s8))) +svint8_t svldff1(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f64))) +svfloat64_t svldff1(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f32))) +svfloat32_t svldff1(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_f16))) +svfloat16_t svldff1(svbool_t, float16_t const *); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s32))) +svint32_t svldff1(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s64))) +svint64_t svldff1(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_s16))) +svint16_t svldff1(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_u32))) +svuint32_t svldff1_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_u64))) +svuint64_t svldff1_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_f64))) +svfloat64_t svldff1_gather_index_f64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_f32))) +svfloat32_t svldff1_gather_index_f32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_index_s32))) +svint32_t svldff1_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_index_s64))) +svint64_t svldff1_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_u32))) +svuint32_t svldff1_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_u64))) +svuint64_t svldff1_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_f64))) +svfloat64_t svldff1_gather_offset_f64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_f32))) +svfloat32_t 
svldff1_gather_offset_f32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_offset_s32))) +svint32_t svldff1_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_offset_s64))) +svint64_t svldff1_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_u32))) +svuint32_t svldff1_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_u64))) +svuint64_t svldff1_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_f64))) +svfloat64_t svldff1_gather_f64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_f32))) +svfloat32_t svldff1_gather_f32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32base_s32))) +svint32_t svldff1_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64base_s64))) +svint64_t svldff1_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_u32))) +svuint32_t svldff1_gather_index(svbool_t, uint32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_f32))) +svfloat32_t svldff1_gather_index(svbool_t, float32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32index_s32))) +svint32_t svldff1_gather_index(svbool_t, int32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_u32))) +svuint32_t svldff1_gather_index(svbool_t, uint32_t const *, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_f32))) +svfloat32_t svldff1_gather_index(svbool_t, float32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32index_s32))) +svint32_t svldff1_gather_index(svbool_t, int32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_u64))) +svuint64_t svldff1_gather_index(svbool_t, uint64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_f64))) +svfloat64_t svldff1_gather_index(svbool_t, float64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64index_s64))) +svint64_t svldff1_gather_index(svbool_t, int64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_u64))) +svuint64_t svldff1_gather_index(svbool_t, uint64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_f64))) +svfloat64_t svldff1_gather_index(svbool_t, float64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64index_s64))) +svint64_t svldff1_gather_index(svbool_t, int64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_u32))) +svuint32_t svldff1_gather_offset(svbool_t, uint32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_f32))) +svfloat32_t svldff1_gather_offset(svbool_t, float32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s32offset_s32))) +svint32_t svldff1_gather_offset(svbool_t, int32_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_u32))) +svuint32_t svldff1_gather_offset(svbool_t, uint32_t const *, svuint32_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_f32))) +svfloat32_t svldff1_gather_offset(svbool_t, float32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u32offset_s32))) +svint32_t svldff1_gather_offset(svbool_t, int32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_u64))) +svuint64_t svldff1_gather_offset(svbool_t, uint64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_f64))) +svfloat64_t svldff1_gather_offset(svbool_t, float64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_s64offset_s64))) +svint64_t svldff1_gather_offset(svbool_t, int64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_u64))) +svuint64_t svldff1_gather_offset(svbool_t, uint64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_f64))) +svfloat64_t svldff1_gather_offset(svbool_t, float64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_gather_u64offset_s64))) +svint64_t svldff1_gather_offset(svbool_t, int64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u8))) +svuint8_t svldff1_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u32))) +svuint32_t svldff1_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u64))) +svuint64_t svldff1_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_u16))) +svuint16_t svldff1_vnum(svbool_t, uint16_t const *, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s8))) +svint8_t svldff1_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f64))) +svfloat64_t svldff1_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f32))) +svfloat32_t svldff1_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_f16))) +svfloat16_t svldff1_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s32))) +svint32_t svldff1_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s64))) +svint64_t svldff1_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_s16))) +svint16_t svldff1_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_u32))) +svuint32_t svldff1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_u64))) +svuint64_t svldff1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_offset_s32))) +svint32_t svldff1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_offset_s64))) +svint64_t svldff1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_u32))) +svuint32_t svldff1sb_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_u64))) +svuint64_t svldff1sb_gather_u64(svbool_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32base_s32))) +svint32_t svldff1sb_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64base_s64))) +svint64_t svldff1sb_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_u32))) +svuint32_t svldff1sb_gather_offset_u32(svbool_t, int8_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s32offset_s32))) +svint32_t svldff1sb_gather_offset_s32(svbool_t, int8_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_u32))) +svuint32_t svldff1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u32offset_s32))) +svint32_t svldff1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_u64))) +svuint64_t svldff1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_s64offset_s64))) +svint64_t svldff1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_u64))) +svuint64_t svldff1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sb_gather_u64offset_s64))) +svint64_t svldff1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_u32))) +svuint32_t svldff1sh_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_u64))) +svuint64_t 
svldff1sh_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_index_s32))) +svint32_t svldff1sh_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_index_s64))) +svint64_t svldff1sh_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_u32))) +svuint32_t svldff1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_u64))) +svuint64_t svldff1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_offset_s32))) +svint32_t svldff1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_offset_s64))) +svint64_t svldff1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_u32))) +svuint32_t svldff1sh_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_u64))) +svuint64_t svldff1sh_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32base_s32))) +svint32_t svldff1sh_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64base_s64))) +svint64_t svldff1sh_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_u32))) +svuint32_t svldff1sh_gather_index_u32(svbool_t, int16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32index_s32))) +svint32_t 
svldff1sh_gather_index_s32(svbool_t, int16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_u32))) +svuint32_t svldff1sh_gather_index_u32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32index_s32))) +svint32_t svldff1sh_gather_index_s32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_u64))) +svuint64_t svldff1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64index_s64))) +svint64_t svldff1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_u64))) +svuint64_t svldff1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64index_s64))) +svint64_t svldff1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_u32))) +svuint32_t svldff1sh_gather_offset_u32(svbool_t, int16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s32offset_s32))) +svint32_t svldff1sh_gather_offset_s32(svbool_t, int16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_u32))) +svuint32_t svldff1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u32offset_s32))) +svint32_t svldff1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_u64))) +svuint64_t svldff1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_s64offset_s64))) +svint64_t svldff1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_u64))) +svuint64_t svldff1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sh_gather_u64offset_s64))) +svint64_t svldff1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_u64))) +svuint64_t svldff1sw_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_index_s64))) +svint64_t svldff1sw_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_u64))) +svuint64_t svldff1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_offset_s64))) +svint64_t svldff1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_u64))) +svuint64_t svldff1sw_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64base_s64))) +svint64_t svldff1sw_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_u64))) +svuint64_t svldff1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64index_s64))) +svint64_t svldff1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_u64))) +svuint64_t svldff1sw_gather_index_u64(svbool_t, int32_t 
const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64index_s64))) +svint64_t svldff1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_u64))) +svuint64_t svldff1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_s64offset_s64))) +svint64_t svldff1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_u64))) +svuint64_t svldff1sw_gather_offset_u64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1sw_gather_u64offset_s64))) +svint64_t svldff1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_u32))) +svuint32_t svldff1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_u64))) +svuint64_t svldff1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_offset_s32))) +svint32_t svldff1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_offset_s64))) +svint64_t svldff1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_u32))) +svuint32_t svldff1ub_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_u64))) +svuint64_t svldff1ub_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32base_s32))) +svint32_t 
svldff1ub_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64base_s64))) +svint64_t svldff1ub_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_u32))) +svuint32_t svldff1ub_gather_offset_u32(svbool_t, uint8_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s32offset_s32))) +svint32_t svldff1ub_gather_offset_s32(svbool_t, uint8_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_u32))) +svuint32_t svldff1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u32offset_s32))) +svint32_t svldff1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_u64))) +svuint64_t svldff1ub_gather_offset_u64(svbool_t, uint8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_s64offset_s64))) +svint64_t svldff1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_u64))) +svuint64_t svldff1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1ub_gather_u64offset_s64))) +svint64_t svldff1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_u32))) +svuint32_t svldff1uh_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_u64))) +svuint64_t svldff1uh_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_index_s32))) +svint32_t svldff1uh_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_index_s64))) +svint64_t svldff1uh_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_u32))) +svuint32_t svldff1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_u64))) +svuint64_t svldff1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_offset_s32))) +svint32_t svldff1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_offset_s64))) +svint64_t svldff1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_u32))) +svuint32_t svldff1uh_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_u64))) +svuint64_t svldff1uh_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32base_s32))) +svint32_t svldff1uh_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64base_s64))) +svint64_t svldff1uh_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_u32))) +svuint32_t svldff1uh_gather_index_u32(svbool_t, uint16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32index_s32))) +svint32_t svldff1uh_gather_index_s32(svbool_t, uint16_t const *, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_u32))) +svuint32_t svldff1uh_gather_index_u32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32index_s32))) +svint32_t svldff1uh_gather_index_s32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_u64))) +svuint64_t svldff1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64index_s64))) +svint64_t svldff1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_u64))) +svuint64_t svldff1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64index_s64))) +svint64_t svldff1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_u32))) +svuint32_t svldff1uh_gather_offset_u32(svbool_t, uint16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s32offset_s32))) +svint32_t svldff1uh_gather_offset_s32(svbool_t, uint16_t const *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_u32))) +svuint32_t svldff1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u32offset_s32))) +svint32_t svldff1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_u64))) +svuint64_t svldff1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_s64offset_s64))) +svint64_t svldff1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_u64))) +svuint64_t svldff1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uh_gather_u64offset_s64))) +svint64_t svldff1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_u64))) +svuint64_t svldff1uw_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_index_s64))) +svint64_t svldff1uw_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_u64))) +svuint64_t svldff1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_offset_s64))) +svint64_t svldff1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_u64))) +svuint64_t svldff1uw_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64base_s64))) +svint64_t svldff1uw_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_u64))) +svuint64_t svldff1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64index_s64))) +svint64_t svldff1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_u64))) +svuint64_t svldff1uw_gather_index_u64(svbool_t, 
uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64index_s64))) +svint64_t svldff1uw_gather_index_s64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_u64))) +svuint64_t svldff1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_s64offset_s64))) +svint64_t svldff1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_u64))) +svuint64_t svldff1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1uw_gather_u64offset_s64))) +svint64_t svldff1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u8))) +svuint8_t svldnf1(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u32))) +svuint32_t svldnf1(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u64))) +svuint64_t svldnf1(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_u16))) +svuint16_t svldnf1(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s8))) +svint8_t svldnf1(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f64))) +svfloat64_t svldnf1(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f32))) +svfloat32_t svldnf1(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_f16))) +svfloat16_t svldnf1(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s32))) 
+svint32_t svldnf1(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s64))) +svint64_t svldnf1(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_s16))) +svint16_t svldnf1(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u8))) +svuint8_t svldnf1_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u32))) +svuint32_t svldnf1_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u64))) +svuint64_t svldnf1_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_u16))) +svuint16_t svldnf1_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s8))) +svint8_t svldnf1_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f64))) +svfloat64_t svldnf1_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f32))) +svfloat32_t svldnf1_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_f16))) +svfloat16_t svldnf1_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s32))) +svint32_t svldnf1_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s64))) +svint64_t svldnf1_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_s16))) +svint16_t svldnf1_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base))) +void svprfb_gather(svbool_t, 
svuint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base))) +void svprfb_gather(svbool_t, svuint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32base_offset))) +void svprfb_gather_offset(svbool_t, svuint32_t, int64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64base_offset))) +void svprfb_gather_offset(svbool_t, svuint64_t, int64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s32offset))) +void svprfb_gather_offset(svbool_t, void const *, svint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u32offset))) +void svprfb_gather_offset(svbool_t, void const *, svuint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_s64offset))) +void svprfb_gather_offset(svbool_t, void const *, svint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_gather_u64offset))) +void svprfb_gather_offset(svbool_t, void const *, svuint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base))) +void svprfd_gather(svbool_t, svuint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base))) +void svprfd_gather(svbool_t, svuint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32base_index))) +void svprfd_gather_index(svbool_t, svuint32_t, int64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64base_index))) +void svprfd_gather_index(svbool_t, svuint64_t, int64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s32index))) +void svprfd_gather_index(svbool_t, void const *, svint32_t, enum svprfop); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u32index))) +void svprfd_gather_index(svbool_t, void const *, svuint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_s64index))) +void svprfd_gather_index(svbool_t, void const *, svint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_gather_u64index))) +void svprfd_gather_index(svbool_t, void const *, svuint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base))) +void svprfh_gather(svbool_t, svuint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base))) +void svprfh_gather(svbool_t, svuint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32base_index))) +void svprfh_gather_index(svbool_t, svuint32_t, int64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64base_index))) +void svprfh_gather_index(svbool_t, svuint64_t, int64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s32index))) +void svprfh_gather_index(svbool_t, void const *, svint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u32index))) +void svprfh_gather_index(svbool_t, void const *, svuint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_s64index))) +void svprfh_gather_index(svbool_t, void const *, svint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_gather_u64index))) +void svprfh_gather_index(svbool_t, void const *, svuint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base))) +void svprfw_gather(svbool_t, svuint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base))) +void 
svprfw_gather(svbool_t, svuint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32base_index))) +void svprfw_gather_index(svbool_t, svuint32_t, int64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64base_index))) +void svprfw_gather_index(svbool_t, svuint64_t, int64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s32index))) +void svprfw_gather_index(svbool_t, void const *, svint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u32index))) +void svprfw_gather_index(svbool_t, void const *, svuint32_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_s64index))) +void svprfw_gather_index(svbool_t, void const *, svint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_gather_u64index))) +void svprfw_gather_index(svbool_t, void const *, svuint64_t, enum svprfop); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_u32))) +void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_u64))) +void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_f64))) +void svst1_scatter_index(svbool_t, svuint64_t, int64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_f32))) +void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_index_s32))) +void svst1_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_index_s64))) +void 
svst1_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_u32))) +void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_u64))) +void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_f64))) +void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_f32))) +void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_offset_s32))) +void svst1_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_offset_s64))) +void svst1_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_u32))) +void svst1_scatter(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_u64))) +void svst1_scatter(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_f64))) +void svst1_scatter(svbool_t, svuint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_f32))) +void svst1_scatter(svbool_t, svuint32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32base_s32))) +void svst1_scatter(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64base_s64))) +void svst1_scatter(svbool_t, svuint64_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_u32))) +void svst1_scatter_index(svbool_t, uint32_t *, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_f32))) +void svst1_scatter_index(svbool_t, float32_t *, svint32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32index_s32))) +void svst1_scatter_index(svbool_t, int32_t *, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_u32))) +void svst1_scatter_index(svbool_t, uint32_t *, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_f32))) +void svst1_scatter_index(svbool_t, float32_t *, svuint32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32index_s32))) +void svst1_scatter_index(svbool_t, int32_t *, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_u64))) +void svst1_scatter_index(svbool_t, uint64_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_f64))) +void svst1_scatter_index(svbool_t, float64_t *, svint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64index_s64))) +void svst1_scatter_index(svbool_t, int64_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_u64))) +void svst1_scatter_index(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_f64))) +void svst1_scatter_index(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64index_s64))) +void svst1_scatter_index(svbool_t, int64_t *, svuint64_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_u32))) +void svst1_scatter_offset(svbool_t, uint32_t *, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_f32))) +void svst1_scatter_offset(svbool_t, float32_t *, svint32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s32offset_s32))) +void svst1_scatter_offset(svbool_t, int32_t *, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_u32))) +void svst1_scatter_offset(svbool_t, uint32_t *, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_f32))) +void svst1_scatter_offset(svbool_t, float32_t *, svuint32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u32offset_s32))) +void svst1_scatter_offset(svbool_t, int32_t *, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_u64))) +void svst1_scatter_offset(svbool_t, uint64_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_f64))) +void svst1_scatter_offset(svbool_t, float64_t *, svint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_s64offset_s64))) +void svst1_scatter_offset(svbool_t, int64_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_u64))) +void svst1_scatter_offset(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_f64))) +void svst1_scatter_offset(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_scatter_u64offset_s64))) +void svst1_scatter_offset(svbool_t, int64_t *, svuint64_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_u32))) +void svst1b_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_u64))) +void svst1b_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_offset_s32))) +void svst1b_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_offset_s64))) +void svst1b_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_u32))) +void svst1b_scatter(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_u64))) +void svst1b_scatter(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32base_s32))) +void svst1b_scatter(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64base_s64))) +void svst1b_scatter(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_s32))) +void svst1b_scatter_offset(svbool_t, int8_t *, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s32offset_u32))) +void svst1b_scatter_offset(svbool_t, uint8_t *, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_s32))) +void svst1b_scatter_offset(svbool_t, int8_t *, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u32offset_u32))) +void svst1b_scatter_offset(svbool_t, uint8_t *, svuint32_t, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_s64))) +void svst1b_scatter_offset(svbool_t, int8_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_s64offset_u64))) +void svst1b_scatter_offset(svbool_t, uint8_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_s64))) +void svst1b_scatter_offset(svbool_t, int8_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_scatter_u64offset_u64))) +void svst1b_scatter_offset(svbool_t, uint8_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_u32))) +void svst1h_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_u64))) +void svst1h_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_index_s32))) +void svst1h_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_index_s64))) +void svst1h_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_u32))) +void svst1h_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_u64))) +void svst1h_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_offset_s32))) +void svst1h_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_offset_s64))) +void svst1h_scatter_offset(svbool_t, svuint64_t, 
int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_u32))) +void svst1h_scatter(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_u64))) +void svst1h_scatter(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32base_s32))) +void svst1h_scatter(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64base_s64))) +void svst1h_scatter(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_s32))) +void svst1h_scatter_index(svbool_t, int16_t *, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32index_u32))) +void svst1h_scatter_index(svbool_t, uint16_t *, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_s32))) +void svst1h_scatter_index(svbool_t, int16_t *, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32index_u32))) +void svst1h_scatter_index(svbool_t, uint16_t *, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_s64))) +void svst1h_scatter_index(svbool_t, int16_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64index_u64))) +void svst1h_scatter_index(svbool_t, uint16_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_s64))) +void svst1h_scatter_index(svbool_t, int16_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64index_u64))) +void svst1h_scatter_index(svbool_t, uint16_t *, svuint64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_s32))) +void svst1h_scatter_offset(svbool_t, int16_t *, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s32offset_u32))) +void svst1h_scatter_offset(svbool_t, uint16_t *, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_s32))) +void svst1h_scatter_offset(svbool_t, int16_t *, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u32offset_u32))) +void svst1h_scatter_offset(svbool_t, uint16_t *, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_s64))) +void svst1h_scatter_offset(svbool_t, int16_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_s64offset_u64))) +void svst1h_scatter_offset(svbool_t, uint16_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_s64))) +void svst1h_scatter_offset(svbool_t, int16_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_scatter_u64offset_u64))) +void svst1h_scatter_offset(svbool_t, uint16_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_u64))) +void svst1w_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_index_s64))) +void svst1w_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_u64))) +void svst1w_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_offset_s64))) +void svst1w_scatter_offset(svbool_t, svuint64_t, int64_t, 
svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_u64))) +void svst1w_scatter(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64base_s64))) +void svst1w_scatter(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_s64))) +void svst1w_scatter_index(svbool_t, int32_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64index_u64))) +void svst1w_scatter_index(svbool_t, uint32_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_s64))) +void svst1w_scatter_index(svbool_t, int32_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64index_u64))) +void svst1w_scatter_index(svbool_t, uint32_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_s64))) +void svst1w_scatter_offset(svbool_t, int32_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_s64offset_u64))) +void svst1w_scatter_offset(svbool_t, uint32_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_s64))) +void svst1w_scatter_offset(svbool_t, int32_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_scatter_u64offset_u64))) +void svst1w_scatter_offset(svbool_t, uint32_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f64))) +svfloat64_t svtmad(svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f32))) +svfloat32_t svtmad(svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtmad_f16))) +svfloat16_t 
svtmad(svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f64))) +svfloat64_t svtsmul(svfloat64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f32))) +svfloat32_t svtsmul(svfloat32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtsmul_f16))) +svfloat16_t svtsmul(svfloat16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f64))) +svfloat64_t svtssel(svfloat64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f32))) +svfloat32_t svtssel(svfloat32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtssel_f16))) +svfloat16_t svtssel(svfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmmla_f32))) +svfloat32_t svbfmmla_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_bf16))) +svbfloat16_t svldff1_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_bf16))) +svbfloat16_t svldff1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_bf16))) +svbfloat16_t svldnf1_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_bf16))) +svbfloat16_t svldnf1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmmla_f32))) +svfloat32_t svbfmmla(svfloat32_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_bf16))) +svbfloat16_t svldff1(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldff1_vnum_bf16))) +svbfloat16_t svldff1_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_bf16))) +svbfloat16_t svldnf1(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnf1_vnum_bf16))) +svbfloat16_t svldnf1_vnum(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_bf16))) +svbfloat16_t svtrn1q_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_bf16))) +svbfloat16_t svtrn2q_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_bf16))) +svbfloat16_t svuzp1q_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_bf16))) +svbfloat16_t svuzp2q_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_bf16))) +svbfloat16_t svzip1q_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_bf16))) +svbfloat16_t svzip2q_bf16(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_bf16))) +svbfloat16_t svtrn1q(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_bf16))) +svbfloat16_t svtrn2q(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_bf16))) +svbfloat16_t svuzp1q(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_bf16))) +svbfloat16_t svuzp2q(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_bf16))) +svbfloat16_t svzip1q(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_bf16))) +svbfloat16_t svzip2q(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_n_f32))) +svfloat32_t svbfdot_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_f32))) +svfloat32_t svbfdot_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_lane_f32))) +svfloat32_t svbfdot_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_n_f32))) +svfloat32_t svbfmlalb_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_f32))) +svfloat32_t svbfmlalb_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_lane_f32))) +svfloat32_t svbfmlalb_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_n_f32))) +svfloat32_t svbfmlalt_n_f32(svfloat32_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_f32))) +svfloat32_t svbfmlalt_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_lane_f32))) +svfloat32_t svbfmlalt_lane_f32(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_bf16))) +bfloat16_t svclasta_n_bf16(svbool_t, bfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_bf16))) +svbfloat16_t svclasta_bf16(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_bf16))) +bfloat16_t svclastb_n_bf16(svbool_t, bfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_bf16))) +svbfloat16_t svclastb_bf16(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_m))) +svuint16_t svcnt_bf16_m(svuint16_t, svbool_t, svbfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_x))) +svuint16_t svcnt_bf16_x(svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_z))) +svuint16_t svcnt_bf16_z(svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_bf16))) +svbfloat16x2_t svcreate2_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_bf16))) +svbfloat16x3_t svcreate3_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_bf16))) +svbfloat16x4_t svcreate4_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_m))) +svbfloat16_t svcvt_bf16_f32_m(svbfloat16_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x))) +svbfloat16_t svcvt_bf16_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_z))) +svbfloat16_t svcvt_bf16_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_bf16_f32_m))) +svbfloat16_t svcvtnt_bf16_f32_m(svbfloat16_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16))) +svbfloat16_t svdup_n_bf16(bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_m))) +svbfloat16_t svdup_n_bf16_m(svbfloat16_t, svbool_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_x))) +svbfloat16_t svdup_n_bf16_x(svbool_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_z))) +svbfloat16_t svdup_n_bf16_z(svbool_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_bf16))) +svbfloat16_t svdup_lane_bf16(svbfloat16_t, uint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_bf16))) +svbfloat16_t svdupq_n_bf16(bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_bf16))) +svbfloat16_t svdupq_lane_bf16(svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_bf16))) +svbfloat16_t svext_bf16(svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_bf16))) +svbfloat16_t svget2_bf16(svbfloat16x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_bf16))) +svbfloat16_t svget3_bf16(svbfloat16x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_bf16))) +svbfloat16_t svget4_bf16(svbfloat16x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_bf16))) +svbfloat16_t svinsr_n_bf16(svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_bf16))) +bfloat16_t svlasta_bf16(svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_bf16))) +bfloat16_t svlastb_bf16(svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16))) +svbfloat16_t svld1_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16))) +svbfloat16_t svld1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_bf16))) +svbfloat16_t svld1rq_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_bf16))) +svbfloat16x2_t svld2_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_bf16))) +svbfloat16x2_t svld2_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_bf16))) +svbfloat16x3_t svld3_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_bf16))) +svbfloat16x3_t svld3_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_bf16))) +svbfloat16x4_t svld4_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_bf16))) +svbfloat16x4_t svld4_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16))) +svbfloat16_t svldnt1_bf16(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16))) +svbfloat16_t svldnt1_vnum_bf16(svbool_t, bfloat16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_bf16))) +uint64_t svlen_bf16(svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_bf16))) +svbfloat16_t svrev_bf16(svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16))) +svbfloat16_t svsel_bf16(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_bf16))) +svbfloat16x2_t svset2_bf16(svbfloat16x2_t, uint64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_bf16))) +svbfloat16x3_t svset3_bf16(svbfloat16x3_t, uint64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_bf16))) +svbfloat16x4_t svset4_bf16(svbfloat16x4_t, uint64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_bf16))) +svbfloat16_t svsplice_bf16(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16))) +void svst1_bf16(svbool_t, bfloat16_t *, svbfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16))) +void svst1_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_bf16))) +void svst2_bf16(svbool_t, bfloat16_t *, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_bf16))) +void svst2_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_bf16))) +void svst3_bf16(svbool_t, bfloat16_t *, svbfloat16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_bf16))) +void svst3_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_bf16))) +void svst4_bf16(svbool_t, bfloat16_t *, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_bf16))) +void svst4_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16))) +void svstnt1_bf16(svbool_t, bfloat16_t *, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16))) +void svstnt1_vnum_bf16(svbool_t, bfloat16_t *, int64_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_bf16))) +svbfloat16_t svtbl_bf16(svbfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_bf16))) +svbfloat16_t svtrn1_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_bf16))) +svbfloat16_t svtrn2_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_bf16))) +svbfloat16x2_t svundef2_bf16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_bf16))) +svbfloat16x3_t svundef3_bf16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_bf16))) 
+svbfloat16x4_t svundef4_bf16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_bf16))) +svbfloat16_t svundef_bf16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_bf16))) +svbfloat16_t svuzp1_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_bf16))) +svbfloat16_t svuzp2_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_bf16))) +svbfloat16_t svzip1_bf16(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_bf16))) +svbfloat16_t svzip2_bf16(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_n_f32))) +svfloat32_t svbfdot(svfloat32_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_f32))) +svfloat32_t svbfdot(svfloat32_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfdot_lane_f32))) +svfloat32_t svbfdot_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_n_f32))) +svfloat32_t svbfmlalb(svfloat32_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_f32))) +svfloat32_t svbfmlalb(svfloat32_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalb_lane_f32))) +svfloat32_t svbfmlalb_lane(svfloat32_t, svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_n_f32))) +svfloat32_t svbfmlalt(svfloat32_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_f32))) +svfloat32_t svbfmlalt(svfloat32_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlalt_lane_f32))) +svfloat32_t svbfmlalt_lane(svfloat32_t, svbfloat16_t, 
svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_bf16))) +bfloat16_t svclasta(svbool_t, bfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_bf16))) +svbfloat16_t svclasta(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_bf16))) +bfloat16_t svclastb(svbool_t, bfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_bf16))) +svbfloat16_t svclastb(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_m))) +svuint16_t svcnt_m(svuint16_t, svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_x))) +svuint16_t svcnt_x(svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_bf16_z))) +svuint16_t svcnt_z(svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_bf16))) +svbfloat16x2_t svcreate2(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_bf16))) +svbfloat16x3_t svcreate3(svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_bf16))) +svbfloat16x4_t svcreate4(svbfloat16_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_m))) +svbfloat16_t svcvt_bf16_m(svbfloat16_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_x))) +svbfloat16_t svcvt_bf16_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_bf16_f32_z))) +svbfloat16_t svcvt_bf16_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_bf16_f32_m))) +svbfloat16_t svcvtnt_bf16_m(svbfloat16_t, svbool_t, svfloat32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16))) +svbfloat16_t svdup_bf16(bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_m))) +svbfloat16_t svdup_bf16_m(svbfloat16_t, svbool_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_x))) +svbfloat16_t svdup_bf16_x(svbool_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_bf16_z))) +svbfloat16_t svdup_bf16_z(svbool_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_bf16))) +svbfloat16_t svdup_lane(svbfloat16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_bf16))) +svbfloat16_t svdupq_bf16(bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_bf16))) +svbfloat16_t svdupq_lane(svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_bf16))) +svbfloat16_t svext(svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_bf16))) +svbfloat16_t svget2(svbfloat16x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_bf16))) +svbfloat16_t svget3(svbfloat16x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_bf16))) +svbfloat16_t svget4(svbfloat16x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_bf16))) +svbfloat16_t svinsr(svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_bf16))) +bfloat16_t svlasta(svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_bf16))) +bfloat16_t svlastb(svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_bf16))) +svbfloat16_t svld1(svbool_t, bfloat16_t const *); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_bf16))) +svbfloat16_t svld1_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_bf16))) +svbfloat16_t svld1rq(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_bf16))) +svbfloat16x2_t svld2(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_bf16))) +svbfloat16x2_t svld2_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_bf16))) +svbfloat16x3_t svld3(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_bf16))) +svbfloat16x3_t svld3_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_bf16))) +svbfloat16x4_t svld4(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_bf16))) +svbfloat16x4_t svld4_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_bf16))) +svbfloat16_t svldnt1(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_bf16))) +svbfloat16_t svldnt1_vnum(svbool_t, bfloat16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_bf16))) +uint64_t svlen(svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_bf16))) +svbfloat16_t svrev(svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_bf16))) +svbfloat16_t svsel(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_bf16))) +svbfloat16x2_t svset2(svbfloat16x2_t, uint64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_bf16))) +svbfloat16x3_t svset3(svbfloat16x3_t, 
uint64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_bf16))) +svbfloat16x4_t svset4(svbfloat16x4_t, uint64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_bf16))) +svbfloat16_t svsplice(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_bf16))) +void svst1(svbool_t, bfloat16_t *, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_bf16))) +void svst1_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_bf16))) +void svst2(svbool_t, bfloat16_t *, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_bf16))) +void svst2_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_bf16))) +void svst3(svbool_t, bfloat16_t *, svbfloat16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_bf16))) +void svst3_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_bf16))) +void svst4(svbool_t, bfloat16_t *, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_bf16))) +void svst4_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_bf16))) +void svstnt1(svbool_t, bfloat16_t *, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_bf16))) +void svstnt1_vnum(svbool_t, bfloat16_t *, int64_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_bf16))) +svbfloat16_t svtbl(svbfloat16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_bf16))) +svbfloat16_t svtrn1(svbfloat16_t, svbfloat16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_bf16))) +svbfloat16_t svtrn2(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_bf16))) +svbfloat16_t svuzp1(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_bf16))) +svbfloat16_t svuzp2(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_bf16))) +svbfloat16_t svzip1(svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_bf16))) +svbfloat16_t svzip2(svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f32))) +svfloat32_t svmmla_f32(svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f32))) +svfloat32_t svmmla(svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u8))) +svuint8_t svld1ro_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u32))) +svuint32_t svld1ro_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u64))) +svuint64_t svld1ro_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u16))) +svuint16_t svld1ro_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s8))) +svint8_t svld1ro_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f64))) +svfloat64_t svld1ro_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f32))) +svfloat32_t svld1ro_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f16))) +svfloat16_t svld1ro_f16(svbool_t, float16_t const *); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s32))) +svint32_t svld1ro_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s64))) +svint64_t svld1ro_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s16))) +svint16_t svld1ro_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f64))) +svfloat64_t svmmla_f64(svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u8))) +svuint8_t svtrn1q_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u32))) +svuint32_t svtrn1q_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u64))) +svuint64_t svtrn1q_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u16))) +svuint16_t svtrn1q_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s8))) +svint8_t svtrn1q_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f64))) +svfloat64_t svtrn1q_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f32))) +svfloat32_t svtrn1q_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f16))) +svfloat16_t svtrn1q_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s32))) +svint32_t svtrn1q_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s64))) +svint64_t svtrn1q_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s16))) +svint16_t svtrn1q_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u8))) +svuint8_t svtrn2q_u8(svuint8_t, svuint8_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u32))) +svuint32_t svtrn2q_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u64))) +svuint64_t svtrn2q_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u16))) +svuint16_t svtrn2q_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s8))) +svint8_t svtrn2q_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f64))) +svfloat64_t svtrn2q_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f32))) +svfloat32_t svtrn2q_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f16))) +svfloat16_t svtrn2q_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s32))) +svint32_t svtrn2q_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s64))) +svint64_t svtrn2q_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s16))) +svint16_t svtrn2q_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u8))) +svuint8_t svuzp1q_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u32))) +svuint32_t svuzp1q_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u64))) +svuint64_t svuzp1q_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u16))) +svuint16_t svuzp1q_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s8))) +svint8_t svuzp1q_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f64))) +svfloat64_t svuzp1q_f64(svfloat64_t, svfloat64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f32))) +svfloat32_t svuzp1q_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f16))) +svfloat16_t svuzp1q_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s32))) +svint32_t svuzp1q_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s64))) +svint64_t svuzp1q_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s16))) +svint16_t svuzp1q_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u8))) +svuint8_t svuzp2q_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u32))) +svuint32_t svuzp2q_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u64))) +svuint64_t svuzp2q_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u16))) +svuint16_t svuzp2q_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s8))) +svint8_t svuzp2q_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f64))) +svfloat64_t svuzp2q_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f32))) +svfloat32_t svuzp2q_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f16))) +svfloat16_t svuzp2q_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s32))) +svint32_t svuzp2q_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s64))) +svint64_t svuzp2q_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s16))) +svint16_t svuzp2q_s16(svint16_t, svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u8))) +svuint8_t svzip1q_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u32))) +svuint32_t svzip1q_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u64))) +svuint64_t svzip1q_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u16))) +svuint16_t svzip1q_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s8))) +svint8_t svzip1q_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f64))) +svfloat64_t svzip1q_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f32))) +svfloat32_t svzip1q_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f16))) +svfloat16_t svzip1q_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s32))) +svint32_t svzip1q_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s64))) +svint64_t svzip1q_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s16))) +svint16_t svzip1q_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u8))) +svuint8_t svzip2q_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u32))) +svuint32_t svzip2q_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u64))) +svuint64_t svzip2q_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u16))) +svuint16_t svzip2q_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s8))) +svint8_t svzip2q_s8(svint8_t, svint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f64))) +svfloat64_t svzip2q_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f32))) +svfloat32_t svzip2q_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f16))) +svfloat16_t svzip2q_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s32))) +svint32_t svzip2q_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s64))) +svint64_t svzip2q_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s16))) +svint16_t svzip2q_s16(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u8))) +svuint8_t svld1ro(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u32))) +svuint32_t svld1ro(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u64))) +svuint64_t svld1ro(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_u16))) +svuint16_t svld1ro(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s8))) +svint8_t svld1ro(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f64))) +svfloat64_t svld1ro(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f32))) +svfloat32_t svld1ro(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_f16))) +svfloat16_t svld1ro(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s32))) +svint32_t svld1ro(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s64))) +svint64_t svld1ro(svbool_t, int64_t const *); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_s16))) +svint16_t svld1ro(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_f64))) +svfloat64_t svmmla(svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u8))) +svuint8_t svtrn1q(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u32))) +svuint32_t svtrn1q(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u64))) +svuint64_t svtrn1q(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_u16))) +svuint16_t svtrn1q(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s8))) +svint8_t svtrn1q(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f64))) +svfloat64_t svtrn1q(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f32))) +svfloat32_t svtrn1q(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_f16))) +svfloat16_t svtrn1q(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s32))) +svint32_t svtrn1q(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s64))) +svint64_t svtrn1q(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1q_s16))) +svint16_t svtrn1q(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u8))) +svuint8_t svtrn2q(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u32))) +svuint32_t svtrn2q(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u64))) +svuint64_t svtrn2q(svuint64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_u16))) +svuint16_t svtrn2q(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s8))) +svint8_t svtrn2q(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f64))) +svfloat64_t svtrn2q(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f32))) +svfloat32_t svtrn2q(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_f16))) +svfloat16_t svtrn2q(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s32))) +svint32_t svtrn2q(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s64))) +svint64_t svtrn2q(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2q_s16))) +svint16_t svtrn2q(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u8))) +svuint8_t svuzp1q(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u32))) +svuint32_t svuzp1q(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u64))) +svuint64_t svuzp1q(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_u16))) +svuint16_t svuzp1q(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s8))) +svint8_t svuzp1q(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f64))) +svfloat64_t svuzp1q(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f32))) +svfloat32_t svuzp1q(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_f16))) +svfloat16_t svuzp1q(svfloat16_t, svfloat16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s32))) +svint32_t svuzp1q(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s64))) +svint64_t svuzp1q(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1q_s16))) +svint16_t svuzp1q(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u8))) +svuint8_t svuzp2q(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u32))) +svuint32_t svuzp2q(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u64))) +svuint64_t svuzp2q(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_u16))) +svuint16_t svuzp2q(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s8))) +svint8_t svuzp2q(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f64))) +svfloat64_t svuzp2q(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f32))) +svfloat32_t svuzp2q(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_f16))) +svfloat16_t svuzp2q(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s32))) +svint32_t svuzp2q(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s64))) +svint64_t svuzp2q(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2q_s16))) +svint16_t svuzp2q(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u8))) +svuint8_t svzip1q(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u32))) +svuint32_t svzip1q(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u64))) 
+svuint64_t svzip1q(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_u16))) +svuint16_t svzip1q(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s8))) +svint8_t svzip1q(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f64))) +svfloat64_t svzip1q(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f32))) +svfloat32_t svzip1q(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_f16))) +svfloat16_t svzip1q(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s32))) +svint32_t svzip1q(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s64))) +svint64_t svzip1q(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1q_s16))) +svint16_t svzip1q(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u8))) +svuint8_t svzip2q(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u32))) +svuint32_t svzip2q(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u64))) +svuint64_t svzip2q(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_u16))) +svuint16_t svzip2q(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s8))) +svint8_t svzip2q(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f64))) +svfloat64_t svzip2q(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f32))) +svfloat32_t svzip2q(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_f16))) +svfloat16_t svzip2q(svfloat16_t, svfloat16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s32))) +svint32_t svzip2q(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s64))) +svint64_t svzip2q(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2q_s16))) +svint16_t svzip2q(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_bf16))) +svbfloat16_t svld1ro_bf16(svbool_t, bfloat16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ro_bf16))) +svbfloat16_t svld1ro(svbool_t, bfloat16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_s32))) +svint32_t svmmla_s32(svint32_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_u32))) +svuint32_t svmmla_u32(svuint32_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusmmla_s32))) +svint32_t svusmmla_s32(svint32_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_s32))) +svint32_t svmmla(svint32_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmmla_u32))) +svuint32_t svmmla(svuint32_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusmmla_s32))) +svint32_t svusmmla(svint32_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_n_s32))) +svint32_t svsudot_n_s32(svint32_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_s32))) +svint32_t svsudot_s32(svint32_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_lane_s32))) +svint32_t svsudot_lane_s32(svint32_t, svint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_n_s32))) +svint32_t svusdot_n_s32(svint32_t, svuint8_t, int8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_s32))) +svint32_t svusdot_s32(svint32_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_lane_s32))) +svint32_t svusdot_lane_s32(svint32_t, svuint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_n_s32))) +svint32_t svsudot(svint32_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_s32))) +svint32_t svsudot(svint32_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsudot_lane_s32))) +svint32_t svsudot_lane(svint32_t, svint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_n_s32))) +svint32_t svusdot(svint32_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_s32))) +svint32_t svusdot(svint32_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svusdot_lane_s32))) +svint32_t svusdot_lane(svint32_t, svuint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u32_z))) +svuint32_t svhistcnt_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u64_z))) +svuint64_t svhistcnt_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s32_z))) +svuint32_t svhistcnt_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s64_z))) +svuint64_t svhistcnt_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_u8))) +svuint8_t svhistseg_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_s8))) +svuint8_t svhistseg_s8(svint8_t, svint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_u32))) +svuint32_t svldnt1_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_u64))) +svuint64_t svldnt1_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_f64))) +svfloat64_t svldnt1_gather_u64base_index_f64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_f32))) +svfloat32_t svldnt1_gather_u32base_index_f32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_s32))) +svint32_t svldnt1_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_s64))) +svint64_t svldnt1_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_u32))) +svuint32_t svldnt1_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_u64))) +svuint64_t svldnt1_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_f64))) +svfloat64_t svldnt1_gather_u64base_offset_f64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_f32))) +svfloat32_t svldnt1_gather_u32base_offset_f32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_s32))) +svint32_t svldnt1_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_s64))) +svint64_t svldnt1_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_u32))) +svuint32_t svldnt1_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_u64))) +svuint64_t svldnt1_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_f64))) +svfloat64_t svldnt1_gather_u64base_f64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_f32))) +svfloat32_t svldnt1_gather_u32base_f32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_s32))) +svint32_t svldnt1_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_s64))) +svint64_t svldnt1_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_u64))) +svuint64_t svldnt1_gather_s64index_u64(svbool_t, uint64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_f64))) +svfloat64_t svldnt1_gather_s64index_f64(svbool_t, float64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_s64))) +svint64_t svldnt1_gather_s64index_s64(svbool_t, int64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_u64))) +svuint64_t svldnt1_gather_u64index_u64(svbool_t, uint64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_f64))) +svfloat64_t svldnt1_gather_u64index_f64(svbool_t, float64_t const *, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_s64))) +svint64_t svldnt1_gather_u64index_s64(svbool_t, int64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_u32))) +svuint32_t svldnt1_gather_u32offset_u32(svbool_t, uint32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_f32))) +svfloat32_t svldnt1_gather_u32offset_f32(svbool_t, float32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_s32))) +svint32_t svldnt1_gather_u32offset_s32(svbool_t, int32_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_u64))) +svuint64_t svldnt1_gather_s64offset_u64(svbool_t, uint64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_f64))) +svfloat64_t svldnt1_gather_s64offset_f64(svbool_t, float64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_s64))) +svint64_t svldnt1_gather_s64offset_s64(svbool_t, int64_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_u64))) +svuint64_t svldnt1_gather_u64offset_u64(svbool_t, uint64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_f64))) +svfloat64_t svldnt1_gather_u64offset_f64(svbool_t, float64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_s64))) +svint64_t svldnt1_gather_u64offset_s64(svbool_t, int64_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_u32))) +svuint32_t svldnt1sb_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_u64))) +svuint64_t svldnt1sb_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_s32))) +svint32_t svldnt1sb_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_s64))) +svint64_t svldnt1sb_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_u32))) +svuint32_t svldnt1sb_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_u64))) +svuint64_t svldnt1sb_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_s32))) +svint32_t svldnt1sb_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_s64))) +svint64_t svldnt1sb_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_u32))) +svuint32_t svldnt1sb_gather_u32offset_u32(svbool_t, int8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_s32))) +svint32_t svldnt1sb_gather_u32offset_s32(svbool_t, int8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_u64))) +svuint64_t svldnt1sb_gather_s64offset_u64(svbool_t, int8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_s64))) +svint64_t svldnt1sb_gather_s64offset_s64(svbool_t, int8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_u64))) +svuint64_t 
svldnt1sb_gather_u64offset_u64(svbool_t, int8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_s64))) +svint64_t svldnt1sb_gather_u64offset_s64(svbool_t, int8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_u32))) +svuint32_t svldnt1sh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_u64))) +svuint64_t svldnt1sh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_s32))) +svint32_t svldnt1sh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_s64))) +svint64_t svldnt1sh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_u32))) +svuint32_t svldnt1sh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_u64))) +svuint64_t svldnt1sh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_s32))) +svint32_t svldnt1sh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_s64))) +svint64_t svldnt1sh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_u32))) +svuint32_t svldnt1sh_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_u64))) +svuint64_t svldnt1sh_gather_u64base_u64(svbool_t, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_s32))) +svint32_t svldnt1sh_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_s64))) +svint64_t svldnt1sh_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_u64))) +svuint64_t svldnt1sh_gather_s64index_u64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_s64))) +svint64_t svldnt1sh_gather_s64index_s64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_u64))) +svuint64_t svldnt1sh_gather_u64index_u64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_s64))) +svint64_t svldnt1sh_gather_u64index_s64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_u32))) +svuint32_t svldnt1sh_gather_u32offset_u32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_s32))) +svint32_t svldnt1sh_gather_u32offset_s32(svbool_t, int16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_u64))) +svuint64_t svldnt1sh_gather_s64offset_u64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_s64))) +svint64_t svldnt1sh_gather_s64offset_s64(svbool_t, int16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_u64))) +svuint64_t svldnt1sh_gather_u64offset_u64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_s64))) +svint64_t 
svldnt1sh_gather_u64offset_s64(svbool_t, int16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_u64))) +svuint64_t svldnt1sw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_s64))) +svint64_t svldnt1sw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_u64))) +svuint64_t svldnt1sw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_s64))) +svint64_t svldnt1sw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_u64))) +svuint64_t svldnt1sw_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_s64))) +svint64_t svldnt1sw_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_u64))) +svuint64_t svldnt1sw_gather_s64index_u64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_s64))) +svint64_t svldnt1sw_gather_s64index_s64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_u64))) +svuint64_t svldnt1sw_gather_u64index_u64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_s64))) +svint64_t svldnt1sw_gather_u64index_s64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_u64))) +svuint64_t svldnt1sw_gather_s64offset_u64(svbool_t, int32_t const *, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_s64))) +svint64_t svldnt1sw_gather_s64offset_s64(svbool_t, int32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_u64))) +svuint64_t svldnt1sw_gather_u64offset_u64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_s64))) +svint64_t svldnt1sw_gather_u64offset_s64(svbool_t, int32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_u32))) +svuint32_t svldnt1ub_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_u64))) +svuint64_t svldnt1ub_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_s32))) +svint32_t svldnt1ub_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_s64))) +svint64_t svldnt1ub_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_u32))) +svuint32_t svldnt1ub_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_u64))) +svuint64_t svldnt1ub_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_s32))) +svint32_t svldnt1ub_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_s64))) +svint64_t svldnt1ub_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_u32))) +svuint32_t 
svldnt1ub_gather_u32offset_u32(svbool_t, uint8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_s32))) +svint32_t svldnt1ub_gather_u32offset_s32(svbool_t, uint8_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_u64))) +svuint64_t svldnt1ub_gather_s64offset_u64(svbool_t, uint8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_s64))) +svint64_t svldnt1ub_gather_s64offset_s64(svbool_t, uint8_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_u64))) +svuint64_t svldnt1ub_gather_u64offset_u64(svbool_t, uint8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_s64))) +svint64_t svldnt1ub_gather_u64offset_s64(svbool_t, uint8_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_u32))) +svuint32_t svldnt1uh_gather_u32base_index_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_u64))) +svuint64_t svldnt1uh_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_s32))) +svint32_t svldnt1uh_gather_u32base_index_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_s64))) +svint64_t svldnt1uh_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_u32))) +svuint32_t svldnt1uh_gather_u32base_offset_u32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_u64))) +svuint64_t 
svldnt1uh_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_s32))) +svint32_t svldnt1uh_gather_u32base_offset_s32(svbool_t, svuint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_s64))) +svint64_t svldnt1uh_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_u32))) +svuint32_t svldnt1uh_gather_u32base_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_u64))) +svuint64_t svldnt1uh_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_s32))) +svint32_t svldnt1uh_gather_u32base_s32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_s64))) +svint64_t svldnt1uh_gather_u64base_s64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_u64))) +svuint64_t svldnt1uh_gather_s64index_u64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_s64))) +svint64_t svldnt1uh_gather_s64index_s64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_u64))) +svuint64_t svldnt1uh_gather_u64index_u64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_s64))) +svint64_t svldnt1uh_gather_u64index_s64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_u32))) +svuint32_t svldnt1uh_gather_u32offset_u32(svbool_t, uint16_t const *, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_s32))) +svint32_t svldnt1uh_gather_u32offset_s32(svbool_t, uint16_t const *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_u64))) +svuint64_t svldnt1uh_gather_s64offset_u64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_s64))) +svint64_t svldnt1uh_gather_s64offset_s64(svbool_t, uint16_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_u64))) +svuint64_t svldnt1uh_gather_u64offset_u64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_s64))) +svint64_t svldnt1uh_gather_u64offset_s64(svbool_t, uint16_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_u64))) +svuint64_t svldnt1uw_gather_u64base_index_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_s64))) +svint64_t svldnt1uw_gather_u64base_index_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_u64))) +svuint64_t svldnt1uw_gather_u64base_offset_u64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_s64))) +svint64_t svldnt1uw_gather_u64base_offset_s64(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_u64))) +svuint64_t svldnt1uw_gather_u64base_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_s64))) +svint64_t svldnt1uw_gather_u64base_s64(svbool_t, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_u64))) +svuint64_t svldnt1uw_gather_s64index_u64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_s64))) +svint64_t svldnt1uw_gather_s64index_s64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_u64))) +svuint64_t svldnt1uw_gather_u64index_u64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_s64))) +svint64_t svldnt1uw_gather_u64index_s64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_u64))) +svuint64_t svldnt1uw_gather_s64offset_u64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_s64))) +svint64_t svldnt1uw_gather_s64offset_s64(svbool_t, uint32_t const *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_u64))) +svuint64_t svldnt1uw_gather_u64offset_u64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_s64))) +svint64_t svldnt1uw_gather_u64offset_s64(svbool_t, uint32_t const *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u8))) +svbool_t svmatch_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u16))) +svbool_t svmatch_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s8))) +svbool_t svmatch_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s16))) +svbool_t svmatch_s16(svbool_t, svint16_t, svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u8))) +svbool_t svnmatch_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u16))) +svbool_t svnmatch_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s8))) +svbool_t svnmatch_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s16))) +svbool_t svnmatch_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_u32))) +void svstnt1_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_u64))) +void svstnt1_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_f64))) +void svstnt1_scatter_u64base_index_f64(svbool_t, svuint64_t, int64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_f32))) +void svstnt1_scatter_u32base_index_f32(svbool_t, svuint32_t, int64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_s32))) +void svstnt1_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_s64))) +void svstnt1_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_u32))) +void svstnt1_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_u64))) +void svstnt1_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_f64))) +void svstnt1_scatter_u64base_offset_f64(svbool_t, svuint64_t, int64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_f32))) +void svstnt1_scatter_u32base_offset_f32(svbool_t, svuint32_t, int64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_s32))) +void svstnt1_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_s64))) +void svstnt1_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_u32))) +void svstnt1_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_u64))) +void svstnt1_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_f64))) +void svstnt1_scatter_u64base_f64(svbool_t, svuint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_f32))) +void svstnt1_scatter_u32base_f32(svbool_t, svuint32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_s32))) +void svstnt1_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_s64))) +void svstnt1_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_u64))) +void svstnt1_scatter_s64index_u64(svbool_t, uint64_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_f64))) +void 
svstnt1_scatter_s64index_f64(svbool_t, float64_t *, svint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_s64))) +void svstnt1_scatter_s64index_s64(svbool_t, int64_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_u64))) +void svstnt1_scatter_u64index_u64(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_f64))) +void svstnt1_scatter_u64index_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_s64))) +void svstnt1_scatter_u64index_s64(svbool_t, int64_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_u32))) +void svstnt1_scatter_u32offset_u32(svbool_t, uint32_t *, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_f32))) +void svstnt1_scatter_u32offset_f32(svbool_t, float32_t *, svuint32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_s32))) +void svstnt1_scatter_u32offset_s32(svbool_t, int32_t *, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_u64))) +void svstnt1_scatter_s64offset_u64(svbool_t, uint64_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_f64))) +void svstnt1_scatter_s64offset_f64(svbool_t, float64_t *, svint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_s64))) +void svstnt1_scatter_s64offset_s64(svbool_t, int64_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_u64))) +void svstnt1_scatter_u64offset_u64(svbool_t, uint64_t *, svuint64_t, 
svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_f64))) +void svstnt1_scatter_u64offset_f64(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_s64))) +void svstnt1_scatter_u64offset_s64(svbool_t, int64_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_u32))) +void svstnt1b_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_u64))) +void svstnt1b_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_s32))) +void svstnt1b_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_s64))) +void svstnt1b_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_u32))) +void svstnt1b_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_u64))) +void svstnt1b_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_s32))) +void svstnt1b_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_s64))) +void svstnt1b_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_s32))) +void svstnt1b_scatter_u32offset_s32(svbool_t, int8_t *, svuint32_t, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_u32))) +void svstnt1b_scatter_u32offset_u32(svbool_t, uint8_t *, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_s64))) +void svstnt1b_scatter_s64offset_s64(svbool_t, int8_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_u64))) +void svstnt1b_scatter_s64offset_u64(svbool_t, uint8_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_s64))) +void svstnt1b_scatter_u64offset_s64(svbool_t, int8_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_u64))) +void svstnt1b_scatter_u64offset_u64(svbool_t, uint8_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_u32))) +void svstnt1h_scatter_u32base_index_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_u64))) +void svstnt1h_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_s32))) +void svstnt1h_scatter_u32base_index_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_s64))) +void svstnt1h_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_u32))) +void svstnt1h_scatter_u32base_offset_u32(svbool_t, svuint32_t, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_u64))) +void svstnt1h_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_s32))) +void svstnt1h_scatter_u32base_offset_s32(svbool_t, svuint32_t, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_s64))) +void svstnt1h_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_u32))) +void svstnt1h_scatter_u32base_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_u64))) +void svstnt1h_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_s32))) +void svstnt1h_scatter_u32base_s32(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_s64))) +void svstnt1h_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_s64))) +void svstnt1h_scatter_s64index_s64(svbool_t, int16_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_u64))) +void svstnt1h_scatter_s64index_u64(svbool_t, uint16_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_s64))) +void svstnt1h_scatter_u64index_s64(svbool_t, int16_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_u64))) +void svstnt1h_scatter_u64index_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_s32))) +void svstnt1h_scatter_u32offset_s32(svbool_t, int16_t *, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_u32))) +void 
svstnt1h_scatter_u32offset_u32(svbool_t, uint16_t *, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_s64))) +void svstnt1h_scatter_s64offset_s64(svbool_t, int16_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_u64))) +void svstnt1h_scatter_s64offset_u64(svbool_t, uint16_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_s64))) +void svstnt1h_scatter_u64offset_s64(svbool_t, int16_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_u64))) +void svstnt1h_scatter_u64offset_u64(svbool_t, uint16_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_u64))) +void svstnt1w_scatter_u64base_index_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_s64))) +void svstnt1w_scatter_u64base_index_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_u64))) +void svstnt1w_scatter_u64base_offset_u64(svbool_t, svuint64_t, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_s64))) +void svstnt1w_scatter_u64base_offset_s64(svbool_t, svuint64_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_u64))) +void svstnt1w_scatter_u64base_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_s64))) +void svstnt1w_scatter_u64base_s64(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_s64))) +void svstnt1w_scatter_s64index_s64(svbool_t, int32_t 
*, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_u64))) +void svstnt1w_scatter_s64index_u64(svbool_t, uint32_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_s64))) +void svstnt1w_scatter_u64index_s64(svbool_t, int32_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_u64))) +void svstnt1w_scatter_u64index_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_s64))) +void svstnt1w_scatter_s64offset_s64(svbool_t, int32_t *, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_u64))) +void svstnt1w_scatter_s64offset_u64(svbool_t, uint32_t *, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_s64))) +void svstnt1w_scatter_u64offset_s64(svbool_t, int32_t *, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_u64))) +void svstnt1w_scatter_u64offset_u64(svbool_t, uint32_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u32_z))) +svuint32_t svhistcnt_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_u64_z))) +svuint64_t svhistcnt_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s32_z))) +svuint32_t svhistcnt_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistcnt_s64_z))) +svuint64_t svhistcnt_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_u8))) +svuint8_t svhistseg(svuint8_t, svuint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhistseg_s8))) +svuint8_t svhistseg(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_u32))) +svuint32_t svldnt1_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_u64))) +svuint64_t svldnt1_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_f64))) +svfloat64_t svldnt1_gather_index_f64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_f32))) +svfloat32_t svldnt1_gather_index_f32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_index_s32))) +svint32_t svldnt1_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_index_s64))) +svint64_t svldnt1_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_u32))) +svuint32_t svldnt1_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_u64))) +svuint64_t svldnt1_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_f64))) +svfloat64_t svldnt1_gather_offset_f64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_f32))) +svfloat32_t svldnt1_gather_offset_f32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_offset_s32))) +svint32_t svldnt1_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_offset_s64))) +svint64_t svldnt1_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_u32))) +svuint32_t svldnt1_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_u64))) +svuint64_t svldnt1_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_f64))) +svfloat64_t svldnt1_gather_f64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_f32))) +svfloat32_t svldnt1_gather_f32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32base_s32))) +svint32_t svldnt1_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64base_s64))) +svint64_t svldnt1_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_u64))) +svuint64_t svldnt1_gather_index(svbool_t, uint64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_f64))) +svfloat64_t svldnt1_gather_index(svbool_t, float64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64index_s64))) +svint64_t svldnt1_gather_index(svbool_t, int64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_u64))) +svuint64_t svldnt1_gather_index(svbool_t, uint64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_f64))) +svfloat64_t svldnt1_gather_index(svbool_t, float64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64index_s64))) +svint64_t 
svldnt1_gather_index(svbool_t, int64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_u32))) +svuint32_t svldnt1_gather_offset(svbool_t, uint32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_f32))) +svfloat32_t svldnt1_gather_offset(svbool_t, float32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u32offset_s32))) +svint32_t svldnt1_gather_offset(svbool_t, int32_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_u64))) +svuint64_t svldnt1_gather_offset(svbool_t, uint64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_f64))) +svfloat64_t svldnt1_gather_offset(svbool_t, float64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_s64offset_s64))) +svint64_t svldnt1_gather_offset(svbool_t, int64_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_u64))) +svuint64_t svldnt1_gather_offset(svbool_t, uint64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_f64))) +svfloat64_t svldnt1_gather_offset(svbool_t, float64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_gather_u64offset_s64))) +svint64_t svldnt1_gather_offset(svbool_t, int64_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_u32))) +svuint32_t svldnt1sb_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_u64))) +svuint64_t svldnt1sb_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_offset_s32))) +svint32_t svldnt1sb_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_offset_s64))) +svint64_t svldnt1sb_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_u32))) +svuint32_t svldnt1sb_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_u64))) +svuint64_t svldnt1sb_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32base_s32))) +svint32_t svldnt1sb_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64base_s64))) +svint64_t svldnt1sb_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_u32))) +svuint32_t svldnt1sb_gather_offset_u32(svbool_t, int8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u32offset_s32))) +svint32_t svldnt1sb_gather_offset_s32(svbool_t, int8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_u64))) +svuint64_t svldnt1sb_gather_offset_u64(svbool_t, int8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_s64offset_s64))) +svint64_t svldnt1sb_gather_offset_s64(svbool_t, int8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_u64))) +svuint64_t svldnt1sb_gather_offset_u64(svbool_t, int8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sb_gather_u64offset_s64))) +svint64_t svldnt1sb_gather_offset_s64(svbool_t, int8_t const *, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_u32))) +svuint32_t svldnt1sh_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_u64))) +svuint64_t svldnt1sh_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_index_s32))) +svint32_t svldnt1sh_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_index_s64))) +svint64_t svldnt1sh_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_u32))) +svuint32_t svldnt1sh_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_u64))) +svuint64_t svldnt1sh_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_offset_s32))) +svint32_t svldnt1sh_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_offset_s64))) +svint64_t svldnt1sh_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_u32))) +svuint32_t svldnt1sh_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_u64))) +svuint64_t svldnt1sh_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32base_s32))) +svint32_t svldnt1sh_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64base_s64))) +svint64_t svldnt1sh_gather_s64(svbool_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_u64))) +svuint64_t svldnt1sh_gather_index_u64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64index_s64))) +svint64_t svldnt1sh_gather_index_s64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_u64))) +svuint64_t svldnt1sh_gather_index_u64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64index_s64))) +svint64_t svldnt1sh_gather_index_s64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_u32))) +svuint32_t svldnt1sh_gather_offset_u32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u32offset_s32))) +svint32_t svldnt1sh_gather_offset_s32(svbool_t, int16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_u64))) +svuint64_t svldnt1sh_gather_offset_u64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_s64offset_s64))) +svint64_t svldnt1sh_gather_offset_s64(svbool_t, int16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_u64))) +svuint64_t svldnt1sh_gather_offset_u64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sh_gather_u64offset_s64))) +svint64_t svldnt1sh_gather_offset_s64(svbool_t, int16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_u64))) +svuint64_t svldnt1sw_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_index_s64))) +svint64_t svldnt1sw_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_u64))) +svuint64_t svldnt1sw_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_offset_s64))) +svint64_t svldnt1sw_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_u64))) +svuint64_t svldnt1sw_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64base_s64))) +svint64_t svldnt1sw_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_u64))) +svuint64_t svldnt1sw_gather_index_u64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64index_s64))) +svint64_t svldnt1sw_gather_index_s64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_u64))) +svuint64_t svldnt1sw_gather_index_u64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64index_s64))) +svint64_t svldnt1sw_gather_index_s64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_u64))) +svuint64_t svldnt1sw_gather_offset_u64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_s64offset_s64))) +svint64_t svldnt1sw_gather_offset_s64(svbool_t, int32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_u64))) +svuint64_t svldnt1sw_gather_offset_u64(svbool_t, 
int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1sw_gather_u64offset_s64))) +svint64_t svldnt1sw_gather_offset_s64(svbool_t, int32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_u32))) +svuint32_t svldnt1ub_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_u64))) +svuint64_t svldnt1ub_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_offset_s32))) +svint32_t svldnt1ub_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_offset_s64))) +svint64_t svldnt1ub_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_u32))) +svuint32_t svldnt1ub_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_u64))) +svuint64_t svldnt1ub_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32base_s32))) +svint32_t svldnt1ub_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64base_s64))) +svint64_t svldnt1ub_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_u32))) +svuint32_t svldnt1ub_gather_offset_u32(svbool_t, uint8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u32offset_s32))) +svint32_t svldnt1ub_gather_offset_s32(svbool_t, uint8_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_u64))) +svuint64_t svldnt1ub_gather_offset_u64(svbool_t, uint8_t 
const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_s64offset_s64))) +svint64_t svldnt1ub_gather_offset_s64(svbool_t, uint8_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_u64))) +svuint64_t svldnt1ub_gather_offset_u64(svbool_t, uint8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1ub_gather_u64offset_s64))) +svint64_t svldnt1ub_gather_offset_s64(svbool_t, uint8_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_u32))) +svuint32_t svldnt1uh_gather_index_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_u64))) +svuint64_t svldnt1uh_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_index_s32))) +svint32_t svldnt1uh_gather_index_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_index_s64))) +svint64_t svldnt1uh_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_u32))) +svuint32_t svldnt1uh_gather_offset_u32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_u64))) +svuint64_t svldnt1uh_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_offset_s32))) +svint32_t svldnt1uh_gather_offset_s32(svbool_t, svuint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_offset_s64))) +svint64_t svldnt1uh_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_u32))) +svuint32_t svldnt1uh_gather_u32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_u64))) +svuint64_t svldnt1uh_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32base_s32))) +svint32_t svldnt1uh_gather_s32(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64base_s64))) +svint64_t svldnt1uh_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_u64))) +svuint64_t svldnt1uh_gather_index_u64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64index_s64))) +svint64_t svldnt1uh_gather_index_s64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_u64))) +svuint64_t svldnt1uh_gather_index_u64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64index_s64))) +svint64_t svldnt1uh_gather_index_s64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_u32))) +svuint32_t svldnt1uh_gather_offset_u32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u32offset_s32))) +svint32_t svldnt1uh_gather_offset_s32(svbool_t, uint16_t const *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_u64))) +svuint64_t svldnt1uh_gather_offset_u64(svbool_t, uint16_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_s64offset_s64))) +svint64_t svldnt1uh_gather_offset_s64(svbool_t, uint16_t const *, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_u64))) +svuint64_t svldnt1uh_gather_offset_u64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uh_gather_u64offset_s64))) +svint64_t svldnt1uh_gather_offset_s64(svbool_t, uint16_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_u64))) +svuint64_t svldnt1uw_gather_index_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_index_s64))) +svint64_t svldnt1uw_gather_index_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_u64))) +svuint64_t svldnt1uw_gather_offset_u64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_offset_s64))) +svint64_t svldnt1uw_gather_offset_s64(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_u64))) +svuint64_t svldnt1uw_gather_u64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64base_s64))) +svint64_t svldnt1uw_gather_s64(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_u64))) +svuint64_t svldnt1uw_gather_index_u64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64index_s64))) +svint64_t svldnt1uw_gather_index_s64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_u64))) +svuint64_t svldnt1uw_gather_index_u64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64index_s64))) +svint64_t svldnt1uw_gather_index_s64(svbool_t, 
uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_u64))) +svuint64_t svldnt1uw_gather_offset_u64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_s64offset_s64))) +svint64_t svldnt1uw_gather_offset_s64(svbool_t, uint32_t const *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_u64))) +svuint64_t svldnt1uw_gather_offset_u64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1uw_gather_u64offset_s64))) +svint64_t svldnt1uw_gather_offset_s64(svbool_t, uint32_t const *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u8))) +svbool_t svmatch(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_u16))) +svbool_t svmatch(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s8))) +svbool_t svmatch(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmatch_s16))) +svbool_t svmatch(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u8))) +svbool_t svnmatch(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_u16))) +svbool_t svnmatch(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s8))) +svbool_t svnmatch(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmatch_s16))) +svbool_t svnmatch(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_u32))) +void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_u64))) +void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_f64))) +void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_f32))) +void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_index_s32))) +void svstnt1_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_index_s64))) +void svstnt1_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_u32))) +void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_u64))) +void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_f64))) +void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_f32))) +void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_offset_s32))) +void svstnt1_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_offset_s64))) +void svstnt1_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_u32))) +void 
svstnt1_scatter(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_u64))) +void svstnt1_scatter(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_f64))) +void svstnt1_scatter(svbool_t, svuint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_f32))) +void svstnt1_scatter(svbool_t, svuint32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32base_s32))) +void svstnt1_scatter(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64base_s64))) +void svstnt1_scatter(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_u64))) +void svstnt1_scatter_index(svbool_t, uint64_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_f64))) +void svstnt1_scatter_index(svbool_t, float64_t *, svint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64index_s64))) +void svstnt1_scatter_index(svbool_t, int64_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_u64))) +void svstnt1_scatter_index(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_f64))) +void svstnt1_scatter_index(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64index_s64))) +void svstnt1_scatter_index(svbool_t, int64_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_u32))) +void svstnt1_scatter_offset(svbool_t, uint32_t *, svuint32_t, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_f32))) +void svstnt1_scatter_offset(svbool_t, float32_t *, svuint32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u32offset_s32))) +void svstnt1_scatter_offset(svbool_t, int32_t *, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_u64))) +void svstnt1_scatter_offset(svbool_t, uint64_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_f64))) +void svstnt1_scatter_offset(svbool_t, float64_t *, svint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_s64offset_s64))) +void svstnt1_scatter_offset(svbool_t, int64_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_u64))) +void svstnt1_scatter_offset(svbool_t, uint64_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_f64))) +void svstnt1_scatter_offset(svbool_t, float64_t *, svuint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_scatter_u64offset_s64))) +void svstnt1_scatter_offset(svbool_t, int64_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_u32))) +void svstnt1b_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_u64))) +void svstnt1b_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_offset_s32))) +void svstnt1b_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_offset_s64))) +void 
svstnt1b_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_u32))) +void svstnt1b_scatter(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_u64))) +void svstnt1b_scatter(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32base_s32))) +void svstnt1b_scatter(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64base_s64))) +void svstnt1b_scatter(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_s32))) +void svstnt1b_scatter_offset(svbool_t, int8_t *, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u32offset_u32))) +void svstnt1b_scatter_offset(svbool_t, uint8_t *, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_s64))) +void svstnt1b_scatter_offset(svbool_t, int8_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_s64offset_u64))) +void svstnt1b_scatter_offset(svbool_t, uint8_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_s64))) +void svstnt1b_scatter_offset(svbool_t, int8_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1b_scatter_u64offset_u64))) +void svstnt1b_scatter_offset(svbool_t, uint8_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_u32))) +void svstnt1h_scatter_index(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_u64))) +void 
svstnt1h_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_index_s32))) +void svstnt1h_scatter_index(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_index_s64))) +void svstnt1h_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_u32))) +void svstnt1h_scatter_offset(svbool_t, svuint32_t, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_u64))) +void svstnt1h_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_offset_s32))) +void svstnt1h_scatter_offset(svbool_t, svuint32_t, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_offset_s64))) +void svstnt1h_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_u32))) +void svstnt1h_scatter(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_u64))) +void svstnt1h_scatter(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32base_s32))) +void svstnt1h_scatter(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64base_s64))) +void svstnt1h_scatter(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_s64))) +void svstnt1h_scatter_index(svbool_t, int16_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64index_u64))) +void 
svstnt1h_scatter_index(svbool_t, uint16_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_s64))) +void svstnt1h_scatter_index(svbool_t, int16_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64index_u64))) +void svstnt1h_scatter_index(svbool_t, uint16_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_s32))) +void svstnt1h_scatter_offset(svbool_t, int16_t *, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u32offset_u32))) +void svstnt1h_scatter_offset(svbool_t, uint16_t *, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_s64))) +void svstnt1h_scatter_offset(svbool_t, int16_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_s64offset_u64))) +void svstnt1h_scatter_offset(svbool_t, uint16_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_s64))) +void svstnt1h_scatter_offset(svbool_t, int16_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1h_scatter_u64offset_u64))) +void svstnt1h_scatter_offset(svbool_t, uint16_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_u64))) +void svstnt1w_scatter_index(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_index_s64))) +void svstnt1w_scatter_index(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_u64))) +void svstnt1w_scatter_offset(svbool_t, svuint64_t, int64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_offset_s64))) +void svstnt1w_scatter_offset(svbool_t, svuint64_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_u64))) +void svstnt1w_scatter(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64base_s64))) +void svstnt1w_scatter(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_s64))) +void svstnt1w_scatter_index(svbool_t, int32_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64index_u64))) +void svstnt1w_scatter_index(svbool_t, uint32_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_s64))) +void svstnt1w_scatter_index(svbool_t, int32_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64index_u64))) +void svstnt1w_scatter_index(svbool_t, uint32_t *, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_s64))) +void svstnt1w_scatter_offset(svbool_t, int32_t *, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_s64offset_u64))) +void svstnt1w_scatter_offset(svbool_t, uint32_t *, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_s64))) +void svstnt1w_scatter_offset(svbool_t, int32_t *, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1w_scatter_u64offset_u64))) +void svstnt1w_scatter_offset(svbool_t, uint32_t *, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_m))) +svbfloat16_t svadd_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_x))) +svbfloat16_t svadd_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_z))) +svbfloat16_t svadd_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_m))) +svbfloat16_t svadd_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_x))) +svbfloat16_t svadd_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_z))) +svbfloat16_t svadd_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_bf16))) +svbfloat16_t svclamp_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_m))) +svbfloat16_t svmax_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_x))) +svbfloat16_t svmax_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_z))) +svbfloat16_t svmax_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_m))) +svbfloat16_t svmax_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_x))) +svbfloat16_t svmax_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_z))) +svbfloat16_t svmax_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_m))) +svbfloat16_t svmaxnm_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_x))) +svbfloat16_t svmaxnm_n_bf16_x(svbool_t, 
svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_z))) +svbfloat16_t svmaxnm_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_m))) +svbfloat16_t svmaxnm_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_x))) +svbfloat16_t svmaxnm_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_z))) +svbfloat16_t svmaxnm_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_m))) +svbfloat16_t svmin_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_x))) +svbfloat16_t svmin_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_z))) +svbfloat16_t svmin_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_m))) +svbfloat16_t svmin_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_x))) +svbfloat16_t svmin_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_z))) +svbfloat16_t svmin_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_m))) +svbfloat16_t svminnm_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_x))) +svbfloat16_t svminnm_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_z))) +svbfloat16_t svminnm_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_m))) +svbfloat16_t svminnm_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_x))) +svbfloat16_t svminnm_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_z))) +svbfloat16_t svminnm_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_m))) +svbfloat16_t svmla_n_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_x))) +svbfloat16_t svmla_n_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_z))) +svbfloat16_t svmla_n_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_m))) +svbfloat16_t svmla_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_x))) +svbfloat16_t svmla_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_z))) +svbfloat16_t svmla_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_bf16))) +svbfloat16_t svmla_lane_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_m))) +svbfloat16_t svmls_n_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_x))) +svbfloat16_t svmls_n_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_z))) +svbfloat16_t svmls_n_bf16_z(svbool_t, svbfloat16_t, 
svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_m))) +svbfloat16_t svmls_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_x))) +svbfloat16_t svmls_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_z))) +svbfloat16_t svmls_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_bf16))) +svbfloat16_t svmls_lane_bf16(svbfloat16_t, svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_m))) +svbfloat16_t svmul_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_x))) +svbfloat16_t svmul_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_z))) +svbfloat16_t svmul_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_m))) +svbfloat16_t svmul_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_x))) +svbfloat16_t svmul_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_z))) +svbfloat16_t svmul_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_bf16))) +svbfloat16_t svmul_lane_bf16(svbfloat16_t, svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_m))) +svbfloat16_t svsub_n_bf16_m(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_x))) +svbfloat16_t svsub_n_bf16_x(svbool_t, svbfloat16_t, bfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_z))) +svbfloat16_t svsub_n_bf16_z(svbool_t, svbfloat16_t, bfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_m))) +svbfloat16_t svsub_bf16_m(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_x))) +svbfloat16_t svsub_bf16_x(svbool_t, svbfloat16_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_z))) +svbfloat16_t svsub_bf16_z(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_m))) +svbfloat16_t svadd_m(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_x))) +svbfloat16_t svadd_x(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_bf16_z))) +svbfloat16_t svadd_z(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_m))) +svbfloat16_t svadd_m(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_x))) +svbfloat16_t svadd_x(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_bf16_z))) +svbfloat16_t svadd_z(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_bf16))) +svbfloat16_t svclamp(svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_m))) +svbfloat16_t svmax_m(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_x))) +svbfloat16_t svmax_x(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_bf16_z))) +svbfloat16_t svmax_z(svbool_t, svbfloat16_t, bfloat16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_m))) +svbfloat16_t svmax_m(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_x))) +svbfloat16_t svmax_x(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_bf16_z))) +svbfloat16_t svmax_z(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_m))) +svbfloat16_t svmaxnm_m(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_x))) +svbfloat16_t svmaxnm_x(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_bf16_z))) +svbfloat16_t svmaxnm_z(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_m))) +svbfloat16_t svmaxnm_m(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_x))) +svbfloat16_t svmaxnm_x(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_bf16_z))) +svbfloat16_t svmaxnm_z(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_m))) +svbfloat16_t svmin_m(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_x))) +svbfloat16_t svmin_x(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_bf16_z))) +svbfloat16_t svmin_z(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_m))) +svbfloat16_t svmin_m(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_x))) +svbfloat16_t svmin_x(svbool_t, svbfloat16_t, svbfloat16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_bf16_z))) +svbfloat16_t svmin_z(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_m))) +svbfloat16_t svminnm_m(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_x))) +svbfloat16_t svminnm_x(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_bf16_z))) +svbfloat16_t svminnm_z(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_m))) +svbfloat16_t svminnm_m(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_x))) +svbfloat16_t svminnm_x(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_bf16_z))) +svbfloat16_t svminnm_z(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_m))) +svbfloat16_t svmla_m(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_x))) +svbfloat16_t svmla_x(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_bf16_z))) +svbfloat16_t svmla_z(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_m))) +svbfloat16_t svmla_m(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_x))) +svbfloat16_t svmla_x(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_bf16_z))) +svbfloat16_t svmla_z(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_bf16))) 
+svbfloat16_t svmla_lane(svbfloat16_t, svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_m))) +svbfloat16_t svmls_m(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_x))) +svbfloat16_t svmls_x(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_bf16_z))) +svbfloat16_t svmls_z(svbool_t, svbfloat16_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_m))) +svbfloat16_t svmls_m(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_x))) +svbfloat16_t svmls_x(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_bf16_z))) +svbfloat16_t svmls_z(svbool_t, svbfloat16_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_bf16))) +svbfloat16_t svmls_lane(svbfloat16_t, svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_m))) +svbfloat16_t svmul_m(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_x))) +svbfloat16_t svmul_x(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_bf16_z))) +svbfloat16_t svmul_z(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_m))) +svbfloat16_t svmul_m(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_x))) +svbfloat16_t svmul_x(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_bf16_z))) +svbfloat16_t svmul_z(svbool_t, svbfloat16_t, svbfloat16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_bf16))) +svbfloat16_t svmul_lane(svbfloat16_t, svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_m))) +svbfloat16_t svsub_m(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_x))) +svbfloat16_t svsub_x(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_bf16_z))) +svbfloat16_t svsub_z(svbool_t, svbfloat16_t, bfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_m))) +svbfloat16_t svsub_m(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_x))) +svbfloat16_t svsub_x(svbool_t, svbfloat16_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_bf16_z))) +svbfloat16_t svsub_z(svbool_t, svbfloat16_t, svbfloat16_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_bf16))) svbfloat16_t svtbl2_bf16(svbfloat16x2_t, svuint16_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_bf16))) @@ -27820,18 +8350,18 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u16))) uint16x8_t svaddqv_u16(svbool_t, svuint16_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s8))) int8x16_t svaddqv_s8(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f64))) -float64x2_t svaddqv_f64(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f32))) -float32x4_t svaddqv_f32(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f16))) -float16x8_t svaddqv_f16(svbool_t, svfloat16_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s32))) int32x4_t svaddqv_s32(svbool_t, svint32_t); __ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s64))) int64x2_t svaddqv_s64(svbool_t, svint64_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s16))) int16x8_t svaddqv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f64))) +float64x2_t svaddqv_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f32))) +float32x4_t svaddqv_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f16))) +float16x8_t svaddqv_f16(svbool_t, svfloat16_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u8))) uint8x16_t svandqv_u8(svbool_t, svuint8_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u32))) @@ -28734,18 +9264,18 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_u16))) uint16x8_t svaddqv(svbool_t, svuint16_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s8))) int8x16_t svaddqv(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f64))) -float64x2_t svaddqv(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f32))) -float32x4_t svaddqv(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f16))) -float16x8_t svaddqv(svbool_t, svfloat16_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s32))) int32x4_t svaddqv(svbool_t, svint32_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s64))) int64x2_t svaddqv(svbool_t, svint64_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_s16))) int16x8_t svaddqv(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f64))) +float64x2_t svaddqv(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f32))) +float32x4_t 
svaddqv(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddqv_f16))) +float16x8_t svaddqv(svbool_t, svfloat16_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u8))) uint8x16_t svandqv(svbool_t, svuint8_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandqv_u32))) @@ -29622,6 +10152,26 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s64))) svint64_t svzipq2(svint64_t, svint64_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzipq2_s16))) svint16_t svzipq2(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_bf16))) +svbfloat16_t svdup_laneq_bf16(svbfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_bf16))) +svbfloat16_t svdup_laneq(svbfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s8))) +svint8_t svclamp_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s32))) +svint32_t svclamp_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s64))) +svint64_t svclamp_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s16))) +svint16_t svclamp_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u8))) +svuint8_t svclamp_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u32))) +svuint32_t svclamp_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u64))) +svuint64_t svclamp_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u16))) +svuint16_t svclamp_u16(svuint16_t, svuint16_t, svuint16_t); __ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_b16))) svbool_t svpsel_lane_b16(svbool_t, svbool_t, uint32_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_b32))) @@ -29630,6 +10180,166 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_b64))) svbool_t svpsel_lane_b64(svbool_t, svbool_t, uint32_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpsel_lane_b8))) svbool_t svpsel_lane_b8(svbool_t, svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_m))) +svuint8_t svrevd_u8_m(svuint8_t, svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_m))) +svuint32_t svrevd_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_m))) +svuint64_t svrevd_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_m))) +svuint16_t svrevd_u16_m(svuint16_t, svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_m))) +svbfloat16_t svrevd_bf16_m(svbfloat16_t, svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_m))) +svint8_t svrevd_s8_m(svint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_m))) +svfloat64_t svrevd_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_m))) +svfloat32_t svrevd_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_m))) +svfloat16_t svrevd_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_m))) +svint32_t svrevd_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_m))) +svint64_t svrevd_s64_m(svint64_t, svbool_t, 
svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_m))) +svint16_t svrevd_s16_m(svint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_x))) +svuint8_t svrevd_u8_x(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_x))) +svuint32_t svrevd_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_x))) +svuint64_t svrevd_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_x))) +svuint16_t svrevd_u16_x(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_x))) +svbfloat16_t svrevd_bf16_x(svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_x))) +svint8_t svrevd_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_x))) +svfloat64_t svrevd_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_x))) +svfloat32_t svrevd_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_x))) +svfloat16_t svrevd_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_x))) +svint32_t svrevd_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_x))) +svint64_t svrevd_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_x))) +svint16_t svrevd_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_z))) +svuint8_t svrevd_u8_z(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_z))) +svuint32_t svrevd_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_z))) +svuint64_t svrevd_u64_z(svbool_t, 
svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_z))) +svuint16_t svrevd_u16_z(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_z))) +svbfloat16_t svrevd_bf16_z(svbool_t, svbfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_z))) +svint8_t svrevd_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_z))) +svfloat64_t svrevd_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_z))) +svfloat32_t svrevd_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_z))) +svfloat16_t svrevd_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_z))) +svint32_t svrevd_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_z))) +svint64_t svrevd_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_z))) +svint16_t svrevd_s16_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s8))) +svint8_t svclamp(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s32))) +svint32_t svclamp(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s64))) +svint64_t svclamp(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s16))) +svint16_t svclamp(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u8))) +svuint8_t svclamp(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u32))) +svuint32_t svclamp(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u64))) 
+svuint64_t svclamp(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u16))) +svuint16_t svclamp(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_m))) +svuint8_t svrevd_m(svuint8_t, svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_m))) +svuint32_t svrevd_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_m))) +svuint64_t svrevd_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_m))) +svuint16_t svrevd_m(svuint16_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_m))) +svbfloat16_t svrevd_m(svbfloat16_t, svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_m))) +svint8_t svrevd_m(svint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_m))) +svfloat64_t svrevd_m(svfloat64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_m))) +svfloat32_t svrevd_m(svfloat32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_m))) +svfloat16_t svrevd_m(svfloat16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_m))) +svint32_t svrevd_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_m))) +svint64_t svrevd_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_m))) +svint16_t svrevd_m(svint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_x))) +svuint8_t svrevd_x(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_x))) 
+svuint32_t svrevd_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_x))) +svuint64_t svrevd_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_x))) +svuint16_t svrevd_x(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_x))) +svbfloat16_t svrevd_x(svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_x))) +svint8_t svrevd_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_x))) +svfloat64_t svrevd_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_x))) +svfloat32_t svrevd_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_x))) +svfloat16_t svrevd_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_x))) +svint32_t svrevd_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_x))) +svint64_t svrevd_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_x))) +svint16_t svrevd_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_z))) +svuint8_t svrevd_z(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_z))) +svuint32_t svrevd_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_z))) +svuint64_t svrevd_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_z))) +svuint16_t svrevd_z(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_z))) +svbfloat16_t svrevd_z(svbool_t, svbfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_z))) +svint8_t svrevd_z(svbool_t, svint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_z))) +svfloat64_t svrevd_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_z))) +svfloat32_t svrevd_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_z))) +svfloat16_t svrevd_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_z))) +svint32_t svrevd_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_z))) +svint64_t svrevd_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_z))) +svint16_t svrevd_z(svbool_t, svint16_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslb_f32))) svfloat32_t svbfmlslb_f32(svfloat32_t, svbfloat16_t, svbfloat16_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbfmlslb_lane_f32))) @@ -29644,22 +10354,6 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f32))) svfloat32_t svclamp_f32(svfloat32_t, svfloat32_t, svfloat32_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f16))) svfloat16_t svclamp_f16(svfloat16_t, svfloat16_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s8))) -svint8_t svclamp_s8(svint8_t, svint8_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s32))) -svint32_t svclamp_s32(svint32_t, svint32_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s64))) -svint64_t svclamp_s64(svint64_t, svint64_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s16))) -svint16_t svclamp_s16(svint16_t, svint16_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u8))) -svuint8_t svclamp_u8(svuint8_t, svuint8_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u32))) -svuint32_t 
svclamp_u32(svuint32_t, svuint32_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u64))) -svuint64_t svclamp_u64(svuint64_t, svuint64_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u16))) -svuint16_t svclamp_u16(svuint16_t, svuint16_t, svuint16_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_c8))) uint64_t svcntp_c8(svcount_t, uint64_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_c32))) @@ -29914,84 +10608,22 @@ __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_c64))) svcount_t svptrue_c64(void); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_c16))) svcount_t svptrue_c16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_s16_s32_x2))) +svint16_t svqcvtn_s16_s32_x2(svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_s32_x2))) +svuint16_t svqcvtn_u16_s32_x2(svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_u32_x2))) +svuint16_t svqcvtn_u16_u32_x2(svuint32x2_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_s16_s32_x2))) svint16_t svqrshrn_n_s16_s32_x2(svint32x2_t, uint64_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_u16_u32_x2))) svuint16_t svqrshrn_n_u16_u32_x2(svuint32x2_t, uint64_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrun_n_u16_s32_x2))) svuint16_t svqrshrun_n_u16_s32_x2(svint32x2_t, uint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_m))) -svuint8_t svrevd_u8_m(svuint8_t, svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_m))) -svuint32_t svrevd_u32_m(svuint32_t, svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_m))) -svuint64_t svrevd_u64_m(svuint64_t, svbool_t, svuint64_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_m))) -svuint16_t svrevd_u16_m(svuint16_t, svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_m))) -svbfloat16_t svrevd_bf16_m(svbfloat16_t, svbool_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_m))) -svint8_t svrevd_s8_m(svint8_t, svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_m))) -svfloat64_t svrevd_f64_m(svfloat64_t, svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_m))) -svfloat32_t svrevd_f32_m(svfloat32_t, svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_m))) -svfloat16_t svrevd_f16_m(svfloat16_t, svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_m))) -svint32_t svrevd_s32_m(svint32_t, svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_m))) -svint64_t svrevd_s64_m(svint64_t, svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_m))) -svint16_t svrevd_s16_m(svint16_t, svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_x))) -svuint8_t svrevd_u8_x(svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_x))) -svuint32_t svrevd_u32_x(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_x))) -svuint64_t svrevd_u64_x(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_x))) -svuint16_t svrevd_u16_x(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_x))) -svbfloat16_t svrevd_bf16_x(svbool_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_x))) -svint8_t svrevd_s8_x(svbool_t, svint8_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_x))) -svfloat64_t svrevd_f64_x(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_x))) -svfloat32_t svrevd_f32_x(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_x))) -svfloat16_t svrevd_f16_x(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_x))) -svint32_t svrevd_s32_x(svbool_t, svint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_x))) -svint64_t svrevd_s64_x(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_x))) -svint16_t svrevd_s16_x(svbool_t, svint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_z))) -svuint8_t svrevd_u8_z(svbool_t, svuint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_z))) -svuint32_t svrevd_u32_z(svbool_t, svuint32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_z))) -svuint64_t svrevd_u64_z(svbool_t, svuint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_z))) -svuint16_t svrevd_u16_z(svbool_t, svuint16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_z))) -svbfloat16_t svrevd_bf16_z(svbool_t, svbfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_z))) -svint8_t svrevd_s8_z(svbool_t, svint8_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_z))) -svfloat64_t svrevd_f64_z(svbool_t, svfloat64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_z))) -svfloat32_t svrevd_f32_z(svbool_t, svfloat32_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_z))) -svfloat16_t svrevd_f16_z(svbool_t, svfloat16_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_z))) -svint32_t svrevd_s32_z(svbool_t, svint32_t); -__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_z))) -svint64_t svrevd_s64_z(svbool_t, svint64_t); -__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_z))) -svint16_t svrevd_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svreinterpret_b))) +svbool_t svreinterpret_b(svcount_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svreinterpret_c))) +svcount_t svreinterpret_c(svbool_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_b))) svboolx2_t svset2_b(svboolx2_t, uint64_t, svbool_t); __ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_b))) @@ -30334,22 +10966,6 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f32))) svfloat32_t svclamp(svfloat32_t, svfloat32_t, svfloat32_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_f16))) svfloat16_t svclamp(svfloat16_t, svfloat16_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s8))) -svint8_t svclamp(svint8_t, svint8_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s32))) -svint32_t svclamp(svint32_t, svint32_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s64))) -svint64_t svclamp(svint64_t, svint64_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_s16))) -svint16_t svclamp(svint16_t, svint16_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u8))) -svuint8_t svclamp(svuint8_t, svuint8_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u32))) -svuint32_t svclamp(svuint32_t, svuint32_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u64))) -svuint64_t svclamp(svuint64_t, svuint64_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclamp_u16))) -svuint16_t svclamp(svuint16_t, 
svuint16_t, svuint16_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_b))) svboolx2_t svcreate2(svbool_t, svbool_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_b))) @@ -30562,84 +11178,22 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32_x4 svfloat32x4_t svldnt1_vnum_x4(svcount_t, float32_t const *, int64_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32_x4))) svint32x4_t svldnt1_vnum_x4(svcount_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_s16_s32_x2))) +svint16_t svqcvtn_s16(svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_s32_x2))) +svuint16_t svqcvtn_u16(svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcvtn_u16_u32_x2))) +svuint16_t svqcvtn_u16(svuint32x2_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_s16_s32_x2))) svint16_t svqrshrn_s16(svint32x2_t, uint64_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrn_n_u16_u32_x2))) svuint16_t svqrshrn_u16(svuint32x2_t, uint64_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrun_n_u16_s32_x2))) svuint16_t svqrshrun_u16(svint32x2_t, uint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_m))) -svuint8_t svrevd_m(svuint8_t, svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_m))) -svuint32_t svrevd_m(svuint32_t, svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_m))) -svuint64_t svrevd_m(svuint64_t, svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_m))) -svuint16_t svrevd_m(svuint16_t, svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_m))) -svbfloat16_t svrevd_m(svbfloat16_t, svbool_t, svbfloat16_t); -__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_m))) -svint8_t svrevd_m(svint8_t, svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_m))) -svfloat64_t svrevd_m(svfloat64_t, svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_m))) -svfloat32_t svrevd_m(svfloat32_t, svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_m))) -svfloat16_t svrevd_m(svfloat16_t, svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_m))) -svint32_t svrevd_m(svint32_t, svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_m))) -svint64_t svrevd_m(svint64_t, svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_m))) -svint16_t svrevd_m(svint16_t, svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_x))) -svuint8_t svrevd_x(svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_x))) -svuint32_t svrevd_x(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_x))) -svuint64_t svrevd_x(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_x))) -svuint16_t svrevd_x(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_x))) -svbfloat16_t svrevd_x(svbool_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_x))) -svint8_t svrevd_x(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_x))) -svfloat64_t svrevd_x(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_x))) -svfloat32_t svrevd_x(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_x))) -svfloat16_t 
svrevd_x(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_x))) -svint32_t svrevd_x(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_x))) -svint64_t svrevd_x(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_x))) -svint16_t svrevd_x(svbool_t, svint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u8_z))) -svuint8_t svrevd_z(svbool_t, svuint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u32_z))) -svuint32_t svrevd_z(svbool_t, svuint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u64_z))) -svuint64_t svrevd_z(svbool_t, svuint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_u16_z))) -svuint16_t svrevd_z(svbool_t, svuint16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_bf16_z))) -svbfloat16_t svrevd_z(svbool_t, svbfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s8_z))) -svint8_t svrevd_z(svbool_t, svint8_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f64_z))) -svfloat64_t svrevd_z(svbool_t, svfloat64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f32_z))) -svfloat32_t svrevd_z(svbool_t, svfloat32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_f16_z))) -svfloat16_t svrevd_z(svbool_t, svfloat16_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s32_z))) -svint32_t svrevd_z(svbool_t, svint32_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s64_z))) -svint64_t svrevd_z(svbool_t, svint64_t); -__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevd_s16_z))) -svint16_t svrevd_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svreinterpret_b))) +svbool_t svreinterpret(svcount_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svreinterpret_c))) +svcount_t svreinterpret(svbool_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_b))) svboolx2_t svset2(svboolx2_t, uint64_t, svbool_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_b))) @@ -30964,6 +11518,19004 @@ __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s64_x svboolx2_t svwhilelt_b64_x2(int64_t, int64_t); __aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s64_x2))) svboolx2_t svwhilelt_b16_x2(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_u8))) +svuint8_t svdup_laneq_u8(svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_s8))) +svint8_t svdup_laneq_s8(svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_u64))) +svuint64_t svdup_laneq_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_f64))) +svfloat64_t svdup_laneq_f64(svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_s64))) +svint64_t svdup_laneq_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_u16))) +svuint16_t svdup_laneq_u16(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_f16))) +svfloat16_t svdup_laneq_f16(svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_s16))) +svint16_t svdup_laneq_s16(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_u32))) +svuint32_t svdup_laneq_u32(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_f32))) +svfloat32_t svdup_laneq_f32(svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_s32))) +svint32_t 
svdup_laneq_s32(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_u8))) +svuint8_t svdup_laneq(svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_s8))) +svint8_t svdup_laneq(svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_u64))) +svuint64_t svdup_laneq(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_f64))) +svfloat64_t svdup_laneq(svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_s64))) +svint64_t svdup_laneq(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_u16))) +svuint16_t svdup_laneq(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_f16))) +svfloat16_t svdup_laneq(svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_s16))) +svint16_t svdup_laneq(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_u32))) +svuint32_t svdup_laneq(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_f32))) +svfloat32_t svdup_laneq(svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_laneq_s32))) +svint32_t svdup_laneq(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s8))) +svint8_t svaba_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s32))) +svint32_t svaba_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s64))) +svint64_t svaba_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s16))) +svint16_t svaba_n_s16(svint16_t, svint16_t, int16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u8))) +svuint8_t svaba_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u32))) +svuint32_t svaba_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u64))) +svuint64_t svaba_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u16))) +svuint16_t svaba_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s8))) +svint8_t svaba_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s32))) +svint32_t svaba_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s64))) +svint64_t svaba_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s16))) +svint16_t svaba_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u8))) +svuint8_t svaba_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u32))) +svuint32_t svaba_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u64))) +svuint64_t svaba_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u16))) +svuint16_t svaba_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s32))) +svint32_t svabalb_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s64))) +svint64_t svabalb_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s16))) +svint16_t svabalb_n_s16(svint16_t, svint8_t, int8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u32))) +svuint32_t svabalb_n_u32(svuint32_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u64))) +svuint64_t svabalb_n_u64(svuint64_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u16))) +svuint16_t svabalb_n_u16(svuint16_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s32))) +svint32_t svabalb_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s64))) +svint64_t svabalb_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s16))) +svint16_t svabalb_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u32))) +svuint32_t svabalb_u32(svuint32_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u64))) +svuint64_t svabalb_u64(svuint64_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u16))) +svuint16_t svabalb_u16(svuint16_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s32))) +svint32_t svabalt_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s64))) +svint64_t svabalt_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s16))) +svint16_t svabalt_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u32))) +svuint32_t svabalt_n_u32(svuint32_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u64))) +svuint64_t svabalt_n_u64(svuint64_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u16))) +svuint16_t 
svabalt_n_u16(svuint16_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s32))) +svint32_t svabalt_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s64))) +svint64_t svabalt_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s16))) +svint16_t svabalt_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u32))) +svuint32_t svabalt_u32(svuint32_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u64))) +svuint64_t svabalt_u64(svuint64_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u16))) +svuint16_t svabalt_u16(svuint16_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s32))) +svint32_t svabdlb_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s64))) +svint64_t svabdlb_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s16))) +svint16_t svabdlb_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u32))) +svuint32_t svabdlb_n_u32(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u64))) +svuint64_t svabdlb_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u16))) +svuint16_t svabdlb_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s32))) +svint32_t svabdlb_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s64))) +svint64_t svabdlb_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s16))) +svint16_t svabdlb_s16(svint8_t, svint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u32))) +svuint32_t svabdlb_u32(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u64))) +svuint64_t svabdlb_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u16))) +svuint16_t svabdlb_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s32))) +svint32_t svabdlt_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s64))) +svint64_t svabdlt_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s16))) +svint16_t svabdlt_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u32))) +svuint32_t svabdlt_n_u32(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u64))) +svuint64_t svabdlt_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u16))) +svuint16_t svabdlt_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s32))) +svint32_t svabdlt_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s64))) +svint64_t svabdlt_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s16))) +svint16_t svabdlt_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u32))) +svuint32_t svabdlt_u32(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u64))) +svuint64_t svabdlt_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u16))) +svuint16_t svabdlt_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_m))) +svint32_t svadalp_s32_m(svbool_t, svint32_t, svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_m))) +svint64_t svadalp_s64_m(svbool_t, svint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_m))) +svint16_t svadalp_s16_m(svbool_t, svint16_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_x))) +svint32_t svadalp_s32_x(svbool_t, svint32_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_x))) +svint64_t svadalp_s64_x(svbool_t, svint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_x))) +svint16_t svadalp_s16_x(svbool_t, svint16_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_z))) +svint32_t svadalp_s32_z(svbool_t, svint32_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_z))) +svint64_t svadalp_s64_z(svbool_t, svint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_z))) +svint16_t svadalp_s16_z(svbool_t, svint16_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_m))) +svuint32_t svadalp_u32_m(svbool_t, svuint32_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_m))) +svuint64_t svadalp_u64_m(svbool_t, svuint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_m))) +svuint16_t svadalp_u16_m(svbool_t, svuint16_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_x))) +svuint32_t svadalp_u32_x(svbool_t, svuint32_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_x))) +svuint64_t svadalp_u64_x(svbool_t, svuint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_x))) +svuint16_t svadalp_u16_x(svbool_t, svuint16_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_z))) 
+svuint32_t svadalp_u32_z(svbool_t, svuint32_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_z))) +svuint64_t svadalp_u64_z(svbool_t, svuint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_z))) +svuint16_t svadalp_u16_z(svbool_t, svuint16_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u32))) +svuint32_t svadclb_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u64))) +svuint64_t svadclb_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u32))) +svuint32_t svadclb_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u64))) +svuint64_t svadclb_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u32))) +svuint32_t svadclt_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u64))) +svuint64_t svadclt_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u32))) +svuint32_t svadclt_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u64))) +svuint64_t svadclt_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u32))) +svuint16_t svaddhnb_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u64))) +svuint32_t svaddhnb_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u16))) +svuint8_t svaddhnb_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s32))) +svint16_t svaddhnb_n_s32(svint32_t, int32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s64))) +svint32_t svaddhnb_n_s64(svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s16))) +svint8_t svaddhnb_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u32))) +svuint16_t svaddhnb_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u64))) +svuint32_t svaddhnb_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u16))) +svuint8_t svaddhnb_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s32))) +svint16_t svaddhnb_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s64))) +svint32_t svaddhnb_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s16))) +svint8_t svaddhnb_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u32))) +svuint16_t svaddhnt_n_u32(svuint16_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u64))) +svuint32_t svaddhnt_n_u64(svuint32_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u16))) +svuint8_t svaddhnt_n_u16(svuint8_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s32))) +svint16_t svaddhnt_n_s32(svint16_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s64))) +svint32_t svaddhnt_n_s64(svint32_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s16))) +svint8_t svaddhnt_n_s16(svint8_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u32))) +svuint16_t svaddhnt_u32(svuint16_t, svuint32_t, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u64))) +svuint32_t svaddhnt_u64(svuint32_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u16))) +svuint8_t svaddhnt_u16(svuint8_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s32))) +svint16_t svaddhnt_s32(svint16_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s64))) +svint32_t svaddhnt_s64(svint32_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s16))) +svint8_t svaddhnt_s16(svint8_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s32))) +svint32_t svaddlb_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s64))) +svint64_t svaddlb_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s16))) +svint16_t svaddlb_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u32))) +svuint32_t svaddlb_n_u32(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u64))) +svuint64_t svaddlb_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u16))) +svuint16_t svaddlb_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s32))) +svint32_t svaddlb_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s64))) +svint64_t svaddlb_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s16))) +svint16_t svaddlb_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u32))) +svuint32_t svaddlb_u32(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u64))) +svuint64_t 
svaddlb_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u16))) +svuint16_t svaddlb_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s32))) +svint32_t svaddlbt_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s64))) +svint64_t svaddlbt_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s16))) +svint16_t svaddlbt_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s32))) +svint32_t svaddlbt_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s64))) +svint64_t svaddlbt_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s16))) +svint16_t svaddlbt_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s32))) +svint32_t svaddlt_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s64))) +svint64_t svaddlt_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s16))) +svint16_t svaddlt_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u32))) +svuint32_t svaddlt_n_u32(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u64))) +svuint64_t svaddlt_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u16))) +svuint16_t svaddlt_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s32))) +svint32_t svaddlt_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s64))) +svint64_t svaddlt_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s16))) +svint16_t 
svaddlt_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u32))) +svuint32_t svaddlt_u32(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u64))) +svuint64_t svaddlt_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u16))) +svuint16_t svaddlt_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_m))) +svfloat64_t svaddp_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_m))) +svfloat32_t svaddp_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_m))) +svfloat16_t svaddp_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_x))) +svfloat64_t svaddp_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_x))) +svfloat32_t svaddp_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_x))) +svfloat16_t svaddp_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_m))) +svuint8_t svaddp_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_m))) +svuint32_t svaddp_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_m))) +svuint64_t svaddp_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_m))) +svuint16_t svaddp_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_m))) +svint8_t svaddp_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_m))) 
+svint32_t svaddp_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_m))) +svint64_t svaddp_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_m))) +svint16_t svaddp_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_x))) +svuint8_t svaddp_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_x))) +svuint32_t svaddp_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_x))) +svuint64_t svaddp_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_x))) +svuint16_t svaddp_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_x))) +svint8_t svaddp_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_x))) +svint32_t svaddp_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_x))) +svint64_t svaddp_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_x))) +svint16_t svaddp_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s32))) +svint32_t svaddwb_n_s32(svint32_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s64))) +svint64_t svaddwb_n_s64(svint64_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s16))) +svint16_t svaddwb_n_s16(svint16_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u32))) +svuint32_t svaddwb_n_u32(svuint32_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u64))) +svuint64_t 
svaddwb_n_u64(svuint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u16))) +svuint16_t svaddwb_n_u16(svuint16_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s32))) +svint32_t svaddwb_s32(svint32_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s64))) +svint64_t svaddwb_s64(svint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s16))) +svint16_t svaddwb_s16(svint16_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u32))) +svuint32_t svaddwb_u32(svuint32_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u64))) +svuint64_t svaddwb_u64(svuint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u16))) +svuint16_t svaddwb_u16(svuint16_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s32))) +svint32_t svaddwt_n_s32(svint32_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s64))) +svint64_t svaddwt_n_s64(svint64_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s16))) +svint16_t svaddwt_n_s16(svint16_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u32))) +svuint32_t svaddwt_n_u32(svuint32_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u64))) +svuint64_t svaddwt_n_u64(svuint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u16))) +svuint16_t svaddwt_n_u16(svuint16_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s32))) +svint32_t svaddwt_s32(svint32_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s64))) +svint64_t svaddwt_s64(svint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s16))) +svint16_t 
svaddwt_s16(svint16_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u32))) +svuint32_t svaddwt_u32(svuint32_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u64))) +svuint64_t svaddwt_u64(svuint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u16))) +svuint16_t svaddwt_u16(svuint16_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u8))) +svuint8_t svbcax_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u32))) +svuint32_t svbcax_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u64))) +svuint64_t svbcax_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u16))) +svuint16_t svbcax_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s8))) +svint8_t svbcax_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s32))) +svint32_t svbcax_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s64))) +svint64_t svbcax_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s16))) +svint16_t svbcax_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u8))) +svuint8_t svbcax_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u32))) +svuint32_t svbcax_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u64))) +svuint64_t svbcax_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u16))) +svuint16_t svbcax_u16(svuint16_t, svuint16_t, 
svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s8))) +svint8_t svbcax_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s32))) +svint32_t svbcax_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s64))) +svint64_t svbcax_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s16))) +svint16_t svbcax_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u8))) +svuint8_t svbsl1n_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u32))) +svuint32_t svbsl1n_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u64))) +svuint64_t svbsl1n_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u16))) +svuint16_t svbsl1n_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s8))) +svint8_t svbsl1n_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s32))) +svint32_t svbsl1n_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s64))) +svint64_t svbsl1n_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s16))) +svint16_t svbsl1n_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u8))) +svuint8_t svbsl1n_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u32))) +svuint32_t svbsl1n_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u64))) +svuint64_t svbsl1n_u64(svuint64_t, 
svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u16))) +svuint16_t svbsl1n_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s8))) +svint8_t svbsl1n_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s32))) +svint32_t svbsl1n_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s64))) +svint64_t svbsl1n_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s16))) +svint16_t svbsl1n_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u8))) +svuint8_t svbsl2n_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u32))) +svuint32_t svbsl2n_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u64))) +svuint64_t svbsl2n_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u16))) +svuint16_t svbsl2n_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s8))) +svint8_t svbsl2n_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s32))) +svint32_t svbsl2n_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s64))) +svint64_t svbsl2n_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s16))) +svint16_t svbsl2n_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u8))) +svuint8_t svbsl2n_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u32))) +svuint32_t 
svbsl2n_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u64))) +svuint64_t svbsl2n_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u16))) +svuint16_t svbsl2n_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s8))) +svint8_t svbsl2n_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s32))) +svint32_t svbsl2n_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s64))) +svint64_t svbsl2n_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s16))) +svint16_t svbsl2n_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u8))) +svuint8_t svbsl_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u32))) +svuint32_t svbsl_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u64))) +svuint64_t svbsl_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u16))) +svuint16_t svbsl_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s8))) +svint8_t svbsl_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s32))) +svint32_t svbsl_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s64))) +svint64_t svbsl_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s16))) +svint16_t svbsl_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u8))) +svuint8_t 
svbsl_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u32))) +svuint32_t svbsl_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u64))) +svuint64_t svbsl_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u16))) +svuint16_t svbsl_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s8))) +svint8_t svbsl_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s32))) +svint32_t svbsl_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s64))) +svint64_t svbsl_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s16))) +svint16_t svbsl_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u8))) +svuint8_t svcadd_u8(svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u32))) +svuint32_t svcadd_u32(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u64))) +svuint64_t svcadd_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u16))) +svuint16_t svcadd_u16(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s8))) +svint8_t svcadd_s8(svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s32))) +svint32_t svcadd_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s64))) +svint64_t svcadd_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s16))) +svint16_t svcadd_s16(svint16_t, svint16_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s32))) +svint32_t svcdot_s32(svint32_t, svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s64))) +svint64_t svcdot_s64(svint64_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s32))) +svint32_t svcdot_lane_s32(svint32_t, svint8_t, svint8_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s64))) +svint64_t svcdot_lane_s64(svint64_t, svint16_t, svint16_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u8))) +svuint8_t svcmla_u8(svuint8_t, svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u32))) +svuint32_t svcmla_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u64))) +svuint64_t svcmla_u64(svuint64_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u16))) +svuint16_t svcmla_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s8))) +svint8_t svcmla_s8(svint8_t, svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s32))) +svint32_t svcmla_s32(svint32_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s64))) +svint64_t svcmla_s64(svint64_t, svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s16))) +svint16_t svcmla_s16(svint16_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u32))) +svuint32_t svcmla_lane_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u16))) +svuint16_t 
svcmla_lane_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s32))) +svint32_t svcmla_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s16))) +svint16_t svcmla_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_m))) +svfloat32_t svcvtlt_f32_f16_m(svfloat32_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_x))) +svfloat32_t svcvtlt_f32_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_m))) +svfloat64_t svcvtlt_f64_f32_m(svfloat64_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_x))) +svfloat64_t svcvtlt_f64_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f16_f32_m))) +svfloat16_t svcvtnt_f16_f32_m(svfloat16_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f32_f64_m))) +svfloat32_t svcvtnt_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_m))) +svfloat32_t svcvtx_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_x))) +svfloat32_t svcvtx_f32_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_z))) +svfloat32_t svcvtx_f32_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtxnt_f32_f64_m))) +svfloat32_t svcvtxnt_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u8))) +svuint8_t sveor3_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u32))) +svuint32_t sveor3_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u64))) +svuint64_t sveor3_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u16))) +svuint16_t sveor3_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s8))) +svint8_t sveor3_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s32))) +svint32_t sveor3_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s64))) +svint64_t sveor3_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s16))) +svint16_t sveor3_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u8))) +svuint8_t sveor3_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u32))) +svuint32_t sveor3_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u64))) +svuint64_t sveor3_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u16))) +svuint16_t sveor3_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s8))) +svint8_t sveor3_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s32))) +svint32_t sveor3_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s64))) +svint64_t sveor3_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s16))) +svint16_t sveor3_s16(svint16_t, svint16_t, svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u8))) +svuint8_t sveorbt_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u32))) +svuint32_t sveorbt_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u64))) +svuint64_t sveorbt_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u16))) +svuint16_t sveorbt_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s8))) +svint8_t sveorbt_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s32))) +svint32_t sveorbt_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s64))) +svint64_t sveorbt_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s16))) +svint16_t sveorbt_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u8))) +svuint8_t sveorbt_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u32))) +svuint32_t sveorbt_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u64))) +svuint64_t sveorbt_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u16))) +svuint16_t sveorbt_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s8))) +svint8_t sveorbt_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s32))) +svint32_t sveorbt_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s64))) +svint64_t sveorbt_s64(svint64_t, 
svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s16))) +svint16_t sveorbt_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u8))) +svuint8_t sveortb_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u32))) +svuint32_t sveortb_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u64))) +svuint64_t sveortb_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u16))) +svuint16_t sveortb_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s8))) +svint8_t sveortb_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s32))) +svint32_t sveortb_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s64))) +svint64_t sveortb_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s16))) +svint16_t sveortb_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u8))) +svuint8_t sveortb_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u32))) +svuint32_t sveortb_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u64))) +svuint64_t sveortb_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u16))) +svuint16_t sveortb_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s8))) +svint8_t sveortb_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s32))) +svint32_t 
sveortb_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s64))) +svint64_t sveortb_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s16))) +svint16_t sveortb_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_m))) +svint8_t svhadd_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_m))) +svint32_t svhadd_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_m))) +svint64_t svhadd_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_m))) +svint16_t svhadd_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_x))) +svint8_t svhadd_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_x))) +svint32_t svhadd_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_x))) +svint64_t svhadd_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_x))) +svint16_t svhadd_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_z))) +svint8_t svhadd_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_z))) +svint32_t svhadd_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_z))) +svint64_t svhadd_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_z))) +svint16_t svhadd_n_s16_z(svbool_t, svint16_t, int16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_m))) +svuint8_t svhadd_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_m))) +svuint32_t svhadd_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_m))) +svuint64_t svhadd_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_m))) +svuint16_t svhadd_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_x))) +svuint8_t svhadd_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_x))) +svuint32_t svhadd_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_x))) +svuint64_t svhadd_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_x))) +svuint16_t svhadd_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_z))) +svuint8_t svhadd_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_z))) +svuint32_t svhadd_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_z))) +svuint64_t svhadd_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_z))) +svuint16_t svhadd_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_m))) +svint8_t svhadd_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_m))) +svint32_t svhadd_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_m))) +svint64_t 
svhadd_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_m))) +svint16_t svhadd_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_x))) +svint8_t svhadd_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_x))) +svint32_t svhadd_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_x))) +svint64_t svhadd_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_x))) +svint16_t svhadd_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_z))) +svint8_t svhadd_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_z))) +svint32_t svhadd_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_z))) +svint64_t svhadd_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_z))) +svint16_t svhadd_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_m))) +svuint8_t svhadd_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_m))) +svuint32_t svhadd_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_m))) +svuint64_t svhadd_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_m))) +svuint16_t svhadd_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_x))) +svuint8_t svhadd_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_x))) 
+svuint32_t svhadd_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_x))) +svuint64_t svhadd_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_x))) +svuint16_t svhadd_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_z))) +svuint8_t svhadd_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_z))) +svuint32_t svhadd_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_z))) +svuint64_t svhadd_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_z))) +svuint16_t svhadd_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_m))) +svint8_t svhsub_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_m))) +svint32_t svhsub_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_m))) +svint64_t svhsub_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_m))) +svint16_t svhsub_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_x))) +svint8_t svhsub_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_x))) +svint32_t svhsub_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_x))) +svint64_t svhsub_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_x))) +svint16_t svhsub_n_s16_x(svbool_t, svint16_t, int16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_z))) +svint8_t svhsub_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_z))) +svint32_t svhsub_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_z))) +svint64_t svhsub_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_z))) +svint16_t svhsub_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_m))) +svuint8_t svhsub_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_m))) +svuint32_t svhsub_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_m))) +svuint64_t svhsub_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_m))) +svuint16_t svhsub_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_x))) +svuint8_t svhsub_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_x))) +svuint32_t svhsub_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_x))) +svuint64_t svhsub_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_x))) +svuint16_t svhsub_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_z))) +svuint8_t svhsub_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_z))) +svuint32_t svhsub_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_z))) +svuint64_t 
svhsub_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_z))) +svuint16_t svhsub_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_m))) +svint8_t svhsub_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_m))) +svint32_t svhsub_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_m))) +svint64_t svhsub_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_m))) +svint16_t svhsub_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_x))) +svint8_t svhsub_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_x))) +svint32_t svhsub_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_x))) +svint64_t svhsub_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_x))) +svint16_t svhsub_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_z))) +svint8_t svhsub_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_z))) +svint32_t svhsub_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_z))) +svint64_t svhsub_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_z))) +svint16_t svhsub_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_m))) +svuint8_t svhsub_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_m))) 
+svuint32_t svhsub_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_m))) +svuint64_t svhsub_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_m))) +svuint16_t svhsub_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_x))) +svuint8_t svhsub_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_x))) +svuint32_t svhsub_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_x))) +svuint64_t svhsub_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_x))) +svuint16_t svhsub_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_z))) +svuint8_t svhsub_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_z))) +svuint32_t svhsub_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_z))) +svuint64_t svhsub_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_z))) +svuint16_t svhsub_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_m))) +svint8_t svhsubr_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_m))) +svint32_t svhsubr_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_m))) +svint64_t svhsubr_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_m))) +svint16_t svhsubr_n_s16_m(svbool_t, svint16_t, int16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_x))) +svint8_t svhsubr_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_x))) +svint32_t svhsubr_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_x))) +svint64_t svhsubr_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_x))) +svint16_t svhsubr_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_z))) +svint8_t svhsubr_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_z))) +svint32_t svhsubr_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_z))) +svint64_t svhsubr_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_z))) +svint16_t svhsubr_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_m))) +svuint8_t svhsubr_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_m))) +svuint32_t svhsubr_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_m))) +svuint64_t svhsubr_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_m))) +svuint16_t svhsubr_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_x))) +svuint8_t svhsubr_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_x))) +svuint32_t svhsubr_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_x))) +svuint64_t svhsubr_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_x))) +svuint16_t svhsubr_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_z))) +svuint8_t svhsubr_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_z))) +svuint32_t svhsubr_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_z))) +svuint64_t svhsubr_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_z))) +svuint16_t svhsubr_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_m))) +svint8_t svhsubr_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_m))) +svint32_t svhsubr_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_m))) +svint64_t svhsubr_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_m))) +svint16_t svhsubr_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_x))) +svint8_t svhsubr_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_x))) +svint32_t svhsubr_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_x))) +svint64_t svhsubr_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_x))) +svint16_t svhsubr_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_z))) +svint8_t 
svhsubr_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_z))) +svint32_t svhsubr_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_z))) +svint64_t svhsubr_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_z))) +svint16_t svhsubr_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_m))) +svuint8_t svhsubr_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_m))) +svuint32_t svhsubr_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_m))) +svuint64_t svhsubr_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_m))) +svuint16_t svhsubr_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_x))) +svuint8_t svhsubr_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_x))) +svuint32_t svhsubr_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_x))) +svuint64_t svhsubr_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_x))) +svuint16_t svhsubr_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_z))) +svuint8_t svhsubr_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_z))) +svuint32_t svhsubr_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_z))) +svuint64_t svhsubr_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_z))) +svuint16_t svhsubr_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_m))) +svint64_t svlogb_f64_m(svint64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_m))) +svint32_t svlogb_f32_m(svint32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_m))) +svint16_t svlogb_f16_m(svint16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_x))) +svint64_t svlogb_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_x))) +svint32_t svlogb_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_x))) +svint16_t svlogb_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_z))) +svint64_t svlogb_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_z))) +svint32_t svlogb_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_z))) +svint16_t svlogb_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_m))) +svfloat64_t svmaxnmp_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_m))) +svfloat32_t svmaxnmp_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_m))) +svfloat16_t svmaxnmp_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_x))) +svfloat64_t svmaxnmp_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_x))) +svfloat32_t svmaxnmp_f32_x(svbool_t, svfloat32_t, 
svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_x))) +svfloat16_t svmaxnmp_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_m))) +svfloat64_t svmaxp_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_m))) +svfloat32_t svmaxp_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_m))) +svfloat16_t svmaxp_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_x))) +svfloat64_t svmaxp_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_x))) +svfloat32_t svmaxp_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_x))) +svfloat16_t svmaxp_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_m))) +svint8_t svmaxp_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_m))) +svint32_t svmaxp_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_m))) +svint64_t svmaxp_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_m))) +svint16_t svmaxp_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_x))) +svint8_t svmaxp_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_x))) +svint32_t svmaxp_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_x))) +svint64_t svmaxp_s64_x(svbool_t, svint64_t, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_x))) +svint16_t svmaxp_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_m))) +svuint8_t svmaxp_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_m))) +svuint32_t svmaxp_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_m))) +svuint64_t svmaxp_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_m))) +svuint16_t svmaxp_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_x))) +svuint8_t svmaxp_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_x))) +svuint32_t svmaxp_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_x))) +svuint64_t svmaxp_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_x))) +svuint16_t svmaxp_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_m))) +svfloat64_t svminnmp_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_m))) +svfloat32_t svminnmp_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_m))) +svfloat16_t svminnmp_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_x))) +svfloat64_t svminnmp_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_x))) +svfloat32_t svminnmp_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_x))) +svfloat16_t svminnmp_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_m))) +svfloat64_t svminp_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_m))) +svfloat32_t svminp_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_m))) +svfloat16_t svminp_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_x))) +svfloat64_t svminp_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_x))) +svfloat32_t svminp_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_x))) +svfloat16_t svminp_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_m))) +svint8_t svminp_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_m))) +svint32_t svminp_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_m))) +svint64_t svminp_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_m))) +svint16_t svminp_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_x))) +svint8_t svminp_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_x))) +svint32_t svminp_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_x))) +svint64_t svminp_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_x))) +svint16_t 
svminp_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_m))) +svuint8_t svminp_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_m))) +svuint32_t svminp_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_m))) +svuint64_t svminp_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_m))) +svuint16_t svminp_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_x))) +svuint8_t svminp_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_x))) +svuint32_t svminp_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_x))) +svuint64_t svminp_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_x))) +svuint16_t svminp_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u32))) +svuint32_t svmla_lane_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u64))) +svuint64_t svmla_lane_u64(svuint64_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u16))) +svuint16_t svmla_lane_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s32))) +svint32_t svmla_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s64))) +svint64_t svmla_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s16))) +svint16_t 
svmla_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_f32))) +svfloat32_t svmlalb_n_f32(svfloat32_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s32))) +svint32_t svmlalb_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s64))) +svint64_t svmlalb_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s16))) +svint16_t svmlalb_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u32))) +svuint32_t svmlalb_n_u32(svuint32_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u64))) +svuint64_t svmlalb_n_u64(svuint64_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u16))) +svuint16_t svmlalb_n_u16(svuint16_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_f32))) +svfloat32_t svmlalb_f32(svfloat32_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s32))) +svint32_t svmlalb_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s64))) +svint64_t svmlalb_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s16))) +svint16_t svmlalb_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u32))) +svuint32_t svmlalb_u32(svuint32_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u64))) +svuint64_t svmlalb_u64(svuint64_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u16))) +svuint16_t svmlalb_u16(svuint16_t, svuint8_t, svuint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_f32))) +svfloat32_t svmlalb_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s32))) +svint32_t svmlalb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s64))) +svint64_t svmlalb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u32))) +svuint32_t svmlalb_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u64))) +svuint64_t svmlalb_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_f32))) +svfloat32_t svmlalt_n_f32(svfloat32_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s32))) +svint32_t svmlalt_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s64))) +svint64_t svmlalt_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s16))) +svint16_t svmlalt_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u32))) +svuint32_t svmlalt_n_u32(svuint32_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u64))) +svuint64_t svmlalt_n_u64(svuint64_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u16))) +svuint16_t svmlalt_n_u16(svuint16_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_f32))) +svfloat32_t svmlalt_f32(svfloat32_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s32))) +svint32_t svmlalt_s32(svint32_t, svint16_t, 
svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s64))) +svint64_t svmlalt_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s16))) +svint16_t svmlalt_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u32))) +svuint32_t svmlalt_u32(svuint32_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u64))) +svuint64_t svmlalt_u64(svuint64_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u16))) +svuint16_t svmlalt_u16(svuint16_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_f32))) +svfloat32_t svmlalt_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s32))) +svint32_t svmlalt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s64))) +svint64_t svmlalt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u32))) +svuint32_t svmlalt_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u64))) +svuint64_t svmlalt_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u32))) +svuint32_t svmls_lane_u32(svuint32_t, svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u64))) +svuint64_t svmls_lane_u64(svuint64_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u16))) +svuint16_t svmls_lane_u16(svuint16_t, svuint16_t, svuint16_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s32))) +svint32_t svmls_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s64))) +svint64_t svmls_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s16))) +svint16_t svmls_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_f32))) +svfloat32_t svmlslb_n_f32(svfloat32_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s32))) +svint32_t svmlslb_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s64))) +svint64_t svmlslb_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s16))) +svint16_t svmlslb_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u32))) +svuint32_t svmlslb_n_u32(svuint32_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u64))) +svuint64_t svmlslb_n_u64(svuint64_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u16))) +svuint16_t svmlslb_n_u16(svuint16_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_f32))) +svfloat32_t svmlslb_f32(svfloat32_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s32))) +svint32_t svmlslb_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s64))) +svint64_t svmlslb_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s16))) +svint16_t svmlslb_s16(svint16_t, svint8_t, svint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u32))) +svuint32_t svmlslb_u32(svuint32_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u64))) +svuint64_t svmlslb_u64(svuint64_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u16))) +svuint16_t svmlslb_u16(svuint16_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_f32))) +svfloat32_t svmlslb_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s32))) +svint32_t svmlslb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s64))) +svint64_t svmlslb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u32))) +svuint32_t svmlslb_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u64))) +svuint64_t svmlslb_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_f32))) +svfloat32_t svmlslt_n_f32(svfloat32_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s32))) +svint32_t svmlslt_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s64))) +svint64_t svmlslt_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s16))) +svint16_t svmlslt_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u32))) +svuint32_t svmlslt_n_u32(svuint32_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u64))) +svuint64_t svmlslt_n_u64(svuint64_t, svuint32_t, 
uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u16))) +svuint16_t svmlslt_n_u16(svuint16_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_f32))) +svfloat32_t svmlslt_f32(svfloat32_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s32))) +svint32_t svmlslt_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s64))) +svint64_t svmlslt_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s16))) +svint16_t svmlslt_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u32))) +svuint32_t svmlslt_u32(svuint32_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u64))) +svuint64_t svmlslt_u64(svuint64_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u16))) +svuint16_t svmlslt_u16(svuint16_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_f32))) +svfloat32_t svmlslt_lane_f32(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s32))) +svint32_t svmlslt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s64))) +svint64_t svmlslt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u32))) +svuint32_t svmlslt_lane_u32(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u64))) +svuint64_t svmlslt_lane_u64(svuint64_t, svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s32))) +svint32_t svmovlb_s32(svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s64))) +svint64_t svmovlb_s64(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s16))) +svint16_t svmovlb_s16(svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u32))) +svuint32_t svmovlb_u32(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u64))) +svuint64_t svmovlb_u64(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u16))) +svuint16_t svmovlb_u16(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s32))) +svint32_t svmovlt_s32(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s64))) +svint64_t svmovlt_s64(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s16))) +svint16_t svmovlt_s16(svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u32))) +svuint32_t svmovlt_u32(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u64))) +svuint64_t svmovlt_u64(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u16))) +svuint16_t svmovlt_u16(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u32))) +svuint32_t svmul_lane_u32(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u64))) +svuint64_t svmul_lane_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u16))) +svuint16_t svmul_lane_u16(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s32))) +svint32_t svmul_lane_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s64))) +svint64_t svmul_lane_s64(svint64_t, svint64_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s16))) +svint16_t svmul_lane_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s32))) +svint32_t svmullb_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s64))) +svint64_t svmullb_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s16))) +svint16_t svmullb_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u32))) +svuint32_t svmullb_n_u32(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u64))) +svuint64_t svmullb_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u16))) +svuint16_t svmullb_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s32))) +svint32_t svmullb_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s64))) +svint64_t svmullb_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s16))) +svint16_t svmullb_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u32))) +svuint32_t svmullb_u32(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u64))) +svuint64_t svmullb_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u16))) +svuint16_t svmullb_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s32))) +svint32_t svmullb_lane_s32(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s64))) +svint64_t svmullb_lane_s64(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u32))) +svuint32_t 
svmullb_lane_u32(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u64))) +svuint64_t svmullb_lane_u64(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s32))) +svint32_t svmullt_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s64))) +svint64_t svmullt_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s16))) +svint16_t svmullt_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u32))) +svuint32_t svmullt_n_u32(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u64))) +svuint64_t svmullt_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u16))) +svuint16_t svmullt_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s32))) +svint32_t svmullt_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s64))) +svint64_t svmullt_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s16))) +svint16_t svmullt_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u32))) +svuint32_t svmullt_u32(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u64))) +svuint64_t svmullt_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u16))) +svuint16_t svmullt_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s32))) +svint32_t svmullt_lane_s32(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s64))) +svint64_t svmullt_lane_s64(svint32_t, svint32_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u32))) +svuint32_t svmullt_lane_u32(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u64))) +svuint64_t svmullt_lane_u64(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u8))) +svuint8_t svnbsl_n_u8(svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u32))) +svuint32_t svnbsl_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u64))) +svuint64_t svnbsl_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u16))) +svuint16_t svnbsl_n_u16(svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s8))) +svint8_t svnbsl_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s32))) +svint32_t svnbsl_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s64))) +svint64_t svnbsl_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s16))) +svint16_t svnbsl_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u8))) +svuint8_t svnbsl_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u32))) +svuint32_t svnbsl_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u64))) +svuint64_t svnbsl_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u16))) +svuint16_t svnbsl_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s8))) +svint8_t svnbsl_s8(svint8_t, svint8_t, 
svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s32))) +svint32_t svnbsl_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s64))) +svint64_t svnbsl_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s16))) +svint16_t svnbsl_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_n_u8))) +svuint8_t svpmul_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_u8))) +svuint8_t svpmul_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u64))) +svuint64_t svpmullb_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u16))) +svuint16_t svpmullb_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u64))) +svuint64_t svpmullb_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u16))) +svuint16_t svpmullb_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u8))) +svuint8_t svpmullb_pair_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u32))) +svuint32_t svpmullb_pair_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u8))) +svuint8_t svpmullb_pair_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u32))) +svuint32_t svpmullb_pair_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u64))) +svuint64_t svpmullt_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u16))) +svuint16_t svpmullt_n_u16(svuint8_t, uint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u64))) +svuint64_t svpmullt_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u16))) +svuint16_t svpmullt_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u8))) +svuint8_t svpmullt_pair_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u32))) +svuint32_t svpmullt_pair_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u8))) +svuint8_t svpmullt_pair_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u32))) +svuint32_t svpmullt_pair_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_m))) +svint8_t svqabs_s8_m(svint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_m))) +svint32_t svqabs_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_m))) +svint64_t svqabs_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_m))) +svint16_t svqabs_s16_m(svint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_x))) +svint8_t svqabs_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_x))) +svint32_t svqabs_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_x))) +svint64_t svqabs_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_x))) +svint16_t svqabs_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_z))) +svint8_t svqabs_s8_z(svbool_t, svint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_z))) +svint32_t svqabs_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_z))) +svint64_t svqabs_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_z))) +svint16_t svqabs_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_m))) +svint8_t svqadd_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_m))) +svint32_t svqadd_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_m))) +svint64_t svqadd_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_m))) +svint16_t svqadd_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_x))) +svint8_t svqadd_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_x))) +svint32_t svqadd_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_x))) +svint64_t svqadd_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_x))) +svint16_t svqadd_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_z))) +svint8_t svqadd_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_z))) +svint32_t svqadd_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_z))) +svint64_t svqadd_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_z))) +svint16_t svqadd_n_s16_z(svbool_t, svint16_t, int16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_m))) +svuint8_t svqadd_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_m))) +svuint32_t svqadd_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_m))) +svuint64_t svqadd_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_m))) +svuint16_t svqadd_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_x))) +svuint8_t svqadd_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_x))) +svuint32_t svqadd_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_x))) +svuint64_t svqadd_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_x))) +svuint16_t svqadd_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_z))) +svuint8_t svqadd_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_z))) +svuint32_t svqadd_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_z))) +svuint64_t svqadd_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_z))) +svuint16_t svqadd_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_m))) +svint8_t svqadd_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_m))) +svint32_t svqadd_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_m))) +svint64_t 
svqadd_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_m))) +svint16_t svqadd_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_x))) +svint8_t svqadd_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_x))) +svint32_t svqadd_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_x))) +svint64_t svqadd_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_x))) +svint16_t svqadd_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_z))) +svint8_t svqadd_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_z))) +svint32_t svqadd_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_z))) +svint64_t svqadd_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_z))) +svint16_t svqadd_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_m))) +svuint8_t svqadd_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_m))) +svuint32_t svqadd_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_m))) +svuint64_t svqadd_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_m))) +svuint16_t svqadd_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_x))) +svuint8_t svqadd_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_x))) 
+svuint32_t svqadd_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_x))) +svuint64_t svqadd_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_x))) +svuint16_t svqadd_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_z))) +svuint8_t svqadd_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_z))) +svuint32_t svqadd_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_z))) +svuint64_t svqadd_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_z))) +svuint16_t svqadd_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s8))) +svint8_t svqcadd_s8(svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s32))) +svint32_t svqcadd_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s64))) +svint64_t svqcadd_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s16))) +svint16_t svqcadd_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s32))) +svint32_t svqdmlalb_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s64))) +svint64_t svqdmlalb_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s16))) +svint16_t svqdmlalb_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s32))) +svint32_t svqdmlalb_s32(svint32_t, svint16_t, svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s64))) +svint64_t svqdmlalb_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s16))) +svint16_t svqdmlalb_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s32))) +svint32_t svqdmlalb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s64))) +svint64_t svqdmlalb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s32))) +svint32_t svqdmlalbt_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s64))) +svint64_t svqdmlalbt_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s16))) +svint16_t svqdmlalbt_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s32))) +svint32_t svqdmlalbt_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s64))) +svint64_t svqdmlalbt_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s16))) +svint16_t svqdmlalbt_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s32))) +svint32_t svqdmlalt_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s64))) +svint64_t svqdmlalt_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s16))) +svint16_t svqdmlalt_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s32))) +svint32_t svqdmlalt_s32(svint32_t, svint16_t, svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s64))) +svint64_t svqdmlalt_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s16))) +svint16_t svqdmlalt_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s32))) +svint32_t svqdmlalt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s64))) +svint64_t svqdmlalt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s32))) +svint32_t svqdmlslb_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s64))) +svint64_t svqdmlslb_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s16))) +svint16_t svqdmlslb_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s32))) +svint32_t svqdmlslb_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s64))) +svint64_t svqdmlslb_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s16))) +svint16_t svqdmlslb_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s32))) +svint32_t svqdmlslb_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s64))) +svint64_t svqdmlslb_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s32))) +svint32_t svqdmlslbt_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s64))) +svint64_t svqdmlslbt_n_s64(svint64_t, svint32_t, int32_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s16))) +svint16_t svqdmlslbt_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s32))) +svint32_t svqdmlslbt_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s64))) +svint64_t svqdmlslbt_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s16))) +svint16_t svqdmlslbt_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s32))) +svint32_t svqdmlslt_n_s32(svint32_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s64))) +svint64_t svqdmlslt_n_s64(svint64_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s16))) +svint16_t svqdmlslt_n_s16(svint16_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s32))) +svint32_t svqdmlslt_s32(svint32_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s64))) +svint64_t svqdmlslt_s64(svint64_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s16))) +svint16_t svqdmlslt_s16(svint16_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s32))) +svint32_t svqdmlslt_lane_s32(svint32_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s64))) +svint64_t svqdmlslt_lane_s64(svint64_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s8))) +svint8_t svqdmulh_n_s8(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s32))) +svint32_t svqdmulh_n_s32(svint32_t, int32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s64))) +svint64_t svqdmulh_n_s64(svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s16))) +svint16_t svqdmulh_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8))) +svint8_t svqdmulh_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32))) +svint32_t svqdmulh_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64))) +svint64_t svqdmulh_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16))) +svint16_t svqdmulh_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s32))) +svint32_t svqdmulh_lane_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s64))) +svint64_t svqdmulh_lane_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s16))) +svint16_t svqdmulh_lane_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s32))) +svint32_t svqdmullb_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s64))) +svint64_t svqdmullb_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s16))) +svint16_t svqdmullb_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s32))) +svint32_t svqdmullb_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s64))) +svint64_t svqdmullb_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s16))) +svint16_t svqdmullb_s16(svint8_t, svint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s32))) +svint32_t svqdmullb_lane_s32(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s64))) +svint64_t svqdmullb_lane_s64(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s32))) +svint32_t svqdmullt_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s64))) +svint64_t svqdmullt_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s16))) +svint16_t svqdmullt_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s32))) +svint32_t svqdmullt_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s64))) +svint64_t svqdmullt_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s16))) +svint16_t svqdmullt_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s32))) +svint32_t svqdmullt_lane_s32(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s64))) +svint64_t svqdmullt_lane_s64(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_m))) +svint8_t svqneg_s8_m(svint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_m))) +svint32_t svqneg_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_m))) +svint64_t svqneg_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_m))) +svint16_t svqneg_s16_m(svint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_x))) +svint8_t svqneg_s8_x(svbool_t, svint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_x))) +svint32_t svqneg_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_x))) +svint64_t svqneg_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_x))) +svint16_t svqneg_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_z))) +svint8_t svqneg_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_z))) +svint32_t svqneg_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_z))) +svint64_t svqneg_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_z))) +svint16_t svqneg_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s8))) +svint8_t svqrdcmlah_s8(svint8_t, svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s32))) +svint32_t svqrdcmlah_s32(svint32_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s64))) +svint64_t svqrdcmlah_s64(svint64_t, svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s16))) +svint16_t svqrdcmlah_s16(svint16_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s32))) +svint32_t svqrdcmlah_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s16))) +svint16_t svqrdcmlah_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s8))) +svint8_t svqrdmlah_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s32))) 
+svint32_t svqrdmlah_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s64))) +svint64_t svqrdmlah_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s16))) +svint16_t svqrdmlah_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s8))) +svint8_t svqrdmlah_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s32))) +svint32_t svqrdmlah_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s64))) +svint64_t svqrdmlah_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s16))) +svint16_t svqrdmlah_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s32))) +svint32_t svqrdmlah_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s64))) +svint64_t svqrdmlah_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s16))) +svint16_t svqrdmlah_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s8))) +svint8_t svqrdmlsh_n_s8(svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s32))) +svint32_t svqrdmlsh_n_s32(svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s64))) +svint64_t svqrdmlsh_n_s64(svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s16))) +svint16_t svqrdmlsh_n_s16(svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s8))) +svint8_t 
svqrdmlsh_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s32))) +svint32_t svqrdmlsh_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s64))) +svint64_t svqrdmlsh_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s16))) +svint16_t svqrdmlsh_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s32))) +svint32_t svqrdmlsh_lane_s32(svint32_t, svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s64))) +svint64_t svqrdmlsh_lane_s64(svint64_t, svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s16))) +svint16_t svqrdmlsh_lane_s16(svint16_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s8))) +svint8_t svqrdmulh_n_s8(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s32))) +svint32_t svqrdmulh_n_s32(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s64))) +svint64_t svqrdmulh_n_s64(svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s16))) +svint16_t svqrdmulh_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s8))) +svint8_t svqrdmulh_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s32))) +svint32_t svqrdmulh_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s64))) +svint64_t svqrdmulh_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s16))) +svint16_t svqrdmulh_s16(svint16_t, svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s32))) +svint32_t svqrdmulh_lane_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s64))) +svint64_t svqrdmulh_lane_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s16))) +svint16_t svqrdmulh_lane_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_m))) +svint8_t svqrshl_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_m))) +svint32_t svqrshl_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_m))) +svint64_t svqrshl_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_m))) +svint16_t svqrshl_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_x))) +svint8_t svqrshl_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_x))) +svint32_t svqrshl_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_x))) +svint64_t svqrshl_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_x))) +svint16_t svqrshl_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_z))) +svint8_t svqrshl_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_z))) +svint32_t svqrshl_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_z))) +svint64_t svqrshl_n_s64_z(svbool_t, svint64_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_z))) +svint16_t svqrshl_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_m))) +svuint8_t svqrshl_n_u8_m(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_m))) +svuint32_t svqrshl_n_u32_m(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_m))) +svuint64_t svqrshl_n_u64_m(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_m))) +svuint16_t svqrshl_n_u16_m(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_x))) +svuint8_t svqrshl_n_u8_x(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_x))) +svuint32_t svqrshl_n_u32_x(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_x))) +svuint64_t svqrshl_n_u64_x(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_x))) +svuint16_t svqrshl_n_u16_x(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_z))) +svuint8_t svqrshl_n_u8_z(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_z))) +svuint32_t svqrshl_n_u32_z(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_z))) +svuint64_t svqrshl_n_u64_z(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_z))) +svuint16_t svqrshl_n_u16_z(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_m))) +svint8_t svqrshl_s8_m(svbool_t, svint8_t, svint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_m))) +svint32_t svqrshl_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_m))) +svint64_t svqrshl_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_m))) +svint16_t svqrshl_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_x))) +svint8_t svqrshl_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_x))) +svint32_t svqrshl_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_x))) +svint64_t svqrshl_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_x))) +svint16_t svqrshl_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_z))) +svint8_t svqrshl_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_z))) +svint32_t svqrshl_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_z))) +svint64_t svqrshl_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_z))) +svint16_t svqrshl_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_m))) +svuint8_t svqrshl_u8_m(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_m))) +svuint32_t svqrshl_u32_m(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_m))) +svuint64_t svqrshl_u64_m(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_m))) +svuint16_t 
svqrshl_u16_m(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_x))) +svuint8_t svqrshl_u8_x(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_x))) +svuint32_t svqrshl_u32_x(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_x))) +svuint64_t svqrshl_u64_x(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_x))) +svuint16_t svqrshl_u16_x(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_z))) +svuint8_t svqrshl_u8_z(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_z))) +svuint32_t svqrshl_u32_z(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_z))) +svuint64_t svqrshl_u64_z(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_z))) +svuint16_t svqrshl_u16_z(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s32))) +svint16_t svqrshrnb_n_s32(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s64))) +svint32_t svqrshrnb_n_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s16))) +svint8_t svqrshrnb_n_s16(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u32))) +svuint16_t svqrshrnb_n_u32(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u64))) +svuint32_t svqrshrnb_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u16))) +svuint8_t svqrshrnb_n_u16(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s32))) 
+svint16_t svqrshrnt_n_s32(svint16_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s64))) +svint32_t svqrshrnt_n_s64(svint32_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s16))) +svint8_t svqrshrnt_n_s16(svint8_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u32))) +svuint16_t svqrshrnt_n_u32(svuint16_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u64))) +svuint32_t svqrshrnt_n_u64(svuint32_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u16))) +svuint8_t svqrshrnt_n_u16(svuint8_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s32))) +svuint16_t svqrshrunb_n_s32(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s64))) +svuint32_t svqrshrunb_n_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s16))) +svuint8_t svqrshrunb_n_s16(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s32))) +svuint16_t svqrshrunt_n_s32(svuint16_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s64))) +svuint32_t svqrshrunt_n_s64(svuint32_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s16))) +svuint8_t svqrshrunt_n_s16(svuint8_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_m))) +svint8_t svqshl_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_m))) +svint32_t svqshl_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_m))) +svint64_t svqshl_n_s64_m(svbool_t, svint64_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_m))) +svint16_t svqshl_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_x))) +svint8_t svqshl_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_x))) +svint32_t svqshl_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_x))) +svint64_t svqshl_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_x))) +svint16_t svqshl_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_z))) +svint8_t svqshl_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_z))) +svint32_t svqshl_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_z))) +svint64_t svqshl_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_z))) +svint16_t svqshl_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_m))) +svuint8_t svqshl_n_u8_m(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_m))) +svuint32_t svqshl_n_u32_m(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_m))) +svuint64_t svqshl_n_u64_m(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_m))) +svuint16_t svqshl_n_u16_m(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_x))) +svuint8_t svqshl_n_u8_x(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_x))) +svuint32_t 
svqshl_n_u32_x(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_x))) +svuint64_t svqshl_n_u64_x(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_x))) +svuint16_t svqshl_n_u16_x(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_z))) +svuint8_t svqshl_n_u8_z(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_z))) +svuint32_t svqshl_n_u32_z(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_z))) +svuint64_t svqshl_n_u64_z(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_z))) +svuint16_t svqshl_n_u16_z(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_m))) +svint8_t svqshl_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_m))) +svint32_t svqshl_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_m))) +svint64_t svqshl_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_m))) +svint16_t svqshl_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_x))) +svint8_t svqshl_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_x))) +svint32_t svqshl_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_x))) +svint64_t svqshl_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_x))) +svint16_t svqshl_s16_x(svbool_t, svint16_t, svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_z))) +svint8_t svqshl_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_z))) +svint32_t svqshl_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_z))) +svint64_t svqshl_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_z))) +svint16_t svqshl_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_m))) +svuint8_t svqshl_u8_m(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_m))) +svuint32_t svqshl_u32_m(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_m))) +svuint64_t svqshl_u64_m(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_m))) +svuint16_t svqshl_u16_m(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_x))) +svuint8_t svqshl_u8_x(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_x))) +svuint32_t svqshl_u32_x(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_x))) +svuint64_t svqshl_u64_x(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_x))) +svuint16_t svqshl_u16_x(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_z))) +svuint8_t svqshl_u8_z(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_z))) +svuint32_t svqshl_u32_z(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_z))) +svuint64_t svqshl_u64_z(svbool_t, svuint64_t, 
svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_z))) +svuint16_t svqshl_u16_z(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_m))) +svuint8_t svqshlu_n_s8_m(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_m))) +svuint32_t svqshlu_n_s32_m(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_m))) +svuint64_t svqshlu_n_s64_m(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_m))) +svuint16_t svqshlu_n_s16_m(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_x))) +svuint8_t svqshlu_n_s8_x(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_x))) +svuint32_t svqshlu_n_s32_x(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_x))) +svuint64_t svqshlu_n_s64_x(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_x))) +svuint16_t svqshlu_n_s16_x(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_z))) +svuint8_t svqshlu_n_s8_z(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_z))) +svuint32_t svqshlu_n_s32_z(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_z))) +svuint64_t svqshlu_n_s64_z(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_z))) +svuint16_t svqshlu_n_s16_z(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s32))) +svint16_t svqshrnb_n_s32(svint32_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s64))) +svint32_t svqshrnb_n_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s16))) +svint8_t svqshrnb_n_s16(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u32))) +svuint16_t svqshrnb_n_u32(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u64))) +svuint32_t svqshrnb_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u16))) +svuint8_t svqshrnb_n_u16(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s32))) +svint16_t svqshrnt_n_s32(svint16_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s64))) +svint32_t svqshrnt_n_s64(svint32_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s16))) +svint8_t svqshrnt_n_s16(svint8_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u32))) +svuint16_t svqshrnt_n_u32(svuint16_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u64))) +svuint32_t svqshrnt_n_u64(svuint32_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u16))) +svuint8_t svqshrnt_n_u16(svuint8_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s32))) +svuint16_t svqshrunb_n_s32(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s64))) +svuint32_t svqshrunb_n_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s16))) +svuint8_t svqshrunb_n_s16(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s32))) +svuint16_t svqshrunt_n_s32(svuint16_t, svint32_t, uint64_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s64))) +svuint32_t svqshrunt_n_s64(svuint32_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s16))) +svuint8_t svqshrunt_n_s16(svuint8_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_m))) +svint8_t svqsub_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_m))) +svint32_t svqsub_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_m))) +svint64_t svqsub_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_m))) +svint16_t svqsub_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_x))) +svint8_t svqsub_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_x))) +svint32_t svqsub_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_x))) +svint64_t svqsub_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_x))) +svint16_t svqsub_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_z))) +svint8_t svqsub_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_z))) +svint32_t svqsub_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_z))) +svint64_t svqsub_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_z))) +svint16_t svqsub_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_m))) +svuint8_t 
svqsub_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_m))) +svuint32_t svqsub_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_m))) +svuint64_t svqsub_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_m))) +svuint16_t svqsub_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_x))) +svuint8_t svqsub_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_x))) +svuint32_t svqsub_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_x))) +svuint64_t svqsub_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_x))) +svuint16_t svqsub_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_z))) +svuint8_t svqsub_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_z))) +svuint32_t svqsub_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_z))) +svuint64_t svqsub_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_z))) +svuint16_t svqsub_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_m))) +svint8_t svqsub_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_m))) +svint32_t svqsub_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_m))) +svint64_t svqsub_s64_m(svbool_t, svint64_t, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_m))) +svint16_t svqsub_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_x))) +svint8_t svqsub_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_x))) +svint32_t svqsub_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_x))) +svint64_t svqsub_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_x))) +svint16_t svqsub_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_z))) +svint8_t svqsub_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_z))) +svint32_t svqsub_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_z))) +svint64_t svqsub_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_z))) +svint16_t svqsub_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_m))) +svuint8_t svqsub_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_m))) +svuint32_t svqsub_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_m))) +svuint64_t svqsub_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_m))) +svuint16_t svqsub_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_x))) +svuint8_t svqsub_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_x))) +svuint32_t svqsub_u32_x(svbool_t, svuint32_t, 
svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_x))) +svuint64_t svqsub_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_x))) +svuint16_t svqsub_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_z))) +svuint8_t svqsub_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_z))) +svuint32_t svqsub_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_z))) +svuint64_t svqsub_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_z))) +svuint16_t svqsub_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_m))) +svint8_t svqsubr_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_m))) +svint32_t svqsubr_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_m))) +svint64_t svqsubr_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_m))) +svint16_t svqsubr_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_x))) +svint8_t svqsubr_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_x))) +svint32_t svqsubr_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_x))) +svint64_t svqsubr_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_x))) +svint16_t svqsubr_n_s16_x(svbool_t, svint16_t, int16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_z))) +svint8_t svqsubr_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_z))) +svint32_t svqsubr_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_z))) +svint64_t svqsubr_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_z))) +svint16_t svqsubr_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_m))) +svuint8_t svqsubr_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_m))) +svuint32_t svqsubr_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_m))) +svuint64_t svqsubr_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_m))) +svuint16_t svqsubr_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_x))) +svuint8_t svqsubr_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_x))) +svuint32_t svqsubr_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_x))) +svuint64_t svqsubr_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_x))) +svuint16_t svqsubr_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_z))) +svuint8_t svqsubr_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_z))) +svuint32_t svqsubr_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_z))) +svuint64_t svqsubr_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_z))) +svuint16_t svqsubr_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_m))) +svint8_t svqsubr_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_m))) +svint32_t svqsubr_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_m))) +svint64_t svqsubr_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_m))) +svint16_t svqsubr_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_x))) +svint8_t svqsubr_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_x))) +svint32_t svqsubr_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_x))) +svint64_t svqsubr_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_x))) +svint16_t svqsubr_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_z))) +svint8_t svqsubr_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_z))) +svint32_t svqsubr_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_z))) +svint64_t svqsubr_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_z))) +svint16_t svqsubr_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_m))) +svuint8_t 
svqsubr_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_m))) +svuint32_t svqsubr_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_m))) +svuint64_t svqsubr_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_m))) +svuint16_t svqsubr_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_x))) +svuint8_t svqsubr_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_x))) +svuint32_t svqsubr_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_x))) +svuint64_t svqsubr_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_x))) +svuint16_t svqsubr_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_z))) +svuint8_t svqsubr_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_z))) +svuint32_t svqsubr_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_z))) +svuint64_t svqsubr_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_z))) +svuint16_t svqsubr_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s32))) +svint16_t svqxtnb_s32(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s64))) +svint32_t svqxtnb_s64(svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s16))) +svint8_t svqxtnb_s16(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u32))) +svuint16_t 
svqxtnb_u32(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u64))) +svuint32_t svqxtnb_u64(svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u16))) +svuint8_t svqxtnb_u16(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s32))) +svint16_t svqxtnt_s32(svint16_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s64))) +svint32_t svqxtnt_s64(svint32_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s16))) +svint8_t svqxtnt_s16(svint8_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u32))) +svuint16_t svqxtnt_u32(svuint16_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u64))) +svuint32_t svqxtnt_u64(svuint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u16))) +svuint8_t svqxtnt_u16(svuint8_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s32))) +svuint16_t svqxtunb_s32(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s64))) +svuint32_t svqxtunb_s64(svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s16))) +svuint8_t svqxtunb_s16(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s32))) +svuint16_t svqxtunt_s32(svuint16_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s64))) +svuint32_t svqxtunt_s64(svuint32_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s16))) +svuint8_t svqxtunt_s16(svuint8_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u32))) +svuint16_t svraddhnb_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u64))) +svuint32_t svraddhnb_n_u64(svuint64_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u16))) +svuint8_t svraddhnb_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s32))) +svint16_t svraddhnb_n_s32(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s64))) +svint32_t svraddhnb_n_s64(svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s16))) +svint8_t svraddhnb_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u32))) +svuint16_t svraddhnb_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u64))) +svuint32_t svraddhnb_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u16))) +svuint8_t svraddhnb_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s32))) +svint16_t svraddhnb_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s64))) +svint32_t svraddhnb_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s16))) +svint8_t svraddhnb_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u32))) +svuint16_t svraddhnt_n_u32(svuint16_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u64))) +svuint32_t svraddhnt_n_u64(svuint32_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u16))) +svuint8_t svraddhnt_n_u16(svuint8_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s32))) +svint16_t svraddhnt_n_s32(svint16_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s64))) +svint32_t svraddhnt_n_s64(svint32_t, svint64_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s16))) +svint8_t svraddhnt_n_s16(svint8_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u32))) +svuint16_t svraddhnt_u32(svuint16_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u64))) +svuint32_t svraddhnt_u64(svuint32_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u16))) +svuint8_t svraddhnt_u16(svuint8_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s32))) +svint16_t svraddhnt_s32(svint16_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s64))) +svint32_t svraddhnt_s64(svint32_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s16))) +svint8_t svraddhnt_s16(svint8_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_m))) +svuint32_t svrecpe_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_x))) +svuint32_t svrecpe_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_z))) +svuint32_t svrecpe_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_m))) +svint8_t svrhadd_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_m))) +svint32_t svrhadd_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_m))) +svint64_t svrhadd_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_m))) +svint16_t svrhadd_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_x))) +svint8_t 
svrhadd_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_x))) +svint32_t svrhadd_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_x))) +svint64_t svrhadd_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_x))) +svint16_t svrhadd_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_z))) +svint8_t svrhadd_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_z))) +svint32_t svrhadd_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_z))) +svint64_t svrhadd_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_z))) +svint16_t svrhadd_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_m))) +svuint8_t svrhadd_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_m))) +svuint32_t svrhadd_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_m))) +svuint64_t svrhadd_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_m))) +svuint16_t svrhadd_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_x))) +svuint8_t svrhadd_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_x))) +svuint32_t svrhadd_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_x))) +svuint64_t svrhadd_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_x))) +svuint16_t svrhadd_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_z))) +svuint8_t svrhadd_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_z))) +svuint32_t svrhadd_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_z))) +svuint64_t svrhadd_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_z))) +svuint16_t svrhadd_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_m))) +svint8_t svrhadd_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_m))) +svint32_t svrhadd_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_m))) +svint64_t svrhadd_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_m))) +svint16_t svrhadd_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_x))) +svint8_t svrhadd_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_x))) +svint32_t svrhadd_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_x))) +svint64_t svrhadd_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_x))) +svint16_t svrhadd_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_z))) +svint8_t svrhadd_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_z))) +svint32_t 
svrhadd_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_z))) +svint64_t svrhadd_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_z))) +svint16_t svrhadd_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_m))) +svuint8_t svrhadd_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_m))) +svuint32_t svrhadd_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_m))) +svuint64_t svrhadd_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_m))) +svuint16_t svrhadd_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_x))) +svuint8_t svrhadd_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_x))) +svuint32_t svrhadd_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_x))) +svuint64_t svrhadd_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_x))) +svuint16_t svrhadd_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_z))) +svuint8_t svrhadd_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_z))) +svuint32_t svrhadd_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_z))) +svuint64_t svrhadd_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_z))) +svuint16_t svrhadd_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_m))) +svint8_t svrshl_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_m))) +svint32_t svrshl_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_m))) +svint64_t svrshl_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_m))) +svint16_t svrshl_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_x))) +svint8_t svrshl_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_x))) +svint32_t svrshl_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_x))) +svint64_t svrshl_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_x))) +svint16_t svrshl_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_z))) +svint8_t svrshl_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_z))) +svint32_t svrshl_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_z))) +svint64_t svrshl_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_z))) +svint16_t svrshl_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_m))) +svuint8_t svrshl_n_u8_m(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_m))) +svuint32_t svrshl_n_u32_m(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_m))) +svuint64_t svrshl_n_u64_m(svbool_t, 
svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_m))) +svuint16_t svrshl_n_u16_m(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_x))) +svuint8_t svrshl_n_u8_x(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_x))) +svuint32_t svrshl_n_u32_x(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_x))) +svuint64_t svrshl_n_u64_x(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_x))) +svuint16_t svrshl_n_u16_x(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_z))) +svuint8_t svrshl_n_u8_z(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_z))) +svuint32_t svrshl_n_u32_z(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_z))) +svuint64_t svrshl_n_u64_z(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_z))) +svuint16_t svrshl_n_u16_z(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_m))) +svint8_t svrshl_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_m))) +svint32_t svrshl_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_m))) +svint64_t svrshl_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_m))) +svint16_t svrshl_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x))) +svint8_t svrshl_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x))) 
+svint32_t svrshl_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x))) +svint64_t svrshl_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x))) +svint16_t svrshl_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_z))) +svint8_t svrshl_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_z))) +svint32_t svrshl_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_z))) +svint64_t svrshl_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_z))) +svint16_t svrshl_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_m))) +svuint8_t svrshl_u8_m(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_m))) +svuint32_t svrshl_u32_m(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_m))) +svuint64_t svrshl_u64_m(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_m))) +svuint16_t svrshl_u16_m(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x))) +svuint8_t svrshl_u8_x(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x))) +svuint32_t svrshl_u32_x(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x))) +svuint64_t svrshl_u64_x(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x))) +svuint16_t svrshl_u16_x(svbool_t, svuint16_t, svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_z))) +svuint8_t svrshl_u8_z(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_z))) +svuint32_t svrshl_u32_z(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_z))) +svuint64_t svrshl_u64_z(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_z))) +svuint16_t svrshl_u16_z(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_m))) +svint8_t svrshr_n_s8_m(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_m))) +svint32_t svrshr_n_s32_m(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_m))) +svint64_t svrshr_n_s64_m(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_m))) +svint16_t svrshr_n_s16_m(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_m))) +svuint8_t svrshr_n_u8_m(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_m))) +svuint32_t svrshr_n_u32_m(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_m))) +svuint64_t svrshr_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_m))) +svuint16_t svrshr_n_u16_m(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_x))) +svint8_t svrshr_n_s8_x(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_x))) +svint32_t svrshr_n_s32_x(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_x))) +svint64_t 
svrshr_n_s64_x(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_x))) +svint16_t svrshr_n_s16_x(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_x))) +svuint8_t svrshr_n_u8_x(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_x))) +svuint32_t svrshr_n_u32_x(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_x))) +svuint64_t svrshr_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_x))) +svuint16_t svrshr_n_u16_x(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_z))) +svint8_t svrshr_n_s8_z(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_z))) +svint32_t svrshr_n_s32_z(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_z))) +svint64_t svrshr_n_s64_z(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_z))) +svint16_t svrshr_n_s16_z(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_z))) +svuint8_t svrshr_n_u8_z(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_z))) +svuint32_t svrshr_n_u32_z(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_z))) +svuint64_t svrshr_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_z))) +svuint16_t svrshr_n_u16_z(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u32))) +svuint16_t svrshrnb_n_u32(svuint32_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u64))) +svuint32_t svrshrnb_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u16))) +svuint8_t svrshrnb_n_u16(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s32))) +svint16_t svrshrnb_n_s32(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s64))) +svint32_t svrshrnb_n_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s16))) +svint8_t svrshrnb_n_s16(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u32))) +svuint16_t svrshrnt_n_u32(svuint16_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u64))) +svuint32_t svrshrnt_n_u64(svuint32_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u16))) +svuint8_t svrshrnt_n_u16(svuint8_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s32))) +svint16_t svrshrnt_n_s32(svint16_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s64))) +svint32_t svrshrnt_n_s64(svint32_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s16))) +svint8_t svrshrnt_n_s16(svint8_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_m))) +svuint32_t svrsqrte_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_x))) +svuint32_t svrsqrte_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_z))) +svuint32_t svrsqrte_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s8))) +svint8_t svrsra_n_s8(svint8_t, svint8_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s32))) +svint32_t svrsra_n_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s64))) +svint64_t svrsra_n_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s16))) +svint16_t svrsra_n_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u8))) +svuint8_t svrsra_n_u8(svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u32))) +svuint32_t svrsra_n_u32(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u64))) +svuint64_t svrsra_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u16))) +svuint16_t svrsra_n_u16(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u32))) +svuint16_t svrsubhnb_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u64))) +svuint32_t svrsubhnb_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u16))) +svuint8_t svrsubhnb_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s32))) +svint16_t svrsubhnb_n_s32(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s64))) +svint32_t svrsubhnb_n_s64(svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s16))) +svint8_t svrsubhnb_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u32))) +svuint16_t svrsubhnb_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u64))) +svuint32_t svrsubhnb_u64(svuint64_t, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u16))) +svuint8_t svrsubhnb_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s32))) +svint16_t svrsubhnb_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s64))) +svint32_t svrsubhnb_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s16))) +svint8_t svrsubhnb_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u32))) +svuint16_t svrsubhnt_n_u32(svuint16_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u64))) +svuint32_t svrsubhnt_n_u64(svuint32_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u16))) +svuint8_t svrsubhnt_n_u16(svuint8_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s32))) +svint16_t svrsubhnt_n_s32(svint16_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s64))) +svint32_t svrsubhnt_n_s64(svint32_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s16))) +svint8_t svrsubhnt_n_s16(svint8_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u32))) +svuint16_t svrsubhnt_u32(svuint16_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u64))) +svuint32_t svrsubhnt_u64(svuint32_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u16))) +svuint8_t svrsubhnt_u16(svuint8_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s32))) +svint16_t svrsubhnt_s32(svint16_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s64))) +svint32_t 
svrsubhnt_s64(svint32_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s16))) +svint8_t svrsubhnt_s16(svint8_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u32))) +svuint32_t svsbclb_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u64))) +svuint64_t svsbclb_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u32))) +svuint32_t svsbclb_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u64))) +svuint64_t svsbclb_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u32))) +svuint32_t svsbclt_n_u32(svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u64))) +svuint64_t svsbclt_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u32))) +svuint32_t svsbclt_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u64))) +svuint64_t svsbclt_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s32))) +svint32_t svshllb_n_s32(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s64))) +svint64_t svshllb_n_s64(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s16))) +svint16_t svshllb_n_s16(svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u32))) +svuint32_t svshllb_n_u32(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u64))) +svuint64_t svshllb_n_u64(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u16))) 
+svuint16_t svshllb_n_u16(svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s32))) +svint32_t svshllt_n_s32(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s64))) +svint64_t svshllt_n_s64(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s16))) +svint16_t svshllt_n_s16(svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u32))) +svuint32_t svshllt_n_u32(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u64))) +svuint64_t svshllt_n_u64(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u16))) +svuint16_t svshllt_n_u16(svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u32))) +svuint16_t svshrnb_n_u32(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u64))) +svuint32_t svshrnb_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u16))) +svuint8_t svshrnb_n_u16(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s32))) +svint16_t svshrnb_n_s32(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s64))) +svint32_t svshrnb_n_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s16))) +svint8_t svshrnb_n_s16(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u32))) +svuint16_t svshrnt_n_u32(svuint16_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u64))) +svuint32_t svshrnt_n_u64(svuint32_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u16))) +svuint8_t svshrnt_n_u16(svuint8_t, svuint16_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s32))) +svint16_t svshrnt_n_s32(svint16_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s64))) +svint32_t svshrnt_n_s64(svint32_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s16))) +svint8_t svshrnt_n_s16(svint8_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u8))) +svuint8_t svsli_n_u8(svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u32))) +svuint32_t svsli_n_u32(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u64))) +svuint64_t svsli_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u16))) +svuint16_t svsli_n_u16(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s8))) +svint8_t svsli_n_s8(svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s32))) +svint32_t svsli_n_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s64))) +svint64_t svsli_n_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s16))) +svint16_t svsli_n_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_m))) +svuint8_t svsqadd_n_u8_m(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_m))) +svuint32_t svsqadd_n_u32_m(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_m))) +svuint64_t svsqadd_n_u64_m(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_m))) +svuint16_t svsqadd_n_u16_m(svbool_t, 
svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_x))) +svuint8_t svsqadd_n_u8_x(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_x))) +svuint32_t svsqadd_n_u32_x(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_x))) +svuint64_t svsqadd_n_u64_x(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_x))) +svuint16_t svsqadd_n_u16_x(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_z))) +svuint8_t svsqadd_n_u8_z(svbool_t, svuint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_z))) +svuint32_t svsqadd_n_u32_z(svbool_t, svuint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_z))) +svuint64_t svsqadd_n_u64_z(svbool_t, svuint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_z))) +svuint16_t svsqadd_n_u16_z(svbool_t, svuint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_m))) +svuint8_t svsqadd_u8_m(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_m))) +svuint32_t svsqadd_u32_m(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_m))) +svuint64_t svsqadd_u64_m(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_m))) +svuint16_t svsqadd_u16_m(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_x))) +svuint8_t svsqadd_u8_x(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_x))) +svuint32_t svsqadd_u32_x(svbool_t, svuint32_t, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_x))) +svuint64_t svsqadd_u64_x(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_x))) +svuint16_t svsqadd_u16_x(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_z))) +svuint8_t svsqadd_u8_z(svbool_t, svuint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_z))) +svuint32_t svsqadd_u32_z(svbool_t, svuint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_z))) +svuint64_t svsqadd_u64_z(svbool_t, svuint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_z))) +svuint16_t svsqadd_u16_z(svbool_t, svuint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s8))) +svint8_t svsra_n_s8(svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s32))) +svint32_t svsra_n_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s64))) +svint64_t svsra_n_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s16))) +svint16_t svsra_n_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u8))) +svuint8_t svsra_n_u8(svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u32))) +svuint32_t svsra_n_u32(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u64))) +svuint64_t svsra_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u16))) +svuint16_t svsra_n_u16(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u8))) +svuint8_t svsri_n_u8(svuint8_t, svuint8_t, 
uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u32))) +svuint32_t svsri_n_u32(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u64))) +svuint64_t svsri_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u16))) +svuint16_t svsri_n_u16(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s8))) +svint8_t svsri_n_s8(svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s32))) +svint32_t svsri_n_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s64))) +svint64_t svsri_n_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s16))) +svint16_t svsri_n_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u32))) +svuint16_t svsubhnb_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u64))) +svuint32_t svsubhnb_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u16))) +svuint8_t svsubhnb_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s32))) +svint16_t svsubhnb_n_s32(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s64))) +svint32_t svsubhnb_n_s64(svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s16))) +svint8_t svsubhnb_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u32))) +svuint16_t svsubhnb_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u64))) +svuint32_t svsubhnb_u64(svuint64_t, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u16))) +svuint8_t svsubhnb_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s32))) +svint16_t svsubhnb_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s64))) +svint32_t svsubhnb_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s16))) +svint8_t svsubhnb_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u32))) +svuint16_t svsubhnt_n_u32(svuint16_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u64))) +svuint32_t svsubhnt_n_u64(svuint32_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u16))) +svuint8_t svsubhnt_n_u16(svuint8_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s32))) +svint16_t svsubhnt_n_s32(svint16_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s64))) +svint32_t svsubhnt_n_s64(svint32_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s16))) +svint8_t svsubhnt_n_s16(svint8_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u32))) +svuint16_t svsubhnt_u32(svuint16_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u64))) +svuint32_t svsubhnt_u64(svuint32_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u16))) +svuint8_t svsubhnt_u16(svuint8_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s32))) +svint16_t svsubhnt_s32(svint16_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s64))) +svint32_t svsubhnt_s64(svint32_t, svint64_t, 
svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s16))) +svint8_t svsubhnt_s16(svint8_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s32))) +svint32_t svsublb_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s64))) +svint64_t svsublb_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s16))) +svint16_t svsublb_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u32))) +svuint32_t svsublb_n_u32(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u64))) +svuint64_t svsublb_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u16))) +svuint16_t svsublb_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s32))) +svint32_t svsublb_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s64))) +svint64_t svsublb_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s16))) +svint16_t svsublb_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u32))) +svuint32_t svsublb_u32(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u64))) +svuint64_t svsublb_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u16))) +svuint16_t svsublb_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s32))) +svint32_t svsublbt_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s64))) +svint64_t svsublbt_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s16))) +svint16_t svsublbt_n_s16(svint8_t, 
int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s32))) +svint32_t svsublbt_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s64))) +svint64_t svsublbt_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s16))) +svint16_t svsublbt_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s32))) +svint32_t svsublt_n_s32(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s64))) +svint64_t svsublt_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s16))) +svint16_t svsublt_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u32))) +svuint32_t svsublt_n_u32(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u64))) +svuint64_t svsublt_n_u64(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u16))) +svuint16_t svsublt_n_u16(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s32))) +svint32_t svsublt_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s64))) +svint64_t svsublt_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s16))) +svint16_t svsublt_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u32))) +svuint32_t svsublt_u32(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u64))) +svuint64_t svsublt_u64(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u16))) +svuint16_t svsublt_u16(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s32))) +svint32_t svsubltb_n_s32(svint16_t, int16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s64))) +svint64_t svsubltb_n_s64(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s16))) +svint16_t svsubltb_n_s16(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s32))) +svint32_t svsubltb_s32(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s64))) +svint64_t svsubltb_s64(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s16))) +svint16_t svsubltb_s16(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s32))) +svint32_t svsubwb_n_s32(svint32_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s64))) +svint64_t svsubwb_n_s64(svint64_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s16))) +svint16_t svsubwb_n_s16(svint16_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u32))) +svuint32_t svsubwb_n_u32(svuint32_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u64))) +svuint64_t svsubwb_n_u64(svuint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u16))) +svuint16_t svsubwb_n_u16(svuint16_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s32))) +svint32_t svsubwb_s32(svint32_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s64))) +svint64_t svsubwb_s64(svint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s16))) +svint16_t svsubwb_s16(svint16_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u32))) +svuint32_t svsubwb_u32(svuint32_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u64))) +svuint64_t svsubwb_u64(svuint64_t, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u16))) +svuint16_t svsubwb_u16(svuint16_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s32))) +svint32_t svsubwt_n_s32(svint32_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s64))) +svint64_t svsubwt_n_s64(svint64_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s16))) +svint16_t svsubwt_n_s16(svint16_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u32))) +svuint32_t svsubwt_n_u32(svuint32_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u64))) +svuint64_t svsubwt_n_u64(svuint64_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u16))) +svuint16_t svsubwt_n_u16(svuint16_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s32))) +svint32_t svsubwt_s32(svint32_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s64))) +svint64_t svsubwt_s64(svint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s16))) +svint16_t svsubwt_s16(svint16_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u32))) +svuint32_t svsubwt_u32(svuint32_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u64))) +svuint64_t svsubwt_u64(svuint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u16))) +svuint16_t svsubwt_u16(svuint16_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u8))) +svuint8_t svtbl2_u8(svuint8x2_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u32))) +svuint32_t svtbl2_u32(svuint32x2_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u64))) +svuint64_t svtbl2_u64(svuint64x2_t, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u16))) +svuint16_t svtbl2_u16(svuint16x2_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s8))) +svint8_t svtbl2_s8(svint8x2_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f64))) +svfloat64_t svtbl2_f64(svfloat64x2_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f32))) +svfloat32_t svtbl2_f32(svfloat32x2_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f16))) +svfloat16_t svtbl2_f16(svfloat16x2_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s32))) +svint32_t svtbl2_s32(svint32x2_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s64))) +svint64_t svtbl2_s64(svint64x2_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s16))) +svint16_t svtbl2_s16(svint16x2_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u8))) +svuint8_t svtbx_u8(svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u32))) +svuint32_t svtbx_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u64))) +svuint64_t svtbx_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u16))) +svuint16_t svtbx_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s8))) +svint8_t svtbx_s8(svint8_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f64))) +svfloat64_t svtbx_f64(svfloat64_t, svfloat64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f32))) +svfloat32_t svtbx_f32(svfloat32_t, svfloat32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f16))) +svfloat16_t 
svtbx_f16(svfloat16_t, svfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s32))) +svint32_t svtbx_s32(svint32_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s64))) +svint64_t svtbx_s64(svint64_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s16))) +svint16_t svtbx_s16(svint16_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_m))) +svint8_t svuqadd_n_s8_m(svbool_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_m))) +svint32_t svuqadd_n_s32_m(svbool_t, svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_m))) +svint64_t svuqadd_n_s64_m(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_m))) +svint16_t svuqadd_n_s16_m(svbool_t, svint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_x))) +svint8_t svuqadd_n_s8_x(svbool_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_x))) +svint32_t svuqadd_n_s32_x(svbool_t, svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_x))) +svint64_t svuqadd_n_s64_x(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_x))) +svint16_t svuqadd_n_s16_x(svbool_t, svint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_z))) +svint8_t svuqadd_n_s8_z(svbool_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_z))) +svint32_t svuqadd_n_s32_z(svbool_t, svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_z))) +svint64_t svuqadd_n_s64_z(svbool_t, svint64_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_z))) +svint16_t svuqadd_n_s16_z(svbool_t, svint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_m))) +svint8_t svuqadd_s8_m(svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_m))) +svint32_t svuqadd_s32_m(svbool_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_m))) +svint64_t svuqadd_s64_m(svbool_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_m))) +svint16_t svuqadd_s16_m(svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_x))) +svint8_t svuqadd_s8_x(svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_x))) +svint32_t svuqadd_s32_x(svbool_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_x))) +svint64_t svuqadd_s64_x(svbool_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_x))) +svint16_t svuqadd_s16_x(svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_z))) +svint8_t svuqadd_s8_z(svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_z))) +svint32_t svuqadd_s32_z(svbool_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_z))) +svint64_t svuqadd_s64_z(svbool_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_z))) +svint16_t svuqadd_s16_z(svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s32))) +svbool_t svwhilege_b8_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s32))) +svbool_t 
svwhilege_b32_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s32))) +svbool_t svwhilege_b64_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s32))) +svbool_t svwhilege_b16_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s64))) +svbool_t svwhilege_b8_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s64))) +svbool_t svwhilege_b32_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s64))) +svbool_t svwhilege_b64_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s64))) +svbool_t svwhilege_b16_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u32))) +svbool_t svwhilege_b8_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u32))) +svbool_t svwhilege_b32_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u32))) +svbool_t svwhilege_b64_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u32))) +svbool_t svwhilege_b16_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u64))) +svbool_t svwhilege_b8_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u64))) +svbool_t svwhilege_b32_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u64))) +svbool_t svwhilege_b64_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u64))) +svbool_t svwhilege_b16_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s32))) +svbool_t svwhilegt_b8_s32(int32_t, int32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s32))) +svbool_t svwhilegt_b32_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s32))) +svbool_t svwhilegt_b64_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s32))) +svbool_t svwhilegt_b16_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s64))) +svbool_t svwhilegt_b8_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s64))) +svbool_t svwhilegt_b32_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s64))) +svbool_t svwhilegt_b64_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s64))) +svbool_t svwhilegt_b16_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u32))) +svbool_t svwhilegt_b8_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u32))) +svbool_t svwhilegt_b32_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u32))) +svbool_t svwhilegt_b64_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u32))) +svbool_t svwhilegt_b16_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u64))) +svbool_t svwhilegt_b8_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u64))) +svbool_t svwhilegt_b32_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u64))) +svbool_t svwhilegt_b64_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u64))) +svbool_t svwhilegt_b16_u64(uint64_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u8))) +svbool_t svwhilerw_u8(uint8_t const *, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s8))) +svbool_t svwhilerw_s8(int8_t const *, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u64))) +svbool_t svwhilerw_u64(uint64_t const *, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f64))) +svbool_t svwhilerw_f64(float64_t const *, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s64))) +svbool_t svwhilerw_s64(int64_t const *, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u16))) +svbool_t svwhilerw_u16(uint16_t const *, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f16))) +svbool_t svwhilerw_f16(float16_t const *, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s16))) +svbool_t svwhilerw_s16(int16_t const *, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u32))) +svbool_t svwhilerw_u32(uint32_t const *, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f32))) +svbool_t svwhilerw_f32(float32_t const *, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s32))) +svbool_t svwhilerw_s32(int32_t const *, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u8))) +svbool_t svwhilewr_u8(uint8_t const *, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s8))) +svbool_t svwhilewr_s8(int8_t const *, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u64))) +svbool_t svwhilewr_u64(uint64_t const *, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f64))) 
+svbool_t svwhilewr_f64(float64_t const *, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s64))) +svbool_t svwhilewr_s64(int64_t const *, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u16))) +svbool_t svwhilewr_u16(uint16_t const *, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f16))) +svbool_t svwhilewr_f16(float16_t const *, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s16))) +svbool_t svwhilewr_s16(int16_t const *, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u32))) +svbool_t svwhilewr_u32(uint32_t const *, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f32))) +svbool_t svwhilewr_f32(float32_t const *, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s32))) +svbool_t svwhilewr_s32(int32_t const *, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u8))) +svuint8_t svxar_n_u8(svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u32))) +svuint32_t svxar_n_u32(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u64))) +svuint64_t svxar_n_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u16))) +svuint16_t svxar_n_u16(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s8))) +svint8_t svxar_n_s8(svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s32))) +svint32_t svxar_n_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s64))) +svint64_t svxar_n_s64(svint64_t, svint64_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s16))) +svint16_t svxar_n_s16(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s8))) +svint8_t svaba(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s32))) +svint32_t svaba(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s64))) +svint64_t svaba(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_s16))) +svint16_t svaba(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u8))) +svuint8_t svaba(svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u32))) +svuint32_t svaba(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u64))) +svuint64_t svaba(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_n_u16))) +svuint16_t svaba(svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s8))) +svint8_t svaba(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s32))) +svint32_t svaba(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s64))) +svint64_t svaba(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_s16))) +svint16_t svaba(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u8))) +svuint8_t svaba(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u32))) +svuint32_t svaba(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u64))) +svuint64_t svaba(svuint64_t, 
svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaba_u16))) +svuint16_t svaba(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s32))) +svint32_t svabalb(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s64))) +svint64_t svabalb(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_s16))) +svint16_t svabalb(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u32))) +svuint32_t svabalb(svuint32_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u64))) +svuint64_t svabalb(svuint64_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_n_u16))) +svuint16_t svabalb(svuint16_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s32))) +svint32_t svabalb(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s64))) +svint64_t svabalb(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_s16))) +svint16_t svabalb(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u32))) +svuint32_t svabalb(svuint32_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u64))) +svuint64_t svabalb(svuint64_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalb_u16))) +svuint16_t svabalb(svuint16_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s32))) +svint32_t svabalt(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s64))) +svint64_t svabalt(svint64_t, svint32_t, int32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_s16))) +svint16_t svabalt(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u32))) +svuint32_t svabalt(svuint32_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u64))) +svuint64_t svabalt(svuint64_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_n_u16))) +svuint16_t svabalt(svuint16_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s32))) +svint32_t svabalt(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s64))) +svint64_t svabalt(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_s16))) +svint16_t svabalt(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u32))) +svuint32_t svabalt(svuint32_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u64))) +svuint64_t svabalt(svuint64_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabalt_u16))) +svuint16_t svabalt(svuint16_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s32))) +svint32_t svabdlb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s64))) +svint64_t svabdlb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_s16))) +svint16_t svabdlb(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u32))) +svuint32_t svabdlb(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u64))) +svuint64_t svabdlb(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_n_u16))) +svuint16_t 
svabdlb(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s32))) +svint32_t svabdlb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s64))) +svint64_t svabdlb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_s16))) +svint16_t svabdlb(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u32))) +svuint32_t svabdlb(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u64))) +svuint64_t svabdlb(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlb_u16))) +svuint16_t svabdlb(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s32))) +svint32_t svabdlt(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s64))) +svint64_t svabdlt(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_s16))) +svint16_t svabdlt(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u32))) +svuint32_t svabdlt(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u64))) +svuint64_t svabdlt(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_n_u16))) +svuint16_t svabdlt(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s32))) +svint32_t svabdlt(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s64))) +svint64_t svabdlt(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_s16))) +svint16_t svabdlt(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u32))) +svuint32_t svabdlt(svuint16_t, svuint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u64))) +svuint64_t svabdlt(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabdlt_u16))) +svuint16_t svabdlt(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_m))) +svint32_t svadalp_m(svbool_t, svint32_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_m))) +svint64_t svadalp_m(svbool_t, svint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_m))) +svint16_t svadalp_m(svbool_t, svint16_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_x))) +svint32_t svadalp_x(svbool_t, svint32_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_x))) +svint64_t svadalp_x(svbool_t, svint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_x))) +svint16_t svadalp_x(svbool_t, svint16_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s32_z))) +svint32_t svadalp_z(svbool_t, svint32_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s64_z))) +svint64_t svadalp_z(svbool_t, svint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_s16_z))) +svint16_t svadalp_z(svbool_t, svint16_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_m))) +svuint32_t svadalp_m(svbool_t, svuint32_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_m))) +svuint64_t svadalp_m(svbool_t, svuint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_m))) +svuint16_t svadalp_m(svbool_t, svuint16_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_x))) +svuint32_t svadalp_x(svbool_t, svuint32_t, svuint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_x))) +svuint64_t svadalp_x(svbool_t, svuint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_x))) +svuint16_t svadalp_x(svbool_t, svuint16_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u32_z))) +svuint32_t svadalp_z(svbool_t, svuint32_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u64_z))) +svuint64_t svadalp_z(svbool_t, svuint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadalp_u16_z))) +svuint16_t svadalp_z(svbool_t, svuint16_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u32))) +svuint32_t svadclb(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_n_u64))) +svuint64_t svadclb(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u32))) +svuint32_t svadclb(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclb_u64))) +svuint64_t svadclb(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u32))) +svuint32_t svadclt(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_n_u64))) +svuint64_t svadclt(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u32))) +svuint32_t svadclt(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadclt_u64))) +svuint64_t svadclt(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u32))) +svuint16_t svaddhnb(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u64))) +svuint32_t svaddhnb(svuint64_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_u16))) +svuint8_t svaddhnb(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s32))) +svint16_t svaddhnb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s64))) +svint32_t svaddhnb(svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_n_s16))) +svint8_t svaddhnb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u32))) +svuint16_t svaddhnb(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u64))) +svuint32_t svaddhnb(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_u16))) +svuint8_t svaddhnb(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s32))) +svint16_t svaddhnb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s64))) +svint32_t svaddhnb(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnb_s16))) +svint8_t svaddhnb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u32))) +svuint16_t svaddhnt(svuint16_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u64))) +svuint32_t svaddhnt(svuint32_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_u16))) +svuint8_t svaddhnt(svuint8_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s32))) +svint16_t svaddhnt(svint16_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s64))) +svint32_t svaddhnt(svint32_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_n_s16))) +svint8_t svaddhnt(svint8_t, 
svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u32))) +svuint16_t svaddhnt(svuint16_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u64))) +svuint32_t svaddhnt(svuint32_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_u16))) +svuint8_t svaddhnt(svuint8_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s32))) +svint16_t svaddhnt(svint16_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s64))) +svint32_t svaddhnt(svint32_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddhnt_s16))) +svint8_t svaddhnt(svint8_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s32))) +svint32_t svaddlb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s64))) +svint64_t svaddlb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_s16))) +svint16_t svaddlb(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u32))) +svuint32_t svaddlb(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u64))) +svuint64_t svaddlb(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_n_u16))) +svuint16_t svaddlb(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s32))) +svint32_t svaddlb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s64))) +svint64_t svaddlb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_s16))) +svint16_t svaddlb(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u32))) +svuint32_t 
svaddlb(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u64))) +svuint64_t svaddlb(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlb_u16))) +svuint16_t svaddlb(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s32))) +svint32_t svaddlbt(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s64))) +svint64_t svaddlbt(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_n_s16))) +svint16_t svaddlbt(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s32))) +svint32_t svaddlbt(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s64))) +svint64_t svaddlbt(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlbt_s16))) +svint16_t svaddlbt(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s32))) +svint32_t svaddlt(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s64))) +svint64_t svaddlt(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_s16))) +svint16_t svaddlt(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u32))) +svuint32_t svaddlt(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u64))) +svuint64_t svaddlt(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_n_u16))) +svuint16_t svaddlt(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s32))) +svint32_t svaddlt(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s64))) +svint64_t svaddlt(svint32_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_s16))) +svint16_t svaddlt(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u32))) +svuint32_t svaddlt(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u64))) +svuint64_t svaddlt(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddlt_u16))) +svuint16_t svaddlt(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_m))) +svfloat64_t svaddp_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_m))) +svfloat32_t svaddp_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_m))) +svfloat16_t svaddp_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f64_x))) +svfloat64_t svaddp_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f32_x))) +svfloat32_t svaddp_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_f16_x))) +svfloat16_t svaddp_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_m))) +svuint8_t svaddp_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_m))) +svuint32_t svaddp_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_m))) +svuint64_t svaddp_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_m))) +svuint16_t svaddp_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_m))) +svint8_t svaddp_m(svbool_t, svint8_t, svint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_m))) +svint32_t svaddp_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_m))) +svint64_t svaddp_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_m))) +svint16_t svaddp_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u8_x))) +svuint8_t svaddp_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u32_x))) +svuint32_t svaddp_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u64_x))) +svuint64_t svaddp_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_u16_x))) +svuint16_t svaddp_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s8_x))) +svint8_t svaddp_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s32_x))) +svint32_t svaddp_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s64_x))) +svint64_t svaddp_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddp_s16_x))) +svint16_t svaddp_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s32))) +svint32_t svaddwb(svint32_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s64))) +svint64_t svaddwb(svint64_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_s16))) +svint16_t svaddwb(svint16_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u32))) +svuint32_t svaddwb(svuint32_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u64))) 
+svuint64_t svaddwb(svuint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_n_u16))) +svuint16_t svaddwb(svuint16_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s32))) +svint32_t svaddwb(svint32_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s64))) +svint64_t svaddwb(svint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_s16))) +svint16_t svaddwb(svint16_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u32))) +svuint32_t svaddwb(svuint32_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u64))) +svuint64_t svaddwb(svuint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwb_u16))) +svuint16_t svaddwb(svuint16_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s32))) +svint32_t svaddwt(svint32_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s64))) +svint64_t svaddwt(svint64_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_s16))) +svint16_t svaddwt(svint16_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u32))) +svuint32_t svaddwt(svuint32_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u64))) +svuint64_t svaddwt(svuint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_n_u16))) +svuint16_t svaddwt(svuint16_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s32))) +svint32_t svaddwt(svint32_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s64))) +svint64_t svaddwt(svint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_s16))) +svint16_t svaddwt(svint16_t, svint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u32))) +svuint32_t svaddwt(svuint32_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u64))) +svuint64_t svaddwt(svuint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddwt_u16))) +svuint16_t svaddwt(svuint16_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u8))) +svuint8_t svbcax(svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u32))) +svuint32_t svbcax(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u64))) +svuint64_t svbcax(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_u16))) +svuint16_t svbcax(svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s8))) +svint8_t svbcax(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s32))) +svint32_t svbcax(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s64))) +svint64_t svbcax(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_n_s16))) +svint16_t svbcax(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u8))) +svuint8_t svbcax(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u32))) +svuint32_t svbcax(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u64))) +svuint64_t svbcax(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_u16))) +svuint16_t svbcax(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s8))) +svint8_t 
svbcax(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s32))) +svint32_t svbcax(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s64))) +svint64_t svbcax(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbcax_s16))) +svint16_t svbcax(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u8))) +svuint8_t svbsl1n(svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u32))) +svuint32_t svbsl1n(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u64))) +svuint64_t svbsl1n(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_u16))) +svuint16_t svbsl1n(svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s8))) +svint8_t svbsl1n(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s32))) +svint32_t svbsl1n(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s64))) +svint64_t svbsl1n(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_n_s16))) +svint16_t svbsl1n(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u8))) +svuint8_t svbsl1n(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u32))) +svuint32_t svbsl1n(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u64))) +svuint64_t svbsl1n(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_u16))) +svuint16_t svbsl1n(svuint16_t, svuint16_t, svuint16_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s8))) +svint8_t svbsl1n(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s32))) +svint32_t svbsl1n(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s64))) +svint64_t svbsl1n(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl1n_s16))) +svint16_t svbsl1n(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u8))) +svuint8_t svbsl2n(svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u32))) +svuint32_t svbsl2n(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u64))) +svuint64_t svbsl2n(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_u16))) +svuint16_t svbsl2n(svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s8))) +svint8_t svbsl2n(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s32))) +svint32_t svbsl2n(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s64))) +svint64_t svbsl2n(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_n_s16))) +svint16_t svbsl2n(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u8))) +svuint8_t svbsl2n(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u32))) +svuint32_t svbsl2n(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u64))) +svuint64_t svbsl2n(svuint64_t, svuint64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_u16))) +svuint16_t svbsl2n(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s8))) +svint8_t svbsl2n(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s32))) +svint32_t svbsl2n(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s64))) +svint64_t svbsl2n(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl2n_s16))) +svint16_t svbsl2n(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u8))) +svuint8_t svbsl(svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u32))) +svuint32_t svbsl(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u64))) +svuint64_t svbsl(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_u16))) +svuint16_t svbsl(svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s8))) +svint8_t svbsl(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s32))) +svint32_t svbsl(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s64))) +svint64_t svbsl(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_n_s16))) +svint16_t svbsl(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u8))) +svuint8_t svbsl(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u32))) +svuint32_t svbsl(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u64))) +svuint64_t 
svbsl(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_u16))) +svuint16_t svbsl(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s8))) +svint8_t svbsl(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s32))) +svint32_t svbsl(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s64))) +svint64_t svbsl(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbsl_s16))) +svint16_t svbsl(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u8))) +svuint8_t svcadd(svuint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u32))) +svuint32_t svcadd(svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u64))) +svuint64_t svcadd(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_u16))) +svuint16_t svcadd(svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s8))) +svint8_t svcadd(svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s32))) +svint32_t svcadd(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s64))) +svint64_t svcadd(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_s16))) +svint16_t svcadd(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s32))) +svint32_t svcdot(svint32_t, svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_s64))) +svint64_t svcdot(svint64_t, svint16_t, svint16_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s32))) +svint32_t svcdot_lane(svint32_t, svint8_t, svint8_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcdot_lane_s64))) +svint64_t svcdot_lane(svint64_t, svint16_t, svint16_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u8))) +svuint8_t svcmla(svuint8_t, svuint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u32))) +svuint32_t svcmla(svuint32_t, svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u64))) +svuint64_t svcmla(svuint64_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_u16))) +svuint16_t svcmla(svuint16_t, svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s8))) +svint8_t svcmla(svint8_t, svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s32))) +svint32_t svcmla(svint32_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s64))) +svint64_t svcmla(svint64_t, svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_s16))) +svint16_t svcmla(svint16_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u32))) +svuint32_t svcmla_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_u16))) +svuint16_t svcmla_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s32))) +svint32_t svcmla_lane(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_s16))) +svint16_t 
svcmla_lane(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_m))) +svfloat32_t svcvtlt_f32_m(svfloat32_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f32_f16_x))) +svfloat32_t svcvtlt_f32_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_m))) +svfloat64_t svcvtlt_f64_m(svfloat64_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtlt_f64_f32_x))) +svfloat64_t svcvtlt_f64_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f16_f32_m))) +svfloat16_t svcvtnt_f16_m(svfloat16_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtnt_f32_f64_m))) +svfloat32_t svcvtnt_f32_m(svfloat32_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_m))) +svfloat32_t svcvtx_f32_m(svfloat32_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_x))) +svfloat32_t svcvtx_f32_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtx_f32_f64_z))) +svfloat32_t svcvtx_f32_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvtxnt_f32_f64_m))) +svfloat32_t svcvtxnt_f32_m(svfloat32_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u8))) +svuint8_t sveor3(svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u32))) +svuint32_t sveor3(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u64))) +svuint64_t sveor3(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_u16))) +svuint16_t sveor3(svuint16_t, svuint16_t, 
uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s8))) +svint8_t sveor3(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s32))) +svint32_t sveor3(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s64))) +svint64_t sveor3(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_n_s16))) +svint16_t sveor3(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u8))) +svuint8_t sveor3(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u32))) +svuint32_t sveor3(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u64))) +svuint64_t sveor3(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_u16))) +svuint16_t sveor3(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s8))) +svint8_t sveor3(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s32))) +svint32_t sveor3(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s64))) +svint64_t sveor3(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor3_s16))) +svint16_t sveor3(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u8))) +svuint8_t sveorbt(svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u32))) +svuint32_t sveorbt(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u64))) +svuint64_t sveorbt(svuint64_t, svuint64_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_u16))) +svuint16_t sveorbt(svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s8))) +svint8_t sveorbt(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s32))) +svint32_t sveorbt(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s64))) +svint64_t sveorbt(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_n_s16))) +svint16_t sveorbt(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u8))) +svuint8_t sveorbt(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u32))) +svuint32_t sveorbt(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u64))) +svuint64_t sveorbt(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_u16))) +svuint16_t sveorbt(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s8))) +svint8_t sveorbt(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s32))) +svint32_t sveorbt(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s64))) +svint64_t sveorbt(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorbt_s16))) +svint16_t sveorbt(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u8))) +svuint8_t sveortb(svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u32))) +svuint32_t sveortb(svuint32_t, svuint32_t, uint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u64))) +svuint64_t sveortb(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_u16))) +svuint16_t sveortb(svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s8))) +svint8_t sveortb(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s32))) +svint32_t sveortb(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s64))) +svint64_t sveortb(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_n_s16))) +svint16_t sveortb(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u8))) +svuint8_t sveortb(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u32))) +svuint32_t sveortb(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u64))) +svuint64_t sveortb(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_u16))) +svuint16_t sveortb(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s8))) +svint8_t sveortb(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s32))) +svint32_t sveortb(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s64))) +svint64_t sveortb(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveortb_s16))) +svint16_t sveortb(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_m))) +svint8_t svhadd_m(svbool_t, svint8_t, int8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_m))) +svint32_t svhadd_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_m))) +svint64_t svhadd_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_m))) +svint16_t svhadd_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_x))) +svint8_t svhadd_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_x))) +svint32_t svhadd_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_x))) +svint64_t svhadd_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_x))) +svint16_t svhadd_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s8_z))) +svint8_t svhadd_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s32_z))) +svint32_t svhadd_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s64_z))) +svint64_t svhadd_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_s16_z))) +svint16_t svhadd_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_m))) +svuint8_t svhadd_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_m))) +svuint32_t svhadd_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_m))) +svuint64_t svhadd_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_m))) +svuint16_t svhadd_m(svbool_t, svuint16_t, uint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_x))) +svuint8_t svhadd_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_x))) +svuint32_t svhadd_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_x))) +svuint64_t svhadd_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_x))) +svuint16_t svhadd_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u8_z))) +svuint8_t svhadd_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u32_z))) +svuint32_t svhadd_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u64_z))) +svuint64_t svhadd_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_n_u16_z))) +svuint16_t svhadd_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_m))) +svint8_t svhadd_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_m))) +svint32_t svhadd_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_m))) +svint64_t svhadd_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_m))) +svint16_t svhadd_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_x))) +svint8_t svhadd_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_x))) +svint32_t svhadd_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_x))) +svint64_t svhadd_x(svbool_t, svint64_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_x))) +svint16_t svhadd_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s8_z))) +svint8_t svhadd_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s32_z))) +svint32_t svhadd_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s64_z))) +svint64_t svhadd_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_s16_z))) +svint16_t svhadd_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_m))) +svuint8_t svhadd_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_m))) +svuint32_t svhadd_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_m))) +svuint64_t svhadd_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_m))) +svuint16_t svhadd_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_x))) +svuint8_t svhadd_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_x))) +svuint32_t svhadd_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_x))) +svuint64_t svhadd_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_x))) +svuint16_t svhadd_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u8_z))) +svuint8_t svhadd_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u32_z))) +svuint32_t svhadd_z(svbool_t, svuint32_t, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u64_z))) +svuint64_t svhadd_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhadd_u16_z))) +svuint16_t svhadd_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_m))) +svint8_t svhsub_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_m))) +svint32_t svhsub_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_m))) +svint64_t svhsub_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_m))) +svint16_t svhsub_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_x))) +svint8_t svhsub_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_x))) +svint32_t svhsub_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_x))) +svint64_t svhsub_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_x))) +svint16_t svhsub_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s8_z))) +svint8_t svhsub_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s32_z))) +svint32_t svhsub_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s64_z))) +svint64_t svhsub_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_s16_z))) +svint16_t svhsub_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_m))) +svuint8_t svhsub_m(svbool_t, svuint8_t, uint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_m))) +svuint32_t svhsub_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_m))) +svuint64_t svhsub_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_m))) +svuint16_t svhsub_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_x))) +svuint8_t svhsub_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_x))) +svuint32_t svhsub_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_x))) +svuint64_t svhsub_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_x))) +svuint16_t svhsub_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u8_z))) +svuint8_t svhsub_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u32_z))) +svuint32_t svhsub_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u64_z))) +svuint64_t svhsub_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_n_u16_z))) +svuint16_t svhsub_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_m))) +svint8_t svhsub_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_m))) +svint32_t svhsub_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_m))) +svint64_t svhsub_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_m))) +svint16_t svhsub_m(svbool_t, svint16_t, svint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_x))) +svint8_t svhsub_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_x))) +svint32_t svhsub_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_x))) +svint64_t svhsub_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_x))) +svint16_t svhsub_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s8_z))) +svint8_t svhsub_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s32_z))) +svint32_t svhsub_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s64_z))) +svint64_t svhsub_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_s16_z))) +svint16_t svhsub_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_m))) +svuint8_t svhsub_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_m))) +svuint32_t svhsub_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_m))) +svuint64_t svhsub_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_m))) +svuint16_t svhsub_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_x))) +svuint8_t svhsub_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_x))) +svuint32_t svhsub_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_x))) +svuint64_t svhsub_x(svbool_t, svuint64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_x))) +svuint16_t svhsub_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u8_z))) +svuint8_t svhsub_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u32_z))) +svuint32_t svhsub_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u64_z))) +svuint64_t svhsub_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsub_u16_z))) +svuint16_t svhsub_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_m))) +svint8_t svhsubr_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_m))) +svint32_t svhsubr_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_m))) +svint64_t svhsubr_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_m))) +svint16_t svhsubr_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_x))) +svint8_t svhsubr_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_x))) +svint32_t svhsubr_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_x))) +svint64_t svhsubr_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_x))) +svint16_t svhsubr_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s8_z))) +svint8_t svhsubr_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s32_z))) +svint32_t svhsubr_z(svbool_t, svint32_t, int32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s64_z))) +svint64_t svhsubr_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_s16_z))) +svint16_t svhsubr_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_m))) +svuint8_t svhsubr_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_m))) +svuint32_t svhsubr_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_m))) +svuint64_t svhsubr_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_m))) +svuint16_t svhsubr_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_x))) +svuint8_t svhsubr_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_x))) +svuint32_t svhsubr_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_x))) +svuint64_t svhsubr_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_x))) +svuint16_t svhsubr_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u8_z))) +svuint8_t svhsubr_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u32_z))) +svuint32_t svhsubr_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u64_z))) +svuint64_t svhsubr_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_n_u16_z))) +svuint16_t svhsubr_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_m))) +svint8_t svhsubr_m(svbool_t, svint8_t, 
svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_m))) +svint32_t svhsubr_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_m))) +svint64_t svhsubr_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_m))) +svint16_t svhsubr_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_x))) +svint8_t svhsubr_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_x))) +svint32_t svhsubr_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_x))) +svint64_t svhsubr_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_x))) +svint16_t svhsubr_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s8_z))) +svint8_t svhsubr_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s32_z))) +svint32_t svhsubr_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s64_z))) +svint64_t svhsubr_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_s16_z))) +svint16_t svhsubr_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_m))) +svuint8_t svhsubr_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_m))) +svuint32_t svhsubr_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_m))) +svuint64_t svhsubr_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_m))) +svuint16_t svhsubr_m(svbool_t, svuint16_t, 
svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_x))) +svuint8_t svhsubr_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_x))) +svuint32_t svhsubr_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_x))) +svuint64_t svhsubr_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_x))) +svuint16_t svhsubr_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u8_z))) +svuint8_t svhsubr_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u32_z))) +svuint32_t svhsubr_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u64_z))) +svuint64_t svhsubr_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svhsubr_u16_z))) +svuint16_t svhsubr_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_m))) +svint64_t svlogb_m(svint64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_m))) +svint32_t svlogb_m(svint32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_m))) +svint16_t svlogb_m(svint16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_x))) +svint64_t svlogb_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_x))) +svint32_t svlogb_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_x))) +svint16_t svlogb_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f64_z))) +svint64_t svlogb_z(svbool_t, svfloat64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f32_z))) +svint32_t svlogb_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlogb_f16_z))) +svint16_t svlogb_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_m))) +svfloat64_t svmaxnmp_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_m))) +svfloat32_t svmaxnmp_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_m))) +svfloat16_t svmaxnmp_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f64_x))) +svfloat64_t svmaxnmp_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f32_x))) +svfloat32_t svmaxnmp_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmp_f16_x))) +svfloat16_t svmaxnmp_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_m))) +svfloat64_t svmaxp_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_m))) +svfloat32_t svmaxp_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_m))) +svfloat16_t svmaxp_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f64_x))) +svfloat64_t svmaxp_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f32_x))) +svfloat32_t svmaxp_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_f16_x))) +svfloat16_t svmaxp_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_m))) +svint8_t 
svmaxp_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_m))) +svint32_t svmaxp_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_m))) +svint64_t svmaxp_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_m))) +svint16_t svmaxp_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s8_x))) +svint8_t svmaxp_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s32_x))) +svint32_t svmaxp_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s64_x))) +svint64_t svmaxp_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_s16_x))) +svint16_t svmaxp_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_m))) +svuint8_t svmaxp_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_m))) +svuint32_t svmaxp_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_m))) +svuint64_t svmaxp_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_m))) +svuint16_t svmaxp_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u8_x))) +svuint8_t svmaxp_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u32_x))) +svuint32_t svmaxp_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u64_x))) +svuint64_t svmaxp_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxp_u16_x))) +svuint16_t svmaxp_x(svbool_t, 
svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_m))) +svfloat64_t svminnmp_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_m))) +svfloat32_t svminnmp_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_m))) +svfloat16_t svminnmp_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f64_x))) +svfloat64_t svminnmp_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f32_x))) +svfloat32_t svminnmp_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmp_f16_x))) +svfloat16_t svminnmp_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_m))) +svfloat64_t svminp_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_m))) +svfloat32_t svminp_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_m))) +svfloat16_t svminp_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f64_x))) +svfloat64_t svminp_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f32_x))) +svfloat32_t svminp_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_f16_x))) +svfloat16_t svminp_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_m))) +svint8_t svminp_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_m))) +svint32_t svminp_m(svbool_t, svint32_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_m))) +svint64_t svminp_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_m))) +svint16_t svminp_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s8_x))) +svint8_t svminp_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s32_x))) +svint32_t svminp_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s64_x))) +svint64_t svminp_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_s16_x))) +svint16_t svminp_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_m))) +svuint8_t svminp_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_m))) +svuint32_t svminp_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_m))) +svuint64_t svminp_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_m))) +svuint16_t svminp_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u8_x))) +svuint8_t svminp_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u32_x))) +svuint32_t svminp_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u64_x))) +svuint64_t svminp_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminp_u16_x))) +svuint16_t svminp_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u32))) +svuint32_t svmla_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u64))) +svuint64_t svmla_lane(svuint64_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_u16))) +svuint16_t svmla_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s32))) +svint32_t svmla_lane(svint32_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s64))) +svint64_t svmla_lane(svint64_t, svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_s16))) +svint16_t svmla_lane(svint16_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_f32))) +svfloat32_t svmlalb(svfloat32_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s32))) +svint32_t svmlalb(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s64))) +svint64_t svmlalb(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_s16))) +svint16_t svmlalb(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u32))) +svuint32_t svmlalb(svuint32_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u64))) +svuint64_t svmlalb(svuint64_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_n_u16))) +svuint16_t svmlalb(svuint16_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_f32))) +svfloat32_t svmlalb(svfloat32_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s32))) +svint32_t svmlalb(svint32_t, svint16_t, svint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s64))) +svint64_t svmlalb(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_s16))) +svint16_t svmlalb(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u32))) +svuint32_t svmlalb(svuint32_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u64))) +svuint64_t svmlalb(svuint64_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_u16))) +svuint16_t svmlalb(svuint16_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_f32))) +svfloat32_t svmlalb_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s32))) +svint32_t svmlalb_lane(svint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_s64))) +svint64_t svmlalb_lane(svint64_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u32))) +svuint32_t svmlalb_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalb_lane_u64))) +svuint64_t svmlalb_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_f32))) +svfloat32_t svmlalt(svfloat32_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s32))) +svint32_t svmlalt(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s64))) +svint64_t svmlalt(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_s16))) +svint16_t svmlalt(svint16_t, svint8_t, int8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u32))) +svuint32_t svmlalt(svuint32_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u64))) +svuint64_t svmlalt(svuint64_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_n_u16))) +svuint16_t svmlalt(svuint16_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_f32))) +svfloat32_t svmlalt(svfloat32_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s32))) +svint32_t svmlalt(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s64))) +svint64_t svmlalt(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_s16))) +svint16_t svmlalt(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u32))) +svuint32_t svmlalt(svuint32_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u64))) +svuint64_t svmlalt(svuint64_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_u16))) +svuint16_t svmlalt(svuint16_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_f32))) +svfloat32_t svmlalt_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s32))) +svint32_t svmlalt_lane(svint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_s64))) +svint64_t svmlalt_lane(svint64_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u32))) +svuint32_t svmlalt_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlalt_lane_u64))) +svuint64_t svmlalt_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u32))) +svuint32_t svmls_lane(svuint32_t, svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u64))) +svuint64_t svmls_lane(svuint64_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_u16))) +svuint16_t svmls_lane(svuint16_t, svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s32))) +svint32_t svmls_lane(svint32_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s64))) +svint64_t svmls_lane(svint64_t, svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_s16))) +svint16_t svmls_lane(svint16_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_f32))) +svfloat32_t svmlslb(svfloat32_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s32))) +svint32_t svmlslb(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s64))) +svint64_t svmlslb(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_s16))) +svint16_t svmlslb(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u32))) +svuint32_t svmlslb(svuint32_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u64))) +svuint64_t svmlslb(svuint64_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_n_u16))) +svuint16_t svmlslb(svuint16_t, svuint8_t, uint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_f32))) +svfloat32_t svmlslb(svfloat32_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s32))) +svint32_t svmlslb(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s64))) +svint64_t svmlslb(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_s16))) +svint16_t svmlslb(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u32))) +svuint32_t svmlslb(svuint32_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u64))) +svuint64_t svmlslb(svuint64_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_u16))) +svuint16_t svmlslb(svuint16_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_f32))) +svfloat32_t svmlslb_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s32))) +svint32_t svmlslb_lane(svint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_s64))) +svint64_t svmlslb_lane(svint64_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u32))) +svuint32_t svmlslb_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslb_lane_u64))) +svuint64_t svmlslb_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_f32))) +svfloat32_t svmlslt(svfloat32_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s32))) +svint32_t svmlslt(svint32_t, svint16_t, int16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s64))) +svint64_t svmlslt(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_s16))) +svint16_t svmlslt(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u32))) +svuint32_t svmlslt(svuint32_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u64))) +svuint64_t svmlslt(svuint64_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_n_u16))) +svuint16_t svmlslt(svuint16_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_f32))) +svfloat32_t svmlslt(svfloat32_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s32))) +svint32_t svmlslt(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s64))) +svint64_t svmlslt(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_s16))) +svint16_t svmlslt(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u32))) +svuint32_t svmlslt(svuint32_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u64))) +svuint64_t svmlslt(svuint64_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_u16))) +svuint16_t svmlslt(svuint16_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_f32))) +svfloat32_t svmlslt_lane(svfloat32_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s32))) +svint32_t svmlslt_lane(svint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_s64))) +svint64_t 
svmlslt_lane(svint64_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u32))) +svuint32_t svmlslt_lane(svuint32_t, svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmlslt_lane_u64))) +svuint64_t svmlslt_lane(svuint64_t, svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s32))) +svint32_t svmovlb(svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s64))) +svint64_t svmovlb(svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_s16))) +svint16_t svmovlb(svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u32))) +svuint32_t svmovlb(svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u64))) +svuint64_t svmovlb(svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlb_u16))) +svuint16_t svmovlb(svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s32))) +svint32_t svmovlt(svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s64))) +svint64_t svmovlt(svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_s16))) +svint16_t svmovlt(svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u32))) +svuint32_t svmovlt(svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u64))) +svuint64_t svmovlt(svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmovlt_u16))) +svuint16_t svmovlt(svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u32))) +svuint32_t svmul_lane(svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u64))) +svuint64_t svmul_lane(svuint64_t, svuint64_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_u16))) +svuint16_t svmul_lane(svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s32))) +svint32_t svmul_lane(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s64))) +svint64_t svmul_lane(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_s16))) +svint16_t svmul_lane(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s32))) +svint32_t svmullb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s64))) +svint64_t svmullb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_s16))) +svint16_t svmullb(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u32))) +svuint32_t svmullb(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u64))) +svuint64_t svmullb(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_n_u16))) +svuint16_t svmullb(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s32))) +svint32_t svmullb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s64))) +svint64_t svmullb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_s16))) +svint16_t svmullb(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u32))) +svuint32_t svmullb(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u64))) +svuint64_t svmullb(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_u16))) +svuint16_t svmullb(svuint8_t, svuint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s32))) +svint32_t svmullb_lane(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_s64))) +svint64_t svmullb_lane(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u32))) +svuint32_t svmullb_lane(svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullb_lane_u64))) +svuint64_t svmullb_lane(svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s32))) +svint32_t svmullt(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s64))) +svint64_t svmullt(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_s16))) +svint16_t svmullt(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u32))) +svuint32_t svmullt(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u64))) +svuint64_t svmullt(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_n_u16))) +svuint16_t svmullt(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s32))) +svint32_t svmullt(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s64))) +svint64_t svmullt(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_s16))) +svint16_t svmullt(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u32))) +svuint32_t svmullt(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u64))) +svuint64_t svmullt(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_u16))) +svuint16_t svmullt(svuint8_t, svuint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s32))) +svint32_t svmullt_lane(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_s64))) +svint64_t svmullt_lane(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u32))) +svuint32_t svmullt_lane(svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmullt_lane_u64))) +svuint64_t svmullt_lane(svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u8))) +svuint8_t svnbsl(svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u32))) +svuint32_t svnbsl(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u64))) +svuint64_t svnbsl(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_u16))) +svuint16_t svnbsl(svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s8))) +svint8_t svnbsl(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s32))) +svint32_t svnbsl(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s64))) +svint64_t svnbsl(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_n_s16))) +svint16_t svnbsl(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u8))) +svuint8_t svnbsl(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u32))) +svuint32_t svnbsl(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u64))) +svuint64_t svnbsl(svuint64_t, svuint64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_u16))) +svuint16_t svnbsl(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s8))) +svint8_t svnbsl(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s32))) +svint32_t svnbsl(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s64))) +svint64_t svnbsl(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnbsl_s16))) +svint16_t svnbsl(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_n_u8))) +svuint8_t svpmul(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmul_u8))) +svuint8_t svpmul(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u64))) +svuint64_t svpmullb(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_n_u16))) +svuint16_t svpmullb(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u64))) +svuint64_t svpmullb(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_u16))) +svuint16_t svpmullb(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u8))) +svuint8_t svpmullb_pair(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_n_u32))) +svuint32_t svpmullb_pair(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u8))) +svuint8_t svpmullb_pair(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullb_pair_u32))) +svuint32_t svpmullb_pair(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u64))) +svuint64_t svpmullt(svuint32_t, 
uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_n_u16))) +svuint16_t svpmullt(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u64))) +svuint64_t svpmullt(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_u16))) +svuint16_t svpmullt(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u8))) +svuint8_t svpmullt_pair(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_n_u32))) +svuint32_t svpmullt_pair(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u8))) +svuint8_t svpmullt_pair(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpmullt_pair_u32))) +svuint32_t svpmullt_pair(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_m))) +svint8_t svqabs_m(svint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_m))) +svint32_t svqabs_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_m))) +svint64_t svqabs_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_m))) +svint16_t svqabs_m(svint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_x))) +svint8_t svqabs_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_x))) +svint32_t svqabs_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_x))) +svint64_t svqabs_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_x))) +svint16_t svqabs_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s8_z))) +svint8_t 
svqabs_z(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s32_z))) +svint32_t svqabs_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s64_z))) +svint64_t svqabs_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqabs_s16_z))) +svint16_t svqabs_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_m))) +svint8_t svqadd_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_m))) +svint32_t svqadd_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_m))) +svint64_t svqadd_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_m))) +svint16_t svqadd_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_x))) +svint8_t svqadd_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_x))) +svint32_t svqadd_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_x))) +svint64_t svqadd_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_x))) +svint16_t svqadd_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8_z))) +svint8_t svqadd_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32_z))) +svint32_t svqadd_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64_z))) +svint64_t svqadd_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16_z))) +svint16_t svqadd_z(svbool_t, svint16_t, int16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_m))) +svuint8_t svqadd_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_m))) +svuint32_t svqadd_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_m))) +svuint64_t svqadd_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_m))) +svuint16_t svqadd_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_x))) +svuint8_t svqadd_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_x))) +svuint32_t svqadd_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_x))) +svuint64_t svqadd_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_x))) +svuint16_t svqadd_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8_z))) +svuint8_t svqadd_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32_z))) +svuint32_t svqadd_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64_z))) +svuint64_t svqadd_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16_z))) +svuint16_t svqadd_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_m))) +svint8_t svqadd_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_m))) +svint32_t svqadd_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_m))) +svint64_t svqadd_m(svbool_t, svint64_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_m))) +svint16_t svqadd_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_x))) +svint8_t svqadd_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_x))) +svint32_t svqadd_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_x))) +svint64_t svqadd_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_x))) +svint16_t svqadd_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8_z))) +svint8_t svqadd_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32_z))) +svint32_t svqadd_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64_z))) +svint64_t svqadd_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16_z))) +svint16_t svqadd_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_m))) +svuint8_t svqadd_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_m))) +svuint32_t svqadd_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_m))) +svuint64_t svqadd_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_m))) +svuint16_t svqadd_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_x))) +svuint8_t svqadd_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_x))) +svuint32_t svqadd_x(svbool_t, svuint32_t, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_x))) +svuint64_t svqadd_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_x))) +svuint16_t svqadd_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8_z))) +svuint8_t svqadd_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32_z))) +svuint32_t svqadd_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64_z))) +svuint64_t svqadd_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16_z))) +svuint16_t svqadd_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s8))) +svint8_t svqcadd(svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s32))) +svint32_t svqcadd(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s64))) +svint64_t svqcadd(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqcadd_s16))) +svint16_t svqcadd(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s32))) +svint32_t svqdmlalb(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s64))) +svint64_t svqdmlalb(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_n_s16))) +svint16_t svqdmlalb(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s32))) +svint32_t svqdmlalb(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s64))) +svint64_t svqdmlalb(svint64_t, svint32_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_s16))) +svint16_t svqdmlalb(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s32))) +svint32_t svqdmlalb_lane(svint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalb_lane_s64))) +svint64_t svqdmlalb_lane(svint64_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s32))) +svint32_t svqdmlalbt(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s64))) +svint64_t svqdmlalbt(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_n_s16))) +svint16_t svqdmlalbt(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s32))) +svint32_t svqdmlalbt(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s64))) +svint64_t svqdmlalbt(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalbt_s16))) +svint16_t svqdmlalbt(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s32))) +svint32_t svqdmlalt(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s64))) +svint64_t svqdmlalt(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_n_s16))) +svint16_t svqdmlalt(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s32))) +svint32_t svqdmlalt(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s64))) +svint64_t svqdmlalt(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_s16))) 
+svint16_t svqdmlalt(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s32))) +svint32_t svqdmlalt_lane(svint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlalt_lane_s64))) +svint64_t svqdmlalt_lane(svint64_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s32))) +svint32_t svqdmlslb(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s64))) +svint64_t svqdmlslb(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_n_s16))) +svint16_t svqdmlslb(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s32))) +svint32_t svqdmlslb(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s64))) +svint64_t svqdmlslb(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_s16))) +svint16_t svqdmlslb(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s32))) +svint32_t svqdmlslb_lane(svint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslb_lane_s64))) +svint64_t svqdmlslb_lane(svint64_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s32))) +svint32_t svqdmlslbt(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s64))) +svint64_t svqdmlslbt(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_n_s16))) +svint16_t svqdmlslbt(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s32))) +svint32_t svqdmlslbt(svint32_t, svint16_t, 
svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s64))) +svint64_t svqdmlslbt(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslbt_s16))) +svint16_t svqdmlslbt(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s32))) +svint32_t svqdmlslt(svint32_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s64))) +svint64_t svqdmlslt(svint64_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_n_s16))) +svint16_t svqdmlslt(svint16_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s32))) +svint32_t svqdmlslt(svint32_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s64))) +svint64_t svqdmlslt(svint64_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_s16))) +svint16_t svqdmlslt(svint16_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s32))) +svint32_t svqdmlslt_lane(svint32_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmlslt_lane_s64))) +svint64_t svqdmlslt_lane(svint64_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s8))) +svint8_t svqdmulh(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s32))) +svint32_t svqdmulh(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s64))) +svint64_t svqdmulh(svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_n_s16))) +svint16_t svqdmulh(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s8))) +svint8_t svqdmulh(svint8_t, svint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s32))) +svint32_t svqdmulh(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s64))) +svint64_t svqdmulh(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_s16))) +svint16_t svqdmulh(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s32))) +svint32_t svqdmulh_lane(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s64))) +svint64_t svqdmulh_lane(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmulh_lane_s16))) +svint16_t svqdmulh_lane(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s32))) +svint32_t svqdmullb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s64))) +svint64_t svqdmullb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_n_s16))) +svint16_t svqdmullb(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s32))) +svint32_t svqdmullb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s64))) +svint64_t svqdmullb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_s16))) +svint16_t svqdmullb(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s32))) +svint32_t svqdmullb_lane(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullb_lane_s64))) +svint64_t svqdmullb_lane(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s32))) +svint32_t svqdmullt(svint16_t, int16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s64))) +svint64_t svqdmullt(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_n_s16))) +svint16_t svqdmullt(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s32))) +svint32_t svqdmullt(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s64))) +svint64_t svqdmullt(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_s16))) +svint16_t svqdmullt(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s32))) +svint32_t svqdmullt_lane(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdmullt_lane_s64))) +svint64_t svqdmullt_lane(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_m))) +svint8_t svqneg_m(svint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_m))) +svint32_t svqneg_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_m))) +svint64_t svqneg_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_m))) +svint16_t svqneg_m(svint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_x))) +svint8_t svqneg_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_x))) +svint32_t svqneg_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_x))) +svint64_t svqneg_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_x))) +svint16_t svqneg_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s8_z))) +svint8_t svqneg_z(svbool_t, 
svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s32_z))) +svint32_t svqneg_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s64_z))) +svint64_t svqneg_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqneg_s16_z))) +svint16_t svqneg_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s8))) +svint8_t svqrdcmlah(svint8_t, svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s32))) +svint32_t svqrdcmlah(svint32_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s64))) +svint64_t svqrdcmlah(svint64_t, svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_s16))) +svint16_t svqrdcmlah(svint16_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s32))) +svint32_t svqrdcmlah_lane(svint32_t, svint32_t, svint32_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdcmlah_lane_s16))) +svint16_t svqrdcmlah_lane(svint16_t, svint16_t, svint16_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s8))) +svint8_t svqrdmlah(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s32))) +svint32_t svqrdmlah(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s64))) +svint64_t svqrdmlah(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_n_s16))) +svint16_t svqrdmlah(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s8))) +svint8_t svqrdmlah(svint8_t, svint8_t, svint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s32))) +svint32_t svqrdmlah(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s64))) +svint64_t svqrdmlah(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_s16))) +svint16_t svqrdmlah(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s32))) +svint32_t svqrdmlah_lane(svint32_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s64))) +svint64_t svqrdmlah_lane(svint64_t, svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlah_lane_s16))) +svint16_t svqrdmlah_lane(svint16_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s8))) +svint8_t svqrdmlsh(svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s32))) +svint32_t svqrdmlsh(svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s64))) +svint64_t svqrdmlsh(svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_n_s16))) +svint16_t svqrdmlsh(svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s8))) +svint8_t svqrdmlsh(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s32))) +svint32_t svqrdmlsh(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s64))) +svint64_t svqrdmlsh(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_s16))) +svint16_t svqrdmlsh(svint16_t, svint16_t, svint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s32))) +svint32_t svqrdmlsh_lane(svint32_t, svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s64))) +svint64_t svqrdmlsh_lane(svint64_t, svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmlsh_lane_s16))) +svint16_t svqrdmlsh_lane(svint16_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s8))) +svint8_t svqrdmulh(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s32))) +svint32_t svqrdmulh(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s64))) +svint64_t svqrdmulh(svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_n_s16))) +svint16_t svqrdmulh(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s8))) +svint8_t svqrdmulh(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s32))) +svint32_t svqrdmulh(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s64))) +svint64_t svqrdmulh(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_s16))) +svint16_t svqrdmulh(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s32))) +svint32_t svqrdmulh_lane(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s64))) +svint64_t svqrdmulh_lane(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrdmulh_lane_s16))) +svint16_t svqrdmulh_lane(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_m))) +svint8_t svqrshl_m(svbool_t, svint8_t, int8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_m))) +svint32_t svqrshl_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_m))) +svint64_t svqrshl_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_m))) +svint16_t svqrshl_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_x))) +svint8_t svqrshl_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_x))) +svint32_t svqrshl_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_x))) +svint64_t svqrshl_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_x))) +svint16_t svqrshl_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s8_z))) +svint8_t svqrshl_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s32_z))) +svint32_t svqrshl_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s64_z))) +svint64_t svqrshl_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_s16_z))) +svint16_t svqrshl_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_m))) +svuint8_t svqrshl_m(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_m))) +svuint32_t svqrshl_m(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_m))) +svuint64_t svqrshl_m(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_m))) +svuint16_t svqrshl_m(svbool_t, svuint16_t, int16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_x))) +svuint8_t svqrshl_x(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_x))) +svuint32_t svqrshl_x(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_x))) +svuint64_t svqrshl_x(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_x))) +svuint16_t svqrshl_x(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u8_z))) +svuint8_t svqrshl_z(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u32_z))) +svuint32_t svqrshl_z(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u64_z))) +svuint64_t svqrshl_z(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_n_u16_z))) +svuint16_t svqrshl_z(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_m))) +svint8_t svqrshl_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_m))) +svint32_t svqrshl_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_m))) +svint64_t svqrshl_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_m))) +svint16_t svqrshl_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_x))) +svint8_t svqrshl_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_x))) +svint32_t svqrshl_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_x))) +svint64_t svqrshl_x(svbool_t, svint64_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_x))) +svint16_t svqrshl_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s8_z))) +svint8_t svqrshl_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s32_z))) +svint32_t svqrshl_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s64_z))) +svint64_t svqrshl_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_s16_z))) +svint16_t svqrshl_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_m))) +svuint8_t svqrshl_m(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_m))) +svuint32_t svqrshl_m(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_m))) +svuint64_t svqrshl_m(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_m))) +svuint16_t svqrshl_m(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_x))) +svuint8_t svqrshl_x(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_x))) +svuint32_t svqrshl_x(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_x))) +svuint64_t svqrshl_x(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_x))) +svuint16_t svqrshl_x(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u8_z))) +svuint8_t svqrshl_z(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u32_z))) +svuint32_t svqrshl_z(svbool_t, svuint32_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u64_z))) +svuint64_t svqrshl_z(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshl_u16_z))) +svuint16_t svqrshl_z(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s32))) +svint16_t svqrshrnb(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s64))) +svint32_t svqrshrnb(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_s16))) +svint8_t svqrshrnb(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u32))) +svuint16_t svqrshrnb(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u64))) +svuint32_t svqrshrnb(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnb_n_u16))) +svuint8_t svqrshrnb(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s32))) +svint16_t svqrshrnt(svint16_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s64))) +svint32_t svqrshrnt(svint32_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_s16))) +svint8_t svqrshrnt(svint8_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u32))) +svuint16_t svqrshrnt(svuint16_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u64))) +svuint32_t svqrshrnt(svuint32_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrnt_n_u16))) +svuint8_t svqrshrnt(svuint8_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s32))) +svuint16_t svqrshrunb(svint32_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s64))) +svuint32_t svqrshrunb(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunb_n_s16))) +svuint8_t svqrshrunb(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s32))) +svuint16_t svqrshrunt(svuint16_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s64))) +svuint32_t svqrshrunt(svuint32_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqrshrunt_n_s16))) +svuint8_t svqrshrunt(svuint8_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_m))) +svint8_t svqshl_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_m))) +svint32_t svqshl_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_m))) +svint64_t svqshl_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_m))) +svint16_t svqshl_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_x))) +svint8_t svqshl_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_x))) +svint32_t svqshl_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_x))) +svint64_t svqshl_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_x))) +svint16_t svqshl_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s8_z))) +svint8_t svqshl_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s32_z))) +svint32_t svqshl_z(svbool_t, svint32_t, int32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s64_z))) +svint64_t svqshl_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_s16_z))) +svint16_t svqshl_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_m))) +svuint8_t svqshl_m(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_m))) +svuint32_t svqshl_m(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_m))) +svuint64_t svqshl_m(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_m))) +svuint16_t svqshl_m(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_x))) +svuint8_t svqshl_x(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_x))) +svuint32_t svqshl_x(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_x))) +svuint64_t svqshl_x(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_x))) +svuint16_t svqshl_x(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u8_z))) +svuint8_t svqshl_z(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u32_z))) +svuint32_t svqshl_z(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u64_z))) +svuint64_t svqshl_z(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_n_u16_z))) +svuint16_t svqshl_z(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_m))) +svint8_t svqshl_m(svbool_t, svint8_t, svint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_m))) +svint32_t svqshl_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_m))) +svint64_t svqshl_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_m))) +svint16_t svqshl_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_x))) +svint8_t svqshl_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_x))) +svint32_t svqshl_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_x))) +svint64_t svqshl_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_x))) +svint16_t svqshl_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s8_z))) +svint8_t svqshl_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s32_z))) +svint32_t svqshl_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s64_z))) +svint64_t svqshl_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_s16_z))) +svint16_t svqshl_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_m))) +svuint8_t svqshl_m(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_m))) +svuint32_t svqshl_m(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_m))) +svuint64_t svqshl_m(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_m))) +svuint16_t svqshl_m(svbool_t, svuint16_t, svint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_x))) +svuint8_t svqshl_x(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_x))) +svuint32_t svqshl_x(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_x))) +svuint64_t svqshl_x(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_x))) +svuint16_t svqshl_x(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u8_z))) +svuint8_t svqshl_z(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u32_z))) +svuint32_t svqshl_z(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u64_z))) +svuint64_t svqshl_z(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshl_u16_z))) +svuint16_t svqshl_z(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_m))) +svuint8_t svqshlu_m(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_m))) +svuint32_t svqshlu_m(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_m))) +svuint64_t svqshlu_m(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_m))) +svuint16_t svqshlu_m(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_x))) +svuint8_t svqshlu_x(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_x))) +svuint32_t svqshlu_x(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_x))) +svuint64_t svqshlu_x(svbool_t, svint64_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_x))) +svuint16_t svqshlu_x(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s8_z))) +svuint8_t svqshlu_z(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s32_z))) +svuint32_t svqshlu_z(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s64_z))) +svuint64_t svqshlu_z(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshlu_n_s16_z))) +svuint16_t svqshlu_z(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s32))) +svint16_t svqshrnb(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s64))) +svint32_t svqshrnb(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_s16))) +svint8_t svqshrnb(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u32))) +svuint16_t svqshrnb(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u64))) +svuint32_t svqshrnb(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnb_n_u16))) +svuint8_t svqshrnb(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s32))) +svint16_t svqshrnt(svint16_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s64))) +svint32_t svqshrnt(svint32_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_s16))) +svint8_t svqshrnt(svint8_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u32))) +svuint16_t svqshrnt(svuint16_t, svuint32_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u64))) +svuint32_t svqshrnt(svuint32_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrnt_n_u16))) +svuint8_t svqshrnt(svuint8_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s32))) +svuint16_t svqshrunb(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s64))) +svuint32_t svqshrunb(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunb_n_s16))) +svuint8_t svqshrunb(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s32))) +svuint16_t svqshrunt(svuint16_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s64))) +svuint32_t svqshrunt(svuint32_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqshrunt_n_s16))) +svuint8_t svqshrunt(svuint8_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_m))) +svint8_t svqsub_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_m))) +svint32_t svqsub_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_m))) +svint64_t svqsub_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_m))) +svint16_t svqsub_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_x))) +svint8_t svqsub_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_x))) +svint32_t svqsub_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_x))) +svint64_t svqsub_x(svbool_t, svint64_t, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_x))) +svint16_t svqsub_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8_z))) +svint8_t svqsub_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32_z))) +svint32_t svqsub_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64_z))) +svint64_t svqsub_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16_z))) +svint16_t svqsub_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_m))) +svuint8_t svqsub_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_m))) +svuint32_t svqsub_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_m))) +svuint64_t svqsub_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_m))) +svuint16_t svqsub_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_x))) +svuint8_t svqsub_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_x))) +svuint32_t svqsub_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_x))) +svuint64_t svqsub_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_x))) +svuint16_t svqsub_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8_z))) +svuint8_t svqsub_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32_z))) +svuint32_t svqsub_z(svbool_t, svuint32_t, uint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64_z))) +svuint64_t svqsub_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16_z))) +svuint16_t svqsub_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_m))) +svint8_t svqsub_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_m))) +svint32_t svqsub_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_m))) +svint64_t svqsub_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_m))) +svint16_t svqsub_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_x))) +svint8_t svqsub_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_x))) +svint32_t svqsub_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_x))) +svint64_t svqsub_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_x))) +svint16_t svqsub_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8_z))) +svint8_t svqsub_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32_z))) +svint32_t svqsub_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64_z))) +svint64_t svqsub_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16_z))) +svint16_t svqsub_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_m))) +svuint8_t svqsub_m(svbool_t, svuint8_t, svuint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_m))) +svuint32_t svqsub_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_m))) +svuint64_t svqsub_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_m))) +svuint16_t svqsub_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_x))) +svuint8_t svqsub_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_x))) +svuint32_t svqsub_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_x))) +svuint64_t svqsub_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_x))) +svuint16_t svqsub_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8_z))) +svuint8_t svqsub_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32_z))) +svuint32_t svqsub_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64_z))) +svuint64_t svqsub_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16_z))) +svuint16_t svqsub_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_m))) +svint8_t svqsubr_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_m))) +svint32_t svqsubr_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_m))) +svint64_t svqsubr_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_m))) +svint16_t svqsubr_m(svbool_t, svint16_t, int16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_x))) +svint8_t svqsubr_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_x))) +svint32_t svqsubr_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_x))) +svint64_t svqsubr_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_x))) +svint16_t svqsubr_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s8_z))) +svint8_t svqsubr_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s32_z))) +svint32_t svqsubr_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s64_z))) +svint64_t svqsubr_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_s16_z))) +svint16_t svqsubr_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_m))) +svuint8_t svqsubr_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_m))) +svuint32_t svqsubr_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_m))) +svuint64_t svqsubr_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_m))) +svuint16_t svqsubr_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_x))) +svuint8_t svqsubr_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_x))) +svuint32_t svqsubr_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_x))) +svuint64_t svqsubr_x(svbool_t, svuint64_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_x))) +svuint16_t svqsubr_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u8_z))) +svuint8_t svqsubr_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u32_z))) +svuint32_t svqsubr_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u64_z))) +svuint64_t svqsubr_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_n_u16_z))) +svuint16_t svqsubr_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_m))) +svint8_t svqsubr_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_m))) +svint32_t svqsubr_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_m))) +svint64_t svqsubr_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_m))) +svint16_t svqsubr_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_x))) +svint8_t svqsubr_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_x))) +svint32_t svqsubr_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_x))) +svint64_t svqsubr_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_x))) +svint16_t svqsubr_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s8_z))) +svint8_t svqsubr_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s32_z))) +svint32_t svqsubr_z(svbool_t, svint32_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s64_z))) +svint64_t svqsubr_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_s16_z))) +svint16_t svqsubr_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_m))) +svuint8_t svqsubr_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_m))) +svuint32_t svqsubr_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_m))) +svuint64_t svqsubr_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_m))) +svuint16_t svqsubr_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_x))) +svuint8_t svqsubr_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_x))) +svuint32_t svqsubr_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_x))) +svuint64_t svqsubr_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_x))) +svuint16_t svqsubr_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u8_z))) +svuint8_t svqsubr_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u32_z))) +svuint32_t svqsubr_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u64_z))) +svuint64_t svqsubr_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsubr_u16_z))) +svuint16_t svqsubr_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s32))) +svint16_t svqxtnb(svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s64))) +svint32_t svqxtnb(svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_s16))) +svint8_t svqxtnb(svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u32))) +svuint16_t svqxtnb(svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u64))) +svuint32_t svqxtnb(svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnb_u16))) +svuint8_t svqxtnb(svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s32))) +svint16_t svqxtnt(svint16_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s64))) +svint32_t svqxtnt(svint32_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_s16))) +svint8_t svqxtnt(svint8_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u32))) +svuint16_t svqxtnt(svuint16_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u64))) +svuint32_t svqxtnt(svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtnt_u16))) +svuint8_t svqxtnt(svuint8_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s32))) +svuint16_t svqxtunb(svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s64))) +svuint32_t svqxtunb(svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunb_s16))) +svuint8_t svqxtunb(svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s32))) +svuint16_t svqxtunt(svuint16_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s64))) +svuint32_t svqxtunt(svuint32_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqxtunt_s16))) +svuint8_t svqxtunt(svuint8_t, svint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u32))) +svuint16_t svraddhnb(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u64))) +svuint32_t svraddhnb(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_u16))) +svuint8_t svraddhnb(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s32))) +svint16_t svraddhnb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s64))) +svint32_t svraddhnb(svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_n_s16))) +svint8_t svraddhnb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u32))) +svuint16_t svraddhnb(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u64))) +svuint32_t svraddhnb(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_u16))) +svuint8_t svraddhnb(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s32))) +svint16_t svraddhnb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s64))) +svint32_t svraddhnb(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnb_s16))) +svint8_t svraddhnb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u32))) +svuint16_t svraddhnt(svuint16_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u64))) +svuint32_t svraddhnt(svuint32_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_u16))) +svuint8_t svraddhnt(svuint8_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s32))) +svint16_t 
svraddhnt(svint16_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s64))) +svint32_t svraddhnt(svint32_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_n_s16))) +svint8_t svraddhnt(svint8_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u32))) +svuint16_t svraddhnt(svuint16_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u64))) +svuint32_t svraddhnt(svuint32_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_u16))) +svuint8_t svraddhnt(svuint8_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s32))) +svint16_t svraddhnt(svint16_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s64))) +svint32_t svraddhnt(svint32_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svraddhnt_s16))) +svint8_t svraddhnt(svint8_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_m))) +svuint32_t svrecpe_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_x))) +svuint32_t svrecpe_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_u32_z))) +svuint32_t svrecpe_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_m))) +svint8_t svrhadd_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_m))) +svint32_t svrhadd_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_m))) +svint64_t svrhadd_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_m))) +svint16_t 
svrhadd_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_x))) +svint8_t svrhadd_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_x))) +svint32_t svrhadd_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_x))) +svint64_t svrhadd_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_x))) +svint16_t svrhadd_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s8_z))) +svint8_t svrhadd_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s32_z))) +svint32_t svrhadd_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s64_z))) +svint64_t svrhadd_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_s16_z))) +svint16_t svrhadd_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_m))) +svuint8_t svrhadd_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_m))) +svuint32_t svrhadd_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_m))) +svuint64_t svrhadd_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_m))) +svuint16_t svrhadd_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_x))) +svuint8_t svrhadd_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_x))) +svuint32_t svrhadd_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_x))) +svuint64_t 
svrhadd_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_x))) +svuint16_t svrhadd_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u8_z))) +svuint8_t svrhadd_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u32_z))) +svuint32_t svrhadd_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u64_z))) +svuint64_t svrhadd_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_n_u16_z))) +svuint16_t svrhadd_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_m))) +svint8_t svrhadd_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_m))) +svint32_t svrhadd_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_m))) +svint64_t svrhadd_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_m))) +svint16_t svrhadd_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_x))) +svint8_t svrhadd_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_x))) +svint32_t svrhadd_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_x))) +svint64_t svrhadd_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_x))) +svint16_t svrhadd_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s8_z))) +svint8_t svrhadd_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s32_z))) +svint32_t 
svrhadd_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s64_z))) +svint64_t svrhadd_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_s16_z))) +svint16_t svrhadd_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_m))) +svuint8_t svrhadd_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_m))) +svuint32_t svrhadd_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_m))) +svuint64_t svrhadd_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_m))) +svuint16_t svrhadd_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_x))) +svuint8_t svrhadd_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_x))) +svuint32_t svrhadd_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_x))) +svuint64_t svrhadd_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_x))) +svuint16_t svrhadd_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u8_z))) +svuint8_t svrhadd_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u32_z))) +svuint32_t svrhadd_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u64_z))) +svuint64_t svrhadd_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrhadd_u16_z))) +svuint16_t svrhadd_z(svbool_t, svuint16_t, svuint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_m))) +svint8_t svrshl_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_m))) +svint32_t svrshl_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_m))) +svint64_t svrshl_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_m))) +svint16_t svrshl_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_x))) +svint8_t svrshl_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_x))) +svint32_t svrshl_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_x))) +svint64_t svrshl_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_x))) +svint16_t svrshl_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s8_z))) +svint8_t svrshl_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s32_z))) +svint32_t svrshl_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s64_z))) +svint64_t svrshl_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_s16_z))) +svint16_t svrshl_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_m))) +svuint8_t svrshl_m(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_m))) +svuint32_t svrshl_m(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_m))) +svuint64_t svrshl_m(svbool_t, svuint64_t, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_m))) +svuint16_t svrshl_m(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_x))) +svuint8_t svrshl_x(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_x))) +svuint32_t svrshl_x(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_x))) +svuint64_t svrshl_x(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_x))) +svuint16_t svrshl_x(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u8_z))) +svuint8_t svrshl_z(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u32_z))) +svuint32_t svrshl_z(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u64_z))) +svuint64_t svrshl_z(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_n_u16_z))) +svuint16_t svrshl_z(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_m))) +svint8_t svrshl_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_m))) +svint32_t svrshl_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_m))) +svint64_t svrshl_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_m))) +svint16_t svrshl_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_x))) +svint8_t svrshl_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_x))) +svint32_t svrshl_x(svbool_t, svint32_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_x))) +svint64_t svrshl_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_x))) +svint16_t svrshl_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s8_z))) +svint8_t svrshl_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s32_z))) +svint32_t svrshl_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s64_z))) +svint64_t svrshl_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_s16_z))) +svint16_t svrshl_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_m))) +svuint8_t svrshl_m(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_m))) +svuint32_t svrshl_m(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_m))) +svuint64_t svrshl_m(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_m))) +svuint16_t svrshl_m(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_x))) +svuint8_t svrshl_x(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_x))) +svuint32_t svrshl_x(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_x))) +svuint64_t svrshl_x(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_x))) +svuint16_t svrshl_x(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u8_z))) +svuint8_t svrshl_z(svbool_t, svuint8_t, svint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u32_z))) +svuint32_t svrshl_z(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u64_z))) +svuint64_t svrshl_z(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshl_u16_z))) +svuint16_t svrshl_z(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_m))) +svint8_t svrshr_m(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_m))) +svint32_t svrshr_m(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_m))) +svint64_t svrshr_m(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_m))) +svint16_t svrshr_m(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_m))) +svuint8_t svrshr_m(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_m))) +svuint32_t svrshr_m(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_m))) +svuint64_t svrshr_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_m))) +svuint16_t svrshr_m(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_x))) +svint8_t svrshr_x(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_x))) +svint32_t svrshr_x(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_x))) +svint64_t svrshr_x(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_x))) +svint16_t svrshr_x(svbool_t, svint16_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_x))) +svuint8_t svrshr_x(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_x))) +svuint32_t svrshr_x(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_x))) +svuint64_t svrshr_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_x))) +svuint16_t svrshr_x(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s8_z))) +svint8_t svrshr_z(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s32_z))) +svint32_t svrshr_z(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s64_z))) +svint64_t svrshr_z(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_s16_z))) +svint16_t svrshr_z(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u8_z))) +svuint8_t svrshr_z(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u32_z))) +svuint32_t svrshr_z(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u64_z))) +svuint64_t svrshr_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshr_n_u16_z))) +svuint16_t svrshr_z(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u32))) +svuint16_t svrshrnb(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u64))) +svuint32_t svrshrnb(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_u16))) +svuint8_t svrshrnb(svuint16_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s32))) +svint16_t svrshrnb(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s64))) +svint32_t svrshrnb(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnb_n_s16))) +svint8_t svrshrnb(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u32))) +svuint16_t svrshrnt(svuint16_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u64))) +svuint32_t svrshrnt(svuint32_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_u16))) +svuint8_t svrshrnt(svuint8_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s32))) +svint16_t svrshrnt(svint16_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s64))) +svint32_t svrshrnt(svint32_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrshrnt_n_s16))) +svint8_t svrshrnt(svint8_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_m))) +svuint32_t svrsqrte_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_x))) +svuint32_t svrsqrte_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_u32_z))) +svuint32_t svrsqrte_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s8))) +svint8_t svrsra(svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s32))) +svint32_t svrsra(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s64))) +svint64_t svrsra(svint64_t, svint64_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_s16))) +svint16_t svrsra(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u8))) +svuint8_t svrsra(svuint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u32))) +svuint32_t svrsra(svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u64))) +svuint64_t svrsra(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsra_n_u16))) +svuint16_t svrsra(svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u32))) +svuint16_t svrsubhnb(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u64))) +svuint32_t svrsubhnb(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_u16))) +svuint8_t svrsubhnb(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s32))) +svint16_t svrsubhnb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s64))) +svint32_t svrsubhnb(svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_n_s16))) +svint8_t svrsubhnb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u32))) +svuint16_t svrsubhnb(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u64))) +svuint32_t svrsubhnb(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_u16))) +svuint8_t svrsubhnb(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s32))) +svint16_t svrsubhnb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s64))) +svint32_t 
svrsubhnb(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnb_s16))) +svint8_t svrsubhnb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u32))) +svuint16_t svrsubhnt(svuint16_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u64))) +svuint32_t svrsubhnt(svuint32_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_u16))) +svuint8_t svrsubhnt(svuint8_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s32))) +svint16_t svrsubhnt(svint16_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s64))) +svint32_t svrsubhnt(svint32_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_n_s16))) +svint8_t svrsubhnt(svint8_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u32))) +svuint16_t svrsubhnt(svuint16_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u64))) +svuint32_t svrsubhnt(svuint32_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_u16))) +svuint8_t svrsubhnt(svuint8_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s32))) +svint16_t svrsubhnt(svint16_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s64))) +svint32_t svrsubhnt(svint32_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsubhnt_s16))) +svint8_t svrsubhnt(svint8_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u32))) +svuint32_t svsbclb(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_n_u64))) 
+svuint64_t svsbclb(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u32))) +svuint32_t svsbclb(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclb_u64))) +svuint64_t svsbclb(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u32))) +svuint32_t svsbclt(svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_n_u64))) +svuint64_t svsbclt(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u32))) +svuint32_t svsbclt(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsbclt_u64))) +svuint64_t svsbclt(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s32))) +svint32_t svshllb(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s64))) +svint64_t svshllb(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_s16))) +svint16_t svshllb(svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u32))) +svuint32_t svshllb(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u64))) +svuint64_t svshllb(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllb_n_u16))) +svuint16_t svshllb(svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s32))) +svint32_t svshllt(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s64))) +svint64_t svshllt(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_s16))) +svint16_t svshllt(svint8_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u32))) +svuint32_t svshllt(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u64))) +svuint64_t svshllt(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshllt_n_u16))) +svuint16_t svshllt(svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u32))) +svuint16_t svshrnb(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u64))) +svuint32_t svshrnb(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_u16))) +svuint8_t svshrnb(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s32))) +svint16_t svshrnb(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s64))) +svint32_t svshrnb(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnb_n_s16))) +svint8_t svshrnb(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u32))) +svuint16_t svshrnt(svuint16_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u64))) +svuint32_t svshrnt(svuint32_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_u16))) +svuint8_t svshrnt(svuint8_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s32))) +svint16_t svshrnt(svint16_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s64))) +svint32_t svshrnt(svint32_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svshrnt_n_s16))) +svint8_t svshrnt(svint8_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u8))) +svuint8_t svsli(svuint8_t, svuint8_t, uint64_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u32))) +svuint32_t svsli(svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u64))) +svuint64_t svsli(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_u16))) +svuint16_t svsli(svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s8))) +svint8_t svsli(svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s32))) +svint32_t svsli(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s64))) +svint64_t svsli(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsli_n_s16))) +svint16_t svsli(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_m))) +svuint8_t svsqadd_m(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_m))) +svuint32_t svsqadd_m(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_m))) +svuint64_t svsqadd_m(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_m))) +svuint16_t svsqadd_m(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_x))) +svuint8_t svsqadd_x(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_x))) +svuint32_t svsqadd_x(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_x))) +svuint64_t svsqadd_x(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_x))) +svuint16_t svsqadd_x(svbool_t, svuint16_t, int16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u8_z))) +svuint8_t svsqadd_z(svbool_t, svuint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u32_z))) +svuint32_t svsqadd_z(svbool_t, svuint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u64_z))) +svuint64_t svsqadd_z(svbool_t, svuint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_n_u16_z))) +svuint16_t svsqadd_z(svbool_t, svuint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_m))) +svuint8_t svsqadd_m(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_m))) +svuint32_t svsqadd_m(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_m))) +svuint64_t svsqadd_m(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_m))) +svuint16_t svsqadd_m(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_x))) +svuint8_t svsqadd_x(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_x))) +svuint32_t svsqadd_x(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_x))) +svuint64_t svsqadd_x(svbool_t, svuint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_x))) +svuint16_t svsqadd_x(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u8_z))) +svuint8_t svsqadd_z(svbool_t, svuint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u32_z))) +svuint32_t svsqadd_z(svbool_t, svuint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u64_z))) +svuint64_t svsqadd_z(svbool_t, svuint64_t, 
svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqadd_u16_z))) +svuint16_t svsqadd_z(svbool_t, svuint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s8))) +svint8_t svsra(svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s32))) +svint32_t svsra(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s64))) +svint64_t svsra(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_s16))) +svint16_t svsra(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u8))) +svuint8_t svsra(svuint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u32))) +svuint32_t svsra(svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u64))) +svuint64_t svsra(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsra_n_u16))) +svuint16_t svsra(svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u8))) +svuint8_t svsri(svuint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u32))) +svuint32_t svsri(svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u64))) +svuint64_t svsri(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_u16))) +svuint16_t svsri(svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s8))) +svint8_t svsri(svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s32))) +svint32_t svsri(svint32_t, svint32_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s64))) +svint64_t svsri(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsri_n_s16))) +svint16_t svsri(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u32))) +svuint16_t svsubhnb(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u64))) +svuint32_t svsubhnb(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_u16))) +svuint8_t svsubhnb(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s32))) +svint16_t svsubhnb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s64))) +svint32_t svsubhnb(svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_n_s16))) +svint8_t svsubhnb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u32))) +svuint16_t svsubhnb(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u64))) +svuint32_t svsubhnb(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_u16))) +svuint8_t svsubhnb(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s32))) +svint16_t svsubhnb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s64))) +svint32_t svsubhnb(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnb_s16))) +svint8_t svsubhnb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u32))) +svuint16_t svsubhnt(svuint16_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u64))) +svuint32_t svsubhnt(svuint32_t, svuint64_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_u16))) +svuint8_t svsubhnt(svuint8_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s32))) +svint16_t svsubhnt(svint16_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s64))) +svint32_t svsubhnt(svint32_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_n_s16))) +svint8_t svsubhnt(svint8_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u32))) +svuint16_t svsubhnt(svuint16_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u64))) +svuint32_t svsubhnt(svuint32_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_u16))) +svuint8_t svsubhnt(svuint8_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s32))) +svint16_t svsubhnt(svint16_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s64))) +svint32_t svsubhnt(svint32_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubhnt_s16))) +svint8_t svsubhnt(svint8_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s32))) +svint32_t svsublb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s64))) +svint64_t svsublb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_s16))) +svint16_t svsublb(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u32))) +svuint32_t svsublb(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u64))) +svuint64_t svsublb(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_n_u16))) 
+svuint16_t svsublb(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s32))) +svint32_t svsublb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s64))) +svint64_t svsublb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_s16))) +svint16_t svsublb(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u32))) +svuint32_t svsublb(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u64))) +svuint64_t svsublb(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublb_u16))) +svuint16_t svsublb(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s32))) +svint32_t svsublbt(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s64))) +svint64_t svsublbt(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_n_s16))) +svint16_t svsublbt(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s32))) +svint32_t svsublbt(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s64))) +svint64_t svsublbt(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublbt_s16))) +svint16_t svsublbt(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s32))) +svint32_t svsublt(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s64))) +svint64_t svsublt(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_s16))) +svint16_t svsublt(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u32))) +svuint32_t svsublt(svuint16_t, uint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u64))) +svuint64_t svsublt(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_n_u16))) +svuint16_t svsublt(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s32))) +svint32_t svsublt(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s64))) +svint64_t svsublt(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_s16))) +svint16_t svsublt(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u32))) +svuint32_t svsublt(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u64))) +svuint64_t svsublt(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsublt_u16))) +svuint16_t svsublt(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s32))) +svint32_t svsubltb(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s64))) +svint64_t svsubltb(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_n_s16))) +svint16_t svsubltb(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s32))) +svint32_t svsubltb(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s64))) +svint64_t svsubltb(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubltb_s16))) +svint16_t svsubltb(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s32))) +svint32_t svsubwb(svint32_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s64))) +svint64_t svsubwb(svint64_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_s16))) 
+svint16_t svsubwb(svint16_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u32))) +svuint32_t svsubwb(svuint32_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u64))) +svuint64_t svsubwb(svuint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_n_u16))) +svuint16_t svsubwb(svuint16_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s32))) +svint32_t svsubwb(svint32_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s64))) +svint64_t svsubwb(svint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_s16))) +svint16_t svsubwb(svint16_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u32))) +svuint32_t svsubwb(svuint32_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u64))) +svuint64_t svsubwb(svuint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwb_u16))) +svuint16_t svsubwb(svuint16_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s32))) +svint32_t svsubwt(svint32_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s64))) +svint64_t svsubwt(svint64_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_s16))) +svint16_t svsubwt(svint16_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u32))) +svuint32_t svsubwt(svuint32_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u64))) +svuint64_t svsubwt(svuint64_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_n_u16))) +svuint16_t svsubwt(svuint16_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s32))) +svint32_t svsubwt(svint32_t, svint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s64))) +svint64_t svsubwt(svint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_s16))) +svint16_t svsubwt(svint16_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u32))) +svuint32_t svsubwt(svuint32_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u64))) +svuint64_t svsubwt(svuint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubwt_u16))) +svuint16_t svsubwt(svuint16_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u8))) +svuint8_t svtbl2(svuint8x2_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u32))) +svuint32_t svtbl2(svuint32x2_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u64))) +svuint64_t svtbl2(svuint64x2_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_u16))) +svuint16_t svtbl2(svuint16x2_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s8))) +svint8_t svtbl2(svint8x2_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f64))) +svfloat64_t svtbl2(svfloat64x2_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f32))) +svfloat32_t svtbl2(svfloat32x2_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_f16))) +svfloat16_t svtbl2(svfloat16x2_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s32))) +svint32_t svtbl2(svint32x2_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s64))) +svint64_t svtbl2(svint64x2_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl2_s16))) +svint16_t svtbl2(svint16x2_t, svuint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u8))) +svuint8_t svtbx(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u32))) +svuint32_t svtbx(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u64))) +svuint64_t svtbx(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_u16))) +svuint16_t svtbx(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s8))) +svint8_t svtbx(svint8_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f64))) +svfloat64_t svtbx(svfloat64_t, svfloat64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f32))) +svfloat32_t svtbx(svfloat32_t, svfloat32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_f16))) +svfloat16_t svtbx(svfloat16_t, svfloat16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s32))) +svint32_t svtbx(svint32_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s64))) +svint64_t svtbx(svint64_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbx_s16))) +svint16_t svtbx(svint16_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_m))) +svint8_t svuqadd_m(svbool_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_m))) +svint32_t svuqadd_m(svbool_t, svint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_m))) +svint64_t svuqadd_m(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_m))) +svint16_t svuqadd_m(svbool_t, svint16_t, uint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_x))) +svint8_t svuqadd_x(svbool_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_x))) +svint32_t svuqadd_x(svbool_t, svint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_x))) +svint64_t svuqadd_x(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_x))) +svint16_t svuqadd_x(svbool_t, svint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s8_z))) +svint8_t svuqadd_z(svbool_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s32_z))) +svint32_t svuqadd_z(svbool_t, svint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s64_z))) +svint64_t svuqadd_z(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_n_s16_z))) +svint16_t svuqadd_z(svbool_t, svint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_m))) +svint8_t svuqadd_m(svbool_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_m))) +svint32_t svuqadd_m(svbool_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_m))) +svint64_t svuqadd_m(svbool_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_m))) +svint16_t svuqadd_m(svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_x))) +svint8_t svuqadd_x(svbool_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_x))) +svint32_t svuqadd_x(svbool_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_x))) +svint64_t svuqadd_x(svbool_t, svint64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_x))) +svint16_t svuqadd_x(svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s8_z))) +svint8_t svuqadd_z(svbool_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s32_z))) +svint32_t svuqadd_z(svbool_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s64_z))) +svint64_t svuqadd_z(svbool_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuqadd_s16_z))) +svint16_t svuqadd_z(svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s32))) +svbool_t svwhilege_b8(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s32))) +svbool_t svwhilege_b32(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s32))) +svbool_t svwhilege_b64(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s32))) +svbool_t svwhilege_b16(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_s64))) +svbool_t svwhilege_b8(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_s64))) +svbool_t svwhilege_b32(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_s64))) +svbool_t svwhilege_b64(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_s64))) +svbool_t svwhilege_b16(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u32))) +svbool_t svwhilege_b8(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u32))) +svbool_t svwhilege_b32(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u32))) 
+svbool_t svwhilege_b64(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u32))) +svbool_t svwhilege_b16(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b8_u64))) +svbool_t svwhilege_b8(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b32_u64))) +svbool_t svwhilege_b32(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b64_u64))) +svbool_t svwhilege_b64(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilege_b16_u64))) +svbool_t svwhilege_b16(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s32))) +svbool_t svwhilegt_b8(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s32))) +svbool_t svwhilegt_b32(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s32))) +svbool_t svwhilegt_b64(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s32))) +svbool_t svwhilegt_b16(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_s64))) +svbool_t svwhilegt_b8(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_s64))) +svbool_t svwhilegt_b32(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_s64))) +svbool_t svwhilegt_b64(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_s64))) +svbool_t svwhilegt_b16(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u32))) +svbool_t svwhilegt_b8(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u32))) +svbool_t svwhilegt_b32(uint32_t, uint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u32))) +svbool_t svwhilegt_b64(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u32))) +svbool_t svwhilegt_b16(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b8_u64))) +svbool_t svwhilegt_b8(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b32_u64))) +svbool_t svwhilegt_b32(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b64_u64))) +svbool_t svwhilegt_b64(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilegt_b16_u64))) +svbool_t svwhilegt_b16(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u8))) +svbool_t svwhilerw(uint8_t const *, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s8))) +svbool_t svwhilerw(int8_t const *, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u64))) +svbool_t svwhilerw(uint64_t const *, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f64))) +svbool_t svwhilerw(float64_t const *, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s64))) +svbool_t svwhilerw(int64_t const *, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u16))) +svbool_t svwhilerw(uint16_t const *, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f16))) +svbool_t svwhilerw(float16_t const *, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s16))) +svbool_t svwhilerw(int16_t const *, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_u32))) +svbool_t svwhilerw(uint32_t const *, uint32_t const *); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_f32))) +svbool_t svwhilerw(float32_t const *, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilerw_s32))) +svbool_t svwhilerw(int32_t const *, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u8))) +svbool_t svwhilewr(uint8_t const *, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s8))) +svbool_t svwhilewr(int8_t const *, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u64))) +svbool_t svwhilewr(uint64_t const *, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f64))) +svbool_t svwhilewr(float64_t const *, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s64))) +svbool_t svwhilewr(int64_t const *, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u16))) +svbool_t svwhilewr(uint16_t const *, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f16))) +svbool_t svwhilewr(float16_t const *, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s16))) +svbool_t svwhilewr(int16_t const *, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_u32))) +svbool_t svwhilewr(uint32_t const *, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_f32))) +svbool_t svwhilewr(float32_t const *, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilewr_s32))) +svbool_t svwhilewr(int32_t const *, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u8))) +svuint8_t svxar(svuint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u32))) +svuint32_t svxar(svuint32_t, svuint32_t, 
uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u64))) +svuint64_t svxar(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_u16))) +svuint16_t svxar(svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s8))) +svint8_t svxar(svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s32))) +svint32_t svxar(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s64))) +svint64_t svxar(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svxar_n_s16))) +svint16_t svxar(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_m))) +svfloat64_t svabd_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_m))) +svfloat32_t svabd_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_m))) +svfloat16_t svabd_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_x))) +svfloat64_t svabd_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_x))) +svfloat32_t svabd_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_x))) +svfloat16_t svabd_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_z))) +svfloat64_t svabd_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_z))) +svfloat32_t svabd_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_z))) +svfloat16_t 
svabd_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_m))) +svint8_t svabd_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_m))) +svint32_t svabd_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_m))) +svint64_t svabd_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_m))) +svint16_t svabd_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_x))) +svint8_t svabd_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_x))) +svint32_t svabd_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_x))) +svint64_t svabd_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_x))) +svint16_t svabd_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_z))) +svint8_t svabd_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_z))) +svint32_t svabd_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_z))) +svint64_t svabd_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_z))) +svint16_t svabd_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_m))) +svuint8_t svabd_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_m))) +svuint32_t svabd_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_m))) 
+svuint64_t svabd_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_m))) +svuint16_t svabd_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_x))) +svuint8_t svabd_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_x))) +svuint32_t svabd_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_x))) +svuint64_t svabd_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_x))) +svuint16_t svabd_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_z))) +svuint8_t svabd_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_z))) +svuint32_t svabd_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_z))) +svuint64_t svabd_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_z))) +svuint16_t svabd_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_m))) +svfloat64_t svabd_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_m))) +svfloat32_t svabd_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_m))) +svfloat16_t svabd_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_x))) +svfloat64_t svabd_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_x))) +svfloat32_t svabd_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_x))) +svfloat16_t svabd_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_z))) +svfloat64_t svabd_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_z))) +svfloat32_t svabd_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_z))) +svfloat16_t svabd_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_m))) +svint8_t svabd_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_m))) +svint32_t svabd_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_m))) +svint64_t svabd_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_m))) +svint16_t svabd_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_x))) +svint8_t svabd_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_x))) +svint32_t svabd_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_x))) +svint64_t svabd_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_x))) +svint16_t svabd_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_z))) +svint8_t svabd_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_z))) +svint32_t svabd_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_z))) +svint64_t svabd_s64_z(svbool_t, svint64_t, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_z))) +svint16_t svabd_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_m))) +svuint8_t svabd_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_m))) +svuint32_t svabd_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_m))) +svuint64_t svabd_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_m))) +svuint16_t svabd_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_x))) +svuint8_t svabd_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_x))) +svuint32_t svabd_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_x))) +svuint64_t svabd_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_x))) +svuint16_t svabd_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_z))) +svuint8_t svabd_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_z))) +svuint32_t svabd_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_z))) +svuint64_t svabd_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_z))) +svuint16_t svabd_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_m))) +svfloat64_t svabs_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_m))) +svfloat32_t svabs_f32_m(svfloat32_t, svbool_t, 
svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_m))) +svfloat16_t svabs_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_x))) +svfloat64_t svabs_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_x))) +svfloat32_t svabs_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_x))) +svfloat16_t svabs_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_z))) +svfloat64_t svabs_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_z))) +svfloat32_t svabs_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_z))) +svfloat16_t svabs_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_m))) +svint8_t svabs_s8_m(svint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_m))) +svint32_t svabs_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_m))) +svint64_t svabs_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_m))) +svint16_t svabs_s16_m(svint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_x))) +svint8_t svabs_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_x))) +svint32_t svabs_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_x))) +svint64_t svabs_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_x))) +svint16_t svabs_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_z))) +svint8_t 
svabs_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_z))) +svint32_t svabs_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_z))) +svint64_t svabs_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_z))) +svint16_t svabs_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f64))) +svbool_t svacge_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f32))) +svbool_t svacge_n_f32(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f16))) +svbool_t svacge_n_f16(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f64))) +svbool_t svacge_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f32))) +svbool_t svacge_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f16))) +svbool_t svacge_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f64))) +svbool_t svacgt_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f32))) +svbool_t svacgt_n_f32(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f16))) +svbool_t svacgt_n_f16(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f64))) +svbool_t svacgt_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f32))) +svbool_t svacgt_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f16))) +svbool_t svacgt_f16(svbool_t, svfloat16_t, 
svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f64))) +svbool_t svacle_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f32))) +svbool_t svacle_n_f32(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f16))) +svbool_t svacle_n_f16(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f64))) +svbool_t svacle_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f32))) +svbool_t svacle_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f16))) +svbool_t svacle_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f64))) +svbool_t svaclt_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f32))) +svbool_t svaclt_n_f32(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f16))) +svbool_t svaclt_n_f16(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f64))) +svbool_t svaclt_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f32))) +svbool_t svaclt_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f16))) +svbool_t svaclt_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_m))) +svfloat64_t svadd_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_m))) +svfloat32_t svadd_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_m))) +svfloat16_t 
svadd_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_x))) +svfloat64_t svadd_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_x))) +svfloat32_t svadd_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_x))) +svfloat16_t svadd_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_z))) +svfloat64_t svadd_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_z))) +svfloat32_t svadd_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_z))) +svfloat16_t svadd_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_m))) +svuint8_t svadd_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_m))) +svuint32_t svadd_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_m))) +svuint64_t svadd_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_m))) +svuint16_t svadd_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_m))) +svint8_t svadd_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_m))) +svint32_t svadd_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_m))) +svint64_t svadd_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_m))) +svint16_t svadd_n_s16_m(svbool_t, svint16_t, int16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_x))) +svuint8_t svadd_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_x))) +svuint32_t svadd_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_x))) +svuint64_t svadd_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_x))) +svuint16_t svadd_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_x))) +svint8_t svadd_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_x))) +svint32_t svadd_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_x))) +svint64_t svadd_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_x))) +svint16_t svadd_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_z))) +svuint8_t svadd_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_z))) +svuint32_t svadd_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_z))) +svuint64_t svadd_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_z))) +svuint16_t svadd_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_z))) +svint8_t svadd_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_z))) +svint32_t svadd_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_z))) +svint64_t svadd_n_s64_z(svbool_t, svint64_t, 
int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_z))) +svint16_t svadd_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_m))) +svfloat64_t svadd_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_m))) +svfloat32_t svadd_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_m))) +svfloat16_t svadd_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_x))) +svfloat64_t svadd_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_x))) +svfloat32_t svadd_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_x))) +svfloat16_t svadd_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_z))) +svfloat64_t svadd_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_z))) +svfloat32_t svadd_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_z))) +svfloat16_t svadd_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_m))) +svuint8_t svadd_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_m))) +svuint32_t svadd_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_m))) +svuint64_t svadd_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_m))) +svuint16_t svadd_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_m))) 
+svint8_t svadd_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_m))) +svint32_t svadd_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_m))) +svint64_t svadd_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_m))) +svint16_t svadd_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_x))) +svuint8_t svadd_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_x))) +svuint32_t svadd_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_x))) +svuint64_t svadd_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_x))) +svuint16_t svadd_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_x))) +svint8_t svadd_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_x))) +svint32_t svadd_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_x))) +svint64_t svadd_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_x))) +svint16_t svadd_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_z))) +svuint8_t svadd_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_z))) +svuint32_t svadd_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_z))) +svuint64_t svadd_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_z))) +svuint16_t 
svadd_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_z))) +svint8_t svadd_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_z))) +svint32_t svadd_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_z))) +svint64_t svadd_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_z))) +svint16_t svadd_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f64))) +float64_t svadda_f64(svbool_t, float64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f32))) +float32_t svadda_f32(svbool_t, float32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f16))) +float16_t svadda_f16(svbool_t, float16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s8))) +int64_t svaddv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s32))) +int64_t svaddv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s64))) +int64_t svaddv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s16))) +int64_t svaddv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u8))) +uint64_t svaddv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u32))) +uint64_t svaddv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u64))) +uint64_t svaddv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u16))) +uint64_t svaddv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f64))) +float64_t 
svaddv_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f32))) +float32_t svaddv_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f16))) +float16_t svaddv_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_b_z))) +svbool_t svand_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_m))) +svuint8_t svand_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_m))) +svuint32_t svand_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_m))) +svuint64_t svand_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_m))) +svuint16_t svand_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_m))) +svint8_t svand_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_m))) +svint32_t svand_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_m))) +svint64_t svand_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_m))) +svint16_t svand_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_x))) +svuint8_t svand_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_x))) +svuint32_t svand_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_x))) +svuint64_t svand_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_x))) +svuint16_t svand_n_u16_x(svbool_t, 
svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_x))) +svint8_t svand_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_x))) +svint32_t svand_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_x))) +svint64_t svand_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_x))) +svint16_t svand_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_z))) +svuint8_t svand_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_z))) +svuint32_t svand_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_z))) +svuint64_t svand_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_z))) +svuint16_t svand_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_z))) +svint8_t svand_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_z))) +svint32_t svand_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_z))) +svint64_t svand_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_z))) +svint16_t svand_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_m))) +svuint8_t svand_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_m))) +svuint32_t svand_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_m))) +svuint64_t 
svand_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_m))) +svuint16_t svand_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_m))) +svint8_t svand_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_m))) +svint32_t svand_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_m))) +svint64_t svand_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_m))) +svint16_t svand_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_x))) +svuint8_t svand_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_x))) +svuint32_t svand_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_x))) +svuint64_t svand_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_x))) +svuint16_t svand_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_x))) +svint8_t svand_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_x))) +svint32_t svand_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_x))) +svint64_t svand_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_x))) +svint16_t svand_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_z))) +svuint8_t svand_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_z))) +svuint32_t 
svand_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_z))) +svuint64_t svand_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_z))) +svuint16_t svand_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_z))) +svint8_t svand_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_z))) +svint32_t svand_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_z))) +svint64_t svand_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_z))) +svint16_t svand_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u8))) +uint8_t svandv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u32))) +uint32_t svandv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u64))) +uint64_t svandv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u16))) +uint16_t svandv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s8))) +int8_t svandv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s32))) +int32_t svandv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s64))) +int64_t svandv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s16))) +int16_t svandv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_m))) +svint8_t svasr_n_s8_m(svbool_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_m))) +svint32_t 
svasr_n_s32_m(svbool_t, svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_m))) +svint64_t svasr_n_s64_m(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_m))) +svint16_t svasr_n_s16_m(svbool_t, svint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_x))) +svint8_t svasr_n_s8_x(svbool_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_x))) +svint32_t svasr_n_s32_x(svbool_t, svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_x))) +svint64_t svasr_n_s64_x(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_x))) +svint16_t svasr_n_s16_x(svbool_t, svint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_z))) +svint8_t svasr_n_s8_z(svbool_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_z))) +svint32_t svasr_n_s32_z(svbool_t, svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_z))) +svint64_t svasr_n_s64_z(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_z))) +svint16_t svasr_n_s16_z(svbool_t, svint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_m))) +svint8_t svasr_s8_m(svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_m))) +svint32_t svasr_s32_m(svbool_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_m))) +svint64_t svasr_s64_m(svbool_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_m))) +svint16_t svasr_s16_m(svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_x))) +svint8_t 
svasr_s8_x(svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_x))) +svint32_t svasr_s32_x(svbool_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_x))) +svint64_t svasr_s64_x(svbool_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_x))) +svint16_t svasr_s16_x(svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_z))) +svint8_t svasr_s8_z(svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_z))) +svint32_t svasr_s32_z(svbool_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_z))) +svint64_t svasr_s64_z(svbool_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_z))) +svint16_t svasr_s16_z(svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_m))) +svint8_t svasr_wide_n_s8_m(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_m))) +svint32_t svasr_wide_n_s32_m(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_m))) +svint16_t svasr_wide_n_s16_m(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_x))) +svint8_t svasr_wide_n_s8_x(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_x))) +svint32_t svasr_wide_n_s32_x(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_x))) +svint16_t svasr_wide_n_s16_x(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_z))) +svint8_t svasr_wide_n_s8_z(svbool_t, svint8_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_z))) +svint32_t svasr_wide_n_s32_z(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_z))) +svint16_t svasr_wide_n_s16_z(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_m))) +svint8_t svasr_wide_s8_m(svbool_t, svint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_m))) +svint32_t svasr_wide_s32_m(svbool_t, svint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_m))) +svint16_t svasr_wide_s16_m(svbool_t, svint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_x))) +svint8_t svasr_wide_s8_x(svbool_t, svint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_x))) +svint32_t svasr_wide_s32_x(svbool_t, svint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_x))) +svint16_t svasr_wide_s16_x(svbool_t, svint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_z))) +svint8_t svasr_wide_s8_z(svbool_t, svint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_z))) +svint32_t svasr_wide_s32_z(svbool_t, svint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_z))) +svint16_t svasr_wide_s16_z(svbool_t, svint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_m))) +svint8_t svasrd_n_s8_m(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_m))) +svint32_t svasrd_n_s32_m(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_m))) +svint64_t svasrd_n_s64_m(svbool_t, svint64_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_m))) +svint16_t svasrd_n_s16_m(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_x))) +svint8_t svasrd_n_s8_x(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_x))) +svint32_t svasrd_n_s32_x(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_x))) +svint64_t svasrd_n_s64_x(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_x))) +svint16_t svasrd_n_s16_x(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_z))) +svint8_t svasrd_n_s8_z(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_z))) +svint32_t svasrd_n_s32_z(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_z))) +svint64_t svasrd_n_s64_z(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_z))) +svint16_t svasrd_n_s16_z(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_b_z))) +svbool_t svbic_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_m))) +svuint8_t svbic_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_m))) +svuint32_t svbic_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_m))) +svuint64_t svbic_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_m))) +svuint16_t svbic_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_m))) +svint8_t svbic_n_s8_m(svbool_t, 
svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_m))) +svint32_t svbic_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_m))) +svint64_t svbic_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_m))) +svint16_t svbic_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_x))) +svuint8_t svbic_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_x))) +svuint32_t svbic_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_x))) +svuint64_t svbic_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_x))) +svuint16_t svbic_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_x))) +svint8_t svbic_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_x))) +svint32_t svbic_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_x))) +svint64_t svbic_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_x))) +svint16_t svbic_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_z))) +svuint8_t svbic_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_z))) +svuint32_t svbic_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_z))) +svuint64_t svbic_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_z))) +svuint16_t 
svbic_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_z))) +svint8_t svbic_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_z))) +svint32_t svbic_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_z))) +svint64_t svbic_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_z))) +svint16_t svbic_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_m))) +svuint8_t svbic_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_m))) +svuint32_t svbic_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_m))) +svuint64_t svbic_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_m))) +svuint16_t svbic_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_m))) +svint8_t svbic_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_m))) +svint32_t svbic_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_m))) +svint64_t svbic_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_m))) +svint16_t svbic_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_x))) +svuint8_t svbic_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_x))) +svuint32_t svbic_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_x))) +svuint64_t 
svbic_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_x))) +svuint16_t svbic_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_x))) +svint8_t svbic_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_x))) +svint32_t svbic_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_x))) +svint64_t svbic_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_x))) +svint16_t svbic_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_z))) +svuint8_t svbic_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_z))) +svuint32_t svbic_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_z))) +svuint64_t svbic_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_z))) +svuint16_t svbic_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_z))) +svint8_t svbic_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_z))) +svint32_t svbic_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_z))) +svint64_t svbic_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_z))) +svint16_t svbic_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_m))) +svbool_t svbrka_b_m(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_z))) +svbool_t svbrka_b_z(svbool_t, 
svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_m))) +svbool_t svbrkb_b_m(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_z))) +svbool_t svbrkb_b_z(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkn_b_z))) +svbool_t svbrkn_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpa_b_z))) +svbool_t svbrkpa_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpb_b_z))) +svbool_t svbrkpb_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_m))) +svfloat64_t svcadd_f64_m(svbool_t, svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_m))) +svfloat32_t svcadd_f32_m(svbool_t, svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_m))) +svfloat16_t svcadd_f16_m(svbool_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_x))) +svfloat64_t svcadd_f64_x(svbool_t, svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_x))) +svfloat32_t svcadd_f32_x(svbool_t, svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_x))) +svfloat16_t svcadd_f16_x(svbool_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_z))) +svfloat64_t svcadd_f64_z(svbool_t, svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_z))) +svfloat32_t svcadd_f32_z(svbool_t, svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_z))) +svfloat16_t svcadd_f16_z(svbool_t, svfloat16_t, svfloat16_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u8))) +uint8_t svclasta_n_u8(svbool_t, uint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u32))) +uint32_t svclasta_n_u32(svbool_t, uint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u64))) +uint64_t svclasta_n_u64(svbool_t, uint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u16))) +uint16_t svclasta_n_u16(svbool_t, uint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s8))) +int8_t svclasta_n_s8(svbool_t, int8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f64))) +float64_t svclasta_n_f64(svbool_t, float64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f32))) +float32_t svclasta_n_f32(svbool_t, float32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f16))) +float16_t svclasta_n_f16(svbool_t, float16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s32))) +int32_t svclasta_n_s32(svbool_t, int32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s64))) +int64_t svclasta_n_s64(svbool_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s16))) +int16_t svclasta_n_s16(svbool_t, int16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u8))) +svuint8_t svclasta_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u32))) +svuint32_t svclasta_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u64))) +svuint64_t svclasta_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u16))) +svuint16_t 
svclasta_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s8))) +svint8_t svclasta_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f64))) +svfloat64_t svclasta_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f32))) +svfloat32_t svclasta_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f16))) +svfloat16_t svclasta_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s32))) +svint32_t svclasta_s32(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s64))) +svint64_t svclasta_s64(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s16))) +svint16_t svclasta_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u8))) +uint8_t svclastb_n_u8(svbool_t, uint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u32))) +uint32_t svclastb_n_u32(svbool_t, uint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u64))) +uint64_t svclastb_n_u64(svbool_t, uint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u16))) +uint16_t svclastb_n_u16(svbool_t, uint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s8))) +int8_t svclastb_n_s8(svbool_t, int8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f64))) +float64_t svclastb_n_f64(svbool_t, float64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f32))) +float32_t svclastb_n_f32(svbool_t, float32_t, svfloat32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f16))) +float16_t svclastb_n_f16(svbool_t, float16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s32))) +int32_t svclastb_n_s32(svbool_t, int32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s64))) +int64_t svclastb_n_s64(svbool_t, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s16))) +int16_t svclastb_n_s16(svbool_t, int16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u8))) +svuint8_t svclastb_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u32))) +svuint32_t svclastb_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u64))) +svuint64_t svclastb_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u16))) +svuint16_t svclastb_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s8))) +svint8_t svclastb_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f64))) +svfloat64_t svclastb_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f32))) +svfloat32_t svclastb_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f16))) +svfloat16_t svclastb_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s32))) +svint32_t svclastb_s32(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s64))) +svint64_t svclastb_s64(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s16))) +svint16_t 
svclastb_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_m))) +svuint8_t svcls_s8_m(svuint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_m))) +svuint32_t svcls_s32_m(svuint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_m))) +svuint64_t svcls_s64_m(svuint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_m))) +svuint16_t svcls_s16_m(svuint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_x))) +svuint8_t svcls_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_x))) +svuint32_t svcls_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_x))) +svuint64_t svcls_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_x))) +svuint16_t svcls_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_z))) +svuint8_t svcls_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_z))) +svuint32_t svcls_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_z))) +svuint64_t svcls_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_z))) +svuint16_t svcls_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_m))) +svuint8_t svclz_u8_m(svuint8_t, svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_m))) +svuint32_t svclz_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_m))) +svuint64_t svclz_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_m))) +svuint16_t svclz_u16_m(svuint16_t, svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_m))) +svuint8_t svclz_s8_m(svuint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_m))) +svuint32_t svclz_s32_m(svuint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_m))) +svuint64_t svclz_s64_m(svuint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_m))) +svuint16_t svclz_s16_m(svuint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_x))) +svuint8_t svclz_u8_x(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_x))) +svuint32_t svclz_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_x))) +svuint64_t svclz_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_x))) +svuint16_t svclz_u16_x(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_x))) +svuint8_t svclz_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_x))) +svuint32_t svclz_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_x))) +svuint64_t svclz_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_x))) +svuint16_t svclz_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_z))) +svuint8_t svclz_u8_z(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_z))) +svuint32_t svclz_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_z))) +svuint64_t svclz_u64_z(svbool_t, svuint64_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_z))) +svuint16_t svclz_u16_z(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_z))) +svuint8_t svclz_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_z))) +svuint32_t svclz_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_z))) +svuint64_t svclz_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_z))) +svuint16_t svclz_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_m))) +svfloat64_t svcmla_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_m))) +svfloat32_t svcmla_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_m))) +svfloat16_t svcmla_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_x))) +svfloat64_t svcmla_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_x))) +svfloat32_t svcmla_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_x))) +svfloat16_t svcmla_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_z))) +svfloat64_t svcmla_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_z))) +svfloat32_t svcmla_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_z))) 
+svfloat16_t svcmla_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f32))) +svfloat32_t svcmla_lane_f32(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f16))) +svfloat16_t svcmla_lane_f16(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f64))) +svbool_t svcmpeq_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f32))) +svbool_t svcmpeq_n_f32(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f16))) +svbool_t svcmpeq_n_f16(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u8))) +svbool_t svcmpeq_n_u8(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u32))) +svbool_t svcmpeq_n_u32(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u64))) +svbool_t svcmpeq_n_u64(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u16))) +svbool_t svcmpeq_n_u16(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s8))) +svbool_t svcmpeq_n_s8(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s32))) +svbool_t svcmpeq_n_s32(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s64))) +svbool_t svcmpeq_n_s64(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s16))) +svbool_t svcmpeq_n_s16(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u8))) +svbool_t 
svcmpeq_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u32))) +svbool_t svcmpeq_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u64))) +svbool_t svcmpeq_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u16))) +svbool_t svcmpeq_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s8))) +svbool_t svcmpeq_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s32))) +svbool_t svcmpeq_s32(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s64))) +svbool_t svcmpeq_s64(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s16))) +svbool_t svcmpeq_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f64))) +svbool_t svcmpeq_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f32))) +svbool_t svcmpeq_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f16))) +svbool_t svcmpeq_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s8))) +svbool_t svcmpeq_wide_n_s8(svbool_t, svint8_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s32))) +svbool_t svcmpeq_wide_n_s32(svbool_t, svint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s16))) +svbool_t svcmpeq_wide_n_s16(svbool_t, svint16_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s8))) +svbool_t svcmpeq_wide_s8(svbool_t, svint8_t, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s32))) +svbool_t svcmpeq_wide_s32(svbool_t, svint32_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s16))) +svbool_t svcmpeq_wide_s16(svbool_t, svint16_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f64))) +svbool_t svcmpge_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f32))) +svbool_t svcmpge_n_f32(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f16))) +svbool_t svcmpge_n_f16(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s8))) +svbool_t svcmpge_n_s8(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s32))) +svbool_t svcmpge_n_s32(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s64))) +svbool_t svcmpge_n_s64(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s16))) +svbool_t svcmpge_n_s16(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u8))) +svbool_t svcmpge_n_u8(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u32))) +svbool_t svcmpge_n_u32(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u64))) +svbool_t svcmpge_n_u64(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u16))) +svbool_t svcmpge_n_u16(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s8))) +svbool_t svcmpge_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s32))) +svbool_t svcmpge_s32(svbool_t, svint32_t, 
svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s64))) +svbool_t svcmpge_s64(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s16))) +svbool_t svcmpge_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f64))) +svbool_t svcmpge_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f32))) +svbool_t svcmpge_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f16))) +svbool_t svcmpge_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u8))) +svbool_t svcmpge_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u32))) +svbool_t svcmpge_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u64))) +svbool_t svcmpge_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u16))) +svbool_t svcmpge_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s8))) +svbool_t svcmpge_wide_n_s8(svbool_t, svint8_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s32))) +svbool_t svcmpge_wide_n_s32(svbool_t, svint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s16))) +svbool_t svcmpge_wide_n_s16(svbool_t, svint16_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u8))) +svbool_t svcmpge_wide_n_u8(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u32))) +svbool_t svcmpge_wide_n_u32(svbool_t, svuint32_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u16))) +svbool_t svcmpge_wide_n_u16(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s8))) +svbool_t svcmpge_wide_s8(svbool_t, svint8_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s32))) +svbool_t svcmpge_wide_s32(svbool_t, svint32_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s16))) +svbool_t svcmpge_wide_s16(svbool_t, svint16_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u8))) +svbool_t svcmpge_wide_u8(svbool_t, svuint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u32))) +svbool_t svcmpge_wide_u32(svbool_t, svuint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u16))) +svbool_t svcmpge_wide_u16(svbool_t, svuint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f64))) +svbool_t svcmpgt_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f32))) +svbool_t svcmpgt_n_f32(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f16))) +svbool_t svcmpgt_n_f16(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s8))) +svbool_t svcmpgt_n_s8(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s32))) +svbool_t svcmpgt_n_s32(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s64))) +svbool_t svcmpgt_n_s64(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s16))) +svbool_t svcmpgt_n_s16(svbool_t, svint16_t, int16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u8))) +svbool_t svcmpgt_n_u8(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u32))) +svbool_t svcmpgt_n_u32(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u64))) +svbool_t svcmpgt_n_u64(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u16))) +svbool_t svcmpgt_n_u16(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s8))) +svbool_t svcmpgt_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s32))) +svbool_t svcmpgt_s32(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s64))) +svbool_t svcmpgt_s64(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s16))) +svbool_t svcmpgt_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f64))) +svbool_t svcmpgt_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f32))) +svbool_t svcmpgt_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f16))) +svbool_t svcmpgt_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u8))) +svbool_t svcmpgt_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u32))) +svbool_t svcmpgt_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u64))) +svbool_t svcmpgt_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u16))) +svbool_t svcmpgt_u16(svbool_t, svuint16_t, svuint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s8))) +svbool_t svcmpgt_wide_n_s8(svbool_t, svint8_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s32))) +svbool_t svcmpgt_wide_n_s32(svbool_t, svint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s16))) +svbool_t svcmpgt_wide_n_s16(svbool_t, svint16_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u8))) +svbool_t svcmpgt_wide_n_u8(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u32))) +svbool_t svcmpgt_wide_n_u32(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u16))) +svbool_t svcmpgt_wide_n_u16(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s8))) +svbool_t svcmpgt_wide_s8(svbool_t, svint8_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s32))) +svbool_t svcmpgt_wide_s32(svbool_t, svint32_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s16))) +svbool_t svcmpgt_wide_s16(svbool_t, svint16_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u8))) +svbool_t svcmpgt_wide_u8(svbool_t, svuint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u32))) +svbool_t svcmpgt_wide_u32(svbool_t, svuint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u16))) +svbool_t svcmpgt_wide_u16(svbool_t, svuint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f64))) +svbool_t svcmple_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f32))) +svbool_t svcmple_n_f32(svbool_t, svfloat32_t, float32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f16))) +svbool_t svcmple_n_f16(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s8))) +svbool_t svcmple_n_s8(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s32))) +svbool_t svcmple_n_s32(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s64))) +svbool_t svcmple_n_s64(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s16))) +svbool_t svcmple_n_s16(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u8))) +svbool_t svcmple_n_u8(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u32))) +svbool_t svcmple_n_u32(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u64))) +svbool_t svcmple_n_u64(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u16))) +svbool_t svcmple_n_u16(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s8))) +svbool_t svcmple_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s32))) +svbool_t svcmple_s32(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s64))) +svbool_t svcmple_s64(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s16))) +svbool_t svcmple_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f64))) +svbool_t svcmple_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f32))) +svbool_t svcmple_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f16))) +svbool_t svcmple_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u8))) +svbool_t svcmple_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u32))) +svbool_t svcmple_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u64))) +svbool_t svcmple_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u16))) +svbool_t svcmple_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s8))) +svbool_t svcmple_wide_n_s8(svbool_t, svint8_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s32))) +svbool_t svcmple_wide_n_s32(svbool_t, svint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s16))) +svbool_t svcmple_wide_n_s16(svbool_t, svint16_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u8))) +svbool_t svcmple_wide_n_u8(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u32))) +svbool_t svcmple_wide_n_u32(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u16))) +svbool_t svcmple_wide_n_u16(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s8))) +svbool_t svcmple_wide_s8(svbool_t, svint8_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s32))) +svbool_t svcmple_wide_s32(svbool_t, svint32_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s16))) +svbool_t svcmple_wide_s16(svbool_t, svint16_t, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u8))) +svbool_t svcmple_wide_u8(svbool_t, svuint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u32))) +svbool_t svcmple_wide_u32(svbool_t, svuint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u16))) +svbool_t svcmple_wide_u16(svbool_t, svuint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u8))) +svbool_t svcmplt_n_u8(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u32))) +svbool_t svcmplt_n_u32(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u64))) +svbool_t svcmplt_n_u64(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u16))) +svbool_t svcmplt_n_u16(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f64))) +svbool_t svcmplt_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f32))) +svbool_t svcmplt_n_f32(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f16))) +svbool_t svcmplt_n_f16(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s8))) +svbool_t svcmplt_n_s8(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s32))) +svbool_t svcmplt_n_s32(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s64))) +svbool_t svcmplt_n_s64(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s16))) +svbool_t svcmplt_n_s16(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u8))) +svbool_t 
svcmplt_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u32))) +svbool_t svcmplt_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u64))) +svbool_t svcmplt_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u16))) +svbool_t svcmplt_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s8))) +svbool_t svcmplt_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s32))) +svbool_t svcmplt_s32(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s64))) +svbool_t svcmplt_s64(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s16))) +svbool_t svcmplt_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f64))) +svbool_t svcmplt_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f32))) +svbool_t svcmplt_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f16))) +svbool_t svcmplt_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u8))) +svbool_t svcmplt_wide_n_u8(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u32))) +svbool_t svcmplt_wide_n_u32(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u16))) +svbool_t svcmplt_wide_n_u16(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s8))) +svbool_t svcmplt_wide_n_s8(svbool_t, svint8_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s32))) +svbool_t svcmplt_wide_n_s32(svbool_t, svint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s16))) +svbool_t svcmplt_wide_n_s16(svbool_t, svint16_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u8))) +svbool_t svcmplt_wide_u8(svbool_t, svuint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u32))) +svbool_t svcmplt_wide_u32(svbool_t, svuint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u16))) +svbool_t svcmplt_wide_u16(svbool_t, svuint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s8))) +svbool_t svcmplt_wide_s8(svbool_t, svint8_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s32))) +svbool_t svcmplt_wide_s32(svbool_t, svint32_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s16))) +svbool_t svcmplt_wide_s16(svbool_t, svint16_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f64))) +svbool_t svcmpne_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f32))) +svbool_t svcmpne_n_f32(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f16))) +svbool_t svcmpne_n_f16(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u8))) +svbool_t svcmpne_n_u8(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u32))) +svbool_t svcmpne_n_u32(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u64))) +svbool_t svcmpne_n_u64(svbool_t, svuint64_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u16))) +svbool_t svcmpne_n_u16(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s8))) +svbool_t svcmpne_n_s8(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s32))) +svbool_t svcmpne_n_s32(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s64))) +svbool_t svcmpne_n_s64(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s16))) +svbool_t svcmpne_n_s16(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u8))) +svbool_t svcmpne_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u32))) +svbool_t svcmpne_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u64))) +svbool_t svcmpne_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u16))) +svbool_t svcmpne_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s8))) +svbool_t svcmpne_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s32))) +svbool_t svcmpne_s32(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s64))) +svbool_t svcmpne_s64(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s16))) +svbool_t svcmpne_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f64))) +svbool_t svcmpne_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f32))) +svbool_t svcmpne_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f16))) +svbool_t svcmpne_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s8))) +svbool_t svcmpne_wide_n_s8(svbool_t, svint8_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s32))) +svbool_t svcmpne_wide_n_s32(svbool_t, svint32_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s16))) +svbool_t svcmpne_wide_n_s16(svbool_t, svint16_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s8))) +svbool_t svcmpne_wide_s8(svbool_t, svint8_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s32))) +svbool_t svcmpne_wide_s32(svbool_t, svint32_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s16))) +svbool_t svcmpne_wide_s16(svbool_t, svint16_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f64))) +svbool_t svcmpuo_n_f64(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f32))) +svbool_t svcmpuo_n_f32(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f16))) +svbool_t svcmpuo_n_f16(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f64))) +svbool_t svcmpuo_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f32))) +svbool_t svcmpuo_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f16))) +svbool_t svcmpuo_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_m))) +svuint8_t svcnot_u8_m(svuint8_t, svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_m))) 
+svuint32_t svcnot_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_m))) +svuint64_t svcnot_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_m))) +svuint16_t svcnot_u16_m(svuint16_t, svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_m))) +svint8_t svcnot_s8_m(svint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_m))) +svint32_t svcnot_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_m))) +svint64_t svcnot_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_m))) +svint16_t svcnot_s16_m(svint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_x))) +svuint8_t svcnot_u8_x(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_x))) +svuint32_t svcnot_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_x))) +svuint64_t svcnot_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_x))) +svuint16_t svcnot_u16_x(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_x))) +svint8_t svcnot_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_x))) +svint32_t svcnot_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_x))) +svint64_t svcnot_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_x))) +svint16_t svcnot_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_z))) +svuint8_t svcnot_u8_z(svbool_t, svuint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_z))) +svuint32_t svcnot_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_z))) +svuint64_t svcnot_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_z))) +svuint16_t svcnot_u16_z(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_z))) +svint8_t svcnot_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_z))) +svint32_t svcnot_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_z))) +svint64_t svcnot_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_z))) +svint16_t svcnot_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_m))) +svuint8_t svcnt_u8_m(svuint8_t, svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_m))) +svuint32_t svcnt_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_m))) +svuint64_t svcnt_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_m))) +svuint16_t svcnt_u16_m(svuint16_t, svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_m))) +svuint8_t svcnt_s8_m(svuint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_m))) +svuint64_t svcnt_f64_m(svuint64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_m))) +svuint32_t svcnt_f32_m(svuint32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_m))) +svuint16_t svcnt_f16_m(svuint16_t, svbool_t, svfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_m))) +svuint32_t svcnt_s32_m(svuint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_m))) +svuint64_t svcnt_s64_m(svuint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_m))) +svuint16_t svcnt_s16_m(svuint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_x))) +svuint8_t svcnt_u8_x(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_x))) +svuint32_t svcnt_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_x))) +svuint64_t svcnt_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_x))) +svuint16_t svcnt_u16_x(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_x))) +svuint8_t svcnt_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_x))) +svuint64_t svcnt_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_x))) +svuint32_t svcnt_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_x))) +svuint16_t svcnt_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_x))) +svuint32_t svcnt_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_x))) +svuint64_t svcnt_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_x))) +svuint16_t svcnt_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_z))) +svuint8_t svcnt_u8_z(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_z))) +svuint32_t svcnt_u32_z(svbool_t, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_z))) +svuint64_t svcnt_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_z))) +svuint16_t svcnt_u16_z(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_z))) +svuint8_t svcnt_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_z))) +svuint64_t svcnt_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_z))) +svuint32_t svcnt_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_z))) +svuint16_t svcnt_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_z))) +svuint32_t svcnt_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_z))) +svuint64_t svcnt_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_z))) +svuint16_t svcnt_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntb))) +uint64_t svcntb(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntb_pat))) +uint64_t svcntb_pat(enum svpattern); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntd))) +uint64_t svcntd(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntd_pat))) +uint64_t svcntd_pat(enum svpattern); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnth))) +uint64_t svcnth(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnth_pat))) +uint64_t svcnth_pat(enum svpattern); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b8))) +uint64_t svcntp_b8(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b32))) +uint64_t svcntp_b32(svbool_t, svbool_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b64))) +uint64_t svcntp_b64(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntp_b16))) +uint64_t svcntp_b16(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntw))) +uint64_t svcntw(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcntw_pat))) +uint64_t svcntw_pat(enum svpattern); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u8))) +svuint8x2_t svcreate2_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u32))) +svuint32x2_t svcreate2_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u64))) +svuint64x2_t svcreate2_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u16))) +svuint16x2_t svcreate2_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s8))) +svint8x2_t svcreate2_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f64))) +svfloat64x2_t svcreate2_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f32))) +svfloat32x2_t svcreate2_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f16))) +svfloat16x2_t svcreate2_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s32))) +svint32x2_t svcreate2_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s64))) +svint64x2_t svcreate2_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s16))) +svint16x2_t svcreate2_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u8))) +svuint8x3_t svcreate3_u8(svuint8_t, svuint8_t, 
svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u32))) +svuint32x3_t svcreate3_u32(svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u64))) +svuint64x3_t svcreate3_u64(svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u16))) +svuint16x3_t svcreate3_u16(svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s8))) +svint8x3_t svcreate3_s8(svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f64))) +svfloat64x3_t svcreate3_f64(svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f32))) +svfloat32x3_t svcreate3_f32(svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f16))) +svfloat16x3_t svcreate3_f16(svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s32))) +svint32x3_t svcreate3_s32(svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s64))) +svint64x3_t svcreate3_s64(svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s16))) +svint16x3_t svcreate3_s16(svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u8))) +svuint8x4_t svcreate4_u8(svuint8_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u32))) +svuint32x4_t svcreate4_u32(svuint32_t, svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u64))) +svuint64x4_t svcreate4_u64(svuint64_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u16))) +svuint16x4_t 
svcreate4_u16(svuint16_t, svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s8))) +svint8x4_t svcreate4_s8(svint8_t, svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f64))) +svfloat64x4_t svcreate4_f64(svfloat64_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f32))) +svfloat32x4_t svcreate4_f32(svfloat32_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f16))) +svfloat16x4_t svcreate4_f16(svfloat16_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s32))) +svint32x4_t svcreate4_s32(svint32_t, svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s64))) +svint64x4_t svcreate4_s64(svint64_t, svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s16))) +svint16x4_t svcreate4_s16(svint16_t, svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_m))) +svfloat16_t svcvt_f16_f32_m(svfloat16_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_x))) +svfloat16_t svcvt_f16_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_z))) +svfloat16_t svcvt_f16_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_m))) +svfloat16_t svcvt_f16_f64_m(svfloat16_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_x))) +svfloat16_t svcvt_f16_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_z))) +svfloat16_t svcvt_f16_f64_z(svbool_t, svfloat64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_m))) +svfloat16_t svcvt_f16_s16_m(svfloat16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_x))) +svfloat16_t svcvt_f16_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_z))) +svfloat16_t svcvt_f16_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_m))) +svfloat16_t svcvt_f16_s32_m(svfloat16_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_x))) +svfloat16_t svcvt_f16_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_z))) +svfloat16_t svcvt_f16_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_m))) +svfloat16_t svcvt_f16_s64_m(svfloat16_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_x))) +svfloat16_t svcvt_f16_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_z))) +svfloat16_t svcvt_f16_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_m))) +svfloat16_t svcvt_f16_u16_m(svfloat16_t, svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_x))) +svfloat16_t svcvt_f16_u16_x(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_z))) +svfloat16_t svcvt_f16_u16_z(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_m))) +svfloat16_t svcvt_f16_u32_m(svfloat16_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_x))) +svfloat16_t svcvt_f16_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_z))) +svfloat16_t 
svcvt_f16_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_m))) +svfloat16_t svcvt_f16_u64_m(svfloat16_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_x))) +svfloat16_t svcvt_f16_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_z))) +svfloat16_t svcvt_f16_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_m))) +svfloat32_t svcvt_f32_f16_m(svfloat32_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_x))) +svfloat32_t svcvt_f32_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_z))) +svfloat32_t svcvt_f32_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_m))) +svfloat32_t svcvt_f32_f64_m(svfloat32_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_x))) +svfloat32_t svcvt_f32_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_z))) +svfloat32_t svcvt_f32_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_m))) +svfloat32_t svcvt_f32_s32_m(svfloat32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x))) +svfloat32_t svcvt_f32_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_z))) +svfloat32_t svcvt_f32_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_m))) +svfloat32_t svcvt_f32_s64_m(svfloat32_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_x))) +svfloat32_t svcvt_f32_s64_x(svbool_t, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_z))) +svfloat32_t svcvt_f32_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_m))) +svfloat32_t svcvt_f32_u32_m(svfloat32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x))) +svfloat32_t svcvt_f32_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_z))) +svfloat32_t svcvt_f32_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_m))) +svfloat32_t svcvt_f32_u64_m(svfloat32_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_x))) +svfloat32_t svcvt_f32_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_z))) +svfloat32_t svcvt_f32_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_m))) +svfloat64_t svcvt_f64_f16_m(svfloat64_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_x))) +svfloat64_t svcvt_f64_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_z))) +svfloat64_t svcvt_f64_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_m))) +svfloat64_t svcvt_f64_f32_m(svfloat64_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_x))) +svfloat64_t svcvt_f64_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_z))) +svfloat64_t svcvt_f64_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_m))) +svfloat64_t svcvt_f64_s32_m(svfloat64_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_x))) 
+svfloat64_t svcvt_f64_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_z))) +svfloat64_t svcvt_f64_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_m))) +svfloat64_t svcvt_f64_s64_m(svfloat64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_x))) +svfloat64_t svcvt_f64_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_z))) +svfloat64_t svcvt_f64_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_m))) +svfloat64_t svcvt_f64_u32_m(svfloat64_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_x))) +svfloat64_t svcvt_f64_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_z))) +svfloat64_t svcvt_f64_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_m))) +svfloat64_t svcvt_f64_u64_m(svfloat64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_x))) +svfloat64_t svcvt_f64_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_z))) +svfloat64_t svcvt_f64_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_m))) +svint16_t svcvt_s16_f16_m(svint16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_x))) +svint16_t svcvt_s16_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_z))) +svint16_t svcvt_s16_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_m))) +svint32_t svcvt_s32_f16_m(svint32_t, svbool_t, svfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_x))) +svint32_t svcvt_s32_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_z))) +svint32_t svcvt_s32_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_m))) +svint32_t svcvt_s32_f32_m(svint32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x))) +svint32_t svcvt_s32_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_z))) +svint32_t svcvt_s32_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_m))) +svint32_t svcvt_s32_f64_m(svint32_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_x))) +svint32_t svcvt_s32_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_z))) +svint32_t svcvt_s32_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_m))) +svint64_t svcvt_s64_f16_m(svint64_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_x))) +svint64_t svcvt_s64_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_z))) +svint64_t svcvt_s64_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_m))) +svint64_t svcvt_s64_f32_m(svint64_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_x))) +svint64_t svcvt_s64_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_z))) +svint64_t svcvt_s64_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_m))) +svint64_t svcvt_s64_f64_m(svint64_t, svbool_t, 
svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_x))) +svint64_t svcvt_s64_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_z))) +svint64_t svcvt_s64_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_m))) +svuint16_t svcvt_u16_f16_m(svuint16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_x))) +svuint16_t svcvt_u16_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_z))) +svuint16_t svcvt_u16_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_m))) +svuint32_t svcvt_u32_f16_m(svuint32_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_x))) +svuint32_t svcvt_u32_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_z))) +svuint32_t svcvt_u32_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_m))) +svuint32_t svcvt_u32_f32_m(svuint32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x))) +svuint32_t svcvt_u32_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_z))) +svuint32_t svcvt_u32_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_m))) +svuint32_t svcvt_u32_f64_m(svuint32_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_x))) +svuint32_t svcvt_u32_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_z))) +svuint32_t svcvt_u32_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_m))) +svuint64_t 
svcvt_u64_f16_m(svuint64_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_x))) +svuint64_t svcvt_u64_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_z))) +svuint64_t svcvt_u64_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_m))) +svuint64_t svcvt_u64_f32_m(svuint64_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_x))) +svuint64_t svcvt_u64_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_z))) +svuint64_t svcvt_u64_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_m))) +svuint64_t svcvt_u64_f64_m(svuint64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_x))) +svuint64_t svcvt_u64_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_z))) +svuint64_t svcvt_u64_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_m))) +svfloat64_t svdiv_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_m))) +svfloat32_t svdiv_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_m))) +svfloat16_t svdiv_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_x))) +svfloat64_t svdiv_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_x))) +svfloat32_t svdiv_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_x))) +svfloat16_t svdiv_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_z))) +svfloat64_t svdiv_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_z))) +svfloat32_t svdiv_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_z))) +svfloat16_t svdiv_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_m))) +svint32_t svdiv_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_m))) +svint64_t svdiv_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_x))) +svint32_t svdiv_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_x))) +svint64_t svdiv_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_z))) +svint32_t svdiv_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_z))) +svint64_t svdiv_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_m))) +svuint32_t svdiv_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_m))) +svuint64_t svdiv_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_x))) +svuint32_t svdiv_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_x))) +svuint64_t svdiv_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_z))) +svuint32_t svdiv_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_z))) +svuint64_t 
svdiv_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_m))) +svfloat64_t svdiv_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_m))) +svfloat32_t svdiv_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_m))) +svfloat16_t svdiv_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_x))) +svfloat64_t svdiv_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_x))) +svfloat32_t svdiv_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_x))) +svfloat16_t svdiv_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_z))) +svfloat64_t svdiv_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_z))) +svfloat32_t svdiv_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_z))) +svfloat16_t svdiv_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_m))) +svint32_t svdiv_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_m))) +svint64_t svdiv_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_x))) +svint32_t svdiv_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_x))) +svint64_t svdiv_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_z))) +svint32_t svdiv_s32_z(svbool_t, svint32_t, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_z))) +svint64_t svdiv_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_m))) +svuint32_t svdiv_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_m))) +svuint64_t svdiv_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_x))) +svuint32_t svdiv_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_x))) +svuint64_t svdiv_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_z))) +svuint32_t svdiv_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_z))) +svuint64_t svdiv_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_m))) +svfloat64_t svdivr_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_m))) +svfloat32_t svdivr_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_m))) +svfloat16_t svdivr_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_x))) +svfloat64_t svdivr_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_x))) +svfloat32_t svdivr_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_x))) +svfloat16_t svdivr_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_z))) +svfloat64_t svdivr_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_z))) +svfloat32_t svdivr_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_z))) +svfloat16_t svdivr_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_m))) +svint32_t svdivr_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_m))) +svint64_t svdivr_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_x))) +svint32_t svdivr_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_x))) +svint64_t svdivr_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_z))) +svint32_t svdivr_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_z))) +svint64_t svdivr_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_m))) +svuint32_t svdivr_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_m))) +svuint64_t svdivr_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_x))) +svuint32_t svdivr_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_x))) +svuint64_t svdivr_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_z))) +svuint32_t svdivr_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_z))) +svuint64_t svdivr_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_m))) +svfloat64_t svdivr_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_m))) +svfloat32_t svdivr_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_m))) +svfloat16_t svdivr_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_x))) +svfloat64_t svdivr_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_x))) +svfloat32_t svdivr_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_x))) +svfloat16_t svdivr_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_z))) +svfloat64_t svdivr_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_z))) +svfloat32_t svdivr_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_z))) +svfloat16_t svdivr_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_m))) +svint32_t svdivr_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_m))) +svint64_t svdivr_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_x))) +svint32_t svdivr_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_x))) +svint64_t svdivr_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_z))) +svint32_t svdivr_s32_z(svbool_t, svint32_t, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_z))) +svint64_t svdivr_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_m))) +svuint32_t svdivr_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_m))) +svuint64_t svdivr_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_x))) +svuint32_t svdivr_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_x))) +svuint64_t svdivr_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_z))) +svuint32_t svdivr_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_z))) +svuint64_t svdivr_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s32))) +svint32_t svdot_n_s32(svint32_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s64))) +svint64_t svdot_n_s64(svint64_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u32))) +svuint32_t svdot_n_u32(svuint32_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u64))) +svuint64_t svdot_n_u64(svuint64_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s32))) +svint32_t svdot_s32(svint32_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s64))) +svint64_t svdot_s64(svint64_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u32))) +svuint32_t svdot_u32(svuint32_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u64))) +svuint64_t svdot_u64(svuint64_t, svuint16_t, svuint16_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s32))) +svint32_t svdot_lane_s32(svint32_t, svint8_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s64))) +svint64_t svdot_lane_s64(svint64_t, svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u32))) +svuint32_t svdot_lane_u32(svuint32_t, svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u64))) +svuint64_t svdot_lane_u64(svuint64_t, svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8))) +svuint8_t svdup_n_u8(uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32))) +svuint32_t svdup_n_u32(uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64))) +svuint64_t svdup_n_u64(uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16))) +svuint16_t svdup_n_u16(uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8))) +svint8_t svdup_n_s8(int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64))) +svfloat64_t svdup_n_f64(float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32))) +svfloat32_t svdup_n_f32(float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16))) +svfloat16_t svdup_n_f16(float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32))) +svint32_t svdup_n_s32(int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64))) +svint64_t svdup_n_s64(int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16))) +svint16_t svdup_n_s16(int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_m))) +svuint8_t svdup_n_u8_m(svuint8_t, svbool_t, uint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_m))) +svuint32_t svdup_n_u32_m(svuint32_t, svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_m))) +svuint64_t svdup_n_u64_m(svuint64_t, svbool_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_m))) +svuint16_t svdup_n_u16_m(svuint16_t, svbool_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_m))) +svint8_t svdup_n_s8_m(svint8_t, svbool_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_m))) +svfloat64_t svdup_n_f64_m(svfloat64_t, svbool_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_m))) +svfloat32_t svdup_n_f32_m(svfloat32_t, svbool_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_m))) +svfloat16_t svdup_n_f16_m(svfloat16_t, svbool_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_m))) +svint32_t svdup_n_s32_m(svint32_t, svbool_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_m))) +svint64_t svdup_n_s64_m(svint64_t, svbool_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_m))) +svint16_t svdup_n_s16_m(svint16_t, svbool_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b8))) +svbool_t svdup_n_b8(bool); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b32))) +svbool_t svdup_n_b32(bool); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b64))) +svbool_t svdup_n_b64(bool); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b16))) +svbool_t svdup_n_b16(bool); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_x))) +svuint8_t svdup_n_u8_x(svbool_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_x))) +svuint32_t 
svdup_n_u32_x(svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_x))) +svuint64_t svdup_n_u64_x(svbool_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_x))) +svuint16_t svdup_n_u16_x(svbool_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_x))) +svint8_t svdup_n_s8_x(svbool_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_x))) +svfloat64_t svdup_n_f64_x(svbool_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_x))) +svfloat32_t svdup_n_f32_x(svbool_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_x))) +svfloat16_t svdup_n_f16_x(svbool_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_x))) +svint32_t svdup_n_s32_x(svbool_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_x))) +svint64_t svdup_n_s64_x(svbool_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_x))) +svint16_t svdup_n_s16_x(svbool_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_z))) +svuint8_t svdup_n_u8_z(svbool_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_z))) +svuint32_t svdup_n_u32_z(svbool_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_z))) +svuint64_t svdup_n_u64_z(svbool_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_z))) +svuint16_t svdup_n_u16_z(svbool_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_z))) +svint8_t svdup_n_s8_z(svbool_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_z))) +svfloat64_t svdup_n_f64_z(svbool_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_z))) +svfloat32_t 
svdup_n_f32_z(svbool_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_z))) +svfloat16_t svdup_n_f16_z(svbool_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_z))) +svint32_t svdup_n_s32_z(svbool_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_z))) +svint64_t svdup_n_s64_z(svbool_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_z))) +svint16_t svdup_n_s16_z(svbool_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u8))) +svuint8_t svdup_lane_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u32))) +svuint32_t svdup_lane_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u64))) +svuint64_t svdup_lane_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u16))) +svuint16_t svdup_lane_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s8))) +svint8_t svdup_lane_s8(svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f64))) +svfloat64_t svdup_lane_f64(svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f32))) +svfloat32_t svdup_lane_f32(svfloat32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f16))) +svfloat16_t svdup_lane_f16(svfloat16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s32))) +svint32_t svdup_lane_s32(svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s64))) +svint64_t svdup_lane_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s16))) +svint16_t svdup_lane_s16(svint16_t, uint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u8))) +svuint8_t svdupq_n_u8(uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s8))) +svint8_t svdupq_n_s8(int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u16))) +svuint16_t svdupq_n_u16(uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f16))) +svfloat16_t svdupq_n_f16(float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s16))) +svint16_t svdupq_n_s16(int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u32))) +svuint32_t svdupq_n_u32(uint32_t, uint32_t, uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f32))) +svfloat32_t svdupq_n_f32(float32_t, float32_t, float32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s32))) +svint32_t svdupq_n_s32(int32_t, int32_t, int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u64))) +svuint64_t svdupq_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f64))) +svfloat64_t svdupq_n_f64(float64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s64))) +svint64_t svdupq_n_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b8))) +svbool_t svdupq_n_b8(bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, 
bool); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b16))) +svbool_t svdupq_n_b16(bool, bool, bool, bool, bool, bool, bool, bool); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b32))) +svbool_t svdupq_n_b32(bool, bool, bool, bool); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b64))) +svbool_t svdupq_n_b64(bool, bool); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u8))) +svuint8_t svdupq_lane_u8(svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u32))) +svuint32_t svdupq_lane_u32(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u64))) +svuint64_t svdupq_lane_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u16))) +svuint16_t svdupq_lane_u16(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s8))) +svint8_t svdupq_lane_s8(svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f64))) +svfloat64_t svdupq_lane_f64(svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f32))) +svfloat32_t svdupq_lane_f32(svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f16))) +svfloat16_t svdupq_lane_f16(svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s32))) +svint32_t svdupq_lane_s32(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s64))) +svint64_t svdupq_lane_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s16))) +svint16_t svdupq_lane_s16(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_b_z))) +svbool_t sveor_b_z(svbool_t, svbool_t, svbool_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_m))) +svuint8_t sveor_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_m))) +svuint32_t sveor_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_m))) +svuint64_t sveor_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_m))) +svuint16_t sveor_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_m))) +svint8_t sveor_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_m))) +svint32_t sveor_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_m))) +svint64_t sveor_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_m))) +svint16_t sveor_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_x))) +svuint8_t sveor_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_x))) +svuint32_t sveor_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_x))) +svuint64_t sveor_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_x))) +svuint16_t sveor_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_x))) +svint8_t sveor_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_x))) +svint32_t sveor_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_x))) +svint64_t sveor_n_s64_x(svbool_t, svint64_t, 
int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_x))) +svint16_t sveor_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_z))) +svuint8_t sveor_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_z))) +svuint32_t sveor_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_z))) +svuint64_t sveor_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_z))) +svuint16_t sveor_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_z))) +svint8_t sveor_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_z))) +svint32_t sveor_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_z))) +svint64_t sveor_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_z))) +svint16_t sveor_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_m))) +svuint8_t sveor_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_m))) +svuint32_t sveor_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_m))) +svuint64_t sveor_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_m))) +svuint16_t sveor_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_m))) +svint8_t sveor_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_m))) +svint32_t sveor_s32_m(svbool_t, svint32_t, 
svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_m))) +svint64_t sveor_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_m))) +svint16_t sveor_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_x))) +svuint8_t sveor_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_x))) +svuint32_t sveor_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_x))) +svuint64_t sveor_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_x))) +svuint16_t sveor_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_x))) +svint8_t sveor_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_x))) +svint32_t sveor_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_x))) +svint64_t sveor_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_x))) +svint16_t sveor_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_z))) +svuint8_t sveor_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_z))) +svuint32_t sveor_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_z))) +svuint64_t sveor_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_z))) +svuint16_t sveor_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_z))) +svint8_t sveor_s8_z(svbool_t, svint8_t, svint8_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_z))) +svint32_t sveor_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_z))) +svint64_t sveor_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_z))) +svint16_t sveor_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u8))) +uint8_t sveorv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u32))) +uint32_t sveorv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u64))) +uint64_t sveorv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u16))) +uint16_t sveorv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s8))) +int8_t sveorv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s32))) +int32_t sveorv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s64))) +int64_t sveorv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s16))) +int16_t sveorv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u8))) +svuint8_t svext_u8(svuint8_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u32))) +svuint32_t svext_u32(svuint32_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u64))) +svuint64_t svext_u64(svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u16))) +svuint16_t svext_u16(svuint16_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s8))) +svint8_t svext_s8(svint8_t, svint8_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f64))) +svfloat64_t svext_f64(svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f32))) +svfloat32_t svext_f32(svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f16))) +svfloat16_t svext_f16(svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s32))) +svint32_t svext_s32(svint32_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s64))) +svint64_t svext_s64(svint64_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s16))) +svint16_t svext_s16(svint16_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_m))) +svint32_t svextb_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_m))) +svint64_t svextb_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_m))) +svint16_t svextb_s16_m(svint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_x))) +svint32_t svextb_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_x))) +svint64_t svextb_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_x))) +svint16_t svextb_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_z))) +svint32_t svextb_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_z))) +svint64_t svextb_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_z))) +svint16_t svextb_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_m))) 
+svuint32_t svextb_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_m))) +svuint64_t svextb_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_m))) +svuint16_t svextb_u16_m(svuint16_t, svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_x))) +svuint32_t svextb_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_x))) +svuint64_t svextb_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_x))) +svuint16_t svextb_u16_x(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_z))) +svuint32_t svextb_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_z))) +svuint64_t svextb_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_z))) +svuint16_t svextb_u16_z(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_m))) +svint32_t svexth_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_m))) +svint64_t svexth_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_x))) +svint32_t svexth_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_x))) +svint64_t svexth_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_z))) +svint32_t svexth_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_z))) +svint64_t svexth_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_m))) +svuint32_t svexth_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_m))) +svuint64_t svexth_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_x))) +svuint32_t svexth_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_x))) +svuint64_t svexth_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_z))) +svuint32_t svexth_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_z))) +svuint64_t svexth_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_m))) +svint64_t svextw_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_x))) +svint64_t svextw_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_z))) +svint64_t svextw_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_m))) +svuint64_t svextw_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_x))) +svuint64_t svextw_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_z))) +svuint64_t svextw_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u8))) +svuint8_t svget2_u8(svuint8x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u32))) +svuint32_t svget2_u32(svuint32x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u64))) +svuint64_t svget2_u64(svuint64x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u16))) +svuint16_t svget2_u16(svuint16x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s8))) +svint8_t svget2_s8(svint8x2_t, 
uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f64))) +svfloat64_t svget2_f64(svfloat64x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f32))) +svfloat32_t svget2_f32(svfloat32x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f16))) +svfloat16_t svget2_f16(svfloat16x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s32))) +svint32_t svget2_s32(svint32x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s64))) +svint64_t svget2_s64(svint64x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s16))) +svint16_t svget2_s16(svint16x2_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u8))) +svuint8_t svget3_u8(svuint8x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u32))) +svuint32_t svget3_u32(svuint32x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u64))) +svuint64_t svget3_u64(svuint64x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u16))) +svuint16_t svget3_u16(svuint16x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s8))) +svint8_t svget3_s8(svint8x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f64))) +svfloat64_t svget3_f64(svfloat64x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f32))) +svfloat32_t svget3_f32(svfloat32x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f16))) +svfloat16_t svget3_f16(svfloat16x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s32))) +svint32_t svget3_s32(svint32x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s64))) +svint64_t svget3_s64(svint64x3_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s16))) +svint16_t svget3_s16(svint16x3_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u8))) +svuint8_t svget4_u8(svuint8x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u32))) +svuint32_t svget4_u32(svuint32x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u64))) +svuint64_t svget4_u64(svuint64x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u16))) +svuint16_t svget4_u16(svuint16x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s8))) +svint8_t svget4_s8(svint8x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f64))) +svfloat64_t svget4_f64(svfloat64x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f32))) +svfloat32_t svget4_f32(svfloat32x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f16))) +svfloat16_t svget4_f16(svfloat16x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s32))) +svint32_t svget4_s32(svint32x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s64))) +svint64_t svget4_s64(svint64x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s16))) +svint16_t svget4_s16(svint16x4_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_u8))) +svuint8_t svindex_u8(uint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_u32))) +svuint32_t svindex_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_u64))) +svuint64_t svindex_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_u16))) +svuint16_t svindex_u16(uint16_t, uint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_s8))) +svint8_t svindex_s8(int8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_s32))) +svint32_t svindex_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_s64))) +svint64_t svindex_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svindex_s16))) +svint16_t svindex_s16(int16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u8))) +svuint8_t svinsr_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u32))) +svuint32_t svinsr_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u64))) +svuint64_t svinsr_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u16))) +svuint16_t svinsr_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s8))) +svint8_t svinsr_n_s8(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f64))) +svfloat64_t svinsr_n_f64(svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f32))) +svfloat32_t svinsr_n_f32(svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f16))) +svfloat16_t svinsr_n_f16(svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s32))) +svint32_t svinsr_n_s32(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s64))) +svint64_t svinsr_n_s64(svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s16))) +svint16_t svinsr_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u8))) +uint8_t svlasta_u8(svbool_t, svuint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u32))) +uint32_t svlasta_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u64))) +uint64_t svlasta_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u16))) +uint16_t svlasta_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s8))) +int8_t svlasta_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f64))) +float64_t svlasta_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f32))) +float32_t svlasta_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f16))) +float16_t svlasta_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s32))) +int32_t svlasta_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s64))) +int64_t svlasta_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s16))) +int16_t svlasta_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u8))) +uint8_t svlastb_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u32))) +uint32_t svlastb_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u64))) +uint64_t svlastb_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u16))) +uint16_t svlastb_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s8))) +int8_t svlastb_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f64))) +float64_t svlastb_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f32))) 
+float32_t svlastb_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f16))) +float16_t svlastb_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s32))) +int32_t svlastb_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s64))) +int64_t svlastb_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s16))) +int16_t svlastb_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8))) +svuint8_t svld1_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32))) +svuint32_t svld1_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64))) +svuint64_t svld1_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16))) +svuint16_t svld1_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8))) +svint8_t svld1_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64))) +svfloat64_t svld1_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32))) +svfloat32_t svld1_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16))) +svfloat16_t svld1_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32))) +svint32_t svld1_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64))) +svint64_t svld1_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16))) +svint16_t svld1_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8))) +svuint8_t 
svld1_vnum_u8(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32))) +svuint32_t svld1_vnum_u32(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64))) +svuint64_t svld1_vnum_u64(svbool_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16))) +svuint16_t svld1_vnum_u16(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8))) +svint8_t svld1_vnum_s8(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64))) +svfloat64_t svld1_vnum_f64(svbool_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32))) +svfloat32_t svld1_vnum_f32(svbool_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16))) +svfloat16_t svld1_vnum_f16(svbool_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32))) +svint32_t svld1_vnum_s32(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64))) +svint64_t svld1_vnum_s64(svbool_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16))) +svint16_t svld1_vnum_s16(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u8))) +svuint8_t svld1rq_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u32))) +svuint32_t svld1rq_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u64))) +svuint64_t svld1rq_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u16))) +svuint16_t svld1rq_u16(svbool_t, uint16_t const *); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s8))) +svint8_t svld1rq_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f64))) +svfloat64_t svld1rq_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f32))) +svfloat32_t svld1rq_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f16))) +svfloat16_t svld1rq_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s32))) +svint32_t svld1rq_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s64))) +svint64_t svld1rq_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s16))) +svint16_t svld1rq_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_u32))) +svuint32_t svld1sb_vnum_u32(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_u64))) +svuint64_t svld1sb_vnum_u64(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_u16))) +svuint16_t svld1sb_vnum_u16(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_s32))) +svint32_t svld1sb_vnum_s32(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_s64))) +svint64_t svld1sb_vnum_s64(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_vnum_s16))) +svint16_t svld1sb_vnum_s16(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_u32))) +svuint32_t svld1sb_u32(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_u64))) +svuint64_t 
svld1sb_u64(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_u16))) +svuint16_t svld1sb_u16(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_s32))) +svint32_t svld1sb_s32(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_s64))) +svint64_t svld1sb_s64(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sb_s16))) +svint16_t svld1sb_s16(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_u32))) +svuint32_t svld1sh_vnum_u32(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_u64))) +svuint64_t svld1sh_vnum_u64(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_s32))) +svint32_t svld1sh_vnum_s32(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_vnum_s64))) +svint64_t svld1sh_vnum_s64(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_u32))) +svuint32_t svld1sh_u32(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_u64))) +svuint64_t svld1sh_u64(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_s32))) +svint32_t svld1sh_s32(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sh_s64))) +svint64_t svld1sh_s64(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_vnum_u64))) +svuint64_t svld1sw_vnum_u64(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_vnum_s64))) +svint64_t svld1sw_vnum_s64(svbool_t, int32_t const *, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_u64))) +svuint64_t svld1sw_u64(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1sw_s64))) +svint64_t svld1sw_s64(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_u32))) +svuint32_t svld1ub_vnum_u32(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_u64))) +svuint64_t svld1ub_vnum_u64(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_u16))) +svuint16_t svld1ub_vnum_u16(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_s32))) +svint32_t svld1ub_vnum_s32(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_s64))) +svint64_t svld1ub_vnum_s64(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_vnum_s16))) +svint16_t svld1ub_vnum_s16(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_u32))) +svuint32_t svld1ub_u32(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_u64))) +svuint64_t svld1ub_u64(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_u16))) +svuint16_t svld1ub_u16(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_s32))) +svint32_t svld1ub_s32(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_s64))) +svint64_t svld1ub_s64(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1ub_s16))) +svint16_t svld1ub_s16(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_u32))) +svuint32_t 
svld1uh_vnum_u32(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_u64))) +svuint64_t svld1uh_vnum_u64(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_s32))) +svint32_t svld1uh_vnum_s32(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_vnum_s64))) +svint64_t svld1uh_vnum_s64(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_u32))) +svuint32_t svld1uh_u32(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_u64))) +svuint64_t svld1uh_u64(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_s32))) +svint32_t svld1uh_s32(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uh_s64))) +svint64_t svld1uh_s64(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_vnum_u64))) +svuint64_t svld1uw_vnum_u64(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_vnum_s64))) +svint64_t svld1uw_vnum_s64(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_u64))) +svuint64_t svld1uw_u64(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1uw_s64))) +svint64_t svld1uw_s64(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u8))) +svuint8x2_t svld2_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u32))) +svuint32x2_t svld2_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u64))) +svuint64x2_t svld2_u64(svbool_t, uint64_t const *); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u16))) +svuint16x2_t svld2_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s8))) +svint8x2_t svld2_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f64))) +svfloat64x2_t svld2_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f32))) +svfloat32x2_t svld2_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f16))) +svfloat16x2_t svld2_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s32))) +svint32x2_t svld2_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s64))) +svint64x2_t svld2_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s16))) +svint16x2_t svld2_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u8))) +svuint8x2_t svld2_vnum_u8(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u32))) +svuint32x2_t svld2_vnum_u32(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u64))) +svuint64x2_t svld2_vnum_u64(svbool_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u16))) +svuint16x2_t svld2_vnum_u16(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s8))) +svint8x2_t svld2_vnum_s8(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f64))) +svfloat64x2_t svld2_vnum_f64(svbool_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f32))) +svfloat32x2_t svld2_vnum_f32(svbool_t, 
float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f16))) +svfloat16x2_t svld2_vnum_f16(svbool_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s32))) +svint32x2_t svld2_vnum_s32(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s64))) +svint64x2_t svld2_vnum_s64(svbool_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s16))) +svint16x2_t svld2_vnum_s16(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u8))) +svuint8x3_t svld3_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u32))) +svuint32x3_t svld3_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u64))) +svuint64x3_t svld3_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u16))) +svuint16x3_t svld3_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s8))) +svint8x3_t svld3_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f64))) +svfloat64x3_t svld3_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f32))) +svfloat32x3_t svld3_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f16))) +svfloat16x3_t svld3_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s32))) +svint32x3_t svld3_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s64))) +svint64x3_t svld3_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s16))) +svint16x3_t svld3_s16(svbool_t, int16_t const *); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u8))) +svuint8x3_t svld3_vnum_u8(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u32))) +svuint32x3_t svld3_vnum_u32(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u64))) +svuint64x3_t svld3_vnum_u64(svbool_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u16))) +svuint16x3_t svld3_vnum_u16(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s8))) +svint8x3_t svld3_vnum_s8(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f64))) +svfloat64x3_t svld3_vnum_f64(svbool_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f32))) +svfloat32x3_t svld3_vnum_f32(svbool_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f16))) +svfloat16x3_t svld3_vnum_f16(svbool_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s32))) +svint32x3_t svld3_vnum_s32(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s64))) +svint64x3_t svld3_vnum_s64(svbool_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s16))) +svint16x3_t svld3_vnum_s16(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u8))) +svuint8x4_t svld4_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u32))) +svuint32x4_t svld4_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u64))) +svuint64x4_t svld4_u64(svbool_t, uint64_t const *); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u16))) +svuint16x4_t svld4_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s8))) +svint8x4_t svld4_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f64))) +svfloat64x4_t svld4_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f32))) +svfloat32x4_t svld4_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f16))) +svfloat16x4_t svld4_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s32))) +svint32x4_t svld4_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s64))) +svint64x4_t svld4_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s16))) +svint16x4_t svld4_s16(svbool_t, int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u8))) +svuint8x4_t svld4_vnum_u8(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u32))) +svuint32x4_t svld4_vnum_u32(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u64))) +svuint64x4_t svld4_vnum_u64(svbool_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u16))) +svuint16x4_t svld4_vnum_u16(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s8))) +svint8x4_t svld4_vnum_s8(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f64))) +svfloat64x4_t svld4_vnum_f64(svbool_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f32))) +svfloat32x4_t svld4_vnum_f32(svbool_t, 
float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f16))) +svfloat16x4_t svld4_vnum_f16(svbool_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s32))) +svint32x4_t svld4_vnum_s32(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s64))) +svint64x4_t svld4_vnum_s64(svbool_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s16))) +svint16x4_t svld4_vnum_s16(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8))) +svuint8_t svldnt1_u8(svbool_t, uint8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32))) +svuint32_t svldnt1_u32(svbool_t, uint32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64))) +svuint64_t svldnt1_u64(svbool_t, uint64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16))) +svuint16_t svldnt1_u16(svbool_t, uint16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8))) +svint8_t svldnt1_s8(svbool_t, int8_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64))) +svfloat64_t svldnt1_f64(svbool_t, float64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32))) +svfloat32_t svldnt1_f32(svbool_t, float32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16))) +svfloat16_t svldnt1_f16(svbool_t, float16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32))) +svint32_t svldnt1_s32(svbool_t, int32_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64))) +svint64_t svldnt1_s64(svbool_t, int64_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16))) +svint16_t svldnt1_s16(svbool_t, 
int16_t const *); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8))) +svuint8_t svldnt1_vnum_u8(svbool_t, uint8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32))) +svuint32_t svldnt1_vnum_u32(svbool_t, uint32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64))) +svuint64_t svldnt1_vnum_u64(svbool_t, uint64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16))) +svuint16_t svldnt1_vnum_u16(svbool_t, uint16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8))) +svint8_t svldnt1_vnum_s8(svbool_t, int8_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64))) +svfloat64_t svldnt1_vnum_f64(svbool_t, float64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32))) +svfloat32_t svldnt1_vnum_f32(svbool_t, float32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16))) +svfloat16_t svldnt1_vnum_f16(svbool_t, float16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32))) +svint32_t svldnt1_vnum_s32(svbool_t, int32_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64))) +svint64_t svldnt1_vnum_s64(svbool_t, int64_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16))) +svint16_t svldnt1_vnum_s16(svbool_t, int16_t const *, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u8))) +uint64_t svlen_u8(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u32))) +uint64_t svlen_u32(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u64))) +uint64_t svlen_u64(svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u16))) +uint64_t svlen_u16(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s8))) +uint64_t svlen_s8(svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f64))) +uint64_t svlen_f64(svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f32))) +uint64_t svlen_f32(svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f16))) +uint64_t svlen_f16(svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s32))) +uint64_t svlen_s32(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s64))) +uint64_t svlen_s64(svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s16))) +uint64_t svlen_s16(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_m))) +svuint8_t svlsl_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_m))) +svuint32_t svlsl_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_m))) +svuint64_t svlsl_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_m))) +svuint16_t svlsl_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_m))) +svint8_t svlsl_n_s8_m(svbool_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_m))) +svint32_t svlsl_n_s32_m(svbool_t, svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_m))) +svint64_t svlsl_n_s64_m(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_m))) +svint16_t svlsl_n_s16_m(svbool_t, svint16_t, uint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_x))) +svuint8_t svlsl_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_x))) +svuint32_t svlsl_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_x))) +svuint64_t svlsl_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_x))) +svuint16_t svlsl_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_x))) +svint8_t svlsl_n_s8_x(svbool_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_x))) +svint32_t svlsl_n_s32_x(svbool_t, svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_x))) +svint64_t svlsl_n_s64_x(svbool_t, svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_x))) +svint16_t svlsl_n_s16_x(svbool_t, svint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_z))) +svuint8_t svlsl_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_z))) +svuint32_t svlsl_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_z))) +svuint64_t svlsl_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_z))) +svuint16_t svlsl_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_z))) +svint8_t svlsl_n_s8_z(svbool_t, svint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_z))) +svint32_t svlsl_n_s32_z(svbool_t, svint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_z))) +svint64_t svlsl_n_s64_z(svbool_t, 
svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_z))) +svint16_t svlsl_n_s16_z(svbool_t, svint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_m))) +svuint8_t svlsl_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_m))) +svuint32_t svlsl_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_m))) +svuint64_t svlsl_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_m))) +svuint16_t svlsl_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_m))) +svint8_t svlsl_s8_m(svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_m))) +svint32_t svlsl_s32_m(svbool_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_m))) +svint64_t svlsl_s64_m(svbool_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_m))) +svint16_t svlsl_s16_m(svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_x))) +svuint8_t svlsl_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_x))) +svuint32_t svlsl_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_x))) +svuint64_t svlsl_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_x))) +svuint16_t svlsl_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_x))) +svint8_t svlsl_s8_x(svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_x))) +svint32_t svlsl_s32_x(svbool_t, 
svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_x))) +svint64_t svlsl_s64_x(svbool_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_x))) +svint16_t svlsl_s16_x(svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_z))) +svuint8_t svlsl_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_z))) +svuint32_t svlsl_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_z))) +svuint64_t svlsl_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_z))) +svuint16_t svlsl_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_z))) +svint8_t svlsl_s8_z(svbool_t, svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_z))) +svint32_t svlsl_s32_z(svbool_t, svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_z))) +svint64_t svlsl_s64_z(svbool_t, svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_z))) +svint16_t svlsl_s16_z(svbool_t, svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_m))) +svuint8_t svlsl_wide_n_u8_m(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_m))) +svuint32_t svlsl_wide_n_u32_m(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_m))) +svuint16_t svlsl_wide_n_u16_m(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_m))) +svint8_t svlsl_wide_n_s8_m(svbool_t, svint8_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_m))) +svint32_t svlsl_wide_n_s32_m(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_m))) +svint16_t svlsl_wide_n_s16_m(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_x))) +svuint8_t svlsl_wide_n_u8_x(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_x))) +svuint32_t svlsl_wide_n_u32_x(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_x))) +svuint16_t svlsl_wide_n_u16_x(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_x))) +svint8_t svlsl_wide_n_s8_x(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_x))) +svint32_t svlsl_wide_n_s32_x(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_x))) +svint16_t svlsl_wide_n_s16_x(svbool_t, svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_z))) +svuint8_t svlsl_wide_n_u8_z(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_z))) +svuint32_t svlsl_wide_n_u32_z(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_z))) +svuint16_t svlsl_wide_n_u16_z(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_z))) +svint8_t svlsl_wide_n_s8_z(svbool_t, svint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_z))) +svint32_t svlsl_wide_n_s32_z(svbool_t, svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_z))) +svint16_t svlsl_wide_n_s16_z(svbool_t, 
svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_m))) +svuint8_t svlsl_wide_u8_m(svbool_t, svuint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_m))) +svuint32_t svlsl_wide_u32_m(svbool_t, svuint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_m))) +svuint16_t svlsl_wide_u16_m(svbool_t, svuint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_m))) +svint8_t svlsl_wide_s8_m(svbool_t, svint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_m))) +svint32_t svlsl_wide_s32_m(svbool_t, svint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_m))) +svint16_t svlsl_wide_s16_m(svbool_t, svint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_x))) +svuint8_t svlsl_wide_u8_x(svbool_t, svuint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_x))) +svuint32_t svlsl_wide_u32_x(svbool_t, svuint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_x))) +svuint16_t svlsl_wide_u16_x(svbool_t, svuint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_x))) +svint8_t svlsl_wide_s8_x(svbool_t, svint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_x))) +svint32_t svlsl_wide_s32_x(svbool_t, svint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_x))) +svint16_t svlsl_wide_s16_x(svbool_t, svint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_z))) +svuint8_t svlsl_wide_u8_z(svbool_t, svuint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_z))) +svuint32_t svlsl_wide_u32_z(svbool_t, 
svuint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_z))) +svuint16_t svlsl_wide_u16_z(svbool_t, svuint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_z))) +svint8_t svlsl_wide_s8_z(svbool_t, svint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_z))) +svint32_t svlsl_wide_s32_z(svbool_t, svint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_z))) +svint16_t svlsl_wide_s16_z(svbool_t, svint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_m))) +svuint8_t svlsr_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_m))) +svuint32_t svlsr_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_m))) +svuint64_t svlsr_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_m))) +svuint16_t svlsr_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_x))) +svuint8_t svlsr_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_x))) +svuint32_t svlsr_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_x))) +svuint64_t svlsr_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_x))) +svuint16_t svlsr_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_z))) +svuint8_t svlsr_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_z))) +svuint32_t svlsr_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_z))) +svuint64_t svlsr_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_z))) +svuint16_t svlsr_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_m))) +svuint8_t svlsr_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_m))) +svuint32_t svlsr_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_m))) +svuint64_t svlsr_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_m))) +svuint16_t svlsr_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_x))) +svuint8_t svlsr_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_x))) +svuint32_t svlsr_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_x))) +svuint64_t svlsr_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_x))) +svuint16_t svlsr_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_z))) +svuint8_t svlsr_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_z))) +svuint32_t svlsr_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_z))) +svuint64_t svlsr_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_z))) +svuint16_t svlsr_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_m))) +svuint8_t svlsr_wide_n_u8_m(svbool_t, 
svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_m))) +svuint32_t svlsr_wide_n_u32_m(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_m))) +svuint16_t svlsr_wide_n_u16_m(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_x))) +svuint8_t svlsr_wide_n_u8_x(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_x))) +svuint32_t svlsr_wide_n_u32_x(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_x))) +svuint16_t svlsr_wide_n_u16_x(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_z))) +svuint8_t svlsr_wide_n_u8_z(svbool_t, svuint8_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_z))) +svuint32_t svlsr_wide_n_u32_z(svbool_t, svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_z))) +svuint16_t svlsr_wide_n_u16_z(svbool_t, svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_m))) +svuint8_t svlsr_wide_u8_m(svbool_t, svuint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_m))) +svuint32_t svlsr_wide_u32_m(svbool_t, svuint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_m))) +svuint16_t svlsr_wide_u16_m(svbool_t, svuint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_x))) +svuint8_t svlsr_wide_u8_x(svbool_t, svuint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_x))) +svuint32_t svlsr_wide_u32_x(svbool_t, svuint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_x))) +svuint16_t 
svlsr_wide_u16_x(svbool_t, svuint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_z))) +svuint8_t svlsr_wide_u8_z(svbool_t, svuint8_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_z))) +svuint32_t svlsr_wide_u32_z(svbool_t, svuint32_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_z))) +svuint16_t svlsr_wide_u16_z(svbool_t, svuint16_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_m))) +svfloat64_t svmad_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_m))) +svfloat32_t svmad_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_m))) +svfloat16_t svmad_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_x))) +svfloat64_t svmad_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_x))) +svfloat32_t svmad_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_x))) +svfloat16_t svmad_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_z))) +svfloat64_t svmad_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_z))) +svfloat32_t svmad_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_z))) +svfloat16_t svmad_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_m))) +svuint8_t svmad_n_u8_m(svbool_t, svuint8_t, svuint8_t, 
uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_m))) +svuint32_t svmad_n_u32_m(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_m))) +svuint64_t svmad_n_u64_m(svbool_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_m))) +svuint16_t svmad_n_u16_m(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_m))) +svint8_t svmad_n_s8_m(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_m))) +svint32_t svmad_n_s32_m(svbool_t, svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_m))) +svint64_t svmad_n_s64_m(svbool_t, svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_m))) +svint16_t svmad_n_s16_m(svbool_t, svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_x))) +svuint8_t svmad_n_u8_x(svbool_t, svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_x))) +svuint32_t svmad_n_u32_x(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_x))) +svuint64_t svmad_n_u64_x(svbool_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_x))) +svuint16_t svmad_n_u16_x(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_x))) +svint8_t svmad_n_s8_x(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_x))) +svint32_t svmad_n_s32_x(svbool_t, svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_x))) +svint64_t 
svmad_n_s64_x(svbool_t, svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_x))) +svint16_t svmad_n_s16_x(svbool_t, svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_z))) +svuint8_t svmad_n_u8_z(svbool_t, svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_z))) +svuint32_t svmad_n_u32_z(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_z))) +svuint64_t svmad_n_u64_z(svbool_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_z))) +svuint16_t svmad_n_u16_z(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_z))) +svint8_t svmad_n_s8_z(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_z))) +svint32_t svmad_n_s32_z(svbool_t, svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_z))) +svint64_t svmad_n_s64_z(svbool_t, svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_z))) +svint16_t svmad_n_s16_z(svbool_t, svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_m))) +svfloat64_t svmad_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_m))) +svfloat32_t svmad_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_m))) +svfloat16_t svmad_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_x))) +svfloat64_t svmad_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_x))) +svfloat32_t svmad_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_x))) +svfloat16_t svmad_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_z))) +svfloat64_t svmad_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_z))) +svfloat32_t svmad_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_z))) +svfloat16_t svmad_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_m))) +svuint8_t svmad_u8_m(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_m))) +svuint32_t svmad_u32_m(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_m))) +svuint64_t svmad_u64_m(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_m))) +svuint16_t svmad_u16_m(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_m))) +svint8_t svmad_s8_m(svbool_t, svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_m))) +svint32_t svmad_s32_m(svbool_t, svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_m))) +svint64_t svmad_s64_m(svbool_t, svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_m))) +svint16_t svmad_s16_m(svbool_t, svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_x))) +svuint8_t 
svmad_u8_x(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_x))) +svuint32_t svmad_u32_x(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_x))) +svuint64_t svmad_u64_x(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_x))) +svuint16_t svmad_u16_x(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_x))) +svint8_t svmad_s8_x(svbool_t, svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_x))) +svint32_t svmad_s32_x(svbool_t, svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_x))) +svint64_t svmad_s64_x(svbool_t, svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_x))) +svint16_t svmad_s16_x(svbool_t, svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_z))) +svuint8_t svmad_u8_z(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_z))) +svuint32_t svmad_u32_z(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_z))) +svuint64_t svmad_u64_z(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_z))) +svuint16_t svmad_u16_z(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_z))) +svint8_t svmad_s8_z(svbool_t, svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_z))) +svint32_t svmad_s32_z(svbool_t, svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_z))) 
+svint64_t svmad_s64_z(svbool_t, svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_z))) +svint16_t svmad_s16_z(svbool_t, svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_m))) +svfloat64_t svmax_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_m))) +svfloat32_t svmax_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_m))) +svfloat16_t svmax_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_x))) +svfloat64_t svmax_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_x))) +svfloat32_t svmax_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_x))) +svfloat16_t svmax_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_z))) +svfloat64_t svmax_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_z))) +svfloat32_t svmax_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_z))) +svfloat16_t svmax_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_m))) +svint8_t svmax_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_m))) +svint32_t svmax_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_m))) +svint64_t svmax_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_m))) +svint16_t svmax_n_s16_m(svbool_t, svint16_t, 
int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_x))) +svint8_t svmax_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_x))) +svint32_t svmax_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_x))) +svint64_t svmax_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_x))) +svint16_t svmax_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_z))) +svint8_t svmax_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_z))) +svint32_t svmax_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_z))) +svint64_t svmax_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_z))) +svint16_t svmax_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_m))) +svuint8_t svmax_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_m))) +svuint32_t svmax_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_m))) +svuint64_t svmax_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_m))) +svuint16_t svmax_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_x))) +svuint8_t svmax_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_x))) +svuint32_t svmax_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_x))) +svuint64_t svmax_n_u64_x(svbool_t, 
svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_x))) +svuint16_t svmax_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_z))) +svuint8_t svmax_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_z))) +svuint32_t svmax_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_z))) +svuint64_t svmax_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_z))) +svuint16_t svmax_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_m))) +svfloat64_t svmax_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_m))) +svfloat32_t svmax_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_m))) +svfloat16_t svmax_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x))) +svfloat64_t svmax_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x))) +svfloat32_t svmax_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x))) +svfloat16_t svmax_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_z))) +svfloat64_t svmax_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_z))) +svfloat32_t svmax_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_z))) +svfloat16_t svmax_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_m))) +svint8_t svmax_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_m))) +svint32_t svmax_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_m))) +svint64_t svmax_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_m))) +svint16_t svmax_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x))) +svint8_t svmax_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x))) +svint32_t svmax_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x))) +svint64_t svmax_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x))) +svint16_t svmax_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_z))) +svint8_t svmax_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_z))) +svint32_t svmax_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_z))) +svint64_t svmax_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_z))) +svint16_t svmax_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_m))) +svuint8_t svmax_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_m))) +svuint32_t svmax_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_m))) +svuint64_t svmax_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_m))) +svuint16_t svmax_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x))) +svuint8_t svmax_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x))) +svuint32_t svmax_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x))) +svuint64_t svmax_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x))) +svuint16_t svmax_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_z))) +svuint8_t svmax_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_z))) +svuint32_t svmax_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_z))) +svuint64_t svmax_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_z))) +svuint16_t svmax_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_m))) +svfloat64_t svmaxnm_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_m))) +svfloat32_t svmaxnm_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_m))) +svfloat16_t svmaxnm_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_x))) +svfloat64_t svmaxnm_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_x))) +svfloat32_t svmaxnm_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_x))) 
+svfloat16_t svmaxnm_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_z))) +svfloat64_t svmaxnm_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_z))) +svfloat32_t svmaxnm_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_z))) +svfloat16_t svmaxnm_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_m))) +svfloat64_t svmaxnm_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_m))) +svfloat32_t svmaxnm_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_m))) +svfloat16_t svmaxnm_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x))) +svfloat64_t svmaxnm_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x))) +svfloat32_t svmaxnm_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x))) +svfloat16_t svmaxnm_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_z))) +svfloat64_t svmaxnm_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_z))) +svfloat32_t svmaxnm_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_z))) +svfloat16_t svmaxnm_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f64))) +float64_t svmaxnmv_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f32))) +float32_t 
svmaxnmv_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f16))) +float16_t svmaxnmv_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f64))) +float64_t svmaxv_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f32))) +float32_t svmaxv_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f16))) +float16_t svmaxv_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s8))) +int8_t svmaxv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s32))) +int32_t svmaxv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s64))) +int64_t svmaxv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s16))) +int16_t svmaxv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u8))) +uint8_t svmaxv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u32))) +uint32_t svmaxv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u64))) +uint64_t svmaxv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u16))) +uint16_t svmaxv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_m))) +svfloat64_t svmin_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_m))) +svfloat32_t svmin_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_m))) +svfloat16_t svmin_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_x))) +svfloat64_t svmin_n_f64_x(svbool_t, 
svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_x))) +svfloat32_t svmin_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_x))) +svfloat16_t svmin_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_z))) +svfloat64_t svmin_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_z))) +svfloat32_t svmin_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_z))) +svfloat16_t svmin_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_m))) +svint8_t svmin_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_m))) +svint32_t svmin_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_m))) +svint64_t svmin_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_m))) +svint16_t svmin_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_x))) +svint8_t svmin_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_x))) +svint32_t svmin_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_x))) +svint64_t svmin_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_x))) +svint16_t svmin_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_z))) +svint8_t svmin_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_z))) 
+svint32_t svmin_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_z))) +svint64_t svmin_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_z))) +svint16_t svmin_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_m))) +svuint8_t svmin_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_m))) +svuint32_t svmin_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_m))) +svuint64_t svmin_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_m))) +svuint16_t svmin_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_x))) +svuint8_t svmin_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_x))) +svuint32_t svmin_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_x))) +svuint64_t svmin_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_x))) +svuint16_t svmin_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_z))) +svuint8_t svmin_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_z))) +svuint32_t svmin_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_z))) +svuint64_t svmin_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_z))) +svuint16_t svmin_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_m))) +svfloat64_t svmin_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_m))) +svfloat32_t svmin_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_m))) +svfloat16_t svmin_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x))) +svfloat64_t svmin_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x))) +svfloat32_t svmin_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x))) +svfloat16_t svmin_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_z))) +svfloat64_t svmin_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_z))) +svfloat32_t svmin_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_z))) +svfloat16_t svmin_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_m))) +svint8_t svmin_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_m))) +svint32_t svmin_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_m))) +svint64_t svmin_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_m))) +svint16_t svmin_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x))) +svint8_t svmin_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x))) +svint32_t svmin_s32_x(svbool_t, 
svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x))) +svint64_t svmin_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x))) +svint16_t svmin_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_z))) +svint8_t svmin_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_z))) +svint32_t svmin_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_z))) +svint64_t svmin_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_z))) +svint16_t svmin_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_m))) +svuint8_t svmin_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_m))) +svuint32_t svmin_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_m))) +svuint64_t svmin_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_m))) +svuint16_t svmin_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x))) +svuint8_t svmin_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x))) +svuint32_t svmin_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x))) +svuint64_t svmin_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x))) +svuint16_t svmin_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_z))) +svuint8_t svmin_u8_z(svbool_t, svuint8_t, 
svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_z))) +svuint32_t svmin_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_z))) +svuint64_t svmin_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_z))) +svuint16_t svmin_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_m))) +svfloat64_t svminnm_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_m))) +svfloat32_t svminnm_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_m))) +svfloat16_t svminnm_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_x))) +svfloat64_t svminnm_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_x))) +svfloat32_t svminnm_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_x))) +svfloat16_t svminnm_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_z))) +svfloat64_t svminnm_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_z))) +svfloat32_t svminnm_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_z))) +svfloat16_t svminnm_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_m))) +svfloat64_t svminnm_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_m))) +svfloat32_t svminnm_f32_m(svbool_t, svfloat32_t, svfloat32_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_m))) +svfloat16_t svminnm_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x))) +svfloat64_t svminnm_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x))) +svfloat32_t svminnm_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x))) +svfloat16_t svminnm_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_z))) +svfloat64_t svminnm_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_z))) +svfloat32_t svminnm_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_z))) +svfloat16_t svminnm_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f64))) +float64_t svminnmv_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f32))) +float32_t svminnmv_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f16))) +float16_t svminnmv_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f64))) +float64_t svminv_f64(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f32))) +float32_t svminv_f32(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f16))) +float16_t svminv_f16(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s8))) +int8_t svminv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s32))) +int32_t svminv_s32(svbool_t, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s64))) +int64_t svminv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s16))) +int16_t svminv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u8))) +uint8_t svminv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u32))) +uint32_t svminv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u64))) +uint64_t svminv_u64(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u16))) +uint16_t svminv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_m))) +svfloat64_t svmla_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_m))) +svfloat32_t svmla_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_m))) +svfloat16_t svmla_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_x))) +svfloat64_t svmla_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_x))) +svfloat32_t svmla_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_x))) +svfloat16_t svmla_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_z))) +svfloat64_t svmla_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_z))) +svfloat32_t svmla_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_z))) +svfloat16_t svmla_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_m))) +svuint8_t svmla_n_u8_m(svbool_t, svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_m))) +svuint32_t svmla_n_u32_m(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_m))) +svuint64_t svmla_n_u64_m(svbool_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_m))) +svuint16_t svmla_n_u16_m(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_m))) +svint8_t svmla_n_s8_m(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_m))) +svint32_t svmla_n_s32_m(svbool_t, svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_m))) +svint64_t svmla_n_s64_m(svbool_t, svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_m))) +svint16_t svmla_n_s16_m(svbool_t, svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_x))) +svuint8_t svmla_n_u8_x(svbool_t, svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_x))) +svuint32_t svmla_n_u32_x(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_x))) +svuint64_t svmla_n_u64_x(svbool_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_x))) +svuint16_t svmla_n_u16_x(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_x))) +svint8_t 
svmla_n_s8_x(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_x))) +svint32_t svmla_n_s32_x(svbool_t, svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_x))) +svint64_t svmla_n_s64_x(svbool_t, svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_x))) +svint16_t svmla_n_s16_x(svbool_t, svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_z))) +svuint8_t svmla_n_u8_z(svbool_t, svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_z))) +svuint32_t svmla_n_u32_z(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_z))) +svuint64_t svmla_n_u64_z(svbool_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_z))) +svuint16_t svmla_n_u16_z(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_z))) +svint8_t svmla_n_s8_z(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_z))) +svint32_t svmla_n_s32_z(svbool_t, svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_z))) +svint64_t svmla_n_s64_z(svbool_t, svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_z))) +svint16_t svmla_n_s16_z(svbool_t, svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_m))) +svfloat64_t svmla_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_m))) +svfloat32_t svmla_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_m))) +svfloat16_t svmla_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_x))) +svfloat64_t svmla_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_x))) +svfloat32_t svmla_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_x))) +svfloat16_t svmla_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_z))) +svfloat64_t svmla_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_z))) +svfloat32_t svmla_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_z))) +svfloat16_t svmla_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_m))) +svuint8_t svmla_u8_m(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_m))) +svuint32_t svmla_u32_m(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_m))) +svuint64_t svmla_u64_m(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_m))) +svuint16_t svmla_u16_m(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_m))) +svint8_t svmla_s8_m(svbool_t, svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_m))) +svint32_t svmla_s32_m(svbool_t, svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_m))) +svint64_t 
svmla_s64_m(svbool_t, svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_m))) +svint16_t svmla_s16_m(svbool_t, svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_x))) +svuint8_t svmla_u8_x(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_x))) +svuint32_t svmla_u32_x(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_x))) +svuint64_t svmla_u64_x(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_x))) +svuint16_t svmla_u16_x(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_x))) +svint8_t svmla_s8_x(svbool_t, svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_x))) +svint32_t svmla_s32_x(svbool_t, svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_x))) +svint64_t svmla_s64_x(svbool_t, svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_x))) +svint16_t svmla_s16_x(svbool_t, svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_z))) +svuint8_t svmla_u8_z(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_z))) +svuint32_t svmla_u32_z(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_z))) +svuint64_t svmla_u64_z(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_z))) +svuint16_t svmla_u16_z(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_z))) +svint8_t svmla_s8_z(svbool_t, svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_z))) +svint32_t svmla_s32_z(svbool_t, svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_z))) +svint64_t svmla_s64_z(svbool_t, svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_z))) +svint16_t svmla_s16_z(svbool_t, svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f64))) +svfloat64_t svmla_lane_f64(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f32))) +svfloat32_t svmla_lane_f32(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f16))) +svfloat16_t svmla_lane_f16(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_m))) +svfloat64_t svmls_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_m))) +svfloat32_t svmls_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_m))) +svfloat16_t svmls_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_x))) +svfloat64_t svmls_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_x))) +svfloat32_t svmls_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_x))) +svfloat16_t svmls_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_z))) +svfloat64_t svmls_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_z))) +svfloat32_t svmls_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_z))) +svfloat16_t svmls_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_m))) +svuint8_t svmls_n_u8_m(svbool_t, svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_m))) +svuint32_t svmls_n_u32_m(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_m))) +svuint64_t svmls_n_u64_m(svbool_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_m))) +svuint16_t svmls_n_u16_m(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_m))) +svint8_t svmls_n_s8_m(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_m))) +svint32_t svmls_n_s32_m(svbool_t, svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_m))) +svint64_t svmls_n_s64_m(svbool_t, svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_m))) +svint16_t svmls_n_s16_m(svbool_t, svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_x))) +svuint8_t svmls_n_u8_x(svbool_t, svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_x))) +svuint32_t svmls_n_u32_x(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_x))) +svuint64_t 
svmls_n_u64_x(svbool_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_x))) +svuint16_t svmls_n_u16_x(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_x))) +svint8_t svmls_n_s8_x(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_x))) +svint32_t svmls_n_s32_x(svbool_t, svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_x))) +svint64_t svmls_n_s64_x(svbool_t, svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_x))) +svint16_t svmls_n_s16_x(svbool_t, svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_z))) +svuint8_t svmls_n_u8_z(svbool_t, svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_z))) +svuint32_t svmls_n_u32_z(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_z))) +svuint64_t svmls_n_u64_z(svbool_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_z))) +svuint16_t svmls_n_u16_z(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_z))) +svint8_t svmls_n_s8_z(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_z))) +svint32_t svmls_n_s32_z(svbool_t, svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_z))) +svint64_t svmls_n_s64_z(svbool_t, svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_z))) +svint16_t svmls_n_s16_z(svbool_t, svint16_t, svint16_t, int16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_m))) +svfloat64_t svmls_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_m))) +svfloat32_t svmls_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_m))) +svfloat16_t svmls_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_x))) +svfloat64_t svmls_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_x))) +svfloat32_t svmls_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_x))) +svfloat16_t svmls_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_z))) +svfloat64_t svmls_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_z))) +svfloat32_t svmls_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_z))) +svfloat16_t svmls_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_m))) +svuint8_t svmls_u8_m(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_m))) +svuint32_t svmls_u32_m(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_m))) +svuint64_t svmls_u64_m(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_m))) +svuint16_t svmls_u16_m(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_m))) +svint8_t svmls_s8_m(svbool_t, svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_m))) +svint32_t svmls_s32_m(svbool_t, svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_m))) +svint64_t svmls_s64_m(svbool_t, svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_m))) +svint16_t svmls_s16_m(svbool_t, svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_x))) +svuint8_t svmls_u8_x(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_x))) +svuint32_t svmls_u32_x(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_x))) +svuint64_t svmls_u64_x(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_x))) +svuint16_t svmls_u16_x(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_x))) +svint8_t svmls_s8_x(svbool_t, svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_x))) +svint32_t svmls_s32_x(svbool_t, svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_x))) +svint64_t svmls_s64_x(svbool_t, svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_x))) +svint16_t svmls_s16_x(svbool_t, svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_z))) +svuint8_t svmls_u8_z(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_z))) +svuint32_t svmls_u32_z(svbool_t, svuint32_t, svuint32_t, svuint32_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_z))) +svuint64_t svmls_u64_z(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_z))) +svuint16_t svmls_u16_z(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_z))) +svint8_t svmls_s8_z(svbool_t, svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_z))) +svint32_t svmls_s32_z(svbool_t, svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_z))) +svint64_t svmls_s64_z(svbool_t, svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_z))) +svint16_t svmls_s16_z(svbool_t, svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f64))) +svfloat64_t svmls_lane_f64(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f32))) +svfloat32_t svmls_lane_f32(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f16))) +svfloat16_t svmls_lane_f16(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmov_b_z))) +svbool_t svmov_b_z(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_m))) +svfloat64_t svmsb_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_m))) +svfloat32_t svmsb_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_m))) +svfloat16_t svmsb_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_x))) +svfloat64_t 
svmsb_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_x))) +svfloat32_t svmsb_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_x))) +svfloat16_t svmsb_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_z))) +svfloat64_t svmsb_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_z))) +svfloat32_t svmsb_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_z))) +svfloat16_t svmsb_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_m))) +svuint8_t svmsb_n_u8_m(svbool_t, svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_m))) +svuint32_t svmsb_n_u32_m(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_m))) +svuint64_t svmsb_n_u64_m(svbool_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_m))) +svuint16_t svmsb_n_u16_m(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_m))) +svint8_t svmsb_n_s8_m(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_m))) +svint32_t svmsb_n_s32_m(svbool_t, svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_m))) +svint64_t svmsb_n_s64_m(svbool_t, svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_m))) +svint16_t svmsb_n_s16_m(svbool_t, svint16_t, svint16_t, int16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_x))) +svuint8_t svmsb_n_u8_x(svbool_t, svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_x))) +svuint32_t svmsb_n_u32_x(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_x))) +svuint64_t svmsb_n_u64_x(svbool_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_x))) +svuint16_t svmsb_n_u16_x(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_x))) +svint8_t svmsb_n_s8_x(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_x))) +svint32_t svmsb_n_s32_x(svbool_t, svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_x))) +svint64_t svmsb_n_s64_x(svbool_t, svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_x))) +svint16_t svmsb_n_s16_x(svbool_t, svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_z))) +svuint8_t svmsb_n_u8_z(svbool_t, svuint8_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_z))) +svuint32_t svmsb_n_u32_z(svbool_t, svuint32_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_z))) +svuint64_t svmsb_n_u64_z(svbool_t, svuint64_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_z))) +svuint16_t svmsb_n_u16_z(svbool_t, svuint16_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_z))) +svint8_t svmsb_n_s8_z(svbool_t, svint8_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_z))) +svint32_t svmsb_n_s32_z(svbool_t, 
svint32_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_z))) +svint64_t svmsb_n_s64_z(svbool_t, svint64_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_z))) +svint16_t svmsb_n_s16_z(svbool_t, svint16_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_m))) +svfloat64_t svmsb_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_m))) +svfloat32_t svmsb_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_m))) +svfloat16_t svmsb_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_x))) +svfloat64_t svmsb_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_x))) +svfloat32_t svmsb_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_x))) +svfloat16_t svmsb_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_z))) +svfloat64_t svmsb_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_z))) +svfloat32_t svmsb_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_z))) +svfloat16_t svmsb_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_m))) +svuint8_t svmsb_u8_m(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_m))) +svuint32_t svmsb_u32_m(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_m))) +svuint64_t svmsb_u64_m(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_m))) +svuint16_t svmsb_u16_m(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_m))) +svint8_t svmsb_s8_m(svbool_t, svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_m))) +svint32_t svmsb_s32_m(svbool_t, svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_m))) +svint64_t svmsb_s64_m(svbool_t, svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_m))) +svint16_t svmsb_s16_m(svbool_t, svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_x))) +svuint8_t svmsb_u8_x(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_x))) +svuint32_t svmsb_u32_x(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_x))) +svuint64_t svmsb_u64_x(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_x))) +svuint16_t svmsb_u16_x(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_x))) +svint8_t svmsb_s8_x(svbool_t, svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_x))) +svint32_t svmsb_s32_x(svbool_t, svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_x))) +svint64_t svmsb_s64_x(svbool_t, svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_x))) +svint16_t svmsb_s16_x(svbool_t, svint16_t, svint16_t, 
svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_z))) +svuint8_t svmsb_u8_z(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_z))) +svuint32_t svmsb_u32_z(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_z))) +svuint64_t svmsb_u64_z(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_z))) +svuint16_t svmsb_u16_z(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_z))) +svint8_t svmsb_s8_z(svbool_t, svint8_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_z))) +svint32_t svmsb_s32_z(svbool_t, svint32_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_z))) +svint64_t svmsb_s64_z(svbool_t, svint64_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_z))) +svint16_t svmsb_s16_z(svbool_t, svint16_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_m))) +svfloat64_t svmul_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_m))) +svfloat32_t svmul_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_m))) +svfloat16_t svmul_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_x))) +svfloat64_t svmul_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_x))) +svfloat32_t svmul_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_x))) +svfloat16_t svmul_n_f16_x(svbool_t, svfloat16_t, float16_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_z))) +svfloat64_t svmul_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_z))) +svfloat32_t svmul_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_z))) +svfloat16_t svmul_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_m))) +svuint8_t svmul_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_m))) +svuint32_t svmul_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_m))) +svuint64_t svmul_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_m))) +svuint16_t svmul_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_m))) +svint8_t svmul_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_m))) +svint32_t svmul_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_m))) +svint64_t svmul_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_m))) +svint16_t svmul_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_x))) +svuint8_t svmul_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_x))) +svuint32_t svmul_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_x))) +svuint64_t svmul_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_x))) +svuint16_t 
svmul_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_x))) +svint8_t svmul_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_x))) +svint32_t svmul_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_x))) +svint64_t svmul_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_x))) +svint16_t svmul_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_z))) +svuint8_t svmul_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_z))) +svuint32_t svmul_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_z))) +svuint64_t svmul_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_z))) +svuint16_t svmul_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_z))) +svint8_t svmul_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_z))) +svint32_t svmul_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_z))) +svint64_t svmul_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_z))) +svint16_t svmul_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_m))) +svfloat64_t svmul_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_m))) +svfloat32_t svmul_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_m))) +svfloat16_t svmul_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_x))) +svfloat64_t svmul_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_x))) +svfloat32_t svmul_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_x))) +svfloat16_t svmul_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_z))) +svfloat64_t svmul_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_z))) +svfloat32_t svmul_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_z))) +svfloat16_t svmul_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_m))) +svuint8_t svmul_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_m))) +svuint32_t svmul_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_m))) +svuint64_t svmul_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_m))) +svuint16_t svmul_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_m))) +svint8_t svmul_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_m))) +svint32_t svmul_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_m))) +svint64_t svmul_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_m))) +svint16_t svmul_s16_m(svbool_t, 
svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_x))) +svuint8_t svmul_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_x))) +svuint32_t svmul_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_x))) +svuint64_t svmul_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_x))) +svuint16_t svmul_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_x))) +svint8_t svmul_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_x))) +svint32_t svmul_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_x))) +svint64_t svmul_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_x))) +svint16_t svmul_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_z))) +svuint8_t svmul_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_z))) +svuint32_t svmul_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_z))) +svuint64_t svmul_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_z))) +svuint16_t svmul_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_z))) +svint8_t svmul_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_z))) +svint32_t svmul_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_z))) +svint64_t svmul_s64_z(svbool_t, svint64_t, 
svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_z))) +svint16_t svmul_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f64))) +svfloat64_t svmul_lane_f64(svfloat64_t, svfloat64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f32))) +svfloat32_t svmul_lane_f32(svfloat32_t, svfloat32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f16))) +svfloat16_t svmul_lane_f16(svfloat16_t, svfloat16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_m))) +svint8_t svmulh_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_m))) +svint32_t svmulh_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_m))) +svint64_t svmulh_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_m))) +svint16_t svmulh_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_x))) +svint8_t svmulh_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_x))) +svint32_t svmulh_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_x))) +svint64_t svmulh_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_x))) +svint16_t svmulh_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_z))) +svint8_t svmulh_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_z))) +svint32_t svmulh_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_z))) 
+svint64_t svmulh_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_z))) +svint16_t svmulh_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_m))) +svuint8_t svmulh_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_m))) +svuint32_t svmulh_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_m))) +svuint64_t svmulh_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_m))) +svuint16_t svmulh_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_x))) +svuint8_t svmulh_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_x))) +svuint32_t svmulh_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_x))) +svuint64_t svmulh_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_x))) +svuint16_t svmulh_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_z))) +svuint8_t svmulh_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_z))) +svuint32_t svmulh_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_z))) +svuint64_t svmulh_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_z))) +svuint16_t svmulh_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_m))) +svint8_t svmulh_s8_m(svbool_t, svint8_t, svint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_m))) +svint32_t svmulh_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_m))) +svint64_t svmulh_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_m))) +svint16_t svmulh_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_x))) +svint8_t svmulh_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_x))) +svint32_t svmulh_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_x))) +svint64_t svmulh_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_x))) +svint16_t svmulh_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_z))) +svint8_t svmulh_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_z))) +svint32_t svmulh_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_z))) +svint64_t svmulh_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_z))) +svint16_t svmulh_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_m))) +svuint8_t svmulh_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_m))) +svuint32_t svmulh_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_m))) +svuint64_t svmulh_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_m))) +svuint16_t svmulh_u16_m(svbool_t, svuint16_t, 
svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_x))) +svuint8_t svmulh_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_x))) +svuint32_t svmulh_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_x))) +svuint64_t svmulh_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_x))) +svuint16_t svmulh_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_z))) +svuint8_t svmulh_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_z))) +svuint32_t svmulh_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_z))) +svuint64_t svmulh_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_z))) +svuint16_t svmulh_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_m))) +svfloat64_t svmulx_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_m))) +svfloat32_t svmulx_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_m))) +svfloat16_t svmulx_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_x))) +svfloat64_t svmulx_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_x))) +svfloat32_t svmulx_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_x))) +svfloat16_t svmulx_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_z))) +svfloat64_t svmulx_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_z))) +svfloat32_t svmulx_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_z))) +svfloat16_t svmulx_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_m))) +svfloat64_t svmulx_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_m))) +svfloat32_t svmulx_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_m))) +svfloat16_t svmulx_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_x))) +svfloat64_t svmulx_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_x))) +svfloat32_t svmulx_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_x))) +svfloat16_t svmulx_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_z))) +svfloat64_t svmulx_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_z))) +svfloat32_t svmulx_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_z))) +svfloat16_t svmulx_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnand_b_z))) +svbool_t svnand_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_m))) +svfloat64_t svneg_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_m))) +svfloat32_t svneg_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_m))) +svfloat16_t svneg_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_x))) +svfloat64_t svneg_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_x))) +svfloat32_t svneg_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_x))) +svfloat16_t svneg_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_z))) +svfloat64_t svneg_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_z))) +svfloat32_t svneg_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_z))) +svfloat16_t svneg_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_m))) +svint8_t svneg_s8_m(svint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_m))) +svint32_t svneg_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_m))) +svint64_t svneg_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_m))) +svint16_t svneg_s16_m(svint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_x))) +svint8_t svneg_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_x))) +svint32_t svneg_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_x))) +svint64_t svneg_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_x))) +svint16_t 
svneg_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_z))) +svint8_t svneg_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_z))) +svint32_t svneg_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_z))) +svint64_t svneg_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_z))) +svint16_t svneg_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_m))) +svfloat64_t svnmad_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_m))) +svfloat32_t svnmad_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_m))) +svfloat16_t svnmad_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_x))) +svfloat64_t svnmad_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_x))) +svfloat32_t svnmad_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_x))) +svfloat16_t svnmad_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_z))) +svfloat64_t svnmad_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_z))) +svfloat32_t svnmad_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_z))) +svfloat16_t svnmad_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_m))) +svfloat64_t 
svnmad_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_m))) +svfloat32_t svnmad_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_m))) +svfloat16_t svnmad_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_x))) +svfloat64_t svnmad_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_x))) +svfloat32_t svnmad_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_x))) +svfloat16_t svnmad_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_z))) +svfloat64_t svnmad_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_z))) +svfloat32_t svnmad_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_z))) +svfloat16_t svnmad_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_m))) +svfloat64_t svnmla_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_m))) +svfloat32_t svnmla_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_m))) +svfloat16_t svnmla_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_x))) +svfloat64_t svnmla_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_x))) +svfloat32_t 
svnmla_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_x))) +svfloat16_t svnmla_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_z))) +svfloat64_t svnmla_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_z))) +svfloat32_t svnmla_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_z))) +svfloat16_t svnmla_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_m))) +svfloat64_t svnmla_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_m))) +svfloat32_t svnmla_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_m))) +svfloat16_t svnmla_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_x))) +svfloat64_t svnmla_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_x))) +svfloat32_t svnmla_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_x))) +svfloat16_t svnmla_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_z))) +svfloat64_t svnmla_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_z))) +svfloat32_t svnmla_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_z))) +svfloat16_t 
svnmla_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_m))) +svfloat64_t svnmls_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_m))) +svfloat32_t svnmls_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_m))) +svfloat16_t svnmls_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_x))) +svfloat64_t svnmls_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_x))) +svfloat32_t svnmls_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_x))) +svfloat16_t svnmls_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_z))) +svfloat64_t svnmls_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_z))) +svfloat32_t svnmls_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_z))) +svfloat16_t svnmls_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_m))) +svfloat64_t svnmls_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_m))) +svfloat32_t svnmls_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_m))) +svfloat16_t svnmls_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_x))) 
+svfloat64_t svnmls_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_x))) +svfloat32_t svnmls_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_x))) +svfloat16_t svnmls_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_z))) +svfloat64_t svnmls_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_z))) +svfloat32_t svnmls_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_z))) +svfloat16_t svnmls_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_m))) +svfloat64_t svnmsb_n_f64_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_m))) +svfloat32_t svnmsb_n_f32_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_m))) +svfloat16_t svnmsb_n_f16_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_x))) +svfloat64_t svnmsb_n_f64_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_x))) +svfloat32_t svnmsb_n_f32_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_x))) +svfloat16_t svnmsb_n_f16_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_z))) +svfloat64_t svnmsb_n_f64_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_z))) +svfloat32_t svnmsb_n_f32_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_z))) +svfloat16_t svnmsb_n_f16_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_m))) +svfloat64_t svnmsb_f64_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_m))) +svfloat32_t svnmsb_f32_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_m))) +svfloat16_t svnmsb_f16_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_x))) +svfloat64_t svnmsb_f64_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_x))) +svfloat32_t svnmsb_f32_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_x))) +svfloat16_t svnmsb_f16_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_z))) +svfloat64_t svnmsb_f64_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_z))) +svfloat32_t svnmsb_f32_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_z))) +svfloat16_t svnmsb_f16_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnor_b_z))) +svbool_t svnor_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_b_z))) +svbool_t svnot_b_z(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_m))) 
+svuint8_t svnot_u8_m(svuint8_t, svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_m))) +svuint32_t svnot_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_m))) +svuint64_t svnot_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_m))) +svuint16_t svnot_u16_m(svuint16_t, svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_m))) +svint8_t svnot_s8_m(svint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_m))) +svint32_t svnot_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_m))) +svint64_t svnot_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_m))) +svint16_t svnot_s16_m(svint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_x))) +svuint8_t svnot_u8_x(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_x))) +svuint32_t svnot_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_x))) +svuint64_t svnot_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_x))) +svuint16_t svnot_u16_x(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_x))) +svint8_t svnot_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_x))) +svint32_t svnot_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_x))) +svint64_t svnot_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_x))) +svint16_t svnot_s16_x(svbool_t, svint16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_z))) +svuint8_t svnot_u8_z(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_z))) +svuint32_t svnot_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_z))) +svuint64_t svnot_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_z))) +svuint16_t svnot_u16_z(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_z))) +svint8_t svnot_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_z))) +svint32_t svnot_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_z))) +svint64_t svnot_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_z))) +svint16_t svnot_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorn_b_z))) +svbool_t svorn_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_b_z))) +svbool_t svorr_b_z(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_m))) +svuint8_t svorr_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_m))) +svuint32_t svorr_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_m))) +svuint64_t svorr_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_m))) +svuint16_t svorr_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_m))) +svint8_t svorr_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_m))) +svint32_t svorr_n_s32_m(svbool_t, 
svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_m))) +svint64_t svorr_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_m))) +svint16_t svorr_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_x))) +svuint8_t svorr_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_x))) +svuint32_t svorr_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_x))) +svuint64_t svorr_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_x))) +svuint16_t svorr_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_x))) +svint8_t svorr_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_x))) +svint32_t svorr_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_x))) +svint64_t svorr_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_x))) +svint16_t svorr_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_z))) +svuint8_t svorr_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_z))) +svuint32_t svorr_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_z))) +svuint64_t svorr_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_z))) +svuint16_t svorr_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_z))) +svint8_t 
svorr_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_z))) +svint32_t svorr_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_z))) +svint64_t svorr_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_z))) +svint16_t svorr_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_m))) +svuint8_t svorr_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_m))) +svuint32_t svorr_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_m))) +svuint64_t svorr_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_m))) +svuint16_t svorr_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_m))) +svint8_t svorr_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_m))) +svint32_t svorr_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_m))) +svint64_t svorr_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_m))) +svint16_t svorr_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_x))) +svuint8_t svorr_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_x))) +svuint32_t svorr_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_x))) +svuint64_t svorr_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_x))) +svuint16_t 
svorr_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_x))) +svint8_t svorr_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_x))) +svint32_t svorr_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_x))) +svint64_t svorr_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_x))) +svint16_t svorr_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_z))) +svuint8_t svorr_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_z))) +svuint32_t svorr_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_z))) +svuint64_t svorr_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_z))) +svuint16_t svorr_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_z))) +svint8_t svorr_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_z))) +svint32_t svorr_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_z))) +svint64_t svorr_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_z))) +svint16_t svorr_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u8))) +uint8_t svorv_u8(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u32))) +uint32_t svorv_u32(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u64))) +uint64_t svorv_u64(svbool_t, svuint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u16))) +uint16_t svorv_u16(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s8))) +int8_t svorv_s8(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s32))) +int32_t svorv_s32(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s64))) +int64_t svorv_s64(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s16))) +int16_t svorv_s16(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfalse_b))) +svbool_t svpfalse_b(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfirst_b))) +svbool_t svpfirst_b(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b8))) +svbool_t svpnext_b8(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b32))) +svbool_t svpnext_b32(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b64))) +svbool_t svpnext_b64(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpnext_b16))) +svbool_t svpnext_b16(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb))) +void svprfb(svbool_t, void const *, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfb_vnum))) +void svprfb_vnum(svbool_t, void const *, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd))) +void svprfd(svbool_t, void const *, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfd_vnum))) +void svprfd_vnum(svbool_t, void const *, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh))) +void svprfh(svbool_t, void const *, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfh_vnum))) +void 
svprfh_vnum(svbool_t, void const *, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw))) +void svprfw(svbool_t, void const *, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svprfw_vnum))) +void svprfw_vnum(svbool_t, void const *, int64_t, enum svprfop); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptest_any))) +bool svptest_any(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptest_first))) +bool svptest_first(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptest_last))) +bool svptest_last(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b8))) +svbool_t svptrue_pat_b8(enum svpattern); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b32))) +svbool_t svptrue_pat_b32(enum svpattern); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b64))) +svbool_t svptrue_pat_b64(enum svpattern); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_pat_b16))) +svbool_t svptrue_pat_b16(enum svpattern); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b8))) +svbool_t svptrue_b8(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b32))) +svbool_t svptrue_b32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b64))) +svbool_t svptrue_b64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svptrue_b16))) +svbool_t svptrue_b16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8))) +svint8_t svqadd_n_s8(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32))) +svint32_t svqadd_n_s32(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64))) +svint64_t svqadd_n_s64(svint64_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16))) +svint16_t svqadd_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8))) +svuint8_t svqadd_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32))) +svuint32_t svqadd_n_u32(svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64))) +svuint64_t svqadd_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16))) +svuint16_t svqadd_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8))) +svint8_t svqadd_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32))) +svint32_t svqadd_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64))) +svint64_t svqadd_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16))) +svint16_t svqadd_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8))) +svuint8_t svqadd_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32))) +svuint32_t svqadd_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64))) +svuint64_t svqadd_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16))) +svuint16_t svqadd_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s32))) +int32_t svqdecb_n_s32(int32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s64))) +int64_t svqdecb_n_s64(int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u32))) +uint32_t svqdecb_n_u32(uint32_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u64))) +uint64_t svqdecb_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s32))) +int32_t svqdecb_pat_n_s32(int32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s64))) +int64_t svqdecb_pat_n_s64(int64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u32))) +uint32_t svqdecb_pat_n_u32(uint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u64))) +uint64_t svqdecb_pat_n_u64(uint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s32))) +int32_t svqdecd_n_s32(int32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s64))) +int64_t svqdecd_n_s64(int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u32))) +uint32_t svqdecd_n_u32(uint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u64))) +uint64_t svqdecd_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_s64))) +svint64_t svqdecd_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_u64))) +svuint64_t svqdecd_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s32))) +int32_t svqdecd_pat_n_s32(int32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s64))) +int64_t svqdecd_pat_n_s64(int64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u32))) +uint32_t svqdecd_pat_n_u32(uint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u64))) +uint64_t svqdecd_pat_n_u64(uint64_t, enum 
svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_s64))) +svint64_t svqdecd_pat_s64(svint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_u64))) +svuint64_t svqdecd_pat_u64(svuint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s32))) +int32_t svqdech_n_s32(int32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s64))) +int64_t svqdech_n_s64(int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u32))) +uint32_t svqdech_n_u32(uint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u64))) +uint64_t svqdech_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_s16))) +svint16_t svqdech_s16(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_u16))) +svuint16_t svqdech_u16(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s32))) +int32_t svqdech_pat_n_s32(int32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s64))) +int64_t svqdech_pat_n_s64(int64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u32))) +uint32_t svqdech_pat_n_u32(uint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u64))) +uint64_t svqdech_pat_n_u64(uint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_s16))) +svint16_t svqdech_pat_s16(svint16_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_u16))) +svuint16_t svqdech_pat_u16(svuint16_t, enum svpattern, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b8))) +int32_t svqdecp_n_s32_b8(int32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b32))) +int32_t svqdecp_n_s32_b32(int32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b64))) +int32_t svqdecp_n_s32_b64(int32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b16))) +int32_t svqdecp_n_s32_b16(int32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b8))) +int64_t svqdecp_n_s64_b8(int64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b32))) +int64_t svqdecp_n_s64_b32(int64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b64))) +int64_t svqdecp_n_s64_b64(int64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b16))) +int64_t svqdecp_n_s64_b16(int64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b8))) +uint32_t svqdecp_n_u32_b8(uint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b32))) +uint32_t svqdecp_n_u32_b32(uint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b64))) +uint32_t svqdecp_n_u32_b64(uint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b16))) +uint32_t svqdecp_n_u32_b16(uint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b8))) +uint64_t svqdecp_n_u64_b8(uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b32))) +uint64_t svqdecp_n_u64_b32(uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b64))) +uint64_t svqdecp_n_u64_b64(uint64_t, svbool_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b16))) +uint64_t svqdecp_n_u64_b16(uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s32))) +svint32_t svqdecp_s32(svint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s64))) +svint64_t svqdecp_s64(svint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s16))) +svint16_t svqdecp_s16(svint16_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u32))) +svuint32_t svqdecp_u32(svuint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u64))) +svuint64_t svqdecp_u64(svuint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u16))) +svuint16_t svqdecp_u16(svuint16_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s32))) +int32_t svqdecw_n_s32(int32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s64))) +int64_t svqdecw_n_s64(int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u32))) +uint32_t svqdecw_n_u32(uint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u64))) +uint64_t svqdecw_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_s32))) +svint32_t svqdecw_s32(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_u32))) +svuint32_t svqdecw_u32(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s32))) +int32_t svqdecw_pat_n_s32(int32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s64))) +int64_t svqdecw_pat_n_s64(int64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u32))) +uint32_t svqdecw_pat_n_u32(uint32_t, 
enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u64))) +uint64_t svqdecw_pat_n_u64(uint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_s32))) +svint32_t svqdecw_pat_s32(svint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_u32))) +svuint32_t svqdecw_pat_u32(svuint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s32))) +int32_t svqincb_n_s32(int32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s64))) +int64_t svqincb_n_s64(int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u32))) +uint32_t svqincb_n_u32(uint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u64))) +uint64_t svqincb_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s32))) +int32_t svqincb_pat_n_s32(int32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s64))) +int64_t svqincb_pat_n_s64(int64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u32))) +uint32_t svqincb_pat_n_u32(uint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u64))) +uint64_t svqincb_pat_n_u64(uint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s32))) +int32_t svqincd_n_s32(int32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s64))) +int64_t svqincd_n_s64(int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u32))) +uint32_t svqincd_n_u32(uint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u64))) +uint64_t 
svqincd_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_s64))) +svint64_t svqincd_s64(svint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_u64))) +svuint64_t svqincd_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s32))) +int32_t svqincd_pat_n_s32(int32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s64))) +int64_t svqincd_pat_n_s64(int64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u32))) +uint32_t svqincd_pat_n_u32(uint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u64))) +uint64_t svqincd_pat_n_u64(uint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_s64))) +svint64_t svqincd_pat_s64(svint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_u64))) +svuint64_t svqincd_pat_u64(svuint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s32))) +int32_t svqinch_n_s32(int32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s64))) +int64_t svqinch_n_s64(int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u32))) +uint32_t svqinch_n_u32(uint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u64))) +uint64_t svqinch_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_s16))) +svint16_t svqinch_s16(svint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_u16))) +svuint16_t svqinch_u16(svuint16_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s32))) +int32_t 
svqinch_pat_n_s32(int32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s64))) +int64_t svqinch_pat_n_s64(int64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u32))) +uint32_t svqinch_pat_n_u32(uint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u64))) +uint64_t svqinch_pat_n_u64(uint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_s16))) +svint16_t svqinch_pat_s16(svint16_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_u16))) +svuint16_t svqinch_pat_u16(svuint16_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b8))) +int32_t svqincp_n_s32_b8(int32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b32))) +int32_t svqincp_n_s32_b32(int32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b64))) +int32_t svqincp_n_s32_b64(int32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b16))) +int32_t svqincp_n_s32_b16(int32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b8))) +int64_t svqincp_n_s64_b8(int64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b32))) +int64_t svqincp_n_s64_b32(int64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b64))) +int64_t svqincp_n_s64_b64(int64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b16))) +int64_t svqincp_n_s64_b16(int64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b8))) +uint32_t svqincp_n_u32_b8(uint32_t, svbool_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b32))) +uint32_t svqincp_n_u32_b32(uint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b64))) +uint32_t svqincp_n_u32_b64(uint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b16))) +uint32_t svqincp_n_u32_b16(uint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b8))) +uint64_t svqincp_n_u64_b8(uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b32))) +uint64_t svqincp_n_u64_b32(uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b64))) +uint64_t svqincp_n_u64_b64(uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b16))) +uint64_t svqincp_n_u64_b16(uint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s32))) +svint32_t svqincp_s32(svint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s64))) +svint64_t svqincp_s64(svint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s16))) +svint16_t svqincp_s16(svint16_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u32))) +svuint32_t svqincp_u32(svuint32_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u64))) +svuint64_t svqincp_u64(svuint64_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u16))) +svuint16_t svqincp_u16(svuint16_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s32))) +int32_t svqincw_n_s32(int32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s64))) +int64_t svqincw_n_s64(int64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u32))) +uint32_t svqincw_n_u32(uint32_t, 
uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u64))) +uint64_t svqincw_n_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_s32))) +svint32_t svqincw_s32(svint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_u32))) +svuint32_t svqincw_u32(svuint32_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s32))) +int32_t svqincw_pat_n_s32(int32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s64))) +int64_t svqincw_pat_n_s64(int64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u32))) +uint32_t svqincw_pat_n_u32(uint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u64))) +uint64_t svqincw_pat_n_u64(uint64_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_s32))) +svint32_t svqincw_pat_s32(svint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_u32))) +svuint32_t svqincw_pat_u32(svuint32_t, enum svpattern, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8))) +svint8_t svqsub_n_s8(svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32))) +svint32_t svqsub_n_s32(svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64))) +svint64_t svqsub_n_s64(svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16))) +svint16_t svqsub_n_s16(svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8))) +svuint8_t svqsub_n_u8(svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32))) +svuint32_t svqsub_n_u32(svuint32_t, uint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64))) +svuint64_t svqsub_n_u64(svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16))) +svuint16_t svqsub_n_u16(svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8))) +svint8_t svqsub_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32))) +svint32_t svqsub_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64))) +svint64_t svqsub_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16))) +svint16_t svqsub_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8))) +svuint8_t svqsub_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32))) +svuint32_t svqsub_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64))) +svuint64_t svqsub_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16))) +svuint16_t svqsub_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_m))) +svuint8_t svrbit_u8_m(svuint8_t, svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_m))) +svuint32_t svrbit_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_m))) +svuint64_t svrbit_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_m))) +svuint16_t svrbit_u16_m(svuint16_t, svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_m))) +svint8_t svrbit_s8_m(svint8_t, svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_m))) +svint32_t svrbit_s32_m(svint32_t, svbool_t, 
svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_m))) +svint64_t svrbit_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_m))) +svint16_t svrbit_s16_m(svint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_x))) +svuint8_t svrbit_u8_x(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_x))) +svuint32_t svrbit_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_x))) +svuint64_t svrbit_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_x))) +svuint16_t svrbit_u16_x(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_x))) +svint8_t svrbit_s8_x(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_x))) +svint32_t svrbit_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_x))) +svint64_t svrbit_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_x))) +svint16_t svrbit_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_z))) +svuint8_t svrbit_u8_z(svbool_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_z))) +svuint32_t svrbit_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_z))) +svuint64_t svrbit_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_z))) +svuint16_t svrbit_u16_z(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_z))) +svint8_t svrbit_s8_z(svbool_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_z))) +svint32_t svrbit_s32_z(svbool_t, 
svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_z))) +svint64_t svrbit_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_z))) +svint16_t svrbit_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f64))) +svfloat64_t svrecpe_f64(svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f32))) +svfloat32_t svrecpe_f32(svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f16))) +svfloat16_t svrecpe_f16(svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f64))) +svfloat64_t svrecps_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f32))) +svfloat32_t svrecps_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f16))) +svfloat16_t svrecps_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_m))) +svfloat64_t svrecpx_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_m))) +svfloat32_t svrecpx_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_m))) +svfloat16_t svrecpx_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_x))) +svfloat64_t svrecpx_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_x))) +svfloat32_t svrecpx_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_x))) +svfloat16_t svrecpx_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_z))) +svfloat64_t svrecpx_f64_z(svbool_t, svfloat64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_z))) +svfloat32_t svrecpx_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_z))) +svfloat16_t svrecpx_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u8))) +svuint8_t svrev_u8(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u32))) +svuint32_t svrev_u32(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u64))) +svuint64_t svrev_u64(svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u16))) +svuint16_t svrev_u16(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s8))) +svint8_t svrev_s8(svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f64))) +svfloat64_t svrev_f64(svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f32))) +svfloat32_t svrev_f32(svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f16))) +svfloat16_t svrev_f16(svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s32))) +svint32_t svrev_s32(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s64))) +svint64_t svrev_s64(svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s16))) +svint16_t svrev_s16(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_b16))) +svbool_t svrev_b16(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_b32))) +svbool_t svrev_b32(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_b64))) +svbool_t svrev_b64(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_b8))) +svbool_t svrev_b8(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_m))) +svuint32_t svrevb_u32_m(svuint32_t, svbool_t, svuint32_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_m))) +svuint64_t svrevb_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_m))) +svuint16_t svrevb_u16_m(svuint16_t, svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_m))) +svint32_t svrevb_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_m))) +svint64_t svrevb_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_m))) +svint16_t svrevb_s16_m(svint16_t, svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_x))) +svuint32_t svrevb_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_x))) +svuint64_t svrevb_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_x))) +svuint16_t svrevb_u16_x(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_x))) +svint32_t svrevb_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_x))) +svint64_t svrevb_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_x))) +svint16_t svrevb_s16_x(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_z))) +svuint32_t svrevb_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_z))) +svuint64_t svrevb_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_z))) +svuint16_t svrevb_u16_z(svbool_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_z))) +svint32_t svrevb_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_z))) 
+svint64_t svrevb_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_z))) +svint16_t svrevb_s16_z(svbool_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_m))) +svuint32_t svrevh_u32_m(svuint32_t, svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_m))) +svuint64_t svrevh_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_m))) +svint32_t svrevh_s32_m(svint32_t, svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_m))) +svint64_t svrevh_s64_m(svint64_t, svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_x))) +svuint32_t svrevh_u32_x(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_x))) +svuint64_t svrevh_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_x))) +svint32_t svrevh_s32_x(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_x))) +svint64_t svrevh_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_z))) +svuint32_t svrevh_u32_z(svbool_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_z))) +svuint64_t svrevh_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_z))) +svint32_t svrevh_s32_z(svbool_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_z))) +svint64_t svrevh_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_m))) +svuint64_t svrevw_u64_m(svuint64_t, svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_m))) +svint64_t svrevw_s64_m(svint64_t, svbool_t, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_x))) +svuint64_t svrevw_u64_x(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_x))) +svint64_t svrevw_s64_x(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_z))) +svuint64_t svrevw_u64_z(svbool_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_z))) +svint64_t svrevw_s64_z(svbool_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_m))) +svfloat64_t svrinta_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_m))) +svfloat32_t svrinta_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_m))) +svfloat16_t svrinta_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_x))) +svfloat64_t svrinta_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x))) +svfloat32_t svrinta_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_x))) +svfloat16_t svrinta_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_z))) +svfloat64_t svrinta_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_z))) +svfloat32_t svrinta_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_z))) +svfloat16_t svrinta_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_m))) +svfloat64_t svrinti_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_m))) +svfloat32_t svrinti_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_m))) +svfloat16_t svrinti_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_x))) +svfloat64_t svrinti_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_x))) +svfloat32_t svrinti_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_x))) +svfloat16_t svrinti_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_z))) +svfloat64_t svrinti_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_z))) +svfloat32_t svrinti_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_z))) +svfloat16_t svrinti_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_m))) +svfloat64_t svrintm_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_m))) +svfloat32_t svrintm_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_m))) +svfloat16_t svrintm_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_x))) +svfloat64_t svrintm_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x))) +svfloat32_t svrintm_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_x))) +svfloat16_t svrintm_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_z))) +svfloat64_t svrintm_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_z))) +svfloat32_t svrintm_f32_z(svbool_t, svfloat32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_z))) +svfloat16_t svrintm_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_m))) +svfloat64_t svrintn_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_m))) +svfloat32_t svrintn_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_m))) +svfloat16_t svrintn_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_x))) +svfloat64_t svrintn_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x))) +svfloat32_t svrintn_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_x))) +svfloat16_t svrintn_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_z))) +svfloat64_t svrintn_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_z))) +svfloat32_t svrintn_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_z))) +svfloat16_t svrintn_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_m))) +svfloat64_t svrintp_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_m))) +svfloat32_t svrintp_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_m))) +svfloat16_t svrintp_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_x))) +svfloat64_t svrintp_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x))) +svfloat32_t svrintp_f32_x(svbool_t, 
svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_x))) +svfloat16_t svrintp_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_z))) +svfloat64_t svrintp_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_z))) +svfloat32_t svrintp_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_z))) +svfloat16_t svrintp_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_m))) +svfloat64_t svrintx_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_m))) +svfloat32_t svrintx_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_m))) +svfloat16_t svrintx_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_x))) +svfloat64_t svrintx_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_x))) +svfloat32_t svrintx_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_x))) +svfloat16_t svrintx_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_z))) +svfloat64_t svrintx_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_z))) +svfloat32_t svrintx_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_z))) +svfloat16_t svrintx_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_m))) +svfloat64_t svrintz_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_m))) +svfloat32_t svrintz_f32_m(svfloat32_t, svbool_t, 
svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_m))) +svfloat16_t svrintz_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_x))) +svfloat64_t svrintz_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_x))) +svfloat32_t svrintz_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_x))) +svfloat16_t svrintz_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_z))) +svfloat64_t svrintz_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_z))) +svfloat32_t svrintz_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_z))) +svfloat16_t svrintz_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f64))) +svfloat64_t svrsqrte_f64(svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f32))) +svfloat32_t svrsqrte_f32(svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f16))) +svfloat16_t svrsqrte_f16(svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f64))) +svfloat64_t svrsqrts_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f32))) +svfloat32_t svrsqrts_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f16))) +svfloat16_t svrsqrts_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_m))) +svfloat64_t svscale_n_f64_m(svbool_t, svfloat64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_m))) +svfloat32_t svscale_n_f32_m(svbool_t, svfloat32_t, int32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_m))) +svfloat16_t svscale_n_f16_m(svbool_t, svfloat16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_x))) +svfloat64_t svscale_n_f64_x(svbool_t, svfloat64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_x))) +svfloat32_t svscale_n_f32_x(svbool_t, svfloat32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_x))) +svfloat16_t svscale_n_f16_x(svbool_t, svfloat16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_z))) +svfloat64_t svscale_n_f64_z(svbool_t, svfloat64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_z))) +svfloat32_t svscale_n_f32_z(svbool_t, svfloat32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_z))) +svfloat16_t svscale_n_f16_z(svbool_t, svfloat16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_m))) +svfloat64_t svscale_f64_m(svbool_t, svfloat64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_m))) +svfloat32_t svscale_f32_m(svbool_t, svfloat32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_m))) +svfloat16_t svscale_f16_m(svbool_t, svfloat16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_x))) +svfloat64_t svscale_f64_x(svbool_t, svfloat64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_x))) +svfloat32_t svscale_f32_x(svbool_t, svfloat32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_x))) +svfloat16_t svscale_f16_x(svbool_t, svfloat16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_z))) +svfloat64_t svscale_f64_z(svbool_t, svfloat64_t, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_z))) +svfloat32_t svscale_f32_z(svbool_t, svfloat32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_z))) +svfloat16_t svscale_f16_z(svbool_t, svfloat16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_b))) +svbool_t svsel_b(svbool_t, svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8))) +svuint8_t svsel_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32))) +svuint32_t svsel_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64))) +svuint64_t svsel_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16))) +svuint16_t svsel_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8))) +svint8_t svsel_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64))) +svfloat64_t svsel_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32))) +svfloat32_t svsel_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16))) +svfloat16_t svsel_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32))) +svint32_t svsel_s32(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64))) +svint64_t svsel_s64(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16))) +svint16_t svsel_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u8))) +svuint8x2_t svset2_u8(svuint8x2_t, uint64_t, svuint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u32))) +svuint32x2_t svset2_u32(svuint32x2_t, uint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u64))) +svuint64x2_t svset2_u64(svuint64x2_t, uint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u16))) +svuint16x2_t svset2_u16(svuint16x2_t, uint64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s8))) +svint8x2_t svset2_s8(svint8x2_t, uint64_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f64))) +svfloat64x2_t svset2_f64(svfloat64x2_t, uint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f32))) +svfloat32x2_t svset2_f32(svfloat32x2_t, uint64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f16))) +svfloat16x2_t svset2_f16(svfloat16x2_t, uint64_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s32))) +svint32x2_t svset2_s32(svint32x2_t, uint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s64))) +svint64x2_t svset2_s64(svint64x2_t, uint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s16))) +svint16x2_t svset2_s16(svint16x2_t, uint64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u8))) +svuint8x3_t svset3_u8(svuint8x3_t, uint64_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u32))) +svuint32x3_t svset3_u32(svuint32x3_t, uint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u64))) +svuint64x3_t svset3_u64(svuint64x3_t, uint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u16))) +svuint16x3_t svset3_u16(svuint16x3_t, uint64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s8))) +svint8x3_t 
svset3_s8(svint8x3_t, uint64_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f64))) +svfloat64x3_t svset3_f64(svfloat64x3_t, uint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f32))) +svfloat32x3_t svset3_f32(svfloat32x3_t, uint64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f16))) +svfloat16x3_t svset3_f16(svfloat16x3_t, uint64_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s32))) +svint32x3_t svset3_s32(svint32x3_t, uint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s64))) +svint64x3_t svset3_s64(svint64x3_t, uint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s16))) +svint16x3_t svset3_s16(svint16x3_t, uint64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u8))) +svuint8x4_t svset4_u8(svuint8x4_t, uint64_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u32))) +svuint32x4_t svset4_u32(svuint32x4_t, uint64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u64))) +svuint64x4_t svset4_u64(svuint64x4_t, uint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u16))) +svuint16x4_t svset4_u16(svuint16x4_t, uint64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s8))) +svint8x4_t svset4_s8(svint8x4_t, uint64_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f64))) +svfloat64x4_t svset4_f64(svfloat64x4_t, uint64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f32))) +svfloat32x4_t svset4_f32(svfloat32x4_t, uint64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f16))) +svfloat16x4_t svset4_f16(svfloat16x4_t, uint64_t, svfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s32))) +svint32x4_t svset4_s32(svint32x4_t, uint64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s64))) +svint64x4_t svset4_s64(svint64x4_t, uint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s16))) +svint16x4_t svset4_s16(svint16x4_t, uint64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u8))) +svuint8_t svsplice_u8(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u32))) +svuint32_t svsplice_u32(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u64))) +svuint64_t svsplice_u64(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u16))) +svuint16_t svsplice_u16(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s8))) +svint8_t svsplice_s8(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f64))) +svfloat64_t svsplice_f64(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f32))) +svfloat32_t svsplice_f32(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f16))) +svfloat16_t svsplice_f16(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s32))) +svint32_t svsplice_s32(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s64))) +svint64_t svsplice_s64(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s16))) +svint16_t svsplice_s16(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_m))) +svfloat64_t 
svsqrt_f64_m(svfloat64_t, svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_m))) +svfloat32_t svsqrt_f32_m(svfloat32_t, svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_m))) +svfloat16_t svsqrt_f16_m(svfloat16_t, svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_x))) +svfloat64_t svsqrt_f64_x(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_x))) +svfloat32_t svsqrt_f32_x(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_x))) +svfloat16_t svsqrt_f16_x(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_z))) +svfloat64_t svsqrt_f64_z(svbool_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_z))) +svfloat32_t svsqrt_f32_z(svbool_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_z))) +svfloat16_t svsqrt_f16_z(svbool_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8))) +void svst1_u8(svbool_t, uint8_t *, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32))) +void svst1_u32(svbool_t, uint32_t *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64))) +void svst1_u64(svbool_t, uint64_t *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16))) +void svst1_u16(svbool_t, uint16_t *, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8))) +void svst1_s8(svbool_t, int8_t *, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64))) +void svst1_f64(svbool_t, float64_t *, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32))) +void svst1_f32(svbool_t, float32_t *, svfloat32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16))) +void svst1_f16(svbool_t, float16_t *, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32))) +void svst1_s32(svbool_t, int32_t *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64))) +void svst1_s64(svbool_t, int64_t *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16))) +void svst1_s16(svbool_t, int16_t *, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8))) +void svst1_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32))) +void svst1_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64))) +void svst1_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16))) +void svst1_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8))) +void svst1_vnum_s8(svbool_t, int8_t *, int64_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64))) +void svst1_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32))) +void svst1_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16))) +void svst1_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32))) +void svst1_vnum_s32(svbool_t, int32_t *, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64))) +void svst1_vnum_s64(svbool_t, int64_t *, int64_t, svint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16))) +void svst1_vnum_s16(svbool_t, int16_t *, int64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s32))) +void svst1b_s32(svbool_t, int8_t *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s64))) +void svst1b_s64(svbool_t, int8_t *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s16))) +void svst1b_s16(svbool_t, int8_t *, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u32))) +void svst1b_u32(svbool_t, uint8_t *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u64))) +void svst1b_u64(svbool_t, uint8_t *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u16))) +void svst1b_u16(svbool_t, uint8_t *, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s32))) +void svst1b_vnum_s32(svbool_t, int8_t *, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s64))) +void svst1b_vnum_s64(svbool_t, int8_t *, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s16))) +void svst1b_vnum_s16(svbool_t, int8_t *, int64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u32))) +void svst1b_vnum_u32(svbool_t, uint8_t *, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u64))) +void svst1b_vnum_u64(svbool_t, uint8_t *, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u16))) +void svst1b_vnum_u16(svbool_t, uint8_t *, int64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s32))) +void svst1h_s32(svbool_t, int16_t *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s64))) +void svst1h_s64(svbool_t, int16_t *, svint64_t); 
+__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u32))) +void svst1h_u32(svbool_t, uint16_t *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u64))) +void svst1h_u64(svbool_t, uint16_t *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s32))) +void svst1h_vnum_s32(svbool_t, int16_t *, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s64))) +void svst1h_vnum_s64(svbool_t, int16_t *, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u32))) +void svst1h_vnum_u32(svbool_t, uint16_t *, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u64))) +void svst1h_vnum_u64(svbool_t, uint16_t *, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_s64))) +void svst1w_s64(svbool_t, int32_t *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_u64))) +void svst1w_u64(svbool_t, uint32_t *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_s64))) +void svst1w_vnum_s64(svbool_t, int32_t *, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_u64))) +void svst1w_vnum_u64(svbool_t, uint32_t *, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u8))) +void svst2_u8(svbool_t, uint8_t *, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u32))) +void svst2_u32(svbool_t, uint32_t *, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u64))) +void svst2_u64(svbool_t, uint64_t *, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u16))) +void svst2_u16(svbool_t, uint16_t *, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s8))) +void svst2_s8(svbool_t, int8_t *, 
svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f64))) +void svst2_f64(svbool_t, float64_t *, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f32))) +void svst2_f32(svbool_t, float32_t *, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f16))) +void svst2_f16(svbool_t, float16_t *, svfloat16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s32))) +void svst2_s32(svbool_t, int32_t *, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s64))) +void svst2_s64(svbool_t, int64_t *, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s16))) +void svst2_s16(svbool_t, int16_t *, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u8))) +void svst2_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u32))) +void svst2_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u64))) +void svst2_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u16))) +void svst2_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s8))) +void svst2_vnum_s8(svbool_t, int8_t *, int64_t, svint8x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f64))) +void svst2_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f32))) +void svst2_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f16))) +void svst2_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16x2_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s32))) +void svst2_vnum_s32(svbool_t, int32_t *, int64_t, svint32x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s64))) +void svst2_vnum_s64(svbool_t, int64_t *, int64_t, svint64x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s16))) +void svst2_vnum_s16(svbool_t, int16_t *, int64_t, svint16x2_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u8))) +void svst3_u8(svbool_t, uint8_t *, svuint8x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u32))) +void svst3_u32(svbool_t, uint32_t *, svuint32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u64))) +void svst3_u64(svbool_t, uint64_t *, svuint64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u16))) +void svst3_u16(svbool_t, uint16_t *, svuint16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s8))) +void svst3_s8(svbool_t, int8_t *, svint8x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f64))) +void svst3_f64(svbool_t, float64_t *, svfloat64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f32))) +void svst3_f32(svbool_t, float32_t *, svfloat32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f16))) +void svst3_f16(svbool_t, float16_t *, svfloat16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s32))) +void svst3_s32(svbool_t, int32_t *, svint32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s64))) +void svst3_s64(svbool_t, int64_t *, svint64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s16))) +void svst3_s16(svbool_t, int16_t *, svint16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u8))) +void svst3_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8x3_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u32))) +void svst3_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u64))) +void svst3_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u16))) +void svst3_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s8))) +void svst3_vnum_s8(svbool_t, int8_t *, int64_t, svint8x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f64))) +void svst3_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f32))) +void svst3_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f16))) +void svst3_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s32))) +void svst3_vnum_s32(svbool_t, int32_t *, int64_t, svint32x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s64))) +void svst3_vnum_s64(svbool_t, int64_t *, int64_t, svint64x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s16))) +void svst3_vnum_s16(svbool_t, int16_t *, int64_t, svint16x3_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u8))) +void svst4_u8(svbool_t, uint8_t *, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u32))) +void svst4_u32(svbool_t, uint32_t *, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u64))) +void svst4_u64(svbool_t, uint64_t *, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u16))) +void svst4_u16(svbool_t, uint16_t *, svuint16x4_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s8))) +void svst4_s8(svbool_t, int8_t *, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f64))) +void svst4_f64(svbool_t, float64_t *, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f32))) +void svst4_f32(svbool_t, float32_t *, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f16))) +void svst4_f16(svbool_t, float16_t *, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s32))) +void svst4_s32(svbool_t, int32_t *, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s64))) +void svst4_s64(svbool_t, int64_t *, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s16))) +void svst4_s16(svbool_t, int16_t *, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u8))) +void svst4_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u32))) +void svst4_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u64))) +void svst4_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u16))) +void svst4_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s8))) +void svst4_vnum_s8(svbool_t, int8_t *, int64_t, svint8x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f64))) +void svst4_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f32))) +void svst4_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f16))) +void 
svst4_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s32))) +void svst4_vnum_s32(svbool_t, int32_t *, int64_t, svint32x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s64))) +void svst4_vnum_s64(svbool_t, int64_t *, int64_t, svint64x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s16))) +void svst4_vnum_s16(svbool_t, int16_t *, int64_t, svint16x4_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8))) +void svstnt1_u8(svbool_t, uint8_t *, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32))) +void svstnt1_u32(svbool_t, uint32_t *, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64))) +void svstnt1_u64(svbool_t, uint64_t *, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16))) +void svstnt1_u16(svbool_t, uint16_t *, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8))) +void svstnt1_s8(svbool_t, int8_t *, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64))) +void svstnt1_f64(svbool_t, float64_t *, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32))) +void svstnt1_f32(svbool_t, float32_t *, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16))) +void svstnt1_f16(svbool_t, float16_t *, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32))) +void svstnt1_s32(svbool_t, int32_t *, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64))) +void svstnt1_s64(svbool_t, int64_t *, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16))) +void svstnt1_s16(svbool_t, int16_t *, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8))) +void 
svstnt1_vnum_u8(svbool_t, uint8_t *, int64_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32))) +void svstnt1_vnum_u32(svbool_t, uint32_t *, int64_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64))) +void svstnt1_vnum_u64(svbool_t, uint64_t *, int64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16))) +void svstnt1_vnum_u16(svbool_t, uint16_t *, int64_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8))) +void svstnt1_vnum_s8(svbool_t, int8_t *, int64_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64))) +void svstnt1_vnum_f64(svbool_t, float64_t *, int64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32))) +void svstnt1_vnum_f32(svbool_t, float32_t *, int64_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16))) +void svstnt1_vnum_f16(svbool_t, float16_t *, int64_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32))) +void svstnt1_vnum_s32(svbool_t, int32_t *, int64_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64))) +void svstnt1_vnum_s64(svbool_t, int64_t *, int64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16))) +void svstnt1_vnum_s16(svbool_t, int16_t *, int64_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_m))) +svfloat64_t svsub_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_m))) +svfloat32_t svsub_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_m))) +svfloat16_t svsub_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_x))) +svfloat64_t svsub_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_x))) +svfloat32_t svsub_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_x))) +svfloat16_t svsub_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_z))) +svfloat64_t svsub_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_z))) +svfloat32_t svsub_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_z))) +svfloat16_t svsub_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_m))) +svuint8_t svsub_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_m))) +svuint32_t svsub_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_m))) +svuint64_t svsub_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_m))) +svuint16_t svsub_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_m))) +svint8_t svsub_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_m))) +svint32_t svsub_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_m))) +svint64_t svsub_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_m))) +svint16_t svsub_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_x))) +svuint8_t 
svsub_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_x))) +svuint32_t svsub_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_x))) +svuint64_t svsub_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_x))) +svuint16_t svsub_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_x))) +svint8_t svsub_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_x))) +svint32_t svsub_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_x))) +svint64_t svsub_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_x))) +svint16_t svsub_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_z))) +svuint8_t svsub_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_z))) +svuint32_t svsub_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_z))) +svuint64_t svsub_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_z))) +svuint16_t svsub_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_z))) +svint8_t svsub_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_z))) +svint32_t svsub_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_z))) +svint64_t svsub_n_s64_z(svbool_t, svint64_t, int64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_z))) +svint16_t svsub_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_m))) +svfloat64_t svsub_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_m))) +svfloat32_t svsub_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_m))) +svfloat16_t svsub_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_x))) +svfloat64_t svsub_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_x))) +svfloat32_t svsub_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_x))) +svfloat16_t svsub_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_z))) +svfloat64_t svsub_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_z))) +svfloat32_t svsub_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_z))) +svfloat16_t svsub_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_m))) +svuint8_t svsub_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_m))) +svuint32_t svsub_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_m))) +svuint64_t svsub_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_m))) +svuint16_t svsub_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_m))) +svint8_t 
svsub_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_m))) +svint32_t svsub_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_m))) +svint64_t svsub_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_m))) +svint16_t svsub_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_x))) +svuint8_t svsub_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_x))) +svuint32_t svsub_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_x))) +svuint64_t svsub_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_x))) +svuint16_t svsub_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_x))) +svint8_t svsub_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_x))) +svint32_t svsub_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_x))) +svint64_t svsub_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_x))) +svint16_t svsub_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_z))) +svuint8_t svsub_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_z))) +svuint32_t svsub_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_z))) +svuint64_t svsub_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_z))) +svuint16_t 
svsub_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_z))) +svint8_t svsub_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_z))) +svint32_t svsub_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_z))) +svint64_t svsub_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_z))) +svint16_t svsub_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_m))) +svfloat64_t svsubr_n_f64_m(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_m))) +svfloat32_t svsubr_n_f32_m(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_m))) +svfloat16_t svsubr_n_f16_m(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_x))) +svfloat64_t svsubr_n_f64_x(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_x))) +svfloat32_t svsubr_n_f32_x(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_x))) +svfloat16_t svsubr_n_f16_x(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_z))) +svfloat64_t svsubr_n_f64_z(svbool_t, svfloat64_t, float64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_z))) +svfloat32_t svsubr_n_f32_z(svbool_t, svfloat32_t, float32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_z))) +svfloat16_t svsubr_n_f16_z(svbool_t, svfloat16_t, float16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_m))) +svuint8_t svsubr_n_u8_m(svbool_t, svuint8_t, uint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_m))) +svuint32_t svsubr_n_u32_m(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_m))) +svuint64_t svsubr_n_u64_m(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_m))) +svuint16_t svsubr_n_u16_m(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_m))) +svint8_t svsubr_n_s8_m(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_m))) +svint32_t svsubr_n_s32_m(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_m))) +svint64_t svsubr_n_s64_m(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_m))) +svint16_t svsubr_n_s16_m(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_x))) +svuint8_t svsubr_n_u8_x(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_x))) +svuint32_t svsubr_n_u32_x(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_x))) +svuint64_t svsubr_n_u64_x(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_x))) +svuint16_t svsubr_n_u16_x(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_x))) +svint8_t svsubr_n_s8_x(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_x))) +svint32_t svsubr_n_s32_x(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_x))) +svint64_t svsubr_n_s64_x(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_x))) +svint16_t 
svsubr_n_s16_x(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_z))) +svuint8_t svsubr_n_u8_z(svbool_t, svuint8_t, uint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_z))) +svuint32_t svsubr_n_u32_z(svbool_t, svuint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_z))) +svuint64_t svsubr_n_u64_z(svbool_t, svuint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_z))) +svuint16_t svsubr_n_u16_z(svbool_t, svuint16_t, uint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_z))) +svint8_t svsubr_n_s8_z(svbool_t, svint8_t, int8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_z))) +svint32_t svsubr_n_s32_z(svbool_t, svint32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_z))) +svint64_t svsubr_n_s64_z(svbool_t, svint64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_z))) +svint16_t svsubr_n_s16_z(svbool_t, svint16_t, int16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_m))) +svfloat64_t svsubr_f64_m(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_m))) +svfloat32_t svsubr_f32_m(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_m))) +svfloat16_t svsubr_f16_m(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_x))) +svfloat64_t svsubr_f64_x(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_x))) +svfloat32_t svsubr_f32_x(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_x))) +svfloat16_t svsubr_f16_x(svbool_t, svfloat16_t, svfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_z))) +svfloat64_t svsubr_f64_z(svbool_t, svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_z))) +svfloat32_t svsubr_f32_z(svbool_t, svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_z))) +svfloat16_t svsubr_f16_z(svbool_t, svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_m))) +svuint8_t svsubr_u8_m(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_m))) +svuint32_t svsubr_u32_m(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_m))) +svuint64_t svsubr_u64_m(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_m))) +svuint16_t svsubr_u16_m(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_m))) +svint8_t svsubr_s8_m(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_m))) +svint32_t svsubr_s32_m(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_m))) +svint64_t svsubr_s64_m(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_m))) +svint16_t svsubr_s16_m(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_x))) +svuint8_t svsubr_u8_x(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_x))) +svuint32_t svsubr_u32_x(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_x))) +svuint64_t svsubr_u64_x(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_x))) +svuint16_t 
svsubr_u16_x(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_x))) +svint8_t svsubr_s8_x(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_x))) +svint32_t svsubr_s32_x(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_x))) +svint64_t svsubr_s64_x(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_x))) +svint16_t svsubr_s16_x(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_z))) +svuint8_t svsubr_u8_z(svbool_t, svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_z))) +svuint32_t svsubr_u32_z(svbool_t, svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_z))) +svuint64_t svsubr_u64_z(svbool_t, svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_z))) +svuint16_t svsubr_u16_z(svbool_t, svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_z))) +svint8_t svsubr_s8_z(svbool_t, svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_z))) +svint32_t svsubr_s32_z(svbool_t, svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_z))) +svint64_t svsubr_s64_z(svbool_t, svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_z))) +svint16_t svsubr_s16_z(svbool_t, svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u8))) +svuint8_t svtbl_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u32))) +svuint32_t svtbl_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u64))) +svuint64_t svtbl_u64(svuint64_t, 
svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u16))) +svuint16_t svtbl_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s8))) +svint8_t svtbl_s8(svint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f64))) +svfloat64_t svtbl_f64(svfloat64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f32))) +svfloat32_t svtbl_f32(svfloat32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f16))) +svfloat16_t svtbl_f16(svfloat16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s32))) +svint32_t svtbl_s32(svint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s64))) +svint64_t svtbl_s64(svint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s16))) +svint16_t svtbl_s16(svint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u8))) +svuint8_t svtrn1_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u32))) +svuint32_t svtrn1_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u64))) +svuint64_t svtrn1_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u16))) +svuint16_t svtrn1_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s8))) +svint8_t svtrn1_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f64))) +svfloat64_t svtrn1_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f32))) +svfloat32_t svtrn1_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f16))) +svfloat16_t svtrn1_f16(svfloat16_t, svfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s32))) +svint32_t svtrn1_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s64))) +svint64_t svtrn1_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s16))) +svint16_t svtrn1_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_b16))) +svbool_t svtrn1_b16(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_b32))) +svbool_t svtrn1_b32(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_b64))) +svbool_t svtrn1_b64(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_b8))) +svbool_t svtrn1_b8(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u8))) +svuint8_t svtrn2_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u32))) +svuint32_t svtrn2_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u64))) +svuint64_t svtrn2_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u16))) +svuint16_t svtrn2_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s8))) +svint8_t svtrn2_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f64))) +svfloat64_t svtrn2_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f32))) +svfloat32_t svtrn2_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f16))) +svfloat16_t svtrn2_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s32))) +svint32_t svtrn2_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s64))) 
+svint64_t svtrn2_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s16))) +svint16_t svtrn2_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b16))) +svbool_t svtrn2_b16(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b32))) +svbool_t svtrn2_b32(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b64))) +svbool_t svtrn2_b64(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_b8))) +svbool_t svtrn2_b8(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u8))) +svuint8x2_t svundef2_u8(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u32))) +svuint32x2_t svundef2_u32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u64))) +svuint64x2_t svundef2_u64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_u16))) +svuint16x2_t svundef2_u16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s8))) +svint8x2_t svundef2_s8(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_f64))) +svfloat64x2_t svundef2_f64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_f32))) +svfloat32x2_t svundef2_f32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_f16))) +svfloat16x2_t svundef2_f16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s32))) +svint32x2_t svundef2_s32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s64))) +svint64x2_t svundef2_s64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef2_s16))) +svint16x2_t svundef2_s16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u8))) +svuint8x3_t svundef3_u8(void); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u32))) +svuint32x3_t svundef3_u32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u64))) +svuint64x3_t svundef3_u64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_u16))) +svuint16x3_t svundef3_u16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s8))) +svint8x3_t svundef3_s8(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_f64))) +svfloat64x3_t svundef3_f64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_f32))) +svfloat32x3_t svundef3_f32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_f16))) +svfloat16x3_t svundef3_f16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s32))) +svint32x3_t svundef3_s32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s64))) +svint64x3_t svundef3_s64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef3_s16))) +svint16x3_t svundef3_s16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u8))) +svuint8x4_t svundef4_u8(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u32))) +svuint32x4_t svundef4_u32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u64))) +svuint64x4_t svundef4_u64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_u16))) +svuint16x4_t svundef4_u16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s8))) +svint8x4_t svundef4_s8(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_f64))) +svfloat64x4_t svundef4_f64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_f32))) +svfloat32x4_t svundef4_f32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_f16))) +svfloat16x4_t svundef4_f16(void); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s32))) +svint32x4_t svundef4_s32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s64))) +svint64x4_t svundef4_s64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef4_s16))) +svint16x4_t svundef4_s16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u8))) +svuint8_t svundef_u8(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u32))) +svuint32_t svundef_u32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u64))) +svuint64_t svundef_u64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_u16))) +svuint16_t svundef_u16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s8))) +svint8_t svundef_s8(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_f64))) +svfloat64_t svundef_f64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_f32))) +svfloat32_t svundef_f32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_f16))) +svfloat16_t svundef_f16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s32))) +svint32_t svundef_s32(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s64))) +svint64_t svundef_s64(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svundef_s16))) +svint16_t svundef_s16(void); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_b))) +svbool_t svunpkhi_b(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s32))) +svint32_t svunpkhi_s32(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s64))) +svint64_t svunpkhi_s64(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s16))) +svint16_t svunpkhi_s16(svint8_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u32))) +svuint32_t svunpkhi_u32(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u64))) +svuint64_t svunpkhi_u64(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u16))) +svuint16_t svunpkhi_u16(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_b))) +svbool_t svunpklo_b(svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s32))) +svint32_t svunpklo_s32(svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s64))) +svint64_t svunpklo_s64(svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s16))) +svint16_t svunpklo_s16(svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u32))) +svuint32_t svunpklo_u32(svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u64))) +svuint64_t svunpklo_u64(svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u16))) +svuint16_t svunpklo_u16(svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u8))) +svuint8_t svuzp1_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u32))) +svuint32_t svuzp1_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u64))) +svuint64_t svuzp1_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u16))) +svuint16_t svuzp1_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s8))) +svint8_t svuzp1_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f64))) +svfloat64_t svuzp1_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f32))) +svfloat32_t svuzp1_f32(svfloat32_t, svfloat32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f16))) +svfloat16_t svuzp1_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s32))) +svint32_t svuzp1_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s64))) +svint64_t svuzp1_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s16))) +svint16_t svuzp1_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_b16))) +svbool_t svuzp1_b16(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_b32))) +svbool_t svuzp1_b32(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_b64))) +svbool_t svuzp1_b64(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_b8))) +svbool_t svuzp1_b8(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u8))) +svuint8_t svuzp2_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u32))) +svuint32_t svuzp2_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u64))) +svuint64_t svuzp2_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u16))) +svuint16_t svuzp2_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s8))) +svint8_t svuzp2_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f64))) +svfloat64_t svuzp2_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f32))) +svfloat32_t svuzp2_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f16))) +svfloat16_t svuzp2_f16(svfloat16_t, svfloat16_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s32))) +svint32_t svuzp2_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s64))) +svint64_t svuzp2_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s16))) +svint16_t svuzp2_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_b16))) +svbool_t svuzp2_b16(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_b32))) +svbool_t svuzp2_b32(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_b64))) +svbool_t svuzp2_b64(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_b8))) +svbool_t svuzp2_b8(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s32))) +svbool_t svwhilele_b8_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s32))) +svbool_t svwhilele_b32_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s32))) +svbool_t svwhilele_b64_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s32))) +svbool_t svwhilele_b16_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s64))) +svbool_t svwhilele_b8_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s64))) +svbool_t svwhilele_b32_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s64))) +svbool_t svwhilele_b64_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s64))) +svbool_t svwhilele_b16_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u32))) +svbool_t svwhilele_b8_u32(uint32_t, uint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u32))) +svbool_t svwhilele_b32_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u32))) +svbool_t svwhilele_b64_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u32))) +svbool_t svwhilele_b16_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u64))) +svbool_t svwhilele_b8_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u64))) +svbool_t svwhilele_b32_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u64))) +svbool_t svwhilele_b64_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u64))) +svbool_t svwhilele_b16_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u32))) +svbool_t svwhilelt_b8_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u32))) +svbool_t svwhilelt_b32_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u32))) +svbool_t svwhilelt_b64_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u32))) +svbool_t svwhilelt_b16_u32(uint32_t, uint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u64))) +svbool_t svwhilelt_b8_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u64))) +svbool_t svwhilelt_b32_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u64))) +svbool_t svwhilelt_b64_u64(uint64_t, uint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u64))) +svbool_t svwhilelt_b16_u64(uint64_t, uint64_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s32))) +svbool_t svwhilelt_b8_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s32))) +svbool_t svwhilelt_b32_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s32))) +svbool_t svwhilelt_b64_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s32))) +svbool_t svwhilelt_b16_s32(int32_t, int32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s64))) +svbool_t svwhilelt_b8_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s64))) +svbool_t svwhilelt_b32_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s64))) +svbool_t svwhilelt_b64_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s64))) +svbool_t svwhilelt_b16_s64(int64_t, int64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u8))) +svuint8_t svzip1_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u32))) +svuint32_t svzip1_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u64))) +svuint64_t svzip1_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u16))) +svuint16_t svzip1_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s8))) +svint8_t svzip1_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f64))) +svfloat64_t svzip1_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f32))) +svfloat32_t svzip1_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f16))) +svfloat16_t svzip1_f16(svfloat16_t, 
svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s32))) +svint32_t svzip1_s32(svint32_t, svint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s64))) +svint64_t svzip1_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s16))) +svint16_t svzip1_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_b16))) +svbool_t svzip1_b16(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_b32))) +svbool_t svzip1_b32(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_b64))) +svbool_t svzip1_b64(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_b8))) +svbool_t svzip1_b8(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u8))) +svuint8_t svzip2_u8(svuint8_t, svuint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u32))) +svuint32_t svzip2_u32(svuint32_t, svuint32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u64))) +svuint64_t svzip2_u64(svuint64_t, svuint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u16))) +svuint16_t svzip2_u16(svuint16_t, svuint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s8))) +svint8_t svzip2_s8(svint8_t, svint8_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f64))) +svfloat64_t svzip2_f64(svfloat64_t, svfloat64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f32))) +svfloat32_t svzip2_f32(svfloat32_t, svfloat32_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f16))) +svfloat16_t svzip2_f16(svfloat16_t, svfloat16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s32))) +svint32_t svzip2_s32(svint32_t, svint32_t); +__ai 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s64))) +svint64_t svzip2_s64(svint64_t, svint64_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s16))) +svint16_t svzip2_s16(svint16_t, svint16_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_b16))) +svbool_t svzip2_b16(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_b32))) +svbool_t svzip2_b32(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_b64))) +svbool_t svzip2_b64(svbool_t, svbool_t); +__ai __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_b8))) +svbool_t svzip2_b8(svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_m))) +svfloat64_t svabd_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_m))) +svfloat32_t svabd_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_m))) +svfloat16_t svabd_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_x))) +svfloat64_t svabd_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_x))) +svfloat32_t svabd_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_x))) +svfloat16_t svabd_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f64_z))) +svfloat64_t svabd_z(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f32_z))) +svfloat32_t svabd_z(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_f16_z))) +svfloat16_t svabd_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_m))) 
+svint8_t svabd_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_m))) +svint32_t svabd_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_m))) +svint64_t svabd_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_m))) +svint16_t svabd_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_x))) +svint8_t svabd_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_x))) +svint32_t svabd_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_x))) +svint64_t svabd_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_x))) +svint16_t svabd_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s8_z))) +svint8_t svabd_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s32_z))) +svint32_t svabd_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s64_z))) +svint64_t svabd_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_s16_z))) +svint16_t svabd_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_m))) +svuint8_t svabd_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_m))) +svuint32_t svabd_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_m))) +svuint64_t svabd_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_m))) +svuint16_t svabd_m(svbool_t, svuint16_t, uint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_x))) +svuint8_t svabd_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_x))) +svuint32_t svabd_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_x))) +svuint64_t svabd_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_x))) +svuint16_t svabd_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u8_z))) +svuint8_t svabd_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u32_z))) +svuint32_t svabd_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u64_z))) +svuint64_t svabd_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_n_u16_z))) +svuint16_t svabd_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_m))) +svfloat64_t svabd_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_m))) +svfloat32_t svabd_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_m))) +svfloat16_t svabd_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_x))) +svfloat64_t svabd_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_x))) +svfloat32_t svabd_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_x))) +svfloat16_t svabd_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f64_z))) +svfloat64_t svabd_z(svbool_t, svfloat64_t, svfloat64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f32_z))) +svfloat32_t svabd_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_f16_z))) +svfloat16_t svabd_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_m))) +svint8_t svabd_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_m))) +svint32_t svabd_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_m))) +svint64_t svabd_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_m))) +svint16_t svabd_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_x))) +svint8_t svabd_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_x))) +svint32_t svabd_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_x))) +svint64_t svabd_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_x))) +svint16_t svabd_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s8_z))) +svint8_t svabd_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s32_z))) +svint32_t svabd_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s64_z))) +svint64_t svabd_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_s16_z))) +svint16_t svabd_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_m))) +svuint8_t svabd_m(svbool_t, svuint8_t, svuint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_m))) +svuint32_t svabd_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_m))) +svuint64_t svabd_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_m))) +svuint16_t svabd_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_x))) +svuint8_t svabd_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_x))) +svuint32_t svabd_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_x))) +svuint64_t svabd_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_x))) +svuint16_t svabd_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u8_z))) +svuint8_t svabd_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u32_z))) +svuint32_t svabd_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u64_z))) +svuint64_t svabd_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabd_u16_z))) +svuint16_t svabd_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_m))) +svfloat64_t svabs_m(svfloat64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_m))) +svfloat32_t svabs_m(svfloat32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_m))) +svfloat16_t svabs_m(svfloat16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_x))) +svfloat64_t svabs_x(svbool_t, svfloat64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_x))) +svfloat32_t svabs_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_x))) +svfloat16_t svabs_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f64_z))) +svfloat64_t svabs_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f32_z))) +svfloat32_t svabs_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_f16_z))) +svfloat16_t svabs_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_m))) +svint8_t svabs_m(svint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_m))) +svint32_t svabs_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_m))) +svint64_t svabs_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_m))) +svint16_t svabs_m(svint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_x))) +svint8_t svabs_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_x))) +svint32_t svabs_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_x))) +svint64_t svabs_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_x))) +svint16_t svabs_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s8_z))) +svint8_t svabs_z(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s32_z))) +svint32_t svabs_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s64_z))) +svint64_t svabs_z(svbool_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svabs_s16_z))) +svint16_t svabs_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f64))) +svbool_t svacge(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f32))) +svbool_t svacge(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_n_f16))) +svbool_t svacge(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f64))) +svbool_t svacge(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f32))) +svbool_t svacge(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacge_f16))) +svbool_t svacge(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f64))) +svbool_t svacgt(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f32))) +svbool_t svacgt(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_n_f16))) +svbool_t svacgt(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f64))) +svbool_t svacgt(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f32))) +svbool_t svacgt(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacgt_f16))) +svbool_t svacgt(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f64))) +svbool_t svacle(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f32))) +svbool_t svacle(svbool_t, svfloat32_t, float32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_n_f16))) +svbool_t svacle(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f64))) +svbool_t svacle(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f32))) +svbool_t svacle(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svacle_f16))) +svbool_t svacle(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f64))) +svbool_t svaclt(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f32))) +svbool_t svaclt(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_n_f16))) +svbool_t svaclt(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f64))) +svbool_t svaclt(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f32))) +svbool_t svaclt(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaclt_f16))) +svbool_t svaclt(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_m))) +svfloat64_t svadd_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_m))) +svfloat32_t svadd_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_m))) +svfloat16_t svadd_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_x))) +svfloat64_t svadd_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_x))) +svfloat32_t svadd_x(svbool_t, svfloat32_t, float32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_x))) +svfloat16_t svadd_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f64_z))) +svfloat64_t svadd_z(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f32_z))) +svfloat32_t svadd_z(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_f16_z))) +svfloat16_t svadd_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_m))) +svuint8_t svadd_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_m))) +svuint32_t svadd_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_m))) +svuint64_t svadd_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_m))) +svuint16_t svadd_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_m))) +svint8_t svadd_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_m))) +svint32_t svadd_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_m))) +svint64_t svadd_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_m))) +svint16_t svadd_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_x))) +svuint8_t svadd_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_x))) +svuint32_t svadd_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_x))) +svuint64_t svadd_x(svbool_t, svuint64_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_x))) +svuint16_t svadd_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_x))) +svint8_t svadd_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_x))) +svint32_t svadd_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_x))) +svint64_t svadd_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_x))) +svint16_t svadd_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u8_z))) +svuint8_t svadd_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u32_z))) +svuint32_t svadd_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u64_z))) +svuint64_t svadd_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_u16_z))) +svuint16_t svadd_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s8_z))) +svint8_t svadd_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s32_z))) +svint32_t svadd_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s64_z))) +svint64_t svadd_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_n_s16_z))) +svint16_t svadd_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_m))) +svfloat64_t svadd_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_m))) +svfloat32_t svadd_m(svbool_t, svfloat32_t, svfloat32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_m))) +svfloat16_t svadd_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_x))) +svfloat64_t svadd_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_x))) +svfloat32_t svadd_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_x))) +svfloat16_t svadd_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f64_z))) +svfloat64_t svadd_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f32_z))) +svfloat32_t svadd_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_f16_z))) +svfloat16_t svadd_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_m))) +svuint8_t svadd_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_m))) +svuint32_t svadd_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_m))) +svuint64_t svadd_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_m))) +svuint16_t svadd_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_m))) +svint8_t svadd_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_m))) +svint32_t svadd_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_m))) +svint64_t svadd_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_m))) +svint16_t svadd_m(svbool_t, svint16_t, svint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_x))) +svuint8_t svadd_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_x))) +svuint32_t svadd_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_x))) +svuint64_t svadd_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_x))) +svuint16_t svadd_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_x))) +svint8_t svadd_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_x))) +svint32_t svadd_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_x))) +svint64_t svadd_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_x))) +svint16_t svadd_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u8_z))) +svuint8_t svadd_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u32_z))) +svuint32_t svadd_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u64_z))) +svuint64_t svadd_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_u16_z))) +svuint16_t svadd_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s8_z))) +svint8_t svadd_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s32_z))) +svint32_t svadd_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s64_z))) +svint64_t svadd_z(svbool_t, svint64_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svadd_s16_z))) +svint16_t svadd_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f64))) +float64_t svadda(svbool_t, float64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f32))) +float32_t svadda(svbool_t, float32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svadda_f16))) +float16_t svadda(svbool_t, float16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s8))) +int64_t svaddv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s32))) +int64_t svaddv(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s64))) +int64_t svaddv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_s16))) +int64_t svaddv(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u8))) +uint64_t svaddv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u32))) +uint64_t svaddv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u64))) +uint64_t svaddv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_u16))) +uint64_t svaddv(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f64))) +float64_t svaddv(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f32))) +float32_t svaddv(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svaddv_f16))) +float16_t svaddv(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_b_z))) +svbool_t svand_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_m))) +svuint8_t 
svand_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_m))) +svuint32_t svand_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_m))) +svuint64_t svand_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_m))) +svuint16_t svand_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_m))) +svint8_t svand_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_m))) +svint32_t svand_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_m))) +svint64_t svand_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_m))) +svint16_t svand_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_x))) +svuint8_t svand_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_x))) +svuint32_t svand_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_x))) +svuint64_t svand_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_x))) +svuint16_t svand_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_x))) +svint8_t svand_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_x))) +svint32_t svand_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_x))) +svint64_t svand_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_x))) +svint16_t svand_x(svbool_t, svint16_t, int16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u8_z))) +svuint8_t svand_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u32_z))) +svuint32_t svand_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u64_z))) +svuint64_t svand_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_u16_z))) +svuint16_t svand_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s8_z))) +svint8_t svand_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s32_z))) +svint32_t svand_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s64_z))) +svint64_t svand_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_n_s16_z))) +svint16_t svand_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_m))) +svuint8_t svand_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_m))) +svuint32_t svand_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_m))) +svuint64_t svand_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_m))) +svuint16_t svand_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_m))) +svint8_t svand_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_m))) +svint32_t svand_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_m))) +svint64_t svand_m(svbool_t, svint64_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_m))) +svint16_t svand_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_x))) +svuint8_t svand_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_x))) +svuint32_t svand_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_x))) +svuint64_t svand_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_x))) +svuint16_t svand_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_x))) +svint8_t svand_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_x))) +svint32_t svand_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_x))) +svint64_t svand_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_x))) +svint16_t svand_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u8_z))) +svuint8_t svand_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u32_z))) +svuint32_t svand_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u64_z))) +svuint64_t svand_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_u16_z))) +svuint16_t svand_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s8_z))) +svint8_t svand_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s32_z))) +svint32_t svand_z(svbool_t, svint32_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s64_z))) +svint64_t svand_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svand_s16_z))) +svint16_t svand_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u8))) +uint8_t svandv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u32))) +uint32_t svandv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u64))) +uint64_t svandv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_u16))) +uint16_t svandv(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s8))) +int8_t svandv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s32))) +int32_t svandv(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s64))) +int64_t svandv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svandv_s16))) +int16_t svandv(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_m))) +svint8_t svasr_m(svbool_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_m))) +svint32_t svasr_m(svbool_t, svint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_m))) +svint64_t svasr_m(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_m))) +svint16_t svasr_m(svbool_t, svint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_x))) +svint8_t svasr_x(svbool_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_x))) +svint32_t svasr_x(svbool_t, svint32_t, uint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_x))) +svint64_t svasr_x(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_x))) +svint16_t svasr_x(svbool_t, svint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s8_z))) +svint8_t svasr_z(svbool_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s32_z))) +svint32_t svasr_z(svbool_t, svint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s64_z))) +svint64_t svasr_z(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_n_s16_z))) +svint16_t svasr_z(svbool_t, svint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_m))) +svint8_t svasr_m(svbool_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_m))) +svint32_t svasr_m(svbool_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_m))) +svint64_t svasr_m(svbool_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_m))) +svint16_t svasr_m(svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_x))) +svint8_t svasr_x(svbool_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_x))) +svint32_t svasr_x(svbool_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_x))) +svint64_t svasr_x(svbool_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_x))) +svint16_t svasr_x(svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s8_z))) +svint8_t svasr_z(svbool_t, svint8_t, svuint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s32_z))) +svint32_t svasr_z(svbool_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s64_z))) +svint64_t svasr_z(svbool_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_s16_z))) +svint16_t svasr_z(svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_m))) +svint8_t svasr_wide_m(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_m))) +svint32_t svasr_wide_m(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_m))) +svint16_t svasr_wide_m(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_x))) +svint8_t svasr_wide_x(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_x))) +svint32_t svasr_wide_x(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_x))) +svint16_t svasr_wide_x(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s8_z))) +svint8_t svasr_wide_z(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s32_z))) +svint32_t svasr_wide_z(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_n_s16_z))) +svint16_t svasr_wide_z(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_m))) +svint8_t svasr_wide_m(svbool_t, svint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_m))) +svint32_t svasr_wide_m(svbool_t, svint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_m))) 
+svint16_t svasr_wide_m(svbool_t, svint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_x))) +svint8_t svasr_wide_x(svbool_t, svint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_x))) +svint32_t svasr_wide_x(svbool_t, svint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_x))) +svint16_t svasr_wide_x(svbool_t, svint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s8_z))) +svint8_t svasr_wide_z(svbool_t, svint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s32_z))) +svint32_t svasr_wide_z(svbool_t, svint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasr_wide_s16_z))) +svint16_t svasr_wide_z(svbool_t, svint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_m))) +svint8_t svasrd_m(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_m))) +svint32_t svasrd_m(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_m))) +svint64_t svasrd_m(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_m))) +svint16_t svasrd_m(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_x))) +svint8_t svasrd_x(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_x))) +svint32_t svasrd_x(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_x))) +svint64_t svasrd_x(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_x))) +svint16_t svasrd_x(svbool_t, svint16_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s8_z))) +svint8_t svasrd_z(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s32_z))) +svint32_t svasrd_z(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s64_z))) +svint64_t svasrd_z(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svasrd_n_s16_z))) +svint16_t svasrd_z(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_b_z))) +svbool_t svbic_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_m))) +svuint8_t svbic_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_m))) +svuint32_t svbic_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_m))) +svuint64_t svbic_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_m))) +svuint16_t svbic_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_m))) +svint8_t svbic_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_m))) +svint32_t svbic_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_m))) +svint64_t svbic_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_m))) +svint16_t svbic_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_x))) +svuint8_t svbic_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_x))) +svuint32_t svbic_x(svbool_t, svuint32_t, uint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_x))) +svuint64_t svbic_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_x))) +svuint16_t svbic_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_x))) +svint8_t svbic_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_x))) +svint32_t svbic_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_x))) +svint64_t svbic_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_x))) +svint16_t svbic_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u8_z))) +svuint8_t svbic_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u32_z))) +svuint32_t svbic_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u64_z))) +svuint64_t svbic_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_u16_z))) +svuint16_t svbic_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s8_z))) +svint8_t svbic_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s32_z))) +svint32_t svbic_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s64_z))) +svint64_t svbic_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_n_s16_z))) +svint16_t svbic_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_m))) +svuint8_t svbic_m(svbool_t, svuint8_t, svuint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_m))) +svuint32_t svbic_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_m))) +svuint64_t svbic_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_m))) +svuint16_t svbic_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_m))) +svint8_t svbic_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_m))) +svint32_t svbic_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_m))) +svint64_t svbic_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_m))) +svint16_t svbic_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_x))) +svuint8_t svbic_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_x))) +svuint32_t svbic_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_x))) +svuint64_t svbic_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_x))) +svuint16_t svbic_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_x))) +svint8_t svbic_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_x))) +svint32_t svbic_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_x))) +svint64_t svbic_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_x))) +svint16_t svbic_x(svbool_t, svint16_t, svint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u8_z))) +svuint8_t svbic_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u32_z))) +svuint32_t svbic_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u64_z))) +svuint64_t svbic_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_u16_z))) +svuint16_t svbic_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s8_z))) +svint8_t svbic_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s32_z))) +svint32_t svbic_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s64_z))) +svint64_t svbic_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbic_s16_z))) +svint16_t svbic_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_m))) +svbool_t svbrka_m(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrka_b_z))) +svbool_t svbrka_z(svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_m))) +svbool_t svbrkb_m(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkb_b_z))) +svbool_t svbrkb_z(svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkn_b_z))) +svbool_t svbrkn_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpa_b_z))) +svbool_t svbrkpa_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svbrkpb_b_z))) +svbool_t svbrkpb_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_m))) +svfloat64_t 
svcadd_m(svbool_t, svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_m))) +svfloat32_t svcadd_m(svbool_t, svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_m))) +svfloat16_t svcadd_m(svbool_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_x))) +svfloat64_t svcadd_x(svbool_t, svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_x))) +svfloat32_t svcadd_x(svbool_t, svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_x))) +svfloat16_t svcadd_x(svbool_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f64_z))) +svfloat64_t svcadd_z(svbool_t, svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f32_z))) +svfloat32_t svcadd_z(svbool_t, svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcadd_f16_z))) +svfloat16_t svcadd_z(svbool_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u8))) +uint8_t svclasta(svbool_t, uint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u32))) +uint32_t svclasta(svbool_t, uint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u64))) +uint64_t svclasta(svbool_t, uint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_u16))) +uint16_t svclasta(svbool_t, uint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s8))) +int8_t svclasta(svbool_t, int8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f64))) +float64_t svclasta(svbool_t, 
float64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f32))) +float32_t svclasta(svbool_t, float32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_f16))) +float16_t svclasta(svbool_t, float16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s32))) +int32_t svclasta(svbool_t, int32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s64))) +int64_t svclasta(svbool_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_n_s16))) +int16_t svclasta(svbool_t, int16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u8))) +svuint8_t svclasta(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u32))) +svuint32_t svclasta(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u64))) +svuint64_t svclasta(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_u16))) +svuint16_t svclasta(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s8))) +svint8_t svclasta(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f64))) +svfloat64_t svclasta(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f32))) +svfloat32_t svclasta(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_f16))) +svfloat16_t svclasta(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s32))) +svint32_t svclasta(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s64))) +svint64_t svclasta(svbool_t, 
svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclasta_s16))) +svint16_t svclasta(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u8))) +uint8_t svclastb(svbool_t, uint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u32))) +uint32_t svclastb(svbool_t, uint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u64))) +uint64_t svclastb(svbool_t, uint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_u16))) +uint16_t svclastb(svbool_t, uint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s8))) +int8_t svclastb(svbool_t, int8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f64))) +float64_t svclastb(svbool_t, float64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f32))) +float32_t svclastb(svbool_t, float32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_f16))) +float16_t svclastb(svbool_t, float16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s32))) +int32_t svclastb(svbool_t, int32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s64))) +int64_t svclastb(svbool_t, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_n_s16))) +int16_t svclastb(svbool_t, int16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u8))) +svuint8_t svclastb(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u32))) +svuint32_t svclastb(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u64))) +svuint64_t svclastb(svbool_t, svuint64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_u16))) +svuint16_t svclastb(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s8))) +svint8_t svclastb(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f64))) +svfloat64_t svclastb(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f32))) +svfloat32_t svclastb(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_f16))) +svfloat16_t svclastb(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s32))) +svint32_t svclastb(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s64))) +svint64_t svclastb(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclastb_s16))) +svint16_t svclastb(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_m))) +svuint8_t svcls_m(svuint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_m))) +svuint32_t svcls_m(svuint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_m))) +svuint64_t svcls_m(svuint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_m))) +svuint16_t svcls_m(svuint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_x))) +svuint8_t svcls_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_x))) +svuint32_t svcls_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_x))) +svuint64_t svcls_x(svbool_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_x))) +svuint16_t svcls_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s8_z))) +svuint8_t svcls_z(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s32_z))) +svuint32_t svcls_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s64_z))) +svuint64_t svcls_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcls_s16_z))) +svuint16_t svcls_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_m))) +svuint8_t svclz_m(svuint8_t, svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_m))) +svuint32_t svclz_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_m))) +svuint64_t svclz_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_m))) +svuint16_t svclz_m(svuint16_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_m))) +svuint8_t svclz_m(svuint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_m))) +svuint32_t svclz_m(svuint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_m))) +svuint64_t svclz_m(svuint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_m))) +svuint16_t svclz_m(svuint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_x))) +svuint8_t svclz_x(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_x))) +svuint32_t svclz_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_x))) +svuint64_t svclz_x(svbool_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_x))) +svuint16_t svclz_x(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_x))) +svuint8_t svclz_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_x))) +svuint32_t svclz_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_x))) +svuint64_t svclz_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_x))) +svuint16_t svclz_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u8_z))) +svuint8_t svclz_z(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u32_z))) +svuint32_t svclz_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u64_z))) +svuint64_t svclz_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_u16_z))) +svuint16_t svclz_z(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s8_z))) +svuint8_t svclz_z(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s32_z))) +svuint32_t svclz_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s64_z))) +svuint64_t svclz_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svclz_s16_z))) +svuint16_t svclz_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_m))) +svfloat64_t svcmla_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_m))) +svfloat32_t svcmla_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_m))) +svfloat16_t svcmla_m(svbool_t, svfloat16_t, svfloat16_t, 
svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_x))) +svfloat64_t svcmla_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_x))) +svfloat32_t svcmla_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_x))) +svfloat16_t svcmla_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f64_z))) +svfloat64_t svcmla_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f32_z))) +svfloat32_t svcmla_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_f16_z))) +svfloat16_t svcmla_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f32))) +svfloat32_t svcmla_lane(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmla_lane_f16))) +svfloat16_t svcmla_lane(svfloat16_t, svfloat16_t, svfloat16_t, uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f64))) +svbool_t svcmpeq(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f32))) +svbool_t svcmpeq(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_f16))) +svbool_t svcmpeq(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u8))) +svbool_t svcmpeq(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u32))) +svbool_t svcmpeq(svbool_t, svuint32_t, uint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u64))) +svbool_t svcmpeq(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_u16))) +svbool_t svcmpeq(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s8))) +svbool_t svcmpeq(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s32))) +svbool_t svcmpeq(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s64))) +svbool_t svcmpeq(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_n_s16))) +svbool_t svcmpeq(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u8))) +svbool_t svcmpeq(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u32))) +svbool_t svcmpeq(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u64))) +svbool_t svcmpeq(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_u16))) +svbool_t svcmpeq(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s8))) +svbool_t svcmpeq(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s32))) +svbool_t svcmpeq(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s64))) +svbool_t svcmpeq(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_s16))) +svbool_t svcmpeq(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f64))) +svbool_t svcmpeq(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f32))) 
+svbool_t svcmpeq(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_f16))) +svbool_t svcmpeq(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s8))) +svbool_t svcmpeq_wide(svbool_t, svint8_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s32))) +svbool_t svcmpeq_wide(svbool_t, svint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_n_s16))) +svbool_t svcmpeq_wide(svbool_t, svint16_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s8))) +svbool_t svcmpeq_wide(svbool_t, svint8_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s32))) +svbool_t svcmpeq_wide(svbool_t, svint32_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpeq_wide_s16))) +svbool_t svcmpeq_wide(svbool_t, svint16_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f64))) +svbool_t svcmpge(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f32))) +svbool_t svcmpge(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_f16))) +svbool_t svcmpge(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s8))) +svbool_t svcmpge(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s32))) +svbool_t svcmpge(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s64))) +svbool_t svcmpge(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_s16))) +svbool_t svcmpge(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u8))) 
+svbool_t svcmpge(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u32))) +svbool_t svcmpge(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u64))) +svbool_t svcmpge(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_n_u16))) +svbool_t svcmpge(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s8))) +svbool_t svcmpge(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s32))) +svbool_t svcmpge(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s64))) +svbool_t svcmpge(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_s16))) +svbool_t svcmpge(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f64))) +svbool_t svcmpge(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f32))) +svbool_t svcmpge(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_f16))) +svbool_t svcmpge(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u8))) +svbool_t svcmpge(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u32))) +svbool_t svcmpge(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u64))) +svbool_t svcmpge(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_u16))) +svbool_t svcmpge(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s8))) +svbool_t svcmpge_wide(svbool_t, svint8_t, int64_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s32))) +svbool_t svcmpge_wide(svbool_t, svint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_s16))) +svbool_t svcmpge_wide(svbool_t, svint16_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u8))) +svbool_t svcmpge_wide(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u32))) +svbool_t svcmpge_wide(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_n_u16))) +svbool_t svcmpge_wide(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s8))) +svbool_t svcmpge_wide(svbool_t, svint8_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s32))) +svbool_t svcmpge_wide(svbool_t, svint32_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_s16))) +svbool_t svcmpge_wide(svbool_t, svint16_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u8))) +svbool_t svcmpge_wide(svbool_t, svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u32))) +svbool_t svcmpge_wide(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpge_wide_u16))) +svbool_t svcmpge_wide(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f64))) +svbool_t svcmpgt(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f32))) +svbool_t svcmpgt(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_f16))) +svbool_t svcmpgt(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s8))) 
+svbool_t svcmpgt(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s32))) +svbool_t svcmpgt(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s64))) +svbool_t svcmpgt(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_s16))) +svbool_t svcmpgt(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u8))) +svbool_t svcmpgt(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u32))) +svbool_t svcmpgt(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u64))) +svbool_t svcmpgt(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_n_u16))) +svbool_t svcmpgt(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s8))) +svbool_t svcmpgt(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s32))) +svbool_t svcmpgt(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s64))) +svbool_t svcmpgt(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_s16))) +svbool_t svcmpgt(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f64))) +svbool_t svcmpgt(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f32))) +svbool_t svcmpgt(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_f16))) +svbool_t svcmpgt(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u8))) +svbool_t svcmpgt(svbool_t, svuint8_t, svuint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u32))) +svbool_t svcmpgt(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u64))) +svbool_t svcmpgt(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_u16))) +svbool_t svcmpgt(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s8))) +svbool_t svcmpgt_wide(svbool_t, svint8_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s32))) +svbool_t svcmpgt_wide(svbool_t, svint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_s16))) +svbool_t svcmpgt_wide(svbool_t, svint16_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u8))) +svbool_t svcmpgt_wide(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u32))) +svbool_t svcmpgt_wide(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_n_u16))) +svbool_t svcmpgt_wide(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s8))) +svbool_t svcmpgt_wide(svbool_t, svint8_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s32))) +svbool_t svcmpgt_wide(svbool_t, svint32_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_s16))) +svbool_t svcmpgt_wide(svbool_t, svint16_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u8))) +svbool_t svcmpgt_wide(svbool_t, svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u32))) +svbool_t svcmpgt_wide(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpgt_wide_u16))) +svbool_t 
svcmpgt_wide(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f64))) +svbool_t svcmple(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f32))) +svbool_t svcmple(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_f16))) +svbool_t svcmple(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s8))) +svbool_t svcmple(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s32))) +svbool_t svcmple(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s64))) +svbool_t svcmple(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_s16))) +svbool_t svcmple(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u8))) +svbool_t svcmple(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u32))) +svbool_t svcmple(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u64))) +svbool_t svcmple(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_n_u16))) +svbool_t svcmple(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s8))) +svbool_t svcmple(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s32))) +svbool_t svcmple(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s64))) +svbool_t svcmple(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_s16))) +svbool_t svcmple(svbool_t, svint16_t, svint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f64))) +svbool_t svcmple(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f32))) +svbool_t svcmple(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_f16))) +svbool_t svcmple(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u8))) +svbool_t svcmple(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u32))) +svbool_t svcmple(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u64))) +svbool_t svcmple(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_u16))) +svbool_t svcmple(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s8))) +svbool_t svcmple_wide(svbool_t, svint8_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s32))) +svbool_t svcmple_wide(svbool_t, svint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_s16))) +svbool_t svcmple_wide(svbool_t, svint16_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u8))) +svbool_t svcmple_wide(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u32))) +svbool_t svcmple_wide(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_n_u16))) +svbool_t svcmple_wide(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s8))) +svbool_t svcmple_wide(svbool_t, svint8_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s32))) +svbool_t svcmple_wide(svbool_t, svint32_t, 
svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_s16))) +svbool_t svcmple_wide(svbool_t, svint16_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u8))) +svbool_t svcmple_wide(svbool_t, svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u32))) +svbool_t svcmple_wide(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmple_wide_u16))) +svbool_t svcmple_wide(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u8))) +svbool_t svcmplt(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u32))) +svbool_t svcmplt(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u64))) +svbool_t svcmplt(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_u16))) +svbool_t svcmplt(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f64))) +svbool_t svcmplt(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f32))) +svbool_t svcmplt(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_f16))) +svbool_t svcmplt(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s8))) +svbool_t svcmplt(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s32))) +svbool_t svcmplt(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s64))) +svbool_t svcmplt(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_n_s16))) +svbool_t svcmplt(svbool_t, svint16_t, int16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u8))) +svbool_t svcmplt(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u32))) +svbool_t svcmplt(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u64))) +svbool_t svcmplt(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_u16))) +svbool_t svcmplt(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s8))) +svbool_t svcmplt(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s32))) +svbool_t svcmplt(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s64))) +svbool_t svcmplt(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_s16))) +svbool_t svcmplt(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f64))) +svbool_t svcmplt(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f32))) +svbool_t svcmplt(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_f16))) +svbool_t svcmplt(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u8))) +svbool_t svcmplt_wide(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u32))) +svbool_t svcmplt_wide(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_u16))) +svbool_t svcmplt_wide(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s8))) +svbool_t svcmplt_wide(svbool_t, svint8_t, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s32))) +svbool_t svcmplt_wide(svbool_t, svint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_n_s16))) +svbool_t svcmplt_wide(svbool_t, svint16_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u8))) +svbool_t svcmplt_wide(svbool_t, svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u32))) +svbool_t svcmplt_wide(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_u16))) +svbool_t svcmplt_wide(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s8))) +svbool_t svcmplt_wide(svbool_t, svint8_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s32))) +svbool_t svcmplt_wide(svbool_t, svint32_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmplt_wide_s16))) +svbool_t svcmplt_wide(svbool_t, svint16_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f64))) +svbool_t svcmpne(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f32))) +svbool_t svcmpne(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_f16))) +svbool_t svcmpne(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u8))) +svbool_t svcmpne(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u32))) +svbool_t svcmpne(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u64))) +svbool_t svcmpne(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_u16))) +svbool_t svcmpne(svbool_t, 
svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s8))) +svbool_t svcmpne(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s32))) +svbool_t svcmpne(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s64))) +svbool_t svcmpne(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_n_s16))) +svbool_t svcmpne(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u8))) +svbool_t svcmpne(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u32))) +svbool_t svcmpne(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u64))) +svbool_t svcmpne(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_u16))) +svbool_t svcmpne(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s8))) +svbool_t svcmpne(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s32))) +svbool_t svcmpne(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s64))) +svbool_t svcmpne(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_s16))) +svbool_t svcmpne(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f64))) +svbool_t svcmpne(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f32))) +svbool_t svcmpne(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_f16))) +svbool_t svcmpne(svbool_t, svfloat16_t, svfloat16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s8))) +svbool_t svcmpne_wide(svbool_t, svint8_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s32))) +svbool_t svcmpne_wide(svbool_t, svint32_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_n_s16))) +svbool_t svcmpne_wide(svbool_t, svint16_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s8))) +svbool_t svcmpne_wide(svbool_t, svint8_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s32))) +svbool_t svcmpne_wide(svbool_t, svint32_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpne_wide_s16))) +svbool_t svcmpne_wide(svbool_t, svint16_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f64))) +svbool_t svcmpuo(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f32))) +svbool_t svcmpuo(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_n_f16))) +svbool_t svcmpuo(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f64))) +svbool_t svcmpuo(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f32))) +svbool_t svcmpuo(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcmpuo_f16))) +svbool_t svcmpuo(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_m))) +svuint8_t svcnot_m(svuint8_t, svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_m))) +svuint32_t svcnot_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_m))) +svuint64_t svcnot_m(svuint64_t, svbool_t, 
svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_m))) +svuint16_t svcnot_m(svuint16_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_m))) +svint8_t svcnot_m(svint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_m))) +svint32_t svcnot_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_m))) +svint64_t svcnot_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_m))) +svint16_t svcnot_m(svint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_x))) +svuint8_t svcnot_x(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_x))) +svuint32_t svcnot_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_x))) +svuint64_t svcnot_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_x))) +svuint16_t svcnot_x(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_x))) +svint8_t svcnot_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_x))) +svint32_t svcnot_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_x))) +svint64_t svcnot_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_x))) +svint16_t svcnot_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u8_z))) +svuint8_t svcnot_z(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u32_z))) +svuint32_t svcnot_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u64_z))) +svuint64_t svcnot_z(svbool_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_u16_z))) +svuint16_t svcnot_z(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s8_z))) +svint8_t svcnot_z(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s32_z))) +svint32_t svcnot_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s64_z))) +svint64_t svcnot_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnot_s16_z))) +svint16_t svcnot_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_m))) +svuint8_t svcnt_m(svuint8_t, svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_m))) +svuint32_t svcnt_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_m))) +svuint64_t svcnt_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_m))) +svuint16_t svcnt_m(svuint16_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_m))) +svuint8_t svcnt_m(svuint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_m))) +svuint64_t svcnt_m(svuint64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_m))) +svuint32_t svcnt_m(svuint32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_m))) +svuint16_t svcnt_m(svuint16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_m))) +svuint32_t svcnt_m(svuint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_m))) +svuint64_t svcnt_m(svuint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_m))) +svuint16_t 
svcnt_m(svuint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_x))) +svuint8_t svcnt_x(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_x))) +svuint32_t svcnt_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_x))) +svuint64_t svcnt_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_x))) +svuint16_t svcnt_x(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_x))) +svuint8_t svcnt_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_x))) +svuint64_t svcnt_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_x))) +svuint32_t svcnt_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_x))) +svuint16_t svcnt_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_x))) +svuint32_t svcnt_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_x))) +svuint64_t svcnt_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_x))) +svuint16_t svcnt_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u8_z))) +svuint8_t svcnt_z(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u32_z))) +svuint32_t svcnt_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u64_z))) +svuint64_t svcnt_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_u16_z))) +svuint16_t svcnt_z(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s8_z))) +svuint8_t svcnt_z(svbool_t, svint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f64_z))) +svuint64_t svcnt_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f32_z))) +svuint32_t svcnt_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_f16_z))) +svuint16_t svcnt_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s32_z))) +svuint32_t svcnt_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s64_z))) +svuint64_t svcnt_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcnt_s16_z))) +svuint16_t svcnt_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u8))) +svuint8x2_t svcreate2(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u32))) +svuint32x2_t svcreate2(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u64))) +svuint64x2_t svcreate2(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_u16))) +svuint16x2_t svcreate2(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s8))) +svint8x2_t svcreate2(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f64))) +svfloat64x2_t svcreate2(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f32))) +svfloat32x2_t svcreate2(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_f16))) +svfloat16x2_t svcreate2(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s32))) +svint32x2_t svcreate2(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s64))) +svint64x2_t svcreate2(svint64_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate2_s16))) +svint16x2_t svcreate2(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u8))) +svuint8x3_t svcreate3(svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u32))) +svuint32x3_t svcreate3(svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u64))) +svuint64x3_t svcreate3(svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_u16))) +svuint16x3_t svcreate3(svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s8))) +svint8x3_t svcreate3(svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f64))) +svfloat64x3_t svcreate3(svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f32))) +svfloat32x3_t svcreate3(svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_f16))) +svfloat16x3_t svcreate3(svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s32))) +svint32x3_t svcreate3(svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s64))) +svint64x3_t svcreate3(svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate3_s16))) +svint16x3_t svcreate3(svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u8))) +svuint8x4_t svcreate4(svuint8_t, svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u32))) +svuint32x4_t svcreate4(svuint32_t, svuint32_t, svuint32_t, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u64))) +svuint64x4_t svcreate4(svuint64_t, svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_u16))) +svuint16x4_t svcreate4(svuint16_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s8))) +svint8x4_t svcreate4(svint8_t, svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f64))) +svfloat64x4_t svcreate4(svfloat64_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f32))) +svfloat32x4_t svcreate4(svfloat32_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_f16))) +svfloat16x4_t svcreate4(svfloat16_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s32))) +svint32x4_t svcreate4(svint32_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s64))) +svint64x4_t svcreate4(svint64_t, svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcreate4_s16))) +svint16x4_t svcreate4(svint16_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_m))) +svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_x))) +svfloat16_t svcvt_f16_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f32_z))) +svfloat16_t svcvt_f16_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_m))) +svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_x))) +svfloat16_t 
svcvt_f16_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_f64_z))) +svfloat16_t svcvt_f16_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_m))) +svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_x))) +svfloat16_t svcvt_f16_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s16_z))) +svfloat16_t svcvt_f16_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_m))) +svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_x))) +svfloat16_t svcvt_f16_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s32_z))) +svfloat16_t svcvt_f16_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_m))) +svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_x))) +svfloat16_t svcvt_f16_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_s64_z))) +svfloat16_t svcvt_f16_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_m))) +svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_x))) +svfloat16_t svcvt_f16_x(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u16_z))) +svfloat16_t svcvt_f16_z(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_m))) +svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_x))) +svfloat16_t 
svcvt_f16_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u32_z))) +svfloat16_t svcvt_f16_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_m))) +svfloat16_t svcvt_f16_m(svfloat16_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_x))) +svfloat16_t svcvt_f16_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f16_u64_z))) +svfloat16_t svcvt_f16_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_m))) +svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_x))) +svfloat32_t svcvt_f32_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f16_z))) +svfloat32_t svcvt_f32_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_m))) +svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_x))) +svfloat32_t svcvt_f32_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_f64_z))) +svfloat32_t svcvt_f32_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_m))) +svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_x))) +svfloat32_t svcvt_f32_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s32_z))) +svfloat32_t svcvt_f32_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_m))) +svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_x))) 
+svfloat32_t svcvt_f32_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_s64_z))) +svfloat32_t svcvt_f32_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_m))) +svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_x))) +svfloat32_t svcvt_f32_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u32_z))) +svfloat32_t svcvt_f32_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_m))) +svfloat32_t svcvt_f32_m(svfloat32_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_x))) +svfloat32_t svcvt_f32_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f32_u64_z))) +svfloat32_t svcvt_f32_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_m))) +svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_x))) +svfloat64_t svcvt_f64_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f16_z))) +svfloat64_t svcvt_f64_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_m))) +svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_x))) +svfloat64_t svcvt_f64_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_f32_z))) +svfloat64_t svcvt_f64_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_m))) +svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_x))) +svfloat64_t svcvt_f64_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s32_z))) +svfloat64_t svcvt_f64_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_m))) +svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_x))) +svfloat64_t svcvt_f64_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_s64_z))) +svfloat64_t svcvt_f64_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_m))) +svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_x))) +svfloat64_t svcvt_f64_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u32_z))) +svfloat64_t svcvt_f64_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_m))) +svfloat64_t svcvt_f64_m(svfloat64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_x))) +svfloat64_t svcvt_f64_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_f64_u64_z))) +svfloat64_t svcvt_f64_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_m))) +svint16_t svcvt_s16_m(svint16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_x))) +svint16_t svcvt_s16_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s16_f16_z))) +svint16_t svcvt_s16_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_m))) +svint32_t svcvt_s32_m(svint32_t, svbool_t, svfloat16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_x))) +svint32_t svcvt_s32_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f16_z))) +svint32_t svcvt_s32_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_m))) +svint32_t svcvt_s32_m(svint32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_x))) +svint32_t svcvt_s32_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f32_z))) +svint32_t svcvt_s32_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_m))) +svint32_t svcvt_s32_m(svint32_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_x))) +svint32_t svcvt_s32_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s32_f64_z))) +svint32_t svcvt_s32_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_m))) +svint64_t svcvt_s64_m(svint64_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_x))) +svint64_t svcvt_s64_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f16_z))) +svint64_t svcvt_s64_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_m))) +svint64_t svcvt_s64_m(svint64_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_x))) +svint64_t svcvt_s64_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f32_z))) +svint64_t svcvt_s64_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_m))) +svint64_t svcvt_s64_m(svint64_t, svbool_t, svfloat64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_x))) +svint64_t svcvt_s64_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_s64_f64_z))) +svint64_t svcvt_s64_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_m))) +svuint16_t svcvt_u16_m(svuint16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_x))) +svuint16_t svcvt_u16_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u16_f16_z))) +svuint16_t svcvt_u16_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_m))) +svuint32_t svcvt_u32_m(svuint32_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_x))) +svuint32_t svcvt_u32_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f16_z))) +svuint32_t svcvt_u32_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_m))) +svuint32_t svcvt_u32_m(svuint32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_x))) +svuint32_t svcvt_u32_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f32_z))) +svuint32_t svcvt_u32_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_m))) +svuint32_t svcvt_u32_m(svuint32_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_x))) +svuint32_t svcvt_u32_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u32_f64_z))) +svuint32_t svcvt_u32_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_m))) +svuint64_t svcvt_u64_m(svuint64_t, svbool_t, svfloat16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_x))) +svuint64_t svcvt_u64_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f16_z))) +svuint64_t svcvt_u64_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_m))) +svuint64_t svcvt_u64_m(svuint64_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_x))) +svuint64_t svcvt_u64_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f32_z))) +svuint64_t svcvt_u64_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_m))) +svuint64_t svcvt_u64_m(svuint64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_x))) +svuint64_t svcvt_u64_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svcvt_u64_f64_z))) +svuint64_t svcvt_u64_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_m))) +svfloat64_t svdiv_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_m))) +svfloat32_t svdiv_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_m))) +svfloat16_t svdiv_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_x))) +svfloat64_t svdiv_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_x))) +svfloat32_t svdiv_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_x))) +svfloat16_t svdiv_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f64_z))) +svfloat64_t svdiv_z(svbool_t, svfloat64_t, float64_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f32_z))) +svfloat32_t svdiv_z(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_f16_z))) +svfloat16_t svdiv_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_m))) +svint32_t svdiv_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_m))) +svint64_t svdiv_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_x))) +svint32_t svdiv_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_x))) +svint64_t svdiv_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s32_z))) +svint32_t svdiv_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_s64_z))) +svint64_t svdiv_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_m))) +svuint32_t svdiv_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_m))) +svuint64_t svdiv_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_x))) +svuint32_t svdiv_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_x))) +svuint64_t svdiv_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u32_z))) +svuint32_t svdiv_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_n_u64_z))) +svuint64_t svdiv_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_m))) +svfloat64_t svdiv_m(svbool_t, svfloat64_t, svfloat64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_m))) +svfloat32_t svdiv_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_m))) +svfloat16_t svdiv_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_x))) +svfloat64_t svdiv_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_x))) +svfloat32_t svdiv_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_x))) +svfloat16_t svdiv_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f64_z))) +svfloat64_t svdiv_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f32_z))) +svfloat32_t svdiv_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_f16_z))) +svfloat16_t svdiv_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_m))) +svint32_t svdiv_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_m))) +svint64_t svdiv_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_x))) +svint32_t svdiv_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_x))) +svint64_t svdiv_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s32_z))) +svint32_t svdiv_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_s64_z))) +svint64_t svdiv_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_m))) +svuint32_t svdiv_m(svbool_t, svuint32_t, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_m))) +svuint64_t svdiv_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_x))) +svuint32_t svdiv_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_x))) +svuint64_t svdiv_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u32_z))) +svuint32_t svdiv_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdiv_u64_z))) +svuint64_t svdiv_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_m))) +svfloat64_t svdivr_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_m))) +svfloat32_t svdivr_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_m))) +svfloat16_t svdivr_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_x))) +svfloat64_t svdivr_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_x))) +svfloat32_t svdivr_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_x))) +svfloat16_t svdivr_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f64_z))) +svfloat64_t svdivr_z(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f32_z))) +svfloat32_t svdivr_z(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_f16_z))) +svfloat16_t svdivr_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_m))) +svint32_t svdivr_m(svbool_t, 
svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_m))) +svint64_t svdivr_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_x))) +svint32_t svdivr_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_x))) +svint64_t svdivr_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s32_z))) +svint32_t svdivr_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_s64_z))) +svint64_t svdivr_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_m))) +svuint32_t svdivr_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_m))) +svuint64_t svdivr_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_x))) +svuint32_t svdivr_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_x))) +svuint64_t svdivr_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u32_z))) +svuint32_t svdivr_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_n_u64_z))) +svuint64_t svdivr_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_m))) +svfloat64_t svdivr_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_m))) +svfloat32_t svdivr_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_m))) +svfloat16_t svdivr_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_x))) +svfloat64_t 
svdivr_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_x))) +svfloat32_t svdivr_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_x))) +svfloat16_t svdivr_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f64_z))) +svfloat64_t svdivr_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f32_z))) +svfloat32_t svdivr_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_f16_z))) +svfloat16_t svdivr_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_m))) +svint32_t svdivr_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_m))) +svint64_t svdivr_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_x))) +svint32_t svdivr_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_x))) +svint64_t svdivr_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s32_z))) +svint32_t svdivr_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_s64_z))) +svint64_t svdivr_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_m))) +svuint32_t svdivr_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_m))) +svuint64_t svdivr_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_x))) +svuint32_t svdivr_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_x))) 
+svuint64_t svdivr_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u32_z))) +svuint32_t svdivr_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdivr_u64_z))) +svuint64_t svdivr_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s32))) +svint32_t svdot(svint32_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_s64))) +svint64_t svdot(svint64_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u32))) +svuint32_t svdot(svuint32_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_n_u64))) +svuint64_t svdot(svuint64_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s32))) +svint32_t svdot(svint32_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_s64))) +svint64_t svdot(svint64_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u32))) +svuint32_t svdot(svuint32_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_u64))) +svuint64_t svdot(svuint64_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s32))) +svint32_t svdot_lane(svint32_t, svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_s64))) +svint64_t svdot_lane(svint64_t, svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u32))) +svuint32_t svdot_lane(svuint32_t, svuint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdot_lane_u64))) +svuint64_t svdot_lane(svuint64_t, svuint16_t, svuint16_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8))) +svuint8_t svdup_u8(uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32))) +svuint32_t svdup_u32(uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64))) +svuint64_t svdup_u64(uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16))) +svuint16_t svdup_u16(uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8))) +svint8_t svdup_s8(int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64))) +svfloat64_t svdup_f64(float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32))) +svfloat32_t svdup_f32(float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16))) +svfloat16_t svdup_f16(float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32))) +svint32_t svdup_s32(int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64))) +svint64_t svdup_s64(int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16))) +svint16_t svdup_s16(int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_m))) +svuint8_t svdup_u8_m(svuint8_t, svbool_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_m))) +svuint32_t svdup_u32_m(svuint32_t, svbool_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_m))) +svuint64_t svdup_u64_m(svuint64_t, svbool_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_m))) +svuint16_t svdup_u16_m(svuint16_t, svbool_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_m))) +svint8_t svdup_s8_m(svint8_t, svbool_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_m))) +svfloat64_t svdup_f64_m(svfloat64_t, svbool_t, float64_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_m))) +svfloat32_t svdup_f32_m(svfloat32_t, svbool_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_m))) +svfloat16_t svdup_f16_m(svfloat16_t, svbool_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_m))) +svint32_t svdup_s32_m(svint32_t, svbool_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_m))) +svint64_t svdup_s64_m(svint64_t, svbool_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_m))) +svint16_t svdup_s16_m(svint16_t, svbool_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b8))) +svbool_t svdup_b8(bool); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b32))) +svbool_t svdup_b32(bool); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b64))) +svbool_t svdup_b64(bool); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_b16))) +svbool_t svdup_b16(bool); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_x))) +svuint8_t svdup_u8_x(svbool_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_x))) +svuint32_t svdup_u32_x(svbool_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_x))) +svuint64_t svdup_u64_x(svbool_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_x))) +svuint16_t svdup_u16_x(svbool_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_x))) +svint8_t svdup_s8_x(svbool_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_x))) +svfloat64_t svdup_f64_x(svbool_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_x))) +svfloat32_t svdup_f32_x(svbool_t, float32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_x))) +svfloat16_t svdup_f16_x(svbool_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_x))) +svint32_t svdup_s32_x(svbool_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_x))) +svint64_t svdup_s64_x(svbool_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_x))) +svint16_t svdup_s16_x(svbool_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u8_z))) +svuint8_t svdup_u8_z(svbool_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u32_z))) +svuint32_t svdup_u32_z(svbool_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u64_z))) +svuint64_t svdup_u64_z(svbool_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_u16_z))) +svuint16_t svdup_u16_z(svbool_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s8_z))) +svint8_t svdup_s8_z(svbool_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f64_z))) +svfloat64_t svdup_f64_z(svbool_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f32_z))) +svfloat32_t svdup_f32_z(svbool_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_f16_z))) +svfloat16_t svdup_f16_z(svbool_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s32_z))) +svint32_t svdup_s32_z(svbool_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s64_z))) +svint64_t svdup_s64_z(svbool_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_n_s16_z))) +svint16_t svdup_s16_z(svbool_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u8))) +svuint8_t svdup_lane(svuint8_t, uint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u32))) +svuint32_t svdup_lane(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u64))) +svuint64_t svdup_lane(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_u16))) +svuint16_t svdup_lane(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s8))) +svint8_t svdup_lane(svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f64))) +svfloat64_t svdup_lane(svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f32))) +svfloat32_t svdup_lane(svfloat32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_f16))) +svfloat16_t svdup_lane(svfloat16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s32))) +svint32_t svdup_lane(svint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s64))) +svint64_t svdup_lane(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdup_lane_s16))) +svint16_t svdup_lane(svint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u8))) +svuint8_t svdupq_u8(uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s8))) +svint8_t svdupq_s8(int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u16))) +svuint16_t svdupq_u16(uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f16))) +svfloat16_t 
svdupq_f16(float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s16))) +svint16_t svdupq_s16(int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u32))) +svuint32_t svdupq_u32(uint32_t, uint32_t, uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f32))) +svfloat32_t svdupq_f32(float32_t, float32_t, float32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s32))) +svint32_t svdupq_s32(int32_t, int32_t, int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_u64))) +svuint64_t svdupq_u64(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_f64))) +svfloat64_t svdupq_f64(float64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_s64))) +svint64_t svdupq_s64(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b8))) +svbool_t svdupq_b8(bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool, bool); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b16))) +svbool_t svdupq_b16(bool, bool, bool, bool, bool, bool, bool, bool); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b32))) +svbool_t svdupq_b32(bool, bool, bool, bool); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_n_b64))) +svbool_t svdupq_b64(bool, bool); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u8))) +svuint8_t svdupq_lane(svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u32))) +svuint32_t svdupq_lane(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u64))) +svuint64_t 
svdupq_lane(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_u16))) +svuint16_t svdupq_lane(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s8))) +svint8_t svdupq_lane(svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f64))) +svfloat64_t svdupq_lane(svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f32))) +svfloat32_t svdupq_lane(svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_f16))) +svfloat16_t svdupq_lane(svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s32))) +svint32_t svdupq_lane(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s64))) +svint64_t svdupq_lane(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svdupq_lane_s16))) +svint16_t svdupq_lane(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_b_z))) +svbool_t sveor_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_m))) +svuint8_t sveor_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_m))) +svuint32_t sveor_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_m))) +svuint64_t sveor_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_m))) +svuint16_t sveor_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_m))) +svint8_t sveor_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_m))) +svint32_t sveor_m(svbool_t, svint32_t, int32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_m))) +svint64_t sveor_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_m))) +svint16_t sveor_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_x))) +svuint8_t sveor_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_x))) +svuint32_t sveor_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_x))) +svuint64_t sveor_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_x))) +svuint16_t sveor_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_x))) +svint8_t sveor_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_x))) +svint32_t sveor_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_x))) +svint64_t sveor_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_x))) +svint16_t sveor_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u8_z))) +svuint8_t sveor_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u32_z))) +svuint32_t sveor_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u64_z))) +svuint64_t sveor_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_u16_z))) +svuint16_t sveor_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s8_z))) +svint8_t sveor_z(svbool_t, svint8_t, int8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s32_z))) +svint32_t sveor_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s64_z))) +svint64_t sveor_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_n_s16_z))) +svint16_t sveor_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_m))) +svuint8_t sveor_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_m))) +svuint32_t sveor_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_m))) +svuint64_t sveor_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_m))) +svuint16_t sveor_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_m))) +svint8_t sveor_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_m))) +svint32_t sveor_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_m))) +svint64_t sveor_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_m))) +svint16_t sveor_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_x))) +svuint8_t sveor_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_x))) +svuint32_t sveor_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_x))) +svuint64_t sveor_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_x))) +svuint16_t sveor_x(svbool_t, svuint16_t, svuint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_x))) +svint8_t sveor_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_x))) +svint32_t sveor_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_x))) +svint64_t sveor_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_x))) +svint16_t sveor_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u8_z))) +svuint8_t sveor_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u32_z))) +svuint32_t sveor_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u64_z))) +svuint64_t sveor_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_u16_z))) +svuint16_t sveor_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s8_z))) +svint8_t sveor_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s32_z))) +svint32_t sveor_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s64_z))) +svint64_t sveor_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveor_s16_z))) +svint16_t sveor_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u8))) +uint8_t sveorv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u32))) +uint32_t sveorv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u64))) +uint64_t sveorv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_u16))) +uint16_t sveorv(svbool_t, 
svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s8))) +int8_t sveorv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s32))) +int32_t sveorv(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s64))) +int64_t sveorv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_sveorv_s16))) +int16_t sveorv(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u8))) +svuint8_t svext(svuint8_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u32))) +svuint32_t svext(svuint32_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u64))) +svuint64_t svext(svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_u16))) +svuint16_t svext(svuint16_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s8))) +svint8_t svext(svint8_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f64))) +svfloat64_t svext(svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f32))) +svfloat32_t svext(svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_f16))) +svfloat16_t svext(svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s32))) +svint32_t svext(svint32_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s64))) +svint64_t svext(svint64_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svext_s16))) +svint16_t svext(svint16_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_m))) +svint32_t svextb_m(svint32_t, svbool_t, svint32_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_m))) +svint64_t svextb_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_m))) +svint16_t svextb_m(svint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_x))) +svint32_t svextb_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_x))) +svint64_t svextb_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_x))) +svint16_t svextb_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s32_z))) +svint32_t svextb_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s64_z))) +svint64_t svextb_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_s16_z))) +svint16_t svextb_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_m))) +svuint32_t svextb_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_m))) +svuint64_t svextb_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_m))) +svuint16_t svextb_m(svuint16_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_x))) +svuint32_t svextb_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_x))) +svuint64_t svextb_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_x))) +svuint16_t svextb_x(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u32_z))) +svuint32_t svextb_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u64_z))) +svuint64_t svextb_z(svbool_t, svuint64_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextb_u16_z))) +svuint16_t svextb_z(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_m))) +svint32_t svexth_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_m))) +svint64_t svexth_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_x))) +svint32_t svexth_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_x))) +svint64_t svexth_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s32_z))) +svint32_t svexth_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_s64_z))) +svint64_t svexth_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_m))) +svuint32_t svexth_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_m))) +svuint64_t svexth_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_x))) +svuint32_t svexth_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_x))) +svuint64_t svexth_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u32_z))) +svuint32_t svexth_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svexth_u64_z))) +svuint64_t svexth_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_m))) +svint64_t svextw_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_x))) +svint64_t svextw_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_s64_z))) +svint64_t svextw_z(svbool_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_m))) +svuint64_t svextw_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_x))) +svuint64_t svextw_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svextw_u64_z))) +svuint64_t svextw_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u8))) +svuint8_t svget2(svuint8x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u32))) +svuint32_t svget2(svuint32x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u64))) +svuint64_t svget2(svuint64x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_u16))) +svuint16_t svget2(svuint16x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s8))) +svint8_t svget2(svint8x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f64))) +svfloat64_t svget2(svfloat64x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f32))) +svfloat32_t svget2(svfloat32x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_f16))) +svfloat16_t svget2(svfloat16x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s32))) +svint32_t svget2(svint32x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s64))) +svint64_t svget2(svint64x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget2_s16))) +svint16_t svget2(svint16x2_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u8))) +svuint8_t svget3(svuint8x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u32))) +svuint32_t svget3(svuint32x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u64))) 
+svuint64_t svget3(svuint64x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_u16))) +svuint16_t svget3(svuint16x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s8))) +svint8_t svget3(svint8x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f64))) +svfloat64_t svget3(svfloat64x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f32))) +svfloat32_t svget3(svfloat32x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_f16))) +svfloat16_t svget3(svfloat16x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s32))) +svint32_t svget3(svint32x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s64))) +svint64_t svget3(svint64x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget3_s16))) +svint16_t svget3(svint16x3_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u8))) +svuint8_t svget4(svuint8x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u32))) +svuint32_t svget4(svuint32x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u64))) +svuint64_t svget4(svuint64x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_u16))) +svuint16_t svget4(svuint16x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s8))) +svint8_t svget4(svint8x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f64))) +svfloat64_t svget4(svfloat64x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f32))) +svfloat32_t svget4(svfloat32x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_f16))) +svfloat16_t svget4(svfloat16x4_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s32))) +svint32_t svget4(svint32x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s64))) +svint64_t svget4(svint64x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svget4_s16))) +svint16_t svget4(svint16x4_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u8))) +svuint8_t svinsr(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u32))) +svuint32_t svinsr(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u64))) +svuint64_t svinsr(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_u16))) +svuint16_t svinsr(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s8))) +svint8_t svinsr(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f64))) +svfloat64_t svinsr(svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f32))) +svfloat32_t svinsr(svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_f16))) +svfloat16_t svinsr(svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s32))) +svint32_t svinsr(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s64))) +svint64_t svinsr(svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svinsr_n_s16))) +svint16_t svinsr(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u8))) +uint8_t svlasta(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u32))) +uint32_t svlasta(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u64))) +uint64_t svlasta(svbool_t, 
svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_u16))) +uint16_t svlasta(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s8))) +int8_t svlasta(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f64))) +float64_t svlasta(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f32))) +float32_t svlasta(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_f16))) +float16_t svlasta(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s32))) +int32_t svlasta(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s64))) +int64_t svlasta(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlasta_s16))) +int16_t svlasta(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u8))) +uint8_t svlastb(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u32))) +uint32_t svlastb(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u64))) +uint64_t svlastb(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_u16))) +uint16_t svlastb(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s8))) +int8_t svlastb(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f64))) +float64_t svlastb(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f32))) +float32_t svlastb(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_f16))) +float16_t svlastb(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s32))) +int32_t svlastb(svbool_t, 
svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s64))) +int64_t svlastb(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlastb_s16))) +int16_t svlastb(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u8))) +svuint8_t svld1(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u32))) +svuint32_t svld1(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u64))) +svuint64_t svld1(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_u16))) +svuint16_t svld1(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s8))) +svint8_t svld1(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f64))) +svfloat64_t svld1(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f32))) +svfloat32_t svld1(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_f16))) +svfloat16_t svld1(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s32))) +svint32_t svld1(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s64))) +svint64_t svld1(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_s16))) +svint16_t svld1(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u8))) +svuint8_t svld1_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u32))) +svuint32_t svld1_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u64))) +svuint64_t svld1_vnum(svbool_t, uint64_t const *, int64_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_u16))) +svuint16_t svld1_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s8))) +svint8_t svld1_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f64))) +svfloat64_t svld1_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f32))) +svfloat32_t svld1_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_f16))) +svfloat16_t svld1_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s32))) +svint32_t svld1_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s64))) +svint64_t svld1_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1_vnum_s16))) +svint16_t svld1_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u8))) +svuint8_t svld1rq(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u32))) +svuint32_t svld1rq(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u64))) +svuint64_t svld1rq(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_u16))) +svuint16_t svld1rq(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s8))) +svint8_t svld1rq(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f64))) +svfloat64_t svld1rq(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f32))) +svfloat32_t svld1rq(svbool_t, float32_t const 
*); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_f16))) +svfloat16_t svld1rq(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s32))) +svint32_t svld1rq(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s64))) +svint64_t svld1rq(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld1rq_s16))) +svint16_t svld1rq(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u8))) +svuint8x2_t svld2(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u32))) +svuint32x2_t svld2(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u64))) +svuint64x2_t svld2(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_u16))) +svuint16x2_t svld2(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s8))) +svint8x2_t svld2(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f64))) +svfloat64x2_t svld2(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f32))) +svfloat32x2_t svld2(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_f16))) +svfloat16x2_t svld2(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s32))) +svint32x2_t svld2(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s64))) +svint64x2_t svld2(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_s16))) +svint16x2_t svld2(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u8))) +svuint8x2_t svld2_vnum(svbool_t, uint8_t const *, int64_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u32))) +svuint32x2_t svld2_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u64))) +svuint64x2_t svld2_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_u16))) +svuint16x2_t svld2_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s8))) +svint8x2_t svld2_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f64))) +svfloat64x2_t svld2_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f32))) +svfloat32x2_t svld2_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_f16))) +svfloat16x2_t svld2_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s32))) +svint32x2_t svld2_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s64))) +svint64x2_t svld2_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld2_vnum_s16))) +svint16x2_t svld2_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u8))) +svuint8x3_t svld3(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u32))) +svuint32x3_t svld3(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u64))) +svuint64x3_t svld3(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_u16))) +svuint16x3_t svld3(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s8))) 
+svint8x3_t svld3(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f64))) +svfloat64x3_t svld3(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f32))) +svfloat32x3_t svld3(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_f16))) +svfloat16x3_t svld3(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s32))) +svint32x3_t svld3(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s64))) +svint64x3_t svld3(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_s16))) +svint16x3_t svld3(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u8))) +svuint8x3_t svld3_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u32))) +svuint32x3_t svld3_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u64))) +svuint64x3_t svld3_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_u16))) +svuint16x3_t svld3_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s8))) +svint8x3_t svld3_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f64))) +svfloat64x3_t svld3_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f32))) +svfloat32x3_t svld3_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_f16))) +svfloat16x3_t svld3_vnum(svbool_t, float16_t const *, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s32))) +svint32x3_t svld3_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s64))) +svint64x3_t svld3_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld3_vnum_s16))) +svint16x3_t svld3_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u8))) +svuint8x4_t svld4(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u32))) +svuint32x4_t svld4(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u64))) +svuint64x4_t svld4(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_u16))) +svuint16x4_t svld4(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s8))) +svint8x4_t svld4(svbool_t, int8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f64))) +svfloat64x4_t svld4(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f32))) +svfloat32x4_t svld4(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_f16))) +svfloat16x4_t svld4(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s32))) +svint32x4_t svld4(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s64))) +svint64x4_t svld4(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_s16))) +svint16x4_t svld4(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u8))) +svuint8x4_t svld4_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u32))) 
+svuint32x4_t svld4_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u64))) +svuint64x4_t svld4_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_u16))) +svuint16x4_t svld4_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s8))) +svint8x4_t svld4_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f64))) +svfloat64x4_t svld4_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f32))) +svfloat32x4_t svld4_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_f16))) +svfloat16x4_t svld4_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s32))) +svint32x4_t svld4_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s64))) +svint64x4_t svld4_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svld4_vnum_s16))) +svint16x4_t svld4_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u8))) +svuint8_t svldnt1(svbool_t, uint8_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u32))) +svuint32_t svldnt1(svbool_t, uint32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u64))) +svuint64_t svldnt1(svbool_t, uint64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_u16))) +svuint16_t svldnt1(svbool_t, uint16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s8))) +svint8_t svldnt1(svbool_t, int8_t const *); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f64))) +svfloat64_t svldnt1(svbool_t, float64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f32))) +svfloat32_t svldnt1(svbool_t, float32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_f16))) +svfloat16_t svldnt1(svbool_t, float16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s32))) +svint32_t svldnt1(svbool_t, int32_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s64))) +svint64_t svldnt1(svbool_t, int64_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_s16))) +svint16_t svldnt1(svbool_t, int16_t const *); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u8))) +svuint8_t svldnt1_vnum(svbool_t, uint8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u32))) +svuint32_t svldnt1_vnum(svbool_t, uint32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u64))) +svuint64_t svldnt1_vnum(svbool_t, uint64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_u16))) +svuint16_t svldnt1_vnum(svbool_t, uint16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s8))) +svint8_t svldnt1_vnum(svbool_t, int8_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f64))) +svfloat64_t svldnt1_vnum(svbool_t, float64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f32))) +svfloat32_t svldnt1_vnum(svbool_t, float32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_f16))) +svfloat16_t svldnt1_vnum(svbool_t, float16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s32))) +svint32_t 
svldnt1_vnum(svbool_t, int32_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s64))) +svint64_t svldnt1_vnum(svbool_t, int64_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svldnt1_vnum_s16))) +svint16_t svldnt1_vnum(svbool_t, int16_t const *, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u8))) +uint64_t svlen(svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u32))) +uint64_t svlen(svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u64))) +uint64_t svlen(svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_u16))) +uint64_t svlen(svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s8))) +uint64_t svlen(svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f64))) +uint64_t svlen(svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f32))) +uint64_t svlen(svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_f16))) +uint64_t svlen(svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s32))) +uint64_t svlen(svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s64))) +uint64_t svlen(svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlen_s16))) +uint64_t svlen(svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_m))) +svuint8_t svlsl_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_m))) +svuint32_t svlsl_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_m))) +svuint64_t svlsl_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_m))) +svuint16_t svlsl_m(svbool_t, svuint16_t, uint16_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_m))) +svint8_t svlsl_m(svbool_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_m))) +svint32_t svlsl_m(svbool_t, svint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_m))) +svint64_t svlsl_m(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_m))) +svint16_t svlsl_m(svbool_t, svint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_x))) +svuint8_t svlsl_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_x))) +svuint32_t svlsl_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_x))) +svuint64_t svlsl_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_x))) +svuint16_t svlsl_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_x))) +svint8_t svlsl_x(svbool_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_x))) +svint32_t svlsl_x(svbool_t, svint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_x))) +svint64_t svlsl_x(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_x))) +svint16_t svlsl_x(svbool_t, svint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u8_z))) +svuint8_t svlsl_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u32_z))) +svuint32_t svlsl_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u64_z))) +svuint64_t svlsl_z(svbool_t, svuint64_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_u16_z))) +svuint16_t svlsl_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s8_z))) +svint8_t svlsl_z(svbool_t, svint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s32_z))) +svint32_t svlsl_z(svbool_t, svint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s64_z))) +svint64_t svlsl_z(svbool_t, svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_n_s16_z))) +svint16_t svlsl_z(svbool_t, svint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_m))) +svuint8_t svlsl_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_m))) +svuint32_t svlsl_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_m))) +svuint64_t svlsl_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_m))) +svuint16_t svlsl_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_m))) +svint8_t svlsl_m(svbool_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_m))) +svint32_t svlsl_m(svbool_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_m))) +svint64_t svlsl_m(svbool_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_m))) +svint16_t svlsl_m(svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_x))) +svuint8_t svlsl_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_x))) +svuint32_t svlsl_x(svbool_t, svuint32_t, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_x))) +svuint64_t svlsl_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_x))) +svuint16_t svlsl_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_x))) +svint8_t svlsl_x(svbool_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_x))) +svint32_t svlsl_x(svbool_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_x))) +svint64_t svlsl_x(svbool_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_x))) +svint16_t svlsl_x(svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u8_z))) +svuint8_t svlsl_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u32_z))) +svuint32_t svlsl_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u64_z))) +svuint64_t svlsl_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_u16_z))) +svuint16_t svlsl_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s8_z))) +svint8_t svlsl_z(svbool_t, svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s32_z))) +svint32_t svlsl_z(svbool_t, svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s64_z))) +svint64_t svlsl_z(svbool_t, svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_s16_z))) +svint16_t svlsl_z(svbool_t, svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_m))) +svuint8_t svlsl_wide_m(svbool_t, svuint8_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_m))) +svuint32_t svlsl_wide_m(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_m))) +svuint16_t svlsl_wide_m(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_m))) +svint8_t svlsl_wide_m(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_m))) +svint32_t svlsl_wide_m(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_m))) +svint16_t svlsl_wide_m(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_x))) +svuint8_t svlsl_wide_x(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_x))) +svuint32_t svlsl_wide_x(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_x))) +svuint16_t svlsl_wide_x(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_x))) +svint8_t svlsl_wide_x(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_x))) +svint32_t svlsl_wide_x(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_x))) +svint16_t svlsl_wide_x(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u8_z))) +svuint8_t svlsl_wide_z(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u32_z))) +svuint32_t svlsl_wide_z(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_u16_z))) +svuint16_t svlsl_wide_z(svbool_t, svuint16_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s8_z))) +svint8_t svlsl_wide_z(svbool_t, svint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s32_z))) +svint32_t svlsl_wide_z(svbool_t, svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_n_s16_z))) +svint16_t svlsl_wide_z(svbool_t, svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_m))) +svuint8_t svlsl_wide_m(svbool_t, svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_m))) +svuint32_t svlsl_wide_m(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_m))) +svuint16_t svlsl_wide_m(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_m))) +svint8_t svlsl_wide_m(svbool_t, svint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_m))) +svint32_t svlsl_wide_m(svbool_t, svint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_m))) +svint16_t svlsl_wide_m(svbool_t, svint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_x))) +svuint8_t svlsl_wide_x(svbool_t, svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_x))) +svuint32_t svlsl_wide_x(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_x))) +svuint16_t svlsl_wide_x(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_x))) +svint8_t svlsl_wide_x(svbool_t, svint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_x))) +svint32_t svlsl_wide_x(svbool_t, svint32_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_x))) +svint16_t svlsl_wide_x(svbool_t, svint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u8_z))) +svuint8_t svlsl_wide_z(svbool_t, svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u32_z))) +svuint32_t svlsl_wide_z(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_u16_z))) +svuint16_t svlsl_wide_z(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s8_z))) +svint8_t svlsl_wide_z(svbool_t, svint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s32_z))) +svint32_t svlsl_wide_z(svbool_t, svint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsl_wide_s16_z))) +svint16_t svlsl_wide_z(svbool_t, svint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_m))) +svuint8_t svlsr_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_m))) +svuint32_t svlsr_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_m))) +svuint64_t svlsr_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_m))) +svuint16_t svlsr_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_x))) +svuint8_t svlsr_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_x))) +svuint32_t svlsr_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_x))) +svuint64_t svlsr_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_x))) +svuint16_t svlsr_x(svbool_t, 
svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u8_z))) +svuint8_t svlsr_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u32_z))) +svuint32_t svlsr_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u64_z))) +svuint64_t svlsr_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_n_u16_z))) +svuint16_t svlsr_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_m))) +svuint8_t svlsr_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_m))) +svuint32_t svlsr_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_m))) +svuint64_t svlsr_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_m))) +svuint16_t svlsr_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_x))) +svuint8_t svlsr_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_x))) +svuint32_t svlsr_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_x))) +svuint64_t svlsr_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_x))) +svuint16_t svlsr_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u8_z))) +svuint8_t svlsr_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u32_z))) +svuint32_t svlsr_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u64_z))) +svuint64_t svlsr_z(svbool_t, svuint64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_u16_z))) +svuint16_t svlsr_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_m))) +svuint8_t svlsr_wide_m(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_m))) +svuint32_t svlsr_wide_m(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_m))) +svuint16_t svlsr_wide_m(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_x))) +svuint8_t svlsr_wide_x(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_x))) +svuint32_t svlsr_wide_x(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_x))) +svuint16_t svlsr_wide_x(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u8_z))) +svuint8_t svlsr_wide_z(svbool_t, svuint8_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u32_z))) +svuint32_t svlsr_wide_z(svbool_t, svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_n_u16_z))) +svuint16_t svlsr_wide_z(svbool_t, svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_m))) +svuint8_t svlsr_wide_m(svbool_t, svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_m))) +svuint32_t svlsr_wide_m(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_m))) +svuint16_t svlsr_wide_m(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_x))) +svuint8_t svlsr_wide_x(svbool_t, svuint8_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_x))) +svuint32_t svlsr_wide_x(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_x))) +svuint16_t svlsr_wide_x(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u8_z))) +svuint8_t svlsr_wide_z(svbool_t, svuint8_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u32_z))) +svuint32_t svlsr_wide_z(svbool_t, svuint32_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svlsr_wide_u16_z))) +svuint16_t svlsr_wide_z(svbool_t, svuint16_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_m))) +svfloat64_t svmad_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_m))) +svfloat32_t svmad_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_m))) +svfloat16_t svmad_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_x))) +svfloat64_t svmad_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_x))) +svfloat32_t svmad_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_x))) +svfloat16_t svmad_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f64_z))) +svfloat64_t svmad_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f32_z))) +svfloat32_t svmad_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_f16_z))) +svfloat16_t svmad_z(svbool_t, 
svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_m))) +svuint8_t svmad_m(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_m))) +svuint32_t svmad_m(svbool_t, svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_m))) +svuint64_t svmad_m(svbool_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_m))) +svuint16_t svmad_m(svbool_t, svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_m))) +svint8_t svmad_m(svbool_t, svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_m))) +svint32_t svmad_m(svbool_t, svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_m))) +svint64_t svmad_m(svbool_t, svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_m))) +svint16_t svmad_m(svbool_t, svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_x))) +svuint8_t svmad_x(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_x))) +svuint32_t svmad_x(svbool_t, svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_x))) +svuint64_t svmad_x(svbool_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_x))) +svuint16_t svmad_x(svbool_t, svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_x))) +svint8_t svmad_x(svbool_t, svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_x))) +svint32_t svmad_x(svbool_t, svint32_t, svint32_t, 
int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_x))) +svint64_t svmad_x(svbool_t, svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_x))) +svint16_t svmad_x(svbool_t, svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u8_z))) +svuint8_t svmad_z(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u32_z))) +svuint32_t svmad_z(svbool_t, svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u64_z))) +svuint64_t svmad_z(svbool_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_u16_z))) +svuint16_t svmad_z(svbool_t, svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s8_z))) +svint8_t svmad_z(svbool_t, svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s32_z))) +svint32_t svmad_z(svbool_t, svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s64_z))) +svint64_t svmad_z(svbool_t, svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_n_s16_z))) +svint16_t svmad_z(svbool_t, svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_m))) +svfloat64_t svmad_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_m))) +svfloat32_t svmad_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_m))) +svfloat16_t svmad_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_x))) +svfloat64_t svmad_x(svbool_t, svfloat64_t, svfloat64_t, 
svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_x))) +svfloat32_t svmad_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_x))) +svfloat16_t svmad_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f64_z))) +svfloat64_t svmad_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f32_z))) +svfloat32_t svmad_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_f16_z))) +svfloat16_t svmad_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_m))) +svuint8_t svmad_m(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_m))) +svuint32_t svmad_m(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_m))) +svuint64_t svmad_m(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_m))) +svuint16_t svmad_m(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_m))) +svint8_t svmad_m(svbool_t, svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_m))) +svint32_t svmad_m(svbool_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_m))) +svint64_t svmad_m(svbool_t, svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_m))) +svint16_t svmad_m(svbool_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_x))) +svuint8_t svmad_x(svbool_t, svuint8_t, 
svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_x))) +svuint32_t svmad_x(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_x))) +svuint64_t svmad_x(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_x))) +svuint16_t svmad_x(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_x))) +svint8_t svmad_x(svbool_t, svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_x))) +svint32_t svmad_x(svbool_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_x))) +svint64_t svmad_x(svbool_t, svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_x))) +svint16_t svmad_x(svbool_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u8_z))) +svuint8_t svmad_z(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u32_z))) +svuint32_t svmad_z(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u64_z))) +svuint64_t svmad_z(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_u16_z))) +svuint16_t svmad_z(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s8_z))) +svint8_t svmad_z(svbool_t, svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s32_z))) +svint32_t svmad_z(svbool_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s64_z))) +svint64_t svmad_z(svbool_t, svint64_t, svint64_t, svint64_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmad_s16_z))) +svint16_t svmad_z(svbool_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_m))) +svfloat64_t svmax_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_m))) +svfloat32_t svmax_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_m))) +svfloat16_t svmax_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_x))) +svfloat64_t svmax_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_x))) +svfloat32_t svmax_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_x))) +svfloat16_t svmax_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f64_z))) +svfloat64_t svmax_z(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f32_z))) +svfloat32_t svmax_z(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_f16_z))) +svfloat16_t svmax_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_m))) +svint8_t svmax_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_m))) +svint32_t svmax_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_m))) +svint64_t svmax_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_m))) +svint16_t svmax_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_x))) +svint8_t svmax_x(svbool_t, svint8_t, int8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_x))) +svint32_t svmax_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_x))) +svint64_t svmax_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_x))) +svint16_t svmax_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s8_z))) +svint8_t svmax_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s32_z))) +svint32_t svmax_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s64_z))) +svint64_t svmax_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_s16_z))) +svint16_t svmax_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_m))) +svuint8_t svmax_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_m))) +svuint32_t svmax_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_m))) +svuint64_t svmax_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_m))) +svuint16_t svmax_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_x))) +svuint8_t svmax_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_x))) +svuint32_t svmax_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_x))) +svuint64_t svmax_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_x))) +svuint16_t svmax_x(svbool_t, svuint16_t, uint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u8_z))) +svuint8_t svmax_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u32_z))) +svuint32_t svmax_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u64_z))) +svuint64_t svmax_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_n_u16_z))) +svuint16_t svmax_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_m))) +svfloat64_t svmax_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_m))) +svfloat32_t svmax_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_m))) +svfloat16_t svmax_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_x))) +svfloat64_t svmax_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_x))) +svfloat32_t svmax_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_x))) +svfloat16_t svmax_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f64_z))) +svfloat64_t svmax_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f32_z))) +svfloat32_t svmax_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_f16_z))) +svfloat16_t svmax_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_m))) +svint8_t svmax_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_m))) +svint32_t svmax_m(svbool_t, svint32_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_m))) +svint64_t svmax_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_m))) +svint16_t svmax_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_x))) +svint8_t svmax_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_x))) +svint32_t svmax_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_x))) +svint64_t svmax_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_x))) +svint16_t svmax_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s8_z))) +svint8_t svmax_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s32_z))) +svint32_t svmax_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s64_z))) +svint64_t svmax_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_s16_z))) +svint16_t svmax_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_m))) +svuint8_t svmax_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_m))) +svuint32_t svmax_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_m))) +svuint64_t svmax_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_m))) +svuint16_t svmax_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_x))) +svuint8_t svmax_x(svbool_t, svuint8_t, svuint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_x))) +svuint32_t svmax_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_x))) +svuint64_t svmax_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_x))) +svuint16_t svmax_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u8_z))) +svuint8_t svmax_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u32_z))) +svuint32_t svmax_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u64_z))) +svuint64_t svmax_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmax_u16_z))) +svuint16_t svmax_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_m))) +svfloat64_t svmaxnm_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_m))) +svfloat32_t svmaxnm_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_m))) +svfloat16_t svmaxnm_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_x))) +svfloat64_t svmaxnm_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_x))) +svfloat32_t svmaxnm_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_x))) +svfloat16_t svmaxnm_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f64_z))) +svfloat64_t svmaxnm_z(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f32_z))) +svfloat32_t svmaxnm_z(svbool_t, 
svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_n_f16_z))) +svfloat16_t svmaxnm_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_m))) +svfloat64_t svmaxnm_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_m))) +svfloat32_t svmaxnm_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_m))) +svfloat16_t svmaxnm_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_x))) +svfloat64_t svmaxnm_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_x))) +svfloat32_t svmaxnm_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_x))) +svfloat16_t svmaxnm_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f64_z))) +svfloat64_t svmaxnm_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f32_z))) +svfloat32_t svmaxnm_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnm_f16_z))) +svfloat16_t svmaxnm_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f64))) +float64_t svmaxnmv(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f32))) +float32_t svmaxnmv(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxnmv_f16))) +float16_t svmaxnmv(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f64))) +float64_t svmaxv(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f32))) +float32_t 
svmaxv(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_f16))) +float16_t svmaxv(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s8))) +int8_t svmaxv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s32))) +int32_t svmaxv(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s64))) +int64_t svmaxv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_s16))) +int16_t svmaxv(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u8))) +uint8_t svmaxv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u32))) +uint32_t svmaxv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u64))) +uint64_t svmaxv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmaxv_u16))) +uint16_t svmaxv(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_m))) +svfloat64_t svmin_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_m))) +svfloat32_t svmin_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_m))) +svfloat16_t svmin_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_x))) +svfloat64_t svmin_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_x))) +svfloat32_t svmin_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_x))) +svfloat16_t svmin_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f64_z))) +svfloat64_t svmin_z(svbool_t, svfloat64_t, 
float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f32_z))) +svfloat32_t svmin_z(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_f16_z))) +svfloat16_t svmin_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_m))) +svint8_t svmin_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_m))) +svint32_t svmin_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_m))) +svint64_t svmin_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_m))) +svint16_t svmin_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_x))) +svint8_t svmin_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_x))) +svint32_t svmin_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_x))) +svint64_t svmin_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_x))) +svint16_t svmin_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s8_z))) +svint8_t svmin_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s32_z))) +svint32_t svmin_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s64_z))) +svint64_t svmin_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_s16_z))) +svint16_t svmin_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_m))) +svuint8_t svmin_m(svbool_t, svuint8_t, uint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_m))) +svuint32_t svmin_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_m))) +svuint64_t svmin_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_m))) +svuint16_t svmin_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_x))) +svuint8_t svmin_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_x))) +svuint32_t svmin_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_x))) +svuint64_t svmin_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_x))) +svuint16_t svmin_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u8_z))) +svuint8_t svmin_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u32_z))) +svuint32_t svmin_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u64_z))) +svuint64_t svmin_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_n_u16_z))) +svuint16_t svmin_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_m))) +svfloat64_t svmin_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_m))) +svfloat32_t svmin_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_m))) +svfloat16_t svmin_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_x))) +svfloat64_t svmin_x(svbool_t, svfloat64_t, svfloat64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_x))) +svfloat32_t svmin_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_x))) +svfloat16_t svmin_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f64_z))) +svfloat64_t svmin_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f32_z))) +svfloat32_t svmin_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_f16_z))) +svfloat16_t svmin_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_m))) +svint8_t svmin_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_m))) +svint32_t svmin_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_m))) +svint64_t svmin_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_m))) +svint16_t svmin_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_x))) +svint8_t svmin_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_x))) +svint32_t svmin_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_x))) +svint64_t svmin_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_x))) +svint16_t svmin_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s8_z))) +svint8_t svmin_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s32_z))) +svint32_t svmin_z(svbool_t, svint32_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s64_z))) +svint64_t svmin_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_s16_z))) +svint16_t svmin_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_m))) +svuint8_t svmin_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_m))) +svuint32_t svmin_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_m))) +svuint64_t svmin_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_m))) +svuint16_t svmin_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_x))) +svuint8_t svmin_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_x))) +svuint32_t svmin_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_x))) +svuint64_t svmin_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_x))) +svuint16_t svmin_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u8_z))) +svuint8_t svmin_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u32_z))) +svuint32_t svmin_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u64_z))) +svuint64_t svmin_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmin_u16_z))) +svuint16_t svmin_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_m))) +svfloat64_t svminnm_m(svbool_t, svfloat64_t, float64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_m))) +svfloat32_t svminnm_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_m))) +svfloat16_t svminnm_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_x))) +svfloat64_t svminnm_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_x))) +svfloat32_t svminnm_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_x))) +svfloat16_t svminnm_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f64_z))) +svfloat64_t svminnm_z(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f32_z))) +svfloat32_t svminnm_z(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_n_f16_z))) +svfloat16_t svminnm_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_m))) +svfloat64_t svminnm_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_m))) +svfloat32_t svminnm_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_m))) +svfloat16_t svminnm_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_x))) +svfloat64_t svminnm_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_x))) +svfloat32_t svminnm_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_x))) +svfloat16_t svminnm_x(svbool_t, svfloat16_t, svfloat16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f64_z))) +svfloat64_t svminnm_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f32_z))) +svfloat32_t svminnm_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnm_f16_z))) +svfloat16_t svminnm_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f64))) +float64_t svminnmv(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f32))) +float32_t svminnmv(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminnmv_f16))) +float16_t svminnmv(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f64))) +float64_t svminv(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f32))) +float32_t svminv(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_f16))) +float16_t svminv(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s8))) +int8_t svminv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s32))) +int32_t svminv(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s64))) +int64_t svminv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_s16))) +int16_t svminv(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u8))) +uint8_t svminv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u32))) +uint32_t svminv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u64))) +uint64_t svminv(svbool_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svminv_u16))) +uint16_t svminv(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_m))) +svfloat64_t svmla_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_m))) +svfloat32_t svmla_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_m))) +svfloat16_t svmla_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_x))) +svfloat64_t svmla_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_x))) +svfloat32_t svmla_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_x))) +svfloat16_t svmla_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f64_z))) +svfloat64_t svmla_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f32_z))) +svfloat32_t svmla_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_f16_z))) +svfloat16_t svmla_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_m))) +svuint8_t svmla_m(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_m))) +svuint32_t svmla_m(svbool_t, svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_m))) +svuint64_t svmla_m(svbool_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_m))) +svuint16_t svmla_m(svbool_t, svuint16_t, 
svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_m))) +svint8_t svmla_m(svbool_t, svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_m))) +svint32_t svmla_m(svbool_t, svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_m))) +svint64_t svmla_m(svbool_t, svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_m))) +svint16_t svmla_m(svbool_t, svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_x))) +svuint8_t svmla_x(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_x))) +svuint32_t svmla_x(svbool_t, svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_x))) +svuint64_t svmla_x(svbool_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_x))) +svuint16_t svmla_x(svbool_t, svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_x))) +svint8_t svmla_x(svbool_t, svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_x))) +svint32_t svmla_x(svbool_t, svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_x))) +svint64_t svmla_x(svbool_t, svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_x))) +svint16_t svmla_x(svbool_t, svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u8_z))) +svuint8_t svmla_z(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u32_z))) +svuint32_t svmla_z(svbool_t, svuint32_t, svuint32_t, uint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u64_z))) +svuint64_t svmla_z(svbool_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_u16_z))) +svuint16_t svmla_z(svbool_t, svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s8_z))) +svint8_t svmla_z(svbool_t, svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s32_z))) +svint32_t svmla_z(svbool_t, svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s64_z))) +svint64_t svmla_z(svbool_t, svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_n_s16_z))) +svint16_t svmla_z(svbool_t, svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_m))) +svfloat64_t svmla_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_m))) +svfloat32_t svmla_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_m))) +svfloat16_t svmla_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_x))) +svfloat64_t svmla_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_x))) +svfloat32_t svmla_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_x))) +svfloat16_t svmla_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f64_z))) +svfloat64_t svmla_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f32_z))) +svfloat32_t svmla_z(svbool_t, svfloat32_t, 
svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_f16_z))) +svfloat16_t svmla_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_m))) +svuint8_t svmla_m(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_m))) +svuint32_t svmla_m(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_m))) +svuint64_t svmla_m(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_m))) +svuint16_t svmla_m(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_m))) +svint8_t svmla_m(svbool_t, svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_m))) +svint32_t svmla_m(svbool_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_m))) +svint64_t svmla_m(svbool_t, svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_m))) +svint16_t svmla_m(svbool_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_x))) +svuint8_t svmla_x(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_x))) +svuint32_t svmla_x(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_x))) +svuint64_t svmla_x(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_x))) +svuint16_t svmla_x(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_x))) +svint8_t svmla_x(svbool_t, svint8_t, svint8_t, 
svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_x))) +svint32_t svmla_x(svbool_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_x))) +svint64_t svmla_x(svbool_t, svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_x))) +svint16_t svmla_x(svbool_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u8_z))) +svuint8_t svmla_z(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u32_z))) +svuint32_t svmla_z(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u64_z))) +svuint64_t svmla_z(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_u16_z))) +svuint16_t svmla_z(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s8_z))) +svint8_t svmla_z(svbool_t, svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s32_z))) +svint32_t svmla_z(svbool_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s64_z))) +svint64_t svmla_z(svbool_t, svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_s16_z))) +svint16_t svmla_z(svbool_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f64))) +svfloat64_t svmla_lane(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f32))) +svfloat32_t svmla_lane(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmla_lane_f16))) +svfloat16_t svmla_lane(svfloat16_t, svfloat16_t, 
svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_m))) +svfloat64_t svmls_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_m))) +svfloat32_t svmls_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_m))) +svfloat16_t svmls_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_x))) +svfloat64_t svmls_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_x))) +svfloat32_t svmls_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_x))) +svfloat16_t svmls_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f64_z))) +svfloat64_t svmls_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f32_z))) +svfloat32_t svmls_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_f16_z))) +svfloat16_t svmls_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_m))) +svuint8_t svmls_m(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_m))) +svuint32_t svmls_m(svbool_t, svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_m))) +svuint64_t svmls_m(svbool_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_m))) +svuint16_t svmls_m(svbool_t, svuint16_t, svuint16_t, uint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_m))) +svint8_t svmls_m(svbool_t, svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_m))) +svint32_t svmls_m(svbool_t, svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_m))) +svint64_t svmls_m(svbool_t, svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_m))) +svint16_t svmls_m(svbool_t, svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_x))) +svuint8_t svmls_x(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_x))) +svuint32_t svmls_x(svbool_t, svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_x))) +svuint64_t svmls_x(svbool_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_x))) +svuint16_t svmls_x(svbool_t, svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_x))) +svint8_t svmls_x(svbool_t, svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_x))) +svint32_t svmls_x(svbool_t, svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_x))) +svint64_t svmls_x(svbool_t, svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_x))) +svint16_t svmls_x(svbool_t, svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u8_z))) +svuint8_t svmls_z(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u32_z))) +svuint32_t svmls_z(svbool_t, svuint32_t, svuint32_t, uint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u64_z))) +svuint64_t svmls_z(svbool_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_u16_z))) +svuint16_t svmls_z(svbool_t, svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s8_z))) +svint8_t svmls_z(svbool_t, svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s32_z))) +svint32_t svmls_z(svbool_t, svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s64_z))) +svint64_t svmls_z(svbool_t, svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_n_s16_z))) +svint16_t svmls_z(svbool_t, svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_m))) +svfloat64_t svmls_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_m))) +svfloat32_t svmls_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_m))) +svfloat16_t svmls_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_x))) +svfloat64_t svmls_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_x))) +svfloat32_t svmls_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_x))) +svfloat16_t svmls_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f64_z))) +svfloat64_t svmls_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f32_z))) +svfloat32_t svmls_z(svbool_t, svfloat32_t, 
svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_f16_z))) +svfloat16_t svmls_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_m))) +svuint8_t svmls_m(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_m))) +svuint32_t svmls_m(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_m))) +svuint64_t svmls_m(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_m))) +svuint16_t svmls_m(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_m))) +svint8_t svmls_m(svbool_t, svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_m))) +svint32_t svmls_m(svbool_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_m))) +svint64_t svmls_m(svbool_t, svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_m))) +svint16_t svmls_m(svbool_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_x))) +svuint8_t svmls_x(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_x))) +svuint32_t svmls_x(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_x))) +svuint64_t svmls_x(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_x))) +svuint16_t svmls_x(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_x))) +svint8_t svmls_x(svbool_t, svint8_t, svint8_t, 
svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_x))) +svint32_t svmls_x(svbool_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_x))) +svint64_t svmls_x(svbool_t, svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_x))) +svint16_t svmls_x(svbool_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u8_z))) +svuint8_t svmls_z(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u32_z))) +svuint32_t svmls_z(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u64_z))) +svuint64_t svmls_z(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_u16_z))) +svuint16_t svmls_z(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s8_z))) +svint8_t svmls_z(svbool_t, svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s32_z))) +svint32_t svmls_z(svbool_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s64_z))) +svint64_t svmls_z(svbool_t, svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_s16_z))) +svint16_t svmls_z(svbool_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f64))) +svfloat64_t svmls_lane(svfloat64_t, svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f32))) +svfloat32_t svmls_lane(svfloat32_t, svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmls_lane_f16))) +svfloat16_t svmls_lane(svfloat16_t, svfloat16_t, 
svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmov_b_z))) +svbool_t svmov_z(svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_m))) +svfloat64_t svmsb_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_m))) +svfloat32_t svmsb_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_m))) +svfloat16_t svmsb_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_x))) +svfloat64_t svmsb_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_x))) +svfloat32_t svmsb_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_x))) +svfloat16_t svmsb_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f64_z))) +svfloat64_t svmsb_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f32_z))) +svfloat32_t svmsb_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_f16_z))) +svfloat16_t svmsb_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_m))) +svuint8_t svmsb_m(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_m))) +svuint32_t svmsb_m(svbool_t, svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_m))) +svuint64_t svmsb_m(svbool_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_m))) +svuint16_t 
svmsb_m(svbool_t, svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_m))) +svint8_t svmsb_m(svbool_t, svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_m))) +svint32_t svmsb_m(svbool_t, svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_m))) +svint64_t svmsb_m(svbool_t, svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_m))) +svint16_t svmsb_m(svbool_t, svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_x))) +svuint8_t svmsb_x(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_x))) +svuint32_t svmsb_x(svbool_t, svuint32_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_x))) +svuint64_t svmsb_x(svbool_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_x))) +svuint16_t svmsb_x(svbool_t, svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_x))) +svint8_t svmsb_x(svbool_t, svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_x))) +svint32_t svmsb_x(svbool_t, svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_x))) +svint64_t svmsb_x(svbool_t, svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_x))) +svint16_t svmsb_x(svbool_t, svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u8_z))) +svuint8_t svmsb_z(svbool_t, svuint8_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u32_z))) +svuint32_t svmsb_z(svbool_t, svuint32_t, 
svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u64_z))) +svuint64_t svmsb_z(svbool_t, svuint64_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_u16_z))) +svuint16_t svmsb_z(svbool_t, svuint16_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s8_z))) +svint8_t svmsb_z(svbool_t, svint8_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s32_z))) +svint32_t svmsb_z(svbool_t, svint32_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s64_z))) +svint64_t svmsb_z(svbool_t, svint64_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_n_s16_z))) +svint16_t svmsb_z(svbool_t, svint16_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_m))) +svfloat64_t svmsb_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_m))) +svfloat32_t svmsb_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_m))) +svfloat16_t svmsb_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_x))) +svfloat64_t svmsb_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_x))) +svfloat32_t svmsb_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_x))) +svfloat16_t svmsb_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f64_z))) +svfloat64_t svmsb_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f32_z))) +svfloat32_t 
svmsb_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_f16_z))) +svfloat16_t svmsb_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_m))) +svuint8_t svmsb_m(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_m))) +svuint32_t svmsb_m(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_m))) +svuint64_t svmsb_m(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_m))) +svuint16_t svmsb_m(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_m))) +svint8_t svmsb_m(svbool_t, svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_m))) +svint32_t svmsb_m(svbool_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_m))) +svint64_t svmsb_m(svbool_t, svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_m))) +svint16_t svmsb_m(svbool_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_x))) +svuint8_t svmsb_x(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_x))) +svuint32_t svmsb_x(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_x))) +svuint64_t svmsb_x(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_x))) +svuint16_t svmsb_x(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_x))) +svint8_t 
svmsb_x(svbool_t, svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_x))) +svint32_t svmsb_x(svbool_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_x))) +svint64_t svmsb_x(svbool_t, svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_x))) +svint16_t svmsb_x(svbool_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u8_z))) +svuint8_t svmsb_z(svbool_t, svuint8_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u32_z))) +svuint32_t svmsb_z(svbool_t, svuint32_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u64_z))) +svuint64_t svmsb_z(svbool_t, svuint64_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_u16_z))) +svuint16_t svmsb_z(svbool_t, svuint16_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s8_z))) +svint8_t svmsb_z(svbool_t, svint8_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s32_z))) +svint32_t svmsb_z(svbool_t, svint32_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s64_z))) +svint64_t svmsb_z(svbool_t, svint64_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmsb_s16_z))) +svint16_t svmsb_z(svbool_t, svint16_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_m))) +svfloat64_t svmul_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_m))) +svfloat32_t svmul_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_m))) +svfloat16_t svmul_m(svbool_t, svfloat16_t, float16_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_x))) +svfloat64_t svmul_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_x))) +svfloat32_t svmul_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_x))) +svfloat16_t svmul_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f64_z))) +svfloat64_t svmul_z(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f32_z))) +svfloat32_t svmul_z(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_f16_z))) +svfloat16_t svmul_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_m))) +svuint8_t svmul_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_m))) +svuint32_t svmul_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_m))) +svuint64_t svmul_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_m))) +svuint16_t svmul_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_m))) +svint8_t svmul_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_m))) +svint32_t svmul_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_m))) +svint64_t svmul_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_m))) +svint16_t svmul_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_x))) +svuint8_t svmul_x(svbool_t, svuint8_t, uint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_x))) +svuint32_t svmul_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_x))) +svuint64_t svmul_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_x))) +svuint16_t svmul_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_x))) +svint8_t svmul_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_x))) +svint32_t svmul_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_x))) +svint64_t svmul_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_x))) +svint16_t svmul_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u8_z))) +svuint8_t svmul_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u32_z))) +svuint32_t svmul_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u64_z))) +svuint64_t svmul_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_u16_z))) +svuint16_t svmul_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s8_z))) +svint8_t svmul_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s32_z))) +svint32_t svmul_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s64_z))) +svint64_t svmul_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_n_s16_z))) +svint16_t svmul_z(svbool_t, svint16_t, int16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_m))) +svfloat64_t svmul_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_m))) +svfloat32_t svmul_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_m))) +svfloat16_t svmul_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_x))) +svfloat64_t svmul_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_x))) +svfloat32_t svmul_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_x))) +svfloat16_t svmul_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f64_z))) +svfloat64_t svmul_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f32_z))) +svfloat32_t svmul_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_f16_z))) +svfloat16_t svmul_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_m))) +svuint8_t svmul_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_m))) +svuint32_t svmul_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_m))) +svuint64_t svmul_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_m))) +svuint16_t svmul_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_m))) +svint8_t svmul_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_m))) +svint32_t svmul_m(svbool_t, svint32_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_m))) +svint64_t svmul_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_m))) +svint16_t svmul_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_x))) +svuint8_t svmul_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_x))) +svuint32_t svmul_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_x))) +svuint64_t svmul_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_x))) +svuint16_t svmul_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_x))) +svint8_t svmul_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_x))) +svint32_t svmul_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_x))) +svint64_t svmul_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_x))) +svint16_t svmul_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u8_z))) +svuint8_t svmul_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u32_z))) +svuint32_t svmul_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u64_z))) +svuint64_t svmul_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_u16_z))) +svuint16_t svmul_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s8_z))) +svint8_t svmul_z(svbool_t, svint8_t, svint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s32_z))) +svint32_t svmul_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s64_z))) +svint64_t svmul_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_s16_z))) +svint16_t svmul_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f64))) +svfloat64_t svmul_lane(svfloat64_t, svfloat64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f32))) +svfloat32_t svmul_lane(svfloat32_t, svfloat32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmul_lane_f16))) +svfloat16_t svmul_lane(svfloat16_t, svfloat16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_m))) +svint8_t svmulh_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_m))) +svint32_t svmulh_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_m))) +svint64_t svmulh_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_m))) +svint16_t svmulh_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_x))) +svint8_t svmulh_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_x))) +svint32_t svmulh_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_x))) +svint64_t svmulh_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_x))) +svint16_t svmulh_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s8_z))) +svint8_t svmulh_z(svbool_t, svint8_t, int8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s32_z))) +svint32_t svmulh_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s64_z))) +svint64_t svmulh_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_s16_z))) +svint16_t svmulh_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_m))) +svuint8_t svmulh_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_m))) +svuint32_t svmulh_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_m))) +svuint64_t svmulh_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_m))) +svuint16_t svmulh_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_x))) +svuint8_t svmulh_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_x))) +svuint32_t svmulh_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_x))) +svuint64_t svmulh_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_x))) +svuint16_t svmulh_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u8_z))) +svuint8_t svmulh_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u32_z))) +svuint32_t svmulh_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u64_z))) +svuint64_t svmulh_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_n_u16_z))) +svuint16_t svmulh_z(svbool_t, svuint16_t, uint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_m))) +svint8_t svmulh_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_m))) +svint32_t svmulh_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_m))) +svint64_t svmulh_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_m))) +svint16_t svmulh_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_x))) +svint8_t svmulh_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_x))) +svint32_t svmulh_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_x))) +svint64_t svmulh_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_x))) +svint16_t svmulh_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s8_z))) +svint8_t svmulh_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s32_z))) +svint32_t svmulh_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s64_z))) +svint64_t svmulh_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_s16_z))) +svint16_t svmulh_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_m))) +svuint8_t svmulh_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_m))) +svuint32_t svmulh_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_m))) +svuint64_t svmulh_m(svbool_t, svuint64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_m))) +svuint16_t svmulh_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_x))) +svuint8_t svmulh_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_x))) +svuint32_t svmulh_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_x))) +svuint64_t svmulh_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_x))) +svuint16_t svmulh_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u8_z))) +svuint8_t svmulh_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u32_z))) +svuint32_t svmulh_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u64_z))) +svuint64_t svmulh_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulh_u16_z))) +svuint16_t svmulh_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_m))) +svfloat64_t svmulx_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_m))) +svfloat32_t svmulx_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_m))) +svfloat16_t svmulx_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_x))) +svfloat64_t svmulx_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_x))) +svfloat32_t svmulx_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_x))) +svfloat16_t svmulx_x(svbool_t, 
svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f64_z))) +svfloat64_t svmulx_z(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f32_z))) +svfloat32_t svmulx_z(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_n_f16_z))) +svfloat16_t svmulx_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_m))) +svfloat64_t svmulx_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_m))) +svfloat32_t svmulx_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_m))) +svfloat16_t svmulx_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_x))) +svfloat64_t svmulx_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_x))) +svfloat32_t svmulx_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_x))) +svfloat16_t svmulx_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f64_z))) +svfloat64_t svmulx_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f32_z))) +svfloat32_t svmulx_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svmulx_f16_z))) +svfloat16_t svmulx_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnand_b_z))) +svbool_t svnand_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_m))) +svfloat64_t svneg_m(svfloat64_t, svbool_t, svfloat64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_m))) +svfloat32_t svneg_m(svfloat32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_m))) +svfloat16_t svneg_m(svfloat16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_x))) +svfloat64_t svneg_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_x))) +svfloat32_t svneg_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_x))) +svfloat16_t svneg_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f64_z))) +svfloat64_t svneg_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f32_z))) +svfloat32_t svneg_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_f16_z))) +svfloat16_t svneg_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_m))) +svint8_t svneg_m(svint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_m))) +svint32_t svneg_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_m))) +svint64_t svneg_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_m))) +svint16_t svneg_m(svint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_x))) +svint8_t svneg_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_x))) +svint32_t svneg_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_x))) +svint64_t svneg_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_x))) +svint16_t svneg_x(svbool_t, svint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s8_z))) +svint8_t svneg_z(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s32_z))) +svint32_t svneg_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s64_z))) +svint64_t svneg_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svneg_s16_z))) +svint16_t svneg_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_m))) +svfloat64_t svnmad_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_m))) +svfloat32_t svnmad_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_m))) +svfloat16_t svnmad_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_x))) +svfloat64_t svnmad_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_x))) +svfloat32_t svnmad_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_x))) +svfloat16_t svnmad_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f64_z))) +svfloat64_t svnmad_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f32_z))) +svfloat32_t svnmad_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_n_f16_z))) +svfloat16_t svnmad_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_m))) +svfloat64_t svnmad_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_m))) +svfloat32_t svnmad_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_m))) +svfloat16_t svnmad_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_x))) +svfloat64_t svnmad_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_x))) +svfloat32_t svnmad_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_x))) +svfloat16_t svnmad_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f64_z))) +svfloat64_t svnmad_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f32_z))) +svfloat32_t svnmad_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmad_f16_z))) +svfloat16_t svnmad_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_m))) +svfloat64_t svnmla_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_m))) +svfloat32_t svnmla_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_m))) +svfloat16_t svnmla_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_x))) +svfloat64_t svnmla_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_x))) +svfloat32_t svnmla_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_x))) +svfloat16_t svnmla_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f64_z))) +svfloat64_t svnmla_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f32_z))) +svfloat32_t svnmla_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_n_f16_z))) +svfloat16_t svnmla_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_m))) +svfloat64_t svnmla_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_m))) +svfloat32_t svnmla_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_m))) +svfloat16_t svnmla_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_x))) +svfloat64_t svnmla_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_x))) +svfloat32_t svnmla_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_x))) +svfloat16_t svnmla_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f64_z))) +svfloat64_t svnmla_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f32_z))) +svfloat32_t svnmla_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmla_f16_z))) +svfloat16_t svnmla_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_m))) +svfloat64_t svnmls_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_m))) +svfloat32_t svnmls_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_m))) +svfloat16_t svnmls_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_x))) +svfloat64_t svnmls_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_x))) +svfloat32_t svnmls_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_x))) +svfloat16_t svnmls_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f64_z))) +svfloat64_t svnmls_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f32_z))) +svfloat32_t svnmls_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_n_f16_z))) +svfloat16_t svnmls_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_m))) +svfloat64_t svnmls_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_m))) +svfloat32_t svnmls_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_m))) +svfloat16_t svnmls_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_x))) +svfloat64_t svnmls_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_x))) +svfloat32_t svnmls_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_x))) +svfloat16_t svnmls_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f64_z))) +svfloat64_t svnmls_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f32_z))) +svfloat32_t svnmls_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmls_f16_z))) +svfloat16_t svnmls_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_m))) +svfloat64_t svnmsb_m(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_m))) +svfloat32_t svnmsb_m(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_m))) +svfloat16_t svnmsb_m(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_x))) +svfloat64_t svnmsb_x(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_x))) +svfloat32_t svnmsb_x(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_x))) +svfloat16_t svnmsb_x(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f64_z))) +svfloat64_t svnmsb_z(svbool_t, svfloat64_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f32_z))) +svfloat32_t svnmsb_z(svbool_t, svfloat32_t, svfloat32_t, float32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_n_f16_z))) +svfloat16_t svnmsb_z(svbool_t, svfloat16_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_m))) +svfloat64_t svnmsb_m(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_m))) +svfloat32_t svnmsb_m(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_m))) +svfloat16_t svnmsb_m(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_x))) +svfloat64_t svnmsb_x(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_x))) +svfloat32_t svnmsb_x(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_x))) +svfloat16_t svnmsb_x(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f64_z))) +svfloat64_t svnmsb_z(svbool_t, svfloat64_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f32_z))) +svfloat32_t svnmsb_z(svbool_t, svfloat32_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnmsb_f16_z))) +svfloat16_t svnmsb_z(svbool_t, svfloat16_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnor_b_z))) +svbool_t svnor_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_b_z))) +svbool_t svnot_z(svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_m))) +svuint8_t svnot_m(svuint8_t, svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_m))) +svuint32_t svnot_m(svuint32_t, svbool_t, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_m))) +svuint64_t svnot_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_m))) +svuint16_t svnot_m(svuint16_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_m))) +svint8_t svnot_m(svint8_t, svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_m))) +svint32_t svnot_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_m))) +svint64_t svnot_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_m))) +svint16_t svnot_m(svint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_x))) +svuint8_t svnot_x(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_x))) +svuint32_t svnot_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_x))) +svuint64_t svnot_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_x))) +svuint16_t svnot_x(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_x))) +svint8_t svnot_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_x))) +svint32_t svnot_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_x))) +svint64_t svnot_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_x))) +svint16_t svnot_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u8_z))) +svuint8_t svnot_z(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u32_z))) +svuint32_t svnot_z(svbool_t, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u64_z))) +svuint64_t svnot_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_u16_z))) +svuint16_t svnot_z(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s8_z))) +svint8_t svnot_z(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s32_z))) +svint32_t svnot_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s64_z))) +svint64_t svnot_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svnot_s16_z))) +svint16_t svnot_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorn_b_z))) +svbool_t svorn_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_b_z))) +svbool_t svorr_z(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_m))) +svuint8_t svorr_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_m))) +svuint32_t svorr_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_m))) +svuint64_t svorr_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_m))) +svuint16_t svorr_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_m))) +svint8_t svorr_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_m))) +svint32_t svorr_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_m))) +svint64_t svorr_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_m))) +svint16_t svorr_m(svbool_t, svint16_t, int16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_x))) +svuint8_t svorr_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_x))) +svuint32_t svorr_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_x))) +svuint64_t svorr_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_x))) +svuint16_t svorr_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_x))) +svint8_t svorr_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_x))) +svint32_t svorr_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_x))) +svint64_t svorr_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_x))) +svint16_t svorr_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u8_z))) +svuint8_t svorr_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u32_z))) +svuint32_t svorr_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u64_z))) +svuint64_t svorr_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_u16_z))) +svuint16_t svorr_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s8_z))) +svint8_t svorr_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s32_z))) +svint32_t svorr_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s64_z))) +svint64_t svorr_z(svbool_t, svint64_t, int64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_n_s16_z))) +svint16_t svorr_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_m))) +svuint8_t svorr_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_m))) +svuint32_t svorr_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_m))) +svuint64_t svorr_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_m))) +svuint16_t svorr_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_m))) +svint8_t svorr_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_m))) +svint32_t svorr_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_m))) +svint64_t svorr_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_m))) +svint16_t svorr_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_x))) +svuint8_t svorr_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_x))) +svuint32_t svorr_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_x))) +svuint64_t svorr_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_x))) +svuint16_t svorr_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_x))) +svint8_t svorr_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_x))) +svint32_t svorr_x(svbool_t, svint32_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_x))) +svint64_t svorr_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_x))) +svint16_t svorr_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u8_z))) +svuint8_t svorr_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u32_z))) +svuint32_t svorr_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u64_z))) +svuint64_t svorr_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_u16_z))) +svuint16_t svorr_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s8_z))) +svint8_t svorr_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s32_z))) +svint32_t svorr_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s64_z))) +svint64_t svorr_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorr_s16_z))) +svint16_t svorr_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u8))) +uint8_t svorv(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u32))) +uint32_t svorv(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u64))) +uint64_t svorv(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_u16))) +uint16_t svorv(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s8))) +int8_t svorv(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s32))) +int32_t svorv(svbool_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s64))) +int64_t svorv(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svorv_s16))) +int16_t svorv(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfalse_b))) +svbool_t svpfalse(void); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svpfirst_b))) +svbool_t svpfirst(svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s8))) +svint8_t svqadd(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s32))) +svint32_t svqadd(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s64))) +svint64_t svqadd(svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_s16))) +svint16_t svqadd(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u8))) +svuint8_t svqadd(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u32))) +svuint32_t svqadd(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u64))) +svuint64_t svqadd(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_n_u16))) +svuint16_t svqadd(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s8))) +svint8_t svqadd(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s32))) +svint32_t svqadd(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s64))) +svint64_t svqadd(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_s16))) +svint16_t svqadd(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u8))) +svuint8_t svqadd(svuint8_t, svuint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u32))) +svuint32_t svqadd(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u64))) +svuint64_t svqadd(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqadd_u16))) +svuint16_t svqadd(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s32))) +int32_t svqdecb(int32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_s64))) +int64_t svqdecb(int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u32))) +uint32_t svqdecb(uint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_n_u64))) +uint64_t svqdecb(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s32))) +int32_t svqdecb_pat(int32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_s64))) +int64_t svqdecb_pat(int64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u32))) +uint32_t svqdecb_pat(uint32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecb_pat_n_u64))) +uint64_t svqdecb_pat(uint64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s32))) +int32_t svqdecd(int32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_s64))) +int64_t svqdecd(int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u32))) +uint32_t svqdecd(uint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_n_u64))) +uint64_t svqdecd(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_s64))) +svint64_t svqdecd(svint64_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_u64))) +svuint64_t svqdecd(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s32))) +int32_t svqdecd_pat(int32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_s64))) +int64_t svqdecd_pat(int64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u32))) +uint32_t svqdecd_pat(uint32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_n_u64))) +uint64_t svqdecd_pat(uint64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_s64))) +svint64_t svqdecd_pat(svint64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecd_pat_u64))) +svuint64_t svqdecd_pat(svuint64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s32))) +int32_t svqdech(int32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_s64))) +int64_t svqdech(int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u32))) +uint32_t svqdech(uint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_n_u64))) +uint64_t svqdech(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_s16))) +svint16_t svqdech(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_u16))) +svuint16_t svqdech(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s32))) +int32_t svqdech_pat(int32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_s64))) +int64_t svqdech_pat(int64_t, enum svpattern, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u32))) +uint32_t svqdech_pat(uint32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_n_u64))) +uint64_t svqdech_pat(uint64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_s16))) +svint16_t svqdech_pat(svint16_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdech_pat_u16))) +svuint16_t svqdech_pat(svuint16_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b8))) +int32_t svqdecp_b8(int32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b32))) +int32_t svqdecp_b32(int32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b64))) +int32_t svqdecp_b64(int32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s32_b16))) +int32_t svqdecp_b16(int32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b8))) +int64_t svqdecp_b8(int64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b32))) +int64_t svqdecp_b32(int64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b64))) +int64_t svqdecp_b64(int64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_s64_b16))) +int64_t svqdecp_b16(int64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b8))) +uint32_t svqdecp_b8(uint32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b32))) +uint32_t svqdecp_b32(uint32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b64))) +uint32_t svqdecp_b64(uint32_t, svbool_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u32_b16))) +uint32_t svqdecp_b16(uint32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b8))) +uint64_t svqdecp_b8(uint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b32))) +uint64_t svqdecp_b32(uint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b64))) +uint64_t svqdecp_b64(uint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_n_u64_b16))) +uint64_t svqdecp_b16(uint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s32))) +svint32_t svqdecp(svint32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s64))) +svint64_t svqdecp(svint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_s16))) +svint16_t svqdecp(svint16_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u32))) +svuint32_t svqdecp(svuint32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u64))) +svuint64_t svqdecp(svuint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecp_u16))) +svuint16_t svqdecp(svuint16_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s32))) +int32_t svqdecw(int32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_s64))) +int64_t svqdecw(int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u32))) +uint32_t svqdecw(uint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_n_u64))) +uint64_t svqdecw(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_s32))) +svint32_t svqdecw(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_u32))) 
+svuint32_t svqdecw(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s32))) +int32_t svqdecw_pat(int32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_s64))) +int64_t svqdecw_pat(int64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u32))) +uint32_t svqdecw_pat(uint32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_n_u64))) +uint64_t svqdecw_pat(uint64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_s32))) +svint32_t svqdecw_pat(svint32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqdecw_pat_u32))) +svuint32_t svqdecw_pat(svuint32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s32))) +int32_t svqincb(int32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_s64))) +int64_t svqincb(int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u32))) +uint32_t svqincb(uint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_n_u64))) +uint64_t svqincb(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s32))) +int32_t svqincb_pat(int32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_s64))) +int64_t svqincb_pat(int64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u32))) +uint32_t svqincb_pat(uint32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincb_pat_n_u64))) +uint64_t svqincb_pat(uint64_t, enum svpattern, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s32))) +int32_t svqincd(int32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_s64))) +int64_t svqincd(int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u32))) +uint32_t svqincd(uint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_n_u64))) +uint64_t svqincd(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_s64))) +svint64_t svqincd(svint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_u64))) +svuint64_t svqincd(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s32))) +int32_t svqincd_pat(int32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_s64))) +int64_t svqincd_pat(int64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u32))) +uint32_t svqincd_pat(uint32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_n_u64))) +uint64_t svqincd_pat(uint64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_s64))) +svint64_t svqincd_pat(svint64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincd_pat_u64))) +svuint64_t svqincd_pat(svuint64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s32))) +int32_t svqinch(int32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_s64))) +int64_t svqinch(int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u32))) +uint32_t svqinch(uint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_n_u64))) +uint64_t 
svqinch(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_s16))) +svint16_t svqinch(svint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_u16))) +svuint16_t svqinch(svuint16_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s32))) +int32_t svqinch_pat(int32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_s64))) +int64_t svqinch_pat(int64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u32))) +uint32_t svqinch_pat(uint32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_n_u64))) +uint64_t svqinch_pat(uint64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_s16))) +svint16_t svqinch_pat(svint16_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqinch_pat_u16))) +svuint16_t svqinch_pat(svuint16_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b8))) +int32_t svqincp_b8(int32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b32))) +int32_t svqincp_b32(int32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b64))) +int32_t svqincp_b64(int32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s32_b16))) +int32_t svqincp_b16(int32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b8))) +int64_t svqincp_b8(int64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b32))) +int64_t svqincp_b32(int64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b64))) +int64_t svqincp_b64(int64_t, svbool_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_s64_b16))) +int64_t svqincp_b16(int64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b8))) +uint32_t svqincp_b8(uint32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b32))) +uint32_t svqincp_b32(uint32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b64))) +uint32_t svqincp_b64(uint32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u32_b16))) +uint32_t svqincp_b16(uint32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b8))) +uint64_t svqincp_b8(uint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b32))) +uint64_t svqincp_b32(uint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b64))) +uint64_t svqincp_b64(uint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_n_u64_b16))) +uint64_t svqincp_b16(uint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s32))) +svint32_t svqincp(svint32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s64))) +svint64_t svqincp(svint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_s16))) +svint16_t svqincp(svint16_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u32))) +svuint32_t svqincp(svuint32_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u64))) +svuint64_t svqincp(svuint64_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincp_u16))) +svuint16_t svqincp(svuint16_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s32))) +int32_t svqincw(int32_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_s64))) +int64_t svqincw(int64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u32))) +uint32_t svqincw(uint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_n_u64))) +uint64_t svqincw(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_s32))) +svint32_t svqincw(svint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_u32))) +svuint32_t svqincw(svuint32_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s32))) +int32_t svqincw_pat(int32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_s64))) +int64_t svqincw_pat(int64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u32))) +uint32_t svqincw_pat(uint32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_n_u64))) +uint64_t svqincw_pat(uint64_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_s32))) +svint32_t svqincw_pat(svint32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqincw_pat_u32))) +svuint32_t svqincw_pat(svuint32_t, enum svpattern, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s8))) +svint8_t svqsub(svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s32))) +svint32_t svqsub(svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s64))) +svint64_t svqsub(svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_s16))) +svint16_t svqsub(svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u8))) +svuint8_t 
svqsub(svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u32))) +svuint32_t svqsub(svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u64))) +svuint64_t svqsub(svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_n_u16))) +svuint16_t svqsub(svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s8))) +svint8_t svqsub(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s32))) +svint32_t svqsub(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s64))) +svint64_t svqsub(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_s16))) +svint16_t svqsub(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u8))) +svuint8_t svqsub(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u32))) +svuint32_t svqsub(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u64))) +svuint64_t svqsub(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svqsub_u16))) +svuint16_t svqsub(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_m))) +svuint8_t svrbit_m(svuint8_t, svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_m))) +svuint32_t svrbit_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_m))) +svuint64_t svrbit_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_m))) +svuint16_t svrbit_m(svuint16_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_m))) +svint8_t svrbit_m(svint8_t, svbool_t, svint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_m))) +svint32_t svrbit_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_m))) +svint64_t svrbit_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_m))) +svint16_t svrbit_m(svint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_x))) +svuint8_t svrbit_x(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_x))) +svuint32_t svrbit_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_x))) +svuint64_t svrbit_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_x))) +svuint16_t svrbit_x(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_x))) +svint8_t svrbit_x(svbool_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_x))) +svint32_t svrbit_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_x))) +svint64_t svrbit_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_x))) +svint16_t svrbit_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u8_z))) +svuint8_t svrbit_z(svbool_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u32_z))) +svuint32_t svrbit_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u64_z))) +svuint64_t svrbit_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_u16_z))) +svuint16_t svrbit_z(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s8_z))) +svint8_t svrbit_z(svbool_t, svint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s32_z))) +svint32_t svrbit_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s64_z))) +svint64_t svrbit_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrbit_s16_z))) +svint16_t svrbit_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f64))) +svfloat64_t svrecpe(svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f32))) +svfloat32_t svrecpe(svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpe_f16))) +svfloat16_t svrecpe(svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f64))) +svfloat64_t svrecps(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f32))) +svfloat32_t svrecps(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecps_f16))) +svfloat16_t svrecps(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_m))) +svfloat64_t svrecpx_m(svfloat64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_m))) +svfloat32_t svrecpx_m(svfloat32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_m))) +svfloat16_t svrecpx_m(svfloat16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_x))) +svfloat64_t svrecpx_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_x))) +svfloat32_t svrecpx_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_x))) +svfloat16_t svrecpx_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f64_z))) +svfloat64_t svrecpx_z(svbool_t, svfloat64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f32_z))) +svfloat32_t svrecpx_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrecpx_f16_z))) +svfloat16_t svrecpx_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u8))) +svuint8_t svrev(svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u32))) +svuint32_t svrev(svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u64))) +svuint64_t svrev(svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_u16))) +svuint16_t svrev(svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s8))) +svint8_t svrev(svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f64))) +svfloat64_t svrev(svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f32))) +svfloat32_t svrev(svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_f16))) +svfloat16_t svrev(svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s32))) +svint32_t svrev(svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s64))) +svint64_t svrev(svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrev_s16))) +svint16_t svrev(svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_m))) +svuint32_t svrevb_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_m))) +svuint64_t svrevb_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_m))) +svuint16_t svrevb_m(svuint16_t, svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_m))) +svint32_t svrevb_m(svint32_t, svbool_t, svint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_m))) +svint64_t svrevb_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_m))) +svint16_t svrevb_m(svint16_t, svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_x))) +svuint32_t svrevb_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_x))) +svuint64_t svrevb_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_x))) +svuint16_t svrevb_x(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_x))) +svint32_t svrevb_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_x))) +svint64_t svrevb_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_x))) +svint16_t svrevb_x(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u32_z))) +svuint32_t svrevb_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u64_z))) +svuint64_t svrevb_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_u16_z))) +svuint16_t svrevb_z(svbool_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s32_z))) +svint32_t svrevb_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s64_z))) +svint64_t svrevb_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevb_s16_z))) +svint16_t svrevb_z(svbool_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_m))) +svuint32_t svrevh_m(svuint32_t, svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_m))) +svuint64_t svrevh_m(svuint64_t, svbool_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_m))) +svint32_t svrevh_m(svint32_t, svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_m))) +svint64_t svrevh_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_x))) +svuint32_t svrevh_x(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_x))) +svuint64_t svrevh_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_x))) +svint32_t svrevh_x(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_x))) +svint64_t svrevh_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u32_z))) +svuint32_t svrevh_z(svbool_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_u64_z))) +svuint64_t svrevh_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s32_z))) +svint32_t svrevh_z(svbool_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevh_s64_z))) +svint64_t svrevh_z(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_m))) +svuint64_t svrevw_m(svuint64_t, svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_m))) +svint64_t svrevw_m(svint64_t, svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_x))) +svuint64_t svrevw_x(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_x))) +svint64_t svrevw_x(svbool_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_u64_z))) +svuint64_t svrevw_z(svbool_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrevw_s64_z))) +svint64_t svrevw_z(svbool_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_m))) +svfloat64_t svrinta_m(svfloat64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_m))) +svfloat32_t svrinta_m(svfloat32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_m))) +svfloat16_t svrinta_m(svfloat16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_x))) +svfloat64_t svrinta_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_x))) +svfloat32_t svrinta_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_x))) +svfloat16_t svrinta_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f64_z))) +svfloat64_t svrinta_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f32_z))) +svfloat32_t svrinta_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinta_f16_z))) +svfloat16_t svrinta_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_m))) +svfloat64_t svrinti_m(svfloat64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_m))) +svfloat32_t svrinti_m(svfloat32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_m))) +svfloat16_t svrinti_m(svfloat16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_x))) +svfloat64_t svrinti_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_x))) +svfloat32_t svrinti_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_x))) +svfloat16_t svrinti_x(svbool_t, svfloat16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f64_z))) +svfloat64_t svrinti_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f32_z))) +svfloat32_t svrinti_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrinti_f16_z))) +svfloat16_t svrinti_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_m))) +svfloat64_t svrintm_m(svfloat64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_m))) +svfloat32_t svrintm_m(svfloat32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_m))) +svfloat16_t svrintm_m(svfloat16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_x))) +svfloat64_t svrintm_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_x))) +svfloat32_t svrintm_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_x))) +svfloat16_t svrintm_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f64_z))) +svfloat64_t svrintm_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f32_z))) +svfloat32_t svrintm_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintm_f16_z))) +svfloat16_t svrintm_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_m))) +svfloat64_t svrintn_m(svfloat64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_m))) +svfloat32_t svrintn_m(svfloat32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_m))) +svfloat16_t svrintn_m(svfloat16_t, svbool_t, svfloat16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_x))) +svfloat64_t svrintn_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_x))) +svfloat32_t svrintn_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_x))) +svfloat16_t svrintn_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f64_z))) +svfloat64_t svrintn_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f32_z))) +svfloat32_t svrintn_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintn_f16_z))) +svfloat16_t svrintn_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_m))) +svfloat64_t svrintp_m(svfloat64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_m))) +svfloat32_t svrintp_m(svfloat32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_m))) +svfloat16_t svrintp_m(svfloat16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_x))) +svfloat64_t svrintp_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_x))) +svfloat32_t svrintp_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_x))) +svfloat16_t svrintp_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f64_z))) +svfloat64_t svrintp_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f32_z))) +svfloat32_t svrintp_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintp_f16_z))) +svfloat16_t svrintp_z(svbool_t, svfloat16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_m))) +svfloat64_t svrintx_m(svfloat64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_m))) +svfloat32_t svrintx_m(svfloat32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_m))) +svfloat16_t svrintx_m(svfloat16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_x))) +svfloat64_t svrintx_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_x))) +svfloat32_t svrintx_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_x))) +svfloat16_t svrintx_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f64_z))) +svfloat64_t svrintx_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f32_z))) +svfloat32_t svrintx_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintx_f16_z))) +svfloat16_t svrintx_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_m))) +svfloat64_t svrintz_m(svfloat64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_m))) +svfloat32_t svrintz_m(svfloat32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_m))) +svfloat16_t svrintz_m(svfloat16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_x))) +svfloat64_t svrintz_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_x))) +svfloat32_t svrintz_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_x))) +svfloat16_t svrintz_x(svbool_t, svfloat16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f64_z))) +svfloat64_t svrintz_z(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f32_z))) +svfloat32_t svrintz_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrintz_f16_z))) +svfloat16_t svrintz_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f64))) +svfloat64_t svrsqrte(svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f32))) +svfloat32_t svrsqrte(svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrte_f16))) +svfloat16_t svrsqrte(svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f64))) +svfloat64_t svrsqrts(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f32))) +svfloat32_t svrsqrts(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svrsqrts_f16))) +svfloat16_t svrsqrts(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_m))) +svfloat64_t svscale_m(svbool_t, svfloat64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_m))) +svfloat32_t svscale_m(svbool_t, svfloat32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_m))) +svfloat16_t svscale_m(svbool_t, svfloat16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_x))) +svfloat64_t svscale_x(svbool_t, svfloat64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_x))) +svfloat32_t svscale_x(svbool_t, svfloat32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_x))) +svfloat16_t svscale_x(svbool_t, svfloat16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f64_z))) 
+svfloat64_t svscale_z(svbool_t, svfloat64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f32_z))) +svfloat32_t svscale_z(svbool_t, svfloat32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_n_f16_z))) +svfloat16_t svscale_z(svbool_t, svfloat16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_m))) +svfloat64_t svscale_m(svbool_t, svfloat64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_m))) +svfloat32_t svscale_m(svbool_t, svfloat32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_m))) +svfloat16_t svscale_m(svbool_t, svfloat16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_x))) +svfloat64_t svscale_x(svbool_t, svfloat64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_x))) +svfloat32_t svscale_x(svbool_t, svfloat32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_x))) +svfloat16_t svscale_x(svbool_t, svfloat16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f64_z))) +svfloat64_t svscale_z(svbool_t, svfloat64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f32_z))) +svfloat32_t svscale_z(svbool_t, svfloat32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svscale_f16_z))) +svfloat16_t svscale_z(svbool_t, svfloat16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_b))) +svbool_t svsel(svbool_t, svbool_t, svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u8))) +svuint8_t svsel(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u32))) +svuint32_t svsel(svbool_t, svuint32_t, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u64))) +svuint64_t svsel(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_u16))) +svuint16_t svsel(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s8))) +svint8_t svsel(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f64))) +svfloat64_t svsel(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f32))) +svfloat32_t svsel(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_f16))) +svfloat16_t svsel(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s32))) +svint32_t svsel(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s64))) +svint64_t svsel(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsel_s16))) +svint16_t svsel(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u8))) +svuint8x2_t svset2(svuint8x2_t, uint64_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u32))) +svuint32x2_t svset2(svuint32x2_t, uint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u64))) +svuint64x2_t svset2(svuint64x2_t, uint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_u16))) +svuint16x2_t svset2(svuint16x2_t, uint64_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s8))) +svint8x2_t svset2(svint8x2_t, uint64_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f64))) +svfloat64x2_t svset2(svfloat64x2_t, uint64_t, svfloat64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f32))) +svfloat32x2_t svset2(svfloat32x2_t, uint64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_f16))) +svfloat16x2_t svset2(svfloat16x2_t, uint64_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s32))) +svint32x2_t svset2(svint32x2_t, uint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s64))) +svint64x2_t svset2(svint64x2_t, uint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset2_s16))) +svint16x2_t svset2(svint16x2_t, uint64_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u8))) +svuint8x3_t svset3(svuint8x3_t, uint64_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u32))) +svuint32x3_t svset3(svuint32x3_t, uint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u64))) +svuint64x3_t svset3(svuint64x3_t, uint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_u16))) +svuint16x3_t svset3(svuint16x3_t, uint64_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s8))) +svint8x3_t svset3(svint8x3_t, uint64_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f64))) +svfloat64x3_t svset3(svfloat64x3_t, uint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f32))) +svfloat32x3_t svset3(svfloat32x3_t, uint64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_f16))) +svfloat16x3_t svset3(svfloat16x3_t, uint64_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s32))) +svint32x3_t svset3(svint32x3_t, uint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s64))) +svint64x3_t svset3(svint64x3_t, uint64_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svset3_s16))) +svint16x3_t svset3(svint16x3_t, uint64_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u8))) +svuint8x4_t svset4(svuint8x4_t, uint64_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u32))) +svuint32x4_t svset4(svuint32x4_t, uint64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u64))) +svuint64x4_t svset4(svuint64x4_t, uint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_u16))) +svuint16x4_t svset4(svuint16x4_t, uint64_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s8))) +svint8x4_t svset4(svint8x4_t, uint64_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f64))) +svfloat64x4_t svset4(svfloat64x4_t, uint64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f32))) +svfloat32x4_t svset4(svfloat32x4_t, uint64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_f16))) +svfloat16x4_t svset4(svfloat16x4_t, uint64_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s32))) +svint32x4_t svset4(svint32x4_t, uint64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s64))) +svint64x4_t svset4(svint64x4_t, uint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svset4_s16))) +svint16x4_t svset4(svint16x4_t, uint64_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u8))) +svuint8_t svsplice(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u32))) +svuint32_t svsplice(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u64))) +svuint64_t svsplice(svbool_t, svuint64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_u16))) +svuint16_t svsplice(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s8))) +svint8_t svsplice(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f64))) +svfloat64_t svsplice(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f32))) +svfloat32_t svsplice(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_f16))) +svfloat16_t svsplice(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s32))) +svint32_t svsplice(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s64))) +svint64_t svsplice(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsplice_s16))) +svint16_t svsplice(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_m))) +svfloat64_t svsqrt_m(svfloat64_t, svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_m))) +svfloat32_t svsqrt_m(svfloat32_t, svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_m))) +svfloat16_t svsqrt_m(svfloat16_t, svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_x))) +svfloat64_t svsqrt_x(svbool_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_x))) +svfloat32_t svsqrt_x(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_x))) +svfloat16_t svsqrt_x(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f64_z))) +svfloat64_t svsqrt_z(svbool_t, svfloat64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f32_z))) +svfloat32_t svsqrt_z(svbool_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsqrt_f16_z))) +svfloat16_t svsqrt_z(svbool_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u8))) +void svst1(svbool_t, uint8_t *, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u32))) +void svst1(svbool_t, uint32_t *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u64))) +void svst1(svbool_t, uint64_t *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_u16))) +void svst1(svbool_t, uint16_t *, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s8))) +void svst1(svbool_t, int8_t *, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f64))) +void svst1(svbool_t, float64_t *, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f32))) +void svst1(svbool_t, float32_t *, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_f16))) +void svst1(svbool_t, float16_t *, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s32))) +void svst1(svbool_t, int32_t *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s64))) +void svst1(svbool_t, int64_t *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_s16))) +void svst1(svbool_t, int16_t *, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u8))) +void svst1_vnum(svbool_t, uint8_t *, int64_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u32))) +void svst1_vnum(svbool_t, uint32_t *, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u64))) +void svst1_vnum(svbool_t, uint64_t *, int64_t, svuint64_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_u16))) +void svst1_vnum(svbool_t, uint16_t *, int64_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s8))) +void svst1_vnum(svbool_t, int8_t *, int64_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f64))) +void svst1_vnum(svbool_t, float64_t *, int64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f32))) +void svst1_vnum(svbool_t, float32_t *, int64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_f16))) +void svst1_vnum(svbool_t, float16_t *, int64_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s32))) +void svst1_vnum(svbool_t, int32_t *, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s64))) +void svst1_vnum(svbool_t, int64_t *, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1_vnum_s16))) +void svst1_vnum(svbool_t, int16_t *, int64_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s32))) +void svst1b(svbool_t, int8_t *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s64))) +void svst1b(svbool_t, int8_t *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_s16))) +void svst1b(svbool_t, int8_t *, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u32))) +void svst1b(svbool_t, uint8_t *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u64))) +void svst1b(svbool_t, uint8_t *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_u16))) +void svst1b(svbool_t, uint8_t *, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s32))) +void svst1b_vnum(svbool_t, int8_t *, int64_t, svint32_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s64))) +void svst1b_vnum(svbool_t, int8_t *, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_s16))) +void svst1b_vnum(svbool_t, int8_t *, int64_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u32))) +void svst1b_vnum(svbool_t, uint8_t *, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u64))) +void svst1b_vnum(svbool_t, uint8_t *, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1b_vnum_u16))) +void svst1b_vnum(svbool_t, uint8_t *, int64_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s32))) +void svst1h(svbool_t, int16_t *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_s64))) +void svst1h(svbool_t, int16_t *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u32))) +void svst1h(svbool_t, uint16_t *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_u64))) +void svst1h(svbool_t, uint16_t *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s32))) +void svst1h_vnum(svbool_t, int16_t *, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_s64))) +void svst1h_vnum(svbool_t, int16_t *, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u32))) +void svst1h_vnum(svbool_t, uint16_t *, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1h_vnum_u64))) +void svst1h_vnum(svbool_t, uint16_t *, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_s64))) +void svst1w(svbool_t, int32_t *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_u64))) +void svst1w(svbool_t, uint32_t *, 
svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_s64))) +void svst1w_vnum(svbool_t, int32_t *, int64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst1w_vnum_u64))) +void svst1w_vnum(svbool_t, uint32_t *, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u8))) +void svst2(svbool_t, uint8_t *, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u32))) +void svst2(svbool_t, uint32_t *, svuint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u64))) +void svst2(svbool_t, uint64_t *, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_u16))) +void svst2(svbool_t, uint16_t *, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s8))) +void svst2(svbool_t, int8_t *, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f64))) +void svst2(svbool_t, float64_t *, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f32))) +void svst2(svbool_t, float32_t *, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_f16))) +void svst2(svbool_t, float16_t *, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s32))) +void svst2(svbool_t, int32_t *, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s64))) +void svst2(svbool_t, int64_t *, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_s16))) +void svst2(svbool_t, int16_t *, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u8))) +void svst2_vnum(svbool_t, uint8_t *, int64_t, svuint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u32))) +void svst2_vnum(svbool_t, uint32_t *, int64_t, svuint32x2_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u64))) +void svst2_vnum(svbool_t, uint64_t *, int64_t, svuint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_u16))) +void svst2_vnum(svbool_t, uint16_t *, int64_t, svuint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s8))) +void svst2_vnum(svbool_t, int8_t *, int64_t, svint8x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f64))) +void svst2_vnum(svbool_t, float64_t *, int64_t, svfloat64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f32))) +void svst2_vnum(svbool_t, float32_t *, int64_t, svfloat32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_f16))) +void svst2_vnum(svbool_t, float16_t *, int64_t, svfloat16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s32))) +void svst2_vnum(svbool_t, int32_t *, int64_t, svint32x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s64))) +void svst2_vnum(svbool_t, int64_t *, int64_t, svint64x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst2_vnum_s16))) +void svst2_vnum(svbool_t, int16_t *, int64_t, svint16x2_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u8))) +void svst3(svbool_t, uint8_t *, svuint8x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u32))) +void svst3(svbool_t, uint32_t *, svuint32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u64))) +void svst3(svbool_t, uint64_t *, svuint64x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_u16))) +void svst3(svbool_t, uint16_t *, svuint16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s8))) +void svst3(svbool_t, int8_t *, svint8x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f64))) +void svst3(svbool_t, float64_t *, 
svfloat64x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f32))) +void svst3(svbool_t, float32_t *, svfloat32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_f16))) +void svst3(svbool_t, float16_t *, svfloat16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s32))) +void svst3(svbool_t, int32_t *, svint32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s64))) +void svst3(svbool_t, int64_t *, svint64x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_s16))) +void svst3(svbool_t, int16_t *, svint16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u8))) +void svst3_vnum(svbool_t, uint8_t *, int64_t, svuint8x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u32))) +void svst3_vnum(svbool_t, uint32_t *, int64_t, svuint32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u64))) +void svst3_vnum(svbool_t, uint64_t *, int64_t, svuint64x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_u16))) +void svst3_vnum(svbool_t, uint16_t *, int64_t, svuint16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s8))) +void svst3_vnum(svbool_t, int8_t *, int64_t, svint8x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f64))) +void svst3_vnum(svbool_t, float64_t *, int64_t, svfloat64x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f32))) +void svst3_vnum(svbool_t, float32_t *, int64_t, svfloat32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_f16))) +void svst3_vnum(svbool_t, float16_t *, int64_t, svfloat16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s32))) +void svst3_vnum(svbool_t, int32_t *, int64_t, svint32x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s64))) 
+void svst3_vnum(svbool_t, int64_t *, int64_t, svint64x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst3_vnum_s16))) +void svst3_vnum(svbool_t, int16_t *, int64_t, svint16x3_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u8))) +void svst4(svbool_t, uint8_t *, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u32))) +void svst4(svbool_t, uint32_t *, svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u64))) +void svst4(svbool_t, uint64_t *, svuint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_u16))) +void svst4(svbool_t, uint16_t *, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s8))) +void svst4(svbool_t, int8_t *, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f64))) +void svst4(svbool_t, float64_t *, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f32))) +void svst4(svbool_t, float32_t *, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_f16))) +void svst4(svbool_t, float16_t *, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s32))) +void svst4(svbool_t, int32_t *, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s64))) +void svst4(svbool_t, int64_t *, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_s16))) +void svst4(svbool_t, int16_t *, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u8))) +void svst4_vnum(svbool_t, uint8_t *, int64_t, svuint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u32))) +void svst4_vnum(svbool_t, uint32_t *, int64_t, svuint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u64))) +void svst4_vnum(svbool_t, uint64_t *, int64_t, svuint64x4_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_u16))) +void svst4_vnum(svbool_t, uint16_t *, int64_t, svuint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s8))) +void svst4_vnum(svbool_t, int8_t *, int64_t, svint8x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f64))) +void svst4_vnum(svbool_t, float64_t *, int64_t, svfloat64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f32))) +void svst4_vnum(svbool_t, float32_t *, int64_t, svfloat32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_f16))) +void svst4_vnum(svbool_t, float16_t *, int64_t, svfloat16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s32))) +void svst4_vnum(svbool_t, int32_t *, int64_t, svint32x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s64))) +void svst4_vnum(svbool_t, int64_t *, int64_t, svint64x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svst4_vnum_s16))) +void svst4_vnum(svbool_t, int16_t *, int64_t, svint16x4_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u8))) +void svstnt1(svbool_t, uint8_t *, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u32))) +void svstnt1(svbool_t, uint32_t *, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u64))) +void svstnt1(svbool_t, uint64_t *, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_u16))) +void svstnt1(svbool_t, uint16_t *, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s8))) +void svstnt1(svbool_t, int8_t *, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f64))) +void svstnt1(svbool_t, float64_t *, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f32))) +void svstnt1(svbool_t, float32_t *, 
svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_f16))) +void svstnt1(svbool_t, float16_t *, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s32))) +void svstnt1(svbool_t, int32_t *, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s64))) +void svstnt1(svbool_t, int64_t *, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_s16))) +void svstnt1(svbool_t, int16_t *, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u8))) +void svstnt1_vnum(svbool_t, uint8_t *, int64_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u32))) +void svstnt1_vnum(svbool_t, uint32_t *, int64_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u64))) +void svstnt1_vnum(svbool_t, uint64_t *, int64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_u16))) +void svstnt1_vnum(svbool_t, uint16_t *, int64_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s8))) +void svstnt1_vnum(svbool_t, int8_t *, int64_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f64))) +void svstnt1_vnum(svbool_t, float64_t *, int64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f32))) +void svstnt1_vnum(svbool_t, float32_t *, int64_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_f16))) +void svstnt1_vnum(svbool_t, float16_t *, int64_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s32))) +void svstnt1_vnum(svbool_t, int32_t *, int64_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s64))) +void svstnt1_vnum(svbool_t, int64_t *, int64_t, svint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svstnt1_vnum_s16))) +void svstnt1_vnum(svbool_t, int16_t *, int64_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_m))) +svfloat64_t svsub_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_m))) +svfloat32_t svsub_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_m))) +svfloat16_t svsub_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_x))) +svfloat64_t svsub_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_x))) +svfloat32_t svsub_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_x))) +svfloat16_t svsub_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f64_z))) +svfloat64_t svsub_z(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f32_z))) +svfloat32_t svsub_z(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_f16_z))) +svfloat16_t svsub_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_m))) +svuint8_t svsub_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_m))) +svuint32_t svsub_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_m))) +svuint64_t svsub_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_m))) +svuint16_t svsub_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_m))) +svint8_t svsub_m(svbool_t, svint8_t, int8_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_m))) +svint32_t svsub_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_m))) +svint64_t svsub_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_m))) +svint16_t svsub_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_x))) +svuint8_t svsub_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_x))) +svuint32_t svsub_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_x))) +svuint64_t svsub_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_x))) +svuint16_t svsub_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_x))) +svint8_t svsub_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_x))) +svint32_t svsub_x(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_x))) +svint64_t svsub_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_x))) +svint16_t svsub_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u8_z))) +svuint8_t svsub_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u32_z))) +svuint32_t svsub_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u64_z))) +svuint64_t svsub_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_u16_z))) +svuint16_t svsub_z(svbool_t, svuint16_t, uint16_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s8_z))) +svint8_t svsub_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s32_z))) +svint32_t svsub_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s64_z))) +svint64_t svsub_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_n_s16_z))) +svint16_t svsub_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_m))) +svfloat64_t svsub_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_m))) +svfloat32_t svsub_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_m))) +svfloat16_t svsub_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_x))) +svfloat64_t svsub_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_x))) +svfloat32_t svsub_x(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_x))) +svfloat16_t svsub_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f64_z))) +svfloat64_t svsub_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f32_z))) +svfloat32_t svsub_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_f16_z))) +svfloat16_t svsub_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_m))) +svuint8_t svsub_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_m))) +svuint32_t svsub_m(svbool_t, svuint32_t, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_m))) +svuint64_t svsub_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_m))) +svuint16_t svsub_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_m))) +svint8_t svsub_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_m))) +svint32_t svsub_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_m))) +svint64_t svsub_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_m))) +svint16_t svsub_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_x))) +svuint8_t svsub_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_x))) +svuint32_t svsub_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_x))) +svuint64_t svsub_x(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_x))) +svuint16_t svsub_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_x))) +svint8_t svsub_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_x))) +svint32_t svsub_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_x))) +svint64_t svsub_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_x))) +svint16_t svsub_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u8_z))) +svuint8_t svsub_z(svbool_t, svuint8_t, svuint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u32_z))) +svuint32_t svsub_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u64_z))) +svuint64_t svsub_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_u16_z))) +svuint16_t svsub_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s8_z))) +svint8_t svsub_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s32_z))) +svint32_t svsub_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s64_z))) +svint64_t svsub_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsub_s16_z))) +svint16_t svsub_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_m))) +svfloat64_t svsubr_m(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_m))) +svfloat32_t svsubr_m(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_m))) +svfloat16_t svsubr_m(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_x))) +svfloat64_t svsubr_x(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_x))) +svfloat32_t svsubr_x(svbool_t, svfloat32_t, float32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_x))) +svfloat16_t svsubr_x(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f64_z))) +svfloat64_t svsubr_z(svbool_t, svfloat64_t, float64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f32_z))) +svfloat32_t svsubr_z(svbool_t, svfloat32_t, float32_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_f16_z))) +svfloat16_t svsubr_z(svbool_t, svfloat16_t, float16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_m))) +svuint8_t svsubr_m(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_m))) +svuint32_t svsubr_m(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_m))) +svuint64_t svsubr_m(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_m))) +svuint16_t svsubr_m(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_m))) +svint8_t svsubr_m(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_m))) +svint32_t svsubr_m(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_m))) +svint64_t svsubr_m(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_m))) +svint16_t svsubr_m(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_x))) +svuint8_t svsubr_x(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_x))) +svuint32_t svsubr_x(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_x))) +svuint64_t svsubr_x(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_x))) +svuint16_t svsubr_x(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_x))) +svint8_t svsubr_x(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_x))) +svint32_t svsubr_x(svbool_t, svint32_t, int32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_x))) +svint64_t svsubr_x(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_x))) +svint16_t svsubr_x(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u8_z))) +svuint8_t svsubr_z(svbool_t, svuint8_t, uint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u32_z))) +svuint32_t svsubr_z(svbool_t, svuint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u64_z))) +svuint64_t svsubr_z(svbool_t, svuint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_u16_z))) +svuint16_t svsubr_z(svbool_t, svuint16_t, uint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s8_z))) +svint8_t svsubr_z(svbool_t, svint8_t, int8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s32_z))) +svint32_t svsubr_z(svbool_t, svint32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s64_z))) +svint64_t svsubr_z(svbool_t, svint64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_n_s16_z))) +svint16_t svsubr_z(svbool_t, svint16_t, int16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_m))) +svfloat64_t svsubr_m(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_m))) +svfloat32_t svsubr_m(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_m))) +svfloat16_t svsubr_m(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_x))) +svfloat64_t svsubr_x(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_x))) +svfloat32_t svsubr_x(svbool_t, svfloat32_t, svfloat32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_x))) +svfloat16_t svsubr_x(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f64_z))) +svfloat64_t svsubr_z(svbool_t, svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f32_z))) +svfloat32_t svsubr_z(svbool_t, svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_f16_z))) +svfloat16_t svsubr_z(svbool_t, svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_m))) +svuint8_t svsubr_m(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_m))) +svuint32_t svsubr_m(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_m))) +svuint64_t svsubr_m(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_m))) +svuint16_t svsubr_m(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_m))) +svint8_t svsubr_m(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_m))) +svint32_t svsubr_m(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_m))) +svint64_t svsubr_m(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_m))) +svint16_t svsubr_m(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_x))) +svuint8_t svsubr_x(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_x))) +svuint32_t svsubr_x(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_x))) +svuint64_t svsubr_x(svbool_t, svuint64_t, svuint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_x))) +svuint16_t svsubr_x(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_x))) +svint8_t svsubr_x(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_x))) +svint32_t svsubr_x(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_x))) +svint64_t svsubr_x(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_x))) +svint16_t svsubr_x(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u8_z))) +svuint8_t svsubr_z(svbool_t, svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u32_z))) +svuint32_t svsubr_z(svbool_t, svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u64_z))) +svuint64_t svsubr_z(svbool_t, svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_u16_z))) +svuint16_t svsubr_z(svbool_t, svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s8_z))) +svint8_t svsubr_z(svbool_t, svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s32_z))) +svint32_t svsubr_z(svbool_t, svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s64_z))) +svint64_t svsubr_z(svbool_t, svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svsubr_s16_z))) +svint16_t svsubr_z(svbool_t, svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u8))) +svuint8_t svtbl(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u32))) +svuint32_t svtbl(svuint32_t, svuint32_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u64))) +svuint64_t svtbl(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_u16))) +svuint16_t svtbl(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s8))) +svint8_t svtbl(svint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f64))) +svfloat64_t svtbl(svfloat64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f32))) +svfloat32_t svtbl(svfloat32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_f16))) +svfloat16_t svtbl(svfloat16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s32))) +svint32_t svtbl(svint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s64))) +svint64_t svtbl(svint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtbl_s16))) +svint16_t svtbl(svint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u8))) +svuint8_t svtrn1(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u32))) +svuint32_t svtrn1(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u64))) +svuint64_t svtrn1(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_u16))) +svuint16_t svtrn1(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s8))) +svint8_t svtrn1(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f64))) +svfloat64_t svtrn1(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f32))) +svfloat32_t svtrn1(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_f16))) +svfloat16_t svtrn1(svfloat16_t, 
svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s32))) +svint32_t svtrn1(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s64))) +svint64_t svtrn1(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn1_s16))) +svint16_t svtrn1(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u8))) +svuint8_t svtrn2(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u32))) +svuint32_t svtrn2(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u64))) +svuint64_t svtrn2(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_u16))) +svuint16_t svtrn2(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s8))) +svint8_t svtrn2(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f64))) +svfloat64_t svtrn2(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f32))) +svfloat32_t svtrn2(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_f16))) +svfloat16_t svtrn2(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s32))) +svint32_t svtrn2(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s64))) +svint64_t svtrn2(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svtrn2_s16))) +svint16_t svtrn2(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_b))) +svbool_t svunpkhi(svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s32))) +svint32_t svunpkhi(svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s64))) +svint64_t svunpkhi(svint32_t); 
+__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_s16))) +svint16_t svunpkhi(svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u32))) +svuint32_t svunpkhi(svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u64))) +svuint64_t svunpkhi(svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpkhi_u16))) +svuint16_t svunpkhi(svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_b))) +svbool_t svunpklo(svbool_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s32))) +svint32_t svunpklo(svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s64))) +svint64_t svunpklo(svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_s16))) +svint16_t svunpklo(svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u32))) +svuint32_t svunpklo(svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u64))) +svuint64_t svunpklo(svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svunpklo_u16))) +svuint16_t svunpklo(svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u8))) +svuint8_t svuzp1(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u32))) +svuint32_t svuzp1(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u64))) +svuint64_t svuzp1(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_u16))) +svuint16_t svuzp1(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s8))) +svint8_t svuzp1(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f64))) +svfloat64_t svuzp1(svfloat64_t, svfloat64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f32))) +svfloat32_t svuzp1(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_f16))) +svfloat16_t svuzp1(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s32))) +svint32_t svuzp1(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s64))) +svint64_t svuzp1(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp1_s16))) +svint16_t svuzp1(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u8))) +svuint8_t svuzp2(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u32))) +svuint32_t svuzp2(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u64))) +svuint64_t svuzp2(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_u16))) +svuint16_t svuzp2(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s8))) +svint8_t svuzp2(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f64))) +svfloat64_t svuzp2(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f32))) +svfloat32_t svuzp2(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_f16))) +svfloat16_t svuzp2(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s32))) +svint32_t svuzp2(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s64))) +svint64_t svuzp2(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svuzp2_s16))) +svint16_t svuzp2(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s32))) +svbool_t 
svwhilele_b8(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s32))) +svbool_t svwhilele_b32(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s32))) +svbool_t svwhilele_b64(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s32))) +svbool_t svwhilele_b16(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_s64))) +svbool_t svwhilele_b8(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_s64))) +svbool_t svwhilele_b32(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_s64))) +svbool_t svwhilele_b64(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_s64))) +svbool_t svwhilele_b16(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u32))) +svbool_t svwhilele_b8(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u32))) +svbool_t svwhilele_b32(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u32))) +svbool_t svwhilele_b64(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u32))) +svbool_t svwhilele_b16(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b8_u64))) +svbool_t svwhilele_b8(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b32_u64))) +svbool_t svwhilele_b32(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b64_u64))) +svbool_t svwhilele_b64(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilele_b16_u64))) +svbool_t svwhilele_b16(uint64_t, uint64_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u32))) +svbool_t svwhilelt_b8(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u32))) +svbool_t svwhilelt_b32(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u32))) +svbool_t svwhilelt_b64(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u32))) +svbool_t svwhilelt_b16(uint32_t, uint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_u64))) +svbool_t svwhilelt_b8(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_u64))) +svbool_t svwhilelt_b32(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_u64))) +svbool_t svwhilelt_b64(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_u64))) +svbool_t svwhilelt_b16(uint64_t, uint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s32))) +svbool_t svwhilelt_b8(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s32))) +svbool_t svwhilelt_b32(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s32))) +svbool_t svwhilelt_b64(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s32))) +svbool_t svwhilelt_b16(int32_t, int32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b8_s64))) +svbool_t svwhilelt_b8(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b32_s64))) +svbool_t svwhilelt_b32(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b64_s64))) +svbool_t svwhilelt_b64(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svwhilelt_b16_s64))) +svbool_t 
svwhilelt_b16(int64_t, int64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u8))) +svuint8_t svzip1(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u32))) +svuint32_t svzip1(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u64))) +svuint64_t svzip1(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_u16))) +svuint16_t svzip1(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s8))) +svint8_t svzip1(svint8_t, svint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f64))) +svfloat64_t svzip1(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f32))) +svfloat32_t svzip1(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_f16))) +svfloat16_t svzip1(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s32))) +svint32_t svzip1(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s64))) +svint64_t svzip1(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip1_s16))) +svint16_t svzip1(svint16_t, svint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u8))) +svuint8_t svzip2(svuint8_t, svuint8_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u32))) +svuint32_t svzip2(svuint32_t, svuint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u64))) +svuint64_t svzip2(svuint64_t, svuint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_u16))) +svuint16_t svzip2(svuint16_t, svuint16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s8))) +svint8_t svzip2(svint8_t, svint8_t); +__aio 
__attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f64))) +svfloat64_t svzip2(svfloat64_t, svfloat64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f32))) +svfloat32_t svzip2(svfloat32_t, svfloat32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_f16))) +svfloat16_t svzip2(svfloat16_t, svfloat16_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s32))) +svint32_t svzip2(svint32_t, svint32_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s64))) +svint64_t svzip2(svint64_t, svint64_t); +__aio __attribute__((__clang_arm_builtin_alias(__builtin_sve_svzip2_s16))) +svint16_t svzip2(svint16_t, svint16_t); #define svcvtnt_bf16_x svcvtnt_bf16_m #define svcvtnt_bf16_f32_x svcvtnt_bf16_f32_m #define svcvtnt_f16_x svcvtnt_f16_m diff --git a/lib/include/arm_vector_types.h b/lib/include/arm_vector_types.h index b0dd66b07f9b..8e79d39a6041 100644 --- a/lib/include/arm_vector_types.h +++ b/lib/include/arm_vector_types.h @@ -16,7 +16,7 @@ #define __ARM_NEON_TYPES_H typedef float float32_t; typedef __fp16 float16_t; -#ifdef __aarch64__ +#if defined(__aarch64__) || defined(__arm64ec__) typedef double float64_t; #endif @@ -40,7 +40,7 @@ typedef __attribute__((neon_vector_type(4))) float16_t float16x4_t; typedef __attribute__((neon_vector_type(8))) float16_t float16x8_t; typedef __attribute__((neon_vector_type(2))) float32_t float32x2_t; typedef __attribute__((neon_vector_type(4))) float32_t float32x4_t; -#ifdef __aarch64__ +#if defined(__aarch64__) || defined(__arm64ec__) typedef __attribute__((neon_vector_type(1))) float64_t float64x1_t; typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t; #endif @@ -125,7 +125,7 @@ typedef struct float32x4x2_t { float32x4_t val[2]; } float32x4x2_t; -#ifdef __aarch64__ +#if defined(__aarch64__) || defined(__arm64ec__) typedef struct float64x1x2_t { float64x1_t val[2]; } float64x1x2_t; @@ -215,7 +215,7 @@ typedef struct 
float32x4x3_t { float32x4_t val[3]; } float32x4x3_t; -#ifdef __aarch64__ +#if defined(__aarch64__) || defined(__arm64ec__) typedef struct float64x1x3_t { float64x1_t val[3]; } float64x1x3_t; @@ -305,7 +305,7 @@ typedef struct float32x4x4_t { float32x4_t val[4]; } float32x4x4_t; -#ifdef __aarch64__ +#if defined(__aarch64__) || defined(__arm64ec__) typedef struct float64x1x4_t { float64x1_t val[4]; } float64x1x4_t; diff --git a/lib/include/avx512erintrin.h b/lib/include/avx512erintrin.h deleted file mode 100644 index 1c5a2d2d208f..000000000000 --- a/lib/include/avx512erintrin.h +++ /dev/null @@ -1,271 +0,0 @@ -/*===---- avx512erintrin.h - AVX512ER intrinsics ---------------------------=== - * - * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. - * See https://llvm.org/LICENSE.txt for license information. - * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - * - *===-----------------------------------------------------------------------=== - */ -#ifndef __IMMINTRIN_H -#error "Never use directly; include instead." 
-#endif - -#ifndef __AVX512ERINTRIN_H -#define __AVX512ERINTRIN_H - -/* exp2a23 */ -#define _mm512_exp2a23_round_pd(A, R) \ - ((__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \ - (__v8df)_mm512_setzero_pd(), \ - (__mmask8)-1, (int)(R))) - -#define _mm512_mask_exp2a23_round_pd(S, M, A, R) \ - ((__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \ - (__v8df)(__m512d)(S), (__mmask8)(M), \ - (int)(R))) - -#define _mm512_maskz_exp2a23_round_pd(M, A, R) \ - ((__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \ - (__v8df)_mm512_setzero_pd(), \ - (__mmask8)(M), (int)(R))) - -#define _mm512_exp2a23_pd(A) \ - _mm512_exp2a23_round_pd((A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_mask_exp2a23_pd(S, M, A) \ - _mm512_mask_exp2a23_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_maskz_exp2a23_pd(M, A) \ - _mm512_maskz_exp2a23_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_exp2a23_round_ps(A, R) \ - ((__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \ - (__v16sf)_mm512_setzero_ps(), \ - (__mmask16)-1, (int)(R))) - -#define _mm512_mask_exp2a23_round_ps(S, M, A, R) \ - ((__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \ - (__v16sf)(__m512)(S), (__mmask16)(M), \ - (int)(R))) - -#define _mm512_maskz_exp2a23_round_ps(M, A, R) \ - ((__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \ - (__v16sf)_mm512_setzero_ps(), \ - (__mmask16)(M), (int)(R))) - -#define _mm512_exp2a23_ps(A) \ - _mm512_exp2a23_round_ps((A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_mask_exp2a23_ps(S, M, A) \ - _mm512_mask_exp2a23_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_maskz_exp2a23_ps(M, A) \ - _mm512_maskz_exp2a23_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION) - -/* rsqrt28 */ -#define _mm512_rsqrt28_round_pd(A, R) \ - ((__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \ - (__v8df)_mm512_setzero_pd(), \ - (__mmask8)-1, (int)(R))) - -#define _mm512_mask_rsqrt28_round_pd(S, M, A, R) \ - 
((__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \ - (__v8df)(__m512d)(S), (__mmask8)(M), \ - (int)(R))) - -#define _mm512_maskz_rsqrt28_round_pd(M, A, R) \ - ((__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \ - (__v8df)_mm512_setzero_pd(), \ - (__mmask8)(M), (int)(R))) - -#define _mm512_rsqrt28_pd(A) \ - _mm512_rsqrt28_round_pd((A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_mask_rsqrt28_pd(S, M, A) \ - _mm512_mask_rsqrt28_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_maskz_rsqrt28_pd(M, A) \ - _mm512_maskz_rsqrt28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_rsqrt28_round_ps(A, R) \ - ((__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \ - (__v16sf)_mm512_setzero_ps(), \ - (__mmask16)-1, (int)(R))) - -#define _mm512_mask_rsqrt28_round_ps(S, M, A, R) \ - ((__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \ - (__v16sf)(__m512)(S), (__mmask16)(M), \ - (int)(R))) - -#define _mm512_maskz_rsqrt28_round_ps(M, A, R) \ - ((__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \ - (__v16sf)_mm512_setzero_ps(), \ - (__mmask16)(M), (int)(R))) - -#define _mm512_rsqrt28_ps(A) \ - _mm512_rsqrt28_round_ps((A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_mask_rsqrt28_ps(S, M, A) \ - _mm512_mask_rsqrt28_round_ps((S), (M), A, _MM_FROUND_CUR_DIRECTION) - -#define _mm512_maskz_rsqrt28_ps(M, A) \ - _mm512_maskz_rsqrt28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION) - -#define _mm_rsqrt28_round_ss(A, B, R) \ - ((__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \ - (__v4sf)(__m128)(B), \ - (__v4sf)_mm_setzero_ps(), \ - (__mmask8)-1, (int)(R))) - -#define _mm_mask_rsqrt28_round_ss(S, M, A, B, R) \ - ((__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \ - (__v4sf)(__m128)(B), \ - (__v4sf)(__m128)(S), \ - (__mmask8)(M), (int)(R))) - -#define _mm_maskz_rsqrt28_round_ss(M, A, B, R) \ - ((__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \ - (__v4sf)(__m128)(B), 
\ - (__v4sf)_mm_setzero_ps(), \ - (__mmask8)(M), (int)(R))) - -#define _mm_rsqrt28_ss(A, B) \ - _mm_rsqrt28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION) - -#define _mm_mask_rsqrt28_ss(S, M, A, B) \ - _mm_mask_rsqrt28_round_ss((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION) - -#define _mm_maskz_rsqrt28_ss(M, A, B) \ - _mm_maskz_rsqrt28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION) - -#define _mm_rsqrt28_round_sd(A, B, R) \ - ((__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \ - (__v2df)(__m128d)(B), \ - (__v2df)_mm_setzero_pd(), \ - (__mmask8)-1, (int)(R))) - -#define _mm_mask_rsqrt28_round_sd(S, M, A, B, R) \ - ((__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \ - (__v2df)(__m128d)(B), \ - (__v2df)(__m128d)(S), \ - (__mmask8)(M), (int)(R))) - -#define _mm_maskz_rsqrt28_round_sd(M, A, B, R) \ - ((__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \ - (__v2df)(__m128d)(B), \ - (__v2df)_mm_setzero_pd(), \ - (__mmask8)(M), (int)(R))) - -#define _mm_rsqrt28_sd(A, B) \ - _mm_rsqrt28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION) - -#define _mm_mask_rsqrt28_sd(S, M, A, B) \ - _mm_mask_rsqrt28_round_sd((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION) - -#define _mm_maskz_rsqrt28_sd(M, A, B) \ - _mm_maskz_rsqrt28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION) - -/* rcp28 */ -#define _mm512_rcp28_round_pd(A, R) \ - ((__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \ - (__v8df)_mm512_setzero_pd(), \ - (__mmask8)-1, (int)(R))) - -#define _mm512_mask_rcp28_round_pd(S, M, A, R) \ - ((__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \ - (__v8df)(__m512d)(S), (__mmask8)(M), \ - (int)(R))) - -#define _mm512_maskz_rcp28_round_pd(M, A, R) \ - ((__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \ - (__v8df)_mm512_setzero_pd(), \ - (__mmask8)(M), (int)(R))) - -#define _mm512_rcp28_pd(A) \ - _mm512_rcp28_round_pd((A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_mask_rcp28_pd(S, M, A) \ - 
_mm512_mask_rcp28_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_maskz_rcp28_pd(M, A) \ - _mm512_maskz_rcp28_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_rcp28_round_ps(A, R) \ - ((__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \ - (__v16sf)_mm512_setzero_ps(), \ - (__mmask16)-1, (int)(R))) - -#define _mm512_mask_rcp28_round_ps(S, M, A, R) \ - ((__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \ - (__v16sf)(__m512)(S), (__mmask16)(M), \ - (int)(R))) - -#define _mm512_maskz_rcp28_round_ps(M, A, R) \ - ((__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \ - (__v16sf)_mm512_setzero_ps(), \ - (__mmask16)(M), (int)(R))) - -#define _mm512_rcp28_ps(A) \ - _mm512_rcp28_round_ps((A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_mask_rcp28_ps(S, M, A) \ - _mm512_mask_rcp28_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION) - -#define _mm512_maskz_rcp28_ps(M, A) \ - _mm512_maskz_rcp28_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION) - -#define _mm_rcp28_round_ss(A, B, R) \ - ((__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \ - (__v4sf)(__m128)(B), \ - (__v4sf)_mm_setzero_ps(), \ - (__mmask8)-1, (int)(R))) - -#define _mm_mask_rcp28_round_ss(S, M, A, B, R) \ - ((__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \ - (__v4sf)(__m128)(B), \ - (__v4sf)(__m128)(S), \ - (__mmask8)(M), (int)(R))) - -#define _mm_maskz_rcp28_round_ss(M, A, B, R) \ - ((__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \ - (__v4sf)(__m128)(B), \ - (__v4sf)_mm_setzero_ps(), \ - (__mmask8)(M), (int)(R))) - -#define _mm_rcp28_ss(A, B) \ - _mm_rcp28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION) - -#define _mm_mask_rcp28_ss(S, M, A, B) \ - _mm_mask_rcp28_round_ss((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION) - -#define _mm_maskz_rcp28_ss(M, A, B) \ - _mm_maskz_rcp28_round_ss((M), (A), (B), _MM_FROUND_CUR_DIRECTION) - -#define _mm_rcp28_round_sd(A, B, R) \ - 
((__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \ - (__v2df)(__m128d)(B), \ - (__v2df)_mm_setzero_pd(), \ - (__mmask8)-1, (int)(R))) - -#define _mm_mask_rcp28_round_sd(S, M, A, B, R) \ - ((__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \ - (__v2df)(__m128d)(B), \ - (__v2df)(__m128d)(S), \ - (__mmask8)(M), (int)(R))) - -#define _mm_maskz_rcp28_round_sd(M, A, B, R) \ - ((__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \ - (__v2df)(__m128d)(B), \ - (__v2df)_mm_setzero_pd(), \ - (__mmask8)(M), (int)(R))) - -#define _mm_rcp28_sd(A, B) \ - _mm_rcp28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION) - -#define _mm_mask_rcp28_sd(S, M, A, B) \ - _mm_mask_rcp28_round_sd((S), (M), (A), (B), _MM_FROUND_CUR_DIRECTION) - -#define _mm_maskz_rcp28_sd(M, A, B) \ - _mm_maskz_rcp28_round_sd((M), (A), (B), _MM_FROUND_CUR_DIRECTION) - -#endif /* __AVX512ERINTRIN_H */ diff --git a/lib/include/avx512fp16intrin.h b/lib/include/avx512fp16intrin.h index 4123f10c3951..e136aa14a194 100644 --- a/lib/include/avx512fp16intrin.h +++ b/lib/include/avx512fp16intrin.h @@ -96,8 +96,8 @@ _mm512_set_ph(_Float16 __h1, _Float16 __h2, _Float16 __h3, _Float16 __h4, (h5), (h4), (h3), (h2), (h1)) static __inline __m512h __DEFAULT_FN_ATTRS512 -_mm512_set1_pch(_Float16 _Complex h) { - return (__m512h)_mm512_set1_ps(__builtin_bit_cast(float, h)); +_mm512_set1_pch(_Float16 _Complex __h) { + return (__m512h)_mm512_set1_ps(__builtin_bit_cast(float, __h)); } static __inline__ __m128 __DEFAULT_FN_ATTRS128 _mm_castph_ps(__m128h __a) { @@ -282,75 +282,75 @@ _mm512_zextph256_ph512(__m256h __a) { #define _mm_comi_sh(A, B, pred) \ _mm_comi_round_sh((A), (B), (pred), _MM_FROUND_CUR_DIRECTION) -static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comieq_sh(__m128h A, - __m128h B) { - return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_EQ_OS, +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comieq_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, 
(__v8hf)__B, _CMP_EQ_OS, _MM_FROUND_CUR_DIRECTION); } -static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comilt_sh(__m128h A, - __m128h B) { - return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_LT_OS, +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comilt_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_LT_OS, _MM_FROUND_CUR_DIRECTION); } -static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comile_sh(__m128h A, - __m128h B) { - return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_LE_OS, +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comile_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_LE_OS, _MM_FROUND_CUR_DIRECTION); } -static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comigt_sh(__m128h A, - __m128h B) { - return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_GT_OS, +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comigt_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_GT_OS, _MM_FROUND_CUR_DIRECTION); } -static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comige_sh(__m128h A, - __m128h B) { - return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_GE_OS, +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comige_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_GE_OS, _MM_FROUND_CUR_DIRECTION); } -static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comineq_sh(__m128h A, - __m128h B) { - return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_NEQ_US, +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_comineq_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_NEQ_US, _MM_FROUND_CUR_DIRECTION); } -static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomieq_sh(__m128h A, - __m128h B) { - return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_EQ_OQ, +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomieq_sh(__m128h __A, + __m128h __B) { + 
return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_EQ_OQ, _MM_FROUND_CUR_DIRECTION); } -static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomilt_sh(__m128h A, - __m128h B) { - return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_LT_OQ, +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomilt_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_LT_OQ, _MM_FROUND_CUR_DIRECTION); } -static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomile_sh(__m128h A, - __m128h B) { - return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_LE_OQ, +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomile_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_LE_OQ, _MM_FROUND_CUR_DIRECTION); } -static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomigt_sh(__m128h A, - __m128h B) { - return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_GT_OQ, +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomigt_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_GT_OQ, _MM_FROUND_CUR_DIRECTION); } -static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomige_sh(__m128h A, - __m128h B) { - return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_GE_OQ, +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomige_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_GE_OQ, _MM_FROUND_CUR_DIRECTION); } -static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomineq_sh(__m128h A, - __m128h B) { - return __builtin_ia32_vcomish((__v8hf)A, (__v8hf)B, _CMP_NEQ_UQ, +static __inline__ int __DEFAULT_FN_ATTRS128 _mm_ucomineq_sh(__m128h __A, + __m128h __B) { + return __builtin_ia32_vcomish((__v8hf)__A, (__v8hf)__B, _CMP_NEQ_UQ, _MM_FROUND_CUR_DIRECTION); } diff --git a/lib/include/avx512pfintrin.h b/lib/include/avx512pfintrin.h deleted file mode 100644 index f853be021a2d..000000000000 --- a/lib/include/avx512pfintrin.h +++ /dev/null @@ -1,92 +0,0 
@@ -/*===------------- avx512pfintrin.h - PF intrinsics ------------------------=== - * - * - * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. - * See https://llvm.org/LICENSE.txt for license information. - * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception - * - *===-----------------------------------------------------------------------=== - */ -#ifndef __IMMINTRIN_H -#error "Never use directly; include instead." -#endif - -#ifndef __AVX512PFINTRIN_H -#define __AVX512PFINTRIN_H - -#define _mm512_mask_prefetch_i32gather_pd(index, mask, addr, scale, hint) \ - __builtin_ia32_gatherpfdpd((__mmask8)(mask), (__v8si)(__m256i)(index), \ - (void const *)(addr), (int)(scale), \ - (int)(hint)) - -#define _mm512_prefetch_i32gather_pd(index, addr, scale, hint) \ - __builtin_ia32_gatherpfdpd((__mmask8) -1, (__v8si)(__m256i)(index), \ - (void const *)(addr), (int)(scale), \ - (int)(hint)) - -#define _mm512_mask_prefetch_i32gather_ps(index, mask, addr, scale, hint) \ - __builtin_ia32_gatherpfdps((__mmask16)(mask), \ - (__v16si)(__m512i)(index), (void const *)(addr), \ - (int)(scale), (int)(hint)) - -#define _mm512_prefetch_i32gather_ps(index, addr, scale, hint) \ - __builtin_ia32_gatherpfdps((__mmask16) -1, \ - (__v16si)(__m512i)(index), (void const *)(addr), \ - (int)(scale), (int)(hint)) - -#define _mm512_mask_prefetch_i64gather_pd(index, mask, addr, scale, hint) \ - __builtin_ia32_gatherpfqpd((__mmask8)(mask), (__v8di)(__m512i)(index), \ - (void const *)(addr), (int)(scale), \ - (int)(hint)) - -#define _mm512_prefetch_i64gather_pd(index, addr, scale, hint) \ - __builtin_ia32_gatherpfqpd((__mmask8) -1, (__v8di)(__m512i)(index), \ - (void const *)(addr), (int)(scale), \ - (int)(hint)) - -#define _mm512_mask_prefetch_i64gather_ps(index, mask, addr, scale, hint) \ - __builtin_ia32_gatherpfqps((__mmask8)(mask), (__v8di)(__m512i)(index), \ - (void const *)(addr), (int)(scale), (int)(hint)) - -#define _mm512_prefetch_i64gather_ps(index, addr, 
scale, hint) \ - __builtin_ia32_gatherpfqps((__mmask8) -1, (__v8di)(__m512i)(index), \ - (void const *)(addr), (int)(scale), (int)(hint)) - -#define _mm512_prefetch_i32scatter_pd(addr, index, scale, hint) \ - __builtin_ia32_scatterpfdpd((__mmask8)-1, (__v8si)(__m256i)(index), \ - (void *)(addr), (int)(scale), \ - (int)(hint)) - -#define _mm512_mask_prefetch_i32scatter_pd(addr, mask, index, scale, hint) \ - __builtin_ia32_scatterpfdpd((__mmask8)(mask), (__v8si)(__m256i)(index), \ - (void *)(addr), (int)(scale), \ - (int)(hint)) - -#define _mm512_prefetch_i32scatter_ps(addr, index, scale, hint) \ - __builtin_ia32_scatterpfdps((__mmask16)-1, (__v16si)(__m512i)(index), \ - (void *)(addr), (int)(scale), (int)(hint)) - -#define _mm512_mask_prefetch_i32scatter_ps(addr, mask, index, scale, hint) \ - __builtin_ia32_scatterpfdps((__mmask16)(mask), \ - (__v16si)(__m512i)(index), (void *)(addr), \ - (int)(scale), (int)(hint)) - -#define _mm512_prefetch_i64scatter_pd(addr, index, scale, hint) \ - __builtin_ia32_scatterpfqpd((__mmask8)-1, (__v8di)(__m512i)(index), \ - (void *)(addr), (int)(scale), \ - (int)(hint)) - -#define _mm512_mask_prefetch_i64scatter_pd(addr, mask, index, scale, hint) \ - __builtin_ia32_scatterpfqpd((__mmask8)(mask), (__v8di)(__m512i)(index), \ - (void *)(addr), (int)(scale), \ - (int)(hint)) - -#define _mm512_prefetch_i64scatter_ps(addr, index, scale, hint) \ - __builtin_ia32_scatterpfqps((__mmask8)-1, (__v8di)(__m512i)(index), \ - (void *)(addr), (int)(scale), (int)(hint)) - -#define _mm512_mask_prefetch_i64scatter_ps(addr, mask, index, scale, hint) \ - __builtin_ia32_scatterpfqps((__mmask8)(mask), (__v8di)(__m512i)(index), \ - (void *)(addr), (int)(scale), (int)(hint)) - -#endif diff --git a/lib/include/avxintrin.h b/lib/include/avxintrin.h index f116d8bc3a94..4983f3311370 100644 --- a/lib/include/avxintrin.h +++ b/lib/include/avxintrin.h @@ -207,6 +207,8 @@ _mm256_div_ps(__m256 __a, __m256 __b) /// Compares two 256-bit vectors of [4 x double] and 
returns the greater /// of each pair of values. /// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// /// \headerfile /// /// This intrinsic corresponds to the VMAXPD instruction. @@ -226,6 +228,8 @@ _mm256_max_pd(__m256d __a, __m256d __b) /// Compares two 256-bit vectors of [8 x float] and returns the greater /// of each pair of values. /// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// /// \headerfile /// /// This intrinsic corresponds to the VMAXPS instruction. @@ -245,6 +249,8 @@ _mm256_max_ps(__m256 __a, __m256 __b) /// Compares two 256-bit vectors of [4 x double] and returns the lesser /// of each pair of values. /// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// /// \headerfile /// /// This intrinsic corresponds to the VMINPD instruction. @@ -264,6 +270,8 @@ _mm256_min_pd(__m256d __a, __m256d __b) /// Compares two 256-bit vectors of [8 x float] and returns the lesser /// of each pair of values. /// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// /// \headerfile /// /// This intrinsic corresponds to the VMINPS instruction. @@ -832,6 +840,7 @@ _mm256_permutevar_pd(__m256d __a, __m256i __c) /// Copies the values stored in a 128-bit vector of [4 x float] as /// specified by the 128-bit integer vector operand. +/// /// \headerfile /// /// This intrinsic corresponds to the VPERMILPS instruction. 
@@ -1574,14 +1583,6 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c) (__v4df)(__m256d)(b), (int)(mask))) /* Compare */ -#define _CMP_EQ_OQ 0x00 /* Equal (ordered, non-signaling) */ -#define _CMP_LT_OS 0x01 /* Less-than (ordered, signaling) */ -#define _CMP_LE_OS 0x02 /* Less-than-or-equal (ordered, signaling) */ -#define _CMP_UNORD_Q 0x03 /* Unordered (non-signaling) */ -#define _CMP_NEQ_UQ 0x04 /* Not-equal (unordered, non-signaling) */ -#define _CMP_NLT_US 0x05 /* Not-less-than (unordered, signaling) */ -#define _CMP_NLE_US 0x06 /* Not-less-than-or-equal (unordered, signaling) */ -#define _CMP_ORD_Q 0x07 /* Ordered (non-signaling) */ #define _CMP_EQ_UQ 0x08 /* Equal (unordered, non-signaling) */ #define _CMP_NGE_US 0x09 /* Not-greater-than-or-equal (unordered, signaling) */ #define _CMP_NGT_US 0x0a /* Not-greater-than (unordered, signaling) */ @@ -1607,13 +1608,14 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c) #define _CMP_GT_OQ 0x1e /* Greater-than (ordered, non-signaling) */ #define _CMP_TRUE_US 0x1f /* True (unordered, signaling) */ +/* Below intrinsic defined in emmintrin.h can be used for AVX */ /// Compares each of the corresponding double-precision values of two /// 128-bit vectors of [2 x double], using the operation specified by the /// immediate integer operand. /// -/// Returns a [2 x double] vector consisting of two doubles corresponding to -/// the two comparison results: zero if the comparison is false, and all 1's -/// if the comparison is true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. /// /// \headerfile /// @@ -1663,17 +1665,16 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c) /// 0x1E: Greater-than (ordered, non-signaling) \n /// 0x1F: True (unordered, signaling) /// \returns A 128-bit vector of [2 x double] containing the comparison results. 
-#define _mm_cmp_pd(a, b, c) \ - ((__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), \ - (__v2df)(__m128d)(b), (c))) +/// \fn __m128d _mm_cmp_pd(__m128d a, __m128d b, const int c) +/* Below intrinsic defined in xmmintrin.h can be used for AVX */ /// Compares each of the corresponding values of two 128-bit vectors of /// [4 x float], using the operation specified by the immediate integer /// operand. /// -/// Returns a [4 x float] vector consisting of four floats corresponding to -/// the four comparison results: zero if the comparison is false, and all 1's -/// if the comparison is true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. /// /// \headerfile /// @@ -1723,17 +1724,15 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c) /// 0x1E: Greater-than (ordered, non-signaling) \n /// 0x1F: True (unordered, signaling) /// \returns A 128-bit vector of [4 x float] containing the comparison results. -#define _mm_cmp_ps(a, b, c) \ - ((__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), \ - (__v4sf)(__m128)(b), (c))) +/// \fn __m128 _mm_cmp_ps(__m128 a, __m128 b, const int c) /// Compares each of the corresponding double-precision values of two /// 256-bit vectors of [4 x double], using the operation specified by the /// immediate integer operand. /// -/// Returns a [4 x double] vector consisting of four doubles corresponding to -/// the four comparison results: zero if the comparison is false, and all 1's -/// if the comparison is true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. 
/// /// \headerfile /// @@ -1791,9 +1790,9 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c) /// [8 x float], using the operation specified by the immediate integer /// operand. /// -/// Returns a [8 x float] vector consisting of eight floats corresponding to -/// the eight comparison results: zero if the comparison is false, and all -/// 1's if the comparison is true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. /// /// \headerfile /// @@ -1847,12 +1846,14 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c) ((__m256)__builtin_ia32_cmpps256((__v8sf)(__m256)(a), \ (__v8sf)(__m256)(b), (c))) +/* Below intrinsic defined in emmintrin.h can be used for AVX */ /// Compares each of the corresponding scalar double-precision values of /// two 128-bit vectors of [2 x double], using the operation specified by the /// immediate integer operand. /// -/// If the result is true, all 64 bits of the destination vector are set; -/// otherwise they are cleared. +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. /// /// \headerfile /// @@ -1902,16 +1903,16 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c) /// 0x1E: Greater-than (ordered, non-signaling) \n /// 0x1F: True (unordered, signaling) /// \returns A 128-bit vector of [2 x double] containing the comparison results. 
-#define _mm_cmp_sd(a, b, c) \ - ((__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), \ - (__v2df)(__m128d)(b), (c))) +/// \fn __m128d _mm_cmp_sd(__m128d a, __m128d b, const int c) +/* Below intrinsic defined in xmmintrin.h can be used for AVX */ /// Compares each of the corresponding scalar values of two 128-bit /// vectors of [4 x float], using the operation specified by the immediate /// integer operand. /// -/// If the result is true, all 32 bits of the destination vector are set; -/// otherwise they are cleared. +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. /// /// \headerfile /// @@ -1961,9 +1962,7 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c) /// 0x1E: Greater-than (ordered, non-signaling) \n /// 0x1F: True (unordered, signaling) /// \returns A 128-bit vector of [4 x float] containing the comparison results. -#define _mm_cmp_ss(a, b, c) \ - ((__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), \ - (__v4sf)(__m128)(b), (c))) +/// \fn __m128 _mm_cmp_ss(__m128 a, __m128 b, const int c) /// Takes a [8 x i32] vector and returns the vector element value /// indexed by the immediate constant operand. @@ -2213,6 +2212,10 @@ _mm256_cvtpd_ps(__m256d __a) /// Converts a vector of [8 x float] into a vector of [8 x i32]. /// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// /// \headerfile /// /// This intrinsic corresponds to the VCVTPS2DQ instruction. @@ -2242,9 +2245,13 @@ _mm256_cvtps_pd(__m128 __a) return (__m256d)__builtin_convertvector((__v4sf)__a, __v4df); } -/// Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4 -/// x i32], truncating the result by rounding towards zero when it is -/// inexact. 
+/// Converts a 256-bit vector of [4 x double] into four signed truncated +/// (rounded toward zero) 32-bit integers returned in a 128-bit vector of +/// [4 x i32]. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. /// /// \headerfile /// @@ -2259,9 +2266,12 @@ _mm256_cvttpd_epi32(__m256d __a) return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df) __a); } -/// Converts a 256-bit vector of [4 x double] into a 128-bit vector of [4 -/// x i32]. When a conversion is inexact, the value returned is rounded -/// according to the rounding control bits in the MXCSR register. +/// Converts a 256-bit vector of [4 x double] into a 128-bit vector of +/// [4 x i32]. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. /// /// \headerfile /// @@ -2276,8 +2286,12 @@ _mm256_cvtpd_epi32(__m256d __a) return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df) __a); } -/// Converts a vector of [8 x float] into a vector of [8 x i32], -/// truncating the result by rounding towards zero when it is inexact. +/// Converts a vector of [8 x float] into eight signed truncated (rounded +/// toward zero) 32-bit integers returned in a vector of [8 x i32]. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. 
/// /// \headerfile /// diff --git a/lib/include/bmiintrin.h b/lib/include/bmiintrin.h index d8e57c0cb494..78bffe68e221 100644 --- a/lib/include/bmiintrin.h +++ b/lib/include/bmiintrin.h @@ -161,8 +161,7 @@ _mm_tzcnt_64(unsigned long long __X) #undef __RELAXED_FN_ATTRS -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__BMI__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__BMI__) /* Define the default attributes for the functions in this file. */ #define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("bmi"))) @@ -610,7 +609,6 @@ __blsr_u64(unsigned long long __X) #undef __DEFAULT_FN_ATTRS -#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) \ - || defined(__BMI__) */ +#endif /* !defined(__SCE__) || __has_feature(modules) || defined(__BMI__) */ #endif /* __BMIINTRIN_H */ diff --git a/lib/include/builtins.h b/lib/include/builtins.h index 65095861ca9b..1e534e632c8e 100644 --- a/lib/include/builtins.h +++ b/lib/include/builtins.h @@ -13,4 +13,7 @@ #ifndef __BUILTINS_H #define __BUILTINS_H +#if defined(__MVS__) && __has_include_next() +#include_next +#endif /* __MVS__ */ #endif /* __BUILTINS_H */ diff --git a/lib/include/cpuid.h b/lib/include/cpuid.h index 1ad6853a97c9..82d995f1b966 100644 --- a/lib/include/cpuid.h +++ b/lib/include/cpuid.h @@ -10,7 +10,7 @@ #ifndef __CPUID_H #define __CPUID_H -#if !(__x86_64__ || __i386__) +#if !defined(__x86_64__) && !defined(__i386__) #error this header is for x86 only #endif @@ -200,6 +200,9 @@ #define bit_AMXINT8 0x02000000 /* Features in %eax for leaf 7 sub-leaf 1 */ +#define bit_SHA512 0x00000001 +#define bit_SM3 0x00000002 +#define bit_SM4 0x00000004 #define bit_RAOINT 0x00000008 #define bit_AVXVNNI 0x00000010 #define bit_AVX512BF16 0x00000020 @@ -211,7 +214,12 @@ /* Features in %edx for leaf 7 sub-leaf 1 */ #define bit_AVXVNNIINT8 0x00000010 #define bit_AVXNECONVERT 0x00000020 +#define bit_AMXCOMPLEX 0x00000100 +#define 
bit_AVXVNNIINT16 0x00000400 #define bit_PREFETCHI 0x00004000 +#define bit_USERMSR 0x00008000 +#define bit_AVX10 0x00080000 +#define bit_APXF 0x00200000 /* Features in %eax for leaf 13 sub-leaf 1 */ #define bit_XSAVEOPT 0x00000001 @@ -244,8 +252,11 @@ #define bit_RDPRU 0x00000010 #define bit_WBNOINVD 0x00000200 +/* Features in %ebx for leaf 0x24 */ +#define bit_AVX10_256 0x00020000 +#define bit_AVX10_512 0x00040000 -#if __i386__ +#ifdef __i386__ #define __cpuid(__leaf, __eax, __ebx, __ecx, __edx) \ __asm("cpuid" : "=a"(__eax), "=b" (__ebx), "=c"(__ecx), "=d"(__edx) \ : "0"(__leaf)) @@ -274,7 +285,7 @@ static __inline unsigned int __get_cpuid_max (unsigned int __leaf, unsigned int *__sig) { unsigned int __eax, __ebx, __ecx, __edx; -#if __i386__ +#ifdef __i386__ int __cpuid_supported; __asm(" pushfl\n" @@ -328,4 +339,13 @@ static __inline int __get_cpuid_count (unsigned int __leaf, return 1; } +// In some configurations, __cpuidex is defined as a builtin (primarily +// -fms-extensions) which will conflict with the __cpuidex definition below. +#if !(__has_builtin(__cpuidex)) +static __inline void __cpuidex(int __cpu_info[4], int __leaf, int __subleaf) { + __cpuid_count(__leaf, __subleaf, __cpu_info[0], __cpu_info[1], __cpu_info[2], + __cpu_info[3]); +} +#endif + #endif /* __CPUID_H */ diff --git a/lib/include/cuda_wrappers/algorithm b/lib/include/cuda_wrappers/algorithm index f14a0b00bb04..3f59f28ae35b 100644 --- a/lib/include/cuda_wrappers/algorithm +++ b/lib/include/cuda_wrappers/algorithm @@ -99,7 +99,7 @@ template __attribute__((enable_if(true, ""))) inline _CPP14_CONSTEXPR __host__ __device__ const __T & min(const __T &__a, const __T &__b) { - return __a < __b ? __a : __b; + return __b < __a ? 
__b : __a; } #pragma pop_macro("_CPP14_CONSTEXPR") diff --git a/lib/include/emmintrin.h b/lib/include/emmintrin.h index 96e3ebdecbdf..e85bfc47aa5c 100644 --- a/lib/include/emmintrin.h +++ b/lib/include/emmintrin.h @@ -259,6 +259,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_pd(__m128d __a) { /// result. The upper 64 bits of the result are copied from the upper /// double-precision value of the first operand. /// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// /// \headerfile /// /// This intrinsic corresponds to the VMINSD / MINSD instruction. @@ -278,9 +280,11 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_sd(__m128d __a, } /// Performs element-by-element comparison of the two 128-bit vectors of -/// [2 x double] and returns the vector containing the lesser of each pair of +/// [2 x double] and returns a vector containing the lesser of each pair of /// values. /// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// /// \headerfile /// /// This intrinsic corresponds to the VMINPD / MINPD instruction. @@ -301,6 +305,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_pd(__m128d __a, /// result. The upper 64 bits of the result are copied from the upper /// double-precision value of the first operand. /// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// /// \headerfile /// /// This intrinsic corresponds to the VMAXSD / MAXSD instruction. @@ -320,9 +326,11 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_sd(__m128d __a, } /// Performs element-by-element comparison of the two 128-bit vectors of -/// [2 x double] and returns the vector containing the greater of each pair +/// [2 x double] and returns a vector containing the greater of each pair /// of values. /// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// /// \headerfile /// /// This intrinsic corresponds to the VMAXPD / MAXPD instruction. 
@@ -410,8 +418,10 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_xor_pd(__m128d __a, } /// Compares each of the corresponding double-precision values of the -/// 128-bit vectors of [2 x double] for equality. Each comparison yields 0x0 -/// for false, 0xFFFFFFFFFFFFFFFF for true. +/// 128-bit vectors of [2 x double] for equality. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile /// @@ -429,8 +439,10 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_pd(__m128d __a, /// Compares each of the corresponding double-precision values of the /// 128-bit vectors of [2 x double] to determine if the values in the first -/// operand are less than those in the second operand. Each comparison -/// yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// operand are less than those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile /// @@ -450,7 +462,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_pd(__m128d __a, /// 128-bit vectors of [2 x double] to determine if the values in the first /// operand are less than or equal to those in the second operand. /// -/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile /// @@ -470,7 +483,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_pd(__m128d __a, /// 128-bit vectors of [2 x double] to determine if the values in the first /// operand are greater than those in the second operand. /// -/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. 
/// /// \headerfile /// @@ -490,7 +504,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_pd(__m128d __a, /// 128-bit vectors of [2 x double] to determine if the values in the first /// operand are greater than or equal to those in the second operand. /// -/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile /// @@ -510,8 +525,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_pd(__m128d __a, /// 128-bit vectors of [2 x double] to determine if the values in the first /// operand are ordered with respect to those in the second operand. /// -/// A pair of double-precision values are "ordered" with respect to each -/// other if neither value is a NaN. Each comparison yields 0x0 for false, +/// A pair of double-precision values are ordered with respect to each +/// other if neither value is a NaN. Each comparison returns 0x0 for false, /// 0xFFFFFFFFFFFFFFFF for true. /// /// \headerfile @@ -532,8 +547,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_pd(__m128d __a, /// 128-bit vectors of [2 x double] to determine if the values in the first /// operand are unordered with respect to those in the second operand. /// -/// A pair of double-precision values are "unordered" with respect to each -/// other if one or both values are NaN. Each comparison yields 0x0 for +/// A pair of double-precision values are unordered with respect to each +/// other if one or both values are NaN. Each comparison returns 0x0 for /// false, 0xFFFFFFFFFFFFFFFF for true. /// /// \headerfile @@ -555,7 +570,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_pd(__m128d __a, /// 128-bit vectors of [2 x double] to determine if the values in the first /// operand are unequal to those in the second operand. /// -/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. 
+/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -575,7 +591,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_pd(__m128d __a, /// 128-bit vectors of [2 x double] to determine if the values in the first /// operand are not less than those in the second operand. /// -/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -595,7 +612,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_pd(__m128d __a, /// 128-bit vectors of [2 x double] to determine if the values in the first /// operand are not less than or equal to those in the second operand. /// -/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -615,7 +633,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_pd(__m128d __a, /// 128-bit vectors of [2 x double] to determine if the values in the first /// operand are not greater than those in the second operand. /// -/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -635,7 +654,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_pd(__m128d __a, /// 128-bit vectors of [2 x double] to determine if the values in the first /// operand are not greater than or equal to those in the second operand. /// -/// Each comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. 
/// /// \headerfile /// @@ -654,7 +674,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_pd(__m128d __a, /// Compares the lower double-precision floating-point values in each of /// the two 128-bit floating-point vectors of [2 x double] for equality. /// -/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile /// @@ -678,7 +699,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_sd(__m128d __a, /// the value in the first parameter is less than the corresponding value in /// the second parameter. /// -/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile /// @@ -702,7 +724,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_sd(__m128d __a, /// the value in the first parameter is less than or equal to the /// corresponding value in the second parameter. /// -/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile /// @@ -726,7 +749,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_sd(__m128d __a, /// the value in the first parameter is greater than the corresponding value /// in the second parameter. /// -/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile /// @@ -751,7 +775,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_sd(__m128d __a, /// the value in the first parameter is greater than or equal to the /// corresponding value in the second parameter. 
/// -/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile /// @@ -773,11 +798,11 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_sd(__m128d __a, /// Compares the lower double-precision floating-point values in each of /// the two 128-bit floating-point vectors of [2 x double] to determine if -/// the value in the first parameter is "ordered" with respect to the +/// the value in the first parameter is ordered with respect to the /// corresponding value in the second parameter. /// -/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair -/// of double-precision values are "ordered" with respect to each other if +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair +/// of double-precision values are ordered with respect to each other if /// neither value is a NaN. /// /// \headerfile @@ -799,11 +824,11 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_sd(__m128d __a, /// Compares the lower double-precision floating-point values in each of /// the two 128-bit floating-point vectors of [2 x double] to determine if -/// the value in the first parameter is "unordered" with respect to the +/// the value in the first parameter is unordered with respect to the /// corresponding value in the second parameter. /// -/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair -/// of double-precision values are "unordered" with respect to each other if +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. A pair +/// of double-precision values are unordered with respect to each other if /// one or both values are NaN. 
/// /// \headerfile @@ -829,7 +854,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_sd(__m128d __a, /// the value in the first parameter is unequal to the corresponding value in /// the second parameter. /// -/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -853,7 +879,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_sd(__m128d __a, /// the value in the first parameter is not less than the corresponding /// value in the second parameter. /// -/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -877,7 +904,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_sd(__m128d __a, /// the value in the first parameter is not less than or equal to the /// corresponding value in the second parameter. /// -/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -901,7 +929,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_sd(__m128d __a, /// the value in the first parameter is not greater than the corresponding /// value in the second parameter. /// -/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -926,7 +955,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_sd(__m128d __a, /// the value in the first parameter is not greater than or equal to the /// corresponding value in the second parameter. 
/// -/// The comparison yields 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// The comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -949,8 +979,8 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_sd(__m128d __a, /// Compares the lower double-precision floating-point values in each of /// the two 128-bit floating-point vectors of [2 x double] for equality. /// -/// The comparison yields 0 for false, 1 for true. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -962,8 +992,7 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_sd(__m128d __a, /// \param __b /// A 128-bit vector of [2 x double]. The lower double-precision value is /// compared to the lower double-precision value of \a __a. -/// \returns An integer containing the comparison results. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comieq_sd(__m128d __a, __m128d __b) { return __builtin_ia32_comisdeq((__v2df)__a, (__v2df)__b); @@ -974,8 +1003,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comieq_sd(__m128d __a, /// the value in the first parameter is less than the corresponding value in /// the second parameter. /// -/// The comparison yields 0 for false, 1 for true. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -987,8 +1016,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comieq_sd(__m128d __a, /// \param __b /// A 128-bit vector of [2 x double]. 
The lower double-precision value is /// compared to the lower double-precision value of \a __a. -/// \returns An integer containing the comparison results. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comilt_sd(__m128d __a, __m128d __b) { return __builtin_ia32_comisdlt((__v2df)__a, (__v2df)__b); @@ -999,8 +1027,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comilt_sd(__m128d __a, /// the value in the first parameter is less than or equal to the /// corresponding value in the second parameter. /// -/// The comparison yields 0 for false, 1 for true. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1012,8 +1040,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comilt_sd(__m128d __a, /// \param __b /// A 128-bit vector of [2 x double]. The lower double-precision value is /// compared to the lower double-precision value of \a __a. -/// \returns An integer containing the comparison results. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comile_sd(__m128d __a, __m128d __b) { return __builtin_ia32_comisdle((__v2df)__a, (__v2df)__b); @@ -1024,8 +1051,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comile_sd(__m128d __a, /// the value in the first parameter is greater than the corresponding value /// in the second parameter. /// -/// The comparison yields 0 for false, 1 for true. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. 
/// /// \headerfile /// @@ -1037,8 +1064,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comile_sd(__m128d __a, /// \param __b /// A 128-bit vector of [2 x double]. The lower double-precision value is /// compared to the lower double-precision value of \a __a. -/// \returns An integer containing the comparison results. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comigt_sd(__m128d __a, __m128d __b) { return __builtin_ia32_comisdgt((__v2df)__a, (__v2df)__b); @@ -1049,8 +1075,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comigt_sd(__m128d __a, /// the value in the first parameter is greater than or equal to the /// corresponding value in the second parameter. /// -/// The comparison yields 0 for false, 1 for true. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1062,8 +1088,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comigt_sd(__m128d __a, /// \param __b /// A 128-bit vector of [2 x double]. The lower double-precision value is /// compared to the lower double-precision value of \a __a. -/// \returns An integer containing the comparison results. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comige_sd(__m128d __a, __m128d __b) { return __builtin_ia32_comisdge((__v2df)__a, (__v2df)__b); @@ -1074,8 +1099,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comige_sd(__m128d __a, /// the value in the first parameter is unequal to the corresponding value in /// the second parameter. /// -/// The comparison yields 0 for false, 1 for true. If either of the two -/// lower double-precision values is NaN, 1 is returned. 
+/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 1. /// /// \headerfile /// @@ -1087,18 +1112,17 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comige_sd(__m128d __a, /// \param __b /// A 128-bit vector of [2 x double]. The lower double-precision value is /// compared to the lower double-precision value of \a __a. -/// \returns An integer containing the comparison results. If either of the two -/// lower double-precision values is NaN, 1 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comineq_sd(__m128d __a, __m128d __b) { return __builtin_ia32_comisdneq((__v2df)__a, (__v2df)__b); } /// Compares the lower double-precision floating-point values in each of -/// the two 128-bit floating-point vectors of [2 x double] for equality. The -/// comparison yields 0 for false, 1 for true. +/// the two 128-bit floating-point vectors of [2 x double] for equality. /// -/// If either of the two lower double-precision values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1110,8 +1134,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_comineq_sd(__m128d __a, /// \param __b /// A 128-bit vector of [2 x double]. The lower double-precision value is /// compared to the lower double-precision value of \a __a. -/// \returns An integer containing the comparison results. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomieq_sd(__m128d __a, __m128d __b) { return __builtin_ia32_ucomisdeq((__v2df)__a, (__v2df)__b); @@ -1122,8 +1145,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomieq_sd(__m128d __a, /// the value in the first parameter is less than the corresponding value in /// the second parameter. 
/// -/// The comparison yields 0 for false, 1 for true. If either of the two lower -/// double-precision values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1135,8 +1158,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomieq_sd(__m128d __a, /// \param __b /// A 128-bit vector of [2 x double]. The lower double-precision value is /// compared to the lower double-precision value of \a __a. -/// \returns An integer containing the comparison results. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomilt_sd(__m128d __a, __m128d __b) { return __builtin_ia32_ucomisdlt((__v2df)__a, (__v2df)__b); @@ -1147,8 +1169,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomilt_sd(__m128d __a, /// the value in the first parameter is less than or equal to the /// corresponding value in the second parameter. /// -/// The comparison yields 0 for false, 1 for true. If either of the two lower -/// double-precision values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1160,8 +1182,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomilt_sd(__m128d __a, /// \param __b /// A 128-bit vector of [2 x double]. The lower double-precision value is /// compared to the lower double-precision value of \a __a. -/// \returns An integer containing the comparison results. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. 
static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomile_sd(__m128d __a, __m128d __b) { return __builtin_ia32_ucomisdle((__v2df)__a, (__v2df)__b); @@ -1172,8 +1193,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomile_sd(__m128d __a, /// the value in the first parameter is greater than the corresponding value /// in the second parameter. /// -/// The comparison yields 0 for false, 1 for true. If either of the two lower -/// double-precision values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1185,8 +1206,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomile_sd(__m128d __a, /// \param __b /// A 128-bit vector of [2 x double]. The lower double-precision value is /// compared to the lower double-precision value of \a __a. -/// \returns An integer containing the comparison results. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomigt_sd(__m128d __a, __m128d __b) { return __builtin_ia32_ucomisdgt((__v2df)__a, (__v2df)__b); @@ -1197,8 +1217,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomigt_sd(__m128d __a, /// the value in the first parameter is greater than or equal to the /// corresponding value in the second parameter. /// -/// The comparison yields 0 for false, 1 for true. If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1210,8 +1230,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomigt_sd(__m128d __a, /// \param __b /// A 128-bit vector of [2 x double]. The lower double-precision value is /// compared to the lower double-precision value of \a __a. -/// \returns An integer containing the comparison results. 
If either of the two -/// lower double-precision values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomige_sd(__m128d __a, __m128d __b) { return __builtin_ia32_ucomisdge((__v2df)__a, (__v2df)__b); @@ -1222,8 +1241,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomige_sd(__m128d __a, /// the value in the first parameter is unequal to the corresponding value in /// the second parameter. /// -/// The comparison yields 0 for false, 1 for true. If either of the two lower -/// double-precision values is NaN, 1 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 1. /// /// \headerfile /// @@ -1235,8 +1254,7 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomige_sd(__m128d __a, /// \param __b /// A 128-bit vector of [2 x double]. The lower double-precision value is /// compared to the lower double-precision value of \a __a. -/// \returns An integer containing the comparison result. If either of the two -/// lower double-precision values is NaN, 1 is returned. +/// \returns An integer containing the comparison result. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomineq_sd(__m128d __a, __m128d __b) { return __builtin_ia32_ucomisdneq((__v2df)__a, (__v2df)__b); @@ -1304,6 +1322,10 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtepi32_pd(__m128i __a) { /// returned in the lower 64 bits of a 128-bit vector of [4 x i32]. The upper /// 64 bits of the result vector are set to zero. /// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// /// \headerfile /// /// This intrinsic corresponds to the VCVTPD2DQ / CVTPD2DQ instruction. 
@@ -1319,6 +1341,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtpd_epi32(__m128d __a) { /// Converts the low-order element of a 128-bit vector of [2 x double] /// into a 32-bit signed integer value. /// +/// If the converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// /// \headerfile /// /// This intrinsic corresponds to the VCVTSD2SI / CVTSD2SI instruction. @@ -1404,12 +1430,13 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtss_sd(__m128d __a, } /// Converts the two double-precision floating-point elements of a -/// 128-bit vector of [2 x double] into two signed 32-bit integer values, -/// returned in the lower 64 bits of a 128-bit vector of [4 x i32]. +/// 128-bit vector of [2 x double] into two signed truncated (rounded +/// toward zero) 32-bit integer values, returned in the lower 64 bits +/// of a 128-bit vector of [4 x i32]. /// -/// If the result of either conversion is inexact, the result is truncated -/// (rounded towards zero) regardless of the current MXCSR setting. The upper -/// 64 bits of the result vector are set to zero. +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. /// /// \headerfile /// @@ -1425,7 +1452,11 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttpd_epi32(__m128d __a) { } /// Converts the low-order element of a [2 x double] vector into a 32-bit -/// signed integer value, truncating the result when it is inexact. +/// signed truncated (rounded toward zero) integer value. +/// +/// If the converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. 
/// /// \headerfile /// @@ -1444,6 +1475,10 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_cvttsd_si32(__m128d __a) { /// 128-bit vector of [2 x double] into two signed 32-bit integer values, /// returned in a 64-bit vector of [2 x i32]. /// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// /// \headerfile /// /// This intrinsic corresponds to the CVTPD2PI instruction. @@ -1456,11 +1491,12 @@ static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_cvtpd_pi32(__m128d __a) { } /// Converts the two double-precision floating-point elements of a -/// 128-bit vector of [2 x double] into two signed 32-bit integer values, -/// returned in a 64-bit vector of [2 x i32]. +/// 128-bit vector of [2 x double] into two signed truncated (rounded toward +/// zero) 32-bit integer values, returned in a 64-bit vector of [2 x i32]. /// -/// If the result of either conversion is inexact, the result is truncated -/// (rounded towards zero) regardless of the current MXCSR setting. +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. /// /// \headerfile /// @@ -2099,9 +2135,11 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi64(__m128i __a, } /// Adds, with saturation, the corresponding elements of two 128-bit -/// signed [16 x i8] vectors, saving each sum in the corresponding element of -/// a 128-bit result vector of [16 x i8]. Positive sums greater than 0x7F are -/// saturated to 0x7F. Negative sums less than 0x80 are saturated to 0x80. +/// signed [16 x i8] vectors, saving each sum in the corresponding element +/// of a 128-bit result vector of [16 x i8]. +/// +/// Positive sums greater than 0x7F are saturated to 0x7F. Negative sums +/// less than 0x80 are saturated to 0x80. 
/// /// \headerfile /// @@ -2119,10 +2157,11 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi8(__m128i __a, } /// Adds, with saturation, the corresponding elements of two 128-bit -/// signed [8 x i16] vectors, saving each sum in the corresponding element of -/// a 128-bit result vector of [8 x i16]. Positive sums greater than 0x7FFF -/// are saturated to 0x7FFF. Negative sums less than 0x8000 are saturated to -/// 0x8000. +/// signed [8 x i16] vectors, saving each sum in the corresponding element +/// of a 128-bit result vector of [8 x i16]. +/// +/// Positive sums greater than 0x7FFF are saturated to 0x7FFF. Negative sums +/// less than 0x8000 are saturated to 0x8000. /// /// \headerfile /// @@ -2141,8 +2180,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi16(__m128i __a, /// Adds, with saturation, the corresponding elements of two 128-bit /// unsigned [16 x i8] vectors, saving each sum in the corresponding element -/// of a 128-bit result vector of [16 x i8]. Positive sums greater than 0xFF -/// are saturated to 0xFF. Negative sums are saturated to 0x00. +/// of a 128-bit result vector of [16 x i8]. +/// +/// Positive sums greater than 0xFF are saturated to 0xFF. Negative sums are +/// saturated to 0x00. /// /// \headerfile /// @@ -2161,8 +2202,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu8(__m128i __a, /// Adds, with saturation, the corresponding elements of two 128-bit /// unsigned [8 x i16] vectors, saving each sum in the corresponding element -/// of a 128-bit result vector of [8 x i16]. Positive sums greater than -/// 0xFFFF are saturated to 0xFFFF. Negative sums are saturated to 0x0000. +/// of a 128-bit result vector of [8 x i16]. +/// +/// Positive sums greater than 0xFFFF are saturated to 0xFFFF. Negative sums +/// are saturated to 0x0000. 
/// /// \headerfile /// @@ -2518,10 +2561,12 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi64(__m128i __a, return (__m128i)((__v2du)__a - (__v2du)__b); } -/// Subtracts corresponding 8-bit signed integer values in the input and -/// returns the differences in the corresponding bytes in the destination. -/// Differences greater than 0x7F are saturated to 0x7F, and differences less -/// than 0x80 are saturated to 0x80. +/// Subtracts, with saturation, corresponding 8-bit signed integer values in +/// the input and returns the differences in the corresponding bytes in the +/// destination. +/// +/// Differences greater than 0x7F are saturated to 0x7F, and differences +/// less than 0x80 are saturated to 0x80. /// /// \headerfile /// @@ -2538,8 +2583,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi8(__m128i __a, return (__m128i)__builtin_elementwise_sub_sat((__v16qs)__a, (__v16qs)__b); } -/// Subtracts corresponding 16-bit signed integer values in the input and -/// returns the differences in the corresponding bytes in the destination. +/// Subtracts, with saturation, corresponding 16-bit signed integer values in +/// the input and returns the differences in the corresponding bytes in the +/// destination. +/// /// Differences greater than 0x7FFF are saturated to 0x7FFF, and values less /// than 0x8000 are saturated to 0x8000. /// @@ -2558,9 +2605,11 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi16(__m128i __a, return (__m128i)__builtin_elementwise_sub_sat((__v8hi)__a, (__v8hi)__b); } -/// Subtracts corresponding 8-bit unsigned integer values in the input -/// and returns the differences in the corresponding bytes in the -/// destination. Differences less than 0x00 are saturated to 0x00. +/// Subtracts, with saturation, corresponding 8-bit unsigned integer values in +/// the input and returns the differences in the corresponding bytes in the +/// destination. +/// +/// Differences less than 0x00 are saturated to 0x00. 
/// /// \headerfile /// @@ -2577,9 +2626,11 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu8(__m128i __a, return (__m128i)__builtin_elementwise_sub_sat((__v16qu)__a, (__v16qu)__b); } -/// Subtracts corresponding 16-bit unsigned integer values in the input -/// and returns the differences in the corresponding bytes in the -/// destination. Differences less than 0x0000 are saturated to 0x0000. +/// Subtracts, with saturation, corresponding 16-bit unsigned integer values in +/// the input and returns the differences in the corresponding bytes in the +/// destination. +/// +/// Differences less than 0x0000 are saturated to 0x0000. /// /// \headerfile /// @@ -3008,8 +3059,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi64(__m128i __a, } /// Compares each of the corresponding 8-bit values of the 128-bit -/// integer vectors for equality. Each comparison yields 0x0 for false, 0xFF -/// for true. +/// integer vectors for equality. +/// +/// Each comparison returns 0x0 for false, 0xFF for true. /// /// \headerfile /// @@ -3026,8 +3078,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi8(__m128i __a, } /// Compares each of the corresponding 16-bit values of the 128-bit -/// integer vectors for equality. Each comparison yields 0x0 for false, -/// 0xFFFF for true. +/// integer vectors for equality. +/// +/// Each comparison returns 0x0 for false, 0xFFFF for true. /// /// \headerfile /// @@ -3044,8 +3097,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi16(__m128i __a, } /// Compares each of the corresponding 32-bit values of the 128-bit -/// integer vectors for equality. Each comparison yields 0x0 for false, -/// 0xFFFFFFFF for true. +/// integer vectors for equality. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. 
/// /// \headerfile /// @@ -3063,8 +3117,9 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi32(__m128i __a, /// Compares each of the corresponding signed 8-bit values of the 128-bit /// integer vectors to determine if the values in the first operand are -/// greater than those in the second operand. Each comparison yields 0x0 for -/// false, 0xFF for true. +/// greater than those in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFF for true. /// /// \headerfile /// @@ -3086,7 +3141,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi8(__m128i __a, /// 128-bit integer vectors to determine if the values in the first operand /// are greater than those in the second operand. /// -/// Each comparison yields 0x0 for false, 0xFFFF for true. +/// Each comparison returns 0x0 for false, 0xFFFF for true. /// /// \headerfile /// @@ -3106,7 +3161,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi16(__m128i __a, /// 128-bit integer vectors to determine if the values in the first operand /// are greater than those in the second operand. /// -/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. /// /// \headerfile /// @@ -3126,7 +3181,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi32(__m128i __a, /// integer vectors to determine if the values in the first operand are less /// than those in the second operand. /// -/// Each comparison yields 0x0 for false, 0xFF for true. +/// Each comparison returns 0x0 for false, 0xFF for true. /// /// \headerfile /// @@ -3146,7 +3201,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi8(__m128i __a, /// 128-bit integer vectors to determine if the values in the first operand /// are less than those in the second operand. /// -/// Each comparison yields 0x0 for false, 0xFFFF for true. +/// Each comparison returns 0x0 for false, 0xFFFF for true. 
/// /// \headerfile /// @@ -3166,7 +3221,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi16(__m128i __a, /// 128-bit integer vectors to determine if the values in the first operand /// are less than those in the second operand. /// -/// Each comparison yields 0x0 for false, 0xFFFFFFFF for true. +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. /// /// \headerfile /// @@ -3207,7 +3262,11 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtsi64_sd(__m128d __a, } /// Converts the first (lower) element of a vector of [2 x double] into a -/// 64-bit signed integer value, according to the current rounding mode. +/// 64-bit signed integer value. +/// +/// If the converted value does not fit in a 64-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. /// /// \headerfile /// @@ -3222,7 +3281,11 @@ static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvtsd_si64(__m128d __a) { } /// Converts the first (lower) element of a vector of [2 x double] into a -/// 64-bit signed integer value, truncating the result when it is inexact. +/// 64-bit signed truncated (rounded toward zero) integer value. +/// +/// If a converted value does not fit in a 64-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. /// /// \headerfile /// @@ -3253,6 +3316,10 @@ static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtepi32_ps(__m128i __a) { /// Converts a vector of [4 x float] into a vector of [4 x i32]. /// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// /// \headerfile /// /// This intrinsic corresponds to the VCVTPS2DQ / CVTPS2DQ instruction. 
@@ -3265,8 +3332,12 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtps_epi32(__m128 __a) { return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)__a); } -/// Converts a vector of [4 x float] into a vector of [4 x i32], -/// truncating the result when it is inexact. +/// Converts a vector of [4 x float] into four signed truncated (rounded toward +/// zero) 32-bit integers, returned in a vector of [4 x i32]. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. /// /// \headerfile /// @@ -4050,26 +4121,22 @@ void _mm_mfence(void); } // extern "C" #endif -/// Converts 16-bit signed integers from both 128-bit integer vector -/// operands into 8-bit signed integers, and packs the results into the -/// destination. Positive values greater than 0x7F are saturated to 0x7F. -/// Negative values less than 0x80 are saturated to 0x80. +/// Converts, with saturation, 16-bit signed integers from both 128-bit integer +/// vector operands into 8-bit signed integers, and packs the results into +/// the destination. +/// +/// Positive values greater than 0x7F are saturated to 0x7F. Negative values +/// less than 0x80 are saturated to 0x80. /// /// \headerfile /// /// This intrinsic corresponds to the VPACKSSWB / PACKSSWB instruction. /// /// \param __a -/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as -/// a signed integer and is converted to a 8-bit signed integer with -/// saturation. Values greater than 0x7F are saturated to 0x7F. Values less -/// than 0x80 are saturated to 0x80. The converted [8 x i8] values are +/// A 128-bit integer vector of [8 x i16]. The converted [8 x i8] values are /// written to the lower 64 bits of the result. /// \param __b -/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as -/// a signed integer and is converted to a 8-bit signed integer with -/// saturation. 
Values greater than 0x7F are saturated to 0x7F. Values less -/// than 0x80 are saturated to 0x80. The converted [8 x i8] values are +/// A 128-bit integer vector of [8 x i16]. The converted [8 x i8] values are /// written to the higher 64 bits of the result. /// \returns A 128-bit vector of [16 x i8] containing the converted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi16(__m128i __a, @@ -4077,26 +4144,22 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi16(__m128i __a, return (__m128i)__builtin_ia32_packsswb128((__v8hi)__a, (__v8hi)__b); } -/// Converts 32-bit signed integers from both 128-bit integer vector -/// operands into 16-bit signed integers, and packs the results into the -/// destination. Positive values greater than 0x7FFF are saturated to 0x7FFF. -/// Negative values less than 0x8000 are saturated to 0x8000. +/// Converts, with saturation, 32-bit signed integers from both 128-bit integer +/// vector operands into 16-bit signed integers, and packs the results into +/// the destination. +/// +/// Positive values greater than 0x7FFF are saturated to 0x7FFF. Negative +/// values less than 0x8000 are saturated to 0x8000. /// /// \headerfile /// /// This intrinsic corresponds to the VPACKSSDW / PACKSSDW instruction. /// /// \param __a -/// A 128-bit integer vector of [4 x i32]. Each 32-bit element is treated as -/// a signed integer and is converted to a 16-bit signed integer with -/// saturation. Values greater than 0x7FFF are saturated to 0x7FFF. Values -/// less than 0x8000 are saturated to 0x8000. The converted [4 x i16] values +/// A 128-bit integer vector of [4 x i32]. The converted [4 x i16] values /// are written to the lower 64 bits of the result. /// \param __b -/// A 128-bit integer vector of [4 x i32]. Each 32-bit element is treated as -/// a signed integer and is converted to a 16-bit signed integer with -/// saturation. Values greater than 0x7FFF are saturated to 0x7FFF. 
Values -/// less than 0x8000 are saturated to 0x8000. The converted [4 x i16] values +/// A 128-bit integer vector of [4 x i32]. The converted [4 x i16] values /// are written to the higher 64 bits of the result. /// \returns A 128-bit vector of [8 x i16] containing the converted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi32(__m128i __a, @@ -4104,26 +4167,22 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi32(__m128i __a, return (__m128i)__builtin_ia32_packssdw128((__v4si)__a, (__v4si)__b); } -/// Converts 16-bit signed integers from both 128-bit integer vector -/// operands into 8-bit unsigned integers, and packs the results into the -/// destination. Values greater than 0xFF are saturated to 0xFF. Values less -/// than 0x00 are saturated to 0x00. +/// Converts, with saturation, 16-bit signed integers from both 128-bit integer +/// vector operands into 8-bit unsigned integers, and packs the results into +/// the destination. +/// +/// Values greater than 0xFF are saturated to 0xFF. Values less than 0x00 +/// are saturated to 0x00. /// /// \headerfile /// /// This intrinsic corresponds to the VPACKUSWB / PACKUSWB instruction. /// /// \param __a -/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as -/// a signed integer and is converted to an 8-bit unsigned integer with -/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less -/// than 0x00 are saturated to 0x00. The converted [8 x i8] values are +/// A 128-bit integer vector of [8 x i16]. The converted [8 x i8] values are /// written to the lower 64 bits of the result. /// \param __b -/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as -/// a signed integer and is converted to an 8-bit unsigned integer with -/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less -/// than 0x00 are saturated to 0x00. The converted [8 x i8] values are +/// A 128-bit integer vector of [8 x i16]. 
The converted [8 x i8] values are /// written to the higher 64 bits of the result. /// \returns A 128-bit vector of [16 x i8] containing the converted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi16(__m128i __a, @@ -4742,6 +4801,78 @@ static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_castsi128_pd(__m128i __a) { return (__m128d)__a; } +/// Compares each of the corresponding double-precision values of two +/// 128-bit vectors of [2 x double], using the operation specified by the +/// immediate integer operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_cmp_pd(__m128d a, __m128d b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the (V)CMPPD instruction. +/// +/// \param a +/// A 128-bit vector of [2 x double]. +/// \param b +/// A 128-bit vector of [2 x double]. +/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 0x00: Equal (ordered, non-signaling) \n +/// 0x01: Less-than (ordered, signaling) \n +/// 0x02: Less-than-or-equal (ordered, signaling) \n +/// 0x03: Unordered (non-signaling) \n +/// 0x04: Not-equal (unordered, non-signaling) \n +/// 0x05: Not-less-than (unordered, signaling) \n +/// 0x06: Not-less-than-or-equal (unordered, signaling) \n +/// 0x07: Ordered (non-signaling) \n +/// \returns A 128-bit vector of [2 x double] containing the comparison results. +#define _mm_cmp_pd(a, b, c) \ + ((__m128d)__builtin_ia32_cmppd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \ + (c))) + +/// Compares each of the corresponding scalar double-precision values of +/// two 128-bit vectors of [2 x double], using the operation specified by the +/// immediate integer operand. 
+/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. +/// +/// \headerfile +/// +/// \code +/// __m128d _mm_cmp_sd(__m128d a, __m128d b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the (V)CMPSD instruction. +/// +/// \param a +/// A 128-bit vector of [2 x double]. +/// \param b +/// A 128-bit vector of [2 x double]. +/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 0x00: Equal (ordered, non-signaling) \n +/// 0x01: Less-than (ordered, signaling) \n +/// 0x02: Less-than-or-equal (ordered, signaling) \n +/// 0x03: Unordered (non-signaling) \n +/// 0x04: Not-equal (unordered, non-signaling) \n +/// 0x05: Not-less-than (unordered, signaling) \n +/// 0x06: Not-less-than-or-equal (unordered, signaling) \n +/// 0x07: Ordered (non-signaling) \n +/// \returns A 128-bit vector of [2 x double] containing the comparison results. +#define _mm_cmp_sd(a, b, c) \ + ((__m128d)__builtin_ia32_cmpsd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \ + (c))) + #if defined(__cplusplus) extern "C" { #endif diff --git a/lib/include/float.h b/lib/include/float.h index 0e73bca0a2d6..e5c439a9d47a 100644 --- a/lib/include/float.h +++ b/lib/include/float.h @@ -10,6 +10,10 @@ #ifndef __CLANG_FLOAT_H #define __CLANG_FLOAT_H +#if defined(__MVS__) && __has_include_next() +#include_next +#else + /* If we're on MinGW, fall back to the system's float.h, which might have * additional definitions provided for Windows. 
* For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx @@ -82,6 +86,18 @@ # undef DBL_HAS_SUBNORM # undef LDBL_HAS_SUBNORM # endif +#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \ + !defined(__STRICT_ANSI__) +# undef FLT_NORM_MAX +# undef DBL_NORM_MAX +# undef LDBL_NORM_MAX +#endif +#endif + +#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \ + !defined(__STRICT_ANSI__) +# undef INFINITY +# undef NAN #endif /* Characteristics of floating point types, C99 5.2.4.2.2 */ @@ -151,6 +167,17 @@ # define LDBL_HAS_SUBNORM __LDBL_HAS_DENORM__ #endif +#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \ + !defined(__STRICT_ANSI__) + /* C23 5.2.5.3.3p29-30 */ +# define INFINITY (__builtin_inff()) +# define NAN (__builtin_nanf("")) + /* C23 5.2.5.3.3p32 */ +# define FLT_NORM_MAX __FLT_NORM_MAX__ +# define DBL_NORM_MAX __DBL_NORM_MAX__ +# define LDBL_NORM_MAX __LDBL_NORM_MAX__ +#endif + #ifdef __STDC_WANT_IEC_60559_TYPES_EXT__ # define FLT16_MANT_DIG __FLT16_MANT_DIG__ # define FLT16_DECIMAL_DIG __FLT16_DECIMAL_DIG__ @@ -165,4 +192,5 @@ # define FLT16_TRUE_MIN __FLT16_TRUE_MIN__ #endif /* __STDC_WANT_IEC_60559_TYPES_EXT__ */ +#endif /* __MVS__ */ #endif /* __CLANG_FLOAT_H */ diff --git a/lib/include/fmaintrin.h b/lib/include/fmaintrin.h index ea832fac4f99..22d1a780bbfd 100644 --- a/lib/include/fmaintrin.h +++ b/lib/include/fmaintrin.h @@ -60,7 +60,8 @@ _mm_fmadd_pd(__m128d __A, __m128d __B, __m128d __C) /// Computes a scalar multiply-add of the single-precision values in the /// low 32 bits of 128-bit vectors of [4 x float]. -/// \code +/// +/// \code{.operation} /// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0] /// result[127:32] = __A[127:32] /// \endcode @@ -88,7 +89,8 @@ _mm_fmadd_ss(__m128 __A, __m128 __B, __m128 __C) /// Computes a scalar multiply-add of the double-precision values in the /// low 64 bits of 128-bit vectors of [2 x double]. 
-/// \code +/// +/// \code{.operation} /// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0] /// result[127:64] = __A[127:64] /// \endcode @@ -156,7 +158,8 @@ _mm_fmsub_pd(__m128d __A, __m128d __B, __m128d __C) /// Computes a scalar multiply-subtract of the single-precision values in /// the low 32 bits of 128-bit vectors of [4 x float]. -/// \code +/// +/// \code{.operation} /// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0] /// result[127:32] = __A[127:32] /// \endcode @@ -184,7 +187,8 @@ _mm_fmsub_ss(__m128 __A, __m128 __B, __m128 __C) /// Computes a scalar multiply-subtract of the double-precision values in /// the low 64 bits of 128-bit vectors of [2 x double]. -/// \code +/// +/// \code{.operation} /// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0] /// result[127:64] = __A[127:64] /// \endcode @@ -252,7 +256,8 @@ _mm_fnmadd_pd(__m128d __A, __m128d __B, __m128d __C) /// Computes a scalar negated multiply-add of the single-precision values in /// the low 32 bits of 128-bit vectors of [4 x float]. -/// \code +/// +/// \code{.operation} /// result[31:0] = -(__A[31:0] * __B[31:0]) + __C[31:0] /// result[127:32] = __A[127:32] /// \endcode @@ -280,7 +285,8 @@ _mm_fnmadd_ss(__m128 __A, __m128 __B, __m128 __C) /// Computes a scalar negated multiply-add of the double-precision values /// in the low 64 bits of 128-bit vectors of [2 x double]. -/// \code +/// +/// \code{.operation} /// result[63:0] = -(__A[63:0] * __B[63:0]) + __C[63:0] /// result[127:64] = __A[127:64] /// \endcode @@ -348,7 +354,8 @@ _mm_fnmsub_pd(__m128d __A, __m128d __B, __m128d __C) /// Computes a scalar negated multiply-subtract of the single-precision /// values in the low 32 bits of 128-bit vectors of [4 x float]. 
-/// \code +/// +/// \code{.operation} /// result[31:0] = -(__A[31:0] * __B[31:0]) - __C[31:0] /// result[127:32] = __A[127:32] /// \endcode @@ -376,7 +383,8 @@ _mm_fnmsub_ss(__m128 __A, __m128 __B, __m128 __C) /// Computes a scalar negated multiply-subtract of the double-precision /// values in the low 64 bits of 128-bit vectors of [2 x double]. -/// \code +/// +/// \code{.operation} /// result[63:0] = -(__A[63:0] * __B[63:0]) - __C[63:0] /// result[127:64] = __A[127:64] /// \endcode @@ -404,7 +412,8 @@ _mm_fnmsub_sd(__m128d __A, __m128d __B, __m128d __C) /// Computes a multiply with alternating add/subtract of 128-bit vectors of /// [4 x float]. -/// \code +/// +/// \code{.operation} /// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0] /// result[63:32] = (__A[63:32] * __B[63:32]) + __C[63:32] /// result[95:64] = (__A[95:64] * __B[95:64]) - __C[95:64] @@ -430,7 +439,8 @@ _mm_fmaddsub_ps(__m128 __A, __m128 __B, __m128 __C) /// Computes a multiply with alternating add/subtract of 128-bit vectors of /// [2 x double]. -/// \code +/// +/// \code{.operation} /// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0] /// result[127:64] = (__A[127:64] * __B[127:64]) + __C[127:64] /// \endcode @@ -454,7 +464,8 @@ _mm_fmaddsub_pd(__m128d __A, __m128d __B, __m128d __C) /// Computes a multiply with alternating add/subtract of 128-bit vectors of /// [4 x float]. -/// \code +/// +/// \code{.operation} /// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0] /// result[63:32] = (__A[63:32] * __B[63:32]) - __C[63:32] /// result[95:64] = (__A[95:64] * __B[95:64]) + __C[95:64] @@ -480,7 +491,8 @@ _mm_fmsubadd_ps(__m128 __A, __m128 __B, __m128 __C) /// Computes a multiply with alternating add/subtract of 128-bit vectors of /// [2 x double]. 
-/// \code +/// +/// \code{.operation} /// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0] /// result[127:64] = (__A[127:64] * __B[127:64]) - __C[127:64] /// \endcode @@ -664,7 +676,8 @@ _mm256_fnmsub_pd(__m256d __A, __m256d __B, __m256d __C) /// Computes a multiply with alternating add/subtract of 256-bit vectors of /// [8 x float]. -/// \code +/// +/// \code{.operation} /// result[31:0] = (__A[31:0] * __B[31:0]) - __C[31:0] /// result[63:32] = (__A[63:32] * __B[63:32]) + __C[63:32] /// result[95:64] = (__A[95:64] * __B[95:64]) - __C[95:64] @@ -694,7 +707,8 @@ _mm256_fmaddsub_ps(__m256 __A, __m256 __B, __m256 __C) /// Computes a multiply with alternating add/subtract of 256-bit vectors of /// [4 x double]. -/// \code +/// +/// \code{.operation} /// result[63:0] = (__A[63:0] * __B[63:0]) - __C[63:0] /// result[127:64] = (__A[127:64] * __B[127:64]) + __C[127:64] /// result[191:128] = (__A[191:128] * __B[191:128]) - __C[191:128] @@ -720,7 +734,8 @@ _mm256_fmaddsub_pd(__m256d __A, __m256d __B, __m256d __C) /// Computes a vector multiply with alternating add/subtract of 256-bit /// vectors of [8 x float]. -/// \code +/// +/// \code{.operation} /// result[31:0] = (__A[31:0] * __B[31:0]) + __C[31:0] /// result[63:32] = (__A[63:32] * __B[63:32]) - __C[63:32] /// result[95:64] = (__A[95:64] * __B[95:64]) + __C[95:64] @@ -750,7 +765,8 @@ _mm256_fmsubadd_ps(__m256 __A, __m256 __B, __m256 __C) /// Computes a vector multiply with alternating add/subtract of 256-bit /// vectors of [4 x double]. 
-/// \code +/// +/// \code{.operation} /// result[63:0] = (__A[63:0] * __B[63:0]) + __C[63:0] /// result[127:64] = (__A[127:64] * __B[127:64]) - __C[127:64] /// result[191:128] = (__A[191:128] * __B[191:128]) + __C[191:128] diff --git a/lib/include/ia32intrin.h b/lib/include/ia32intrin.h index 1b979770e196..8e65f232a0de 100644 --- a/lib/include/ia32intrin.h +++ b/lib/include/ia32intrin.h @@ -26,8 +26,8 @@ #define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS #endif -/// Find the first set bit starting from the lsb. Result is undefined if -/// input is 0. +/// Finds the first set bit starting from the least significant bit. The result +/// is undefined if the input is 0. /// /// \headerfile /// @@ -43,8 +43,8 @@ __bsfd(int __A) { return __builtin_ctz((unsigned int)__A); } -/// Find the first set bit starting from the msb. Result is undefined if -/// input is 0. +/// Finds the first set bit starting from the most significant bit. The result +/// is undefined if the input is 0. /// /// \headerfile /// @@ -90,8 +90,8 @@ _bswap(int __A) { return (int)__builtin_bswap32((unsigned int)__A); } -/// Find the first set bit starting from the lsb. Result is undefined if -/// input is 0. +/// Finds the first set bit starting from the least significant bit. The result +/// is undefined if the input is 0. /// /// \headerfile /// @@ -108,8 +108,8 @@ _bswap(int __A) { /// \see __bsfd #define _bit_scan_forward(A) __bsfd((A)) -/// Find the first set bit starting from the msb. Result is undefined if -/// input is 0. +/// Finds the first set bit starting from the most significant bit. The result +/// is undefined if the input is 0. /// /// \headerfile /// @@ -127,8 +127,8 @@ _bswap(int __A) { #define _bit_scan_reverse(A) __bsrd((A)) #ifdef __x86_64__ -/// Find the first set bit starting from the lsb. Result is undefined if -/// input is 0. +/// Finds the first set bit starting from the least significant bit. The result +/// is undefined if the input is 0. 
/// /// \headerfile /// @@ -143,8 +143,8 @@ __bsfq(long long __A) { return (long long)__builtin_ctzll((unsigned long long)__A); } -/// Find the first set bit starting from the msb. Result is undefined if -/// input is 0. +/// Finds the first set bit starting from the most significant bit. The result +/// is undefined if input is 0. /// /// \headerfile /// @@ -159,7 +159,7 @@ __bsrq(long long __A) { return 63 - __builtin_clzll((unsigned long long)__A); } -/// Swaps the bytes in the input. Converting little endian to big endian or +/// Swaps the bytes in the input, converting little endian to big endian or /// vice versa. /// /// \headerfile @@ -175,7 +175,7 @@ __bswapq(long long __A) { return (long long)__builtin_bswap64((unsigned long long)__A); } -/// Swaps the bytes in the input. Converting little endian to big endian or +/// Swaps the bytes in the input, converting little endian to big endian or /// vice versa. /// /// \headerfile @@ -198,7 +198,7 @@ __bswapq(long long __A) { /// \headerfile /// /// This intrinsic corresponds to the \c POPCNT instruction or a -/// a sequence of arithmetic and logic ops to calculate it. +/// sequence of arithmetic and logic operations to calculate it. /// /// \param __A /// An unsigned 32-bit integer operand. @@ -220,7 +220,7 @@ __popcntd(unsigned int __A) /// \endcode /// /// This intrinsic corresponds to the \c POPCNT instruction or a -/// a sequence of arithmetic and logic ops to calculate it. +/// sequence of arithmetic and logic operations to calculate it. /// /// \param A /// An unsigned 32-bit integer operand. @@ -235,7 +235,7 @@ __popcntd(unsigned int __A) /// \headerfile /// /// This intrinsic corresponds to the \c POPCNT instruction or a -/// a sequence of arithmetic and logic ops to calculate it. +/// sequence of arithmetic and logic operations to calculate it. /// /// \param __A /// An unsigned 64-bit integer operand. 
@@ -257,7 +257,7 @@ __popcntq(unsigned long long __A) /// \endcode /// /// This intrinsic corresponds to the \c POPCNT instruction or a -/// a sequence of arithmetic and logic ops to calculate it. +/// sequence of arithmetic and logic operations to calculate it. /// /// \param A /// An unsigned 64-bit integer operand. @@ -268,7 +268,7 @@ __popcntq(unsigned long long __A) #endif /* __x86_64__ */ #ifdef __x86_64__ -/// Returns the program status and control \c RFLAGS register with the \c VM +/// Returns the program status-and-control \c RFLAGS register with the \c VM /// and \c RF flags cleared. /// /// \headerfile @@ -282,7 +282,7 @@ __readeflags(void) return __builtin_ia32_readeflags_u64(); } -/// Writes the specified value to the program status and control \c RFLAGS +/// Writes the specified value to the program status-and-control \c RFLAGS /// register. Reserved bits are not affected. /// /// \headerfile @@ -298,7 +298,7 @@ __writeeflags(unsigned long long __f) } #else /* !__x86_64__ */ -/// Returns the program status and control \c EFLAGS register with the \c VM +/// Returns the program status-and-control \c EFLAGS register with the \c VM /// and \c RF flags cleared. /// /// \headerfile @@ -312,7 +312,7 @@ __readeflags(void) return __builtin_ia32_readeflags_u32(); } -/// Writes the specified value to the program status and control \c EFLAGS +/// Writes the specified value to the program status-and-control \c EFLAGS /// register. Reserved bits are not affected. /// /// \headerfile @@ -328,7 +328,7 @@ __writeeflags(unsigned int __f) } #endif /* !__x86_64__ */ -/// Cast a 32-bit float value to a 32-bit unsigned integer value. +/// Casts a 32-bit float value to a 32-bit unsigned integer value. /// /// \headerfile /// @@ -337,13 +337,13 @@ __writeeflags(unsigned int __f) /// /// \param __A /// A 32-bit float value. -/// \returns a 32-bit unsigned integer containing the converted value. +/// \returns A 32-bit unsigned integer containing the converted value. 
static __inline__ unsigned int __DEFAULT_FN_ATTRS_CAST _castf32_u32(float __A) { return __builtin_bit_cast(unsigned int, __A); } -/// Cast a 64-bit float value to a 64-bit unsigned integer value. +/// Casts a 64-bit float value to a 64-bit unsigned integer value. /// /// \headerfile /// @@ -352,13 +352,13 @@ _castf32_u32(float __A) { /// /// \param __A /// A 64-bit float value. -/// \returns a 64-bit unsigned integer containing the converted value. +/// \returns A 64-bit unsigned integer containing the converted value. static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CAST _castf64_u64(double __A) { return __builtin_bit_cast(unsigned long long, __A); } -/// Cast a 32-bit unsigned integer value to a 32-bit float value. +/// Casts a 32-bit unsigned integer value to a 32-bit float value. /// /// \headerfile /// @@ -367,13 +367,13 @@ _castf64_u64(double __A) { /// /// \param __A /// A 32-bit unsigned integer value. -/// \returns a 32-bit float value containing the converted value. +/// \returns A 32-bit float value containing the converted value. static __inline__ float __DEFAULT_FN_ATTRS_CAST _castu32_f32(unsigned int __A) { return __builtin_bit_cast(float, __A); } -/// Cast a 64-bit unsigned integer value to a 64-bit float value. +/// Casts a 64-bit unsigned integer value to a 64-bit float value. /// /// \headerfile /// @@ -382,7 +382,7 @@ _castu32_f32(unsigned int __A) { /// /// \param __A /// A 64-bit unsigned integer value. -/// \returns a 64-bit float value containing the converted value. +/// \returns A 64-bit float value containing the converted value. static __inline__ double __DEFAULT_FN_ATTRS_CAST _castu64_f64(unsigned long long __A) { return __builtin_bit_cast(double, __A); @@ -470,7 +470,7 @@ __crc32q(unsigned long long __C, unsigned long long __D) } #endif /* __x86_64__ */ -/// Reads the specified performance monitoring counter. Refer to your +/// Reads the specified performance-monitoring counter. 
Refer to your /// processor's documentation to determine which performance counters are /// supported. /// @@ -487,7 +487,7 @@ __rdpmc(int __A) { return __builtin_ia32_rdpmc(__A); } -/// Reads the processor's time stamp counter and the \c IA32_TSC_AUX MSR +/// Reads the processor's time-stamp counter and the \c IA32_TSC_AUX MSR /// \c (0xc0000103). /// /// \headerfile @@ -495,14 +495,14 @@ __rdpmc(int __A) { /// This intrinsic corresponds to the \c RDTSCP instruction. /// /// \param __A -/// Address of where to store the 32-bit \c IA32_TSC_AUX value. -/// \returns The 64-bit value of the time stamp counter. +/// The address of where to store the 32-bit \c IA32_TSC_AUX value. +/// \returns The 64-bit value of the time-stamp counter. static __inline__ unsigned long long __DEFAULT_FN_ATTRS __rdtscp(unsigned int *__A) { return __builtin_ia32_rdtscp(__A); } -/// Reads the processor's time stamp counter. +/// Reads the processor's time-stamp counter. /// /// \headerfile /// @@ -512,7 +512,7 @@ __rdtscp(unsigned int *__A) { /// /// This intrinsic corresponds to the \c RDTSC instruction. /// -/// \returns The 64-bit value of the time stamp counter. +/// \returns The 64-bit value of the time-stamp counter. #define _rdtsc() __rdtsc() /// Reads the specified performance monitoring counter. 
Refer to your diff --git a/lib/include/immintrin.h b/lib/include/immintrin.h index 27800f7a8202..cd6cf09b90ca 100644 --- a/lib/include/immintrin.h +++ b/lib/include/immintrin.h @@ -16,281 +16,231 @@ #include -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__MMX__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__MMX__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__SSE__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__SSE__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__SSE2__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__SSE2__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__SSE3__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__SSE3__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__SSSE3__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__SSSE3__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__SSE4_2__) || defined(__SSE4_1__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AES__) || defined(__PCLMUL__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__CLFLUSHOPT__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__CLFLUSHOPT__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__CLWB__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__CLWB__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX__) +#if !defined(__SCE__) 
|| __has_feature(modules) || defined(__AVX__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX2__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX2__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__F16C__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__F16C__) #include #endif /* No feature check desired due to internal checks */ #include -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__BMI2__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__BMI2__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__LZCNT__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__LZCNT__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__POPCNT__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__POPCNT__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__FMA__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__FMA__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512F__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512F__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512VL__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VL__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512BW__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512BW__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512BITALG__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512BITALG__) #include #endif 
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512CD__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512CD__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512VPOPCNTDQ__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VPOPCNTDQ__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AVX512VL__) && defined(__AVX512VPOPCNTDQ__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512VNNI__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VNNI__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AVX512VL__) && defined(__AVX512VNNI__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVXVNNI__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXVNNI__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512DQ__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512DQ__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AVX512VL__) && defined(__AVX512BITALG__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AVX512VL__) && defined(__AVX512BW__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AVX512VL__) && defined(__AVX512CD__)) #include #endif -#if !(defined(_MSC_VER) || 
defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AVX512VL__) && defined(__AVX512DQ__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512ER__) -#include -#endif - -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512IFMA__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512IFMA__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AVX512IFMA__) && defined(__AVX512VL__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVXIFMA__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXIFMA__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512VBMI__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VBMI__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AVX512VBMI__) && defined(__AVX512VL__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512VBMI2__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512VBMI2__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AVX512VBMI2__) && defined(__AVX512VL__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512PF__) -#include -#endif - -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512FP16__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512FP16__) #include #endif -#if 
!(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AVX512VL__) && defined(__AVX512FP16__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVX512BF16__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVX512BF16__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AVX512VL__) && defined(__AVX512BF16__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__PKU__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__PKU__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__VPCLMULQDQ__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__VPCLMULQDQ__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__VAES__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__VAES__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__GFNI__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__GFNI__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVXVNNIINT8__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXVNNIINT8__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVXNECONVERT__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXNECONVERT__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__SHA512__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__SHA512__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - 
defined(__SM3__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__SM3__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__SM4__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__SM4__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AVXVNNIINT16__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AVXVNNIINT16__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__RDPID__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__RDPID__) /// Reads the value of the IA32_TSC_AUX MSR (0xc0000103). /// /// \headerfile @@ -304,8 +254,7 @@ _rdpid_u32(void) { } #endif // __RDPID__ -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__RDRND__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__RDRND__) /// Returns a 16-bit hardware-generated random value. /// /// \headerfile @@ -367,8 +316,7 @@ _rdrand64_step(unsigned long long *__p) } #endif /* __RDRND__ */ -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__FSGSBASE__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__FSGSBASE__) #ifdef __x86_64__ /// Reads the FS base register. /// @@ -481,8 +429,7 @@ _writegsbase_u64(unsigned long long __V) #endif #endif /* __FSGSBASE__ */ -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__MOVBE__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__MOVBE__) /* The structs used below are to force the load/store to be unaligned. This * is accomplished with the __packed__ attribute. 
The __may_alias__ prevents @@ -598,139 +545,118 @@ _storebe_i64(void * __P, long long __D) { #endif #endif /* __MOVBE */ -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__RTM__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__RTM__) #include #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__SHA__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__SHA__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__FXSR__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__FXSR__) #include #endif /* No feature check desired due to internal MSC_VER checks */ #include -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__XSAVEOPT__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__XSAVEOPT__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__XSAVEC__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__XSAVEC__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__XSAVES__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__XSAVES__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__SHSTK__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__SHSTK__) #include #endif /* Intrinsics inside adcintrin.h are available at all times. 
*/ #include -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__ADX__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__ADX__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__RDSEED__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__RDSEED__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__WBNOINVD__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__WBNOINVD__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__CLDEMOTE__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__CLDEMOTE__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__WAITPKG__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__WAITPKG__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__MOVDIRI__) || defined(__MOVDIR64B__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__MOVDIRI__) || \ + defined(__MOVDIR64B__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__PCONFIG__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__PCONFIG__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__SGX__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__SGX__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__PTWRITE__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__PTWRITE__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__INVPCID__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__INVPCID__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || 
__has_feature(modules) || \ - defined(__AMX_FP16__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_FP16__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__KL__) || defined(__WIDEKL__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__KL__) || \ + defined(__WIDEKL__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AMX_TILE__) || defined(__AMX_INT8__) || defined(__AMX_BF16__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_TILE__) || \ + defined(__AMX_INT8__) || defined(__AMX_BF16__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__AMX_COMPLEX__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__AMX_COMPLEX__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ defined(__AVX512VP2INTERSECT__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ +#if !defined(__SCE__) || __has_feature(modules) || \ (defined(__AVX512VL__) && defined(__AVX512VP2INTERSECT__)) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__ENQCMD__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__ENQCMD__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__SERIALIZE__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__SERIALIZE__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__TSXLDTRK__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__TSXLDTRK__) #include #endif diff --git a/lib/include/intrin.h b/lib/include/intrin.h index 9ebaea9fee94..6308c865ca91 100644 --- a/lib/include/intrin.h +++ b/lib/include/intrin.h @@ -15,8 +15,10 @@ #ifndef 
__INTRIN_H #define __INTRIN_H +#include + /* First include the standard intrinsics. */ -#if defined(__i386__) || defined(__x86_64__) +#if defined(__i386__) || (defined(__x86_64__) && !defined(__arm64ec__)) #include #endif @@ -24,7 +26,7 @@ #include #endif -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(__arm64ec__) #include #endif @@ -131,8 +133,6 @@ void __writefsqword(unsigned long, unsigned __int64); void __writefsword(unsigned long, unsigned short); void __writemsr(unsigned long, unsigned __int64); void *_AddressOfReturnAddress(void); -unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask); -unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask); unsigned char _bittest(long const *, long); unsigned char _bittestandcomplement(long *, long); unsigned char _bittestandreset(long *, long); @@ -151,7 +151,6 @@ long _InterlockedExchangeAdd_HLERelease(long volatile *, long); __int64 _InterlockedExchangeAdd64_HLEAcquire(__int64 volatile *, __int64); __int64 _InterlockedExchangeAdd64_HLERelease(__int64 volatile *, __int64); void _ReadBarrier(void); -void _ReadWriteBarrier(void); unsigned int _rorx_u32(unsigned int, const unsigned int); int _sarx_i32(int, unsigned int); #if __STDC_HOSTED__ @@ -167,7 +166,7 @@ unsigned __int32 xbegin(void); void _xend(void); /* These additional intrinsics are turned on in x64/amd64/x86_64 mode. 
*/ -#ifdef __x86_64__ +#if defined(__x86_64__) && !defined(__arm64ec__) void __addgsbyte(unsigned long, unsigned char); void __addgsdword(unsigned long, unsigned long); void __addgsqword(unsigned long, unsigned __int64); @@ -182,12 +181,6 @@ unsigned char __readgsbyte(unsigned long); unsigned long __readgsdword(unsigned long); unsigned __int64 __readgsqword(unsigned long); unsigned short __readgsword(unsigned long); -unsigned __int64 __shiftleft128(unsigned __int64 _LowPart, - unsigned __int64 _HighPart, - unsigned char _Shift); -unsigned __int64 __shiftright128(unsigned __int64 _LowPart, - unsigned __int64 _HighPart, - unsigned char _Shift); void __stosq(unsigned __int64 *, unsigned __int64, size_t); unsigned char __vmx_on(unsigned __int64 *); unsigned char __vmx_vmclear(unsigned __int64 *); @@ -236,216 +229,15 @@ unsigned __int64 _shlx_u64(unsigned __int64, unsigned int); unsigned __int64 _shrx_u64(unsigned __int64, unsigned int); __int64 __mulh(__int64, __int64); unsigned __int64 __umulh(unsigned __int64, unsigned __int64); -__int64 _mul128(__int64, __int64, __int64*); -unsigned __int64 _umul128(unsigned __int64, - unsigned __int64, - unsigned __int64*); +__int64 _mul128(__int64, __int64, __int64 *); #endif /* __x86_64__ */ -#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) - -unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask); -unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask); - -#endif - -#if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) -__int64 _InterlockedDecrement64(__int64 volatile *_Addend); -__int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value); -__int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value); -__int64 _InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value); -__int64 _InterlockedIncrement64(__int64 volatile *_Addend); -__int64 _InterlockedOr64(__int64 volatile *_Value, __int64 
_Mask); -__int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask); -__int64 _InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask); - -#endif - -/*----------------------------------------------------------------------------*\ -|* Interlocked Exchange Add -\*----------------------------------------------------------------------------*/ -#if defined(__arm__) || defined(__aarch64__) -char _InterlockedExchangeAdd8_acq(char volatile *_Addend, char _Value); -char _InterlockedExchangeAdd8_nf(char volatile *_Addend, char _Value); -char _InterlockedExchangeAdd8_rel(char volatile *_Addend, char _Value); -short _InterlockedExchangeAdd16_acq(short volatile *_Addend, short _Value); -short _InterlockedExchangeAdd16_nf(short volatile *_Addend, short _Value); -short _InterlockedExchangeAdd16_rel(short volatile *_Addend, short _Value); -long _InterlockedExchangeAdd_acq(long volatile *_Addend, long _Value); -long _InterlockedExchangeAdd_nf(long volatile *_Addend, long _Value); -long _InterlockedExchangeAdd_rel(long volatile *_Addend, long _Value); -__int64 _InterlockedExchangeAdd64_acq(__int64 volatile *_Addend, __int64 _Value); -__int64 _InterlockedExchangeAdd64_nf(__int64 volatile *_Addend, __int64 _Value); -__int64 _InterlockedExchangeAdd64_rel(__int64 volatile *_Addend, __int64 _Value); -#endif -/*----------------------------------------------------------------------------*\ -|* Interlocked Increment -\*----------------------------------------------------------------------------*/ -#if defined(__arm__) || defined(__aarch64__) -short _InterlockedIncrement16_acq(short volatile *_Value); -short _InterlockedIncrement16_nf(short volatile *_Value); -short _InterlockedIncrement16_rel(short volatile *_Value); -long _InterlockedIncrement_acq(long volatile *_Value); -long _InterlockedIncrement_nf(long volatile *_Value); -long _InterlockedIncrement_rel(long volatile *_Value); -__int64 _InterlockedIncrement64_acq(__int64 volatile *_Value); -__int64 
_InterlockedIncrement64_nf(__int64 volatile *_Value); -__int64 _InterlockedIncrement64_rel(__int64 volatile *_Value); -#endif -/*----------------------------------------------------------------------------*\ -|* Interlocked Decrement -\*----------------------------------------------------------------------------*/ -#if defined(__arm__) || defined(__aarch64__) -short _InterlockedDecrement16_acq(short volatile *_Value); -short _InterlockedDecrement16_nf(short volatile *_Value); -short _InterlockedDecrement16_rel(short volatile *_Value); -long _InterlockedDecrement_acq(long volatile *_Value); -long _InterlockedDecrement_nf(long volatile *_Value); -long _InterlockedDecrement_rel(long volatile *_Value); -__int64 _InterlockedDecrement64_acq(__int64 volatile *_Value); -__int64 _InterlockedDecrement64_nf(__int64 volatile *_Value); -__int64 _InterlockedDecrement64_rel(__int64 volatile *_Value); -#endif -/*----------------------------------------------------------------------------*\ -|* Interlocked And -\*----------------------------------------------------------------------------*/ -#if defined(__arm__) || defined(__aarch64__) -char _InterlockedAnd8_acq(char volatile *_Value, char _Mask); -char _InterlockedAnd8_nf(char volatile *_Value, char _Mask); -char _InterlockedAnd8_rel(char volatile *_Value, char _Mask); -short _InterlockedAnd16_acq(short volatile *_Value, short _Mask); -short _InterlockedAnd16_nf(short volatile *_Value, short _Mask); -short _InterlockedAnd16_rel(short volatile *_Value, short _Mask); -long _InterlockedAnd_acq(long volatile *_Value, long _Mask); -long _InterlockedAnd_nf(long volatile *_Value, long _Mask); -long _InterlockedAnd_rel(long volatile *_Value, long _Mask); -__int64 _InterlockedAnd64_acq(__int64 volatile *_Value, __int64 _Mask); -__int64 _InterlockedAnd64_nf(__int64 volatile *_Value, __int64 _Mask); -__int64 _InterlockedAnd64_rel(__int64 volatile *_Value, __int64 _Mask); -#endif 
-/*----------------------------------------------------------------------------*\ -|* Bit Counting and Testing -\*----------------------------------------------------------------------------*/ -#if defined(__arm__) || defined(__aarch64__) -unsigned char _interlockedbittestandset_acq(long volatile *_BitBase, - long _BitPos); -unsigned char _interlockedbittestandset_nf(long volatile *_BitBase, - long _BitPos); -unsigned char _interlockedbittestandset_rel(long volatile *_BitBase, - long _BitPos); -unsigned char _interlockedbittestandreset_acq(long volatile *_BitBase, - long _BitPos); -unsigned char _interlockedbittestandreset_nf(long volatile *_BitBase, - long _BitPos); -unsigned char _interlockedbittestandreset_rel(long volatile *_BitBase, - long _BitPos); -#endif -/*----------------------------------------------------------------------------*\ -|* Interlocked Or -\*----------------------------------------------------------------------------*/ -#if defined(__arm__) || defined(__aarch64__) -char _InterlockedOr8_acq(char volatile *_Value, char _Mask); -char _InterlockedOr8_nf(char volatile *_Value, char _Mask); -char _InterlockedOr8_rel(char volatile *_Value, char _Mask); -short _InterlockedOr16_acq(short volatile *_Value, short _Mask); -short _InterlockedOr16_nf(short volatile *_Value, short _Mask); -short _InterlockedOr16_rel(short volatile *_Value, short _Mask); -long _InterlockedOr_acq(long volatile *_Value, long _Mask); -long _InterlockedOr_nf(long volatile *_Value, long _Mask); -long _InterlockedOr_rel(long volatile *_Value, long _Mask); -__int64 _InterlockedOr64_acq(__int64 volatile *_Value, __int64 _Mask); -__int64 _InterlockedOr64_nf(__int64 volatile *_Value, __int64 _Mask); -__int64 _InterlockedOr64_rel(__int64 volatile *_Value, __int64 _Mask); -#endif -/*----------------------------------------------------------------------------*\ -|* Interlocked Xor -\*----------------------------------------------------------------------------*/ -#if defined(__arm__) || 
defined(__aarch64__) -char _InterlockedXor8_acq(char volatile *_Value, char _Mask); -char _InterlockedXor8_nf(char volatile *_Value, char _Mask); -char _InterlockedXor8_rel(char volatile *_Value, char _Mask); -short _InterlockedXor16_acq(short volatile *_Value, short _Mask); -short _InterlockedXor16_nf(short volatile *_Value, short _Mask); -short _InterlockedXor16_rel(short volatile *_Value, short _Mask); -long _InterlockedXor_acq(long volatile *_Value, long _Mask); -long _InterlockedXor_nf(long volatile *_Value, long _Mask); -long _InterlockedXor_rel(long volatile *_Value, long _Mask); -__int64 _InterlockedXor64_acq(__int64 volatile *_Value, __int64 _Mask); -__int64 _InterlockedXor64_nf(__int64 volatile *_Value, __int64 _Mask); -__int64 _InterlockedXor64_rel(__int64 volatile *_Value, __int64 _Mask); -#endif -/*----------------------------------------------------------------------------*\ -|* Interlocked Exchange -\*----------------------------------------------------------------------------*/ -#if defined(__arm__) || defined(__aarch64__) -char _InterlockedExchange8_acq(char volatile *_Target, char _Value); -char _InterlockedExchange8_nf(char volatile *_Target, char _Value); -char _InterlockedExchange8_rel(char volatile *_Target, char _Value); -short _InterlockedExchange16_acq(short volatile *_Target, short _Value); -short _InterlockedExchange16_nf(short volatile *_Target, short _Value); -short _InterlockedExchange16_rel(short volatile *_Target, short _Value); -long _InterlockedExchange_acq(long volatile *_Target, long _Value); -long _InterlockedExchange_nf(long volatile *_Target, long _Value); -long _InterlockedExchange_rel(long volatile *_Target, long _Value); -__int64 _InterlockedExchange64_acq(__int64 volatile *_Target, __int64 _Value); -__int64 _InterlockedExchange64_nf(__int64 volatile *_Target, __int64 _Value); -__int64 _InterlockedExchange64_rel(__int64 volatile *_Target, __int64 _Value); -#endif 
-/*----------------------------------------------------------------------------*\ -|* Interlocked Compare Exchange -\*----------------------------------------------------------------------------*/ -#if defined(__arm__) || defined(__aarch64__) -char _InterlockedCompareExchange8_acq(char volatile *_Destination, - char _Exchange, char _Comparand); -char _InterlockedCompareExchange8_nf(char volatile *_Destination, - char _Exchange, char _Comparand); -char _InterlockedCompareExchange8_rel(char volatile *_Destination, - char _Exchange, char _Comparand); -short _InterlockedCompareExchange16_acq(short volatile *_Destination, - short _Exchange, short _Comparand); -short _InterlockedCompareExchange16_nf(short volatile *_Destination, - short _Exchange, short _Comparand); -short _InterlockedCompareExchange16_rel(short volatile *_Destination, - short _Exchange, short _Comparand); -long _InterlockedCompareExchange_acq(long volatile *_Destination, - long _Exchange, long _Comparand); -long _InterlockedCompareExchange_nf(long volatile *_Destination, - long _Exchange, long _Comparand); -long _InterlockedCompareExchange_rel(long volatile *_Destination, - long _Exchange, long _Comparand); -__int64 _InterlockedCompareExchange64_acq(__int64 volatile *_Destination, - __int64 _Exchange, __int64 _Comparand); -__int64 _InterlockedCompareExchange64_nf(__int64 volatile *_Destination, - __int64 _Exchange, __int64 _Comparand); -__int64 _InterlockedCompareExchange64_rel(__int64 volatile *_Destination, - __int64 _Exchange, __int64 _Comparand); -#endif -#if defined(__x86_64__) || defined(__aarch64__) -unsigned char _InterlockedCompareExchange128(__int64 volatile *_Destination, - __int64 _ExchangeHigh, - __int64 _ExchangeLow, - __int64 *_ComparandResult); -#endif -#if defined(__aarch64__) -unsigned char _InterlockedCompareExchange128_acq(__int64 volatile *_Destination, - __int64 _ExchangeHigh, - __int64 _ExchangeLow, - __int64 *_ComparandResult); -unsigned char 
_InterlockedCompareExchange128_nf(__int64 volatile *_Destination, - __int64 _ExchangeHigh, - __int64 _ExchangeLow, - __int64 *_ComparandResult); -unsigned char _InterlockedCompareExchange128_rel(__int64 volatile *_Destination, - __int64 _ExchangeHigh, - __int64 _ExchangeLow, - __int64 *_ComparandResult); -#endif - /*----------------------------------------------------------------------------*\ |* movs, stos \*----------------------------------------------------------------------------*/ -#if defined(__i386__) || defined(__x86_64__) + +#if defined(__i386__) || (defined(__x86_64__) && !defined(__arm64ec__)) static __inline__ void __DEFAULT_FN_ATTRS __movsb(unsigned char *__dst, unsigned char const *__src, size_t __n) { @@ -514,7 +306,7 @@ static __inline__ void __DEFAULT_FN_ATTRS __stosw(unsigned short *__dst, : "memory"); } #endif -#ifdef __x86_64__ +#if defined(__x86_64__) && !defined(__arm64ec__) static __inline__ void __DEFAULT_FN_ATTRS __movsq( unsigned long long *__dst, unsigned long long const *__src, size_t __n) { __asm__ __volatile__("rep movsq" @@ -533,10 +325,40 @@ static __inline__ void __DEFAULT_FN_ATTRS __stosq(unsigned __int64 *__dst, /*----------------------------------------------------------------------------*\ |* Misc \*----------------------------------------------------------------------------*/ -#if defined(__i386__) || defined(__x86_64__) +#if defined(__i386__) || (defined(__x86_64__) && !defined(__arm64ec__)) static __inline__ void __DEFAULT_FN_ATTRS __halt(void) { __asm__ volatile("hlt"); } + +static inline unsigned char __inbyte(unsigned short port) { + unsigned char ret; + __asm__ __volatile__("inb %w1, %b0" : "=a"(ret) : "Nd"(port)); + return ret; +} + +static inline unsigned short __inword(unsigned short port) { + unsigned short ret; + __asm__ __volatile__("inw %w1, %w0" : "=a"(ret) : "Nd"(port)); + return ret; +} + +static inline unsigned long __indword(unsigned short port) { + unsigned long ret; + __asm__ __volatile__("inl %w1, %k0" : 
"=a"(ret) : "Nd"(port)); + return ret; +} + +static inline void __outbyte(unsigned short port, unsigned char data) { + __asm__ __volatile__("outb %b0, %w1" : : "a"(data), "Nd"(port)); +} + +static inline void __outword(unsigned short port, unsigned short data) { + __asm__ __volatile__("outw %w0, %w1" : : "a"(data), "Nd"(port)); +} + +static inline void __outdword(unsigned short port, unsigned long data) { + __asm__ __volatile__("outl %k0, %w1" : : "a"(data), "Nd"(port)); +} #endif #if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) @@ -548,9 +370,10 @@ static __inline__ void __DEFAULT_FN_ATTRS __nop(void) { /*----------------------------------------------------------------------------*\ |* MS AArch64 specific \*----------------------------------------------------------------------------*/ -#if defined(__aarch64__) +#if defined(__aarch64__) || defined(__arm64ec__) unsigned __int64 __getReg(int); long _InterlockedAdd(long volatile *Addend, long Value); +__int64 _InterlockedAdd64(__int64 volatile *Addend, __int64 Value); __int64 _ReadStatusReg(int); void _WriteStatusReg(int, __int64); @@ -582,18 +405,19 @@ unsigned int _CountLeadingOnes(unsigned long); unsigned int _CountLeadingOnes64(unsigned __int64); unsigned int _CountLeadingSigns(long); unsigned int _CountLeadingSigns64(__int64); -unsigned int _CountLeadingZeros(unsigned long); -unsigned int _CountLeadingZeros64(unsigned _int64); unsigned int _CountOneBits(unsigned long); unsigned int _CountOneBits64(unsigned __int64); -void __cdecl __prefetch(void *); +unsigned int __hlt(unsigned int, ...); + +void __cdecl __prefetch(const void *); + #endif /*----------------------------------------------------------------------------*\ |* Privileged intrinsics \*----------------------------------------------------------------------------*/ -#if defined(__i386__) || defined(__x86_64__) +#if defined(__i386__) || (defined(__x86_64__) && !defined(__arm64ec__)) static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS 
__readmsr(unsigned long __register) { // Loads the contents of a 64-bit model specific register (MSR) specified in @@ -607,7 +431,6 @@ __readmsr(unsigned long __register) { __asm__ ("rdmsr" : "=d"(__edx), "=a"(__eax) : "c"(__register)); return (((unsigned __int64)__edx) << 32) | (unsigned __int64)__eax; } -#endif static __inline__ unsigned __LPTRINT_TYPE__ __DEFAULT_FN_ATTRS __readcr3(void) { unsigned __LPTRINT_TYPE__ __cr3_val; @@ -623,6 +446,7 @@ static __inline__ void __DEFAULT_FN_ATTRS __writecr3(unsigned __INTPTR_TYPE__ __cr3_val) { __asm__ ("mov {%0, %%cr3|cr3, %0}" : : "r"(__cr3_val) : "memory"); } +#endif #ifdef __cplusplus } diff --git a/lib/include/intrin0.h b/lib/include/intrin0.h new file mode 100644 index 000000000000..866c8896617d --- /dev/null +++ b/lib/include/intrin0.h @@ -0,0 +1,247 @@ +/* ===-------- intrin.h ---------------------------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +/* Only include this if we're compiling for the windows platform. 
*/ +#ifndef _MSC_VER +#include_next +#else + +#ifndef __INTRIN0_H +#define __INTRIN0_H + +#if defined(__x86_64__) && !defined(__arm64ec__) +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +unsigned char _BitScanForward(unsigned long *_Index, unsigned long _Mask); +unsigned char _BitScanReverse(unsigned long *_Index, unsigned long _Mask); +void _ReadWriteBarrier(void); + +#if defined(__aarch64__) || defined(__arm64ec__) +unsigned int _CountLeadingZeros(unsigned long); +unsigned int _CountLeadingZeros64(unsigned _int64); +unsigned char _InterlockedCompareExchange128_acq(__int64 volatile *_Destination, + __int64 _ExchangeHigh, + __int64 _ExchangeLow, + __int64 *_ComparandResult); +unsigned char _InterlockedCompareExchange128_nf(__int64 volatile *_Destination, + __int64 _ExchangeHigh, + __int64 _ExchangeLow, + __int64 *_ComparandResult); +unsigned char _InterlockedCompareExchange128_rel(__int64 volatile *_Destination, + __int64 _ExchangeHigh, + __int64 _ExchangeLow, + __int64 *_ComparandResult); +#endif + +#ifdef __x86_64__ && !defined(__arm64ec__) +unsigned __int64 _umul128(unsigned __int64, unsigned __int64, + unsigned __int64 *); +unsigned __int64 __shiftleft128(unsigned __int64 _LowPart, + unsigned __int64 _HighPart, + unsigned char _Shift); +unsigned __int64 __shiftright128(unsigned __int64 _LowPart, + unsigned __int64 _HighPart, + unsigned char _Shift); +#endif + +#if defined(__i386__) || (defined(__x86_64__) && !defined(__arm64ec__)) +void _mm_pause(void); +#endif + +#if defined(__x86_64__) || defined(__aarch64__) +unsigned char _InterlockedCompareExchange128(__int64 volatile *_Destination, + __int64 _ExchangeHigh, + __int64 _ExchangeLow, + __int64 *_ComparandResult); +#endif + +#if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) +unsigned char _BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask); +unsigned char _BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask); +#endif + +#if defined(__i386__) || 
defined(__x86_64__) || defined(__arm__) || \ + defined(__aarch64__) +__int64 _InterlockedDecrement64(__int64 volatile *_Addend); +__int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value); +__int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value); +__int64 _InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value); +__int64 _InterlockedIncrement64(__int64 volatile *_Addend); +__int64 _InterlockedOr64(__int64 volatile *_Value, __int64 _Mask); +__int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask); +__int64 _InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask); +#endif + +#if defined(__arm__) || defined(__aarch64__) || defined(__arm64ec__) +/*----------------------------------------------------------------------------*\ +|* Interlocked Exchange Add +\*----------------------------------------------------------------------------*/ +char _InterlockedExchangeAdd8_acq(char volatile *_Addend, char _Value); +char _InterlockedExchangeAdd8_nf(char volatile *_Addend, char _Value); +char _InterlockedExchangeAdd8_rel(char volatile *_Addend, char _Value); +short _InterlockedExchangeAdd16_acq(short volatile *_Addend, short _Value); +short _InterlockedExchangeAdd16_nf(short volatile *_Addend, short _Value); +short _InterlockedExchangeAdd16_rel(short volatile *_Addend, short _Value); +long _InterlockedExchangeAdd_acq(long volatile *_Addend, long _Value); +long _InterlockedExchangeAdd_nf(long volatile *_Addend, long _Value); +long _InterlockedExchangeAdd_rel(long volatile *_Addend, long _Value); +__int64 _InterlockedExchangeAdd64_acq(__int64 volatile *_Addend, + __int64 _Value); +__int64 _InterlockedExchangeAdd64_nf(__int64 volatile *_Addend, __int64 _Value); +__int64 _InterlockedExchangeAdd64_rel(__int64 volatile *_Addend, + __int64 _Value); + +/*----------------------------------------------------------------------------*\ +|* Interlocked Increment 
+\*----------------------------------------------------------------------------*/ +short _InterlockedIncrement16_acq(short volatile *_Value); +short _InterlockedIncrement16_nf(short volatile *_Value); +short _InterlockedIncrement16_rel(short volatile *_Value); +long _InterlockedIncrement_acq(long volatile *_Value); +long _InterlockedIncrement_nf(long volatile *_Value); +long _InterlockedIncrement_rel(long volatile *_Value); +__int64 _InterlockedIncrement64_acq(__int64 volatile *_Value); +__int64 _InterlockedIncrement64_nf(__int64 volatile *_Value); +__int64 _InterlockedIncrement64_rel(__int64 volatile *_Value); + +/*----------------------------------------------------------------------------*\ +|* Interlocked Decrement +\*----------------------------------------------------------------------------*/ +short _InterlockedDecrement16_acq(short volatile *_Value); +short _InterlockedDecrement16_nf(short volatile *_Value); +short _InterlockedDecrement16_rel(short volatile *_Value); +long _InterlockedDecrement_acq(long volatile *_Value); +long _InterlockedDecrement_nf(long volatile *_Value); +long _InterlockedDecrement_rel(long volatile *_Value); +__int64 _InterlockedDecrement64_acq(__int64 volatile *_Value); +__int64 _InterlockedDecrement64_nf(__int64 volatile *_Value); +__int64 _InterlockedDecrement64_rel(__int64 volatile *_Value); + +/*----------------------------------------------------------------------------*\ +|* Interlocked And +\*----------------------------------------------------------------------------*/ +char _InterlockedAnd8_acq(char volatile *_Value, char _Mask); +char _InterlockedAnd8_nf(char volatile *_Value, char _Mask); +char _InterlockedAnd8_rel(char volatile *_Value, char _Mask); +short _InterlockedAnd16_acq(short volatile *_Value, short _Mask); +short _InterlockedAnd16_nf(short volatile *_Value, short _Mask); +short _InterlockedAnd16_rel(short volatile *_Value, short _Mask); +long _InterlockedAnd_acq(long volatile *_Value, long _Mask); +long 
_InterlockedAnd_nf(long volatile *_Value, long _Mask); +long _InterlockedAnd_rel(long volatile *_Value, long _Mask); +__int64 _InterlockedAnd64_acq(__int64 volatile *_Value, __int64 _Mask); +__int64 _InterlockedAnd64_nf(__int64 volatile *_Value, __int64 _Mask); +__int64 _InterlockedAnd64_rel(__int64 volatile *_Value, __int64 _Mask); + +/*----------------------------------------------------------------------------*\ +|* Bit Counting and Testing +\*----------------------------------------------------------------------------*/ +unsigned char _interlockedbittestandset_acq(long volatile *_BitBase, + long _BitPos); +unsigned char _interlockedbittestandset_nf(long volatile *_BitBase, + long _BitPos); +unsigned char _interlockedbittestandset_rel(long volatile *_BitBase, + long _BitPos); +unsigned char _interlockedbittestandreset_acq(long volatile *_BitBase, + long _BitPos); +unsigned char _interlockedbittestandreset_nf(long volatile *_BitBase, + long _BitPos); +unsigned char _interlockedbittestandreset_rel(long volatile *_BitBase, + long _BitPos); + +/*----------------------------------------------------------------------------*\ +|* Interlocked Or +\*----------------------------------------------------------------------------*/ +char _InterlockedOr8_acq(char volatile *_Value, char _Mask); +char _InterlockedOr8_nf(char volatile *_Value, char _Mask); +char _InterlockedOr8_rel(char volatile *_Value, char _Mask); +short _InterlockedOr16_acq(short volatile *_Value, short _Mask); +short _InterlockedOr16_nf(short volatile *_Value, short _Mask); +short _InterlockedOr16_rel(short volatile *_Value, short _Mask); +long _InterlockedOr_acq(long volatile *_Value, long _Mask); +long _InterlockedOr_nf(long volatile *_Value, long _Mask); +long _InterlockedOr_rel(long volatile *_Value, long _Mask); +__int64 _InterlockedOr64_acq(__int64 volatile *_Value, __int64 _Mask); +__int64 _InterlockedOr64_nf(__int64 volatile *_Value, __int64 _Mask); +__int64 _InterlockedOr64_rel(__int64 volatile 
*_Value, __int64 _Mask); + +/*----------------------------------------------------------------------------*\ +|* Interlocked Xor +\*----------------------------------------------------------------------------*/ +char _InterlockedXor8_acq(char volatile *_Value, char _Mask); +char _InterlockedXor8_nf(char volatile *_Value, char _Mask); +char _InterlockedXor8_rel(char volatile *_Value, char _Mask); +short _InterlockedXor16_acq(short volatile *_Value, short _Mask); +short _InterlockedXor16_nf(short volatile *_Value, short _Mask); +short _InterlockedXor16_rel(short volatile *_Value, short _Mask); +long _InterlockedXor_acq(long volatile *_Value, long _Mask); +long _InterlockedXor_nf(long volatile *_Value, long _Mask); +long _InterlockedXor_rel(long volatile *_Value, long _Mask); +__int64 _InterlockedXor64_acq(__int64 volatile *_Value, __int64 _Mask); +__int64 _InterlockedXor64_nf(__int64 volatile *_Value, __int64 _Mask); +__int64 _InterlockedXor64_rel(__int64 volatile *_Value, __int64 _Mask); + +/*----------------------------------------------------------------------------*\ +|* Interlocked Exchange +\*----------------------------------------------------------------------------*/ +char _InterlockedExchange8_acq(char volatile *_Target, char _Value); +char _InterlockedExchange8_nf(char volatile *_Target, char _Value); +char _InterlockedExchange8_rel(char volatile *_Target, char _Value); +short _InterlockedExchange16_acq(short volatile *_Target, short _Value); +short _InterlockedExchange16_nf(short volatile *_Target, short _Value); +short _InterlockedExchange16_rel(short volatile *_Target, short _Value); +long _InterlockedExchange_acq(long volatile *_Target, long _Value); +long _InterlockedExchange_nf(long volatile *_Target, long _Value); +long _InterlockedExchange_rel(long volatile *_Target, long _Value); +__int64 _InterlockedExchange64_acq(__int64 volatile *_Target, __int64 _Value); +__int64 _InterlockedExchange64_nf(__int64 volatile *_Target, __int64 _Value); +__int64 
_InterlockedExchange64_rel(__int64 volatile *_Target, __int64 _Value); + +/*----------------------------------------------------------------------------*\ +|* Interlocked Compare Exchange +\*----------------------------------------------------------------------------*/ +char _InterlockedCompareExchange8_acq(char volatile *_Destination, + char _Exchange, char _Comparand); +char _InterlockedCompareExchange8_nf(char volatile *_Destination, + char _Exchange, char _Comparand); +char _InterlockedCompareExchange8_rel(char volatile *_Destination, + char _Exchange, char _Comparand); +short _InterlockedCompareExchange16_acq(short volatile *_Destination, + short _Exchange, short _Comparand); +short _InterlockedCompareExchange16_nf(short volatile *_Destination, + short _Exchange, short _Comparand); +short _InterlockedCompareExchange16_rel(short volatile *_Destination, + short _Exchange, short _Comparand); +long _InterlockedCompareExchange_acq(long volatile *_Destination, + long _Exchange, long _Comparand); +long _InterlockedCompareExchange_nf(long volatile *_Destination, long _Exchange, + long _Comparand); +long _InterlockedCompareExchange_rel(long volatile *_Destination, + long _Exchange, long _Comparand); +__int64 _InterlockedCompareExchange64_acq(__int64 volatile *_Destination, + __int64 _Exchange, + __int64 _Comparand); +__int64 _InterlockedCompareExchange64_nf(__int64 volatile *_Destination, + __int64 _Exchange, __int64 _Comparand); +__int64 _InterlockedCompareExchange64_rel(__int64 volatile *_Destination, + __int64 _Exchange, + __int64 _Comparand); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* __INTRIN0_H */ +#endif /* _MSC_VER */ diff --git a/lib/include/inttypes.h b/lib/include/inttypes.h index 1c894c4aca49..5150d22f8b2e 100644 --- a/lib/include/inttypes.h +++ b/lib/include/inttypes.h @@ -13,6 +13,9 @@ #if !defined(_AIX) || !defined(_STD_TYPES_T) #define __CLANG_INTTYPES_H #endif +#if defined(__MVS__) && __has_include_next() +#include_next +#else #if 
defined(_MSC_VER) && _MSC_VER < 1800 #error MSVC does not have inttypes.h prior to Visual Studio 2013 @@ -94,4 +97,5 @@ #define SCNxFAST32 "x" #endif +#endif /* __MVS__ */ #endif /* __CLANG_INTTYPES_H */ diff --git a/lib/include/iso646.h b/lib/include/iso646.h index e0a20c6f1891..b53fcd9b4e53 100644 --- a/lib/include/iso646.h +++ b/lib/include/iso646.h @@ -9,6 +9,9 @@ #ifndef __ISO646_H #define __ISO646_H +#if defined(__MVS__) && __has_include_next() +#include_next +#else #ifndef __cplusplus #define and && @@ -24,4 +27,5 @@ #define xor_eq ^= #endif +#endif /* __MVS__ */ #endif /* __ISO646_H */ diff --git a/lib/include/keylockerintrin.h b/lib/include/keylockerintrin.h index 1994ac42070a..f76e91b4d4b3 100644 --- a/lib/include/keylockerintrin.h +++ b/lib/include/keylockerintrin.h @@ -28,8 +28,7 @@ #ifndef _KEYLOCKERINTRIN_H #define _KEYLOCKERINTRIN_H -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__KL__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__KL__) /* Define the default attributes for the functions in this file. */ #define __DEFAULT_FN_ATTRS \ @@ -327,11 +326,9 @@ _mm_aesdec256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) { #undef __DEFAULT_FN_ATTRS -#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) \ - || defined(__KL__) */ +#endif /* !defined(__SCE__ || __has_feature(modules) || defined(__KL__) */ -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__WIDEKL__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__WIDEKL__) /* Define the default attributes for the functions in this file. 
*/ #define __DEFAULT_FN_ATTRS \ @@ -524,7 +521,7 @@ _mm_aesdecwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* #undef __DEFAULT_FN_ATTRS -#endif /* !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) \ - || defined(__WIDEKL__) */ +#endif /* !defined(__SCE__) || __has_feature(modules) || defined(__WIDEKL__) \ + */ #endif /* _KEYLOCKERINTRIN_H */ diff --git a/lib/include/limits.h b/lib/include/limits.h index 15e6bbe0abcf..56dffe568486 100644 --- a/lib/include/limits.h +++ b/lib/include/limits.h @@ -9,6 +9,10 @@ #ifndef __CLANG_LIMITS_H #define __CLANG_LIMITS_H +#if defined(__MVS__) && __has_include_next() +#include_next +#else + /* The system's limits.h may, in turn, try to #include_next GCC's limits.h. Avert this #include_next madness. */ #if defined __GNUC__ && !defined _GCC_LIMITS_H_ @@ -122,4 +126,5 @@ #define ULONG_LONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL) #endif +#endif /* __MVS__ */ #endif /* __CLANG_LIMITS_H */ diff --git a/lib/include/llvm_libc_wrappers/assert.h b/lib/include/llvm_libc_wrappers/assert.h index de650ca8442a..610ed96a458c 100644 --- a/lib/include/llvm_libc_wrappers/assert.h +++ b/lib/include/llvm_libc_wrappers/assert.h @@ -1,4 +1,4 @@ -//===-- Wrapper for C standard assert.h declarations on the GPU ------------===// +//===-- Wrapper for C standard assert.h declarations on the GPU -*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. diff --git a/lib/include/mm3dnow.h b/lib/include/mm3dnow.h index 22ab13aa3340..afffba3a9c75 100644 --- a/lib/include/mm3dnow.h +++ b/lib/include/mm3dnow.h @@ -7,151 +7,16 @@ *===-----------------------------------------------------------------------=== */ +// 3dNow intrinsics are no longer supported. + #ifndef _MM3DNOW_H_INCLUDED #define _MM3DNOW_H_INCLUDED +#ifndef _CLANG_DISABLE_CRT_DEPRECATION_WARNINGS +#warning "The header is deprecated, and 3dNow! 
intrinsics are unsupported. For other intrinsics, include , instead." +#endif + #include #include -typedef float __v2sf __attribute__((__vector_size__(8))); - -/* Define the default attributes for the functions in this file. */ -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("3dnow"), __min_vector_width__(64))) - -static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("3dnow"))) -_m_femms(void) { - __builtin_ia32_femms(); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pavgusb(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pavgusb((__v8qi)__m1, (__v8qi)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pf2id(__m64 __m) { - return (__m64)__builtin_ia32_pf2id((__v2sf)__m); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfacc(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfacc((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfadd(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfadd((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfcmpeq(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfcmpeq((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfcmpge(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfcmpge((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfcmpgt(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfcmpgt((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfmax(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfmax((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfmin(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfmin((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfmul(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfmul((__v2sf)__m1, (__v2sf)__m2); -} - 
-static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfrcp(__m64 __m) { - return (__m64)__builtin_ia32_pfrcp((__v2sf)__m); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfrcpit1(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfrcpit1((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfrcpit2(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfrcpit2((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfrsqrt(__m64 __m) { - return (__m64)__builtin_ia32_pfrsqrt((__v2sf)__m); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfrsqrtit1(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfrsqit1((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfsub(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfsub((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfsubr(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfsubr((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pi2fd(__m64 __m) { - return (__m64)__builtin_ia32_pi2fd((__v2si)__m); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pmulhrw(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pmulhrw((__v4hi)__m1, (__v4hi)__m2); -} - -/* Handle the 3dnowa instructions here. 
*/ -#undef __DEFAULT_FN_ATTRS -#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("3dnowa"), __min_vector_width__(64))) - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pf2iw(__m64 __m) { - return (__m64)__builtin_ia32_pf2iw((__v2sf)__m); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfnacc(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfnacc((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pfpnacc(__m64 __m1, __m64 __m2) { - return (__m64)__builtin_ia32_pfpnacc((__v2sf)__m1, (__v2sf)__m2); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pi2fw(__m64 __m) { - return (__m64)__builtin_ia32_pi2fw((__v2si)__m); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pswapdsf(__m64 __m) { - return (__m64)__builtin_ia32_pswapdsf((__v2sf)__m); -} - -static __inline__ __m64 __DEFAULT_FN_ATTRS -_m_pswapdsi(__m64 __m) { - return (__m64)__builtin_ia32_pswapdsi((__v2si)__m); -} - -#undef __DEFAULT_FN_ATTRS - #endif diff --git a/lib/include/mmintrin.h b/lib/include/mmintrin.h index 08849f01071a..4e154e2d8593 100644 --- a/lib/include/mmintrin.h +++ b/lib/include/mmintrin.h @@ -105,28 +105,23 @@ _mm_cvtm64_si64(__m64 __m) return (long long)__m; } -/// Converts 16-bit signed integers from both 64-bit integer vector -/// parameters of [4 x i16] into 8-bit signed integer values, and constructs -/// a 64-bit integer vector of [8 x i8] as the result. Positive values -/// greater than 0x7F are saturated to 0x7F. Negative values less than 0x80 -/// are saturated to 0x80. +/// Converts, with saturation, 16-bit signed integers from both 64-bit integer +/// vector parameters of [4 x i16] into 8-bit signed integer values, and +/// constructs a 64-bit integer vector of [8 x i8] as the result. +/// +/// Positive values greater than 0x7F are saturated to 0x7F. Negative values +/// less than 0x80 are saturated to 0x80. /// /// \headerfile /// /// This intrinsic corresponds to the PACKSSWB instruction. 
/// /// \param __m1 -/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a -/// 16-bit signed integer and is converted to an 8-bit signed integer with -/// saturation. Positive values greater than 0x7F are saturated to 0x7F. -/// Negative values less than 0x80 are saturated to 0x80. The converted -/// [4 x i8] values are written to the lower 32 bits of the result. +/// A 64-bit integer vector of [4 x i16]. The converted [4 x i8] values are +/// written to the lower 32 bits of the result. /// \param __m2 -/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a -/// 16-bit signed integer and is converted to an 8-bit signed integer with -/// saturation. Positive values greater than 0x7F are saturated to 0x7F. -/// Negative values less than 0x80 are saturated to 0x80. The converted -/// [4 x i8] values are written to the upper 32 bits of the result. +/// A 64-bit integer vector of [4 x i16]. The converted [4 x i8] values are +/// written to the upper 32 bits of the result. /// \returns A 64-bit integer vector of [8 x i8] containing the converted /// values. static __inline__ __m64 __DEFAULT_FN_ATTRS @@ -135,28 +130,23 @@ _mm_packs_pi16(__m64 __m1, __m64 __m2) return (__m64)__builtin_ia32_packsswb((__v4hi)__m1, (__v4hi)__m2); } -/// Converts 32-bit signed integers from both 64-bit integer vector -/// parameters of [2 x i32] into 16-bit signed integer values, and constructs -/// a 64-bit integer vector of [4 x i16] as the result. Positive values -/// greater than 0x7FFF are saturated to 0x7FFF. Negative values less than -/// 0x8000 are saturated to 0x8000. +/// Converts, with saturation, 32-bit signed integers from both 64-bit integer +/// vector parameters of [2 x i32] into 16-bit signed integer values, and +/// constructs a 64-bit integer vector of [4 x i16] as the result. +/// +/// Positive values greater than 0x7FFF are saturated to 0x7FFF. Negative +/// values less than 0x8000 are saturated to 0x8000. 
/// /// \headerfile /// /// This intrinsic corresponds to the PACKSSDW instruction. /// /// \param __m1 -/// A 64-bit integer vector of [2 x i32]. Each 32-bit element is treated as a -/// 32-bit signed integer and is converted to a 16-bit signed integer with -/// saturation. Positive values greater than 0x7FFF are saturated to 0x7FFF. -/// Negative values less than 0x8000 are saturated to 0x8000. The converted -/// [2 x i16] values are written to the lower 32 bits of the result. +/// A 64-bit integer vector of [2 x i32]. The converted [2 x i16] values are +/// written to the lower 32 bits of the result. /// \param __m2 -/// A 64-bit integer vector of [2 x i32]. Each 32-bit element is treated as a -/// 32-bit signed integer and is converted to a 16-bit signed integer with -/// saturation. Positive values greater than 0x7FFF are saturated to 0x7FFF. -/// Negative values less than 0x8000 are saturated to 0x8000. The converted -/// [2 x i16] values are written to the upper 32 bits of the result. +/// A 64-bit integer vector of [2 x i32]. The converted [2 x i16] values are +/// written to the upper 32 bits of the result. /// \returns A 64-bit integer vector of [4 x i16] containing the converted /// values. static __inline__ __m64 __DEFAULT_FN_ATTRS @@ -165,28 +155,23 @@ _mm_packs_pi32(__m64 __m1, __m64 __m2) return (__m64)__builtin_ia32_packssdw((__v2si)__m1, (__v2si)__m2); } -/// Converts 16-bit signed integers from both 64-bit integer vector -/// parameters of [4 x i16] into 8-bit unsigned integer values, and -/// constructs a 64-bit integer vector of [8 x i8] as the result. Values -/// greater than 0xFF are saturated to 0xFF. Values less than 0 are saturated -/// to 0. +/// Converts, with saturation, 16-bit signed integers from both 64-bit integer +/// vector parameters of [4 x i16] into 8-bit unsigned integer values, and +/// constructs a 64-bit integer vector of [8 x i8] as the result. +/// +/// Values greater than 0xFF are saturated to 0xFF. 
Values less than 0 are +/// saturated to 0. /// /// \headerfile /// /// This intrinsic corresponds to the PACKUSWB instruction. /// /// \param __m1 -/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a -/// 16-bit signed integer and is converted to an 8-bit unsigned integer with -/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less -/// than 0 are saturated to 0. The converted [4 x i8] values are written to -/// the lower 32 bits of the result. +/// A 64-bit integer vector of [4 x i16]. The converted [4 x i8] values are +/// written to the lower 32 bits of the result. /// \param __m2 -/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a -/// 16-bit signed integer and is converted to an 8-bit unsigned integer with -/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less -/// than 0 are saturated to 0. The converted [4 x i8] values are written to -/// the upper 32 bits of the result. +/// A 64-bit integer vector of [4 x i16]. The converted [4 x i8] values are +/// written to the upper 32 bits of the result. /// \returns A 64-bit integer vector of [8 x i8] containing the converted /// values. static __inline__ __m64 __DEFAULT_FN_ATTRS @@ -400,11 +385,13 @@ _mm_add_pi32(__m64 __m1, __m64 __m2) return (__m64)__builtin_ia32_paddd((__v2si)__m1, (__v2si)__m2); } -/// Adds each 8-bit signed integer element of the first 64-bit integer -/// vector of [8 x i8] to the corresponding 8-bit signed integer element of -/// the second 64-bit integer vector of [8 x i8]. Positive sums greater than -/// 0x7F are saturated to 0x7F. Negative sums less than 0x80 are saturated to -/// 0x80. The results are packed into a 64-bit integer vector of [8 x i8]. +/// Adds, with saturation, each 8-bit signed integer element of the first +/// 64-bit integer vector of [8 x i8] to the corresponding 8-bit signed +/// integer element of the second 64-bit integer vector of [8 x i8]. 
+/// +/// Positive sums greater than 0x7F are saturated to 0x7F. Negative sums +/// less than 0x80 are saturated to 0x80. The results are packed into a +/// 64-bit integer vector of [8 x i8]. /// /// \headerfile /// @@ -422,12 +409,13 @@ _mm_adds_pi8(__m64 __m1, __m64 __m2) return (__m64)__builtin_ia32_paddsb((__v8qi)__m1, (__v8qi)__m2); } -/// Adds each 16-bit signed integer element of the first 64-bit integer -/// vector of [4 x i16] to the corresponding 16-bit signed integer element of -/// the second 64-bit integer vector of [4 x i16]. Positive sums greater than -/// 0x7FFF are saturated to 0x7FFF. Negative sums less than 0x8000 are -/// saturated to 0x8000. The results are packed into a 64-bit integer vector -/// of [4 x i16]. +/// Adds, with saturation, each 16-bit signed integer element of the first +/// 64-bit integer vector of [4 x i16] to the corresponding 16-bit signed +/// integer element of the second 64-bit integer vector of [4 x i16]. +/// +/// Positive sums greater than 0x7FFF are saturated to 0x7FFF. Negative sums +/// less than 0x8000 are saturated to 0x8000. The results are packed into a +/// 64-bit integer vector of [4 x i16]. /// /// \headerfile /// @@ -445,11 +433,12 @@ _mm_adds_pi16(__m64 __m1, __m64 __m2) return (__m64)__builtin_ia32_paddsw((__v4hi)__m1, (__v4hi)__m2); } -/// Adds each 8-bit unsigned integer element of the first 64-bit integer -/// vector of [8 x i8] to the corresponding 8-bit unsigned integer element of -/// the second 64-bit integer vector of [8 x i8]. Sums greater than 0xFF are -/// saturated to 0xFF. The results are packed into a 64-bit integer vector of -/// [8 x i8]. +/// Adds, with saturation, each 8-bit unsigned integer element of the first +/// 64-bit integer vector of [8 x i8] to the corresponding 8-bit unsigned +/// integer element of the second 64-bit integer vector of [8 x i8]. +/// +/// Sums greater than 0xFF are saturated to 0xFF. The results are packed +/// into a 64-bit integer vector of [8 x i8]. 
/// /// \headerfile /// @@ -467,11 +456,12 @@ _mm_adds_pu8(__m64 __m1, __m64 __m2) return (__m64)__builtin_ia32_paddusb((__v8qi)__m1, (__v8qi)__m2); } -/// Adds each 16-bit unsigned integer element of the first 64-bit integer -/// vector of [4 x i16] to the corresponding 16-bit unsigned integer element -/// of the second 64-bit integer vector of [4 x i16]. Sums greater than -/// 0xFFFF are saturated to 0xFFFF. The results are packed into a 64-bit -/// integer vector of [4 x i16]. +/// Adds, with saturation, each 16-bit unsigned integer element of the first +/// 64-bit integer vector of [4 x i16] to the corresponding 16-bit unsigned +/// integer element of the second 64-bit integer vector of [4 x i16]. +/// +/// Sums greater than 0xFFFF are saturated to 0xFFFF. The results are packed +/// into a 64-bit integer vector of [4 x i16]. /// /// \headerfile /// @@ -552,12 +542,13 @@ _mm_sub_pi32(__m64 __m1, __m64 __m2) return (__m64)__builtin_ia32_psubd((__v2si)__m1, (__v2si)__m2); } -/// Subtracts each 8-bit signed integer element of the second 64-bit -/// integer vector of [8 x i8] from the corresponding 8-bit signed integer -/// element of the first 64-bit integer vector of [8 x i8]. Positive results -/// greater than 0x7F are saturated to 0x7F. Negative results less than 0x80 -/// are saturated to 0x80. The results are packed into a 64-bit integer -/// vector of [8 x i8]. +/// Subtracts, with saturation, each 8-bit signed integer element of the second +/// 64-bit integer vector of [8 x i8] from the corresponding 8-bit signed +/// integer element of the first 64-bit integer vector of [8 x i8]. +/// +/// Positive results greater than 0x7F are saturated to 0x7F. Negative +/// results less than 0x80 are saturated to 0x80. The results are packed +/// into a 64-bit integer vector of [8 x i8]. 
/// /// \headerfile /// @@ -575,12 +566,13 @@ _mm_subs_pi8(__m64 __m1, __m64 __m2) return (__m64)__builtin_ia32_psubsb((__v8qi)__m1, (__v8qi)__m2); } -/// Subtracts each 16-bit signed integer element of the second 64-bit -/// integer vector of [4 x i16] from the corresponding 16-bit signed integer -/// element of the first 64-bit integer vector of [4 x i16]. Positive results -/// greater than 0x7FFF are saturated to 0x7FFF. Negative results less than -/// 0x8000 are saturated to 0x8000. The results are packed into a 64-bit -/// integer vector of [4 x i16]. +/// Subtracts, with saturation, each 16-bit signed integer element of the +/// second 64-bit integer vector of [4 x i16] from the corresponding 16-bit +/// signed integer element of the first 64-bit integer vector of [4 x i16]. +/// +/// Positive results greater than 0x7FFF are saturated to 0x7FFF. Negative +/// results less than 0x8000 are saturated to 0x8000. The results are packed +/// into a 64-bit integer vector of [4 x i16]. /// /// \headerfile /// @@ -1149,7 +1141,7 @@ _mm_xor_si64(__m64 __m1, __m64 __m2) /// [8 x i8] to determine if the element of the first vector is equal to the /// corresponding element of the second vector. /// -/// The comparison yields 0 for false, 0xFF for true. +/// Each comparison returns 0 for false, 0xFF for true. /// /// \headerfile /// @@ -1171,7 +1163,7 @@ _mm_cmpeq_pi8(__m64 __m1, __m64 __m2) /// [4 x i16] to determine if the element of the first vector is equal to the /// corresponding element of the second vector. /// -/// The comparison yields 0 for false, 0xFFFF for true. +/// Each comparison returns 0 for false, 0xFFFF for true. /// /// \headerfile /// @@ -1193,7 +1185,7 @@ _mm_cmpeq_pi16(__m64 __m1, __m64 __m2) /// [2 x i32] to determine if the element of the first vector is equal to the /// corresponding element of the second vector. /// -/// The comparison yields 0 for false, 0xFFFFFFFF for true. +/// Each comparison returns 0 for false, 0xFFFFFFFF for true. 
/// /// \headerfile /// @@ -1215,7 +1207,7 @@ _mm_cmpeq_pi32(__m64 __m1, __m64 __m2) /// [8 x i8] to determine if the element of the first vector is greater than /// the corresponding element of the second vector. /// -/// The comparison yields 0 for false, 0xFF for true. +/// Each comparison returns 0 for false, 0xFF for true. /// /// \headerfile /// @@ -1237,7 +1229,7 @@ _mm_cmpgt_pi8(__m64 __m1, __m64 __m2) /// [4 x i16] to determine if the element of the first vector is greater than /// the corresponding element of the second vector. /// -/// The comparison yields 0 for false, 0xFFFF for true. +/// Each comparison returns 0 for false, 0xFFFF for true. /// /// \headerfile /// @@ -1259,7 +1251,7 @@ _mm_cmpgt_pi16(__m64 __m1, __m64 __m2) /// [2 x i32] to determine if the element of the first vector is greater than /// the corresponding element of the second vector. /// -/// The comparison yields 0 for false, 0xFFFFFFFF for true. +/// Each comparison returns 0 for false, 0xFFFFFFFF for true. /// /// \headerfile /// diff --git a/lib/include/module.modulemap b/lib/include/module.modulemap index 56a13f69bc05..9ffc249c8d1a 100644 --- a/lib/include/module.modulemap +++ b/lib/include/module.modulemap @@ -44,7 +44,6 @@ module _Builtin_intrinsics [system] [extern_c] { textual header "avxintrin.h" textual header "avx2intrin.h" textual header "avx512fintrin.h" - textual header "avx512erintrin.h" textual header "fmaintrin.h" header "x86intrin.h" @@ -203,6 +202,11 @@ module _Builtin_stdarg [system] { export * } + explicit module header_macro { + header "__stdarg_header_macro.h" + export * + } + explicit module va_arg { header "__stdarg_va_arg.h" export * @@ -232,6 +236,10 @@ module _Builtin_stdbool [system] { module _Builtin_stddef [system] { textual header "stddef.h" + explicit module header_macro { + header "__stddef_header_macro.h" + export * + } // __stddef_max_align_t.h is always in this module, even if // -fbuiltin-headers-in-system-modules is passed. 
explicit module max_align_t { @@ -315,3 +323,8 @@ module opencl_c { header "opencl-c.h" header "opencl-c-base.h" } + +module ptrauth { + header "ptrauth.h" + export * +} diff --git a/lib/include/opencl-c-base.h b/lib/include/opencl-c-base.h index 2494f6213fc5..786678b9d8a7 100644 --- a/lib/include/opencl-c-base.h +++ b/lib/include/opencl-c-base.h @@ -46,6 +46,10 @@ #define __opencl_c_ext_fp32_global_atomic_min_max 1 #define __opencl_c_ext_fp32_local_atomic_min_max 1 #define __opencl_c_ext_image_raw10_raw12 1 +#define cl_khr_kernel_clock 1 +#define __opencl_c_kernel_clock_scope_device 1 +#define __opencl_c_kernel_clock_scope_work_group 1 +#define __opencl_c_kernel_clock_scope_sub_group 1 #endif // defined(__SPIR__) || defined(__SPIRV__) #endif // (defined(__OPENCL_CPP_VERSION__) || __OPENCL_C_VERSION__ >= 200) diff --git a/lib/include/opencl-c.h b/lib/include/opencl-c.h index 288bb18bc654..20719b74b6b8 100644 --- a/lib/include/opencl-c.h +++ b/lib/include/opencl-c.h @@ -17314,6 +17314,21 @@ half __ovld __conv sub_group_clustered_rotate(half, int, uint); #endif // cl_khr_fp16 #endif // cl_khr_subgroup_rotate +#if defined(cl_khr_kernel_clock) +#if defined(__opencl_c_kernel_clock_scope_device) +ulong __ovld clock_read_device(); +uint2 __ovld clock_read_hilo_device(); +#endif // __opencl_c_kernel_clock_scope_device +#if defined(__opencl_c_kernel_clock_scope_work_group) +ulong __ovld clock_read_work_group(); +uint2 __ovld clock_read_hilo_work_group(); +#endif // __opencl_c_kernel_clock_scope_work_group +#if defined(__opencl_c_kernel_clock_scope_sub_group) +ulong __ovld clock_read_sub_group(); +uint2 __ovld clock_read_hilo_sub_group(); +#endif // __opencl_c_kernel_clock_scope_sub_group +#endif // cl_khr_kernel_clock + #if defined(cl_intel_subgroups) // Intel-Specific Sub Group Functions float __ovld __conv intel_sub_group_shuffle( float , uint ); diff --git a/lib/include/prfchwintrin.h b/lib/include/prfchwintrin.h index d2f91aa0123e..eaea5f3cf8fe 100644 --- 
a/lib/include/prfchwintrin.h +++ b/lib/include/prfchwintrin.h @@ -8,16 +8,17 @@ */ #if !defined(__X86INTRIN_H) && !defined(_MM3DNOW_H_INCLUDED) -#error "Never use directly; include or instead." +#error "Never use directly; include instead." #endif #ifndef __PRFCHWINTRIN_H #define __PRFCHWINTRIN_H /// Loads a memory sequence containing the specified memory address into -/// all data cache levels. The cache-coherency state is set to exclusive. -/// Data can be read from and written to the cache line without additional -/// delay. +/// all data cache levels. +/// +/// The cache-coherency state is set to exclusive. Data can be read from +/// and written to the cache line without additional delay. /// /// \headerfile /// @@ -32,10 +33,11 @@ _m_prefetch(void *__P) } /// Loads a memory sequence containing the specified memory address into -/// the L1 data cache and sets the cache-coherency to modified. This -/// provides a hint to the processor that the cache line will be modified. -/// It is intended for use when the cache line will be written to shortly -/// after the prefetch is performed. +/// the L1 data cache and sets the cache-coherency state to modified. +/// +/// This provides a hint to the processor that the cache line will be +/// modified. It is intended for use when the cache line will be written to +/// shortly after the prefetch is performed. /// /// Note that the effect of this intrinsic is dependent on the processor /// implementation. diff --git a/lib/include/ptrauth.h b/lib/include/ptrauth.h new file mode 100644 index 000000000000..154b599862a8 --- /dev/null +++ b/lib/include/ptrauth.h @@ -0,0 +1,330 @@ +/*===---- ptrauth.h - Pointer authentication -------------------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __PTRAUTH_H +#define __PTRAUTH_H + +typedef enum { + ptrauth_key_asia = 0, + ptrauth_key_asib = 1, + ptrauth_key_asda = 2, + ptrauth_key_asdb = 3, + + /* A process-independent key which can be used to sign code pointers. */ + ptrauth_key_process_independent_code = ptrauth_key_asia, + + /* A process-specific key which can be used to sign code pointers. */ + ptrauth_key_process_dependent_code = ptrauth_key_asib, + + /* A process-independent key which can be used to sign data pointers. */ + ptrauth_key_process_independent_data = ptrauth_key_asda, + + /* A process-specific key which can be used to sign data pointers. */ + ptrauth_key_process_dependent_data = ptrauth_key_asdb, + + /* The key used to sign return addresses on the stack. + The extra data is based on the storage address of the return address. + On AArch64, that is always the storage address of the return address + 8 + (or, in other words, the value of the stack pointer on function entry) */ + ptrauth_key_return_address = ptrauth_key_process_dependent_code, + + /* The key used to sign C function pointers. + The extra data is always 0. */ + ptrauth_key_function_pointer = ptrauth_key_process_independent_code, + + /* The key used to sign C++ v-table pointers. + The extra data is always 0. */ + ptrauth_key_cxx_vtable_pointer = ptrauth_key_process_independent_data, + + /* Other pointers signed under the ABI use private ABI rules. */ + +} ptrauth_key; + +/* An integer type of the appropriate size for a discriminator argument. */ +typedef __UINTPTR_TYPE__ ptrauth_extra_data_t; + +/* An integer type of the appropriate size for a generic signature. */ +typedef __UINTPTR_TYPE__ ptrauth_generic_signature_t; + +/* A signed pointer value embeds the original pointer together with + a signature that attests to the validity of that pointer. 
Because + this signature must use only "spare" bits of the pointer, a + signature's validity is probabilistic in practice: it is unlikely + but still plausible that an invalidly-derived signature will + somehow equal the correct signature and therefore successfully + authenticate. Nonetheless, this scheme provides a strong degree + of protection against certain kinds of attacks. */ + +/* Authenticating a pointer that was not signed with the given key + and extra-data value will (likely) fail by trapping. */ + +/* The null function pointer is always the all-zero bit pattern. + Signing an all-zero bit pattern will embed a (likely) non-zero + signature in the result, and so the result will not seem to be + a null function pointer. Authenticating this value will yield + a null function pointer back. However, authenticating an + all-zero bit pattern will probably fail, because the + authentication will expect a (likely) non-zero signature to + embedded in the value. + + Because of this, if a pointer may validly be null, you should + check for null before attempting to authenticate it with one + of these intrinsics. This is not necessary when using the + __ptrauth qualifier; the compiler will perform this check + automatically. */ + +#if __has_feature(ptrauth_intrinsics) + +/* Strip the signature from a value without authenticating it. + + If the value is a function pointer, the result will not be a + legal function pointer because of the missing signature, and + attempting to call it will result in an authentication failure. + + The value must be an expression of pointer type. + The key must be a constant expression of type ptrauth_key. + The result will have the same type as the original value. */ +#define ptrauth_strip(__value, __key) __builtin_ptrauth_strip(__value, __key) + +/* Blend a constant discriminator into the given pointer-like value + to form a new discriminator. Not all bits of the inputs are + guaranteed to contribute to the result. 
+ + On arm64e, the integer must fall within the range of a uint16_t; + other bits may be ignored. + + For the purposes of ptrauth_sign_constant, the result of calling + this function is considered a constant expression if the arguments + are constant. Some restrictions may be imposed on the pointer. + + The first argument must be an expression of pointer type. + The second argument must be an expression of integer type. + The result will have type uintptr_t. */ +#define ptrauth_blend_discriminator(__pointer, __integer) \ + __builtin_ptrauth_blend_discriminator(__pointer, __integer) + +/* Return a signed pointer for a constant address in a manner which guarantees + a non-attackable sequence. + + The value must be a constant expression of pointer type which evaluates to + a non-null pointer. + The key must be a constant expression of type ptrauth_key. + The extra data must be a constant expression of pointer or integer type; + if an integer, it will be coerced to ptrauth_extra_data_t. + The result will have the same type as the original value. + + This can be used in constant expressions. */ +#define ptrauth_sign_constant(__value, __key, __data) \ + __builtin_ptrauth_sign_constant(__value, __key, __data) + +/* Add a signature to the given pointer value using a specific key, + using the given extra data as a salt to the signing process. + + This operation does not authenticate the original value and is + therefore potentially insecure if an attacker could possibly + control that value. + + The value must be an expression of pointer type. + The key must be a constant expression of type ptrauth_key. + The extra data must be an expression of pointer or integer type; + if an integer, it will be coerced to ptrauth_extra_data_t. + The result will have the same type as the original value. 
*/ +#define ptrauth_sign_unauthenticated(__value, __key, __data) \ + __builtin_ptrauth_sign_unauthenticated(__value, __key, __data) + +/* Authenticate a pointer using one scheme and resign it using another. + + If the result is subsequently authenticated using the new scheme, that + authentication is guaranteed to fail if and only if the initial + authentication failed. + + The value must be an expression of pointer type. + The key must be a constant expression of type ptrauth_key. + The extra data must be an expression of pointer or integer type; + if an integer, it will be coerced to ptrauth_extra_data_t. + The result will have the same type as the original value. + + This operation is guaranteed to not leave the intermediate value + available for attack before it is re-signed. + + Do not pass a null pointer to this function. A null pointer + will not successfully authenticate. + + This operation traps if the authentication fails. */ +#define ptrauth_auth_and_resign(__value, __old_key, __old_data, __new_key, \ + __new_data) \ + __builtin_ptrauth_auth_and_resign(__value, __old_key, __old_data, __new_key, \ + __new_data) + +/* Authenticate a pointer using one scheme and resign it as a C + function pointer. + + If the result is subsequently authenticated using the new scheme, that + authentication is guaranteed to fail if and only if the initial + authentication failed. + + The value must be an expression of function pointer type. + The key must be a constant expression of type ptrauth_key. + The extra data must be an expression of pointer or integer type; + if an integer, it will be coerced to ptrauth_extra_data_t. + The result will have the same type as the original value. + + This operation is guaranteed to not leave the intermediate value + available for attack before it is re-signed. Additionally, if this + expression is used syntactically as the function expression in a + call, only a single authentication will be performed. 
*/ +#define ptrauth_auth_function(__value, __old_key, __old_data) \ + ptrauth_auth_and_resign(__value, __old_key, __old_data, \ + ptrauth_key_function_pointer, 0) + +/* Authenticate a data pointer. + + The value must be an expression of non-function pointer type. + The key must be a constant expression of type ptrauth_key. + The extra data must be an expression of pointer or integer type; + if an integer, it will be coerced to ptrauth_extra_data_t. + The result will have the same type as the original value. + + This operation traps if the authentication fails. */ +#define ptrauth_auth_data(__value, __old_key, __old_data) \ + __builtin_ptrauth_auth(__value, __old_key, __old_data) + +/* Compute a constant discriminator from the given string. + + The argument must be a string literal of char character type. The result + has type ptrauth_extra_data_t. + + The result value is never zero and always within range for both the + __ptrauth qualifier and ptrauth_blend_discriminator. + + This can be used in constant expressions. +*/ +#define ptrauth_string_discriminator(__string) \ + __builtin_ptrauth_string_discriminator(__string) + +/* Compute a constant discriminator from the given type. + + The result can be used as the second argument to + ptrauth_blend_discriminator or the third argument to the + __ptrauth qualifier. It has type size_t. + + If the type is a C++ member function pointer type, the result is + the discriminator used to signed member function pointers of that + type. If the type is a function, function pointer, or function + reference type, the result is the discriminator used to sign + functions of that type. It is ill-formed to use this macro with any + other type. + + A call to this function is an integer constant expression. */ +#define ptrauth_type_discriminator(__type) \ + __builtin_ptrauth_type_discriminator(__type) + +/* Compute a signature for the given pair of pointer-sized values. + The order of the arguments is significant. 
+ + Like a pointer signature, the resulting signature depends on + private key data and therefore should not be reliably reproducible + by attackers. That means that this can be used to validate the + integrity of arbitrary data by storing a signature for that data + alongside it, then checking that the signature is still valid later. + Data which exceeds two pointers in size can be signed by either + computing a tree of generic signatures or just signing an ordinary + cryptographic hash of the data. + + The result has type ptrauth_generic_signature_t. However, it may + not have as many bits of entropy as that type's width would suggest; + some implementations are known to compute a compressed signature as + if the arguments were a pointer and a discriminator. + + The arguments must be either pointers or integers; if integers, they + will be coerce to uintptr_t. */ +#define ptrauth_sign_generic_data(__value, __data) \ + __builtin_ptrauth_sign_generic_data(__value, __data) + +/* C++ vtable pointer signing class attribute */ +#define ptrauth_cxx_vtable_pointer(key, address_discrimination, \ + extra_discrimination...) 
\ + [[clang::ptrauth_vtable_pointer(key, address_discrimination, \ + extra_discrimination)]] + +#else + +#define ptrauth_strip(__value, __key) \ + ({ \ + (void)__key; \ + __value; \ + }) + +#define ptrauth_blend_discriminator(__pointer, __integer) \ + ({ \ + (void)__pointer; \ + (void)__integer; \ + ((ptrauth_extra_data_t)0); \ + }) + +#define ptrauth_sign_constant(__value, __key, __data) \ + ({ \ + (void)__key; \ + (void)__data; \ + __value; \ + }) + +#define ptrauth_sign_unauthenticated(__value, __key, __data) \ + ({ \ + (void)__key; \ + (void)__data; \ + __value; \ + }) + +#define ptrauth_auth_and_resign(__value, __old_key, __old_data, __new_key, \ + __new_data) \ + ({ \ + (void)__old_key; \ + (void)__old_data; \ + (void)__new_key; \ + (void)__new_data; \ + __value; \ + }) + +#define ptrauth_auth_function(__value, __old_key, __old_data) \ + ({ \ + (void)__old_key; \ + (void)__old_data; \ + __value; \ + }) + +#define ptrauth_auth_data(__value, __old_key, __old_data) \ + ({ \ + (void)__old_key; \ + (void)__old_data; \ + __value; \ + }) + +#define ptrauth_string_discriminator(__string) \ + ({ \ + (void)__string; \ + ((ptrauth_extra_data_t)0); \ + }) + +#define ptrauth_type_discriminator(__type) ((ptrauth_extra_data_t)0) + +#define ptrauth_sign_generic_data(__value, __data) \ + ({ \ + (void)__value; \ + (void)__data; \ + ((ptrauth_generic_signature_t)0); \ + }) + + +#define ptrauth_cxx_vtable_pointer(key, address_discrimination, \ + extra_discrimination...) + +#endif /* __has_feature(ptrauth_intrinsics) */ + +#endif /* __PTRAUTH_H */ diff --git a/lib/include/riscv_vector.h b/lib/include/riscv_vector.h index 083a13587766..c99ceb802174 100644 --- a/lib/include/riscv_vector.h +++ b/lib/include/riscv_vector.h @@ -14,10 +14,6 @@ #include #include -#ifndef __riscv_vector -#error "Vector intrinsics require the vector extension." 
-#endif - #ifdef __cplusplus extern "C" { #endif diff --git a/lib/include/sifive_vector.h b/lib/include/sifive_vector.h index 42d7224db614..4e67ad6fca26 100644 --- a/lib/include/sifive_vector.h +++ b/lib/include/sifive_vector.h @@ -13,4 +13,106 @@ #pragma clang riscv intrinsic sifive_vector +#define __riscv_sf_vc_x_se_u8mf4(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 6, vl) +#define __riscv_sf_vc_x_se_u8mf2(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 7, vl) +#define __riscv_sf_vc_x_se_u8m1(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 0, vl) +#define __riscv_sf_vc_x_se_u8m2(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 1, vl) +#define __riscv_sf_vc_x_se_u8m4(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 2, vl) +#define __riscv_sf_vc_x_se_u8m8(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 3, vl) + +#define __riscv_sf_vc_x_se_u16mf2(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint16_t)rs1, 16, 7, vl) +#define __riscv_sf_vc_x_se_u16m1(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint16_t)rs1, 16, 0, vl) +#define __riscv_sf_vc_x_se_u16m2(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint16_t)rs1, 16, 1, vl) +#define __riscv_sf_vc_x_se_u16m4(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint16_t)rs1, 16, 2, vl) +#define __riscv_sf_vc_x_se_u16m8(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint16_t)rs1, 16, 3, vl) + +#define __riscv_sf_vc_x_se_u32m1(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint32_t)rs1, 32, 0, vl) +#define __riscv_sf_vc_x_se_u32m2(p27_26, p24_20, p11_7, rs1, 
vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint32_t)rs1, 32, 1, vl) +#define __riscv_sf_vc_x_se_u32m4(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint32_t)rs1, 32, 2, vl) +#define __riscv_sf_vc_x_se_u32m8(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint32_t)rs1, 32, 3, vl) + +#define __riscv_sf_vc_i_se_u8mf4(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 7, vl) +#define __riscv_sf_vc_i_se_u8mf2(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 6, vl) +#define __riscv_sf_vc_i_se_u8m1(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 0, vl) +#define __riscv_sf_vc_i_se_u8m2(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 1, vl) +#define __riscv_sf_vc_i_se_u8m4(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 2, vl) +#define __riscv_sf_vc_i_se_u8m8(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 3, vl) + +#define __riscv_sf_vc_i_se_u16mf2(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 7, vl) +#define __riscv_sf_vc_i_se_u16m1(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 0, vl) +#define __riscv_sf_vc_i_se_u16m2(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 1, vl) +#define __riscv_sf_vc_i_se_u16m4(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 2, vl) +#define __riscv_sf_vc_i_se_u16m8(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 3, vl) + +#define __riscv_sf_vc_i_se_u32m1(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 32, 0, vl) +#define __riscv_sf_vc_i_se_u32m2(p27_26, p24_20, p11_7, simm5, 
vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 32, 1, vl) +#define __riscv_sf_vc_i_se_u32m4(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 32, 2, vl) +#define __riscv_sf_vc_i_se_u32m8(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 32, 3, vl) + +#if __riscv_v_elen >= 64 +#define __riscv_sf_vc_x_se_u8mf8(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint8_t)rs1, 8, 5, vl) +#define __riscv_sf_vc_x_se_u16mf4(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint16_t)rs1, 16, 6, vl) +#define __riscv_sf_vc_x_se_u32mf2(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint32_t)rs1, 32, 7, vl) + +#define __riscv_sf_vc_i_se_u8mf8(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 8, 5, vl) +#define __riscv_sf_vc_i_se_u16mf4(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 16, 6, vl) +#define __riscv_sf_vc_i_se_u32mf2(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 32, 7, vl) + +#define __riscv_sf_vc_i_se_u64m1(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 64, 0, vl) +#define __riscv_sf_vc_i_se_u64m2(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 64, 1, vl) +#define __riscv_sf_vc_i_se_u64m4(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 64, 2, vl) +#define __riscv_sf_vc_i_se_u64m8(p27_26, p24_20, p11_7, simm5, vl) \ + __riscv_sf_vc_i_se(p27_26, p24_20, p11_7, simm5, 64, 3, vl) + +#if __riscv_xlen >= 64 +#define __riscv_sf_vc_x_se_u64m1(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint64_t)rs1, 64, 0, vl) +#define __riscv_sf_vc_x_se_u64m2(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint64_t)rs1, 64, 
1, vl) +#define __riscv_sf_vc_x_se_u64m4(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint64_t)rs1, 64, 2, vl) +#define __riscv_sf_vc_x_se_u64m8(p27_26, p24_20, p11_7, rs1, vl) \ + __riscv_sf_vc_x_se(p27_26, p24_20, p11_7, (uint64_t)rs1, 64, 3, vl) +#endif +#endif + #endif //_SIFIVE_VECTOR_H_ diff --git a/lib/include/smmintrin.h b/lib/include/smmintrin.h index 005d7db9c3c3..b3fec474e35a 100644 --- a/lib/include/smmintrin.h +++ b/lib/include/smmintrin.h @@ -1188,6 +1188,8 @@ static __inline__ int __DEFAULT_FN_ATTRS _mm_testnzc_si128(__m128i __M, /// Compares each of the corresponding 64-bit values of the 128-bit /// integer vectors for equality. /// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// /// \headerfile /// /// This intrinsic corresponds to the VPCMPEQQ / PCMPEQQ instruction. @@ -1431,8 +1433,10 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu32_epi64(__m128i __V) { } /* SSE4 Pack with Unsigned Saturation. */ -/// Converts 32-bit signed integers from both 128-bit integer vector -/// operands into 16-bit unsigned integers, and returns the packed result. +/// Converts, with saturation, 32-bit signed integers from both 128-bit integer +/// vector operands into 16-bit unsigned integers, and returns the packed +/// result. +/// /// Values greater than 0xFFFF are saturated to 0xFFFF. Values less than /// 0x0000 are saturated to 0x0000. /// @@ -1441,17 +1445,11 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu32_epi64(__m128i __V) { /// This intrinsic corresponds to the VPACKUSDW / PACKUSDW instruction. /// /// \param __V1 -/// A 128-bit vector of [4 x i32]. Each 32-bit element is treated as a -/// signed integer and is converted to a 16-bit unsigned integer with -/// saturation. Values greater than 0xFFFF are saturated to 0xFFFF. Values -/// less than 0x0000 are saturated to 0x0000. The converted [4 x i16] values -/// are written to the lower 64 bits of the result. 
+/// A 128-bit vector of [4 x i32]. The converted [4 x i16] values are +/// written to the lower 64 bits of the result. /// \param __V2 -/// A 128-bit vector of [4 x i32]. Each 32-bit element is treated as a -/// signed integer and is converted to a 16-bit unsigned integer with -/// saturation. Values greater than 0xFFFF are saturated to 0xFFFF. Values -/// less than 0x0000 are saturated to 0x0000. The converted [4 x i16] values -/// are written to the higher 64 bits of the result. +/// A 128-bit vector of [4 x i32]. The converted [4 x i16] values are +/// written to the higher 64 bits of the result. /// \returns A 128-bit vector of [8 x i16] containing the converted values. static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi32(__m128i __V1, __m128i __V2) { @@ -2305,6 +2303,8 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_minpos_epu16(__m128i __V) { /// integer vectors to determine if the values in the first operand are /// greater than those in the second operand. /// +/// Each comparison returns 0x0 for false, 0xFFFFFFFFFFFFFFFF for true. +/// /// \headerfile /// /// This intrinsic corresponds to the VPCMPGTQ / PCMPGTQ instruction. diff --git a/lib/include/stdalign.h b/lib/include/stdalign.h index 158508e65d2b..56cdfa52d4ba 100644 --- a/lib/include/stdalign.h +++ b/lib/include/stdalign.h @@ -10,6 +10,10 @@ #ifndef __STDALIGN_H #define __STDALIGN_H +#if defined(__MVS__) && __has_include_next() +#include_next +#else + #if defined(__cplusplus) || \ (defined(__STDC_VERSION__) && __STDC_VERSION__ < 202311L) #ifndef __cplusplus @@ -21,4 +25,5 @@ #define __alignof_is_defined 1 #endif /* __STDC_VERSION__ */ +#endif /* __MVS__ */ #endif /* __STDALIGN_H */ diff --git a/lib/include/stdarg.h b/lib/include/stdarg.h index 94b066566f08..6203d7a600a2 100644 --- a/lib/include/stdarg.h +++ b/lib/include/stdarg.h @@ -14,29 +14,24 @@ * need to use some of its interfaces. Otherwise this header provides all of * the expected interfaces. 
* - * When clang modules are enabled, this header is a textual header. It ignores - * its header guard so that multiple submodules can export its interfaces. - * Take module SM with submodules A and B, whose headers both include stdarg.h - * When SM.A builds, __STDARG_H will be defined. When SM.B builds, the - * definition from SM.A will leak when building without local submodule - * visibility. stdarg.h wouldn't include any of its implementation headers, and - * SM.B wouldn't import any of the stdarg modules, and SM.B's `export *` - * wouldn't export any stdarg interfaces as expected. However, since stdarg.h - * ignores its header guard when building with modules, it all works as - * expected. - * - * When clang modules are not enabled, the header guards can function in the - * normal simple fashion. + * When clang modules are enabled, this header is a textual header to support + * the multiple include behavior. As such, it doesn't directly declare anything + * so that it doesn't add duplicate declarations to all of its includers' + * modules. 
*/ -#if !defined(__STDARG_H) || __has_feature(modules) || \ - defined(__need___va_list) || defined(__need_va_list) || \ - defined(__need_va_arg) || defined(__need___va_copy) || \ - defined(__need_va_copy) +#if defined(__MVS__) && __has_include_next() +#undef __need___va_list +#undef __need_va_list +#undef __need_va_arg +#undef __need___va_copy +#undef __need_va_copy +#include <__stdarg_header_macro.h> +#include_next +#else #if !defined(__need___va_list) && !defined(__need_va_list) && \ !defined(__need_va_arg) && !defined(__need___va_copy) && \ !defined(__need_va_copy) -#define __STDARG_H #define __need___va_list #define __need_va_list #define __need_va_arg @@ -49,6 +44,7 @@ !defined(__STRICT_ANSI__) #define __need_va_copy #endif +#include <__stdarg_header_macro.h> #endif #ifdef __need___va_list @@ -76,4 +72,4 @@ #undef __need_va_copy #endif /* defined(__need_va_copy) */ -#endif +#endif /* __MVS__ */ diff --git a/lib/include/stdatomic.h b/lib/include/stdatomic.h index 521c473dd169..1991351f9e9e 100644 --- a/lib/include/stdatomic.h +++ b/lib/include/stdatomic.h @@ -16,7 +16,7 @@ * Exclude the MSVC path as well as the MSVC header as of the 14.31.30818 * explicitly disallows `stdatomic.h` in the C mode via an `#error`. Fallback * to the clang resource header until that is fully supported. The - * `stdatomic.h` header requires C++ 23 or newer. + * `stdatomic.h` header requires C++23 or newer. 
*/ #if __STDC_HOSTED__ && \ __has_include_next() && \ @@ -35,6 +35,9 @@ extern "C" { #define ATOMIC_BOOL_LOCK_FREE __CLANG_ATOMIC_BOOL_LOCK_FREE #define ATOMIC_CHAR_LOCK_FREE __CLANG_ATOMIC_CHAR_LOCK_FREE +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L +#define ATOMIC_CHAR8_T_LOCK_FREE __CLANG_ATOMIC_CHAR8_T_LOCK_FREE +#endif #define ATOMIC_CHAR16_T_LOCK_FREE __CLANG_ATOMIC_CHAR16_T_LOCK_FREE #define ATOMIC_CHAR32_T_LOCK_FREE __CLANG_ATOMIC_CHAR32_T_LOCK_FREE #define ATOMIC_WCHAR_T_LOCK_FREE __CLANG_ATOMIC_WCHAR_T_LOCK_FREE @@ -104,6 +107,9 @@ typedef _Atomic(long) atomic_long; typedef _Atomic(unsigned long) atomic_ulong; typedef _Atomic(long long) atomic_llong; typedef _Atomic(unsigned long long) atomic_ullong; +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L +typedef _Atomic(unsigned char) atomic_char8_t; +#endif typedef _Atomic(uint_least16_t) atomic_char16_t; typedef _Atomic(uint_least32_t) atomic_char32_t; typedef _Atomic(wchar_t) atomic_wchar_t; @@ -166,7 +172,11 @@ typedef _Atomic(uintmax_t) atomic_uintmax_t; typedef struct atomic_flag { atomic_bool _Value; } atomic_flag; +#ifdef __cplusplus +#define ATOMIC_FLAG_INIT {false} +#else #define ATOMIC_FLAG_INIT { 0 } +#endif /* These should be provided by the libc implementation. */ #ifdef __cplusplus diff --git a/lib/include/stdbool.h b/lib/include/stdbool.h index 9406aab0ca72..dfaad2b65a9b 100644 --- a/lib/include/stdbool.h +++ b/lib/include/stdbool.h @@ -12,6 +12,10 @@ #define __bool_true_false_are_defined 1 +#if defined(__MVS__) && __has_include_next() +#include_next +#else + #if defined(__STDC_VERSION__) && __STDC_VERSION__ > 201710L /* FIXME: We should be issuing a deprecation warning here, but cannot yet due * to system headers which include this header file unconditionally. 
@@ -31,4 +35,5 @@ #endif #endif +#endif /* __MVS__ */ #endif /* __STDBOOL_H */ diff --git a/lib/include/stddef.h b/lib/include/stddef.h index e0ad7b8d17af..99b275aebf5a 100644 --- a/lib/include/stddef.h +++ b/lib/include/stddef.h @@ -14,34 +14,32 @@ * need to use some of its interfaces. Otherwise this header provides all of * the expected interfaces. * - * When clang modules are enabled, this header is a textual header. It ignores - * its header guard so that multiple submodules can export its interfaces. - * Take module SM with submodules A and B, whose headers both include stddef.h - * When SM.A builds, __STDDEF_H will be defined. When SM.B builds, the - * definition from SM.A will leak when building without local submodule - * visibility. stddef.h wouldn't include any of its implementation headers, and - * SM.B wouldn't import any of the stddef modules, and SM.B's `export *` - * wouldn't export any stddef interfaces as expected. However, since stddef.h - * ignores its header guard when building with modules, it all works as - * expected. - * - * When clang modules are not enabled, the header guards can function in the - * normal simple fashion. + * When clang modules are enabled, this header is a textual header to support + * the multiple include behavior. As such, it doesn't directly declare anything + * so that it doesn't add duplicate declarations to all of its includers' + * modules. 
*/ -#if !defined(__STDDEF_H) || __has_feature(modules) || \ - (defined(__STDC_WANT_LIB_EXT1__) && __STDC_WANT_LIB_EXT1__ >= 1) || \ - defined(__need_ptrdiff_t) || defined(__need_size_t) || \ - defined(__need_rsize_t) || defined(__need_wchar_t) || \ - defined(__need_NULL) || defined(__need_nullptr_t) || \ - defined(__need_unreachable) || defined(__need_max_align_t) || \ - defined(__need_offsetof) || defined(__need_wint_t) +#if defined(__MVS__) && __has_include_next() +#undef __need_ptrdiff_t +#undef __need_size_t +#undef __need_rsize_t +#undef __need_wchar_t +#undef __need_NULL +#undef __need_nullptr_t +#undef __need_unreachable +#undef __need_max_align_t +#undef __need_offsetof +#undef __need_wint_t +#include <__stddef_header_macro.h> +#include_next + +#else #if !defined(__need_ptrdiff_t) && !defined(__need_size_t) && \ !defined(__need_rsize_t) && !defined(__need_wchar_t) && \ !defined(__need_NULL) && !defined(__need_nullptr_t) && \ !defined(__need_unreachable) && !defined(__need_max_align_t) && \ !defined(__need_offsetof) && !defined(__need_wint_t) -#define __STDDEF_H #define __need_ptrdiff_t #define __need_size_t /* ISO9899:2011 7.20 (C11 Annex K): Define rsize_t if __STDC_WANT_LIB_EXT1__ is @@ -50,7 +48,24 @@ #define __need_rsize_t #endif #define __need_wchar_t +#if !defined(__STDDEF_H) || __has_feature(modules) +/* + * __stddef_null.h is special when building without modules: if __need_NULL is + * set, then it will unconditionally redefine NULL. To avoid stepping on client + * definitions of NULL, __need_NULL should only be set the first time this + * header is included, that is when __STDDEF_H is not defined. However, when + * building with modules, this header is a textual header and needs to + * unconditionally include __stdef_null.h to support multiple submodules + * exporting _Builtin_stddef.null. Take module SM with submodules A and B, whose + * headers both include stddef.h When SM.A builds, __STDDEF_H will be defined. 
+ * When SM.B builds, the definition from SM.A will leak when building without + * local submodule visibility. stddef.h wouldn't include __stddef_null.h, and + * SM.B wouldn't import _Builtin_stddef.null, and SM.B's `export *` wouldn't + * export NULL as expected. When building with modules, always include + * __stddef_null.h so that everything works as expected. + */ #define __need_NULL +#endif #if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 202311L) || \ defined(__cplusplus) #define __need_nullptr_t @@ -66,6 +81,7 @@ /* wint_t is provided by and not . It's here * for compatibility, but must be explicitly requested. Therefore * __need_wint_t is intentionally not defined here. */ +#include <__stddef_header_macro.h> #endif #if defined(__need_ptrdiff_t) @@ -120,4 +136,4 @@ __WINT_TYPE__ directly; accommodate both by requiring __need_wint_t */ #undef __need_wint_t #endif /* __need_wint_t */ -#endif +#endif /* __MVS__ */ diff --git a/lib/include/stdint.h b/lib/include/stdint.h index b6699b6ca3d4..01feab7b1ee2 100644 --- a/lib/include/stdint.h +++ b/lib/include/stdint.h @@ -14,6 +14,10 @@ #define __CLANG_STDINT_H #endif +#if defined(__MVS__) && __has_include_next() +#include_next +#else + /* If we're hosted, fall back to the system's stdint.h, which might have * additional definitions. 
*/ @@ -947,4 +951,5 @@ typedef __UINTMAX_TYPE__ uintmax_t; #endif #endif /* __STDC_HOSTED__ */ +#endif /* __MVS__ */ #endif /* __CLANG_STDINT_H */ diff --git a/lib/include/stdnoreturn.h b/lib/include/stdnoreturn.h index c90bf77e840e..6a9b209c7218 100644 --- a/lib/include/stdnoreturn.h +++ b/lib/include/stdnoreturn.h @@ -10,9 +10,15 @@ #ifndef __STDNORETURN_H #define __STDNORETURN_H +#if defined(__MVS__) && __has_include_next() +#include_next +#else + #define noreturn _Noreturn #define __noreturn_is_defined 1 +#endif /* __MVS__ */ + #if (defined(__STDC_VERSION__) && __STDC_VERSION__ > 201710L) && \ !defined(_CLANG_DISABLE_CRT_DEPRECATION_WARNINGS) /* The noreturn macro is deprecated in C23. We do not mark it as such because diff --git a/lib/include/tmmintrin.h b/lib/include/tmmintrin.h index 7d8dc46c57bf..bf8327b692d1 100644 --- a/lib/include/tmmintrin.h +++ b/lib/include/tmmintrin.h @@ -271,10 +271,11 @@ _mm_hadd_pi32(__m64 __a, __m64 __b) return (__m64)__builtin_ia32_phaddd((__v2si)__a, (__v2si)__b); } -/// Horizontally adds the adjacent pairs of values contained in 2 packed -/// 128-bit vectors of [8 x i16]. Positive sums greater than 0x7FFF are -/// saturated to 0x7FFF. Negative sums less than 0x8000 are saturated to -/// 0x8000. +/// Horizontally adds, with saturation, the adjacent pairs of values contained +/// in two packed 128-bit vectors of [8 x i16]. +/// +/// Positive sums greater than 0x7FFF are saturated to 0x7FFF. Negative sums +/// less than 0x8000 are saturated to 0x8000. /// /// \headerfile /// @@ -296,10 +297,11 @@ _mm_hadds_epi16(__m128i __a, __m128i __b) return (__m128i)__builtin_ia32_phaddsw128((__v8hi)__a, (__v8hi)__b); } -/// Horizontally adds the adjacent pairs of values contained in 2 packed -/// 64-bit vectors of [4 x i16]. Positive sums greater than 0x7FFF are -/// saturated to 0x7FFF. Negative sums less than 0x8000 are saturated to -/// 0x8000. 
+/// Horizontally adds, with saturation, the adjacent pairs of values contained +/// in two packed 64-bit vectors of [4 x i16]. +/// +/// Positive sums greater than 0x7FFF are saturated to 0x7FFF. Negative sums +/// less than 0x8000 are saturated to 0x8000. /// /// \headerfile /// @@ -413,10 +415,11 @@ _mm_hsub_pi32(__m64 __a, __m64 __b) return (__m64)__builtin_ia32_phsubd((__v2si)__a, (__v2si)__b); } -/// Horizontally subtracts the adjacent pairs of values contained in 2 -/// packed 128-bit vectors of [8 x i16]. Positive differences greater than -/// 0x7FFF are saturated to 0x7FFF. Negative differences less than 0x8000 are -/// saturated to 0x8000. +/// Horizontally subtracts, with saturation, the adjacent pairs of values +/// contained in two packed 128-bit vectors of [8 x i16]. +/// +/// Positive differences greater than 0x7FFF are saturated to 0x7FFF. +/// Negative differences less than 0x8000 are saturated to 0x8000. /// /// \headerfile /// @@ -438,10 +441,11 @@ _mm_hsubs_epi16(__m128i __a, __m128i __b) return (__m128i)__builtin_ia32_phsubsw128((__v8hi)__a, (__v8hi)__b); } -/// Horizontally subtracts the adjacent pairs of values contained in 2 -/// packed 64-bit vectors of [4 x i16]. Positive differences greater than -/// 0x7FFF are saturated to 0x7FFF. Negative differences less than 0x8000 are -/// saturated to 0x8000. +/// Horizontally subtracts, with saturation, the adjacent pairs of values +/// contained in two packed 64-bit vectors of [4 x i16]. +/// +/// Positive differences greater than 0x7FFF are saturated to 0x7FFF. +/// Negative differences less than 0x8000 are saturated to 0x8000. 
/// /// \headerfile /// diff --git a/lib/include/varargs.h b/lib/include/varargs.h index d241b7de3cb2..d33ddc5ae7f8 100644 --- a/lib/include/varargs.h +++ b/lib/include/varargs.h @@ -8,5 +8,9 @@ */ #ifndef __VARARGS_H #define __VARARGS_H - #error "Please use instead of " +#if defined(__MVS__) && __has_include_next() +#include_next +#else +#error "Please use instead of " +#endif /* __MVS__ */ #endif diff --git a/lib/include/x86gprintrin.h b/lib/include/x86gprintrin.h index ed141879fbc7..3d5cc606d7e6 100644 --- a/lib/include/x86gprintrin.h +++ b/lib/include/x86gprintrin.h @@ -10,38 +10,31 @@ #ifndef __X86GPRINTRIN_H #define __X86GPRINTRIN_H -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__HRESET__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__HRESET__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__UINTR__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__UINTR__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__USERMSR__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__USERMSR__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__CRC32__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__CRC32__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__PRFCHI__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__PRFCHI__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__RAOINT__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__RAOINT__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__CMPCCXADD__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__CMPCCXADD__) #include #endif diff --git a/lib/include/x86intrin.h 
b/lib/include/x86intrin.h index 450fd008dab9..f42e9e580f88 100644 --- a/lib/include/x86intrin.h +++ b/lib/include/x86intrin.h @@ -14,53 +14,39 @@ #include -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__3dNOW__) -#include -#endif - -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__PRFCHW__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__PRFCHW__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__SSE4A__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__SSE4A__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__FMA4__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__FMA4__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__XOP__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__XOP__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__TBM__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__TBM__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__LWP__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__LWP__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__MWAITX__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__MWAITX__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__CLZERO__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__CLZERO__) #include #endif -#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \ - defined(__RDPRU__) +#if !defined(__SCE__) || __has_feature(modules) || defined(__RDPRU__) #include #endif diff --git a/lib/include/xmmintrin.h b/lib/include/xmmintrin.h index 
47368f3c23d2..1ef89de9c9f5 100644 --- a/lib/include/xmmintrin.h +++ b/lib/include/xmmintrin.h @@ -316,6 +316,8 @@ _mm_rsqrt_ps(__m128 __a) /// operands and returns the lesser value in the low-order bits of the /// vector of [4 x float]. /// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// /// \headerfile /// /// This intrinsic corresponds to the VMINSS / MINSS instructions. @@ -338,6 +340,8 @@ _mm_min_ss(__m128 __a, __m128 __b) /// Compares two 128-bit vectors of [4 x float] and returns the lesser /// of each pair of values. /// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// /// \headerfile /// /// This intrinsic corresponds to the VMINPS / MINPS instructions. @@ -358,6 +362,8 @@ _mm_min_ps(__m128 __a, __m128 __b) /// operands and returns the greater value in the low-order bits of a 128-bit /// vector of [4 x float]. /// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// /// \headerfile /// /// This intrinsic corresponds to the VMAXSS / MAXSS instructions. @@ -380,6 +386,8 @@ _mm_max_ss(__m128 __a, __m128 __b) /// Compares two 128-bit vectors of [4 x float] and returns the greater /// of each pair of values. /// +/// If either value in a comparison is NaN, returns the value from \a __b. +/// /// \headerfile /// /// This intrinsic corresponds to the VMAXPS / MAXPS instructions. @@ -474,8 +482,11 @@ _mm_xor_ps(__m128 __a, __m128 __b) } /// Compares two 32-bit float values in the low-order bits of both -/// operands for equality and returns the result of the comparison in the +/// operands for equality. +/// +/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in the /// low-order bits of a vector [4 x float]. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile /// @@ -498,6 +509,9 @@ _mm_cmpeq_ss(__m128 __a, __m128 __b) /// Compares each of the corresponding 32-bit float values of the /// 128-bit vectors of [4 x float] for equality. 
/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. +/// /// \headerfile /// /// This intrinsic corresponds to the VCMPEQPS / CMPEQPS instructions. @@ -515,8 +529,11 @@ _mm_cmpeq_ps(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the value in the first operand is less than the -/// corresponding value in the second operand and returns the result of the -/// comparison in the low-order bits of a vector of [4 x float]. +/// corresponding value in the second operand. +/// +/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in the +/// low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile /// @@ -540,6 +557,9 @@ _mm_cmplt_ss(__m128 __a, __m128 __b) /// 128-bit vectors of [4 x float] to determine if the values in the first /// operand are less than those in the second operand. /// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. +/// /// \headerfile /// /// This intrinsic corresponds to the VCMPLTPS / CMPLTPS instructions. @@ -557,9 +577,11 @@ _mm_cmplt_ps(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the value in the first operand is less than or -/// equal to the corresponding value in the second operand and returns the -/// result of the comparison in the low-order bits of a vector of -/// [4 x float]. +/// equal to the corresponding value in the second operand. +/// +/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in +/// the low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns false.
/// /// \headerfile /// @@ -583,6 +605,9 @@ _mm_cmple_ss(__m128 __a, __m128 __b) /// 128-bit vectors of [4 x float] to determine if the values in the first /// operand are less than or equal to those in the second operand. /// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. +/// /// \headerfile /// /// This intrinsic corresponds to the VCMPLEPS / CMPLEPS instructions. @@ -600,8 +625,11 @@ _mm_cmple_ps(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the value in the first operand is greater than -/// the corresponding value in the second operand and returns the result of -/// the comparison in the low-order bits of a vector of [4 x float]. +/// the corresponding value in the second operand. +/// +/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in the +/// low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile /// @@ -627,6 +655,9 @@ _mm_cmpgt_ss(__m128 __a, __m128 __b) /// 128-bit vectors of [4 x float] to determine if the values in the first /// operand are greater than those in the second operand. /// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. +/// /// \headerfile /// /// This intrinsic corresponds to the VCMPLTPS / CMPLTPS instructions. @@ -644,9 +675,11 @@ _mm_cmpgt_ps(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the value in the first operand is greater than -/// or equal to the corresponding value in the second operand and returns -/// the result of the comparison in the low-order bits of a vector of -/// [4 x float]. +/// or equal to the corresponding value in the second operand. 
+/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true, in the +/// low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns false. /// /// \headerfile /// @@ -672,6 +705,9 @@ _mm_cmpge_ss(__m128 __a, __m128 __b) /// 128-bit vectors of [4 x float] to determine if the values in the first /// operand are greater than or equal to those in the second operand. /// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns false. +/// /// \headerfile /// /// This intrinsic corresponds to the VCMPLEPS / CMPLEPS instructions. @@ -687,9 +723,12 @@ _mm_cmpge_ps(__m128 __a, __m128 __b) return (__m128)__builtin_ia32_cmpleps((__v4sf)__b, (__v4sf)__a); } -/// Compares two 32-bit float values in the low-order bits of both -/// operands for inequality and returns the result of the comparison in the +/// Compares two 32-bit float values in the low-order bits of both operands +/// for inequality. +/// +/// The comparison returns 0x0 for false, 0xFFFFFFFF for true, in the /// low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -713,6 +752,9 @@ _mm_cmpneq_ss(__m128 __a, __m128 __b) /// Compares each of the corresponding 32-bit float values of the /// 128-bit vectors of [4 x float] for inequality. /// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. +/// /// \headerfile /// /// This intrinsic corresponds to the VCMPNEQPS / CMPNEQPS @@ -731,8 +773,11 @@ _mm_cmpneq_ps(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the value in the first operand is not less than -/// the corresponding value in the second operand and returns the result of -/// the comparison in the low-order bits of a vector of [4 x float].
+/// the corresponding value in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true, in the +/// low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -757,6 +802,9 @@ _mm_cmpnlt_ss(__m128 __a, __m128 __b) /// 128-bit vectors of [4 x float] to determine if the values in the first /// operand are not less than those in the second operand. /// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. +/// /// \headerfile /// /// This intrinsic corresponds to the VCMPNLTPS / CMPNLTPS @@ -775,9 +823,11 @@ _mm_cmpnlt_ps(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the value in the first operand is not less than -/// or equal to the corresponding value in the second operand and returns -/// the result of the comparison in the low-order bits of a vector of -/// [4 x float]. +/// or equal to the corresponding value in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true, in the +/// low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -802,6 +852,9 @@ _mm_cmpnle_ss(__m128 __a, __m128 __b) /// 128-bit vectors of [4 x float] to determine if the values in the first /// operand are not less than or equal to those in the second operand. /// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. 
+/// /// \headerfile /// /// This intrinsic corresponds to the VCMPNLEPS / CMPNLEPS @@ -820,9 +873,11 @@ _mm_cmpnle_ps(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the value in the first operand is not greater -/// than the corresponding value in the second operand and returns the -/// result of the comparison in the low-order bits of a vector of -/// [4 x float]. +/// than the corresponding value in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true, in the +/// low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -849,6 +904,9 @@ _mm_cmpngt_ss(__m128 __a, __m128 __b) /// 128-bit vectors of [4 x float] to determine if the values in the first /// operand are not greater than those in the second operand. /// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. +/// /// \headerfile /// /// This intrinsic corresponds to the VCMPNLTPS / CMPNLTPS @@ -867,9 +925,11 @@ _mm_cmpngt_ps(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the value in the first operand is not greater -/// than or equal to the corresponding value in the second operand and -/// returns the result of the comparison in the low-order bits of a vector -/// of [4 x float]. +/// than or equal to the corresponding value in the second operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true, in the +/// low-order bits of a vector of [4 x float]. +/// If either value in a comparison is NaN, returns true. /// /// \headerfile /// @@ -896,6 +956,9 @@ _mm_cmpnge_ss(__m128 __a, __m128 __b) /// 128-bit vectors of [4 x float] to determine if the values in the first /// operand are not greater than or equal to those in the second operand. 
/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, returns true. +/// /// \headerfile /// /// This intrinsic corresponds to the VCMPNLEPS / CMPNLEPS @@ -914,9 +977,11 @@ _mm_cmpnge_ps(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the value in the first operand is ordered with -/// respect to the corresponding value in the second operand and returns the -/// result of the comparison in the low-order bits of a vector of -/// [4 x float]. +/// respect to the corresponding value in the second operand. +/// +/// A pair of floating-point values are ordered with respect to each +/// other if neither value is a NaN. Each comparison returns 0x0 for false, +/// 0xFFFFFFFF for true. /// /// \headerfile /// @@ -941,6 +1006,10 @@ _mm_cmpord_ss(__m128 __a, __m128 __b) /// 128-bit vectors of [4 x float] to determine if the values in the first /// operand are ordered with respect to those in the second operand. /// +/// A pair of floating-point values are ordered with respect to each +/// other if neither value is a NaN. Each comparison returns 0x0 for false, +/// 0xFFFFFFFF for true. +/// /// \headerfile /// /// This intrinsic corresponds to the VCMPORDPS / CMPORDPS @@ -959,9 +1028,11 @@ _mm_cmpord_ps(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the value in the first operand is unordered -/// with respect to the corresponding value in the second operand and -/// returns the result of the comparison in the low-order bits of a vector -/// of [4 x float]. +/// with respect to the corresponding value in the second operand. +/// +/// A pair of single-precision values are unordered with respect to each +/// other if one or both values are NaN. Each comparison returns 0x0 for +/// false, 0xFFFFFFFF for true.
/// /// \headerfile /// @@ -986,6 +1057,10 @@ _mm_cmpunord_ss(__m128 __a, __m128 __b) /// 128-bit vectors of [4 x float] to determine if the values in the first /// operand are unordered with respect to those in the second operand. /// +/// A pair of single-precision values are unordered with respect to each +/// other if one or both values are NaN. Each comparison returns 0x0 for +/// false, 0xFFFFFFFF for true. +/// /// \headerfile /// /// This intrinsic corresponds to the VCMPUNORDPS / CMPUNORDPS @@ -1003,9 +1078,10 @@ _mm_cmpunord_ps(__m128 __a, __m128 __b) } /// Compares two 32-bit float values in the low-order bits of both -/// operands for equality and returns the result of the comparison. +/// operands for equality. /// -/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1018,8 +1094,7 @@ _mm_cmpunord_ps(__m128 __a, __m128 __b) /// \param __b /// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are /// used in the comparison. -/// \returns An integer containing the comparison results. If either of the -/// two lower 32-bit values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comieq_ss(__m128 __a, __m128 __b) { @@ -1028,9 +1103,10 @@ _mm_comieq_ss(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the first operand is less than the second -/// operand and returns the result of the comparison. +/// operand. /// -/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1043,8 +1119,7 @@ _mm_comieq_ss(__m128 __a, __m128 __b) /// \param __b /// A 128-bit vector of [4 x float].
The lower 32 bits of this operand are /// used in the comparison. -/// \returns An integer containing the comparison results. If either of the two -/// lower 32-bit values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comilt_ss(__m128 __a, __m128 __b) { @@ -1053,9 +1128,10 @@ _mm_comilt_ss(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the first operand is less than or equal to the -/// second operand and returns the result of the comparison. +/// second operand. /// -/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1067,8 +1143,7 @@ _mm_comilt_ss(__m128 __a, __m128 __b) /// \param __b /// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are /// used in the comparison. -/// \returns An integer containing the comparison results. If either of the two -/// lower 32-bit values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comile_ss(__m128 __a, __m128 __b) { @@ -1077,9 +1152,10 @@ _mm_comile_ss(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the first operand is greater than the second -/// operand and returns the result of the comparison. +/// operand. /// -/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1091,8 +1167,7 @@ _mm_comile_ss(__m128 __a, __m128 __b) /// \param __b /// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are /// used in the comparison. -/// \returns An integer containing the comparison results. 
If either of the -/// two lower 32-bit values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comigt_ss(__m128 __a, __m128 __b) { @@ -1101,9 +1176,10 @@ _mm_comigt_ss(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the first operand is greater than or equal to -/// the second operand and returns the result of the comparison. +/// the second operand. /// -/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1115,8 +1191,7 @@ _mm_comigt_ss(__m128 __a, __m128 __b) /// \param __b /// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are /// used in the comparison. -/// \returns An integer containing the comparison results. If either of the two -/// lower 32-bit values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comige_ss(__m128 __a, __m128 __b) { @@ -1125,9 +1200,10 @@ _mm_comige_ss(__m128 __a, __m128 __b) /// Compares two 32-bit float values in the low-order bits of both /// operands to determine if the first operand is not equal to the second -/// operand and returns the result of the comparison. +/// operand. /// -/// If either of the two lower 32-bit values is NaN, 1 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 1. /// /// \headerfile /// @@ -1139,8 +1215,7 @@ _mm_comige_ss(__m128 __a, __m128 __b) /// \param __b /// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are /// used in the comparison. -/// \returns An integer containing the comparison results. If either of the -/// two lower 32-bit values is NaN, 1 is returned. 
+/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_comineq_ss(__m128 __a, __m128 __b) { @@ -1148,10 +1223,10 @@ _mm_comineq_ss(__m128 __a, __m128 __b) } /// Performs an unordered comparison of two 32-bit float values using -/// the low-order bits of both operands to determine equality and returns -/// the result of the comparison. +/// the low-order bits of both operands to determine equality. /// -/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1163,8 +1238,7 @@ _mm_comineq_ss(__m128 __a, __m128 __b) /// \param __b /// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are /// used in the comparison. -/// \returns An integer containing the comparison results. If either of the two -/// lower 32-bit values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomieq_ss(__m128 __a, __m128 __b) { @@ -1173,9 +1247,10 @@ _mm_ucomieq_ss(__m128 __a, __m128 __b) /// Performs an unordered comparison of two 32-bit float values using /// the low-order bits of both operands to determine if the first operand is -/// less than the second operand and returns the result of the comparison. +/// less than the second operand. /// -/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1187,8 +1262,7 @@ _mm_ucomieq_ss(__m128 __a, __m128 __b) /// \param __b /// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are /// used in the comparison. -/// \returns An integer containing the comparison results. If either of the two -/// lower 32-bit values is NaN, 0 is returned. 
+/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomilt_ss(__m128 __a, __m128 __b) { @@ -1197,10 +1271,10 @@ _mm_ucomilt_ss(__m128 __a, __m128 __b) /// Performs an unordered comparison of two 32-bit float values using /// the low-order bits of both operands to determine if the first operand is -/// less than or equal to the second operand and returns the result of the -/// comparison. +/// less than or equal to the second operand. /// -/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1212,8 +1286,7 @@ _mm_ucomilt_ss(__m128 __a, __m128 __b) /// \param __b /// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are /// used in the comparison. -/// \returns An integer containing the comparison results. If either of the two -/// lower 32-bit values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomile_ss(__m128 __a, __m128 __b) { @@ -1222,10 +1295,10 @@ _mm_ucomile_ss(__m128 __a, __m128 __b) /// Performs an unordered comparison of two 32-bit float values using /// the low-order bits of both operands to determine if the first operand is -/// greater than the second operand and returns the result of the -/// comparison. +/// greater than the second operand. /// -/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1237,8 +1310,7 @@ _mm_ucomile_ss(__m128 __a, __m128 __b) /// \param __b /// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are /// used in the comparison. -/// \returns An integer containing the comparison results. If either of the two -/// lower 32-bit values is NaN, 0 is returned. 
+/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomigt_ss(__m128 __a, __m128 __b) { @@ -1247,10 +1319,10 @@ _mm_ucomigt_ss(__m128 __a, __m128 __b) /// Performs an unordered comparison of two 32-bit float values using /// the low-order bits of both operands to determine if the first operand is -/// greater than or equal to the second operand and returns the result of -/// the comparison. +/// greater than or equal to the second operand. /// -/// If either of the two lower 32-bit values is NaN, 0 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1262,8 +1334,7 @@ _mm_ucomigt_ss(__m128 __a, __m128 __b) /// \param __b /// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are /// used in the comparison. -/// \returns An integer containing the comparison results. If either of the two -/// lower 32-bit values is NaN, 0 is returned. +/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomige_ss(__m128 __a, __m128 __b) { @@ -1271,10 +1342,10 @@ _mm_ucomige_ss(__m128 __a, __m128 __b) } /// Performs an unordered comparison of two 32-bit float values using -/// the low-order bits of both operands to determine inequality and returns -/// the result of the comparison. +/// the low-order bits of both operands to determine inequality. /// -/// If either of the two lower 32-bit values is NaN, 1 is returned. +/// The comparison returns 0 for false, 1 for true. If either value in a +/// comparison is NaN, returns 0. /// /// \headerfile /// @@ -1286,8 +1357,7 @@ _mm_ucomige_ss(__m128 __a, __m128 __b) /// \param __b /// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are /// used in the comparison. -/// \returns An integer containing the comparison results. If either of the two -/// lower 32-bit values is NaN, 1 is returned. 
+/// \returns An integer containing the comparison results. static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomineq_ss(__m128 __a, __m128 __b) { @@ -1297,6 +1367,10 @@ _mm_ucomineq_ss(__m128 __a, __m128 __b) /// Converts a float value contained in the lower 32 bits of a vector of /// [4 x float] into a 32-bit integer. /// +/// If the converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// /// \headerfile /// /// This intrinsic corresponds to the VCVTSS2SI / CVTSS2SI @@ -1315,6 +1389,10 @@ _mm_cvtss_si32(__m128 __a) /// Converts a float value contained in the lower 32 bits of a vector of /// [4 x float] into a 32-bit integer. /// +/// If the converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// /// \headerfile /// /// This intrinsic corresponds to the VCVTSS2SI / CVTSS2SI @@ -1335,6 +1413,10 @@ _mm_cvt_ss2si(__m128 __a) /// Converts a float value contained in the lower 32 bits of a vector of /// [4 x float] into a 64-bit integer. /// +/// If the converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// /// \headerfile /// /// This intrinsic corresponds to the VCVTSS2SI / CVTSS2SI @@ -1355,6 +1437,10 @@ _mm_cvtss_si64(__m128 __a) /// Converts two low-order float values in a 128-bit vector of /// [4 x float] into a 64-bit vector of [2 x i32]. /// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// /// \headerfile /// /// This intrinsic corresponds to the CVTPS2PI instruction. 
@@ -1371,6 +1457,10 @@ _mm_cvtps_pi32(__m128 __a) /// Converts two low-order float values in a 128-bit vector of /// [4 x float] into a 64-bit vector of [2 x i32]. /// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. +/// /// \headerfile /// /// This intrinsic corresponds to the CVTPS2PI instruction. @@ -1384,9 +1474,12 @@ _mm_cvt_ps2pi(__m128 __a) return _mm_cvtps_pi32(__a); } -/// Converts a float value contained in the lower 32 bits of a vector of -/// [4 x float] into a 32-bit integer, truncating the result when it is -/// inexact. +/// Converts the lower (first) element of a vector of [4 x float] into a signed +/// truncated (rounded toward zero) 32-bit integer. +/// +/// If the converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. /// /// \headerfile /// @@ -1403,9 +1496,12 @@ _mm_cvttss_si32(__m128 __a) return __builtin_ia32_cvttss2si((__v4sf)__a); } -/// Converts a float value contained in the lower 32 bits of a vector of -/// [4 x float] into a 32-bit integer, truncating the result when it is -/// inexact. +/// Converts the lower (first) element of a vector of [4 x float] into a signed +/// truncated (rounded toward zero) 32-bit integer. +/// +/// If the converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. /// /// \headerfile /// @@ -1423,9 +1519,12 @@ _mm_cvtt_ss2si(__m128 __a) } #ifdef __x86_64__ -/// Converts a float value contained in the lower 32 bits of a vector of -/// [4 x float] into a 64-bit integer, truncating the result when it is -/// inexact. +/// Converts the lower (first) element of a vector of [4 x float] into a signed +/// truncated (rounded toward zero) 64-bit integer. 
+/// +/// If the converted value does not fit in a 64-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. /// /// \headerfile /// @@ -1443,9 +1542,13 @@ _mm_cvttss_si64(__m128 __a) } #endif -/// Converts two low-order float values in a 128-bit vector of -/// [4 x float] into a 64-bit vector of [2 x i32], truncating the result -/// when it is inexact. +/// Converts the lower (first) two elements of a 128-bit vector of [4 x float] +/// into two signed truncated (rounded toward zero) 32-bit integers, +/// returned in a 64-bit vector of [2 x i32]. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. /// /// \headerfile /// @@ -1461,9 +1564,13 @@ _mm_cvttps_pi32(__m128 __a) return (__m64)__builtin_ia32_cvttps2pi((__v4sf)__a); } -/// Converts two low-order float values in a 128-bit vector of [4 x -/// float] into a 64-bit vector of [2 x i32], truncating the result when it -/// is inexact. +/// Converts the lower (first) two elements of a 128-bit vector of [4 x float] +/// into two signed truncated (rounded toward zero) 64-bit integers, +/// returned in a 64-bit vector of [2 x i32]. +/// +/// If a converted value does not fit in a 32-bit integer, raises a +/// floating-point invalid exception. If the exception is masked, returns +/// the most negative integer. 
/// /// \headerfile /// @@ -2940,6 +3047,85 @@ _mm_movemask_ps(__m128 __a) return __builtin_ia32_movmskps((__v4sf)__a); } +/* Compare */ +#define _CMP_EQ_OQ 0x00 /* Equal (ordered, non-signaling) */ +#define _CMP_LT_OS 0x01 /* Less-than (ordered, signaling) */ +#define _CMP_LE_OS 0x02 /* Less-than-or-equal (ordered, signaling) */ +#define _CMP_UNORD_Q 0x03 /* Unordered (non-signaling) */ +#define _CMP_NEQ_UQ 0x04 /* Not-equal (unordered, non-signaling) */ +#define _CMP_NLT_US 0x05 /* Not-less-than (unordered, signaling) */ +#define _CMP_NLE_US 0x06 /* Not-less-than-or-equal (unordered, signaling) */ +#define _CMP_ORD_Q 0x07 /* Ordered (non-signaling) */ + +/// Compares each of the corresponding values of two 128-bit vectors of +/// [4 x float], using the operation specified by the immediate integer +/// operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_cmp_ps(__m128 a, __m128 b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the (V)CMPPS instruction. +/// +/// \param a +/// A 128-bit vector of [4 x float]. +/// \param b +/// A 128-bit vector of [4 x float]. +/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 0x00: Equal (ordered, non-signaling) \n +/// 0x01: Less-than (ordered, signaling) \n +/// 0x02: Less-than-or-equal (ordered, signaling) \n +/// 0x03: Unordered (non-signaling) \n +/// 0x04: Not-equal (unordered, non-signaling) \n +/// 0x05: Not-less-than (unordered, signaling) \n +/// 0x06: Not-less-than-or-equal (unordered, signaling) \n +/// 0x07: Ordered (non-signaling) \n +/// \returns A 128-bit vector of [4 x float] containing the comparison results. 
+#define _mm_cmp_ps(a, b, c) \ + ((__m128)__builtin_ia32_cmpps((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), (c))) + +/// Compares each of the corresponding scalar values of two 128-bit +/// vectors of [4 x float], using the operation specified by the immediate +/// integer operand. +/// +/// Each comparison returns 0x0 for false, 0xFFFFFFFF for true. +/// If either value in a comparison is NaN, comparisons that are ordered +/// return false, and comparisons that are unordered return true. +/// +/// \headerfile +/// +/// \code +/// __m128 _mm_cmp_ss(__m128 a, __m128 b, const int c); +/// \endcode +/// +/// This intrinsic corresponds to the (V)CMPSS instruction. +/// +/// \param a +/// A 128-bit vector of [4 x float]. +/// \param b +/// A 128-bit vector of [4 x float]. +/// \param c +/// An immediate integer operand, with bits [4:0] specifying which comparison +/// operation to use: \n +/// 0x00: Equal (ordered, non-signaling) \n +/// 0x01: Less-than (ordered, signaling) \n +/// 0x02: Less-than-or-equal (ordered, signaling) \n +/// 0x03: Unordered (non-signaling) \n +/// 0x04: Not-equal (unordered, non-signaling) \n +/// 0x05: Not-less-than (unordered, signaling) \n +/// 0x06: Not-less-than-or-equal (unordered, signaling) \n +/// 0x07: Ordered (non-signaling) \n +/// \returns A 128-bit vector of [4 x float] containing the comparison results. +#define _mm_cmp_ss(a, b, c) \ + ((__m128)__builtin_ia32_cmpss((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), (c))) #define _MM_ALIGN16 __attribute__((aligned(16))) diff --git a/lib/include/yvals_core.h b/lib/include/yvals_core.h new file mode 100644 index 000000000000..5ee194a3e5f5 --- /dev/null +++ b/lib/include/yvals_core.h @@ -0,0 +1,25 @@ +//===----- yvals_core.h - Internal MSVC STL core header -------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// Only include this if we are aiming for MSVC compatibility. +#ifndef _MSC_VER +#include_next +#else + +#ifndef __clang_yvals_core_h +#define __clang_yvals_core_h + +#include_next + +#ifdef _STL_INTRIN_HEADER +#undef _STL_INTRIN_HEADER +#define _STL_INTRIN_HEADER +#endif + +#endif +#endif diff --git a/lib/include/zos_wrappers/builtins.h b/lib/include/zos_wrappers/builtins.h new file mode 100644 index 000000000000..1f0d0e27ecb3 --- /dev/null +++ b/lib/include/zos_wrappers/builtins.h @@ -0,0 +1,18 @@ +/*===---- builtins.h - z/Architecture Builtin Functions --------------------=== + * + * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + * See https://llvm.org/LICENSE.txt for license information. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __ZOS_WRAPPERS_BUILTINS_H +#define __ZOS_WRAPPERS_BUILTINS_H +#if defined(__MVS__) +#include_next +#if defined(__VEC__) +#include +#endif +#endif /* defined(__MVS__) */ +#endif /* __ZOS_WRAPPERS_BUILTINS_H */ From 26ddfabba4611231fa486322b9ba11b1057a2053 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Fri, 23 Aug 2024 02:15:50 +0200 Subject: [PATCH 165/202] libunwind: Update to LLVM 19. 
--- lib/libunwind/include/__libunwind_config.h | 4 +++ lib/libunwind/src/Registers.hpp | 7 +++++ lib/libunwind/src/Unwind-wasm.c | 4 +-- lib/libunwind/src/UnwindCursor.hpp | 18 ++++++++---- lib/libunwind/src/UnwindLevel1.c | 34 +++++++++++++++++++--- lib/libunwind/src/UnwindRegistersRestore.S | 18 ++++++++++-- lib/libunwind/src/UnwindRegistersSave.S | 4 +-- lib/libunwind/src/assembly.h | 25 ++++++++++++---- lib/libunwind/src/cet_unwind.h | 22 ++++++++++++++ lib/libunwind/src/libunwind.cpp | 5 ++-- 10 files changed, 118 insertions(+), 23 deletions(-) diff --git a/lib/libunwind/include/__libunwind_config.h b/lib/libunwind/include/__libunwind_config.h index 8db336b2d727..028b9e3baa80 100644 --- a/lib/libunwind/include/__libunwind_config.h +++ b/lib/libunwind/include/__libunwind_config.h @@ -180,6 +180,10 @@ #endif #define _LIBUNWIND_HIGHEST_DWARF_REGISTER \ _LIBUNWIND_HIGHEST_DWARF_REGISTER_LOONGARCH +#elif defined(__wasm__) +// Unused +#define _LIBUNWIND_CONTEXT_SIZE 0 +#define _LIBUNWIND_CURSOR_SIZE 0 # else # error "Unsupported architecture." # endif diff --git a/lib/libunwind/src/Registers.hpp b/lib/libunwind/src/Registers.hpp index d11ddb3426d5..861e6b5f6f2c 100644 --- a/lib/libunwind/src/Registers.hpp +++ b/lib/libunwind/src/Registers.hpp @@ -1815,6 +1815,13 @@ inline const char *Registers_ppc64::getRegisterName(int regNum) { /// process. 
class _LIBUNWIND_HIDDEN Registers_arm64; extern "C" void __libunwind_Registers_arm64_jumpto(Registers_arm64 *); + +#if defined(_LIBUNWIND_USE_GCS) +extern "C" void *__libunwind_cet_get_jump_target() { + return reinterpret_cast(&__libunwind_Registers_arm64_jumpto); +} +#endif + class _LIBUNWIND_HIDDEN Registers_arm64 { public: Registers_arm64(); diff --git a/lib/libunwind/src/Unwind-wasm.c b/lib/libunwind/src/Unwind-wasm.c index f7f39d38b59c..b18b32c5d178 100644 --- a/lib/libunwind/src/Unwind-wasm.c +++ b/lib/libunwind/src/Unwind-wasm.c @@ -14,7 +14,7 @@ #include "config.h" -#ifdef __USING_WASM_EXCEPTIONS__ +#ifdef __WASM_EXCEPTIONS__ #include "unwind.h" #include @@ -120,4 +120,4 @@ _Unwind_GetRegionStart(struct _Unwind_Context *context) { return 0; } -#endif // defined(__USING_WASM_EXCEPTIONS__) +#endif // defined(__WASM_EXCEPTIONS__) diff --git a/lib/libunwind/src/UnwindCursor.hpp b/lib/libunwind/src/UnwindCursor.hpp index 7753936a5894..06e654197351 100644 --- a/lib/libunwind/src/UnwindCursor.hpp +++ b/lib/libunwind/src/UnwindCursor.hpp @@ -36,7 +36,6 @@ #include #include #include -#include #include #define _LIBUNWIND_CHECK_LINUX_SIGRETURN 1 #endif @@ -472,7 +471,7 @@ class _LIBUNWIND_HIDDEN AbstractUnwindCursor { } #endif -#if defined(_LIBUNWIND_USE_CET) +#if defined(_LIBUNWIND_USE_CET) || defined(_LIBUNWIND_USE_GCS) virtual void *get_registers() { _LIBUNWIND_ABORT("get_registers not implemented"); } @@ -955,7 +954,7 @@ class UnwindCursor : public AbstractUnwindCursor{ virtual uintptr_t getDataRelBase(); #endif -#if defined(_LIBUNWIND_USE_CET) +#if defined(_LIBUNWIND_USE_CET) || defined(_LIBUNWIND_USE_GCS) virtual void *get_registers() { return &_registers; } #endif @@ -2416,7 +2415,7 @@ int UnwindCursor::stepWithTBTable(pint_t pc, tbtable *TBTable, } // Reset LR in the current context. 
- newRegisters.setLR(NULL); + newRegisters.setLR(static_cast(NULL)); _LIBUNWIND_TRACE_UNWINDING( "Extract info from lastStack=%p, returnAddress=%p", @@ -2590,6 +2589,15 @@ void UnwindCursor::setInfoBasedOnIPRegister(bool isReturnAddress) { --pc; #endif +#if !(defined(_LIBUNWIND_SUPPORT_SEH_UNWIND) && defined(_WIN32)) && \ + !defined(_LIBUNWIND_SUPPORT_TBTAB_UNWIND) + // In case of this is frame of signal handler, the IP saved in the signal + // handler points to first non-executed instruction, while FDE/CIE expects IP + // to be after the first non-executed instruction. + if (_isSignalFrame) + ++pc; +#endif + // Ask address space object to find unwind sections for this pc. UnwindInfoSections sects; if (_addressSpace.findUnwindSections(pc, sects)) { @@ -2997,7 +3005,7 @@ bool UnwindCursor::isReadableAddr(const pint_t addr) const { } #endif -#if defined(_LIBUNWIND_USE_CET) +#if defined(_LIBUNWIND_USE_CET) || defined(_LIBUNWIND_USE_GCS) extern "C" void *__libunwind_cet_get_registers(unw_cursor_t *cursor) { AbstractUnwindCursor *co = (AbstractUnwindCursor *)cursor; return co->get_registers(); diff --git a/lib/libunwind/src/UnwindLevel1.c b/lib/libunwind/src/UnwindLevel1.c index 05d0f2cb0a0a..7e785f4d31e7 100644 --- a/lib/libunwind/src/UnwindLevel1.c +++ b/lib/libunwind/src/UnwindLevel1.c @@ -31,7 +31,8 @@ #include "libunwind_ext.h" #include "unwind.h" -#if !defined(_LIBUNWIND_ARM_EHABI) && !defined(__USING_SJLJ_EXCEPTIONS__) +#if !defined(_LIBUNWIND_ARM_EHABI) && !defined(__USING_SJLJ_EXCEPTIONS__) && \ + !defined(__wasm__) #ifndef _LIBUNWIND_SUPPORT_SEH_UNWIND @@ -43,7 +44,7 @@ // _LIBUNWIND_POP_CET_SSP is used to adjust CET shadow stack pointer and we // directly jump to __libunwind_Registers_x86/x86_64_jumpto instead of using // a regular function call to avoid pushing to CET shadow stack again. 
-#if !defined(_LIBUNWIND_USE_CET) +#if !defined(_LIBUNWIND_USE_CET) && !defined(_LIBUNWIND_USE_GCS) #define __unw_phase2_resume(cursor, fn) \ do { \ (void)fn; \ @@ -71,6 +72,19 @@ __asm__ volatile("jmpq *%%rdx\n\t" :: "D"(cetRegContext), \ "d"(cetJumpAddress)); \ } while (0) +#elif defined(_LIBUNWIND_TARGET_AARCH64) +#define __cet_ss_step_size 8 +#define __unw_phase2_resume(cursor, fn) \ + do { \ + _LIBUNWIND_POP_CET_SSP((fn)); \ + void *cetRegContext = __libunwind_cet_get_registers((cursor)); \ + void *cetJumpAddress = __libunwind_cet_get_jump_target(); \ + __asm__ volatile("mov x0, %0\n\t" \ + "br %1\n\t" \ + : \ + : "r"(cetRegContext), "r"(cetJumpAddress) \ + : "x0"); \ + } while (0) #endif static _Unwind_Reason_Code @@ -169,6 +183,10 @@ unwind_phase1(unw_context_t *uc, unw_cursor_t *cursor, _Unwind_Exception *except } extern int __unw_step_stage2(unw_cursor_t *); +#if defined(_LIBUNWIND_USE_GCS) +// Enable the GCS target feature to permit gcspop instructions to be used. +__attribute__((target("gcs"))) +#endif static _Unwind_Reason_Code unwind_phase2(unw_context_t *uc, unw_cursor_t *cursor, _Unwind_Exception *exception_object) { __unw_init_local(cursor, uc); @@ -179,8 +197,12 @@ unwind_phase2(unw_context_t *uc, unw_cursor_t *cursor, _Unwind_Exception *except // uc is initialized by __unw_getcontext in the parent frame. The first stack // frame walked is unwind_phase2. unsigned framesWalked = 1; -#ifdef _LIBUNWIND_USE_CET +#if defined(_LIBUNWIND_USE_CET) unsigned long shadowStackTop = _get_ssp(); +#elif defined(_LIBUNWIND_USE_GCS) + unsigned long shadowStackTop = 0; + if (__chkfeat(_CHKFEAT_GCS)) + shadowStackTop = (unsigned long)__gcspr(); #endif // Walk each frame until we reach where search phase said to stop. 
while (true) { @@ -237,7 +259,7 @@ unwind_phase2(unw_context_t *uc, unw_cursor_t *cursor, _Unwind_Exception *except // against return address stored in CET shadow stack, if the 2 addresses don't // match, it means return address in normal stack has been corrupted, we return // _URC_FATAL_PHASE2_ERROR. -#ifdef _LIBUNWIND_USE_CET +#if defined(_LIBUNWIND_USE_CET) || defined(_LIBUNWIND_USE_GCS) if (shadowStackTop != 0) { unw_word_t retInNormalStack; __unw_get_reg(cursor, UNW_REG_IP, &retInNormalStack); @@ -305,6 +327,10 @@ unwind_phase2(unw_context_t *uc, unw_cursor_t *cursor, _Unwind_Exception *except return _URC_FATAL_PHASE2_ERROR; } +#if defined(_LIBUNWIND_USE_GCS) +// Enable the GCS target feature to permit gcspop instructions to be used. +__attribute__((target("gcs"))) +#endif static _Unwind_Reason_Code unwind_phase2_forced(unw_context_t *uc, unw_cursor_t *cursor, _Unwind_Exception *exception_object, diff --git a/lib/libunwind/src/UnwindRegistersRestore.S b/lib/libunwind/src/UnwindRegistersRestore.S index 42c2488fc7cf..9d34c7909ed3 100644 --- a/lib/libunwind/src/UnwindRegistersRestore.S +++ b/lib/libunwind/src/UnwindRegistersRestore.S @@ -20,7 +20,7 @@ .text #endif -#if !defined(__USING_SJLJ_EXCEPTIONS__) +#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__) #if defined(__i386__) DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_jumpto) @@ -629,6 +629,10 @@ Lnovec: #elif defined(__aarch64__) +#if defined(__ARM_FEATURE_GCS_DEFAULT) +.arch_extension gcs +#endif + // // extern "C" void __libunwind_Registers_arm64_jumpto(Registers_arm64 *); // @@ -680,6 +684,16 @@ DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_arm64_jumpto) ldr x16, [x0, #0x0F8] ldp x0, x1, [x0, #0x000] // restore x0,x1 mov sp,x16 // restore sp +#if defined(__ARM_FEATURE_GCS_DEFAULT) + // If GCS is enabled we need to push the address we're returning to onto the + // GCS stack. We can't just return using br, as there won't be a BTI landing + // pad instruction at the destination. 
+ mov x16, #1 + chkfeat x16 + cbnz x16, Lnogcs + gcspushm x30 +Lnogcs: +#endif ret x30 // jump to pc #elif defined(__arm__) && !defined(__APPLE__) @@ -1232,7 +1246,7 @@ DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind19Registers_loongarch6jumptoEv) #endif -#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */ +#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__) */ NO_EXEC_STACK_DIRECTIVE diff --git a/lib/libunwind/src/UnwindRegistersSave.S b/lib/libunwind/src/UnwindRegistersSave.S index 19a0e87d683c..5bf6055fe414 100644 --- a/lib/libunwind/src/UnwindRegistersSave.S +++ b/lib/libunwind/src/UnwindRegistersSave.S @@ -20,7 +20,7 @@ .text #endif -#if !defined(__USING_SJLJ_EXCEPTIONS__) +#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__) #if defined(__i386__) @@ -1177,6 +1177,6 @@ DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext) WEAK_ALIAS(__unw_getcontext, unw_getcontext) -#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */ +#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__) */ NO_EXEC_STACK_DIRECTIVE diff --git a/lib/libunwind/src/assembly.h b/lib/libunwind/src/assembly.h index fb07d04071af..f8e83e138eff 100644 --- a/lib/libunwind/src/assembly.h +++ b/lib/libunwind/src/assembly.h @@ -82,7 +82,22 @@ #define PPC64_OPD2 #endif -#if defined(__aarch64__) && defined(__ARM_FEATURE_BTI_DEFAULT) +#if defined(__aarch64__) +#if defined(__ARM_FEATURE_GCS_DEFAULT) && defined(__ARM_FEATURE_BTI_DEFAULT) +// Set BTI, PAC, and GCS gnu property bits +#define GNU_PROPERTY 7 +// We indirectly branch to __libunwind_Registers_arm64_jumpto from +// __unw_phase2_resume, so we need to use bti jc. 
+#define AARCH64_BTI bti jc +#elif defined(__ARM_FEATURE_GCS_DEFAULT) +// Set GCS gnu property bit +#define GNU_PROPERTY 4 +#elif defined(__ARM_FEATURE_BTI_DEFAULT) +// Set BTI and PAC gnu property bits +#define GNU_PROPERTY 3 +#define AARCH64_BTI bti c +#endif +#ifdef GNU_PROPERTY .pushsection ".note.gnu.property", "a" SEPARATOR \ .balign 8 SEPARATOR \ .long 4 SEPARATOR \ @@ -91,12 +106,12 @@ .asciz "GNU" SEPARATOR \ .long 0xc0000000 SEPARATOR /* GNU_PROPERTY_AARCH64_FEATURE_1_AND */ \ .long 4 SEPARATOR \ - .long 3 SEPARATOR /* GNU_PROPERTY_AARCH64_FEATURE_1_BTI AND */ \ - /* GNU_PROPERTY_AARCH64_FEATURE_1_PAC */ \ + .long GNU_PROPERTY SEPARATOR \ .long 0 SEPARATOR \ .popsection SEPARATOR -#define AARCH64_BTI bti c -#else +#endif +#endif +#if !defined(AARCH64_BTI) #define AARCH64_BTI #endif diff --git a/lib/libunwind/src/cet_unwind.h b/lib/libunwind/src/cet_unwind.h index c364ed3e12fe..47d7616a7322 100644 --- a/lib/libunwind/src/cet_unwind.h +++ b/lib/libunwind/src/cet_unwind.h @@ -35,6 +35,28 @@ } while (0) #endif +// On AArch64 we use _LIBUNWIND_USE_GCS to indicate that GCS is supported. We +// need to guard any use of GCS instructions with __chkfeat though, as GCS may +// not be enabled. +#if defined(_LIBUNWIND_TARGET_AARCH64) && defined(__ARM_FEATURE_GCS_DEFAULT) +#include + +// We can only use GCS if arm_acle.h defines the GCS intrinsics. 
+#ifdef _CHKFEAT_GCS +#define _LIBUNWIND_USE_GCS 1 +#endif + +#define _LIBUNWIND_POP_CET_SSP(x) \ + do { \ + if (__chkfeat(_CHKFEAT_GCS)) { \ + unsigned tmp = (x); \ + while (tmp--) \ + __gcspopm(); \ + } \ + } while (0) + +#endif + extern void *__libunwind_cet_get_registers(unw_cursor_t *); extern void *__libunwind_cet_get_jump_target(void); diff --git a/lib/libunwind/src/libunwind.cpp b/lib/libunwind/src/libunwind.cpp index 217dde909863..cf39ec5f7dbd 100644 --- a/lib/libunwind/src/libunwind.cpp +++ b/lib/libunwind/src/libunwind.cpp @@ -26,7 +26,7 @@ #include #endif -#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__USING_WASM_EXCEPTIONS__) +#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__) #include "AddressSpace.hpp" #include "UnwindCursor.hpp" @@ -347,8 +347,7 @@ void __unw_remove_dynamic_eh_frame_section(unw_word_t eh_frame_start) { } #endif // defined(_LIBUNWIND_SUPPORT_DWARF_UNWIND) -#endif // !defined(__USING_SJLJ_EXCEPTIONS__) && - // !defined(__USING_WASM_EXCEPTIONS__) +#endif // !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__) #ifdef __APPLE__ From 70a1805e46b66955b52ba5ac46914f212d99d19c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Fri, 23 Aug 2024 02:25:18 +0200 Subject: [PATCH 166/202] libunwind: Synchronize some CFLAGS/CXXFLAGS with upstream. 
--- src/libunwind.zig | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/libunwind.zig b/src/libunwind.zig index dd2ed391300c..605206467cd8 100644 --- a/src/libunwind.zig +++ b/src/libunwind.zig @@ -100,14 +100,18 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: std.Progress.Node) BuildErr switch (Compilation.classifyFileExt(unwind_src)) { .c => { - try cflags.append("-std=c11"); + try cflags.append("-std=c17"); }, .cpp => { - try cflags.appendSlice(&[_][]const u8{"-fno-rtti"}); + try cflags.appendSlice(&[_][]const u8{ + "-std=c++17", + "-fno-rtti", + }); }, .assembly_with_cpp => {}, else => unreachable, // You can see the entire list of files just above. } + try cflags.append("-fno-exceptions"); try cflags.append("-I"); try cflags.append(try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libunwind", "include" })); if (target_util.supports_fpic(target)) { @@ -117,6 +121,7 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: std.Progress.Node) BuildErr try cflags.append("-Wa,--noexecstack"); try cflags.append("-fvisibility=hidden"); try cflags.append("-fvisibility-inlines-hidden"); + try cflags.append("-fvisibility-global-new-delete=force-hidden"); // necessary so that libunwind can unwind through its own stack frames try cflags.append("-funwind-tables"); From d13bc04cb4c4c2f0806b9d9c676cb013ea888828 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Fri, 23 Aug 2024 02:31:37 +0200 Subject: [PATCH 167/202] libcxxabi: Update to LLVM 19. 
--- lib/libcxxabi/include/cxxabi.h | 13 ++++++- lib/libcxxabi/src/aix_state_tab_eh.inc | 16 ++++---- lib/libcxxabi/src/cxa_exception.cpp | 12 +++++- lib/libcxxabi/src/cxa_exception.h | 2 +- lib/libcxxabi/src/cxa_exception_storage.cpp | 4 +- lib/libcxxabi/src/cxa_guard_impl.h | 2 +- lib/libcxxabi/src/cxa_personality.cpp | 40 ++++++++++---------- lib/libcxxabi/src/cxa_thread_atexit.cpp | 2 +- lib/libcxxabi/src/demangle/ItaniumDemangle.h | 12 +++--- lib/libcxxabi/src/fallback_malloc.cpp | 2 +- lib/libcxxabi/src/private_typeinfo.cpp | 34 +++++++++++------ 11 files changed, 84 insertions(+), 55 deletions(-) diff --git a/lib/libcxxabi/include/cxxabi.h b/lib/libcxxabi/include/cxxabi.h index d0701181751c..8d1b5c9b5578 100644 --- a/lib/libcxxabi/include/cxxabi.h +++ b/lib/libcxxabi/include/cxxabi.h @@ -48,13 +48,17 @@ extern _LIBCXXABI_FUNC_VIS void __cxa_free_exception(void *thrown_exception) throw(); // This function is an LLVM extension, which mirrors the same extension in libsupc++ and libcxxrt extern _LIBCXXABI_FUNC_VIS __cxa_exception* +#ifdef __wasm__ +// In Wasm, a destructor returns its argument +__cxa_init_primary_exception(void* object, std::type_info* tinfo, void*(_LIBCXXABI_DTOR_FUNC* dest)(void*)) throw(); +#else __cxa_init_primary_exception(void* object, std::type_info* tinfo, void(_LIBCXXABI_DTOR_FUNC* dest)(void*)) throw(); +#endif // 2.4.3 Throwing the Exception Object extern _LIBCXXABI_FUNC_VIS _LIBCXXABI_NORETURN void __cxa_throw(void *thrown_exception, std::type_info *tinfo, -#ifdef __USING_WASM_EXCEPTIONS__ - // In Wasm, a destructor returns its argument +#ifdef __wasm__ void *(_LIBCXXABI_DTOR_FUNC *dest)(void *)); #else void (_LIBCXXABI_DTOR_FUNC *dest)(void *)); @@ -73,6 +77,11 @@ extern _LIBCXXABI_FUNC_VIS void __cxa_end_cleanup(); #endif extern _LIBCXXABI_FUNC_VIS std::type_info *__cxa_current_exception_type(); +// GNU extension +// Calls `terminate` with the current exception being caught. 
This function is used by GCC when a `noexcept` function +// throws an exception inside a try/catch block and doesn't catch it. +extern _LIBCXXABI_FUNC_VIS _LIBCXXABI_NORETURN void __cxa_call_terminate(void*) throw(); + // 2.5.4 Rethrowing Exceptions extern _LIBCXXABI_FUNC_VIS _LIBCXXABI_NORETURN void __cxa_rethrow(); diff --git a/lib/libcxxabi/src/aix_state_tab_eh.inc b/lib/libcxxabi/src/aix_state_tab_eh.inc index 0cd94834b37a..0ed329890a0a 100644 --- a/lib/libcxxabi/src/aix_state_tab_eh.inc +++ b/lib/libcxxabi/src/aix_state_tab_eh.inc @@ -102,8 +102,6 @@ static bool state_tab_dbg() { namespace __state_table_eh { -using destruct_f = void (*)(void*); - // Definition of flags for the state table entry field 'action flag'. enum FSMEntryCount : intptr_t { beginCatch = -1, endCatch = -2, deleteObject = -3, cleanupLabel = -4, terminate = -5 }; @@ -145,8 +143,10 @@ struct FSMEntry { intptr_t nextStatePtr; }; union { - // Address of the destructor function. - void (*destructor)(void*, size_t); + // Address of the destructor function with 1 argument. + void (*destructor)(void*); + // Address of the destructor function with 2 arguments. + void (*xlCDestructor)(void*, size_t); // The address of the catch block or cleanup code. void* landingPad; }; @@ -191,12 +191,12 @@ static void invoke_destructor(FSMEntry* fsmEntry, void* addr) { try { if (fsmEntry->elementCount == 1) { _LIBCXXABI_TRACE_STATETAB0("calling scalar destructor\n"); - (*fsmEntry->destructor)(addr, dtorArgument); + (*fsmEntry->xlCDestructor)(addr, dtorArgument); _LIBCXXABI_TRACE_STATETAB0("returned from scalar destructor\n"); } else { _LIBCXXABI_TRACE_STATETAB0("calling vector destructor\n"); __cxa_vec_cleanup(addr, reinterpret_cast(fsmEntry->elementCount), fsmEntry->elemSize, - reinterpret_cast(fsmEntry->destructor)); + fsmEntry->destructor); _LIBCXXABI_TRACE_STATETAB0("returned from vector destructor\n"); } } catch (...) 
{ @@ -213,7 +213,7 @@ static void invoke_delete(FSMEntry* fsmEntry, void* addr) { try { _LIBCXXABI_TRACE_STATETAB0("..calling delete()\n"); // 'destructor' holds a function pointer to delete(). - (*fsmEntry->destructor)(objectAddress, fsmEntry->elemSize); + (*fsmEntry->xlCDestructor)(objectAddress, fsmEntry->elemSize); _LIBCXXABI_TRACE_STATETAB0("..returned from delete()\n"); } catch (...) { _LIBCXXABI_TRACE_STATETAB0("Uncaught exception in delete(), terminating\n"); @@ -681,7 +681,7 @@ static uintptr_t* skip_non_cxx_eh_aware_frames(uint32_t* Pc, uintptr_t* Sp) { // xlclang++ compiled code. If __xlc_exception_handle() is called by // non-C++ EH aware functions, their frames are skipped until a C++ EH aware // frame is found. -// Note: make sure __xlc_excpetion_handle() is a non-leaf function. Currently +// Note: make sure __xlc_exception_handle() is a non-leaf function. Currently // it calls skip_non_cxx_eh_aware_frames(), which in turn calls abort(). _LIBCXXABI_FUNC_VIS uintptr_t __xlc_exception_handle() { // Get the SP of this function, i.e., __xlc_exception_handle(). diff --git a/lib/libcxxabi/src/cxa_exception.cpp b/lib/libcxxabi/src/cxa_exception.cpp index 65e9f4504dda..92901a83bfd0 100644 --- a/lib/libcxxabi/src/cxa_exception.cpp +++ b/lib/libcxxabi/src/cxa_exception.cpp @@ -207,7 +207,12 @@ void __cxa_free_exception(void *thrown_object) throw() { } __cxa_exception* __cxa_init_primary_exception(void* object, std::type_info* tinfo, +#ifdef __wasm__ +// In Wasm, a destructor returns its argument + void *(_LIBCXXABI_DTOR_FUNC* dest)(void*)) throw() { +#else void(_LIBCXXABI_DTOR_FUNC* dest)(void*)) throw() { +#endif __cxa_exception* exception_header = cxa_exception_from_thrown_object(object); exception_header->referenceCount = 0; exception_header->unexpectedHandler = std::get_unexpected(); @@ -267,7 +272,7 @@ will call terminate, assuming that there was no handler for the exception. 
*/ void -#ifdef __USING_WASM_EXCEPTIONS__ +#ifdef __wasm__ // In Wasm, a destructor returns its argument __cxa_throw(void *thrown_object, std::type_info *tinfo, void *(_LIBCXXABI_DTOR_FUNC *dest)(void *)) { #else @@ -584,6 +589,11 @@ void __cxa_end_catch() { } } +void __cxa_call_terminate(void* unwind_arg) throw() { + __cxa_begin_catch(unwind_arg); + std::terminate(); +} + // Note: exception_header may be masquerading as a __cxa_dependent_exception // and that's ok. exceptionType is there too. // However watch out for foreign exceptions. Return null for them. diff --git a/lib/libcxxabi/src/cxa_exception.h b/lib/libcxxabi/src/cxa_exception.h index 10712f6f47bb..aba08f299210 100644 --- a/lib/libcxxabi/src/cxa_exception.h +++ b/lib/libcxxabi/src/cxa_exception.h @@ -43,7 +43,7 @@ struct _LIBCXXABI_HIDDEN __cxa_exception { // Manage the exception object itself. std::type_info *exceptionType; -#ifdef __USING_WASM_EXCEPTIONS__ +#ifdef __wasm__ // In Wasm, a destructor returns its argument void *(_LIBCXXABI_DTOR_FUNC *exceptionDestructor)(void *); #else diff --git a/lib/libcxxabi/src/cxa_exception_storage.cpp b/lib/libcxxabi/src/cxa_exception_storage.cpp index 3a3233a1b927..c842da195acc 100644 --- a/lib/libcxxabi/src/cxa_exception_storage.cpp +++ b/lib/libcxxabi/src/cxa_exception_storage.cpp @@ -12,7 +12,7 @@ #include "cxa_exception.h" -#include <__threading_support> +#include <__thread/support.h> #if defined(_LIBCXXABI_HAS_NO_THREADS) @@ -24,7 +24,7 @@ extern "C" { } // extern "C" } // namespace __cxxabiv1 -#elif defined(HAS_THREAD_LOCAL) +#elif __has_feature(cxx_thread_local) namespace __cxxabiv1 { namespace { diff --git a/lib/libcxxabi/src/cxa_guard_impl.h b/lib/libcxxabi/src/cxa_guard_impl.h index 90d589be4d77..320501cb8593 100644 --- a/lib/libcxxabi/src/cxa_guard_impl.h +++ b/lib/libcxxabi/src/cxa_guard_impl.h @@ -58,7 +58,7 @@ # endif #endif -#include <__threading_support> +#include <__thread/support.h> #include #include #include diff --git 
a/lib/libcxxabi/src/cxa_personality.cpp b/lib/libcxxabi/src/cxa_personality.cpp index 4b6c4edbc266..843a18a4cbd8 100644 --- a/lib/libcxxabi/src/cxa_personality.cpp +++ b/lib/libcxxabi/src/cxa_personality.cpp @@ -70,7 +70,7 @@ extern "C" EXCEPTION_DISPOSITION _GCC_specific_handler(PEXCEPTION_RECORD, +------------------+--+-----+-----+------------------------+--------------------------+ | callSiteTableLength | (ULEB128) | Call Site Table length, used to find Action table | +---------------------+-----------+---------------------------------------------------+ -#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__USING_WASM_EXCEPTIONS__) +#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__WASM_EXCEPTIONS__) +---------------------+-----------+------------------------------------------------+ | Beginning of Call Site Table The current ip lies within the | | ... (start, length) range of one of these | @@ -84,7 +84,7 @@ extern "C" EXCEPTION_DISPOSITION _GCC_specific_handler(PEXCEPTION_RECORD, | +-------------+---------------------------------+------------------------------+ | | ... | +----------------------------------------------------------------------------------+ -#else // __USING_SJLJ_EXCEPTIONS__ || __USING_WASM_EXCEPTIONS__ +#else // __USING_SJLJ_EXCEPTIONS__ || __WASM_EXCEPTIONS__ +---------------------+-----------+------------------------------------------------+ | Beginning of Call Site Table The current ip is a 1-based index into | | ... this table. Or it is -1 meaning no | @@ -97,7 +97,7 @@ extern "C" EXCEPTION_DISPOSITION _GCC_specific_handler(PEXCEPTION_RECORD, | +-------------+---------------------------------+------------------------------+ | | ... 
| +----------------------------------------------------------------------------------+ -#endif // __USING_SJLJ_EXCEPTIONS__ || __USING_WASM_EXCEPTIONS__ +#endif // __USING_SJLJ_EXCEPTIONS__ || __WASM_EXCEPTIONS__ +---------------------------------------------------------------------+ | Beginning of Action Table ttypeIndex == 0 : cleanup | | ... ttypeIndex > 0 : catch | @@ -547,7 +547,7 @@ void set_registers(_Unwind_Exception* unwind_exception, _Unwind_Context* context, const scan_results& results) { -#if defined(__USING_SJLJ_EXCEPTIONS__) || defined(__USING_WASM_EXCEPTIONS__) +#if defined(__USING_SJLJ_EXCEPTIONS__) || defined(__WASM_EXCEPTIONS__) #define __builtin_eh_return_data_regno(regno) regno #elif defined(__ibmxl__) // IBM xlclang++ compiler does not support __builtin_eh_return_data_regno. @@ -642,7 +642,7 @@ static void scan_eh_tab(scan_results &results, _Unwind_Action actions, // Get beginning current frame's code (as defined by the // emitted dwarf code) uintptr_t funcStart = _Unwind_GetRegionStart(context); -#if defined(__USING_SJLJ_EXCEPTIONS__) || defined(__USING_WASM_EXCEPTIONS__) +#if defined(__USING_SJLJ_EXCEPTIONS__) || defined(__WASM_EXCEPTIONS__) if (ip == uintptr_t(-1)) { // no action @@ -652,9 +652,9 @@ static void scan_eh_tab(scan_results &results, _Unwind_Action actions, else if (ip == 0) call_terminate(native_exception, unwind_exception); // ip is 1-based index into call site table -#else // !__USING_SJLJ_EXCEPTIONS__ && !__USING_WASM_EXCEPTIONS__ +#else // !__USING_SJLJ_EXCEPTIONS__ && !__WASM_EXCEPTIONS__ uintptr_t ipOffset = ip - funcStart; -#endif // !__USING_SJLJ_EXCEPTIONS__ && !__USING_WASM_EXCEPTIONS__ +#endif // !__USING_SJLJ_EXCEPTIONS__ && !__WASM_EXCEPTIONS__ const uint8_t* classInfo = NULL; // Note: See JITDwarfEmitter::EmitExceptionTable(...) 
for corresponding // dwarf emission @@ -675,7 +675,7 @@ static void scan_eh_tab(scan_results &results, _Unwind_Action actions, // Walk call-site table looking for range that // includes current PC. uint8_t callSiteEncoding = *lsda++; -#if defined(__USING_SJLJ_EXCEPTIONS__) || defined(__USING_WASM_EXCEPTIONS__) +#if defined(__USING_SJLJ_EXCEPTIONS__) || defined(__WASM_EXCEPTIONS__) (void)callSiteEncoding; // When using SjLj/Wasm exceptions, callSiteEncoding is never used #endif uint32_t callSiteTableLength = static_cast(readULEB128(&lsda)); @@ -686,7 +686,7 @@ static void scan_eh_tab(scan_results &results, _Unwind_Action actions, while (callSitePtr < callSiteTableEnd) { // There is one entry per call site. -#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__USING_WASM_EXCEPTIONS__) +#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__WASM_EXCEPTIONS__) // The call sites are non-overlapping in [start, start+length) // The call sites are ordered in increasing value of start uintptr_t start = readEncodedPointer(&callSitePtr, callSiteEncoding); @@ -694,15 +694,15 @@ static void scan_eh_tab(scan_results &results, _Unwind_Action actions, uintptr_t landingPad = readEncodedPointer(&callSitePtr, callSiteEncoding); uintptr_t actionEntry = readULEB128(&callSitePtr); if ((start <= ipOffset) && (ipOffset < (start + length))) -#else // __USING_SJLJ_EXCEPTIONS__ || __USING_WASM_EXCEPTIONS__ +#else // __USING_SJLJ_EXCEPTIONS__ || __WASM_EXCEPTIONS__ // ip is 1-based index into this table uintptr_t landingPad = readULEB128(&callSitePtr); uintptr_t actionEntry = readULEB128(&callSitePtr); if (--ip == 0) -#endif // __USING_SJLJ_EXCEPTIONS__ || __USING_WASM_EXCEPTIONS__ +#endif // __USING_SJLJ_EXCEPTIONS__ || __WASM_EXCEPTIONS__ { // Found the call site containing ip. 
-#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__USING_WASM_EXCEPTIONS__) +#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__WASM_EXCEPTIONS__) if (landingPad == 0) { // No handler here @@ -710,16 +710,14 @@ static void scan_eh_tab(scan_results &results, _Unwind_Action actions, return; } landingPad = (uintptr_t)lpStart + landingPad; -#else // __USING_SJLJ_EXCEPTIONS__ || __USING_WASM_EXCEPTIONS__ +#else // __USING_SJLJ_EXCEPTIONS__ || __WASM_EXCEPTIONS__ ++landingPad; -#endif // __USING_SJLJ_EXCEPTIONS__ || __USING_WASM_EXCEPTIONS__ +#endif // __USING_SJLJ_EXCEPTIONS__ || __WASM_EXCEPTIONS__ results.landingPad = landingPad; if (actionEntry == 0) { // Found a cleanup - results.reason = actions & _UA_SEARCH_PHASE - ? _URC_CONTINUE_UNWIND - : _URC_HANDLER_FOUND; + results.reason = (actions & _UA_SEARCH_PHASE) ? _URC_CONTINUE_UNWIND : _URC_HANDLER_FOUND; return; } // Convert 1-based byte offset into @@ -840,7 +838,7 @@ static void scan_eh_tab(scan_results &results, _Unwind_Action actions, action += actionOffset; } // there is no break out of this loop, only return } -#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__USING_WASM_EXCEPTIONS__) +#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__WASM_EXCEPTIONS__) else if (ipOffset < start) { // There is no call site for this ip @@ -848,7 +846,7 @@ static void scan_eh_tab(scan_results &results, _Unwind_Action actions, // Possible stack corruption. 
call_terminate(native_exception, unwind_exception); } -#endif // !__USING_SJLJ_EXCEPTIONS__ && !__USING_WASM_EXCEPTIONS__ +#endif // !__USING_SJLJ_EXCEPTIONS__ && !__WASM_EXCEPTIONS__ } // there might be some tricky cases which break out of this loop // It is possible that no eh table entry specify how to handle @@ -905,7 +903,7 @@ _UA_CLEANUP_PHASE */ #if !defined(_LIBCXXABI_ARM_EHABI) -#ifdef __USING_WASM_EXCEPTIONS__ +#ifdef __WASM_EXCEPTIONS__ _Unwind_Reason_Code __gxx_personality_wasm0 #elif defined(__SEH__) && !defined(__USING_SJLJ_EXCEPTIONS__) static _Unwind_Reason_Code __gxx_personality_imp @@ -974,7 +972,7 @@ __gxx_personality_v0 exc->languageSpecificData = results.languageSpecificData; exc->catchTemp = reinterpret_cast(results.landingPad); exc->adjustedPtr = results.adjustedPtr; -#ifdef __USING_WASM_EXCEPTIONS__ +#ifdef __WASM_EXCEPTIONS__ // Wasm only uses a single phase (_UA_SEARCH_PHASE), so save the // results here. set_registers(unwind_exception, context, results); diff --git a/lib/libcxxabi/src/cxa_thread_atexit.cpp b/lib/libcxxabi/src/cxa_thread_atexit.cpp index 665f9e55694a..c6bd0aa323f2 100644 --- a/lib/libcxxabi/src/cxa_thread_atexit.cpp +++ b/lib/libcxxabi/src/cxa_thread_atexit.cpp @@ -8,7 +8,7 @@ #include "abort_message.h" #include "cxxabi.h" -#include <__threading_support> +#include <__thread/support.h> #ifndef _LIBCXXABI_HAS_NO_THREADS #if defined(__ELF__) && defined(_LIBCXXABI_LINK_PTHREAD_LIB) #pragma comment(lib, "pthread") diff --git a/lib/libcxxabi/src/demangle/ItaniumDemangle.h b/lib/libcxxabi/src/demangle/ItaniumDemangle.h index 5a53a18bcc5f..36bf45463636 100644 --- a/lib/libcxxabi/src/demangle/ItaniumDemangle.h +++ b/lib/libcxxabi/src/demangle/ItaniumDemangle.h @@ -39,13 +39,12 @@ DEMANGLE_NAMESPACE_BEGIN template class PODSmallVector { - static_assert(std::is_pod::value, - "T is required to be a plain old data type"); - + static_assert(std::is_trivial::value, + "T is required to be a trivial type"); T *First = nullptr; T *Last = 
nullptr; T *Cap = nullptr; - T Inline[N] = {0}; + T Inline[N] = {}; bool isInline() const { return First == Inline; } @@ -5542,7 +5541,7 @@ Node *AbstractManglingParser::parseFloatingLiteral() { return nullptr; std::string_view Data(First, N); for (char C : Data) - if (!std::isxdigit(C)) + if (!(C >= '0' && C <= '9') && !(C >= 'a' && C <= 'f')) return nullptr; First += N; if (!consumeIf('E')) @@ -5716,6 +5715,7 @@ Node *AbstractManglingParser::parseTemplateParam() { } // ::= Ty # type parameter +// ::= Tk [] # constrained type parameter // ::= Tn # non-type parameter // ::= Tt * E # template parameter // ::= Tp # parameter pack @@ -5847,7 +5847,7 @@ Node *AbstractManglingParser::parseTemplateArg() { } } -// ::= I * E +// ::= I * [Q ] E // extension, the abi says + template Node * diff --git a/lib/libcxxabi/src/fallback_malloc.cpp b/lib/libcxxabi/src/fallback_malloc.cpp index fa802b2d81a7..76bd2e9bcd9f 100644 --- a/lib/libcxxabi/src/fallback_malloc.cpp +++ b/lib/libcxxabi/src/fallback_malloc.cpp @@ -9,7 +9,7 @@ #include "fallback_malloc.h" #include "abort_message.h" -#include <__threading_support> +#include <__thread/support.h> #ifndef _LIBCXXABI_HAS_NO_THREADS #if defined(__ELF__) && defined(_LIBCXXABI_LINK_PTHREAD_LIB) #pragma comment(lib, "pthread") diff --git a/lib/libcxxabi/src/private_typeinfo.cpp b/lib/libcxxabi/src/private_typeinfo.cpp index 857ae25b7028..9dba91e1985e 100644 --- a/lib/libcxxabi/src/private_typeinfo.cpp +++ b/lib/libcxxabi/src/private_typeinfo.cpp @@ -44,13 +44,25 @@ #include #include #include +#include "abort_message.h" #ifdef _LIBCXXABI_FORGIVING_DYNAMIC_CAST -#include "abort_message.h" #include #include #endif +#if __has_feature(ptrauth_calls) +#include +#endif + +template +static inline T* strip_vtable(T* vtable) { +#if __has_feature(ptrauth_calls) + vtable = ptrauth_strip(vtable, ptrauth_key_cxx_vtable_pointer); +#endif + return vtable; +} + static inline bool is_equal(const std::type_info* x, const std::type_info* y, bool use_strcmp) @@ 
-102,10 +114,10 @@ void dyn_cast_get_derived_info(derived_object_info* info, const void* static_ptr reinterpret_cast(vtable) + offset_to_ti_proxy; info->dynamic_type = *(reinterpret_cast(ptr_to_ti_proxy)); #else - void **vtable = *static_cast(static_ptr); - info->offset_to_derived = reinterpret_cast(vtable[-2]); - info->dynamic_ptr = static_cast(static_ptr) + info->offset_to_derived; - info->dynamic_type = static_cast(vtable[-1]); + void** vtable = strip_vtable(*static_cast(static_ptr)); + info->offset_to_derived = reinterpret_cast(vtable[-2]); + info->dynamic_ptr = static_cast(static_ptr) + info->offset_to_derived; + info->dynamic_type = static_cast(vtable[-1]); #endif } @@ -470,7 +482,7 @@ __class_type_info::can_catch(const __shim_type_info* thrown_type, if (thrown_class_type == 0) return false; // bullet 2 - assert(adjustedPtr && "catching a class without an object?"); + _LIBCXXABI_ASSERT(adjustedPtr, "catching a class without an object?"); __dynamic_cast_info info = {thrown_class_type, 0, this, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true, nullptr}; info.number_of_dst_type = 1; thrown_class_type->has_unambiguous_public_base(&info, adjustedPtr, public_path); @@ -560,7 +572,7 @@ __base_class_type_info::has_unambiguous_public_base(__dynamic_cast_info* info, find the layout. 
*/ offset_to_base = __offset_flags >> __offset_shift; if (is_virtual) { - const char* vtable = *static_cast(adjustedPtr); + const char* vtable = strip_vtable(*static_cast(adjustedPtr)); offset_to_base = update_offset_to_base(vtable, offset_to_base); } } else if (!is_virtual) { @@ -1500,8 +1512,8 @@ __base_class_type_info::search_above_dst(__dynamic_cast_info* info, ptrdiff_t offset_to_base = __offset_flags >> __offset_shift; if (__offset_flags & __virtual_mask) { - const char* vtable = *static_cast(current_ptr); - offset_to_base = update_offset_to_base(vtable, offset_to_base); + const char* vtable = strip_vtable(*static_cast(current_ptr)); + offset_to_base = update_offset_to_base(vtable, offset_to_base); } __base_type->search_above_dst(info, dst_ptr, static_cast(current_ptr) + offset_to_base, @@ -1520,8 +1532,8 @@ __base_class_type_info::search_below_dst(__dynamic_cast_info* info, ptrdiff_t offset_to_base = __offset_flags >> __offset_shift; if (__offset_flags & __virtual_mask) { - const char* vtable = *static_cast(current_ptr); - offset_to_base = update_offset_to_base(vtable, offset_to_base); + const char* vtable = strip_vtable(*static_cast(current_ptr)); + offset_to_base = update_offset_to_base(vtable, offset_to_base); } __base_type->search_below_dst(info, static_cast(current_ptr) + offset_to_base, From 1c8f0b8909f342201f84def1efb8c6fb4386cc2c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20R=C3=B8nne=20Petersen?= Date: Fri, 23 Aug 2024 02:59:46 +0200 Subject: [PATCH 168/202] libcxx: Update to LLVM 19. * Moved the tz.cpp patch to experimental/tzdb.cpp. * Extended the __config_site patch to a few more files. 
--- .../include/__algorithm/adjacent_find.h | 6 +- lib/libcxx/include/__algorithm/all_of.h | 2 +- lib/libcxx/include/__algorithm/any_of.h | 2 +- .../include/__algorithm/binary_search.h | 4 +- lib/libcxx/include/__algorithm/clamp.h | 4 +- lib/libcxx/include/__algorithm/comp.h | 8 +- .../include/__algorithm/comp_ref_type.h | 6 +- lib/libcxx/include/__algorithm/copy.h | 6 +- .../include/__algorithm/copy_backward.h | 8 +- .../include/__algorithm/copy_move_common.h | 42 +- lib/libcxx/include/__algorithm/count.h | 2 +- lib/libcxx/include/__algorithm/count_if.h | 6 +- lib/libcxx/include/__algorithm/equal.h | 79 +- lib/libcxx/include/__algorithm/equal_range.h | 6 +- lib/libcxx/include/__algorithm/fill_n.h | 58 + lib/libcxx/include/__algorithm/find.h | 22 +- lib/libcxx/include/__algorithm/find_end.h | 4 +- .../include/__algorithm/find_first_of.h | 4 +- lib/libcxx/include/__algorithm/find_if.h | 2 +- lib/libcxx/include/__algorithm/find_if_not.h | 2 +- lib/libcxx/include/__algorithm/fold.h | 10 +- lib/libcxx/include/__algorithm/includes.h | 4 +- .../include/__algorithm/inplace_merge.h | 4 +- lib/libcxx/include/__algorithm/is_heap.h | 4 +- .../include/__algorithm/is_heap_until.h | 4 +- .../include/__algorithm/is_partitioned.h | 2 +- .../include/__algorithm/is_permutation.h | 10 +- lib/libcxx/include/__algorithm/is_sorted.h | 4 +- .../include/__algorithm/is_sorted_until.h | 4 +- .../include/__algorithm/iterator_operations.h | 54 + .../__algorithm/lexicographical_compare.h | 4 +- .../lexicographical_compare_three_way.h | 6 +- lib/libcxx/include/__algorithm/lower_bound.h | 58 +- .../include/__algorithm/make_projected.h | 8 +- lib/libcxx/include/__algorithm/max.h | 8 +- lib/libcxx/include/__algorithm/max_element.h | 4 +- lib/libcxx/include/__algorithm/min.h | 8 +- lib/libcxx/include/__algorithm/min_element.h | 4 +- lib/libcxx/include/__algorithm/minmax.h | 8 +- .../include/__algorithm/minmax_element.h | 7 +- lib/libcxx/include/__algorithm/mismatch.h | 181 +- 
lib/libcxx/include/__algorithm/move.h | 8 +- .../include/__algorithm/move_backward.h | 8 +- lib/libcxx/include/__algorithm/none_of.h | 2 +- lib/libcxx/include/__algorithm/partial_sort.h | 4 +- lib/libcxx/include/__algorithm/pop_heap.h | 4 +- lib/libcxx/include/__algorithm/pstl.h | 663 +++++++ .../__algorithm/pstl_any_all_none_of.h | 152 -- lib/libcxx/include/__algorithm/pstl_backend.h | 232 --- .../__algorithm/pstl_backends/cpu_backend.h | 68 - .../pstl_backends/cpu_backends/backend.h | 41 - .../pstl_backends/cpu_backends/fill.h | 62 - .../pstl_backends/cpu_backends/for_each.h | 62 - .../pstl_backends/cpu_backends/libdispatch.h | 347 ---- .../pstl_backends/cpu_backends/merge.h | 85 - .../pstl_backends/cpu_backends/serial.h | 83 - .../pstl_backends/cpu_backends/stable_sort.h | 45 - .../pstl_backends/cpu_backends/thread.h | 84 - .../pstl_backends/cpu_backends/transform.h | 138 -- .../cpu_backends/transform_reduce.h | 202 -- lib/libcxx/include/__algorithm/pstl_copy.h | 121 -- lib/libcxx/include/__algorithm/pstl_count.h | 121 -- lib/libcxx/include/__algorithm/pstl_equal.h | 175 -- lib/libcxx/include/__algorithm/pstl_fill.h | 116 -- lib/libcxx/include/__algorithm/pstl_find.h | 141 -- .../include/__algorithm/pstl_for_each.h | 108 -- .../__algorithm/pstl_frontend_dispatch.h | 44 - .../include/__algorithm/pstl_generate.h | 114 -- .../include/__algorithm/pstl_is_partitioned.h | 77 - lib/libcxx/include/__algorithm/pstl_merge.h | 92 - lib/libcxx/include/__algorithm/pstl_move.h | 84 - lib/libcxx/include/__algorithm/pstl_replace.h | 247 --- .../include/__algorithm/pstl_rotate_copy.h | 85 - lib/libcxx/include/__algorithm/pstl_sort.h | 82 - .../include/__algorithm/pstl_stable_sort.h | 61 - .../include/__algorithm/pstl_transform.h | 120 -- lib/libcxx/include/__algorithm/push_heap.h | 4 +- .../__algorithm/ranges_adjacent_find.h | 4 +- .../include/__algorithm/ranges_all_of.h | 4 +- .../include/__algorithm/ranges_any_of.h | 4 +- .../__algorithm/ranges_binary_search.h | 4 +- 
lib/libcxx/include/__algorithm/ranges_clamp.h | 2 +- .../include/__algorithm/ranges_contains.h | 4 +- .../__algorithm/ranges_contains_subrange.h | 97 + lib/libcxx/include/__algorithm/ranges_count.h | 4 +- .../include/__algorithm/ranges_count_if.h | 4 +- .../include/__algorithm/ranges_ends_with.h | 10 +- lib/libcxx/include/__algorithm/ranges_equal.h | 4 +- .../include/__algorithm/ranges_equal_range.h | 4 +- lib/libcxx/include/__algorithm/ranges_find.h | 8 +- .../include/__algorithm/ranges_find_end.h | 4 +- .../__algorithm/ranges_find_first_of.h | 4 +- .../include/__algorithm/ranges_find_if.h | 4 +- .../include/__algorithm/ranges_find_if_not.h | 4 +- .../include/__algorithm/ranges_find_last.h | 175 ++ .../include/__algorithm/ranges_includes.h | 4 +- .../include/__algorithm/ranges_is_heap.h | 4 +- .../__algorithm/ranges_is_heap_until.h | 4 +- .../__algorithm/ranges_is_partitioned.h | 4 +- .../__algorithm/ranges_is_permutation.h | 4 +- .../include/__algorithm/ranges_is_sorted.h | 4 +- .../__algorithm/ranges_is_sorted_until.h | 4 +- .../ranges_lexicographical_compare.h | 4 +- .../include/__algorithm/ranges_lower_bound.h | 4 +- lib/libcxx/include/__algorithm/ranges_max.h | 8 +- .../include/__algorithm/ranges_max_element.h | 4 +- lib/libcxx/include/__algorithm/ranges_min.h | 8 +- .../include/__algorithm/ranges_min_element.h | 4 +- .../include/__algorithm/ranges_minmax.h | 23 +- .../__algorithm/ranges_minmax_element.h | 4 +- .../include/__algorithm/ranges_mismatch.h | 24 +- .../include/__algorithm/ranges_none_of.h | 4 +- .../include/__algorithm/ranges_remove.h | 4 +- .../include/__algorithm/ranges_remove_if.h | 4 +- .../include/__algorithm/ranges_search.h | 4 +- .../include/__algorithm/ranges_search_n.h | 4 +- .../include/__algorithm/ranges_starts_with.h | 8 +- .../include/__algorithm/ranges_unique.h | 4 +- .../include/__algorithm/ranges_upper_bound.h | 4 +- lib/libcxx/include/__algorithm/remove.h | 2 +- lib/libcxx/include/__algorithm/remove_if.h | 2 +- 
lib/libcxx/include/__algorithm/rotate.h | 2 +- lib/libcxx/include/__algorithm/search.h | 51 +- lib/libcxx/include/__algorithm/search_n.h | 43 +- .../include/__algorithm/set_intersection.h | 122 +- lib/libcxx/include/__algorithm/simd_utils.h | 164 ++ lib/libcxx/include/__algorithm/sort.h | 5 +- lib/libcxx/include/__algorithm/sort_heap.h | 4 +- lib/libcxx/include/__algorithm/stable_sort.h | 2 +- lib/libcxx/include/__algorithm/unique.h | 6 +- lib/libcxx/include/__algorithm/unwrap_iter.h | 2 +- lib/libcxx/include/__algorithm/upper_bound.h | 6 +- lib/libcxx/include/__assert | 81 + lib/libcxx/include/__assertion_handler | 13 +- lib/libcxx/include/__atomic/aliases.h | 3 +- lib/libcxx/include/__atomic/atomic.h | 12 +- lib/libcxx/include/__atomic/atomic_base.h | 52 +- lib/libcxx/include/__atomic/atomic_flag.h | 67 +- lib/libcxx/include/__atomic/atomic_init.h | 8 +- lib/libcxx/include/__atomic/atomic_ref.h | 378 ++++ lib/libcxx/include/__atomic/atomic_sync.h | 177 +- .../include/__atomic/check_memory_order.h | 4 + lib/libcxx/include/__atomic/cxx_atomic_impl.h | 318 +--- lib/libcxx/include/__atomic/memory_order.h | 2 +- lib/libcxx/include/__atomic/to_gcc_order.h | 54 + lib/libcxx/include/__availability | 324 ---- lib/libcxx/include/__bit/bit_cast.h | 11 +- lib/libcxx/include/__bit/bit_ceil.h | 4 +- lib/libcxx/include/__bit/bit_floor.h | 2 +- lib/libcxx/include/__bit/bit_width.h | 2 +- lib/libcxx/include/__bit/byteswap.h | 2 +- lib/libcxx/include/__bit/countl.h | 15 +- lib/libcxx/include/__bit/countr.h | 23 +- lib/libcxx/include/__bit/has_single_bit.h | 2 +- lib/libcxx/include/__bit/popcount.h | 9 +- lib/libcxx/include/__bit/rotate.h | 37 +- lib/libcxx/include/__bit_reference | 92 +- lib/libcxx/include/__charconv/chars_format.h | 9 +- .../include/__charconv/from_chars_integral.h | 1 + .../include/__charconv/to_chars_base_10.h | 1 + .../__charconv/to_chars_floating_point.h | 1 - .../include/__charconv/to_chars_integral.h | 1 + lib/libcxx/include/__charconv/traits.h | 1 + 
lib/libcxx/include/__chrono/convert_to_tm.h | 16 + lib/libcxx/include/__chrono/duration.h | 20 +- lib/libcxx/include/__chrono/exception.h | 135 ++ lib/libcxx/include/__chrono/file_clock.h | 1 - lib/libcxx/include/__chrono/formatter.h | 221 ++- lib/libcxx/include/__chrono/leap_second.h | 126 ++ lib/libcxx/include/__chrono/local_info.h | 50 + lib/libcxx/include/__chrono/ostream.h | 53 +- lib/libcxx/include/__chrono/sys_info.h | 51 + lib/libcxx/include/__chrono/time_point.h | 31 +- lib/libcxx/include/__chrono/time_zone.h | 182 ++ lib/libcxx/include/__chrono/time_zone_link.h | 79 + lib/libcxx/include/__chrono/tzdb.h | 55 +- lib/libcxx/include/__chrono/tzdb_list.h | 59 +- lib/libcxx/include/__chrono/weekday.h | 3 + lib/libcxx/include/__chrono/year_month_day.h | 32 +- lib/libcxx/include/__chrono/zoned_time.h | 227 +++ lib/libcxx/include/__compare/partial_order.h | 2 + lib/libcxx/include/__compare/strong_order.h | 25 +- .../include/__compare/synth_three_way.h | 9 +- lib/libcxx/include/__compare/weak_order.h | 14 +- lib/libcxx/include/__concepts/class_or_enum.h | 5 - lib/libcxx/include/__concepts/swappable.h | 4 +- .../__condition_variable/condition_variable.h | 3 +- lib/libcxx/include/__config | 571 ++---- lib/libcxx/include/__configuration/abi.h | 172 ++ .../include/__configuration/availability.h | 400 ++++ lib/libcxx/include/__configuration/compiler.h | 51 + lib/libcxx/include/__configuration/language.h | 46 + lib/libcxx/include/__configuration/platform.h | 54 + lib/libcxx/include/__debug_utils/sanitizers.h | 104 ++ .../include/__exception/exception_ptr.h | 34 +- .../include/__exception/nested_exception.h | 12 +- lib/libcxx/include/__exception/operations.h | 1 - .../include/__expected/bad_expected_access.h | 27 +- lib/libcxx/include/__expected/expected.h | 24 +- .../include/__filesystem/copy_options.h | 1 - .../include/__filesystem/directory_entry.h | 1 - .../include/__filesystem/directory_iterator.h | 1 - .../include/__filesystem/directory_options.h | 1 - 
lib/libcxx/include/__filesystem/file_status.h | 1 - .../include/__filesystem/file_time_type.h | 1 - lib/libcxx/include/__filesystem/file_type.h | 1 - .../include/__filesystem/filesystem_error.h | 1 - lib/libcxx/include/__filesystem/operations.h | 1 - lib/libcxx/include/__filesystem/path.h | 6 +- .../include/__filesystem/path_iterator.h | 1 - .../include/__filesystem/perm_options.h | 1 - lib/libcxx/include/__filesystem/perms.h | 1 - .../recursive_directory_iterator.h | 1 - lib/libcxx/include/__filesystem/space_info.h | 1 - lib/libcxx/include/__filesystem/u8path.h | 1 - lib/libcxx/include/__format/concepts.h | 6 +- .../include/__format/container_adaptor.h | 4 +- .../include/__format/escaped_output_table.h | 1649 ++++++++--------- .../extended_grapheme_cluster_table.h | 2 +- lib/libcxx/include/__format/format_arg.h | 112 +- .../include/__format/format_arg_store.h | 3 +- lib/libcxx/include/__format/format_args.h | 5 +- lib/libcxx/include/__format/format_context.h | 41 +- .../include/__format/format_functions.h | 36 +- lib/libcxx/include/__format/formatter.h | 3 +- lib/libcxx/include/__format/formatter_bool.h | 3 +- lib/libcxx/include/__format/formatter_char.h | 1 - .../__format/formatter_floating_point.h | 3 +- .../include/__format/formatter_integer.h | 1 - .../include/__format/formatter_integral.h | 2 +- .../include/__format/formatter_output.h | 20 +- .../include/__format/formatter_pointer.h | 1 - .../include/__format/formatter_string.h | 1 - .../__format/indic_conjunct_break_table.h | 350 ++++ .../include/__format/parser_std_format_spec.h | 29 +- lib/libcxx/include/__format/unicode.h | 313 +++- .../include/__format/width_estimation_table.h | 9 +- lib/libcxx/include/__format/write_escaped.h | 28 +- lib/libcxx/include/__functional/bind.h | 13 +- lib/libcxx/include/__functional/bind_back.h | 16 +- lib/libcxx/include/__functional/bind_front.h | 6 +- lib/libcxx/include/__functional/function.h | 38 +- lib/libcxx/include/__functional/hash.h | 13 +- 
lib/libcxx/include/__functional/identity.h | 4 +- .../include/__functional/is_transparent.h | 5 +- lib/libcxx/include/__functional/mem_fn.h | 4 +- lib/libcxx/include/__functional/mem_fun_ref.h | 4 +- lib/libcxx/include/__functional/not_fn.h | 1 - lib/libcxx/include/__functional/operations.h | 90 +- .../__functional/pointer_to_binary_function.h | 4 +- .../__functional/pointer_to_unary_function.h | 4 +- .../include/__functional/ranges_operations.h | 8 +- .../include/__functional/reference_wrapper.h | 60 +- .../include/__functional/unary_negate.h | 4 +- lib/libcxx/include/__fwd/array.h | 20 + lib/libcxx/include/__fwd/complex.h | 42 + lib/libcxx/include/__fwd/deque.h | 26 + .../{__format/format_fwd.h => __fwd/format.h} | 7 +- lib/libcxx/include/__fwd/functional.h | 28 + lib/libcxx/include/__fwd/get.h | 99 - lib/libcxx/include/__fwd/ios.h | 2 + lib/libcxx/include/__fwd/{hash.h => memory.h} | 10 +- lib/libcxx/include/__fwd/memory_resource.h | 1 - lib/libcxx/include/__fwd/pair.h | 20 + lib/libcxx/include/__fwd/queue.h | 31 + lib/libcxx/include/__fwd/sstream.h | 1 + lib/libcxx/include/__fwd/stack.h | 26 + lib/libcxx/include/__fwd/string.h | 5 +- lib/libcxx/include/__fwd/subrange.h | 15 +- lib/libcxx/include/__fwd/tuple.h | 23 + lib/libcxx/include/__fwd/vector.h | 26 + lib/libcxx/include/__hash_table | 80 +- lib/libcxx/include/__iterator/access.h | 4 +- lib/libcxx/include/__iterator/advance.h | 6 +- .../include/__iterator/aliasing_iterator.h | 127 ++ lib/libcxx/include/__iterator/bounded_iter.h | 101 +- .../include/__iterator/common_iterator.h | 2 +- lib/libcxx/include/__iterator/concepts.h | 15 +- .../include/__iterator/counted_iterator.h | 2 +- .../__iterator/cpp17_iterator_concepts.h | 44 +- lib/libcxx/include/__iterator/data.h | 4 +- lib/libcxx/include/__iterator/empty.h | 8 +- lib/libcxx/include/__iterator/iter_move.h | 2 +- lib/libcxx/include/__iterator/iter_swap.h | 2 +- .../include/__iterator/iterator_traits.h | 5 +- lib/libcxx/include/__iterator/move_iterator.h | 
23 +- .../__iterator/ranges_iterator_traits.h | 4 +- .../include/__iterator/reverse_iterator.h | 223 +-- lib/libcxx/include/__iterator/size.h | 8 +- lib/libcxx/include/__iterator/wrap_iter.h | 31 +- lib/libcxx/include/__locale | 42 +- .../include/__locale_dir/locale_base_api.h | 98 + .../__locale_dir/locale_base_api/android.h | 50 + .../locale_base_api/fuchsia.h} | 18 +- .../locale_base_api/ibm.h} | 30 +- .../locale_base_api/locale_guard.h | 5 +- .../__locale_dir/locale_base_api/musl.h | 31 + .../__locale_dir/locale_base_api/newlib.h | 12 + .../locale_base_api/openbsd.h} | 22 +- .../locale_base_api/win32.h} | 10 +- lib/libcxx/include/__math/abs.h | 8 +- lib/libcxx/include/__math/copysign.h | 7 +- .../include/__math/exponential_functions.h | 2 +- lib/libcxx/include/__math/fdim.h | 2 +- lib/libcxx/include/__math/fma.h | 6 +- lib/libcxx/include/__math/hypot.h | 63 +- .../__math/inverse_trigonometric_functions.h | 2 +- lib/libcxx/include/__math/min_max.h | 20 +- lib/libcxx/include/__math/modulo.h | 2 +- lib/libcxx/include/__math/remainder.h | 4 +- lib/libcxx/include/__math/roots.h | 8 +- .../include/__math/rounding_functions.h | 50 +- lib/libcxx/include/__math/special_functions.h | 84 + lib/libcxx/include/__math/traits.h | 58 +- lib/libcxx/include/__mdspan/extents.h | 25 +- lib/libcxx/include/__mdspan/layout_left.h | 2 +- lib/libcxx/include/__mdspan/layout_right.h | 2 +- lib/libcxx/include/__mdspan/layout_stride.h | 2 +- lib/libcxx/include/__mdspan/mdspan.h | 12 +- .../include/__memory/allocate_at_least.h | 26 +- lib/libcxx/include/__memory/allocator.h | 48 +- .../include/__memory/allocator_traits.h | 85 +- lib/libcxx/include/__memory/compressed_pair.h | 18 +- lib/libcxx/include/__memory/construct_at.h | 2 +- lib/libcxx/include/__memory/inout_ptr.h | 109 ++ lib/libcxx/include/__memory/out_ptr.h | 101 + lib/libcxx/include/__memory/pointer_traits.h | 90 +- lib/libcxx/include/__memory/shared_ptr.h | 122 +- lib/libcxx/include/__memory/swap_allocator.h | 4 +- 
.../include/__memory/temporary_buffer.h | 3 +- .../__memory/uninitialized_algorithms.h | 99 +- lib/libcxx/include/__memory/unique_ptr.h | 55 +- .../__memory/uses_allocator_construction.h | 20 +- .../__memory_resource/memory_resource.h | 6 +- .../monotonic_buffer_resource.h | 1 - .../__memory_resource/polymorphic_allocator.h | 4 +- .../synchronized_pool_resource.h | 1 - .../unsynchronized_pool_resource.h | 1 - lib/libcxx/include/__mutex/lock_guard.h | 11 +- lib/libcxx/include/__mutex/mutex.h | 4 +- lib/libcxx/include/__mutex/tag_types.h | 10 +- lib/libcxx/include/__mutex/unique_lock.h | 20 +- lib/libcxx/include/__node_handle | 2 +- lib/libcxx/include/__numeric/gcd_lcm.h | 74 +- lib/libcxx/include/__numeric/midpoint.h | 18 +- lib/libcxx/include/__numeric/pstl.h | 174 ++ lib/libcxx/include/__numeric/pstl_reduce.h | 109 -- .../include/__numeric/pstl_transform_reduce.h | 156 -- .../include/__numeric/saturation_arithmetic.h | 42 +- lib/libcxx/include/__ostream/basic_ostream.h | 860 +++++++++ lib/libcxx/include/__ostream/print.h | 179 ++ lib/libcxx/include/__pstl/backend.h | 35 + lib/libcxx/include/__pstl/backend_fwd.h | 301 +++ lib/libcxx/include/__pstl/backends/default.h | 503 +++++ .../include/__pstl/backends/libdispatch.h | 397 ++++ lib/libcxx/include/__pstl/backends/serial.h | 181 ++ .../include/__pstl/backends/std_thread.h | 136 ++ .../cpu_algos}/any_of.h | 65 +- .../include/__pstl/cpu_algos/cpu_traits.h | 86 + lib/libcxx/include/__pstl/cpu_algos/fill.h | 66 + .../cpu_algos}/find_if.h | 82 +- .../include/__pstl/cpu_algos/for_each.h | 66 + lib/libcxx/include/__pstl/cpu_algos/merge.h | 85 + .../include/__pstl/cpu_algos/stable_sort.h | 47 + .../include/__pstl/cpu_algos/transform.h | 153 ++ .../__pstl/cpu_algos/transform_reduce.h | 216 +++ lib/libcxx/include/__pstl/dispatch.h | 66 + lib/libcxx/include/__pstl/handle_exception.h | 57 + .../include/__random/discard_block_engine.h | 4 +- .../__random/linear_congruential_engine.h | 103 +- 
.../__random/negative_binomial_distribution.h | 1 + lib/libcxx/include/__random/seed_seq.h | 8 +- lib/libcxx/include/__ranges/access.h | 15 +- lib/libcxx/include/__ranges/all.h | 5 +- lib/libcxx/include/__ranges/as_rvalue_view.h | 16 +- lib/libcxx/include/__ranges/chunk_by_view.h | 4 +- lib/libcxx/include/__ranges/common_view.h | 10 +- lib/libcxx/include/__ranges/counted.h | 5 +- lib/libcxx/include/__ranges/data.h | 7 +- lib/libcxx/include/__ranges/drop_view.h | 16 +- lib/libcxx/include/__ranges/elements_view.h | 2 +- lib/libcxx/include/__ranges/empty.h | 2 +- lib/libcxx/include/__ranges/iota_view.h | 9 +- lib/libcxx/include/__ranges/lazy_split_view.h | 4 +- lib/libcxx/include/__ranges/movable_box.h | 2 - lib/libcxx/include/__ranges/range_adaptor.h | 53 +- lib/libcxx/include/__ranges/rbegin.h | 5 +- lib/libcxx/include/__ranges/ref_view.h | 2 +- lib/libcxx/include/__ranges/rend.h | 10 +- lib/libcxx/include/__ranges/repeat_view.h | 17 +- lib/libcxx/include/__ranges/reverse_view.h | 5 +- lib/libcxx/include/__ranges/single_view.h | 2 + lib/libcxx/include/__ranges/size.h | 5 +- lib/libcxx/include/__ranges/split_view.h | 4 +- lib/libcxx/include/__ranges/subrange.h | 19 +- lib/libcxx/include/__ranges/take_view.h | 4 +- lib/libcxx/include/__ranges/to.h | 15 +- lib/libcxx/include/__ranges/transform_view.h | 7 - lib/libcxx/include/__ranges/view_interface.h | 17 +- lib/libcxx/include/__ranges/zip_view.h | 11 +- lib/libcxx/include/__split_buffer | 24 +- .../include/__stop_token/stop_callback.h | 10 +- lib/libcxx/include/__stop_token/stop_source.h | 1 - lib/libcxx/include/__stop_token/stop_state.h | 2 +- lib/libcxx/include/__stop_token/stop_token.h | 1 - lib/libcxx/include/__string/char_traits.h | 434 +---- .../include/__string/constexpr_c_functions.h | 27 +- .../include/__support/android/locale_bionic.h | 72 - lib/libcxx/include/__support/musl/xlocale.h | 53 - lib/libcxx/include/__support/newlib/xlocale.h | 22 - .../__support/xlocale/__nop_locale_mgmt.h | 16 +- 
.../__support/xlocale/__posix_l_fallback.h | 76 +- .../__support/xlocale/__strtonum_fallback.h | 34 +- lib/libcxx/include/__system_error/errc.h | 75 +- .../include/__system_error/error_category.h | 4 +- lib/libcxx/include/__thread/formatter.h | 2 +- lib/libcxx/include/__thread/id.h | 4 +- lib/libcxx/include/__thread/jthread.h | 3 +- .../include/__thread/poll_with_backoff.h | 17 +- lib/libcxx/include/__thread/support.h | 123 ++ lib/libcxx/include/__thread/support/c11.h | 191 ++ .../include/__thread/support/external.h | 21 + lib/libcxx/include/__thread/support/pthread.h | 221 +++ lib/libcxx/include/__thread/support/windows.h | 133 ++ lib/libcxx/include/__thread/this_thread.h | 2 +- lib/libcxx/include/__thread/thread.h | 13 +- .../include/__thread/timed_backoff_policy.h | 2 +- lib/libcxx/include/__threading_support | 456 ----- lib/libcxx/include/__tree | 45 +- lib/libcxx/include/__tuple/find_index.h | 62 + .../ignore.h} | 28 +- lib/libcxx/include/__tuple/make_tuple_types.h | 6 +- lib/libcxx/include/__tuple/sfinae_helpers.h | 43 +- lib/libcxx/include/__tuple/tuple_element.h | 35 +- lib/libcxx/include/__tuple/tuple_like.h | 27 +- .../include/__tuple/tuple_like_no_subrange.h | 61 + lib/libcxx/include/__tuple/tuple_size.h | 7 +- .../include/__type_traits/add_pointer.h | 2 - lib/libcxx/include/__type_traits/apply_cv.h | 78 - .../include/__type_traits/common_type.h | 6 +- .../include/__type_traits/conjunction.h | 7 + lib/libcxx/include/__type_traits/copy_cv.h | 31 +- lib/libcxx/include/__type_traits/datasizeof.h | 48 +- lib/libcxx/include/__type_traits/decay.h | 10 +- .../{operation_traits.h => desugars_to.h} | 10 +- .../has_unique_object_representation.h | 10 +- lib/libcxx/include/__type_traits/invoke.h | 203 +- lib/libcxx/include/__type_traits/is_array.h | 5 +- .../include/__type_traits/is_assignable.h | 21 + .../include/__type_traits/is_constructible.h | 28 + .../include/__type_traits/is_convertible.h | 6 - .../__type_traits/is_copy_assignable.h | 36 - 
.../__type_traits/is_copy_constructible.h | 35 - lib/libcxx/include/__type_traits/is_enum.h | 10 + .../__type_traits/is_equality_comparable.h | 3 +- .../include/__type_traits/is_function.h | 14 +- .../include/__type_traits/is_fundamental.h | 2 +- .../is_implicitly_default_constructible.h | 2 +- .../include/__type_traits/is_literal_type.h | 4 +- .../is_member_function_pointer.h | 62 - .../__type_traits/is_member_object_pointer.h | 46 - .../include/__type_traits/is_member_pointer.h | 20 +- .../__type_traits/is_move_assignable.h | 34 - .../__type_traits/is_move_constructible.h | 33 - .../__type_traits/is_nothrow_assignable.h | 24 + .../__type_traits/is_nothrow_constructible.h | 56 +- .../__type_traits/is_nothrow_convertible.h | 12 + .../is_nothrow_copy_assignable.h | 36 - .../is_nothrow_copy_constructible.h | 48 - .../is_nothrow_default_constructible.h | 32 - .../__type_traits/is_nothrow_destructible.h | 25 +- .../is_nothrow_move_assignable.h | 36 - .../is_nothrow_move_constructible.h | 45 - .../include/__type_traits/is_null_pointer.h | 12 +- lib/libcxx/include/__type_traits/is_object.h | 25 +- .../include/__type_traits/is_reference.h | 29 +- .../__type_traits/is_reference_wrapper.h | 4 +- lib/libcxx/include/__type_traits/is_scalar.h | 2 +- .../include/__type_traits/is_scoped_enum.h | 40 - .../include/__type_traits/is_swappable.h | 108 +- .../__type_traits/is_trivially_assignable.h | 25 + .../is_trivially_constructible.h | 29 + .../is_trivially_copy_assignable.h | 36 - .../is_trivially_copy_constructible.h | 33 - .../is_trivially_default_constructible.h | 32 - .../is_trivially_move_assignable.h | 36 - .../is_trivially_move_constructible.h | 33 - .../__type_traits/is_trivially_relocatable.h | 42 + lib/libcxx/include/__type_traits/is_void.h | 20 +- .../include/__type_traits/make_signed.h | 4 +- .../include/__type_traits/make_unsigned.h | 4 +- .../noexcept_move_assign_container.h | 2 +- lib/libcxx/include/__type_traits/promote.h | 42 +- 
.../include/__type_traits/remove_pointer.h | 5 + .../include/__type_traits/remove_reference.h | 14 +- lib/libcxx/include/__type_traits/unwrap_ref.h | 7 +- lib/libcxx/include/__utility/as_const.h | 2 +- .../include/__utility/exception_guard.h | 6 +- lib/libcxx/include/__utility/exchange.h | 2 +- lib/libcxx/include/__utility/forward.h | 4 +- lib/libcxx/include/__utility/forward_like.h | 4 +- .../include/__utility/integer_sequence.h | 87 +- .../include/__utility/is_pointer_in_range.h | 14 +- lib/libcxx/include/__utility/is_valid_range.h | 37 + lib/libcxx/include/__utility/move.h | 8 +- lib/libcxx/include/__utility/no_destroy.h | 29 +- lib/libcxx/include/__utility/pair.h | 310 ++-- .../private_constructor_tag.h} | 20 +- lib/libcxx/include/__utility/rel_ops.h | 8 +- lib/libcxx/include/__utility/small_buffer.h | 2 +- lib/libcxx/include/__utility/swap.h | 12 +- lib/libcxx/include/__utility/to_underlying.h | 2 +- lib/libcxx/include/__verbose_abort | 3 +- lib/libcxx/include/algorithm | 283 +-- lib/libcxx/include/any | 11 +- lib/libcxx/include/array | 43 +- lib/libcxx/include/atomic | 17 +- lib/libcxx/include/barrier | 65 +- lib/libcxx/include/bit | 37 +- lib/libcxx/include/bitset | 6 +- lib/libcxx/include/cassert | 1 - lib/libcxx/include/ccomplex | 1 - lib/libcxx/include/cctype | 1 - lib/libcxx/include/cerrno | 8 +- lib/libcxx/include/cfenv | 1 - lib/libcxx/include/cfloat | 1 - lib/libcxx/include/charconv | 33 +- lib/libcxx/include/chrono | 233 ++- lib/libcxx/include/cinttypes | 1 - lib/libcxx/include/ciso646 | 1 - lib/libcxx/include/climits | 1 - lib/libcxx/include/clocale | 1 - lib/libcxx/include/cmath | 190 +- lib/libcxx/include/codecvt | 5 +- lib/libcxx/include/compare | 38 +- lib/libcxx/include/complex | 380 ++-- lib/libcxx/include/concepts | 53 +- lib/libcxx/include/condition_variable | 2 - lib/libcxx/include/coroutine | 14 +- lib/libcxx/include/csetjmp | 1 - lib/libcxx/include/csignal | 1 - lib/libcxx/include/cstdarg | 1 - lib/libcxx/include/cstdbool | 1 - 
lib/libcxx/include/cstddef | 3 +- lib/libcxx/include/cstdint | 1 - lib/libcxx/include/cstdio | 1 - lib/libcxx/include/cstdlib | 1 - lib/libcxx/include/cstring | 1 - lib/libcxx/include/ctgmath | 1 - lib/libcxx/include/ctime | 1 - lib/libcxx/include/cuchar | 1 - lib/libcxx/include/cwchar | 9 +- lib/libcxx/include/cwctype | 1 - lib/libcxx/include/deque | 136 +- lib/libcxx/include/exception | 1 - lib/libcxx/include/execution | 5 +- lib/libcxx/include/expected | 19 +- lib/libcxx/include/experimental/__memory | 94 - .../include/experimental/__simd/reference.h | 41 + .../include/experimental/__simd/scalar.h | 8 + lib/libcxx/include/experimental/__simd/simd.h | 11 + .../include/experimental/__simd/simd_mask.h | 11 + .../include/experimental/__simd/vec_ext.h | 12 + lib/libcxx/include/experimental/iterator | 1 - lib/libcxx/include/experimental/memory | 6 +- .../include/experimental/propagate_const | 117 +- lib/libcxx/include/experimental/simd | 3 +- lib/libcxx/include/experimental/type_traits | 7 +- lib/libcxx/include/experimental/utility | 1 - lib/libcxx/include/ext/hash_map | 1 - lib/libcxx/include/ext/hash_set | 1 - lib/libcxx/include/filesystem | 38 +- lib/libcxx/include/format | 88 +- lib/libcxx/include/forward_list | 87 +- lib/libcxx/include/fstream | 308 +-- lib/libcxx/include/functional | 63 +- lib/libcxx/include/future | 204 +- lib/libcxx/include/initializer_list | 1 - lib/libcxx/include/iomanip | 3 +- lib/libcxx/include/ios | 143 +- lib/libcxx/include/iosfwd | 19 +- lib/libcxx/include/iostream | 1 - lib/libcxx/include/istream | 18 +- lib/libcxx/include/iterator | 62 +- lib/libcxx/include/latch | 48 +- lib/libcxx/include/limits | 446 ++--- lib/libcxx/include/list | 102 +- lib/libcxx/include/locale | 355 ++-- lib/libcxx/include/locale.h | 4 - lib/libcxx/include/map | 134 +- lib/libcxx/include/math.h | 8 +- lib/libcxx/include/mdspan | 37 +- lib/libcxx/include/memory | 63 +- lib/libcxx/include/memory_resource | 26 +- lib/libcxx/include/mutex | 39 +- 
lib/libcxx/include/new | 36 +- lib/libcxx/include/numbers | 1 - lib/libcxx/include/numeric | 41 +- lib/libcxx/include/optional | 60 +- lib/libcxx/include/ostream | 1003 +--------- lib/libcxx/include/print | 15 +- lib/libcxx/include/queue | 300 ++- lib/libcxx/include/random | 4 - lib/libcxx/include/ranges | 101 +- lib/libcxx/include/ratio | 43 +- lib/libcxx/include/regex | 67 +- lib/libcxx/include/scoped_allocator | 32 +- lib/libcxx/include/semaphore | 81 +- lib/libcxx/include/set | 96 +- lib/libcxx/include/shared_mutex | 54 +- lib/libcxx/include/span | 79 +- lib/libcxx/include/sstream | 199 +- lib/libcxx/include/stack | 51 +- lib/libcxx/include/stdexcept | 15 +- lib/libcxx/include/stdlib.h | 12 +- lib/libcxx/include/stop_token | 24 +- lib/libcxx/include/streambuf | 11 +- lib/libcxx/include/string | 426 +++-- lib/libcxx/include/string_view | 52 +- lib/libcxx/include/strstream | 41 +- lib/libcxx/include/syncstream | 2 + lib/libcxx/include/system_error | 1 - lib/libcxx/include/thread | 28 +- lib/libcxx/include/tuple | 298 ++- lib/libcxx/include/type_traits | 81 +- lib/libcxx/include/typeindex | 1 - lib/libcxx/include/typeinfo | 17 +- lib/libcxx/include/unordered_map | 104 +- lib/libcxx/include/unordered_set | 70 +- lib/libcxx/include/utility | 43 +- lib/libcxx/include/valarray | 258 ++- lib/libcxx/include/variant | 324 ++-- lib/libcxx/include/vector | 229 ++- lib/libcxx/include/version | 89 +- lib/libcxx/src/atomic.cpp | 27 +- lib/libcxx/src/barrier.cpp | 22 +- lib/libcxx/src/call_once.cpp | 2 +- lib/libcxx/src/chrono.cpp | 4 +- .../src/condition_variable_destructor.cpp | 2 +- lib/libcxx/src/expected.cpp | 13 + .../src/experimental/chrono_exception.cpp | 22 + .../include/tzdb/time_zone_private.h | 57 + .../experimental/include/tzdb/types_private.h | 117 ++ .../include/tzdb/tzdb_list_private.h | 104 ++ .../experimental/include/tzdb/tzdb_private.h | 28 + lib/libcxx/src/experimental/time_zone.cpp | 1055 +++++++++++ lib/libcxx/src/experimental/tzdb.cpp | 781 ++++++++ 
lib/libcxx/src/experimental/tzdb_list.cpp | 43 + lib/libcxx/src/filesystem/operations.cpp | 41 +- lib/libcxx/src/filesystem/path.cpp | 28 +- lib/libcxx/src/filesystem/path_parser.h | 28 +- lib/libcxx/src/functional.cpp | 2 - lib/libcxx/src/include/overridable_function.h | 14 +- lib/libcxx/src/ios.cpp | 4 + lib/libcxx/src/iostream.cpp | 36 +- lib/libcxx/src/locale.cpp | 14 +- lib/libcxx/src/mutex_destructor.cpp | 2 +- lib/libcxx/src/new.cpp | 1 + lib/libcxx/src/optional.cpp | 1 - lib/libcxx/src/ostream.cpp | 1 - lib/libcxx/src/pstl/libdispatch.cpp | 9 +- lib/libcxx/src/random.cpp | 2 +- lib/libcxx/src/support/win32/thread_win32.cpp | 2 +- lib/libcxx/src/tz.cpp | 148 -- lib/libcxx/src/tzdb_list.cpp | 113 -- src/libcxx.zig | 4 +- 666 files changed, 20706 insertions(+), 15510 deletions(-) create mode 100644 lib/libcxx/include/__algorithm/pstl.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_any_all_none_of.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_backend.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_backends/cpu_backend.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/backend.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/fill.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/for_each.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/libdispatch.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/merge.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/serial.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/stable_sort.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/thread.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/transform.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/transform_reduce.h delete mode 100644 
lib/libcxx/include/__algorithm/pstl_copy.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_count.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_equal.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_fill.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_find.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_for_each.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_frontend_dispatch.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_generate.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_is_partitioned.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_merge.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_move.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_replace.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_rotate_copy.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_sort.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_stable_sort.h delete mode 100644 lib/libcxx/include/__algorithm/pstl_transform.h create mode 100644 lib/libcxx/include/__algorithm/ranges_contains_subrange.h create mode 100644 lib/libcxx/include/__algorithm/ranges_find_last.h create mode 100644 lib/libcxx/include/__algorithm/simd_utils.h create mode 100644 lib/libcxx/include/__atomic/atomic_ref.h create mode 100644 lib/libcxx/include/__atomic/to_gcc_order.h delete mode 100644 lib/libcxx/include/__availability create mode 100644 lib/libcxx/include/__chrono/exception.h create mode 100644 lib/libcxx/include/__chrono/leap_second.h create mode 100644 lib/libcxx/include/__chrono/local_info.h create mode 100644 lib/libcxx/include/__chrono/sys_info.h create mode 100644 lib/libcxx/include/__chrono/time_zone.h create mode 100644 lib/libcxx/include/__chrono/time_zone_link.h create mode 100644 lib/libcxx/include/__chrono/zoned_time.h create mode 100644 lib/libcxx/include/__configuration/abi.h create mode 100644 lib/libcxx/include/__configuration/availability.h create mode 100644 
lib/libcxx/include/__configuration/compiler.h create mode 100644 lib/libcxx/include/__configuration/language.h create mode 100644 lib/libcxx/include/__configuration/platform.h create mode 100644 lib/libcxx/include/__debug_utils/sanitizers.h create mode 100644 lib/libcxx/include/__format/indic_conjunct_break_table.h create mode 100644 lib/libcxx/include/__fwd/complex.h create mode 100644 lib/libcxx/include/__fwd/deque.h rename lib/libcxx/include/{__format/format_fwd.h => __fwd/format.h} (86%) create mode 100644 lib/libcxx/include/__fwd/functional.h delete mode 100644 lib/libcxx/include/__fwd/get.h rename lib/libcxx/include/__fwd/{hash.h => memory.h} (77%) create mode 100644 lib/libcxx/include/__fwd/queue.h create mode 100644 lib/libcxx/include/__fwd/stack.h create mode 100644 lib/libcxx/include/__fwd/vector.h create mode 100644 lib/libcxx/include/__iterator/aliasing_iterator.h create mode 100644 lib/libcxx/include/__locale_dir/locale_base_api.h create mode 100644 lib/libcxx/include/__locale_dir/locale_base_api/android.h rename lib/libcxx/include/{__support/fuchsia/xlocale.h => __locale_dir/locale_base_api/fuchsia.h} (53%) rename lib/libcxx/include/{__support/ibm/xlocale.h => __locale_dir/locale_base_api/ibm.h} (78%) create mode 100644 lib/libcxx/include/__locale_dir/locale_base_api/musl.h create mode 100644 lib/libcxx/include/__locale_dir/locale_base_api/newlib.h rename lib/libcxx/include/{__support/openbsd/xlocale.h => __locale_dir/locale_base_api/openbsd.h} (51%) rename lib/libcxx/include/{__support/win32/locale_win32.h => __locale_dir/locale_base_api/win32.h} (95%) create mode 100644 lib/libcxx/include/__math/special_functions.h create mode 100644 lib/libcxx/include/__memory/inout_ptr.h create mode 100644 lib/libcxx/include/__memory/out_ptr.h create mode 100644 lib/libcxx/include/__numeric/pstl.h delete mode 100644 lib/libcxx/include/__numeric/pstl_reduce.h delete mode 100644 lib/libcxx/include/__numeric/pstl_transform_reduce.h create mode 100644 
lib/libcxx/include/__ostream/basic_ostream.h create mode 100644 lib/libcxx/include/__ostream/print.h create mode 100644 lib/libcxx/include/__pstl/backend.h create mode 100644 lib/libcxx/include/__pstl/backend_fwd.h create mode 100644 lib/libcxx/include/__pstl/backends/default.h create mode 100644 lib/libcxx/include/__pstl/backends/libdispatch.h create mode 100644 lib/libcxx/include/__pstl/backends/serial.h create mode 100644 lib/libcxx/include/__pstl/backends/std_thread.h rename lib/libcxx/include/{__algorithm/pstl_backends/cpu_backends => __pstl/cpu_algos}/any_of.h (51%) create mode 100644 lib/libcxx/include/__pstl/cpu_algos/cpu_traits.h create mode 100644 lib/libcxx/include/__pstl/cpu_algos/fill.h rename lib/libcxx/include/{__algorithm/pstl_backends/cpu_backends => __pstl/cpu_algos}/find_if.h (56%) create mode 100644 lib/libcxx/include/__pstl/cpu_algos/for_each.h create mode 100644 lib/libcxx/include/__pstl/cpu_algos/merge.h create mode 100644 lib/libcxx/include/__pstl/cpu_algos/stable_sort.h create mode 100644 lib/libcxx/include/__pstl/cpu_algos/transform.h create mode 100644 lib/libcxx/include/__pstl/cpu_algos/transform_reduce.h create mode 100644 lib/libcxx/include/__pstl/dispatch.h create mode 100644 lib/libcxx/include/__pstl/handle_exception.h delete mode 100644 lib/libcxx/include/__support/android/locale_bionic.h delete mode 100644 lib/libcxx/include/__support/musl/xlocale.h delete mode 100644 lib/libcxx/include/__support/newlib/xlocale.h create mode 100644 lib/libcxx/include/__thread/support.h create mode 100644 lib/libcxx/include/__thread/support/c11.h create mode 100644 lib/libcxx/include/__thread/support/external.h create mode 100644 lib/libcxx/include/__thread/support/pthread.h create mode 100644 lib/libcxx/include/__thread/support/windows.h delete mode 100644 lib/libcxx/include/__threading_support create mode 100644 lib/libcxx/include/__tuple/find_index.h rename lib/libcxx/include/{__type_traits/is_default_constructible.h => __tuple/ignore.h} (52%) 
create mode 100644 lib/libcxx/include/__tuple/tuple_like_no_subrange.h delete mode 100644 lib/libcxx/include/__type_traits/apply_cv.h rename lib/libcxx/include/__type_traits/{operation_traits.h => desugars_to.h} (84%) delete mode 100644 lib/libcxx/include/__type_traits/is_copy_assignable.h delete mode 100644 lib/libcxx/include/__type_traits/is_copy_constructible.h delete mode 100644 lib/libcxx/include/__type_traits/is_member_function_pointer.h delete mode 100644 lib/libcxx/include/__type_traits/is_member_object_pointer.h delete mode 100644 lib/libcxx/include/__type_traits/is_move_assignable.h delete mode 100644 lib/libcxx/include/__type_traits/is_move_constructible.h delete mode 100644 lib/libcxx/include/__type_traits/is_nothrow_copy_assignable.h delete mode 100644 lib/libcxx/include/__type_traits/is_nothrow_copy_constructible.h delete mode 100644 lib/libcxx/include/__type_traits/is_nothrow_default_constructible.h delete mode 100644 lib/libcxx/include/__type_traits/is_nothrow_move_assignable.h delete mode 100644 lib/libcxx/include/__type_traits/is_nothrow_move_constructible.h delete mode 100644 lib/libcxx/include/__type_traits/is_scoped_enum.h delete mode 100644 lib/libcxx/include/__type_traits/is_trivially_copy_assignable.h delete mode 100644 lib/libcxx/include/__type_traits/is_trivially_copy_constructible.h delete mode 100644 lib/libcxx/include/__type_traits/is_trivially_default_constructible.h delete mode 100644 lib/libcxx/include/__type_traits/is_trivially_move_assignable.h delete mode 100644 lib/libcxx/include/__type_traits/is_trivially_move_constructible.h create mode 100644 lib/libcxx/include/__type_traits/is_trivially_relocatable.h create mode 100644 lib/libcxx/include/__utility/is_valid_range.h rename lib/libcxx/include/{__tuple/pair_like.h => __utility/private_constructor_tag.h} (58%) delete mode 100644 lib/libcxx/include/experimental/__memory create mode 100644 lib/libcxx/src/expected.cpp create mode 100644 
lib/libcxx/src/experimental/chrono_exception.cpp create mode 100644 lib/libcxx/src/experimental/include/tzdb/time_zone_private.h create mode 100644 lib/libcxx/src/experimental/include/tzdb/types_private.h create mode 100644 lib/libcxx/src/experimental/include/tzdb/tzdb_list_private.h create mode 100644 lib/libcxx/src/experimental/include/tzdb/tzdb_private.h create mode 100644 lib/libcxx/src/experimental/time_zone.cpp create mode 100644 lib/libcxx/src/experimental/tzdb.cpp create mode 100644 lib/libcxx/src/experimental/tzdb_list.cpp delete mode 100644 lib/libcxx/src/tz.cpp delete mode 100644 lib/libcxx/src/tzdb_list.cpp diff --git a/lib/libcxx/include/__algorithm/adjacent_find.h b/lib/libcxx/include/__algorithm/adjacent_find.h index 7819e2cf49b9..6f15456e3a4d 100644 --- a/lib/libcxx/include/__algorithm/adjacent_find.h +++ b/lib/libcxx/include/__algorithm/adjacent_find.h @@ -26,7 +26,7 @@ _LIBCPP_PUSH_MACROS _LIBCPP_BEGIN_NAMESPACE_STD template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _Iter +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _Iter __adjacent_find(_Iter __first, _Sent __last, _BinaryPredicate&& __pred) { if (__first == __last) return __first; @@ -40,13 +40,13 @@ __adjacent_find(_Iter __first, _Sent __last, _BinaryPredicate&& __pred) { } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator adjacent_find(_ForwardIterator __first, _ForwardIterator __last, _BinaryPredicate __pred) { return std::__adjacent_find(std::move(__first), std::move(__last), __pred); } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator adjacent_find(_ForwardIterator __first, _ForwardIterator __last) { return 
std::adjacent_find(std::move(__first), std::move(__last), __equal_to()); } diff --git a/lib/libcxx/include/__algorithm/all_of.h b/lib/libcxx/include/__algorithm/all_of.h index 237f8495c645..ec84eea75929 100644 --- a/lib/libcxx/include/__algorithm/all_of.h +++ b/lib/libcxx/include/__algorithm/all_of.h @@ -19,7 +19,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool all_of(_InputIterator __first, _InputIterator __last, _Predicate __pred) { for (; __first != __last; ++__first) if (!__pred(*__first)) diff --git a/lib/libcxx/include/__algorithm/any_of.h b/lib/libcxx/include/__algorithm/any_of.h index 8ba7aae2b225..b5ff778c4171 100644 --- a/lib/libcxx/include/__algorithm/any_of.h +++ b/lib/libcxx/include/__algorithm/any_of.h @@ -19,7 +19,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool any_of(_InputIterator __first, _InputIterator __last, _Predicate __pred) { for (; __first != __last; ++__first) if (__pred(*__first)) diff --git a/lib/libcxx/include/__algorithm/binary_search.h b/lib/libcxx/include/__algorithm/binary_search.h index 7a77d7b5447b..6065fc37274d 100644 --- a/lib/libcxx/include/__algorithm/binary_search.h +++ b/lib/libcxx/include/__algorithm/binary_search.h @@ -22,14 +22,14 @@ _LIBCPP_BEGIN_NAMESPACE_STD template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool binary_search(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __value, _Compare __comp) { __first = std::lower_bound<_ForwardIterator, _Tp, __comp_ref_type<_Compare> >(__first, __last, __value, __comp); return __first != __last && !__comp(__value, 
*__first); } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool binary_search(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { return std::binary_search(__first, __last, __value, __less<>()); } diff --git a/lib/libcxx/include/__algorithm/clamp.h b/lib/libcxx/include/__algorithm/clamp.h index 003bf70dd4f0..1a5a3d0744be 100644 --- a/lib/libcxx/include/__algorithm/clamp.h +++ b/lib/libcxx/include/__algorithm/clamp.h @@ -21,7 +21,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD #if _LIBCPP_STD_VER >= 17 template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI constexpr const _Tp& +[[nodiscard]] inline _LIBCPP_HIDE_FROM_ABI constexpr const _Tp& clamp(_LIBCPP_LIFETIMEBOUND const _Tp& __v, _LIBCPP_LIFETIMEBOUND const _Tp& __lo, _LIBCPP_LIFETIMEBOUND const _Tp& __hi, @@ -31,7 +31,7 @@ clamp(_LIBCPP_LIFETIMEBOUND const _Tp& __v, } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI constexpr const _Tp& +[[nodiscard]] inline _LIBCPP_HIDE_FROM_ABI constexpr const _Tp& clamp(_LIBCPP_LIFETIMEBOUND const _Tp& __v, _LIBCPP_LIFETIMEBOUND const _Tp& __lo, _LIBCPP_LIFETIMEBOUND const _Tp& __hi) { diff --git a/lib/libcxx/include/__algorithm/comp.h b/lib/libcxx/include/__algorithm/comp.h index 3902f7560304..a0fa88d6d2ac 100644 --- a/lib/libcxx/include/__algorithm/comp.h +++ b/lib/libcxx/include/__algorithm/comp.h @@ -10,8 +10,7 @@ #define _LIBCPP___ALGORITHM_COMP_H #include <__config> -#include <__type_traits/integral_constant.h> -#include <__type_traits/operation_traits.h> +#include <__type_traits/desugars_to.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header @@ -27,7 +26,7 @@ struct __equal_to { }; template -struct __desugars_to<__equal_tag, __equal_to, _Tp, _Up> : true_type {}; +inline const bool __desugars_to_v<__equal_tag, __equal_to, _Tp, _Up> = true; // The definition is required because __less is part 
of the ABI, but it's empty // because all comparisons should be transparent. @@ -42,6 +41,9 @@ struct __less { } }; +template +inline const bool __desugars_to_v<__less_tag, __less<>, _Tp, _Tp> = true; + _LIBCPP_END_NAMESPACE_STD #endif // _LIBCPP___ALGORITHM_COMP_H diff --git a/lib/libcxx/include/__algorithm/comp_ref_type.h b/lib/libcxx/include/__algorithm/comp_ref_type.h index aa9350c38caa..c367fbb91ac2 100644 --- a/lib/libcxx/include/__algorithm/comp_ref_type.h +++ b/lib/libcxx/include/__algorithm/comp_ref_type.h @@ -41,9 +41,9 @@ struct __debug_less { } template - _LIBCPP_CONSTEXPR_SINCE_CXX14 inline _LIBCPP_HIDE_FROM_ABI decltype((void)std::declval<_Compare&>()( - std::declval<_LHS&>(), std::declval<_RHS&>())) - __do_compare_assert(int, _LHS& __l, _RHS& __r) { + _LIBCPP_CONSTEXPR_SINCE_CXX14 inline + _LIBCPP_HIDE_FROM_ABI decltype((void)std::declval<_Compare&>()(std::declval<_LHS&>(), std::declval<_RHS&>())) + __do_compare_assert(int, _LHS& __l, _RHS& __r) { _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(!__comp_(__l, __r), "Comparator does not induce a strict weak ordering"); (void)__l; (void)__r; diff --git a/lib/libcxx/include/__algorithm/copy.h b/lib/libcxx/include/__algorithm/copy.h index 4c3815405af0..0890b895f540 100644 --- a/lib/libcxx/include/__algorithm/copy.h +++ b/lib/libcxx/include/__algorithm/copy.h @@ -32,7 +32,7 @@ template inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_InIter, _OutIter> __copy(_InIter, _Sent, _OutIter); template -struct __copy_loop { +struct __copy_impl { template _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_InIter, _OutIter> operator()(_InIter __first, _Sent __last, _OutIter __result) const { @@ -94,9 +94,7 @@ struct __copy_loop { __local_first = _Traits::__begin(++__segment_iterator); } } -}; -struct __copy_trivial { // At this point, the iterators have been unwrapped so any `contiguous_iterator` has been unwrapped to a pointer. 
template ::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_In*, _Out*> @@ -108,7 +106,7 @@ struct __copy_trivial { template pair<_InIter, _OutIter> inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 __copy(_InIter __first, _Sent __last, _OutIter __result) { - return std::__dispatch_copy_or_move<_AlgPolicy, __copy_loop<_AlgPolicy>, __copy_trivial>( + return std::__copy_move_unwrap_iters<__copy_impl<_AlgPolicy> >( std::move(__first), std::move(__last), std::move(__result)); } diff --git a/lib/libcxx/include/__algorithm/copy_backward.h b/lib/libcxx/include/__algorithm/copy_backward.h index 3ec88d8bd5cc..73dc846a975a 100644 --- a/lib/libcxx/include/__algorithm/copy_backward.h +++ b/lib/libcxx/include/__algorithm/copy_backward.h @@ -15,7 +15,7 @@ #include <__config> #include <__iterator/segmented_iterator.h> #include <__type_traits/common_type.h> -#include <__type_traits/is_copy_constructible.h> +#include <__type_traits/is_constructible.h> #include <__utility/move.h> #include <__utility/pair.h> @@ -33,7 +33,7 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_InIter, _OutIter> __copy_backward(_InIter __first, _Sent __last, _OutIter __result); template -struct __copy_backward_loop { +struct __copy_backward_impl { template _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_InIter, _OutIter> operator()(_InIter __first, _Sent __last, _OutIter __result) const { @@ -104,9 +104,7 @@ struct __copy_backward_loop { __local_last = _Traits::__end(__segment_iterator); } } -}; -struct __copy_backward_trivial { // At this point, the iterators have been unwrapped so any `contiguous_iterator` has been unwrapped to a pointer. 
template ::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_In*, _Out*> @@ -118,7 +116,7 @@ struct __copy_backward_trivial { template _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_BidirectionalIterator1, _BidirectionalIterator2> __copy_backward(_BidirectionalIterator1 __first, _Sentinel __last, _BidirectionalIterator2 __result) { - return std::__dispatch_copy_or_move<_AlgPolicy, __copy_backward_loop<_AlgPolicy>, __copy_backward_trivial>( + return std::__copy_move_unwrap_iters<__copy_backward_impl<_AlgPolicy> >( std::move(__first), std::move(__last), std::move(__result)); } diff --git a/lib/libcxx/include/__algorithm/copy_move_common.h b/lib/libcxx/include/__algorithm/copy_move_common.h index 0fc7a5e3cee7..8a98451a8f96 100644 --- a/lib/libcxx/include/__algorithm/copy_move_common.h +++ b/lib/libcxx/include/__algorithm/copy_move_common.h @@ -19,9 +19,8 @@ #include <__type_traits/enable_if.h> #include <__type_traits/is_always_bitcastable.h> #include <__type_traits/is_constant_evaluated.h> -#include <__type_traits/is_copy_constructible.h> +#include <__type_traits/is_constructible.h> #include <__type_traits/is_trivially_assignable.h> -#include <__type_traits/is_trivially_copyable.h> #include <__type_traits/is_volatile.h> #include <__utility/move.h> #include <__utility/pair.h> @@ -81,30 +80,17 @@ __copy_backward_trivial_impl(_In* __first, _In* __last, _Out* __result) { // Iterator unwrapping and dispatching to the correct overload. -template -struct __overload : _F1, _F2 { - using _F1::operator(); - using _F2::operator(); -}; - -template -struct __can_rewrap : false_type {}; - -template -struct __can_rewrap<_InIter, - _Sent, - _OutIter, - // Note that sentinels are always copy-constructible. 
- __enable_if_t< is_copy_constructible<_InIter>::value && is_copy_constructible<_OutIter>::value > > - : true_type {}; +template +struct __can_rewrap + : integral_constant::value && is_copy_constructible<_OutIter>::value> {}; template ::value, int> = 0> + __enable_if_t<__can_rewrap<_InIter, _OutIter>::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 pair<_InIter, _OutIter> -__unwrap_and_dispatch(_InIter __first, _Sent __last, _OutIter __out_first) { +__copy_move_unwrap_iters(_InIter __first, _Sent __last, _OutIter __out_first) { auto __range = std::__unwrap_range(__first, std::move(__last)); auto __result = _Algorithm()(std::move(__range.first), std::move(__range.second), std::__unwrap_iter(__out_first)); return std::make_pair(std::__rewrap_range<_Sent>(std::move(__first), std::move(__result.first)), @@ -115,24 +101,12 @@ template ::value, int> = 0> + __enable_if_t::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 pair<_InIter, _OutIter> -__unwrap_and_dispatch(_InIter __first, _Sent __last, _OutIter __out_first) { +__copy_move_unwrap_iters(_InIter __first, _Sent __last, _OutIter __out_first) { return _Algorithm()(std::move(__first), std::move(__last), std::move(__out_first)); } -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 pair<_InIter, _OutIter> -__dispatch_copy_or_move(_InIter __first, _Sent __last, _OutIter __out_first) { - using _Algorithm = __overload<_NaiveAlgorithm, _OptimizedAlgorithm>; - return std::__unwrap_and_dispatch<_Algorithm>(std::move(__first), std::move(__last), std::move(__out_first)); -} - _LIBCPP_END_NAMESPACE_STD _LIBCPP_POP_MACROS diff --git a/lib/libcxx/include/__algorithm/count.h b/lib/libcxx/include/__algorithm/count.h index 23a7d3c4dcfe..1cfe7f631ac1 100644 --- a/lib/libcxx/include/__algorithm/count.h +++ b/lib/libcxx/include/__algorithm/count.h @@ -79,7 +79,7 @@ __count(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __l } template -_LIBCPP_NODISCARD_EXT inline 
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 __iter_diff_t<_InputIterator> +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 __iter_diff_t<_InputIterator> count(_InputIterator __first, _InputIterator __last, const _Tp& __value) { __identity __proj; return std::__count<_ClassicAlgPolicy>(__first, __last, __value, __proj); diff --git a/lib/libcxx/include/__algorithm/count_if.h b/lib/libcxx/include/__algorithm/count_if.h index 04f52b894f8b..25782069d032 100644 --- a/lib/libcxx/include/__algorithm/count_if.h +++ b/lib/libcxx/include/__algorithm/count_if.h @@ -20,9 +20,9 @@ _LIBCPP_BEGIN_NAMESPACE_STD template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 - typename iterator_traits<_InputIterator>::difference_type - count_if(_InputIterator __first, _InputIterator __last, _Predicate __pred) { +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 +typename iterator_traits<_InputIterator>::difference_type +count_if(_InputIterator __first, _InputIterator __last, _Predicate __pred) { typename iterator_traits<_InputIterator>::difference_type __r(0); for (; __first != __last; ++__first) if (__pred(*__first)) diff --git a/lib/libcxx/include/__algorithm/equal.h b/lib/libcxx/include/__algorithm/equal.h index 3c0e3060e39a..bfc8f72f6eb1 100644 --- a/lib/libcxx/include/__algorithm/equal.h +++ b/lib/libcxx/include/__algorithm/equal.h @@ -18,12 +18,11 @@ #include <__iterator/distance.h> #include <__iterator/iterator_traits.h> #include <__string/constexpr_c_functions.h> +#include <__type_traits/desugars_to.h> #include <__type_traits/enable_if.h> -#include <__type_traits/integral_constant.h> #include <__type_traits/is_constant_evaluated.h> #include <__type_traits/is_equality_comparable.h> #include <__type_traits/is_volatile.h> -#include <__type_traits/operation_traits.h> #include <__utility/move.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -47,7 +46,7 @@ _LIBCPP_NODISCARD inline 
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 boo template ::value && !is_volatile<_Tp>::value && + __enable_if_t<__desugars_to_v<__equal_tag, _BinaryPredicate, _Tp, _Up> && !is_volatile<_Tp>::value && !is_volatile<_Up>::value && __libcpp_is_trivially_equality_comparable<_Tp, _Up>::value, int> = 0> _LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool @@ -56,33 +55,19 @@ __equal_iter_impl(_Tp* __first1, _Tp* __last1, _Up* __first2, _BinaryPredicate&) } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool equal(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, _BinaryPredicate __pred) { return std::__equal_iter_impl( std::__unwrap_iter(__first1), std::__unwrap_iter(__last1), std::__unwrap_iter(__first2), __pred); } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool equal(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2) { return std::equal(__first1, __last1, __first2, __equal_to()); } #if _LIBCPP_STD_VER >= 14 -template -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool -__equal(_InputIterator1 __first1, - _InputIterator1 __last1, - _InputIterator2 __first2, - _InputIterator2 __last2, - _BinaryPredicate __pred, - input_iterator_tag, - input_iterator_tag) { - for (; __first1 != __last1 && __first2 != __last2; ++__first1, (void)++__first2) - if (!__pred(*__first1, *__first2)) - return false; - return __first1 == __last1 && __first2 == __last2; -} template _LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool __equal_impl( @@ -101,7 +86,7 @@ template ::value && __is_identity<_Proj1>::value && + __enable_if_t<__desugars_to_v<__equal_tag, _Pred, _Tp, _Up> && __is_identity<_Proj1>::value && 
__is_identity<_Proj2>::value && !is_volatile<_Tp>::value && !is_volatile<_Up>::value && __libcpp_is_trivially_equality_comparable<_Tp, _Up>::value, int> = 0> @@ -110,17 +95,18 @@ __equal_impl(_Tp* __first1, _Tp* __last1, _Up* __first2, _Up*, _Pred&, _Proj1&, return std::__constexpr_memcmp_equal(__first1, __first2, __element_count(__last1 - __first1)); } -template -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool -__equal(_RandomAccessIterator1 __first1, - _RandomAccessIterator1 __last1, - _RandomAccessIterator2 __first2, - _RandomAccessIterator2 __last2, - _BinaryPredicate __pred, - random_access_iterator_tag, - random_access_iterator_tag) { - if (std::distance(__first1, __last1) != std::distance(__first2, __last2)) - return false; +template +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +equal(_InputIterator1 __first1, + _InputIterator1 __last1, + _InputIterator2 __first2, + _InputIterator2 __last2, + _BinaryPredicate __pred) { + if constexpr (__has_random_access_iterator_category<_InputIterator1>::value && + __has_random_access_iterator_category<_InputIterator2>::value) { + if (std::distance(__first1, __last1) != std::distance(__first2, __last2)) + return false; + } __identity __proj; return std::__equal_impl( std::__unwrap_iter(__first1), @@ -132,36 +118,13 @@ __equal(_RandomAccessIterator1 __first1, __proj); } -template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool -equal(_InputIterator1 __first1, - _InputIterator1 __last1, - _InputIterator2 __first2, - _InputIterator2 __last2, - _BinaryPredicate __pred) { - return std::__equal<_BinaryPredicate&>( - __first1, - __last1, - __first2, - __last2, - __pred, - typename iterator_traits<_InputIterator1>::iterator_category(), - typename iterator_traits<_InputIterator2>::iterator_category()); -} - template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline 
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool equal(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, _InputIterator2 __last2) { - return std::__equal( - __first1, - __last1, - __first2, - __last2, - __equal_to(), - typename iterator_traits<_InputIterator1>::iterator_category(), - typename iterator_traits<_InputIterator2>::iterator_category()); + return std::equal(__first1, __last1, __first2, __last2, __equal_to()); } -#endif + +#endif // _LIBCPP_STD_VER >= 14 _LIBCPP_END_NAMESPACE_STD diff --git a/lib/libcxx/include/__algorithm/equal_range.h b/lib/libcxx/include/__algorithm/equal_range.h index a94290431971..09bbf8f00602 100644 --- a/lib/libcxx/include/__algorithm/equal_range.h +++ b/lib/libcxx/include/__algorithm/equal_range.h @@ -23,7 +23,7 @@ #include <__iterator/iterator_traits.h> #include <__iterator/next.h> #include <__type_traits/is_callable.h> -#include <__type_traits/is_copy_constructible.h> +#include <__type_traits/is_constructible.h> #include <__utility/move.h> #include <__utility/pair.h> @@ -60,7 +60,7 @@ __equal_range(_Iter __first, _Sent __last, const _Tp& __value, _Compare&& __comp } template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_ForwardIterator, _ForwardIterator> +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_ForwardIterator, _ForwardIterator> equal_range(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __value, _Compare __comp) { static_assert(__is_callable<_Compare, decltype(*__first), const _Tp&>::value, "The comparator has to be callable"); static_assert(is_copy_constructible<_ForwardIterator>::value, "Iterator has to be copy constructible"); @@ -73,7 +73,7 @@ equal_range(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __valu } template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_ForwardIterator, _ForwardIterator> +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI 
_LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_ForwardIterator, _ForwardIterator> equal_range(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { return std::equal_range(std::move(__first), std::move(__last), __value, __less<>()); } diff --git a/lib/libcxx/include/__algorithm/fill_n.h b/lib/libcxx/include/__algorithm/fill_n.h index 36f3349d9e7a..f29633f88087 100644 --- a/lib/libcxx/include/__algorithm/fill_n.h +++ b/lib/libcxx/include/__algorithm/fill_n.h @@ -9,18 +9,74 @@ #ifndef _LIBCPP___ALGORITHM_FILL_N_H #define _LIBCPP___ALGORITHM_FILL_N_H +#include <__algorithm/min.h> #include <__config> +#include <__fwd/bit_reference.h> #include <__iterator/iterator_traits.h> +#include <__memory/pointer_traits.h> #include <__utility/convert_to_integral.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header #endif +_LIBCPP_PUSH_MACROS +#include <__undef_macros> + _LIBCPP_BEGIN_NAMESPACE_STD // fill_n isn't specialized for std::memset, because the compiler already optimizes the loop to a call to std::memset. 
+template +inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _OutputIterator +__fill_n(_OutputIterator __first, _Size __n, const _Tp& __value); + +template +_LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI void +__fill_n_bool(__bit_iterator<_Cp, false> __first, typename _Cp::size_type __n) { + using _It = __bit_iterator<_Cp, false>; + using __storage_type = typename _It::__storage_type; + + const int __bits_per_word = _It::__bits_per_word; + // do first partial word + if (__first.__ctz_ != 0) { + __storage_type __clz_f = static_cast<__storage_type>(__bits_per_word - __first.__ctz_); + __storage_type __dn = std::min(__clz_f, __n); + __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); + if (_FillVal) + *__first.__seg_ |= __m; + else + *__first.__seg_ &= ~__m; + __n -= __dn; + ++__first.__seg_; + } + // do middle whole words + __storage_type __nw = __n / __bits_per_word; + std::__fill_n(std::__to_address(__first.__seg_), __nw, _FillVal ? 
static_cast<__storage_type>(-1) : 0); + __n -= __nw * __bits_per_word; + // do last partial word + if (__n > 0) { + __first.__seg_ += __nw; + __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); + if (_FillVal) + *__first.__seg_ |= __m; + else + *__first.__seg_ &= ~__m; + } +} + +template +inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 __bit_iterator<_Cp, false> +__fill_n(__bit_iterator<_Cp, false> __first, _Size __n, const bool& __value) { + if (__n > 0) { + if (__value) + std::__fill_n_bool(__first, __n); + else + std::__fill_n_bool(__first, __n); + } + return __first + __n; +} + template inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _OutputIterator __fill_n(_OutputIterator __first, _Size __n, const _Tp& __value) { @@ -37,4 +93,6 @@ fill_n(_OutputIterator __first, _Size __n, const _Tp& __value) { _LIBCPP_END_NAMESPACE_STD +_LIBCPP_POP_MACROS + #endif // _LIBCPP___ALGORITHM_FILL_N_H diff --git a/lib/libcxx/include/__algorithm/find.h b/lib/libcxx/include/__algorithm/find.h index 7d7631b6e98a..7f58dbb13a57 100644 --- a/lib/libcxx/include/__algorithm/find.h +++ b/lib/libcxx/include/__algorithm/find.h @@ -43,7 +43,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD // generic implementation template _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Iter -__find_impl(_Iter __first, _Sent __last, const _Tp& __value, _Proj& __proj) { +__find(_Iter __first, _Sent __last, const _Tp& __value, _Proj& __proj) { for (; __first != __last; ++__first) if (std::__invoke(__proj, *__first) == __value) break; @@ -57,8 +57,7 @@ template ::value && __libcpp_is_trivially_equality_comparable<_Tp, _Up>::value && sizeof(_Tp) == 1, int> = 0> -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp* -__find_impl(_Tp* __first, _Tp* __last, const _Up& __value, _Proj&) { +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp* __find(_Tp* __first, _Tp* __last, const _Up& __value, _Proj&) { if (auto __ret = std::__constexpr_memchr(__first, __value, __last - __first)) 
return __ret; return __last; @@ -71,8 +70,7 @@ template ::value && __libcpp_is_trivially_equality_comparable<_Tp, _Up>::value && sizeof(_Tp) == sizeof(wchar_t) && _LIBCPP_ALIGNOF(_Tp) >= _LIBCPP_ALIGNOF(wchar_t), int> = 0> -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp* -__find_impl(_Tp* __first, _Tp* __last, const _Up& __value, _Proj&) { +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp* __find(_Tp* __first, _Tp* __last, const _Up& __value, _Proj&) { if (auto __ret = std::__constexpr_wmemchr(__first, __value, __last - __first)) return __ret; return __last; @@ -89,10 +87,10 @@ template ::value == is_signed<_Up>::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp* -__find_impl(_Tp* __first, _Tp* __last, const _Up& __value, _Proj& __proj) { +__find(_Tp* __first, _Tp* __last, const _Up& __value, _Proj& __proj) { if (__value < numeric_limits<_Tp>::min() || __value > numeric_limits<_Tp>::max()) return __last; - return std::__find_impl(__first, __last, _Tp(__value), __proj); + return std::__find(__first, __last, _Tp(__value), __proj); } // __bit_iterator implementation @@ -134,7 +132,7 @@ __find_bool(__bit_iterator<_Cp, _IsConst> __first, typename _Cp::size_type __n) template ::value, int> = 0> inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 __bit_iterator<_Cp, _IsConst> -__find_impl(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, const _Tp& __value, _Proj&) { +__find(__bit_iterator<_Cp, _IsConst> __first, __bit_iterator<_Cp, _IsConst> __last, const _Tp& __value, _Proj&) { if (static_cast(__value)) return std::__find_bool(__first, static_cast(__last - __first)); return std::__find_bool(__first, static_cast(__last - __first)); @@ -150,7 +148,7 @@ template ::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _SegmentedIterator -__find_impl(_SegmentedIterator __first, _SegmentedIterator __last, const _Tp& __value, _Proj& __proj) { +__find(_SegmentedIterator __first, 
_SegmentedIterator __last, const _Tp& __value, _Proj& __proj) { return std::__find_segment_if(std::move(__first), std::move(__last), __find_segment<_Tp>(__value), __proj); } @@ -163,17 +161,17 @@ struct __find_segment { template _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR _InputIterator operator()(_InputIterator __first, _InputIterator __last, _Proj& __proj) const { - return std::__find_impl(__first, __last, __value_, __proj); + return std::__find(__first, __last, __value_, __proj); } }; // public API template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _InputIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _InputIterator find(_InputIterator __first, _InputIterator __last, const _Tp& __value) { __identity __proj; return std::__rewrap_iter( - __first, std::__find_impl(std::__unwrap_iter(__first), std::__unwrap_iter(__last), __value, __proj)); + __first, std::__find(std::__unwrap_iter(__first), std::__unwrap_iter(__last), __value, __proj)); } _LIBCPP_END_NAMESPACE_STD diff --git a/lib/libcxx/include/__algorithm/find_end.h b/lib/libcxx/include/__algorithm/find_end.h index 4c26891666b2..7e08e7953534 100644 --- a/lib/libcxx/include/__algorithm/find_end.h +++ b/lib/libcxx/include/__algorithm/find_end.h @@ -205,7 +205,7 @@ _LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Fo } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator1 find_end( +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator1 find_end( _ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2, @@ -215,7 +215,7 @@ _LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator1 +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 
_ForwardIterator1 find_end(_ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2, _ForwardIterator2 __last2) { return std::find_end(__first1, __last1, __first2, __last2, __equal_to()); } diff --git a/lib/libcxx/include/__algorithm/find_first_of.h b/lib/libcxx/include/__algorithm/find_first_of.h index 14271cccc42b..6b99f562f880 100644 --- a/lib/libcxx/include/__algorithm/find_first_of.h +++ b/lib/libcxx/include/__algorithm/find_first_of.h @@ -35,7 +35,7 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _ForwardIterator1 __find_fir } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator1 find_first_of( +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator1 find_first_of( _ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2, @@ -45,7 +45,7 @@ _LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator1 find_first_of( +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator1 find_first_of( _ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2, _ForwardIterator2 __last2) { return std::__find_first_of_ce(__first1, __last1, __first2, __last2, __equal_to()); } diff --git a/lib/libcxx/include/__algorithm/find_if.h b/lib/libcxx/include/__algorithm/find_if.h index 09a39f646351..22092d352b06 100644 --- a/lib/libcxx/include/__algorithm/find_if.h +++ b/lib/libcxx/include/__algorithm/find_if.h @@ -19,7 +19,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _InputIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _InputIterator find_if(_InputIterator __first, _InputIterator __last, _Predicate __pred) { for (; __first != 
__last; ++__first) if (__pred(*__first)) diff --git a/lib/libcxx/include/__algorithm/find_if_not.h b/lib/libcxx/include/__algorithm/find_if_not.h index bf29ebb7cdd9..cc2001967f0c 100644 --- a/lib/libcxx/include/__algorithm/find_if_not.h +++ b/lib/libcxx/include/__algorithm/find_if_not.h @@ -19,7 +19,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _InputIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _InputIterator find_if_not(_InputIterator __first, _InputIterator __last, _Predicate __pred) { for (; __first != __last; ++__first) if (!__pred(*__first)) diff --git a/lib/libcxx/include/__algorithm/fold.h b/lib/libcxx/include/__algorithm/fold.h index 1a9d76b50d83..255658f52324 100644 --- a/lib/libcxx/include/__algorithm/fold.h +++ b/lib/libcxx/include/__algorithm/fold.h @@ -78,8 +78,7 @@ concept __indirectly_binary_left_foldable = struct __fold_left_with_iter { template _Sp, class _Tp, __indirectly_binary_left_foldable<_Tp, _Ip> _Fp> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI static constexpr auto - operator()(_Ip __first, _Sp __last, _Tp __init, _Fp __f) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr auto operator()(_Ip __first, _Sp __last, _Tp __init, _Fp __f) { using _Up = decay_t>>; if (__first == __last) { @@ -95,7 +94,7 @@ struct __fold_left_with_iter { } template > _Fp> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI static constexpr auto operator()(_Rp&& __r, _Tp __init, _Fp __f) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr auto operator()(_Rp&& __r, _Tp __init, _Fp __f) { auto __result = operator()(ranges::begin(__r), ranges::end(__r), std::move(__init), std::ref(__f)); using _Up = decay_t>>; @@ -107,13 +106,12 @@ inline constexpr auto fold_left_with_iter = __fold_left_with_iter(); struct __fold_left { template _Sp, class _Tp, __indirectly_binary_left_foldable<_Tp, _Ip> _Fp> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI 
static constexpr auto - operator()(_Ip __first, _Sp __last, _Tp __init, _Fp __f) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr auto operator()(_Ip __first, _Sp __last, _Tp __init, _Fp __f) { return fold_left_with_iter(std::move(__first), std::move(__last), std::move(__init), std::ref(__f)).value; } template > _Fp> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI static constexpr auto operator()(_Rp&& __r, _Tp __init, _Fp __f) { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr auto operator()(_Rp&& __r, _Tp __init, _Fp __f) { return fold_left_with_iter(ranges::begin(__r), ranges::end(__r), std::move(__init), std::ref(__f)).value; } }; diff --git a/lib/libcxx/include/__algorithm/includes.h b/lib/libcxx/include/__algorithm/includes.h index 05d45365eb80..62af03c37426 100644 --- a/lib/libcxx/include/__algorithm/includes.h +++ b/lib/libcxx/include/__algorithm/includes.h @@ -47,7 +47,7 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool __includes( } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool includes(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, @@ -67,7 +67,7 @@ includes(_InputIterator1 __first1, } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool includes(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, _InputIterator2 __last2) { return std::includes(std::move(__first1), std::move(__last1), std::move(__first2), std::move(__last2), __less<>()); } diff --git a/lib/libcxx/include/__algorithm/inplace_merge.h b/lib/libcxx/include/__algorithm/inplace_merge.h index eb3c0bdbc2db..a6bcc66a2fa4 100644 --- a/lib/libcxx/include/__algorithm/inplace_merge.h +++ b/lib/libcxx/include/__algorithm/inplace_merge.h @@ -114,8 +114,8 @@ 
_LIBCPP_HIDE_FROM_ABI void __buffered_inplace_merge( for (_BidirectionalIterator __i = __middle; __i != __last; __d.template __incr(), (void)++__i, (void)++__p) ::new ((void*)__p) value_type(_IterOps<_AlgPolicy>::__iter_move(__i)); - typedef __unconstrained_reverse_iterator<_BidirectionalIterator> _RBi; - typedef __unconstrained_reverse_iterator _Rv; + typedef reverse_iterator<_BidirectionalIterator> _RBi; + typedef reverse_iterator _Rv; typedef __invert<_Compare> _Inverted; std::__half_inplace_merge<_AlgPolicy>( _Rv(__p), _Rv(__buff), _RBi(__middle), _RBi(__first), _RBi(__last), _Inverted(__comp)); diff --git a/lib/libcxx/include/__algorithm/is_heap.h b/lib/libcxx/include/__algorithm/is_heap.h index 0d2d43c2c3ab..c589b804a5dc 100644 --- a/lib/libcxx/include/__algorithm/is_heap.h +++ b/lib/libcxx/include/__algorithm/is_heap.h @@ -22,13 +22,13 @@ _LIBCPP_BEGIN_NAMESPACE_STD template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool is_heap(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp) { return std::__is_heap_until(__first, __last, static_cast<__comp_ref_type<_Compare> >(__comp)) == __last; } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool is_heap(_RandomAccessIterator __first, _RandomAccessIterator __last) { return std::is_heap(__first, __last, __less<>()); } diff --git a/lib/libcxx/include/__algorithm/is_heap_until.h b/lib/libcxx/include/__algorithm/is_heap_until.h index 1eae3b86b90d..a174f2453cfc 100644 --- a/lib/libcxx/include/__algorithm/is_heap_until.h +++ b/lib/libcxx/include/__algorithm/is_heap_until.h @@ -46,13 +46,13 @@ __is_heap_until(_RandomAccessIterator __first, _RandomAccessIterator __last, _Co } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI 
_LIBCPP_CONSTEXPR_SINCE_CXX20 _RandomAccessIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _RandomAccessIterator is_heap_until(_RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp) { return std::__is_heap_until(__first, __last, static_cast<__comp_ref_type<_Compare> >(__comp)); } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _RandomAccessIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _RandomAccessIterator is_heap_until(_RandomAccessIterator __first, _RandomAccessIterator __last) { return std::__is_heap_until(__first, __last, __less<>()); } diff --git a/lib/libcxx/include/__algorithm/is_partitioned.h b/lib/libcxx/include/__algorithm/is_partitioned.h index 71feed332060..1f7c8b0b267e 100644 --- a/lib/libcxx/include/__algorithm/is_partitioned.h +++ b/lib/libcxx/include/__algorithm/is_partitioned.h @@ -18,7 +18,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool is_partitioned(_InputIterator __first, _InputIterator __last, _Predicate __pred) { for (; __first != __last; ++__first) if (!__pred(*__first)) diff --git a/lib/libcxx/include/__algorithm/is_permutation.h b/lib/libcxx/include/__algorithm/is_permutation.h index 4226151222bb..2ddfb32a212b 100644 --- a/lib/libcxx/include/__algorithm/is_permutation.h +++ b/lib/libcxx/include/__algorithm/is_permutation.h @@ -113,7 +113,7 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool __is_permutation_impl( // 2+1 iterators, predicate. Not used by range algorithms. 
template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool __is_permutation( +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool __is_permutation( _ForwardIterator1 __first1, _Sentinel1 __last1, _ForwardIterator2 __first2, _BinaryPredicate&& __pred) { // Shorten sequences as much as possible by lopping of any equal prefix. for (; __first1 != __last1; ++__first1, (void)++__first2) { @@ -247,7 +247,7 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool __is_permutation( // 2+1 iterators, predicate template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool is_permutation( +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool is_permutation( _ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2, _BinaryPredicate __pred) { static_assert(__is_callable<_BinaryPredicate, decltype(*__first1), decltype(*__first2)>::value, "The predicate has to be callable"); @@ -257,7 +257,7 @@ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool i // 2+1 iterators template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool is_permutation(_ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2) { return std::is_permutation(__first1, __last1, __first2, __equal_to()); } @@ -266,7 +266,7 @@ is_permutation(_ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIt // 2+2 iterators template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool is_permutation( +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool is_permutation( _ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2, _ForwardIterator2 __last2) { return std::__is_permutation<_ClassicAlgPolicy>( std::move(__first1), @@ -280,7 +280,7 
@@ _LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 // 2+2 iterators, predicate template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool is_permutation( +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool is_permutation( _ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2, diff --git a/lib/libcxx/include/__algorithm/is_sorted.h b/lib/libcxx/include/__algorithm/is_sorted.h index 1874cace882c..3befb1ac9c26 100644 --- a/lib/libcxx/include/__algorithm/is_sorted.h +++ b/lib/libcxx/include/__algorithm/is_sorted.h @@ -22,13 +22,13 @@ _LIBCPP_BEGIN_NAMESPACE_STD template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool is_sorted(_ForwardIterator __first, _ForwardIterator __last, _Compare __comp) { return std::__is_sorted_until<__comp_ref_type<_Compare> >(__first, __last, __comp) == __last; } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool is_sorted(_ForwardIterator __first, _ForwardIterator __last) { return std::is_sorted(__first, __last, __less<>()); } diff --git a/lib/libcxx/include/__algorithm/is_sorted_until.h b/lib/libcxx/include/__algorithm/is_sorted_until.h index 7450440df2d8..53a49f00de31 100644 --- a/lib/libcxx/include/__algorithm/is_sorted_until.h +++ b/lib/libcxx/include/__algorithm/is_sorted_until.h @@ -35,13 +35,13 @@ __is_sorted_until(_ForwardIterator __first, _ForwardIterator __last, _Compare __ } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator is_sorted_until(_ForwardIterator __first, _ForwardIterator __last, _Compare __comp) { 
return std::__is_sorted_until<__comp_ref_type<_Compare> >(__first, __last, __comp); } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator is_sorted_until(_ForwardIterator __first, _ForwardIterator __last) { return std::is_sorted_until(__first, __last, __less<>()); } diff --git a/lib/libcxx/include/__algorithm/iterator_operations.h b/lib/libcxx/include/__algorithm/iterator_operations.h index 5cf13f0a3f29..8ced989233bc 100644 --- a/lib/libcxx/include/__algorithm/iterator_operations.h +++ b/lib/libcxx/include/__algorithm/iterator_operations.h @@ -11,6 +11,7 @@ #include <__algorithm/iter_swap.h> #include <__algorithm/ranges_iterator_concept.h> +#include <__assert> #include <__config> #include <__iterator/advance.h> #include <__iterator/distance.h> @@ -160,6 +161,59 @@ struct _IterOps<_ClassicAlgPolicy> { _LIBCPP_HIDE_FROM_ABI static _LIBCPP_CONSTEXPR_SINCE_CXX14 void __advance_to(_Iter& __first, _Iter __last) { __first = __last; } + + // advance with sentinel, a la std::ranges::advance + template + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 static __difference_type<_Iter> + __advance_to(_Iter& __iter, __difference_type<_Iter> __count, const _Iter& __sentinel) { + return _IterOps::__advance_to(__iter, __count, __sentinel, typename iterator_traits<_Iter>::iterator_category()); + } + +private: + // advance with sentinel, a la std::ranges::advance -- InputIterator specialization + template + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 static __difference_type<_InputIter> __advance_to( + _InputIter& __iter, __difference_type<_InputIter> __count, const _InputIter& __sentinel, input_iterator_tag) { + __difference_type<_InputIter> __dist = 0; + for (; __dist < __count && __iter != __sentinel; ++__dist) + ++__iter; + return __count - __dist; + } + + // advance with sentinel, a la std::ranges::advance -- 
BidirectionalIterator specialization + template + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 static __difference_type<_BiDirIter> + __advance_to(_BiDirIter& __iter, + __difference_type<_BiDirIter> __count, + const _BiDirIter& __sentinel, + bidirectional_iterator_tag) { + __difference_type<_BiDirIter> __dist = 0; + if (__count >= 0) + for (; __dist < __count && __iter != __sentinel; ++__dist) + ++__iter; + else + for (__count = -__count; __dist < __count && __iter != __sentinel; ++__dist) + --__iter; + return __count - __dist; + } + + // advance with sentinel, a la std::ranges::advance -- RandomIterator specialization + template + _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 static __difference_type<_RandIter> + __advance_to(_RandIter& __iter, + __difference_type<_RandIter> __count, + const _RandIter& __sentinel, + random_access_iterator_tag) { + auto __dist = _IterOps::distance(__iter, __sentinel); + _LIBCPP_ASSERT_VALID_INPUT_RANGE( + __count == 0 || (__dist < 0) == (__count < 0), "__sentinel must precede __iter when __count < 0"); + if (__count < 0) + __dist = __dist > __count ? __dist : __count; + else + __dist = __dist < __count ? 
__dist : __count; + __iter += __dist; + return __count - __dist; + } }; _LIBCPP_END_NAMESPACE_STD diff --git a/lib/libcxx/include/__algorithm/lexicographical_compare.h b/lib/libcxx/include/__algorithm/lexicographical_compare.h index 3efd8e24bf6c..edc29e269c88 100644 --- a/lib/libcxx/include/__algorithm/lexicographical_compare.h +++ b/lib/libcxx/include/__algorithm/lexicographical_compare.h @@ -37,7 +37,7 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool __lexicographical_compa } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool lexicographical_compare( +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool lexicographical_compare( _InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, @@ -47,7 +47,7 @@ _LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool lexicographical_compare( +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool lexicographical_compare( _InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, _InputIterator2 __last2) { return std::lexicographical_compare(__first1, __last1, __first2, __last2, __less<>()); } diff --git a/lib/libcxx/include/__algorithm/lexicographical_compare_three_way.h b/lib/libcxx/include/__algorithm/lexicographical_compare_three_way.h index 32de97d07a13..a5872e90cf8d 100644 --- a/lib/libcxx/include/__algorithm/lexicographical_compare_three_way.h +++ b/lib/libcxx/include/__algorithm/lexicographical_compare_three_way.h @@ -17,7 +17,7 @@ #include <__config> #include <__iterator/iterator_traits.h> #include <__type_traits/common_type.h> -#include <__type_traits/is_copy_constructible.h> +#include <__type_traits/is_constructible.h> #include <__utility/move.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -90,7 +90,7 @@ _LIBCPP_HIDE_FROM_ABI constexpr 
auto __lexicographical_compare_three_way_slow_pa } template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr auto lexicographical_compare_three_way( +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr auto lexicographical_compare_three_way( _InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, _InputIterator2 __last2, _Cmp __comp) -> decltype(__comp(*__first1, *__first2)) { static_assert(__comparison_category, @@ -110,7 +110,7 @@ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr auto lexicographical_compa } template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr auto lexicographical_compare_three_way( +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr auto lexicographical_compare_three_way( _InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, _InputIterator2 __last2) { return std::lexicographical_compare_three_way( std::move(__first1), std::move(__last1), std::move(__first2), std::move(__last2), std::compare_three_way()); diff --git a/lib/libcxx/include/__algorithm/lower_bound.h b/lib/libcxx/include/__algorithm/lower_bound.h index 8f57f3592c4b..c417d8483549 100644 --- a/lib/libcxx/include/__algorithm/lower_bound.h +++ b/lib/libcxx/include/__algorithm/lower_bound.h @@ -27,11 +27,13 @@ _LIBCPP_BEGIN_NAMESPACE_STD -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _Iter -__lower_bound(_Iter __first, _Sent __last, const _Type& __value, _Comp& __comp, _Proj& __proj) { - auto __len = _IterOps<_AlgPolicy>::distance(__first, __last); - +template +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _Iter __lower_bound_bisecting( + _Iter __first, + const _Type& __value, + typename iterator_traits<_Iter>::difference_type __len, + _Comp& __comp, + _Proj& __proj) { while (__len != 0) { auto __l2 = std::__half_positive(__len); _Iter __m = __first; @@ -46,8 +48,50 @@ __lower_bound(_Iter __first, _Sent __last, const _Type& __value, _Comp& __comp, return __first; } +// One-sided binary search, aka 
meta binary search, has been in the public domain for decades, and has the general +// advantage of being \Omega(1) rather than the classic algorithm's \Omega(log(n)), with the downside of executing at +// most 2*log(n) comparisons vs the classic algorithm's exact log(n). There are two scenarios in which it really shines: +// the first one is when operating over non-random-access iterators, because the classic algorithm requires knowing the +// container's size upfront, which adds \Omega(n) iterator increments to the complexity. The second one is when you're +// traversing the container in order, trying to fast-forward to the next value: in that case, the classic algorithm +// would yield \Omega(n*log(n)) comparisons and, for non-random-access iterators, \Omega(n^2) iterator increments, +// whereas the one-sided version will yield O(n) operations on both counts, with a \Omega(log(n)) bound on the number of +// comparisons. +template +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +__lower_bound_onesided(_ForwardIterator __first, _Sent __last, const _Type& __value, _Comp& __comp, _Proj& __proj) { + // step = 0, ensuring we can always short-circuit when distance is 1 later on + if (__first == __last || !std::__invoke(__comp, std::__invoke(__proj, *__first), __value)) + return __first; + + using _Distance = typename iterator_traits<_ForwardIterator>::difference_type; + for (_Distance __step = 1; __first != __last; __step <<= 1) { + auto __it = __first; + auto __dist = __step - _IterOps<_AlgPolicy>::__advance_to(__it, __step, __last); + // once we reach the last range where needle can be we must start + // looking inwards, bisecting that range + if (__it == __last || !std::__invoke(__comp, std::__invoke(__proj, *__it), __value)) { + // we've already checked the previous value and it was less, we can save + // one comparison by skipping bisection + if (__dist == 1) + return __it; + return 
std::__lower_bound_bisecting<_AlgPolicy>(__first, __value, __dist, __comp, __proj); + } + // range not found, move forward! + __first = __it; + } + return __first; +} + +template +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +__lower_bound(_ForwardIterator __first, _Sent __last, const _Type& __value, _Comp& __comp, _Proj& __proj) { + const auto __dist = _IterOps<_AlgPolicy>::distance(__first, __last); + return std::__lower_bound_bisecting<_AlgPolicy>(__first, __value, __dist, __comp, __proj); +} + template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator lower_bound(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __value, _Compare __comp) { static_assert(__is_callable<_Compare, decltype(*__first), const _Tp&>::value, "The comparator has to be callable"); auto __proj = std::__identity(); @@ -55,7 +99,7 @@ lower_bound(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __valu } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator lower_bound(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { return std::lower_bound(__first, __last, __value, __less<>()); } diff --git a/lib/libcxx/include/__algorithm/make_projected.h b/lib/libcxx/include/__algorithm/make_projected.h index bb7bc7e8c0b5..5245e523f3df 100644 --- a/lib/libcxx/include/__algorithm/make_projected.h +++ b/lib/libcxx/include/__algorithm/make_projected.h @@ -36,8 +36,8 @@ struct _ProjectedPred { : __pred(__pred_arg), __proj(__proj_arg) {} template - typename __invoke_of<_Pred&, decltype(std::__invoke(std::declval<_Proj&>(), std::declval<_Tp>())) >:: - type _LIBCPP_CONSTEXPR _LIBCPP_HIDE_FROM_ABI + typename __invoke_of<_Pred&, 
decltype(std::__invoke(std::declval<_Proj&>(), std::declval<_Tp>()))>::type + _LIBCPP_CONSTEXPR _LIBCPP_HIDE_FROM_ABI operator()(_Tp&& __v) const { return std::__invoke(__pred, std::__invoke(__proj, std::forward<_Tp>(__v))); } @@ -45,8 +45,8 @@ struct _ProjectedPred { template typename __invoke_of<_Pred&, decltype(std::__invoke(std::declval<_Proj&>(), std::declval<_T1>())), - decltype(std::__invoke(std::declval<_Proj&>(), - std::declval<_T2>())) >::type _LIBCPP_CONSTEXPR _LIBCPP_HIDE_FROM_ABI + decltype(std::__invoke(std::declval<_Proj&>(), std::declval<_T2>()))>::type _LIBCPP_CONSTEXPR + _LIBCPP_HIDE_FROM_ABI operator()(_T1&& __lhs, _T2&& __rhs) const { return std::__invoke( __pred, std::__invoke(__proj, std::forward<_T1>(__lhs)), std::__invoke(__proj, std::forward<_T2>(__rhs))); diff --git a/lib/libcxx/include/__algorithm/max.h b/lib/libcxx/include/__algorithm/max.h index 8171677f155c..d4c99f6f3643 100644 --- a/lib/libcxx/include/__algorithm/max.h +++ b/lib/libcxx/include/__algorithm/max.h @@ -25,13 +25,13 @@ _LIBCPP_PUSH_MACROS _LIBCPP_BEGIN_NAMESPACE_STD template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Tp& +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Tp& max(_LIBCPP_LIFETIMEBOUND const _Tp& __a, _LIBCPP_LIFETIMEBOUND const _Tp& __b, _Compare __comp) { return __comp(__a, __b) ? 
__b : __a; } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Tp& +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Tp& max(_LIBCPP_LIFETIMEBOUND const _Tp& __a, _LIBCPP_LIFETIMEBOUND const _Tp& __b) { return std::max(__a, __b, __less<>()); } @@ -39,13 +39,13 @@ max(_LIBCPP_LIFETIMEBOUND const _Tp& __a, _LIBCPP_LIFETIMEBOUND const _Tp& __b) #ifndef _LIBCPP_CXX03_LANG template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp max(initializer_list<_Tp> __t, _Compare __comp) { return *std::__max_element<__comp_ref_type<_Compare> >(__t.begin(), __t.end(), __comp); } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp max(initializer_list<_Tp> __t) { +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp max(initializer_list<_Tp> __t) { return *std::max_element(__t.begin(), __t.end(), __less<>()); } diff --git a/lib/libcxx/include/__algorithm/max_element.h b/lib/libcxx/include/__algorithm/max_element.h index f1d4f1cd0938..c036726cbccd 100644 --- a/lib/libcxx/include/__algorithm/max_element.h +++ b/lib/libcxx/include/__algorithm/max_element.h @@ -35,13 +35,13 @@ __max_element(_ForwardIterator __first, _ForwardIterator __last, _Compare __comp } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _ForwardIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _ForwardIterator max_element(_ForwardIterator __first, _ForwardIterator __last, _Compare __comp) { return std::__max_element<__comp_ref_type<_Compare> >(__first, __last, __comp); } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _ForwardIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _ForwardIterator 
max_element(_ForwardIterator __first, _ForwardIterator __last) { return std::max_element(__first, __last, __less<>()); } diff --git a/lib/libcxx/include/__algorithm/min.h b/lib/libcxx/include/__algorithm/min.h index 919508486fd5..1bafad8a461e 100644 --- a/lib/libcxx/include/__algorithm/min.h +++ b/lib/libcxx/include/__algorithm/min.h @@ -25,13 +25,13 @@ _LIBCPP_PUSH_MACROS _LIBCPP_BEGIN_NAMESPACE_STD template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Tp& +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Tp& min(_LIBCPP_LIFETIMEBOUND const _Tp& __a, _LIBCPP_LIFETIMEBOUND const _Tp& __b, _Compare __comp) { return __comp(__b, __a) ? __b : __a; } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Tp& +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Tp& min(_LIBCPP_LIFETIMEBOUND const _Tp& __a, _LIBCPP_LIFETIMEBOUND const _Tp& __b) { return std::min(__a, __b, __less<>()); } @@ -39,13 +39,13 @@ min(_LIBCPP_LIFETIMEBOUND const _Tp& __a, _LIBCPP_LIFETIMEBOUND const _Tp& __b) #ifndef _LIBCPP_CXX03_LANG template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp min(initializer_list<_Tp> __t, _Compare __comp) { return *std::__min_element<__comp_ref_type<_Compare> >(__t.begin(), __t.end(), __comp); } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp min(initializer_list<_Tp> __t) { +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp min(initializer_list<_Tp> __t) { return *std::min_element(__t.begin(), __t.end(), __less<>()); } diff --git a/lib/libcxx/include/__algorithm/min_element.h b/lib/libcxx/include/__algorithm/min_element.h index c576d665601d..65f3594d630c 100644 --- a/lib/libcxx/include/__algorithm/min_element.h +++ 
b/lib/libcxx/include/__algorithm/min_element.h @@ -48,7 +48,7 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Iter __min_element(_Iter __ } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _ForwardIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _ForwardIterator min_element(_ForwardIterator __first, _ForwardIterator __last, _Compare __comp) { static_assert( __has_forward_iterator_category<_ForwardIterator>::value, "std::min_element requires a ForwardIterator"); @@ -59,7 +59,7 @@ min_element(_ForwardIterator __first, _ForwardIterator __last, _Compare __comp) } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _ForwardIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _ForwardIterator min_element(_ForwardIterator __first, _ForwardIterator __last) { return std::min_element(__first, __last, __less<>()); } diff --git a/lib/libcxx/include/__algorithm/minmax.h b/lib/libcxx/include/__algorithm/minmax.h index 5227b8857175..9feda2b4c0da 100644 --- a/lib/libcxx/include/__algorithm/minmax.h +++ b/lib/libcxx/include/__algorithm/minmax.h @@ -24,13 +24,13 @@ _LIBCPP_BEGIN_NAMESPACE_STD template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair minmax(_LIBCPP_LIFETIMEBOUND const _Tp& __a, _LIBCPP_LIFETIMEBOUND const _Tp& __b, _Compare __comp) { return __comp(__b, __a) ? 
pair(__b, __a) : pair(__a, __b); } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair minmax(_LIBCPP_LIFETIMEBOUND const _Tp& __a, _LIBCPP_LIFETIMEBOUND const _Tp& __b) { return std::minmax(__a, __b, __less<>()); } @@ -38,7 +38,7 @@ minmax(_LIBCPP_LIFETIMEBOUND const _Tp& __a, _LIBCPP_LIFETIMEBOUND const _Tp& __ #ifndef _LIBCPP_CXX03_LANG template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Tp, _Tp> +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Tp, _Tp> minmax(initializer_list<_Tp> __t, _Compare __comp) { static_assert(__is_callable<_Compare, _Tp, _Tp>::value, "The comparator has to be callable"); __identity __proj; @@ -47,7 +47,7 @@ minmax(initializer_list<_Tp> __t, _Compare __comp) { } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Tp, _Tp> +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Tp, _Tp> minmax(initializer_list<_Tp> __t) { return std::minmax(__t, __less<>()); } diff --git a/lib/libcxx/include/__algorithm/minmax_element.h b/lib/libcxx/include/__algorithm/minmax_element.h index ff8cda321cef..43cb23347c34 100644 --- a/lib/libcxx/include/__algorithm/minmax_element.h +++ b/lib/libcxx/include/__algorithm/minmax_element.h @@ -79,7 +79,7 @@ __minmax_element_impl(_Iter __first, _Sent __last, _Comp& __comp, _Proj& __proj) } template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_ForwardIterator, _ForwardIterator> +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_ForwardIterator, _ForwardIterator> minmax_element(_ForwardIterator __first, _ForwardIterator __last, _Compare __comp) { static_assert( __has_forward_iterator_category<_ForwardIterator>::value, "std::minmax_element requires a ForwardIterator"); @@ -90,9 
+90,8 @@ minmax_element(_ForwardIterator __first, _ForwardIterator __last, _Compare __com } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 - pair<_ForwardIterator, _ForwardIterator> - minmax_element(_ForwardIterator __first, _ForwardIterator __last) { +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_ForwardIterator, _ForwardIterator> +minmax_element(_ForwardIterator __first, _ForwardIterator __last) { return std::minmax_element(__first, __last, __less<>()); } diff --git a/lib/libcxx/include/__algorithm/mismatch.h b/lib/libcxx/include/__algorithm/mismatch.h index d345b6048a7e..632bec02406a 100644 --- a/lib/libcxx/include/__algorithm/mismatch.h +++ b/lib/libcxx/include/__algorithm/mismatch.h @@ -11,47 +11,200 @@ #define _LIBCPP___ALGORITHM_MISMATCH_H #include <__algorithm/comp.h> +#include <__algorithm/min.h> +#include <__algorithm/simd_utils.h> +#include <__algorithm/unwrap_iter.h> #include <__config> -#include <__iterator/iterator_traits.h> +#include <__functional/identity.h> +#include <__iterator/aliasing_iterator.h> +#include <__type_traits/desugars_to.h> +#include <__type_traits/invoke.h> +#include <__type_traits/is_constant_evaluated.h> +#include <__type_traits/is_equality_comparable.h> +#include <__type_traits/is_integral.h> +#include <__utility/move.h> #include <__utility/pair.h> +#include <__utility/unreachable.h> +#include #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header #endif +_LIBCPP_PUSH_MACROS +#include <__undef_macros> + _LIBCPP_BEGIN_NAMESPACE_STD +template +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_Iter1, _Iter2> +__mismatch_loop(_Iter1 __first1, _Sent1 __last1, _Iter2 __first2, _Pred& __pred, _Proj1& __proj1, _Proj2& __proj2) { + while (__first1 != __last1) { + if (!std::__invoke(__pred, std::__invoke(__proj1, *__first1), std::__invoke(__proj2, *__first2))) + break; + ++__first1; + ++__first2; + } + return 
std::make_pair(std::move(__first1), std::move(__first2)); +} + +template +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_Iter1, _Iter2> +__mismatch(_Iter1 __first1, _Sent1 __last1, _Iter2 __first2, _Pred& __pred, _Proj1& __proj1, _Proj2& __proj2) { + return std::__mismatch_loop(__first1, __last1, __first2, __pred, __proj1, __proj2); +} + +#if _LIBCPP_VECTORIZE_ALGORITHMS + +template +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_Iter, _Iter> +__mismatch_vectorized(_Iter __first1, _Iter __last1, _Iter __first2) { + using __value_type = __iter_value_type<_Iter>; + constexpr size_t __unroll_count = 4; + constexpr size_t __vec_size = __native_vector_size<__value_type>; + using __vec = __simd_vector<__value_type, __vec_size>; + + if (!__libcpp_is_constant_evaluated()) { + auto __orig_first1 = __first1; + auto __last2 = __first2 + (__last1 - __first1); + while (static_cast(__last1 - __first1) >= __unroll_count * __vec_size) [[__unlikely__]] { + __vec __lhs[__unroll_count]; + __vec __rhs[__unroll_count]; + + for (size_t __i = 0; __i != __unroll_count; ++__i) { + __lhs[__i] = std::__load_vector<__vec>(__first1 + __i * __vec_size); + __rhs[__i] = std::__load_vector<__vec>(__first2 + __i * __vec_size); + } + + for (size_t __i = 0; __i != __unroll_count; ++__i) { + if (auto __cmp_res = __lhs[__i] == __rhs[__i]; !std::__all_of(__cmp_res)) { + auto __offset = __i * __vec_size + std::__find_first_not_set(__cmp_res); + return {__first1 + __offset, __first2 + __offset}; + } + } + + __first1 += __unroll_count * __vec_size; + __first2 += __unroll_count * __vec_size; + } + + // check the remaining 0-3 vectors + while (static_cast(__last1 - __first1) >= __vec_size) { + if (auto __cmp_res = std::__load_vector<__vec>(__first1) == std::__load_vector<__vec>(__first2); + !std::__all_of(__cmp_res)) { + auto __offset = std::__find_first_not_set(__cmp_res); + return {__first1 + __offset, __first2 + __offset}; + } + __first1 += 
__vec_size; + __first2 += __vec_size; + } + + if (__last1 - __first1 == 0) + return {__first1, __first2}; + + // Check if we can load elements in front of the current pointer. If that's the case load a vector at + // (last - vector_size) to check the remaining elements + if (static_cast(__first1 - __orig_first1) >= __vec_size) { + __first1 = __last1 - __vec_size; + __first2 = __last2 - __vec_size; + auto __offset = + std::__find_first_not_set(std::__load_vector<__vec>(__first1) == std::__load_vector<__vec>(__first2)); + return {__first1 + __offset, __first2 + __offset}; + } // else loop over the elements individually + } + + __equal_to __pred; + __identity __proj; + return std::__mismatch_loop(__first1, __last1, __first2, __pred, __proj, __proj); +} + +template ::value && __desugars_to_v<__equal_tag, _Pred, _Tp, _Tp> && + __is_identity<_Proj1>::value && __is_identity<_Proj2>::value, + int> = 0> +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_Tp*, _Tp*> +__mismatch(_Tp* __first1, _Tp* __last1, _Tp* __first2, _Pred&, _Proj1&, _Proj2&) { + return std::__mismatch_vectorized(__first1, __last1, __first2); +} + +template ::value && __desugars_to_v<__equal_tag, _Pred, _Tp, _Tp> && + __is_identity<_Proj1>::value && __is_identity<_Proj2>::value && + __can_map_to_integer_v<_Tp> && __libcpp_is_trivially_equality_comparable<_Tp, _Tp>::value, + int> = 0> +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_Tp*, _Tp*> +__mismatch(_Tp* __first1, _Tp* __last1, _Tp* __first2, _Pred& __pred, _Proj1& __proj1, _Proj2& __proj2) { + if (__libcpp_is_constant_evaluated()) { + return std::__mismatch_loop(__first1, __last1, __first2, __pred, __proj1, __proj2); + } else { + using _Iter = __aliasing_iterator<_Tp*, __get_as_integer_type_t<_Tp>>; + auto __ret = std::__mismatch_vectorized(_Iter(__first1), _Iter(__last1), _Iter(__first2)); + return {__ret.first.__base(), __ret.second.__base()}; + } +} +#endif // _LIBCPP_VECTORIZE_ALGORITHMS + 
template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_InputIterator1, _InputIterator2> +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_InputIterator1, _InputIterator2> mismatch(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, _BinaryPredicate __pred) { - for (; __first1 != __last1; ++__first1, (void)++__first2) - if (!__pred(*__first1, *__first2)) - break; - return pair<_InputIterator1, _InputIterator2>(__first1, __first2); + __identity __proj; + auto __res = std::__mismatch( + std::__unwrap_iter(__first1), std::__unwrap_iter(__last1), std::__unwrap_iter(__first2), __pred, __proj, __proj); + return std::make_pair(std::__rewrap_iter(__first1, __res.first), std::__rewrap_iter(__first2, __res.second)); } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_InputIterator1, _InputIterator2> +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_InputIterator1, _InputIterator2> mismatch(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2) { return std::mismatch(__first1, __last1, __first2, __equal_to()); } #if _LIBCPP_STD_VER >= 14 +template +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_Iter1, _Iter2> __mismatch( + _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, _Sent2 __last2, _Pred& __pred, _Proj1& __proj1, _Proj2& __proj2) { + while (__first1 != __last1 && __first2 != __last2) { + if (!std::__invoke(__pred, std::__invoke(__proj1, *__first1), std::__invoke(__proj2, *__first2))) + break; + ++__first1; + ++__first2; + } + return {std::move(__first1), std::move(__first2)}; +} + +template +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_Tp*, _Tp*> +__mismatch(_Tp* __first1, _Tp* __last1, _Tp* __first2, _Tp* __last2, _Pred& __pred, _Proj1& __proj1, _Proj2& __proj2) { + auto __len = std::min(__last1 - __first1, 
__last2 - __first2); + return std::__mismatch(__first1, __first1 + __len, __first2, __pred, __proj1, __proj2); +} + template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_InputIterator1, _InputIterator2> +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_InputIterator1, _InputIterator2> mismatch(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, _InputIterator2 __last2, _BinaryPredicate __pred) { - for (; __first1 != __last1 && __first2 != __last2; ++__first1, (void)++__first2) - if (!__pred(*__first1, *__first2)) - break; - return pair<_InputIterator1, _InputIterator2>(__first1, __first2); + __identity __proj; + auto __res = std::__mismatch( + std::__unwrap_iter(__first1), + std::__unwrap_iter(__last1), + std::__unwrap_iter(__first2), + std::__unwrap_iter(__last2), + __pred, + __proj, + __proj); + return {std::__rewrap_iter(__first1, __res.first), std::__rewrap_iter(__first2, __res.second)}; } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_InputIterator1, _InputIterator2> +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_InputIterator1, _InputIterator2> mismatch(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __first2, _InputIterator2 __last2) { return std::mismatch(__first1, __last1, __first2, __last2, __equal_to()); } @@ -59,4 +212,6 @@ mismatch(_InputIterator1 __first1, _InputIterator1 __last1, _InputIterator2 __fi _LIBCPP_END_NAMESPACE_STD +_LIBCPP_POP_MACROS + #endif // _LIBCPP___ALGORITHM_MISMATCH_H diff --git a/lib/libcxx/include/__algorithm/move.h b/lib/libcxx/include/__algorithm/move.h index dba6d487fff7..1716d43e2a61 100644 --- a/lib/libcxx/include/__algorithm/move.h +++ b/lib/libcxx/include/__algorithm/move.h @@ -16,7 +16,7 @@ #include <__config> #include <__iterator/segmented_iterator.h> #include <__type_traits/common_type.h> -#include 
<__type_traits/is_copy_constructible.h> +#include <__type_traits/is_constructible.h> #include <__utility/move.h> #include <__utility/pair.h> @@ -34,7 +34,7 @@ inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_InIter, _OutIte __move(_InIter __first, _Sent __last, _OutIter __result); template -struct __move_loop { +struct __move_impl { template _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_InIter, _OutIter> operator()(_InIter __first, _Sent __last, _OutIter __result) const { @@ -95,9 +95,7 @@ struct __move_loop { __local_first = _Traits::__begin(++__segment_iterator); } } -}; -struct __move_trivial { // At this point, the iterators have been unwrapped so any `contiguous_iterator` has been unwrapped to a pointer. template ::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_In*, _Out*> @@ -109,7 +107,7 @@ struct __move_trivial { template inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_InIter, _OutIter> __move(_InIter __first, _Sent __last, _OutIter __result) { - return std::__dispatch_copy_or_move<_AlgPolicy, __move_loop<_AlgPolicy>, __move_trivial>( + return std::__copy_move_unwrap_iters<__move_impl<_AlgPolicy> >( std::move(__first), std::move(__last), std::move(__result)); } diff --git a/lib/libcxx/include/__algorithm/move_backward.h b/lib/libcxx/include/__algorithm/move_backward.h index aeedf4241dce..4beb7bdbaac0 100644 --- a/lib/libcxx/include/__algorithm/move_backward.h +++ b/lib/libcxx/include/__algorithm/move_backward.h @@ -15,7 +15,7 @@ #include <__config> #include <__iterator/segmented_iterator.h> #include <__type_traits/common_type.h> -#include <__type_traits/is_copy_constructible.h> +#include <__type_traits/is_constructible.h> #include <__utility/move.h> #include <__utility/pair.h> @@ -33,7 +33,7 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 pair<_BidirectionalIterator1 __move_backward(_BidirectionalIterator1 __first, _Sentinel __last, _BidirectionalIterator2 __result); template 
-struct __move_backward_loop { +struct __move_backward_impl { template _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_InIter, _OutIter> operator()(_InIter __first, _Sent __last, _OutIter __result) const { @@ -104,9 +104,7 @@ struct __move_backward_loop { __local_last = _Traits::__end(--__segment_iterator); } } -}; -struct __move_backward_trivial { // At this point, the iterators have been unwrapped so any `contiguous_iterator` has been unwrapped to a pointer. template ::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_In*, _Out*> @@ -122,7 +120,7 @@ __move_backward(_BidirectionalIterator1 __first, _Sentinel __last, _Bidirectiona std::is_copy_constructible<_BidirectionalIterator1>::value, "Iterators must be copy constructible."); - return std::__dispatch_copy_or_move<_AlgPolicy, __move_backward_loop<_AlgPolicy>, __move_backward_trivial>( + return std::__copy_move_unwrap_iters<__move_backward_impl<_AlgPolicy> >( std::move(__first), std::move(__last), std::move(__result)); } diff --git a/lib/libcxx/include/__algorithm/none_of.h b/lib/libcxx/include/__algorithm/none_of.h index ce59187a3a65..50841ba17cc6 100644 --- a/lib/libcxx/include/__algorithm/none_of.h +++ b/lib/libcxx/include/__algorithm/none_of.h @@ -19,7 +19,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 bool none_of(_InputIterator __first, _InputIterator __last, _Predicate __pred) { for (; __first != __last; ++__first) if (__pred(*__first)) diff --git a/lib/libcxx/include/__algorithm/partial_sort.h b/lib/libcxx/include/__algorithm/partial_sort.h index 85a8fdc77aa2..7f8d0c49147e 100644 --- a/lib/libcxx/include/__algorithm/partial_sort.h +++ b/lib/libcxx/include/__algorithm/partial_sort.h @@ -18,8 +18,8 @@ #include <__config> #include <__debug_utils/randomize_range.h> #include <__iterator/iterator_traits.h> -#include 
<__type_traits/is_copy_assignable.h> -#include <__type_traits/is_copy_constructible.h> +#include <__type_traits/is_assignable.h> +#include <__type_traits/is_constructible.h> #include <__utility/move.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__algorithm/pop_heap.h b/lib/libcxx/include/__algorithm/pop_heap.h index 798a1d09934b..6d23830097ff 100644 --- a/lib/libcxx/include/__algorithm/pop_heap.h +++ b/lib/libcxx/include/__algorithm/pop_heap.h @@ -17,8 +17,8 @@ #include <__assert> #include <__config> #include <__iterator/iterator_traits.h> -#include <__type_traits/is_copy_assignable.h> -#include <__type_traits/is_copy_constructible.h> +#include <__type_traits/is_assignable.h> +#include <__type_traits/is_constructible.h> #include <__utility/move.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__algorithm/pstl.h b/lib/libcxx/include/__algorithm/pstl.h new file mode 100644 index 000000000000..0bb052b3f97c --- /dev/null +++ b/lib/libcxx/include/__algorithm/pstl.h @@ -0,0 +1,663 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___ALGORITHM_PSTL_H +#define _LIBCPP___ALGORITHM_PSTL_H + +#include <__config> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_PUSH_MACROS +#include <__undef_macros> + +#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 + +# include <__functional/operations.h> +# include <__iterator/cpp17_iterator_concepts.h> +# include <__iterator/iterator_traits.h> +# include <__pstl/backend.h> +# include <__pstl/dispatch.h> +# include <__pstl/handle_exception.h> +# include <__type_traits/enable_if.h> +# include <__type_traits/is_execution_policy.h> +# include <__type_traits/remove_cvref.h> +# include <__utility/forward.h> +# include <__utility/move.h> + +_LIBCPP_BEGIN_NAMESPACE_STD + +template , + enable_if_t, int> = 0> +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool +any_of(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "any_of requires a ForwardIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__any_of, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__pred)); +} + +template , + enable_if_t, int> = 0> +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool +all_of(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Pred __pred) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "all_of requires a ForwardIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__all_of, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), 
std::move(__pred)); +} + +template , + enable_if_t, int> = 0> +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool +none_of(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Pred __pred) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "none_of requires a ForwardIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__none_of, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__pred)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator +copy(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _ForwardOutIterator __result) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR( + _ForwardIterator, "copy(first, last, result) requires [first, last) to be ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR( + _ForwardOutIterator, "copy(first, last, result) requires result to be a ForwardIterator"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR( + _ForwardOutIterator, decltype(*__first), "copy(first, last, result) requires result to be an OutputIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__copy, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__result)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator +copy_n(_ExecutionPolicy&& __policy, _ForwardIterator __first, _Size __n, _ForwardOutIterator __result) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR( + _ForwardIterator, "copy_n(first, n, result) requires first to be a ForwardIterator"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR( + _ForwardOutIterator, "copy_n(first, n, result) requires result to be a ForwardIterator"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR( + _ForwardOutIterator, 
decltype(*__first), "copy_n(first, n, result) requires result to be an OutputIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__copy_n, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__n), std::move(__result)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI __iter_diff_t<_ForwardIterator> +count_if(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR( + _ForwardIterator, "count_if(first, last, pred) requires [first, last) to be ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__count_if, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__pred)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI __iter_diff_t<_ForwardIterator> +count(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR( + _ForwardIterator, "count(first, last, val) requires [first, last) to be ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__count, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), __value); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI bool +equal(_ExecutionPolicy&& __policy, + _ForwardIterator1 __first1, + _ForwardIterator1 __last1, + _ForwardIterator2 __first2, + _Pred __pred) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "equal requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "equal requires ForwardIterators"); + using _Implementation = 
__pstl::__dispatch<__pstl::__equal_3leg, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), + std::move(__first1), + std::move(__last1), + std::move(__first2), + std::move(__pred)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI bool +equal(_ExecutionPolicy&& __policy, _ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "equal requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "equal requires ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__equal_3leg, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), + std::move(__first1), + std::move(__last1), + std::move(__first2), + equal_to{}); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI bool +equal(_ExecutionPolicy&& __policy, + _ForwardIterator1 __first1, + _ForwardIterator1 __last1, + _ForwardIterator2 __first2, + _ForwardIterator2 __last2, + _Pred __pred) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "equal requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "equal requires ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__equal, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), + std::move(__first1), + std::move(__last1), + std::move(__first2), + std::move(__last2), + std::move(__pred)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI bool +equal(_ExecutionPolicy&& __policy, + _ForwardIterator1 __first1, + _ForwardIterator1 __last1, + _ForwardIterator2 __first2, + _ForwardIterator2 __last2) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "equal 
requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "equal requires ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__equal, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), + std::move(__first1), + std::move(__last1), + std::move(__first2), + std::move(__last2), + equal_to{}); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI void +fill(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "fill requires ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__fill, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), __value); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI void +fill_n(_ExecutionPolicy&& __policy, _ForwardIterator __first, _Size __n, const _Tp& __value) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "fill_n requires a ForwardIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__fill_n, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__n), __value); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI _ForwardIterator +find_if(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "find_if requires ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__find_if, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), 
std::move(__pred)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI _ForwardIterator +find_if_not(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "find_if_not requires ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__find_if_not, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__pred)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI _ForwardIterator +find(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "find requires ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__find, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), __value); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI void +for_each(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Function __func) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "for_each requires ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__for_each, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__func)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI void +for_each_n(_ExecutionPolicy&& __policy, _ForwardIterator __first, _Size __size, _Function __func) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "for_each_n requires a ForwardIterator"); + using _Implementation = 
__pstl::__dispatch<__pstl::__for_each_n, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__size), std::move(__func)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI void +generate(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Generator __gen) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "generate requires ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__generate, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__gen)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI void +generate_n(_ExecutionPolicy&& __policy, _ForwardIterator __first, _Size __n, _Generator __gen) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "generate_n requires a ForwardIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__generate_n, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__n), std::move(__gen)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI bool +is_partitioned(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "is_partitioned requires ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__is_partitioned, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__pred)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator +merge(_ExecutionPolicy&& 
__policy, + _ForwardIterator1 __first1, + _ForwardIterator1 __last1, + _ForwardIterator2 __first2, + _ForwardIterator2 __last2, + _ForwardOutIterator __result, + _Comp __comp) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "merge requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "merge requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(_ForwardOutIterator, decltype(*__first1), "merge requires an OutputIterator"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(_ForwardOutIterator, decltype(*__first2), "merge requires an OutputIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__merge, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), + std::move(__first1), + std::move(__last1), + std::move(__first2), + std::move(__last2), + std::move(__result), + std::move(__comp)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator +merge(_ExecutionPolicy&& __policy, + _ForwardIterator1 __first1, + _ForwardIterator1 __last1, + _ForwardIterator2 __first2, + _ForwardIterator2 __last2, + _ForwardOutIterator __result) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "merge requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "merge requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(_ForwardOutIterator, decltype(*__first1), "merge requires an OutputIterator"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(_ForwardOutIterator, decltype(*__first2), "merge requires an OutputIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__merge, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), + std::move(__first1), + std::move(__last1), + std::move(__first2), + std::move(__last2), + std::move(__result), + less{}); +} + +template , 
+ enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator +move(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _ForwardOutIterator __result) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "move requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator, "move requires an OutputIterator"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR( + _ForwardOutIterator, decltype(std::move(*__first)), "move requires an OutputIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__move, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__result)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI void +replace_if(_ExecutionPolicy&& __policy, + _ForwardIterator __first, + _ForwardIterator __last, + _Pred __pred, + const _Tp& __new_value) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "replace_if requires ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__replace_if, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__pred), __new_value); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI void +replace(_ExecutionPolicy&& __policy, + _ForwardIterator __first, + _ForwardIterator __last, + const _Tp& __old_value, + const _Tp& __new_value) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "replace requires ForwardIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__replace, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), __old_value, __new_value); +} + +template , + enable_if_t, int> = 0> 
+_LIBCPP_HIDE_FROM_ABI void replace_copy_if( + _ExecutionPolicy&& __policy, + _ForwardIterator __first, + _ForwardIterator __last, + _ForwardOutIterator __result, + _Pred __pred, + const _Tp& __new_value) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "replace_copy_if requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator, "replace_copy_if requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR( + _ForwardOutIterator, decltype(*__first), "replace_copy_if requires an OutputIterator"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(_ForwardOutIterator, const _Tp&, "replace_copy requires an OutputIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__replace_copy_if, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), + std::move(__first), + std::move(__last), + std::move(__result), + std::move(__pred), + __new_value); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI void replace_copy( + _ExecutionPolicy&& __policy, + _ForwardIterator __first, + _ForwardIterator __last, + _ForwardOutIterator __result, + const _Tp& __old_value, + const _Tp& __new_value) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "replace_copy requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator, "replace_copy requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR( + _ForwardOutIterator, decltype(*__first), "replace_copy requires an OutputIterator"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(_ForwardOutIterator, const _Tp&, "replace_copy requires an OutputIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__replace_copy, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), + std::move(__first), + std::move(__last), + std::move(__result), + __old_value, + __new_value); +} + 
+template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator rotate_copy( + _ExecutionPolicy&& __policy, + _ForwardIterator __first, + _ForwardIterator __middle, + _ForwardIterator __last, + _ForwardOutIterator __result) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "rotate_copy requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator, "rotate_copy requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR( + _ForwardOutIterator, decltype(*__first), "rotate_copy requires an OutputIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__rotate_copy, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), + std::move(__first), + std::move(__middle), + std::move(__last), + std::move(__result)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI void +sort(_ExecutionPolicy&& __policy, _RandomAccessIterator __first, _RandomAccessIterator __last, _Comp __comp) { + _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(_RandomAccessIterator, "sort requires RandomAccessIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__sort, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__comp)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI void +sort(_ExecutionPolicy&& __policy, _RandomAccessIterator __first, _RandomAccessIterator __last) { + _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(_RandomAccessIterator, "sort requires RandomAccessIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__sort, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), less{}); +} + +template , + enable_if_t, int> = 0> 
+_LIBCPP_HIDE_FROM_ABI void +stable_sort(_ExecutionPolicy&& __policy, _RandomAccessIterator __first, _RandomAccessIterator __last, _Comp __comp) { + _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(_RandomAccessIterator, "stable_sort requires RandomAccessIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__stable_sort, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), std::move(__comp)); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI void +stable_sort(_ExecutionPolicy&& __policy, _RandomAccessIterator __first, _RandomAccessIterator __last) { + _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(_RandomAccessIterator, "stable_sort requires RandomAccessIterators"); + using _Implementation = __pstl::__dispatch<__pstl::__stable_sort, __pstl::__current_configuration, _RawPolicy>; + __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), less{}); +} + +template , + enable_if_t, int> = 0> +_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator transform( + _ExecutionPolicy&& __policy, + _ForwardIterator __first, + _ForwardIterator __last, + _ForwardOutIterator __result, + _UnaryOperation __op) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator, "transform requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator, "transform requires an OutputIterator"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR( + _ForwardOutIterator, decltype(__op(*__first)), "transform requires an OutputIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__transform, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), + std::move(__first), + std::move(__last), + std::move(__result), + std::move(__op)); +} + +template , + enable_if_t, int> = 0> 
+_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator transform( + _ExecutionPolicy&& __policy, + _ForwardIterator1 __first1, + _ForwardIterator1 __last1, + _ForwardIterator2 __first2, + _ForwardOutIterator __result, + _BinaryOperation __op) { + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1, "transform requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2, "transform requires ForwardIterators"); + _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator, "transform requires an OutputIterator"); + _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR( + _ForwardOutIterator, decltype(__op(*__first1, *__first2)), "transform requires an OutputIterator"); + using _Implementation = __pstl::__dispatch<__pstl::__transform_binary, __pstl::__current_configuration, _RawPolicy>; + return __pstl::__handle_exception<_Implementation>( + std::forward<_ExecutionPolicy>(__policy), + std::move(__first1), + std::move(__last1), + std::move(__first2), + std::move(__result), + std::move(__op)); +} + +_LIBCPP_END_NAMESPACE_STD + +#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 + +_LIBCPP_POP_MACROS + +#endif // _LIBCPP___ALGORITHM_PSTL_H diff --git a/lib/libcxx/include/__algorithm/pstl_any_all_none_of.h b/lib/libcxx/include/__algorithm/pstl_any_all_none_of.h deleted file mode 100644 index 4b1e0e61b542..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_any_all_none_of.h +++ /dev/null @@ -1,152 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_ANY_ALL_NONE_OF_H -#define _LIBCPP___ALGORITHM_PSTL_ANY_ALL_NONE_OF_H - -#include <__algorithm/pstl_find.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__config> -#include <__iterator/cpp17_iterator_concepts.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/remove_cvref.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -void __pstl_any_of(); // declaration needed for the frontend dispatch below - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional __any_of( - _ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Predicate&& __pred) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_any_of, _RawPolicy), - [&](_ForwardIterator __g_first, _ForwardIterator __g_last, _Predicate __g_pred) -> optional { - auto __res = std::__find_if(__policy, __g_first, __g_last, __g_pred); - if (!__res) - return nullopt; - return *__res != __g_last; - }, - std::move(__first), - std::move(__last), - std::move(__pred)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI bool -any_of(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - auto __res = std::__any_of(__policy, std::move(__first), std::move(__last), std::move(__pred)); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -template -void __pstl_all_of(); // 
declaration needed for the frontend dispatch below - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional -__all_of(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Pred&& __pred) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_all_of, _RawPolicy), - [&](_ForwardIterator __g_first, _ForwardIterator __g_last, _Pred __g_pred) -> optional { - auto __res = std::__any_of(__policy, __g_first, __g_last, [&](__iter_reference<_ForwardIterator> __value) { - return !__g_pred(__value); - }); - if (!__res) - return nullopt; - return !*__res; - }, - std::move(__first), - std::move(__last), - std::move(__pred)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI bool -all_of(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Pred __pred) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - auto __res = std::__all_of(__policy, std::move(__first), std::move(__last), std::move(__pred)); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -template -void __pstl_none_of(); // declaration needed for the frontend dispatch below - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional -__none_of(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Pred&& __pred) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_none_of, _RawPolicy), - [&](_ForwardIterator __g_first, _ForwardIterator __g_last, _Pred __g_pred) -> optional { - auto __res = std::__any_of(__policy, __g_first, __g_last, __g_pred); - if (!__res) - return nullopt; - return !*__res; - }, - std::move(__first), - std::move(__last), - std::move(__pred)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI bool -none_of(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator 
__last, _Pred __pred) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - auto __res = std::__none_of(__policy, std::move(__first), std::move(__last), std::move(__pred)); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_ANY_ALL_NONE_OF_H diff --git a/lib/libcxx/include/__algorithm/pstl_backend.h b/lib/libcxx/include/__algorithm/pstl_backend.h deleted file mode 100644 index 3af03ce2fbc8..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_backend.h +++ /dev/null @@ -1,232 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_BACKEND_H -#define _LIBCPP___ALGORITHM_PSTL_BACKEND_H - -#include <__algorithm/pstl_backends/cpu_backend.h> -#include <__config> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -/* -TODO: Documentation of how backends work - -A PSTL parallel backend is a tag type to which the following functions are associated, at minimum: - - template - optional<__empty> __pstl_for_each(_Backend, _ExecutionPolicy&&, _Iterator __first, _Iterator __last, _Func __f); - - template - optional<_Iterator> __pstl_find_if(_Backend, _Iterator __first, _Iterator __last, _Predicate __pred); - - template - optional<__empty> - __pstl_stable_sort(_Backend, _RandomAccessIterator __first, _RandomAccessIterator __last, _Comp __comp); - - template - 
optional<_ForwardOutIterator> __pstl_merge(_Backend, - _ForwardIterator1 __first1, - _ForwardIterator1 __last1, - _ForwardIterator2 __first2, - _ForwardIterator2 __last2, - _ForwardOutIterator __result, - _Comp __comp); - - template - optional<_OutIterator> - __pstl_transform(_Backend, _InIterator __first, _InIterator __last, _OutIterator __result, _UnaryOperation __op); - - template - optional<_OutIterator> __pstl_transform(_InIterator1 __first1, - _InIterator2 __first2, - _InIterator1 __last1, - _OutIterator __result, - _BinaryOperation __op); - - template - optional<_Tp> __pstl_transform_reduce(_Backend, - _Iterator1 __first1, - _Iterator1 __last1, - _Iterator2 __first2, - _Iterator2 __last2, - _Tp __init, - _BinaryOperation1 __reduce, - _BinaryOperation2 __transform); - - template - optional<_Tp> __pstl_transform_reduce(_Backend, - _Iterator __first, - _Iterator __last, - _Tp __init, - _BinaryOperation __reduce, - _UnaryOperation __transform); - -// TODO: Complete this list - -The following functions are optional but can be provided. If provided, they are used by the corresponding -algorithms, otherwise they are implemented in terms of other algorithms. 
If none of the optional algorithms are -implemented, all the algorithms will eventually forward to the basis algorithms listed above: - - template - optional<__empty> __pstl_for_each_n(_Backend, _Iterator __first, _Size __n, _Func __f); - - template - optional __pstl_any_of(_Backend, _Iterator __first, _iterator __last, _Predicate __pred); - - template - optional __pstl_all_of(_Backend, _Iterator __first, _iterator __last, _Predicate __pred); - - template - optional __pstl_none_of(_Backend, _Iterator __first, _iterator __last, _Predicate __pred); - - template - optional<_Iterator> __pstl_find(_Backend, _Iterator __first, _Iterator __last, const _Tp& __value); - - template - optional<_Iterator> __pstl_find_if_not(_Backend, _Iterator __first, _Iterator __last, _Predicate __pred); - - template - optional<__empty> __pstl_fill(_Backend, _Iterator __first, _Iterator __last, const _Tp& __value); - - template - optional<__empty> __pstl_fill_n(_Backend, _Iterator __first, _SizeT __n, const _Tp& __value); - - template - optional<__empty> __pstl_generate(_Backend, _Iterator __first, _Iterator __last, _Generator __gen); - - template - optional<__empty> __pstl_is_partitioned(_Backend, _Iterator __first, _Iterator __last, _Predicate __pred); - - template - optional<__empty> __pstl_generator_n(_Backend, _Iterator __first, _Size __n, _Generator __gen); - - template - optional<_OutIterator> __pstl_merge(_Backend, - _Iterator1 __first1, - _Iterator1 __last1, - _Iterator2 __first2, - _Iterator2 __last2, - _OutIterator __result, - _Comp __comp); - - template - optional<_OutIterator> __pstl_move(_Backend, _Iterator __first, _Iterator __last, _OutIterator __result); - - template - optional<_Tp> __pstl_reduce(_Backend, _Iterator __first, _Iterator __last, _Tp __init, _BinaryOperation __op); - - temlate - optional<__iter_value_type<_Iterator>> __pstl_reduce(_Backend, _Iterator __first, _Iterator __last); - - template - optional<__iter_diff_t<_Iterator>> __pstl_count(_Backend, _Iterator 
__first, _Iterator __last, const _Tp& __value); - - template - optional<__iter_diff_t<_Iterator>> __pstl_count_if(_Backend, _Iterator __first, _Iterator __last, _Predicate __pred); - - template - optional<__empty> - __pstl_replace(_Backend, _Iterator __first, _Iterator __last, const _Tp& __old_value, const _Tp& __new_value); - - template - optional<__empty> - __pstl_replace_if(_Backend, _Iterator __first, _Iterator __last, _Pred __pred, const _Tp& __new_value); - - template - optional<__empty> __pstl_replace_copy(_Backend, - _Iterator __first, - _Iterator __last, - _OutIterator __result, - const _Tp& __old_value, - const _Tp& __new_value); - - template - optional<__empty> __pstl_replace_copy_if(_Backend, - _Iterator __first, - _Iterator __last, - _OutIterator __result, - _Pred __pred, - const _Tp& __new_value); - - template - optional<_Iterator> __pstl_rotate_copy( - _Backend, _Iterator __first, _Iterator __middle, _Iterator __last, _OutIterator __result); - - template - optional<__empty> __pstl_sort(_Backend, _Iterator __first, _Iterator __last, _Comp __comp); - - template - optional __pstl_equal(_Backend, _Iterator1 first1, _Iterator1 last1, _Iterator2 first2, _Comp __comp); - -// TODO: Complete this list - -Exception handling -================== - -PSTL backends are expected to report errors (i.e. failure to allocate) by returning a disengaged `optional` from their -implementation. Exceptions shouldn't be used to report an internal failure-to-allocate, since all exceptions are turned -into a program termination at the front-end level. When a backend returns a disengaged `optional` to the frontend, the -frontend will turn that into a call to `std::__throw_bad_alloc();` to report the internal failure to the user. 
-*/ - -template -struct __select_backend; - -template <> -struct __select_backend { - using type = __cpu_backend_tag; -}; - -# if _LIBCPP_STD_VER >= 20 -template <> -struct __select_backend { - using type = __cpu_backend_tag; -}; -# endif - -# if defined(_LIBCPP_PSTL_CPU_BACKEND_SERIAL) || defined(_LIBCPP_PSTL_CPU_BACKEND_THREAD) || \ - defined(_LIBCPP_PSTL_CPU_BACKEND_LIBDISPATCH) -template <> -struct __select_backend { - using type = __cpu_backend_tag; -}; - -template <> -struct __select_backend { - using type = __cpu_backend_tag; -}; - -# else - -// ...New vendors can add parallel backends here... - -# error "Invalid choice of a PSTL parallel backend" -# endif - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -#endif // _LIBCPP___ALGORITHM_PSTL_BACKEND_H diff --git a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backend.h b/lib/libcxx/include/__algorithm/pstl_backends/cpu_backend.h deleted file mode 100644 index 6980ded189ea..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backend.h +++ /dev/null @@ -1,68 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKEND_H -#define _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKEND_H - -#include <__config> - -/* - - // _Functor takes a subrange for [__first, __last) that should be executed in serial - template - optional<__empty> __parallel_for(_RandomAccessIterator __first, _RandomAccessIterator __last, _Functor __func); - - template - optional<_Tp> - __parallel_transform_reduce(_Iterator __first, _Iterator __last, _UnaryOp, _Tp __init, _BinaryOp, _Reduction); - - // Cancel the execution of other jobs - they aren't needed anymore - void __cancel_execution(); - - template - optional __parallel_merge( - _RandomAccessIterator1 __first1, - _RandomAccessIterator1 __last1, - _RandomAccessIterator2 __first2, - _RandomAccessIterator2 __last2, - _RandomAccessIterator3 __outit, - _Compare __comp, - _LeafMerge __leaf_merge); - - template - void __parallel_stable_sort(_RandomAccessIterator __first, - _RandomAccessIterator __last, - _Comp __comp, - _LeafSort __leaf_sort); - - TODO: Document the parallel backend - -Exception handling -================== - -CPU backends are expected to report errors (i.e. failure to allocate) by returning a disengaged `optional` from their -implementation. Exceptions shouldn't be used to report an internal failure-to-allocate, since all exceptions are turned -into a program termination at the front-end level. When a backend returns a disengaged `optional` to the frontend, the -frontend will turn that into a call to `std::__throw_bad_alloc();` to report the internal failure to the user. 
-*/ - -#include <__algorithm/pstl_backends/cpu_backends/any_of.h> -#include <__algorithm/pstl_backends/cpu_backends/backend.h> -#include <__algorithm/pstl_backends/cpu_backends/fill.h> -#include <__algorithm/pstl_backends/cpu_backends/find_if.h> -#include <__algorithm/pstl_backends/cpu_backends/for_each.h> -#include <__algorithm/pstl_backends/cpu_backends/merge.h> -#include <__algorithm/pstl_backends/cpu_backends/stable_sort.h> -#include <__algorithm/pstl_backends/cpu_backends/transform.h> -#include <__algorithm/pstl_backends/cpu_backends/transform_reduce.h> - -#endif // _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKEND_H diff --git a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/backend.h b/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/backend.h deleted file mode 100644 index ea2210a4a7ad..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/backend.h +++ /dev/null @@ -1,41 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKEND_BACKEND_H -#define _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKEND_BACKEND_H - -#include <__config> -#include - -#if defined(_LIBCPP_PSTL_CPU_BACKEND_SERIAL) -# include <__algorithm/pstl_backends/cpu_backends/serial.h> -#elif defined(_LIBCPP_PSTL_CPU_BACKEND_THREAD) -# include <__algorithm/pstl_backends/cpu_backends/thread.h> -#elif defined(_LIBCPP_PSTL_CPU_BACKEND_LIBDISPATCH) -# include <__algorithm/pstl_backends/cpu_backends/libdispatch.h> -#else -# error "Invalid CPU backend choice" -#endif - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -#if _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -struct __cpu_backend_tag {}; - -inline constexpr size_t __lane_size = 64; - -_LIBCPP_END_NAMESPACE_STD - -#endif // _LIBCPP_STD_VER >= 17 - -#endif // _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKEND_BACKEND_H diff --git a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/fill.h b/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/fill.h deleted file mode 100644 index 64babe9fd2bd..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/fill.h +++ /dev/null @@ -1,62 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_FILL_H -#define _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_FILL_H - -#include <__algorithm/fill.h> -#include <__algorithm/pstl_backends/cpu_backends/backend.h> -#include <__config> -#include <__iterator/concepts.h> -#include <__type_traits/is_execution_policy.h> -#include <__utility/empty.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -_LIBCPP_HIDE_FROM_ABI _Index __simd_fill_n(_Index __first, _DifferenceType __n, const _Tp& __value) noexcept { - _PSTL_USE_NONTEMPORAL_STORES_IF_ALLOWED - _PSTL_PRAGMA_SIMD - for (_DifferenceType __i = 0; __i < __n; ++__i) - __first[__i] = __value; - return __first + __n; -} - -template -_LIBCPP_HIDE_FROM_ABI optional<__empty> -__pstl_fill(__cpu_backend_tag, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { - if constexpr (__is_parallel_execution_policy_v<_ExecutionPolicy> && - __has_random_access_iterator_category_or_concept<_ForwardIterator>::value) { - return __par_backend::__parallel_for( - __first, __last, [&__value](_ForwardIterator __brick_first, _ForwardIterator __brick_last) { - [[maybe_unused]] auto __res = std::__pstl_fill<__remove_parallel_policy_t<_ExecutionPolicy>>( - __cpu_backend_tag{}, __brick_first, __brick_last, __value); - _LIBCPP_ASSERT_INTERNAL(__res, "unseq/seq should never try to allocate!"); - }); - } else if constexpr (__is_unsequenced_execution_policy_v<_ExecutionPolicy> && - __has_random_access_iterator_category_or_concept<_ForwardIterator>::value) { - std::__simd_fill_n(__first, __last - __first, __value); - return __empty{}; - } else { - std::fill(__first, __last, __value); - return __empty{}; - } -} - 
-_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -#endif // _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_FILL_H diff --git a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/for_each.h b/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/for_each.h deleted file mode 100644 index 81fd4526b8db..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/for_each.h +++ /dev/null @@ -1,62 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKNEDS_FOR_EACH_H -#define _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKNEDS_FOR_EACH_H - -#include <__algorithm/for_each.h> -#include <__algorithm/pstl_backends/cpu_backends/backend.h> -#include <__config> -#include <__iterator/concepts.h> -#include <__type_traits/is_execution_policy.h> -#include <__utility/empty.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -_LIBCPP_HIDE_FROM_ABI _Iterator __simd_walk(_Iterator __first, _DifferenceType __n, _Function __f) noexcept { - _PSTL_PRAGMA_SIMD - for (_DifferenceType __i = 0; __i < __n; ++__i) - __f(__first[__i]); - - return __first + __n; -} - -template -_LIBCPP_HIDE_FROM_ABI optional<__empty> -__pstl_for_each(__cpu_backend_tag, _ForwardIterator __first, _ForwardIterator __last, _Functor __func) { - if constexpr (__is_parallel_execution_policy_v<_ExecutionPolicy> && - __has_random_access_iterator_category_or_concept<_ForwardIterator>::value) { - return 
std::__par_backend::__parallel_for( - __first, __last, [__func](_ForwardIterator __brick_first, _ForwardIterator __brick_last) { - [[maybe_unused]] auto __res = std::__pstl_for_each<__remove_parallel_policy_t<_ExecutionPolicy>>( - __cpu_backend_tag{}, __brick_first, __brick_last, __func); - _LIBCPP_ASSERT_INTERNAL(__res, "unseq/seq should never try to allocate!"); - }); - } else if constexpr (__is_unsequenced_execution_policy_v<_ExecutionPolicy> && - __has_random_access_iterator_category_or_concept<_ForwardIterator>::value) { - std::__simd_walk(__first, __last - __first, __func); - return __empty{}; - } else { - std::for_each(__first, __last, __func); - return __empty{}; - } -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -#endif // _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKNEDS_FOR_EACH_H diff --git a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/libdispatch.h b/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/libdispatch.h deleted file mode 100644 index e885e7f22517..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/libdispatch.h +++ /dev/null @@ -1,347 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_LIBDISPATCH_H -#define _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_LIBDISPATCH_H - -#include <__algorithm/inplace_merge.h> -#include <__algorithm/lower_bound.h> -#include <__algorithm/max.h> -#include <__algorithm/merge.h> -#include <__algorithm/upper_bound.h> -#include <__atomic/atomic.h> -#include <__config> -#include <__exception/terminate.h> -#include <__iterator/iterator_traits.h> -#include <__iterator/move_iterator.h> -#include <__memory/allocator.h> -#include <__memory/construct_at.h> -#include <__memory/unique_ptr.h> -#include <__numeric/reduce.h> -#include <__utility/empty.h> -#include <__utility/exception_guard.h> -#include <__utility/move.h> -#include <__utility/pair.h> -#include -#include -#include - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -namespace __par_backend { -inline namespace __libdispatch { - -// ::dispatch_apply is marked as __attribute__((nothrow)) because it doesn't let exceptions propagate, and neither do -// we. -// TODO: Do we want to add [[_Clang::__callback__(__func, __context, __)]]? 
-_LIBCPP_EXPORTED_FROM_ABI void -__dispatch_apply(size_t __chunk_count, void* __context, void (*__func)(void* __context, size_t __chunk)) noexcept; - -template -_LIBCPP_HIDE_FROM_ABI void __dispatch_apply(size_t __chunk_count, _Func __func) noexcept { - __libdispatch::__dispatch_apply(__chunk_count, &__func, [](void* __context, size_t __chunk) { - (*static_cast<_Func*>(__context))(__chunk); - }); -} - -struct __chunk_partitions { - ptrdiff_t __chunk_count_; // includes the first chunk - ptrdiff_t __chunk_size_; - ptrdiff_t __first_chunk_size_; -}; - -[[__gnu__::__const__]] _LIBCPP_EXPORTED_FROM_ABI __chunk_partitions __partition_chunks(ptrdiff_t __size) noexcept; - -template -_LIBCPP_HIDE_FROM_ABI optional<__empty> -__dispatch_parallel_for(__chunk_partitions __partitions, _RandomAccessIterator __first, _Functor __func) { - // Perform the chunked execution. - __libdispatch::__dispatch_apply(__partitions.__chunk_count_, [&](size_t __chunk) { - auto __this_chunk_size = __chunk == 0 ? __partitions.__first_chunk_size_ : __partitions.__chunk_size_; - auto __index = - __chunk == 0 - ? 
0 - : (__chunk * __partitions.__chunk_size_) + (__partitions.__first_chunk_size_ - __partitions.__chunk_size_); - __func(__first + __index, __first + __index + __this_chunk_size); - }); - - return __empty{}; -} - -template -_LIBCPP_HIDE_FROM_ABI optional<__empty> -__parallel_for(_RandomAccessIterator __first, _RandomAccessIterator __last, _Functor __func) { - return __libdispatch::__dispatch_parallel_for( - __libdispatch::__partition_chunks(__last - __first), std::move(__first), std::move(__func)); -} - -template -struct __merge_range { - __merge_range(_RandomAccessIterator1 __mid1, _RandomAccessIterator2 __mid2, _RandomAccessIteratorOut __result) - : __mid1_(__mid1), __mid2_(__mid2), __result_(__result) {} - - _RandomAccessIterator1 __mid1_; - _RandomAccessIterator2 __mid2_; - _RandomAccessIteratorOut __result_; -}; - -template -_LIBCPP_HIDE_FROM_ABI optional<__empty> __parallel_merge( - _RandomAccessIterator1 __first1, - _RandomAccessIterator1 __last1, - _RandomAccessIterator2 __first2, - _RandomAccessIterator2 __last2, - _RandomAccessIterator3 __result, - _Compare __comp, - _LeafMerge __leaf_merge) noexcept { - __chunk_partitions __partitions = - __libdispatch::__partition_chunks(std::max(__last1 - __first1, __last2 - __first2)); - - if (__partitions.__chunk_count_ == 0) - return __empty{}; - - if (__partitions.__chunk_count_ == 1) { - __leaf_merge(__first1, __last1, __first2, __last2, __result, __comp); - return __empty{}; - } - - using __merge_range_t = __merge_range<_RandomAccessIterator1, _RandomAccessIterator2, _RandomAccessIterator3>; - auto const __n_ranges = __partitions.__chunk_count_ + 1; - - // TODO: use __uninitialized_buffer - auto __destroy = [=](__merge_range_t* __ptr) { - std::destroy_n(__ptr, __n_ranges); - std::allocator<__merge_range_t>().deallocate(__ptr, __n_ranges); - }; - - unique_ptr<__merge_range_t[], decltype(__destroy)> __ranges( - [&]() -> __merge_range_t* { -# ifndef _LIBCPP_HAS_NO_EXCEPTIONS - try { -# endif - return 
std::allocator<__merge_range_t>().allocate(__n_ranges); -# ifndef _LIBCPP_HAS_NO_EXCEPTIONS - } catch (const std::bad_alloc&) { - return nullptr; - } -# endif - }(), - __destroy); - - if (!__ranges) - return nullopt; - - // TODO: Improve the case where the smaller range is merged into just a few (or even one) chunks of the larger case - __merge_range_t* __r = __ranges.get(); - std::__construct_at(__r++, __first1, __first2, __result); - - bool __iterate_first_range = __last1 - __first1 > __last2 - __first2; - - auto __compute_chunk = [&](size_t __chunk_size) -> __merge_range_t { - auto [__mid1, __mid2] = [&] { - if (__iterate_first_range) { - auto __m1 = __first1 + __chunk_size; - auto __m2 = std::lower_bound(__first2, __last2, __m1[-1], __comp); - return std::make_pair(__m1, __m2); - } else { - auto __m2 = __first2 + __chunk_size; - auto __m1 = std::lower_bound(__first1, __last1, __m2[-1], __comp); - return std::make_pair(__m1, __m2); - } - }(); - - __result += (__mid1 - __first1) + (__mid2 - __first2); - __first1 = __mid1; - __first2 = __mid2; - return {std::move(__mid1), std::move(__mid2), __result}; - }; - - // handle first chunk - std::__construct_at(__r++, __compute_chunk(__partitions.__first_chunk_size_)); - - // handle 2 -> N - 1 chunks - for (ptrdiff_t __i = 0; __i != __partitions.__chunk_count_ - 2; ++__i) - std::__construct_at(__r++, __compute_chunk(__partitions.__chunk_size_)); - - // handle last chunk - std::__construct_at(__r, __last1, __last2, __result); - - __libdispatch::__dispatch_apply(__partitions.__chunk_count_, [&](size_t __index) { - auto __first_iters = __ranges[__index]; - auto __last_iters = __ranges[__index + 1]; - __leaf_merge( - __first_iters.__mid1_, - __last_iters.__mid1_, - __first_iters.__mid2_, - __last_iters.__mid2_, - __first_iters.__result_, - __comp); - }); - - return __empty{}; -} - -template -_LIBCPP_HIDE_FROM_ABI optional<_Value> __parallel_transform_reduce( - _RandomAccessIterator __first, - _RandomAccessIterator __last, - 
_Transform __transform, - _Value __init, - _Combiner __combiner, - _Reduction __reduction) { - if (__first == __last) - return __init; - - auto __partitions = __libdispatch::__partition_chunks(__last - __first); - - auto __destroy = [__count = __partitions.__chunk_count_](_Value* __ptr) { - std::destroy_n(__ptr, __count); - std::allocator<_Value>().deallocate(__ptr, __count); - }; - - // TODO: use __uninitialized_buffer - // TODO: allocate one element per worker instead of one element per chunk - unique_ptr<_Value[], decltype(__destroy)> __values( - std::allocator<_Value>().allocate(__partitions.__chunk_count_), __destroy); - - // __dispatch_apply is noexcept - __libdispatch::__dispatch_apply(__partitions.__chunk_count_, [&](size_t __chunk) { - auto __this_chunk_size = __chunk == 0 ? __partitions.__first_chunk_size_ : __partitions.__chunk_size_; - auto __index = - __chunk == 0 - ? 0 - : (__chunk * __partitions.__chunk_size_) + (__partitions.__first_chunk_size_ - __partitions.__chunk_size_); - if (__this_chunk_size != 1) { - std::__construct_at( - __values.get() + __chunk, - __reduction(__first + __index + 2, - __first + __index + __this_chunk_size, - __combiner(__transform(__first + __index), __transform(__first + __index + 1)))); - } else { - std::__construct_at(__values.get() + __chunk, __transform(__first + __index)); - } - }); - - return std::reduce( - std::make_move_iterator(__values.get()), - std::make_move_iterator(__values.get() + __partitions.__chunk_count_), - std::move(__init), - __combiner); -} - -template -_LIBCPP_HIDE_FROM_ABI optional<__empty> __parallel_stable_sort( - _RandomAccessIterator __first, _RandomAccessIterator __last, _Comp __comp, _LeafSort __leaf_sort) { - const auto __size = __last - __first; - auto __partitions = __libdispatch::__partition_chunks(__size); - - if (__partitions.__chunk_count_ == 0) - return __empty{}; - - if (__partitions.__chunk_count_ == 1) { - __leaf_sort(__first, __last, __comp); - return __empty{}; - } - - using 
_Value = __iter_value_type<_RandomAccessIterator>; - - auto __destroy = [__size](_Value* __ptr) { - std::destroy_n(__ptr, __size); - std::allocator<_Value>().deallocate(__ptr, __size); - }; - - // TODO: use __uninitialized_buffer - unique_ptr<_Value[], decltype(__destroy)> __values(std::allocator<_Value>().allocate(__size), __destroy); - - // Initialize all elements to a moved-from state - // TODO: Don't do this - this can be done in the first merge - see https://llvm.org/PR63928 - std::__construct_at(__values.get(), std::move(*__first)); - for (__iter_diff_t<_RandomAccessIterator> __i = 1; __i != __size; ++__i) { - std::__construct_at(__values.get() + __i, std::move(__values.get()[__i - 1])); - } - *__first = std::move(__values.get()[__size - 1]); - - __libdispatch::__dispatch_parallel_for( - __partitions, - __first, - [&__leaf_sort, &__comp](_RandomAccessIterator __chunk_first, _RandomAccessIterator __chunk_last) { - __leaf_sort(std::move(__chunk_first), std::move(__chunk_last), __comp); - }); - - bool __objects_are_in_buffer = false; - do { - const auto __old_chunk_size = __partitions.__chunk_size_; - if (__partitions.__chunk_count_ % 2 == 1) { - auto __inplace_merge_chunks = [&__comp, &__partitions](auto __first_chunk_begin) { - std::inplace_merge( - __first_chunk_begin, - __first_chunk_begin + __partitions.__first_chunk_size_, - __first_chunk_begin + __partitions.__first_chunk_size_ + __partitions.__chunk_size_, - __comp); - }; - if (__objects_are_in_buffer) - __inplace_merge_chunks(__values.get()); - else - __inplace_merge_chunks(__first); - __partitions.__first_chunk_size_ += 2 * __partitions.__chunk_size_; - } else { - __partitions.__first_chunk_size_ += __partitions.__chunk_size_; - } - - __partitions.__chunk_size_ *= 2; - __partitions.__chunk_count_ /= 2; - - auto __merge_chunks = [__partitions, __old_chunk_size, &__comp](auto __from_first, auto __to_first) { - __libdispatch::__dispatch_parallel_for( - __partitions, - __from_first, - [__old_chunk_size, 
&__from_first, &__to_first, &__comp](auto __chunk_first, auto __chunk_last) { - std::merge(std::make_move_iterator(__chunk_first), - std::make_move_iterator(__chunk_last - __old_chunk_size), - std::make_move_iterator(__chunk_last - __old_chunk_size), - std::make_move_iterator(__chunk_last), - __to_first + (__chunk_first - __from_first), - __comp); - }); - }; - - if (__objects_are_in_buffer) - __merge_chunks(__values.get(), __first); - else - __merge_chunks(__first, __values.get()); - __objects_are_in_buffer = !__objects_are_in_buffer; - } while (__partitions.__chunk_count_ > 1); - - if (__objects_are_in_buffer) { - std::move(__values.get(), __values.get() + __size, __first); - } - - return __empty{}; -} - -_LIBCPP_HIDE_FROM_ABI inline void __cancel_execution() {} - -} // namespace __libdispatch -} // namespace __par_backend - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_LIBDISPATCH_H diff --git a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/merge.h b/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/merge.h deleted file mode 100644 index b0db70f58b2e..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/merge.h +++ /dev/null @@ -1,85 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_MERGE_H -#define _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_MERGE_H - -#include <__algorithm/merge.h> -#include <__algorithm/pstl_backends/cpu_backends/backend.h> -#include <__config> -#include <__iterator/concepts.h> -#include <__type_traits/is_execution_policy.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_PUSH_MACROS -# include <__undef_macros> - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -_LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator> __pstl_merge( - __cpu_backend_tag, - _ForwardIterator1 __first1, - _ForwardIterator1 __last1, - _ForwardIterator2 __first2, - _ForwardIterator2 __last2, - _ForwardOutIterator __result, - _Comp __comp) { - if constexpr (__is_parallel_execution_policy_v<_ExecutionPolicy> && - __has_random_access_iterator_category_or_concept<_ForwardIterator1>::value && - __has_random_access_iterator_category_or_concept<_ForwardIterator2>::value && - __has_random_access_iterator_category_or_concept<_ForwardOutIterator>::value) { - auto __res = __par_backend::__parallel_merge( - __first1, - __last1, - __first2, - __last2, - __result, - __comp, - [](_ForwardIterator1 __g_first1, - _ForwardIterator1 __g_last1, - _ForwardIterator2 __g_first2, - _ForwardIterator2 __g_last2, - _ForwardOutIterator __g_result, - _Comp __g_comp) { - [[maybe_unused]] auto __g_res = std::__pstl_merge<__remove_parallel_policy_t<_ExecutionPolicy>>( - __cpu_backend_tag{}, - std::move(__g_first1), - std::move(__g_last1), - std::move(__g_first2), - std::move(__g_last2), - std::move(__g_result), - std::move(__g_comp)); - _LIBCPP_ASSERT_INTERNAL(__g_res, "unsed/sed should never try to allocate!"); - }); - if 
(!__res) - return nullopt; - return __result + (__last1 - __first1) + (__last2 - __first2); - } else { - return std::merge(__first1, __last1, __first2, __last2, __result, __comp); - } -} - -_LIBCPP_END_NAMESPACE_STD - -_LIBCPP_POP_MACROS - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -#endif // _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_MERGE_H diff --git a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/serial.h b/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/serial.h deleted file mode 100644 index afcc7ffb2661..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/serial.h +++ /dev/null @@ -1,83 +0,0 @@ -// -*- C++ -*- -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_SERIAL_H -#define _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_SERIAL_H - -#include <__config> -#include <__utility/empty.h> -#include <__utility/move.h> -#include -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_PUSH_MACROS -# include <__undef_macros> - -_LIBCPP_BEGIN_NAMESPACE_STD - -namespace __par_backend { -inline namespace __serial_cpu_backend { - -template -_LIBCPP_HIDE_FROM_ABI optional<__empty> -__parallel_for(_RandomAccessIterator __first, _RandomAccessIterator __last, _Fp __f) { - __f(__first, __last); - return __empty{}; -} - -template -_LIBCPP_HIDE_FROM_ABI optional<_Tp> -__parallel_transform_reduce(_Index __first, _Index __last, _UnaryOp, _Tp __init, _BinaryOp, _Reduce __reduce) { - return 
__reduce(std::move(__first), std::move(__last), std::move(__init)); -} - -template -_LIBCPP_HIDE_FROM_ABI optional<__empty> __parallel_stable_sort( - _RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp, _LeafSort __leaf_sort) { - __leaf_sort(__first, __last, __comp); - return __empty{}; -} - -_LIBCPP_HIDE_FROM_ABI inline void __cancel_execution() {} - -template -_LIBCPP_HIDE_FROM_ABI optional<__empty> __parallel_merge( - _RandomAccessIterator1 __first1, - _RandomAccessIterator1 __last1, - _RandomAccessIterator2 __first2, - _RandomAccessIterator2 __last2, - _RandomAccessIterator3 __outit, - _Compare __comp, - _LeafMerge __leaf_merge) { - __leaf_merge(__first1, __last1, __first2, __last2, __outit, __comp); - return __empty{}; -} - -// TODO: Complete this list - -} // namespace __serial_cpu_backend -} // namespace __par_backend - -_LIBCPP_END_NAMESPACE_STD - -_LIBCPP_POP_MACROS - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && && _LIBCPP_STD_VER >= 17 - -#endif // _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_SERIAL_H diff --git a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/stable_sort.h b/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/stable_sort.h deleted file mode 100644 index 34c423586c4b..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/stable_sort.h +++ /dev/null @@ -1,45 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_STABLE_SORT_H -#define _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_STABLE_SORT_H - -#include <__algorithm/pstl_backends/cpu_backends/backend.h> -#include <__algorithm/stable_sort.h> -#include <__config> -#include <__type_traits/is_execution_policy.h> -#include <__utility/empty.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -_LIBCPP_HIDE_FROM_ABI optional<__empty> -__pstl_stable_sort(__cpu_backend_tag, _RandomAccessIterator __first, _RandomAccessIterator __last, _Comp __comp) { - if constexpr (__is_parallel_execution_policy_v<_ExecutionPolicy>) { - return __par_backend::__parallel_stable_sort( - __first, __last, __comp, [](_RandomAccessIterator __g_first, _RandomAccessIterator __g_last, _Comp __g_comp) { - std::stable_sort(__g_first, __g_last, __g_comp); - }); - } else { - std::stable_sort(__first, __last, __comp); - return __empty{}; - } -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -#endif // _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_STABLE_SORT_H diff --git a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/thread.h b/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/thread.h deleted file mode 100644 index eb11a961b760..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/thread.h +++ /dev/null @@ -1,84 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_THREAD_H -#define _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_THREAD_H - -#include <__assert> -#include <__config> -#include <__utility/empty.h> -#include <__utility/move.h> -#include -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -// This backend implementation is for testing purposes only and not meant for production use. This will be replaced -// by a proper implementation once the PSTL implementation is somewhat stable. - -_LIBCPP_BEGIN_NAMESPACE_STD - -namespace __par_backend { -inline namespace __thread_cpu_backend { - -template -_LIBCPP_HIDE_FROM_ABI optional<__empty> -__parallel_for(_RandomAccessIterator __first, _RandomAccessIterator __last, _Fp __f) { - __f(__first, __last); - return __empty{}; -} - -template -_LIBCPP_HIDE_FROM_ABI optional<_Tp> -__parallel_transform_reduce(_Index __first, _Index __last, _UnaryOp, _Tp __init, _BinaryOp, _Reduce __reduce) { - return __reduce(std::move(__first), std::move(__last), std::move(__init)); -} - -template -_LIBCPP_HIDE_FROM_ABI optional<__empty> __parallel_stable_sort( - _RandomAccessIterator __first, _RandomAccessIterator __last, _Compare __comp, _LeafSort __leaf_sort) { - __leaf_sort(__first, __last, __comp); - return __empty{}; -} - -_LIBCPP_HIDE_FROM_ABI inline void __cancel_execution() {} - -template -_LIBCPP_HIDE_FROM_ABI optional<__empty> __parallel_merge( - _RandomAccessIterator1 __first1, - _RandomAccessIterator1 __last1, - _RandomAccessIterator2 __first2, - _RandomAccessIterator2 __last2, - _RandomAccessIterator3 __outit, - _Compare __comp, - _LeafMerge __leaf_merge) { - __leaf_merge(__first1, __last1, __first2, __last2, 
__outit, __comp); - return __empty{}; -} - -} // namespace __thread_cpu_backend -} // namespace __par_backend - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_THREAD_H diff --git a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/transform.h b/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/transform.h deleted file mode 100644 index fdf1a2e78dad..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/transform.h +++ /dev/null @@ -1,138 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_TRANSFORM_H -#define _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_TRANSFORM_H - -#include <__algorithm/pstl_backends/cpu_backends/backend.h> -#include <__algorithm/transform.h> -#include <__config> -#include <__iterator/concepts.h> -#include <__iterator/iterator_traits.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/remove_cvref.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_PUSH_MACROS -# include <__undef_macros> - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -_LIBCPP_HIDE_FROM_ABI _Iterator2 -__simd_walk(_Iterator1 __first1, _DifferenceType __n, _Iterator2 __first2, _Function __f) noexcept { - _PSTL_PRAGMA_SIMD - for (_DifferenceType __i = 0; __i < __n; ++__i) - __f(__first1[__i], __first2[__i]); - return __first2 + 
__n; -} - -template -_LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator> __pstl_transform( - __cpu_backend_tag, - _ForwardIterator __first, - _ForwardIterator __last, - _ForwardOutIterator __result, - _UnaryOperation __op) { - if constexpr (__is_parallel_execution_policy_v<_ExecutionPolicy> && - __has_random_access_iterator_category_or_concept<_ForwardIterator>::value && - __has_random_access_iterator_category_or_concept<_ForwardOutIterator>::value) { - std::__par_backend::__parallel_for( - __first, __last, [__op, __first, __result](_ForwardIterator __brick_first, _ForwardIterator __brick_last) { - auto __res = std::__pstl_transform<__remove_parallel_policy_t<_ExecutionPolicy>>( - __cpu_backend_tag{}, __brick_first, __brick_last, __result + (__brick_first - __first), __op); - _LIBCPP_ASSERT_INTERNAL(__res, "unseq/seq should never try to allocate!"); - return *std::move(__res); - }); - return __result + (__last - __first); - } else if constexpr (__is_unsequenced_execution_policy_v<_ExecutionPolicy> && - __has_random_access_iterator_category_or_concept<_ForwardIterator>::value && - __has_random_access_iterator_category_or_concept<_ForwardOutIterator>::value) { - return std::__simd_walk( - __first, - __last - __first, - __result, - [&](__iter_reference<_ForwardIterator> __in_value, __iter_reference<_ForwardOutIterator> __out_value) { - __out_value = __op(__in_value); - }); - } else { - return std::transform(__first, __last, __result, __op); - } -} - -template -_LIBCPP_HIDE_FROM_ABI _Iterator3 __simd_walk( - _Iterator1 __first1, _DifferenceType __n, _Iterator2 __first2, _Iterator3 __first3, _Function __f) noexcept { - _PSTL_PRAGMA_SIMD - for (_DifferenceType __i = 0; __i < __n; ++__i) - __f(__first1[__i], __first2[__i], __first3[__i]); - return __first3 + __n; -} -template >, int> = 0> -_LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator> __pstl_transform( - __cpu_backend_tag, - _ForwardIterator1 __first1, - _ForwardIterator1 __last1, - _ForwardIterator2 __first2, - 
_ForwardOutIterator __result, - _BinaryOperation __op) { - if constexpr (__is_parallel_execution_policy_v<_ExecutionPolicy> && - __has_random_access_iterator_category_or_concept<_ForwardIterator1>::value && - __has_random_access_iterator_category_or_concept<_ForwardIterator2>::value && - __has_random_access_iterator_category_or_concept<_ForwardOutIterator>::value) { - auto __res = std::__par_backend::__parallel_for( - __first1, - __last1, - [__op, __first1, __first2, __result](_ForwardIterator1 __brick_first, _ForwardIterator1 __brick_last) { - return std::__pstl_transform<__remove_parallel_policy_t<_ExecutionPolicy>>( - __cpu_backend_tag{}, - __brick_first, - __brick_last, - __first2 + (__brick_first - __first1), - __result + (__brick_first - __first1), - __op); - }); - if (!__res) - return nullopt; - return __result + (__last1 - __first1); - } else if constexpr (__is_unsequenced_execution_policy_v<_ExecutionPolicy> && - __has_random_access_iterator_category_or_concept<_ForwardIterator1>::value && - __has_random_access_iterator_category_or_concept<_ForwardIterator2>::value && - __has_random_access_iterator_category_or_concept<_ForwardOutIterator>::value) { - return std::__simd_walk( - __first1, - __last1 - __first1, - __first2, - __result, - [&](__iter_reference<_ForwardIterator1> __in1, - __iter_reference<_ForwardIterator2> __in2, - __iter_reference<_ForwardOutIterator> __out_value) { __out_value = __op(__in1, __in2); }); - } else { - return std::transform(__first1, __last1, __first2, __result, __op); - } -} - -_LIBCPP_END_NAMESPACE_STD - -_LIBCPP_POP_MACROS - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -#endif // _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_TRANSFORM_H diff --git a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/transform_reduce.h b/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/transform_reduce.h deleted file mode 100644 index 14a0d76741d4..000000000000 --- 
a/lib/libcxx/include/__algorithm/pstl_backends/cpu_backends/transform_reduce.h +++ /dev/null @@ -1,202 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_TRANSFORM_REDUCE_H -#define _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_TRANSFORM_REDUCE_H - -#include <__algorithm/pstl_backends/cpu_backends/backend.h> -#include <__config> -#include <__iterator/concepts.h> -#include <__iterator/iterator_traits.h> -#include <__numeric/transform_reduce.h> -#include <__type_traits/is_arithmetic.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/operation_traits.h> -#include <__utility/move.h> -#include -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template , - __enable_if_t<__desugars_to<__plus_tag, _BinaryOperation, _Tp, _UnaryResult>::value && is_arithmetic_v<_Tp> && - is_arithmetic_v<_UnaryResult>, - int> = 0> -_LIBCPP_HIDE_FROM_ABI _Tp -__simd_transform_reduce(_DifferenceType __n, _Tp __init, _BinaryOperation, _UnaryOperation __f) noexcept { - _PSTL_PRAGMA_SIMD_REDUCTION(+ : __init) - for (_DifferenceType __i = 0; __i < __n; ++__i) - __init += __f(__i); - return __init; -} - -template , - __enable_if_t::value && - is_arithmetic_v<_Tp> && is_arithmetic_v<_UnaryResult>), - int> = 0> -_LIBCPP_HIDE_FROM_ABI _Tp -__simd_transform_reduce(_Size __n, _Tp __init, _BinaryOperation __binary_op, _UnaryOperation __f) noexcept { - const _Size __block_size = __lane_size / 
sizeof(_Tp); - if (__n > 2 * __block_size && __block_size > 1) { - alignas(__lane_size) char __lane_buffer[__lane_size]; - _Tp* __lane = reinterpret_cast<_Tp*>(__lane_buffer); - - // initializer - _PSTL_PRAGMA_SIMD - for (_Size __i = 0; __i < __block_size; ++__i) { - ::new (__lane + __i) _Tp(__binary_op(__f(__i), __f(__block_size + __i))); - } - // main loop - _Size __i = 2 * __block_size; - const _Size __last_iteration = __block_size * (__n / __block_size); - for (; __i < __last_iteration; __i += __block_size) { - _PSTL_PRAGMA_SIMD - for (_Size __j = 0; __j < __block_size; ++__j) { - __lane[__j] = __binary_op(std::move(__lane[__j]), __f(__i + __j)); - } - } - // remainder - _PSTL_PRAGMA_SIMD - for (_Size __j = 0; __j < __n - __last_iteration; ++__j) { - __lane[__j] = __binary_op(std::move(__lane[__j]), __f(__last_iteration + __j)); - } - // combiner - for (_Size __j = 0; __j < __block_size; ++__j) { - __init = __binary_op(std::move(__init), std::move(__lane[__j])); - } - // destroyer - _PSTL_PRAGMA_SIMD - for (_Size __j = 0; __j < __block_size; ++__j) { - __lane[__j].~_Tp(); - } - } else { - for (_Size __i = 0; __i < __n; ++__i) { - __init = __binary_op(std::move(__init), __f(__i)); - } - } - return __init; -} - -template -_LIBCPP_HIDE_FROM_ABI optional<_Tp> __pstl_transform_reduce( - __cpu_backend_tag, - _ForwardIterator1 __first1, - _ForwardIterator1 __last1, - _ForwardIterator2 __first2, - _Tp __init, - _BinaryOperation1 __reduce, - _BinaryOperation2 __transform) { - if constexpr (__is_parallel_execution_policy_v<_ExecutionPolicy> && - __has_random_access_iterator_category_or_concept<_ForwardIterator1>::value && - __has_random_access_iterator_category_or_concept<_ForwardIterator2>::value) { - return __par_backend::__parallel_transform_reduce( - __first1, - std::move(__last1), - [__first1, __first2, __transform](_ForwardIterator1 __iter) { - return __transform(*__iter, *(__first2 + (__iter - __first1))); - }, - std::move(__init), - std::move(__reduce), - 
[__first1, __first2, __reduce, __transform]( - _ForwardIterator1 __brick_first, _ForwardIterator1 __brick_last, _Tp __brick_init) { - return *std::__pstl_transform_reduce<__remove_parallel_policy_t<_ExecutionPolicy>>( - __cpu_backend_tag{}, - __brick_first, - std::move(__brick_last), - __first2 + (__brick_first - __first1), - std::move(__brick_init), - std::move(__reduce), - std::move(__transform)); - }); - } else if constexpr (__is_unsequenced_execution_policy_v<_ExecutionPolicy> && - __has_random_access_iterator_category_or_concept<_ForwardIterator1>::value && - __has_random_access_iterator_category_or_concept<_ForwardIterator2>::value) { - return std::__simd_transform_reduce( - __last1 - __first1, std::move(__init), std::move(__reduce), [&](__iter_diff_t<_ForwardIterator1> __i) { - return __transform(__first1[__i], __first2[__i]); - }); - } else { - return std::transform_reduce( - std::move(__first1), - std::move(__last1), - std::move(__first2), - std::move(__init), - std::move(__reduce), - std::move(__transform)); - } -} - -template -_LIBCPP_HIDE_FROM_ABI optional<_Tp> __pstl_transform_reduce( - __cpu_backend_tag, - _ForwardIterator __first, - _ForwardIterator __last, - _Tp __init, - _BinaryOperation __reduce, - _UnaryOperation __transform) { - if constexpr (__is_parallel_execution_policy_v<_ExecutionPolicy> && - __has_random_access_iterator_category_or_concept<_ForwardIterator>::value) { - return __par_backend::__parallel_transform_reduce( - std::move(__first), - std::move(__last), - [__transform](_ForwardIterator __iter) { return __transform(*__iter); }, - std::move(__init), - __reduce, - [__transform, __reduce](auto __brick_first, auto __brick_last, _Tp __brick_init) { - auto __res = std::__pstl_transform_reduce<__remove_parallel_policy_t<_ExecutionPolicy>>( - __cpu_backend_tag{}, - std::move(__brick_first), - std::move(__brick_last), - std::move(__brick_init), - std::move(__reduce), - std::move(__transform)); - _LIBCPP_ASSERT_INTERNAL(__res, "unseq/seq 
should never try to allocate!"); - return *std::move(__res); - }); - } else if constexpr (__is_unsequenced_execution_policy_v<_ExecutionPolicy> && - __has_random_access_iterator_category_or_concept<_ForwardIterator>::value) { - return std::__simd_transform_reduce( - __last - __first, - std::move(__init), - std::move(__reduce), - [=, &__transform](__iter_diff_t<_ForwardIterator> __i) { return __transform(__first[__i]); }); - } else { - return std::transform_reduce( - std::move(__first), std::move(__last), std::move(__init), std::move(__reduce), std::move(__transform)); - } -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_BACKENDS_CPU_BACKENDS_TRANSFORM_REDUCE_H diff --git a/lib/libcxx/include/__algorithm/pstl_copy.h b/lib/libcxx/include/__algorithm/pstl_copy.h deleted file mode 100644 index 1069dcec0e11..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_copy.h +++ /dev/null @@ -1,121 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_COPY_H -#define _LIBCPP___ALGORITHM_PSTL_COPY_H - -#include <__algorithm/copy_n.h> -#include <__algorithm/pstl_backend.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__algorithm/pstl_transform.h> -#include <__config> -#include <__functional/identity.h> -#include <__iterator/concepts.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_constant_evaluated.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/is_trivially_copyable.h> -#include <__type_traits/remove_cvref.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -// TODO: Use the std::copy/move shenanigans to forward to std::memmove - -template -void __pstl_copy(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator> -__copy(_ExecutionPolicy&& __policy, - _ForwardIterator&& __first, - _ForwardIterator&& __last, - _ForwardOutIterator&& __result) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_copy, _RawPolicy), - [&__policy](_ForwardIterator __g_first, _ForwardIterator __g_last, _ForwardOutIterator __g_result) { - return std::__transform(__policy, __g_first, __g_last, __g_result, __identity()); - }, - std::move(__first), - std::move(__last), - std::move(__result)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator -copy(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _ForwardOutIterator __result) { - auto __res = std::__copy(__policy, std::move(__first), std::move(__last), 
std::move(__result)); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -template -void __pstl_copy_n(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator> __copy_n( - _ExecutionPolicy&& __policy, _ForwardIterator&& __first, _Size&& __n, _ForwardOutIterator&& __result) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_copy_n, _RawPolicy), - [&__policy]( - _ForwardIterator __g_first, _Size __g_n, _ForwardOutIterator __g_result) -> optional<_ForwardIterator> { - if constexpr (__has_random_access_iterator_category_or_concept<_ForwardIterator>::value) - return std::__copy(__policy, std::move(__g_first), std::move(__g_first + __g_n), std::move(__g_result)); - else - return std::copy_n(__g_first, __g_n, __g_result); - }, - std::move(__first), - std::move(__n), - std::move(__result)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator -copy_n(_ExecutionPolicy&& __policy, _ForwardIterator __first, _Size __n, _ForwardOutIterator __result) { - auto __res = std::__copy_n(__policy, std::move(__first), std::move(__n), std::move(__result)); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_COPY_H diff --git a/lib/libcxx/include/__algorithm/pstl_count.h b/lib/libcxx/include/__algorithm/pstl_count.h deleted file mode 100644 index 2781f6bfd3c9..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_count.h +++ /dev/null @@ -1,121 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_COUNT_H -#define _LIBCPP___ALGORITHM_PSTL_COUNT_H - -#include <__algorithm/count.h> -#include <__algorithm/for_each.h> -#include <__algorithm/pstl_backend.h> -#include <__algorithm/pstl_for_each.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__atomic/atomic.h> -#include <__config> -#include <__functional/operations.h> -#include <__iterator/iterator_traits.h> -#include <__numeric/pstl_transform_reduce.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/remove_cvref.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -void __pstl_count_if(); // declaration needed for the frontend dispatch below - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__iter_diff_t<_ForwardIterator>> __count_if( - _ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Predicate&& __pred) noexcept { - using __diff_t = __iter_diff_t<_ForwardIterator>; - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_count_if, _RawPolicy), - [&](_ForwardIterator __g_first, _ForwardIterator __g_last, _Predicate __g_pred) -> optional<__diff_t> { - return std::__transform_reduce( - __policy, - std::move(__g_first), - std::move(__g_last), - __diff_t(), - std::plus{}, - [&](__iter_reference<_ForwardIterator> __element) -> bool { return __g_pred(__element); }); - }, - std::move(__first), - std::move(__last), - std::move(__pred)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI __iter_diff_t<_ForwardIterator> 
-count_if(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) { - auto __res = std::__count_if(__policy, std::move(__first), std::move(__last), std::move(__pred)); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -template -void __pstl_count(); // declaration needed for the frontend dispatch below - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__iter_diff_t<_ForwardIterator>> -__count(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_count, _RawPolicy), - [&](_ForwardIterator __g_first, _ForwardIterator __g_last, const _Tp& __g_value) - -> optional<__iter_diff_t<_ForwardIterator>> { - return std::count_if(__policy, __g_first, __g_last, [&](__iter_reference<_ForwardIterator> __v) { - return __v == __g_value; - }); - }, - std::move(__first), - std::move(__last), - __value); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI __iter_diff_t<_ForwardIterator> -count(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { - auto __res = std::__count(__policy, std::move(__first), std::move(__last), __value); - if (!__res) - std::__throw_bad_alloc(); - return *__res; -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_COUNT_H diff --git a/lib/libcxx/include/__algorithm/pstl_equal.h b/lib/libcxx/include/__algorithm/pstl_equal.h deleted file mode 100644 index d235c0f4f419..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_equal.h +++ /dev/null @@ -1,175 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
-// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_EQUAL_H -#define _LIBCPP___ALGORITHM_PSTL_EQUAL_H - -#include <__algorithm/equal.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__config> -#include <__functional/operations.h> -#include <__iterator/iterator_traits.h> -#include <__numeric/pstl_transform_reduce.h> -#include <__utility/move.h> - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -void __pstl_equal(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional -__equal(_ExecutionPolicy&& __policy, - _ForwardIterator1&& __first1, - _ForwardIterator1&& __last1, - _ForwardIterator2&& __first2, - _Pred&& __pred) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_equal, _RawPolicy), - [&__policy]( - _ForwardIterator1 __g_first1, _ForwardIterator1 __g_last1, _ForwardIterator2 __g_first2, _Pred __g_pred) { - return std::__transform_reduce( - __policy, - std::move(__g_first1), - std::move(__g_last1), - std::move(__g_first2), - true, - std::logical_and{}, - std::move(__g_pred)); - }, - std::move(__first1), - std::move(__last1), - std::move(__first2), - std::move(__pred)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI bool -equal(_ExecutionPolicy&& __policy, - _ForwardIterator1 __first1, - _ForwardIterator1 __last1, - _ForwardIterator2 __first2, - _Pred __pred) { - auto __res = std::__equal(__policy, std::move(__first1), std::move(__last1), std::move(__first2), std::move(__pred)); - if (!__res) - std::__throw_bad_alloc(); - return *__res; -} - -template >, int> = 0> 
-_LIBCPP_HIDE_FROM_ABI bool -equal(_ExecutionPolicy&& __policy, _ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2) { - return std::equal(__policy, std::move(__first1), std::move(__last1), std::move(__first2), std::equal_to{}); -} - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional -__equal(_ExecutionPolicy&& __policy, - _ForwardIterator1&& __first1, - _ForwardIterator1&& __last1, - _ForwardIterator2&& __first2, - _ForwardIterator2&& __last2, - _Pred&& __pred) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_equal, _RawPolicy), - [&__policy](_ForwardIterator1 __g_first1, - _ForwardIterator1 __g_last1, - _ForwardIterator2 __g_first2, - _ForwardIterator2 __g_last2, - _Pred __g_pred) -> optional { - if constexpr (__has_random_access_iterator_category<_ForwardIterator1>::value && - __has_random_access_iterator_category<_ForwardIterator2>::value) { - if (__g_last1 - __g_first1 != __g_last2 - __g_first2) - return false; - return std::__equal( - __policy, std::move(__g_first1), std::move(__g_last1), std::move(__g_first2), std::move(__g_pred)); - } else { - (void)__policy; // Avoid unused lambda capture warning - return std::equal( - std::move(__g_first1), - std::move(__g_last1), - std::move(__g_first2), - std::move(__g_last2), - std::move(__g_pred)); - } - }, - std::move(__first1), - std::move(__last1), - std::move(__first2), - std::move(__last2), - std::move(__pred)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI bool -equal(_ExecutionPolicy&& __policy, - _ForwardIterator1 __first1, - _ForwardIterator1 __last1, - _ForwardIterator2 __first2, - _ForwardIterator2 __last2, - _Pred __pred) { - auto __res = std::__equal( - __policy, std::move(__first1), std::move(__last1), std::move(__first2), std::move(__last2), std::move(__pred)); - if (!__res) - std::__throw_bad_alloc(); - return *__res; -} - -template >, int> = 0> -_LIBCPP_HIDE_FROM_ABI bool 
-equal(_ExecutionPolicy&& __policy, - _ForwardIterator1 __first1, - _ForwardIterator1 __last1, - _ForwardIterator2 __first2, - _ForwardIterator2 __last2) { - return std::equal( - __policy, std::move(__first1), std::move(__last1), std::move(__first2), std::move(__last2), std::equal_to{}); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_EQUAL_H diff --git a/lib/libcxx/include/__algorithm/pstl_fill.h b/lib/libcxx/include/__algorithm/pstl_fill.h deleted file mode 100644 index 488b49a0feec..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_fill.h +++ /dev/null @@ -1,116 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_FILL_H -#define _LIBCPP___ALGORITHM_PSTL_FILL_H - -#include <__algorithm/fill_n.h> -#include <__algorithm/pstl_for_each.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__config> -#include <__iterator/concepts.h> -#include <__iterator/cpp17_iterator_concepts.h> -#include <__iterator/iterator_traits.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/remove_cvref.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -void __pstl_fill(); // declaration needed for the frontend dispatch below - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI 
optional<__empty> -__fill(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) noexcept { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_fill, _RawPolicy), - [&](_ForwardIterator __g_first, _ForwardIterator __g_last, const _Tp& __g_value) { - return std::__for_each(__policy, __g_first, __g_last, [&](__iter_reference<_ForwardIterator> __element) { - __element = __g_value; - }); - }, - std::move(__first), - std::move(__last), - __value); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void -fill(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - if (!std::__fill(__policy, std::move(__first), std::move(__last), __value)) - std::__throw_bad_alloc(); -} - -template -void __pstl_fill_n(); // declaration needed for the frontend dispatch below - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> -__fill_n(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _SizeT&& __n, const _Tp& __value) noexcept { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_fill_n, _RawPolicy), - [&](_ForwardIterator __g_first, _SizeT __g_n, const _Tp& __g_value) { - if constexpr (__has_random_access_iterator_category_or_concept<_ForwardIterator>::value) - std::fill(__policy, __g_first, __g_first + __g_n, __g_value); - else - std::fill_n(__g_first, __g_n, __g_value); - return optional<__empty>{__empty{}}; - }, - std::move(__first), - std::move(__n), - __value); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void -fill_n(_ExecutionPolicy&& __policy, _ForwardIterator __first, _SizeT __n, const _Tp& __value) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - if 
(!std::__fill_n(__policy, std::move(__first), std::move(__n), __value)) - std::__throw_bad_alloc(); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_FILL_H diff --git a/lib/libcxx/include/__algorithm/pstl_find.h b/lib/libcxx/include/__algorithm/pstl_find.h deleted file mode 100644 index 5b694db68aea..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_find.h +++ /dev/null @@ -1,141 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_FIND_H -#define _LIBCPP___ALGORITHM_PSTL_FIND_H - -#include <__algorithm/comp.h> -#include <__algorithm/find.h> -#include <__algorithm/pstl_backend.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__config> -#include <__iterator/cpp17_iterator_concepts.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/remove_cvref.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__remove_cvref_t<_ForwardIterator>> -__find_if(_ExecutionPolicy&&, _ForwardIterator&& __first, _ForwardIterator&& __last, _Predicate&& __pred) noexcept { - using _Backend = typename __select_backend<_RawPolicy>::type; - return std::__pstl_find_if<_RawPolicy>(_Backend{}, std::move(__first), 
std::move(__last), std::move(__pred)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI _ForwardIterator -find_if(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - auto __res = std::__find_if(__policy, std::move(__first), std::move(__last), std::move(__pred)); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -template -void __pstl_find_if_not(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__remove_cvref_t<_ForwardIterator>> -__find_if_not(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Predicate&& __pred) { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_find_if_not, _RawPolicy), - [&](_ForwardIterator&& __g_first, _ForwardIterator&& __g_last, _Predicate&& __g_pred) - -> optional<__remove_cvref_t<_ForwardIterator>> { - return std::__find_if( - __policy, __g_first, __g_last, [&](__iter_reference<__remove_cvref_t<_ForwardIterator>> __value) { - return !__g_pred(__value); - }); - }, - std::move(__first), - std::move(__last), - std::move(__pred)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI _ForwardIterator -find_if_not(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - auto __res = std::__find_if_not(__policy, std::move(__first), std::move(__last), std::move(__pred)); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -template -void __pstl_find(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__remove_cvref_t<_ForwardIterator>> -__find(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) noexcept { - return std::__pstl_frontend_dispatch( - 
_LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_find, _RawPolicy), - [&](_ForwardIterator __g_first, _ForwardIterator __g_last, const _Tp& __g_value) -> optional<_ForwardIterator> { - return std::find_if( - __policy, __g_first, __g_last, [&](__iter_reference<__remove_cvref_t<_ForwardIterator>> __element) { - return __element == __g_value; - }); - }, - std::move(__first), - std::move(__last), - __value); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI _ForwardIterator -find(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - auto __res = std::__find(__policy, std::move(__first), std::move(__last), __value); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_FIND_H diff --git a/lib/libcxx/include/__algorithm/pstl_for_each.h b/lib/libcxx/include/__algorithm/pstl_for_each.h deleted file mode 100644 index bb7b5a61a6dc..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_for_each.h +++ /dev/null @@ -1,108 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_FOR_EACH_H -#define _LIBCPP___ALGORITHM_PSTL_FOR_EACH_H - -#include <__algorithm/for_each.h> -#include <__algorithm/for_each_n.h> -#include <__algorithm/pstl_backend.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__config> -#include <__iterator/concepts.h> -#include <__iterator/cpp17_iterator_concepts.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/remove_cvref.h> -#include <__type_traits/void_t.h> -#include <__utility/empty.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> -__for_each(_ExecutionPolicy&&, _ForwardIterator&& __first, _ForwardIterator&& __last, _Function&& __func) noexcept { - using _Backend = typename __select_backend<_RawPolicy>::type; - return std::__pstl_for_each<_RawPolicy>(_Backend{}, std::move(__first), std::move(__last), std::move(__func)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void -for_each(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Function __func) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - if (!std::__for_each(__policy, std::move(__first), std::move(__last), std::move(__func))) - std::__throw_bad_alloc(); -} - -template -void __pstl_for_each_n(); // declaration needed for the frontend dispatch below - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> -__for_each_n(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _Size&& 
__size, _Function&& __func) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_for_each_n, _RawPolicy), - [&](_ForwardIterator __g_first, _Size __g_size, _Function __g_func) -> optional<__empty> { - if constexpr (__has_random_access_iterator_category_or_concept<_ForwardIterator>::value) { - std::for_each(__policy, std::move(__g_first), __g_first + __g_size, std::move(__g_func)); - return __empty{}; - } else { - std::for_each_n(std::move(__g_first), __g_size, std::move(__g_func)); - return __empty{}; - } - }, - std::move(__first), - std::move(__size), - std::move(__func)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void -for_each_n(_ExecutionPolicy&& __policy, _ForwardIterator __first, _Size __size, _Function __func) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - auto __res = std::__for_each_n(__policy, std::move(__first), std::move(__size), std::move(__func)); - if (!__res) - std::__throw_bad_alloc(); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_FOR_EACH_H diff --git a/lib/libcxx/include/__algorithm/pstl_frontend_dispatch.h b/lib/libcxx/include/__algorithm/pstl_frontend_dispatch.h deleted file mode 100644 index 6fa110749115..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_frontend_dispatch.h +++ /dev/null @@ -1,44 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_FRONTEND_DISPATCH -#define _LIBCPP___ALGORITHM_PSTL_FRONTEND_DISPATCH - -#include <__config> -#include <__type_traits/is_callable.h> -#include <__utility/forward.h> - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -#if _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -# define _LIBCPP_PSTL_CUSTOMIZATION_POINT(name, policy) \ - [](auto&&... __args) -> decltype(std::name( \ - typename __select_backend::type{}, std::forward(__args)...)) { \ - return std::name(typename __select_backend::type{}, std::forward(__args)...); \ - } - -template -_LIBCPP_HIDE_FROM_ABI decltype(auto) -__pstl_frontend_dispatch(_SpecializedImpl __specialized_impl, _GenericImpl __generic_impl, _Args&&... __args) { - if constexpr (__is_callable<_SpecializedImpl, _Args...>::value) { - return __specialized_impl(std::forward<_Args>(__args)...); - } else { - return __generic_impl(std::forward<_Args>(__args)...); - } -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // _LIBCPP_STD_VER >= 17 - -#endif // _LIBCPP___ALGORITHM_PSTL_FRONTEND_DISPATCH diff --git a/lib/libcxx/include/__algorithm/pstl_generate.h b/lib/libcxx/include/__algorithm/pstl_generate.h deleted file mode 100644 index 7133c6f4f4c6..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_generate.h +++ /dev/null @@ -1,114 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_GENERATE_H -#define _LIBCPP___ALGORITHM_PSTL_GENERATE_H - -#include <__algorithm/pstl_backend.h> -#include <__algorithm/pstl_for_each.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__config> -#include <__iterator/cpp17_iterator_concepts.h> -#include <__iterator/iterator_traits.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/remove_cvref.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -void __pstl_generate(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> -__generate(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Generator&& __gen) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_generate, _RawPolicy), - [&__policy](_ForwardIterator __g_first, _ForwardIterator __g_last, _Generator __g_gen) { - return std::__for_each( - __policy, std::move(__g_first), std::move(__g_last), [&](__iter_reference<_ForwardIterator> __element) { - __element = __g_gen(); - }); - }, - std::move(__first), - std::move(__last), - std::move(__gen)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void -generate(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Generator __gen) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - if (!std::__generate(__policy, std::move(__first), std::move(__last), std::move(__gen))) - std::__throw_bad_alloc(); -} - -template 
-void __pstl_generate_n(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> -__generate_n(_ExecutionPolicy&& __policy, _ForwardIterator&& __first, _Size&& __n, _Generator&& __gen) { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_generate_n, _RawPolicy), - [&__policy](_ForwardIterator __g_first, _Size __g_n, _Generator __g_gen) { - return std::__for_each_n( - __policy, std::move(__g_first), std::move(__g_n), [&](__iter_reference<_ForwardIterator> __element) { - __element = __g_gen(); - }); - }, - std::move(__first), - __n, - std::move(__gen)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void -generate_n(_ExecutionPolicy&& __policy, _ForwardIterator __first, _Size __n, _Generator __gen) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - if (!std::__generate_n(__policy, std::move(__first), std::move(__n), std::move(__gen))) - std::__throw_bad_alloc(); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_GENERATE_H diff --git a/lib/libcxx/include/__algorithm/pstl_is_partitioned.h b/lib/libcxx/include/__algorithm/pstl_is_partitioned.h deleted file mode 100644 index b65430212207..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_is_partitioned.h +++ /dev/null @@ -1,77 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_IS_PARITTIONED -#define _LIBCPP___ALGORITHM_PSTL_IS_PARITTIONED - -#include <__algorithm/pstl_any_all_none_of.h> -#include <__algorithm/pstl_backend.h> -#include <__algorithm/pstl_find.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__config> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/remove_cvref.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -void __pstl_is_partitioned(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional __is_partitioned( - _ExecutionPolicy&& __policy, _ForwardIterator&& __first, _ForwardIterator&& __last, _Predicate&& __pred) { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_is_partitioned, _RawPolicy), - [&__policy](_ForwardIterator __g_first, _ForwardIterator __g_last, _Predicate __g_pred) { - __g_first = std::find_if_not(__policy, __g_first, __g_last, __g_pred); - if (__g_first == __g_last) - return true; - ++__g_first; - return std::none_of(__policy, __g_first, __g_last, __g_pred); - }, - std::move(__first), - std::move(__last), - std::move(__pred)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI bool -is_partitioned(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) { - auto __res = std::__is_partitioned(__policy, std::move(__first), std::move(__last), std::move(__pred)); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif 
// !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_IS_PARITTIONED diff --git a/lib/libcxx/include/__algorithm/pstl_merge.h b/lib/libcxx/include/__algorithm/pstl_merge.h deleted file mode 100644 index 3d262db6bc0c..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_merge.h +++ /dev/null @@ -1,92 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_MERGE_H -#define _LIBCPP___ALGORITHM_PSTL_MERGE_H - -#include <__algorithm/pstl_backend.h> -#include <__config> -#include <__functional/operations.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/remove_cvref.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template , - class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>, - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator> -__merge(_ExecutionPolicy&&, - _ForwardIterator1 __first1, - _ForwardIterator1 __last1, - _ForwardIterator2 __first2, - _ForwardIterator2 __last2, - _ForwardOutIterator __result, - _Comp __comp = {}) noexcept { - using _Backend = typename __select_backend<_RawPolicy>::type; - return std::__pstl_merge<_RawPolicy>( - _Backend{}, - std::move(__first1), - std::move(__last1), - std::move(__first2), - std::move(__last2), - std::move(__result), - std::move(__comp)); -} - -template 
, - class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>, - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator -merge(_ExecutionPolicy&& __policy, - _ForwardIterator1 __first1, - _ForwardIterator1 __last1, - _ForwardIterator2 __first2, - _ForwardIterator2 __last2, - _ForwardOutIterator __result, - _Comp __comp = {}) { - auto __res = std::__merge( - __policy, - std::move(__first1), - std::move(__last1), - std::move(__first2), - std::move(__last2), - std::move(__result), - std::move(__comp)); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_MERGE_H diff --git a/lib/libcxx/include/__algorithm/pstl_move.h b/lib/libcxx/include/__algorithm/pstl_move.h deleted file mode 100644 index d8441f1a6c2e..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_move.h +++ /dev/null @@ -1,84 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_MOVE_H -#define _LIBCPP___ALGORITHM_PSTL_MOVE_H - -#include <__algorithm/copy_n.h> -#include <__algorithm/pstl_backend.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__algorithm/pstl_transform.h> -#include <__config> -#include <__functional/identity.h> -#include <__iterator/iterator_traits.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_constant_evaluated.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/is_trivially_copyable.h> -#include <__type_traits/remove_cvref.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -// TODO: Use the std::copy/move shenanigans to forward to std::memmove -// Investigate whether we want to still forward to std::transform(policy) -// in that case for the execution::par part, or whether we actually want -// to run everything serially in that case. 
- -template -void __pstl_move(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator> -__move(_ExecutionPolicy&& __policy, - _ForwardIterator&& __first, - _ForwardIterator&& __last, - _ForwardOutIterator&& __result) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_move, _RawPolicy), - [&__policy](_ForwardIterator __g_first, _ForwardIterator __g_last, _ForwardOutIterator __g_result) { - return std::__transform(__policy, __g_first, __g_last, __g_result, [](auto&& __v) { return std::move(__v); }); - }, - std::move(__first), - std::move(__last), - std::move(__result)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator -move(_ExecutionPolicy&& __policy, _ForwardIterator __first, _ForwardIterator __last, _ForwardOutIterator __result) { - auto __res = std::__move(__policy, std::move(__first), std::move(__last), std::move(__result)); - if (!__res) - std::__throw_bad_alloc(); - return *__res; -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_MOVE_H diff --git a/lib/libcxx/include/__algorithm/pstl_replace.h b/lib/libcxx/include/__algorithm/pstl_replace.h deleted file mode 100644 index b1caf3fd4ac0..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_replace.h +++ /dev/null @@ -1,247 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_REPLACE_H -#define _LIBCPP___ALGORITHM_PSTL_REPLACE_H - -#include <__algorithm/pstl_backend.h> -#include <__algorithm/pstl_for_each.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__algorithm/pstl_transform.h> -#include <__config> -#include <__iterator/iterator_traits.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/remove_cvref.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -void __pstl_replace_if(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> -__replace_if(_ExecutionPolicy&& __policy, - _ForwardIterator&& __first, - _ForwardIterator&& __last, - _Pred&& __pred, - const _Tp& __new_value) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_replace_if, _RawPolicy), - [&__policy]( - _ForwardIterator&& __g_first, _ForwardIterator&& __g_last, _Pred&& __g_pred, const _Tp& __g_new_value) { - std::for_each(__policy, __g_first, __g_last, [&](__iter_reference<_ForwardIterator> __element) { - if (__g_pred(__element)) - __element = __g_new_value; - }); - return optional<__empty>{__empty{}}; - }, - std::move(__first), - std::move(__last), - std::move(__pred), - __new_value); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void -replace_if(_ExecutionPolicy&& __policy, - _ForwardIterator __first, - _ForwardIterator __last, - _Pred __pred, - const _Tp& __new_value) { - auto __res = std::__replace_if(__policy, std::move(__first), std::move(__last), std::move(__pred), __new_value); - if (!__res) - 
std::__throw_bad_alloc(); -} - -template -void __pstl_replace(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> -__replace(_ExecutionPolicy&& __policy, - _ForwardIterator __first, - _ForwardIterator __last, - const _Tp& __old_value, - const _Tp& __new_value) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_replace, _RawPolicy), - [&__policy]( - _ForwardIterator __g_first, _ForwardIterator __g_last, const _Tp& __g_old_value, const _Tp& __g_new_value) { - return std::__replace_if( - __policy, - std::move(__g_first), - std::move(__g_last), - [&](__iter_reference<_ForwardIterator> __element) { return __element == __g_old_value; }, - __g_new_value); - }, - std::move(__first), - std::move(__last), - __old_value, - __new_value); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void -replace(_ExecutionPolicy&& __policy, - _ForwardIterator __first, - _ForwardIterator __last, - const _Tp& __old_value, - const _Tp& __new_value) { - if (!std::__replace(__policy, std::move(__first), std::move(__last), __old_value, __new_value)) - std::__throw_bad_alloc(); -} - -template -void __pstl_replace_copy_if(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> __replace_copy_if( - _ExecutionPolicy&& __policy, - _ForwardIterator&& __first, - _ForwardIterator&& __last, - _ForwardOutIterator&& __result, - _Pred&& __pred, - const _Tp& __new_value) { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_replace_copy_if, _RawPolicy), - [&__policy](_ForwardIterator __g_first, - _ForwardIterator __g_last, - _ForwardOutIterator __g_result, - _Pred __g_pred, - const _Tp& __g_new_value) -> optional<__empty> { - if (!std::__transform( - __policy, __g_first, __g_last, __g_result, [&](__iter_reference<_ForwardIterator> __element) { - return __g_pred(__element) ? 
__g_new_value : __element; - })) - return nullopt; - return __empty{}; - }, - std::move(__first), - std::move(__last), - std::move(__result), - std::move(__pred), - __new_value); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void replace_copy_if( - _ExecutionPolicy&& __policy, - _ForwardIterator __first, - _ForwardIterator __last, - _ForwardOutIterator __result, - _Pred __pred, - const _Tp& __new_value) { - if (!std::__replace_copy_if( - __policy, std::move(__first), std::move(__last), std::move(__result), std::move(__pred), __new_value)) - std::__throw_bad_alloc(); -} - -template -void __pstl_replace_copy(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> __replace_copy( - _ExecutionPolicy&& __policy, - _ForwardIterator&& __first, - _ForwardIterator&& __last, - _ForwardOutIterator&& __result, - const _Tp& __old_value, - const _Tp& __new_value) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_replace_copy, _RawPolicy), - [&__policy](_ForwardIterator __g_first, - _ForwardIterator __g_last, - _ForwardOutIterator __g_result, - const _Tp& __g_old_value, - const _Tp& __g_new_value) { - return std::__replace_copy_if( - __policy, - std::move(__g_first), - std::move(__g_last), - std::move(__g_result), - [&](__iter_reference<_ForwardIterator> __element) { return __element == __g_old_value; }, - __g_new_value); - }, - std::move(__first), - std::move(__last), - std::move(__result), - __old_value, - __new_value); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void replace_copy( - _ExecutionPolicy&& __policy, - _ForwardIterator __first, - _ForwardIterator __last, - _ForwardOutIterator __result, - const _Tp& __old_value, - const _Tp& __new_value) { - if (!std::__replace_copy( - __policy, std::move(__first), std::move(__last), std::move(__result), __old_value, __new_value)) - std::__throw_bad_alloc(); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // 
!defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_REPLACE_H diff --git a/lib/libcxx/include/__algorithm/pstl_rotate_copy.h b/lib/libcxx/include/__algorithm/pstl_rotate_copy.h deleted file mode 100644 index 346aab1d4a55..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_rotate_copy.h +++ /dev/null @@ -1,85 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_ROTATE_COPY_H -#define _LIBCPP___ALGORITHM_PSTL_ROTATE_COPY_H - -#include <__algorithm/pstl_backend.h> -#include <__algorithm/pstl_copy.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__type_traits/is_execution_policy.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -void __pstl_rotate_copy(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<_ForwardOutIterator> -__rotate_copy(_ExecutionPolicy&& __policy, - _ForwardIterator&& __first, - _ForwardIterator&& __middle, - _ForwardIterator&& __last, - _ForwardOutIterator&& __result) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_rotate_copy, _RawPolicy), - [&__policy](_ForwardIterator __g_first, - _ForwardIterator __g_middle, - _ForwardIterator __g_last, - _ForwardOutIterator __g_result) -> optional<_ForwardOutIterator> { - auto __result_mid = - std::__copy(__policy, _ForwardIterator(__g_middle), std::move(__g_last), 
std::move(__g_result)); - if (!__result_mid) - return nullopt; - return std::__copy(__policy, std::move(__g_first), std::move(__g_middle), *std::move(__result_mid)); - }, - std::move(__first), - std::move(__middle), - std::move(__last), - std::move(__result)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator rotate_copy( - _ExecutionPolicy&& __policy, - _ForwardIterator __first, - _ForwardIterator __middle, - _ForwardIterator __last, - _ForwardOutIterator __result) { - auto __res = - std::__rotate_copy(__policy, std::move(__first), std::move(__middle), std::move(__last), std::move(__result)); - if (!__res) - std::__throw_bad_alloc(); - return *__res; -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_ROTATE_COPY_H diff --git a/lib/libcxx/include/__algorithm/pstl_sort.h b/lib/libcxx/include/__algorithm/pstl_sort.h deleted file mode 100644 index a931f768111a..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_sort.h +++ /dev/null @@ -1,82 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_SORT_H -#define _LIBCPP___ALGORITHM_PSTL_SORT_H - -#include <__algorithm/pstl_backend.h> -#include <__algorithm/pstl_frontend_dispatch.h> -#include <__algorithm/pstl_stable_sort.h> -#include <__config> -#include <__functional/operations.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/remove_cvref.h> -#include <__utility/empty.h> -#include <__utility/forward.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template -void __pstl_sort(); - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> __sort( - _ExecutionPolicy&& __policy, _RandomAccessIterator __first, _RandomAccessIterator __last, _Comp __comp) noexcept { - return std::__pstl_frontend_dispatch( - _LIBCPP_PSTL_CUSTOMIZATION_POINT(__pstl_sort, _RawPolicy), - [&__policy](_RandomAccessIterator __g_first, _RandomAccessIterator __g_last, _Comp __g_comp) { - std::stable_sort(__policy, std::move(__g_first), std::move(__g_last), std::move(__g_comp)); - return optional<__empty>{__empty{}}; - }, - std::move(__first), - std::move(__last), - std::move(__comp)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void -sort(_ExecutionPolicy&& __policy, _RandomAccessIterator __first, _RandomAccessIterator __last, _Comp __comp) { - if (!std::__sort(__policy, std::move(__first), std::move(__last), std::move(__comp))) - std::__throw_bad_alloc(); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void -sort(_ExecutionPolicy&& __policy, _RandomAccessIterator __first, _RandomAccessIterator __last) { - 
std::sort(std::forward<_ExecutionPolicy>(__policy), std::move(__first), std::move(__last), less{}); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_SORT_H diff --git a/lib/libcxx/include/__algorithm/pstl_stable_sort.h b/lib/libcxx/include/__algorithm/pstl_stable_sort.h deleted file mode 100644 index 8ea0bb3f9a8d..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_stable_sort.h +++ /dev/null @@ -1,61 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_STABLE_SORT_H -#define _LIBCPP___ALGORITHM_PSTL_STABLE_SORT_H - -#include <__algorithm/pstl_backend.h> -#include <__config> -#include <__functional/operations.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/remove_cvref.h> -#include <__utility/empty.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template , - class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>, - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__empty> __stable_sort( - _ExecutionPolicy&&, _RandomAccessIterator&& __first, _RandomAccessIterator&& __last, _Comp&& __comp = {}) noexcept { - using _Backend = typename __select_backend<_RawPolicy>::type; - return std::__pstl_stable_sort<_RawPolicy>(_Backend{}, std::move(__first), std::move(__last), 
std::move(__comp)); -} - -template , - class _RawPolicy = __remove_cvref_t<_ExecutionPolicy>, - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI void stable_sort( - _ExecutionPolicy&& __policy, _RandomAccessIterator __first, _RandomAccessIterator __last, _Comp __comp = {}) { - if (!std::__stable_sort(__policy, std::move(__first), std::move(__last), std::move(__comp))) - std::__throw_bad_alloc(); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_STABLE_SORT_H diff --git a/lib/libcxx/include/__algorithm/pstl_transform.h b/lib/libcxx/include/__algorithm/pstl_transform.h deleted file mode 100644 index f95938782fc3..000000000000 --- a/lib/libcxx/include/__algorithm/pstl_transform.h +++ /dev/null @@ -1,120 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___ALGORITHM_PSTL_TRANSFORM_H -#define _LIBCPP___ALGORITHM_PSTL_TRANSFORM_H - -#include <__algorithm/pstl_backend.h> -#include <__config> -#include <__iterator/cpp17_iterator_concepts.h> -#include <__type_traits/enable_if.h> -#include <__type_traits/is_execution_policy.h> -#include <__type_traits/remove_cvref.h> -#include <__utility/move.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_PUSH_MACROS -#include <__undef_macros> - -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_BEGIN_NAMESPACE_STD - -template , - enable_if_t, int> = 0> -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI optional<__remove_cvref_t<_ForwardOutIterator>> -__transform(_ExecutionPolicy&&, - _ForwardIterator&& __first, - _ForwardIterator&& __last, - _ForwardOutIterator&& __result, - _UnaryOperation&& __op) noexcept { - using _Backend = typename __select_backend<_RawPolicy>::type; - return std::__pstl_transform<_RawPolicy>( - _Backend{}, std::move(__first), std::move(__last), std::move(__result), std::move(__op)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator transform( - _ExecutionPolicy&& __policy, - _ForwardIterator __first, - _ForwardIterator __last, - _ForwardOutIterator __result, - _UnaryOperation __op) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator); - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator); - _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(_ForwardOutIterator, decltype(__op(*__first))); - auto __res = std::__transform(__policy, std::move(__first), std::move(__last), std::move(__result), std::move(__op)); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI 
optional<__remove_cvref_t<_ForwardOutIterator>> -__transform(_ExecutionPolicy&&, - _ForwardIterator1&& __first1, - _ForwardIterator1&& __last1, - _ForwardIterator2&& __first2, - _ForwardOutIterator&& __result, - _BinaryOperation&& __op) noexcept { - using _Backend = typename __select_backend<_RawPolicy>::type; - return std::__pstl_transform<_RawPolicy>( - _Backend{}, std::move(__first1), std::move(__last1), std::move(__first2), std::move(__result), std::move(__op)); -} - -template , - enable_if_t, int> = 0> -_LIBCPP_HIDE_FROM_ABI _ForwardOutIterator transform( - _ExecutionPolicy&& __policy, - _ForwardIterator1 __first1, - _ForwardIterator1 __last1, - _ForwardIterator2 __first2, - _ForwardOutIterator __result, - _BinaryOperation __op) { - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator1); - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardIterator2); - _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(_ForwardOutIterator); - _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(_ForwardOutIterator, decltype(__op(*__first1, *__first2))); - auto __res = std::__transform( - __policy, std::move(__first1), std::move(__last1), std::move(__first2), std::move(__result), std::move(__op)); - if (!__res) - std::__throw_bad_alloc(); - return *std::move(__res); -} - -_LIBCPP_END_NAMESPACE_STD - -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_PSTL) && _LIBCPP_STD_VER >= 17 - -_LIBCPP_POP_MACROS - -#endif // _LIBCPP___ALGORITHM_PSTL_TRANSFORM_H diff --git a/lib/libcxx/include/__algorithm/push_heap.h b/lib/libcxx/include/__algorithm/push_heap.h index 7d8720e3a93d..ec0b445f2b70 100644 --- a/lib/libcxx/include/__algorithm/push_heap.h +++ b/lib/libcxx/include/__algorithm/push_heap.h @@ -14,8 +14,8 @@ #include <__algorithm/iterator_operations.h> #include <__config> #include <__iterator/iterator_traits.h> -#include <__type_traits/is_copy_assignable.h> -#include <__type_traits/is_copy_constructible.h> +#include <__type_traits/is_assignable.h> +#include <__type_traits/is_constructible.h> #include 
<__utility/move.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__algorithm/ranges_adjacent_find.h b/lib/libcxx/include/__algorithm/ranges_adjacent_find.h index a10b04167ede..3c54f723310a 100644 --- a/lib/libcxx/include/__algorithm/ranges_adjacent_find.h +++ b/lib/libcxx/include/__algorithm/ranges_adjacent_find.h @@ -53,7 +53,7 @@ struct __fn { sentinel_for<_Iter> _Sent, class _Proj = identity, indirect_binary_predicate, projected<_Iter, _Proj>> _Pred = ranges::equal_to> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Iter + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Iter operator()(_Iter __first, _Sent __last, _Pred __pred = {}, _Proj __proj = {}) const { return __adjacent_find_impl(std::move(__first), std::move(__last), __pred, __proj); } @@ -62,7 +62,7 @@ struct __fn { class _Proj = identity, indirect_binary_predicate, _Proj>, projected, _Proj>> _Pred = ranges::equal_to> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Range> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Range> operator()(_Range&& __range, _Pred __pred = {}, _Proj __proj = {}) const { return __adjacent_find_impl(ranges::begin(__range), ranges::end(__range), __pred, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_all_of.h b/lib/libcxx/include/__algorithm/ranges_all_of.h index 8976541d590c..2f603b32f32d 100644 --- a/lib/libcxx/include/__algorithm/ranges_all_of.h +++ b/lib/libcxx/include/__algorithm/ranges_all_of.h @@ -45,7 +45,7 @@ struct __fn { sentinel_for<_Iter> _Sent, class _Proj = identity, indirect_unary_predicate> _Pred> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Iter __first, _Sent __last, _Pred __pred, _Proj __proj = {}) const { return __all_of_impl(std::move(__first), std::move(__last), __pred, __proj); } @@ -53,7 +53,7 @@ struct __fn { template , _Proj>> _Pred> - _LIBCPP_NODISCARD_EXT 
_LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Range&& __range, _Pred __pred, _Proj __proj = {}) const { return __all_of_impl(ranges::begin(__range), ranges::end(__range), __pred, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_any_of.h b/lib/libcxx/include/__algorithm/ranges_any_of.h index 7c775f5f64de..205fcecc086e 100644 --- a/lib/libcxx/include/__algorithm/ranges_any_of.h +++ b/lib/libcxx/include/__algorithm/ranges_any_of.h @@ -45,7 +45,7 @@ struct __fn { sentinel_for<_Iter> _Sent, class _Proj = identity, indirect_unary_predicate> _Pred> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Iter __first, _Sent __last, _Pred __pred = {}, _Proj __proj = {}) const { return __any_of_impl(std::move(__first), std::move(__last), __pred, __proj); } @@ -53,7 +53,7 @@ struct __fn { template , _Proj>> _Pred> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Range&& __range, _Pred __pred, _Proj __proj = {}) const { return __any_of_impl(ranges::begin(__range), ranges::end(__range), __pred, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_binary_search.h b/lib/libcxx/include/__algorithm/ranges_binary_search.h index f3b7842d5ccc..1ef2bd62b599 100644 --- a/lib/libcxx/include/__algorithm/ranges_binary_search.h +++ b/lib/libcxx/include/__algorithm/ranges_binary_search.h @@ -39,7 +39,7 @@ struct __fn { class _Type, class _Proj = identity, indirect_strict_weak_order> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Iter __first, _Sent __last, const _Type& __value, _Comp __comp = {}, _Proj __proj = {}) const { auto __ret = std::__lower_bound<_RangeAlgPolicy>(__first, __last, __value, __comp, __proj); return __ret != __last && !std::invoke(__comp, __value, 
std::invoke(__proj, *__ret)); @@ -49,7 +49,7 @@ struct __fn { class _Type, class _Proj = identity, indirect_strict_weak_order, _Proj>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Range&& __r, const _Type& __value, _Comp __comp = {}, _Proj __proj = {}) const { auto __first = ranges::begin(__r); auto __last = ranges::end(__r); diff --git a/lib/libcxx/include/__algorithm/ranges_clamp.h b/lib/libcxx/include/__algorithm/ranges_clamp.h index f5ef5fd7f26e..e6181ef9435e 100644 --- a/lib/libcxx/include/__algorithm/ranges_clamp.h +++ b/lib/libcxx/include/__algorithm/ranges_clamp.h @@ -35,7 +35,7 @@ struct __fn { template > _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr const _Type& operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr const _Type& operator()( const _Type& __value, const _Type& __low, const _Type& __high, _Comp __comp = {}, _Proj __proj = {}) const { _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( !bool(std::invoke(__comp, std::invoke(__proj, __high), std::invoke(__proj, __low))), diff --git a/lib/libcxx/include/__algorithm/ranges_contains.h b/lib/libcxx/include/__algorithm/ranges_contains.h index 00d0e5401988..4836c3baed17 100644 --- a/lib/libcxx/include/__algorithm/ranges_contains.h +++ b/lib/libcxx/include/__algorithm/ranges_contains.h @@ -37,14 +37,14 @@ namespace __contains { struct __fn { template _Sent, class _Type, class _Proj = identity> requires indirect_binary_predicate, const _Type*> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool static + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool static operator()(_Iter __first, _Sent __last, const _Type& __value, _Proj __proj = {}) { return ranges::find(std::move(__first), __last, __value, std::ref(__proj)) != __last; } template requires indirect_binary_predicate, _Proj>, const _Type*> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool static + 
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool static operator()(_Range&& __range, const _Type& __value, _Proj __proj = {}) { return ranges::find(ranges::begin(__range), ranges::end(__range), __value, std::ref(__proj)) != ranges::end(__range); diff --git a/lib/libcxx/include/__algorithm/ranges_contains_subrange.h b/lib/libcxx/include/__algorithm/ranges_contains_subrange.h new file mode 100644 index 000000000000..4398c457fd05 --- /dev/null +++ b/lib/libcxx/include/__algorithm/ranges_contains_subrange.h @@ -0,0 +1,97 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___ALGORITHM_RANGES_CONTAINS_SUBRANGE_H +#define _LIBCPP___ALGORITHM_RANGES_CONTAINS_SUBRANGE_H + +#include <__algorithm/ranges_search.h> +#include <__config> +#include <__functional/identity.h> +#include <__functional/ranges_operations.h> +#include <__functional/reference_wrapper.h> +#include <__iterator/concepts.h> +#include <__iterator/indirectly_comparable.h> +#include <__iterator/projected.h> +#include <__ranges/access.h> +#include <__ranges/concepts.h> +#include <__ranges/size.h> +#include <__ranges/subrange.h> +#include <__utility/move.h> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_PUSH_MACROS +#include <__undef_macros> + +#if _LIBCPP_STD_VER >= 23 + +_LIBCPP_BEGIN_NAMESPACE_STD + +namespace ranges { +namespace __contains_subrange { +struct __fn { + template _Sent1, + forward_iterator _Iter2, + sentinel_for<_Iter2> _Sent2, + class _Pred = ranges::equal_to, + class _Proj1 = identity, + class _Proj2 = identity> + requires indirectly_comparable<_Iter1, _Iter2, _Pred, _Proj1, _Proj2> + [[nodiscard]] 
_LIBCPP_HIDE_FROM_ABI constexpr bool static operator()( + _Iter1 __first1, + _Sent1 __last1, + _Iter2 __first2, + _Sent2 __last2, + _Pred __pred = {}, + _Proj1 __proj1 = {}, + _Proj2 __proj2 = {}) { + if (__first2 == __last2) + return true; + + auto __ret = ranges::search( + std::move(__first1), __last1, std::move(__first2), __last2, __pred, std::ref(__proj1), std::ref(__proj2)); + return __ret.empty() == false; + } + + template + requires indirectly_comparable, iterator_t<_Range2>, _Pred, _Proj1, _Proj2> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool static + operator()(_Range1&& __range1, _Range2&& __range2, _Pred __pred = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) { + if constexpr (sized_range<_Range2>) { + if (ranges::size(__range2) == 0) + return true; + } else { + if (ranges::begin(__range2) == ranges::end(__range2)) + return true; + } + + auto __ret = ranges::search(__range1, __range2, __pred, std::ref(__proj1), std::ref(__proj2)); + return __ret.empty() == false; + } +}; +} // namespace __contains_subrange + +inline namespace __cpo { +inline constexpr auto contains_subrange = __contains_subrange::__fn{}; +} // namespace __cpo +} // namespace ranges + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP_STD_VER >= 23 + +_LIBCPP_POP_MACROS + +#endif // _LIBCPP___ALGORITHM_RANGES_CONTAINS_SUBRANGE_H diff --git a/lib/libcxx/include/__algorithm/ranges_count.h b/lib/libcxx/include/__algorithm/ranges_count.h index a8965c1b961f..4f3511743870 100644 --- a/lib/libcxx/include/__algorithm/ranges_count.h +++ b/lib/libcxx/include/__algorithm/ranges_count.h @@ -38,14 +38,14 @@ namespace __count { struct __fn { template _Sent, class _Type, class _Proj = identity> requires indirect_binary_predicate, const _Type*> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr iter_difference_t<_Iter> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr iter_difference_t<_Iter> operator()(_Iter __first, _Sent __last, const _Type& __value, _Proj __proj = {}) const { return 
std::__count<_RangeAlgPolicy>(std::move(__first), std::move(__last), __value, __proj); } template requires indirect_binary_predicate, _Proj>, const _Type*> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr range_difference_t<_Range> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr range_difference_t<_Range> operator()(_Range&& __r, const _Type& __value, _Proj __proj = {}) const { return std::__count<_RangeAlgPolicy>(ranges::begin(__r), ranges::end(__r), __value, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_count_if.h b/lib/libcxx/include/__algorithm/ranges_count_if.h index 71b942dd5322..5f2396ff7d53 100644 --- a/lib/libcxx/include/__algorithm/ranges_count_if.h +++ b/lib/libcxx/include/__algorithm/ranges_count_if.h @@ -50,7 +50,7 @@ struct __fn { sentinel_for<_Iter> _Sent, class _Proj = identity, indirect_unary_predicate> _Predicate> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr iter_difference_t<_Iter> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr iter_difference_t<_Iter> operator()(_Iter __first, _Sent __last, _Predicate __pred, _Proj __proj = {}) const { return ranges::__count_if_impl(std::move(__first), std::move(__last), __pred, __proj); } @@ -58,7 +58,7 @@ struct __fn { template , _Proj>> _Predicate> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr range_difference_t<_Range> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr range_difference_t<_Range> operator()(_Range&& __r, _Predicate __pred, _Proj __proj = {}) const { return ranges::__count_if_impl(ranges::begin(__r), ranges::end(__r), __pred, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_ends_with.h b/lib/libcxx/include/__algorithm/ranges_ends_with.h index c2a3cae9f3b1..06efdef36b7c 100644 --- a/lib/libcxx/include/__algorithm/ranges_ends_with.h +++ b/lib/libcxx/include/__algorithm/ranges_ends_with.h @@ -39,7 +39,7 @@ namespace ranges { namespace __ends_with { struct __fn { template - static _LIBCPP_HIDE_FROM_ABI constexpr bool 
__ends_with_fn_impl_bidirectional( + _LIBCPP_HIDE_FROM_ABI static constexpr bool __ends_with_fn_impl_bidirectional( _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, @@ -56,7 +56,7 @@ struct __fn { } template - static _LIBCPP_HIDE_FROM_ABI constexpr bool __ends_with_fn_impl( + _LIBCPP_HIDE_FROM_ABI static constexpr bool __ends_with_fn_impl( _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, @@ -65,7 +65,7 @@ struct __fn { _Proj1& __proj1, _Proj2& __proj2) { if constexpr (std::bidirectional_iterator<_Sent1> && std::bidirectional_iterator<_Sent2> && - (!std::random_access_iterator<_Sent1>)&&(!std::random_access_iterator<_Sent2>)) { + (!std::random_access_iterator<_Sent1>) && (!std::random_access_iterator<_Sent2>)) { return __ends_with_fn_impl_bidirectional(__first1, __last1, __first2, __last2, __pred, __proj1, __proj2); } else { @@ -133,7 +133,7 @@ struct __fn { requires(forward_iterator<_Iter1> || sized_sentinel_for<_Sent1, _Iter1>) && (forward_iterator<_Iter2> || sized_sentinel_for<_Sent2, _Iter2>) && indirectly_comparable<_Iter1, _Iter2, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, @@ -152,7 +152,7 @@ struct __fn { class _Proj2 = identity> requires(forward_range<_Range1> || sized_range<_Range1>) && (forward_range<_Range2> || sized_range<_Range2>) && indirectly_comparable, iterator_t<_Range2>, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( _Range1&& __range1, _Range2&& __range2, _Pred __pred = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) const { if constexpr (sized_range<_Range1> && sized_range<_Range2>) { auto __n1 = ranges::size(__range1); diff --git a/lib/libcxx/include/__algorithm/ranges_equal.h b/lib/libcxx/include/__algorithm/ranges_equal.h index 31c7ee261da6..edbd0e3641c1 100644 --- 
a/lib/libcxx/include/__algorithm/ranges_equal.h +++ b/lib/libcxx/include/__algorithm/ranges_equal.h @@ -44,7 +44,7 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity> requires indirectly_comparable<_Iter1, _Iter2, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, @@ -74,7 +74,7 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity> requires indirectly_comparable, iterator_t<_Range2>, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( _Range1&& __range1, _Range2&& __range2, _Pred __pred = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) const { if constexpr (sized_range<_Range1> && sized_range<_Range2>) { if (ranges::distance(__range1) != ranges::distance(__range2)) diff --git a/lib/libcxx/include/__algorithm/ranges_equal_range.h b/lib/libcxx/include/__algorithm/ranges_equal_range.h index 4c1c3834ba9f..4a308e016b54 100644 --- a/lib/libcxx/include/__algorithm/ranges_equal_range.h +++ b/lib/libcxx/include/__algorithm/ranges_equal_range.h @@ -46,7 +46,7 @@ struct __fn { class _Tp, class _Proj = identity, indirect_strict_weak_order> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter> operator()(_Iter __first, _Sent __last, const _Tp& __value, _Comp __comp = {}, _Proj __proj = {}) const { auto __ret = std::__equal_range<_RangeAlgPolicy>(std::move(__first), std::move(__last), __value, __comp, __proj); return {std::move(__ret.first), std::move(__ret.second)}; @@ -56,7 +56,7 @@ struct __fn { class _Tp, class _Proj = identity, indirect_strict_weak_order, _Proj>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range> + [[nodiscard]] 
_LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range> operator()(_Range&& __range, const _Tp& __value, _Comp __comp = {}, _Proj __proj = {}) const { auto __ret = std::__equal_range<_RangeAlgPolicy>(ranges::begin(__range), ranges::end(__range), __value, __comp, __proj); diff --git a/lib/libcxx/include/__algorithm/ranges_find.h b/lib/libcxx/include/__algorithm/ranges_find.h index 7459fad717a5..6b0d5efe37ab 100644 --- a/lib/libcxx/include/__algorithm/ranges_find.h +++ b/lib/libcxx/include/__algorithm/ranges_find.h @@ -44,22 +44,22 @@ struct __fn { if constexpr (forward_iterator<_Iter>) { auto [__first_un, __last_un] = std::__unwrap_range(__first, std::move(__last)); return std::__rewrap_range<_Sent>( - std::move(__first), std::__find_impl(std::move(__first_un), std::move(__last_un), __value, __proj)); + std::move(__first), std::__find(std::move(__first_un), std::move(__last_un), __value, __proj)); } else { - return std::__find_impl(std::move(__first), std::move(__last), __value, __proj); + return std::__find(std::move(__first), std::move(__last), __value, __proj); } } template _Sp, class _Tp, class _Proj = identity> requires indirect_binary_predicate, const _Tp*> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Ip + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Ip operator()(_Ip __first, _Sp __last, const _Tp& __value, _Proj __proj = {}) const { return __find_unwrap(std::move(__first), std::move(__last), __value, __proj); } template requires indirect_binary_predicate, _Proj>, const _Tp*> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Rp> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Rp> operator()(_Rp&& __r, const _Tp& __value, _Proj __proj = {}) const { return __find_unwrap(ranges::begin(__r), ranges::end(__r), __value, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_find_end.h b/lib/libcxx/include/__algorithm/ranges_find_end.h index 0bda4f3e1cea..e49e66dd4ac0 100644 --- 
a/lib/libcxx/include/__algorithm/ranges_find_end.h +++ b/lib/libcxx/include/__algorithm/ranges_find_end.h @@ -45,7 +45,7 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity> requires indirectly_comparable<_Iter1, _Iter2, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter1> operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter1> operator()( _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, @@ -72,7 +72,7 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity> requires indirectly_comparable, iterator_t<_Range2>, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range1> operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range1> operator()( _Range1&& __range1, _Range2&& __range2, _Pred __pred = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) const { auto __ret = std::__find_end_impl<_RangeAlgPolicy>( ranges::begin(__range1), diff --git a/lib/libcxx/include/__algorithm/ranges_find_first_of.h b/lib/libcxx/include/__algorithm/ranges_find_first_of.h index 63a7b8335faa..d92d9686bc44 100644 --- a/lib/libcxx/include/__algorithm/ranges_find_first_of.h +++ b/lib/libcxx/include/__algorithm/ranges_find_first_of.h @@ -60,7 +60,7 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity> requires indirectly_comparable<_Iter1, _Iter2, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Iter1 operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Iter1 operator()( _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, @@ -78,7 +78,7 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity> requires indirectly_comparable, iterator_t<_Range2>, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Range1> operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Range1> operator()( _Range1&& __range1, 
_Range2&& __range2, _Pred __pred = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) const { return __find_first_of_impl( ranges::begin(__range1), diff --git a/lib/libcxx/include/__algorithm/ranges_find_if.h b/lib/libcxx/include/__algorithm/ranges_find_if.h index 52ae55ce96c3..888f9ec3cb2d 100644 --- a/lib/libcxx/include/__algorithm/ranges_find_if.h +++ b/lib/libcxx/include/__algorithm/ranges_find_if.h @@ -48,13 +48,13 @@ struct __fn { sentinel_for<_Ip> _Sp, class _Proj = identity, indirect_unary_predicate> _Pred> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Ip + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Ip operator()(_Ip __first, _Sp __last, _Pred __pred, _Proj __proj = {}) const { return ranges::__find_if_impl(std::move(__first), std::move(__last), __pred, __proj); } template , _Proj>> _Pred> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Rp> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Rp> operator()(_Rp&& __r, _Pred __pred, _Proj __proj = {}) const { return ranges::__find_if_impl(ranges::begin(__r), ranges::end(__r), __pred, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_find_if_not.h b/lib/libcxx/include/__algorithm/ranges_find_if_not.h index 60c6796cbbfc..ec19545b5a1b 100644 --- a/lib/libcxx/include/__algorithm/ranges_find_if_not.h +++ b/lib/libcxx/include/__algorithm/ranges_find_if_not.h @@ -40,14 +40,14 @@ struct __fn { sentinel_for<_Ip> _Sp, class _Proj = identity, indirect_unary_predicate> _Pred> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Ip + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Ip operator()(_Ip __first, _Sp __last, _Pred __pred, _Proj __proj = {}) const { auto __pred2 = [&](auto&& __e) -> bool { return !std::invoke(__pred, std::forward(__e)); }; return ranges::__find_if_impl(std::move(__first), std::move(__last), __pred2, __proj); } template , _Proj>> _Pred> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Rp> + 
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Rp> operator()(_Rp&& __r, _Pred __pred, _Proj __proj = {}) const { auto __pred2 = [&](auto&& __e) -> bool { return !std::invoke(__pred, std::forward(__e)); }; return ranges::__find_if_impl(ranges::begin(__r), ranges::end(__r), __pred2, __proj); diff --git a/lib/libcxx/include/__algorithm/ranges_find_last.h b/lib/libcxx/include/__algorithm/ranges_find_last.h new file mode 100644 index 000000000000..95f7e77b8ccb --- /dev/null +++ b/lib/libcxx/include/__algorithm/ranges_find_last.h @@ -0,0 +1,175 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___ALGORITHM_RANGES_FIND_LAST_H +#define _LIBCPP___ALGORITHM_RANGES_FIND_LAST_H + +#include <__config> +#include <__functional/identity.h> +#include <__functional/invoke.h> +#include <__functional/ranges_operations.h> +#include <__iterator/concepts.h> +#include <__iterator/indirectly_comparable.h> +#include <__iterator/next.h> +#include <__iterator/prev.h> +#include <__iterator/projected.h> +#include <__ranges/access.h> +#include <__ranges/concepts.h> +#include <__ranges/subrange.h> +#include <__utility/move.h> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_PUSH_MACROS +#include <__undef_macros> + +#if _LIBCPP_STD_VER >= 23 + +_LIBCPP_BEGIN_NAMESPACE_STD + +namespace ranges { + +template +_LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter> +__find_last_impl(_Iter __first, _Sent __last, _Pred __pred, _Proj& __proj) { + if (__first == __last) { + return subrange<_Iter>(__first, __first); + } + + if constexpr (bidirectional_iterator<_Iter>) { + auto __last_it = 
ranges::next(__first, __last); + for (auto __it = ranges::prev(__last_it); __it != __first; --__it) { + if (__pred(std::invoke(__proj, *__it))) { + return subrange<_Iter>(std::move(__it), std::move(__last_it)); + } + } + if (__pred(std::invoke(__proj, *__first))) { + return subrange<_Iter>(std::move(__first), std::move(__last_it)); + } + return subrange<_Iter>(__last_it, __last_it); + } else { + bool __found = false; + _Iter __found_it; + for (; __first != __last; ++__first) { + if (__pred(std::invoke(__proj, *__first))) { + __found = true; + __found_it = __first; + } + } + + if (__found) { + return subrange<_Iter>(std::move(__found_it), std::move(__first)); + } else { + return subrange<_Iter>(__first, __first); + } + } +} + +namespace __find_last { +struct __fn { + template + struct __op { + const _Type& __value; + template + _LIBCPP_HIDE_FROM_ABI constexpr decltype(auto) operator()(_Elem&& __elem) const { + return std::forward<_Elem>(__elem) == __value; + } + }; + + template _Sent, class _Type, class _Proj = identity> + requires indirect_binary_predicate, const _Type*> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr static subrange<_Iter> + operator()(_Iter __first, _Sent __last, const _Type& __value, _Proj __proj = {}) { + return ranges::__find_last_impl(std::move(__first), std::move(__last), __op<_Type>{__value}, __proj); + } + + template + requires indirect_binary_predicate, _Proj>, const _Type*> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr static borrowed_subrange_t<_Range> + operator()(_Range&& __range, const _Type& __value, _Proj __proj = {}) { + return ranges::__find_last_impl(ranges::begin(__range), ranges::end(__range), __op<_Type>{__value}, __proj); + } +}; +} // namespace __find_last + +namespace __find_last_if { +struct __fn { + template + struct __op { + _Pred& __pred; + template + _LIBCPP_HIDE_FROM_ABI constexpr decltype(auto) operator()(_Elem&& __elem) const { + return std::invoke(__pred, std::forward<_Elem>(__elem)); + } + }; + + template 
_Sent, + class _Proj = identity, + indirect_unary_predicate> _Pred> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr static subrange<_Iter> + operator()(_Iter __first, _Sent __last, _Pred __pred, _Proj __proj = {}) { + return ranges::__find_last_impl(std::move(__first), std::move(__last), __op<_Pred>{__pred}, __proj); + } + + template , _Proj>> _Pred> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr static borrowed_subrange_t<_Range> + operator()(_Range&& __range, _Pred __pred, _Proj __proj = {}) { + return ranges::__find_last_impl(ranges::begin(__range), ranges::end(__range), __op<_Pred>{__pred}, __proj); + } +}; +} // namespace __find_last_if + +namespace __find_last_if_not { +struct __fn { + template + struct __op { + _Pred& __pred; + template + _LIBCPP_HIDE_FROM_ABI constexpr decltype(auto) operator()(_Elem&& __elem) const { + return !std::invoke(__pred, std::forward<_Elem>(__elem)); + } + }; + + template _Sent, + class _Proj = identity, + indirect_unary_predicate> _Pred> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr static subrange<_Iter> + operator()(_Iter __first, _Sent __last, _Pred __pred, _Proj __proj = {}) { + return ranges::__find_last_impl(std::move(__first), std::move(__last), __op<_Pred>{__pred}, __proj); + } + + template , _Proj>> _Pred> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr static borrowed_subrange_t<_Range> + operator()(_Range&& __range, _Pred __pred, _Proj __proj = {}) { + return ranges::__find_last_impl(ranges::begin(__range), ranges::end(__range), __op<_Pred>{__pred}, __proj); + } +}; +} // namespace __find_last_if_not + +inline namespace __cpo { +inline constexpr auto find_last = __find_last::__fn{}; +inline constexpr auto find_last_if = __find_last_if::__fn{}; +inline constexpr auto find_last_if_not = __find_last_if_not::__fn{}; +} // namespace __cpo +} // namespace ranges + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP_STD_VER >= 23 + +_LIBCPP_POP_MACROS + +#endif // _LIBCPP___ALGORITHM_RANGES_FIND_LAST_H diff --git 
a/lib/libcxx/include/__algorithm/ranges_includes.h b/lib/libcxx/include/__algorithm/ranges_includes.h index 0bc4c043bd18..c4c3b8ed088d 100644 --- a/lib/libcxx/include/__algorithm/ranges_includes.h +++ b/lib/libcxx/include/__algorithm/ranges_includes.h @@ -45,7 +45,7 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity, indirect_strict_weak_order, projected<_Iter2, _Proj2>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, @@ -69,7 +69,7 @@ struct __fn { class _Proj2 = identity, indirect_strict_weak_order, _Proj1>, projected, _Proj2>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( _Range1&& __range1, _Range2&& __range2, _Comp __comp = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) const { return std::__includes( ranges::begin(__range1), diff --git a/lib/libcxx/include/__algorithm/ranges_is_heap.h b/lib/libcxx/include/__algorithm/ranges_is_heap.h index 122368c90d92..3d9e18ce1d90 100644 --- a/lib/libcxx/include/__algorithm/ranges_is_heap.h +++ b/lib/libcxx/include/__algorithm/ranges_is_heap.h @@ -51,7 +51,7 @@ struct __fn { sentinel_for<_Iter> _Sent, class _Proj = identity, indirect_strict_weak_order> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Iter __first, _Sent __last, _Comp __comp = {}, _Proj __proj = {}) const { return __is_heap_fn_impl(std::move(__first), std::move(__last), __comp, __proj); } @@ -59,7 +59,7 @@ struct __fn { template , _Proj>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Range&& __range, _Comp __comp = {}, _Proj __proj = {}) const { return 
__is_heap_fn_impl(ranges::begin(__range), ranges::end(__range), __comp, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_is_heap_until.h b/lib/libcxx/include/__algorithm/ranges_is_heap_until.h index b2705d37a6d3..7a2e1fc7705b 100644 --- a/lib/libcxx/include/__algorithm/ranges_is_heap_until.h +++ b/lib/libcxx/include/__algorithm/ranges_is_heap_until.h @@ -51,7 +51,7 @@ struct __fn { sentinel_for<_Iter> _Sent, class _Proj = identity, indirect_strict_weak_order> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Iter + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Iter operator()(_Iter __first, _Sent __last, _Comp __comp = {}, _Proj __proj = {}) const { return __is_heap_until_fn_impl(std::move(__first), std::move(__last), __comp, __proj); } @@ -59,7 +59,7 @@ struct __fn { template , _Proj>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Range> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Range> operator()(_Range&& __range, _Comp __comp = {}, _Proj __proj = {}) const { return __is_heap_until_fn_impl(ranges::begin(__range), ranges::end(__range), __comp, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_is_partitioned.h b/lib/libcxx/include/__algorithm/ranges_is_partitioned.h index c6a585c9f510..5be6fba46fd9 100644 --- a/lib/libcxx/include/__algorithm/ranges_is_partitioned.h +++ b/lib/libcxx/include/__algorithm/ranges_is_partitioned.h @@ -57,7 +57,7 @@ struct __fn { sentinel_for<_Iter> _Sent, class _Proj = identity, indirect_unary_predicate> _Pred> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Iter __first, _Sent __last, _Pred __pred, _Proj __proj = {}) const { return __is_partitioned_impl(std::move(__first), std::move(__last), __pred, __proj); } @@ -65,7 +65,7 @@ struct __fn { template , _Proj>> _Pred> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + 
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Range&& __range, _Pred __pred, _Proj __proj = {}) const { return __is_partitioned_impl(ranges::begin(__range), ranges::end(__range), __pred, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_is_permutation.h b/lib/libcxx/include/__algorithm/ranges_is_permutation.h index e0423d722b5b..1f8d67007a57 100644 --- a/lib/libcxx/include/__algorithm/ranges_is_permutation.h +++ b/lib/libcxx/include/__algorithm/ranges_is_permutation.h @@ -56,7 +56,7 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity, indirect_equivalence_relation, projected<_Iter2, _Proj2>> _Pred = ranges::equal_to> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, @@ -74,7 +74,7 @@ struct __fn { class _Proj2 = identity, indirect_equivalence_relation, _Proj1>, projected, _Proj2>> _Pred = ranges::equal_to> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( _Range1&& __range1, _Range2&& __range2, _Pred __pred = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) const { if constexpr (sized_range<_Range1> && sized_range<_Range2>) { if (ranges::distance(__range1) != ranges::distance(__range2)) diff --git a/lib/libcxx/include/__algorithm/ranges_is_sorted.h b/lib/libcxx/include/__algorithm/ranges_is_sorted.h index d71035d5aa1d..5b88d422b4b0 100644 --- a/lib/libcxx/include/__algorithm/ranges_is_sorted.h +++ b/lib/libcxx/include/__algorithm/ranges_is_sorted.h @@ -37,7 +37,7 @@ struct __fn { sentinel_for<_Iter> _Sent, class _Proj = identity, indirect_strict_weak_order> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Iter __first, _Sent __last, _Comp __comp = {}, _Proj __proj = {}) const { return 
ranges::__is_sorted_until_impl(std::move(__first), __last, __comp, __proj) == __last; } @@ -45,7 +45,7 @@ struct __fn { template , _Proj>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Range&& __range, _Comp __comp = {}, _Proj __proj = {}) const { auto __last = ranges::end(__range); return ranges::__is_sorted_until_impl(ranges::begin(__range), __last, __comp, __proj) == __last; diff --git a/lib/libcxx/include/__algorithm/ranges_is_sorted_until.h b/lib/libcxx/include/__algorithm/ranges_is_sorted_until.h index dcfb6a4e1813..54de530c8b2f 100644 --- a/lib/libcxx/include/__algorithm/ranges_is_sorted_until.h +++ b/lib/libcxx/include/__algorithm/ranges_is_sorted_until.h @@ -53,7 +53,7 @@ struct __fn { sentinel_for<_Iter> _Sent, class _Proj = identity, indirect_strict_weak_order> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Iter + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Iter operator()(_Iter __first, _Sent __last, _Comp __comp = {}, _Proj __proj = {}) const { return ranges::__is_sorted_until_impl(std::move(__first), std::move(__last), __comp, __proj); } @@ -61,7 +61,7 @@ struct __fn { template , _Proj>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Range> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Range> operator()(_Range&& __range, _Comp __comp = {}, _Proj __proj = {}) const { return ranges::__is_sorted_until_impl(ranges::begin(__range), ranges::end(__range), __comp, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_lexicographical_compare.h b/lib/libcxx/include/__algorithm/ranges_lexicographical_compare.h index 90e96b546516..6d82017e302a 100644 --- a/lib/libcxx/include/__algorithm/ranges_lexicographical_compare.h +++ b/lib/libcxx/include/__algorithm/ranges_lexicographical_compare.h @@ -60,7 +60,7 @@ struct __fn { class _Proj1 = identity, 
class _Proj2 = identity, indirect_strict_weak_order, projected<_Iter2, _Proj2>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, @@ -78,7 +78,7 @@ struct __fn { class _Proj2 = identity, indirect_strict_weak_order, _Proj1>, projected, _Proj2>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( _Range1&& __range1, _Range2&& __range2, _Comp __comp = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) const { return __lexicographical_compare_impl( ranges::begin(__range1), diff --git a/lib/libcxx/include/__algorithm/ranges_lower_bound.h b/lib/libcxx/include/__algorithm/ranges_lower_bound.h index ab1f80e7ab77..0651147e0424 100644 --- a/lib/libcxx/include/__algorithm/ranges_lower_bound.h +++ b/lib/libcxx/include/__algorithm/ranges_lower_bound.h @@ -43,7 +43,7 @@ struct __fn { class _Type, class _Proj = identity, indirect_strict_weak_order> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Iter + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Iter operator()(_Iter __first, _Sent __last, const _Type& __value, _Comp __comp = {}, _Proj __proj = {}) const { return std::__lower_bound<_RangeAlgPolicy>(__first, __last, __value, __comp, __proj); } @@ -52,7 +52,7 @@ struct __fn { class _Type, class _Proj = identity, indirect_strict_weak_order, _Proj>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Range> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Range> operator()(_Range&& __r, const _Type& __value, _Comp __comp = {}, _Proj __proj = {}) const { return std::__lower_bound<_RangeAlgPolicy>(ranges::begin(__r), ranges::end(__r), __value, __comp, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_max.h 
b/lib/libcxx/include/__algorithm/ranges_max.h index 0f89cb2ff5bf..d0ee6f314b0c 100644 --- a/lib/libcxx/include/__algorithm/ranges_max.h +++ b/lib/libcxx/include/__algorithm/ranges_max.h @@ -41,7 +41,7 @@ struct __fn { template > _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr const _Tp& + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr const _Tp& operator()(_LIBCPP_LIFETIMEBOUND const _Tp& __a, _LIBCPP_LIFETIMEBOUND const _Tp& __b, _Comp __comp = {}, @@ -52,7 +52,7 @@ struct __fn { template > _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Tp + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Tp operator()(initializer_list<_Tp> __il, _Comp __comp = {}, _Proj __proj = {}) const { _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( __il.begin() != __il.end(), "initializer_list must contain at least one element"); @@ -65,7 +65,7 @@ struct __fn { class _Proj = identity, indirect_strict_weak_order, _Proj>> _Comp = ranges::less> requires indirectly_copyable_storable, range_value_t<_Rp>*> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr range_value_t<_Rp> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr range_value_t<_Rp> operator()(_Rp&& __r, _Comp __comp = {}, _Proj __proj = {}) const { auto __first = ranges::begin(__r); auto __last = ranges::end(__r); @@ -98,6 +98,6 @@ _LIBCPP_END_NAMESPACE_STD _LIBCPP_POP_MACROS -#endif // _LIBCPP_STD_VER >= 20 && +#endif // _LIBCPP_STD_VER >= 20 #endif // _LIBCPP___ALGORITHM_RANGES_MAX_H diff --git a/lib/libcxx/include/__algorithm/ranges_max_element.h b/lib/libcxx/include/__algorithm/ranges_max_element.h index 83adf49b61ad..c57730927116 100644 --- a/lib/libcxx/include/__algorithm/ranges_max_element.h +++ b/lib/libcxx/include/__algorithm/ranges_max_element.h @@ -38,7 +38,7 @@ struct __fn { sentinel_for<_Ip> _Sp, class _Proj = identity, indirect_strict_weak_order> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Ip + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI 
constexpr _Ip operator()(_Ip __first, _Sp __last, _Comp __comp = {}, _Proj __proj = {}) const { auto __comp_lhs_rhs_swapped = [&](auto&& __lhs, auto&& __rhs) -> bool { return std::invoke(__comp, __rhs, __lhs); }; return ranges::__min_element_impl(__first, __last, __comp_lhs_rhs_swapped, __proj); @@ -47,7 +47,7 @@ struct __fn { template , _Proj>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Rp> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Rp> operator()(_Rp&& __r, _Comp __comp = {}, _Proj __proj = {}) const { auto __comp_lhs_rhs_swapped = [&](auto&& __lhs, auto&& __rhs) -> bool { return std::invoke(__comp, __rhs, __lhs); }; return ranges::__min_element_impl(ranges::begin(__r), ranges::end(__r), __comp_lhs_rhs_swapped, __proj); diff --git a/lib/libcxx/include/__algorithm/ranges_min.h b/lib/libcxx/include/__algorithm/ranges_min.h index 8757358cdf37..cc569d2a060c 100644 --- a/lib/libcxx/include/__algorithm/ranges_min.h +++ b/lib/libcxx/include/__algorithm/ranges_min.h @@ -40,7 +40,7 @@ struct __fn { template > _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr const _Tp& + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr const _Tp& operator()(_LIBCPP_LIFETIMEBOUND const _Tp& __a, _LIBCPP_LIFETIMEBOUND const _Tp& __b, _Comp __comp = {}, @@ -51,7 +51,7 @@ struct __fn { template > _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Tp + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Tp operator()(initializer_list<_Tp> __il, _Comp __comp = {}, _Proj __proj = {}) const { _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( __il.begin() != __il.end(), "initializer_list must contain at least one element"); @@ -62,7 +62,7 @@ struct __fn { class _Proj = identity, indirect_strict_weak_order, _Proj>> _Comp = ranges::less> requires indirectly_copyable_storable, range_value_t<_Rp>*> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr range_value_t<_Rp> + 
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr range_value_t<_Rp> operator()(_Rp&& __r, _Comp __comp = {}, _Proj __proj = {}) const { auto __first = ranges::begin(__r); auto __last = ranges::end(__r); @@ -90,6 +90,6 @@ _LIBCPP_END_NAMESPACE_STD _LIBCPP_POP_MACROS -#endif // _LIBCPP_STD_VER >= 20 && +#endif // _LIBCPP_STD_VER >= 20 #endif // _LIBCPP___ALGORITHM_RANGES_MIN_H diff --git a/lib/libcxx/include/__algorithm/ranges_min_element.h b/lib/libcxx/include/__algorithm/ranges_min_element.h index 4b9cb76da578..588ef258e26f 100644 --- a/lib/libcxx/include/__algorithm/ranges_min_element.h +++ b/lib/libcxx/include/__algorithm/ranges_min_element.h @@ -52,7 +52,7 @@ struct __fn { sentinel_for<_Ip> _Sp, class _Proj = identity, indirect_strict_weak_order> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Ip + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Ip operator()(_Ip __first, _Sp __last, _Comp __comp = {}, _Proj __proj = {}) const { return ranges::__min_element_impl(__first, __last, __comp, __proj); } @@ -60,7 +60,7 @@ struct __fn { template , _Proj>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Rp> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Rp> operator()(_Rp&& __r, _Comp __comp = {}, _Proj __proj = {}) const { return ranges::__min_element_impl(ranges::begin(__r), ranges::end(__r), __comp, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_minmax.h b/lib/libcxx/include/__algorithm/ranges_minmax.h index 22a62b620c93..09cbefd91a8c 100644 --- a/lib/libcxx/include/__algorithm/ranges_minmax.h +++ b/lib/libcxx/include/__algorithm/ranges_minmax.h @@ -23,7 +23,9 @@ #include <__iterator/projected.h> #include <__ranges/access.h> #include <__ranges/concepts.h> +#include <__type_traits/desugars_to.h> #include <__type_traits/is_reference.h> +#include <__type_traits/is_trivially_copyable.h> #include <__type_traits/remove_cvref.h> #include <__utility/forward.h> 
#include <__utility/move.h> @@ -50,7 +52,7 @@ struct __fn { template > _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr ranges::minmax_result + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr ranges::minmax_result operator()(_LIBCPP_LIFETIMEBOUND const _Type& __a, _LIBCPP_LIFETIMEBOUND const _Type& __b, _Comp __comp = {}, @@ -63,7 +65,7 @@ struct __fn { template > _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr ranges::minmax_result<_Type> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr ranges::minmax_result<_Type> operator()(initializer_list<_Type> __il, _Comp __comp = {}, _Proj __proj = {}) const { _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( __il.begin() != __il.end(), "initializer_list has to contain at least one element"); @@ -75,7 +77,7 @@ struct __fn { class _Proj = identity, indirect_strict_weak_order, _Proj>> _Comp = ranges::less> requires indirectly_copyable_storable, range_value_t<_Range>*> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr ranges::minmax_result> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr ranges::minmax_result> operator()(_Range&& __r, _Comp __comp = {}, _Proj __proj = {}) const { auto __first = ranges::begin(__r); auto __last = ranges::end(__r); @@ -83,7 +85,20 @@ struct __fn { _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(__first != __last, "range has to contain at least one element"); - if constexpr (forward_range<_Range>) { + // This optimiation is not in minmax_element because clang doesn't see through the pointers and as a result doesn't + // vectorize the code. 
+ if constexpr (contiguous_range<_Range> && is_integral_v<_ValueT> && + __is_cheap_to_copy<_ValueT> & __is_identity<_Proj>::value && + __desugars_to_v<__less_tag, _Comp, _ValueT, _ValueT>) { + minmax_result<_ValueT> __result = {__r[0], __r[0]}; + for (auto __e : __r) { + if (__e < __result.min) + __result.min = __e; + if (__result.max < __e) + __result.max = __e; + } + return __result; + } else if constexpr (forward_range<_Range>) { // Special-case the one element case. Avoid repeatedly initializing objects from the result of an iterator // dereference when doing so might not be idempotent. The `if constexpr` avoids the extra branch in cases where // it's not needed. diff --git a/lib/libcxx/include/__algorithm/ranges_minmax_element.h b/lib/libcxx/include/__algorithm/ranges_minmax_element.h index 5132856ebcd5..4bf6d2404e46 100644 --- a/lib/libcxx/include/__algorithm/ranges_minmax_element.h +++ b/lib/libcxx/include/__algorithm/ranges_minmax_element.h @@ -46,7 +46,7 @@ struct __fn { sentinel_for<_Ip> _Sp, class _Proj = identity, indirect_strict_weak_order> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr ranges::minmax_element_result<_Ip> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr ranges::minmax_element_result<_Ip> operator()(_Ip __first, _Sp __last, _Comp __comp = {}, _Proj __proj = {}) const { auto __ret = std::__minmax_element_impl(std::move(__first), std::move(__last), __comp, __proj); return {__ret.first, __ret.second}; @@ -55,7 +55,7 @@ struct __fn { template , _Proj>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr ranges::minmax_element_result> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr ranges::minmax_element_result> operator()(_Rp&& __r, _Comp __comp = {}, _Proj __proj = {}) const { auto __ret = std::__minmax_element_impl(ranges::begin(__r), ranges::end(__r), __comp, __proj); return {__ret.first, __ret.second}; diff --git a/lib/libcxx/include/__algorithm/ranges_mismatch.h 
b/lib/libcxx/include/__algorithm/ranges_mismatch.h index 037af3912623..c4bf0022a9bc 100644 --- a/lib/libcxx/include/__algorithm/ranges_mismatch.h +++ b/lib/libcxx/include/__algorithm/ranges_mismatch.h @@ -10,6 +10,8 @@ #define _LIBCPP___ALGORITHM_RANGES_MISMATCH_H #include <__algorithm/in_in_result.h> +#include <__algorithm/mismatch.h> +#include <__algorithm/unwrap_range.h> #include <__config> #include <__functional/identity.h> #include <__functional/invoke.h> @@ -42,13 +44,17 @@ struct __fn { template static _LIBCPP_HIDE_FROM_ABI constexpr mismatch_result<_I1, _I2> __go(_I1 __first1, _S1 __last1, _I2 __first2, _S2 __last2, _Pred& __pred, _Proj1& __proj1, _Proj2& __proj2) { - while (__first1 != __last1 && __first2 != __last2) { - if (!std::invoke(__pred, std::invoke(__proj1, *__first1), std::invoke(__proj2, *__first2))) - break; - ++__first1; - ++__first2; + if constexpr (forward_iterator<_I1> && forward_iterator<_I2>) { + auto __range1 = std::__unwrap_range(__first1, __last1); + auto __range2 = std::__unwrap_range(__first2, __last2); + auto __res = + std::__mismatch(__range1.first, __range1.second, __range2.first, __range2.second, __pred, __proj1, __proj2); + return {std::__rewrap_range<_S1>(__first1, __res.first), std::__rewrap_range<_S2>(__first2, __res.second)}; + } else { + auto __res = std::__mismatch( + std::move(__first1), std::move(__last1), std::move(__first2), std::move(__last2), __pred, __proj1, __proj2); + return {std::move(__res.first), std::move(__res.second)}; } - return {std::move(__first1), std::move(__first2)}; } template requires indirectly_comparable<_I1, _I2, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr mismatch_result<_I1, _I2> operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr mismatch_result<_I1, _I2> operator()( _I1 __first1, _S1 __last1, _I2 __first2, _S2 __last2, _Pred __pred = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) const { return __go(std::move(__first1), __last1, std::move(__first2), 
__last2, __pred, __proj1, __proj2); @@ -71,8 +77,8 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity> requires indirectly_comparable, iterator_t<_R2>, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr mismatch_result, - borrowed_iterator_t<_R2>> + [[nodiscard]] + _LIBCPP_HIDE_FROM_ABI constexpr mismatch_result, borrowed_iterator_t<_R2>> operator()(_R1&& __r1, _R2&& __r2, _Pred __pred = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) const { return __go( ranges::begin(__r1), ranges::end(__r1), ranges::begin(__r2), ranges::end(__r2), __pred, __proj1, __proj2); diff --git a/lib/libcxx/include/__algorithm/ranges_none_of.h b/lib/libcxx/include/__algorithm/ranges_none_of.h index 59bd87997d44..7df3c1829fcf 100644 --- a/lib/libcxx/include/__algorithm/ranges_none_of.h +++ b/lib/libcxx/include/__algorithm/ranges_none_of.h @@ -46,7 +46,7 @@ struct __fn { sentinel_for<_Iter> _Sent, class _Proj = identity, indirect_unary_predicate> _Pred> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Iter __first, _Sent __last, _Pred __pred = {}, _Proj __proj = {}) const { return __none_of_impl(std::move(__first), std::move(__last), __pred, __proj); } @@ -54,7 +54,7 @@ struct __fn { template , _Proj>> _Pred> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(_Range&& __range, _Pred __pred, _Proj __proj = {}) const { return __none_of_impl(ranges::begin(__range), ranges::end(__range), __pred, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_remove.h b/lib/libcxx/include/__algorithm/ranges_remove.h index 315bed8fba77..17c3a2c5cd06 100644 --- a/lib/libcxx/include/__algorithm/ranges_remove.h +++ b/lib/libcxx/include/__algorithm/ranges_remove.h @@ -37,7 +37,7 @@ namespace __remove { struct __fn { template _Sent, class _Type, class _Proj = identity> requires indirect_binary_predicate, const 
_Type*> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter> operator()(_Iter __first, _Sent __last, const _Type& __value, _Proj __proj = {}) const { auto __pred = [&](auto&& __other) -> bool { return __value == __other; }; return ranges::__remove_if_impl(std::move(__first), std::move(__last), __pred, __proj); @@ -46,7 +46,7 @@ struct __fn { template requires permutable> && indirect_binary_predicate, _Proj>, const _Type*> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range> operator()(_Range&& __range, const _Type& __value, _Proj __proj = {}) const { auto __pred = [&](auto&& __other) -> bool { return __value == __other; }; return ranges::__remove_if_impl(ranges::begin(__range), ranges::end(__range), __pred, __proj); diff --git a/lib/libcxx/include/__algorithm/ranges_remove_if.h b/lib/libcxx/include/__algorithm/ranges_remove_if.h index 943dbdd73807..0ea5d9a01b88 100644 --- a/lib/libcxx/include/__algorithm/ranges_remove_if.h +++ b/lib/libcxx/include/__algorithm/ranges_remove_if.h @@ -59,7 +59,7 @@ struct __fn { sentinel_for<_Iter> _Sent, class _Proj = identity, indirect_unary_predicate> _Pred> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter> operator()(_Iter __first, _Sent __last, _Pred __pred, _Proj __proj = {}) const { return ranges::__remove_if_impl(std::move(__first), std::move(__last), __pred, __proj); } @@ -68,7 +68,7 @@ struct __fn { class _Proj = identity, indirect_unary_predicate, _Proj>> _Pred> requires permutable> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range> operator()(_Range&& __range, _Pred __pred, _Proj __proj = {}) const { return 
ranges::__remove_if_impl(ranges::begin(__range), ranges::end(__range), __pred, __proj); } diff --git a/lib/libcxx/include/__algorithm/ranges_search.h b/lib/libcxx/include/__algorithm/ranges_search.h index ca2326e9ab27..55294c60631b 100644 --- a/lib/libcxx/include/__algorithm/ranges_search.h +++ b/lib/libcxx/include/__algorithm/ranges_search.h @@ -77,7 +77,7 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity> requires indirectly_comparable<_Iter1, _Iter2, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter1> operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter1> operator()( _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, @@ -94,7 +94,7 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity> requires indirectly_comparable, iterator_t<_Range2>, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range1> operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range1> operator()( _Range1&& __range1, _Range2&& __range2, _Pred __pred = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) const { auto __first1 = ranges::begin(__range1); if constexpr (sized_range<_Range2>) { diff --git a/lib/libcxx/include/__algorithm/ranges_search_n.h b/lib/libcxx/include/__algorithm/ranges_search_n.h index 4c1d73d8e6c3..56e12755b9bf 100644 --- a/lib/libcxx/include/__algorithm/ranges_search_n.h +++ b/lib/libcxx/include/__algorithm/ranges_search_n.h @@ -71,7 +71,7 @@ struct __fn { class _Pred = ranges::equal_to, class _Proj = identity> requires indirectly_comparable<_Iter, const _Type*, _Pred, _Proj> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter> operator()(_Iter __first, _Sent __last, iter_difference_t<_Iter> __count, @@ -83,7 +83,7 @@ struct __fn { template requires indirectly_comparable, const _Type*, _Pred, _Proj> - _LIBCPP_NODISCARD_EXT 
_LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range> operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range> operator()( _Range&& __range, range_difference_t<_Range> __count, const _Type& __value, _Pred __pred = {}, _Proj __proj = {}) const { auto __first = ranges::begin(__range); diff --git a/lib/libcxx/include/__algorithm/ranges_starts_with.h b/lib/libcxx/include/__algorithm/ranges_starts_with.h index 90e184aa9bcc..17084e4f2433 100644 --- a/lib/libcxx/include/__algorithm/ranges_starts_with.h +++ b/lib/libcxx/include/__algorithm/ranges_starts_with.h @@ -42,14 +42,14 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity> requires indirectly_comparable<_Iter1, _Iter2, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr bool operator()( _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, _Sent2 __last2, _Pred __pred = {}, _Proj1 __proj1 = {}, - _Proj2 __proj2 = {}) const { + _Proj2 __proj2 = {}) { return __mismatch::__fn::__go( std::move(__first1), std::move(__last1), @@ -67,8 +67,8 @@ struct __fn { class _Proj1 = identity, class _Proj2 = identity> requires indirectly_comparable, iterator_t<_Range2>, _Pred, _Proj1, _Proj2> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool operator()( - _Range1&& __range1, _Range2&& __range2, _Pred __pred = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) const { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static constexpr bool + operator()(_Range1&& __range1, _Range2&& __range2, _Pred __pred = {}, _Proj1 __proj1 = {}, _Proj2 __proj2 = {}) { return __mismatch::__fn::__go( ranges::begin(__range1), ranges::end(__range1), diff --git a/lib/libcxx/include/__algorithm/ranges_unique.h b/lib/libcxx/include/__algorithm/ranges_unique.h index 7340310eb36a..7a9b78432187 100644 --- a/lib/libcxx/include/__algorithm/ranges_unique.h +++ b/lib/libcxx/include/__algorithm/ranges_unique.h @@ -47,7 +47,7 @@ 
struct __fn { sentinel_for<_Iter> _Sent, class _Proj = identity, indirect_equivalence_relation> _Comp = ranges::equal_to> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr subrange<_Iter> operator()(_Iter __first, _Sent __last, _Comp __comp = {}, _Proj __proj = {}) const { auto __ret = std::__unique<_RangeAlgPolicy>(std::move(__first), std::move(__last), std::__make_projected(__comp, __proj)); @@ -58,7 +58,7 @@ struct __fn { class _Proj = identity, indirect_equivalence_relation, _Proj>> _Comp = ranges::equal_to> requires permutable> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr borrowed_subrange_t<_Range> operator()(_Range&& __range, _Comp __comp = {}, _Proj __proj = {}) const { auto __ret = std::__unique<_RangeAlgPolicy>( ranges::begin(__range), ranges::end(__range), std::__make_projected(__comp, __proj)); diff --git a/lib/libcxx/include/__algorithm/ranges_upper_bound.h b/lib/libcxx/include/__algorithm/ranges_upper_bound.h index 7b571fb3448f..fa6fa7f70ed5 100644 --- a/lib/libcxx/include/__algorithm/ranges_upper_bound.h +++ b/lib/libcxx/include/__algorithm/ranges_upper_bound.h @@ -37,7 +37,7 @@ struct __fn { class _Type, class _Proj = identity, indirect_strict_weak_order> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Iter + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Iter operator()(_Iter __first, _Sent __last, const _Type& __value, _Comp __comp = {}, _Proj __proj = {}) const { auto __comp_lhs_rhs_swapped = [&](const auto& __lhs, const auto& __rhs) -> bool { return !std::invoke(__comp, __rhs, __lhs); @@ -50,7 +50,7 @@ struct __fn { class _Type, class _Proj = identity, indirect_strict_weak_order, _Proj>> _Comp = ranges::less> - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr borrowed_iterator_t<_Range> + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr 
borrowed_iterator_t<_Range> operator()(_Range&& __r, const _Type& __value, _Comp __comp = {}, _Proj __proj = {}) const { auto __comp_lhs_rhs_swapped = [&](const auto& __lhs, const auto& __rhs) -> bool { return !std::invoke(__comp, __rhs, __lhs); diff --git a/lib/libcxx/include/__algorithm/remove.h b/lib/libcxx/include/__algorithm/remove.h index 1498852c4361..fd01c23cb670 100644 --- a/lib/libcxx/include/__algorithm/remove.h +++ b/lib/libcxx/include/__algorithm/remove.h @@ -24,7 +24,7 @@ _LIBCPP_PUSH_MACROS _LIBCPP_BEGIN_NAMESPACE_STD template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator remove(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { __first = std::find(__first, __last, __value); if (__first != __last) { diff --git a/lib/libcxx/include/__algorithm/remove_if.h b/lib/libcxx/include/__algorithm/remove_if.h index c77b78023f52..b14f3c0efa7e 100644 --- a/lib/libcxx/include/__algorithm/remove_if.h +++ b/lib/libcxx/include/__algorithm/remove_if.h @@ -23,7 +23,7 @@ _LIBCPP_PUSH_MACROS _LIBCPP_BEGIN_NAMESPACE_STD template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator remove_if(_ForwardIterator __first, _ForwardIterator __last, _Predicate __pred) { __first = std::find_if<_ForwardIterator, _Predicate&>(__first, __last, __pred); if (__first != __last) { diff --git a/lib/libcxx/include/__algorithm/rotate.h b/lib/libcxx/include/__algorithm/rotate.h index 9a4d07883e32..df4ca95aac95 100644 --- a/lib/libcxx/include/__algorithm/rotate.h +++ b/lib/libcxx/include/__algorithm/rotate.h @@ -15,7 +15,7 @@ #include <__algorithm/swap_ranges.h> #include <__config> #include <__iterator/iterator_traits.h> -#include <__type_traits/is_trivially_move_assignable.h> +#include 
<__type_traits/is_trivially_assignable.h> #include <__utility/move.h> #include <__utility/pair.h> diff --git a/lib/libcxx/include/__algorithm/search.h b/lib/libcxx/include/__algorithm/search.h index 75f936d0f217..b82ca7809535 100644 --- a/lib/libcxx/include/__algorithm/search.h +++ b/lib/libcxx/include/__algorithm/search.h @@ -117,17 +117,18 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Iter1, _Iter1> __searc } } -template +template ::value && + __has_random_access_iterator_category<_Iter2>::value, + int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Iter1, _Iter1> __search_impl( - _Iter1 __first1, - _Sent1 __last1, - _Iter2 __first2, - _Sent2 __last2, - _Pred& __pred, - _Proj1& __proj1, - _Proj2& __proj2, - __enable_if_t<__has_random_access_iterator_category<_Iter1>::value && - __has_random_access_iterator_category<_Iter2>::value>* = nullptr) { + _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, _Sent2 __last2, _Pred& __pred, _Proj1& __proj1, _Proj2& __proj2) { auto __size2 = __last2 - __first2; if (__size2 == 0) return std::make_pair(__first1, __first1); @@ -141,23 +142,25 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Iter1, _Iter1> __searc __first1, __last1, __first2, __last2, __pred, __proj1, __proj2, __size1, __size2); } -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Iter1, _Iter1> __search_impl( - _Iter1 __first1, - _Sent1 __last1, - _Iter2 __first2, - _Sent2 __last2, - _Pred& __pred, - _Proj1& __proj1, - _Proj2& __proj2, +template < + class _Iter1, + class _Sent1, + class _Iter2, + class _Sent2, + class _Pred, + class _Proj1, + class _Proj2, __enable_if_t<__has_forward_iterator_category<_Iter1>::value && __has_forward_iterator_category<_Iter2>::value && - !(__has_random_access_iterator_category<_Iter1>::value && - __has_random_access_iterator_category<_Iter2>::value)>* = nullptr) { + !(__has_random_access_iterator_category<_Iter1>::value && + 
__has_random_access_iterator_category<_Iter2>::value), + int> = 0> +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Iter1, _Iter1> __search_impl( + _Iter1 __first1, _Sent1 __last1, _Iter2 __first2, _Sent2 __last2, _Pred& __pred, _Proj1& __proj1, _Proj2& __proj2) { return std::__search_forward_impl<_ClassicAlgPolicy>(__first1, __last1, __first2, __last2, __pred, __proj1, __proj2); } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator1 +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator1 search(_ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2, @@ -170,14 +173,14 @@ search(_ForwardIterator1 __first1, } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator1 +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator1 search(_ForwardIterator1 __first1, _ForwardIterator1 __last1, _ForwardIterator2 __first2, _ForwardIterator2 __last2) { return std::search(__first1, __last1, __first2, __last2, __equal_to()); } #if _LIBCPP_STD_VER >= 17 template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator search(_ForwardIterator __f, _ForwardIterator __l, const _Searcher& __s) { return __s(__f, __l).first; } diff --git a/lib/libcxx/include/__algorithm/search_n.h b/lib/libcxx/include/__algorithm/search_n.h index c3c01e700bf6..771647d3168a 100644 --- a/lib/libcxx/include/__algorithm/search_n.h +++ b/lib/libcxx/include/__algorithm/search_n.h @@ -108,34 +108,35 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 std::pair<_Iter, _Iter> __se } } -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Iter, _Iter> __search_n_impl( - _Iter __first, - _Sent __last, - _DiffT __count, - const _Type& __value, - _Pred& __pred, - 
_Proj& __proj, - __enable_if_t<__has_random_access_iterator_category<_Iter>::value>* = nullptr) { +template ::value, int> = 0> +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Iter, _Iter> +__search_n_impl(_Iter __first, _Sent __last, _DiffT __count, const _Type& __value, _Pred& __pred, _Proj& __proj) { return std::__search_n_random_access_impl<_ClassicAlgPolicy>( __first, __last, __count, __value, __pred, __proj, __last - __first); } -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Iter1, _Iter1> __search_n_impl( - _Iter1 __first, - _Sent1 __last, - _DiffT __count, - const _Type& __value, - _Pred& __pred, - _Proj& __proj, - __enable_if_t<__has_forward_iterator_category<_Iter1>::value && - !__has_random_access_iterator_category<_Iter1>::value>* = nullptr) { +template ::value && + !__has_random_access_iterator_category<_Iter1>::value, + int> = 0> +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pair<_Iter1, _Iter1> +__search_n_impl(_Iter1 __first, _Sent1 __last, _DiffT __count, const _Type& __value, _Pred& __pred, _Proj& __proj) { return std::__search_n_forward_impl<_ClassicAlgPolicy>(__first, __last, __count, __value, __pred, __proj); } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator search_n( +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator search_n( _ForwardIterator __first, _ForwardIterator __last, _Size __count, const _Tp& __value, _BinaryPredicate __pred) { static_assert( __is_callable<_BinaryPredicate, decltype(*__first), const _Tp&>::value, "BinaryPredicate has to be callable"); @@ -144,7 +145,7 @@ _LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator search_n(_ForwardIterator __first, 
_ForwardIterator __last, _Size __count, const _Tp& __value) { return std::search_n(__first, __last, std::__convert_to_integral(__count), __value, __equal_to()); } diff --git a/lib/libcxx/include/__algorithm/set_intersection.h b/lib/libcxx/include/__algorithm/set_intersection.h index 73d888d1b038..bb0d86cd0f58 100644 --- a/lib/libcxx/include/__algorithm/set_intersection.h +++ b/lib/libcxx/include/__algorithm/set_intersection.h @@ -12,10 +12,15 @@ #include <__algorithm/comp.h> #include <__algorithm/comp_ref_type.h> #include <__algorithm/iterator_operations.h> +#include <__algorithm/lower_bound.h> #include <__config> +#include <__functional/identity.h> #include <__iterator/iterator_traits.h> #include <__iterator/next.h> +#include <__type_traits/is_same.h> +#include <__utility/exchange.h> #include <__utility/move.h> +#include <__utility/swap.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header @@ -38,10 +43,103 @@ struct __set_intersection_result { : __in1_(std::move(__in_iter1)), __in2_(std::move(__in_iter2)), __out_(std::move(__out_iter)) {} }; -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 __set_intersection_result<_InIter1, _InIter2, _OutIter> +// Helper for __set_intersection() with one-sided binary search: populate result and advance input iterators if they +// are found to potentially contain the same value in two consecutive calls. This function is very intimately related to +// the way it is used and doesn't attempt to abstract that, it's not appropriate for general usage outside of its +// context. 
+template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 void __set_intersection_add_output_if_equal( + bool __may_be_equal, + _InForwardIter1& __first1, + _InForwardIter2& __first2, + _OutIter& __result, + bool& __prev_may_be_equal) { + if (__may_be_equal && __prev_may_be_equal) { + *__result = *__first1; + ++__result; + ++__first1; + ++__first2; + __prev_may_be_equal = false; + } else { + __prev_may_be_equal = __may_be_equal; + } +} + +// With forward iterators we can make multiple passes over the data, allowing the use of one-sided binary search to +// reduce best-case complexity to log(N). Understanding how we can use binary search and still respect complexity +// guarantees is _not_ straightforward: the guarantee is "at most 2*(N+M)-1 comparisons", and one-sided binary search +// will necessarily overshoot depending on the position of the needle in the haystack -- for instance, if we're +// searching for 3 in (1, 2, 3, 4), we'll check if 3<1, then 3<2, then 3<4, and, finally, 3<3, for a total of 4 +// comparisons, when linear search would have yielded 3. However, because we won't need to perform the intervening +// reciprocal comparisons (ie 1<3, 2<3, 4<3), that extra comparison doesn't run afoul of the guarantee. Additionally, +// this type of scenario can only happen for match distances of up to 5 elements, because 2*log2(8) is 6, and we'll +// still be worse-off at position 5 of an 8-element set. From then onwards these scenarios can't happen. TL;DR: we'll be +// 1 comparison worse-off compared to the classic linear-searching algorithm if matching position 3 of a set with 4 +// elements, or position 5 if the set has 7 or 8 elements, but we'll never exceed the complexity guarantees from the +// standard. 
+template +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI +_LIBCPP_CONSTEXPR_SINCE_CXX20 __set_intersection_result<_InForwardIter1, _InForwardIter2, _OutIter> __set_intersection( - _InIter1 __first1, _Sent1 __last1, _InIter2 __first2, _Sent2 __last2, _OutIter __result, _Compare&& __comp) { + _InForwardIter1 __first1, + _Sent1 __last1, + _InForwardIter2 __first2, + _Sent2 __last2, + _OutIter __result, + _Compare&& __comp, + std::forward_iterator_tag, + std::forward_iterator_tag) { + _LIBCPP_CONSTEXPR std::__identity __proj; + bool __prev_may_be_equal = false; + + while (__first2 != __last2) { + _InForwardIter1 __first1_next = + std::__lower_bound_onesided<_AlgPolicy>(__first1, __last1, *__first2, __comp, __proj); + std::swap(__first1_next, __first1); + // keeping in mind that a==b iff !(a(__first2, __last2, *__first1, __comp, __proj); + std::swap(__first2_next, __first2); + std::__set_intersection_add_output_if_equal( + __first2 == __first2_next, __first1, __first2, __result, __prev_may_be_equal); + } + return __set_intersection_result<_InForwardIter1, _InForwardIter2, _OutIter>( + _IterOps<_AlgPolicy>::next(std::move(__first1), std::move(__last1)), + _IterOps<_AlgPolicy>::next(std::move(__first2), std::move(__last2)), + std::move(__result)); +} + +// input iterators are not suitable for multipass algorithms, so we stick to the classic single-pass version +template +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI +_LIBCPP_CONSTEXPR_SINCE_CXX20 __set_intersection_result<_InInputIter1, _InInputIter2, _OutIter> +__set_intersection( + _InInputIter1 __first1, + _Sent1 __last1, + _InInputIter2 __first2, + _Sent2 __last2, + _OutIter __result, + _Compare&& __comp, + std::input_iterator_tag, + std::input_iterator_tag) { while (__first1 != __last1 && __first2 != __last2) { if (__comp(*__first1, *__first2)) ++__first1; @@ -55,12 +153,28 @@ __set_intersection( } } - return __set_intersection_result<_InIter1, _InIter2, _OutIter>( + return __set_intersection_result<_InInputIter1, _InInputIter2, 
_OutIter>( _IterOps<_AlgPolicy>::next(std::move(__first1), std::move(__last1)), _IterOps<_AlgPolicy>::next(std::move(__first2), std::move(__last2)), std::move(__result)); } +template +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI +_LIBCPP_CONSTEXPR_SINCE_CXX20 __set_intersection_result<_InIter1, _InIter2, _OutIter> +__set_intersection( + _InIter1 __first1, _Sent1 __last1, _InIter2 __first2, _Sent2 __last2, _OutIter __result, _Compare&& __comp) { + return std::__set_intersection<_AlgPolicy>( + std::move(__first1), + std::move(__last1), + std::move(__first2), + std::move(__last2), + std::move(__result), + std::forward<_Compare>(__comp), + typename std::_IterOps<_AlgPolicy>::template __iterator_category<_InIter1>(), + typename std::_IterOps<_AlgPolicy>::template __iterator_category<_InIter2>()); +} + template inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _OutputIterator set_intersection( _InputIterator1 __first1, diff --git a/lib/libcxx/include/__algorithm/simd_utils.h b/lib/libcxx/include/__algorithm/simd_utils.h new file mode 100644 index 000000000000..549197be8018 --- /dev/null +++ b/lib/libcxx/include/__algorithm/simd_utils.h @@ -0,0 +1,164 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___ALGORITHM_SIMD_UTILS_H +#define _LIBCPP___ALGORITHM_SIMD_UTILS_H + +#include <__algorithm/min.h> +#include <__bit/bit_cast.h> +#include <__bit/countl.h> +#include <__bit/countr.h> +#include <__config> +#include <__type_traits/is_arithmetic.h> +#include <__type_traits/is_same.h> +#include <__utility/integer_sequence.h> +#include +#include + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_PUSH_MACROS +#include <__undef_macros> + +// TODO: Find out how altivec changes things and allow vectorizations there too. +#if _LIBCPP_STD_VER >= 14 && defined(_LIBCPP_CLANG_VER) && !defined(__ALTIVEC__) +# define _LIBCPP_HAS_ALGORITHM_VECTOR_UTILS 1 +#else +# define _LIBCPP_HAS_ALGORITHM_VECTOR_UTILS 0 +#endif + +#if _LIBCPP_HAS_ALGORITHM_VECTOR_UTILS && !defined(__OPTIMIZE_SIZE__) +# define _LIBCPP_VECTORIZE_ALGORITHMS 1 +#else +# define _LIBCPP_VECTORIZE_ALGORITHMS 0 +#endif + +#if _LIBCPP_HAS_ALGORITHM_VECTOR_UTILS + +_LIBCPP_BEGIN_NAMESPACE_STD + +template +inline constexpr bool __can_map_to_integer_v = + sizeof(_Tp) == alignof(_Tp) && (sizeof(_Tp) == 1 || sizeof(_Tp) == 2 || sizeof(_Tp) == 4 || sizeof(_Tp) == 8); + +template +struct __get_as_integer_type_impl; + +template <> +struct __get_as_integer_type_impl<1> { + using type = uint8_t; +}; + +template <> +struct __get_as_integer_type_impl<2> { + using type = uint16_t; +}; +template <> +struct __get_as_integer_type_impl<4> { + using type = uint32_t; +}; +template <> +struct __get_as_integer_type_impl<8> { + using type = uint64_t; +}; + +template +using __get_as_integer_type_t = typename __get_as_integer_type_impl::type; + +// This isn't specialized for 64 byte vectors on purpose. 
They have the potential to significantly reduce performance +// in mixed simd/non-simd workloads and don't provide any performance improvement for currently vectorized algorithms +// as far as benchmarks are concerned. +# if defined(__AVX__) || defined(__MVS__) +template +inline constexpr size_t __native_vector_size = 32 / sizeof(_Tp); +# elif defined(__SSE__) || defined(__ARM_NEON__) +template +inline constexpr size_t __native_vector_size = 16 / sizeof(_Tp); +# elif defined(__MMX__) +template +inline constexpr size_t __native_vector_size = 8 / sizeof(_Tp); +# else +template +inline constexpr size_t __native_vector_size = 1; +# endif + +template +using __simd_vector __attribute__((__ext_vector_type__(_Np))) = _ArithmeticT; + +template +inline constexpr size_t __simd_vector_size_v = []() -> size_t { + static_assert(_False, "Not a vector!"); +}(); + +template +inline constexpr size_t __simd_vector_size_v<__simd_vector<_Tp, _Np>> = _Np; + +template +_LIBCPP_HIDE_FROM_ABI _Tp __simd_vector_underlying_type_impl(__simd_vector<_Tp, _Np>) { + return _Tp{}; +} + +template +using __simd_vector_underlying_type_t = decltype(std::__simd_vector_underlying_type_impl(_VecT{})); + +// This isn't inlined without always_inline when loading chars. 
+template +_LIBCPP_NODISCARD _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI _VecT __load_vector(_Iter __iter) noexcept { + return [=](index_sequence<_Indices...>) _LIBCPP_ALWAYS_INLINE noexcept { + return _VecT{__iter[_Indices]...}; + }(make_index_sequence<__simd_vector_size_v<_VecT>>{}); +} + +template +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI bool __all_of(__simd_vector<_Tp, _Np> __vec) noexcept { + return __builtin_reduce_and(__builtin_convertvector(__vec, __simd_vector)); +} + +template +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI size_t __find_first_set(__simd_vector<_Tp, _Np> __vec) noexcept { + using __mask_vec = __simd_vector; + + // This has MSan disabled du to https://github.com/llvm/llvm-project/issues/85876 + auto __impl = [&](_MaskT) _LIBCPP_NO_SANITIZE("memory") noexcept { +# if defined(_LIBCPP_BIG_ENDIAN) + return std::min( + _Np, std::__countl_zero(__builtin_bit_cast(_MaskT, __builtin_convertvector(__vec, __mask_vec)))); +# else + return std::min( + _Np, std::__countr_zero(__builtin_bit_cast(_MaskT, __builtin_convertvector(__vec, __mask_vec)))); +# endif + }; + + if constexpr (sizeof(__mask_vec) == sizeof(uint8_t)) { + return __impl(uint8_t{}); + } else if constexpr (sizeof(__mask_vec) == sizeof(uint16_t)) { + return __impl(uint16_t{}); + } else if constexpr (sizeof(__mask_vec) == sizeof(uint32_t)) { + return __impl(uint32_t{}); + } else if constexpr (sizeof(__mask_vec) == sizeof(uint64_t)) { + return __impl(uint64_t{}); + } else { + static_assert(sizeof(__mask_vec) == 0, "unexpected required size for mask integer type"); + return 0; + } +} + +template +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI size_t __find_first_not_set(__simd_vector<_Tp, _Np> __vec) noexcept { + return std::__find_first_set(~__vec); +} + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP_HAS_ALGORITHM_VECTOR_UTILS + +_LIBCPP_POP_MACROS + +#endif // _LIBCPP___ALGORITHM_SIMD_UTILS_H diff --git a/lib/libcxx/include/__algorithm/sort.h b/lib/libcxx/include/__algorithm/sort.h index 
8a5e0211cdf4..07b5814639e9 100644 --- a/lib/libcxx/include/__algorithm/sort.h +++ b/lib/libcxx/include/__algorithm/sort.h @@ -696,9 +696,8 @@ __partition_with_equals_on_left(_RandomAccessIterator __first, _RandomAccessIter using _Ops = _IterOps<_AlgPolicy>; typedef typename iterator_traits<_RandomAccessIterator>::difference_type difference_type; typedef typename std::iterator_traits<_RandomAccessIterator>::value_type value_type; - // TODO(LLVM18): Make __begin const, see https://reviews.llvm.org/D147089#4349748 - _RandomAccessIterator __begin = __first; // used for bounds checking, those are not moved around - const _RandomAccessIterator __end = __last; + const _RandomAccessIterator __begin = __first; // used for bounds checking, those are not moved around + const _RandomAccessIterator __end = __last; (void)__end; // value_type __pivot(_Ops::__iter_move(__first)); if (__comp(__pivot, *(__last - difference_type(1)))) { diff --git a/lib/libcxx/include/__algorithm/sort_heap.h b/lib/libcxx/include/__algorithm/sort_heap.h index 060fc33c3c6e..f20b110c7fd1 100644 --- a/lib/libcxx/include/__algorithm/sort_heap.h +++ b/lib/libcxx/include/__algorithm/sort_heap.h @@ -16,8 +16,8 @@ #include <__config> #include <__debug_utils/strict_weak_ordering_check.h> #include <__iterator/iterator_traits.h> -#include <__type_traits/is_copy_assignable.h> -#include <__type_traits/is_copy_constructible.h> +#include <__type_traits/is_assignable.h> +#include <__type_traits/is_constructible.h> #include <__utility/move.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__algorithm/stable_sort.h b/lib/libcxx/include/__algorithm/stable_sort.h index 9be192bd65a6..726e7e16b356 100644 --- a/lib/libcxx/include/__algorithm/stable_sort.h +++ b/lib/libcxx/include/__algorithm/stable_sort.h @@ -20,7 +20,7 @@ #include <__memory/destruct_n.h> #include <__memory/temporary_buffer.h> #include <__memory/unique_ptr.h> -#include <__type_traits/is_trivially_copy_assignable.h> 
+#include <__type_traits/is_trivially_assignable.h> #include <__utility/move.h> #include <__utility/pair.h> #include diff --git a/lib/libcxx/include/__algorithm/unique.h b/lib/libcxx/include/__algorithm/unique.h index 056373d06fe4..d597014596f2 100644 --- a/lib/libcxx/include/__algorithm/unique.h +++ b/lib/libcxx/include/__algorithm/unique.h @@ -29,7 +29,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD // unique template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 std::pair<_Iter, _Iter> +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 std::pair<_Iter, _Iter> __unique(_Iter __first, _Sent __last, _BinaryPredicate&& __pred) { __first = std::__adjacent_find(__first, __last, __pred); if (__first != __last) { @@ -46,13 +46,13 @@ __unique(_Iter __first, _Sent __last, _BinaryPredicate&& __pred) { } template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator unique(_ForwardIterator __first, _ForwardIterator __last, _BinaryPredicate __pred) { return std::__unique<_ClassicAlgPolicy>(std::move(__first), std::move(__last), __pred).first; } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator unique(_ForwardIterator __first, _ForwardIterator __last) { return std::unique(__first, __last, __equal_to()); } diff --git a/lib/libcxx/include/__algorithm/unwrap_iter.h b/lib/libcxx/include/__algorithm/unwrap_iter.h index 50d815c97088..8cc0d22d4fc2 100644 --- a/lib/libcxx/include/__algorithm/unwrap_iter.h +++ b/lib/libcxx/include/__algorithm/unwrap_iter.h @@ -13,7 +13,7 @@ #include <__iterator/iterator_traits.h> #include <__memory/pointer_traits.h> #include <__type_traits/enable_if.h> -#include <__type_traits/is_copy_constructible.h> +#include 
<__type_traits/is_constructible.h> #include <__utility/declval.h> #include <__utility/move.h> diff --git a/lib/libcxx/include/__algorithm/upper_bound.h b/lib/libcxx/include/__algorithm/upper_bound.h index f499f7a80aa6..c39dec2e8969 100644 --- a/lib/libcxx/include/__algorithm/upper_bound.h +++ b/lib/libcxx/include/__algorithm/upper_bound.h @@ -18,7 +18,7 @@ #include <__iterator/advance.h> #include <__iterator/distance.h> #include <__iterator/iterator_traits.h> -#include <__type_traits/is_copy_constructible.h> +#include <__type_traits/is_constructible.h> #include <__utility/move.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -48,7 +48,7 @@ __upper_bound(_Iter __first, _Sent __last, const _Tp& __value, _Compare&& __comp } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator upper_bound(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __value, _Compare __comp) { static_assert(is_copy_constructible<_ForwardIterator>::value, "Iterator has to be copy constructible"); return std::__upper_bound<_ClassicAlgPolicy>( @@ -56,7 +56,7 @@ upper_bound(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __valu } template -_LIBCPP_NODISCARD_EXT inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator +_LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _ForwardIterator upper_bound(_ForwardIterator __first, _ForwardIterator __last, const _Tp& __value) { return std::upper_bound(std::move(__first), std::move(__last), __value, __less<>()); } diff --git a/lib/libcxx/include/__assert b/lib/libcxx/include/__assert index eb862b5369b2..49769fb4d449 100644 --- a/lib/libcxx/include/__assert +++ b/lib/libcxx/include/__assert @@ -34,4 +34,85 @@ # define _LIBCPP_ASSUME(expression) ((void)0) #endif +// clang-format off +// Fast hardening mode checks. 
+ +#if _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_FAST + +// Enabled checks. +# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSERT(expression, message) +// Disabled checks. +// On most modern platforms, dereferencing a null pointer does not lead to an actual memory access. +# define _LIBCPP_ASSERT_NON_NULL(expression, message) _LIBCPP_ASSUME(expression) +// Overlapping ranges will make algorithms produce incorrect results but don't directly lead to a security +// vulnerability. +# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_VALID_DEALLOCATION(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_PEDANTIC(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSUME(expression) + +// Extensive hardening mode checks. + +#elif _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_EXTENSIVE + +// Enabled checks. 
+# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_NON_NULL(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_VALID_DEALLOCATION(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_PEDANTIC(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSERT(expression, message) +// Disabled checks. +# define _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSUME(expression) + +// Debug hardening mode checks. + +#elif _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_DEBUG + +// All checks enabled. 
+# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_NON_NULL(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_VALID_DEALLOCATION(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_PEDANTIC(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSERT(expression, message) +# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSERT(expression, message) + +// Disable all checks if hardening is not enabled. + +#else + +// All checks disabled. 
+# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_NON_NULL(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_VALID_DEALLOCATION(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_PEDANTIC(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSUME(expression) +# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSUME(expression) + +#endif // _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_FAST +// clang-format on + #endif // _LIBCPP___ASSERT diff --git a/lib/libcxx/include/__assertion_handler b/lib/libcxx/include/__assertion_handler index 8bc0553c078b..3b6d6b2cca53 100644 --- a/lib/libcxx/include/__assertion_handler +++ b/lib/libcxx/include/__assertion_handler @@ -23,8 +23,17 @@ #else -// TODO(hardening): use `__builtin_verbose_trap(message)` once that becomes available. -# define _LIBCPP_ASSERTION_HANDLER(message) ((void)message, __builtin_trap()) +# if __has_builtin(__builtin_verbose_trap) +// AppleClang shipped a slightly different version of __builtin_verbose_trap from the upstream +// version before upstream Clang actually got the builtin. 
+# if defined(_LIBCPP_APPLE_CLANG_VER) && _LIBCPP_APPLE_CLANG_VER < 17000 +# define _LIBCPP_ASSERTION_HANDLER(message) __builtin_verbose_trap(message) +# else +# define _LIBCPP_ASSERTION_HANDLER(message) __builtin_verbose_trap("libc++", message) +# endif +# else +# define _LIBCPP_ASSERTION_HANDLER(message) ((void)message, __builtin_trap()) +# endif #endif // _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_DEBUG diff --git a/lib/libcxx/include/__atomic/aliases.h b/lib/libcxx/include/__atomic/aliases.h index 0fa289de54b0..e27e09af6b77 100644 --- a/lib/libcxx/include/__atomic/aliases.h +++ b/lib/libcxx/include/__atomic/aliases.h @@ -18,7 +18,6 @@ #include <__type_traits/make_unsigned.h> #include #include -#include #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header @@ -92,7 +91,7 @@ using __largest_lock_free_type = short; # elif ATOMIC_CHAR_LOCK_FREE == 2 using __largest_lock_free_type = char; # else -# define _LIBCPP_NO_LOCK_FREE_TYPES // There are no lockfree types (this can happen in freestanding) +# define _LIBCPP_NO_LOCK_FREE_TYPES // There are no lockfree types (this can happen on unusual platforms) # endif # ifndef _LIBCPP_NO_LOCK_FREE_TYPES diff --git a/lib/libcxx/include/__atomic/atomic.h b/lib/libcxx/include/__atomic/atomic.h index 3dfb6937d032..bd3f659c22df 100644 --- a/lib/libcxx/include/__atomic/atomic.h +++ b/lib/libcxx/include/__atomic/atomic.h @@ -462,22 +462,26 @@ atomic_wait_explicit(const atomic<_Tp>* __o, typename atomic<_Tp>::value_type __ // atomic_notify_one template -_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void atomic_notify_one(volatile atomic<_Tp>* __o) _NOEXCEPT { +_LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void +atomic_notify_one(volatile atomic<_Tp>* __o) _NOEXCEPT { __o->notify_one(); } template -_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void atomic_notify_one(atomic<_Tp>* __o) _NOEXCEPT { +_LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC 
_LIBCPP_HIDE_FROM_ABI void +atomic_notify_one(atomic<_Tp>* __o) _NOEXCEPT { __o->notify_one(); } // atomic_notify_all template -_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void atomic_notify_all(volatile atomic<_Tp>* __o) _NOEXCEPT { +_LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void +atomic_notify_all(volatile atomic<_Tp>* __o) _NOEXCEPT { __o->notify_all(); } template -_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void atomic_notify_all(atomic<_Tp>* __o) _NOEXCEPT { +_LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void +atomic_notify_all(atomic<_Tp>* __o) _NOEXCEPT { __o->notify_all(); } diff --git a/lib/libcxx/include/__atomic/atomic_base.h b/lib/libcxx/include/__atomic/atomic_base.h index 3ad3b562c598..7e26434c9c3a 100644 --- a/lib/libcxx/include/__atomic/atomic_base.h +++ b/lib/libcxx/include/__atomic/atomic_base.h @@ -14,11 +14,10 @@ #include <__atomic/cxx_atomic_impl.h> #include <__atomic/is_always_lock_free.h> #include <__atomic/memory_order.h> -#include <__availability> #include <__config> #include <__memory/addressof.h> #include <__type_traits/is_integral.h> -#include <__type_traits/is_nothrow_default_constructible.h> +#include <__type_traits/is_nothrow_constructible.h> #include <__type_traits/is_same.h> #include @@ -34,7 +33,7 @@ struct __atomic_base // false mutable __cxx_atomic_impl<_Tp> __a_; #if _LIBCPP_STD_VER >= 17 - static _LIBCPP_CONSTEXPR bool is_always_lock_free = __libcpp_is_always_lock_free<__cxx_atomic_impl<_Tp> >::__value; + static constexpr bool is_always_lock_free = __libcpp_is_always_lock_free<__cxx_atomic_impl<_Tp> >::__value; #endif _LIBCPP_HIDE_FROM_ABI bool is_lock_free() const volatile _NOEXCEPT { @@ -104,24 +103,20 @@ struct __atomic_base // false _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void wait(_Tp __v, memory_order __m = memory_order_seq_cst) const volatile _NOEXCEPT { - std::__cxx_atomic_wait(std::addressof(__a_), __v, __m); + 
std::__atomic_wait(*this, __v, __m); } _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void wait(_Tp __v, memory_order __m = memory_order_seq_cst) const _NOEXCEPT { - std::__cxx_atomic_wait(std::addressof(__a_), __v, __m); + std::__atomic_wait(*this, __v, __m); } _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() volatile _NOEXCEPT { - std::__cxx_atomic_notify_one(std::addressof(__a_)); - } - _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() _NOEXCEPT { - std::__cxx_atomic_notify_one(std::addressof(__a_)); + std::__atomic_notify_one(*this); } + _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() _NOEXCEPT { std::__atomic_notify_one(*this); } _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() volatile _NOEXCEPT { - std::__cxx_atomic_notify_all(std::addressof(__a_)); - } - _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() _NOEXCEPT { - std::__cxx_atomic_notify_all(std::addressof(__a_)); + std::__atomic_notify_all(*this); } + _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() _NOEXCEPT { std::__atomic_notify_all(*this); } #if _LIBCPP_STD_VER >= 20 _LIBCPP_HIDE_FROM_ABI constexpr __atomic_base() noexcept(is_nothrow_default_constructible_v<_Tp>) : __a_(_Tp()) {} @@ -134,11 +129,6 @@ struct __atomic_base // false __atomic_base(const __atomic_base&) = delete; }; -#if _LIBCPP_STD_VER >= 17 -template -_LIBCPP_CONSTEXPR bool __atomic_base<_Tp, __b>::is_always_lock_free; -#endif - // atomic template @@ -200,6 +190,32 @@ struct __atomic_base<_Tp, true> : public __atomic_base<_Tp, false> { _LIBCPP_HIDE_FROM_ABI _Tp operator^=(_Tp __op) _NOEXCEPT { return fetch_xor(__op) ^ __op; } }; +// Here we need _IsIntegral because the default template argument is not enough +// e.g __atomic_base is __atomic_base, which inherits from +// __atomic_base and the caller of the wait function is +// __atomic_base. 
So specializing __atomic_base<_Tp> does not work +template +struct __atomic_waitable_traits<__atomic_base<_Tp, _IsIntegral> > { + static _LIBCPP_HIDE_FROM_ABI _Tp __atomic_load(const __atomic_base<_Tp, _IsIntegral>& __a, memory_order __order) { + return __a.load(__order); + } + + static _LIBCPP_HIDE_FROM_ABI _Tp + __atomic_load(const volatile __atomic_base<_Tp, _IsIntegral>& __this, memory_order __order) { + return __this.load(__order); + } + + static _LIBCPP_HIDE_FROM_ABI const __cxx_atomic_impl<_Tp>* + __atomic_contention_address(const __atomic_base<_Tp, _IsIntegral>& __a) { + return std::addressof(__a.__a_); + } + + static _LIBCPP_HIDE_FROM_ABI const volatile __cxx_atomic_impl<_Tp>* + __atomic_contention_address(const volatile __atomic_base<_Tp, _IsIntegral>& __this) { + return std::addressof(__this.__a_); + } +}; + _LIBCPP_END_NAMESPACE_STD #endif // _LIBCPP___ATOMIC_ATOMIC_BASE_H diff --git a/lib/libcxx/include/__atomic/atomic_flag.h b/lib/libcxx/include/__atomic/atomic_flag.h index d76e5e45c01a..00b157cdff78 100644 --- a/lib/libcxx/include/__atomic/atomic_flag.h +++ b/lib/libcxx/include/__atomic/atomic_flag.h @@ -15,7 +15,8 @@ #include <__atomic/memory_order.h> #include <__chrono/duration.h> #include <__config> -#include <__threading_support> +#include <__memory/addressof.h> +#include <__thread/support.h> #include #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -47,22 +48,26 @@ struct atomic_flag { __cxx_atomic_store(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(false), __m); } - _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void wait(bool __v, memory_order __m = memory_order_seq_cst) const - volatile _NOEXCEPT { - __cxx_atomic_wait(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(__v), __m); + _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void + wait(bool __v, memory_order __m = memory_order_seq_cst) const volatile _NOEXCEPT { + std::__atomic_wait(*this, _LIBCPP_ATOMIC_FLAG_TYPE(__v), __m); } - _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void + 
_LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void wait(bool __v, memory_order __m = memory_order_seq_cst) const _NOEXCEPT { - __cxx_atomic_wait(&__a_, _LIBCPP_ATOMIC_FLAG_TYPE(__v), __m); + std::__atomic_wait(*this, _LIBCPP_ATOMIC_FLAG_TYPE(__v), __m); } - _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() volatile _NOEXCEPT { - __cxx_atomic_notify_one(&__a_); + _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() volatile _NOEXCEPT { + std::__atomic_notify_one(*this); + } + _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() _NOEXCEPT { + std::__atomic_notify_one(*this); } - _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_one() _NOEXCEPT { __cxx_atomic_notify_one(&__a_); } _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() volatile _NOEXCEPT { - __cxx_atomic_notify_all(&__a_); + std::__atomic_notify_all(*this); + } + _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() _NOEXCEPT { + std::__atomic_notify_all(*this); } - _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void notify_all() _NOEXCEPT { __cxx_atomic_notify_all(&__a_); } #if _LIBCPP_STD_VER >= 20 _LIBCPP_HIDE_FROM_ABI constexpr atomic_flag() _NOEXCEPT : __a_(false) {} @@ -77,6 +82,28 @@ struct atomic_flag { atomic_flag& operator=(const atomic_flag&) volatile = delete; }; +template <> +struct __atomic_waitable_traits { + static _LIBCPP_HIDE_FROM_ABI _LIBCPP_ATOMIC_FLAG_TYPE __atomic_load(const atomic_flag& __a, memory_order __order) { + return std::__cxx_atomic_load(&__a.__a_, __order); + } + + static _LIBCPP_HIDE_FROM_ABI _LIBCPP_ATOMIC_FLAG_TYPE + __atomic_load(const volatile atomic_flag& __a, memory_order __order) { + return std::__cxx_atomic_load(&__a.__a_, __order); + } + + static _LIBCPP_HIDE_FROM_ABI const __cxx_atomic_impl<_LIBCPP_ATOMIC_FLAG_TYPE>* + __atomic_contention_address(const 
atomic_flag& __a) { + return std::addressof(__a.__a_); + } + + static _LIBCPP_HIDE_FROM_ABI const volatile __cxx_atomic_impl<_LIBCPP_ATOMIC_FLAG_TYPE>* + __atomic_contention_address(const volatile atomic_flag& __a) { + return std::addressof(__a.__a_); + } +}; + inline _LIBCPP_HIDE_FROM_ABI bool atomic_flag_test(const volatile atomic_flag* __o) _NOEXCEPT { return __o->test(); } inline _LIBCPP_HIDE_FROM_ABI bool atomic_flag_test(const atomic_flag* __o) _NOEXCEPT { return __o->test(); } @@ -117,41 +144,43 @@ inline _LIBCPP_HIDE_FROM_ABI void atomic_flag_clear_explicit(atomic_flag* __o, m __o->clear(__m); } -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void +inline _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void atomic_flag_wait(const volatile atomic_flag* __o, bool __v) _NOEXCEPT { __o->wait(__v); } -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void +inline _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void atomic_flag_wait(const atomic_flag* __o, bool __v) _NOEXCEPT { __o->wait(__v); } -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void +inline _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void atomic_flag_wait_explicit(const volatile atomic_flag* __o, bool __v, memory_order __m) _NOEXCEPT { __o->wait(__v, __m); } -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void +inline _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void atomic_flag_wait_explicit(const atomic_flag* __o, bool __v, memory_order __m) _NOEXCEPT { __o->wait(__v, __m); } -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void +inline _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void atomic_flag_notify_one(volatile atomic_flag* __o) _NOEXCEPT { __o->notify_one(); } -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void atomic_flag_notify_one(atomic_flag* __o) _NOEXCEPT { +inline 
_LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void +atomic_flag_notify_one(atomic_flag* __o) _NOEXCEPT { __o->notify_one(); } -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void +inline _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void atomic_flag_notify_all(volatile atomic_flag* __o) _NOEXCEPT { __o->notify_all(); } -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void atomic_flag_notify_all(atomic_flag* __o) _NOEXCEPT { +inline _LIBCPP_DEPRECATED_ATOMIC_SYNC _LIBCPP_HIDE_FROM_ABI _LIBCPP_AVAILABILITY_SYNC void +atomic_flag_notify_all(atomic_flag* __o) _NOEXCEPT { __o->notify_all(); } diff --git a/lib/libcxx/include/__atomic/atomic_init.h b/lib/libcxx/include/__atomic/atomic_init.h index 8ef5958bfeda..8e86ba31b4ac 100644 --- a/lib/libcxx/include/__atomic/atomic_init.h +++ b/lib/libcxx/include/__atomic/atomic_init.h @@ -15,12 +15,10 @@ # pragma GCC system_header #endif -#define ATOMIC_FLAG_INIT \ - { false } -#define ATOMIC_VAR_INIT(__v) \ - { __v } +#define ATOMIC_FLAG_INIT {false} +#define ATOMIC_VAR_INIT(__v) {__v} -#if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_DISABLE_DEPRECATION_WARNINGS) +#if _LIBCPP_STD_VER >= 20 && defined(_LIBCPP_COMPILER_CLANG_BASED) && !defined(_LIBCPP_DISABLE_DEPRECATION_WARNINGS) # pragma clang deprecated(ATOMIC_VAR_INIT) #endif diff --git a/lib/libcxx/include/__atomic/atomic_ref.h b/lib/libcxx/include/__atomic/atomic_ref.h new file mode 100644 index 000000000000..b0180a37ab50 --- /dev/null +++ b/lib/libcxx/include/__atomic/atomic_ref.h @@ -0,0 +1,378 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +// Kokkos v. 
4.0 +// Copyright (2022) National Technology & Engineering +// Solutions of Sandia, LLC (NTESS). +// +// Under the terms of Contract DE-NA0003525 with NTESS, +// the U.S. Government retains certain rights in this software. +// +//===---------------------------------------------------------------------===// + +#ifndef _LIBCPP___ATOMIC_ATOMIC_REF_H +#define _LIBCPP___ATOMIC_ATOMIC_REF_H + +#include <__assert> +#include <__atomic/atomic_sync.h> +#include <__atomic/check_memory_order.h> +#include <__atomic/to_gcc_order.h> +#include <__concepts/arithmetic.h> +#include <__concepts/same_as.h> +#include <__config> +#include <__memory/addressof.h> +#include <__type_traits/has_unique_object_representation.h> +#include <__type_traits/is_trivially_copyable.h> +#include +#include +#include + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_PUSH_MACROS +#include <__undef_macros> + +_LIBCPP_BEGIN_NAMESPACE_STD + +#if _LIBCPP_STD_VER >= 20 + +// These types are required to make __atomic_is_always_lock_free work across GCC and Clang. +// The purpose of this trick is to make sure that we provide an object with the correct alignment +// to __atomic_is_always_lock_free, since that answer depends on the alignment. 
+template +struct __alignment_checker_type { + alignas(_Alignment) char __data; +}; + +template +struct __get_aligner_instance { + static constexpr __alignment_checker_type<_Alignment> __instance{}; +}; + +template +struct __atomic_ref_base { +private: + _LIBCPP_HIDE_FROM_ABI static _Tp* __clear_padding(_Tp& __val) noexcept { + _Tp* __ptr = std::addressof(__val); +# if __has_builtin(__builtin_clear_padding) + __builtin_clear_padding(__ptr); +# endif + return __ptr; + } + + _LIBCPP_HIDE_FROM_ABI static bool __compare_exchange( + _Tp* __ptr, _Tp* __expected, _Tp* __desired, bool __is_weak, int __success, int __failure) noexcept { + if constexpr ( +# if __has_builtin(__builtin_clear_padding) + has_unique_object_representations_v<_Tp> || floating_point<_Tp> +# else + true // NOLINT(readability-simplify-boolean-expr) +# endif + ) { + return __atomic_compare_exchange(__ptr, __expected, __desired, __is_weak, __success, __failure); + } else { // _Tp has padding bits and __builtin_clear_padding is available + __clear_padding(*__desired); + _Tp __copy = *__expected; + __clear_padding(__copy); + // The algorithm we use here is basically to perform `__atomic_compare_exchange` on the + // values until it has either succeeded, or failed because the value representation of the + // objects involved was different. This is why we loop around __atomic_compare_exchange: + // we basically loop until its failure is caused by the value representation of the objects + // being different, not only their object representation. 
+ while (true) { + _Tp __prev = __copy; + if (__atomic_compare_exchange(__ptr, std::addressof(__copy), __desired, __is_weak, __success, __failure)) { + return true; + } + _Tp __curr = __copy; + if (std::memcmp(__clear_padding(__prev), __clear_padding(__curr), sizeof(_Tp)) != 0) { + // Value representation without padding bits do not compare equal -> + // write the current content of *ptr into *expected + std::memcpy(__expected, std::addressof(__copy), sizeof(_Tp)); + return false; + } + } + } + } + + friend struct __atomic_waitable_traits<__atomic_ref_base<_Tp>>; + + // require types that are 1, 2, 4, 8, or 16 bytes in length to be aligned to at least their size to be potentially + // used lock-free + static constexpr size_t __min_alignment = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || (sizeof(_Tp) > 16) ? 0 : sizeof(_Tp); + +public: + using value_type = _Tp; + + static constexpr size_t required_alignment = alignof(_Tp) > __min_alignment ? alignof(_Tp) : __min_alignment; + + // The __atomic_always_lock_free builtin takes into account the alignment of the pointer if provided, + // so we create a fake pointer with a suitable alignment when querying it. Note that we are guaranteed + // that the pointer is going to be aligned properly at runtime because that is a (checked) precondition + // of atomic_ref's constructor. 
+ static constexpr bool is_always_lock_free = + __atomic_always_lock_free(sizeof(_Tp), &__get_aligner_instance::__instance); + + _LIBCPP_HIDE_FROM_ABI bool is_lock_free() const noexcept { return __atomic_is_lock_free(sizeof(_Tp), __ptr_); } + + _LIBCPP_HIDE_FROM_ABI void store(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept + _LIBCPP_CHECK_STORE_MEMORY_ORDER(__order) { + _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + __order == memory_order::relaxed || __order == memory_order::release || __order == memory_order::seq_cst, + "atomic_ref: memory order argument to atomic store operation is invalid"); + __atomic_store(__ptr_, __clear_padding(__desired), std::__to_gcc_order(__order)); + } + + _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { + store(__desired); + return __desired; + } + + _LIBCPP_HIDE_FROM_ABI _Tp load(memory_order __order = memory_order::seq_cst) const noexcept + _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__order) { + _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + __order == memory_order::relaxed || __order == memory_order::consume || __order == memory_order::acquire || + __order == memory_order::seq_cst, + "atomic_ref: memory order argument to atomic load operation is invalid"); + alignas(_Tp) byte __mem[sizeof(_Tp)]; + auto* __ret = reinterpret_cast<_Tp*>(__mem); + __atomic_load(__ptr_, __ret, std::__to_gcc_order(__order)); + return *__ret; + } + + _LIBCPP_HIDE_FROM_ABI operator _Tp() const noexcept { return load(); } + + _LIBCPP_HIDE_FROM_ABI _Tp exchange(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept { + alignas(_Tp) byte __mem[sizeof(_Tp)]; + auto* __ret = reinterpret_cast<_Tp*>(__mem); + __atomic_exchange(__ptr_, __clear_padding(__desired), __ret, std::__to_gcc_order(__order)); + return *__ret; + } + + _LIBCPP_HIDE_FROM_ABI bool + compare_exchange_weak(_Tp& __expected, _Tp __desired, memory_order __success, memory_order __failure) const noexcept + _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__success, 
__failure) { + _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + __failure == memory_order::relaxed || __failure == memory_order::consume || + __failure == memory_order::acquire || __failure == memory_order::seq_cst, + "atomic_ref: failure memory order argument to weak atomic compare-and-exchange operation is invalid"); + return __compare_exchange( + __ptr_, + std::addressof(__expected), + std::addressof(__desired), + true, + std::__to_gcc_order(__success), + std::__to_gcc_order(__failure)); + } + _LIBCPP_HIDE_FROM_ABI bool + compare_exchange_strong(_Tp& __expected, _Tp __desired, memory_order __success, memory_order __failure) const noexcept + _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__success, __failure) { + _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + __failure == memory_order::relaxed || __failure == memory_order::consume || + __failure == memory_order::acquire || __failure == memory_order::seq_cst, + "atomic_ref: failure memory order argument to strong atomic compare-and-exchange operation is invalid"); + return __compare_exchange( + __ptr_, + std::addressof(__expected), + std::addressof(__desired), + false, + std::__to_gcc_order(__success), + std::__to_gcc_order(__failure)); + } + + _LIBCPP_HIDE_FROM_ABI bool + compare_exchange_weak(_Tp& __expected, _Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept { + return __compare_exchange( + __ptr_, + std::addressof(__expected), + std::addressof(__desired), + true, + std::__to_gcc_order(__order), + std::__to_gcc_failure_order(__order)); + } + _LIBCPP_HIDE_FROM_ABI bool + compare_exchange_strong(_Tp& __expected, _Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept { + return __compare_exchange( + __ptr_, + std::addressof(__expected), + std::addressof(__desired), + false, + std::__to_gcc_order(__order), + std::__to_gcc_failure_order(__order)); + } + + _LIBCPP_HIDE_FROM_ABI void wait(_Tp __old, memory_order __order = memory_order::seq_cst) const noexcept + 
_LIBCPP_CHECK_WAIT_MEMORY_ORDER(__order) { + _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + __order == memory_order::relaxed || __order == memory_order::consume || __order == memory_order::acquire || + __order == memory_order::seq_cst, + "atomic_ref: memory order argument to atomic wait operation is invalid"); + std::__atomic_wait(*this, __old, __order); + } + _LIBCPP_HIDE_FROM_ABI void notify_one() const noexcept { std::__atomic_notify_one(*this); } + _LIBCPP_HIDE_FROM_ABI void notify_all() const noexcept { std::__atomic_notify_all(*this); } + +protected: + typedef _Tp _Aligned_Tp __attribute__((aligned(required_alignment))); + _Aligned_Tp* __ptr_; + + _LIBCPP_HIDE_FROM_ABI __atomic_ref_base(_Tp& __obj) : __ptr_(std::addressof(__obj)) {} +}; + +template +struct __atomic_waitable_traits<__atomic_ref_base<_Tp>> { + static _LIBCPP_HIDE_FROM_ABI _Tp __atomic_load(const __atomic_ref_base<_Tp>& __a, memory_order __order) { + return __a.load(__order); + } + static _LIBCPP_HIDE_FROM_ABI const _Tp* __atomic_contention_address(const __atomic_ref_base<_Tp>& __a) { + return __a.__ptr_; + } +}; + +template +struct atomic_ref : public __atomic_ref_base<_Tp> { + static_assert(is_trivially_copyable_v<_Tp>, "std::atomic_ref requires that 'T' be a trivially copyable type"); + + using __base = __atomic_ref_base<_Tp>; + + _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) { + _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + reinterpret_cast(std::addressof(__obj)) % __base::required_alignment == 0, + "atomic_ref ctor: referenced object must be aligned to required_alignment"); + } + + _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default; + + _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); } + + atomic_ref& operator=(const atomic_ref&) = delete; +}; + +template + requires(std::integral<_Tp> && !std::same_as) +struct atomic_ref<_Tp> : public __atomic_ref_base<_Tp> { + using __base = __atomic_ref_base<_Tp>; + 
+ using difference_type = __base::value_type; + + _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) { + _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + reinterpret_cast(std::addressof(__obj)) % __base::required_alignment == 0, + "atomic_ref ctor: referenced object must be aligned to required_alignment"); + } + + _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default; + + _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); } + + atomic_ref& operator=(const atomic_ref&) = delete; + + _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept { + return __atomic_fetch_add(this->__ptr_, __arg, std::__to_gcc_order(__order)); + } + _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept { + return __atomic_fetch_sub(this->__ptr_, __arg, std::__to_gcc_order(__order)); + } + _LIBCPP_HIDE_FROM_ABI _Tp fetch_and(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept { + return __atomic_fetch_and(this->__ptr_, __arg, std::__to_gcc_order(__order)); + } + _LIBCPP_HIDE_FROM_ABI _Tp fetch_or(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept { + return __atomic_fetch_or(this->__ptr_, __arg, std::__to_gcc_order(__order)); + } + _LIBCPP_HIDE_FROM_ABI _Tp fetch_xor(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept { + return __atomic_fetch_xor(this->__ptr_, __arg, std::__to_gcc_order(__order)); + } + + _LIBCPP_HIDE_FROM_ABI _Tp operator++(int) const noexcept { return fetch_add(_Tp(1)); } + _LIBCPP_HIDE_FROM_ABI _Tp operator--(int) const noexcept { return fetch_sub(_Tp(1)); } + _LIBCPP_HIDE_FROM_ABI _Tp operator++() const noexcept { return fetch_add(_Tp(1)) + _Tp(1); } + _LIBCPP_HIDE_FROM_ABI _Tp operator--() const noexcept { return fetch_sub(_Tp(1)) - _Tp(1); } + _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __arg) const noexcept { return 
fetch_add(__arg) + __arg; } + _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __arg) const noexcept { return fetch_sub(__arg) - __arg; } + _LIBCPP_HIDE_FROM_ABI _Tp operator&=(_Tp __arg) const noexcept { return fetch_and(__arg) & __arg; } + _LIBCPP_HIDE_FROM_ABI _Tp operator|=(_Tp __arg) const noexcept { return fetch_or(__arg) | __arg; } + _LIBCPP_HIDE_FROM_ABI _Tp operator^=(_Tp __arg) const noexcept { return fetch_xor(__arg) ^ __arg; } +}; + +template + requires std::floating_point<_Tp> +struct atomic_ref<_Tp> : public __atomic_ref_base<_Tp> { + using __base = __atomic_ref_base<_Tp>; + + using difference_type = __base::value_type; + + _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) { + _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + reinterpret_cast(std::addressof(__obj)) % __base::required_alignment == 0, + "atomic_ref ctor: referenced object must be aligned to required_alignment"); + } + + _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default; + + _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); } + + atomic_ref& operator=(const atomic_ref&) = delete; + + _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept { + _Tp __old = this->load(memory_order_relaxed); + _Tp __new = __old + __arg; + while (!this->compare_exchange_weak(__old, __new, __order, memory_order_relaxed)) { + __new = __old + __arg; + } + return __old; + } + _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept { + _Tp __old = this->load(memory_order_relaxed); + _Tp __new = __old - __arg; + while (!this->compare_exchange_weak(__old, __new, __order, memory_order_relaxed)) { + __new = __old - __arg; + } + return __old; + } + + _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __arg) const noexcept { return fetch_add(__arg) + __arg; } + _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __arg) const noexcept { return fetch_sub(__arg) - 
__arg; } +}; + +template +struct atomic_ref<_Tp*> : public __atomic_ref_base<_Tp*> { + using __base = __atomic_ref_base<_Tp*>; + + using difference_type = ptrdiff_t; + + _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp*& __ptr) : __base(__ptr) {} + + _LIBCPP_HIDE_FROM_ABI _Tp* operator=(_Tp* __desired) const noexcept { return __base::operator=(__desired); } + + atomic_ref& operator=(const atomic_ref&) = delete; + + _LIBCPP_HIDE_FROM_ABI _Tp* fetch_add(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept { + return __atomic_fetch_add(this->__ptr_, __arg * sizeof(_Tp), std::__to_gcc_order(__order)); + } + _LIBCPP_HIDE_FROM_ABI _Tp* fetch_sub(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept { + return __atomic_fetch_sub(this->__ptr_, __arg * sizeof(_Tp), std::__to_gcc_order(__order)); + } + + _LIBCPP_HIDE_FROM_ABI _Tp* operator++(int) const noexcept { return fetch_add(1); } + _LIBCPP_HIDE_FROM_ABI _Tp* operator--(int) const noexcept { return fetch_sub(1); } + _LIBCPP_HIDE_FROM_ABI _Tp* operator++() const noexcept { return fetch_add(1) + 1; } + _LIBCPP_HIDE_FROM_ABI _Tp* operator--() const noexcept { return fetch_sub(1) - 1; } + _LIBCPP_HIDE_FROM_ABI _Tp* operator+=(ptrdiff_t __arg) const noexcept { return fetch_add(__arg) + __arg; } + _LIBCPP_HIDE_FROM_ABI _Tp* operator-=(ptrdiff_t __arg) const noexcept { return fetch_sub(__arg) - __arg; } +}; + +_LIBCPP_CTAD_SUPPORTED_FOR_TYPE(atomic_ref); + +#endif // _LIBCPP_STD_VER >= 20 + +_LIBCPP_END_NAMESPACE_STD + +_LIBCPP_POP_MACROS + +#endif // _LIBCPP__ATOMIC_ATOMIC_REF_H diff --git a/lib/libcxx/include/__atomic/atomic_sync.h b/lib/libcxx/include/__atomic/atomic_sync.h index 3d20d6a8ce25..aaf81f58731a 100644 --- a/lib/libcxx/include/__atomic/atomic_sync.h +++ b/lib/libcxx/include/__atomic/atomic_sync.h @@ -12,13 +12,17 @@ #include <__atomic/contention_t.h> #include <__atomic/cxx_atomic_impl.h> #include <__atomic/memory_order.h> -#include <__availability> +#include 
<__atomic/to_gcc_order.h> #include <__chrono/duration.h> #include <__config> #include <__memory/addressof.h> #include <__thread/poll_with_backoff.h> -#include <__threading_support> +#include <__thread/support.h> +#include <__type_traits/conjunction.h> #include <__type_traits/decay.h> +#include <__type_traits/invoke.h> +#include <__type_traits/void_t.h> +#include <__utility/declval.h> #include #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -27,33 +31,101 @@ _LIBCPP_BEGIN_NAMESPACE_STD +// The customisation points to enable the following functions: +// - __atomic_wait +// - __atomic_wait_unless +// - __atomic_notify_one +// - __atomic_notify_all +// Note that std::atomic::wait was back-ported to C++03 +// The below implementations look ugly to support C++03 +template +struct __atomic_waitable_traits { + template + static void __atomic_load(_AtomicWaitable&&, memory_order) = delete; + + template + static void __atomic_contention_address(_AtomicWaitable&&) = delete; +}; + +template +struct __atomic_waitable : false_type {}; + +template +struct __atomic_waitable< _Tp, + __void_t >::__atomic_load( + std::declval(), std::declval())), + decltype(__atomic_waitable_traits<__decay_t<_Tp> >::__atomic_contention_address( + std::declval()))> > : true_type {}; + +template +struct __atomic_wait_poll_impl { + const _AtomicWaitable& __a_; + _Poll __poll_; + memory_order __order_; + + _LIBCPP_HIDE_FROM_ABI bool operator()() const { + auto __current_val = __atomic_waitable_traits<__decay_t<_AtomicWaitable> >::__atomic_load(__a_, __order_); + return __poll_(__current_val); + } +}; + #ifndef _LIBCPP_HAS_NO_THREADS -_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_one(void const volatile*); -_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_all(void const volatile*); -_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI __cxx_contention_t __libcpp_atomic_monitor(void const volatile*); -_LIBCPP_AVAILABILITY_SYNC 
_LIBCPP_EXPORTED_FROM_ABI void __libcpp_atomic_wait(void const volatile*, __cxx_contention_t); +_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_one(void const volatile*) _NOEXCEPT; +_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void __cxx_atomic_notify_all(void const volatile*) _NOEXCEPT; +_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI __cxx_contention_t +__libcpp_atomic_monitor(void const volatile*) _NOEXCEPT; +_LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void +__libcpp_atomic_wait(void const volatile*, __cxx_contention_t) _NOEXCEPT; _LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void -__cxx_atomic_notify_one(__cxx_atomic_contention_t const volatile*); +__cxx_atomic_notify_one(__cxx_atomic_contention_t const volatile*) _NOEXCEPT; _LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void -__cxx_atomic_notify_all(__cxx_atomic_contention_t const volatile*); +__cxx_atomic_notify_all(__cxx_atomic_contention_t const volatile*) _NOEXCEPT; _LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI __cxx_contention_t -__libcpp_atomic_monitor(__cxx_atomic_contention_t const volatile*); +__libcpp_atomic_monitor(__cxx_atomic_contention_t const volatile*) _NOEXCEPT; _LIBCPP_AVAILABILITY_SYNC _LIBCPP_EXPORTED_FROM_ABI void -__libcpp_atomic_wait(__cxx_atomic_contention_t const volatile*, __cxx_contention_t); +__libcpp_atomic_wait(__cxx_atomic_contention_t const volatile*, __cxx_contention_t) _NOEXCEPT; + +template +struct __atomic_wait_backoff_impl { + const _AtomicWaitable& __a_; + _Poll __poll_; + memory_order __order_; + + using __waitable_traits = __atomic_waitable_traits<__decay_t<_AtomicWaitable> >; + + _LIBCPP_AVAILABILITY_SYNC + _LIBCPP_HIDE_FROM_ABI bool + __update_monitor_val_and_poll(__cxx_atomic_contention_t const volatile*, __cxx_contention_t& __monitor_val) const { + // In case the contention type happens to be __cxx_atomic_contention_t, i.e. 
__cxx_atomic_impl, + // the platform wait is directly monitoring the atomic value itself. + // `__poll_` takes the current value of the atomic as an in-out argument + // to potentially modify it. After it returns, `__monitor` has a value + // which can be safely waited on by `std::__libcpp_atomic_wait` without any + // ABA style issues. + __monitor_val = __waitable_traits::__atomic_load(__a_, __order_); + return __poll_(__monitor_val); + } + + _LIBCPP_AVAILABILITY_SYNC + _LIBCPP_HIDE_FROM_ABI bool + __update_monitor_val_and_poll(void const volatile* __contention_address, __cxx_contention_t& __monitor_val) const { + // In case the contention type is anything else, platform wait is monitoring a __cxx_atomic_contention_t + // from the global pool, the monitor comes from __libcpp_atomic_monitor + __monitor_val = std::__libcpp_atomic_monitor(__contention_address); + auto __current_val = __waitable_traits::__atomic_load(__a_, __order_); + return __poll_(__current_val); + } -template -struct __libcpp_atomic_wait_backoff_impl { - _Atp* __a; - _Fn __test_fn; _LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI bool operator()(chrono::nanoseconds __elapsed) const { if (__elapsed > chrono::microseconds(64)) { - auto const __monitor = std::__libcpp_atomic_monitor(__a); - if (__test_fn()) + auto __contention_address = __waitable_traits::__atomic_contention_address(__a_); + __cxx_contention_t __monitor_val; + if (__update_monitor_val_and_poll(__contention_address, __monitor_val)) return true; - std::__libcpp_atomic_wait(__a, __monitor); + std::__libcpp_atomic_wait(__contention_address, __monitor_val); } else if (__elapsed > chrono::microseconds(4)) __libcpp_thread_yield(); else { @@ -62,23 +134,49 @@ struct __libcpp_atomic_wait_backoff_impl { } }; -template -_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_wait(_Atp* __a, _Fn&& __test_fn) { - __libcpp_atomic_wait_backoff_impl<_Atp, __decay_t<_Fn> > __backoff_fn = {__a, __test_fn}; - return 
std::__libcpp_thread_poll_with_backoff(__test_fn, __backoff_fn); +// The semantics of this function are similar to `atomic`'s +// `.wait(T old, std::memory_order order)`, but instead of having a hardcoded +// predicate (is the loaded value unequal to `old`?), the predicate function is +// specified as an argument. The loaded value is given as an in-out argument to +// the predicate. If the predicate function returns `true`, +// `__atomic_wait_unless` will return. If the predicate function returns +// `false`, it must set the argument to its current understanding of the atomic +// value. The predicate function must not return `false` spuriously. +template +_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void +__atomic_wait_unless(const _AtomicWaitable& __a, _Poll&& __poll, memory_order __order) { + static_assert(__atomic_waitable<_AtomicWaitable>::value, ""); + __atomic_wait_poll_impl<_AtomicWaitable, __decay_t<_Poll> > __poll_impl = {__a, __poll, __order}; + __atomic_wait_backoff_impl<_AtomicWaitable, __decay_t<_Poll> > __backoff_fn = {__a, __poll, __order}; + std::__libcpp_thread_poll_with_backoff(__poll_impl, __backoff_fn); +} + +template +_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void __atomic_notify_one(const _AtomicWaitable& __a) { + static_assert(__atomic_waitable<_AtomicWaitable>::value, ""); + std::__cxx_atomic_notify_one(__atomic_waitable_traits<__decay_t<_AtomicWaitable> >::__atomic_contention_address(__a)); +} + +template +_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void __atomic_notify_all(const _AtomicWaitable& __a) { + static_assert(__atomic_waitable<_AtomicWaitable>::value, ""); + std::__cxx_atomic_notify_all(__atomic_waitable_traits<__decay_t<_AtomicWaitable> >::__atomic_contention_address(__a)); } #else // _LIBCPP_HAS_NO_THREADS -template -_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_notify_all(__cxx_atomic_impl<_Tp> const volatile*) {} -template -_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_notify_one(__cxx_atomic_impl<_Tp> const volatile*) {} 
-template -_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_wait(_Atp*, _Fn&& __test_fn) { - return std::__libcpp_thread_poll_with_backoff(__test_fn, __spinning_backoff_policy()); +template +_LIBCPP_HIDE_FROM_ABI void __atomic_wait_unless(const _AtomicWaitable& __a, _Poll&& __poll, memory_order __order) { + __atomic_wait_poll_impl<_AtomicWaitable, __decay_t<_Poll> > __poll_fn = {__a, __poll, __order}; + std::__libcpp_thread_poll_with_backoff(__poll_fn, __spinning_backoff_policy()); } +template +_LIBCPP_HIDE_FROM_ABI void __atomic_notify_one(const _AtomicWaitable&) {} + +template +_LIBCPP_HIDE_FROM_ABI void __atomic_notify_all(const _AtomicWaitable&) {} + #endif // _LIBCPP_HAS_NO_THREADS template @@ -86,21 +184,20 @@ _LIBCPP_HIDE_FROM_ABI bool __cxx_nonatomic_compare_equal(_Tp const& __lhs, _Tp c return std::memcmp(std::addressof(__lhs), std::addressof(__rhs), sizeof(_Tp)) == 0; } -template -struct __cxx_atomic_wait_test_fn_impl { - _Atp* __a; - _Tp __val; - memory_order __order; - _LIBCPP_HIDE_FROM_ABI bool operator()() const { - return !std::__cxx_nonatomic_compare_equal(std::__cxx_atomic_load(__a, __order), __val); +template +struct __atomic_compare_unequal_to { + _Tp __val_; + _LIBCPP_HIDE_FROM_ABI bool operator()(const _Tp& __arg) const { + return !std::__cxx_nonatomic_compare_equal(__arg, __val_); } }; -template -_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI bool -__cxx_atomic_wait(_Atp* __a, _Tp const __val, memory_order __order) { - __cxx_atomic_wait_test_fn_impl<_Atp, _Tp> __test_fn = {__a, __val, __order}; - return std::__cxx_atomic_wait(__a, __test_fn); +template +_LIBCPP_AVAILABILITY_SYNC _LIBCPP_HIDE_FROM_ABI void +__atomic_wait(_AtomicWaitable& __a, _Up __val, memory_order __order) { + static_assert(__atomic_waitable<_AtomicWaitable>::value, ""); + __atomic_compare_unequal_to<_Up> __nonatomic_equal = {__val}; + std::__atomic_wait_unless(__a, __nonatomic_equal, __order); } _LIBCPP_END_NAMESPACE_STD diff --git a/lib/libcxx/include/__atomic/check_memory_order.h 
b/lib/libcxx/include/__atomic/check_memory_order.h index 3012aec0521b..536f764a6190 100644 --- a/lib/libcxx/include/__atomic/check_memory_order.h +++ b/lib/libcxx/include/__atomic/check_memory_order.h @@ -27,4 +27,8 @@ _LIBCPP_DIAGNOSE_WARNING(__f == memory_order_release || __f == memory_order_acq_rel, \ "memory order argument to atomic operation is invalid") +#define _LIBCPP_CHECK_WAIT_MEMORY_ORDER(__m) \ + _LIBCPP_DIAGNOSE_WARNING(__m == memory_order_release || __m == memory_order_acq_rel, \ + "memory order argument to atomic operation is invalid") + #endif // _LIBCPP___ATOMIC_CHECK_MEMORY_ORDER_H diff --git a/lib/libcxx/include/__atomic/cxx_atomic_impl.h b/lib/libcxx/include/__atomic/cxx_atomic_impl.h index 1a0b808a0cb1..18e88aa97bec 100644 --- a/lib/libcxx/include/__atomic/cxx_atomic_impl.h +++ b/lib/libcxx/include/__atomic/cxx_atomic_impl.h @@ -9,16 +9,14 @@ #ifndef _LIBCPP___ATOMIC_CXX_ATOMIC_IMPL_H #define _LIBCPP___ATOMIC_CXX_ATOMIC_IMPL_H -#include <__atomic/is_always_lock_free.h> #include <__atomic/memory_order.h> +#include <__atomic/to_gcc_order.h> #include <__config> #include <__memory/addressof.h> -#include <__type_traits/conditional.h> #include <__type_traits/is_assignable.h> #include <__type_traits/is_trivially_copyable.h> #include <__type_traits/remove_const.h> #include -#include #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header @@ -26,7 +24,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD -#if defined(_LIBCPP_HAS_GCC_ATOMIC_IMP) || defined(_LIBCPP_ATOMIC_ONLY_USE_BUILTINS) +#if defined(_LIBCPP_HAS_GCC_ATOMIC_IMP) // [atomics.types.generic]p1 guarantees _Tp is trivially copyable. 
Because // the default operator= in an object is not volatile, a byte-by-byte copy @@ -44,10 +42,6 @@ _LIBCPP_HIDE_FROM_ABI void __cxx_atomic_assign_volatile(_Tp volatile& __a_value, *__to++ = *__from++; } -#endif - -#if defined(_LIBCPP_HAS_GCC_ATOMIC_IMP) - template struct __cxx_atomic_base_impl { _LIBCPP_HIDE_FROM_ABI @@ -61,32 +55,6 @@ struct __cxx_atomic_base_impl { _Tp __a_value; }; -_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_order(memory_order __order) { - // Avoid switch statement to make this a constexpr. - return __order == memory_order_relaxed - ? __ATOMIC_RELAXED - : (__order == memory_order_acquire - ? __ATOMIC_ACQUIRE - : (__order == memory_order_release - ? __ATOMIC_RELEASE - : (__order == memory_order_seq_cst - ? __ATOMIC_SEQ_CST - : (__order == memory_order_acq_rel ? __ATOMIC_ACQ_REL : __ATOMIC_CONSUME)))); -} - -_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_failure_order(memory_order __order) { - // Avoid switch statement to make this a constexpr. - return __order == memory_order_relaxed - ? __ATOMIC_RELAXED - : (__order == memory_order_acquire - ? __ATOMIC_ACQUIRE - : (__order == memory_order_release - ? __ATOMIC_RELAXED - : (__order == memory_order_seq_cst - ? __ATOMIC_SEQ_CST - : (__order == memory_order_acq_rel ? 
__ATOMIC_ACQUIRE : __ATOMIC_CONSUME)))); -} - template _LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val) { __cxx_atomic_assign_volatile(__a->__a_value, __val); @@ -529,289 +497,7 @@ __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern, memory_o #endif // _LIBCPP_HAS_GCC_ATOMIC_IMP, _LIBCPP_HAS_C_ATOMIC_IMP -#ifdef _LIBCPP_ATOMIC_ONLY_USE_BUILTINS - -template -struct __cxx_atomic_lock_impl { - _LIBCPP_HIDE_FROM_ABI __cxx_atomic_lock_impl() _NOEXCEPT : __a_value(), __a_lock(0) {} - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR explicit __cxx_atomic_lock_impl(_Tp value) _NOEXCEPT - : __a_value(value), - __a_lock(0) {} - - _Tp __a_value; - mutable __cxx_atomic_base_impl<_LIBCPP_ATOMIC_FLAG_TYPE> __a_lock; - - _LIBCPP_HIDE_FROM_ABI void __lock() const volatile { - while (1 == __cxx_atomic_exchange(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(true), memory_order_acquire)) - /*spin*/; - } - _LIBCPP_HIDE_FROM_ABI void __lock() const { - while (1 == __cxx_atomic_exchange(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(true), memory_order_acquire)) - /*spin*/; - } - _LIBCPP_HIDE_FROM_ABI void __unlock() const volatile { - __cxx_atomic_store(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(false), memory_order_release); - } - _LIBCPP_HIDE_FROM_ABI void __unlock() const { - __cxx_atomic_store(&__a_lock, _LIBCPP_ATOMIC_FLAG_TYPE(false), memory_order_release); - } - _LIBCPP_HIDE_FROM_ABI _Tp __read() const volatile { - __lock(); - _Tp __old; - __cxx_atomic_assign_volatile(__old, __a_value); - __unlock(); - return __old; - } - _LIBCPP_HIDE_FROM_ABI _Tp __read() const { - __lock(); - _Tp __old = __a_value; - __unlock(); - return __old; - } - _LIBCPP_HIDE_FROM_ABI void __read_inplace(_Tp* __dst) const volatile { - __lock(); - __cxx_atomic_assign_volatile(*__dst, __a_value); - __unlock(); - } - _LIBCPP_HIDE_FROM_ABI void __read_inplace(_Tp* __dst) const { - __lock(); - *__dst = __a_value; - __unlock(); - } -}; - -template -_LIBCPP_HIDE_FROM_ABI void 
__cxx_atomic_init(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __val) { - __cxx_atomic_assign_volatile(__a->__a_value, __val); -} -template -_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __val) { - __a->__a_value = __val; -} - -template -_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_store(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __val, memory_order) { - __a->__lock(); - __cxx_atomic_assign_volatile(__a->__a_value, __val); - __a->__unlock(); -} -template -_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_store(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __val, memory_order) { - __a->__lock(); - __a->__a_value = __val; - __a->__unlock(); -} - -template -_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_load(const volatile __cxx_atomic_lock_impl<_Tp>* __a, memory_order) { - return __a->__read(); -} -template -_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_load(const __cxx_atomic_lock_impl<_Tp>* __a, memory_order) { - return __a->__read(); -} - -template -_LIBCPP_HIDE_FROM_ABI void -__cxx_atomic_load(const volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp* __dst, memory_order) { - __a->__read_inplace(__dst); -} -template -_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_load(const __cxx_atomic_lock_impl<_Tp>* __a, _Tp* __dst, memory_order) { - __a->__read_inplace(__dst); -} - -template -_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_exchange(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __value, memory_order) { - __a->__lock(); - _Tp __old; - __cxx_atomic_assign_volatile(__old, __a->__a_value); - __cxx_atomic_assign_volatile(__a->__a_value, __value); - __a->__unlock(); - return __old; -} -template -_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_exchange(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __value, memory_order) { - __a->__lock(); - _Tp __old = __a->__a_value; - __a->__a_value = __value; - __a->__unlock(); - return __old; -} - -template -_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong( - volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order, 
memory_order) { - _Tp __temp; - __a->__lock(); - __cxx_atomic_assign_volatile(__temp, __a->__a_value); - bool __ret = (std::memcmp(&__temp, __expected, sizeof(_Tp)) == 0); - if (__ret) - __cxx_atomic_assign_volatile(__a->__a_value, __value); - else - __cxx_atomic_assign_volatile(*__expected, __a->__a_value); - __a->__unlock(); - return __ret; -} -template -_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_strong( - __cxx_atomic_lock_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order, memory_order) { - __a->__lock(); - bool __ret = (std::memcmp(&__a->__a_value, __expected, sizeof(_Tp)) == 0); - if (__ret) - std::memcpy(&__a->__a_value, &__value, sizeof(_Tp)); - else - std::memcpy(__expected, &__a->__a_value, sizeof(_Tp)); - __a->__unlock(); - return __ret; -} - -template -_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak( - volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order, memory_order) { - _Tp __temp; - __a->__lock(); - __cxx_atomic_assign_volatile(__temp, __a->__a_value); - bool __ret = (std::memcmp(&__temp, __expected, sizeof(_Tp)) == 0); - if (__ret) - __cxx_atomic_assign_volatile(__a->__a_value, __value); - else - __cxx_atomic_assign_volatile(*__expected, __a->__a_value); - __a->__unlock(); - return __ret; -} -template -_LIBCPP_HIDE_FROM_ABI bool __cxx_atomic_compare_exchange_weak( - __cxx_atomic_lock_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order, memory_order) { - __a->__lock(); - bool __ret = (std::memcmp(&__a->__a_value, __expected, sizeof(_Tp)) == 0); - if (__ret) - std::memcpy(&__a->__a_value, &__value, sizeof(_Tp)); - else - std::memcpy(__expected, &__a->__a_value, sizeof(_Tp)); - __a->__unlock(); - return __ret; -} - -template -_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_add(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Td __delta, memory_order) { - __a->__lock(); - _Tp __old; - __cxx_atomic_assign_volatile(__old, __a->__a_value); - __cxx_atomic_assign_volatile(__a->__a_value, 
_Tp(__old + __delta)); - __a->__unlock(); - return __old; -} -template -_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_add(__cxx_atomic_lock_impl<_Tp>* __a, _Td __delta, memory_order) { - __a->__lock(); - _Tp __old = __a->__a_value; - __a->__a_value += __delta; - __a->__unlock(); - return __old; -} - -template -_LIBCPP_HIDE_FROM_ABI _Tp* -__cxx_atomic_fetch_add(volatile __cxx_atomic_lock_impl<_Tp*>* __a, ptrdiff_t __delta, memory_order) { - __a->__lock(); - _Tp* __old; - __cxx_atomic_assign_volatile(__old, __a->__a_value); - __cxx_atomic_assign_volatile(__a->__a_value, __old + __delta); - __a->__unlock(); - return __old; -} -template -_LIBCPP_HIDE_FROM_ABI _Tp* __cxx_atomic_fetch_add(__cxx_atomic_lock_impl<_Tp*>* __a, ptrdiff_t __delta, memory_order) { - __a->__lock(); - _Tp* __old = __a->__a_value; - __a->__a_value += __delta; - __a->__unlock(); - return __old; -} - -template -_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_sub(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Td __delta, memory_order) { - __a->__lock(); - _Tp __old; - __cxx_atomic_assign_volatile(__old, __a->__a_value); - __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old - __delta)); - __a->__unlock(); - return __old; -} -template -_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_sub(__cxx_atomic_lock_impl<_Tp>* __a, _Td __delta, memory_order) { - __a->__lock(); - _Tp __old = __a->__a_value; - __a->__a_value -= __delta; - __a->__unlock(); - return __old; -} - -template -_LIBCPP_HIDE_FROM_ABI _Tp -__cxx_atomic_fetch_and(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __pattern, memory_order) { - __a->__lock(); - _Tp __old; - __cxx_atomic_assign_volatile(__old, __a->__a_value); - __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old & __pattern)); - __a->__unlock(); - return __old; -} -template -_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_and(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __pattern, memory_order) { - __a->__lock(); - _Tp __old = __a->__a_value; - __a->__a_value &= __pattern; - __a->__unlock(); - 
return __old; -} - -template -_LIBCPP_HIDE_FROM_ABI _Tp -__cxx_atomic_fetch_or(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __pattern, memory_order) { - __a->__lock(); - _Tp __old; - __cxx_atomic_assign_volatile(__old, __a->__a_value); - __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old | __pattern)); - __a->__unlock(); - return __old; -} -template -_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_or(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __pattern, memory_order) { - __a->__lock(); - _Tp __old = __a->__a_value; - __a->__a_value |= __pattern; - __a->__unlock(); - return __old; -} - -template -_LIBCPP_HIDE_FROM_ABI _Tp -__cxx_atomic_fetch_xor(volatile __cxx_atomic_lock_impl<_Tp>* __a, _Tp __pattern, memory_order) { - __a->__lock(); - _Tp __old; - __cxx_atomic_assign_volatile(__old, __a->__a_value); - __cxx_atomic_assign_volatile(__a->__a_value, _Tp(__old ^ __pattern)); - __a->__unlock(); - return __old; -} -template -_LIBCPP_HIDE_FROM_ABI _Tp __cxx_atomic_fetch_xor(__cxx_atomic_lock_impl<_Tp>* __a, _Tp __pattern, memory_order) { - __a->__lock(); - _Tp __old = __a->__a_value; - __a->__a_value ^= __pattern; - __a->__unlock(); - return __old; -} - -template ::__value, - __cxx_atomic_base_impl<_Tp>, - __cxx_atomic_lock_impl<_Tp> >::type> -#else template > -#endif //_LIBCPP_ATOMIC_ONLY_USE_BUILTINS struct __cxx_atomic_impl : public _Base { static_assert(is_trivially_copyable<_Tp>::value, "std::atomic requires that 'T' be a trivially copyable type"); diff --git a/lib/libcxx/include/__atomic/memory_order.h b/lib/libcxx/include/__atomic/memory_order.h index 16fd1867698f..294121d1c4e7 100644 --- a/lib/libcxx/include/__atomic/memory_order.h +++ b/lib/libcxx/include/__atomic/memory_order.h @@ -37,7 +37,7 @@ enum class memory_order : __memory_order_underlying_t { seq_cst = __mo_seq_cst }; -static_assert((is_same::type, __memory_order_underlying_t>::value), +static_assert(is_same::type, __memory_order_underlying_t>::value, "unexpected underlying type for std::memory_order"); 
inline constexpr auto memory_order_relaxed = memory_order::relaxed; diff --git a/lib/libcxx/include/__atomic/to_gcc_order.h b/lib/libcxx/include/__atomic/to_gcc_order.h new file mode 100644 index 000000000000..d04c111addd3 --- /dev/null +++ b/lib/libcxx/include/__atomic/to_gcc_order.h @@ -0,0 +1,54 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___ATOMIC_TO_GCC_ORDER_H +#define _LIBCPP___ATOMIC_TO_GCC_ORDER_H + +#include <__atomic/memory_order.h> +#include <__config> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_BEGIN_NAMESPACE_STD + +#if defined(__ATOMIC_RELAXED) && defined(__ATOMIC_CONSUME) && defined(__ATOMIC_ACQUIRE) && \ + defined(__ATOMIC_RELEASE) && defined(__ATOMIC_ACQ_REL) && defined(__ATOMIC_SEQ_CST) + +_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_order(memory_order __order) { + // Avoid switch statement to make this a constexpr. + return __order == memory_order_relaxed + ? __ATOMIC_RELAXED + : (__order == memory_order_acquire + ? __ATOMIC_ACQUIRE + : (__order == memory_order_release + ? __ATOMIC_RELEASE + : (__order == memory_order_seq_cst + ? __ATOMIC_SEQ_CST + : (__order == memory_order_acq_rel ? __ATOMIC_ACQ_REL : __ATOMIC_CONSUME)))); +} + +_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_failure_order(memory_order __order) { + // Avoid switch statement to make this a constexpr. + return __order == memory_order_relaxed + ? __ATOMIC_RELAXED + : (__order == memory_order_acquire + ? __ATOMIC_ACQUIRE + : (__order == memory_order_release + ? __ATOMIC_RELAXED + : (__order == memory_order_seq_cst + ? 
__ATOMIC_SEQ_CST + : (__order == memory_order_acq_rel ? __ATOMIC_ACQUIRE : __ATOMIC_CONSUME)))); +} + +#endif + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP___ATOMIC_TO_GCC_ORDER_H diff --git a/lib/libcxx/include/__availability b/lib/libcxx/include/__availability deleted file mode 100644 index b8b2da9bb122..000000000000 --- a/lib/libcxx/include/__availability +++ /dev/null @@ -1,324 +0,0 @@ -// -*- C++ -*- -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___AVAILABILITY -#define _LIBCPP___AVAILABILITY - -#include <__config> - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -// Libc++ is shipped by various vendors. In particular, it is used as a system -// library on macOS, iOS and other Apple platforms. In order for users to be -// able to compile a binary that is intended to be deployed to an older version -// of a platform, Clang provides availability attributes [1]. These attributes -// can be placed on declarations and are used to describe the life cycle of a -// symbol in the library. -// -// The main goal is to ensure a compile-time error if a symbol that hasn't been -// introduced in a previously released library is used in a program that targets -// that previously released library. Normally, this would be a load-time error -// when one tries to launch the program against the older library. -// -// For example, the filesystem library was introduced in the dylib in macOS 10.15. 
-// If a user compiles on a macOS 10.15 host but targets macOS 10.13 with their -// program, the compiler would normally not complain (because the required -// declarations are in the headers), but the dynamic loader would fail to find -// the symbols when actually trying to launch the program on macOS 10.13. To -// turn this into a compile-time issue instead, declarations are annotated with -// when they were introduced, and the compiler can produce a diagnostic if the -// program references something that isn't available on the deployment target. -// -// This mechanism is general in nature, and any vendor can add their markup to -// the library (see below). Whenever a new feature is added that requires support -// in the shared library, two macros are added below to allow marking the feature -// as unavailable: -// 1. A macro named `_LIBCPP_AVAILABILITY_HAS_NO_` which must be defined -// exactly when compiling for a target that doesn't support the feature. -// 2. A macro named `_LIBCPP_AVAILABILITY_`, which must always be defined -// and must expand to the proper availability attribute for the platform. -// -// When vendors decide to ship the feature as part of their shared library, they -// can update these macros appropriately for their platform, and the library will -// use those to provide an optimal user experience. -// -// Furthermore, many features in the standard library have corresponding -// feature-test macros. The `_LIBCPP_AVAILABILITY_HAS_NO_` macros -// are checked by the corresponding feature-test macros generated by -// generate_feature_test_macro_components.py to ensure that the library -// doesn't announce a feature as being implemented if it is unavailable on -// the deployment target. -// -// Note that this mechanism is disabled by default in the "upstream" libc++. -// Availability annotations are only meaningful when shipping libc++ inside -// a platform (i.e. 
as a system library), and so vendors that want them should -// turn those annotations on at CMake configuration time. -// -// [1]: https://clang.llvm.org/docs/AttributeReference.html#availability - -// For backwards compatibility, allow users to define _LIBCPP_DISABLE_AVAILABILITY -// for a while. -#if defined(_LIBCPP_DISABLE_AVAILABILITY) -# if !defined(_LIBCPP_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS) -# define _LIBCPP_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS -# endif -#endif - -// Availability markup is disabled when building the library, or when a non-Clang -// compiler is used because only Clang supports the necessary attributes. -// doesn't support the proper attributes. -#if defined(_LIBCPP_BUILDING_LIBRARY) || defined(_LIBCXXABI_BUILDING_LIBRARY) || !defined(_LIBCPP_COMPILER_CLANG_BASED) -# if !defined(_LIBCPP_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS) -# define _LIBCPP_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS -# endif -#endif - -#if defined(_LIBCPP_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS) - -// These macros control the availability of std::bad_optional_access and -// other exception types. These were put in the shared library to prevent -// code bloat from every user program defining the vtable for these exception -// types. -// -// Note that when exceptions are disabled, the methods that normally throw -// these exceptions can be used even on older deployment targets, but those -// methods will abort instead of throwing. -# define _LIBCPP_AVAILABILITY_HAS_BAD_OPTIONAL_ACCESS 1 -# define _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS - -# define _LIBCPP_AVAILABILITY_HAS_BAD_VARIANT_ACCESS 1 -# define _LIBCPP_AVAILABILITY_BAD_VARIANT_ACCESS - -# define _LIBCPP_AVAILABILITY_HAS_BAD_ANY_CAST 1 -# define _LIBCPP_AVAILABILITY_BAD_ANY_CAST - -// These macros controls the availability of __cxa_init_primary_exception -// in the built library, which std::make_exception_ptr might use -// (see libcxx/include/__exception/exception_ptr.h). 
-# define _LIBCPP_AVAILABILITY_HAS_INIT_PRIMARY_EXCEPTION 1 -# define _LIBCPP_AVAILABILITY_INIT_PRIMARY_EXCEPTION - -// These macros control the availability of all parts of that -// depend on something in the dylib. -# define _LIBCPP_AVAILABILITY_HAS_FILESYSTEM_LIBRARY 1 -# define _LIBCPP_AVAILABILITY_FILESYSTEM_LIBRARY -# define _LIBCPP_AVAILABILITY_FILESYSTEM_LIBRARY_PUSH -# define _LIBCPP_AVAILABILITY_FILESYSTEM_LIBRARY_POP - -// This controls the availability of floating-point std::to_chars functions. -// These overloads were added later than the integer overloads. -# define _LIBCPP_AVAILABILITY_HAS_TO_CHARS_FLOATING_POINT 1 -# define _LIBCPP_AVAILABILITY_TO_CHARS_FLOATING_POINT - -// This controls the availability of the C++20 synchronization library, -// which requires shared library support for various operations -// (see libcxx/src/atomic.cpp). This includes , , -// , and notification functions on std::atomic. -# define _LIBCPP_AVAILABILITY_HAS_SYNC 1 -# define _LIBCPP_AVAILABILITY_SYNC - -// This controls whether the library claims to provide a default verbose -// termination function, and consequently whether the headers will try -// to use it when the mechanism isn't overriden at compile-time. -# define _LIBCPP_AVAILABILITY_HAS_VERBOSE_ABORT 1 -# define _LIBCPP_AVAILABILITY_VERBOSE_ABORT - -// This controls the availability of the C++17 std::pmr library, -// which is implemented in large part in the built library. -# define _LIBCPP_AVAILABILITY_HAS_PMR 1 -# define _LIBCPP_AVAILABILITY_PMR - -// This controls the availability of the C++20 time zone database. -// The parser code is built in the library. -# define _LIBCPP_AVAILABILITY_HAS_TZDB 1 -# define _LIBCPP_AVAILABILITY_TZDB - -// This controls the availability of C++23 , which -// has a dependency on the built library (it needs access to -// the underlying buffer types of std::cout, std::cerr, and std::clog. 
-# define _LIBCPP_AVAILABILITY_HAS_PRINT 1 -# define _LIBCPP_AVAILABILITY_PRINT - -// Enable additional explicit instantiations of iostreams components. This -// reduces the number of weak definitions generated in programs that use -// iostreams by providing a single strong definition in the shared library. -// -// TODO: Enable additional explicit instantiations on GCC once it supports exclude_from_explicit_instantiation, -// or once libc++ doesn't use the attribute anymore. -// TODO: Enable them on Windows once https://llvm.org/PR41018 has been fixed. -# if !defined(_LIBCPP_COMPILER_GCC) && !defined(_WIN32) -# define _LIBCPP_AVAILABILITY_HAS_ADDITIONAL_IOSTREAM_EXPLICIT_INSTANTIATIONS_1 1 -# else -# define _LIBCPP_AVAILABILITY_HAS_ADDITIONAL_IOSTREAM_EXPLICIT_INSTANTIATIONS_1 0 -# endif - -#elif defined(__APPLE__) - -# define _LIBCPP_AVAILABILITY_HAS_BAD_OPTIONAL_ACCESS \ - (!defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) || __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ >= 50000) - -# define _LIBCPP_AVAILABILITY_HAS_BAD_VARIANT_ACCESS _LIBCPP_AVAILABILITY_HAS_BAD_OPTIONAL_ACCESS -# define _LIBCPP_AVAILABILITY_HAS_BAD_ANY_CAST _LIBCPP_AVAILABILITY_HAS_BAD_OPTIONAL_ACCESS - -# define _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS __attribute__((availability(watchos, strict, introduced = 5.0))) -# define _LIBCPP_AVAILABILITY_BAD_VARIANT_ACCESS _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS -# define _LIBCPP_AVAILABILITY_BAD_ANY_CAST _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS - -// TODO: Update once this is released -# define _LIBCPP_AVAILABILITY_HAS_INIT_PRIMARY_EXCEPTION 0 -# define _LIBCPP_AVAILABILITY_INIT_PRIMARY_EXCEPTION __attribute__((unavailable)) - -// -// clang-format off -# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101500) || \ - (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 130000) || \ - 
(defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 130000) || \ - (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 60000) -// clang-format on -# define _LIBCPP_AVAILABILITY_HAS_FILESYSTEM_LIBRARY 0 -# else -# define _LIBCPP_AVAILABILITY_HAS_FILESYSTEM_LIBRARY 1 -# endif -# define _LIBCPP_AVAILABILITY_FILESYSTEM_LIBRARY \ - __attribute__((availability(macos, strict, introduced = 10.15))) \ - __attribute__((availability(ios, strict, introduced = 13.0))) \ - __attribute__((availability(tvos, strict, introduced = 13.0))) \ - __attribute__((availability(watchos, strict, introduced = 6.0))) -// clang-format off -# define _LIBCPP_AVAILABILITY_FILESYSTEM_LIBRARY_PUSH \ - _Pragma("clang attribute push(__attribute__((availability(macos,strict,introduced=10.15))), apply_to=any(function,record))") \ - _Pragma("clang attribute push(__attribute__((availability(ios,strict,introduced=13.0))), apply_to=any(function,record))") \ - _Pragma("clang attribute push(__attribute__((availability(tvos,strict,introduced=13.0))), apply_to=any(function,record))") \ - _Pragma("clang attribute push(__attribute__((availability(watchos,strict,introduced=6.0))), apply_to=any(function,record))") -# define _LIBCPP_AVAILABILITY_FILESYSTEM_LIBRARY_POP \ - _Pragma("clang attribute pop") \ - _Pragma("clang attribute pop") \ - _Pragma("clang attribute pop") \ - _Pragma("clang attribute pop") -// clang-format on - -// std::to_chars(floating-point) -// clang-format off -# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 130300) || \ - (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 160300) || \ - (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 160300) || \ - (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && 
__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 90300) -// clang-format on -# define _LIBCPP_AVAILABILITY_HAS_TO_CHARS_FLOATING_POINT 0 -# else -# define _LIBCPP_AVAILABILITY_HAS_TO_CHARS_FLOATING_POINT 1 -# endif -# define _LIBCPP_AVAILABILITY_TO_CHARS_FLOATING_POINT \ - __attribute__((availability(macos, strict, introduced = 13.3))) \ - __attribute__((availability(ios, strict, introduced = 16.3))) \ - __attribute__((availability(tvos, strict, introduced = 16.3))) \ - __attribute__((availability(watchos, strict, introduced = 9.3))) - -// c++20 synchronization library -// clang-format off -# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 110000) || \ - (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 140000) || \ - (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 140000) || \ - (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 70000) -// clang-format on -# define _LIBCPP_AVAILABILITY_HAS_SYNC 0 -# else -# define _LIBCPP_AVAILABILITY_HAS_SYNC 1 -# endif -# define _LIBCPP_AVAILABILITY_SYNC \ - __attribute__((availability(macos, strict, introduced = 11.0))) \ - __attribute__((availability(ios, strict, introduced = 14.0))) \ - __attribute__((availability(tvos, strict, introduced = 14.0))) \ - __attribute__((availability(watchos, strict, introduced = 7.0))) - -// __libcpp_verbose_abort -// TODO: Update once this is released -# define _LIBCPP_AVAILABILITY_HAS_VERBOSE_ABORT 0 - -# define _LIBCPP_AVAILABILITY_VERBOSE_ABORT __attribute__((unavailable)) - -// std::pmr -// clang-format off -# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 140000) || \ - (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 170000) || \ - 
(defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 170000) || \ - (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 100000) -// clang-format on -# define _LIBCPP_AVAILABILITY_HAS_PMR 0 -# else -# define _LIBCPP_AVAILABILITY_HAS_PMR 1 -# endif -// TODO: Enable std::pmr markup once https://github.com/llvm/llvm-project/issues/40340 has been fixed -// Until then, it is possible for folks to try to use `std::pmr` when back-deploying to targets that don't support -// it and it'll be a load-time error, but we don't have a good alternative because the library won't compile if we -// use availability annotations until that bug has been fixed. -# if 0 -# define _LIBCPP_AVAILABILITY_PMR \ - __attribute__((availability(macos, strict, introduced = 14.0))) \ - __attribute__((availability(ios, strict, introduced = 17.0))) \ - __attribute__((availability(tvos, strict, introduced = 17.0))) \ - __attribute__((availability(watchos, strict, introduced = 10.0))) -# else -# define _LIBCPP_AVAILABILITY_PMR -# endif - -# define _LIBCPP_AVAILABILITY_HAS_TZDB 0 -# define _LIBCPP_AVAILABILITY_TZDB __attribute__((unavailable)) - -// Warning: This availability macro works differently than the other macros. -// The dylib part of print is not needed on Apple platforms. Therefore when -// the macro is not available the code calling the dylib is commented out. -// The macro _LIBCPP_AVAILABILITY_PRINT is not used. 
-# define _LIBCPP_AVAILABILITY_HAS_PRINT 0 -# define _LIBCPP_AVAILABILITY_PRINT __attribute__((unavailable)) - -// clang-format off -# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 120000) || \ - (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 150000) || \ - (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 150000) || \ - (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 80000) -// clang-format on -# define _LIBCPP_AVAILABILITY_HAS_ADDITIONAL_IOSTREAM_EXPLICIT_INSTANTIATIONS_1 0 -# else -# define _LIBCPP_AVAILABILITY_HAS_ADDITIONAL_IOSTREAM_EXPLICIT_INSTANTIATIONS_1 1 -# endif -#else - -// ...New vendors can add availability markup here... - -# error \ - "It looks like you're trying to enable vendor availability markup, but you haven't defined the corresponding macros yet!" - -#endif - -// Define availability attributes that depend on _LIBCPP_HAS_NO_EXCEPTIONS. -// Those are defined in terms of the availability attributes above, and -// should not be vendor-specific. -#if defined(_LIBCPP_HAS_NO_EXCEPTIONS) -# define _LIBCPP_AVAILABILITY_THROW_BAD_ANY_CAST -# define _LIBCPP_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS -# define _LIBCPP_AVAILABILITY_THROW_BAD_VARIANT_ACCESS -#else -# define _LIBCPP_AVAILABILITY_THROW_BAD_ANY_CAST _LIBCPP_AVAILABILITY_BAD_ANY_CAST -# define _LIBCPP_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS -# define _LIBCPP_AVAILABILITY_THROW_BAD_VARIANT_ACCESS _LIBCPP_AVAILABILITY_BAD_VARIANT_ACCESS -#endif - -// Define availability attributes that depend on both -// _LIBCPP_HAS_NO_EXCEPTIONS and _LIBCPP_HAS_NO_RTTI. 
-#if defined(_LIBCPP_HAS_NO_EXCEPTIONS) || defined(_LIBCPP_HAS_NO_RTTI) -# undef _LIBCPP_AVAILABILITY_HAS_INIT_PRIMARY_EXCEPTION -# undef _LIBCPP_AVAILABILITY_INIT_PRIMARY_EXCEPTION -# define _LIBCPP_AVAILABILITY_HAS_INIT_PRIMARY_EXCEPTION 0 -# define _LIBCPP_AVAILABILITY_INIT_PRIMARY_EXCEPTION -#endif - -#endif // _LIBCPP___AVAILABILITY diff --git a/lib/libcxx/include/__bit/bit_cast.h b/lib/libcxx/include/__bit/bit_cast.h index f20b39ae748b..cd0456738179 100644 --- a/lib/libcxx/include/__bit/bit_cast.h +++ b/lib/libcxx/include/__bit/bit_cast.h @@ -19,12 +19,21 @@ _LIBCPP_BEGIN_NAMESPACE_STD +#ifndef _LIBCPP_CXX03_LANG + +template +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI constexpr _ToType __bit_cast(const _FromType& __from) noexcept { + return __builtin_bit_cast(_ToType, __from); +} + +#endif // _LIBCPP_CXX03_LANG + #if _LIBCPP_STD_VER >= 20 template requires(sizeof(_ToType) == sizeof(_FromType) && is_trivially_copyable_v<_ToType> && is_trivially_copyable_v<_FromType>) -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _ToType bit_cast(const _FromType& __from) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _ToType bit_cast(const _FromType& __from) noexcept { return __builtin_bit_cast(_ToType, __from); } diff --git a/lib/libcxx/include/__bit/bit_ceil.h b/lib/libcxx/include/__bit/bit_ceil.h index 77fa739503bc..cfd792dc2e2a 100644 --- a/lib/libcxx/include/__bit/bit_ceil.h +++ b/lib/libcxx/include/__bit/bit_ceil.h @@ -24,7 +24,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD #if _LIBCPP_STD_VER >= 17 template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Tp __bit_ceil(_Tp __t) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Tp __bit_ceil(_Tp __t) noexcept { if (__t < 2) return 1; const unsigned __n = numeric_limits<_Tp>::digits - std::__countl_zero((_Tp)(__t - 1u)); @@ -42,7 +42,7 @@ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Tp __bit_ceil(_Tp __t) no # if _LIBCPP_STD_VER >= 20 template <__libcpp_unsigned_integer _Tp> 
-_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Tp bit_ceil(_Tp __t) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Tp bit_ceil(_Tp __t) noexcept { return std::__bit_ceil(__t); } diff --git a/lib/libcxx/include/__bit/bit_floor.h b/lib/libcxx/include/__bit/bit_floor.h index cf5cf5803ad6..133e369504e4 100644 --- a/lib/libcxx/include/__bit/bit_floor.h +++ b/lib/libcxx/include/__bit/bit_floor.h @@ -23,7 +23,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD #if _LIBCPP_STD_VER >= 20 template <__libcpp_unsigned_integer _Tp> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Tp bit_floor(_Tp __t) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Tp bit_floor(_Tp __t) noexcept { return __t == 0 ? 0 : _Tp{1} << std::__bit_log2(__t); } diff --git a/lib/libcxx/include/__bit/bit_width.h b/lib/libcxx/include/__bit/bit_width.h index a2020a01421e..853e481776f7 100644 --- a/lib/libcxx/include/__bit/bit_width.h +++ b/lib/libcxx/include/__bit/bit_width.h @@ -22,7 +22,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD template <__libcpp_unsigned_integer _Tp> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr int bit_width(_Tp __t) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr int bit_width(_Tp __t) noexcept { return __t == 0 ? 
0 : std::__bit_log2(__t) + 1; } diff --git a/lib/libcxx/include/__bit/byteswap.h b/lib/libcxx/include/__bit/byteswap.h index 20045d6fd43c..6225ecf2f92d 100644 --- a/lib/libcxx/include/__bit/byteswap.h +++ b/lib/libcxx/include/__bit/byteswap.h @@ -23,7 +23,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD #if _LIBCPP_STD_VER >= 23 template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Tp byteswap(_Tp __val) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Tp byteswap(_Tp __val) noexcept { if constexpr (sizeof(_Tp) == 1) { return __val; } else if constexpr (sizeof(_Tp) == 2) { diff --git a/lib/libcxx/include/__bit/countl.h b/lib/libcxx/include/__bit/countl.h index 396cfc2c3f40..998a0b44c19d 100644 --- a/lib/libcxx/include/__bit/countl.h +++ b/lib/libcxx/include/__bit/countl.h @@ -6,6 +6,9 @@ // //===----------------------------------------------------------------------===// +// TODO: __builtin_clzg is available since Clang 19 and GCC 14. When support for older versions is dropped, we can +// refactor this code to exclusively use __builtin_clzg. + #ifndef _LIBCPP___BIT_COUNTL_H #define _LIBCPP___BIT_COUNTL_H @@ -38,6 +41,9 @@ _LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR int __libcpp_cl #ifndef _LIBCPP_HAS_NO_INT128 inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR int __libcpp_clz(__uint128_t __x) _NOEXCEPT { +# if __has_builtin(__builtin_clzg) + return __builtin_clzg(__x); +# else // The function is written in this form due to C++ constexpr limitations. // The algorithm: // - Test whether any bit in the high 64-bits is set @@ -49,12 +55,16 @@ inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR int __libcpp_clz(__uint128_t __x) // zeros in the high 64-bits. return ((__x >> 64) == 0) ? 
(64 + __builtin_clzll(static_cast(__x))) : __builtin_clzll(static_cast(__x >> 64)); +# endif } #endif // _LIBCPP_HAS_NO_INT128 template _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 int __countl_zero(_Tp __t) _NOEXCEPT { static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__countl_zero requires an unsigned integer type"); +#if __has_builtin(__builtin_clzg) + return __builtin_clzg(__t, numeric_limits<_Tp>::digits); +#else // __has_builtin(__builtin_clzg) if (__t == 0) return numeric_limits<_Tp>::digits; @@ -79,17 +89,18 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 int __countl_zero(_Tp __t) _ } return __ret + __iter; } +#endif // __has_builtin(__builtin_clzg) } #if _LIBCPP_STD_VER >= 20 template <__libcpp_unsigned_integer _Tp> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr int countl_zero(_Tp __t) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr int countl_zero(_Tp __t) noexcept { return std::__countl_zero(__t); } template <__libcpp_unsigned_integer _Tp> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr int countl_one(_Tp __t) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr int countl_one(_Tp __t) noexcept { return __t != numeric_limits<_Tp>::max() ? std::countl_zero(static_cast<_Tp>(~__t)) : numeric_limits<_Tp>::digits; } diff --git a/lib/libcxx/include/__bit/countr.h b/lib/libcxx/include/__bit/countr.h index 0cc679f87a99..9e92021fba35 100644 --- a/lib/libcxx/include/__bit/countr.h +++ b/lib/libcxx/include/__bit/countr.h @@ -6,6 +6,9 @@ // //===----------------------------------------------------------------------===// +// TODO: __builtin_ctzg is available since Clang 19 and GCC 14. When support for older versions is dropped, we can +// refactor this code to exclusively use __builtin_ctzg. 
+ #ifndef _LIBCPP___BIT_COUNTR_H #define _LIBCPP___BIT_COUNTR_H @@ -35,13 +38,13 @@ _LIBCPP_NODISCARD inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR int __libcpp_ct return __builtin_ctzll(__x); } -#if _LIBCPP_STD_VER >= 20 - -template <__libcpp_unsigned_integer _Tp> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr int countr_zero(_Tp __t) noexcept { +template +_LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 int __countr_zero(_Tp __t) _NOEXCEPT { +#if __has_builtin(__builtin_ctzg) + return __builtin_ctzg(__t, numeric_limits<_Tp>::digits); +#else // __has_builtin(__builtin_ctzg) if (__t == 0) return numeric_limits<_Tp>::digits; - if (sizeof(_Tp) <= sizeof(unsigned int)) return std::__libcpp_ctz(static_cast(__t)); else if (sizeof(_Tp) <= sizeof(unsigned long)) @@ -57,10 +60,18 @@ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr int countr_zero(_Tp __t) n } return __ret + std::__libcpp_ctz(static_cast(__t)); } +#endif // __has_builtin(__builtin_ctzg) +} + +#if _LIBCPP_STD_VER >= 20 + +template <__libcpp_unsigned_integer _Tp> +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr int countr_zero(_Tp __t) noexcept { + return std::__countr_zero(__t); } template <__libcpp_unsigned_integer _Tp> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr int countr_one(_Tp __t) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr int countr_one(_Tp __t) noexcept { return __t != numeric_limits<_Tp>::max() ? 
std::countr_zero(static_cast<_Tp>(~__t)) : numeric_limits<_Tp>::digits; } diff --git a/lib/libcxx/include/__bit/has_single_bit.h b/lib/libcxx/include/__bit/has_single_bit.h index a4e178060a73..52f5853a1bc8 100644 --- a/lib/libcxx/include/__bit/has_single_bit.h +++ b/lib/libcxx/include/__bit/has_single_bit.h @@ -24,7 +24,7 @@ _LIBCPP_PUSH_MACROS _LIBCPP_BEGIN_NAMESPACE_STD template <__libcpp_unsigned_integer _Tp> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr bool has_single_bit(_Tp __t) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool has_single_bit(_Tp __t) noexcept { return __t != 0 && (((__t & (__t - 1)) == 0)); } diff --git a/lib/libcxx/include/__bit/popcount.h b/lib/libcxx/include/__bit/popcount.h index b0319cef2518..5cf0a01d0733 100644 --- a/lib/libcxx/include/__bit/popcount.h +++ b/lib/libcxx/include/__bit/popcount.h @@ -6,6 +6,9 @@ // //===----------------------------------------------------------------------===// +// TODO: __builtin_popcountg is available since Clang 19 and GCC 14. When support for older versions is dropped, we can +// refactor this code to exclusively use __builtin_popcountg. 
+ #ifndef _LIBCPP___BIT_POPCOUNT_H #define _LIBCPP___BIT_POPCOUNT_H @@ -38,7 +41,10 @@ inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR int __libcpp_popcount(unsigned lo #if _LIBCPP_STD_VER >= 20 template <__libcpp_unsigned_integer _Tp> -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr int popcount(_Tp __t) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr int popcount(_Tp __t) noexcept { +# if __has_builtin(__builtin_popcountg) + return __builtin_popcountg(__t); +# else // __has_builtin(__builtin_popcountg) if (sizeof(_Tp) <= sizeof(unsigned int)) return std::__libcpp_popcount(static_cast(__t)); else if (sizeof(_Tp) <= sizeof(unsigned long)) @@ -53,6 +59,7 @@ _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr int popcount(_Tp __t) noex } return __ret; } +# endif // __has_builtin(__builtin_popcountg) } #endif // _LIBCPP_STD_VER >= 20 diff --git a/lib/libcxx/include/__bit/rotate.h b/lib/libcxx/include/__bit/rotate.h index d848056c3350..90e430e9d042 100644 --- a/lib/libcxx/include/__bit/rotate.h +++ b/lib/libcxx/include/__bit/rotate.h @@ -20,24 +20,37 @@ _LIBCPP_BEGIN_NAMESPACE_STD +// Writing two full functions for rotl and rotr makes it easier for the compiler +// to optimize the code. On x86 this function becomes the ROL instruction and +// the rotr function becomes the ROR instruction. 
template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp __rotr(_Tp __t, int __cnt) _NOEXCEPT { - static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__rotr requires an unsigned integer type"); - const unsigned int __dig = numeric_limits<_Tp>::digits; - if ((__cnt % __dig) == 0) - return __t; +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp __rotl(_Tp __x, int __s) _NOEXCEPT { + static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__rotl requires an unsigned integer type"); + const int __N = numeric_limits<_Tp>::digits; + int __r = __s % __N; + + if (__r == 0) + return __x; - if (__cnt < 0) { - __cnt *= -1; - return (__t << (__cnt % __dig)) | (__t >> (__dig - (__cnt % __dig))); // rotr with negative __cnt is similar to rotl - } + if (__r > 0) + return (__x << __r) | (__x >> (__N - __r)); - return (__t >> (__cnt % __dig)) | (__t << (__dig - (__cnt % __dig))); + return (__x >> -__r) | (__x << (__N + __r)); } template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp __rotl(_Tp __t, int __cnt) _NOEXCEPT { - return std::__rotr(__t, -__cnt); +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp __rotr(_Tp __x, int __s) _NOEXCEPT { + static_assert(__libcpp_is_unsigned_integer<_Tp>::value, "__rotr requires an unsigned integer type"); + const int __N = numeric_limits<_Tp>::digits; + int __r = __s % __N; + + if (__r == 0) + return __x; + + if (__r > 0) + return (__x >> __r) | (__x << (__N - __r)); + + return (__x << -__r) | (__x >> (__N + __r)); } #if _LIBCPP_STD_VER >= 20 diff --git a/lib/libcxx/include/__bit_reference b/lib/libcxx/include/__bit_reference index 3a5339b72ddc..22637d439741 100644 --- a/lib/libcxx/include/__bit_reference +++ b/lib/libcxx/include/__bit_reference @@ -16,6 +16,7 @@ #include <__bit/countr.h> #include <__bit/invert_if.h> #include <__bit/popcount.h> +#include <__compare/ordering.h> #include <__config> #include <__fwd/bit_reference.h> #include <__iterator/iterator_traits.h> @@ -95,8 +96,8 @@ public: } private: - 
_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 explicit __bit_reference( - __storage_pointer __s, __storage_type __m) _NOEXCEPT + _LIBCPP_HIDE_FROM_ABI + _LIBCPP_CONSTEXPR_SINCE_CXX20 explicit __bit_reference(__storage_pointer __s, __storage_type __m) _NOEXCEPT : __seg_(__s), __mask_(__m) {} }; @@ -149,6 +150,7 @@ public: using __container = typename _Cp::__self; _LIBCPP_HIDE_FROM_ABI __bit_const_reference(const __bit_const_reference&) = default; + __bit_const_reference& operator=(const __bit_const_reference&) = delete; _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 __bit_const_reference(const __bit_reference<_Cp>& __x) _NOEXCEPT : __seg_(__x.__seg_), @@ -163,69 +165,12 @@ public: } private: - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR explicit __bit_const_reference( - __storage_pointer __s, __storage_type __m) _NOEXCEPT + _LIBCPP_HIDE_FROM_ABI + _LIBCPP_CONSTEXPR explicit __bit_const_reference(__storage_pointer __s, __storage_type __m) _NOEXCEPT : __seg_(__s), __mask_(__m) {} - - __bit_const_reference& operator=(const __bit_const_reference&) = delete; }; -// fill_n - -template -_LIBCPP_CONSTEXPR_SINCE_CXX20 _LIBCPP_HIDE_FROM_ABI void -__fill_n(__bit_iterator<_Cp, false> __first, typename _Cp::size_type __n) { - using _It = __bit_iterator<_Cp, false>; - using __storage_type = typename _It::__storage_type; - - const int __bits_per_word = _It::__bits_per_word; - // do first partial word - if (__first.__ctz_ != 0) { - __storage_type __clz_f = static_cast<__storage_type>(__bits_per_word - __first.__ctz_); - __storage_type __dn = std::min(__clz_f, __n); - __storage_type __m = (~__storage_type(0) << __first.__ctz_) & (~__storage_type(0) >> (__clz_f - __dn)); - if (_FillVal) - *__first.__seg_ |= __m; - else - *__first.__seg_ &= ~__m; - __n -= __dn; - ++__first.__seg_; - } - // do middle whole words - __storage_type __nw = __n / __bits_per_word; - std::fill_n(std::__to_address(__first.__seg_), __nw, _FillVal ? 
static_cast<__storage_type>(-1) : 0); - __n -= __nw * __bits_per_word; - // do last partial word - if (__n > 0) { - __first.__seg_ += __nw; - __storage_type __m = ~__storage_type(0) >> (__bits_per_word - __n); - if (_FillVal) - *__first.__seg_ |= __m; - else - *__first.__seg_ &= ~__m; - } -} - -template -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 void -fill_n(__bit_iterator<_Cp, false> __first, typename _Cp::size_type __n, bool __value) { - if (__n > 0) { - if (__value) - std::__fill_n(__first, __n); - else - std::__fill_n(__first, __n); - } -} - -// fill - -template -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 void -fill(__bit_iterator<_Cp, false> __first, __bit_iterator<_Cp, false> __last, bool __value) { - std::fill_n(__first, static_cast(__last - __first), __value); -} - // copy template @@ -969,6 +914,7 @@ public: return __x.__seg_ == __y.__seg_ && __x.__ctz_ == __y.__ctz_; } +#if _LIBCPP_STD_VER <= 17 _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 friend bool operator!=(const __bit_iterator& __x, const __bit_iterator& __y) { return !(__x == __y); @@ -993,10 +939,22 @@ public: operator>=(const __bit_iterator& __x, const __bit_iterator& __y) { return !(__x < __y); } +#else // _LIBCPP_STD_VER <= 17 + _LIBCPP_HIDE_FROM_ABI constexpr friend strong_ordering + operator<=>(const __bit_iterator& __x, const __bit_iterator& __y) { + if (__x.__seg_ < __y.__seg_) + return strong_ordering::less; + + if (__x.__seg_ == __y.__seg_) + return __x.__ctz_ <=> __y.__ctz_; + + return strong_ordering::greater; + } +#endif // _LIBCPP_STD_VER <= 17 private: - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 explicit __bit_iterator( - __storage_pointer __s, unsigned __ctz) _NOEXCEPT + _LIBCPP_HIDE_FROM_ABI + _LIBCPP_CONSTEXPR_SINCE_CXX20 explicit __bit_iterator(__storage_pointer __s, unsigned __ctz) _NOEXCEPT : __seg_(__s), __ctz_(__ctz) {} @@ -1007,8 +965,10 @@ private: friend class __bit_iterator<_Cp, true>; template friend struct __bit_array; + 
template - _LIBCPP_CONSTEXPR_SINCE_CXX20 friend void __fill_n(__bit_iterator<_Dp, false> __first, typename _Dp::size_type __n); + _LIBCPP_CONSTEXPR_SINCE_CXX20 friend void + __fill_n_bool(__bit_iterator<_Dp, false> __first, typename _Dp::size_type __n); template _LIBCPP_CONSTEXPR_SINCE_CXX20 friend __bit_iterator<_Dp, false> __copy_aligned( @@ -1053,8 +1013,8 @@ private: _LIBCPP_CONSTEXPR_SINCE_CXX20 friend __bit_iterator<_Dp, _IC> __find_bool(__bit_iterator<_Dp, _IC>, typename _Dp::size_type); template - friend typename __bit_iterator<_Dp, _IC>::difference_type _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 - __count_bool(__bit_iterator<_Dp, _IC>, typename _Dp::size_type); + friend typename __bit_iterator<_Dp, _IC>::difference_type _LIBCPP_HIDE_FROM_ABI + _LIBCPP_CONSTEXPR_SINCE_CXX20 __count_bool(__bit_iterator<_Dp, _IC>, typename _Dp::size_type); }; _LIBCPP_END_NAMESPACE_STD diff --git a/lib/libcxx/include/__charconv/chars_format.h b/lib/libcxx/include/__charconv/chars_format.h index 95faa29010dd..c76cebd5d184 100644 --- a/lib/libcxx/include/__charconv/chars_format.h +++ b/lib/libcxx/include/__charconv/chars_format.h @@ -39,20 +39,17 @@ inline _LIBCPP_HIDE_FROM_ABI constexpr chars_format operator^(chars_format __x, return chars_format(std::__to_underlying(__x) ^ std::__to_underlying(__y)); } -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 chars_format& -operator&=(chars_format& __x, chars_format __y) { +inline _LIBCPP_HIDE_FROM_ABI constexpr chars_format& operator&=(chars_format& __x, chars_format __y) { __x = __x & __y; return __x; } -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 chars_format& -operator|=(chars_format& __x, chars_format __y) { +inline _LIBCPP_HIDE_FROM_ABI constexpr chars_format& operator|=(chars_format& __x, chars_format __y) { __x = __x | __y; return __x; } -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 chars_format& -operator^=(chars_format& __x, chars_format __y) { +inline _LIBCPP_HIDE_FROM_ABI 
constexpr chars_format& operator^=(chars_format& __x, chars_format __y) { __x = __x ^ __y; return __x; } diff --git a/lib/libcxx/include/__charconv/from_chars_integral.h b/lib/libcxx/include/__charconv/from_chars_integral.h index e969cedb33cb..c1f033b37b91 100644 --- a/lib/libcxx/include/__charconv/from_chars_integral.h +++ b/lib/libcxx/include/__charconv/from_chars_integral.h @@ -11,6 +11,7 @@ #define _LIBCPP___CHARCONV_FROM_CHARS_INTEGRAL_H #include <__algorithm/copy_n.h> +#include <__assert> #include <__charconv/from_chars_result.h> #include <__charconv/traits.h> #include <__config> diff --git a/lib/libcxx/include/__charconv/to_chars_base_10.h b/lib/libcxx/include/__charconv/to_chars_base_10.h index 0dee351521f9..c49f4f6797aa 100644 --- a/lib/libcxx/include/__charconv/to_chars_base_10.h +++ b/lib/libcxx/include/__charconv/to_chars_base_10.h @@ -11,6 +11,7 @@ #define _LIBCPP___CHARCONV_TO_CHARS_BASE_10_H #include <__algorithm/copy_n.h> +#include <__assert> #include <__charconv/tables.h> #include <__config> #include diff --git a/lib/libcxx/include/__charconv/to_chars_floating_point.h b/lib/libcxx/include/__charconv/to_chars_floating_point.h index 08720e107885..118f316b21a1 100644 --- a/lib/libcxx/include/__charconv/to_chars_floating_point.h +++ b/lib/libcxx/include/__charconv/to_chars_floating_point.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___CHARCONV_TO_CHARS_FLOATING_POINT_H #define _LIBCPP___CHARCONV_TO_CHARS_FLOATING_POINT_H -#include <__availability> #include <__charconv/chars_format.h> #include <__charconv/to_chars_result.h> #include <__config> diff --git a/lib/libcxx/include/__charconv/to_chars_integral.h b/lib/libcxx/include/__charconv/to_chars_integral.h index 40fbe334d8d5..0369f4dfb9bd 100644 --- a/lib/libcxx/include/__charconv/to_chars_integral.h +++ b/lib/libcxx/include/__charconv/to_chars_integral.h @@ -11,6 +11,7 @@ #define _LIBCPP___CHARCONV_TO_CHARS_INTEGRAL_H #include <__algorithm/copy_n.h> +#include <__assert> #include <__bit/countl.h> #include 
<__charconv/tables.h> #include <__charconv/to_chars_base_10.h> diff --git a/lib/libcxx/include/__charconv/traits.h b/lib/libcxx/include/__charconv/traits.h index b4907c3f7757..c91c6da32479 100644 --- a/lib/libcxx/include/__charconv/traits.h +++ b/lib/libcxx/include/__charconv/traits.h @@ -10,6 +10,7 @@ #ifndef _LIBCPP___CHARCONV_TRAITS #define _LIBCPP___CHARCONV_TRAITS +#include <__assert> #include <__bit/countl.h> #include <__charconv/tables.h> #include <__charconv/to_chars_base_10.h> diff --git a/lib/libcxx/include/__chrono/convert_to_tm.h b/lib/libcxx/include/__chrono/convert_to_tm.h index 1301cd6f1f1a..3a51019b8078 100644 --- a/lib/libcxx/include/__chrono/convert_to_tm.h +++ b/lib/libcxx/include/__chrono/convert_to_tm.h @@ -16,10 +16,12 @@ #include <__chrono/duration.h> #include <__chrono/file_clock.h> #include <__chrono/hh_mm_ss.h> +#include <__chrono/local_info.h> #include <__chrono/month.h> #include <__chrono/month_weekday.h> #include <__chrono/monthday.h> #include <__chrono/statically_widen.h> +#include <__chrono/sys_info.h> #include <__chrono/system_clock.h> #include <__chrono/time_point.h> #include <__chrono/weekday.h> @@ -27,11 +29,13 @@ #include <__chrono/year_month.h> #include <__chrono/year_month_day.h> #include <__chrono/year_month_weekday.h> +#include <__chrono/zoned_time.h> #include <__concepts/same_as.h> #include <__config> #include <__format/format_error.h> #include <__memory/addressof.h> #include <__type_traits/is_convertible.h> +#include <__type_traits/is_specialization.h> #include #include #include @@ -171,6 +175,18 @@ _LIBCPP_HIDE_FROM_ABI _Tm __convert_to_tm(const _ChronoT& __value) { if (__value.hours().count() > std::numeric_limits::max()) std::__throw_format_error("Formatting hh_mm_ss, encountered an hour overflow"); __result.tm_hour = __value.hours().count(); +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + } else if constexpr (same_as<_ChronoT, chrono::sys_info>) { + // Has no time information. 
+ } else if constexpr (same_as<_ChronoT, chrono::local_info>) { + // Has no time information. +# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ + !defined(_LIBCPP_HAS_NO_LOCALIZATION) + } else if constexpr (__is_specialization_v<_ChronoT, chrono::zoned_time>) { + return std::__convert_to_tm<_Tm>( + chrono::sys_time{__value.get_local_time().time_since_epoch()}); +# endif +# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) } else static_assert(sizeof(_ChronoT) == 0, "Add the missing type specialization"); diff --git a/lib/libcxx/include/__chrono/duration.h b/lib/libcxx/include/__chrono/duration.h index 5693ee644091..1e36d7342836 100644 --- a/lib/libcxx/include/__chrono/duration.h +++ b/lib/libcxx/include/__chrono/duration.h @@ -391,8 +391,8 @@ operator<=>(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Perio template inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR - typename common_type, duration<_Rep2, _Period2> >::type - operator+(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { +typename common_type, duration<_Rep2, _Period2> >::type +operator+(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { typedef typename common_type, duration<_Rep2, _Period2> >::type _Cd; return _Cd(_Cd(__lhs).count() + _Cd(__rhs).count()); } @@ -401,8 +401,8 @@ inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR template inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR - typename common_type, duration<_Rep2, _Period2> >::type - operator-(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { +typename common_type, duration<_Rep2, _Period2> >::type +operator-(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { typedef typename common_type, duration<_Rep2, _Period2> >::type _Cd; return _Cd(_Cd(__lhs).count() - _Cd(__rhs).count()); } @@ -412,7 +412,7 @@ inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR template 
::type>::value, int> = 0> + __enable_if_t::type>::value, int> = 0> inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR duration::type, _Period> operator*(const duration<_Rep1, _Period>& __d, const _Rep2& __s) { typedef typename common_type<_Rep1, _Rep2>::type _Cr; @@ -423,7 +423,7 @@ operator*(const duration<_Rep1, _Period>& __d, const _Rep2& __s) { template ::type>::value, int> = 0> + __enable_if_t::type>::value, int> = 0> inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR duration::type, _Period> operator*(const _Rep1& __s, const duration<_Rep2, _Period>& __d) { return __d * __s; @@ -435,7 +435,7 @@ template ::value && - is_convertible<_Rep2, typename common_type<_Rep1, _Rep2>::type>::value, + is_convertible::type>::value, int> = 0> inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR duration::type, _Period> operator/(const duration<_Rep1, _Period>& __d, const _Rep2& __s) { @@ -457,7 +457,7 @@ template ::value && - is_convertible<_Rep2, typename common_type<_Rep1, _Rep2>::type>::value, + is_convertible::type>::value, int> = 0> inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR duration::type, _Period> operator%(const duration<_Rep1, _Period>& __d, const _Rep2& __s) { @@ -468,8 +468,8 @@ operator%(const duration<_Rep1, _Period>& __d, const _Rep2& __s) { template inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR - typename common_type, duration<_Rep2, _Period2> >::type - operator%(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { +typename common_type, duration<_Rep2, _Period2> >::type +operator%(const duration<_Rep1, _Period1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { typedef typename common_type<_Rep1, _Rep2>::type _Cr; typedef typename common_type, duration<_Rep2, _Period2> >::type _Cd; return _Cd(static_cast<_Cr>(_Cd(__lhs).count()) % static_cast<_Cr>(_Cd(__rhs).count())); diff --git a/lib/libcxx/include/__chrono/exception.h b/lib/libcxx/include/__chrono/exception.h new file mode 100644 index 000000000000..266f8fac4417 --- /dev/null +++ 
b/lib/libcxx/include/__chrono/exception.h @@ -0,0 +1,135 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// For information see https://libcxx.llvm.org/DesignDocs/TimeZone.html + +#ifndef _LIBCPP___CHRONO_EXCEPTION_H +#define _LIBCPP___CHRONO_EXCEPTION_H + +#include +// Enable the contents of the header only when libc++ was built with experimental features enabled. +#if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + +# include <__chrono/calendar.h> +# include <__chrono/local_info.h> +# include <__chrono/time_point.h> +# include <__config> +# include <__configuration/availability.h> +# include <__verbose_abort> +# include +# include +# include + +# if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +# endif + +_LIBCPP_BEGIN_NAMESPACE_STD + +# if _LIBCPP_STD_VER >= 20 + +namespace chrono { + +class nonexistent_local_time : public runtime_error { +public: + template + _LIBCPP_HIDE_FROM_ABI nonexistent_local_time(const local_time<_Duration>& __time, const local_info& __info) + : runtime_error{__create_message(__time, __info)} { + // [time.zone.exception.nonexist]/2 + // Preconditions: i.result == local_info::nonexistent is true. + // The value of __info.result is not used. 
+ _LIBCPP_ASSERT_PEDANTIC(__info.result == local_info::nonexistent, + "creating an nonexistent_local_time from a local_info that is not non-existent"); + } + + _LIBCPP_HIDE_FROM_ABI nonexistent_local_time(const nonexistent_local_time&) = default; + _LIBCPP_HIDE_FROM_ABI nonexistent_local_time& operator=(const nonexistent_local_time&) = default; + + _LIBCPP_AVAILABILITY_TZDB _LIBCPP_EXPORTED_FROM_ABI ~nonexistent_local_time() override; // exported as key function + +private: + template + _LIBCPP_HIDE_FROM_ABI string __create_message(const local_time<_Duration>& __time, const local_info& __info) { + return std::format( + R"({} is in a gap between +{} {} and +{} {} which are both equivalent to +{} UTC)", + __time, + local_seconds{__info.first.end.time_since_epoch()} + __info.first.offset, + __info.first.abbrev, + local_seconds{__info.second.begin.time_since_epoch()} + __info.second.offset, + __info.second.abbrev, + __info.first.end); + } +}; + +template +_LIBCPP_NORETURN _LIBCPP_AVAILABILITY_TZDB _LIBCPP_HIDE_FROM_ABI void __throw_nonexistent_local_time( + [[maybe_unused]] const local_time<_Duration>& __time, [[maybe_unused]] const local_info& __info) { +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS + throw nonexistent_local_time(__time, __info); +# else + _LIBCPP_VERBOSE_ABORT("nonexistent_local_time was thrown in -fno-exceptions mode"); +# endif +} + +class ambiguous_local_time : public runtime_error { +public: + template + _LIBCPP_HIDE_FROM_ABI ambiguous_local_time(const local_time<_Duration>& __time, const local_info& __info) + : runtime_error{__create_message(__time, __info)} { + // [time.zone.exception.ambig]/2 + // Preconditions: i.result == local_info::ambiguous is true. + // The value of __info.result is not used. 
+ _LIBCPP_ASSERT_PEDANTIC(__info.result == local_info::ambiguous, + "creating an ambiguous_local_time from a local_info that is not ambiguous"); + } + + _LIBCPP_HIDE_FROM_ABI ambiguous_local_time(const ambiguous_local_time&) = default; + _LIBCPP_HIDE_FROM_ABI ambiguous_local_time& operator=(const ambiguous_local_time&) = default; + + _LIBCPP_AVAILABILITY_TZDB _LIBCPP_EXPORTED_FROM_ABI ~ambiguous_local_time() override; // exported as key function + +private: + template + _LIBCPP_HIDE_FROM_ABI string __create_message(const local_time<_Duration>& __time, const local_info& __info) { + return std::format( + // There are two spaces after the full-stop; this has been verified + // in the sources of the Standard. + R"({0} is ambiguous. It could be +{0} {1} == {2} UTC or +{0} {3} == {4} UTC)", + __time, + __info.first.abbrev, + __time - __info.first.offset, + __info.second.abbrev, + __time - __info.second.offset); + } +}; + +template +_LIBCPP_NORETURN _LIBCPP_AVAILABILITY_TZDB _LIBCPP_HIDE_FROM_ABI void __throw_ambiguous_local_time( + [[maybe_unused]] const local_time<_Duration>& __time, [[maybe_unused]] const local_info& __info) { +# ifndef _LIBCPP_HAS_NO_EXCEPTIONS + throw ambiguous_local_time(__time, __info); +# else + _LIBCPP_VERBOSE_ABORT("ambiguous_local_time was thrown in -fno-exceptions mode"); +# endif +} + +} // namespace chrono + +# endif // _LIBCPP_STD_VER >= 20 + +_LIBCPP_END_NAMESPACE_STD + +#endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + +#endif // _LIBCPP___CHRONO_EXCEPTION_H diff --git a/lib/libcxx/include/__chrono/file_clock.h b/lib/libcxx/include/__chrono/file_clock.h index 7d25729fec01..4dd3f88ce5ba 100644 --- a/lib/libcxx/include/__chrono/file_clock.h +++ b/lib/libcxx/include/__chrono/file_clock.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___CHRONO_FILE_CLOCK_H #define _LIBCPP___CHRONO_FILE_CLOCK_H -#include <__availability> #include <__chrono/duration.h> #include <__chrono/system_clock.h> #include <__chrono/time_point.h> diff --git 
a/lib/libcxx/include/__chrono/formatter.h b/lib/libcxx/include/__chrono/formatter.h index 4ad59382a414..449c415e9576 100644 --- a/lib/libcxx/include/__chrono/formatter.h +++ b/lib/libcxx/include/__chrono/formatter.h @@ -10,6 +10,7 @@ #ifndef _LIBCPP___CHRONO_FORMATTER_H #define _LIBCPP___CHRONO_FORMATTER_H +#include <__algorithm/ranges_copy.h> #include <__chrono/calendar.h> #include <__chrono/concepts.h> #include <__chrono/convert_to_tm.h> @@ -17,12 +18,14 @@ #include <__chrono/duration.h> #include <__chrono/file_clock.h> #include <__chrono/hh_mm_ss.h> +#include <__chrono/local_info.h> #include <__chrono/month.h> #include <__chrono/month_weekday.h> #include <__chrono/monthday.h> #include <__chrono/ostream.h> #include <__chrono/parser_std_format_spec.h> #include <__chrono/statically_widen.h> +#include <__chrono/sys_info.h> #include <__chrono/system_clock.h> #include <__chrono/time_point.h> #include <__chrono/weekday.h> @@ -30,6 +33,7 @@ #include <__chrono/year_month.h> #include <__chrono/year_month_day.h> #include <__chrono/year_month_weekday.h> +#include <__chrono/zoned_time.h> #include <__concepts/arithmetic.h> #include <__concepts/same_as.h> #include <__config> @@ -41,8 +45,10 @@ #include <__format/parser_std_format_spec.h> #include <__format/write_escaped.h> #include <__memory/addressof.h> +#include <__type_traits/is_specialization.h> #include #include +#include #include #include @@ -79,12 +85,15 @@ namespace __formatter { // small). Therefore a duration uses its own conversion. template _LIBCPP_HIDE_FROM_ABI void -__format_sub_seconds(const chrono::duration<_Rep, _Period>& __value, basic_stringstream<_CharT>& __sstr) { +__format_sub_seconds(basic_stringstream<_CharT>& __sstr, const chrono::duration<_Rep, _Period>& __value) { __sstr << std::use_facet>(__sstr.getloc()).decimal_point(); using __duration = chrono::duration<_Rep, _Period>; auto __fraction = __value - chrono::duration_cast(__value); + // Converts a negative fraction to its positive value. 
+ if (__value < chrono::seconds{0} && __fraction != __duration{0}) + __fraction += chrono::seconds{1}; if constexpr (chrono::treat_as_floating_point_v<_Rep>) // When the floating-point value has digits itself they are ignored based // on the wording in [tab:time.format.spec] @@ -110,13 +119,13 @@ __format_sub_seconds(const chrono::duration<_Rep, _Period>& __value, basic_strin } template -_LIBCPP_HIDE_FROM_ABI void __format_sub_seconds(const _Tp& __value, basic_stringstream<_CharT>& __sstr) { - __formatter::__format_sub_seconds(__value.time_since_epoch(), __sstr); +_LIBCPP_HIDE_FROM_ABI void __format_sub_seconds(basic_stringstream<_CharT>& __sstr, const _Tp& __value) { + __formatter::__format_sub_seconds(__sstr, __value.time_since_epoch()); } template _LIBCPP_HIDE_FROM_ABI void -__format_sub_seconds(const chrono::hh_mm_ss<_Duration>& __value, basic_stringstream<_CharT>& __sstr) { +__format_sub_seconds(basic_stringstream<_CharT>& __sstr, const chrono::hh_mm_ss<_Duration>& __value) { __sstr << std::use_facet>(__sstr.getloc()).decimal_point(); if constexpr (chrono::treat_as_floating_point_v) std::format_to(std::ostreambuf_iterator<_CharT>{__sstr}, @@ -130,10 +139,24 @@ __format_sub_seconds(const chrono::hh_mm_ss<_Duration>& __value, basic_stringstr __value.fractional_width); } +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) && !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && \ + !defined(_LIBCPP_HAS_NO_FILESYSTEM) && !defined(_LIBCPP_HAS_NO_LOCALIZATION) +template +_LIBCPP_HIDE_FROM_ABI void +__format_sub_seconds(basic_stringstream<_CharT>& __sstr, const chrono::zoned_time<_Duration, _TimeZonePtr>& __value) { + __formatter::__format_sub_seconds(__sstr, __value.get_local_time().time_since_epoch()); +} +# endif + template consteval bool __use_fraction() { if constexpr (__is_time_point<_Tp>) return chrono::hh_mm_ss::fractional_width; +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) && !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && \ + !defined(_LIBCPP_HAS_NO_FILESYSTEM) && 
!defined(_LIBCPP_HAS_NO_LOCALIZATION) + else if constexpr (__is_specialization_v<_Tp, chrono::zoned_time>) + return chrono::hh_mm_ss::fractional_width; +# endif else if constexpr (chrono::__is_duration<_Tp>::value) return chrono::hh_mm_ss<_Tp>::fractional_width; else if constexpr (__is_hh_mm_ss<_Tp>) @@ -143,7 +166,7 @@ consteval bool __use_fraction() { } template -_LIBCPP_HIDE_FROM_ABI void __format_year(int __year, basic_stringstream<_CharT>& __sstr) { +_LIBCPP_HIDE_FROM_ABI void __format_year(basic_stringstream<_CharT>& __sstr, int __year) { if (__year < 0) { __sstr << _CharT('-'); __year = -__year; @@ -159,7 +182,7 @@ _LIBCPP_HIDE_FROM_ABI void __format_year(int __year, basic_stringstream<_CharT>& } template -_LIBCPP_HIDE_FROM_ABI void __format_century(int __year, basic_stringstream<_CharT>& __sstr) { +_LIBCPP_HIDE_FROM_ABI void __format_century(basic_stringstream<_CharT>& __sstr, int __year) { // TODO FMT Write an issue // [tab:time.format.spec] // %C The year divided by 100 using floored division. If the result is a @@ -170,10 +193,56 @@ _LIBCPP_HIDE_FROM_ABI void __format_century(int __year, basic_stringstream<_Char __sstr << std::format(_LIBCPP_STATICALLY_WIDEN(_CharT, "{:02}"), __century); } +// Implements the %z format specifier according to [tab:time.format.spec], where +// '__modifier' signals %Oz or %Ez were used. (Both modifiers behave the same, +// so there is no need to distinguish between them.) +template +_LIBCPP_HIDE_FROM_ABI void +__format_zone_offset(basic_stringstream<_CharT>& __sstr, chrono::seconds __offset, bool __modifier) { + if (__offset < 0s) { + __sstr << _CharT('-'); + __offset = -__offset; + } else { + __sstr << _CharT('+'); + } + + chrono::hh_mm_ss __hms{__offset}; + std::ostreambuf_iterator<_CharT> __out_it{__sstr}; + // Note HMS does not allow formatting hours > 23, but the offset is not limited to 24H. 
+ std::format_to(__out_it, _LIBCPP_STATICALLY_WIDEN(_CharT, "{:02}"), __hms.hours().count()); + if (__modifier) + __sstr << _CharT(':'); + std::format_to(__out_it, _LIBCPP_STATICALLY_WIDEN(_CharT, "{:02}"), __hms.minutes().count()); +} + +// Helper to store the time zone information needed for formatting. +struct _LIBCPP_HIDE_FROM_ABI __time_zone { + // Typically these abbreviations are short and fit in the string's internal + // buffer. + string __abbrev; + chrono::seconds __offset; +}; + +template +_LIBCPP_HIDE_FROM_ABI __time_zone __convert_to_time_zone([[maybe_unused]] const _Tp& __value) { +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + if constexpr (same_as<_Tp, chrono::sys_info>) + return {__value.abbrev, __value.offset}; +# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ + !defined(_LIBCPP_HAS_NO_LOCALIZATION) + else if constexpr (__is_specialization_v<_Tp, chrono::zoned_time>) + return __formatter::__convert_to_time_zone(__value.get_info()); +# endif + else +# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + return {"UTC", chrono::seconds{0}}; +} + template _LIBCPP_HIDE_FROM_ABI void __format_chrono_using_chrono_specs( - const _Tp& __value, basic_stringstream<_CharT>& __sstr, basic_string_view<_CharT> __chrono_specs) { + basic_stringstream<_CharT>& __sstr, const _Tp& __value, basic_string_view<_CharT> __chrono_specs) { tm __t = std::__convert_to_tm(__value); + __time_zone __z = __formatter::__convert_to_time_zone(__value); const auto& __facet = std::use_facet>(__sstr.getloc()); for (auto __it = __chrono_specs.begin(); __it != __chrono_specs.end(); ++__it) { if (*__it == _CharT('%')) { @@ -196,7 +265,7 @@ _LIBCPP_HIDE_FROM_ABI void __format_chrono_using_chrono_specs( // strftime's output is only defined in the range [00, 99]. 
int __year = __t.tm_year + 1900; if (__year < 1000 || __year > 9999) - __formatter::__format_century(__year, __sstr); + __formatter::__format_century(__sstr, __year); else __facet.put( {__sstr}, __sstr, _CharT(' '), std::addressof(__t), std::to_address(__s), std::to_address(__it + 1)); @@ -242,7 +311,7 @@ _LIBCPP_HIDE_FROM_ABI void __format_chrono_using_chrono_specs( __facet.put( {__sstr}, __sstr, _CharT(' '), std::addressof(__t), std::to_address(__s), std::to_address(__it + 1)); if constexpr (__use_fraction<_Tp>()) - __formatter::__format_sub_seconds(__value, __sstr); + __formatter::__format_sub_seconds(__sstr, __value); break; // Unlike time_put and strftime the formatting library requires %Y @@ -283,22 +352,24 @@ _LIBCPP_HIDE_FROM_ABI void __format_chrono_using_chrono_specs( // Depending on the platform's libc the range of supported years is // limited. Intead of of testing all conditions use the internal // implementation unconditionally. - __formatter::__format_year(__t.tm_year + 1900, __sstr); + __formatter::__format_year(__sstr, __t.tm_year + 1900); break; - case _CharT('F'): { - int __year = __t.tm_year + 1900; - if (__year < 1000) { - __formatter::__format_year(__year, __sstr); - __sstr << std::format(_LIBCPP_STATICALLY_WIDEN(_CharT, "-{:02}-{:02}"), __t.tm_mon + 1, __t.tm_mday); - } else - __facet.put( - {__sstr}, __sstr, _CharT(' '), std::addressof(__t), std::to_address(__s), std::to_address(__it + 1)); - } break; + case _CharT('F'): + // Depending on the platform's libc the range of supported years is + // limited. Instead of testing all conditions use the internal + // implementation unconditionally. + __formatter::__format_year(__sstr, __t.tm_year + 1900); + __sstr << std::format(_LIBCPP_STATICALLY_WIDEN(_CharT, "-{:02}-{:02}"), __t.tm_mon + 1, __t.tm_mday); + break; + + case _CharT('z'): + __formatter::__format_zone_offset(__sstr, __z.__offset, false); + break; case _CharT('Z'): - // TODO FMT Add proper timezone support. 
- __sstr << _LIBCPP_STATICALLY_WIDEN(_CharT, "UTC"); + // __abbrev is always a char so the copy may convert. + ranges::copy(__z.__abbrev, std::ostreambuf_iterator<_CharT>{__sstr}); break; case _CharT('O'): @@ -310,13 +381,19 @@ _LIBCPP_HIDE_FROM_ABI void __format_chrono_using_chrono_specs( ++__it; __facet.put( {__sstr}, __sstr, _CharT(' '), std::addressof(__t), std::to_address(__s), std::to_address(__it + 1)); - __formatter::__format_sub_seconds(__value, __sstr); + __formatter::__format_sub_seconds(__sstr, __value); break; } } + + // Oz produces the same output as Ez below. [[fallthrough]]; case _CharT('E'): ++__it; + if (*__it == 'z') { + __formatter::__format_zone_offset(__sstr, __z.__offset, true); + break; + } [[fallthrough]]; default: __facet.put( @@ -365,6 +442,17 @@ _LIBCPP_HIDE_FROM_ABI constexpr bool __weekday_ok(const _Tp& __value) { return __value.weekday().ok(); else if constexpr (__is_hh_mm_ss<_Tp>) return true; +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + else if constexpr (same_as<_Tp, chrono::sys_info>) + return true; + else if constexpr (same_as<_Tp, chrono::local_info>) + return true; +# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ + !defined(_LIBCPP_HAS_NO_LOCALIZATION) + else if constexpr (__is_specialization_v<_Tp, chrono::zoned_time>) + return true; +# endif +# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) else static_assert(sizeof(_Tp) == 0, "Add the missing type specialization"); } @@ -405,6 +493,17 @@ _LIBCPP_HIDE_FROM_ABI constexpr bool __weekday_name_ok(const _Tp& __value) { return __value.weekday().ok(); else if constexpr (__is_hh_mm_ss<_Tp>) return true; +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + else if constexpr (same_as<_Tp, chrono::sys_info>) + return true; + else if constexpr (same_as<_Tp, chrono::local_info>) + return true; +# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ + !defined(_LIBCPP_HAS_NO_LOCALIZATION) + else if 
constexpr (__is_specialization_v<_Tp, chrono::zoned_time>) + return true; +# endif +# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) else static_assert(sizeof(_Tp) == 0, "Add the missing type specialization"); } @@ -445,6 +544,17 @@ _LIBCPP_HIDE_FROM_ABI constexpr bool __date_ok(const _Tp& __value) { return __value.ok(); else if constexpr (__is_hh_mm_ss<_Tp>) return true; +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + else if constexpr (same_as<_Tp, chrono::sys_info>) + return true; + else if constexpr (same_as<_Tp, chrono::local_info>) + return true; +# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ + !defined(_LIBCPP_HAS_NO_LOCALIZATION) + else if constexpr (__is_specialization_v<_Tp, chrono::zoned_time>) + return true; +# endif +# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) else static_assert(sizeof(_Tp) == 0, "Add the missing type specialization"); } @@ -485,6 +595,17 @@ _LIBCPP_HIDE_FROM_ABI constexpr bool __month_name_ok(const _Tp& __value) { return __value.month().ok(); else if constexpr (__is_hh_mm_ss<_Tp>) return true; +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + else if constexpr (same_as<_Tp, chrono::sys_info>) + return true; + else if constexpr (same_as<_Tp, chrono::local_info>) + return true; +# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ + !defined(_LIBCPP_HAS_NO_LOCALIZATION) + else if constexpr (__is_specialization_v<_Tp, chrono::zoned_time>) + return true; +# endif +# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) else static_assert(sizeof(_Tp) == 0, "Add the missing type specialization"); } @@ -510,9 +631,16 @@ __format_chrono(const _Tp& __value, __sstr << __value; else { if constexpr (chrono::__is_duration<_Tp>::value) { - if (__value < __value.zero()) - __sstr << _CharT('-'); - __formatter::__format_chrono_using_chrono_specs(chrono::abs(__value), __sstr, __chrono_specs); + // A duration can be a user defined arithmetic type. 
Users may specialize + // numeric_limits, but they may not specialize is_signed. + if constexpr (numeric_limits::is_signed) { + if (__value < __value.zero()) { + __sstr << _CharT('-'); + __formatter::__format_chrono_using_chrono_specs(__sstr, -__value, __chrono_specs); + } else + __formatter::__format_chrono_using_chrono_specs(__sstr, __value, __chrono_specs); + } else + __formatter::__format_chrono_using_chrono_specs(__sstr, __value, __chrono_specs); // TODO FMT When keeping the precision it will truncate the string. // Note that the behaviour what the precision does isn't specified. __specs.__precision_ = -1; @@ -556,7 +684,7 @@ __format_chrono(const _Tp& __value, __sstr << _CharT('-'); } - __formatter::__format_chrono_using_chrono_specs(__value, __sstr, __chrono_specs); + __formatter::__format_chrono_using_chrono_specs(__sstr, __value, __chrono_specs); } } @@ -814,6 +942,47 @@ struct formatter, _CharT> : public __formatter_chron return _Base::__parse(__ctx, __format_spec::__fields_chrono, __format_spec::__flags::__time); } }; + +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) +template <__fmt_char_type _CharT> +struct formatter : public __formatter_chrono<_CharT> { +public: + using _Base = __formatter_chrono<_CharT>; + + template + _LIBCPP_HIDE_FROM_ABI constexpr typename _ParseContext::iterator parse(_ParseContext& __ctx) { + return _Base::__parse(__ctx, __format_spec::__fields_chrono, __format_spec::__flags::__time_zone); + } +}; + +template <__fmt_char_type _CharT> +struct formatter : public __formatter_chrono<_CharT> { +public: + using _Base = __formatter_chrono<_CharT>; + + template + _LIBCPP_HIDE_FROM_ABI constexpr typename _ParseContext::iterator parse(_ParseContext& __ctx) { + return _Base::__parse(__ctx, __format_spec::__fields_chrono, __format_spec::__flags{}); + } +}; +# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ + !defined(_LIBCPP_HAS_NO_LOCALIZATION) +// Note due to how libc++'s formatters are 
implemented there is no need to add +// the exposition only local-time-format-t abstraction. +template +struct formatter, _CharT> : public __formatter_chrono<_CharT> { +public: + using _Base = __formatter_chrono<_CharT>; + + template + _LIBCPP_HIDE_FROM_ABI constexpr typename _ParseContext::iterator parse(_ParseContext& __ctx) { + return _Base::__parse(__ctx, __format_spec::__fields_chrono, __format_spec::__flags::__clock); + } +}; +# endif // !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && + // !defined(_LIBCPP_HAS_NO_LOCALIZATION) +# endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + #endif // if _LIBCPP_STD_VER >= 20 _LIBCPP_END_NAMESPACE_STD diff --git a/lib/libcxx/include/__chrono/leap_second.h b/lib/libcxx/include/__chrono/leap_second.h new file mode 100644 index 000000000000..1a0e7f3107de --- /dev/null +++ b/lib/libcxx/include/__chrono/leap_second.h @@ -0,0 +1,126 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// For information see https://libcxx.llvm.org/DesignDocs/TimeZone.html + +#ifndef _LIBCPP___CHRONO_LEAP_SECOND_H +#define _LIBCPP___CHRONO_LEAP_SECOND_H + +#include +// Enable the contents of the header only when libc++ was built with experimental features enabled. 
+#if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + +# include <__chrono/duration.h> +# include <__chrono/system_clock.h> +# include <__chrono/time_point.h> +# include <__compare/ordering.h> +# include <__compare/three_way_comparable.h> +# include <__config> +# include <__utility/private_constructor_tag.h> + +# if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +# endif + +_LIBCPP_BEGIN_NAMESPACE_STD + +# if _LIBCPP_STD_VER >= 20 + +namespace chrono { + +class leap_second { +public: + [[nodiscard]] + _LIBCPP_HIDE_FROM_ABI explicit constexpr leap_second(__private_constructor_tag, sys_seconds __date, seconds __value) + : __date_(__date), __value_(__value) {} + + _LIBCPP_HIDE_FROM_ABI leap_second(const leap_second&) = default; + _LIBCPP_HIDE_FROM_ABI leap_second& operator=(const leap_second&) = default; + + _LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI constexpr sys_seconds date() const noexcept { return __date_; } + + _LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI constexpr seconds value() const noexcept { return __value_; } + +private: + sys_seconds __date_; + seconds __value_; +}; + +_LIBCPP_HIDE_FROM_ABI inline constexpr bool operator==(const leap_second& __x, const leap_second& __y) { + return __x.date() == __y.date(); +} + +_LIBCPP_HIDE_FROM_ABI inline constexpr strong_ordering operator<=>(const leap_second& __x, const leap_second& __y) { + return __x.date() <=> __y.date(); +} + +template +_LIBCPP_HIDE_FROM_ABI constexpr bool operator==(const leap_second& __x, const sys_time<_Duration>& __y) { + return __x.date() == __y; +} + +template +_LIBCPP_HIDE_FROM_ABI constexpr bool operator<(const leap_second& __x, const sys_time<_Duration>& __y) { + return __x.date() < __y; +} + +template +_LIBCPP_HIDE_FROM_ABI constexpr bool operator<(const sys_time<_Duration>& __x, const leap_second& __y) { + return __x < __y.date(); +} + +template +_LIBCPP_HIDE_FROM_ABI constexpr bool operator>(const leap_second& __x, const sys_time<_Duration>& __y) { + return __y < __x; 
+} + +template +_LIBCPP_HIDE_FROM_ABI constexpr bool operator>(const sys_time<_Duration>& __x, const leap_second& __y) { + return __y < __x; +} + +template +_LIBCPP_HIDE_FROM_ABI constexpr bool operator<=(const leap_second& __x, const sys_time<_Duration>& __y) { + return !(__y < __x); +} + +template +_LIBCPP_HIDE_FROM_ABI constexpr bool operator<=(const sys_time<_Duration>& __x, const leap_second& __y) { + return !(__y < __x); +} + +template +_LIBCPP_HIDE_FROM_ABI constexpr bool operator>=(const leap_second& __x, const sys_time<_Duration>& __y) { + return !(__x < __y); +} + +template +_LIBCPP_HIDE_FROM_ABI constexpr bool operator>=(const sys_time<_Duration>& __x, const leap_second& __y) { + return !(__x < __y); +} + +# ifndef _LIBCPP_COMPILER_GCC +// This requirement cause a compilation loop in GCC-13 and running out of memory. +// TODO TZDB Test whether GCC-14 fixes this. +template + requires three_way_comparable_with> +_LIBCPP_HIDE_FROM_ABI constexpr auto operator<=>(const leap_second& __x, const sys_time<_Duration>& __y) { + return __x.date() <=> __y; +} +# endif + +} // namespace chrono + +# endif //_LIBCPP_STD_VER >= 20 + +_LIBCPP_END_NAMESPACE_STD + +#endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + +#endif // _LIBCPP___CHRONO_LEAP_SECOND_H diff --git a/lib/libcxx/include/__chrono/local_info.h b/lib/libcxx/include/__chrono/local_info.h new file mode 100644 index 000000000000..cfe1448904d3 --- /dev/null +++ b/lib/libcxx/include/__chrono/local_info.h @@ -0,0 +1,50 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// For information see https://libcxx.llvm.org/DesignDocs/TimeZone.html + +#ifndef _LIBCPP___CHRONO_LOCAL_INFO_H +#define _LIBCPP___CHRONO_LOCAL_INFO_H + +#include +// Enable the contents of the header only when libc++ was built with experimental features enabled. +#if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + +# include <__chrono/sys_info.h> +# include <__config> + +# if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +# endif + +_LIBCPP_BEGIN_NAMESPACE_STD + +# if _LIBCPP_STD_VER >= 20 + +namespace chrono { + +struct local_info { + static constexpr int unique = 0; + static constexpr int nonexistent = 1; + static constexpr int ambiguous = 2; + + int result; + sys_info first; + sys_info second; +}; + +} // namespace chrono + +# endif // _LIBCPP_STD_VER >= 20 + +_LIBCPP_END_NAMESPACE_STD + +#endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + +#endif // _LIBCPP___CHRONO_LOCAL_INFO_H diff --git a/lib/libcxx/include/__chrono/ostream.h b/lib/libcxx/include/__chrono/ostream.h index b687ef8059d5..e6c43254eea1 100644 --- a/lib/libcxx/include/__chrono/ostream.h +++ b/lib/libcxx/include/__chrono/ostream.h @@ -15,20 +15,23 @@ #include <__chrono/duration.h> #include <__chrono/file_clock.h> #include <__chrono/hh_mm_ss.h> +#include <__chrono/local_info.h> #include <__chrono/month.h> #include <__chrono/month_weekday.h> #include <__chrono/monthday.h> #include <__chrono/statically_widen.h> +#include <__chrono/sys_info.h> #include <__chrono/system_clock.h> #include <__chrono/weekday.h> #include <__chrono/year.h> #include <__chrono/year_month.h> #include <__chrono/year_month_day.h> #include <__chrono/year_month_weekday.h> +#include <__chrono/zoned_time.h> #include <__concepts/same_as.h> #include <__config> #include <__format/format_functions.h> -#include +#include <__fwd/ostream.h> #include #if 
!defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -262,6 +265,54 @@ operator<<(basic_ostream<_CharT, _Traits>& __os, const hh_mm_ss<_Duration> __hms return __os << std::format(__os.getloc(), _LIBCPP_STATICALLY_WIDEN(_CharT, "{:L%T}"), __hms); } +# if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + +template +_LIBCPP_HIDE_FROM_ABI basic_ostream<_CharT, _Traits>& +operator<<(basic_ostream<_CharT, _Traits>& __os, const sys_info& __info) { + // __info.abbrev is always std::basic_string. + // Since these strings typically are short the conversion should be cheap. + std::basic_string<_CharT> __abbrev{__info.abbrev.begin(), __info.abbrev.end()}; + return __os << std::format( + _LIBCPP_STATICALLY_WIDEN(_CharT, "[{:%F %T}, {:%F %T}) {:%T} {:%Q%q} \"{}\""), + __info.begin, + __info.end, + hh_mm_ss{__info.offset}, + __info.save, + __abbrev); +} + +template +_LIBCPP_HIDE_FROM_ABI basic_ostream<_CharT, _Traits>& +operator<<(basic_ostream<_CharT, _Traits>& __os, const local_info& __info) { + auto __result = [&]() -> basic_string<_CharT> { + switch (__info.result) { + case local_info::unique: + return _LIBCPP_STATICALLY_WIDEN(_CharT, "unique"); + case local_info::nonexistent: + return _LIBCPP_STATICALLY_WIDEN(_CharT, "non-existent"); + case local_info::ambiguous: + return _LIBCPP_STATICALLY_WIDEN(_CharT, "ambiguous"); + + default: + return std::format(_LIBCPP_STATICALLY_WIDEN(_CharT, "unspecified result ({})"), __info.result); + }; + }; + + return __os << std::format( + _LIBCPP_STATICALLY_WIDEN(_CharT, "{}: {{{}, {}}}"), __result(), __info.first, __info.second); +} + +# if !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ + !defined(_LIBCPP_HAS_NO_LOCALIZATION) +template +_LIBCPP_HIDE_FROM_ABI basic_ostream<_CharT, _Traits>& +operator<<(basic_ostream<_CharT, _Traits>& __os, const zoned_time<_Duration, _TimeZonePtr>& __tp) { + return __os << std::format(__os.getloc(), _LIBCPP_STATICALLY_WIDEN(_CharT, "{:L%F %T %Z}"), __tp); +} +# endif +# endif // 
!defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + } // namespace chrono #endif // if _LIBCPP_STD_VER >= 20 diff --git a/lib/libcxx/include/__chrono/sys_info.h b/lib/libcxx/include/__chrono/sys_info.h new file mode 100644 index 000000000000..11536cbde3a3 --- /dev/null +++ b/lib/libcxx/include/__chrono/sys_info.h @@ -0,0 +1,51 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// For information see https://libcxx.llvm.org/DesignDocs/TimeZone.html + +#ifndef _LIBCPP___CHRONO_SYS_INFO_H +#define _LIBCPP___CHRONO_SYS_INFO_H + +#include +// Enable the contents of the header only when libc++ was built with experimental features enabled. +#if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + +# include <__chrono/duration.h> +# include <__chrono/system_clock.h> +# include <__chrono/time_point.h> +# include <__config> +# include + +# if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +# endif + +_LIBCPP_BEGIN_NAMESPACE_STD + +# if _LIBCPP_STD_VER >= 20 + +namespace chrono { + +struct sys_info { + sys_seconds begin; + sys_seconds end; + seconds offset; + minutes save; + string abbrev; +}; + +} // namespace chrono + +# endif // _LIBCPP_STD_VER >= 20 + +_LIBCPP_END_NAMESPACE_STD + +#endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + +#endif // _LIBCPP___CHRONO_SYS_INFO_H diff --git a/lib/libcxx/include/__chrono/time_point.h b/lib/libcxx/include/__chrono/time_point.h index e65253ddb98e..aaf0b098f280 100644 --- a/lib/libcxx/include/__chrono/time_point.h +++ b/lib/libcxx/include/__chrono/time_point.h @@ -78,7 +78,7 @@ class _LIBCPP_TEMPLATE_VIS time_point { template struct _LIBCPP_TEMPLATE_VIS - common_type, 
chrono::time_point<_Clock, _Duration2> > { +common_type, chrono::time_point<_Clock, _Duration2> > { typedef chrono::time_point<_Clock, typename common_type<_Duration1, _Duration2>::type> type; }; @@ -92,25 +92,22 @@ time_point_cast(const time_point<_Clock, _Duration>& __t) { #if _LIBCPP_STD_VER >= 17 template ::value, int> = 0> -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR time_point<_Clock, _ToDuration> -floor(const time_point<_Clock, _Duration>& __t) { +inline _LIBCPP_HIDE_FROM_ABI constexpr time_point<_Clock, _ToDuration> floor(const time_point<_Clock, _Duration>& __t) { return time_point<_Clock, _ToDuration>{chrono::floor<_ToDuration>(__t.time_since_epoch())}; } template ::value, int> = 0> -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR time_point<_Clock, _ToDuration> -ceil(const time_point<_Clock, _Duration>& __t) { +inline _LIBCPP_HIDE_FROM_ABI constexpr time_point<_Clock, _ToDuration> ceil(const time_point<_Clock, _Duration>& __t) { return time_point<_Clock, _ToDuration>{chrono::ceil<_ToDuration>(__t.time_since_epoch())}; } template ::value, int> = 0> -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR time_point<_Clock, _ToDuration> -round(const time_point<_Clock, _Duration>& __t) { +inline _LIBCPP_HIDE_FROM_ABI constexpr time_point<_Clock, _ToDuration> round(const time_point<_Clock, _Duration>& __t) { return time_point<_Clock, _ToDuration>{chrono::round<_ToDuration>(__t.time_since_epoch())}; } template ::is_signed, int> = 0> -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR duration<_Rep, _Period> abs(duration<_Rep, _Period> __d) { +inline _LIBCPP_HIDE_FROM_ABI constexpr duration<_Rep, _Period> abs(duration<_Rep, _Period> __d) { return __d >= __d.zero() ? 
+__d : -__d; } #endif // _LIBCPP_STD_VER >= 17 @@ -180,9 +177,9 @@ operator<=>(const time_point<_Clock, _Duration1>& __lhs, const time_point<_Clock // time_point operator+(time_point x, duration y); template -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 - time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2> >::type> - operator+(const time_point<_Clock, _Duration1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { +inline _LIBCPP_HIDE_FROM_ABI +_LIBCPP_CONSTEXPR_SINCE_CXX14 time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2> >::type> +operator+(const time_point<_Clock, _Duration1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { typedef time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2> >::type> _Tr; return _Tr(__lhs.time_since_epoch() + __rhs); } @@ -190,18 +187,18 @@ inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 // time_point operator+(duration x, time_point y); template -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 - time_point<_Clock, typename common_type, _Duration2>::type> - operator+(const duration<_Rep1, _Period1>& __lhs, const time_point<_Clock, _Duration2>& __rhs) { +inline _LIBCPP_HIDE_FROM_ABI +_LIBCPP_CONSTEXPR_SINCE_CXX14 time_point<_Clock, typename common_type, _Duration2>::type> +operator+(const duration<_Rep1, _Period1>& __lhs, const time_point<_Clock, _Duration2>& __rhs) { return __rhs + __lhs; } // time_point operator-(time_point x, duration y); template -inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 - time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2> >::type> - operator-(const time_point<_Clock, _Duration1>& __lhs, const duration<_Rep2, _Period2>& __rhs) { +inline _LIBCPP_HIDE_FROM_ABI +_LIBCPP_CONSTEXPR_SINCE_CXX14 time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2> >::type> +operator-(const time_point<_Clock, _Duration1>& __lhs, const duration<_Rep2, _Period2>& 
__rhs) { typedef time_point<_Clock, typename common_type<_Duration1, duration<_Rep2, _Period2> >::type> _Ret; return _Ret(__lhs.time_since_epoch() - __rhs); } diff --git a/lib/libcxx/include/__chrono/time_zone.h b/lib/libcxx/include/__chrono/time_zone.h new file mode 100644 index 000000000000..de11dac1eef0 --- /dev/null +++ b/lib/libcxx/include/__chrono/time_zone.h @@ -0,0 +1,182 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// For information see https://libcxx.llvm.org/DesignDocs/TimeZone.html + +#ifndef _LIBCPP___CHRONO_TIME_ZONE_H +#define _LIBCPP___CHRONO_TIME_ZONE_H + +#include +// Enable the contents of the header only when libc++ was built with experimental features enabled. 
+#if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + +# include <__chrono/calendar.h> +# include <__chrono/duration.h> +# include <__chrono/exception.h> +# include <__chrono/local_info.h> +# include <__chrono/sys_info.h> +# include <__chrono/system_clock.h> +# include <__compare/strong_order.h> +# include <__config> +# include <__memory/unique_ptr.h> +# include <__type_traits/common_type.h> +# include + +# if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +# endif + +_LIBCPP_PUSH_MACROS +# include <__undef_macros> + +_LIBCPP_BEGIN_NAMESPACE_STD + +# if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ + !defined(_LIBCPP_HAS_NO_LOCALIZATION) + +namespace chrono { + +enum class choose { earliest, latest }; + +class _LIBCPP_AVAILABILITY_TZDB time_zone { + _LIBCPP_HIDE_FROM_ABI time_zone() = default; + +public: + class __impl; // public so it can be used by make_unique. + + // The "constructor". + // + // The default constructor is private to avoid the constructor from being + // part of the ABI. Instead use an __ugly_named function as an ABI interface, + // since that gives us the ability to change it in the future. 
+ [[nodiscard]] _LIBCPP_EXPORTED_FROM_ABI static time_zone __create(unique_ptr<__impl>&& __p); + + _LIBCPP_EXPORTED_FROM_ABI ~time_zone(); + + _LIBCPP_HIDE_FROM_ABI time_zone(time_zone&&) = default; + _LIBCPP_HIDE_FROM_ABI time_zone& operator=(time_zone&&) = default; + + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI string_view name() const noexcept { return __name(); } + + template + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI sys_info get_info(const sys_time<_Duration>& __time) const { + return __get_info(chrono::time_point_cast(__time)); + } + + template + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI local_info get_info(const local_time<_Duration>& __time) const { + return __get_info(chrono::time_point_cast(__time)); + } + + // We don't apply nodiscard here since this function throws on many inputs, + // so it could be used as a validation. + template + _LIBCPP_HIDE_FROM_ABI sys_time> to_sys(const local_time<_Duration>& __time) const { + local_info __info = get_info(__time); + switch (__info.result) { + case local_info::unique: + return sys_time>{__time.time_since_epoch() - __info.first.offset}; + + case local_info::nonexistent: + chrono::__throw_nonexistent_local_time(__time, __info); + + case local_info::ambiguous: + chrono::__throw_ambiguous_local_time(__time, __info); + } + + // TODO TZDB The Standard does not specify anything in these cases. 
+ _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + __info.result != -1, "cannot convert the local time; it would be before the minimum system clock value"); + _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + __info.result != -2, "cannot convert the local time; it would be after the maximum system clock value"); + + return {}; + } + + template + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI sys_time> + to_sys(const local_time<_Duration>& __time, choose __z) const { + local_info __info = get_info(__time); + switch (__info.result) { + case local_info::unique: + case local_info::nonexistent: // first and second are the same + return sys_time>{__time.time_since_epoch() - __info.first.offset}; + + case local_info::ambiguous: + switch (__z) { + case choose::earliest: + return sys_time>{__time.time_since_epoch() - __info.first.offset}; + + case choose::latest: + return sys_time>{__time.time_since_epoch() - __info.second.offset}; + + // Note a value out of bounds is not specified. + } + } + + // TODO TZDB The standard does not specify anything in these cases. 
+ _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + __info.result != -1, "cannot convert the local time; it would be before the minimum system clock value"); + _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + __info.result != -2, "cannot convert the local time; it would be after the maximum system clock value"); + + return {}; + } + + template + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI local_time> + to_local(const sys_time<_Duration>& __time) const { + using _Dp = common_type_t<_Duration, seconds>; + + sys_info __info = get_info(__time); + + _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + __info.offset >= chrono::seconds{0} || __time.time_since_epoch() >= _Dp::min() - __info.offset, + "cannot convert the system time; it would be before the minimum local clock value"); + + _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN( + __info.offset <= chrono::seconds{0} || __time.time_since_epoch() <= _Dp::max() - __info.offset, + "cannot convert the system time; it would be after the maximum local clock value"); + + return local_time<_Dp>{__time.time_since_epoch() + __info.offset}; + } + + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI const __impl& __implementation() const noexcept { return *__impl_; } + +private: + [[nodiscard]] _LIBCPP_EXPORTED_FROM_ABI string_view __name() const noexcept; + + [[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_EXPORTED_FROM_ABI sys_info __get_info(sys_seconds __time) const; + [[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_EXPORTED_FROM_ABI local_info __get_info(local_seconds __time) const; + + unique_ptr<__impl> __impl_; +}; + +[[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_HIDE_FROM_ABI inline bool +operator==(const time_zone& __x, const time_zone& __y) noexcept { + return __x.name() == __y.name(); +} + +[[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_HIDE_FROM_ABI inline strong_ordering +operator<=>(const time_zone& __x, const time_zone& __y) noexcept { + return __x.name() <=> __y.name(); +} + +} // namespace chrono + +# endif // _LIBCPP_STD_VER >= 20 && 
!defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) + // && !defined(_LIBCPP_HAS_NO_LOCALIZATION) + +_LIBCPP_END_NAMESPACE_STD + +_LIBCPP_POP_MACROS + +#endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + +#endif // _LIBCPP___CHRONO_TIME_ZONE_H diff --git a/lib/libcxx/include/__chrono/time_zone_link.h b/lib/libcxx/include/__chrono/time_zone_link.h new file mode 100644 index 000000000000..b2d365c5fd08 --- /dev/null +++ b/lib/libcxx/include/__chrono/time_zone_link.h @@ -0,0 +1,79 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// For information see https://libcxx.llvm.org/DesignDocs/TimeZone.html + +#ifndef _LIBCPP___CHRONO_TIME_ZONE_LINK_H +#define _LIBCPP___CHRONO_TIME_ZONE_LINK_H + +#include +// Enable the contents of the header only when libc++ was built with experimental features enabled. 
+#if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + +# include <__compare/strong_order.h> +# include <__config> +# include <__utility/private_constructor_tag.h> +# include +# include + +# if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +# endif + +_LIBCPP_PUSH_MACROS +# include <__undef_macros> + +_LIBCPP_BEGIN_NAMESPACE_STD + +# if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ + !defined(_LIBCPP_HAS_NO_LOCALIZATION) + +namespace chrono { + +class time_zone_link { +public: + [[nodiscard]] + _LIBCPP_HIDE_FROM_ABI explicit time_zone_link(__private_constructor_tag, string_view __name, string_view __target) + : __name_{__name}, __target_{__target} {} + + _LIBCPP_HIDE_FROM_ABI time_zone_link(time_zone_link&&) = default; + _LIBCPP_HIDE_FROM_ABI time_zone_link& operator=(time_zone_link&&) = default; + + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI string_view name() const noexcept { return __name_; } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI string_view target() const noexcept { return __target_; } + +private: + string __name_; + // TODO TZDB instead of the name we can store the pointer to a zone. These + // pointers are immutable. This makes it possible to directly return a + // pointer in the time_zone in the 'locate_zone' function. 
+ string __target_; +}; + +[[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_HIDE_FROM_ABI inline bool +operator==(const time_zone_link& __x, const time_zone_link& __y) noexcept { + return __x.name() == __y.name(); +} + +[[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_HIDE_FROM_ABI inline strong_ordering +operator<=>(const time_zone_link& __x, const time_zone_link& __y) noexcept { + return __x.name() <=> __y.name(); +} + +} // namespace chrono + +# endif //_LIBCPP_STD_VER >= 20 + +_LIBCPP_END_NAMESPACE_STD + +_LIBCPP_POP_MACROS + +#endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + +#endif // _LIBCPP___CHRONO_TIME_ZONE_LINK_H diff --git a/lib/libcxx/include/__chrono/tzdb.h b/lib/libcxx/include/__chrono/tzdb.h index bd7b05d478e5..f731f8c318be 100644 --- a/lib/libcxx/include/__chrono/tzdb.h +++ b/lib/libcxx/include/__chrono/tzdb.h @@ -14,14 +14,23 @@ #include // Enable the contents of the header only when libc++ was built with experimental features enabled. -#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_TZDB) +#if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) +# include <__algorithm/ranges_lower_bound.h> +# include <__chrono/leap_second.h> +# include <__chrono/time_zone.h> +# include <__chrono/time_zone_link.h> +# include <__config> # include +# include # if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header # endif +_LIBCPP_PUSH_MACROS +# include <__undef_macros> + _LIBCPP_BEGIN_NAMESPACE_STD # if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ @@ -29,8 +38,46 @@ _LIBCPP_BEGIN_NAMESPACE_STD namespace chrono { -struct _LIBCPP_AVAILABILITY_TZDB tzdb { +struct tzdb { string version; + vector zones; + vector links; + + vector leap_seconds; + + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI const time_zone* __locate_zone(string_view __name) const { + if (const time_zone* __result = __find_in_zone(__name)) + return __result; + + if (auto __it = ranges::lower_bound(links, __name, {}, &time_zone_link::name); 
+ __it != links.end() && __it->name() == __name) + if (const time_zone* __result = __find_in_zone(__it->target())) + return __result; + + return nullptr; + } + + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI const time_zone* locate_zone(string_view __name) const { + if (const time_zone* __result = __locate_zone(__name)) + return __result; + + std::__throw_runtime_error("tzdb: requested time zone not found"); + } + + [[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_HIDE_FROM_ABI const time_zone* current_zone() const { + return __current_zone(); + } + +private: + _LIBCPP_HIDE_FROM_ABI const time_zone* __find_in_zone(string_view __name) const noexcept { + if (auto __it = ranges::lower_bound(zones, __name, {}, &time_zone::name); + __it != zones.end() && __it->name() == __name) + return std::addressof(*__it); + + return nullptr; + } + + [[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_EXPORTED_FROM_ABI const time_zone* __current_zone() const; }; } // namespace chrono @@ -40,6 +87,8 @@ struct _LIBCPP_AVAILABILITY_TZDB tzdb { _LIBCPP_END_NAMESPACE_STD -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_TZDB) +_LIBCPP_POP_MACROS + +#endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) #endif // _LIBCPP___CHRONO_TZDB_H diff --git a/lib/libcxx/include/__chrono/tzdb_list.h b/lib/libcxx/include/__chrono/tzdb_list.h index 0494826c01a3..aeef4fe1aba3 100644 --- a/lib/libcxx/include/__chrono/tzdb_list.h +++ b/lib/libcxx/include/__chrono/tzdb_list.h @@ -14,12 +14,13 @@ #include // Enable the contents of the header only when libc++ was built with experimental features enabled. 
-#if !defined(_LIBCPP_HAS_NO_INCOMPLETE_TZDB) +#if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) -# include <__availability> +# include <__chrono/time_zone.h> # include <__chrono/tzdb.h> +# include <__config> +# include <__fwd/string.h> # include -# include # if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header @@ -32,9 +33,18 @@ _LIBCPP_BEGIN_NAMESPACE_STD namespace chrono { +// TODO TZDB +// Libc++ recently switched to only export __ugly_names from the dylib. +// Since the library is still experimental the functions in this header +// should be adapted to this new style. The other tzdb headers should be +// evaluated too. + class _LIBCPP_AVAILABILITY_TZDB tzdb_list { public: - _LIBCPP_EXPORTED_FROM_ABI explicit tzdb_list(tzdb&& __tzdb); + class __impl; // public to allow construction in dylib + _LIBCPP_HIDE_FROM_ABI explicit tzdb_list(__impl* __p) : __impl_(__p) { + _LIBCPP_ASSERT_NON_NULL(__impl_ != nullptr, "initialized time_zone without a valid pimpl object"); + } _LIBCPP_EXPORTED_FROM_ABI ~tzdb_list(); tzdb_list(const tzdb_list&) = delete; @@ -42,32 +52,49 @@ class _LIBCPP_AVAILABILITY_TZDB tzdb_list { using const_iterator = forward_list::const_iterator; - _LIBCPP_NODISCARD_EXT _LIBCPP_EXPORTED_FROM_ABI const tzdb& front() const noexcept; + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI const tzdb& front() const noexcept { return __front(); } - _LIBCPP_EXPORTED_FROM_ABI const_iterator erase_after(const_iterator __p); + _LIBCPP_HIDE_FROM_ABI const_iterator erase_after(const_iterator __p) { return __erase_after(__p); } - _LIBCPP_EXPORTED_FROM_ABI tzdb& __emplace_front(tzdb&& __tzdb); + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI const_iterator begin() const noexcept { return __begin(); } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI const_iterator end() const noexcept { return __end(); } - _LIBCPP_NODISCARD_EXT _LIBCPP_EXPORTED_FROM_ABI const_iterator begin() const noexcept; - _LIBCPP_NODISCARD_EXT _LIBCPP_EXPORTED_FROM_ABI const_iterator end() const noexcept; + 
[[nodiscard]] _LIBCPP_HIDE_FROM_ABI const_iterator cbegin() const noexcept { return __cbegin(); } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI const_iterator cend() const noexcept { return __cend(); } - _LIBCPP_NODISCARD_EXT _LIBCPP_EXPORTED_FROM_ABI const_iterator cbegin() const noexcept; - _LIBCPP_NODISCARD_EXT _LIBCPP_EXPORTED_FROM_ABI const_iterator cend() const noexcept; + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI __impl& __implementation() { return *__impl_; } private: - class __impl; + [[nodiscard]] _LIBCPP_EXPORTED_FROM_ABI const tzdb& __front() const noexcept; + + _LIBCPP_EXPORTED_FROM_ABI const_iterator __erase_after(const_iterator __p); + + [[nodiscard]] _LIBCPP_EXPORTED_FROM_ABI const_iterator __begin() const noexcept; + [[nodiscard]] _LIBCPP_EXPORTED_FROM_ABI const_iterator __end() const noexcept; + + [[nodiscard]] _LIBCPP_EXPORTED_FROM_ABI const_iterator __cbegin() const noexcept; + [[nodiscard]] _LIBCPP_EXPORTED_FROM_ABI const_iterator __cend() const noexcept; + __impl* __impl_; }; -_LIBCPP_NODISCARD_EXT _LIBCPP_AVAILABILITY_TZDB _LIBCPP_EXPORTED_FROM_ABI tzdb_list& get_tzdb_list(); +[[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_EXPORTED_FROM_ABI tzdb_list& get_tzdb_list(); -_LIBCPP_NODISCARD_EXT _LIBCPP_AVAILABILITY_TZDB _LIBCPP_HIDE_FROM_ABI inline const tzdb& get_tzdb() { +[[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_HIDE_FROM_ABI inline const tzdb& get_tzdb() { return get_tzdb_list().front(); } +[[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_HIDE_FROM_ABI inline const time_zone* locate_zone(string_view __name) { + return get_tzdb().locate_zone(__name); +} + +[[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_HIDE_FROM_ABI inline const time_zone* current_zone() { + return get_tzdb().current_zone(); +} + _LIBCPP_AVAILABILITY_TZDB _LIBCPP_EXPORTED_FROM_ABI const tzdb& reload_tzdb(); -_LIBCPP_NODISCARD_EXT _LIBCPP_AVAILABILITY_TZDB _LIBCPP_EXPORTED_FROM_ABI string remote_version(); +[[nodiscard]] _LIBCPP_AVAILABILITY_TZDB _LIBCPP_EXPORTED_FROM_ABI string 
remote_version(); } // namespace chrono @@ -76,6 +103,6 @@ _LIBCPP_NODISCARD_EXT _LIBCPP_AVAILABILITY_TZDB _LIBCPP_EXPORTED_FROM_ABI string _LIBCPP_END_NAMESPACE_STD -#endif // !defined(_LIBCPP_HAS_NO_INCOMPLETE_TZDB) +#endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) #endif // _LIBCPP___CHRONO_TZDB_LIST_H diff --git a/lib/libcxx/include/__chrono/weekday.h b/lib/libcxx/include/__chrono/weekday.h index 5a7dedc6e3a1..86c780cc7182 100644 --- a/lib/libcxx/include/__chrono/weekday.h +++ b/lib/libcxx/include/__chrono/weekday.h @@ -79,6 +79,8 @@ _LIBCPP_HIDE_FROM_ABI inline constexpr bool operator==(const weekday& __lhs, con return __lhs.c_encoding() == __rhs.c_encoding(); } +// TODO(LLVM 20): Remove the escape hatch +# ifdef _LIBCPP_ENABLE_REMOVED_WEEKDAY_RELATIONAL_OPERATORS _LIBCPP_HIDE_FROM_ABI inline constexpr bool operator<(const weekday& __lhs, const weekday& __rhs) noexcept { return __lhs.c_encoding() < __rhs.c_encoding(); } @@ -94,6 +96,7 @@ _LIBCPP_HIDE_FROM_ABI inline constexpr bool operator<=(const weekday& __lhs, con _LIBCPP_HIDE_FROM_ABI inline constexpr bool operator>=(const weekday& __lhs, const weekday& __rhs) noexcept { return !(__lhs < __rhs); } +# endif // _LIBCPP_ENABLE_REMOVED_WEEKDAY_RELATIONAL_OPERATORS _LIBCPP_HIDE_FROM_ABI inline constexpr weekday operator+(const weekday& __lhs, const days& __rhs) noexcept { auto const __mu = static_cast(__lhs.c_encoding()) + __rhs.count(); diff --git a/lib/libcxx/include/__chrono/year_month_day.h b/lib/libcxx/include/__chrono/year_month_day.h index 75884f3654d8..b06c0be03e0d 100644 --- a/lib/libcxx/include/__chrono/year_month_day.h +++ b/lib/libcxx/include/__chrono/year_month_day.h @@ -239,33 +239,11 @@ operator==(const year_month_day_last& __lhs, const year_month_day_last& __rhs) n return __lhs.year() == __rhs.year() && __lhs.month_day_last() == __rhs.month_day_last(); } -_LIBCPP_HIDE_FROM_ABI inline constexpr bool -operator!=(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept { - 
return !(__lhs == __rhs); -} - -_LIBCPP_HIDE_FROM_ABI inline constexpr bool -operator<(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept { - if (__lhs.year() < __rhs.year()) - return true; - if (__lhs.year() > __rhs.year()) - return false; - return __lhs.month_day_last() < __rhs.month_day_last(); -} - -_LIBCPP_HIDE_FROM_ABI inline constexpr bool -operator>(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept { - return __rhs < __lhs; -} - -_LIBCPP_HIDE_FROM_ABI inline constexpr bool -operator<=(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept { - return !(__rhs < __lhs); -} - -_LIBCPP_HIDE_FROM_ABI inline constexpr bool -operator>=(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept { - return !(__lhs < __rhs); +_LIBCPP_HIDE_FROM_ABI inline constexpr strong_ordering +operator<=>(const year_month_day_last& __lhs, const year_month_day_last& __rhs) noexcept { + if (auto __c = __lhs.year() <=> __rhs.year(); __c != 0) + return __c; + return __lhs.month_day_last() <=> __rhs.month_day_last(); } _LIBCPP_HIDE_FROM_ABI inline constexpr year_month_day_last operator/(const year_month& __lhs, last_spec) noexcept { diff --git a/lib/libcxx/include/__chrono/zoned_time.h b/lib/libcxx/include/__chrono/zoned_time.h new file mode 100644 index 000000000000..8cfa2122642c --- /dev/null +++ b/lib/libcxx/include/__chrono/zoned_time.h @@ -0,0 +1,227 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// For information see https://libcxx.llvm.org/DesignDocs/TimeZone.html + +#ifndef _LIBCPP___CHRONO_ZONED_TIME_H +#define _LIBCPP___CHRONO_ZONED_TIME_H + +#include +// Enable the contents of the header only when libc++ was built with experimental features enabled. +#if !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + +# include <__chrono/calendar.h> +# include <__chrono/duration.h> +# include <__chrono/sys_info.h> +# include <__chrono/system_clock.h> +# include <__chrono/time_zone.h> +# include <__chrono/tzdb_list.h> +# include <__config> +# include <__fwd/string_view.h> +# include <__type_traits/common_type.h> +# include <__type_traits/conditional.h> +# include <__type_traits/remove_cvref.h> +# include <__utility/move.h> + +# if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +# endif + +_LIBCPP_PUSH_MACROS +# include <__undef_macros> + +_LIBCPP_BEGIN_NAMESPACE_STD + +# if _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) && \ + !defined(_LIBCPP_HAS_NO_LOCALIZATION) + +namespace chrono { + +template +struct zoned_traits {}; + +template <> +struct zoned_traits { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static const time_zone* default_zone() { return chrono::locate_zone("UTC"); } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI static const time_zone* locate_zone(string_view __name) { + return chrono::locate_zone(__name); + } +}; + +template +class zoned_time { + // [time.zone.zonedtime.ctor]/2 + static_assert(__is_duration<_Duration>::value, + "the program is ill-formed since _Duration is not a specialization of std::chrono::duration"); + + // The wording uses the constraints like + // constructible_from + // Using these constraints in the code causes the compiler to give an + // error that the constraint depends on itself. 
To avoid that issue use + // the fact it is possible to create this object from a _TimeZonePtr. + using __traits = zoned_traits<_TimeZonePtr>; + +public: + using duration = common_type_t<_Duration, seconds>; + + _LIBCPP_HIDE_FROM_ABI zoned_time() + requires requires { __traits::default_zone(); } + : __zone_{__traits::default_zone()}, __tp_{} {} + + _LIBCPP_HIDE_FROM_ABI zoned_time(const zoned_time&) = default; + _LIBCPP_HIDE_FROM_ABI zoned_time& operator=(const zoned_time&) = default; + + _LIBCPP_HIDE_FROM_ABI zoned_time(const sys_time<_Duration>& __tp) + requires requires { __traits::default_zone(); } + : __zone_{__traits::default_zone()}, __tp_{__tp} {} + + _LIBCPP_HIDE_FROM_ABI explicit zoned_time(_TimeZonePtr __zone) : __zone_{std::move(__zone)}, __tp_{} {} + + _LIBCPP_HIDE_FROM_ABI explicit zoned_time(string_view __name) + requires(requires { __traits::locate_zone(string_view{}); } && + constructible_from<_TimeZonePtr, decltype(__traits::locate_zone(string_view{}))>) + : __zone_{__traits::locate_zone(__name)}, __tp_{} {} + + template + _LIBCPP_HIDE_FROM_ABI zoned_time(const zoned_time<_Duration2, _TimeZonePtr>& __zt) + requires is_convertible_v, sys_time<_Duration>> + : __zone_{__zt.get_time_zone()}, __tp_{__zt.get_sys_time()} {} + + _LIBCPP_HIDE_FROM_ABI zoned_time(_TimeZonePtr __zone, const sys_time<_Duration>& __tp) + : __zone_{std::move(__zone)}, __tp_{__tp} {} + + _LIBCPP_HIDE_FROM_ABI zoned_time(string_view __name, const sys_time<_Duration>& __tp) + requires requires { _TimeZonePtr{__traits::locate_zone(string_view{})}; } + : zoned_time{__traits::locate_zone(__name), __tp} {} + + _LIBCPP_HIDE_FROM_ABI zoned_time(_TimeZonePtr __zone, const local_time<_Duration>& __tp) + requires(is_convertible_v() -> to_sys(local_time<_Duration>{})), + sys_time>) + : __zone_{std::move(__zone)}, __tp_{__zone_->to_sys(__tp)} {} + + _LIBCPP_HIDE_FROM_ABI zoned_time(string_view __name, const local_time<_Duration>& __tp) + requires(requires { + 
_TimeZonePtr{__traits::locate_zone(string_view{})}; + } && is_convertible_v() -> to_sys(local_time<_Duration>{})), + sys_time>) + : zoned_time{__traits::locate_zone(__name), __tp} {} + + _LIBCPP_HIDE_FROM_ABI zoned_time(_TimeZonePtr __zone, const local_time<_Duration>& __tp, choose __c) + requires(is_convertible_v< + decltype(std::declval<_TimeZonePtr&>() -> to_sys(local_time<_Duration>{}, choose::earliest)), + sys_time>) + : __zone_{std::move(__zone)}, __tp_{__zone_->to_sys(__tp, __c)} {} + + _LIBCPP_HIDE_FROM_ABI zoned_time(string_view __name, const local_time<_Duration>& __tp, choose __c) + requires(requires { + _TimeZonePtr{__traits::locate_zone(string_view{})}; + } && is_convertible_v() -> to_sys(local_time<_Duration>{}, choose::earliest)), + sys_time>) + : zoned_time{__traits::locate_zone(__name), __tp, __c} {} + + template + _LIBCPP_HIDE_FROM_ABI zoned_time(_TimeZonePtr __zone, const zoned_time<_Duration2, _TimeZonePtr2>& __zt) + requires is_convertible_v, sys_time<_Duration>> + : __zone_{std::move(__zone)}, __tp_{__zt.get_sys_time()} {} + + // per wording choose has no effect + template + _LIBCPP_HIDE_FROM_ABI zoned_time(_TimeZonePtr __zone, const zoned_time<_Duration2, _TimeZonePtr2>& __zt, choose) + requires is_convertible_v, sys_time<_Duration>> + : __zone_{std::move(__zone)}, __tp_{__zt.get_sys_time()} {} + + template + _LIBCPP_HIDE_FROM_ABI zoned_time(string_view __name, const zoned_time<_Duration2, _TimeZonePtr2>& __zt) + requires(requires { + _TimeZonePtr{__traits::locate_zone(string_view{})}; + } && is_convertible_v, sys_time<_Duration>>) + : zoned_time{__traits::locate_zone(__name), __zt} {} + + template + _LIBCPP_HIDE_FROM_ABI zoned_time(string_view __name, const zoned_time<_Duration2, _TimeZonePtr2>& __zt, choose __c) + requires(requires { + _TimeZonePtr{__traits::locate_zone(string_view{})}; + } && is_convertible_v, sys_time<_Duration>>) + : zoned_time{__traits::locate_zone(__name), __zt, __c} {} + + _LIBCPP_HIDE_FROM_ABI zoned_time& 
operator=(const sys_time<_Duration>& __tp) { + __tp_ = __tp; + return *this; + } + + _LIBCPP_HIDE_FROM_ABI zoned_time& operator=(const local_time<_Duration>& __tp) { + // TODO TZDB This seems wrong. + // Assigning a non-existent or ambiguous time will throw and not satisfy + // the post condition. This seems quite odd; I constructed an object with + // choose::earliest and that choice is not respected. + // what did LEWG do with this. + // MSVC STL and libstdc++ behave the same + __tp_ = __zone_->to_sys(__tp); + return *this; + } + + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI operator sys_time() const { return get_sys_time(); } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI explicit operator local_time() const { return get_local_time(); } + + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI _TimeZonePtr get_time_zone() const { return __zone_; } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI local_time get_local_time() const { return __zone_->to_local(__tp_); } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI sys_time get_sys_time() const { return __tp_; } + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI sys_info get_info() const { return __zone_->get_info(__tp_); } + +private: + _TimeZonePtr __zone_; + sys_time __tp_; +}; + +zoned_time() -> zoned_time; + +template +zoned_time(sys_time<_Duration>) -> zoned_time>; + +template +using __time_zone_representation = + conditional_t, + const time_zone*, + remove_cvref_t<_TimeZonePtrOrName>>; + +template +zoned_time(_TimeZonePtrOrName&&) -> zoned_time>; + +template +zoned_time(_TimeZonePtrOrName&&, sys_time<_Duration>) + -> zoned_time, __time_zone_representation<_TimeZonePtrOrName>>; + +template +zoned_time(_TimeZonePtrOrName&&, local_time<_Duration>, choose = choose::earliest) + -> zoned_time, __time_zone_representation<_TimeZonePtrOrName>>; + +template +zoned_time(_TimeZonePtrOrName&&, zoned_time<_Duration, TimeZonePtr2>, choose = choose::earliest) + -> zoned_time, __time_zone_representation<_TimeZonePtrOrName>>; + +using zoned_seconds = zoned_time; + +template 
+_LIBCPP_HIDE_FROM_ABI bool +operator==(const zoned_time<_Duration1, _TimeZonePtr>& __lhs, const zoned_time<_Duration2, _TimeZonePtr>& __rhs) { + return __lhs.get_time_zone() == __rhs.get_time_zone() && __lhs.get_sys_time() == __rhs.get_sys_time(); +} + +} // namespace chrono + +# endif // _LIBCPP_STD_VER >= 20 && !defined(_LIBCPP_HAS_NO_TIME_ZONE_DATABASE) && !defined(_LIBCPP_HAS_NO_FILESYSTEM) + // && !defined(_LIBCPP_HAS_NO_LOCALIZATION) + +_LIBCPP_END_NAMESPACE_STD + +_LIBCPP_POP_MACROS + +#endif // !defined(_LIBCPP_HAS_NO_EXPERIMENTAL_TZDB) + +#endif // _LIBCPP___CHRONO_ZONED_TIME_H diff --git a/lib/libcxx/include/__compare/partial_order.h b/lib/libcxx/include/__compare/partial_order.h index f3ed4900fbff..1d2fae63e5f2 100644 --- a/lib/libcxx/include/__compare/partial_order.h +++ b/lib/libcxx/include/__compare/partial_order.h @@ -28,6 +28,8 @@ _LIBCPP_BEGIN_NAMESPACE_STD // [cmp.alg] namespace __partial_order { +void partial_order() = delete; + struct __fn { // NOLINTBEGIN(libcpp-robust-against-adl) partial_order should use ADL, but only here template diff --git a/lib/libcxx/include/__compare/strong_order.h b/lib/libcxx/include/__compare/strong_order.h index 5f6ade5aef8e..8c363b563822 100644 --- a/lib/libcxx/include/__compare/strong_order.h +++ b/lib/libcxx/include/__compare/strong_order.h @@ -13,11 +13,14 @@ #include <__compare/compare_three_way.h> #include <__compare/ordering.h> #include <__config> +#include <__math/exponential_functions.h> +#include <__math/traits.h> #include <__type_traits/conditional.h> #include <__type_traits/decay.h> +#include <__type_traits/is_floating_point.h> +#include <__type_traits/is_same.h> #include <__utility/forward.h> #include <__utility/priority_tag.h> -#include #include #include @@ -34,6 +37,8 @@ _LIBCPP_BEGIN_NAMESPACE_STD // [cmp.alg] namespace __strong_order { +void strong_order() = delete; + struct __fn { // NOLINTBEGIN(libcpp-robust-against-adl) strong_order should use ADL, but only here template @@ -66,27 +71,27 @@ 
struct __fn { return strong_ordering::greater; } else if (__t == __u) { if constexpr (numeric_limits<_Dp>::radix == 2) { - return std::signbit(__u) <=> std::signbit(__t); + return __math::signbit(__u) <=> __math::signbit(__t); } else { // This is bullet 3 of the IEEE754 algorithm, relevant // only for decimal floating-point; // see https://stackoverflow.com/questions/69068075/ - if (__t == 0 || std::isinf(__t)) { - return std::signbit(__u) <=> std::signbit(__t); + if (__t == 0 || __math::isinf(__t)) { + return __math::signbit(__u) <=> __math::signbit(__t); } else { int __texp, __uexp; - (void)std::frexp(__t, &__texp); - (void)std::frexp(__u, &__uexp); + (void)__math::frexp(__t, &__texp); + (void)__math::frexp(__u, &__uexp); return (__t < 0) ? (__texp <=> __uexp) : (__uexp <=> __texp); } } } else { // They're unordered, so one of them must be a NAN. // The order is -QNAN, -SNAN, numbers, +SNAN, +QNAN. - bool __t_is_nan = std::isnan(__t); - bool __u_is_nan = std::isnan(__u); - bool __t_is_negative = std::signbit(__t); - bool __u_is_negative = std::signbit(__u); + bool __t_is_nan = __math::isnan(__t); + bool __u_is_nan = __math::isnan(__u); + bool __t_is_negative = __math::signbit(__t); + bool __u_is_negative = __math::signbit(__u); using _IntType = conditional_t< sizeof(__t) == sizeof(int32_t), int32_t, diff --git a/lib/libcxx/include/__compare/synth_three_way.h b/lib/libcxx/include/__compare/synth_three_way.h index 6420d1362db0..e48ce4979983 100644 --- a/lib/libcxx/include/__compare/synth_three_way.h +++ b/lib/libcxx/include/__compare/synth_three_way.h @@ -25,12 +25,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD // [expos.only.func] -// TODO MODULES restore the lamba to match the Standard. 
-// See https://github.com/llvm/llvm-project/issues/57222 -//_LIBCPP_HIDE_FROM_ABI inline constexpr auto __synth_three_way = -// [](const _Tp& __t, const _Up& __u) -template -_LIBCPP_HIDE_FROM_ABI constexpr auto __synth_three_way(const _Tp& __t, const _Up& __u) +_LIBCPP_HIDE_FROM_ABI inline constexpr auto __synth_three_way = [](const _Tp& __t, const _Up& __u) requires requires { { __t < __u } -> __boolean_testable; { __u < __t } -> __boolean_testable; @@ -45,7 +40,7 @@ _LIBCPP_HIDE_FROM_ABI constexpr auto __synth_three_way(const _Tp& __t, const _Up return weak_ordering::greater; return weak_ordering::equivalent; } -} +}; template using __synth_three_way_result = decltype(std::__synth_three_way(std::declval<_Tp&>(), std::declval<_Up&>())); diff --git a/lib/libcxx/include/__compare/weak_order.h b/lib/libcxx/include/__compare/weak_order.h index 9f719eb64bbc..1a3e85feb233 100644 --- a/lib/libcxx/include/__compare/weak_order.h +++ b/lib/libcxx/include/__compare/weak_order.h @@ -13,10 +13,12 @@ #include <__compare/ordering.h> #include <__compare/strong_order.h> #include <__config> +#include <__math/traits.h> #include <__type_traits/decay.h> +#include <__type_traits/is_floating_point.h> +#include <__type_traits/is_same.h> #include <__utility/forward.h> #include <__utility/priority_tag.h> -#include #ifndef _LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER # pragma GCC system_header @@ -28,6 +30,8 @@ _LIBCPP_BEGIN_NAMESPACE_STD // [cmp.alg] namespace __weak_order { +void weak_order() = delete; + struct __fn { // NOLINTBEGIN(libcpp-robust-against-adl) weak_order should use ADL, but only here template @@ -51,10 +55,10 @@ struct __fn { return weak_ordering::greater; } else { // Otherwise, at least one of them is a NaN. 
- bool __t_is_nan = std::isnan(__t); - bool __u_is_nan = std::isnan(__u); - bool __t_is_negative = std::signbit(__t); - bool __u_is_negative = std::signbit(__u); + bool __t_is_nan = __math::isnan(__t); + bool __u_is_nan = __math::isnan(__u); + bool __t_is_negative = __math::signbit(__t); + bool __u_is_negative = __math::signbit(__u); if (__t_is_nan && __u_is_nan) { return (__u_is_negative <=> __t_is_negative); } else if (__t_is_nan) { diff --git a/lib/libcxx/include/__concepts/class_or_enum.h b/lib/libcxx/include/__concepts/class_or_enum.h index c1b4a8c258f3..2739e31e14ba 100644 --- a/lib/libcxx/include/__concepts/class_or_enum.h +++ b/lib/libcxx/include/__concepts/class_or_enum.h @@ -28,11 +28,6 @@ _LIBCPP_BEGIN_NAMESPACE_STD template concept __class_or_enum = is_class_v<_Tp> || is_union_v<_Tp> || is_enum_v<_Tp>; -// Work around Clang bug https://llvm.org/PR52970 -// TODO: remove this workaround once libc++ no longer has to support Clang 13 (it was fixed in Clang 14). -template -concept __workaround_52970 = is_class_v<__remove_cvref_t<_Tp>> || is_union_v<__remove_cvref_t<_Tp>>; - #endif // _LIBCPP_STD_VER >= 20 _LIBCPP_END_NAMESPACE_STD diff --git a/lib/libcxx/include/__concepts/swappable.h b/lib/libcxx/include/__concepts/swappable.h index 1337dc49d75b..d339488a087a 100644 --- a/lib/libcxx/include/__concepts/swappable.h +++ b/lib/libcxx/include/__concepts/swappable.h @@ -15,8 +15,8 @@ #include <__concepts/constructible.h> #include <__config> #include <__type_traits/extent.h> -#include <__type_traits/is_nothrow_move_assignable.h> -#include <__type_traits/is_nothrow_move_constructible.h> +#include <__type_traits/is_nothrow_assignable.h> +#include <__type_traits/is_nothrow_constructible.h> #include <__type_traits/remove_cvref.h> #include <__utility/exchange.h> #include <__utility/forward.h> diff --git a/lib/libcxx/include/__condition_variable/condition_variable.h b/lib/libcxx/include/__condition_variable/condition_variable.h index 4d8e590e29db..de35aaca1070 100644 
--- a/lib/libcxx/include/__condition_variable/condition_variable.h +++ b/lib/libcxx/include/__condition_variable/condition_variable.h @@ -9,6 +9,7 @@ #ifndef _LIBCPP___CONDITION_VARIABLE_CONDITION_VARIABLE_H #define _LIBCPP___CONDITION_VARIABLE_CONDITION_VARIABLE_H +#include <__chrono/duration.h> #include <__chrono/steady_clock.h> #include <__chrono/system_clock.h> #include <__chrono/time_point.h> @@ -16,7 +17,7 @@ #include <__mutex/mutex.h> #include <__mutex/unique_lock.h> #include <__system_error/system_error.h> -#include <__threading_support> +#include <__thread/support.h> #include <__type_traits/enable_if.h> #include <__type_traits/is_floating_point.h> #include <__utility/move.h> diff --git a/lib/libcxx/include/__config b/lib/libcxx/include/__config index 630861acaff9..8165dbc54907 100644 --- a/lib/libcxx/include/__config +++ b/lib/libcxx/include/__config @@ -11,58 +11,23 @@ #define _LIBCPP___CONFIG /* zig patch: instead of including __config_site, zig adds -D flags when compiling */ +#include <__configuration/abi.h> +#include <__configuration/availability.h> +#include <__configuration/compiler.h> +#include <__configuration/platform.h> #ifndef _LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER # pragma GCC system_header #endif -#if defined(_LIBCPP_ENABLE_CXX17_REMOVED_FEATURES) && !defined(_LIBCPP_DISABLE_DEPRECATION_WARNINGS) -# pragma clang deprecated( \ - _LIBCPP_ENABLE_CXX17_REMOVED_FEATURES, \ - "_LIBCPP_ENABLE_CXX17_REMOVED_FEATURES is deprecated in LLVM 18 and will be removed in LLVM 19") -#endif -#if defined(_LIBCPP_ENABLE_CXX20_REMOVED_FEATURES) && !defined(_LIBCPP_DISABLE_DEPRECATION_WARNINGS) -# pragma clang deprecated( \ - _LIBCPP_ENABLE_CXX20_REMOVED_FEATURES, \ - "_LIBCPP_ENABLE_CXX20_REMOVED_FEATURES is deprecated in LLVM 18 and will be removed in LLVM 19") -#endif - -#if defined(__apple_build_version__) -// Given AppleClang XX.Y.Z, _LIBCPP_APPLE_CLANG_VER is XXYZ (e.g. 
AppleClang 14.0.3 => 1403) -# define _LIBCPP_COMPILER_CLANG_BASED -# define _LIBCPP_APPLE_CLANG_VER (__apple_build_version__ / 10000) -#elif defined(__clang__) -# define _LIBCPP_COMPILER_CLANG_BASED -# define _LIBCPP_CLANG_VER (__clang_major__ * 100 + __clang_minor__) -#elif defined(__GNUC__) -# define _LIBCPP_COMPILER_GCC -# define _LIBCPP_GCC_VER (__GNUC__ * 100 + __GNUC_MINOR__) -#endif - #ifdef __cplusplus -// Warn if a compiler version is used that is not supported anymore -// LLVM RELEASE Update the minimum compiler versions -# if defined(_LIBCPP_CLANG_VER) -# if _LIBCPP_CLANG_VER < 1600 -# warning "Libc++ only supports Clang 16 and later" -# endif -# elif defined(_LIBCPP_APPLE_CLANG_VER) -# if _LIBCPP_APPLE_CLANG_VER < 1500 -# warning "Libc++ only supports AppleClang 15 and later" -# endif -# elif defined(_LIBCPP_GCC_VER) -# if _LIBCPP_GCC_VER < 1300 -# warning "Libc++ only supports GCC 13 and later" -# endif -# endif - // The attributes supported by clang are documented at https://clang.llvm.org/docs/AttributeReference.html // _LIBCPP_VERSION represents the version of libc++, which matches the version of LLVM. // Given a LLVM release LLVM XX.YY.ZZ (e.g. LLVM 17.0.1 == 17.00.01), _LIBCPP_VERSION is // defined to XXYYZZ. 
-# define _LIBCPP_VERSION 180100 +# define _LIBCPP_VERSION 190100 # define _LIBCPP_CONCAT_IMPL(_X, _Y) _X##_Y # define _LIBCPP_CONCAT(_X, _Y) _LIBCPP_CONCAT_IMPL(_X, _Y) @@ -71,170 +36,12 @@ # define _LIBCPP_FREESTANDING # endif -// NOLINTBEGIN(libcpp-cpp-version-check) -# ifndef _LIBCPP_STD_VER -# if __cplusplus <= 201103L -# define _LIBCPP_STD_VER 11 -# elif __cplusplus <= 201402L -# define _LIBCPP_STD_VER 14 -# elif __cplusplus <= 201703L -# define _LIBCPP_STD_VER 17 -# elif __cplusplus <= 202002L -# define _LIBCPP_STD_VER 20 -# elif __cplusplus <= 202302L -# define _LIBCPP_STD_VER 23 -# else -// Expected release year of the next C++ standard -# define _LIBCPP_STD_VER 26 -# endif -# endif // _LIBCPP_STD_VER -// NOLINTEND(libcpp-cpp-version-check) - -# if defined(__ELF__) -# define _LIBCPP_OBJECT_FORMAT_ELF 1 -# elif defined(__MACH__) -# define _LIBCPP_OBJECT_FORMAT_MACHO 1 -# elif defined(_WIN32) -# define _LIBCPP_OBJECT_FORMAT_COFF 1 -# elif defined(__wasm__) -# define _LIBCPP_OBJECT_FORMAT_WASM 1 -# elif defined(_AIX) -# define _LIBCPP_OBJECT_FORMAT_XCOFF 1 -# else -// ... add new file formats here ... -# endif - -// ABI { - -# if _LIBCPP_ABI_VERSION >= 2 -// Change short string representation so that string data starts at offset 0, -// improving its alignment in some cases. -# define _LIBCPP_ABI_ALTERNATE_STRING_LAYOUT -// Fix deque iterator type in order to support incomplete types. -# define _LIBCPP_ABI_INCOMPLETE_TYPES_IN_DEQUE -// Fix undefined behavior in how std::list stores its linked nodes. -# define _LIBCPP_ABI_LIST_REMOVE_NODE_POINTER_UB -// Fix undefined behavior in how __tree stores its end and parent nodes. -# define _LIBCPP_ABI_TREE_REMOVE_NODE_POINTER_UB -// Fix undefined behavior in how __hash_table stores its pointer types. 
-# define _LIBCPP_ABI_FIX_UNORDERED_NODE_POINTER_UB -# define _LIBCPP_ABI_FORWARD_LIST_REMOVE_NODE_POINTER_UB -# define _LIBCPP_ABI_FIX_UNORDERED_CONTAINER_SIZE_TYPE -// Define a key function for `bad_function_call` in the library, to centralize -// its vtable and typeinfo to libc++ rather than having all other libraries -// using that class define their own copies. -# define _LIBCPP_ABI_BAD_FUNCTION_CALL_KEY_FUNCTION -// Override the default return value of exception::what() for -// bad_function_call::what() with a string that is specific to -// bad_function_call (see http://wg21.link/LWG2233). This is an ABI break -// because it changes the vtable layout of bad_function_call. -# define _LIBCPP_ABI_BAD_FUNCTION_CALL_GOOD_WHAT_MESSAGE -// Enable optimized version of __do_get_(un)signed which avoids redundant copies. -# define _LIBCPP_ABI_OPTIMIZED_LOCALE_NUM_GET -// Give reverse_iterator one data member of type T, not two. -// Also, in C++17 and later, don't derive iterator types from std::iterator. -# define _LIBCPP_ABI_NO_ITERATOR_BASES -// Use the smallest possible integer type to represent the index of the variant. -// Previously libc++ used "unsigned int" exclusively. -# define _LIBCPP_ABI_VARIANT_INDEX_TYPE_OPTIMIZATION -// Unstable attempt to provide a more optimized std::function -# define _LIBCPP_ABI_OPTIMIZED_FUNCTION -// All the regex constants must be distinct and nonzero. -# define _LIBCPP_ABI_REGEX_CONSTANTS_NONZERO -// Re-worked external template instantiations for std::string with a focus on -// performance and fast-path inlining. -# define _LIBCPP_ABI_STRING_OPTIMIZED_EXTERNAL_INSTANTIATION -// Enable clang::trivial_abi on std::unique_ptr. -# define _LIBCPP_ABI_ENABLE_UNIQUE_PTR_TRIVIAL_ABI -// Enable clang::trivial_abi on std::shared_ptr and std::weak_ptr -# define _LIBCPP_ABI_ENABLE_SHARED_PTR_TRIVIAL_ABI -// std::random_device holds some state when it uses an implementation that gets -// entropy from a file (see _LIBCPP_USING_DEV_RANDOM). 
When switching from this -// implementation to another one on a platform that has already shipped -// std::random_device, one needs to retain the same object layout to remain ABI -// compatible. This switch removes these workarounds for platforms that don't care -// about ABI compatibility. -# define _LIBCPP_ABI_NO_RANDOM_DEVICE_COMPATIBILITY_LAYOUT -// Don't export the legacy __basic_string_common class and its methods from the built library. -# define _LIBCPP_ABI_DO_NOT_EXPORT_BASIC_STRING_COMMON -// Don't export the legacy __vector_base_common class and its methods from the built library. -# define _LIBCPP_ABI_DO_NOT_EXPORT_VECTOR_BASE_COMMON -// According to the Standard, `bitset::operator[] const` returns bool -# define _LIBCPP_ABI_BITSET_VECTOR_BOOL_CONST_SUBSCRIPT_RETURN_BOOL -// Fix the implementation of CityHash used for std::hash. -// This is an ABI break because `std::hash` will return a different result, -// which means that hashing the same object in translation units built against -// different versions of libc++ can return inconsistent results. This is especially -// tricky since std::hash is used in the implementation of unordered containers. -// -// The incorrect implementation of CityHash has the problem that it drops some -// bits on the floor. -# define _LIBCPP_ABI_FIX_CITYHASH_IMPLEMENTATION -// Remove the base 10 implementation of std::to_chars from the dylib. -// The implementation moved to the header, but we still export the symbols from -// the dylib for backwards compatibility. -# define _LIBCPP_ABI_DO_NOT_EXPORT_TO_CHARS_BASE_10 -# elif _LIBCPP_ABI_VERSION == 1 -# if !(defined(_LIBCPP_OBJECT_FORMAT_COFF) || defined(_LIBCPP_OBJECT_FORMAT_XCOFF)) -// Enable compiling copies of now inline methods into the dylib to support -// applications compiled against older libraries. This is unnecessary with -// COFF dllexport semantics, since dllexport forces a non-inline definition -// of inline functions to be emitted anyway. 
Our own non-inline copy would -// conflict with the dllexport-emitted copy, so we disable it. For XCOFF, -// the linker will take issue with the symbols in the shared object if the -// weak inline methods get visibility (such as from -fvisibility-inlines-hidden), -// so disable it. -# define _LIBCPP_DEPRECATED_ABI_LEGACY_LIBRARY_DEFINITIONS_FOR_INLINE_FUNCTIONS -# endif -// Feature macros for disabling pre ABI v1 features. All of these options -// are deprecated. -# if defined(__FreeBSD__) && __FreeBSD__ < 14 -# define _LIBCPP_DEPRECATED_ABI_DISABLE_PAIR_TRIVIAL_COPY_CTOR -# endif -// For XCOFF linkers, we have problems if we see a weak hidden version of a symbol -// in user code (like you get with -fvisibility-inlines-hidden) and then a strong def -// in the library, so we need to always rely on the library version. -# if defined(_AIX) -# define _LIBCPP_ABI_BAD_FUNCTION_CALL_KEY_FUNCTION -# endif -# endif - -# if defined(_LIBCPP_BUILDING_LIBRARY) || _LIBCPP_ABI_VERSION >= 2 -// Define a key function for `bad_function_call` in the library, to centralize -// its vtable and typeinfo to libc++ rather than having all other libraries -// using that class define their own copies. -# define _LIBCPP_ABI_BAD_FUNCTION_CALL_KEY_FUNCTION -# endif - -// We had some bugs where we use [[no_unique_address]] together with construct_at, -// which causes UB as the call on construct_at could write to overlapping subobjects -// -// https://github.com/llvm/llvm-project/issues/70506 -// https://github.com/llvm/llvm-project/issues/70494 -// -// To fix the bug we had to change the ABI of some classes to remove [[no_unique_address]] under certain conditions. -// The macro below is used for all classes whose ABI have changed as part of fixing these bugs. 
-# define _LIBCPP_ABI_LLVM18_NO_UNIQUE_ADDRESS __attribute__((__abi_tag__("llvm18_nua"))) - -// Changes the iterator type of select containers (see below) to a bounded iterator that keeps track of whether it's -// within the bounds of the original container and asserts it on every dereference. -// -// ABI impact: changes the iterator type of the relevant containers. -// -// Supported containers: -// - `span`; -// - `string_view`; -// - `array`. -// #define _LIBCPP_ABI_BOUNDED_ITERATORS - -// } ABI - // HARDENING { -// TODO(hardening): deprecate this in LLVM 19. // This is for backward compatibility -- make enabling `_LIBCPP_ENABLE_ASSERTIONS` (which predates hardening modes) -// equivalent to setting the extensive mode. +// equivalent to setting the extensive mode. This is deprecated and will be removed in LLVM 20. # ifdef _LIBCPP_ENABLE_ASSERTIONS +# warning "_LIBCPP_ENABLE_ASSERTIONS is deprecated, please use _LIBCPP_HARDENING_MODE instead" # if _LIBCPP_ENABLE_ASSERTIONS != 0 && _LIBCPP_ENABLE_ASSERTIONS != 1 # error "_LIBCPP_ENABLE_ASSERTIONS must be set to 0 or 1" # endif @@ -325,6 +132,12 @@ // clang-format on # ifndef _LIBCPP_HARDENING_MODE + +# ifndef _LIBCPP_HARDENING_MODE_DEFAULT +# error _LIBCPP_HARDENING_MODE_DEFAULT is not defined. This definition should be set at configuration time in the \ +`__config_site` header, please make sure your installation of libc++ is not broken. +# endif + # define _LIBCPP_HARDENING_MODE _LIBCPP_HARDENING_MODE_DEFAULT # endif @@ -339,87 +152,6 @@ _LIBCPP_HARDENING_MODE_EXTENSIVE, \ _LIBCPP_HARDENING_MODE_DEBUG # endif -// clang-format off -// Fast hardening mode checks. - -# if _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_FAST - -// Enabled checks. -# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSERT(expression, message) -// Disabled checks. 
-// On most modern platforms, dereferencing a null pointer does not lead to an actual memory access. -# define _LIBCPP_ASSERT_NON_NULL(expression, message) _LIBCPP_ASSUME(expression) -// Overlapping ranges will make algorithms produce incorrect results but don't directly lead to a security -// vulnerability. -# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_VALID_DEALLOCATION(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_PEDANTIC(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSUME(expression) - -// Extensive hardening mode checks. - -# elif _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_EXTENSIVE - -// Enabled checks. 
-# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_NON_NULL(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_VALID_DEALLOCATION(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_PEDANTIC(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSERT(expression, message) -// Disabled checks. -# define _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSUME(expression) - -// Debug hardening mode checks. - -# elif _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_DEBUG - -// All checks enabled. 
-# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_NON_NULL(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_VALID_DEALLOCATION(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_PEDANTIC(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSERT(expression, message) -# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSERT(expression, message) - -// Disable all checks if hardening is not enabled. - -# else - -// All checks disabled. 
-# define _LIBCPP_ASSERT_VALID_INPUT_RANGE(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_NON_NULL(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_NON_OVERLAPPING_RANGES(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_VALID_DEALLOCATION(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_VALID_EXTERNAL_API_CALL(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_PEDANTIC(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_SEMANTIC_REQUIREMENT(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_INTERNAL(expression, message) _LIBCPP_ASSUME(expression) -# define _LIBCPP_ASSERT_UNCATEGORIZED(expression, message) _LIBCPP_ASSUME(expression) - -# endif // _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_FAST -// clang-format on - // } HARDENING # define _LIBCPP_TOSTRING2(x) #x @@ -430,30 +162,15 @@ _LIBCPP_HARDENING_MODE_DEBUG # define _LIBCPP_CXX03_LANG # endif -# ifndef __has_attribute -# define __has_attribute(__x) 0 -# endif - -# ifndef __has_builtin -# define __has_builtin(__x) 0 -# endif - -# ifndef __has_extension -# define __has_extension(__x) 0 -# endif - -# ifndef __has_feature -# define __has_feature(__x) 0 -# endif - -# ifndef __has_cpp_attribute -# define __has_cpp_attribute(__x) 0 -# endif - # ifndef __has_constexpr_builtin # define __has_constexpr_builtin(x) 0 # endif +// This checks wheter a Clang module is built +# ifndef __building_module +# define __building_module(...) 0 +# endif + // '__is_identifier' returns '0' if '__x' is a reserved identifier provided by // the compiler and '1' otherwise. 
# ifndef __is_identifier @@ -466,8 +183,8 @@ _LIBCPP_HARDENING_MODE_DEBUG # define __has_keyword(__x) !(__is_identifier(__x)) -# ifndef __has_include -# define __has_include(...) 0 +# ifndef __has_warning +# define __has_warning(...) 0 # endif # if !defined(_LIBCPP_COMPILER_CLANG_BASED) && __cplusplus < 201103L @@ -508,35 +225,14 @@ _LIBCPP_HARDENING_MODE_DEBUG # if !defined(_LIBCPP_ENABLE_EXPERIMENTAL) && !defined(_LIBCPP_BUILDING_LIBRARY) # define _LIBCPP_HAS_NO_INCOMPLETE_PSTL # define _LIBCPP_HAS_NO_EXPERIMENTAL_STOP_TOKEN -# define _LIBCPP_HAS_NO_INCOMPLETE_TZDB +# define _LIBCPP_HAS_NO_EXPERIMENTAL_TZDB # define _LIBCPP_HAS_NO_EXPERIMENTAL_SYNCSTREAM # endif -// Need to detect which libc we're using if we're on Linux. -# if defined(__linux__) -# include -# if defined(__GLIBC_PREREQ) -# define _LIBCPP_GLIBC_PREREQ(a, b) __GLIBC_PREREQ(a, b) -# else -# define _LIBCPP_GLIBC_PREREQ(a, b) 0 -# endif // defined(__GLIBC_PREREQ) -# endif // defined(__linux__) - # if defined(__MVS__) # include // for __NATIVE_ASCII_F # endif -# ifndef __BYTE_ORDER__ -# error \ - "Your compiler doesn't seem to define __BYTE_ORDER__, which is required by libc++ to know the endianness of your target platform" -# endif - -# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ -# define _LIBCPP_LITTLE_ENDIAN -# elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -# define _LIBCPP_BIG_ENDIAN -# endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ - # if defined(_WIN32) # define _LIBCPP_WIN32API # define _LIBCPP_SHORT_WCHAR 1 @@ -618,7 +314,7 @@ _LIBCPP_HARDENING_MODE_DEBUG # define _ALIGNAS(x) alignas(x) # define _LIBCPP_NORETURN [[noreturn]] # define _NOEXCEPT noexcept -# define _NOEXCEPT_(x) noexcept(x) +# define _NOEXCEPT_(...) noexcept(__VA_ARGS__) # define _LIBCPP_CONSTEXPR constexpr # else @@ -630,7 +326,7 @@ _LIBCPP_HARDENING_MODE_DEBUG # define _LIBCPP_HAS_NO_NOEXCEPT # define nullptr __nullptr # define _NOEXCEPT throw() -# define _NOEXCEPT_(x) +# define _NOEXCEPT_(...) # define static_assert(...) 
_Static_assert(__VA_ARGS__) # define decltype(...) __decltype(__VA_ARGS__) # define _LIBCPP_CONSTEXPR @@ -640,63 +336,32 @@ typedef __char32_t char32_t; # endif -# if !defined(__cpp_exceptions) || __cpp_exceptions < 199711L -# define _LIBCPP_HAS_NO_EXCEPTIONS -# endif - # define _LIBCPP_PREFERRED_ALIGNOF(_Tp) __alignof(_Tp) -# if defined(_LIBCPP_COMPILER_CLANG_BASED) - -# if defined(__APPLE__) -# if defined(__i386__) || defined(__x86_64__) -// use old string layout on x86_64 and i386 -# elif defined(__arm__) -// use old string layout on arm (which does not include aarch64/arm64), except on watch ABIs -# if defined(__ARM_ARCH_7K__) && __ARM_ARCH_7K__ >= 2 -# define _LIBCPP_ABI_ALTERNATE_STRING_LAYOUT -# endif -# else -# define _LIBCPP_ABI_ALTERNATE_STRING_LAYOUT -# endif -# endif - // Objective-C++ features (opt-in) -# if __has_feature(objc_arc) -# define _LIBCPP_HAS_OBJC_ARC -# endif - -# if __has_feature(objc_arc_weak) -# define _LIBCPP_HAS_OBJC_ARC_WEAK -# endif - -# if __has_extension(blocks) -# define _LIBCPP_HAS_EXTENSION_BLOCKS -# endif - -# if defined(_LIBCPP_HAS_EXTENSION_BLOCKS) && defined(__APPLE__) -# define _LIBCPP_HAS_BLOCKS_RUNTIME -# endif - -# if !__has_feature(address_sanitizer) -# define _LIBCPP_HAS_NO_ASAN -# endif - -# define _LIBCPP_ALWAYS_INLINE __attribute__((__always_inline__)) +# if __has_feature(objc_arc) +# define _LIBCPP_HAS_OBJC_ARC +# endif -# define _LIBCPP_DISABLE_EXTENSION_WARNING __extension__ +# if __has_feature(objc_arc_weak) +# define _LIBCPP_HAS_OBJC_ARC_WEAK +# endif -# elif defined(_LIBCPP_COMPILER_GCC) +# if __has_extension(blocks) +# define _LIBCPP_HAS_EXTENSION_BLOCKS +# endif -# if !defined(__SANITIZE_ADDRESS__) -# define _LIBCPP_HAS_NO_ASAN -# endif +# if defined(_LIBCPP_HAS_EXTENSION_BLOCKS) && defined(__APPLE__) +# define _LIBCPP_HAS_BLOCKS_RUNTIME +# endif -# define _LIBCPP_ALWAYS_INLINE __attribute__((__always_inline__)) +# if !__has_feature(address_sanitizer) +# define _LIBCPP_HAS_NO_ASAN +# endif -# define 
_LIBCPP_DISABLE_EXTENSION_WARNING __extension__ +# define _LIBCPP_ALWAYS_INLINE __attribute__((__always_inline__)) -# endif // _LIBCPP_COMPILER_[CLANG|GCC] +# define _LIBCPP_DISABLE_EXTENSION_WARNING __extension__ # if defined(_LIBCPP_OBJECT_FORMAT_COFF) @@ -787,6 +452,23 @@ typedef __char32_t char32_t; # define _LIBCPP_EXCLUDE_FROM_EXPLICIT_INSTANTIATION _LIBCPP_ALWAYS_INLINE # endif +# ifdef _LIBCPP_COMPILER_CLANG_BASED +# define _LIBCPP_DIAGNOSTIC_PUSH _Pragma("clang diagnostic push") +# define _LIBCPP_DIAGNOSTIC_POP _Pragma("clang diagnostic pop") +# define _LIBCPP_CLANG_DIAGNOSTIC_IGNORED(str) _Pragma(_LIBCPP_TOSTRING(clang diagnostic ignored str)) +# define _LIBCPP_GCC_DIAGNOSTIC_IGNORED(str) +# elif defined(_LIBCPP_COMPILER_GCC) +# define _LIBCPP_DIAGNOSTIC_PUSH _Pragma("GCC diagnostic push") +# define _LIBCPP_DIAGNOSTIC_POP _Pragma("GCC diagnostic pop") +# define _LIBCPP_CLANG_DIAGNOSTIC_IGNORED(str) +# define _LIBCPP_GCC_DIAGNOSTIC_IGNORED(str) _Pragma(_LIBCPP_TOSTRING(GCC diagnostic ignored str)) +# else +# define _LIBCPP_DIAGNOSTIC_PUSH +# define _LIBCPP_DIAGNOSTIC_POP +# define _LIBCPP_CLANG_DIAGNOSTIC_IGNORED(str) +# define _LIBCPP_GCC_DIAGNOSTIC_IGNORED(str) +# endif + # if _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_FAST # define _LIBCPP_HARDENING_SIG f # elif _LIBCPP_HARDENING_MODE == _LIBCPP_HARDENING_MODE_EXTENSIVE @@ -848,22 +530,24 @@ typedef __char32_t char32_t; // the implementation of a virtual function in an ABI-incompatible way in the first place, // since that would be an ABI break anyway. Hence, the lack of ABI tag should not be noticeable. // +// The macro can be applied to record and enum types. When the tagged type is nested in +// a record this "parent" record needs to have the macro too. Another use case for applying +// this macro to records and unions is to apply an ABI tag to inline constexpr variables. +// This can be useful for inline variables that are implementation details which are expected +// to change in the future. 
+// // TODO: We provide a escape hatch with _LIBCPP_NO_ABI_TAG for folks who want to avoid increasing // the length of symbols with an ABI tag. In practice, we should remove the escape hatch and // use compression mangling instead, see https://github.com/itanium-cxx-abi/cxx-abi/issues/70. # ifndef _LIBCPP_NO_ABI_TAG # define _LIBCPP_HIDE_FROM_ABI \ _LIBCPP_HIDDEN _LIBCPP_EXCLUDE_FROM_EXPLICIT_INSTANTIATION \ - __attribute__((__abi_tag__(_LIBCPP_TOSTRING(_LIBCPP_ODR_SIGNATURE)))) + __attribute__((__abi_tag__(_LIBCPP_TOSTRING(_LIBCPP_ODR_SIGNATURE)))) # else # define _LIBCPP_HIDE_FROM_ABI _LIBCPP_HIDDEN _LIBCPP_EXCLUDE_FROM_EXPLICIT_INSTANTIATION # endif # define _LIBCPP_HIDE_FROM_ABI_VIRTUAL _LIBCPP_HIDDEN _LIBCPP_EXCLUDE_FROM_EXPLICIT_INSTANTIATION -// This macro provides a HIDE_FROM_ABI equivalent that can be applied to extern -// "C" function, as those lack mangling. -# define _LIBCPP_HIDE_FROM_ABI_C _LIBCPP_HIDDEN _LIBCPP_EXCLUDE_FROM_EXPLICIT_INSTANTIATION - # ifdef _LIBCPP_BUILDING_LIBRARY # if _LIBCPP_ABI_VERSION > 1 # define _LIBCPP_HIDE_FROM_ABI_AFTER_V1 _LIBCPP_HIDE_FROM_ABI @@ -874,21 +558,51 @@ typedef __char32_t char32_t; # define _LIBCPP_HIDE_FROM_ABI_AFTER_V1 _LIBCPP_HIDE_FROM_ABI # endif -// TODO(LLVM-19): Remove _LIBCPP_INLINE_VISIBILITY and _VSTD, which we're keeping around -// only to ease the renaming for downstreams. -# define _LIBCPP_INLINE_VISIBILITY _LIBCPP_HIDE_FROM_ABI -# define _VSTD std +// TODO: Remove this workaround once we drop support for Clang 16 +# if __has_warning("-Wc++23-extensions") +# define _LIBCPP_CLANG_DIAGNOSTIC_IGNORED_CXX23_EXTENSION _LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wc++23-extensions") +# else +# define _LIBCPP_CLANG_DIAGNOSTIC_IGNORED_CXX23_EXTENSION _LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wc++2b-extensions") +# endif + +// Clang modules take a significant compile time hit when pushing and popping diagnostics. 
+// Since all the headers are marked as system headers in the modulemap, we can simply disable this +// pushing and popping when building with clang modules. +# if !__has_feature(modules) +# define _LIBCPP_PUSH_EXTENSION_DIAGNOSTICS \ + _LIBCPP_DIAGNOSTIC_PUSH \ + _LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wc++11-extensions") \ + _LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wc++14-extensions") \ + _LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wc++17-extensions") \ + _LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wc++20-extensions") \ + _LIBCPP_CLANG_DIAGNOSTIC_IGNORED_CXX23_EXTENSION \ + _LIBCPP_GCC_DIAGNOSTIC_IGNORED("-Wc++14-extensions") \ + _LIBCPP_GCC_DIAGNOSTIC_IGNORED("-Wc++17-extensions") \ + _LIBCPP_GCC_DIAGNOSTIC_IGNORED("-Wc++20-extensions") \ + _LIBCPP_GCC_DIAGNOSTIC_IGNORED("-Wc++23-extensions") +# define _LIBCPP_POP_EXTENSION_DIAGNOSTICS _LIBCPP_DIAGNOSTIC_POP +# else +# define _LIBCPP_PUSH_EXTENSION_DIAGNOSTICS +# define _LIBCPP_POP_EXTENSION_DIAGNOSTICS +# endif // Inline namespaces are available in Clang/GCC/MSVC regardless of C++ dialect. 
// clang-format off -# define _LIBCPP_BEGIN_NAMESPACE_STD namespace _LIBCPP_TYPE_VISIBILITY_DEFAULT std { \ +# define _LIBCPP_BEGIN_NAMESPACE_STD _LIBCPP_PUSH_EXTENSION_DIAGNOSTICS \ + namespace _LIBCPP_TYPE_VISIBILITY_DEFAULT std { \ inline namespace _LIBCPP_ABI_NAMESPACE { -# define _LIBCPP_END_NAMESPACE_STD }} +# define _LIBCPP_END_NAMESPACE_STD }} _LIBCPP_POP_EXTENSION_DIAGNOSTICS +#ifdef _LIBCPP_ABI_NO_FILESYSTEM_INLINE_NAMESPACE +# define _LIBCPP_BEGIN_NAMESPACE_FILESYSTEM _LIBCPP_BEGIN_NAMESPACE_STD namespace filesystem { +# define _LIBCPP_END_NAMESPACE_FILESYSTEM } _LIBCPP_END_NAMESPACE_STD +#else # define _LIBCPP_BEGIN_NAMESPACE_FILESYSTEM _LIBCPP_BEGIN_NAMESPACE_STD \ inline namespace __fs { namespace filesystem { -# define _LIBCPP_END_NAMESPACE_FILESYSTEM _LIBCPP_END_NAMESPACE_STD }} +# define _LIBCPP_END_NAMESPACE_FILESYSTEM }} _LIBCPP_END_NAMESPACE_STD +#endif + // clang-format on # if __has_attribute(__enable_if__) @@ -985,6 +699,14 @@ typedef __char32_t char32_t; # define _LIBCPP_DEPRECATED_(m) # endif +# if _LIBCPP_STD_VER < 20 +# define _LIBCPP_DEPRECATED_ATOMIC_SYNC \ + _LIBCPP_DEPRECATED_("The C++20 synchronization library has been deprecated prior to C++20. 
Please update to " \ + "using -std=c++20 if you need to use these facilities.") +# else +# define _LIBCPP_DEPRECATED_ATOMIC_SYNC /* nothing */ +# endif + # if !defined(_LIBCPP_CXX03_LANG) # define _LIBCPP_DEPRECATED_IN_CXX11 _LIBCPP_DEPRECATED # else @@ -1015,6 +737,12 @@ typedef __char32_t char32_t; # define _LIBCPP_DEPRECATED_IN_CXX23 # endif +# if _LIBCPP_STD_VER >= 26 +# define _LIBCPP_DEPRECATED_IN_CXX26 _LIBCPP_DEPRECATED +# else +# define _LIBCPP_DEPRECATED_IN_CXX26 +# endif + # if !defined(_LIBCPP_HAS_NO_CHAR8_T) # define _LIBCPP_DEPRECATED_WITH_CHAR8_T _LIBCPP_DEPRECATED # else @@ -1068,20 +796,6 @@ typedef __char32_t char32_t; # define _LIBCPP_CONSTEXPR_SINCE_CXX23 # endif -# ifndef _LIBCPP_HAS_NO_ASAN -extern "C" _LIBCPP_EXPORTED_FROM_ABI void -__sanitizer_annotate_contiguous_container(const void*, const void*, const void*, const void*); -extern "C" _LIBCPP_EXPORTED_FROM_ABI void __sanitizer_annotate_double_ended_contiguous_container( - const void*, const void*, const void*, const void*, const void*, const void*); -extern "C" _LIBCPP_EXPORTED_FROM_ABI int -__sanitizer_verify_double_ended_contiguous_container(const void*, const void*, const void*, const void*); -# endif - -// Try to find out if RTTI is disabled. -# if !defined(__cpp_rtti) || __cpp_rtti < 199711L -# define _LIBCPP_HAS_NO_RTTI -# endif - # ifndef _LIBCPP_WEAK # define _LIBCPP_WEAK __attribute__((__weak__)) # endif @@ -1193,9 +907,6 @@ __sanitizer_verify_double_ended_contiguous_container(const void*, const void*, c # ifndef _LIBCPP_ATOMIC_FLAG_TYPE # define _LIBCPP_ATOMIC_FLAG_TYPE bool # endif -# ifdef _LIBCPP_FREESTANDING -# define _LIBCPP_ATOMIC_ONLY_USE_BUILTINS -# endif # endif # if defined(__FreeBSD__) && defined(__clang__) && __has_attribute(__no_thread_safety_analysis__) @@ -1257,23 +968,6 @@ __sanitizer_verify_double_ended_contiguous_container(const void*, const void*, c # define _LIBCPP_IF_WIDE_CHARACTERS(...) 
__VA_ARGS__ # endif -# if defined(_LIBCPP_ENABLE_CXX17_REMOVED_FEATURES) -# define _LIBCPP_ENABLE_CXX17_REMOVED_AUTO_PTR -# define _LIBCPP_ENABLE_CXX17_REMOVED_BINDERS -# define _LIBCPP_ENABLE_CXX17_REMOVED_RANDOM_SHUFFLE -# define _LIBCPP_ENABLE_CXX17_REMOVED_UNEXPECTED_FUNCTIONS -# define _LIBCPP_ENABLE_CXX17_REMOVED_UNARY_BINARY_FUNCTION -# endif // _LIBCPP_ENABLE_CXX17_REMOVED_FEATURES - -# if defined(_LIBCPP_ENABLE_CXX20_REMOVED_FEATURES) -# define _LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_MEMBERS -# define _LIBCPP_ENABLE_CXX20_REMOVED_ALLOCATOR_VOID_SPECIALIZATION -# define _LIBCPP_ENABLE_CXX20_REMOVED_BINDER_TYPEDEFS -# define _LIBCPP_ENABLE_CXX20_REMOVED_NEGATORS -# define _LIBCPP_ENABLE_CXX20_REMOVED_RAW_STORAGE_ITERATOR -# define _LIBCPP_ENABLE_CXX20_REMOVED_TYPE_TRAITS -# endif // _LIBCPP_ENABLE_CXX20_REMOVED_FEATURES - // clang-format off # define _LIBCPP_PUSH_MACROS _Pragma("push_macro(\"min\")") _Pragma("push_macro(\"max\")") _Pragma("push_macro(\"refresh\")") _Pragma("push_macro(\"move\")") _Pragma("push_macro(\"erase\")") # define _LIBCPP_POP_MACROS _Pragma("pop_macro(\"min\")") _Pragma("pop_macro(\"max\")") _Pragma("pop_macro(\"refresh\")") _Pragma("pop_macro(\"move\")") _Pragma("pop_macro(\"erase\")") @@ -1322,23 +1016,6 @@ __sanitizer_verify_double_ended_contiguous_container(const void*, const void*, c // the ABI inconsistent. 
# endif -# ifdef _LIBCPP_COMPILER_CLANG_BASED -# define _LIBCPP_DIAGNOSTIC_PUSH _Pragma("clang diagnostic push") -# define _LIBCPP_DIAGNOSTIC_POP _Pragma("clang diagnostic pop") -# define _LIBCPP_CLANG_DIAGNOSTIC_IGNORED(str) _Pragma(_LIBCPP_TOSTRING(clang diagnostic ignored str)) -# define _LIBCPP_GCC_DIAGNOSTIC_IGNORED(str) -# elif defined(_LIBCPP_COMPILER_GCC) -# define _LIBCPP_DIAGNOSTIC_PUSH _Pragma("GCC diagnostic push") -# define _LIBCPP_DIAGNOSTIC_POP _Pragma("GCC diagnostic pop") -# define _LIBCPP_CLANG_DIAGNOSTIC_IGNORED(str) -# define _LIBCPP_GCC_DIAGNOSTIC_IGNORED(str) _Pragma(_LIBCPP_TOSTRING(GCC diagnostic ignored str)) -# else -# define _LIBCPP_DIAGNOSTIC_PUSH -# define _LIBCPP_DIAGNOSTIC_POP -# define _LIBCPP_CLANG_DIAGNOSTIC_IGNORED(str) -# define _LIBCPP_GCC_DIAGNOSTIC_IGNORED(str) -# endif - // c8rtomb() and mbrtoc8() were added in C++20 and C23. Support for these // functions is gradually being added to existing C libraries. The conditions // below check for known C library versions and conditions under which these @@ -1447,7 +1124,7 @@ __sanitizer_verify_double_ended_contiguous_container(const void*, const void*, c # define _LIBCPP_USING_IF_EXISTS # endif -# if __has_cpp_attribute(nodiscard) +# if __has_cpp_attribute(__nodiscard__) # define _LIBCPP_NODISCARD [[__nodiscard__]] # else // We can't use GCC's [[gnu::warn_unused_result]] and @@ -1456,27 +1133,13 @@ __sanitizer_verify_double_ended_contiguous_container(const void*, const void*, c # define _LIBCPP_NODISCARD # endif -// _LIBCPP_NODISCARD_EXT may be used to apply [[nodiscard]] to entities not -// specified as such as an extension. 
-# if !defined(_LIBCPP_DISABLE_NODISCARD_EXT) -# define _LIBCPP_NODISCARD_EXT _LIBCPP_NODISCARD -# else -# define _LIBCPP_NODISCARD_EXT -# endif - -# if _LIBCPP_STD_VER >= 20 || !defined(_LIBCPP_DISABLE_NODISCARD_EXT) -# define _LIBCPP_NODISCARD_AFTER_CXX17 _LIBCPP_NODISCARD -# else -# define _LIBCPP_NODISCARD_AFTER_CXX17 -# endif - # if __has_attribute(__no_destroy__) # define _LIBCPP_NO_DESTROY __attribute__((__no_destroy__)) # else # define _LIBCPP_NO_DESTROY # endif -# if __has_attribute(__diagnose_if__) && !defined(_LIBCPP_DISABLE_ADDITIONAL_DIAGNOSTICS) +# if __has_attribute(__diagnose_if__) # define _LIBCPP_DIAGNOSE_WARNING(...) __attribute__((__diagnose_if__(__VA_ARGS__, "warning"))) # else # define _LIBCPP_DIAGNOSE_WARNING(...) diff --git a/lib/libcxx/include/__configuration/abi.h b/lib/libcxx/include/__configuration/abi.h new file mode 100644 index 000000000000..cfd878121380 --- /dev/null +++ b/lib/libcxx/include/__configuration/abi.h @@ -0,0 +1,172 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___CONFIGURATION_ABI_H +#define _LIBCPP___CONFIGURATION_ABI_H + +/* zig patch: instead of including __config_site, zig adds -D flags when compiling */ +#include <__configuration/compiler.h> +#include <__configuration/platform.h> + +#ifndef _LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER +# pragma GCC system_header +#endif + +#if _LIBCPP_ABI_VERSION >= 2 +// Change short string representation so that string data starts at offset 0, +// improving its alignment in some cases. +# define _LIBCPP_ABI_ALTERNATE_STRING_LAYOUT +// Fix deque iterator type in order to support incomplete types. 
+# define _LIBCPP_ABI_INCOMPLETE_TYPES_IN_DEQUE +// Fix undefined behavior in how std::list stores its linked nodes. +# define _LIBCPP_ABI_LIST_REMOVE_NODE_POINTER_UB +// Fix undefined behavior in how __tree stores its end and parent nodes. +# define _LIBCPP_ABI_TREE_REMOVE_NODE_POINTER_UB +// Fix undefined behavior in how __hash_table stores its pointer types. +# define _LIBCPP_ABI_FIX_UNORDERED_NODE_POINTER_UB +# define _LIBCPP_ABI_FORWARD_LIST_REMOVE_NODE_POINTER_UB +# define _LIBCPP_ABI_FIX_UNORDERED_CONTAINER_SIZE_TYPE +// Override the default return value of exception::what() for bad_function_call::what() +// with a string that is specific to bad_function_call (see http://wg21.link/LWG2233). +// This is an ABI break on platforms that sign and authenticate vtable function pointers +// because it changes the mangling of the virtual function located in the vtable, which +// changes how it gets signed. +# define _LIBCPP_ABI_BAD_FUNCTION_CALL_GOOD_WHAT_MESSAGE +// Enable optimized version of __do_get_(un)signed which avoids redundant copies. +# define _LIBCPP_ABI_OPTIMIZED_LOCALE_NUM_GET +// Give reverse_iterator one data member of type T, not two. +// Also, in C++17 and later, don't derive iterator types from std::iterator. +# define _LIBCPP_ABI_NO_ITERATOR_BASES +// Use the smallest possible integer type to represent the index of the variant. +// Previously libc++ used "unsigned int" exclusively. +# define _LIBCPP_ABI_VARIANT_INDEX_TYPE_OPTIMIZATION +// Unstable attempt to provide a more optimized std::function +# define _LIBCPP_ABI_OPTIMIZED_FUNCTION +// All the regex constants must be distinct and nonzero. +# define _LIBCPP_ABI_REGEX_CONSTANTS_NONZERO +// Re-worked external template instantiations for std::string with a focus on +// performance and fast-path inlining. +# define _LIBCPP_ABI_STRING_OPTIMIZED_EXTERNAL_INSTANTIATION +// Enable clang::trivial_abi on std::unique_ptr. 
+# define _LIBCPP_ABI_ENABLE_UNIQUE_PTR_TRIVIAL_ABI +// Enable clang::trivial_abi on std::shared_ptr and std::weak_ptr +# define _LIBCPP_ABI_ENABLE_SHARED_PTR_TRIVIAL_ABI +// std::random_device holds some state when it uses an implementation that gets +// entropy from a file (see _LIBCPP_USING_DEV_RANDOM). When switching from this +// implementation to another one on a platform that has already shipped +// std::random_device, one needs to retain the same object layout to remain ABI +// compatible. This switch removes these workarounds for platforms that don't care +// about ABI compatibility. +# define _LIBCPP_ABI_NO_RANDOM_DEVICE_COMPATIBILITY_LAYOUT +// Don't export the legacy __basic_string_common class and its methods from the built library. +# define _LIBCPP_ABI_DO_NOT_EXPORT_BASIC_STRING_COMMON +// Don't export the legacy __vector_base_common class and its methods from the built library. +# define _LIBCPP_ABI_DO_NOT_EXPORT_VECTOR_BASE_COMMON +// According to the Standard, `bitset::operator[] const` returns bool +# define _LIBCPP_ABI_BITSET_VECTOR_BOOL_CONST_SUBSCRIPT_RETURN_BOOL +// Fix the implementation of CityHash used for std::hash. +// This is an ABI break because `std::hash` will return a different result, +// which means that hashing the same object in translation units built against +// different versions of libc++ can return inconsistent results. This is especially +// tricky since std::hash is used in the implementation of unordered containers. +// +// The incorrect implementation of CityHash has the problem that it drops some +// bits on the floor. +# define _LIBCPP_ABI_FIX_CITYHASH_IMPLEMENTATION +// Remove the base 10 implementation of std::to_chars from the dylib. +// The implementation moved to the header, but we still export the symbols from +// the dylib for backwards compatibility. 
+# define _LIBCPP_ABI_DO_NOT_EXPORT_TO_CHARS_BASE_10 +// Define std::array/std::string_view iterators to be __wrap_iters instead of raw +// pointers, which prevents people from relying on a non-portable implementation +// detail. This is especially useful because enabling bounded iterators hardening +// requires code not to make these assumptions. +# define _LIBCPP_ABI_USE_WRAP_ITER_IN_STD_ARRAY +# define _LIBCPP_ABI_USE_WRAP_ITER_IN_STD_STRING_VIEW +// Dont' add an inline namespace for `std::filesystem` +# define _LIBCPP_ABI_NO_FILESYSTEM_INLINE_NAMESPACE +// std::basic_ios uses WEOF to indicate that the fill value is +// uninitialized. However, on platforms where the size of char_type is +// equal to or greater than the size of int_type and char_type is unsigned, +// std::char_traits::eq_int_type() cannot distinguish between WEOF +// and WCHAR_MAX. This ABI setting determines whether we should instead track whether the fill +// value has been initialized using a separate boolean, which changes the ABI. +# define _LIBCPP_ABI_IOS_ALLOW_ARBITRARY_FILL_VALUE +// Make a std::pair of trivially copyable types trivially copyable. +// While this technically doesn't change the layout of pair itself, other types may decide to programatically change +// their representation based on whether something is trivially copyable. +# define _LIBCPP_ABI_TRIVIALLY_COPYABLE_PAIR +#elif _LIBCPP_ABI_VERSION == 1 +# if !(defined(_LIBCPP_OBJECT_FORMAT_COFF) || defined(_LIBCPP_OBJECT_FORMAT_XCOFF)) +// Enable compiling copies of now inline methods into the dylib to support +// applications compiled against older libraries. This is unnecessary with +// COFF dllexport semantics, since dllexport forces a non-inline definition +// of inline functions to be emitted anyway. Our own non-inline copy would +// conflict with the dllexport-emitted copy, so we disable it. 
For XCOFF, +// the linker will take issue with the symbols in the shared object if the +// weak inline methods get visibility (such as from -fvisibility-inlines-hidden), +// so disable it. +# define _LIBCPP_DEPRECATED_ABI_LEGACY_LIBRARY_DEFINITIONS_FOR_INLINE_FUNCTIONS +# endif +// Feature macros for disabling pre ABI v1 features. All of these options +// are deprecated. +# if defined(__FreeBSD__) && __FreeBSD__ < 14 +# define _LIBCPP_DEPRECATED_ABI_DISABLE_PAIR_TRIVIAL_COPY_CTOR +# endif +#endif + +// We had some bugs where we use [[no_unique_address]] together with construct_at, +// which causes UB as the call on construct_at could write to overlapping subobjects +// +// https://github.com/llvm/llvm-project/issues/70506 +// https://github.com/llvm/llvm-project/issues/70494 +// +// To fix the bug we had to change the ABI of some classes to remove [[no_unique_address]] under certain conditions. +// The macro below is used for all classes whose ABI have changed as part of fixing these bugs. +#define _LIBCPP_ABI_LLVM18_NO_UNIQUE_ADDRESS __attribute__((__abi_tag__("llvm18_nua"))) + +// Changes the iterator type of select containers (see below) to a bounded iterator that keeps track of whether it's +// within the bounds of the original container and asserts it on every dereference. +// +// ABI impact: changes the iterator type of the relevant containers. +// +// Supported containers: +// - `span`; +// - `string_view`. +// #define _LIBCPP_ABI_BOUNDED_ITERATORS + +// Changes the iterator type of `basic_string` to a bounded iterator that keeps track of whether it's within the bounds +// of the original container and asserts it on every dereference and when performing iterator arithmetics. +// +// ABI impact: changes the iterator type of `basic_string` and its specializations, such as `string` and `wstring`. 
+// #define _LIBCPP_ABI_BOUNDED_ITERATORS_IN_STRING + +// Changes the iterator type of `vector` to a bounded iterator that keeps track of whether it's within the bounds of the +// original container and asserts it on every dereference and when performing iterator arithmetics. Note: this doesn't +// yet affect `vector`. +// +// ABI impact: changes the iterator type of `vector` (except `vector`). +// #define _LIBCPP_ABI_BOUNDED_ITERATORS_IN_VECTOR + +#if defined(_LIBCPP_COMPILER_CLANG_BASED) +# if defined(__APPLE__) +# if defined(__i386__) || defined(__x86_64__) +// use old string layout on x86_64 and i386 +# elif defined(__arm__) +// use old string layout on arm (which does not include aarch64/arm64), except on watch ABIs +# if defined(__ARM_ARCH_7K__) && __ARM_ARCH_7K__ >= 2 +# define _LIBCPP_ABI_ALTERNATE_STRING_LAYOUT +# endif +# else +# define _LIBCPP_ABI_ALTERNATE_STRING_LAYOUT +# endif +# endif +#endif + +#endif // _LIBCPP___CONFIGURATION_ABI_H diff --git a/lib/libcxx/include/__configuration/availability.h b/lib/libcxx/include/__configuration/availability.h new file mode 100644 index 000000000000..ab483a07c9c1 --- /dev/null +++ b/lib/libcxx/include/__configuration/availability.h @@ -0,0 +1,400 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___CONFIGURATION_AVAILABILITY_H +#define _LIBCPP___CONFIGURATION_AVAILABILITY_H + +#include <__configuration/compiler.h> +#include <__configuration/language.h> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +// Libc++ is shipped by various vendors. 
In particular, it is used as a system +// library on macOS, iOS and other Apple platforms. In order for users to be +// able to compile a binary that is intended to be deployed to an older version +// of a platform, Clang provides availability attributes [1]. These attributes +// can be placed on declarations and are used to describe the life cycle of a +// symbol in the library. +// +// The main goal is to ensure a compile-time error if a symbol that hasn't been +// introduced in a previously released library is used in a program that targets +// that previously released library. Normally, this would be a load-time error +// when one tries to launch the program against the older library. +// +// For example, the filesystem library was introduced in the dylib in LLVM 9. +// On Apple platforms, this corresponds to macOS 10.15. If a user compiles on +// a macOS 10.15 host but targets macOS 10.13 with their program, the compiler +// would normally not complain (because the required declarations are in the +// headers), but the dynamic loader would fail to find the symbols when actually +// trying to launch the program on macOS 10.13. To turn this into a compile-time +// issue instead, declarations are annotated with when they were introduced, and +// the compiler can produce a diagnostic if the program references something that +// isn't available on the deployment target. +// +// This mechanism is general in nature, and any vendor can add their markup to +// the library (see below). Whenever a new feature is added that requires support +// in the shared library, two macros are added below to allow marking the feature +// as unavailable: +// 1. A macro named `_LIBCPP_AVAILABILITY_HAS_` which must be defined +// to `_LIBCPP_INTRODUCED_IN_` for the appropriate LLVM version. +// 2. A macro named `_LIBCPP_AVAILABILITY_`, which must be defined to +// `_LIBCPP_INTRODUCED_IN__MARKUP` for the appropriate LLVM version. 
+// +// When vendors decide to ship the feature as part of their shared library, they +// can update the `_LIBCPP_INTRODUCED_IN_` macro (and the markup counterpart) +// based on the platform version they shipped that version of LLVM in. The library +// will then use this markup to provide an optimal user experience on these platforms. +// +// Furthermore, many features in the standard library have corresponding +// feature-test macros. The `_LIBCPP_AVAILABILITY_HAS_` macros +// are checked by the corresponding feature-test macros generated by +// generate_feature_test_macro_components.py to ensure that the library +// doesn't announce a feature as being implemented if it is unavailable on +// the deployment target. +// +// Note that this mechanism is disabled by default in the "upstream" libc++. +// Availability annotations are only meaningful when shipping libc++ inside +// a platform (i.e. as a system library), and so vendors that want them should +// turn those annotations on at CMake configuration time. +// +// [1]: https://clang.llvm.org/docs/AttributeReference.html#availability + +// For backwards compatibility, allow users to define _LIBCPP_DISABLE_AVAILABILITY +// for a while. +#if defined(_LIBCPP_DISABLE_AVAILABILITY) +# if !defined(_LIBCPP_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS) +# define _LIBCPP_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS +# endif +#endif + +// Availability markup is disabled when building the library, or when a non-Clang +// compiler is used because only Clang supports the necessary attributes. +#if defined(_LIBCPP_BUILDING_LIBRARY) || defined(_LIBCXXABI_BUILDING_LIBRARY) || !defined(_LIBCPP_COMPILER_CLANG_BASED) +# if !defined(_LIBCPP_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS) +# define _LIBCPP_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS +# endif +#endif + +// When availability annotations are disabled, we take for granted that features introduced +// in all versions of the library are available. 
+#if defined(_LIBCPP_HAS_NO_VENDOR_AVAILABILITY_ANNOTATIONS) + +# define _LIBCPP_INTRODUCED_IN_LLVM_19 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_19_ATTRIBUTE /* nothing */ + +# define _LIBCPP_INTRODUCED_IN_LLVM_18 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_18_ATTRIBUTE /* nothing */ + +# define _LIBCPP_INTRODUCED_IN_LLVM_17 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_17_ATTRIBUTE /* nothing */ + +# define _LIBCPP_INTRODUCED_IN_LLVM_16 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_16_ATTRIBUTE /* nothing */ + +# define _LIBCPP_INTRODUCED_IN_LLVM_15 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_15_ATTRIBUTE /* nothing */ + +# define _LIBCPP_INTRODUCED_IN_LLVM_14 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_14_ATTRIBUTE /* nothing */ + +# define _LIBCPP_INTRODUCED_IN_LLVM_13 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_13_ATTRIBUTE /* nothing */ + +# define _LIBCPP_INTRODUCED_IN_LLVM_12 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_12_ATTRIBUTE /* nothing */ + +# define _LIBCPP_INTRODUCED_IN_LLVM_11 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_11_ATTRIBUTE /* nothing */ + +# define _LIBCPP_INTRODUCED_IN_LLVM_10 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_10_ATTRIBUTE /* nothing */ + +# define _LIBCPP_INTRODUCED_IN_LLVM_9 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_9_ATTRIBUTE /* nothing */ +# define _LIBCPP_INTRODUCED_IN_LLVM_9_ATTRIBUTE_PUSH /* nothing */ +# define _LIBCPP_INTRODUCED_IN_LLVM_9_ATTRIBUTE_POP /* nothing */ + +# define _LIBCPP_INTRODUCED_IN_LLVM_8 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_8_ATTRIBUTE /* nothing */ + +# define _LIBCPP_INTRODUCED_IN_LLVM_4 1 +# define _LIBCPP_INTRODUCED_IN_LLVM_4_ATTRIBUTE /* nothing */ + +#elif defined(__APPLE__) + +// clang-format off + +// LLVM 19 +// TODO: Fill this in +# define _LIBCPP_INTRODUCED_IN_LLVM_19 0 +# define _LIBCPP_INTRODUCED_IN_LLVM_19_ATTRIBUTE __attribute__((unavailable)) + +// LLVM 18 +// TODO: Fill this in +# define _LIBCPP_INTRODUCED_IN_LLVM_18 0 +# define _LIBCPP_INTRODUCED_IN_LLVM_18_ATTRIBUTE __attribute__((unavailable)) + +// LLVM 17 +# if 
(defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 140400) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 170400) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 170400) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 100400) +# define _LIBCPP_INTRODUCED_IN_LLVM_17 0 +# else +# define _LIBCPP_INTRODUCED_IN_LLVM_17 1 +# endif +# define _LIBCPP_INTRODUCED_IN_LLVM_17_ATTRIBUTE \ + __attribute__((availability(macos, strict, introduced = 14.4))) \ + __attribute__((availability(ios, strict, introduced = 17.4))) \ + __attribute__((availability(tvos, strict, introduced = 17.4))) \ + __attribute__((availability(watchos, strict, introduced = 10.4))) + +// LLVM 16 +# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 140000) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 170000) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 170000) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 100000) +# define _LIBCPP_INTRODUCED_IN_LLVM_16 0 +# else +# define _LIBCPP_INTRODUCED_IN_LLVM_16 1 +# endif +# define _LIBCPP_INTRODUCED_IN_LLVM_16_ATTRIBUTE \ + __attribute__((availability(macos, strict, introduced = 14.0))) \ + __attribute__((availability(ios, strict, introduced = 17.0))) \ + __attribute__((availability(tvos, strict, introduced = 17.0))) \ + __attribute__((availability(watchos, strict, introduced = 10.0))) + +// LLVM 15 +# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 130400) || \ + 
(defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 160500) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 160500) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 90500) +# define _LIBCPP_INTRODUCED_IN_LLVM_15 0 +# else +# define _LIBCPP_INTRODUCED_IN_LLVM_15 1 +# endif +# define _LIBCPP_INTRODUCED_IN_LLVM_15_ATTRIBUTE \ + __attribute__((availability(macos, strict, introduced = 13.4))) \ + __attribute__((availability(ios, strict, introduced = 16.5))) \ + __attribute__((availability(tvos, strict, introduced = 16.5))) \ + __attribute__((availability(watchos, strict, introduced = 9.5))) + +// LLVM 14 +# define _LIBCPP_INTRODUCED_IN_LLVM_14 _LIBCPP_INTRODUCED_IN_LLVM_15 +# define _LIBCPP_INTRODUCED_IN_LLVM_14_ATTRIBUTE _LIBCPP_INTRODUCED_IN_LLVM_15_ATTRIBUTE + +// LLVM 13 +# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 130000) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 160000) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 160000) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 90000) +# define _LIBCPP_INTRODUCED_IN_LLVM_13 0 +# else +# define _LIBCPP_INTRODUCED_IN_LLVM_13 1 +# endif +# define _LIBCPP_INTRODUCED_IN_LLVM_13_ATTRIBUTE \ + __attribute__((availability(macos, strict, introduced = 13.0))) \ + __attribute__((availability(ios, strict, introduced = 16.0))) \ + __attribute__((availability(tvos, strict, introduced = 16.0))) \ + __attribute__((availability(watchos, strict, introduced = 9.0))) + +// LLVM 12 +# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 
120300) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 150300) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 150300) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 80300) +# define _LIBCPP_INTRODUCED_IN_LLVM_12 0 +# else +# define _LIBCPP_INTRODUCED_IN_LLVM_12 1 +# endif +# define _LIBCPP_INTRODUCED_IN_LLVM_12_ATTRIBUTE \ + __attribute__((availability(macos, strict, introduced = 12.3))) \ + __attribute__((availability(ios, strict, introduced = 15.3))) \ + __attribute__((availability(tvos, strict, introduced = 15.3))) \ + __attribute__((availability(watchos, strict, introduced = 8.3))) + +// LLVM 11 +# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 110000) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 140000) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 140000) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 70000) +# define _LIBCPP_INTRODUCED_IN_LLVM_11 0 +# else +# define _LIBCPP_INTRODUCED_IN_LLVM_11 1 +# endif +# define _LIBCPP_INTRODUCED_IN_LLVM_11_ATTRIBUTE \ + __attribute__((availability(macos, strict, introduced = 11.0))) \ + __attribute__((availability(ios, strict, introduced = 14.0))) \ + __attribute__((availability(tvos, strict, introduced = 14.0))) \ + __attribute__((availability(watchos, strict, introduced = 7.0))) + +// LLVM 10 +# define _LIBCPP_INTRODUCED_IN_LLVM_10 _LIBCPP_INTRODUCED_IN_LLVM_11 +# define _LIBCPP_INTRODUCED_IN_LLVM_10_ATTRIBUTE _LIBCPP_INTRODUCED_IN_LLVM_11_ATTRIBUTE + +// LLVM 9 +# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && 
__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101500) || \ + (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 130000) || \ + (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 130000) || \ + (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 60000) +# define _LIBCPP_INTRODUCED_IN_LLVM_9 0 +# else +# define _LIBCPP_INTRODUCED_IN_LLVM_9 1 +# endif +# define _LIBCPP_INTRODUCED_IN_LLVM_9_ATTRIBUTE \ + __attribute__((availability(macos, strict, introduced = 10.15))) \ + __attribute__((availability(ios, strict, introduced = 13.0))) \ + __attribute__((availability(tvos, strict, introduced = 13.0))) \ + __attribute__((availability(watchos, strict, introduced = 6.0))) +# define _LIBCPP_INTRODUCED_IN_LLVM_9_ATTRIBUTE_PUSH \ + _Pragma("clang attribute push(__attribute__((availability(macos,strict,introduced=10.15))), apply_to=any(function,record))") \ + _Pragma("clang attribute push(__attribute__((availability(ios,strict,introduced=13.0))), apply_to=any(function,record))") \ + _Pragma("clang attribute push(__attribute__((availability(tvos,strict,introduced=13.0))), apply_to=any(function,record))") \ + _Pragma("clang attribute push(__attribute__((availability(watchos,strict,introduced=6.0))), apply_to=any(function,record))") +# define _LIBCPP_INTRODUCED_IN_LLVM_9_ATTRIBUTE_POP \ + _Pragma("clang attribute pop") \ + _Pragma("clang attribute pop") \ + _Pragma("clang attribute pop") \ + _Pragma("clang attribute pop") + +// LLVM 4 +# if defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000 +# define _LIBCPP_INTRODUCED_IN_LLVM_4 0 +# else +# define _LIBCPP_INTRODUCED_IN_LLVM_4 1 +# endif +# define _LIBCPP_INTRODUCED_IN_LLVM_4_ATTRIBUTE __attribute__((availability(watchos, strict, introduced = 5.0))) + +// clang-format on + +#else + +// ...New vendors can add 
availability markup here... + +# error \ + "It looks like you're trying to enable vendor availability markup, but you haven't defined the corresponding macros yet!" + +#endif + +// These macros control the availability of std::bad_optional_access and +// other exception types. These were put in the shared library to prevent +// code bloat from every user program defining the vtable for these exception +// types. +// +// Note that when exceptions are disabled, the methods that normally throw +// these exceptions can be used even on older deployment targets, but those +// methods will abort instead of throwing. +#define _LIBCPP_AVAILABILITY_HAS_BAD_OPTIONAL_ACCESS _LIBCPP_INTRODUCED_IN_LLVM_4 +#define _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS _LIBCPP_INTRODUCED_IN_LLVM_4_ATTRIBUTE + +#define _LIBCPP_AVAILABILITY_HAS_BAD_VARIANT_ACCESS _LIBCPP_INTRODUCED_IN_LLVM_4 +#define _LIBCPP_AVAILABILITY_BAD_VARIANT_ACCESS _LIBCPP_INTRODUCED_IN_LLVM_4_ATTRIBUTE + +#define _LIBCPP_AVAILABILITY_HAS_BAD_ANY_CAST _LIBCPP_INTRODUCED_IN_LLVM_4 +#define _LIBCPP_AVAILABILITY_BAD_ANY_CAST _LIBCPP_INTRODUCED_IN_LLVM_4_ATTRIBUTE + +// These macros control the availability of all parts of that +// depend on something in the dylib. +#define _LIBCPP_AVAILABILITY_HAS_FILESYSTEM_LIBRARY _LIBCPP_INTRODUCED_IN_LLVM_9 +#define _LIBCPP_AVAILABILITY_FILESYSTEM_LIBRARY _LIBCPP_INTRODUCED_IN_LLVM_9_ATTRIBUTE +#define _LIBCPP_AVAILABILITY_FILESYSTEM_LIBRARY_PUSH _LIBCPP_INTRODUCED_IN_LLVM_9_ATTRIBUTE_PUSH +#define _LIBCPP_AVAILABILITY_FILESYSTEM_LIBRARY_POP _LIBCPP_INTRODUCED_IN_LLVM_9_ATTRIBUTE_POP + +// This controls the availability of the C++20 synchronization library, +// which requires shared library support for various operations +// (see libcxx/src/atomic.cpp). This includes , , +// , and notification functions on std::atomic. 
+#define _LIBCPP_AVAILABILITY_HAS_SYNC _LIBCPP_INTRODUCED_IN_LLVM_11 +#define _LIBCPP_AVAILABILITY_SYNC _LIBCPP_INTRODUCED_IN_LLVM_11_ATTRIBUTE + +// Enable additional explicit instantiations of iostreams components. This +// reduces the number of weak definitions generated in programs that use +// iostreams by providing a single strong definition in the shared library. +// +// TODO: Enable additional explicit instantiations on GCC once it supports exclude_from_explicit_instantiation, +// or once libc++ doesn't use the attribute anymore. +// TODO: Enable them on Windows once https://llvm.org/PR41018 has been fixed. +#if !defined(_LIBCPP_COMPILER_GCC) && !defined(_WIN32) +# define _LIBCPP_AVAILABILITY_HAS_ADDITIONAL_IOSTREAM_EXPLICIT_INSTANTIATIONS_1 _LIBCPP_INTRODUCED_IN_LLVM_12 +#else +# define _LIBCPP_AVAILABILITY_HAS_ADDITIONAL_IOSTREAM_EXPLICIT_INSTANTIATIONS_1 0 +#endif + +// This controls the availability of floating-point std::to_chars functions. +// These overloads were added later than the integer overloads. +#define _LIBCPP_AVAILABILITY_HAS_TO_CHARS_FLOATING_POINT _LIBCPP_INTRODUCED_IN_LLVM_14 +#define _LIBCPP_AVAILABILITY_TO_CHARS_FLOATING_POINT _LIBCPP_INTRODUCED_IN_LLVM_14_ATTRIBUTE + +// This controls whether the library claims to provide a default verbose +// termination function, and consequently whether the headers will try +// to use it when the mechanism isn't overriden at compile-time. +#define _LIBCPP_AVAILABILITY_HAS_VERBOSE_ABORT _LIBCPP_INTRODUCED_IN_LLVM_15 +#define _LIBCPP_AVAILABILITY_VERBOSE_ABORT _LIBCPP_INTRODUCED_IN_LLVM_15_ATTRIBUTE + +// This controls the availability of the C++17 std::pmr library, +// which is implemented in large part in the built library. 
+// +// TODO: Enable std::pmr markup once https://github.com/llvm/llvm-project/issues/40340 has been fixed +// Until then, it is possible for folks to try to use `std::pmr` when back-deploying to targets that don't support +// it and it'll be a load-time error, but we don't have a good alternative because the library won't compile if we +// use availability annotations until that bug has been fixed. +#define _LIBCPP_AVAILABILITY_HAS_PMR _LIBCPP_INTRODUCED_IN_LLVM_16 +#define _LIBCPP_AVAILABILITY_PMR + +// These macros controls the availability of __cxa_init_primary_exception +// in the built library, which std::make_exception_ptr might use +// (see libcxx/include/__exception/exception_ptr.h). +#define _LIBCPP_AVAILABILITY_HAS_INIT_PRIMARY_EXCEPTION _LIBCPP_INTRODUCED_IN_LLVM_18 +#define _LIBCPP_AVAILABILITY_INIT_PRIMARY_EXCEPTION _LIBCPP_INTRODUCED_IN_LLVM_18_ATTRIBUTE + +// This controls the availability of C++23 , which +// has a dependency on the built library (it needs access to +// the underlying buffer types of std::cout, std::cerr, and std::clog. +#define _LIBCPP_AVAILABILITY_HAS_PRINT _LIBCPP_INTRODUCED_IN_LLVM_18 +#define _LIBCPP_AVAILABILITY_PRINT _LIBCPP_INTRODUCED_IN_LLVM_18_ATTRIBUTE + +// This controls the availability of the C++20 time zone database. +// The parser code is built in the library. +#define _LIBCPP_AVAILABILITY_HAS_TZDB _LIBCPP_INTRODUCED_IN_LLVM_19 +#define _LIBCPP_AVAILABILITY_TZDB _LIBCPP_INTRODUCED_IN_LLVM_19_ATTRIBUTE + +// These macros determine whether we assume that std::bad_function_call and +// std::bad_expected_access provide a key function in the dylib. This allows +// centralizing their vtable and typeinfo instead of having all TUs provide +// a weak definition that then gets deduplicated. 
+#define _LIBCPP_AVAILABILITY_HAS_BAD_FUNCTION_CALL_KEY_FUNCTION _LIBCPP_INTRODUCED_IN_LLVM_19 +#define _LIBCPP_AVAILABILITY_BAD_FUNCTION_CALL_KEY_FUNCTION _LIBCPP_INTRODUCED_IN_LLVM_19_ATTRIBUTE +#define _LIBCPP_AVAILABILITY_HAS_BAD_EXPECTED_ACCESS_KEY_FUNCTION _LIBCPP_INTRODUCED_IN_LLVM_19 +#define _LIBCPP_AVAILABILITY_BAD_EXPECTED_ACCESS_KEY_FUNCTION _LIBCPP_INTRODUCED_IN_LLVM_19_ATTRIBUTE + +// Define availability attributes that depend on _LIBCPP_HAS_NO_EXCEPTIONS. +// Those are defined in terms of the availability attributes above, and +// should not be vendor-specific. +#if defined(_LIBCPP_HAS_NO_EXCEPTIONS) +# define _LIBCPP_AVAILABILITY_THROW_BAD_ANY_CAST +# define _LIBCPP_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS +# define _LIBCPP_AVAILABILITY_THROW_BAD_VARIANT_ACCESS +#else +# define _LIBCPP_AVAILABILITY_THROW_BAD_ANY_CAST _LIBCPP_AVAILABILITY_BAD_ANY_CAST +# define _LIBCPP_AVAILABILITY_THROW_BAD_OPTIONAL_ACCESS _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS +# define _LIBCPP_AVAILABILITY_THROW_BAD_VARIANT_ACCESS _LIBCPP_AVAILABILITY_BAD_VARIANT_ACCESS +#endif + +// Define availability attributes that depend on both +// _LIBCPP_HAS_NO_EXCEPTIONS and _LIBCPP_HAS_NO_RTTI. +#if defined(_LIBCPP_HAS_NO_EXCEPTIONS) || defined(_LIBCPP_HAS_NO_RTTI) +# undef _LIBCPP_AVAILABILITY_HAS_INIT_PRIMARY_EXCEPTION +# undef _LIBCPP_AVAILABILITY_INIT_PRIMARY_EXCEPTION +# define _LIBCPP_AVAILABILITY_HAS_INIT_PRIMARY_EXCEPTION 0 +# define _LIBCPP_AVAILABILITY_INIT_PRIMARY_EXCEPTION +#endif + +#endif // _LIBCPP___CONFIGURATION_AVAILABILITY_H diff --git a/lib/libcxx/include/__configuration/compiler.h b/lib/libcxx/include/__configuration/compiler.h new file mode 100644 index 000000000000..d109aa748f6a --- /dev/null +++ b/lib/libcxx/include/__configuration/compiler.h @@ -0,0 +1,51 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___CONFIGURATION_COMPILER_H +#define _LIBCPP___CONFIGURATION_COMPILER_H + +/* zig patch: instead of including __config_site, zig adds -D flags when compiling */ + +#ifndef _LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER +# pragma GCC system_header +#endif + +#if defined(__apple_build_version__) +// Given AppleClang XX.Y.Z, _LIBCPP_APPLE_CLANG_VER is XXYZ (e.g. AppleClang 14.0.3 => 1403) +# define _LIBCPP_COMPILER_CLANG_BASED +# define _LIBCPP_APPLE_CLANG_VER (__apple_build_version__ / 10000) +#elif defined(__clang__) +# define _LIBCPP_COMPILER_CLANG_BASED +# define _LIBCPP_CLANG_VER (__clang_major__ * 100 + __clang_minor__) +#elif defined(__GNUC__) +# define _LIBCPP_COMPILER_GCC +# define _LIBCPP_GCC_VER (__GNUC__ * 100 + __GNUC_MINOR__) +#endif + +#ifdef __cplusplus + +// Warn if a compiler version is used that is not supported anymore +// LLVM RELEASE Update the minimum compiler versions +# if defined(_LIBCPP_CLANG_VER) +# if _LIBCPP_CLANG_VER < 1700 +# warning "Libc++ only supports Clang 17 and later" +# endif +# elif defined(_LIBCPP_APPLE_CLANG_VER) +# if _LIBCPP_APPLE_CLANG_VER < 1500 +# warning "Libc++ only supports AppleClang 15 and later" +# endif +# elif defined(_LIBCPP_GCC_VER) +# if _LIBCPP_GCC_VER < 1400 +# warning "Libc++ only supports GCC 14 and later" +# endif +# endif + +#endif + +#endif // _LIBCPP___CONFIGURATION_COMPILER_H diff --git a/lib/libcxx/include/__configuration/language.h b/lib/libcxx/include/__configuration/language.h new file mode 100644 index 000000000000..cca6c71486b7 --- /dev/null +++ b/lib/libcxx/include/__configuration/language.h @@ -0,0 +1,46 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM 
Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___CONFIGURATION_LANGUAGE_H +#define _LIBCPP___CONFIGURATION_LANGUAGE_H + +/* zig patch: instead of including __config_site, zig adds -D flags when compiling */ + +#ifndef _LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER +# pragma GCC system_header +#endif + +// NOLINTBEGIN(libcpp-cpp-version-check) +#ifdef __cplusplus +# if __cplusplus <= 201103L +# define _LIBCPP_STD_VER 11 +# elif __cplusplus <= 201402L +# define _LIBCPP_STD_VER 14 +# elif __cplusplus <= 201703L +# define _LIBCPP_STD_VER 17 +# elif __cplusplus <= 202002L +# define _LIBCPP_STD_VER 20 +# elif __cplusplus <= 202302L +# define _LIBCPP_STD_VER 23 +# else +// Expected release year of the next C++ standard +# define _LIBCPP_STD_VER 26 +# endif +#endif // __cplusplus +// NOLINTEND(libcpp-cpp-version-check) + +#if !defined(__cpp_rtti) || __cpp_rtti < 199711L +# define _LIBCPP_HAS_NO_RTTI +#endif + +#if !defined(__cpp_exceptions) || __cpp_exceptions < 199711L +# define _LIBCPP_HAS_NO_EXCEPTIONS +#endif + +#endif // _LIBCPP___CONFIGURATION_LANGUAGE_H diff --git a/lib/libcxx/include/__configuration/platform.h b/lib/libcxx/include/__configuration/platform.h new file mode 100644 index 000000000000..540b30c55859 --- /dev/null +++ b/lib/libcxx/include/__configuration/platform.h @@ -0,0 +1,54 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___CONFIGURATION_PLATFORM_H +#define _LIBCPP___CONFIGURATION_PLATFORM_H + +/* zig patch: instead of including __config_site, zig adds -D flags when compiling */ + +#ifndef _LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER +# pragma GCC system_header +#endif + +#if defined(__ELF__) +# define _LIBCPP_OBJECT_FORMAT_ELF 1 +#elif defined(__MACH__) +# define _LIBCPP_OBJECT_FORMAT_MACHO 1 +#elif defined(_WIN32) +# define _LIBCPP_OBJECT_FORMAT_COFF 1 +#elif defined(__wasm__) +# define _LIBCPP_OBJECT_FORMAT_WASM 1 +#elif defined(_AIX) +# define _LIBCPP_OBJECT_FORMAT_XCOFF 1 +#else +// ... add new file formats here ... +#endif + +// Need to detect which libc we're using if we're on Linux. +#if defined(__linux__) +# include +# if defined(__GLIBC_PREREQ) +# define _LIBCPP_GLIBC_PREREQ(a, b) __GLIBC_PREREQ(a, b) +# else +# define _LIBCPP_GLIBC_PREREQ(a, b) 0 +# endif // defined(__GLIBC_PREREQ) +#endif // defined(__linux__) + +#ifndef __BYTE_ORDER__ +# error \ + "Your compiler doesn't seem to define __BYTE_ORDER__, which is required by libc++ to know the endianness of your target platform" +#endif + +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +# define _LIBCPP_LITTLE_ENDIAN +#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +# define _LIBCPP_BIG_ENDIAN +#endif // __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + +#endif // _LIBCPP___CONFIGURATION_PLATFORM_H diff --git a/lib/libcxx/include/__debug_utils/sanitizers.h b/lib/libcxx/include/__debug_utils/sanitizers.h new file mode 100644 index 000000000000..d8547e324933 --- /dev/null +++ b/lib/libcxx/include/__debug_utils/sanitizers.h @@ -0,0 +1,104 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___LIBCXX_DEBUG_UTILS_SANITIZERS_H +#define _LIBCPP___LIBCXX_DEBUG_UTILS_SANITIZERS_H + +#include <__config> +#include <__type_traits/integral_constant.h> +#include <__type_traits/is_constant_evaluated.h> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +#ifndef _LIBCPP_HAS_NO_ASAN + +extern "C" { +_LIBCPP_EXPORTED_FROM_ABI void +__sanitizer_annotate_contiguous_container(const void*, const void*, const void*, const void*); +_LIBCPP_EXPORTED_FROM_ABI void __sanitizer_annotate_double_ended_contiguous_container( + const void*, const void*, const void*, const void*, const void*, const void*); +_LIBCPP_EXPORTED_FROM_ABI int +__sanitizer_verify_double_ended_contiguous_container(const void*, const void*, const void*, const void*); +} + +#endif // _LIBCPP_HAS_NO_ASAN + +_LIBCPP_BEGIN_NAMESPACE_STD + +// ASan choices +#ifndef _LIBCPP_HAS_NO_ASAN +# define _LIBCPP_HAS_ASAN_CONTAINER_ANNOTATIONS_FOR_ALL_ALLOCATORS 1 +#endif + +#ifdef _LIBCPP_HAS_ASAN_CONTAINER_ANNOTATIONS_FOR_ALL_ALLOCATORS +// __asan_annotate_container_with_allocator determines whether containers with custom allocators are annotated. This is +// a public customization point to disable annotations if the custom allocator assumes that the memory isn't poisoned. +// See the https://libcxx.llvm.org/UsingLibcxx.html#turning-off-asan-annotation-in-containers for more information. +template +struct __asan_annotate_container_with_allocator : true_type {}; +#endif + +// Annotate a double-ended contiguous range. +// - [__first_storage, __last_storage) is the allocated memory region, +// - [__first_old_contained, __last_old_contained) is the previously allowed (unpoisoned) range, and +// - [__first_new_contained, __last_new_contained) is the new allowed (unpoisoned) range. 
+template +_LIBCPP_HIDE_FROM_ABI void __annotate_double_ended_contiguous_container( + const void* __first_storage, + const void* __last_storage, + const void* __first_old_contained, + const void* __last_old_contained, + const void* __first_new_contained, + const void* __last_new_contained) { +#ifdef _LIBCPP_HAS_NO_ASAN + (void)__first_storage; + (void)__last_storage; + (void)__first_old_contained; + (void)__last_old_contained; + (void)__first_new_contained; + (void)__last_new_contained; +#else + if (__asan_annotate_container_with_allocator<_Allocator>::value && __first_storage != nullptr) + __sanitizer_annotate_double_ended_contiguous_container( + __first_storage, + __last_storage, + __first_old_contained, + __last_old_contained, + __first_new_contained, + __last_new_contained); +#endif +} + +// Annotate a contiguous range. +// [__first_storage, __last_storage) is the allocated memory region, +// __old_last_contained is the previously last allowed (unpoisoned) element, and +// __new_last_contained is the new last allowed (unpoisoned) element. 
+template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 void __annotate_contiguous_container( + const void* __first_storage, + const void* __last_storage, + const void* __old_last_contained, + const void* __new_last_contained) { +#ifdef _LIBCPP_HAS_NO_ASAN + (void)__first_storage; + (void)__last_storage; + (void)__old_last_contained; + (void)__new_last_contained; +#else + if (!__libcpp_is_constant_evaluated() && __asan_annotate_container_with_allocator<_Allocator>::value && + __first_storage != nullptr) + __sanitizer_annotate_contiguous_container( + __first_storage, __last_storage, __old_last_contained, __new_last_contained); +#endif +} + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP___LIBCXX_DEBUG_UTILS_SANITIZERS_H diff --git a/lib/libcxx/include/__exception/exception_ptr.h b/lib/libcxx/include/__exception/exception_ptr.h index 53e2f718bc1b..beadd9212abd 100644 --- a/lib/libcxx/include/__exception/exception_ptr.h +++ b/lib/libcxx/include/__exception/exception_ptr.h @@ -9,7 +9,6 @@ #ifndef _LIBCPP___EXCEPTION_EXCEPTION_PTR_H #define _LIBCPP___EXCEPTION_EXCEPTION_PTR_H -#include <__availability> #include <__config> #include <__exception/operations.h> #include <__memory/addressof.h> @@ -26,6 +25,8 @@ #ifndef _LIBCPP_ABI_MICROSOFT +# if _LIBCPP_AVAILABILITY_HAS_INIT_PRIMARY_EXCEPTION + namespace __cxxabiv1 { extern "C" { @@ -36,15 +37,20 @@ struct __cxa_exception; _LIBCPP_OVERRIDABLE_FUNC_VIS __cxa_exception* __cxa_init_primary_exception( void*, std::type_info*, - void( -# if defined(_WIN32) - __thiscall -# endif - *)(void*)) throw(); +# if defined(_WIN32) + void(__thiscall*)(void*)) throw(); +# elif defined(__wasm__) + // In Wasm, a destructor returns its argument + void* (*)(void*)) throw(); +# else + void (*)(void*)) throw(); +# endif } } // namespace __cxxabiv1 +# endif + #endif namespace std { // purposefully not using versioning namespace @@ -60,6 +66,9 @@ class _LIBCPP_EXPORTED_FROM_ABI exception_ptr { friend _LIBCPP_HIDE_FROM_ABI exception_ptr 
make_exception_ptr(_Ep) _NOEXCEPT; public: + // exception_ptr is basically a COW string. + using __trivially_relocatable = exception_ptr; + _LIBCPP_HIDE_FROM_ABI exception_ptr() _NOEXCEPT : __ptr_() {} _LIBCPP_HIDE_FROM_ABI exception_ptr(nullptr_t) _NOEXCEPT : __ptr_() {} @@ -88,9 +97,18 @@ _LIBCPP_HIDE_FROM_ABI exception_ptr make_exception_ptr(_Ep __e) _NOEXCEPT { using _Ep2 = __decay_t<_Ep>; void* __ex = __cxxabiv1::__cxa_allocate_exception(sizeof(_Ep)); +# ifdef __wasm__ + // In Wasm, a destructor returns its argument + (void)__cxxabiv1::__cxa_init_primary_exception( + __ex, const_cast(&typeid(_Ep)), [](void* __p) -> void* { +# else (void)__cxxabiv1::__cxa_init_primary_exception(__ex, const_cast(&typeid(_Ep)), [](void* __p) { - std::__destroy_at(static_cast<_Ep2*>(__p)); - }); +# endif + std::__destroy_at(static_cast<_Ep2*>(__p)); +# ifdef __wasm__ + return __p; +# endif + }); try { ::new (__ex) _Ep2(__e); diff --git a/lib/libcxx/include/__exception/nested_exception.h b/lib/libcxx/include/__exception/nested_exception.h index 417db54e6eaa..feb489f87f62 100644 --- a/lib/libcxx/include/__exception/nested_exception.h +++ b/lib/libcxx/include/__exception/nested_exception.h @@ -15,8 +15,8 @@ #include <__type_traits/decay.h> #include <__type_traits/is_base_of.h> #include <__type_traits/is_class.h> +#include <__type_traits/is_constructible.h> #include <__type_traits/is_convertible.h> -#include <__type_traits/is_copy_constructible.h> #include <__type_traits/is_final.h> #include <__type_traits/is_polymorphic.h> #include <__utility/forward.h> @@ -84,17 +84,15 @@ struct __can_dynamic_cast : _BoolConstant< is_polymorphic<_From>::value && (!is_base_of<_To, _From>::value || is_convertible::value)> {}; -template -inline _LIBCPP_HIDE_FROM_ABI void -rethrow_if_nested(const _Ep& __e, __enable_if_t< __can_dynamic_cast<_Ep, nested_exception>::value>* = 0) { +template ::value, int> = 0> +inline _LIBCPP_HIDE_FROM_ABI void rethrow_if_nested(const _Ep& __e) { const nested_exception* 
__nep = dynamic_cast(std::addressof(__e)); if (__nep) __nep->rethrow_nested(); } -template -inline _LIBCPP_HIDE_FROM_ABI void -rethrow_if_nested(const _Ep&, __enable_if_t::value>* = 0) {} +template ::value, int> = 0> +inline _LIBCPP_HIDE_FROM_ABI void rethrow_if_nested(const _Ep&) {} } // namespace std diff --git a/lib/libcxx/include/__exception/operations.h b/lib/libcxx/include/__exception/operations.h index 8f374c0ccee5..0a9c7a7c7f0d 100644 --- a/lib/libcxx/include/__exception/operations.h +++ b/lib/libcxx/include/__exception/operations.h @@ -9,7 +9,6 @@ #ifndef _LIBCPP___EXCEPTION_OPERATIONS_H #define _LIBCPP___EXCEPTION_OPERATIONS_H -#include <__availability> #include <__config> #include diff --git a/lib/libcxx/include/__expected/bad_expected_access.h b/lib/libcxx/include/__expected/bad_expected_access.h index 27f01d9350ee..1b734389e831 100644 --- a/lib/libcxx/include/__expected/bad_expected_access.h +++ b/lib/libcxx/include/__expected/bad_expected_access.h @@ -27,23 +27,28 @@ _LIBCPP_BEGIN_NAMESPACE_STD template class bad_expected_access; +_LIBCPP_DIAGNOSTIC_PUSH +# if !_LIBCPP_AVAILABILITY_HAS_BAD_EXPECTED_ACCESS_KEY_FUNCTION +_LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wweak-vtables") +# endif template <> -class bad_expected_access : public exception { +class _LIBCPP_EXPORTED_FROM_ABI bad_expected_access : public exception { protected: - _LIBCPP_HIDE_FROM_ABI bad_expected_access() noexcept = default; - _LIBCPP_HIDE_FROM_ABI bad_expected_access(const bad_expected_access&) = default; - _LIBCPP_HIDE_FROM_ABI bad_expected_access(bad_expected_access&&) = default; - _LIBCPP_HIDE_FROM_ABI bad_expected_access& operator=(const bad_expected_access&) = default; - _LIBCPP_HIDE_FROM_ABI bad_expected_access& operator=(bad_expected_access&&) = default; - _LIBCPP_HIDE_FROM_ABI_VIRTUAL ~bad_expected_access() override = default; + _LIBCPP_HIDE_FROM_ABI bad_expected_access() noexcept = default; + _LIBCPP_HIDE_FROM_ABI bad_expected_access(const bad_expected_access&) noexcept = default; 
+ _LIBCPP_HIDE_FROM_ABI bad_expected_access(bad_expected_access&&) noexcept = default; + _LIBCPP_HIDE_FROM_ABI bad_expected_access& operator=(const bad_expected_access&) noexcept = default; + _LIBCPP_HIDE_FROM_ABI bad_expected_access& operator=(bad_expected_access&&) noexcept = default; + _LIBCPP_HIDE_FROM_ABI_VIRTUAL ~bad_expected_access() override = default; public: - // The way this has been designed (by using a class template below) means that we'll already - // have a profusion of these vtables in TUs, and the dynamic linker will already have a bunch - // of work to do. So it is not worth hiding the specialization in the dylib, given that - // it adds deployment target restrictions. +# if _LIBCPP_AVAILABILITY_HAS_BAD_EXPECTED_ACCESS_KEY_FUNCTION + const char* what() const noexcept override; +# else _LIBCPP_HIDE_FROM_ABI_VIRTUAL const char* what() const noexcept override { return "bad access to std::expected"; } +# endif }; +_LIBCPP_DIAGNOSTIC_POP template class bad_expected_access : public bad_expected_access { diff --git a/lib/libcxx/include/__expected/expected.h b/lib/libcxx/include/__expected/expected.h index 443d9257dc59..f618b20603e6 100644 --- a/lib/libcxx/include/__expected/expected.h +++ b/lib/libcxx/include/__expected/expected.h @@ -23,24 +23,15 @@ #include <__type_traits/is_assignable.h> #include <__type_traits/is_constructible.h> #include <__type_traits/is_convertible.h> -#include <__type_traits/is_copy_assignable.h> -#include <__type_traits/is_copy_constructible.h> -#include <__type_traits/is_default_constructible.h> #include <__type_traits/is_function.h> -#include <__type_traits/is_move_assignable.h> -#include <__type_traits/is_move_constructible.h> +#include <__type_traits/is_nothrow_assignable.h> #include <__type_traits/is_nothrow_constructible.h> -#include <__type_traits/is_nothrow_copy_assignable.h> -#include <__type_traits/is_nothrow_copy_constructible.h> -#include <__type_traits/is_nothrow_default_constructible.h> -#include 
<__type_traits/is_nothrow_move_assignable.h> -#include <__type_traits/is_nothrow_move_constructible.h> #include <__type_traits/is_reference.h> #include <__type_traits/is_same.h> #include <__type_traits/is_swappable.h> -#include <__type_traits/is_trivially_copy_constructible.h> +#include <__type_traits/is_trivially_constructible.h> #include <__type_traits/is_trivially_destructible.h> -#include <__type_traits/is_trivially_move_constructible.h> +#include <__type_traits/is_trivially_relocatable.h> #include <__type_traits/is_void.h> #include <__type_traits/lazy.h> #include <__type_traits/negation.h> @@ -473,6 +464,11 @@ class expected : private __expected_base<_Tp, _Err> { using error_type = _Err; using unexpected_type = unexpected<_Err>; + using __trivially_relocatable = + __conditional_t<__libcpp_is_trivially_relocatable<_Tp>::value && __libcpp_is_trivially_relocatable<_Err>::value, + expected, + void>; + template using rebind = expected<_Up, error_type>; @@ -511,7 +507,9 @@ class expected : private __expected_base<_Tp, _Err> { _And< is_constructible<_Tp, _UfQual>, is_constructible<_Err, _OtherErrQual>, _If<_Not, bool>>::value, - _And< _Not&>>, + _And< + _Not<_And, is_same<_Err, _OtherErr>>>, // use the copy constructor instead, see #92676 + _Not&>>, _Not>>, _Not&>>, _Not>>, diff --git a/lib/libcxx/include/__filesystem/copy_options.h b/lib/libcxx/include/__filesystem/copy_options.h index 1bf71292c8a6..097eebe61137 100644 --- a/lib/libcxx/include/__filesystem/copy_options.h +++ b/lib/libcxx/include/__filesystem/copy_options.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FILESYSTEM_COPY_OPTIONS_H #define _LIBCPP___FILESYSTEM_COPY_OPTIONS_H -#include <__availability> #include <__config> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__filesystem/directory_entry.h b/lib/libcxx/include/__filesystem/directory_entry.h index 016ad94a853d..96d88dcd90b4 100644 --- a/lib/libcxx/include/__filesystem/directory_entry.h +++ 
b/lib/libcxx/include/__filesystem/directory_entry.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FILESYSTEM_DIRECTORY_ENTRY_H #define _LIBCPP___FILESYSTEM_DIRECTORY_ENTRY_H -#include <__availability> #include <__chrono/time_point.h> #include <__compare/ordering.h> #include <__config> diff --git a/lib/libcxx/include/__filesystem/directory_iterator.h b/lib/libcxx/include/__filesystem/directory_iterator.h index a5aa5ff5432d..e0246d8001e1 100644 --- a/lib/libcxx/include/__filesystem/directory_iterator.h +++ b/lib/libcxx/include/__filesystem/directory_iterator.h @@ -11,7 +11,6 @@ #define _LIBCPP___FILESYSTEM_DIRECTORY_ITERATOR_H #include <__assert> -#include <__availability> #include <__config> #include <__filesystem/directory_entry.h> #include <__filesystem/directory_options.h> diff --git a/lib/libcxx/include/__filesystem/directory_options.h b/lib/libcxx/include/__filesystem/directory_options.h index 683c4678e083..d0cd3ebfdaa7 100644 --- a/lib/libcxx/include/__filesystem/directory_options.h +++ b/lib/libcxx/include/__filesystem/directory_options.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FILESYSTEM_DIRECTORY_OPTIONS_H #define _LIBCPP___FILESYSTEM_DIRECTORY_OPTIONS_H -#include <__availability> #include <__config> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__filesystem/file_status.h b/lib/libcxx/include/__filesystem/file_status.h index 3e2b32eef82e..da316c8b0274 100644 --- a/lib/libcxx/include/__filesystem/file_status.h +++ b/lib/libcxx/include/__filesystem/file_status.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FILESYSTEM_FILE_STATUS_H #define _LIBCPP___FILESYSTEM_FILE_STATUS_H -#include <__availability> #include <__config> #include <__filesystem/file_type.h> #include <__filesystem/perms.h> diff --git a/lib/libcxx/include/__filesystem/file_time_type.h b/lib/libcxx/include/__filesystem/file_time_type.h index e086dbcc3f51..63e4ae1578cf 100644 --- a/lib/libcxx/include/__filesystem/file_time_type.h +++ b/lib/libcxx/include/__filesystem/file_time_type.h @@ 
-10,7 +10,6 @@ #ifndef _LIBCPP___FILESYSTEM_FILE_TIME_TYPE_H #define _LIBCPP___FILESYSTEM_FILE_TIME_TYPE_H -#include <__availability> #include <__chrono/file_clock.h> #include <__chrono/time_point.h> #include <__config> diff --git a/lib/libcxx/include/__filesystem/file_type.h b/lib/libcxx/include/__filesystem/file_type.h index c509085d90de..e4ac1dfee9ed 100644 --- a/lib/libcxx/include/__filesystem/file_type.h +++ b/lib/libcxx/include/__filesystem/file_type.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FILESYSTEM_FILE_TYPE_H #define _LIBCPP___FILESYSTEM_FILE_TYPE_H -#include <__availability> #include <__config> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__filesystem/filesystem_error.h b/lib/libcxx/include/__filesystem/filesystem_error.h index bfdcc5eaee52..80a11e3b1932 100644 --- a/lib/libcxx/include/__filesystem/filesystem_error.h +++ b/lib/libcxx/include/__filesystem/filesystem_error.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FILESYSTEM_FILESYSTEM_ERROR_H #define _LIBCPP___FILESYSTEM_FILESYSTEM_ERROR_H -#include <__availability> #include <__config> #include <__filesystem/path.h> #include <__memory/shared_ptr.h> diff --git a/lib/libcxx/include/__filesystem/operations.h b/lib/libcxx/include/__filesystem/operations.h index 9bb83576f54b..f588189ed1d9 100644 --- a/lib/libcxx/include/__filesystem/operations.h +++ b/lib/libcxx/include/__filesystem/operations.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FILESYSTEM_OPERATIONS_H #define _LIBCPP___FILESYSTEM_OPERATIONS_H -#include <__availability> #include <__chrono/time_point.h> #include <__config> #include <__filesystem/copy_options.h> diff --git a/lib/libcxx/include/__filesystem/path.h b/lib/libcxx/include/__filesystem/path.h index 8c7d426f7a6f..ff468d517722 100644 --- a/lib/libcxx/include/__filesystem/path.h +++ b/lib/libcxx/include/__filesystem/path.h @@ -12,11 +12,9 @@ #include <__algorithm/replace.h> #include <__algorithm/replace_copy.h> -#include <__availability> #include <__config> -#include 
<__functional/hash.h> #include <__functional/unary_function.h> -#include <__fwd/hash.h> +#include <__fwd/functional.h> #include <__iterator/back_insert_iterator.h> #include <__iterator/iterator_traits.h> #include <__type_traits/decay.h> @@ -813,7 +811,7 @@ class _LIBCPP_EXPORTED_FROM_ABI path { _LIBCPP_HIDE_FROM_ABI path extension() const { return string_type(__extension()); } // query - _LIBCPP_NODISCARD_AFTER_CXX17 _LIBCPP_HIDE_FROM_ABI bool empty() const noexcept { return __pn_.empty(); } + _LIBCPP_NODISCARD _LIBCPP_HIDE_FROM_ABI bool empty() const noexcept { return __pn_.empty(); } _LIBCPP_HIDE_FROM_ABI bool has_root_name() const { return !__root_name().empty(); } _LIBCPP_HIDE_FROM_ABI bool has_root_directory() const { return !__root_directory().empty(); } diff --git a/lib/libcxx/include/__filesystem/path_iterator.h b/lib/libcxx/include/__filesystem/path_iterator.h index d2d65cd122ca..f4d486d86cf3 100644 --- a/lib/libcxx/include/__filesystem/path_iterator.h +++ b/lib/libcxx/include/__filesystem/path_iterator.h @@ -11,7 +11,6 @@ #define _LIBCPP___FILESYSTEM_PATH_ITERATOR_H #include <__assert> -#include <__availability> #include <__config> #include <__filesystem/path.h> #include <__iterator/iterator_traits.h> diff --git a/lib/libcxx/include/__filesystem/perm_options.h b/lib/libcxx/include/__filesystem/perm_options.h index 529ef13558e9..64c16ee60a17 100644 --- a/lib/libcxx/include/__filesystem/perm_options.h +++ b/lib/libcxx/include/__filesystem/perm_options.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FILESYSTEM_PERM_OPTIONS_H #define _LIBCPP___FILESYSTEM_PERM_OPTIONS_H -#include <__availability> #include <__config> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__filesystem/perms.h b/lib/libcxx/include/__filesystem/perms.h index 8f5f9a7e8248..458f1e6e5348 100644 --- a/lib/libcxx/include/__filesystem/perms.h +++ b/lib/libcxx/include/__filesystem/perms.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FILESYSTEM_PERMS_H #define 
_LIBCPP___FILESYSTEM_PERMS_H -#include <__availability> #include <__config> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__filesystem/recursive_directory_iterator.h b/lib/libcxx/include/__filesystem/recursive_directory_iterator.h index a8af4f73b14a..caa1396eb301 100644 --- a/lib/libcxx/include/__filesystem/recursive_directory_iterator.h +++ b/lib/libcxx/include/__filesystem/recursive_directory_iterator.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FILESYSTEM_RECURSIVE_DIRECTORY_ITERATOR_H #define _LIBCPP___FILESYSTEM_RECURSIVE_DIRECTORY_ITERATOR_H -#include <__availability> #include <__config> #include <__filesystem/directory_entry.h> #include <__filesystem/directory_options.h> diff --git a/lib/libcxx/include/__filesystem/space_info.h b/lib/libcxx/include/__filesystem/space_info.h index 2e80ae3b2c11..3fa57d33096f 100644 --- a/lib/libcxx/include/__filesystem/space_info.h +++ b/lib/libcxx/include/__filesystem/space_info.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FILESYSTEM_SPACE_INFO_H #define _LIBCPP___FILESYSTEM_SPACE_INFO_H -#include <__availability> #include <__config> #include diff --git a/lib/libcxx/include/__filesystem/u8path.h b/lib/libcxx/include/__filesystem/u8path.h index bde878054865..dae5823128f0 100644 --- a/lib/libcxx/include/__filesystem/u8path.h +++ b/lib/libcxx/include/__filesystem/u8path.h @@ -11,7 +11,6 @@ #define _LIBCPP___FILESYSTEM_U8PATH_H #include <__algorithm/unwrap_iter.h> -#include <__availability> #include <__config> #include <__filesystem/path.h> #include diff --git a/lib/libcxx/include/__format/concepts.h b/lib/libcxx/include/__format/concepts.h index 299c5f40ee35..13380e9b91af 100644 --- a/lib/libcxx/include/__format/concepts.h +++ b/lib/libcxx/include/__format/concepts.h @@ -13,12 +13,14 @@ #include <__concepts/same_as.h> #include <__concepts/semiregular.h> #include <__config> -#include <__format/format_fwd.h> #include <__format/format_parse_context.h> +#include <__fwd/format.h> +#include <__fwd/tuple.h> 
+#include <__tuple/tuple_size.h> #include <__type_traits/is_specialization.h> #include <__type_traits/remove_const.h> +#include <__type_traits/remove_reference.h> #include <__utility/pair.h> -#include #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header diff --git a/lib/libcxx/include/__format/container_adaptor.h b/lib/libcxx/include/__format/container_adaptor.h index ec806ef16bf5..9f49ca03bf4f 100644 --- a/lib/libcxx/include/__format/container_adaptor.h +++ b/lib/libcxx/include/__format/container_adaptor.h @@ -18,11 +18,11 @@ #include <__format/concepts.h> #include <__format/formatter.h> #include <__format/range_default_formatter.h> +#include <__fwd/queue.h> +#include <__fwd/stack.h> #include <__ranges/ref_view.h> #include <__type_traits/is_const.h> #include <__type_traits/maybe_const.h> -#include -#include _LIBCPP_BEGIN_NAMESPACE_STD diff --git a/lib/libcxx/include/__format/escaped_output_table.h b/lib/libcxx/include/__format/escaped_output_table.h index 495a2fbc7b03..f7be2dc61f21 100644 --- a/lib/libcxx/include/__format/escaped_output_table.h +++ b/lib/libcxx/include/__format/escaped_output_table.h @@ -80,10 +80,9 @@ namespace __escaped_output_table { /// The entries of the characters to escape in format's debug string. /// /// Contains the entries for [format.string.escaped]/2.2.1.2.1 -/// CE is a Unicode encoding and C corresponds to either a UCS scalar value -/// whose Unicode property General_Category has a value in the groups -/// Separator (Z) or Other (C) or to a UCS scalar value which has the Unicode -/// property Grapheme_Extend=Yes, as described by table 12 of UAX #44 +/// CE is a Unicode encoding and C corresponds to a UCS scalar value whose +/// Unicode property General_Category has a value in the groups Separator (Z) +/// or Other (C), as described by table 12 of UAX #44 /// /// Separator (Z) consists of General_Category /// - Space_Separator, @@ -98,7 +97,6 @@ namespace __escaped_output_table { /// - Unassigned. 
/// /// The data is generated from -/// - https://www.unicode.org/Public/UCD/latest/ucd/DerivedCoreProperties.txt /// - https://www.unicode.org/Public/UCD/latest/ucd/extracted/DerivedGeneralCategory.txt /// /// The table is similar to the table @@ -107,926 +105,751 @@ namespace __escaped_output_table { /// table lacks a property, thus having more bits available for the size. /// /// The data has 2 values: -/// - bits [0, 10] The size of the range, allowing 2048 elements. -/// - bits [11, 31] The lower bound code point of the range. The upper bound of -/// the range is lower bound + size. -inline constexpr uint32_t __entries[893] = { - 0x00000020, - 0x0003f821, - 0x00056800, - 0x0018006f, - 0x001bc001, - 0x001c0003, - 0x001c5800, - 0x001c6800, - 0x001d1000, - 0x00241806, - 0x00298000, - 0x002ab801, - 0x002c5801, - 0x002c802d, - 0x002df800, - 0x002e0801, - 0x002e2001, - 0x002e3808, - 0x002f5803, - 0x002fa810, - 0x0030800a, - 0x0030e000, - 0x00325814, - 0x00338000, - 0x0036b007, - 0x0036f805, - 0x00373801, - 0x00375003, - 0x00387001, - 0x00388800, - 0x0039801c, - 0x003d300a, - 0x003d900d, - 0x003f5808, - 0x003fd802, - 0x0040b003, - 0x0040d808, - 0x00412802, - 0x00414806, - 0x0041f800, - 0x0042c804, - 0x0042f800, - 0x00435804, - 0x00447810, - 0x00465038, - 0x0049d000, - 0x0049e000, - 0x004a0807, - 0x004a6800, - 0x004a8806, - 0x004b1001, - 0x004c0800, - 0x004c2000, - 0x004c6801, - 0x004c8801, - 0x004d4800, - 0x004d8800, - 0x004d9802, - 0x004dd002, - 0x004df000, - 0x004e0805, - 0x004e4801, - 0x004e6800, - 0x004e780c, - 0x004ef000, - 0x004f1003, - 0x004ff004, - 0x00502000, - 0x00505803, - 0x00508801, - 0x00514800, - 0x00518800, - 0x0051a000, - 0x0051b800, - 0x0051d003, - 0x00520817, - 0x0052e800, - 0x0052f806, - 0x00538001, - 0x0053a800, - 0x0053b80b, - 0x00542000, - 0x00547000, - 0x00549000, - 0x00554800, - 0x00558800, - 0x0055a000, - 0x0055d002, - 0x00560807, - 0x00565000, - 0x00566802, - 0x0056880e, - 0x00571003, - 0x00579006, - 0x0057d007, - 0x00582000, - 0x00586801, 
- 0x00588801, - 0x00594800, - 0x00598800, - 0x0059a000, - 0x0059d002, - 0x0059f001, - 0x005a0805, - 0x005a4801, - 0x005a680e, - 0x005af000, - 0x005b1003, - 0x005bc00a, - 0x005c2000, - 0x005c5802, - 0x005c8800, - 0x005cb002, - 0x005cd800, - 0x005ce800, - 0x005d0002, - 0x005d2802, - 0x005d5802, - 0x005dd004, - 0x005e0000, - 0x005e1802, - 0x005e4800, - 0x005e6802, - 0x005e8814, - 0x005fd805, - 0x00602000, - 0x00606800, - 0x00608800, - 0x00614800, - 0x0061d002, - 0x0061f002, - 0x00622812, - 0x0062d801, - 0x0062f001, - 0x00631003, - 0x00638006, - 0x00640800, - 0x00646800, - 0x00648800, - 0x00654800, - 0x0065a000, - 0x0065d002, - 0x0065f800, - 0x00661000, - 0x00662801, - 0x00664800, - 0x00666010, - 0x0066f800, - 0x00671003, - 0x00678000, - 0x0067a00d, - 0x00686800, - 0x00688800, - 0x0069d801, - 0x0069f000, - 0x006a0804, - 0x006a4800, - 0x006a6800, - 0x006a8003, - 0x006ab800, - 0x006b1003, - 0x006c0001, - 0x006c2000, - 0x006cb802, - 0x006d9000, - 0x006de000, - 0x006df001, - 0x006e3808, - 0x006e9005, - 0x006ef806, - 0x006f8001, - 0x006fa80b, - 0x00718800, - 0x0071a00a, - 0x00723807, - 0x0072e024, - 0x00741800, - 0x00742800, - 0x00745800, - 0x00752000, - 0x00753000, - 0x00758800, - 0x0075a008, - 0x0075f001, - 0x00762800, - 0x00763808, - 0x0076d001, - 0x0077001f, - 0x0078c001, - 0x0079a800, - 0x0079b800, - 0x0079c800, - 0x007a4000, - 0x007b6811, - 0x007c0004, - 0x007c3001, - 0x007c6830, - 0x007e3000, - 0x007e6800, - 0x007ed824, - 0x00816803, - 0x00819005, - 0x0081c801, - 0x0081e801, - 0x0082c001, - 0x0082f002, - 0x00838803, - 0x00841000, - 0x00842801, - 0x00846800, - 0x0084e800, - 0x00863000, - 0x00864004, - 0x00867001, - 0x00924800, - 0x00927001, - 0x0092b800, - 0x0092c800, - 0x0092f001, - 0x00944800, - 0x00947001, - 0x00958800, - 0x0095b001, - 0x0095f800, - 0x00960800, - 0x00963001, - 0x0096b800, - 0x00988800, - 0x0098b001, - 0x009ad804, - 0x009be802, - 0x009cd005, - 0x009fb001, - 0x009ff001, - 0x00b40000, - 0x00b4e802, - 0x00b7c806, - 0x00b89002, - 0x00b8b008, - 
0x00b99001, - 0x00b9b808, - 0x00ba900d, - 0x00bb6800, - 0x00bb880e, - 0x00bda001, - 0x00bdb806, - 0x00be3000, - 0x00be480a, - 0x00bee802, - 0x00bf5005, - 0x00bfd005, - 0x00c05804, - 0x00c0d005, - 0x00c3c806, - 0x00c42801, - 0x00c54800, - 0x00c55804, - 0x00c7b009, - 0x00c8f803, - 0x00c93801, - 0x00c96003, - 0x00c99000, - 0x00c9c806, - 0x00ca0802, - 0x00cb7001, - 0x00cba80a, - 0x00cd6003, - 0x00ce5005, - 0x00ced802, - 0x00d0b801, - 0x00d0d802, - 0x00d2b000, - 0x00d2c008, - 0x00d31000, - 0x00d32807, - 0x00d3980c, - 0x00d45005, - 0x00d4d005, - 0x00d57055, - 0x00d9a006, - 0x00d9e000, - 0x00da1000, - 0x00da6802, - 0x00db5808, - 0x00dbf802, - 0x00dd1003, - 0x00dd4001, - 0x00dd5802, - 0x00df3000, - 0x00df4001, - 0x00df6800, - 0x00df7802, - 0x00dfa007, - 0x00e16007, - 0x00e1b004, - 0x00e25002, - 0x00e44806, - 0x00e5d801, - 0x00e6400a, - 0x00e6a00c, - 0x00e71006, - 0x00e76800, - 0x00e7a000, - 0x00e7c001, - 0x00e7d804, - 0x00ee003f, - 0x00f8b001, - 0x00f8f001, - 0x00fa3001, - 0x00fa7001, - 0x00fac000, - 0x00fad000, - 0x00fae000, - 0x00faf000, - 0x00fbf001, - 0x00fda800, - 0x00fe2800, - 0x00fea001, - 0x00fee000, - 0x00ff8001, - 0x00ffa800, - 0x00fff810, - 0x01014007, - 0x0102f810, - 0x01039001, - 0x01047800, - 0x0104e802, - 0x0106083e, - 0x010c6003, - 0x01213818, - 0x01225814, - 0x015ba001, - 0x015cb000, - 0x01677802, - 0x0167a004, - 0x01693000, - 0x01694004, - 0x01697001, - 0x016b4006, - 0x016b880e, - 0x016cb808, - 0x016d3800, - 0x016d7800, - 0x016db800, - 0x016df800, - 0x016e3800, - 0x016e7800, - 0x016eb800, - 0x016ef820, - 0x0172f021, - 0x0174d000, - 0x0177a00b, - 0x017eb019, - 0x017fe004, - 0x01815005, - 0x01820000, - 0x0184b803, - 0x01880004, - 0x01898000, - 0x018c7800, - 0x018f200b, - 0x0190f800, - 0x05246802, - 0x05263808, - 0x05316013, - 0x05337803, - 0x0533a009, - 0x0534f001, - 0x05378001, - 0x0537c007, - 0x053e5804, - 0x053e9000, - 0x053ea000, - 0x053ed017, - 0x05401000, - 0x05403000, - 0x05405800, - 0x05412801, - 0x05416003, - 0x0541d005, - 0x0543c007, - 0x05462009, 
- 0x0546d017, - 0x0547f800, - 0x05493007, - 0x054a380a, - 0x054aa00a, - 0x054be805, - 0x054d9800, - 0x054db003, - 0x054de001, - 0x054e7000, - 0x054ed003, - 0x054f2800, - 0x054ff800, - 0x05514805, - 0x05518801, - 0x0551a80a, - 0x05521800, - 0x05526000, - 0x05527001, - 0x0552d001, - 0x0553e000, - 0x05558000, - 0x05559002, - 0x0555b801, - 0x0555f001, - 0x05560800, - 0x05561817, - 0x05576001, - 0x0557b00a, - 0x05583801, - 0x05587801, - 0x0558b808, - 0x05593800, - 0x05597800, - 0x055b6003, - 0x055f2800, - 0x055f4000, - 0x055f6802, - 0x055fd005, - 0x06bd200b, - 0x06be3803, - 0x06bfe7ff, - 0x06ffe7ff, - 0x073fe7ff, - 0x077fe7ff, - 0x07bfe103, - 0x07d37001, - 0x07d6d025, - 0x07d8380b, - 0x07d8c004, - 0x07d8f000, - 0x07d9b800, - 0x07d9e800, - 0x07d9f800, - 0x07da1000, - 0x07da2800, - 0x07de180f, - 0x07ec8001, - 0x07ee4006, - 0x07ee801f, - 0x07f0000f, - 0x07f0d015, - 0x07f29800, - 0x07f33800, - 0x07f36003, - 0x07f3a800, - 0x07f7e803, - 0x07fcf001, - 0x07fdf802, - 0x07fe4001, - 0x07fe8001, - 0x07fec001, - 0x07fee802, - 0x07ff3800, - 0x07ff780c, - 0x07fff001, - 0x08006000, - 0x08013800, - 0x0801d800, - 0x0801f000, - 0x08027001, - 0x0802f021, - 0x0807d804, - 0x08081803, - 0x0809a002, - 0x080c7800, - 0x080ce802, - 0x080d082e, - 0x080fe882, - 0x0814e802, - 0x0816880f, - 0x0817e003, - 0x08192008, - 0x081a5804, - 0x081bb009, - 0x081cf000, - 0x081e2003, - 0x081eb029, - 0x0824f001, - 0x08255005, - 0x0826a003, - 0x0827e003, - 0x08294007, - 0x082b200a, - 0x082bd800, - 0x082c5800, - 0x082c9800, - 0x082cb000, - 0x082d1000, - 0x082d9000, - 0x082dd000, - 0x082de842, - 0x0839b808, - 0x083ab009, - 0x083b4017, - 0x083c3000, - 0x083d8800, - 0x083dd844, - 0x08403001, - 0x08404800, - 0x0841b000, - 0x0841c802, - 0x0841e801, - 0x0842b000, - 0x0844f807, - 0x0845802f, - 0x08479800, - 0x0847b004, - 0x0848e002, - 0x0849d004, - 0x084a003f, - 0x084dc003, - 0x084e8001, - 0x0850080e, - 0x0850a000, - 0x0850c000, - 0x0851b009, - 0x08524806, - 0x0852c806, - 0x0855001f, - 0x08572805, - 0x0857b808, - 
0x0859b002, - 0x085ab001, - 0x085b9804, - 0x085c9006, - 0x085ce80b, - 0x085d804f, - 0x08624836, - 0x0865980c, - 0x08679806, - 0x0869200b, - 0x0869d125, - 0x0873f800, - 0x08755002, - 0x08757001, - 0x0875904d, - 0x08794007, - 0x087a300a, - 0x087ad015, - 0x087c1003, - 0x087c5025, - 0x087e6013, - 0x087fb808, - 0x08800800, - 0x0881c00e, - 0x08827003, - 0x08838000, - 0x08839801, - 0x0883b00b, - 0x08859803, - 0x0885c801, - 0x0885e800, - 0x0886100d, - 0x08874806, - 0x0887d008, - 0x08893804, - 0x08896808, - 0x088a4007, - 0x088b9800, - 0x088bb80a, - 0x088db008, - 0x088e4803, - 0x088e7800, - 0x088f0000, - 0x088fa80a, - 0x08909000, - 0x08917802, - 0x0891a000, - 0x0891b001, - 0x0891f000, - 0x0892083e, - 0x08943800, - 0x08944800, - 0x08947000, - 0x0894f000, - 0x08955005, - 0x0896f800, - 0x0897180c, - 0x0897d007, - 0x08982000, - 0x08986801, - 0x08988801, - 0x08994800, - 0x08998800, - 0x0899a000, - 0x0899d002, - 0x0899f000, - 0x089a0000, - 0x089a2801, - 0x089a4801, - 0x089a7001, - 0x089a880b, - 0x089b209b, - 0x08a1c007, - 0x08a21002, - 0x08a23000, - 0x08a2e000, - 0x08a2f000, - 0x08a3101d, - 0x08a58000, - 0x08a59805, - 0x08a5d000, - 0x08a5e800, - 0x08a5f801, - 0x08a61001, - 0x08a64007, - 0x08a6d0a5, - 0x08ad7800, - 0x08ad9005, - 0x08ade001, - 0x08adf801, - 0x08aee023, - 0x08b19807, - 0x08b1e800, - 0x08b1f801, - 0x08b2280a, - 0x08b2d005, - 0x08b36812, - 0x08b55800, - 0x08b56800, - 0x08b58005, - 0x08b5b800, - 0x08b5d005, - 0x08b65035, - 0x08b8d804, - 0x08b91003, - 0x08b93808, - 0x08ba38b8, - 0x08c17808, - 0x08c1c801, - 0x08c1e063, - 0x08c7980b, - 0x08c83801, - 0x08c85001, - 0x08c8a000, - 0x08c8b800, - 0x08c98000, - 0x08c9b000, - 0x08c9c803, - 0x08c9f000, - 0x08ca1800, - 0x08ca3808, - 0x08cad045, - 0x08cd4001, - 0x08cea007, - 0x08cf0000, - 0x08cf281a, - 0x08d00809, - 0x08d19805, - 0x08d1d803, - 0x08d23808, - 0x08d28805, - 0x08d2c802, - 0x08d4500c, - 0x08d4c001, - 0x08d5180c, - 0x08d7c806, - 0x08d850f5, - 0x08e04800, - 0x08e1800d, - 0x08e1f800, - 0x08e23009, - 0x08e36802, - 0x08e48018, 
- 0x08e55006, - 0x08e59001, - 0x08e5a84a, - 0x08e83800, - 0x08e85000, - 0x08e98814, - 0x08ea3808, - 0x08ead005, - 0x08eb3000, - 0x08eb4800, - 0x08ec7803, - 0x08eca800, - 0x08ecb800, - 0x08ecc806, - 0x08ed5135, - 0x08f79801, - 0x08f7c808, - 0x08f88800, - 0x08f9b007, - 0x08fa0000, - 0x08fa1000, - 0x08fad055, - 0x08fd880e, - 0x08ff900c, - 0x091cd065, - 0x09237800, - 0x0923a80a, - 0x092a27ff, - 0x096a224b, - 0x097f980c, - 0x09a18010, - 0x09a23fff, - 0x09e23fb8, - 0x0a323fff, - 0x0a723fff, - 0x0ab23fff, - 0x0af23fff, - 0x0b3239b8, - 0x0b51c806, - 0x0b52f800, - 0x0b535003, - 0x0b55f800, - 0x0b565005, - 0x0b577006, - 0x0b57b009, - 0x0b598006, - 0x0b5a3009, - 0x0b5ad000, - 0x0b5b1000, - 0x0b5bc004, - 0x0b5c82af, - 0x0b74d864, - 0x0b7a5804, - 0x0b7c400a, - 0x0b7d003f, - 0x0b7f200b, - 0x0b7f900d, - 0x0c3fc007, - 0x0c66b029, - 0x0c684fff, - 0x0ca84fff, - 0x0ce84fff, - 0x0d284fff, - 0x0d684ae6, - 0x0d7fa000, - 0x0d7fe000, - 0x0d7ff800, - 0x0d89180e, - 0x0d89981c, - 0x0d8a9801, - 0x0d8ab00d, - 0x0d8b4007, - 0x0d97e7ff, - 0x0dd7e103, - 0x0de35804, - 0x0de3e802, - 0x0de44806, - 0x0de4d001, - 0x0de4e801, - 0x0de507ff, - 0x0e2507ff, - 0x0e6502af, - 0x0e7e203b, - 0x0e87b009, - 0x0e893801, - 0x0e8b2800, - 0x0e8b3802, - 0x0e8b7014, - 0x0e8c2806, - 0x0e8d5003, - 0x0e8f5814, - 0x0e921002, - 0x0e923079, - 0x0e96a00b, - 0x0e97a00b, - 0x0e9ab808, - 0x0e9bc886, - 0x0ea2a800, - 0x0ea4e800, - 0x0ea50001, - 0x0ea51801, - 0x0ea53801, - 0x0ea56800, - 0x0ea5d000, - 0x0ea5e000, - 0x0ea62000, - 0x0ea83000, - 0x0ea85801, - 0x0ea8a800, - 0x0ea8e800, - 0x0ea9d000, - 0x0ea9f800, - 0x0eaa2800, - 0x0eaa3802, - 0x0eaa8800, - 0x0eb53001, - 0x0ebe6001, - 0x0ed00036, - 0x0ed1d831, - 0x0ed3a800, - 0x0ed42000, - 0x0ed46473, - 0x0ef8f805, - 0x0ef95904, - 0x0f037091, - 0x0f096809, - 0x0f09f001, - 0x0f0a5003, - 0x0f0a813f, - 0x0f157011, - 0x0f176003, - 0x0f17d004, - 0x0f1801cf, - 0x0f276003, - 0x0f27d2e5, - 0x0f3f3800, - 0x0f3f6000, - 0x0f3f7800, - 0x0f3ff800, - 0x0f462801, - 0x0f46802f, - 0x0f4a2006, - 
0x0f4a6003, - 0x0f4ad003, - 0x0f4b0310, - 0x0f65a84b, - 0x0f69f0c1, - 0x0f702000, - 0x0f710000, - 0x0f711800, - 0x0f712801, - 0x0f714000, - 0x0f719800, - 0x0f71c000, - 0x0f71d000, - 0x0f71e005, - 0x0f721803, - 0x0f724000, - 0x0f725000, - 0x0f726000, - 0x0f728000, - 0x0f729800, - 0x0f72a801, - 0x0f72c000, - 0x0f72d000, - 0x0f72e000, - 0x0f72f000, - 0x0f730000, - 0x0f731800, - 0x0f732801, - 0x0f735800, - 0x0f739800, - 0x0f73c000, - 0x0f73e800, - 0x0f73f800, - 0x0f745000, - 0x0f74e004, - 0x0f752000, - 0x0f755000, - 0x0f75e033, - 0x0f77910d, - 0x0f816003, - 0x0f84a00b, - 0x0f857801, - 0x0f860000, - 0x0f868000, - 0x0f87b009, - 0x0f8d7037, - 0x0f90180c, - 0x0f91e003, - 0x0f924806, - 0x0f92900d, - 0x0f933099, - 0x0fb6c003, - 0x0fb76802, - 0x0fb7e802, - 0x0fbbb803, - 0x0fbed005, - 0x0fbf6003, - 0x0fbf880e, - 0x0fc06003, - 0x0fc24007, - 0x0fc2d005, - 0x0fc44007, - 0x0fc57001, - 0x0fc5904d, - 0x0fd2a00b, - 0x0fd37001, - 0x0fd3e802, - 0x0fd44806, - 0x0fd5f000, - 0x0fd63007, - 0x0fd6e003, - 0x0fd74806, - 0x0fd7c806, - 0x0fdc9800, - 0x0fde5824, - 0x0fdfd405, - 0x1537001f, - 0x15b9d005, - 0x15c0f001, - 0x1675100d, - 0x175f0fff, - 0x179f0c1e, - 0x17d0f5e1, - 0x189a5804}; +/// - bits [0, 13] The size of the range, allowing 16384 elements. +/// - bits [14, 31] The lower bound code point of the range. The upper bound of +/// the range is lower bound + size. Note the code expects code units the fit +/// into 18 bits, instead of the 21 bits needed for the full Unicode range. 
+_LIBCPP_HIDE_FROM_ABI inline constexpr uint32_t __entries[711] = { + 0x00000020 /* 00000000 - 00000020 [ 33] */, + 0x001fc021 /* 0000007f - 000000a0 [ 34] */, + 0x002b4000 /* 000000ad - 000000ad [ 1] */, + 0x00de0001 /* 00000378 - 00000379 [ 2] */, + 0x00e00003 /* 00000380 - 00000383 [ 4] */, + 0x00e2c000 /* 0000038b - 0000038b [ 1] */, + 0x00e34000 /* 0000038d - 0000038d [ 1] */, + 0x00e88000 /* 000003a2 - 000003a2 [ 1] */, + 0x014c0000 /* 00000530 - 00000530 [ 1] */, + 0x0155c001 /* 00000557 - 00000558 [ 2] */, + 0x0162c001 /* 0000058b - 0000058c [ 2] */, + 0x01640000 /* 00000590 - 00000590 [ 1] */, + 0x01720007 /* 000005c8 - 000005cf [ 8] */, + 0x017ac003 /* 000005eb - 000005ee [ 4] */, + 0x017d4010 /* 000005f5 - 00000605 [ 17] */, + 0x01870000 /* 0000061c - 0000061c [ 1] */, + 0x01b74000 /* 000006dd - 000006dd [ 1] */, + 0x01c38001 /* 0000070e - 0000070f [ 2] */, + 0x01d2c001 /* 0000074b - 0000074c [ 2] */, + 0x01ec800d /* 000007b2 - 000007bf [ 14] */, + 0x01fec001 /* 000007fb - 000007fc [ 2] */, + 0x020b8001 /* 0000082e - 0000082f [ 2] */, + 0x020fc000 /* 0000083f - 0000083f [ 1] */, + 0x02170001 /* 0000085c - 0000085d [ 2] */, + 0x0217c000 /* 0000085f - 0000085f [ 1] */, + 0x021ac004 /* 0000086b - 0000086f [ 5] */, + 0x0223c008 /* 0000088f - 00000897 [ 9] */, + 0x02388000 /* 000008e2 - 000008e2 [ 1] */, + 0x02610000 /* 00000984 - 00000984 [ 1] */, + 0x02634001 /* 0000098d - 0000098e [ 2] */, + 0x02644001 /* 00000991 - 00000992 [ 2] */, + 0x026a4000 /* 000009a9 - 000009a9 [ 1] */, + 0x026c4000 /* 000009b1 - 000009b1 [ 1] */, + 0x026cc002 /* 000009b3 - 000009b5 [ 3] */, + 0x026e8001 /* 000009ba - 000009bb [ 2] */, + 0x02714001 /* 000009c5 - 000009c6 [ 2] */, + 0x02724001 /* 000009c9 - 000009ca [ 2] */, + 0x0273c007 /* 000009cf - 000009d6 [ 8] */, + 0x02760003 /* 000009d8 - 000009db [ 4] */, + 0x02778000 /* 000009de - 000009de [ 1] */, + 0x02790001 /* 000009e4 - 000009e5 [ 2] */, + 0x027fc001 /* 000009ff - 00000a00 [ 2] */, + 0x02810000 /* 00000a04 - 00000a04 [ 
1] */, + 0x0282c003 /* 00000a0b - 00000a0e [ 4] */, + 0x02844001 /* 00000a11 - 00000a12 [ 2] */, + 0x028a4000 /* 00000a29 - 00000a29 [ 1] */, + 0x028c4000 /* 00000a31 - 00000a31 [ 1] */, + 0x028d0000 /* 00000a34 - 00000a34 [ 1] */, + 0x028dc000 /* 00000a37 - 00000a37 [ 1] */, + 0x028e8001 /* 00000a3a - 00000a3b [ 2] */, + 0x028f4000 /* 00000a3d - 00000a3d [ 1] */, + 0x0290c003 /* 00000a43 - 00000a46 [ 4] */, + 0x02924001 /* 00000a49 - 00000a4a [ 2] */, + 0x02938002 /* 00000a4e - 00000a50 [ 3] */, + 0x02948006 /* 00000a52 - 00000a58 [ 7] */, + 0x02974000 /* 00000a5d - 00000a5d [ 1] */, + 0x0297c006 /* 00000a5f - 00000a65 [ 7] */, + 0x029dc009 /* 00000a77 - 00000a80 [ 10] */, + 0x02a10000 /* 00000a84 - 00000a84 [ 1] */, + 0x02a38000 /* 00000a8e - 00000a8e [ 1] */, + 0x02a48000 /* 00000a92 - 00000a92 [ 1] */, + 0x02aa4000 /* 00000aa9 - 00000aa9 [ 1] */, + 0x02ac4000 /* 00000ab1 - 00000ab1 [ 1] */, + 0x02ad0000 /* 00000ab4 - 00000ab4 [ 1] */, + 0x02ae8001 /* 00000aba - 00000abb [ 2] */, + 0x02b18000 /* 00000ac6 - 00000ac6 [ 1] */, + 0x02b28000 /* 00000aca - 00000aca [ 1] */, + 0x02b38001 /* 00000ace - 00000acf [ 2] */, + 0x02b4400e /* 00000ad1 - 00000adf [ 15] */, + 0x02b90001 /* 00000ae4 - 00000ae5 [ 2] */, + 0x02bc8006 /* 00000af2 - 00000af8 [ 7] */, + 0x02c00000 /* 00000b00 - 00000b00 [ 1] */, + 0x02c10000 /* 00000b04 - 00000b04 [ 1] */, + 0x02c34001 /* 00000b0d - 00000b0e [ 2] */, + 0x02c44001 /* 00000b11 - 00000b12 [ 2] */, + 0x02ca4000 /* 00000b29 - 00000b29 [ 1] */, + 0x02cc4000 /* 00000b31 - 00000b31 [ 1] */, + 0x02cd0000 /* 00000b34 - 00000b34 [ 1] */, + 0x02ce8001 /* 00000b3a - 00000b3b [ 2] */, + 0x02d14001 /* 00000b45 - 00000b46 [ 2] */, + 0x02d24001 /* 00000b49 - 00000b4a [ 2] */, + 0x02d38006 /* 00000b4e - 00000b54 [ 7] */, + 0x02d60003 /* 00000b58 - 00000b5b [ 4] */, + 0x02d78000 /* 00000b5e - 00000b5e [ 1] */, + 0x02d90001 /* 00000b64 - 00000b65 [ 2] */, + 0x02de0009 /* 00000b78 - 00000b81 [ 10] */, + 0x02e10000 /* 00000b84 - 00000b84 [ 1] */, + 
0x02e2c002 /* 00000b8b - 00000b8d [ 3] */, + 0x02e44000 /* 00000b91 - 00000b91 [ 1] */, + 0x02e58002 /* 00000b96 - 00000b98 [ 3] */, + 0x02e6c000 /* 00000b9b - 00000b9b [ 1] */, + 0x02e74000 /* 00000b9d - 00000b9d [ 1] */, + 0x02e80002 /* 00000ba0 - 00000ba2 [ 3] */, + 0x02e94002 /* 00000ba5 - 00000ba7 [ 3] */, + 0x02eac002 /* 00000bab - 00000bad [ 3] */, + 0x02ee8003 /* 00000bba - 00000bbd [ 4] */, + 0x02f0c002 /* 00000bc3 - 00000bc5 [ 3] */, + 0x02f24000 /* 00000bc9 - 00000bc9 [ 1] */, + 0x02f38001 /* 00000bce - 00000bcf [ 2] */, + 0x02f44005 /* 00000bd1 - 00000bd6 [ 6] */, + 0x02f6000d /* 00000bd8 - 00000be5 [ 14] */, + 0x02fec004 /* 00000bfb - 00000bff [ 5] */, + 0x03034000 /* 00000c0d - 00000c0d [ 1] */, + 0x03044000 /* 00000c11 - 00000c11 [ 1] */, + 0x030a4000 /* 00000c29 - 00000c29 [ 1] */, + 0x030e8001 /* 00000c3a - 00000c3b [ 2] */, + 0x03114000 /* 00000c45 - 00000c45 [ 1] */, + 0x03124000 /* 00000c49 - 00000c49 [ 1] */, + 0x03138006 /* 00000c4e - 00000c54 [ 7] */, + 0x0315c000 /* 00000c57 - 00000c57 [ 1] */, + 0x0316c001 /* 00000c5b - 00000c5c [ 2] */, + 0x03178001 /* 00000c5e - 00000c5f [ 2] */, + 0x03190001 /* 00000c64 - 00000c65 [ 2] */, + 0x031c0006 /* 00000c70 - 00000c76 [ 7] */, + 0x03234000 /* 00000c8d - 00000c8d [ 1] */, + 0x03244000 /* 00000c91 - 00000c91 [ 1] */, + 0x032a4000 /* 00000ca9 - 00000ca9 [ 1] */, + 0x032d0000 /* 00000cb4 - 00000cb4 [ 1] */, + 0x032e8001 /* 00000cba - 00000cbb [ 2] */, + 0x03314000 /* 00000cc5 - 00000cc5 [ 1] */, + 0x03324000 /* 00000cc9 - 00000cc9 [ 1] */, + 0x03338006 /* 00000cce - 00000cd4 [ 7] */, + 0x0335c005 /* 00000cd7 - 00000cdc [ 6] */, + 0x0337c000 /* 00000cdf - 00000cdf [ 1] */, + 0x03390001 /* 00000ce4 - 00000ce5 [ 2] */, + 0x033c0000 /* 00000cf0 - 00000cf0 [ 1] */, + 0x033d000b /* 00000cf4 - 00000cff [ 12] */, + 0x03434000 /* 00000d0d - 00000d0d [ 1] */, + 0x03444000 /* 00000d11 - 00000d11 [ 1] */, + 0x03514000 /* 00000d45 - 00000d45 [ 1] */, + 0x03524000 /* 00000d49 - 00000d49 [ 1] */, + 0x03540003 /* 
00000d50 - 00000d53 [ 4] */, + 0x03590001 /* 00000d64 - 00000d65 [ 2] */, + 0x03600000 /* 00000d80 - 00000d80 [ 1] */, + 0x03610000 /* 00000d84 - 00000d84 [ 1] */, + 0x0365c002 /* 00000d97 - 00000d99 [ 3] */, + 0x036c8000 /* 00000db2 - 00000db2 [ 1] */, + 0x036f0000 /* 00000dbc - 00000dbc [ 1] */, + 0x036f8001 /* 00000dbe - 00000dbf [ 2] */, + 0x0371c002 /* 00000dc7 - 00000dc9 [ 3] */, + 0x0372c003 /* 00000dcb - 00000dce [ 4] */, + 0x03754000 /* 00000dd5 - 00000dd5 [ 1] */, + 0x0375c000 /* 00000dd7 - 00000dd7 [ 1] */, + 0x03780005 /* 00000de0 - 00000de5 [ 6] */, + 0x037c0001 /* 00000df0 - 00000df1 [ 2] */, + 0x037d400b /* 00000df5 - 00000e00 [ 12] */, + 0x038ec003 /* 00000e3b - 00000e3e [ 4] */, + 0x03970024 /* 00000e5c - 00000e80 [ 37] */, + 0x03a0c000 /* 00000e83 - 00000e83 [ 1] */, + 0x03a14000 /* 00000e85 - 00000e85 [ 1] */, + 0x03a2c000 /* 00000e8b - 00000e8b [ 1] */, + 0x03a90000 /* 00000ea4 - 00000ea4 [ 1] */, + 0x03a98000 /* 00000ea6 - 00000ea6 [ 1] */, + 0x03af8001 /* 00000ebe - 00000ebf [ 2] */, + 0x03b14000 /* 00000ec5 - 00000ec5 [ 1] */, + 0x03b1c000 /* 00000ec7 - 00000ec7 [ 1] */, + 0x03b3c000 /* 00000ecf - 00000ecf [ 1] */, + 0x03b68001 /* 00000eda - 00000edb [ 2] */, + 0x03b8001f /* 00000ee0 - 00000eff [ 32] */, + 0x03d20000 /* 00000f48 - 00000f48 [ 1] */, + 0x03db4003 /* 00000f6d - 00000f70 [ 4] */, + 0x03e60000 /* 00000f98 - 00000f98 [ 1] */, + 0x03ef4000 /* 00000fbd - 00000fbd [ 1] */, + 0x03f34000 /* 00000fcd - 00000fcd [ 1] */, + 0x03f6c024 /* 00000fdb - 00000fff [ 37] */, + 0x04318000 /* 000010c6 - 000010c6 [ 1] */, + 0x04320004 /* 000010c8 - 000010cc [ 5] */, + 0x04338001 /* 000010ce - 000010cf [ 2] */, + 0x04924000 /* 00001249 - 00001249 [ 1] */, + 0x04938001 /* 0000124e - 0000124f [ 2] */, + 0x0495c000 /* 00001257 - 00001257 [ 1] */, + 0x04964000 /* 00001259 - 00001259 [ 1] */, + 0x04978001 /* 0000125e - 0000125f [ 2] */, + 0x04a24000 /* 00001289 - 00001289 [ 1] */, + 0x04a38001 /* 0000128e - 0000128f [ 2] */, + 0x04ac4000 /* 000012b1 - 
000012b1 [ 1] */, + 0x04ad8001 /* 000012b6 - 000012b7 [ 2] */, + 0x04afc000 /* 000012bf - 000012bf [ 1] */, + 0x04b04000 /* 000012c1 - 000012c1 [ 1] */, + 0x04b18001 /* 000012c6 - 000012c7 [ 2] */, + 0x04b5c000 /* 000012d7 - 000012d7 [ 1] */, + 0x04c44000 /* 00001311 - 00001311 [ 1] */, + 0x04c58001 /* 00001316 - 00001317 [ 2] */, + 0x04d6c001 /* 0000135b - 0000135c [ 2] */, + 0x04df4002 /* 0000137d - 0000137f [ 3] */, + 0x04e68005 /* 0000139a - 0000139f [ 6] */, + 0x04fd8001 /* 000013f6 - 000013f7 [ 2] */, + 0x04ff8001 /* 000013fe - 000013ff [ 2] */, + 0x05a00000 /* 00001680 - 00001680 [ 1] */, + 0x05a74002 /* 0000169d - 0000169f [ 3] */, + 0x05be4006 /* 000016f9 - 000016ff [ 7] */, + 0x05c58008 /* 00001716 - 0000171e [ 9] */, + 0x05cdc008 /* 00001737 - 0000173f [ 9] */, + 0x05d5000b /* 00001754 - 0000175f [ 12] */, + 0x05db4000 /* 0000176d - 0000176d [ 1] */, + 0x05dc4000 /* 00001771 - 00001771 [ 1] */, + 0x05dd000b /* 00001774 - 0000177f [ 12] */, + 0x05f78001 /* 000017de - 000017df [ 2] */, + 0x05fa8005 /* 000017ea - 000017ef [ 6] */, + 0x05fe8005 /* 000017fa - 000017ff [ 6] */, + 0x06038000 /* 0000180e - 0000180e [ 1] */, + 0x06068005 /* 0000181a - 0000181f [ 6] */, + 0x061e4006 /* 00001879 - 0000187f [ 7] */, + 0x062ac004 /* 000018ab - 000018af [ 5] */, + 0x063d8009 /* 000018f6 - 000018ff [ 10] */, + 0x0647c000 /* 0000191f - 0000191f [ 1] */, + 0x064b0003 /* 0000192c - 0000192f [ 4] */, + 0x064f0003 /* 0000193c - 0000193f [ 4] */, + 0x06504002 /* 00001941 - 00001943 [ 3] */, + 0x065b8001 /* 0000196e - 0000196f [ 2] */, + 0x065d400a /* 00001975 - 0000197f [ 11] */, + 0x066b0003 /* 000019ac - 000019af [ 4] */, + 0x06728005 /* 000019ca - 000019cf [ 6] */, + 0x0676c002 /* 000019db - 000019dd [ 3] */, + 0x06870001 /* 00001a1c - 00001a1d [ 2] */, + 0x0697c000 /* 00001a5f - 00001a5f [ 1] */, + 0x069f4001 /* 00001a7d - 00001a7e [ 2] */, + 0x06a28005 /* 00001a8a - 00001a8f [ 6] */, + 0x06a68005 /* 00001a9a - 00001a9f [ 6] */, + 0x06ab8001 /* 00001aae - 00001aaf [ 2] 
*/, + 0x06b3c030 /* 00001acf - 00001aff [ 49] */, + 0x06d34002 /* 00001b4d - 00001b4f [ 3] */, + 0x06dfc000 /* 00001b7f - 00001b7f [ 1] */, + 0x06fd0007 /* 00001bf4 - 00001bfb [ 8] */, + 0x070e0002 /* 00001c38 - 00001c3a [ 3] */, + 0x07128002 /* 00001c4a - 00001c4c [ 3] */, + 0x07224006 /* 00001c89 - 00001c8f [ 7] */, + 0x072ec001 /* 00001cbb - 00001cbc [ 2] */, + 0x07320007 /* 00001cc8 - 00001ccf [ 8] */, + 0x073ec004 /* 00001cfb - 00001cff [ 5] */, + 0x07c58001 /* 00001f16 - 00001f17 [ 2] */, + 0x07c78001 /* 00001f1e - 00001f1f [ 2] */, + 0x07d18001 /* 00001f46 - 00001f47 [ 2] */, + 0x07d38001 /* 00001f4e - 00001f4f [ 2] */, + 0x07d60000 /* 00001f58 - 00001f58 [ 1] */, + 0x07d68000 /* 00001f5a - 00001f5a [ 1] */, + 0x07d70000 /* 00001f5c - 00001f5c [ 1] */, + 0x07d78000 /* 00001f5e - 00001f5e [ 1] */, + 0x07df8001 /* 00001f7e - 00001f7f [ 2] */, + 0x07ed4000 /* 00001fb5 - 00001fb5 [ 1] */, + 0x07f14000 /* 00001fc5 - 00001fc5 [ 1] */, + 0x07f50001 /* 00001fd4 - 00001fd5 [ 2] */, + 0x07f70000 /* 00001fdc - 00001fdc [ 1] */, + 0x07fc0001 /* 00001ff0 - 00001ff1 [ 2] */, + 0x07fd4000 /* 00001ff5 - 00001ff5 [ 1] */, + 0x07ffc010 /* 00001fff - 0000200f [ 17] */, + 0x080a0007 /* 00002028 - 0000202f [ 8] */, + 0x0817c010 /* 0000205f - 0000206f [ 17] */, + 0x081c8001 /* 00002072 - 00002073 [ 2] */, + 0x0823c000 /* 0000208f - 0000208f [ 1] */, + 0x08274002 /* 0000209d - 0000209f [ 3] */, + 0x0830400e /* 000020c1 - 000020cf [ 15] */, + 0x083c400e /* 000020f1 - 000020ff [ 15] */, + 0x08630003 /* 0000218c - 0000218f [ 4] */, + 0x0909c018 /* 00002427 - 0000243f [ 25] */, + 0x0912c014 /* 0000244b - 0000245f [ 21] */, + 0x0add0001 /* 00002b74 - 00002b75 [ 2] */, + 0x0ae58000 /* 00002b96 - 00002b96 [ 1] */, + 0x0b3d0004 /* 00002cf4 - 00002cf8 [ 5] */, + 0x0b498000 /* 00002d26 - 00002d26 [ 1] */, + 0x0b4a0004 /* 00002d28 - 00002d2c [ 5] */, + 0x0b4b8001 /* 00002d2e - 00002d2f [ 2] */, + 0x0b5a0006 /* 00002d68 - 00002d6e [ 7] */, + 0x0b5c400d /* 00002d71 - 00002d7e [ 14] */, + 
0x0b65c008 /* 00002d97 - 00002d9f [ 9] */, + 0x0b69c000 /* 00002da7 - 00002da7 [ 1] */, + 0x0b6bc000 /* 00002daf - 00002daf [ 1] */, + 0x0b6dc000 /* 00002db7 - 00002db7 [ 1] */, + 0x0b6fc000 /* 00002dbf - 00002dbf [ 1] */, + 0x0b71c000 /* 00002dc7 - 00002dc7 [ 1] */, + 0x0b73c000 /* 00002dcf - 00002dcf [ 1] */, + 0x0b75c000 /* 00002dd7 - 00002dd7 [ 1] */, + 0x0b77c000 /* 00002ddf - 00002ddf [ 1] */, + 0x0b978021 /* 00002e5e - 00002e7f [ 34] */, + 0x0ba68000 /* 00002e9a - 00002e9a [ 1] */, + 0x0bbd000b /* 00002ef4 - 00002eff [ 12] */, + 0x0bf58019 /* 00002fd6 - 00002fef [ 26] */, + 0x0c000000 /* 00003000 - 00003000 [ 1] */, + 0x0c100000 /* 00003040 - 00003040 [ 1] */, + 0x0c25c001 /* 00003097 - 00003098 [ 2] */, + 0x0c400004 /* 00003100 - 00003104 [ 5] */, + 0x0c4c0000 /* 00003130 - 00003130 [ 1] */, + 0x0c63c000 /* 0000318f - 0000318f [ 1] */, + 0x0c79000a /* 000031e4 - 000031ee [ 11] */, + 0x0c87c000 /* 0000321f - 0000321f [ 1] */, + 0x29234002 /* 0000a48d - 0000a48f [ 3] */, + 0x2931c008 /* 0000a4c7 - 0000a4cf [ 9] */, + 0x298b0013 /* 0000a62c - 0000a63f [ 20] */, + 0x29be0007 /* 0000a6f8 - 0000a6ff [ 8] */, + 0x29f2c004 /* 0000a7cb - 0000a7cf [ 5] */, + 0x29f48000 /* 0000a7d2 - 0000a7d2 [ 1] */, + 0x29f50000 /* 0000a7d4 - 0000a7d4 [ 1] */, + 0x29f68017 /* 0000a7da - 0000a7f1 [ 24] */, + 0x2a0b4002 /* 0000a82d - 0000a82f [ 3] */, + 0x2a0e8005 /* 0000a83a - 0000a83f [ 6] */, + 0x2a1e0007 /* 0000a878 - 0000a87f [ 8] */, + 0x2a318007 /* 0000a8c6 - 0000a8cd [ 8] */, + 0x2a368005 /* 0000a8da - 0000a8df [ 6] */, + 0x2a55000a /* 0000a954 - 0000a95e [ 11] */, + 0x2a5f4002 /* 0000a97d - 0000a97f [ 3] */, + 0x2a738000 /* 0000a9ce - 0000a9ce [ 1] */, + 0x2a768003 /* 0000a9da - 0000a9dd [ 4] */, + 0x2a7fc000 /* 0000a9ff - 0000a9ff [ 1] */, + 0x2a8dc008 /* 0000aa37 - 0000aa3f [ 9] */, + 0x2a938001 /* 0000aa4e - 0000aa4f [ 2] */, + 0x2a968001 /* 0000aa5a - 0000aa5b [ 2] */, + 0x2ab0c017 /* 0000aac3 - 0000aada [ 24] */, + 0x2abdc009 /* 0000aaf7 - 0000ab00 [ 10] */, + 0x2ac1c001 
/* 0000ab07 - 0000ab08 [ 2] */, + 0x2ac3c001 /* 0000ab0f - 0000ab10 [ 2] */, + 0x2ac5c008 /* 0000ab17 - 0000ab1f [ 9] */, + 0x2ac9c000 /* 0000ab27 - 0000ab27 [ 1] */, + 0x2acbc000 /* 0000ab2f - 0000ab2f [ 1] */, + 0x2adb0003 /* 0000ab6c - 0000ab6f [ 4] */, + 0x2afb8001 /* 0000abee - 0000abef [ 2] */, + 0x2afe8005 /* 0000abfa - 0000abff [ 6] */, + 0x35e9000b /* 0000d7a4 - 0000d7af [ 12] */, + 0x35f1c003 /* 0000d7c7 - 0000d7ca [ 4] */, + 0x35ff2103 /* 0000d7fc - 0000f8ff [ 8452] */, + 0x3e9b8001 /* 0000fa6e - 0000fa6f [ 2] */, + 0x3eb68025 /* 0000fada - 0000faff [ 38] */, + 0x3ec1c00b /* 0000fb07 - 0000fb12 [ 12] */, + 0x3ec60004 /* 0000fb18 - 0000fb1c [ 5] */, + 0x3ecdc000 /* 0000fb37 - 0000fb37 [ 1] */, + 0x3ecf4000 /* 0000fb3d - 0000fb3d [ 1] */, + 0x3ecfc000 /* 0000fb3f - 0000fb3f [ 1] */, + 0x3ed08000 /* 0000fb42 - 0000fb42 [ 1] */, + 0x3ed14000 /* 0000fb45 - 0000fb45 [ 1] */, + 0x3ef0c00f /* 0000fbc3 - 0000fbd2 [ 16] */, + 0x3f640001 /* 0000fd90 - 0000fd91 [ 2] */, + 0x3f720006 /* 0000fdc8 - 0000fdce [ 7] */, + 0x3f74001f /* 0000fdd0 - 0000fdef [ 32] */, + 0x3f868005 /* 0000fe1a - 0000fe1f [ 6] */, + 0x3f94c000 /* 0000fe53 - 0000fe53 [ 1] */, + 0x3f99c000 /* 0000fe67 - 0000fe67 [ 1] */, + 0x3f9b0003 /* 0000fe6c - 0000fe6f [ 4] */, + 0x3f9d4000 /* 0000fe75 - 0000fe75 [ 1] */, + 0x3fbf4003 /* 0000fefd - 0000ff00 [ 4] */, + 0x3fefc002 /* 0000ffbf - 0000ffc1 [ 3] */, + 0x3ff20001 /* 0000ffc8 - 0000ffc9 [ 2] */, + 0x3ff40001 /* 0000ffd0 - 0000ffd1 [ 2] */, + 0x3ff60001 /* 0000ffd8 - 0000ffd9 [ 2] */, + 0x3ff74002 /* 0000ffdd - 0000ffdf [ 3] */, + 0x3ff9c000 /* 0000ffe7 - 0000ffe7 [ 1] */, + 0x3ffbc00c /* 0000ffef - 0000fffb [ 13] */, + 0x3fff8001 /* 0000fffe - 0000ffff [ 2] */, + 0x40030000 /* 0001000c - 0001000c [ 1] */, + 0x4009c000 /* 00010027 - 00010027 [ 1] */, + 0x400ec000 /* 0001003b - 0001003b [ 1] */, + 0x400f8000 /* 0001003e - 0001003e [ 1] */, + 0x40138001 /* 0001004e - 0001004f [ 2] */, + 0x40178021 /* 0001005e - 0001007f [ 34] */, + 0x403ec004 /* 
000100fb - 000100ff [ 5] */, + 0x4040c003 /* 00010103 - 00010106 [ 4] */, + 0x404d0002 /* 00010134 - 00010136 [ 3] */, + 0x4063c000 /* 0001018f - 0001018f [ 1] */, + 0x40674002 /* 0001019d - 0001019f [ 3] */, + 0x4068402e /* 000101a1 - 000101cf [ 47] */, + 0x407f8081 /* 000101fe - 0001027f [ 130] */, + 0x40a74002 /* 0001029d - 0001029f [ 3] */, + 0x40b4400e /* 000102d1 - 000102df [ 15] */, + 0x40bf0003 /* 000102fc - 000102ff [ 4] */, + 0x40c90008 /* 00010324 - 0001032c [ 9] */, + 0x40d2c004 /* 0001034b - 0001034f [ 5] */, + 0x40dec004 /* 0001037b - 0001037f [ 5] */, + 0x40e78000 /* 0001039e - 0001039e [ 1] */, + 0x40f10003 /* 000103c4 - 000103c7 [ 4] */, + 0x40f58029 /* 000103d6 - 000103ff [ 42] */, + 0x41278001 /* 0001049e - 0001049f [ 2] */, + 0x412a8005 /* 000104aa - 000104af [ 6] */, + 0x41350003 /* 000104d4 - 000104d7 [ 4] */, + 0x413f0003 /* 000104fc - 000104ff [ 4] */, + 0x414a0007 /* 00010528 - 0001052f [ 8] */, + 0x4159000a /* 00010564 - 0001056e [ 11] */, + 0x415ec000 /* 0001057b - 0001057b [ 1] */, + 0x4162c000 /* 0001058b - 0001058b [ 1] */, + 0x4164c000 /* 00010593 - 00010593 [ 1] */, + 0x41658000 /* 00010596 - 00010596 [ 1] */, + 0x41688000 /* 000105a2 - 000105a2 [ 1] */, + 0x416c8000 /* 000105b2 - 000105b2 [ 1] */, + 0x416e8000 /* 000105ba - 000105ba [ 1] */, + 0x416f4042 /* 000105bd - 000105ff [ 67] */, + 0x41cdc008 /* 00010737 - 0001073f [ 9] */, + 0x41d58009 /* 00010756 - 0001075f [ 10] */, + 0x41da0017 /* 00010768 - 0001077f [ 24] */, + 0x41e18000 /* 00010786 - 00010786 [ 1] */, + 0x41ec4000 /* 000107b1 - 000107b1 [ 1] */, + 0x41eec044 /* 000107bb - 000107ff [ 69] */, + 0x42018001 /* 00010806 - 00010807 [ 2] */, + 0x42024000 /* 00010809 - 00010809 [ 1] */, + 0x420d8000 /* 00010836 - 00010836 [ 1] */, + 0x420e4002 /* 00010839 - 0001083b [ 3] */, + 0x420f4001 /* 0001083d - 0001083e [ 2] */, + 0x42158000 /* 00010856 - 00010856 [ 1] */, + 0x4227c007 /* 0001089f - 000108a6 [ 8] */, + 0x422c002f /* 000108b0 - 000108df [ 48] */, + 0x423cc000 /* 000108f3 
- 000108f3 [ 1] */, + 0x423d8004 /* 000108f6 - 000108fa [ 5] */, + 0x42470002 /* 0001091c - 0001091e [ 3] */, + 0x424e8004 /* 0001093a - 0001093e [ 5] */, + 0x4250003f /* 00010940 - 0001097f [ 64] */, + 0x426e0003 /* 000109b8 - 000109bb [ 4] */, + 0x42740001 /* 000109d0 - 000109d1 [ 2] */, + 0x42810000 /* 00010a04 - 00010a04 [ 1] */, + 0x4281c004 /* 00010a07 - 00010a0b [ 5] */, + 0x42850000 /* 00010a14 - 00010a14 [ 1] */, + 0x42860000 /* 00010a18 - 00010a18 [ 1] */, + 0x428d8001 /* 00010a36 - 00010a37 [ 2] */, + 0x428ec003 /* 00010a3b - 00010a3e [ 4] */, + 0x42924006 /* 00010a49 - 00010a4f [ 7] */, + 0x42964006 /* 00010a59 - 00010a5f [ 7] */, + 0x42a8001f /* 00010aa0 - 00010abf [ 32] */, + 0x42b9c003 /* 00010ae7 - 00010aea [ 4] */, + 0x42bdc008 /* 00010af7 - 00010aff [ 9] */, + 0x42cd8002 /* 00010b36 - 00010b38 [ 3] */, + 0x42d58001 /* 00010b56 - 00010b57 [ 2] */, + 0x42dcc004 /* 00010b73 - 00010b77 [ 5] */, + 0x42e48006 /* 00010b92 - 00010b98 [ 7] */, + 0x42e7400b /* 00010b9d - 00010ba8 [ 12] */, + 0x42ec004f /* 00010bb0 - 00010bff [ 80] */, + 0x43124036 /* 00010c49 - 00010c7f [ 55] */, + 0x432cc00c /* 00010cb3 - 00010cbf [ 13] */, + 0x433cc006 /* 00010cf3 - 00010cf9 [ 7] */, + 0x434a0007 /* 00010d28 - 00010d2f [ 8] */, + 0x434e8125 /* 00010d3a - 00010e5f [ 294] */, + 0x439fc000 /* 00010e7f - 00010e7f [ 1] */, + 0x43aa8000 /* 00010eaa - 00010eaa [ 1] */, + 0x43ab8001 /* 00010eae - 00010eaf [ 2] */, + 0x43ac804a /* 00010eb2 - 00010efc [ 75] */, + 0x43ca0007 /* 00010f28 - 00010f2f [ 8] */, + 0x43d68015 /* 00010f5a - 00010f6f [ 22] */, + 0x43e28025 /* 00010f8a - 00010faf [ 38] */, + 0x43f30013 /* 00010fcc - 00010fdf [ 20] */, + 0x43fdc008 /* 00010ff7 - 00010fff [ 9] */, + 0x44138003 /* 0001104e - 00011051 [ 4] */, + 0x441d8008 /* 00011076 - 0001107e [ 9] */, + 0x442f4000 /* 000110bd - 000110bd [ 1] */, + 0x4430c00c /* 000110c3 - 000110cf [ 13] */, + 0x443a4006 /* 000110e9 - 000110ef [ 7] */, + 0x443e8005 /* 000110fa - 000110ff [ 6] */, + 0x444d4000 /* 00011135 - 
00011135 [ 1] */, + 0x44520007 /* 00011148 - 0001114f [ 8] */, + 0x445dc008 /* 00011177 - 0001117f [ 9] */, + 0x44780000 /* 000111e0 - 000111e0 [ 1] */, + 0x447d400a /* 000111f5 - 000111ff [ 11] */, + 0x44848000 /* 00011212 - 00011212 [ 1] */, + 0x4490803d /* 00011242 - 0001127f [ 62] */, + 0x44a1c000 /* 00011287 - 00011287 [ 1] */, + 0x44a24000 /* 00011289 - 00011289 [ 1] */, + 0x44a38000 /* 0001128e - 0001128e [ 1] */, + 0x44a78000 /* 0001129e - 0001129e [ 1] */, + 0x44aa8005 /* 000112aa - 000112af [ 6] */, + 0x44bac004 /* 000112eb - 000112ef [ 5] */, + 0x44be8005 /* 000112fa - 000112ff [ 6] */, + 0x44c10000 /* 00011304 - 00011304 [ 1] */, + 0x44c34001 /* 0001130d - 0001130e [ 2] */, + 0x44c44001 /* 00011311 - 00011312 [ 2] */, + 0x44ca4000 /* 00011329 - 00011329 [ 1] */, + 0x44cc4000 /* 00011331 - 00011331 [ 1] */, + 0x44cd0000 /* 00011334 - 00011334 [ 1] */, + 0x44ce8000 /* 0001133a - 0001133a [ 1] */, + 0x44d14001 /* 00011345 - 00011346 [ 2] */, + 0x44d24001 /* 00011349 - 0001134a [ 2] */, + 0x44d38001 /* 0001134e - 0001134f [ 2] */, + 0x44d44005 /* 00011351 - 00011356 [ 6] */, + 0x44d60004 /* 00011358 - 0001135c [ 5] */, + 0x44d90001 /* 00011364 - 00011365 [ 2] */, + 0x44db4002 /* 0001136d - 0001136f [ 3] */, + 0x44dd408a /* 00011375 - 000113ff [ 139] */, + 0x45170000 /* 0001145c - 0001145c [ 1] */, + 0x4518801d /* 00011462 - 0001147f [ 30] */, + 0x45320007 /* 000114c8 - 000114cf [ 8] */, + 0x453680a5 /* 000114da - 0001157f [ 166] */, + 0x456d8001 /* 000115b6 - 000115b7 [ 2] */, + 0x45778021 /* 000115de - 000115ff [ 34] */, + 0x4591400a /* 00011645 - 0001164f [ 11] */, + 0x45968005 /* 0001165a - 0001165f [ 6] */, + 0x459b4012 /* 0001166d - 0001167f [ 19] */, + 0x45ae8005 /* 000116ba - 000116bf [ 6] */, + 0x45b28035 /* 000116ca - 000116ff [ 54] */, + 0x45c6c001 /* 0001171b - 0001171c [ 2] */, + 0x45cb0003 /* 0001172c - 0001172f [ 4] */, + 0x45d1c0b8 /* 00011747 - 000117ff [ 185] */, + 0x460f0063 /* 0001183c - 0001189f [ 100] */, + 0x463cc00b /* 000118f3 - 
000118fe [ 12] */, + 0x4641c001 /* 00011907 - 00011908 [ 2] */, + 0x46428001 /* 0001190a - 0001190b [ 2] */, + 0x46450000 /* 00011914 - 00011914 [ 1] */, + 0x4645c000 /* 00011917 - 00011917 [ 1] */, + 0x464d8000 /* 00011936 - 00011936 [ 1] */, + 0x464e4001 /* 00011939 - 0001193a [ 2] */, + 0x4651c008 /* 00011947 - 0001194f [ 9] */, + 0x46568045 /* 0001195a - 0001199f [ 70] */, + 0x466a0001 /* 000119a8 - 000119a9 [ 2] */, + 0x46760001 /* 000119d8 - 000119d9 [ 2] */, + 0x4679401a /* 000119e5 - 000119ff [ 27] */, + 0x46920007 /* 00011a48 - 00011a4f [ 8] */, + 0x46a8c00c /* 00011aa3 - 00011aaf [ 13] */, + 0x46be4006 /* 00011af9 - 00011aff [ 7] */, + 0x46c280f5 /* 00011b0a - 00011bff [ 246] */, + 0x47024000 /* 00011c09 - 00011c09 [ 1] */, + 0x470dc000 /* 00011c37 - 00011c37 [ 1] */, + 0x47118009 /* 00011c46 - 00011c4f [ 10] */, + 0x471b4002 /* 00011c6d - 00011c6f [ 3] */, + 0x47240001 /* 00011c90 - 00011c91 [ 2] */, + 0x472a0000 /* 00011ca8 - 00011ca8 [ 1] */, + 0x472dc048 /* 00011cb7 - 00011cff [ 73] */, + 0x4741c000 /* 00011d07 - 00011d07 [ 1] */, + 0x47428000 /* 00011d0a - 00011d0a [ 1] */, + 0x474dc002 /* 00011d37 - 00011d39 [ 3] */, + 0x474ec000 /* 00011d3b - 00011d3b [ 1] */, + 0x474f8000 /* 00011d3e - 00011d3e [ 1] */, + 0x47520007 /* 00011d48 - 00011d4f [ 8] */, + 0x47568005 /* 00011d5a - 00011d5f [ 6] */, + 0x47598000 /* 00011d66 - 00011d66 [ 1] */, + 0x475a4000 /* 00011d69 - 00011d69 [ 1] */, + 0x4763c000 /* 00011d8f - 00011d8f [ 1] */, + 0x47648000 /* 00011d92 - 00011d92 [ 1] */, + 0x47664006 /* 00011d99 - 00011d9f [ 7] */, + 0x476a8135 /* 00011daa - 00011edf [ 310] */, + 0x47be4006 /* 00011ef9 - 00011eff [ 7] */, + 0x47c44000 /* 00011f11 - 00011f11 [ 1] */, + 0x47cec002 /* 00011f3b - 00011f3d [ 3] */, + 0x47d68055 /* 00011f5a - 00011faf [ 86] */, + 0x47ec400e /* 00011fb1 - 00011fbf [ 15] */, + 0x47fc800c /* 00011ff2 - 00011ffe [ 13] */, + 0x48e68065 /* 0001239a - 000123ff [ 102] */, + 0x491bc000 /* 0001246f - 0001246f [ 1] */, + 0x491d400a /* 00012475 - 
0001247f [ 11] */, + 0x49510a4b /* 00012544 - 00012f8f [ 2636] */, + 0x4bfcc00c /* 00012ff3 - 00012fff [ 13] */, + 0x4d0c000f /* 00013430 - 0001343f [ 16] */, + 0x4d158fa9 /* 00013456 - 000143ff [ 4010] */, + 0x5191e1b8 /* 00014647 - 000167ff [ 8633] */, + 0x5a8e4006 /* 00016a39 - 00016a3f [ 7] */, + 0x5a97c000 /* 00016a5f - 00016a5f [ 1] */, + 0x5a9a8003 /* 00016a6a - 00016a6d [ 4] */, + 0x5aafc000 /* 00016abf - 00016abf [ 1] */, + 0x5ab28005 /* 00016aca - 00016acf [ 6] */, + 0x5abb8001 /* 00016aee - 00016aef [ 2] */, + 0x5abd8009 /* 00016af6 - 00016aff [ 10] */, + 0x5ad18009 /* 00016b46 - 00016b4f [ 10] */, + 0x5ad68000 /* 00016b5a - 00016b5a [ 1] */, + 0x5ad88000 /* 00016b62 - 00016b62 [ 1] */, + 0x5ade0004 /* 00016b78 - 00016b7c [ 5] */, + 0x5ae402af /* 00016b90 - 00016e3f [ 688] */, + 0x5ba6c064 /* 00016e9b - 00016eff [ 101] */, + 0x5bd2c003 /* 00016f4b - 00016f4e [ 4] */, + 0x5be20006 /* 00016f88 - 00016f8e [ 7] */, + 0x5be8003f /* 00016fa0 - 00016fdf [ 64] */, + 0x5bf9400a /* 00016fe5 - 00016fef [ 11] */, + 0x5bfc800d /* 00016ff2 - 00016fff [ 14] */, + 0x61fe0007 /* 000187f8 - 000187ff [ 8] */, + 0x63358029 /* 00018cd6 - 00018cff [ 42] */, + 0x634262e6 /* 00018d09 - 0001afef [ 8935] */, + 0x6bfd0000 /* 0001aff4 - 0001aff4 [ 1] */, + 0x6bff0000 /* 0001affc - 0001affc [ 1] */, + 0x6bffc000 /* 0001afff - 0001afff [ 1] */, + 0x6c48c00e /* 0001b123 - 0001b131 [ 15] */, + 0x6c4cc01c /* 0001b133 - 0001b14f [ 29] */, + 0x6c54c001 /* 0001b153 - 0001b154 [ 2] */, + 0x6c55800d /* 0001b156 - 0001b163 [ 14] */, + 0x6c5a0007 /* 0001b168 - 0001b16f [ 8] */, + 0x6cbf0903 /* 0001b2fc - 0001bbff [ 2308] */, + 0x6f1ac004 /* 0001bc6b - 0001bc6f [ 5] */, + 0x6f1f4002 /* 0001bc7d - 0001bc7f [ 3] */, + 0x6f224006 /* 0001bc89 - 0001bc8f [ 7] */, + 0x6f268001 /* 0001bc9a - 0001bc9b [ 2] */, + 0x6f28125f /* 0001bca0 - 0001ceff [ 4704] */, + 0x73cb8001 /* 0001cf2e - 0001cf2f [ 2] */, + 0x73d1c008 /* 0001cf47 - 0001cf4f [ 9] */, + 0x73f1003b /* 0001cfc4 - 0001cfff [ 60] */, + 
0x743d8009 /* 0001d0f6 - 0001d0ff [ 10] */, + 0x7449c001 /* 0001d127 - 0001d128 [ 2] */, + 0x745cc007 /* 0001d173 - 0001d17a [ 8] */, + 0x747ac014 /* 0001d1eb - 0001d1ff [ 21] */, + 0x74918079 /* 0001d246 - 0001d2bf [ 122] */, + 0x74b5000b /* 0001d2d4 - 0001d2df [ 12] */, + 0x74bd000b /* 0001d2f4 - 0001d2ff [ 12] */, + 0x74d5c008 /* 0001d357 - 0001d35f [ 9] */, + 0x74de4086 /* 0001d379 - 0001d3ff [ 135] */, + 0x75154000 /* 0001d455 - 0001d455 [ 1] */, + 0x75274000 /* 0001d49d - 0001d49d [ 1] */, + 0x75280001 /* 0001d4a0 - 0001d4a1 [ 2] */, + 0x7528c001 /* 0001d4a3 - 0001d4a4 [ 2] */, + 0x7529c001 /* 0001d4a7 - 0001d4a8 [ 2] */, + 0x752b4000 /* 0001d4ad - 0001d4ad [ 1] */, + 0x752e8000 /* 0001d4ba - 0001d4ba [ 1] */, + 0x752f0000 /* 0001d4bc - 0001d4bc [ 1] */, + 0x75310000 /* 0001d4c4 - 0001d4c4 [ 1] */, + 0x75418000 /* 0001d506 - 0001d506 [ 1] */, + 0x7542c001 /* 0001d50b - 0001d50c [ 2] */, + 0x75454000 /* 0001d515 - 0001d515 [ 1] */, + 0x75474000 /* 0001d51d - 0001d51d [ 1] */, + 0x754e8000 /* 0001d53a - 0001d53a [ 1] */, + 0x754fc000 /* 0001d53f - 0001d53f [ 1] */, + 0x75514000 /* 0001d545 - 0001d545 [ 1] */, + 0x7551c002 /* 0001d547 - 0001d549 [ 3] */, + 0x75544000 /* 0001d551 - 0001d551 [ 1] */, + 0x75a98001 /* 0001d6a6 - 0001d6a7 [ 2] */, + 0x75f30001 /* 0001d7cc - 0001d7cd [ 2] */, + 0x76a3000e /* 0001da8c - 0001da9a [ 15] */, + 0x76a80000 /* 0001daa0 - 0001daa0 [ 1] */, + 0x76ac044f /* 0001dab0 - 0001deff [ 1104] */, + 0x77c7c005 /* 0001df1f - 0001df24 [ 6] */, + 0x77cac0d4 /* 0001df2b - 0001dfff [ 213] */, + 0x7801c000 /* 0001e007 - 0001e007 [ 1] */, + 0x78064001 /* 0001e019 - 0001e01a [ 2] */, + 0x78088000 /* 0001e022 - 0001e022 [ 1] */, + 0x78094000 /* 0001e025 - 0001e025 [ 1] */, + 0x780ac004 /* 0001e02b - 0001e02f [ 5] */, + 0x781b8020 /* 0001e06e - 0001e08e [ 33] */, + 0x7824006f /* 0001e090 - 0001e0ff [ 112] */, + 0x784b4002 /* 0001e12d - 0001e12f [ 3] */, + 0x784f8001 /* 0001e13e - 0001e13f [ 2] */, + 0x78528003 /* 0001e14a - 0001e14d [ 4] */, + 
0x7854013f /* 0001e150 - 0001e28f [ 320] */, + 0x78abc010 /* 0001e2af - 0001e2bf [ 17] */, + 0x78be8004 /* 0001e2fa - 0001e2fe [ 5] */, + 0x78c001cf /* 0001e300 - 0001e4cf [ 464] */, + 0x793e82e5 /* 0001e4fa - 0001e7df [ 742] */, + 0x79f9c000 /* 0001e7e7 - 0001e7e7 [ 1] */, + 0x79fb0000 /* 0001e7ec - 0001e7ec [ 1] */, + 0x79fbc000 /* 0001e7ef - 0001e7ef [ 1] */, + 0x79ffc000 /* 0001e7ff - 0001e7ff [ 1] */, + 0x7a314001 /* 0001e8c5 - 0001e8c6 [ 2] */, + 0x7a35c028 /* 0001e8d7 - 0001e8ff [ 41] */, + 0x7a530003 /* 0001e94c - 0001e94f [ 4] */, + 0x7a568003 /* 0001e95a - 0001e95d [ 4] */, + 0x7a580310 /* 0001e960 - 0001ec70 [ 785] */, + 0x7b2d404b /* 0001ecb5 - 0001ed00 [ 76] */, + 0x7b4f80c1 /* 0001ed3e - 0001edff [ 194] */, + 0x7b810000 /* 0001ee04 - 0001ee04 [ 1] */, + 0x7b880000 /* 0001ee20 - 0001ee20 [ 1] */, + 0x7b88c000 /* 0001ee23 - 0001ee23 [ 1] */, + 0x7b894001 /* 0001ee25 - 0001ee26 [ 2] */, + 0x7b8a0000 /* 0001ee28 - 0001ee28 [ 1] */, + 0x7b8cc000 /* 0001ee33 - 0001ee33 [ 1] */, + 0x7b8e0000 /* 0001ee38 - 0001ee38 [ 1] */, + 0x7b8e8000 /* 0001ee3a - 0001ee3a [ 1] */, + 0x7b8f0005 /* 0001ee3c - 0001ee41 [ 6] */, + 0x7b90c003 /* 0001ee43 - 0001ee46 [ 4] */, + 0x7b920000 /* 0001ee48 - 0001ee48 [ 1] */, + 0x7b928000 /* 0001ee4a - 0001ee4a [ 1] */, + 0x7b930000 /* 0001ee4c - 0001ee4c [ 1] */, + 0x7b940000 /* 0001ee50 - 0001ee50 [ 1] */, + 0x7b94c000 /* 0001ee53 - 0001ee53 [ 1] */, + 0x7b954001 /* 0001ee55 - 0001ee56 [ 2] */, + 0x7b960000 /* 0001ee58 - 0001ee58 [ 1] */, + 0x7b968000 /* 0001ee5a - 0001ee5a [ 1] */, + 0x7b970000 /* 0001ee5c - 0001ee5c [ 1] */, + 0x7b978000 /* 0001ee5e - 0001ee5e [ 1] */, + 0x7b980000 /* 0001ee60 - 0001ee60 [ 1] */, + 0x7b98c000 /* 0001ee63 - 0001ee63 [ 1] */, + 0x7b994001 /* 0001ee65 - 0001ee66 [ 2] */, + 0x7b9ac000 /* 0001ee6b - 0001ee6b [ 1] */, + 0x7b9cc000 /* 0001ee73 - 0001ee73 [ 1] */, + 0x7b9e0000 /* 0001ee78 - 0001ee78 [ 1] */, + 0x7b9f4000 /* 0001ee7d - 0001ee7d [ 1] */, + 0x7b9fc000 /* 0001ee7f - 0001ee7f [ 1] */, + 
0x7ba28000 /* 0001ee8a - 0001ee8a [ 1] */, + 0x7ba70004 /* 0001ee9c - 0001eea0 [ 5] */, + 0x7ba90000 /* 0001eea4 - 0001eea4 [ 1] */, + 0x7baa8000 /* 0001eeaa - 0001eeaa [ 1] */, + 0x7baf0033 /* 0001eebc - 0001eeef [ 52] */, + 0x7bbc810d /* 0001eef2 - 0001efff [ 270] */, + 0x7c0b0003 /* 0001f02c - 0001f02f [ 4] */, + 0x7c25000b /* 0001f094 - 0001f09f [ 12] */, + 0x7c2bc001 /* 0001f0af - 0001f0b0 [ 2] */, + 0x7c300000 /* 0001f0c0 - 0001f0c0 [ 1] */, + 0x7c340000 /* 0001f0d0 - 0001f0d0 [ 1] */, + 0x7c3d8009 /* 0001f0f6 - 0001f0ff [ 10] */, + 0x7c6b8037 /* 0001f1ae - 0001f1e5 [ 56] */, + 0x7c80c00c /* 0001f203 - 0001f20f [ 13] */, + 0x7c8f0003 /* 0001f23c - 0001f23f [ 4] */, + 0x7c924006 /* 0001f249 - 0001f24f [ 7] */, + 0x7c94800d /* 0001f252 - 0001f25f [ 14] */, + 0x7c998099 /* 0001f266 - 0001f2ff [ 154] */, + 0x7db60003 /* 0001f6d8 - 0001f6db [ 4] */, + 0x7dbb4002 /* 0001f6ed - 0001f6ef [ 3] */, + 0x7dbf4002 /* 0001f6fd - 0001f6ff [ 3] */, + 0x7dddc003 /* 0001f777 - 0001f77a [ 4] */, + 0x7df68005 /* 0001f7da - 0001f7df [ 6] */, + 0x7dfb0003 /* 0001f7ec - 0001f7ef [ 4] */, + 0x7dfc400e /* 0001f7f1 - 0001f7ff [ 15] */, + 0x7e030003 /* 0001f80c - 0001f80f [ 4] */, + 0x7e120007 /* 0001f848 - 0001f84f [ 8] */, + 0x7e168005 /* 0001f85a - 0001f85f [ 6] */, + 0x7e220007 /* 0001f888 - 0001f88f [ 8] */, + 0x7e2b8001 /* 0001f8ae - 0001f8af [ 2] */, + 0x7e2c804d /* 0001f8b2 - 0001f8ff [ 78] */, + 0x7e95000b /* 0001fa54 - 0001fa5f [ 12] */, + 0x7e9b8001 /* 0001fa6e - 0001fa6f [ 2] */, + 0x7e9f4002 /* 0001fa7d - 0001fa7f [ 3] */, + 0x7ea24006 /* 0001fa89 - 0001fa8f [ 7] */, + 0x7eaf8000 /* 0001fabe - 0001fabe [ 1] */, + 0x7eb18007 /* 0001fac6 - 0001facd [ 8] */, + 0x7eb70003 /* 0001fadc - 0001fadf [ 4] */, + 0x7eba4006 /* 0001fae9 - 0001faef [ 7] */, + 0x7ebe4006 /* 0001faf9 - 0001faff [ 7] */, + 0x7ee4c000 /* 0001fb93 - 0001fb93 [ 1] */, + 0x7ef2c024 /* 0001fbcb - 0001fbef [ 37] */, + 0x7efe8405 /* 0001fbfa - 0001ffff [ 1030] */, + 0xa9b8001f /* 0002a6e0 - 0002a6ff [ 32] */, + 
0xadce8005 /* 0002b73a - 0002b73f [ 6] */, + 0xae078001 /* 0002b81e - 0002b81f [ 2] */, + 0xb3a8800d /* 0002cea2 - 0002ceaf [ 14] */, + 0xbaf8400e /* 0002ebe1 - 0002ebef [ 15] */, + 0xbb9789a1 /* 0002ee5e - 0002f7ff [ 2466] */, + 0xbe8785e1 /* 0002fa1e - 0002ffff [ 1506] */, + 0xc4d2c004 /* 0003134b - 0003134f [ 5] */}; +/// Returns whether the code unit needs to be escaped. +/// /// At the end of the valid Unicode code points space a lot of code points are /// either reserved or a noncharacter. Adding all these entries to the -/// lookup table would add 446 entries to the table (in Unicode 14). -/// Instead the only the start of the region is stored, every code point in -/// this region needs to be escaped. -inline constexpr uint32_t __unallocated_region_lower_bound = 0x000323b0; +/// lookup table would greatly increase the size of the table. Instead these +/// entries are manually processed. In this large area of reserved code points, +/// there is a small area of extended graphemes that should not be escaped +/// unconditionally. This is also manually coded. See the generation script for +/// more details. -/// Returns whether the code unit needs to be escaped. /// -/// \pre The code point is a valid Unicode code point. +/// \\pre The code point is a valid Unicode code point. [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool __needs_escape(const char32_t __code_point) noexcept { - // Since __unallocated_region_lower_bound contains the unshifted range do the - // comparison without shifting. - if (__code_point >= __unallocated_region_lower_bound) + + // The entries in the gap at the end. + if(__code_point >= 0x000e0100 && __code_point <= 0x000e01ef) + return false; + + // The entries at the end. 
+ if (__code_point >= 0x000323b0) return true; - ptrdiff_t __i = std::ranges::upper_bound(__entries, (__code_point << 11) | 0x7ffu) - __entries; + ptrdiff_t __i = std::ranges::upper_bound(__entries, (__code_point << 14) | 0x3fffu) - __entries; if (__i == 0) return false; --__i; - uint32_t __upper_bound = (__entries[__i] >> 11) + (__entries[__i] & 0x7ffu); + uint32_t __upper_bound = (__entries[__i] >> 14) + (__entries[__i] & 0x3fffu); return __code_point <= __upper_bound; } diff --git a/lib/libcxx/include/__format/extended_grapheme_cluster_table.h b/lib/libcxx/include/__format/extended_grapheme_cluster_table.h index 9616dfecd604..48581d8a5dde 100644 --- a/lib/libcxx/include/__format/extended_grapheme_cluster_table.h +++ b/lib/libcxx/include/__format/extended_grapheme_cluster_table.h @@ -125,7 +125,7 @@ enum class __property : uint8_t { /// following benchmark. /// libcxx/benchmarks/std_format_spec_string_unicode.bench.cpp // clang-format off -inline constexpr uint32_t __entries[1496] = { +_LIBCPP_HIDE_FROM_ABI inline constexpr uint32_t __entries[1496] = { 0x00000091, 0x00005005, 0x00005811, diff --git a/lib/libcxx/include/__format/format_arg.h b/lib/libcxx/include/__format/format_arg.h index 34ed9bcd6d63..aa02f81dc40e 100644 --- a/lib/libcxx/include/__format/format_arg.h +++ b/lib/libcxx/include/__format/format_arg.h @@ -14,11 +14,12 @@ #include <__concepts/arithmetic.h> #include <__config> #include <__format/concepts.h> -#include <__format/format_fwd.h> #include <__format/format_parse_context.h> #include <__functional/invoke.h> +#include <__fwd/format.h> #include <__memory/addressof.h> #include <__type_traits/conditional.h> +#include <__type_traits/remove_const.h> #include <__utility/forward.h> #include <__utility/move.h> #include <__utility/unreachable.h> @@ -96,7 +97,7 @@ _LIBCPP_HIDE_FROM_ABI constexpr __arg_t __get_packed_type(uint64_t __types, size } // namespace __format -// This function is not user obervable, so it can directly use the non-standard +// This 
function is not user observable, so it can directly use the non-standard // types of the "variant". See __arg_t for more details. template _LIBCPP_HIDE_FROM_ABI decltype(auto) __visit_format_arg(_Visitor&& __vis, basic_format_arg<_Context> __arg) { @@ -147,6 +148,59 @@ _LIBCPP_HIDE_FROM_ABI decltype(auto) __visit_format_arg(_Visitor&& __vis, basic_ __libcpp_unreachable(); } +# if _LIBCPP_STD_VER >= 26 && defined(_LIBCPP_HAS_EXPLICIT_THIS_PARAMETER) + +template +_LIBCPP_HIDE_FROM_ABI _Rp __visit_format_arg(_Visitor&& __vis, basic_format_arg<_Context> __arg) { + switch (__arg.__type_) { + case __format::__arg_t::__none: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__monostate_); + case __format::__arg_t::__boolean: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__boolean_); + case __format::__arg_t::__char_type: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__char_type_); + case __format::__arg_t::__int: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__int_); + case __format::__arg_t::__long_long: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__long_long_); + case __format::__arg_t::__i128: +# ifndef _LIBCPP_HAS_NO_INT128 + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__i128_); +# else + __libcpp_unreachable(); +# endif + case __format::__arg_t::__unsigned: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__unsigned_); + case __format::__arg_t::__unsigned_long_long: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__unsigned_long_long_); + case __format::__arg_t::__u128: +# ifndef _LIBCPP_HAS_NO_INT128 + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__u128_); +# else + __libcpp_unreachable(); +# endif + case __format::__arg_t::__float: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__float_); + case 
__format::__arg_t::__double: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__double_); + case __format::__arg_t::__long_double: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__long_double_); + case __format::__arg_t::__const_char_type_ptr: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__const_char_type_ptr_); + case __format::__arg_t::__string_view: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__string_view_); + case __format::__arg_t::__ptr: + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), __arg.__value_.__ptr_); + case __format::__arg_t::__handle: + return std::invoke_r<_Rp>( + std::forward<_Visitor>(__vis), typename basic_format_arg<_Context>::handle{__arg.__value_.__handle_}); + } + + __libcpp_unreachable(); +} + +# endif // _LIBCPP_STD_VER >= 26 && defined(_LIBCPP_HAS_EXPLICIT_THIS_PARAMETER) + /// Contains the values used in basic_format_arg. /// /// This is a separate type so it's possible to store the values and types in @@ -230,6 +284,52 @@ class _LIBCPP_TEMPLATE_VIS basic_format_arg { _LIBCPP_HIDE_FROM_ABI explicit operator bool() const noexcept { return __type_ != __format::__arg_t::__none; } +# if _LIBCPP_STD_VER >= 26 && defined(_LIBCPP_HAS_EXPLICIT_THIS_PARAMETER) + + // This function is user facing, so it must wrap the non-standard types of + // the "variant" in a handle to stay conforming. See __arg_t for more details. 
+ template + _LIBCPP_HIDE_FROM_ABI decltype(auto) visit(this basic_format_arg __arg, _Visitor&& __vis) { + switch (__arg.__type_) { +# ifndef _LIBCPP_HAS_NO_INT128 + case __format::__arg_t::__i128: { + typename __basic_format_arg_value<_Context>::__handle __h{__arg.__value_.__i128_}; + return std::invoke(std::forward<_Visitor>(__vis), typename basic_format_arg<_Context>::handle{__h}); + } + + case __format::__arg_t::__u128: { + typename __basic_format_arg_value<_Context>::__handle __h{__arg.__value_.__u128_}; + return std::invoke(std::forward<_Visitor>(__vis), typename basic_format_arg<_Context>::handle{__h}); + } +# endif + default: + return std::__visit_format_arg(std::forward<_Visitor>(__vis), __arg); + } + } + + // This function is user facing, so it must wrap the non-standard types of + // the "variant" in a handle to stay conforming. See __arg_t for more details. + template + _LIBCPP_HIDE_FROM_ABI _Rp visit(this basic_format_arg __arg, _Visitor&& __vis) { + switch (__arg.__type_) { +# ifndef _LIBCPP_HAS_NO_INT128 + case __format::__arg_t::__i128: { + typename __basic_format_arg_value<_Context>::__handle __h{__arg.__value_.__i128_}; + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), typename basic_format_arg<_Context>::handle{__h}); + } + + case __format::__arg_t::__u128: { + typename __basic_format_arg_value<_Context>::__handle __h{__arg.__value_.__u128_}; + return std::invoke_r<_Rp>(std::forward<_Visitor>(__vis), typename basic_format_arg<_Context>::handle{__h}); + } +# endif + default: + return std::__visit_format_arg<_Rp>(std::forward<_Visitor>(__vis), __arg); + } + } + +# endif // _LIBCPP_STD_VER >= 26 && defined(_LIBCPP_HAS_EXPLICIT_THIS_PARAMETER) + private: using char_type = typename _Context::char_type; @@ -270,7 +370,11 @@ class _LIBCPP_TEMPLATE_VIS basic_format_arg<_Context>::handle { // This function is user facing, so it must wrap the non-standard types of // the "variant" in a handle to stay conforming. See __arg_t for more details. 
template -_LIBCPP_HIDE_FROM_ABI decltype(auto) visit_format_arg(_Visitor&& __vis, basic_format_arg<_Context> __arg) { +# if _LIBCPP_STD_VER >= 26 && defined(_LIBCPP_HAS_EXPLICIT_THIS_PARAMETER) +_LIBCPP_DEPRECATED_IN_CXX26 +# endif + _LIBCPP_HIDE_FROM_ABI decltype(auto) + visit_format_arg(_Visitor&& __vis, basic_format_arg<_Context> __arg) { switch (__arg.__type_) { # ifndef _LIBCPP_HAS_NO_INT128 case __format::__arg_t::__i128: { @@ -282,7 +386,7 @@ _LIBCPP_HIDE_FROM_ABI decltype(auto) visit_format_arg(_Visitor&& __vis, basic_fo typename __basic_format_arg_value<_Context>::__handle __h{__arg.__value_.__u128_}; return std::invoke(std::forward<_Visitor>(__vis), typename basic_format_arg<_Context>::handle{__h}); } -# endif +# endif // _LIBCPP_STD_VER >= 26 && defined(_LIBCPP_HAS_EXPLICIT_THIS_PARAMETER) default: return std::__visit_format_arg(std::forward<_Visitor>(__vis), __arg); } diff --git a/lib/libcxx/include/__format/format_arg_store.h b/lib/libcxx/include/__format/format_arg_store.h index 066cd369eb89..23a599e99575 100644 --- a/lib/libcxx/include/__format/format_arg_store.h +++ b/lib/libcxx/include/__format/format_arg_store.h @@ -151,7 +151,7 @@ consteval __arg_t __determine_arg_t() { // The overload for not formattable types allows triggering the static // assertion below. 
template - requires(!__formattable<_Tp, typename _Context::char_type>) + requires(!__formattable_with<_Tp, _Context>) consteval __arg_t __determine_arg_t() { return __arg_t::__none; } @@ -165,7 +165,6 @@ _LIBCPP_HIDE_FROM_ABI basic_format_arg<_Context> __create_format_arg(_Tp& __valu using _Dp = remove_const_t<_Tp>; constexpr __arg_t __arg = __determine_arg_t<_Context, _Dp>(); static_assert(__arg != __arg_t::__none, "the supplied type is not formattable"); - static_assert(__formattable_with<_Tp, _Context>); // Not all types can be used to directly initialize the diff --git a/lib/libcxx/include/__format/format_args.h b/lib/libcxx/include/__format/format_args.h index 9e0afecc0ae9..07923570f389 100644 --- a/lib/libcxx/include/__format/format_args.h +++ b/lib/libcxx/include/__format/format_args.h @@ -10,11 +10,10 @@ #ifndef _LIBCPP___FORMAT_FORMAT_ARGS_H #define _LIBCPP___FORMAT_FORMAT_ARGS_H -#include <__availability> #include <__config> #include <__format/format_arg.h> #include <__format/format_arg_store.h> -#include <__format/format_fwd.h> +#include <__fwd/format.h> #include #include @@ -29,8 +28,6 @@ _LIBCPP_BEGIN_NAMESPACE_STD template class _LIBCPP_TEMPLATE_VIS basic_format_args { public: - basic_format_args() noexcept = default; - template _LIBCPP_HIDE_FROM_ABI basic_format_args(const __format_arg_store<_Context, _Args...>& __store) noexcept : __size_(sizeof...(_Args)) { diff --git a/lib/libcxx/include/__format/format_context.h b/lib/libcxx/include/__format/format_context.h index edb0348b34f3..20c07559eae4 100644 --- a/lib/libcxx/include/__format/format_context.h +++ b/lib/libcxx/include/__format/format_context.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FORMAT_FORMAT_CONTEXT_H #define _LIBCPP___FORMAT_FORMAT_CONTEXT_H -#include <__availability> #include <__concepts/same_as.h> #include <__config> #include <__format/buffer.h> @@ -18,7 +17,7 @@ #include <__format/format_arg_store.h> #include <__format/format_args.h> #include <__format/format_error.h> -#include 
<__format/format_fwd.h> +#include <__fwd/format.h> #include <__iterator/back_insert_iterator.h> #include <__iterator/concepts.h> #include <__memory/addressof.h> @@ -27,7 +26,7 @@ #include #ifndef _LIBCPP_HAS_NO_LOCALIZATION -# include +# include <__locale> # include #endif @@ -132,6 +131,9 @@ class _LIBCPP_HIDE_FROM_ABI explicit basic_format_context(_OutIt __out_it, basic_format_args __args) : __out_it_(std::move(__out_it)), __args_(__args) {} # endif + + basic_format_context(const basic_format_context&) = delete; + basic_format_context& operator=(const basic_format_context&) = delete; }; // A specialization for __retarget_buffer @@ -166,20 +168,25 @@ class _LIBCPP_TEMPLATE_VIS basic_format_context basic_format_arg { - if constexpr (same_as) - return {}; - else if constexpr (same_as::handle>) - // At the moment it's not possible for formatting to use a re-targeted handle. - // TODO FMT add this when support is needed. - std::__throw_format_error("Re-targeting handle not supported"); - else - return basic_format_arg{ - __format::__determine_arg_t(), - __basic_format_arg_value(__arg)}; - }, - static_cast<_Context*>(__c)->arg(__id)); + auto __visitor = [&](auto __arg) -> basic_format_arg { + if constexpr (same_as) + return {}; + else if constexpr (same_as::handle>) + // At the moment it's not possible for formatting to use a re-targeted handle. + // TODO FMT add this when support is needed. 
+ std::__throw_format_error("Re-targeting handle not supported"); + else + return basic_format_arg{ + __format::__determine_arg_t(), + __basic_format_arg_value(__arg)}; + }; +# if _LIBCPP_STD_VER >= 26 && defined(_LIBCPP_HAS_EXPLICIT_THIS_PARAMETER) + return static_cast<_Context*>(__c)->arg(__id).visit(std::move(__visitor)); +# else + _LIBCPP_SUPPRESS_DEPRECATED_PUSH + return std::visit_format_arg(std::move(__visitor), static_cast<_Context*>(__c)->arg(__id)); + _LIBCPP_SUPPRESS_DEPRECATED_POP +# endif // _LIBCPP_STD_VER >= 26 && defined(_LIBCPP_HAS_EXPLICIT_THIS_PARAMETER) }) { } diff --git a/lib/libcxx/include/__format/format_functions.h b/lib/libcxx/include/__format/format_functions.h index cf833ad20554..d14b49aff149 100644 --- a/lib/libcxx/include/__format/format_functions.h +++ b/lib/libcxx/include/__format/format_functions.h @@ -41,7 +41,7 @@ #include #ifndef _LIBCPP_HAS_NO_LOCALIZATION -# include +# include <__locale> #endif #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -66,15 +66,14 @@ using wformat_args = basic_format_args; # endif template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI __format_arg_store<_Context, _Args...> make_format_args(_Args&... __args) { - return _VSTD::__format_arg_store<_Context, _Args...>(__args...); +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI __format_arg_store<_Context, _Args...> make_format_args(_Args&... __args) { + return std::__format_arg_store<_Context, _Args...>(__args...); } # ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS template -_LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI __format_arg_store -make_wformat_args(_Args&... __args) { - return _VSTD::__format_arg_store(__args...); +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI __format_arg_store make_wformat_args(_Args&... __args) { + return std::__format_arg_store(__args...); } # endif @@ -452,8 +451,7 @@ format_to(_OutIt __out_it, wformat_string<_Args...> __fmt, _Args&&... 
__args) { // TODO FMT This needs to be a template or std::to_chars(floating-point) availability markup // fires too eagerly, see http://llvm.org/PR61563. template -_LIBCPP_NODISCARD_EXT _LIBCPP_ALWAYS_INLINE inline _LIBCPP_HIDE_FROM_ABI string -vformat(string_view __fmt, format_args __args) { +[[nodiscard]] _LIBCPP_ALWAYS_INLINE inline _LIBCPP_HIDE_FROM_ABI string vformat(string_view __fmt, format_args __args) { string __res; std::vformat_to(std::back_inserter(__res), __fmt, __args); return __res; @@ -463,7 +461,7 @@ vformat(string_view __fmt, format_args __args) { // TODO FMT This needs to be a template or std::to_chars(floating-point) availability markup // fires too eagerly, see http://llvm.org/PR61563. template -_LIBCPP_NODISCARD_EXT _LIBCPP_ALWAYS_INLINE inline _LIBCPP_HIDE_FROM_ABI wstring +[[nodiscard]] _LIBCPP_ALWAYS_INLINE inline _LIBCPP_HIDE_FROM_ABI wstring vformat(wstring_view __fmt, wformat_args __args) { wstring __res; std::vformat_to(std::back_inserter(__res), __fmt, __args); @@ -472,14 +470,14 @@ vformat(wstring_view __fmt, wformat_args __args) { # endif template -_LIBCPP_NODISCARD_EXT _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI string +[[nodiscard]] _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI string format(format_string<_Args...> __fmt, _Args&&... __args) { return std::vformat(__fmt.get(), std::make_format_args(__args...)); } # ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS template -_LIBCPP_NODISCARD_EXT _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI wstring +[[nodiscard]] _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI wstring format(wformat_string<_Args...> __fmt, _Args&&... __args) { return std::vformat(__fmt.get(), std::make_wformat_args(__args...)); } @@ -520,14 +518,14 @@ _LIBCPP_HIDE_FROM_ABI size_t __vformatted_size(basic_string_view<_CharT> __fmt, } template -_LIBCPP_NODISCARD_EXT _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI size_t +[[nodiscard]] _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI size_t formatted_size(format_string<_Args...> __fmt, _Args&&... 
__args) { return std::__vformatted_size(__fmt.get(), basic_format_args{std::make_format_args(__args...)}); } # ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS template -_LIBCPP_NODISCARD_EXT _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI size_t +[[nodiscard]] _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI size_t formatted_size(wformat_string<_Args...> __fmt, _Args&&... __args) { return std::__vformatted_size(__fmt.get(), basic_format_args{std::make_wformat_args(__args...)}); } @@ -585,7 +583,7 @@ format_to(_OutIt __out_it, locale __loc, wformat_string<_Args...> __fmt, _Args&& // TODO FMT This needs to be a template or std::to_chars(floating-point) availability markup // fires too eagerly, see http://llvm.org/PR61563. template -_LIBCPP_NODISCARD_EXT _LIBCPP_ALWAYS_INLINE inline _LIBCPP_HIDE_FROM_ABI string +[[nodiscard]] _LIBCPP_ALWAYS_INLINE inline _LIBCPP_HIDE_FROM_ABI string vformat(locale __loc, string_view __fmt, format_args __args) { string __res; std::vformat_to(std::back_inserter(__res), std::move(__loc), __fmt, __args); @@ -596,7 +594,7 @@ vformat(locale __loc, string_view __fmt, format_args __args) { // TODO FMT This needs to be a template or std::to_chars(floating-point) availability markup // fires too eagerly, see http://llvm.org/PR61563. template -_LIBCPP_NODISCARD_EXT _LIBCPP_ALWAYS_INLINE inline _LIBCPP_HIDE_FROM_ABI wstring +[[nodiscard]] _LIBCPP_ALWAYS_INLINE inline _LIBCPP_HIDE_FROM_ABI wstring vformat(locale __loc, wstring_view __fmt, wformat_args __args) { wstring __res; std::vformat_to(std::back_inserter(__res), std::move(__loc), __fmt, __args); @@ -605,14 +603,14 @@ vformat(locale __loc, wstring_view __fmt, wformat_args __args) { # endif template -_LIBCPP_NODISCARD_EXT _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI string +[[nodiscard]] _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI string format(locale __loc, format_string<_Args...> __fmt, _Args&&... 
__args) { return std::vformat(std::move(__loc), __fmt.get(), std::make_format_args(__args...)); } # ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS template -_LIBCPP_NODISCARD_EXT _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI wstring +[[nodiscard]] _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI wstring format(locale __loc, wformat_string<_Args...> __fmt, _Args&&... __args) { return std::vformat(std::move(__loc), __fmt.get(), std::make_wformat_args(__args...)); } @@ -658,14 +656,14 @@ _LIBCPP_HIDE_FROM_ABI size_t __vformatted_size(locale __loc, basic_string_view<_ } template -_LIBCPP_NODISCARD_EXT _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI size_t +[[nodiscard]] _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI size_t formatted_size(locale __loc, format_string<_Args...> __fmt, _Args&&... __args) { return std::__vformatted_size(std::move(__loc), __fmt.get(), basic_format_args{std::make_format_args(__args...)}); } # ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS template -_LIBCPP_NODISCARD_EXT _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI size_t +[[nodiscard]] _LIBCPP_ALWAYS_INLINE _LIBCPP_HIDE_FROM_ABI size_t formatted_size(locale __loc, wformat_string<_Args...> __fmt, _Args&&... 
__args) { return std::__vformatted_size(std::move(__loc), __fmt.get(), basic_format_args{std::make_wformat_args(__args...)}); } diff --git a/lib/libcxx/include/__format/formatter.h b/lib/libcxx/include/__format/formatter.h index 079befc5bd9c..e2f418f936ee 100644 --- a/lib/libcxx/include/__format/formatter.h +++ b/lib/libcxx/include/__format/formatter.h @@ -10,9 +10,8 @@ #ifndef _LIBCPP___FORMAT_FORMATTER_H #define _LIBCPP___FORMAT_FORMATTER_H -#include <__availability> #include <__config> -#include <__format/format_fwd.h> +#include <__fwd/format.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header diff --git a/lib/libcxx/include/__format/formatter_bool.h b/lib/libcxx/include/__format/formatter_bool.h index 1c479501b675..17dc69541e8f 100644 --- a/lib/libcxx/include/__format/formatter_bool.h +++ b/lib/libcxx/include/__format/formatter_bool.h @@ -12,7 +12,6 @@ #include <__algorithm/copy.h> #include <__assert> -#include <__availability> #include <__config> #include <__format/concepts.h> #include <__format/format_parse_context.h> @@ -22,7 +21,7 @@ #include <__utility/unreachable.h> #ifndef _LIBCPP_HAS_NO_LOCALIZATION -# include +# include <__locale> #endif #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__format/formatter_char.h b/lib/libcxx/include/__format/formatter_char.h index 3358d422252f..d33e84368a76 100644 --- a/lib/libcxx/include/__format/formatter_char.h +++ b/lib/libcxx/include/__format/formatter_char.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FORMAT_FORMATTER_CHAR_H #define _LIBCPP___FORMAT_FORMATTER_CHAR_H -#include <__availability> #include <__concepts/same_as.h> #include <__config> #include <__format/concepts.h> diff --git a/lib/libcxx/include/__format/formatter_floating_point.h b/lib/libcxx/include/__format/formatter_floating_point.h index 46a090a787ae..fa42ba203b0b 100644 --- a/lib/libcxx/include/__format/formatter_floating_point.h +++ b/lib/libcxx/include/__format/formatter_floating_point.h @@ 
-16,6 +16,7 @@ #include <__algorithm/min.h> #include <__algorithm/rotate.h> #include <__algorithm/transform.h> +#include <__assert> #include <__charconv/chars_format.h> #include <__charconv/to_chars_floating_point.h> #include <__charconv/to_chars_result.h> @@ -38,7 +39,7 @@ #include #ifndef _LIBCPP_HAS_NO_LOCALIZATION -# include +# include <__locale> #endif #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__format/formatter_integer.h b/lib/libcxx/include/__format/formatter_integer.h index d57082b3881b..41400f00478e 100644 --- a/lib/libcxx/include/__format/formatter_integer.h +++ b/lib/libcxx/include/__format/formatter_integer.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FORMAT_FORMATTER_INTEGER_H #define _LIBCPP___FORMAT_FORMATTER_INTEGER_H -#include <__availability> #include <__concepts/arithmetic.h> #include <__config> #include <__format/concepts.h> diff --git a/lib/libcxx/include/__format/formatter_integral.h b/lib/libcxx/include/__format/formatter_integral.h index e0217a240027..eca966f8886f 100644 --- a/lib/libcxx/include/__format/formatter_integral.h +++ b/lib/libcxx/include/__format/formatter_integral.h @@ -32,7 +32,7 @@ #include #ifndef _LIBCPP_HAS_NO_LOCALIZATION -# include +# include <__locale> #endif #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__format/formatter_output.h b/lib/libcxx/include/__format/formatter_output.h index d5038eb158b0..1498f64c4aef 100644 --- a/lib/libcxx/include/__format/formatter_output.h +++ b/lib/libcxx/include/__format/formatter_output.h @@ -100,8 +100,8 @@ __padding_size(size_t __size, size_t __width, __format_spec::__alignment __align /// /// This uses a "mass output function" of __format::__output_buffer when possible. 
template <__fmt_char_type _CharT, __fmt_char_type _OutCharT = _CharT> -_LIBCPP_HIDE_FROM_ABI auto __copy(basic_string_view<_CharT> __str, output_iterator auto __out_it) - -> decltype(__out_it) { +_LIBCPP_HIDE_FROM_ABI auto +__copy(basic_string_view<_CharT> __str, output_iterator auto __out_it) -> decltype(__out_it) { if constexpr (std::same_as>>) { __out_it.__get_container()->__copy(__str); return __out_it; @@ -116,16 +116,16 @@ _LIBCPP_HIDE_FROM_ABI auto __copy(basic_string_view<_CharT> __str, output_iterat template ::value_type, __fmt_char_type _OutCharT = _CharT> -_LIBCPP_HIDE_FROM_ABI auto __copy(_Iterator __first, _Iterator __last, output_iterator auto __out_it) - -> decltype(__out_it) { +_LIBCPP_HIDE_FROM_ABI auto +__copy(_Iterator __first, _Iterator __last, output_iterator auto __out_it) -> decltype(__out_it) { return __formatter::__copy(basic_string_view{__first, __last}, std::move(__out_it)); } template ::value_type, __fmt_char_type _OutCharT = _CharT> -_LIBCPP_HIDE_FROM_ABI auto __copy(_Iterator __first, size_t __n, output_iterator auto __out_it) - -> decltype(__out_it) { +_LIBCPP_HIDE_FROM_ABI auto +__copy(_Iterator __first, size_t __n, output_iterator auto __out_it) -> decltype(__out_it) { return __formatter::__copy(basic_string_view{std::to_address(__first), __n}, std::move(__out_it)); } @@ -136,9 +136,11 @@ template ::value_type, __fmt_char_type _OutCharT = _CharT, class _UnaryOperation> -_LIBCPP_HIDE_FROM_ABI auto __transform( - _Iterator __first, _Iterator __last, output_iterator auto __out_it, _UnaryOperation __operation) - -> decltype(__out_it) { +_LIBCPP_HIDE_FROM_ABI auto +__transform(_Iterator __first, + _Iterator __last, + output_iterator auto __out_it, + _UnaryOperation __operation) -> decltype(__out_it) { if constexpr (std::same_as>>) { __out_it.__get_container()->__transform(__first, __last, std::move(__operation)); return __out_it; diff --git a/lib/libcxx/include/__format/formatter_pointer.h 
b/lib/libcxx/include/__format/formatter_pointer.h index 3373996ec3d5..6941343efd91 100644 --- a/lib/libcxx/include/__format/formatter_pointer.h +++ b/lib/libcxx/include/__format/formatter_pointer.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FORMAT_FORMATTER_POINTER_H #define _LIBCPP___FORMAT_FORMATTER_POINTER_H -#include <__availability> #include <__config> #include <__format/concepts.h> #include <__format/format_parse_context.h> diff --git a/lib/libcxx/include/__format/formatter_string.h b/lib/libcxx/include/__format/formatter_string.h index d1ccfb9b5f7d..347439fc8dff 100644 --- a/lib/libcxx/include/__format/formatter_string.h +++ b/lib/libcxx/include/__format/formatter_string.h @@ -10,7 +10,6 @@ #ifndef _LIBCPP___FORMAT_FORMATTER_STRING_H #define _LIBCPP___FORMAT_FORMATTER_STRING_H -#include <__availability> #include <__config> #include <__format/concepts.h> #include <__format/format_parse_context.h> diff --git a/lib/libcxx/include/__format/indic_conjunct_break_table.h b/lib/libcxx/include/__format/indic_conjunct_break_table.h new file mode 100644 index 000000000000..44521d27498c --- /dev/null +++ b/lib/libcxx/include/__format/indic_conjunct_break_table.h @@ -0,0 +1,350 @@ +// -*- C++ -*- +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +// WARNING, this entire header is generated by +// utils/generate_indic_conjunct_break_table.py +// DO NOT MODIFY! + +// UNICODE, INC. LICENSE AGREEMENT - DATA FILES AND SOFTWARE +// +// See Terms of Use +// for definitions of Unicode Inc.'s Data Files and Software. +// +// NOTICE TO USER: Carefully read the following legal agreement. 
+// BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S +// DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"), +// YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE +// TERMS AND CONDITIONS OF THIS AGREEMENT. +// IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE +// THE DATA FILES OR SOFTWARE. +// +// COPYRIGHT AND PERMISSION NOTICE +// +// Copyright (c) 1991-2022 Unicode, Inc. All rights reserved. +// Distributed under the Terms of Use in https://www.unicode.org/copyright.html. +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of the Unicode data files and any associated documentation +// (the "Data Files") or Unicode software and any associated documentation +// (the "Software") to deal in the Data Files or Software +// without restriction, including without limitation the rights to use, +// copy, modify, merge, publish, distribute, and/or sell copies of +// the Data Files or Software, and to permit persons to whom the Data Files +// or Software are furnished to do so, provided that either +// (a) this copyright and permission notice appear with all copies +// of the Data Files or Software, or +// (b) this copyright and permission notice appear in associated +// Documentation. +// +// THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF +// ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT OF THIRD PARTY RIGHTS. +// IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS +// NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL +// DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, +// DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +// TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +// PERFORMANCE OF THE DATA FILES OR SOFTWARE. 
+// +// Except as contained in this notice, the name of a copyright holder +// shall not be used in advertising or otherwise to promote the sale, +// use or other dealings in these Data Files or Software without prior +// written authorization of the copyright holder. + +#ifndef _LIBCPP___FORMAT_INDIC_CONJUNCT_BREAK_TABLE_H +#define _LIBCPP___FORMAT_INDIC_CONJUNCT_BREAK_TABLE_H + +#include <__algorithm/ranges_upper_bound.h> +#include <__config> +#include <__iterator/access.h> +#include +#include + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_BEGIN_NAMESPACE_STD + +#if _LIBCPP_STD_VER >= 20 + +namespace __indic_conjunct_break { + +enum class __property : uint8_t { + // Values generated from the data files. + __Consonant, + __Extend, + __Linker, + + // The code unit has none of above properties. + __none +}; + +/// The entries of the indic conjunct break property table. +/// +/// The data is generated from +/// - https://www.unicode.org/Public/UCD/latest/ucd/DerivedCoreProperties.txt +/// +/// The data has 3 values +/// - bits [0, 1] The property. One of the values generated from the datafiles +/// of \ref __property +/// - bits [2, 10] The size of the range. +/// - bits [11, 31] The lower bound code point of the range. The upper bound of +/// the range is lower bound + size. +/// +/// The 9 bits for the size allow a maximum range of 512 elements. Some ranges +/// in the Unicode tables are larger. They are stored in multiple consecutive +/// ranges in the data table. An alternative would be to store the sizes in a +/// separate 16-bit value. The original MSVC STL code had such an approach, but +/// this approach uses less space for the data and is about 4% faster in the +/// following benchmark. 
+/// libcxx/benchmarks/std_format_spec_string_unicode.bench.cpp +// clang-format off +_LIBCPP_HIDE_FROM_ABI inline constexpr uint32_t __entries[201] = { + 0x00180139, + 0x001a807d, + 0x00241811, + 0x002c88b1, + 0x002df801, + 0x002e0805, + 0x002e2005, + 0x002e3801, + 0x00308029, + 0x00325851, + 0x00338001, + 0x0036b019, + 0x0036f815, + 0x00373805, + 0x0037500d, + 0x00388801, + 0x00398069, + 0x003f5821, + 0x003fe801, + 0x0040b00d, + 0x0040d821, + 0x00412809, + 0x00414811, + 0x0042c809, + 0x0044c01d, + 0x0046505d, + 0x00471871, + 0x0048a890, + 0x0049e001, + 0x004a6802, + 0x004a880d, + 0x004ac01c, + 0x004bc01c, + 0x004ca84c, + 0x004d5018, + 0x004d9000, + 0x004db00c, + 0x004de001, + 0x004e6802, + 0x004ee004, + 0x004ef800, + 0x004f8004, + 0x004ff001, + 0x0051e001, + 0x0054a84c, + 0x00555018, + 0x00559004, + 0x0055a810, + 0x0055e001, + 0x00566802, + 0x0057c800, + 0x0058a84c, + 0x00595018, + 0x00599004, + 0x0059a810, + 0x0059e001, + 0x005a6802, + 0x005ae004, + 0x005af800, + 0x005b8800, + 0x0060a84c, + 0x0061503c, + 0x0061e001, + 0x00626802, + 0x0062a805, + 0x0062c008, + 0x0065e001, + 0x0068a894, + 0x0069d805, + 0x006a6802, + 0x0071c009, + 0x0072400d, + 0x0075c009, + 0x0076400d, + 0x0078c005, + 0x0079a801, + 0x0079b801, + 0x0079c801, + 0x007b8805, + 0x007ba001, + 0x007bd00d, + 0x007c0001, + 0x007c1009, + 0x007c3005, + 0x007e3001, + 0x0081b801, + 0x0081c805, + 0x00846801, + 0x009ae809, + 0x00b8a001, + 0x00be9001, + 0x00bee801, + 0x00c54801, + 0x00c9c809, + 0x00d0b805, + 0x00d30001, + 0x00d3a81d, + 0x00d3f801, + 0x00d58035, + 0x00d5f83d, + 0x00d9a001, + 0x00db5821, + 0x00dd5801, + 0x00df3001, + 0x00e1b801, + 0x00e68009, + 0x00e6a031, + 0x00e71019, + 0x00e76801, + 0x00e7a001, + 0x00e7c005, + 0x00ee00fd, + 0x01006801, + 0x01068031, + 0x01070801, + 0x0107282d, + 0x01677809, + 0x016bf801, + 0x016f007d, + 0x01815015, + 0x0184c805, + 0x05337801, + 0x0533a025, + 0x0534f005, + 0x05378005, + 0x05416001, + 0x05470045, + 0x05495809, + 0x054d9801, + 0x05558001, + 0x05559009, + 
0x0555b805, + 0x0555f005, + 0x05560801, + 0x0557b001, + 0x055f6801, + 0x07d8f001, + 0x07f1003d, + 0x080fe801, + 0x08170001, + 0x081bb011, + 0x08506801, + 0x08507801, + 0x0851c009, + 0x0851f801, + 0x08572805, + 0x0869200d, + 0x08755805, + 0x0877e809, + 0x087a3029, + 0x087c100d, + 0x08838001, + 0x0883f801, + 0x0885d001, + 0x08880009, + 0x08899805, + 0x088b9801, + 0x088e5001, + 0x0891b001, + 0x08974805, + 0x0899d805, + 0x089b3019, + 0x089b8011, + 0x08a23001, + 0x08a2f001, + 0x08a61801, + 0x08ae0001, + 0x08b5b801, + 0x08b95801, + 0x08c1d001, + 0x08c9f001, + 0x08ca1801, + 0x08d1a001, + 0x08d23801, + 0x08d4c801, + 0x08ea1001, + 0x08ea2005, + 0x08ecb801, + 0x08fa1001, + 0x0b578011, + 0x0b598019, + 0x0de4f001, + 0x0e8b2801, + 0x0e8b3809, + 0x0e8b7011, + 0x0e8bd81d, + 0x0e8c2819, + 0x0e8d500d, + 0x0e921009, + 0x0f000019, + 0x0f004041, + 0x0f00d819, + 0x0f011805, + 0x0f013011, + 0x0f047801, + 0x0f098019, + 0x0f157001, + 0x0f17600d, + 0x0f27600d, + 0x0f468019, + 0x0f4a2019}; +// clang-format on + +/// Returns the indic conjuct break property of a code point. +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr __property __get_property(const char32_t __code_point) noexcept { + // The algorithm searches for the upper bound of the range and, when found, + // steps back one entry. This algorithm is used since the code point can be + // anywhere in the range. After a lower bound is found the next step is to + // compare whether the code unit is indeed in the range. + // + // Since the entry contains a code unit, size, and property the code point + // being sought needs to be adjusted. Just shifting the code point to the + // proper position doesn't work; suppose an entry has property 0, size 1, + // and lower bound 3. This results in the entry 0x1810. + // When searching for code point 3 it will search for 0x1800, find 0x1810 + // and moves to the previous entry. Thus the lower bound value will never + // be found. 
+ // The simple solution is to set the bits belonging to the property and + // size. Then the upper bound for code point 3 will return the entry after + // 0x1810. After moving to the previous entry the algorithm arrives at the + // correct entry. + ptrdiff_t __i = std::ranges::upper_bound(__entries, (__code_point << 11) | 0x7ffu) - __entries; + if (__i == 0) + return __property::__none; + + --__i; + uint32_t __upper_bound = (__entries[__i] >> 11) + ((__entries[__i] >> 2) & 0b1'1111'1111); + if (__code_point <= __upper_bound) + return static_cast<__property>(__entries[__i] & 0b11); + + return __property::__none; +} + +} // namespace __indic_conjunct_break + +#endif //_LIBCPP_STD_VER >= 20 + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP___FORMAT_INDIC_CONJUNCT_BREAK_TABLE_H diff --git a/lib/libcxx/include/__format/parser_std_format_spec.h b/lib/libcxx/include/__format/parser_std_format_spec.h index cf8af87b2128..150bdde89f3b 100644 --- a/lib/libcxx/include/__format/parser_std_format_spec.h +++ b/lib/libcxx/include/__format/parser_std_format_spec.h @@ -129,8 +129,7 @@ _LIBCPP_HIDE_FROM_ABI constexpr uint32_t __substitute_arg_id(basic_format_arg<_C /// /// They default to false so when a new field is added it needs to be opted in /// explicitly. -// TODO FMT Use an ABI tag for this struct. 
-struct __fields { +struct _LIBCPP_HIDE_FROM_ABI __fields { uint16_t __sign_ : 1 {false}; uint16_t __alternate_form_ : 1 {false}; uint16_t __zero_padding_ : 1 {false}; @@ -355,10 +354,10 @@ class _LIBCPP_TEMPLATE_VIS __parser { _LIBCPP_HIDE_FROM_ABI constexpr typename _ParseContext::iterator __parse(_ParseContext& __ctx, __fields __fields) { auto __begin = __ctx.begin(); auto __end = __ctx.end(); - if (__begin == __end) + if (__begin == __end || *__begin == _CharT('}') || (__fields.__use_range_fill_ && *__begin == _CharT(':'))) return __begin; - if (__parse_fill_align(__begin, __end, __fields.__use_range_fill_) && __begin == __end) + if (__parse_fill_align(__begin, __end) && __begin == __end) return __begin; if (__fields.__sign_) { @@ -574,12 +573,10 @@ class _LIBCPP_TEMPLATE_VIS __parser { return false; } - _LIBCPP_HIDE_FROM_ABI constexpr void __validate_fill_character(_CharT __fill, bool __use_range_fill) { + _LIBCPP_HIDE_FROM_ABI constexpr void __validate_fill_character(_CharT __fill) { // The forbidden fill characters all code points formed from a single code unit, thus the // check can be omitted when more code units are used. 
- if (__use_range_fill && (__fill == _CharT('{') || __fill == _CharT('}') || __fill == _CharT(':'))) - std::__throw_format_error("The fill option contains an invalid value"); - else if (__fill == _CharT('{') || __fill == _CharT('}')) + if (__fill == _CharT('{')) std::__throw_format_error("The fill option contains an invalid value"); } @@ -590,7 +587,7 @@ class _LIBCPP_TEMPLATE_VIS __parser { # ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS || (same_as<_CharT, wchar_t> && sizeof(wchar_t) == 2) # endif - _LIBCPP_HIDE_FROM_ABI constexpr bool __parse_fill_align(_Iterator& __begin, _Iterator __end, bool __use_range_fill) { + _LIBCPP_HIDE_FROM_ABI constexpr bool __parse_fill_align(_Iterator& __begin, _Iterator __end) { _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( __begin != __end, "when called with an empty input the function will cause " @@ -606,7 +603,7 @@ class _LIBCPP_TEMPLATE_VIS __parser { // The forbidden fill characters all are code points encoded // in one code unit, thus the check can be omitted when more // code units are used. 
- __validate_fill_character(*__begin, __use_range_fill); + __validate_fill_character(*__begin); std::copy_n(__begin, __code_units, std::addressof(__fill_.__data[0])); __begin += __code_units + 1; @@ -623,7 +620,7 @@ class _LIBCPP_TEMPLATE_VIS __parser { # ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS template requires(same_as<_CharT, wchar_t> && sizeof(wchar_t) == 4) - _LIBCPP_HIDE_FROM_ABI constexpr bool __parse_fill_align(_Iterator& __begin, _Iterator __end, bool __use_range_fill) { + _LIBCPP_HIDE_FROM_ABI constexpr bool __parse_fill_align(_Iterator& __begin, _Iterator __end) { _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( __begin != __end, "when called with an empty input the function will cause " @@ -632,7 +629,7 @@ class _LIBCPP_TEMPLATE_VIS __parser { if (!__unicode::__is_scalar_value(*__begin)) std::__throw_format_error("The fill option contains an invalid value"); - __validate_fill_character(*__begin, __use_range_fill); + __validate_fill_character(*__begin); __fill_.__data[0] = *__begin; __begin += 2; @@ -651,14 +648,14 @@ class _LIBCPP_TEMPLATE_VIS __parser { # else // _LIBCPP_HAS_NO_UNICODE // range-fill and tuple-fill are identical template - _LIBCPP_HIDE_FROM_ABI constexpr bool __parse_fill_align(_Iterator& __begin, _Iterator __end, bool __use_range_fill) { + _LIBCPP_HIDE_FROM_ABI constexpr bool __parse_fill_align(_Iterator& __begin, _Iterator __end) { _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( __begin != __end, "when called with an empty input the function will cause " "undefined behavior by evaluating data not in the input"); if (__begin + 1 != __end) { if (__parse_alignment(*(__begin + 1))) { - __validate_fill_character(*__begin, __use_range_fill); + __validate_fill_character(*__begin); __fill_.__data[0] = *__begin; __begin += 2; @@ -1158,8 +1155,8 @@ __estimate_column_width(basic_string_view<_CharT> __str, size_t __maximum, __col // When Unicode isn't supported assume ASCII and every code unit is one code // point. In ASCII the estimated column width is always one. 
Thus there's no // need for rounding. - size_t __width_ = std::min(__str.size(), __maximum); - return {__width_, __str.begin() + __width_}; + size_t __width = std::min(__str.size(), __maximum); + return {__width, __str.begin() + __width}; } # endif // !defined(_LIBCPP_HAS_NO_UNICODE) diff --git a/lib/libcxx/include/__format/unicode.h b/lib/libcxx/include/__format/unicode.h index 40067ca3448b..de7d0fea1df5 100644 --- a/lib/libcxx/include/__format/unicode.h +++ b/lib/libcxx/include/__format/unicode.h @@ -15,8 +15,10 @@ #include <__concepts/same_as.h> #include <__config> #include <__format/extended_grapheme_cluster_table.h> +#include <__format/indic_conjunct_break_table.h> #include <__iterator/concepts.h> #include <__iterator/readable_traits.h> // iter_value_t +#include <__utility/unreachable.h> #include #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -292,84 +294,231 @@ class __code_point_view { }; # endif // _LIBCPP_HAS_NO_WIDE_CHARACTERS -_LIBCPP_HIDE_FROM_ABI constexpr bool __at_extended_grapheme_cluster_break( - bool& __ri_break_allowed, - bool __has_extened_pictographic, - __extended_grapheme_custer_property_boundary::__property __prev, - __extended_grapheme_custer_property_boundary::__property __next) { - using __extended_grapheme_custer_property_boundary::__property; +// State machine to implement the Extended Grapheme Cluster Boundary +// +// The exact rules may change between Unicode versions. 
+// This implements the extended rules see +// https://www.unicode.org/reports/tr29/#Grapheme_Cluster_Boundaries +class __extended_grapheme_cluster_break { + using __EGC_property = __extended_grapheme_custer_property_boundary::__property; + using __inCB_property = __indic_conjunct_break::__property; - __has_extened_pictographic |= __prev == __property::__Extended_Pictographic; +public: + _LIBCPP_HIDE_FROM_ABI constexpr explicit __extended_grapheme_cluster_break(char32_t __first_code_point) + : __prev_code_point_(__first_code_point), + __prev_property_(__extended_grapheme_custer_property_boundary::__get_property(__first_code_point)) { + // Initializes the active rule. + if (__prev_property_ == __EGC_property::__Extended_Pictographic) + __active_rule_ = __rule::__GB11_emoji; + else if (__prev_property_ == __EGC_property::__Regional_Indicator) + __active_rule_ = __rule::__GB12_GB13_regional_indicator; + else if (__indic_conjunct_break::__get_property(__first_code_point) == __inCB_property::__Consonant) + __active_rule_ = __rule::__GB9c_indic_conjunct_break; + } - // https://www.unicode.org/reports/tr29/tr29-39.html#Grapheme_Cluster_Boundary_Rules + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool operator()(char32_t __next_code_point) { + __EGC_property __next_property = __extended_grapheme_custer_property_boundary::__get_property(__next_code_point); + bool __result = __evaluate(__next_code_point, __next_property); + __prev_code_point_ = __next_code_point; + __prev_property_ = __next_property; + return __result; + } - // *** Break at the start and end of text, unless the text is empty. *** + // The code point whose break propery are considered during the next + // evaluation cyle. 
+ [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr char32_t __current_code_point() const { return __prev_code_point_; } - _LIBCPP_ASSERT_INTERNAL(__prev != __property::__sot, "should be handled in the constructor"); // GB1 - _LIBCPP_ASSERT_INTERNAL(__prev != __property::__eot, "should be handled by our caller"); // GB2 +private: + // The naming of the identifiers matches the Unicode standard. + // NOLINTBEGIN(readability-identifier-naming) + + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool + __evaluate(char32_t __next_code_point, __EGC_property __next_property) { + switch (__active_rule_) { + case __rule::__none: + return __evaluate_none(__next_code_point, __next_property); + case __rule::__GB9c_indic_conjunct_break: + return __evaluate_GB9c_indic_conjunct_break(__next_code_point, __next_property); + case __rule::__GB11_emoji: + return __evaluate_GB11_emoji(__next_code_point, __next_property); + case __rule::__GB12_GB13_regional_indicator: + return __evaluate_GB12_GB13_regional_indicator(__next_code_point, __next_property); + } + __libcpp_unreachable(); + } - // *** Do not break between a CR and LF. Otherwise, break before and after controls. *** - if (__prev == __property::__CR && __next == __property::__LF) // GB3 - return false; + _LIBCPP_HIDE_FROM_ABI constexpr bool __evaluate_none(char32_t __next_code_point, __EGC_property __next_property) { + // *** Break at the start and end of text, unless the text is empty. *** - if (__prev == __property::__Control || __prev == __property::__CR || __prev == __property::__LF) // GB4 - return true; + _LIBCPP_ASSERT_INTERNAL(__prev_property_ != __EGC_property::__sot, "should be handled in the constructor"); // GB1 + _LIBCPP_ASSERT_INTERNAL(__prev_property_ != __EGC_property::__eot, "should be handled by our caller"); // GB2 - if (__next == __property::__Control || __next == __property::__CR || __next == __property::__LF) // GB5 - return true; + // *** Do not break between a CR and LF. 
Otherwise, break before and after controls. *** + if (__prev_property_ == __EGC_property::__CR && __next_property == __EGC_property::__LF) // GB3 + return false; - // *** Do not break Hangul syllable sequences. *** - if (__prev == __property::__L && (__next == __property::__L || __next == __property::__V || - __next == __property::__LV || __next == __property::__LVT)) // GB6 - return false; + if (__prev_property_ == __EGC_property::__Control || __prev_property_ == __EGC_property::__CR || + __prev_property_ == __EGC_property::__LF) // GB4 + return true; - if ((__prev == __property::__LV || __prev == __property::__V) && - (__next == __property::__V || __next == __property::__T)) // GB7 - return false; + if (__next_property == __EGC_property::__Control || __next_property == __EGC_property::__CR || + __next_property == __EGC_property::__LF) // GB5 + return true; - if ((__prev == __property::__LVT || __prev == __property::__T) && __next == __property::__T) // GB8 - return false; + // *** Do not break Hangul syllable sequences. *** + if (__prev_property_ == __EGC_property::__L && + (__next_property == __EGC_property::__L || __next_property == __EGC_property::__V || + __next_property == __EGC_property::__LV || __next_property == __EGC_property::__LVT)) // GB6 + return false; - // *** Do not break before extending characters or ZWJ. *** - if (__next == __property::__Extend || __next == __property::__ZWJ) - return false; // GB9 + if ((__prev_property_ == __EGC_property::__LV || __prev_property_ == __EGC_property::__V) && + (__next_property == __EGC_property::__V || __next_property == __EGC_property::__T)) // GB7 + return false; - // *** Do not break before SpacingMarks, or after Prepend characters. 
*** - if (__next == __property::__SpacingMark) // GB9a - return false; + if ((__prev_property_ == __EGC_property::__LVT || __prev_property_ == __EGC_property::__T) && + __next_property == __EGC_property::__T) // GB8 + return false; - if (__prev == __property::__Prepend) // GB9b - return false; + // *** Do not break before extending characters or ZWJ. *** + if (__next_property == __EGC_property::__Extend || __next_property == __EGC_property::__ZWJ) + return false; // GB9 - // *** Do not break within emoji modifier sequences or emoji zwj sequences. *** + // *** Do not break before SpacingMarks, or after Prepend characters. *** + if (__next_property == __EGC_property::__SpacingMark) // GB9a + return false; - // GB11 \p{Extended_Pictographic} Extend* ZWJ x \p{Extended_Pictographic} - // - // Note that several parts of this rule are matched by GB9: Any x (Extend | ZWJ) - // - \p{Extended_Pictographic} x Extend - // - Extend x Extend - // - \p{Extended_Pictographic} x ZWJ - // - Extend x ZWJ - // - // So the only case left to test is - // - \p{Extended_Pictographic}' x ZWJ x \p{Extended_Pictographic} - // where \p{Extended_Pictographic}' is stored in __has_extened_pictographic - if (__has_extened_pictographic && __prev == __property::__ZWJ && __next == __property::__Extended_Pictographic) - return false; + if (__prev_property_ == __EGC_property::__Prepend) // GB9b + return false; - // *** Do not break within emoji flag sequences *** + // *** Do not break within certain combinations with Indic_Conjunct_Break (InCB)=Linker. *** + if (__indic_conjunct_break::__get_property(__next_code_point) == __inCB_property::__Consonant) { + __active_rule_ = __rule::__GB9c_indic_conjunct_break; + __GB9c_indic_conjunct_break_state_ = __GB9c_indic_conjunct_break_state::__Consonant; + return true; + } + + // *** Do not break within emoji modifier sequences or emoji zwj sequences. 
*** + if (__next_property == __EGC_property::__Extended_Pictographic) { + __active_rule_ = __rule::__GB11_emoji; + __GB11_emoji_state_ = __GB11_emoji_state::__Extended_Pictographic; + return true; + } + + // *** Do not break within emoji flag sequences *** - // That is, do not break between regional indicator (RI) symbols if there - // is an odd number of RI characters before the break point. + // That is, do not break between regional indicator (RI) symbols if there + // is an odd number of RI characters before the break point. + if (__next_property == __EGC_property::__Regional_Indicator) { // GB12 + GB13 + __active_rule_ = __rule::__GB12_GB13_regional_indicator; + return true; + } - if (__prev == __property::__Regional_Indicator && __next == __property::__Regional_Indicator) { // GB12 + GB13 - __ri_break_allowed = !__ri_break_allowed; - return __ri_break_allowed; + // *** Otherwise, break everywhere. *** + return true; // GB999 } - // *** Otherwise, break everywhere. *** - return true; // GB999 -} + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool + __evaluate_GB9c_indic_conjunct_break(char32_t __next_code_point, __EGC_property __next_property) { + __inCB_property __break = __indic_conjunct_break::__get_property(__next_code_point); + if (__break == __inCB_property::__none) { + __active_rule_ = __rule::__none; + return __evaluate_none(__next_code_point, __next_property); + } + + switch (__GB9c_indic_conjunct_break_state_) { + case __GB9c_indic_conjunct_break_state::__Consonant: + if (__break == __inCB_property::__Extend) { + return false; + } + if (__break == __inCB_property::__Linker) { + __GB9c_indic_conjunct_break_state_ = __GB9c_indic_conjunct_break_state::__Linker; + return false; + } + __active_rule_ = __rule::__none; + return __evaluate_none(__next_code_point, __next_property); + + case __GB9c_indic_conjunct_break_state::__Linker: + if (__break == __inCB_property::__Extend) { + return false; + } + if (__break == __inCB_property::__Linker) { + return 
false; + } + if (__break == __inCB_property::__Consonant) { + __GB9c_indic_conjunct_break_state_ = __GB9c_indic_conjunct_break_state::__Consonant; + return false; + } + __active_rule_ = __rule::__none; + return __evaluate_none(__next_code_point, __next_property); + } + __libcpp_unreachable(); + } + + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool + __evaluate_GB11_emoji(char32_t __next_code_point, __EGC_property __next_property) { + switch (__GB11_emoji_state_) { + case __GB11_emoji_state::__Extended_Pictographic: + if (__next_property == __EGC_property::__Extend) { + __GB11_emoji_state_ = __GB11_emoji_state::__Extend; + return false; + } + [[fallthrough]]; + case __GB11_emoji_state::__Extend: + if (__next_property == __EGC_property::__ZWJ) { + __GB11_emoji_state_ = __GB11_emoji_state::__ZWJ; + return false; + } + if (__next_property == __EGC_property::__Extend) + return false; + __active_rule_ = __rule::__none; + return __evaluate_none(__next_code_point, __next_property); + + case __GB11_emoji_state::__ZWJ: + if (__next_property == __EGC_property::__Extended_Pictographic) { + __GB11_emoji_state_ = __GB11_emoji_state::__Extended_Pictographic; + return false; + } + __active_rule_ = __rule::__none; + return __evaluate_none(__next_code_point, __next_property); + } + __libcpp_unreachable(); + } + + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool + __evaluate_GB12_GB13_regional_indicator(char32_t __next_code_point, __EGC_property __next_property) { + __active_rule_ = __rule::__none; + if (__next_property == __EGC_property::__Regional_Indicator) + return false; + return __evaluate_none(__next_code_point, __next_property); + } + + char32_t __prev_code_point_; + __EGC_property __prev_property_; + + enum class __rule { + __none, + __GB9c_indic_conjunct_break, + __GB11_emoji, + __GB12_GB13_regional_indicator, + }; + __rule __active_rule_ = __rule::__none; + + enum class __GB11_emoji_state { + __Extended_Pictographic, + __Extend, + __ZWJ, + }; + __GB11_emoji_state 
__GB11_emoji_state_ = __GB11_emoji_state::__Extended_Pictographic; + + enum class __GB9c_indic_conjunct_break_state { + __Consonant, + __Linker, + }; + + __GB9c_indic_conjunct_break_state __GB9c_indic_conjunct_break_state_ = __GB9c_indic_conjunct_break_state::__Consonant; + + // NOLINTEND(readability-identifier-naming) +}; /// Helper class to extract an extended grapheme cluster from a Unicode character range. /// @@ -382,9 +531,7 @@ class __extended_grapheme_cluster_view { public: _LIBCPP_HIDE_FROM_ABI constexpr explicit __extended_grapheme_cluster_view(_Iterator __first, _Iterator __last) - : __code_point_view_(__first, __last), - __next_code_point_(__code_point_view_.__consume().__code_point), - __next_prop_(__extended_grapheme_custer_property_boundary::__get_property(__next_code_point_)) {} + : __code_point_view_(__first, __last), __at_break_(__code_point_view_.__consume().__code_point) {} struct __cluster { /// The first code point of the extended grapheme cluster. @@ -400,44 +547,20 @@ class __extended_grapheme_cluster_view { _Iterator __last_; }; - _LIBCPP_HIDE_FROM_ABI constexpr __cluster __consume() { - _LIBCPP_ASSERT_INTERNAL(__next_prop_ != __extended_grapheme_custer_property_boundary::__property::__eot, - "can't move beyond the end of input"); - - char32_t __code_point = __next_code_point_; - if (!__code_point_view_.__at_end()) - return {__code_point, __get_break()}; - - __next_prop_ = __extended_grapheme_custer_property_boundary::__property::__eot; - return {__code_point, __code_point_view_.__position()}; + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr __cluster __consume() { + char32_t __code_point = __at_break_.__current_code_point(); + _Iterator __position = __code_point_view_.__position(); + while (!__code_point_view_.__at_end()) { + if (__at_break_(__code_point_view_.__consume().__code_point)) + break; + __position = __code_point_view_.__position(); + } + return {__code_point, __position}; } private: __code_point_view<_CharT> __code_point_view_; - 
- char32_t __next_code_point_; - __extended_grapheme_custer_property_boundary::__property __next_prop_; - - _LIBCPP_HIDE_FROM_ABI constexpr _Iterator __get_break() { - bool __ri_break_allowed = true; - bool __has_extened_pictographic = false; - while (true) { - _Iterator __result = __code_point_view_.__position(); - __extended_grapheme_custer_property_boundary::__property __prev = __next_prop_; - if (__code_point_view_.__at_end()) { - __next_prop_ = __extended_grapheme_custer_property_boundary::__property::__eot; - return __result; - } - __next_code_point_ = __code_point_view_.__consume().__code_point; - __next_prop_ = __extended_grapheme_custer_property_boundary::__get_property(__next_code_point_); - - __has_extened_pictographic |= - __prev == __extended_grapheme_custer_property_boundary::__property::__Extended_Pictographic; - - if (__at_extended_grapheme_cluster_break(__ri_break_allowed, __has_extened_pictographic, __prev, __next_prop_)) - return __result; - } - } + __extended_grapheme_cluster_break __at_break_; }; template diff --git a/lib/libcxx/include/__format/width_estimation_table.h b/lib/libcxx/include/__format/width_estimation_table.h index cfb488975d57..11f61dea18d6 100644 --- a/lib/libcxx/include/__format/width_estimation_table.h +++ b/lib/libcxx/include/__format/width_estimation_table.h @@ -119,7 +119,7 @@ namespace __width_estimation_table { /// - bits [0, 13] The size of the range, allowing 16384 elements. /// - bits [14, 31] The lower bound code point of the range. The upper bound of /// the range is lower bound + size. 
-inline constexpr uint32_t __entries[108] = { +_LIBCPP_HIDE_FROM_ABI inline constexpr uint32_t __entries[107] = { 0x0440005f /* 00001100 - 0000115f [ 96] */, // 0x08c68001 /* 0000231a - 0000231b [ 2] */, // 0x08ca4001 /* 00002329 - 0000232a [ 2] */, // @@ -158,14 +158,13 @@ inline constexpr uint32_t __entries[108] = { 0x0ba00019 /* 00002e80 - 00002e99 [ 26] */, // 0x0ba6c058 /* 00002e9b - 00002ef3 [ 89] */, // 0x0bc000d5 /* 00002f00 - 00002fd5 [ 214] */, // - 0x0bfc000b /* 00002ff0 - 00002ffb [ 12] */, // - 0x0c00003e /* 00003000 - 0000303e [ 63] */, // + 0x0bfc004e /* 00002ff0 - 0000303e [ 79] */, // 0x0c104055 /* 00003041 - 00003096 [ 86] */, // 0x0c264066 /* 00003099 - 000030ff [ 103] */, // 0x0c41402a /* 00003105 - 0000312f [ 43] */, // 0x0c4c405d /* 00003131 - 0000318e [ 94] */, // 0x0c640053 /* 00003190 - 000031e3 [ 84] */, // - 0x0c7c002e /* 000031f0 - 0000321e [ 47] */, // + 0x0c7bc02f /* 000031ef - 0000321e [ 48] */, // 0x0c880027 /* 00003220 - 00003247 [ 40] */, // 0x0c943fff /* 00003250 - 0000724f [16384] */, // 0x1c94323c /* 00007250 - 0000a48c [12861] */, // @@ -238,7 +237,7 @@ inline constexpr uint32_t __table_upper_bound = 0x0003fffd; /// Returns the estimated width of a Unicode code point. /// -/// \pre The code point is a valid Unicode code point. +/// \\pre The code point is a valid Unicode code point. [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr int __estimated_width(const char32_t __code_point) noexcept { // Since __table_upper_bound contains the unshifted range do the // comparison without shifting. 
diff --git a/lib/libcxx/include/__format/write_escaped.h b/lib/libcxx/include/__format/write_escaped.h index 43a074dd8d70..052ea98c3c3b 100644 --- a/lib/libcxx/include/__format/write_escaped.h +++ b/lib/libcxx/include/__format/write_escaped.h @@ -101,15 +101,27 @@ _LIBCPP_HIDE_FROM_ABI void __write_escape_ill_formed_code_unit(basic_string<_Cha } template -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool __is_escaped_sequence_written(basic_string<_CharT>& __str, char32_t __value) { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool +__is_escaped_sequence_written(basic_string<_CharT>& __str, bool __last_escaped, char32_t __value) { # ifdef _LIBCPP_HAS_NO_UNICODE // For ASCII assume everything above 127 is printable. if (__value > 127) return false; # endif + // [format.string.escaped]/2.2.1.2.1 + // CE is UTF-8, UTF-16, or UTF-32 and C corresponds to a Unicode scalar + // value whose Unicode property General_Category has a value in the groups + // Separator (Z) or Other (C), as described by UAX #44 of the Unicode Standard, if (!__escaped_output_table::__needs_escape(__value)) - return false; + // [format.string.escaped]/2.2.1.2.2 + // CE is UTF-8, UTF-16, or UTF-32 and C corresponds to a Unicode scalar + // value with the Unicode property Grapheme_Extend=Yes as described by UAX + // #44 of the Unicode Standard and C is not immediately preceded in S by a + // character P appended to E without translation to an escape sequence, + if (!__last_escaped || __extended_grapheme_custer_property_boundary::__get_property(__value) != + __extended_grapheme_custer_property_boundary::__property::__Extend) + return false; __formatter::__write_well_formed_escaped_code_unit(__str, __value); return true; @@ -124,8 +136,8 @@ enum class __escape_quotation_mark { __apostrophe, __double_quote }; // [format.string.escaped]/2 template -[[nodiscard]] _LIBCPP_HIDE_FROM_ABI bool -__is_escaped_sequence_written(basic_string<_CharT>& __str, char32_t __value, __escape_quotation_mark __mark) { +[[nodiscard]] 
_LIBCPP_HIDE_FROM_ABI bool __is_escaped_sequence_written( + basic_string<_CharT>& __str, char32_t __value, bool __last_escaped, __escape_quotation_mark __mark) { // 2.2.1.1 - Mapped character in [tab:format.escape.sequences] switch (__value) { case _CharT('\t'): @@ -167,7 +179,7 @@ __is_escaped_sequence_written(basic_string<_CharT>& __str, char32_t __value, __e // TODO FMT determine what to do with shift sequences. // 2.2.1.2.1 and 2.2.1.2.2 - Escape - return __formatter::__is_escaped_sequence_written(__str, __formatter::__to_char32(__value)); + return __formatter::__is_escaped_sequence_written(__str, __last_escaped, __formatter::__to_char32(__value)); } template @@ -175,11 +187,15 @@ _LIBCPP_HIDE_FROM_ABI void __escape(basic_string<_CharT>& __str, basic_string_view<_CharT> __values, __escape_quotation_mark __mark) { __unicode::__code_point_view<_CharT> __view{__values.begin(), __values.end()}; + // When the first code unit has the property Grapheme_Extend=Yes it needs to + // be escaped. This happens when the previous code unit was also escaped. 
+ bool __escape = true; while (!__view.__at_end()) { auto __first = __view.__position(); typename __unicode::__consume_result __result = __view.__consume(); if (__result.__status == __unicode::__consume_result::__ok) { - if (!__formatter::__is_escaped_sequence_written(__str, __result.__code_point, __mark)) + __escape = __formatter::__is_escaped_sequence_written(__str, __result.__code_point, __escape, __mark); + if (!__escape) // 2.2.1.3 - Add the character ranges::copy(__first, __view.__position(), std::back_insert_iterator(__str)); } else { diff --git a/lib/libcxx/include/__functional/bind.h b/lib/libcxx/include/__functional/bind.h index 19e7d82155ec..b4f46441da50 100644 --- a/lib/libcxx/include/__functional/bind.h +++ b/lib/libcxx/include/__functional/bind.h @@ -13,6 +13,7 @@ #include <__config> #include <__functional/invoke.h> #include <__functional/weak_result_type.h> +#include <__fwd/functional.h> #include <__type_traits/decay.h> #include <__type_traits/is_reference_wrapper.h> #include <__type_traits/is_void.h> @@ -94,7 +95,7 @@ __mu(_Ti& __ti, tuple<_Uj...>& __uj) { return std::__mu_expand(__ti, __uj, __indices()); } -template +template struct __mu_return2 {}; template @@ -104,8 +105,8 @@ struct __mu_return2 { template ::value, int> = 0> inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 - typename __mu_return2<0 < is_placeholder<_Ti>::value, _Ti, _Uj>::type - __mu(_Ti&, _Uj& __uj) { +typename __mu_return2<0 < is_placeholder<_Ti>::value, _Ti, _Uj>::type +__mu(_Ti&, _Uj& __uj) { const size_t __indx = is_placeholder<_Ti>::value - 1; return std::forward::type>(std::get<__indx>(__uj)); } @@ -119,7 +120,7 @@ inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 _Ti& __mu(_Ti& __ti, return __ti; } -template +template struct __mu_return_impl; template @@ -224,8 +225,8 @@ class __bind : public __weak_result_type<__decay_t<_Fp> > { template _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 - typename __bind_return >::type - operator()(_Args&&... 
__args) const { + typename __bind_return >::type + operator()(_Args&&... __args) const { return std::__apply_functor(__f_, __bound_args_, __indices(), tuple<_Args&&...>(std::forward<_Args>(__args)...)); } }; diff --git a/lib/libcxx/include/__functional/bind_back.h b/lib/libcxx/include/__functional/bind_back.h index ce26d3b70630..e44768d2283c 100644 --- a/lib/libcxx/include/__functional/bind_back.h +++ b/lib/libcxx/include/__functional/bind_back.h @@ -52,7 +52,7 @@ struct __bind_back_t : __perfect_forward<__bind_back_op template requires is_constructible_v, _Fn> && is_move_constructible_v> && - (is_constructible_v, _Args> && ...) && (is_move_constructible_v> && ...) + (is_constructible_v, _Args> && ...) && (is_move_constructible_v> && ...) _LIBCPP_HIDE_FROM_ABI constexpr auto __bind_back(_Fn&& __f, _Args&&... __args) noexcept( noexcept(__bind_back_t, tuple...>>( std::forward<_Fn>(__f), std::forward_as_tuple(std::forward<_Args>(__args)...)))) @@ -62,6 +62,20 @@ _LIBCPP_HIDE_FROM_ABI constexpr auto __bind_back(_Fn&& __f, _Args&&... __args) n std::forward<_Fn>(__f), std::forward_as_tuple(std::forward<_Args>(__args)...)); } +# if _LIBCPP_STD_VER >= 23 +template +_LIBCPP_HIDE_FROM_ABI constexpr auto bind_back(_Fn&& __f, _Args&&... 
__args) { + static_assert(is_constructible_v, _Fn>, "bind_back requires decay_t to be constructible from F"); + static_assert(is_move_constructible_v>, "bind_back requires decay_t to be move constructible"); + static_assert((is_constructible_v, _Args> && ...), + "bind_back requires all decay_t to be constructible from respective Args"); + static_assert((is_move_constructible_v> && ...), + "bind_back requires all decay_t to be move constructible"); + return __bind_back_t, tuple...>>( + std::forward<_Fn>(__f), std::forward_as_tuple(std::forward<_Args>(__args)...)); +} +# endif // _LIBCPP_STD_VER >= 23 + #endif // _LIBCPP_STD_VER >= 20 _LIBCPP_END_NAMESPACE_STD diff --git a/lib/libcxx/include/__functional/bind_front.h b/lib/libcxx/include/__functional/bind_front.h index 30dda533615b..87ef3affe80b 100644 --- a/lib/libcxx/include/__functional/bind_front.h +++ b/lib/libcxx/include/__functional/bind_front.h @@ -17,7 +17,6 @@ #include <__type_traits/decay.h> #include <__type_traits/enable_if.h> #include <__type_traits/is_constructible.h> -#include <__type_traits/is_move_constructible.h> #include <__utility/forward.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -30,9 +29,8 @@ _LIBCPP_BEGIN_NAMESPACE_STD struct __bind_front_op { template - _LIBCPP_HIDE_FROM_ABI constexpr auto operator()(_Args&&... __args) const - noexcept(noexcept(std::invoke(std::forward<_Args>(__args)...))) - -> decltype(std::invoke(std::forward<_Args>(__args)...)) { + _LIBCPP_HIDE_FROM_ABI constexpr auto operator()(_Args&&... 
__args) const noexcept( + noexcept(std::invoke(std::forward<_Args>(__args)...))) -> decltype(std::invoke(std::forward<_Args>(__args)...)) { return std::invoke(std::forward<_Args>(__args)...); } }; diff --git a/lib/libcxx/include/__functional/function.h b/lib/libcxx/include/__functional/function.h index 416c26a0c73f..c7b98035e34b 100644 --- a/lib/libcxx/include/__functional/function.h +++ b/lib/libcxx/include/__functional/function.h @@ -28,7 +28,7 @@ #include <__type_traits/decay.h> #include <__type_traits/is_core_convertible.h> #include <__type_traits/is_scalar.h> -#include <__type_traits/is_trivially_copy_constructible.h> +#include <__type_traits/is_trivially_constructible.h> #include <__type_traits/is_trivially_destructible.h> #include <__type_traits/is_void.h> #include <__type_traits/strip_signature.h> @@ -55,7 +55,9 @@ _LIBCPP_BEGIN_NAMESPACE_STD // bad_function_call _LIBCPP_DIAGNOSTIC_PUSH +# if !_LIBCPP_AVAILABILITY_HAS_BAD_FUNCTION_CALL_KEY_FUNCTION _LIBCPP_CLANG_DIAGNOSTIC_IGNORED("-Wweak-vtables") +# endif class _LIBCPP_EXPORTED_FROM_ABI bad_function_call : public exception { public: _LIBCPP_HIDE_FROM_ABI bad_function_call() _NOEXCEPT = default; @@ -64,7 +66,7 @@ class _LIBCPP_EXPORTED_FROM_ABI bad_function_call : public exception { // Note that when a key function is not used, every translation unit that uses // bad_function_call will end up containing a weak definition of the vtable and // typeinfo. 
-# ifdef _LIBCPP_ABI_BAD_FUNCTION_CALL_KEY_FUNCTION +# if _LIBCPP_AVAILABILITY_HAS_BAD_FUNCTION_CALL_KEY_FUNCTION ~bad_function_call() _NOEXCEPT override; # else _LIBCPP_HIDE_FROM_ABI_VIRTUAL ~bad_function_call() _NOEXCEPT override {} @@ -230,10 +232,10 @@ class _LIBCPP_TEMPLATE_VIS __base; template class __base<_Rp(_ArgTypes...)> { - __base(const __base&); - __base& operator=(const __base&); - public: + __base(const __base&) = delete; + __base& operator=(const __base&) = delete; + _LIBCPP_HIDE_FROM_ABI __base() {} _LIBCPP_HIDE_FROM_ABI_VIRTUAL virtual ~__base() {} virtual __base* __clone() const = 0; @@ -514,7 +516,7 @@ struct __policy { } _LIBCPP_HIDE_FROM_ABI static const __policy* __create_empty() { - static const _LIBCPP_CONSTEXPR __policy __policy = { + static constexpr __policy __policy = { nullptr, nullptr, true, @@ -541,7 +543,7 @@ struct __policy { template _LIBCPP_HIDE_FROM_ABI static const __policy* __choose_policy(/* is_small = */ false_type) { - static const _LIBCPP_CONSTEXPR __policy __policy = { + static constexpr __policy __policy = { &__large_clone<_Fun>, &__large_destroy<_Fun>, false, @@ -556,7 +558,7 @@ struct __policy { template _LIBCPP_HIDE_FROM_ABI static const __policy* __choose_policy(/* is_small = */ true_type) { - static const _LIBCPP_CONSTEXPR __policy __policy = { + static constexpr __policy __policy = { nullptr, nullptr, false, @@ -768,7 +770,7 @@ class __func<_Rp1 (^)(_ArgTypes1...), _Alloc, _Rp(_ArgTypes...)> : public __base { } - virtual __base<_Rp(_ArgTypes...)>* __clone() const { + _LIBCPP_HIDE_FROM_ABI_VIRTUAL virtual __base<_Rp(_ArgTypes...)>* __clone() const { _LIBCPP_ASSERT_INTERNAL( false, "Block pointers are just pointers, so they should always fit into " @@ -777,9 +779,11 @@ class __func<_Rp1 (^)(_ArgTypes1...), _Alloc, _Rp(_ArgTypes...)> : public __base return nullptr; } - virtual void __clone(__base<_Rp(_ArgTypes...)>* __p) const { ::new ((void*)__p) __func(__f_); } + _LIBCPP_HIDE_FROM_ABI_VIRTUAL virtual void 
__clone(__base<_Rp(_ArgTypes...)>* __p) const { + ::new ((void*)__p) __func(__f_); + } - virtual void destroy() _NOEXCEPT { + _LIBCPP_HIDE_FROM_ABI_VIRTUAL virtual void destroy() _NOEXCEPT { # ifndef _LIBCPP_HAS_OBJC_ARC if (__f_) _Block_release(__f_); @@ -787,7 +791,7 @@ class __func<_Rp1 (^)(_ArgTypes1...), _Alloc, _Rp(_ArgTypes...)> : public __base __f_ = 0; } - virtual void destroy_deallocate() _NOEXCEPT { + _LIBCPP_HIDE_FROM_ABI_VIRTUAL virtual void destroy_deallocate() _NOEXCEPT { _LIBCPP_ASSERT_INTERNAL( false, "Block pointers are just pointers, so they should always fit into " @@ -795,16 +799,20 @@ class __func<_Rp1 (^)(_ArgTypes1...), _Alloc, _Rp(_ArgTypes...)> : public __base "never be invoked."); } - virtual _Rp operator()(_ArgTypes&&... __arg) { return std::__invoke(__f_, std::forward<_ArgTypes>(__arg)...); } + _LIBCPP_HIDE_FROM_ABI_VIRTUAL virtual _Rp operator()(_ArgTypes&&... __arg) { + return std::__invoke(__f_, std::forward<_ArgTypes>(__arg)...); + } # ifndef _LIBCPP_HAS_NO_RTTI - virtual const void* target(type_info const& __ti) const _NOEXCEPT { + _LIBCPP_HIDE_FROM_ABI_VIRTUAL virtual const void* target(type_info const& __ti) const _NOEXCEPT { if (__ti == typeid(__func::__block_type)) return &__f_; return (const void*)nullptr; } - virtual const std::type_info& target_type() const _NOEXCEPT { return typeid(__func::__block_type); } + _LIBCPP_HIDE_FROM_ABI_VIRTUAL virtual const std::type_info& target_type() const _NOEXCEPT { + return typeid(__func::__block_type); + } # endif // _LIBCPP_HAS_NO_RTTI }; diff --git a/lib/libcxx/include/__functional/hash.h b/lib/libcxx/include/__functional/hash.h index ff22055d6915..a9e450edd39f 100644 --- a/lib/libcxx/include/__functional/hash.h +++ b/lib/libcxx/include/__functional/hash.h @@ -10,23 +10,18 @@ #define _LIBCPP___FUNCTIONAL_HASH_H #include <__config> -#include <__functional/invoke.h> #include <__functional/unary_function.h> -#include <__fwd/hash.h> -#include <__tuple/sfinae_helpers.h> -#include 
<__type_traits/is_copy_constructible.h> -#include <__type_traits/is_default_constructible.h> +#include <__fwd/functional.h> +#include <__type_traits/conjunction.h> +#include <__type_traits/invoke.h> +#include <__type_traits/is_constructible.h> #include <__type_traits/is_enum.h> -#include <__type_traits/is_move_constructible.h> #include <__type_traits/underlying_type.h> -#include <__utility/forward.h> -#include <__utility/move.h> #include <__utility/pair.h> #include <__utility/swap.h> #include #include #include -#include #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header diff --git a/lib/libcxx/include/__functional/identity.h b/lib/libcxx/include/__functional/identity.h index 7fbfc6c6249b..8468de3dae26 100644 --- a/lib/libcxx/include/__functional/identity.h +++ b/lib/libcxx/include/__functional/identity.h @@ -11,7 +11,7 @@ #define _LIBCPP___FUNCTIONAL_IDENTITY_H #include <__config> -#include <__functional/reference_wrapper.h> +#include <__fwd/functional.h> #include <__type_traits/integral_constant.h> #include <__utility/forward.h> @@ -44,7 +44,7 @@ struct __is_identity > : true_type {}; struct identity { template - _LIBCPP_NODISCARD_EXT _LIBCPP_HIDE_FROM_ABI constexpr _Tp&& operator()(_Tp&& __t) const noexcept { + [[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr _Tp&& operator()(_Tp&& __t) const noexcept { return std::forward<_Tp>(__t); } diff --git a/lib/libcxx/include/__functional/is_transparent.h b/lib/libcxx/include/__functional/is_transparent.h index 13fc94f71c6b..b2d62f2e3ead 100644 --- a/lib/libcxx/include/__functional/is_transparent.h +++ b/lib/libcxx/include/__functional/is_transparent.h @@ -11,7 +11,6 @@ #define _LIBCPP___FUNCTIONAL_IS_TRANSPARENT #include <__config> -#include <__type_traits/integral_constant.h> #include <__type_traits/void_t.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -23,10 +22,10 @@ _LIBCPP_BEGIN_NAMESPACE_STD #if _LIBCPP_STD_VER >= 14 template -struct __is_transparent : false_type {}; +inline const 
bool __is_transparent_v = false; template -struct __is_transparent<_Tp, _Up, __void_t > : true_type {}; +inline const bool __is_transparent_v<_Tp, _Up, __void_t > = true; #endif diff --git a/lib/libcxx/include/__functional/mem_fn.h b/lib/libcxx/include/__functional/mem_fn.h index 349a6ce3a757..ee07a71774f9 100644 --- a/lib/libcxx/include/__functional/mem_fn.h +++ b/lib/libcxx/include/__functional/mem_fn.h @@ -38,8 +38,8 @@ class __mem_fn : public __weak_result_type<_Tp> { template _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 - typename __invoke_return::type - operator()(_ArgTypes&&... __args) const { + typename __invoke_return::type + operator()(_ArgTypes&&... __args) const { return std::__invoke(__f_, std::forward<_ArgTypes>(__args)...); } }; diff --git a/lib/libcxx/include/__functional/mem_fun_ref.h b/lib/libcxx/include/__functional/mem_fun_ref.h index fe43c4656092..c344420b0299 100644 --- a/lib/libcxx/include/__functional/mem_fun_ref.h +++ b/lib/libcxx/include/__functional/mem_fun_ref.h @@ -89,8 +89,8 @@ class _LIBCPP_TEMPLATE_VIS _LIBCPP_DEPRECATED_IN_CXX11 const_mem_fun_t : public }; template -class _LIBCPP_TEMPLATE_VIS _LIBCPP_DEPRECATED_IN_CXX11 const_mem_fun1_t - : public __binary_function { +class _LIBCPP_TEMPLATE_VIS +_LIBCPP_DEPRECATED_IN_CXX11 const_mem_fun1_t : public __binary_function { _Sp (_Tp::*__p_)(_Ap) const; public: diff --git a/lib/libcxx/include/__functional/not_fn.h b/lib/libcxx/include/__functional/not_fn.h index 23a491c135d7..4b3ce5524a74 100644 --- a/lib/libcxx/include/__functional/not_fn.h +++ b/lib/libcxx/include/__functional/not_fn.h @@ -16,7 +16,6 @@ #include <__type_traits/decay.h> #include <__type_traits/enable_if.h> #include <__type_traits/is_constructible.h> -#include <__type_traits/is_move_constructible.h> #include <__utility/forward.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__functional/operations.h b/lib/libcxx/include/__functional/operations.h index 7ddc00650f16..0a6320f19de3 
100644 --- a/lib/libcxx/include/__functional/operations.h +++ b/lib/libcxx/include/__functional/operations.h @@ -13,8 +13,7 @@ #include <__config> #include <__functional/binary_function.h> #include <__functional/unary_function.h> -#include <__type_traits/integral_constant.h> -#include <__type_traits/operation_traits.h> +#include <__type_traits/desugars_to.h> #include <__utility/forward.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -41,18 +40,18 @@ _LIBCPP_CTAD_SUPPORTED_FOR_TYPE(plus); // The non-transparent std::plus specialization is only equivalent to a raw plus // operator when we don't perform an implicit conversion when calling it. template -struct __desugars_to<__plus_tag, plus<_Tp>, _Tp, _Tp> : true_type {}; +inline const bool __desugars_to_v<__plus_tag, plus<_Tp>, _Tp, _Tp> = true; template -struct __desugars_to<__plus_tag, plus, _Tp, _Up> : true_type {}; +inline const bool __desugars_to_v<__plus_tag, plus, _Tp, _Up> = true; #if _LIBCPP_STD_VER >= 14 template <> struct _LIBCPP_TEMPLATE_VIS plus { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) + std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) + std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) + std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) + std::forward<_T2>(__u)) { return std::forward<_T1>(__t) + std::forward<_T2>(__u); } typedef void is_transparent; @@ -77,8 +76,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS minus { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) - std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) - std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) - std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) - std::forward<_T2>(__u)) { return std::forward<_T1>(__t) - std::forward<_T2>(__u); } 
typedef void is_transparent; @@ -103,8 +102,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS multiplies { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) * std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) * std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) * std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) * std::forward<_T2>(__u)) { return std::forward<_T1>(__t) * std::forward<_T2>(__u); } typedef void is_transparent; @@ -129,8 +128,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS divides { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) / std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) / std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) / std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) / std::forward<_T2>(__u)) { return std::forward<_T1>(__t) / std::forward<_T2>(__u); } typedef void is_transparent; @@ -155,8 +154,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS modulus { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) % std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) % std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) % std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) % std::forward<_T2>(__u)) { return std::forward<_T1>(__t) % std::forward<_T2>(__u); } typedef void is_transparent; @@ -179,7 +178,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS negate { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_Tp&& __x) const - noexcept(noexcept(-std::forward<_Tp>(__x))) -> decltype(-std::forward<_Tp>(__x)) { + noexcept(noexcept(-std::forward<_Tp>(__x))) // + -> decltype(-std::forward<_Tp>(__x)) { return 
-std::forward<_Tp>(__x); } typedef void is_transparent; @@ -206,8 +206,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS bit_and { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) & std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) & std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) & + std::forward<_T2>(__u))) -> decltype(std::forward<_T1>(__t) & std::forward<_T2>(__u)) { return std::forward<_T1>(__t) & std::forward<_T2>(__u); } typedef void is_transparent; @@ -225,7 +225,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS bit_not { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_Tp&& __x) const - noexcept(noexcept(~std::forward<_Tp>(__x))) -> decltype(~std::forward<_Tp>(__x)) { + noexcept(noexcept(~std::forward<_Tp>(__x))) // + -> decltype(~std::forward<_Tp>(__x)) { return ~std::forward<_Tp>(__x); } typedef void is_transparent; @@ -250,8 +251,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS bit_or { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) | std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) | std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) | std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) | std::forward<_T2>(__u)) { return std::forward<_T1>(__t) | std::forward<_T2>(__u); } typedef void is_transparent; @@ -276,8 +277,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS bit_xor { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) ^ std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) ^ std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) ^ std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) ^ std::forward<_T2>(__u)) { return 
std::forward<_T1>(__t) ^ std::forward<_T2>(__u); } typedef void is_transparent; @@ -304,8 +305,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS equal_to { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) == std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) == std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) == std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) == std::forward<_T2>(__u)) { return std::forward<_T1>(__t) == std::forward<_T2>(__u); } typedef void is_transparent; @@ -315,11 +316,11 @@ struct _LIBCPP_TEMPLATE_VIS equal_to { // The non-transparent std::equal_to specialization is only equivalent to a raw equality // comparison when we don't perform an implicit conversion when calling it. template -struct __desugars_to<__equal_tag, equal_to<_Tp>, _Tp, _Tp> : true_type {}; +inline const bool __desugars_to_v<__equal_tag, equal_to<_Tp>, _Tp, _Tp> = true; // In the transparent case, we do not enforce that template -struct __desugars_to<__equal_tag, equal_to, _Tp, _Up> : true_type {}; +inline const bool __desugars_to_v<__equal_tag, equal_to, _Tp, _Up> = true; #if _LIBCPP_STD_VER >= 14 template @@ -339,8 +340,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS not_equal_to { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) != std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) != std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) != std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) != std::forward<_T2>(__u)) { return std::forward<_T1>(__t) != std::forward<_T2>(__u); } typedef void is_transparent; @@ -360,17 +361,23 @@ struct _LIBCPP_TEMPLATE_VIS less : __binary_function<_Tp, _Tp, bool> { }; _LIBCPP_CTAD_SUPPORTED_FOR_TYPE(less); +template +inline const bool __desugars_to_v<__less_tag, 
less<_Tp>, _Tp, _Tp> = true; + #if _LIBCPP_STD_VER >= 14 template <> struct _LIBCPP_TEMPLATE_VIS less { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) < std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) < std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) < std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) < std::forward<_T2>(__u)) { return std::forward<_T1>(__t) < std::forward<_T2>(__u); } typedef void is_transparent; }; + +template +inline const bool __desugars_to_v<__less_tag, less<>, _Tp, _Tp> = true; #endif #if _LIBCPP_STD_VER >= 14 @@ -391,8 +398,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS less_equal { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) <= std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) <= std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) <= std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) <= std::forward<_T2>(__u)) { return std::forward<_T1>(__t) <= std::forward<_T2>(__u); } typedef void is_transparent; @@ -417,8 +424,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS greater_equal { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) >= std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) >= std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) >= + std::forward<_T2>(__u))) -> decltype(std::forward<_T1>(__t) >= std::forward<_T2>(__u)) { return std::forward<_T1>(__t) >= std::forward<_T2>(__u); } typedef void is_transparent; @@ -443,8 +450,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS greater { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) > 
std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) > std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) > std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) > std::forward<_T2>(__u)) { return std::forward<_T1>(__t) > std::forward<_T2>(__u); } typedef void is_transparent; @@ -471,8 +478,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS logical_and { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) && std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) && std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) && std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) && std::forward<_T2>(__u)) { return std::forward<_T1>(__t) && std::forward<_T2>(__u); } typedef void is_transparent; @@ -495,7 +502,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS logical_not { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_Tp&& __x) const - noexcept(noexcept(!std::forward<_Tp>(__x))) -> decltype(!std::forward<_Tp>(__x)) { + noexcept(noexcept(!std::forward<_Tp>(__x))) // + -> decltype(!std::forward<_Tp>(__x)) { return !std::forward<_Tp>(__x); } typedef void is_transparent; @@ -520,8 +528,8 @@ template <> struct _LIBCPP_TEMPLATE_VIS logical_or { template _LIBCPP_CONSTEXPR_SINCE_CXX14 _LIBCPP_HIDE_FROM_ABI auto operator()(_T1&& __t, _T2&& __u) const - noexcept(noexcept(std::forward<_T1>(__t) || std::forward<_T2>(__u))) - -> decltype(std::forward<_T1>(__t) || std::forward<_T2>(__u)) { + noexcept(noexcept(std::forward<_T1>(__t) || std::forward<_T2>(__u))) // + -> decltype(std::forward<_T1>(__t) || std::forward<_T2>(__u)) { return std::forward<_T1>(__t) || std::forward<_T2>(__u); } typedef void is_transparent; diff --git a/lib/libcxx/include/__functional/pointer_to_binary_function.h b/lib/libcxx/include/__functional/pointer_to_binary_function.h index 51a7c3fe0fc0..e345250dcdd8 100644 --- 
a/lib/libcxx/include/__functional/pointer_to_binary_function.h +++ b/lib/libcxx/include/__functional/pointer_to_binary_function.h @@ -22,8 +22,8 @@ _LIBCPP_BEGIN_NAMESPACE_STD #if _LIBCPP_STD_VER <= 14 || defined(_LIBCPP_ENABLE_CXX17_REMOVED_BINDERS) template -class _LIBCPP_TEMPLATE_VIS _LIBCPP_DEPRECATED_IN_CXX11 pointer_to_binary_function - : public __binary_function<_Arg1, _Arg2, _Result> { +class _LIBCPP_TEMPLATE_VIS +_LIBCPP_DEPRECATED_IN_CXX11 pointer_to_binary_function : public __binary_function<_Arg1, _Arg2, _Result> { _Result (*__f_)(_Arg1, _Arg2); public: diff --git a/lib/libcxx/include/__functional/pointer_to_unary_function.h b/lib/libcxx/include/__functional/pointer_to_unary_function.h index 0338e7671789..3a5d153d3617 100644 --- a/lib/libcxx/include/__functional/pointer_to_unary_function.h +++ b/lib/libcxx/include/__functional/pointer_to_unary_function.h @@ -22,8 +22,8 @@ _LIBCPP_BEGIN_NAMESPACE_STD #if _LIBCPP_STD_VER <= 14 || defined(_LIBCPP_ENABLE_CXX17_REMOVED_BINDERS) template -class _LIBCPP_TEMPLATE_VIS _LIBCPP_DEPRECATED_IN_CXX11 pointer_to_unary_function - : public __unary_function<_Arg, _Result> { +class _LIBCPP_TEMPLATE_VIS +_LIBCPP_DEPRECATED_IN_CXX11 pointer_to_unary_function : public __unary_function<_Arg, _Result> { _Result (*__f_)(_Arg); public: diff --git a/lib/libcxx/include/__functional/ranges_operations.h b/lib/libcxx/include/__functional/ranges_operations.h index 38b28018049e..27f06eadd0eb 100644 --- a/lib/libcxx/include/__functional/ranges_operations.h +++ b/lib/libcxx/include/__functional/ranges_operations.h @@ -13,8 +13,7 @@ #include <__concepts/equality_comparable.h> #include <__concepts/totally_ordered.h> #include <__config> -#include <__type_traits/integral_constant.h> -#include <__type_traits/operation_traits.h> +#include <__type_traits/desugars_to.h> #include <__utility/forward.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -98,7 +97,10 @@ struct greater_equal { // For ranges we do not require that the types on each 
side of the equality // operator are of the same type template -struct __desugars_to<__equal_tag, ranges::equal_to, _Tp, _Up> : true_type {}; +inline const bool __desugars_to_v<__equal_tag, ranges::equal_to, _Tp, _Up> = true; + +template +inline const bool __desugars_to_v<__less_tag, ranges::less, _Tp, _Up> = true; #endif // _LIBCPP_STD_VER >= 20 diff --git a/lib/libcxx/include/__functional/reference_wrapper.h b/lib/libcxx/include/__functional/reference_wrapper.h index 54de06a8879c..3570e2673c80 100644 --- a/lib/libcxx/include/__functional/reference_wrapper.h +++ b/lib/libcxx/include/__functional/reference_wrapper.h @@ -10,12 +10,16 @@ #ifndef _LIBCPP___FUNCTIONAL_REFERENCE_WRAPPER_H #define _LIBCPP___FUNCTIONAL_REFERENCE_WRAPPER_H +#include <__compare/synth_three_way.h> +#include <__concepts/boolean_testable.h> #include <__config> #include <__functional/invoke.h> #include <__functional/weak_result_type.h> #include <__memory/addressof.h> #include <__type_traits/enable_if.h> +#include <__type_traits/is_const.h> #include <__type_traits/remove_cvref.h> +#include <__type_traits/void_t.h> #include <__utility/declval.h> #include <__utility/forward.h> @@ -35,12 +39,12 @@ class _LIBCPP_TEMPLATE_VIS reference_wrapper : public __weak_result_type<_Tp> { type* __f_; static void __fun(_Tp&) _NOEXCEPT; - static void __fun(_Tp&&) = delete; + static void __fun(_Tp&&) = delete; // NOLINT(modernize-use-equals-delete) ; This is llvm.org/PR54276 public: - template < - class _Up, - class = __enable_if_t::value, decltype(__fun(std::declval<_Up>())) > > + template ()))>, + __enable_if_t::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX20 reference_wrapper(_Up&& __u) _NOEXCEPT_(noexcept(__fun(std::declval<_Up>()))) { type& __f = static_cast<_Up&&>(__u); @@ -63,6 +67,54 @@ class _LIBCPP_TEMPLATE_VIS reference_wrapper : public __weak_result_type<_Tp> { { return std::__invoke(get(), std::forward<_ArgTypes>(__args)...); } + +#if _LIBCPP_STD_VER >= 26 + + // 
[refwrap.comparisons], comparisons + + _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator==(reference_wrapper __x, reference_wrapper __y) + requires requires { + { __x.get() == __y.get() } -> __boolean_testable; + } + { + return __x.get() == __y.get(); + } + + _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator==(reference_wrapper __x, const _Tp& __y) + requires requires { + { __x.get() == __y } -> __boolean_testable; + } + { + return __x.get() == __y; + } + + _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator==(reference_wrapper __x, reference_wrapper __y) + requires(!is_const_v<_Tp>) && requires { + { __x.get() == __y.get() } -> __boolean_testable; + } + { + return __x.get() == __y.get(); + } + + _LIBCPP_HIDE_FROM_ABI friend constexpr auto operator<=>(reference_wrapper __x, reference_wrapper __y) + requires requires { std::__synth_three_way(__x.get(), __y.get()); } + { + return std::__synth_three_way(__x.get(), __y.get()); + } + + _LIBCPP_HIDE_FROM_ABI friend constexpr auto operator<=>(reference_wrapper __x, const _Tp& __y) + requires requires { std::__synth_three_way(__x.get(), __y); } + { + return std::__synth_three_way(__x.get(), __y); + } + + _LIBCPP_HIDE_FROM_ABI friend constexpr auto operator<=>(reference_wrapper __x, reference_wrapper __y) + requires(!is_const_v<_Tp>) && requires { std::__synth_three_way(__x.get(), __y.get()); } + { + return std::__synth_three_way(__x.get(), __y.get()); + } + +#endif // _LIBCPP_STD_VER >= 26 }; #if _LIBCPP_STD_VER >= 17 diff --git a/lib/libcxx/include/__functional/unary_negate.h b/lib/libcxx/include/__functional/unary_negate.h index d130b7d728a2..5bd487a97bcb 100644 --- a/lib/libcxx/include/__functional/unary_negate.h +++ b/lib/libcxx/include/__functional/unary_negate.h @@ -22,8 +22,8 @@ _LIBCPP_BEGIN_NAMESPACE_STD #if _LIBCPP_STD_VER <= 17 || defined(_LIBCPP_ENABLE_CXX20_REMOVED_NEGATORS) template -class _LIBCPP_TEMPLATE_VIS _LIBCPP_DEPRECATED_IN_CXX17 unary_negate - : public __unary_function { +class 
_LIBCPP_TEMPLATE_VIS +_LIBCPP_DEPRECATED_IN_CXX17 unary_negate : public __unary_function { _Predicate __pred_; public: diff --git a/lib/libcxx/include/__fwd/array.h b/lib/libcxx/include/__fwd/array.h index 9a79effb617d..b429d0c5a954 100644 --- a/lib/libcxx/include/__fwd/array.h +++ b/lib/libcxx/include/__fwd/array.h @@ -21,6 +21,26 @@ _LIBCPP_BEGIN_NAMESPACE_STD template struct _LIBCPP_TEMPLATE_VIS array; +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp& get(array<_Tp, _Size>&) _NOEXCEPT; + +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Tp& get(const array<_Tp, _Size>&) _NOEXCEPT; + +#ifndef _LIBCPP_CXX03_LANG +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp&& get(array<_Tp, _Size>&&) _NOEXCEPT; + +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Tp&& get(const array<_Tp, _Size>&&) _NOEXCEPT; +#endif + +template +struct __is_std_array : false_type {}; + +template +struct __is_std_array > : true_type {}; + _LIBCPP_END_NAMESPACE_STD #endif // _LIBCPP___FWD_ARRAY_H diff --git a/lib/libcxx/include/__fwd/complex.h b/lib/libcxx/include/__fwd/complex.h new file mode 100644 index 000000000000..22c78c5cc3c7 --- /dev/null +++ b/lib/libcxx/include/__fwd/complex.h @@ -0,0 +1,42 @@ +//===---------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// + +#ifndef _LIBCPP___FWD_COMPLEX_H +#define _LIBCPP___FWD_COMPLEX_H + +#include <__config> +#include + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_BEGIN_NAMESPACE_STD + +template +class _LIBCPP_TEMPLATE_VIS complex; + +#if _LIBCPP_STD_VER >= 26 + +template +_LIBCPP_HIDE_FROM_ABI constexpr _Tp& get(complex<_Tp>&) noexcept; + +template +_LIBCPP_HIDE_FROM_ABI constexpr _Tp&& get(complex<_Tp>&&) noexcept; + +template +_LIBCPP_HIDE_FROM_ABI constexpr const _Tp& get(const complex<_Tp>&) noexcept; + +template +_LIBCPP_HIDE_FROM_ABI constexpr const _Tp&& get(const complex<_Tp>&&) noexcept; + +#endif // _LIBCPP_STD_VER >= 26 + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP___FWD_COMPLEX_H diff --git a/lib/libcxx/include/__fwd/deque.h b/lib/libcxx/include/__fwd/deque.h new file mode 100644 index 000000000000..fd2fb5bb4b8e --- /dev/null +++ b/lib/libcxx/include/__fwd/deque.h @@ -0,0 +1,26 @@ +//===---------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// + +#ifndef _LIBCPP___FWD_DEQUE_H +#define _LIBCPP___FWD_DEQUE_H + +#include <__config> +#include <__fwd/memory.h> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_BEGIN_NAMESPACE_STD + +template > +class _LIBCPP_TEMPLATE_VIS deque; + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP___FWD_DEQUE_H diff --git a/lib/libcxx/include/__format/format_fwd.h b/lib/libcxx/include/__fwd/format.h similarity index 86% rename from lib/libcxx/include/__format/format_fwd.h rename to lib/libcxx/include/__fwd/format.h index 120b2fc8d47d..b30c220f8a04 100644 --- a/lib/libcxx/include/__format/format_fwd.h +++ b/lib/libcxx/include/__fwd/format.h @@ -7,10 +7,9 @@ // //===----------------------------------------------------------------------===// -#ifndef _LIBCPP___FORMAT_FORMAT_FWD_H -#define _LIBCPP___FORMAT_FORMAT_FWD_H +#ifndef _LIBCPP___FWD_FORMAT_H +#define _LIBCPP___FWD_FORMAT_H -#include <__availability> #include <__config> #include <__iterator/concepts.h> @@ -36,4 +35,4 @@ struct _LIBCPP_TEMPLATE_VIS formatter; _LIBCPP_END_NAMESPACE_STD -#endif // _LIBCPP___FORMAT_FORMAT_FWD_H +#endif // _LIBCPP___FWD_FORMAT_H diff --git a/lib/libcxx/include/__fwd/functional.h b/lib/libcxx/include/__fwd/functional.h new file mode 100644 index 000000000000..32c9ef33e453 --- /dev/null +++ b/lib/libcxx/include/__fwd/functional.h @@ -0,0 +1,28 @@ +//===---------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// + +#ifndef _LIBCPP___FWD_FUNCTIONAL_H +#define _LIBCPP___FWD_FUNCTIONAL_H + +#include <__config> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_BEGIN_NAMESPACE_STD + +template +struct _LIBCPP_TEMPLATE_VIS hash; + +template +class _LIBCPP_TEMPLATE_VIS reference_wrapper; + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP___FWD_FUNCTIONAL_H diff --git a/lib/libcxx/include/__fwd/get.h b/lib/libcxx/include/__fwd/get.h deleted file mode 100644 index e7261b826953..000000000000 --- a/lib/libcxx/include/__fwd/get.h +++ /dev/null @@ -1,99 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// - -#ifndef _LIBCPP___FWD_GET_H -#define _LIBCPP___FWD_GET_H - -#include <__concepts/copyable.h> -#include <__config> -#include <__fwd/array.h> -#include <__fwd/pair.h> -#include <__fwd/subrange.h> -#include <__fwd/tuple.h> -#include <__tuple/tuple_element.h> -#include - -#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) -# pragma GCC system_header -#endif - -_LIBCPP_BEGIN_NAMESPACE_STD - -#ifndef _LIBCPP_CXX03_LANG - -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 typename tuple_element<_Ip, tuple<_Tp...> >::type& -get(tuple<_Tp...>&) _NOEXCEPT; - -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const typename tuple_element<_Ip, tuple<_Tp...> >::type& -get(const tuple<_Tp...>&) _NOEXCEPT; - -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 typename tuple_element<_Ip, tuple<_Tp...> >::type&& -get(tuple<_Tp...>&&) _NOEXCEPT; - 
-template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const typename tuple_element<_Ip, tuple<_Tp...> >::type&& -get(const tuple<_Tp...>&&) _NOEXCEPT; - -#endif //_LIBCPP_CXX03_LANG - -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 typename tuple_element<_Ip, pair<_T1, _T2> >::type& -get(pair<_T1, _T2>&) _NOEXCEPT; - -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const typename tuple_element<_Ip, pair<_T1, _T2> >::type& -get(const pair<_T1, _T2>&) _NOEXCEPT; - -#ifndef _LIBCPP_CXX03_LANG -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 typename tuple_element<_Ip, pair<_T1, _T2> >::type&& -get(pair<_T1, _T2>&&) _NOEXCEPT; - -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const typename tuple_element<_Ip, pair<_T1, _T2> >::type&& -get(const pair<_T1, _T2>&&) _NOEXCEPT; -#endif - -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp& get(array<_Tp, _Size>&) _NOEXCEPT; - -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Tp& get(const array<_Tp, _Size>&) _NOEXCEPT; - -#ifndef _LIBCPP_CXX03_LANG -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 _Tp&& get(array<_Tp, _Size>&&) _NOEXCEPT; - -template -_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const _Tp&& get(const array<_Tp, _Size>&&) _NOEXCEPT; -#endif - -#if _LIBCPP_STD_VER >= 20 - -namespace ranges { - -template - requires((_Index == 0 && copyable<_Iter>) || _Index == 1) -_LIBCPP_HIDE_FROM_ABI constexpr auto get(const subrange<_Iter, _Sent, _Kind>& __subrange); - -template - requires(_Index < 2) -_LIBCPP_HIDE_FROM_ABI constexpr auto get(subrange<_Iter, _Sent, _Kind>&& __subrange); - -} // namespace ranges - -using ranges::get; - -#endif // _LIBCPP_STD_VER >= 20 - -_LIBCPP_END_NAMESPACE_STD - -#endif // _LIBCPP___FWD_GET_H diff --git a/lib/libcxx/include/__fwd/ios.h b/lib/libcxx/include/__fwd/ios.h index 82c865d58cc7..48350709d4ce 100644 --- a/lib/libcxx/include/__fwd/ios.h +++ 
b/lib/libcxx/include/__fwd/ios.h @@ -18,6 +18,8 @@ _LIBCPP_BEGIN_NAMESPACE_STD +class _LIBCPP_EXPORTED_FROM_ABI ios_base; + template > class _LIBCPP_TEMPLATE_VIS basic_ios; diff --git a/lib/libcxx/include/__fwd/hash.h b/lib/libcxx/include/__fwd/memory.h similarity index 77% rename from lib/libcxx/include/__fwd/hash.h rename to lib/libcxx/include/__fwd/memory.h index af9eca876a10..b9e151855ad7 100644 --- a/lib/libcxx/include/__fwd/hash.h +++ b/lib/libcxx/include/__fwd/memory.h @@ -6,8 +6,8 @@ // //===---------------------------------------------------------------------===// -#ifndef _LIBCPP___FWD_HASH_H -#define _LIBCPP___FWD_HASH_H +#ifndef _LIBCPP___FWD_MEMORY_H +#define _LIBCPP___FWD_MEMORY_H #include <__config> @@ -17,9 +17,9 @@ _LIBCPP_BEGIN_NAMESPACE_STD -template -struct _LIBCPP_TEMPLATE_VIS hash; +template +class _LIBCPP_TEMPLATE_VIS allocator; _LIBCPP_END_NAMESPACE_STD -#endif // _LIBCPP___FWD_HASH_H +#endif // _LIBCPP___FWD_MEMORY_H diff --git a/lib/libcxx/include/__fwd/memory_resource.h b/lib/libcxx/include/__fwd/memory_resource.h index 03b78ad2bd3c..d68b2c2b6315 100644 --- a/lib/libcxx/include/__fwd/memory_resource.h +++ b/lib/libcxx/include/__fwd/memory_resource.h @@ -9,7 +9,6 @@ #ifndef _LIBCPP___FWD_MEMORY_RESOURCE_H #define _LIBCPP___FWD_MEMORY_RESOURCE_H -#include <__availability> #include <__config> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__fwd/pair.h b/lib/libcxx/include/__fwd/pair.h index 3844014de3ad..af32628fe1e0 100644 --- a/lib/libcxx/include/__fwd/pair.h +++ b/lib/libcxx/include/__fwd/pair.h @@ -10,6 +10,8 @@ #define _LIBCPP___FWD_PAIR_H #include <__config> +#include <__fwd/tuple.h> +#include #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header @@ -20,6 +22,24 @@ _LIBCPP_BEGIN_NAMESPACE_STD template struct _LIBCPP_TEMPLATE_VIS pair; +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 typename tuple_element<_Ip, pair<_T1, _T2> >::type& +get(pair<_T1, _T2>&) 
_NOEXCEPT; + +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const typename tuple_element<_Ip, pair<_T1, _T2> >::type& +get(const pair<_T1, _T2>&) _NOEXCEPT; + +#ifndef _LIBCPP_CXX03_LANG +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 typename tuple_element<_Ip, pair<_T1, _T2> >::type&& +get(pair<_T1, _T2>&&) _NOEXCEPT; + +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const typename tuple_element<_Ip, pair<_T1, _T2> >::type&& +get(const pair<_T1, _T2>&&) _NOEXCEPT; +#endif + _LIBCPP_END_NAMESPACE_STD #endif // _LIBCPP___FWD_PAIR_H diff --git a/lib/libcxx/include/__fwd/queue.h b/lib/libcxx/include/__fwd/queue.h new file mode 100644 index 000000000000..50d99ad9c29f --- /dev/null +++ b/lib/libcxx/include/__fwd/queue.h @@ -0,0 +1,31 @@ +//===---------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// + +#ifndef _LIBCPP___FWD_QUEUE_H +#define _LIBCPP___FWD_QUEUE_H + +#include <__config> +#include <__functional/operations.h> +#include <__fwd/deque.h> +#include <__fwd/vector.h> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_BEGIN_NAMESPACE_STD + +template > +class _LIBCPP_TEMPLATE_VIS queue; + +template , class _Compare = less > +class _LIBCPP_TEMPLATE_VIS priority_queue; + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP___FWD_QUEUE_H diff --git a/lib/libcxx/include/__fwd/sstream.h b/lib/libcxx/include/__fwd/sstream.h index e2d46fbe1d9b..39a9c3faf1f8 100644 --- a/lib/libcxx/include/__fwd/sstream.h +++ b/lib/libcxx/include/__fwd/sstream.h @@ -10,6 +10,7 @@ #define _LIBCPP___FWD_SSTREAM_H #include <__config> +#include <__fwd/memory.h> #include <__fwd/string.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) diff --git a/lib/libcxx/include/__fwd/stack.h b/lib/libcxx/include/__fwd/stack.h new file mode 100644 index 000000000000..7dab6c1a4f4e --- /dev/null +++ b/lib/libcxx/include/__fwd/stack.h @@ -0,0 +1,26 @@ +//===---------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// + +#ifndef _LIBCPP___FWD_STACK_H +#define _LIBCPP___FWD_STACK_H + +#include <__config> +#include <__fwd/deque.h> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_BEGIN_NAMESPACE_STD + +template > +class _LIBCPP_TEMPLATE_VIS stack; + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP___FWD_STACK_H diff --git a/lib/libcxx/include/__fwd/string.h b/lib/libcxx/include/__fwd/string.h index 032132374de5..2418e1f9b23d 100644 --- a/lib/libcxx/include/__fwd/string.h +++ b/lib/libcxx/include/__fwd/string.h @@ -9,8 +9,8 @@ #ifndef _LIBCPP___FWD_STRING_H #define _LIBCPP___FWD_STRING_H -#include <__availability> #include <__config> +#include <__fwd/memory.h> #include <__fwd/memory_resource.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -39,9 +39,6 @@ template <> struct char_traits; #endif -template -class _LIBCPP_TEMPLATE_VIS allocator; - template , class _Allocator = allocator<_CharT> > class _LIBCPP_TEMPLATE_VIS basic_string; diff --git a/lib/libcxx/include/__fwd/subrange.h b/lib/libcxx/include/__fwd/subrange.h index d09b9b1c5b97..60a41da23dd4 100644 --- a/lib/libcxx/include/__fwd/subrange.h +++ b/lib/libcxx/include/__fwd/subrange.h @@ -9,7 +9,10 @@ #ifndef _LIBCPP___FWD_SUBRANGE_H #define _LIBCPP___FWD_SUBRANGE_H +#include <__concepts/copyable.h> #include <__config> +#include <__iterator/concepts.h> +#include #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header @@ -17,8 +20,6 @@ #if _LIBCPP_STD_VER >= 20 -# include <__iterator/concepts.h> - _LIBCPP_BEGIN_NAMESPACE_STD namespace ranges { @@ -29,8 +30,18 @@ template _Sent, subrange_ki requires(_Kind == subrange_kind::sized || !sized_sentinel_for<_Sent, _Iter>) class _LIBCPP_TEMPLATE_VIS subrange; +template + requires((_Index == 0 && copyable<_Iter>) || _Index == 1) +_LIBCPP_HIDE_FROM_ABI constexpr auto 
get(const subrange<_Iter, _Sent, _Kind>&); + +template + requires(_Index < 2) +_LIBCPP_HIDE_FROM_ABI constexpr auto get(subrange<_Iter, _Sent, _Kind>&&); + } // namespace ranges +using ranges::get; + _LIBCPP_END_NAMESPACE_STD #endif // _LIBCPP_STD_VER >= 20 diff --git a/lib/libcxx/include/__fwd/tuple.h b/lib/libcxx/include/__fwd/tuple.h index 16b3fabbb995..902770c29555 100644 --- a/lib/libcxx/include/__fwd/tuple.h +++ b/lib/libcxx/include/__fwd/tuple.h @@ -10,6 +10,7 @@ #define _LIBCPP___FWD_TUPLE_H #include <__config> +#include #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) # pragma GCC system_header @@ -17,11 +18,33 @@ _LIBCPP_BEGIN_NAMESPACE_STD +template +struct _LIBCPP_TEMPLATE_VIS tuple_element; + #ifndef _LIBCPP_CXX03_LANG template class _LIBCPP_TEMPLATE_VIS tuple; +template +struct _LIBCPP_TEMPLATE_VIS tuple_size; + +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 typename tuple_element<_Ip, tuple<_Tp...> >::type& +get(tuple<_Tp...>&) _NOEXCEPT; + +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const typename tuple_element<_Ip, tuple<_Tp...> >::type& +get(const tuple<_Tp...>&) _NOEXCEPT; + +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 typename tuple_element<_Ip, tuple<_Tp...> >::type&& +get(tuple<_Tp...>&&) _NOEXCEPT; + +template +_LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 const typename tuple_element<_Ip, tuple<_Tp...> >::type&& +get(const tuple<_Tp...>&&) _NOEXCEPT; + #endif // _LIBCPP_CXX03_LANG _LIBCPP_END_NAMESPACE_STD diff --git a/lib/libcxx/include/__fwd/vector.h b/lib/libcxx/include/__fwd/vector.h new file mode 100644 index 000000000000..c9cc96137449 --- /dev/null +++ b/lib/libcxx/include/__fwd/vector.h @@ -0,0 +1,26 @@ +//===---------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===---------------------------------------------------------------------===// + +#ifndef _LIBCPP___FWD_VECTOR_H +#define _LIBCPP___FWD_VECTOR_H + +#include <__config> +#include <__fwd/memory.h> + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +_LIBCPP_BEGIN_NAMESPACE_STD + +template > +class _LIBCPP_TEMPLATE_VIS vector; + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP___FWD_VECTOR_H diff --git a/lib/libcxx/include/__hash_table b/lib/libcxx/include/__hash_table index 13420012006e..025758528573 100644 --- a/lib/libcxx/include/__hash_table +++ b/lib/libcxx/include/__hash_table @@ -28,12 +28,9 @@ #include <__type_traits/can_extract_key.h> #include <__type_traits/conditional.h> #include <__type_traits/is_const.h> -#include <__type_traits/is_copy_constructible.h> +#include <__type_traits/is_constructible.h> +#include <__type_traits/is_nothrow_assignable.h> #include <__type_traits/is_nothrow_constructible.h> -#include <__type_traits/is_nothrow_copy_constructible.h> -#include <__type_traits/is_nothrow_default_constructible.h> -#include <__type_traits/is_nothrow_move_assignable.h> -#include <__type_traits/is_nothrow_move_constructible.h> #include <__type_traits/is_pointer.h> #include <__type_traits/is_reference.h> #include <__type_traits/is_swappable.h> @@ -243,9 +240,9 @@ public: private: static_assert(!is_const<__node_type>::value, "_NodePtr should never be a pointer to const"); - static_assert((is_same::element_type, void>::value), + static_assert(is_same::element_type, void>::value, "_VoidPtr does not point to unqualified void type"); - static_assert((is_same<__rebind_pointer_t<_VoidPtr, __node_type>, _NodePtr>::value), + static_assert(is_same<__rebind_pointer_t<_VoidPtr, __node_type>, _NodePtr>::value, "_VoidPtr does not rebind to _NodePtr."); }; @@ -284,13 +281,21 @@ public: _LIBCPP_HIDE_FROM_ABI __hash_iterator() _NOEXCEPT : __node_(nullptr) {} - 
_LIBCPP_HIDE_FROM_ABI reference operator*() const { return __node_->__upcast()->__get_value(); } + _LIBCPP_HIDE_FROM_ABI reference operator*() const { + _LIBCPP_ASSERT_NON_NULL( + __node_ != nullptr, "Attempted to dereference a non-dereferenceable unordered container iterator"); + return __node_->__upcast()->__get_value(); + } _LIBCPP_HIDE_FROM_ABI pointer operator->() const { + _LIBCPP_ASSERT_NON_NULL( + __node_ != nullptr, "Attempted to dereference a non-dereferenceable unordered container iterator"); return pointer_traits::pointer_to(__node_->__upcast()->__get_value()); } _LIBCPP_HIDE_FROM_ABI __hash_iterator& operator++() { + _LIBCPP_ASSERT_NON_NULL( + __node_ != nullptr, "Attempted to increment a non-incrementable unordered container iterator"); __node_ = __node_->__next_; return *this; } @@ -345,12 +350,20 @@ public: _LIBCPP_HIDE_FROM_ABI __hash_const_iterator(const __non_const_iterator& __x) _NOEXCEPT : __node_(__x.__node_) {} - _LIBCPP_HIDE_FROM_ABI reference operator*() const { return __node_->__upcast()->__get_value(); } + _LIBCPP_HIDE_FROM_ABI reference operator*() const { + _LIBCPP_ASSERT_NON_NULL( + __node_ != nullptr, "Attempted to dereference a non-dereferenceable unordered container const_iterator"); + return __node_->__upcast()->__get_value(); + } _LIBCPP_HIDE_FROM_ABI pointer operator->() const { + _LIBCPP_ASSERT_NON_NULL( + __node_ != nullptr, "Attempted to dereference a non-dereferenceable unordered container const_iterator"); return pointer_traits::pointer_to(__node_->__upcast()->__get_value()); } _LIBCPP_HIDE_FROM_ABI __hash_const_iterator& operator++() { + _LIBCPP_ASSERT_NON_NULL( + __node_ != nullptr, "Attempted to increment a non-incrementable unordered container const_iterator"); __node_ = __node_->__next_; return *this; } @@ -400,13 +413,21 @@ public: _LIBCPP_HIDE_FROM_ABI __hash_local_iterator() _NOEXCEPT : __node_(nullptr) {} - _LIBCPP_HIDE_FROM_ABI reference operator*() const { return __node_->__upcast()->__get_value(); } + 
_LIBCPP_HIDE_FROM_ABI reference operator*() const { + _LIBCPP_ASSERT_NON_NULL( + __node_ != nullptr, "Attempted to dereference a non-dereferenceable unordered container local_iterator"); + return __node_->__upcast()->__get_value(); + } _LIBCPP_HIDE_FROM_ABI pointer operator->() const { + _LIBCPP_ASSERT_NON_NULL( + __node_ != nullptr, "Attempted to dereference a non-dereferenceable unordered container local_iterator"); return pointer_traits::pointer_to(__node_->__upcast()->__get_value()); } _LIBCPP_HIDE_FROM_ABI __hash_local_iterator& operator++() { + _LIBCPP_ASSERT_NON_NULL( + __node_ != nullptr, "Attempted to increment a non-incrementable unordered container local_iterator"); __node_ = __node_->__next_; if (__node_ != nullptr && std::__constrain_hash(__node_->__hash(), __bucket_count_) != __bucket_) __node_ = nullptr; @@ -475,13 +496,21 @@ public: __bucket_(__x.__bucket_), __bucket_count_(__x.__bucket_count_) {} - _LIBCPP_HIDE_FROM_ABI reference operator*() const { return __node_->__upcast()->__get_value(); } + _LIBCPP_HIDE_FROM_ABI reference operator*() const { + _LIBCPP_ASSERT_NON_NULL( + __node_ != nullptr, "Attempted to dereference a non-dereferenceable unordered container const_local_iterator"); + return __node_->__upcast()->__get_value(); + } _LIBCPP_HIDE_FROM_ABI pointer operator->() const { + _LIBCPP_ASSERT_NON_NULL( + __node_ != nullptr, "Attempted to dereference a non-dereferenceable unordered container const_local_iterator"); return pointer_traits::pointer_to(__node_->__upcast()->__get_value()); } _LIBCPP_HIDE_FROM_ABI __hash_const_local_iterator& operator++() { + _LIBCPP_ASSERT_NON_NULL( + __node_ != nullptr, "Attempted to increment a non-incrementable unordered container const_local_iterator"); __node_ = __node_->__next_; if (__node_ != nullptr && std::__constrain_hash(__node_->__hash(), __bucket_count_) != __bucket_) __node_ = nullptr; @@ -671,11 +700,11 @@ private: // check for sane allocator pointer rebinding semantics. 
Rebinding the // allocator for a new pointer type should be exactly the same as rebinding // the pointer using 'pointer_traits'. - static_assert((is_same<__node_pointer, typename __node_traits::pointer>::value), + static_assert(is_same<__node_pointer, typename __node_traits::pointer>::value, "Allocator does not rebind pointers in a sane manner."); typedef __rebind_alloc<__node_traits, __first_node> __node_base_allocator; typedef allocator_traits<__node_base_allocator> __node_base_traits; - static_assert((is_same<__node_base_pointer, typename __node_base_traits::pointer>::value), + static_assert(is_same<__node_base_pointer, typename __node_base_traits::pointer>::value, "Allocator does not rebind pointers in a sane manner."); private: @@ -802,7 +831,7 @@ public: return __emplace_unique_key_args(_NodeTypes::__get_key(__x), std::move(__x)); } - template ::value> > + template ::value, int> = 0> _LIBCPP_HIDE_FROM_ABI pair __insert_unique(_Pp&& __x) { return __emplace_unique(std::forward<_Pp>(__x)); } @@ -899,13 +928,12 @@ public: _LIBCPP_HIDE_FROM_ABI void swap(__hash_table& __u) #if _LIBCPP_STD_VER <= 11 - _NOEXCEPT_(__is_nothrow_swappable::value&& __is_nothrow_swappable::value && + _NOEXCEPT_(__is_nothrow_swappable_v&& __is_nothrow_swappable_v && (!allocator_traits<__pointer_allocator>::propagate_on_container_swap::value || - __is_nothrow_swappable<__pointer_allocator>::value) && - (!__node_traits::propagate_on_container_swap::value || - __is_nothrow_swappable<__node_allocator>::value)); + __is_nothrow_swappable_v<__pointer_allocator>) && + (!__node_traits::propagate_on_container_swap::value || __is_nothrow_swappable_v<__node_allocator>)); #else - _NOEXCEPT_(__is_nothrow_swappable::value&& __is_nothrow_swappable::value); + _NOEXCEPT_(__is_nothrow_swappable_v&& __is_nothrow_swappable_v); #endif _LIBCPP_HIDE_FROM_ABI size_type max_bucket_count() const _NOEXCEPT { return max_size(); } @@ -1072,8 +1100,8 @@ __hash_table<_Tp, _Hash, _Equal, 
_Alloc>::__hash_table(__hash_table&& __u, const template __hash_table<_Tp, _Hash, _Equal, _Alloc>::~__hash_table() { #if defined(_LIBCPP_CXX03_LANG) - static_assert((is_copy_constructible::value), "Predicate must be copy-constructible."); - static_assert((is_copy_constructible::value), "Hasher must be copy-constructible."); + static_assert(is_copy_constructible::value, "Predicate must be copy-constructible."); + static_assert(is_copy_constructible::value, "Hasher must be copy-constructible."); #endif __deallocate_node(__p1_.first().__next_); @@ -1199,7 +1227,7 @@ template void __hash_table<_Tp, _Hash, _Equal, _Alloc>::__assign_unique(_InputIterator __first, _InputIterator __last) { typedef iterator_traits<_InputIterator> _ITraits; typedef typename _ITraits::value_type _ItValueType; - static_assert((is_same<_ItValueType, __container_value_type>::value), + static_assert(is_same<_ItValueType, __container_value_type>::value, "__assign_unique may only be called with the containers value type"); if (bucket_count() != 0) { @@ -1956,12 +1984,12 @@ __hash_table<_Tp, _Hash, _Equal, _Alloc>::__equal_range_multi(const _Key& __k) c template void __hash_table<_Tp, _Hash, _Equal, _Alloc>::swap(__hash_table& __u) #if _LIBCPP_STD_VER <= 11 - _NOEXCEPT_(__is_nothrow_swappable::value&& __is_nothrow_swappable::value && + _NOEXCEPT_(__is_nothrow_swappable_v&& __is_nothrow_swappable_v && (!allocator_traits<__pointer_allocator>::propagate_on_container_swap::value || - __is_nothrow_swappable<__pointer_allocator>::value) && - (!__node_traits::propagate_on_container_swap::value || __is_nothrow_swappable<__node_allocator>::value)) + __is_nothrow_swappable_v<__pointer_allocator>) && + (!__node_traits::propagate_on_container_swap::value || __is_nothrow_swappable_v<__node_allocator>)) #else - _NOEXCEPT_(__is_nothrow_swappable::value&& __is_nothrow_swappable::value) + _NOEXCEPT_(__is_nothrow_swappable_v&& __is_nothrow_swappable_v) #endif { _LIBCPP_ASSERT_COMPATIBLE_ALLOCATOR( diff --git 
a/lib/libcxx/include/__iterator/access.h b/lib/libcxx/include/__iterator/access.h index 5c6090eeb40c..acc4f60bf697 100644 --- a/lib/libcxx/include/__iterator/access.h +++ b/lib/libcxx/include/__iterator/access.h @@ -54,8 +54,8 @@ _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 auto end(const _Cp& __c) -> # if _LIBCPP_STD_VER >= 14 template -_LIBCPP_HIDE_FROM_ABI constexpr auto cbegin(const _Cp& __c) noexcept(noexcept(std::begin(__c))) - -> decltype(std::begin(__c)) { +_LIBCPP_HIDE_FROM_ABI constexpr auto +cbegin(const _Cp& __c) noexcept(noexcept(std::begin(__c))) -> decltype(std::begin(__c)) { return std::begin(__c); } diff --git a/lib/libcxx/include/__iterator/advance.h b/lib/libcxx/include/__iterator/advance.h index 73473f899eac..296db1aaab65 100644 --- a/lib/libcxx/include/__iterator/advance.h +++ b/lib/libcxx/include/__iterator/advance.h @@ -61,7 +61,7 @@ __advance(_RandIter& __i, typename iterator_traits<_RandIter>::difference_type _ template < class _InputIter, class _Distance, class _IntegralDistance = decltype(std::__convert_to_integral(std::declval<_Distance>())), - class = __enable_if_t::value> > + __enable_if_t::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 void advance(_InputIter& __i, _Distance __orig_n) { typedef typename iterator_traits<_InputIter>::difference_type _Difference; _Difference __n = static_cast<_Difference>(std::__convert_to_integral(__orig_n)); @@ -170,14 +170,14 @@ struct __fn { } else { // Otherwise, if `n` is non-negative, while `bool(i != bound_sentinel)` is true, increments `i` but at // most `n` times. - while (__i != __bound_sentinel && __n > 0) { + while (__n > 0 && __i != __bound_sentinel) { ++__i; --__n; } // Otherwise, while `bool(i != bound_sentinel)` is true, decrements `i` but at most `-n` times. 
if constexpr (bidirectional_iterator<_Ip> && same_as<_Ip, _Sp>) { - while (__i != __bound_sentinel && __n < 0) { + while (__n < 0 && __i != __bound_sentinel) { --__i; ++__n; } diff --git a/lib/libcxx/include/__iterator/aliasing_iterator.h b/lib/libcxx/include/__iterator/aliasing_iterator.h new file mode 100644 index 000000000000..94ba577078b5 --- /dev/null +++ b/lib/libcxx/include/__iterator/aliasing_iterator.h @@ -0,0 +1,127 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#ifndef _LIBCPP___ITERATOR_ALIASING_ITERATOR_H +#define _LIBCPP___ITERATOR_ALIASING_ITERATOR_H + +#include <__config> +#include <__iterator/iterator_traits.h> +#include <__memory/pointer_traits.h> +#include <__type_traits/is_trivial.h> +#include + +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +# pragma GCC system_header +#endif + +// This iterator wrapper is used to type-pun an iterator to return a different type. This is done without UB by not +// actually punning the type, but instead inspecting the object representation of the base type and copying that into +// an instance of the alias type. For that reason the alias type has to be trivial. The alias is returned as a prvalue +// when derferencing the iterator, since it is temporary storage. This wrapper is used to vectorize some algorithms. 
+ +_LIBCPP_BEGIN_NAMESPACE_STD + +template +struct __aliasing_iterator_wrapper { + class __iterator { + _BaseIter __base_ = nullptr; + + using __iter_traits = iterator_traits<_BaseIter>; + using __base_value_type = typename __iter_traits::value_type; + + static_assert(__has_random_access_iterator_category<_BaseIter>::value, + "The base iterator has to be a random access iterator!"); + + public: + using iterator_category = random_access_iterator_tag; + using value_type = _Alias; + using difference_type = ptrdiff_t; + using reference = value_type&; + using pointer = value_type*; + + static_assert(is_trivial::value); + static_assert(sizeof(__base_value_type) == sizeof(value_type)); + + _LIBCPP_HIDE_FROM_ABI __iterator() = default; + _LIBCPP_HIDE_FROM_ABI __iterator(_BaseIter __base) _NOEXCEPT : __base_(__base) {} + + _LIBCPP_HIDE_FROM_ABI __iterator& operator++() _NOEXCEPT { + ++__base_; + return *this; + } + + _LIBCPP_HIDE_FROM_ABI __iterator operator++(int) _NOEXCEPT { + __iterator __tmp(*this); + ++__base_; + return __tmp; + } + + _LIBCPP_HIDE_FROM_ABI __iterator& operator--() _NOEXCEPT { + --__base_; + return *this; + } + + _LIBCPP_HIDE_FROM_ABI __iterator operator--(int) _NOEXCEPT { + __iterator __tmp(*this); + --__base_; + return __tmp; + } + + _LIBCPP_HIDE_FROM_ABI friend __iterator operator+(__iterator __iter, difference_type __n) _NOEXCEPT { + return __iterator(__iter.__base_ + __n); + } + + _LIBCPP_HIDE_FROM_ABI friend __iterator operator+(difference_type __n, __iterator __iter) _NOEXCEPT { + return __iterator(__n + __iter.__base_); + } + + _LIBCPP_HIDE_FROM_ABI __iterator& operator+=(difference_type __n) _NOEXCEPT { + __base_ += __n; + return *this; + } + + _LIBCPP_HIDE_FROM_ABI friend __iterator operator-(__iterator __iter, difference_type __n) _NOEXCEPT { + return __iterator(__iter.__base_ - __n); + } + + _LIBCPP_HIDE_FROM_ABI friend difference_type operator-(__iterator __lhs, __iterator __rhs) _NOEXCEPT { + return __lhs.__base_ - __rhs.__base_; + } + + 
_LIBCPP_HIDE_FROM_ABI __iterator& operator-=(difference_type __n) _NOEXCEPT { + __base_ -= __n; + return *this; + } + + _LIBCPP_HIDE_FROM_ABI _BaseIter __base() const _NOEXCEPT { return __base_; } + + _LIBCPP_HIDE_FROM_ABI _Alias operator*() const _NOEXCEPT { + _Alias __val; + __builtin_memcpy(&__val, std::__to_address(__base_), sizeof(value_type)); + return __val; + } + + _LIBCPP_HIDE_FROM_ABI value_type operator[](difference_type __n) const _NOEXCEPT { return *(*this + __n); } + + _LIBCPP_HIDE_FROM_ABI friend bool operator==(const __iterator& __lhs, const __iterator& __rhs) _NOEXCEPT { + return __lhs.__base_ == __rhs.__base_; + } + + _LIBCPP_HIDE_FROM_ABI friend bool operator!=(const __iterator& __lhs, const __iterator& __rhs) _NOEXCEPT { + return __lhs.__base_ != __rhs.__base_; + } + }; +}; + +// This is required to avoid ADL instantiations on _BaseT +template +using __aliasing_iterator = typename __aliasing_iterator_wrapper<_BaseT, _Alias>::__iterator; + +_LIBCPP_END_NAMESPACE_STD + +#endif // _LIBCPP___ITERATOR_ALIASING_ITERATOR_H diff --git a/lib/libcxx/include/__iterator/bounded_iter.h b/lib/libcxx/include/__iterator/bounded_iter.h index 2a667648871c..8a81c9ffbfc3 100644 --- a/lib/libcxx/include/__iterator/bounded_iter.h +++ b/lib/libcxx/include/__iterator/bounded_iter.h @@ -11,6 +11,8 @@ #define _LIBCPP___ITERATOR_BOUNDED_ITER_H #include <__assert> +#include <__compare/ordering.h> +#include <__compare/three_way_comparable.h> #include <__config> #include <__iterator/iterator_traits.h> #include <__memory/pointer_traits.h> @@ -31,13 +33,20 @@ _LIBCPP_BEGIN_NAMESPACE_STD // Iterator wrapper that carries the valid range it is allowed to access. // // This is a simple iterator wrapper for contiguous iterators that points -// within a [begin, end) range and carries these bounds with it. The iterator -// ensures that it is pointing within that [begin, end) range when it is -// dereferenced. +// within a [begin, end] range and carries these bounds with it. 
The iterator +// ensures that it is pointing within [begin, end) range when it is +// dereferenced. It also ensures that it is never iterated outside of +// [begin, end]. This is important for two reasons: // -// Arithmetic operations are allowed and the bounds of the resulting iterator -// are not checked. Hence, it is possible to create an iterator pointing outside -// its range, but it is not possible to dereference it. +// 1. It allows `operator*` and `operator++` bounds checks to be `iter != end`. +// This is both less for the optimizer to prove, and aligns with how callers +// typically use iterators. +// +// 2. Advancing an iterator out of bounds is undefined behavior (see the table +// in [input.iterators]). In particular, when the underlying iterator is a +// pointer, it is undefined at the language level (see [expr.add]). If +// bounded iterators exhibited this undefined behavior, we risk compiler +// optimizations deleting non-redundant bounds checks. template ::value > > struct __bounded_iter { using value_type = typename iterator_traits<_Iterator>::value_type; @@ -51,14 +60,14 @@ struct __bounded_iter { // Create a singular iterator. // - // Such an iterator does not point to any object and is conceptually out of bounds, so it is - // not dereferenceable. Observing operations like comparison and assignment are valid. + // Such an iterator points past the end of an empty span, so it is not dereferenceable. + // Observing operations like comparison and assignment are valid. 
_LIBCPP_HIDE_FROM_ABI __bounded_iter() = default; _LIBCPP_HIDE_FROM_ABI __bounded_iter(__bounded_iter const&) = default; _LIBCPP_HIDE_FROM_ABI __bounded_iter(__bounded_iter&&) = default; - template ::value > > + template ::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR __bounded_iter(__bounded_iter<_OtherIterator> const& __other) _NOEXCEPT : __current_(__other.__current_), __begin_(__other.__begin_), @@ -70,18 +79,20 @@ struct __bounded_iter { private: // Create an iterator wrapping the given iterator, and whose bounds are described - // by the provided [begin, end) range. + // by the provided [begin, end] range. // - // This constructor does not check whether the resulting iterator is within its bounds. - // However, it does check that the provided [begin, end) range is a valid range (that - // is, begin <= end). + // The constructor does not check whether the resulting iterator is within its bounds. It is a + // responsibility of the container to ensure that the given bounds are valid. // // Since it is non-standard for iterators to have this constructor, __bounded_iter must // be created via `std::__make_bounded_iter`. - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 explicit __bounded_iter( - _Iterator __current, _Iterator __begin, _Iterator __end) + _LIBCPP_HIDE_FROM_ABI + _LIBCPP_CONSTEXPR_SINCE_CXX14 explicit __bounded_iter(_Iterator __current, _Iterator __begin, _Iterator __end) : __current_(__current), __begin_(__begin), __end_(__end) { - _LIBCPP_ASSERT_INTERNAL(__begin <= __end, "__bounded_iter(current, begin, end): [begin, end) is not a valid range"); + _LIBCPP_ASSERT_INTERNAL( + __begin <= __current, "__bounded_iter(current, begin, end): current and begin are inconsistent"); + _LIBCPP_ASSERT_INTERNAL( + __current <= __end, "__bounded_iter(current, begin, end): current and end are inconsistent"); } template @@ -90,30 +101,37 @@ struct __bounded_iter { public: // Dereference and indexing operations. 
// - // These operations check that the iterator is dereferenceable, that is within [begin, end). + // These operations check that the iterator is dereferenceable. Since the class invariant is + // that the iterator is always within `[begin, end]`, we only need to check it's not pointing to + // `end`. This is easier for the optimizer because it aligns with the `iter != container.end()` + // checks that typical callers already use (see + // https://github.com/llvm/llvm-project/issues/78829). _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 reference operator*() const _NOEXCEPT { _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( - __in_bounds(__current_), "__bounded_iter::operator*: Attempt to dereference an out-of-range iterator"); + __current_ != __end_, "__bounded_iter::operator*: Attempt to dereference an iterator at the end"); return *__current_; } _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 pointer operator->() const _NOEXCEPT { _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( - __in_bounds(__current_), "__bounded_iter::operator->: Attempt to dereference an out-of-range iterator"); + __current_ != __end_, "__bounded_iter::operator->: Attempt to dereference an iterator at the end"); return std::__to_address(__current_); } _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 reference operator[](difference_type __n) const _NOEXCEPT { _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( - __in_bounds(__current_ + __n), "__bounded_iter::operator[]: Attempt to index an iterator out-of-range"); + __n >= __begin_ - __current_, "__bounded_iter::operator[]: Attempt to index an iterator past the start"); + _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( + __n < __end_ - __current_, "__bounded_iter::operator[]: Attempt to index an iterator at or past the end"); return __current_[__n]; } // Arithmetic operations. // - // These operations do not check that the resulting iterator is within the bounds, since that - // would make it impossible to create a past-the-end iterator. 
+ // These operations check that the iterator remains within `[begin, end]`. _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 __bounded_iter& operator++() _NOEXCEPT { + _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( + __current_ != __end_, "__bounded_iter::operator++: Attempt to advance an iterator past the end"); ++__current_; return *this; } @@ -124,6 +142,8 @@ struct __bounded_iter { } _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 __bounded_iter& operator--() _NOEXCEPT { + _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( + __current_ != __begin_, "__bounded_iter::operator--: Attempt to rewind an iterator past the start"); --__current_; return *this; } @@ -134,6 +154,10 @@ struct __bounded_iter { } _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 __bounded_iter& operator+=(difference_type __n) _NOEXCEPT { + _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( + __n >= __begin_ - __current_, "__bounded_iter::operator+=: Attempt to rewind an iterator past the start"); + _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( + __n <= __end_ - __current_, "__bounded_iter::operator+=: Attempt to advance an iterator past the end"); __current_ += __n; return *this; } @@ -151,6 +175,10 @@ struct __bounded_iter { } _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX14 __bounded_iter& operator-=(difference_type __n) _NOEXCEPT { + _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( + __n <= __current_ - __begin_, "__bounded_iter::operator-=: Attempt to rewind an iterator past the start"); + _LIBCPP_ASSERT_VALID_ELEMENT_ACCESS( + __n >= __current_ - __end_, "__bounded_iter::operator-=: Attempt to advance an iterator past the end"); __current_ -= __n; return *this; } @@ -175,10 +203,15 @@ struct __bounded_iter { operator==(__bounded_iter const& __x, __bounded_iter const& __y) _NOEXCEPT { return __x.__current_ == __y.__current_; } + +#if _LIBCPP_STD_VER <= 17 _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR friend bool operator!=(__bounded_iter const& __x, __bounded_iter const& __y) _NOEXCEPT { return __x.__current_ != __y.__current_; } +#endif 
+ + // TODO(mordante) disable these overloads in the LLVM 20 release. _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR friend bool operator<(__bounded_iter const& __x, __bounded_iter const& __y) _NOEXCEPT { return __x.__current_ < __y.__current_; @@ -196,16 +229,30 @@ struct __bounded_iter { return __x.__current_ >= __y.__current_; } -private: - // Return whether the given iterator is in the bounds of this __bounded_iter. - _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR bool __in_bounds(_Iterator const& __iter) const { - return __iter >= __begin_ && __iter < __end_; +#if _LIBCPP_STD_VER >= 20 + _LIBCPP_HIDE_FROM_ABI constexpr friend strong_ordering + operator<=>(__bounded_iter const& __x, __bounded_iter const& __y) noexcept { + if constexpr (three_way_comparable<_Iterator, strong_ordering>) { + return __x.__current_ <=> __y.__current_; + } else { + if (__x.__current_ < __y.__current_) + return strong_ordering::less; + + if (__x.__current_ == __y.__current_) + return strong_ordering::equal; + + return strong_ordering::greater; + } } +#endif // _LIBCPP_STD_VER >= 20 +private: template friend struct pointer_traits; + template + friend struct __bounded_iter; _Iterator __current_; // current iterator - _Iterator __begin_, __end_; // valid range represented as [begin, end) + _Iterator __begin_, __end_; // valid range represented as [begin, end] }; template diff --git a/lib/libcxx/include/__iterator/common_iterator.h b/lib/libcxx/include/__iterator/common_iterator.h index 7b3f4610d531..199de2cc7337 100644 --- a/lib/libcxx/include/__iterator/common_iterator.h +++ b/lib/libcxx/include/__iterator/common_iterator.h @@ -124,7 +124,7 @@ class common_iterator { } template - _LIBCPP_HIDE_FROM_ABI decltype(auto) operator->() const + _LIBCPP_HIDE_FROM_ABI auto operator->() const requires indirectly_readable && (requires(const _I2& __i) { __i.operator->(); } || is_reference_v> || constructible_from, iter_reference_t<_I2>>) diff --git a/lib/libcxx/include/__iterator/concepts.h 
b/lib/libcxx/include/__iterator/concepts.h index afb7b821a99c..0a4878308d55 100644 --- a/lib/libcxx/include/__iterator/concepts.h +++ b/lib/libcxx/include/__iterator/concepts.h @@ -177,19 +177,19 @@ concept __has_arrow = input_iterator<_Ip> && (is_pointer_v<_Ip> || requires(_Ip template concept indirectly_unary_invocable = indirectly_readable<_It> && copy_constructible<_Fp> && invocable<_Fp&, iter_value_t<_It>&> && - invocable<_Fp&, iter_reference_t<_It>> && invocable<_Fp&, iter_common_reference_t<_It>> && + invocable<_Fp&, iter_reference_t<_It>> && common_reference_with< invoke_result_t<_Fp&, iter_value_t<_It>&>, invoke_result_t<_Fp&, iter_reference_t<_It>>>; template concept indirectly_regular_unary_invocable = indirectly_readable<_It> && copy_constructible<_Fp> && regular_invocable<_Fp&, iter_value_t<_It>&> && - regular_invocable<_Fp&, iter_reference_t<_It>> && regular_invocable<_Fp&, iter_common_reference_t<_It>> && + regular_invocable<_Fp&, iter_reference_t<_It>> && common_reference_with< invoke_result_t<_Fp&, iter_value_t<_It>&>, invoke_result_t<_Fp&, iter_reference_t<_It>>>; template concept indirect_unary_predicate = indirectly_readable<_It> && copy_constructible<_Fp> && predicate<_Fp&, iter_value_t<_It>&> && - predicate<_Fp&, iter_reference_t<_It>> && predicate<_Fp&, iter_common_reference_t<_It>>; + predicate<_Fp&, iter_reference_t<_It>>; template concept indirect_binary_predicate = @@ -197,8 +197,7 @@ concept indirect_binary_predicate = predicate<_Fp&, iter_value_t<_It1>&, iter_value_t<_It2>&> && predicate<_Fp&, iter_value_t<_It1>&, iter_reference_t<_It2>> && predicate<_Fp&, iter_reference_t<_It1>, iter_value_t<_It2>&> && - predicate<_Fp&, iter_reference_t<_It1>, iter_reference_t<_It2>> && - predicate<_Fp&, iter_common_reference_t<_It1>, iter_common_reference_t<_It2>>; + predicate<_Fp&, iter_reference_t<_It1>, iter_reference_t<_It2>>; template concept indirect_equivalence_relation = @@ -206,8 +205,7 @@ concept indirect_equivalence_relation = 
equivalence_relation<_Fp&, iter_value_t<_It1>&, iter_value_t<_It2>&> && equivalence_relation<_Fp&, iter_value_t<_It1>&, iter_reference_t<_It2>> && equivalence_relation<_Fp&, iter_reference_t<_It1>, iter_value_t<_It2>&> && - equivalence_relation<_Fp&, iter_reference_t<_It1>, iter_reference_t<_It2>> && - equivalence_relation<_Fp&, iter_common_reference_t<_It1>, iter_common_reference_t<_It2>>; + equivalence_relation<_Fp&, iter_reference_t<_It1>, iter_reference_t<_It2>>; template concept indirect_strict_weak_order = @@ -215,8 +213,7 @@ concept indirect_strict_weak_order = strict_weak_order<_Fp&, iter_value_t<_It1>&, iter_value_t<_It2>&> && strict_weak_order<_Fp&, iter_value_t<_It1>&, iter_reference_t<_It2>> && strict_weak_order<_Fp&, iter_reference_t<_It1>, iter_value_t<_It2>&> && - strict_weak_order<_Fp&, iter_reference_t<_It1>, iter_reference_t<_It2>> && - strict_weak_order<_Fp&, iter_common_reference_t<_It1>, iter_common_reference_t<_It2>>; + strict_weak_order<_Fp&, iter_reference_t<_It1>, iter_reference_t<_It2>>; template requires(indirectly_readable<_Its> && ...) 
&& invocable<_Fp, iter_reference_t<_Its>...> diff --git a/lib/libcxx/include/__iterator/counted_iterator.h b/lib/libcxx/include/__iterator/counted_iterator.h index 008c52fa87ce..ea2832e3b978 100644 --- a/lib/libcxx/include/__iterator/counted_iterator.h +++ b/lib/libcxx/include/__iterator/counted_iterator.h @@ -129,7 +129,7 @@ class counted_iterator return *this; } - _LIBCPP_HIDE_FROM_ABI decltype(auto) operator++(int) { + _LIBCPP_HIDE_FROM_ABI constexpr decltype(auto) operator++(int) { _LIBCPP_ASSERT_UNCATEGORIZED(__count_ > 0, "Iterator already at or past end."); --__count_; # ifndef _LIBCPP_HAS_NO_EXCEPTIONS diff --git a/lib/libcxx/include/__iterator/cpp17_iterator_concepts.h b/lib/libcxx/include/__iterator/cpp17_iterator_concepts.h index d1ad2b4e2848..ba3536b68609 100644 --- a/lib/libcxx/include/__iterator/cpp17_iterator_concepts.h +++ b/lib/libcxx/include/__iterator/cpp17_iterator_concepts.h @@ -14,10 +14,8 @@ #include <__concepts/same_as.h> #include <__config> #include <__iterator/iterator_traits.h> +#include <__type_traits/is_constructible.h> #include <__type_traits/is_convertible.h> -#include <__type_traits/is_copy_constructible.h> -#include <__type_traits/is_default_constructible.h> -#include <__type_traits/is_move_constructible.h> #include <__type_traits/is_signed.h> #include <__type_traits/is_void.h> #include <__utility/as_const.h> @@ -70,7 +68,7 @@ concept __cpp17_default_constructible = is_default_constructible_v<_Tp>; template concept __cpp17_iterator = __cpp17_copy_constructible<_Iter> && __cpp17_copy_assignable<_Iter> && __cpp17_destructible<_Iter> && - (is_signed_v<__iter_diff_t<_Iter>> || is_void_v<__iter_diff_t<_Iter>>)&&requires(_Iter __iter) { + (is_signed_v<__iter_diff_t<_Iter>> || is_void_v<__iter_diff_t<_Iter>>) && requires(_Iter __iter) { { *__iter }; { ++__iter } -> same_as<_Iter&>; }; @@ -159,29 +157,31 @@ concept __cpp17_random_access_iterator = _LIBCPP_END_NAMESPACE_STD # ifndef _LIBCPP_DISABLE_ITERATOR_CHECKS -# define 
_LIBCPP_REQUIRE_CPP17_INPUT_ITERATOR(iter_t) static_assert(::std::__cpp17_input_iterator); -# define _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(iter_t, write_t) \ - static_assert(::std::__cpp17_output_iterator); -# define _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(iter_t) static_assert(::std::__cpp17_forward_iterator); -# define _LIBCPP_REQUIRE_CPP17_BIDIRECTIONAL_ITERATOR(iter_t) \ - static_assert(::std::__cpp17_bidirectional_iterator); -# define _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(iter_t) \ - static_assert(::std::__cpp17_random_access_iterator); +# define _LIBCPP_REQUIRE_CPP17_INPUT_ITERATOR(iter_t, message) \ + static_assert(::std::__cpp17_input_iterator, message) +# define _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(iter_t, write_t, message) \ + static_assert(::std::__cpp17_output_iterator, message) +# define _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(iter_t, message) \ + static_assert(::std::__cpp17_forward_iterator, message) +# define _LIBCPP_REQUIRE_CPP17_BIDIRECTIONAL_ITERATOR(iter_t, message) \ + static_assert(::std::__cpp17_bidirectional_iterator, message) +# define _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(iter_t, message) \ + static_assert(::std::__cpp17_random_access_iterator, message) # else -# define _LIBCPP_REQUIRE_CPP17_INPUT_ITERATOR(iter_t) -# define _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(iter_t, write_t) -# define _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(iter_t) -# define _LIBCPP_REQUIRE_CPP17_BIDIRECTIONAL_ITERATOR(iter_t) -# define _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(iter_t) +# define _LIBCPP_REQUIRE_CPP17_INPUT_ITERATOR(iter_t, message) static_assert(true) +# define _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(iter_t, write_t, message) static_assert(true) +# define _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(iter_t, message) static_assert(true) +# define _LIBCPP_REQUIRE_CPP17_BIDIRECTIONAL_ITERATOR(iter_t, message) static_assert(true) +# define _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(iter_t, message) static_assert(true) # endif #else // 
_LIBCPP_STD_VER >= 20 -# define _LIBCPP_REQUIRE_CPP17_INPUT_ITERATOR(iter_t) -# define _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(iter_t, write_t) -# define _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(iter_t) -# define _LIBCPP_REQUIRE_CPP17_BIDIRECTIONAL_ITERATOR(iter_t) -# define _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(iter_t) +# define _LIBCPP_REQUIRE_CPP17_INPUT_ITERATOR(iter_t, message) static_assert(true) +# define _LIBCPP_REQUIRE_CPP17_OUTPUT_ITERATOR(iter_t, write_t, message) static_assert(true) +# define _LIBCPP_REQUIRE_CPP17_FORWARD_ITERATOR(iter_t, message) static_assert(true) +# define _LIBCPP_REQUIRE_CPP17_BIDIRECTIONAL_ITERATOR(iter_t, message) static_assert(true) +# define _LIBCPP_REQUIRE_CPP17_RANDOM_ACCESS_ITERATOR(iter_t, message) static_assert(true) #endif // _LIBCPP_STD_VER >= 20 diff --git a/lib/libcxx/include/__iterator/data.h b/lib/libcxx/include/__iterator/data.h index 398673906101..b7c1603652b0 100644 --- a/lib/libcxx/include/__iterator/data.h +++ b/lib/libcxx/include/__iterator/data.h @@ -23,12 +23,12 @@ _LIBCPP_BEGIN_NAMESPACE_STD #if _LIBCPP_STD_VER >= 17 template -constexpr _LIBCPP_HIDE_FROM_ABI auto data(_Cont& __c) _NOEXCEPT_(noexcept(__c.data())) -> decltype(__c.data()) { +constexpr _LIBCPP_HIDE_FROM_ABI auto data(_Cont& __c) noexcept(noexcept(__c.data())) -> decltype(__c.data()) { return __c.data(); } template -constexpr _LIBCPP_HIDE_FROM_ABI auto data(const _Cont& __c) _NOEXCEPT_(noexcept(__c.data())) -> decltype(__c.data()) { +constexpr _LIBCPP_HIDE_FROM_ABI auto data(const _Cont& __c) noexcept(noexcept(__c.data())) -> decltype(__c.data()) { return __c.data(); } diff --git a/lib/libcxx/include/__iterator/empty.h b/lib/libcxx/include/__iterator/empty.h index 3ca0aff6be46..773f2776955b 100644 --- a/lib/libcxx/include/__iterator/empty.h +++ b/lib/libcxx/include/__iterator/empty.h @@ -23,18 +23,18 @@ _LIBCPP_BEGIN_NAMESPACE_STD #if _LIBCPP_STD_VER >= 17 template -_LIBCPP_NODISCARD_AFTER_CXX17 _LIBCPP_HIDE_FROM_ABI constexpr auto empty(const 
_Cont& __c) - _NOEXCEPT_(noexcept(__c.empty())) -> decltype(__c.empty()) { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr auto +empty(const _Cont& __c) noexcept(noexcept(__c.empty())) -> decltype(__c.empty()) { return __c.empty(); } template -_LIBCPP_NODISCARD_AFTER_CXX17 _LIBCPP_HIDE_FROM_ABI constexpr bool empty(const _Tp (&)[_Sz]) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool empty(const _Tp (&)[_Sz]) noexcept { return false; } template -_LIBCPP_NODISCARD_AFTER_CXX17 _LIBCPP_HIDE_FROM_ABI constexpr bool empty(initializer_list<_Ep> __il) noexcept { +[[nodiscard]] _LIBCPP_HIDE_FROM_ABI constexpr bool empty(initializer_list<_Ep> __il) noexcept { return __il.size() == 0; } diff --git a/lib/libcxx/include/__iterator/iter_move.h b/lib/libcxx/include/__iterator/iter_move.h index 202b94cccc5a..ba8aed3c0ffb 100644 --- a/lib/libcxx/include/__iterator/iter_move.h +++ b/lib/libcxx/include/__iterator/iter_move.h @@ -35,7 +35,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD namespace ranges { namespace __iter_move { -void iter_move(); +void iter_move() = delete; template concept __unqualified_iter_move = __class_or_enum> && requires(_Tp&& __t) { diff --git a/lib/libcxx/include/__iterator/iter_swap.h b/lib/libcxx/include/__iterator/iter_swap.h index 52c3f095e7fb..01ab1b97d650 100644 --- a/lib/libcxx/include/__iterator/iter_swap.h +++ b/lib/libcxx/include/__iterator/iter_swap.h @@ -42,7 +42,7 @@ void iter_swap(_I1, _I2) = delete; template concept __unqualified_iter_swap = - (__class_or_enum> || __class_or_enum>)&&requires(_T1&& __x, _T2&& __y) { + (__class_or_enum> || __class_or_enum>) && requires(_T1&& __x, _T2&& __y) { // NOLINTNEXTLINE(libcpp-robust-against-adl) iter_swap ADL calls should only be made through ranges::iter_swap iter_swap(std::forward<_T1>(__x), std::forward<_T2>(__y)); }; diff --git a/lib/libcxx/include/__iterator/iterator_traits.h b/lib/libcxx/include/__iterator/iterator_traits.h index 2cd82525ba06..11af9e301842 100644 --- 
a/lib/libcxx/include/__iterator/iterator_traits.h +++ b/lib/libcxx/include/__iterator/iterator_traits.h @@ -21,7 +21,6 @@ #include <__fwd/pair.h> #include <__iterator/incrementable_traits.h> #include <__iterator/readable_traits.h> -#include <__type_traits/add_const.h> #include <__type_traits/common_reference.h> #include <__type_traits/conditional.h> #include <__type_traits/disjunction.h> @@ -493,8 +492,8 @@ using __iter_mapped_type = typename iterator_traits<_InputIterator>::value_type: template using __iter_to_alloc_type = - pair< typename add_const::value_type::first_type>::type, - typename iterator_traits<_InputIterator>::value_type::second_type>; + pair::value_type::first_type, + typename iterator_traits<_InputIterator>::value_type::second_type>; template using __iterator_category_type = typename iterator_traits<_Iter>::iterator_category; diff --git a/lib/libcxx/include/__iterator/move_iterator.h b/lib/libcxx/include/__iterator/move_iterator.h index d1bd0138bdda..a1c53e9bd2b5 100644 --- a/lib/libcxx/include/__iterator/move_iterator.h +++ b/lib/libcxx/include/__iterator/move_iterator.h @@ -105,9 +105,8 @@ class _LIBCPP_TEMPLATE_VIS move_iterator typedef iterator_type pointer; typedef typename iterator_traits::reference __reference; - typedef typename conditional< is_reference<__reference>::value, - __libcpp_remove_reference_t<__reference>&&, - __reference >::type reference; + typedef __conditional_t::value, __libcpp_remove_reference_t<__reference>&&, __reference> + reference; #endif // _LIBCPP_STD_VER >= 20 _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 explicit move_iterator(_Iter __i) : __current_(std::move(__i)) {} @@ -157,14 +156,14 @@ class _LIBCPP_TEMPLATE_VIS move_iterator #else _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 move_iterator() : __current_() {} - template ::value && is_convertible::value > > + template ::value && is_convertible::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 move_iterator(const 
move_iterator<_Up>& __u) : __current_(__u.base()) {} template ::value && is_convertible::value && - is_assignable<_Iter&, const _Up&>::value > > + __enable_if_t< !is_same<_Up, _Iter>::value && is_convertible::value && + is_assignable<_Iter&, const _Up&>::value, + int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 move_iterator& operator=(const move_iterator<_Up>& __u) { __current_ = __u.base(); return *this; @@ -292,8 +291,8 @@ operator>=(const move_iterator<_Iter1>& __x, const move_iterator<_Iter2>& __y) { #if _LIBCPP_STD_VER >= 20 template _Iter2> inline _LIBCPP_HIDE_FROM_ABI constexpr auto -operator<=>(const move_iterator<_Iter1>& __x, const move_iterator<_Iter2>& __y) - -> compare_three_way_result_t<_Iter1, _Iter2> { +operator<=>(const move_iterator<_Iter1>& __x, + const move_iterator<_Iter2>& __y) -> compare_three_way_result_t<_Iter1, _Iter2> { return __x.base() <=> __y.base(); } #endif // _LIBCPP_STD_VER >= 20 @@ -330,6 +329,12 @@ operator+(typename move_iterator<_Iter>::difference_type __n, const move_iterato } #endif // _LIBCPP_STD_VER >= 20 +#if _LIBCPP_STD_VER >= 20 +template + requires(!sized_sentinel_for<_Iter1, _Iter2>) +inline constexpr bool disable_sized_sentinel_for, move_iterator<_Iter2>> = true; +#endif // _LIBCPP_STD_VER >= 20 + template inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 move_iterator<_Iter> make_move_iterator(_Iter __i) { return move_iterator<_Iter>(std::move(__i)); diff --git a/lib/libcxx/include/__iterator/ranges_iterator_traits.h b/lib/libcxx/include/__iterator/ranges_iterator_traits.h index a30864199df7..859e7082048a 100644 --- a/lib/libcxx/include/__iterator/ranges_iterator_traits.h +++ b/lib/libcxx/include/__iterator/ranges_iterator_traits.h @@ -13,7 +13,6 @@ #include <__config> #include <__fwd/pair.h> #include <__ranges/concepts.h> -#include <__type_traits/add_const.h> #include <__type_traits/remove_const.h> #if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) @@ -32,8 +31,7 @@ using __range_mapped_type = 
typename ranges::range_value_t<_Range>::second_type; template using __range_to_alloc_type = - pair::first_type>, - typename ranges::range_value_t<_Range>::second_type>; + pair::first_type, typename ranges::range_value_t<_Range>::second_type>; #endif diff --git a/lib/libcxx/include/__iterator/reverse_iterator.h b/lib/libcxx/include/__iterator/reverse_iterator.h index 79b48bcea57a..50c0f21eaa28 100644 --- a/lib/libcxx/include/__iterator/reverse_iterator.h +++ b/lib/libcxx/include/__iterator/reverse_iterator.h @@ -34,7 +34,7 @@ #include <__type_traits/enable_if.h> #include <__type_traits/is_assignable.h> #include <__type_traits/is_convertible.h> -#include <__type_traits/is_nothrow_copy_constructible.h> +#include <__type_traits/is_nothrow_constructible.h> #include <__type_traits/is_pointer.h> #include <__type_traits/is_same.h> #include <__utility/declval.h> @@ -96,14 +96,14 @@ class _LIBCPP_TEMPLATE_VIS reverse_iterator _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 explicit reverse_iterator(_Iter __x) : __t_(__x), current(__x) {} - template ::value && is_convertible<_Up const&, _Iter>::value > > + template ::value && is_convertible<_Up const&, _Iter>::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 reverse_iterator(const reverse_iterator<_Up>& __u) : __t_(__u.base()), current(__u.base()) {} template ::value && is_convertible<_Up const&, _Iter>::value && - is_assignable<_Iter&, _Up const&>::value > > + __enable_if_t::value && is_convertible<_Up const&, _Iter>::value && + is_assignable<_Iter&, _Up const&>::value, + int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 reverse_iterator& operator=(const reverse_iterator<_Up>& __u) { __t_ = current = __u.base(); return *this; @@ -113,14 +113,14 @@ class _LIBCPP_TEMPLATE_VIS reverse_iterator _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 explicit reverse_iterator(_Iter __x) : current(__x) {} - template ::value && is_convertible<_Up const&, _Iter>::value > > + template ::value && 
is_convertible<_Up const&, _Iter>::value, int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 reverse_iterator(const reverse_iterator<_Up>& __u) : current(__u.base()) {} template ::value && is_convertible<_Up const&, _Iter>::value && - is_assignable<_Iter&, _Up const&>::value > > + __enable_if_t::value && is_convertible<_Up const&, _Iter>::value && + is_assignable<_Iter&, _Up const&>::value, + int> = 0> _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 reverse_iterator& operator=(const reverse_iterator<_Up>& __u) { current = __u.base(); return *this; @@ -184,7 +184,7 @@ class _LIBCPP_TEMPLATE_VIS reverse_iterator #if _LIBCPP_STD_VER >= 20 _LIBCPP_HIDE_FROM_ABI friend constexpr iter_rvalue_reference_t<_Iter> iter_move(const reverse_iterator& __i) noexcept( - is_nothrow_copy_constructible_v<_Iter>&& noexcept(ranges::iter_move(--std::declval<_Iter&>()))) { + is_nothrow_copy_constructible_v<_Iter> && noexcept(ranges::iter_move(--std::declval<_Iter&>()))) { auto __tmp = __i.base(); return ranges::iter_move(--__tmp); } @@ -192,9 +192,8 @@ class _LIBCPP_TEMPLATE_VIS reverse_iterator template _Iter2> _LIBCPP_HIDE_FROM_ABI friend constexpr void iter_swap(const reverse_iterator& __x, const reverse_iterator<_Iter2>& __y) noexcept( - is_nothrow_copy_constructible_v<_Iter> && - is_nothrow_copy_constructible_v<_Iter2>&& noexcept( - ranges::iter_swap(--std::declval<_Iter&>(), --std::declval<_Iter2&>()))) { + is_nothrow_copy_constructible_v<_Iter> && is_nothrow_copy_constructible_v<_Iter2> && + noexcept(ranges::iter_swap(--std::declval<_Iter&>(), --std::declval<_Iter2&>()))) { auto __xtmp = __x.base(); auto __ytmp = __y.base(); ranges::iter_swap(--__xtmp, --__ytmp); @@ -285,8 +284,8 @@ operator<=>(const reverse_iterator<_Iter1>& __x, const reverse_iterator<_Iter2>& #ifndef _LIBCPP_CXX03_LANG template inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 auto -operator-(const reverse_iterator<_Iter1>& __x, const reverse_iterator<_Iter2>& __y) - -> 
decltype(__y.base() - __x.base()) { +operator-(const reverse_iterator<_Iter1>& __x, + const reverse_iterator<_Iter2>& __y) -> decltype(__y.base() - __x.base()) { return __y.base() - __x.base(); } #else @@ -316,172 +315,6 @@ inline _LIBCPP_HIDE_FROM_ABI _LIBCPP_CONSTEXPR_SINCE_CXX17 reverse_iterator<_Ite } #endif -#if _LIBCPP_STD_VER <= 17 -template -using __unconstrained_reverse_iterator = reverse_iterator<_Iter>; -#else - -// __unconstrained_reverse_iterator allows us to use reverse iterators in the implementation of algorithms by working -// around a language issue in C++20. -// In C++20, when a reverse iterator wraps certain C++20-hostile iterators, calling comparison operators on it will -// result in a compilation error. However, calling comparison operators on the pristine hostile iterator is not -// an error. Thus, we cannot use reverse_iterators in the implementation of an algorithm that accepts a -// C++20-hostile iterator. This class is an internal workaround -- it is a copy of reverse_iterator with -// tweaks to make it support hostile iterators. -// -// A C++20-hostile iterator is one that defines a comparison operator where one of the arguments is an exact match -// and the other requires an implicit conversion, for example: -// friend bool operator==(const BaseIter&, const DerivedIter&); -// -// C++20 rules for rewriting equality operators create another overload of this function with parameters reversed: -// friend bool operator==(const DerivedIter&, const BaseIter&); -// -// This creates an ambiguity in overload resolution. -// -// Clang treats this ambiguity differently in different contexts. When operator== is actually called in the function -// body, the code is accepted with a warning. When a concept requires operator== to be a valid expression, however, -// it evaluates to false. 
Thus, the implementation of reverse_iterator::operator== can actually call operator== on its -// base iterators, but the constraints on reverse_iterator::operator== prevent it from being considered during overload -// resolution. This class simply removes the problematic constraints from comparison functions. -template -class __unconstrained_reverse_iterator { - _Iter __iter_; - -public: - static_assert(__has_bidirectional_iterator_category<_Iter>::value || bidirectional_iterator<_Iter>); - - using iterator_type = _Iter; - using iterator_category = - _If<__has_random_access_iterator_category<_Iter>::value, - random_access_iterator_tag, - __iterator_category_type<_Iter>>; - using pointer = __iterator_pointer_type<_Iter>; - using value_type = iter_value_t<_Iter>; - using difference_type = iter_difference_t<_Iter>; - using reference = iter_reference_t<_Iter>; - - _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator() = default; - _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator(const __unconstrained_reverse_iterator&) = default; - _LIBCPP_HIDE_FROM_ABI constexpr explicit __unconstrained_reverse_iterator(_Iter __iter) : __iter_(__iter) {} - - _LIBCPP_HIDE_FROM_ABI constexpr _Iter base() const { return __iter_; } - _LIBCPP_HIDE_FROM_ABI constexpr reference operator*() const { - auto __tmp = __iter_; - return *--__tmp; - } - - _LIBCPP_HIDE_FROM_ABI constexpr pointer operator->() const { - if constexpr (is_pointer_v<_Iter>) { - return std::prev(__iter_); - } else { - return std::prev(__iter_).operator->(); - } - } - - _LIBCPP_HIDE_FROM_ABI friend constexpr iter_rvalue_reference_t<_Iter> - iter_move(const __unconstrained_reverse_iterator& __i) noexcept( - is_nothrow_copy_constructible_v<_Iter>&& noexcept(ranges::iter_move(--std::declval<_Iter&>()))) { - auto __tmp = __i.base(); - return ranges::iter_move(--__tmp); - } - - _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator& operator++() { - --__iter_; - return *this; - } - - 
_LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator operator++(int) { - auto __tmp = *this; - --__iter_; - return __tmp; - } - - _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator& operator--() { - ++__iter_; - return *this; - } - - _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator operator--(int) { - auto __tmp = *this; - ++__iter_; - return __tmp; - } - - _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator& operator+=(difference_type __n) { - __iter_ -= __n; - return *this; - } - - _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator& operator-=(difference_type __n) { - __iter_ += __n; - return *this; - } - - _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator operator+(difference_type __n) const { - return __unconstrained_reverse_iterator(__iter_ - __n); - } - - _LIBCPP_HIDE_FROM_ABI constexpr __unconstrained_reverse_iterator operator-(difference_type __n) const { - return __unconstrained_reverse_iterator(__iter_ + __n); - } - - _LIBCPP_HIDE_FROM_ABI constexpr difference_type operator-(const __unconstrained_reverse_iterator& __other) const { - return __other.__iter_ - __iter_; - } - - _LIBCPP_HIDE_FROM_ABI constexpr auto operator[](difference_type __n) const { return *(*this + __n); } - - // Deliberately unconstrained unlike the comparison functions in `reverse_iterator` -- see the class comment for the - // rationale. 
- _LIBCPP_HIDE_FROM_ABI friend constexpr bool - operator==(const __unconstrained_reverse_iterator& __lhs, const __unconstrained_reverse_iterator& __rhs) { - return __lhs.base() == __rhs.base(); - } - - _LIBCPP_HIDE_FROM_ABI friend constexpr bool - operator!=(const __unconstrained_reverse_iterator& __lhs, const __unconstrained_reverse_iterator& __rhs) { - return __lhs.base() != __rhs.base(); - } - - _LIBCPP_HIDE_FROM_ABI friend constexpr bool - operator<(const __unconstrained_reverse_iterator& __lhs, const __unconstrained_reverse_iterator& __rhs) { - return __lhs.base() > __rhs.base(); - } - - _LIBCPP_HIDE_FROM_ABI friend constexpr bool - operator>(const __unconstrained_reverse_iterator& __lhs, const __unconstrained_reverse_iterator& __rhs) { - return __lhs.base() < __rhs.base(); - } - - _LIBCPP_HIDE_FROM_ABI friend constexpr bool - operator<=(const __unconstrained_reverse_iterator& __lhs, const __unconstrained_reverse_iterator& __rhs) { - return __lhs.base() >= __rhs.base(); - } - - _LIBCPP_HIDE_FROM_ABI friend constexpr bool - operator>=(const __unconstrained_reverse_iterator& __lhs, const __unconstrained_reverse_iterator& __rhs) { - return __lhs.base() <= __rhs.base(); - } -}; - -#endif // _LIBCPP_STD_VER <= 17 - -template

        -xr$|H-8K$ez0l4^eEI_Lwa zF^2CHc|fkw^DPc}eFvGWE)E-H#L2z+P9heRLMIUmN+Cbw67ePC+vq6mhZSS!A^1yQD?mtE#E_HisYt#2@_spQgq51=jRYpiaNZe zC`x&Rxei27n5}D!jZFI|b60`Wz>cgbauK~u4^6KjRW2|}vzyf?NPSA5mAF1amPLU- zryh0QHJMqnJNk@;?;KmvTQYAiHluHDO~L4!H>_PNtesc}TN*8PvI7xn3P*Q{SXlN* z%Dhi?0H8qqOb*m2F`;2T-Whc7iOI z2f3DzI;>CbP3d#mLCV!Zt_lM0$l4+a=-ujPntq;ApG^IXo}V>%{>0&IW^IbIqbr_H z=Il+&#Q5A=t&(emvw07Rjo(?z$2-uaR)jjPHI{h7waze{PW8!E9yzhrh<#z%BPmNf zwaOz4Yn?f#pi-zBo%yZGm z;_94ZB};B5-0E=Dajs+DXbg2Mxv3r<=v;p(b&L%Z*PnZ2bik&1eAy!@b?jT^k?{dd zbuzK+kyHoUKTupc?ky7oN`yT3;N_T#Aa=>`Q z)UroXf;qg(BU6rGrk6dE63mfR9+`FoGqdcGlwh9fAQ%VO%=1n#y!VA8n9Y};U}g>5 zD+y-S2*##;&QHkly{%&%bEu|h^aG}B&AUgOsPtm0WBSzE1l0gB`2$W=p5QZC(32bu zP!#)#fmBkSTJdml+5^r@7gRJX^@yJ~z==X9V#k_PlDaoWmO21dlg;5-2kYJZG@oYVl2sVCe(YAC+PHHrlr#Y?$5o{i$vie8e_ zZWQWmVO?=<13BB#DNh(^i-2}62Wd$ljSdR2FXL(nklY^d*kEyn^mH<= zW_|;PG?N**jSmX9R~A5;u$JKp-0tt_lqU?N2~vNP3|iAIEFIu7nKY2DvhhcJ!vEqOe(J;Fy~M2}dSjPLnWiplsu6E}Mko zD>_I{gUUpL^B$l4v(F{}gkpVe)ilRykNd71IDO>%lz~nmCtuNu z+Ds%>BiAsgq~rkS{Nb0@$CXxhc}M8~kt?d((yYCY418qXE!8--O9}<5M^fE~Ufq}W zI+sxD_FhRJ6gEy8TkZzE(Ip$5SH>D;S+yO+b?tt#jVgE9SJvA@TZ7+UPiEV$Vk$&B zr(ddURJ0VM+k3(6*5J#O)VRZ4&aYQ;Koy1|iEudtoT#_n)8Heyau*3oqvU@DyL!uf z9;3QJw`K98r!Ey|u>A}4O7zDD5pWiG5&gzSvIaLT-QNQ}6@7-3dA!gAypVFV!xIXE+hzGj#K-U98cs+?O;(@v~e4yE`Yfj0nt(ouU`4okr>gOn{p-}4Z zn7sUs2lx@~foz-&cy=ZFDJ`H&N=&5eCWcErpZui~6E%EdqEqYJHD!%OG_6-!vNSn~ z3pf>osrj2VB!iI_UYeoA{74k#FsjOq`}56s&q*L>dA}1hr)5ZYw6SwAhQTvP6Q{!$d6<~jKw&)>UsZ~2%(`Pd&k%b zu*0~=DH8NjU;O>BM#{6S*N+Xikz1&(Sgqu!OQX2k;g0fs>}97 zygneTo^qh(NUQF0RtkS?|78>E2J7W)ce-bM)-r zvWgE=Y^jk@K5#g$4d=eFWk{&2dSmFAvJ?<<%z6Wzzt6eHyy;4;V37rx92t3O#ZBhi{vyartC$)ZKId3m%QK6u9~+Z+80c)!y`sUI3pME1veV3k(ch;j ze8dRz6eLC}X$lhf64J=_A6WmP3U4`&!LvVOXEa-sFIPsuJn@vx2VE=2l}1{BvXDK^ z2H0!x7si#PRO5yNye7cid5a7P%b=A4ZW*+4z{R*~0{mj0F&+Vds>o$_%rqOp9qxtM z2w0|bt8$9EG33Hj)>q6_*Lg>ZBwn5^62uuNt442AL??;Ada}HAEXA$C*N39}b?2DB zgV{MmQ5;y+@qU%nVAV5w-wjx)0uUBbSEA%FRiqJ8st|JOCnOx$N=<@#vgR&T(yJ%k 
zZyYvR9F{>W>LdizOF)6;ttmuoVJ|Y94}}xCUod(QJ(zA}Go?Lu@r~j=${giEEc?P1KnjSp z=LtmK-)`JcG7z6BO9|o|6=&`6$6b8w1X6c^PB~dFGmw;EP~b`6@Mt)sq$o^ckCaHO zJq{YlX+$bjG`+ZX7gwFYsnli>Dsf9?KxHyFe*|eR<Js!|Jtd&?Y>JXU;Ju0ST=%IcKic!5)qjiLPV~r!Vo{ZFBEF}H8B~V?} z>MhV5l5ICh3d1Vvlx8H+Bo7y4sFbtSRw#wB^8pTX$PdR;RX^pl(W?2TM-*P-h&1ae zEN`>!2Bhs*2Bb7$#Q~A07K&(x$^n%RrqKBE52kd9gV8eU$(NZM&aEXI&aJGs;&5n3 zIup$g^}sqOsPM$IjftYIXtVtyS8TV-NUm##odjMQYN^y7blJ0lE{!I8ph@Yp47>LR zWBm}D4&9lunkv1bFVyUQ2`CY(DwgnwBUw{~$$`}k8G=5{NLHef?%LPRyu_#1J1eZ~ z-ROfx*s2}5;x!%pyLT4BSBgnvZ#5tx%N|n}_wH#^s#Xk0!?3QiL0~fpX1lM|CJhbc zy#r=ek@0(_-~uuLgmIBFFt&#qaZ`^~;VRr0%jfu>TXcGVA&s&rS?6IQ%!(Py`K-Vw z3*16xI&gA$msCb>VzjuZd>^!jo?uXWWkICa()(>v>RM@TQfRK&J{j$@PKKB;xT?f3Tpb`*l^BiT0>vs{RW^aEuBsWEKpY8vRl$ymlbF4y;{_}$ zURs>Q^DZlmq@j^GRwsVv#=%huf!!w(Qv#*cz{YlVN(wU~`~zgVvh*}7OV2&a%5oDY zX0l@9&^AT7tKK6KUlTbCpMamq-vs?bwY1ik9_7C03tl@&Rf>PnM>t->>N0{@ucBpZ zgP5dYyk;o*&{?V|FKueRqEzzbq-vCtA}72$NlzRE(I<6wjas!jcgo#Jy?mw;*7ufF z?T2zj_LM3U`Qm4Rt$n%xv8*Ij6bj)6?w@Jp6w;C^UMYp;ibjD8Yy_!`PEggiP5uk3 zCI15M!z3E9^1CbxPUuP_<4;#~c#z^&5?mDdb~vP+VkugoUaDm0)NIBL7fAIdEw7&B z&x$8$%g-v6#p>RhO#Z@0B1b@OQyJZ)noT+4pf1OI(#bC zL=%oYuFF6wX+1v6OAT{&P`su?;%+3cYXOMl6R_;k`+`r(^H$9>tLBaEFoEGCpbM8- zRmqy#a8!j;?~a?VVgBqou3Koe7+?Mav(C9#&G6r%xH1aWS7tvss4^G;!ueV#LZ@ zQLl~^zHJ5;q-0tAj8R?XS4Nlk%V4Ch8I~v*SE_cGN?j0ZEotxSRk_BOn5~zb3{aez z;NnYwSD`03Dgp+QJazm4bwnvlp)nufi!9Cj_FVDy?26mRi?@%hxV=!kePYG!GsWAd zR@`1H-d-%;Ry6{Mhgps5cMZ4T;i^I@7r6ZbUcX z^ivzv$%bOB3wp4+>&VqjJdfGSu2eL+k(0Ad%!t@ZGS5w(;9<4d;s=3fbu}L?WkoBC z3@tjl4v;Dav~X^Ui+X`wQg7Vk83Zf%;CyGm^z30vmx5tDv3^v zT-drR?YSiY_=Y%r9$4aYEv==1I_G1d8 zxY1d12x+9W;+9vtsrpqEPU=N^oNENEq~u|A```{%!Jl~M7v8m}jgi%ZrPBm;(Y`?l zrv?;z^rh;BFz=8`CCy|i?JfB>dikjJa`@tVAwzG|W#~<|@I0Bzfr7-mwPzbk28F{1 z0l*Qv2x-?PY+=@Svkzl|yvVmR??!`^C~L8Lh{7u4>}V#3 z)9#a(<2^72E2TSCn^iEin@i0BBHqOW86 z3Bn-FuZ;#;nbjp~?NL6h?0|Kwnpw~n*P^{~K1G%!rs$UL0FklkGu5oPZTO02hjhFuW z&?kUz7GE}sPJ?)qO7y4eC^jNZ=W@KL;a_C*G^2@zh$d~qHJNSDa)@t<(%Ry5DJ=YB zE>o;SWiDJ-X*&^G 
zwv@U?eP9WXFjM|jTi8Ce?&Sf}t@!zh92dM(=;?~SfrD_5Rf4uvfftCJ2ZEjmp%b`A zX{cnr=BAE*a)&!xY%Nj+oBmh4Dqb>494-Lg2b0%TyAW=t+;oG%Y$rcLfJgc)M6jIV`mYgXwV z)`1s9^aDh48#1%DH;(P0!j-gISG2jT6?TCvKMc0WVF-WJmhuMFAoXEZ$@_4|c=Itd z^KmcmZPtc1emEatSji&`10PR_g{pYM*s0Ht5v7OiCC-lFis6KtiXOz>aWcY(VDb7x z+3}6?SZ-45E#>H5RdEjukht7@tV|vGq7PJ-kcHuvyTF5Ke{!c5{egy-ibspu=Z>_@ z`M07C9zA*RD(3jJGcV42Z4FbwRVn5ZsVDBliyUx-S79rv13AVtq4qU)CtmnXc6$CB zIN>+*tz_*uRLXCbX(9U!H1Qi}eZ2*kc?XfayaxeVOa3=(6rZU$J~PhDRtaijYW<)w zYtj^wB|QW$@!ms6a!gKH%gDb&GbVo`nfh~j_+aQJ0bdi;Na(4itxb(A>dx1f6Gm4~ z7_!p^CK2t9M4IS`gy3nL$Ng@eZ~`;=&5;MNou9(xtyLu%QIA{~u^oz8jmkk9H8kZ& z#%Bsj6v-%tKy0A%=XT7$A@3TH$EahT+N}OyqGz`E^Rs06fFf!9{qe^%2qEmZgwtl!coVXbBL zAK)qB6>25?S5&W@aW5Nt*-Yl{k-o`NrBV(r<-dw=TrNCTNq@{bs$~BK{1@6c{$}^x zg}#^T{MYe250uL_{;O1iprj2V;y-F=)7lHc5th`RZa?2CbzbC25 z+Y28}{vxmy4j;#!r%s1$T$#&0NY2oIdV{>^gdfntx}EWiTYETi@vD#J+)|x5Zdp!6 zGlaODTdI||O3u>;pQg`ND@VVwwuBdEGpn)~5BZ^c%e{MR50@T-*N^^T;C8$Oy=OBG z$7U!5-f*;a;SmDIJvyUc*xTSoe)E?MbQk}VhCx}ExAl?j9(hBb#fS9bDX&Xkec)z~66C-Eu z=l6Y902&8mO{xD5!JfEU`~$Jx$#RHHPbRUue(9M5uClfFgYur5^k%F?+b)_BTSLNN zMZ)aK3a9U?QllE6RGEp!vEH|oC(7+X6@tv%sm6!QpE@L(Uj;z&EB0Rqy5A9W*KwTX zTZV4k#X+Z1<^$B$(`Y%LCPW&X8Z}rnETmYRc31=nkmuz80Md2_DawUaAmuTzj8I4m zdLj-%+UFX6L~5s3jP~<`wgg7e`6byMCrC_L%UgsiTEv5CTrMMgT+ym-o@NCx^S0_d zG8@dJt40ggiQd=Ka`Qz|C|)cxaab#KP%7GKh`j_OxixvMk=<}XjXSNKXho2N6KggQ zJ#Bj6Sqe?x-rFykqmBk>u{AhR!uJXFps#mJ?}`4;TIr1V4hu!8)ihDrFEAHM5@;Dy ztkZtMnQ3li4YIa{(p}rjOC`LWYuv`uUzhIc)f-gwS%p&@cMx;a?i)$sxaa%fEhU;U zHq;HiMNe>?gKv65B)r5z+g`>CS4&RX%jWsZGY>`^1?8!bhD1c|mY~<_9u3=QO12)b z-WGWdiYZfQO6zRQgMOr(m_8Es-N8mS`=wO9mHvSvB51EoUN(|EjW49#wToZL`@XxS zTGA*jBZLcuitt$@DX>kdEmtKB!T13hc4ZrCR}c!T?YJdQoPZTA-6;XcNXQ#?cT$zL&s=%*Pe*);HL>GN>8-8<6i7ORl&E3RuxiA#F1&D7+2 z+h^-;HJn`2HgOvzQ}18aD{)05E4rq!;$a#)3DtdaXZZMgTI&=|Qy7D1Tllsv1ozulA1QRATI;Fz_Vq_=>T3tY|G60o>r%Ik`M`vOujAS%nmy&`M>WRfdV@&Gz7;N9SN5`Z!8r!BZyuLh{TPf5MMS>E6K<9 zf$SG$$Tm>L!o|uZKv!kuP9zsWuB;C7n&$%KPYL9!o-2^qhr9e({;ELU_*{Xk-TTW< 
z7-t3Ywa*pEea)nfMrj%=%4SszvQUS*?`<$o^VOx#Q3D<{9Y;yF{eY_;NyzKGR&S_;PMtCbU&>4aG1jd>Tq+Wzx09K*`9 zU~NKAI!)aaT#0oX|EE`AO_%|nD{)pB{g?DqUlD8h=y5(>#OTHI&og>9*)IPo`a?lH zkU@Nz*(*U(mmlQ65y;OOd&L>?e9_o{{4oZyKEpjL^I~Z1u$~HwPhCbDL(|Mh@4PL~ z;UfD*4P~e}l#4;Y+;fQlG2+WEs=gqIpEK!-nw`#Bb$1&l7J>?yWUjJw@}2{^@~0Y9 zhlIEox$>8KY8lP1B8Lh*b^Jf@#Cdq?w=7XG@Gt*#e^?N=GP1tBJXM}-^B|!I64?%?_2{C==KWRA=V>5z5NvoMCJ>p0`br-%j>LKZDu0 zHJB*_xY=-&=`6R`#RCsVph29fQSOnt%)Ly~~ZWv)Abig)x!!Vr~>ezy1jHEp{T*h0?+Y}2I zyxzz82&8tGd=AV|>UxkVnK}#FKxX<~yAeIXtR%Cg-RQhiTxtyZJ6pO>DF}l0Ah+jA z8e)6kK7tDdzhl^JWSc2L;hYed$eKbN0WsL4*Jiwqlid%w8TB`x#C+GQQ$%u}YAIC6 zvzgpLSC|3@g$qYR6EgXq0kzig{{RjUeHA%|A=bY-KeUR+LCpq1YndK_lq*!55(kFiK<|{AgF*CXft-S34D$=At=!)p0U#wvzU`zyo~HX z9A}*`AL)J5WcNvt-85tucaYt5N_H%#WTrrN>n@7yJ_*@%KdI>usT)B)`5wGo@gPiw zHIt)BBxlqoZ<5?Noz%x~7hdIT=xKhM`n%3ksi?K-7-qpS6*q5(ZT8*qfS7iFuVOR? zcK%-Qo}EuWd><#;f;YwM>^RKm=xFK9h3oGae&7Lu&IfjG`%Y0OlR#*6Nyat!%*#8`hl5+I(jysWGql{w{X5N>SN0Pt#_-I6F8N12h1`qrW z|2g~2GlAhxduhq!2{Mn@t zhuVYpKKT$Ll2K;l@grmRwj0B(^*C||V=e~a$s2UBp{HII5ElZ+P?I@c+(yc)qxUM59n&# zUnNt1G3&<(&J=7ZkCq3(9_`8<1n8);!}7w>OJaj)qm*WbZUy?U&l#faSCE22W#4v5C(U;kahOjv28#Zg;S z1RGhS)g{eoXxo*{M`Ly{Mc=Wu^OTa{bS5V4axPk6Rh(eAeXHY9r`Sxd_iua z2)aL_%^1HofVRc~9NQqMCx@W|&`{GwzD+e#*7sIjprrHNozFKZQ*6F7#Toge?IW8$ zNZ9C4l-}U^?zoRPczFBlxtA$x8^=4;q(>0;+P)L}O+5f_h%Rh5IlBFS4bfWz9CFR1 z@eX47_#7Rr+&OH=!n1KE1PqTzlN)x(Os-{)>&dlmfZ5YC8#TMcO;fU$8d6+9;>-rP zSI?%|pA29ZhC{-NKKj->drMcaBUnO+)sxR%`+C#$p>}ni1o!lolG|ERd)Au9o{XM_ zJ3-GaO@)?y+>D+i&k_g5(FD_O9~pi4+aY(o8JcqopUx_p3pqzZAurz`r!jmTbwD2Wfd98d_ZK=BCh z#h4!4;CZg3jxAbBkAYh2nS3=@a0T^>J8n=v2qd13noP(yo*~8s$vl+Fnfu6}1*P)~ z@8Q4KKBge06u|;aM9+jVQ?IY=*ctS|(m?u%Eow=UodHDh|1(NVx7$*Euq zn>(371nBw#P8bPjqB?l`8{f?I7!-?LpWzC=$eoAyQ+jL#2F4h!y@E+2zn@@0q2<0&MH+Xi%DK1u-P0|aE47NUn1?>DwWK#tB{t0x`))e&r$tpLWRU>%DDsw!@% z<7Xt1LL(a%TWPhc+-;vv+#*B}dibCgzuEdaY(UpTbU~Si|7h0fP#N!>m z5rM@dNUDq8fC)D$XJyIl+335x`})p2b7WU}oKTPnxfszJ=hnCmt1l7oF4hUB zXiBpwdj~=-*$1zLbws03#|@=@Wu0La5BxDUIS*RbWNP-W*l+i!`oldeYA?%OK!Bck 
z3e*tmDBFq`bK2;pa6*rYmFd%o8|^hPw1gd(=p0EAHCpv(DjC<)T-#Q2-Xk(g&l?rH z?T~{=G|$N7>nKVm#Hx5zeNWu~0N9fOXF(|4_#Lh^ZHbqRhqCqDw7WnpIW~$kE*_fG zcnKa4MKFx60*UKaw#Lzt$SZ2Z8vS*c(!ZA5AEY#AAz+sK$jGfbr8LwM<(hyVJ)*Ku zItw(aFjJ1&2t-P$4cQ-{$yp*L;6Xa|BS~zFFWGw#wUNRNl;8*8!A zi28M`obVj|5*RPDl0?Rj)1O_4xgtR*E?80wuh@_+UJP(g@BTcO(fW+hvPq&@-R1;1 z0?v*mLSIKa!i%icobQAe%=)-}>u63L31&h96*l>cM#J{;#%X9E z5e&*FqgYJb(u&I@yp3h<%H(bUE+3NDU4X4BddruO!=_xTHLn5M@wbm<1{G14|I^)EC{%($S=^Dg)V+Cq36O4 zC3)>!JzTDgw!(z22l&+n<3U`SARElLJ z&5VOnX!6i;`PUJ3ZNKPO`&SIUt_2E2uCnr#-$X=!1*{~%I+KX^gAW}iPO5kCB_zff zb)|1vW6MVD9$+=)4}b|C&~O#bYe$^|gTQ6S=muFt`(A|#SK1o9*)+N9H1k!RHfMx4 zwo6Xbnm3(jJ)M#ey(tP<$0c|5M#?IHLZiH`EL1mL=?Z!Wr}2?J(li)Xk9Jx%hud@- zU~_n#iHy3jBuNN35E_N$Egw}rGEedfS7H+PDte6{0-=jnoakXiI%rYoSaC{yn^d$PbD*%J} zd8afn_OLuDx`Y~vinaBBEYRr-DPqYvo)44Fm=e(}ASBmuM>@_Z%hwj$LoM7Xw_C~& zk_JHiz0qIW3Lh`!2_-5$WGk<|DjajJ<$cz$8U2kloLHsdz7mAG-axBE?Z_~ql5Y+QRE~1jS-^V*&DxDbDVaQ?~=c5;tJ-Dsrrs{m>>@rHII-beI z$h%7xIZX!{YF$DY+-ygMAXwcjpQ5c}`p&V=Iepi`0g;ke4I5niWgp9v5~l??cBxYy zcHR);s3rSt@=`v2uw(Mn0P+g=6N5de&UvY^j!T(99upr$PX>54mHP4I2l#IS~35PR}@GznOQJF0jFe5YzgMWNX!`}Sh%?{u-hMeENWr$KO|a3FBE zwn*8Cd#oFGPm^6xqz&vsU9jn#zQ{mcu;*FhE^Tm(&&RkC3ZiL2#&&K412CbR=>#V9 zI=(q3w1XdT7+?mZOo-GdwPeSLr<;4)C0GzX64Q@3%NMjt3>>6h3Bq8Nfdn^K{>+W# zqz^*;cbaCY0tSV9tr#M2hgTT4bm_tz9O4n2Z>wftpk2z_&`Ct&2k&^+H6z@3Ec+Q}Uc{HtqhnI`e4Fxik{BKIQ>L#^l>vm zl$No9>l^)HYs7>Hr;5Hgft37-QKm&cqh%y_pbx)#4 z)}mJ-FP?V3M`w41=0Y;PMu{|~)-d&|8Lnm6qcybH$L77A*+my}UL|wg*fm}gd6kHo z(D=&_;xD&TlX1^5;$0peoWH08Kg0BBXg;e1*Ed>L31Hm|%Mjz4@Vrat#KIDJ*(0l= z9;IZ$Sd!M~`B0|$QO8II2y>|#R$Y3?#o}o>G&oG+E=XHCVK+vxmIv3VQS7a#9U=c1 zBFi^l+cxL^3U)D zLnpBj^he%8XZ0ANu@p^0im!SBd800!sd6*F<>%#Sc_T9k?Mi*y(Qa-pzg^$tBu4L@ zAwOZV2?g1mlrrl4OP0dfWABZUpZMZ?$#LSwxoemvp@v}FMRAGTzqgj&K}Tn39;W&@ zoV+834m4949cZjd(smCY90cI0R3&~p1CmBub{a9LMgSDf zFu1js9R33P;IIPlrh`8DBtgs-%jOxDaER1zr@Q`L+KsCN7qwi3O|u~#i`G{>8Y`!R zT!Vf(V4}ubvXv-vtlsfX)^z^P@OI|by)#VT2@w?8Hpaf=vjRuUh3ElgyySSg}kNw%eYr>PHA 
z|FSunFOw=1vAMt;aT1w1fij{c5M_*wM%lxpdtFx(Wrjt|z_4qm4z!eIqvRiu`(*m&PP z?tKFG&YQyfXpPvrmllKvF~}v*!55~_w||>*@{kTg^NB(I`LJZU1)gO_CAlwdQXMYZ zVsm~{dpt(dsQ}6@<|sutgphxxZM=)~< zc!beiJRV~byFe2VepkDEH%vzpRf--7qp_iCLLA0)DGZ%p&rb0NTS5!eEGac7-pc5| z4PKE1t)L9i189)+5V*n*%3oj^m0P?3_dn=GLwWcf-hvPLmY|18mx`^85m^#i2vvK) z(k<7_|F3k*NOxn^Peh*|^itK_{qC@ST$0NKJ*gE~B<%50Sjiv`p%w;5hGcj+%*noS zo^wQJ;uRQ)pGw)rfzKRKl2T|}9ngaiLA?WD4Ql+T1*pwJVpE>+T2jk)0d5g-ur;{G zMl5m!%}CrYy68zmd%(yvTt&Wey8=#msHUN|SWEKgGK5ldquq9GTt9(~tKS-I4&`?? zfd^BsqAQ!%WH}wmbWmo5C8p|;VI}+|agX)e+(9FdVE2-9B#ETMoGI#OrDXl(M*f>R zar4c3tTul*EP-p2UJMf9P|0*Pd!;m2z1_GC!)#YxgVxH5@lH;u#F5?)N23wGz&4k- z4@D6C?9_P6M-5uaN#px><5(oOO!IJwEHL(B(wOPRik1qb_3%bUmtJ+bD`*7J&G*>D zeI`Ro37@htrub0V5S#37zfT0*;Lla>CiDcn1KPnIUjJvXFRfIXE>a z5sG!v)4Wf>iVz{aR;j#$J1QrTxcs>yIh*}RpyYR}+W11ez)`;;pCxYmTdQj5%p)%LL0GOk~g!;G*>C zW`P+R>V>tuiM6aeQ8RRwKru=TB8^apC>tRxGiI^T_w>4m8n&{9D5ET1a z(G9u)?95rv7+4aDWfI6IJRVo68ug;x$yMJUk(xt#A;LhUB}HYkd2t{%nY|_xxTD8p z!4c7?1q36UgMcVRib{>e>jfc{TbnjzeRV|IJV^$rK?T@F&|%k9H9O zX1mzeI6WGr^P^#EZZY#0;&I{mxLn4cv6S3gLTJi_TjX#X7*fYv)^olLbmDG1K;L<| z8#|rM5_>{mhK%lx&l!&R*)zMkkpYYtA_na()~RI*gYDx+6G_mPtjvX3PD+}y-8R2s zz3Luhi_tZgC=2Q**mFLXoxR!3h*mIb=ne-OvfA}5@(RNpwlqntzzdAhWRzaM5n z+qL?*ONwz=AY*5ZbHB|aTHuN&`XLdXz}Bm7ljKw1JcJIWg~dy4OqJ--s+VYPm_fqU zoZXZ<@oPPuH}#MqR3m}=f5BvgCL1R8)!nkIWi+%TOHfz6)!NVDC&huLWEeDye?+rkVuhe2UE$5lDX=(3?@AMCk< zkKo%a9+RgXMs*S1!RyM7s#v@-zO;=^;V~?3y@Q1$RK|Lft!kiQPon}+X@%{ z$+ql-k%-oI*~~MD0|(j)z=42=&K;-c3qd@R>RM?|wSyHkzYF)aU#(#2HUcti654u) z=Oj0SexS3Y`?ipwGT$mJwlXEkiIj44G78u34Lc(o#0%RSm^!Ec5&)~F_TODdwGM*2nNT5*f{Vh%hy*Kc~uxdkq@^w z#v7r@N8iJrhm5{i{EFSFX*HbD_6{J8YcGZ!fBv%VlyupesIw9tf1<7U)UG!S6Y*lv zO$3Udj%mvTrjR_eC%QT`vc922;&DaJioz#Ce2);Wl{o<#!s77yKeWeiDqmtV{X*P; zhRdcdJ3HiMkGm~CpQAZ~UaRSeHbG8{#^WKz5V}VQh~)ZtSHB~_7TdV}Ff8*;a-gEy zsfTvdrn{_==tz|XEnN}E)YL#MO~IBKK2Ja(gbgO zH%`(mPJ+W-oMeLppfIIu*d zN7L&IMmok{;3tYrqb?oKiLSw!P)?*bQNAEus*sTov?Q z#jUPcQYKtS_F}#CZQ_6zV5mgD((pVRtY}k$pTp=!6tBKbJamVqz|A0m05@+hZ|29% 
z+=VZ!O9sNYV3y$1goAbGb(U%Ym^2qTQM_utyoTaSMF_(pU{{@|=Vc)at$V>~#c2Ng7-2Vrs+Ol14&lq(p7Sdo0u z?_qSPFQEIvU~?VHmMvTvaC2pj5BdD)p`|prC&0AYgE}afdoI9xli?blV$3U~~-pZVetvy5zwM zZ6{kW{aqt}7a)zB{R{j&qBmJyxbmJvHTQV^-aEh=Lkbei&&r-;{ktvAk_NLw!@^Iy zIlNa_AbNB7x>mrjg4+eUqKeW{vA;+kZ@DObL1Mqw)u zD|J3t+MY4m45F`?UF1;4i_f@WxR=ZV|7dxaGv7()2vL=(C;PvSJsOcF@5Mp$ZRTEd z-Wqdr6j>E&ugo{-MEp5qA)DX0ocSkUY9+=Um*sQx)OCU_me4eBlA2ppDs3S+a`x2t zdCb(FaBrgN#F>)~SXpz8x*X9S!`9I>+>;CY}YgPF)=!wjP@Rmp%d(ucSb$%)|XO?vLFEF{>I){`8elG}LUZVET^0nuK=2fxQW z3Fiu;nTEQ*jhW`-Mr11=+hC!5D9qRTFC(OU!#j~}+wDppDPv9b237gRYIb^*uK7ruAZX72h<)#an>YwwVkW zLa_;P!-!1a%_;8%;4PtxL@N8Or&{xwx{LbPhOf%%P|WwQN`e79+d!3NiL zD2Ol^4%b0g5>;_cnhyjC>*SAp!Re7abJZUBe`PmWn{}<GCFBs0*qt!xflp@|kO@dreP^z2^T6{RoyU;DVFrO~^~Vv|yf09~42DCz zx(*0*kzWqO55FVgmkm~he(QNn%r*c!G-Ey+csD=k1cuf?2Lb|*3CmaN z#z%JyQ*I^3|D%@h$i&d8AR8HH-PMDMv^WYlde$VCCWG+{7fUhBq8^d7jkF~l5Q|niA{|t za<}dQY23QET^+UnMy_Ay`B;_BEPsi#g8yHf+;LD|SnzzZT53vVU~a2MbZA{YnqK`g zVN()U485=kb750Ld#Fm62{Z^!iKYkfxZL!A**o|6xUTBn&x~G{B{}0HPC^`#8O8BS zk!4;91d{lqB!LD<+EQNO*>Vfmw8eo23KSG@h)4_wFd&El5ds;A11h*6sG*{yMS!6y z7$P8m02f4SK+pgJOu66RTKmk|XEd5TxNz?umwfPO=A8XI`>eh8dhNB>?k})IUO%41 zoAI{-Rg@4WmV&d(ct20dq5MrW6~bA{!-O;5coiB|3*1dsaAl>-Wm9)OlFL$q6^p@R zzvz4kpjc4sEp;HDxsCKqcm|n$DR-94@os3)Q>EK-%y|&*y`ry00wWe2x%+QJUen!r z8_64U@_O7B=4IrL@iOvN;+H|m%iQYHzW?k+qMQHEzDV(IzKB!?YpezGQ&AFOTy%UW zucE`t<5%H6qNf@c)Jc9|q86>+$L9X#R74WeHwjrmjI6pqqLQ4}QhD6t0_zGl^}N*A zV}ry>jUNqGYUGmWXI8KjdOrre0w@;p4R){Dx!6MeMGcO{ZJ3A_=EeEozm7D_tLOJB z`YUO!j#fH*zEfW{qpw~3B#CRQ-27qDLam-)v1IEGvaCH)sPz?YS6G8cVdqZ#schW= zE7~H3D=uX5CtWV%tS&8=Vcb}xFxx~6jITSu^7u6hatPH#keqk0Eig!wjkoeOkcuqi z&<^86`1_p4{71h>oVUiaXP`1*&=nedj!g)UM0 z3ord`PNtWUUEsC#{V6@^mG3OQ@{LkyWqn=A6;N2_hUMis!fP;kntmiRJft7pr$Bzn zD=w2UEKet_ph|ElrUeZYw+V}=HWIxzRoYqbNEc6$Ry#V9m)iPisnQ!QMAg?%JL?S5 zZ2hzpY*%N9dUuVx)lUp?*h7!Oe<*2aSwHntcPbqgH&*|AxRC5JnlxsttHarHS~W@9 zw~GuSEN}Thu#|UcUmjwE$G_}NFTCtJ zulDLECYhtcZg0F}Nonh_dI?*W=hI`AtlzxcWXaxc5@9FoiudSDdqf2`-N+BvV81Su 
zU)82XeOb4>VtdcqzSv$m$F~U2)nD>(VLPEk@u9?kA7gZ!e=@mrp`EoKK8Pv~z%e-g zRV@fqPvZ*|>OUr<(fRzeZAbM?KJLH0TIFMbihauCBH~zG%O{KFC(xlI8@5*m`8+hZ zz1q*`9N+JqjQyLPGhf}3OoqRPlG*Ur)?_jKb)RHx#_zjtG8z85Uosv3x_>es{(3;N z82&noCwye%_9~HuWEY=j^LgXI_No>kzryD_K0mW{d-Xwlewxp7_&i9`OzDd18T>hM zrv15|KWkg;&kg!>v;8Szk8HI+H??gmp))HLv{FK*0_5ZS2@GN-OaRK!91O)KSx~8* zR?nT|^vHiFIh{HA|Nr~HK>-G1z;e5(^8V>!_Y*EZcFoEsTz=dMJx{p&_!Ijsf7<28 z@c&hpmoM*Hapu5=jhi-K^rqsQiWfiZsZV+Glh1zONoPIi4s!hhBKz`P-7GKIKs(#YbLx*&qG!pL9I&iI+U$oCiPT z+~-~K+?{{*oWI^ze0lL@#jA?L#h1LZctdgU#eet07yZriul(B=++2L(tj78()Wt`?mDA_Z=wm8#3Y11cRg*A1x=M zw#DLR(?eM1G+;%=8XO$*2JAHCyiZM?j)a{HYB4=@(b{tga~n#UCfVF*OIWGfy%m8* z<&F_PC2-%_-wKkiYEMts( zo?Or|654}>kxW>N-$j9{^w=HbwJW0xKl16OC0I zJJ8rWUs|s7a$-0E3Tl7&9#Dq$v3pJ#YkYSpL!%~?NugR&=DnhfYGDM*SR*#7^9oHS z0SmZoT3&`SAA&M>gUdg!=jb?u`g1g$YGmd3G*g^wT<9#@fc_6c$0G79=Xw%2ZVrAl zQ3*$>K}hywp!@}w%_Zj2Kd1|{M)c|WA!IynQ)!AL z$7X~vV;CPajJbd@7%+|qgDve$cEgy@W6XiEAPkv;9lakgjNvx#$C0Z*c-r3Q0;o>tOlIfF(EbJDCHRs^f}_omPJ1Y8Hdd z3#x^ue{T+sE+(ISZ$zX0^)5_V7bfyusL=&ZQfEY>F6jJpz0EhSX7JkUZ^QZI?S1dl z+sql%Ode_)C=L;afemV(y^W5k7S!+&mR`H-PLM&B>+S38lK0%Ee)M!TzY6QP*OhRI;Lo|M`LDd{8qj7uk*00mSj0yG& z^=6??8R|qHwFc^>P$v!bJ%&0RI(Wmszy!o80uvk()O(F=1~rq1ng(iCP_qW*=EZz~ zx+xEJhoIhLWa~7t<)P+*S`^fxLEYqJ<4%$7{kP_NwWkxPnvu<*MmzIl>x67tLLBQ9 z+1~AB3s6_hLaXcr_Y3MKqm@BTqSdwgb5MJ_fV$CWWl*DC zd0KTrD^{(OF)E05^xojKa;S{O3dzsDVk3~snf*e2r;*K2C-SH@P$z{tX{hgTvMoh@ z?e#h7bx5c;7zGV=CXYG|>a0*_4RzcpxD@r{hpvjJ!yQ6>hY^#Q9P>4gIuGiiP!|pL z?M_VYgqSe}K4L}BnYu1%_uvQgjT>DJb+jJpL%wb*jgbF4rhv|v1*YESbX|&i)jMAv z_jSKe-)?j@)QLQ54b(}YP8#Z4ovuq!4<8(H;pcrlB-FPVT@7_6k2($NtWakSb+6NP zDeC0yxzV~qsBbm8QZ^d8=27QCT@>n~pf38{HPq4WJim5B zSLIcVaSUch?^~R%OHn`cYf#yC5JqdiP_H+-8tOzIwFc^>P$v!b%}&>)sMkpoWT=OP z`WBa3xTIbD~ce)=Dvt5lVb)*V89v(a^>(KU}c59*>&7Y+4IPFL=1 zpzFIvP&HX!_pAhU%;;*Uqbu`tT?t)Rimn`}+R^()r|VMG|NL4Gb-z&GWOOyui9Biz zRAtB{lZN^RP%(&~j)T9fU2Hp!v5dKa>Ny)rma#x)pefm^gr3Y?5<1Yi6g#=iY?VWU zbmNWc#w1xe7)^t@mzy5MI3uzthl-}1q1BBT^4@Aa&;K%+aGbFHQ}6A6&}MyAN9f)9i%EC|7#H-ZovBy1@{eQo90x}|PipQmC>r*5!_ 
zi2@t*>u~?h;QrC|%J`-yXAsnu4P}1At?PluZN@#?0$fo*NA$0OMw<=@}&Zvj3N6(bc ztihfl;2Otr!0@gr5?hGEFI*7an|1OC<1g@OeKOWt0G}66%oD=>$bVq6>a4Y_M}Zbx zyo(RmD;D7av+nWF%HtnXtZEDvS@r=0$L{I_4hJ$%>p{H1Rvs`XGS5NgM(3|iiLd5a zbH*k&fs#h%nO-SOo7{U$WXy6WENs45`D@Nak`Zsv+MMlgEX5L_4Z!n7i zMlM)@+0_FKnaIqB9x>x^Pp&tMz>M_p`v|qHz#RBc9p+ZSyiPDP1~Zk1nFMB9Fw+Jz>a+KM=z-xBXcuXAePn1uACelNPj8++08#Q2`t*uEuLek> z*yZf9(m0jBVM7ISnR&HYMAaH?e|IbsU!!Nwo1kbS6D_>z!YP%cCY-Dl@>)F`$n{vl z$zrNV(*2s;#0!z)Ew$_Mux(N?$acTN>8|mF!86aJkXdl^1yhrLl6h$o& z0l$&8oAM9qa?+5PuGBP2Wmpq=4zKlMYzS-8uyz~PbOTmfz&a$XR|#v*ux9dD(_qaCYu2!?GpzXr ztU|!LLs+|oHQXn*S;%9}gS9BEMZ>z*utvBue#c_K+S3Qtb;24qtkJ&QoEYiDOzY$7 zSf6I`E1h1Bg(~`%(@7ZD8l4PdB9Bo6V^SEChB4xF3K$&$S7;i9A-`fCPU)L%4_4p0-d*+{F2h@B3^p7GwO|7N8oigx((W=hG*Yc}6)&vyJOJ5|Zswstd`VL8k}XAAHo#Xt zMnFCa?O#Ja&c9z>PBhz4ED(2Z-3C@?Wj&g5l{)yY8b|AJS>tBS>Z?>gQ?va0AY!QN z*pz~!tN49%l?2e$#uDS)32P@cS?$H-+V9~dr*ZgwtC$3@5D!cmUd`$}*2Bo-)S{jc z-h|;fi%kW*{rRxpK|x(Ds9A%W&O=QBH6y4QgL=7jP4&0cwKr2^FexRn;ObpFBD`I~ zqxdBM&gbz|CVN4694Nu7z6?BBOfI5W-IlIxF%8=LU4qGJKrc&Tl@LlSS8qwG7hCLG zdf!D*XEJ`+qVL5J1dwk^=5Zjfn_Q{iY7^i3?M_`2Ypb>y#$dwZt4Pc6Znz}zw6f;2 z!Hx++iCJdu2(wJa5$C^cOAPY#ubbN*UAXPA&VVL>w;T; zO{OWliKkAhMk~jfNgRv*osnZ{7nr#%J)Xvyn>K1W-K)_eJ`KEIjSeptmg~`Sv)|$X zjqCb#oXiLFS;3JJsRs4wEbuwhID3^pqo#sOhmC5&mqn9O5LfH5VEDZ_Yam?@nt=Z8%scWJ{Rib%4uXSG`_!q@ODtLNnas_Pl+>^ z>+b$D!{ktRC(gu}k<`_AN=D4fq56;dfcYfavb4usXgXm0-q>M&x|5-?9=oL$4~-dQ z)`ONFw`?5V06n{eFD-9N2zuArmY`wuu88Cuq3toKoh3tK7m(8}rQ-TX~`kYaO^Vsj?NUSd+LdG-czk*bsvu}atA!K}CW zoF%Q9taxc%R3Pj|&rJ5g5T2un&(U=e|CBEZJ_$7BRZ~50QEg*RmwHX2+S2nDyfqSJhF4e9&>ar#mq=Zs?+Bo0|~1E@tLN9A*L4?~S`5)?S3Q zn|Y)r#@dabaIIbwMe<#=yR}dyr}|v*nsf^sL*+iK+XN$JwF16=AH%%BoEa|j`*2wn z$ls|4qugi)h*kD3f6uLuGD7ktT&)=lr#wb5rBTXejSFVnU|uYki8_p`ga?=dg84hH zP8-Z*9%cfVDZxw`%!>pw6JUyQZ*CXNi@7>)Ftd4>8DQoFGiNX_6wE>$ChE;m!Mup8 zBdd)zd6)%YhF5cSc(rKrw?-Ro6m3eFXY)>&)nL4kyK06pwmMH34jLL4#<*d;zz7pC zw7BOO2ZZsr+%;_&lX;8@Fs6htWf;#ls?_^Y3>dcy;|1I`Zy2+Aj2STIgfV9rR~kv` z{YWv63gh|QHFB)cBag8F1}6qUk7GrTzlr=QpFG*h7d|mz3-e;M>T^p+KhBH=gZZ2r 
zEa6CnbhoS8LWLMM)m&M9aI(@4jpZ_u2TIdn^imq4-HTVH{$aQcS(dR>q|F2jqWADl z%+i6XLLZXEn2BwY@(Mf?HNc@_=UwSa=4f|v{?6q5i`Mc%$h4D!oNSzZaPd}d%9hfY zUf;J7wq>;ECvDZ0l~b6SWGgAVb#?=*n#J;e)-3~#ONBGxMj??t_v+J2qEsH7lvUS} zV#saD9;4p#eh1Wh$X%e`mH$_$cjcW@PaG+#jR)%Oi>UVyr{0aC-i=1R=LYK82WJPi zbX17VYm=Q7K%#P3V&UEDX8tmn$yxBdVR>2;1ZLSsG}4wD^hs&CCo+fmlVs#|=G;sW zsFd)|_;p4b6e4LcUb0Tr)lD*a75qk5)5HmG*f-<^Q zg1(8RNlu1G%9e{8s5N1OXu`_Z4G}$EBW1!ZF*BEO;p^8lHAmMgLZIXXx#>28)H{|a zcrI5gRRN1pNL=qMzkzU_8ZKHxEL*sV%bn%xSD4B=Cc!cpEPQ4Qa)FX3_?gs`+qu@` zC+4wHT}=--FWF6CyhA9Q|*b>FFFFkRKxB;oK zRi-yD#_X}xDZbYutoyaVD*#W3!hwtdY9uLXVIvu(AuDhE1Tn0cff z)YbHbgz1)KCDiZjh9mgL*k zAipM&N^tp7tvG>X8Y-Ihd`|DPgy5pJ#q?=d7L!81Q+bO+>hczsG&giL2n#}3Fob6s9o9Gw!)eZC^DA8a8OoLv|J6k$hwZ zeTc+IUvvq;Eq4mXqw)+(#eyru5EiRti))ujnhjQa6Q_{6EXWfh6*a%0T~$Hbc9pv1 zsD<)(Qb4K=c-_Ir)od2t zJK|Fl`atJ}AcOwsRkbpLJp@LE7#EU$G)ts6C>Z^C)pDZnhw5&!S6C18g3-9C+~h*| z%Wb&Bs5d<5EA>~`C5q^9;vh$YVOny?ZuwX36kU3p1b`N;rI%*$ncXP2M{n(YvaZSl z=qTSz{pQ}M=({P~kWTsR^6Oxm;P;zmmbx%LD*v3`;Ppsp{+SKuU{%q1iht}HsMxs- zVd|qQeeX9{c|}vKW@MBXl%1ncK72nm)PJs@0sc7CEvqc~$UHxIo2Ck5oqH1})$`P8 zOhPMWrn?w03cYMp9FkF~jNC+Znbn1YDI0cd`le}UELv+_aD?t*la+?`X`0;W&)Pno+nGK7C9_L}DcQ;L5xcW-`37S)0-i<1#vajNlC)ygInx(DG0tOb!Nh%(UX|A8%xhj?vKDh*mZe! 
zAku^^!Olyyg2#3n0qn2|GzIQE~;_mqJ%hiCB zx9Jx2yZBF8-rgdshV!3>S#d>;Y0$J4}q zH(5@8Xa8z@HltAYKn^O*sm86XX+@P3bn0UY)D22CP^ERB1Re0~Pc%DdYf&h$ELDe>(E{nL=BBhG)E`HHZ!F!Y zGk1fKzHW+^$9tcZApI{ zw)#BPm`Hrs39QVvcsq{u&DbGMA4|!_Yw4A7z12Ak#b_-!3LO|nF3TvZn0`du%Ku$o z-3dc(9y{>($$~dEZ#DvDN(<<2RY1BZ|6@tdn~p z3&y0jL~LyfvP`l&tJ@XobxJI80VS&sDu8h~ot3T70TFF1h-h~ng5N9v2+mikI`b8p zmG6LahKeIfihso$km)Dt*N43J?0kRN64x-+o=}%Q>abzv9AZZTC za?<=MS{h1T$==lTXyi2K>i>WKze52^mXrH-uvk!etB%gD72PX)diz!#Q(k@SamTMY z;lz{b1+G-zYQ2hM4V*c+WoYYt?t8!cKj5shANU|uu2MxSRkph5;)gx_4=;JdBOhgj ztsecSk9q9l9{+^PpZKIFKjo=U`?EiP`v3ZiXFT&++jl(sFaPSVpR@Cd=RWUmu6+Ir z{`Q40dhy@A{!?+P_=(~_7e86NwfHZ^e=UBhI9>d7 z@iWEG77ubPtm>EGP<)m@SHRK-|Lbaf{S06G`1+}uUHx>pdc(e#>*_RLySVzvk*oCe zQ+(B)IsW|DaO0O8=3n?KnD0Bxt>Nm;4)aOA3g)v8^Ur+Mo;hGX5pJAxm?^#rX8tC7 z$UlXvZ+4iE^Hne(beNCvwVSWs_pcO%N{-{}x87}S|1n(sCR>Vm5EZBF>X*EOl!&sn zD!##0syo@$clvWFX=GR5evQ30b$smV=Ny^BJ61@9b`-3!{(jZlQG3Q}aBzhZGIr%7 z{yZwcSl2)O1?vq3Tt-SITZA9kl(@SM7O}&PZ1A^`L*H{6%siU%&NfA9s!>3}65{DPRS)dS2mK1= z3jBFKaMT&Fq<(IsIDn<9^Cvs{?WT9!X1!o5zP-?A|JC@0J@iRC&zgO=@hO|%lM-LQ z{dW7xu64WmBj+2on=ADo`S$k=ihbgV-tVn`GdoM|>bw0bJ4CJRUbfBC4R(4aWp4hw ze`UvJatvR;>0jA-X;lB}v=Ng1lgSVE+C$g@nS9ItWZPMy=Y8rw?8r1_vDhTlHC4Nmi|M~WJ z-tGT+rS<5=oEh=ocGruX?|{-w9LH_7ljXKZtHaGZ;dIm>i zf%i$j_7{HV&wpa={+IW0>R*g{Pj{+b^BwEQpZm9)K4#bc%;)eC=e4K#6K?u@yY^JS zbK1W>#lL;tQJ(B9`&Iw;B&X2(ziUr;qQCct{M+UJ?MvS4C;0Q`{M+Nb*Y9vC@Hl_^ z4_yd6*7@Wox7oWt#^3f`{=7eRe)z`;>*u3;l3xuQ$NY)oz261@AN#krec2HH$T{t8 zHRFuSyjP#_J1_MoPaQJ6NBOtgT*yDt+a3LkQT7pj?K&4Am-sb##*FYEI@J$-$-X_@ z$NMXXjV2H4Nls$4_WzHeT!TxRc zC+yofj`9uXoCo>ahQu~F``Yh+}FW+Eq`vCuTgZJhB-p|+jxZW@H z+TZWK&MP1E=iSG@{hNQ=>fip!+YLE){Ig%%;@>{t*9QI0?<`th&h&4ubO|%y@Ap%m z%bWe$kH2qYw8?q;z)!8+Mt}F3ON0&nem{Gaaou|7iLtNPoo9HvnOE4gb^dMsSB81I z&!KlZ*Re7$rT2lG>{``({heu}62_-=>$J<-)BJfi{oI~#s-qlszP@))asqdLeZO(t zDgNy>-?VRQod-YXYT#u5cFULSdH3=e@Mbow!Mi6p*S+#;d%}rf=CFy6YbW@fSNpd$ ze&?rl*|p=npHnW2kMldP_ix8K%1tx&Mys7l-*jW8?DTxEk2HrxNgp2hrQNy8zkTv! 
z*0Vn6iT617^!l|AIA`_v8@*|}VRyO8+KPNr zVj1|%enJusYFI~;uX;S;r;dyI+llNG-sbNeEFy415>Ws$klFXMSx4L1N}KuKgciIm ztP(1Gtw-@NR(0{zEwrVG(%yf{Z<2fpdEhIPgG5I-agn*ovF1%G}PvWK- zLz~K@O@cNpv}r@TTxeqf?JHj7@7<^<0(6dcqtKqnO%sMTo<|!4ttPaZp|M!W`}X@w z?tg`qRh0ig$j#Aq3GH%j8a1@xJlbMEkr$zj7#gcaa7*hfJYrG=HK3{Q@CfUm@doE7 zfOn^v9E7Yq7){VB7rszy!Y~%vA`+{toK58BSYA*X4)G|gg+p|^ueUY{qS|_Eh+SEL zO|h)T7}h&925YiN-4p1TMbK=yOKcaX@#uL>kCtJpsjER;l}A)qh>&YDtd{k18!OW2 z`3sCUOj}<*;+?#l@q9nO&-aV_ZOzW<)nZ7q92_I&<7rN)tU37b0K`*roTAFj0#9nq z$QfxExm1$a>oJD4&5|Cr7-8apn3vX&B}w*)U{M{}$J-Dfd$iCIx<)+qJZ7EGTO-?GS_LV3h@k);jXpDx8%0)WnqYt8)x?}+$1 z4aXXScWMm{m*3n^d@$mE-QJ4(#mP)Ci$XSXvf7H1LpWR$jbDfyD+bD6L@kpyXkem3 z!jGfdB10o52dtJm*cyvB(FTzpohoZ<*~Tq!S;`wat?@iX!&mWGUWI>=Kv9NUcy!Ag zg+_i*o0;V%(X?1&+F0W;#u~oEofv;C_h1Ba5|~=gE~_l9QnswVj?RU&c)t27@@s_C z?H0C=^Rc3{Z{y^(U@6-l%S)xZ)f%K9x02%!NkL|Wh9O>y0`pFo?#`s(hb0B0k$$Xo z9&Rw+X&mpz+Lou$N{@uGwYF!=LsNk`FP$_BPc#D7u) zJ81*^r&60H9q(R_!eeaY?tYHKPk*P2@}@2%3?$n9Mr-n z=9F{=CGb5Zp{W^?%R~GVTDLYsP_oOKA!_Z$W6s>r&v#~S$Ujim8_f;1MVz->vTob} z71}Jf9oYyn)PJ5^mA0&X^?#toB4gfqOsc`r_S6&QmkKRX`yzR(T15Gae}dvu|E2O` zeH8lA((rGzGt_2cIXj~!byPET^hYf?Lj2&2Am>Glz$c90WkE2~a#jhpL}nV8P5;NV zZe`QUMwwRD!V*hcEoC4EOB=gi2I7bk@yxczK3}qBEwouqX6bc(TE8F zpCB4BezANF`e9u4zJ}sVYB9bXmB&QoF{AQj_fYHn(LEGa9*U*Y<*Fd2PfI>ZF;_hT=?%%B|F7E+8|^#WfXDDHhSNo zG^)#Z(5P)-;xm-#_C0xfRDX!e>f0#0me{iGIvw2=m5LI8GAw;xRaRm|zq^mki__v) zu*y(zPx{Q7Da+BUHg9HU5vvG?S8l?a3forjylqdbd|Bcufag;V&D1FWhC>-VRDKx| z7P=eNqp}X=!dHt`5BR4+2_tzu%y2~8QJJ@vjVS$|C(rHLflbeAQ#M`mM8BOSpug<7|zfvr4BG~$!L2%EUg>q(GO z{1J^VT|%Qv8!A8zH>DSb1+;}x;pDW)vF#nx`?M}9L_Ei-P$#oRbF5)gF0`UNTo##C z1Z(Pyz=g7#;*hc+w4u6@5B0kUGUNnmnt&OXg}OB3axbToRfTFsFPPknrY4)^7OR@Y zqjs5_qJ;9c#VS%nHr0sPRF9}f$XW_6OF|9>hq8qCY53F%W;v(AXlk%p8(%=Q= z!xy`%gdVZY+sp6l3B2LcG~I@6@z+Pvx7LmiQh3?b2ep>dC7K6YJ2*mJaokn6wQz(= z+Y}`oEQ}8&rHij1o0D8p$q+4n4Hc(0U!?3#j(GGFpO|(gkBth{KuM#8RxC+O0K*v9 zVy5F-b%$*4ecI8?Wsyo1t8G(R>UlMkQxb||c?w;}n@umZ4Z~fq`_Z(#-qgL3stGMW{W9RexI*uG363&tSf60(p&Pn 
z;^QiVrlMFGuP+Sr!?dFW*(LtjWA4h!^{dD;B}IjP2&yO5>BiHK;OCY+olM)zVTwR| z^3>niQ^jjEMGMPmZ`gdgb9%O6^M|nE_;|go8OQ6k795XqPR}n3)4Z?F$5tZGW{jCG z&WUU^OL;`~c20j)F(qKv9vPVK|HEA64M=mK%_DGGnW}Kw0Nt}8= zZNBL})A^`d5@j*1_S4q>;c7pYYk#BK{~l2ndHO zAUuo<3tWH_mE1ebf-qm<_xXxM!9_vg*+$UjE9L1v0OkKnWodHOr2#zCg$Va5Mz#Tf z@wIIvvWn9kTAHMBIdZ+%q-8g9QDELFTgJ#TwwD@eW(B2$ zdP?W2*}xt9KDFjkAoOE1pq9b4 zujhfhk6C#`^#!I-FPEkQOqwG$W0$!pHsL1W=xK5Ev~l!>fun&}(oieiR<8ji&PLPd zLsZJEJ9%aO?2$nfSp^{JJ8hQfEZ@{kuR4PzCuwOnx$|nQdl6uFg>`clpBlAhU0Cvn zTrXI-t_KNr z*1wqF=;2*weUsXVjkKeY>Jlj?mYV}LF?r3H{GoNR&73ijk(p+}@>-Vh^YXm3O|L{} z?zRxudynRX_{j1?TrYN4WDOMvh>)CgUnVC)MRs#>i7XUSp<0B@=$L%sV#d3TEIX)&9 zA2SyJgL^2)yD~Yxw5%I~Tb*?|PLQqL1UPbB;v9EFeoPZN{!o6tE8LqN-C|yj8nxzL zN}%2`;BuTDzn(#b?#an`e`i~ab9X2jlnO%}IG7k8?kIsg}9W!S1@2!467?!!V;6DuNC#H$xXsQ@hjT99;1K zfR@pF&z%;#3bUsz>niBQ?#evPX&E(YJx}k7mJ!O`)-r0;dbTd3Wz?{Rmf`Z9Y8gQ^ z(=rducs^(0>wf0ZUALD~phguFfbKdcKXi$T5d=)3f0w!nv!{vk7yR;RITbT|8UjfC zJ#YM-=*CHIMD&sb|1EY+l>f)$0H-1IF5v2n!A#{~N|V4$3ufA2wh3me4)Y^6>jan^ z1(R@f!eGYpFk`^f1XDAZ^KHVO>b)t%z1bxg*=eH&Gn|K6Je8P$U`7n)Ji*M?VWQp~ zJ{6erxw^nrNOx*Xo6;;W^Qufdf2!vGxxmz8gku}-qBCVNf^IDL>^R?X^OH04bbLeq zY9gl_N_wFh7jxC~m_idg%o4IEHssgRwyE|MjiygU96Y#)th}JWg3GBc4iqdxtEFp) zTHeyTF7h+)lr3+eKM6pE8XG)rGJV`+!{gL?YC}+#ZZdHDuAqpA^{F`L231*@-XOI+ z>}pxHOJhH2xAd#fh854EiwBE(`O-_#Q|bS=rCa$GXFo8IbnjSID*Aa1cxxzZwflTg zS0^^2nvxRRWq_Q4fKbH9Xq9D=RSTPvqFFNIrk@rzK}H4YXEyZrlB9YI6{Xr z@--{;^Ns4>CH^+!1Q!-Jfj9=DVuj!8sdnDAxOrXFswL};sp0;SUWC%s(;vU}jdJ{;KyWMrTTh!j4R!IpqfwSvA9 zhZTALt(XHEW3}RW+np1Wtvi#7Q%uf%r}${FusD#kU))y$f}lG?X5$^!MXdIx;#6}M zx1U*BBh;c>3@XM_H}?_aqBv|Ep;v1p<{b)I%kt4Dtf#h z2Ao~8d%#(AoJHXb^P3jHlg3k~9bkHfZwVV-aerj$$m> z8kEs{f1r~V|G}=wPMi>(RCMU`$_rnS?Pa;xI;JrcpDO+rk6>`^Zw-i(jyNeqt^8{_ zJ-JPY(b;-J)aI`yI0x7H2+lgrtXZ)9re*u&`d5JyRTjF~V%<;(qK(y01WbE$h9gl! 
z-FQm1(Wm%L3$Mv9l~5LKE3=SDNBKAPS~Vg(LbS4p0{4*4L}_KyrlLtx(R5@h8i0L& zstTwb3sj*~*};X=UVfQPmk}6+0&^K#j5u5L^s?!%StHG@;l@V8X(L(6THJxLc<3=Z z&nb){UF+)>P@wLdTixSrT%0k+X;SxCL0ZcYFuxRE%8&Khqs$F8RPKj$$DEur!fN>+ zTsD!u-s)7l=}&AcHt$O0FNMqpz+0vn?d zsBLBhj4lZDk46W}2K-pKG_Z-4ulQzMx~&|Js>SNtEWcYPDl(oZ5<`^gNCG z9Z1#Ev>{VSI`YN|Gh3cVN+}a2RAGv_S}IMRoirhGX~r~SGNTv?@BV z@_e|u2iGz?=RKet2`F=pGA9%j+*O(GMYaSXl+9NBCQnxs#9U z9lJ7_vW<5%y}-X15YfA8*g@MH&_*3?RA^&DqZ)V47sj&ev~F4#X}K@79QT&vxzgEh zQ9mNtcBlPlSw=f9#7#c}BhX1jDDrF&^>He!>dAh9>YG$t^K-{arrfHvbK`qUT0wT0 zrG0z)^MWn$%w|bm!IQ%^i*szIN&A`$R9FKr8YcaLHBk(xM*t^*c);~n_=$?hSQ>BFA95@-;!Yrr$%)tX75I@WFjH- za8E!PakFVeW|Nhr90jFORT`srmj5#?u4`ty#%Du3J{!XLNNqu>gQr$dy_@hv&c-P@ zgFERXjZ)Q`95j#WA-bKY{L>|!#TTk2&dA)oWvV#585a72Z)KL-)D}y8)Np7BY{e$T zJMqI!3;Ft#rHOGyMM$n>mc`Ovvus3G3fc$qyFbPNI;npZFaD>Mm#2~Ec5d#0QE z?3fPS?m20MJmQCqaSTmuvvC_^A~u%l&&e3Ig`ZoR(%)^U1br?Xp(WIeBLYfGLpq-H zx?j;?YW-P01oOCjv^)0H4r1n}o|D(=&15zKN^?yRI~5=d&9RSQ`TxIKw|8xmKsP@C#Z>!1;sDHIVV2rw%b z3a(|f^6J8qt*VNjIca#l7%9AHUKQ9d9SL?WiK^0;ShwhM*}0@%Cclx`X|M={h3Ht? 
zqn5Qe8^uf7E98ETdBWSZtj0O=$_5L=j4$T&=IVKL&aFBI$B5T3JeI!8T^gPRY9E@U zfsBgPi)(LnV+5NJ>8szSnwT`_nSK%f%Mn3JMZ{1Kz_B%7aFNVLR>?zTsLRioZ_i_R z45h(#6^`6gxhIEyIaaQmSALm=04#;puA;VS8+5bfzv|~v529qTwrmSdtechpIy~UK ziCbo!Zp?p`jF{f4O-?m+9i^6h?t1DxL>`o$s89)B`RovewuR}r;`aG_=h zWBG{MC=s=BA&eWsIYL;twC7{cPcbCPl4-Vjs>!-nu6AokWaVkWO!&OoiUVmvt3vtAzIl1i zMg@I;L>{6j5_AEj<YuVzJ@CVr}JJHeGg$zwW257{W-NzlPzjQ6Y>P z!hM7=w>Fg=j&Nix2>0c}A{SuxwYj-6x0VR^T7F+xD>mOMgsFf~2z@vxg!^z|))1!i z2vZ=;2w}z$hK$Es=Tn7)6uU$8Um-<;*KYF?poRC#KWOJikAq8FZYcj!fkk+Fw`>@W zM!^s?PV$0H8#y<@O0-DfVH7S~yy-fXQd*h$;L<*rVf-sE@CTi#4$)K2^_?un1#fC7 z!K8LF`82=GFt=c~O!_WABe1sCZ2%@pY{SZo8;4p$MN9k=%p_D*N?97zm3y$KQ_sI+xF%DcjIY-lllacpoKWy#KRQs7+``guCL4bLFrc3$uGqj)MImz6~ zngauBKM~r$!k&LX?PZZoTl>j;`w7}lsr{6-->mkdx%PY2et@5A)_yGCew6m(YCmr6 zH>rI(AH-aD@gNa~m6|DczevY5C$kLHj)V*x@_+Uq5mEEE+Oc>Il=*!Y>0tPv`O~X} zVwY3S{VXq_kOm7KRov{sLVHz1CWWdq^fV>W-IhCK!>D+n#jq^xcN4d^+L&%qZeqEU zlGGYiZPknLrE6Ql{sWzTR6UoAhyW9jOmyv-=NC+17mem`{#b5RUnT%IGw`bZpQ%Q; z)SOGr^@jAZGgI}L-4VxnG}e-uIoV7W{?2m>EP}PE_BVgZ;-_1!r1S^0G7-gbVQnoI zjco-W*?Pq}g->s-oWn|dRCKQ+E4rtO+Xx&nfByB= zw^U8p+8caoD>6g^+|txaS8QpjHT-((dhUZ?r})HK){bA}09w^Ul*-}P?HnVjfAe}% zCsk?}72L}Fs;?wZ*#cy=cIzQcWtRcLn{|}Gb%I#u=z5)NpmUnE-YHset}07Ow#3G# zLJ+~3^kyR`QwbQ$1g$x6S)XX?zCA$-qC6etcWKJws;imV55ztg0e}|<%AYZB+4kcO z_|vo>*Fqck^d==JgB@0!EUk{)>S=X4Nw%|o(&j;>)diGk((0zp&~S0PT;efpTg?`b zgra0diJ^EujeegeD6v|fyg(11tP7VvYyGr+v$_Rymxil@SHDjil=L&PcK)qpZf{6~ z!X1gddWGlA>R|3^x=R$<&Iu4Cg3Qy$N#P1sNwlkV00E@_W-0yy7FCf+#swa2jc_Ua zG~xMs345{aFbi1e#$#z-T23Rg@u;(y4+0*V}E|-eP+h|AxqkYJFN}TSvDYW6OaxTUu##VP` z#1MkNa{ho_=7V0@#?mQrub3iS!nB7X$-=#4)~x47mq%>KTcFF!X}8pv9I+TrmXI^D zp}&up-2nIWmA_Wh6b^@*&7iF$)trSfzL*y0-qgRk{C$je@=3(On1UPG2;Q$rL&tU83x~pAn_V!%t zb>Tgy5ny=hv%AZrjENL_#Z1yx&+jN}*OCk->8bzrHL*A>HMs#XDc@ttUcxt>aqmhs zktl~OHwG78>#2Ha`AJ$WO~#1|H$;rZ1S`c!v^y#Zf`n6M4!8>~1 zA0Aip$JO*Wol2Zc@VH<7%qrK`4^3KOF45%koTTC<7sT{5){&#Vu;>hAOf>4VRlZ1JU=UwFm4yyB|+eVncP=?ZwuMb>!DdZnUE7UoX& z*c%QO-U3(eTpPuk5L8BX1=M*z7UPWUxnY?h9tAC`+Rr 
zW0$nlIHM#%Nn`A`{;i+y!TPr?w2F7V-z-bmY8@kF0!opO-RXGp=vC^U&2yL!Yc5;B z4(zu(%mG_p!u~M_Lvvt+SB>eg!lsI|SQjCzSC`osj^0FPxyik+mb7j<%B9rJ?f@2Ig3X-MT)N0@q6 zY?7u=!CK1A4xM2u-OX4t$X%#CwX0i0NG~Oi`I}EeqOr}Q!NJXP!RyO#*y8qTB{>z!N~ zr`AhMvPLUDB^hRBOOiEM)6!n;m@qxcvK43RJ!EA1dvruYU*cCWPUFyvY{)$;-o%RB z)EV5vdze=Tp3WL^O11S|l&E z(}WQZEA>_Gw9=YYIbv9YH7pw~>{_=SLbwUxXi9RH>b!M8yd~1&{?-Y5V*P;1Tf=4R zH>@|ZnVU{@Z*0qUa=XdyuB?$_PWxBs^X_QKG*IL;+v+g=N>_%FTg_4^UDz(*_mFtm zWBIsA&pCy8%~~eNbI!(P0T9o|1s_h)q-HR|2|4pxxao=97roR7{Bj~aULB304A0f* z%~-LuRs61O+?c3cGOX)%d7r)!ZqgJ-HfM5Nq}e;*L%U~yTUfudVp7=p_jX&Kpqa#% z{ZQUczgTTfB^yb~1690Rj-VU>`0UC9llvcCStwLaPfFV>>-cA|P&h~5E9-^|g$MH& zugt4&=_mU;yLKu$snbtnEc8^)Be(U@M9X&n+gQ2U&Us{A$!iW@ z%XuqG_1^10w(+Bm@8E^ERC{|dVA59mvQ|gFW34(lUe13k(Mm^%&}xWQ9a*b`S*ss= zD~^o0J$u%*I`6BqvXxex9&|#fa+uir;BVPTR?D4eQPm?9}8Ve)E2R1$Khm z2N3el_s-bs=}krta1u>RkllXsy!Q>cc3(g^s|MmMc{Zdwm|@##j-8eJ+x;1p{D!N& zQve&gAN8~MHr`q2#CPG{__oMrdd(Qo$~u61;Pj`$Dn*SSQUl?PONv51j~ zGMX&vDY1fU9|9Pba3J0y+mC;nBpeXuQxg*1_!mbdrw7zh)`7f@<-DcFz{C2KAN@1q z%8rhS9q_Sk`xyyl&7fd}cg7l!rKHMyX!$mJ=0| zx?NtDndVr7L{$|AP!NDzZg`pd?ajz3=Wn(8IZn|~@vvG+BrvPzXf>`@$`iq=CM zy>NBukQ|BN=YdxgK`{h6C4*m~7XGc%3dhvE4Vhik5R5WEf^%3YapAfp1j8hB&E8xx)!E9kroS}q5c-S(Bf1#F9Cc`Fk>YZdZ z?d9avw^Y%(?z}-ZbLtwLxLlAcjqvNe<5kjtbGK4fQ>?iLQ7f!5C@%-=N8M@iir$0e z3idG+cSg~h@ZgsDt#R~bldYBMoz2JRovA({R>}MNDrv{Wuy8r79rS`Hz&3Oj#&6Qy zjcz0I94i#dbL8W2>y$z;)8go9T;4{Nq5Zg~g6vUHnMKps9g&F*W~+h{;*_lK0u-Y{ zWHOW*p&+v@zs7_Pk7!wO;xHxyV08-lQ<`?QWkTxrQ07Z<8vEJRzzSN078P11+%=o* z&Q(U==xBM(^l3AqF%4oPS{nhTWJFJy5xuSlJ2IHj*rf9cfbcnWhW~gM7QpL47=jLp z2+6os3(59iNm<5+uv>{ov+-?;;c*Z`b6bDZ!hN&#&6w*H->_IlmqJzIedeM0d5l$# zU4wvcIh%4NtJDBzQR~;fe(Y)A9p%?CABKvyK)fEI!Xg0=TB1p8m6vI&B^nTBFZVZ6;93ecmsoC<|iU<*QHX zKktI`uc@4Ob;wV*`ghzcZK(QpR)VIJ#z+VLWnzO{_>;9wsin^I5(@QkZ%sjRi=T#$xrWuxel82HbEolj#cQk|xESZVP1~q+NKu9Q;%?qJ1 z5*9ega+#aXO)aC!|Mq2CoA>qt2$H5=jt!SjXrFA#0r(07fhVAn-)cPXoVC>M!FhLe z!UA!Q-_*)fjIKvoTfZZzEzSq~D3;DIh=DBTw?qe?9-hJEXn2MPT{D{?ptsx`(x|FZ 
zbmqccLvJYv8WmWI-nasAI+Lp@Wey`}HL(;yf-=OA(h{*`sx|7c6y>XR0V)dAYA$lwZ;%V;s5O!;W@QEz zXO~cEdAygaOa#;9XdRPHO*mR0tu`;d*oK9gO?NshvScFiV~!YCC_t){A1v#!$(U4l zNXv<%=ru`fK6l3&=B`?6c1kvBcJkR!0ysBTy1mszv$0~TX}UDhS8Z9$FmcEuZ9T^Y zm5Gn?C`qV8#c9nxZk5C|^{wG`+Xnu$*$6pUB*T=^a+>C;vWRWV9pB7D{`Pfwr=!Bs zO-Q8Hw?o#93e*m2s8r=-{R1fB$)PUF^j!<;=S_G?NrC-GLooHNXO z3v*&A=GFUNPEnRR<^f@9Ki;%qPUbNuz?>51lwqDC%+aNopZX7u+$JL@^mDH;Srtoa zhB=nU90hY+nB#`IR+tMXE_wHF?FEzN#DID9L@>2~Xv8oVPt3jh!insw5azI9o^147 ziur>)=Iz4NZj5ARq7QyF?tyscDgpRY}HEdgqi zp-c{+ujo}?J2M;A+Lfy`Mm<>{U0WVGxwOqZx(%1%Xkitf7^iqooao^S_@CKX7xGL* z<6`DMrYbZuTQl7^XF^{@@j1VY(ASi6C73P#=}py8zDt2m>E9?7RaG&#tSLOlK@%7S zu3{Tyh9D&S-L%S7>Dq9=CT2r2@!Fs}Gi@pmh@oC&ORrF>q(gruP85Gk+huNwoB=Mu z@EY_f8}yUHphLaoG^}|rl1zasa_)+azEncX(QoGEt5M4l&}y3rNrp40e&ee2LP7jP zxo%?|*)%Lb`%zL7{HC0tg_%kn@KfL>Uz=wb%%D?Zx>QEYHUgR+3Nk(A1_mGItuF_s zyx)Q&cwovAi2lszvJuXX$4TcvBU7Bb3=^Hagdf?ghL#k^IZ#M9jRlo+2ZL0B~xA&^fvMG-2v72~%sPvHA;b zWDA;sOVWrfhRRNt5RCz)ylql+iR zab6&+EYRaHs*C+|o)AFlam znI#IeYWs_paYpx>|Qz6tY>8fYRJ5y#j+V7AAn6NUo`Q?Zh>+SS>XVnr-8 zwVu0}Keoq0$G&E>^+k#E>#+RAhnsc3Es@r*94!k34R>>ZaP>qh_z<)aJhvt?oH#yw&@VE zg}5;ZRp{laB2cU?!?Gc=!BQz-dff_Q;*M|WM;=;wyjIqjxO|-z&+$3MXNV#923>72 z?Q(FVk8PElGZ|JaUp>D6J5@9jm$yhcmYXhZtFG~?dd_%gjOaW_%V4<+D4^H!MXn!#kMQ^ zRWDO(>?;bM=hfBSlPEPwIbHAgE}vv+DWQv`tYqRfBzAS^uM)_Xu+^coD5VmRZo?@NXpH3Nfjp5h(6I`S_$%Oe^vbQ=)TqDIrIt~(MW0?&m>)$h89Ra$wCXq zm&j;O$%Zs(S=;O7*IrdlIx}H-f_$}5&TqJAxxNypEh~F{>}%KLEG<*BG-_EdHr`Qk zJWRm^X83*b#w*DhO0Zg1iv2H1^hhL%0vbLtNmpC~w{$!&8ExBn;T1*}OL0>;GI~qi z*`9I`g($&0>w*_mh)7w-zAK4VDabeKNj(|4c-2*0@~$&MA_c+#)$QIh9M)EKO`ZD6 zt`(KdDG~K!(xyR9i&)+`{jz*=(|!O*YhpJp(IP4N?-=l>sxUOSV$}J{t5!jmXp#C5 zSO5*5ITv+G2SG|u-Ed_%npYVM-8_9E?gPN=wyF zh(jymHeKykt|9SK!7lNwWyoI|cjSbvsXFe=>P z#EopMQX!Pt@c1m!wYs!TsrvH+$x5Y0>f9RMU#`N+a|#o(7O+t)<|9AWmt20(9WoHK zKrdUWo`WBE-7@fD2ldTe7c)K>6zl_pnk~sJ(xJ8G5Ai$&+p9do%+gz63aeErV_P)A zFsO%mY+DJy>}cIu$*Q&99_(p(Lc*6=mx!0Ilf;VXIFsfOl55EH@^C$uO)~&BWE+u? 
zfK7ARVo7fAj%#IIWN7PMy!8y93ieuO5c`CIERgc zhH57t>6U!Ez>MOS`s*|0>&)6`!p~=b-NG7m7R1%;AhAwwpUiz8%J?=Ct3nYbm`;Y9 zS0q_8K@;o_-ItY_5X3ywQ^{uWo<22)pS3kw8GDvav;aR_4N0lIuThK;bD82-%6}Hj zn8`x+DG>vsfTA!>858HnSjeP>6P3D9ajIHa3_wuki~5y;^>{)PXTz4{ zAZIST6fa?@HS`P11ljOZ$dheHB zSnVMJveVYy5RRoo+Dk1z23DPc|1Qu?D=F&&Z)_|ff2nq~#S0BG2sCz(?n=M^l@t)`a@m|(O)1q~?J8@i@O7RN%&{^KrYm1r8kCr71 ze(-JNLPvdmG=pH}j!Cl;Y_`{w?=USl&zVzA?zI?|4xEzGlFf}8q(8moF|3Jxd>Nce>7vHq~Zvuvq76tvA)V!w6(a zk$n=(Jj>FB^qbZu&6Llq8k=3}a*Z>4>Xjis_B}|LC5l|wXWN7t#vu}`*gpQq19~J< zu<7oVNR~FxgmGO3K;0~?OV_3Mxit|U)6~|Egst~0>vtOaZn?{&k{y`c@OYZ9shlLi z!JhES8W#YjOfWB)B;D*N`OwO`b0RM;_v|wF`ouU(i{| zhM%u1JwIEtX&5Iwi2{NqH|m;z*{B;)v#;13t%wI&Fd%jW1A-~6#VGaD7N?N)xm`b> zJWBs41&MBq9t+3nR33Rk$Mlq5TIpd6ws#$QOLc)Ank($aFC^_!oxXp>^(Hk?+F3>& zuz_&xhw&{l0=CmZfx`od@jlAlB4AjKgastHYyn}gFfw3|!ZWi3AWs4bSqN!Ly=5WD z#&Yw%iQ#SLJ+3otpP`hx?#hBmql9XriFH=|*>O<187M%VOmb;w1I- z4_uI33{((Iyd}A{vhieAH7-RSgLdaic( zZ~sTKK99+GR5C!0*&B*Ml=bn8uq>|wk0SY@T11sV)uOg09tGw@Y{J*s@eFx8IdzDM)3?Zic6L zCiOvO=&1RU1uC8x7Np`O8VTm-RyspfBBSXp7*2H19$0|b1)FVVU2-7Lad5oV?b3#N z#+?F)e$vu~=!xI_=0%UZLME)%sQ9<0F;9vnuSm4 zMPZ49V5-O^3rH>3lisP)QBg`tiZr1ei&5-8V6|LkYCPb~vEoCp14_#UrpRvTI~F%e zLC{g7U`P3l_!Y=ih-)Ko!BpWf2wB@2Ee#a-azvkuvV2c-t7T9{(K!HmO*3HiI!^Am zG`l2;zYZAKJikB%uokBpk4tl>*eBZvLL*!CDANM8ThDN`{JK;qTuAUTcCn>7f`2UD zk;H4{9Z5)dCj=8R?$Pf&t>qd^<6hNNsv^&$1HzBdnhE5Iu+iggu25e#GC3hfJ*5UV z3g|84O^pYm^+6ZCL0-TGq?R$K#xs!!eA&yH^~B3zgJ?{!Hr$f3#g)eWTGE%!U^Y0> zo?~5^KTe#KkIdrx;)$AVyl9uXDP=UXJ&wbdR)~G!IIY7`Q?T13Jkb5A%n955XtMcXb=T0>u+I?2V7*=y(TooJD{4&0cVx-0ww#6k~8c2Si)1e{@Q%{^TeB!C) zBk2mDdJ+e_lAko3D&=Jf&C2R2|HMKzN4K~yi`@k12+4gqtW72wMizm*9btd9SxqZN zae~rVMpl6mezKq{Z(%3r0gz_X$XZGob%5%a8e8#io=aEaKMxn{D{RY|J)+IRkb=2& z+FYx{0-`LP9&mssEEOI0(MXu<)hG+;^!3zr+Hz)+o=l;%_{q23$f#tYwLac~2NPh5 z9xRGQ;m72gYLn&7NII|VIlYj~6?XI&*A*&7+`vj9Ib4vK;EyBrNBMPam0~DlUE!XM z+xe$L#h+4XTctQuJhQ)-fCNXEZ>*NM(sowq5tX9+#K+OBm>eoFMIL5<&g#!c^V@V` zdwFziQmpn0VpH4WI2ZXh{O_2@G5OPHZRN&?=vnx2=iXF4Sy_aL+wEbj14P-J>hsCpt%;^d1i* 
z8I6z22z`{2vU*TIRH&p%d&T5nf!*%zg^ju-#AMg9RYc zZ4#aJTtdqyUnEhGK0@~G5WSb$J66sWK3>kW)%de;9*sW^%+c#$qbx8%U0QL#M2-Vk zON=>yX!=M4^Q~Y6a@@b1VXQt(rG4m3{!>qnj>%Z*oX=W&QwHoxeyIi-*E{pkVP1PV zD_4)4UKhW1^4SKqr_L1^Si{j%5iRjcCtu97eIL|L6^tB~MhUHSMOQGfO9XXe8PVL7 zrlrj2$O6*C%qp`-E}|S=b}PngMT}=nlEy*h8T*BhOKbuvsH`Y~w5YPeM7CwBi%w%_ zW~cQw^t+{%yQJT#>6-Q1Ojl8;W+FQ_U0HhOcLF(azh$~AR$=|d%&~sExzlL65@Lw^ z9a|%J*?6_|^#!vIf&~%9W8}r9mP5ARS-(!Yd6e6;MdA>anORTMcA1-E>uCb(X-d}9 zlvz*hJt6LSt~^33H1~unkc&F~#+6)6;RRB>wwUs>72Xa`w&j~z%68eeB`I8-6rZQR z4-WA+5f5sEl}ZI<8mdoGHf?{pSRzm-sRCYluEHWF6qoF;4O@ncDdUXw_01P|qLeo}|G!%mAh%K~7d za8@`1&C2c7>;c>!-Pkq?3yzBg$BhMvL9m!g;Hn+c64|o_R<;YUr<*k8%Ic~XTkU8Y z4wUF)iS@+o{U=$;oKD&OCvtsFwa-@qjU~05sTKOFVkR>M@^iG#EWk3-kab!8d3=9w zrQ@Rh_O<=Tqlo!?2UjY5Z7=U-HBWj}4hnjSjSindCb zQ+$z+Qo57tP3TU-yVvUsdb5?`8-6`Dphvtun!^l|_DMhBKS^S+nDRv-dtgc3kzHZ+GAS_s)!3qme9W#<=fACL`i0 zX%olR$_}ViShm&}oRll&_sVbM+NJ9G&A47!#_OW?d95)PW|D|RSwtcm5eBb71IR=P zT2O+3Nl>UEQFbS+DkwoohD4ia6Kx`h2wI}IexL8}obKCqW+a1cSa$0nJl(hZ^f|xt zJHPWgzkla<^p_LnYY16T-DRy!3MQbRqu52-#(YTMR8>TQdCK^n8B-<;GoxQi$`cIg zjSQ20fw2l1ek?e<86p^mc+@GRd0e$g*}28(2A*ra9K45jwkjNi@7OA#EAvtt%#^!4 zRgUuWAKp`xcFnhKAnR0yAC-+^P((%2>bI&s%T%l`lcAV&)mrVzMjd|u+;gk0=HFFC zLpLh(vB(mU&vnX?*;fO37!C9xS0qxAL82m{C()bXAxFWV7^#}>2FWOmdKVLMy4BI~ z9$fRZruUPt?-<-e6OetJ4CSr$B}3!MI)|%DqQnl#UbO6WJ^PT3ARM+-%D_11Ots-;~=0)2fNyoFh0*xz##pTMoB9rmY%{YjpsllUGN?OS58>A|9T5s^pE zY2H!C8C}7Q0?-#Et1K{`f9q>K(rPaL1-FF<)HXzF-aAaz792=DQ)-`j59vKjQ|6Ir zyWd!vzDY*WNVKD%&j0#uc1sk5OCu@-3|`B*@r&8f$9|pcQh@Et$bY)_POA;tR|0>Y z;1D2)h5K|KTofFr{WzK1ifXAQP3DNPQe$Q{7t&(+j%9k?@=~0$IN1xFbBi%vCR)FU z<@OHg3dcJTMmZT0uw}+A%Y$x+*p75fgc>&<3GT-0{upo<`F7#O!a<U;u!g(8(g`*9^ES zVt<7|&*D~+^#Z#@Y}Hj3DOjx*DWK)fFHs)WkxsRMx~u8n(yi0QLH&cQs;sGk!?u6a zv|6g?FqOh$xwg1iuC>Jy{t7!>yLhn_xu9Yi$7N*|*3dwShRU$k*T$6_YLzLom5IfE zx9hkQFtUB8=3!GD;c=S-%Lo{ENq$PDk6kM#Xmx z>qrDx%Sw}qRIjBKq@qqmE6-{TGND|mQoyQ~M)5=}PT(MZ!%{L-vhh)&O1Dry1#l!y z5cH`%gzbi3$Bz$6;I2Xgxu93IW&5aPusLRs!XuocUPggIBow`w)l%StHrB$7YBn1{ 
zqnXCUOr?auM0-qRH+7>yTud~BZ)xb^!nP@uh&qcYr2^ueZCuPchWM|F6p1Az{UTw; za#0yC{_=Z!c(H{mqet*oX0enYj>4 z)Zd>BvfO{fMU9V2rW3ks4MN3A2A$7NYk>R?wg&0>qOezfV^NNOCEwBm9Bc-6;<9s? z4;A-uir6ojj~P|DFp9R1(|j?SdtO;Kqq3%f0FbCVs5uV@a{=7xJD-0#hL!2JtdSJz ziI(N}8L<7q95(qzY$kIRgTn1230(qhc{jhAnbVqSE!N}B?51NZgjJ&Q5s8%4vKu|$ z4>a&MH_4FWG?Rd|-~#cdShx{2;I0~ki!9XYLl|*Y_?7pJTY0h4B0-2zEwJ(yM?4cb zF8L2Qen2R|H3L!VgNuAn9PuW~NB`0hM@YJ&PlviTl{6 z9dY0ogdMjfRx)^V9vlS)sE&iWVgN*T*We8?SSJPD?|q2D8%*?jR-ZsfYQ|y+h#lbf zEX{NWuL(CAvKh@V&5INnra3WCH>7ZeMNG8M524t5obe#($Af$nd#bdwmVJ3Gg9Qe! z?E3kO*Q7lj3|K6J|B~@PB7`wzrpHVLW$h@8|FrsNfebUa6bptX8-FvBoi~ljG%KXy zESt-P*EvHO(aNtgG@qteEB?b$8(l>K7#EQ+llAEvccs4-JbYVtiJ#`45NvrILLo?%zrlHKAX{JNA(#ee5Jqtb2L?O-ei{Mlm4UQ?xW-SXjUK1^3h`FJSVvG zM&0Ks`(gKA=7Coc+9 z1geiWm^Mc)jk}y*Y`_A(4Hm7WH!uYymeP(%xwn?4aDUA%)o&LCO zfgX(>kV57JtvgxrnXOAK4O%5`WR%fd_TFe1+RM34W@)G?UnWcFF^7U{e~QF1i3SMHiNNwkz&;^ z=6=)~8OkToF>ONQ1va!B4V;4%C{Ci|s<}IfPN;I(#5C^5>6M#AwzE<7&+$2=ym)7$ zCeiGTSDQrcL^#Qk>op!`DL=k=8mV@AdRFi-edcwAC8)<|1k@RNym4*?}Si9d60mOS> z=OCWc6*eQ~cZ&OTiV3rLLe+TArpf~{1$wGKND9~5VY*!4e(Npk#&5+;4c*ig#d1ssZ^pqA- zh|Dj+EV)R#61TLNzDKbog>+Jc-D@&r9b2rlTopz%gK4c)v|}Qkh}+3)NMOYcc#wf6 zXdw~!B4{O@Q47+U8bdI1vbp)MUIN-V+~Gy0?V(*_EcZA)=EazWAGW3zbaG*)FD{`q z1;R0Ig?JRza=Bj|Vi_{IXja&%0llVBVHGZ2>?cBGfaj9~2$d}}rQqSH4|oTNWYmKj z07-6>+rs_aru#RjXwpt@ia(Ugx*@;oK4KrpzJ{)qNT5x;lC`i|j@NSQz16U~8~^t_ z{_oO;Kn=Qoa{u<(>BkmWPmW2XI6z@@Qo3@d84=RPTXA4d09y%32~w0_xUg>vzc4Ov zkqic#QvB(r26wGr$GF|lsSLNVqhSES62QHj%?jCWN7xdukM6+CjC+}`Y844|!kx}H zWH|`>sudc;1{umI>`akfZDMvV1I8#|;7XS{$3v1?plScj=(|Z2|JiDevf6FxhVs?xK+QT(gqw4`a9VT%fFk4_WE}%IcS@7#@NU)t_J|_?& z4wk|!Az!=EES?_^&xWA&D`6OEQf!{+M9dOeC)VQDQeAgi_&lx*1gHmF#1OBzW5>Y_ zyKW1KcpzQ^f?Z(6jWjPJra=6eW)dk@B#^2d$?QknV?m~TJ1F?7KJMY+wN0w{v&}Z( zMTCI>PSAR)6qR>JPXggKG+51JLAWQ>WGm~7vWrY9ZwmV`?D3Ge5N20dktsJNWksfJ z;iGbTGi+_t?zV7uGfze}RsS72al0CAOUfHdTRQb_U`pOlOi{X6#>$t}6`4X?8#+}p zE*mVJ`eDe(g{IVdLsQBdqmed~NTDgWa{+&wfuVk@2Sq?R@J2mQTO3g8hpehxHn(er z99RrhMY`w+K%h~)if5@hA4Jt^O>Nl4MW|G|5h|F)6rq9@#v)X>5g&^`SMuTUATLPR 
z3>AN-)JCKzr`smLceVA$<*+OgnJi6o8a!!;U+xP#P3^zLFKrO8$663HIfiU~{!sBg zjg%-O8NK4uO|mX&;)%gYr8S>QRzXZ5x@09!l&)2Bh(d7LW_%F4g(|D0Uwa;h5xXYi z4=MDYM-AirY`gCp8qsF_saAZxFKK8z<+Xx(1Qp)AhQ&RJ(q-67 z;!jE*XlyKvigPjb8_*}XieP=*8R68KaZmqauQ%5pAbLyM{>y(9$aJ^ z_EEeN|1Mip#)3Z0k*Qu-=R?@#qi?^B~-Ws?`*CGkuvU z4%gEZ51Ha3*(%uPLTH-Ur6%LHQ2|M?K`x{R6>K&qswt4N(*ZkuPc{TbA6S zhIbyh6u!)QTQ^PK7B$Neo@n%ThvVE`>+O}0y{y)+tMk}eBO)-^cc@e#5AUWXNk|xmN52F-`+F5f%?GhJF zde+kBsNI&=!h&XtyHUF>mm*&axh!`VG?=QW9SPMfXiO!oUE1~K)~-tHyPYJgBl?>WpBSR_JWWjBq7yQS%jE@KVQ#=e^O2+;6fX4>v&2Eru1i?BX=Q@n%O^I zBa5-(R{bHL@x(y6R1TsL=Sf=Si_g64DZT(6`}MPX-lbn;B;>%<^tlhKHhEuM8=y-o zwNxEeuDz?&inJXE57>)QNB5g$?S2mX3(P?zqdEVf?6g+FiwL&$-&}s8+9j${5mll^!qf6Y4 z0z+2^k$z3OqFYZ;6XW&>pde8>hzvLfConnSfYN|t``|}tY&3ePZQGh3fy_0VoxsMl z3EEAl7zAtHdAZHV+ypkxTo-S@2ALD|qeOm$y+~+tx|*~BXw(Ql675!``gO-<3(#YP z80CWWcN3T5NFk8qh`Y*21uti$P&^NRFGouKeAy#qzSN!$P!H>ABmpi)lx8u@s9YT=m(=xZ89EJygDM! z&TFj_(^?9`2wuet0u(d_I`Dm@!LM31H%b4}KS|T^M~TS-oolf zZBrE86yDNZ1S~|Ky0*aeu*jB;|5$BOEa97iVTu^NvEUt&(#z>S#ByngLaCzu1c0f+of)WbkYpz7NqhvO0R^w`0q|E_T}^-^{_^IkPe1Lu^VR&HlpfDPUpH1WJMmKgleZ$7ob_17F&b5GH6bCX!MH1>WxdyTXiSw?`$|8 zf5~ukIxHfIMaB9ykFNOcygOA*q3Md+vL|7uBG^S9i%`D3!&5lhge<+r+wq` zSEx&~a*|rC+AHdzlwAG_ui)z-tltR5*yz=1Wjb0yEYne^ZYy_P)MB^Fd4spzO>p7|K=j_Xq z{1xJ6so}4i4pDQal;K{2#5RQqD{ncSx6%js%f>J!?51#GNlP0`^9limZu7#W_+t$f zzPL=`^HI{=UV7#y`;l1qj4?PJX$>7C7WWs2U_96+%cenkw7YhvSc43sh)v}_5d$6t zb!cBn*e`-Oy-d-4rGL|Ix@mgRK3IbA^U|B$ggIv_G#(zVoNHaeNe4vsnbuR^xyf=7 zA`zIuRN7v9VtZ-o$N5e|jPJzrjrB=nv)7p25zDH-fwwL_A3kT#Nsyyfm}ZCHWSRGJ z{?)TdY|P*g9HN{eB30sV+Xx*g7de%KYb-caCffuw_`@*6nsKEa!(2MGt-&YD=^OP= zDmqL{PwTqhIjoX9hIz_gp2_U;#$7vxweqW~78W9Cs4NC6euU7`Y06esOPYs`FYu-8 zqCGs}IC%1iKBikJTaVIH){$fO_c#_DN7?fn@@L~NKUy8qp=np}c zyArvBCIx|)_07oE-~`M?-I?Yp5%YkU4e$5qIZ$6f&YRM`BeP39#8!=CLgnTv8pli4 z@?>Q^{<0C@smh`T1E~PgvZ-%QnzkeADf{l6(dhY7hP=H!B&tP9pG^VAKDF(@*w1Zx zz`ozlU$KRKU?^h?2Mj~Ts*W!|bB=ar&JjOz^e8=P#gYDTQPA|J@MO=#RF&Il?Vki+ zEIpGn_x$U_egzSX$4jCg=;pX;nzNdY*x#dh?@leL&X9tLKQA!cc%IhxXY9+<{1u;? 
z=h|H#&Xhy}HdCi<8QEih*-;nk+fCu2o@Gb{)yy=@&fNO1r={h`7?L1-kG_L3_%|Zkxz;!23ES;yKa6#T2ii z0x}^@JI;DOfV;&M-;L~5k=31vinb~N>Onw5adQwFunuluyd@H_PH8n@)yw+@?`ZU( zOdh#bXvPnXFss;N?9gERd}KRQMEf#~*y>kdRbXg+ zFF&f0WU0tTGN4mEBbmI^NQSRuB-vN|F&jzI+;=^eOrC3vB}0vp_%eshAp1TI+vk@K z8?fhUeaY1N-eHkG?=39LPcR1ZyPsHS#vi}>a1<;$%g-&w$?ok?(OnGyX=^7a)slaRT)10m*c(YA4yS_8aD*QJ*)PR8-R>swz5!1xFh3Yf87PTxal+yE~Pwfr)cT>+ViN&VQaN z*jD@d%#g$d50`%73_?Fve7n}7GIzkJ{eK;T1WL?yF4Yp+G+f(^mq7<|VAH>Aj{fXa zbA8yMtWw`ZRw+@gb@-9YlC2hQI^tCm}HRV4rcjIl`u0YKnKKSetlRL$ViE%`Stmc zG`Nf3;R?1?v|djUWgplc#Fx6o{FB+9qBdSh`k}8sZR2&FeMzLpU;iqRKb`c8T;gB6 zzLh^ec9iF@OZRdqJReRE{~o704W{qqqUS3F*$ufuz1EjJwiGU5g3* zxfCXKL0n7MK8(pqXYclj4kh_A#c4=(QS_Fr!8^D}3g-)xzPal0KCUW1sjKFLNOM|0 zxMCZrn>C{@Q!w|K$s~FM%&D*|dM|5z2d+6$x!`b+8jzy6AqN;2M}G#`0}ie&?jhb; z8A%flltj3?vSRQT%U2z$^l&^<1VdbJkrMzg9tH}{BkU;1Xg^ztR_qRa6ax+)YPR@_ zdZ%)aIE+wGF0-MMX@nfLeLi?{`?+8et9Z*ch1tYmX2auTT`vI&e{g2kw=-Fi!NXKn znHYb8kDlb?$;1EW#0$?H?r0v??-?fgWrem%E0h)%!RjB4r%&vPU#sn(qr{4ugDGJs zh(rpwh$+R~`JNA6969Tr2cTyW!0-YJC(( zVsDPpQOYGzBQs?O=~qW-$*!ime!)E69^fO}hm+J5@q&Olep5hA%zgaZ))QBL1&9D_ z?c)>{Tk(IauP4~`Z^|HUfG031(oYW_-euXS30N5KthAcm;w*Ql1~KoT7-d@?-2P9e z4nuK|YX7IPRGHkYCH(G&3hGS24#oiWnvF;k)?IO0!k(lA_rdJd^&OHnT?T85ai&X> zdl^hFTCNnGkB`orx$+5gSz`tcYZkFYXtdb)r+)Hqm$r$4 z@(FE46xSz_l8~k$%(e@&`M1*m$(`CAN#zb5$pkUz$gdZJU3YQ`-lAvV3wmtW?n8)@ zR+32jyfIc-uQg8|Ar!g==>v}< zr{r8N7?`rgZs!G>pAUn)*&CObYTIO4FtRoPXGr}i!&LU&oB2dKd&#s%u!@7-Sq%6# z3u?()zn|Aq&~48Fd{9^b!+etWmpceOc>Hw21{BN@dA|edQ#8n zxQ5z7fGel9xaV>)yS&a8AKzo~+|K}o6TxEvV znMt}s>pVP&0vbViU?M5~ke^{%K{7+*9T`#RWF`IralP)H)&cXuBQ-!Eu)h&TR?Z{% z1cbXa7^P(N2ANJ}7D;%>Xv^BdpAB>=etr!@j#+1O(L&E4nPudr4(>~$_2k9e!WR(9 zQiGyG>lkluYRSeMdvrG5*fPOh*-;DBM{7zxRn|i4=+xmu38ad~7j-xz0jiVb1@eis zQUkw7l~r_zF|z6iDKty_p|EK5MRdYG`w;~X7*Za>S?)|+7^U2YnLYnFSfm*2Or)<5 z#XTO9DI4=v^rYxF2Rz4hdc&@CjT2K*p6@a!?FGYfGAE zn=+rwXmlk+VdMA3u#I@YKgwLj)5r8-)+BZ;BVLFI6pm8&EdjNt%rS2NfR@EVt_T8V z-Z@>F84J2%=s%+?tYltSmeEtXB0bFM3R{3_p3rTSw4Puq1h?rPY_H|?gqkzwle2Q; 
z+|jtQN{9#1__`>4$iU6M%z@j-kO>jfg1>cbV z^|zhwe!WvuS&e-NG%M`StU>P6s@VEx`CGRlLT~YJ-~d*{3>Xq%`I4$24_E}ql8v1| z{?DVipi%En|>1kX>LzrH71%QXQo4dcmVL9Y&R0-jsP- zmKuz`? zX9{j~n@;&|b;w@8L=}wFq7Tel#BMQ!_!rhV0nwzMa_hs_fH2A+1U_bq!N=+=^M<4| zopTd-2yGNlnh>3m@;x9OXg#1!+g-L_3iU@j+-V8=9k?h{`~Jc!KRF-02sLR(}YHkV{R{i^ep=EC3H9 zyJ9j9fv?p4^MaWs~zbI8c?W7P0I@}9Yyvx1zYQG zK%*Q{)Bw_>j-3Uqbivp*__+f3&^VjM8SDw>fS}XTrc*+-1#?~LdejP&YY`w5Z^gCU zwekrZZ}TxmQBOEvSly#4cA^H!3RhJP5Rqk3yB7CW71{?*5tZO?EDPVJP>Nn%!~~P( z@c2Z$Z%yks=OX9R7S2bX4P#ZKC?q^4E?9HTxrflc5Xl^^qNsvT&!i9=87DO+mKgv1 z8p)9pnxGT$d)6dSvDgY?wbRCU$&!o0^@8@WSU{ob)QH=-SVhcX=>pV?2rK0elz1-t z^)RYy9vHZNp3n3h?$qETj*QBYhR`O6y-#@CFxiQKA4mg3oFFnrU8Nu5g3ul+i_w6( z+NqeE;(x>td+4@es5*2UeuZJH78;@=j@NF(!Nl!y%I@TIFn zpIJ!FBUh2G7yn6GyA#DHLjZ}E03k#=?eKW$Hi0QK zunUt$d@*c6maKO1Kl(uF(V|MDi82->>UNN6o&YK4Y?CP4A|qFZt3(0R_jV1+rWjMj zT|HGG{dKF5!xw&li2jT`1XYp;BVW<5h7sZHQU*C%Z3a2^O^ALU{}IM?^LRQ{vduD8 zi%T++q~qGOsCE3fxu z!f96V{IV4n5V&0Y_=3r=AOF9i>vorAPM#leZU!?jt%507&pRwHawc(+RjyUq7JV20 z7_NpyvX&Oe8*Ja^#T%Z!x1UT(W5KF6>s)ff;f^vcj-(uQ&m=pJF5K^9(@PTrs|TWT zrP?Zm1983CSh1q7zrVk>0*isZ?+l=+-qDXiLH=~XmbC8ZA7lWu^=m9J+a`u(qXJp$ zhkV`jN2TJnV2Vitl|fXXEoan%%Ly~2GqIvak{>Yb=R#7$J}#VsGr$EFTUVOM6C>0) z3&8dCgn|9hR$5EJ7On&O!)8m&hfTss3B_*`CrCtnli)T&>6?TsrYMEPTc)UT3Q@8e z@iVPRAMv&J$InrSq*4lPOB-BDtu*FBY(BwtOpLUt#8tvVXMmNSOOq)(uya`HvA@%Q zNE`Om-L@+^4p^*f^+vPR*FUf#UNuCrB{GQIsUedEW>$U1?{Z?KkE8ASQY8M>?n&Xz zb(#cNekKMgK@>%0gz0)prm#X0j$!LV*c~W`K_L9&G17Y-1?_KLt+Zp02(+@V0f9$@ zG@yUfQOTlv>nV;%YT#fkf1B4}k5FX`HCVUo;ODyRWqik(CH^-5jz@iIO$1z5q!)#7 z9s1k65Rdv$(5(<^Cxyu7gzw8Y=WxZNzO->?zt$04vIRsy%YA9neNJWvJ~!j8-R5oZ zs895Y>nd&ad@k`V?Ng=^oNqmEpp3q>UsmvX+TZi0p*IRg0nQ}*$RU=1!n#(z0)T`B z_ocA|aE0C&wWhHzlCBag{h7Q9E8Ci^hb{ZKAt*7YQ%sR(WUz%2p%1L4~ zuwunPe_yNFsMo5Maw!S}O@@YJOBuVtBPfQGkQ$N(NQ9f(xYS*%^1xJ z^^Wyh_G-|ZWcMjHz5g+Q7Eg&p`e z6u_sX-0$rexcc&e=@X90%4^%db*P*Y;-P~8poGH<>puaR!w{K=W`c_?va8FXEmO#Z}}}tz?xs(eh#Tm2%E(j`&q#%7cQ6FFQ=M zzv)8tsaAVc(z1DWQD8bd9&|)jpjZSx$$E()3Ip6aldieDvM&CWSd?&leWwK=fwn3S 
z8AHPcx4ykX#gDFU4~eC#0Gh={4(WFt>;u~%FnF_ef9LTm9KCTF7=;Xru>=pr01gi>GfnttfvcBzUIoG zU*DCfhI9y4J^jG&cv~yeKCWv;Z~FG!`Zi(Bm|)kp*C7-1xumMApgsI)oc=q4z(&?X zrFrmjgVH4M4-H@c5{(sU8l|?ImvNQ|oUSVi=eR91k0VEfJ7)s_Z zxz4mJMH;x!uCrf(9`4(c{>u|1%@3-#nRBwD81>1F%)lKn;W>x=);$mh!Xjyv3j z?3Y7*BkDL6?xB;a@iH||a-U9ugaK{gsHb~}Gq$vv~FvT9BP=08bUV#q|gQ=33#xkPf86oowSWpB@{8i1jJlBAY z!1co0Su2ngW30~g3;1)x#j(1+g#(<$a4c9D*3oZ7g;;OStBDKL5UR`RdDS%{;c&&_ATFB7>=I;oK4}CMGdRb%QVUe5@>@IN#i#V zjy_Vrg1D^ljVbXD2ijgqcvB|TPMTEvi{gglE|(_O%rb$rZ&*%dPMRi8Z?~ZP5oh;R z{<(IaqjO36>iLP`aV(qttP^q!v5F3Jc$<;>>~JS!X)2j_U82iLI*Jv+aimjNa*dCc zga}R?rfYM<_|dtZ#ERM#1SsW~K^Ju!NNCmSj-{`U3re~W+Et`=(2BGz=(2Ae70XS+ z+@2S>r!_q3Jb(S1W^`atBw;ko0Y-~g#x2jNgMZ6E*FloM>Ds@MZ5ABxp|Nw3V8p}m z&9ojI)i841XM=iRWIX;XnB5c}E9^ivei}&k79#wrPA89!2q6-Lbu7CdO9>y&mY54) znds#NucEBPS|1>SAbAl*gnP--_D9fNx?s!wUzDVbp$gQ(C zgkA^9rE;0dTy0CI28#)3Up8YXj)BL#oP->-*Kr7lt>TpGRY#nl$gzCwNY?_6w2MSiNq1!d0HECqp! zU%7tQk`+)Y;CCU+2O4v(2HIlK+}Bcam#L@lJywwrBX+AzN!h5(W02WTAmdCsY2?zs zS%DTYWY23BK6XHIgP5W?aC*M@n{2@s`R(DB-N*Cgql{g86nvTvcip&nNVHSN?M%d7 zhA*H-_v$cSi~5e>JXM4XI8PUU1LxT;oYLSuoJR!bkql0NY?=lB8bHc`@79uDRj;`{ zSM?xvZ&=G!Juz6VSmJT})#m82XeJh%(VN1P?v4M-2<3?6oeQL0L>&EKLs)!9F3+hqV-L|2i8e6 z44fqR!F9QeDtu0_bC#DwK$SJ~O=3XBQx2_jD!!w+S5ST4AzRMP7IJrgf;I^x~C41C<%t!dG0=)iilbNXwFMe@rfGS!pKupILgfEMamv zww7y~}$_}H6mCo`79p~;icNkpkFMs42cnCw$ctG{(T-9?ZE@uXwM*(+&dTCBlCs)OthG`AK?{ za}SM@aStZkehlbtqH>}DiIv^h5VLBdQ%S2bsWA6BQ9y5rfYbaMlsRBDIylH$!1chO z^bIyr4t5Y66H*UZ9H3>rWV+`V167N~3~cSY?ZBYb?1Uzn{zBMkX%o&S?-ffhfM8zb!A=;*dSx-T87e9TuI@v=?rG|l&FJE)PNM7ls-GnLN?qx^PI%al>x#m2R9C!~ zN7R>Dga3&AJxVYB0KuC>NryUSqk_hB`wj_UhUD2w4!d5|G}no>;X?NsY+OkrjZ4kF z&e!mlNyoUsJbGZ8Lj5Vg6?y=AK1U9N&V7cUGd&1$HJtVcniulnpa%3S1rCM37r3)s z;1&wNoh<-&z6Us0!#NM!f`G$^3UJ00&DJ0zk(Pk(mIDl#JPF?}60svW`KyiA6svMU zbO7VJC_1=QfcgUWOrD&kuM8DUqYkZ?1-T7+Xp<2`nnN(Hb_v4X!GCp5;H!aifx8Fm zx9DL~R`99S-KjXqeLdZ?I-837Ru`wDL-^j+ZYoZ#HiW-opnBY2VrKd3Ol|U52`OtntDZav+6sfn;C)6KpYYHM+@N3 z82CqW@Q<%9(v2H{V>$T7EFi|Cekh7`WA8*aPDXe|-2!c|l=S4RH6M~6xD)F6$wJRh 
zSkGtko}cO&IDI}A%&opd92s-0I?OWtN}(zkHC`^Oz#x+|Zn;NZDM(oDUTGDt0;hNt zh~`z8PRNTnZgDyy^%q5+c__xoO17$T*qUf$FWx0Z#$+>*qv&=Z9PZwsV~-It*65?P zXcx`+!Uyh^1=t#bSh#|%AIWDw^K;gDB7pS3xs87PMo$+ zP0q(&iU&?Y6_h)(hSkFEH17Jr_GX+lYcc`i@)|EdSWbAEL9=GTEdID$ zkwBwy!rpZVd#3gxv7E*+s`jR^_|Mz@#?s_z=9>N};gZ=6N;xFC=$ zc8^6GXVc7-xT9$H>1F%G6t3x&nZk8tWl`aBJSSJUE-Q{hDaEpu-ftG`;v%+;rO?Ws zD8TnoDu{MF^^0#Awj(Q@%6n*~L|HwVrm0gvsrwTvJ2=dPJ3;#F z$|7ZJMreYZS?QWMy%NnAWf=TfvVQ8FUD;XTf20dkreJZ@6+cnV^;8Fk;EM$v-wf6I z!YlH^7giP}2jntSXU_AKsaKOSG8%H89BUnNBUXj;bDp?=AbF_8vfh4xL}c$cBKeP#IV!E z)!Jte9Q!K!<7d~%Gb2Ju&+tI1;~Ysbq&kiT=Q%c;CmTy=VvI@c?JtH;--~%{d)RI# zH?)(ag~yha66%V-8eX*o4)P8M5gb zX+0I8E$zYLknU+LqMs=qh<+|(>^YzvwjlIC1JWAs)+mLcwM9--jrI)?mPm-xjpg*u zhkh9@n+-i%)E7BKTnv52WoR+;JN7f^Ukyc71$yyS7kyY-3I!Z9#C-xp*c<3b*1un- zt1R#h02ipxG6QHS%sSBW497ksEk(&WEkJlV$~3Iyv5ehF#^Vr6Xa?ycR{u;xuKS*# zaP#w#=LDSb#`8|FINyh1%%-9w?{jj_o@ev>a;;okk+nkolsm_9wac`!FJ9WpRIFCS z3ZznAvg|91FSN*9E-#*7b=!WsA$WBs(sq(b@Zw6BlBfUzeIiF$!-;w?Ro#mqHkQDTlYGfg`KV~RIt zzY^0^PIk-D1XAjrDW4#?B=d9hVFx$2mz&eu?F1umM-hyxR*<}Sh;Mn+*en}70w#eE z>;$Qa_KI;TC}Iu!4isaQo+Qx+>?59*gd@sL=Xj#HyM*Y)qhD{OXR(L=SAV9kd`9SY2QI?^>$jjKssU7Q5^!A(NjX9v%5XSOlgSKSq$y z_lcIJ&XCc&h-^VE8@bD6!kj!19ArYAQia)z{F)*Eo1?PtfhiEusk?eT4c#3 zX#9f2X%|^}RF5vPuMn1Ee~;F(#7UQHodM&f1LCHb&#j3|tYHBMs#+5^#o=qSbTC;- zkgPg7b(INlu!r1CvT+k2oiWnf$5hfbK$G>-qxS3o&lG&oQPdc1AW=-!kpQ(@*UK92 z5pT)o#-3OTtEh#VS#40UR-W{jRcKPjtPRC9Ywe3BHymFEvV0;OvxxvQSNf%RrTBbb zdxc|g^!|!)IGOjF{K8;z(1)n+sTYmyRKqY$)+Mbjok&zOKf+Ea3G|Alo3%hxTw5nb zye98R5ub+be#w0cY)N@*zh#WHyHF#Jkj%f8cMrE$3erX!I}ZpPqgK``+c~gQP=0U8 z9UqkxQ_3KBs$FYaS?H2bJGMBY+69)?G#3=7hu>6Vr*X*c>h7+`jYudegM^G>g%LFE zr{po)ZI$Ix(GY{vGJ)kBb|OL)%@JUjo}VS2rR3X!k6g@utE1WXY`|B1D+g(_R-qxaHrkz3~MJyYdb*E znjMd}KC&um?k;KL*}i%)jf{p*2Noa%1PaJ5{&l0A{Z*2W+tbw(1ASp&|0_j7xnj|S z%v`NWs!HnwiU?)d%7gRf9viD=3W%j6I^4^UYgh9v(VMw*HQWam*Y-hDX;=!By_S`w zCohkh@VWh6dKmUMF z+!`g9d7^6}D7){W#YuKDjz;f5voGdW>3%fj_M@WNXcH~1N~6~&q+Ma>>YbJ}E0BC$ z#FL;)uiqNL#-V$Hd~}6r+i?Rh8nO&!$l1$ndYa%}@^;wYeJ-^g0G|oFqJ*`bw?q4< 
zrF=B^{QD*0U zPqJazZg>QT-VCFjo_7>lv4j&VXFWEL5j@weNtGtpH##&~5N z1Ct)m3lZB5E7+9GyG8gT=; z7b( z9aKxx2IfKjb_Za((iyM;=xu$Aud5@5Id0Q^2wYi;eiEU3%(e7q5;=F>OG)#4pbePXjo3FVu#mR zRXkf6Z)t!X#}k|cL6C+DVo`Jr`vw#tq77Qkcq3DH8V`{!q{NGuiixClQLdVOa|Z1DUTL zW%c+N)%hMHtHU>H9vcaXrSwkBPgl@&QdfpyR#%7q6Wp3?O4cZYY1Ya25Cf8FNT*rE zMGPWRwYOK=5lES&o++)Ei1QhW#WRGWLmC!#Ve&I~wnY12gLntH9CQ!%p_SBZ`hj5^N0PmELFqI)r zNL9s-jM%CQQmc6%(Rd9Hd-&QHndCN|gfDqYlX5ClSw1gU%Ys<{LX;Ol{~wZJ68u>ezp88e8Q*DI?{!sf#!hl8Jc zIG|cHRn4^32=RC$T$rC^O4F|)fPnXtUcjYo0m-A$V=^a8QP&xKh@4^@uvpWgl(s+r z&3sb23;(<9WYVWfS)!=TPPQcf2Dy_pCuxc1CGU>XU^(4jc~&iHTRLi~NjC75%^%qB z;tya$ZwK@gF=doQQ!nzr6+eYZ3eKH!j7i7fLaVTIL@);VLqCT)feLMm6rD#e?EK&q znKI!^lJToKW`rPs05+ics4Wph!qx*WYvBYsS^-RqAU`L@g4KPbtZz|*BYZkRS^R_y zs_~Bs!HhA5JJyKZuT`8jU-Q-I!{h0wa=Ln*69=E|P9ccjANmcP0eJ|s6A{(ahH=GJ z=}y71?he%hxb+OY+o=LhujLtL47ggER2}4u39{_h)%4eolI6OZevFHoQesilzn z85&E2=7yx?hD3=pHjFF9jP7*o;|7rv(>VUs^$Y|e2Wf`8hxL_Qmly)mr$CK{fRvU) z-Dzj}oyn)x*M`vCV3gcoP+WtNH9Bf{Yki}mu2He7wQUqC4UUi6mPikBO^T!Iwi1Dn zboQ&+UIj38oF9%Xr7H<7ub}gv_#e^vPuNkLuU6;3$48YuOa2ORCXvVw$Wf)wvY+#R z#8IUemL7%dPh1eEV&=Zmlg^(LuCY9dFZSS?8R5=cR+d+`8(NN6I~Ep})qB!;?Jzua zG>tH0EIJkzzNuqjjc+O)D=clZIHit-70bTLN$1ZEivx~22DF+Q%I}foEA7-&I z?l0@WNVFT&R1uGG5C!YG>I_FELFceB7SlV@=3_01hybVNltmfkI&r($sWMSI(hsp^ zbJrbIQ`%)Zzl};I{z|FS#NjJboS0X0d;Pwo`TuMWe@cb)ggWJC6S}RQKXgZ!z3#9{ z_^?C4qU3+HoW_rF0_Eg8R28d$7Ww@cy)Y1O4PM8KCtB?W@&b}N{&gKtxPz4o75{Z= zs0@?m544iyciqyB-?4O|T6>8rvId_-n}|qj|0MA~ElyBUVtIF{8Ea+2SxPNfw8;^& z*|2=chyyb3;^IWkH*ptfhbEv1yZ=zymDC?4W}D-vu>E*DkIqiF)0}kD62q7!i$aXD zWKpYF;-H1d7NS4<{I>YLh4UW1^=Zy~7*79~i&ne=%BI(+^S|vB+ins20olN60mDI-5 z%i8W*dYsjkXCp<;O`~cyNk_|d+GG6>lRDY`H3M9&3ANrNEQ`L7BGby_NH>stW!R~M zH!n)>1ubrjZo1V{H*eAAle!9P*Lv}cK@pam4)GD>DL>{KeIVNAtb5#TafCt9H&JwJ zAYyR_KGMN4u9o9Det&Jbp?`lZ?DxmB^lb>hYuU`dng=i{jWwcepDxa1oQC9B6+|i~ zyDVZOIfTZ+9eESN3Fx*lAq?yqmS>D!fs5KMkg@BIHpV0-@YX=Pf7M+Qn65x%<01xI zd7SOL(PQnEyeaWbr#XR@)jTvmr1}XuD`}$^^5sftkc8$v-mbwQc3XP}PmKh5qcqzj 
zHYaHk1HzF9FfC0@rDLc~bXrPHj1sz5`ynR(JELBI>``v(kJ~Tq);|8|qj?44dU{E= zm5FhS5F6yHN7+o9^sE2cVzK+}9eOnu5V*ZdVs>k%Qmx?7Ot?rD1C>&fnYIBSC)!=F z14ZCJ53V}-N z&LsmOMIhL!X``6J%7Ge>gn*#O1_io$-RbH$cqf3!Z0P`yRU@6q4aVc`a8X?i8(y}G zKiUqx-VYe~D_xaphDv##RE!cdq>ocp@pu~gPRP4|MeqNo?S)pf^2FmD{Lo7eY{2Q# zL%7opI5090d3#E5(Tw88L_0|UC#hARe#Hk#21JS~3E0P~&O>yR+?AHZ&o8%}qak;M zQsQy;({!DY10is7-{)1_X3km&gAe8 zP{)UNVbOO&4GciZeq|M;7h}P_I?+n~QAYf#j#envdLK4xC0Cr5%fiXj7Il~f&c-oKslG8Rkvy!hJ>u9+Xp)J_!DdRkg?%q1Kua5m(+*j%d1^o zjhPB*J#C*gv%Mhz1f<5mg@-bPn&@v7t4%Kr+Uwgx5F6KU^xPoP&?QCG1_bX&oA zxKl@WB19@kkU8t>^}Y0cE_9aRs!RwQV1R5>9G}MIOSl9yYB|rqjVDHL0rqua65~Sy z)q&Y`7rHN?VSMyXeeh^vA^;9mIRq(1gpSq0fgiBG(iwycp7cH1AO1f{MMc528FXZc&p^YX>%!TpcX!ChxRyq_2s8%u?hal#mp_xSK5) zu2O20+yrbXh*KE4Jl07af{!AfG*@lJS~A)%qEII+~gME zkAz7`i>!HU{8q99)5%Ct%o*ntEXD6`Xd$$E--Jg-FYqQ?P4lXWBEL#LZ|bmOPM9Ud(LY4J*wCga^gh@EU={8!agmZ*u&*OO_?7$d?Q}acR)2QIOxd~gCJM0?kBn5 zZ})6WVHkFGK$-b}xgO+2x}Vt_X_d7;9HlNE!Z-oZv|@emY5J?(*1OaBFCHK%pCSHu zw4KKNXk6SFyrs!aPQ)K}oF3Py;P4$u$4Iti{lgF0cP!BciuX|?L0 zR-=+7Tuzyc0M&c*x0z)ls4BBNn#&pv|GxiUE?OiPCGz-~5JYk~FBY*!Wqk{|2p?hK zM&s2Ji;xE-7C{h6v{M@H)np=*D_*utbYJIcG7<92OUp!(XyCg`MWb3K6|oK$r6MG) z9;wL0A>#}%Ym~M#9jqb%miG87M1~f7C4tzA|~~RTq3FtL~^(ToGRI5$k8c zsgnMasRmcU&dJKFRo4#BHAS9<5RoVoV8|>bHakEuuEK?MeD=5mx>x9t|-#EW+X)CgAu*7=UzGMF59a z)4&afpdArifMpBr;mZ53dp|3z-Am*a2}{T=xJVYuEx9z%m0P+iza$`5etnHpz;-V6 zRTvOfyg?Kbi9bnJhL&)r7@L-TE<^-5czq*CSV$92IHIp_cOs(q20s}4A`LE%N&`MCjkQ~Z#@?VjwAfG2Gp!a3G4uT1EnrnP7M1+!OsBc zI6&I&@>&N^=vaE-;OP|LaiUP$p44|aLGHB%nPLPS7&|2mZP5MJfO@Gkd1PI zYz)|&+@sfKzV6HR+m@u9x&M+$Z61<{}FrHDAw!lhr!I}Qg#-J(X&Dh-q+Hqa8Nbq~6|7ZiwNIc3{xv@&Cv zw)+TmDNwFzhm%#!eCJhDr`J!ybS%9jMqXCgYN|u;dX!hf*{UWDnR=-v-@y=ZA$rd9 zY57}?RUaav)F8D(1f_Db-%pdB>%q`?=A5;Hn#GjU=$Qul7^(Nd2$)mON658JG+4&d zMdLZfs&Va6VfTh81BTKwRxeR@#d`M;pNiB(9gmrsgV2=MZn~^z*rs|G8TJzt@2atT zLZ`qgt(?v5^BLQF;~-mIaee1~&KaPxLq zHbjpLrr}er)*TQPukC3l}mv}yO&Y9h|y+XI#iS?HT`dA z(?zb=*v1-6t4aUP4q4r$a&d7g7g`uRT`+JF(;pyHpB{Z@Di`KUX`3uixyDuCyH&DSpdhc>kRsU`6nmR 
z_c(y#*YSk>FBHJ>|3-x&P@o0|Sz>6QsAU2+bpCu6vY~T-Hc(12H1Q|10kFW0dN+bb zrW8fcI6GoMdF-=j`g!<9Km?5iZD0#p6tuyXBQ-{wir9dYU>&6;EfEPe2QDst28OhJ z{0#9SNhh%R0o%#qXZS9UpHUs(Rs0MVX*Bbz#?P2@Es%k@8$Uxgz^aVR=%BDO#m~4v z5#ncF9rPdjzM=ov|61t(V3cobfc8tX?p9 z479?iS}m1zqzZcz803f`@O0r14#b;6{or(%KhJ+~M$EbV2Peh+W&Q(u4|32X{~1eX z1I}aPm{-#Cc?oPSOG{upS^5y^W$8mFX!SNAM<8n(P z_j%D!k{(BfwIvl23a2A?E9Qx)*m26{#iptUJ5>p;nZ%&pOG1;tnSuQXA4;68m^fMK z3X8B4^K?u~GtyQELLE#X%RJV|hcvN2Tm-J2=(QJNg-sL{^KSSC#)5wVMqpT7p!2y!9Pi#KEYdJAMEE3rGNb! z?x&BKAmZVd5E6#cOYd703GvP6c)lk66D|t4$jkrH$9X-`uryxK4*K5_^UE`(Wtbz>mU~WaDuqJa&Wl@Te#M%#KtIA zEJ*fRDxH=yu|xBiHsTKw$PW_G>qa^?o4jmwmlgxkOy61CHpOYE!)aRJ6z7A}=bXlt zP!I0SDl|hYwfKWHwkaGf9_4KwSCpO&il{|-Fgo1KCpYUmT$bJMwk-GE&9_9i=RE*Y zEPq%5xNe)EqtkK|y9kh~og>drJf8LVx$6YUT|G4tf#g*W^rU}gp46VhHsLNhNs|4# z3Olh&S2Mnkn#J^lv$hnTws;pvz9qV!U*=5p3NvvjN+5yP?!ivKMfitC30>xBS|Wk! z(x+sJl0y&^CN4lFI(dvs{~I_4-rhaTMHcv~onmgvvscBheg-`F_GiGG7K=IgE~W_n z!fixbK=#ZQ@!=p{A>Y|`SpSggUwrZ1DF2FLkx>=$4a|y_MJ)6Sw00K4w);AvR)$h~{bT9%|0)gM+%Im84Z=>ICulB>c62zIY?!Abb}BHn z$c)t~-y!v7huoOlD!z_W(jK*3Co=^8rib`Ly&Ph!ZtWZvrG~cI*XrK@oc2+4r|N$# z^>1Kn5mm~|h=FNqAWtjP(ms>HfhJrk(ZDKC)eJ3E7PSy*wxO@5(Vegzt=A5`2Ow-ho_CR9_jVx4{7YDpDooaISKh zeR{x3$vd%)bRXAhoZ@F-387jgZ(m6Nrlgp3r60emxIIQ#*SNb+z z`VN)7)s@}Kt1Xn312DrF;t;e2iojsXWA(h?yESxg`XvhhvM<*K3N%tB3{3 zBaG#?7QY)h-W0y6D^u8pd6N;gDVi}GHv}r1$aan?gZ2IuaM;%N@Fy~mnS2THoiF?Z z$5f|*JW$Nn!XVoa5!h0Wf-+kykx-3#kBGL)!#Lj@t=aEzm`)eo^BdMKLQj49mZL0< zFV;Th_h+h3N>n^fgM|ijku5ZsH%Yc3)dy0K|Dnk}Yl-fkBDyJQXa&hn%+d;yft(OT zwH9=-dz@ez#mOezn+n*_;?Z^#>WWr)A&r%R@Ug{xc z$PBVIB94YF7V45MjSbOC6wj>@j6HHPl^Qk5gbIR8PPV=h0zaSY@_?#%pi5_E z`D_&(y8R?Ij0OLk)0vEGHEdA|6C^tp6v8ge-d2zQ{d$Mi2UU(q)UaL9T@*gjvuN^u zEO_`17?Ta)L$Vt@#y1kZ{m@x=wGnzL-rkY7ArU--g|)z7spYUtnvIa*tjX)&FzK7r z5}5(Dp=cs_lyj4ofJJPG!+wd|F&S+t^p$fyA*RRoh%e_Eg3VM7tLYg$KT6ZoEB6Mx zJ$?~~kmzkXGgvmf(2o6_JhhVOx<@S{CoqCfB1IR6)wA2z2*OsPbkv_N;ZGvqb(|cX=nvfA$fbULYQn=L#}MSH!IJp^DuumdD#INZL`h z99L_#2+KkCxmFpmC|hf?5AWh+FW5%@O<1W!@BjqqI~fm54DV!L-&hbZnnoPyGsWLJ 
zLrv(*S|qqCWc+)+;djqB;EvjN*l6d-;NWX-2C&Qt@kwoXVhy~nQ-xb(J-A<8{VBRw zN&nP3Vo}=^R?&a%i&Er42}?N4{$++qcZF+7PE$${C5n(8TiHPvS8f%iQc3?7g9^Rf z8oWhAUrE31daK_QLlV&)J;(;}Z35HG1?BjQ@-<4H%)ot!SLcBs+)mSc?jc3v zsEAp0bj3@sGui^t-OVf;>x|HM}C7EK!J8w>tr{5$L2BOZ(eZ-EE>W7!b~ zQWs*FO3JVx6N6+cNm>+5XN^PNSX93uLM%wIl=%5U&F29gnELxqb4v%>3e3UrEQ}QJ zEZC7s@dwE(RJ>~t|EV-dph3nwz8$&hO{?v&HuEpoQVL?Vq|^(bB!(vxq<%9AXOQZlLVGcU82UxGOP z05HmSrlZO9c^V*%v;9VW?iBLBNIx?7&Y~6M zy7XJm?vW$rZ@I_{GIL$j=R7cz$Zp>LUj}|0@lvALM3BubI#O2zEO_Oh5934P7bqN8 zo_&w4x=HySJ0dJWLyiAFM1K&fU|9A^Lo10frXc%PNUA~A7GM0ii+AE`|hHUYaC6~!O3Ly$s zMT?e@7&Ys)7#88wLMcX5jUUj~()&9#SdxPn(8hxQJ^nxR{4V!=GXq^ zKV1}I?G?sNTP4joPw*y_8oQgUnJY;+DwU)0Ryq%Vij?}Kgn|Q{xyZtzYNEZQf6t3i>vr_(mV` zqeC-K4AcE6elN>8SpubxJ%)o7LmGkTG~J}}GEDd}3@@(3FT#2)_mj8?IU9HKi$hH` zC>}n1Jpv4!Ul$)FTQZ6Cdj%hjcgS3*0fAO`^s^OYQ>X<(l)ICky%6fns|g_ro-jzn zcmackJ*6B>*qBfffZzuJNe*m*MhxRl?qpf$;tp*0AWb~d22FD#v7#c5xI2I&?3v*k zC&$rICRV$ZO?IPHg!lj^l`6_dtPmIGEKo6JoVi-Wt}#tpWP9`djOZDiL8&V*!hh zqbu5aHlu{T&0#K5Ww zuG=(p&)gja?%m`98 zq}f8G&80-T3_HCxkq%3KUP6R@1?JVatAl-u+1J`+{2GqMvginL(F(&-BD!qKAodv2 zk*KqzRY#Fpe$~-}17?_|!k##?@K)07yh-mcus_ba<~Sp7d=ol~RpilAH*x*6SDZcC z&t~%_U&VU>?)O6gSva6zb~d~n zWFIJE;%-SjXbfTrq;S&5YL>xJj2ev84Tv9!2nHvHjcj8It*z?IaqeqQ3O_0~ErdWB z0o|%;^Mb{*i?(c7m>8PmB3?dm%)utc=EB6VVOe%!Y}T;gbn+8JD{{Au;>^e^XJ*h~ zacHhOGt`HM`oPSfW?TNLow?yaZY=|HHaAcaouAH64j{}XhdyR>QT zun%Y#t#}-~a~IPL+XaKGQA5*0C#gAa>8kYHKlFG@uTQ`Il({v(!9@bS3I4M|`rcpb z*@qscpZhEpmFv?Fa_RA&{@c&<{59z}KG*ZSKb>QTdb%`|{YImMS1^MZDun6E|2DSSiPdBYCOWs_4_ zk@r-V15p zrcc7;6zx>W;*8{`A~S^&RKVJF2#_UD<6CxxGMMeV#tz;#IC6)t-% zN#b8-V=(9Z#`Hs{p6bRvye6Ifl|p31Yt#SwXD%zmPxqQKG+mUy38abRGpeQ(a;MHk z@i*2o77X6n)GmOFpIYO?GoSCzj)~U?2F$Bif2L5lqw9VBc~*>8Gwhb43}5o#EhLhN6E3&wnd3m)lGxm7gO*d`=5e3nUj!=49@M=1 zP8S0&GPTUbRKdSe{KYVSYN*304yQ|0a=OHts|*S}IKu;7Co8--R2lEA;yTH7h&0Tf zs7KqVo?x&;704gc<3}p8;;+JfL9sI>G49jv$~s_D{pwVoD5NV77N>I zg?G}~DH_E#f<7n2cUB*!Bco|VhgM72!nn24C3qx*)?|efWMhSZo&y|Bo+AQf^{p95 z2A&5M{2F)$Xbz%!cSzzY*CS*{MGBQzl%wZ<-v^Om}t 
z*8)%#hYDmv@CM=T92ogrspIlsf>q-fN*JUAzY@!dS@30wsBD;(GPoXp7(bNzeSu+@ zMJtG>8$#EGQZXkEmMrMxZ%g*+W_|hTl^UCkrOlk|p%L#=O6lUYC_r^<{AINKZVA9> znJ!1V=MfDhAo6hU5%YM(Xz`J z$cSh{)%Xt@9p_gtIiutT*>xmBHU_yB2y4ce~a|mA%uWPOo~%( zH|r%1%@s~3MyxZ1hBUDz)BSO9r+%wU$sXJ!{Vh%}!@ea2c?pAwZx385PGt8uJ#Py+aaNrQ_dR z?<5V*J@DA z^rR)DM|@DAmDo=sOBTv{y7V6cKeKN|5Jr_ssZy;H-c+vF`>|rffH}33bEgP1O^5DI z!=&+8`qbn?uXtv3o1Z?*fuJN=T}5;INx)KG*R3O6>feId_hU?a9KB$jvMkR}X*>_J`a zoTKQ-_k0c=WDb4q2Y(JZFqh1s?+XW%RUvWtHCW`vf)7LrvJJK?GH80eGtW{&*+zJ- z@RmQ%O2_Fj$D2v@p9L?X~`KFaKv4#9VZy^^n`u1WjsFJ#GfTa z)>?F_9JJQT7HYO&ga8@Nv_(dIdZD7wsy!jWt+Mk{Cx9w+2$>k2!;}$3C~wg$nGeiL zEGp^L7D@sU22AYm@|GjA9NzHz-T6Vhc+L{^jXsCWoF z)C=>WHmi&DQUWn4DdiwyMYjdI>Jt&R&RnI7IpDsF%L3ku4By48BO^zfbrBZ395Q(}40tHTUm%i4&I2AH#>d~rv5LS?>uqVy<*GkbgMG76^!~c)F zw*j*2s_uNx$NlcF({0(7B^lqoQ4%?0A*TS_W1K`sg+BqMorxeFEP@Qm&=4^aj0~NOClN#mO>lyED7X_FP#z*^62147ASOyspaTNG z-+!%r&ONu)vLOjGPONg@uYLAjd+oK?UVDAA(eNifK=EgOW==#uk;9h)D0+V4Mh6T4 z4P1CDppBBwRNz>CG40b%rb%6!&KJe4TpOUS-CFA?S&!`6cqqT{?6cSGkYL3}q*WYh z@7JnuA3fM>XTL$JicRqY4wz?-1TiGwRIHexdnYg4XZiw=QrGxg?$%qJXnq=*>gMR@ z`DB^i)F+i02~GQzDRoMo5KR{XvrTH2bWsGo&tRB}IA}PBC?IGy%e08ST$`Ui4#&AH z{}L}U4w^CKNAu5|bnC~Y$Sx-PUj8nk9J9X)?u(y`^8Xq_MT0?MG(Yed*3M5y`RDfW z=wE~g)0gLO{i63&S|jHd(2rk}{}L}1>t&pO`EkCV%0JJGzJGTkX$HdqMl-h;k;wyN zx`-9)k^Jvuv|J6MaLtou#8v8;g>ErNz)0C=AWe)wH*wV`{>K6VrB~m;^9}BtN<*!8 zUgsjjXMTeAwb_N&P zZA+p6QBIfvGQkF)(R9+@(TP8UC!{d%8H(a&zZX#INWH5-s!F-gvn8VNFz=h_%23Oi zRxqn}6~qy$xeRfPEjYlJY_S^9aRk8#V|8{mY)rh1KiysvnamX%tlag4^`SFDKE)i0 zUW2WODp;C4s0NL#%8PdW{RXQHfMzf)v>F^(q6ttdKW4E@`Tx6hV~l21bw*mMIjp1# zFvGD+gxf{T*2;|`&h**mx@!?dU}34S`sTps_B)?o(=_uZ^zuXb3@=LF^6ez4`QF*g zqVNP#z(1;m{}Wox(!w8-ifDtB163{jmQv)R@)NCt2-?0wTEiwMGLo!gzv2-q+h9!# zE$cq^_YJ&Uvi!s+f^{Fdm34ox3OF;dLNJ0At^01)_gXqDS@&Vbq9}43sQd)hZkYtU z#ur2>K73I@iZ|9Udd4z(D(ud9mkjsJV^nu4782vI4DF<6@XNA=F9Eo57xr9ezK*7- zOD(#)A&&`SvQ?`MnxzPjXf~KDP=u2-@f4=)hL#z+-Fx_(i^LL4%*pB=o2H43Lc_oo>@AXmbd4CGQ&(?{o z9Sw!<2m+s|%||KQ_yRdKyFg9=0r`LlhxYb9lMQ)`CWDLC&E)N1(2!$pV{`@I5eu*& 
z^!lnFqO;J)e0WPAMAcOKP-?X2%ZH|Q_lTG$L(CwYw+ZJ3z6f3>!#11(VV}3!{2b=b zmhooYrdFUmUW-s`R>_Le8WR1b(?+5c!SgF;k5K^W!X)_6t5l?XfCyS%Z$_NhU0^U` zakJA&Vlo3{W=Wi_gD%g&U0ObIImQB$5T=o1d9_$|#7gJhN2qc6UDXdeP5_6b(0dUa1TzCFC za8M@lPm+WF?qDa0fx`BXJY~8sw-T})n<<-Joox1hVoa-gt&2Aj+i1%n%k%6boy~Z-G<+ZNql@mGtafzkx?y=l zw2!K1H}|so#rw$ElIo+`sseQaA-}lCfpYApJA-Sa%8O*NDXOC5!dauimgW#tYQKIk-}fmG%`H6kKm3i#7&qj9_J8@$2Eohn$3E>JKa<8CC1t|H z+RiJIxLJk+vtR4@s`V5XOwF*m!p9;NijYbZ#wDRjhs>x{o`^>hiV%hpbbj2_B=8e` zAXNdz1L}iWI1dt=-P8CIr*g~cO}lUp!V*9Pk;#pUb_)IKhQfFBL?Lg>vTb@o8?u{h z->KvY1PCIsNyo0ZRlXGrO@oWuA`Yr4XDwV%iNm-p+61nj;Xv<&%dA1M-`nCMml^OPUp-+)H6#|jt38AYl=Wk%2j zj7DVAWysamAlEe@EH4UN8PL9B@NdD&H&qm9GXoRdC;7VD32rC%@aiqmtMyK4 zUM6cE>SEERh4#P9jiR3@mN1+f1*dD<`&Foi zH&D%#Syqu~B~muQ+;)txmqdc;+T@-a(+5hZcL62wdi+Ir=dW_y6#b!#if!FdLHN3R zwii7>#+0EoU=70taiQd+VS^CU6mpxG3O54jN&fr)1?ghEk{Oi1|96d>3KP{a)3&xs zc0x?jXVrML(wHkN?cd?1{Yrb~O~KjmwAxQs+DlIuWshCdKY2?~4dring7$H?$+D7K ztkJrttHa_G)=@rZz~-~OWPh@5~=Q^6cbao)E)z0Ayb?7g(RDu0|8Np@eUnnawF!7V)K(d;g%8xP4M zLJAeIjgC!us#4jxuIr{U8r~${!%p(7=Sm17H5Z1UZP_Pf5a9q4ryB}+kI7_J9qgR@wU} zyuo@kfJe$+yu1O>Y*@xdoU7^4jnNO4GMOH~UZ>MPR*8jUqCGJjnC!+CO1+Euf=o#u3hvv3rv$?)oVweKV5}(4;I%Ii=5P|CaStZl_4nUhj(-WGReNg0 zN{K!-DAd7L)jKVlzk`y zmH*Xm`rUFSutqL=7xVo2pYO|qYmi!cMZOq#{!umlzDi?`RN8;Ulia|CkW*K~qAi@H zOiVqK=D+q) zS517R=C(g2?vfFR3y_r8s&-|U8M6aR6jtSO__-A}lTnAuzAQytr?E7Xl@;GOoEAv_ z805C5A0hJIqM;(JuaI4(i|>yzm|#y?=f)f3Puh1)RxDHNO)+i^Y6b1%r5)#AlbFO4 z8?r~aWo?RIYCl`(PLnOf)h(trlQyr?W71$X4e*E3m5fa+=}{2Zfzj+xt*R-zDX|gn zTxk-Nh?B=Rhv5-ZU%?XxP02kF$k$2>jWHZRoq~*17J9Y1%ZJ3PzE$5@#PeP z%)WjZ%fTVg)Ez>AC`_Jt1SH&RNT`iy>gG?fJVHLrClw)YOA2#TbzmFt;V@vDFHwOf zC+<~Jt3u?t_>j&y_+=R-{ZRgY2d&mlSK%XFmNE;1h?FicImsgu5J}@EjuW9q?|5vWHs@VFKn~^qP=vNWexCvgmLuM_|wLHpizzgdp zNz2YTPpdGpH!d?I>VnIUTr5jNe!5nP5&bw3{&?CFBdQQC3z%vCKhbL{)uAj`?$jx1 z3)bL;a1qk9Ptxpz@+e{NtqQGhg_m#h!@8%g4p|R%p9YBf$Zt#jOTy}cQz_vz5m#~w zM1ZQ3oE|MBRN}W%)F__L!s%1%gU7E6RV)K&jxUTbMU%|wjPGm_*#z5jIMq%+^ri2d zKu^G}y0%#hBAlSdwBcMAc~RHT#?RHah~`Dx@F>{M2y`6)wHaLk*5~W6KIjdkn10lz 
z$N(b?p|XpPkE+r_H*M%%N>)=$^y;MB2gxje0(vGJ&eY7A zqK>{wyi(ll$96pamh7-mkb>j`9Ci!0sK@LZrcJqbpiMDmU$bHmrb+`lT3*vN)TA)- zdo~bo)!(YinN%52mC;G|fh&VgJmA)|Z<{_in0yJ%d|i#+KU5`1r_uE)Bu9wSI_B+WaU-bdUVs41u=f(%$wo zr@+k~Lr|~;iQi z=y7{Vc-)*FhQMBIcosh>c`T8px%i6vJ&P!qEriS1>#{9Qo04XXC`~2vE^%1 z&v>qQOYr4-fvBq_ z-l&kc1%0v#v%rk+da+sY!neiQ2P{VBPwI;SoiFfFKKlGseG(Bm#j%~x8N8(QW0p=` z`bjP0Ei^;>7u1As8WyoqHNldZ+aliSqqFbspf#f12pPbIUe?lj8s%$KI2zA_2hr22 zD%#Xg7f-y3BeW#9qHG_fuZpG^S?V#F%+)NKQiy>xxVMlv7dS&I>&zM1>+q1 z^|+`m<5DdLm`-#u3MP^+O)%u1FHU4esxCQvIl++TNi0cB^r)$Q`=G=ui3*Z~ zdEqN}Cl1macNXtcri1p4T(Hge+7xk#Bu$f=m2;{_c0PzqVlq+*5Y)`a(ca2&K7KL|G-K`qI3jtE8NM6`1%J*I2iGjJ0wbMdykwI$BG-TKdNfBek{xlM5H{VU~? zo{*ELnVb>O$qqoafL%MX@AG+_jC-R32PmJR1h9@uE>$`enYLudT|3Zs@|l0F;)4!n zj#l)BbA5tQ?y)|J!GTea34uT{{hx^JWxrRUe4S zWLUkMq;-;R7S@!uzG)FnQ-oxaA^d)cA!QbhrW`{_IVE|WCj~2j7y;pON+$Vj*F_PT z;x$J*J0^{eO_?^HJ%umuc=qKQaod_Z9q8O!)xF~24$mXgWuBK?4?iI^u@Kw3tN0B$ zVx)UH>+~;Y4(9#%K_yQJ=6M<=JGXVm4Q=$uXM>qkOzFJ1yTRJMEI@0Qttsc$fUe;D zfIMuo3@5b)YtE$<62p+CYaeob;>L7cpunwE7SnB(RE@Nu2~61zd3Bgu?}~%aorn&k zqHmT6M7);J&w|I>M|uep@~K$PVj|asG`t=t{q*2iOt*@(csAN`J1@~sJV^h#q5DMj zrgJ-A;TwK)gkP~;%W)5D=JR5&CJ$<=DThs8ZIgq)1w7_MT6yeG1A!A{* zH!hsE4X3PyA(bdLA(de5pLkH@B~jL~N`_9ytDpm~(6aF3zdK@@a)tNox}$urB|uqB zXYp-or|mQ!Abq4DS+!&(%*qROapTgZnO#pc0kUiNYHOjuK+jqWtqGIT&QN5JP70A2 z6ez5le#y2OC|=u3%Ozt2n6eEBJ-?@4J+Vy+{T3)9HxN4X&j29XgSM@Hmye;|?cJmNNV)57~$cCj>jNgbwU z_sSa)GofBy?_;}F;FFL0#lSIuN!Z%D0z{&-kP~2J(e9-;=<&WAQ>o%)P3Hvp@>Gzk zS#QqcaBGUXR!2LM1cva3!pvl?JIb4mslug&m$E$!BK;4aOI}zgUS0i-?&vAKvzttL zI(|k5z8B>ml7a7RP{qG2Km8|sL>2!%w&U$55U0t} zp(_v4rxbLl;Syn17B2BiGrwYJ!(KpuTIn@_Sk3L407B+a-Obj#*v)mZ1~k8)dTElm zbh$WHsTUWKQkn&qJVgwUayO9x%1uECh9hpCs=FrBvM=F(WeLotFN52|0_;m85vw}J z)ooQi`^Wn-5SF^;sx35hyx}OcB&mz^pIwP9ZM}3E_%d1EVbR82)=9}q5--#f35V72 zKhC1pi{jd*6QbKB-^+M>eTbLrmkf-Xpf;7g1g&hmqWgK^VQ4E zhB}&o3(N$vWzNt}BJOUrhl*zPdLhm)$jTq@Sey52P#>y*9cv$i4Ug=6Fz@$w;mf#3 z4dvzD=6uv&g0HeqfyhlUcYaHy;DI}%d?%pxLP&@;Z5J-=WJbZmlt!};jp?yN7d@s* z$hYFFWY5p>{9!%MARzR&Jtsg4a`W7i 
zJkLV$&Y@@pD2@+7p{5(7W3jE%B9E{n8;IQ3O~!kpZ@xJJaFKVsFOmLCOv>Ps_x|T) z4*|q|35U468HHMTGJ1-#09Jd%sS3e4hzC-|6|^#evsEyr9?2SlbwhF{C2JzD zr_1{**Fc ziO&06T+6x7)OsD8dne2tH3RCjS58lH7Xq=MwMH@^UU%1eS6b0#wIAj_VZ2s%QkV{- zTE0RmEi#%4Ir2wY|0u!<#`jW3YJhQkUt8VFZNX;kSdcu;;9y}pKGDgDiWE@9F~I#M znEIx8Zm5e9oXW){HzfPGs`7q9@Aubwt4olPW}!B=h;9pZhDXz@SO9v3ue0&Gt4!2p zr`Y$nxcw2*0zU|P6Fx1Ofwhv}kMY3pHd_e;RIAA4f;!A%&zL$7>GcZBnZ(wjLPN{8 zOr3l2m9#tnKKfRF^9j|}dOFgUl}TQ(&!a8TVh^a2Gf;Y5u|L$$2(QQ2T(swaAhInz z&%s|Z7F#ehB9dwjO)>KPl-A?WR1j=H=Mz(AKh?nUqsW z>-}+cHA^RdD9I*GfEz{h6iio|GD}#|mLf*O9#Pu`8}y%TyA;b>25LXa zS;5SJz&lUk$V)IYNi;_}>22M~T6u)``2y|R`BD3OgukQNM?qFKeU8zZ7D?l4VYC?1 zJ7jOL?KFseJMAnL0OzG5G=Nr@=yv z)6h?9@R6NpitB1Gp4KZHSKh}UEz?vFzlY=ON@CLAE<(86gMT1$Kri4`*S z94!^BHP|iT9s9sfm_$F@g=_giASTYhoEvgu2uA_zyobA^oh$GkQi^;g90vl8O;R2R zO148ZnI+qC3&y;dmY)jq%T5Kgt@8Y%YPy`~1BJl^iWtAa$XY?;@4UkA$+eMl0KlTT zstue(?J8qwg~8p7pFy-C$jj3T+g1f3@Ersn^il3F$_I`&m@Ns&bWnL(VXn-DT8=e` zH<@a{bWWTua-J+s44oke)9ezxLsezO8beal7kFy=o0*_;!9| z(3d}?9&wG;Sb%A)f)g(o^l)OBE3JzD zjJJks+~UxFSJREr@e*Cfm>ir?2z!POst7w#OtMuX>x2zw#{k@fBRXZ-ZE@DAB= zg{a&9IRIm-c6UgON*rO(-JynXZ{5xFtDwhoj8^NnWp8VPF7P>TZ2-HNo8QS+z!hz# z6w%4OMa(|g47-OnnL1)6Mz8Bc;PIrO(~ddTu$$U&4HUj#r0}Ez$8EVmz$E-jY7PJM zgzjd)2wpeE=h{{IUTl10bjGb(`#QRPjAx%*t%ip>N<|=dQ@eY0P*{m|n_31AK49I9 z9Int>dagsdLes&8c9kd%g?&a40$GBkcJ?-r8~E#+Kn!kd02EfZ1(BBBv>vlCc6!U_ zI)(28ij0o7GR;!Usz)+C>4ywgk95<2ze9VI1-~}ghwI>sFx`>Jgr&!J;0gz-T@w8) zMQ_0-$>$k&OiS`iB4yKzCzE7bjScX8xFbA|t&HarW4$rkBvx2NTXJKxsBf72UXaL% zB?H-5brMvo(SkqH9kWTuZ2^o^M^N|3PLRL2?wl5)UvhJFR!7Xn*Z`{ub-}yO8-(|b6xQJy4qeMBYwVw*0VzH;zEWPaC88aTzNa^YFMZ znLjZul1Z||Kr6B1s3Za1Q;+dRYluGbNxeml%Z!hNw+LhldbWTnTd0P?cI|Wj{ajD~f_->Mt)Z9$yykz#emty4 zZW2ys4L1j9_`$J(!e%hKSvVPeL|$pVSW6i3g1X2AKq<QZJ(Q+~Na z3J1HyLVkD9&F1X_G}coxG1$Y-jEtn47)R*9{l4S1CTB{|s;+=fhHH8mjHq&xOrzN1 z#0v1=!=?~3W_8Vh1s)Wo-bkY+MqpOnho85P`W@BujArR#cAQ*9aNlYhziFFa}%l7eIk95Y2WlZIsUaCMQPFFewTFhB|dtdg)=$p*jznH-~M}ut3 z0TH(S#w?FPW#)tSWlKw3?gl>`VT3z_+Cw(}KJXV~urwQzyKGrj#AuWsL+A!z1%{g1 
zk-6Qk585_T``KneepHsO7`Vpd8p8JY*B&2Xo{z!TFqLaPgrze|qR$(W?02pV>=OLT zA2FyljYkBxL`)q@s)3>$nGX=sk^jS8G6=M(tuH;jA%iA5pzY(eIEsWI4J6C(XQl+J zcP8_$dD!WPUmg+76hFv2mphaHkjJl;*lJl)zUS_=i!VfX+Qk=)JMG)*MFfBxLWvS% zoB1VOma7P5Gp9#c^i)d<_k&`k)If1gbQ=fI#%My87SuAEqOq4pKdpCoh02q`-XXy# zLn`k#MmUz<9KFWBN310chl1WIIDKP!N7&}IkWD6uMQO11E%B~iW2+v#r(1)T@#a*@G`USkCta?5s`Y&{TYw^5pstq3dwh9Q4iLlU8c2ya7T zQ!mEsYx4ND^HPrg*kd zBq<$kAq3X5-)~OCi50Xex>KwP`(|`fL=L9+p@%(*3r>CtVAA}gK*~Nr)RWZC5O+hA zAZZSAyf!&yqFBbWoQmbB7lY?5_F@80!o0I@wYH?M>$SI}sC0!k1SicmO%k0kMk{9p z$2o)J>@ql(hT%A8a4?5jJ`K+(mVw#z6HC&Cej>jz`!)xLU9-PY~INI_R1mKfAq(dENar z+kR76zF@O`ANPM2vuT$nm>qrCGND1{JK1>4VJquAY1W=@+)YTAlu!R8&1^Q$GyBUl zo5{iC$BM~|^9JxMlLyu1*OfVIou$Xv#}3T*F_$F!e0mUI$B4%ZBv_zOJ(B!wDQz&rFymaWr#J02@Ags>yu~WfX}ynG}MG_RCz;!!u_mw&f?- zg`o?bEwda$b85-^y}Z{`o1%B9kmNfjtQKar^Fd8+Q}oMxxiNZlBG)$zvMGMogct$N zP7~9$(eVO|XBYt<;eFYzw@&~;MPPc^J>haF(v#3pfXXlZ3Bc_oHv^Ty4i)7;cKh05 z4tiYN_oS7%ygAyV*UN8?P~4>X_j8X8-|y#DSXVFkGlsX5_Luo9uQ(8&0C!`mPXH%C zv%`==g4iqwNb=T0(}}>7_mHhF(?q}6n2rnleJ&O78b#2sk%eblW(J`r4QQOm(1Q5qnZR+gWMWZ$lBq z24N#QPaZ7|w^hns>Amycd~*Bv`k3>g-r&6GjEs(rPh{C;mrY%ESrlESKmOm;r1Xek z|AMsa4J!_Rb)^l3$yaQ%uF@vCM4PphHt8kWkQt+dr*?@pS5?~7FVSXorA^}!ZLX}e zXk@4)ue2GtL>na+Dj43};B0`~mfg26@hInqi5O&Jc_RiK>&2AC`GR@* z;4L=j&>D9x(UZ=02`l-?@}3seQ+|$q*jwzt6^PItzSQ)@?^o`*A4|j;IuNb#rFw$v zmx!vIX5;18Hcqyv-F+z;26o0_K?zr7OjxlyRZ#ckh<{cw-Ky~jbGc-X@;@w=uL700 zM1EMun=!NOGqDEl!`NUs=-Hd>a};g)DpU8t>fq57m*Vnj7aH%8LLIkb5ioW$gWM*z zvPZ9Cz#LI{jLq2JS)5g!2cIG&UK6)e3*xk;mQyWfA0lIsdKHTtz>kG6^fa(+>?R-=@UcQ!D7}B$pK(Qh78jrnNFiv zP`Z=^%;J2qU2z$ssd&AxwmgzYRR{Yv>{GFTh*)qYdwCsx@R=oYbqNs79HQ>^CQ(YHGmN>jEvzsOwyAs7tDxTjK_KoVQj3}%2M}X zbV3Nq9+L7zmyC#}U4$Wh3MpG4<^!ZsvHFcGwnvCZXes)LUmn$Yn5X7=O8t;sufq$< zc)6$Z!+z^-K=oC83oqF1P%wTwOIC%<5EyGc2_Gs%qBw}YSeI~<)TTyvB1g6)`|NnH z>DJUpMJB~4*3+a>DQQOxD>2xVU|wZq?WRZ2ZonKYSXhBT)0FyV5xH@_o`!>9;Wgbg zTP_Y3W$DK~Rm$Ys06Xn9@44!MJe z<$MhEdME+-j!evnf_&F>FV_I_O7E@it|7*^3G#^GoxX~$2pF-j*Qr|xmi4gLtGZJj 
zp^3Y=g}q+E^Ds&8y_+cJ5>?2}(Gy01?(WX-9Neqv+<8@HXbs%*fy02kDFfSri|x6V zo|CKfl!J!)%z}#tBkNWqmkGGx7^YVoAPjLH8w|Tw%xxqoTG+%%#$tm7b3(rfSF`sYZR1ZJ%-5ckFgNSXu|3uC3+t|4hc zNGi;?WHK+1G%I2R_&uUTyyd4Yb9g{nBBW_BhkcaTU?oT~>{7*AFsoY(29cetJSHV2 zmHiM!)G>#~e%PUfhCx+|0VU{5B@XtHF-z+-hs_a|s5Ed2h*!WUpPX)umK%6N)0(Do z#-Gg2(x0$2y~g)q)C-ux6_hC7H3zHc0`uE@`jU8HtWv_Ud=H2R9t-%xLjZzQJh1s% zT0C&;)^2jEVt-j5mC-ar5-acVz>}J?k(-6E3YSWp3Bk<^N_Mm2f7`NuSo|;6Y5Gw7 zFHQdEI5K?7 zsj7jB7l#9hJ}S3rIZA2O?NMwu`zBv(c@q}fxh4v7$ber)d!5H`q=!&`OHpCteAv7_ z`y&S26u*Y@l{_X=yo>LdOtpv^A63K*(C_u`w%(B3Lo>x+U0kz_Agr>pC$u(b7f1yI z+egaO>x+rY;Z?6 zF*Y6nLt$P|q&BZYveSU`Y4%5J!YPSC6meL^l+UE1je=;jVXsGwmOq&hGOH|76NyQF zrjQuJIj!_YpcZx&T`w?NHL}2fe?m+H&)Q-q;R_LgNLl}jfU!o7Cmw`Y23T``*;#r* z2spuSYYyU)lpRbFvZb3vEP-JhLCP*%rD?-sVqz_6|j!Je>=0Gv2^NHs%l!*$XBwe|#+{z_p3 zJa8Zm7TV!{qGkOYQa={ds6-7s{|&68xZ%bFDbnq{x)W)(jC0u4PgXnB@+b|a;F50% z^$~L?DJ4vw9q#<(DG;TW?T)+KNpV_1>jLYnP45hUc`!Q>CU#a6lOEqqG2d**brmF8 zNX!spl>%f$NH%vRi<6|}21$URm6c^26`k$fm8yikS7e`-vZGCpz7eg)4d z)HXH0p(OjHT^;d^fzz5)ZBdkf^5gW~4jnR`TgRl{pbu2&ck(pZ4ERe8U8u(U*jnv_ z%}XDzrVs8%XcFkbeG)WGwcZBavr+EgGn%R`_%nI}iplr>;bZP~UXvdVDqf|2phvsz zK)1ZlPU@0t9ajo_H09wzFFQ^AE0&xu9uOg_cXS-kRTvyFQzeX$l3RtJX<*SLF}$Ug zvZ(Y1$B;sz&hWMDiyG7!)$oisdRg=319n3yvU%x(#duST>n)H6**9$knsRDT7aODB zBp6NrRwl*eG?w@)A_dmsHf$%yvTNlY821q{pL<{`MS$VK8crfZsxzj*;!`nms{*`W zn*v3VUC=PqG%Wie2Ps(LYjb=tW6C+r=Df{@O;M7P#au@0ruNMy(qJSd%MP0D#h3h*C`i_7Wi}Zk9jd_m5lO5aH1{zR9h~TN^q|;A=kBWcjh0Hn2le4she0=Jn>=lm16D&J5JeZU*Oo#W2zIRr2T0DY zdLs$(!WY~ND$({h!Uw@-z8|>3u^5)qY%HG%Dfr4Y`07zxo6qu6!QIvQSAT zYhyV{w^n`%?oajXixd1*L$1$HeaVg7sR$04`A%&at(Xs?R2bwPEE=io^@-vG=?TM- zl&=oSE)|KUn+4Ldr@PcWgsyQj(^Z7Gaz~{NzIh<-uM@m`5>=Ak%&B6M6YK6}E6h`? 
z=BZKWYqZyZ8PicqGK80{uoOgOM&+}YttS;0mRr6-gshT$z8Y^7^ZxS)r^}G^U zRvE{y{tm?X1Nq+u;xrn5asJ;=K&wSffUEPrS@NuXfuz4LfA$Z_?w#dldCB%)T~ums z{u^fHxy<=wRX>(KW5^KX5`vtGVPERxeFf6ITQ*@pa!zsk#H`7vIyuZ7k>kYBj4 zto6BBnq8a!zFt=4ALgZO{e$^``4&8}nEQ+KFaLTa!RGb(`<|_o#rwYe*S_VyW)8@@ z{6lAX`ssE5Xmu|>E7FaU&Hp3nUVPT5u?rbOjsNuPUKm~dP~O*yu%2OwUX-7>0HLu& z?~OgFI6kDFcRrF02F-!I3wV)3%8%~gSV{7$xAD_7iSx7+gMW~o?1S6KCu5j&62ZsP zv}XTt;*-ANwrSJ0*YY3II}~Nft@ibUY}@pe5Aq$?WxeGcoQB;`J00(qQ9k-dqoU9K z52T{c{WGMZ>+=I5ngJDEm4D??j%}zYaIg38^(rbQBiQ$j%I}-<&*>x2?}buynv)i` zT2=+mB;)=0yFUi&oT`-R;1&s|MP$#ufL>Br6Oc|qQLRwgV_wf(HuMZK%nGGILgA@D zQ2R!;NR~}vP5z4~e5;gf^~1-Ft?t&#ME*CQhEc-Lzvtf2Cx*`JR`t9lv%ds`>UrH- zc3xYPUTbh(w>(vDk>$hAYY9O*uXzb)HK(cM9Ty^2IIB;Dvzp^wXEi5uIIB?(>8w^6 z5HzdH&uTxUy+Y4QXEnMnoz*y1uIh3|qv#;9ndhO`wX=&(Na;SfTKLi3ZG7+X|M?dp z{y+bM!T-zH!rGCziQ@x~N`bq1lapg-7u%!ck@O+C@$G;Xny=j{Yocn--oR?==gMQ4 zN*}UdELsyQ8>yG*-|M+}M>pCji__pL_Z~#F{OY`4mDH}w|K^XKFWC^o#=`XaVv9yIQ=JY)lCA#2youw$m?fFi3 z_SS+s0_=i)i%4ZmbcjaZ9K{LeEB0W zifK$ea@^MF2nIGfs`7d$Ch>#~Prb@w5=Z!rgpax~T$^sSu{Uq0`H`}rLbby1o7L!! 
z>>C;zJAN4ZrfoK0G4}NuTO-qUrgmd`EWp&~{emD_i{-(-+ei;LM~iwd!XjK)Z@E$P z_LN%AkiDRu9|e?pzCau`*Fthm?jae1-wUyD&FFGcUJRkXx-N!qXt-`y8g)1Bd#@2X zdrizO)lI5C@LqI^9nHwotW!S9+?8YC&sSKn!%I1eR%xw z**D2E5USk=bi6Iyp=s(#&z_#NFhTs^tB3}mO|8QV^8WCI$H?O9*fy&X2Qcd+Rh$BJzp%;ado?BlT}zq zCwnJUf!o%_vov_!aA?rtc&zSakhQ>*0oXT3r}>3wjTjz=vB`QzQ_4jGE1w6PFveMC zZ%bjG`d~70;*0d`85m5VbCM0s%^UAbJR%c$Q9FQtjEjjqXwDAr}zLE)v9vOW#T z{&1ADO16u_w7~+|h(HIAaTZzz1{;%Gw7>xM>IBxvYV+9^4)!SQAnU#LDwb3rTH|fh zbT=jlqPoJ?Ym0)KVq6x9_!oq~__P-SQxrL>h=l97zh9;i){afgH6GdVn_$dJA!$0M zcN71Dbm8{BmLzHQoKw~yd-tgCsL0H(R=|j+Mx!E3p$9}&dDrzk3`0__PyUc=Xoz(I zNR_j#FMoFSA@giULp*ldi4k&}t-N<++1&cA%)i|c8>K0rKvczYiq`Sx~WM8zxg z^@sUnh9%>EtVBb$T7{hzfQtp@W=ZJAR#=1eu(AZ&`WR!N@W*O#Ls!Ll7P`sa$MVNg zTUkX`TlN1028%dUlaTXT#{#)GL-Tbs?Sgt;POP+ID|^oPCsNh=-50(r`M@e#d@SDUb@qI5DI9jELJO5IH8S zbcE5pO7>bA#imgWjQKs3irf_6QcVxaMzJD~H{GCF+D4JR)G8(N*Vd+d^3LT?nt;^Q zQCd6zU_KRN4UG1hUT3@(#c|4f7@h!iDMo`61O_m9ZpBzmanTBB8Ego)lzAKr6`~*& zXv7r*%e#HNAAm_7a33$`xx#welHJ7@zIgZ0kOPsw%8T99p)Gj`unsJz)^g(k;v8%T z`N)b5KpG!x*IH=IUy$Oxj+n4&`>4B+M{;y3#e7|Dn>&zGNtetk=`6a9*XI5HBk}@l z#aJ$|N!A`zKXTV3FmH*%jma81(bu{` z^L3;Anv4=l@q@k5ynlfq*XrpdD$?_Se-+E&y7(%-m4ZAtAF5UBm2gh7$^2|S!d-2r zYG3g6H9PR?;yBbpElTvPfs|TB6PUL*ftc{*>yB*OmQQTgB?>a5C|{4h!U~sOTQU~~iX!xQZ2OV^cJ@&_W68BYI zO7o6_$ml@es(Qr4tC)}K3o)!~l6vijSgZuJwzlUJc7b)pL%01980=#AS8LA02Q{Go zCJ6h{c?-&GtUWB@SQQ zKxxC5xGB0QE^(9HsrUUoE^*ugb%`6NC-~;jC2pK-Mf^Cp#P!$F;2-f4XWehLu~)dn zh2g^`?%OZ;OWc1*+GL2*buny)bp#;e#L&7Jwjx;xAPlxGoR zAnqVO^>qSrHm@#*#eiM;`F)p3x*94I$k9-8Zn*yjW89${`VW>_E6m~`z>&$YM86&X zpN&`?T=PFr`h4&sD1D~$x9}oc@S|m=iqcXV6r!^1fvbP0rSwl;h?>7Y`JbctLx%xM zpI;q&M2liK5v9#RtDkHh{PX)-lz$?U(_fu4TtaH32>wJ8c;nzS7Xa5JDsxFHud|UY z@pQw?Mce_zqIHq*`-;p_R3eUWp5BO4FLA-W^ySgdz>L3yJOTMU)T`^>hKWFawYW&B z3I}5%t8BrSiiX_Hvp?p1*@62lWd3q8c{hXNEky%KRbz==E^zOpZ0Z(TyiQ)Z z|I1qDYsV>2-@&MfdB??s;m9V3PVgt`_s6()=8J!@FN5prh`4VHn0)-rHYwm6VO08( zg0-do*ocOOt+bY(T%5h7t#d5#>@+CSgFXA|{LM?aVvOcr|2jsR|F&-A`D(8pmQlJ| zdmVPdnqCM=#0>jE3YR$`|CrjZsu*aby?Tz?>z`Hoq1x*c^q>EJl>gqRaSFkV6f86o 
z`D4HR^I6`@&wrE`GOO_-Q+D`yGXKlp;rsRZC-lfi1qK+iUKJ3O)&G9v=xn=MQ zy1m@i<|KycqG3gX_=|q7Ti+N>Y1hWtTjlg*3yQYQ!U_&?!{AbC?{(WiEysIv_SKCR zWgN;|>u;XFP=i!+y3qp0lNW)3w|8uE|1o=nlPQ*-jkd?*>M9k7WyR-;^*!dd9nfwU z;akd6(`i!XHkAWnN!jY6FX?M^kP7cQxU*kz>g1~QrBY#{d-QUmhEPS#xv5QznBcb7 z!yX9L@Gh2C>P@uzX{$$+gVQm3lg9JuP2?^V^d1ukkGp!c?}vF zUZ7!+o4ks;VbUpsm)(_BbSOo5yKYQovs&Y|iREV|om%ttwUzZPK9VX$KDRlw zA#*BZ!mTN?iVKMTi6#esp=QIsz35tr;>84 zpfE9I$9BKqzhNf%NyH-4m~g9K;-s;QzrL5hiECPdE~SC0!&EGZ>sA_TOr;V-&giS{ zxjqVlQk_D3>ytE%IZ7R3)w-=6s&}j()VbWIm5ChsRpeWba-3*}uwL*j9}A_BvTnLU zNa24d)S9MlNp@06frm{aLl}eWmmmf;g1(DJP)TTnYR|Hh3f_;SlzBbRq$p(;q6d3~ z1ZpcqT!ZwEE35P-Xm>o6sM~mmcTkA*B?Ikq|Ld`QGYFo)h4^@^n`nGKGs7Jyzeon%|oY5rTqq8a1J6U(~(Sdr0E)J-vZ1io} z=uzV=SMS*ODl=TDcc!|Ry?3DAp^IS~ecSEszC9{7dh|L{@1TQn8CDV5-WDYRp`yV) zKRt^6U0V}bI3_Ywx|dNZ4fh{;?c=iD$6Vpel|V%m%ycg?cvMMOb%aWS{*Dwwz`e}6 z-_BOB?gzz?WSdek%`FEjZK@rZG0-G ztDmB!$iO0)>MV@8qbtc2)P@~7dj~U<_NZwxCieN&x&%|4POvK@4*yKMf8?jVP+ulEnOt;PUj%%4EN? zngdWQQ+j+R=sIkksNY6NdyRr{aI%*p%CcA*)T&q3^FE0yIgwaY2}!);Hyzx_+IFYc zo@UZo;GdHyysB3zg+y~?T9y3dWwh6_GDw_tGJ&AQKv5hx+Ujoeha=-1O7$W69pn4; z-s(D9!9%)ya-p94viy#M?W$fJ@Mvb7hjF{8?VL_9>)5y)HBbX6R~BaW znd*!-?S7_%N8%%Y!e`0dl-n>M;v8)gB~6qtK!-muk?ejmu2uA>S{7}WokZ;fUujls zPgpUYDXtqMqHkHqa2>K+!G|s{=xU+m*FLQ8ZYN(xP=!O@cs*)1n=wWVBL^{(W+ilY zK!Lgy-_mR5S8gBgBoq2Cjq5eW#>9F%X${g28sg3Vg#Ken7ep&#f*yTeF< zs5&2pGf?Uc{z6Ql^PO}q%-Zctstye3(~>3FZv5p!8XE3K+eFK9hBh@?_t7yp9MGQM z_gVj9*1!l4ZsiT-f+|K4aR@qAK|IBPG$ei+?;{{W~>;X-1@Q54ga1*%!dV(Ey#gbs3N0~>si<>9@mQb9qucSALCcrNlpa3$V{DEp-~$`Y95PUv}-d% z81H)0kzMJ|v9u5)elj58jm_yqCzn7K-qBn8_8|Wi6AvI=KjV*@)8^85cmRjjP(Ysn7VZhP0JFlsHlkB)yu=QEVJjyDP~HFu0V6%9*`R<3kt>jf_EJ9d?z0*fjE*3$c_1koGk}R*vqNt%qM2d z+WTBkpU17w1^yP?`Wf{qFLdyC!f?8%-r?5fvvxe`r8Z9mtR`xteDgM_uAinw7p{k= zC8}3t!8E_ZID9^NI~9Zr9G?;-$|}K0RnG3L;7}X-CJ_b8b0#re;CX9E`UxomX|O3? 
z3QhA24Wqadjn6{}eZ7bIV+|m?1%#7ohzs_7KP|}TIF9lRRL+Hmj|EQUxKx^aUFeN9 zG!+32SEVcG>>BJuYDxEQC#=4hlX)fUx?&gUuwn;l3^iyYb2CZ;=!RBu#x)P((3Tuc z-qt4|Z&-?|ivMWb!*#JqP1w9=A)5t#?yu-0ySr{(RcF|@Funpi)b6Y2d+b@fZ1esn z@mO{D+F5@TQAi7Iyl;;^Y8yv{23CX|Febh!$we!+S3Rn(9~x->wz79ZQmXmZQy#h2 zZ4ch6&A7vK`kK!d;tFNX>EXln@Em_to;bSw@m)cjpthSKhAbm>lv-d#$VfKJt78XU zLzklmr#oFxmE$#Haz#gaEp|`|5J|4GFK8i!L52%ta#U~W@d$HV;9b6(49Rdjdv1Xu zZjP0i1|W1C9lLF3OM)#pj>Hql3C0uhA8CTfuY!Q#j!OsxYqNUw#ofbOOM%TQ*nni{ z4nv*NP*Ax#KaE!EV7U`dHsCzYa};4ZAYc=Whc}}MOKk~|B=Axf3O2AMg{_i|TT7}- zLqWY~1kiZ{g-qOmB9{cp0BG_^^XPd&F+9PeM&WEfUh`v|E$lh#ilozFR>zyxRTYL~ zzS*3Wl#I)G1~$gJVwGIfbnZohPEcl@FYmqV5OWUrrDd2ofnA%SJy}PU+WjanacbgldJM&CiCc>PSANp_^z^#jIK3DyW zMQva8^KAJ+Kyu`*W`ew|Vkh&$?Zu^;;lCQPi`q!CO)G9j%_ObYV5v6#v9e1(GyX#L zGvgnrex5Hs=y*e)CWAAYH|cim;&+~FLB9Hag#NpwD6^j)-~|^e{I&l;cgh>EJ2%a! z#r|PG6NZAs*|bYcBYP*;D-xqu@?Wf0V|Fv>{+Zx7H%Swy zI(EnJj{B`u2Ygst<`}Nq%?;5n#5Y8fe1o~OneQPxMikvZx_q0Nq`7esI6rY`G_y)u z*M6YGc2{QSSgq#c2XTi3w0+#LrHCjWDQ>e~2~JBc&qSM1P4Z^bm~k_gp1Y(GE!Rl~ z{0EZ=kQLL-n~o8HB)t&SE}Rz22aRj_AW<-Nx4ai@-4F+tE!=52OP&GNV&zWpzL}rq zZ?STHWHOs4nM+pd+-Bv@g_S!CdW5DkadqvM{$6YMch$8cHAz@Io0jGHy2RS;8G?`I z#oEmz-{IOZr=hizfIPHz$Kg2t;I#uX+n#629mi+3=c(#vw&#iJ=f(1aVg3^93!`JP ziGxrTOd$7dsihGYMR4Qt+muwuU%vwHh3qA}laP!&VALB{6aqaU>^1S^V2R0aW=nf2 zxqU6Q64j2R)clp?$Zki(BzGKpXHQzC4;X1PpQT|zP$#Up9_>8d9erF*D1jv$y7s)`5&$qTGmQei~e`cC~dqY`a-Q*Z1`gK~{8-~lA1D9Fg*k_M-l z4;nQsT4&G-9E2gXdl*8j(1-YU#-4?ud7-L;X4J)eV`3YmTHvE-Yc-SlSz@GC&S0l$-?v42KxtfQ$+Q{8U`Dpfh;zWE&mCC(~_Q|+M)6)wxj~)9uzdOlWdD*v+7okV%-2|MsBBtkkbr@1G@PH z@Y_G=XapeGaly6=9z0?iMdMBA-&RX(W_*Pn@=_C0m1>T{kOn@${4H;Fr#mb7&H3|q-miXU!!DJ3lu4c|KM0(XA+t~-DTI`}jgays zMy*VVqts8L3lP4|2($2NUG^YJid7;*$-OgU<= zgT$nQPxM*yyw4q~Sl{Mta^re6hDX=@iZbsDf=3#s(qcFB-DD9pV!4~=WFhb=Dw7{% zV2%y`gZ2%xnW>r3K+;nV=^DObdNWHWkk>NHExQ%@Q2R<3Gjx)O7bFX##D)qF!+nmB zz1{F-r<0~a3Du_2TOk$@i&>Z(7zZBU`gf2fdxi93{)O*wXP8fdc9XgZasYr9HVt2i zW6d7vRTqp=EHX6 z8PRt$(yxh{=bO`(+s^1AU*h)(yAE`Q)^16cupV@bC=aO^*S$bn#}MJEEttG=8pG!w 
z=}jD@Z|jCN?pQ1bPmu$ExbL>}nptgKS}~n`FWT!)fRK;-ctn8MGOz$#EEpg8j5F*I z@2^eg_?yq$Qx_A_5z=#uh={2%h!3+JkZYF{(}|H@>B42^A695_1Y%-JHJeg*d>2U1 zcD=@!L*U7>&_GS1x9Vm(rFooU=SFq`~o{?O6Nwjq;fgR z_Q^-gE;FONxSC6LS3450Urju+3mW&OyFhG&@;}>N)D>OExu#BzzAi5_5`Oua)q^xK zX+dgpqXS1Uo5KKzS!^}KKxhZ!*HHn)&0AdxvYBg0!A&T`(##JtTq}D!DQ!(M4jo5% z5a@lo`D3DCyAa&%^l+C*+4+ssWhFhQy7r_|oM;c_^9*3e8uVgQ4qRq_)KL6{fH*=M z4rKewqb>qb4cV`jH-oAa&HW3s5_B(0Pi#1rUft z^HU6^L#7<;%+O#0oIU*9gR!lp)bjC(byBfKp4vdS=NOF=)IMLW^m6e(7 zF&O5=E?J<>{+pn>fT{Cn(U17ZlN9Pji#}G+m|kgijAjKMC`&aNkxwX2BL1uKHswV| zJQ1AVgO6)gi#DI*_O}G>1Otd?$a~RlQ3A?Y`Qliw(eGuHECfyeE7~QPfq`cNPtwf} z2HD`u@;(~f>=E0Yw4b)*-v%;-*vdZ8Hrt2NV{54C3_fpG!R$9j%o556iw4vrAtv-s z*#+5Fk%;(0yjNeL-h-f_OW1# zWi%P(;Yfg_7C5F%JenPsk)LFeAjuFFZH#a!FIkxGE}nfVyo2Wb9j^?i0|?M_E}_{| z>qh=`ebRF6)FRrcHX18d%Cu9QL(=jh)xbG)#F%yp*}T+FNgE`=Pd>YJ%E0-N>gW0L zgXEZ4wmD{vG`)Hnf{<2ZEd7sv!_}N;V0W6lFnf<~b- zP3HJG3L!YF2xDCH+!*Z}mxL3lcG$XHU<Ia0^|xCxeb}G)$brNVs}b{W68%&x zYW{b8*c5^Jxg@{fKa{=)x^aD1Ys&Pj{fT8Xuvqa9D6y3eYI!Y{WE=E< zhqdI`y!m^WzatV-sn`Ka6M}ehQLp3H_DSBN0&F7;BlK#O=S>B8Onpe3*Uq1`zsFn! 
z2<4tk2BH6}3>s557W8$fq%S>Qd%Qd0YQVrmhUiRFfm!g~GG;-Ufm4Z{TB>)Xvsug_ z&a($p#e3gZ#e7l%}mCE=XmoW&7mOG<+e&e>*=2^p!?rfq;%->f%8A^~!Mj^y=MG zf~1`vLT?7p(v?Cj=yVKyxMz3D!!M}*Wb(+H&R&nudeW5!)e~&;M-%E}(7&Bz^K#ap zzXf&O9Sa=w0uE{F5HHPRxYO zU|*s8V|Vbj{W4YJDGBtah~;;H&Xs$O&oJXN)z7EO4^H~Zt>%~;=R|$Cos{@2lxnK5 zL(I)tdDILmkh5rJFK(GQG_U~Z#gKr?IvWM{wmR|@U}}7?U_h5VRp!_tKjQ*ATbd}A z-CK2?YXr?j;yi~^ zhg2-EimFm;mC@(M=v1k%lT-kdTU|P3H6eU(z{n^;+^x{}sbO|_=E3sU9-#%dKMn;` zq@35LU1Cg-&0hx_)Q)yrkJPtt#7*w%)w`Dk)tHHRh#6=HDr7@e#lXy5hZGq+`5s!C zJ@&GLgtUR$y%lYV!`!V?hYGaIO?*z?mfkD-%tn~gUoL|@r`OrhZ2#(BI=EbLrSyKn z^K5;M36+Y!^K|yBwom6PiZFY?r75T|5F;<6B2xTi3QbCZM>I`s(=m-KQ^!EndBf-J zbZhD^mdzANHW6;fwy!wAFM62m2dtq%*%J^PEg{H{85Y7O31=JIC|v-iAOkG&76N5M zw6+R+JW(#Tt9XW%`x(vVgmwFj{q@T~i*xFt#kn>;Au50ZrYS(%*lMKXQ$4G0P*?Hy z>~J?rD|B;i6j$yo&0Sm4nGx;Se79+Q*aJKp7|#sXZ7o^}&um5Ov&z^bYUnYx;WAp3 zwyDxnl@|Bu#N|F~rS(I?YhruofDWHS*8c(f zTTWtrXc8Q30^eE5p|M`8-V2ebm_ zOi$K}aYI1u=G%qEKrGUat~;R?l$`IQ7SC0Fqe6zyGsC}(IoI6QQGd3=Us5G^dnvx~`=gC{_0;7tYZqpYC(>4{b5drQ>r&0!mC$x%pW~Nn z(}#NvlVla588lR=ZZkOtm18#)N^X7~)}PJ0)gu_WRIfn=MaYF5IBJ$jpqBU1L!@g7 ziODEAre%dR)HLVxUaLTG>!jE4`;NazuNSN(c))>*&~<2~Z`9Bg2tGgI=rD*dtXYaY zrmQfIblVR{9HjG7I1%!4ROwrA&Mb`OMV8_DUcZr4b z#~oH%IOxB?%w6=NXniZ1QhM$(-FvFD)dLt<6?a*ZfM%?9MTdft_dhCv$~*_>h@c{n z9Tr!A9E+i%;eHBsV-e{EO>j5gO87Dw|exMAA3KR38XQ`1Sg-XK(x6&Q+?xL}r5;alm1 zrL)9e02z>2_EhH#feirHo4Cu+G|?R^&_se*)_sK)xre@p!s;o~Uw%WJWIAUF+~2JN zmnnnEXTKPz7q9}zD7%%mFdD(Xlqe_7VNP*SBxL(qitgUh6Acnzh+#=V=Gu02`Q$Q~ zM$o|DL7Uu&p&1m;wtft$xj6257;_SWD?<^1$OI3 zUA#c3Kq|SKk#Fja88;mraMMwR^bzGqy;_lvvX9Xl?n2uUPZE%4j?{*nSJ2%t*1+?` zG;xC$dSLH5!96%jfi0C0mFB=pe|fqPCXM@u2lsiV0{eJB^Tuj&A1^4u?&Srsn$@_A zcfKz7@Z2Ftsz}`eDFp9pLV`+*o?!%@;5iGx$RXzoMP7^#wu=ZjovD`iMC`nx=dVjU zDMrA|V4F}@vHawv%HFdYT|lfTN+$c zxa<$3kQMs5p53BlydL7q=2oLlXP0p=EutfF+>@qACRS8hmlQTAC;07_KTU`eK7F3PPbX295#W3XUUPeW7Tval&5_kJ9}r4I>3 zwXpEUI^!dlVVfTPY@>8rIyxjLZj4mB!6J%byU>M}N%9~J11Idu zZ|5gTirj1X=CH~|7R9CWE|@&_U?jI5Wq@OAyufB-sf*&H-U;dHIT!6XzG=dhmqxAI*>q|Q!u9kt 
z)6(*2QF%ie!CRq~M77tIgs>}(q!BNZ70;`XHQg4!1Om?r`j#L?&vIWxqH^{Yjffu> zFp(gS(E-+gv02+!xU<<;dcBQlz($Hk9AIlNM-M^blV}Y(<`IPqDp7m4HX*CN6~EM2 z%F-wF!Yw4NSlyBv^DlhS%Olyxm*$85Q|03o`P2WoQsiV+{%H7kb(#4IssTPa&i+i< zW-Pr^sKj(t{+TyX)#x2p`%qWq?|Mt6vlr!Wob!q)HuC!X^PyIpeSAs&u0Qj}WiiL6 zDZi3mpTCzECFHbbFUvpq)KD|_$p=Wd*UaC-OLl5qk!bMB{QucU-OYCXC%k08P}b>O zo$q zle^m;xfNRf$NBXcJ z!t4}dxDUGEHxIP6=xa&FDV)DEdjs$ARLT(lyiSTVUHyd6yLIP46z z4lzksR3`%Fq<{dv1rFC|^xkK@7`{F>fDg|GqG1iDMgB^t!hfTL?Uca=V<8L;fGB3X z%$!l6&;l_Pf>|SRRM;}gMZ^*9{-x`yp2bVd`+S)aLksLjBlcLmcP7M{^X?1j3d{8bPb=Z20OgwcLIcRu54W&TH z4_Iqez)^M`t_XZ*^>3)`6~F06Vt$}N7S}>#snC}sQLlwVV?Fo(Q};g5cGczG=h=Io zfA^l7n+-`I$&IqlX|y+OkocBGQl!(nGXY`1N@sYx=qzR})(mTzNth1M48wXSm5T(3 z8f&cB1{(FMQBs9At+b7mrB|gI-+57kZ_xzs!&+~&%rET%l8pM7zk*V$d*0z%6*j+Xn2cLO$%1LH8}nty@! zOTABh|2KWV)Q9VS0+sF-#Tk6NQ^GQpqohk=eRQMMJDIR(OJ`x37%eqqcY z>>3+eD%ze`Yqa{9KGk=)i%LyjT6zb

        VY({Aa*+hfI+N2*k zG4A`wMn>CvLfc$Eux$*_pj_|K$(PY69b#(8+}%gb1EfZj5Vx&$!S3wKDAJ_VC4aI{ z)Jd_%a;j0v3iK@DZBwe>UXSy@v|i~In=2|Mzcr>rKKM3-9uI$)J~j@FZLotfltUL& zqneb<-kR11n(?L~14aLhSZImH8U`2rIys5MSXvs3G&rOhvz){nkW%eB)IlD?Yr4)+ z4G>8({9;^!)uMMe+N1pTvY13wDw4MX<7DnRcuyU0cVV;o8$bf|G-H6&AnWtUGR9=v zi^Fs(R3z4{*@D%V+htg}es~fdQO*bOj2)tE|4PWtREUzu)v})+2aAoEFaUv6OV2@S z76{EX9}69YZ**lG7#az7qq#t8x8}!GfD|vg5~QpNl8GHUE|0JZJmH9~sev{|ER%Y* z|3k@M)~=R`u##hN*&l}v_MJ!Wgh5EB1g3P{ZUUxxukpePjTC)v8#VA_b&|G7QSa`#o)XWmTCw?nse zKU*jqun4A~U{oa^P?I7am5FgT?N8*yy zfr(XqY4B;%Wys4!6~BtAc*(wjTF`S1pwAP364*jjp_CGQhqq;DvgS?V^}&iZ_Vfmt zoK#(evQ*oIXlrk9*%_!!xLhnR(JSUp zm-EZWn2FJKPuaOc(8ljE2RACU9NzKSq}vs{&OF6gBRz6kw@l6=|^4!4i zS^sglGq%s!7ZG~-B!yaqGSph4%r$F8(I(`)R;^lB>IQ&iT^R-nRaputr`5_NCEOQ2 zI>Pf6uI1UDr!uq-?*CU9#Hk>#QgQADIA9DfKFr*VTzN?+%|`dVg+rRb*)6N|K#SJ- zLOREUrT9)toX2iZaCoBIK9h9KO6bop&Xf7`5>>z9mc_4=kq1rpjff&mg)OM8n&;28a0U6C?}AU8ji0~RYVd|O?I2FywmPQ zYJ`uPpb$Dc5J5(AwqI!lBKE#H$~~fR7sHC#ldc(P83yUV@}BfC06b2A8T7d`=@Bs? zM#J|K2dxr4*+vIwQ8rV){?C-`NY4D5`5DHU$ZDhqrZui80)-Ov<0jtN@=P9FCCGR* z{n6XXC7Yx_d|UV;Te$L$G&}rpPX2q(ZQb7rTCOG3T4sVZFu{#Viv{tM5i@$mQdaT| zhSHRQHa2>kVF@rovXIfjcoC5x4 z_ZPlMb;m*shij=8*vvp1JBR7-SKYzmAGAud3q) zh7p;n<9df7=JqSA=vY7`l^7fp9e29pj{?g?`XSh?yUwqOT<#55$$V(*9%$k<*b z5dTG#djrEunEx}xZ)}Ka6HztJLDx=V+H6-~r@u)yS{R8bc z{Yt-9Vq^BKov@QdV?gdW@-J_oUv>Vs)G~5OsBi4lu+C^4_A!D3@A*Qi#f>eQHAm12 zP?Q0QcD{U@O4TN=BsZE0ktdA)6E{rYic zNA21s7TS&;e(u5En|sl7lMN4s>uRf%iG#}iHQ*E+m;%Nn30N8k2t1Tj0Fj%mi7$vW zxDZJ~9rmBF8wfVo&e$$&JCr?2@G&h$_s5cW=qeMbOqwHt3u+%^iYyX3Oh(A|2%fLl zHb&broQ`qEk)Ie~y=(Tm!jrl}_q5F;f;KHkUbtlVWgz>%WR&ETAh&U1e1KAJ@0+c! 
z*B_F{F%QP^Tw;!78*OY;rRs?4mcO}VdSrPEx?>g^#DAka9E-v%5$3LbknzQ*2cOkm zG=_c$m5mU$!3)6OIFNlrIY{Gl+}zr=SCp)dDUb<`nGTK8`Z@n)&|3a# z%-r6n)-wJ7VqZUi#k+sqdP|5+m95pV7@{dI>B@^LE`Q4%wmM*tA3X>XFlApt#ihtR zYoX9w>K;RUKH7ii%FEFGuZZ?iZ3`8eYt@(60o5ni9$*b6 z7yf}Lfq^Osp#2etll4Ou;LtOWT^{t@VqH< zmD6H7k1#t#3#V}2%o+dml(gV=Y~ScC~b_(ClV4l#|oX16N`pot?mP1#*Hr{cSi_P z5yPs%^9N@Ez}n*@)8b_(eiA*)zf^BFht3%E`Xn-vLp?tVg+oOoMOFGr@A#|mVE{B9 zSB!jtQe4;LrnAyjPyReoabxUY0`Ra)YJI#`1?8b$C%q6@9i5=2()wYf_S22`(h33I z8U(&_TO=&u@zy5{^XuZB-vZ<3?yRUk_L&FFkTO^>g4fY4UXI7Fbyexph&Zv_ODez- zU$3Y>8X&R9?-iO1=_&;{cPTLrTOL?QPKsOoMpb{>Q*0K^nu64VwK$N~-&t!gnTcil z#%U`=EBkY6@@PcU_u}59jU9oA#2F^*(48!$7}5`ftR;H|y)n-f!mxfnM#?i_OA#iO zF}bhr?M2@Lm&<`5%r0`&{~>_=%WXm_b+V7&-yxF%5J&Fg(w?ezr3D#dZAi~0WtWwA z=n(jMFVW5FTKko%@0-2Sk=m_51%t_V7ol0-?LvqhdXUY0;EicEWV*ZbPvP_-GA4gW ziU7^c_$LgMkv)B--al^r#AzqlBlpgGtvg6SaurjD=n13LIAj=7;#U|Ud4Q5~gxQf_ zsKRwM4;qV{M?E8&O`YFoo&coR{#P2sxB5^NIFg8?Is`7%vn5ID1Rj4Mttrx(QGy?i zkxYZ3wIWBxtIzh_lk|9YZ}r)|I>EzAc-K{yg`j!vWN-VgNB#?1=v}q6U&EC63=iA* zJIELMq3#=69%m9@EA*$UBE-lk@|NwIy3Hn3k!!q8%(XPzpc&=38J3DH@MuGK*S-~@ z2rOc3yK0ZKc!0nwQAG0d7$fKBQS-{tGV_0GDYzlY1p>2s#$H5~%)I*?l}9MYa?UXO zfcB-ZgH`PjJJchr4z3Ow4~k4$y{q5CIgpN7SN4En+_$vmBsBsO)Yv8z5N2V1Zd$~W z(?ZTWiKZ-_s~WwR_B1ELo6zcGWvfqY138iuF{gngRrV7TbB5?gleUpkv4fk3Mn?nI zk;Mx8;6^SC@}1z0I19QWHY4;<30fsVN?b?5Z}gZBeEB*k9ax7wNgc#c4VmLk#$2gk z75;Hm7xnT>*F_9FnnMW{IHYyz1kS`UC z`u9gNpxOGr(I7gon|ZiFv)`}Ecx^=7Eiqbsy_i+a-jlCIt$h$N`zK125=S^Zm~BRS z60L>#Z?$JkZaoc{R;e|ph@=N^0^dLt^7gs*z!V}7vnU@iSP zMm%jU?LJSwNp3F9$B1V}&m&8Wlw)cS8k%B%}$$qIudS2YDZhhG{{qoUjO{%v>_9^{ZC5`M4 zYAJ1?BS5f*Y%qq4xjW||BCq}f=b_LSfVqGM2Hl&kr#uJ zI&Avoe}af$@dlchwi5NiPf)RF8k;2`laMqH1?qNBHKQRp4=-vx!b#|rZ1f^5wqNnR zNKw8?KF+Jttme7}QfiyDJUUQoIqtK`i0aJ;e?wiLjkfLQxkL-#V|(=wOqg~s+>+R^ z+p&Sfw(TK-Bo5nlT=-N*9=D-(BW;q`Y)CdTD0pv(UiXP&fA-(n9*wJ)_UVI)A$VFO zG$c)9dfm!zn~j?@2n*0htUlQtur;feP*OZ8OCDr4+GEHrhLXC)zp!AL1~~%yqPK1s z(h6%|U}vzbcs_w&>_FI4S=OItAbfQ7KzP`PR@y+c8?!j%U~Adk8Hvc8k3^nR>yG3F 
zFw~65*E^KfAWY;5rtk|+9KbEp9QA~j4I7QYLI)yi(zMZP$r=m8CJoXIfW^w1D=A=M z=QiSp3!|VScaF%tWPLlmdMuo)4021gF1!z4o3B4BUWgqqmsKydy_tMa$eTm{Z$_?I zko;;^oQj-A%O<43?uBFXV5fCu78n>z6P8%&GLuBi25a;2UTot}Sj2snTl$AAE|Ux- zCZ>Pnm}=UPD1BZK;s7=&&Jj_d68zu+O;u6>B%W*`?=s8R8#K!vAIA(epnl>~TKE4f zR4^`#yN88Pfxh+Vs!7~5u^~_by-g8gRP&IzME z%$8+{IHW-;g*C_qgmhpveh%i*5^P1ZBR=qfUbm>KhkKLE8V~FjYocs$NPiYAA$%5^ z8^${#+RRAk9O4}zT+qFah;UPK9~O?XRtk2>-`)@dONEj(jiVZz9F;_}Y2oXDDSAgp zlgsSDX78%~BSJfPywe{m@#S!J`q8_6yY^l8Y1fd`pF+BD1s2n#h%pR#Hq~4(?w<8p zQ<+DFajA$7byi)~tK9oRPm$T^wK_hm9bwdWgm|y*3B#hfSMLdNWM(4gzQ0#BJ{qQl z%O?b5OPKvQK<9ddg)!6{LG*wU4FfUT8#eUF^vVPaphyQbXyuSanv6naHn?DA?52@? zuoBy#2tA^msQi%N#u*%eD~Mz#5RT7La%?pG2Bf#;B)x{A=woA} zhQ3{>kl{b+gjrS5sRgG4|nGG#~uuSRpApw$u`VJdm=3)Sdmb!fRO6M?f#FkTt6oY8S}QRR9lw&H-Nl+#vm!!~M}N4H&>B zO$y*6)nQA?W8w-HZ4RN?-v<-|w^p*wx9M*tn!*?Jnq7k_6hOOxnvdmwfJ!KUnm~rI zkL-Z>g?)qW0qT+bqY9|QC)Wb?4h?Lx`CBqK6*y}ldb37f6+f}2L!0Ctq4%bKCB-I0!E1DX`xX>aTW5#vtd*g|GvuNH;niK7XLnBL`Q)lw)oc%6d7oGpi|z_ zDpp)`^=s@y8v9_>2y57fI&4ENqeiNuL%3%B)44zZ@~Dwe7y_UfdJzKPn4r~>V*)(7 zhF*O{+P5yuOV{bPNQlzX;92HqhFO7qABBN5d2lJN1_ZCA22{FIeoPH$r}Dr?|5U>j z?F1+ya5$4BO}AMRb%;0%cMOpRh72bCNYr;1a@X_rExu5)jreJ$4K+=+M1x_85mJmo z;1xkzNBI!%g`M|s39K4q0rpsl`pX#seqNM9xBrH?-&8n$f+WBhtg;p)G?2H>XU%k3 z!{WDdMW~}a0VlzT- zw`;=JmSIF4b_Aara^QO_ce%8;M+~=EA2HohSLBmo&De>0nuhIk9}=r^fm55+jOK;j zZuhB?bM@wXbQ&MVTd}IK#5w4+d0NH*(sD_N&SHzH35Ez=pa}=nDo%b3y`=iCN1Es< z(~5vw_tj=?~k>!`mMkfwcPOQ`wiPclC1Vp)&Os@&)#Z& z0{!N3waExG*wW*?YU)#14*WHx?=tc7SvQHhTH4^pI1Cpt*%?B{>Bm80MGnYP9NFCC zeb?jdR$I2TeSfr*varM8r)G(>6n5}_2eAS9#8_;ACWaAfT0(5VzeaRsW|@L|KQ=5g zgk_Y%F=c*_5_oCNpUFvKnj@_OnS?@ZyyQ!fiCi|P#ys*MJNv13rrFfI#})i#J9o>R zA^DWF5A@ui$&hq4e2)PhnHVz|Ir-~KeXpUTuidNv>rGf8ES8nHV(01?vN$ZKaS(QzDyo+Z%&@y3&(>FZ7(E& z*=SmTBMag^G2>2zIFG4scgMH^nEFPFk3Wf-RMBcdO$rm_*gu#0146^jENz3B|6yrW zYuQ9lg&U2frvg(=z*V?MUPAl;Gq9LAD!2v{&VcFa0TI`9d{S+o9{Mi2L^V%xr^qnK z6IWk!)Fz-d;~3$hJG0z zWg!HTkD^@!X;Df&0O_UxmB@W|5wl0JCNIbaBDx4f<>?X6=ri;%vjJtNm>v~Bq}h|s 
z@en!83Z!N7YS|kCQmzB&FlBhyYgC$Cw2SLbF7r4HHO$XF9d<%!M`0Uy7*wd?N?rqs z=l~w9g*MYKRzp$#NIZadg5QiK{rXTd8V;F8awQ(ipH)?%VokPBkzYv}>`zS{wkgv9 zsdSNv2NXiFCDVl@tknteqAv7BZs&>G&%~1A?8UpRO1USlsdlUPn7Eqk-($G8$Mdp=4j1Cu$cr8gLVNUR~;z zpo-+j1g$k1)U5?s+k(;Hiyi}fYHlyM_89o+>M`)JkNGQCy1?e}xSen690A%1o!c=jYn+oi+vD|_afME}f zqcI(rhM^7Hh5;)K5JZL%4@D5J9tPSrUWkK2aaQ5bQ4xuLfS!33O;|p-^>;n;NGz?mp|Mfm0>!aYRIyV&4KIVuR?DKEO;1d~JoRAjM#XKg3*D*2bhs<< zvJDfyonP~`Aru5ZyQi}HYtpYWx({aU?8~V@z#;I~H$?^eA{WrLSNB_PtVII3GvR2d zy$lcPDv?DEsq=90S0~q44xunH2N+5Z*8XO%{@;5APu# z-UIw-&B#BJ84kn2lkgLG1ZWgbG4nxHDX$k+jwfXip4}O>?}a{w2?V zwp!2Q#^Cxb1)Agqk2{8IUTr8#(j;L$j8jNVpEYz#Q3*r4KmBx!C!4rR=z!L4zXr}} z@wncKM>M)$Q_PxgYNn8^YhyKGX}lNYM>bcqkP3{k*XesvHwFg)+LEMuk=HDPrbr7r zBVQpMi|q3ezH|YX#cTVZAsu-)oeU|mncjyj73Nw}Nf6aoQHpdMi?H9%q%=-1Tu8;Zk5SP<-d1o?9bk#k{Tu-Jrip=%!1$hUDXWW|Z zF6N2nvxu{CI_0ig%pLH#CpkqO*(pk#QHi)xg7v5sE}EUKJohfnr%3nY)d8oO-WXGe zoUcUF)G@eXoyl+B+RJ{}KF)?n4jQHZ8}rK?V)87e8F*Ev8OBSCCx4wo z*1!#9cN=5_h*%~nD*(&Q$iL|c8+-M91@4ifWv%=5E^H#pn%E`GVgMjl`t@SM@kBJ{ zo@PPncKSETm@&W-RC{qAB)enufi0g61&T>Xc`IBFd;{Ze>*5*_(t>Y$$4OQRVV~?euxqKN?9*2{Q~JE?ABcpv z8ySdr@~>=E`Ckl1m2ZSLLi0ol+3wQIarbZI++P%ZwOq?!w8A!K;lK6tuh9^hhikvV zYKJ||QxMCeMJi@CJ5nhlbmKKI-qFtt)s>$aS~n$Cx*BH$8mlfH7}_ci49#1VqpU?8 zqYrE;+yXXphlO;rmCz?0Z$hf+EY8@~D{?jDA@7n76IDz&L=rAaUZCPJTlb(X%zNg2 z^ut6wV=O(>!W?tzmwmOu%$VP-drL9#Sdwlm{5nT@9(EfP`Pl>OBYc_OM8w#A$M*s+OGkG=tL zwf;tQVlk)Lst3J|?6RLVN8znwTdb?va5wihra%^Jp|pHsUIBS9hHz3`oK3Lfx1p2=ncT~)$ZqI4pGVTrS& ziF2x4a_%Lc0*W}B<+7EXHN%Z#8?~NP9n8RVsB4PrBV3EqJ<0Z^WVL6ady*MFJH^qg z&EBTmjSNuK_h-}aEB` zvNOD~4!5%NZo#s0i_zH4yuC<+p{aR3442D1m12bMz68aby1n(D_?Ui~rc?T*^J3|i zSfkkySaj?&%0M=T0XRE`G-(jwS28t+%F5@cw?SuJ_GL=?%N#5~CC=tagQV%;q*FKQ zP&ukCkYOt6-=yE^BrKJMVQl+x&mNu`?r#9w`jCtZG$a0saDCWonu@av)NoV!hQzQ> z;9AlH5Hd$+eYLB0bVLXp$3iLa_wINF#Lw{FxWcbDeRq6W&kg}@`iRRn0cQHUt9Ev{ zm!?0EV9JTR%3Qvy|}1)F4BwVf8nr6G0dB&2qe39$-diF`!rr9sG;tweJVUy zu?H-kPx4?qzBr8QK;x;^-I=V6fb!;8sUA)WzVvX_ zzg$%hCsg+Yo)Q1?51FTpm3iudEhOZpb_~B*SbjIrwd$D;^&HYKJ=;J%M^w)d>iG@L 
zcUrB7k5cgRtw2>I*Sf==Ze)h7>zHa`TRs|UomEY=a!U8WVn)A^9<_2(?@k&%9^J4% zeOn0wO)4mS#y-|G-5sCS#{khnE?4k&QTNYlm1U>t=&6rPv#tcw^aqQ2!U)f+8fI$2 zs>>GC_BmB|j)EV$$Ow~igp|=Ly08J!PUSD~O`INHTj!-v=aSdCq&k;X=Q7{lpZ-wg z`$qb9FkhRD)Cv7s_S;0JzzG>NvU8*i)e4#9w;?4J=&3zj(AX2WcX93h%%<$^YOmYt!1#D|Klo(Qp zZdrnLgVKS@1FxbjMLLq;M%~O#yo;FQaJLD{Qe1)v=%{Mxk0dyx)Y6}|7Kthl0S&Fo z*Q7y54(|C9F)@CEno{0;hcx*SG_CLA!lrC6By7%DF-)j$1cK`)m&3ZT>zb zUDf6MGd>6`N^C6MaDNoM%}Bx28*FX2$o@4=Eb7q+y+QA~URSIm645%C zM@B_jQnR!q$=)8O*&;nNZTNA{9Vym2rcpMpPU4&C;xI}98Cu$0b*afJ$um1!bcW@p zhWK&A__YYx(8ViAJK=^F^T5TAi(ZVS)`Hv=`9|BM&+QjG667;1CZBI4e0B9mB-)|G zQA`yCWiUL0p~$((haz*!xGFq*Hil&(GQgmR!2aoBJWx}8#LxB9efKd(@ z^Tb;_9DC^T8S+&WTtMy|E&_;Db_`4GiaDx9;(sv{f`Fc0|^sl0N zy+?XcgyDuT*#?A=a3r}^jyg(Vlue9E583FxH*uXKqPcsInDGbq2aNu1Hi6j6gNPq_ z5J|XX{h*yrGQ6Aqy*i8P9tR#2e?d3Lm+NW}grBPasp4JC<6(4B5^`l?HTC3Az>hWIm&cX34>jd#Ak@PJ;e+>9zd}smP z3N>O>fI@UA&RWl8cO2|H`!(m{3m{0PYX`4rnpr-U zDU{4TOKOjaM(4=(q61kieZ8tgc=px+oN6)$sV>Wd>8Pb|zpeYDo8t{-EKVNsr^q17mGqx^Fc(I8PA zRZVj?D8XLYAflr^R)F+Q&Z2}_AQbCpRG}Qru1lbO(tgh_c(I0}a z*+`)c9#CV3OnWT-53LJo4b36z-UDjF;VMabk!f--PUgX*FJXK&X;jL1d!d)S&HX31 z^ne=0{IK70pR0crP=oLWO%PeAL6N(fl@TX}vG3*bOJ%8eegq_vaCk*5+UN-hym_)N zgcU{$7FB~86k3*n@!Q5W03rfq6wp6x`+ly69b!A_0y5j;N9hE%csogR1cs3Vs@cN( zit7Rzeg*?RAj15P`|IcK*5IS%GU&r`@?i+9u|rt~3p7RDLX1GMKh*&H7d9Ma1QQ!` zy~B}Oxx;W7PB??W_A^FfyPK94&f^-5XBOz&sVs^ufb=jDE!FVxmv|(Eu-wo{#C&u_ zNzK5g6-W2Pa?m~T8v~G$7l=Uyxkq%O98jgiiwy{GGX>Zfei)K=u13;z4wwBrNV*}M z9P~=!Y7()W3Y03ak+WDV#()9!(t~}102uPHqDU-OrNGEF_*kWSZ3N+EdAcHY6xqY|;w@+uhZf~#J@evlAxuvuO3~-7~0GAjG zbLn^2hQSas;{CPv*XS={Fk+Vv_FB+7O!YYJE22M?<)z$`A}9#&Jq*Uym|zY{%kif6 zc~;mcp?4w}3UxAA3{ZxRiFkTJVn`mO8#NlOr48bNHXYBG6MYcVn9mc^j4v4b*w>hW zVh0RZ4hrq*`RyPmc4*OUGLK{A;dNtHDisp~L1R@oc$f;VMsyrtD(*yKDjtb!+#5X) zVJhq-VQp(%d`^Z1cLpdrJQTK?Ard`+Ki)BST>``nCIhw?uX_Rqy|S+K7r466viqsnynv^jNx%ehDYL-lw%Fpz-T86BO0+s zSlBG=%a;sDW?)-)1zBK;oj_tM2$E9)U5*-BLPh1?U=?)NTtXT{S7R8AC1D|Wmpzum zf6v8S}=u22dD$HAtCAsD*a`wc*72<_@8v?jORLS};2jWXO9T)oTVJ 
z&;^EbAg}~X6f+7>DrVI79Lw?mr+GU*4RE0kXum@`so^V+h0Qp|pKNry_rgA=AEGe) zziZ=l5_x_#zTe-`)%+33zz`S;UkO}OLV`D#8kv-+?w!e4L^rVOOt82~q82>+@C9ce z=0Nv~g8wbj!=XB12Qq~k%BCp&!vtPE+aw;5O6k4liZkx%w_8dL5*r8HOS-?K^fi&P zo=nIe6)A}C+fW^vuc4-wf0;Dba-~uVEiv-uTcz`=;Hl+xL(;BPnZ0CgYuGovSVWm= z`M5N)E%b~P&zO+5+LV|-D+OjEf@NxTnAnK97^;6-_>L& zky9JpQSva@R2+ui>P)if0O6L!M#4qkA3dt~cgGL$LpzW0kik5`9r&=1AM**6ADo}n z*vP;t>1snaS&$$L*wBaM$e}mRt~HdWyoo2R(j__Lyow@N(@G_5F?QHxKyHfH;+ggA zbzaGQ9xxYou~FE$5hQ0DmyX#b%mmxApn*g+Y{q2;d$@r@uK>R1 zk&%z3U4Ip2ScOmW1N@)hhenVlz8iohpe%pn&X|}kAt5D?%l3?aK*es|cpatb#;f_U z)DnV)ElorRN>8UCdiMh61^zh8?UGrj4uL-@{J>wL_b)c3aWZ)`9x zYYY9tNZjr@YXURwyhH6ulJsY0-~PQAhz=xjKzqf73d@oMW_ zSb9xcdMDJmp=#%{f9$O@Cc-B2q7%AA^(OLkSZZoa!P)QrK7~iBh4~0)&6H!{_00_Y zQ`2HyYSO%Eyn4KoJ;cmD=@KK}9nBC5=rG$=d#_YG_toB+t8s>Mu>p-_OXh#v3s{_E zL)iyTPNp!bM5?v9`=kG2cTqR%a5rJ^*dlBm8OcfO{w~#vkq__uV|w7TLbQj5Hmjk{ z$q3Pu7eb#zUabJ<5&Hu3W~5lF870{Re%a(vlE_zw4_>36kS}B>C3j@5F)?gfQYj#s z9Z{*MGHE;qvhOsspsQG6=uYVBeYIaKJ38+jomWQ}xRwtAe}55E!5{kT90?5Az1Y8qid- zj+$(p(&(W0#g~NA%-4!aGB7+dsExd5_r%Ly@rp*Ws@JPlF=mnU6M6S+odj%}P*rt2 zOt8TzlJM=^OAQPQR-zP--cEeZ#k^-DOhvC|h?Pjuhf?T=VI{I}keYV!SEf_hf0+4Y z<%x@WwddX}=;m-jjndAs=ct-@WFaI-Q!*9P^J%-jGntNcW4wLLk45ilmi^ji*Bp&S)i~)r{bVB4Zamy$ zQ+UMBAP+F%iGQD+Ci@%bfyOSV1{x!L!WuiLT4?05?tR9WbqOU^ zBP)8nVvU@mkri)5V!w@=bz0yd`%*FN!z{ZnY;z_k#yY84<;?`Y8Y60aYyPP286$fc zeqL&RO70FgL;%P{V_c>APpf$-4$T`$uWN^Fb$>d-1`r1?DiV$+e9T%^teQPquQ41ylmks52!z*C`|?Nk7OP>#E;&|2Wbg zlB#aP{@eRT=B6!?Sb8-0J(A_Uki1ccj4@6qz#_|i<2=0_IIom1&j!3>BiZo z|A_rXJ`CYhC#DVg;zt_gJ?q)NXCtLM+2kKn04MbU@!)%k7G!}1&G(Izw}eJA*}@--;Y@!wfd(}>w%*v-Z#W1!*#>oaP5QSjM+0*QZ*_m? 
z|E_mQ#Tb2m^g0BSDFh{Vlr*jt23^r9{5)ZN;TZ8Kp=r4l!OMOZ5BPbKAMensn30f> zFTH}UMlrLR4-@pjfzL^i19oU)G3&u4Dh)DwJzx1Z?eyDHV z6RkFAY4N4ed9@r@UduHRTGEW_t+D zhOgIAwK@@H|IMtBxRB!IA&+ZfjM(S29@y!iJQ^*VPxh1VcvH$@in|%{EmCUpJsX&P z&z7a{^DT>!^ZgG@@_pUD-&!__qaSjLav?FV4AI!q^c~1lyCV$jjghUqfY9@o?a8o0 zQ%MXcf7!YmQPtd?Y-sl+Y*N+*wgu}F1XNvt@EZ}Wi!O3M0p)l>z@jVm9aPlHL3s83 ztiGR99U{@sz1=Z7U;i%&Ku2s!C)(vxL#9)z!O`V3W+ah+rdIU1ucPaW{&N!oAD&|tOc!M)zR|u)MzcAQp>)J%z7tgMaFTItY;_L6F&H6-*rQiwsPROcrDMR zv5tO^BrJ<0yZz}*^t65;yZw8z+Bc3su!VMK%NFLng?Y8GpcWQrVO;)1!PqQYqQEc+ zx-DMOg|ebWuZROh*%wvOc`CZItmxs9&BI$5q0bE)wv;}qcos8xg2NNTF?ZpM%vA@^ z>hw+Ka~&|_gn+U40Dt7U*-YPTo5w!1xa=W&$2RhUb0YbR2nm5`q;Jt)gPeJmf?S#N z?h)e&nn8y^%v!dB{w}LO^P0fwhy!g!TUhm5>a6!1KhLYG^VWew);sMj=k$=x zWl=5ptl@uFm+~|(cR`vl2+x3%#T z_uWbLZ2qUoyk1i8dEEmu)q76Q&RM;98K+Zay|GAMj-40u8H_zxZIui4)a8{>Qqh-5Ocxv{MV6I?k1KEa;M{Ro{Bp1~iho|XGs*o}VGTr1{=+{NO=kT~h9b|Zua zn;tQ64xI(#<+26Efi7~LEe;KK2Di+0b|a{rD!N!!blxjEuZosb(GnF6>_)@Gn;4Hm zd1gDj_Ym=uTp~f41J;bL8DIyGBydG{&ZMl~!Q+M7_w#r44*i|O0hVn6YV;1UVt2H3 zyTuPB=}v1STk!X{>l_Z{p#WLz$zu6LTwtT&Gx7 z%)-w;(yxJwS)*4&tIP&{lbjmxn$!M1)rAVeyP)}@<+I%^eMAIZxG{yqbT05m**U%o zj6TH;eDEh-=Xrnu70O&3jam+offR|b#S*7n?f5x1rq+Ixbe$U6P^}*;c|TUlexzz>1%b*3 ztx4CwPmJL5w4ph|e^Cu#)g_txYCmIU=7|x(3;)((UkNM2^y%uApp9F^hB9G@L?dX3 zc{BnjLmWVLS<0*HYQ9WdvM5ej5upmIh!6yp6xZ1z6%DL}_OK0YjsqPuw2x3LVOL}t z*6-O-bsV-fMfWV?^itBI)w9D!`^Bm?(|@ntPx)qG%oj_5cm+Gu0HHno4{c+{c1K52 z_;px&LlF`(z|+gvY|d5f1m&=y0W$4`^bEa*bL~eld$|`C@}qpK)Z9^;Yo@;~iBhgC z^8IX!owm`RU>3qO&ghawoHF`ZP0lR3iS?!tUc#_6C!b*=PnT8Ac~x_&>XcreqAE=! 
z>%u0`XX9hZgY&g-;Q$uv$}tFT=*qM%Av8Bpenxb7=BB}}Na%rC`_N53D-6@XAP+-1 zt0f5-XIO_zhp(7os!$uXG+X@+vG%I_#7)KgJgn8ai)w?;)A}_B1D{WkF6qq{`gKBl z{fRBbxLC0E`RQ^Pb3TkY4daxWI>j)4Tq<=1HH6J4U$bY+a`RqpUgZ{4Zeh*vH?E7& zV(dZa3uQ%%UeTf|IM^t1<<2QP3^ocl_o6MOoEZR#q36~2kvti^DV2F4~ z)Kn)nOS2*@#MTO4?EjitO_D~o<-Xb_F;N{h{qXH+-NTT~>HhL&?F6&-cMiAI-(u=_ zN2{B`dwL2P*bK6u?vSqp;PN9PM$M-dd`H=wv$j_-XGC{ZY~>fFkzxRnX*gcJC??5awc?couM8K1(ajZaZEE4RohCHnSJQO%iKWjNj= z6BT3}$9>zovPB-Vo7hZgKYu5SaCda>R_2LFO2$&1iexZBSIUZ3 zyrLCVw5p0$si?X-MkPT8KGO)MI2oz;^e0VmCr2F3f-flbWadoy~7na6FsaKkqbHJZ}+_j7|3+YCJO6@*$Fm+Ik+>1 zb!b zKFV)EmnmJc>29O@(|SEk*vttj-6}+QLm;S!w^dOFJXU2V%F1TEvKdu2tIB4n>;r2w zczI=m_!lahFDsk#%H~wrX;pTb%6{=E0!}Gk*;?wr?raM^JR^7WlK$wu`gp#qcEPJ% zP_^e&?K!G_?|V>$NpJ45iD5M>j#HD?h#LD0(W&uLS>uw|xTG4FRpT->Ryl)w#Dj{j z(6uXNMJry>iYi)FMXOX)U2!5~@++O#&dW){{!PldMWCPH5Xk=s))}GN4jB=R%K0B~ z@uZO!$GyWUK4n=0M7^5n$6BnU{A=7e`F=f8#7Arn+l~Bra5=LRC zs6JrVf-c#hw=<$ewYs={aB5z3tG-xPblxjEuZosb(GnF^r)HyRybQT-E1I9uB}2W9 zQqx>#)3*(53&w0QAco-XRt=r>WP`X|w!Q3aFRSepwY{=t5Jd86@vc#928w#9?9QrJ zw5p0G)tyPYQys*p@93I-fsl>G_IAvt6-W+1rb)zZj*=(JmS%Lx-lT!gs-;<4`diMC zE!Y5~8gsV^b#mcPV4uGYm1lyNu~w&Ek?crSF%@t}c347uD4Z>gol$y0g;LwCk+u!`KShSRN&Z zIV=I$XSGSb8J+ZN<>_JT;^J)q5*Ha*yFvywnq^2WUoKl-_Li5`@`_qsSvyH-j`1Lf zB~xJR+GTBDfO-QM8Td@Cw5l3tVA99Gs#+(wmX8&mXj^1n?o1|b@1uk=$)7zV`%mJt zDg=#qOX``po#@A6Z$>F0#npmllRY+je;)Xo^T@pKK;3T-qd&|(TpfK-Y8+M1oJpAS zPEH9()2eowYI~JQ-((X8?{llTF2$_Gh*kJ#qvp1SK7f)=TpU~bWSXB*_52>Y9o7iu zO+z|Hr@|}($y_R)v95luFz@CFaL~Sz4B{J8uW(&?X~Cg~Omi zFbasB)4Pd$x0ndK1(-SPtV|*Koy7PRCTmugEFn2g&dDY=w{?(+4X?ZYgHGy9SY-?5Z zFKykQ{#b!VNp`aexsvL*lb}lctHfr(~iX5`tWYYe^w_xQQSc=mbZ36N@;Q9@1%G5I*yim5g=-KW>xH@ik)0rEEePE`_5S=vbx@LY3EW|-;&q2r23Xs z-!k>(RN2-=Fq`V#yg~nTfdfgvfiuDZ^Uq+T00$Osfj^Q^CTJk4h3+g2cE=#152DotIQPkJwn{s`Akfa#aEH=gNu}yrKnFw5W;}gH|FZAL^SZ z_i|b0yq7tzG8a|mB4q~l&khd{->j9oFq(7pZ0Po6el%z4*<82in^WZzG5K@I5SLZ$ zw0(bWbbof2`8d|IkJ^*Bl1V4wXvkgp>A@WQa`tAX5SNG}`o&**ieCvOE9xtr6qi&X zlJjrteX_eoYLDN%8KMn`Yjs}QvOhg1Mz6lBcK&8C+L8BWHAJ4ylAv4IhQ=fv&;_+} 
zfeM0-%+6NWMI!~WUs8KP_S7Y7_-3YUSp}DGz6#leY?|$3C9Rs)C6gh#I3v0>16|CK z)7En2!^&3Fk*YH0^37Fc%$1whRmPl>WuKWj{gQl;6=jUrG?Xz{_(prfV)*u~?jfo3 zx<70r`P54U%K4Y%%9yFGEC=+UQ{_Gn1b@N``ysBGJbqiEirF~wNnC~eY)G~mq zrSB_t%~0iqvdTrTa#2;DSC!|fa*ef8$eb;0Ql6+>mrzymUG$mKAP6+CaMz)=%VlfJ z-rBNSTTyE(v{vPGM#Nth)4>Ppk3)-pSM5*Id}T~ZW9w7C>P@Vwi3uq+C)Ox$DLceb zhI!E=Wzj>r1RtdLol?;$imsgtqdShdA7g{l_GZ{%>w-;hXg$RwYojr2cmqI5x|Da8 zTuf@waxpF0EMqzu*iWQ^gvxz_ld2?eb>L|EF`h&VZ&3cKF1f_yAS`L-if@>7g06;H zn$aZ$b(EpbY6P?FH92WD8&y#khCr*UPug#CLa}VgHC0gkngH>Mk%B+IQVxz3PilkA zMyM@Rkc3WE7H355D7Vw#qMzJQCEbTNth?w*d}NFMERm5;XN+{WOpmVdK)MfWjC7iW zM)oZUkc@OM>i&owE>7Ykt`|ww! z8Dw&VEFjpCvZ6!AmSugngKX}m=Rv^})M1A@MRluQ-KwgatZWv#7U!3bs#&fYEN^{v>eF1|7X>zm{xCn zUac?KHBxPBK=1U)r9l-x;m6dm(AuB=M8QQ+>ww`ifR3Cc# z`7;Uk>aLb5p2P5nW*vOhVG>jNcD!`M?1>4HiUMakJld9ni7Q(R%v@iGf`X)`X8p=W ziH*tjyapZK$587uvd+u4DT~7BgQMSq3+4lI!K|ALW-OsZZkdXc=%+lz2NcJxZ|bFw zZIkBCYJC^F%UUl!kV9yS1CI6t#E{E^Jnjl5=T$Qj7b_KNX|p%&!=Bc#XY86hd~KoZ zHAl%lY+6a{&jO>|c>G$U+^A88G4RW1H`-`7LRoCIHq5jf-mk40-b{6PfItwt_VA=9 z%D0}A8jKU-Ss&x9#yH1yHb=|;bP?2q%|dV%ZHcD)N#iTN}ge>bSTJ30%(8aZF&HK0Q)>OSrl^B1dDZmCGi$4+5S&KHL? 
z2>Iegf~}X;3+7-#`w5iuux|wXRa@ znhj0v)V4zi_THIkUD9Lu1kdPdhQ*fiCb3kX6`te`wwb{l8C#(ZOs@I`A&p(JsEI5a znD7QB)Brh%nNu3rQ!44%GGz2}Mn(CHD@@1-)@4HMdsiamKJ!QL_pa=sBWBWHho#{7 z3WA3k-LEYonmuy?G7FhH2BMIddp0}v2`((e>3=WEMATt+?^?F{Tk`Bu=vKth5UCgS_&+uoPxGwYv(I3`tdkwh6BPha)A)|*3%PcE>KEu zHiel7WCR59m@IvThg~=*r16x_qo78#ZY+M(cQk;WNL;H3BT+nR+M~QBrKv_Y<$a_AwMPft z*}#6}YyjC&GD&|N7fS-yH?XPn0 zjcDCM?;i0GwfvY%xIMOSn~ktg+_6`CrDUlq%lvsfUC(*Iz|2eyZi z5S`ooKolhvOkt{NR?ZuC1>}@|kY_O4*{jAPbZHT|$IEPb>790rk9EohEWeS7@U&Nd zN~}Iv4EK-uc~sy@@Es9;87PZ;`mW_y+-uc@u`rQg-v6TENUejsSP?Eu>96`XYza4% z%54n&WX8_2tETK(o>{F~W>!l;4WTn#D<+G6s%cO;VQsKn{nV7L0TEeVJNor)V|J8G z4^D#XIx~Un{RnQMHdB@ruO_XCh!tAB+HjL}7dCvEBC$(?l;#g6_^;RcIITZG>tJW- zKsW-*7d_@BqhqGjjW0T4eR54HjfieDe^r9;X=qQw4W(Wz*`(4N~S@kPxzItA&KeZ~QAb+)1L2Q62YR;Z<$(tNXqdX2otU}G`Y1tze9ubyO4 zEu*iRY;svZB`zyI+0y0Ap>>Ct>xf(S87RZr45lXpS{C9gzu9cGHr`iMg@rP~|G0J> zXHxP1CiTQqmWP^YR=vaW8zn!<1D@6*R_qxM$PQ^QvO`2o+5O`o(f(9~L3G`oK>UiH zjMd7fHBoCD&xT*4#BlU`PMJ%(e=?jW$0RW-n`^wc{O7BEh|$>Lleqq~+J|<5?pNnT zu4}b>en%}SfQX!?A5N#+<~}2N3QO*h1mpnGVI`p`dxUT=ovYf^@#dcO()Jkb7A*5& zmU+k|+6i=m6XoQfDei|4wki=ON+}&hCNgfPRR)L9og&8Gp*(9VO+U~l=@*(6l5-+E z*M=MZCKi5Nz zWcx4OLJ^^~2X<>Yf>n`?p{IA2E>D`n!E||f6CM0Wi*$M3UbWLhnoSK~hw~~w7$;c_ zHD04w*K*k*eF@>{%Q!F;o8?=Lfz)mX)9#In zH`-`v_dw4|?l!hftAC*45!sTIc27&dkg%KV%{(crP*J?|{$4jvyEhbi9@6d+i|bvr zqYmZ!vo^~%(HY8si40$vSMA;5|L zB2oinwadmRXu}SxlQew!>QrG?XP$=d8mlu0A8-@+41)}OI~;r>Q)|JejZ=UgEYQ1V z0c{GF+xZ7#(pTB`>IVq|N!wjfGIQu37|TWa22P>TotQ+zg0z9@Kuf;0oFJ0IwV5U_ z_e>vRo$ULe;|y2b6+5aQb_^{%2=vAdn}|6eS)C9xo!`i4#hFrJIw`jZonlyr+lFR| zi(%V`iqd@D%Y>@dN%WyyP(1Z zyA)2XVnR0S>=<#+WQa|#$fwW!*ODT%#7R(``PgQ*+;&Ui2i5$5g@f{=P+ee)y z=ZWKOk0c8Yu)>TlAd_K1j0_uMKS^g*f!Hpx?@>N~Acy{jMN=o~2ZTKAa&Sr}!Os{U z&a)W=!jB~VpfnIqDcwNboXT9(uPGN!-NHV!=fnPZ0Hc1KO|9R~@#0%O@E+Ga-LYsJ!OhUM6Bg8f1>{L%Gi_$1QE4bGiq69Ky~Vu z#9}}KlYvdr+jJaMewM?e3~2C30*$dMgJZ<%*+!EHuipWlPPAA6*iuZq7W13aG9E)5 zB@~3uHn5CZocjYE7Bx4LA)N`|6lI32o-RG&Jne(^XRGB&uMM-*Ah{~YBo4~zX?s2L 
zhgMzI4LH7{O6od}v;hy{)fxLQWD|8#bq!TF)U(`#G@zszEMzL^LE*bhllsrsdNNAx zVY}73j}y_(+DjqkL$AP+qb-aYAr~SYK_)^W3|;_ZWxC;NjQ@m=;m0N2&4x5XKnyWApY!e~-vHZYoLgH);=q^QOE3Qdi4T$9lIo%Z~@EiDwPC zth_Yn#Hm3IfD86{!uckjwnhj-)J_lXA4wqge3X914#Wrz?Eq}eXR<3Vyl}@s+Yeo6 zI#+wj_N8S8Ijp3T)+y{NGA!7eLuBZ)J)3FQqtHWTku!^g`Z2??KcPoznu7}PBL*o& z@a*)$;q)(>WATFw?dxR9@J(sk7_Pu9f-*;0ssv@?;#*l;8Lz@h;I?j^mcPoT;~d7r zbgJq(0FSntRVSkF5dAaK9wEPJ7AF7#%z!RQM`RkQ8Fa-TeAZVtuwMWtu>8I7!iU52 z6&{EWyrf?MZhJFQx*-n*mh_zNJdqDlM{|6*jx8c_c96764mH?7Dhk;qA z5<5*2&!12^%Un$^fDT|r3`XpZrt>C{@s2TXQaiwRSzna$NLvYnOoTN3%VwT1xM=^y zPzQ1@PY%@5@S8^60WbZw`2ye5%TZY$E0tb$!su+CPpCUoGOJ%qy@xoAY)+t^wehrI z1plh}+?aCx`|&Pp{GWj)LIl>L4Gbme4#tQ5!W|SI4 zGf+cxJ4dw`RC8*3I);RTg<^DZT38LJ*DO#^x>w}2HxJt!ue3EQ78-ksbkK09H-l8Ut83^mzE6sFSUk_;UAC7Gl1 zOB}JX)nBu-T$OP}Wt0doT*5hKlfPb`^r})1&bS&P8zvKF|bAS3bSQvIklW>rL6`k)y#IEI)_enP^ zj*a}UC|tynwE|NME7A(=Zo~PguAehDECx!{HZZqs$&uoGS>48()W={F#F`9?adOfM ziDW2U_!2ay;!7mU`a^iIBxiFVI@%g0rJ-deCX}mQss0cwl>X0-4W58K{iu!TFek%n zL|SuG`jlVt_}TCOt19rvoZ~kSU?zhs{|Ft5Uil5=mXhqX$#ZUX&Kr4PqZ|AM+95AZf)V2N~j8VW4OAcUsBOdNHmc59zFz;-f`__r}XQ+7@(KAlob&+qNhJQ z5Me*88NCcKk24$@SX8vP`9S*r2-E4}mBeONn<7Ax(pq4_{!bUh{q#w9!p>rMm%-=> z0S*m^n&^WYMI%=6@PRzrN6!ungd38ex%&|qy9+f>lZ#>idSF%R{9tH;Uz_8>F;zcn zP69s^vXGjiu@#!}56uNanYsDY3gH@ICnjfKjkkiCn#7v&43c0718u zraKMQhlv#@=qFxmRPH#NY{mu~Hb4?NmMc0~A&GHzse-gR=nwu{93fM?BIw)1U5Z&o zRJn-I`R=AM)1c!O-U)Fp(*@&7-~j*?NDmJXdM6!>*r5qQ7Iup2VeHa{;sZ?roBs^& zMWjjoXzlQH(l`Hnn1I{fQ1gFlD$SsR`BLsXaDoV5}30FlwPp8K!kTV1r|`*AF@t zTs#--VRmHttz<6a{tWjSItK(Hx=IGqB!$p^b#zgLly^{MrLW@mf+Z2d9m-mD@y6x_ zZMNQjXjpuUS`yn~G#WY1lBe6MTouAV zEI}Aw(!%>CBQ3%jCzUMO1kszmAhM`x=*WaV6bQf}v~qt<`P~s5YWVO>GN#_Lx@Vbk z_NQcCEyY@hc&QpJGbo%dRbd71PBXT@MPF}QCZL{=`|{y?;RQ>JQIIX6AzzF1Y*J1P z8b(r*r!S_0tB2jTD4doKaUGO7IY>Ht&#iE_9SGh0ci( z=bh6e9FrE5-xDN{?xDER_FT&jzt0a_XVKRnsM!(j0hzxfl8rn8*)-3{5;NiqVN# z=!7Ik6;&%A#xEJ@zc;p-K!FmYR{+IfOW1}iaMiVi4~uuBYO3RF6a=o^tyCmY?ka>^ z{xTpO2zGr4XCHn+2)7KveeTPGaI&6$kqGCV`(Ga6G!tJO!r6wE{L&zaTPZNEU~QJP 
z=^2T`PW&PyPTULl&0i;RF&lu0h>-h-n!94qZP&JSKNPv4Z-(Q|U9_bJ z_ATsYXt?3%^MCFD)IPAG5!GW3QUcZF1=$Oq&)Al@j}d9PT7tFkh8&74saNqvtV(tY zo+!NesJ%HOQ@!!lP`vyVN#H4j64q*)(Q(mevfmsPRzvrGMX0UA{Kz~i^JA3$cRAX@ zl(jLEiPIPs=H@ClQ^EJO(W#ivo!uCI^L?>jBk*YYxbN3sCD@{03QtGPZoK?RE9%HU zl7np5IwHk;)!uNwRo{!0i=WSsiFus$b4|y?IDK!T6uRbs%2+}#Kp4sKaBT+A9v|GL z{hs7p?=wBdy`_cXp+4bkW*W)QT;?8S(RjR!9zP+QaFz5nPFSrJuxY-EV{RkFD~!}N-L6gc?X{?JNQ(;5e}Pt zB0S{2!@__V+wi$;(#IisiB~b|1DcU*i90UZiMYdZ2Z9gfM&~-1<6?u=BO62CfYoc~ z!y4|5Xdc7JKvo|M#mw2KW@g^6*pLv5ITSRiVID!_BzIBnq#clp#LGqyNv>;HESkY# z$#|lA0h*DeFA`);A|{1HImoz29U*Y#plS|+N`6Xhrk(VtmKc|7gw&UfNHO4H!TpZF z(re~$>R>Y};ZzI=dVuMS7am0+@LOD~e1t15y7}5r;1@JNF7oM5*xJD9HP;Y9Y{U-O zf@ckj)a3q@gol_v<+zr3`b@9(Z?otA?8FPLr&#r)cKd_B`JdnSTHV^)?oE712bOv6Y3)&vuhGoqD+uUeYG7teNh6|7pC{T_NU&bAm2%-Vesrty)+UP$>Mj z%|E53peo!wB_h|x$ao=t5{{+C@u8*Q6h6cB-cFAvxB5=NWBEO_OGgI2t2~ethI)@w zp=}%)m~H7mrSV&(vUd7GBQ;o*6br)ralZ#ahRvM>IzT9Nklp!xW=wwjdYYX(VbT*i z`i#uGwwdgZ=`6+Tln+wo-~rP@2*MgOe|)IL2;0nNtnAbpC$Pl6%$e$Y^MBgoo|Fh= zQ~fv^&8ps(KE_~}7hqn@`-SOwh92{y^l>b^YB!ZswH6uwiqJW!wkO%+YUzov`H{D=TZp zD=3Kr4mgPb0SY5})QrKN9tYfX0EZY5-~<7oF<`)eyCbFz7}_BU7;tx-&iwxWeeSJW z??aXxo21uZC9kUP$2t4#v-dvx>+F-BJjK^&XiSFUJJqNh7P@va8!t5RCm_nhJl`j( zSD_rFlDn=vB1GA{P=5Fz1qv-Hw`=3w6^~ zY^N~zz6YK8Gg@SM?u0(^X*`fQ^rN>h-?^`E&&$CZTzn7l&7(*brZO%N1S~(^@b$}E zpad)BV6yQizh>g-TCNy?y*S)|K-Zt1yF7oGFUo4+td81|Elo|0oYd!Gx{}xlZ{UOU z<6rl3kGl^DLitF_lQo3u=Y(!@x=Gn8cRoK94v(?1s{**QU(e|3z0Oi!f0`Fb|sie&e(X&P-@v)lfTe+xSO&%6>hqYTu;v=L)$b_AUpBb0(G!Fn(lqw5) z^GJGA5>Vph7#B5?qq>Qs=|R%$Ky91q0wzv2Fe$hezo<1ZA$Cc<1&D(013r(EhdV3? z?DDv{f-RLmN_cG)T?jUOv5GA`=OL7*6kB*UzALtHny;!C!QZDTy^|3^kR5){D4g88 zC@8UndruC#N7OS=Q+dJcWVnFO&<+gQJQWZGjfy~wHE)syS#r@IA5AWtbHu%r=`E+mT0)0N! 
zQ_uzvbq{&W=aYm`MYxG*%jb_CQ5|}%c=?6;@-FY@^7W?FW|ht^6po*VR0d$#2mKC) zOS*;DI01mdxsATP z@WIl>PgShU3e#u@_St9!rExL9D1KU#f#fTGMqd!p#cs5V%%0QoA#&Rv|H;JGBCFFtGb|O z0+U?bdrZF`s(w+dpyDTn&Kmi!uvBqWOW*gj?jC^&SQhpiP2ik+N9Av0W{(Hur(53L zQr;_$K3 zZHn&T^d#MMF;>=>O<4{38u}E^4?g?oQhf`zX=Ni)sy{$F9!g`us@O_TcD-qtDUMdsi zS|?Uaufa?pdwNaC0L;^XIhhC0G|m2GniVl&o-~2SD+WWiR%qt|e%CV}p`CH(g@ZaQ zSGBh@^bMJD^Tp3s6AG@i_DBi_B`E$ibcC12Y1Gqp+#7dYP;7U}kQq#PKrE zEyX>GU-;UeTfk&)ISw#b_-lS{DV>s+Z@4NurcdMsHAQfi@cjt*KG8mxHOm|L8nOk= zmWAjzJF$yjl#sFVb6jMEpuySe!wB+y@lOlIll^iN$93mf3$4_SI&?dnhnU^~bjok` zFvDc3ahw0U4zk7b=3U9}kR&XE9^I1xFQ7E|nOFPmDR zadyltZAZv@eLXq5zOqEv^m=CA(kmlWCGj*b&m5VH$>xjR z$T6Sy4t7p18e1z(3O&PRhOd;JQI!+9%dp4KnF5QghF?dSq%*tE$ zxT9d`V4N*o_OIO@T|c^_RTTY@E{`Y-_$watqqGhPE7tx!txsb*$&-K*aB{%s2HZ#b zT%?C9TaW2$r@WZ2zcon92B+<@Y*e;RzHhv_)J8;7Ev46$&rrawryP{L{G8yrr1iUl ztNiox2J`m^RsCkvBwu+fykvG9a#j&9;tQ0Q%vU(Gh%2}Rt1FtWQ)p370-Jhzf=}2S z*C5AZ>igz$hti|*#tfJxX+PGaIff5K5YhR+zZo%ZI(?XAD?#(7{VX-Z;v^T#BlK5M zjU@`prpgukET|(zo}5#ou_M~C7nZw6IDO<8bW~<;yCMNja#7iQ}*c94S=HR79f~^D}@1Le?^d#f8Iii{8R) z5S!D zE*ZWwF^+P)a9v3dm%SN7=$r)5iw|C>Z4N{MucOQYY%p%~hwrWN;&t2aSkiQ9iq^2{ zWzSD%GZDaA4<@B6JZ``!ukJH?^RzrTgnjk4^YE3?PuedP&%lA2T5k z)zdUd;*q?LjAy?4{zUXlW}+e_ct=4K5zFG&+fn|++bA`ni8gp2bHP{oe!a+@Y`G3* z_?%DJ1wQRNRQVv+O^FxD>byP}aGQMCCpBuRfS##j&EH3PpRneS>&Q)8^Cy@;S@YE1 z)J#HBc;xuTcPeq3)Y`>owBFg5x{C)V+nbg=ojt_6Hf{1!QhR%{;-BqII=14m-@7Su zZ?*mYOsBHnsS29Qz*7dF?DqtQK}z5mJqF6)0azeI)ZoNu`CDz2cc(dbWRl7}BY{Xp zIa}{!&mn1)DbO9CCqr;qc7n#>eJV$I@DnTjY0bX)?h;6|7i(P@!4wtG-(*+1F|BLt6ET?1TFrOgx^`{x-53_)c^jX?rcwXYjw4 z*^PHd!XxcI0!p#VSO8KMMmp)UnGF>1@uqyF)Pe;k#Ux|2mY!7l;PUsdKR9EfC2g66 zG@|sz_pbfM_f6mMBGP!{$l7mwfBl9g)&cJJ@XIMHCL-b0jDP^oNf#(RBF4i!u+O|j zj!sNQK3j<^O@{U*Fd4+b38Yt(!BszNZ3H-k{Fn&q z39SjaIWZCQ&58K%oYsbfeWejCNw1owWCdKH6q!-HZ%zaTvl+^fg@c|K2zPqHn``f* z?{q;)IbiJD4U6+z^oG(Vgb!^6xE6BYM>gq;fhmh4{fw*~SW*>iasiUVXd;s@vv^1@ zfnWe6Axw_GDcU5Ngq8|T!rgYSyiEvvzCA#VyOC?t1Cp^@YWAfnK}Z66E5Hw{)eIKi zUtuAprHKWEqcT#aSm45%Sm64kucR2TprjbkKvGPN2Gt1(G^kWa6Er;@RYYDgJO~y_ 
z7p#c~x~lO2gU#UK?g|gy8IY)f)Gnxj+!)Bu2TxC0<_Il>UX%I7Eon<@SHGqXz^ zT|{+W+FH(|@(G(cNRIG^;nNqt-^a`%b-$T1KT@j|q(cm2eQ3#NXerb0SQgnmYotet z>sm_`LTN}kAz6fmx$rHYoCLO4Y5{GF_063FvO7SGShViz34 z9+zFl*5Jco~e4uQzdcU4{@%62=l7xWB8{pUcZhw z*8pmPE~u}yGCEy5a5h+;a~z@-kC5mMlzp&5nI+i_%92E%qD&HfEtE-$4kXb*Sx9t# zg(oP%hpHS-&5>pNwJCE0DhGi->Bb`ici5@}+r!NO5*KLY5)SMdAL!F7! z%F&V50E}B_1gvU@o?QTo$z}YkG4u=McT+b+pNBBkZ)8$s#Y(qSIkCCAoERs!Ea%VL znija4^C;*B8&OZ}3al@=@mx=g1E57;t|z7wcS=>nU#p%NhwX6+(BYl ze|>e5j0$cWxV~{>C@gnW-$d_6j%uu^>esczunM2oA}Xzb-PdkGCLil7L-S7TXvrIb zO%SoUp{i&4>e&=p>-{WwdGQ@E)Fs&t1B-(3ksWyv>)mn0Bk8tUdet8}c^l;<(_UC% ztED_kGD2HQ>tj7?X>Xx;$$C7+QyYsTsl#t!v@Pw@)FU*21V?5Hna_XpL-uctJ8xEE z>tIZlY4K?8XDMTpKJpXv78rc)`i9w~xRNm*v?=|feC4Josu)UzsFo)>S|biCWeO=6I2cHPtZ z-->^#>$k;q2FR*U&yUixaG8^#`)%d`O_wID!kMbu*uY|Fs31(AW+-gJ&a56{)Qawu zygNV}QKkh>XA)QZ?PhKX>n5p$`3kMZz76;W_W3=B9J5BCDdM-RcG&USg#lQ>x#RN9 z+Ei)OFJPJ18zaCm?toZNt#`M}Lnv=+0QPP`y4OBJP?!KaoK2VTVM`=6#A%gX?T8bDh~v zBZ5aE9+k{AU!*NZU!9?7(H%)ya3P^rf3r@%6RSuz6tpZQ`6Cj=@1P~rvr5&rlXRMyDKT8Wl^j$ zx?SR;st(N8tGC0srPHOQ5)Y*aF9e(A`^;3{I0urXg$ble%)NPLB#nn(`jKD(pZ!S0Z}Z|em!wT&kxI^38v1z5jMR3|47@ly zHH|mUG@zDz7LQu@Q=|wgt)sPHF_qfftVJr6x)407vdZhMp=PDY2$%3wwZWo3ed z4JtS7?PMWEnXgp~C+V<3w0=cZpR+?nm+E zSXO2k^#!GlwWj8L4q`r4aeq3W?R^p%bWZx-Ld>R^bAxpIP4_c6^Jj%ZVZtb6CW8k# ztx?Dm+e!t39qiPKQNP<0XSK+R3!VhoX(8rfCNb-TmQ^$PZpo}o&Nc6`_ie3dl;KwQ zr)jJqVH$z1IZjL@X(^`MoK204LzT7rcFnQ%-GjC9BO}I&-8>q*TEwQ-;~Jr-QiH0d z5^4(=N!2o$Oo%B$Pf=AnHPjt1+Pz4(@Kj$em)Mp}8AuvN^yY|NEhDHl5$=?xv;KN= zE}ZJwZStxF)`n#SR&0|@dO`!g%{dOSv}iO_;hLO@G{@|kNviM74Q{o0cJ|zQv*x(R zp1|N%DJTs%)@N5ux}jm7fv`Ex>$0nc-PN!|6S60T&S1J2fJoqkUq3bQJ^g3RUE+}8*T7h~{U>w(h z5LBsteHf0ZigbpP^m9QS0a|Q@7klH^{o=2FpeP5$CEk(X8q^=m>SJ!abu)IC+TIH) z@iy>=sWD^y=EWzqzeVb)dXr-sn}848{vH?~+CPN|lFU)AuPYU36Aa1w$iZaf7wa4mPUafnw-rNrIg4w`RRlwq@-oQlV}!N2tp$tE9tI zGERmp2R=}V#DB(7Zh;Qs8v#6vTU-Mm>dRklK{bI2WTRRO^vD&v&z-nw+BCjd}K$s$XEra)h4ODQF1N z%@%`vUvgih&;z$W?o^Sfe;-XW{QLbo6$CGdDkZzRKWkSH6!=DADwkzD*SnF?`Bi)X 
zB|vCsk9pu3tm_P16n|_$-#I1HyL4^}LD%K{_H>co#JraCcQa3Xx0mg|C<+lny!NQOaULUPK9Qn2=~4NGXQHTe`5{s6+Cdt0OOt= zNLm)3XDarz-l0gXVZBwF4zg@1$9}@LKlc_-S1q#AYP7eCGEu3;H0R#h7gn{#A|BiY zfccR8Lin$Mx-zs9$@Pl9n4L4g&qq$ZM zO=ue$4VO=qoUxAYXI?ZaMjA`G2=d!GCBMCFi4XD{r;GgVAj2$;*}R3Q5Q!6VS;Cw| z@GUKJH#apdq&SqQ(KU#DOgHXf9wSGC+IUq2YFIaptO9@Rp+l)C{<=Tjaz8X`G&X`! zbrMH^rK>1d3CFnPR3giFDmEnsVnso-@+P>7K5?~!awM8AAYq_iG>6J7ghbW#NFZ2{ zIu8p?69_mNB^3KAfuIp9P`ZI`W{Q;=vE8%+RcDL=5h11+ESh%tV9H3N?1_Q}X(XBw ztqmT)bYAXCQLP5BMmkFlBkxUp1CALq;qv)J^0szCF9d}iGmEs~Jdg&ZuRvO`pyCq= zX#MNjX@X{|=4qm<#f>T?QEnaQLIu=bhD;mz~As0P0cdu!X(E>b*}A z_HJ&8i9%FPhsw&21b{^%E>X)(s#oG0V8NpCXDu1z?F%c;XIwmync8-Z^A}Ph-y_iHXTj|6VaEYBAE32Z_|T z7DL%!mK8>Lh;J^8Kk|x#u>2c^4s)4`0Wmx7n=r%8lJRZg40JK33td4tvAphSO$ak^ z`Rce#d=>vBJ$@YYGZ3U=@olV;z&~2;1+|mBom8OqR0R^h1WC=Og{4wqV351Cuy8;0 z7{1E|N=`z!Koy(y;a3~bV63Qe!AXJWt*TjzA=ay0pib`^Bxeg8C$WngUk(#bH#m_C z)N)&n&Ex{@VsEpW&b}~EQ7jV_YK>P5o)KlexUtCpdDdeQAJr9pz&enL)ftLC1%f6G zh0w4BLP!43=`b1(zr@3k^Yg5SQOka&1lb=0kxXYG68AH6BIl>)Bso8)qiMWxrqSSR zWJ58NW_EcURI;VPP{AqsdpC9PYfi z9d&3GA6>{AyvQ6T@d6<2ydra}>vytHl2mpO#?tywD?AhVez;QU{O#?Ng;>gwNr2-F z*RFJV@_Zy&MIZH0Hf8_7QY(TXQ|P5| z!kgIK(H420E`&sKkw3*SJaA{Ie78$+1o9z6vs_5{V9}bA%SJHbbh?8}oA@d}u@;6! zP97a^JSbfdvM=N_WdXqRQJt%3@&BMsagg|@PzovI;%s(4>Xis_KvGET2`al+MJy{J z0zbQ0r7Jh_qR39q*q9U=Hz8)`ZE8~rqQd3oSzO9y!3dBK%AQ=gpjH7C4X3VWzzR^C zSQs&E6FzYyaF55B`IM_sqxYeVO^-2)L=4sUj3SMu1fUty(WbuGPa5P{uRafPpW`9cf~LQ>hYD3iDBb@?FYj0GZWV zqQLG{(a**e z^aG!4wV#@)S(AA15NaMP5#NAmgYn)RJj4U%%t1ng&ME4~h(tVX51T5~tqH@%<0)AS zpT{VD7f{)lECe8fVj_ths$(Xui`x%hb-*dJAtD(KUv+BCIwx^fx?X}WBt3Z5B$afp z@MTg zQo2ALBirMzQAyOm4c zr096Y7E>wE{w^D&9eTSeR!9r=>V8@8|%lbtKdR z#_uX$#2TFLqA?U~a>B^Dye;PoUL^XYs2iY0ifZT1;%)pQYguc){W`q!ZCX6o6&kN| zj-$T)p1ny`Ly-gbteSe>FBiYBVdFsM(I3;o@0{m?J)h2pEw`V?{n0B*cYuf6*x*^)BhXrRsiqe~Kuo$A%r=`Cz; z`GdM*nY#hvyUHC07KgpQ{>$=qr`30j&_f`A(aThVTP3q`@UFNhvt`vqNO|=|7;x1^ zWeqS{Ui8+u9d|LFzn!X6!}9o#e^iAd84mMxR}4<6SVStFQaVM3cJ9bU&q8v)cB%^J z{9q_Mv^kt1->$9XIfI3=ubJH_pqMyM?1K><0NvB#o3*ED#Zf5LRI=9Vj6U_peCV*! 
z@?p*hSgN`u0hRlVV5ed1`Yys~OGF&jCPgGwbZ*@!I#)NAPQ#D2Bj_d4jyj}@a+we4 zKFF#1EBEY8<@pB|2dy6eX}Y@%n5)|)4Rt1j^!+WDK6dERdKF%x)LN}?*#vB8jIP$UA}hC0&aTevKTce;$$>rS`H?(X_Wv>>mN8#-0VAeLPe={RRYAr+)WFzsva> z(a&JOt1;f%eZ17NwKBvzHep#tx15hU+zn-G5-$M-FYIPTyP zLR`1+FPDkkp#oO$Bm7l0SaOBTASRkY0drRF*kA1pZt-g;b&kW(kS{F00hW^qyfQK< zJqIf(-Q5N;*b4K-^T9IC(KK24c&?ZeFf%ic;op+f=%$_Ck{n*$-p+v_Ug6*Fz$_;X z6FfvDi4d!Yg@a#j)1t$KG#K-M6w?M|knmRGAAH=bMQKm#?I8?}%U;2*E3R#``k=78 zrW|Rn?L876dy(PlkBJCtNMCQ7%#m#9HmmT!oHpYq%%ab(l$WUAstf0r&jks*j=*-6F#SFc0$zAY>KI))(9Jza!iw#umz7n zBd%Z^Kns%NR;Al3GCUZHrA|ue_?!0~D$1?JB{dJwU-8k!X@AX&Am~=!?qBYHxhPnesCQr$s5i-jTP0YQ^IIem%1gnbI8kNA z$6*dIiM5!m_-Wv0}!+yMfK8W_N)+W9;R%p1$!`XcE;-SXG?D8A>S;cw3KpSFgd}ESuK0c?RktC) znWuzU-CNFgYbHRYqn0o5B+Dzkzi^8nlcVRDF=fvHOywo(Z%4H;p|_HxA8gm&U#=OA zNC2H88*Dq~PR9 z-5@8JE~$A#MxhkVIsuJ*V1k#Tczn_Rh_WTUiky|Xgf>FjlKgRuUMv$VVpUKn+f$4& zYJRICqVzp|XTOgQv+Scjb&3!7E}Ww6`U-8|$}Hekvlt)iw3NDhMZc z%HvcSEbmjP!4GLg>Ryr+lMX$BVfad;7x3xv?yh#f_`dYq4sXFCNc;)}(i^1Y;J^tc z8w)W~AIaeQI~=QGu^wv#is>?P0sj}ZUB+Fp)-Al%m8jE<=__lE%(JctrucrXH9f(# zt9?tg+9?00?rJYjqEB{3CSGUg?2WcU5d8Lo_3vkx!Cw6adr24GWv!phY+46#smrz` zmUwvr#ZU!+025FF9#Cl{*CQnOxS5%jvMjbjmg+;=JyR!H9O91OpqzF^-}C+m?&?gr z3&Iz7S*PXbBxyLBRUaJ;8(-PFuyjHyOsQEZj+Ye> z+KuzqEzA|~#SzI5$}<0eOZm~xrAuwW6i{8Q!$DNLD0Aj!4Hd!`r5ak8wkm5}`3g)3 zOR)*{CTS+qC+Ju2{Sio*=~MBLJGp``%{brNSF?3bew~|c-fwGp@%UsELoYN&sMmNF z7APbr-!A&UycF=uxJ_w!Ln5T`5p2u~OS&~!U<)8Z*gfQI`LDl(`ZX&4kHi5xCVdM$^4i_B>0*|$ zm6w~iM)+Nx@E*@J$v{gwq;6JzS6A%Mzl2LNf|#u9h7e6eFb&3x`)Z-p2(dAH(b&{+3fG)0x>*yrc z^DQk!H}f_!o4+iGq46<+Z@0$BT-_pppylYUaZk(9J#JjG#BzaqltK4#I!Uk)+%5iv zRwe0tKmk(?r%GSl?XA42B<7yY?qT9fv|+1#u`ms%3)8T%FiGLqmEXRkakDVpD}lhu zbV!g5FPF69f&omx!u|Hy)IDU$~61Ib_aA+}M;}2$mH(wkmRjSyw0&^$swh^F36FKrP zdlfIAkc&iGORd3!cnoLu9Vtd;S&$S^gb@%uJfC*$pe2S1#YKGW7(EgoJ5=eO9;GG=ahCNuF(7mzj3{F8#+VPrdFcL z+jNVct)QS0Td6O2KAylP$&TdRZY zAB|VEuDMQ%N7sJmr2T7FyC%zq;*V}|S^iG0(*Qb|KHkl+MJ1QFr4EH+`*x(TVrCZC zT{F{P_ABiq*;bHfSNy_XT*^uFV_E5GCekWw-S0sj6t@vslgjsTvDgFgof-<^ty5sK 
zc%-5KJ)BLFgE$nx)G5bRb?wjTY6%LrFw=M+-9~uCO5lK{t_&q`Fw1KwfyXl3u+{?E z_h>cnApwJ*H34M+6R?1l00dM3)W{x?Ucu)>_FRE|vDhF?Eja*~!3M@PJ!%}<7en(A zoYtNygLZ&ylY%Z&ODcX4gx0zfX+P4@vvXX+EvsFZy#tF2t*kr8e|dXuuGeu~1SW)R z3b_DbRY9E!IA_K}Cx<(341ifIe9G1Qzb5 zo9I>^t28J$HFuOL4IA|ouLIlGD|hbAH4tx|1)@^1JYEb(X?-*j9C;+0LUY7k%DOhM z{kiHkAqj^jM`Y-kvJ(znR)*^4)QgF?R~}t~Tlq0R*~^BhTqV8_sSVuhzZotB{i$N* zo>q$iG;(|2QW=wZ@tqunV>c(lg=h#FvRK6}6`N&IaHP4jr+X1p%Rdu|0^+2_{7C4KVr^DvY8>=dx#Xh?8a>(pTgwoIy6fGZm`fn+h-T zU7HvQ@I-#B(nOsft7P9c0~5BFG5|o5E#*AQo(vTaQhFH{MQh>%K9Rh5nk6_f<&+-Q z)>NY4bU_)9sYE~{Kh`AzW?vXdGj<^bEKy9FjcO8s0oQwBtVlQ$vBiw(;Nqin@R&XY00Gl zrh{p`ai+nvq$iFb3ul{4u8P_t$5}h?#wiH;it=~v3zOxSH{_ebuHJ|465JG|42j5| zlkqU6>eXrAPK$MAI}x`NRD!q*=ooU|La0mX7^Y34c7EIARJlU=ZMW43`p=PtOAF95 zMRnP`3Jh?Vhx%|lRu=j4%hx5wTfkDX^0_dwH?#!i2iCfw5RA)8vz@<1F`Pdy}+^Yk*ED~5A zmDRYRNi3UVd|MdbfU#^KS#7-e;DoF~sAQ9JV%geB^iLHOcRqp zv0n&|j*1}@#3&W)rPQL|j3o=$s1q5nRaKFRcNYkY#3;ERwKt$YWfwz;6&8)1aTrnk zccJiELMUHn1vsf#Npw5|Q~=r>d7mMh)U|uOU_6Yxe-O1)BX9QDhUK=E{dn?;;nSMJqog+|U}fx}3s^08=s=A62Q7YETkx zR@2puh=&l0e7W}VgI!8)$oQ*#{m@c+o(6innNYvO#-+{WwMeK+jGZN+-9bWjg+owE zuQmyh04@^Z0N{qa5pxF_ZEWw$_l*_%7$|&2C4)kVcXeVc^^O`u6q%2maip8_nja1? 
z$09f(l)*3u^+=W(7-_Lu6X=C~r16Z;5fKq>#M~e)7Pk*&6DlIwG($woGF~Fx6a%e6 zL4>%C>_v7pF`3cp3`Fb$0g~px7u)+Zbm&MUb~T3aVpZF(k3y=XSl2Dx$Fv~`T0H3e znLIvMCB>RQmf<~j3~_JRHPyvG`8u95>g$5U~G1{zAEAXqa=w;-MlWNzOHx+rHi_5*}V2H_M zXIia7vWYyv2MWT<=f*3a6A|q@fH&bM@~oc};_yeX3}d4df62jXI#3#c(()mZ`b`iU zt+AzJrA5SAa6uS^;`qH*#NzV=!&Qje&Qv+O&{l}ij7San?m1@(QBYe+q%2P@_(bt$ zBqo>8Ua)s4ykBj(_uK6fYYOF-4bN(L$C7+KJjfE;mluMzt@Ud0^Vu$54CtcFc+_{Z z1H;Lnc%qjYMgR@;=8FEL8OTWqXjFvb`zxz@N1#)ODW7z>(Y z(0UOslHUPtjbDYla}|qX6FT_jU5gMNdZ2xw=I~v$=Ux*k%wVg?GTWhPy&Vk-Lx%N0 z&FN`>=*&@j%1dm7tEB@dxeUU=ailW`Dv0bc)$ZNu1a3XQm*<)GWk0_>^SnopOUc?9 z52@IO4CiK}nvdM%OOn#7!*M>Y@=Bz);?e>`P@OU5nH&Dk@m!s+CqQljDWbzj10c~5 z$4kLAOu{%j2R}n{8!^jrRusQYZh)Mm&a!eRx6p~0PE{><=9Geq!`DS`E4+M24=@fF z0pq7cW@L&u^p#wDajyM1&awT?U)WcC-$ld!Gp~}^E6||}4hJa@_3vP=i7Zq1^;#c` za7l*SSZ9z>Uo|`|^Z7oW$loR3IZo{Be2ULJ)wWqrp3m-(=&{}n4vOE?nJtusNGB}x zVKPpYd6y)e7Hu_)9WLw`V+!K%OAZ^Mu=ss$ye=PI)BcWYp8In9{ojJ7F0NrFg=M7V zEBIhxfU2}A%7Zf>*sf>b#i9C3S!Agxve9&@Qf50yp-rG*Pm5hKfV0Ej5G5MA_)Evz zsL<>(mjaO8Jcg5{3qnlm1mPr@$O%4_SII{bMUjr&%J9#(BCqIBE{yDHy^*>H8as4# zx44Cz#k**pUf#_I5n(tt6S8$w4rhoc3~}cjtYbrxHVz%S>khlL+x8c>wbnadx_F#S z6;aGRS$M`GtaM3DX%<2U{4jrtAHHb#r97L{N|DGA%AzX>ahy{yw+)>!vg}>^AqZV#eFFt49!Kz3V%)@n zp?7ET%19cO_fxY#T#dFm&iAWd+Q;+z4tPB7N$?EOlctZb^`yJq@maoCiI?5&=+(*N zvK=htcSb2*d2}kpo@eHo&x~%LE!TSs{mKWe8DdQUPa?P&mdIo|{9xRZi+Ri>i-;^w zsIm0uSLnI=gX*9wi<+UuEajIgEz5 z2Q!HZDM5J_qxt*wU!B81vU41Wv}RmW_9=dKTv&gAlU~Yha$)`CrhIXli5gC;x(YQ~ zDFPEggpFDJVVmvKDXCjtE!`MRdhol_*0HQH?_Hl7wQ}Y0Io*!*u<}X!%%5N(2(Q&` z$QP*o_Q_BssQLB|f$aXVKot)k;rKHjM+eo9XB^F4o>iZ zFqtEHnsoGn%;yd)lbZ-<^Xk%827tay;ZB$ zCH*qb3Oz;_IKb@Oae2(>uL<-5K-*D6#+Mz?I7Sx^;ziW}`Wk@s`~-eFH9|ER>8iA#Tfc31u^nq7UjC5$gIW#KV-8~Vljd~Vr)h(pPDpOGq%eAn!Q4S zOg~o&B6uf?vto=lJdjw?7IZTWbI>*VXcOqziLb3J18A4Rm$*pTiSKRis!C|clhYC9 zTPpj2rA452af_u+YJlxuLLST`)iExr%C?eAc*MJno%pG-xu_beu8~7MWK+hg@Fy-S zUcoyXUL{XwHJ2Majg~p~Ggd3ustJ3+WrGE?UO2=VBl*r^K^61PI1Vehjwlaj+Ja_h z>|&}30n?>}S*clRjTw{48cWBlGqCDIYe(I9bl9A=b=MA!p}*Sy#|jW2M=4?Nj$Z{b 
z3P=RzmyTW=z334@H~hmW^T=2N)urqHLZPq>)3 z-UO#n9$KZmR_IJS8GD#+^O$bZjyj}RjOJ>c3G5wMs^oEE3j;)8xXFjha1OF>+M3+? zQZXP7FV)N5alLYE0B^gm8#dQ1lX=YE7$w8o)A}coE^vKZXYCcQO4(FK9Ks-*thJWCy$KR+CP*uCg;t}SI_@YTF$a9l!Ll{eHB^8k zRuD0g_1LXz^>+)ml>KLvqz^u)=nVNEv&l9SDUJ)3 zT5AIKoUS&f^2mM>*+lQt5?CUcI3j~@Dx?LC67EBzQF%Cw5wDw!R3xPwlDcyJ29XrY zsa9wdfT}{=pe8E>2`hKlGPVjV@HoL<88AGSp^BK~A?~aUZB>u0sgaBq#e|^5 zo{VkeIYYLwt?IA`L(nnq70VKp7e$~_U}e18Anr)V-925Gq%IQYk7a6BW7@5IEZJ)r z+ttc9({}XN1YE_WEm)~^Wib}%1nU~xK1Wxj@dYoQf@Ewr;l%+E#x{$9mXH)JTZ9Gf)Xv7~3I5D`Wc%bAh?xw4ysJb3=I&^<0Fxp_w2#!sdo^0Vb8Xk&by-F%^pu zwHLai!xMr{=Ef}$dr}d?+(C>$I1Om(80e)ELA!NXyT zr$i57cG7wq$>>nQW zu0|7*gY-1MADFOdlsc=hHJ$Iuy@#X~sqZ_gF?n~r5&|@`DUj!gh(CTYh)B`CW6%6x z;Xx?c_r-sfpBFpEj`!HO+m)*p{oe6;@l6OAW6_gBjc^91v&R-odGD-l+bLy_-l@` zwPKB1(w5P}f?R&5+tL>QDu)nJ{W-_8T|#4k!SUU4zw`CkcG6q9;LQrpwS73koHA54 zBLC%X0$4ehkb76QPLT9fzoKj|tyCxG_Kya3s;H8xg9F-`+L9Thz$8bf$w6@}(&RpDA&S4xLSe(mTB5STSEdC?5L-CCMd`?yb*-N56#K*&( z$@a_JhcZtJG^nYH{Z2Nfr zfJb^yfB$H+d>7dPIVT;+kRp13G;+*%+1)NL#S|Jnlm zLiitR2B-KRC;USO!=y(SSO*URVNvJ3|s``e!D#kM-rGOW3$^`CpV*tm0KuG(;S zc#ePrBRP?g$%OzVh-5DtEg{e8XJhfSnfOsSY9y*ZVX8Nqi$8df1>um0n{jjhbVcQS ze~y0apS2f+3(r(Hbs2q_)HxWehcB9?v#3OweIN@^4wSm)4j@zKIJ#%^dgX|ugTp#a47OYE;b!4puE2p9T7TIxBQBpoj=BrE6t* zAy{O2RqDLlAUu{$WLjdQ%GRD+8fcZ7eIe6EWtlN_lq~h&ameY#jVM3I5y=zcjUOrA z0({Z~ZL(>TGobatuS3*C>*GM4km+C=55M$-$wOyp!y&PC@~N;hFR(&-tF;t0** z@EqEa<|jErrekWnai%ei#vF2gf?elY4@|C$FAceF)|n2F>pi`tO>oenB}1PHsqpUTt*REYyD9M9qI0?S?{Bsp}@pnuLz%5$#u zvP0XGNuiyWd!(9?*bqV=GmUSqHi6WjZ*K3C$TdmHvjfv>1J%3$nk*#v({aKJ6}Z03 z;@~|avp~l#5Ilt8Aqm*f-aYLTcAA}!a&x1yAX94#|FEJUk zoq+|Vva2_F8S(Na56KzD<=Jsmnm_ZDM2TyjN4j>{QR0LOxlOJLf#DTB1dS)~DT38C zS9t={&)Erl%1)FBeTu76aCTKcnna+A4qz)fZPMSmY^9MbC_L+9ln<%{jRW2Yu%lGF z<>P9w@s&hqCUF+Nl8UqRHk>87dIrwQJ#hm~+@_@KZcY>TRM*auHSZnm*Qa=*IBQqh zORnEMbT;~pZIBcHpx^w1euH!CC0Ew|E6{Hq`Wn}7u(v`IV{g5T`c1Q&f6#AUNWTGF zq2DkmFPDBpAEDpS$3N&dXJs**y?)aq_fR#A<68Y#tth-W{ifN^|6lbR?XJW%C_|ch zm4~vW?eb-w%=T-Ol>9fiH|43h*S3u79*wsv4ALzhoIZq;lea!&XPIB)nYO*+w4wq< 
zW6nUJ{!3Rq9Z$}sM^xVmSI?t3s^za5+XKuCNA&=nH%zmw1Y_W|$67uo5oq%L6k&?) zr(Zul{dzRtUmhjigB!(w=;>o}^dIJ;{ga3FiwM`F3W|2+w#AN?)r0Hd33BWrAj10! zgz$k)z;nCWC+wF#8E>8J@Ydsc>oBKUFUa!d`{SMc<(K~7?<&fEaY^OKC9tGM`aF<4 z*?LJxEl)VS@aMJ9*Ly7S{9Tqmsg`w{00nz@|Dt|`*)pKd2n=w1D!@41c-Cu0MR>#Iih?p+qE}4Ufw>c+xNN2qx@|Z&$LUGooew9oA$mgv6oM0#rwH> z5aU4GJa9?1?fXKMQ_xSRQoGj&)D%#B96SlH)r9Ht0|MnCeR~K{KB$QpPA7ttK8LnQ z>{A`KG7rxJZ9U2>8qvf0bpqlnM;=K9eE|4*y$!&p z^~p2|;OF%1IRO6b&)-Yg{u*GmhWaRtuuusw*Qc0>tsm+6?3PxR+wUn$pQ}fD+9T>| zuov#2(lM5{Vxn`6OjCv748TbZ3xFQ)jklW06Z;63lPaO{m>zfxD}Ci`C_2BRP*&T zf=*Cw44q%~%TPxbheelttz=9I3TayR(ui_40>BnZZ7< zCg}1xebVH!YKFm|)^7%fFW+6=%clm%#%v;^>h(~cUWsE7OQKDO`id`9y^1!rifP21 zGjOQ{f*#!=lGi3S=u8sg*(@L_G+2c_(k)oUYbEwsm3lvyqIPq$ z8}K3oy-x5sO_ZX}f5p5tl2Z6iu1cS6cgzEzrW6(~C{dJAUQ4uCp?1kTt)IvsMJ%M$ z$gQj;>m(yzqhnJ@(gpIW<0Dm)6g?d$emDyMztU7^^6=;BUCMmQ!*{$=agu8n`eT0J zo_ElMUn$Wsfyk~M!3NCwaD?LT0Yp278>#qg1yw43WCh+pD$6X8y7LkPIUjhW6yYo$ zdkK#uu%(QA#VQ;^K|b)EIlX~zI+C7IBrMkP*EGXoI;A-=*_cjf4|+HhlLxBwCMNWu zwvUYh;poBD^*M?fpzJvuy>2MFpNP6YC2HjXgeDR_FC)N`+uO7wqTK?~TFMLmgc!U<|&1xr{ik8ai(!mU`{#uFDca$ z`y|WD>s5+eZZm`E{7_reDtcLoO?gBvbiaXb@dDj7n1-jpiXI6qfI}6@gjeDi!&)^<; znFsbs%SUUDT}<9dyZ!zNNi8z86MiJb6Mn=fB)I68hsN5s1$;uTv1Lc-_s?1MpRkw2 z`VshVCgihqA+_yBSwZNc2`oW!3BN=taR&GK0j=ZAK%2G@EJP_cC9Qjd@GOu{?*`=* zH&sePz(P({n$P_c4hpIw%q|JHFuqVN5R+D(+Ca0#s|YqMN`RI?VJaja*_+jjFiV`H zBt?W-dcit!ef2D@#UW{ym>|i7IYnhH6U-zt8-J%{>|L}_vm}M5=}@m?i|I6(X=bPC zkP`z2O29Xlq`2~GHc`W{3F4(WRqL>cWtrfPNOcuWhTC@iRsZWS4|HJqRh87?_peM=N#B%qmT-?v1tXG9NJx|B-F*J?ZPh4+H^^F`Eb0M zr^7g;3^)q(Vi{@Z6ol%Q->eFui{-_toFzFy?yt_l>nc+UrJeJL#_WvYDCd3pMy)Nf zTdvBk=4%w&BBfD=%3fJo>sv%9@3x3!DX+GOFe+v(BAZ&F!EzK)@i*;ojW+F}ZzY)+ zGs)wUL4FoyQNLVm7Gd76FpET>;XQ7cM22u`5=~g13}YBY0i%Y9Q8Y-4%m`}1pD<@X z3ObUov`!l;DT- zT^;l^)*UPxp4VOuCB>x!%47o!W#h|49cYZq0V2g-d1bT2p#`e$)i}bCUpx0}Dp+28 zd`^7Z;_N0}V`^>5Ql4~w)BxT_oUo6v3TrIym%5I|xmALBD@3Z9I^I{I?ZNOH)~xBK z(>Ar+MJ-eIk)L~76uF>z%DVk^H|wc7n@;tk3SMQbB8zRwc8~%pu`O*Ma~0<_2azYu 
zzBkqERWBw=`o*9xM*_4{syg2g3zX-;OGst3ZAs(j>&{7M`x4t+n4A*xoa%ST0+w5u zrQSc|9TY8%7(cp;3ygYXzzZH3?O>p;sjOoPn<|^Z|U|Xl?8Xf zW9=GelV#+#lDba&t^7UtJ^*6+$J!pin$*(pJ5qlZxG$U4NrgA?i~LCt#R5Ph*MT(l zFU$amzxrd|c~RWWq+Qd}DHU>VzU**oP12cDExKlC1=T`jynE+WczXrqD{YOHZxQv&&ytC0+8#SP5aD` zG%8X4J<7U3)K^i~1pxo`$-1uy?yst>I|JljO)0vzD>86D;%`wUK@55>F8Ta=6|_Qwct?@{<$5rUFNDYw0a?pl0*3wvo#`r-01DREEektZ-7+vTIEXa7!B@50ZovdHXz3()ok1n4ky za$5xeJ4(spFaXEhrsHq-MDytRPU~p;pCiKCax${4CORvg?<}E|g-$hY4d+{2Z5981Zg_Rw2ilc% zD(3`@GjCkRNsYjZ^mXM;&zbGbg zU{R`9`9HBOxg?H~?*n^T@t@Ir*-?!)!$7W6G5K+BRbZU!Rdj>Twys5g`D}Ku{KeJ* zd5y{o-h9WK#`&ALdSzVr+w6urRB7~(1$j*SXpIienY=kr7 z>}S@3>@&S=B>RCK_p0zECC%xLOL@Kzyv>#Dy7uLD4#}FkQK3cAMXnv$Y1&^*so^Mg zuS2r1d$AA}1c3fc18wVUk4pz+5ez{gt!hm0bg^}piAvP{N})9ZEVW%QXmBOibgAGmQra4GRp!(KspVuQ}O6Q-PgFxzM{4{GN{@A#Lo(B~!2S6k>lGHZr1bozy9yq$^) zSA@@RAda1-S7=!b%wh=8b%(!&yHb&KPA(_g~wLW+nE;I7QlgDU5!TcgW#}wQ>Dtj4ro1{)tXeA&_0nW<`B#jqsz`|oanH;!d-aGJ z{skE>&6inDB62VdX~aIcwV5tr6$_f2IGHGof68>8uyvS@dp?lKOqaqjYj9oNI9x}E zO-fsr>okUWjUjaQDl7x<1jsI0wGPNoe%j*~j7MyyVm!SJ<7uHd1LH+(Mq+2JiDK02 z+)`u)rCrK?Peoi<*;9qySS^NWt=L}v#rs(+zW<^)D1Kx4#sB96J{kT`^1Kt3csNa_ zE8_#YU7BZdb!0tc%`qxCRi$2&>sx#W4t{JxX)ZblMhXQ>4JJzcusq~Ej2&3Kmr%x1 zr@Tun$jcwQX8Ao69|DXe(Yj8OwzGg?B;oNMVERQQFi&<6n{)|(}s;M^4Ws8cTgFMN}5VnA4 z=`kAYZ)~yP`His_U^OC)&OQ-c4BdxNP%;Lqr&nbvIfXM#o@iHHR$?y>^sDi?a|z+g zP>^25e`?1L`*gIlHR3#pH7JFl?bl+&d}hO6&a$qw=e^1KuG@VU3muIDV);_MH=p=d zWl`%C64?;6Lwrq7+?>dydV*wV0uKBkevp&H$GJmz+A-WPFUQO;A3Gv39aSB~F(Mip z&&{4XBFFhM(Id+3eq)Rrh&^-_RR#O<|3X!r;lCZ_3vo8W2|Ns@G(sFSi8R9C9kYmJ z03?7%s-B@*Ng^$2GGhVI15!QC(TB>8CZm#Hn5UX!T~W?r{IiAZ<3ZK9)%=Z9^?tS5 zlPfSm-XTr|VU1>CB*2&CJem_rvtg#YQc?Pdy_{|@wio*Y9m0b1z;PewjBbrp^AhO%9oz{_-2%e@*LK zD0BH`{@d{WoA1zDds^?r0ELtlEJh;Z^bZ{%KiC82Zt}6j1WtDzZkRH&0UD#8&QH#0*S{saaRIFcM*f!Mlfnefa+jTEgO3Y zDUQtZ#HA*$_F;TDLe-3y6dJlhiep!&Rk4AFbm8dzjVm7YzK&ZN9IioSP4MEr%kt~_ zhJnKfNl^CmD7!)wVWpqRcn8$*%#1J^oqzlzS=P~_nA8>kHpA2y6)7_a=`H%h5cd~f$WbcfQlwwjB#zV*k~86B zg!)0ozoC|o$dkh^5Eepf33^29nJUT(os3PBsdb)2Ee5X1WZPP-*=f~(b_%21UgW9Q 
zFyeZY&BP|VFI9Uz#2>;sd?dD>2=17A>@M>bFKHiM>W|Zs_rv141lR__ml!RVkR#Je z3hXn0<|QrrFm3ZvG|wi->5J^Tr{^{y zxFswS6c8TUAjkt^cw7T|A)hTvKFaMr8IY>H2arAIjEJug%;`skh2k>MJ7N?N&7}^&0#Y5 z&(HVom%p_v!X?>m!1DwwBZ=vH zr?qT<9V6%JBLGvNDg6Fcv@c>pSw2}+m{`u1R78onfT2XSI!RL_=eJu$elxo}Jv!(G zol1DntU;6*MR|qR3-pSZB9D>A*o*(NWB8dY$nQkuzR%x7OYCW;1MtcWcFMyH%^nUQ zxGcYSjIcvC+@sH+h`ae(9uWdWOP%s=4u<-Uj#qB>Ucqa}#tYenaw#4igAqQDV1|eZ z9^xhSBy#MO4~(@htFi5*lB&nm!NYo1yj1BYon7sR$4aB&<_Y;vhrgGFd}1(`PuCn8 zUyiN@Qw0-%Qk_H3U`G02eZDL&Ocml5A&;X=KK4=BPoN+x_L^fsI1le?7nip=d=%~~ zKldHP00|ydw}o}E>*YQgXuR9fVj|m7A~DQI2^=7@8Xyk8@4tLswIRG!9^;4B60vvN ze&ONIb@J}FcizL2D9Ojr@_h;*{%9t#spgk8ze)$hp!c_BQKmOV8J+14qw?%lPcjyM zZ;K;q4UR=Rvr0^rzhmy&kSjg_7Dausv%xn#3V6#&PV&^KgfjlHBm1p-^p+r=P@OoY zCZ1}#9{{dS_z*L@Ix$49&T`{1^@zf@M#0V;njHX22xNwveL+S(){8 zc_OMv#0Z0Eek{|o^f;Z(PY?1@cw0`@Y7jCcWau>#0Xdunt+mVcyNVBSIIhJMy>hYm zutott?o;RL_r62OG8}rDj_Z+VEcl?u5g2a60JxhEVjFDHe01y)??`OOyIg#PDOF17 zxcqX9iheqKo2RzHy6Rt!>NE7O1we}oK0jt!tJh@UPN;#WMt7=F#YJSuYHjmY1)O^{ zJApGe!>{a7FIuE@F}M!2N@uov_}ucKYLekxD%ccnKRS|WDhnLls|Z?_#Qbfv*Oi)CuS5S z-7|_&&hk-wFVo`09sHf_c`3dRai_stl6^dLC0kZZyc3<08YvnB%LZ@k!V^W63E4pu zSm7`AiYXdQc{^YA&1 zqWA&c73Ss7wazKg;Sdjf_R`#mAbt9u($zgf45LJ`#%1|~C@H)WPj<}~+&jWO;qj=4 zbc`XLBYqhvyOI|*;v*RcvQz}|L>35k9uqf36HuMB3L6G|DM~m=T*#SNSQ2^&JSyG0 zNt<(1y7Gctb#PL%qFOg;ZfK;qM)UHZ=7pQ2tEYLvZK7d$j&2e~rM$|jDNnGbHsIuZ zv;wGvqMOA_n7PNZvm!bsW((4U9Sd5sboPxtD6|8&#t98DrR0qS7~&U2MWPQeegz-Q z?xlPX=i1B1204vD)~@^zMGV-cZH2nLC+O3!qh`ZsbqyaL-UFmpwyu^aJ6rP72v0fr z>wS47zK61y1DO^axF0K`4MKQuom0uRU|=6YQ>@Cp*dRF zbw_&hZ6Sl5HSRkT)>yB4!eH_}v)8t3>sBRJBHpe`JmIUA!MJAoAQUOYVhOHdLaL?2 z=?1-FI?1A2JiTN1_$#x1^16iO1tHk|j?|YjC_9P|P_#5BVsBfbjA2Wq94OQ5<@~2T z3qu`vPnG`N*}`J6am%L78_wT)-nI)a?Dpnfx%2B@_3GtauX*k6&Zs{>7;fKj(JL+< zFHPF4oL@Ou%=i3(#VanpILr9o%Kt8&YiHTUS9InFn>H`**nZLUezCBnw{=5TAkf4g zwGJ$HhUbsYOTWLU-`n~ZT&Ts}GO|q;-gP|r*+v%Lg>N~y@QN}=Vqm^8Bcq!j;n1&Q zc4LyoU!(x^NANIpC-mTr`qtrFDMjqR)i3f4hGmyuH|W=E_0;wH#y-yB=sJC4*XeL{ zt-g))?M;u|cdQCE3u_Uie%x~he3y86bI&u-Gy|7PXY%XRgd 
zl~=FS)pF(4OLcXhAir3UU#)LII~-l5Z)1JCQr}*wZ{MVEU#D+Z=-Vsw?Q(r%0~P1X zfuYM*w!c;F^YrW(&(l@joW1%fcxR+0@^V>Ce}ld)=-WTix6S&tSKltuw@W0H969)U zsd;-N+j8DHQzhscmY>FbKjLPZ**zvGWPDrh@2tAa1Bo(nNpUh42#&f#)59LUZ>CPY z?WVVA^_Vqow3Vvp< zetqhSlLIvmy9618I2_q{N)2A3hhDq#)xEmf1p6PQ3$s0ZgRatUJvye|7iJ&ZqN~em zj_$QHql2{90A@*70cN&QNZwuLCHIG;S7~$;ecRc9hYO?8t5?1XkBmlN*LZAax-i?r zt8|ri>(N2ZzA*b3+%<|$m1JBK_~3>+_%S2vZn$G%j8%dlBM-YI{6>tIRewBpp!g+k zTifuag;(+bi@{=;2cyv|)&I(d7cO4e^jio#UmO^I(i^lA=_`Dc*a|H&Q6_EX5~o96 zsje-V2XdIsJo6;fC;Q0^2xBNRh%?&*90mt(y@3SkHY=8Mz2*_*OO38z)gX8?XWh5? z&HY=3Ow>OI*9}LnSTRY)5*rX%I}FAhvfM-9k1ndA){ymYZKz*;R#0DDK@Boyp{}uG zsOvEZs=UM=9FBIZ3H2)sRV96bjb8wD^(-;ASs?1c8i*PsAuLwB(S->KfrxUzy33N? zX{@`8F-Mzj0F`O>a+4=et(-)iWbQd*Y?8MN^#y8E&QN|#5%A3;-cgbu7UoEgP>U;; z&iawm<^>II0)eAd2rM_~>UKs1s1{M}BniEEt?>d&x?Y$CO-uf$MvR&|u$X7^GkE1+ z;{}#4y|AL0MP0ohplQnGit5~Mp1EDt+cm)ZMqOP6-m6n@NCA=2d4h|P4@aD7RErzd z*3oF&N|1Kx>iLbwUXw1&%#F045$)E1UOh#?Rq2JiMD7hon^*R5weA~`=R=fKYuD?e z%@sKM=Ja@8A~1)ejSbM=pj%s3wtvE`zj+FqCMO~B7k!Uc@)8^kN5#sQf5{JQYIO3; z)siCX*|y8C581U{53K@WhprL_ zPHS^sUYIWAa)gd(%hY>GTG~Q|d0qVstKm>*{>*}VneHznXk*qI{M;jvD)BSjE`?mX z1Q9JL&V{>)B?Wf_|E!$h!kQYlafBXd%;Y6n?~w30n$AclW56CvKBIaKbPJ7gIGUds z5x`11b?`)LG^Iu$=bsy*>^@TM-J4E+f4c1a2m2m!Gmy@E9uzF#U;toaRU9&q>=KXyy5>wDT%7 zqfghrxb=(aN9Tq+)XEo{_x`m0VTH;~lk!mharMqy7Wz6Q!hAT?iF|npcMr*PPaO2x z1B>mfCB8S$5nT`Rkny2x_`HPCbpSW5t~(kzJ{rm$-(R}%I4aQ`P}T%$2IyE(8RYt) zMr$t<=A{NUFz@u#yhJ1_OX~&jZ7d_Q623i1y*VQb@fz`jhnYM(^0B!JSWBAm2seP5 za7^Q;+Sx!}ol~tUaCp{kE+v|hF5oyvr(AfEyU=;>ZGdNj0z5o{@_OMrQzVQCkOJ5U zD@D56ii@@6y2X*U#x8WCD2fUyU8;?HFgFy2*^1a#x zopX+0@}YH-Ta8!~Dp_@YB@a|iHa(D>Y;tJ=dNemk4e)4nHk%)TIPM;Q z@MxrgDteMB#$fLqPEE`MMSM@TihpBss;}^gLQpzORXJZvgYq-K?zfi~%4djhptUaf z;4Z(dB#`3cyy_eb5C^K27ABJ2(<*Qj5(+23ce*@JzsY%6Og8Cb%vvb+`dldw1PQdV9ZdIs+JW$WoT}n$b(jaQ_KJ` z-pnhhGg_gsTtnsEpZ2>qN%2KqkQp-1E!L8+I`cg#$Vxd_xUyoR#gibC5Ib{kmz~Dj%=HMlF8e`+u_eyVzeo#c!rd zd?F=pjM|Nhoh28p*;qVG24%qbt$Ywfe5)h@RE@*fZ{2KQsd6D`W8;FSi2zX%+8pEi 
zA9(lYc=_$T3?Gs;$OS@BdS^C^Zu-vu$Rj_`JMZ8fl!sgW4&=JtJ7e^Ay!$HDJ>c1H z@#Ec9ZKDaB8th*#syX7Z%Taqu602C#d)M;kyld}Z#V2lFe4>I-X*`G(4NLlG@w134 z)?lfrlv&PXb0fP zR6nd)jml$aC5v-8%4^n^alJv4x51N#=8%_)%w@&T8IVKSV&3h{aNR3-ZSC zxvoU~UQs#{%E=#1nRstzjBo(3FtSo8T2}&*Kcv?s6-gMkSfseNE)g@HBGU6}p;Nu~ zBYH-H@wi??GIETFE*)F{wX+e8sO#&9Miwxbe?~Nt>+=6`_dalPU1xpw+gvP{f+vR3X{ z4rN(7m7rG`N}v8&evSughZCltDpW43ICQ97#ts7x3ReowVqU5h97!}IDk`uQY8Ibs zy~)9slL{VO()7{9XHZ%}XW~IX@lTmVTTBKKKpo#+98|t{Qqg6{Sld>W9?3L3s1u2O zwxV`HGYOlH%VE`HS|!8Mv7B}J{7<}3=GaJgts(id1ZLQQA~K+qxK9QYI)daaq_?zu zEEq+sLD^*QwDC2Hi@h3BTW3f}fVzd7i2`Yx^+U+eFX|Xdnq?z~Qkk$FpvI;ujNq2U`P@gay>I$II7;{(kmML!;uTGk(z84%SPO3U?HBuC^;e%ZqXP->b; zh1gOMF)AvvpeVY?j~W{)GHo@s$qhlI(#iBjlPDmdkby+B65`NoZ$vQxfYxkcWHTkB zI(sDohtM<3PXg4wq6l^?R_K+-?^Ulz#1>?9!#%+=o#*TMmXK$fuhC}>ra>o)u!4Z*_LeEeBUCwhd#Km zeT&#uxYlt;79iF6fsO5dyCYd`^U*s!MmKDdk(p%>W$m?*|K)GJv*Y2~P5^(C{xWvU zwj*HY>CRPeM0nMSKp^N~d}7t=&ZBqm?~&~fzxvC!<)dHr&?_E%`2+Xg_p*Cm`lVm8 z<(@CTd-F@a=&qR;zvzXVUeND#x4!C?8#hi*Px0^04Y#kq?bcf+*R5Tnf1*Q84)(nJ zyF)B6_p`-~mhX*F6qAX31JWXtaoza@QOo+uy9CLBQi2q}%c2k8GT6&AZbfF&Z|Ez1 z5QSjj5W?JW#LAVSZ!{|sMo7gA`IO-gz0~5Au3)2pmK&25d1`}e5sY?_dU@~bi!GB5U`1Jtu zum&)%zLW=WurdHEqYZ?W0hr2JIsjBP1DH=}KePRW2MAswo={_05xE6kwGW8tnd5c( z#&~_Ty1zKw{S%e$TN%`e`n8y7pz6^ueovweMY@JybPnKvmK6n(S$X37v_oILtVFHXC0?4o7;qOjMIN6&N(qf9_FF>vIN zV;4ah5rfST{bu>ch+=Elyjf{fHz;O@@7M)nmP+!-#H;9FS1q^Eu&}L~#n2+6#H7(M z8O@cuJYRJARqFDU;VxgTblJ*WqYRzB!kajN&b%64%v^CF&eWOVW*6=Bp+LPSt%#`i zm1_1BUskyLbXLZrl{rHhnmxsvYO{0r=)}o;ZIAD8vvU@)9-5uEB5L-_)a*fzejmvD z1ti&ke5}l2%FygV-c+0AFgPtAfIfM$nmrlQL}_}AtLy)0_3xmW?#{EB0R%W=Y)a?IT^#*qmBZGuR%$RM6n;K-x-%0eH0xK=O(Q z021H_h1@YrA$ODs^W~93?jWm;`Qrx^65RZ~RRaKy2C(TURwI@kDQE%!buL3+8Gx9j zu73DiNQA`Cr+Ta#vShR{^>n59M$$`6~Nb?FY*aTo6J zsN1qBEFfRL1MhW>=`(I&zUr?L=K5p#JB>`WpMO<%O$j^9M<}L0_58zE-=%GAGjw_v z&MB`!-}i7dktl#DaEn~}BnO){v@K`6yBfl7Z{PJT6Pc!7t2GsjWwcVwxhkx@%QqFD 
zK7!|oPw+4312)L>`&+1*X(N`ZZJiB6(p)pSyEcjY_nsYQU1vIqXEZk>jBGMa$!yAcQtBi~P2t}?PRC=~|fNRp@N9Qipy?n-bo+Q#a`<&7)Gb*JD4pq&9&L3b4LJl(FSzNCt6nUHY5TN-ggR-jbUotduK_W?Y}cpD|)EZ zv6$nHjuG&@HpZo(3Gc+TjMW-IvG>BW762XSM==e(I5zy!Z0w{B^um0>wyj4iEE=0p zu+Q2cNSi+l6jBuonN73hf$b>s_n4vRiy%7@!oj+`M;1e=?U-x1=1%W>;H$Ib5K^D#3Tk=r&CtXhn5foU0w z@%SAjl0IRbm5L_rILnV%{UgI%aHRanvEmaVqO<(u@}Hb6e{#C`Wt*d z#u9+9Sq}wZV#=8Z11;3|Fo0*v(ArZ#%VNO?@(*D9@PS#I%k(IWBF5F1G%DpuFI z=`ye>6OD;rWTD+ma%+mhnhT|!#YkK!kAv}2F*DI_CGCV$?nu)H1ioya5>8M?Dh~@5 zg`LNKGeo(JN?VZQnISD>ChRE>XcoDHFMLl3em=FN>$A2h{1M&@39kx33Q)h|1M(T3EN8SnV$)Z)8fHqFRBC2Bwy(m5y(n^cvW=@ux zfl0?u+$jwZ1Hqu`h)vlC%-urOdd4hzNu8(4b)G5e+-##AM*Fb}+BdiJQ{S<_W2un~ zS>e7B5X}gWLJ3f~GmJdyGdg_{i!A?718Ks1|4F^;hGLXpQ46*PNcqV-{WLDjPISaF z1!ed_&!`BS#92U%K4R?DdBcy)MJ&U&`gt47s9x1EJ?NJhmlYCSz>_YTeL$`x9?=Fs z(qwB}ue{eRLZDi0wPvuSY!tZ85;_lk+4K8_TpCU8q)!SE;8GFi| z*;nn%e*fN^zzU$X1$$(b&TS|vjZPn0d;X4|g>p}h6g~MOf#t+7SWZ^JLYXMzgQMrf z?MoazQS6JbD7_v!D7-oi*bh`2VgDtQ3o&5y?kJw2MjuY84@(W`Q@59}dFJ+rP3!hq z`y?GEfV1UK&KIBDrI8*S9_d15q<00w@E}iz4a2ezAXxO(t7?N|=<)riDdU}}7N|WbD?Ws8uhUDSVa>tKbB0!mw zFI30(4|ja7(s7l+hAz6zKJ#IdtkUsMl3PJ(t`lu##Oe6W2puijE_*2-L?49#BQ zO|{u$^hdxsxNNh>ZY@E1(TWJleKmVnDvsUJ?H+yhNvln@xW>4L++-NV}Qfl_H z6;ZQ2HG77|@(OdGoh*;m%ABJNqdmi$>S*UevscY!G)z--40Wm6mzi{WMuR+?2ArUX12NT=#N&6b-MtbJT-GAWt1nZ3DOR~E`$ zIbyvP1lSjcS9k3Y1g@xPlNzue}5qD{8!G0_@s z^Gu~pD|41I0O9nKHZSo4sL!P5*ABONX>AGQSJny?8Yg?(u;hl@JX~qh${g`F7nZbn zBDQ&SNt-9iZJsLHWS<`6Tr&jQGi%DQwKC@@qjB zbg~|Ri3`bZWI}*r^m^;yw$<_Ya32>deN>s5^f=G1ONu9`g|;JgtY9vkSX08!sWlNl z7;NBYxF3fr{a7D%2;dqjIWrd4F!3_hLJu;_8I7(9rq-}Xv*>ekSpA4)+^c{YOhyg4 z^ssiL6+cAmnWy?^S|94vzNePpG#Bz$`XWm&XiyWcjSct5TE}5wwDUbJ9F<#pm zxqo=PbCvO`44Pgjj^kryR~a9!#0c-c&z@}zzI3D|p65P>19XPU_<#8+_kr?24i*Dp zQz$}LF&JfQ@Mxz@^z0r%KtCy48i>phGpR^)sob9kE%;CoOR2sK3`@Hep-uT|( z#?SFUjh|YzT;u0fl^VZbMbx`Bfh1#f6FG z8oxSGYJ9e6+;;9>R6tyr;4YE&7WAlUvcbU$S7!A?m(j}}S&Q8#ot@=+L z+6_zyrqo6RV__B@Oz^yu*b7a)wK=A)?GUjNj}3ZM-@dlIYADxrYHmgv=Lh~otr`kC zIC|nSyY<`TcnTbN`R3IPWeQG>F 
zo23W5z2ktV@6L}1@vWM#*x?sX8aX<^4Oy=q2DpQj9t#-fti>hEjOWHn^n1bbk_wjW z@kP)K!*9L-zN3sd`S&xcnp(Q?(edWH)6@6iWl6iZ3h_$b_%dAbA0cfsh5!HeQ zRshX4Rdz$rT(bnWA)IG-(H*wtAg)_O-MP{#cgM=i(h7*W%p1PA>R(9uFcCfiJnpW| zwQcf!k%A6EowF09!;ov>V!1Py+ag7g&3H6^1}iy%p)Apyu}cF6f=P6|jFG*z4*A5p zy1-LrpS-?PtWYfDtm-7;UmE@y!=JOBFInR{SMKbEqO*p`13p-Xhp*@yiQ}JNdHhGp zRURv5*35W}I+T7}I?yn`I# zfZkXnnw$2RB%G)e4=oun@ifvMGbtapj{Bi9`2w9F`+n!A>bcqRm`N>PZaLiQrDoEN zT7m{p)vaqSh)$fcSB7ZJ?!s!trj@U=PTYjlfSaib*x-)Ig|(zSE;wbKMuy32GX}*2%*8CkWKBA)Y%w zR^~Z2AYn651|Xc{O%zUmj~Dytqw(0qlJXqlvHj(q9w>SWwy;uIN(7Zy5+Ppu@MyVl zrHW9)3p_1EkGL<2imFVD<)hQWd@HrjY8v^O+@+W7DPi)`BISzWLfb{%G8KhR`0%bHV z-b4s?jtU9~Y}_SSSG^$mXpdI4+$31y#5^!s8p=U~sD?5kRPP(^*ZxYstjwJEYu}Q7 zo#ur>_rMY>>hwsd&9ftsjVuc08i{o)u$Zm1X=QeMo7b39x)n8SrtK~_u(xOcA%y-M zZs2OO3~ZIbNy)RKa)Osr5$SV^RDVFDjt+*0ZtP&N$ct0MeL7v~QypKto9U-`zJi(P z=AkJv*@bXi5V=ybl75WfzY>OEAqb7UB_Sxju+eF`ast|>AN<4Q)os*t6Y@Tbv z5p7uXIYDEKs)+kB?!7TISU)9#Y#15`D$uYp2Pp%d=7gs#pZ70p<|Kn6U*v&xcVWp= z>teG+d?$-oOaQ=yak%9@m6lb8nMr{IEi6>I&Fsy+)_CD~Si#Y=x7?5YML!Jm2T>mh z0(M?wIc?G6Mx)H9!H3HzYzAG`6+ST41$F_CznhoMO?fftWFnXn}^iP8AJNT+o%tL zR`2|bMKV#qHZ0OtcJmlrc0C%9T(V}jx{+ub-}6n-1bM%e@|zDVw3jpGc1oK{Vad`6 zdy;BCEP;26I{!_@rnQWnW;!7NC=7#GZ@BM(2D4U=Q*|tpwL~?2K+3m zN<=4fpoKlCmmUBP;%#`$=>73jyC1D>j=s>O(g#FIJG_X{Q3dLe4?4bpM1&Dyz(U^S z=tZSwZLH!u+rjsHtz-nzm2SmG({8^Ib-~Y>etilO;8%apmF)n_j8>P86cb z2o>UsKJs=mJ^} z(m-e!b2?dz@A|b|!XkN{M^Y)S`;Thd^%&EGKxn;t{S<#QYi95$&TDYPPtj#he79WN z0X8;IT>kw|r3ML8pW$w<05AGS-;4gm&hX?9YmYFnPp}#=WonWGdQb|f(5Q)!I&K1z8Klj{*_yj zRh*=SC>4N_6#CP3{vrf?XkI?9x07^<%V^K;#+iNxft*gEKVqsVZa7YQ@CIg3{Mu?s z@#_iV&*dFEK-e*8W~!pb5#v+SW37?V>FH^0Kfbwt+N`X9(;M{N%&OHd+4SO>7rvRWD|UY|_gcKcLkO*%cjZgTDPOZ?x)jrwq7{{=66QKP57 z*`4g6o|>|!&5d*pgsi5^_#}$}u{zTBiC=>S*T;* zP8zXrR#_paV@ik!s4Nq4NWq)3_US2aZnDy>R4JdXP$>aQH`CK{rF^^Yet1RO^^5RnVteub`*Em7xl{@uiYK54{|8qFdB)r?Li@ z0cqf}A?ajaZ|Dv?@3%ZQ>JFW_!^0Ykn#%P=gm}%40*`bRz$QF3xOqG@s4n`yg?k zvev^Bw_4d~?1o}}VtQXUaciDXR!57Ez*H>BuGDzP8#mf{E{z+U`fS3ST)FYdO5?1G 
zmKz^N%TV9q>xd8Rvg>_4RCxi`5(H7)8=uPSy97#L!8&OP!O6thXz&dln+$U>^MM(j zF;?c>?1ucW{CrR1&hh_S+upgK=PI`Vsz{p`;>@r+ zz9VmNbST!cJF2@iY%-aqJT#N-Bs&$!CTh+ah83K&ydJKGmp85^3FGEk>O|s{V^E8u z4^o6%$FlkS4Ehy_S;kW=Ly^qj1>K~>t(;i734P8PGTpjDy(MAd9DBNK9d^4NE{p<)Mvk`WiBX_5d-`Kb>MboG)Yv(;>FV^GziBJjO|3Jwoy0 zwl#puoAv!@xN@WIC&7u7k(RoaWGP7Jg>4T)9AM2#6zEa6AMILgNRc{ZZwZ&W#UqoK zf#yex+$;HlJs2cokuQ!Coc-wMkztJ4uQ|C?*ywaZS2l?(>dMxUzJkgv zX{R{o&pzF9e|Azo#a2wFZ6%Eak0P)8(zAM_eNnoyR4JP1yYuUueII(7Uf_=3NHEVs zf$KIUjS?;%QS;v}Kq%Fk#v1`5@TUTYGRQ6m0Zk7@3fG~5y3>Gm0kOO_jPfN!af%Yn z2zyMO{ER(_aZmKEL=-C#1H@7xhNca~=*Ng5td>=XL0jq!^AN>(!GK*!B8~_XB@ z1PIN)Eg%p-7`6e$#UT&~BudG%3<%ozxY^LP70U?EyeE09#YXmaldr!Q1Osl>U*Vp{(LO;?T!NwveWD8A>Jb~0k zS%%3)*K!$3{NT_?I>*de%R&iMWkkq{qEI-}k$9dajUY#3Z=_?DJxvYQYWLNobd{uq zKYrhSw(e)7&EuIdfhtMb%=f;XO??XYu}yuVi0F9bCmC9!4E=;o{HoQG#8kKCFUF^C z5=q2`1-fU*Q^ zO8`q!ujN#W0q8!1jt##663j1{pj`}1ccmzkS&hm75!IXn;yw<9|}2sF+lb2OqEHS7-NkIzPCv z{j0!IdVaBvZ!PM1TLao;xwyk#Q*oRAGXf<~y6Z%GIMZ+9fHM!#NdCRJh{kI*@>Zwy zdb8PRL_FD2k2F3EF{JUEjweZbLzhC?uEIJcYUb=ydr+7F7{FQz8fbigV;cV=WGi~q zxuYW6eY&FAIY5`tMou2rl@&7%Td(VI&{Q*%zlY4m80TO#I_Lj!&lBs=2iJq)iPvo@`T|pHbva1ZBb}XzG$>YjZT`W#lsJKIS z6D3&T%vhc&%CfMbIBAqeuu~8YYDs6f_Qw!-CAcs0Xo5$D#Gp!p{7EfbHSFh+L_L#k zA7DG=!Vy&aEr%<+c;U-F_82G;RMn&qJsQp!cRCn}cT4ce@t=lI@Ljpxc$`N2n_3cZjwJIJp_ZUdV1t*o6#Q zhHL}5D%rH5ik?SQ!4CV#OD7`8PXMl^h-&xoas=&?1?|FV&~<5-gY~raO!_uy5}z^V zd)`sDY@+Ijlx?b`ux>t}(++AO;R#9~ghv5pG3DwE>;3FgHID@!Z-ONxdGI)LXZdvR zV(FXp3f5Cl_5*_;p1L7}fS|`Zj)9O>z*I8jrwfG^3=~nV1$%H1cn}vvblDW-UYleh z0w9*V(#^n&!lL|rx}yE}>WWa?qbn+Ax2~w3SzTeGYf|}aP`IKiCeUSFC%`K11hfs( z9rzbh&VC&Sj(KiK!Tbzwij~j&-bXM+IiT%06$|Ayn?={z1P0w_vsv(cSnsiGfFW1R z8MgW|5&e&WsR*M+WK{v~G7NyTT{sCg1^#5@hZu#y*Zds!^${2~c|BxD7Vi};QZ_Uf z&8UCnaseI6KNO-eZxE$XGGTtRmR|@a%zqf?RC^=6*+wv8L?7+^h-_}ms@@XL~%%dfwxlK}1mZ<%Tjt#0(Cy0v@PUs4v z7Ig)2QZ?q5&=pFO$P@(;Iwg9RpCNUSk|Qp4U~~A2wXf7tZa1hWr>)__9|981#5ox> zT{Mw{;t``?;tR6z1+~DorK@V39YmLP}UWXU;Q>-ikD)bn## zJHMz~WW}RhKqQ+(Yz8C^psf->OMS6bHzmoErenXvjRCssKsk)uAPf&K3t=F8Ox*EGvVby{#OfL$e 
z<|NU8m%?wqIg8vBL$(DDGwbXKT&ikH1c#w->amJ>0J0$Y88mj{xR_W02}BUpL4pv4 z#Nf`np3I!Se&52C$!s=rrV>;ZD=D!gAh6Xd@^!%V+fskWk{BC^QPj7{*6n5Uhxm@USP-^q+YR zHi~S)MfV%QMLf{41=4pxUROHSS+F=mr3kf&aXp`H$aMK&GaY#*dz&-xNFFz>GTdU* zD&mNILL4TQzK!`F@Moh@h>B25(EWZkYbw_e15!>R+u1;1BX*DFi0o(i8rE>OX+HjI zY-dYJ{Yr6pNIudV<590}jwn@U@4L*|R389QeYQ7NUZ|LXvX(n z>~8xOnxesJEgEI){nesTroF&iQ(vc$<0@5@jRn9nwRy0nlQR(6o4A1IuHO;9B4M_k zyUq?1e>7O>+DAg`I9f1-1Qi>09Juf6P`AOF3RFxOI;5_FY=c`?RHl%pSeXwf|EQ(= zxo#i3?2up>U3Sh4#%6E?it4zH)3gy0tU@Gt{PKGxzm)>61XGZ+vp2fU+(%Tf#YB8u zYw9-%1?{E09pis+4C>IHs8D^C0s@(A{fy`@E#T=t4bsu^YXKdgg@@Xp%~GBOmy)j` zc%fhgbJMSg{Qy=o13BI*(?mZlAl1@x!r)d%1(t5d0J|ehhCYj{wSZ$;9^hCg1-T*v z0S@>HaJKUZgA}4@&!d|{F{;KGtQtVPcFAezI$Gxu65K*O`~R0Dmrs=Lpya7MmF z0KavoOkgu)$%9dgcD`A%?9-`LOj$Pf3?3+U_>Hmy*k-3Q_u6|>T-91DyB>sy6xDW? zIvg*pDZ>!}nV=7l@K{p02+ELv>$pvYZ_U}!!erFSBAg-RWeq)ppF)hX2}1C6UFR?~Ztp-tS6 zGI3u-A`~mg*MUK0{q+BD7$C!zLSZoI#V}|MUW1N<*UBqC7;{}zAQ0U^@-j<^pkkgI zF)YFT63F;cb1{Xg;2s= zeOZ+9$s?;6YE_RtP7f}{tc*Qb4)Uzb0KR2_*Mo(m;f-Ga-AMtNZWt)(rooL3`+`{H zuly(`l~}JzOTokSfUlZ86jsd$DnYmZT(ExFKgIG`eoZc=3{iE1sE5w*$10aN`a%G=gAk^Com1lv+bknqDkcfILs6=Ai{R(JZvsti6q)dV#k)m7s)Bj%Vs*TzvUn4bK_3Do&{+Vm+g zN*`;gG9auQn+KqR3*5uSV{1v7L}tG%p8x)ubDic(HdO3X1~f>FAx$ATNteL zam-w?(x+O56)@kRIE-k-fDgI_?k=m0LIP(zEC=@x!osFrLRbn0A%SLNKvl*t^7s_* zu_Gg(jbRqecwAt)T*l*KIYMMCKZ=`sU?nZ#m|_h@C+ND!$pWDlbZsgF1Q$eLW-Sj2 z8en+?pcx5N-1Y+OGVvjAK`!`O0EqEr4_zgA6d|%Z+*pXn9^If9K%GgaxVjN0ev(;Y z=8{mO8iiq!TT1)q+@6Ibj;kU^i%E3QWXA1bu9_z3f^p7Y2%z)t16Qa|=s7N%`>oK{ z3=xMhR8ccmtp^!9j%#VCPL>RbNfRd%3P*Vq-$01r?^&#`3zD0H@4Ufx3)PmOu7#os z1Z6yGST1%1<`H8w_`t?$kX;#=0s%my&(HF2WS7BC+D|aDwFF)9wnExwxeBI)Q7i+y zpcFT|Qrr@hqEqV6)uUlwiDnwjPd`gsXkpPCt@fks0R~F(OjinOH3XF=Ls$#G`~_wh zJDZ}OOW@9K11}ORg4gVLg*T13g%Lnu(3Fn=Aby*}K(S2_t-^MYI8Sgv@D`xA7#}0t z7zrA+GXxDxSpgIbp-&6qDaGV;enFOcw90VR%T6sB1dv%VP}?g~w#v>{byv1D!pn76 zp{gRyXlziC;q7yiH)(eSZZR}#t3kXNJFX`6JsJs4{{kAaa=Fl zS2{AfN-k2czr-H@CXQ-Q-xS-qdq|f51xswAxK1i z``ZVJ7+l>q<>1sI6>f}bw82TI%YbD0>CHZy-h8Ox40*b2b2Rd~3PUboM~=;Kc05Sj 
zMrzZXCEOUr{;2C$S>zUW(E@Y(jD||w=(4QDjkCjqDVxsBlS+)Y>$+kp5{WSgcG=(( zf+m>ZE6FS|qX{z)?LN4K2I;Q>RGLwoyr`?@nOwI?UgIJ^!S85s$ebl5ITpvi6C+0Q zb3ZtSM=}2^6rmtuwnb|YnYv`WOCT}~vtS}RH-~2Dyti%u>^xxhC$j8Io$3MWSY3cYk>qNCI|8>9n=YIEz zUw^mWJ>lQ|;BVO7PJeghl-=zJ-+9Sz^1C1Kl5g?vKKVV?-Z%Q)N59MN{+ZwXte5Jw|C#*wY=Bg{S$xp>j$i#|Jd(-&HMR}{O-p-`!4nK&3^ZNe)si$w|`CqHvSL2 z&hIjE8~-}L`{)nayEnD--|%1mz`t4GG(^7nS}*lUAL-ZlyG!r4Zyxt|f9iLC-|yb; zJ^p)s_lG{3uWskx`^z+3!voOp5`Zw*HyLA#_mcQet?2hxFzW>Aa^-H|2cmKY9^F@Al`7_q)UE%Je zm7MYVPW-C9d$GU!%x~Mf7x}wC{tc`Bh5l~gx2)tQe>b<=-o3!z{ly2Yx6GRbr-Sdc zcLdq#?)@LKI=lYeWiNTBfA_P0`W+hO9qqhH$zS_DYhk0`{p4AzbJ`*L7Y?1NcK*ws zv~M=}^L?MT+Hd!7uAH=%$*L~seEbir(?|$gCji%+0`|UFZ4>EqJmQOF*yU}+3&L6htoNy_+`Of#-b9N7?)YU(- zesIF&D(-&4DU1^?|JS_rA#+&l`&DfP^>f!b;rfKt; zx`o!n*%^l7q|%zm3Ej^WFM&ls2uxoxUHHMw-zXo6t^vzZZ-0#)f&KroM$WvYX%2| z^zen;H7qzr#$cB_X8sDCAXH)yIFXY~iAWlK^Wylco7$9RhG$z8AyPQXYdBuGkA6W5 zPcd+eZA-qrH)a`&wkH`Y4Kzde?^cE-e?qpkn>^gZ*0qG$4nf77_-5~vm0bRtF-M=m7|!{mKKxFjWy&K*{0_-6GtkUv#JoW?g@A&48rKv57sRD2|57v_pzU9R8A2H^V3P% z@Zg162s5l;O^WS^3qjz>cW#k*I(m3>XPZ>2L?D^6pfq^`WARkS)s&gG> zduRyO%*CUCGx0F)Oa$Zeh7JzHnndFRSd%BjU@dxNqAf5opT3t1j)2WvPyj2?;|-~> zfQnNg`mJaeu#5(5P2CUOHzW^0&!45BC(%9|k>v>J*}4=tg?7CWaxAKJW#ovVgwf?O zQ$b7*IF)22Y0HckjKXYd<3O|u`lBHX)C~awYoSdrRYI*HR+vJ>C{Ot?67cItD1d~U zfPS%tje8;xq=R0^Ug*L7+joEj37U}r@E=Xm=#&y+(4j%%?IhCK!t4BNfRiGz(_VR~LfK)u|M#cT+4pP6?gVR~QuS1G2U} zkh#LF2rYU~7Hzi`>Sb#SxukxFj>_N~wTy}*`NV6K5~VvrMn&>`8-^xOUU?#}37a~K zaaa&4hk>JN73C|Wxptv#!<o)Ky)s|qC6?7MEtP3K#tQ)sU5amQ+m-O9c zd-e<|h|aR5tA<95=4)(ombUqQa2ZK+vG3OQQZ5RsT3z(j8-jL*#O z804-B*DLtAnF((gH;)N{@jo7n#;$;Fo&1~DlbUqS$1Lnv%kQKPavK@BifeCJyZ?pZi8vW zgEhqL5=ADwcgu9o{fzFTAetHjm`UG&u<57j$xQm{VD1oK5rt`;We?PtJkt<6 z#o`(VWnz4%m6?9R%ftaaPPelZ`n0xCM zQ=BqvGEF;Qr^loISd0v)RlUZ^)S!cVR1*Ll-rbvs@+N00)E%X9=0s~Ao02rm$6n7l z^2nXkjaSUbGn~Q0eXI|*GE&m7Iefy0lOQR3YjquggWm{VKee@@%u@# zA>(ec>><6B((J+Nw(*b?sUbPduFhv|Vu$gg88~6~57q=+6R~L{-P)yC!j!6^gJoaO zuo^fEfKv4X5}IqBTSkYJ$l$aOaH6oqn;Q*hl;6~7NKa$YD2(PAZ7QN~l8y`pCrxEA 
zq@WSwjOcva!9YrdKfeP>ZgL>Zrut2h8EAPn3qyHDVncaFqUGBXUhYX!#|uPSMNH|7 z>y5`eHRB>sAvBV`yj*p79WKh+f6?&J{L@j0MH-fr<(M_VC~hD_uV`A^u8GfZnr;qg zI+NUyFe}GS1zATcChIz%g6sx{AU{cWPr5ig>_YAQu0y}X&~2Z9AJx8Pmc`l{7)awy zk)P%FvwdO>23GS^%usI2w2mMoQkp+AJ3~4p}&Kid-B3LkHA#o7&-~aqaDWGwwksQ$?>L? z{$J33i{fgcIQk96(N*q%u8F?q-E)M&=h*X+GQ^p8m!XYP+(q;o9S0@I&}XdM@O8Hl zdki(2%T=&00l6@C2S66(E12Ar2ZkfF#f7x+`{BWe6p!5_JVJM}-8#q8eriCH4p#Io zZ?n$NYs)joEY|Z^$=n%<^4OE)0LbX5Aq#Us|Gy&X zXp8YsGMPVK@X;RO7}z>Js2nMQ zuttc<4oY!nK@I$lQY4)!%pGhTZGCqeN5x_+C1pys(x8m04K>C9Gm4f>rbhl8O(uf~ zv~>fM>9;nfYX)AD;JmB%$0Vehuz?+Z;q1Vu?Cc%W0u9kBQ;(`KHTF6gQ(~EB=hszB zFsAI2HWo`J$*NrqCou97V2fOZ9{?l+=kmt>*c2e<*K2!P97%SOsD!&{V~$PrC!C!g z4KkZ2EzR+8qqO1t6dMc)(H&*ZNltbJ()7u0zUyJOkakypYo5G@OtG{NBW&u%MVm3u zq_c$UOp|qTl;ZtqrKt-BMLw;UX6eWb(f}n92%ywP)H?Q+;zgKk_$mlvdayRbv*{;F zEtr+mZK*=#ys`zCAvmdDGJczGcs|}Av#ttMrf0v_Ezd{&xf7#7c1*|^!(jb-y64E! zU)R@|r2rZ8XrQyT20{S4>+Hsm&W;#j$)%(`nWV~%oNb|x>X4+LSOeTdOc?qaIxV4u zFz!w43BUYS{5v=u2VX!Gtxhk6BgYrpw}mfOrN9?s&$?|e_PPm@0|#6L%miGRny<_J zU#_c<(@D0gI#5Gj-#Rd?pC%uhliCyEA@{4CLa`~WE2jHX1E2MKf}aigjuWhlft$Jw z&=NTD7-Iuqn9Bw*!}+lZw!d`8p+X}MJB3bEN#^gaol)3t4F=l!)|~ku-a9{sH7c;b z0V9;P(2}*l2xTp>M?uyCL&OGsY$CncG2%t}yp9p7GBq2q$UQPTT5uvL$6}BshCQYP zhCLY++W)}V%%I>m+g30r7*wO{QM`RSCD+Btc-L14`5sPvSvjBHoF!KRu;Z*@Uct@93?HtZjyOTU3_@CQb)T?2}0m zzBfCQM#xb0(@MuYp+>$fm!`4V9KGg&~qP)Xh1=RjKwCB^i1{;Ly>GR4+RWK?2r3IG7h6u`13prP3WI9O2nokVpHvC zuxTU%(WH=pXc}1_2<1}8NimI#qbYrHz0u%MP>zJ>L6Df#qC+fbXMkQvc-RXGYRoR4 zNuP8oeXFbgC)yYd#sgSkV(rys(5y8AH37YjpDA?Of_+zOuh!aK{f>D)*4zWRVvr=e zTvTI>4XN=pUEHj!lXUF^`9`2*m))p#GQdbbquMv~;VL*26^@u#9n07WcoGxqP!6kt zbAodb-1L~Nm>!#@{xPSXV#SRa1@pzF-SMVF!<9~XN6QcxWymIf5YEO&jC^#L6e(2E z(8z688km(JR3E6l_ko6yxvtXSOy|63;hAnKnTwj}Q1sNC+m{@&qBD9OM*I>2u zZ#iTFekqKBq|iDJ$|KySHdKe>{qM&^(Hp55n|bqT-FFSfQ7pvhy|aR)@;Q)JrraP4`wq0eE2e z)Tkhc?-bG9Hq&RC)X_4nv1~mp)QSZ0g^&agMO79^POh0jPWt2|ST)O%hGd}_k54p} zMurxoF^~pZqBJnlcwW+Ai%4TE?*wTevubI7mWYF*R}e8hEtAKJ;y{CMXz=dqiJAhY z_OTN37sqPEu_YL>p 
zCISd7t3$kUR3eVYxj@VET$pY)2S)T<$kg(M|4ZZPDxcC4#8r0YNJ990iJHe`Q9;)^ zj%QgiWSug%d{$Ueb-6Iz@2@owGt6v~I?3V(3ne@l$1A`kPUTq>84Gh!j`LQwM$K)+ zgiLS>my3EF{4jBj&$5W(ae|S!etbl%XOquNlTR%sU=)o)C-9g$#nMd@I<;kBNIukG z6^!9~#g>wn7&YCoXx4QQ7%gDhiK<~js)6VUl%fnY^%S8lBS*qq9U>KMSy!MT<&i=Q zb`d?6)(=lIc5WJPErdZ8xxK4*qtYI14H;5&v56J}9z7-PiJI*;kLtfbq*w=99VZew|y!2$6&`F9;FEnsM<2@WDk zgP8f$lCI>R00+g&0WiRDc#6UllNG9ET8woQ2puiM282S>LMba{BaT<8fC;I9HHRQ- zQvm^|f}-P8OvPuczXNyk0ve2FKw_renSa`FYPWW}c!ARyiK{h1lWA2w(oEE3m5P~2 z&{(rlRF`xae(+UTu^;Ri6;1dU_H;$FB6~*kp(La$4C#ixO^!bKP>0b6EKI9Z zKj^Obfq2&3;h$*wzQ7Rs$c%_HE#U~(DUg6WI=@oK3>O}y&a}qH+qCnaO`mL%VZ>5b z&lZ+9gOE6(@)hMoyfx5MiS*P8OKo+9NGrjJdFdE!w`RX8{yNuU6@MK*F9IXf0r$aj z-$dXNsFQOD$~<>=|;sdp`z4ImhxH-aPD7t5siO_VBFK)x*&%Yan{cJfx1fyR=p zhat#FAsddE1>%TKDSocUbLzz4{;KFk*HZ|@+_$1!TamZbCy^qQq*Nd9b}8J5^)Ky! zDBjb!7LENLg3uzVn{^x=_RXFX+oL;pt`GyY()mwtlVW8O<1zZ$hwDR~!%256u&C5# z$YvM&r1oqbJZXRGo^*rTk(bAI_EXlzQ&?Wx%k_(8LzWQ9%#w-?;?K*)Pd#~{o9gnY zUmo|%TM)>}UD>gEfBODxh8r$SB4*OlX>VP+H5JRBw3%f;dzcw_s;m<&E;9-m$~wOp zgH-$_X={kJ|98s#q_u;3=QJzjmffS?`7I7g;mLoIHG)^OuT|eyYgB)RD^2nf%^kh<^qlm!CbSQ7x*lr=GwG3rdCVGt#7?dy!9c6uFbFJvzxF0Eh@KO>!cZB6%m0AXfuF5=Rgw}BoT`yWo@M?(*naA zt+HK=0}$c{50eO4CSoMkL51WUpajl4)e^q6ocmI52xwEVN#9NzY<9`E&+H6dN_NVY z_NuKLy*g@6-tuP=FEvZS8gTMCj|Gv8K)%6PD717e66O}_Wvbc+BNu5LjBFMkj4Y33 z2lFiaWwCZQftffr3z%u=MCE`T!_5x)bLSt6_Dz z{p9B$i^wRYbuMZ-7X@8e7WMv*5k#NcK>kKap&Je5|3ncK)7O~@8ptC=(3*nD5JBkj zLImNi9f+X$4rJGi@-io|;rk1`;e8m~x4X!dBRkX z9sVM~S{yTujn40&fOhfaElQ5Zjy!%zp(yGt;P4h{o6kp zmIf!C0=v-0-SX1;dKu4&%1awYSpd&b--3VqG z(1k(9a9#$EQu)yX`4idL)XwLB;(Z;tkF43fq1nh`ZDwBmX*=uS@}2L{E_JJK`Ve+x zKLhn*0gM6uBA)m=!=z7hxTtug(`VAX=-{BQ8LNd7n;y3tKw>K1Zs7c&{k=~oL#_Sm zkiK;(DNT)V9h#|fQ8M6?F%3Mq5w{E9zifiK%-WG z9lMw@1H>9WqFllFPIETT3Lie{5*OhM5;`P9ZZPADeLY{7$w*!2)s4;%WymVcvY&Z` zphTSRcu1U1XQ9Q?<`(b0<^r$cwwE&(l^cS7Wadgp`K(s$PmCOO~KB_Lpf zc?(iaMctu>O>DW$<`h9mmBE~8KIg$Ov5qZIUw<%H27|fOyv|^jj6*9sieAq0dj%cM zRYW6(hedo&IFceBB4S@(Se{V1O}f;Hq?V!fND&*OTL(Jm-rI9l!Uv= 
zh|h;*J2Okkc8NAq#3tL<60$jr6^-U08;xl@n|WdLvuu{1FAv6uj^Q9nXVQz$XE-&g zM9`rURvwPZEH@nKD25{uJs8e9?RPYHEb}&22i@Jd%p*%)tJ&82!31j$gR9IJX6-@Y zIF6o+s}fcRc$el^6)ZQCUT$`NI_pl|U1L`vG3M$raIq~wF4tY9n-o1R#e7_F3?`2N zP$=st{=nxz{4EcFpsR(}fg5W=UOE6Ew=@6<{@&$AJ4e?mqupl@mW+0fM!TSK=6ig! zvo_i^|7F|xed7_IR1Ns;zAz)kI$37KmfKp1l}q@YMXJRV$~FP!Fg6~PM(hcxFN>-6 zQ8>*nQ$oaD2freB#96}vX29_}%pe4Yi6v5;!bWGwHISfpV6yz_N6SrjD|x(D|#hyKaWVBIJgs>*szg%kfqx z;c;2tTa9rkr-d9CGQ>|)^J>`KRwzPVzRp<@1fE1by}NdX5Ou|%7?qB6*GR%N3d7T= z^sMy5S@p3*r9tXY6)%I-iBo{cro&zVu;WsvZ2q(I4S&j|R%`1&>Tig=2_=0vYHMg{g#Pg ziI9nbwL38|Bqr29GkPYRkywLdy>#iT<&6u@2xATFFNTX-m~c$SH6YafWsX#r$zt6FS`h-TfECJU5=Arf-!9aV(WX| zeg1>3EVl8Mj0RyJDch#V(*!r}d|6uCcDnYqZ5L{@jcv6yq?)N8xNt(S`~y5tu)GpS z=cC(_yxSc)1lk)r$zE}RFHhHI4}Ig#kG$&;2l}S$RAo;syKWR9CR5Aip~TAMPw(0P$8ubZadP>^1ZGXc{wXdOmPIRs6n=SO0%Uzv8` zcdb-PoZHSShEL;lO#p4fw7N1*Vi4}(Oa-OrR}*fu9Uz$&G-Dog#7!n+KF-?g9;laQ zLUhT9YJE4|j2^Lk;bz*~CIBfa%`+L{xOIyl8tFR8c=`b^X&DW{FK6sDqQW@KWD#}A zpQ%Mp@IXiI%4}jJ;qS*dP=n2H`x>X_QW(R!u=;&}Cf~MfH+Vx_%i00UJ@q#5VudMh zbpCKU{@&Di=f?QE(V5%WKF?vikc|)o`@$eM7 z>BE|WHl=jZ0T3)^*UE6ueX;VUogUoBl{W?>ssQ2uwf&)_*c+E=hp2F_5BiQvfzUUH z;299OnaBjm5nl+rGvWjFqQm)~d~{VNs~{VU+2 zuZpt%m1fcgK`KqOT@QIQ%`+_(=Cw!K|I(AfDF^@%r6#7dztX~QD3RX_?`d`lM9>wd zl3e@epcDhdLwQSQ7H4+6DZ`YIt99`3pg$h0J>7*BRlH3tEu!OiJZmX#wiF|_Bpq~H z-@@szS?9m2a%RlN2C@hk?90KcDxXCqvw1+8S?j|-t#h8B0;+Io<+=1Zdu`iaZC~bN z5X2_x(1#S0kn$YMdX0Q@Xm{<#jS@@!77tXwLQt_lW~1#-C2TdOo=j@rIwLE}YK6_U z`_d8EXT+l(LD5U|u+D-Uu-S6xzH+Roik|ksn_)lO*#%TXjd~~pG}(b5X!jC2LYSA5 z6yYnheUFmdCKD6`LeP@nr*suuvV8SZSho+7V@Wrw{6>k6vV6jClpKp?3%|L|ZkT}U z8|8c>s?cw4ZJ$g047{!IkJgk0Nn_$dOa&pU-g*LJi95~kd0KH&=_q#ZcLNzeU>2z=G}CEDjbqZX^~Vv z6Hk%E+A}%vZu9+0CnUBtqp;pNVY&Z8ILuEb(4_CKRZ0>Tk!V}8IrK#R6~4XEG8x4s zq0N*i4rZYxZYG_qjPM)+A<+%$4e)ul6)0mj8ht@Gj>ZrIBzPK)3}Dbcl%$Dk{C%=6 zCH6}=K@9X8gu1=bR44%lrgxLh?@H0q`&s@+zfQ`sW81xA+&TR;{}l`R^gX-_BQT6f7=syY?xT;u7N(HIHJSB?Nz8jl%1!WNjvdPuCl!U^ zg2ruhr4fmZ8s#WbszCd;4_UTA%mpb#>?#=%7!$jCwQgIwl#K=2ENwfW;GrGpVHW+m 
z>BHY*+rP90m?Te+aH)Nz_KdbHxdymFJgG)xv*P>_(v>)W_M$NsqT$U8PouIrv<6AT zK2aaJyC#%ta8e!b$kHqj;AqTK2Ae9b(l@Dtjsm^dLbm&8B2ldfW`#U*F6(3PYU43_ zu4Z+x2mL~6iW+O3ZwGmt93(|xNY`f9G}}t0Z&M*!!i37UnOJ2w^cCW6bY?WbiNuwy z_F)jCKmw%Qs>ZCOVs3*q*hE0vBDD??(e664t=w2+0Ktkwko87Yn!#B?WkeA`CBqQ9 zGVw5Wge#(KWwCizLpLKpz@z%vS00AgQ-RAMqF`ORKoA*`rIwe-5yl5h#pnwE7>Y!K zK(%jF%=Pn1d1cAs5X+{)3y{O%v`{_ysBJ1dBLA~3 zGiZl{Q78fKkwP6pj9z+8TYQ4kDEJo2(S6s;<$&xrdk*wqXzan>hLSMo9>R(>GIE+> z#%JSrKmjCQBEzL+#A5-TTb2dO3>8;v|IQoN4t`&%U5{Qn4Dj6h{+=7xP8fyBLa!an zI#;Tdaz2FJmw#}O58?Bc2Ws+p=0o7So6Lvs$A+qq2WpGvvPy&?!+q;c{@}y*60b%O z@M@VnP%;0(^T`8M4>=Ozgkgm|P&O=?#n;aR^`1x5jn2)BMDDpxmzmn{-Jf-!NiOS= zz}?`Ilu^yCoF|Y&=c1%D5gYtPEeF1>+gep!VSn+7W;u_l$~RN;D&>1@Gvg;Sps~)o z;mGixR#NI;oXjWvre=9GggWrx*|E<1XzZTkOww-&b*TIXa-C^3d>` zbH~s-x;>rrH*`K>-Sn!GN~g{e;c`)5T=TEr+#Tz@$J*JQdOJ$Vk4>HB6od8czvSdB->k-!y) zUVi@Q`^+Y`b*yvY27SRH(;s;s$j;|j=o;&68GLB?pQWLEmX)SwRdH6W*uJcMBUcJB zi=C@lV)Nx?q?j3aG}U>jZ}mnYGb|jVK4uMxLRg8Z?`FJ~^i~-dOg`G;DEgQDeW-If znlkn5eAEzi5JVZR?BStEs`FxRZ52+pmzw$&`OT$1|D|CC4xwu5`F!eTu&ML9Oz#ua zzAfQ^T(0Wdu>_@2S1e`c9a@Yo4FgQ~4#E7KmzWY5sjtB-F0^ccaVkbti-xDM{ZJie zEScjJftVcCNO3LU_onhQhvzy{>)Ec}bh;G{g}A_Fpgmm>1wQbP+9&f>#@yQM|F zM33V7xVyGqFQkhB<1I|U30|Xh3ukDAur}_M7$!_>0I*@j+`|iO+0rZmrb;C(nwr64 z!^-P{ud&|)1{Z4I@$LM1+T4e7wi^yX6#)RoJsq)*kAlBqA9p@B+}EhBy7D!)lsENh zwo#M3w~75#nA=Sj_+rNFHa;8gshxDLJWE1Nb)V3RL+|jH^Ltc>T4wuE)A- za|~v=8;L+J+>M9&leXx`;0~w#bvW`twxS+c<ZCK?y>*CmL@H_yg=wU6M?os34yik%-ks2X|JH%`&%PVg zcQTi6bz0AZ!%xAL-VKFQDh`CUCfS@rxMaJ#&bFQR+OqE1(kRl^g2RC04B`ThvD~_) zEXS_0GRtWw?f&>KFjHZKrE+xE`tm_ zrX_UTlvoZq?JO7I2C1CACtQ}CpI9}$s=7>oPS`;yHJJ;CJ`Jd4^ zL~A+RqU{SKItY!#6r}QEoDL4T6gp4~sCfD*R_%{y=@enW>3eFD6*mOId~8+LCaucE zp0`#D6I&k%QJHYWjI_!usN4Y_DPgw;CUwLybvOcv;aM})D&&i0Z8{+yjO$sFSm?UY zFsz@eY4HQuNAyxno%Lu23Y?e7ykOI@(9Y58x1+9VE0kGk`Uy~4oOLQ_fQZw&uIJ=L zT{qm9p49br40m0bKrQNu{5!5I2s_5r;}*Cc17=PU=dRxg25ZKFd589-E@RF%dh5+d zm)1pYk^VO$mM3QMSTj#0;$$&NrCJgDOEW(sB&2a zvU{Xw1UEY(BIiy&U#wwd=|VAa&(j31B~hvrZs;N4v~>%QD1D_pP4@ftB<#MW#nt5--qKqB7Pqm 
zQ|dami^g)6pQgYbX~LS=!lO2j1v)5!Wx@;C1~E2ok@}McZMK^l6y|}#EKS^#9KDHg zG+1WB-=Llyu{BR}_DDQpsN42CB&f39G4lc+`Hk@=;?u;jF3orzDw8farz>-kv$`Vd z&*+M^a9US{*ePAn9Vd0A7bm#p7mQ6X6-S~jAX2qt^*mSJi=DZu${FSb8oZ+42+T{m zLR8n(AlJ)WMaFmvwGdRv6w3;fvXk@v3OdOgB2uO0y!+S$(!K?l(C+^u(xuz@e5&AL z{9`x)d~SD5LMy+-C_kn1GmKWejzBScMH)1_Z>&EOsAV=1(a2Q4DT8{3`CiD~!K0S# zTN$(2lOhMYcFGpM)7TV-| zPfd&be*;EH_xl~?#IW3I%7)8bsv%lVjO-@Hh)Vmyk5NFo0TwG5)Q)O%q2QrbelLC_ zgUNx*^ys-F4-xAFi&(=wOE+M8qdp0k^P`4t(cGz+T8!UkIZK=wb8vIrluc!!$gnrz zp8nr}&0Aslx;0q7)>}XmcQuF)+|X|sKiq~F zD#FJY1K#Y7P`Jaa@iym-adC!rCyDm)_9)DebDi$F&I;+ z@E2}ON;7*CV-k~(i^YT36I`zbz2~ z=Esuwi|0zfID1P*rYL$?ASl01A=Xzz?@ftzBXdG><$Dj=kjIsX&O0<9Vrs`eBFs?0 zAunA5!CR=GIWuVsgMiJNm{$-f7FGj3$=o2o6ktY7N;ixO*9!8+mc$iBS&9J{!{*5) zJIt2=_@@ZOwcLIRC9tw|8PLzkNp=x0QJ+C~hw%QPqGc`40 zVx-tk=uFGnj4A>y8fhHd8tOF@$HPr1-enTRFzMfjGRhX8`6)!`T1}GRRXM_!vM?3j zSTa2i#4=I}60Y52>NlPR~V( zEJZ0(iDghaHzpa^vy^*;p{;;VXtC6-74ZoHYo|7AnxwD?z%f@;94fNghl-5SAhA+a zOhYxCGP(gyF-5#vBi)t}N*})u`I9~29&+02AjXu*4een#Y4(lJ+%X)abDT3_gZYH$ z3xeXnyN2cg_nJJLL7u_ANU5?|3&?g+MdrG68Dv{L!}XATv;2EmX+H7Pk@n8`SEkd7 z46Hsutm@&42w#qYHRz@W?fdUEav>dFR8;^+6&t!4-vHfMW$pq7i{fGW7!Mk;sQo3r8!5GitxLq$DrvUf1^isLiT)*cZ=nXA>udq`DFGLhJBHED#!4 zu(rXBuk)WrKC%rec*nNxyA-5{GzekQMBOGpGLeI6d>cRLRbKDRHw}&qwypY@Hfl4> ziyhh?M9DZ40C^&HDCOEX#DzzaqO`E4RdY|D z*9j$cSnS3=)uS4XW094=8c5^k^)<5nHSv)p<3$^m-Te#*DrfCQpa)oN&@mMeOcmVo=OSdpPhD-$w*D@fLwfKD1DyM7$31XXFhb@5!WyLdl0+$i*eITvGHkAo1JoS z3Z$*8`dC_v8wSjuaKqRj3k`;cINbcEGt*e%`nzDSVj(&?t# z4!{48e)~uM%jLhkQ`>+lw=>n-E?={)ZqJ_)cj=MxjL_Kd+Z6s~X+_>j8IX}GoDXhL z?>nDWw7IP}d6YTphn)|PSik!<0GQ@%u7MhAoo6Y@4~oqu`pgm%&}oLPFYI=A()ksF zB=vC5;O9R5RA4r-0Ee`X!@}qfwfCjIPw28At;cr)MBlqj>}%+D7eqjXnIaSoqSkAZ zUDmvAp_3X|!8m2tmDXGNgDC=1L*TZR9PQaApV2;hP6%z6oY6i=s~LD`h?!yQyB=A;Y$VBugEm*uUaZF z;njYsB{0OG_83ah2;*How+PnGMym)!%+pD+bkh0x#0HLF{d5x9CTcGi5}CpxU2_42 zz;Qyy8<1!w0!ZsaYZ8`{(u*ITv)E3RmBcv7Js-MOX)+pvVyIi@Ea%<3qEw|ED}PV2 z$&P(QGElLVhT=R(z3YBv`x1ILD7uUUr8>o{*wr3(r^JGtZxgL>aZYb+Vk7l 
zM`OAHqNr~&DwkyAD-(V)bpU8*W3I!sApq!F&t%FV1L7r@BQe;yqEf$uIJ z2>L7vzwrKyUGr&|D>1xwomWR9O0osr?V~0QEj_BcgH&S!T};xgU^=y=cTz<1 z^F1;?@_mFQ{D`sWo&>)nAMQ=;z#wLgGIivt#O4HC{mfTsH4A_f zIQ;`vo@ty+AygD$t@tZDd0>k|D)Sk$9Edn2BE3(*!~q7Yn;C6wp3}&$G8JEq*=mBe zPx4EY_jh0;vT?cdmziLAPqGKw0&B*?<MBpvmWY9G?+pw-N<_>h5TfDN9Wl=%&_Mp;AQ8384rX* zqJkS2y>DwNc9I5bNQt7C02z~SxaSfp_P}a1z-m;mvXuu+c_29st8=jwGnN9;VPP^b zNNbi*85^`#*Zw-fWk}H9pvwG;02HI~7 zRbmUNv3LaLrR5tWxJ5fpZ8GXe5Vc`rK>$F{Ygw%cH_QzMa@Eo(Be;@Sx`Wl;I5QSb z+EVYJ$ldf?Be)huaG4QYg@iGky+;0Cl|gB8$98IgFlt9x2(fm=i}?vtGXqGE3uXVm zbpTJ(G6X;d*WFOL5TXC5-H-t(VqEYTEeY#Xt@@h1_W`57+vJ%gw!F*(QT{zL?p>hl%F6rBbMCqK)~$Q%aY~Ptx=ZRl_v%^Q zEjJTmnrS+2Z7pcJ2MCy9C7rdHHOz`x#MPnB6$}n^Vs++PAL|_>gAe=JDs0?Z- zi0o8@@Dw0BWspe$!YPAhD1&TQCURURN?hY8VSfK_?{n_CRV8)f;8{bXI*+^0eti3T z?{9zmQq$0lG`!mBXsy2qt8L?=;q>Z}r=| zDMVnI2ey4lKpQfIv4|ZT*QZKx{VA@NMn~V0dq-gwe0zpOLQHUtgFFONynNd5PKIL( zrm%AmPxlf0SrdPy1dJKt7W4F6wB6<>xXln3R4LIu7Dam}0J#kwMf5OsCl9tikh-U0 zBGCEN&=CeLX{UyTV}29Hh@#FtnRWUAps_@B<{`HbdH$<)SGX+8m+6NY1Z%r}aJyki zV4<(NSEUUN<-ssx+`e?w8WI>hmo8OE&q?A^O+Q3+5X=bOV?i77p)`0GbmcRWl4ABQ z@Z>JwED-%Eq#dIlO;E8cAQF#t**%@0Adqm$UZLjiD_*VGE4+LA^H*8j?v_V{}ZZvHAc(dM+xU+@MVG{BUgaVWOR-g~MN6>L;umgmP zcsXBPYOu?PD~Sckh7DWkx9rzb5Df{aV7$~xC}tz)8NN2-aBMI!l9?hVfY4fU$PiVh zD~DRl=!&=^uDYT)5L|T?#gU!TmAJB#T-}^+#GQoth#LbL&xh&>YhYQ1qDrIfktWAC z>F!L8>Y@Gx)dOd}pl6(*^Lmzy5GdxU7a8G|6gvVlq%mKlVKnq0VlSytL!(744FRQ6 zi{QB}s0-(9p~#l6nI5VrX3wqg)Scc{o{ISoGfT#AU#mEm;lDBqt5T(^@SE$UIQpz6 z%Br(sXs|?GYTGd3aOG$HEWM$92ShSx!~yT>C64o)I|elodFYAbLR0#&CytvDh0qLE z2vL=YLMd+wqDV5Z8e%vh=E8N-m<*}TX=&s*bTK6!HQ9nLRM|;Fu}gF@%VWh&xAKq} z@u$kk7Ri+L)tvpRY1>Q0T`oe-x0n~070A%C)a&yU+gn2I3%&s0sS1ihZBk|mru2X$ zLM@>(5g4uZsIK!Z4(o#v(cp|V2<`ePE9vx%->RqJG<_nXxD{~zqMAXYBes~#4(cyL z>?&7*VMSLE{EDt1__D6(u$OfO(U){3iP9xq@rt9rk<_smuRtZu%%lHW7_l0d&+9u0K48Vve^WdeVcb!B9xf-es{4@q+h3B4FInR7V%Lyd#-&Vo;}FJ*4;pX);GjkS-8pS zmUQ!*!$ENvTfdtK*%A=o63y$${mC-kcz(Izg{E?^#IG7f2pO>W3v<+VWd^uZoMX+h zGzsQLmBb&mmcQnOMRzWiUZhk#4-AI5t|!0(tYF&-AtzuCrW}GiOWHMw@r?Zk65?LZ 
zL@f3J2sGOSk(Sl7ZKFb+8oRj-FSKv7sT?t;QK%n};0!eu0*KiAhzAz{Of|ixKaZvl z!)_Kt;pXfO8cQJ;Ad@w1?vSjJt|Iidis&0vEmIY6Q!pKWmr7Ml*qg=f8&TXs^Zta0 zDmu1iIZZ+7W@kVHG{;EBI4y8hOTZZ$lE8HAUbOs)Xu3KVw-lPBeK%}68!NF-> zmdlX#k%Eh-soyhIv3@5uW@5J}aHS})-2du9IT|x_h2g)Bj%B|f&qg)7O`NRf&CU&P z#jz)1C%iQ1G~T~nmAlJ+GYb+S3wV&VI$iqnBN9P%F;D4fZmjuj*a4tg8g9>Y(o~P zMO3~@_xrUa6WCry-IH1zMkjRZ8J#?xLG7LCrLAew*KZi#P1Ro9e*JFRm_T=o7zP?6 z-9W?kc{hQ#wnG`;B~Dw~d6Z7z{vt1F{RzjJJ&-jWDyzH=aRon1Mhf{fNotp;2tQcA zTTXI|p1o#)5s|P+u!K;bCmUjMx};_2j7U(-RcX|0(1P^FN5MHR+O6@bL6jryB0eW& zBv+};EYL2Hg(h!Nt;N(S2YsRmiBj323>3sO^%0TG6k6{Kvu||ZKJWo{`;M{OH>fAT zl@B2ya#H^?r&pL8-7J5;wK+r3yD#ahdig8XRni-UlEY>dYLP_gdS~Qxxg{YWSx5P z0Gqf8Gle%onqHvhyLbNwAhx{>u>j{=K>61;eRE5w0<&HM%#{SOd-(v^Gh*5*`}yfq z4j}H?&J$!eQ~2C8-9zQDOnC~hNd(1pbdhLauP7hbm#l8_q3$bly&=>}uUIHe2kzSZ z>du~5_Td)Q@98OJb&YyQI7(OuN0$35FlQRZT<7mv$sR{AtzwTaiH7ipUol^LsfXqn z+NFjBfRH&8($x|$JGtal?~Q^wrxg z)5)a~c&~sO|DSq$F)=Hb@LOV+eI6H_E-(NeOjPYg!zr0dO$X_q8k(xW#Krf0&ui4tXW z;_cahR@#P);VZy`9h^L~UzH8DuNuV36K35ECt3CIY@Z&Hen>0n_PXe)bP}0!lM(<@ zbBuOFk1sZZ{GrE7%}#X`Ck3|)`DJpUd{~ruf z4wgZM#+e+E2<8b5$D|5jqCe_DVbyA-h3KIY#QO$essA7w3v*ST#5_fAd4RM zYf9D*+J@5RDqN#rx|H!Vn_tIAOhiM9{uC)kXF3ySR`ut;9vk}J*% zLIYSpYDvN^U>z+57aR_SNN7hliIxW7RhAb?#e@*b#>Ktp8;>1-mJ-4P zc%wyCtTCR9>#Ccqc(am<1)Q_0z_296J;TbSxQ?q}BceiWLe`A>fVp~Ctv=cy7fs!k zu-##WBT#x3-;0`@>dHdAj{4=^+|g9+)C-nbF$X9{i*`6u=vtzq*>wJaSRukM@hstb z;vJV$RpKXkKSAnKWCDoOXUYbv*!ie^`z#i9hn(q?{h|F+`c&&NUNx1@&@=r=-wkhU z%$v&Keqd{Ymo9~1CZ;JJxI;CFVFL*9zByen*U#xnnA)ta1 z(?UkfR<(7klJ6s|Z$*l~zC|yrQaePeXF`qUwTDseRqfrjP!}1m&tqh!L5Wzpu(oCz zn_q!SrH;a@EAFCM6bM>u!*ejJkoJBeUrlo7r=CvT4kK7i|D@lt6wo)`Fj$X|Am1re zZ)vC}xEjT<7(T^)n8GRc0u-xkg~@sA8MHy!f<=K-_Nu0sQ{Vea=sjDzeN*o9E36%D zD|&zWdGwxDqjRD6XF~6%>HQ3To1*_S<^JdF#KzHsfgT*%RBZz%kV%${z{tY6ma%RH z#;yg!0h8hrY=aK_ip|C`4k*1mzPF1>G^!5+G2L^pbOPhY|Y~q$-|6JWAR@_kNYUu7V4XmhU z_Evg0@OP!uU3SwwN4=rDup4!BO+EG^rNAg7rd~#1ubMr?v<1o8tF1}SKwGk$w!~qU z!u?Vr{!~^OFP4_Vk&r~EZT=6DcgrPmZTj}pYR*0WFs$R{3qj^Pt&cla5FlE|eujl6*RTD% 
z2{eRD8Y-_AHGCG~F6gR7FkR6Vr^&vzqB1&?2O8urcJlFb!dN7JM4ues<^+!7q_HjQ zbDg@Fhiml?EpfD(6jx(=Bv`&8M=XBvQ5pPoIp@HSpy=YK!kk6}J4QIv#GA~& ztC-7Q;A!St9AN*Xf3;)W&3^JJM(5$L@pUO&l5~D5oc60*OzTJ%THD zfnlYsv0E>v@o2HBE&V5gK@mMtfIz6$bwG;PP~Ns+1gS`(;6|o!5Di^=Ww9Vbn_qEY zOsP~?&nW7$S8z71GX{w%U$C+4i?OZ%98^Pf4re)$Zz_02S(w<;Cn|m}QnIht0ZJd9 zWY6R${qF1n02_$dydvIXLpq28xAEAl*KgsmF@SW>#_^Tvz%{S9wA}17$eM*HJ%ApT zvozZ3bEoaRY1Dk|l4&yUN{g$z|K}SgQe`YLw4SlB2Dwv(h!=Izuh>wS4wWZqM;_J& zqHG6X<(Ro$-6*#x+oc~%@pF5Mpf|qUqaS->>9E9RV(*S%10-1ct$*B3Xfozsk$5PW zLAK;t_<|Gj%;J$WQ#1Vw&LQ(I-q;h=ImW@3ewZ2yY8u$Ag^1LB#DiX_1XJBpLi@Hr z34y_uCQ|~{N^ebM<=k_0s*RJ=o7HO?t@An;A>t_PlWJwX3F89TY_;SMe zJ!-hwwhk;bi8_Z!p4(KAD9=!ceFAp|MC7cbUX{xhDnqW9hK)Yn1Rx%zB37&qPJ><2 zg->x6>`rnOQppLZG^IU0@DP$@h;HuQIb4U@?G0EuYMpOp?N!&V51{}v^1f(mp4WoK zsTDYB2rWOeW&~6xp;x7ZCYmjKd#)5_?;$*EsW6ow%FlxJS{?yBBZ_edZsL3za)C_V zNR80D+dRXZQI9pT@3f}CAukUXutWi5Ku$rLOgl9*@0J5+i= z?oJL6-rD*HKK{Ho?E;)sNHC2X@WLJjff2`Dg(EfuksaWOA=Pfg4A1O{Kr(8=15vU# zX=xCJU%WS&4F8)>v3h3@(u9u<+10gTg62^7tU zTF#P(M~%6|X8AK0eORyly$2M-gn9T|9r^&qU8&K9!tEdve#8%?AB^rAtSCE_T zR)3Bo2z3%eYg!Uv2ptGMr%8-D04|W3a%59n|@?(>wfANuVmo#gI43@D4k=* ze6noDARAlX`4hD%2rOFvsCOd_C`yR}DS?goiluT`ur>2gNuR3GC5!uiVX?DFA0|YJ zsW_boJCC|MktFCXM^fT9);#IkEds{7?N~+D&eOC*APZsLJJ=T>KW+SyIHQ@}wka!7 zH1@r`WgtXw%jJw>u%F-ekGDDqp>9tk6$9>s*llP7Sb#r=(|7H)!&NFr?TrN_$2o_i z6bd&qp1@i8D^bfEVY^vOG*$^4YvR_Y6HW=)7ZD+GT=C#SK><$AHIW-OfijQk&eMnP z!ZsXw!jIO;^wSNx!iFB7%^hTVNo)%cKK*TXa$r0*Ik++v+(JaA>7;N_yl2v-7@yak zgUvvlX>2*p5)(h(9`boT|+$aDQh+{mxmr69OA-3AcF&N zwBeS6o4G^$yN!P-|6ahq7XR4L=XUdN2mf~QuaAHK4gY?cfBpO$@Qm*7c*5=;QsS8s z%Vxg;I^Gu_?Zi^ZER8vPy{hT|3D*R|ix$0U9O@-8fiY6FnWA^Tq;lJn$=ji-lS8(_ zPcRe--mBR5$exf%4IOp*Nw&@zS52ZAwwjJ<*f;wxs~?Gu>YP{Dd0$*fsWO#f}n zpKJQ_XZ*=v@7fTsvIKbvMbC-yA+nT+OkJJ$C!W?zChcR~*NLr13j}9^j?y zga#3N?L=Wjpt+*w<@Q#*>!U(>-8O6^#;*I5xh9{2*RAvysZ2biX-q@174f&Io^ZV! z^Bs!Fj070pUqscJ<=#x+@a_`t=5)V{JsbLs1=KTE=7Hz}yHiy5g8w}^sVnq9ZC9c? 
zVXLPTaO2zuA&(^=Tsv6SjtA*wSvwBtm$l^4*%sKZmO2Q(Q@*PX39fY+4nNhh-4Yf3 z*+6(pre!8`hdk7Ln#$b3FYol};sR;RsWV zU`q~2Et;kk)NJcSB5JeN6Vh1m0}D2sc*W1v^xZN6aY$ZmX^r3^6VRHC746fC-70pH zk{ye!+Fz?@-K_f6IsWA~>QCR6tlW)|fmeV=Blmi{ppi4y@smOMQ{7v~AQsHXpvU+;@o z>mAv6@D`ccoEwO6#{-}_@PoFGghE&oN%&}`&py%-16F?7c0AiKwNo?4i(V`de?87P z0@5hzT802p(=INOn&Sfv;o6D|Pcjy-0~&10M#7zSXb#<-t-0J2D?`dr%n4Eug%ZQr z_8fVH%fj_zvb%_6V;~wBuub1n%S4XC40f$qt&30&TSlzb1spL;OqF2_hV?)bkzqr0 zC!YfMS7S4?{5%n%dzpi5*wC5yx1l{HLK783@P?NwAe8oW!8}M$CQLe2bq_t^$bqd% zg1F8@=iDt0qFRXNxd-*Y{?J#q_FE@Xw};@Ayk}L`snUNZ=Jep%ILGj0 z-m={BvBMphX3Dr=AmSFXiNrYFB%j&(rt-YV{H3QX?P1nv^Cdsg8nV<|v^MtHIE$Wn z8~9w@ z!|G1;IZbuIBJez)b_hI7wYZsBjQfQzfK>_-D{)A%JU`{{%K^`nhiyl_Gyi^lu5&1I zq|}NV-9BH>F<)aJW7O(#P8R()^C1W zSN~~L{nrGAuIWq+qT>yFyTZp1G#?~PPjrt*hTRAz)e3=6%EWp|(@PJH!O$1>e30;W zH{TGx+ZiG~O<~Hz6$|4DI*X z(3ubnw5qp^Q=CdlK)^B+i`ngPFxBGa^mQe*U-Kc1z?0}K>*W!~We{BHRSIY%>>4F* zn#3i($Lh}ye064L`jz4;YD|r!#1^{oSo9$g%naOlAF?tscG*t_; zoAFJ}SllM}ZkGM3^Y3_f>iP=w7k3iAky%W*aK1m2mWN|J!HMAai2?wKT}BK7lbtfc z{M*Q+mDKtMsCi#JfmKUPk~MgvX`3v(;D(f}DEliQ z%@euqpt2g!DM)`=J=?S3pd+3Eo0LK1xUcY3Is8pKs$ql|(e6+%w~*C2Z? 
zstPWVM-}gDQ-yE{CYYBIf81M_1gm^^))h8USl4E)8yRuVxq}c*Yf7?|L9QJ>Z|O zsWbw~1U=;?K;npo#QIif=mS@ z)T?FHLoXL~MNd-iF00PVeE-R-^r|a{w&<0>b7gZEuJCcCOC!DQ2H$}J;8#k;bM`5tUiE3X1svbmTPPCk)CKYVJ zI45Fu@z{YyHpB~|!8zaHoEn^0gYz`_>>q;wR9u2tf`K5)v-<; zIcG78uK1Q8(UbTqL&~$eQSIR2bv|+K@?T9W3i17P`@Z<%=9v>UOS9*%04`eMWBU6` z_Qe(dMcT!9reqAwUq+IaE-Ak>n9K3Rdu5j`=F}@Zxa%%Tt>kbR)9FEs6^)8%y5>i^ zqLHrJJ&A4y#pNf4ObMJA8h4-ITT+o-@dErZBNczhg%OfnbT!rk=A+jc*0k>P|?H4k`OtE5aJTexKHYpsz!uPGL za5)Hm+q!BHa3Z(?hIBR9du!ORqz)k9Gbl>ZnX-}Y7p9#^QOa#elqG_ia|LXu$0ah{ zwSuJqF3J8$G0UUE);x*cvKHE&V!PIAZfhDf*dj*}kOy5=irO{y4 zacl=#5TW+^>H_kqJP5Ix(jXE%yS{Jgn0C}zPBloMUaP-79lba2Xo$0}n9~{%WWBwnRxgGOF zBtXIvxa20Xm%-v6>8ds;0!l?D8~H%==3Iaw;uW~sl9?mF5^uHjn4o`;1EE5rEMdH{ zrSwl_nA!(@A@H4#SNlfCW#CZ80FDP~x<WniYF!%xsC0fpj zm~vo434I^&==(}uA3bi^8?e@`A!KbXj@}e|O_B*0f*%VT>;#-%=EoFUH9U6axKi|w zrVs@zClwtGYL4eK))z z7}~0bvIB13LkkVjj2SQ48GHvkov7GP^p&N1Tb;9&VMG-eA)geqDeG_+oV zVi&);>k2DL3)BnBT=NNQ=&1ZB!T*J)gr8PvHuN}mgxR22fGc}U!q|jthr6uuUO$tE zje%!@SJFo2iQQ+n1Zn7M8@H`4MGskZRoDEcEQu^>R*0!m z4?Pz#zbm$h#y|;9+9`Utpp#EgTp| zN0%z}k&YE~vS^4-e{qh@X6TP^XDrVqLJwx*#rb zRZb?5!47EK-i1Jx;?~gy5e&SV@_}pC0Wd4I1V7L*nb8^hpcI-a!9%f-BGj~hWeJU> zU$~4YRkxQ&IyZ)g+Z}?fx}8|W?oO!4?V^n9_scGx^$-Cc7^MS+6oKajDgha&I+>x@ zhc_rP3`VjrgrxM8XUbdYt;>ha4U*uP5SgTy7J6#zbmI*bkX2LyH#AZfxL#*5`A`zN zVs7x4nOKf^!#r@#t43&Ka9*$HffDYz1#w7$ij;m2`N@kY;dKvTfz3AaLqp3$QAl8`_RcF5TlS3@Qk{9zI z{AkfQr!8VEZO5pli0bDTjq~qW#?FH$lKqkJ*8itbuo5nf$UX(Z`Bh|tj%E<1ZK5Ud+D2Cs}8>aexUfnhTh0c>=hqARu|0a z536q`=Ri^m8dH1;ziev#Fa+@+tt*tmcOXB9oMvBX+bXrGjRgG0N^O&(QceKG%G)bP z=@4X@X}y|d4Ii1LeCF^a9>kn%k77)JWmyrgK(D>Ijlx#yB+(DgSdeWxNtcgBElf^1 zpZ>=Ctq_W+wCRwJYtx|*wSh_e^?EMh`dF|NK!o;Z@WzPA zYI%>r#sby_CP3YrfsC3R+4`rtj7HrJKso*EIKxha0F?jP2T+hxvI6lTuhP#c+KONJILrbU} zE&nHEI;>?%q9Tse68=}K)oc7O;XnQ+)oSD~ydY-6eoAVUxCXaY^hlL|4QG*;>AT`% z^`B8Vi9$#0<&o&A*bP6Scd1xRrPX4Hu1VK^z0o>TPPPmW08Dh z7X70}3T8UD$FnJ~{LwM#Cv>ygX^4C2mgODl_AcX`FDb4i{6L5o1_Pb!{KlDpav`ep?SS&4eLgf`g|7uz`Y z!@C~Kd&`zo^_Fd-6hBOF={LQ$-nOM?@YwC|3$54-yMy}A>TerGsABPnWc+=gphLy- 
znfnBzlQj=@SE7h2l4sHn$~DAUQVol5@Wl10*k{8geAR!8lJfE*a;@!9>7&$Fl(Bpu=!y731M6`?; zM?!Q{2azRW^cFL9WxUv9SoM+^U#lTi>x-32)u)opkxFRf5f+KWTqVXOs7QI@d0_~^ z3@VhRlenLdq)FU&qnGP1nG?Pk9hX3Icf8t?edkyQ{%LFpu;#0Fy5dty^srjSRPL;y(b41LO$7Z$Lg2`vm>==j!iswwMt)DFL(zgU6QY8HtNg0(?TJ#_R zB_t3w8@Io(ud?^as-ox?`Z+z79>QWkz&!WwZl2k^kTDAj4CxMu1d!vZnWL<#!d;vE zwrH!qogp2Qdj~`73{>CFf^+rwPgSC0OVQhpU5zGcs1)skusH_Ks=3umj6*|i3p|LG zao0E&yDU3+mMVvik3YjlZ|CEQv;Xk;Gf$uGG-aB-YP2)thR5C8NGyflRgoO6bYIrE zFMg!c4^H5RJKB*6r-(55905Wz(+Z6`+RU_ms4+GnB4y%b*`Bmi+0YHd4d?$plXv!P zhm{rdZFu#?*bsZj59~I;cpC#_kZm>>b8M^K?osDBp2}Kmk3tAdQ3fsURGN!MrULYq z$yHLHC6+wMQpMrtE_$S@fdJJ5dH%d-BLaU43;ZO6}T2ilYVzz($EosH6Ef&&DyZAT)@ zdv6Dq?^Uu2l;;Pcm&!Gv=CFyik(k66`Ppn6I;Af4`!0p{JKJn9e@AF$-}dWU`G~jq zPP>Wt&DwO*?FNWNj}**8gvSu=5zAokMwbdF{Wh(JhLYDY8BCB+M|GwkI;oK70L;QD zagZ6I>;MkR5fDuBZqVazhaF(TtOIACXn|Ty@vNcjPHs?K!7-vZkF)Hi(nOoEh&N8) z&H+Qf+p~c-u@yG=Sf|6xJ$ZAprb%Vc?{?70(63+UC!E-YJjix*QLx`oFg6r7&$e>y z8x|hra`dm1u!(LW8db|xO7Z@rmQ%PPL#V+C}!k@6Hl^ksHVGv>9 z6--Tbdya&l*#f4RNY{UIkxyU^;1vAnfk^pWXlPW_0x_sVt!${>WDsn>N+uK{3^y{7YLd%-e)w?BVjXG+kuJw=F&J@m%U z7$be0b|Ism6@tE~@&+yk?@u1%a_Ii#HC%A&y^;(1%_CgkJg5e&`h^~?sYkGqGs+EQ zz@HIaKwNBUYGfT2kaeX2(otIrq@uPaShZ?or1rbURna$>MV)ODfaywWm%?>1u^Cn~ zW64qiT6$CVd_&Z)RQ1Oz3C&I8p5YL6b`oNlWhz z;f&o%-CBZw&rQIjf3K%8zRTGPr1i~BU+w@C8}152nf^LvU3X!~gc-O<9k$ zVUs`|?%gSp*e@u`Xa~!$#W#kiDG{s4?5U)4@GR8JY#uzuDKYKTa|s9_6^8)0c(m~S zfSjDyDJ2B@Ct)eGtOV>V$S72uzn6e0vR}Og!p-z60O!7Vrizd?ppoYko%#&jD#BYx zL;|D6VwhEDFHnstm<3FPw(#*n>0`2tfHXvCQ!SA#c%T>c5tK({cSG?kctY%%40D{{ z1cK?xH%LzSCfmyzZqa%wVM!De4Uql8dPk&W4PMu9GaTh5ke{KT&}5NS%CFi$&V-_F z)@lNcwr9gcjj#w$MI>CXiY#+NLx~v-Z6Z}+Nm4^RnKs8 z`7&4Ro`}nuaEl-}>LjFlm;2)PLj+BjO@`qXtc4X4AFwpueJ{Gsv9fc!`-;9)luK`k z0{?cnuc<_Pg6+x1#0#-`Ldb9AGI z-XVbBpiHb#n(ptaljG~H&Iw(iAVsF0;NG2Jf&ba}(%-?}{%)dab?#JMv%1?yS_~@E zf+?G3lesWc8-dk{VlD|6)eq#%93!|`Dgr8-u;)zO=FnbXc{Ch>vc)um+r;fQ++TfN zTgr+1w0b}Axy1!^{ z!HQk(qnp0Ejb*^C@Zfg$ag|k|b#Fajzz#RNsUGY1wJ?qqvYJVn4@i`QMOHQ~E~rKL 
zfj+oE0Lev_UbNDjV4m$ve@id;;TqR$AAs#M*&{Ja%7#8;Phj0(`uo%&xx;biU;*JI z9>n?#{ZG9o+=Op-h&gK=ODE1s-R)NgLyF~h6@;%h z%r2SmO>5wMBmQ$7Vh@0IH;+nFM9hWR4c};Lg%Jz>n+1KdX!m2uVj~FY9qxVVdKXcd zl+v)3sk>*`fS6%#Jzxx=E2^I{5|3)a*jPY^MKaBb;u8S~#*$-*PGd)2};H`LiDt-$)K4)oauYdV*ErXnGi#tE>X5u zx+W0JPb6f~IHUCy7DJ?i@plCctS?0cuW@onty4!Lv-Uny!;?qLuQxZi-D0{>r+0%} zC1{m3<;PK@${Qg^=wtHfffaASY8Pj;G;Vlbn$|A{*U+V84lP6bY`1v*J_wKfL(t@Tn63Plpto@{)BrM@b^q8#n7@XUQ z8xh(8TCWRm9}ljD$hHg0<;0gpO25{}F~4T}u#H^pyq;eKyPCT~{Z&b&nk5phNqJ^} zhmeHHc57B>!N0rHzgx9;WD4x;R!NRCR7ok!;Z}*yLRnN~%{T?*YQG3%WzH?=N;=B9 zs@f4csL)J!dsHtMYX(w~U`J;N5DrN%8L^ezA&hMt|2~~OS`@)NW8VQ0#-exU_#HLNv(!3=W=|Cv9RTfwHdZ6ui^;vN_i zr|SB5)O)LuDG?YcKHticS#j9*hNjAu=MDPiOZ{#SB}d`JK!x~Od21BcIpjyv(rB#q zFcz9r=NUXlZ4a=D1>$nBw$1=UioLZn@F;Up6sqeyZl+AX%c)-LiX58Ld49YUQ|JK@ z9vK4(bfY$bGMZ`W6$s-wQ zi1167VB^<{;mc9y&a=#*9ZsIEbRGs$be?fCuYLJ%&o!^m$(`cNOha89 zKeVU3Pab~;3_R8XTesAzX+3nWK*si~{x0DrsyL4g6UG$j%xDNuXHA!B#81@oCP0SK zr<16S0BNx_Svj$nYtLKs?UyI(CKRXvwKO0okm~c{(5m_m4bp!ZL|%9VBBVb#5&BYO zct!oaIT7X)V>6!55ZYeAf$)Y4n2nS)rHT6&Ren~C9vu?Ds)RxJKMBG`LRO9s)#D_o z)T-b~5GeO2Z{!vM@+RshrXAQ>gcW~@R8GI4keKv==xteD%q5pXNUI4VUi96RB45~l zXCD$%$e{dX7Z%Y-xBxQFMBFo*cpG)+)(?4|&aqRv!9X@_35vNfUEw0=c7c%!ek<(FEC* z&|MUup8h()@zAiHQ%T}C2Camz9aeLd!d{0^dc{~{(Ilc_&<0wjQ7R9LccBkx;OW;G2k^rCoKQP5X(L|MFx7%{NvlvY zJJGxfMoaj0nN1WIAqC#uY=>`;^3&Cyu1F1)TNT|m?|him6C^(#)XoOLK|QNh55}MB z!3Et@&;8nZ49@X$sh-c+_^&6LI@QA5;k!kFHpre-%O|Vj$|?=?tcRP1gNM^v$4~{N z_&sYn7luh?EZ8H=p*)^KaGO3*i9{XD5B&?nQLOB5TSqRbBLXq~z`1TyvWT~JS!w0y zK_VOy2Gx<>U=RbW*LDyA^{M=A|(%oIZj_^h2Q3t$|h8)h#XwdEfq|u0s=>S9s7#BIcE z^JPwrsiiz;HhEW!9)Rx1snJF+5HDA{)X4E-)U(1jdpsDS#%tCV)JO=ZsFBU=qQ+|s zZA)q-Y=F3i2)nb1`PYu&?C=ULn;N=D89xL^xlUKdg8Nm`3WWzcI--y8E-0K(i|_VZ z4YiCA(~yM>(~M}eppJoPbiY?yGa}NG0Bcf);H*(WBJp^Oc@TZAV8Doauzw9MM~|nM z8+T9^%ZT+VjIKmL{Ea@|DWU=ULH8i>W6}e$OWUePy5`@T z^rc_#Ym0xgp2yM}t+9NjBr*Deg!_`F7xmt#6i1oe<0!ML%n%0}Zst#L7Rq#wleZ8c6rx&qz5yq4m-f8x7zciYE2AL?Xs^H4;;QiRf{|-fFUJ4GQ9?@0uL#_q7g$? 
zy`$jW#xbl1p>MEa7~)k7XTi4zZ}obH)5+s6DGft#6sg9rhfC~lZ13&W08gWUh(xM! zQlWd^;!@$SQ8Ox5&D&GC+mP5nl0t-upU`X*5n_oj_DaX?e*Ulc{HO3$gqY{{Ub!Zj z|24(c5NjY2%`{)*pl-k(OZT&f@dxj>4kkQA;OjRx+iiWriOA6f26w&$b`H zFWIKq-m{8GfVpypkiof)=kjEHB>HwDF>lIMoSxHp7w}R`k7VLgYMgLN;#8AzP0uDs za}`WM0lAk()8WoN{R`f)9D0S6u>R~u>qe^H{`=idXpDo3!U{h4Yf8g=Y&$fD77x!@#5=i;y^3ClT3t1BjN zL213aK%_vKGSwaujbEl|CQARXR9$3v#T;;+wW%H}zBVK=d|S04+uc~1K!hKBuoga8 z_aC4PdWkLh+@)Z%xF~I!4O32NiaLw#Ts89D?xB8y5WV$fFz5-1{g zufjApayWGt>#LVHm?YT99e!RKn6H_L3`52VkA#?A7LsAQbPpI+e(IJCZc02SRC^;- zd(u~nZHH>0j$;87;A!P(%izQ5rHUS+ijC?;&^!eDiMrhF_16DHfMu6A=}rp=im@E- zMdaUFB6|6O$t1ck=H}{we6azsioP5!y%} zp~>mJt?&LtrE=)WngTwTo~>}!4+BdS0FH!7+%z1?f=$ppYz<+IB}qc6GOmEqt`iib z=!C*jkRMU{WsM4>|5u30b>}%3b~J|(&zYsP^PW^-wWl+beu46=bIA9Pq-8tj`{<}D znEQ-wcr!B`UW^Dq`5gm6H=jX$)y>SX?IMG=I=G75i*55Ni%QjTjf4TYYj(@jB<_0m z_C|55a&QHWoC6yf5R3&a32b>FdKa63o07CZ60iMe{UstWIz*(sYVluWZj6tznjor zRgD}a#if78J?{WOJ;h+L+Yf|W>b+V`-(yA&6G@AFyFdLjXqC7rWiFkrNLWd4CvvxJE-&}qKabIzKByPN`>puC*BHyDC?Rp z%8+n0y_^pTDgY_wx=2yNkKeSnAncLoW6FYRU;)zw7HSn>c#u&aNk*jK5?(PeD_}V& zl@xgRHVVM11JDThX3J~jPRQL9Ogy8CNt{Lt+B%MJfxio+V7kf{1yzRAfxlO*%EZkM zWBYFW(kNdG$o2G3Y}*|ngZAxZ!xMbjt8{EYs6ZfI1H`{zROT*sZ|93!&Jh!LM>rHPfmIpZ?E$hgJ_}^x z=0>zY9Ug5kOTQ$TU;ZUe)Zgsk0zhcde!jHz&qtz9pQ?C-q-xnne_%7_R%tu^*ZzLJ zbpMHNnIqAq;#)ZI`_+pZ?=P_%8lSMnCqv_q2QzImIk&xw9E&!Smuw~@_pG6?M@+n% zh~$8*;y`dXKn4oK5$Nz|08j%ACsVHH+Tyt9KdWYb^RxEbvzQ0(tmsE(+jeFd>r8>6 ztzL^TY9U`HOi2uNH^1%a5)E%M-s}%=!kp5=Z_xt!9ZkWAL{-&MWaWtx86jNG6(DlY zltK6T;{B26tEKy|l*sTq|Shb;?4Fcxi-fhbwFE=adGC7bKD;9uG3ivWOQ#k1z1j=Ypq~b9AMe z^^~y?R3fI~rJE$-nc+@Dfi!(3$wAx8MqXEZkJ1f9?!+nb5niK=CL|{t&*Q0L_5Bqj zdY2~w#)}2U9EpCXbpLzBz5C~3e#_MvzI>MX?aSwPOTZtA&KLC@iGI6u|Ape-O^7P~ z&@-AFKwtO94_3%V`bY;=af%&~QWYVQbixHa){4h@ zJvNKSPp$sZ$1w4hBo8=)Ngu(iz@5Y_@sk+qY%sY_CxlolC=stAH6wz~NDTx7us77j zIc;Neb4W+e?%aNEu9K?q-_R#g5vC9x85N?I*OIdYjlbdcyd3&zU?7+dDb-Vo@T$?> zTb^|fgBp49=~CM2pt4HE)k~?i(v1mzVNZ$|q9Yok z^x7%3Zf31=Nzxm`agl4D?Q~%%@2w=do5<3iW*8P~5~)uYhdQKZwVHTZ9FnzsQT4Dz 
z^*UGp^B9uu&YP)WnS0tJd&3~L#$Ohmwav9kj-d4KY9tdQ8i^PTQkGF=Ue$+)-9MR_ z_*<_ASapRY{Vg<1chDjL_+H1QIpQijxacUTfw<1QDk9bPQPWCb{C^L=pgLY9ufA7R zGKD~}L>dC)a+Q>-I1=U6>e)1!0k+^&C`%rHtQ}4wn5@DV(&wL7u8=dVnJ8Uq~6b2GYimWl^*NEIYMhp zokHlFIsN1LkZ%^^-Od_Ufv~(tE3+?4F&%edBw6L-OtE zgn%vPq4ev?q?5|B=GRj?xfPtUU%zUNok-d{_0v>BKo`}beX-_{;$>m>W=8O`CJ5V+ z_TBubGi>^^+rp2F&B4Bm6f4Xg2j({0_j-7lW#R6@s*ftz-s+6BM=%Q)0mq{KDm@Wi zNQRI$b+A1~RugwGkNR1ABo+FIp4V%I?D@$=f~p3v8X-9WHxa-Gq?Q%gK&pT z<;lmcFCtnMxl~;w2Yg55IMey!L?zDjds>{0w9E7QHsRCmZ|U z`S_my{^vR*%;%#jCql^D(=IG6iE!3gr1%?8SgZ;Lm@E!?m_Yku$`gZ5@x)*seXV69 zigA$kP$S-R;+xaNdnV#gPuJWePpu;nz%w%3)pAesMrHsy4aep&Abn>yvdf>Y4cO7~ zd_76U@^(>uDOJZERJ{tb-=2Qg5KU?a!j+JX@$@^n@(tuOB~l)54@+H8J{WHbPT2$tGN<7P*#@}=VoHVcWnvOq!{U^f5n}phM%Fh{rTl?}AclthAg)-o za-C*QH9TkA%47r?Ktwv$&4{RCA~sWbBI$B6l33^~$Y@~*GJ-(vjlMJl8?+ELIFpwn zl#Bkm!3w0&cRWZHN;Dsr1b-`w-qFjA9Qt7!+Ipe@HjCI}BefpzgEo8#-ROvW#F0+I zf^>A~JNV@xZ>3%O7TQpVvhf#&7JzYb(7)Y)W@1wtQ4hYuR0?RYoUH%N2>+6hmeMk* zbOD6+SUl%mde&XV=8vAE8~?W1DYnmk!a}PFoo}xI@hh-|bfyeR1n==o0t+a`&2JTa+)l4#-_13RD_bJw4e}?%%LanBqKBkCJ zPwATFZhk{-i*7zA`yj-~iwEF-mFid}P~-A&pjX09U#EuFQQoYDO{}voWqrNWvZAB} zNvVczc|;N10AOyUJ(epldY&KY+--?8UMo2WfJXF`K85zqOb~}3JvE7(;>R)l5FyYs z4PG9EX|vCe`NW4%qT}aK^T1SV#;Mr&;@#Q3>6HqY!~lc?0TtAao=nJjSmPh*H>zz}I}3Er%o>yL zm>OjI@LYt1ADNnP3RM@cU}3IHL7ljuA%-v0=RBj$8ee3M)m=IN!PIXMyr7rl7LWX% z&smdXS){;v{34_2jPeHg&CFzTVS$oW3*idpqv%!!lh!Zic&(mbc8YM!M+)I|Gy;O^ zJB{?$Xb9~8Oa-+|doPCnF8GsVqvj+vkBM%z2-OtKa+1xDH^#pv@RNI=PlO8QZ{f>u8w_;P;3h!pvlv3{&3 zgavaSh(2By_2#$Av?mrw-a{WrY>D2r?38-e$Q^qW=u@Obe(x}iP7_*S2uupkJ@tw7 z@EjiII53UOP%ci{HG-G~&Vzl%g5107#9P%m_og4GuWp{bm67)ATHZm$ondr;`-}gn z@}lU&3{HCDr*%bcksh0gI1;@ZKh!_7`}?93^_N65c0U%qw|;;0QM*4JeW3oL=o6v* zhbaFk-Lv+y$d2DRd+$C>6f=t90#A;#=KR$u(#W+$e;4W~DCuVy2f6(@`&i}&8=iHR z3D?ECAnz{ofxo&jI0+_>{3|6K8cicVEtxrd~MH^3a+w%CNT()Ti zPbhVTnHox0Z931(N5Pt>CmqsC$srJFe>4r6{1ZCbNmD#s@##q=Z@nPr>r5Vw&+?u%W&_bY*bXIimZ>>po)oIo6s*q2g zM%MAfeV={n-pNL<$D;4l?vE~O1gvYOzrpgR_U;AD(x|Qiexl97Dyfg2L4hrn_Qd2X 
z5UZOvx2uhnAW>!pXC3(DZ|ldE6^OYcm=q1C{wOQGwngF+9-q z_N19APZ4;*MOJ?poY*C$;P8rpnpi*$gIjTZwN78Hyrt7HkF(OfA!+I6ar}*+0PFPy z)~7=Dnwc*IP=GPLA`_S5<-El5b+d0$T#(ZKbMuCg#x7FDLs=s>mGg;I6&G3J zBJ5|SDPo#5QPN=4#zp$-Iog7)WuY8cfmd`;QpJ$_5N2cAq#@TOgCP2XGTh+iK|j<6 zj%I0d9S#eM@e?njkv>ctNkcV(1C5c({H?!YRFJ@?;}!M!(v6V_T7Q|T9MXA^7#^tl zD_yv$9eOaTfZ1Tc+=xmmK)3!fjET{9Gv7i*GRziCjBZ%uAyWYllt)a+z)YS;%-t0h zd*{hsDSW!%z47up`6=Nq_{Cr5oiWG9pD?Hsiy=WlOfRxzL91#BMpggzdzoPAV#qytLGZFdz)*qaxKQHcTgOk_|#t<-NI5 z=?tXTjP0N5gP{Np=U0}263l?oR&KcxN}W+D%tXGuTtUruXICb)|4A*7D*hNF zRTlJ{^jo9~qtZe@q$yHW5RxZV1)K*`g?>eJvV8N%dMms)Q#%3DmriR2w2y8FWUf zkg2tfIh~U#xLfRlWdw|spM=mNRhBd1Aj}f5rQ-}cYd#uCRh3eoFKwjScMDRjl}Xi< z;ha>NCQqv2oO!hsNEPT2)n%l*YNR>~rS!YYQe@L#1_#-%Skd&MIzY0vL#jq^`;FJY zOFu$T^ht8ZEGgR1CRcy2VLBYw4S`+!1u;AsBeVkzU#A2#d|feTl|ZaM>86{Z3wh&b zDk4@J&5X5g+}1mp1w!T|oWlbj6K|tKiV-2=t8DeAy`))Jll_G>0(B=n4G4=*_}gk( zM)e}TVqAItpnWHZE{Fb^9s zR-gn@r$GGafcU39;tA~y9QsQ{Hh-CS!$*(YlD-9gpODpSk=x%t#@S<@`92=ej$2BK6`RCm56|s`}N_JQSXV$CfsM37{6wC!Im*$ zE<<11CCnx>2}`n?q}SuzwyHr@2$nf{z|Cmog*#(9rKwM(g0mgQB4VORjD{({Ob9?W z;Fl?A;<}oE`j>qZ%RNnqa5psJx#_qnx5|L)mhz3}q_NP#DXk^a!n$uk=}m4j2&R4x zox!lxS`b%~Qqwb2Ui*vNL%2S)x;99Bekh+s@z2*a*e5VZwLLc(luDRcB?D>6tQ<=8-^n&1mffrJ+F**EJ<=Z$0=ILQSDEa=-O+M0u*vV zD{lVKc=|M1QCZ+Sgk++{(}!q+>-3>EPTh)|IP9rNB&+DZBC#WNntx?YbuR8>13jbe zPp4`OgZfVHw}a*Tahjq6(f{<*B$P5s3>8u)5kA_beJqc~?%r|2`d>@`w<)fkHh^LJs#u4ua4W>>!tl zZu>WQ->xPBMYO4L#=F{tR`Rr6+Lsww&FDkJ0<-0K%M;_5x@fqS)fXGQ zYYSr7hg1a{nfe+NP2`n4Y`nO%4>BY>@8x|mNbs4-!-n_TpooW!+rq6z<+v~Hi~*)* zlUq$6nU@R1QPm4E{>{p9qdJ1Wl>q`d!+DIWMLcfa``Fdr^Pc6NA|j`KQh zLi4#Ax7$Z6Y&H8=6VJ!kC}z7$VRdYal@7q&YS0vA>>B3+4A@vOC~1k@QpT&5l0@1U zEc=7ur=Rtgk0>Ubn0p3vm6_09mnB_c_gIuzBDM!^1M?ADEFjoG3w1yYk6Q6_xw-F> zI`W@`7h6J3sAG_mAkV?fJ2c()b{H$2W#_QDE4iHr+JdBV{hHkK08W&KNH#IS8kcNn*V~3@N!GGfPl7zoWticwXs{Og7FakinDJ$D4ty4xVu^=hC##_t%rD8{ZH?DT+`-c9 ziM5&S@H^Hbc55?^U!Jkw7wvZzYnU>YD-;!=tC-!)A(_O(1c%yqgsvYVPe7el56Pt_ zM-~D#*%Bta?SaYgScNs7X_z{n|LyVnB2qO>ALL1Z>9C!4zN$#IWxWdUqi6U;X{fFF 
zo`LllBx?r7thI?R2}ZCHj`%jlMQ!WUCRxPa^@HQ-Us8Z%l83PQ0Q&qP3FEEU)b!x+}xeqG=(n;;rAq1EM9cWX_+xE1Ksf_WkG54#E`rMZ#3 zcC8dP&&|mghq@IwE1e15l2kElpj#Rh91S`&4o5aH(9~AoBt6s$6X0*5o>Xs;TL)hf zk#)mu$GH|Jl|(5oLW^zX!Df3X$!6r;Tp;HiTgVo6xuPWvK)8#+eiK8JcG1u(wC(PK zJY;XfH)sYf<8FNLJw`NH^;O!NFWqYSAZU~Rd%t&wqQ!@Utb2LV8AyKs0HulH4;3i6 zu0j{eDa0*ng`fD=C%*NMGhP^rUx6{f5-jvdakQ)w;x>vN)|82ftRH~Q<)84Ys&VTF z#s?Zwweha@hun+gFib$1(k#%u!O*lNa=7imu%A9OKi1olr+v8qL@hVxQ} znN$Qp7%()}9kM7&axP9^ugO_EWKu#HMde4L9~?^m2fe>y;!k=$v2OHXDR4U}I{zab zg_nf?x*&`hF}j5U^pHbY%q-AxpUtUWWOJ${ASzs$KIG?lqc;=@bXVx1Xa~H z%_El(Q}s9AMcVJMP20Linj2ApPT%pnHy`D(x0Cwi$J|Lcyx`c_2uIP8++;8n4va|D zPZ2e_vb2+qdwzVBJMBU0LJ_$4fKMDtL=2Weep3gmu(UxX=ju)T<~36BG+b2j~}N1^MXN+pi;sTjNOkJP_z z2bTAVsaV2h_3iRLhte?G-Ubkw**49K;tKpY1+F^a8qCpdCurBmnuhpVTiNN;ZjoeN zp-(=2SiesAh_Jrd+ZIz5BWn&+Wam(fqsz6x6h9&U$$mm5A z@-AsyKaKwT`T(EqEjz1PDNCkdx;@z+&=4rucO8jd8f>F#b@}k;w*0C zXz})#r9BmisonNKz{p#(jm_|vNGhnP3H%3=qm?iNQnH(#kfiu_aO*;J(ob>-i5S8Z zpoVD*@VNwK1Hq`w>V67f2$}+W?45$OL!vjoc!_85(NgU)=tAvQt+!~)9j3E%*|aF} z|8YJL;)wK{p~pIKjXGJ(RR^1JJS*1QO|vZlJ8S4d2;ZAQm8HfG3vfH(%dhZ$u22u* zjari8SXwghX~y_8YDKktIx1*nisr7og{V|7BT_Flq(;fDcuNMqd8W3alzgVl(cFm- z!yL3`%t28W%~_tR5;9lC0-zQ#4Z}l}r!KXGsWRf}ovL}8Dk9c;^UvkTP-?-dhFoZ< zJY6>-7csVm`tHy1%efnvuZ}`6TXz)A+^ZgeVic4k@(U z^0)Pa;{=_fZxeMciII_q5XnYzV_{hIJY6eZt@PXAQ_)=aKUton%_d7DS$L^wbe@jl9^GZlEjJ6{+0=1BA*vQjA*lKp~PR-V>*`{j4xVoxv_ z?<`G&L3;WSu0XR@rVoKp)3an`KODB|rRi_x?oAX9vqzCF(;Jzk>})d zf1NpzD_?Fy()e(7Gc1ZTZCCW9OvDdFE6|i%|MSVz4LeS&!e;(_T?+tgLrA}9TU-5| zUv3t^cHgwEI|?~V2?HfJbhRoC8W|v@Pm2X0gimW3MtjPuyxdm$Q z^K&9bUgpPz78psJNp<-k{WMbLDq?P?@2%st>ZMl(^IW*00sknNLBq%%-`I~vuy>fy zuvsp+EK*Irb@;6L*1-~4mltA++G5BD>*Bk@r>h>H@LLL>2p!Dvi5yHNe3C!(^NQ`` zB!%KzvSA7r+5F0<*Snvt>(i+xg|yr^J3bVPD+dr|apsy&v_gnH;V7|2BA$ z^%jk6zcv|g^aXl2ngD`>Lr;j-xE~DCni@QOa!E)dv-tW^2WggM%y}}oip3jX|`N(E}hiY!Hlg#XlzK{h#9E3 z2^f%JOQ((p-w;MbI>;z5d8paYTQ(p zJoEs3zi$&o-{fNYe;5a!i0OKjUde-MB~Bi&5K;s!Z%lz0t5i*1(?m#%znYj-=3satP6VHwT1<6*W^$7XpCJ?+55Mw7LDArZw)?0ShO^M+}T!g{6<} 
zyaFZk#AvpwyPV^0IHIq1NF{@g>b7pmaLs|L$i(pd4jZ6Pis8rDUET}_WXQKF7?3>_ zbxIGm-plAayNQB4%(ir9=Hely)Z-QVLJYrbHf=q_mM||IYT4Axg9=`!s-|CLjHGTp zXscRo;h+|QG)wH|IoQ63V$cMC<#}#~!m9@X4L=cRe(n$`7hME?KSlxW9~F&g7vOi45+G zrHNauF2O>ishNZ0CB8X*P^}kQWHtSg&FZRg$i;nPd464l1ztRCP#p1*A}hasg64@8 zE@VwVn^eRw0W}`T*!^#64G*#U6!^HfGb>d>IIX_ReZPpR=vIK90!5a&^7+r}&)d?^ z(aL@CRdjq~O49&(r{Xf$9SXwgN##}!rC%;sBv1(PO5Q~dSc<~>RGd020QwpGk~rp& z>LE^&GdX0~;KJaF9*i%P;aY&Lw+6hHPEx;YB!~hAEx(qw508ignfFMGD*5ntZyNrp zh7XZ+hd&Q_Y51#t_-|74ZwbS{;G;IefHej-u=4vnd`95T|I@oO2fb`CvNiv$Iqa*! ze@lj;+<)trD%K5kfn@pvK+PtkgMsaGKZWu>YWt`9^wZ=au#^{70K#-XtPG#bdFEFF z)FA5<0zl#GhogjP!Yz{3Q}(JrNT&};@KK=|2b3RxiGZPR5t!sC0fE8`NKuD-J!uWu zX%Z5uuuyXa-yyFR2xbmn3smL|RqthES>(8UQH9@0VnfU8AU{I?Ad^G%kc~pfS9DZR zFoRm94s}5>duUWVTAu21cT99DNIiH0)vc^5U7^%4ol6cC{s8zBPO z;Yuw*M0B_nM0oGLu0TYK9)Lu?)f^=9G_Zceh~=ax^~`)3`gJ2rWQ_heV3K!iE11x( z_b?R=1(^J90Ta5h5hk!R=Ai@=UTlI%=MFiSqyU(6eGhU^Fp&%lFxexn>R}=uSr1G+ z$K8M>5JwM0U>U4Q!T}6+TVsl%)X?xikA9NGdzaq;y%&Bk8(Oy$Z(^$>y%G1eX=v-F zr>!lT5bBEkh=J(pbtEqvtrjMmV^0rStqO@+k?@cO7=+030Y`jb5IoCeL1LkDbRcK7 z#xK#h)G64+f=$ZfM(66-8-qlE^2v81u^^_vln__Me)7=lwb1OUZ#JlnUhJ*jx^=h# z_NnrPP$fs7uSv;AY_9GPyKF(5I(9wI zrZfAIy4ANZxnBQ_@rbf?&r9G8i!(b<9x8O$sn}i8Jkk@d&KN_Ga?ZAO%|?(!{ceI2 zm;7||*<=iSyb4?ek?;_MivvOnp`xk4rpn|i;0Q8PfjRmIHriExbrg`;B_NV~G{8eV zv>HOf@xBRKQi=A1Fi&zSv1l%!d$n~;FkNKVMI)W5qte)r6luZD39-m0Tm$4{jA*0M zFbzV0PRxX1>o5gUOGxWCc$|pQ6RZ-1p&y`@kXxaXY)sc6o6G~%N;}t)YJP0BW3kHs zI&G<)+T$+yUunc2mR^5Ucm+j!_$m&f-v^IKp;Sg?0MoC|gr#uW2FR?0nVq3Eo7jL{ zSNjA?c_7n)l$b3)&j8JUT+_B7g-BGpfaYBRXg^U+fG7q-721hNu$(Q=5fdd$$DM|{ zUM&C)DB)5#$_(IB7!FV@yw{)Q z2p+>!`x&Zq?I5}xxW!o43i6zI9V-6r-#g%u82nf}w=N%X{u^VeX-4sf-|sY5y!yfGWi3m^V*`Z+Z^j0t3t(gqp!P zEt;Ng-M~~p(Z|UC*^6Z8ue$4Y8|J(vOOv78 z7uR)k&s$+LWlISNaKsaaHAcW-U`pK)qIPHK%ZqvH=Bdhe9|<7Ym&f4M&Km z==P?ozt!FNAqNtu9v)dXWH8RWm3UAfC zuOHMQOTrJR6VkU6S&XaQjOZlC?1OHFJ=~VAfE()Ce%T6Tmxfr76!9Wx2iLW+rG1ia zrLdh#a%%Gndbt8mx#9@1Fh%Ce0(aC(qE={&R)#UdfT?tmlOsB(V>fkR9#uPl~8c2+pmz8*UORVA 
zi?;HRKu%JD*)DGjwVo?k&;-R|7KRmFVQcYywpzR4gL)g+lyMX|xjxQaJ}{cxrULYT zjj`#_ksobrVtYDc&U-a%XhK+qCiWK?t_`hEyS%HN0fkOfHV<5m_-_Prz6`CT|Y>xiu#5 z81*86jJLbGfn`EhL)L8|t7O9n!g!mBGsayn!Vx%d-XpAuAyhNY)nj=rYZYM$;(vyU zrmzihYucICqK1K$Jz9X3Kxn?M=|lR_f)>PD8Qi1%)YCw>M(diHn${+dP`tPS9p;9; zE=8}mtJfkhP9ifRW6IUr?Y+J2b`ggyf&XMzYQe?ua*=z9CE=D(Mc?)7Ey_Zqg?dkA z)~0Q|;^p%owOz0gu@Hd2yBoq%0AHNKR5C^bJ>zeMSug}Mm~go%h@uEcq8i{Y3kAuB z+6_yV+H9+a0)@eUJ3aL9M<5O}5_{~0e>sSvnzaOc^GQ?tkZMQRhw2RY)*Wk*-b(Ko zfeW(r3#V$)*=?b-ilL*rZDvSukr&MB>UMTJwObD}5@QJweEbAjp3Q8BO<};=$4`WM z)Eums#1PwY|LC;Jd1WPRbqb(g~B~H^>A+p-%;&=)+qOik}}&g?|e<;QajT2=g=Vqb9ce z4ZW!xHBk#u6KL)CgZHiMcHt?<6@>MG+u&v_TU6Wv7SxHgUrBj3Om<7Wxcbp)z3A-n zuU1fh!tENjgQ!}ZM2kmUi&t*lUa@QJw&K;q0hJ=3?B3$lByHHk{C>WKp+`_Q zB`HwU?6eE}g|=E2&|(^9eVZ~gfKAER+V`jw^=X&Qh5Z?Qu{F7S&eHRHESm+V1Qrk# zzv(U)xdjZp93y~EjtSk{weIan43_RpPw`Yj1)d6Ak>8&6A6txKDZu8PB?LeJ;kv4; z#{-svnD>3+Ma1Q!W#7WLZ$hDRwEJIX`xmuZ&~4w%_VjIzr=s7XOy~`9Dx}_;uUui<-dz}Q#^H1PV06XvCZr|&s}BZ4KO0+E*5qyA80dN z5b6MHj9ga_v=8cf?SS{nFCJ*4H{jqu-~}WO|HJmI#H+kl;0twC`8OmX0X73OKjm@y zOU2qu!y=R%o~tc>H1Pt=xB3y!h2LSya!?7Rt2}k5dyS`!AYEo0I^a`X;^%56=95)z zWntV_P)Hvuj|ZbTsgR-4@AytCZno$SrX6VT)dFdi1PdDAW7|UYB==PahHR(Y^|Of> z>qBe5hrLv%ZpnS`g7v5(HARjU*hzW5+-%QW&hb>tbKo3i#=?^M6y_@zZPe-Hm~sg1 zvj|C=1?dV; z1-Jv6arglJ<_A?MZ%AiDg%U~p24}zxtScgkm{GQ;kCM>lWUcf6()Rx0bsg2+@7ent z=^UM-A6vHkE3(NxCr)A~c5E&o4o(_8eh5D<^d;@f?Zdqf_qo0A?ep9|GJoKZLjRzk zfExsuVyIgbYKow4Ex-w;7*MGT0!$FCX$3W{3Mx^lDWWwX`npAc@5}vs*UUa=A6brN zg*QJm`^?@mvu4ejwbrazvnFiHc4VW@Cs&Fj6=d@tB*99Mbw&1p7z`$yL{u#&qM)_| z%&{P%Ow7rsDg$OyLW;ev&Kdn%vhV* zoYQ#tiI&--=0&h!<5HRlr**d`blcXY9MFeyV5mB%=M-3tq4j1$K76@^GcR;IrIdUb znIvwZFMU4BhsUId$`B?31S4$*Fi|je9cy%as~dP_^1mxXh9PPa{i}-qOSsh{88NTd zmoZb@ew==^j{d{&*~is6l7cO>f-4dFm3k;24AZ-a3<;JMW@f-U6-{E>U++fof@6l( zH__<1n}VCs1FU{&<7M@YV4%wX!UP@Fl-0j!HQne<(i>j@csD1mmMb zPMLfo4Nbe`Zqv;WZ8y1@*gK3L_pzX=oi7F1Gare{TScOkv$x_ z2q)~L_iT?h?}?=b$<H8C?M2R z)||2m1_x64OaVfXxlUZbAd;_u3;nCyPA_T&)fxmHy1qPD89%S_xo35D=#*5J4I(InpMJ@1ysPso$zvC3boVtm& 
z*!`VTN?eXvQmMxbx;fHG5R?dt_;80#7T=(qCL?Ti1#RraYHU%}B5ZLpPuLgDQNtlN zH0fTd9g-$S5RTUv$1TKmF(RMeYFjGTN6`3ZC0~7d1Wll^Cg^V4E9=_26H|uyX#UHp zqRw-4l?mFR3CcpsL`)8AY+o?*T38fTmkzc#g$DDQgYF00&sD004a_sKHGZ>7IsGNc zE;1MgTg2P8=P{lM6~`R2O;@=>FNNTmEw_`KNA41Gv%NO(grB}e5dtMm>J!_>Opv3# z4u34XqjfACxu#>`Y+Q0ITv($`+dYIujEJKxwpavB*a|xk+GWoq#My0N`UD1A(g%q% z-yFg`^<8s5vgva(1x&n*G9Se}`8 E&>rHE{g@7C0YGf^mG`1oV|uissqQcITuY zFq4zNeR}lC&h_#^0OTjvhKY!9P^iH z4o%5#{qx+xtuqi054fF}bKjM80Yzm2IDi2}DZI^uXbsws)~%HVXI{S6JYSI$MB4bf zhD6pUHg0ma3}U4@ZX9^gszNR>HfA7^rlA0u3!_B`#79`rNX3!yXM1WlElW(z6tQvU z!jxF#4m1SX6v_xh7-u-M2rl@aPpLiNm<#PHu11KbA^wOuMQ8{v24MJEscB;;`4OKG z-6(T+^hOTPaHW$lc38k}^#iXdh_~D{kKGXBD?ds&l%>EbuQ}0=igKKy|To4khp$%<&}C4!Gdoo*pU=$fD*Kq!Tr}ct`G`n*E{nhA=p^^ zvcShQBl3-#yUx2ImLQ%{-mu7xZ4gNgZKsYfuc-}*dN=& z)$lv_ua-h?x81ah)MYp3FjJ%|N-_4^D=k-~M>xIksAO0(koyO`ST4I^3u?+@WrWhU zHm|r2+eLu5aU|qwQD!o3*K#`7g@hvd+jt_TKTo%(cr&21h&jo%^N>|X23WRGKLG;f zuEqT7wMbtK4)bC-2#tKFuJ+6rLcZG{oeD z{|%3$d`v8+o9GN2gV~_2q9!Ka=(M*19GdcGc1`Fg`IDlpGP%{bzhP%z6jHDrv&-nc zKQ=Q@6@rye8eDndy`SD7)e0=*;C7SJ*ZAefKo2#D$XUs{Jub%f`|aZ zMWnCJ7n6VN?7mSG18{8@K=?AZFL)QgQ3eI^FFk-*>v~e0h~0@ld{g2!`e0ejF3sG+ z*IHj&7t`_^J4+spnDhUQ|h~x7E-yyvOy; z7&4^PjGcTVJ}a!)3RlEf)A_Hl5EHG+r-%@Zs3==7k;95o{{0$XegT~}CdXAR@WMn% z`JyBFCLqf7zI9}h`m!Tf8JK)b_p&3C!?zL?6#21u3vjn#G zrd<|i))p3zTfOa-xJWo*;7L;qG11Q4%1p!Z#?gbGjl4+|6OD_OVh2XFA`BRrx)w%Q z<*W)LEG@yvlUHDb`mTu)zF7rE)YdW>DJ_GM^%+LEQpzwQ|I~nynQLK0{_?A#5tiOy zWbO)#P~SB%!Z)kHh}v2PBOS|Ngu^hNMh0rG6F;L%AAeI)51}n!ADUCywdrhtx*ytW zq&|_l?E6I)d2A2>uI^I|1I@Uu97PnV1(_WH^5X9P1nu&=e1&sXj6&RtuYhr|N zR)G<;VT8*=mWVUzyCz2XW)&DwTgzai zx(r6}rt+-HY(QW{){8(RBiEWW65y-B$cW9FZ(o5C>boXJ_+}LtQCrJkq_zx3@cs1| z$$0SP7Dm{Je`*?$`m!2~U?yVL{L2*>p}uQkgl|@X5w*1pMtYXPh?F5ydCGR8+x?x6 zd9%!7)1oi=PAAH(^u$JIZ#FEp_bTR$F;-0upC+TnbTT z{o(tnyKGxOvKv$#*Dy<0<{Nj(A3pJ(*;+|!xLIv;g!`fFz@nW@yz)?Dq$3L$Eci}# zXTSaRv*?~ef!z22nlw?6nr0_?)@C32+bxQBMf$Sjzf{wH)sNoP{1OGI<;9LWsQ;Fo zSN|=mwW>R8mJT`=ySsIj%SZ>ebHPuy#dos?xAirzcIy?!kw#bYuQsH2a>So+c1z>O 
zSjJmSC~W8bs~lA9iyhh?B>Z)K136{>m*+)|-mxvPxm6{)` z3~9QsGV9h>8HN?ZMydeuyt|V3LoKW?D2YBkG@Q6i97m4<(MNL}#l6Y&^Nx{w?y% z4Djx_zOA&rUB|cIYyTEbZ^nD~xW4UaeJk_D<>nox1_Nx(tnBVD^lg;vB6u)kXB*&} zJIxJ!ztO@R7C8wY1AbHz8x^K=e;5^FWN1|6=g}G#8vz;Va>F8j2tO?NH8qE27>6hg z%jGaE3L7NGU|uN!s>AXVeY?V8dCN71r65m%ia1t>cdEf`G#pU2sIgrn5?Atl&hdLI zt69y3EB1D5q9;FKG`W1@X1Rc4W%E<;em%bU!A7T!T&>3f$^)&`u~EX@u9G>|~(l5>Ww$VPav9F4#7kx`MsM%*ry2O@!dzs_rX0^2! zNAPJP0w?!y>m}!6tFYYnctbGmMSR1@e}Hg3%AGy#C?A!Q_Z3yG8`F>M;3D1t!2X68 z45G8Q9bu$H;Rqvdh>#!K-UPVV9w!`BRIMjf5$Sx_ko(fu8xz;7&=%Z#bgHWB-+T{F z>&poN>2JEmjY~PFxpe#eKuIfm3%}1@JZ{q0#<*0y;G$Pk^eP|TrYL3km@bSJlOT1Y z{kSa#WX-AKIjYh5Pt5kuUBv`?iz)6(|%cqie0 z0?d5FP4$XPL7`*);WPu!VKt$2^WMT`r8}9A zq_(RN-cOUJx7`=*%*qPG66O{9~k= ztw%_iMLpJ$8LM{0{vPScs+|qhx-C#`FU2S@10?gRg7ku_+zsSP$9e_XDT8dTS;g_* z7G&e90tB2S#YL*uT0UjPPV%=q`I@2cR4*GOx{))`yeLIzQt9pY#dD;FCQ|itTKOC+ zpA5ezl#XzFGs9zbb2oGt$HRRsV2*I9y1*C#HKTf_KX%KzW~eS)hg4>j^82JxG|F*( zhfp0;3RmibiyqfIu4U)%5&O%<(V@8o-yB<0yv5yL(N0-?p+;BH9&8;bX3lOESEnVu z>S3ke=#rX<9>Z%;^^e%i^OvmtMe6r7e>kB#nw;te&vYP^9wjwaqm+(Z)ScAIIHLzK zp9ycT5x0&h6oxig~pyCWfWt@-ondJLO5bfZ3r3u!m#18 z|6qt3wJ=f8ps*te%0zlukT$<3@d0uV36Mh+>PmhbH)RoT!@SgfK3_D)RYy~3t7k}v z)`(AaXOGGezfmeS=5+_2t7sL7PCM;MY7*+C_YWw!WA=a#1Xotuve_ibB4-ey z?W#4}G60EM)PXM42XaUXpx1D)N^q%p%fBw^U)cHpgt z_e$#RP~dlK&X*aOJsSf_RS!G;|Ax(lX}GJF)XWtcuZB)chfbVdEmo$MU}Y*)c&>>R zu0g&Uo((IA1J4poqG(*B1WzOBRxm{y$2n_6GR&iXE;MpswNTG2fqEuXIJR~kWX^B~73`u}r=bpsi=))=Z09#zQkFR|~i9p31{L7Aicxa=01KcZxAWE@$2u zx6unyHN5z!UWj5UODLwYgkpjP#c0b^1Y;j^Y2G11&|1mQqsKb}J7gJV>Vr;Y*`S9p z9SL30X{=Ro#o;Bm84eX5YT`z>=w5}-hWS6=lmlY=rXkLlT8&hIdi|;b={VWFr)a7- zw^`E*Dcv(RXHMr<6_fo6^HbFnI@9lvSKRhutkt45=KKDeR|ft<%4xbF2yE8KM6HWl zijsMUkO@+LhLs}o&_9}YmP%>}uO@`iXgPc5*D=y!zLm)&4FYGWY;Y1n6(dt{X3T^1 zDnB6}TIFCU$XT9H32i1kywX>v&4_Cag~vOBIgj@vJx>eoima4}>Q}D=@8~A^MVIi- zT6ihmz0#859l9{>@hR}GQ(SqxzuY706|2O%4pilP_<*nOWh=)!unQox5Tgq@x#r<} zp|9{ItAr0vkvw?&d~MHLL2VotwA(struv0QTJ?~k>$R~L$j4@gBd#=-ew^h-Q|xhv`+j@$8gtS20|^JBnqyP&vY_4B0) 
z$s*b?#slfqS{O`tK{V0dvKJM%$EW&f$yNujc}*f54ym7yc?OwQok|WSe;nJES;ZUm zPRgi1i_PH3m?m**j{;uw<2-jXo)?m~#do5Avk-V!JmfIoX(ez}&maEhNAR)K&r4L7 zy;{)oM?QWiygI^{w1hg_1aZV?p_Dc|GVHS;&1M$M^JyES%X4z&Jf~;DH35)|{D7v2 z4G87y>=3aOP`wx#=s;cTa9dHq*$w7KX{5`}Lbs*(!lSpLf(@{pMUu$0g@ddN7kWsw z=Dw&OEM3hg!j7SgB3f0M$fN&S*GU8hqXBD;1{+zTTv|s<*69P$uIQL^kwqt#*PrNm z@OnuqUiPnueyop$wVZYPe#KV)gVujDeMt9ftff55g$BELx~CG_W})9G>09U+rDQuF zz;DfJfe@Y$7*^%kM`Yd{H}+%GNGY%N+M%cMtZh)1mkPCh=G zB)_BMS-40Vd1E%k;&fbW!ZmW^^$#D5BUz0!0=+s-a{~;qwo`w*iP> z!S<$_Xi^Y93Rt-Q%DiH>a<^4?o9x%o2r8KD4GK0O4pY}r4Tz!F`Xx@sAh;+MF2tP^ z@9hzlnyAiD4uwSSiNb;2axID~QG~3wtStw6i^ZtK%#Yl?2VDH3o+?ijsq>NJmO(TG zciZ#%v-|lEBA_GpY>xJ`B?jB=uDP2?YBl!^l4{MZC+VuW0TK@SZzbUrf1QL=Qw@@i z8d1X4Eo{lA3Zn!YCdBuMU*T~f3nff~&vc&^Jjp)ZjtQx8`|H5VZRzd5%o%!NQ1lTc zALGU*il6Ra*;u|IV~yG_bX#aOt|dG97TP>3dm3mIjq(KQysgc6Fs3?Dw#_7H*?scg z*u=c4wVhg$zYL~{xzmr@y`T_M>rZ$qH8%UrL0MyCB)EXLXePb zYjE)qgPsyl5p<_S)`Iz^c#R&8JKw?n3i>q? zxFVj~vPhYvRIR+@i&ApJh){|+=V|=dn{Xc2Z5)gZ-zMWG-*6>}k|P@X1s-7ooP{dD zRV7a5EoUm1GiN!ItsIE8Fj2u{zd|(hvzTZegT1ksAV#NkMdvTbag%0p64v_N8IGse zjvo?%wHrL#gx4Zg*SEGRd{#!{>@!`3x5#4pWyrVh5S!Z&--~+E1wB@e@@;9 z?0H+^Oy#b^AKCNv!X$N{2+vbIeI+gt^f|%~G z0T^LxOupYm_g>+_M=<_u}vyOpOIXGN{(QamyD7CH5rBMn%X4Eh!igoXVcWJ5@NmT z&5yup9kvP=H!y+*Vp?_nlaAiK8|dkFu1U#RV63!dgztekJhpJ znArx20zGrtLT_V}>qCcozBlJJ5hhm!=x9CspGxUASHocIn|<%mB>6Bh0k$WBRRwwY zJ8i1=RrOz`uiD2y_J^O2VV?hFFEcaK`Bm44}r|%m%cDrIB3}q z#|L}LFm2Fbo?ysy+cEHK4_a#D}a5q-F%8{?WUBX>oh;_ z;RDM=(zG%`NmGfE`b5~2WTH9X_fuu%`41|2B6`6=fVCQ!Wm)yvK%3^IWimsYtAJv5 zjyP1*V^^UW_NQxpq(Q3GUQ>tr(hgP=HQoDD0FpInr*tWk7N*`{XDP|q1jX6YHZzUh z_{$QRzZ^fM`-JIb4!k;u8Pp>3Kw~UETTByXqV#!@5IgS}wq&A*y)#o1H|pP2cs%AG zbG_-jAJ-$$!G!D-K6<1ULz*HMasr`vRaDjGryYcXF6s%T)K$`don56a{;@y&EX7^6 zhY?D)$fQeTqDz?}6TnToq{M`2CuuG+=@1@FwP2{kacn;mQ0jD%{v*i%@YVj#QF#pE z(u9UxA!WI6OexC;RSOaLjHdVu06ZM>IQOthIv_Me}bEU%_YKg?nOcp7H|i(6c65d5Y~;7zZr+5QomjHcCDvLw!+;@I}6)j?z~G_YLw(DQY_SW(3Th(0S-!T`;fcU z?oLEh(tf(bv<{srWr|7yApzH+Yreg8dPA)OLMP`7NDsbswL8b23DLT6s_t 
zs8h_~w)%F~Eee3z^(%ttm(#vMlco~rXp9_=-TfehFCEphb-Y53_a*NM*nt|wQcS|Y za!kTVf*~;#i~=Qt^?%r8B?=J;N1XuWwld(|V?443NqRjZA#W`tY1#93-ZC-?fwTN> z270d*HNe_?1Sh0H8L*lkxt%~RxP52b!vdzc2w(*o);Qvo4A`XY-4Lkbb8Q0_sf%=} z z#EVbwEc~`-K|2eNw0O}YI)%{WcgY!?D8-s~C@eCgcmd_+ToHYq+W|fAG_B@BNn^P# z&k>8M@k-%;o%F= zro}#Ijfk71otHkx7h#oy#|p)~-6Y5M+T7dz3(pF{rJi+s>BHfe~xvB}uHSY!&vV2+c@6?HWpjL-EvsEvhV%4jj45Bh{ zH*j;s+i5Fa__o%$x)9Tz#I}YJ#1e_qe-_Kjv{IXl48RZYXRiNB0H6G z<4+O~*k5N@aAVCMAezr2;Bim-D_c{HIMIP-Mv&fE(8IYL`%dy7#iit`$uKmqD;n_E zJL`&U*~rr3GYjkMY?D4v+P9xkFad%4N5}r>zIlH&6GDWKimsLI4gQj8bseL)$vvbBJVuWNMtZw)-PH>Prh%?g0|z`kOsw6QHdAm| zds|G{0`Qj*!#9t=9h?ALDG z8;Ei4CwGMx>@D=BtOo*O3LY`Iw;N|A$h8ooG71+D@9`BJ~B}rV{i8HJ6B^fn2afCc>lqi+)aaHx#f1Mo{43QR{Zn@{1K7w2($D zI8Js79!~E8J`0!R6=_Kq7ZmkV{!S!23vu#)D!bydHEr3k*%pWDu{7Cc5yHPGTa?<- zunG;c4p{c?ff8OuSkDfjF|tt*9AbIgtetCfj3(Hk?x&G62S!&#an%)ERGB>1NmR2p zD1eQ>hd_*p17YLDe{v`#v>kzNUaiC$Su3-Oyr|azsz^=S2a&v(TsVf#BpPon>(i5Fp$oGUbpOJ#@ z$Y}K<_wmoS7oc(Fz2M**S3^Kqxq$a@5$lH{$LIUOcd;K4=(2rCawVRsGcet|;%>;I z$S^f2U1L}>ev%ezN24eT93^HFF<1TG7!(n^w;vf!T%%DOB2^3;NGLtF!xK^@ep=I%^#VJ!> zii15BeYxi~%lXdWw$JL=!=S0^xkU0TYO;$gp%AMxVhgs}`0vJ1XF!QsM}ZcF6;=L) zCU#L}Ddwd(c#;5b7=!1jz#_C>DDER)>K?V5*Hu|+f}}mdNS!EnZ!GvGcCR1C4W=D~ z>@{h=0kv8R(5~jRn61y_y9gZ)4y8}<3=q9&LWkFK8gAHx=%jIdqBw_~!?KmAYOPyc zSvB?r^3+V}?4P1$Xl*4!ix2e&rxE z$u<^!D2j{CVm2hkojUeU7JkVqob2i|fpFU{wJ$?7>g;%M7^7_^LN*wy!vyzNUKdm% zh)qU)uwY{;dJDe;HT_{Ju6ZbiSzZN-@`Ksv6?qg>684j*An0bMq``oT%oA_TEr1zQ z)hZfXhPUEHVM$YqHGV;UmLjX5#1sk2(O}Ynh-S{+a zUt1&=PYt3GQHgw55%GL)Yx^35N5P#fG&A^hr~k2$-oOY|Y;WNe061rUB;0|3J&D!+ zOQaP2y5HkfzkW?tHv9zi{HxOU*>ZIkDI*-%tP^IwiC(oUdYz~}LdKv!Y10E$FOr5= z>Z^elenqv{)(d!&7wWusm)rB}>iX3J0zezks+X8GLX~OG$pWvs_8kDU z=TeiZ~**BYEhC!K)=` z3MC)bCcTs)N+`%0QmYN1EGNHB{;p`Fzorj7-V<%LC5?!dXnr&xC7u_AeEhvC{pk$h zD;Z9fgJ?X8Z`r}s@9Mrak@9n^nn?a6HEUj|ak zBB!K#C>by+f|%AG=q;6^s6##j;NiK@Wu;5RBY9!2E;37O!L5blS>EI29Lst(yqHY) zL?vyT6|{^NJW;yxlqzimZ<*cv|@wUs?$E6(+-6OD12;h z8H!L9j%Q{T_CJTMDuyNZR$8*r;!AAe?1U-rlLE{HlEPHyV=eyllLI{L6x+n&m)Y&K 
z>7AQhDuFr>)&B1$@*_wSt?gC0!c&g7_dth zXP$plg2`)YqJC4X;^&)*$;iBO>1Z-m@B?i{qd$tn%DaO|dz<#Vc}31&znh#mtPU556C8VUwS9&;9GO+;Io7(JuF>V@aMP6* zJjc!22O1VXBTcycCQ-;&7Br}`iFMVNV2<&-=EogdPA%2Ws<4>I5h&s(w|5|d5mB7| zK%zjh$r?j8pf3Cg$hcW?XHDbshF(Z)Y${F#TE}tT>%-wJKMGqSI{(WmbUf6M(Y0b>mz?v9%>{m zd2NFUyD3dy?Ydu!lExA+NxNR{Dz9zal5QGm+?sa0+Ld41*rt*%Yp@5iEq-33?!G_u z@T0JaE>*y19ckSv-~vf9hw6vi+|3*GO)< zD`q*Irhc_sw}tM0A?u5eKYHZfDEUGvP;>wC_D7FAi_4_>n7a~RMS}MAt0d33D}F$k z>IGfxNVkd~KEpa&O>dMgNcYR>4tw35-pb~fx)1g-p6X4v^|(7_X;MjRh6@v&md%w@ zRE7+ShaP#Jt9{6mB1_PXy(mU+knsgVg-|1#6z23@h0F70vwhKXA& zdtHjAxK`tBlz^i^HC<=L*wN$-w>SzM0~D@5kZ!P-8ⅆy@5;w=-N=Vb*3}zv#&a> zi9P^a4{Y52*lb6isohe##tN2FYZ)wnJ}!<*ab1USh~{8zw_Zb{{-ZTWozAq^N_VF0 zI`#@~O7qAYE5>`}_Zu+%hTEaWx@LdWKebPtYtnV$jrFH9t%Qo{v;5wCf6AdhxY%H} z5qMcU8a6)^Kt~mrGGy2RSwN(nNf1DX*Ejn^Y3`u2w)x9P8XxMXEaRk60BW+!LUex< zZ}x`I0J*GA^WVzGhuW-a`V1mMf9NDl=t5z@1K`&9_^LHN9>ZY(K$i6fS0n52lJUv+ zhg)VDpA3!aZs-q)ZjFzjA?uGv;wr|cFAOoG1b73Oq38?{viTI~z*o%)_p~RBkG1T_ zrzhrn#)oe~G=Hg8<3s(e@$tl9kk$3Uu%4X)T_;(tZK#wzsYpK zTK40k{s12RX^oGsTH}-T$0);(5AI+>0DZ>qBtL)h{kdX%e1Gs*$d8XrXGTubZrS-G zfm6iUkNK}@bXR<@{iG6}ksoQz&u}SboNf`og3WU{SVB3>5siWGv=JlygRceH1%#HL zD(}0%&pYj>6j9w5!8+T|9<8Uj~E{SDRCpVm_fs6p?l4;2*3cCd-{Jh)OWUsc`v&mObR?*^_yA! 
z^uC#cNstu}`OO?1RV>L=7;=#GLqr6VDv?kZzR$Q!57jqWPQg}JsDE=sZAC@t4Lq3= zr7Y8vNTn#Ld@L=9Z_?Q7Sq^Q=G^}7uqD!(E*EUF%HltudUeeS?_?Iy7NIO)b+e`wOn1`;3G^e?%$xg?e=|FkZE3S&t4pu* zmM7LQMj}7~XrGMZQhw{g>#QCOEwz?{fn~wKC>j`;E=m~gWr+bAX>Uz1&?p6iOmnu= zZV+2k40SQvssj{oqqUt@gki->QT5cCaCWEf9%f36zF3dYuh|G^Ylg!p=+Rodp|7gA za9jcj6+OwLfrv~=ywA|>e*CX^0;X-MJR=A6L64{I)}|gy=sm@a6x>Lh$=taZa^#-C zV(|Yd)P5(*wu*o!GI-d_7d$Kg1y96-hleM@6MJ}MzHmQ%45!Y`EqH7##-9L>j#RYZ zK@S%^+DCNnd7OfE#hgr@Nlpu%9ZiFayaa43fpXX;E@GyX3ugsO5cxLh+l2JUjlTJ4 z;*NiX8gFqwohDg)gp6a#eLY7K-m|6l0<#Ix7lRFB>T~nf9?NA$=4`3lq5yfqKmCwo zM%B>Zw5tB98rIAwNsVPMdCKLkSeccou5f zZ515>OEJ;i9_BzH3<@bQOSQrDE z=+l)C))wQq(=#UK7Y05<7zk%x2fV+Sf#cRbdJJ7Yj1PesssM*Il6J)FRV!n@`{tG)o0YIC3l8n5 zO4?>MOS&23Rbg^x+JXOc!)j%&81WFU^#p@t^rH|S_J|t;-+O{Fi0Iy+MfeZ^4M7?V z2AzX^8n-~L^oz(KJdrD@X%?!yF1xTH$fl!)YMBYjs!~2n#!S*u?PEnSABRnF!#six zAPZ)1o;2gM+|}YYxTMi&NVoC4PD0j{JiVbywlZ+OXeM)G4rUDGhrCU zBWfzTZ!s5RglT>bTtL?8pXAVx5fJ68Er^Y1iw+_?8)1`gY@QG%EOeX_aN?Ud@ND zWJdeU>JNj=T*ah4Vd45;l1D$yeFycy9?7meGZTOd)=K zfW$PR8-&u^`QV$nBUGob?7r~zMH-?7CSulgqLB7n9pX^kvi%77g-%_ZZUL4 zQK32LMQ|!M?c#cr;ztYtBdaEnM1_}tqM}4r^w|FN5g`%+*LDIOeJm|M_WDM}q6Nn0 zxY(_6k{3IyBQiN?TgrS~IRRT`2(UWCA^DC}JDKQ}!d8Kit83TH57}-V4#q%!sFu?y zQmG06{Zh=2RL|A{I=$iQD;?T8u6<1$Jx4!6f>>Y$eb+c)Q5;=t802-G1(vQ zQ_pa>GWb|)D2k}(e39bk~-{R7FPTY$kf z!Wne*jUqhteoZm$hQ^v3;7*AqA9N&L&gMA$;DDOfG=}1r$_PMNetzRYi%!!~+YC~0 zJ^j&tpxYPS50z|me@Bx1!Is*q0Lg|-efH1=XSCpqZK{xv?B@WJDJ-S$M44y8eM=}oP!62HF8XrcF39HeA^TxJmnEW4F z8YOg}9PqE!h0 zu;nmV+h_CDh9KTo+-IyoBkEanT?6OtkoqVj`|V0;^N0+;?N~dyM5R?vS_I<`4Ih*1 z9GiH<;i+Wpuy?AhK~r3{yUsUi*0$56W@HMiXv7~HvGpNy+bG^d7VUI1BfV0qfr_qU zU0BYo3%{mC$jaA+6b?o4M6J00L9%WIr+2cQ)ziYhweHC|6ECh(Iz z@fgo`&QmEZj*^;zq~b>FL&^PafdxUmsy_cJ<}{LDYt#%x*V!Z#zGb(dijGxFzNZ)! 
z9GYtkxM2d06oBa>0{%AAdD7ddevIYNpf~SS*g7eWMr0lt9@cS;#&h??$N8NOxaS^h zl=l|it(WrBDkpEz<8g>_hv0a5{SGCssP8I*_iR_3{%%TlNuRv~Yf`5lqKa;u2PK*W zyOosc&r;G&R0*mWi_msRN25j$B9qWe8Ea|Wm)-|^dE;;K=XLO!;S-4AS{*zH{9_H^ ztZ+1y*gXq%^1v8>7wSW=AQ&6rP7Ox1=1a=3BGZ=_=bK`@x`zn%6UOjdVS{t1Pn@ zL;6^gH8$3A`UX%5bP+X(8Bj!QX-|J$+5?)@Tj6CL2Y+2RQ9t$LPJlF;sXv1e2M#?^13#-J{&K0%k3v$s}Np2DYB| z*5yM1Dk1H}16N6z2ZUoYIy+68onAhvypvY;cIpR(Z~T(6i}W^rItG3)c?P*!+yZ-d zl8P=-;=Ye+mzKrTI0TP?J5#D#Izik{NdseEHFejjrDH|_oE!Lkxm}c!hfxw#Wkn`r zs}l+qnb7@ih=^$EKOp|2woeJ#fZGPUx;9e`TDLP<=xF#WCIVcEhp9_2C5n3HyT!5d zW)I{Vw_FzxJ!NC#x(=$0fA-n{AD??ZhuO!XlHq_qGiG2 zaw)GK6G>9zG!MbSP4yRtTXG0a72yfZd|%+~iy@OvFEd6~6Rc$(uQ3?AP?P(LuuO~( z!nVrnhn(aHAV}}l_0*Cg?i_8(l}((0P$XpOa_YPH#FKi9Ej;-l>qs7Dw-03qu9hn^ z6UuNjep+P+b!KJU*yE2Tb(wSN%`v-_p$F4&ovY4?#P!0)eJ9ZNdi-LF%h$D>3M>Y; z79FTH*0d)3TAOS=HrdPfr?oKI*Fsh{*}3D%COb8!-I`5)vWrC-7jK{J=fsYy^p@03 zH8hu){2oD+q<3oC&o6vGWKX~+n*jHC7HIB>k$E1dwuD^BddS+sCErDwqq(`%C%U!O zSKzNg-mwM|jVJ)q|6 zl4Pt^8{Zst_C;RQC=bFc1Dk~pS|eaf@)|-yrWu8b2pL;Y&zz)@@?dKq1u?(~au~7A zdcdloPeTu;;!YI|oQ_k%tiyv&sqp zsSk42D`vymlw@;#P{~FdN=UjHD-s2)S+B+Oec<#7mJH(c7Z!dzAK_m{+p*4(PP&*CG=@N0B=`(#y0 z8<#o!lYk(dWPaGgKZlHORnxVQ)mpvW$nb9R#ifm?I*^#b>){k&uYyJ?BapDTd|7Qn zSiE=$rc-ry>@A$)XV(95wa837u9O)!Mrs;-)r}F8?|v10C8R*|>y-D<7^{T(8GJrx zvWZlshX*CsOher+Wx5G2UiKVtuR*(Y+j(ZY;LP!`T_Dol?zap2dBlP@`F?E|^f2k* zgF{A&+Aip^C&Wm$UBHA@Xcm=1c-6GttGT~YZwUey)o-C2pKoEi=8mXINLeWF-dp&% z^?PsObF2btQk*|&2-sWrL@jiZ0s3TZZ{Y^eKIqZT7n~;HYo#DgWOlNqN?CpAbSwjZ z0GN0S8i^QFL$C;U3Wnn~ZVmelY368 z?#uXZ!mxSFhHsK*-Enb7DeI95eKu!gF@erpOMX`~MPYUU>R0vW*OH@TS+J!tYDZPk zc>+zkOEifKoslQO!l-XX&}%v2Ymx_BSy~2})M_hixcpT?ojb$0Em>2@}9>pjH|gYFMIYD76m@3 zIJ(jwXB?1;^W&=gi`Tj>&FSbCsw#`brfc(6)~BQwtMvh;7pnE!X>E5C2g=-SI7C%R z(_03`2!2v^f1^p-?e05>tDeyPY2*QYbZ#NrL_A%82Zr~ zU(mDdxKn7*tL{wJ#XxXJYeQlD1Hm1xEg`r=Sq5_k{;vPURlkT2ZLw7Bw28D$T`}>b zu56<#=$Un#%vl1OiFJ_)k(S#djl{#6!;5%l%GSi$t3t~MVF1I8nWSXF=|(F^Ql7Ny zhi0`A&TXy)KV85;dGk~3oZ{>f=l4!2oKiF$a)0zgnK;{uqVm%e*zg_Nqy9A4^8yra{KH^HtLu?+4S@T4a~qIAte 
z4}yPt?{yDYTf)GwMZj?_4UBY4Ti^{xL+%phH-ew|jSf=+7my8^HDF%G1~X^Jmn1S^ zyz68p1KTn%cs4>DXu|`%>p)|TtbjeVCY0;Ryc`Vm${`FT2{kc^4zY{Q6BiW|F4I4A zBQJ7LjdBEbxI>d_Q6qyCbDs-gT{EYQ*iQ!}5B|fsNg?gDXtaUEaZfA>9(&~^>XB<0 zI*5vmXy%U~)c-2q7;MAv1#~QutHR@9!3$fW|nR`S?GwFjoegjHd7ccKWsKWvo;RyyvAfE z_$_F_CY5pCGVBX(`7LRrnmGT=PdzINBbANQ4Cre{F%EOF9qXUZ7tF9ubB1+v=nHC3 zs*ZXzcb%>+d8~int6+WIDzN@36|~I&EzP&UTo}kfR;H#PxNe@&)<bU=Pyn|63w`Z>Ev#FL1AluFDLm0W={q`eBmuw`*xpBK0tF3Ja8?kE} zL>QVubH+D%T8&QV89)?sTl1WlpPqWIZ1(d+adTlRRNcgK&0eL!bH2f|YH%tvcs4XR zb@c{k-$J;vTGmfO1Yg6)>uB(>l(VWk zq%egedWL8YS>E>INat0F=8qM!t|LR~NaxDO@R)D#s2Us%4QlZxDjmIggJ1d>J#02O zdL0d(^bHXhgQwNtL}>7IXmH}{4SxI!`7xZh zjt0;92G6R&snFor(BRb78@!|jn<$;Sjs|CagXh)YOlXjpv3?$2y}^I}<2dYWB;fvMG@_<-alq6v-r=zEsW8TLHDd`j1 zM&etzy@^fMIUD!H3jpL6!jcBZh1`;+8$|?pfjs9yqP?OgxiRM@=97Pe;Ja}Z67%7f zo+8_qB$(_$re%4(lbyD5P(jI`>}KVHy%SNv`KG?`0+m%;iVAOAV^Mab_duhI&$`mS zorOJIV+yohuWCSOSE;mvc(nho19ceuhO+U*WE;S!V2s7a8(aT|+tCTJP zubar~JcZ4H&SFjP?#UoH_M;^b93|uWA#f~?XzD4TIGoiB6l;M1bGpaD!cqw4voFeJQ z0aF#2I>8gm0}s;Vtc(Vu28xk_D`n(v3n%#U9- zvw`KXnijGG8?JIe1@&sZ@`!&xbVX*BxirxcTyIymaaH7>>kl3<<8jB&YE$KWGpvuz9sc zBad7~H#1NIAeLc(1Tc#F!iVWlbLkqx`9jgidM$Xub4Fyu4Lxix_j`u|KV&WaBjY*D zx9pJt${trjI~p~8-{MH}k(-vHHEgNL*EyiIX>J?E!VX?`{6!~f5;x(M)$`_otuDnF zCkKFxSBb{h2t9(qh`*8ug5kq4S8u7;kWU`A>r0<@ zb$9yk&g{fLU>i4m3iU^(t{SwU8j;NdLjD1@aV~YE&mvuEgQ3|M^;bJu**qXHUks9F zip+0W#QWS)n@cit(%M7ya%hjp7fajoc<9qExx!X7>r_1I6dQLlBlXjcu8G^CzN6x3 z+Mc#LZK?=2eeB7%AQk@l+<5cuzb@UtP0m;=C+%Zb4d=lzs0sf9=H&XcYbQD}zImoP zPv9lQ1cfX7S=2}d@u__7!M%ka6m#B=yyGB|VX;^s`lX0`p(q`GAbGcLE@PYU7~W^1 z*u+mqy~cGb|Jqz-vV!I>F_5srZk49f*=wt}jE{+-`!6ya00>uO_6t2-+R4}k=z{iS z#r3g|Rn~UYHa3;fS^G1Mp8)o-kU+@jv#^Vm-RPd9s2pBqQ63>F=KtfHDW{;I`81H7zoaIdCa?mdL zhQVuo!{j1|0dD3@Sq?;ywWaAW!xWfj7VCME=jre~#z7I*noNQmH6BEjLHlZ3cVev1<&L;fDT! 
zIKs2kZG5pKu+>BE9mB(wgZDou9~!XkXGtPV;B850p(3G$+~}Vmh0A_sln?Z-?dmP} z_Evg3i=_@iPV!H+%Bt6I0cxm4_|Sb>j(<$Ny@l_QBcG$Hl1hCL{3J|1z=-A3!BKuA zyYFkWP%+S%J!=d}^iQiddP6Y?hg;!d5=2;)mZ{(vk0y(S#!F$UkM7xhl#} zK@-`ubcE2nO?NyMqP5vS@mg+ItMaSHqQ0|Stdzo=O2=A_GJ20Y^Y0@q+z8CmWHg}? zvQFkRZV1Rao@M61G=1pflz8}uzKViyqdG5VqmvE5rF_PgQ0K)gGgqgvVe~JE?F6;@ z8&9cqtIEjB*AM6yGjP-Zlx8&XnJhC`9}c)4q}ipTaXv%5s>nv;T$Y)u!c0USq(6ER zklxH6BXREQLZ-S+ZZJ&-@xb$@9RKC_1bjURlDYwJ6s{q-5YT11sL(Z=&(IK@&u65e zd*`yu9Bih-sfsTFcpJ%oHtUJHm(lB=Wtq7uG;3BmW3@6{<9^rASZ;B}$-vKUwmabx z;>n`Ra#X1B4` z@`L}MS!S+lw%itWKD~4{edF(u%W#+1|mGR?Lv3Sh0kS0(0_mE+uxIV!BuUTfU z3R}eXg%AEPbgXbf3ypU#(@j&r5F&<$cf9uzo1BF|*5_{xznZHQFHp2?FRizqPmq6y zmdL-$d6}o@%U&+Bq-j(o{}_Mi-o08!jr3;H(EW>KVJ>E#WT0(hnMiGv^MWtGa{8Xi z;i!`B4!?tG>E1_Ap%1Qe)RY-;cwP+p+S=+_DnKzOFA<=oM_y+yP0C-Tmy=HbT~8BQ zxsK#|d9^-Ht5ip;)Cxqu7!fE3iM~sw=eCcRW@49X8!xY;gb@>K!QI%_s<{!P4BCtP zH0(6rlR@;N+;~-6eQEYFmuIVA(Smd&_Z8K3MgMTagDN$Stx=&%ggEq3b8O{I4XDvB zQ=%`R2AL|IDYkCba!{qE{MuUnlI5UEH*@eAp%kigGv_eZ0yNjDR_dtbph`EtIc7Pi z(pg)g>-jPlwljP5Jk9gP@XWeX9x7e31mHn|!w%PRE5&(qUMU{NV|vem>cea_{Bd|b zS=RQ$@eVlN8~hQ_{?yMb;>B9j`U-X6GNA3-;Cc2xBF&tDGzB|cxSbs8u z$O#@Tu72aWC-gC{c`S1q`%WBu@G*S(iHeavSAX@cn(oshZ#Z{;$d3rn;X2xfokPX$ zQ*{=^KUBvxrB|(P-7?j2evk!YA1-Rm>M(ui*@HJ=3*_a&8@#eUEqmF$0;=s?|LCje zV|?S|rRD3mWl0^!8DEWz6kM<;H?c<O%AVE!lg6Za^z=Z;Bf6r#;t0LE#b8OYacdW4CH?ex#V* z%E>+dN+Mau3W}Ut(s@-J!~DJ(iu>ANNU=w6<=_z)_H1*mTPqV5THWq1Y%MuxUwR|O z{aPq=^QCVW{d`32vJheocUX@V_XE4VfrlU3!-MzlExetRtJG3)hd5+d2H?ZKGnMq# zofMP5>2P;yzusPKz+iq>OK;s3{Z5e+4>5tq8~3)##O_0~t4OVn-3NJ;TQHBTtW)aI zBTMSEb*@#=2Bj$0y)CdcdgJ6>m+lOC&s?`6t{BjV-3I_FwBf`Zp(g5RUnFjfz5 zt)4+ngv7E!)^B&0KRcS}Y94%vUQ@r#jUG8_KUvPbroO|yC;aT!&pX^P|1mCo6?cj= zHtXCm|3Q5hM6;Iv6_dFt>uvbg0?(UbM>tNMh~0uji62rRS5Cot^hn5rbIh1#?O;!% zFZp*)x$3B2oUXYi`qQFzsQl;fl;pqXUq0bqa!EEigLf|0ElA=$pLp_(l0otl?ZLRG ztP^vn7B!#6!BukJ_cr{N{6S(20hOJF(Y0c;H(;00uVXqbsSI3}%bC$jFDClP)r$5& zubnr*MpR?VVZHnQ42nt_hgjm*xQ~2nM20O)8|&R0|DI=-*Lwc#KOITLxb1Z17$clH 
zXX*5F>YcKV*O`5ak=M|T0tETNy)ES}h^V84&HCrrl;^n=s!#xNFRwZAml9x*y94ab zrGx62j(g3e($MB;!p+r(+}nSIgF$z)tMwIeJbevI%1W=(p|Ft#QP5Nh?W|QwXf!?H z%2J$ctGEeDjj0s6V6&9AjI2+Wd_OL#9}BtuU9{)z#f1Pg$naqQj;gA)s)F1zusH-p z$59BgW(sutsetE#)XC@i+A^9bL;RF#q#3Sr z>gU%1_fvmHtad<=`Xn!Lm;*NHYKk530DB&VT0tgurr&V4&Tas>P1rkzG(|gJOh)Wb z;^?hjhJjEKLRkEim|bLD?vRy}aBAE=rBBYF^MQB+GCGaVFO6AJ|P%3V)!i({-ePf1GSRo@+hMv>wk>)bs*u^2}3+ zH$Lgp;zam;R;jUwqHzo8dzvgwY0d#0`5c~KYm(=YF*+VEKoit`H!quJr!+!7!Mw; zV|o{$a7F%};_rPY5{{4WB8pR^Dyg$Qo@nS?uGV(5`jl~)7W_I?>H7hj!+Fg=(-Y4* z9lhi0&?ogkfP!BYVqP}#&_%56+K4u^3~5#Z^%**s|pJo4@v+efjb(a z-22qiT`eqdR$qKuwS-WfI9X;N-I>a@+o^prQOC*S^H{((Si7KHb!7SOfExsg4x47;pKlbv1 zj}NQnBTH)5NvcqFw>GaZj&kg7$?>sym7HI{`ZFOP_#WpccDAfX5xqW#wYm`{p)bB#U-^u^G_gm-5h_2NC(vAiCJc;S;n5w=uKYE17|3?&pf>wvHphSziRUCiuN@W`lAfr zR-hqEAM(ks`{WfqdB7*nqlMn0Nw&0?Uhw7&Hmdq{Mzog$YV}_4Qi0DEhsbmx3|#Ffsg@`AHaN(}PM7_-TL#d> zk()d$ePT0ib_!^fI$`rYByj~+ML`cZSoTR-R)Eh2o%?l2)Q-V_Nzx-X|s zebfckgQ7BQL6d(Muo(**~Tm4 zu#@sB>CVD958r1s<(Ez{UC@6E0Dx^ajxoji2Nka!FLD`w8adZ9Ia;|y~x(gAiF?5*a z9Oen<6{Ex&E^GRtJ#Q~A_GooBEc-boUhJ`-_yLo3dvlhD5%5c`vRmwFI`tQ*OsgC& zVlYGQw^T5jIf%9jIj7negWhmn8& z$nF1mA@|EdZrJ>}e&kMq04I-^9G@6L&k}MEDFwNgg(Q%>s1)QbCW$S@rkFPT*cA(xv+)r3Ax>?WeZ0@p#1Ri$ejtu zJs*&JPASNpQVMb>m4e(eNvr6Bi|Qjj~Y6v{oW6yzRL3UWtT9$bN30(t!Wkz4zD zA@?UjZn9eB&e3r<*N)tAEE}TS<4U32V@g5ps8W!7L@CHUtQ6!PQVMd1m4e*GfZR&~ zxfcU+=a)n7{QnH(_Wrz(`|YR27EXZ>cZ&PLa%^EyNCLT+l!DxgNnj3>(2|xXF)pCWJ1u7NiDUbenp%?UDPzw5Im4f~mrJ#RW zDd<0^6!f1}3RO-j1^t8|GW4HT3i?kf1^wf4pjoQaj??zf8~v!HyHSa4UqrDl@8T{( z?z%nb*2;J+FM78&d52+dfW;#COX|JZ$Xr6dbx`s}uk&7%&g=DGjQghZF7}wti$?24 z=fUZ{&5?1lM<32<4(Q8RkeVR^OzXuN)2qvWuc8b`7XG0Zh;3V9X^Ezu{Ebk;uA#{0 z56%&2D(wR}LEIP(;Ay1{;7O$n;J8u-a7-x!cuXk+cvL9^ctj}!IHFXloKgnxau~qH zGi~;n;VtcLR9hm zMmI{^PanxkG_nZX_W% z&;SW>9N8XJC>T6J(mQo(x;wo^TU*`fv$Y!p;mC+ktl40~AdQnnNay85{X5@;s7GYw zMg1^h*bTRf`YDNK==`iw==_XQhG0S|LvTtdLvT_lLvUOvLolY4AsAK45FAy?5FA#@ z5R5D*>PLPqq8{5^p3ZlqcR0TqZ$>s!mS00!mIXs3>>XDM_Qs0K5bzjnua1DPW_~(SY^3;SAb;_Usn?$+^OJDrmxyiX^b4C7I@g0vNuymJ 
zor2r%F|vzWHkUiJO6=qF%N2AIYR1rcTq)?pEzZzMSQ6I#^(Ai zZQ==?e%<2H$)zi@eMj|P(w$3bkw`XJrP+%pD>h9IdFMLXkT>~j089Tu74K$XYT zl*mFnPpNpGQt>>c;(1EN^OTC`DHYFCDxRlQJWr{3o>K9=<;V=rsi%dhB?e6J!@UpB z;Jh7Amx_%W>$~M7r2Uhqeh=xL9#gXMhE0D=;qG`WOpWrlg{hNq8%>?kcc=IwM^lS3 zql2kSO2O1crC@4aDVUm53Z`b2f~oUL!PK-;Fg2wVOr2EFwM6dXB4F5-{1hF^T+AlbxdLOH%R3+P7IJy>%HzPVU5bJ0 z!8v}*1z}F9kz#yl0}-nIEi%bqy$?sw0&e+8piQZ+Wf*F`Tg!mJ-|DiX_}d)TD2^oC zs24Zb&L2`bFRz#uHXmc&wLkKr*%0D*zLY!A8E!ceo@E36hOv8 zVb;Cx5O}XEwq?Ie2X!2w{Jsv?5SdF<3EhkM4orFN*vCwOI-N6L8m7PZW5D%I8#5CP z7OCEu?K9tDbk{dW*G)W$58>Os`AEVzMF$(vAhzz#l*E>w<~G-eTFX{*Ns|+Rw*B3V zD(i1nmX^{G>}zR>1wgZ_Jua$5>x>~YFjy;=mKaqVcwM-MwI<7zh}|Po+}ThVOm*EP z&QJ#E6~)z{Hsyc6xlRUu14H#HD-ro)CC>!$7Jvr-unmePL}Y(UV=lGPF1k&`PZ#~v z6D^Jm4yE@diybYOr0ads=KqMlXbU!oAJgWTgNdExN>>$7di#?8HERdft>3V5(+z{0 zw=mmdoFbyP4(^E%fU)~`1Rb7EgG1Y+!k!4%EqBp!-jmBAVjADPE6eG=RS|`fR!k)5 zqy*+5o9?v0Lx_ol5y^e>1W}Og^lG}oH!=set-SQVOUd8cR{A>H{*p>_9jBHEq3v5% z-uBnuLg~-5ZqF6JdF90)J4A6J+U81cU3uw`{3&RAZd>V_R$lt?M`-(5ZKWG4Fa0f* z{)M*E7D8%e1OB(__ZQpImP*9n9Qb=lqRWHiUWQx4xb9;ZXJjy&8rWU8T zLa4M~w+heN2pwSAdE3V*2*1i*x7vN<$`N$*6BNfLmmAt=ti1GBRQefhrMIlS^vn~K z{(3ulH?O?-ZwqB?q31v!TzTm~|0t!u)ZX|FD=&WbG{v{JHNI)(rH`rft!O`hB*2$kwjB z_#eKN;!m^}U$gS!-&XN2v={GRdGWvd5M5tFZ^_DwFT9`PD4jWC?^}84u@6xCj<(Xh zD=)nuyzg!+-Lvx2Km8-x-qBVXSNA@zMC$$;VnSH>YTVM+lFJ#A2A8N5IkBp{RP=|q z*j$PVheflU*&s(O?oK(#s+>PPa&MasZt>^rx+#tz77sQQWC&Hp+@y>4c)(vqz+C&v+{(t7)2FR|fy7N6B_p9&ik0YsVwLTo5d&l$AN~}m!3~~$; zbW~W_1q8noHIJ#vtD#;}HB`6j6>{`E^D-IKfNVD)paEe72-5-ClCf+np!kUw;c194 zN+N=iC`KW|bchJbi-%~4LlhA9`~BD6=iGB|cMDmD8C%fjo{zov+Usktz4qE`TT6%Z zmjEJKW5&GFozZk`m9vfetSdUKn%P&b607jnb3M8WY^>7M2OPXSJW18IGf`Led>&s# z;2VQcc6`-D@+Wy9qzkO=wcVI$=C!MWbH0MJx)MA(uPbq@XLKFDDfpVMY&SmbuTT5u z|7}(J^VZ-Q8vH@GLD6W@1_{MnmPHEWGin)G^wSZnk34%=yLz`M)CJMH1UxN1{0qu;>! 
z5w0H2>0US`a*MJ%3T_G2bJ6-M)x+VCDvS!!Y*iY_O`F17C*RZ<)lOST{*?aqbM?&FkLT)LV~(q*$9lBRUaA&X?$H&t-aY2odiU7# zW9!4df^fCev@t+S(sj9!e7r9DW_mB`Vt z&R7qY$2^w1O62je9wjmdXxRzZ!eZz?bxCxK3mLpTx=$Fob0spbPJ`_;x`OS~Ikr#t zV*64m*DAQ+D|kj%uzg-vu>G{IVEa3|g6(tudeJ}sQI74Wd$D~frZ2cq!nwdue2FA*&q6iVzwKBI1-It<~3pQPkVl3kFaUi_Howj9o_@mv1DK2 zzE1^tc2PTersQL)_w3|kH&6eDMy8Wf=8}?w%JyU)(3Z%Rqyv#s%qy?ZjT1q3e+ zpB!``JxCKgoYIcd1MW8;04WI2y!LEGmoefY=+7f zfMc6O960t9Z<&J+guI?~Qy^|bc(Q{{2rW)@SbgDoyhHSUsE+JpFLgb+43g!XSG=go(8d;_206)Ar**M~A2LtEYuQP* zPz>+nWqrrd~5jI+vviSQ}zlVMqFNrkO52 zMu6{HTgw&E2bgGHZ!aFYAE*9k@nRV-e!*Ux{1n{3ym+x8b1LqEIUc+3bvZIuA)-Y!OC0NA4MADFZ+nrH%385SJv{^iJ09?&bWWQPS zyjn|Sw7?@(rae+Ezj8CUMh$nHb$g^DMYZf^d5&F3jBd8gdk=u?fjVR>s@55fY-DY% zPKJ*_u?RsrDr>2#9cLUl^%`1S1zr=5pGNw*#G8ecEWC39DFRz7URRzj+nNc2O9wR; z3}%D37iX8(i!O?`D{LO@g*RU~Q5*Hrf)&eE@pfAt)C!8WsP`{h@kQjm$Q41pq{7jx zSO~QL??}bED&>4t)vXFiyx>4#TWMKL(HaVvu}L=& z7THJ0EWFmX0Uu$_Bmy6WLLmbH+nW!qNnhgf2s1FfgtZ27%`5q3X=B@))0gN$2C~jj z!MNLievF@+rKT`R`N0d`9hW2U7M4Ra7B-?rhM z4{dnw4(j7Xm8qJYm7*GjrDdbHFH#M$7CqHevg@=nzo;!9kIu)y5-V~J^Pr7k1t{7{ zUkY^b0Wms0&gJ=B<$_mWQf8+ zT^aokaLo>QX1kJju2U!AnUPD3{!7Pm{jG|2dWvF9UPe6EyT3v~J4w>)28-vizRq`; zhK*ooC%+zawUfL3Kf-8-3onSsxge{NwhGfR@)oh)utgzCwu_CEb_h!OF_zh7Z6_vq z01++}b-`V_R~gwZtyO{Ea97bW5K|$O%IBU7l4sYpn8MhbErZENu4(VYlc_`kdwbG!- z;_TC;kXleVLOt_ioGb(DZ>YU`#KgOP76B*l$Y$7A=El&NH0L~odhqg zlhUHm%fkj2a3*0#Z@OeKOYxeAn6p;m>@4gQXQ%mFORFqadwzPlm!HlWKMh)f8P6i< z`J8!_k15VBQU{ztU&Xc;R5^#Tr1AP-pW9-%RljFLo!+r}>m8j4UG$8;(y0pB2aFqe zbY71X7cM405B3R<_dpS`jX)PnKBeS&N~ITN2aRh12@+7W3kF`1ShLa@I)HzJ-`Xi+ zwGcdh84%9st6m6+Du2FQL=a{2w%Ut=iySDI$weSH&PV}V!YDELmw`4+p?ZWR z0NURE;y{}NmBwlm@u$^A=Y8q#?vJJPu@}R^zmMvo->|ShQg(Um$8w`N&^M^ zdaRKXx}tN*8+1zTDZ930h@@Dhfr4QAvJ9QE_FU$ur370^xw{f|UUmOi!G2m_^@`0o zJ-Xn5ItNf)!RB1|GEf&hQ1pj(r!^p=QFd}&d_Y=KdGF+8)SWL5{&ApOCja0Vv95E% zw{@NERIxTb7(K~xK3{(J`J--@R_=Onpw5AEnW)1pVqIsa znxXE%^Y6OOJiq(J(RByPWp*8(i*^05?>d|Ip0B`=Z7;d!#nE*K%4K#PhmLjqsP8)4 z^`39n+18g#zc{+?K)KAWYYk@MSP_MqRCq&gPS_v^Ab;oh+hgdR>Q%ibOn(TSiZ>@R 
zBY7vLy3{b6E=-_s3)nEjQPcCPmviM!EwS;iG|Zgut@fN%i(?DZjs#cT(L}SIA3vI0 z&{w^q37aHiofmIyKSSHy(PUSxXEfPalS;&MVclR8rgdXRGmu56H%pEtGo0N0t``S~ zI8ZK=L-2`N*Z2Cavmf>Oc3mgkzx&0}bqC62c0F2DINXfe3&*Ump_F#9CDrQ@jzhhi zbig=Cvv{TvZlb(k7Bttx-bYpWk2RLazSqy*B;sQx?b-V<*zdCUiC&Z3{{HUKH#|EEwZg&G*SCZE zxOUln`{C-S;XJtKE0diD9$ZWOej{Fg++4VPHuEbaQ0t~(6Tbp&rH*X(h|cX%_U&7E zJm8%1FN@w_H|8zxG+!3oX16x+)uGX^HFHVJJS^4Y{*`yBh-`39>NoQ09Ips9;BPaz zi=~W=R!A&n5q1iVA$72R-)Lv2?cyez!BL!IGJUt+8E|;f!pbJazr^ee{#dj8n8SM) zYnd#?4VSQf4Rb{kcBhu}t1x zay4%(HLTU2LmUB2T7A`fniK8*?5qn8E50I}B^^#K>ih>IFWpEfFs}mh6nL_~0GXH< zFT$iBOj6|QD#9X2w@Q%0DpM~~=;bljJJJplh~|5IlxsFe3#?3)t1z^jbOp#ijMLf| zAR{qxF_7cjf@ZgN?6{pd zEqYazaQeEmnlDAJ#Y0^+AO2{(8D{2={nyFOIJhx5#KHrL!{b*)540osRea=l{}uN6 zE9f2tv(1x4;v-p|nioA+$NC_(8dUVQqUbGF6u97e%AN=&vSz~ZN_kn=)UOrazrnue z?Ch6CZ{!`!4?|bP(5?hQlK8f&5*x5mYYzYt>sQJE;Hwn90fDs5x#*I)S?qdB^f@iM zoQ5t2h2t*jly&Y|P1{yB$rwXr7xJR>ArRbom8SBiyOp1Hm7i7R=k)rVq$Zmu7;kU4 zxE2p*R?A~)(UtK z%~*P>p{(Hb0T)IN(0e3A7!V}J7CL4kAl@zgusfPy^#Y=J4Ac~0=W-eNBIM3ik)0|s zts>JAbmfv1Rzm!crxp1{@*)TLIUfBdZ^<%k*XvwHRnprsT7$@IK+lhPX%2}Q|y+3dH69ncz(0+Fe9&rbK*nB*961Fet$>5iu4UbS&0r%e?i9 z#QM%!jYSo*ej|8xD>DPxeWV+GE)Ldty5pKb&2nRfW75^0Mx1~7Vb7>0A;FMu$P#7g z$7*Ml!dg45dt{R(3bl4nt+CEQ>>?m@t_4lKx0cJND84{Li6zibIQp$Z5*X!FU=2WZ za_}MJnOW_UO87P3CT&=p*rur6)O9K*$_P}gi|1mQe8&awafIWC5)K{X<_8^_EMBnY zm?%`cq9s&Ql2s;DCnev0vDc8R5G>_0!Y?dUn3(sd6#Eal?I~s&lyr?u-_X1ol9BoD z1x1J_U$jWU0`KcD;)c>fu$*idWsm_F!H`LGfN2FH9>i5Agj6C03~eV3ZACY}s=f6k zv@yRGTEJMX)DxBMF_%?}Q(p4U6Gf6Li`68UZC{@;9tlXF7fYtkw z9DG9M{)%YPyg8H$>5L}ZA!>p{*rI&oA;O5;4As$%`^vsfNzemJMwkVx(AHeSi!v=S-o6-b}J`PapGQ}!%Jkw(P3iJ z-bd=DaZ-5l2g8jGz)~%`fsM%bsS$a>XWDLJMIFD%U@saJuwlDeK>8r zwKJeYn`O$r`lbN0Mg=kpz+_G^uwUdrP6ad$N959oB16scXIydeb=;F`XsKxeYJ}xBSxdev8xv zMHAP<4fUJT%FTBSh199B=(%9qtGNWfq73Jf9Xs1Ov@JaZ>=GbF!#?RJ1W-1?LChoU z3Wfn`$cC5!5x`0}+@xyP@B(b?K&v=I3yeDu2L+;rTky`dA@xfoxo-h4)bl}@B6ZE- z@|Vn+CqqP{mi#jQJb7A74D2avEiDBr=oTjwP)g^u)(XKWyRV|<)9iD)tjRvcMJ~>Z z17S_Eno}I;hGb=78@0*zKI6U)9%m*$x)m^1Z|yYhfH6e(4}P01a$)jOa3Nn_psetG 
z_sR(s*T1v@SH;?Q$MLzaOB?8{1BzLQAV{x6?d3`^h7 z8$!#WcZ{|Jw`%Bw?%T2v;%Kw6*FeyCd-lGmsm3-57&A2K`h}Da{ll;8SmwwBl|OvP zX?mX%Ie+*ExZDeR<`18nDiJw<`1L2umF$=^fB4m-6z?{a!(0PqhKn$*5m6@s!gZ5`OmFV4jIRYt7bUddJ1k`Lnfp5qL39W%@~Qw zAlTDpd0cgfZwbeLak$JSG6qp{90hD)qIEX*mcISiS-#CNlGE}-I!{&{rO~*Gwv!3l~gFwXt6nS}KLNMq(R?!Pc4UUx z?^`S@J1i>egK1;T^}+RWkmUdL0{v{nP32$Y=u>dW##PLV|2J;0gs8;Wj>{SjvoBb6 z&iP?U1RvwD_jN93rjoj4kCg9;%zvPKe_wH9=I#1m4g(&9DLUlI;vM%?^e5$eDte-P z|9El3;0kvD>iw~J&Jso8QsA7`ZCR`o4mr#^9Ffn|BCAVk@!t3Mg8ts}JuTi{zJIpc zqS-hO5rW=V>bvp%;yc%k@0RbW^l!`ePZc+2(ytHhi#29jjRoLXV%wN%nz|%cmnVTt z9s5orta|fU8Cnsdd@n+j@9!&as8{&QK5K;awSER9`D&jmzVu`%--|5e`^SqL$$OZ6 z$T442knWc4!1V)hXH1=h?cPald28Rj*`Ig*{o)D-*6)_VIuz$neZOePW4U}UESK+} zDsC8?vapR+Yj&{!=(LiLnVsUvckc)Kc%pny#dnwQpY2u*Pb{p|Xqn|*FitttW%w7+ zROl;hGefp^vmm)AyiM(C!YVYgBZL%pvOTTG-N(7SJy!0C_@#U=ektGISKN5P{UFu< zW}EOxn);w|3p2)TwC`lO_}nv1`Cd#@zJI*95wL+LkM(Fv@T)n+uM}K2_49UTi+wcM z#_W@c!zi>#e~Y=3SoQaE#iqHnzw@X(+@;y~3s^nhmG8xO<@=|K8!z3e$cxpzAME3^ z^1b-1eE)2>A}<#IPgyJ;>sI?%xtNGrz86u;_xBYyxf1wXx!NP$YM(5=^zi=O5C z$BP>;+aCF0_K;QY6x^p{r;k5TLT*veBe#4n(vxg08)50T8t+^*i8#U{Z zE&}0Tp`Y&E-ACT?y~ta>f418dt80ixKWvRkzP*Ge(;*8=CStiP-%F3dlyrZ&I+4A6 zFS3{K?<;PQWoln8wV?xe&Ep@oSFrj=k5;HhG=aZgO?PZc*>N$kQa zl{91s_B85t;mM+itN%~R_tgJH`Tp_Z#;c>f-a6G#IaEOZJ8?R42W=vBHdl?Z)vqId zE=;5C>`^@bv)k4v3yV!rV2d$MEzgEuC#t4{+xh*D^WiqKbj*d@G+8mGKs~LHjxxGX z6cpVb^Qqr_7l@O1_EGW8$GUu@tzDqhOcgVB$4P(?b?|V$ zPY!9ROBufdw;C!g% z-ec-A&Q`JBA$(m=>Ygoco8rHD(D?B5ek*0{!McmFOtcjQe%{z*4JUns*?)CX&j!K# zpZWQ~n2qU=mvm(Tnd`=P%lCBSZ_D>j6*tnAz8jBQH-^qcrY8atY<%dz1SSsd}fU?(OB+N6Pm?e);~s z;zop36a-j)q>F-ki}wx%?=9a`+1=&)XS*oS0I%V~i5k$V)&geJ^HnFW{k<_d(;GjA zKUr=-Y+AktfG5iLj~6$_IB9cDyXnO+%pNrKn&{&>L1_13IH#JazimwvA({T?m-zAFFS%ANw)HZafx(Cib=TiLul z^f1ekk{vb3;E5@v(KN)MvX8QF@ugEJtGBMfBZOI}N9F9H0EhUsmag6s{VM)gz2yH&hJd$IN$mJ?CgveI55%R?n|%Af%6DrV zco8_(m8PrDl<%S0)8+fK#f=koo5y@vymH(iG+tT8?Gtv}zS{Jt90i(3U+ty#OI?=T z`{CZ!cbD&JeOLK@XK|xGD>E0J`;z)(*(1_r>F6D8_IAMvFAwQnI(AbaIb)eu+C_^( 
zLB?6ME8H(4Wk=jELXVfbUu?D@eW$*|X<$oQK}KpWwc#GK+DxMlbRQ6U!GlkAA8^zg z5AN$ez;VQbPjnw}S{@H(yARe*q%BwdN@A7Vud4|WELy#4LMq9FFw7#Y`{rjRlml4M zUPBb6`$hVxn)^kv&{Z$Dx1$qj0%_8fz6L~dYO3-w;V2_7wc-WLC9?5Sj=#aqK+MM|BJ9seitCvZC?RdbTu z6r6`fxP>uKQ})F2$zgo4`cJABm0F|d5599ZAF4=&J0)Vk`~{zo`n9650zXCIV~WAN zrJoatAR6Bl_pgkmxw=zd#f zQrOj)^w(SmenTBFCVhkT;f(t5Cf+H^dLSdLbTK|vB?`8_G^0vCUy@O`Q;SY(60te~ zi3HxlgYJP&dU88Z_4D#?^VQSAV$3`U=Rg%jPZcKv5eynPB>3nEo}#B7`tLB-+8&PV zWoAbCerEQkx%`oeHhYTx7-sfT@HG|el>(huHEI!IMy~!Ay%c;y1s6y`&fx{?FQqAW zN^gOZU0nIKQwx;SVLy*fv(V6|^H7&ehCy4QFF&P}E()Ys$bfxiDOKo8mk6dPRc6Ip zU+(oV?h}lT8421KxO)ZetX|Kus4Nku@;i4 zl>@9xO}6-GRB2`_8$uFeF~McgY19Ib6@}3;D&)mdN)R)q$P4{xsN}!XuZjx_B7Fz1 zIt`yzOP_S=Y^x&v!&X+c(5gJNY@pWFe`a3{-fmyyEsfI>Mx3hawj?W%;3@VVSbPxH zf_>1&7XZ8Xzz|I))rKXJrF7nAOg7sImOtsSf`zHrWryH9a!3UktF1DeWdgBg`ppQ- zISN%qtct z(wFOq6CBu2mN+PJI~G)9e@4`yzN7j)`!Q2L`%<~q6CMO^y4V(drN^zWHuQ@u z5j6{;#N_(0NEfpOf|ZiZ3?BOFIL;7E9n9>7;HdRL7*@LJ)3Ev~*?Xq$Vrnf+8@8Fz zOlj#GFVt%CfyG+p;MsUv%jv}2g&A%QnzA)r4yB}qZ{5$LwD#k2IHjAeKk>&{k}hD4 zPpOP}p%LkwMPIlIOZ?fGnLKg22Ercel*a;U*w3U-W@{A(QY#5U;| zvtQ$=!0ULzY%@v@;r&wLnek4=<@$Of1Ypi>0;qbJU^1KXnZ8Cl7J;3HZ0yvaNoK4; zqNU;tn6Bpnx2F1h6KAmJjk@N?e4~hq8Wl^7fg>)dI75?aNsPDIu%rVM97vSS{Z*01 zJ;;u5lnKXf}suN-^# zY3sxPXzbmG^7zSN@0rl;isdXsG6d3{{^6uMnKLBz?{UpXA&BUCCd$6cJV~Z%{*Hp2 zb4q@|{6YS)HiAOo?=Ad(bkXn0c_+L5`wPFvEMKJlF}`OtMe{a-Lr5dby`@j|JW$L2 znpepuu4!F44}9W(1`mAVC&~jJ8&y+H+9~ZHMA3&ho{){NV(t}F2 zXRnj0uZ3akb*G{kBQ&3C>*~&2hl=NN|7H-@>rvg(hu0gLW;-cGIo4ZQmAlUqdd6MN z-KlZzSV7a5fA(3sQzR~Q&ebTwFEwi0GYQ={H7XakN(ffUGriZH`omS<;CQ_g=sez7b`C&_v`y!F@s^*7#Z@aZE!&_KG{#BOJgyfSev%FCKzE z;Pk%bI6vbrAUtojY3^QRaFgIcH8kh>xq$vNwBm3~(1fa=x^JHLp8QWIJtb;IX_Rk3yvJQprX4{sB>fE%x7aa2Rg}KboDvTe&MFz2JAAeRFdouqe(W8Y$R+R zZr|4P)ueW=*l3T*Brtg_dPoXnD)U2|$Bdf3M5Ar=86>htqF}WI5=D)K^B~rl zPuw$-o-W@r5}z#JA1`j4gTx_1F_Y-m+f zB%}2eE0!%AO-5VU??>$@n|{bv>9^VKFe`?-`?%d%j0WKipR&6_Rvh)|DR;{j&p*`dp%eDD zDOY!t9eCRA1~kq_*|G219V7Azy*qDrjL83*yOS5}ju9DN&CX4KSl}=sk8^iwx7}GB 
zV3f_@Z+DEnf2{`hJYcsBzE`Q5lXl18t6rV^OWoOw+2(6DF=+j=%d5>C$K}2HE~X@dDLf5g-VLQkG7vEcPKUys~G;n$1eP89L(qV6OH*V$*OAgY=Yxx10(K&_TTn7p#{;w$VjH5vB*%YRb-S_bX1sMhLxto7!)bf=?7!Jo*|Fsw zr2v0!GOyRgKxhY1%XanvQu4*Mt%qfsWRE?EB^KpeoP$fK)_~%)_%8&K{I#xp5={=1 z47<@l!`c1MG2g9d1|DW@_6W*C+yCG+)yg`1=lAZ>FZkeVXYCg}@Gno)(9af7lWve^qr-A!4wNL-xBj0%N z%b)wp6Q}jmeTt^`INN*|+rDBIb2v;vAon-s_Z#m5KQ(qwYAd5)0NS>g1?Vy9%u=Y+ zKN;1CVcqu+4J=DbQ9XEPa)_#eydYo!4Jl51>$j!1*Nb7iwa*Yjks#9Mp-W>6-x5Abc@)d#yaV=@!U2(jx@$Fgl z|CDgx80FhaIwY&DmTBogY|=MGOLg|Kq`5uOiUDF zVQiD2>^tl-JfTBaA^J`Z-5q?V7U4jfG#tl@6(XlNLUdiUnZ5EjIc@iBwG(CGb}UW} z3pEEU{9DWRV7ZaWLj`Nuai2(}ri2FElTJiBVLOE@ci^zDZ3v%^>|hPHDXydTP2i~u zAj1hmkVLx&LP-p}Kof&>JUUAaK7Sp?YX?4udYTqgozG4NCvy~;-Vz%x1{%3Ucxu*W zZTevWheIyuL|PZ+F{#a5mor+Q?_d?E#rcZ(^MbSlF~_wF7Nrj1%Z{9pu-OhVgpOFees|HEcZPOGd!HvccC`l(q^a#Y`*9ltx#hF#9x5_jdHwQ43gYmNM z?GQNS46x%)!t9x^?=dS4iw)nh@DOTBW_|$I!k`%1eD)FXF;t_E{G!Nc?z4~F4D0sX z^F7b#AAlcyC$~4h^2@=Ge(TRa^=IdQaP6iz*1cM5&arvKr?iQi^N3cxxK zF(|GJ_P4&>iUyrth8T6ZOWh1s+7GA$#=dqj>%i-o z$I5VuAx53g&}R47c*VYE!l-l^T{GFPfikvwTTTRLf?7^qDd(Iq`M5Iai0jUW!4NTZ z0zQSWx)^wDu%QJupwY}3WuWmm;2*Eq>ZZ0CggzOwwR?x1N3=cL@tf@tKlvE~3OUo* z#zFcHjVVM9vYlIL1;{;Osl!6`TFK_zj^|+)Vy2@qDUdd93M{>qO=1~oMRVwBgN)!y zdGsXUSAm@rcs3ZY-ghS!51ALkZ8F39S}0eIrcy*(8FqH;gJv`f+Q!5k2S;NY1EJto z+~em&1*cpq4jZ)yNhFDkwlq3Z?0;J~)G^m#y^ zF9be~Aqog|whlv$=K;_H`@cq`o39E~s5sOU?IIb9IXyigBM7OHn3fw{r0u!b(1>_3 zdqYXfr0*lv;-VDoh@~P5aRRZJXrYOd4m`G8K9UPhuaZC!kwCdY)0d5xhjwz!QUaxr z!ybVGzDkyOfuf-jneryebxWdXuS}*TB}!e0)djb%BhRQhfS@>tn+7qSA=|3patwN* z0-#)|^esZgS7o8HjCeglB|IY@*Eu$AA%EzTB`9;9D8pRRN~D)lBBg}B*O=CNJr=x# z(gxuSg8bk6v+A9M?TbUh+lWRJPMm|02fvIwh5n=@`j6!&?Gs{cd*}9lE>=P`faBzr z4s`DAwdT0g!E4xo$%D%JHLk2*h|na)kqi~8GEKFINb6Eh$B9!^(@gd#;B(T_qZ9MvRdIs=64j>Xl@rR&^)C1%|^^Y z0Hxz%aU_pis5HG%p#ck<_WEjxrYnw;KZwG*mK9_i;PgQN)x!?Dn?ML?NNhxIKJiTE zlk7#0kvJPwbT(+IWY;MA1b^q+PEBKh+^*KEN=NxlibDLNVKoBUX-VaXJ(1Fo3X#<( z^@@l%r!VRkr>~x;l+@<)WG9#zk6s}j7>{m{b9X%2h=#I?V|d4Zaf{~PNH#YVs35p$t#L%0$8+dR&>ZYo!-QzD 
z_VyCJG?s|6v?P(1jj%L*Mhg1i`4N2v${k-8Z-yg9AsD5ZE>oXnvJ6WvE>||Evf-73 zRZObwV4&GXuPpCh|SY9 zW2>cvLV}!A&JA*JY!U7ysm4^a8f7ARKJx$%7{KWOybc~6R7x}}!wGL_hs`C<*77yg zQ=2}kU8q-s6xh$sb{6A6N@r^s&2mW@StFVmu`ptmCf1J;rf73PE)ytOEiG9sF15QP z%GoV_Yy`v0G?5TcL>oKI@VXR*0F&&GUZi?$t(*(W)k&tDtyNHINtqJV0}E{}M8yHw zG9YC|*}}K2QcG;4Uorl?Sm|5D%Gp|lSlR5SN34YI5@*VUM40@@0zHCWX zv)*r6vM^n2Bw)B;C@?E4A0hGM$*^NwL}fGBVZppBcA)`>+RnoeIui$5fzfER&H6Ob zs9~g|;0{|Bjk0;1+TO_lFP}*q;ps|5Np4~c!N=puO(^39cQ>S^C|go-?BohzFUq}iakg{CgrW!e zocf}R%XxRB*^byh;)V+vl-VCqG$t_QtH zkxIstMMIH$4&=w#K{NBtEyvEIOvbrK=V&H3Cyip#j+#H*g zGgC2ioi;6%Y##&>^=}# z57CCM=hPO}o^kilDc#Rdp@xm&P0>-^&rzH8`nc{+P?heYXSm~D(n1Du-5`7i^vO&z zp~}_sgRH@NuxAzI;9$g=e+X{UN7K40X5uZI;wkqm+Q(JAwTEkVd_+EwFu4#rg|4Mj zvzV~~HQ*WHnnpErZfZt&c8Eev?9#|hbCSDAifWp(r?7zUBYd}tqf}8>6(o`;7iIFL z=JT#%X4yn`wuJfY2oF3q4lNUoU>?ls?$0?IW#$4oX|ll8)64j3imAjoeyG%Le?6e< z4#?#1Pjk&q)8cxa$#IYF&UkXg)7)`?&ob)W>wZt`_dfS~Gakp;Wf2-z|J3Un4Oh>M zOvsYXPD}6|rx{!vb339;%{O$k+`?9OD8(mb&J&x@j%b6?A<4LBD;8p;G9d2}JuR4P zPMSeW9M~pKL9jeUMoB-)M-po?V1O`_LAW!Xr-Jptx0l;fA@-PRMBKq4%S~=kjocNR zqJt}}^K;AT@PQSkyywvh95JIf%2kMf>z?4Yiu zUC8~+`|;?+h|K=%=nB<&m^$w%*C~>nQ9Z-I9GzCZfO<;zIG#@Gevj!XrkAEalWC?h zq)i{~44YNxI2YjD5|BPJ*bnh{cz^Op)iWkK51r+<_=A_nmm2}4kQpKibbw=qO0p5! 
zG?Y~DHqd+A^?xkfISgcmJ9-O_vLn_$y)EHRgz>mLPM^l3lgr&y=+yGI%7DuQhBsph zbjx0k%{{t8%^8PPZ~;^D+pr2IvVDe~<2_hKmE*(JlvgoQGkjrwvpIND7HwBrH4dmQ z-Z)Eb4h=Of#b$$sP29U*uvQDnd%qZhFC*JP0;XhZ92KDGyd%1@@O7B0NNVhX4um}< zF#^h%BzY&H^ulsMHg+FV8?=y0l#;!NBTUOJrKpT{7Yp?veR_VG zWVd(Vw9r@_qHg@OdD1Rq)oY?@fKYIQQ#U&WQZc5wXER3b-N1!(NriDeuPfu}yP(j9 zaQAYK8__ZxCvuHA$C3~)JX+qM|N|Q4}IB9UG4>jHtIf@cQk-i1XAuGh&`>pKTc*mP|h=Q%&AIxEiF6YKAAb z0BSSNcDe9IOPyTw-#qWGCjECt?|?%?d7r+zdmb(q~x> z-#0sNcDakGyiA@k>rc!)9_?L@n&G-#IP^m1$5!{Af9|#XtV3gk@>o_u`Gqx$d zg&Qunr6UYyQF|Gai+Q}JKsXcXZ>lMuxG%ZOCad0fs&5E?qtgI677p?z8TG$LQNq&r z%l7-^4Jn^++*bqcX(sj6GFt1(1xw6PdzD%{9m$oVcCfzc+Ne!>7p`w1k0_Tnx7SeM zNQl@bGZ8<@cEMjZ!K&wo1+Cc=w!s!p*qX|7qHyvj)D#>EIdUw-1Mm#7nMBB>16$Y7 zBSO@&P%*OF^R_R{YD!>aiP={&AB-EnO7B+dS8`Xn%9sPt@7n1S4X)7(OF3wrLyM2- zWEC&d&-1@7OaD_hW}^&CIfls)H=ab0I3flt&Be|1<@%9X>H zlD}iOHzQDl+eaIBQNw-zdi>4ZcWFbjOe4`uq{Hes3#zU;3@0?zGX#sxY`o3vq$Uoo zhW03`dUg;u*i~F-ISpfD@^97nSw)7NLFBZ`KTlxat*n}?fdwk5k?Tn1SBI+L3(Cz^ zgs{4f`zSD0O`#^|zu8yRaDWw9)wHjImf$*DL8H=XB7_Fqs9l%xCds=09{#O}lk##~}~U zjUvaQ0;RamIQN{tQ~VuH=pcls(r-v2z-OwR>3hjHfjUjg!sdNQ#vZOS{26xPSUMJ zxy9Lt7BJ%M=eF`^<5m*saee8lhpO3ioOF>fEm1SCTSK}*Fnyqk&k4^nbCR3e!$_ML z0q(gPrsyQM4mJ6l9F5x8&5R3@rU`43)PTpfLBEI1p2<|ymmLG5^T6U)V;8cDy8wnz-gTEILLNEf4Be|-E zzns6KXKURvYT_9OSLxX`_6$*l^*cYaDaLTuu$ZpW^Dtd47a1v|QL`Mw&68}3pKIOE z3rOfS_A~vNP0?F;iF2ShhZRv?evROrJl>%FakO zXCO)9L+tNU^0_aQ@_f`HYj95U`5=Re8(CHnahaiFsap>RM~Mk2UXIyH5i~b?y8?Vr zLO;~heW7I_DQu$D581b{_<7l_ZJBC2qeC=QWKT3f@a;#DTxTkv*!@^iC$N2p@uca7 z)edu2*U7l2lPu_sWE>DDo^5Gr#9ltJCqLE)L=A3l^L>V<^Xr5h>*qtT;z|!(!vY7b z4XBv6h{vI-Hy8MQloPi|AqUfw`f@xvg6}lt0Y%26!&RpR5FpMG+d2f95S6AN(n{Ts zuVnybuV?KhBHu_Jh3T~v1+ckDAt`3YI(;xmHJ~c_Yb1xk73ODny@HYNMC! 
zA!(QV0S#!`WQU?^Z9y?#MjRGG5=A~CYTSgDjWTVfiB{&Csa9>Y;s(MLn5iXrEe-0W zqqSj1DpcJ*SLveXd_~rPx;6wgIM|OHd1A~X?Ykqa%Dh+EeWkL8T-oG<5tQlzZHI)g zya6g(8xOt6g={~(lOuM zkVa|3tWvxLi~*XdL8zwj;gAJxm$8_$(&B75(8Go;H(FS)U_*d~sV>cmNR;s&>GTp( z+l%}Fjd(9$KrOTie*=yI8STJDM&OMlPweVF7Q35rK{nIT}KT9*fPRCqyO|Y!av0Xrtg(zi4DrDBuBQf{UE`&})gPw*@jXOHU zuDI9!2UZY@duBcu#Q8l#g@zkdi0#P~HjydzJxkTVmu00OP7QI#psJtyI;d7cKkRO8%>Kbwiv4w zkb@B@>E*yshZ4b+>@vXj#MW!qJmbQpn`aF%)LtItPNufp(`qC91H$*-IBz0O4%J`@ z5PP)LgdDe4i70H8t!IbhoOHIeg#oPe0NU2GeccB@810Rnq2bL6XHPgqjucyP3kopI zF4}R(onfd-#J1fi`z1yWP?{xEmnAb#Z)BQqs9jxHJQ>6NGWGBpawH#45YF+Eis<0P zo^uaKJ*Edt4UV}7ZtF^Cl;dJ?@k+t-zCA!|DIal)C~+kuE7Mi8J+9eCtbP?9MT(h< z=@rx9X#$_|x!CV1e$VJP4ezw8S@EJA^aD$KW3-ujBBeq9bxh3Cm&UG{2*9RM|7++v8 zIu!CtZnE(Xqg|M4yrUC1!uXrg1Lp`k?<(O7LO|RgDC*QTUb&_e``JuBQ_Eiq#*Mb? z3vkt|S0Nmx;cT;_{CIMK1!MYEF{7CE3b~sz`Idbl(g|AUfX8}AHbhS3Qzk^ND4Dl# zVJIbVEJcI8nF|?vcd8p$^fa)HI$OzSNhe}e!v&E|SFvl9KacpQn*v5CJBiNHSx)A& zCVJH~Hfl8BVC(lb+TG&JQKU%K7XDJPtWD4FeN8 z&Afc(@k+kRcDD`TJ|*5_e)TE8%)1Ns4Az^)FqwL`S`>I>C%f~b(iV~~QJW~`_(YDDy z(~%9~Y%E5_*B-YG5IeuyJXLcV32g30SNN9Srr*sQ=LYU`*fNK0OvSercUE*RIAd8Y(e8qGwWd4aHUQw|DKNB4+hhQ4*(W?qesDZUh7G)MG{~fLG-k ze=KzC1jVK=@7|9#r&>MAeoqemWT~YBeDX3twXGIgH2bnX?$1;6<5m)U&YC~I?3D!b z>3=5*8+kl93mkxps*(>rkKW>!iwBzz2>2#MYt$tdWWg#=NW+P$jBt4YpWQ{~y46Jj zKSg|Jcpw{68?qHpj9^76(pY+t#mO8|^dh!>f;E)yu*xMx zk%CkFC$Gf`$Gh8@32=D>;mj6~&+l9OTwHO<{wG=U&`$Y^G!W+wPrk{+nC%;v_7D)g zWWJFm7m50|e!&MWC{02Q6zihvuqUOu*+QmXvhcw|NwszVCBnIdr@_t;APvd=!88y5 z%#fTD74M{}6iyo3CueIph9?G*BA6Th3qz_x0P&-AW4Z^{bl4n%ab?4>1WuR*Str&>5hHge$#W&S*7t)|QU=@4t znGHF3%@>sdDC(*oUhFMG7uZo3z~h9c+}Q~fkm}14YNt}wHl_fb1LKwxR53nkzDow* z4{RDVDhqwC8J*i&qN?mH*%XE;zJa<-nwF*@(Zhr`iGpB76lB|64+YT?ge>a>UL0=Q zIwNN2TjU#|-t6^~kIzG<@Ml4m$fOYZ?BkwLZwv@eL}bR_EF>cDTu?PbOo@W|9=@K6 z6I&H813hA)-V%W(rh&z7Yj>a!HPIPD^D0Q4h<5W%=AhbJK5r>7ri9*QX2HUBses1@aZHLUsXgAxvM6i<7ITxPWWM=`=3%D4fEGc!oYbns$q>h$KM9bTd z*a44=zDDe5uuQZi#ZG+~R38vKIn?EUgyj$(2eKStj3UnQ*rzDqd>1nW6$Ba(=8~tv 
zNS=yaLh>A;STovQERQKH-xG>nFY!vgcOUExmmmz5aY0jG8vA%D{u10vX z1%{E@?E38UKOp(q%r&iVugEtbZ6;QgL2RM96l901avgI@zotbJ(Xwu}pNX5~AgR1qtPLQoY9OIP%FOfw`cbtb>& z4@hSeu-;K*eNapOcReO1QjZl~&{W%d86AZ@YES;XObx83Xl0`}2>AJmPP~R4S^T;Y4D(w%v2a3Zp07zb zf(9P*m5OFQ%n#!8Vt~-%u>2I{RI&96O+wiTEoYlM~FyyUSyFjC55<=#i>!cB27qFqzTJi;k(Gu1+Cjlm{N&L^#=<;zDX?z zXfXqs`bn^;4S=xd4Q*zC;zhQbbyGJOFB%`RzkOSW4$+%F!?&ifF*>Zn*O~8ggnl1J69(7V4-~ zG(Oj)(d%6$Rj{lKrU9UazwM=lMUpy0ji|UXDOlrm^zNzJ0%c^5bah%`Lkf6Hw&3_l z&^Nse>C;7F9!tVSTmegqxeaLyB#?)tJE6d^%g*3H!`0@gJkcR?!R;9Im9eeT*^rDU z`1L5-$g?we%DB-bE_FCPrmTvR{sh>Rpp%;w>phYOg6<(nW3Nwj|Qv zfOZxt5vbZlfFzZHwpo-cP@_gowq!0!DvT(p*d;{C5zN+Y^EtHJ;#tKvt$r{1>bi-N zE$L!A!H^5T_7(u{uknfO=wIfU;kiGY-=w(d6;@xhd;p|YGpq0A+1ZEQZdTueaTL34 z=StD4Ol#)sg6DB6B;Tp!yX&It>pGzE<~!v3k$ac7hcquFsTT>6n1rQapEr|e+$JS_k=q;j=0+G%uBHL`HOWY}8EdMx4p)=R4s^?Fx!`Ifxl1~*$y%a8 zwT?~wDhwdkfSmmJNpR!iqLN)1C!e(Y>)KSeKDgeNJ#Z9$%62GX|KAimP?IF)KcTV9 z6ijhikZY69fN**tm#2P_z!gw1YdmqK-&kwL=lG+aR4xZQN#qK0IkSa+(*^)> zEjnz;Tn?YdTDx>E2lvJE&gFpSQ0@$DVc0Qu?2+%GF;{nd*63irZ_5LbBG%A#yVKNu2We9*KWk3R(v>9)QhV0A4HqV&*bf za70o(v%=5@mhDGdV5AyCU{T@)?$Dbc3hhiKidA80s0T=<0}tEwM>aEttu2;f+PSrU z%5ZDI2xKsu8^Hj_EEeDznywUbbMU+%$mM1&Fb;Xk*v4FP5j)Hs1N1jxM&O(EEpk+O zh~dTp*99k_oE9P()(lKFiA9|r`t@Qrf^Ct=8JEVS%W^Ne+zi@SF22D*KPsNk54o?o zaEcLO#$rqoUlU~gTTC~txbscCBZ?YsSYjteg;7cT+>W!^M?R9+u}9pqI3?+@JS0)_ z_mxg-KP!b67PP|@liwUc2Pa#dfo(w0+VV@&`xU8`A~+M*!;kfwIU$-j;YiLxPSgsv zy_!q#E6Q+hEA2U|(nt%O*k0PMkLH#V)Q)DXGPAXC97woAMj2Xi%khv!1B2E}w%`Wb zB7N!DxGi)JT3S>sL5dtNmC|qR)F_|?K`8tWSIL{y)hYg$o%J$El@4zp#qg&d3n^HN z!WEiPk&*E2Y2u`n(3nS+?0QL8CHrRurOH)qBa=4DP)3{BExQ=C2q5P_3M^fyUNc~K zKY*|FiM&v>W=VkD>?i^!dJ<(LWQ^K2k;SrmBf@Q6Tg7z(_IHT8|75YK=OrJFu3={q{u?WjaK2&hK;%{ASXYtWv7g`#ppv&Wgk5ie=V4NgI$wY$o(P|EVnAlGF? 
zAJgg?QFDIYS6a3`22zS0VLVAuPGy!Cm7)W6poqk7%A*BLuZ2isgh~)bF%kK3R-1r7 z5bbUFrW83#dxHdM$@eC%^RJ`wYg|Z|dTmqXh-y`rP-$hw)p1rA6L;|LD3Zmtrw~V81rfzSg#7cO~C5BQ9UT28uV3%M7oUn&VEZ-P~7g(89W7 zO$)|IBL&*~)mmpRxiN=C2~@mDo3eh1x8Sc9w?REybC1Cy>x0`Ba%F4CZ`QrN$fW>(vv?TWNebb4RrJJyig2sY=azwGd-vJtAbtk3#1DKAXhlCMM{T3T zBSQ1}!Wuuv)=bC=Nirts3vg!)=M$Ev_13AdIH^$!A47MI%ZTElU?*%(U0A2UArz~q(GVhE9I&xIP3xq;fUhe-*JFrbv(^s0vE z)ae3QAaHYT(8~p_dA-hnLwnPE(?q&B^W(|?;k^P*5}#o$TiCyKOV%>oLh62E{rcmd zXuo7h7u3=)682IHfytegmLk6CvVL2F?(hoxRFc;l`u7{dI4&tQ#&ei_B5uR3@FQ#) zWcvS(SFdTkXGL_CvyNv-k1zAsO#MN(E%-Wkw{~ZC@ww-&-DYEm=2m!1hRm^G*?34; zgnL{8cLX6tICLli>v768heg&hi&4byFskD6o!o$v#sAzq*&%Y@o+`<=_SmpoDAP{W zbF?fA-|TlKG+N4%gW~zimfEbbCCt*4gEr3wZ{2b-nA-mzCLg|=00N2A>(;z7RIA6%*0g4d#codg1aaT)3Xa6y)6tV9dwa;>aUe!*DaPm_Z%~C z&Qs+cUH?7JZy3CYX3;8V4YkZbov!fUn0nOwp%%jr1`gfng@+`YS^)bg!XqxjB+>>M zTf%jKXxQpO%v*lyfQZKdWLYmD+O*gM2w|T;ZIldV`!HskwwI`VRjOF|_SM;`gBI7C z5fBk=ZLdUcjX<}N^eWq+Q}Dvogy6W*!)%|NBa|tQ^#sQ_;unso`d(9m6wG0#8o(nM zZ_0=h-f4cKbfscJ{WwPbhibO@L-G5vfzpOXv(|P3z+?}#ygkI8)qLSu_K;HuO_@Bf z_r-kb&+lnA!K|^P8h~n?t?WoG545CAtcZMC7QbsgEu6i{KQ;%eijT?}TJvzXBAhou z0W$o_#KG@&k*!50QfLq$RBV+F#qib+>jkC+?SY)aD2*L9{g+28Uq!SCJzLMlgy1IZ z49NpAkPd4mMM_A-%B{hveiJXPHtHq&qu6dNx$D&33|xxQ@St^)g~+h>==?LSe2o zVFU41&zwt5CWH|ji?nFdBGQNnAY=k8$r?8`-1G>+O!EjaBJ?5zUSa&!-b0TN9xRFw zo_B^c)PoSWA&|SMz=&2v97fz)N*6kPuD3r8ln5xQqEFo($$@lCLkheC57>BBBADCTS3*d&mq!q8 zg7h!VCr(gUs$g%p^ZwcX@v5Q1dZTDg)|hNVSB?s8l!i1TZY;*)!54h4Q;_HMQ6)Q| zb=ad}O2A>X8NLoA9D(9oKXe1s`m%ue=QY=Z$EmWENLf9qw1 z1t~+&JgQ7 zld_mPW9+{?8@&C$@aMP9ghT7d{2&*C4%;SR-+DC8gBu8PqFcGu=WIQ#=E>Hp%GMJ{ zOI~z90di}`9w4w{phFd#0s_5v1J)=kjlB~XLzvb!laG7;dHavCg4dXQ9)ix0b?;Y13XLz!T>Y>% z+^Phq5QPah3QWd;9O`(cS%As@+p)-^Q9;MW5R+h)7WWXBl z#b+Y6(iE)$!vQncRf}!6rbeiEj$H<4Sd9QHQ9+x>;4G`t=8-Olr3wrVd6sHg){}hV zoMs|s<;XDv3vYl^De@h`Iq)@860H+7G_5mrDRV|D#_kO7PG%a)Of$xZsGu-8A&;fd z*B*1SPJRf7LaaUnVr;+gINwI~+eq$&UvnKzcygA-j1;nNpc%I~V!FXp%ncS$No+es{vnLQ`P<F!)5{ zeg!tw_6u5hlm7fh@*W+lD6bllz=Jp2KB$Uqg6y_8*VEqda(lj=Lo_L?&stIRawU7( 
z@K9<1jX(=;@h#v&F-4ODF5ix!zIbrX9u(G0-lf7{Qgq0gb6e{dH20redIy$;q>F~9P%M@ zZ4uoY9NwV_R6BzblR$eU-9~;yAwPrC1jIt^8XacJ5>5#U8D}hIEoo&n(b7K+Dmp#F z=|$;ubWy*r<+U&f9;;hu9%gI-O((JSR%Hh{zy=N4UxpH85UgZpC6dY@ zNO=xRw{s{lhHtlA^AaBd$IPPiu`odNW>&DV>6kwNofT8)kefUce#2>EsQu(21Y~`% zR?8(+Lq1y|f3>?|$&Rk-2D6l3Al98mXL~kk8a!m8>KH=CbJsir!plQjMx;J+3v7Z7 za-uP@Do#se{4s0SSIdw>gpZNCzgl*N*zUMkV$5))k7B9p+qFOd47bDmwo#;LvCAiF z(v-jFR5lI8I`&iHv&08$M37Bq3uR17gftr_1iJ;VQMC=0ZzlYcMi#*Aa+=vEOp_Mx zfQos%u!x#d{KkURDJkCp|?vM!{X&Y zB$NsiZI2*=1dbGpVx11#9cBASoLu0cj51_ip&&|GU|aW$?@jE9E79r#T-Mr38Iyh) zrnVp?YeMUYniDn5t&Ss3Q(fc8OJW=06L5AFiEu3KAZgLmDzUriWW#!AvaAhk2-=`F9b8uTpv0ykeOiG z4bx>4%n+$FZ+~D?C*ES~zAc?;a^q`FoJn?7cF!($MNJpSZI5KLy${B+_dU#k+{f0s zfZA)bFMh=Z)c#VjG2F%s#X!L=%*f{(nvt(9X5@$v-4qh3p6=q3k49qu?Yo9b0Oa#Ay39?Z}s3>k#YZkn^b*L7$wB=jzz4#}j+spr?l}nL| zU|2eiIK}PCZHg;*iGW25{8O4sBri<9X_<`6!GWE+_GW?mSNQlY?wbgJsgGMSh@p)G zynj_&o7o5e8bUO%@A35_sB9R~xR5IMi|rq9QALny+s(uwr&Z3PdyRs$@@3w-wsKJd z2M^YL@;*93hz7Y+{2DnHs(i$Ob_}sCir-vN7?6g>>d6cy`})AL`Mcfc-JWUNG5TMy z=CNv=JJxBsoNw67Y+m;-nrm3N5)=#<$c-v~f1QXBnD{r{N+P+Y3PXz)uU5 z57yCS9+b2U%9uvFID5$gPcK>1Le-g{6smO;@D0G`VDqAtj^B#h zyd(8Tk87S^MS?aH)lw3m-sqA)ju{PQk!xIY6B2~}E2!aYSYy*G^HwlbHU$4hTR2fw zH?~blDcb>e8{0stfkg9c)a$lH1tjl*WX`;v*Q1_cP>OZ~68Jfe1Zqnn zKi3u_n6g62Sk#i|2~VoU+`+0kY8wtPL?It2=<+O2yWO<@VzVQbQ+38TGg{n>FAv!A zj5p6Z>98n)(;=qlSYJV;Cu=6Xa!OI^lnZ?A({?ad>7tPrfTB3Rz6y zz95@stE=wGe#!(tu++>ARFv1Yc%J`($X1zy&IttqonFuNf2sq*kvWZ*n@{g2C7ZiIcy~?{CI9pf3)gVe9Wh7sKKa zieti(>p<+Af&*c+SrOMR!5&>JC2KOLLSV_70M9fXsF0fWnk=Tzv3b#fA7V_yfKqWT z3zJ;y=teFTc&axG6PlQ}E}8)(TRbpAh)z7qpapxNzCqwBU0@NNId~xCMyB@HEgFxx zl9(Jz%JL%Q-EIB(#^`5vRFs;0w&@v&mfFi9Kws>jQgW@u7$1&84YmxLkcM1=98R7o z)z9+eU!r!PllX96SK2>s!y&$3AWzy?>!LFXxr~wK#+8;!flNi~HpQuYS0`1~ICFXG z`frTBHvKRb*xVV_LOy+avphE0QQBK6(qp<_>dLf#E^b*A{KC(|IwXb@%7mR&-w&<^?|`AicaY( zFjogW14l2;rm?VW<1ZEEm7Jp!>Ce%yz!gJ59P5nw@!4jl0M>VhbH|zhUU7g|3c&Hi zMDoO#eo__O%DJo0BQ2J7kVrrStHvvu`GUh)iYb{SrWDs>5&9mCGa$+aA{8K9F?ntbzQWl)?P<-0!KhY3cM){Djp7kH_-^pTu3vh 
z>$*+R9$lk#0JkaHr4k4~dl-`+imwoWV|owEYxPh3&T^qKx^N(=nB!_*sWLmk86PsS=+GJBiSB*RJ%69`oaa2}`FD~Jb9P|!q7Sz!)?6*xU;QVG`~8`G zP7ZEpw!%gf*Xl6W-#`JJ{FVNEQ-A(if0p&yI#iE{9>j0%(&_|iphS^TCviJa` zmyly5-(oy_!s7&%0={DAL|eezC?%Zxv=Iv<4#u4eH-h9O7NEdEJ=bD_CcCa8vJux4 zz$zw}tV4tpBMzhJ<5c8#!gh8HWQJ5bx2865MJH!{O8%K%wjCjqP6>ipU5kn=bic7N4XbG#Ty-v}htJKv?-y z=y+`LBzZ?QHl{pVQ`V6=&Vt8+;&7;S&*jaoX6D1^#mhl^#o%H+HM$YBpK4o zB=c_q##up#veW6TE^- zlXjeEDMIp9d;vPk3&=BFPd?4Z6A-;@hIJ8TCzK~SA$t=U1i31wsmDV?!Q>j!*5~Us zI~Gg#ia7cVt5mHLIkmGR-Nrdl=taU2jI+D;j4R}LYL!+((P_QW1>p&3_)V%;Fi|yA zB;2foV?JTp$^dac2MyWf(JuqeNpfRCw=2Tb&bsxu*1dzvDPH|}VA4?YLaBG=6PfDv zS*#8P{go0MmZLEy)H(t~m%mP8VrVuF*1>Uiq^pJ{W;(mV z_oE*y?hCXJ5oB&sXA(4iqz8@Yw2!d#|&9fzyr?kQc8F>)iD84(Jp{b{6t&B@>A zs~FnAFf7;#Q(Sl3f-vn$B*tT2Wd0n(H_Ck^b{yEzxDy(d*#03dO58_w*1%@KoG%A8 zN3(-IUY>ymbiF{evVALKWt^czBczNJPN?N_$gLgco@JBBjNk-kVhUBcGk%U}E1 zBx@#nN;(t?x6g}<0a+Ks0bPm4IwL&Kvy*C1EgS zvYKAuJzJr{KmFLzB#XKmq+SdPk~gzdkOLWwOa1ZJy`1}E&D%Vqimg>h)IIE?H~f5CV+m(6S2xBy?q`ei6T59{UVG3gqD zaXaui*}GM)(rq0`GJt-}`asZLo&wx5rwajYsgVj~lU6lzo;@F-3?OgxfY*%GQ5naS z^*x)?I8LbI6NEBIlebg=&Bbk4^Km26Sn{mA&rFC?&v3iD``Up40d8k7?-6QpX%z=T z#tM%~k|P+(KVo|+hY#8#v&$GzmYpH{6GrY1iO~Nq@I&YHK$Cw2YhSn6tdgfhw4pmw zpbEXVs(SZUwe|}1?}x3*cqn)97J>s~7PsiS=Z|xS2AiJW%K1bNKXLNb%H)7dZTY$D z3}lfK+@6B^5jte3DArjR1WLjye+29#T9NG%Q%zA+E|ft+DjAgnuvC{;*2@Hf?ISV$ zfO+&k240j7V1T>5Gq5*cZupY?UyksySflTP`+@Y$+SS7J8L9<1{wWW$W^Al!Ixoz6FIQ=ad2lzrgd2zDt0xwj>K-$UmG7^3291ZKm zYR$PSF0+xOK^6&|vvr#Wdy?s&6x&NWcp$o6Ww%>40T-$!wl(=kMDV^K`6%`bxJDA? 
zhk2FQev0R=c&g2q$dUZEO}C_Z6y|0nKyKm>qb}@^NaCbcMCZZCI=k+U=0&e|NjaL~ zy|6DlE1ISAjP}fVFonNR*WaOD!qalHk+VA?vKpWVtnvw@yoUyv;{ygvfyUnGe{A!J zJlVF^WvHn{j5VE9$))Bw#Tlga&K=P@erxrmKXf9f#ZId{GIGbhg9)PSjTF zJ=+7IJhB zElsm4pz*jlBJ_&GHW0X|p6&i=>g1A9z!@&4i&u4mPKx%V=}jKmDS%CHa#$+Z-jd$z zzuclPHVoaOuvgn;IA?YOPo^*G?m10)0GFx`>!`a80d?>v^|zlfS#z8YT& zTlw>G`cxQ71OPX>`M!~b#d^7s#?4WD1UZUJ+KD`bUT$z05c`LEs`Q9csHxy3CkIz| z#q*pCY-O>g^z9WKQ=L@+%ad5b;`1GMupwW}WkC@P@s64${&KuL?BW)mmt`Ah0+PLw z>^zM5SkmWkM@=cPVL?wGAUNm#V%`3slla!<4+ zZ((<8qu}Iuv`s8f5Qei`n+?h0fAC|xU2I9{kMn2;AWa^`L~%m$Ym&n45aLRnJUX!| z_EnafBUHMQHLtFKs;IFM6$e%1kR?R+hkCAiBQw^Mq zG!Kun$acZ^iN@#n#KOb+%SBRW!sQ*MUijmRNU2hbSr&wVudcPD2U+~;$eqR~Lmo+WCv32{@#Dut*z|kE@O)&hugN$FwLJXY$RuE?!K&8uv&0-9NuL zJK=_?CP_JB-jqb|>4Asz42Lpc3{}GDfaqmEZ;gEy-;L zcpzw0NckTYZE`?OK4I+3bld6QU-%@{?O$e=yVdRgiI^SeB^J@PJFPwEZ0R0DlmpN{ z_QG+r3ARsJv@sZIoVbj=L|OS0K#XN%`Al}W|6p<%Sz|8R6K4=6ziEt=AcLT9CufG- zb|B5Z@zyT8g{Nm{-_n(EOE&LwYz@?_EXux!WkdGV>^EiL@aY|SRok;~zOAd|w(OU> ztJtpB5^a1imcnLstJkV(vq+JQIGe6pp&xJ(TyDt?5Q-+o zV;heKY_P&hAt{}0K!btbPI*p$DIRGFOuK`3%SVuubFrD*MEgm$pXj4vgkh+DPyu4g zWS=u^b|A1AhY9d^AIe5CFvxxecdtIVg(&g)hj2v0q?I*?>o50TJd<~Lu{h_-991fTJu(X9i$%wa!=2Smy&NuB1enmeFh-i%8s~-UKeR@iZ-XXB<`}JcZ zN{0?w*GKgORGH?Pz3bvQg2P2Q-J*N~FA9|SfFg}c*2v(nL$L6;jqLrTGLK6A6}?Jk z-pJlt=A9-l%`5en%hdOfO6Qb%oL4YHukR`|{1T}QMXB#BQ>REZ@5M&;i)HFBkO~Zx z_ZX?ZJCk~KV0>Gd_pRh*%*y+gGW95_fK;hR`p3*fHF5JYUS|3nnOOc@8X9_ZpSP5^ zyA+s9P|H$&wvr+SgvBXN@T3-LV^{bvUhJo2gMQv}kIGcKk)3LeC2xcc-V3D>Fk*VIIg?A2Nc11OaINp*^lRPZ$9Olo>o6{La#{fY(Y$u2# z2ohlJLR8#P()Y@sndg6Nuq1gt+K}PH?%)H2WY%-0F*2a%F(bsggAX=XqAcd)b^POb z9;?ecg==5y=*p>gvTmR~w%IGVSpvhx;d?ZC?+rqLhgsQe`nk(pKHkVaK(cgOMZ&LJ z!VN`&^duK?%F-n>7xcul=mbxp=h4mXxs2z%W?+)vZm{u^vbL?(ciup*ISaaIXm+Z6 zUBE`mFUzy&EzJ}WRTHoHmVrbdL_8}ef2iz~syQd4u52ch!x@vjslb!t3ttC;{WqzVH{QPxYUwe4MSk&s5%*D({Px_w$wah06PU<$c=ve@FOs^`GT^ zRL}X%>bZeqWs6>I`2vcc)7OWheZiYu_o531hW)tFJ4w29I#+vQ5_B;A>W3Z!oJTrqPpn&TUb_r@DeOw$`juv*qil@^!|#Y8fWV zNgOX6JHl`3^`b60%H{$GkcHs_ljetu37aclr^{)aDPN~bfP7drNbKIoKB8AgrXOcz 
zZg#J;q>ev))Et%@*-1X2)l~K?1wso9rvw&Q?DHNQ#)Tbjg5w2;5%DSec1Q3?<=d?P zHszl)AfxJ=bkFFB(tSPSAg#9X8ABMvT!ht7-Z>#Nl=(7G539Ed=}sF(VXZb~k<-Yg zhsKhBvOQq#hMd%%z5%L*8<&NzHUIq2P*mbc475h0U?hxW@7~BhbHiBj?mAqg_p`|v zR6OeD)!0QEYb9K_4kf?rtIy6`4=zEhBKir`q|3Z`11I7es20BD2Ay~%5vHy4x@lbR zu*Fb8?L+f7ATJ{zu5T9Bmhi9C86>|N=`dtOZH49qmMiK8#PG;g2bLM0q@37_X*w50 zx?eN=f)JkIS2oVOxRMoX<&!##Hr1_N@yymvamgutliT2=N=$7P94Gn9Wv3QAcY3RX zB$gptq4PL@q8HZ_>N~+xRN0!=yy-XJX*^^IpKNQ?<6B3V5>BXFMcS8+sL<#B^-b_8|gIH%#UEB*(WF65$qeg@_;|n zgZ^Bp4L0cD1Ed%}+fq*4$*0=v&yl1B`t?^d%YF{KK-glPikRqrSu=`C>G0j@OJ>CH zN{Y}gvq!G*bqRJShYTP|F?&S?kw!gam&5fXA5^J9{hqKwkGkI(zB~L+dVa<=?@e(s zgwoTA-Srtoi2D30x{3up@E9<=xzXqWExr67Ur&Z(GUqdnC+raVNL~|IYlCkgpJ$P& z*g1NFAS~X{?v&*tPL?@NI_bf(l^8OgU%ZLNQ61(OXrKs=71qOTXpzaXi6Vq=MfZOX-pz$nNKZ^?R z-Mm-Up(ya-{Scnl_9tJ*k_mLu9ew3A;Bi=;h7`LfCt{EZjxmyepD{}VCBV<9w_Kcy zpd^QVwoHv-c9*yOzz9Q-I0~}L=LreNMjP zP9qYhvGW{1u&kKnBa~ey37+GPk+z-_rmxdjr&J+NsjM+u$)RhLcSI6$ZI@7S?6R9L zB|`9V( z6|YCj*Tpi!acVeT?d%K*GnE9@1l(-&C+VktAMsDrikNHWqdTTiOy|F*^$R}m<9b4% zKE_kK?iowp8=RGR&1fCa=O#wZ5QOAAj(WP%IEKyj)%6 zYGUr(GE=@V`Q3KD4+sL-3}q|-vw~hn`|^&+Xa{I&=Z10_(kkfbWdpf0w^9qHdDfbc z+Jq^|6;`K#kd#<9#ZquA(~KdmY@+zh#su|HZ5aG;rAUz7?(lHM6lAApUBZu?2%9=f zG3kl~skCAGx!FV(Wh-4M);?}d!Jbawi>o=n9#o??3-X3I*Nq`oQ!TrsN6 z@F9Vph7!n*Yh*(NqCq|s^F@q?-(pM|o=H*9%ulwH&lu&vv_LoT80Z0IGnwxK{&|Lo zEJH<3wUgiL`nC*#gE`Z;-T`;EXMR&;ILPI3aO5>nNm|2Y@yEda&bavQT$oN3s4Nhm=G`lHCoE&z{*c~|) zObVC3)RBc?NqYmF70#yC)xg*ws;4D*&Y)wE@ojYml@ik%B#=^N<3%bfU~aIZ#OX$< z&^goSY(k;K<^!@=n)1s!=mjitoz)5ATXrV4uvj>Gb(Vam_68Hs65=i00W>BH>Tg$4uQ2t5A4YuzHD zhgVtzu+*ZFO(2kMp)nO`Aw5t5jUu}Q$vE4VZb0M$L{Et%h?@%UXUh)BKa>gvlK*L) zT5z2rp*3{zr`~lCtM?QTYqH826xycK<5JR2h?H z|KSj%0vD7Whv-ST-DWB<0?BGBki^>1MClkn40yAJlG9A~aZ{V<_ZFzZ=Q)d-7EHaG z#cqS`-7{au^hB3@qRHXRG(cUYk&OZ}nV$)WT6ft6OYmx2C0rswu^5URmo1^pA(;H7 z6WRP4XQXl3*;Thdj@zcjv5PbP~zAVri)-=dITwq6-6KzBd>0 z-Yil`{Rn+OE*AY*9MFVjOJq%QSDdBfwP8eL7U ztZMSepv^~-i6+MfU0Xl8PLmT<+Ri>10+T zIaTC5HfVm1l!1gn3|Q|vNy>@QV2Rm~Br=s>7)5ygN%MOJY3f+=t%AJ-&6K<(S4WY2 
zOrZG*^KK_vS3(qm?rFL#rw#<Xg1>A#)8sekXZ8)-6kP`LT>=0)C|u^LCKg$j%Sv-w9*d6mWp7{I-lvB z4fl$o$xf<-jYGiV>VW9M-z^Ktc`gabwHF9z`KeA#&9Dbx+s9|^4f<<`Q=yLGZ}Qm^ zjM08^BHh$dd(^l#8lh9&(OnT?H_`NP0E5BmLS%B;wflkKh`82~kR3qo9T10dV!GH6+Oa~1pyB?sC&glp zLu|{=*X?M(+m`V@>4zetb(6(fGx;qMF3ya}NA$wc6mgr8;&A~VaIY|pqAhsp*m3l- zJsD?w7`$3Gu4e(W8yvTs(G7kjsxlbEh-83^B`{uMYJqB8ZZf#okb*NxzU_!?FGrAV z^+G}+wa%$}f}NKX{C-Mfj~5PV!F4r5C=PR7*78$ixN|(UCtV;^25;wJw7!J+!$-C8 zdw(E5P0ug`4D}Sq_k@VUM${QV+6lH~!AN*Mapl1};y-hTHnEL&e>bOcxK)}(WJPmm zI$r6<`4g#{$Mz!mgP5glqBDBL%v!ewCH)Nh|HBizUkEpN5J!ITzz_$#%pTXcU+8^L zk0KZSa=I@UsJiHt?$`^O{q{#8!UWyZruiZZ| zxM3*SxM}n7mK(NiyYVU8cieRIE$K+-sXL$c^kx zvAXEJMbS|*j3Rp}i{9&t-o_WAK&&o$Pf_$vGTb?qohXamoDti1vU-o5uBILuG!Y?Qa-%k!A!)1?^g!lA^cub_&`zk0dgQP%`TLM5BS34@Ea1Zt|^@rr6-hSf-GPEaM5<=i%;s? zWRK#2(oeEHcon=Zd$R!Uy1w5RpVqhO9>t$u6rWL+8M3^+ES@9StiB<-U(@-!i{kPJ zRw%pdo8{Q=_N~w9+gy+0&nt@0D~o(X|FJCoJYU>wI1#k3Y5lIE_<0q_BQd*DcK$A3 zd{N&PdlcVS6kk%7C9-^2t@qii@AJi%^=-LF@x4Xy6=hi=%g4*&dwub7Hl=JFN2=B9 zs zW;0}Y>u=I}zqS5cUtBJ<(z>iEen(MU9<+*#kj?)F#RshT9lrRSzRmS0{+y!tyt3fn zoBg(m4_fi(_~K?uxzMBd?L~3o8!+!=`MLjIfbMo*d{N&PdlY|mQQVwDF+R?|pw`{k zpY4moKiF2jX6(-@ii-g;ztyvXyes}JUtCOz7>hNpu5c%pVl{AiPse0Qxu<37SuP{uc zk)rr1WjRHb4~kfAwBkSFi_huXT#w?ri{kUjGEbI2FW38SUtB)Z*+P%v;{V)+CNWEP zo-FSw*ZZx$_@cfo_9*_tMe!wNSt84LS^S56@nwC3f z^2Nt>Vi?C|clE^WDvD1i3+lh@_eyfH%NL*2HypIr6lWjK=UrK_z{~zo6Su)2KkAE5 z>)UjX;?F3G&nU|bSyr^(9j$wYFOCxt^WLNQ(~IJ0|Co2Od|vbJia*^KpVK!KR%<%{ zw4(UDvY^4r-dGlYnlHYfZ|Jht6yI4CKd&t3$?_$wcSkOE`r?cFw%DWiQ;XtD%Cbb3 zf6{t)Nc&V@d|BU?dlc^!#aEOCWqkH&wT>GxOQz$CkL%2bqxhQ6j}*lxlm!KS_C7)0 zb$-MbH_r+@+}9L`LCfb|#Zi)G#|1i9T)eqBz-fIW=D?cbw-m)^lx2o24-0MF#NFbH z&+6N3kK#8M#ZM^<+R^OGYTaSo&A#}YzRmS0ep69=URmbJa#XE5$SY=vI=`TA7HLjF@d;(YyD$4QVcjk(exolwsc(}#if=25o3{s^ zAlaKWc87J_eDP@&=V-X5b;Uh$ou5&b8M1suoxj;y-|CCc>f3CO;x`nDye7;^;wq>+{Nj-a7kT6?d&~@x{?nFz-EzBW23xU0Ki%Wxt>mcaya~ z?29kz+hUL6*d_SlOUi=&F8d9&?m)NM7hl#l)JtnXhc3hyUs0A7vOKQV9nx;{#m9Bh zA4hMzn$F|&>Wfb(3yw3{*Z$#%M1vw18+~z8gHQG-o)pEUWmh1k?4ifsK=55F&SeK~ 
zz3ba_kK)J_eCspHGDDVA|A*EcSIL!gR$LYW*=&#E$Q*oejQ*K-vV80dZ^{pi8+`FO zeZzrZ&Dar9`Qr1+GEbJ-c|eCBD`V#hP`BRoZJ|eTQkEsMd|I7vSnHBsiP|jd8`}Ie#U-He>s?vU{b#>bjvX0|f(dSD=4W!55!WmMO9?o6z4GKiz9>v56E)mJS))cc@lkjt+CCYY0R-6_^Y?#uU!$o zgh;l+-mlTTiZh%u-J36$m!^~1ppHLXU3 zmN<4=;C)qMT9$xNBMRqO6hErzR1^- ze+2Zb{fu;`can}Iijs&=@aZTNz#yqrM4*X`OBMNx+uz?JIkk&SU!ME;a>u}SU6bJa zX^zwn1pS@*-KewW{XsA5ikl!MdUrRGq*S{(d;Q%+O!M9ACV`k0NHy;!YbORrI#}dM zo@0GBa1-lPIGn>ocOe_J5y@1Cp{p3H(AF(W$y=DNo&ZPWbIp&LV^i&2m^C<*wkZE~ zn{q|@$Fe~tALtT^PV%V5uaGt#uv&~4XoNa|C1iO`VcpO}cb#B;piJsH2xU&- zW@yC+Eo8><#i6h%zt&kCoazjLMrA_4qlT%3b)hd`R+<4e>O)#pQ7CUSeDik6eV_9SY2W8NI~_Je1Z>q!q=OxK4wY#!cp1m*0Tw4vYv50g-1Fp!FENj zhOClb!q@(eV)7IpT~wX>Qev3CTpFT4W>KRKtUCqP(da(mL|qgh`J~r}GL_?DhcQo6 zt0w}Jdc|lxK=E|kz3(S<2qtW5c@oiJn0!pP7)pI&19UagNK?HxV99jTm90??0##KB zTy)_nSF+`8A*ij}>Ju!O#BIrEN!b&g2@M{3XQ|o%qpC-vxe#|SruZhNSVOH#TFPJO z44CK!$$BFN`XTdD2&AAF&#+_>&Kl}4{TVXa993AHk*suU^o+GnPsV_5^6`)lew=tM z(0_;iU{)2F^bhYTn>bfF20>U>q(-87rj8-iq>2?2Yc<;^k>)G?eEdBjstb1}>V<1+ey>!FmI14@; z4|LzJ*D4y<4`Fsmc;( zmd16FJWG9&rH(|LAW|8YX^vKC3GNM!ki&v>Kt<7a7mI{Vi>kSE#4VDxu!`_^y;J{=_aMeq#`QE-V#G^Jy$Q18C8!QU5!}vevz;N8qw{! 
zNc`$>qjRf6^d4HR0kU&~6ksZp3hYf~ziXSZtt=zIotl;q1_FKwY41$I38s4ON`i_~ zgH{rJh*lC);_R~Xl_Yb?w8ly@9F4WLzbkYMHuqOH*x3o8wKqspj9AqIhdCAQocw$HPvbJ(1cWk#YnwDsmrU2)+iL006oBm1GpkCJDg_`rp4J zsyohW7B&A6qZ@)V&(43?+fC~?tU%!@F(f0PZ8E0R*kykb-$@oQ{w4hK-m5PlWg)L2 zC9`z^FMsxvA%(~yIQXzM=|d@x+w2j&%snnB%Me4mBYG82R*y4h88Uuj(3@T}MMHup zCa#5v;*hpK5(*>6q3i^uD}E00W7Xv}?C(P&wCwtHeQ$s*%g})iM%(Vm#QUhq63yFt4nYs?WsGfu$x6vC2e4XL#noIqlOf>Ph2&uf}Qg zC-_Uom{gImcJxjWl;5fg9j#v1tu}&+0VI)c% zuc36Kk-#DObmb=h*0V<_np^BIsTlKt!ieX-@Qr#hcc`eW+ROTm!BRx<6zld;b}fwv zN}3XggVXF6`_iF3;jvn$A!k0E2oH`#WN$kW2FOea2Z_2XN5GCDh(Ov-E@GiXxC0dx z^av6^RTJklt-afVL57H78{%D5j?)Fl7e)=%)o+PXF0jldd9 z*qJlrw6QDNfv=l2@K_q?*bL)>pvev%RDs@#zjIvs@aJtVhGEkl(7gw)O)ME8Q-j#1o7e-o}>^!mZOM!pYdU3nR~dJ{|$pHhL#q{+!=PLcfSGEH;PPZD8nCyzeuLi zVYw6wr}`AfUBFV`I>V1b%tx~bI1fvt!BKOgyNr$RNBc&GDGs1csxns2v^q2V+`LoP zLEl-r1#&4LU8RGDd3|h8jBvWo7*o~&jA>53w576ViaE92EYOC)eyVoFhyo9Rh2_p) zEsY@go!G9PcR(`TTm5w&Nw#y3FAf|E5HPYigP#FF3iDp$|Gt=&#?^Rsd5EDOjvL$P zQXfbr%E5hoV$Mg|PHVhkd5O%NRf1Pxz6)@$Vq}(3IPtI)ETM4X@w7gk(yFxiM)61O zW*2{ga9Hr#<3hu*rZMxj5A zF$1V(@=wUmuY-yZFrf*v-)Hos+B5aN!8v>16MUk+FZcrQ zy8i`>X|7rZta=8(o@0S!v;B%+i^_;n3(RI-t$8$L3Fgg$X~SknpU~?WL3{8ig; zZ{VoDaS_h1L<5`$dn^mC-`ebULA)DrrYm8CrT`Wj*x<7(*!b?@JPJ$-^ z1QY4EGa(2iM4W^%l{ubk6?2Scpy-0;nbWSH=XqancktO(b)L_vF_Z@UJ4Z^z&POy~1L#=f)AK3Od7jXFQE-q)=Oc(sLC?Wrw{A8<=m}O-U4yxY zX?25OfZ8BvaG06v+6PdkkZ6=gia>JceGlgi)A1m1EKWJ2pmw(Tr1eZmpEwJrtGx>~!j zqRqG#Q-GlNbxbX8PlT^#XYF@Bl}4Fl@NTur;N8wPaski*3=aV4nrXTk{l^BP!3QIS zC)8cj>&UlT*N|`TV4fh>+jszRp$2DkB4RjsG8BE?40N)qjISs|zRhh08R6buLQ)^c z@<`%Vv4y5u0|R`%)QNtxye4~%EB1~bdIg6RDy0kaW2hUI|aWjvEd#%8)r{3Z^Y#A zYqOd}h=^WxAZpB_*NNvgAsYu9{gZe`nha$9g)Rd@C)a8!+%wZw{E#>W5tG#=;xo2J z(3gXR0U+S80a#N)*=lpTc9$50WrJk6cf>;4xHl=zYy?g|77#g(ic$i{X&{7h(8s!5T^i8F!(BnihhjXylxX=meew;ba4KR^Fa8ok!e5BFr751T_Lk{j4Q z5qMwRxHyGoopAYzxCmN)?W(a^A^P23)i;6 z{@Bbz=y(cy_`Bd_@bRv=Mc6zZ?t-|ki7lbL`O!|K_`P88ykuBeK09i3OpLIKl!bZYhasx=**KAR3VUL#>{L`zoBTq#_& zJIujp`X%}5DNb;UYMYW*^fcL=B>L*iqpH;FB^Irw6<~opRD{O*xJeRx;Wy$ibwko} 
ztLz965FYLHItMo(uXD(YXUCR*(Qbg=k^Cu)UyUm?*<)*&ZcJ3AYgd^(%&g^XN<~|g zqwU+Q?WTPc%{;2c4epwN7Q#hoEp%vc+KDjva#P=N2emRfsL)p~z;y10fulyY(wyoc z6oqo62}tQ6hkF~eDqs}?P17SeW&j*M?m}-ne2@(+RhQTFYEg;t_e_>+E^4Nc}E1* z*Cm=nNiIwTYC3_w0Lrq-2sIaC<31|A!kq+sAppH3q~1<|cL;sI?!(~q^*)u~ELhk{ zC}GMEX?8i5wmF72b?I)Mi4(HB8+Y3RGm&M<^1^$VJB~42xckyUW}ARtx){?(F>ucO zZpW=@XX@&-cEDE_oxY#$qROw?F3+h7?dt>)Uv>w#bBP->Pw#GnB8fPUP_H8KKZz>e zoL4CszE#O$AWW{1|4CH$vumouB@o#3TNhzZg6yK3YXgPoL+eE_c&))YR}FC5?>EUt zkCxp`3Mg(SwZAcyA~x#xk*@i)yN2xuKXATPuuC>J%gA_7@i(R z97mYa0X8ltS1{fW?afF(SvMeC*@&!;>pc?n0Bw1A51mb1lw9tw-5Z1yWi_`4BH=#S z!j7y7Fz5I&4aNtKAa0d48$&E%@YOT8K8V!ioNwh32KA8jNRm*7XmB3F}zoYBOT+^P-T&3}s)aXeE_-*%L-?UNCp) zo|sCBtBuDf{UsuRKiEH#Fw;TCb_N(BYwn)r?6NIj7|!Pj&u<6MwQsOz3fPW_bXq^A zEsTGyjW3s6DD+bhq{Z=ljsS&Em5=Sut82ccz`!BSMl(I;nE=1_ZhCTVA zJ80;tVtN5ZVpqL-tbx{LQ4I^&wW=ldI zk7heTvl|8LZ4S-SYoHl^6W5Jqko}%$Chph5YAZUQ%)K~tXm`xcb-~|OIlI-?M>(Dq zh=&D{M?5G7yHwZq4dNjYLA4IzJ&@y_mX|NphR3MEy(iz$g=KD58p%(CfJU&Y2xxrY z{RKX84hBfC09(57veQ<8sT9c~mWiTs>RcLgJ%T$p4%r*bpOQD~hsRXIOa`3?DgtA< zuoBTP>ga={!w@OnlVcbirx*zGC)njdPsDW!-3XWd8uD=S2u&A#6);(<+HY%em>$68 z!U|(nW*35?;%FE`#LdEy*Lpn~cP=KYe#S+;7u5EU#T2md6L9OUu zQ=(dMPjW97fe|vts*z2snq77)65i)nweAA_3M!$vGA2UfQD4ofJ+Z-vQ|mn#32-e#GnJ!y;~@-z5V=2J31F}%rh`=gZPDEFe*>JBdGercA=z@Mv)PUo z!(3phI3{9eJl5v)MVM#w1)f4^Jdj$Ev|-2$Vgg!sxqwle!*BsDM{!UvMT&~Ywf4%y z`Vp;lpF70u7G9-Bj5iR1Pa>a^7$25)iVGfJhvEerl<<%)_gm8?U$bkzt7ERSRRCz< zcCf881QAVL8qKXZHvYE8Pk0pWoOWxr?bhr78fuPS?vS-+d>rd_96Q7j3;Q>9w*DeU z=*zZKH14c-h#lXxUBGl3qG&&X66Y+BJ3WH@YbSr88wtwEw@GL6jb1VNIs!rl2aCxk zietGtw6Gb!%?1a0-Lu-Ksn_b({TZEYkFp8b0?%RW_9V6y#Q$2vgG@WwC2MjbdQCe* z5x>#ATu9}oU3kxK5*Pm~*U}_Lpmf+jNp3IA5bW=fyS`GI>SYUA-FC zaR9!%95-WT_9T*31r~u}RNz z6{Ps?Qv~L#n3J(Z9 z!YuJr0rFf$k)ls)$U@OG$O64xg(AJlfF;5`K;x!@T!Voz?{H-t zqD}r;J(Uh=eYyjKKf7`+H4qi~JXRn+6a_ry8braE)F zCnrnnF}cAxHrB}QZ7fpBB6Q!N<6>C?9N*cF*Ko0Raw!?uRQy8D!h#M0;6N^qFmAy( z$g8rju3f|nUE)=p)7b^QypbOpp?=UI=PYKxN0!hnjX<7?rZV%$;Xbn$h9$WcJ!)gBR>o_7_oDk_=fyvx7f<~&wgX+*!HXZXSI>8 
z+Fgs)&WrIdj*mmC@qAbVH@R@p2GLir+@9s+!gAMR`>wm_yg;DZV^~fT?88>-U<}I{ zi{;#GcuV6pWSoiI*O`e zd5&nS7r*IfoNa zU6ZO#pza_C<(%ZWL0?%@H@D&s0s}4!4ox6M1Dy-HxIqmb>joz^9Yok6Aqos)$rfJs z#PwQBY_FO;n>P`WSWJ!`i|z1N;q$7f4t6`%#YL_)5@WOXK)!&=*|X)8 zz0|?F;M}UMXa?z%Ka=OoS}}v`E+~zoujE&AZk1)>Iz42y#-fXBIt{bNlA=d`P0`1E zO{p+zO2w|SrW~`TL9dxL_&1?+&u=ht{avhE-7H6{teb6CRo1OlvToO*QlRDQV%@}X ziFE@HugbdR9GBY)b^rWo5&M(mxK3W1<1z;72OL+fA6tx>UdL;Vv%6_ z{9HB5Z;VJykj$ls5k0{QxHMtC49bc=N@%7~DIdS=}lTICnPTU67 z&tL{5-6Mdagd5Vy4de$>c~UBH8DkojV1;pQ;=F-su7&PZ+60a@_^~(<+#!ruopQr&!JEs9hIkBS3R`>?&D7 zQSKf&6w#MCC^oyHDDzFzcU6^hvL0RCDOb5~zN>4lyH#kI0WU{q&c&NJF;N-UzZ}9v zPf##vNgzQ?AuT~bqT$;2ih}Nb4ka43hRC#*3c8?B(4nwb**>SB3sB1x3c8-sk}h}$ zCI?XIz{9rGf_0%6a(cwc25XQ{SjI`STnp=?)Vi8p!zt7B8cNBP9hBNo1!BvVn4uJc zUnZDA;=9|yQvFb8lSw)Ty`%%#fXQj9^CmfjkZDkQK8?|e$f6x(KcIHVWcSAx$kZ03 zO{rt{U&>P;(?*b~?Uy8CEr(1)tB{GMlgo9QCKagm$g!80?>M2IbFS#>7xrKDwF-ny zh1yS}Cww1*o=v%+XM-O;B{Y0)R&gWE1wGwrKW5Z53|0~*tXt3{b+Xica&a-WpRWuF zmQ(vBV|cGf(8J<(f*#XA>z-$+{X}$xsItuweiUlIe@T)9j&u_p$x?<&#h0j#mkCwp z1%vvW>_|$YwM{{%|GO&L@#?=PMRvdxLhY^6{`KnB8oE>T4Z1^rtJtzG-4P&6s07_X zJd}e0>fuIgljo#Eele7kr&|LUnBdC^YOOGMDWTF;NRPvi4W5+c7ykV)KZRP#Jg6woX)D zs$biAYfQ2H2(Ksc4@HdJI;%5&X{)tJdox>duao7fFQR$6)Y$lB@rc`f?rP#a)L2Wa zW9_lN(JD$JE9VM`QIM6sh|*T|%mA#q%J_U))DOpj>7t9Z$37`j>hCnxo_9i=fYN-Y zvG%&15bqOp%EQ-*%Rp)@Zm9z_rcv&i6Jke~-0KI=89nyQ-l>2})8{i_O zj&jMr5N}3g=g9t?S7SEE7(zHFrYDa=c81VzLNFilk6q0>{()pJdKhLpnoe^wWXmq1 z%@HYLe7VUX39f7oTcuoSq^E_9!bQ+#Jctl=Jgls&@!i2wxqQh4v*!(>IB!wA0|kbu z^aLy6wGsJT?NC+x4%q{9L~6nX1UNdH>tXRjVqbjO{74blQw~-G?CuI{)_1LFecQAt zFnAwDavQdZK*e_@ce~`oocE?dUtCw*n~k@lbEH_t0O&>B*{888z8(fhqFrtMRxBwD zXzbm-nL5`Ngnc7cfEI@oD@6hCbd^vZ6?wrFBUh@57AexTcm(ig2BTojV8w3#$f zT2Spep)QZe8&mU$+?J!3;b4wdp@`ItkIK!?JR(oYwqD4H*1=*K;uQ*bO|@%Ud^b8J zm;<3~eD3ZYw2)dO1z|96Kuw%x&sVM4K@Ov;?QA7$LbqR$)HUluXSxiH-s>frBW?7;ZO%4az+}&}T+!j~;)Z}P9 z-N~v~lmNt&Td^&nAoorrj9FJyH#trNxo+=7+cZ#1i+V#K43T}@P?}7cY^(p4v8?_& zc_jhQNGrU$<_V?M4YM3jGxzlJ=xqaP4}Jgg=i!>sIV8~5L)1Gtd0=C^Er 
zZI<%9Vf$-ei+2%kn@Jhhr%ig{e35R{%eZ|^w!fC(0}zO9Ie}=3(lFPa3K@@-Q=ul5 z#?U<5OyC`%He_c`*g0uLfS{Ek0=4TS0_OTJ?EH?1Ku&UVA|MjySE^9if=xOiFz=TU z-BmbA7nBhqQ0pQB5+JQ50=BxVMBw~tgjSR{cN8gzz=9w|$xC|nA_7b5(Sp^q2B5TsHscmlnKn3~y{=v2oxrtN=7R`1**9`ibqoE*^!6WvwOe!T7AC@|;cN zZT+0?+SoBQdqF28ufn}W$)C$zTX?JJfWFkv#zc3oDRRd+KH3fXVZN#Nb+AmhuhV*N ztiY8#2!}v$MMzpmMSl=Q`%|oYc6K5}H;OdLpETc+iJP!1f+ZwiWp?`*qrixOSNhYC ziTGE!;eJo}0v}+Ocv;{sS#d*8o(%)zA+Ec9K#jK=T6$u2n#-l!ua%BqR8cLT`l ztQ7SpYA4LLW`Fdy@g$kJssANzN>g`us&Qfx_;Hb0WF|G38sPz%V^KHcO!_@+fk^Z_ zC8YL%X1WGczz<*hw(R6vUF}7}M|^YNid>9xALl>XZwIpu2QZv3JsTk4SLR3UtGgL8`@y zu*qT>Mya!y%@!5pK5g{>P9yABhM(SG0`Wv!P2L4d<0)`Fal-Bl-bW(ML5*03tS z-RStMRw1JNHa{waxW-&T07%^(M9z(><&v2afMwNdoM5SQMVHtM>G7aCp?@ZGZ_)Vr z?oHsPO$R%hcOMYrutSESCP>%4vb;;|fnA#OLl?hlHc~hS03J305=KJSh5UgcvJTSg z{)$oPfS1D(R#&oAPSob?Aa*l$&P`^bAa6N@w$9zzq6>Ey7|_Bqp%`173M;fnVE@4J zCpr|lHeL5nWV5E-8u6V;0_m7A!drvJm64cTOs&&&PU!h4Dw)5#qmX0Mb&rCna%P}R zh!fK>4#}q&heS-(ahMsX%?O9A?kTC1xY}Ug>evE73yZ#xWBUjvbw}IMxc{TIsOcgV z@5i=mXOLqqi-Rk)lG4WDYhg9o90kC+!6J zTIbCuo+mKdas>~eUpkHLjvx*DM1IQ&3KB3HrK|&f11|Owb7E#MyJxUQN2Nkao6?4u zhpCQoUUuD=%dP{tE$Tp{qoV?R_%I?h8(NOb!eqL4ThER|No7ASYQ%zv)k_|3ra{CF zD_pb!c|IVb>#j&)Uo^scrz8~5#NIz`AAx8|L~{MLzv0@ogH6wtlccMirFs3eAGvnz ztT>|A=CzAlie)ee?LWxI`1UC56Wt|#w<4E`L8wkR6vC!nu`LM3t8hA+b0#azw*~eb z(K+)$*x?h6?wO{9XcL-vN9o#z2a)F9{77S?uS~w&n~3p->Iu2NR+n&e5Gr;rgjqxl zpopXyy|)2qZ$uMmq8j8#v~cer)KdJKen^GoG#?FzJz-j44~hcDUk5{W21EdNB#;J- zj0$^>ec!>Ppg6$f+pz%!^Kt=2ROMd=Di23t>t760;u2HXG#x$5u>?UQQO+tmiNQm@ zjKs+(Fg!;P0vaDK5u|-R2qGiqH3%YIQvc5nMVksj%Xy(Bv^lAj7mcwGkT1xqu?=f! 
zY^kc39kSRHR+VU@1pD?F?$ov)%r({Yn&J7iRomVfM14w1$+C4?BemF6^Y@sJUMnuG zifEDWV-qOf|!c2gu4<=9pC}+#ra6Awc`Wz%MRwmw3kv>XD^!dv=;V(I1&aE z1^_o|1F(`f$I`c3eKow^C4&Rv%PT-rZU}&m&d39NK)LU7U!q9EFeAZ=7)QkpzJ|F@ zev@?ardh^q(&^+w?QXEutX%bVllUQ>Rh%}yCgw;16PSNQ9a{Btl~Ze)hA1ep>NIWq zK%OUdL?l2PAM+hYG~fnRaBz!rxItJ{MLqjt#!^2xvO$V@q7-1k3~;>H!m6IRUidzA z_0RRW3IyX3MT-dXB~MrZ0lJ7z7L2d0c3{BK(T&-@i!YS?(FRF495I!Q(`JvJr4bWo z4h%ZKu$-(co1AlCXzIIDnabE~L<|NGg(|c?)elPa(&UP6GTkdJf<)SwTVh*1%$*+@ z;C2FR>(~x$psP{F5WEXzn%f;7ro70F|SFroqumZOY-%g4ONUFtx~)1`3-S;fJ0 zHsuJjsS9C7DdG{PEy}dT{B2?MrF{3@a#9m@!So2O16`fN+J;V5$4|H_C}nIb24^!< zyZ1{GqtUng=)Hdhe3gbI>lYg?kum&K+Qf|FRA3Er{%A7mXjoh?#V2WZlp`@VK2GiK zD{Dz`1qRVHE^s)wr6EN@7gt2*5|BrSG}@K~rfvA*{C)xN?qC$mkrsY6N_-cnNVJ-c zXdB%mHM{c^6(Mo_yPtsyB@uJ3l}Zr>Jk`YmYbnW2rp_pdNp7ALCD~b}B;skLwl+$_ z&R#)|ylZPnNzp+_37xJY$GW6MaHy?y-GCstVS9bFhHa>^Q?a!?F){X6Y=@`?E1mA< z)I^2B1r-aCMyx|ka)f|52q6rA3@_M>gb+K05ZcCtBt3Jac891&OCiu{GSM5Wh$X8Ow@W)~0KimTMUu`cDmB5f2KEJtxHP0OZSZ4KZw*qX z;;=19ea8~6fmc5{2^Zy_poEKvhOjbT!gafoaNQ;emjqpsn(RNw9|T=4<9(dj`1FDi z*p<=fB^)L=s7m4$ZMQQp@43V)+Fl5@ka!8Xoy4mSg5fn zE30FL*!gK4;g~^0hnuSl$DYI?4S@XMkcP~ndT{+7!$edSagQBb{}mn5o;+0I@6SWp zFNDR8V0ZpPAE1fIqvb6tDHh^*d(*Wdc8Toj54z}70wPDsxsoT^9o)(VlaXXcHIyZ; zmPp#9_%_{LI69(gBV-E6d;MjGfi_GeUy7A$^!FD-cO$Zs(Xr^gsHNks|170zlHjw6 zm_EVo0p^o#db4|g6@r|;=~O3fE+;T4$sEIs7#5fR-kzVgprNZT|CMq?^b;A-mz(Zn zzYy3J$Hg8BXDaDo&Gt-Ae=w*S8ys6D;uR-0Y^)X=5>SqyYd{;;V12RSPm3&fNe$ue zp@S$(e3$uc(zg^^%e#4@rcEa*)Y#L8PPcYzB%QPP!>hWR}kvPfhg5^44ZHfG7OT*RC2^`NgITK$d(%_MZsam|Il z&|ww!j@YCegoUHTr75RZXXRDX8@kh5b$V5?h?_y}k;(k*Y*2MLp zSaZ}cSMac05;%Asxyp*y9f+N97cUO*hF9vL3sWPCBX-4!amdO!IqZ=qaA^u>1M!3a z(~%E_VSQ~!*nMbr7>readi4g+XF8ykOgV4Mq3BK769^#-wMSpH(w|I!Kj!<3^{$hQ zi>5*3VeE-K&f-LoqD$Xkvs&WbC9x54K~BS>mU4qUK0zl4;82Rf^ROBgaz?=}?tz-| zgIK&TV_m~Qg1*>sC3%|;S2po;R{`Xk zuL4N04pLD8BndqMNxBO(Xi*Lfk_x_(LsG|52T7ewuLj9)%TWAZ6qI?_dx4U^tOn)j ztAO%nuL4T83r%Q=lO*&6rS=tte&{Hoh)@!`QvlNN%}~-UYoUBklvK2-9$sovdvej$y zJqn_eUX$-(xvge=TB{fzeiw`n?_xy}aQ4|aTdVn>K5=LPx3G9AbvqY27DQqV?*j|; 
z#Cadr@9SZH1cxf~<8O-4TruB%f0Snd^W|+Du9FACrH-+R;X&)^d=NVf6Y)Gru!298 zA&2@dGeF<7ackXQHzV|29r<-~LA^%~ig&TQ-W^WX69u~q3oMLfu@$oo!b!PnY>iPD z$>ZUML|sa5C^XKNt-hI0wA8uw1A(odXe<`GC)x;54;HtwA;$8#U`Kq{jm3q|19s98 z;TVn1McM5L54j=I%+_MNDl-2Wv5Q~rpId<*(iqtQZ?eX3y$0IN8g1k2enld~{Q zyMwa``VMX&6kuHIuQywL8wLj&?Y2595uQ|U3xCMn(i|@#pSqjcC1XoA*}$3}lD0Yr z)xwSZZc}SZS|2lx$ew>l905`OjM_{gEac+GI9sGMadtsnN`8?wq(fS)3mjQiBJG?9 zcSVa0HBMkZCc12OZTDP`V~r8nZUDrEDEU_FK>S}j{Q2?0Kp;z7k7~TwSZX7qqnOpI zAKX;P-%G3d!A*wMek@x*7(*=^!Dld^0{E-(=*2)n>d0(s^3Tv_iA{4@1=++W3lI|S zJIo#mjVf6Fq(}bAGXF;9|3qN3iQdslz*U={L=ma81& zY#h6W*e-I6k}n~$WHJ#(Yc3__ZP_&8Ttu3i;I*LM&~MNT)9BUknIkeHx}MP|8qLOX z@Pql_nd!XymBA}0WS(3Zv*uYdcU;CmTrUXj{7L*Mo#g}hR|A7SVsI{aga-?FAgtFm z1(LENxVbl9bH$3&?mVRk-!rJulxvDZN|c;vG4SeT5<)7#;4XIt7V*IiwK`H2&@0S_ zP<_+c>?CEarVI!tU!>oA!jsrQ;f;b2fPKgoCO7*2K@*NW))=PA1l4*zp3i%O6WXif zs?E3KT=;aJHAzLR7-HbPn_lh?etAImpQ-eo;Jxbi`^=qBiDn+OJNVE5TaoI?&QVZ_YX)Cz9&2}z_E$$Ob=*GyMyBcBN#<->#&3`$?K%g z;JCIgsFN1cOpzAt3s^DI_OyARHjUNv8)ew(BP=k5M&P=~PGyrM?49zUio)NSud(y1>9CjI4r)(UCA`Uzr>fuyhhpJZ9 zK*f`NoKcGHP9EJ#;Wv#eS|`u8N*)wgJ=@w^$x~Y=&kdD4^>y-WspM&_ zlZP8pias~j$)k%K@^Nv6PR~AXs^sZkCl3_f*AryZC1HnD7S;EyQxxKFoI%!39`5tm zO{JL41={MNgK0xjI4ikO$(|iTc#dp0xK?5J8oI#NN9!E8=i=D@0!QW zlCEnvv&`$&q{}Yy-JrshNRyw>10zVd8J6|C4g&m^CN(2Pd2;)+<_6PVT?$|4tc!L1+lYn&}f(iu4MY}m0L zMy<$SZ+E^V&P| z$y5s`%woc+9=ba?n4>{&Hs3IJK!=n0B#Ix5<%_h~^oJ8M=(J+zlY&>uCbgBj0iF%7 zwG&<6cg_7-b`=gMA=_2dXzsY_=GHB(G%YtKHqU2=lQEjF0nT=kbg=pvE2E!^ZG|~1 z=vKLe)7V?Q!Dr7Q%X$kBKX%}s{g<>_QJJSKdJ7qL#gjIR$K&k5dmnp!r|~i&Fj8(b zc{;<`+kkSqzHnnbZ`lRFghPse<#@O7V%*Vca$CO@*d1IT4MYdAYY%1%pPou!a%8q~ zzIh;I3~v;pl`S+CHMFI{W=oZ79Xr1gn}!SqN=u4T$!K>#B$R)p}-Y^iK8c zDC;?^dKRpnvz2-lyXql-wVs8%9&Ty_(I#W#sY-UrxT_%97!=G@;QrgZ$?;S~9A3ip z(W`7(Tg8t7I#Sd+LYiNk^ z5jkD!RjJMp`SzQNG{f8kKMC7%kh5m6{V`!YyBkzO2<~XKn33JV92d2CCd6b;(hHH8`Fh^0U$7Tc$)Z(q!76bJcDbvPaubHI z?}``ee!=S4>+89qdd5vybEQ&`q(UWWDmQ1VXIx}S{ZV|5Y)Ud72L|@dyxw_}BA%3R zWZr~8AK;P~fTR5*&vRB^7p(2GmdA}@@oH@^lx@S+IZkTUIH{CS6zmD*mTfiyJ;YIt 
zM6NWC(+qG=V-ryzf7Z+}NB*4y>!OLz2YiR+H9U)a3>(72{qlDZ(6{3hg$yoA- z@SwWz*GVkJkKUgvU-pGMtuLB1cC&ReoBoGiOtQ(jsU&&#P5lqW-z6v9%fXYhQB=Vv zXrw67SWhFx!aC9AxNluI+!uHSH7>RlgF_jPI-|shm9d^1?sD*Imx-lg^R$ksp&OW3 zn5)|w`f0iH`Kctk{3SOnzaKf({da6R8a+whXv#goNqLW-6jp(3MTM}rt*HA{^@eVW z@|3orOhfK$V|JbSG+YD1*2!xF_)}{=on^d}5@Li!Rd_sW&M`Za6+Ry}sgcKP;`^>_x7tnRAez`t6L=O-|f z?_-bWCoqbC4Ia-f@)4LUY#(7)teHg#(IVa-g$-{^6wB)0?Rd#_)A@89GHJ2o-rKZG zAr%gskVmWR+5sKdisc8-K9<$}J~G^?OPCF|E6$kK&~T?AjZIDzWS5E2CLgfHmvJK{ z|DB8=a5tV#lX6Q$x`yZCSEH*;q=Bsm{QyoE5FFdxXs8;nMU!KnE_HY`qYn7qe~IpiZa z5yDIJJj(qPik`r08J4&cnfm2uH_swAfXwpK zdCAe1^e}`cm0?&qBx9~N6CjMRC{gBM^MLb3RAgZL#IUY8C~2troZsP)*^Dxgs~YcW zsH+RETa^*H*c3A5B6ckiFmd-vN`LH@#%Z@d`B4&AsX8n)DBkbzfzsQs2^!dSPEC0m z`VxpGm6r>AQoeV28xp!}Y-A{BrdJI``ngpw^RtTJ?pjj_(ko6blQ>ivGU83MNa%xY z>TE=(iHvnE3^8$#a$HV%7)LoL!7jM^2{ae5CpLksw~gip2=88T^#W7O!Lkj`6hrQ6 zzt6l`D?^cGmzG`Z20AoG6^cG=w`V#pNCt>6z6OYL!Hvki0vEfaQzl(*C@CaP=Qr0k zdEz1xGw;7PBElCd@{O+o3{kN-9b)3rz7b?!ZdI8S4R>LvkG%if@`HX~*F^PPZ2$tp zG%SbRO-5R7U9NkT^(K2iA-iBVB$L1I-gNaj?>yF9w|8FNdRI$bePF@Suj-T)-g&DR zoQB%cP~8Pl`9N7f)sa~+TaSAz0T&8DYl=0eaJ_!bsR(OMV7SVft4@=40g=l}lv|_u zuW=i}Vd<7dX}Q@v`PEz&AAz}e*eaP7&XqR-U6GkCQB-nDVP*g!4(QRksQSJNoW z1hJW-{basPmE78?VJobNH4)AtY{_HzgFurb+fS7NZ8Z#m^Qop>&FRJM3O2kFodBI8 z#=Y15u}*aayEuLac%LB1eB=P**cIP);LD$1c;uJ9^rsK~Bq_<4xAUE|`K`385GQ1m z(Big7ba1z`k9Z+mO-K(EQ#P-OV-c?hzV?>)U3~Lbf9b1lebCpY6O+rPdiGO~D#nnC z;%QM4C;y@%zNw~9z4I#{_{>MYc=o({s(z^^mrpglOf`vNsUK8rwx7#B3~{wWa=@xZ zKW2Ott+;V3dfKkQ#@*n#cHGoInh!Yj0|rF_(l+pv7-+sQ$e{9ThN^*Cq%`#}p#pe0 zl};&5#d+z}O0OB7O4C6H?|{-NTg7ceM@VbtGH#J1UIcenN8FMnKlWOU4l_1k25|?| z=pg;-G!J6G>SmmrX40sMhxc-c#^BvbQ!qFztt&ARe@t71H<6YW5nQ0R1~9f(;o3-; zD}VHJ)LeG7y=C!BIU>eV#k?*XID~N_0b-;w2o9ugJi_@yZGa*Ubd+>(BjT_v#4N$wD8*7l5-f>-WDP?COZ(t60~)bL6-?(qi+c4;-gtyI zm0!+Nm~&Y6bSxvn9PrZMStxcldxB><#7X(ILa59@lO?+36gxt(E*yFfa@gktFdObxRt8nv5sfuyQs96Idswc${QV541 z>bEzf$8sl2TU`}l)7bXwK(^ewRUT@uQm-q#ILHcnl z8SAG4vMXj!|1*kB!S#fb$N)W#IJ0Fn?IEg`eE+}m1H^&D=Tncv?CIcD8osy#BTC=J 
zkH*>bhu$e?7&a3S#XSAknFu)H!WL+Nb<3k5?1GAK;lI!V!?TfK02%w!r_nDOgQkTd zmd+!P)i1`5HW(^!6@Ow;TiLE^H12anK9{X{pV{%gxi2^dTebcd6+lqjN+z^=rc(zR zE@C2>j{)k3G^LzrTN7nX__UM`?_=lM!!#MSLP@v3e63QHYQC+ z4fh`CxFGld561Fp9?Zt;^(Y2;NAwyfQN@l=-v&B;c2XpUlSJtRI^m?)x0_?eIw!?? zb`OobV(k4DFFnvv%nuSfHHnVZ`?{r;#(xVZ3a?66(r*qaB?VuRbmv7uz~6Herps!_+v86VW&{C3Ke0;_)u6?GIt`ztHI`{mObg}^9W}S!e}P# zqLJ!MZm<*MkjU%0C&nRnz7kk;VnqMLMb|yLL(geOE}qua>zhTky8O&&7q|FfV+IXr zQ9CYnhtZX;9t`DIw&Z9zvcb-b4Ue?YPS)3kLtv;HsSXh!1%au-Wp`-wbr8`;Jth1#vzNA+$l}mw2$kQ|;?^OI!PO^=Mdv50$FkE= zD$!AjWygt4>#pai5&YIcNjNX08PIX5Yc=G^>Zj}pEj*^IZAUUKyx!-=uC}^*;d*vm zD^G)MXt3SqMi9)6tqzYZ^b~US6iHR!z1WpKu3#9}jC(ENhUfSswZ8Ej-Fo8blw@C_Cb7llX!dm~gF~ljw%;HD`7(q@`gj&K5?AzZSx*v4F zZ0T-zLU%qF4UiLRhgz9^peF`$761re$ogo9CebdtIi$H^rJB49>LvziRBU*4a>#oJ z0%Tx_`;J6b#pD20%?v252kBz%vf;7hH#9$}t0iu>E+Q(~X+6+M!DSo8UPY5T5i&te z36b-G{;r??&kF!{9uw{uORj#yge!&-{n``a@_q{!Vu@E35;KS0T_-1kYQ7)yV*C30h!7qzolsaXQKW2KWQ)1EIPQM9)wX1k0}~v$Q$;BKjPj7I+*a^ zs(047Vq{xfCt(G^h?5T+gl;b_IcgRHLb-3dBf67vL>;*0(7y043hqA$8d8eaJI9jW z6cV}yIh#Ec?jsAr4E`d#s5FGR12X|GOOXPrW!=K=;Z?j*>=hJX0*h7_ z5uW{bHN!MmO+JK$-hA=gP^CJrW||(!!b_AdqMV) zew3d{mY;&1WUYwy8as${Z5Qb}WF%8{1tlUMXX|Z{~vE}17z1#-T9t#?q}cI zA4jr`ja%T`7ZKWtg_Vh8du$Wy^ur$nGdLNB;^pz?F;i59lBksJXj~-E;A`BrJ0M6W zRvt|dlg^7~)JfzSP2vOv6lf+IqA&uKAV4z~(0~X^P@o|S=>&n_@4wbQ=bqai{>Y<7 zs_J{b_S$=|y}tI^Yp;!;EUOF?y{D;h0kF#i8pjgf2&JAOjhVha)i#E~k3;Qch*j+@ zEFdx+rjY=ez#FOw5EE(H1}ecSfqI9ZNQxq>X{*`>1ucMR00vl`*O;k&1m^?@M5c`0v>Ezp=H82#^8eke<6u;aI>tYNd2&f7JAWMLT5hVCb zu-0kU`mm6R&jf2djI9q0x%W)K*2CEPz!1UD1PqR_oP^KxooHN=0mT-G(NQZtGMqNr zq|-%sX?ecA)5uhU(U@6TL}11c77(+0%{-;(W?5W7^(-c_Wpa|LZvQJ$xvWAYFG3!} z(DLe-c$f8o9alCClVI{0Yg?6D4YaLlnoph(_G6^VQpR}2-n6X2cd<*wz-CRA2;=&%IkW6a2Z*^i$tA-hYo#UPS zw@I)L_9r@1In>mmt`{&)O#aZ|oYW^%Va|rJhPI+Y^Hi~J&4dugo*aVaE9*(m)Gc7l<4dQ@j>TDni1x;8O%!vfyYx`j;> zll0TjrW6d;H@HO1>PC)XuSIh-mIhUlawrJO*)Cl7kUKA`L%w?0zE5k1QnnJFnuS)o zS`atw$WuOpYn|T(@>zYQ%_IR(nLH8ZlK%280y8 za0M~h9EOg)QCFA_Mk{8ln^)M`7#kg0hK>K&*!b?8n#8%7(qJ`3-tnVt*vN`En-r=W 
z`E78@XxSoVH#EQ5(y6uSnwqO=ebI;hU(0&ou}ol&JMe|T2wH(Kb@1ycdFt(J{u zBVyA_iMPzsY>fRzy5(p&=h}vQ65!OOIB=Ks{Jg3!b$D3<x}-46)cx!cSNRd-YTa{247OLpi^iHEJ$oBi$vesppq9X4@Hp308^O9Igfb$HLL zZyP_I{(P<;5~sYgaPRnhj;3O&$m53WLpEc)uF z;H=Dj7Xf8ad8FSI&oCy^%_f4)Xb(a*HVdk&1xCppH&+zQVOG;`RB^Q=3M&aXPj>3R zTr3DTkcSdeG!^`i1k)s%+9EE%1dXOH(Nz`CJ4HB{LD@7KB-P-k(og^^BbvHgUui2( z05XCFH+5+NBkxqXkeeCIaO!{lXDfONSyOXcjzFVc_nnPW8-|3TUb%JLjJUo9)l#W ziMN=Ki%}DCVMDXo7a^t%9a@DL>M8QU^ROF{Ty8!J%I#QFPDI%yROWg?xOxJ8*s1Qx zMnsi>w^0*4g<5zZI_&Z9E%Lpva-~g|AC2DJr76(8S_JuuL_6N}8(3e!XZK<@W!a zcL-B*Sxs4T9K>%lb%mbEZkT!=S6XnuC4=4Y4_c|tZkecPjP+km;z?Eb!{KO978^Uz zn4IxNwi3Jh+1-2irEA6~>ANjKJh}R4MEr{J$A9r4b7wEtB?AiUkMfj4Zd}H8seCJ0 zVwV%Tv~}HVF?uD};uX@@#L5no{lq=l6}OG|!9_%?8n{T{A_EsGeM@BEq6~1%@qrwH zH@nWN9dMOC8c)@KYFz%jO8Y1Cz<|O$@{asB?dBG-5OXocRM3Uj6j_@d=~ky6c(@Y< zM_}Xrsmjd#o$5pFOQP(5zH469RezqrMi1WC{*%qgnEUgP16nACVQ-*MH%o!q+@-o( zs&tcHyquW8{s?GjdDYppyUSWHyBrvnF(KkF!`fwY7nE)xplu`^uVN-M77|s>ess51 zEQuV3y(qm~qs|%4ocKfL^oa(dfd00&31&kMQFnJPN~Z8BPi;x!i;-otv!$!us@fh@ zb{dIDaOf*4)nkGNF{NS3R@x(A7w@h4@0k;bs1XdU?NObbw<*831=o{6%T5+m7Oi%hy|X@$*ww)9GqZj2k%vw&yz)!<2Jw! z81PU9wZ7A~C4E5z(w{M^ra3K0LH1SELKFh7Dnh*lQ)|&E)LpJab+c`;Xd&x~DV11y zVisD4EOde6KNK_EvL*bsmheF}!3uJ<{A} zL__+(RvJa;aAA0%-VHrX@63Ik5vh=LMvF)V)omO63D^vE57aZz0&i8hlL`cb2sAk! 
zM)_b^vnZ1Hb)9epivrDIS_DGqh}>&xREI@tSWfX2@C9A75*%#j(F#o9^GIj~M{t&n zDkab@4$%th;8JQ{Gf6vm#;fyzhPj_+RELUDJ!q*Hi-Aof+UTy1L?b#QQFpbp&*8S5 zQ*#hJdc6DkP0`>90hCbWKotSy&9l&gfDIF_;f03Pl5*G9Ct5M1Ny08J4GVWKRV?A9 z8i_VH99Sg-)w4X!2dl;ujj5m;hO{%PfCP=HpMAp#xz@=LzW}Xxhy3gtTAt$^q@ z{DaW-2D+<7M8Yvp@VCXh2M}0FDy9k<)dXc)>|+Q+oG`>1F<2DQ^fV*)^!#XJ7dR5K z=(e@A@4`eu4EuqSe2#S90`Iy`BE7hXIgPcnEc>II(6gBVo#Gcje+ z1aH==0bHLc)!lU?8Ibq=j#+F%AVgTig?r%3JOUoR^gssWy&~W@Hjp{y}Fd*{#4URje?BaR~JyQ!yuJ7JLc<=%s>Bo!wDo<{2m%!P+Cgv!%>L$(H8NVHWI@c{};q zx_~i{SHX;VTOtm@hqrpfMG>QEGeS=7U{QHjcJZ+utCi_Bp*u3|G)2TE_-mNk8gk(R zyLaEt+%TQrjMcz1G#kFhfBwY872WQY9kNG2Ax#SkVc(AuhBnDnhkZ*Y_fTcu57yWQeD}sbi1y`RpM%0 zZPnF}aFvjBI(L;Gm|_oKOzMHT2*ZPy=m8>LQkFFfe#0byeW&vt3#>xeB!ADd+tI|`j{#611M3=nehuH|(dA8IDlnHDK7$9% zZ&HgbD-Td6le<~mfW{Rt5c*Gq!2kjl&2!0`HebI5#itY@x6vf+rKcmR`~$oF3FewmcC_5QB!c9YCCGI~T|iA?!$ z^x#!(W_U^i_NKY2<0Zn;L!|(Ghlr4^J3(uZKR}6z@}sO8Vza5}8-r z82zqheHC{MS|3@*3LRn{Pk}ge=J1-POtKm_!PJ>0E?Wj`W~^w@k|$J}SR%>Jla!K+ z0Lxokek8fRa$22UvM*1UzFfgyqu2(%y4$esdB!fgOHAZ!kDtc+4;_+V3w((h=JeYL zjX7uPEEdE2p|`(NE)M{6JkNR2Qr+AKh`)ue_>RaMGRB19<$D zK2laS7h1a#ne3(pe@vxe0lLHLd3T-7Dhq&F#AJ=1KbLBQ3_L}$H%VPv?Nms9VK!}9QGt-Z1HTv;`HFaq~LE;x_-xMF= z1w}04+s8lYlRlIzvR;C@VlQdn9%C%L=JALgg^X?bAcUjda*;ko>>Q_ zGvQe^idK`*tC>CM&ra(ZeO_j*d`Z`3CcU5s5ZHMYSP30~U2ChQd3E#9f&g8~SuzAZtpRweNv(TgHjjo_W zeFD8yl^Pq-NsjaKQZv0^a%Jeu&iHCAb&&y{y5uidVWo*o8X_=TFa>4y{Yqn3dWt=B z@;+(+mYUWf98TX&Z<#uX?UYO32dBoRMtR0H^$Z@N7$gr3n;+G-Y&u?~s zq^-{NV4eH@+}G0+VD_4LVaO60MZTXcj1W-dTb|Hr3!CrQW2-iM5D4KZw5q{W7BVID zW@rK#;fL>l+y4FlY#3Z8a$nmXQ>r~=&9I@zCEKzcbtX$f;}JgqJR~7wXE&JPkaujf zUF;t0Ugo~{D{;NsU|)6+Xx|n6uY{okjsdHtAGcm!Y7iY1rb|Yg#_OMHuBSn4A$BRKwAVJpBVAvD}$PX zsLDbyNUlI$-`NDT%79|f%;^G;vz1LbWnAE@Jb7B#q}c}SO-r^%4+G~Z_CIZp9$`XV zQl$ET$%W8?qIFq-H&e1ehDvpPF~AU44R+Q`SBU`$xurBa`pLNzluNLh`tgAl_um+O zEfwyGR?UUy64x+kcE5}OWZHN>c}WLH-KqscbctCE1GX$k%dE4sD{HP#9uX{{9((=OaPy{V(s zGYE{b(3?gjY+f+uV3pK4?rXIrpVV0kKG7CFEv%r!2!N53s*uM|BPj~>!?;5%KS8dd z5mBxtCo{U?*X0rWETj*ROK^VPDkPJ_KzLwMe})EE<(xuq^)|zwtk$hwD$0BA8l#Xc 
zIOJ{H%UY^VqR}imNi~p0)!jtr>iLR#0H7bD{9E{W6K(upRY#x|_#uj4tTIm7k`e8{B0N_!$j7q2!hric5+#WcZ!g>eCXBs^y%Q86y|>UT!bqBhme zX8G0;2omtK$l5@3GVdcC(y7l?Dxq5&SRF?13=p;=do%~F4g!gOR|~Qemu8o+8W99Q zG=ZSCCBvHv5f&Q zj!Zl+M=k7oxyC9c6*>(@I%VJ76n}gK z(v(${Gp9|<~b(~Z%S`Wyyj0El+FM6f%TyaLkW!4|aVroOU6SGOhUG`C_E zurKM59EglD8A+RxouGd+XoN2~yaOxAEl@k%s1sCwF8nB`1GMz18C1S5WMiN;@q7$S z1Jbo6wG}cw!*6yrM|SAo8Sck|L3M6q(4fleD;rcB3Oxqy>3oA@P9yULEX-) zfrzvHx_9#0KT6IU#e15T(ukth#uRf}R!19*FFa-=!;;i`rY^MQrY>H9LLG%2p+Oy$ zP22@F#T(XFM>D2As@WJBS!_8zyX3Z9BQX*)V*yRsGUG{<_nQR+lxYoX+A*f(t(;Nh zPg0(9i&l}41ZlO-h^;E+CbJQpDK%gUnj$KZqEcQ^1DE(*55Vqbwuv$8yi{$p)m{#%mulN+r5{EmRHI#)Y zH93mMa)i0G(hL4Zi{IkysHAKc9@?kJ{Lb;4%@Y_XLX(i&$F;a(tS`TJ1X+@CQV`Aqs;YaPK4lxHpY zwSnbX3Vv&bqlc-(MuMN#+d*V*nC3WWZ9J1!;8TAxy^wd7NGtK{a4;zjLENrV%hYZ2 zDg^Uk;fb2}$*Zx&>DTutCFbZb5@W6`XUFX3UHm=V+LeByK~STum&=-;<2eMZb#{nv zE}u~pUtD(3Y{SW9q1DDIL;;#=M$08sdMW#BHE1x;6Z*X*C>XNHw*V`a6~SZhQ}JiR zmX>l?kYA%RHb`6QHit#8r3e^IkKugUoeMZ3RE$6tG)=;QJ)qx(RZht{Jq*HpAJ_y0?2SC62Y4g*^P3$mypc1jype}Pm?j@<+C~R~GQjxSeOyLcvVMB^cvT*;Op%&S>>PY&E}GF%s8#Y{6`0cQ+;^K-nRfZgE7U zAM}}M=grb&RE6^vmjZ$T zf+I~jD9+w?J6SAPPf_%0MF@a8sJ&LW#v~PE>d_~HX?yRAx{yU4`f{hERY@i3FQ4yF z1qOQyn9Lopwi&F#GBor4CvECQREavyIe~)3c6?}LGu6-<+hnE%V0M^scDT^Flh`J} z3k)`h;UU%R8fzxI!REpO4v1HHd&2h-hFIPlraZJRe9KT*w8+1HYYfr{Tb^d}r0-|S zdY{IRR7-XyI}N{hQ?HZFEAt)?Xq-(K@Ab{v|Jv?@KVD0_ zh5|^MaO;S?%0NN6y^iu{42m){Fq}H|6DXye45mqd@~?q{Woad$N$mxL6#I)rhVsx| z?H6AM3p}ohg`TeOG}On=Dgz!|knQ`#d(BXy2~6j!*;)4E;FUIvY5|7p^b85I%ycZ{ zT@6-72isjc9gDmsl=@Wr57;qk^ik_HWJ?jwhq?TDK|#rRXtY`qx`dj8cyc}kdp+B7 z+%cHfC5x;VXU7OvutW|eOdgAs*c;L(dr+jab3oVHX6U95nPZ zY8$0#Pj2WZ3}ndP4|5-9s*{~ae6!bazVV?`93})oGeVh9#V|m*)hDghh?J@cGTRKL zjnFiMhqg8Wp`uJwfg&S(?`@wlrU`Jf{xFd+{~7&xL_0bL`(wjo4N1S?C|%WIbS>MK z4SVXa4M?&Rud4#LH~^3{ z&6F@M<6TphGUfWLr3b6pG7L@?Nns6(`77;)g%CNT3i_csv9@7v4c!P4+UY}^kBdeK zWZ~~|{`T8^f;4>AX5uoC9G}ROg{>k(&F~Gq$EvVeVWAx4Ic!1+2!gf)xnp6*82AAVCjJAw;&2PbL z;5?f=OkS6~UPZS@S3{UAUncj$OWbbf7MI_@v1-hi|1vN^5~z|Z1+|1(*UY}ep57&z 
zb4~njS4kc$edRx}=I!x1-e|=Cv({GP>69}8pS;?deGNEYYlMaxw-xQwuF@CTMhjC2 z@%Bi|Dx*nkCb4|7_j@gSV<3yym2fIYP)`rBrnOKr!?h(J!h=qI=koeiBLI2nAQ0s*`xdAo<=G5Nf`VEjY5de~ckVy!^ zYy|dZB-uIp*L_H4h-%yrGE6zFvK>lQ+y%!BgXUwC1XsnKk%QovWjQA(%lo|wStK{I zo>`oq_v}c&XN%smMfD7cNK33Tk|8}Ux)dlVbSZMN<3JTt7&?h=R3}dh=n~i2k^w~l zZGuo?*Rq76VKI6~ z4!{RDr{|$=>Dmk%O;27XMyX3)^87>ZdPkc5!(Z%WM>J1*%#ua*s6WGoMFlc6MB=VE z3ebigW@za=AVd21H><-2!Z3qQkH|AdpK(HAeh7eyaRLNKl-ggs{hcm3J@x>}sr5K| zf~wv%(V`{$D|qP9-P-ZdZNXgDxF-YN>ZCsqc1WTK}NCWTlfGtzP^CHXt2|7a#-9E zE=5ra-s5Uf6&%;I#QH&vdbVT` zPXh5PYwdzy4aYtH_9i%nCWYshibwj{dV4m5#qtu zsrGFDJ(R+Vo?uFx=cZMha@M*9?qtC^t<;O}9=#)al1Y2v^D{@#GetpeMlClVNj zE$uE})Y)Td9306oexTpvBi>|!e3_$goh?#TGy4D`!hf4fy}mMpAP1~r>MCqV2c5{k zZDt=IXz&h?!gMYD@qyOrYfeaB)7mrr)_$tr+LPYelWMJn94pwknSE;g)>`XIQ?#AqSbJgmVYP5v&FK=L-QgGLRsRbxYkbKf| z`DA4?7aECZiUPnjjm5{p;9)A801t+42ZEfynoF?ehv+QEKAYWXKRv-|B!BwzMM%$Kh8tV}9Ha+8 zJj$BZ%Kg+#pOm8j(YKW!oBv+zbcuYjZvDA(jNR z!9cXBLj`|u|D{29kG>e&v6KpRbS%wxAU^+!@vSd-;gy|-~@1EDrPI}Fk`d6R&Wh=AA+xUlmcS$>Aqg(bLXYJi)|MH;UZDK<|@813^_Ad1= zAM(2~e|O?n?HvKVg7OK!8}WDF^e=II>)o>75u7OuJ>s>qe5o4V<9E1;_2pMSZCGvk zyC^_}ydQu{&5zP*xmDOon&v zK5Y1ssNr_-#Kedyr_)2yET`hVb#?b}=1KUX%F=(XmZ`U8N+-<#49 zGBR!8A^f{X8Jzx}>D)sYV+8@EC_msW{5cn^LUg#v~+ANyTLFZUT z?vP~ip705_l73!2-o=ZH-DK8-$zio2pHWz3ZxG&bdwVHMQ7rmET;_|aKNgPy#@AM- zu*4o|U*@PvvQJ706I{(tdSYw;`RDb#mHl~mK1pW=OvP?U`@+dYH6WuM%xugF{?qRS z9t8hdM?%<%PODITU0K}9y-y3{-JOUfXl5qxjkEygd~~8Wv18`Oh(r+CC@YydHCvfe zZnAG=x#lDN2@Nrxh|fN(pM^hXkZKy~YoETg=Du~`LNRph_MV)>aOQ@?m|(GP^?Gfb zrzs*^i(-z_&ncUdxq4+}6LOk6gf*75UzzkF>YsqkatoXpxU`!r@AS`D(n;K58^$5d z9$&$h;-DRVG!Cy{O{;D)faEXbc?cjvsxlilnD-A!gsi4gwCn1P{XZW;emLP@0SK1l zm9t5iW4MeN8!|O9FdvOPJ=7JixOa@+W(*cZ9{cE{Bt}dh)EtWS^zihz^mMfU^z7H( z13O_ZoV8P2SqE*mEy0{+tyeC8r8CeX1q9l)y(+lWo4!|93Y*n7+s!J4n?wmn<>Q-T zGOGo#qa=VebJb|mG18*L10_EY=IhFwsT>JqCyg!J8I&I?ppeurK$&zXKSz@*q$GSP7;+jzFqwNnDHOU5xJzECy<-GWtmL_LDeS)ZHN^ zy|>oBUiEB`?leR1)mEw5^{?Z#R$J@Yrx0&nE}SLAIS4KO-{o>;_vla*M^ThS5qbPE 
zfwa*6*A>g>rfcL<`d2zD)T_LoTFjbE-o%y93h%a>@qybR335yGE#fu(dPRh6O|^0? zX5SP)i*jEbYKL7&)?vrg8vumx)?}M@ftcJ1mGG|1iiH?zBw~p}yhUI>o1Ac3n9nrl zMdQZ%N4FDxlM;I zi#&+u@?}#p-e?Ob%XvpZ$?nmSN~={(nnSgEBN`Y0V$4KFWh zxPdF3)Nq|}0y$?7N+b=TaDsDp48$>j>AOIV>&Y>PL8w4z9UrBOi5=pM=icL zrWq+ClJSyu$c8zsE?ZW>Q1w1`MCn34gt(qm>h&u0U6p_34M6>!&yaWKUwhRgzUIPg z0}KYsGh5BR013Z4ZzP-Vl<~nzzr{-b&S!q{I%Sbn_nCVM0Hlx4Mo5Aj`OEY){pCJ{ ziV{8;1g2lrDWwdz{}149S0MoHcR@Mfb4(*0Z3jJFRTVNFA(&R z*{XqLu|#cS-WpDaVD`;D^WXGW`XFN+W-!>M%*+;Nnx36D$csS! z)g1DOXh=Ofua8||EP1&ll{=?$=gM>OVvt29qB~;szgNhxKr0vf(3TzAvY@S~#7dZH zL%?Ges5DWFZ_2|^e;w6ysf8XN_tz5>k^Yx9vGa5 zSe(hO0b(3Aa#rS_v{!U`)ZyUH%i-W`6dc@*cOhAQ^)=f#fe zlIrE6$N5k7tW7-r?%?qVs2W+X32c;*W>Jq=Cg=(=r~VO_{nuF^x+Xq@SK4~3g#!?c zH{&q^5Uz>Y#P2U=u*EQGEBP!Mnzkpb4w$hJsbN>K`e2**ovPJT5i!49<(_?r*h;dx zkUm6m_+estk&V5?<{k6)j;VY@$srJAq#Uf@=Bt0mIs%!H;QIcM4v_PJwd{M!+_Y=7 z62%c3274cOk}WV&?IHj;jPJ?uH&)$TCt-UWR4;9Z@;RXQ;2 z8{pjff3^?jw8J?qI7@=F1RQhd=fmDMbYv6R1ZtFoPXzFjz*WHv^R+Op^TcO{9ufUT zHhb>LW+&B5Xc8#9SR->dlFo9l6vNK`aSc17*2jFh&d?51eugdKwVf_#%+>How(@Hz*Z@~! 
zB9U|Ar!Xnj3`x){Y-i7hRoF#|D9(kmX;G6@_ zz>qYxkvwnRZ&-t86RF%u&P7nejd&Fyxtfp!4rCmnOduo-4&i0KNo@=2)B?G)%F3)2 z26;e!v>eXSKAgi2=dj=$5u78y8ECZ~q5_v{3$_yn8w4?0dYYSBn}@mnM=)7WDzFfGImfS1w4!@( zKSX~n=-CA;d6G@aMlXuwW24VdiB6tUe9&r)3kcx$($0YKo-V;d3&=BS{!D8S<3sCV zJTz;6AI_}9nH3y%X22J~8NhheqV0?*7?Hk2V`rnzFY8ORw*28PrE(%&$Jc3IkkF0*Gd@A7e7G7xQ=%HB?-6-PjpiA<;ZNCONInWzXjXZvtY zI~+EEBCP~x2{;2?PPDnYpqTmLL5evb3z(*73oPrX#$2l$&0n141q2m#IE!Mm(+cZk zcz|LShOe(IBC13&rzNsz=Q-U&VZ$`;te%~F8a@;k6lI3cZg8b+oS#Z59<;XjnMHU zY709)2S%4E%G%M@FaU?mm3coF9S-|B5!Zrq95|)X9kEze%CHkLFT);9)qPTBPOHpm z%6zx9s!cprAD!%JPjooz=ugv)k-J7ja=>F(UJj4Vj)J!&c;^L=ore8om|Q0cXx?}-t1u&cvEzonh3rGqx0p0G zbp*8GO$)u4vE%?!0#t?c_#_VRm#ryQd>NIcL_YLk6>^sP4LRuzIjM%QbBgG48nOzf zIU(;C{+)?ueJEx%6mx3n93z|KwBA-LI!(yJ(JJAr?UG0n-&!ii7X6{K*V)>% zRl@6t?f5a0ztC^+IdAYeHF#MKUS5qktOnA$5X7tz!){G>B3Bqd=CdB9Tnv=OCM(Ek zML_wz*uq+5sSevcyGU#5=|u{%&dp5Mg~H0}HmdWWbZ;LwJ;zN?xS7%GnH)Fy44t+2 zikcVlSQ~(o$kc$`MO|Wf%Q!h9wQ>UEWPoinbs+}wbBb;e`^cNIN9lG9@d*T1^s!-B zSD?7OPFB^$epSm}6`6EEf~q1hOKBWwM*+xqLk^hJD#Py?eXry`^*sF$gSwqY<@A1D zA3D7(=@LPuddS^_OrV|;nT;syTApUQLfr*+Sa zt7rFQP0@YDJnLU#MEU5P!6bQ<5-9Otdvv}b9QVh@W^y9?b0P%wN2jkY8zdyMAYTAT zF;Mh=I~5r_huQmaRv7Ru;Gph^Ap6099MrSLQ$yzEbRr@vPHVqbe33+)b_@{pv8}Xl z7+ml4YmS5E3CN-jbGrZY!aE;!XP6)py0t8(3hX>tM+32oAOFlv0@b=U6Sl; z?;{*%aKVbzDu!UAb=a?m0s{-Jd-4th#2Eo1*i;#a()u#gaD7FQCy)uJzOoWRtQj~| zLRBpqIj&gzI;lZx^%q$YevWwdk4(Th0c&wvXSBU2A_&@6cZmQ|d>l3duSiUa*KE%4!Yg>+p-hR6P)4obRM1opbtjrqL|LWW9L8zJLoDv!wm| zRTeAlxTa-{4PRcy^-PbosLjFx=w8 zjc6(j>B#xSn>j7>rGLxpoppG8e!?2Io%EwNf)O+&{GC$0p0lHiT7444(1}~9H#_;U zxzv`orEhT<3)xd~Cw}~q^c$S526+mi0al8^N`_}yQ|I~=oh15zgV=amsMF_~CR(Rk7k$ zRP~0uMXbtoY9C3zqz_n~C_cD5v;~q7Z`aeWSOqhoVGs(#!+M$`DIo56m)29?_R?Fw2k*#ZGTUE&Q9DU0Y@1vMJQ0kB2WA9fB+L^*CDW4_qB zia@1#_;P!6d7w|1R1j(UwQ-RrA7}wetM=NB6bI_0qyGn@7t(ydNK;cn`u8SCGJ5Or#zAd0gFy84mys|K@bC(i5Wpr@jT#103MQ(7O(&*Gv>Pw7aR zIQ!O95v%sRf&qV-b#}YfsUY>KA1D2q*rv7TUhAL1d+MpNhd8PVBSQb0uVdaHF(+ld zVA1Dwf_4P@c5P~?3Pjes7*pQ5$^wi&`qhcdIkqnxOL<3;L0%=%*it>5CRl)5Hh8j^@vZ;54DJP3`5X 
z889mDu+`cKu|o5bdwd~LBf&fZ;!xZLoCP!NtyM&u@mmq%8&Kz4f8f-4?w?Mbe_*Cv zAkW7C7V*z1bY1fNg8}0Fg8|Zf;yDv$`gvo5>cI~SZ1p35D0r*LGRQ&=Tje1ggA+*d z!4gS6HAs?^fg~?0ljM~JL8du5_E^7KmmmSaDEGeKD3PRpoD*fbmLz+?`uJy!;@tCm0X`;S4|^aw?{LD z#U!nd!HN7&|5`e{x$DQ`h&42xkY}KvxX|R&zutuhf@3ehW_U}_#A`ux4(-G{Ic}L zn5z9tH5?`*#a#wXSPuciqs0er-~*Yywo|&%X*w}ukQQHX z#D)w%c1@>p=*LWxXiG)!#6JYGM_HieEGci(a7)h?u$n7iB9|7v>ISY04>>9o#H^QK zk{^(u<4%|=^eP3_P=YcK0;!Vy&LieM`3|)+pAw}KFJKw&%7e%T?0^hvU{iG?+c)Q4 zmx2!M^DuUJ+rrp6u~TS)W@!YCXTtD>mWSapLNSu%b0ZOv>-6D=%)1l%nlV(_*lsqf z1CHI0>EE+fNRx@R;_rE$g(!SppOl}YJ=aL;54eS%v*ZucQ6fQ&CGk`W8GsT?2DZDD z<}ZsSn@17LeXi69@UfADjSNM4>KNlQ$d#;gygp|HBDYj(QAj!A*HmZj#YD8UmL+wZ ztn|ReM!}ZP2xJvoqB=C4GrlxRjQgA!Mrm(pl+=H?M#N`)wk~HL^Ty*>^~Q@cMRw}i z`kWbBeW2(%B_tQD+p9RUaXrpgI1_gPCzd*AGM_ZwWDuM;q0bzZhrKn{i^U4OiICit zofnB^8}O#mZW#QrSMm{@8H*{c+~3dz>B4OG1ICn)YPZjnkbF)h5T|4dtuT3G{RboK zGo`3rOc~_gyzpTC2P0z2y#q`sH)xW*3t;s@zHB;Q4)^&o4B??-2%Tp(;>$2{xmUZ? z7yRBfe(UgM7(BQz&@FX{KW*?<^X1zgfH9e-IcLh9A2?H5jdzGkv}? zOQso9mp_7Ki!3tUJWo-oV?{x<88}q|0O!P;Lx0?DPDTGR=Q8mjAEO@w8&ibUi9|c4 z2YGX-GxTrsQlXC~UjuL20>*US&jEGW%>4=SqU2WQ%2Ux#J!^0ST-uzvmdWQ+5NoYd zfNngbZvAw(#IIrW`g~X5q~N}F`89|u<34pr{CYqgqq|SbuMmG2a!QEL*5lVv@hg!C z%^VTUX^5zHtLaeR?P4M?@TsAAQ4Ai#tMKtIJNc=3a3f|7inKX}@z$KG&P?dYX)pLL z7~M4J!qYIT$OifR9GR688!_wj0JDOWhe6}WAg2}@)HcpbFR$TLo}t~I1E+?LlsFah zZ9w<0;Z*9_m{Z>(PHhBE#lp-vRo3YwdatZQiQ+|v{&6FzJy3lcX6<~=r^u>*7BdND zr4EkGwQ0$~g>qIKxXdvNc3orO^1V2Gt%1vyk94JM;)dS#3L-TlBWU6ijL87cHl`kU z7`BZc>JuvS!iT4IhMd_f&j@aSXB#?z-4v_1w69aw;Zd1z^y=w()c7*Vj7Lp`QNnnX z?*0GgQ4U)Xk51h(gHUZw#gE7VNVm(3X?PrD%>69I(a{aGXhR%1GlV!H~PVG?Yeev`cLHA`UZAy82z$- z4ITM`ac$2OtTZfh717>3&%y+rH}?IkOki8+TPm@y^R2P(vs1YLRBcN>0p|SQv6;6d z;*wO6DItP#{JO7m&#dNO(QWYJm$dF8*Cif)MvwzM+??7Wy(>0mLtO#?7Buhl08^jx z*4cC>hzSW}SX73cCK&5aVT0m*LK4atoD$Ll8!O(>0pm-<2_+U)Liku^D8JzPgogDM z?_xBq%u%3Ot!LZXxa6_7FBs|p+b0Lq?-{s!XhSX^a=i-=H}3*PaQl#PyQSsQ4A*sX zpS?eyv-fFdZ~D@$73_`5Q^fubrnjD@9E z(u?W`a|KJfTHYC&>|Fi7L+}+Fc5QjT*+?o8r)RhEJVjdc)gIlJR(zul8(JehyeYoi 
z_TI?cyd#Tk^M?ZP*f?i9O0P@ah_eL;A!aX;TXGtvzpceJ8gzHoOYT)#MFJXmVX1iW z!7~7?75CzP?$yg}jZ#j>>vc&gpSjE+0?IP%ai}Yg@gcB8!kl6O68M%M<;V&nm zhy7~*gAcEX=i+V+qX1v8dSIJC0JNV^E%HG{=B>zn^Y|0pPT<%z@sZWV`0%RAJSlzT z3}UI;cHbdw{egwLEW}!j*8^s|N)MD{W|6`)xt=PF8NWVZ+NAo{ue|%T7=P#4ofQ=c>A?_oQVtja2Wx0%vUXpj-Qm{e-8)0J~j9Fk~j+<=1#4o>d zq`zvU2ZW7zE*VVwt1yxr8wZjyHmn0QeV$OpJF~injE@}@T`c6aIGG&EZy&F9iS#+F zY7bj=U#Z~g%<8*GD;0uVJdpFK)ysRiT4>Uf!ZP(7B~P%OfJ{snpXenEJW+baMrv^Y zOBU1(ll|8O*Zhsqhm<&kN{_7q{rLd;SReH9wV;m=fL5HU`1j)lXx3)NSaf5irUiL< z;}6v)>T(9>8Gc@ zT;1YB1=NZ3wbh;iNYB6v+HTJS#B9zS*%Hv|5Z8BY!_ZZgrAl{73sE41s2mF)g|@S$=Owk;}~ zDMEs5rY?i_S66k&f?@CS;&X_2B^uE(kwtWn-ZpFcl44E`3S^{Vc07^B8cb#+@`HRD z=@;5)?!0{`AiV8-!26Rn%-)+xGS^LzDF2==mWzHc1Uf^n9Osu&XO_midCpS}_UI!?h}J(;%{434AJPWc!Gg1M`Xl!cc4B*tH zC2_8{L8g{9Zzm(1p8xicf>t@Cnv$K(oY|DgX`(lO2z8oosCL_kOoK%Nk8^ls*}7QM zxk<+eo}-vva185MCw(o)$RRZ;)L>z{Gj{)LZF&wa3{Jm$*Tg4kIvXoyG)>n_O(`Bo zX>=snpyuP~-;-*DPMbE)?kvIr|ef9nU zjhFk@B{m9ZgR~kp3$q>9HB@m-zhH1t2kLPtBh2aLeE34K^4WP~f=7omdYwz|zrQoq z<6^AW-NgPnIaPCvTQvPeqf%n(1Co0Xpds1V!ja5T=w@&%8j{Vv$dPQeSdL^ApeOp+ z#8yK!Ny-@{2a0gy5lI3Ag2ePRlBlz5)Bm%-3<i_gJRzgv*}I$zuJ~7e3aylDC;!#tnw3{F1To`_8cimBu?9?$4#Yzw z$gh9|=CpRDh!^$>pdpJ$iLKpagva8p%avcLl5NvcIi3PDg9ry|rf=tiYvRfNm)n$b zhF~Y9h$B?nCTxUx^2n1aRTRTw`Mj^d$8PHf@%&fR=aA$5)K6rr}) zcx{@5@&ZaS?$<|(Ve!Ci9Znz=Rjx{QUSD|~S6i;H+|5fGcE2@e*^?ewY&GGu|8bgS zd2K`}T2Qf&XKh9xv;+aAX5wOx(tpqcIN4Fx-Ey$2(_RTK=^zp`U1gT?gl_pgtluqW z?qXhYZXV(SZ-us2RkEY(Olqg^Yhp&oxT$1M*n@wRU19gF8>8boIGZAizV*G5u|Yrm zWHYoNTT}^ZRlL9cD+KLgBfCTg6hs(chFh%8hPOI&gAqL$o~R$%cEy*K>_}r*G5XU@ z(2Vu2;qP&cvJo-xyH~U_OOZ;pR|Of_Ze1Bb>|1i(aCkL259m$+d$p3*h$Xbd!u4C<+GG-W@HyThTl1=CfhlD@-}T^>j(M6rD4}bvDEK zkVWd1Fv4u3GQwjUnbN-$PYu}qJ}?e&7qD3YV^2&an**5bz0QkdM8#P=pZpORb7HTY zCXKzi5MNJ3t{ocC3mQw4B4~LZ)S{jX%dR@MKL-DLb(iQ zgqp-(x*#YxN-F8ap*}f0fpwIhR{_oOr9wCIaHvHA3PX1kv5nip5MWz79Gl10BG^h< zddMGSp@#!3tUW(^j+di^Nq~ym&Z@Kn!c`$axG9*br~pA?1DJF`)pTD$-||valmMx~ zCt_y-hlK4V0sJ(b4&YZDJPrvAAs%P=Dzd|Q!kv7utgWhI)rjMv$N|mh(o!xm_Y@%f 
z6WQnfj1+fD96SJsSUEav{3tq9+e~WV>KH)te9|9hozZkaU}^pn{aW#4s8(iF=C`2N z)LNF5wSeXWOwalRE4Bg9@jjp<4(Nyg9phTQhygUKh~|fVS-vF*@{^@zo}ihhR9u7D zhkn|jpB8lHn+zh*Kl3m<1(hf)Ajg|p#xg#XT%zMfrYCG0qM&7&fDbN)igl(0ub6@6 z18y4yi9W>dR1)gvI3=+Mx+KyZF{>9r(qz7J^00WAs8IRV0^1Jl4os1Z_Zg3sxu81u}BNV}5P|So3h; zF?}xx4-<_ihq22_gtx%lggJ+`h73{89o{M?;;@9CEm=v-))L{-3w9e@Ii^oGCSj{F z#7!?&t0zURN=D+tDh;)u3%61kE>ziQEMO}F))Qy zd?;;Z0Y4Rc))7v8ju!JB>bZe6!&b9-HHZN^q88A=W4Z^O+Q+=8XL1KHT+EA%7jY&+ z=R8}-6g{r*&xD4ZqBps&T2jLRD?&U2-<=JWT~K9?jdQw$+*R+gUN2L=OdyqjRwn%= zw5klPW;&e%T5&)t0>tc^amZ6m1xY}Wt18E9-7Shmu%W`Js=4a2-tA@X(r2h#!p_$F zeb~#Sd!Jxa^?`oXbGiiYN+`fQ1guf@Y6%9$Zvw7}?Wm2@O5#tvjvOE$JEAxcIxsEXhWeDWZLMLEnWz@ggMa zVo(UQhr=dB$jdZswSH!wT@XxppMnY^bwLhq#o?_8UQcEQ=?b#1eui9??At}qTSzxe z-PbQNqf5k*G7Zc&`b{9^!kQ+;YnmWF1l~d)-kifDhXX^$b#`E|2}};H$f3R+Iq!Dl zyzIyY*^vv_kw2u2GGXwX-%0a864(B-rYRF@KiUU%*g=u?i#dfrk>+bayj%6m6wpys ztt3X-UOZ%ciEnI(gVy<7l>3qG%Vt_pojhnPB^GIB#hG@-t&B@XjIHJP+;D2MbA3;C z@xAY|LpM9gt29%$$w~VDM&4}I7>YH-vDpzzWwIYkkx8ya{FhqN>Q=|`9;z`dX;e!s zqqLg8*r|?p>l2CmiFBWg&@{l60b2=3Bhw7$3RcOu$0>(YKvJ3E21bSlvk)7k4J9>g z?6du}I_4niFnPwvmbcK3QK)DQ^i+#N%h8)lFqM-99~1o)5DXPy!_uoeDemI5XuM;u zmnr`QM}6C@O?~Cv_UI2=>Hn!WN5h**j-Z~v#wBA6tq?lFQ~H0&?*N|)L(JsPf-W{* zx5!#5jO=S!6ZRMQ(`JejEk;Le=7R&RfD5iUgu6Pf)??rA(aZV#r5-0N=dyq#-fIXy zqdrGScCs;p85koAYBs%MoY9H1p;wRFw1H`)TBkG3hf*KEr5q>~oNsA|HmQq92clu+ zW`t408F?Gyt^`P)A|!oG>mCp2SdwT@_Q20iq~+hr^zIeAq`ZI6$QmPuC~*6EWRp2Qm2uoLpN!7;+s}B4)_-Y>O0f zW9!o;6x=eJWZ#;oa+C}?MwP@I1=`GoiRjCO$!MqU#A-#HWGfOdr(5tfhOUH(#K>5F z+Z~W7sw)dT?V6JU56I(PCJFDSVT1Eq_+P)nMqfAvS*5W4}jM|Y&VYJC0(>*=Q zjf_u>hEc)6tWg;%cZ-!SaXn)~qRV>c680)(VyYav-Q|#juq+)_h)FPTR3c;18fJaP^n1XYA>S;8hOeJ0$KB!wR{iu@NL~XrHZY1jFs%2=28J-6BC>HFiQKVH~fmd1{ z#SQsNJ!z;XzWSVtMh3*2&Gt2*RwW?UHXB>nZNI=;G%{x`m4>xc?rB)dN3L!oM=-mM z+;AAVy;v>aW&M|@Tf5q*T273z(_NAdE^Mu9?)&Yz6lh8?hT89J%98uKqgpIQ-nJdM4{37%+THsp=~`DDK-h#pK55G4w=MO}~r$GIw_* zhbFQ($_UId0~1=Gh>`MTE0mNYZiV9LG)*eAiKFlatbv1-{wU%(;X0rc$>O6)KDwy;6_T~t+rHfVK2 z8`R#I4cd|oxbT-Qh-PCD_389`` 
z+hc3F6*?kHKqhK%hY`OcZiR|9Oy5f=(_|@01f=v_Lqy7!X<4Csww@I_SfHs?ThCyH zKDQnv&#^~Rlx^c3*D|zE%ua`-7s?%qJA5~2g${oFgYQiI96KX<+a9n&X`zHA-I1^? zcZcuRwL%9=JYAnKZkenHE7Zuf{evObP2({Q~ngRG;C)%=T zev78V?nra>fD5e9);--}>&B3}LBb-ouKP4^$gR+J$KIDLXQt-;He-cyN+ecjj5J~` z|GsW4D>M`)E=JrLVcqclSy-X#vK*sgQ`xgb5#%;#(Ww!$K{X;~gN_iqXEt189UBx} z71BM#ZpgM@V}oL!h9NNvCfp7VN!bP^E~h*m46WR{S5tbLG<*A2IEkY|3F%X^tdU~~3$tT} zCYb6}U&(Y(4m&+T`N{)D>SfE(@mE#P@Uh&;Lt>alpGvZ8cjajlwS7W%q=x<3{f7(3 zc8@-f#C;sT)EsKHhvVX(?SRX!y={bJFj)G)*^NbR@fNa?PSi0ro1FMGHZn4jj%?mc zTakQ3nGX-A9F_t@+!)EHT}e+i!0Yg4J!1k|e#Y@VxTe{06U)&#&=)*v>Ep0DSg;aV zL`F!#;cT8h90FJT=~%f&)=m7WP+8Q;Vt_v#XdQ;RKc$zYZ)@SHyr)G^vU+$*Z&v=NVI9*_K^4+zK1R%}h9qhDVVGIGP&>87&52!`8X9gSDTr5rRX`VB)Vjas0MU z#QIJa39-L0b<4vH5koS=R;|bHKiJ-vBozKqpm3e6)HROp31r_qNg$W};u$qNTJ=E*y@(yERK;_$Ae2tA*?E+PJk7#;DNRdz#B86jGVudBBqF|A~UQNDS4y=hg|(lVm+R=q_r1rXZW01S=yF5s_K8bg0x)$_AqW147)ob2pSx%}Ir`# zgRxZlq-|fy`Aaid3nXKSK1tuBH67M*o7cm!MG3`psA9`|+!8^O2mBLT+rLsFft;UU z$&s!iM`%zSj?LuMqli!tmM{n+o>UC(-)KvNlC?fwo6u55dSkSgUroW-@nCUb?Ad^B zE$Ba_S0y+s1^~1{PF0~S(l!Q}QB*T4SD|>CnueK7@Th9ulbzJAnx16L9)B(6G31an zgn%U#eXe~K7t{s(#Pe>$oda4*`w{NmdW?S2}@IXVr+X8tJcbYjmLFBW;^h zgBVYcRJXXg>-^xV<|(-fam>T;pV-og6mFhg+EB0*YEjv%OEwkvg`c`)|n- zU_|TL6$<0kBh@CA_Cf@~`u{miWifg!+e$~LJ{tXu@2c1yv6>4j6LqWeNr%&K%XunB zWNK|U+oNry?P0>Ere4kEy=K)^Dw2ePwHUqbUO))=YdByLTY%FWI{GttyOxAmT-qLy z<6O=@f=ulwQJt>pAptwMBL6G17e8m!V>E&&@{r&}IpvEdR{qYSJUF1dpE_OFb|7E= z_Rnt}u`Pll^!GKP*9b6!uopLYXhKhybOU+X1TNMy!WIXcZC*nGDvyF_7eIUY z&4*K$45vcgX7n0^I{VV!qduGcQK(pphID{wKy3iDzYl2E0TGDEXz2AEfXdy{vm+HZ zz=EQt1NI=z!)HK-Js{5BRXMhtUCpcXJntlZqI9`%R_i%EA}TQN(V{Nv*`gY9T(6JQ zkPoRxN>5+3U#MB9M}!sTfCwuTpggRQ6NcC65fNYrT=KDp_Uimd9+Y}?Zv7rDscB0a z_lRAac|&@-WQ-K6HX~XfJTRO2B|l{XYX${C<18|@)R2?*pwy7#_FxTa7ezjcyjzW0 zJqNYyn#>!rq)S>YsynCG#H1S4nMq}=YGRgBI;q)Z)?HQx8~3^-9Me2N7Mh>M1JoYs zeRBcf4PL7tyh)V~imeeOD=3j#Esq0MIs3He+q7P`%0WAv9aREBMN@%Ibxz(LnTHTq` z_FzqCh%<*^dAE8jLqjh18?x*TSyn>`0Y`!b!RL;50!?M8wQq5g-z{#8&tM(WBhAN{ 
z%HNz-Z9aw)7UE>l`92%7FWc`);;M3kcFgiicEY}RBfF%IbXz>eFn2!;&(1~`^dR5a z3)Kfb`8mv!v(cMF`rCL<_6H{)O?Tl8)v5N(7IqgiR7?3+$;<-Bb|RZCFvwP^WeDvS z;zLNq-T=LVl3oRCOrPMj-sI`J(A>;htJ&BtwIwtXfkcZsl{fEZnc-oN(wl^SV2Y_rYdWMGxt3yWaNDb%CDMbv4$q7zR_ z0maM)HC=#&d4cbCx2#?fDYS#3Xj_1=($!AQCN#P>(fN2xy?RI`fX2`r)&ikOj%B(i ziT>p1?1rX=jU^#sj8wQGVl0*~fU5WomH+}u7fkl4oZAF|a{uZ>0rU@%pz>i} z7h9e11NW*r|FTki$s5ZnG^R;CAu0YXhqwE;?QaH})JYd%g01ebZ5TqZ)jn(!u>NZ7 zLWBbopf74?Ena6^J80AYMC8h>(WER_n(S+mL7O>$q6E?=Vdg5 z#FJyw%|ze6NIsRV0!t$_k_aIk@NPfb_<;_#T!#%^(LwD|`tQxHeKV6AUR-T!u(Y9~ zopS!_&J6yYOz!H|h?Z1qCzGAQ?^+wDJH95Jj6=!Eo_tB)JH(DS-`=)fk9aq!d6)G| z+NG0s&5)qly$70nv*!-7KNh=mLV`4z z7ZX?@`wLc(vLoMqPnsXE;pF~we~mf2@#yuV8~z)!f1Bf^k_IpHCi~YOPA>^r*78X! zoK~8&ULA?jvXzg$*CK8?$Zu0SJ8I3t3!E+b)mS?_=2xTb?6_Zzw6ha_HOwJHe${Sg zr~Inb&QAN)P&+&0SIu^|Q??09XQpJtJjeUFa-6Oaf;2VH&0eh(`5(ebM3_xz zL#*-B6eRW|Rt^zyp&=m^K4^QBWRqHxz!yp(H0eD1AXhbs;4AaG@zuGQ zm?51@g{y{Hr+$$ncny$5wg-{PjBPr!tPB{ZhFh#r;w#+Sra%HLRcMq6ihi+I(~CCN z``G)oUe!YOWbzu(q#@)k)sN#Zk%JM+#PCDfiZXKo!}}mC@SSSF4UvA6j5oG8Xk%GM zIBSHCa66~U$Cy5#$qKVz-dt>t7T)UMS*<|!RFI^&hNkAZXe2 z{q#`02D9W!Ng2Y#JGF7`MDNxQPFOdTM9kF|FEBThC_$J z)YfjKrEje~47;)KVf=YeW;D(_YSA!pa-zjpZ3)Hg-Bk%G@*^n2Eb})Oh9s3{^Z)6+ z@*`4Gy3>AAL-AA5Jz$u>$~uIx=FV@rj*C|~z}D=T%%~ZRwwTN+F|+YoG8=tf6qZu5 zKaJAg$2v`Rsv}jl*s7)s#dcxddmcq@E4Ekkz-8F`N^} z>ybK6Vp|80eMzq~#iYcJSP#YN(EYHLMzeq9UykbR^AXF%)i{cxg#XZ>%m;qIv@oo%T> zTWJvHEXoXoBvRFJ`XNG-G2Tq#!{h0%ScnndbP#JXI-N5&kLYkgmh;fVqHR&*jnTDK zj)uBgZD4u@=Nsuy(ij1s+?D>GV$SV2!iMxSsU-mTZBZXLIEAVGSFK%l(k^796b48V z6$(+{Z9P;C=gAY|?1mDeSmd$K14AT1ZZ2B)UPA?-4r3hhlR!cKI^!f}XUD03_jw)>q&$!JdGT*yLF zBw0Dd(sVD?)Iep^4`N>zc3hM%CNY4JTp}BbX~MgzaYMqQIB$*Sea5AQ4@KaWE}De7 zV5G%Xrfdj{qF2}If!sXu-E|7acqfbzSb|PdB&at~-Na@{OjxAvFlyLhv^ek$QiE@d zn1_S#GWr1VTY;B2Df<|1^Q~}^B4nx-qJV!~%qx;cq?<;TGtX(zDOz^G%blA9Za?Dn zPJXex=#0iCB@YIae^uqKfKg)y^9x|z1vb!hG0EOXVVJ|nogWw>FMET^iu_DZ5diW% zPSrInWk|^#15#F9Qp-tU$>ueEf~kc843{|%hSx&9vm_%Z;|JvH%Zh#J&CA|TgWgEz zXeLJ_N@Bc@c4--2eZx#I8mLuxAqGdK%^Eo#(gZ1{1#+|ynRptig#Kie-J>sy_=mKV 
zzzqMwWjydNe;JjZicV>6Br_=zRSaq!$@y()VyO4s7ORE)MZ0y`<1Gn7uh2-H>Oo3L z9fglgC%u~mFA){JcvYbQW2U~A;7BG&uBUZ~Z^-I|){mw2Y0}mSLnJHYW?KHYIwq(ZX#7WaEPj_b8(~A`t*=r3l9xqnL!3CD> z0t6HwKo|iPY$7E{L`jrH1UD2x5Csef5KsXTDnTI%I8aHPAQB~>-+!NT?!B)*gbj4h zwB>sD>wN69&p!L?z0W@TJYQ>vK%z4?aDNVX#?8%`Hh`1$4bqTM}J9m?{zv4H{!M! z$+Xgfb)kY1)>Q-#Nusf40aS%pYwL7~W+h_?8r+7OrEGIOyBmDg92)yj-u<>rn!w%Bw+) zq?0#xc2kv%!AetKe7n@PhGYDSeJ4aqEdXD|t>Zvbs8%w$(PqS}_t%Sg#Dp0!EaVtI z8SkuuLa)kMKF!y|U|AnGs-petv#G!|#n?R8WCDuhW@ri$?Z1in4%VR=y_knEY18>q zRy_Xr^s}7O(yx!C5q1!&(0p+zG!5g}K!r1mxJ_KK0mNMJZ-TMW|8ky)#2d%O-=G!o zK4*8cyjM{K;xm$~*y69@Jw_zk(;ybD{jyg(4jTdc0y0>ayN|uN3&EGQs zHOD>XlcuX0yIfq^WUSgKtDJNlrd0p5oxb)nFeZ8s35#yw=4#WpG|sYVj)^L?7pA`K zO{N*xWbG!l&abC&T|0#?d{$OuYh4r zety51XJTg-aYMqqmHh0!OezrPCU+X*m4dBKDKNhr<`>UX`nVb~waepZmGz0!63EBmU;c^m&9t2WB_6!=s(%kio6#<*CR5+{t+ zsGhNV#<<#u=t=JZNv*8Ptq+JJLK;EjTt-p=8Crb+$C`2yO^M*m1SKxuJ4hZIphkgY zis#dv!(I)CadYW^Dpb~zt7jka@T2YoGza)mV~o&fL<=$33w7nPR^6wk)4I$In&y3EjUeUzNS~>=AjRxyneAebq)?e!aOdvdPP&BEM1@;PfU5UyOr0 zkqIVJ1)9#pdFjdk{29Oa;RY@ZK^7P%g9(o+?RqeC6-ymZ1)r{DinvnFei)gc+O3gg zxRMNR5fK-@+s6d-RhsoIQDWkyTT!r?H_}n35lk6d&ixYi&nm{IyrU zm^ay1HUfySOA5pG{oBZ5loUZu{kq;qnyy0;P&c;|Uw_@hXJ7qi5B=HeISIy!k%rm} z#cZG$mCK9~@ir6#lP-z@sVe8AZ~y9V{P73=^szYsC~#EHRiT`5D+hF1%jli_O4dwW z3!wxPzuj>AY8)loFZvH7N?vn|8!(YWh(d zM4KXXOZ6l5CZC4<^H!A7mU&0BmZy0*e+Dk;T-c0B@%hK19o-?Wk8#DupRgXVk-v}N zxty=(FXOMnUyHvD{H^8hC;9t$%+C;i!%|4o&JL)$Cv#+!pFw)DMJdE*<_sHAgc1}f zurq{Dj^KqIx=wvgPaL_24(W96Oybf)62xL=dy>TAa`>UY`2544TllBTx1~MaRvtGD zK1xU)$5G@v48Ch@sL=lGp=b2s>>%c8uUoz?rcOI3rRQZrlf1n!PtPJD)}--fUqS+6 zl9v%c<0tAzw3i$FC&m5cX6*=*3bu?nhOgJqYxy<5@Ma$|(k-Z?U(^ng4=c%q+E~F0 zdr{|n;+WcOa2l2M50;<>F;$PZI)Gr=9TzLBY0g7Tl1JrUW^{cZ{N`=$!!Pl|vV!y* zbT63&HgjZKdW#iC6M~y1V~Kmn-#c3#h|SJSCwaN8mOsOJZK{s_6!nPe<2s{4+?fI= ztdN<)W0R8wVo}GkKB;HPYv^)7+KB_+UDVpK$&^M@9cj0L3IvClT~w=_QmqZ%QjRAW zU^vH7S<62M?_q%>sRE#(6-*5@;y5G;>RE*_)AesNz$C&P4VX76QJSwIQL#FkZYzg; zKx!CS`i&dWTADk|O%kt8vAh5j_%@>w(VK-7T42Rx?=?I9O<96#1V{mV 
z%wVV5%z?!zg&ky{YLboR3LtYl;jdK=oo?kY1^e7R%iSaQm2SAxiHeO|jU#0;1%_5D zg1!$hTy3SN+9{Q0|Dp=T@dc;$bl55@-ngMAP4y{Lo*0bn^WgWIUD>w;7g~qqBkqxh z>lRB5uQAh)SSf6Vp3cuHEEkRp(wo=`bD#*j$uxqhmEO87eK}vxvy}V6cMOK$g~`*3 z1MCk`e_DmCc+jJA8-5XYsz97E0PzwPDYKK#pkWgaqcqjzkbRxTssz+2*$O(Jh?7=f zccPQs+DJWtuK~K5iYOWx8pjf?e}1dhZyywgfI;W?I(jy*@_r)Syx-bQbK4U?jWu;B zr4R%uR_(=0k{dawgL36wjX85Cm+0s2WUkO6GEMQ4n{69ml9h#rhALE%v_uM*DTm3o zrz)HuOdZ=8i6qkEmdzgLSS-#7_1o(Bwp*6H zd^e>$dHL?0EQzpcZ~3>A3(BdBG|K!ixVq*{l%fhvl||l!9ShB%^^2GbLrJ%i#Tj85 z4hUFWU88=bLT)Pxp<6vTFC~lk0}BbR5Lh7_QHR+oBQ&h#x)VF5a0jOOg0;_fPyw4M zbN5gb1|yE+B+@!|v$~yyg)^cNFMMmi@I$Hk-lMai+#S1l#kj8LuVq9>c}Io^t@Bdo zmT2g@=+)%sHeP>S{8qc&9IfjUR_ZPOVQrsyP5bQOiez|Of_zANfa?=!f6Bd2BlX2( z3>~<(TxxBhp#tSH>sH*94~|7*OQBW15dhhXuss5>^(C5ywLy!*c#5bkH-eUc#Ti~< zv{R1~%f;FbHdUsUKk7L_2f=VXmm6F_39P(sPA|U01+8Jgn8zW3);D_1JgFa6yF^4v zd0fTG=cR5QHlC8KMln-lFva>{^oePXMPkM<(SCHJ_^9lc1R@}&rLM4tOWBWZ;|?*( zf{I)U=e8G!SlaGmG94F<%UP>c&M$Qx;^i7DQz_3#U^cP*RW~eX%p-EPcn96cn#ACx z+&;mfF6C7ZelG8}gdrZrZFyCAn`?ktgSgRP7&C~83q~FcJJ~lB>q)|=6abi^W^Yar zl1!?px@96NDS0RFmLMC+kZeM`HrXlzWohpjciQTsLs9btFJL`Bn@7y1|G}!k@Z8G3 zXhQ|DJ24A@*P`vOzz7bsv${K8Tu_vOPse~Bc3_aaxvtrU7~^q=&gNkQ{=OH%>A zU!k18YAzDuTscyPHX;W(?8pO^b&L?roTHPrCU>}6UQWSc`0}g|_<5;S#xUQCp|xes zUM(?->|Cg9F;*yVQX>bj8C#*mdx{eG=w&PC+%~*_ln}oZe(`0={<tqD?e~hfmnK*k~`?bykGm(n)>Z>tvE?qekhl7_PX%7+FR4$9)kL%S3U&mG! zwURiPrq2pl7lf=yQZliMSWsxQziBR7mUUa*!Ijd%V=w{#X%Llbk|@#HEsgn8Tjgv! 
z3*9nhaO6{36302MmG2FksGMSCKG+rXp~dxLs&d159z?-6HI!*9zK;p6nqvE{{Gow_ zJ;2J3e|mrBNBu(;70_ka934o;#5x^b4bDRXjztGX|GP_&XB^0=hnO7%GEwdMeK-2$ z--nT84Yz@08f;}PL9?~gO+;v4v`wt7{QUy}V>DtN(?k3I0ce?@=+>i4pk-@XfcAGw zXbID=t~@4a*`?OfE;W9CcX?5=;e`Mu;jdGuW z0yjq=P0X7>^eM9C%^xM%KWGZzmI$rnJz)@YG=t!Ram^X5noX0FcB>QUER=ssA@@nd zc>viz1|-YoU~wMyU^hV&0fPLgL?>0%yr ziw8_$D9D8-a;E95GxJQ)4W`PlYgQY>NNU?{Vh^N=Eyi`&QP@S z!eqWgSSRVv+GNU4p}(;CNv8gETsu{-G$~r*iitV4Eo!;M@KUh~#0N|=c18xfXRLuR zL~W-E43Yh3j9oh%#4oRflF0_xJ{MX8nXUmX?1;tUyf_DlHaK*&>$ALDZ39+zzUkNXfR(NU{ldsmVTX9=RY_DW zR1p^RaqKOZxI?GF;D=-=YpDN`Z=48qOAn_k>w?YE)uOT{v!B(yzibK+B;MQ>YFUBig zQ~;z+iH-bmH9u?|Y-rg!`(8mo&i{}hj{uDPDuKGb7!bfL&@54yJ(VU3*E;V^oE=Se z290j922l`0(Nua(u?>0O*!0ZU*J>rxkXES-aN8nobL$3!bsSKyOrSFDj%i?`C}&Z0 zVypx~MYG(ZMyFF>&7I>*i-#ShT#a#v;4CufxUP~30=3;ST+fGO?v1j;h#=WN%NLO( z+6#(>6zU7)Xly09iTCXDuaF~;m{78>Hph$z&G^x)cV5?shs-n`EC4D;AqX9)5}ITM zaK=ulIr-;a^;&|7BGBuoX`)+VQ&5-4uH`m{kQ+^Ftev;a3f1bF^%}8$YqE*rzy|h6 zbOPBcEQXoeWTKUDoJ+8E(P*8`nCX#wsqlS*7H#0!L3Js2brctMRUIuR@8MMc?Ph?8 zNzDCY&i!l=bplrp1~2YQPIGi6T}?^N>^n^{VuSE{EBb1RzJ?cOBpBb7Erhbkt3gaJ zue;F|rUP&TTneq>O55&Adx4dP&yRY_EDZeOWB0dZdA6-Q6!4;=09hA6*d0~?<3Z!d ztPA{Nbtp^dO1GB{A)o#f0Ug}NFZ1f+QuvqZem4Di} zZ^Wy~zJ6cvt0-oRH{6!nRVRjS4LLufoCk}2yW4Mf)}OM}WeR%U(k-jd#*v99dEslT`DLqLx(-_`F*JGpu z85{1#bi z&axO@TLd)L6@%QHbj1MoMqL4QI+?fxaMijo7z%0v6H^4_H@<-Jo^$|IDBmG=%^ zDeoa&DevvNQr`Y#LJQJt=W*rzm{takBFW1vPiv%>COL~~{g_w9CMeKlme-kNVwA?$ z1WI37=aOBlh@mzo`5#*;tAZUe(R=iWWa7}3Pcqp5>g2eQ1)EAUkEdmc6Sz+*1W++%SjSUeD8Pu_Pg)>n>T*+RrlHv z zZv;Jhu#X>^LC>7L;GEqS)Cgj5jKy#yUUTSHl>da>auA)6n6!P`Uc0^1;%?3%$9=|% zY+Vb>!IcTP5Hj%4bJJ>_34W z*ZUmF9{oh>cAyCBqX)B(`AtFgn7zAg3uwJjY-u8sH9r&Y*VQ4s&1|$R!k-fm-Z!L7 zk8A_&A_PF$Ke9_4r|5^acAK-+^(sKbi>}%opbm&r53Csp!NybW6|^CbiK#er3ueow z-7g3yzMPkYjzQmf+8u*5;){8C*)i5Y^~!UW^a`|@l!)ihwi5AL_Ffk8y_FHqnZMQz zj`~^hdWu-rD#6pi`fSn6;|10ccAhrYXNq$6s~j5-NIg#*?I`UY?T1v39}orJadH|Q z4I2LtKQ2+m1I4FWTccw#!PvrVbhtKg8EH7YJ|EU(a5+2#mX@#D-CvDRFB-lg-OwMM 
zxQ2CX&Y7RMiff(gIM)VO?G_@Y`ospVEv^~YA+9a1ZDsED*VxBSZ;gE%=Bkff0aN3I z5ApgjPOm>WwyL-0;8<4jNq5&dMZ_Kh(8*ZS( zD-eEBS0Ma?u0VKNS0Mbnu0Z%XU4ihkx&k5Rv>SwH?P?JImAe&$-*C5r@Gm`trviis zLxw{RA2bO6URNOeJ6(bBZ*>JiPH8s?zoRP)7F zAmmWPq&LEBjHcNeF$nK-w}S9h?p6@KQnw)cf-*Q{vzuSHO30pg-74X~_@lFF zm8R^eyI$syc(zJU@6wfu->EAVAJCPG z-=QlNKcp)azg<@<<^*{|&0)J54u8wt3N`O{w?fSk4I#G(&=m+dpV1)v16_ge_jLur-_sQckLe18zpE<{{*JCd_}jVy;Za@byc2dc2>;mK z3c`=OTS53S4I#IP&=m;(L{}g@sVfkkv8zG&HFqlr|IFPA!moM=PX-9#hX&!}N&|%7(iI55 zsVflvwXQ&TPFEoOD_w!`8@dAFU+M~kXLSX_uj>kgf1xW7{<*F|IIk<*veh5GKH}_3 z8#Wj0YT!QUZf!KUTN@3!rCl6M$<-46v%6Koe{#1<_&wcc z#2vLB9G-aV_#sj6lONR6i( zu+cVO1Ua%w|6s5%9N=BgX>_Ss0LC3jxag@*2#bP*dHH$@Pjk*hcAWt z3G-=ZX6jbE@u;>A0yu{!J2C%!e1C8B{yt*yDShI4(qCtpq}79_A+yKWb)@yD)<#YF zQGd>L2m6!8r|4*qH3bC~ZBB*qZn`*Q&~U`QuS79uQD3{*wWO|L%PF6*LOT5$+tnn0 zxRT0P>Gr?RcTtY$6SlX}CQ0&hU9*r{Wm>}3O0R0vw=-SzC)?Iw3Q}F$8tk=@TS|~L ztfEFaxxF6bcC8>IL4q0SGF`7q&38+z^GaI@gyV%mTstRmO__2Qa+OsmMg zu$eG&V~f5){jmXpLGMVfc4VUWz#dd8-&K+@FV+1aI#J3Is5ct~Of7+T%ycJue4T+% zOwQa$TH^=U4Z9N@=9wShli4GlS$^8zozMoY{o&Iy>=R^*2G^Z<2A`(+WCoQDWv;zl zvyJQxxh^SlO?D>~#b7ezT3_b6Fg&5XCl`i280*Sh3&Ru7;?n}3tgf|XuE&QbR`coc zkZVo(>D=&ymNn0XPel3mQg=E(JOQuF=R<)6`w!SSGd#h#&(DNh#PkoJ=7uK_gz~xY ziTLxOz*EB$Qmape0*USqr756@Y(K#((?tGsc6dUxIZN71oSv;Bu!xXWoyDl|-F9?% zLez3Jq~#<4KH+(IZS+2^?ZVNAw?>EUR@FrB@NdyF9`?6uw?+qtC)h)mzk_1nl#OAn zDPKX{9_%O50gSIiZl1{c3_n&Bv0mT1(>(62mJM5@@3g6qDfsVo#YxFeL(H_8um+z_~`vFVsRvMY1R=xtj<`Kgi-)cZX@! 
zU8;^Ij9T`WqNfEOp!?xsgyrY#&D3y+bOd2_G+Jq)y%T3wm*UNRzJo;v$bHYGO&%OF5GZGzH<0-*T}haSs+nVhb{ z`|Snm|DM|OX?szcNhR)Z9&}B)8$1UbI(ydEm^vheS=l5wS_CvLm~T2y1d-e3*xP|gOW$Rvy#+v zfr(`~sfT``qeKHUBU%NU>Lu-aYp-By#Zf{Q)HX%Q_5_7Mt5>WgaY5D4s~^=r8*C$( z|IKgz8Dr19bmOynmk+g8kFHri+#Ok!U9xU%dqd|k)E{b3Bq$3S-R$T-M_%Kw8Gd#k z$UO(P55;TX;LZJPV+new`3HFx;vCMK7Ji7hy3^|9ZYW&qx4Q0BJYe<#+{g!Q2WZ{B zLoaIXMQwLJboY)s?}P!?UzR_)>gGGCgU=jvqVz3-=HC6X8(qD`5JPSdP?*Xjl(1na zjefDaNkEy0X=|r@C{w!iy8!-=mEfxwj6g-(^vsghz%!PR;(2GcH<%eCMDbCXWdaR| z(leUy!1HC5=bfm6d}PG)qr}L-MdF<(mc>W^;994YNWhhp&n#2wBQkL1<17Au_^8AP!8`}NuPZ<9?An8CoqupWVh`AfMRu8O zA6TbbEVYgTb@MWcMu;w|+Sy&Jhftb5gi3i}gN2G%Yo*z<6}iqo^J#r3T{kaNj!Q(I ze9?8sZoS#S;Zb^{tKL9oj@Whs4RSj#u0lYgyRO>VWeHT_Sqz98)@q(it+kjf25S&a zwS~Z)Casv9q}#BwyGG}B!GkAxtWl;U+J4t0ydK)l)IlUbwkQt1%f70W=^MMF1Z-6e zfuNlvtdeUx^xo5npr#f(*#sgeJQkT(OsZc%EL8GMZeyly34x2o3qw$IIB4l_aI>>ZWDS`G zV5&fhAwa`d27NC55!M9;`Jeh_xLEOI2a>k0!#9)gE{Zp7z;mYC8@V6MJ@3Ii3YZ3! z;_zfHthFAkI4K@|l6i6@JJ^;rCvmh}EhtXbuZI^}K_j6cXpk4b;QHT+WG=EI@rq>^ zs}*^uED{w?9l$JK%O7NR=0F&2^3*dggUU3fk zp46eEY;T;bTL+kgpr!6^wK-5|csmG@d&~Je%t*hX4iW7Me^|Fx_Zq$KL19CBq+Y_) zv*M*rg)_2HZ?RX5&U3WDdP`W(ren)4)S8TzwO5$RsN$J2smV)ObA}Fnh8#%G7glO& zX)UfJ+vvMJH#k18E;B++yS+vZW?v%^^%dz&W5jKtX(NgqJ}Dffm$}z zu55C0`Qp-%eX)CRY`E7zJE)r{Zooul zNyX4Uhp59mpcu}e#XGrT$VEKvvHJ!}?+k-^hm!V?%oq^9A=HKdcRLBdVO-Is6CzRS zNlvcDQ*bD9eWK0->v0G=X{fg5rg6f7kvWklm_Lw%R=b{!txIk0uq}jq6A?ieLjcxm z9qBd9B-d*m>DBG-7c_auMiew9r=_6_HOQlBh#j=`difnW@2*{)UYj|JoQfc_v~i~N zwZIXr`oS8vy4pAsAcn_QXq-@evBt^D^@BA|Ct5CToVDtQYFt)roS!9EXdJ*_v~e=a z{a}q-o-FBd&1yeH&#kI9&NG}98VB$fZJg|gKUm{1LTT*SsM@LMHqfDsEu%9@e_?ECUNq+Hh@1^~RUB z@)Pb)m*Fw1u?34QDspztvLf7>LdD#uA{*auE~nj3>Me5GkY%BC)2%#ff!QsxKukx2cFbxWWH6o-*vQoz|sN$;B5Xf{rECPem) zpz|>#U)F^yhKzBKQ23DnO=x|Im(yC~zzT}NvSyx6FkcJSE%O_~ET54NWHN2D1$`dm+CmL=`UkVf~mIiyJi*16M>5A8S3gSSe_*r{1Unw59A1i)taq+}L zNsp?P1Lfl*MUh9<2uD45bkQXe7>?S>L22igiGyd<(odPA5D)8j>5-#Q;H+R@x3w25 
z1|Db9FqMkkw6qTSf7%JJOtqHJty_^8(TO>Sja_c<@EDmOxX!w$7yV>pI0a9DyoAVb?ypF_=m24HGE!HuIJpI|N1Q;DcCu#6;Wqg}1-XJVkrWf2Y`y zj1(~X_X(xgPJ&ppIoxlv@4H4DFlqf=0uq=%*&>zG5L2W!!cVx{GdY} z8e!RSOFJV*KI=*LpAi#4uuxq7Zg;4X#Z=of#*l*L#~<}O+9Cxx0ORWCZv<&X)$#}8 z*pjCU9c0qD9|%$yu{3W7O!kWyu;5)Nz18E3+ZXkdjcVg~avNoGpOAs!reI&fau1PQ zdvgFqdpHb{k(0kM-z`&JkBa0=XdNwXkYs;OKW>ih&?Zr<{w4UgO98^+-1cwP*rEIs zi~5UD6bwc{zeM8$qm&b&pK>u@Us4PK$7pTGKw@)39(AB^4bqk)SMDG+_VYVJq3RGb zpW6*OWk7TXm3!3I?TC5o+oBw{xLp@#x@!xOUF&c9-1@Vv-c6RAa+JiTFd6GG7f6Bh;75ykvq&B4LtX*Gv_ zv|~>D9cN5Ts2@V4}Wfv?H0e zTo*G$t39(NPF&Bhaci(^DmgCJAT$!ThiPgeGKYwC=tn`MBOAvUL*vOWq0XkFMl4^w zHk5f6N3)NySAVPpCDNc?w!7aoS{%mS!En&gcRC+U?Ow#PAqYpHmO+kE5))wK*Y~tP z5WKOZ#8^wiQ%~i^;H4y0yINgyPbfY8&u~mXYE=8 z+G>E?9OJ-m%W$J^v+pawi%4ikCs^)n^;WAj8cQP*q1aHs3i>i#F``iHu4yE8 zW6Zij>?L#sEP`Wv-Q!?aY!~|&m3b3Ur&;L-BuSf{g|njIk)A>wRe>H(w?jf=W(;#C zYjX95*-~hbw>{*sgS;I;wXg6#oYN{IfB+_{rD&(eEFc73C^@`0Ksu8Js0ZG z2r3G96;^w0#Sx@7wABU|SR%B5ao|99iplaIBUHNx!+sC}VHiZp%pg)rFEWTKy|v2E z<0HPe5NXtG>&eC6uHV&R0_}iy=Uj&SBZR7HpMgKj1!6($cEbZDNJA!4bO{VD7{+Z+ z+-ZWeYP;@LK3dC_qB%1%U%mdZP?<8rsmjxq=v*RgkY# z!(0DkQSM=Dg?ngz(Zt_|2}-us^xoqH4kmYrKGB5Y8(O285$G35gV^ zg!8Ld&&0lxLSN`{D4u8>=Ir&w|L(04{n)HYtDa1)S98BJlI>f^{B%N(WZ?zllqG>-HU&~HdQ+5JslRJEn1wynAX`Yi|#%hBM87X5YizU z>205Wk_eV*HZEHq)1Qc%)e6bS(hhoT=suHVk7NIEube}SD4hUniDF-ilvL0=&Ju>P zHc${dz#Un$Do;VDs|9T`%{~-#sV}Iho&A)Sy{~Sep$wiYjTZS~13w{l3Rnmm&EN?| zXuocNuA2?wf-upU-cfBo4DX<(A$>W#$D5*tFaYaR%$d0GgeywAZ;v<^L_hYjWv@H zKiXWbEmc0Y9b4G)dd7zFE%9VrhQzXXq;4-au>op~0L#EU4ug0(b1Rp7`f2u!BnWlJ z3MgeCrQE3Q98acV41H!Vxpdy0LA537=n6^L5uUTscV^%kxfucU1{pi8!J6x` zZwm+a+F^)LmK}yjXaGy#N5vl2F~38qE%8i3OjKwALLN^DAZ(-45I#&Tbc^uLOW~cO z_;F!DVsR69_K;@jX#^$(9L&!G7hPv5^D_ZRq8}*6xMCN?5}OW*w$*=zRM!Gw*}?x@ z+VTCct=Lr7X_7Wmh!B~94im1~hmEt)2RQ!5ZJFOr?`?__v?3W;*)9N_qAqZxNdb!% zCN%Oj?#?efNq`NTwoSD_HMoPtXKWM}UqN&IX>-@6wo)ds#Vg`4TdB+e6gSP0pTepl z_>Q%Vb=dn&F1|;c(xu6eWZ5=4u9t&}i%+BNe1Y^vUxDB22PkrZkLQ|$I_X^%Ta zSy7~6#Yy1@wykv|rW4LGCi2YneLEzX;e552n7YACdS}DqB}-AEtI}!F*M3-hGkh!+;<6Kjtrd@ 
zeUr3IS%$qB2D_v$s~w1}NR*E|RYfEQLyH@z7?92y-aw~&X7^|L6JI8_+B+}n{7d4F zNmJ~c)Tjx2FoAYB5UCr{l~7S0-;IYlA(eXxil}DpAc3ZIE>$z}1u`r{aVLo@~Ra+lmEmRzsRAdol-yn>F4r^u4 z9@b(AlgpK8T~=_4RoSPwfh7|@RgbPt1j%?(Il8(P44~RV6OOJ{vY{YNZv^A+$gG;u z4BsKzDzUJ_@z&NX4XmZdTa!1Rm#9xLlGi9mh)G+; zo*`Y*HW*}$o_S8Bh&}jlGhvlW8rK)pmu%#t*f3Gtw4^a)wmHT&I@ZOOG$m$yTPPC@NlzvMX+}I}7Rc1=RmA>O%ru0@YrQ7l+T_de}2t7^2 zW_sExfG}$usk_*pyeZwR`N43B94pJ88S%ip`ixUo>ySIiI2wY1C~X@AVHp(z9L#{7 zUV*L1#Wqk~G<`+(4^%$w=E#)(bebc5Q?}Y*`vW*lFSJGiRJAqiPNjqUO_|cmmQ)h| zh+864`qOC%vu3|7Q+i<$qb<%Pgm^$NR(nRKbn6*!N-rB!$#Jm;v5e?V=~}FVNZXFX zG66|wFe0scBCS7VBE=eL4CV#Fl&*!Hf*NH?Z$gQ5&LUH~nYh*PiN=Rl3;w-q*l-^lvN~bnA zpqwdvd3Xy`IuY@hBY>_cQ@YQKOzlkRGT3@kx--^V^~G}4#YR{#rN025LNlcga0X{e zS5Ls!n@`h}-u4!68)McLn}Y2;7-g3=rEe1Za29KJ^Hlmlj6>sfpx_Y&wjSt*(v;57 z=rw~WJu$139Z|p{fH$QhTKchV#(8gfS0x3&LymTOLse5c1I5#xps7_(}t8RxXh5IqE(e?RID$tU5LVkREly{hl%_sm8)x)MtY>n zvnN1=4zxG5pLW6}^DBM5Mg_g7^QOebOnn-QFyIo&y}Ag485gAU@>1Vio}uKh`c2hN zfNVqI$lp*nE3r#fXfP&)1?7gM+Ab5g>^nRxzes~SgE67Loj4&ai6Mrx?*urk+dJTG z4K~2AnTo*>mT<5Iq3;z-IBr=0GJ)l~{rdaiuoKn(~QrN!igh@;^=yup)@V~b?8Y5Z67&apy_e)oXgbK0zl0e!|g+R3CzJx$* zaqnftQL0k2%~MJY6p83(m`d?|!`BVwE#8Y(*UJ7;djYi!XD^i$^)!`mLi=FxH|jmD zfGb~#aZ5dVr8m21a|LzOJ4gJ&ThHQ#GJ1w(8v?Z~S+O}EuPprpCoFC+?WY^DRm*bL z2VE9?#A1Gg*u;?^r6j8fp_qm>3qWMcq|oA~{1H-@h}*n%yUZLwZNM{q+%7pL!61e0 z?FM)e%{R-1%`D)+EzZk27CTu&gH3s8xLU7P^9<5qoHFG7_#i`5y6n5g)~QE=nBp^2 z*@uW!Uz|^A`$+{QNH?n$#%IPn*pl!)*7PLhjJ!uCu6#wmt%VN`X&U38 zZ}wnmzNWJB6U&j&P6gY{?0c_8kwe;~#@gE*C^-$d{*YTdsjY6qZM32!1L7F2)R2f= z>DbepYjXgyMoku0%p)|!5mF<&8jEe3^{tS;TfSxOY3Lvjse}3r5NNj%l@)a2jg)Nl zQBi=C{U%*5G|BR>WqCEly)dpz-PTo@j{cWLc&6X7Pcf&m`^9XT>Kx%)`P~`bbh!*= zvW0pg@Lll-O$>ag_V!MuW?{`||MqQ}4#ZvlpY=TA^+zLZk7-kdTqR?6iRTMGqu{un zB$r~OYqnb?;dp2{t#ig5*8hWJjHSxfxRjG$lTJQox#h8L!U0PhH*r_jvZS2`%r8-^ zzh`sftzC?&vK{MeJIpgLQJyQ(TehXQ@dDc=d*@l|vTu{rrebuo9yz8oC$UaRiiW(v z5#%SLf3F_m5cTNI`pP28G`7vLI)fdpF;&QBpx6sR5%1<8W)jQe&!3CwM-%M~B@us*Yr#P4B#lkB_!IGgkpECJ^2-BYRVIEyMkb4AM0 zB`9J>h98f!BXtp&=>L?erc46xruk!5<|!879!nUoS`U~ 
zCCE2mO3iz8q%Uw6J!Mk^Zd(FGRh?E5a2ytJg6n6bEZs65P}QRKqi|lYP)_4nCc3k zWOiMBr!}#{EwTKTpQIdErtIou!P={Zw?yb4=fZvh$c5oo)En5D!67#<`Sx@UZwv!k**XBrZ zeA0Y|4J5Kt6W|O2J%e6+`^c&eaSS?jc7u*|ha2s-xVl;;+B)^Q{f0=GJswv6HNyW) z!#O)FPC>CoAKAy65IO0ccavBJ)+TLxvypTNtH()@8jmBDlT9B|$r^#-+p z&t#z?Hxo8yK>%gNOCfYA9%DKh@Us@CCulD(Qp^E^~YaO(;nIw@R8&=5yQG#Pq z@?U;o5gb!Z$^4>JgJZHOIA}vHAH#z)U>%yCOl}6-X})$>eo6MP*g$nmO+Z(aPyJDW zMfRM=`77h+%YDkyXUn9YF4MCEH75AGrT6HM^8Mv^*DHqQ&}Thz#JA_^-R!l0@$FU2 z)7rh7n5M0@d-)Cgb*5RKH{G&sVy@tw8&OM94b9O0UffIWfLD#l?*3F-&o0^+b8XT1 zZfHCh4&YZB52}mTl{TxN)pLi`mP6KxFQxds-e`xcJCX~ffec{GvBH6m`1S%H?1a;x zhe#O|ivHsVUNP*%EDvc^5`wF*RELm{lkAZe4R^atK!E$FDzdMqpc`Be|4;|TOSXyg zRXcRJVwyinUMq=&Pr9kqcCYAE&fJg8p-G}(zY+59Tx*i$aNBLr@Ve_vbfxE>qUyw6!(igCpCFfj_%hxCyP8LN>LUl*~_Er+YY zz6l1Vcgb~8%moWtn+p~($(YD766Iy*9>#%#(XX)`n*YUAb%_p+(_o}!kcBg9)~p#_vnGnx=*Rsxx*D6Ks#8a(eopCn z<+2bcTuLQbyh1VsPN^hAd0MtK#Y?FqOIJvyFe;T~wH1;nph_iKeT8I-vrzwDAvcqQi4S=@d)1K7VS*(R zlf;84KU9`!iRtO~3Wb?hGO|eCSzOpD6_zhh6pVvmxeTFNW(r@aonq+{be0xAX|}q7 zInNSbq%dufceou8Pqeu_pq#3*Q<^>`%&s7;V033zIE!gzrCE>^zQp*&?80FJ!2WRY zLbXqN8Q4(enI$xp*|d~>8s3EeX4A3JeF_edC)zcF{AnV|evQlhCSr0V{Su9#rKo_R zwQ9yB*~FP-j9KfXngc5007b0SX^PjkOwN3Afe0e}?8F~5z%wJOxF*sRI}#qL_rW5K z)QV$UTc$dTgFTGF*q5uBbv!xh*GGHyG~=F_K#bwX>2@);o-|Q;c9=4w39G=PXNs`S zhg-&6BefnzZkT#R%~S|G^ki$S(`}D(5R-<0yaNqe2&S<77R!uQ5SFfn7`Q?Vabc@p zuL&RL0Y1(b&&MCe9^kih%2|W{lJMny_P-zfXby11@RaGtrqhBgOJZ1Nw4on|zR%C9 z&5o$%K(COxfbHUd`s;uMpA|+d8fSsviWVT1I6qdF`I(rmq0D1)BG7E-I~J3ls4SnH zWl-=VPo(8(CC>Jgv=Y%F@-3Cn;;RoO_6RL+We&Eus~BjK5K4+DbCl#hn5h#oEwAgS zjkCkj*7iITCDyD@r`m^;fuhUHr=yZ=coh~7sQ`&4@ z_N>c?SUJ|N^+K)IY_63@DO;v$50BH}E%9{F>4N^fY(nc5JnA*RvX3{-5WxVHyw;HI z;m3W)v7C=AEiMb{A&~tI<6&;v@fqU$JXQ`Q^CM+WS5%D0tyrG>@|@+$1dx!nTFSF& z%M~dx&Z+Z&g4G_mr{B0mBYov&DUX8@1BukZIa5ndn>y$WQ0G97u?sVWk-_7kfPzZr zU>UDaOE#%I$EOzzkbTIls$jYyWaLj+@zQ;RM0ufM3xy{7W-dQm=m~M4Yoq&`j4_D% zRO0p7Myby>mguv4G_9ez^1H`$BiDfa6%8*_)IsZ&spcI=RGl>1?wI zsl9CqA(}0}d-;{d=e&NCC}bd$q?NVL 
z%5u8beoE>-OKkq3c105yDCB%7WJ1mpDoCg~Am9#IQ4in^B5M`1UZn!zIzMA5`2Dt> zLBv24ZAHJ)7I3=H54U%XM;LPjGoRw$;nN)VTjHZ_(FfB|TdLd@=}g;KUREL=>y4sE zeLF0j%F?GVrkbuW4Su-IRJ7PUd$R4#v!e3Z_Ac}{aOjoJb4+p*@LvpEOmz3Kfj?p* zBhI1?j(xJ(G=ui_Anqs>@psnL$E~*=ayk1j?mknEc%td$rjk4OQDZ$GN-}_n8BGK7 z^OfS28q~g!G+8-mQ2j#EbmgQ${R>HJD<=&qU`SeDIcZP>L(;~| zNrNgFk~UXP8q~v(w6${5VATytJ1Zv*);OQk&Fn>|56LVh-$l+-(ETFM^mZA;Xq%rbM{fJrW3!^RF&|gTjdJX?$wIVcsxH= zLP$ty50l;|L8}9a$Yt_DDUMg4Zj3=$0ISj0ubzBQ9` zk1eExJw*0kvUP1Nl7*lbX+z_bEaJEv40>M$F&0IJu~XEi=<)>FypR?>OkFg$ozx&fqut2y>s6f zqL?j$tFq&QhRA1N<3WP)-9wwT79{k&FYZHkbfp&7cREAyhl>(FJW!%EWM^s!_J@Y- z527sW50SYQQ4y=_*jRigFjA|?pLOG`W-A2^maIT+z5rw-%U(>IQyeZI|n-i zYs0T&!O>Jc`T70cufrIT^j*cU6{%^#GT&2tPw45T{~Q5oK~om)Znx2F4|QAhu5Gy! z*Zo;xnbCp2&9%|b0WB0dRx_5bdEyp6JEiRfd?V5=zltbg-W}P);A2aCYblLJ7K&5l zs!`zRKgtBCGr|*!M0AFZknkq%o#3k+sqWpm@fYM|q9^F!mnaxd5Pr|_8`gzxt5%?? zV{`OhfN{td2u0s0-x8I9KJp@RpR+PJ{=6vTCDk%Ce{3!sPLz-B(>gT0qDIk2LO!3i z!Z@rW6>E=$Su6FhHT|X4QUlxeO~12jI*_`KGzS-oWTfW%KJ|8Y?!BB^DEA4^8ia1y zI>j2!c!esmj`py?Ty6F{t?}nN$DBq^nVpr5?%8RSpaExz$a&T)=Y}Fwi)1uqKmw}V(36=x zt~$ui*&xTCAPjRLs}UgrAb9B=Lm(x&!Ps7K=5}AW2%eqag&kBQDf`ABOjp}-;pz_5r)Apo3@whNM*_=6Eh@&A*rhPeWlxt@nM!{jeW3tF1YE$ z%!Dl8sscIbV9uyC>2OloTC7Rla7!<}pO)FeSEXqHF)%#9c)%~y8{^4MOd?_pEa|!?TVECt!8^WcRjmJG{^uu->-Jw9*J`UqMq;8mutw2c zwF(j#EX8=TfH+fvTj|yo@MK@8tN!c zPcO4Ccjfbc0w-h+Viyj|XXV4>CyEd2Iw}mL7R3z@Yi)L9WX;GBAA9_prQ0?~d$bS? 
z_C{Hgz!0#5n3J5QusC<8BeO)eJKC-BaUd^?O3JeahcQ4>tLb&H)wq4+*|pkQ|6h40 ztkrn5PuxTlILj(q2~9pymPiM9D1UO7O07L#m7&P8tRE<#COtACM1B?J4&ktOY8laCii_^8ia?J(sRL9HH+47b9B zA$FBvhb+r=!|JjdC``?_j!09M=Iy`oPD3x{(LQnGt6AC+G8-77gf8f=R*xAx?p12N zBAdZrx{O4kvot5&&+Jy$WlZidS4;u&NRwz;bY$#0GKBoME>sKtP0$D`oibArJFH?6 z?i-CR|Fv4(7Ll-wE`8Tv;8Jf%TS0o&=zz&B$< zN1Mp``NfF}e?!#=B@-vC&C;o`R5Ap$L?nQ7Gy+#fMWhHMB@Uf$ur@VaF`6?f=zpw4 zl+78%n0UPWz%*CMvGxM1NW@pY}9gD~(c7vcK2n2`Q2i&RF0TaLkl@kBKyRB+tD6 z?ZG3Nv#ZT^f1Yc8zP8JDU|upo9EHh+%kN`lC`YO=6-?#_jNYH;nZb7+^3Hmfu)cCv zzmxrDD#elYnS6?%P^$@VbkM&^Wm_>L1VBnS?XBOIJZ`cI6|y98j&~D^T+rT@%v%hn zNm{mvMP!!Dq!q9wvQZ5&qaDV(U09NO_D^jSW2cf`KKhFWq=MW4i3_Ej=#8TIaI911 z%|3gBCwbvdPvjrD|7uUQc^-q>mu^e;28m2ZvhG2%klI3!F^)_De9nahZxj7(CV%n< z$=d=4kt|0>*_xbHx?_N6j*`NL^O2qv!~{v96Xik4q7DxR%W>tvnK?i9iOT20JlN+G ze4aRmujPW$0rb4CK=MZQAkUN39S}OFZ-|JM+RuyC!kqFb-*lm)nmOXaf6pIFcL_qh zJyG7$r{V2<`Sw_OyFdvCs?c8G#i7azWq(v~ah(*#xX$_K!@||R(7bbcnh8%&=qb7` zdb8Tbs?D30Y-~Wj@CUZU^_tuYA$(**Z8y;^A;tc=4FQ`3z8~6D_y{*ZSc2go4JkpB ztdPWhq-dN>>5x3z-(;2A5J4$CZ`y6ZP~xH8C$mHh}|z5DSae7U_(Pj z#v-Ojj4q`X5Oz(U*;GrDxHD7f7-FE*pt;hp%Ki%h8`BPeNi5zD*WDV5u||6FjgcDr z&$`_$$qRCL0aA5pc)*Dl6;*?qOt|r49IR*m3XAAP=F)BFZfr|A1`>767K><8A4>$! 
z$FWO3NzyIxJW+e_Xo(n>zh?UN>jH_573|@OXL^e+HmuGMVVASb^8-wNF|Zxi2j_;g=C>t2tDfIq-{l9nz&n7> zyFl@JHLYrhY8Cz4gahSauK*n7S3BkbrRfxQ^pYo^T*aVP0^V`9JJ;)C4r_NzN@*zT zELS*avM1qeEU=`$=JzDOL)lmONB};Dl5L4O!7h{0-ui=VHpC9FN|8y-Z<}yw?D=91 zRyyx&wEM=fHHOi4zYU3jZvGNy$Y9=ME0;_eViU6U596bRrIdaG2P~5l1OU;_wOosZ z{k4u4#=SKPwJX(cY#o;tc2OA>yQtMFpT8(Ya_J4B-Sia5T3z4PPrM7jsJTsXJe#${ z8hxHdFMw-BAULn9(YEIV$ISbzXX?DnvX2k-M#0R25zTpyWR0^=6ilW=J4~4;k|Mpu zoG_y6W6AAmoC`3=Gc+qB#^8)^#6b6kp=QL8pP_&$RNs#CjXg)4_WNCCJ3N_H5+2Iq$#Lj` zPi)3ho9W~999Q0)Z8Kuje(uld{w((*#q3qKsf^BJmOt0-uXl8PYqio@r4_-#OZZ)X zFk?L*KMtw|a2g}n`s)wED#!B#Qe9uhc9c<+?&Z?hJg&k(`8-!iMjj{w&;tut z9zZ|^K(%bH&FsQ;-aH}lJ#C3GD)S0>{TL@F%5&3PL?A%hp#d$b{0NG@#4ZvQHbMDb z^x{NFF`f-vV8d(4tclHfoB7gsJ@(cr!9E%YR)8F)r3@X6!#HLC0bU3c5Fg)P@CAP9NF)sVS3d(!xY>{*39lxj(WDIqk&02 zRlSa`LJcB3w;Le7{Y z?B)=Zt6B`n*brtpztojGt>{BT^3aE z!lmw4gzmc#Tb#58whux*0pSR2HRQy_0-L$s8j6MB1_;hXO<2qGCtv?^j_}MocioA! zA*3@wr~|EK&X}k!a05V6xf4wTS=(r)Q%Z2YA1q1eZp^K&vs;2iq3pgNEJ`yu+44}T z)q)b3xXeoy*ymz$(l46i68`n*B~7Ddik+lSja;kt{yq*+Hlu5F@>LBCQlQgHzbvmb z=uAC|c!aF-!?apvW(=cmFtnr5gZV+M0|J6*Uo$`j6funZJ8oeB&dNu}%9)Y;BGw#? 
zTrK@Rxi?2&Zrl1B?Y3ExkrCS6Z-$n8dC!DyhlMYgo0WDL%4;oq09rf`64#?dc{oznw@q`;UZ&aU2C$O~=NLIWy(gqb{l*S*u^-!dN-zaAjVxM)2e1@v- zl=rx|njFj$o$UQ70m_~=GqH_j%cjXza?B@TOh8rv0O0Yc1}y`z_wsZKa9oWpjWm9?Y*X0 zbQZT-&ppNsoI%A$*az+;R+F!_5~u~Q6>JOh`b-zDU|V>c&t_Y&nKsaLgxdyDs{muFS1}n2OPbe@sfu%ND3Kb!G*^ zAHxNWRI>*WnvI8OMtV##$*h@s7tcw~>Z_^drednGlT-S!GYc@wAa`hwTWJ<>vka`u zrttof^YcV^{V*6ZWT?$l(m%&CzR`I*|)b z(QxdT!S04S_@w>W94Td#eIDyYescI^V+{C(wlF1rU$iAQA%ZqL5)j_?VGw1`A|Ob} zF&mJ!^*Yz@MmA-Rpg7`d9 zbX#(U2I&3__u4wHisn*DrRV(9Nz@=*@Rcf^P5bc4Q;qO>IY&XP~S+ zJrZ@Um>wkelw zu}cMI8f8U1p-5bDSSES-nkoCp#J_r1=3cd+GEM30tStoWOEJ1Vz*M7sDb0a>DXm>F zc3`s21`y(f%|V5hH-#By%P2A1IlO)sk7Z zXxe5R{N|ku!p;^+J34L}6Q7min$7xB1;XHGZ*+1+dZ-3;>K1JPFk0Q3oKQI@t(+5; za^?ogA$=8|lc5}TOw+m3scj_C8SHK+f$hqUvei+s2|IHCtr>NDv2pW1w@?XdGG9Cl zn{zzh&lj(#dZBn2Hjj9|PZ7eX*n}`yJjfHlwh`XpTat9;B({wiSZXULvCYUNsjr;GwiuJ7 zv2qf#2a=?@auRd>CP{1MBxW)qi8fWQfI9QFBgyds;b8b^@nC+@#yNN$cnGE@A((((b0?9!F+oj^`ecoAgYEQEu}+Cdo`OOXH&G^wN^IS|*oxuM%qyC(MoCu(?ZtZc4UE`fBCV>;N>T zWE$&XWpiC*zWAA2y!4=+)psYD%{x#RcNtZ%7!Nj77x*vf^nm562tGqmRfJ18Z5nY>*Rg!lMc65Ym6 z6wT7(uQgmJPL|EZO(wQxul2%>%rD*tP(mGh-eOp;*%xdBq(iMO@dLq8RNOO~kmgO$ z)@DB=ISoFcqq!@vSerGQA~a_nGudOs(T44o04SvPJ*^Rpr3v@9cgD_%ZW4jhoalO> z7ebR$bTk$@L8+I>{T{iehFeIKAn^%1a(>$+FX}`gYnk=Qqm=g`D~zy|qG;Zm zR@8lxS_H@cHIC)PM-*YdgFre5yB%0fp+2c0*G4SzVAxl&NZjH?@`LXSe0wB*3XjRp zQ+#qWfFr)AJ2rM+8B;V3Hh;MedZfG$OZkyf#9mc1Q7-SIumx={uHAj7nSX?|W*l)^ zM^tOj?RiZ;Nw(XmKjwlEYO0j0t4FQ zBrAFO+PmcobgLUjKwig^WTLWW?9DRHd|Lzm@t!&o(Jsn=auvxxfae$mD( z#SqIr@9dD09kG^60XMr7s;Io3QAW+R%qye9(?E3&_KLf(@e0lz!a*LVCRI>uqkv*{ zu#BBwxQWZKOkZ@`wb4)TgbJojjc~+qTY==rB4qE)(JJ*eHi0X7n8UW6 zVA!??G9p6NvfW~Or)u`~up%g208uzQ#)25IbiXV#9*iER*N6HsUrOI~RYTXV-zd}C zE-DFok66O=!2a(77CJ!4wyhazfEw9kSjqX9ze0D`##^_2<4VDLZSn9w=Y05UYR zF-N$Q9u+_S23`=ao8~F1k($j*ZC5ZC$f0Yao2fwZyH2VQ4*K0*=Y<^Z!2&LKJ#NUf zIl4Kg&%tkHt45F9EM|^zEQc0?{U#AseepmrJ^p!aSTcBRSc;3mA^_A!Hr(LjpNN}r z>J&pC9Eg>rZdkupgO71dl)okex2b(f6acZouvr#Ow)dJ_;;gD!(6LP@(~@B_WOD?% 
z?;9|8ZizSa6PphR*J>wXH}!e#R@@?*RL2iTcE8O&M|WVPByJEi4Jhz(;>onKj0 zCZdvBF8fM7d!sK7uaYqDo0}Oy0$>u_>Q1W-SqjUU&9vOoFUVC9*_(*ZZt1bf$XH?x zE31C*JL074Imdd#Jis$MvEeEIDOu76vT2=>nNC$guBuN9@MuVS$0R5)nixJLs<`uO ztb_nUQ)~tL&i=7Y7CMArvRXp5P7L|%lu#$?wLR!f&Lr!#7ODMM831#65c3(-M^*Gn z;AP1??Y9k(qCk#&OlQ*mT2{s(oP5rb3*!Qj!D%%rJa03{?wY;UiaEcyn6+B3hK;N} z$Qo|bRN!X5fE6l@RJ4jYNy&r~uy^ha_D|lRH2ditL=f@;D*{IBwFmnhWM`3$qKgX) z9KwMhC=U#r%V<+G-@`S{_S!Vh2C5p1)DjGAtmISd3&~#BS+s-G%sS0{pUL2mRrY#o z5Q;Amf9c{>i1uYr6`Mj)L9;h>;9%xZuQYaQn* z-*N%&6g_oKOaROzirej>M!Q*W^IsYQk&&8lT}j+b@PZy@OB8_T=SOXTL6{ ziy!tEM$2(rQ>aNJv-6|4YOE3J)(H2L{Hmv+(`m!Q)S5-P+#sQ;pkmJ&4O*t_fgahb z;-;;*y^D+6x28H&0}#`^Wr>x1>d^zrKV$j#+b<)d0?{LD%AFdn9r*!ErYx=OjR;dl zCC&WNHH0LT4ZE2i=bQCSGe4$e>L#n~s4B$*!u0B0*beCpu3LMEbbE3=c~7zH6KY*A z-;e~V+Y+B%-%pY1z`&{X$jqG4jlp6aMiz`q$H~H*ed}>_*_zDqmZ#%9F`W0QxY>0a z@5A-b8Un*-Z<6fCqJb#=`1<}@=PN=e%@T+?<(e@d=J;JJUlEO555yJ8%=$4g3Q+i{ zl0t7MRqfhsu6n>Qpg=7EaIy!L3ijD`V>UCKT*nlrqj6bjq99yWBlA`Q?e#F9!s^7% zDU!S1I=eXMk6KvD!K+Sq-Jm&1oJ$jSBl}Q_cx*n6n!+U;;Rd2$uMWliwFfELxwsh1 zU?N~wfQFI{JSb$4%R+4Qmkp}fH9}iHYu)~J#fHqyOUtv&4WK!@zXcBp8BU0>PLN#h z4JZ*Vaw?}1EA}a;6|`(xGSB7Njp7~#fa4!0-)~+f;s-$C-UXgAxIfMkKr&R7Aj506 z%HHgm(#bU{RxbR5qvo`Lni z;=r_Albm=h@gc7?N#O)Ev1>YpIOc3KFDLcpzXN|cxmx_?t^Z%} zm$!nQ|6%wG0*87Grhwkw+#DSk%`@#egFKi^5n5Ich$H7#!{bNkLKyZu_eXW#(hdry z3-0q%59 z`}4Zp&4!=lI9!GIz!{et8Sb@{DJxQSm629PuGo3L0H8_!%0WK4FzWau-)F2LOh0M1 z->~?|02UejOnTSFhdmZ&z~Uic*(J?k^eONM5#MvhM zwi4WEdYyc|maQ=<`^pcLl}{IyOWmty@AU9a8@w2|oB94lN#5m??6)LAa9qCrC~=C5 zqtYX|A|Nu zd5qsy_BNDEmew+zjx32v-{&$8G~;U6atY3mK&8N1=N*7HtOVm(WjSX>&hx8`XNl+g zO^6*Ma)wZaE`kGiA+K`|y1};gAb!`vI{4pdJ$;`1fi#5dfXVY5mmFEFmx78WJ<>7w zX7Bb0pDg>tK=<9K0GCsX`Y3ecln-xmJkA7JP&|?F4+O-*^WCkO<_>m z$z~FLo^jM86>X|0Q_cA`59d@_;&g_KI<|#LHvXF0?v+=>RrGQhN#rnMs=O(u*rQGQ zLbgCwL%EZp4kIx=dno&LCo#Yo)@tfhB6pm&;#3H;T_a*#xiJG=W8TitnvD_^DC(0G z-e22mAK9}l?ePLHVx)5(IEyXCstAV?ADrBsccH<4`;7ljYw_^n~v3wk2i}Mn2HS3b_hZxjBZ^dR_y$Wk#-NP{5kOCQ*@OB#UgFhS51 
z-9hy_f{D9ATaXM(MKZczmnani$YTJ~plEZJB4jdzo}f_*b82vyu)PCyI$M1-dq4Qu z65mkmdZpVOZ4Weey&8HGsCLBqv%({Gy@px$8s;fDl)Yv(ERkH=w71P_7~ zhnj|b(`byd?LwWwj!E)IZSMrqWnw!)R3YmZ_*2>mD-8=;(I^+D)J48Ka5Qfy?uS!O zp&cMI;jxh`#)uzzlyjRJAl=kP1!Le=UU<>b2v!SfM$k?*1u2hv+8a;K)lu&T=pRRN z(fV!g6H}=teUNWMp-O>XH8m4_MK#qjtW!->r<$ftHBAGxa(a_UaDx;vrC}S zAt4@?{EEPavM(w4F&+O1hfxhs+4(^#!*!iyNZKJb+Q?@J>ha*}(xQclyd3(3Co@NQ zDdL0UZXlTrGwppaBuGD3J~885Aw3{zMPA81*;aFox>zL@6o@(wC9>%qmvv-qvFq1V zC$l!nAP&fG*fsLw0R1|UXnC+=3!x)QJIb#t6lQ~lTiZgAaM_=q`J*OaXTz40dc*o6b7b9<v8mB7=vCKr}C7^mOnC})|T7xCpoPlic3 zdPV9v1U}wioOwCw8TvldgQNiUDAiM8Z$BjUI8Z(H{I}-k5<^RtP8W&A4~RgH!KGEf zg6-$!R4>1}hPJF54kPYaui*F5${YE{!4KUVD5zlz2QdA@o0b{&isTq;7Ux70+@Hdm zmPHEg*;|?Rq54VBe~~w;0>yU$lCnX((Q@=2LoRve@VCl;-8iis8~w|#niKt ztzT{Yqp77}j9zX&q`DblKb2esmz>d4*IKxQ&56MyKiK(!jyxIiPm{ULXyw4NQMT@= za=rKNJsjrhL8`QP#P+h|jrNsf$gvCL;!2jI&j&U3KAfGoMvUgbOPEj)_6a&%? ztkieNlB~Nu+OOpKLO5>f=R|6?bZ(x%d-PW)4w%5b&A}uD#5LwYdQ6IJI&ySlDg1I% zDdxObabl8le<%+6>^)TQ-R^62qOzc|S@nTd&EHx6+BnVLO+PqALQ`C#xlO@+!EU9v z?LM?4@!eDWmfW($p3%2WgX}fY@->N(3@ol@Fu7DFk1BeJtFi8S98qJyJ^rSv9g)^Fw znb6b`cMQ*sX&-X~-YZPm<8&@NsoBps7mg`g&Vs#3ctBt%0@Rv1>(mcFu+N#`DLj|W z!i;V9<$p8D3DZZpcuK#pO>sr&8}=%v=k=|yVVfhit&S5&5y3MTgVtE~cUpc)RTz;r zRaz-T%&7Ds*yLhaZPrpX5{Er6I0+_!|9in|phB$1cvlB=8wUsKP`$rOd*L0?6V7ei zUsB4WLH(T)wqeGhaS0^w9T0Z*=5vyCTujDI(vue;>G6vtX#k1D3=x}{#iAnS$wRG*1(Os zLiS-1jq`RMu{XX#?kJrOmUEo2!{CC#8SWr~7m%K{n_r_h_?0Jz8!yPcN$sU`Gs7gS zaahF1vfoiM<#xW7`}$Vk{bfCPYLZXR%bwOJ-ry(s%nt(zJ07%$8+CGL_GdKDU3tI) ze?}1GjLwggAmeL3j46{Hh(6IHWFGNG0UJMMAv|JP7}qh{@{q6z1YE7+@1H}dbJf_B zo$|*=8Yy}??p_>kKHO}bcYVa0B3u+I3LYY(on+Mc<16Ho!Yxjdo$Q_3+*ydN>+!k0 z@iUc}FlD%<@O)i3Id&s59Z+xrme}*tg10@eW77 z{*vT_x}fGA0JpjJr1GoqZl;7%MNUm*AE&J=>ob!qj@l7eylGFDwO|;A$qoB4d)x}l z*1@^0=J!r2GMX0mIA2Qb2GjCBiVb<;U?i%25i5aoPb(Gyl6}fC33BYkpp*2>1YN7T zeDl?E z)T7Yj8*AYtV$E5B^L079DqJtw%2$*5(i&6tF6zKlttIUoaU}8df91_!^%;)pMn zZJ8|4zgVn^%N>yavjG10!d`_&FOh(VgGsS*9}Lgg69T@l7j+CtAW+d!keA0L7mm|$ 
zu_el7X|zh|aqnDlI8t9fo9sybdw0jlQKPu6^QF{1#$S>xrnkO;ABa%th}D?3)2+d{ z@nvBxd~L3sYPl(H&vjGy6B+A6ak9@;#u{ek84pea+O4fd-6yo{fK*}+B_aY?BZ$N& z&WiG)o3;-wvfsn+^9|~7dysihneB(2QCPVzvm;UBb}vOa=7Dpn_G5Za~@bB5yZ=R z-<%7}3?@tBZ|T|c@Qd1ylPF^aN>V2~s32fc{zNalbB^ohp)siuN~YA7bCD3zcK)o0 zwy-O7v>s+V{5ql0p8(`e_I6eE5G-H)(*#$vbH#G)YCtYhYL5fsfp5~z1!WW9WjTjv(3TcCg+ul~=&Ot{Zp3(35qR@hVoh*Kx z;#XWqXDFT@Ak7v!2Ee0MaG)*dPKUTL&|G7Gyi(Uz6rKa}!aIgEe~{YHOa9om6mZJI zarE~HljABCIPWkSNvt!WaI!T`j8`ZuNslXeEXYm@lDeeQnPToLojvT%94T_tpn6gq z;^I7fvGe@?M-{oi-NTx_^;nh_XoOW_yaGJZN36qv%PK%~Lx7$FAZf9XpoE5oAM{vY zgZN@CeLuJIb&B4MK|5SrF(_Z;@GAnGqw+<1!175@zQ`w7Jdvn9K4#?$;Z?=VX=8@? zm*x3HlS|S!{u*Oa7$i&fbom7TXU4Rq?B$m)g9}W*hjccqCHy@f({n9aCCWnqIJ!^` z)t+W&x;}YhZ4Xb(7%M(Kl4JqF$BxjP<~OkCMjbt3152KG%)v+2Vi=y6NGk zxIt|>;f^3yOpGQWdk;WtNltJMaENmOru$Vypn>K{E;L-P=gAi9I6;H9&`r1NA5|8p z66FcdSE6)Qk|>z>MrT~x`CTd~Pv|9S*Qh)j)R*o#rbe~w@-ZV?UvZc(fx-qhg!=qJ zZwC>%fp(9^a9>9PYdmnYVuHy~x0W&ewugV9C`6EPY=;7~YqLM(HK<)PSQ7$_?L%W89~ z^-P9c4q0&&=q3>>qeBjHSPVH7I_HqB>|gV>POoN4x!PVnP#AW<|c^c+=ptzxtlGtOT^ z&~a0mjd?YW>z5T2hUDbZQq^z8n#heHYr8Ys|< zgcdbHEIYvZ&pxkkNS8)8MHHTDYiq`f4=&FqR$w z$QhNr7k*nBN3Si_vh(xA(OXR@cl{enw;bdcYhN(Z-eLa;H}D2nOj-m}Gh{FabpREX ziM_{s(Y$=GWAE{dgxLGaj)*m>BKF=au0?G)c~O)BbV=Q3MBQh)A>6EtMpSUw`+9PJ z6dJ@?mAj{>@tuVid`{z!OnmqBs%yG57YY77@l&d~R*l^~&i--9q<3i8+~RBvu1!g| z+1{v{BlYHlYH_;yFrP#9W}kkQ>WxA~cVGu_xnz$6+=u+q0PgkQeZ4}(d7MK$UMzfm z$GX+!s8EU@{BOv4Lq)S7Lg5L0Cq!pKjUDnyu03vSp=|6y&a}BPD-1MN3KgdL5DTrj zB`m7wLPLqL*AgKf{4!DOUF!v5pO~9-BunIlxN^T(8_^J@#RHV(Xwq7!DeyB#il`Z- z1vGJ3jqUTJ_bFK`LlZd@v#g^c4U!S)sES1lHZbK(zuBX7!KzwxCW>8qb^5RxlduWv z(8GLdLOF%l;dqQ;W87<(7dv($4Mic%&KF{b+`X#a;EYQt*#JVKkPfh~SjRXhQ?6sh z(#5Hiay<)9D$EJl0oLAiMf&o>Szd%A%&3YM8q#q{KwTbT+D?cZ=Hr1&5ah`}!Qb=g zN2j@d)x=;gj@*H`4#t6jMzL(I5&6f_@xcnINBod_bXFbX3_PPwXY4713*F^e?w}Xb zY)N)cR4pzV=P7E}gzk#RnslutiqYN?=b9kO_r;RN?w$#-`;cS<_}w=l;Vvg-P&p=4fgm?)f>wJf;&bc@o z25(2aI7G^cacp5}U}J{8u=B1Askd(&FR{$RP)bt`@;hH3bVkgl!dM~{jh_c_HKCQ` zS=c3ucX}GqGAf&@m4M50AJ4*E7#`2EN+MO7{g~qLnh?Pg6b(fP#=~pyskYcyyF1zY 
z<1(AF0&p~Q(SfxwZ^nce6ssoT)S4v4`A)4pE^&gVW#rTv{Tmt(M9)2X7E-)k2IR0e zNbCmb?VQfN&y`2@xbp&!%%KR&tHfplP_vWB#=Z7);YqQ&At;WT}^%FSwB~0g(7Kg!aft^bPyk$LAcqNJpF#H9r** zcTKr*;vPwjJ(3UAWT&!Ma)X`OYypBL8X3CWXT2+D?xME`WeDNmcv7DP<`UmftPY3u zNa=1l%HY3;*-s2+{@F@GC5K1UDwg*dwp(h-J6|+3)6~T0Gj>2n2MZD-E<(Fy;%hG* zH>OJ*guHN*H!BVvX%KJIIc#mDFE5eh!((3_173yg-a$L4x_9*R8M$}h-Kd`qfz8>Q zUPh4ZYf4mA#>zH6i>TqiE9UWVPdck>tiH&qDCj*kT zu_(076otsab#k93p{3)AVPOqhBV}MLMpNBb*^I+cTT_~yLlFhW55Ldt17SzMVvmbd z;|3I4r-gqb`aq6Uo&+Dr;o%bjcZh^SY(SxjN3^0vM1J9!83kI*7kq|tgi#iJ=2C)u z^U@QZJeC8ni1^=J8`Df^r}0T0seH0>q_QrTaVX44!sZQU6~=T{;S7Zdvuf!3X;s_} z>noH2#s3nYR$#vOtPZjIaRBx;{|hXG*?;&KSO7Jci(CMS*P@Z{|I(Ko?GbwyRB%nu z@+%;#zOzW152xG#g2#qi5)-5p*%5EobC+&M!bn^#1Nt8%p#A1grJb56ew z=U5cW&lVLROInTy{jrFEeFuTOR1NA@jtHHb2*%{vsxr*7Y7kJE2rv$;2K{kC1SpGU z6n^KD3h|pL;52&Jx zG$LrQR8Gf6G@`LmBO22rnUKb$5y{O)lZ8e^+Nkn~5cHh=NpBD`q)10tI)_fYwTMzM zenbjKOG@!lJ$GSBSvWtXoV-{{nfotBDRci7P|61{L@8p9dRT`W*@r5*Uq;BvM+r<; zCRO(nN zF$fIab4w-XHMyWvDwZZ#n!daAs-d$qS>eZWl~|QjB3P;_m6o;9IIco~hi$ZqRB|jO z8eSolM(i}qohjO`tWvC7qvjR@C$3lD)hZ=5^J3=V*m>22@8?_``##OZxip6F|6Is0 zCg@U$79A17_l79VsdR9qViJEuWmxHsufgt@{=T_(Y>hT>DWm{v7ILqa-8iZ8lr7I( z45zZ}2Lf+qkHqO*j6XOF(207M=olM1&e=oz-8W~?SF zPwvLTJz}$uDZ_1I4+V(H7uG6w=f7PpMV#`RK@tS?k5?^FT9FM1g@jhhp)FDtaOmMwO1Aa%E>6jB-#G}GHESV3R2)S;oWJD~mhnLo3671}!O8Ax!$?oK z1Ce_zW_F!J@JiIEZ`bneDn2*v%swjO(mFxUCkU8>O8jE1z9Q?RZz%h5;uC zgcU(DXdh%}GvrZfYb!V8QPQAD=sz{hilbBL*YN@Fw*mnHN&ygo6oA_pLAq8TX$%{c z&30qD6|Ywsbv(9TSV7okumiHpw}_sm*;LdM5?c{3JirOHp+Axz3QCd#5>i?1>2)?J zd@glTr*6Q>*85-`Oeezhd~MgDxuFJD8cv0tpXG2KLg4DeQ^gVZ9my};`%*DGI-_!8 zA_>BzWN4%^3YfewFU>8<)S%70a6iU6yYL{S>-tU2%Z=1)Dl??8ps!pkp=^;&W{5w; z!`zQ1J>3S7=9Tpu2aVzMlpXddxJji+pV-j!(BV}BF026kVOOzn5Lyb;(-@r|)agx- z3P%oL2780kdR>~5jmoR(fF`Xg1Fz_j3O441GB-^A=>3`N6~YX8Fqe&3t6@Q@u%X?%Mog|G0WimdctmHJ1?~w9blDl%cU59!44*l}h8e`R*Y$)c072>xb5aqHo@Ao^(eOXt; zCz#uCb*dbq1oC+r5v;ZBb)D;KV(w%o{;#xGW5)!XDH#S>D5VhLdCidfa$gdpkjOSj zi1OBj{9%&tIaw~*sBAOVl}9+;HQ7lTHU4Wf)PXo{e83%Vx(m`dq!|-@VID?Cg;(;{ 
zu>pD`uOI`qSatCWk_urd{4udkAj>>#3ji%jI+}fgvO=ziB6{Gc6Y`Go&}oGK*X1Ot zcYd258o32i{%W3+WP`sXa^UZ2Zs=gMlxFsX6aVl=%&+Qp z_R19~U^v0gS3Dj{0?<^N)q(CWnWZi7*1Eg@{eGq5CSGsEL=(4RHp|v?xJx1}k zO6GHA)sIK2YUbQ+6=e-fePwzqfhH{+l*+m{msB!YH{g5hFc;T?!W4TIfzq|{S%qYGw1N`J;Irp zyYu#RJ%7U&?&C6f$$<;~WA9VPL3T~9toiDS;Y}9V${zW*Nr3->dvq~<7Jj`c{33<6 z-hJbJuhYB9Uf$x@(GTs`uL)gpU(Y}H)fxR7@8wN?%{=6Hy-@NU?-hLP{`%d45Bl9$ z_;uzJuGa}A|1kV&_wqx(>r$;y>i_#w|7wO(3xB&u?Qu_~;P~vP{EHjz_3OPK@Go$# zU!QrKs`yeFg?mD+33wCrEDTkI|vr?^{0`ae(0?k_aJQz=_aWr)rcj9 zZ~5Eb#FVbdwiNHEfcXx^QTv<~l~$ct+kdaRmyXoUsO!w*W!=k^uHDMw@t8_p*~W(y zt`m!kY5asV)a7VA@`Q+0dQ2qx>{~ij1@*!MeWg4f-pOkoxLM4kgxw9vH=PvEes@Ua z4eK*xwx*R@vByVC&kwq9h3Kij;FbJ|v9uo4-hGYASjcS5U`TfD6xQTKzYra3loHy_ z=+=dz*Vej8y_>drWBk_{8*BC&-LY<)Km)^8#OELFwiS zlUcTzwDOs2c@(w^HLORokkT345mERZKtkKBf7(7v)Jmy*+>3J-STbwuz1o zc?mapQ@h8td}gRM6Ymv>U{SW{qNBOGCrW#7oQlH(OTb_zzbat8B7g3!`8xfm<-13l zj8t$;NJV9CD1(9<$lg(2w?Z{I=I+At-aFCuao7A^aN}JJ!ZXwoWq_#XnkAB8q1A&ars- zBxOBVRJQ`hQ-b3RH5&O-{tLam%k(x=+3c_BbX!B$Lt$}teqqqrvF_&xo9o=SL$q}7 zOK%I;=oJ9%QO+@Czu?|V|D{n5%tMQNM;i4Q3*&$T^H_n!tZVo8rNrjV)qeIiK4y}i z)e)5ujDUd=jG)*iFMx8mx$rhVW)h!j8L5KPVx)=@69GUuw%|Z-s0a=}W9+)_3YFwv zhXn=8e$mArzV2h1)dQa}L}LrJmM7QW+Uo^+PP0d+dM_#0qoDR{J(8%eOP#Tc!=j19 z4)zWtLF2#z&?#3NPOyfIDJkTD^JC`&XLW?5_iR$zf)H2~Nl@1j=JDO9jO+K~vbc)Idl!a8 zLK3#Zev0TCDth2Yg_pk(rbZ%9nr5!c3tZfRaF~RpMH(X4!YGIWqS~0=$ke4eW&~j) zyr&s#8ni`je7UDU7qhj9E(5wuo%vXiK{S;MgPsf`w@A^`1MDpX7-U?;SIi|8IwOTi zFw8o=7#p;VjN4kK!BeWsc+t%3g#}sOCKJcOg+I}P#MwZ4RTL~r$8V8}Uo^@Ud59ep z`ksb0wj|wn0fgpNDV20djSNgaSO-Tr;>dXL6r4^?xR#(XT0_i6_Oq+62a8ljM#>^Q z_rE}0=pPf@Mbk){?M|;dIo6^}TynI9VI3+%Uh=Y}QDa$EoOutovlK63YWfvpurhJL zC(vZ^_K(c+H&xeo$_xe3=Gwm*3PeBawo_}-JJG6cY>_2f+}l!s^KyZM z_Te$%%({FVe z3bI+y>~ZnkRH8Shcl4FX)KX?VF7zl*l8?4y!R~w2bZsbgr}6JOnN3nRmBN+Q)5?)% zBlnw!@smJ(F;G(SMh1j~fD!f0$Aqa%b$$Y-XWsd1%)@j~B)&Oe5sX|K=YyzV_is4! 
z20_PelhAEN-IKQ(^8$#(^WY(yz3Y`)%cm#`EyL`tfWquR&p5lDbYisd(b|&iGDHA0I3FWvUTEnZ%lvT?L6&JV$7qun0s4&OCOLrzvHOrPk{^5-P z)=x%=83dQ1k=M9l)q)pq6KruxRQIH*PN^-6F+CZm4)3XkA$B4Ixe0AC#F4h*WxkMD zxF>GW%;7rFz^3i=6cnN*X?O;SAI3{6UF!L)LZ~z-LNiJJQ#*ajR6d5~kk|4*;Xr^4 zOEjh4j@K>B%q;xYJ#%*krRjOM=Ka?^wC5nNd%cAutYuOJq&J}gZmW@%xS)9ZAQ{tj zNP+Pe(=sdU?fJ~MTi09FV~-(KrEF*MtrNZpX30H!hQX*L*XCLlRs4EVtB=HIHel$h9IJDCO0j|L1jIKc4li_RqMZm6qbwx?NE*-R9!UI}+tfj3UYI#IBPaoPb zbr)BrDPKrEuib#Ny3or3?F%qfX&;f{$zfVOPIZ zx3*K5E#;T>emx004uwXmu}7i?AmW>Aa?EN(G&pEx?+xxD>hp{s0CcXZVaEl&5YPLf zWtUFWR9{R?f*8@3+~tUZpYG8C5g_;rBPhWrwG?ZsS%<$QWN+iLxxpsN+ zrn2mWH&c0IiGrp#LM!_p8yPsxFl~L8i^Pa0jIy9oHZho&ORR7q2xX$L-WwZuCcRC3 zy4-Jz6Io7?G>C6$I_Y&a=&!g-O*?4C;)Zy?q4x}0<(`3-2%Oe}glTLYM7AflLlKpU zq~(TgFJn3n(qPac5xbkjN1Dqixa4X6!x=Yzi?{`LtB39SYwB#l%Uh=LutzPE=(<6w zjX&+@Y9WQqfe`6kf0Zzoqmmm~zM`WCWgX=|`v5d5s=KoOszGbGH(hqb_-!fIWr?<8 z?7+(g$Z04r#16C$0%_QsZdEp?O`(uOf*U#Df-Tyc*gnTCtBo<(MBUluj(FoBQs6Ip_KywIm z4H~C!h|nki3m}FF4f|OIn8vaZYT;um0x}Ikdb*6!@K$L$6xH5UcZ>$dHKmq#vvC7~ z5byfTyFSB7K8Jp59+)ELLDY}draf($!v*<)&>o#y7JL6i&nPtW> zo^C(?Kf1?9)~@UdBrDSTidv_&g8#ZJRy5lUzKmfT7i}uELs$+Tb_pg+t3Mf=ZpkN4 zl$1NiqoaKM8V4WTX%<0Ze443t;rQ>#{59y)LIxDE7eW+CXseYc_hX*$bWixTU&aat z_V|_W(Z+2yXX4PF@cv}b3+(rE>?bs#Rz-z{9Vl%U%arvtF{(p|5ZR4ltfE(@cTRiB zCb6$t26f)|?OhZ4rGG;Ma_PB2pIW@hCx8;g>8h}gEQ=lX2vR595}`DqtWV#h*p}p$ zcxfIYD0{XxP3UQ3`l zaTRpOa@5pLQ(K!%o)#fdjG>!aB;S)=TIIeQKKanOXUcoD=i!j=Vvaua!!<|WgEKQO%ILlLd#8^2HGq)pejx->q5EQV81Rp zY`u878HSxo^RF&9^>VrKc{HHLRM2u0AVqt1hn1)#ZvSsO2I!>l4e)EndWDD(EQs zoeH{7T>RWJ>K_O}$2L(hS=U98*VKiA?HJ-V*-Trlq^1qCiKQ}jZU2N;WN{P|+~m^M#eVdJyMaRHaIua98*upi>|f#=0GK3gfp4a4WN@y~n1 zuqVyuA8g}$Z^tfW@4HlN{Cf6i>9L6$cryG~E0kY$zxZv|A&#xy#<2T28vHx%(>p|T z;O$n_Z+^-3;q>CO@<0Cz4l{Ck(T5q= zn_;IU?ym+pixXZHG8Ojs*bn9RhMhW9vCiM21je(0r|Y!I@F?NC(=}{0Hu*TfSnM+3 z@faZrc|a6%-0#F)zSV;oG>TVf+($#17mhhyQNEw2QvdfXEYOKe3(!1yZSd#4h3E$XQ=IhtH4!|0u2YGau z=+>Ogh+%L>FLt>m`|wl`A&)er!FAaiE+76{tzSJKYaQv zPT8PA=ZE+awv*Mo6A=J2#j{i0kf(#dQVk>mP*VtyRs?j<)`vYl7ZrY{+qst|oXlTzKMQZZtAECejwJ{Y 
zjF9WbKipCvmjZzDecT;|J|Z21;HDGo(eIjEkv5vGdb@M^v#ua<+q6SF6@?(YDY>R& zp&(^H8{dei*^=yQ#ptf{7Bm)#q%lE1(q>8L$+jAJC2Xf&8~<1MJ=DH3jDVGFzCBG@ zE?ncDMgr}_RK^NPe&%T&*6$!&cu_MnmE>pjCJk?Xe7QEuZbw`BF~&l|Y2PdPK#PFl zhwdD-obcn^!&039qaz>*atEbE#0DY)AM#c3l7z+9-$T%K(?7si_uI_GK6zTMeT z{{_Ob>OaSU6KIJ5EKPSkO&zAJ;0SK}%ihkh*K+ z)7MW-Yfd@iuLcgg0q*B2F)%mM02pushugQ1nmklIp^Edac)}I;du%61DhfB;I`FH0wg^N%m)L2gnigA$)R! z*nd*r;QJJL)5wk?-blxd)(rOW#)eeMEl#JRRzu ze`J3qYaPjVq`M;l_xCyM?r?W!Z#rT4tWVA#nz29ns-pe$Mvu&?{RS6L}$Fp9vWstYko@NNX-i!l2 zCZNC^dpcF#Gqut5Oa}q1R58B-f%sOjqE_J8*(J5WQb$g zSdJdL58k@_^>_AL_uk3x`cKpN@X`1bh!7L!D|bz9Xm`fCz43|36&WYLTFq6-s#TY) zT2-s9(x33(B`cMN1rZ@}NLm08SDbH?8X*b`zlGwm*x?Tqk9kJ1BjxY$%PYm^%EzJD z@$$FeEL)o|ABSQm%irVcE5#Pd$D!D%^7lBI2rSC;UUm;#7aonn7s$_pIL}X0w4evx zR@Q<_%{Rsa%u&3DdtW3gf20H=bW+RCS#;f^qRF|7Ub?7gdhViY7Zt6ayXcxlMH}ZX zs!+$uGKi<(oWr_gQPI}9i>_K!w0-WPx?!veKcNb>Uv+*Sq?Bs$EyjP{|!hxvn;@gs-)cKe)rJmeRk zSLZXVIbN0@beum99WU8?v;yt%+G8144$u0{u!rmyNgfnITc)*jJGr1@iF%|{kCLRE z=+b(B?qO3fwq2+adj;2C;F6F(+gZ}I4oc?x1h;h$owANCu<%tzyKhV<0Wc7N4-Qlz zxYEHZ1;OYVyx7V~mLq|Df&iW~fFv0r<6TCd(5bV_SY(r3D6x=b@1fXv5cq{G3~;`weIJ<-0-WGov&DsV zSZpz202XGj@MP2}?l~1qDN6tf&u2g`=Ro*rBk%vfDAm!jmzPR4v^8#Coqe3C1ij*6Z5D6 zH3nQD;4Fs|l3T?^{ZvQcdjD2n2FS=DM4=Kg4&0>69JmRPDb6`S7EfN`-YE$N<)MV81k;C_dci?ZZ0R6)IKX`IAfDzn14gkW;V@quh0M$_isvM9*)B7Rr%8-y z8~mBf1;-1jbJ6i!uswp?_SS(t2co?&9iD@Rr^6jL;&ix}M?xx*VmgA?0coI-I43OM z8^t&@4LT3Wyx8-jaoCN`=|Fej<0qykr}X-rBpHi7qYgwVEp9-zRhjF2kZKmXNT5}& zF}}Oc-6bU48FUfrW4P>h(ibXj^M!-)ZME_K_(a0RP+Z@#>(+jA7qR>h7n?Q|BwcAo z1o|S7&Mu1qqXlh8bb?!{Me8j#O^p-eIY)TtonI^4q#yjf1V;{lNb>P)!0}ZzCI5gb zV4Nl|#>i9=0F-BoQ3SShJ3H-+=r>*xj`DzyU1D(_1g}fK&hZy84~gb7Frb5-gT&3y zzPk!>`yp{gPpXAFrZcVX_=^IxIsn=GpABmTTD(bBXmx)G&<03d6YYPeE*J4LX0Nlj z{m^=6CFdf0@xW>?fA9S>S@!X%-s=)1m&fiALkw6OB5Hn)_fvPx*|UE7LY|3i3@lR$ zL-1?Bb`0!NgFba0QtwlM$pOdT5$H#t&s!%_X%HRpHZ-QFp9T$*H;w;aQ2HI?s!;L! z<==cWDa4X&3mrX;qeS8-5fj(4Um*b-5<5^pz4!Sx8%92PTS?O*2(jhH>6B&D#uA8{%+ru{Vgc)p;Npp{8-u=lSoV8X! 
z|Dbb_B_kPfbPTFxL@dL&F--BukWo1_Iuyq#UGK>*kAd4^pZ2_YOxF0z6IjaLl z+PmW=`9B~|$ue!>@qoDS;N$Jn!t%*bh{fhy5tQ&Sd^+q>duRTloaO`;ghJ zaeMxTnS~X~Y*s6%W*TOJ0bA%0ldZNX!MnU^c4}-#{XQ}bh-!F0DYc=61WxlYOkvh~ zWNn&v!*{Y1_IX5F!nqos(966&p_G|~Pis zwPHo>l1s9ctHv*xSR4E=>ly;I6jlXXjC?w*y_AWGOqy767^KAb%&+s*WJmCB>mz9A#J5*Tqw< zSJtyZU6R{}MiWdyD5!py69zcApCdg=ny^gJ7fXCz-4`?teyQN zdtxPJtf_sCbxBmLBw~>9V#1X1fPS`XvP(!OWLK-znu5l8k2LmAb&xSg0fI-!TM4Ed zi$I^?%*>`mSRvhpbO4g{SLT%b#^(y3m5rMWxrG(mR9|Gm>`RD zT0^t=yNWb*d+gheP8#_zA)S%NSSr+*tB0|Ocrc6iN=*#bC&>=d?J~(bC}TpC^&C{J zaOM?~fV4v{0b2iTO|ZsG@jOznZRpBpu1`cytPii4MI6$j(SA-%C}~aIq}*@x{xV6^ zAoQb9mPqzJ0Dv_m%?K^P!A;Wn>>sp_bf~+45y*rj*K?8pK0Aw1W)<+zvmV zU-6njb6574Y+6Xf)oVLu{NG&d^(wXXzB`KtPe1QYW$dR2C(ub9BRg?S8S%p{6wFh% z-`P>t|CKrVM7-1SI<=eqwh~SVNR4+F%9`ep7bt&}t4()7nXcLH#0~-D?$<+($*t?v zA;qv;?O~L9BZ9Kb9Tz**vOk$3`%UjrlIDqnX0UprG=MfYjMypI^e zkjq!8*F3xZHflLa?)BXq6`qVG>TYV#SX>3FO|(=QoQ)+o zeX#=rm!y`E4&Vefpn0WfkW3D+lRkn+n<%`M&#fFPa46h;-OGXir!eDr`Bbk_muE z>%`wog5cR2xEH$3ulPR)>1*WdNYyq5ifE9ASu&s{Klyj8LQm9g*`;JX@11``wsT7@ zUPdDyiMR&0C`N=*4!hz$XmZX-yEdZS@M-L zCT48$vTB3EH~6+O=oNhH zYjq7NG1VNI#j&MT_-1k?DLQ}*kW#E(O(9J^Yb0aHK2rx5ww)Vt$Ed<*sf zlDCx&Ddg%S7fK{5{JS5_37BqwAd0&#R$?{KI`a>uLFBYe+~Q18bp z^$JNmB2tZJW=9Q}teQ0MX2&zg$4JJ)U5hJ@w ztFChtJEePYXtIQ$+A5P*lAX1}i_)-oJ7xh3g6)_>S?ImD9(3B6JqmKP=QI$1^{-+p zY{-tuo{$YIgQBjDH-xnG0CZeN?6m9AOse|nq$oh|qW0CpsECHMq^c1qjF3NBv55R; z9aBmOpUOUM zP)Jn>MM6D{qHOs2R0Wi4?F&PZ3;8+eF`9zhK#I#lc7hBX!}78cmY4E?g-6St-;laW z$N_x3B{CYdp8L%!!yC}{3LdcB1RkcJTf!Uqc@=M1UwUXQ(#rS+4{%tfEK48z09Q3JSD);f9JU-iV9i~TiK@-^5lJ{V$-;@)8CYG2!+PP zLY^Wn`PS)#W4P?Am2x&d$C)BFwq}G#R6BgtV1mD4BbatxFu_t?6lf0%2p>uu%xe@3 zuD_F5h8&LYsrxWRWkAiPHu78Qyvl`OX2BhS88#*>#VE|76 zI|SCzqG2>8umWy5V4*gyl~G^J0=wkph=Klulyg*bnKb!U(snxW++vJ7RY$ejf~%GN z6UL_ToE?s5B0(lpYm^;H>umY<`T}bU7$QpJEf+?XM&73nYSQDqOh*z;mgU1;innJKi zY>Ujed8m>1;Pt%SgJ*1iiN5JYc8AO``C`x6EQ&*=S$trjS%k#}n`M1PJ7Mr%OPl!v zOmkB!Dp0jXHpun-#k*TyU zvw|I|J9a`k1D@HaEwqOYQ%WinYg>4cZpnUB>(7+4-yz{wGa8ms5?pH3MJ2u=^u5Nq 
zC)N8O0SpnY3DW7BsON?gLkKRdQ9SQca}$bGw%5AfbqExCxe8gx$5v>`4cC#>vBZs- zxxBh5XWmJkK4`9YC-(MfwXq)ZOK%SeQBwdO8b^ojQz{dI5noC$Tkcw^D$R1qtB#65D}4a(nn+tLk6`eIVQJT}tGP0PX? z`#rRLD|sh>;2i`V*0Q&DM;d#qT_h5m!Avoxg!!RPxgJ}d)j9;}Zx8FX z*l`0GxLazV-IpL5E}vmv3~PlsZYbD+sW;n#aLqLPa$wf$DTTCl1k((BLw@4}t{Kd0 z)1nXpQ?NmDzyS|L8H9k8W*Lx@)?sSSWJ;Ycdw>+SUsLC}MhyYg&`#S?dEkywsG&We z>>WZ^Q^s#q4_VeR&D{$6*P`wS`C@G3J62c9 zzs;6t=!y&T>TJalHbE7K!?b}+Au(eRD0xYVbQyFsJ}8W!7P3MT)Pi6lEa-&-0|-D- z1WS4X@IcP`h{@l-FJRHuC%pb6=Pd>h3S8u$o`(2Ob3GF@L;f9Z5rbbYhZjImddlFJssP<2R3 zrw<;LiD3-w*!!S#Ufv`0s>tlxMsY>G!y)>6&p6RSxbUZyF+V<3J7Y0$7D?em$}-Lw0((y7{F z1QZ&Z`Y0l_!$6$gT4e_*gAEr92OapjCT*Z9Sm{dg&(yL%EVny`=pTO5vN8@)B59O{ zUAw6`F#eW#m#08i2LnFW?Ka5Onr_!(mnq~+hHXQxP{9M?aJ-|&c4uRv)11(MM2b>O2;EWop%iufDkU{D1Dl1@p%A&}+TSfMVBGReN|Q9a*EUQGuK8@`=pW4awd zgYy$r<;`7&A_R+1!?@|t>ZRNd^d#SdK*Z z&*No7$FQM@UJrIue!^w`-WBIn2DZjSp_Ku&ur-(cNe9=tBF^#!_q_@sAwlp&L&?}7 zJU{(Q$qqd}t1Fg?D{{MGDo)0B<;9`NTJ{DH_$V9c>P{UVkt)bH?T;)72{Wp%J~`g` z4~A~HTESS$PxFc@GY~$EOBCL=+EZ!<$-r@EPtYko^Fy(TQw>j$V$~X4iXoF{W@h`n z(-VE)kP$}LB*HJrFUj6t!~&3NKDZ2@@Av=^hh>@%O|pOssPMYy zcB&of9xWCa&A$qrJ}hSBUY0}pgg3jSZ&LYMAtbB=~T3qpYiKLz@HT* zONPtw$(E6E z2&I9M!ZN@T)R4RnTv8G(tBxNBPT_F8$OZ9tQlA_MdrVc#C4921WMA`mpD0?>e1r0Z zxE!1=W}3|!&oZp$SvE`2#Bez{qve1*k_s|IQjP^ao?|po0@DmT5m@WqiKwHbZmV%^ zFj|C0%qLVV>V<5NkCKZx#UtTnUysB)*C%7O`Ep@Y5YZS48c{!f6&H5VG})&4utGz z6zMhEX$h)r+Q?^`+E=)%G zX_$hA5+N@t#yplZAJ2PEGR~;!24?Ixw$%`|`dDEqCKuit6-!nCl4#`8JU}(5puX+x zwe-nA|EM#l-o;!Bo5d z1rS$NfuL9_6P^CIih@!+Kig{04{%(o50686@X*+3GR4@J1T#oCswZ@u>TucVr>k$! z?v~_L)vhQ2TOT*$B~>EE zzSh*fR(;CbS`2=+utHg#<*ot>>~Di{SY?3&cWD|8?=*jqx~#UK<>&|?8lA1^2*_~& zVf({?908DH`VLjhsWI$hj|$_w1Z3WTpiWe$AsAX=8g_HOb2xAcg7O*3Sw7DyVu8;y zc=Gi5RN}d?Q5C6nFY@KBKcB(_!Az;|Rn-uXjd4N_ zjlg9*Xj`>1Ap{-qZ)17@LBzJJ@xbP6njcNI>8a+pLRFXmt#dlYN+>rFMv>9N6_z5^ zOf}385lM!N*agEy+yy9=H0G1|i#R_lAlA5IE2b1_p8+t@*;8tWIXSHc9$~_W9)NW? 
zFByP3T>^zlic%+-Qmq}u;55msEBw9$|Q^JupGs~J-N)_ieWb8@vDl3 zO%e-LYB(!BBKx4xG}9az9gtN8#Q~{9#F(+V5-)YgsIySkj+$n|+b#oEDIAcJoCs`f zm1&2lxhwxOcKPyF_Kh*D8`G1CY&?JSm+sSk8KTK$tebSUAuO%stx_ zHa4aYaHtYk4!h;B6g}Lmy6QyYC}vG$K}Y>5@mqGJgXsgGY^(jT#Pg3nCppR|hHR&_ zJdZ#EygJg{sa0NPq8;SG>y?=(5jGh)!8ccv-#qB>&D&%aAo73)nZZILrZ}diCFRtW zkb6R3<^?gIb97M1-T9_g9mtm5$k^1vuqn9w@z~T!weV5RlfImaO`WVXb*gB}QsJ!X z0m>PD(#}(=Hlqce&!?ysr1x%il!hc@##TbNFcelvT)ymO37<2JXRp5Vxrfhue{5_| zYdD+S5*J5GlAmeL128wqGPQ3!2v zPO%HAEzfe z%qA{y%t9lu(PK+qBXwoigS->+6!3Yx9(d&#M|9}kmStQ3u_@bb-McYrU3R5;g~!+i zkql*W4k;QBtY?0KPg6AJXBy2-L`yI?neHp=r|Y6_p4g4DUnRU`bR2ZQ&uQiG?mM`tWY$Tr73zA z_Q1OIxUeoEr3gc`Ei)bgTG_BH1g zOUL9{9KO^2S?-OuJ7l(RC@k{6407_PK>6;gL%onH2DYxp$g85OjkK{5>L9+|8agqX>#AfNmbBHzS)mXGzhA!)_mNGQAtW{6A~1hDE1U-*q`p^9N86>C>kfp9=7hn zh6kN#W<9%F%7YG7?C5gI9EN8vpSpo|dTb%zz~3*{Ztz5g%`Tl|=u1#&{a4Js>*P+A zrKg~s7v3mfQveI!n*!&k^{E;;E~NiyU&UpD-osW3Kx+ZIvVJBROvE_DvG#O5pLy~v*>po=(es=7 zW1L@`B*p^p-Kk7Od1++T35Yp9EsB)iicOCZKr{5b{jh?`)sC zAt|AnC(flzUQ;I$%x@PGrU|Y_w7NNC&!8hp!yx(%ACkpBn2v7C{uW2V9yO!9f=7|GCX_~-FnWVo2G@Dt5g(!A4RV{ zXVI&sl?}mIAz1|Mm%@!?(AB0PVx<$P*DU9qV7=awoG!2iX6Yo@5!m2HCA(CK6aa5j z$E^bDJkD#>F5zkk0<0f(%>3w7Y7#Ffb#Al{vbcg^AgtBB$Gy84N`VGt)^{2%EuPH11no=tn9wo7H2)`P=;cs84jxH5#8>tQ^3HhiM%Job zFU81>SA^O(Q;sYhbe`(epB&3TAAB_>Kz31i+4XU(uuz_*Gh8eh&?|MO%TU}h?42_N zOM4d*(J80#0>|NWdC-)8>EJ#Ed@SiU!6`-fmCeks=?k)?W`<(}-~i)1@=a7T{Th!K zD-DExzSnG3o$oaf<>hNY9;&!OakV1Lxngj!M=0bZS}`@?ct-5V+}t_D%Gq;ZMQCW9 z2MtEt5DncbD6B7x^8#o9oN^$`AhC+2WnNdYw9M;ghNXW7OGGWAZ4R-7sFhPqSRx>% zB;CEp-NmFU$uQ%P!WW}wxhzznN>|C_07kMiy}}m{9o4h{Y)(uu_@lQf#5UoJThrnl!XA@dGwmgk|0JQDJ2DLcgGv6)Pypr6B1S}RQ7um~hF5SEjQEPJO03iC z!lyBX$P4a_K98fJLNqjEV9v$$sv}jz0c~-$f}~QrmZJdQRIQ+wI@>8ZVt;Xv+#~+t zP-d&8yQ03L$i8lAJ4o^Vn5CFA3L~~T5fKz_vCH#8732$cl;lE^%uZ}RNr_aL`u1ca zgdARj#yxBS@rD>v3ooZiYg_G=d=Rb0Y$D2GxT1+y$sVGKs##l#oro#F9x~c8X01hU zlsAP)!0;;IvFI5wYuPH1OB(njB@r2NNzZ8g;gbVtC44h>-kR0Dt~5pjLIlvShj#Ze 
zlE45ivbz^Ub?(=e1dCqV*&QaV(ohtB97rzw*g`hLzqn2%uoy8j9&ugR)c<`(eSJTv0&q>OtQZSFKksQE>%FaNz4Zt(MtY;ev}?{m#)nyJPe@8+N>!`DYB3&g}z24D!w$1RK?Fy)B-tyA>3*M&3;W28w?x$EFZ2^NbJZ>C5a$$PQ@h zaD0eo7G{wH&q+2w1>TlsVGrpaIHY-%G#&Wm%GxE@)ReiM^?I5IVNByZVDW6_>vvvR zt6c;BYhtKY{-ecwu*o8(G=!wb3+@ki_PL5I{zX8t9iyBqaZ&hO{(?-szSWmOUO_ zuTC8F@LTn;F6q=})AS(Z4KQ5!kf?7;?VR9a!_Mi+yffWKc69sW*AK=(93BSUwWeOI zXSNYL`u;Rok}$8QD8R=?K3WhIZ^W_s;RzU^v-kN4&{GC$aT}7am8x4SRUwg0VHIJ% ziOJiNT%}$OvC>0vZthxuM(LaEPh@XlEwb$93^-a>6RtHd3{STY_I1ckhhF%zeHY8} zr)g2{E_RogHl08flPOjJ56?)|P1n?4%0p3&Rl<6#+Zpt*$03a|d0$A0ghPjh#=WP= z$#U@~&+zW(u*0~{3~9Y2=aEQ?#?T(2fwA5%rHK<5g~LeuvP%7r@Vci{qZV6b~mLgpE_u!0t1<{7cJ%av`Tfs7`wX)pr0 zTQw3`J||twE||YgV#s(V>E}4<>U6WH}Zso654C-*`5`IKGt!=KdLbrALpn zwg^~Ov@-S0246`=9OL9OBfYtkyT0|^BxFTSF82=^bcK(^t>Rn~30`Z(f1NgWcOV&l z*_DZI$?px{*kU~MkQ&67{k=XO|;Tz}4Plj(~_r$lAzcJKG!>@l1?;tC^yWj7` z$VdbWWs@|rfkzUTg_9jO<`zl5g5~2yWS#sQAKgoSmi()HWS^PpJ-3uLrCYm3L!iRe zBwSvDeOBHxB;Yu+>V{sUqclIDp8LtTqF;mnuJ2p1OkAsJHGf!zw^W!KmQw_ z|K+=5GumD4tCX5qM>A@7qnhcL%^)DkX3)pf&WGRnxp#i_{eOAlqyQ8+YG-TF&PKPx z`sR^oDc!Vdkc1J!cv5)MuyBK2i-A$>c9>>kSJG}Vb~ke3E@Ib8gs=+-O7!H$uEH9( zR9tAc2(Vn2jyyI9H$%Z$-=Ut!+%V z2kIq{l~5<7n}Ee5CSZiPr8|1nGb2w_wXJlKGC7gFYLQA=kW8hLU3_~6RLb%WS;uCw zNTn!`rqbBVGoezov$1HWuXdzLMwDqQtQS$12xWsHIU;Q`aF);$G=U0H(C<6yC03=& z@M;FBjlv{#0re#A@etDHiRI8(R{x+U{L{=)#GQKfk z06@|_>LgkjhV#acO>C5S-t5OC#sbd=(26yfH)ms#c8iQihGK=^OT`KRYThKF!n~pJ zg?W=^MRlZ$vKaDhIB)V3^K7YfkwzEhjb_5U)ur^KRF};R=Pi+{p>~!?7qt`SjdtR^ zF%cqlFtoY0CVRWFwz;;p{C26n$$Dj0-xt?Grqb3yIBTpl4&&gT^wc5*E1#ql5~6C^ z0w}QF=~J0PymyG$R`QsbF+MR9$_jrKD{OGOr_Hskp~5CtxN0ZVB>Pyim!h?iZbhVL zi-YxA8Z=lblO)uGczi%`r`yx*8YBsXodyeKP}HEuykMQ&HQ8b7t=?#`R7p33P#`i* zKVA+0u!E&>{fhFfeGMHa`uAk==VzC`1+Mb#G#XusGq!B{`rbuJt|sO$;mOw|d%QDk*ERmM-a#e=cepq?*(FbazM`R82_d_&1C5?hGuQ?uQGqWM7QqOTr4!S6W5gc~20O)gY zzM{6E5CkfMMMPv`os%Pv2_ZWMg2ejuTN7p^Mbbnugn_E*vi-pqq>YKfzoCvs_Bm8l z8xXuLNCK3}K;-?n%np7y)GhN5_&_wcI{sLrA8X=|wfeC({F2%R>8tuT>d(R%# z!lJ1iw5~u?0wEu}C3{3xwN(w@5=ZIR 
zp{C)hsNOw%AGv*$kOd5ePWrXp12Mx7bQ~xmxB;YRLeWtw02pzqII*S%;lPQkrv^5v zVBnm%b&^oAPaUmIK&wB+pu)_V(ns>g3Y$-L5>88CZGH zB!O*M>s`1~Y?zOSJ42Tb7g-7u-qeDq3>tWe^d-TGq4tI(=)e6+SV%4Vnt5?GN(8i) z6+po5gT{K%G$h7FB4~y5XoqHDC&P}QCyb4vl!3IDlriFD{t3DiY$C&Nh8i#o)BKOW zdJpNWg7||foGD>KDKfEEdig3!yywrkUqW%Fd5>QQ7iRPe^^jw~`N#g6TPh$)_`UR2 zG>ZEG()??CNx+lkU*IEqWUBY*AiX3e7HEV_cQtY>P3vN-Yj;k=C9-j849Pux3G>8a zX0HMf;?tA-CY*VCHJ^|3xgy<;77*z-?gvYH59(q_(Lt*i)J1=qE>~md!z@NN4pM(c z(i@jI2MWYmmn8%glgE?G&;8$}mtKDPrTjNlt4;NaVwYW(O?TH%O--%gzsvOBj}-sK zZ$(LcUUEtQipj}|iK!K9*RCzfU4A(w*R5N3`Q_`~!0^v1eO-0=WtZxc&M)_u)vGV7 zr!=;50BS<$s`V$J>Y@n|PxqSi zl|X`z3g3{-IL@jn$LEc-i!uG`@IkUi0sAuKR@>d|#H`+hr^zlxPEf-Hx|yt&Vb%HcdXK(GS`4sCy(hUsD@3s*us!(_6}Rn7Q4IW zp3f5al1e%KV*IgRUv$ZSUZ}vOpNqN_nHD7XV61syw0jTW(6BYa=0a=S3);bn%uvV6 zo5amC1s4>JqXo+GcnAfFDL7Pzda_4~`I)#haF>ac)dV6f;D%H~Ph2Zn4zw|y(Fj36 z1!X~;#C{3kgOz<8H{^Db_e%pCW1(06b9~Kfz^_au9uK;uz9pL04Pk?|N(99gZ(heg zO#6pfSVAoHT3A3rPq7+citTW+!v_`_Ge-=IHu`I{@3WXOC{WJVWZ(=u#wGlbGdPz( zN;x8H^$Cj{*C#x3OrMb79G~KnQ}OeOq`#82J`5PBG;rN62^UzS8(|ax?etfM*V;#4 z%r)qO=ir+4Ju_Sv6*ArKdSKVN+E6q>QianLXoa(O7coMC9YLFTj$*JM<2-j zpR%{b1k0Y)9MM3~MvFNw$f;lt^qIh4L+%(aX@*0M<_+B$D;8d!LZiZIW;Ab5v8vHV}CC*7O_54Ga(ixtXCgo(Z! 
zz(=(%=~|uh6fZzM}wLlw0KF4)h1Z*y_nlM^IZ0_B5?bPrQCi>;KAjO z=@TxW*C)u~Y@4~9qiO!+Xd1(SMU)1aV`XD(G8vVy)CE>TGVPv&r$LyL%#w(zGh8Sq zw40?$)%B4COG|Nk#6WRSa)?__{Do1&@nwZlnc>jil9@a-tjr{x3iWUnJG$nm&;nNK5i4}GaE9@>RaGue<=4U_2Wj$_# z;ETnN2$P7-LZ#EJXrkX4HGAnb9Lg*KKT|9PZ03}3lp{SYQIG2L#tN#BlFN^9RxD1S;lL(->* z{7edmFjUs3<%e3zQ|o3`St=wbURGk9jc{`^>--Q(7InRaeGFA+qVG zM-fDxMqM%XEttx_z08X*)RRsil)x*#WqIHwGRp=mFda(`@V)<{z^{tHOY**dyd($U zf>D%ZCU~cqTVa*Ed9_Pztrb+pn`e8$RB+}KFCs(bWHDAH=rz&8?oj0x%cebDhGK^O zo^~4Hcd+I;I%kuFH!=eZ9P15-z$|ePUkdan3^zk*J53d48geK#dY`;8T^(F1r^(f* z&F-p8MLQHI!4tuy!u3S?-ulkzHC5M1N5J8WlMPwAPR76nWUrKsrOP9xuGUQO%DM2d zbvQtB(e9P)p#@tLn|~f zgkZICf^Sh0b*SkRRbx4>%c4Sc+4V(dIya7aqJ5oM)A_ZKu7%@89kXZPj`371>R8>f z6;^SeAV_eaIOqxmj8C0%^c`CSX)19ryVr?hPdZU0MO0oc|p%~~vfv9G7VvWxBaQjVXqEAUeamMqF-f{@d!5lXJq6-9j3JSF- zfWm$9-1eRnj_62X<6xX4F`rz6lrNnmh65 za8@9U2rtRf`Ughqd%9;qWSrdDqO#1!FuRM1gELr5r~?;9(ZYj83wTM&?v?M(#CTMd zp~fS_3b+&kycng6x)fOyl!blnn#V@F_W-cji|$Dk9$Ldo(q8N(Dff3o>_y{IiI^7! zlPL;BG_yZt=6&Y9kBf>)E87#hi)7Bg7$26(4Lu_H@T1utb=c5!ZOva zI29`!nNTAqb)GgLgaf=RFsOmxl&(dQbA-{EByqo#7;M-Pcd~H5n7-#3He5Q;f(0QG z5F6Ytc*Ea743u}Km?7PbSNCF4Z|zA>Tn90rg*vFE(2QyZ%-TzJqPx(Z-xEd zMeGGIY~(HSt&9(OjfPQ3#}_f1Jd(@!RxlhCJ`uoa4(lxMTXC`ATk+Qsww1F_z+%CK zs7$$9b|G(Tj-*?jsYG3iByLF%0h2gZme4arBx36`6*7@4hl~=YJV$y;e;qa*)MJED z)NXJZ2OW6AIxv(Y54)A#hSb@^DA~}ZRG@zE|7Y**lmY@25}?os1q3Z2*a6a5 zLcpLA3XB*eLeT^X5VU}(0i%{ql!$S7zQ5nz=iGDaCFwVLhWSkT({;{0XP>>-UVH7e z*V|q@eeY0YT*j3$Vvw|)+z#jan!wjn6K_ZskzGW66QxOn<&pA8#9L*s5E1G{xy1#| zu4O(0t_~1rVhR$?I%U~lAHs`(2MJB+r2<22%BLD_GsVX29rB)7o?vjCg+BA zFXDk%W<>QI4*+T3w-LK)RFCisa)5#K8wnOns+-#*;!%>MA`jHlTV+?Rz?~EicPrEO zwzlR#ROW*05PUcrB4V%|1IP!Owwf>HBj$xA$M_nfMum2lBW+7Gu(ZU+LZqd9DPfTc z+yh(b6}g8;1K%i<(!$`C*@(hY3U;w#_V+Lk2K=(j1MpUS{(qQ9A4j-i+hoPJvBW%Pnv;VU?Ugn;k~ z&X6bubp@JGyEVyuS`^g~AHb`-Y;Ir9=5U5|zsz-xz52`&P&euGRQKbl{9~LUy&p@J zyhNyX>0`rz5jAJ97cib$R+nu**MT%I%RguXV1WIBL>q!pQbg7lTjBHOGtzl9xB$^^41l9Iz4IzA# zHE2IZp`k*zRpYL7GWT|9>*3lOEngxctyF1;5stx#E2c{&AslZ$ 
z9;LinmssMBZCSE%Y!|i%Tu!To3O`Q%v9b#KW4s;3$Hh?L81hh^?S62U4^D}a@pi2K zfSiN=0OwHNg7OWz1(yHf1B0hATZ|vzYRW&r>k~eR-j2awuXdx++YukRTD%>x+90yq z;&MXOA}--4%d3?u%pYj$N{*(Yr+ASJc=RGkj+B_pu8R6 zI!fsCnqUZMN4ahz!<8l;xel$e`m_i$^|?L-DVLk$eRc>b2klxCm`Jie{Q^%(hKz`f zvtlS~-X(kDh-x}ipO8xHZKCawJ1`Vtz(Xw^waU`Ao6l;*fId+k6Y&wmII?E-*(W0a zbDdzwS+n|-4t~M$pOgv8ycF+`VX{7sV9ClT$S7C!ODpfiyF*a1H(d-N(I;&bg8z@yf#lVYvR**%RBIxsjMKdHl zY~(~r(%v#qXDR~P093A~3fzHQ`3x?heDK^kCIC8Zz?xK%nR;;EaeOr(GG1iFWP!SK zVoaR*&WUlp8evpyb+JCtfuc2Nol!efjBFTw`k7qUqMniVs0c@@Y$Po0`&&U-sB)3A zWLH2?l&vuY4kDBXC6oDZXJ@3)k`?_zp<&G`4FmZn39P}Ntm_ClV(E>bL+e37W*0r2 zLOPJEPTvr=m-0wNES~=^a!GYz1n?CgUWWL^%52dQ$`xM{#8^-jSplf^XAv^1A2}|=!nKKbm9jk?NY(4 zxik=%g7_=3K_%Ys5XTP2H4G~kw8gVkz z*5GzlASSq;rKjeI8O9bm((dobMj;hkV>j3ylV>nDA3T^&@XZ5V0(w7*yg|4c5w65K z@>Qb;S6&Jk_+VLtp9LJRKjDJdNGjL9Hi30ZVad_bkFg;v{dF@&>Rhq70L!Yyr3aQq zZy&Ay}KkBP7<`izGK#|wu16ws^QVVzQ#4UqwO*AEDU-Ao*8glS0>^@JvWI3 ze&y7=kFaY9{xL069!9*lM#63fR+#SsYg@@_LBI+w)@8Ko490^*04Cy*U=v;?as|<< zYu7Dn2-Xk8j0*E-Fs*AbKoXZS)fwtaD6pxOmK6+dJ=S5?sKSV2{)vH25>K$kqh54t z13OTfx$+cP$8uKv-^FtCFd2NS-zGL2yZaf*hr!?r2_X3_eOnoP1@pJB%?54cN(NtI zCswhQsL5GBi=Lx}vc??rf)P*tLhp<#BGUu=#P!4-`G8x3AocqOB_P6gaVKngF#U_Z z&Zc|d;tu6N7sy`3`o(K+ypvFLCkyXxY}}h}{{fCV8e`#*jr@Aj{4{sL`$>07wPX1G zNaXq?E|njVA7&Jm#!3gv)6JkZs&%6S5MT4?0Q4V*H7;M0p4w{6ES72;A&;DxU4W{o z1Rz0b&h0|tgKazsgdWgccMza;N~eF22M>|!MsGCS#4=$f95}9Bm6&RBhg2*5bluV{ zIwcnTLb?Zb(j{o?kvu-2LI#|Pp&H^lUaeLLYevFhr-U%c}!eHH3g8G0w`?28t&PEr4#!E^6*46I@1Xb zR^oZ|HTv)E0O}Dgb#FXUTG@)S$=2IW1pH9{q3Hf$Ij-N3)GN@Z*$ zFP5g`&A`E)-~5$do7z8w{FeLjgE$rqoZwC(EalW-9BP8 z-JmOtkNW27tJA*CRJMsd0jhMEiqd*xaC3hfOVU+%@&IBCkP_5bun4I?x!6YKc}{SN z1;0b`wH`_)6}0&Tw!M1iKrQ_vOL~vgmLdj5Ik1?QOrQHx(}cvO;B$4QO)72zkzoo_?jK>@x%6PmX_Rz(~ZuWJntKzU8(9v6jg6(I>aY$=@CslX-a`<(PJ0p%(%QliqeBQr{0$db98{7cH%D-YQ{P0)T|0u<`iH z3?hz7OG6?Rc)+}2T1=6Ip>*Aha90b1Rxy+c0Skr?*bx1dvB-A9p%Cvh)WsMiU~Z-2 ztd2HV^99EiZU{Pc43R3+g4TF5I3_%474itDViZP8#F#c&hVK^}a=-SeU+lQ-_G_e6 z*{Hos`McJL&O`6^TDczXC;RU*UBTv-XVmgcxn->4T|AN6Dl&$aj!W;hdcPTis42F^ 
zj#Ip0<+emN-8$8eE(y_InLiBcsPk^wht_Mwl{x`!K}Un-UX$oE_IfC|!}6blIxDXW z#QjD>fdcl;_W4+eqG5G7tf>P!9AX{}&Fs)3ker0Rphu1@(VuuA}87mlQW z!;%_YP>Xw5Ae{1uC4j>X#l*PYXJ@y^>p^Phcbbu?~M zknG_EBu*d>UU$oApTYaJN>=3dy!ZAg{>>zNmGcrBfG0oBGGscLy=^ZkC8{JH@yQ5$ zu)X@Mn`)c*@Ma^|D7z6$ctb&~!7J2|-{Evp#pExkrJ>^%N^cetDJl&gmq2Q|JDsI; zxF;3G^f1>T{|e#gW(72Feyi=k%Xh|jW56bY-Q>R)g@WlaXvL= zaAzNj21kznxSFF%_SGvRbQTXkPA5UZZ1X!n#M zm1%a%43*3D#Tb!>*u3!px;iqh6ytb^(O~%-7GP$N!WkCA;T@Gq`tOxh_#m4h{8kHO zfBNN(;v5Yxmvm*0ex)A(FSaN^FFG4wLYF1-sDLLWeQ!6(P33VT<1o$LRC}&x9+Fu4 zTgEA}p^`LFVV^yyLomak_V^tfOWcoQAeuhRGSNSOj7bUQ`fIat5 zL=q^Xpb5BuXrh&Vv6m*sB293_0{awj5k;Ed_X?UgmeT}dZ(2~h1nHyKhF52kV#LR` zb~XrgB8WIHV_S<69oX95K>E=z;!WGURt`%_o>r>YvgL7UwbPz)2~J`SOe_7$#=vMG zeHZ2dnpS)0PEoIgUR9Bv-3V&oC5jSZ)nUel(to;9SS>`l-U0q&ctSqF4a=Sy(ay%w zn0Btlc_C{E%zdN$=!5O6by27X^JXxY6+JgYlV3*JX`q6oe!D>33S}FM!jl3kVMdsnnc?q=?aI&Pflp9AtN~dUhus1&x&Ct%dA`F59w1r#3&Xm zn`daP5Pu-q5SbSKJpV2n!WbT&%OBbkEIL|Q7sg1_*UIPgZkS&3lB`4gy+OTD9%Xog zllF>Lc9g#_;ItMfnA*12+F+J)+K(C#J1omU_!bn`o*cMgJG49!y2h?1ezUts3Gzan z+W!jcWJ+$Y-5lyvbU>)Hp_-l@YPuOb6FDVB!m5^sqb)SRA4F*Sa_sZq~-J$$%I9PQbf6}*Hn?F1MA)a0Me0cV&^<-FCHp!M%N31aIG(`&fFC#6M(xO#5)oko3 zTbW1IZ16|7(%?w7nm|hOy^+6+6#_{u=fB)u)2VD_S>SFHA8|j(bmM$_I9V39^X|!B z^xv2e%2|e{EW(9v$@Do*av-CmRmKfJw29!M`3st&Pg9*}R=;)pyxaX%iPc#CUyM0O_CFe1#G z-XSS{arw?TkJ^6@S@odMkPn&JT1q@w%B3e_EW`a{%YDPfu1L}*C4R#z0GX#A?!r7%azS^osyUu~S zvWP=slDSCqt;~LjKM6B@+ePY;qeXOv0PD3d`Mnx zGhllBQ(8Ad0&Y}=&)b~iEo&iTpKYj#@M5d-byBGIh{!g@)tFn3|JW)&ffD!W)m8D8 zo}!Mym&BS9Z6Y7ig>AL?dxeqkpLW)nTMVqtE&6JJSWCaKk@d-46Fi>qG* z2TiarLcDo?dShc7;B3>XnoFQVTiew;rGDE&C9wc^l}^M%DiD5{s6{*&L!^EL2h^>s zn*lt6HiHKxO06Cq^$L#?uEWsB>xV&wwT6h85hkLP4|B#1!VUkk+O)NId_-|`8+La{ z2FVm0+X+wfIWia`17|dS5dj2zB6Gs<}Fp%uo{OU8;$DV>e1zTsG;fL|?<#&1q$o_k$do7A)_ zCd4X2iUgDsR6$6dB{u|mf{<88CG$l>VsC4gkd%=uXuVK$dz2`^_Mpf}>(LleOXn(W zBKMNCqC1R)2t}hqVadIHZPs~rQQ?3&38~wGgcR=C9?c8ov?3A;Wjf0BlRwT$hy&f$ zAR)NLRZdlld{d$y3F`q4`x_mW*bCv9(O<^EKs@+*c`MTn{0+djSX|&UTIuVbaDmgn=2G&)9nnn{Gf{7%7 
zgJ|=k1bUqJ%1{yckY-!oo=JK1*QbkzmBBD*y(~tn{Ma^D`EcT-cuZLAS5t^qa#4ur zo&#=_FY0nk#uW1KFc~O>joy*Tu)>z8Bxiy>u=GF)Z3B6kO4j8vLdKww5zfTQQ30+bLbN;gZH zawNmfFu+PJQSxQ%D34F@kLWm}CKsq&iGnP9QZUSBA50YWhKte>a=qcyhH5JaN6VFv zau}7U52G~H+sFD}$N^d`B^~j)8FtN(&Z$>A;&>zA2N2XttPg?0T8TelDuA(-@6bB4 z8*zpq9g)rW#?BC3tLX$%>Zt9gJPz@Q=g$t$F`r69R{NYhL?bNF5|0Ww#jg?tT@Vgo zZ3mu`MJ=ZhwVXy*%dy-d>8#|ZV^fp$y>@`CCKVd12SUfqQ9j}JiJfFCX-^em$S>M! z^E+t=HPUhmI^rFii!6D%UB9Cx9)rFqV>oz?&Rw4;wpQD?{EP#iS{7S>cDMDrwt|Lg z`X^-NgUV$Xuvj7b&|)T><@=&+1Y zK?(w15kPQ${c`}?g-?@kSb7KW!HPHpnqQK})RCv~vjk!2osw%lykLSi;j_O7pS`%m z%L-NQ_R1nr4sglxNl+8hrQP=w?(DiRFe!II8HH#z6_2F?v92-FMLfA;&Fv=SalX;& zz?@F8NO4&ujANT%phG4ua*~R^#UIH26twuhg1&=XZb}L*Z>#=H#{9zlKw@kG$0XZU z%_&Q{vUSRk?=56Kms=cj} zw}fG7q!Q&#vD*JsP;4twL{q1oOZ*#<_-9d0nXPOhsA%^0kx)IX+psywgFd&9H43R% zb_1T>h@3(pop>R*TKojD)T#L>Ss(zecW|}4=~kEyVsD`dW#-Vz-cvXGvuLl}15Gmz zGqy(IYO;f&IhSAi)d8RQ6v2m(1H3_g?S+>X4KMPmy1Tnwhh4y!?(Wnjzs7IzvZ|Nj zJiHI`Yn%#d-pBe>Wb#;ns)&|)jutS$n8A@RH)co<8nafp$0Ip)jMi?e1|HLrJAOpH zMtbTrC3U5R;JE^hW-yINJEE{`dKhk5kMitKe|0@18A6J`X|A4V_^U62TB8X0)N#esv_ArQn5vZZIhV+(A(5g*%^6R>H*4;*0{qd`9VGW|X>nnHg0Y7P_e6k?Tk? zUReGmjA7`bCcVPlgMDden1>Q|svPg|+G~gZRXCQ1)6}{~E_sD8T@8do3!34GIl6@W zk>aS(`8K?QmhNPlYmWw#9?u3<`ZS7Xu6cq5QlHpYKzd4uV>_6vyC+**#U!(fiMNt^ z5TOo;1-sJS<--+X$Y?2wm6`>vSg^K!ArmW}4oXpU9V@at&^~vCv;79wXd4+dvm;B8 z1=CzSCaphv_sN&1*?2a~B|RXFB{E#+7z=!&DtN0X!(ifb;9TeBeAMh5kfxA>cm{~l zS11G;bQWXi8gZDnQKf{XDIGRdlw#-15ovWpb9i<{nRkB*<TpSVTV_hJR`#P4#|?rI&hWRTjxr7AWZo>NF-NE+_~stT{fdgNGedS;?Ec*+e=S zYt-EkI7dsvgk)I|o(3dB)3=szl6~Gdm8?2?Hv}%zAui*}Zm3hF0($vIi;XwRG_tx% znM$K+Y&h#o45>;g?a#dl&@x|6U?PX}Ntp5%)ad7eVu%@~Q*g0%I0z(sz?L={(}Kw# zngbj#(#WQ&s{j&4IAvGhFz`jefprQFt}NJIr{Hj@pzKvsQ%P@0QgT(r^)2E@9MMy- z07$wNh>;JE3c#j3an*nWeal#Nkvrkt{O&)cIpw#HQ5XRU4Q<5fkK=TG;Jr6PWXx%6 zEh1JD111ns@f-a{CiVPxWC?9b-gYN{+H!fnM^ge%R88JH#v+it!gGBJR&apTH2ekL zx4B>;6%(Xyw&gjV-b!+f2?zHc{y-8=0SqVbRBsbE7+?-ne-y6HsRQo3d|P0HS1hg? 
zx%BJsY7c5&B&7%eznav%-Yi9yqjy`Dx!b@GYOVnTRDnZ?4*Qn%2=oMpd0x>r>R=FQ zMoin?k$gvggg2&&#++rRZ}{h{;bdyuDPkNE^w0(qqN6T=UTLGr-JB;ZfnG)ns)+s4 zP~vIP%EKW;-yxu{>NM1GSdg630bXrsEWXhZ#n6s$p+7!i=qEJjvYJSAGL{$+;n748 zby;FV>gp;5bE~Ny@r|G6Fs)gC+lCH0Z-i*H#jAx2`IXv@;n&s*To5nCbI3_E%w)$p z!ET0$8px>A6Gy`02nCS-r%1czBq9E*J#{N<1!Aba3`gYAEt(Y9tyhHs_o<3qL?b~x zkmcl4WCh{tcAfZ^aM+$Ixf?=20!!|`6XvWw<87r$<~({_ z!RZDYuDX^vKaHa#GoDebo@nI)07#+V zHH3ny2ejN7s)}9iLZ+QU`IrZ?6ge^qHYI~@1|txsf)GrcCpTC7gQaOkyJ0jYn*L-W zwOtl;FG29WfviI$3NRjN3sUj&<)3?Afc5<-RByXnFA?jm_xeyTEXMpur{dL! z>!;o{M+YI!W)SM-urhv~kF{TT$U8$CnwlapOdOTSB)~@iBFR>rrL684?zblX%qKaQ#HRBW*5JWhAABV^x+#SfW@T8w`TvWe&pCBYGNaWYel| ztx2eY8Og))N%%>6*Mrl2mcT;HAwhvg?OdNw>@AT(#xZtE2Ff~tFTlM<(-u1uWgW-- zPM4GoDaZkVWKODvEw@zmp;6@|p-1~!-D=rM_kWR!L(gaHSMK?Ktqg8dd4x1GyB%>S z%pFi*;W9KL$PXy>!;t30p`Hc;yT*qn_2FiQN8$}_FVM?VLTQ}6dOd@U zo1yYK&WqF?hH8KRm3uh@PpD|`i166ZjDmGFJY=*zOPkp&mDkft)xauOb&#Nw+w`AG znip~suKimpxs)+M_0)Sn^+IxI)ThvoIXybxFL?%PaDV3d^PVizlU=PL7_nDZfh#}} zz#mt=fb=G2AR$*PFtoP4dX&uA(-QV)5SHYC_YRfoQu!T~DFUv#GjkdQWW)jP(-rtn z#@`3Hs-u@^X}|8nn9u0-si8^!PW$i7P!9L$m2Is7_bIf;iuN5*^9^N=c1HC|Oi`CY zQ%ibOpzo}H73e#!UvtBg{5@kZmI^QqVm{KW4s<6SVm12av`iZe0dnKxs)uMcJ4}5- z-=X0geMfrg64*N`^TTEI9o2`R@0hMyoZ|13TvhjBS~{-#(5D5xo|f8|9X0d|v=yfQ zz8+~*TeDWfJ&j$vq7785rnGiZt(?8u{Iv3!_8_f%hHqxoGt|AIdxpJI!N?9kUE+vm znV-`BmNZWfa-yDQ{sm8_B~EY)9XSO3YI{i>0r}Wg^h8pGI)Vg-G$Ad&N^xoV@dQ9# z3Q;pnSqYvXbE4EIYcOaE(iW}@`d}q%+?`$eCShq3>8JyD4hF}&7|8S+L;7I&P!epd zSU8DDtio+Xca__}$-hw=78D`sPbZsG3kC>=6hqWx`hyh5k)mGHYj>r&|F2R9Mf%7RMV3~J1U=D14)IRlYQj4HMC)KEm0%jc-7DMV9rglPQH-<(Rbdiqb79NH#MPqbnPlQ$^P6)@C%&I6G4B&U%oS<-se$+=Zs2uWR&PK=>vWT|Or-iGh8%FAhr%}k;V1_$4sq$y?n^TRi z=fZxEiT^?yY`>b|C-gqClb?d<5&ml8s;n|m%U<&7S#2IczL@xF$oJUh$%+5U^~~7h z#LsX&!0-Qy>&cCi6W_u0$d<{8jO%fpKauM`%H7Cy|M=v@?Ofl;?|-BBlz%$c^IX4| z>uIjvna!Y_;!U0qdsx_r$KiPsKgR8s`1s#)J;(L2Tu)K>hqz9)CMRy?y7%hIiN|q0 z%ljYadYbD`a9!s5@3@}g`jcGu5ez`caCQM8i4o4uaQzQlPXO#LuG7@RF}m3$>i+?* z%Uo~ay2$mrxy}Im9bAuWn4I`2t|xeYC)fQv{}HZJ7~p@9>lxa6lAd2RIq_t!m-zjo 
zTn}=6HrH9M|B>rF*Z;)z1QD}O$S!@@n4b8)+HFB-yX@a0hUl`T)~{*h_bY#||79tD ziMAiecbBX4|GfJ)!$A9K=A#%Aw33d>iU-qYr_0xqQ^}{3`;t#2f0&$1K9wvcpGHpJweNoZp8xho$y<^G$(xgj&Ubt?dGvMP^_|;hlQ$-B zO1}TbS0<0W>2Wvzz%7rz^$9<8TXy>$Px=>6-1UP0{{Q?%^5lQ{{^WhhZze~QfAw3* z2a;#}+kf+8KmJR}{^Tc~^25*mkN@et$$$RQr#}5@Kk~ibxATVU|J_eMYhUuJ1quUhLlZ+E^$E&&_^7qLcG4A7_t-el7u2gmXcmT!M&&BK=L$%QjCT zfcDAgc8pY=mij*VE9@0M!Y_d|uPd0Daw1PU1jF8-zw;Z&>A|NvlOyWTr(=iaw_uSY zQ%<(98EGWDj=R%aCeu&5>&G|8lIZ-Kq5GeE8koimh2%Or?S_MAjfQGoSno(g$5x4pnljdd@5mhxU02T zZGPIFp5B^|&-~`e?1$3dBH(>{ZF*~GkCok62}8B#9wH?_>2HuDguyIHf1A=flf6~C z^eH-z+^snNPlTFdsY)!hPooP>L zxy%8T`Gm{Ns?5h#rl4NP=b=$A((J%6=#~)20 z5F7Lx(5t7BW=%|Fa=kWwy@ltf1XBJCH}#Ih%6n!8Jx?;x zJ|rgEuPdomFImw$lhq#pAA^|_L*w= zqBQF%q16QI2K?~6ri7r?obF=b+?X0R5oH%gG>eu1-yKcg=GE-t2p?)L?WlZXWLHb- z(gJje%}A>m_DQWs`Z))%aSnKK8oIaL_gE<^nfm@hl_a z_b)g1wCJcG>}qrWvFmt2)o-zeHMq|2s--a0bLg9nD!#oku?L+{TkW=>0Wafm9>n83 z7!=wLaiNOsa^3%Dlk19 z3T&(H9iDiM-c1cpJcj44~;B?jAeczK2d8f|ipY}e~U z!<`+Wz(?R6hlkrcvJY4AVk#JWFgwmi9lvgGV#f36c5OEux~Ymycd-2!CZw*9Vlhzh z6BKWeZbOF`3}*9~ZGQ(G?MzOpo6r#{T@)9%#iKipOsbDzC8u>|M36C%I-q$?^Q=wBCgIenQXAc$SFaX~E- z&ZicSt=poQ=RQ@2ynROKW_XQ$#@FbF2^syoccbduV0Ief57>vp)4++Q_yQx#aeCUp zEGS70v)v~HXUW7CLg(t>Z(>{)HM6#}YV{noc`|X`&>m0U!7j(`wR6qR^Swe(P2Kj= zi9PM-OY~;GKA+T=VONA>1=y!RioFn{&cK`I#T8c3{A{H818dVfY$wvZ&^;Gv9->^; z3&sNyqAQG~tbmG9H4F(sC>2pDfxFowwLA@G=ehMt<>7oXPk5o($-^HHWBlwmpjyA5 z$UT>5QGht@$Yhj@afm0^O{V+d4rFZA6z7IVF|RPJ4=N{&GL0bRcy{|45rZ+4 z%b?<1DAmpF0^{CZBe%_ zKvIvch$KBc89SHxCOOf z&Gr*MNLX5WSi58y#2KhEo2|j2xzhFQ(7UkgdmGKxPKR*e^fjPvXL6usOFU()Qek@5 zK@btU_N0W(Jw%3=B@N;#zK;P&bhIi54#k}K_mdalVFuS9wH_c>h=SI%_VbdFS}TP!#QteY0QE2HNea&3nW^S^xISv3nHm(o! 
z$uv)WFdp<)e5P5@h@Z`^YDb^V;+Z;#ke40uM^F}57H$Gi-tGnC(2CDu|Js8)D(@t~ z1@wsq4y%g8eUsV`KA0UT09cK?qdm~WqFFzR;_4%LDK@KR9nUo*O3RQ^ZNc6 z-=EO;Cpfnrp@3;%yqquKa7VYp^rGCh+yjT2gp~U@N@_5>P<*7()T)$!baB;3B)#Cn z3p}6fY-*1^rNd4-t(s<2_JexK+e-xbjtW86Wa)SJ7{MuxBvyh;z}?f@p|@o+s&UVA>eQOr{n zZzl`Q^JyMscrd#)XO2ph(xwT-s`m4Jk>qh5`Lv>Z%9d7HEC~$Ov?0VEv7?dxHf(|; z;lg5ygex5hS%D*oEyQvp@nCz?y~NmXqu4F}?)6^|T1!6#v)h@>ar#)7D|l3a`ar$& zNG3Q@*qFZtgLd;3f*p2e4J7nz;V`@k#G>Y46(V(9d?ii@&ugV@fq&HQTPp8VyYH-f zzUP=4mmQJ~ zh0ZOwOeM#VdT0M8{c3N3l6Q$7+Z(`I2QmG)9!J$J?~y?`MYTaU3)P;{$=v4SUL-w7 zPsB2jsh~v$uDj^60>NbsCKHkgFfIV7zkz;jp;BiTb+BYD3GITUJ=u8-2zwVhb}iHB z$6HwqG2H~w=`4fU#SDG(;9kQy4DSB{o6qxThGe%9q`ospetK$_`){?%7 z;o6y8?5RNg++JI*Gw)DlreuBs30Tqyceb80)hJd=>%Ruv7*uFp0MyJVTQZy(bL`Db&?qb%LO7UgNU@Rzubsr28Cb- zLZq1bH{G8dQbft?Oz^~F4ror&2=)sFU1EepQNkD{MJb$?8G+V(%H|^i;B4z4;E20E zwGshGdk~ zt~7dzgqmFhEcJdtq8*hv!;q}TL?x(<3d*cAS>Rql7nt&cVo#i@avvk(RL?inh{+7_ zT3#nCQx=fmm+BHBtXgEp11`3I$enjkc6wZs^Q>(^bWJ#tlN+nvbuPP)pcGU9ie!VUB}%#pL}{qpB!EN39TNBmKi~I zQV`JY2~`1v$MqYHVu33I%icnsFB+0#{<~1Xb84W|GCVC!g&dwm;5jXGSupb024pUR z>90LxE`lx8?}rqnk6c0|sIsSqC@?;oOwTc25&Q?s4QLu*lN-cv1#Wu3i66`kme-B& z!Tv$_9qSIWCBCz2uC$t;?-`z|R&$1i^~WTRWJLQq`l7me z@?l~HH(J6mOXp_SC`HjqY5GMBUl=TF=>^IN-8RbhGWtT<6j!5+kn9{~m()2zts|-w z=*>&QEA+gCuKh!+nwTxV(xS*Wgm9xl5h5NI#sj^dh^#0yi`0OGqV-OPjIrL;5i{9% zANkBc;4^d9>{gP}!J*EOxfsd4BF$;tKBWFY8%K1dqw@kpFOKO!U;&5qY@q=8Fn3;c z=JdSO(c`O|I8l5RJG!86s3CUrB%f$KH$9(l|DxeD_D>tNNO~`rO)HC3C}w&P0TnWn zLDw?RB0d6Zh`1x~MXg8kti2TDVtdcty}W^^;su+;2Zp_0u$8fV@?@~a+N2xgm zG@zW#>m+5aM@FvXZnnqh2K$;ND~1z2G0el`VOPq9nH|OLh&MX8%yNhH2wRFjBro5B z-vY+bk&fL=!67{&2iU=_dl=b6n@JM+J1yI5f>LCETRLNHTkuiTaV zp3!xHjRUF#bMRt(e+J)c9$2HRgJyVtZVO@*`J6r|s7@Oy`{+Q{nF87+KbIGo&vkM`02ewE$t_J4==*N?lv#VSrGPpdI={Vf#_(Dxu-<6&lZ22!pF zO;bc=(U)`qi-Nd_3=?HIHjD}s6Q-B;qVwEdbSiY@1~C|BUNK0(f=PA?Ehjzh_-%FP z=mxsOv?0b}1NM<_KmvCgWnCDKA!fu`xuCjDpcQQqn4(q00V#~%?-8;e{Ag5XDKQ4S z%<76`!ND(zienOX<6jhY5#nD0eu;mPa&KL+c|rrKAPolh_ya|*Bae36Qc*llCY+4* 
zA~{Us;EEBReIS6NOoUN~?1({1oE7L9=$?z{_E_-T!$1=~!@zUTVu)#REDnW2PXe+O zPSd~UP|VQtFckDxL!mgP@KNtjoFPO?1%(=-noyHq6rmtfw}U5d#b7ALh?ONWD1rt3 zxdMs&*^nxN1WhNzu&_u$8T+=zVj(U@+ru;zTeBrm&}&_W$gKhrMd>NtPjMATb!EsM zOc92BN<2#ee0gM+g8jmfi?oU%SNsI+ENo`TfA_LO9%CmJIq7LW8pc8D!0FOBWjt5s zyxJ9WhlidcY+%(XWvk{;_^31~Ba??p${|q-!W016nG@L6&iBr(+3|oK*8g!ssP1=~*zd;hu>}J^p97z5Pd{exj|lva82ovy{~uDhBA{_5u91hfT!vKxhj_HW z@xalieCmrYs81$sFVw6z<_|k>sZ6PIIxhZ@@!NcX_vRA{u0niO5RGH?HOVf7{3Faf zPf^XotC)d#@YF(jFM+S(T6lZWEB*qS#5&MBj!wPglnaLKt!AN zt{1_sg=Zjd+59C$;E5gCGeFy7dL~W5C(l524bOlEaJyCkaYZ}>`xFw%G>>d12h2c5 zfSCvpNO3vDh%KDY0hsC~q;Fn!lqo=aM(o^pZa}HZ7vQZS!Wi@jj0;t8x_h2;(p{A@OdIq50>I$`M7ecu>VhiG>USdyF-HC^@P7 z6ywHv%yb2PCYW>t^>4J9y(GzgBY=3#Iq*iFNDPI3d(-BADov_IUhTOoCSs)cP ziLsS81RwIoN*Pa&Z^p#Rl_h`Bc~|RX$8_xkGV-~i3L#FgiO>_ z@o{tv^K+zS%S0?jW;u$C1P^Y$8(BykBzs;&SIe zvb2Ch5!uu*6vdSNdFgD~%O>B<@TlO8%ZwdHkIlj7Q5C@-qF_DFf$E(j8;?}0NBA!} zQms|Pzlv{ot|g(a@RZJ;_VB#~d9ogaVXANq%P12q{e&SuC0Iy%0t}0RMPBSS)nV`F zli1KW^rYRb_o#P!2<)Rq*tUlWwILHnr2%nN$^1N#q{d3&L5ShKQQ@0sGHJ-*^_}7L zlj5xX>KlFE3@Yk*3^bOeO)|`DxO*H!yBzQdy z1F#q}#vEx<3fw2nERSvp%yNE#2gmuyujr1qKQLX$&RsV6P zA&sbqe?BDC!vsM6qEWvR@h0H=i+L4(5U3|?P_zBPp#CRw)Hi!D0uO#j7=dwvkx#9O zk$DmQ{l$n1KM0I`>cL`!wJOr05n_r$17bq9wX#d`ky3CCHn>{0?XJme8|k|Q;`|1! z__ZJn9NpmPD;}_7Zm@Q)^4CpDAW*4mCUcoUzepa&eA1l)>cbs`PCJ$6ubWVZ?+xq+ zKH>F@Nne@=tW%w8oC)Yy{o*S$?cFiVK1mqybRNH8@3usY{^omBx7eVSz;}=)b9YM! 
z&rY%#iCmUgFmwJma7U-=bA;f(U04>xhkyHJFHW;pzG-jDDe^3*V$nf*v^`4P#q29e zk9#)&pXi+UrwxVp7@6I2V0KemBtC|FI0)t!?!Do{0!VsaiODe@rNENBXj7#qkU!^5cD`LRevt9|KDav?1myq9M8@ zij;v)H24-N9bQrm;UyfvrSOs~%aL)EL9dop3zGe&<8@blE%+L77WfCW#UR+u#3QsW zgN1cl$TuyPwWHhsV#SB1y_Do;Olv>VjwFCEFTCY}--eGl5d<_(0Bc9)_1*&*>=aRL z&gMA{E=&Rq2-`z~p(3|T#hpsfA;M}dVdpydj>`UVSb;^48TKy4eO02U;UHrxuE9#P zg=|LM?FAY8UglMhZIGl_Nqz6)t)fUkVRjJ{KsH5u4r=W0R_@r^e5a@6LLt>v(oaJ~ zPHI+f9rLtVkW}{pms4snHN;UYvD`^Xf4}W=Csa-X=(X&=$hIDa+3UF- zG%zq4XD{Y9=bZp+=vTr3oKXWLQAKJkPdpbiNZcd!hMDXEIHh?jZb{L#X04@4DIYfR-)7=Co){{c6x7 zorJ69UC@LOtAY16Ep6YJHr%qqu-#c8BB#!0aWJds8ZX?dekzXUU@YI3uKRd&<{ z+rKv?YHTGegGSQoVgVw*FuBw#>1Rw7=B1T3`51DI^XV(K(WkG}S54AW#7}vjFs>b~ zCv773mo#GZ|BSA5;zpU5k=gG=!sC%PVJ*j3(eh0Aq#jsL!ic_6+ChpL+U8sn0NR{4 zaG`G5*`OnXqMt#0GVh*t>0uzW-YM~LU^f9Q{ASYf7Y0s`|HwUNDK^Tq4%>hLpwD7) z3lUdvY4t}QKA$;scly~Tq0>%F9$v9mAkUuC`b{5(^1^uiSzTp=O#vX?&4DeICpkLZ z&$SN^a2tg>u{0_}DmLSRtbcdLsV00)o9TUmrKn2IEOI%KsE{$V3h*hVN!JshsjxMa z^0h!0m{Q$I(F0q7AurjaO=j}&1jQ5ckkYk8jxQ==-T}f$DMbr)Ssc*Yt})TNtz-k$ z1!2|6&tev{IW(Io2a9bb=8ZuERO_Ce;s~7Up!TrHe$HRQ6$G$`lOwhe=m|i_Th?VGHbmdd{v&U;PzsATa{sM$~B}pIwReb zxriH!^Wh+9T`6aoAh0^m(GAu~bN^g4->GBgtYq}M>CUI{RgGhmZI->XoJ(~yndG-e zLmMZgx?SR9P1$9x;9!&fT%&wB491jpCYl%?!EPtHlmO4t1;3|cIuqXsLIrufl+o5x zk+iT#g-g_xomHY)Vz+XW-&r!6GeI{+jprOYt8h$oLp&A3)=0n5M6`nzi~FqR)4uVZ zA>rLy?_mG9NcDCts+o(_gYPM?M+PGObUmzmU`a)fj$ro@X9Ozqx$|6`&A0n~{a*Ci zxFb?k$5lnekyrVPju2OsQ$p?sKrZ?++8gg!RnCGRtt!FemYp7DC2<-7^Gk+P!yT+j z%N%rkMr(^gLRnqd$hn40=v}*FHGGDgov$tn>LfN zHQ#2|M!HkCHwx?iqM5`gNhad446-B>tN1$MIhIRexic#FewRC~a_@D0OGUXVY1y;S z(iigNB{0*i=^wlPv-<9R{%%R%1?A>k@m(#}e_rK+aps)LS?|>e$Po^X>vDY;R4$kl zmQ`-va9k|P)w4gYWM5nj$0dCy!wwrV^_>(r>(W=SzgbeSyH}I_c;xt&!W7@Hl~&KS z6!Y8KQQ#I{X~XQ2e@9{4f!#kk{S$g(>y)HC%W|B6D#)&bBUPCzED&<`Fug5g7zJ_N zmEk*GGAh`;-*pHDyh=)|s2wfQ@>r$sghEz(UrztYAkHKJQlVI>gyjo76R1- zENG^5@$e;3wi*wp)y9KVgLUQcwNxb!JS^S=&+4T`I4%^% zgdTae-g>vE4qQ;IManyZb9|5IDSwaaNi;24My~37IV3p&Vi~UcAST5~5^sax(GMaV 
zXkOv$8qSxdEe2DK#TCkOk)$O{RB?quX{@(|SG$^pUsfmy|5~9uB(`w`ML=K5pJe-o zNF*cy_;P85n%}V23gziL;}0}h^A!rE^vWxg*^|_Oe1)>zo)yZ@ZB{4`NrW;&3~aJ& z8BQR^3Y0^tjxO2@jhAm6IbK{C z#rl*`sGjo#5jJAXR=+pR>(I+*lj2t<{ zwS4y=2NBnB02gic)27fs_aP3WqaVo*7a#(i{T8*X{A^w3dH8jBnF#ksLVgl}y%TP^_-&poAIIWnZGs(qfu>jU#)` zG19Y;YAIOBrxC}6PB=Bhp*ZR_mXgDz5I434J}TWCk=<5gR+%`@vexnLsv$b zh+RCSSeU}ddgLS~X>bBtqtMlXq#0gD3a4*bPSVUZ!BSP?Jg8x87svgh7~X}AaI_K; zzca#m58qpE^z=i_DymtRv@vB7B9uK=It8h1svtaadf>!jbn*#Tyx66y&>K_eBF-9oj8mG`D7|Z%h(d(z-y1{}ER1vfX8BPqA@P~CtRC0s< z+!$>d#l*-q#q3RfO<|r)0h#RJARf?!c8#FIG$cgKT+V3AI@HIi ztmVja5u7<(0buiRg@T34KjEIO3|FAAZN0pQ-#VPZJ(YDLEMiqNbz3L2j46_atBe$- zz=A^vrOcTvS`@qgX-p`dTTs$F&clH0QV7c*0lc|e>|u_yiq?>ZXHPVSuxrPo_4qzzfeqfSv+f6?e(}NbljjgY@cTOrSr@LhHEXq_W zVQfYtHD^=U3=Uk`*M5!N7hL|biE~LeeUB2?1zJ<_leyCz_t0M8`R4OXz@|>NTxKQ8inCl3;m#kllqtrOOYc zyKDixTz7f>P^X9@`D%>Gw|0v3*bBv?qYh(o*5MV9`V7ZpYc7aVq`}-&!>hEX*l!-} zO2I}u6n#nRqf3V zH-Evsu=^thB5GpNFQ)v*k+P8A&GZgbwYvzGAlp{fWk60G&J)srd%cWAN~~wi3KVXW zQ+aY*E$^28y4z*VG*E^W<)?aSyPw2B%^5ZJ5V>fe;z%H#3^QY_j1M_lm6c5@J{oaI zVi7pza5V9I&6d_2qx>!}1ongg##qDXbrGAD+#}-*P0IKgozgMnkTu%cJ!|-ERNcMC zP}s5Uyw_TzkVFqf&YKwmECEuq|M+hSEo{cnLgJ|FEhIB@NkXV40%$$u>cBo$PE%gc zV-Lveq|$7<3o#s$1oG(FGz3r+6_d+mB?fIv)Hw_wP!l_RzfL7FK_$W~^_^SgM0Y5J z7o8wN@g>VLt`D5q!OAFiar!rIgC7U4&9%L~v@?fK%OE*eEfLowpn9?jUbJ6G(NN>iV~OV>E8k>K05iL zG}ubq>B28&dXiitCj$0-Z84hZ538d@RTgrItaDn>Aq`0jVd=JDKhNT>1bqWq-Yv0F zEv$*h(o?=Vr0U5#TbDY5eDRUgp9NM4Zwe6r@TkBtNmtv$GEvHGDLQDLRb#KFEX05# zp;yKoNyG};isV_fq>Sy$F5XHwt7LsT6-<|>Q<)GWJuvb@NKUI3sNjT1%l~6ddJS=@ zL6*4;y$0*u6{ry?Oo{ZE_OZgzW~4ncL}*WJ=3%0}UdAYLL6}jXU8Fxor9^*H!Y(Hy zc)oc>h!Om)SF6ftw$u&DB8LOG9~;E|0B{tQ}XDq{2KEchZ+AAEq#G@CdZ%9Uej&bDQ{+q3mX#l4Cn90UHo?`ckM@ zD{{U4)5FS-7nXbu%*dcb0mky#hS+p_fLlgKw{MrUx04Mpk9VPaEd0G>ixRZ?GtsYO z%DZ%nQ3u!`4tk~T61@g`34M%$Jkr*Me|CBW3ezj2!AKGwN}K=x@X7~6t3jv}cQPl_ z--1J}6zU*CbEvllsmi)e(5B52yBkqRjqxhey>}XhDocWB7@G4{(#Rsc~pg{-t@dm$*S*>pxz5)eu*!?y`^P+&kj7X%Dm-HXuVYIh;U zJ)(2p0?^*N9%%otCTJ8lXukqzHsv+0guGrm(kkRhE-WEWf_>RKg!3X8SEGwWW5}bz 
zBN;b8OE{I7ksE|Yl%1hRt%tppEqV-6`VB04!;!{&2FJRMBwHB?{&7t_Kzqi+FJEbh zf)6*)gzRc%vX~R@bAu!dO9sip0M2t$R0QC$E7QF?MvZsFgg-c}DV(+sckzAa|gqcQ3(j%><~x4w2X`t%_D1lpuj_ zmICHLJuE`IZ)M*T3&rkSU}1%m z7^aA*3!XIl4n>$k{)_!s)@D7nSz4$j-p@w{fO=cY-J>{?nlb^y0ZHL_fM9&S2pVO4 z*7SHVY+`+18akxui;zBVE|oqjdg6C(*SGW$B^l6HB3`p`k@szPwjWI}We6BPp}<0O z53V-VZWA4Dt3I&@rIx*ND{6GQ=AOqMAFDc@F9TZLj41If_=!=)#QhBG(`rZBTjIoh z6E&<$A#1sOR^m%kW44;RrKP%i-d$#keU7V;_oD2c7IId2>_lX1VMHB9o+WU1=?L2~ zUs8~pwPMkPxl&vubM#n!6}!{7>!ODv+|AeCXarBy!g@NIzSNEEMLTG{8J3EvQagNS z|93{>o7bSR9werFLCR_hds9T=8~purjzUoWG9z)HkVq7X*oBbz`p`~ME{Wy#tK6*1 z&8XZjxg1HlCzJZ9 zF|=}ad`3C}&mqJqCE!0K4M9=T>P1pfon{6SqQ%(3eYJtz5gLtmVVze&p7&}Zl4Z}Md$ttv+e zJn4{ra9xPk)A72@HT$kxDtoaZsM3A9Qf`{7a!%_VsU|ZJsTljIsp@W!;*7YY*n(xT z(U;CD7@D-aD#YGiB@*M(L?YmH39q8*F1}u>aNGv`ys@M5#Ow*Xvu#h(k6g){!sp^c zq#GGY3NfC+^csZ6lip9oAb_a zPGKx5MdpE`*%_5Rz^?v=4!UMSr(4wQL7Fv-FyoeYqd!|SD>1=H2L>>1k(WnV2f3pE zlB|#;e{RZwGJp|Sz={9@DgdfyhsgS`>-@mP5EYO=Js8ZW`%sPe;C^lEFyVk}snjAh z)z4ipn32Ird&c_neTLWY!lVr`=GNT)n35O?SShuX)HnZ+3fVY;rUfcuYHARuS%&`? 
zTYz@icbpUzLqI<07`{fcId(H;2lWS(jPFV;#Ly+-!I9zYr3Lw)Hl zy6^{A6wclg+JNpBXqIou!aFsX{m^c=*d`R}Bck_@K){1?1~jtcuG~v?-d#z`A=^L> zGjN^g!(Z3PP6k+v4fXE0ssn$Wacx7R+#>BdIzv=FriH4Jor)bG`e$49Bq^}^{ysJuh8L_Y}MzM&?+VRVrjH=s9U+C$kkVZDfP7qhr6kPxw`kyViYyrF#I z{M|H}{l-6Wa&c297t`yhdPla_f%X=4Ht*skx=3`8VCp8>N>%sExAbas{Y81$6`11=8LO;{w@<{cJ$tRwL&I2k9N*8@@lWgVrsC@Xv;$})CLSvj5p4yYu@!7(*@ zEa0G#u_G<8@E_Wp4cxKymdXe9`h&_AK2~H42UP=sp`0wbpi?KzP~zBa5lMI;!zyBi za^O8Ml;hn|Bu94%hxk|PZO#LxMb8>uqV3hg@Of4(@~KbtSWtpQ06X2&KGO~=as4OE zu*etQTR-9^C-KAvPHW8!hX^up=~Q3nlyYQ;amhPX5O}B@K{GH55%$?0q$>)kKy`NF z<164TwZ4?MPB%*Z8V;be*P30Nx0Ia&I1}T?zWCBK8;i4xgyW^z1tN{Xb2PE6@w-s; zXnE!AYI$~%i99>q9T(D%_u`G6BgME}@VH#!R31)CX46!dU5eCV?n74}+u(%`X7N&> zXmgt2OSon)X-u-EBrHrqcAEFFKrVGL&q z31wmKk%GK5_i80Fh%hrK!Vp!IF!Zj7p?9kFcjg%4s5UYTCba(|*%2DqVcH$yl%u~C zYgS@!YGNU3q~Cz4;ky#3FS?`eMLytE0}k>fM=7tmWGBf>!yG__ygG%{cuiXw?@3go z^HHB@`JSdK>55|NGf@WcFT9ulHv~Yj>giD0_Kj& ze82l}gop5X{e^G!r|+p{XF;C_kr$urm&X8cVymg7J>bxT`I!APOp3sundMXxCg`+Q zJc%)3`34Q*I{H!KI_K2Pc@0$%V@~%wW9mT2KAKOKQ&^2i+oQlq(FDq9g4$PT;9S{%auDeGZyuv3aa!zg6OX1;%l=QhRgThRig z(8}xtXI0oopT|AI542ZgTlHxyEnJ=~OJX!w*)f3S_2n(PQcuqgKxx2#Y(r;Q=O$af z8O}b-ncCHALLY6eDsL5U4}X~RoU=1W-WVoF*oRQ^g9g`}Tyh~~z&l^;#$@9Q7?RGD zM*`Z)1ZbO4Bit?9yihq5MIpmTaBjS!l@%K#z1yJBd~!>k?0&(#WZ!olF>t>k0hLy) z9K(|5TyvBLdPxK<0I8j>1}z8DIoGg4rLRE8w*||6Eyu7Bj1yIzs#G1v;+R(D`B&2$ z9WAg%rVK`XjTjcCZuhvHkCY;7ax`h$6}{#Vm^dHQ5S%0YWBQ+U2-Km6I0OO6Z`lwq zzvT*hc}pZMwbb#r%vyXQY$B3XW_$ITn}R3!O0xq?T{$D+1DG9{nA2DOvkr|I*+U$f z0OYr9XfTfxOrXuuoQJRhv;3Q2&UwCG+%V~z;m{O8Ha#j|0PGTf25ekjfb`c(h4oDK z)<*jCtysmb7OdTtyUTL)l{qz6y5?kNU+tR9{hD&jwSEcLs>sN&28Rjo*hN>`H0a!N z_G36&EF9s}y1x03#)5#qI-5Dn;R)-4i++*5%J$5a+d1|}d*X7Ul zxfK^cjh1~~5LMna4^sMO{4gb(?21H%T99quR>}M}{|IILy<~KZ$&}w-212vxggP$$3?!O@YvRp|7toSvav&!W6 zCzT&e9DxEpo}t_}O}UJi6aL^!`k=4);2b9@)-XTjA3WP1eBB=e^0|ZzXQ6X_yfN5? 
z2xj4neMbf1es{aRGwDlT;XqgI4QkDJH6u2!(R}sg z26Qcd&dRyy^}XaK7iA&zqDBSsmE%srt3@ct!LyL8o2S(_=b5)-$48 zZXkV)5yP&Zo?y7wM#Su+KX&WYFM}7vXze25vhF@+6GW+7W$%CJm>g?}_oiUn^6nP_ z0D-$m{KB<~ES=uN`2cx0>3@H>qy9SJOPdKKQHE>_##x@*{Iwox@r9xqF)~6l*2ctZ zA|_t-P)F82{t^nhBP+H9jP($#cK<;mw#{7R=N~F)4;#_Fb`{ZSLIgJ4MK^gX)0s8N zZeQ%tEBVS~H*guU6IX|Uo8^mtXOdB_$!N|7e}lyDvX!h#PPlJD2MmtlA%oneR>SVFsf3?^JtK&d|{wFzW45- zm2acIB64vgiDN0E%IJjre{E0*m4nqwXH+b({A5~~c`~cWa!+vcFQ-cL2&b=;Jlp^v zmOp5#6J z=!laKGLwybowNmL_lgOkKyTUt4Uy#oxD7S8!w<0;Wk0Qx`JkNrG~_$41YWivxU!#PT}gEM@2pqTa~m2>tB;p{A^XPcjn{8Z1z?r1$KW4!iY z;d|@G-L1bcZp6+dIh+JN#K>ON`l@g48`lQHuO-c|c9BDU*gDon${J!(&IQFCy^XL(eC{fs*)suBE%}IGpn^0ig0N|t)LdLD*loy zK@RSbVmW-;tK!RC$y`nN2}wR*PkBNSWGH=wXXl&@rF&h@-uYPW4K8QEGrgD2-?x@PWm~v*0jmwae#U1)?fhR7c zrI1kwMRwMlV2ZSt#t;OfpiA}Hxq+BInn2J7L~Q$}BC7M4+Hs8KYNxwZ)&cgxa3mHP zjhoP7TqEIab$B0DvWJ;|X^9k#Yl)0J9cl;xeY|A}id!cF{a_+YXG?~ohd-;22BMs| z#$1V2tS4{Hs-!*VMnEo*A}aUv~R0H@Y(DJB6%gKe=l1+wak7W%W4el z9tw7mTuhIFpW*>ZI1m;hfE4Yhnko4gcqt!*fcH9~YU6WBQF<r{2K66(2kr9PInB?-{BH{GcXb%o~=E0i&u>5%IR zd$j<5N#cyQ(S&tLLyuMfi`&u}M8FBWkCQO!_yIpHGew;XxjUuF97%SyzM}6>lUY&| zZBSAL$ywtWLmPkepL&Ra)XnsZW5pN4kaX23h+=s{?o@%eyvT{L^ zv{K2td}4e}v$cRTJ)o6Sbuc_Sb9vb2Jx&bAB|AYLSi)eG%6C`j*)xQ|bs1xlM z9=@!8Xzxs@-y4gtZfI1b<+4$ly_1{-1V{CNtOGb zE_XuZWVc@zcR}cnj3?S(Rto*bzUA96g-y00fgvOaG~6q397i!=}MFl!tFMDvTMU_a3qJP#mbTd1ji~WrPEaHtkPG2GjE&{W3CNqAImJ1t zpB3-Y;@GwGc<4i2jQvpavkw#zD=E@W{u~i7nl(Bq5(*Sn(4o2|GP5FNI#aiTH8Cf{ z9y$v%40_7xXgxZ!WX)KtM`xZv_m|FcDto|m_K%UyQRD8X>g^=f*igJpx2@jpe z3}gtxX-80Le@{hiA#73Gpx?ySmv-Q|{4?t;smW9{`$3&N4M@PCK@AH%MTtPYh;rG@7P#6RUQf(U)(#sa@XBQqVS@xoXf`oGFb|F?fDLu7 zRUCYG<3pQ!9^J|X`n;K<)S<%;`2K0(-7~|xX8=gp|1n5X8?m<_h_;Fxwk@p$sYBXy zM<}A|{24Bs*?bpuUUN(3Ib1FR9=DkAbq@0j!~4D1DO7w)p@lQaq+Y4S{zRN44KzgX z;cVi=>{+oCwG47C*I=*1w1Ys_4ZmlT#gMmLF!m$~NG7B@W9Hu7NgaV!DysYe2apak z`9HmRgE)UcNYXzT>a;8&if4ofNwvuV>alfpBLJYkq3*jok7~1}V$Wpyc?ZuPOFAt+ zEEhlI;ztwBBH8|l?HLbCG! 
zy}aNSo)Pm+0&+#R>Fe54Cpz3-b2s_@2tk{84Bu_iclFH9@!^T>0DpqM24HLr*GQ0N z1mhXunMS`4;Q))Llg=jOJ4Lm19!bZV?M+G-sQm^SyEFRY3|};(j6CAl%l4*w+s&A% zZ;4*ERje(nEphajw)Jwm!0i`7S~eH)nSMYHlYErfeS^&Sw&Bj>QRk^6yVYz6*J;LX zOW~iLMn~R<)cej>XOo8v_*PWgAL`sxjNQKKWVZMH1Q;VhYAmw-JlMyBd~mW0pyn(= zxk0_&*y&7cB{x~nZf~93$K{wmun1|T#ePvq^ zmQ8N6!_{aMX5R$237eye)=&Att;GYGz1cAD`MeGJCEh;gan(!l_AYh@XfqSBL)YI_ zJ?^JFDlcTV+)~-^7dt9H>tDa5U&JUTm~}W?hy65kBalUQ-tC^kv7ck=te>3hK7n+q z8mpG*-gp87&*@9?$KKM0iT9{bv>kO?SJ=igNIfJJohneoEb55gGl@EFuID_N z&|2Y3{5(Rz9q@tlB}6DA-pD;Trr)v{Cgs4-siGM+bVf_o} z2m>^fz8l8BGdcGl2c(gGj2=ok9v$iE_nCR689SaC|2Rjs zlbP6)apH;X*pBVj*kfm1N^);mU1^&?Xjkr9tZ~}nTsvjml@cYeYM@e5gd`E`jy1@Y)qrQUn(qwUqom;1_()I{zf z?iJyev;b@?EI(f#AfCBD^R8!`l(?1tj0g*=Yuc0j>ddFG*76x-&vQ9$FFoK@7g|+@ z`gcd!-S>H;&5a1UD1s;ni@(No?MBv3S`BW$7XzxWK<7dLFwRI=b+z8Fh?bW)1#Wk% z1>D{U>b5~GUHcMx2D~-ScWSjbyvIdW{VYjN%NKa?j#@wN$?T_qZ)OWRj-apqrrlrv zrM~vN-fU;>2FO(XpJ*_5Gi6GmgGT1`O-auXHyIkzJ3U z$M}53pHG%Xk%YJXStJ|=#OL&41DH#VkBh)X`O=>({YY0`fcwZvXr({-uWoy?YiNKSRRv|; zY=3Ch-&&gWZ}EhsFbc7{ORJ9Aud6-~33eb9Q(b{jgb{U0Nb60=QNv)SyM{!*jpgTd zt45X`s?CII#$24h1Hg9^P`7#nI98aHkUT;V~KL~+l$}V+T2tvO>Lf!?&pEoFwD=LvoHdP{*_<#&@$vWTT$(V40DO-IB`}o`RI7lRx z*;m{zM|LEg;He(}EQgHRYByX&)NQ7}twxSMae9Fd~jfgC|$Ou*wIx5D!6CA4ThQ za{m|6hIOjG_D^uiP^Mo*VxG{4N#MyOmoaMomuf={;y_S$7;O-JN;;%7)T{GorJ=!E zKc7WC#T;4s$CywdCA)^wdFuG}$$D>}z!03g(+hoivZfi-SvEN?c!P(~P`w?(r6JWo z)nIG^J|#2yl-_7!am-(^xm#y9mcVy~KmM|mTM*i+keq|7; zqxFcTQc0#t>LiPk|*t5r0k{z;vEd3DcQXsl&J`VLInj!gQu7 z)48biTrplqIcHrb^&}*I5Z4i3nWi3p`DQ~MRP0}gmjGUA*D1_7W`j7X(Q8s?AVqUl z-&m<@en|?KDA#6o40J>G03|x@``MY@+Yu1z46Ztm&CcXyu z?qmFdB;@6|77T}jvcY)nf{pv5SW z0?d?-xJ*cpruqn&K8Dc5uVa`AII-1ZDIi*k1K=4m-mlC%bC3Y7bUbz75a)C{4)pS0 zXLnc6!BVxquL2{^K>k=KZuv?&38dINv9YIL|G)_6k9DO8Q!v%$9mFOT6oYzC_^Z7Z zCYzVRb)+J~A`zQOj@y`RRXpv!4M>*`#(Qq&nH!iz zCB3KzNd$Mf0qzH$1Kdgk_tH*qgAKU}+~o}1)m^~7+zKvc=e@yQD}akVrwz{$%MJz5 zWMW85evb*#Ec`_I{u0Su{rTVXnOx5NI$6e1z?4jsWi&_lDJvQWjNoZHLo+2w5&SS( zPB?=`s-1H4dLSvG@ls*7taT(?QMo?Aw&ciny|*2gL#_7-7Q|C|C!#*-Eslem?5`Sj 
zx!_I1wk>Nbf7t-birpYPAAK#9os$B3r?o$Q6({5N8%s^Otb;)o)a^CCD4D}zw zm#trh;ke{Aag$u19-AZOno?Zk&2hq^H?6%{(8+51L+$DFl8tNq$Ljr*VXyW&%}et$ zS4+_%o~a#r`dDB6sK!0qUcV_HCMMj997|Y&Nk{_QPzhTZr<7JbrIJ$!WaaYrS(O-H zXDAhJGp(|cj<+MDVvuDEdqiXYJB^?oCI~B|Bwjd{C#X_O$e|=))@ZB5v3qjxBn%QW zaW9f;sgEhtV-^A{A~Of0i#=|s?0Sy7Wb{A+QN@Nsj^5GTXZVq@%Z}!?&l@8jn1?}r zFb@mqB@B~NL~~Ll37rsO-2X5l2a-+28Hp;XaxgI1AkNUw>R$b<>7BrhD^Aw+C%^s( z2@2FIleSKIoP0<1UTST*O#~S}uha6!89I3eI);MtF3>m6oikBEDZ1q!>`7)T3PjUc zR%8QwN62E7VkL8w*g4u^!G1m+e@7-T9Y-eMpJ#W359u=8Rw&IMWef^ScBwLCnX}fN z(K9r|(4|t*cne(m3lu(L_i5xr`${E(9B%hE$Yv#`g`8}e!mUz!=ArVsx(CcPl}yK~ zU{Kx<=z&|tTp59w>ud!g!hN2$*vg%rf_cB&vL!oP>8rP-K;XrB1(EQXEm_48`ua24 zkovBq9c)N9h+!#$fnBGCSVL#J%noF!@mw^L)PNWhA@4?!Tq~_eHpzS(Og_QtooL6b zlX+N=RR$*#(E}+c9Q+l z6Mn&pq7|O+f<|1@Q|Zr4vLVMr5+@nGxdq|m^-*;)kaa&^B|wA5C3jK#3YEm#TlcqHAMOz#y=LvO zx*kI}&5y>kPyCnnCTVY!yw1@?gF5JKwLZdeS5gWT35{LJ{!J5w?7$Z-N=)W?(_aK?`!o}R6<5BsXbWKWtFg~DON$>H}oBryRLdjt*a_4>7^dfZ%xl;dNq;D zhGexT!{ z4gK8ugaHZY7Z>k5&45;Y#>9D^vw!>W!5a~x-2S=t z8!8z#^zDI7h4(M1DX?Hg?Fb803JXLOg$4c{EKogRflBnBK4F2L&Gc&!mkhybU$$_w z{Y5PBYzPZz)K~z~nb&8~@VrW4fjZoDV;9`mTz}TsmDa|JSg=W3rT&8gWy^1eP5Ple zsV(YkxQg7r-S#y9E8o@f3zLaPz}P0=^r$Su<#&<#@wp$wUjyI%ccK3M&Eggia5A`(PQ?1-RZYopt`@^xP6$t^4v3NRH9j2>*iE!^ z3h6r{@7I0-+L821iw^64fau_f0(X~5u52V;Pa+bxQfT+G=`kP|a!J<@qmXdt zToRFXxB3z3IVyn2D#Lr^ZmF{dbyNKSA`{Afvf?ib=o6%O;prqHShw9r<`m4M7U8 zsALmts~;?{?AL%zTROMO z#v^vVvF(HugAtoXv%_+&tE*ljb`Z6SY!sW|qp;)ph#lOf9^|=w-Y1&cSTUm#tT?Ck zAQ!VL!HSLeeL*D*a-LFHF{iSUa{Uczubyog2Hk;!yg-_vfEC;1y=Y*CX9uiUIINeC zm$xwajp{R`aYQ9pF-)0W;Os0U#*Gb=(ls=;bhxFlB34{DEUf6?ffW~ND$U@o%@OkG-l(lv0_-q zB_fTngvzWS_KaMo=^4H*Ac*Va46hnjcQm_Axpj5bTZSF9BduwYQmZwip*B4jS?_H#7V^w*X%#hc| zRlRh5QAbntlcnYB9|unht9p@2yRE8QUojxTpoWrgz4fcDrY?IG*2b)Dw{(UL$k67hh4t#=bt;sbCX~>=i@Rpy@5WVyMeD zs@}(|5@vJ5l&w-kqIM^Li8blb_YV&bPj}%DrZ2V7F*JysWLZ70(sN`w%dg1HBx|p5 z?o-V5(rXI1wysx7Pz;se*er%Kecx8YkpC+x>0tF0po8ixU?Ky<+{q^GhfThCgF_ZN zL)Pa)`xn$6hKnVY7%s-J*rl(bExyZeu^ykVs(dY4Bc=LVzz4sq-^Mv~k`3pEtUIna 
zQiMdIJ520H2pMD-#QNdp^TBAbm{f`gMM&qaWIU?bR45n6W#AI(&!}YF=L9G4Osm{* zn-@It=_45sn>&CI;Lw39YJ-m(N7zz&pFO;-lKEZ0FsJWhE+M3pQgAu3&pYXsk|d7k z*+tHFuzlP!=knq_a~i)maEvPI=pyF4+_dJb_C2z&J>|rkC2VYJ$p&}SU+F~`Z_YR3 zkN$heYi~wgZ!Q||Y!Sai)y75)-QAp#*PDy`R@EV|H$PtOEpEh1RMJMwL$5l6Y0#~& z8jzl-{%+h1bkc@D5VVisTQmnzT4tlg`5n1MABJxUbk=opI(s0av$wuu&&N%Iu z&Z>kvPEeZOsuaDA-)B|AMQ12&2~`QByG8U?&(;mYxc^SylC9OoEkzRP;fmhY^fI#5 zEg**)61`O^daK43Y8v#28{5nqTdcJ-)qPIO-<_o;78RTpai?LO+<*I4zw_U>m z*Dsv=!6fa8Yd>6fWIRU$v@hJ)NPWlJZ`b15kJXE7uj;sQ>TJpa4IZJ6mcG?8<+&KA zakAqFrv~n@gcE~x&CFC?R54xf^DvV13SW>={CByo*pNU{x9dfc+S`ZHi;aqKc!KN8 zXO4*?Ah6T}$-Yvr_d>pV+NE}N)%xuONQZ~K7mkjz$xSI;RI8nrOk7}~1OI~K1orsiV`b3fGsQSs$)QM(FGD)4?D2a8GbB3coGLg9x zmgX+_$1q}b#~N;*(1+y{;u|um3oC|St?A6!N0O=Nuiks(WhgQFYL*32VNxa7GNF>a zF|HEveT>rVM5WkC{64D^Z_^B=ZQCl@o39o-(X(~KH0r<8ug;cuDlZCnh4x2o;~hECRLY5_5?OSP(oVGad|D;4-vp)l z+or#9{kD9YX3@wpep}Yvr5)XYW4DQgq${z0*#Czz@7Zk;w+=Nd?-t9gn#g;0JN>eb z>*}9zuSIxPRnFWl7)IRkY7WHg?R$)QPHhq1IIohi%-qgA@>z(vpptpsj^8h;M9x{F z6!~3LiLvK)5c7hbjRdl82o`V8CSdfH#qHiFnY0%eaoEEYbQP0sU2*Vry&r~E;-AF} zRbf}2f0CBx37w?K_90?H@R`j&8Rx3iThI;K$Z52#w--wQ%?C}|1(oc=MLihBR!VU` zw7y6Yab)aGMf|6+BL0uQN%*g%@(sy1dEWkPZU@@=BY}+t`MQ@c^DoeEHy1|1f^Eh-fY07D`8NpjrQiAY-z8EQnODArMN7$8CSY! zaOR#IRX+0n;0wx7W%iyTst|&P{7cmartmWR`JRX=^Y;`bfR4W4Tj1*Jdzv}v9QAg? z7SJ7;+puJIk)?YSM$^z71?}Yqv?J0U`XbL?ZGzT~t?h*Nat7^26SS&+vb5d;>DnHU zdh2gCLArg9RfZg`Nd(p^T=}i>do|a+inOJ}i}$8IXE><_pL#%PZS>?*5|FkZ;Jjyg zx#TEqQ0PHn5J9I?xj3d{5! zv=LsNcu=8jIjiND@dvG0I3)x+6sH8FTd@l!-ULD_W}VXhAjqn3N$DG^lC_8h_rdR| z9w3^mJxIlMh7o8IU)CNxo2=QhyYb*qRY&jWaQ(rkQaHkva1R1BgE|>~wV=6@*;`b>^&C`TRst&)AIn%St?rGG2$KRMygw;2iku`N? 
zWw_AQnqEhSSXYS*G5ZiDGQ>EgWQb9H=lQToWQZ%V>&tY#sdwfcY78>(`TRrs^h{oi zhZF&q&c|1qOoK$zmI*MA4R1GBFn-l#42d9~NY5o|MIhvPl_v zcyG|+{XFM6B_gUT`us2r%RfMwjy_EAxI_1bAGSO+P7fuHQpt@*}vI%7!AJse$(&pgUlanv4*j-i={<3i=i zeA7a;V%m8DqqWjdLlofA5M^^gPnhIIl_($!Dv=ZCRg*=ysFH0krftLb%lfWp9@Rsx zTUH4=Jq(Y$q-PV0=8@+O|MJ7x(oa6J*V4mlYyDsgRO=DRc>R*``VsYr>g$Oz9_>Oh z30O82aXd{|A5xWWSmed&d-Zqf-VozC>2KrrTFG32O8aHCe{V4KtlnEx5-YZy>ozt5 z9CfQ{{T8)$`QAu<@68N7P}!;uS7ihXXZcNMYgS;ORsFSFvM)NXrQ5;!xvP!mmQ~&)%S(_vX~|-XRR0qRWNB`iz@~$z@YM5}@8;eGHq! zSOngr{Yd`q8>+REznaKjK}Z_CkGDnsREqq??^BdgFeg;f*YJJNDLuRFo<{w5{Jw0t zSMT!_H;p6{di_3>c1^E~{HYZAdjqA&AEn8kzKi^+6#0u?U#9C#y)*ZQ2Kmc-KL3V& zdWQVH!5aO1d^M22o1->0|Ax4$ZqBr+rU5jTLgWvqyron!F&83~izt^W7KV6l|cZ>4bM&R8O(P5jTTO!kGxekfWmCyG7F6C0Wm8R2HkG1m4}|ZR z^<9)r^+efJLfP&YWz(}s*aG^W!C$^VThyrs_FB}2@;do|2wPNM$;cV*`IGXhF2Zb! zzG88mrBv@TDrIq<)`Ph~RHs5QO%b>B`A00+H=%{eqh$Qp^t@VNzzZrF@OJ!uky5J2 zl1lWQ3o3zT?hz(Ub;dqyQa&GgT~x2ZDl(p4z1NA97D`y5C>z$;DZXiVlaz!grrwM* zp`VFROj5*f=9x6&Lax>AI#AnYtFq1FQWC|OSKm;SZS{u8x=JaEvZCL4z8rtQppw12 zs50V{stkXmp=B-#oG|Q>NB0>vB=J%AKbpPVY^fc6G>&B-ytZNnZMpa;@PO2lDrF5+ zDH)wIy&T#&*w7H2RsxB-G74Rtv7;WLJigi1~Wa=hTi*-^g7x?{9p-t~VxTHAEO`w6d3^~nmPEj#i! 
zC0k=yC5ySO#bwKEsSI0rj7L$}y%1}>*e@;RwdkIhEh2Ird8sTXCk19dbl0{m<+`D4adzM>N2)~1@C3kIdJ(DY`b>GQ$J)`SF()>G3^{$ON7Y{^7zS}a9-CWsnv zHTT=dhO>xEAc(T*guMS*7|w+|ipE9`O5<&!Y6ClMaaz11V(0Q5Mf-=UV^X=J86}sf zBb03Q&Uph}G8~z;Y4r}rZ!|%Ar2**&Wo^ph6t}mSPAq2*0TFz`#}myx4TH@JU|%1`q?+^)_nDhDx^K>}$gJZS?_pxT2D(t0&nasGlWQ?7nR1^~5C1k9}=c_7FTP{(m>jmm8>$mY!>N*VaHCfLSr}pU?1I8({ zo5}cU5G{^o0ZFGK$vV0#vG(agn=BD6E(5LT#f(ZO=A57h%4wAwZvKL&X8KeH+UEYC zZJfdkr-WPS!&v~r*W8Xswgi4u7&7-_Kl$2z4Pl&NjKv^?jk|bRNBAhkx3^UK2%jDd zzgDxjahLvX-j(6X$ZPkYoiYnYTp4{WuRyZLRDvrPk%uQ#vT(yp&$#Mb zc9*06JO0{idRDKY%Qd}@ymE!IAq;P*0|vHE$v%Cp1*t7JzvjQ|bP^Btz4q!{i6QQg zghyXT#~|rNO0mm1N+Icteg}!C6dL#$oxo zV0nqJHjBY4uZsiOM-0|lDRyD%Zh!$-*CUW?D#6)RK{k80ARBQft2tHEco5!yR*faIK2lUQBM$((}9rq-JN3V zgf+!H~ zC3R~_ncGW8>8FIMOxfU9m-!|fBp*7W(^0MOCI8jkK;l!J3*d$?Hf21)?5sDTMRKI$ z{dH|+n&-!Ls@haQ#!hbK@LM)r=`BvWDoJ=XvjAwl)dJ!GH=Tr?pi<(%Fdx$KV*+Q} z9Sk4K#DR%prs16-O1LmUu15)zuTqmAzt2)i2AEMP@kk}mj6-y%RcFKfO#1IM{e;Qi z+|dsa3#3_q9`+Va+;6D`xbcE6m1Cn!Q;Hi;Qi>ao$KOX(GSR~-!~9**tBW^kkU?d= zG3)!njm202jS=nrM(?AgSihMCFWngNx7Dq!cg1(d*aX7Zl?cuyl?X)3l<5RM4_49> zu$%)idDBiHCT_CzMGAWu!<04`tnISeXZYt-Ld&N0`-a<{^55y3vcYXOwOhcaE4;%T z-=uMh(n=TI^2=d*ptJ|G*pQpzcv}@mHK#a@1!ycq z&fr?Qxj0@LDh6m=z{>3O8qVM(o8(kAEL{5Ye?{XeO%73`y$$(U`0bR?uF6X@ z*-M3-IW{d-ZCa{E9-RwP5zEz1I>)Ee@8L?MR8AgJo|tHhi!1<=aClyz%^$#7?v1h4Z*gytK8Xj3XHFLn>~U;9 zLqVwRMYXfs09B$(X9RNjz|cQ$7KS=Qx{t#9D}slko?B`sc$|C5k0>kL6+HsUH&r4w zY^a0`tT$EdP}Q}jpUV1aRkhiqm-UE&Z>xlYFB}Zt$5d)FQ<{dVM0Yy?`yA1;rGr|x z6%TFrV78$z9xQHX?*%BsSfZB^zDbF;qK5uC2?SI^9hTMDM54wny0PWFvB{*Ru_ASt zfT#Nu4OzX;-X=Pc{`wcO@g7TV=VvZ4xuTq4Uv*9}@>h5E{~6H4%aX`H0`tAf?H^uv zZlxlT*uikAuUNTn(Ze9AOHA~;@A?cGzI>@=xG05QeQzTsB$9jy8m>&KRIl)RxF1Is zk!>C=KSFw-aKVQzBzy*}yz=~~+n-kKPj~wMvEh!Vag<}Edal+PuLjUXBP6xriYndz zn*7HqH+J|o4kgX^t!276D%=Xvnf$bGm8iKy$(~Zx*;hC>a6a} z8b74>xFRH+#`dvw-e|pOQl0toDbJ_wH{>lO7r&K!z<2xPH(cs|h4!a4Zol%UO-tbW zUg#aDU({0g-XWG^iMzBkYTZkurSSbSVJUn?QDZ5X62+1gm*UTxmSXOxsIV79cci@R z#n9r1$>cABTJp2p)N-ngE3?Bw#OtoTOkuix%UAM4kX_ksSO=~y7V2?FP(HJ}T!zgs 
zJhQ$XSr>$Y9eYk@<8HDG!iYZ;?u84-xGrY#I#B5RQW^LRfCM51uYD9KqSefiLUEgXn;Cozk4I@o09#I|D@(oqGSTQr8FQ9f_8{3dKj@_+jar2}sx|204uJa# zivm3WYt;uyo@2c+)!T5rQPtD6&HEx@seUZ%akg*=EQj5!y$s^Sw3yt+xy-eiAt5Hm z><-c|M67m%t1h_H(u!tzw1#hX~GS{b&6O6xqne5cT?f921w--?XGvpO+vkAePZyL+PnS_ z_N*xuNzzS|bhDe1UEa=$cRVGGSZe2|Rpb>-psw1c+3(k%cT0k2JZrjeoAp3o^}|)< z6gHO+I&*hMw*ADysYP3dwFopY&QHGNfzUfn-|)-o;UU5RdFa&NhpD_t^Q$4EUAI7&Fcb5N+4mAj88bbu>UTcVtM{ zO-{mZevKWSc`?e6tDi1=^9yt0#k%2s^uQx4$$OcaQ*B)A9!7{Zazw%bY~cuWgMIPL zqa1G^PTpOAmOI4)kE-2>2(OKgPOG~FWfz-LiDWjZ65*7r8XXcJ@m#PU1`10c+LrvP zr~eXrI%4|rK)uTw`dRn4dCU9X5-6T$MpJ~Wx~9hTgUJq}uZ1!uQ{=~F$3%z0W|t7c z<&q!ni&IkNZXqxhU~(iJ7Xb2O{r%5h_wFLH}W;X9`P zRH(x{!^B6)&r9Sb%9>7l)rj1QtFU|y)AKtQCt+GV+zgtm_lE#h(K&|$n!ak zRw-hbIH9GfI(Cw`sHb)I24*6_K30E=nU!Q|w1jX;p$52xG~Em4eaIr&%F!XH&0B)9 ztGlS)0*FyO`(JG)E?Aq8!ZiY}uF@^2aUdatlsDVnU4JGUh(s#X72n@_`fNJdZtrPl z@|^OSiQ`z55Yq|Q$a4)HmtZ06>6S+3o(G?TP%*P?^}!(wb+zyX*^z28m9(eF-*GlQ z{?riLSC>tT-?sx94TM{$^#hZ0T_j)aWb2h>OSNu1`>3w&>J9JQfbJQ7XD>2Osv(Vo zPyL_Cpn!5UpxESIc|p#{50gCy-?1BpxXV6fn{ytE6cH2{+K<uzp5Wkz{RCG$Of)GEmg>f%bg>Z<4o1VKIbXe(a(Puh9ZD!JS*iwqHl@pi>m9PR z=IGM;|<~Z?NDw@)V@?qlDTFx+;H}*@!N14J2?o`o@&U!tIxdCzgdE} ze1mP_I<@@{#D~{vPrZge@z$5u3X0e8C;MJ|P5g9^YTnhTd5>z|)u?$-?XFnUQl4aY zb;7M^Q}N^XUPU_)e&*VNv2~6&g>+H~thEZ8GObrKr|#xOuRd7&hq|lmAJ%%q1!){= z)s-*kXRTT-%WVNp8^p-+-AHBa7N&|CQb&3P;h?+LmdsXb57z2&WDhc2l+wBiuE93E zUg9n2HGpA`gN^bzm01j{lf;>zv3%aj_8@mQ}t7+d7p-_TCDkDJtO2iP5r=jy7a zZnNHwV}V0p6#RSELfoqKe<_X-vat7NQ!@0%xMe*J7oG;dwS#&Y%tpTkBS0eq-Ry_; zez8q6_!a~-inoTVVjvY8NDd`aB6ySA#y6jQnw7C^lm2Gg6TF%(A~NzB6y$9#)D62f z2!E>3|33sj%k5R#djt|6-HS~>9YcLBU9Iluiu;cBLg?yD^>^DenUY_^ziTB?aAM8R z9wbVFK)A3T3TZ(KFX~VTfrO*mEjx93B9pSTG1Ykw%F}Zec@eV&tA(lFTFEwZx*g_c47s3riGxFB zVZh1KPnBUX=11;Cjd0r@V?foAhF{@-OJE0)xW-llWVZVc9v{xx@4 z`loc8PyQNu;HBE@_(MLEw*KxuEw`p&?kGlDgX7V434uC|s}dqBx^l=pe-XtX_Pi<` z0fi#;oZT^!EO8p8OWb$fGibFlA+G88sKWX0aSEh>j1j@_8oxY0J0wbv;o?t zKtp{Ai__eu)@z?OI|Y^H0BD9OU+Gj>802Hty)ZM?_5dzN$^M{}ewqEQC2UM^h2Q$% 
zzE<5VnDiO#EWxvLk`K|7w!*6PROO}$vM&#W1`yT-sf3Vz=~`N)agsnuwc^(w8_*S0 zY4rtgXIeZFFg z%J4*~74~675$Ma2CyHb2OK7oFG!eaVWJE3Eh^z^c%cLFo@^XmUuzrNdO~gv1nf3P> zedqaPzb2Ern>998A_Qp)WoFH8xaK4ydUIkoYns3Fexy&s@MlO78O{4m_a?=!Bi)o3ImAQdW0e3O^UW=ZLn_r z5Rfz8i!1RBbhtnSsCij6nPVVW5zW*zgQ>Bmu3V7>lLP@_J!_WBLHk9`#LB;{H!xG^ z?Jt@wHZ8m@T%B=&(+|T~`+v{1+Tw50H6h*cTd;poBjb4GN4_~cPd2A~`7@p;_4evK zPhyg|sb&Vh1kj4PMrq@+Zci7_lVWW)1_pQKY-}B#2D*t}Sl>4r>uzRaozK;}BOAkm z5%?y|idSUlZ?H0%#PX-GvaX0R-mUVbp=0+pSlQ^Yy;&LBhMQ`zG7J`-U`bB3zo_{N ztjuH(;`K6F+2}wME3+)L8!P*H(Wub#oX8VmM#joy=L<71?)lN2Yf~~?$zK+j+K)Aw z7*`X}@NY6x`wUix2;{O15J)`VM5ZQdv6xz6jtWd|t(5-$E==wBVQSq4F4DFu7wIgr z2a`AL-enSlg1j|uf43bGlZN>jxNCc&ta>7BG8qOIf%~zu!J9G`(Vejftb7=Ac4HAz zfHM|RDY6JO@M~icb1f_a;%l`@)aOO9h$(iow*RM@%?5)w^>dvB+}>yxvvL@xR+l9`M!v-AEpmOe(2% zj%paK$aAiH-236Kd|h_q>W0U5PX+*K!ZAQq7b-8C~uK71S6H!yTC>T|d&RH3Z7$8IP%WMhcV3+pR&wxBh_gI*@C z*;E^gN4z%WW2`8*J=XY8hBH;PHhIL5<~gW|5=9RNHBq*Ej74xb)YWEue!uiY zwOx|#?F_NjWff7sm1~Fjp-!HXOtjQ{Hu-fl1WW-GldFALBC#+O#-JKEQFAX zbBdAfL1WLcUm}g2=A-ugR4nJ>BWUb`euBo%s|+;u{581B5s%e!#i*pX^w#C18g43=H&%{Z835*B6&&tJc&=zD72C!M?rddOt6 zYTEs5Uc0{Z6@rC!cHYRi8iL>UT>BI{ce3=QgVna9!XTL{3N6O%fJ=Fwg@hc?#E%~V z=^*@wk6}A>v7dm&p0MnO=A4iOQ zuTR!l67LpF8W2uPtR>#CF{3Txwiq*Hx($+Q^MHei@t50RjotEO)5sq3MmMtA<>tLm zsyleML$=v09)eqV&G>B0(H)>PtUZ#LkuRWwN7~9HJrDU}dY;$yN3%V(gS7LVkPL3c zB_2({2h*`B%(@kSw+HomDEAaR1Z#y+*;Am4V1w+BGl)B#ED%qHPPhgMBUm>`E36V6 zih27qcy0?MoDF+96tAtuby#gjYxUYg(R0AThS08%UW2=h0|O#;l|UZ-TSRihAM2hP{p~43ZU3Yp%F)b zv>pvWTb|Mnz`g~a4MlJp@EYTddD(v8I~j+0xSi{ohi`mCSB=}p&Q9ih&CmTr5@T}| z;Glp5fc`&GQHW#tZ-@gLgXn{Y(1ngSUy$GAYQXryKCm+fB+$jok^qGdwa%xK_f;}! 
z?=sS!BsrOF(OyPHKJjmCvv|2S!&^wRTl#4%CyrapANj4J`J4gPh2r9SqF%HpJ_oVl zc~|mVJ(`A@wt9Eqg8gB7W_^(>mUD8?6Lj-O+->+l5w<|{y3+TE{NpH4s0=Ck+MeRV ztEa)!XC8gS=m+Zw5fDhVXYVYPhWZ|_bv^#hY8&k)7^S{Zdin|3roQW)nj+o~=1qO5 zKH-&zvsH>d=6{r~;i-uG`XA|G!N>eP%|7Pu(c{d=3}&)fGYT)2*)DPk zFzE z^iy6qOkPgPB>}7O)Mf{C?b&-nd{3`;g8lQJ@Z3L7^b&!s+;$5ZsU(QZqBIdjc7H=D z91cs2l|1*ECtERA_KBm`Dsooc;*n=sPcnUk^HY}#AdZSP=>kQ&`aAD0*~!n7;X!-& zMW!X#*u`8lW3kuS?F>u~g~z?AgH?~8q5~nauvFyp``;-miDUidjwYXFn-dFEsy)HB z3@^AP5yi}uXj$Y5bua9P4uJ{X$xOedaxCfB`Y_8f^>A~&B{?LH4&-`LF1k_>EplU9lbt8POrP20I{D5&7y zvsO-*v$y1N?~sFE%*@BO4LDUUy(whteZkx0)p0LBFRjCfl01%7=&tJDOR~G z_E$2#{CIUG@P!y4WEjLJcD8>xw!gfy{fou+FE+RTxi;~Qz%#0RD2Z&A0@L|zeHVS- z-_#-@AqZ3L-gO!@YO9@^C0k)&SY@^z?mfBQ*`bO_gypo=OOAb|liP;M!dKZ>YTbe5 zBCdm+GUkAI|J||z;dPzWayt5(__7tBW+6yU>U>J`ph~9Lfdovs0t+G&v&9EIuXAb{ zDCP!bCrWE)@ql(GoPLY0&oV!PYaAp>ezovpN9}+xtQ{{-IVJMV$^u}t2xk$lox&M| zYoZ6te;Z4SXEl99T05kol&+K{O8euTXR%|Il!ys%@2Z0+p`Hhhs^m~58V~G< zh{8Npsh!Ezlw1-ab<7gyX7~vTSuZJOksvX%kFw&Jbfr}OU3-m5h~Mcq`=psEL^wJA zun_@ey+kNb3X+iGCKZg=qLmkgjwPWkLdU{R)SYKeOrf}%nQRB0$**Nwqk5^0ndiWA zd;f3l-#k}5yZM=&&7aGfKi3H!{x|dHKkIEFq5s$O&Oe(!CFl~|*8T*2_ueQT zuVpPOV37h;WWz}%ZN5u*ViAxhqWc>M_dn zT=HrB=h9D;NgpoD;t?FgtiykD6hlDlgo_G*Bxf&OJdu{2OjD`g(-fPBqvNS>n+o-X zD=9aMF$L07%Eqn{3@L%^44)s7ycCo@!3#V+Ez&9X~m4g zKeI&B`gt0lbT*KK#%b3RRgp+kZ{phZ;*4s{44ws#_`qJz9T~G)Dk`I#AFT_(VEKz{ zf)tlIG`JGa6-^Yl9*=ZhoDpSl!s&}>oWPRLfnfDEtRYfzMP2&EpC)ab?@VWXMmRak zHHh>i2IbXP3;`>1ygZy>D;Tk4Fp*UYei|PuCFCK=;^hPmt&k$-h~x}U#~i^^!@^g^ zHvobO)Cy;I$58q(OyK5n&(pDIu;fHL!41zOpG0#y)yDO~u0WO&ZWb0RL59|e6%uJ! 
zS>UU~9s1&-loh#w48^4ZEclhW*BJK$BE7F*3=A9(1wS;Xc_!1;y2OIpkzpG1V5waP zLgG$T9UkamVnT4^Tcye38{Ma8*9F6I?fN|84%W`tsK zU`|vZJ3+#CG!w$a0?Uyo5QG#rzR8gX3oOd4LE{$+jW41r4r?@GP1fpi-GZVz;9POjdr> zKxkpM(;Z?Kr{k<_d9y#ytxgay%dH+g-$seN`%15-bq(JYVHKXO)x*#LxKF;@paxj@ z*&_bC!cCbMs(5q*@s;=$B{7{q$=KK4)Ruh$-ForV_M@h@WeEIBqPM^%8E6kd!$k6B zt?oy6t}aw-KoYbt=7e;fXR`&L9LSVUFSw63lx15%2hy&)t=4p)q`1m#l*S?YaUYoT zDF8`rfRUXp5{m0m4mr9h#B3d+P)KeDB2^Y5QL2GblHzBWbp92vxjei zaCJ+&;#~_EzHsraGTOb32V@)Of#c}>;CqH32yf@Xx*uTCmAZH^`MpCmT75^>>i6Q^ z?e<-Fn&|HIG$cIMc_!46Fh?Xg?DhK@0>!%p>i1!U>rTm~A+z(PGVf&v(7L%IhF^0j z27%~Ad8Ca)tmVAIy>On1oeBAncFd8vnO zfTU{ok(Ym6z<~P3kO*J|7O*0KfC_*b*=i32lXAVsOL#o)ESj#du{x!}b_YBbA9a|I z_f==uSLk=zSDlT6TAgA!Hz0R_tb~BDaGlA2m%b4!>5^B9|7zSb_OvqUGf`Jx47OZ4 zf_@6Ny`)9?q8DYiB?)4{zAGZfM5J}QEeLLZW5y(wR0c%C{`Z`DC)B??Jr@sU8_Rnn zJ+aEOI+xum2l5PF4e(7vI@h87JckzGqoEiXraE}SZ?xK-p3fU%EA+>P7V?JXX-K{v z8am&hEw-qaF8HPHbdC}n?>L+M3_BU@&fj~X0|Dw9X1_PiKCgTBwdY^VmiW@1vwzS+ z*wtoVwX_|&(w*CZ`*L2wb{ImA^NwXhkWT4~ty@AH^l)QK^f$JI3Hdc|2@b#HTLLjypL?em|f5rJNf5J+FIjI!}#WFTrAiB#9apNs{Uo3C=}Th2N|Tf{fq*gCSyG z>AhN0zrq&Ci?IfB!-iLc1Q)63PfcNQGT(olopP#e4%!DPAn!5k;BFnZVODSf@w0fD zzd%hPF+{%bVmty22|H0fEk>`nY2bQLi>B}3e@Q$Sl2L&Vi(IKv@c^*ze36Bg-x1^( zf<8g`=tahfoPq&O(+te+HUs;S+%H`Neo1Qq>KwnMv}k(apMm7!k`>AAYLHy+B@=!z zLXlk11KXs?28*+-625p*C1hkN$2M)=C9v&-BsT~$Cp1W}uEkeZRcgz96JDj{-N-)Lr1ut|Wx4MojVS7>W` zJV_;KfYv}Ih+JWCZRzJqVqOUcGC|xe4K4T^?DOa4D;??e$=5k$;pv5=GtibUB;bkS ztB3A%q9QRVNJB#jPz6pgCGgvdOf!7e*eC6gVrP=$oYkLJtU3PuNDq)95@?B|zsI7zO*{C9&tVoJPGAi3$E}X>Kox zN!vG_V=lVWN%KO|fXFPt>dgm)f<5L_g+PkrHdm2dLfkUeU|d{)BJ{zS+^Guz#o?FS z>UQaD$N@=49=~)^FNsJj=gks}+AFGz`dq>3-J7=I9Q?9tUy8Lc1XRraSK?c}ps1>r z@~W4!hF9|{w79}{K-h(maVQg**Fr6MLFESlvxxAZiiU>dN8+}0yOjJkArBElx#m@y zw@Bq*G>s$sh8k}cE58{$bPur-a}U&pjjzE61pc7$Z@q#K9%{^^0m^{L;n79~w>+9f zoIFzYrrLGV7{csGWl-+FjC_}CPykxt_?=uW5nx3QOJ;$od>pY9xb<}K2s*sc{>Z`l4+);uUk1uDr@ZUS+-$IRa2}28HL>m4!B-*}>$$8ex|8h6ux@ z_#6x*BBC?BPwXwEof)X{j~)G!<{vBZ4HVV@#%0|4qUP3`Nemk-W{ieb@n^?g&5a36 
zcdu)C8>_%6;|m0Bk7g_x?cU6inn4&KY=vgj$QBiohBcL);~#QsDz*d?*RUithZeQ3 zXFQwWH#R5U@kVx?wv3gsk)WRtHqGa;TJ@N<{bA*4VsH&B!f=;3$QD$`Zo5Eer9`8(d_RW5S9$_Pk9N%hZljpoj8}T{P zMOU9K6`OI)4Fk!h$9yG>`AW-}iQU+3^|$jeZ+XmuF}zwB^Ekp}KITzkL?k)4Pj>G< z=J9UM1TcDq8>vh$^G0LL!`<1KSBVz!nAd1u>wy)3>Zh|YugB+{H|p+_7?y6tiV~J+ zlGLOr7B1dkLH?&V=BC~;8g6u3>$#G*Eu2wp7+F?(E3bW}+laJH?Y8vu+;j=T1Q~5u z_8hmk9a@rb*wWQ>!f3T_El_Q+6g1s^YQRJjFAUpJ?SeNLY^G2*tZerU_o*c zOo;pcXX)|!Fs(KzCosR@8vt3(!6&s*nJ;8e?KQ$-u^mQ{{L{HhT-lIJxJc~wv0Op? zRQ~)o^5><(^WVsyU(BDkB}@7HpniMs_dBzr2qP4!@%)qdbL9x!*Aw3q^@yc$uQZ%^ zTC%_qJ0uj5f?>%9$1-MUC^DWrxs&81)0i7afaKJ|H=cXWCJx1#@`G!3aM7S7A4;bb z3mH#OQ%5|&23n1h8YC+sP)0=SJ$fme#Ja!q13*@Bmc=+JAE&xcMnRGGkJ&+!-NS2a zQ?9&lvf9dZ1TniLM-ksO0S-@a9z^}f#BHfWb(1$qmF;iB#b|#M&pm5}a$K{5Eq9bs z^+!}v--mhdR5CFQuklnixAIkL#_XuloJdfXCUvt(1f%@%c6*4sg=1eM@+E4`ah_yw zp$GGVT~WAuNw*TteT`*Rmk6LIyRp41BmiN^@-bt<=CV_L_&6WPQ|!sOlKn7DuxR=T zo=n9jI`iTmNLC*g1AW<^Y06Exp(quzOBlxy*nda*NnSq}Uq2r!$b;txi~4|*h%Jpj z`-Zba2h+d!OqN=k$vBsRFYk|5Knf^rPs=}sYm1aB!TUO!Rn$2iRm7!)i}*-7^Kp>- zdE82;+7>xdqG==I^dK~na{eIH&+c^qIGFzFSLoWA$h8hxi#UXfUEB4(N!R_c>+P%} zK?g`R>fB+iTSwCK>OAbM7=h4P8%U;l6!+>71oiqSU(fQN(`y!qAU{g6*HKLrUSbwD zx|?IjLgtTWW*E@;(R8uVr+wuN*i*qNrA?s{{$ULneCbR+UPVs8C4K2=2w)#fog*F} zU7SVtsn?fd!(k@ZHIr-yIMl%qySsX@&v6x*7oD|C|J6^CLfTiFF)(}4O{eHN>?QCt ztR`M`2lsHUvEWNr<$~jz!6oU%#^CJxRvpNhJg$OfI+1)5$e!2)KV3ScpM2;b+kH;< zvU*t4u$vnY%#{78;OD&yMC!;d(3=4g<<-OnEghieKsni38`>soTf8LS8*^xj~F_>xbwh8!uFV)bY=(b6`K;M^a` zD{Q9t1x%20+>VqGyu5$r^+ zOp$)kR?`Bq6UnJv2t^d@QnQN4vP8mX(E{VHLJ@zXfKE==up=1JheaiX1?u{;Xo5KE zE;Pa5MNmx>3?6_2-d>2b)f^DOv^a7gN2K+Emw-s8+NPZRHWPfl>C^qGzunXQsc-Le zI}C%HEX)Q^v@bbQ$Wi8WCOymFzP*6?vIT*$ zg?^XeHx*-z%`t`#W^@ri_uCV$+WhpoxV5Orr63__$-P`z_qy7Ovw1gF;wk=7hV zYq|qL-&^bl@Hi%>JCBRLeAIg6W>hw!SfmKS0m!BlIj|1V zD457+>rsgZ;kq6l@9MKv_?)dKpV4N3uEbokJGrn{Ydmmaa3SNulZWW~nvMk6JaPg9 ziel>&ROBP8NYKupjuScpXFwy+(p8u`g9^~-EtUZQR`ZIb1%R1-U5o)ey{;jtBl+c$ zdPq%h&5}B%hde&h_8GN}M3c>@0`U0-;UK$ULO?;33EyRXD#>R(lOltfO&`7UZ2mpr 
z4pB}Di0BLf5mL*;oN7^vbE>+<$?{o%*2%&uVQ82ldhVm=I31>lOpn81bs~!#*9wPd z_Xbip1Y+Afsn2EdmubC{+@0;Hn&PRnSl|({J}vptnH@qaXy(P_M_2tUJYtvp=zy<$ zT|F(H7tF)6iz=l~i?`leu1&*YGE#RuN`>hW&xTAu&Gg%$h zsw+dvC59D@W|d?Le|`*N?oO}ADWu^3&V=*hUC-&_i6B>ef%rmiAnd~4K!H$LW(GxR z4J$~L)0G|r6E)Cqu|=d3ItC&t5wY5vPg9^52Qn%VP?XJ>{APh=HZZ$*g2LyOWOofh zLWa{I+toFA2?+|F&L~8N^ggo!g@}L|$Otf?Mfw1^^cjHE7$G#K4#*r*{`L>i&2zr9 ziOK3DEnDap2z)uYv@=52*+kkhJs#Ub#LOivA+$SJx;CW&U2Rp2E-z_S;6=%AeM?tG z1A3XO5`g?RtO~G7kO_iqkPSpCqE#6}vzY87A;{1!iXg%e$w83}m@H^-g!~F72564- z7$k%;n=&RD6Y5|r6AH20jug^-E+;Z>XPK8X0+2>!y?{5!3mM)Bh2ohP&1BHJ$>kZf zZj$V_@O*7TUO}bLc|HsV-=Y`}TSR{mu710KH;bJ4*?ArcwvuruQIl*p!}YJen}a>B z!S9_xxRr%PAS1wl7J&>x4nPjI1EyvXc4oHrE#dZHhT@5p8LXiswed@aOQ`teq+GOIkZPNAkIE>DmQK{Ib>#AX&T5{SB=h2c;Wp*Sb8C zCnHJzR%|W!Q?l|cU7l~k)&fYDXXS5bd0Ms>%OTlDm-IOCyLh=sC_aGXlf9ySSq5C-H2s&$l9hR~EAHCqP?eBDG;h9>DjqGcD@Ix6cn+bP`o2*22d<8!yQ>q}h-z=qyh0T?!!p0P9w(~4I=8H`{`GAdDN@y1r{7VO= zr8Lo3dr*T+wf6=#?|n)1Ri^FV`Iau023}O}UIK3nKz_THOaca$asEy+IiJalfo!I| zPpnDuic;}MHQ?adN3_;)O^MjJrKPn16sYu8G zYn=|NB!_vRSrV({_l9Eew>QC}V}i8BrFN(%M{%zsFtlNlAkO_nIk~re+CE%^qz+JH zYR|?~%J__8w=xd}r>^K#lF$uF7<6Bppu$-@5Hde;w)e*JBeiGI42Pci->aouQqj5o zEWo;;VCrWCN}PFL$fI?Z+Iat&e}NE4I)~b`Z#{+*#xV%Zh$HEVBv6hEH65+RwBUBs z8s`9#%cdWrvVeWq=>+=%IYp@-4~HH#O6~Y@Q0RBFo*&2B%|TB^$t%vtXS+%=67#x( z^$21Jq5n1@$wV+Cp;X>8u?V%v0@33CpnqbiHG7}%^ep0Mqe0<;;>rkA<}AshZT}*_ zNlw93&Y$pXG?N9AVy+;UzLNk%aohJae$4Hz9?axzZCl&egofDI?C}nt?NYL*KHBRL z3Vu&CrNRWt%C@V#Z2?`9HUleIgyxx4#~BI0z0Sj0^F&cQnyJt76WiG(h)|!SkZmaC z;Nq#Zf)Gz&^hI$8O5qlUMDu|qE@7nNq?7QfWs|MkxHg@q;(h6E@JfHs(O8Xf3C^sB zwfn2B#tScfH4re8)opb|a_on8J~!Cn z9!C`?=$}-0geQK{HLt{)p$<=x8fC`p3}+5P*rRRUrV)_lC`OXVqf*r_CYiY;K{XJD zRG}m5mna97Vly0sPU^>McP3w|a&Q6`b6?En#>Yp9D^AJN%C;T|v@wugwm7FM zRfIIr6|4-I(|Ps2Y{<~>0Sz`>mMdy|h~%7c*!@b3L=-kN(*1W7+@csNp+{pPNSaBW z3p0^86C^2Sdf#GGj6M|;tSW$&pl*fwHqmxe^gd>@sw`aO1=}W3Q~Aw6)tSCdz;&iS z)%FCd5po6ADo?z#+M&38qQoQj=eKYQApc426mo@$W-8%WOynx^3IW|ck!u^LVMydU zC};ox3AU#;7&{7{&$b!?JBR^Vm}xSbEQ_t-4;xmyS*i`bJJkk}jgDjD(pFofd1sgq 
zHQS(c>lp?ea}9-5gtQyv$*EMA+jc{7G^7TKb8?A^1R1i+b?Zn#Vd<44Rv+P<$Kb~* z`?5<|5kDaV1svtH+L|}#D$J_-N$)ZIWw3OwBqzIeUB)`i8t;ZGPq9Um*qD5}dbbKLiy1gg{oi3JVdj)I1ek;GUezSTW_Ej^RJ_F8%lfndnnHBJ=B z_V#6S=EJao)RY$EbenO~;^G(77vG19s~GAR*D29C=@{w(J)z zYJXVN7#9xt&&2i%e2hLY3+k0t**OU6gsOMcfstu8mBT|~K^<8w7u2(8WO#fO)NiUC z6^$6`6J24J^tJ@`Alco-1w$=@y4sKP9*Ij7)GhN}5G5z1i5X#YEG7h;T2MFlm>IO7 zo(EDmd;*I@Ca4oO!I|dKLl$qLtq{!2A=@pKinha#_)shK%K46oTb2I8u z2BL@PXK5&ut6{mKR;B6+VNEwV&vrZK27Td1xaM3Dn_^oQ02rh}zS0&iI)`WXY9>8H zjDp(-m`N$GkPvP;yfoZ%$u`k z)~nK=r)~=G^*7b%*R(fY=qJB~CwJ~hq(3N+UV}fBl1%=R_c*J%<^_4c{2#M|c|X*m zC561XhH{Dd^Z@DF=8kFxBnVj2^>|S+ldlvt@GvP&Pyus~mQ^Y@&Zy$+avuHB)nE6k z@`c39@IwC~ec6VWmOJr`5yjc?vA{%F0XNx%%i@q{`wCzI0ijf;#df%ssp8rkTzAXy+Sb>g&2*oY*8 zIVGUT0=P)(OE!^Z`d|sG0`3CVtkwQ+3{>y3XMV$B*Mo=hr3Fg*GsQ!dqq z%j~G2D~@^ti9#*m;RS`o4hD$BawER@x>>0Sh-X84$(I~@-c7#DW<}&FC~B56?`V-S z&uDd0&g4uE3gAlZhJvuETX_`K)t(8Px9lQnO7JO&ntT;|j6)5%b6+rI>=iS(10s}*o#RKs9VUV7$Gu6u^goVljZ;$I z1qi*yl|c?0{Yj4Ga3|JP=Zs91wNVQMu$h3GcnYh0if7#0K&D3FR4_7*(ZMt<`G_8%T_^-pxzr} zbmxxnk)#}qA-;c25u)>K={3%y|=IYh$WlQX3IjukwJ+b^bs3_#s&(jMt`GR zE1;F5iJR$yhr5ZcLtz)>IdYpKH_9|fneVCB8o#+=fwOpwM zOx($+8TVQ8$jG>$Rn(c?J>-Sno$>;leHVlfck)yQqN>R77I`ff|ELBPS1Oy#_?}69 zHOJyAb3sUS*%$#RB3<_=k}si$2; zuB(UuEQ5G^t>>xwE&8OB-!gdJT#Kry6w{kBgI8Fq`cW}eOcT}GEh0at+PpK=E2CF$ z?Iv9UcyrEI>xVKflkwg%#)zUV48npHXFSUOOfDN0pc>l$ES3OXZ==5W^2bLB^KXYB8LN`4kLi`!b(W%{;I-BBmzbE5fDHE5RHH7x*(w2hQq|MtsQE(BU2q>vUa!#DQOw~~ky!Pi1<*>1I+*{vif znWxFx5jL^xPgJyKi=i&LV#JpKtcsXm1~K#5VdUH4njJ>Hf#U9M4-eW3%8+cwie->J zyi)!FX>yM$GqFN1F|=7+-Z<6+go~ch{3Zj=PW%PU3!On8wNW<@v*W(osKF zW;=$L3kGV@>aJ^46hu#oP7Vo2xswB`#s0U70unL=5nsk+BxJLr zvK}>*6I6bC%z{LEELy|~8gu3t3pNP?D6~u`GMK|-&sm2}Mu6tgQlBF;q=Rz{m?B)v ziDl31OuowE#v`wys+HP}$tTcoeC*Zhtrh2p56&DP&ZQLx5Xk%&%!gnf9-7YNYqX@w zqf}k>X`K>C}cRLADkU4ucNkFP^M=;lzBFX zoDL7xp8ep^K*rvs5)NeS-4-0A!oZc`Ito)rYv>z75mYeKT&B-ln3)bS*ntZ}wgcf0 z{x42DD6d-Bv5*^0O_{}{v??k|{WPsCDfs9@#}**V#QI6UE>Vv{-cmTXCqE;laV zWU7Ol`7eg=Xc8j2!?^b0=Et5_wvZJaEDWO)p%U8~A6m8mS!!otiY@An~7zkZF) 
zFv-c+Gxb{(HSp@TbVB(yjwvG%IP6Jk3zMXYarUs}|Eoq8X`$m77=M#y!8do{W@*>F zwD&)6%}`l=+LFJ>dwM>9{*nB7sqp+G`STCw&)br}%-{cT{(LciUMf6a%%A@{&!e2e zZ03F$U0ks%`zQpJVK!#1omPZ}Q9>8FI-TK#?>MMpvKAPuPrUcymDP__x}sIocb&@P z`KG?3hckyEv3U?W8W}ub2g%T}!gi5M45w=50#t8ehtVRW?J7vcC7KuqD9DBuSBX^p zm7?`QXu;#D_bT@8J5(Nimbv!JkYIHo7;!gcLtF=pQL!Cfj85`=JmTR5V!=5dTjWa99>jbYQ4{m4REwg~JdX z!pJof1F^NPW3a^Ru2?Z1t#qaKDh_FESNppp=dzs%Gb1@z*ws=9+St{$5KAz5m71mg zU$I9{mL?8qo^_yzae?`0yu94@j3`)$fyjPPVoj(f*Iv=*GwFN&uc!3};>v&~We}g8 z4u*>EFh+$Og6BS^#st7LOIz;<0=!YLA80SUQrg#xj{_8moa~^w`ez_{Ki#fqc%A z7j_0e>{ySX0qqTm@`F?!IwU(5ZdSyPA?z7i2&?n#5b#!K@|w1W6K(HU4QZP%b!SwX zcLwA|6k^}qQH2;OXsMF`l-d9xRB?;@50nFqTFu79J`2DEWvKn=d_QyIoKOf zZW`!hcEBUnp>d7L(85AGr5w+P`5x4rZ6bMwhu=+lOr@QRUTlS6!)S?5{W!z{$wgNH z#g4eL2-ox`-qYHRn2?8@)Kx&r^@zWYw1dBzUp>~YSqzR;nKpQFQKlbc@S0w+^#D2U z*Bw{j%w4!cPL|dh z5Ddc-YY3lKaN<{bH6-WM;^2}Ea`B?U-c%P2>bDRAX9&3MN5m-jBV`x-L>?`0q0Ipa zY4DmfYJ@`-_)R=%CrdwmR7_20*3dOpZs5)U?g2pi2}>L7S~`96k$zU*w!IP;6P7&( z&Y@Q9{y$@|In_23j~AaT{p`_DZ}QmL^bKZWlAN3jkFUC?N;(U2FSeS{A<9zn-$GXm z>^9tOn!K*(NQ-);b|*{#cECI>lVe(NOGOK-l`MLli0aVXUv=sxvD}r2TU2m)Fdnea ztu=5PRa-JkfG0cJ{%U^c+;wvdIracXFI`C~T9xHGh)uwDD{n93)%j&bbC!V>WVntn zHiM0_K(lx1$BVLXB=0-GH;Ed{cC-Xd(eTjuVkzFDsw?lBo5sj@Ds-fr2%!t#Qe1$e zOE6H;hTSKDunnWslbWN)02{s$UJST&H_55ATZZiGx8okSmtYHqCuTltC%iV)>DD^6 z(x(_oqeWZ{xfYVm>GM}ihog;b;+W^X5s4#WSjJ*W5eF?G3z5Z z{!2!(;g~(}Wa)j}3)covUM!KSXUM3VaK}TGU=`TWnfz{N{7&U?9@uKfzFQ2b9?08+ zmh)@8wwp1kpiRIjOo{;o?SXhHZ+BUwd_+a6YdE4pXce=cU%wt~P@>PaFCZ=pC3FX) z@Wuv)&OmdB7ZX;>q<~x;quw|G%wtD}SdK*@*VaI4v}UPkh~bUVpkihr^FzIh4y;M2 zN1PXkr>WHmz`Kt%qKF|2Tndtz#?-SS>cF-E$3w`3sUP2Kf^5?Tf_?BvWR!VOx1h*S zCL1y&UY2|?A>4=Ejm%2kbIJG=+^z@sa0Dwz5Ke1GaEd9w6)Cv-eGCId=5az8>@pzn( z%f27RgBmb57>;#Fa4_uhJ14|~Z@f?r9tb{@n8$4*H6iXT~N32-T6 zbaZr_asKEzw}0F_Zimx9PH4#JJ-5AxB0!~TtG2qWN`0sbrV%|32rxx}0YOv{AgBli z1h^cPx~D32dsK$os;ye?=lh*=t-ZFk96Jt9JUpzm_u6yK^_z44=I{OcP38`5B+72H zH`cY9`5Skj7qc0ItXGEE0(eJeypX{aq7T>RO_;!qsn}!xIkzT=9fy#bOwEQ;tg&2O 
z(lMpo_`WtK2X!PaAyH|Axp@Z4pu>`7hls|JPv*vnPjJ&-i!Q?5ML#gvg>jKFVC)xq z+`$iQr5m`g+?F8Ok*YzUjcGXpiZUK4iuOfjNHRrHwkzW@O)*7>);S)tUo#eLp}}Y_ zs$#`mbOj9P2y_KgXx$E}LcaQ0Ufl3Al{lc8NFuTm;=(^Jp+vOe?@PG+*iO9p=fida z%^i%~EfRq%RMm`jJf7}&L%Q+RbcP=xdCNwu)V3ip@;Zq`BCrjGt4L>h2niwDRs6c(MLL0><(=8J9a76Te+9M=K)X zTbzeya73EkBj3N(G!P4W9(UxJs zIjy0Yc{CxjcaouvsR5Bfs1s|OOK}*L1EX7p-DMt1VgWB-ibET&n2Tmn9E_P2DD})h zMC{;7K+gV#*w2X?7ZO0)I#ax4@Wna>IkLhV+$wfQiWnuxOgrRhu$WdvL41 z8PDXK@#Bp7()-X2`%;*XcB zC9&^!iY-A4enE~T5OoX05!2agab5gECRu*$au9|hG~vAhIMU~{@=1RTo+l^`HG55a zPVdQhMq@mbC(QJ@O?ynk(CkT0`DN0C0#&BVDD!?4$gZrqmL#?ym5Q>7%XlNfV^p1n zjh(|7RwYpmm}hVRit1_t}(k8QZCH_(8#^%M5YeoJF5t-a> zJ}h)wf}cjqTu`sF{m21s&e$ZY3f&C@r!|zYfwMLnmGo%UmQ%@)Xp{HCXz})Dzr$!- z;IoqMBeq#k$rY)r&Q;FSkybOd3bhkkHI0s}R79HBu|H$zji}pj0o(B3th1>tB#Fib zsUq9dQ1dHgs}NnBD5_{_0bOiWpopyokRXsjnWYs#)sPD$4+vDMNelBd7kt`G(r3CCt)T)d$ zmIhsD5_zpk=}@4A99WvwYWN~Yoyt_Uv2*e=m7b#(gltV(EH}w4=o4;Gpwp*I6{#c` ziW!$0Q*Nj&Cq@}}=STRcpEsr7DJp4_e}`mXZ!t8q+ya?DoeW-aGVUCdP_$zR>@GpgQ!V#R$pI2dVem+ScCDxuL7O zr*~tr>F|Sx4j$OQ?}7VYao@c+@7%F>&-QKi?0)&(FWYt3OYgkn_SVHnhKRD(Cz*wmNjMkJ+)W83ybh7w`AE)ZyqrSxYi$-a$ zAW7RlL#i5H-EzNlT*tQarp-OCW1iEgklknpslB1I;rPuc_2YXj*RV(8S1H#vCnLFL z`#Qlbzu}K*wIoBiEUX+2p_)Y&U?faQeuF(g#rN&#+y5I6w#y%F_rVxRitHE3IN;rQ z0$@^gAT6#U%{@+n5zxI9E9mdw=k}i0=H%Xo8Ly{F%^sTcH&rh^CS72MP%w@9TWTteX5TYOLj-^kA^|6 zkh57Ihtc({O4|9W!~EHk$niK*4fg}@~sDT^#oGfo-$M`B#s3;m~2?WDd?9Ejzb?^y)h+% z?ytp1Wtfz}4EaKEhU7Z^_5GRW=asbT?)e)^cW~@f9&ZV^X{+AkG8@Ued#fSv)l5id zR2@iuli7^^M1TC1XpRni?Qg%W#KH5A*4lJ@SlBj_43O5-y&fPg83!V2 z*J9=gwu}wzpJ3l$3JJGB3Sh_mbn>@Ye>h4>-nBSHlgkI3;iG zMQRbv?@a!cHkyM|SO6Y=Gr^8JGww~A>Xw1BEICU7OB$=6w~QrCOkt$?%0MsikByXhT~!DIWCVij zBo4r%UDeCFShQ(AFrzK1^OX+3o#X0Fw`3z7Y^^C{h?7z>XticTppdS^@yZeA^fS;3 zooq`-=?BzGmq4*zWQ}h?CHIuaK$A7YW^qpl7g=mhqV{5+QipUkiad8xpGi*|*Kdmk zj`Pm>Jk9~tVX@CQcgkWe=>Zn|6kTbtN4VMyr0@Jbpeu`gF}`e(mo4iVogmzn4*Mm` z`Abf>t%V#?p<{em7(b9Lc2`_%jvNk)-Bn-gIySP{s{r75N?jrNGZCO^U57Y;lfMRH zn$w=cNQc`mx!7Ymrs(##%q)9VyH?|zlhs4ZJYrkRP`hECcmE1ONJlunGjxBpeZ6H~ 
zPzNmYxp-Z4KlANg=JWKEEptb{%u9Iuyv&^$zGx3$;6W|(DM7|}4isgm9LNPZy{OMD z^Rn7ZasVcaZ4fz$G(vZ}qp4GdH>J+xZqb!xUgWA}UeI@bpVyUTp5dyOO(J|u>mG+& z@|ROs`8&!!K(;cc z=0yDDNY7`%&!vXPRG}oJVV$)%c)aVOwI-M$Uer+EaI`W6a2oocu0&%D=*sV_`W=j4 z(G=i9(6z%&I<*7Icqbl-8uZu z4OmCY9xDOa0;~lD4J2F8%I9yw`A_QfY_WBsWF$iUPjgxs7SSP+-$E|j)xxP@VUnVK zZYgr*zjRwkVp1=`U%N^4y}@EBWyJbs@^lX_lEBN?dCEF;7KRMF%%4k#zKU^^!*%5# z<18D77rY?l^q>R>hJh)P007{TAIxvC4YIxGF9c(d6X2DKuOTpSUh*KJv#Q^hv3tc( ztKD8)i5iSsYXiWKVfwq)gkkte4qz!p#BM1Q0D&;zNowmST#!l0T$Vf8q)$ zMyE55U{y)dR`&y!z1=mS?FY#!0okTN> zGMz-vq=?X%X@v0#bUx>Ycwr4lMrB5zWRr~u>mKT7gGaE}%3hqA52yza5a`(o8$Q4q z?m{QY%q-S{nB5V9f|(@)N#I3L0gBJkVgMBIG%n0Xww;djNd-t3ui?Rd?LGG!?;KO+ zb;!?on%tzNl7cRO(+4+&h1pO)FJM3zAC$#QFVw!PTPTLDysiJw%XO8Dh0FEX7K@D% zH@*}Em(hrly#fK#ak~RE=iW({zxM%+WjvgI8lwC-qA~rO;vugbSL$|ZIYFs3SLs;J zBP(G9V)nAg!+}%tkY zqOk&AhrZ5>VA&T9m9krptFex&^k$NQw)JcE=V|SOVD(C&svh9#ALQ6|$ymqrV4v%1 zJjs<)QUmx36%S=-J8PvD)xw6EP=JPh?o>5oIfICni_(G@pbS42w6FFCK35?jjw} z?JJIljC33eOM9=$!^+0YTz-MI79F>Z#6{9#OqLzwgyd>bUYSc*;^)>z~AktBgHU@KP&qFn~(#i!wA%Pf(2*^;htiQ;mxFmZ{LC-~duWlNEl zEeBpURp4dBy5-e*NvuFBk!`zz#G?NBM15V~c=i)<3NT}C2O<>BuznbNuwea;1;A61 zWB7Ui|Q=YP=Fk{?GgA&?PJid+?>Dapz~!b0K> z)&pDQ6p@lig|g6@{vtBjvmqH)QXOBf-V-D`sq};=w z{5$*g9uBgL-(h4yy0%TvejQ$N7uHYktny&_%uRqpWb@LE)qOhtZg2h3ed>B|z3Y9| zy|Ghz_$l=uSI+6!!3kaY;pYkBxv6?5kFi|=@Xl}nLtsg?kxubRU0J&ct|3Zfe`Q>s zdE%6QgK9DR7X9HGp*8~1Wyv=;1gXP^DJa5AlTSMtFE>J<;9br<*H7)ol^sRh78ZsI zofd3$7?^yDP`%B{MI(y3ozze;-BK3sY$rsFNzUTPWY?B42@O*ZDfF6^R`L}lIN=m3 zvX6FxF-o+zKR@7&98IWe+(TlIsRelVsV*Y%Lq91c@iuDhd=!$6^6_0Eo+DJ+v)|*- zJ>St!QQxSHYJXW4ul4P!jI~GUu`eBWbx{kI9&|^w?n6s}evnfrsX`!5 zw*{Jz2UYF12?WJ5gfn|7Vm2_=Jcm!2a7GP=L8c%5%eRxco9xMnV7X_nz>UvZ0<1p^ z=`B3F^2{(Ps`<0Md&nc3V0!J>_$jJ1_NAx1pXi*tvNj?B>&bhZgQi5_zVvix;-qrI zQ%kIPy864nnrJN5_SZ?f>(G=Yf7=;>#I2h@wu4a8&B_&%>0a7ZNAl+_%uJV7!)bh6 z$?l^>S(|cs9uw+2pAUgertUlxxVD3ts)4{F$I!K7)VnnnBk2G+g0XL;VmjG(q7%E&diDxb-#Abydx3O+LE8rJ|o)K zvE80CNa^m(jA{ej|99Z5d-v+=z#K8_8X;ps;Y-49>wo0tHkqUbRDH${L?R3kuYqTXD))CSOvoa$yZkDp0QC(zEED 
zanT_bY}!Y9p&Jeb&Q`LERgEjj0*1AGT`yTpagAcx7A*+7ywr^ug#Z`kaUG!TwG!np zB>@iw$k`0T_`JBK*1aQCV4cryb7Mz_n4Xm`)uIL}d9(wKZvB0WAHO6@Sht_*R!ivrFjAczL5DtE7Zguy04+zJn5rIl?V>)&v_j8dMQ zMCp>YKRv=m=NpAIZ4IKj&~qIp@?j_;{lgn)muno$<`j24H&kXDGP5&~rH0j1De}0A zJt1FD$*GKwm6WCi`(_8VtWa>kn=+&BO0eh=Xiw(F+npx>pT0U+ZGCU>9RUTK zQmxD0B-#i8g2McSP0&VWwO)5lY1Vv6m>1BKB?k9*WVpYD(d`5<&44#SS9PZ|u$7yz zCG3Lsd>6n=@YO0v0*b0!W-{@P_wVefXvfn`{&LK=<;TQzy1;VyO5+1<3iEXRCia;g z?xu}){8bN!Qy;H>0#K{XV>gPWaivlV%V6LrYCqRw(12Lx}#0Zj0!Nr$=yqh}yt_2tqD z^MlxdlT(u;(TuFa*eG9prHFUM)yJEq)$ds&v`%l+0E4@SSc%sZAVHW5ki^q9f+X5e zHTVIlt#WKp1H6v=P;|!J4g#QDlb=o>)#S0-Ru6eY9I{JG3t*3sY{_bm8fu(gcm~#G z4Un-_J0Zs)XaJF81dLhI*;U`;wY|{j*iT2*ZxL0H`>R*E|2vWT+vvck5OJUa4yb5h zw_wt0O8pB-@EPlB#)>PZ4*@5(UYa`GT&E5!T23Eseynz@cwTMiT}VD5l~e?RdJ%;+ z$tlFn>IRWXVhvR@Vm;b`X*kjbV@ES>*vcebhBi!Y7j|@}Nu&*s>LP~Z(5xX29!P*J zz*)qQotGjG8vG6o-eTWl+0fbH8Y#SA(^JLGEZZ04fdSUZ12SH)e^mp00OE{36rXO_ z)6YU50&e)E0%HayL(Ra=kf97&h%4(x_~>TivZGiViiB!QI@Fo{sJfb}YQ?q%H_`f0 zHu3R~6GtL%(oo-iw1162Du2&CQY$dDRYP#J^e{Wb?%{UGA>w(Wp+kxiJZqCIAzA7} zGG=2KXYaK4-UBwF%?b+|qP}uFMK_X)O zyR0}sCT9bBwLDwOIg-m}fZR!ko*`nUXMgXPleD#N0+2mNUS!9=QJndAGwW(EzhqC_ z)`G5{eoIf+jSsYW6vN6p)_cY-SN3K49X(SpI_%ZLV9LSsE-zsmgnXLZ^&sSiWqG@Tw>4GLGSmO{<;G!%vIev2GVZ(jAh@qV=;<7wf6%j=^C^%!x@S*Rgrmkt z9=}MK4LK6EEzx!%SqFY(d&yot$6tEoy=zZ9nxE;Bh1$QYeWqO=;t=sEk8avcjN-4K zcyl7J=(GJ}R}(PXTdGIQQpdJCef#_LCnS^G#lI|@_mqFk^=zS~F}A2v`6aIwTt6QA`pNp^owzs2=Eloy zI{{P>bt!u;T~N>`aj+DiP2LMj+gpb6K3P8;TL zke5PT@2d3R@oAF-qBFIPNqkWF6dXQ;L&=wbv?u&%dsv!P*M8?mfSL%l(qvTm1xvk3 zy2d2!knPZ>n&wzeyTtJwIt%a=qY>);7ly$_qr2!8b*>)zRVP;2wC@oe_C}M2h5dPM zT+@YJVPU^^{VyzFvanyft`^p-s0sr0(}%o$^KG9Fphg5jQKfuux~S~eUxH3i?XbL- zMu4bg9Y4~D9ciZ=MxE&x+9)yKCOSlFV`}OoP*|08>Fh+g)+2*=SG7k-r}9JLU*6XV zNquzGoBsK`@aDHbPW}K@mpU@4Y;h?B%jrht>gY!Dbt4vA?yBeoM#5*M7yRCtj;ud1 z9CO9Qj50By6f3*_>Iny#5F|o{JF< zuU+~K3E;SPyO|{kQ$e2%F*~?ZHWrn4<%YdNVhoqW0-@yd;~&X4u5$tdfa@&qU3HF8 zt#b_LZ(K3QxXc2xN}cJrsf1&^nCO#Dlp&pn7y#~#D}RcVbaRynq?1$SNEPnK;Fi2M 
z%Y<7)h$eYOXL^diL5>i{O-q0w4THZDFiglk0z7-l^GzV|>qpBjd(?>(B{}37TxO3` za*iYr!}5PfyqQy%uuU>VYzla=`~`hF>z9b<3FSEE$WKrUl{n)CHCW2QD?qVScA#_v zC*588p59w+=go^esZ}Fcg;_qYet0#>C-K$aEoWpL|Uy z9OM1xlM-S6nX3O!1YJafovoM%&sDUGI*`g#tc#{kwrJiySJBMK7)bOKfd(R}oqI}u zQhBiS9)G61Po;)9dXjJHDL*s8Up6E)-M`j>&B?=lt0(^y8A^een~z}Q?@T9Yoq_fZ zWMoR!wm|PCQsesL>-#vdCD=pHw+Kc*lI}H_;Wd`}Hi*f{ZNsR~w@9Lfdm%FGyi6d! zAV7k=CmD`zXq%8l1x~DV6d4xk4R8GyMG}}sWox_IDG5tc%KX)UkF(DK^zr}tBrT2X z}sI%o(QlzoxVt8X#{=A#C1JyWhc?`zSoMRbD9<6QR6pdToP}}lz z)h&EUJ8a1?7R;8@H~+tn)!)4}J@y!9pl;=XmM4A&Ye(|P4&Blb$=hG6lX{K32LD=; zzHeUdr0<*Sk-is%w_#Bf!Xe?CZgABm))L0WY!3_8rVB|0st1y-n=I5I)bs5^o9iNp zJH|WICQ>ITmnn2_b)n{hE+)7xqLr+&d(wdzu|+vUo>t!D{l^tZ%aem`YoBD(wPAIK zlk-%36RBbGO~Xy?^`@b=8s<6jpgPmh{9TZE9gC^AoLKXeB2~1D#t3W(b2!>2@;a^_ z*qz|LxIf4b1K5DC^w!5vI}|9wbJ%+VMJ$!=Q#o0DBmBu=*S9!5j*#Z`Ez^YwqSWfl zMs(q>%E>^B^btclNnMa5YE1KPJ_YisK8SM)0xjbTZ!s;&Yd`>%!YaB(^}FVmJc#6p+5(=yj%n+Ka^FiC3X*S`qohLMGHs4}5iZ>0Em_ z9UVD;s{P^8Pe`2HU7BpyLFpZmHThw@9nP>p7(ie%lTG1DieWG&zCW&t>s>iAB;Vd5 zHHNn~-D5p^o~l0n6Gn0}`d4^f!y^^Vh9lC}sCQ0;p{^8bty@S2G~i1qI_v>(;Q5+J z$bKV2;JP=F7)(t&1`?*}8tOCJ_yge7WsD@=WJBO!*feZPi5_g8NKuCykWsCnj~(5% zVv!B#$1w5n0O$ml;3GD*%qaIq8$lxwaJmDlj_{Y&NcIEgk-l$kO?zn=J`odHj>NjCG99L{6l)|x^>@nqL$wMPP?M@7qhLq)Wg#xao7|qv#q8n_)^p%MrJG{A&CR)ZGb}IG-^fz z*=p0XvOI(g15C4OQ`%h`ugKtJOEJ?X5I6fk1bak$kkz*~}SO*;W zQ0ezEWXsLJusyHqAyaQByjj=6^kwE+Il+UuHW+K^){)91 zN|c8d$Q_4kXzh62PbqX$h7LYsQ5JHe%SgZ3T`Fm`I8zUmei9H_!SWNy@8P>K*`zvZ z8G%qkLz2fntIIpVwb=jH>Ay^wfG}e#plCVWAG7*a6{sx$V%x8+9eNlrLl+Kdz#xeR zEF>uu0W{3^lj-+t0;Js7(q2DLNgk{A)fDDqQpjg|O9dKI3_5OeHJ z5c4POTyW&zb1!a1cckVd|1Hm=c&>HC$f970o-K<4CX!``T^fA5W1c(s$vcZ_e9y52 z+U3*u1o|!_e&L%=eaOlLxvB-`aASz+25j8UPWgPImk0s?c-qti19^EUf!4lmr6f!N zc|F0rQWEs9mDC4t4^jakiRl>3_Va=%U-2J0Wb8QvbG+^BYSqLJ=Xh#|*fZLhoOHZt zTA_+o*;TaI8$T@`Vj9s=pb-dIkw%aVN-1#(UEOKN6IMggV~*e#)nftRHo9xJ^I8sM z5n(}GedsaYPdlV!Qhm9BretIyu$#+mEwiVqz|G&krnvch@gVvAA3kmx11>k6Fn+s1 z+4Ay$DaSw_u(*HloQ8Y>eq{&6y(CZS_%)qQf&RR<`uGohI)lUZidb2LyaVBoydxIp 
z$c(z26L@k)pX{S%zU`1tHz4zvVHHFvOC)%`E%(T?jnoxWWirty=I&(Tke8NsOC)N$ zlt^U329apCQ6#c^Qi6+y9AqN8=z<(PeKrRGW^0Nqlkn>tJs*Le@0z-&_*lno3n+dEIOxNs&m>^C( zLcSXunW88a#})j6c7v)DGW?;wTEP>Xz09WknE2{mub~j`Ph^}FL1}}nY z#%J&VH2H(IyFoJ73s8MGpw9c?9-*|f#VQNP4nR^|hEP?!88sVJzo?4)XBlsl9G}t9 zLm4z=#R25s+a&}mk*J|OmP1P6ST2vnj#4~`4z+RO4sL*VF#(Eyuv85ikF}|ALa2Gm z2{%$3^lJl(#TOETXp;iay)%pOA+9g8h6~^7|Ins_<){T)`htL0>=j!EGHWfOJ^3T7 z7wA?PWY`kF$Sw;Ziv)?endT#ivw!ZgVbc1=m_u}FKpv401~utix|wa3poxwOl$;!& zUqO)WZ{a@;R4ltUY}swKN_v-+H0;Wi;OK7)Vba}_h|)!z4wY&P{8}cIY^=#U9By+S z{MHK=%P1EC_+LP4a)_37w;Z71kW#v&cpB+iO#of!)2JzYcV6q$!k3qrb$;I zTU>H30FgPc9$`HRJwm&uiJTH{t9h$eSGJ|qDb$@IqQ?HY+X*?!BWgN@d_-ec35OI7 z3q;8wsU0SR{cAXrP=j_*(wModZy<@nYrH;SS*<&Hk7qUTMzAceuoB9&R(y_ih0PJv z+Zd-OWG~{HwtK_GY3HJ`H?SeZ9m5pr&b0Gp&6UedhdC3i6-GIPf&-^RdAC}NNq+KA zOr?{E!R#_^kPTF=wG$%pF19(iEbd@Rgx!W&s)jFY9A<7O)C+R)v&YlB9<4=XtwLOa zyt_(ar%)^K=~GsyRdg_$e%<$;{O%2z|FV*bitQo)>cNtJZauC6>r=6!Yo~$~+IkRk zkN|QTqSRZ()pHEewcIgGCt1D8A71zSCI|GI(~u{x^;`%C8@GZAV65E=u3g$_ipc0` z^5hFeLu9Ln_=zx;K3&4QVqqNRyywdrN4kvhPR(3`GrcdFV9O$k$%sdlN_OqC1ePzk z?NUSAdt=)RYWt^dyQH>rZoAmfc5iHZUTweZwu@@}MYWZPgLx97;VQF`y+z4lMP)%o z2evT^YGyE%5KBwd6bS`&FwRJXOA}{Q<)zJJw6VdE`?fM7C^O);+*ZxO8wv#tvKnFD zrdUX#c^UH&tj$EBhe-<38G$8voTC*i9+wQ(?Xc8vbPZB!r);klpvJbOKlsJRWCWK7 zuJr2S{r@N4HzB7ZWem8}cn}q0u>tgAU|46I5Ebovi>~3l;h+PZZOaRpNi3MbC>|0M zh|9N#jlj}jU5JHM3V}o&Ng*LZTa;N-^ClK0FXjOJK{Rn*_u~)ir1<4qvm>QLjCBB6 z;z_$tBSa0{hAg9kp>dv~q$S)46A5d|OT-oCQWkv~>QPt?1U7sPUe8#LMZaoKe!V+( z`^sJ+km4?kpzxDhvG&6HintRk?DZh&g36iU8jL~44`GxUn3%2*GQ%h-jp80YoQC^W z<}n`+?TA<`q!GF8w^7&|LEi7PW#H8o|DNcA?oUzi&- z2xcO|55%&%sC}VyFOdg6xcT$UYLGwVoMGc-MFOp01|x>@2~ z$!~#)%?tneI47UY=0sMvhFu>y2G+OC*`~mO%-I%py4(bw9Z+v1qJS_cXXb(Bc;_26 zK>^z?3Rm+KEbR7NWhg^`Mi~{m$_EZ7&QOJ8sbWG1wVcGsq%Y^&1U7OWOo&NjaT>#6 z8fK6f6=-ZmK`zy}6KSZtPLEn4PG{X_G$bLcdeZ}yQpkz0QhNzyR`2q8kXC8XWTT!T zAB*h4qA9!&PLm5XERMNi*bYIHJtfJDj3q6Y>3^OPk%_=LYVlY>mmOg`Obi5{@*xo8 z)5@BBh~NMM=b{71HT96XY+ORIikho|%m9|rS_x#vr3}bOm(Vp-Pr=jQq`pbS=drr_ 
zR>>10uDAl0CNU6QnRl>hC3$@>STJe?Cuk505VnEupz}zXaG*`V2DZZ*qABieA5J;_ ztUikMX1ioTs$GGc7b!=mCl>73CYRbRLF!tf3|`SNdxOkGWQiy#M4wb#J0u!)>W!Tx~hUAOSlpratAfE6mKoq1=Woy#l|_UW){20@}~C2p0cYmY6CBWWL#C3vDTZ*o6AX!7#`KWOetjv zF$l69%-7wxwm$(w3ItO%T*` zLuGnn>NPd;t@9UC+*5@u&9M^en6_-Ol&OF!V8BF zjw-v+$S}fjvos84F}q@qu7s%`=VOnSh9@*(kmBgwIh*$}zulS7Ds37FiR9BH9Sd#} zA>mkzlh|E)N1%b@d0Po}8`6{Yw&QM_mH&bUgh=DdBcFkM?#P$C&uAw;aurB-nHUJn z*YL=9QRn!Y599sUbobz12Z67C_ayJnc8ysfyGHoQIlUn>kiHRP>%|~9ix=b@$(GD3 zW`#LpWAZiaMxobAMYt^ex>^*PyuRmt@-K^q6*KY1VaDC7H2VRu9w^_!u za#f<(MOu*xf?38ziQ&iQVrKs<5-IRPNInqnNWR!5sy(3k-!$WFpN(3=^p=?aPOgs( z7>fRtrXmy2fHawr$aW}f%QuwKn3C=S$89Re6MUtSOHdlR4=zSjJujQiUv_4bqu>PI z6z7T$sb>kElrW3NqNTj;`B0Qx0)BsAaP;vQ6@g)>3tIt=>oQmQM(Lm{cHk=09@%J6 zU&Pl69mondu&tsoFYCy)WSrpgc&xwxJ!Lm2;6M|qHJjTmd8#RUpNygYDJKZk7N4x_ zeBnI$LILIA-y344+LJHP5)+uTDg$+TTRsDn9!4k_m(0j4Pe@=I2LPmU!}S|3Dbh>L zE$C^OR+$`i#K;G;+8Abx; znZq2eCe}uY8ArLbrcb>wJw*-|7QzMT589c!g8p&}rBnh3MI{=_gF^(wI!9?0%^Wh6 zNpZ8b)!s9cSw0-q0Ms$C#!Nah*O?T^!r>(f6RCyqnI6G*vcUOV8;BP0DREpk^Z#a@ z*LCdCS_hHgByU(x1m%ZR-<<2Pt}%IF1Wk6`;uN{5#Wg3kVDKbqo8ju*y2?Y<4(DmN zL&K;X6>9@@aGZ8LDv=mL4p8DfDMweOXFimdV=mVa*ahMCV-Sio+D2~vNz4oH8pp%gr<`~ zNxcFer#?@S#zg2jkDPCQM6n9t5t``{vI1v-R)Od|aiRH%W!jNoNgPC3f#>vJ(QrtX z^44H&(rMqBEoYmX>Dk3ZV5-Lfsz+Do76q4Y8EEnk?TDy(4O*a zrPkvWapF7(*0HSeyE7gG`+ls1RWh({K(Uej|SC711>W4QQ9K>hoB z0jf0uHPzY#)EU}_PKPb2I|7auez zlxRL^Y~Izf_#lcunBo`A`7fhRI8%x7E|3dm%yZtaXYIV3u$!?m!23Nbt;yTK={@D8 zwauYvOg9V4B%TMLQct4wyFedJ>o=<@n!{n4O;soh5vOXttvFTge1a^L2NHQycqI~Q{OF!u>P)eF>2xyme=A3L8t7FUKN(hAg5 z4~Qvn3|a?Es=T&9T-ZW%agqG0&-O$kXPw&nDPA;;RgbJS_b{N;Z_eD}aJ7zfkW8kI zbj1Ly(#N}~vA3r@6mp;HuohQ&Pz=6jlUA7|!{${UrhixC$tJU5Lo9a`N9A5F8ivpp zuwLj0ley~K!~Ppr`C><$%mL2stYfWu(R8i?8P$WBdrVM_&425tKGO6MBUO|mDc)FK zA4gkWCi6ROed6_eR}y&I@!ItF@EAQI5!i)tWUcit)L&no`&}DuqrM*I$P*)0`%VDK zxcBl=z!^U(pq7uu>PSrU$)mL{Y*9cye$$>HJY7phfQSdGY|C z=hc<=ct`q9j)VI{rT%qQVDvbT4-^fD`O61*+9aYQ9pE_!8}ah7gS9R@1D>_oiTDrU zNC!UWPpoc0&otdMhPJT=isvsnn6yO!K&w72pL(e3ri<) 
zO0TFq65O%5nn{;Fag=6NJ6po7PtdoIrIysKpBIOhRPT87_ zlrM(E<05}?x94daIxrbHw;^Ee1>auH&`(1?zae7oa#Js&Nf%8&FKnO!C%e&)19Bax zvt4zd-u4x|z@3pcEp!!$rhz&~+t7)DI@=Y1I`7*T-FMth=ei)A}b`RmAi-5DkCZUX8oZ9^vpYOXr~bO9K{ zJ@2Nx8kE+LHvlz5_Hzc*g$vBoh6vPbPZLmcv<;mYsF|Js)LGv?@4h2Yb3GBLg{EEv z&ROo_i&1@2u8X<~R4wL`J8J~7w+`aczcO#V8Hl~jAksE;5*n}OKW zi$O&1I>$l^uthECd3VtO*fe&KZ0;I3;0*alCbt%?jAgU54V@UU>5T!fdEZ`i-x09c zjS;Z3O}z-1g^dDlexpgkIX~V2lp@SCpnmg%Z;mHmLBP&z@=>S7Z9Yv~QYDYxg{cLZuGi9qRys>WUf&NK=4vk0l_9FIyXIY%X69om`vb(gsIT+>s# zOJCMWRt@6zlTLFtWb+1LF~g!vB3#o=daR%BceTtXwa0&4y**C-yz(!G$Rsg=mPM6T zDM}-Y7}6&E9$2%=ut1KzH7)?Ywg|3JrGuu&9^u?>qy+=QEU2bg7Oz0no}jY!ih?iV zSZSi)XS6)ixKXmTXb>o8$T{}ljkWG&mbO`6n$gV>Ykm8S|7KXbIm6nfUW~7V04?x# zAIv6%H-C)#H)xnzJfY^P~XT3G|w3~fUvMv^mI0$_8#eXa=D%$5k)d{Zw12J0jP zn^W2)@@VL_ufR!8<2=lOYQWs78=8PRL)*}afjWId0BXj!&${n8TW4;FK+QJwB5-DI zKvU6)FY}?tY&;ppH9y2i&hr3RcY{TbB|kXF-!`4m)?a9#_{Q{& zL1X5fOT5#k%v|)NQfHu7H()L6(q>t?p(sIWI|$H=JhUo62M9STVzhj&9qB5MW3lA% zp&QqGJOH7N25!V=dE&-^AY;0+Pe!<=c?BKXA1uagX09iMvPJ z;FLn4_}3uDOsLOw|orhQ>gazoNS!GYn#sW*5Zy= zkLkQ2nJAbvK&Vhw#@KAlB7}z%0u@-9ussA6@5ECI{2FY;%Uq8eN!& ztoAeOyIOoYBxp$;Du+M8whxtv^3$@P*#2OtkJgr=SdrbKmg9Q$pE3K(z=7CYDA&ra zj+&1t28beFOhlfJT;mTYkxZLTc(teWmz-Yx=}F#(dV|57D0pL@Et2Z6V9PzDgCKi0 z2!m$GbX&)GGQNY~PLjS>FrEzT)J_aL?eD$}yKax1ad2mmGrHR$+J+9ia|d>Yoi^gz z@MgVR zofxpy2-vW1pK#w%IvTt=0yfgrivSzGxppgs32qLcjd4v+xRVCZR=AbT*?Yc0N%w5d zR+~Ca2W}B)>cpU}+#Kd?$hXJbcLZ(VmI&JLEo%oT(1vay-13$H*eKU@%w049c9B~d zV88KU0Lu*GD@|Rds{w-W1akB(0kFYa1@EYTjDW3XfDPTc9$dDQo7$k38g*ZaDhdNJ39wHRX?1^LJh`*h?Mw5qx=d!kz8Qr^=O z-gpxQVRq7YCL8_p5?{xwLb|bPzr>h4!eV+K-*BeQ=o*EY*?rKIw(53CkKZl_qr-zm z8CI^2hflrZyI+0xGt2*SD`DY>8^5ot`F)x1>CoZh>CpfBog_Wrs>sNq4~m(#Cr?qw zw0djuPO1cQ6xY+GN0N8xBWIl5ocsypY5O?_v*^pO_Ec{U${mb(0l3q1`$WPP{C9ev zdhLlK^5Q=HN|MtU`ni2rQq+9O4bRh%TAPGJ2Y2^{LenLhF01K9nyOJbHd={|pzHhk z#Vl51gGy{Lu)qGoReBlRPotszp_k0FJ{%jteD?RVEa}MpHV+dP{TL&_bNco(>1vtA&Sn09cOffPfqJ)Gk*At|4#3xc!te3vjxazcyvxp=V=rL ze#158hvsk5F=`hDm~;N`PMPkf}H=0AbEd&=WSGWdp$=uB1VA19759@b2lJ$%Gi 
zGlMs6st)%2=RV_;*LcPS{qvgf z$rpac$?VAKS2{0>`e`kMkjG)8+DU{bk87V#u>I6{OZxG*5cedxc${1KOY!Sr@jAe6 zAlDrUn)t}pYSm?rSgMHw=$FEg13|g$ag>+i6bky}nS;kUO4R(Ze=AH;(UYBlF~ThC z4DrH2x>`Ocq`Tn1Vu-M?$Ph0Q>p*<5Dg5$5>@sS;>V_)^6^hLX;u$MDRByUU)4@YD z9Xdobtaj3sWgOjmA#ttV6 zy`1DT`;~sQPbPK61$8=Jr_}!$9?#c}?}LYkV0b3yhtv7%&K#1lE&Es9DtemE@N}0r zBens~o;V~cqc`~s&)FoWow*PJIIC-voGwiUAr4owDS_tfq2tL1yfsBsu7(tXt$Ae2 zdZbWhlXSs>G$DSjE15unqptXaz1p<9;h98JTQEQvn2!UWyP|;`)1Esu0-`wv{=sWZ zlQ`YudC{aC%om&&aK_p5g*;%^3l4dPM{{Tm+QGBBUUmanJi979nmv3x`Or1SBaP|# z@HX%$fcb*2hNR~7oarSVL+E57jN)9Rle;y~wUQ zDa5=35wq5poX*MpY%cl!hh7|7Xrfb`-@|Dac#@{;;;$s{qWG(}MRc57Pq+mF!^26U zygT8oLOujpJS-M$6<~v;3-6=&K1X@R(rI1xt*CSwQq3g7HZ#ZZMI>guLOwg>9_H?t z!oaHG3p@)~8XW+BES5$i3CI?Mqnp5zvuEp?V zw`6*d#QMB!-U;+ePLAY5oPk-t+IPiXRW}J}SEP5aEUlm(Dn% zrTX|U^gm7mwP3n5N%UC)QcVuM&0wimJ*3JcojmWtFd+(U%yu6y%6R?bME&E`gU8dA z&yOW+DM@8oy%{8l_;H?J2fyH7{TmL@$_U0FX`jQo0?MGS;D>^9HTuO0c|Uwkm+2Nn z$0|^n)NfqR=>BTBzrQl8Zv&=3tKOIcsO|r3jd@V?N2F$mpJgDIQhketI&c&}a!>O2 zZZ@1XBdOxJGr-N-z3;R4O~FRV3^9 z(otPSc@ONboY2M}r8Q17Ol|+z-rr;8{Gw=)H5oQVDub@U zM#Kak>?rW3E0BL+rdOjtBpmm7}$ZDRm9pxi0{Ca5jU493!(XML7r>^mg3;O3Z z<5MsEjBD4%teE4iK;ozc+LMk1zL>J449d;&+qjFY|0C)@xl1M?3 zU}-HvwrHZQWT>sKi*IX@%POb(+@i(S9cDOo97#yW zeU>AqPIX$OU1u!Q8q30(u}m|TKXj+(G!|vvGnR!gmOxY9oUA(iOal=G z;D}j?z9FC}#P|ls=iR~Mc}uwThD+6A!Y#oATGKt$6+6vN`mZ$XkFl?)mn)wYRlaom z7LkX$OaHh(t-Pwb`Sn;~`>MCRo~)qW^o_5oCa>>LJWnx33X4}vCcuKUA-%l-9N1Jy zUWnPqMyi>Y2h;SLXzTIafOm2?8bv+4XyBe8a4RDC@4rQO(dI1f>7HUVh`Sx4ZRo(( zoq>B|+>>vQ(=YOS2>lznC)%4wn|jeIj@*Ozdk<)>=wPntxVr%CI$)~>z?Q#5sB)Gq zJ+P+mLks}Zna-#+2KBVdDjB48s;y$G1mJpyl-zv-ACZUE^bx1^kCu2w%v zSW%WXwz8*~(BrvU_2cTqAYI%O!;1C_(oz2yL0Zi~8rr+I7lAaqS0D}Ub%Nc9A8!Ea z0=F`t-t(bCF57Zbcj-kxu1*Zpg}ni&Ro@==-w4#jy_r|x-X@>`XLT=%;Jsl1hq$K0 z_i9`XfGu$=1MI&|7NWy0+*^!N@@y^padl$AmhKIJt@!p(_!}^5`Q8ZFYEv(orL#%mDnnS8v!&#%Y)RRGk4yeLsitf^Lh>>dv7-Q3?>c?i1qGzK>jaYsF{ zmc7ZDEqTTi1UF<%85;)g6EY178%UYqX~^Z!eOgd9!hLpI4bvp_6+&+Y?mM0is~%m5 
zBOkfX^+K~=WrMkvxbTVV9Tz_FqT&Li*(*9xh5&+*KY3q5G&@nlYnowA?h;C!xlbv%$VvCrwgf+IjOXtQ z@%%Feb<}@F9y8zUY(sx?@T7D19j__c(o}4Q--{9b6R)7fsaG7wE1Z|#7`nesBpbNg z@clJ;H#bzbs_23{AE9mNz}*ksABg0bZ%@!~&VEPkk3{lBQ!ipEeMg`M_C=tE_pKe8z!}2K zI3edP>1>8~@59^D`FOSK z^~$|oPk)TQ=W8LvNK z)lTV(zj8v?Ed;;nDpBx}L#2=LIh2Ee?{nxh8AbR?xpxws`7;cY8fgy}1E_gFwh>9R z1s&LSh{cz#tL~n;i{7U1YEXAi?5Z=jhBTk?T}7?Z-JYav=)ipB#IC^HPWkp3`px<6 z$z73;Og8l*h)?ZeYIZSn7~K@t^o+Xz>^j#Sds!W@h0}%1kP|O!0&JYNp%Vi(_Obxj zgl|u|@5tcBUlsv7)zpiCnS7bRn|PVMkEi{315hJ(*MVwC@EN_ks1#zB`y@YUplPvxhr}>F!pCA13W+b316ruN?`eSXR`ZC zms9S;7!k6H?H|<@o;0E>;~P565fy|g9G2h+%i$&I?W%I!R_)WApm2^H+z)4qd9Y&# z?X4H9Q)?b3I)x+B_Ey^-~OdHCU{fA`Pc`?-&Pb8ewj=wLbT z;AiN7hg6@+1A@k;w?A6T2Bz5@K8%2k30rEKk}|07Ok~zi)Xml*rQ=X(M9U|CNg-`R ztHn?iMWP67c})6mr5SDu{wnyoSN5JarbzF*OM^#xOWQf(Wh<_CJ`uF>4_MD1>vrIN zJ>w&N2SF<>K_r>vBjN||NEP#sr#NN*PQeoGeiK7FJ{(2AZW>czGd zyh@vcWo4e%X=N$ja|CY>*V!W~3%DCJV3UNfG_~HYDIo@}M*_dX&iZfGb>djR>URn; zCKQ@4Ul0#v2!XDrO-q3Dk=hpbr}})%xBWq#E*45jodao4i1v=Oc|=({3Z`Meu3e~? zkLl(1HjW<4Di?}WsMdot3K}mdZzPZn6+H}rf1a}$v0A)0Y;M9NAI?uN%GtrZUKz?I z5`>h;_)Q$ua=?rXna(e2OV>|vNtcft7br}oE($pmJUHhzCsP#PjUi1{jz-K93KYzj z6Z{eR$5%XkitGoipyQi!lNG^hJ()Xw1$X)hH4hE;CNh(nAjb$()%izp zA_YjO^{9-b&9SP})B(fTY1$sDQ~r4C8`ER(+_h1z5jGDu!bg;APY-T8B3ip+NGsk( zqS8c_HQ-R)AkVeCtMcQTf}bRZp_+V*-Lt2Bc0T~He~4$>-@iEo+Ge4G@}riz9SO*G zS=Gkoy3EyiA-&1p_ViczvJ9J4A%_-WvL=oRtFpSE-I5JnHWFqAuGhli7BI@&t%y}a zb*xfCNtoSQBUaIu02Wpm2?JJvgtfb?wMMLpO@&oPKw*^{HDHx{Bdkx$-iTGw30UMN ztm27?0#1+#SOrUY53|3!G;_>ILP1LqS%E^eU1`e`=_;GJ{Mhl@Zbd}?2&RW!kJonT zRL=EKP;bsrkR{$--BtaO*9!~VtF;0$D&|iJMeF)>6_8P3!u!dfY(z%EA9jUsqZW~o z(Ci-KO*3Q+@!8J}88NaHkda7+&DH8_0tH?@2!2UTTC?GR$|u8ic871&5}=M@O4ss5ZvZiV%@I-)B_|E|hl`BFqz(Bo=2 zn&lN<6a`0yuoy`Qt9B1}3}ECBfF(DDL4-d5q7aH`U`HE|9ki!*!oCh?7CxU6v5>#K zf{KXMBC|TX#d1Dd+$2=9^m#zoUAlNQ(7}lQ@Thjs|F1R)Fs<)N@iz%yM_EZ8H%*Y} z0LMx`!l@$WF0Q_#y9hH1m|=zxibHz^)r3~;khN_ThfLn1BUbC7%>{b3B`v+`P-$X+ z5TOWqW@!DLqBe~gWZ&Ms(;5TdFr!4cqc66Mm+^BZB87pD+a- 
zJt&S_31wi!kb4nsID(jknqpe$J(qB}b^55K926t2QH&Q`>1|aO)z*!baz)HLEVFb4 zCA?i~E?`aCCvh(1dUS@GW}K&L+o1#F4)P&A%~XDO>9dEdg|$DWT^ILTNJ=h@%z;f> zsZZu3%p#9Q4oa!3s%s*A)>|OJ2#feEfV)i&xHg7zx;(K4JjVFU32Y=NN_2+Uq$>pb zQ+2RoU{VHlLLkq2KuTjRAy|08IB zw^ISyWPU`({>gwAP6Yrj#*G)GG6YcrZCFj>8<_03^Ez!Tata-buhTkulW%OadNgrR zlAI;x$@<5s`p4OW^;mp%r4ZySh#WsI^6T1I{BaQ@w6GJpvL7rVq4SzZF(}32i*OZV z@u$@mLN=%Sc`W{dzL80;cbY|kXfYOl2VlP! zUN$I4hPQ`wod587hFK|Y(=YUnc=1A;@~*EJ(be@F}nY!m|m z*$O4RDBvb~`h6${VQNoRd!9lUYI$>z6Py@^h-a6uPl(t!4jBWVNXuS39Q@kR#!H?dV8E>0<(X5=f6E<=u%_scmN&hVvZ<`*% z^m6C5H{O2XdN zJa!uW>`SQ~5I&ye*R{-O^rgI$IT+EEl)fQdVTOac0;QPIFrSOe=(K)=8BObc&Wz^t zZGpF8i}2sUmuAf9+)Hb{#aZZ$v!H2xDbw1pN{QvyV)M0=*5Bq|dnE4JQYimGr-eTt zXEhrZ)vWJybzSG;NI%ey_BS{em-(6zwgw;G#r~oX&)u%jHgv!f*suEoVH>FF_K1Ik zI(d2fGmA`ZZ7+g$2n88qUCqlp?8gDQ?pR#nRt8jq5AQ-#cj>YpS0@H)sTP1*@$I4T zHvqLE8Vl!NoP7x|NyamkOX z69cw*djM?Nw+H+;F5}Ye5wMH57rol*h2_89r$Vgwfz@n0(Xlvi$Jz&WwXEEJ?RnD| zuXo<`#TSn^dGUvkKY~dl2BF?|JiS=2|2BL_(U<5a#w@V}(ac8Pbli6u;vhrhV{=?_##dXgi`bAP02-n2)5WN$D}W3IJjQ0ZIHIIA6)DqK`9R7h_00!Hhg ze1JCi)wV;bGxK!j`t$C=A1M=o{7to+t7?{+63NdA?ne2TN~naPF595&;25VKs0*)N#0!GPnJ(5Hjil4Ky_e!J8Su=!%3#m|?iftUZyl6HZ{%R*AvM9dNb3CS z;5C%U!wn#}qHptL=0)t37qK&V5#5Scqy1&hw)CM#ZSrEtF7HH~&0Tm8WlqE@lLnda z~bl~ufNeZ)qe%Q~wtCKKJ5*9Xg-RT%NWzXoR&#-W0%wI0mTr~{!Y3FhTsRHv}Zt#Y4vrI2DLk5b3f z)j^_xPHKz|>L7nWRjMUp^f@h*230j)Jll8Fwj}?vi(8N-`?g0PJl;=OL$G_-ZQn}J z%-N($b=lV%EU@MfB?&c)k$5k9M%L-D6Vnb5#2aTFAtarR!)nZ1`%KIh@! 
zT9^=sG;zoAcm@^G`m!3H+T9^(V%R@N^1W3T^Rjmj)XOzMVH{K%G+Md$#!3Rs~ z_HFjGegB|rxsC%-iO&_0M+7AKbF{P~cGWFUVXX>Qsv+T4Mtc==FkwQDp)-1|PH77p zS0^0HkWv>&M!w7~&m)9CIbTIxnG&la%>r_P41UgxhpyI=%vmuvE{9s}Clp~9;8 zdRxeh7(9^tjYLhQU~982;0ag!0P@YTOuo@p8I6*PW-7zXOHaSa+a;%;wQZb!ueihv zFHQ2NUWCd+8s*h2L(#3f-ZJ2vXWM}P67CT+M!^bg8Ix3e-l2g!(yyQ>2XlxVF8{LgdwyWd zD9~_)^sQB~TJ!hI4>3Q;%IM)GfXA87kv+ewaTATF%rU*zprilUxF+uli*9rQZnyy384O1lO zyjimi8TV{QWdJk~8TO3FT5p}+=5f-Op^^#+kZX9INoNi?ve;GBs#*`#kgFjB@069f z0U<82c}!yKnl_uf$po`un)p%$LMK$|or-3WiEvOjxV>MVnTXz7r6)|^eA z4NW%?{j79AB#50ot}U-t#bS7Ec`>{;Rd-`NYLTET>yub#98#rQ!n540mvXdOww9)? z3VL(0UZ5}Yg~>0Rq5dSXf^W2Wc$T-s!z0jY9p+UpaZ+|nS!BD~UNMkS6ZB6N;r#PE z_c?h++ovmRp6fDW{u!g~i;^*K>)A^xNpEd?EcQV+Ql~)>RdK{g!uiq!d>uA%=R?(tz3dPLH z6L!RuyM;s9Kl(2|<1B;>y;{mGDxssVD$iW(>fbiWr(?OO__#xRitYa0?Hrae>R};r zC&xM3XWY>=qYXn*v)!fN?gPyMYr;YL1tV)Z?mGoTtSFun&Y8Aq=Toxm4VA4bv`lIy z&%{@bg|3*v9}uoeQh8skEsV%#Q5n33VPNHnt0L#z3@2yN+qqsRr0650|E%80lFj+u zC|SB{s;K(NQL~lDUyt;Roi**`lcni&1D`hgUKgL*(r(405BvhLi1UJW!BEcl?<}x> z9uW4F&o#ozdO)zSXmOio0)~VsC5kAVZ^!iMH(d3wYPrXv{|%DGZQ&tu|Fbs@Yp8H2 zJB9o_cT|3-^TZs(4fY-r<1z5iALPi zb$km~R%nc?9van^b|bp4bC63Y+8lMYw=ADb2UT1-OoEIbQ2XerAfYD_fYx-tV2l99u=`}&w<8#|raBxp_KCL9r$Z6x@uFUd+>l~d z-d!qb4>>yO)l52l-@sHl6zbJtq7O0KHC6S z%_pRdzJ9I}!)BOmscs8e#@3W0syVtu%hH-IZ0GZ&Ml!ukBN+kXPH#++lX-FLE*_~B zRJiQT16oE-YkZD$U`XmIxG>6+p=I z5ikQ80S2@PB;e9I0I4xfZ|iT-b#@z~P-{B2MP_34j7q-+LuEyLzcn4-B19GYujE=o zC1?$mpfyzLT0=#czO`DZGIK!$sz?NC*Vghb80Vlqx(mA?ZGBY8ueaZXM15muGQ(B1 zq^5NpGtlFPWSVn*p8;eY^A~W0fTHbmKBFC%E^_E!5nL9?cf!Er(PGgwHf;0SA{f2S zZ!w0k9-EbB1(I($p8RTeBpM)Y%U(ih z{C$)&EBRe}0$8Ch+tSwLpL$}qMxOTmww^yR{Xt+G-a%hX=r1Bnqouy&H~9(I z*XBlqYLhG;@Uv9yPX4irFWT+Glt^ZUqREF%=g(>LcO%diz5j4qR zk1dPV7WNoHP)1@l1G<-20gPvDvQ4Tv?MRlKP-d#3IoO<$I+H`S-JJQ(OgknwW5bKI z<77rV0vm%ZHc$@Jc;2TK(7QU5U$e@j9V&X&4zX?AcTdNCH{H1JCO3tNpXF+qK#Zs; zNq7O+NZXmHP74(h(0cp{^9@v-7DRhxyHluwtZ`EP5*hrskePF6y*uY6Td+ILP}rT~ zev&ORnL`h07mEm_h>Rm524VBNUAD`!cHZs8S~cu;y-!qtezJC!Wn}b_3`2jGWoYNg 
z9`erAM6yS5&2%T{bE#&yu$70lNlcpGWW2Y8v*~#9*^EA#vW~ke)}1Ya#uy(?7ff%! z^YLx94cdz|PQ!`qHG(L{wXm>0hSe2k`D|=tNiU3c9CphJAZH5i z>a=?>mr)0jexyN3EA-S8dm4>Dr;-QMllzcVC#f_J>15u=cmmc3LMyWPCdR?KNycM( z$jR*E5F}tg*4s)m5nO`oVzZ&mS+hZk#Ca|^W2VUHYENvo7{^BXCxVEYEo|@aGzC>M zXsZn26Pl6ghxH zKzM|VYws**G&+YozRy2McxirK#MKz9`RnhHam`%}h{g>dJrzNQwAWO?#ZVrX?E_}H1;Eb^}P#F0S{Ea{Ln`tGHB(vdc@{8(LR`c$vCc?=14x1oBC*c0wi2WpK?-KW2el|;YLnjYYsKMK z6`&mzE!qHm5Lgx`RU+9^Yw{6G7@2%UoPUryg#i+@B$0~5SRxIwl7r>~09-&nS{U$ad-sa^CG)K$!bPxTnk!u&=7e&21uu$~!D9p08 z#6L5cRzwoUlxgTu( zM4OjIilF&WEa~`atP}0cE=1v3StPz~BrZ!B)R$GrVxaM6W-^}j3qwTG^@Z)S+=l$Qd7c{z&zV$d zx#1u2S&T@VUWI*S%UTAgiIv^(895Q!YaHc$A1mnPmKF2- z_y)ZkOw{0*6Z^TD){z}opqER#-33->dig~_u#zKC(94Y_5xfX08!Uncpn#`wq1Z$A z$Q9Uboj3@eK_j2sT-V5_b1}8@JT&sUeUwGmSX=~g7vPu7)0Vu~OxkwsiALm7Tx z)bRbyiWSRd@zQn`tE})^5q2cKD_*&ed*y_JSFvs^j4(>}l$LeuDN=4^D1=8b2NY;9 z2Yl`;SwqA%sQ$tJ#vTW=pWIv;_M~OWnTGB7U&7 z+9DDp{ZnE>aitzWdN2}-c^6g+EDRE9?G1ydFx#s)iKek61etE}1BE4#g^GtopLtuv zUG{`TPu5q4u@T1Em7K|!295NI5Urrt5%{dVyTRYMB>6OKEa|*YJeja7!p2ft&-N2E z!JaiJ(Faa7rG4{6+&AKYf`~ch?WD@_-ndJQo9r5RJOo;rWs5wH{HfL|*sFq%mzN=w zR3K{Dg`tZfYy;T<^R;M) zn-$6>lJ@{y?Lz^r&4@vx%7{%T+^=GJLFPD-} z8(>pg?OBa$E9HnklZ_)bjyJ+!5JmGpk-R3U01d0YA6$}c|ri*Kf^7Hz~cHTdXl%4WYOS+OO;9w*7oXpXjn$&kY< zrW1I_uYsY_=R^~~S%)7ZugUvl`3JKZHj!*3)(X{I&y2!S zV7fme(>;Ma9;OhKcTH_7_TC$OAi?lYN1BVCI14szYyC#JSx)_CI>7KB-fok&4sy{9 z$KIQa|Eh(7Rb%$X5Z4@4GaiuDD!l30ZT-f2ZA1Y-b0a=H?^A2BkV~2N#I&&UqXFdx zu3!SZoY)RuyBe`w^MrTqX$V8+RJ#fx5Mr1KfpC+wl9sBr6>~yiUpOlCc?s`1kQ(Fv z&)&O0*>#{)7bQM z%gAn;rg7PE5_J%y2o5MjBt>Fk=V7P>lL+D@6)=eaC#fV3sEGkV38paJqz5VZ#R=VY>hNfbnd+Z03vNj3Ib zU_}&glBKFCK%Zs`2setu2#&b1>=mePxH=H*ZeYXqC{;`bffB`h{- z(MXOV2E{_;D%hzP)Nl3RUf=z}Vm5ppte33_vrAJyF z=}vO~H69rbLF~k-^vKE&h$-oTzyfxCSNV?mkq;$*yuMi>w}u3PaM|mzAN*q~oO|i9 z4vp%un4-6^6$7=IIKRC-Q9sr}P;M3udu|Gt!jkG?(I`qQ*hYlH ztW9iIAJL}oN?Xcd$|?lGRrqdF+6sOmsY)Uydk_V>m772I-vq_ee%aTw7Ay&MNB@am zjXfu)YOZ%}3ujq-U+agaSu5H+S*+}9&D)1CtFNJ=^1b*~&zdE!ZGB!pamQM^DiE!k z$Ym^)OPrNNA^pLJf9uE4>4BeyYvO)>Ma`e`?<&M-e>~&uk7xYyl|tU~ 
z*)HJW1*1OfL?GH=eC?s~>WD3z#0A+a7I;7H+euuIqRk{YWWbs(?hbt zg>;nqBX2dAz&D9a?am-=KoNu#{Y4o6dP8p=*@KE*<6_hnNyYIm%U-z{m20qR>QA^B z$=ONfxsy9s&IQ3S%yX}Ko@7jzspU!9tqHnhk|t5R;aI^LdoE1Mi~V^%2u3Fa+3z8h zN#2Z|7-S89>Fl|r{7BOpt%h`k!Dczz4Hhi#Q62gNwh>MwUPUZyH|P(UGMSv(gA$~C z=nIkdu_QK-OFu+(mLifNXt5;Ecid1O`6Q33GJ2{U8I!lDPuN86nI&z_2Vg6?o}8N@ z=0GK-B$+ca2{WZ6nfE8q^j_Yw$sLIiFnFa;lNRkGeI6>(7pYs;n@{A=(T6#%!64H+ zpIe*q`O@~y>3+G7?w6r^3CBeDr}So`J5zt6d%Rytr-SZScjuHhsV>pI)`YxAe(N4h^Ulj&X?W;7!`DYuk<=1Incv5@j4 z{kAbly(2VcPrwP$T7qUPSLWzga_^nI($O&yyXb!T$_t@;^iiTh(f!C8x&yQd1PeW{N3ib~Hz}?uuqC&!!PfgBSfMbhL=mj;b7XZ@IYNnG-nsvnSy`0#iSA6wd~Z_DqF#1vEg4421qp=@Z|z(sR8xzE`z#i`b&@CH zD9a!&Yw?tKmGrq{UTEOdhOr$gq23UAWX8(l@i;xqd!g0C4=mlN_xU~&Kg2qg1)*H2 zTi78_K4Md;F_8f+p-QNHERUL}yUKLc1ntX{c#@9T^{1$&EsT7M_%wK=6tE8nSW z+s$jo;>xwxSW&`F+RKW8Y+BS)2}q|G~mL}&8^1|Y@qN8fW)q1S5juwC-jvDRx= zKy#F>rz&(RBK(?J$K#~T* z+{>GHtan5CjN&8!1iznh=6~6GXGJ;l@AlY}ePgdoV;|I5d}8d~Nsav_k6rE?dqo=i zfX2SZW0y7dK94=oH+DxFd%woM+hb2?>@PC5p~@ZgmjXxkN`VV4SaA#BijSnYlgyJW z+Xdd=WfdLrODF0wW#m^_J=-Ip30l_3-|#XZc{I#E@@YKsESRMXx0jTeyULx^M@4h94k0I|KM62! zZtH{tQzdF}cDsW{yhn+aoSpFtO9UuA1Nk9@j^LvjRrizBCB&sboZRR+^QOVNMe&RDm^-D>Sz3b`i`p4L=;J#to_2AI$ zx-V_lJBgkwlSVVO#J)jjAUe?%2Vi{a2eXJiI?VE`W2PdM!uQAFrP z`#v3Mmf?pyCPXL?iG4W<*VjI_YvEeiZ44%vp;Gd;?VPB024Vk^2BJN! 
z7}VMDLjx;RZNuWX^A40 z0^BYe4qddTF5kcdQI&5T+tUYcxxa9&`zM@7`37rVl+QX$8Q1*$mK)1w-CVX_xaB(L z`($mwM=$R!_~_a#$mYbZzV)`BR?@OECw5(kIpGX)cA2OVZYhpqPd>3+fF(Dl(p2r_ zBy5HJoGdbN4tQY56;aYWKAs-`QbD_b~z)-KDfoXCiMY z?}EO2#mCqWPP*Y*l!;T(BNB^$^{$6ao!ArYWLiz2w`9Ar}1A-3KSU_jg@2GL-{wgor>&_Yhd~1DNl5S4U+#- z4tu#QZO8EBTNrM?X8Qa`_ssqiE>pbJ&bZ=j({rN{HgHnt*Qo4dL?KZctRF%^jzc=C zg>`~g`DNkf0pPZ-pBuxsqTBk}xJ#Gy2OqvurXYXYH6*)J6O~8k<9XBP!zk@x7{l5cPCOI!A!bo|k$@ClW*DG)Yf-Fu<)ffJAe?C(D za{i1P#oV(mou1vruRxI3bXNut(o#~n_liwCYf&orvUp#A;rrkst< zeOZeKlQ+X5BzXvAwkZ?Jg+{R_q?n}gVY@oegwowgNy~9Iv^@Y~iZ6a~^iWv-@|yL^#h-s& zq4X7~=__MLzjDovVik2Lr7?QGL1z((6OVh#HPH$l5^ z|u6={jiHNLC6rVJGo)CR%`w%R5N>1X-TH5{GFp`nY zw&6t>dQ2>;CQB1)R_UG{d6jTaL7)3tchMlh{4W6|@2g9VwSB>a^4m@pA08<_Rgu5= zg}xTrHA}T!yh-dCUHB5Ww)u?GB=xb5PYz7KwQVhd-a2Tm=b>}+07w%3i3fnjj3rZ(aX=a&8rHlxe$AI`yC8XCe zVhq)Y8%M_`jLv>V?D|)*>-o2$`fX?W?b>Eb>DKJEB9Yv?nred9>{kop@q;ho4(ehp=Sj-0lw=5<@b_%l{`S#5dRe`=V}86Eejl9U zqMe>!e8T_E6emaWerPy%uKSe0xW$7L`&z%-H8HWDkIEQkRya~TlG~%;bjCs6Q;8jm zwC2V(mK_dKPraV{ZZ&#ck9#(_yjxRmgZR}`?$p*v=u=(BgGcl^DV(7YZs0yrDcq`l zr>o%y8lfxG%WYSqc{d;Xjl;mE#Lt1Ref z@XCKcK5+9bx0E{%-847)JddGk%}h`$_#CeDfq`S?ZCi9qAc-eq<>)PdtLB-30F-0% ztG?IREoiHobb8;md`ESsDQ>ZJmB5_80vo1zcHmhiX8R-`9=NLQfxop);(hxrDBEz{ z4aw-Z8#ccDU=mN}f9Z5@BT9m^0eRuY^zydUECG(sBngm(FNgx@R2(?8~oUwAFrVVvi^KQrkIs`W45m(}HdoYm) zz&^_yA*Nd9oP*qYhZqBT#C4nRf%gX9VZx1TdYEJ6?2X zZU!QYS9f+U!Vy;^1>{lQH$z3Spbo^h$WMtsw=2MQa-96C6eGYyH2mp6oVQ-j8_#oo z>b0$((>3TaP?Y6~F(x7sAxAG}DF1ANMNTrNgQAY(5`zGeKvDZz_qpiPc`?CZ>`#)9 z<}VPDM6*0riLW3tXX=Ci7SbLc3Zgco$+l@yG}R{`e=g*sRzVV3fQfEC7Kfl4rj2=e zm3{)S;0*&TVb-hf>ib^5@Q)-DcFkat_ z5_&bH9vG4L3@YDY#N{RH&0gg{SPR_(S~1N;f}PBSZ5*pK!g1Mq5ZSu{XH!V5iPS?($+=`aL_&?;2`ubUi_T&5TL=3lD~6ssV^}q0W=;rE#Jqrt-sM# zWBAGZ*JtCesrbu~Z>$`Nzy6x<++cpJoRw7js~8JT)qVal{u&nw4J==Qz(}k|{vovf z&hoAg1o9f#VigDskNXlT94|@|a(LY<5NJ0F`;Wf>nldp--y=x)_u_%TH=hFtygz|J zdF^K#AP}#B6`4W#dtXihskTTw->U>2x%tCE;Av1LoFa-Qj}?sq&P`%2S&U<|Uy<#^ z3$+*>!)scM{@@(N869ryETRj^YGVpCDhZTDSjw6sDquEb 
zzv+iIY2Pc&vJd*Q1qF&BdmcyE-|(m`6m;#O5ho#L>!*|}E%_|%)}!Hy;14;|qptOZ z=27j)sQJRT(51flJhiklZuV6Q(S%jahdfcNB^#j1srId$;_NmSQn;GcZBPlFrsbyN z`TJUTxW3Bnx@k==?#Z3X<#&-W^ucbp6XRpe79fpN0EF4SrQz_zKB4t$25)<>Z;-7X)IJnXERMV+Z%h z`m)m$#n>u-SLd*X?^G>2>eaFchVbQOz~?Bs+RLt?WWNM`EvqI)vEC?Sh2?pJK0+g6#E1E>lcsV8He`rUkSL_8Z1ZB3%uPR<+*ZqAokC^(|jW%hn zgX^1F@T290<{<{5xRY~+liwz%@UQ4qC0Z$nt_pfEYr-W=&QV_;8+S1)OTV#wRYrZv z?8|=o@;HK3DYlI?Zrt&TP9_>I&;|!xUDIO+M><~>5L7XXu%+yUO#&#PjQj;Q%w%9?FwaUjoc&ymp zZauH{#!#~ADv)zP#Iz!&W1@42DG`uopWeju?%9h)A2u=Jckt^I84=gf?-I49dzw(I z9FWufsr?s}17gE9B%#=-L(b#xZQV-&yqH6>VMCdqZHCGm;7;`6zxBEj4pG4Oe7WAiBLL=o?YY*0>6Rrde_jDG@06%7AN}mh zit>LguEw(D`)FYy&`2|pyd{Zxkx^>1kyik%jd<|>vH}FnPxIy}*UwuLtt+U7-g3iS z+iv>-(9_%)kwbIcGdK0EJ5TpqkfA)c=O-eYc8G?QXkiW#zr(9)T+vF$7If(xQ}wP8 z_e-N;ifBan>l%6lv3IG}UK*5z)(DAP@^9>Bq#7 zhgWNUk$|BcwY%B#^-O7Uf}Qj(zS!UP%ZTNagz>xx6Y|59&SqK>X93%IWnoW&Wxa41 zRnIQ|NGV=MVEPK;{jX#SrcTEXfLc6`1vAd_7VCSmevxCbb-`iY)GpN*!s(>I6!$U_ zmbrQXkaxV21_5i4I9n;wC?9B%mIk+NJGdw2o{h1AS)hr>h72Plq7$X_fta2|_rm@TesL7sALsHa;VfA_IsUkkHAgl*!c=~D5? 
zR(6}{(AI9J-4bEXKKnU{vQBE1D#_CnD?>FuZ@Piz2c%X?29C1~%*(4d-XA}y`d`L= zQxC19|2@eBRao&fNeKuGQ6{N(Y@h%Pg%J9ka)E`l>jHCt9>3)={zj5ZBXmlzrP^~r z2mtF+eq0HJ&5R7mVKIEC_>-*sQE{EKB06 zHa6`tb&Gvif=UNS{9KHk{L{;5CwZlMCj+*>p3l~oFHI{kDLKJ6)tL0zdrdVXSHFOQ z2WfDqy)3c2vch&fQm#0OC(G!DkEk zjZFQd<{DU$9bDbrxiv{@!H`gPLUhk&6E&qb6XL`zt1bty^S;)r_INS!*AGoJDjS6W z2$hN4o?Sj)S*pDd9X!Z@5qTx=pRKQtz45x|hECMP68VHMT&j>;Bzc0FqF`8cAbJ%e zk0mwfrL7gPNxLZR&BNQ}m-4$DOrMoPnKzT&f%UDM%WPuez~2J?=;~?te++j%wUbdfX9>`w5Smug2|5+@v;n>BucjuwE!+SxGZ2xz!MV%Xv`s8?i$SR$dgBvh4Oe zF!)aXPFXVMVtX7ImwyW9sx5TTL==sfKYDk zM-(g$ecF}vOpHOXEI-|&=vkDXs1qeD>uMRsSrqWO+a}LnS{E6_!hm zHddmC$ER4ICL#B*6^SI}@rt{T1$QfFfKzP}^RGERXVKM75+HE3W&0wTmiH(~y2@8N zn`^ZCYL%}V2O->pn=qfvFrS{guXX!wr;5{YntP~Ub{=x18uF^$>nc1gex+e-rG%R|7_9ag?P1Wet7#UUN% zEwB;JTL_F4MYm+jIQ)^K@M`POS^F_YLY7_hqt5vNo}wok|{KAWtPv?kv1do zo!ws$2;-+hbYEbSnUw#bD7RcsDZs?_c7G^t$b}P%ZgXFv*P5z2E9ncp z;#Bb(&axr4+u`8B@{8g83@xZDl;BbyoRF4}h=VV3%jw9wU|cDcI1fnqOTQc^r}=1} zeXT#bBD<)BXNtd4t95KKMRK_U;^c34pJkduPa-0801CpOwEKJk1b2+44mvR+W}Kl? 
zx{t#<}UckXQu9+3w zP$G@A4if5}ICxX>*~|nLk_n)>3JWQN(&NE@I(UZhTW&3P@-eN#+E8k_^Co4HjHx|o zZ!N@qO;S0pep=-`t#Yzb`X5;3Oh2u1E<2U;x&MAu4qzm!nk3}29{rmQ6^|_cEp82d zc`2Syi0dn!-3`UFdqc(Z<4N)SSgm-#w@0^aVB^TwVH-!T4aHMzb~`l%a3R}UwvE~v z=;w<5mW{*Tvdx>>Q2N~EAbf7yhwE+oVB0AEtka;dff_bW?rSLtN$0%wPQWTKY1CGqpN3=d}e^VeWAubh{RgKyVvQi-Um>o+#6HYMJb`B()>>Nzd zSPHHBvE9no_E}2&ToqDWB}a8P3&*KQ&*aZARONl5dpdhIQrSHit$A1JETc=6FRsN> zWokBjsD{j=PDCeNoyw&B!{%Xyi1lcj{Kuk^x*neFp$W1JG!r~r+h=#u)EIlH-U7#n zcwmt%9S$hUn|ySS>L69-YjRuiicHpqQ(ggbU);HlT*feoFpch}j_kV5b%4o$q1OwB z+92}B>Yw1fau*t%1H^RK*hIbXD*HV6vEnfWl*z$#4(wTnx46(7bCi1gFnP{yXZpCm za4CObJ4e-3lPG&PZtb-uCMF_Mh?tlcDbRMrb`2Yr7jOZB7JJP@eZAuIa57HXUEKDy zQ9&bLoe#yMb;jVeN`BO}aSp;~VM|(7bFErM2yX|=$f0NUG%~9Bt$q%*ui@a{*ZS2$ zxltP|Ryfl!O5^tYyJDM6hZ9I->#kvB4mPgrbB)4gOwV^2coFHS4W2L6s^dJ6)6Y~8 z`We?x{}}fYX~@&2J_Rxgs{;Y~+xf{zCCBTL6OL2i;}#L2JVR}1oG+TM{#^gnJZ#8{kEcKpr>FFs{-#W1x3lJ_y>rLu3}`ASPs{_*-6zX=;5bP8~wiL({xyRM5lV5aWkd|xmLB@fz8hBO!wZ``tzMe z2#IJ{VkEdGw=Im;66WSd5) z!3YmCClKe2yl1!l(9$a!EkE>*H0q&x)Jkmk2_g~NG9fTM_QX~&f)v1MzNm28Du?FE zPxm+*=&|t<37$>N7PH&SsX=%K=wt%fUrkYb4ZEUY@Vq!8LZzbvAPyKNHdJoHp1GT; zdyqmt`~o%nSpAZhy?Cy`kLV&2OeQPMT2Jd6i)ac{`E8jb@sOCug+-;D_O;%y4bh=5 z5yq|Aohec?A%$qPH8wU@p{>d+>01DMXt_glg$u03iOaodqF7$YMSADhY{3+9)Lkl8 zjowNul>Wsu(BKOhIFuGn7<=hn67Sj+9atrlE~z+K-01{R4wcHPb{VTns&@9~yZ279 zPB+)<?N!E!)oNvFpN&7!=<2g-DiyIw!JTu@-d2R`ey7FE)WMQY9R z0D?jyOx!V2{GIWyjZ-Bb_~dErIc5g$rZPh{9H7OMfVV~f6ad~Vxa?PUh(3H(tiT8U zav-%x1o<-|t6)onNt|$hOx!JFDc+CYjmdzxPvZKIy62-zW_htS8rX z_W{vRtS>r~nFgX~+_Bxo7X*7FWd-q&587kBoI%RT>)HbKR&a20msPNOzNDHFDbxV@ zW5pY%3g$$QadD=n!exqalA>%8;`Acf^8xeJa8JxnAvsI4cY=l#qqEZoecU!1Z7_!k z!uW`_@UnEo&4*dr5K*>>?ytr%27zp$!T6_xIKw1GVgwa6O7kg-|H?wH zZTiL|CgR-d3AMyP`jNFx0b*;iCHNA^TR5_@NBpjxi5q6z4d6U{x3QeeXmczCrptDw z7?SKcarLUU7L)w`q@f>Ap-2N~0JAHjGJs}{4WNYw9nRIm13(|r(wyNYfd}_Y&W-Fn z1fi)Og0#zF(!);O&=!H25*kZG%8cGbOtMFNwEO$jT2$v zIMRNU#^E=j@t8bPR}-gb7jMfbJ_Ld~@fC77K{(t8EHb;N`}DIc z40hz|Xdj3Zc+ozc-uZ+m8^g9_o}(TL-<7Cb2D 
z4;VMZ_Y0xb1+gZ$O%3860jZr4hya5PfVfBbN;t#X-B7Lp;vN?~)4lh#{vkkI?7nEB z2641X&7*#0>v@4VZY|2_2XTD(r)hzE9TQwZ`D75+?;0POlJU6#h%?#&zeo)a z2XUbfT7&p zAg5Cynm~@fjjim{l%5sbk)dg4Bpi}~96c8j7Er@w0Xf{9eL(IA#VJM$B<1cnz$v80 zUF=c?$<>Wke;XKz)r7m99yMYL#!Lk~E)PV5NdEo-kUX;Yru@l|w`~~@z-FhImWxKo1Fp;qht02UBNhi-nG!0Z%jfeU^v?pQAxEm2*=Wz-Vgc?4Ne5NZG)E>DD8!{sTX z(qGT-+A9nvJCz0_4Y0h7$J0n%&QN(gC#yhp(;3ROayir<&(rD2H)sR0CoDRB_htuhz4935~BW;q+%{087;c{VABM{x1{c^b?<3z%Xm2ZpUIVwdW zU#EU;$nRmDh&CRT29~~ywZXGY`_+*u zqWq3(`|eR+7}zQer3=>VO!9L+Rt;K>J;q$A5VGtX3q!f1rGC>iD70B+frsGb1p=+ot z#5C^sv9{#|YNI@XG^x>7td-1oy9Fjh*HXeB=cT{DLV>4btU+QVLu|FfF8s>2{ML9v zw{6>2foR(=*%e$Z9=mH5AXJ^B&bC;SNBIaH#j}^T!qCSh7_vdqxRngs zAyT^BD%%PfLfi2TjnU`{+t^hu^|~n-OCOQDLdmnfk^bk4u91o)T)Svh8~t>sb~XA)adQte#PaLz=58krrqamYNceKXTq0lCQA zB`qOSGj@eom;fC%#?otj7Cz@Sf;X&i1{N zYs6gBS70j}6S`kVM1GU4>PpNxyD6<0J>F4Xga~XHSAwoDSEnK9!Vw>tI-fP4P=V63 zFI^Pwo5_W*gq%t1xjwczXu&PpP|8WO`iv|Oppm#~nc@c>sOqlg&I)p|0$ywlD1Qfx#Ih)~o{||-14SYq z9&oCqG3}I=zel{DMmr-7aaj|w@>B3c{oPdB+ym19Q~O5hvM^aHLJRnI*9WO<%EIcD zNsJ!9A>h=@(D3QX$162!M9VmX*_<-T=Y8d33dkp;X}K?DGFPMuq6C^rWno$&K+H%q z>;lBtHt3jA095OgNdZ*>)}4*Qu&&3ZjLB3{(hZ=R6DFhb zw3#qTI}vVGCrqZouoKY*l7z`r7`9xMjyVbMafnpLrO=mvsuCt0U4f_v3&U2mO$6m1 z)%HTr0;)1qr6<#rtvQXBpzGw%B}2mNAU;w(UBEBVhr{IIt`~;@$k-#D4-^r3Fi_e= zoc2ca={XW(JJTT0O&WyfUhqRGN;?HAwIDu?E7u8GB!=Osi7%W$E443(=Z~~KN(XTH z*x2lLI$ED>3Bpzzzwp2v{PD`Bkody5nhMH ztR#haC6rFGH%gK7j#wFHXvfy|Lix(TD<8!{*o{^|XXGtFKgmi?^< zW}es5H-D`^%MZ=mu&;$M~#F zOosp=-BXt~#IY;cB!u@0AEgdcf&o&RW{ucbTs3&)p)^wCmu~TXQhg&} zC`_u=0xA!D+KwJ9_8@N5gO1>A&gs2b{g61IV@xnuhwM#!LkZeETrNxNWr_D zz?==K`}93pLFpy{v;W*yzQo|QLN|F`;P=oA$^&s#r&BV8YK($xLe;A^pUy?3 z9C-%}T3_k{Mh72aC%7-fU#F2MRdh$Tn)Q`fxC()UXRX(aJV;nJKJZ&a0(Sb9n?xds zWB9z9vDPwMB5f=`yZBBby_XV6hzoi7930RN1I!$L0LK_UuS#LU&-+@R+ey|mBQS;7 zmQiMMY%+`w9yAYQMWlR95{c_ql`0Z%h{U!X=UMQXYErZO3$ws=CS z8?hm}oueZ>&*b}+hgotX<#QOW2ts`!bvnh_Y5V^-RXkRLN%=sJ>I;ejV(&~=ya(#=tQee9-Q z=8>z_NX}#R?)k7r-tLj7H1byQ|4KEGG^;f5Aq~9E16MTghc)nIHLy$fym<2mHE`Yo 
zPio+Q(!k|vV5hp{0~$y^Gae_6`yq{^A!=I6b~WyPjr(DZJE3v^QR9}Xajk0HeH!6?g0a&@6i|;?pjsp_ z=+PPDj@WRY!{RwKV1*#WWZ0HWPPoMoV3J%=FPc(m_&(ROh0O?0GE07^VFbN1m=vc! zC8)@`)WMH>C>`fPq|#&p^$QZ<(rD?1emN7(n)}^XS;0&4;hVor-f>>1H%@n3Vg~=& zLX*b1Zie9&n3>a|czCX0BuTBBVw+S|m{fg2Oc4;!nEy=%w0-rNts zW!@{^2nyWaemG6bCY8MF!e+ZjhSD4&|H++JD;s+>5>MkSHuE=>A1AW0?do2mAUoD` zU0=oDP7?*D3z%H${%OcfTxto|7j}k`d)Iq@{kSVoqr?8F?rM+w;qrF=rA6ING~OiY7UnZkHTB$ zz@xvlK9BxD@aWCKqparBJo*&lQE|w{!{f+f`^p@b! z?++ejHJ|3urx=flLoOzdn&Z~v(S>0=3Xxt)9@QK!Cmw~j&VffyuFs=42al57uGkB! z`81C{#duU4axr<-9Jd~i{`fE+g-9>$D69E2k3PkCR2*_K zdDI-Y9*_RSFdl_SFC~v^4wn;;!dvISqrbjBkKPnK`fr0rSi5E+-y^x6Xk_A6TD94+W3@f5D@y=F>d-6ys5G$i?JQbKH77`r2VU z3Xxt)9@QK!Cmw~j&VfgNZG9g7cfq6oCU{hs?B9V^o_!d9CuNRg9tLQhefSB9{V~ z))e+qgGK;E0l+-k3ft~p&b3BbENWc$4 zv&8SI(1HSsz3NV+((2`Aal~ROj!=_O{l_g;cJO2sN4PL!!s~XL)0SLZ>2k(g9}(ZV zueZ|vs}hOG247PmL1_X<;U447)|5w>J*%`%pJAKIBwXIGwmhPFuYuAC3(jByD($FN zR9VEL3Nwa@+1Q>9%OWUr=qrmD>o1GY+eUTF24xXZm^$q_oWGcL6$)h#KJSEf8hyP7 zDTC-Mi-=umR7OMvD=ag-G6D&C4wCl47Ax5+euCXwTO09jL)Ly@$XYFFC2JR1eO39; zsB`@$B<_?)DY=T8BtAKSJ|DiO4kP;8eb(GNk1`QU8_8Hzlfu>~B*q{Zw;fRxkv`{p zUF6yXwOVSPHci*rD3MzAnNEmwS`^VUxRTAQ`NZhxoU39@i^k%0CL&rd#r4tJQO%^r ziq}FY&u()@$YI8=!Vqex_gfT`>~H7x0@NMvR*?@i#;W2m@rg9t{{#(ixcF(`pLR^X zzen;ruS`ochVdk{NY)B&Wv@S}O-cgexN2uv0&%6*eL*TAC;-93?{I4ICMtE@@5f1# zdI%NFl-0Psh01B9rlPot4sGd3-J^S={GZWB_9VTv68b7N+NI*q4T_~bQFmhXo{PF& z2)7FX5p_qZ8A9y3<15PpbM5xFOM3bhd85=rzqfbNJ@iv45_P{WMbdL!5X%ZjA(9WL zmM&8{RkV{quQ5zTNghK zPJ`^1tCAo;S0%BG=DiU43x1`V_bl@L?$qFoOjWW<3x3Qs@;;3krLd1yLn0Ne52sRd zdy9X5sz%O7AlJR^T)*UIOzNZK7q3@eWFS4F?j{~1r4fr`xyA3W0c*|z{RlEF4q-!O z@wvZ%(05D!(Ejq8HgQHdXTrP95H^%}cPN#OO z(f$V>TknB1=l6!tufO>~@JC(MW!Rx`q1RXdp?b24-s8z@k30T1AM|5s$1gvjWvwNEby)~47U<4tndK$UGD{EMNMwCqh^+5@T4dG0YpBSQ)~M0e(;`bc|4G1EKXaLgtOL{E6;}CW zBdb`MOCzhk_7ch}>_>ZvjIZ?Ijbzo0A*=p%a+!~vpHaA?ZWRe=%P6!rNJgPCw-4i{ zImcm8t}Mxh6AxU^C+?8Z0aN?T-$^yVbI&<-XA)coOO|(j;+T$tcmy~Qr~8(lXw#*S zt9lx5YX#fTCILt%M)2PqD3vm;O@z#HVP^m1LpC760{0@(CcF8RqeQkJj< 
zvbT~pA(Q9mtvoW5y_M>hTKORGRwfT6mMS|O6Q<8hnq;`8m?YlHH(=?@uO!o#Zaj1e z;YPImtosID+2|X1r3kokU|vw)z{%*UBd?_v7VApQni`+Bqi^73bszJ!uBR;bkQnQYY$}YkBRLq-1iv?= z-uGOX)Qk3-5n=G$+;!E{C`62 ze!7kxL=2&WVw;_jN1Kkjk}>js&5qwNw_Cvw#CmzF6iZe}lZjVwfKK}Os3YcC9&-AM zx#`M%dSrKsv#~o?83ih1_FEyo);+t_N%mXmp~X$#wySO=o23cwuUK07HOb+|Pjf)5 z!^#dd8GI(a^f{b;3z}yOZ=ja^pUDm7aeLlN7upDpsj*X1H;R+F&5zMaQa6tx@~7?K zKQoinjU>9%joX}A;8l`5Vo>s>g(SzCD1O%JMr^OcE7mHTc{!t{Wi!Ju$V1iw0naU zg7)hhjo!GmGuCaV`-u4bnnmdK1JmPEqsUp5{`ANcGxKjfeOBMqnpH2GF5*Z5b@azi zShdwXDq=^KsiV5u>SiTbpd{0hv$%6aDedTF$&ocS+*s<6QlT|A%Yi#xmcJF8v1 z`IE4EobCH;wMyfeS9bti+;2AifQliHs~FTRul~Lp%ByeW9$s68OS^;dQzV0ftL#a) z{L&jjYIMu1Zg|1lKJ~UwO~f_y0A=F3nqUVDrr&_0JO@lI5g`T^IhR|dJMfKyY@mVCP#4(2(tpSK&YE@G=8x^ z8owE5LM3W~4TWA2avfe*_jVwU)ZQ2>ts5$Bwi~K!OdH>WR~e!e2xnul(iAFY?yO{pXcc#DGgL##AY5)vZOv(?i=h*Z6gQADH{V?V(|m}vZC=Zm zo0;gAuj=&4n42qMjFd}q#*;|X2`C@aN!tvb^tMYv%am(M9%+a^$s@Lmqb8qi!P8jp zEQVO#S_~1!yb9gk%VLO*gc2Bnxk5?$ZR`fog?>Y@V5jQ0ENps}IO_F@BONrPO+p=x znSc;RoPXU7j&2YO26Sh3t0=-fB{6={o~DxW58rf0QFc>9QF=_5P1 zIz(c}iaUJj2fdV&amxuS(;`I~I$Tjc^ab1wjB(H`4oBeR6q`sXlq?fI2`J7{;jA>@ zk-)MBu`-g42j?dDKxZ7>KFcrcO^!Ef9vs5OaTxc=kU#taf%4TadD)8t`g8h|w3xvB ze2~4JILrpS{O4fp{cRe7PDYc>+Y6tG5E)17gDSg9Ey?t0?tX+OwpX_g~8jAySu5H~ZqY|*Opr5q9?>pJ$r)>Fup}v7GKFh_goso>R1t|H6 zAy?cD+1g$LXEbE0Ip_m6D{tRjfqp&tegP|<>?97J>?95-H5@7XTl2f0*Lol4#&CaQ z0)ArsZSb512SX2VA7Y_w@4c$hGhksZ)Zc%8vMAc*g6AEZRbe6fBybBA%JAjUaahour_*{DrRwyb^E!}U|o z>pvSv3p0*Tn@UD$J}gN298$iZIHW(L?A4Ws;`D}5DPW!5fE2Xl#=oTKl)wCwFDuHa z;`Ml~@am><8o30)IjaiQ&n>4CCg3Ck-^$bxyD3AL3uN=#buf~Sry-RnhUQEmtHhO} zeEgj>nSHgNh_#$<2f{Q0JbgtWcZoU2A3U_fANUbhmZOK}My`d!YOBxcpN{Ct1MPOY ztu0&NC|b+^&;eWLkmT82^8C`a{1BYC84w+n@!khS*%cYRr$iEH8d+m0@e;2?a(IiP z?~!~)-TR=M^~_x{YQMRAYRB9tQ{5DX#w#&}O_21cUi*Sx;H`dv4r_EluW(LC8aq_6 zNC4KuiPZ*@S-rk#ty$HyzN~cd*3czaCM4}-RyEeIkTjNbg`}5vpO;&>iKIo<2F0Ty zn^^WZm&){AylvawQDp+@8wd&yKYEVf3dQo`7f7-*`8GWjpW-~_*&UJv$kKrCGU-7Y*7v+N|UtY}WqjT_> 
zjm8-`4RCjk!b3C0WA=_Gb*mNjvM1;QIk1{!$l?YcreyM!{xh6FVLt4sN+w_4n#zrB*<$QZ0lSe$SnKo9&lPW$mXZL-_Tb%SK7EvfXxpu6QkL{kDs_kf3J~-Pn zg2fITLY7~ZTs5=QH~2^xB8EzS-c%XD0vDnTm^ity#l24|L5fqK$ezX|i5T9S_trAD zS>I5f3Yh8}7H0bD8#we2)A*e08`Ak3a>P|iLc2V>OGOi1cj=G7t?btv@qc&9@5RZ; z=Hrzy#5lTVFIOsOcy2nK-7y&n^n^(L0NV|-Qx5n+FU8OEnw)OSF(7zE{i>(8g=MeP zKcEuOyN-_#*y;rw|N?;x#le6 z6a@V42m=1!m?78%-?f2Fp!5X}UFpk*7YcXk!>(q;FM;}ujfIzuiJkkP01|@P)}jD? z2JAj*9zbtX50TUd)Q13i0d<`U*_UM>fW5LB1pF1flE>W3$bG$xjPo+a36eU!vx8nD z0IVc%B!kas)DE&IoE=%8B7c0_9Nr$Ea~LtEu_Rl3Xr!(T+2YiE$V)g&_GdUxd}yR^ zy0F`eAvvRy9V67siP-UU=!cKfJuHx9TgE*eN`rKIv5%^k|4TXjw?TRP+ddtVM{vaO zb{!GQ{I_#ly`!THn@+=gKBR{cj>80UI8H{)svkX>hT+JGjDOp75T8$aCt}Wv`sT4a^6NX#*k2tHKZodYCCw5mpHmWa zPM%Vc8>IxMZhr#F=F%26K~Cdzn#&@W)fc$7AzdI*c#mWO%e1NsaK{pHW@det zxKdHpzQk9awvV?fRUxn)^85t|t0AS*2`ytdTn~RO%3_^b2puCUnd#%|2Wuf_QYjM5 zZ(kIv7``a`kK)L6*^r-bFzO+l%3~d|G6NCddCBovNR8i5`QJEQ+y{1R9n-X{(^;y+ zFnx3qo)!-H0St(!^XZM5iu{Fq&cnz^7;JW;uBt>gNG@27@Gp`-PAxK4L(T;8!`Iq~ zv)X{OHN76NWLp{jBq>tG%#hUs2i{9~TfuVMR8N6t#Gfe9qcI)q>$=jx@2iuJ0rT>K zSH*g|4PJ@jJuWP9O_AQ{ezwnNnnHi8oD(m1r_`V8DUVs9Y8VP$$65E*)<}F;XtM8& zaubFA5?Cu z`cU4d6kYZUO1bGGl#pL;m~zu~C_}r#7%x|ty;13YreChJ7YrrnsI}-@JlIgGd7PCi zN^n!V%V+$CR?Ny0Js4Mp{^Lkj z9&6|Z1kx)a3&BUPFomEMJ(v^AeA2RaH7vqZLWcELzUt@krA;PZ8W9a&q8Xj#9Oh%X zX9P8-ldkm%@Fv9y??GfC{1|MQmNc%yE$UY`fu-I9E zZbOgSwnKAckPy+2_>YH3txcpX3XY+>aqE_Lx2y8%u>(+GalMYc&6bDtMu>D73{hF_ zH*>)A#I`D+n@r&a(TB~V%8$lNr3I1s%)x?1Hp4S&B|;^jlgh`5Cmg71{rRxW`6p^`52Gfgi|)ov*79}{JIT2yMOBcK~@s4#Tp+PX%u-pUQN(r*!; zPe4~v&2FfkoE&2zyd$=>{+hEdQl00$=I?w`*IcThY3H5yn!oeXuQ|5qSUL8hvQ@Tc z#}1dT>>k=9L6hw5^>+XTaX~xqt|xT|1}*LL-hp>r`W-MF0M94=uEfTva#gW|S6V2q zkbkHx+1L8Swz;W@*KWoi7rHyYczN&6FAm$Cs7DQ%MAl~|la{wXao4zFgh zR0(+^DWQF>`&^`}q#+nAiM3h>E0-96hM$>#<_&D62K-`S8th9v+R<3ql=zvXJ@V{3YHjZ7E6 z$&78v_LPpGTPwfhxn1Dz%?^|+Nx%T}=DX!XkijUY3&s#d-#2a@>vXsDMn=cR)v!YL zH@G-nKF~W$P~4X@Xs11h?4N$43`$GNkb?9@lg(5*DeJwXq^|kY-d86*(*{Pf4YuZxY2TDFIFbFbs zpa#(AV2zL1iyOeXeNGO?%ZI27!b32dJ;&PR{ujT$C4{^uTP 
zeSWKByTejeR)eiq0Nk8n(XvNtBil%pOOoNf%9k3X+2}k|`8=<|^Vz@CIx>A-YZ-Hh zz`QNF<6OO_a{wl);TTRHTgxnntqA!=*>aOgZ7mxcv)M{W3$X-DW88Y%8xyWt9VF5m zVf7*CY#Ou6LW(kqH@MX=n^K60_w4Oq3d>r{Wb|?m8^xHr8{Fepd=G-2-0stoiQ#ZMk_#2nPurS&QPZ2d0xqI57RMCZ}+_p0i<^ ztWRa0g++>!M~3OdY%__%mL8DWmL7ZPP9?H1#ScmaalwITNFQpkWM=ZSN_s$|D9Od& zi=NZP`e|P(=B9nA+}yrY#@m-F^C?6k0+%(JpteK$L=pwcuR4c_@|{Di{vT2#;rxWs z!}%$DChoI@ju;PLdw9smYbM(@*@bpkA;gUMl<6mOi3^d&c#7`$g$k}Lr_L(-TE9xh zSlPRQT!ymq&Gz8oKV`P^!bVpVdr%iMy}$@NNqo zCUS~bTahk88rvccTUbt2KO#Z^BFT}Yv zYS8xJayTuu#CE7ap)SFZ-ee~!2*Ik7se6C+JCAb)C4Oor?-=91G7`H&1%cOCHt0#mfeI4E*nyAsm*1bH11 z0)9WE>`%*bjPv4s0)-gy);!Om8Qo;nVtyq$cxmq*H^b zCz1eqxC*RLnKZ%I65e(2__6eL@*}akq^?G5COJaP?*6k1bug1rWne#}ndwg|aPp5m z2XtPw%Y)rQbitK8TD8X;r(oj5|cZxvM(X`TzD{69fXi@Zb_UDnSKh6S@w8mlWW+t_? zk)5IYFHt#Z=93uKg(4IQ`*DXW#f+21`Yu2%9AV|>zv6_NHEBo|!K?O+QcVu3U_BJLE|fcJ)X0lnKC~NV#t&bij3JTA^9g(O2QVfm4;d6r@?0x&Pv#~4pA&iS5#@(Q>fK&OL|pUiFFeq0ABY0#j;9XpwNpC zH01m#s%?i>vJp6{Y!Vf4B8<1bRs((&GtZi}#0dIJ%EP55}%fgWv$WL1j z!sXhNy&_bp?XoL$GhROH5M>~?xR15PcgsvzoOi69v{>D~8!cYbiiJ><+Wz3VY^UWuOAInQzil1#bzaD}dXn1AhL>3#JE=SU(%DWfAES&- zyIEBLZMLaS$7*csK$A{3ZCrI`xcj^9RvT|W;-_!F7T7PzcjX|Z9;=-b`g^ne+KA;W zmJ-{o-7m0H@vBejeq97Mj7dHT`*ni1-+&R56nx{<+_=zx0t*w(yot8l6aBR=XdBty ziljA@2(Spkh_vp;0}v?HZ~}%D!I?JFwl~c*8L(o(0#LELa92nv;O|Jj z8{2x!Ts21F@saL_p4Yi5?0B1h6T0f`R{MC8$78*#!g99_h_03m7(Rm)Kc&8EthnDI zdS!R`1Lo}yXny}*e?MP@tsm)MYi64~W6pW7sPA`o)c5OOkzeE9&hQ$E#PEe(ld)4k zYFB%-qFgh@jU7LhEsjob0YcPn*c`_;llxK1!)7?zqc5ci{ZaWO<*w%y`oi(ZZRL;j z*>&?$>&dQDO7(o3bl&o=_DvCrm&FEM3cjo#cK^jEW6HBgw#wsHWJ580RN@~c^Smf3qGy4#freSMM zhD7rJn9wlo`Us8h?eBpQozu~XEXY5Dzq(a^{zq>s%8}wDPH+S?_oly{+p|b3GlG+f z=L$~Rt0UHc^as!oD@LR zx8Nx&&+s>|Yp=N;WlX~Q+3?93LIN1swJLW=WB>uk31v^jg9P?!Clx5%ui|t$>>=m1 zsc(U!;;SAjc99g3JtY=@EqHAD{fdLs<-9o!Aj#|+!$TeK`vA7gZD#!10m^-tv6Lyr7k z+FjvA?fbUk*DXL#Udj4=8((n$*=Ljs?|2)!6A zLvFXuyeI$jCGBAKs# z+q!#j%1B(!-|#*1d3IOvArp-CG_zM&f;s<7Q9*D-o{8LmGK+|nR_@!S?7jt$@g6ms zg&qt$dT=80w}>_4-E+#O`z3eC#I)1ofnACr$sagJKL{hV)UI8^lRNq~cmX@R0;Ad0 
zzz%ssv575f5rPa?tPk13MN{dG8_my?<%laTVnC@&M6P(|6VZ%d>`lvR^O<}JpQXK5Jxy|c|GKWhJpekni)SU z7et`no}127oloXGt_aD2*~#a%S0xV{B{cGcCG*wllf~wFsLteWaJH}$y%yHQ2jpL5 zFieMP*DN3mcZaYX@r?u+KHLC~K#w8bqfo2JSKgG%&MQg~>1^a+r<$&ZB-4@Q15pA` zGEEq1ktB0)K9j_Y^x*)IKBXnK%}G^VH6wLEHQYY&5ikewWw71lLlEPDTPZ3k zlUvCVCvmp8YZ0K(2!mMx+_60?0Q}}7Q!kn9u%R;e5bMz%C&~K2N*UBRK;|1fEDfSP ziU?U{0{5CCHkrWkNyaqlKK;uUk20G54gbzFYFDAmQu@Y0iU+2j72+o=vD;w8bijdy zd>n9~8LT4YE%X++UvmbE*-1?fn}M{L7%E;m3;8&O!0Gnfn35t3c}j}LQ)WOC@*yzw zJkJ~nnbIE#w6m z4oqM935WU(^)x0GCbdF-drbD%RLzmCzRs)K&=x3M5LN>u)}{yBAM#A`Sox8U{P<8>MccpDwaZ5G%06UXG<0ns|4Ad*CDwE_K*CaHbYbsAjZ0eIN0M z+uhQ|pp&f;)D)d3%mQl`4^3ouwYE+Wo$@vU5aCCv|Fv~u0R|>ql;Cod1p>|PRIO}$u@IawE}O1W+7 zAoix#P8r4c8!Z=*ipgn|9K^<*g zK^)%PVL)8PX_#69%Th<4BpUWX795LuPvG$dw;{S%1;{X<;s8T6twK8e-kh{oP>$JBuL7!M2haKIWP(t%KIZ~24c$k3~vvUue>>OFCu~* z$md2*C*k{*lIP572)qz74w4~#L1faOY6+X%2oEt57M9%o=;d1`=e{yI~^G!I~U1p=6-%;v-^+R6${9 z%bLU#5v>KR-KrJj#AMvOf+8X&ncxOcQ2mX_C?dW zlk?O?Rxi?~BEH46W)yO|!i;s>fuRSz`uR5|NNvK45~ z4vJ^8`e4pc&u1LWSp>Al&F$F@gbowyq}hTf+4W&M?#*lm1^)^=yuVG8(BAYn#jHWz zDSV!YO`l_dNYE>m*Z4Ha+9xh> zVrY{@p)-45EV9FerbQ%cYXBRNbkL6toLCF6G(_QWY+yOQ3Q>hL&(=5CU~^IU z688TZ$?1);fhQ?p&ohh-JjuWYF-f!5o+v2;8Hl#S;~j!<;o|fN4N5%8vecCLV(7D? zM1ePo9`M5G=lrg@sc&}eg*Pm7MacQ!3d{P6d%86J4n~wvQxGw z4I%h&b!Tj*)iPW?{~eqo0Xgp>LX%zfMAI&^d!$AseCGt(*hLrNxVHYH2`*=N4L)N=t^Ur%`+pJlvG8ZN(=Vt6CRXeoO4OyD~?4brn~QVV!?R+DB+Y(pdP$D z$y2eHLj@st;xV&b^=9UuS!5_-H4qUbv%ifOrz4Y#%g2gm&vppF5ihk@j$jyfo{c}? 
zZ`SE526l2|R9?7tPz^Y*@+Po`ovI;fr>`MuM=eu%1-Z9yc(aK54bzP0@b>L>5_5`DayyI@=>tI?#0-fMyEL;GQ4?u zlAiJBNL~6)6mxyi(Wb{IT6H*3B|8=h4!5MSO?C-VJkXm)=B7FLgaga<`CU;?L{}D| zpSrTpcxVE5#VE()7;nhyko}uXnQT#xE`}_ynN*+h5{Z+jPg&Kn$z2K9FXbVGbl?^G z{@^Pt(!0{#(HEuIuvSyU@2TFE67{}!eF@mgRYMcDbIq=h$r7iEJaIEfJYF%WXbFBkM z3cTAk&5wB-{EKWHNYnntSgx<%bh(l_B_?Be?4f{?@KA&oLMqbBg)|xJg9kOuesvxyXa>KUgMA3omqDE|%2jQ;rzVv;WwgBet3CMalC| z#9o`Mv5sMHdotXJb;gAKl$DK-t8Zjsd_yeg3&m>T(g5R#xbVzkcx_^|g88Kd1zcI> z6$n7$**3r+QJOj+x?!2|QnuN*(@&JyiKJBy<57gedds^R>-;gT^F&3K7?o9-2F4_T zWE+J1v_Uv;p@l63*d@?Tnvi88-OmV?W-~MI2gYUOt0U!Ykx2|HdFF2?;yNOiYOJFx zLmDbfnUAQ#3?V5XGhO@{(TrRsq8X4lErVR!Hc9$?r~Jwr$|K#!Ct7gaURAMG z$&tF9?;epaceCnrI_(Y(-S)bYSX0OzDcbLcM{4A7xj2;_Qd6}Ge%;l-;sUS|^Lap+tf`c59Hua1?~DfS1nr5;@zIIgx}2#AhKAiZ;TD zFlFxhr0BM-hn0dgK}=34y;i0m|1L!5LIs-AkLvx+w2;A1ptFvs2kUXmoZw-k~ zNoH6cH)`=%t3=2OJoJi}t0jEUo>464y|fQARNw#(PIl~*9upH4e*Hj=(qT%}!lViphz$7ph>IVbwcP~k##C!;P)C`HAvpp<;< z$?SN|S@W5*mZ2}0Aaj=Ei8wp72x$dW)cMw-J*8?zm)*v~@%HqvY1r~n+cK<@QJz`3 zjn9NfcEPUVa6M_8iWIgrvbbi9zmT!R#K*7IP~OD=PuV~ zv|@cJpI)Trg`N<>KDIL{ExN{Kuj=Z~q_mvB5bUZD;Wz=0cKN{P(4dpa?Ry7BSoQ}% z@3?{xVd^=l1a4X&#g*U++e!2ofCXu?*{9V)kf&!b1$F$u-`caLIJo6j40X(lsd!Ho z7P1z|xB5lT^7cpV5JcIf62iMQC0It}u!yF1UoW~<^~~*b?BN>bl=5BQkzRnVT~2;0 zX(r?8QUuSFv&yHB45kAPm1;G3k5jZF%-R`K(yB$C6Z#qycdIfF^N1JbCl#OXv3yvS z^hTmdJ$|m3n<}4w=-Sq)t&Ey1-c)s;$NiZjL9(GX4UO`)&2qJYND#s^024DY&j{ie zv|D@^|E0m$OY4=c1#AsnpBY=pgPSt;IYxp!Nhzn8noiX6G?}oSXP^}(s`m_Bi&G+E zK_>>CXTvj+&LPK-m)R5nehg? 
zI(y6P?&LYmAauko}d=0t8-;2JiIYFXpDxxx;Il^T`#RMxz(g;`)&JPriTay4RPos#|I6qkS zq3N0*UOweH1pYGr+h6s*~=I%ZII3RaiM0LP(LKt%L*6A{r{^B^9{9dZ;Yiz#f^ z?blDc&5p=!viLoA`;hdzgS_qX_b6yDzdAv5o8VEinifGOI32DykT9l(=u3qsiLdvL zGS_N`P_f$R44vVTYKE^`JHz(m3tUBcYU|uMO8wqfyx`WCbDpfvic@6GZ&d589<0AFX(RX1&aabgB?%3{X zPBx(i=5%%|N5NJ5qdE22N{*-RLd_wJ_qUzOTFGBhSwkf5sZG3)_a`i(s@KRSp3O5+ zp`yt3LZ8@{=I?q8eUHs#vr9slSKt)GA2WVSx!(MaSOi=S+IP`ce+dUrm~8^R*=%#_ z-9FGB=(bvPsbX7w{F|o>CG|`|Y4PU=ybU7K1yjm3pjpsZEWY#R^_^TRZJpGqNfy=O zJ1;(TY()@@I)|@Xe1kc)$gl=nGLLF{dJEb>urq6uO}{>TGY46ca0w!fmZXIFNf2Uv zGYCUwu%ckl@w%w{*zpR=%BIX|d1bK-?K8=aSD~#3)+JwQtR)bA#9=CQ(=p$#HCJ7an}f~z1$X~Fda)zm+d+^Mdi!K@Sq1( zDJ4ZSVTCcO*u;qeqez|-}(%&jq&Wh(9(gmA~p-3Bv)6<3awSrqsTAL@h5qW5q= zJACD67WNgckFSj}`q>e5q3P+v(b*YsZ{u0A7WF2#nbVYk8F>2v_fOU9FQbfXFv~9< zHwm}t`0O;{oY(q7oE<&s8#_BjrX;fk=@lJGSZgAf6!z478%zp&@=1!x%e^c^b-fDP z5N>@nw&F+mpY*8LjuEN2P8+zoQ}9LT3p`x&F#v#U`fil^+x4@`TV)pu`&WG4@xIl8 zLt=y|W$CrcQ!SrV&3*PXRV0R959fKTJj-x)7u1CMi>2m~3TVXBB@rgLpRC0Zd7wre z1Sc2H%x+;I6gwW`fTl&3s?LMdJY-bW2;)c=2Ut~_L0e4>BGQg^rJxT+f(0tXlHQJZ zRA1m*cU|#afW>-R9svyS?PH|aNmnkV}MAkS3HA;H{DwP?5$Oc z1tkE(01#eyw?PnYV(Bv?30Sjl-l?zGgBX|Ot{6=F92XH-@kI`%v`b1HM1V^f+k$=d zbu0)32`Lv6!KrXVODx{|14IJV(-{dR-VoG8Rr276TIDAN#s)Ur*#&)fKYfLJs;SmV z_f^V_;ceei*YNh?R{7~6_jP7K4Rjx)Eu*I1j1}mu`t|^z+t>rjA*>>$eWjE#oh6*B zOq4Dp2ymffOFkWbQfXf(lmYV)A8aCBFH=w=<=3V}u&#U4Zh&>;dqIPCxpwF65D@mA zn@(E^`t>EkzN})k|0Xow#d=rrP7Srv{-=wKyR`pMJNv&Y+ctVrXvqwLl*GoFF+<2V zHi3&KcD_2YB<;KaozO_$`I$jxP`vObkZ2=W(k_m!q^m$CC}?u)*U zWea`J&T~`c;iva;qHoT}wH+=+-`shbpwVGi&(ZT>odQ}UwgWBN1>1im8cTIwc0}Y2d}@PN7-w9p^28 z8{Q$YEw_<{fp_S7sgiJdL5lHC`Pl<2+^!6U;0`oxd4EDQy-D3^T`;16ho-LV0TpwQ z)~6}^Z$(mNDtPvBXU*0}2&3pO`aMBV_7jh+f9p7jtdXh@wCS^6Fp->+!{_e%5%YKO zS5z8rv4ZARRh;xCsf@kpuj5T4l=j^2sy*hL6EAm_`^us4Gi+t6x zqWBA`&}edi5m0_7zhQq0r^jO>EO{31ue$Kj zzd`}p`ahO2H9%zViW|3*|ADaRmH(4f$1HBzEzhB;mJ{u9@$i_7QAa}j@Wt=BL7Od=Sb5zg^*RC*$g4)BD}#6 za-&%yQ%jQ+zj=~WYHF;bAko 
znzzXwg17&K_g1wxrnNoYxG7LQx-TBXvEom+g;cOltb1+ixFLa#EmNI@V1sLv-j>%c3pR!=XusH+=>FDLimQ2pQtu!TYjSM7~u|0{78#}m@ zkPH)gGQH^4nSnK=rL0+zBi!j;PF!FmQNVPHAV2{P6p6q}A_p~yAiy0IP=Wvv1QA3K z^&q-Uw-`(iAc#mrJfH7(?{n|DrP9NW1M|n&OLgx(kNx<)_V2ZSJKLR7T}unO^mIl* z3QkBevMZEz`r7=jAoFOY&~)mx!fqG2UpNjqa!tlWnzA6o)WceJjTJ*alIyYs^%|MLLhQ?zNo3=EplCyowqKkAO*wJ7Kdn8t63#Jb2a8`zs$DgvcWQXJ7?zKH zIDr9%j#kdw&aH9cXnm?aEJo^~-3`HqQ%Cih79W1I(J48>g#5V_=zZFXs^cWVn`MGt zxVb8pi8IIzsVp_QE~}5k*Ov)0{Ij6GZF`pL+trk9Z?%#7=BPofJgRQl$o+4hECgf? zRBP}Vt-(;Ws8&hRLl!1iJ2=uIT64K)jFQ@pEWA&FLzqXldof=#SRj{p- z$9=8GCDXg9!y*n*@Fy86hZlu8hUp8^H9kXqrr1&opTyWTVtBmuu}3PBOKE zY2q|8gsZ1%D}w0BVHt0~wnA#BH2y_7vzhH|hLdC}9(#!99aZ;T<#brIiz#v!ktn ztU5?o*4-%z;f?+;DZGhw(IIC?uI}PP?Lk|MN7>$-t=*c5jr4VTYpl7TN-mD&YNf zdKn6@`&_^_oXyzv82i)?M(s~zIjhgTA~~4s_H7=Tb@;t-&^DQzn>lrjKH*U`jco1Mj!iqwRnx{T2nsEMZ|vlY@e*cEA+p zwDojmk+G&7oHaQ><^1ypUHK7ZB0r1&xc3WYCNI zz_D*kLHccz8kNG}Mq-e)VA^4SdBCfbjvB`2?KLD}Siu+YigY&mI^b5|i&0A^sG(EL z!(yE9Zg}JW5W%o%Av7#hb|H;bMy0I6x*KT;l=iu$?gqaO{izGdJ@1sGv)K&C<4o}# znmWgrLms_3c4urM|3jFB%oe4#$^9IB?q)sILh`zy4V%A1^17&P0%1fvz=-0=Omy-; z)?<@oRb{UwV-uOx+vmo~Z0S|QNj+s63O!{*7$Dw;{Ydc6>ImToYO2XZ7?d;MayFao z0}I1{#9NJ@tY=N^0Fu>9UJLD^?ev^ks~t{sQk_Zf*P5clV%d`71Z$7Ik8$c;b@s@6 z3!gz0(#5|_s|`axjB)x-;%Rjr{QvjXoZ%pasDJR4$N13_BmC&|dZ+-Wid{b2gQkhBjoCa|D@IGEo|U)jUL;eFMe zsvn$@NJG}mm@L9s)6yvd^+@pAJa+6Rs*QwUl-&mPTo_hBk{%+p(UO5xOKPKZ5h22k zJ~b3{K}A_VYwD4Vw72*Q)zRuIYMh3TtgTf?Y4i6fO--Y_+IAM()>3te9gd>*PV$>e zj^Au6AdtS`z?Q&nBZ);J{AR_+Q+5vngZ101lb=M{cF9TnbscI7-S5RfhJWLM&rWGt z!>CWR-`Yd-AZR#^7ebhbI;&&b+E5j>?@lD-tsv}`n|5p#1>}I(n~Y*FXE0;v-p2bP ztoc62Anm1;1^}MevyN1_AZPi~2}<`+J~{&;&EK=L`x8Zr*l2ec5(R#QL>&qxBnn+Q zBsx7_#gbJlb`cUF>Kc}(4_>I8*XGBM~q{^oRZ#r6Z z1b3x-jXf*Khq{IFS*II~KOW zWlWSeNEVC;J%_a6Z;&zWcTL!pc4iGAT&@T%z#JNLnPN$tyChvv2QH!V1V>79*BOYj zUMqC*jR2BkJKPcbmr2+(JqzVN<9E{!s>{0JV?1G zxNFk!Ae}Lw)eislU)W&fzz>_C5D(U|vL%PL@6hjtTN*ieGm4QL+R}I}Z+yzCh2v2L zQ#xTEj@G~6>Ak7AV9Qr&jYznlYJ9zUkIkE%>6h?zsBW+c$AV&U^|jTFVIJ@ly)5-a 
zLH`aIbKQ&<{C4(4gr1}!OvS)ni>e3R$wsHjHsn30uIMQmp`gw zD}vvO2x+@6$yg`vYEj}*$mlP%176Y;D`-Neozb9$w6-gU)|_n^x5=E*R!cQmkgXMSbvXIL3u9f9Fs3!ZDS6DiSE_H-nTxx?TLOv9K)gXFj(_BIH zcIO9F%l58`{)U3DQh1uacFb`T{ZUN}qK`7tgNy@aXmOB|(H3G1(sTR=*u|^0BcPJ~ z9?NZ$VMYx$5Sr~sP#R{a7UV()#-}Tf3kmW9Si5c%0dm@7Q@>d7>ae&vBoY_WE+}16 zXUPmVSXlMC!_)|I{I9E%4lHneWP*A;A)TqUP zDrnK~+%?p^a1gnjY>nx_LZK4FUbPM|^_Grx-xJidJ=Q`NhiA&5WiqHGU|by6V5Ngu zlZ>L=02Ju{+CmJUm1`-?CHrv9i;03FTQ_RNtcfs0e;refahY7Zs`;XDhMwkJ}DXooSNuoAH{HInUvA`%#V9Xc8ZBDs*xc)3bA>Oq~?QxbYWEZqj$g6LJPr>_-^#HUe-6brE2NqK@1_$yMp z2;W{==jm$##Quk<;9fUMIG&QXD9T4ab2=LIXg`nUD(%GpLRnhSLp3x3kd-+emgcEF zK{Wt9-uSY$WP98rlk=Pbi4BD|ZS2<7cDt_Tz`$Z%2MJ@DJ^>g4i*Oo?NE{SC*$r}A zi6}&T1bd>-k+SR@gFc~S&rjiF;5nh?ARgxFuhLVC z!>5w?)B!zpo1R+KQ=6|V)f>xRO@>ItnhYhNM+xFfuoh%-IEg?K*us{5(U;66OA+|h z0fDp6Ap@L&X+4&h*2*FNaQ2aNiKTSw;(FZQl3(@c`E91+zLt+NW?wUcr@h* z;p8jV{jUm$_{!nB|7;be;elp`o;YF|1Zg@!4`?9BQtc`TT2?{IEYeVs#HCu0y?r7; zJ=pCdPZujU42qaHD!T!RVlWm(y)kbD?yeCi49U{Dt%or9HVK4jXtN2}~(EK$|&yVqYek_`vAJ;uU<~cS$?t6Y* z_xzZrT`7?awdh*C6kIfw@%eXw09>TC@XGn043!BT91&JIctMoPs*eh!k~Sb{?%ho z4D;Q+R{~SZu@TV51K9e^cwQ8vg^?A>hdIf?c6nX3j)Ro+_2fE^_N#LLQbb&5j-y=G zxSW@nq3yU$mw?71cS9f^Niqwc;s@+-QS4F)NIKDgQkpE4-Kvzsu$rc*U7PA3h`SAZ z4bF{P3txTzhOlCyl+yBYAgiM$-m(`ukDN>XB**U|%c*oub|~G~keEyU%gSEAzyc;6 zv%lKNIhJ@hgvvsXBSS)rAUv^TYXCx5^%x<=YM(%T9Kf?>C(dt6imy?2h@62D6j~XC z>-P3wr-YXej9w-~$H3W=l4yfe5l@DK;~QJ}$&*JhGnsn~$GD{9{`4y+>cvQ)@+mUV)cI4J&nM+J@*qJ;&! zw2Ln^EEV#-mN|UUr)|liqA@bLYsct$FmYW=MXry{gvx45MaR6`+LUC&Qjx?{yBcI1 zu&oxCv6jZ}L1L+Vffp4_3zo`03RQ6{cz=SWf;BXZ28?__na*RRABxh4qXU+UKfy;= z-Ia1hZhp+C(r)g+IUfvcQP)vGbcX_H@bScsC+-F1j}E@`bz1WJ(IIS{C;(qVpZ)&(*HC8^=9jytw>j#eIkbeMz{NdYv=Oe;nKg z+h-UF8fA%g5gPVt9{- z7A-9yG$lchnYd_Tqi{oq-Mz@*_uU*qH4T}e%%e(RO%_6heuPFk6m<8AcGaB1FoFp%N$ph^peSU! 
z9^`VilRFRUTgtIceB)TShu@56T$)J0Gxb-E7OC{jY$pT8D(zp$?37y_~= zLqVIWw}1AksaMP-69PbH8lRICpe%d1l>vH7_IOjk>dwu&{My{tZVjdSGVkd z1E*g>EA3s?FQf9E)o_)$m_{>Hcy`1f~NJWmL)| zAmhk7kg|{jho#XhI3VPkM@P3%Cky1T2OqZrge;_Q7T{%q;KC*wWCK&MI)2_nRL>Wc zv)F3`YiZ$!7t$>5PY3wZN*QWS9Fv2Kb7;7^VC(D$XVh$j!&j8w43BJpH=MHPLU!@i zt--RnMY9w$t(22cWM^wqv|xSjB9#b`sv-)k3tz1a z?)UB}+K<&2S`0)Bii*Fd#Bl(GsBy`wjiXXA2uFi4?&?>oO)LT3YSaCsPH)18d9yuu zzp?^si)Xw~FT4tbp!Byhx#(I>{Vp>(zlv990EYI#jb07zW?4mgwHIFv1;6T7=U2y9 z>r9}q1Y3R;ob5(mbtaI4!OZsNAge$NP~^~PDt=iRh>q54hJy%egJXZAg0%TbOcrzkQ&4IdZs(F+VR(lV@qg3x|Pp@xWF5WZAl@}~WqBn&` z)`X|M8XYa5eoS3cRhjpq$cW+le}Qt7ek-1?^9_g1bz+wf0^ zO#i=w{n@;6lZggk4rXcRY@$OZcur>VEYD5EGld@`mldv)>wcbCnfc*qA95_vU@Etx zSAaEAHEBoOB%+eP@(d$tv*%!sn2w(Jp-bJB#6X`85dAj*#)68m56IVve)-HnU;X|_ zufHK@w&jNVy|GAh)INegof*sqXV2n?-&;sFD87vjSkA((e9F$2x0>>to-G9!oxoZ* zfS21W7O5tTSSKToMk{-_9xa7yc9m~nwC!*r7T?ld6zOVmDcm=5CCDwo{nN}|E*nK8 zs@o&D;WyBNn=+itd;#bSOB3;A=8HgI%56x55`zZOI1`d-k-jWv1>3UVKG(idmr4qi zAh7cmGqY3t4D-Zcqv9fZAPEx!qqp=Y1L#q8M?gUKQj9RZ<$_mo__9Qenq`%c=iq{t zEjVLKx>t?sv0>v2Y8*XrqCQ}_6OYk0Dx_nphSaZvrn%%0^F#+#-7>&oDuyhD`Byu- zPZ?Pd2cdG!NJFHK#tZz$ktPpEjWm25yUOn{szk;RhR9S_2k2T+=hB#O)L#raxtN?* z!r^olsfeV?vh6J?d##YZ!Jj%S=G6`2dG9UyxJ+YHoFUuR8L~{vhHVhdLLc}C!QW$~ zv6-?xL%DVs@)m=@Sw4E!qG#VNgW2Mby&#c@?iA{rt={wjFpIewV`yVBkn5 z?p992V>~Kuf&*dgnf|O`rR!xo@ZPC03WLRXHPGd0%}lTk?=t99w}kgynN(scR&#jM@XfI-93n#n1+nW+*fmXk!)Lo=kNh7>sQlxF-SN(4ByC81qt-JMnPa()NpVv(%9r1<^7abem( z_8{s7T*3gq6{e2KqhV6Q$rM9NA1XdQUE!Fu*WB~%v7P?p)LioE{Jxnr(##(YA8+LY z76l+Z-0Yk*oxJ=(HLA7~bc zeX95!N(9p0S|R@u*ltx%QP%ViKF@j1Q^k+k!E!lHPJ~0MEPCw;!IjUG0^&)lTIaA9 zM&aX|xULM^DRih${5bXS$NgPc2|_gP>xC2TehXg*_@K2|?^95h{G z#CmGanmaOWg;ej>u9aWwv@6+I#UJ?1v+Lc7nx7KYlwDREG|lzJACBrfu9-#^>-3b; zB2lj-RneTC%(!goS{D~|J-BRafl8aVIvbtNY$a(6oJgXgPLGjZA*ZwKBuz@>CmndC>>VrTvBJI=86vwVxSh|h^!}f~*um{gS-qhPX2jn& zMlVXLezn6CE?{bsq2E`AwI6yLWjg6NDP2|@XF z5yZ3CMUX}y1Tkw6)TLLU1@u@?h543AbBJNh=FCj3^qd zoAr_j_gw`MTnAMQUI>y4D|`d9kV4Wx$&b#|)~+vL#|CgS=|J96(LI 
zeCG`KEO(I-Nqac=n)F%ktgu%%#NPm>(_6xF1-P|lIEpQ@z#qt#*LRCftzchStf)z; zPRR=?moh19QNlh8r?EA+Q7l`-jNPw`ch>}tsQ8#%7xb6Fs>>Gg_Mu^~6X$-+qDbW4QavY6n1oEo2W5q-| zkxWeRS4~Hl!|q=a%q1Io1z7^XT*BMAj8S+?-@8$W>t&_6?gEdk;L(@xK8nTROj3Pb z3Gs#J0pi+9Is^xC;4cXrI9341_b&?$B-jmuLt&(6503ADuHazRi1a6DSYF~tA1!HQ z@2=tP+IILW5nHZ%ba?&#KDw5O=GUUr}QxR(r?n)MYXk*?cyNg zfEijGq$HaMNof2%raVL*ej!AVL|+#4nh_CL8AVF3sWl(ytR|rwPKStyRgHtVU`~WE zaX7ysHm=h66?-xHJL=zQg#z_3y%3uA&H-&N2w7cmM9o7ln!ZOlq>xnhK%>+WOZfpN zPIB=oFw@B3UK5t(;vburtODt@Oy!i%ir@TQvW4UBk*J+tvr%@Yw z!EcSg3wDGW4%&o{KG2rE>V#SkNm!V9&ZDaEA$iDy-vbrA zHlf4naL1<*bqR+4-!g8E=c)Cm3c5jh>%7ug3D}wA`0bQp=N}^+#cGP~os}lcLzEQq z?|piS`_6?$(9r%SyrBakQ{5-tHYCC1L6+>z<>~^- zx+Oio7Bf1GZ*8BB9m@uO*6fB)UGh_o%JRg;wSDG6((zjI-ID$){&9X!zsR>)^FSGr zA=1MY)L)}}Wj7q$Am=G=Yei;z#Rug>pH@m}{Jv&8PZEROLB?Ey%bqbj%@jRR`4^ci zZe>p7=GtubG|&{AU($l&n`h;4pO&kh+n3f4a9HSpkKyN;UstZ-i(HWca-SMaHm9nz z4+9g9UDrp4>u(fkwn|z)trP+R&ZqdS%`F$r;$RqUa}iN__rd~Fa8tQSCJ=oQIVk>< z-7S0NFSYE)#Xgg5BLm?X#{j>jO72dQ@>{A5C)Qnj!_gMYR&R_;icqelg(b$T4qll3 zb;NNfbw?3q1&GZlO9R5p!vSIDF}A&t_D+e!LI9mGltvx`!w(2{`?%@S7zBnN2VERPETD*7+-i zl@cKxdBf}bo%cdlxKdUQ9=bOvJ|nro&*I+Xn-y~bPC=~clz;#$oJ@M68ztM*+Ug1) z$q%6*palu(7ovP1cb-FYpP(|Neujl_1Kz$F=ygArstZ>CE;3&1LO9}gZ0TIA+oXXD z#ehAxmsVECh{M8qigbr_20H{}9PKJ+8aw6W55rH!V1sW1)KKN*2Mx%xnzx5_W~C~8 zL^9UX*Z9}XElGiZGRcKuv9E)VDR&rg5Cmf&c z6+h&%jRKbwN}{zRM$YUB$?OTS5D)%p@$679Q);e|18djTj^#UQmx&z5pnI<6JQ$$w z33?UePpk_n*0FW6x13x~>YO|wd9*H-TR*fePY`26JfPAOd(%0ydFd6HC^V3oMU<)9 za2bA()Pjz_5st}fEk6sMj7&ZaJEAiAIT#vF!l6qlYr3SkZco5(uh>49SiWVIXlqOI zTPq~@GHvXEk@XFCcNYmLH(H=V2)p> z0OuS?Fc;$iEIy>2Y1Sm_2$lwlJ08E)Z?KMRN%yHW08P!=y2iT?>X0ubPVap>^ie1Z zsba%nhn9*Tv6gdC?MPkDy|sRx|1P}n(bq6G*C)`TRUL!KYDZOA_>THYbYn>+7S-Y6 z%B=(g(BpMA4RgP&j>ax?jp!J2xtV1brrwgC&>&KzzSq~|J|9Rs;g_De8&2*#Qb%p; zT0sOA42w!}gV8jn1YFnaU=T!g{(Buq(8CtfXS~pl@9CD9UNx7bg1%(bF&SrY(328B z_(_jQG9h6O>yrl)rT<<2u7@8KD<9QEA#qxP95hU1R7mLaLF@opgStS!gmj=1wEnab zy%4x5KE;TMrob!0h&n$Z{YR*00B5ys)5jUT$Usi(Hvu*>hD1a+_tlKjH>e@wzLjGY 
z`sN(9(4+uDwWgD+SR*IVn@NLU=lc!!N10&A3>o3($qw{BR64 zmI=vQ7~A(0$FMj)1|Rm0uI!Z+q~qsg3{+nmDfnY!0LI7+;1ErMP?6OruJ z+DW&eorGgXm6Rb~xy16s7^9A`$2!8Mp^l)mANhx$D(EbIc{vlaHWlV|;E%tDVVNZ6 zF%r%75-s}LEm|VTdPu?t9q**#b^NYN#w&p9txFI*X;26fOFf5_`GsdTreYnSQPw^y zxh}FX1lv)8ErE<7ZqgBo5TiZ}k6WLcDyJU+5}c3;peTzbdIv)2ZLHRGad3PVEHxn; z^N?l%KN!J`a;R-Ja0pzOqXW>0q$lGXACqsH-)EVRQ+xGU+w06zyAphAp6FfCVjJak zfi-oT$q$ZT=M=V}N2&yb)#P~xK(*P>GK9!(@gsE0c8R!07-mMUZW^sixQVqcfy4*; zo6HhWp*GQQei*{{%7n&H+C|G+r}$A^I6GU9h~Q_lX@|lEMLe>MppqJU?a)JYNCAq^84e>r@PXhB+czcb@%`s%`UpFpc)UBvW#@hH_dfW0|GuAu zPcMyfI4dZT?_viXI~lXNL~E`_qidy=l`QG8mgv%o>MM#*Z|tq{uQFrX$TQFU_K$BX ze&E*LOX=*NBvrX%(4upc9TiB!rAf`Ot-1W~$aOJ7cYE>EGc*Q3*ULTFLNVEt^xl=G zx!x~!46;p0woRi>2CeFjSJmsSK*Pm(eGCx65ki1LszkMB$BIsxrD>Mflc3k{$cWS_ z%YkL7*gXl1zy@1od~2Fpn3uZsD7UzQ!okS%-kDpn?jz~j7|cRv$Dqr#uKLDwM=_b^ z2Ok8VDwG~3B>Nr9h1%9ipI*<%yPE1`t*|P~gl&CKuxTcwahHN5!yp93)G1JPcWN@B zUBnc-bf@mfW_g7VT&gHfsQXj>6?~mlvme5s4~m6en9>UzGkY7iZ{tapgOZ=OOGR1w zKabo#9N1vxiphLZ|7B?lLAg@-hb?hHyUz8R47pw&KU01&@85~?QoT4PTuq`ZekN7r zo$i+Oo!xydwag&?m|75o+1VG=fd}v&Ye3|7@LlqP1>yGSRSRwNy+N9}oy=;9Dh^XR}uV zAQ-k=o#P?h|6wkxqc`xu{ny=*?F!#skB!ouOTHa5<&ELWYpaJD+k*#N*!GGt=6x|q4& zP!fUVAfAxKyO*-Tgx_V8Sxc^jZgn|4ESqE4#eClELXV-NXLJ!pCQjO&hq*Oh+q2AT zyYqfaqCdPTsGM(K`R3E@W`|htba=kan+TAa`>IRJym)z;7ccRmW;Wj$btIP*{wSq= z0*2k4L2fYQ0s}A;mRQh`cAY_5yt{{?s`bRrn8SDmDWWAa!C}Np?%_gXM0F2OuTu{f z+sz($c-X^*_VPXO%yxiWysEu|$V~QasWZEG-|y?q$a|)^jlr zT(_hPtwAR9GKy4BBY^Rv+Ag4sZe+>mt?%+%`*>?aC3=aI4O$RxU_>4l;x(tx)~_kQ z@q$U@a;r%q;$tRh&NhWH5Zby3hNFy716`I-WtRm6j7Zo*WFBg)`}rw1x0|de0gHy_ zAxQj}OhVZ!QU2Y4V2ePuar7r#MEI3nB4DZA%I9x;jR zBbzo#x5c)kn?=ka-@XbMj*M}No@YCQrD_8Vhvef3jUDyK1dP$BpH%i2Lp|nu9`!y! 
zi`Z8^>f>Xh_OMRWqdqno^$Cw!0;oEDtx=yH8?~OFOU^7E^=Xfqn}Pnil?EjO_)F7a zkSSgt#%}~su*?mMgqu*qMfo&-z#PC5rB66wKcCe()98NxrGkuKgQSNX@*=N3&k68nQ^j#7i@#cjy{0dJEr*!C;4hlu z0c`K{Jg7lGr5Df@FX;1fTEWh^$aZe1=3A94623l&Wz*Zmj=%Yb(FUH@gBMu|nVbJU zHC%u%jV-{Mm6--XeOWKfkJdIzPtLT+nk%+mz^x<>4orgg?J=#_3%rQ%RZM_ScpL8k+Y!?LDih)sc7 zLIK(GiwDm@V~ynr3@+6xF-Rf;BuByAd3`oizNh1^c1W~aJ(blepNWCT7X=t0dLkfN z#mS9mqC^4F6J___cVj@kX=`>s4};{x`V2@`5^zMa@%zHc$hIPqPh9Z=_2q|1KE{hD_2Mb_a}p#A0YfC8x*C!Vh=)bBKCoS4&EEAc^oLuRv8?-Mm&lUzA8Ag$yGg& z_1Ew^i|KJS24No6z{r;ofpWzIjDuxZaexe!0cvK4Gh!Kh9?h^eWK~bT*9Z}fWYYibukD8RmtEx0JGBn{j^i}aHW-#{e zKnPcvDd5FUkq|tW(qc_!I-0-}s_UZL>;d!6U3^1CVu=CcDDw6qTg)OI79;*m6n|>9 zcq4cO9Gn={m&`5&ahJ`VkV_MgZwe2{FxJ}`40^gmekTV&mM-jGk9pw?YGGbw4smB2itp46#r$W zFTd|9sBSQ2zsJ`SwGO>v#IsJ}wPB0jV5)lCKOd~7hEmv8HyX&)6$4o^1j%J}L>KPh zMLQIlCNitdLMD8p-H8j}U+g>dby({WKgPrcXhj%VFx2qlnRUc8HO#zMoM;I?`iy5+ z09`(Me6Tmk7d|6FC7C+OX7MM!Sp*?BM(h9-Q|gAt#SRLX-wUkC0%E{ldH-Fwqdajj zFG2thMquZy`JLQ2fh{kxkw?RHC;x^LN1&QlLN&SXQrth7V&7Pki)pq7`Evs?lqOg* z-hUZJ_`1>1UNbg>|CRf4U4((+uo8o?R^WjMBmH(pDu%-+!9zgf^@Azx@@Eon3b!Q6 z-Y{V2?pt_lHyTUimAE=RhF^;oTHfh^`mrZ92dk;wEa3~7Oh~YrKt&zu4%b8RQ5`Nx zb^&yO_tee$25Sn5H#JlFzPc3Vg2y8GQdr@R&qN7huZY^^;%~z2C>F4nb+6gm>E4^EN8~{k875Ywt zzH@HUsxvggjQK6CNlsYadm2F}-$BhXX5jfuXN+m{PL6}|k{{fyMV0i)poSkN^4e-j+=Gu0(jGg?8!Hk!FCtOAT4Xw_>16Hf5IgAT+Hr!u)9X?6j<#zhC`lk!f$2x-Ut7o$ zDh$>t{>N;awu#<8$e^Z z28L2bzbhjn^>c(Qq`fdA4?#N3G7U|KhPrh_2&X9)V}fw@vND_;;^@-{#6;b8K3>ca zl%08(-hHdT*EA@IV1?#}0~ z;{845BZYa*%dxvoAt%ri zL{2*vLfEJ@w#q?IHle8uLaLMvbPAD3QUq0z(ggJM7h2Sx>EfMaD{M*M z$N-_J+3XE0hMN>Zvs~3+Ix*9!x{!!H+P#gi2lE4TSkXKYU=Onur>1&e$g_3;I~kZRQfDvCEzO}<)rSSzwDhnT(NB@IfZ zq-@k8W`Hc2sE@@$QM)U#qf8!^ zj+9vq4Y|R@Q&=fcs$Z@pf0+@?a)+2iE)7uU9JY$%$eWWd|f(h9k#jg)<1Fm*3s?=sMTW!HwjjaF64pd z{%cytu8u*fU>{^<)v-_+0=yMxyKowGu!ms%O5NgL4J1^`6rkEmL$iLe_bp~^wU&EO+Ops#^a5g@ zm)(-P=_BK4te=7t`4jjVrr+A3N5x5EfL`woTCE=Gr%?_h?ASDWCn*G~@xTa{H({EJzm1PMq zq7X{p@z^@_5Me^ciV+iTzF8R%ho6D&Ng`pspYjOc+>!;{NmsEJ6#^bJ30 
zFUD1{KHerd3Rk6%@h8__<4MCYQWEl4PYO$c=32eq|No&Cxn)EtL;M?BSHaadT*0f-8+xK4e_j?<8Sog`?b?M+V);8E)`_bsIh~CWVOIsFW(X!0zD#zP+86yIbQ>G^jAV**GXI^9)M! zn8m5j$L%=InXWssHcB9ob!TXtr)54ih{D@>)dlS<_RH@Sow5Ge z`&1Xwu-<*DRcGVS6<$_cd@V+x#g6H{$i!CdrXHPum73a|h9<<|YA_wOiyvfB9xr4S zmk|r9tonKeuIxh-0;Yqt>AB>U)v6sUR;KNCJBMHRugz+r<)+!=l(f6CD+KUb)gMVg zo=eDPgK(j%n|$^pIBdSu5*7WhZd8hg)0H|8vB-i4t8rls64fVcra~ zUUmgcdBkV}J>Z(Y-%L$O6oa=txZ~A#C7Jg#iATe@G^&p!Zyw}V3=I3&1KY*GzwJR9L{f^|NZ-Of@NoKB4oxc{p30hO z!&%q@-%+jKQ9bfz2%zlPgZaP)43A(|0Dg4Zyf&*)F_|(ApU#km)t+3oF&gTriD3_@ zdJzI9bY^}M#8)TLuu#xwR_Ph9mbh*SEGKmH#MsSO^k@tX97q8(&k~L!=882$K^z7q zXeqI$50hYzVfQgFzU**Xrk)d;p^|4yx8yFvnLZ^j zJ(-SDZQO4r_0UxJQJibNUNw&(U?!yB-j?hzB44J(77_BREG;@As zZ19fk8+CUsS?@gmZx26S07u=Gd|UW&ld1844?hUzaOZyuKi2w(gh!yU*XS~obVv3& zMi$uogsGl{vf`&awl|J=swI2eL@(=v7vF%)^#W}v9Im}ZlAqSFcmUdaBhWBZqAMyc z6>>bG2e3?uR3&Qx*INBq&*Jm=bcQN1m#hVliHsuzuzbbp7b=zR7$}WH5_rNEAWw9O z4#!)dwfF=}lbppgT544aYaBE*?{4^m02Qy`kJ(VQu;jQ$4u~BMOa=G6YHFlgL7`;_oj{ZVxEZ`%aetrHJ|Rd^UIA1UVq9>7f}`vX z0(m*k6n+CZM*{U-DU|}P%9l{%84waG2nkC;C`a(u4ll^puZa;X^s%No#Ygc68%Ll{1nS?)YIhj6+C-pxJgbnfboKkd3-*D$TWRrU ztwHZ`mU$h_RXw6^agtgUm#d^V=Hfwv`clGzg|JmNGhOYZsgx$~+hQ z9(3rPegreL83dTx(4G^ZgE2|#@}`lzt0A)jQgMaO@M^x+JHvcAK3S1=O z^(+fJzm)KLC$rNg3C;et(d=KWN?o2ZCf0gA++S0(am~y@;Gn6twy&U&Qkt}SNDP1_ z#U|u-W?JxH&1CubCdfi{z{j5vlYq!#AoMFS1IdBq!-vp2VAlzdTemeon9&M%F7qid ze}+%4oF>S}EG~s#XZSV6el*>=#P^kaVgYdc&j-=8OPX0=HKSCY@=e&KFYwdxG#xKW z0nV#)3E%gJ@7tg~@`1DJ9C7=sT7ah7#ed`i^#n*WQUHMCRJ+N-0 z!G$a0;b%WTd=Y^uWN!i77x4=*T9M(bRCK{PQ-1pL-hdFV)7TKrI*wa;yG;flXkXe+Kc zIb@53GQEZZ0XE>r#%xQzIc6Eq%Z&N;Ttq)+E#SUjHzaaY=6MGBe2FtwW~lt9zYJ8a zCFa>vxkhq>R4%k$gUZwAnaV{q*QRo`+v`xdHd3NG9u)#7*b^m93%$oX%`!Wn0!`8t zj;Z3eSt7Tj$3%@@)j|xg&)c)vlN8wvSyl=+K%!R6vBh?f_t=&)yiSU_%mYzmP|i74 z#L9VY<$QW)KPlH)IVU}Yg)_Bs-ntLJU{KCg;mOK5YrmEAmB?ERHndcIA>B}(5cqem zL@>&T^c`s~WM81Is~ah3K&4b?*4DFa`oHYVC9T0q^zbyQ&>dQg4Kpm%jBiq?TxA^8 z)bO{b>KBHnq9#d4N=v*Yu(9p7^0;uikluoKRhn{j&<0f7Dh(6I{ z4#)BH47e`-c*yRUWh;j$a}s2f9Z7Mxj@UkEIdxo;rP8(x=Dll>7M~V^82?EHufW_1 
z`7GHQNqtx8Vo$Tp*p5*2FYnXAFNxk)H_BCv8w|d`!7iEcX58>Du8rdNadhsaS1q+j zvQDEhd<5F`Lt~vrV~{8SPiU-%Esc36w}xs$q{Z7wT$@X#Z5j!=GI4}L*)THffEqKy zEO1|B_4u&SkSIH%0bp9wK#x|CEk6TCqd|&@O}MAhehNT@`m8!*F&yoZjX*5q`zpw7 zbL=4$!p|d#p$5_zGzuZz>NlU~xYIzsw)9@mrGRtPQ8q*97kL#Zf#<`I5+d0nbSGL5 zL>asFiwC4hOZgtf;Ehl;`{HMZ=%5i7XLnnc(roMR%tr{kkWUAD^}|lvT~I+*+>?1pRIgZ-;_tPuK15ru#k3= zP3-Tt6>AgLrJvG1p?LjmnNgk98frI(XgZz&S zsb<15OwaFbJkO&6e0=O~J};ry=~ch}UAv3wp5jG`lnQ&&$t3S+Ydbl-9gPyv!7Onw zj6#jftgAR!w>lgi@wPgptRY>i%kTX@yHU&hRoZni69$kLe=Wn*)fWI^{bo-v^`bWu z9TrdK6=aB|mJd>L-IR02uOoT1}gq7_idBVDJOzdF!z;kEv4NSR8gN3T?;DBK0nsnk|51cUDkUf}=vIiAG@+!*yk>DrV+h}-E zpr^6{ee4%D_(6?8q(pH)=vskQ6jtZr2iWV)4tR;-UBvYIt05v*gMP}>xZKOtND|@~ zlNtrMPZbuyrzejWS-W>7EIPt4E4f^MJVq!SpB6qMk(bVThJeOj$dv(QB}lO~kq6Ht zAsg|z%?Qc*tY)O#I%koi@x%#9axx(a;O2zLFh*$_w3VJbnoQ&2=YKNkvz!cw98Bue z9kXVfj>5-G=k62>q=}y`DI4a5AH`J1=G1uOT66M-lEa7{NnVrdWW6YSN#wc#b!X}d zYOMD(vB4bOP)yHcXSP6#BpKp{tSQnt`A7v=i8TBkEXtIG5%D#8iBlo8n2-e%+TU zEo^%PW2|;f(Xl~gQG|;X&B-7?#Nl8N^X6ZOdbsER-;HJmPIWRBPs2B=p3hxAd z@#7(z-KaQ+Qe6veNr@?b`VjSTRGf3!q?_!6B>LCH1AwaW&}e*GWm?AYFr9CMUgeG? 
zaEaG|*F;X}Ny0-Me zKt#1pPRbUlLMN12RnXqjFGKc8n4?u%1F%ukZB-Bf!9aEp~g>> zRRZsaxPoPdr;5TlVin^j{v5~85#xt|dt?=QYvO2(R~S_gO7IGuHp%O%yrP*XXF|L} z+xjCmh2X=mjf~LhXxKHkI0ap=MRsA3HM{6-*hM1p8thWz1~omb_-C^1T9PSMqAY** zXhBg^Pz-ltXSD8nfteSp$l8cTly)gtui7n+e zv23lEj$NNA)m!6ZfkCgu=n% zm>eCSK$dg!%IY)U8*=mhb1$0+8FMjfUaLiko`g;k1Lb)1HilRmK3mNVT_iNfG!+4k zktIy?IN4l5mqS2aK^Oca3aH}>(U{3@(l2H1VA_$zlPlMW2>Xw#o;#zYx@$TZW-DOQ zF+rW85&((ukXQQ=54rc#IctF^_<@u-JI`*;_NVyz00PkrJ<32y8iE40hMpOL$Ab)^ z?+(2RK8KwTfZi%X%e_$$+zDA3K$C_p^y16;oMY~68%&^Z)XcI)&>b44YwatL9a@GT zyHh3OZRDXw*qzD6U^&Dfqg+l-P-B8ora^?P1MCb{cYryQ(YH;*Kz!q%8xg&?S*VQ~ z5%Q}y5reN17lPW-T-dAabD98NgI)- zB%YwDfG~Np>y`pr%blIeJr;fNpnEeF%<|WR`1TXj`3cqB-ZGRBSHm3LH89uYcO}CR zlji)#pI8=y;@M_m&9(TRXK$GAvE9Y@>`D8H8i!7_f%CO^YKyRq;cIg~HV7tWJmqQ` z&WBz|`2_g0Z`@wZ?ij!^oAcKW?kR8Vh@OV$nOtI*4W6|k<(|TI|?Bf5*dhfj{Tm49`+7}ll z!y3mZx*CP@CEXUTTM#bSZ7&S6)Cxocq;6yT&LblNUZP#>jg)#42_Yxda$~f7X|Ty#E6wrTN)%H!d3DI7^;tqR$fw4y{vd&p>>?sNVEaKTT<%M zJNef=~B>t{ARX8b@q*!*s zJxd@w$@2Wtmp)FYFBMUJsR~TFDJbBedOFL{lIq29ue!Y}d4h|v#6z~=q1=@m_UGN{ zhxJJk-#$G&sR>qyw zThaNXRsK*pDf3He6J-J?2Q%I=o#sjjTPZUDF+YLJtfGJ}|2p|LkkcuB=KKsfG@zH& z>xLnOd5*rRmkPew#S6A(bU`c1#~JNj+DlL=hQntfE)6U5|BMBb~` zUh$OqbU!uq@D2iTCW|MLD3q3jMO?Mr5C_9+TfaL1HYz~_C&^~&a%PC=VM~gWqlG&< zuMYWPfl!_B@PUptK=BA}NgDZY6hqq)_;iZ?Dkn}n#C`jR|eiwCDzFKy=jtf?50()8>ji7Xtp-;pT zCu;&apNv3YR>eWwID|GepSB=FK$BOoqS!+e`iBA}`&d_Xyp8Lx1=yz&gc^ky8=4sJ zE7!tMGp;scr2i(!Yi4j>Gm>A8kcvOO4#5q1Y6UQ}Xw;b`5le7V^ z#UYrf8*Di_WXr>0Fcvi0U$%@WhDGY7)%#W8$_ezKy!r%E<+x^!R5@x0SgIV+z?1%^@$luD~?B^%+`Wr7yt|Td3HvK|mgvs*2h(SakCA zN$TPTm6(;(;9{up&YcNY51uAD6DbsA>7<@xqZ82@0XVOobclM`Ga0Qo4^u;njiWf{{u!K7rm4~IWmV%(dcErq;v;#X+(XvGVgdX-$I#y#L`Yr#<*|?9ty3Bj4c(YX; zWDX2c;VYb(nruhT527M*D|&ar(6tm+VuMCr zZLHXfIaF*1i4VZ7(8uOcqmm6fsmtE`q1o(~kT!g{*GL;iUB06{xXVj#T8W+I{Kj5f{ig+e1<}C3sX_;pbCgI_U;(MSe9SP{MstvVWd|{YY!KFE( zQ&z*l0_rnha-wJG`82u0QAPKXGspq6oep!Phgml_9$p((B|p}~^&Ip78iqMP zUs^r4TEZ47U9fs?hnI9OFP8vaQAVXDOh`%RE|nw7>?PUypr~KaV|MpjX@kQ= 
zCk7iENx!DW@eyRw+}_P!xMSnvgBSEC`5KNUh}ChLtN*^>Ted=_1E?1iujRa{uGeO_ zJXirWwTnL_T4AA+ReKQ=!~^}8ty1sDpXmyOD#yda1)xA?LmEGXJCQjxJ3Ll*D2{2t5#<#Ubszk}9X z3Q;5AzUGhM!Sy?rd^kh44;=CWe{(_%zvaIr!{>nw!j!f}An4grg;e!!ZmHG4qSla_ z;Hy~!{*gLv&XynnnAF&F(h7fNl6hz!GHQ-oL8%On!F(i>V2H1y(vrjG7iGpp{)yY( z&%q?s1(d|BIydE-lg#)ca=>;LXR1sgIGu4XF(RBU&;wUkCM8n*eWAzq6MPqG7l-8j zfeEST+JmOW7%n-9pbMMS=7l7Ye^$-&ccV5|(d)sU@;*=s{n%jR-QJABl1j-* zy;>t5^A2frH6W4_V;~AOyhJfBNzd_Zv7ijec50%!q%cSR&=dduaW2{M5P^55&AVq= zpqQJMw;+7320VuIC4=vYFA01Q-3^0Jw6Zx{>`AQ2y}nn#h2G8ys~fl0*fpjj*1>PY zG&nDwaCYt)!k&_X=cHR{`x&h~-Ih3&98j(f7oKWcwbli7dRCR7x#z;2>8N@E4lAv7 zC=RY(fY1&Cn-&^a7m0fXLa^89mk^iRAMJ$&w?D-j#3vX;pAAf;WG4M2-zM3o|4Rgu zrOohly&Oem9QuWJ^)^i`bc_WSGUyI2gBO7=1@VUPBJS$&02x)x$UM2eBNB;5Ps>(W~CZ%oy3< zQlLs_sPO!0790B>$rPg|pHMqx6e62;4A#5DL9$)DSCj^IlEt9xeg}CeyHJz#T{$hR z|Jw(vmBV^-m9p-n{gsTalPjBvRMK8a8CQed$2gTMclK`yz(!Vb>d17pUfCS0(ZGZo z+;sIu6*=m!IK4d+?(Dj zkxvamq-FZnfA=^W{1HdRA8aVsYk0e=x3KHKT)SO^Qo2?>J`yw-eDy^Htm*=wmrAq6 zY}b?lAEBureIM@M%2d#tUtuSM=7M1iwQan2<{jn6x6|*$p76OU{4Q^O^zi`^T4(;;vq9YH3c9+taUN~#%T$M3l`9H2Z;oH$g=Py!Bvax=n|68$p-b>B5?C= z<|G;Ta>XC zq8R?YVngH$pe<*O6dLK0)t;+y#=P!oSv72S1m=L51gO9V*|u_n+P>e%a!qlEZItl0 zY5XQ8rb`g5twmoA^Sy}=-oNVHg=l+A{E@EKuS74fNQ6BEfdv z8W)0X5K6mptrl(Pv~;11i8>(qAp*NhKFH00GUl&7lrcC3XCNDVK>+k=zJN;nv_q8x z%E?!32-2aIZ3BGV6E(FU2qqdZcf~-4ShEnr0IMO0@DhRalb)NJRPISY!WC@*s=$tl z&lv$cD{fX@RlFjgIaewUExJOev%@?W?Py@e6oHPD6GSyRW~C2;Li)$N?8$&C`%tF` z)V5obraiYyHKfn}{(&TZ9H#YF&u4lQk+R058^K-44q}yxmV|5~E14d|Ts5NzGb9>e#=U zQjF8si{bgD>GvGM$hx_leorm=h>fVhz zhxY-=Xi*-MoqXIbpkw|!bhI3oA%%^Kn?XGt?uDGR|0U$4jdIBhd3jQuJfh^V;=_}S z*`D4;#*uG_6e4B?nQhLTtCWYdvA?7q?sG0$bK4CR-TUks*1t}+O|`r3988Qn0ho!S zay1*bL--7RVoA|GKQfqj9OemLfQeCI80kPb#rVL8UB#mS{pHY$mQ#c<-~E zEuq4DC>1KaCm|BfoB%a)^HFd;n6bmeI2YcdS#;=79HqTX;XN#Zy0VQlgk>t%{7IU& zAECE=mx!TN@6MoL$p#lJQo@345jF_oMdJ#%?Kt0XrV7ok^Ek}?;dV`xDNE23Mz za2l1PI9!*P5{UwjuyoNPkQzQyzY72*f>{V?TX_&vE0QUtj7fY&W9?}t1~<}DXS!_f zF${!VEN>D-Cd`QN?DjsH4$~-eeB!bb^jdK=hZqP6P#+F{gp2M@yP6ddm97{RkPK8% 
zP$E;wZq*qwQKFpTloftx4JXTqg`J@-@Nb@f)n9Xmz11XPu^;ZlIuAKjVV8f43bshFpP>O{dv z{V#o=sK(lWJ^O7elOYp8wYL!Wpy+4JfiFX}!Ux1PpvLoib@$SwdNPM-@UArDIhp(MPzB<%cw(-!uTaSu#o6s`3HHpR52Q}uNvJB9uEp=Y8%OplRF#jKf= z>em+RiQEXC4`F9zoaj7E9_N5S1X!I$(vrh`^m9r-NwT0xr(2msbwwL!30||MHVdDNivZovqB~*kPUpxFqC67OP)6pKmYzWD4b&0;H2-AtfJ$1E9%T>aUC6S2;DN zy5M(C8U|Eik_4Wy-9}l=IqQ= zzoZRl`-s|tSr#;ua%(*#4=)atLFwQ!50Ig9Y}3ymJjjg)9r|3Df}V|5`!dwBu-fm# zv(`w-xbxOOEkL#w=tuO@_s23J04z3z02m${CaMLL2kjix>wt(~wU+@v2Kt$2Afg`i zt)o8{`CXPnS|UfzD~`w6z=Z%wlOS*~HN*xW)qZslOeR#d55HKu_}Pe)fR$52tb}TT z9jB!$rH?J$Bix)=f*UbK6ZEH-13lyLr~o)4Sb^8-XANwCjHYIwlP-eUiQs1^!faOJ z1(WEF`xuS}BH#fwp%q_;6ebUYG%fHBArejHzBV-Y2jqoy%dyG*q!bLeLA z+acW9lhRD~wrilTG%Qmx3%Q+MA$Y$*9grMyxmJcb zO8F9IOf_Kc8#gygRza$5WrYdr@$YbEd! zij-VgFHU93;dViBt4=|Iw%8-(pHY_7L`~D8{L}5g0jJtzbm{CE@^hIx*})Yi-#V5- zD+h5PhwJ6=rr3toPQ-Qi#fy{S#3pH8R~dX9$%4iXsax0(%OPXF%Rb;x?!`au>u-pi z3_HU*#Rj-)!wskaAGdrH5{ZVqIOEBa_K4IX`)oaHCs0?!HqEz59ZK2#MoK)DX}DOc?s z*+qP4OpQD~-eFAD#YqmN8zl#N)2&~=tS9xvWu6!&Mzg@{{XN-ml~c|pPIuN{;2V9w zM2jJC`;Re(po9ZTE{eW9mN}iQ#Z?y8XQ7f{PX_W-?1256=dY z)~frfO!z#)usY5cyz{UYtfmXfxLyz~bAinpVPnUEcwt{8@=b%))P~Y)e9r44pL_8j z>wk53Feh(-&qLT{OhC8?K*QoWIIDWnhs&8)Y-s5f3Je4lkg7a2*42UxSXP@{c}f^G z{{bO@@~Jd>zNyL%Wd#!q086feo$4IMbMbCJb%dw1=D^#f>p|t_I_)9_S$p;ujDRbIIN* zcdOM@yal28V(<^Kqf-f}#qK`nLvbO}0Pj12_J!qF#MJD)N@!=qFyMsJ(!(0BpJ8<( zjABfRU)bfqUow$w=jh4>UmmIzh3Av`%_RzRGIp=OKvFQSTigc}zm9Jac_`&G+ zObpWE&tYZf(Ll%&holZS_Nz36G_qp4Tt!VGfw0>1Nw!Lw;>ufbcT-Y8`zouaApi%c zX^H(1H7)UrT_ok2dcRTJAfQ3)<2#n9fWC;&-pncVDF*Dh9#HIJw0Xx#W$R=lRtHH7 zD6XM}w4{Xq2*#nuW{Te=Bs!NoHNjN*rcAuS8pOF!K+Z3RL+auwpmN1F3ArV-a!3$M zj}@(mi5Fz!fS7&y1TuT6!s4W%IrtlvzX5eec}QP^BZPtM?QSoA=^BR6m+lTBnfW=G zK?ACbNDyl29OyF))TB3ZieQPBouJbo^Q7z`%&azSN&cj1`uz86nm!<5sNr1Ci-F%l zh-5@40V)b`Y>LCLpZo{&UeLdkP*q={XUbVyevbJraDhHs-vvlA0bk#D;f$6Pwj$uQ73OkRYq;Ai+pUy^pi&}%-NR+F+77a)I}5coiFihPwrYoL z+}F1s2aaAxL>2$Z`pqI+v{~Y~NRcv*XhS(2s;?|az}Y-2LCN|yRH&tPNfC(wn$|f}Ar@9hH)loZ_4m7sl9rl&|<6H3dvIXy;fP#My 
zfmwTpk8!Cw8K=jdN`4(fSbWrzJQe!#Bu{FRK}c&b*WQKK0EQQhAuQr9yvCv-!BL&| z06K9O-f4Ab||bc9(lC3=WqQtJ&#@p*uvU~@Q%LzrnTiB`t=?jNt_ zV2w=G3>HZr^5LM-&|(S4lYl3b6kbF)ofN;FX4?#(ym%)&7OZ2iY^&mR^C2!!5&}Jv zc$<6=sP}p0goAmSv3ueT4W8;yB?)iFF07ghiFRhStG4&QA$Ia$9VqFpRr2F zWblgAHZI}&hU3C{=tT7mnG<^JBq}Cp%KpnqYax&PWY}Qi;WEdG;#j@O=aVT^{8DNKD;JR>&Oq_dH23>N#tv_b8z-Cx-OaR8wO}JKg?qU39^2Vqp2wZS9}56(IMS1M%i!B zAv+#^A=SECt=zeR1{(u@SVQNmzIP0m6Ny(-j}E5F-dkm$^SR_!c(}dvVT1v!#W+dG z(FecE_5&m~b7~=*A0Ag$j2wF0n!GFd>&SV7ZeND#f5hr2l0g89SYn*3Rg?&{%kZ6w3$n|(x(C(bW701X1Z=VEfKVsQy^aNx z6(_i-Z~&0I=U%mk#TCBjbvMN$S^p3WdQ3-v)%(4sMiI-%<4uO$Ox|CAiY5`x`!xsz zKduG#wGh7y1N30yz!_p3EW>87g32Besr9+g`G2{48z8%`>fZBw+^@bLts}c_x9v8* z_aZ`zsF7Y!Y{m8i9mbZ;j0m1Q7v-99O}VD1ic4}$S;o)fdgXa;urQq<-cu3*A}FB& z1qmV>0RlwO3k?VmiNFd7P+|cMD4>80A{dZ~0`vQ?z0bMlcDK6q!80zba-Vb0*=K*O zwf6ejYp)HJ_$^DU@Ov5JLmJs2C|ef<0+FTB_A`bwLJV_nSt=l_+=y1|UylZ8-mw=) z2`(uOQw~y%ly3#XcC%<@c8DXij8Z+{ZnY^vdcQ$hi(U)kUrh1Bu>Z*m!oF0Av&X)~ zfuAZf_T|}Baq{da#LPMi;?|hj1yRc<&qytYbGL`5mV^Bp-1eVwgWK6r3wSJ04?Y7J zqyB&8l_Gt3ony}IKN5$Nmf?9|CF6m5!)6zWg3rQOXS<(sSZ)=?X_*94_f`+CLO7Nk8g(g1U=QizW?oL)*5bF>xHILFtT1`Lx zU|LF^7->A&$@Q+<`feS?t}gTv8>X;mCW{y<@hQv$9}^g)qdvJ6tK-tmluCge`rnS_ zzp{b^AVzMgwNO^b4O+UHtU(k+D^|hopdO=#uQ8d$t7ckXqpXri%E1W)5ZDeJt$oz3 z1Io}?7t^mx8Z>l*aMscg&&cLi*Fv~^F&12e@1{N$T+wa5y34}dhIV~JS- zCaZdixx+TQE*R!la$SJeMLBT=7#_CiDG#sW!o%liOyrA}?~x~#jv14Al(NgTW>8N@ z(dZQVLSL8JArRJ3+h8#kypYeb4#wP&lmbh^`% zdAk`v90>m0Kyb)v5cL@l228R+vxB~ZL0^HNJoqZ>=~1`~I5YoqBY97cBBu2gUA-$* z+M0*GbzzragB^m|0Mb2Q#0&ohaB3+UwEmru1*V}N#UkGcCYp#mJ&78W4MJG_H?Y4v z+#CxCgax@F5DigJr7+c%Lc*h|ZaNq>6rLz&01!u02!d+@Ap*1?=a^TrD{wy=ye7IP zc#qwW1%!5@dG6FXdsoYptK07Vx}{f;&XGK3LtziU)~$V47)dt8M6E~h&B_PF?J;iE zV{Ui*z~z$WARO?zU~iy)!9pTteDA>(aw4!xHEL?q9XbV(}57M!gQcquLoNH%csKuZvAxFi6J1y zONgy7%VoC%^H{K(iO!=#7DUjE@8uK}sJWS5y2h9t+Cg>A22tZ0WbdVGHo!D=@_Q!3 zIBNvnWwsr>i;gyQBzZ;z(|UW1x4KT}Z|#gBA-BMMi<4`j+|9Q?v~AidB@$JfDrIvC zl-iS%g(ti0$wu_fBJbfu^37!-6y8&Y_t-tWhc@6nG6$|kWT=C??Rz4aiqPIuE<(F+ 
zqQiWEYsP%c5$2eW*}g#vZxgh+>@CY%>{-tfA+yl49qupw15t0LwVY`tPFW50td2*^ zT?F(2jfLxeU44y@()dAsTl|!pKr(ljiTs^o`kbUDn?B?|Z>yJenXLT9{LQC|e3PKJ z@I;GxbxUGVfb}63Fi#nLM}vO>)_+nF+~w5oqFiBtHZwktp~>fLKP!ToWP}C6o&oCp zjLU=iAh({dx4ONH1;U=JXb_ThFi@e@^C?!+nNQX%E@aO(mFsUzhxAaL#{j!n++4wr zV{@2TkBTKs5Q*?`s-jqxthi?Vgq>pUr}ySu7<#n^b0O#d#_lJx2bxF4dUhA<*_)N3 zh>4AxlBv^?9OJA0rgG3v?nZumNj|4m@^RdjwX zuOE16{Ky{^mC-me+6~|>FCkh^Vhxb9OCfnTzq&x-DN#&c2#uQVBMa?LA?4L~mJuH|BCAe{0L%^Dvk3YLU5!mDzeVY77f2PX9y$ zVXX)WQh1`(Bcuh(F$IF3X7Oc$)N*1jPgjr+c8}z!h`CH^AETIy7%WTH{M8bDk(IOJ zY?IIQ#6EJsJqT@kia*M2#>#DxBuE>Mxp3KC6#-;)PKLg^<>YH5+erRT>(X+h<5(iU zdb*b`^u2R>^*(*K&)&UfApc^j%0d<|v=0-lbbwzsTZQhU{rX5dH5wny9agKQ8aVFBS37@+2Fw+fakyHz2H{7t*{YQ~63nte&W zB^Z@||2DL7KlGzj`G%mE{1v}t%Vr-=>%(Kj6_}dsOFbdDI@lf8`xCrZazZUMs%v_m zp5%wj7V!s2O>1JePcYg_)P%%lnftCfu-^IGQsy zdwLUxH(M=TYj_iTp?%T{jpYBmHE8Ix_M1-RS zx~y&JJJT%00_g^964s|MqlO31|Yu&P?fpbT1D8h%eAUx1?SH_B>{cTU;2P&NMU~5*> zfmIi+Zii#RgR&2qOwRKpP+@xz5p*E`A5~YLoyBx+N-J ziz}DQZ5{C?x1ARU6m8-qQ07OT=G%Fd=2A7YNw?j!i`#}$$u=yIaa-&0!(0%@Ly>94 zW5E#U@Y^8ry8J$g{LBpE;tUpOq@%d9I@(j)11}Kb_b^O91F+6Bwx$M>{RUq__x23B zuY|qI)drn+HJEGTReWzS2C)SYt6TwscTlPyf;6bc0o*W)U;7Pax8Vji&@ug)cg$z0a ze6ru*LO?X8t@k+3jP2VA(sWbLvRx#~Oqn5tIe>TYRbAyCq&pxlIaOz|O4*W(!U#oF z=z^v;JgcIuCJ6nKoH5$j+PKLr8nct$O~2o{E*JtNGzm21xCCNN%AycBluB3(~e{ctc(STc!A7Pxrk~z&@(_)lT8~RWdpWb zw+`uZK);f25fv$Akr%HE*D#JZgf2f&l)gQbgtM?3qv;#zCOcN4?p1CZfNobSqp5|H zx*(C{H2{8jIMR(;F(Q%g$AV!yCe?lga@&@Z&m>(E;+RkW4hwFSFR+9+5>8|QXYB)= z@JtVw@WK@|r>{+M6iSj$13Hs2V6B{dR=@1n1jLS}3aF17PnF{Mq456;A7qh#oJD>| z0HfvEOIIZRpAndg)$sX30DJy+i z5AYLE`j2WbPorJZ&9g+lDjFBJ8ZWJr2iQuJ62Fx0GUWYnjqqwvE`i6(+V90Arw3e= z7Ub2Fqk2J>3@iTzmBx2)(mkxLdr=tt$MCrQGSzfiWwE0j$oPDdcIOzk*d+5Vc|X7{Fhw=j4k(iantPlQ|RgE(-#)>lA!YXi&Th692h2|3EAu^8}h3j_sBi z1B>F%n#Kr|3N7Ft|MHKMw4HuLmrD9RF3Hi60>^TD^8xk{Y!(qo0%mXfDVoSQmNx7= zj)mLBI9AzltdGZ0$!Q*wAs&}11p{lTCULai&}qa- z2WoWUv?7-dhdedex>s2qjykr?fB}XGpkhl&5VR)GV#ZiufjIOt!lVZ6FVn1F0v&N_ zqc&D-$w12yRXi=Q3Q|{fB2?(uC2X|dLB3l_N#tKe3kkp>c{?>oxlsr#n*h`WD7GR& 
z=?kt90nrb7vASyu%mm!{l;znM$S=3rujNEz$LO+3k)^}pnDW;JYq?x~U9bu1qMtA= zh5Fyl1-W_~7eVow=oapv2JUX9AVxEP515)4w1Fx;Sy8V zfFg&z`Lk^?@?ovcp@m+Q{DaV0>!A}o5rrWPHgy^4wmJ=PR&Lt{OeU>?Vw9FZY)z|7 zH7NyUp^X+-^j3HbZFXMXG9lUM0w}KHgB!GhP!rbca>CVwTi2x|L$W zZOw37%W%tx&Wu~Y<8fQ_xb^e@^>1h5chhgX`TvBU|F=1fiDA2S=f+33@XQ;|LVZ4*gpMxH0SwTpJgdNhCQ66^i{NY)`875LOey(Bqg_ z*T!uwj{l5O783GI2s)}_nLGZovN-qkB^oNyjLV8{4X&UQMZpvs0VOAmDHw?0Ppk_8 z1Jsn{5d1+?LMugA0Xdk(Dj=uw(jyuZVejxI7bXaNA#S!DN2~Qu%krGC#PJqE2oEF} zgIVv(aL5vGw`G~r`GwI!z$D5npc?0Y(+_U6sK6m9V`+OsS|-UEkVpx!vGsI21uAa5 zwVfTnG}+2ndJb@=O(q)*#!Z2Ya8O`8b5aX?mS~wb- zhC)q%7{8DVV-s0OQb#&SE%y5;K5bX}#2lrMDJqe#j)&cZni?Xyq(^iosRfCc{8WP* zkJicHhRRZ6WSyQ{(UVj3PL-Ub0Sgoa5X1x3fXje3w}-Z`Vv%5bFcNIp%Li<6f9ohD zD`h}^igPm|PCF2SNKshiV(HnAMcE7OwJpVvuKqC@5^b2m`=hf=iDr99UZgw_ zoK9`By>y&sp_-L7l{4RHvXp!$>5_=V=?zSJ)XIuN-^s6`>4#=6zNriw$@f~6bVi#V zF2ZJO<2uol$1M(E;6EzwNo!o(L-ZBeHC!+>86AM#o`5LNXFNX&eV_?2Ig{B(-9m5c zai^}y?U4tzDoO)mpp%cXoE$+$5Jki%d{HQ-ulKR(Pa&E0K04fDbm*K2m{#alzCwf} z_!vn3#d?94PLzSVB4As~7(doBT!=dwI6&m6_5jj@!!wK5nF%U$h!;RD5+oeOx3Yxb z0K!t+AYNqkQp+?wwnlM^S_VyK>QOD@+O$4`F?dX~qd2V)3v94x*WG}QYe2`;JFWAo z{aWU>WGETMiJJ^0PbR(*|03P;7-15hF!E-+Zl;sVR-5SCEuk$vVGziAOGcamh%gK3 z?Md~P5G!l@1RaKcI!(u*70|CVU->Lye()f7fT0AN*>DPsJ8!TH>t{Z&jA0ztP#JVv z$>jp59kEqa&qkxkigvQvQCoHW-d^brId;p&OJ8PJg%}>*aq8A$x|xW{F77c|R=og` zW3no?w<0#W57?(utfy3&j>bmeD}+l|90Qeb(k$MHyIb##fwC$h2Kq8%nt0d4Oh(}p z?_T0Lw%q02OFbvoWg01H9C}c!HfOCk&>iv7S6JkDFPaf?!~1W#J+hp?d1MW5k@85iI#x7 zRrA}<70*t#Gp@8>IviYmifcO6TxTbE59cl@o9EhAzgOjkUmWd0K^K3{*!~_gJlMXo zxLP$aAJH6E5~m8Q&hZvVlWuIDPCePhCqJ?|Ji&)#(_oYvTcls*I<7?%>j=Y=#eo2H zr~0x-$U80}s{>6_3bk~x2+yT)f4<(&W4V#6%BiN-nn$Ron)xoPj)0K>p3H5-j?s4q z-)Ye!$*;^)S(WCpNOGep)kxb(xx>scOS~%8iiV8u+OT&ERD(8CwrVhf05Yg$o_adJ zQKPmwz-T2p0Xw-YV6i!1G(HJM)zTf_0T7&LoilMJcDVC0oJ8F$=$e!gBAi6sqWWQa z0G;}^I!Z(pvflL)HXsDS)0{;0m@B_D00fyxWjYYeVu4s#wx(q=GjhIgz@9KYiqdnM z?EOYX$hyO1g7DIl!AW}hX3*OW9ul4;)R*u?bdI;WmmA5AkFS21Zd|TmqT^q_1MwmY zeJS+GbI*%9mm3~Q4`^m?fn}NXdAUrYTKfvSIQM@o^E4|b5@;JTKZfYqFhq~{i83(_ 
zPwUF>gF@mE{ybfg(l$@rw6a0wNTs-j_{G=&=tvmk3e7+f%B+)%)6h;zeWizj%|Yc!;CfgKy%_;PO3WW{nl@?rvtRLmGCf1sSw}Hmt4Kq1=Ew#ips3yIa@a`g~pOz78KGNdTaeGRR zAWc7r2Vu+pyBXCV9hfaRc3bLr_ni|Q$HT>S0#N+rt|-dLPAPOYWUbzcwd@ zJQ~fL`W+SjUXNPtr07G@`fe4^kcoLn6qU=Q64o$aG?lQ_ttAAFUBWLhQQZ$IfQ;oy z?Lg>W+O9oHIE}I%Txy{L{J<=B?r#(nIXV6Y7N>=$JZ#dOnAP^swQ<|G%_pRbf|n8h zfo8~(nRwX&t#DRTLSm&aa%z@-5ilzsE5Wl>+ms{z*+#KSe(cx6MFEP5?G8ESrksGd zk4%kmi{g{|R*}QibdtYAVsZ44xL+;Bld5)C)EVL>jDi!22_Ui9U3J)Xg3egi5JPmM z4qg5j0E|XcI@Xx$-pGKtg}M>eQifu$4E0uVX^$;rc20*vj0Th%5VxPyRA+r>&R#S2 zw^CFJlKt2cWmy8tnSz9gCA|f@_3OB{5abjjmM8iYq;jqx*(wEtky?dJuYo#M z%cae!>Qw1bCLMMz;9!01sVoT-q|n!ErJt6JhN#4nyp_-7s>&s-XQlQf$={>fEe}_b zGDe1=ePcy#posiGcmPMEm5|=$DtBYi2c&W=ZlY~+xdBfQ6oJ2k)h<`~)|r(m4uP{Q z*(1uV^;G<58TeGS3f;X(gciU8Xw&wtUhJ$0Bf#55IC3zz9__H>xl_w*2WV`_a?F(A z(=yF5%rJp2d1>$mNOrtsrgGgwm28~mFIx+6lD}mD-HD!XxQjr|XI#)FwBTAzcVawR z7reR#aoQ7l>D4vJ-}W@%EQ8nKnUFdUbLAHc0tH_OeEM{5?@`75HR>7+6v!JIBvSttWr}V_k(njfckeLK%ZE4u4*(f$y6%Da}zQXNZ@ZYI3 zq-ItU%oH?INtI?ShcBTjbPGP1u-FM(1>bFTp81 zeD>)o-O&F&$W>;*0bTLj?AO)nhAOhjSOSrd+!OvN&kbYPTnN24hXq{*Vn7%GZBpRB z`gI8hMUewgDJdW&Cmq^DdN|~SL{$ggAq?J%O?rO zb+9>eRQ!k_6?D-VE8~z!?68JkFI8?R8L56}26V~@@Z!jH08PTQX3$u$S0UxH5}7~t zec7W7ze-_R3gS35#k#F83i3WICZN>8g4toNak5D>9y}be1$I6%a!V(^gySQ3!C4oK zDphMb&6mO>`i1|RaCU%6|HKcU2#p8J+oH;{$XcFz_Hkn7sD#N5HRJ5%)uZh`L3MF^ zK!OVAXBARw=>DX}vC%}HrqD?oWlmI06hi#Htbz+{!m}uQ*zx#{9M!`|fP-8}#w>^r zm3&hCD5Vd$U&M(@S{+sc2e{Y3HxW71Oy0y+G=**nMF6hSy|^f@225NH7~SvI$1*aY zeg0nT#npaNk2`U-Pc9l0S4UM8aYZC6!{lsxUFnVWjhRY6B(C0M4DnD!To4_vV&Y0+ z$|Te}qqrg)mx-$Zs;JE`t~j8*C$4Z9LBAu^J&(AOchiY0657uvt{BWLam7HrxH4g8 zng#I^nY_Yhd_Dq;VKx#1-(k{|T9V?5b2(!e+e<>6Wz4N7@6i50#rBw^q3^f+z?#gC zAMPfdu1$kXPBOW6l}teq&AZn2P_tKRFWED!7#c0aI6nQR_RILy(m)? 
z)CwMc_Nmy4gw1>+W~gmiiIKS;mT&-I=Scczj?M+OuFn9xpD^duT@*CU2o31XH=l)~)mwU%HxZ6WEbYeB&kf)|tgw zd$%%M%4WpLHQX66a@uCqm<>le+h=|VB~6wAD*GO+f^CYi(U2twG2VM}TniAhq`5tWvghYyl}2$ne-JwUKA$G5}UU{a#-o$^YS zl@Svko4ZFbB^fC97C$7jIQfEvmpCAi4D4U^r&=n!t588nExB6lvc}4RI?y z)pb&CFZqe&LBcN6Me$9zf#pr|1SooS2MQ_D0l7lPN<}u$6^Km7h8!)*i3HVoTLwHE z8Wa)sKMxJ;SAG21j|S`IuLlhxBIUd#3|dye9sF|hrp<$C*5ssJf!cCd6LxYVST%WEgaH7gJlIOS=^a()eFoJPI*4grhJ?SRkk7ZjfH1#Uug_+lozb zQ7Hq2_2Qpl$I=GpqOs7}xUn}5+8po!Y4t2WT@}NE6-KdP$8KK}EaB$+p(H z6Y*?~;JN1jjhRyQ;)e+^R&|5?+H`&D~Z}(kH%n&ZKKiMSsg6sPv;&gOb=vN=ngFlESE1|&8=06Z)dWQccs$(E4zzy~PPn=u^P;7ikc(pnHfrEij- zE}{_L>b*WvFSNCrV@tQp>YlG6uyK`~e?zq&GVFgia+n*R8P!P%EhBPa3C3~tn)cOO z(2nFg`Xw~9@2@eKHp_|0D29s=G9W{gwr()gEqVXyl9@qPWAu@)rSy+dJE&d8<+AI7 zyWOuOD|jUj1)NxaIE}~!MBZ1VdJR7_e5v3b2{byTzL(Ev=vl8Nej-Ex z2rW5X5p+HPZx!5-4a}OZi>h<8h8h8{%@R<-(E}Oqi z+wHZWC{+Z%mR`QbXj5LuK5bBdCJf%lsO{&|kXmMhOH5f^LNpkqa9?Tu;>0(no@SiB zJQ|T?0cyc_&2v85DSv=KyfmIu3W5O<(r0Rm`4ze0ZrvevDWjPYC`53cnXCN7H5HUx3zd?^GEd7L0cW(Z_KzGN3D+sh7Mm{xccDfPRUSq8BXaqB}JT z_45}=-ypPLh0CHi2wVaEgOG_(9K?2x#d3Ux{iSCd1m`mq2BArhx5;O;Is>So>V68I zsf1GiB1PBBA;&!RD2b`lxx6IkBQE>zWz?+M{C$u>yg8p~nGRCOlA~S4(Q3v(hx_`$z=T_aXAq z9RouLCcU^kCX**M3Np9xECK9_bTuQ zeTmR4eyQp_cMvvNRc_PT67fhei{xphj&X`Ej!d!HWqLY^Gt|s-A36^ylR1ZOEDu)} z8LFmMp-U^&rCH?YHXT~0GS8Vs%0}s%`uXW94}4~;IFPGZ=r<(3M*9tHZz2$M1mHI$ zf^WgEQvF*Hbl+|ciO*JkeRQ!_Aj%+Y5xSQN&BDcR8b_0 zP8CJ79BNa-YzoCVR*V4xJaNpjOak^v`~6Op#vN`NNi&!+joNVsTxuePnh%;jU2z+k zoG^L7BP+sIcWl$Y@|*eJv6r7Obd~pQV^t0K3jSVO1Hg(MG%M)C46ezU!Hk$`(@Q(Z z+_6Assf%{lf_)R@6t3COc&x->sfgg5hQvZdZSs194>VmiK!nMAlA=Ty)3jnk6TNG3E%{2! 
z$aqF#<@>QX>^tp!RNoLPZo{LME4KsX`RxW>IPObmjMT{R{|T4Ripo(=T#wYeI(3!A zFGAzGKzTiRDc=g4pVVty&Nr*f6-pA2qSkrQ@$PE2%V~4251ZO12RXWfB%#7mn1i_j z2cISV*AP!3o1*5{?&r1+#h6p*qpRkv~JF^ zzs8A$%J9nzZFaBaM|1}Cz$s|x#GkjYPLuL*7gOX-7}!2>B>bvw-Q0L{uxtVZw95rD z@Cz^wyV>%Hi?L!bUj4Go1+o=}OiS{Uqate#%`bU;kf86`h4Q%jc6hMSB|y+SVt zAcIvjADKUM1r*c$R!@cTy9zJXgQjIC(( z9LnlJVe?(R2%Q)zP?(VxS>Yr<1G>`1*$ylkgDMHp<&A@{%Hb$@zX{1Ziftn#!SC(3 z-N*`3mkfPX@wH(?dTo?`jTT0uJJa7wPMLX!@!Er^%3-O~p$wrj`Xckky(%SNFpc=W zqH|T@#JiqfB0xd+gHU4K|AEp*6@YdqAzNQ3?};s#B6hymqP zK${cZCdPDv9T>s?GRIF~R1(c%<$8d)*g|halW&<+i0uk}FSSBdAYZ6=6u(Zs-_g1AAuX*< zz81bFUNCSbUQkJ|x-MW{y*=>a=oT(0SjxK9|Z_y;AEYpD7F)NkW*sL%Asg`G>ZumM_Hmc%kWdCE&HlUK&bif#q~-_)vR5~=4< zd$vutB5@9G^AjUtL@md$T@pySN}4mp+=dw>z&J7PYDKq>qlhI&Xr}hd&D!OIdE{FyZ9d zCrASqCEHfiBN9gYo>?z_kLjE5rCfw2+-vkxqueRyKG)rH?n`@@USD@nqUb9^&CDoK z+JOJKWFxV2?#IO_QB&_-lqiTpC+nTa0c3?riy7^?`;EF*i2OyPn4#h%7(qt=S;z(S zzfuooT735L4f75J zqV4$nR+BA-_SgEk<0P>sV|4dQ`mJEy@{n{6_od_G4;V#z;rCNIpn2PyKJw$ieq6o` zpr(8o^wx(bVR<2iT)qt_v;bQ`A4a|Sxg*iJk!(~{3oy}J=+T(nyM^mc7}F^l=3TQ$&d z!C~@^gzmALW?ctDWv?Ew{2(#*Ua>@8->#9t63eDni9C5dn!mo&58S9CXNR-(OTK5y zjbqN(^nU!~tFMh7V5l&m`?z4*YTZ!bV76|^U*g!3&(ZEoSD^;1K5Rq|ZK6JX3xCL0 z>sIVjU$Bv=XWEJPD4Q`PVIh<=(254Ju5S$%#9FH==fbr|>_uPS^ zHAQ%Wt*zSeCS~R+x`Lt|pkFyqV&_27o@VDj(VcUkz$4csr+n-xtCHvr)gI5q)XLl^ zvD_!d(W+WYr6rdb0Ln0aTiwBAbMBLB|57STog=WvSnT!&E@lp6w#j{xb!nB|IroXW zMwc{(QJg?rRxlAF&s}k&h{?(w19y%1!^Dt6XpST*HFL`@j=Pq6+!bCe=Lx#3q@B2_ zmQd_;NOLxj5@^^dAYuBk53v9KK#>0E{bb(wldBrHI;X`X^K*vR0%IPB1-rb(L1yjn zPLBl^oFB2FOlv)qn6w%Hiri`95Gqz8yOg{x0yIdK4-;@Gio>x7;f4C*ej;MZeQ^b1 zVofU}rk*d(h^gR9%6xG%h{=3$tX3H@;nxt0H-ch`ZvlVYPw)YXq`P*?)0dPPkdBQ0$huau$TAwLX)AZ zAlteZS^!I^7A;Jb&2=2J43#xH(4}*fk3cO>W~R-%HMgkPF(DhqaQg4>dz}%I{d__C z)la#f{~kqUXHB86i=xdPm+>HC(8Yt?gkNH3(P=M?xKEckxr95yORLEj<;V@9)#5~> zT_4z?5PHdW!pyoBXwDvaH%nYx2lHd78>rAycTTQ|1Fk2+X!9kgQ)8e+?W16!@Y0ev zOWQG+VdzLppuk`RvT$1mHE&?_Kckz=!*(}dcEdn$t-eUGHwWp6Jc(_0Kdd}Xe9c#* zcdtuF*LMM(ca+K+1(b3vdYY?ItdPl!U)jDv%TsFP6G+zqI_e1Qz$nnNT*&~2_HlK^ 
zcohX}H_M-&zIOkw5*uH7#<20F-z;qG{IAi#LnU)9uwJYc57Xzmwri!+idN74FXp-C zO%tl*qI_ChkF> z9x_gwJRves>s|-A=a-U=FjFi+7*U#d2DJ#o7LIqY2xCExEvMDIven^(z<3FY3-u8` zSxTSM3W6o-Zb`dQTCWoE&6HglJ*7BJ6F3_+TxR?sHQFM+Sywt?U4pMD+!2FKZCZGz zNty#C6H>}~28XivX7~;c^+HBP!akk90wWjp(v`29DyB=0zu1qXgoTU2pp*3&jC3Ih zCa(t-gj$PY^Dr2CEw<9fU>K2}p;pmQ(7alnbhH=cXcG?PO0~~O;Kps;5H0?6yP&~Y zj0CmcdVx5@n+ww3DZT@&`w`bZ2gFhKJP`NJ3q@S#S%J9cXg;^x*0_6mxVdXV+(5#G z`Ghr=&nE@QV=HzW$-A-%HJDAP=gg0n!QzCv+T(>Yd?cj%Ce)yxQ1rqk6b>%hXSb=f zX%h;qpA7?Ih%SlX!V^lX0xz<(2Xjp*#H_NE&NHFPxt2IM|AYcsenQcI-663Vk)Kc| z&numvd9mlU4+1o%&I4TVu$BFq&p5i78UE+X^9Xig{%oIf{w#o}KE_Ac^O!%|FLeIE za-Ic@Z$xgHPXzKjmERu!alx!X_WyMX>blIk9vVZJe#vLD}<|eDA(c zEFm_Y1(WZ}g7BIWX1r#BGkziLfXjKr7p5-aYsW0{b>;k+=@VZg=Ow<><^_qbtImL# zXI^|IUVI%CX5efGbS((5LknYRex?0@&7qxh&Y=Y`)hECxdmeLWXND<;Wr~K}OXs4w zBRZZ1bLjHI9GWi7p(6`pXZ|^K#OBa@X3e3?&tVQ>T^VN1O$Ah&7c__9D|2GznV&<0 zeh!`RbLcqN^dt%eDz?-Lo%+IgF(;L163f(HoXB6m4+piA*rAnjPlfy($Fn3i6d#~>=V|ks&0!YE&R+>l{(8>2d)fRH(l>W0d#-br zRy}I6$`YOM%=bw6VU7&w;a`7s3iANJ0KN^9lD1?hq(W zk@@WV-j&VW+#ho3uNX7*!pt}Jdt<(lTGZO?o4c|m&yK4q;Os@sVeQ5Ntuy^0=Q4L& zXKTLMCT=s|dj1e+zOl?JKQUpSw(ZEaPa00-O(M!tJ`0wf6;E;=9;@WhxlyyAlL`c$ z^K?;LhLZE0E|)zkrVA@B=L6}-WZDDsHrOp}BYU_d^ed)6T()cZYg{1zzXAl8u z>o)@d=R@kdXF-~n__H8&N}V&QtE~%y^d-*fxQ%}119HcR2&lN|A)AwH}mSDPbO5M~>ROAc^&CK@OO}b3-~v!^qTyVr0rN@}XH6;lWvZvGwt} zubr#S3&IG}`&lqu7I__l1fGVG1G)ykOHP0m-kKx&#qp_HF+a30uHe; z?D_?kW$E=o%d$0=Wr@VL_|Z!85z#pNN$z)dZtLo_5XLRB7RMLH*Zka$Oblp$d*>8K z3*fBJOGerAm|uG@bbhe{FelEQS?#Z-`Q<|#PPy4+@h2x2#?Jg6?vpl~K00eQ5dn18 zvx!->;B2B7h1sMa2eoOl>3oA6;5l<*=2tkI&N9e>*yJ2L6Z8p88Ft1k?F0K7f-|~p z=Eu%BnK~ND-^{`ehqRtGcEIF$U`K6U5OxNC1wPCLc->iJN5>c1#`sF|7xKjt@nsxO zKg8ySuZ=bfZQLg8oLCrZ^K-n-V2u-(=Hz&ylzF67AIGEYd2qasWtcl(S0_hW%sH=~ zS&lbQ3`*g!q%0^!2h9S0&8R)x_JUG&(`gmjB}`Icr>?x-t}9}3@(U+ z`+j54YFN@4y~56ZU15S#x@uZ+wdmB```Q(%r{Z49fD;FPn*EFVWD`CEpjBsa#D^h2y@^jc2ca<}bkqazne zz8w1n`F?y(@)aLETdqXe^WaJ!zfkh!WIRLJ%r&uRmMd{0g)x)qII?Y*mf@l_7JM^i z9gH0r7vLC3rhXl$p;a&F&sSVLTM)wr5kjcPf^U(6ObPN~8)Lz@4KE~kw*`KNICo@y 
z(ctWvo5?2@gJotSIfMQ3v2w1$r_9W>%`WkA=IY$x^npvZL}UW8Y~eX$A^u)VHg4uS zLa09-@kq)nPi+U(iZVMF@(6p`eQy5_Y|=;Ahr|=h@}viI&K@}K2$jqe5KP#F-@>!N zigM5L^m-Wq!Mzn_iR`8lG>O@th@FP4fDei0!(b$}#^*KFNpgno`po@>7mbTO$c1Ei?Se*ldGJ*#09>SYwJ*n z6|?fAkJ$bv^hq5T*Lo=lg2*v&r5%Gj{)=Ek%AfvEq_$C4;X~VE#w6WplawmhomRW@ z+IVtdN{|w)ujbz5wbdr;r{|;uIOkbYf_597ySMt&86}*rvH~ z+C&Dn(Q)OwpA$StSs1oUT?agq5)Rt~3DN!xIVX4Y^Jm}wht)?6Vi&X$Ge$^0^A zi*d2f%{fo3tuwE(_4zL;yWl*TfhdUla~Fy#xci**;~7PjAxfE=FldlAE-f_qRxFFe z)#;bN_u5`kiEjFOpJU?lS{Ehhhu@#`;)n0 zlnhV6wCNH?l=6gcE?+dvp|(yty6spN%Wea)4VhFJ)4gj)=f!eJ)WkN;-*9 z=eux6nh_;Rq9g5eks^X;eW7wVwG?$k`NWloNA}Eh%Z#G4{nmT#JK-3 z2VD+p#w`06rCTL(2T06ds}+$qQrc<&P3h$Dcwx01B6zWp#fBbHss9bZPl0NsL4$$_VkQj4Js78JbiGQuZN`=5iP_flN># zG^YfA_}g+&?9dBdzBKVJapY1@^!!vSvc$Sz$i#{RcRYy&5eh(>i`LUw#4as!BqHkoST7I5Ks(>d} zwvO@GaFb5`=q*XF^VuN$P+&pVA|nj8I#8oXm|k)- zN7jRwpj+Mn6;Yy@Rt+AdI4qT+u_N|%(~Ltn0y@X7R9(I{z6CrgTWzdV8_CmDvpl@B zrJP|*?+3Ur=kK8?wcmbid>0Scs^2Ney=3p$er)xObJZtoPmVQ{DN<(Ak)wyRm;RIZ z5UenGo0Zb0=9eN{Q;rc+HKaU)P4V)YKy+HVE@0cN)jH{GW&5o0+pQFNkDjqznifn^ zfE%%YGfwxZ7R!AWC2t`m7)M2iJ?5RLI}Y@M&puu;$;dukddN&vR!}w6QxR5ah;gak ztk*X%C3RB0y<`KT^@@#&HdeO00XFN2$w7K@OpmnFDo&5_H%k7Aa|f4)r}}zggBGkH zb2U04atFZ2L)&Zx{34A`ES|~cPg11Qi8vvr<&|JXp<2|(llmA$vWLeywNqF2go8 z3*t~mHFJcp38kAm?E6b)Qfc%0kob8%UfWxvXdCxlFk)RFHnqzqcvp@!cS?XwHHJcb zl9FofE$MJE%jRy2)}+}x8YL&348p6~R+4B+O1B+O-iCs+gwjjM@|V1wboffkH%^sH znE3KdJ#_PxWLmG>pPv5ZFR$KU37YWRDN`#oNHLgr+r}iVCVzu>tB*7{cu1QD7ZL%8RZRJ=N&APM09X0SY`dx=v?BP)$3iS zS9U8B=Ov~4;=5Z6&`|&6V{c8=CuoV2(h&MS=fGB%zAJfmY$Av5xil?O@l}s1xf|7A z&zbNB2Zub;eP+N2`$2$D%%xuBipv`8dH|5wS=2_d8D%kKf&gjE3DvDkilnYdXN-HJ z-f;*UBiE?6g!){xhkmZEZDt&by-Fv)Ii94?5h&BwJwf)mWp?{VdV1-MTWW-{KrM@) zRGJ)4k$lP5Mj98}Z&VeM9;ScDvnNIxi7PYSOb61E3Jqn21}OB)SM!ikrWXB9I{9x8 zn-2QJtE0M;mXnABCF3kB{}N{lm>CGyF}&rntzSHNAz?@3WeLdhp)Mxrp3GH45{meBQt`?Zx{h{`SeN+-Cla;?dG z{Z$z&5d2cUNqWj8LQzI!DD@03`)6+~@(rof~kv}AFBZo<-?9%9T#HrPG1l~3^ z0kskrQf@E0)^&i6Shr9k=z_A`e|53wYI95?BCZp$WJ`LCxk-DmP*#$LFf!h&zP}B 
zl`M%H(mTrfNBMKZLvmzJ1PIw@ivY@=D*|Zs!og3t%d-IdtfJ1C7sxBWW^f+(0Ear* zI{C*r(NaGLv{3e3(Zam=$6o3CM#5BFRGl+m;4EkH6UHRGQdKS@BybrZYHH9A5;*IZtp#ajP8%=0=p} z+bK6qISB+aW)K6(6QEAVTUM}UpoY7QWXO&U&BV#m^;CK>&ku4G95XanIMIYW6PP<$mLX8^Jk2ujwnN+=jkhuLvb9DW*lExeLbvb+sh z24{T(r?DOSmIxlO=@~NHC|u+Rbs1_7@mExvor?wOl7e|1#1m^TVB8FJ`Z40zk>^Q$ zd0O9aRWc4Kd7eHen3bYTd|I4l$oox9by4V7OBsdki*8a0j`@vp3f&z!3f&%cnmKVc z*Qnea#5o03i#XZC<0y|!B1U;^At|@vJO}jRC@q)M{ra0YvOB8I0oAeK>!zKbH`%q4 zugxMm1`Bx4j_e@)vnIQiCp)!xL1YJ3&z|fKLEiZK>}1$`l8$37fT!_KKb zvV#TANp_Hr2uybRH11?$v&;qmHLMobaImC$1LHa=F1C#eWUc_=PySG zlCRIH8yOXzr7WfFf+Ud1QYNv<(ywPE0Ii*`EX8+`QNgcImI`0+qt##pCyxmb%=%jL zggezNTu6N_%7dOXNWP(%yFUHKopXM6W(2W+SeW=eL?a6$$Lvcl8cY!Sj$mif5kFUv zk>_jL(TNOE*phAcQ+t2xK$h(7v>hA3(v^8^bWVWE1aiK} z1%jvY_tS@7jrPq^dQCD?;rnJ>j7PPpOB$NoeCD#5@lF=SZ>~huT-M&-hEbg3gFojo90=G4PNjY4o?=F?Im|At|n&@syNJEU%6Mg0G zQ8|l9boJdw39_N`m$O3(^*x|+wMM$p&}cq#>P2X$TeX10JziO@=B20go#9vqk^`~B z985(%?O7m!m;$TMV3>WGe7<1|9&4^tziE_8eU5z`r~+_8tN4JK1satw09(*_K$MN3*y2t%Mj1&r0A)@-o1~we0n-O9Db9Gch+eW^6v=<1J zcMzybL#9^f0&0hn1NoR(7QF}|GUmvcv(%lb~u1<{20 zdb2`MW3kX4*@AYoO7Ur+htj#YWaH2&<@9o*>`=ZLM(#k6_p*pMaGv)x)G0|NGmDv- zIWN#X6kOF3FiTVx{HU`@-z#kD>+PC2#ctiD{^Y&VNf8(v_}QiJIdennVXQK2!q16~ z!9Q6WsvQpfGh;lV0uQM4aFl-3EyFLucOoypD@E(f!2Wg(9C^ETkWne!Ei=ch==oMT zX{{hAU{paSDCov+SAAq-(-I!J>pd(UH8}g2xZ+)LG*J~uTzj3#S}(CATfj(&AtXvj2_8{spW zw~2|dGe|E|hpfHv$mjKlY6cCIf8W`1x z0MUWd;fCmx5RM#4(YE8;L9b2o8|Vw-OwM{)_ke&Uv@kgxnS(Jz(TLm;Pna1mZ@d@TlQma1Mne;v|2O!T5M<$w3JBD;6TX9& zj_W@$a8yE_um1luK zYAntgW6IDv%htUI=_e=(JXqJSaI~HTGz;cUwafH)$%cFRdOUfB9m{SDjdU?g$4{bo zjqi?4^|F3V=Ka#-c7(|E$@_Ic{fbu48ofeUtWS9-ORN2}xYtkjO#QL08j$k-(%E}6 zNd5AcfkLH-_n8ecxW@SY;1xM2cjsN62}+G{{4|mD$^F1odU_8}^S-N_xqfmhn$b2mdl%$+^hzsTd&nQxEw>fhi$WXoj<* z2R^3b>EpmsTrzM?NdK&+_f(A;?``Ab+>w=A8n zBG{}oLZKIzBpiTdQaN)yt7dOzTkf77} zK2pn1V~**Q9qd>Voan(Vpw?+NFu+hj1y45&zcE2_%Ay^zcl*Qn}TJ@83rE{puzn2;T*W}uy9ADa{92VF}I7SO~hMW&F5KKv`{u&gb@YBTwHLm>$= zA__kxi_G*4lPM#bzO%dab~L)PQ{@Za_nHM!Sl|7*sSz}pnvCp+y$GD+k!mxc|1!g` 
z%u~1GqM_c+Pyys!bmqC&1({i@;bQJ;xRCRMy~v)DMfQwF4*=CZ$Ps8^pxdacQ{Kl_ z%VrcAU5W3Q3B1knvrBC=IXx44OagzNDI8fR%yk8N#=9>0_qaRI1IB;{E^Gp9^F{~a z8adv8-%*OKu)(+Xnr4_OT@G?z->q9g&OsGZVnIeKl*;#@*{<_I@>a+qupt7y!FhJe!18mL&}W)yxyC=Sb@F4 zGK>$()P>x1NVdAp*rH^ju3iikAwWCyH8BOtIN^XVHPSO6{6s8(Lju-?dkQdSGQOh` zHJk>v8*@y32}Kp{Zr!M>?IDpS)PDv{ST{AbXLgd-tdnZYI=MLSj3Spj$9COM}_G>>Y>j4O{8Kf)3Jr;Pl<@9qra^;i#n2-2riJOC2lY5wqND8xaY04tgpFBrC|GUM zN|px|v0!@Wp^_2hHjP|^F>p9R&4=6Fhc?ojfOi>`)d$&pnzW9vPpc0?v&5?Lr49}^ z{nI1Ps^i@G!6*nt^A-^oCf{8wv=Img+8nfX@G!^zL>3#x)rZ7I>Gy*4Uw{erA^a=S zdF3pX>rU}-`)Z!cYn>pn6ufm<>S+8jH-WnDqy@qG1a9PQUL4bV6OB zdq?Qraos~<$8?3_rq!R5d4F&xsy~8qP7Cf1;SSI1BR|cA7Q`rm7x~3f1vB!CR_mYR z5hh}iOs}QLN&;@<^#WBW zDYRCgxzk~KG`d|ZkK%ai5AM2p=u>eh(x<*A=mwL(~R1^XRSQ4B#TybAq6RqR| zPh3IaRc<1py*UKjP-HwR?_B(xJ1&vJOwtL8k*zNA?s$-X*ETz$ykENp0;8uEf+5ha zUaz^$M@!>dCH$Gqs4B@v5n49K(e;E2jn8COCfW@s&Iuf)yUk0nd4{a8sgnr`X>?9l zWfSxn5E?TiHGJ?BCbvwC8pI1?I`Cqe;7Q;BHMs6VRj9=VARVZx$0n)&3yugzD6avW z0|m&Pc_~Q`-Zf99EHTZ{8j;mTR|u_9JGfFh4U>Rc%}LCZvr@4yh1#Q+Rv7Dx!tiLC zb~ExSMq2HOK$Hdb$H}FNi4(JloDA$!Rm5{qRZLJDX08+)Wf0=8 zLkyL-EMqZxNk}h`Mv7<2+Z6dRf@K-Kp!Bdz6eHm!To_QBk7Y=3X*-FsJX~3%WYvHf zfv2R=o5?*i#knG+nH~AL4IiUt`P?R?T64Qpa{jZ$n6>a~o8oaK4Hh*{>S$XGweJV; zP8WO2z-GaJD9!>jHMs>Xj@eB}3Ong@gVs!`Ofnz>=hS0@sk~mIYAm=>+oC!p977nl zAfaSH#&)5g7gh?0>S<876}9`a6ie;h2-mmEFT2+yK0yAzXzIG_!#f9x|htLcDz#$oz98hTbi$k43iCNc zB2kWPAO`1lN{R7PsRjo&nH`!nfobm&X<>SW#^K5u9a&M{^cQ8WJLGCm7-Q)8Kby_V zR)TIvco9?F*{GhQX~v2BGozg4oKXtn#oy9Lp=shhE#f20B&WLfDb0*Uv(57(hLM>K zdNET&4SNGK7!=iLW8FqMaq>nKWls%Zy0oD88^2Sx^heu$CW1)}xkqV^LXd2Gb&jrch zk?hBl_TvpLPEiiEFKf!%2$7SshvrhU9Tt}XzQ{Jugvpbpt?iTghx#)b?Zp%6_-6vV zyg+OXG>nlNC&M?D{J_3~k+lYlePNfGl#tMd7x#DsKbiD|vu5^zCJZvWQ?m+Ji*aN; zE#E+t?vXa=p~Xs2OcW!1+QS{tlK$ccYQsX;q|b;)52z`9 zVBlrHmeUkbJozL8)d36MWDw#M)Vn|I47Awpw+V{v?r^w zjw86GCYmc0DitVXT*SF?!E$R+e{K^tLArub{R$>38pPw85P?K)d8aZuD`lE~-v#_i}#XENEw8(J0C1I8; 
zY{8jjq@!|*teItcd@M-6-PiM7-d@^@=Cf1xbaK0{baGOCK*Ic#V$HP-*xj50xh^rjvl~e1`)bdt0){^20IDYaYP15x98Cbr-G7U1Xk1_rpy|8LpiKVNoS=pUJ{6LMRWW!GGkx9 zctOsw8wfYpb2${hX{28Xq{KErqGb|LdrbyZzb2C525o5>*sKZbRifypiZ)4vY>|V| zZMkWK2{O@iq^Nv`&W=vSKXG`RRtDO0mV`U`l6+&EX2Y~IXGtLIOQIzXNDB(ntLH2U z8hy!%R9kamh3Qp=vZmEp|7c%}3GnqFbGJOap(}??{<*BCRhH2O<<~@`sSwibd7xPo z`3D5XgmT^I4~$9pw=i92HM!-cLKF1Wc0pt-0?lIkdMT4h%DI_L(hzO=hd#Q6AHR?= zQv3|M<+I@qi(5u#=}Kvj3~tHHQl`B4DwfTBsVy-8JvNgHy}{M7vMcvr24TIn9qT8G zT3uOMoy&c42CYte?KC~0${XAQl86ql_i1(W=zg`H?l(`V=2?|WW^0JnubyfML5Tb{ zYAt>>t~L#kb@I*Hd2CjYqZ+0PjJ@%4=fKCO9oiBk+(LVdJwpxsX;%#2WQNVr$07s z!t7GEy_mf#+Gr4`{~%_Q-hkx_;jd|3W;SCadM5H=u+;F8YwWuo4E;;DHoS$&YW=g2h^*;JXWl5Ep+VdC8xIH`!N z4mv0uWzvI|`2Bf6TUb;WLeY^LcX3+`yy(*%idO5NlbJB{qRezY-Akh=FwX8Q&Nk|j zk*KVlOTtTxHZg!Wkxa`HZ^A#8IY}DLiyP@DNnp{uq>+9?*CA3E=*sh#aJi_FzL?A6 zM*1JQaH>T{&My98@(komsB~8F*}+D^LH2*4{3t4meZAlP&oW}EGaJlI(nRyz(Fb&LYfP)>av)F_C|?0 zNR>M)=0K>6m;(_kvPRE#?0l6~KR;*-1?L|-ScNp5OVcs6`75?9>V4(suitRoSAKpf z0~gr{=jX1ri`2XD_ypY>-O4R4+B6o+`8g8IxT2rN=f@dk=Vs2ryd~#&ww92B>Cb*8 zE!m!~nPV!nq~(^k&tOgGH>RQkbgnVUpQ)gg3x}TE0<)m^ukeNCp+OJFGE(tZ(UM%< zg_ex%C$1%9__mD2=2^8ki(KTS@*-6XUkc6BS+8WUhX{akmZ@f>ne6^%wqkw*iYj6# zmYWx2z}J2{(4Ey-DZfJ2!n@FewWW3hW?;?-9o z;YzvD=pNH4oQ$1A9P~{5f zGq;H9m;<&y`PINy7&Cj(c~y!?ddjzO_8O4!ZUXYXXmK!80;fsF*qaRe)UBy?3gig zO?^RaF)BulnG~{Y*@i1zzn~cw)ED__q`t74%fu?GFNI)%c6Uzog%ouC>Wh}VP@rx3 z%EV?Y|E)Bfe)UC>{1O2UNM?tcre|p2xzQr}qH{)X!owOypohv)qS}h%nypKf%mtda zUovlmwGYbs+Q*M*N|)3DIf-p+karg4U^Xm(d)sd;G&@Mruo4OQAg{IUvMs1H7jFgc zR(!4WU`k;y`{hzwoB~q_PTx0A*2c$--Qs072_yAj-@g*`{!2 z*`np^0%`BK#YEs3q**|K;*UjDyDjYPcd?qWlZhJbL>Mz+H%Kd5Gz`FcC*<CUe>8kd zczBlu0m1DyINGsyj6uNSYtT*L5cvz*m6!hB#16>+UgDvx-n6C~>AX>vP-oAbnow#y z61sf^ZaY`RaA<;xW1AC(p}M8zY(l~M!!M4>?Z;jH+uyuu|Vr49$)94{50f| zO@ufdT*t>@@=5asExM^=5pI#-6lMG76hqK1H8=;wZ!BE^t=C`GD@mf zY>g@twi3psPIoO)K>B=3X2JFWv}djFf^pTtXRLAMUy}zd z{q#E??k`kPs0nOWo9g3)Ldm-Hpa1kV{e^t8W>;vKLc{CQEx&lAzmSW7^)|`_g?eDtgu1jid4v>u!QO`Nyemg=%sEa&x$mzWH4b>pp5;)<{4487ou4MU#H#`|LMV8LZ`xPrqLGC0Wex{;I#n 
zYX0l*`SO^~Z~u3@H~aY?zF_wQ7}4q99`pBPbN&41{QY1f{pv^keXEha*Y#}x8#dkZ zuU7v6JAu>he%H#>@pq^H?K{?=8XoQR;GbLnYudH@z3;zHeG|>5-~Nz|r3Q7TU->&< zhP{;^`W$;PRfe6FulcYqg9rKRKlF9tJpS{n9_<%vbV4x)$5c>zOIhByFn#Rx<4N*~ zk;Wei>97z|P0G8^#W+gxMJPji*CKfn-7f`LMQQkIQ4i04yqH&!T;S{mwvD5Kunn%* z32yp9YN>BK&;xrEZwy=VK91Kd^PKZacB=l0OB}h*3jUCsn~(@8S!*3YWX?+*xwG|S1iAj|1^7Ec(MQQBLClW{C`73f&LpA8M*Sx;Hs;Z{dOm* zF4ljGI{v>a+U={a?p#v%Z|TydV^>^$`S5V369k zU$1^gy|T~<4~;IWTizaDujs`#1x+ixQq53GBai*aR~Fl#R9_qaL2bxB%pg9qja9`q zthA!^vNkfzkZ{+8yf#-gDb?4;3bnC%eXk8SitO9f#cwIw_w8~P)pAl0T*s(On;=o( z3S-1#wf0fVf+pXvCu@z$hRjy=HrX?jtzPgH)}V4PRKJ|&+-oC4C*UOmXm3yy?(5i? zKGqh?GeT}{5pq_X^=Ng*p(h*ut;JR-*4N51Rv$;BWf=^F91E82Dl-AprfdSblk-oh zAguNxiQLv(>I$0F=JJ0*xr{UG!R@ok9L)iomARwXOX}(Cr1h}ubNSY897Qpb3VZPdIP&llXVnyv;G-0U!dB| z8C9}xU-T(1@#g8f6_wlGcHfKkF%NcMVD1J=gzK@@;kChIi z(6UHB`}o1)#}w}S__y-uBZ?^j4^ecHZBjxg=vMTGTs1L*?WR^&Im4T+=NgA&&#cSW zY@F6z3e*Lb25vQ70+^JNQMXlC zlOsT}UxFBScU_|`s3_~hQ&lC&kRE=rO7KC#IXPs0LTHrIWwv+NQi1KQDPinJGODQ? zdum;L599vV?pl|i?$a`Lc(<$8B_q};`?WOMPF(Uy|7Nn*#o#?rOEtetoc=P<-o~6+ z5S1t7V8m{tYTJdNCNO3v^qc2feUsBzSD5o#?5BE5PSXL!F;HxBK)I_X2G%9-VJKt4 zep~V9X5Rps3ml5~$d1uyid5M-+7ApASEqyK^JKoZ4UCxHWjRJ!xcVc zg{63B6m@8h!t_ZsAGX59ejJ{oFu@Hfe8dXNX31(kGP5wHj#?=#;Af?dQVKBpX={Cz zK&9-ol`V?(>4C0Gv*K9#^JdULbObn{6Vc0lqnCFXVIHnJLOoQ?Lc^lf01zl2saha5 zjZs{4uBSsxSBXc2#hUR6?zl01Kooe)N_&OCM$%IVj`1A>pl?nifInb66F_=~x{ep> zVs4=P^y)fJ_k3Nf&j(Gh3s)<1e^$?8dUm2v+e}sE#EhOXiTk*N!PBQLDy7f4FMZB` zPJ>&NmjKB2m3*!sJxNo7^j5A>vcn*~(?fduEJ*LlAic+WScLSRc_6*FIG!1h-aBJF zKxbY^?<>?b6Vm%;Kw7-jK^iE{3F)aqZ8IS~HKS*9P->7Rbm)gPknMxCoN=@Aa&^d-V8a!%t)Rlb(*>bvvYm{s+32goUoZT6DyVEw0 z)XL84qyl1kdn?va`cJlEt*}A)E~4}Y9_B|XLR&Y&t5#F9h2NDKzfGncw(^1j!Z1B# zzsyyMAX_g1uE(d19ZOJWkoV~p0qE%gR0?>W+*i<#D@n18$se=#mhB4`HS6^Wdu?fN z(7R^6eu~#x=|&Lnd!2#T_3Yqxv3(g5r0zg%2>rKV-?w_qtWRyN?AGGX1syln~Tczwn}j# zv_;=u_)c2+qUt$WF}X(9%+R&MLOPXix5CAWx6e^nl8nMTt+4oiMjbonD6C*S3h%PQ zMKym{rKm61tlDjbimJ!%N}r+t*!sYDwZM1;Fy>lzw`kl$&_~L>4O&6d${)4zQfUjE z6}Am+^^8IMQOs!Plvf|3 
z^~r~ar~#B7E-SIyRXy!rVdzfBhvoDtbZi8+ntix0|BxEdG{pU-&`zN%r~7*!@@uNh z9?bm1{n>{cYI03r`jDeXH+Z5JZ4#~PvBQzdW3Y%Uk2p?X59 z>oQ!P>qNQHamyIkF5=1vv;jfV2{gj~Oge7yJ?|4}87XfmcNaN06t7p5e1j9_~ zO7IT}&K^~f-PRHiOeeulCT4e+y9}HrjP=ljQWPMotS~`|x%k}ENAx1wvEakyPR&z4 zy+CekxR?KpE$Kt?9JOGUmYLd10Rw70X= zfY=;?pd@IR0LiKl3(BCm$6LOvW{VOGW2AR(hfr0I_3fpUR;8J4ydy6<^shMBzmp&X zlLxpe3-^?+U~-?X4wIkb$VKO!JZRO5%kL}o(8*ch_TEB!gC>LUUAaQd9kOrgGrl>* zH%0QGC#&_JB~U`AW0<5ZPnFp7>s4X6aEr?1%W#SP<845JW)!HpAKx z-n#i~SW{;l{^*7|O%T8KQPF;xj*DIy8$iKTZ38%#eUwA+7?I>XP^-{b@Esdas$=KP zX_ps19v8uVxh5Lu9t*w~-+OlIDD}f9AVkJv>gce>PyT(g;5KrN63!zAxLabUM{G7A zr`xSBJSTC#BW}T&g>$+u5J%D)Nhc8Z3}m^!ce)`sWzpDKw0Po076$;|N2iVcYp zWIB8!-~hrGZ9t`kpso*hfl_F`XOBK6wQN_++PUVhr*8%XU!g(C!pk%|j=BPjEL|=r zNI&8Q>3FP$5{roGg9JbBQ|5a zl5b|}}^4x!BO?fKf--`9?n zKTvEZgYSX9b|}}^4(pq+_W9aj=WPK$R-V07v*9+?*AC_S+95XA9k(IrI%(WsXXLDj zEu_TJ#8+}0oB01fd+#3{*LB@_zW2J(4RqtjBltsCM8*t$5XXp3Ga3Hd-tAu?)i1kIrm)R zuvTmY+3W!;l-j(11hHIVP+B)m>gtkKsaln#I%~_8ahs0gv#1X>rd&KwTNI6kP2~ue zSREFhyQ!67$T+*MHFhYMIHCt8I=~zfW)_tE2WCEJn7J5wpQ1Ux(`BU<2|pL$+k9oS z+)*idsIvTv4`FzLQ zJl*lOc{`M(c|*=W4VG=>wurUospP}+U6SlfM?XB*(NEoEeX%&x%UYWCiqxIVnimQ) zh9?!dR4%a07^?8fSNz%guRsMG+-@5LUv&ZA@;n_$LkdB zb`hUYr^AtR~J?{O8GX0tWJOU|4!#beH<;|?EaQld^np1DasEw!%ab_X9bUq7}# z+rqlKGwaRDfOWX+pC~uNGaVqxR4CD@0G?{7V#RZ$YnJw*aRxS7XkmP&!SmPc_Lo9# zPmk!^t15CgZMSnIB`2X}VJRh_EDVZB5`NN&c;`mUae9da%={7iby2@A*{?W6cxM}m2 zTejYM+t{}4x8ISp+B?SYe9iYw?0oG%*|qzh?%DhOuY3KzAGqt_8xH-@8}}dh!8d^c z4kdunG%qp=jphmPQM)ZmW}_~+5VxvW-ISRQ&I15v zhi(9?thgSq+QGTNdd0zt4Q7Y*0Qb%cwqzOB_kV>>?kr$^1Dz{*$b-wGdLkOkz+I-W zK@KiYDkC!o_c^uXz+#6;A*S02N#^e zb{V*9WpH`21Kg`>$$@+D2H@00F);?0R6YWo8$Gs4e)xlH@Nd2-181xoD(=` zJh%&;;Qrkboop+>-4D1*Ci8fm5V*TCa2Lwp@?;0N9}}-SGT46uaF=?6yXe8a?7&?V zxR*S*_&dsE@bwFH^4bF2y8u@?X&&6Y0(V~qF5Z&@yFA$e?)wTRyXywvuJi_X*@Jt{ zfx9elS3S6^o#1{!IGHHG{Q%nAe z9^B(tZgjsUaHs8lXJs01K#9q|_S=AaTLJDqoG{X4bf zh=J_Jp5V^*26sjVW&_u-19zqhxGePmosH$4;C|{NowN&ZU(cJCK`$aY7;3eJtAGm*-UN-_*237 
zX=x_xwe(NL<0S%lor5b^fFBWDjbeze!(;7*9KPCuy8U~(3Yjg&Lyl$lNK88~b`8X9 z=LLY)m@c}3ENCE0cE7WN_q9weKQCr1$>i_nO}9zUY3N5a?#D8Cap)HC@@NPCFRCer zfATeZ!n@oX-b)@{>|P=MlHgtO@UC>ids2#OM}bVFg-iSM^zyjieQO5qN*P`r?SS{! z3!S(32Jo)+hIiG&dkl9u{jUn%DZAfUnIgbZV#mK#Cp!yx_wc->6?%BzCU~F7;GOC# z;g?4{;Qf@uyup&#?xHRh+XF%u^$Xo^QW#{3(_ zm`P;C9PI*55D^*7#(dTRN8HZ>g@_Wy{NcZ4tBA9dNi&7KdbDw+Xk*E>L5LAEk^6L+ zHa_z6?~l`eYIq}sw9DXF?P}0tY(*~v-*wmEiW*$AdkncwjQx8lvay0nUdx-7-RUv* zPGRin3}b6$jPYm(#=cVML^4i$&hRv$>>e{br4>K$o=^-1ORfgI3gk;_&(u0*_;=LF zZv+LvJN1Ir+#cX5lb6SGK{rz?O)?L6fc#9MXh_cK39>Q;dVoCZK|bR^o)yTaJ;((7 z%S8E`qNG*Uh`tYq*@T?qnBB=x#fco^g)+oE+yU{Y3Smd)QcoO{lG6*viyq?3 z4&p^Ye91$6sT1N)3*y%m5Wfb9&Clbhg2G}zd@_fal%WF0Jlp~CQb853xdFs0y&+x( zIuY_UgHjo57NiRXL5*1jVo}>!yOQx zD=bp7FPW?^@Px%@1&7<>H{-w~c@BGydu3;)AD&0NZa2}mLv76WXyaVb#%b5aX|-`y zZIFDK{`4oe}Iba0#7hq5FCz%&N}eU3cN+` z(?!Pnb0^qY%94;aNXS4%_h{ov(Z-T%V@Yi+tBqya_~hR}hj&;Tg!{2LdbB}?eg?;i zYhy)ikgCsI)5b5V4Xk0{Ai~b9kVn@&cVygWZIJ$NnsR_PX1Gsh1}4*`&oCB*AZ-v3 zhevv}ai(a4e15ett2R!njnlO8%rD}YJ7jGTa)w8Gv~j*@W8SqfuQn{De}Oi>@=LVg zOco;5@JNp~E*EVqx;EGvfOzL#x%TNtkF$4px4}W=86N4;#V4wtjFnb5|=!EthqMGuS5hYpJ}?mVW2YM@Nm!uab>X8qm5^aHfD6o zZ0Nk2IkhoI8-MxJn7)puh#5ohJ=!=|v~k+CaawJhRU2n%m?^IS9Hs)3%oTwzs7jK^6LZ;ma)GX zFu_fxWzf#~EyW@H>r+%j+#P7c1T)M-eZ+kh{E3L^BGx3^%<;p=pVE@{5}RGMbt?~K zTv0|I=Ks{-{>qZwr%Qvv6ZeWjJGaOp!#8L8ps<$vMgHV-5C`kk%zl+XSWvXYYncVZ z+W7)&fEN|EisV4qfdBJ#&G|RizmwzoFZIr#p@}FixV{mZRv1vaCIHI#xg=-{{7g{B zS%}n8DLlR;TO8claMFdLF5F#Ux0JB50`AV*eR_5X+%0miqSp?09MJ;i_1xl62lnQ_ zj-6LpuPFNG)$!s`2co3N%80tCH!B6+IrKF1IF(T4-^x+-2YQRyf};9`s!NQgTlHPh zFdSJfxe+aCM9bW#%R`gt!sl~VCPoapd$e)AXk*2-v7$EC)W#ZZd`4|JbCUQkludB7 z8;%phIUG~EMW7AS#tirA%d$e)BXk*^BF|Rfj)W!mBeB)0T3yu(Q%FY5Prbio>i#8Tr8;fe=lG?aL8~^w@ z#^Ov;g3Snm9&M}^Z7jPsmes~pwQ-d;{@_2r2CfYP)P&!IKUK$;f&l zx*9q{T@j%MWHli92^N>9#FNB~9HPrGMTIkUhaFcmSU>PJKg^5((H!?AR(md5_Htux zq=WLmtYuWmU#bk9Tu97~OmfC>9vD&!W!nGUb?`OC$!P1s4Nx`ro#d|ZQY zl)vUMye14!+t^s6*$8rv@5dKVN2L+4o1qv1as2}Rz`>ux@)^CkMHC}Q7DiNtCMfDx z{NWd82humvma5aucGuMGgX_v6-6~yy_U+l)$u?v 
z_e3Fj=>(#%#QC)wf`7#MC*$`JYP2&!RHJ48Z}Jk+$09sld&76r@zQqeG&AOh~2qr#}+y@JCtfJJPQ6 zUEO=3|DO$DwZ12OYrA3dzz#91>cnn|$6LcL&%s74AZnICTvw-k@!xMqcxVs>!$lRE z>*>OslkHmk%?*j-@=?5|o4TE`@((VHVL^_{`n_N&@mm<;m-p2ztA-H=x|2tfmxHFma6U3QPF zn_rg#RNHi#3?uc_lCsAwEy~U&x2{=C+7&CIp8oCc`{TgTpww8qi{m47W!&xloze2} z{>q%bpeXV;Mj)1h!S9DUXmVwkEg5L6_#ZWHa&>+lEUBdaik`0PgtPW$J9WVtBg1vs zw;Stl_N$>~UJ13zlkh<6Q1A?X;O2-1gOf~&`rYr|uOtrp%$W-y?O;IGL|x^2;=98C zz^w|*tOA^}Xj&s&FI$F+^jA-3A+--N(1b-HU1- zNpt>A8`1Oocyxh#BLbxIAJ1CKNH!}Fm!>jW;t>sEQLjPp_Bx>HReGrirKi=)^GG#D zPdSfD{Gq`Xl8%ihjCw@mwn`FM`tCQYQAvD_P4xF}4Ut6URM~5FImY7a8$=E1@9K}z zImQaYAf2JEGc1zLNiF_wOw^u`lu>1%b@mjNds2O#oAgAP1xI%}?xnS*fYpvk5v;Ix zLOp9?kG?S=zqP?h88BxF7O#V*x5VEzcu1ACWX_if5TXEy0BGQ17(X~u6)4#sLL|&# zfW%XyBHYU(5FIEM?mz?BeYD-s<*{}%e$S{%y>Bo)zAP-UuhnXp_0v@&X~ysMef!z8 z2^OBCBQ2Jk^>h)$0E|s5Qg)Mx+W38=nliGMUx6tzs!@WA8@Uk~c|o^ZPs!~^+IPgq zth?itSFAfSW$2DHl?k_kK_uHZq@&EK?iDD(BML{SkdO4D!nttF4l0p&PqPG5eNrAk35t5?nB?SZoe)xOQ z1VbnHyd1%Sb^bC(d+L9n(Xx(!G#+T(nlx?Wvu(ShUL8c&9#Y0#do=xYcqFYpkc_6$ zecJTpcRy1Z^GK^P81|8esjc^z;@qQ*{q(t{*Y8RhEV2^-y>uen09- zKi#iC5AnO&pK48#8!P?D?Tb)&4}f82g^G!v$Ss*es_@VC=~Bm`{v?~T+OEQ z_RWRj8!6uN%*$!b%i$yF$!bm*hea6oKp2ewR&9)9(Zl;jH1j*+X&dU*9oou*1^z0H zmAxBP)lvnN`5ju0?G6@pXuR4F?Zpk|b^EnuVAqq^?l*J(#)3zfLhMx1-Ms!I- z%Z9(;g{f?Lk#|WV#C+3fyTAAeemM$SbItP0#-E&@ z_~^fofLs=VtlH#@`Ks}UZStnZZ9~YbI(H8CYkuyoi(Xlng{?bTOk*bsCQU88!@1NA z69}T`R|}?$cjT)`Xa8U}26MtzHJ#yucR1VxvR9AN4YEG=jlphrirs{QX_UBm8P47* zxifQT?oUW=I~e?Xk?IbL0q8o;j(=Tmr6_=|s$O$2_;d_M-8z56&+2vyE5L)n zY2DtAgHYEya1QEmOJayj3Zxgu z4G%jeGP%WRs%DQeR{6K@8A;{2U?k8v^8w=78#9c z_!evllzM2^@lsInQtT~iJ5?2J3Jn|pxFU>D1Ytb>=xDp?83^;?RRw_H1jYh1DRxZNVN5d9>cZm%B|UI#SD3aM6PXx1CdTF zV0ewXY_d7so32ah8(0es-<^J(8gXt!M4f|?$<}*@CtDCD&19>+rXwVr(;l4SZM?~B zy;_vq(gXzNwo`5USE(C2GjARl9_TP;7BMEI8T9EL?OSAc-~!sw9vh2)%tY7p_^WTP z3y$@c8ebh77fQOVH8z*Bh%l@BWm~#`Axl@AQNH&rU7M}rrsJItyWxGVGoyCgthcs#5yJq6e)-Fk z_xw?Zcf=?nU#8qbA_Iom_s}xsWfSYq|Bo|X#s(YoJXQSKqvTeOK7k$+UYw-uAR>UN$1PZ;CF1$~;nB=bZMjw;h^lH0D zH)$9RuuAh 
zm26EFA~r12b78ooCvhhwqwxtdFrWP{STAxNF>Zz5Gv;6p($hI}Fimsgod_e{-@=9NCYzM?KkZe6`7bV+YxfzFe|L&D>^gJCjk{A=qtr z*33C{c6>5@_M-DETIA$p_F;ZpsS0D9xh1PJrBL5FNk5iH^W*xWb}(3w3_M?u6rbk# zwB6Ix;&*|j%;iv~D0#dtlqu?bMo~-O4UsE~DoK^^%<=zsM6c3)H&_mRZ&}nv8|3BP zoP)MH4%(J;&@Hbv2i+nL0yvlI@*+409>+iHhV)`JG#;TIy^j*AJ^6O!r)-N^vzJ| zzI@yHeH{0fe|!b+%l$N(G>q+-3}D^qM1k6#P*|Y!rph4mi8aI#avrOdzNGjsEN;tp zTU!l$i7Zs>G3~%yn(T>waS! zET^X?^`52G@*WS9#G6KXw;ux1yKJ26es}D}zRPB}?A_ii-I`5_2$S?Xpl{&AaVpKqn4i z_1H3pG3Md;8i>zOeJk$N#w5d0Jyi-E33)rs+bpIsxGGQaswk&IpqOUD1dj~RNam7%r3^GcRY3=TKwY%e$-W@NicREDV2m6VF7n5ejj_bKwl+IOMtit$~d3E%tdgXHkIGh04rV^(o%ra+>@*uTp zlDt_fcwY3Y>O9@lx1hl;>bnNDz2oTi7~Sr9l2Hv&h7q$A!O z9JbQ-r*%zU!;u)UB8u^Y>D)MEZo^p(?rSEo*Vkk}vGat1GuhH$wyY)>p^nXAGF)#r zwVV{yf*Sh_om3_b8{4G9O1e%V;)-hwqb2^)LGqqiv|35>lkjsAvkOI6Tr|gZ#f>wq zD;pb*>58p9r7Ig3DE$JdPyob{3MQ&F@vw1|J~(KwcE0`X;AmSCBopR>o_ubpOqiN8 zKKj$@J6in}0-kvJxNwXetfO2eYMvNSOS%Ho1ziD(nkWVoRa^|H1ziD(x+qLj87MI0 zh(>A=ap1&A8lulDGTFrZ{ci3uSo=$`4#B@x&qo+uIXGEG@|Y85d&cO1@HxJ9Ff_Qkh++|z*>gNMh3&GuAt|tuAqm? 
zM+O5`m8|b&U6rrNHC@iB7{f;KXd?)$wtV^u;>u7`Eun;o(e&A?j(ULPMNtn}FzNx4 zF6xnqCGVAC!`oZTSJwS-se5h3Quj=Wqn<%gPZhFJ;c-Vj=uT};dQsE^P)0of)kQr* zkB7xdKFif?B0HlVa0m5xGb)3%jH>>OdN^!S{2SQHL!iAV>H##P9zg4&9s`R5p*{7O zeOkiTNGHBTJwPDpaWL3Aq1t45T_xw&CeikycSve&ukMi4+Kt^)(Jja*U*Ok5YqYgd zjv2L;QKAn{9UbqtkrI?)@}sWvcOt?xnK=+ zf|WS1rn}Ac8T+E7*JkYt){pDVEt%_6{Q9!6)$9aoO1YJ^%kH{Pbj^3eb@oPZ%?Yl# zmkZaBhf9aaX`55W+$m|glca70%z}Vf=l}*W?bvPj9clIGWFi|JdM4D48aYbjtE6aU zE`*cNmwxmbPpe}sq2sP+-5OHQvKHikCQnY8J(A9J6Wp|YQJC7987*>~%E?V@dckXy zpTX`xmcD)^K~|-@vdv<}{yOliiihQy@E|`9#LN8TDDfr`6GM@g!H`@sG^8_72hEla z)UCm{`LJxRWR1Y#_K@=Tl$*vv4Ie;o8w}6vHgs_r^g3boHsB$;+%Xmcn;k=$=sA*v zHWbdC;SdzV(xD8A^8-1{+HffGIe}%jhwwuh333dTtUTnS@4ns@< zzCT$=byCY%O||`^16XZGG?AGvQ9S0wt0dh!#9O|WTZaf*N7QD_X7O9#Wed_|i|o%@ zGNjY8*t9H;cP6iJ=v0%e6ibzij%vm=2I*L!aO~ z{`hDX8U%lp_^&x-PQTE6yD3QD&F+xlQv#MPUPtt`CH?BlqfN3HnrT0K!R$xm$?)Aj zIDLxTOFVaxs`T?aOW$2Vz_77Mw80!a;QCRGhc|Nxe%LmhH-u%&nP_vK2+n2~Heze_ zR+=7EzYMbuI^=DKi}*sJRq_CZ)%(U;$Tp@!njI$+hr}KATZQx_Uxw!I<^gTOuK|_T z2ubb1c4G{qZz_E9U=Y}->8!02@OTII)e^$JtGRZYgV}t&jV9PTHzrC`nsvHuvPsH{ zuq4bO0Oo$~nYcB`Sov(nOJY@Xq^e=KJ$woj%eqhBtyi$Z4Y7isN)`$qXp(23T_C~(M7!cx2m6(`ZELDr&8{)QCnM%`OEcq9x`4>I|56AFSF3`hFk^?p_Ms)!ve9 zx%*1+-hacNr}<*)6vWXq%E4+RUCkHGm;j!AQbrRTAaQq?CQ0>Q9eM5pr$`OP6aJPl zIc+gjz!s5}KJ2!HqN`6IjdvLAY)d}W9uN$~Z1%OycW&VoAW;RQ8jnINLX3_D^dGe4 zK)##1a6KY(lj`**Xi2Ki32@+@I@M-s#9PyVOJPb>~!+izf znl_4>AuhZ(i9F)8Z^1-h=n-cUFvnUa4?|p@hx>&?^?{5qT&O1pI7KRSs7=Pgj%(Qy zQWL#Ms7~@uHa+sWq@iRf(gUWo;Jeo62_(rvi--$=iGQs+$`)h}8yB})bqzS#+JiO_ z3DBK=a`bL&z>b>zm90R6@`~bglx~sn(jJy}{!!XK-AIOMMPx5LgN|9$m3I?{HBnu0CKiH1OVD~NsBe7wriLZIe&n zO40DBq1x0P7_%8s3>BRJZ?0ok<%r%SV8qW>VyKErQ1g5jpWr(c8w!OEV8Bx;ev?Vf z!&@b;tzGL=@5{7nex{b6a}?!;l%jV@QFZ>aj!{;>9vm62RH6{%RAqECwD6RnI1C>gpuTrU1Y9KO_3;+~EGZ(0d>KrLKZuGCYQn1qu4Y_?F28?YeBY%gY6nbaE zy_2|iwz+q-k!+jZ!FN^eXP0~Dkb7sJduOkE=KzmU)A(tFgXzfcLe|n z)`GX}4?e1;4I_G}-FGlJZXb3=_fSN~ZYQF{wzEWbRGk_HqVuX&x=2erK(xt$cUp@9 z`mK5*~hpqSe;iB(eekKS))S!;-F-J02^q3q#)l?_xRbuMILNIG#L5U{E<-`(? 
z5s|0dUl=dzJ%%zTI4~ThnK#aDvU_cDKVkQ~gX7HH(KfR=V*novIP(b=df^vAQu-$C6j=K5Y>ea*O`MJH=IiG^V5=1TF3lD?uPcP=rHi@11 zag)BEL(-f0#rCydxAF_>F8f7kmNp@3h>lzi;aGgDzAm2>=-?G}C&K;FoSK-kCge;YqO8BWgHfh~O7x$gk`&!B zuMjFjVxJS_1LN8K9QV)LJ-ePnOXM2z(R{V;07j@m$2i8H##rAIo~H~|=O9Vahbehz z(Lg{jhw!xzHxEEThF8qKSPMUk?^0z3{?UWvl0RMKG#sEjBb0s1+Gri@z*(oKAb zU>*CA@Bt<(FjpPJXojds)xaXCKQMMz6!Q$L)Q<1AMehd95Syr51RKvPfkVEg6lT@4 zp?gIJ4SR@XKT>w@HrWBpkJh_Ryqkf6M~E)KB$AUIhaFGZFGqNRWlkF469XI>6Oz2U z+&#+uu4qE{y8~*q0!X`lFc{|xE_;D*0x3tH(C=!lq!dBNU2LKN5qCwhko3yI;E)Z7 z<(!^LYKTXzxTr@tsDLqGv=91At>uNLpnxJ)L6F)-|zqqu5u13;XK;OOOiZ@cCP?J!+ z66UW+Ot#UnT+Gu(i)QWU*cd19CV-WKlVB3!?`RS%M*S|5l!=Ih zWtuUrHKVNQCEPIZgcfA7kpMMRL>%q>Y``7tlXF5<^ZnC6BwN^AJ_eg(T{u5MY8X?B z9vGHGY%7coJK#i9+8&Ds^+;R9q1ctYJfJqI*>Ex_k{k+5x&n%E!^<`Yg#oOQdw7^u z+SgcKB+8tX3Mu_@g=Js(E(GR$4>b<>(%Tk-n?+ zx9eJ{N)V<SF`kLRnW|JAN!sjF-GxEQVn}| zpklzbpn4OmW8a(ZatO!me{|jYv(_>T{ zTUMDc6yZ=9!6q&Kav1+H8l4gV%poK?*$Pj!Dit)Q+6Jq;gWz5q7FrGd3upn2A;l>z z0Uby2KcXHGaZQu;WKG!vyv~zIa;4uuy2fFTF*D-NYAXIjz=$|mggxG_86mRtvx4Pf zT9hj@@PDXXQHO->TX_*?ZA)>;s`d?7J?;)-sNyn$R1h^rGR=vQI=Lo!s;N5zx)Of| zeXu9&IR6>`rOW@5(d7sYv+*&#{3-(-VfwRGXVgfPDoABoj};LvQ)zp@6cXYwvLMAq z{AZQ)j-%-oom9`mX3vwL?VsGrQ>=0N5fhXzrlDB3O0kBLu ze+vDM%gD(BN!F~%3Ax6S7GEeynTS%lOC4YmHsZO#WUyt7$|Oc?esu zrY)crUaQAH&5-tl2igNRMYua{%SKHt?~@3}g=Ukhv1;{HlUtusEEWuse%8bC`Cw1oXBaVtWFCLLkvgzqqZt`8H1(d70wrU&9a-NNA@zqhUVxPxXH zsqL(BHjF*w<$xCNSy^;dM3K6+9;G-m^2(m@vCO^3ls{_Yq;aYDiqOMwZ*kp)>5u;! zb>G|R-!6POj(==>^G&W}+<2~E8L(zN$NxyNl0D(pykRXLDj?7r=C{=RbnkmjG~mpa zG0&6;(o{(`#P>ze(H93(S{kerDBLKq#s`9Bf$Ic%#V!|@1qRAV3tYxPEd zwXYIjt?=V<3hHZ`j6`H{L9)J!?NxSS1E>PbrmW3?@#Kwyd@)-xh(~`EN>CHkkFq2 zv!Z8Lc@_fUuS~I@ip4o=TgsfHH>6{(>v2FhcZ>UcoU4$2jirKADtm_<@ z6rC|}*S?YeoI;g@97Y9*&u=Bn$aNkgIU}_$?7GcjW-%rMgoJ4gG@sk**h4lYC+}4z zzds1b&+bb%gAc*R$d`1LFPdvYar*^*p1L)IX#ww!6EVkbwbDu%`loSprqet}4xWwP zHqUR8%MMVdZk4#UeKTN3HbMo;3>5SrSRb0{JOE!R8YYs}JxstboY4#6o8iE_gicZU zvNmciP7Hb9s9!7vm{lvY{F>neIW{wW^OeRRgvk04C_lT8^WvcZ-*Vvt-0r#dfFvikxi!O&)()h0A-WyA@Mnx96|g! 
za}}JstzDPTh#eACHM&hRAwQv^ktGARmL^1%JZQ=j^MVGpK&u((b@4KLLRjD3q!*Lzu4^`*B!LGtKQ4C1`5;AnwsI&H#6)V#2%VBYD?LIPVl0EpJGziI93xw75@ z;y2O=toN+0k_cSW<&7Dl=Qb9NEpPOVl|WK%3Lu>k2oUp{Z|$0IZB?!D{Z(Dh5&&@w zw_0lT>?XBJdP)W&Tp&j&fL@?pLUvXQqMT!$SdRwO(P5=%>uku=4tt>OQP(yWY&IEG zHgxL;JM+VF^fU)bt+yqX;`O$~bwrxXs%5THwwJgTw#1@7<;r$JpQbisqg%jc^sKv~ zED-OV$|KnMHT+ojTqmxaUd5%o-DPPQ| zTv(0$v|ku4quf#c;;3U@*`^{)G{Qx7BKe{YW^;018qF|xa$^~A$TJY&RcV`vXjVBv zI-!qIP?fuiErxX0{S@~c-=)z7KSul0cAuUgu}FdDkMMDKFk|~#cUESVGU=>ZZ~e+_ znA28#qkcotu%lQOGkxhbexl)z3@c=9{mr5|R(#(x+%LPn>G!VS8{C^&%uZpAm4b+- zX*l=^mOmJ9+7`WWoQl_i!v(8TfZPpGfPB@Y54<*VG_Ah*NgN}l$gXMw@(6RJtCXH^ zp3|f3^BG;&^5*9`E()#yZcYui<|PH8>#8C=PCPj!YojkAWSOO3fshq#wo0u$4kX9H zY=YhiwljU62+j<(ap0&kGvb-Od>r`dEx;}-9XTys!=ZMXujsUj?dTp{&2!Z;Zq_>V zIaC6ENVpO3>6A^Cx;Lp~W1B)Un~#nbr`!f+ozR&LV0Un}kq*AuEQRY7s>Bh+%a>$$ zK2L6`KGx#@0_re7nu;ngv~DW^gt1vLfb$Nc)Y` zClMddVQc6M9knR4*K`I4Jvxy^8gNBAtnS+M7{p$dr^6%{)sZjAn=3h_E7+RhYKJbm z);OOrpXy74LI=J-KgBj&M^X4*5ETOs2eP;M$T6h`l8paPY9$P{oVF^I7Mh!5@eJCD zz3r6+!#!u`T9bMki+&)jD*>3!mip7%a8ad$52m2>sq~A+5xaVM8oU#mF--$%@nZsp03OO>j81#S!Z3e>}yTq_usvS+6 z#Gv6^VsMvgM`wvj`V^pgGc&;ubjc}LfgWhWxe?Sh?-j_JRRmJmG}!3AK_f1Xfj2;H z!E-%llv-I@+FMNTRc*Bcg{QF#p4Yubt8{73^Y)|Vy!U*xYqe~&SY3VYa4=2ANlRlf zA2Jabw^pjJ*E5`{Z9UBCiU^yPO_8tKr}Zhri8CQ}yc85AieX0Ka3 zA6FQead6bN^|I2@3x{Zzdf_N;(3&HdL%H)_W(>V1LFUXFbzTa*Cu7J)X2yA1q~TBF z*A$qB@wu@~G;tZu)olPKq?63?yzYf`uIZX@2MQrHmJN4pn}%!4^kdsg z9X*?MG`+n%we}!(^zrSqh{1M2S3b{jHP+k?H|w5WXY|`8-zbRKUi5ltyLvUt8D)A> z;Nj@Gg88i%mt}^}<=`%l6)(;ixF)3VD_zGypcfXnrrKuhr4SLYY<$yw^d!_QZ=YlNH5hk-dq7w26>U=DfZEbxtTbY|{7i-Hr4vBwYa`ZXRG z`?0wmaah6o8~>VfE%-o=e00a~bVw>HX3if1WLb9v{iCP$R}ux9cG)9xK5OljOvb++ zYKDverl^T(;V(YzjLufuLzdQ3Nx078&%l`p;7am7;XWOvsx))qJ^&lJwWOx#Ib3LM z)Ych&;*rxpOLs)vIQH}l0BZSU*b5ao(l}A^i{EQ8&_CKixfHnZyFvZY1VpK?pd#%79b zLxY7D&Y(kaJXIlVVkjI-L%0a2BjDUitSOW^R*vbv-5)O+` z)grXl?qC~?6KDuk^-7eCjy0@+brm2;X3B4fUr%^b{J{L)-k(04e)_Z2h8ame%q3pl z)*Kepo72xf_x?DZ*7u(UzS$gB2$3 zWil+~-*4F&e4&*8#?e-o4BB1^rTk;8WZQO%l>hRsC)zi{x5%_Oz*7D#Plbr05}ibO 
zB-NUWBT}%E@CXc%s3+&jp;)PH+p)KZ##yF6t_n3LhlUxIm>JwEMTQJaJUygjTeMR% zW)lT9xy*6qO>=830oF)|enT+87ewv`nJnyDVKzgmo8GGgmfidiAhI~RykfhOs#e=d zQ&B#Ovb=P6fIAXaO!~&pGEtPoMaYunZS{!*AgOIN?EUm#d{y{8!asx&G|i=qd7|wAT@t_)<>4JtoF^ zq0?`(Li$XdO`@Y(R6w7(H8T=JOj)K8zIsV54a=y_DBlE!$PV)4=cqE8?EkwmwA}^FraPdr~6-Lm7v^a0msuzzU4NKoO0yoPA zkB`V*>mOq{bLnCZIpUQ)vCGtgoV(hKD+RfWJNB{5i-+@YApRV_2hlNW5?K+Dy`Dzk z%2qHsp5SxU(eb$5ijEUd1S>+sa=Pzxn;@_$cA+219#CwV)!tou+83aVXQ5vDBmZjE#_Tb$jh>`cv*r4f`sj0*7Yf;u&$KSPZ+d|vV@l54? zcPAf6n#YpHF>k10mO$wLu^Mf31UjV@2=L4##1HqO~jePLaCXDY6L( z2tYU4DS~U=8!6f%vPla#e79s-Bb{V=N}NaOmSUU8xNV%(WN&L{GD==q=nWdO_&hRK zw;*kT#sUwhaX%k5bD>fLLgY{UaV^oA`p4YmQ7#x`kEs2JTfjnU0Lx@9_k%(%_kcVu zhp|_<9AZ-}mjiO_YGV1zCIoAgBrE;&cKzX~oe9G7{CTVXJnoCCar?Bk1y%g#V9Xr& z?TQw|{yTmiE`IL%W$1>#4Bqe;U*bEVv#!<}#XIEe_;WGPjmANyTA&xU+=`PBrnbWd zkAusS1uN;4Y>uR_ITK=ai07CEV>^k2>V8uyd^eb)xnUg?2nq-Ui1BzDYl9c~QQwf6 zNg{0o^QPaLdRO`_en(4>nVN5U*pRFA}V-(tF zGk^J<@EHo_Zd~R&Z(5a3M)DNT?FlJqZA^=7(_BQDoACA8ZP8G{*OH$Su9PAoqA?ID zLYn8jk(<(QX|TrTTI@m-frftlnUC6(CH1#hnl8LE5P!Qy{S{ebJZO?(5a$^1#XmEO zig()*g7~wxih7udLS{bFu8ujc;XR%NNn;<7AcgIUZ|YXgrr&n=31LZ18}n$|I6po$ zYEczhlyQoHB@I(f8_NPlxJXCHU35TU6cEF4V>Q(dQ+mlj{Fub|qwPWI>r~l8fWJHVU*i8&-{0@Pm-rZTSaJ3mlKNi)3kdiB+>cGG z48S&90Xvjp8YbFSj^)4M0gA*kmVqA`Q}`r`luhKZD+d+^Ou6)eFB7oWgVd+xutR-@ zS3`e&%pp5NKcB70gC;<8#OHbiRhrcA8=@SBEz%VH7w$C^4);U&QG7a6j%>)OsEYtn zdI0w*z$ul@5b2?NyPtLXf4Uuxxw!2U{!2cyuyhnP3|O36$p&ZSh643ZksC>-BkLIU z!3tyFW=8@L?+(xkI*RrIJ|VR<)pn9wGMtZudWFSckadyF+%&`B;pD)siu@=9fF!bwn4Oc##xH7&&?vGKvd|+D1VW z5P~lUGgI}tV6u^n0gc%xgV2-NLUUn>SThNh(PT!bKrj)#GlfORT0@|K;@qSWfC5!d z5N%>9kn;*qA~jow5urE1#<5t*up`}u`e#iW3JDGE*1(ye7>}xrLyRm1`#>0vg{0^j zSxZ7w3s{<^^M!t)1%0kv!&)}7W(+NZ;5!=&&Wsm77R`2paWWRp53-?12Qd_>-SSW< z{3rc0AOo@(M(N>Kco>l1iywygupvG)H-jID)G7mc0}NRn2+u`sPTMw@gs;fd zXuDDj*?RE?q0&%~Lcb<|S)8^voYEYNmb>PVM19Q@ zg|qdrmSQ@UB{PkRIee5CZ4PC=*c>YP=*7+9qY_ga>;!g%{sRRN(2bu!S25GN>j0&e zP~W_J0-@ci(QcJ0P&ARA;kP) zI-v_Z!_4}T2X&IYZ-}g3y{v9o3EIXvWUx)Cm$ebHR%!_&(J9*-&M6$(F{d}+P@CJH 
zJZk0w8w7koQ86i2LxJ4D4;3zzC=y?YNpK-YJ!e?^+B#?W#dGOi&2T-9J(tQ}DFC@4 zo95uarhr(Y-5axMK5FL{&&CY1xj|NJkc?PiR_1tbiBZkRBxA~RD&S_Es+ZwZy>=5$ z&1QG6&v)77igG}Y+Cc`-v$%xdL;!AaJlI@`qAI(N@Lb_2wG?t0>BmBOKHOf^_?r=v zOqgo0-A5C|Cr}ssX8zQQb4jtJN}WGdcg~+$kw4@5<}}n!8SP#@(5^f8&!gbJl69ku zcswi0h1WKq+tibsDz=0ZB<=bs2!0}sQb=D+3IH2iej$FXZ8E-{N44TSszjOOQO!Ui z3daM^qbkqdY}6l*_3P)kV-UGi9oVL|@ z9er=&Ca)ERb1*9$g`tRbTNIA!Q{gC<)ZK-nw5lb(2NANQ)hLIU{vqehXpxFIlobMM zDbdZ8ZfgTiQ=1Nt;)rdcVWHR_MMN0jND?1mRe!E#pM04^<*HIPRC-gb`BEjR&C=Df zbx}Te5H}{9PpKx-W($zWJ(v}f8orlGXs(#lCM1)U?iuzqk%ShSfG$9%ZmgJ;?RmLh zQq?cuNMZ&Dg6 z6ND0TV%3$HLc?SwrW`t5iK)89n}U0jFfTDh#b8z03woBAvfL}frS0RAze`)aDA^BY z)kjNS6ENGCW0utx6lN>qgTW?|<&m}q*p;J-4k%31sAoO97G; zsHz#ZHp9lC(uzwdXjl}N(t5kQxKu6KEDyWwG7ICclaeHSwKh8zM+rKdx#CirPn8kJ z98lX&w9o`h!?wz5S#c?k$)vAEn!gbW^*M)jX+35brB!B1BxfkaP2!46X?^GDvu!6$ zfK@phSc6U&esX%D$C`I0byk-GL9V(~8RwnVr9vQZ5;x|F6u zQ>uovQ3@X6mqezTQZz#D2x3?;-o<&-XM=WeTHG#KI^1fifJ|}sF%zfXZ1^XCRr`-Mj3--Y`WdZw273>qH z*A?s&Sy9Yxoiw%RJ-H^`1^c`-%?kFFq^U13XWZ|i_b*YJ;t+5}H*w;Z3-*;cd%ZNZ zwS{D~r-C2#Vfy`g)1da-F`=>hzf?Rh>TL z)t6nTFB7GuI(;aoOqAlkl6FES-Kb8V2~$H)N1Z;&?wi)>E93hGBq^F(n=@`8NxSOw zb;wc<<5W72lchFsCQFmfI(=T0V*F;Jl&h1Qz9=7Z^SLOjs7lHV`bjYveCsn>eQ53h zt`h$j8CS#;NaKC`vds5{d&}AMS^wZbbbo*gyEmVvS4{NB zT@YPPetvtHFL99WvT(L5{v>g(j1KL~rRC#cSJNOVPt;dw30q9fSa#EoRUEpM0#F5A zC^NT`k}*YY+0$m&y-CRs)$}!r34sQ43bRO1PlP*(?#qp2My8Xc3=FpVV}5Ho_0p5aVSNpsvYN=eQsGHM)zo36VYQrw1P3N zG0_2-K0DZJaAA6hPQ&z=Og*wTeu>J4h61$@E&8OI90x3E`R|=dt<%_DzQEIYzL_m; zg>J1kK|XOyeIlHh%@RvnmV#~`Ms$BLxYf;r7LZV7J_%+u@KYDk6~fch@U*3vO%Ibz zV@qo#D~N$Ci-vLr%IM3ys9*$h)@^SFF+f35J=bQ{K}qkV;q+3Z+8rEzi_HW<6UF}=$4cXm%AZ{ zkWxtE|E8roG{Ro&cC%Qx!`rg5D(~be3@rYx@wcg!I~~7`gb|gl)idqpDcNpml;XH4 zE!?&LyO0H7mO^`fR?%+zjV$(xA0;_VEF|<2f7Vc9hnQOVL2^9L^6$YmuS-gUziV|R`>vH z5y28bX_)c>Sn>;#`O5>&_oX>z(Hwuq;D?0C6~jm1>;81i?p8)}^Vx+UG^+?HIbvSq ztg`F#TQ;i@-THa;9o}SOkt+6(W)`2yx|!AFeaF*6@=_h}Iwq!Y&g2tgj-t}Un3KvT zrs5`s@Rpo2m11JpXetF{`LUW9Rd)PB#ZL;XrKza6sUR;_UUGQuHIg%3Os6Hv&8G#? 
zB#GrB#4nZU@Rxo2#?#|YOuPB)=$RX_9h3K+PmY1sF*U5hUJ;%lktE|8sM2#y{MC$S z+$0zi?&Wu-4CLaXp`9z4D3Q9?Qp}5z0p&Q$Uc_SQWD+pQqa@8Cg`Mm}fGOqu0AC|2 z+KqT>OX@}6J2TPu&z%UiM1EU#swAZsfD*nX0jMWST!7^vO;{36bSxkm>aw5CoDecO z{RHb>mp=#5D!Yb12|gWpJmPPfN%C-8#emr&%vfJ9rwBf`iRdBa4khmXer*PjPZAz| zVltgjI*_HZBDOr|K;y;q$pM>ji}cSpL2{{X6#>llYa^vy>QF8RjS-a=B)yl*GLWvS ze4mjh@J{Ax^Q$2yw@;4D6P{*5m#o1)+yL*!Vf~YtbST|MCP}EWKz5)oUED(tk+fC( z1uAJ`+%P>@fKY|2rJLIHOGVVbG7`9^B>1gQke0P64gWO_Sg75O2f>O| z=AxQM17^*;jv4Pm_6xR*{Q^MFuAmakXGs)u-rSjbrrgaw)>aA$4lYcaPgk>?Ozc6# z%-^dzgi8)YDv<`|tX}d22I8O9PfVstR=bs>c5qhGcRgf|VJy~bOg3Mbp-0iXRi!Yk z)+YPxY0KMB^1OYSDW*Zq;z~}nH^hsv$1?)%fPTjKEiw`IwVPuqB!F97DtoLw!aPK1 zbDvAE#hMv)uD-`1nc;odI;;bv`|l~X1ogWoln2T@L5Z|^$Y7-4zmk{Y_pl}uDX}ps zuM!8^RO}V=l|CsSqqk+84l~j5djV{B@L2k-@F+WE6qcpUM=U+vp^8!g`s&d(BK(It z@Wr;5E=18_TNQ4!JzhqP1ClK*7w^hFF78ubb^NAQZ)9`gI{}ZGo^XzDI@F%3Fj@Gx9k&A*mdR-%I(7lCjM`S98A_ zt@$T6#P36J?GDy#>k@+p#q9jndWiBH9F<^fAZXSg(oDQ)9z00bJ?$nV@v*EsND{GA z^k(Cps%J(RH9_nfb_$ve3e+N*)ml4Yb8=|Z(z+!{PDzSRL{}vGhg%%Tkkydz^&_s) z$}MhdQQZ^P^^iQl>j6b5l_fElzMnj;i0el}cGEK3j>b$zv*%^7nAb%iOBejzwsgtgZBLi|-R;!RkRKjwEQf8aeekKuwG8qNL3Mt#ExER4zY-3^{$`Yt+zZg=6oWb#Jn(^gMgF zJz<9#shL2ATy<<^x@2(W?_c0!HU2FF@KOhWdQ058EI?pX9b=eYac?_;x1?__^UXSu zceR#LG!(oux?)>gaanVmicZ=b!J*z7aX}f&DQnOWT)|4i0;j6$EZ9kt<&HMofJ1PpF-W3e|9&x>aLja6o&k z{69jj=jK`%QZ0je8TMZ#(nYns8W62sKbE~-vBSEZ6~>-WNE@q63C)nAW#92r=Fu8r zb1{(Jq>bb@wUGvP_SZ%wx7_Suc2wKL8?hLMR1D1ZeS=rZ5TQ~dNp}yKC(_fUid+Se z&f!Hn!?;v$i@#^|H$H9)*0bwgNXfSm5_VSRqjq8&A2v#44M}I#($^_P;s)>uhY6Zv z)+*oM>>&ye@DO6>W)HE+b{5GNv^KuLlPT5g0J)Za1FsI5L^|&Wx?`B~4m(=ZF@o!; za3OMXLY#2Pa6{#LX8v9OQ+trI{8G1YvDoMahK+J);q@Z@+mWag&*ex`Y9hK6na#Xx z%}?={l;HSpsKl`wn6o;g5oPDV#*6GU6H?+Ak|X0ELM9@it|0)N-Lb-^SM`nNsVI{K zD*}}Qudyep&}Lc)I3cU+a@e9m31?r4%96%&33HDp$Bae3Bl|+ksA2|Q1samCFrb0t zR;+zB!2u9hjC;Zr*wN~fI%R3KLMV>PYJ!@h&#}25i$^MitE$DN*VH`MRkapBt2Ks> zvUfgQH8@o%+Y)L|57@Rt@OV8k1K}9kRndU*-&vFXAb@G!bxt|c+y)Ff?6^9F#4dGjsCJhN zcxJ`p(Ey^6tLjzd;doMiYH`g;UD3%PgiWpX6^4BjXPYgtOHEAzN&Xcw=BF?wPwkc;Z=KIN2@~}%vcXx 
z8-O%&O}*;ObqGa&W7TM)G)wgcxaK`@t&Hh1t}%op9<+xaVVWN6n~Z;thIa>Nl^>*M zwVjRVo8YA&gc!@R9*xS**)V4NT3Tc+SRH`A^c-!t<;r4Td0NZ}0K$bn*^*1@mQ%-3 zpT&C0_*^HCdk#=j?%0gW9ZeYy0O%4qG|OseSq*7ud|8!wcZFL;%X9*CdevIIVt-fo zI~4yGG_)sN?P%1@jcY~c;KIMlS-lt-6v$imy!yKa5Ox4HlQPv|8cmt;aNXJwX7kz1 z3rO@^WfjrH2?_97d*K9shn`C|nje~p{eUsiEGD=mP&(_XAa}$W6Ke|~+Bv3nrmdZ0 zYKQl)u_B?$(T(1xY6$Pkeug5WX6p_VfX>zO>0>=}ZY~ z`N6eXp)IGAHtGAtur(^-K0z%3@!6RHC2Ok_?bT+JwIVZ23rCdN zBCV3#VS3y2lM4Ju?XaWTR_>|}D_&BZLU7@Ha@xsG+oZ@Grhp*xc|(fu1h$;F#V2!> z&eXN7>yL24I421^-F!&QTp;+fkP1qYdyhOE#GKe-Y($f9mX?bI2{g!~+L0#JN?b<; zZMz=CtHYc4TFh^P+e?e_kf5caOk@^nvupfU4cj!`t{IWKK$7)maU{qS$D>Lc1Qy=Y zRNHoNg~jm9a1xiEVB574tMr@gTKoM5$Cg=uXNfI$jL#V-<-gkaNA4v>Jp()`)xH`# zz$_Go792GHm-hYJ2qng_isN5n$dpXS@kp+tk9ifK!VgtR{KvR3{S&m-e4}y^m9dw& zFNFg+GClM{h$KG9d%LR!9N_~45Zrc(qLo0hJ))%E(SV{lCRz)agL$9;vV$xfMJQ#4a`z_18CHQm7!Lz z1o5tVsIGXZmIW2xU)GhPy9OO)LHL9b=b@wWuAO%#OtEV4&~fLWQ+f9;1rQeVrzRAkKPTw!pjeHre7?X{B>X%rpVvLYU_rl4sYVD_ zEiI}k=CgWW&R23-ZCn(K)QJ0EKZHw^)F$!>CPeI;&+)gQnE8sIq=qfJtrx2( zTEk2c;lg%W^GYqms!Z=vB#?bdQ#T5yF0n(Du24MYBw4tu^L53ic3xKm)q<{a3F9r` z)@(4&H8Y?VoO#Jur#X+opfJi8-m-W;gFPm1wtSCSL&q_HG7`i6!MY7w&bSC$u9^(KF~Wc|ZkpqoVuBep9H-@K>PkS-jB1RxvM1$sST31*c`^-|Gr?{qz97!uu zmBk0)VpBM&bD^&Q4$10CRd*Tkg=JGq1n&E!IsTao7*MI*)P_98_~?<2f3-1!5S4PG zH3=jrLPkX?pyyH~>}#~60(u6vPUlFXS3A@FgNhk_Z)LiFaLa$6O!t44XI<36yrq*P zYvYv$Q^z62RA(?2WH*DIqQ<@qSWC03B@4DMaq*81KpafE4r|@%3hB|3PR~?X>^d3^ zBKcA++q6T!i3N~25E&fui^=CvG>KL{D zXuH2irO5l!F_J`6WL6~o%A}yjd)iTH4@CbdB%}+t_e*+!C+&d{eea~~gZ3;Sykr!> zM-KxC`xG2JL{W4=gFv$!4}qqDr9dO#{xe%Ns{31vQY9v2Y+4#EO;+Rv(YlN2QLxYP zsL6k~4TANTxi4J6J@@gG&ZRZzRxT(r1>9MSmiRN|n~Xor($ZtwFtQ~(FxWI-nj%+u z=;m``0zi8?Ui6&Ar(V73klFa1O$ShY7v>?ux`g?%B}Yfi|GSbgQIWUi67>QZvJj5r zA|F_1=`+NZvlPeZQ{3-UU4KDvsGG zoP-dE9=Dot1+yJ;%;qTCGJ6gb6nzix5+ziAT6{W_!tmPtJ$WtRK@teX+c~dsSb8U~ z5janylm$_V*WBdp%6JX_7q1CH@R|e1=NxJD+5;uP6ap1w?HaIYOb0})gnw9%W)XNa zuw%8Jxh=GgBg?T`uiO@FALX{7j9qR^O{_*qFZPjnRcN^_W_Vgw9D9+T37vlR%d#*X 
z7k3jhRh1j=`kd!qyO)SS>D*=6ks=G55jLH&#*m4s$>Y;5J|SP9a_coPD{*cjSO*>R z8d5{ve(8Dou!U(F#EXN70%j`F5dEU+{R0_2Dr-&(gqKL-urdkAhD?i8yVim7b}=^{ z+J@PYzOi+$q*$S3;vXjwnrbAPcsWSFMz}8o7+ZcS;w=+1Uk;i@RsI56s7qOKNVTwE zm)Np%#RuY_0|u&7nTG3pMbPXHZg1m~e=|Qf(v8}b!Giy}WWej7gpU}R+J48wMm%+; zNZJ-Wn&sp!=9G@_CxN3bHS`2~;w&2vc(lgSc{{pg`KEYt-Ofs_OOgF-#u}*6Jgr4r z@$xPo=5d`LWf{OOqlG@->Y2mflJ%Qy#xqONDD-X z%8>FQw+;!iWe1dYKubQT7cqk`MC40?1-9i+3{la|d^?NIrh(I6bSDsI}@azHiz+u@@cDAD)f7E?? zghy!OVWg@Pv?{2F#C9?kCF4#@#(xGVPI%%gexh)FTWLc4pKmibS7AtG5)aCy3@j$c z?9>R5#A9!_CgEEh?8~z2?|D!aYuUi2_xLE}TiTHhVf80aVuZ$j*(+z92jr-;RAjhZ zP6Zsz_6>*vWJzJF9osj^ru>X*o(z>!y5gER^1D_JLz(gPUXe^s8)zDPAcFOi0;KM3;PJ|$#=se$=`^wDVCACE6 zjRJ-W)gyuCNH6B{XCe$sayN|01*LBeO~xOy(DCI`eor<_mqPQEDB9>C(_5NkVc5NP zrN?W_Y}>%37FpbfPH)_&-7dU^KqXAWoC})xEQ8Wvjha%{Uhb&Q{1=t`TVXJI2AgD6 zw_aB*2`w(ZOHkNpE6C?EQA*SmC?Q6RyFmI1f0yMZO*2%!A*4tcjqkX}FU1rJ1&U2t zu1~x`6)jh%o)|%;-6$4MdFRXOoX{VC6$5VgO!R9OB4tjT-f9$jtI=uHK^#{j8#9W_ zf|d~7l)z)a9y9~fgA8yUOgYjrFg40GJIITWl$>X((I~yu5cQ3>WO5s}_hl4o>WeP0 zd|YCA)vcpC)EB~LtFyGIKnR(b7&ZIE&K?lSrE5kEwfL&h(C|V!kE`5oP8Pcq!nvEj`%CsNr>bduCC?YF zAT@6O?l0TB-qTy?0JVY}ZJl#UPY_20`i{l$S^Fa2kZzI%2QG5RWR<+cnLxDskocp` zbxwV2SqT(OFruLzuML3&_-s*Uwy9eJTEki0R$qRU zNv{^HlIy!_eoQb2iOA97`B#}zEWsoXa^~IEt~=h{Y7z(5J>)p|eq&a@2ov9$Tj3sV z(7d+0;+veMX%C4l!HZd@!{ zzj^`A;-lscHieHS42_VFDQ(ApUR&T6kAGQvF`+DXI-&^AH@QnF8E-XoNleM_RR_#c zO$w`SQGk9zmXZVJ7PrIh-K6gWY;*`GngYS0@~Nc8h=-5Rl2He3dMJS5aN9YX2a4c; zoi$qN4i4Z%a0LKd@fviuCLXirxI_pa9|NRD*(Ne9x|*~O!4LDGvoy3Kw+WGV8gNg9 z<&Xdv$=L-==p<=lz&W!DKRTL4{8sTe_Lml?sx)YGui;&3I8m7WRe8I9k|e)oM{YDF zN-3VlqPk7<#G(C}Gj?h+iPt_T$3jvIY&O7aSxw1Ew=hhSewKv%G3O*yL*w_bMx)Wt zvP7u{;QR}eOp;HqUkk*&>XF$Ll61G1yIou_N%<>6dd}7-N&n8u-u6f#JNgr4mG7^- zji<24_{-ug84=IY?<2N;CMwJ`?U7TmwbKZ{$w;usUrU^8o&X&MgndK?G#95| z>sc=HHhv(0#PZ#oCrp-ASYW~Nz!2`*EayzLJ`PFa9O+n@XkkJTCe|i+TW;rQQ-a+A z#i!gqCiQcKeJf$Z(a%p=J~0acWf)WQabT$Cd@MRmg@*SgzK3nu+J~qHPE8dUu{tAC zhc|jeO7lgvfjH=H!*yb%&A_UgWb<@aIW&M}0O;0v;lcfRz8>5tj)T{Ym_$O&HbxE< 
zaQ77d*_i=UZaJ0yfQk*)HD_`XJ!;DvvIZD&Hx$`m;UcB{u_;a&35;o*3@5_^rW9?# z2&J}$P}VFY2h!PJv%F{+3HM4~iL^i6q=?jj7p7KPvtOD{-;t@sKc-YnxD7rsHe{Lt zx=x8R_1 zS;8oPb&F#umLy^@(SRhsyaI4Y=k_C5{(yFP%JxTB#;=#Nq{(g{5JWhqs#rNCf~Zoh zEl7#G-zz7Xf&w^>+;$`ilnHNkI09qW9g_lBy32MSDHf2is=nVinSRpEvX;!09Gpx) z&S$CszQH~te;cla{z)XBwS`0&v7yN8M=bDWnHye!?n2iRhRDBXC5hX<+DfeQ_zR;l@3l__nV428}_ToCr zVmU;0WR6HugZ0urWpk-m5jix1rp3x);ONU_oFpWQLSa)`g+jU#a=6&sbszQdho52z zct`rnTyze6zBK%Vz8_2f;hC=Q&+7Zz(^>!g?b2ysx+^{R6^;g}q`#ocrgV-={HNQR zmo`SX`eJp}Y^CBc&TFRFz^`{XhA$94Vh@X%Fj)mCq?&DOkw*+7!SjI1&wZ1F?S4#I zbeL#V3lwPt?=d};IZXA1mP1G`5T>QTg!{CYi+NG`1j) ziXrPsot4K0ko2M}V}V(2do6JWo#Fg>YF$uT zmk^>gocn(*t&r2R;xk|)T?oCRHJ_-{x$J&n4Unzvu@ z${QHX%b>u7XJf(e11Di04`5q zA@ct2;4kfQHVY9`c3X04($faSv$$HUiiJL@U#*5u>bdh#KBRN`tss1!U-yK66}HH= zR`XY_dA1i_GiWY`EfU;ubcQX(B%*eLrB1Q<}c3b;W8H3%>l1EvTt#eiu|>lPDS@;=|W*4}3y{jg%j>=wf&K$R<2FL4QmSWKWiLtnP4B2REZC z0&dRh6WpBBC%Do158>vFKEaJ0Dgieu{1$Fb={dMLsZUgZX*~xwQ~C`?MmV^o6CZ<& zRt6H`=5f7aZ}THxW|%TM9TJbCjzTdkFPPC`H%Wv>WjAO0J`r$|oy*{&k7^EKLpEc} zMJyc1LoyB(O#ab+^kG6P+=nyL+mohX%6^wFeU-v*Ro?`v5vu z=N8Co{*M8hdBdTs_B`jf5?>B;@gWIvII5I{S->CzI1dO%)r8>tFsAVZeXr>V8r#R+ z6y{(g<{|LKjWUaFrKtEIE^XprGOwvdeyN8)QiLCvRS!N~E|C1K+01R|f% zmy^c5#E|oS2?+nfkber~L+PAAImB?}C_4n;LwC}N&@0obMYZ41?D9Ur-=(1^mjJbM zwJD?0_^lxm-}f~f%wkT1S#TQ6_gY&TsPZphAa&H~ir&Y87sJ0Z8?VdJj8fAG6gX`H z)=l~KsN+}K!^qj;7x=kCBiwL)q^%_xxR3?2Y=_|tPAV5HfX&w8&@cSCO9JJ!0@z*U zr4?MJyi*ypXkW5j&@DTR<-+p%0FcKk1?lWvQV|w{_)p*aYP;d^PrDOGx9&cbNfX@_ zdXmx8S+YP`o2hBi`nXu^n+ndu%2JXgxDs=rM2?RwTu&?gCaEkt4GOSqcWf|7Y(bLN z9hhe*j0HrIGq%ZUlNmd)w9tm$@?sAa_b4$fxJg#eGNrVI@+VE7m3{yFRW>4l56LQRldJ(_$MGmxJNg zEDI<=ZSo-Vcagsx-u8-nSiwu2#gR_X(agxyv5xUol-Wxb2s0>PnmMXAq460(#g*e8 zI)l_h4SX_88LZOUXaO5O(29I32Jry(Z3E?;C|s}tD5%5QyCUb(kWuYm)eF;*JD5!% zS)@+Bx%VMXL#jMWr#VqpXNH^#D`Z)+oIGC41q#186xZltie`BIltM=Ix8CcXz>T=^r*HC zY(fgTRulAU&*?{x5%*Jwv|njeZ1Y6&>tJ-JG_ZB$CGI0-q%wior6 z_4-aiEHsS~#(W(W@!t1!3==>4+*oAAhuK$|80Exdy-5B0DDsEjNQ&smy&8bxkihj_ 
znsJxnL%Mgu8vjiZz0W*kiMWPbSphR6$Qa!v#5KBN^a#|xa#_tW?u@KXEK(U+&BdY# z1+QFExA0^Fq)8%^9!RFjT9&lQo)Oa-G2q1XdL}9ahD=n*2Zhq|-Z$mK{#O?5 zg4I5bxT2+)o{2)j#%>3yB{o1Gj#iUO(TnL3_OHqD&fon>;8@KZo;4OSQ*mg7Gg{hjw1t{oL@%7jk%Iq4rEeB6lF)ZcsMb__HcG>|$%e3Q| zH4wa1ohyYL$Z_V;nO)R7w@Wyf+2yqt>Ym?4S^-9oOb4o++eJ`CP19jyEBCQtnVs#@ zXyS>ThuLoPjPJ8ue4p6Kx8bdaqYKKal2_J`*{;T5Q*j}@&5*e))Xns&^X1ON^+72v zbUxdwoCe8c7;FY+4Yn#S#uFOY-f*V3e~?yCT;>2uJT@wT1`i~F8IT@eK=VKXElhCXiJH%>Z$s{lU zhtUgUO}LLY%z=AhPbHW_FUn2+jD-+{afF7lD!zS4@}?S7AMV z#&3$aC|(J1A>5j{yFxX7KD5E8=jery=wL%6L=Oi4)z9}Ql{7#8c_+<}|AR=Aof9lP z+?Gq`?S{fbkf{8*>2AqYV#VLe%faAxbl0NU1SnB_k;TP!)H}JVS6fw+=7$uKJ*nwC z#R^u7C+#n}bV*5!IGf5fI&CrN)Xao4Txu$0-(c`Al1ixJtd>5VU%H&vC-Qh&pORDuLNt_`7Tf--;k^!}!R$oEHJyeu7VG_kFS+?GHyBKYH!>`Z zFXpoIc=7`yk&7~Lwd&O*RT4p)c}7vB&?n|ph;4mL8jbkfUwc5mh`6Zn>ouRWcLeh5 z*VGs9=bgk6GWxg&9NoOe1vCJjU$YJgbIB_7Qwb*!L40;}alFWgJ6L! zE!u++4>)KYa^iUX0X0B-cetS({cA>oEq5mRgZAohdnU}~oKzU6MM#m!Y7r`%RSr&w z2CJ0=dN)`=6#Z7{*DC_7*`^q1H^N?8dv*TQWrOv5b$Ikxmnkb0ARJG7^+J7%+p8OE z5TsO#$Gf=aNsTRcMvs#;l3YYxE`bXg6)7EPLi>38fu_XTV!qyFAJ6XJCO7_Gw@P6Q zYOZ2eoSfJ67PG+65jN8xUBD!tP%uo517OF1f^fDAZwZPOUbimxReUaWncjQBjVwNo zB4z6IEQTEhoGzwN#-5Buss0>K*^}YS6Xm;zAS)3^bNYm67rGkHCgrK_g{;MM#2x=d z9=YrfE!N@%+Sm%h9Xb~yBsVuECK=;QTpsVEC5XDfXS~pNxQcCSD+DDkh)AhSJG7^y zoN?_g{E+;{7gvif&@?6X$m?9ddr^8teDR4Vu6~(W_(|78?D~n|F1n8%uh8viP9tj! 
z>Szk8G?nd$ke)ruJv(F1*mUXHKKE?2`*7WV9p|erIl))o&`h(yY_q_zw18Y^FIBHA zLKvUJ-R^UW&-ggyI)U(!iX_Nlx1{>#S$7nJR*RQ-%VMR!oJQ%6K1nHwDGtw{qc(in z-Cs8*2cFRrM9$#zT1lVJX>Yt-N3E8)jBt-LI3nB~*z${zv74tc_#ZCuxfiX~m zD}tOIRGZ(z1yz~G&e50-2sHL$WB-Xwlj-RHyfgap7Bzd(HyclFRiB_zr~Pe-oUtb$ zJbf~2PoQ7&L}1LrRO3Ai5FK>^z~|rqlYw*Ej|= z?4`nE1__a?7%uhI&G;jN6~f$yL)RC0DgH{n#`RmlU!^D^xFiqv+`BQ+Iu*j$-bUSUJuh_e55xSnmQlTqqFG<^ysu2S!j85OpoN|RB3dixAQ4C z@T`7Auo?ZfU8xqtqk1|I6yAaEP~qL@`119dMy4eWsJyJoIEp9vEw(sL!&7><*aGQ- zs-2+KfSJNwhlayHqnz9M;&`_9t_wKmEnsuxRxo^zs|@2hqTh49c7-zEmW`bkSQ5Em zuwIWIt#}EDHW?wE@mrOn4=Ohog%_xH6`x0X4x1D^x-C0rpZy>cu)p8tYyU5h^O)ne z{ak0*)ClW6^6peDY+oyNs`&jpGbCK?H5){E$@0UCTpy#5S#Ls2k?25PF zY0@#2&G0Ga(IaQhs#YX?gZ_pjY(bMPJ_DdnuV01oYp)zBF7WUf^=*3iRI!!G*5U1k z8v{HD@i7zZkN&zQ0(Hy{%6*&q6(Y$tAta+<^6<}kX!-7^=;PTIlc}CZ|m*f zt-+4<^vh}i(Z%$d1kB7muI5u+JKNkxEgsn}QG^wtaSfl+JUZmXdS?A83e7=ijczyd z59HWOkAi`|NF-@rtM#XQ;GbH5h8I+O>I2Du>N}lKsPDE-2+$zkW0*kXzDDG>6`IS{ zC$}?}2Et9B3}F%u9pgJV-)@+tjIDiBDZC92FEWE?9XjL=xYL&cSA?M!RPqptC_I2d z9(WOi2T*5IV`%s>{2O71;Z8AJHwmOnW*4dzGi-@cvI>b(oi+tnw&gso*UNVbUnjaf z;ZE-|!eubtC1{D{1_Mc3t|ga1n#wD?;u&&DnRMIpsJ3_4F)Hv+cf%4Zw%O(k8!Jx@<8Q z0@738Qs1FX05Kj8_Kh2Mfg7C`wVrVrG7abz|0O2bP zh@!Usj`V6KNB`J%xuHRo#CBba&+pdU%MtDG&v5dNlVDe^Ob7}k-|u|IWF-X zHm0TFp7r=avh>1kHiYCkVonV{*k6@U+{(YM=v6%~;L+JthB5nx@lS&2&CJ1lxsg7~ zufv%2j*fEtaNycOP_WSK&Zwx&%@qk*>2rL>rxBjJ4OC5~Qzl*~5Q6#W)jS~kvJ